diff --git a/.clang-format b/.clang-format index 11a44d587c..63ebecbce1 100644 --- a/.clang-format +++ b/.clang-format @@ -19,3 +19,6 @@ BreakBeforeTernaryOperators: false IndentWrappedFunctionNames: true ContinuationIndentWidth: 4 ObjCSpaceBeforeProtocolList: true +--- +Language: Cpp +IncludeBlocks: Regroup diff --git a/.gitignore b/.gitignore index c43e108f51..a86b405fab 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ *_proto_cpp.xml *~ .*.sw? +.cache .cipd .clangd .classpath diff --git a/.gn b/.gn index 807c0a1685..d6f84df080 100644 --- a/.gn +++ b/.gn @@ -11,38 +11,20 @@ import("//build/dotfile_settings.gni") # The location of the build configuration file. buildconfig = "//build/config/BUILDCONFIG.gn" +# The python interpreter to use by default. On Windows, this will look +# for python3.exe and python3.bat. +script_executable = "python3" + # The secondary source root is a parallel directory tree where # GN build files are placed when they can not be placed directly # in the source tree, e.g. for third party source trees. secondary_source = "//build/secondary/" -# These are the targets to check headers for by default. The files in targets -# matching these patterns (see "gn help label_pattern" for format) will have +# These are the targets to skip header checking by default. The files in targets +# matching these patterns (see "gn help label_pattern" for format) will not have # their includes checked for proper dependencies when you run either # "gn check" or "gn gen --check". -check_targets = [ - ":webrtc_common", - "//api/*", - "//audio/*", - "//backup/*", - "//call/*", - "//common_audio/*", - "//common_video/*", - "//examples/*", - "//logging/*", - "//media/*", - "//modules/*", - "//p2p/*", - "//pc/*", - "//rtc_base/*", - "//rtc_tools/*", - "//sdk/*", - "//stats/*", - "//system_wrappers/*", - "//test/*", - "//video/*", - "//third_party/libyuv/*", -] +no_check_targets = [ "//third_party/icu/*" ] # These are the list of GN files that run exec_script. 
This whitelist exists # to force additional review for new uses of exec_script, which is strongly @@ -62,7 +44,7 @@ default_args = { mac_sdk_min = "10.12" - ios_deployment_target = "10.0" + ios_deployment_target = "12.0" # The SDK API level, in contrast, is set by build/android/AndroidManifest.xml. android32_ndk_api_level = 16 diff --git a/.vpython b/.vpython index fb75db51d8..df838dccf8 100644 --- a/.vpython +++ b/.vpython @@ -52,7 +52,7 @@ wheel: < wheel: < name: "infra/python/wheels/six-py2_py3" - version: "version:1.10.0" + version: "version:1.15.0" > wheel: < name: "infra/python/wheels/pbr-py2_py3" @@ -66,3 +66,11 @@ wheel: < name: "infra/python/wheels/mock-py2_py3" version: "version:2.0.0" > +wheel: < + name: "infra/python/wheels/protobuf-py2_py3" + version: "version:3.13.0" +> +wheel: < + name: "infra/python/wheels/requests-py2_py3" + version: "version:2.13.0" +> diff --git a/AUTHORS b/AUTHORS index 499c340639..b4d4100c6a 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,127 +1,154 @@ -# Names should be added to this file like so: -# Name or Organization +# Names should be added to this file with this pattern: +# +# For individuals: +# Name +# +# For organizations: +# Organization +# +# See python fnmatch module documentation for more information. +# +# Please keep the list sorted. +# BEGIN individuals section. +Aaron Clauson Adam Fedor Akshay Shah -Alex Henrie Alexander Brauckmann Alexandre Gouaillard +Alex Henrie Andrew MacDonald Andrey Efremov +Andrew Johnson Anil Kumar Ben Strong +Berthold Herrmann Bob Withers Bridger Maxwell -Chris Tserng Christophe Dumez +Chris Tserng Cody Barnes Colin Plumb Cyril Lashkevich +CZ Theng +Danail Kirov +Dave Cowart David Porter Dax Booysen -Danail Kirov +Dennis Angelo +Dharmesh Chauhan +Di Wu Dirk-Jan C. Binnema Dmitry Lizin +Eike Rathke Eric Rescorla, RTFM Inc. Frederik Riedel, Frogg GmbH Giji Gangadharan Graham Yoakum Gustavo Garcia +Hans Knoechel Hugues Ekra Jake Hilton James H. 
Brown +Jan Grulich Jan Kalab Jens Nielsen +Jesús Leganés-Combarro Jiawei Ou Jie Mao +Jiwon Kim Jose Antonio Olivera Ortega +Keiichi Enomoto Kiran Thind +Korniltsev Anatoly +Lennart Grahl Luke Weber Maksim Khobat Mallikarjuna Rao V Manish Jethani Martin Storsjo Matthias Liebig +Maxim Pavlov Maxim Potapov Michael Iedema +Michel Promonet +Miguel Paris Mike Gilbert +Min Wang Mo Zanaty Pali Rohar Paul Kapustin -Philipp Hancke Peng Yu +Philipp Hancke +Piasy Xu Rafael Lopez Diez Ralph Giles +Raman Budny +Ramprakash Jelari Riku Voipio Robert Bares Robert Nagy Ryan Yoakum -Satender Saroha Sarah Thompson +Satender Saroha Saul Kravitz +Sergio Garcia Murillo Silviu Caragea Stefan Gula +Stephan Hartmann Steve Reid Tarun Chawla +Todd Wong +Tomas Popela Trevor Hayes Uladzislau Susha -Vladimir Beloborodov Vicken Simonian Victor Costan +Vladimir Beloborodov Xiaohong Xu Xiaolei Yu Yura Yaroshevich Yuriy Pavlyshak -Hans Knoechel -Korniltsev Anatoly -Todd Wong -Sergio Garcia Murillo -Maxim Pavlov Yusuke Suzuki -Piasy Xu -Tomas Popela -Jan Grulich -Jiwon Kim -Eike Rathke -Michel Promonet -Min Wang -Ramprakash Jelari -CZ Theng -Miguel Paris -Raman Budny -Stephan Hartmann +# END individuals section. -&yet LLC <*@andyet.com> +# BEGIN organizations section. +8x8 Inc. <*@8x8.com> +8x8 Inc. <*@sip-communicator.org> Agora IO <*@agora.io> ARM Holdings <*@arm.com> BroadSoft Inc. <*@broadsoft.com> +CoSMo Software Consulting, Pte Ltd <*@cosmosoftware.io> Facebook Inc. <*@fb.com> Google Inc. <*@google.com> +Highfive, Inc. <*@highfive.com> HyperConnect Inc. <*@hpcnt.com> -Life On Air Inc. <*@lifeonair.com> Intel Corporation <*@intel.com> +Life On Air Inc. <*@lifeonair.com> Microsoft Corporation <*@microsoft.com> MIPS Technologies <*@mips.com> Mozilla Foundation <*@mozilla.com> +Netgem S.A. <*@netgem.com> NVIDIA Corporation <*@nvidia.com> Opera Software ASA <*@opera.com> Optical Tone Ltd <*@opticaltone.com> Pengutronix e.K. <*@pengutronix.de> RingCentral, Inc. 
<*@ringcentral.com> +Signal Messenger, LLC <*@signal.org> Sinch AB <*@sinch.com> struktur AG <*@struktur.de> Telenor Digital AS <*@telenor.com> Temasys Communications <*@temasys.io> The Chromium Authors <*@chromium.org> The WebRTC Authors <*@webrtc.org> +Threema GmbH <*@threema.ch> +Tuple, LLC <*@tuple.app> Twilio, Inc. <*@twilio.com> +Vewd Software AS <*@vewd.com> +Videona Socialmedia <*@videona.com> Videxio AS <*@videxio.com> Vidyo, Inc. <*@vidyo.com> Vonage Holdings Corp. <*@vonage.com> Wire Swiss GmbH <*@wire.com> -Vewd Software AS <*@vewd.com> -Highfive, Inc. <*@highfive.com> -CoSMo Software Consulting, Pte Ltd <*@cosmosoftware.io> -Tuple, LLC <*@tuple.app> -Videona Socialmedia <*@videona.com> -Threema GmbH <*@threema.ch> +&yet LLC <*@andyet.com> +# END organizations section. diff --git a/BUILD.gn b/BUILD.gn index f7d15f47a9..bc51df7c07 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -14,6 +14,7 @@ import("//build/config/linux/pkg_config.gni") import("//build/config/sanitizers/sanitizers.gni") +import("//third_party/google_benchmark/buildconfig.gni") import("webrtc.gni") if (rtc_enable_protobuf) { import("//third_party/protobuf/proto_library.gni") @@ -54,6 +55,7 @@ if (!build_with_chromium) { "modules/remote_bitrate_estimator:rtp_to_text", "modules/rtp_rtcp:test_packet_masks_metrics", "modules/video_capture:video_capture_internal_impl", + "net/dcsctp:dcsctp_unittests", "pc:peerconnection_unittests", "pc:rtc_pc_unittests", "rtc_tools:rtp_generator", @@ -88,6 +90,9 @@ if (!build_with_chromium) { ] } } + if (target_os == "android") { + deps += [ "tools_webrtc:binary_version_check" ] + } } } @@ -129,6 +134,14 @@ config("common_inherited_config") { defines += [ "RTC_DISABLE_CHECK_MSG" ] } + if (rtc_enable_avx2) { + defines += [ "WEBRTC_ENABLE_AVX2" ] + } + + if (rtc_enable_win_wgc) { + defines += [ "RTC_ENABLE_WIN_WGC" ] + } + # Some tests need to declare their own trace event handlers. 
If this define is # not set, the first time TRACE_EVENT_* is called it will store the return # value for the current handler in an static variable, so that subsequent @@ -169,7 +182,7 @@ config("common_inherited_config") { "WEBRTC_IOS", ] } - if (is_linux) { + if (is_linux || is_chromeos) { defines += [ "WEBRTC_LINUX" ] } if (is_mac) { @@ -254,7 +267,7 @@ config("common_config") { } if (rtc_enable_sctp) { - defines += [ "HAVE_SCTP" ] + defines += [ "WEBRTC_HAVE_SCTP" ] } if (rtc_enable_external_auth) { @@ -265,6 +278,10 @@ config("common_config") { defines += [ "WEBRTC_USE_H264" ] } + if (rtc_use_absl_mutex) { + defines += [ "WEBRTC_ABSL_MUTEX" ] + } + if (rtc_disable_logging) { defines += [ "RTC_DISABLE_LOGGING" ] } @@ -337,6 +354,13 @@ config("common_config") { # recognize. cflags += [ "-Wunused-lambda-capture" ] } + + if (use_xcode_clang) { + # This may be removed if the clang version in xcode > 12.4 includes the + # fix https://reviews.llvm.org/D73007. + # https://bugs.llvm.org/show_bug.cgi?id=44556 + cflags += [ "-Wno-range-loop-analysis" ] + } } if (is_win && !is_clang) { @@ -410,11 +434,7 @@ config("common_config") { } config("common_objc") { - libs = [ "Foundation.framework" ] - - if (rtc_use_metal_rendering) { - defines = [ "RTC_SUPPORTS_METAL" ] - } + frameworks = [ "Foundation.framework" ] } if (!build_with_chromium) { @@ -432,7 +452,6 @@ if (!build_with_chromium) { defines = [] deps = [ - ":webrtc_common", "api:create_peerconnection_factory", "api:libjingle_peerconnection_api", "api:rtc_error", @@ -497,6 +516,10 @@ if (!build_with_chromium) { rtc_executable("webrtc_lib_link_test") { testonly = true + # This target is used for checking to link, so do not check dependencies + # on gn check. + check_includes = false # no-presubmit-check TODO(bugs.webrtc.org/12785) + sources = [ "webrtc_lib_link_test.cc" ] deps = [ # NOTE: Don't add deps here. 
If this test fails to link, it means you @@ -507,15 +530,6 @@ if (!build_with_chromium) { } } -rtc_source_set("webrtc_common") { - # Client code SHOULD NOT USE THIS TARGET, but for now it needs to be public - # because there exists client code that uses it. - # TODO(bugs.webrtc.org/9808): Move to private visibility as soon as that - # client code gets updated. - visibility = [ "*" ] - sources = [ "common_types.h" ] -} - if (use_libfuzzer || use_afl) { # This target is only here for gn to discover fuzzer build targets under # webrtc/test/fuzzers/. @@ -525,22 +539,24 @@ if (use_libfuzzer || use_afl) { } } -if (rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { rtc_test("rtc_unittests") { testonly = true deps = [ - ":webrtc_common", "api:compile_all_headers", "api:rtc_api_unittests", "api/audio/test:audio_api_unittests", "api/audio_codecs/test:audio_codecs_api_unittests", + "api/numerics:numerics_unittests", "api/transport:stun_unittest", "api/video/test:rtc_api_video_unittests", "api/video_codecs/test:video_codecs_api_unittests", + "api/voip:compile_all_headers", "call:fake_network_pipe_unittests", "p2p:libstunprober_unittests", "p2p:rtc_p2p_unittests", + "rtc_base:callback_list_unittests", "rtc_base:rtc_base_approved_unittests", "rtc_base:rtc_base_unittests", "rtc_base:rtc_json_unittests", @@ -548,9 +564,10 @@ if (rtc_include_tests) { "rtc_base:rtc_operations_chain_unittests", "rtc_base:rtc_task_queue_unittests", "rtc_base:sigslot_unittest", + "rtc_base:untyped_function_unittest", "rtc_base:weak_ptr_unittests", "rtc_base/experiments:experiments_unittests", - "rtc_base/synchronization:sequence_checker_unittests", + "rtc_base/system:file_wrapper_unittests", "rtc_base/task_utils:pending_task_safety_flag_unittests", "rtc_base/task_utils:to_queued_task_unittests", "sdk:sdk_tests", @@ -580,6 +597,16 @@ if (rtc_include_tests) { } } + if (enable_google_benchmarks) { + rtc_test("benchmarks") { + testonly = true + deps = [ + 
"rtc_base/synchronization:mutex_benchmark", + "test:benchmark_main", + ] + } + } + # This runs tests that must run in real time and therefore can take some # time to execute. They are in a separate executable to avoid making the # regular unittest suite too slow to run frequently. @@ -687,6 +714,7 @@ if (rtc_include_tests) { rtc_test("voip_unittests") { testonly = true deps = [ + "api/voip:compile_all_headers", "api/voip:voip_engine_factory_unittests", "audio/voip/test:audio_channel_unittests", "audio/voip/test:audio_egress_unittests", diff --git a/DEPS b/DEPS index c1875dba2a..c24608a98a 100644 --- a/DEPS +++ b/DEPS @@ -1,44 +1,49 @@ # This file contains dependencies for WebRTC. gclient_gn_args_file = 'src/build/config/gclient_args.gni' -gclient_gn_args = [] +gclient_gn_args = [ + 'generate_location_tags', +] vars = { # By default, we should check out everything needed to run on the main # chromium waterfalls. More info at: crbug.com/570091. 'checkout_configuration': 'default', 'checkout_instrumented_libraries': 'checkout_linux and checkout_configuration == "default"', - 'chromium_revision': '0cd53b97fa7b20c9e008c29b2760c1a9fce5117d', + 'chromium_revision': '6d8828f6a6eea769a05fa1c0b7acf10aca631d4a', + + # Keep the Chromium default of generating location tags. + 'generate_location_tags': True, } deps = { # TODO(kjellander): Move this to be Android-only once the libevent dependency # in base/third_party/libevent is solved. 
'src/base': - 'https://chromium.googlesource.com/chromium/src/base@234742ffa00d458eaa1f6918942c8845cd4e506f', + 'https://chromium.googlesource.com/chromium/src/base@e1acc6a30942360d4789d6c245cf7933e7e9bbec', 'src/build': - 'https://chromium.googlesource.com/chromium/src/build@814cd0c4415975d9779b9a1eb291f05aac05a98b', + 'https://chromium.googlesource.com/chromium/src/build@826926008327af276adbaafcfa92b525eb5bf326', 'src/buildtools': - 'https://chromium.googlesource.com/chromium/src/buildtools@64ebbe62e1288f58f6c95c9335ae30e8b811dab5', - # Gradle 4.3-rc4. Used for testing Android Studio project generation for WebRTC. + 'https://chromium.googlesource.com/chromium/src/buildtools@2500c1d8f3a20a66a7cbafe3f69079a2edb742dd', + # Gradle 6.6.1. Used for testing Android Studio project generation for WebRTC. 'src/examples/androidtests/third_party/gradle': { - 'url': 'https://chromium.googlesource.com/external/github.com/gradle/gradle.git@89af43c4d0506f69980f00dde78c97b2f81437f8', + 'url': 'https://chromium.googlesource.com/external/github.com/gradle/gradle.git@f2d1fb54a951d8b11d25748e4711bec8d128d7e3', 'condition': 'checkout_android', }, 'src/ios': { - 'url': 'https://chromium.googlesource.com/chromium/src/ios@c3b92e28640c20e4de456f1097a2139416355c28', + 'url': 'https://chromium.googlesource.com/chromium/src/ios@695a3541172406518e45c377048956a3e5270d7c', 'condition': 'checkout_ios', }, 'src/testing': - 'https://chromium.googlesource.com/chromium/src/testing@6bd825eb6a99d232ab7300c91c7d0515ff418708', + 'https://chromium.googlesource.com/chromium/src/testing@d749d1b98b475ea15face1c9d2311ed6b8e4b91f', 'src/third_party': - 'https://chromium.googlesource.com/chromium/src/third_party@52a7c4e5e09a39a478900f4a861f59d65cb28a9a', + 'https://chromium.googlesource.com/chromium/src/third_party@c1d40d8b399db4c5ebab5e5022a002dca5b3dbb2', 'src/buildtools/linux64': { 'packages': [ { 'package': 'gn/gn/linux-amd64', - 'version': 'git_revision:ab32747ae7a399c57b04280f38e49b8fdf237a8a', + 
'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', } ], 'dep_type': 'cipd', @@ -47,8 +52,8 @@ deps = { 'src/buildtools/mac': { 'packages': [ { - 'package': 'gn/gn/mac-amd64', - 'version': 'git_revision:ab32747ae7a399c57b04280f38e49b8fdf237a8a', + 'package': 'gn/gn/mac-${{arch}}', + 'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', } ], 'dep_type': 'cipd', @@ -58,7 +63,7 @@ deps = { 'packages': [ { 'package': 'gn/gn/windows-amd64', - 'version': 'git_revision:ab32747ae7a399c57b04280f38e49b8fdf237a8a', + 'version': 'git_revision:24e2f7df92641de0351a96096fb2c490b2436bb8', } ], 'dep_type': 'cipd', @@ -66,13 +71,13 @@ deps = { }, 'src/buildtools/clang_format/script': - 'https://chromium.googlesource.com/chromium/llvm-project/cfe/tools/clang-format.git@96636aa0e9f047f17447f2d45a094d0b59ed7917', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/clang/tools/clang-format.git@99803d74e35962f63a775f29477882afd4d57d94', 'src/buildtools/third_party/libc++/trunk': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git@d9040c75cfea5928c804ab7c235fed06a63f743a', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx.git@79a2e924d96e2fc1e4b937c42efd08898fa472d7', 'src/buildtools/third_party/libc++abi/trunk': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git@196ba1aaa8ac285d94f4ea8d9836390a45360533', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi.git@cb34896ebd62f93f708ff9aad26159cf11dde6f4', 'src/buildtools/third_party/libunwind/trunk': - 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git@d999d54f4bca789543a2eb6c995af2d9b5a1f3ed', + 'https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind.git@e7ac0f84fc2f2f8bd2ad151a7348e7120d77648a', 'src/tools/clang/dsymutil': { 'packages': [ @@ -89,7 +94,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_system_sdk', - 'version': '4IAlMU4jo15KjMPF3EUnrPZs0RYoPW8n9jSJ4dvHDWUC', + 'version': 'no8ss5nRg6uYDM08HboypuIQuix7bS1kVqRGyWmwP-YC', }, ], 'condition': 'checkout_android', @@ -111,7 +116,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_tools_bundletool', - 'version': 'Yyhy8FicC3R0ATRzWqGNh4ffsCLz_0nu_BjmNPAdhvIC', + 'version': 'FIj7ed-law2zMv41QhjEXabkaJ7aN2ztmE0Tv3Z_gFUC', }, ], 'condition': 'checkout_android', @@ -119,20 +124,22 @@ deps = { }, 'src/third_party/boringssl/src': - 'https://boringssl.googlesource.com/boringssl.git@78b3337a10a7f7b3495b6cb8140a74e265290898', + 'https://boringssl.googlesource.com/boringssl.git@a10017c548b0805eb98e7847c37370dbd37cd8d6', 'src/third_party/breakpad/breakpad': - 'https://chromium.googlesource.com/breakpad/breakpad.git@2ffe116322aa4373d408a72b665fa7fe7a504d4a', + 'https://chromium.googlesource.com/breakpad/breakpad.git@b95c4868b10f69e642666742233aede1eb653012', 'src/third_party/catapult': - 'https://chromium.googlesource.com/catapult.git@503f81b8fe09ed0850144493713ad66bd72620fd', + 'https://chromium.googlesource.com/catapult.git@3345f09ed65020a999e108ea37d30b49c87e14ed', 'src/third_party/ced/src': { 'url': 'https://chromium.googlesource.com/external/github.com/google/compact_enc_det.git@ba412eaaacd3186085babcd901679a48863c7dd5', }, 'src/third_party/colorama/src': 'https://chromium.googlesource.com/external/colorama.git@799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8', + 'src/third_party/crc32c/src': + 'https://chromium.googlesource.com/external/github.com/google/crc32c.git@fa5ade41ee480003d9c5af6f43567ba22e4e17e6', 'src/third_party/depot_tools': - 'https://chromium.googlesource.com/chromium/tools/depot_tools.git@5a7be3da2119b1a8614fed5609412638693f7025', + 'https://chromium.googlesource.com/chromium/tools/depot_tools.git@a806594b95a39141fdbf1f359087a44ffb2deaaf', 'src/third_party/ffmpeg': - 
'https://chromium.googlesource.com/chromium/third_party/ffmpeg.git@be66dc5fd0e3c53646107b2dc5d7594a869ebdc6', + 'https://chromium.googlesource.com/chromium/third_party/ffmpeg.git@05c195662f0527913811827ba253cb93758ea4c0', 'src/third_party/findbugs': { 'url': 'https://chromium.googlesource.com/chromium/deps/findbugs.git@4275d9ac8610db6b1bc9a5e887f97e41b33fac67', 'condition': 'checkout_android', @@ -143,12 +150,15 @@ deps = { 'condition': 'checkout_linux', }, 'src/third_party/freetype/src': - 'https://chromium.googlesource.com/chromium/src/third_party/freetype2.git@62fea391fa9993f8c1d206a50080d690178ce518', + 'https://chromium.googlesource.com/chromium/src/third_party/freetype2.git@d3dc2da9b27af5b90575d62989389cc65fe7977c', 'src/third_party/harfbuzz-ng/src': - 'https://chromium.googlesource.com/external/github.com/harfbuzz/harfbuzz.git@100d40c827eb8336b2b671856f151275d47e71ad', + 'https://chromium.googlesource.com/external/github.com/harfbuzz/harfbuzz.git@cc9bb294919e846ef8a0731b5e9f304f95ef3bb8', + 'src/third_party/google_benchmark/src': { + 'url': 'https://chromium.googlesource.com/external/github.com/google/benchmark.git@e991355c02b93fe17713efe04cbc2e278e00fdbd', + }, # WebRTC-only dependency (not present in Chromium). 
'src/third_party/gtest-parallel': - 'https://chromium.googlesource.com/external/github.com/google/gtest-parallel@df0b4e476f98516cea7d593e5dbb0fca44f6ee7f', + 'https://chromium.googlesource.com/external/github.com/google/gtest-parallel@11cce5c2872be4849c087afc7d19fbed390fa928', 'src/third_party/google-truth': { 'packages': [ { @@ -160,26 +170,32 @@ deps = { 'dep_type': 'cipd', }, 'src/third_party/googletest/src': - 'https://chromium.googlesource.com/external/github.com/google/googletest.git@a09ea700d32bab83325aff9ff34d0582e50e3997', + 'https://chromium.googlesource.com/external/github.com/google/googletest.git@4ec4cd23f486bf70efcc5d2caa40f24368f752e3', 'src/third_party/icu': { - 'url': 'https://chromium.googlesource.com/chromium/deps/icu.git@f2223961702f00a8833874b0560d615a2cc42738', + 'url': 'https://chromium.googlesource.com/chromium/deps/icu.git@b9dfc58bf9b02ea0365509244aca13841322feb0', }, 'src/third_party/jdk': { 'packages': [ { 'package': 'chromium/third_party/jdk', - 'version': 'PfRSnxe8Od6WU4zBXomq-zsgcJgWmm3z4gMQNB-r2QcC', + 'version': 'JhpgSvTpgVUkoKe56yQmYaR1jXNcY8NqlltA0mKIO4EC', }, + ], + 'condition': 'host_os == "linux" and checkout_android', + 'dep_type': 'cipd', + }, + 'src/third_party/jdk/extras': { + 'packages': [ { 'package': 'chromium/third_party/jdk/extras', - 'version': 'fkhuOQ3r-zKtWEdKplpo6k0vKkjl-LY_rJTmtzFCQN4C', + 'version': '-7m_pvgICYN60yQI3qmTj_8iKjtnT4NXicT0G_jJPqsC', }, - ], + ], 'condition': 'host_os == "linux" and checkout_android', 'dep_type': 'cipd', }, 'src/third_party/jsoncpp/source': - 'https://chromium.googlesource.com/external/github.com/open-source-parsers/jsoncpp.git@645250b6690785be60ab6780ce4b58698d884d11', # from svn 248 + 'https://chromium.googlesource.com/external/github.com/open-source-parsers/jsoncpp.git@9059f5cad030ba11d37818847443a53918c327b1', # from svn 248 'src/third_party/junit/src': { 'url': 'https://chromium.googlesource.com/external/junit.git@64155f8a9babcfcf4263cf4d08253a1556e75481', 'condition': 
'checkout_android', @@ -188,21 +204,23 @@ deps = { 'src/third_party/libFuzzer/src': 'https://chromium.googlesource.com/chromium/llvm-project/compiler-rt/lib/fuzzer.git@debe7d2d1982e540fbd6bd78604bf001753f9e74', 'src/third_party/libjpeg_turbo': - 'https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@7e3ad79800a7945fb37173149842b494ab8982b2', + 'https://chromium.googlesource.com/chromium/deps/libjpeg_turbo.git@e9e400e0af31baf72d235655850bc00e55b6c145', 'src/third_party/libsrtp': - 'https://chromium.googlesource.com/chromium/deps/libsrtp.git@650611720ecc23e0e6b32b0e3100f8b4df91696c', + 'https://chromium.googlesource.com/chromium/deps/libsrtp.git@5b7c744eb8310250ccc534f3f86a2015b3887a0a', 'src/third_party/libaom/source/libaom': - 'https://aomedia.googlesource.com/aom.git@c810066815b80dd1ac8ade15170ce962d6646368', + 'https://aomedia.googlesource.com/aom.git@aba245dde334bd51a20940eb009fa46b6ffd4511', 'src/third_party/libunwindstack': { - 'url': 'https://chromium.googlesource.com/chromium/src/third_party/libunwindstack.git@dfd3f3d84cfc222af93bc86b276414fc690977da', + 'url': 'https://chromium.googlesource.com/chromium/src/third_party/libunwindstack.git@b34a0059a648f179ef05da2c0927f564bdaea2b3', 'condition': 'checkout_android', }, + 'src/third_party/perfetto': + 'https://android.googlesource.com/platform/external/perfetto.git@aecbd80f576686b67e29bdfae8c9c03bb9ce1996', 'src/third_party/libvpx/source/libvpx': - 'https://chromium.googlesource.com/webm/libvpx.git@1243d2fc27b1cad8863ac2d688b4fde71a80a74d', + 'https://chromium.googlesource.com/webm/libvpx.git@eebc5cd487a89c51ba148f6d6ac45779970f72d7', 'src/third_party/libyuv': - 'https://chromium.googlesource.com/libyuv/libyuv.git@6afd9becdf58822b1da6770598d8597c583ccfad', + 'https://chromium.googlesource.com/libyuv/libyuv.git@49ebc996aa8c4bdf89c1b5ea461eb677234c61cc', 'src/third_party/lss': { - 'url': 'https://chromium.googlesource.com/linux-syscall-support.git@f70e2f1641e280e777edfdad7f73a2cfa38139c7', + 'url': 
'https://chromium.googlesource.com/linux-syscall-support.git@92a65a8f5d705d1928874420c8d0d15bde8c89e5', 'condition': 'checkout_android or checkout_linux', }, 'src/third_party/mockito/src': { @@ -212,16 +230,16 @@ deps = { # Used by boringssl. 'src/third_party/nasm': { - 'url': 'https://chromium.googlesource.com/chromium/deps/nasm.git@4fa54ca5f7fc3a15a8c78ac94688e64d3e4e4fa1' + 'url': 'https://chromium.googlesource.com/chromium/deps/nasm.git@e9be5fd6d723a435ca2da162f9e0ffcb688747c1' }, 'src/third_party/openh264/src': - 'https://chromium.googlesource.com/external/github.com/cisco/openh264@a5473711f3e20c6bd1c33d81b6c7b9a0618aa18f', + 'https://chromium.googlesource.com/external/github.com/cisco/openh264@3dd5b80bc4f172dd82925bb259cb7c82348409c5', 'src/third_party/r8': { 'packages': [ { 'package': 'chromium/third_party/r8', - 'version': 'UAycWqc5QfELtJhhnoU4jQHjsyxPjRNyZ0EfvlojaY4C', + 'version': 'Nu_mvQJe34CotIXadFlA3w732CJ9EvQGuVs4udcZedAC', }, ], 'condition': 'checkout_android', @@ -238,11 +256,7 @@ deps = { 'dep_type': 'cipd', }, 'src/third_party/requests/src': { - 'url': 'https://chromium.googlesource.com/external/github.com/kennethreitz/requests.git@f172b30356d821d180fa4ecfa3e71c7274a32de4', - 'condition': 'checkout_android', - }, - 'src/third_party/robolectric/robolectric': { - 'url': 'https://chromium.googlesource.com/external/robolectric.git@f2df0efb033bb402399ebfb9bf58aefee5cced05', + 'url': 'https://chromium.googlesource.com/external/github.com/kennethreitz/requests.git@refs/tags/v2.23.0', 'condition': 'checkout_android', }, 'src/third_party/ub-uiautomator/lib': { @@ -250,16 +264,16 @@ deps = { 'condition': 'checkout_android', }, 'src/third_party/usrsctp/usrsctplib': - 'https://chromium.googlesource.com/external/github.com/sctplab/usrsctp@a8c51df76caae94254b1e59999405f739467490e', + 'https://chromium.googlesource.com/external/github.com/sctplab/usrsctp@1ade45cbadfd19298d2c47dc538962d4425ad2dd', # Dependency used by libjpeg-turbo. 
'src/third_party/yasm/binaries': { 'url': 'https://chromium.googlesource.com/chromium/deps/yasm/binaries.git@52f9b3f4b0aa06da24ef8b123058bb61ee468881', 'condition': 'checkout_win', }, 'src/tools': - 'https://chromium.googlesource.com/chromium/src/tools@5a602c7ac7403049a5f17c480e9e27dc8945b8b4', + 'https://chromium.googlesource.com/chromium/src/tools@1a00526b21d46b8b86f13add37003fd33885f32b', 'src/tools/swarming_client': - 'https://chromium.googlesource.com/infra/luci/client-py.git@7e8636295f4763a5f5aace1e700b17a4af3c81b7', + 'https://chromium.googlesource.com/infra/luci/client-py.git@a32a1607f6093d338f756c7e7c7b4333b0c50c9c', 'src/third_party/accessibility_test_framework': { 'packages': [ @@ -331,18 +345,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/espresso', - 'version': 'c92dcfc4e894555a0b3c309f2b7939640eb1fee4', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/guava': { - 'packages': [ - { - 'package': 'chromium/third_party/guava', - 'version': 'y8Zx7cKTiOunLhOrfC4hOt5kDQrLJ_Rq7ISDmXkPdYsC', + 'version': 'y8fIfH8Leo2cPm7iGCYnBxZpwOlgLv8rm2mlcmJlvGsC', }, ], 'condition': 'checkout_android', @@ -361,19 +364,30 @@ deps = { }, 'src/third_party/android_ndk': { - 'url': 'https://chromium.googlesource.com/android_ndk.git@27c0a8d090c666a50e40fceb4ee5b40b1a2d3f87', + 'url': 'https://chromium.googlesource.com/android_ndk.git@401019bf85744311b26c88ced255cd53401af8b7', 'condition': 'checkout_android', }, + 'src/third_party/androidx': { + 'packages': [ + { + 'package': 'chromium/third_party/androidx', + 'version': '-umIXLPTAdxRy2iaK4QFSeOf4t7PAKglJP7ggvWhfRwC', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + 'src/third_party/android_sdk/public': { 'packages': [ { - 'package': 'chromium/third_party/android_sdk/public/build-tools/29.0.2', - 'version': 'n-b1Qd7iFb8qzHlr1C_jIeu070UDgO_BwePtH42UqGcC', + 'package': 'chromium/third_party/android_sdk/public/build-tools/30.0.1', + 'version': 
'8LZujEmLjSh0g3JciDA3cslSptxKs9HOa_iUPXkOeYQC', }, { 'package': 'chromium/third_party/android_sdk/public/emulator', - 'version': 'f4WdgkPvDdVCE8zBWPzcSIj4N9WFhKp3CSKDWylXuLEC', + 'version': 'A4EvXZUIuQho0QRDJopMUpgyp6NA3aiDQjGKPUKbowMC', }, { 'package': 'chromium/third_party/android_sdk/public/extras', @@ -385,11 +399,11 @@ deps = { }, { 'package': 'chromium/third_party/android_sdk/public/platform-tools', - 'version': 'zMVtBEihXp2Z0NYFNjLLmNrwy6252b_YWG6sh2l0QAcC', + 'version': '8tF0AOj7Dwlv4j7_nfkhxWB0jzrvWWYjEIpirt8FIWYC', }, { - 'package': 'chromium/third_party/android_sdk/public/platforms/android-29', - 'version': 'yb33klKQV9UzzB-lDSsq36vzhTXOUZ2aRONBvPGwvdcC', + 'package': 'chromium/third_party/android_sdk/public/platforms/android-30', + 'version': 'YMUu9EHNZ__2Xcxl-KsaSf-dI5TMt_P62IseUVsxktMC', }, { 'package': 'chromium/third_party/android_sdk/public/sources/android-29', @@ -397,7 +411,7 @@ deps = { }, { 'package': 'chromium/third_party/android_sdk/public/cmdline-tools', - 'version': 'CR25ixsRhwuRnhdgDpGFyl9S0C_0HO9SUgFrwX46zq8C', + 'version': 'V__2Ycej-H2-6AcXX5A3gi7sIk74SuN44PBm2uC_N1sC', }, ], 'condition': 'checkout_android', @@ -437,17 +451,6 @@ deps = { 'dep_type': 'cipd', }, - 'src/third_party/androidx': { - 'packages': [ - { - 'package': 'chromium/third_party/androidx', - 'version': 'BgU0HKOH7unGo87kXkIKJlPMmaSOCFhvUKcIr9aborwC', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - 'src/third_party/sqlite4java': { 'packages': [ { @@ -463,23 +466,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/turbine', - 'version': '3UJ600difG3ThRhtYrN9AfZ5kh8wCYtBiii1-NMlCrMC', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/turbine/src': { - 'url': 'https://chromium.googlesource.com/external/github.com/google/turbine.git' + '@' + '95f6fb6f1e962e8b6ec672905b0b04233f002dc2', - 'condition': 'checkout_android', - }, - - 'src/third_party/xstream': { - 'packages': [ - { - 'package': 
'chromium/third_party/xstream', - 'version': '4278b1b78b86ab7a1a29e64d5aec9a47a9aab0fe', + 'version': 'Om6yIEXgJxuqghErK29h9RcMH6VaymMbxwScwXmcN6EC', }, ], 'condition': 'checkout_android', @@ -490,20 +477,34 @@ deps = { 'packages': [ { 'package': 'infra/tools/luci/isolate/${{platform}}', - 'version': 'git_revision:56ae79476e3caf14da59d75118408aa778637936', + 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', }, { 'package': 'infra/tools/luci/isolated/${{platform}}', - 'version': 'git_revision:56ae79476e3caf14da59d75118408aa778637936', + 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', }, { 'package': 'infra/tools/luci/swarming/${{platform}}', - 'version': 'git_revision:56ae79476e3caf14da59d75118408aa778637936', + 'version': 'git_revision:2ac8bd9cbc20824bb04a39b0f1b77178ace930b3', }, ], 'dep_type': 'cipd', }, + # TODO(crbug.com/1184780) Move this back to ANDROID_DEPS Generated Code + # section once org_robolectric_shadows_multidex is updated to a new version + # that does not need jetify. + 'src/third_party/android_deps/libs/org_robolectric_shadows_multidex': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadows_multidex', + 'version': 'version:4.3.1-cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + # Everything coming after this is automatically updated by the auto-roller. 
# === ANDROID_DEPS Generated Code Start === # Generated by //third_party/android_deps/fetch_all.py @@ -511,7 +512,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_core_common', - 'version': 'version:1.1.1-cr0', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', @@ -522,7 +523,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_core_runtime', - 'version': 'version:1.1.1-cr0', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', @@ -533,7 +534,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_common', - 'version': 'version:1.1.1-cr0', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', @@ -544,7 +545,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_common_java8', - 'version': 'version:1.1.1-cr0', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', @@ -555,7 +556,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_livedata', - 'version': 'version:1.1.1-cr0', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', @@ -566,7 +567,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_livedata_core', - 'version': 'version:1.1.1-cr0', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', @@ -577,7 +578,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_runtime', - 'version': 'version:1.1.1-cr0', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', @@ -588,909 +589,194 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/android_arch_lifecycle_viewmodel', - 'version': 'version:1.1.1-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 
'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_activity_activity': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_activity_activity', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_annotation_annotation': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_annotation_annotation', - 'version': 'version:1.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_appcompat_appcompat': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_appcompat_appcompat', - 'version': 'version:1.2.0-beta01-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_appcompat_appcompat_resources': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_appcompat_appcompat_resources', - 'version': 'version:1.2.0-beta01-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_arch_core_core_common': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_arch_core_core_common', - 'version': 'version:2.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_arch_core_core_runtime': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_arch_core_core_runtime', - 'version': 'version:2.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_asynclayoutinflater_asynclayoutinflater': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_asynclayoutinflater_asynclayoutinflater', - 'version': 'version:1.0.0-cr0', 
- }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_cardview_cardview': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_cardview_cardview', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_collection_collection': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_collection_collection', - 'version': 'version:1.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_concurrent_concurrent_futures': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_concurrent_concurrent_futures', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_coordinatorlayout_coordinatorlayout': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_coordinatorlayout_coordinatorlayout', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_core_core': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_core_core', - 'version': 'version:1.3.0-beta01-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_cursoradapter_cursoradapter': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_cursoradapter_cursoradapter', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_customview_customview': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_customview_customview', 
- 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_documentfile_documentfile': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_documentfile_documentfile', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_drawerlayout_drawerlayout': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_drawerlayout_drawerlayout', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_fragment_fragment': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_fragment_fragment', - 'version': 'version:1.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_gridlayout_gridlayout': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_gridlayout_gridlayout', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_interpolator_interpolator': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_interpolator_interpolator', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_leanback_leanback': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_leanback_leanback', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_leanback_leanback_preference': { - 'packages': [ - { - 'package': 
'chromium/third_party/android_deps/libs/androidx_leanback_leanback_preference', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_legacy_legacy_preference_v14': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_legacy_legacy_preference_v14', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_legacy_legacy_support_core_ui': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_legacy_legacy_support_core_ui', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_legacy_legacy_support_core_utils': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_legacy_legacy_support_core_utils', - 'version': 'version:1.0.0-cr0', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/androidx_legacy_legacy_support_v13': { + 'src/third_party/android_deps/libs/backport_util_concurrent_backport_util_concurrent': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/androidx_legacy_legacy_support_v13', - 'version': 'version:1.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/backport_util_concurrent_backport_util_concurrent', + 'version': 'version:2@3.1.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/androidx_legacy_legacy_support_v4': { + 'src/third_party/android_deps/libs/classworlds_classworlds': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/androidx_legacy_legacy_support_v4', - 'version': 'version:1.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/classworlds_classworlds', + 
'version': 'version:2@1.1-alpha-2.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/androidx_lifecycle_lifecycle_common': { + 'src/third_party/android_deps/libs/com_android_support_animated_vector_drawable': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/androidx_lifecycle_lifecycle_common', - 'version': 'version:2.1.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_animated_vector_drawable', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/androidx_lifecycle_lifecycle_common_java8': { + 'src/third_party/android_deps/libs/com_android_support_appcompat_v7': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/androidx_lifecycle_lifecycle_common_java8', - 'version': 'version:2.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_lifecycle_lifecycle_livedata': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_lifecycle_lifecycle_livedata', - 'version': 'version:2.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_lifecycle_lifecycle_livedata_core': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_lifecycle_lifecycle_livedata_core', - 'version': 'version:2.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_lifecycle_lifecycle_runtime': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_lifecycle_lifecycle_runtime', - 'version': 'version:2.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_lifecycle_lifecycle_viewmodel': { - 'packages': [ - { - 
'package': 'chromium/third_party/android_deps/libs/androidx_lifecycle_lifecycle_viewmodel', - 'version': 'version:2.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_loader_loader': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_loader_loader', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_localbroadcastmanager_localbroadcastmanager': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_localbroadcastmanager_localbroadcastmanager', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_media_media': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_media_media', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_mediarouter_mediarouter': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_mediarouter_mediarouter', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_multidex_multidex': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_multidex_multidex', - 'version': 'version:2.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_palette_palette': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_palette_palette', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_preference_preference': { - 'packages': [ - { 
- 'package': 'chromium/third_party/android_deps/libs/androidx_preference_preference', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_print_print': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_print_print', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_recyclerview_recyclerview': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_recyclerview_recyclerview', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_savedstate_savedstate': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_savedstate_savedstate', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_slidingpanelayout_slidingpanelayout': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_slidingpanelayout_slidingpanelayout', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_swiperefreshlayout_swiperefreshlayout': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_swiperefreshlayout_swiperefreshlayout', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_test_core': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_test_core', - 'version': 'version:1.2.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 
'src/third_party/android_deps/libs/androidx_test_espresso_espresso_core': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_test_espresso_espresso_core', - 'version': 'version:3.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_test_espresso_espresso_idling_resource': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_test_espresso_espresso_idling_resource', - 'version': 'version:3.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_test_ext_junit': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_test_ext_junit', - 'version': 'version:1.1.1-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_test_monitor': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_test_monitor', - 'version': 'version:1.2.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_test_rules': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_test_rules', - 'version': 'version:1.2.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_test_runner': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_test_runner', - 'version': 'version:1.2.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_test_uiautomator_uiautomator': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_test_uiautomator_uiautomator', - 'version': 'version:2.2.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 
'src/third_party/android_deps/libs/androidx_transition_transition': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_transition_transition', - 'version': 'version:1.0.0-rc02-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_tvprovider_tvprovider': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_tvprovider_tvprovider', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_vectordrawable_vectordrawable': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_vectordrawable_vectordrawable', - 'version': 'version:1.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_vectordrawable_vectordrawable_animated': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_vectordrawable_vectordrawable_animated', - 'version': 'version:1.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_versionedparcelable_versionedparcelable': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_versionedparcelable_versionedparcelable', - 'version': 'version:1.1.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/androidx_viewpager_viewpager': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/androidx_viewpager_viewpager', - 'version': 'version:1.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/backport_util_concurrent_backport_util_concurrent': { - 'packages': [ - { - 'package': 
'chromium/third_party/android_deps/libs/backport_util_concurrent_backport_util_concurrent', - 'version': 'version:3.1-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/classworlds_classworlds': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/classworlds_classworlds', - 'version': 'version:1.1-alpha-2-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_animated_vector_drawable': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_animated_vector_drawable', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_appcompat_v7': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_appcompat_v7', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_asynclayoutinflater': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_asynclayoutinflater', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_cardview_v7': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_cardview_v7', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_collections': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_collections', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 
'src/third_party/android_deps/libs/com_android_support_coordinatorlayout': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_coordinatorlayout', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_cursoradapter': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_cursoradapter', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_customview': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_customview', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_design': { - 'packages': [ - { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_design', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_appcompat_v7', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_documentfile': { + 'src/third_party/android_deps/libs/com_android_support_asynclayoutinflater': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_documentfile', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_asynclayoutinflater', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_drawerlayout': { + 'src/third_party/android_deps/libs/com_android_support_cardview_v7': { 'packages': [ { - 'package': 
'chromium/third_party/android_deps/libs/com_android_support_drawerlayout', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_cardview_v7', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_gridlayout_v7': { + 'src/third_party/android_deps/libs/com_android_support_collections': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_gridlayout_v7', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_collections', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_interpolator': { + 'src/third_party/android_deps/libs/com_android_support_coordinatorlayout': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_interpolator', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_coordinatorlayout', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_leanback_v17': { + 'src/third_party/android_deps/libs/com_android_support_cursoradapter': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_leanback_v17', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_cursoradapter', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_loader': { + 'src/third_party/android_deps/libs/com_android_support_customview': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_loader', - 'version': 
'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_customview', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_localbroadcastmanager': { + 'src/third_party/android_deps/libs/com_android_support_design': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_localbroadcastmanager', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_design', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_mediarouter_v7': { + 'src/third_party/android_deps/libs/com_android_support_documentfile': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_mediarouter_v7', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_documentfile', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_multidex': { + 'src/third_party/android_deps/libs/com_android_support_drawerlayout': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_multidex', - 'version': 'version:1.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_drawerlayout', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_palette_v7': { + 'src/third_party/android_deps/libs/com_android_support_interpolator': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_palette_v7', - 'version': 'version:28.0.0-cr0', + 'package': 
'chromium/third_party/android_deps/libs/com_android_support_interpolator', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_preference_leanback_v17': { + 'src/third_party/android_deps/libs/com_android_support_loader': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_preference_leanback_v17', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_loader', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_preference_v14': { + 'src/third_party/android_deps/libs/com_android_support_localbroadcastmanager': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_preference_v14', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_localbroadcastmanager', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_support_preference_v7': { + 'src/third_party/android_deps/libs/com_android_support_multidex': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_support_preference_v7', - 'version': 'version:28.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_support_multidex', + 'version': 'version:2@1.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1501,7 +787,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_print', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1512,7 +798,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_recyclerview_v7', - 'version': 
'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1523,7 +809,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_slidingpanelayout', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1534,7 +820,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_annotations', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1545,7 +831,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_compat', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1556,7 +842,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_core_ui', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1567,7 +853,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_core_utils', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1578,7 +864,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_fragment', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1589,18 +875,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_media_compat', - 'version': 'version:28.0.0-cr0', - }, - ], - 'condition': 'checkout_android', - 'dep_type': 'cipd', - }, - - 'src/third_party/android_deps/libs/com_android_support_support_v13': { - 'packages': [ - { - 'package': 
'chromium/third_party/android_deps/libs/com_android_support_support_v13', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1611,7 +886,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_v4', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1622,7 +897,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_support_vector_drawable', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1633,7 +908,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_swiperefreshlayout', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1644,7 +919,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_transition', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1655,7 +930,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_versionedparcelable', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1666,40 +941,62 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_android_support_viewpager', - 'version': 'version:28.0.0-cr0', + 'version': 'version:2@28.0.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_android_tools_common': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_android_tools_common', + 'version': 'version:2@30.0.0-alpha10.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 
'src/third_party/android_deps/libs/com_android_tools_build_jetifier_jetifier_core': { + 'src/third_party/android_deps/libs/com_android_tools_desugar_jdk_libs': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_tools_build_jetifier_jetifier_core', - 'version': 'version:1.0.0-beta08-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_tools_desugar_jdk_libs', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_tools_build_jetifier_jetifier_processor': { + 'src/third_party/android_deps/libs/com_android_tools_desugar_jdk_libs_configuration': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_tools_build_jetifier_jetifier_processor', - 'version': 'version:1.0.0-beta08-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_tools_desugar_jdk_libs_configuration', + 'version': 'version:2@1.1.1.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_android_tools_desugar_jdk_libs': { + 'src/third_party/android_deps/libs/com_android_tools_layoutlib_layoutlib_api': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/com_android_tools_desugar_jdk_libs', - 'version': 'version:1.0.5-cr0', + 'package': 'chromium/third_party/android_deps/libs/com_android_tools_layoutlib_layoutlib_api', + 'version': 'version:2@30.0.0-alpha10.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_android_tools_sdk_common': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_android_tools_sdk_common', + 'version': 'version:2@30.0.0-alpha10.cr0', }, ], 'condition': 'checkout_android', @@ -1710,7 +1007,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_github_ben_manes_caffeine_caffeine', - 'version': 
'version:2.7.0-cr0', + 'version': 'version:2@2.8.8.cr0', }, ], 'condition': 'checkout_android', @@ -1721,7 +1018,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_github_kevinstern_software_and_algorithms', - 'version': 'version:1.0-cr0', + 'version': 'version:2@1.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_android_datatransport_transport_api': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_android_datatransport_transport_api', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -1732,7 +1040,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1743,7 +1051,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth_api_phone', - 'version': 'version:17.1.0-cr0', + 'version': 'version:2@17.5.0.cr0', }, ], 'condition': 'checkout_android', @@ -1754,7 +1062,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth_base', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1765,7 +1073,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_base', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.5.0.cr0', }, ], 'condition': 'checkout_android', @@ -1776,7 +1084,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.5.0.cr0', }, ], 'condition': 'checkout_android', @@ -1787,7 +1095,7 @@ deps = { 'packages': [ { 
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_cast', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1798,7 +1106,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_cast_framework', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1809,7 +1117,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_clearcut', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_android_gms_play_services_cloud_messaging': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_cloud_messaging', + 'version': 'version:2@16.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1820,7 +1139,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_fido', - 'version': 'version:18.1.0-cr0', + 'version': 'version:2@19.0.0-beta.cr0', }, ], 'condition': 'checkout_android', @@ -1831,7 +1150,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_flags', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1842,7 +1161,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_gcm', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1853,7 +1172,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_iid', - 'version': 'version:17.0.0-cr0', 
+ 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1864,7 +1183,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_instantapps', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1875,7 +1194,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_location', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1886,7 +1205,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_phenotype', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1897,7 +1216,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_places_placereport', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1908,7 +1227,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_stats', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1919,7 +1238,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks', - 'version': 'version:17.0.0-cr0', + 'version': 'version:2@17.2.0.cr0', }, ], 'condition': 'checkout_android', @@ -1930,7 +1249,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_vision', - 'version': 'version:18.0.0-cr0', + 'version': 'version:2@18.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1941,7 +1260,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_vision_common', - 'version': 'version:18.0.0-cr0', + 'version': 'version:2@18.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -1952,7 +1271,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_android_material_material', - 'version': 'version:1.0.0-rc02-cr0', + 'version': 'version:2@1.4.0-rc01.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_android_play_core': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_android_play_core', + 'version': 'version:2@1.10.0.cr0', }, ], 'condition': 'checkout_android', @@ -1963,7 +1293,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_auto_auto_common', - 'version': 'version:0.10-cr0', + 'version': 'version:2@0.10.cr0', }, ], 'condition': 'checkout_android', @@ -1974,7 +1304,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_auto_service_auto_service', - 'version': 'version:1.0-rc6-cr0', + 'version': 'version:2@1.0-rc6.cr0', }, ], 'condition': 'checkout_android', @@ -1985,18 +1315,29 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_auto_service_auto_service_annotations', - 'version': 'version:1.0-rc6-cr0', + 'version': 'version:2@1.0-rc6.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_auto_value_auto_value_annotations': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_auto_value_auto_value_annotations', + 'version': 'version:2@1.7.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/com_google_code_findbugs_jFormatString': { + 'src/third_party/android_deps/libs/com_google_code_findbugs_jformatstring': { 
'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_code_findbugs_jformatstring', - 'version': 'version:3.0.0-cr0', + 'version': 'version:2@3.0.0.cr0', }, ], 'condition': 'checkout_android', @@ -2007,7 +1348,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_code_findbugs_jsr305', - 'version': 'version:3.0.2-cr0', + 'version': 'version:2@3.0.2.cr0', }, ], 'condition': 'checkout_android', @@ -2018,7 +1359,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_code_gson_gson', - 'version': 'version:2.8.0-cr0', + 'version': 'version:2@2.8.0.cr0', }, ], 'condition': 'checkout_android', @@ -2029,7 +1370,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_dagger_dagger', - 'version': 'version:2.26-cr0', + 'version': 'version:2@2.30.cr0', }, ], 'condition': 'checkout_android', @@ -2040,7 +1381,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_dagger_dagger_compiler', - 'version': 'version:2.26-cr0', + 'version': 'version:2@2.30.cr0', }, ], 'condition': 'checkout_android', @@ -2051,7 +1392,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_dagger_dagger_producers', - 'version': 'version:2.26-cr0', + 'version': 'version:2@2.30.cr0', }, ], 'condition': 'checkout_android', @@ -2062,7 +1403,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_dagger_dagger_spi', - 'version': 'version:2.26-cr0', + 'version': 'version:2@2.30.cr0', }, ], 'condition': 'checkout_android', @@ -2073,7 +1414,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_annotation', - 'version': 'version:2.3.4-cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -2084,7 +1425,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_annotations', - 'version': 'version:2.3.4-cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -2095,7 +1436,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_check_api', - 'version': 'version:2.3.4-cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -2106,7 +1447,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_core', - 'version': 'version:2.3.4-cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -2117,7 +1458,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_error_prone_type_annotations', - 'version': 'version:2.3.4-cr0', + 'version': 'version:2@2.7.1.cr0', }, ], 'condition': 'checkout_android', @@ -2128,7 +1469,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_javac', - 'version': 'version:9+181-r4173-1-cr0', + 'version': 'version:2@9+181-r4173-1.cr0', }, ], 'condition': 'checkout_android', @@ -2139,7 +1480,128 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_errorprone_javac_shaded', - 'version': 'version:9-dev-r4023-3-cr0', + 'version': 'version:2@9-dev-r4023-3.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_annotations': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_annotations', + 'version': 'version:2@16.0.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_common': { + 'packages': [ + { + 'package': 
'chromium/third_party/android_deps/libs/com_google_firebase_firebase_common', + 'version': 'version:2@19.5.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_components': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_components', + 'version': 'version:2@16.1.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_encoders': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_encoders', + 'version': 'version:2@16.1.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_encoders_json': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_encoders_json', + 'version': 'version:2@17.1.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_iid': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_iid', + 'version': 'version:2@21.0.1.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_iid_interop': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_iid_interop', + 'version': 'version:2@17.0.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_installations': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_installations', + 'version': 'version:2@16.3.5.cr0', + }, + ], + 'condition': 'checkout_android', + 
'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_installations_interop': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_installations_interop', + 'version': 'version:2@16.0.1.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_measurement_connector': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_measurement_connector', + 'version': 'version:2@18.0.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_firebase_firebase_messaging': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_firebase_firebase_messaging', + 'version': 'version:2@21.0.1.cr0', }, ], 'condition': 'checkout_android', @@ -2150,7 +1612,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_googlejavaformat_google_java_format', - 'version': 'version:1.5-cr0', + 'version': 'version:2@1.5.cr0', }, ], 'condition': 'checkout_android', @@ -2161,7 +1623,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_failureaccess', - 'version': 'version:1.0.1-cr0', + 'version': 'version:2@1.0.1.cr0', }, ], 'condition': 'checkout_android', @@ -2172,7 +1634,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_guava', - 'version': 'version:27.1-jre-cr0', + 'version': 'version:2@30.1-jre.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/com_google_guava_guava_android': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/com_google_guava_guava_android', + 'version': 'version:2@30.1-android.cr0', }, ], 'condition': 'checkout_android', @@ -2183,7 +1656,7 
@@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_guava_listenablefuture', - 'version': 'version:1.0-cr0', + 'version': 'version:2@1.0.cr0', }, ], 'condition': 'checkout_android', @@ -2194,7 +1667,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_j2objc_j2objc_annotations', - 'version': 'version:1.1-cr0', + 'version': 'version:2@1.3.cr0', }, ], 'condition': 'checkout_android', @@ -2205,7 +1678,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_protobuf_protobuf_java', - 'version': 'version:3.4.0-cr0', + 'version': 'version:2@3.4.0.cr0', }, ], 'condition': 'checkout_android', @@ -2216,7 +1689,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_google_protobuf_protobuf_javalite', - 'version': 'version:3.11.4-cr0', + 'version': 'version:2@3.13.0.cr0', }, ], 'condition': 'checkout_android', @@ -2227,7 +1700,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_googlecode_java_diff_utils_diffutils', - 'version': 'version:1.3.0-cr0', + 'version': 'version:2@1.3.0.cr0', }, ], 'condition': 'checkout_android', @@ -2238,7 +1711,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_squareup_javapoet', - 'version': 'version:1.11.1-cr0', + 'version': 'version:2@1.13.0.cr0', }, ], 'condition': 'checkout_android', @@ -2249,18 +1722,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/com_squareup_javawriter', - 'version': 'version:2.1.1-cr0', + 'version': 'version:2@2.1.1.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/commons_cli_commons_cli': { + 'src/third_party/android_deps/libs/io_github_java_diff_utils_java_diff_utils': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/commons_cli_commons_cli', - 'version': 'version:1.3.1-cr0', + 'package': 
'chromium/third_party/android_deps/libs/io_github_java_diff_utils_java_diff_utils', + 'version': 'version:2@4.0.cr0', }, ], 'condition': 'checkout_android', @@ -2271,7 +1744,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/javax_annotation_javax_annotation_api', - 'version': 'version:1.3.2-cr0', + 'version': 'version:2@1.3.2.cr0', }, ], 'condition': 'checkout_android', @@ -2282,7 +1755,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/javax_annotation_jsr250_api', - 'version': 'version:1.0-cr0', + 'version': 'version:2@1.0.cr0', }, ], 'condition': 'checkout_android', @@ -2293,7 +1766,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/javax_inject_javax_inject', - 'version': 'version:1-cr0', + 'version': 'version:2@1.cr0', }, ], 'condition': 'checkout_android', @@ -2304,18 +1777,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/nekohtml_nekohtml', - 'version': 'version:1.9.6.2-cr0', + 'version': 'version:2@1.9.6.2.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/nekohtml_xercesMinimal': { + 'src/third_party/android_deps/libs/nekohtml_xercesminimal': { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/nekohtml_xercesminimal', - 'version': 'version:1.9.6.2-cr0', + 'version': 'version:2@1.9.6.2.cr0', }, ], 'condition': 'checkout_android', @@ -2326,7 +1799,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/net_ltgt_gradle_incap_incap', - 'version': 'version:0.2-cr0', + 'version': 'version:2@0.2.cr0', }, ], 'condition': 'checkout_android', @@ -2337,7 +1810,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/net_sf_kxml_kxml2', - 'version': 'version:2.3.0-cr0', + 'version': 'version:2@2.3.0.cr0', }, ], 'condition': 'checkout_android', @@ -2348,7 +1821,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/org_apache_ant_ant', - 'version': 'version:1.8.0-cr0', + 'version': 'version:2@1.8.0.cr0', }, ], 'condition': 'checkout_android', @@ -2359,7 +1832,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_ant_ant_launcher', - 'version': 'version:1.8.0-cr0', + 'version': 'version:2@1.8.0.cr0', }, ], 'condition': 'checkout_android', @@ -2370,7 +1843,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_ant_tasks', - 'version': 'version:2.1.3-cr0', + 'version': 'version:2@2.1.3.cr0', }, ], 'condition': 'checkout_android', @@ -2381,7 +1854,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_artifact', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2392,7 +1865,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_artifact_manager', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2403,7 +1876,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_error_diagnostics', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2414,7 +1887,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_model', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2425,7 +1898,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_plugin_registry', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2436,7 +1909,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/org_apache_maven_maven_profile', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2447,7 +1920,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_project', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2458,7 +1931,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_repository_metadata', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2469,7 +1942,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_maven_settings', - 'version': 'version:2.2.1-cr0', + 'version': 'version:2@2.2.1.cr0', }, ], 'condition': 'checkout_android', @@ -2480,7 +1953,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_wagon_wagon_file', - 'version': 'version:1.0-beta-6-cr0', + 'version': 'version:2@1.0-beta-6.cr0', }, ], 'condition': 'checkout_android', @@ -2491,7 +1964,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_wagon_wagon_http_lightweight', - 'version': 'version:1.0-beta-6-cr0', + 'version': 'version:2@1.0-beta-6.cr0', }, ], 'condition': 'checkout_android', @@ -2502,7 +1975,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_wagon_wagon_http_shared', - 'version': 'version:1.0-beta-6-cr0', + 'version': 'version:2@1.0-beta-6.cr0', }, ], 'condition': 'checkout_android', @@ -2513,51 +1986,51 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_apache_maven_wagon_wagon_provider_api', - 'version': 'version:1.0-beta-6-cr0', + 'version': 'version:2@1.0-beta-6.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 
'src/third_party/android_deps/libs/org_checkerframework_checker_compat_qual': { + 'src/third_party/android_deps/libs/org_ccil_cowan_tagsoup_tagsoup': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_compat_qual', - 'version': 'version:2.5.3-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_ccil_cowan_tagsoup_tagsoup', + 'version': 'version:2@1.2.1.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_checkerframework_checker_qual': { + 'src/third_party/android_deps/libs/org_checkerframework_checker_compat_qual': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_qual', - 'version': 'version:3.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_compat_qual', + 'version': 'version:2@2.5.5.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_checkerframework_dataflow': { + 'src/third_party/android_deps/libs/org_checkerframework_checker_qual': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_dataflow', - 'version': 'version:3.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_checker_qual', + 'version': 'version:2@3.8.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_checkerframework_javacutil': { + 'src/third_party/android_deps/libs/org_checkerframework_dataflow_shaded': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_javacutil', - 'version': 'version:3.0.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_checkerframework_dataflow_shaded', + 'version': 'version:2@3.11.0.cr0', }, ], 'condition': 'checkout_android', @@ -2568,7 +2041,7 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/org_codehaus_mojo_animal_sniffer_annotations', - 'version': 'version:1.17-cr0', + 'version': 'version:2@1.17.cr0', }, ], 'condition': 'checkout_android', @@ -2579,7 +2052,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_codehaus_plexus_plexus_container_default', - 'version': 'version:1.0-alpha-9-stable-1-cr0', + 'version': 'version:2@1.0-alpha-9-stable-1.cr0', }, ], 'condition': 'checkout_android', @@ -2590,7 +2063,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_codehaus_plexus_plexus_interpolation', - 'version': 'version:1.11-cr0', + 'version': 'version:2@1.11.cr0', }, ], 'condition': 'checkout_android', @@ -2601,18 +2074,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_codehaus_plexus_plexus_utils', - 'version': 'version:1.5.15-cr0', + 'version': 'version:2@1.5.15.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_jdom_jdom2': { + 'src/third_party/android_deps/libs/org_eclipse_jgit_org_eclipse_jgit': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_jdom_jdom2', - 'version': 'version:2.0.6-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_eclipse_jgit_org_eclipse_jgit', + 'version': 'version:2@4.4.1.201607150455-r.cr0', }, ], 'condition': 'checkout_android', @@ -2623,7 +2096,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_annotations', - 'version': 'version:13.0-cr0', + 'version': 'version:2@13.0.cr0', }, ], 'condition': 'checkout_android', @@ -2634,7 +2107,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib', - 'version': 'version:1.3.50-cr0', + 'version': 'version:2@1.5.10.cr0', }, ], 'condition': 'checkout_android', @@ -2645,117 +2118,128 @@ deps = { 'packages': [ { 'package': 
'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common', - 'version': 'version:1.3.50-cr0', + 'version': 'version:2@1.5.10.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_metadata_jvm': { + 'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_metadata_jvm', - 'version': 'version:0.1.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7', + 'version': 'version:2@1.5.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_ow2_asm_asm': { + 'src/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk8': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm', - 'version': 'version:7.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk8', + 'version': 'version:2@1.5.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_ow2_asm_asm_analysis': { + 'src/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_android': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_analysis', - 'version': 'version:7.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_android', + 'version': 'version:2@1.5.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_ow2_asm_asm_commons': { + 'src/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_core_jvm': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_commons', - 'version': 'version:7.0-cr0', + 'package': 
'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_coroutines_core_jvm', + 'version': 'version:2@1.5.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_ow2_asm_asm_tree': { + 'src/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_metadata_jvm': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_tree', - 'version': 'version:7.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlinx_kotlinx_metadata_jvm', + 'version': 'version:2@0.1.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_ow2_asm_asm_util': { + 'src/third_party/android_deps/libs/org_ow2_asm_asm': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_util', - 'version': 'version:7.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm', + 'version': 'version:2@7.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_pcollections_pcollections': { + 'src/third_party/android_deps/libs/org_ow2_asm_asm_analysis': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_pcollections_pcollections', - 'version': 'version:2.1.2-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_analysis', + 'version': 'version:2@7.0.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_ow2_asm_asm_commons': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_commons', + 'version': 'version:2@7.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_plumelib_plume_util': { + 'src/third_party/android_deps/libs/org_ow2_asm_asm_tree': { 'packages': [ { - 'package': 
'chromium/third_party/android_deps/libs/org_plumelib_plume_util', - 'version': 'version:1.0.6-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_tree', + 'version': 'version:2@7.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_plumelib_reflection_util': { + 'src/third_party/android_deps/libs/org_ow2_asm_asm_util': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_plumelib_reflection_util', - 'version': 'version:0.0.2-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_ow2_asm_asm_util', + 'version': 'version:2@7.0.cr0', }, ], 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'src/third_party/android_deps/libs/org_plumelib_require_javadoc': { + 'src/third_party/android_deps/libs/org_pcollections_pcollections': { 'packages': [ { - 'package': 'chromium/third_party/android_deps/libs/org_plumelib_require_javadoc', - 'version': 'version:0.1.0-cr0', + 'package': 'chromium/third_party/android_deps/libs/org_pcollections_pcollections', + 'version': 'version:2@2.1.2.cr0', }, ], 'condition': 'checkout_android', @@ -2766,7 +2250,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_annotations', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2777,7 +2261,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_junit', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2788,7 +2272,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_pluginapi', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2799,7 +2283,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_plugins_maven_dependency_resolver', - 
'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2810,7 +2294,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_resources', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2821,7 +2305,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_robolectric', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2832,7 +2316,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_sandbox', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2843,7 +2327,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadowapi', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2854,7 +2338,18 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadows_framework', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, + + 'src/third_party/android_deps/libs/org_robolectric_shadows_playservices': { + 'packages': [ + { + 'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadows_playservices', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2865,7 +2360,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_utils', - 'version': 'version:4.3.1-cr0', + 'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -2876,7 +2371,7 @@ deps = { 'packages': [ { 'package': 'chromium/third_party/android_deps/libs/org_robolectric_utils_reflector', - 'version': 'version:4.3.1-cr0', + 
'version': 'version:2@4.3.1.cr0', }, ], 'condition': 'checkout_android', @@ -3112,6 +2607,25 @@ hooks = [ '--bucket', 'chromium-webrtc-resources', 'src/resources'], }, + { + 'name': 'Generate component metadata for tests', + 'pattern': '.', + 'action': [ + 'vpython', + 'src/testing/generate_location_tags.py', + '--out', + 'src/testing/location_tags.json', + ], + }, + # Download and initialize "vpython" VirtualEnv environment packages. + { + 'name': 'vpython_common', + 'pattern': '.', + 'action': [ 'vpython', + '-vpython-spec', 'src/.vpython', + '-vpython-tool', 'install', + ], + }, ] recursedeps = [] @@ -3135,13 +2649,15 @@ include_rules = [ "+test", "+rtc_tools", - # Abseil whitelist. Keep this in sync with abseil-in-webrtc.md. + # Abseil allowlist. Keep this in sync with abseil-in-webrtc.md. "+absl/algorithm/algorithm.h", "+absl/algorithm/container.h", "+absl/base/attributes.h", "+absl/base/config.h", + "+absl/base/const_init.h", "+absl/base/macros.h", "+absl/container/inlined_vector.h", + "+absl/functional/bind_front.h", "+absl/memory/memory.h", "+absl/meta/type_traits.h", "+absl/strings/ascii.h", diff --git a/DIR_METADATA b/DIR_METADATA new file mode 100644 index 0000000000..a002d0947f --- /dev/null +++ b/DIR_METADATA @@ -0,0 +1,3 @@ +monorail { + project: "webrtc" +} diff --git a/ENG_REVIEW_OWNERS b/ENG_REVIEW_OWNERS index de5f240f22..b06ad40d0f 100644 --- a/ENG_REVIEW_OWNERS +++ b/ENG_REVIEW_OWNERS @@ -6,6 +6,5 @@ # review owners to ensure that the added dependency was OK. 
danilchap@webrtc.org -kwiberg@webrtc.org mbonadei@webrtc.org phoglund@webrtc.org diff --git a/OWNERS b/OWNERS index d6a78420b1..587c130ed7 100644 --- a/OWNERS +++ b/OWNERS @@ -1,6 +1,6 @@ henrika@webrtc.org +hta@webrtc.org juberti@webrtc.org -kwiberg@webrtc.org mflodman@webrtc.org stefan@webrtc.org tommi@webrtc.org @@ -8,15 +8,10 @@ per-file .gitignore=* per-file .gn=mbonadei@webrtc.org per-file *.gn=mbonadei@webrtc.org per-file *.gni=mbonadei@webrtc.org +per-file .vpython=mbonadei@webrtc.org per-file AUTHORS=* per-file DEPS=* -per-file pylintrc=phoglund@webrtc.org +per-file pylintrc=mbonadei@webrtc.org per-file WATCHLISTS=* -per-file abseil-in-webrtc.md=danilchap@webrtc.org -per-file abseil-in-webrtc.md=kwiberg@webrtc.org -per-file abseil-in-webrtc.md=mbonadei@webrtc.org -per-file style-guide.md=danilchap@webrtc.org -per-file style-guide.md=kwiberg@webrtc.org -per-file native-api.md=kwiberg@webrtc.org - -# COMPONENT: Internals>WebRTC +per-file native-api.md=mbonadei@webrtc.org +per-file *.lua=titovartem@webrtc.org diff --git a/PRESUBMIT.py b/PRESUBMIT.py index 247b78eaa0..21875f61af 100755 --- a/PRESUBMIT.py +++ b/PRESUBMIT.py @@ -14,30 +14,30 @@ from contextlib import contextmanager # Files and directories that are *skipped* by cpplint in the presubmit script. 
-CPPLINT_BLACKLIST = [ - 'api/video_codecs/video_decoder.h', - 'common_types.cc', - 'common_types.h', - 'examples/objc', - 'media/base/stream_params.h', - 'media/base/video_common.h', - 'media/sctp/sctp_transport.cc', - 'modules/audio_coding', - 'modules/audio_device', - 'modules/audio_processing', - 'modules/desktop_capture', - 'modules/include/module_common_types.h', - 'modules/utility', - 'modules/video_capture', - 'p2p/base/pseudo_tcp.cc', - 'p2p/base/pseudo_tcp.h', - 'rtc_base', - 'sdk/android/src/jni', - 'sdk/objc', - 'system_wrappers', - 'test', - 'tools_webrtc', - 'voice_engine', +CPPLINT_EXCEPTIONS = [ + 'api/video_codecs/video_decoder.h', + 'common_types.cc', + 'common_types.h', + 'examples/objc', + 'media/base/stream_params.h', + 'media/base/video_common.h', + 'media/sctp/usrsctp_transport.cc', + 'modules/audio_coding', + 'modules/audio_device', + 'modules/audio_processing', + 'modules/desktop_capture', + 'modules/include/module_common_types.h', + 'modules/utility', + 'modules/video_capture', + 'p2p/base/pseudo_tcp.cc', + 'p2p/base/pseudo_tcp.h', + 'rtc_base', + 'sdk/android/src/jni', + 'sdk/objc', + 'system_wrappers', + 'test', + 'tools_webrtc', + 'voice_engine', ] # These filters will always be removed, even if the caller specifies a filter @@ -45,13 +45,16 @@ # # Justifications for each filter: # - build/c++11 : Rvalue ref checks are unreliable (false positives), -# include file and feature blacklists are +# include file and feature blocklists are # google3-specific. +# - runtime/references : Mutable references are not banned by the Google +# C++ style guide anymore (starting from May 2020). # - whitespace/operators: Same as above (doesn't seem sufficient to eliminate # all move-related errors). -BLACKLIST_LINT_FILTERS = [ - '-build/c++11', - '-whitespace/operators', +DISABLED_LINT_FILTERS = [ + '-build/c++11', + '-runtime/references', + '-whitespace/operators', ] # List of directories of "supported" native APIs. 
That means changes to headers @@ -62,31 +65,31 @@ # webrtc-users@google.com (internal list). # 4. (later) The deprecated APIs are removed. NATIVE_API_DIRS = ( - 'api', # All subdirectories of api/ are included as well. - 'media/base', - 'media/engine', - 'modules/audio_device/include', - 'pc', + 'api', # All subdirectories of api/ are included as well. + 'media/base', + 'media/engine', + 'modules/audio_device/include', + 'pc', ) # These directories should not be used but are maintained only to avoid breaking # some legacy downstream code. LEGACY_API_DIRS = ( - 'common_audio/include', - 'modules/audio_coding/include', - 'modules/audio_processing/include', - 'modules/congestion_controller/include', - 'modules/include', - 'modules/remote_bitrate_estimator/include', - 'modules/rtp_rtcp/include', - 'modules/rtp_rtcp/source', - 'modules/utility/include', - 'modules/video_coding/codecs/h264/include', - 'modules/video_coding/codecs/vp8/include', - 'modules/video_coding/codecs/vp9/include', - 'modules/video_coding/include', - 'rtc_base', - 'system_wrappers/include', + 'common_audio/include', + 'modules/audio_coding/include', + 'modules/audio_processing/include', + 'modules/congestion_controller/include', + 'modules/include', + 'modules/remote_bitrate_estimator/include', + 'modules/rtp_rtcp/include', + 'modules/rtp_rtcp/source', + 'modules/utility/include', + 'modules/video_coding/codecs/h264/include', + 'modules/video_coding/codecs/vp8/include', + 'modules/video_coding/codecs/vp9/include', + 'modules/video_coding/include', + 'rtc_base', + 'system_wrappers/include', ) # NOTE: The set of directories in API_DIRS should be the same as those @@ -94,61 +97,67 @@ API_DIRS = NATIVE_API_DIRS[:] + LEGACY_API_DIRS[:] # TARGET_RE matches a GN target, and extracts the target name and the contents. 
-TARGET_RE = re.compile(r'(?P\s*)\w+\("(?P\w+)"\) {' - r'(?P.*?)' - r'(?P=indent)}', - re.MULTILINE | re.DOTALL) +TARGET_RE = re.compile( + r'(?P\s*)(?P\w+)\("(?P\w+)"\) {' + r'(?P.*?)' + r'(?P=indent)}', re.MULTILINE | re.DOTALL) # SOURCES_RE matches a block of sources inside a GN target. SOURCES_RE = re.compile(r'sources \+?= \[(?P.*?)\]', re.MULTILINE | re.DOTALL) +# DEPS_RE matches a block of sources inside a GN target. +DEPS_RE = re.compile(r'\bdeps \+?= \[(?P.*?)\]', + re.MULTILINE | re.DOTALL) + # FILE_PATH_RE matchies a file path. FILE_PATH_RE = re.compile(r'"(?P(\w|\/)+)(?P\.\w+)"') def FindSrcDirPath(starting_dir): - """Returns the abs path to the src/ dir of the project.""" - src_dir = starting_dir - while os.path.basename(src_dir) != 'src': - src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) - return src_dir + """Returns the abs path to the src/ dir of the project.""" + src_dir = starting_dir + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir @contextmanager def _AddToPath(*paths): - original_sys_path = sys.path - sys.path.extend(paths) - try: - yield - finally: - # Restore sys.path to what it was before. - sys.path = original_sys_path + original_sys_path = sys.path + sys.path.extend(paths) + try: + yield + finally: + # Restore sys.path to what it was before. 
+ sys.path = original_sys_path def VerifyNativeApiHeadersListIsValid(input_api, output_api): - """Ensures the list of native API header directories is up to date.""" - non_existing_paths = [] - native_api_full_paths = [ - input_api.os_path.join(input_api.PresubmitLocalPath(), - *path.split('/')) for path in API_DIRS] - for path in native_api_full_paths: - if not os.path.isdir(path): - non_existing_paths.append(path) - if non_existing_paths: - return [output_api.PresubmitError( - 'Directories to native API headers have changed which has made the ' - 'list in PRESUBMIT.py outdated.\nPlease update it to the current ' - 'location of our native APIs.', - non_existing_paths)] - return [] + """Ensures the list of native API header directories is up to date.""" + non_existing_paths = [] + native_api_full_paths = [ + input_api.os_path.join(input_api.PresubmitLocalPath(), + *path.split('/')) for path in API_DIRS + ] + for path in native_api_full_paths: + if not os.path.isdir(path): + non_existing_paths.append(path) + if non_existing_paths: + return [ + output_api.PresubmitError( + 'Directories to native API headers have changed which has made ' + 'the list in PRESUBMIT.py outdated.\nPlease update it to the ' + 'current location of our native APIs.', non_existing_paths) + ] + return [] API_CHANGE_MSG = """ You seem to be changing native API header files. Please make sure that you: 1. Make compatible changes that don't break existing clients. Usually this is done by keeping the existing method signatures unchanged. - 2. Mark the old stuff as deprecated (see RTC_DEPRECATED macro). + 2. Mark the old stuff as deprecated (use the ABSL_DEPRECATED macro). 3. Create a timeline and plan for when the deprecated stuff will be removed. (The amount of time we give users to change their code should be informed by how much work it is for them. 
If they just @@ -165,532 +174,610 @@ def VerifyNativeApiHeadersListIsValid(input_api, output_api): def CheckNativeApiHeaderChanges(input_api, output_api): - """Checks to remind proper changing of native APIs.""" - files = [] - source_file_filter = lambda x: input_api.FilterSourceFile( - x, white_list=[r'.+\.(gn|gni|h)$']) - for f in input_api.AffectedSourceFiles(source_file_filter): - for path in API_DIRS: - dn = os.path.dirname(f.LocalPath()) - if path == 'api': - # Special case: Subdirectories included. - if dn == 'api' or dn.startswith('api/'): - files.append(f.LocalPath()) - else: - # Normal case: Subdirectories not included. - if dn == path: - files.append(f.LocalPath()) - - if files: - return [output_api.PresubmitNotifyResult(API_CHANGE_MSG, files)] - return [] - - -def CheckNoIOStreamInHeaders(input_api, output_api, - source_file_filter): - """Checks to make sure no .h files include .""" - files = [] - pattern = input_api.re.compile(r'^#include\s*', - input_api.re.MULTILINE) - file_filter = lambda x: (input_api.FilterSourceFile(x) - and source_file_filter(x)) - for f in input_api.AffectedSourceFiles(file_filter): - if not f.LocalPath().endswith('.h'): - continue - contents = input_api.ReadFile(f) - if pattern.search(contents): - files.append(f) - - if len(files): - return [output_api.PresubmitError( - 'Do not #include in header files, since it inserts static ' + - 'initialization into every file including the header. Instead, ' + - '#include . 
See http://crbug.com/94794', - files)] - return [] - - -def CheckNoPragmaOnce(input_api, output_api, - source_file_filter): - """Make sure that banned functions are not used.""" - files = [] - pattern = input_api.re.compile(r'^#pragma\s+once', - input_api.re.MULTILINE) - file_filter = lambda x: (input_api.FilterSourceFile(x) - and source_file_filter(x)) - for f in input_api.AffectedSourceFiles(file_filter): - if not f.LocalPath().endswith('.h'): - continue - contents = input_api.ReadFile(f) - if pattern.search(contents): - files.append(f) - - if files: - return [output_api.PresubmitError( - 'Do not use #pragma once in header files.\n' - 'See http://www.chromium.org/developers/coding-style#TOC-File-headers', - files)] - return [] - - -def CheckNoFRIEND_TEST(input_api, output_api, # pylint: disable=invalid-name - source_file_filter): - """Make sure that gtest's FRIEND_TEST() macro is not used, the - FRIEND_TEST_ALL_PREFIXES() macro from testsupport/gtest_prod_util.h should be - used instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes.""" - problems = [] + """Checks to remind proper changing of native APIs.""" + files = [] + source_file_filter = lambda x: input_api.FilterSourceFile( + x, files_to_check=[r'.+\.(gn|gni|h)$']) + for f in input_api.AffectedSourceFiles(source_file_filter): + for path in API_DIRS: + dn = os.path.dirname(f.LocalPath()) + if path == 'api': + # Special case: Subdirectories included. + if dn == 'api' or dn.startswith('api/'): + files.append(f.LocalPath()) + else: + # Normal case: Subdirectories not included. 
+ if dn == path: + files.append(f.LocalPath()) + + if files: + return [output_api.PresubmitNotifyResult(API_CHANGE_MSG, files)] + return [] + + +def CheckNoIOStreamInHeaders(input_api, output_api, source_file_filter): + """Checks to make sure no .h files include .""" + files = [] + pattern = input_api.re.compile(r'^#include\s*', + input_api.re.MULTILINE) + file_filter = lambda x: (input_api.FilterSourceFile(x) and + source_file_filter(x)) + for f in input_api.AffectedSourceFiles(file_filter): + if not f.LocalPath().endswith('.h'): + continue + contents = input_api.ReadFile(f) + if pattern.search(contents): + files.append(f) + + if len(files): + return [ + output_api.PresubmitError( + 'Do not #include in header files, since it inserts ' + 'static initialization into every file including the header. ' + 'Instead, #include . See http://crbug.com/94794', + files) + ] + return [] - file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) - and source_file_filter(f)) - for f in input_api.AffectedFiles(file_filter=file_filter): - for line_num, line in f.ChangedContents(): - if 'FRIEND_TEST(' in line: - problems.append(' %s:%d' % (f.LocalPath(), line_num)) - if not problems: +def CheckNoPragmaOnce(input_api, output_api, source_file_filter): + """Make sure that banned functions are not used.""" + files = [] + pattern = input_api.re.compile(r'^#pragma\s+once', input_api.re.MULTILINE) + file_filter = lambda x: (input_api.FilterSourceFile(x) and + source_file_filter(x)) + for f in input_api.AffectedSourceFiles(file_filter): + if not f.LocalPath().endswith('.h'): + continue + contents = input_api.ReadFile(f) + if pattern.search(contents): + files.append(f) + + if files: + return [ + output_api.PresubmitError( + 'Do not use #pragma once in header files.\n' + 'See http://www.chromium.org/developers/coding-style' + '#TOC-File-headers', + files) + ] return [] - return [output_api.PresubmitPromptWarning('WebRTC\'s code should not use ' - 'gtest\'s FRIEND_TEST() macro. 
Include testsupport/gtest_prod_util.h and ' - 'use FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems))] +def CheckNoFRIEND_TEST(# pylint: disable=invalid-name + input_api, + output_api, + source_file_filter): + """Make sure that gtest's FRIEND_TEST() macro is not used, the + FRIEND_TEST_ALL_PREFIXES() macro from testsupport/gtest_prod_util.h should be + used instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes.""" + problems = [] + + file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and + source_file_filter(f)) + for f in input_api.AffectedFiles(file_filter=file_filter): + for line_num, line in f.ChangedContents(): + if 'FRIEND_TEST(' in line: + problems.append(' %s:%d' % (f.LocalPath(), line_num)) + + if not problems: + return [] + return [ + output_api.PresubmitPromptWarning( + 'WebRTC\'s code should not use gtest\'s FRIEND_TEST() macro. ' + 'Include testsupport/gtest_prod_util.h and use ' + 'FRIEND_TEST_ALL_PREFIXES() instead.\n' + '\n'.join(problems)) + ] -def IsLintBlacklisted(blacklist_paths, file_path): - """ Checks if a file is blacklisted for lint check.""" - for path in blacklist_paths: - if file_path == path or os.path.dirname(file_path).startswith(path): - return True - return False + +def IsLintDisabled(disabled_paths, file_path): + """ Checks if a file is disabled for lint check.""" + for path in disabled_paths: + if file_path == path or os.path.dirname(file_path).startswith(path): + return True + return False def CheckApprovedFilesLintClean(input_api, output_api, source_file_filter=None): - """Checks that all new or non-blacklisted .cc and .h files pass cpplint.py. + """Checks that all new or non-exempt .cc and .h files pass cpplint.py. This check is based on CheckChangeLintsClean in depot_tools/presubmit_canned_checks.py but has less filters and only checks added files.""" - result = [] - - # Initialize cpplint. 
- import cpplint - # Access to a protected member _XX of a client class - # pylint: disable=W0212 - cpplint._cpplint_state.ResetErrorCounts() - - lint_filters = cpplint._Filters() - lint_filters.extend(BLACKLIST_LINT_FILTERS) - cpplint._SetFilters(','.join(lint_filters)) - - # Create a platform independent blacklist for cpplint. - blacklist_paths = [input_api.os_path.join(*path.split('/')) - for path in CPPLINT_BLACKLIST] - - # Use the strictest verbosity level for cpplint.py (level 1) which is the - # default when running cpplint.py from command line. To make it possible to - # work with not-yet-converted code, we're only applying it to new (or - # moved/renamed) files and files not listed in CPPLINT_BLACKLIST. - verbosity_level = 1 - files = [] - for f in input_api.AffectedSourceFiles(source_file_filter): - # Note that moved/renamed files also count as added. - if f.Action() == 'A' or not IsLintBlacklisted(blacklist_paths, - f.LocalPath()): - files.append(f.AbsoluteLocalPath()) - - for file_name in files: - cpplint.ProcessFile(file_name, verbosity_level) - - if cpplint._cpplint_state.error_count > 0: - if input_api.is_committing: - res_type = output_api.PresubmitError - else: - res_type = output_api.PresubmitPromptWarning - result = [res_type('Changelist failed cpplint.py check.')] + result = [] - return result + # Initialize cpplint. + import cpplint + # Access to a protected member _XX of a client class + # pylint: disable=W0212 + cpplint._cpplint_state.ResetErrorCounts() + + lint_filters = cpplint._Filters() + lint_filters.extend(DISABLED_LINT_FILTERS) + cpplint._SetFilters(','.join(lint_filters)) + + # Create a platform independent exempt list for cpplint. + disabled_paths = [ + input_api.os_path.join(*path.split('/')) for path in CPPLINT_EXCEPTIONS + ] + + # Use the strictest verbosity level for cpplint.py (level 1) which is the + # default when running cpplint.py from command line. 
To make it possible to + # work with not-yet-converted code, we're only applying it to new (or + # moved/renamed) files and files not listed in CPPLINT_EXCEPTIONS. + verbosity_level = 1 + files = [] + for f in input_api.AffectedSourceFiles(source_file_filter): + # Note that moved/renamed files also count as added. + if f.Action() == 'A' or not IsLintDisabled(disabled_paths, + f.LocalPath()): + files.append(f.AbsoluteLocalPath()) + + for file_name in files: + cpplint.ProcessFile(file_name, verbosity_level) + + if cpplint._cpplint_state.error_count > 0: + if input_api.is_committing: + res_type = output_api.PresubmitError + else: + res_type = output_api.PresubmitPromptWarning + result = [res_type('Changelist failed cpplint.py check.')] + + return result def CheckNoSourcesAbove(input_api, gn_files, output_api): - # Disallow referencing source files with paths above the GN file location. - source_pattern = input_api.re.compile(r' +sources \+?= \[(.*?)\]', - re.MULTILINE | re.DOTALL) - file_pattern = input_api.re.compile(r'"((\.\./.*?)|(//.*?))"') - violating_gn_files = set() - violating_source_entries = [] - for gn_file in gn_files: - contents = input_api.ReadFile(gn_file) - for source_block_match in source_pattern.finditer(contents): - # Find all source list entries starting with ../ in the source block - # (exclude overrides entries). - for file_list_match in file_pattern.finditer(source_block_match.group(1)): - source_file = file_list_match.group(1) - if 'overrides/' not in source_file: - violating_source_entries.append(source_file) - violating_gn_files.add(gn_file) - if violating_gn_files: - return [output_api.PresubmitError( - 'Referencing source files above the directory of the GN file is not ' - 'allowed. 
Please introduce new GN targets in the proper location ' - 'instead.\n' - 'Invalid source entries:\n' - '%s\n' - 'Violating GN files:' % '\n'.join(violating_source_entries), - items=violating_gn_files)] - return [] + # Disallow referencing source files with paths above the GN file location. + source_pattern = input_api.re.compile(r' +sources \+?= \[(.*?)\]', + re.MULTILINE | re.DOTALL) + file_pattern = input_api.re.compile(r'"((\.\./.*?)|(//.*?))"') + violating_gn_files = set() + violating_source_entries = [] + for gn_file in gn_files: + contents = input_api.ReadFile(gn_file) + for source_block_match in source_pattern.finditer(contents): + # Find all source list entries starting with ../ in the source block + # (exclude overrides entries). + for file_list_match in file_pattern.finditer( + source_block_match.group(1)): + source_file = file_list_match.group(1) + if 'overrides/' not in source_file: + violating_source_entries.append(source_file) + violating_gn_files.add(gn_file) + if violating_gn_files: + return [ + output_api.PresubmitError( + 'Referencing source files above the directory of the GN file ' + 'is not allowed. 
Please introduce new GN targets in the proper ' + 'location instead.\n' + 'Invalid source entries:\n' + '%s\n' + 'Violating GN files:' % '\n'.join(violating_source_entries), + items=violating_gn_files) + ] + return [] + + +def CheckAbseilDependencies(input_api, gn_files, output_api): + """Checks that Abseil dependencies are declared in `absl_deps`.""" + absl_re = re.compile(r'third_party/abseil-cpp', re.MULTILINE | re.DOTALL) + target_types_to_check = [ + 'rtc_library', + 'rtc_source_set', + 'rtc_static_library', + 'webrtc_fuzzer_test', + ] + error_msg = ('Abseil dependencies in target "%s" (file: %s) ' + 'should be moved to the "absl_deps" parameter.') + errors = [] + + for gn_file in gn_files: + gn_file_content = input_api.ReadFile(gn_file) + for target_match in TARGET_RE.finditer(gn_file_content): + target_type = target_match.group('target_type') + target_name = target_match.group('target_name') + target_contents = target_match.group('target_contents') + if target_type in target_types_to_check: + for deps_match in DEPS_RE.finditer(target_contents): + deps = deps_match.group('deps').splitlines() + for dep in deps: + if re.search(absl_re, dep): + errors.append( + output_api.PresubmitError( + error_msg % + (target_name, gn_file.LocalPath()))) + break # no need to warn more than once per target + return errors def CheckNoMixingSources(input_api, gn_files, output_api): - """Disallow mixing C, C++ and Obj-C/Obj-C++ in the same target. + """Disallow mixing C, C++ and Obj-C/Obj-C++ in the same target. See bugs.webrtc.org/7743 for more context. 
""" - def _MoreThanOneSourceUsed(*sources_lists): - sources_used = 0 - for source_list in sources_lists: - if len(source_list): - sources_used += 1 - return sources_used > 1 - - errors = defaultdict(lambda: []) - for gn_file in gn_files: - gn_file_content = input_api.ReadFile(gn_file) - for target_match in TARGET_RE.finditer(gn_file_content): - # list_of_sources is a list of tuples of the form - # (c_files, cc_files, objc_files) that keeps track of all the sources - # defined in a target. A GN target can have more that on definition of - # sources (since it supports if/else statements). - # E.g.: - # rtc_static_library("foo") { - # if (is_win) { - # sources = [ "foo.cc" ] - # } else { - # sources = [ "foo.mm" ] - # } - # } - # This is allowed and the presubmit check should support this case. - list_of_sources = [] - c_files = [] - cc_files = [] - objc_files = [] - target_name = target_match.group('target_name') - target_contents = target_match.group('target_contents') - for sources_match in SOURCES_RE.finditer(target_contents): - if '+=' not in sources_match.group(0): - if c_files or cc_files or objc_files: + def _MoreThanOneSourceUsed(*sources_lists): + sources_used = 0 + for source_list in sources_lists: + if len(source_list): + sources_used += 1 + return sources_used > 1 + + errors = defaultdict(lambda: []) + for gn_file in gn_files: + gn_file_content = input_api.ReadFile(gn_file) + for target_match in TARGET_RE.finditer(gn_file_content): + # list_of_sources is a list of tuples of the form + # (c_files, cc_files, objc_files) that keeps track of all the + # sources defined in a target. A GN target can have more that + # on definition of sources (since it supports if/else statements). + # E.g.: + # rtc_static_library("foo") { + # if (is_win) { + # sources = [ "foo.cc" ] + # } else { + # sources = [ "foo.mm" ] + # } + # } + # This is allowed and the presubmit check should support this case. 
+ list_of_sources = [] + c_files = [] + cc_files = [] + objc_files = [] + target_name = target_match.group('target_name') + target_contents = target_match.group('target_contents') + for sources_match in SOURCES_RE.finditer(target_contents): + if '+=' not in sources_match.group(0): + if c_files or cc_files or objc_files: + list_of_sources.append((c_files, cc_files, objc_files)) + c_files = [] + cc_files = [] + objc_files = [] + for file_match in FILE_PATH_RE.finditer( + sources_match.group(1)): + file_path = file_match.group('file_path') + extension = file_match.group('extension') + if extension == '.c': + c_files.append(file_path + extension) + if extension == '.cc': + cc_files.append(file_path + extension) + if extension in ['.m', '.mm']: + objc_files.append(file_path + extension) list_of_sources.append((c_files, cc_files, objc_files)) - c_files = [] - cc_files = [] - objc_files = [] - for file_match in FILE_PATH_RE.finditer(sources_match.group(1)): - file_path = file_match.group('file_path') - extension = file_match.group('extension') - if extension == '.c': - c_files.append(file_path + extension) - if extension == '.cc': - cc_files.append(file_path + extension) - if extension in ['.m', '.mm']: - objc_files.append(file_path + extension) - list_of_sources.append((c_files, cc_files, objc_files)) - for c_files_list, cc_files_list, objc_files_list in list_of_sources: - if _MoreThanOneSourceUsed(c_files_list, cc_files_list, objc_files_list): - all_sources = sorted(c_files_list + cc_files_list + objc_files_list) - errors[gn_file.LocalPath()].append((target_name, all_sources)) - if errors: - return [output_api.PresubmitError( - 'GN targets cannot mix .c, .cc and .m (or .mm) source files.\n' - 'Please create a separate target for each collection of sources.\n' - 'Mixed sources: \n' - '%s\n' - 'Violating GN files:\n%s\n' % (json.dumps(errors, indent=2), - '\n'.join(errors.keys())))] - return [] + for c_files_list, cc_files_list, objc_files_list in list_of_sources: + if 
_MoreThanOneSourceUsed(c_files_list, cc_files_list, + objc_files_list): + all_sources = sorted(c_files_list + cc_files_list + + objc_files_list) + errors[gn_file.LocalPath()].append( + (target_name, all_sources)) + if errors: + return [ + output_api.PresubmitError( + 'GN targets cannot mix .c, .cc and .m (or .mm) source files.\n' + 'Please create a separate target for each collection of ' + 'sources.\n' + 'Mixed sources: \n' + '%s\n' + 'Violating GN files:\n%s\n' % + (json.dumps(errors, indent=2), '\n'.join(errors.keys()))) + ] + return [] def CheckNoPackageBoundaryViolations(input_api, gn_files, output_api): - cwd = input_api.PresubmitLocalPath() - with _AddToPath(input_api.os_path.join( - cwd, 'tools_webrtc', 'presubmit_checks_lib')): - from check_package_boundaries import CheckPackageBoundaries - build_files = [os.path.join(cwd, gn_file.LocalPath()) for gn_file in gn_files] - errors = CheckPackageBoundaries(cwd, build_files)[:5] - if errors: - return [output_api.PresubmitError( - 'There are package boundary violations in the following GN files:', - long_text='\n\n'.join(str(err) for err in errors))] - return [] + cwd = input_api.PresubmitLocalPath() + with _AddToPath( + input_api.os_path.join(cwd, 'tools_webrtc', + 'presubmit_checks_lib')): + from check_package_boundaries import CheckPackageBoundaries + build_files = [ + os.path.join(cwd, gn_file.LocalPath()) for gn_file in gn_files + ] + errors = CheckPackageBoundaries(cwd, build_files)[:5] + if errors: + return [ + output_api.PresubmitError( + 'There are package boundary violations in the following GN ' + 'files:', long_text='\n\n'.join(str(err) for err in errors)) + ] + return [] def _ReportFileAndLine(filename, line_num): - """Default error formatter for _FindNewViolationsOfRule.""" - return '%s (line %s)' % (filename, line_num) + """Default error formatter for _FindNewViolationsOfRule.""" + return '%s (line %s)' % (filename, line_num) -def CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, 
output_api, +def CheckNoWarningSuppressionFlagsAreAdded(gn_files, + input_api, + output_api, error_formatter=_ReportFileAndLine): - """Make sure that warning suppression flags are not added wihtout a reason.""" - msg = ('Usage of //build/config/clang:extra_warnings is discouraged ' - 'in WebRTC.\n' - 'If you are not adding this code (e.g. you are just moving ' - 'existing code) or you want to add an exception,\n' - 'you can add a comment on the line that causes the problem:\n\n' - '"-Wno-odr" # no-presubmit-check TODO(bugs.webrtc.org/BUG_ID)\n' - '\n' - 'Affected files:\n') - errors = [] # 2-element tuples with (file, line number) - clang_warn_re = input_api.re.compile(r'//build/config/clang:extra_warnings') - no_presubmit_re = input_api.re.compile( - r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)') - for f in gn_files: - for line_num, line in f.ChangedContents(): - if clang_warn_re.search(line) and not no_presubmit_re.search(line): - errors.append(error_formatter(f.LocalPath(), line_num)) - if errors: - return [output_api.PresubmitError(msg, errors)] - return [] - - -def CheckNoTestCaseUsageIsAdded(input_api, output_api, source_file_filter, + """Ensure warning suppression flags are not added wihtout a reason.""" + msg = ('Usage of //build/config/clang:extra_warnings is discouraged ' + 'in WebRTC.\n' + 'If you are not adding this code (e.g. 
you are just moving ' + 'existing code) or you want to add an exception,\n' + 'you can add a comment on the line that causes the problem:\n\n' + '"-Wno-odr" # no-presubmit-check TODO(bugs.webrtc.org/BUG_ID)\n' + '\n' + 'Affected files:\n') + errors = [] # 2-element tuples with (file, line number) + clang_warn_re = input_api.re.compile( + r'//build/config/clang:extra_warnings') + no_presubmit_re = input_api.re.compile( + r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)') + for f in gn_files: + for line_num, line in f.ChangedContents(): + if clang_warn_re.search(line) and not no_presubmit_re.search(line): + errors.append(error_formatter(f.LocalPath(), line_num)) + if errors: + return [output_api.PresubmitError(msg, errors)] + return [] + + +def CheckNoTestCaseUsageIsAdded(input_api, + output_api, + source_file_filter, error_formatter=_ReportFileAndLine): - error_msg = ('Usage of legacy GoogleTest API detected!\nPlease use the ' - 'new API: https://github.com/google/googletest/blob/master/' - 'googletest/docs/primer.md#beware-of-the-nomenclature.\n' - 'Affected files:\n') - errors = [] # 2-element tuples with (file, line number) - test_case_re = input_api.re.compile(r'TEST_CASE') - file_filter = lambda f: (source_file_filter(f) - and f.LocalPath().endswith('.cc')) - for f in input_api.AffectedSourceFiles(file_filter): - for line_num, line in f.ChangedContents(): - if test_case_re.search(line): - errors.append(error_formatter(f.LocalPath(), line_num)) - if errors: - return [output_api.PresubmitError(error_msg, errors)] - return [] - - -def CheckNoStreamUsageIsAdded(input_api, output_api, + error_msg = ('Usage of legacy GoogleTest API detected!\nPlease use the ' + 'new API: https://github.com/google/googletest/blob/master/' + 'googletest/docs/primer.md#beware-of-the-nomenclature.\n' + 'Affected files:\n') + errors = [] # 2-element tuples with (file, line number) + test_case_re = input_api.re.compile(r'TEST_CASE') + file_filter = lambda f: (source_file_filter(f) and 
f.LocalPath().endswith( + '.cc')) + for f in input_api.AffectedSourceFiles(file_filter): + for line_num, line in f.ChangedContents(): + if test_case_re.search(line): + errors.append(error_formatter(f.LocalPath(), line_num)) + if errors: + return [output_api.PresubmitError(error_msg, errors)] + return [] + + +def CheckNoStreamUsageIsAdded(input_api, + output_api, source_file_filter, error_formatter=_ReportFileAndLine): - """Make sure that no more dependencies on stringstream are added.""" - error_msg = ('Usage of , and in WebRTC is ' - 'deprecated.\n' - 'This includes the following types:\n' - 'std::istringstream, std::ostringstream, std::wistringstream, ' - 'std::wostringstream,\n' - 'std::wstringstream, std::ostream, std::wostream, std::istream,' - 'std::wistream,\n' - 'std::iostream, std::wiostream.\n' - 'If you are not adding this code (e.g. you are just moving ' - 'existing code),\n' - 'you can add a comment on the line that causes the problem:\n\n' - '#include // no-presubmit-check TODO(webrtc:8982)\n' - 'std::ostream& F() { // no-presubmit-check TODO(webrtc:8982)\n' - '\n' - 'If you are adding new code, consider using ' - 'rtc::SimpleStringBuilder\n' - '(in rtc_base/strings/string_builder.h).\n' - 'Affected files:\n') - errors = [] # 2-element tuples with (file, line number) - include_re = input_api.re.compile(r'#include <(i|o|s)stream>') - usage_re = input_api.re.compile(r'std::(w|i|o|io|wi|wo|wio)(string)*stream') - no_presubmit_re = input_api.re.compile( - r'// no-presubmit-check TODO\(webrtc:8982\)') - file_filter = lambda x: (input_api.FilterSourceFile(x) - and source_file_filter(x)) - - def _IsException(file_path): - is_test = any(file_path.endswith(x) for x in ['_test.cc', '_tests.cc', - '_unittest.cc', - '_unittests.cc']) - return (file_path.startswith('examples') or - file_path.startswith('test') or - is_test) - - - for f in input_api.AffectedSourceFiles(file_filter): - # Usage of stringstream is allowed under examples/ and in tests. 
- if f.LocalPath() == 'PRESUBMIT.py' or _IsException(f.LocalPath()): - continue - for line_num, line in f.ChangedContents(): - if ((include_re.search(line) or usage_re.search(line)) - and not no_presubmit_re.search(line)): - errors.append(error_formatter(f.LocalPath(), line_num)) - if errors: - return [output_api.PresubmitError(error_msg, errors)] - return [] + """Make sure that no more dependencies on stringstream are added.""" + error_msg = ( + 'Usage of , and in WebRTC is ' + 'deprecated.\n' + 'This includes the following types:\n' + 'std::istringstream, std::ostringstream, std::wistringstream, ' + 'std::wostringstream,\n' + 'std::wstringstream, std::ostream, std::wostream, std::istream,' + 'std::wistream,\n' + 'std::iostream, std::wiostream.\n' + 'If you are not adding this code (e.g. you are just moving ' + 'existing code),\n' + 'you can add a comment on the line that causes the problem:\n\n' + '#include // no-presubmit-check TODO(webrtc:8982)\n' + 'std::ostream& F() { // no-presubmit-check TODO(webrtc:8982)\n' + '\n' + 'If you are adding new code, consider using ' + 'rtc::SimpleStringBuilder\n' + '(in rtc_base/strings/string_builder.h).\n' + 'Affected files:\n') + errors = [] # 2-element tuples with (file, line number) + include_re = input_api.re.compile(r'#include <(i|o|s)stream>') + usage_re = input_api.re.compile( + r'std::(w|i|o|io|wi|wo|wio)(string)*stream') + no_presubmit_re = input_api.re.compile( + r'// no-presubmit-check TODO\(webrtc:8982\)') + file_filter = lambda x: (input_api.FilterSourceFile(x) and + source_file_filter(x)) + + def _IsException(file_path): + is_test = any( + file_path.endswith(x) for x in + ['_test.cc', '_tests.cc', '_unittest.cc', '_unittests.cc']) + return (file_path.startswith('examples') + or file_path.startswith('test') or is_test) + + for f in input_api.AffectedSourceFiles(file_filter): + # Usage of stringstream is allowed under examples/ and in tests. 
+ if f.LocalPath() == 'PRESUBMIT.py' or _IsException(f.LocalPath()): + continue + for line_num, line in f.ChangedContents(): + if ((include_re.search(line) or usage_re.search(line)) + and not no_presubmit_re.search(line)): + errors.append(error_formatter(f.LocalPath(), line_num)) + if errors: + return [output_api.PresubmitError(error_msg, errors)] + return [] def CheckPublicDepsIsNotUsed(gn_files, input_api, output_api): - """Checks that public_deps is not used without a good reason.""" - result = [] - no_presubmit_check_re = input_api.re.compile( - r'# no-presubmit-check TODO\(webrtc:\d+\)') - error_msg = ('public_deps is not recommended in WebRTC BUILD.gn files ' - 'because it doesn\'t map well to downstream build systems.\n' - 'Used in: %s (line %d).\n' - 'If you are not adding this code (e.g. you are just moving ' - 'existing code) or you have a good reason, you can add this ' - 'comment (verbatim) on the line that causes the problem:\n\n' - 'public_deps = [ # no-presubmit-check TODO(webrtc:8603)\n') - for affected_file in gn_files: - for (line_number, affected_line) in affected_file.ChangedContents(): - if 'public_deps' in affected_line: - surpressed = no_presubmit_check_re.search(affected_line) - if not surpressed: - result.append( - output_api.PresubmitError(error_msg % (affected_file.LocalPath(), - line_number))) - return result + """Checks that public_deps is not used without a good reason.""" + result = [] + no_presubmit_check_re = input_api.re.compile( + r'# no-presubmit-check TODO\(webrtc:\d+\)') + error_msg = ('public_deps is not recommended in WebRTC BUILD.gn files ' + 'because it doesn\'t map well to downstream build systems.\n' + 'Used in: %s (line %d).\n' + 'If you are not adding this code (e.g. 
you are just moving ' + 'existing code) or you have a good reason, you can add this ' + 'comment (verbatim) on the line that causes the problem:\n\n' + 'public_deps = [ # no-presubmit-check TODO(webrtc:8603)\n') + for affected_file in gn_files: + for (line_number, affected_line) in affected_file.ChangedContents(): + if 'public_deps' in affected_line: + surpressed = no_presubmit_check_re.search(affected_line) + if not surpressed: + result.append( + output_api.PresubmitError( + error_msg % + (affected_file.LocalPath(), line_number))) + return result def CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api): - result = [] - error_msg = ('check_includes overrides are not allowed since it can cause ' - 'incorrect dependencies to form. It effectively means that your ' - 'module can include any .h file without depending on its ' - 'corresponding target. There are some exceptional cases when ' - 'this is allowed: if so, get approval from a .gn owner in the ' - 'root OWNERS file.\n' - 'Used in: %s (line %d).') - no_presubmit_re = input_api.re.compile( - r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)') - for affected_file in gn_files: - for (line_number, affected_line) in affected_file.ChangedContents(): - if ('check_includes' in affected_line - and not no_presubmit_re.search(affected_line)): - result.append( - output_api.PresubmitError(error_msg % (affected_file.LocalPath(), - line_number))) - return result + result = [] + error_msg = ( + 'check_includes overrides are not allowed since it can cause ' + 'incorrect dependencies to form. It effectively means that your ' + 'module can include any .h file without depending on its ' + 'corresponding target. 
There are some exceptional cases when ' + 'this is allowed: if so, get approval from a .gn owner in the ' + 'root OWNERS file.\n' + 'Used in: %s (line %d).') + no_presubmit_re = input_api.re.compile( + r'# no-presubmit-check TODO\(bugs\.webrtc\.org/\d+\)') + for affected_file in gn_files: + for (line_number, affected_line) in affected_file.ChangedContents(): + if ('check_includes' in affected_line + and not no_presubmit_re.search(affected_line)): + result.append( + output_api.PresubmitError( + error_msg % (affected_file.LocalPath(), line_number))) + return result def CheckGnChanges(input_api, output_api): - file_filter = lambda x: (input_api.FilterSourceFile( - x, white_list=(r'.+\.(gn|gni)$',), - black_list=(r'.*/presubmit_checks_lib/testdata/.*',))) - - gn_files = [] - for f in input_api.AffectedSourceFiles(file_filter): - gn_files.append(f) - - result = [] - if gn_files: - result.extend(CheckNoSourcesAbove(input_api, gn_files, output_api)) - result.extend(CheckNoMixingSources(input_api, gn_files, output_api)) - result.extend(CheckNoPackageBoundaryViolations(input_api, gn_files, + file_filter = lambda x: (input_api.FilterSourceFile( + x, + files_to_check=(r'.+\.(gn|gni)$', ), + files_to_skip=(r'.*/presubmit_checks_lib/testdata/.*', ))) + + gn_files = [] + for f in input_api.AffectedSourceFiles(file_filter): + gn_files.append(f) + + result = [] + if gn_files: + result.extend(CheckNoSourcesAbove(input_api, gn_files, output_api)) + result.extend(CheckNoMixingSources(input_api, gn_files, output_api)) + result.extend(CheckAbseilDependencies(input_api, gn_files, output_api)) + result.extend( + CheckNoPackageBoundaryViolations(input_api, gn_files, output_api)) + result.extend(CheckPublicDepsIsNotUsed(gn_files, input_api, + output_api)) + result.extend( + CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api)) + result.extend( + CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, output_api)) - result.extend(CheckPublicDepsIsNotUsed(gn_files, input_api, 
output_api)) - result.extend(CheckCheckIncludesIsNotUsed(gn_files, input_api, output_api)) - result.extend(CheckNoWarningSuppressionFlagsAreAdded(gn_files, input_api, - output_api)) - return result + return result def CheckGnGen(input_api, output_api): - """Runs `gn gen --check` with default args to detect mismatches between + """Runs `gn gen --check` with default args to detect mismatches between #includes and dependencies in the BUILD.gn files, as well as general build errors. """ - with _AddToPath(input_api.os_path.join( - input_api.PresubmitLocalPath(), 'tools_webrtc', 'presubmit_checks_lib')): - from build_helpers import RunGnCheck - errors = RunGnCheck(FindSrcDirPath(input_api.PresubmitLocalPath()))[:5] - if errors: - return [output_api.PresubmitPromptWarning( - 'Some #includes do not match the build dependency graph. Please run:\n' - ' gn gen --check ', - long_text='\n\n'.join(errors))] - return [] + with _AddToPath( + input_api.os_path.join(input_api.PresubmitLocalPath(), + 'tools_webrtc', 'presubmit_checks_lib')): + from build_helpers import RunGnCheck + errors = RunGnCheck(FindSrcDirPath(input_api.PresubmitLocalPath()))[:5] + if errors: + return [ + output_api.PresubmitPromptWarning( + 'Some #includes do not match the build dependency graph. ' + 'Please run:\n' + ' gn gen --check ', + long_text='\n\n'.join(errors)) + ] + return [] def CheckUnwantedDependencies(input_api, output_api, source_file_filter): - """Runs checkdeps on #include statements added in this + """Runs checkdeps on #include statements added in this change. Breaking - rules is an error, breaking ! rules is a warning. """ - # Copied from Chromium's src/PRESUBMIT.py. - - # We need to wait until we have an input_api object and use this - # roundabout construct to import checkdeps because this file is - # eval-ed and thus doesn't have __file__. 
- src_path = FindSrcDirPath(input_api.PresubmitLocalPath()) - checkdeps_path = input_api.os_path.join(src_path, 'buildtools', 'checkdeps') - if not os.path.exists(checkdeps_path): - return [output_api.PresubmitError( - 'Cannot find checkdeps at %s\nHave you run "gclient sync" to ' - 'download all the DEPS entries?' % checkdeps_path)] - with _AddToPath(checkdeps_path): - import checkdeps - from cpp_checker import CppChecker - from rules import Rule - - added_includes = [] - for f in input_api.AffectedFiles(file_filter=source_file_filter): - if not CppChecker.IsCppFile(f.LocalPath()): - continue - - changed_lines = [line for _, line in f.ChangedContents()] - added_includes.append([f.LocalPath(), changed_lines]) - - deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath()) - - error_descriptions = [] - warning_descriptions = [] - for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes( - added_includes): - description_with_path = '%s\n %s' % (path, rule_description) - if rule_type == Rule.DISALLOW: - error_descriptions.append(description_with_path) - else: - warning_descriptions.append(description_with_path) - - results = [] - if error_descriptions: - results.append(output_api.PresubmitError( - 'You added one or more #includes that violate checkdeps rules.\n' - 'Check that the DEPS files in these locations contain valid rules.\n' - 'See https://cs.chromium.org/chromium/src/buildtools/checkdeps/ for ' - 'more details about checkdeps.', - error_descriptions)) - if warning_descriptions: - results.append(output_api.PresubmitPromptOrNotify( - 'You added one or more #includes of files that are temporarily\n' - 'allowed but being removed. Can you avoid introducing the\n' - '#include? See relevant DEPS file(s) for details and contacts.\n' - 'See https://cs.chromium.org/chromium/src/buildtools/checkdeps/ for ' - 'more details about checkdeps.', - warning_descriptions)) - return results + # Copied from Chromium's src/PRESUBMIT.py. 
+ + # We need to wait until we have an input_api object and use this + # roundabout construct to import checkdeps because this file is + # eval-ed and thus doesn't have __file__. + src_path = FindSrcDirPath(input_api.PresubmitLocalPath()) + checkdeps_path = input_api.os_path.join(src_path, 'buildtools', + 'checkdeps') + if not os.path.exists(checkdeps_path): + return [ + output_api.PresubmitError( + 'Cannot find checkdeps at %s\nHave you run "gclient sync" to ' + 'download all the DEPS entries?' % checkdeps_path) + ] + with _AddToPath(checkdeps_path): + import checkdeps + from cpp_checker import CppChecker + from rules import Rule + + added_includes = [] + for f in input_api.AffectedFiles(file_filter=source_file_filter): + if not CppChecker.IsCppFile(f.LocalPath()): + continue + + changed_lines = [line for _, line in f.ChangedContents()] + added_includes.append([f.LocalPath(), changed_lines]) + + deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath()) + + error_descriptions = [] + warning_descriptions = [] + for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes( + added_includes): + description_with_path = '%s\n %s' % (path, rule_description) + if rule_type == Rule.DISALLOW: + error_descriptions.append(description_with_path) + else: + warning_descriptions.append(description_with_path) + + results = [] + if error_descriptions: + results.append( + output_api.PresubmitError( + 'You added one or more #includes that violate checkdeps rules.' + '\nCheck that the DEPS files in these locations contain valid ' + 'rules.\nSee ' + 'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ ' + 'for more details about checkdeps.', error_descriptions)) + if warning_descriptions: + results.append( + output_api.PresubmitPromptOrNotify( + 'You added one or more #includes of files that are temporarily' + '\nallowed but being removed. Can you avoid introducing the\n' + '#include? See relevant DEPS file(s) for details and contacts.' 
+ '\nSee ' + 'https://cs.chromium.org/chromium/src/buildtools/checkdeps/ ' + 'for more details about checkdeps.', warning_descriptions)) + return results def CheckCommitMessageBugEntry(input_api, output_api): - """Check that bug entries are well-formed in commit message.""" - bogus_bug_msg = ( - 'Bogus Bug entry: %s. Please specify the issue tracker prefix and the ' - 'issue number, separated by a colon, e.g. webrtc:123 or chromium:12345.') - results = [] - for bug in input_api.change.BugsFromDescription(): - bug = bug.strip() - if bug.lower() == 'none': - continue - if 'b/' not in bug and ':' not in bug: - try: - if int(bug) > 100000: - # Rough indicator for current chromium bugs. - prefix_guess = 'chromium' - else: - prefix_guess = 'webrtc' - results.append('Bug entry requires issue tracker prefix, e.g. %s:%s' % - (prefix_guess, bug)) - except ValueError: - results.append(bogus_bug_msg % bug) - elif not (re.match(r'\w+:\d+', bug) or re.match(r'b/\d+', bug)): - results.append(bogus_bug_msg % bug) - return [output_api.PresubmitError(r) for r in results] + """Check that bug entries are well-formed in commit message.""" + bogus_bug_msg = ( + 'Bogus Bug entry: %s. Please specify the issue tracker prefix and the ' + 'issue number, separated by a colon, e.g. webrtc:123 or chromium:12345.' + ) + results = [] + for bug in input_api.change.BugsFromDescription(): + bug = bug.strip() + if bug.lower() == 'none': + continue + if 'b/' not in bug and ':' not in bug: + try: + if int(bug) > 100000: + # Rough indicator for current chromium bugs. + prefix_guess = 'chromium' + else: + prefix_guess = 'webrtc' + results.append( + 'Bug entry requires issue tracker prefix, e.g. 
%s:%s' % + (prefix_guess, bug)) + except ValueError: + results.append(bogus_bug_msg % bug) + elif not (re.match(r'\w+:\d+', bug) or re.match(r'b/\d+', bug)): + results.append(bogus_bug_msg % bug) + return [output_api.PresubmitError(r) for r in results] def CheckChangeHasBugField(input_api, output_api): - """Requires that the changelist is associated with a bug. + """Requires that the changelist is associated with a bug. This check is stricter than the one in depot_tools/presubmit_canned_checks.py since it fails the presubmit if the bug field is missing or doesn't contain @@ -699,212 +786,271 @@ def CheckChangeHasBugField(input_api, output_api): This supports both 'BUG=' and 'Bug:' since we are in the process of migrating to Gerrit and it encourages the usage of 'Bug:'. """ - if input_api.change.BugsFromDescription(): - return [] - else: - return [output_api.PresubmitError( - 'The "Bug: [bug number]" footer is mandatory. Please create a bug and ' - 'reference it using either of:\n' - ' * https://bugs.webrtc.org - reference it using Bug: webrtc:XXXX\n' - ' * https://crbug.com - reference it using Bug: chromium:XXXXXX')] + if input_api.change.BugsFromDescription(): + return [] + else: + return [ + output_api.PresubmitError( + 'The "Bug: [bug number]" footer is mandatory. 
Please create a ' + 'bug and reference it using either of:\n' + ' * https://bugs.webrtc.org - reference it using Bug: ' + 'webrtc:XXXX\n' + ' * https://crbug.com - reference it using Bug: chromium:XXXXXX' + ) + ] def CheckJSONParseErrors(input_api, output_api, source_file_filter): - """Check that JSON files do not contain syntax errors.""" - - def FilterFile(affected_file): - return (input_api.os_path.splitext(affected_file.LocalPath())[1] == '.json' - and source_file_filter(affected_file)) - - def GetJSONParseError(input_api, filename): - try: - contents = input_api.ReadFile(filename) - input_api.json.loads(contents) - except ValueError as e: - return e - return None - - results = [] - for affected_file in input_api.AffectedFiles( - file_filter=FilterFile, include_deletes=False): - parse_error = GetJSONParseError(input_api, - affected_file.AbsoluteLocalPath()) - if parse_error: - results.append(output_api.PresubmitError('%s could not be parsed: %s' % - (affected_file.LocalPath(), - parse_error))) - return results + """Check that JSON files do not contain syntax errors.""" + + def FilterFile(affected_file): + return (input_api.os_path.splitext( + affected_file.LocalPath())[1] == '.json' + and source_file_filter(affected_file)) + + def GetJSONParseError(input_api, filename): + try: + contents = input_api.ReadFile(filename) + input_api.json.loads(contents) + except ValueError as e: + return e + return None + + results = [] + for affected_file in input_api.AffectedFiles(file_filter=FilterFile, + include_deletes=False): + parse_error = GetJSONParseError(input_api, + affected_file.AbsoluteLocalPath()) + if parse_error: + results.append( + output_api.PresubmitError( + '%s could not be parsed: %s' % + (affected_file.LocalPath(), parse_error))) + return results def RunPythonTests(input_api, output_api): - def Join(*args): - return input_api.os_path.join(input_api.PresubmitLocalPath(), *args) - - test_directories = [ - input_api.PresubmitLocalPath(), - Join('rtc_tools', 
'py_event_log_analyzer'), - Join('audio', 'test', 'unittests'), - ] + [ - root for root, _, files in os.walk(Join('tools_webrtc')) - if any(f.endswith('_test.py') for f in files) - ] - - tests = [] - for directory in test_directories: - tests.extend( - input_api.canned_checks.GetUnitTestsInDirectory( - input_api, - output_api, - directory, - whitelist=[r'.+_test\.py$'])) - return input_api.RunTests(tests, parallel=True) + def Join(*args): + return input_api.os_path.join(input_api.PresubmitLocalPath(), *args) + + test_directories = [ + input_api.PresubmitLocalPath(), + Join('rtc_tools', 'py_event_log_analyzer'), + Join('audio', 'test', 'unittests'), + ] + [ + root for root, _, files in os.walk(Join('tools_webrtc')) if any( + f.endswith('_test.py') for f in files) + ] + + tests = [] + for directory in test_directories: + tests.extend( + input_api.canned_checks.GetUnitTestsInDirectory( + input_api, + output_api, + directory, + files_to_check=[r'.+_test\.py$'])) + return input_api.RunTests(tests, parallel=True) def CheckUsageOfGoogleProtobufNamespace(input_api, output_api, source_file_filter): - """Checks that the namespace google::protobuf has not been used.""" - files = [] - pattern = input_api.re.compile(r'google::protobuf') - proto_utils_path = os.path.join('rtc_base', 'protobuf_utils.h') - file_filter = lambda x: (input_api.FilterSourceFile(x) - and source_file_filter(x)) - for f in input_api.AffectedSourceFiles(file_filter): - if f.LocalPath() in [proto_utils_path, 'PRESUBMIT.py']: - continue - contents = input_api.ReadFile(f) - if pattern.search(contents): - files.append(f) - - if files: - return [output_api.PresubmitError( - 'Please avoid to use namespace `google::protobuf` directly.\n' - 'Add a using directive in `%s` and include that header instead.' 
- % proto_utils_path, files)] - return [] + """Checks that the namespace google::protobuf has not been used.""" + files = [] + pattern = input_api.re.compile(r'google::protobuf') + proto_utils_path = os.path.join('rtc_base', 'protobuf_utils.h') + file_filter = lambda x: (input_api.FilterSourceFile(x) and + source_file_filter(x)) + for f in input_api.AffectedSourceFiles(file_filter): + if f.LocalPath() in [proto_utils_path, 'PRESUBMIT.py']: + continue + contents = input_api.ReadFile(f) + if pattern.search(contents): + files.append(f) + + if files: + return [ + output_api.PresubmitError( + 'Please avoid to use namespace `google::protobuf` directly.\n' + 'Add a using directive in `%s` and include that header instead.' + % proto_utils_path, files) + ] + return [] def _LicenseHeader(input_api): - """Returns the license header regexp.""" - # Accept any year number from 2003 to the current year - current_year = int(input_api.time.strftime('%Y')) - allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1))) - years_re = '(' + '|'.join(allowed_years) + ')' - license_header = ( - r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' + """Returns the license header regexp.""" + # Accept any year number from 2003 to the current year + current_year = int(input_api.time.strftime('%Y')) + allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1))) + years_re = '(' + '|'.join(allowed_years) + ')' + license_header = ( + r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' r'All [Rr]ights [Rr]eserved\.\n' - r'.*?\n' - r'.*? Use of this source code is governed by a BSD-style license\n' - r'.*? that can be found in the LICENSE file in the root of the source\n' - r'.*? tree\. An additional intellectual property rights grant can be ' + r'.*?\n' + r'.*? Use of this source code is governed by a BSD-style license\n' + r'.*? that can be found in the LICENSE file in the root of the source\n' + r'.*? tree\. 
An additional intellectual property rights grant can be ' r'found\n' - r'.*? in the file PATENTS\. All contributing project authors may\n' - r'.*? be found in the AUTHORS file in the root of the source tree\.\n' - ) % { - 'year': years_re, - } - return license_header + r'.*? in the file PATENTS\. All contributing project authors may\n' + r'.*? be found in the AUTHORS file in the root of the source tree\.\n' + ) % { + 'year': years_re, + } + return license_header def CommonChecks(input_api, output_api): - """Checks common to both upload and commit.""" - results = [] - # Filter out files that are in objc or ios dirs from being cpplint-ed since - # they do not follow C++ lint rules. - black_list = input_api.DEFAULT_BLACK_LIST + ( - r".*\bobjc[\\\/].*", - r".*objc\.[hcm]+$", - ) - source_file_filter = lambda x: input_api.FilterSourceFile(x, None, black_list) - results.extend(CheckApprovedFilesLintClean( - input_api, output_api, source_file_filter)) - results.extend(input_api.canned_checks.CheckLicense( - input_api, output_api, _LicenseHeader(input_api))) - results.extend(input_api.canned_checks.RunPylint(input_api, output_api, - black_list=(r'^base[\\\/].*\.py$', - r'^build[\\\/].*\.py$', - r'^buildtools[\\\/].*\.py$', - r'^infra[\\\/].*\.py$', - r'^ios[\\\/].*\.py$', - r'^out.*[\\\/].*\.py$', - r'^testing[\\\/].*\.py$', - r'^third_party[\\\/].*\.py$', - r'^tools[\\\/].*\.py$', - # TODO(phoglund): should arguably be checked. - r'^tools_webrtc[\\\/]mb[\\\/].*\.py$', - r'^xcodebuild.*[\\\/].*\.py$',), - pylintrc='pylintrc')) - - # TODO(nisse): talk/ is no more, so make below checks simpler? - # WebRTC can't use the presubmit_canned_checks.PanProjectChecks function since - # we need to have different license checks in talk/ and webrtc/ directories. - # Instead, hand-picked checks are included below. - - # .m and .mm files are ObjC files. For simplicity we will consider .h files in - # ObjC subdirectories ObjC headers. 
- objc_filter_list = (r'.+\.m$', r'.+\.mm$', r'.+objc\/.+\.h$') - # Skip long-lines check for DEPS and GN files. - build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS') - # Also we will skip most checks for third_party directory. - third_party_filter_list = (r'^third_party[\\\/].+',) - eighty_char_sources = lambda x: input_api.FilterSourceFile(x, - black_list=build_file_filter_list + objc_filter_list + - third_party_filter_list) - hundred_char_sources = lambda x: input_api.FilterSourceFile(x, - white_list=objc_filter_list) - non_third_party_sources = lambda x: input_api.FilterSourceFile(x, - black_list=third_party_filter_list) - - results.extend(input_api.canned_checks.CheckLongLines( - input_api, output_api, maxlen=80, source_file_filter=eighty_char_sources)) - results.extend(input_api.canned_checks.CheckLongLines( - input_api, output_api, maxlen=100, - source_file_filter=hundred_char_sources)) - results.extend(input_api.canned_checks.CheckChangeHasNoTabs( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(input_api.canned_checks.CheckAuthorizedAuthor( - input_api, output_api, bot_whitelist=[ - 'chromium-webrtc-autoroll@webrtc-ci.iam.gserviceaccount.com' - ])) - results.extend(input_api.canned_checks.CheckChangeTodoHasOwner( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(input_api.canned_checks.CheckPatchFormatted( - input_api, output_api)) - results.extend(CheckNativeApiHeaderChanges(input_api, output_api)) - results.extend(CheckNoIOStreamInHeaders( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(CheckNoPragmaOnce( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(CheckNoFRIEND_TEST( - input_api, output_api, source_file_filter=non_third_party_sources)) - 
results.extend(CheckGnChanges(input_api, output_api)) - results.extend(CheckUnwantedDependencies( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(CheckJSONParseErrors( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(RunPythonTests(input_api, output_api)) - results.extend(CheckUsageOfGoogleProtobufNamespace( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(CheckOrphanHeaders( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(CheckNewlineAtTheEndOfProtoFiles( - input_api, output_api, source_file_filter=non_third_party_sources)) - results.extend(CheckNoStreamUsageIsAdded( - input_api, output_api, non_third_party_sources)) - results.extend(CheckNoTestCaseUsageIsAdded( - input_api, output_api, non_third_party_sources)) - results.extend(CheckAddedDepsHaveTargetApprovals(input_api, output_api)) - results.extend(CheckApiDepsFileIsUpToDate(input_api, output_api)) - results.extend(CheckAbslMemoryInclude( - input_api, output_api, non_third_party_sources)) - results.extend(CheckBannedAbslMakeUnique( - input_api, output_api, non_third_party_sources)) - return results + """Checks common to both upload and commit.""" + results = [] + # Filter out files that are in objc or ios dirs from being cpplint-ed since + # they do not follow C++ lint rules. + exception_list = input_api.DEFAULT_FILES_TO_SKIP + ( + r".*\bobjc[\\\/].*", + r".*objc\.[hcm]+$", + ) + source_file_filter = lambda x: input_api.FilterSourceFile( + x, None, exception_list) + results.extend( + CheckApprovedFilesLintClean(input_api, output_api, source_file_filter)) + results.extend( + input_api.canned_checks.CheckLicense(input_api, output_api, + _LicenseHeader(input_api))) + + # TODO(bugs.webrtc.org/12114): Delete this filter and run pylint on + # all python files. This is a temporary solution. 
+ python_file_filter = lambda f: (f.LocalPath().endswith('.py') and + source_file_filter(f)) + python_changed_files = [f.LocalPath() for f in input_api.AffectedFiles( + file_filter=python_file_filter)] + + results.extend( + input_api.canned_checks.RunPylint( + input_api, + output_api, + files_to_check=python_changed_files, + files_to_skip=( + r'^base[\\\/].*\.py$', + r'^build[\\\/].*\.py$', + r'^buildtools[\\\/].*\.py$', + r'^infra[\\\/].*\.py$', + r'^ios[\\\/].*\.py$', + r'^out.*[\\\/].*\.py$', + r'^testing[\\\/].*\.py$', + r'^third_party[\\\/].*\.py$', + r'^tools[\\\/].*\.py$', + # TODO(phoglund): should arguably be checked. + r'^tools_webrtc[\\\/]mb[\\\/].*\.py$', + r'^xcodebuild.*[\\\/].*\.py$', + ), + pylintrc='pylintrc')) + + # TODO(nisse): talk/ is no more, so make below checks simpler? + # WebRTC can't use the presubmit_canned_checks.PanProjectChecks function + # since we need to have different license checks + # in talk/ and webrtc/directories. + # Instead, hand-picked checks are included below. + + # .m and .mm files are ObjC files. For simplicity we will consider + # .h files in ObjC subdirectories ObjC headers. + objc_filter_list = (r'.+\.m$', r'.+\.mm$', r'.+objc\/.+\.h$') + # Skip long-lines check for DEPS and GN files. + build_file_filter_list = (r'.+\.gn$', r'.+\.gni$', 'DEPS') + # Also we will skip most checks for third_party directory. 
+ third_party_filter_list = (r'^third_party[\\\/].+', ) + eighty_char_sources = lambda x: input_api.FilterSourceFile( + x, + files_to_skip=build_file_filter_list + objc_filter_list + + third_party_filter_list) + hundred_char_sources = lambda x: input_api.FilterSourceFile( + x, files_to_check=objc_filter_list) + non_third_party_sources = lambda x: input_api.FilterSourceFile( + x, files_to_skip=third_party_filter_list) + + results.extend( + input_api.canned_checks.CheckLongLines( + input_api, + output_api, + maxlen=80, + source_file_filter=eighty_char_sources)) + results.extend( + input_api.canned_checks.CheckLongLines( + input_api, + output_api, + maxlen=100, + source_file_filter=hundred_char_sources)) + results.extend( + input_api.canned_checks.CheckChangeHasNoTabs( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + input_api.canned_checks.CheckChangeHasNoStrayWhitespace( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + input_api.canned_checks.CheckAuthorizedAuthor( + input_api, + output_api, + bot_allowlist=[ + 'chromium-webrtc-autoroll@webrtc-ci.iam.gserviceaccount.com', + 'webrtc-version-updater@webrtc-ci.iam.gserviceaccount.com', + ])) + results.extend( + input_api.canned_checks.CheckChangeTodoHasOwner( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + input_api.canned_checks.CheckPatchFormatted(input_api, output_api)) + results.extend(CheckNativeApiHeaderChanges(input_api, output_api)) + results.extend( + CheckNoIOStreamInHeaders(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend( + CheckNoPragmaOnce(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend( + CheckNoFRIEND_TEST(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend(CheckGnChanges(input_api, output_api)) + results.extend( + CheckUnwantedDependencies(input_api, + output_api, 
+ source_file_filter=non_third_party_sources)) + results.extend( + CheckJSONParseErrors(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend(RunPythonTests(input_api, output_api)) + results.extend( + CheckUsageOfGoogleProtobufNamespace( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + CheckOrphanHeaders(input_api, + output_api, + source_file_filter=non_third_party_sources)) + results.extend( + CheckNewlineAtTheEndOfProtoFiles( + input_api, output_api, source_file_filter=non_third_party_sources)) + results.extend( + CheckNoStreamUsageIsAdded(input_api, output_api, + non_third_party_sources)) + results.extend( + CheckNoTestCaseUsageIsAdded(input_api, output_api, + non_third_party_sources)) + results.extend(CheckAddedDepsHaveTargetApprovals(input_api, output_api)) + results.extend(CheckApiDepsFileIsUpToDate(input_api, output_api)) + results.extend( + CheckAbslMemoryInclude(input_api, output_api, non_third_party_sources)) + results.extend( + CheckBannedAbslMakeUnique(input_api, output_api, + non_third_party_sources)) + results.extend( + CheckObjcApiSymbols(input_api, output_api, non_third_party_sources)) + return results def CheckApiDepsFileIsUpToDate(input_api, output_api): - """Check that 'include_rules' in api/DEPS is up to date. + """Check that 'include_rules' in api/DEPS is up to date. The file api/DEPS must be kept up to date in order to avoid to avoid to include internal header from WebRTC's api/ headers. @@ -913,307 +1059,366 @@ def CheckApiDepsFileIsUpToDate(input_api, output_api): rule for each root level directory. More focused allow rules can be added to 'specific_include_rules'. 
""" - results = [] - api_deps = os.path.join(input_api.PresubmitLocalPath(), 'api', 'DEPS') - with open(api_deps) as f: - deps_content = _ParseDeps(f.read()) - - include_rules = deps_content.get('include_rules', []) - dirs_to_skip = set(['api', 'docs']) - - # Only check top level directories affected by the current CL. - dirs_to_check = set() - for f in input_api.AffectedFiles(): - path_tokens = [t for t in f.LocalPath().split(os.sep) if t] - if len(path_tokens) > 1: - if (path_tokens[0] not in dirs_to_skip and - os.path.isdir(os.path.join(input_api.PresubmitLocalPath(), - path_tokens[0]))): - dirs_to_check.add(path_tokens[0]) - - missing_include_rules = set() - for p in dirs_to_check: - rule = '-%s' % p - if rule not in include_rules: - missing_include_rules.add(rule) - - if missing_include_rules: - error_msg = [ - 'include_rules = [\n', - ' ...\n', - ] + results = [] + api_deps = os.path.join(input_api.PresubmitLocalPath(), 'api', 'DEPS') + with open(api_deps) as f: + deps_content = _ParseDeps(f.read()) + + include_rules = deps_content.get('include_rules', []) + dirs_to_skip = set(['api', 'docs']) + + # Only check top level directories affected by the current CL. + dirs_to_check = set() + for f in input_api.AffectedFiles(): + path_tokens = [t for t in f.LocalPath().split(os.sep) if t] + if len(path_tokens) > 1: + if (path_tokens[0] not in dirs_to_skip and os.path.isdir( + os.path.join(input_api.PresubmitLocalPath(), + path_tokens[0]))): + dirs_to_check.add(path_tokens[0]) + + missing_include_rules = set() + for p in dirs_to_check: + rule = '-%s' % p + if rule not in include_rules: + missing_include_rules.add(rule) + + if missing_include_rules: + error_msg = [ + 'include_rules = [\n', + ' ...\n', + ] + + for r in sorted(missing_include_rules): + error_msg.append(' "%s",\n' % str(r)) + + error_msg.append(' ...\n') + error_msg.append(']\n') + + results.append( + output_api.PresubmitError( + 'New root level directory detected! 
WebRTC api/ headers should ' + 'not #include headers from \n' + 'the new directory, so please update "include_rules" in file\n' + '"%s". Example:\n%s\n' % (api_deps, ''.join(error_msg)))) + + return results - for r in sorted(missing_include_rules): - error_msg.append(' "%s",\n' % str(r)) - error_msg.append(' ...\n') - error_msg.append(']\n') +def CheckBannedAbslMakeUnique(input_api, output_api, source_file_filter): + file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and + source_file_filter(f)) + + files = [] + for f in input_api.AffectedFiles(include_deletes=False, + file_filter=file_filter): + for _, line in f.ChangedContents(): + if 'absl::make_unique' in line: + files.append(f) + break + + if len(files): + return [ + output_api.PresubmitError( + 'Please use std::make_unique instead of absl::make_unique.\n' + 'Affected files:', files) + ] + return [] - results.append(output_api.PresubmitError( - 'New root level directory detected! WebRTC api/ headers should ' - 'not #include headers from \n' - 'the new directory, so please update "include_rules" in file\n' - '"%s". 
Example:\n%s\n' % (api_deps, ''.join(error_msg)))) - return results +def CheckObjcApiSymbols(input_api, output_api, source_file_filter): + rtc_objc_export = re.compile(r'RTC_OBJC_EXPORT(.|\n){26}', + re.MULTILINE | re.DOTALL) + file_filter = lambda f: (f.LocalPath().endswith(('.h')) and + source_file_filter(f)) + + files = [] + file_filter = lambda x: (input_api.FilterSourceFile(x) and + source_file_filter(x)) + for f in input_api.AffectedSourceFiles(file_filter): + if not f.LocalPath().endswith('.h') or not 'sdk/objc' in f.LocalPath(): + continue + if f.LocalPath().endswith('sdk/objc/base/RTCMacros.h'): + continue + contents = input_api.ReadFile(f) + for match in rtc_objc_export.finditer(contents): + export_block = match.group(0) + if 'RTC_OBJC_TYPE' not in export_block: + files.append(f.LocalPath()) + + if len(files): + return [ + output_api.PresubmitError( + 'RTC_OBJC_EXPORT types must be wrapped into an RTC_OBJC_TYPE() ' + + 'macro.\n\n' + 'For example:\n' + + 'RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE(RtcFoo)\n\n' + + 'RTC_OBJC_EXPORT @interface RTC_OBJC_TYPE(RtcFoo)\n\n' + + 'Please fix the following files:', files) + ] + return [] -def CheckBannedAbslMakeUnique(input_api, output_api, source_file_filter): - file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) - and source_file_filter(f)) - - files = [] - for f in input_api.AffectedFiles( - include_deletes=False, file_filter=file_filter): - for _, line in f.ChangedContents(): - if 'absl::make_unique' in line: - files.append(f) - break - - if len(files): - return [output_api.PresubmitError( - 'Please use std::make_unique instead of absl::make_unique.\n' - 'Affected files:', - files)] - return [] def CheckAbslMemoryInclude(input_api, output_api, source_file_filter): - pattern = input_api.re.compile( - r'^#include\s*"absl/memory/memory.h"', input_api.re.MULTILINE) - file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) - and source_file_filter(f)) - - files = [] - for f in 
input_api.AffectedFiles( - include_deletes=False, file_filter=file_filter): - contents = input_api.ReadFile(f) - if pattern.search(contents): - continue - for _, line in f.ChangedContents(): - if 'absl::WrapUnique' in line: - files.append(f) - break - - if len(files): - return [output_api.PresubmitError( - 'Please include "absl/memory/memory.h" header for absl::WrapUnique.\n' - 'This header may or may not be included transitively depending on the ' - 'C++ standard version.', - files)] - return [] + pattern = input_api.re.compile(r'^#include\s*"absl/memory/memory.h"', + input_api.re.MULTILINE) + file_filter = lambda f: (f.LocalPath().endswith(('.cc', '.h')) and + source_file_filter(f)) + + files = [] + for f in input_api.AffectedFiles(include_deletes=False, + file_filter=file_filter): + contents = input_api.ReadFile(f) + if pattern.search(contents): + continue + for _, line in f.ChangedContents(): + if 'absl::WrapUnique' in line: + files.append(f) + break + + if len(files): + return [ + output_api.PresubmitError( + 'Please include "absl/memory/memory.h" header for ' + 'absl::WrapUnique.\nThis header may or may not be included ' + 'transitively depending on the C++ standard version.', files) + ] + return [] + def CheckChangeOnUpload(input_api, output_api): - results = [] - results.extend(CommonChecks(input_api, output_api)) - results.extend(CheckGnGen(input_api, output_api)) - results.extend( - input_api.canned_checks.CheckGNFormatted(input_api, output_api)) - return results + results = [] + results.extend(CommonChecks(input_api, output_api)) + results.extend(CheckGnGen(input_api, output_api)) + results.extend( + input_api.canned_checks.CheckGNFormatted(input_api, output_api)) + return results def CheckChangeOnCommit(input_api, output_api): - results = [] - results.extend(CommonChecks(input_api, output_api)) - results.extend(VerifyNativeApiHeadersListIsValid(input_api, output_api)) - results.extend(input_api.canned_checks.CheckOwners(input_api, output_api)) - 
results.extend(input_api.canned_checks.CheckChangeWasUploaded( - input_api, output_api)) - results.extend(input_api.canned_checks.CheckChangeHasDescription( - input_api, output_api)) - results.extend(CheckChangeHasBugField(input_api, output_api)) - results.extend(CheckCommitMessageBugEntry(input_api, output_api)) - results.extend(input_api.canned_checks.CheckTreeIsOpen( - input_api, output_api, - json_url='http://webrtc-status.appspot.com/current?format=json')) - return results + results = [] + results.extend(CommonChecks(input_api, output_api)) + results.extend(VerifyNativeApiHeadersListIsValid(input_api, output_api)) + results.extend(input_api.canned_checks.CheckOwners(input_api, output_api)) + results.extend( + input_api.canned_checks.CheckChangeWasUploaded(input_api, output_api)) + results.extend( + input_api.canned_checks.CheckChangeHasDescription( + input_api, output_api)) + results.extend(CheckChangeHasBugField(input_api, output_api)) + results.extend(CheckCommitMessageBugEntry(input_api, output_api)) + results.extend( + input_api.canned_checks.CheckTreeIsOpen( + input_api, + output_api, + json_url='http://webrtc-status.appspot.com/current?format=json')) + return results def CheckOrphanHeaders(input_api, output_api, source_file_filter): - # We need to wait until we have an input_api object and use this - # roundabout construct to import prebubmit_checks_lib because this file is - # eval-ed and thus doesn't have __file__. 
- error_msg = """{} should be listed in {}.""" - results = [] - orphan_blacklist = [ - os.path.join('tools_webrtc', 'ios', 'SDK'), - ] - with _AddToPath(input_api.os_path.join( - input_api.PresubmitLocalPath(), 'tools_webrtc', 'presubmit_checks_lib')): - from check_orphan_headers import GetBuildGnPathFromFilePath - from check_orphan_headers import IsHeaderInBuildGn - - file_filter = lambda x: input_api.FilterSourceFile( - x, black_list=orphan_blacklist) and source_file_filter(x) - for f in input_api.AffectedSourceFiles(file_filter): - if f.LocalPath().endswith('.h'): - file_path = os.path.abspath(f.LocalPath()) - root_dir = os.getcwd() - gn_file_path = GetBuildGnPathFromFilePath(file_path, os.path.exists, - root_dir) - in_build_gn = IsHeaderInBuildGn(file_path, gn_file_path) - if not in_build_gn: - results.append(output_api.PresubmitError(error_msg.format( - f.LocalPath(), os.path.relpath(gn_file_path)))) - return results - - -def CheckNewlineAtTheEndOfProtoFiles(input_api, output_api, source_file_filter): - """Checks that all .proto files are terminated with a newline.""" - error_msg = 'File {} must end with exactly one newline.' - results = [] - file_filter = lambda x: input_api.FilterSourceFile( - x, white_list=(r'.+\.proto$',)) and source_file_filter(x) - for f in input_api.AffectedSourceFiles(file_filter): - file_path = f.LocalPath() - with open(file_path) as f: - lines = f.readlines() - if len(lines) > 0 and not lines[-1].endswith('\n'): - results.append(output_api.PresubmitError(error_msg.format(file_path))) - return results + # We need to wait until we have an input_api object and use this + # roundabout construct to import prebubmit_checks_lib because this file is + # eval-ed and thus doesn't have __file__. 
+ error_msg = """{} should be listed in {}.""" + results = [] + exempt_paths = [ + os.path.join('tools_webrtc', 'ios', 'SDK'), + ] + with _AddToPath( + input_api.os_path.join(input_api.PresubmitLocalPath(), + 'tools_webrtc', 'presubmit_checks_lib')): + from check_orphan_headers import GetBuildGnPathFromFilePath + from check_orphan_headers import IsHeaderInBuildGn + + file_filter = lambda x: input_api.FilterSourceFile( + x, files_to_skip=exempt_paths) and source_file_filter(x) + for f in input_api.AffectedSourceFiles(file_filter): + if f.LocalPath().endswith('.h'): + file_path = os.path.abspath(f.LocalPath()) + root_dir = os.getcwd() + gn_file_path = GetBuildGnPathFromFilePath(file_path, + os.path.exists, root_dir) + in_build_gn = IsHeaderInBuildGn(file_path, gn_file_path) + if not in_build_gn: + results.append( + output_api.PresubmitError( + error_msg.format(f.LocalPath(), + os.path.relpath(gn_file_path)))) + return results + + +def CheckNewlineAtTheEndOfProtoFiles(input_api, output_api, + source_file_filter): + """Checks that all .proto files are terminated with a newline.""" + error_msg = 'File {} must end with exactly one newline.' + results = [] + file_filter = lambda x: input_api.FilterSourceFile( + x, files_to_check=(r'.+\.proto$', )) and source_file_filter(x) + for f in input_api.AffectedSourceFiles(file_filter): + file_path = f.LocalPath() + with open(file_path) as f: + lines = f.readlines() + if len(lines) > 0 and not lines[-1].endswith('\n'): + results.append( + output_api.PresubmitError(error_msg.format(file_path))) + return results def _ExtractAddRulesFromParsedDeps(parsed_deps): - """Extract the rules that add dependencies from a parsed DEPS file. + """Extract the rules that add dependencies from a parsed DEPS file. 
Args: parsed_deps: the locals dictionary from evaluating the DEPS file.""" - add_rules = set() - add_rules.update([ - rule[1:] for rule in parsed_deps.get('include_rules', []) - if rule.startswith('+') or rule.startswith('!') - ]) - for _, rules in parsed_deps.get('specific_include_rules', - {}).iteritems(): + add_rules = set() add_rules.update([ - rule[1:] for rule in rules + rule[1:] for rule in parsed_deps.get('include_rules', []) if rule.startswith('+') or rule.startswith('!') ]) - return add_rules + for _, rules in parsed_deps.get('specific_include_rules', {}).iteritems(): + add_rules.update([ + rule[1:] for rule in rules + if rule.startswith('+') or rule.startswith('!') + ]) + return add_rules def _ParseDeps(contents): - """Simple helper for parsing DEPS files.""" - # Stubs for handling special syntax in the root DEPS file. - class VarImpl(object): + """Simple helper for parsing DEPS files.""" - def __init__(self, local_scope): - self._local_scope = local_scope + # Stubs for handling special syntax in the root DEPS file. + class VarImpl(object): + def __init__(self, local_scope): + self._local_scope = local_scope - def Lookup(self, var_name): - """Implements the Var syntax.""" - try: - return self._local_scope['vars'][var_name] - except KeyError: - raise Exception('Var is not defined: %s' % var_name) + def Lookup(self, var_name): + """Implements the Var syntax.""" + try: + return self._local_scope['vars'][var_name] + except KeyError: + raise Exception('Var is not defined: %s' % var_name) - local_scope = {} - global_scope = { - 'Var': VarImpl(local_scope).Lookup, - } - exec contents in global_scope, local_scope - return local_scope + local_scope = {} + global_scope = { + 'Var': VarImpl(local_scope).Lookup, + } + exec contents in global_scope, local_scope + return local_scope def _CalculateAddedDeps(os_path, old_contents, new_contents): - """Helper method for _CheckAddedDepsHaveTargetApprovals. Returns + """Helper method for _CheckAddedDepsHaveTargetApprovals. 
Returns a set of DEPS entries that we should look up. For a directory (rather than a specific filename) we fake a path to a specific filename by adding /DEPS. This is chosen as a file that will seldom or never be subject to per-file include_rules. """ - # We ignore deps entries on auto-generated directories. - auto_generated_dirs = ['grit', 'jni'] + # We ignore deps entries on auto-generated directories. + auto_generated_dirs = ['grit', 'jni'] - old_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(old_contents)) - new_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(new_contents)) + old_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(old_contents)) + new_deps = _ExtractAddRulesFromParsedDeps(_ParseDeps(new_contents)) - added_deps = new_deps.difference(old_deps) + added_deps = new_deps.difference(old_deps) - results = set() - for added_dep in added_deps: - if added_dep.split('/')[0] in auto_generated_dirs: - continue - # Assume that a rule that ends in .h is a rule for a specific file. - if added_dep.endswith('.h'): - results.add(added_dep) - else: - results.add(os_path.join(added_dep, 'DEPS')) - return results + results = set() + for added_dep in added_deps: + if added_dep.split('/')[0] in auto_generated_dirs: + continue + # Assume that a rule that ends in .h is a rule for a specific file. + if added_dep.endswith('.h'): + results.add(added_dep) + else: + results.add(os_path.join(added_dep, 'DEPS')) + return results def CheckAddedDepsHaveTargetApprovals(input_api, output_api): - """When a dependency prefixed with + is added to a DEPS file, we - want to make sure that the change is reviewed by an OWNER of the - target file or directory, to avoid layering violations from being - introduced. This check verifies that this happens. 
- """ - virtual_depended_on_files = set() - - file_filter = lambda f: not input_api.re.match( - r"^third_party[\\\/](WebKit|blink)[\\\/].*", f.LocalPath()) - for f in input_api.AffectedFiles(include_deletes=False, - file_filter=file_filter): - filename = input_api.os_path.basename(f.LocalPath()) - if filename == 'DEPS': - virtual_depended_on_files.update(_CalculateAddedDeps( - input_api.os_path, - '\n'.join(f.OldContents()), - '\n'.join(f.NewContents()))) - - if not virtual_depended_on_files: - return [] + """When a dependency prefixed with + is added to a DEPS file, we + want to make sure that the change is reviewed by an OWNER of the + target file or directory, to avoid layering violations from being + introduced. This check verifies that this happens. + """ + virtual_depended_on_files = set() + + file_filter = lambda f: not input_api.re.match( + r"^third_party[\\\/](WebKit|blink)[\\\/].*", f.LocalPath()) + for f in input_api.AffectedFiles(include_deletes=False, + file_filter=file_filter): + filename = input_api.os_path.basename(f.LocalPath()) + if filename == 'DEPS': + virtual_depended_on_files.update( + _CalculateAddedDeps(input_api.os_path, + '\n'.join(f.OldContents()), + '\n'.join(f.NewContents()))) + + if not virtual_depended_on_files: + return [] - if input_api.is_committing: - if input_api.tbr: - return [output_api.PresubmitNotifyResult( - '--tbr was specified, skipping OWNERS check for DEPS additions')] - if input_api.dry_run: - return [output_api.PresubmitNotifyResult( - 'This is a dry run, skipping OWNERS check for DEPS additions')] - if not input_api.change.issue: - return [output_api.PresubmitError( - "DEPS approval by OWNERS check failed: this change has " - "no change number, so we can't check it for approvals.")] - output = output_api.PresubmitError - else: - output = output_api.PresubmitNotifyResult - - owners_db = input_api.owners_db - owner_email, reviewers = ( - input_api.canned_checks.GetCodereviewOwnerAndReviewers( - input_api, - 
owners_db.email_regexp, - approval_needed=input_api.is_committing)) - - owner_email = owner_email or input_api.change.author_email - - reviewers_plus_owner = set(reviewers) - if owner_email: - reviewers_plus_owner.add(owner_email) - missing_files = owners_db.files_not_covered_by(virtual_depended_on_files, - reviewers_plus_owner) - - # We strip the /DEPS part that was added by - # _FilesToCheckForIncomingDeps to fake a path to a file in a - # directory. - def StripDeps(path): - start_deps = path.rfind('/DEPS') - if start_deps != -1: - return path[:start_deps] + if input_api.is_committing: + if input_api.tbr: + return [ + output_api.PresubmitNotifyResult( + '--tbr was specified, skipping OWNERS check for DEPS ' + 'additions' + ) + ] + if input_api.dry_run: + return [ + output_api.PresubmitNotifyResult( + 'This is a dry run, skipping OWNERS check for DEPS ' + 'additions' + ) + ] + if not input_api.change.issue: + return [ + output_api.PresubmitError( + "DEPS approval by OWNERS check failed: this change has " + "no change number, so we can't check it for approvals.") + ] + output = output_api.PresubmitError else: - return path - unapproved_dependencies = ["'+%s'," % StripDeps(path) - for path in missing_files] - - if unapproved_dependencies: - output_list = [ - output('You need LGTM from owners of depends-on paths in DEPS that were ' - 'modified in this CL:\n %s' % - '\n '.join(sorted(unapproved_dependencies)))] - suggested_owners = owners_db.reviewers_for(missing_files, owner_email) - output_list.append(output( - 'Suggested missing target path OWNERS:\n %s' % - '\n '.join(suggested_owners or []))) - return output_list - - return [] + output = output_api.PresubmitNotifyResult + + owner_email, reviewers = ( + input_api.canned_checks.GetCodereviewOwnerAndReviewers( + input_api, + None, + approval_needed=input_api.is_committing)) + + owner_email = owner_email or input_api.change.author_email + + approval_status = input_api.owners_client.GetFilesApprovalStatus( + 
virtual_depended_on_files, reviewers.union([owner_email]), []) + missing_files = [ + f for f in virtual_depended_on_files + if approval_status[f] != input_api.owners_client.APPROVED] + + # We strip the /DEPS part that was added by + # _FilesToCheckForIncomingDeps to fake a path to a file in a + # directory. + def StripDeps(path): + start_deps = path.rfind('/DEPS') + if start_deps != -1: + return path[:start_deps] + else: + return path + + unapproved_dependencies = [ + "'+%s'," % StripDeps(path) for path in missing_files + ] + + if unapproved_dependencies: + output_list = [ + output( + 'You need LGTM from owners of depends-on paths in DEPS that ' + ' were modified in this CL:\n %s' % + '\n '.join(sorted(unapproved_dependencies))) + ] + suggested_owners = input_api.owners_client.SuggestOwners( + missing_files, exclude=[owner_email]) + output_list.append( + output('Suggested missing target path OWNERS:\n %s' % + '\n '.join(suggested_owners or []))) + return output_list + + return [] diff --git a/README.chromium b/README.chromium index 246c13dc09..58c8da8403 100644 --- a/README.chromium +++ b/README.chromium @@ -1,13 +1,14 @@ -Name: WebRTC -URL: http://www.webrtc.org -Version: 90 -License: BSD -License File: LICENSE - -Description: -WebRTC provides real time voice and video processing -functionality to enable the implementation of -PeerConnection/MediaStream. - -Third party code used in this project is described -in the file LICENSE_THIRD_PARTY. +Name: WebRTC +URL: http://www.webrtc.org +Version: 90 +CPEPrefix: cpe:/a:webrtc_project:webrtc:90 +License: BSD +License File: LICENSE + +Description: +WebRTC provides real time voice and video processing +functionality to enable the implementation of +PeerConnection/MediaStream. + +Third party code used in this project is described +in the file LICENSE_THIRD_PARTY. diff --git a/README.md b/README.md index 23f82d3f4b..1ae9ea9cb7 100644 --- a/README.md +++ b/README.md @@ -31,8 +31,9 @@ native API header files. 
* Master source code repo: https://webrtc.googlesource.com/src * Samples and reference apps: https://github.com/webrtc * Mailing list: http://groups.google.com/group/discuss-webrtc - * Continuous build: http://build.chromium.org/p/client.webrtc - * [Coding style guide](style-guide.md) + * Continuous build: https://ci.chromium.org/p/webrtc/g/ci/console + * [Coding style guide](g3doc/style-guide.md) * [Code of conduct](CODE_OF_CONDUCT.md) + * [Reporting bugs](docs/bug-reporting.md) [native-dev]: https://webrtc.googlesource.com/src/+/refs/heads/master/docs/native-code/index.md diff --git a/WATCHLISTS b/WATCHLISTS index cf4ea32c40..750a4c2164 100644 --- a/WATCHLISTS +++ b/WATCHLISTS @@ -113,8 +113,9 @@ 'peah@webrtc.org', 'saza@webrtc.org'], 'audio': ['peah@webrtc.org'], - 'api': ['kwiberg@webrtc.org','peah@webrtc.org'], - 'base': ['kwiberg@webrtc.org'], + 'api': ['hta@webrtc.org', + 'peah@webrtc.org'], + 'base': ['hta@webrtc.org'], 'call': ['mflodman@webrtc.org', 'stefan@webrtc.org'], 'video': ['mflodman@webrtc.org', @@ -134,7 +135,6 @@ 'audio_coding': ['alessiob@webrtc.org', 'audio-team@agora.io', 'henrik.lundin@webrtc.org', - 'kwiberg@webrtc.org', 'minyue@webrtc.org', 'peah@webrtc.org', 'saza@webrtc.org'], @@ -152,7 +152,6 @@ 'audio-team@agora.io', 'fhernqvist@webrtc.org', 'henrik.lundin@webrtc.org', - 'kwiberg@webrtc.org', 'minyue@webrtc.org', 'peah@webrtc.org', 'saza@webrtc.org'], diff --git a/api/BUILD.gn b/api/BUILD.gn index 0a0a501412..c775a1a871 100644 --- a/api/BUILD.gn +++ b/api/BUILD.gn @@ -29,7 +29,10 @@ rtc_source_set("call_api") { rtc_source_set("callfactory_api") { visibility = [ "*" ] sources = [ "call/call_factory_interface.h" ] - deps = [ "../rtc_base/system:rtc_export" ] + deps = [ + "../call:rtp_interfaces", + "../rtc_base/system:rtc_export", + ] } if (!build_with_chromium) { @@ -52,9 +55,11 @@ if (!build_with_chromium) { "../pc:peerconnection", "../rtc_base", "../rtc_base:rtc_base_approved", + "../rtc_base:threading", "audio:audio_mixer_api", 
"audio_codecs:audio_codecs_api", "task_queue:default_task_queue_factory", + "transport:field_trial_based_config", "video_codecs:video_codecs_api", ] } @@ -68,11 +73,10 @@ rtc_library("rtp_headers") { ] deps = [ ":array_view", - "..:webrtc_common", "units:timestamp", "video:video_rtp_headers", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("rtp_packet_info") { @@ -87,11 +91,11 @@ rtc_library("rtp_packet_info") { ":refcountedbase", ":rtp_headers", ":scoped_refptr", - "..:webrtc_common", "../rtc_base:rtc_base_approved", "../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", + "units:timestamp", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("media_stream_interface") { @@ -99,6 +103,7 @@ rtc_library("media_stream_interface") { sources = [ "media_stream_interface.cc", "media_stream_interface.h", + "media_stream_track.h", "notifier.h", ] deps = [ @@ -111,8 +116,8 @@ rtc_library("media_stream_interface") { "../rtc_base/system:rtc_export", "video:recordable_encoded_frame", "video:video_frame", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("libjingle_peerconnection_api") { @@ -133,14 +138,8 @@ rtc_library("libjingle_peerconnection_api") { "jsep_ice_candidate.cc", "jsep_ice_candidate.h", "jsep_session_description.h", - "media_stream_proxy.h", - "media_stream_track_proxy.h", - "peer_connection_factory_proxy.h", "peer_connection_interface.cc", "peer_connection_interface.h", - "peer_connection_proxy.h", - "proxy.cc", - "proxy.h", "rtp_receiver_interface.cc", "rtp_receiver_interface.h", "rtp_sender_interface.cc", @@ -149,15 +148,17 @@ rtc_library("libjingle_peerconnection_api") { "rtp_transceiver_interface.h", "sctp_transport_interface.cc", "sctp_transport_interface.h", + "set_local_description_observer_interface.h", 
"set_remote_description_observer_interface.h", "stats_types.cc", "stats_types.h", "turn_customizer.h", "uma_metrics.h", - "video_track_source_proxy.h", + "video_track_source_proxy_factory.h", ] deps = [ ":array_view", + ":async_dns_resolver", ":audio_options_api", ":callfactory_api", ":fec_controller_api", @@ -173,6 +174,10 @@ rtc_library("libjingle_peerconnection_api") { ":rtp_parameters", ":rtp_transceiver_direction", ":scoped_refptr", + ":sequence_checker", + "../call:rtp_interfaces", + "../rtc_base:network_constants", + "adaptation:resource_adaptation_api", "audio:audio_mixer_api", "audio_codecs:audio_codecs_api", "crypto:frame_decryptor_interface", @@ -182,37 +187,38 @@ rtc_library("libjingle_peerconnection_api") { "rtc_event_log", "task_queue", "transport:bitrate_settings", - "transport:datagram_transport_interface", "transport:enums", "transport:network_control", + "transport:sctp_transport_factory_interface", "transport:webrtc_key_value_config", - "transport/media:audio_interfaces", - "transport/media:media_transport_interface", - "transport/media:video_interfaces", "transport/rtp:rtp_source", "units:data_rate", "units:timestamp", "video:encoded_image", + "video:video_bitrate_allocator_factory", "video:video_frame", "video:video_rtp_headers", - "//third_party/abseil-cpp/absl/algorithm:container", - "//third_party/abseil-cpp/absl/memory", - "//third_party/abseil-cpp/absl/strings", - "//third_party/abseil-cpp/absl/types:optional", # Basically, don't add stuff here. You might break sensitive downstream # targets like pnacl. API should not depend on anything outside of this # file, really. All these should arguably go away in time. 
- "..:webrtc_common", "../media:rtc_media_base", "../media:rtc_media_config", "../modules/audio_processing:audio_processing_statistics", "../rtc_base", "../rtc_base:checks", - "../rtc_base:deprecation", + "../rtc_base:ip_address", "../rtc_base:rtc_base_approved", + "../rtc_base:socket_address", + "../rtc_base:threading", "../rtc_base/system:rtc_export", ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] } rtc_source_set("frame_transformer_interface") { @@ -237,8 +243,8 @@ rtc_library("rtc_error") { "../rtc_base:logging", "../rtc_base:macromagic", "../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("packet_socket_factory") { @@ -248,7 +254,18 @@ rtc_source_set("packet_socket_factory") { "packet_socket_factory.h", ] deps = [ + ":async_dns_resolver", + "../rtc_base:async_resolver_interface", "../rtc_base:rtc_base", + "../rtc_base:socket_address", + "../rtc_base/system:rtc_export", + ] +} + +rtc_source_set("async_dns_resolver") { + sources = [ "async_dns_resolver.h" ] + deps = [ + "../rtc_base:socket_address", "../rtc_base/system:rtc_export", ] } @@ -274,7 +291,6 @@ rtc_source_set("video_quality_test_fixture_api") { "../test:video_test_common", "transport:bitrate_settings", "transport:network_control", - "transport/media:media_transport_interface", "video_codecs:video_codecs_api", ] } @@ -285,18 +301,23 @@ rtc_source_set("video_quality_analyzer_api") { sources = [ "test/video_quality_analyzer_interface.h" ] deps = [ + ":array_view", ":stats_observer_interface", "video:encoded_image", "video:video_frame", "video:video_rtp_headers", "video_codecs:video_codecs_api", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] } 
-rtc_source_set("track_id_stream_label_map") { +rtc_source_set("track_id_stream_info_map") { visibility = [ "*" ] - sources = [ "test/track_id_stream_label_map.h" ] + sources = [ "test/track_id_stream_info_map.h" ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_source_set("rtp_transceiver_direction") { @@ -323,6 +344,8 @@ rtc_library("rtp_parameters") { "../rtc_base:checks", "../rtc_base:stringutils", "../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -341,7 +364,7 @@ rtc_source_set("audio_quality_analyzer_api") { deps = [ ":stats_observer_interface", - ":track_id_stream_label_map", + ":track_id_stream_info_map", ] } @@ -350,11 +373,9 @@ rtc_source_set("stats_observer_interface") { testonly = true sources = [ "test/stats_observer_interface.h" ] - deps = [ - # For api/stats_types.h - ":libjingle_peerconnection_api", - ":rtp_parameters", - ] + deps = [ ":rtc_stats_api" ] + + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_source_set("peer_connection_quality_test_fixture_api") { @@ -375,16 +396,19 @@ rtc_source_set("peer_connection_quality_test_fixture_api") { ":rtp_parameters", ":simulated_network_api", ":stats_observer_interface", + ":track_id_stream_info_map", ":video_quality_analyzer_api", "../media:rtc_media_base", "../rtc_base:rtc_base", + "../rtc_base:threading", "rtc_event_log", "task_queue", "transport:network_control", - "transport/media:media_transport_interface", "units:time_delta", "video:video_frame", "video_codecs:video_codecs_api", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", @@ -394,13 +418,16 @@ rtc_source_set("peer_connection_quality_test_fixture_api") { rtc_source_set("frame_generator_api") { visibility = [ "*" ] testonly = true - sources = [ "test/frame_generator_interface.h" ] + sources = [ + 
"test/frame_generator_interface.cc", + "test/frame_generator_interface.h", + ] deps = [ ":scoped_refptr", "video:video_frame", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("test_dependency_factory") { @@ -418,22 +445,6 @@ rtc_library("test_dependency_factory") { } if (rtc_include_tests) { - rtc_library("create_video_quality_test_fixture_api") { - visibility = [ "*" ] - testonly = true - sources = [ - "test/create_video_quality_test_fixture.cc", - "test/create_video_quality_test_fixture.h", - ] - deps = [ - ":fec_controller_api", - ":network_state_predictor_api", - ":scoped_refptr", - ":video_quality_test_fixture_api", - "../video:video_quality_test", - ] - } - # TODO(srte): Move to network_emulation sub directory. rtc_library("create_network_emulation_manager") { visibility = [ "*" ] @@ -448,20 +459,39 @@ if (rtc_include_tests) { ] } - rtc_library("create_peerconnection_quality_test_fixture") { - visibility = [ "*" ] - testonly = true - sources = [ - "test/create_peerconnection_quality_test_fixture.cc", - "test/create_peerconnection_quality_test_fixture.h", - ] + if (!build_with_chromium) { + rtc_library("create_video_quality_test_fixture_api") { + visibility = [ "*" ] + testonly = true + sources = [ + "test/create_video_quality_test_fixture.cc", + "test/create_video_quality_test_fixture.h", + ] + deps = [ + ":fec_controller_api", + ":network_state_predictor_api", + ":scoped_refptr", + ":video_quality_test_fixture_api", + "../video:video_quality_test", + ] + } - deps = [ - ":audio_quality_analyzer_api", - ":peer_connection_quality_test_fixture_api", - ":video_quality_analyzer_api", - "../test/pc/e2e:peerconnection_quality_test", - ] + rtc_library("create_peerconnection_quality_test_fixture") { + visibility = [ "*" ] + testonly = true + sources = [ + "test/create_peerconnection_quality_test_fixture.cc", + "test/create_peerconnection_quality_test_fixture.h", + ] + + deps = [ + 
":audio_quality_analyzer_api", + ":peer_connection_quality_test_fixture_api", + ":time_controller", + ":video_quality_analyzer_api", + "../test/pc/e2e:peerconnection_quality_test", + ] + } } } @@ -477,8 +507,8 @@ rtc_library("create_frame_generator") { "../rtc_base:checks", "../system_wrappers", "../test:frame_generator_impl", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("create_peer_connection_quality_test_frame_generator") { @@ -494,8 +524,8 @@ rtc_library("create_peer_connection_quality_test_frame_generator") { ":peer_connection_quality_test_fixture_api", "../rtc_base:checks", "../test:fileutils", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("libjingle_logging_api") { @@ -531,6 +561,7 @@ rtc_source_set("rtc_stats_api") { deps = [ ":scoped_refptr", + "../api:refcountedbase", "../rtc_base:checks", "../rtc_base:rtc_base_approved", "../rtc_base/system:rtc_export", @@ -548,8 +579,8 @@ rtc_library("audio_options_api") { ":array_view", "../rtc_base:stringutils", "../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("transport_api") { @@ -558,6 +589,10 @@ rtc_library("transport_api") { "call/transport.cc", "call/transport.h", ] + deps = [ + ":refcountedbase", + ":scoped_refptr", + ] } rtc_source_set("bitrate_allocation") { @@ -573,11 +608,8 @@ rtc_source_set("bitrate_allocation") { rtc_source_set("simulated_network_api") { visibility = [ "*" ] sources = [ "test/simulated_network.h" ] - deps = [ - "../rtc_base", - "../rtc_base:criticalsection", - "//third_party/abseil-cpp/absl/types:optional", - ] + deps = [ "../rtc_base" ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } # TODO(srte): Move to network_emulation sub directory. 
@@ -588,10 +620,13 @@ rtc_source_set("network_emulation_manager_api") { "test/network_emulation_manager.h", ] deps = [ + ":array_view", ":simulated_network_api", ":time_controller", "../call:simulated_network", "../rtc_base", + "../rtc_base:network_constants", + "../rtc_base:threading", "test/network_emulation", "units:data_rate", "units:data_size", @@ -609,6 +644,7 @@ rtc_source_set("time_controller") { deps = [ "../modules/utility", "../rtc_base", + "../rtc_base:threading", "../rtc_base/synchronization:yield_policy", "../system_wrappers", "task_queue", @@ -647,7 +683,10 @@ rtc_source_set("array_view") { rtc_source_set("refcountedbase") { visibility = [ "*" ] sources = [ "ref_counted_base.h" ] - deps = [ "../rtc_base:rtc_base_approved" ] + deps = [ + "../rtc_base:macromagic", + "../rtc_base:refcount", + ] } rtc_library("ice_transport_factory") { @@ -662,6 +701,7 @@ rtc_library("ice_transport_factory") { ":scoped_refptr", "../p2p:rtc_p2p", "../rtc_base", + "../rtc_base:threading", "../rtc_base/system:rtc_export", "rtc_event_log:rtc_event_log", ] @@ -681,8 +721,18 @@ rtc_source_set("function_view") { deps = [ "../rtc_base:checks" ] } +rtc_source_set("sequence_checker") { + visibility = [ "*" ] + sources = [ "sequence_checker.h" ] + deps = [ + "../rtc_base:checks", + "../rtc_base:macromagic", + "../rtc_base/synchronization:sequence_checker_internal", + ] +} + if (rtc_include_tests) { - if (rtc_enable_protobuf) { + if (rtc_enable_protobuf && !build_with_chromium) { rtc_library("audioproc_f_api") { visibility = [ "*" ] testonly = true @@ -710,6 +760,8 @@ if (rtc_include_tests) { "../modules/audio_coding:neteq_test_factory", "../rtc_base:checks", "neteq:neteq_api", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/flags:parse", "//third_party/abseil-cpp/absl/strings", @@ -748,7 +800,7 @@ if (rtc_include_tests) { "test/videocodec_test_stats.h", ] deps = [ - "..:webrtc_common", + "../media:rtc_h264_profile_id", 
"../modules/video_coding:video_codec_interface", "../rtc_base:stringutils", "video:video_frame_type", @@ -782,6 +834,17 @@ if (rtc_include_tests) { ] } + rtc_source_set("mock_data_channel") { + visibility = [ "*" ] + testonly = true + sources = [ "test/mock_data_channel.h" ] + + deps = [ + ":libjingle_peerconnection_api", + "../test:test_support", + ] + } + rtc_source_set("mock_fec_controller_override") { testonly = true sources = [ "test/mock_fec_controller_override.h" ] @@ -822,7 +885,6 @@ if (rtc_include_tests) { ":array_view", ":libjingle_peerconnection_api", ":rtp_parameters", - "..:webrtc_common", "../rtc_base:checks", "../rtc_base:rtc_base_approved", "crypto:frame_encryptor_interface", @@ -839,13 +901,23 @@ if (rtc_include_tests) { ":array_view", ":libjingle_peerconnection_api", ":rtp_parameters", - "..:webrtc_common", "../rtc_base:checks", "../rtc_base:rtc_base_approved", "crypto:frame_decryptor_interface", ] } + rtc_source_set("mock_media_stream_interface") { + visibility = [ "*" ] + testonly = true + sources = [ "test/mock_media_stream_interface.h" ] + + deps = [ + ":media_stream_interface", + "../test:test_support", + ] + } + rtc_source_set("dummy_peer_connection") { visibility = [ "*" ] testonly = true @@ -860,6 +932,7 @@ if (rtc_include_tests) { } rtc_source_set("mock_peerconnectioninterface") { + visibility = [ "*" ] testonly = true sources = [ "test/mock_peerconnectioninterface.h" ] @@ -869,9 +942,31 @@ if (rtc_include_tests) { ] } + rtc_source_set("mock_peer_connection_factory_interface") { + visibility = [ "*" ] + testonly = true + sources = [ "test/mock_peer_connection_factory_interface.h" ] + + deps = [ + ":libjingle_peerconnection_api", + "../test:test_support", + ] + } + + rtc_source_set("mock_async_dns_resolver") { + testonly = true + sources = [ "test/mock_async_dns_resolver.h" ] + deps = [ + ":async_dns_resolver", + "../test:test_support", + ] + } + rtc_source_set("mock_rtp") { + visibility = [ "*" ] testonly = true sources = [ + 
"test/mock_rtp_transceiver.h", "test/mock_rtpreceiver.h", "test/mock_rtpsender.h", ] @@ -949,39 +1044,6 @@ if (rtc_include_tests) { ] } - rtc_source_set("fake_media_transport") { - testonly = true - - sources = [ - "test/fake_datagram_transport.h", - "test/fake_media_transport.h", - ] - - deps = [ - "../rtc_base:checks", - "transport:datagram_transport_interface", - "transport/media:media_transport_interface", - "//third_party/abseil-cpp/absl/algorithm:container", - ] - } - - rtc_library("loopback_media_transport") { - testonly = true - - sources = [ - "test/loopback_media_transport.cc", - "test/loopback_media_transport.h", - ] - - deps = [ - "../rtc_base", - "../rtc_base:checks", - "transport:datagram_transport_interface", - "transport/media:media_transport_interface", - "//third_party/abseil-cpp/absl/algorithm:container", - ] - } - rtc_library("create_time_controller") { visibility = [ "*" ] testonly = true @@ -995,6 +1057,7 @@ if (rtc_include_tests) { ":time_controller", "../call", "../call:call_interfaces", + "../call:rtp_interfaces", "../test/time_controller", ] } @@ -1011,8 +1074,8 @@ if (rtc_include_tests) { "rtp_packet_infos_unittest.cc", "rtp_parameters_unittest.cc", "scoped_refptr_unittest.cc", + "sequence_checker_unittest.cc", "test/create_time_controller_unittest.cc", - "test/loopback_media_transport_unittest.cc", ] deps = [ @@ -1020,17 +1083,18 @@ if (rtc_include_tests) { ":create_time_controller", ":function_view", ":libjingle_peerconnection_api", - ":loopback_media_transport", ":rtc_error", ":rtc_event_log_output_file", ":rtp_packet_info", ":rtp_parameters", ":scoped_refptr", + ":sequence_checker", ":time_controller", "../rtc_base:checks", "../rtc_base:gunit_helpers", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_task_queue", + "../rtc_base:task_queue_for_test", "../rtc_base/task_utils:repeating_task", "../test:fileutils", "../test:test_support", @@ -1038,6 +1102,7 @@ if (rtc_include_tests) { "units:time_delta", "units:timestamp", 
"units:units_unittests", + "video:rtp_video_frame_assembler_unittests", "video:video_unittests", ] } @@ -1051,11 +1116,13 @@ if (rtc_include_tests) { ":dummy_peer_connection", ":fake_frame_decryptor", ":fake_frame_encryptor", - ":fake_media_transport", - ":loopback_media_transport", + ":mock_async_dns_resolver", ":mock_audio_mixer", + ":mock_data_channel", ":mock_frame_decryptor", ":mock_frame_encryptor", + ":mock_media_stream_interface", + ":mock_peer_connection_factory_interface", ":mock_peerconnectioninterface", ":mock_rtp", ":mock_transformable_video_frame", diff --git a/api/DEPS b/api/DEPS index 1212b43be8..cdd17e9909 100644 --- a/api/DEPS +++ b/api/DEPS @@ -11,10 +11,12 @@ include_rules = [ "-common_video", "-data", "-examples", + "-g3doc", "-ios", "-infra", "-logging", "-media", + "-net", "-modules", "-out", "-p2p", @@ -40,12 +42,16 @@ include_rules = [ specific_include_rules = { # Some internal headers are allowed even in API headers: + + "call_factory_interface\.h": [ + "+call/rtp_transport_controller_send_factory_interface.h", + ], + ".*\.h": [ "+rtc_base/checks.h", "+rtc_base/system/rtc_export.h", "+rtc_base/system/rtc_export_template.h", "+rtc_base/units/unit_base.h", - "+rtc_base/deprecation.h", ], "array_view\.h": [ @@ -63,6 +69,10 @@ specific_include_rules = { "+rtc_base/async_resolver_interface.h", ], + "async_dns_resolver\.h": [ + "+rtc_base/socket_address.h", + ], + "candidate\.h": [ "+rtc_base/network_constants.h", "+rtc_base/socket_address.h", @@ -115,30 +125,27 @@ specific_include_rules = { "+rtc_base/ref_count.h", ], - "media_transport_interface\.h": [ - "+rtc_base/copy_on_write_buffer.h", # As used by datachannelinterface.h - "+rtc_base/network_route.h", - ], - "packet_socket_factory\.h": [ "+rtc_base/proxy_info.h", "+rtc_base/async_packet_socket.h", ], - "peer_connection_factory_proxy\.h": [ - "+rtc_base/bind.h", - ], - "peer_connection_interface\.h": [ + "+call/rtp_transport_controller_send_factory_interface.h", 
"+media/base/media_config.h", "+media/base/media_engine.h", + "+p2p/base/port.h", "+p2p/base/port_allocator.h", "+rtc_base/network.h", + "+rtc_base/network_constants.h", + "+rtc_base/network_monitor_factory.h", + "+rtc_base/ref_count.h", "+rtc_base/rtc_certificate.h", "+rtc_base/rtc_certificate_generator.h", "+rtc_base/socket_address.h", "+rtc_base/ssl_certificate.h", "+rtc_base/ssl_stream_adapter.h", + "+rtc_base/thread.h", ], "proxy\.h": [ @@ -177,6 +184,9 @@ specific_include_rules = { "+rtc_base/ref_count.h", ], + "set_local_description_observer_interface\.h": [ + "+rtc_base/ref_count.h", + ], "set_remote_description_observer_interface\.h": [ "+rtc_base/ref_count.h", ], @@ -184,7 +194,6 @@ specific_include_rules = { "stats_types\.h": [ "+rtc_base/constructor_magic.h", "+rtc_base/ref_count.h", - "+rtc_base/string_encode.h", "+rtc_base/thread_checker.h", ], @@ -263,7 +272,6 @@ specific_include_rules = { ], "simulated_network\.h": [ - "+rtc_base/critical_section.h", "+rtc_base/random.h", "+rtc_base/thread_annotations.h", ], @@ -284,6 +292,11 @@ specific_include_rules = { "+rtc_base/ref_count.h", ], + "sequence_checker\.h": [ + "+rtc_base/synchronization/sequence_checker_internal.h", + "+rtc_base/thread_annotations.h", + ], + # .cc files in api/ should not be restricted in what they can #include, # so we re-add all the top-level directories here. 
(That's because .h # files leak their #includes to whoever's #including them, but .cc files diff --git a/api/OWNERS b/api/OWNERS index 4cf3915175..6ffb2588aa 100644 --- a/api/OWNERS +++ b/api/OWNERS @@ -2,7 +2,6 @@ crodbro@webrtc.org deadbeef@webrtc.org hta@webrtc.org juberti@webrtc.org -kwiberg@webrtc.org magjed@webrtc.org perkj@webrtc.org tkchin@webrtc.org @@ -11,4 +10,5 @@ tommi@webrtc.org per-file peer_connection*=hbos@webrtc.org per-file DEPS=mbonadei@webrtc.org -per-file DEPS=kwiberg@webrtc.org + +per-file uma_metrics.h=kron@webrtc.org diff --git a/api/README.md b/api/README.md index 4cc799362d..7c1a27f512 100644 --- a/api/README.md +++ b/api/README.md @@ -1,6 +1,6 @@ # How to write code in the `api/` directory -Mostly, just follow the regular [style guide](../style-guide.md), but: +Mostly, just follow the regular [style guide](../g3doc/style-guide.md), but: * Note that `api/` code is not exempt from the “`.h` and `.cc` files come in pairs” rule, so if you declare something in `api/path/to/foo.h`, it should be @@ -17,7 +17,7 @@ it from a `.cc` file, so that users of our API headers won’t transitively For headers in `api/` that need to refer to non-public types, forward declarations are often a lesser evil than including non-public header files. The -usual [rules](../style-guide.md#forward-declarations) still apply, though. +usual [rules](../g3doc/style-guide.md#forward-declarations) still apply, though. `.cc` files in `api/` should preferably be kept reasonably small. If a substantial implementation is needed, consider putting it with our non-public diff --git a/api/adaptation/BUILD.gn b/api/adaptation/BUILD.gn new file mode 100644 index 0000000000..2cba5f407e --- /dev/null +++ b/api/adaptation/BUILD.gn @@ -0,0 +1,24 @@ +# Copyright(c) 2020 The WebRTC project authors.All Rights Reserved. 
+# +# Use of this source code is governed by a BSD - style license +# that can be found in the LICENSE file in the root of the source +# tree.An additional intellectual property rights grant can be found +# in the file PATENTS.All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../webrtc.gni") + +rtc_source_set("resource_adaptation_api") { + visibility = [ "*" ] + sources = [ + "resource.cc", + "resource.h", + ] + deps = [ + "../../api:scoped_refptr", + "../../rtc_base:checks", + "../../rtc_base:refcount", + "../../rtc_base:rtc_base_approved", + "../../rtc_base/system:rtc_export", + ] +} diff --git a/api/adaptation/DEPS b/api/adaptation/DEPS new file mode 100644 index 0000000000..cab7fb8e14 --- /dev/null +++ b/api/adaptation/DEPS @@ -0,0 +1,7 @@ +specific_include_rules = { + "resource\.h": [ + # ref_count.h is a public_deps of rtc_base_approved. Necessary because of + # rtc::RefCountInterface. + "+rtc_base/ref_count.h", + ], +} \ No newline at end of file diff --git a/api/adaptation/resource.cc b/api/adaptation/resource.cc new file mode 100644 index 0000000000..dac03fe019 --- /dev/null +++ b/api/adaptation/resource.cc @@ -0,0 +1,33 @@ +/* + * Copyright 2019 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "api/adaptation/resource.h" + +#include "rtc_base/checks.h" + +namespace webrtc { + +const char* ResourceUsageStateToString(ResourceUsageState usage_state) { + switch (usage_state) { + case ResourceUsageState::kOveruse: + return "kOveruse"; + case ResourceUsageState::kUnderuse: + return "kUnderuse"; + } + RTC_CHECK_NOTREACHED(); +} + +ResourceListener::~ResourceListener() {} + +Resource::Resource() {} + +Resource::~Resource() {} + +} // namespace webrtc diff --git a/api/adaptation/resource.h b/api/adaptation/resource.h new file mode 100644 index 0000000000..9b3968055f --- /dev/null +++ b/api/adaptation/resource.h @@ -0,0 +1,67 @@ +/* + * Copyright 2019 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_ADAPTATION_RESOURCE_H_ +#define API_ADAPTATION_RESOURCE_H_ + +#include + +#include "api/scoped_refptr.h" +#include "rtc_base/ref_count.h" +#include "rtc_base/system/rtc_export.h" + +namespace webrtc { + +class Resource; + +enum class ResourceUsageState { + // Action is needed to minimze the load on this resource. + kOveruse, + // Increasing the load on this resource is desired, if possible. + kUnderuse, +}; + +RTC_EXPORT const char* ResourceUsageStateToString( + ResourceUsageState usage_state); + +class RTC_EXPORT ResourceListener { + public: + virtual ~ResourceListener(); + + virtual void OnResourceUsageStateMeasured( + rtc::scoped_refptr resource, + ResourceUsageState usage_state) = 0; +}; + +// A Resource monitors an implementation-specific resource. 
It may report +// kOveruse or kUnderuse when resource usage is high or low enough that we +// should perform some sort of mitigation to fulfil the resource's constraints. +// +// The methods on this interface are invoked on the adaptation task queue. +// Resource usage measurements may be performed on an any task queue. +// +// The Resource is reference counted to prevent use-after-free when posting +// between task queues. As such, the implementation MUST NOT make any +// assumptions about which task queue Resource is destructed on. +class RTC_EXPORT Resource : public rtc::RefCountInterface { + public: + Resource(); + // Destruction may happen on any task queue. + ~Resource() override; + + virtual std::string Name() const = 0; + // The |listener| may be informed of resource usage measurements on any task + // queue, but not after this method is invoked with the null argument. + virtual void SetResourceListener(ResourceListener* listener) = 0; +}; + +} // namespace webrtc + +#endif // API_ADAPTATION_RESOURCE_H_ diff --git a/api/array_view.h b/api/array_view.h index a66369a3d3..df365cb740 100644 --- a/api/array_view.h +++ b/api/array_view.h @@ -13,6 +13,7 @@ #include #include +#include #include #include "rtc_base/checks.h" @@ -258,6 +259,18 @@ class ArrayView final : public impl::ArrayViewBase { T* end() const { return this->data() + this->size(); } const T* cbegin() const { return this->data(); } const T* cend() const { return this->data() + this->size(); } + std::reverse_iterator rbegin() const { + return std::make_reverse_iterator(end()); + } + std::reverse_iterator rend() const { + return std::make_reverse_iterator(begin()); + } + std::reverse_iterator crbegin() const { + return std::make_reverse_iterator(cend()); + } + std::reverse_iterator crend() const { + return std::make_reverse_iterator(cbegin()); + } ArrayView subview(size_t offset, size_t size) const { return offset < this->size() diff --git a/api/array_view_unittest.cc b/api/array_view_unittest.cc index 
0357f68aa2..97267df006 100644 --- a/api/array_view_unittest.cc +++ b/api/array_view_unittest.cc @@ -451,6 +451,20 @@ TEST(ArrayViewTest, TestIterationEmpty) { } } +TEST(ArrayViewTest, TestReverseIterationEmpty) { + // Variable-size. + ArrayView>>> av; + EXPECT_EQ(av.rbegin(), av.rend()); + EXPECT_EQ(av.crbegin(), av.crend()); + EXPECT_TRUE(av.empty()); + + // Fixed-size. + ArrayView>>, 0> af; + EXPECT_EQ(af.begin(), af.end()); + EXPECT_EQ(af.cbegin(), af.cend()); + EXPECT_TRUE(af.empty()); +} + TEST(ArrayViewTest, TestIterationVariable) { char arr[] = "Arrr!"; ArrayView av(arr); @@ -472,6 +486,25 @@ TEST(ArrayViewTest, TestIterationVariable) { } } +TEST(ArrayViewTest, TestReverseIterationVariable) { + char arr[] = "Arrr!"; + ArrayView av(arr); + EXPECT_EQ('\0', *av.rbegin()); + EXPECT_EQ('\0', *av.crbegin()); + EXPECT_EQ('A', *(av.rend() - 1)); + EXPECT_EQ('A', *(av.crend() - 1)); + + const char* cit = av.cend() - 1; + for (auto crit = av.crbegin(); crit != av.crend(); ++crit, --cit) { + EXPECT_EQ(*cit, *crit); + } + + char* it = av.end() - 1; + for (auto rit = av.rbegin(); rit != av.rend(); ++rit, --it) { + EXPECT_EQ(*it, *rit); + } +} + TEST(ArrayViewTest, TestIterationFixed) { char arr[] = "Arrr!"; ArrayView av(arr); @@ -493,6 +526,25 @@ TEST(ArrayViewTest, TestIterationFixed) { } } +TEST(ArrayViewTest, TestReverseIterationFixed) { + char arr[] = "Arrr!"; + ArrayView av(arr); + EXPECT_EQ('\0', *av.rbegin()); + EXPECT_EQ('\0', *av.crbegin()); + EXPECT_EQ('A', *(av.rend() - 1)); + EXPECT_EQ('A', *(av.crend() - 1)); + + const char* cit = av.cend() - 1; + for (auto crit = av.crbegin(); crit != av.crend(); ++crit, --cit) { + EXPECT_EQ(*cit, *crit); + } + + char* it = av.end() - 1; + for (auto rit = av.rbegin(); rit != av.rend(); ++rit, --it) { + EXPECT_EQ(*it, *rit); + } +} + TEST(ArrayViewTest, TestEmpty) { EXPECT_TRUE(ArrayView().empty()); const int a[] = {1, 2, 3}; diff --git a/api/async_dns_resolver.h b/api/async_dns_resolver.h new file mode 100644 index 
0000000000..eabb41c11f --- /dev/null +++ b/api/async_dns_resolver.h @@ -0,0 +1,86 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_ASYNC_DNS_RESOLVER_H_ +#define API_ASYNC_DNS_RESOLVER_H_ + +#include +#include + +#include "rtc_base/socket_address.h" +#include "rtc_base/system/rtc_export.h" + +namespace webrtc { + +// This interface defines the methods to resolve a hostname asynchronously. +// The AsyncDnsResolverInterface class encapsulates a single name query. +// +// Usage: +// std::unique_ptr resolver = +// factory->Create(address-to-be-resolved, [r = resolver.get()]() { +// if (r->result.GetResolvedAddress(AF_INET, &addr) { +// // success +// } else { +// // failure +// error = r->result().GetError(); +// } +// // Release resolver. +// resolver_list.erase(std::remove_if(resolver_list.begin(), +// resolver_list.end(), +// [](refptr) { refptr.get() == r; }); +// }); +// resolver_list.push_back(std::move(resolver)); + +class AsyncDnsResolverResult { + public: + virtual ~AsyncDnsResolverResult() = default; + // Returns true iff the address from |Start| was successfully resolved. + // If the address was successfully resolved, sets |addr| to a copy of the + // address from |Start| with the IP address set to the top most resolved + // address of |family| (|addr| will have both hostname and the resolved ip). + virtual bool GetResolvedAddress(int family, + rtc::SocketAddress* addr) const = 0; + // Returns error from resolver. 
+ virtual int GetError() const = 0; +}; + +class RTC_EXPORT AsyncDnsResolverInterface { + public: + virtual ~AsyncDnsResolverInterface() = default; + + // Start address resolution of the hostname in |addr|. + virtual void Start(const rtc::SocketAddress& addr, + std::function callback) = 0; + virtual const AsyncDnsResolverResult& result() const = 0; +}; + +// An abstract factory for creating AsyncDnsResolverInterfaces. This allows +// client applications to provide WebRTC with their own mechanism for +// performing DNS resolution. +class AsyncDnsResolverFactoryInterface { + public: + virtual ~AsyncDnsResolverFactoryInterface() = default; + + // Creates an AsyncDnsResolver and starts resolving the name. The callback + // will be called when resolution is finished. + // The callback will be called on the thread that the caller runs on. + virtual std::unique_ptr CreateAndResolve( + const rtc::SocketAddress& addr, + std::function callback) = 0; + // Creates an AsyncDnsResolver and does not start it. + // For backwards compatibility, will be deprecated and removed. + // One has to do a separate Start() call on the + // resolver to start name resolution. 
+ virtual std::unique_ptr Create() = 0; +}; + +} // namespace webrtc + +#endif // API_ASYNC_DNS_RESOLVER_H_ diff --git a/api/audio/BUILD.gn b/api/audio/BUILD.gn index 4c8004ed2d..d0465bbc40 100644 --- a/api/audio/BUILD.gn +++ b/api/audio/BUILD.gn @@ -24,6 +24,11 @@ rtc_library("audio_frame_api") { ] } +rtc_source_set("audio_frame_processor") { + visibility = [ "*" ] + sources = [ "audio_frame_processor.h" ] +} + rtc_source_set("audio_mixer_api") { visibility = [ "*" ] sources = [ "audio_mixer.h" ] @@ -61,8 +66,8 @@ rtc_library("aec3_config_json") { "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_json", "../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_library("aec3_factory") { diff --git a/api/audio/audio_frame.cc b/api/audio/audio_frame.cc index 47459ac333..c6e5cf4dd6 100644 --- a/api/audio/audio_frame.cc +++ b/api/audio/audio_frame.cc @@ -11,6 +11,8 @@ #include "api/audio/audio_frame.h" #include +#include +#include #include "rtc_base/checks.h" #include "rtc_base/time_utils.h" @@ -22,6 +24,28 @@ AudioFrame::AudioFrame() { static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes"); } +void swap(AudioFrame& a, AudioFrame& b) { + using std::swap; + swap(a.timestamp_, b.timestamp_); + swap(a.elapsed_time_ms_, b.elapsed_time_ms_); + swap(a.ntp_time_ms_, b.ntp_time_ms_); + swap(a.samples_per_channel_, b.samples_per_channel_); + swap(a.sample_rate_hz_, b.sample_rate_hz_); + swap(a.num_channels_, b.num_channels_); + swap(a.channel_layout_, b.channel_layout_); + swap(a.speech_type_, b.speech_type_); + swap(a.vad_activity_, b.vad_activity_); + swap(a.profile_timestamp_ms_, b.profile_timestamp_ms_); + swap(a.packet_infos_, b.packet_infos_); + const size_t length_a = a.samples_per_channel_ * a.num_channels_; + const size_t length_b = b.samples_per_channel_ * b.num_channels_; + RTC_DCHECK_LE(length_a, AudioFrame::kMaxDataSizeSamples); + RTC_DCHECK_LE(length_b, 
AudioFrame::kMaxDataSizeSamples); + std::swap_ranges(a.data_, a.data_ + std::max(length_a, length_b), b.data_); + swap(a.muted_, b.muted_); + swap(a.absolute_capture_timestamp_ms_, b.absolute_capture_timestamp_ms_); +} + void AudioFrame::Reset() { ResetWithoutMuting(); muted_ = true; diff --git a/api/audio/audio_frame.h b/api/audio/audio_frame.h index 06b0b28b38..78539f57eb 100644 --- a/api/audio/audio_frame.h +++ b/api/audio/audio_frame.h @@ -14,6 +14,8 @@ #include #include +#include + #include "api/audio/channel_layout.h" #include "api/rtp_packet_infos.h" #include "rtc_base/constructor_magic.h" @@ -58,6 +60,8 @@ class AudioFrame { AudioFrame(); + friend void swap(AudioFrame& a, AudioFrame& b); + // Resets all members to their default state. void Reset(); // Same as Reset(), but leaves mute state unchanged. Muting a frame requires diff --git a/api/audio/audio_frame_processor.h b/api/audio/audio_frame_processor.h new file mode 100644 index 0000000000..bc21d14858 --- /dev/null +++ b/api/audio/audio_frame_processor.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_AUDIO_AUDIO_FRAME_PROCESSOR_H_ +#define API_AUDIO_AUDIO_FRAME_PROCESSOR_H_ + +#include +#include + +namespace webrtc { + +class AudioFrame; + +// If passed into PeerConnectionFactory, will be used for additional +// processing of captured audio frames, performed before encoding. +// Implementations must be thread-safe. 
+class AudioFrameProcessor { + public: + using OnAudioFrameCallback = std::function)>; + virtual ~AudioFrameProcessor() = default; + + // Processes the frame received from WebRTC, is called by WebRTC off the + // realtime audio capturing path. AudioFrameProcessor must reply with + // processed frames by calling |sink_callback| if it was provided in SetSink() + // call. |sink_callback| can be called in the context of Process(). + virtual void Process(std::unique_ptr frame) = 0; + + // Atomically replaces the current sink with the new one. Before the + // first call to this function, or if the provided |sink_callback| is nullptr, + // processed frames are simply discarded. + virtual void SetSink(OnAudioFrameCallback sink_callback) = 0; +}; + +} // namespace webrtc + +#endif // API_AUDIO_AUDIO_FRAME_PROCESSOR_H_ diff --git a/api/audio/echo_canceller3_config.cc b/api/audio/echo_canceller3_config.cc index aeb809efa9..b38d6b5b7e 100644 --- a/api/audio/echo_canceller3_config.cc +++ b/api/audio/echo_canceller3_config.cc @@ -153,6 +153,7 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) { res = res & Limit(&c->filter.config_change_duration_blocks, 0, 100000); res = res & Limit(&c->filter.initial_state_seconds, 0.f, 100.f); + res = res & Limit(&c->filter.coarse_reset_hangover_blocks, 0, 250000); res = res & Limit(&c->erle.min, 1.f, 100000.f); res = res & Limit(&c->erle.max_l, 1.f, 100000.f); @@ -228,6 +229,12 @@ bool EchoCanceller3Config::Validate(EchoCanceller3Config* config) { res = res & Limit(&c->suppressor.nearend_tuning.max_dec_factor_lf, 0.f, 100.f); + res = res & Limit(&c->suppressor.last_permanent_lf_smoothing_band, 0, 64); + res = res & Limit(&c->suppressor.last_lf_smoothing_band, 0, 64); + res = res & Limit(&c->suppressor.last_lf_band, 0, 63); + res = res & + Limit(&c->suppressor.first_hf_band, c->suppressor.last_lf_band + 1, 64); + res = res & Limit(&c->suppressor.dominant_nearend_detection.enr_threshold, 0.f, 1000000.f); res = res & 
Limit(&c->suppressor.dominant_nearend_detection.snr_threshold, diff --git a/api/audio/echo_canceller3_config.h b/api/audio/echo_canceller3_config.h index a505625538..087e8da439 100644 --- a/api/audio/echo_canceller3_config.h +++ b/api/audio/echo_canceller3_config.h @@ -43,6 +43,7 @@ struct RTC_EXPORT EchoCanceller3Config { size_t hysteresis_limit_blocks = 1; size_t fixed_capture_delay_samples = 0; float delay_estimate_smoothing = 0.7f; + float delay_estimate_smoothing_delay_found = 0.7f; float delay_candidate_detection_threshold = 0.2f; struct DelaySelectionThresholds { int initial; @@ -86,9 +87,11 @@ struct RTC_EXPORT EchoCanceller3Config { size_t config_change_duration_blocks = 250; float initial_state_seconds = 2.5f; + int coarse_reset_hangover_blocks = 25; bool conservative_initial_phase = false; bool enable_coarse_filter_output_usage = true; bool use_linear_filter = true; + bool high_pass_filter_echo_reference = false; bool export_linear_aec_output = false; } filter; @@ -107,6 +110,7 @@ struct RTC_EXPORT EchoCanceller3Config { float default_len = 0.83f; bool echo_can_saturate = true; bool bounded_erl = false; + bool erle_onset_compensation_in_dominant_nearend = false; } ep_strength; struct EchoAudibility { @@ -143,6 +147,7 @@ struct RTC_EXPORT EchoCanceller3Config { float noise_gate_slope = 0.3f; size_t render_pre_window_size = 1; size_t render_post_window_size = 1; + bool model_reverb_in_nonlinear_mode = true; } echo_model; struct ComfortNoise { @@ -189,6 +194,12 @@ struct RTC_EXPORT EchoCanceller3Config { 2.0f, 0.25f); + bool lf_smoothing_during_initial_phase = true; + int last_permanent_lf_smoothing_band = 0; + int last_lf_smoothing_band = 5; + int last_lf_band = 5; + int first_hf_band = 8; + struct DominantNearendDetection { float enr_threshold = .25f; float enr_exit_threshold = 10.f; @@ -215,11 +226,12 @@ struct RTC_EXPORT EchoCanceller3Config { struct HighBandsSuppression { float enr_threshold = 1.f; float max_gain_during_echo = 1.f; - float 
anti_howling_activation_threshold = 25.f; - float anti_howling_gain = 0.01f; + float anti_howling_activation_threshold = 400.f; + float anti_howling_gain = 1.f; } high_bands_suppression; float floor_first_increase = 0.00001f; + bool conservative_hf_suppression = false; } suppressor; }; } // namespace webrtc diff --git a/api/audio/echo_canceller3_config_json.cc b/api/audio/echo_canceller3_config_json.cc index f5c1249674..263599c538 100644 --- a/api/audio/echo_canceller3_config_json.cc +++ b/api/audio/echo_canceller3_config_json.cc @@ -11,6 +11,7 @@ #include +#include #include #include @@ -156,9 +157,14 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, *parsing_successful = true; Json::Value root; - bool success = Json::Reader().parse(std::string(json_string), root); + Json::CharReaderBuilder builder; + std::string error_message; + std::unique_ptr reader(builder.newCharReader()); + bool success = + reader->parse(json_string.data(), json_string.data() + json_string.size(), + &root, &error_message); if (!success) { - RTC_LOG(LS_ERROR) << "Incorrect JSON format: " << json_string; + RTC_LOG(LS_ERROR) << "Incorrect JSON format: " << error_message; *parsing_successful = false; return; } @@ -191,6 +197,8 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, &cfg.delay.fixed_capture_delay_samples); ReadParam(section, "delay_estimate_smoothing", &cfg.delay.delay_estimate_smoothing); + ReadParam(section, "delay_estimate_smoothing_delay_found", + &cfg.delay.delay_estimate_smoothing_delay_found); ReadParam(section, "delay_candidate_detection_threshold", &cfg.delay.delay_candidate_detection_threshold); @@ -223,11 +231,15 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, &cfg.filter.config_change_duration_blocks); ReadParam(section, "initial_state_seconds", &cfg.filter.initial_state_seconds); + ReadParam(section, "coarse_reset_hangover_blocks", + &cfg.filter.coarse_reset_hangover_blocks); ReadParam(section, "conservative_initial_phase", 
&cfg.filter.conservative_initial_phase); ReadParam(section, "enable_coarse_filter_output_usage", &cfg.filter.enable_coarse_filter_output_usage); ReadParam(section, "use_linear_filter", &cfg.filter.use_linear_filter); + ReadParam(section, "high_pass_filter_echo_reference", + &cfg.filter.high_pass_filter_echo_reference); ReadParam(section, "export_linear_aec_output", &cfg.filter.export_linear_aec_output); } @@ -249,6 +261,8 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, ReadParam(section, "default_len", &cfg.ep_strength.default_len); ReadParam(section, "echo_can_saturate", &cfg.ep_strength.echo_can_saturate); ReadParam(section, "bounded_erl", &cfg.ep_strength.bounded_erl); + ReadParam(section, "erle_onset_compensation_in_dominant_nearend", + &cfg.ep_strength.erle_onset_compensation_in_dominant_nearend); } if (rtc::GetValueFromJsonObject(aec3_root, "echo_audibility", §ion)) { @@ -302,6 +316,8 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, &cfg.echo_model.render_pre_window_size); ReadParam(section, "render_post_window_size", &cfg.echo_model.render_post_window_size); + ReadParam(section, "model_reverb_in_nonlinear_mode", + &cfg.echo_model.model_reverb_in_nonlinear_mode); } if (rtc::GetValueFromJsonObject(aec3_root, "comfort_noise", §ion)) { @@ -331,6 +347,15 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, &cfg.suppressor.nearend_tuning.max_dec_factor_lf); } + ReadParam(section, "lf_smoothing_during_initial_phase", + &cfg.suppressor.lf_smoothing_during_initial_phase); + ReadParam(section, "last_permanent_lf_smoothing_band", + &cfg.suppressor.last_permanent_lf_smoothing_band); + ReadParam(section, "last_lf_smoothing_band", + &cfg.suppressor.last_lf_smoothing_band); + ReadParam(section, "last_lf_band", &cfg.suppressor.last_lf_band); + ReadParam(section, "first_hf_band", &cfg.suppressor.first_hf_band); + if (rtc::GetValueFromJsonObject(section, "dominant_nearend_detection", &subsection)) { ReadParam(subsection, 
"enr_threshold", @@ -381,6 +406,8 @@ void Aec3ConfigFromJsonString(absl::string_view json_string, ReadParam(section, "floor_first_increase", &cfg.suppressor.floor_first_increase); + ReadParam(section, "conservative_hf_suppression", + &cfg.suppressor.conservative_hf_suppression); } } @@ -415,6 +442,8 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { << config.delay.fixed_capture_delay_samples << ","; ost << "\"delay_estimate_smoothing\": " << config.delay.delay_estimate_smoothing << ","; + ost << "\"delay_estimate_smoothing_delay_found\": " + << config.delay.delay_estimate_smoothing_delay_found << ","; ost << "\"delay_candidate_detection_threshold\": " << config.delay.delay_candidate_detection_threshold << ","; @@ -498,6 +527,8 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { << config.filter.config_change_duration_blocks << ","; ost << "\"initial_state_seconds\": " << config.filter.initial_state_seconds << ","; + ost << "\"coarse_reset_hangover_blocks\": " + << config.filter.coarse_reset_hangover_blocks << ","; ost << "\"conservative_initial_phase\": " << (config.filter.conservative_initial_phase ? "true" : "false") << ","; ost << "\"enable_coarse_filter_output_usage\": " @@ -505,6 +536,9 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { << ","; ost << "\"use_linear_filter\": " << (config.filter.use_linear_filter ? "true" : "false") << ","; + ost << "\"high_pass_filter_echo_reference\": " + << (config.filter.high_pass_filter_echo_reference ? "true" : "false") + << ","; ost << "\"export_linear_aec_output\": " << (config.filter.export_linear_aec_output ? "true" : "false"); @@ -529,8 +563,11 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { ost << "\"echo_can_saturate\": " << (config.ep_strength.echo_can_saturate ? "true" : "false") << ","; ost << "\"bounded_erl\": " - << (config.ep_strength.bounded_erl ? "true" : "false"); - + << (config.ep_strength.bounded_erl ? 
"true" : "false") << ","; + ost << "\"erle_onset_compensation_in_dominant_nearend\": " + << (config.ep_strength.erle_onset_compensation_in_dominant_nearend + ? "true" + : "false"); ost << "},"; ost << "\"echo_audibility\": {"; @@ -585,7 +622,9 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { ost << "\"render_pre_window_size\": " << config.echo_model.render_pre_window_size << ","; ost << "\"render_post_window_size\": " - << config.echo_model.render_post_window_size; + << config.echo_model.render_post_window_size << ","; + ost << "\"model_reverb_in_nonlinear_mode\": " + << (config.echo_model.model_reverb_in_nonlinear_mode ? "true" : "false"); ost << "},"; ost << "\"comfort_noise\": {"; @@ -627,6 +666,16 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { ost << "\"max_dec_factor_lf\": " << config.suppressor.nearend_tuning.max_dec_factor_lf; ost << "},"; + ost << "\"lf_smoothing_during_initial_phase\": " + << (config.suppressor.lf_smoothing_during_initial_phase ? 
"true" + : "false") + << ","; + ost << "\"last_permanent_lf_smoothing_band\": " + << config.suppressor.last_permanent_lf_smoothing_band << ","; + ost << "\"last_lf_smoothing_band\": " + << config.suppressor.last_lf_smoothing_band << ","; + ost << "\"last_lf_band\": " << config.suppressor.last_lf_band << ","; + ost << "\"first_hf_band\": " << config.suppressor.first_hf_band << ","; ost << "\"dominant_nearend_detection\": {"; ost << "\"enr_threshold\": " << config.suppressor.dominant_nearend_detection.enr_threshold << ","; @@ -672,7 +721,10 @@ std::string Aec3ConfigToJsonString(const EchoCanceller3Config& config) { ost << "\"anti_howling_gain\": " << config.suppressor.high_bands_suppression.anti_howling_gain; ost << "},"; - ost << "\"floor_first_increase\": " << config.suppressor.floor_first_increase; + ost << "\"floor_first_increase\": " << config.suppressor.floor_first_increase + << ","; + ost << "\"conservative_hf_suppression\": " + << config.suppressor.conservative_hf_suppression; ost << "}"; ost << "}"; ost << "}"; diff --git a/api/audio/echo_control.h b/api/audio/echo_control.h index 8d567bf2b8..74fbc27b12 100644 --- a/api/audio/echo_control.h +++ b/api/audio/echo_control.h @@ -48,6 +48,13 @@ class EchoControl { // Provides an optional external estimate of the audio buffer delay. virtual void SetAudioBufferDelay(int delay_ms) = 0; + // Specifies whether the capture output will be used. The purpose of this is + // to allow the echo controller to deactivate some of the processing when the + // resulting output is anyway not used, for instance when the endpoint is + // muted. + // TODO(b/177830919): Make pure virtual. + virtual void SetCaptureOutputUsage(bool capture_output_used) {} + // Returns wheter the signal is altered. 
virtual bool ActiveProcessing() const = 0; diff --git a/api/audio/echo_detector_creator.cc b/api/audio/echo_detector_creator.cc index 4c3d9e61fe..04215b0deb 100644 --- a/api/audio/echo_detector_creator.cc +++ b/api/audio/echo_detector_creator.cc @@ -15,7 +15,7 @@ namespace webrtc { rtc::scoped_refptr CreateEchoDetector() { - return new rtc::RefCountedObject(); + return rtc::make_ref_counted(); } } // namespace webrtc diff --git a/api/audio/test/audio_frame_unittest.cc b/api/audio/test/audio_frame_unittest.cc index dbf45ceabc..f8d3318274 100644 --- a/api/audio/test/audio_frame_unittest.cc +++ b/api/audio/test/audio_frame_unittest.cc @@ -133,4 +133,54 @@ TEST(AudioFrameTest, CopyFrom) { EXPECT_EQ(0, memcmp(frame2.data(), frame1.data(), sizeof(samples))); } +TEST(AudioFrameTest, SwapFrames) { + AudioFrame frame1, frame2; + int16_t samples1[kNumChannelsMono * kSamplesPerChannel]; + for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) { + samples1[i] = i; + } + frame1.UpdateFrame(kTimestamp, samples1, kSamplesPerChannel, kSampleRateHz, + AudioFrame::kPLC, AudioFrame::kVadActive, + kNumChannelsMono); + frame1.set_absolute_capture_timestamp_ms(12345678); + const auto frame1_channel_layout = frame1.channel_layout(); + + int16_t samples2[(kNumChannelsMono + 1) * (kSamplesPerChannel + 1)]; + for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1); + ++i) { + samples2[i] = 1000 + i; + } + frame2.UpdateFrame(kTimestamp + 1, samples2, kSamplesPerChannel + 1, + kSampleRateHz + 1, AudioFrame::kNormalSpeech, + AudioFrame::kVadPassive, kNumChannelsMono + 1); + const auto frame2_channel_layout = frame2.channel_layout(); + + swap(frame1, frame2); + + EXPECT_EQ(kTimestamp + 1, frame1.timestamp_); + ASSERT_EQ(kSamplesPerChannel + 1, frame1.samples_per_channel_); + EXPECT_EQ(kSampleRateHz + 1, frame1.sample_rate_hz_); + EXPECT_EQ(AudioFrame::kNormalSpeech, frame1.speech_type_); + EXPECT_EQ(AudioFrame::kVadPassive, frame1.vad_activity_); + 
ASSERT_EQ(kNumChannelsMono + 1, frame1.num_channels_); + for (size_t i = 0; i < (kNumChannelsMono + 1) * (kSamplesPerChannel + 1); + ++i) { + EXPECT_EQ(samples2[i], frame1.data()[i]); + } + EXPECT_FALSE(frame1.absolute_capture_timestamp_ms()); + EXPECT_EQ(frame2_channel_layout, frame1.channel_layout()); + + EXPECT_EQ(kTimestamp, frame2.timestamp_); + ASSERT_EQ(kSamplesPerChannel, frame2.samples_per_channel_); + EXPECT_EQ(kSampleRateHz, frame2.sample_rate_hz_); + EXPECT_EQ(AudioFrame::kPLC, frame2.speech_type_); + EXPECT_EQ(AudioFrame::kVadActive, frame2.vad_activity_); + ASSERT_EQ(kNumChannelsMono, frame2.num_channels_); + for (size_t i = 0; i < kNumChannelsMono * kSamplesPerChannel; ++i) { + EXPECT_EQ(samples1[i], frame2.data()[i]); + } + EXPECT_EQ(12345678, frame2.absolute_capture_timestamp_ms()); + EXPECT_EQ(frame1_channel_layout, frame2.channel_layout()); +} + } // namespace webrtc diff --git a/api/audio/test/echo_canceller3_config_json_unittest.cc b/api/audio/test/echo_canceller3_config_json_unittest.cc index a149c17a76..d6edd07d2e 100644 --- a/api/audio/test/echo_canceller3_config_json_unittest.cc +++ b/api/audio/test/echo_canceller3_config_json_unittest.cc @@ -21,7 +21,10 @@ TEST(EchoCanceller3JsonHelpers, ToStringAndParseJson) { cfg.delay.log_warning_on_delay_changes = true; cfg.filter.refined.error_floor = 2.f; cfg.filter.coarse_initial.length_blocks = 3u; + cfg.filter.high_pass_filter_echo_reference = + !cfg.filter.high_pass_filter_echo_reference; cfg.comfort_noise.noise_floor_dbfs = 100.f; + cfg.echo_model.model_reverb_in_nonlinear_mode = false; cfg.suppressor.normal_tuning.mask_hf.enr_suppress = .5f; cfg.suppressor.subband_nearend_detection.nearend_average_blocks = 3; cfg.suppressor.subband_nearend_detection.subband1 = {1, 3}; @@ -46,8 +49,12 @@ TEST(EchoCanceller3JsonHelpers, ToStringAndParseJson) { cfg_transformed.filter.coarse_initial.length_blocks); EXPECT_EQ(cfg.filter.refined.error_floor, cfg_transformed.filter.refined.error_floor); + 
EXPECT_EQ(cfg.filter.high_pass_filter_echo_reference, + cfg_transformed.filter.high_pass_filter_echo_reference); EXPECT_EQ(cfg.comfort_noise.noise_floor_dbfs, cfg_transformed.comfort_noise.noise_floor_dbfs); + EXPECT_EQ(cfg.echo_model.model_reverb_in_nonlinear_mode, + cfg_transformed.echo_model.model_reverb_in_nonlinear_mode); EXPECT_EQ(cfg.suppressor.normal_tuning.mask_hf.enr_suppress, cfg_transformed.suppressor.normal_tuning.mask_hf.enr_suppress); EXPECT_EQ(cfg.suppressor.subband_nearend_detection.nearend_average_blocks, diff --git a/api/audio_codecs/BUILD.gn b/api/audio_codecs/BUILD.gn index 987e20f178..5926f5ec2e 100644 --- a/api/audio_codecs/BUILD.gn +++ b/api/audio_codecs/BUILD.gn @@ -33,11 +33,12 @@ rtc_library("audio_codecs_api") { "..:bitrate_allocation", "..:scoped_refptr", "../../rtc_base:checks", - "../../rtc_base:deprecation", "../../rtc_base:rtc_base_approved", "../../rtc_base:sanitizer", "../../rtc_base/system:rtc_export", "../units:time_delta", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] diff --git a/api/audio_codecs/L16/BUILD.gn b/api/audio_codecs/L16/BUILD.gn index bef671237e..1f7a1e5a0b 100644 --- a/api/audio_codecs/L16/BUILD.gn +++ b/api/audio_codecs/L16/BUILD.gn @@ -25,6 +25,8 @@ rtc_library("audio_encoder_L16") { "../../../rtc_base:rtc_base_approved", "../../../rtc_base:safe_minmax", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -42,6 +44,8 @@ rtc_library("audio_decoder_L16") { "../../../modules/audio_coding:pcm16b", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] diff --git a/api/audio_codecs/OWNERS b/api/audio_codecs/OWNERS index fe417854d6..77e9d0022a 100644 --- a/api/audio_codecs/OWNERS +++ b/api/audio_codecs/OWNERS @@ -1 +1,2 @@ 
-kwiberg@webrtc.org +minyue@webrtc.org +henrik.lundin@webrtc.org diff --git a/api/audio_codecs/audio_decoder.cc b/api/audio_codecs/audio_decoder.cc index 97cda27a03..4b18b4ab52 100644 --- a/api/audio_codecs/audio_decoder.cc +++ b/api/audio_codecs/audio_decoder.cc @@ -162,7 +162,7 @@ AudioDecoder::SpeechType AudioDecoder::ConvertSpeechType(int16_t type) { case 2: return kComfortNoise; default: - assert(false); + RTC_NOTREACHED(); return kSpeech; } } diff --git a/api/audio_codecs/audio_decoder.h b/api/audio_codecs/audio_decoder.h index 557ffe2759..ce235946da 100644 --- a/api/audio_codecs/audio_decoder.h +++ b/api/audio_codecs/audio_decoder.h @@ -136,7 +136,7 @@ class AudioDecoder { // with the decoded audio on either side of the concealment. // Note: The default implementation of GeneratePlc will be deleted soon. All // implementations must provide their own, which can be a simple as a no-op. - // TODO(bugs.webrtc.org/9676): Remove default impementation. + // TODO(bugs.webrtc.org/9676): Remove default implementation. virtual void GeneratePlc(size_t requested_samples_per_channel, rtc::BufferT* concealment_audio); diff --git a/api/audio_codecs/audio_decoder_factory.h b/api/audio_codecs/audio_decoder_factory.h index c36a0e103b..2811f6704b 100644 --- a/api/audio_codecs/audio_decoder_factory.h +++ b/api/audio_codecs/audio_decoder_factory.h @@ -38,6 +38,8 @@ class AudioDecoderFactory : public rtc::RefCountInterface { // communication between the AudioEncoder and AudioDecoder instances, which is // needed for some codecs with built-in bandwidth adaptation.) // + // Returns null if the format isn't supported. + // // Note: Implementations need to be robust against combinations other than // one encoder, one decoder getting the same ID; such decoders must still // work. 
diff --git a/api/audio_codecs/audio_decoder_factory_template.h b/api/audio_codecs/audio_decoder_factory_template.h index e628cb62dc..388668d4c6 100644 --- a/api/audio_codecs/audio_decoder_factory_template.h +++ b/api/audio_codecs/audio_decoder_factory_template.h @@ -123,9 +123,8 @@ rtc::scoped_refptr CreateAudioDecoderFactory() { static_assert(sizeof...(Ts) >= 1, "Caller must give at least one template parameter"); - return rtc::scoped_refptr( - new rtc::RefCountedObject< - audio_decoder_factory_template_impl::AudioDecoderFactoryT>()); + return rtc::make_ref_counted< + audio_decoder_factory_template_impl::AudioDecoderFactoryT>(); } } // namespace webrtc diff --git a/api/audio_codecs/audio_encoder.h b/api/audio_codecs/audio_encoder.h index fd2d948863..92e42cf107 100644 --- a/api/audio_codecs/audio_encoder.h +++ b/api/audio_codecs/audio_encoder.h @@ -16,12 +16,12 @@ #include #include +#include "absl/base/attributes.h" #include "absl/types/optional.h" #include "api/array_view.h" #include "api/call/bitrate_allocation.h" #include "api/units/time_delta.h" #include "rtc_base/buffer.h" -#include "rtc_base/deprecation.h" namespace webrtc { @@ -182,12 +182,11 @@ class AudioEncoder { // implementation does nothing. virtual void SetMaxPlaybackRate(int frequency_hz); - // This is to be deprecated. Please use |OnReceivedTargetAudioBitrate| - // instead. // Tells the encoder what average bitrate we'd like it to produce. The // encoder is free to adjust or disregard the given bitrate (the default // implementation does the latter). 
- RTC_DEPRECATED virtual void SetTargetBitrate(int target_bps); + ABSL_DEPRECATED("Use OnReceivedTargetAudioBitrate instead") + virtual void SetTargetBitrate(int target_bps); // Causes this encoder to let go of any other encoders it contains, and // returns a pointer to an array where they are stored (which is required to @@ -210,7 +209,8 @@ class AudioEncoder { virtual void OnReceivedUplinkPacketLossFraction( float uplink_packet_loss_fraction); - RTC_DEPRECATED virtual void OnReceivedUplinkRecoverablePacketLossFraction( + ABSL_DEPRECATED("") + virtual void OnReceivedUplinkRecoverablePacketLossFraction( float uplink_recoverable_packet_loss_fraction); // Provides target audio bitrate to this encoder to allow it to adapt. diff --git a/api/audio_codecs/audio_encoder_factory.h b/api/audio_codecs/audio_encoder_factory.h index 48995a876d..6128b1b6f3 100644 --- a/api/audio_codecs/audio_encoder_factory.h +++ b/api/audio_codecs/audio_encoder_factory.h @@ -44,6 +44,8 @@ class AudioEncoderFactory : public rtc::RefCountInterface { // communication between the AudioEncoder and AudioDecoder instances, which is // needed for some codecs with built-in bandwidth adaptation.) // + // Returns null if the format isn't supported. + // // Note: Implementations need to be robust against combinations other than // one encoder, one decoder getting the same ID; such encoders must still // work. 
diff --git a/api/audio_codecs/audio_encoder_factory_template.h b/api/audio_codecs/audio_encoder_factory_template.h index 74cb053425..cdc7defd25 100644 --- a/api/audio_codecs/audio_encoder_factory_template.h +++ b/api/audio_codecs/audio_encoder_factory_template.h @@ -142,9 +142,8 @@ rtc::scoped_refptr CreateAudioEncoderFactory() { static_assert(sizeof...(Ts) >= 1, "Caller must give at least one template parameter"); - return rtc::scoped_refptr( - new rtc::RefCountedObject< - audio_encoder_factory_template_impl::AudioEncoderFactoryT>()); + return rtc::make_ref_counted< + audio_encoder_factory_template_impl::AudioEncoderFactoryT>(); } } // namespace webrtc diff --git a/api/audio_codecs/g711/BUILD.gn b/api/audio_codecs/g711/BUILD.gn index ba0586b901..92d77bed9f 100644 --- a/api/audio_codecs/g711/BUILD.gn +++ b/api/audio_codecs/g711/BUILD.gn @@ -25,6 +25,8 @@ rtc_library("audio_encoder_g711") { "../../../rtc_base:rtc_base_approved", "../../../rtc_base:safe_minmax", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -42,6 +44,8 @@ rtc_library("audio_decoder_g711") { "../../../modules/audio_coding:g711", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] diff --git a/api/audio_codecs/g722/BUILD.gn b/api/audio_codecs/g722/BUILD.gn index 8738ef889a..a186eabbb7 100644 --- a/api/audio_codecs/g722/BUILD.gn +++ b/api/audio_codecs/g722/BUILD.gn @@ -31,6 +31,8 @@ rtc_library("audio_encoder_g722") { "../../../rtc_base:rtc_base_approved", "../../../rtc_base:safe_minmax", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -48,6 +50,8 @@ rtc_library("audio_decoder_g722") { "../../../modules/audio_coding:g722", "../../../rtc_base:rtc_base_approved", 
"../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] diff --git a/api/audio_codecs/ilbc/BUILD.gn b/api/audio_codecs/ilbc/BUILD.gn index 066a73cff2..b6a5045eaf 100644 --- a/api/audio_codecs/ilbc/BUILD.gn +++ b/api/audio_codecs/ilbc/BUILD.gn @@ -30,6 +30,8 @@ rtc_library("audio_encoder_ilbc") { "../../../modules/audio_coding:ilbc", "../../../rtc_base:rtc_base_approved", "../../../rtc_base:safe_minmax", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -46,6 +48,8 @@ rtc_library("audio_decoder_ilbc") { "..:audio_codecs_api", "../../../modules/audio_coding:ilbc", "../../../rtc_base:rtc_base_approved", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] diff --git a/api/audio_codecs/ilbc/audio_encoder_ilbc.cc b/api/audio_codecs/ilbc/audio_encoder_ilbc.cc index bd653b7979..035b0dc34f 100644 --- a/api/audio_codecs/ilbc/audio_encoder_ilbc.cc +++ b/api/audio_codecs/ilbc/audio_encoder_ilbc.cc @@ -32,7 +32,7 @@ int GetIlbcBitrate(int ptime) { // 50 bytes per frame of 30 ms => (approx) 13333 bits/s. 
return 13333; default: - FATAL(); + RTC_CHECK_NOTREACHED(); } } } // namespace diff --git a/api/audio_codecs/isac/BUILD.gn b/api/audio_codecs/isac/BUILD.gn index 9eb32147e1..6ff6e5f092 100644 --- a/api/audio_codecs/isac/BUILD.gn +++ b/api/audio_codecs/isac/BUILD.gn @@ -68,6 +68,8 @@ rtc_library("audio_encoder_isac_fix") { "../../../modules/audio_coding:isac_fix", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -85,6 +87,8 @@ rtc_library("audio_decoder_isac_fix") { "../../../modules/audio_coding:isac_fix", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -102,6 +106,8 @@ rtc_library("audio_encoder_isac_float") { "../../../modules/audio_coding:isac", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -119,6 +125,8 @@ rtc_library("audio_decoder_isac_float") { "../../../modules/audio_coding:isac", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] diff --git a/api/audio_codecs/opus/BUILD.gn b/api/audio_codecs/opus/BUILD.gn index 5fb626d990..586e9b3dd8 100644 --- a/api/audio_codecs/opus/BUILD.gn +++ b/api/audio_codecs/opus/BUILD.gn @@ -23,8 +23,8 @@ rtc_library("audio_encoder_opus_config") { deps = [ "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] defines = [] if (rtc_opus_variable_complexity) { defines += [ "WEBRTC_OPUS_VARIABLE_COMPLEXITY=1" ] @@ -49,6 +49,8 @@ 
rtc_library("audio_encoder_opus") { "../../../modules/audio_coding:webrtc_opus", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -66,6 +68,8 @@ rtc_library("audio_decoder_opus") { "../../../modules/audio_coding:webrtc_opus", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -82,8 +86,8 @@ rtc_library("audio_encoder_multiopus") { "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", "../opus:audio_encoder_opus_config", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("audio_decoder_multiopus") { @@ -99,6 +103,8 @@ rtc_library("audio_decoder_multiopus") { "../../../modules/audio_coding:webrtc_multiopus", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", diff --git a/api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.cc b/api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.cc index f01caf11b6..0052c429b2 100644 --- a/api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.cc +++ b/api/audio_codecs/opus/audio_encoder_multi_channel_opus_config.cc @@ -38,7 +38,7 @@ operator=(const AudioEncoderMultiChannelOpusConfig&) = default; bool AudioEncoderMultiChannelOpusConfig::IsOk() const { if (frame_size_ms <= 0 || frame_size_ms % 10 != 0) return false; - if (num_channels < 0 || num_channels >= 255) { + if (num_channels >= 255) { return false; } if (bitrate_bps < kMinBitrateBps || bitrate_bps > kMaxBitrateBps) @@ -47,7 +47,7 @@ bool AudioEncoderMultiChannelOpusConfig::IsOk() const { return 
false; // Check the lengths: - if (num_channels < 0 || num_streams < 0 || coupled_streams < 0) { + if (num_streams < 0 || coupled_streams < 0) { return false; } if (num_streams < coupled_streams) { diff --git a/api/audio_codecs/opus/audio_encoder_opus_config.cc b/api/audio_codecs/opus/audio_encoder_opus_config.cc index 2f36d0261e..0e6f55ee65 100644 --- a/api/audio_codecs/opus/audio_encoder_opus_config.cc +++ b/api/audio_codecs/opus/audio_encoder_opus_config.cc @@ -61,7 +61,7 @@ bool AudioEncoderOpusConfig::IsOk() const { // well; we can add support for them when needed.) return false; } - if (num_channels < 0 || num_channels >= 255) { + if (num_channels >= 255) { return false; } if (!bitrate_bps) diff --git a/api/audio_codecs/test/audio_decoder_factory_template_unittest.cc b/api/audio_codecs/test/audio_decoder_factory_template_unittest.cc index 0e2e8c229f..464ecfd487 100644 --- a/api/audio_codecs/test/audio_decoder_factory_template_unittest.cc +++ b/api/audio_codecs/test/audio_decoder_factory_template_unittest.cc @@ -78,7 +78,7 @@ struct AudioDecoderFakeApi { TEST(AudioDecoderFactoryTemplateTest, NoDecoderTypes) { rtc::scoped_refptr factory( - new rtc::RefCountedObject< + rtc::make_ref_counted< audio_decoder_factory_template_impl::AudioDecoderFactoryT<>>()); EXPECT_THAT(factory->GetSupportedDecoders(), ::testing::IsEmpty()); EXPECT_FALSE(factory->IsSupportedDecoder({"foo", 8000, 1})); diff --git a/api/audio_codecs/test/audio_encoder_factory_template_unittest.cc b/api/audio_codecs/test/audio_encoder_factory_template_unittest.cc index 95ea85576d..110f9930bd 100644 --- a/api/audio_codecs/test/audio_encoder_factory_template_unittest.cc +++ b/api/audio_codecs/test/audio_encoder_factory_template_unittest.cc @@ -78,7 +78,7 @@ struct AudioEncoderFakeApi { TEST(AudioEncoderFactoryTemplateTest, NoEncoderTypes) { rtc::scoped_refptr factory( - new rtc::RefCountedObject< + rtc::make_ref_counted< audio_encoder_factory_template_impl::AudioEncoderFactoryT<>>()); 
EXPECT_THAT(factory->GetSupportedEncoders(), ::testing::IsEmpty()); EXPECT_EQ(absl::nullopt, factory->QueryAudioEncoder({"foo", 8000, 1})); diff --git a/api/audio_options.h b/api/audio_options.h index b714998c6b..1b0d1ad0bd 100644 --- a/api/audio_options.h +++ b/api/audio_options.h @@ -75,6 +75,8 @@ struct RTC_EXPORT AudioOptions { // and check if any other AudioOptions members are unused. absl::optional combined_audio_video_bwe; // Enable audio network adaptor. + // TODO(webrtc:11717): Remove this API in favor of adaptivePtime in + // RtpEncodingParameters. absl::optional audio_network_adaptor; // Config string for audio network adaptor. absl::optional audio_network_adaptor_config; diff --git a/api/call/transport.h b/api/call/transport.h index 2a2a87a5f6..8bff28825d 100644 --- a/api/call/transport.h +++ b/api/call/transport.h @@ -14,7 +14,8 @@ #include #include -#include +#include "api/ref_counted_base.h" +#include "api/scoped_refptr.h" namespace webrtc { @@ -30,7 +31,7 @@ struct PacketOptions { int packet_id = -1; // Additional data bound to the RTP packet for use in application code, // outside of WebRTC. - std::vector application_data; + rtc::scoped_refptr additional_data; // Whether this is a retransmission of an earlier packet. 
bool is_retransmit = false; bool included_in_feedback = false; diff --git a/api/candidate.cc b/api/candidate.cc index c857f89c3c..d5fe3a0672 100644 --- a/api/candidate.cc +++ b/api/candidate.cc @@ -12,6 +12,7 @@ #include "rtc_base/helpers.h" #include "rtc_base/ip_address.h" +#include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" namespace cricket { @@ -129,9 +130,21 @@ Candidate Candidate::ToSanitizedCopy(bool use_hostname_address, bool filter_related_address) const { Candidate copy(*this); if (use_hostname_address) { - rtc::SocketAddress hostname_only_addr(address().hostname(), - address().port()); - copy.set_address(hostname_only_addr); + rtc::IPAddress ip; + if (address().hostname().empty()) { + // IP needs to be redacted, but no hostname available. + rtc::SocketAddress redacted_addr("redacted-ip.invalid", address().port()); + copy.set_address(redacted_addr); + } else if (IPFromString(address().hostname(), &ip)) { + // The hostname is an IP literal, and needs to be redacted too. 
+ rtc::SocketAddress redacted_addr("redacted-literal.invalid", + address().port()); + copy.set_address(redacted_addr); + } else { + rtc::SocketAddress hostname_only_addr(address().hostname(), + address().port()); + copy.set_address(hostname_only_addr); + } } if (filter_related_address) { copy.set_related_address( diff --git a/api/create_peerconnection_factory.cc b/api/create_peerconnection_factory.cc index 6223150079..008fce3e80 100644 --- a/api/create_peerconnection_factory.cc +++ b/api/create_peerconnection_factory.cc @@ -18,6 +18,7 @@ #include "api/rtc_event_log/rtc_event_log_factory.h" #include "api/scoped_refptr.h" #include "api/task_queue/default_task_queue_factory.h" +#include "api/transport/field_trial_based_config.h" #include "media/base/media_engine.h" #include "media/engine/webrtc_media_engine.h" #include "modules/audio_device/include/audio_device.h" @@ -36,7 +37,8 @@ rtc::scoped_refptr CreatePeerConnectionFactory( std::unique_ptr video_encoder_factory, std::unique_ptr video_decoder_factory, rtc::scoped_refptr audio_mixer, - rtc::scoped_refptr audio_processing) { + rtc::scoped_refptr audio_processing, + AudioFrameProcessor* audio_frame_processor) { PeerConnectionFactoryDependencies dependencies; dependencies.network_thread = network_thread; dependencies.worker_thread = worker_thread; @@ -45,12 +47,14 @@ rtc::scoped_refptr CreatePeerConnectionFactory( dependencies.call_factory = CreateCallFactory(); dependencies.event_log_factory = std::make_unique( dependencies.task_queue_factory.get()); + dependencies.trials = std::make_unique(); cricket::MediaEngineDependencies media_dependencies; media_dependencies.task_queue_factory = dependencies.task_queue_factory.get(); media_dependencies.adm = std::move(default_adm); media_dependencies.audio_encoder_factory = std::move(audio_encoder_factory); media_dependencies.audio_decoder_factory = std::move(audio_decoder_factory); + media_dependencies.audio_frame_processor = audio_frame_processor; if (audio_processing) { 
media_dependencies.audio_processing = std::move(audio_processing); } else { @@ -59,6 +63,7 @@ rtc::scoped_refptr CreatePeerConnectionFactory( media_dependencies.audio_mixer = std::move(audio_mixer); media_dependencies.video_encoder_factory = std::move(video_encoder_factory); media_dependencies.video_decoder_factory = std::move(video_decoder_factory); + media_dependencies.trials = dependencies.trials.get(); dependencies.media_engine = cricket::CreateMediaEngine(std::move(media_dependencies)); diff --git a/api/create_peerconnection_factory.h b/api/create_peerconnection_factory.h index ac50736b80..4eb0a00e54 100644 --- a/api/create_peerconnection_factory.h +++ b/api/create_peerconnection_factory.h @@ -31,6 +31,7 @@ class Thread; namespace webrtc { class AudioDeviceModule; +class AudioFrameProcessor; class AudioProcessing; // Create a new instance of PeerConnectionFactoryInterface with optional video @@ -47,7 +48,8 @@ CreatePeerConnectionFactory( std::unique_ptr video_encoder_factory, std::unique_ptr video_decoder_factory, rtc::scoped_refptr audio_mixer, - rtc::scoped_refptr audio_processing); + rtc::scoped_refptr audio_processing, + AudioFrameProcessor* audio_frame_processor = nullptr); } // namespace webrtc diff --git a/api/crypto/frame_decryptor_interface.h b/api/crypto/frame_decryptor_interface.h index ec900ab80a..2f6bdac4b4 100644 --- a/api/crypto/frame_decryptor_interface.h +++ b/api/crypto/frame_decryptor_interface.h @@ -27,7 +27,6 @@ namespace webrtc { // without it. You may assume that this interface will have the same lifetime // as the RTPReceiver it is attached to. It must only be attached to one // RTPReceiver. Additional data may be null. -// Note: This interface is not ready for production use. 
class FrameDecryptorInterface : public rtc::RefCountInterface { public: // The Status enum represents all possible states that can be diff --git a/api/crypto/frame_encryptor_interface.h b/api/crypto/frame_encryptor_interface.h index d5b6d8a2d5..1452b80189 100644 --- a/api/crypto/frame_encryptor_interface.h +++ b/api/crypto/frame_encryptor_interface.h @@ -24,7 +24,6 @@ namespace webrtc { // addition to the standard SRTP mechanism and is not intended to be used // without it. Implementations of this interface will have the same lifetime as // the RTPSenders it is attached to. Additional data may be null. -// Note: This interface is not ready for production use. class FrameEncryptorInterface : public rtc::RefCountInterface { public: ~FrameEncryptorInterface() override {} diff --git a/api/data_channel_interface.h b/api/data_channel_interface.h index 5b2b1263ab..56bb6c98fb 100644 --- a/api/data_channel_interface.h +++ b/api/data_channel_interface.h @@ -44,11 +44,13 @@ struct DataChannelInit { // // Cannot be set along with |maxRetransmits|. // This is called |maxPacketLifeTime| in the WebRTC JS API. + // Negative values are ignored, and positive values are clamped to [0-65535] absl::optional maxRetransmitTime; // The max number of retransmissions. // // Cannot be set along with |maxRetransmitTime|. + // Negative values are ignored, and positive values are clamped to [0-65535] absl::optional maxRetransmits; // This is set by the application and opaque to the WebRTC implementation. diff --git a/api/g3doc/index.md b/api/g3doc/index.md new file mode 100644 index 0000000000..49637d191a --- /dev/null +++ b/api/g3doc/index.md @@ -0,0 +1,51 @@ + + + +# The WebRTC API + +The public API of the WebRTC library consists of the api/ directory and +its subdirectories. No other files should be depended on by webrtc users. 
+ +Before starting to code against the API, it is important to understand +some basic concepts, such as: + +* Memory management, including webrtc's reference counted objects +* [Thread management](threading_design.md) + +## Using WebRTC through the PeerConnection class + +The +[PeerConnectionInterface](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/peer_connection_interface.h?q=webrtc::PeerConnectionInterface) +class is the recommended way to use the WebRTC library. + +It is closely modeled after the Javascript API documented in the [WebRTC +specification](https://w3c.github.io/webrtc-pc/). + +PeerConnections are created using the [PeerConnectionFactoryInterface](https://source.chromium.org/search?q=webrtc::PeerConnectionFactoryInterface). + +There are two levels of customization available: + +* Pass a PeerConnectionFactoryDependencies object to the function that creates + a PeerConnectionFactory. This object defines factories for a lot of internal + objects inside the PeerConnection, so that users can override them. + All PeerConnections using this interface will have the same options. +* Pass a PeerConnectionInterface::RTCConfiguration object to the + CreatePeerConnectionOrError() function on the + PeerConnectionFactoryInterface. These customizations will apply only to a + single PeerConnection. + +Most functions on the PeerConnection interface are asynchronous, and take a +callback that is executed when the function is finished. The callbacks are +mostly called on the thread that is passed as the "signaling thread" field of +the PeerConnectionFactoryDependencies, or the thread that called +PeerConnectionFactory::CreatePeerConnectionOrError() if no thread is given. + +See each class' module documentation for details. + +## Using WebRTC components without the PeerConnection class + +This needs to be done carefully, and in consultation with the WebRTC team. There +are non-obvious dependencies between many of the components. 
+ + + diff --git a/api/DESIGN.md b/api/g3doc/threading_design.md similarity index 93% rename from api/DESIGN.md rename to api/g3doc/threading_design.md index 0a2f36eb2b..20c3539b22 100644 --- a/api/DESIGN.md +++ b/api/g3doc/threading_design.md @@ -1,4 +1,6 @@ -# Design considerations + + +# API Threading Design considerations The header files in this directory form the API to the WebRTC library that is intended for client applications' use. @@ -30,12 +32,12 @@ the two calls. sequential execution - other names for such constructs are task runners and sequenced task queues. -# Client threads and callbacks +## Client threads and callbacks At the moment, the API does not give any guarantee on which thread* the callbacks and events are called on. So it's best to write all callback and event handlers like this (pseudocode): -
+```
 void ObserverClass::Handler(event) {
   if (!called_on_client_thread()) {
     dispatch_to_client_thread(bind(handler(event)));
@@ -43,11 +45,11 @@ void ObserverClass::Handler(event) {
   }
   // Process event, we're now on the right thread
 }
-
+``` In the future, the implementation may change to always call the callbacks and event handlers on the client thread. -# Implementation considerations +## Implementation considerations The C++ classes that are part of the public API are also used to derive classes that form part of the implementation. diff --git a/api/ice_transport_factory.cc b/api/ice_transport_factory.cc index c32d7d2e11..26ef88bf1c 100644 --- a/api/ice_transport_factory.cc +++ b/api/ice_transport_factory.cc @@ -14,6 +14,7 @@ #include #include "p2p/base/ice_transport_internal.h" +#include "p2p/base/p2p_constants.h" #include "p2p/base/p2p_transport_channel.h" #include "p2p/base/port_allocator.h" #include "rtc_base/thread.h" @@ -41,7 +42,7 @@ class IceTransportWithTransportChannel : public IceTransportInterface { } private: - const rtc::ThreadChecker thread_checker_{}; + const SequenceChecker thread_checker_{}; const std::unique_ptr internal_ RTC_GUARDED_BY(thread_checker_); }; @@ -57,10 +58,18 @@ rtc::scoped_refptr CreateIceTransport( rtc::scoped_refptr CreateIceTransport( IceTransportInit init) { - return new rtc::RefCountedObject( - std::make_unique( - "", 0, init.port_allocator(), init.async_resolver_factory(), - init.event_log())); + if (init.async_resolver_factory()) { + // Backwards compatibility mode + return rtc::make_ref_counted( + std::make_unique( + "", cricket::ICE_CANDIDATE_COMPONENT_RTP, init.port_allocator(), + init.async_resolver_factory(), init.event_log())); + } else { + return rtc::make_ref_counted( + cricket::P2PTransportChannel::Create( + "", cricket::ICE_CANDIDATE_COMPONENT_RTP, init.port_allocator(), + init.async_dns_resolver_factory(), init.event_log())); + } } } // namespace webrtc diff --git a/api/ice_transport_interface.h b/api/ice_transport_interface.h index d2f1edc012..a3b364c87a 100644 --- a/api/ice_transport_interface.h +++ b/api/ice_transport_interface.h @@ -13,6 +13,7 @@ #include +#include "api/async_dns_resolver.h" #include "api/async_resolver_factory.h" 
#include "api/rtc_error.h" #include "api/rtc_event_log/rtc_event_log.h" @@ -52,11 +53,21 @@ struct IceTransportInit final { port_allocator_ = port_allocator; } + AsyncDnsResolverFactoryInterface* async_dns_resolver_factory() { + return async_dns_resolver_factory_; + } + void set_async_dns_resolver_factory( + AsyncDnsResolverFactoryInterface* async_dns_resolver_factory) { + RTC_DCHECK(!async_resolver_factory_); + async_dns_resolver_factory_ = async_dns_resolver_factory; + } AsyncResolverFactory* async_resolver_factory() { return async_resolver_factory_; } + ABSL_DEPRECATED("bugs.webrtc.org/12598") void set_async_resolver_factory( AsyncResolverFactory* async_resolver_factory) { + RTC_DCHECK(!async_dns_resolver_factory_); async_resolver_factory_ = async_resolver_factory; } @@ -65,8 +76,11 @@ struct IceTransportInit final { private: cricket::PortAllocator* port_allocator_ = nullptr; + AsyncDnsResolverFactoryInterface* async_dns_resolver_factory_ = nullptr; + // For backwards compatibility. Only one resolver factory can be set. AsyncResolverFactory* async_resolver_factory_ = nullptr; RtcEventLog* event_log_ = nullptr; + // TODO(https://crbug.com/webrtc/12657): Redesign to have const members. }; // TODO(qingsi): The factory interface is defined in this file instead of its diff --git a/api/jsep.h b/api/jsep.h index cf8aeb0cb4..b56cf1d15b 100644 --- a/api/jsep.h +++ b/api/jsep.h @@ -28,7 +28,6 @@ #include "absl/types/optional.h" #include "api/rtc_error.h" -#include "rtc_base/deprecation.h" #include "rtc_base/ref_count.h" #include "rtc_base/system/rtc_export.h" @@ -136,6 +135,13 @@ class RTC_EXPORT SessionDescriptionInterface { virtual ~SessionDescriptionInterface() {} + // Create a new SessionDescriptionInterface object + // with the same values as the old object. + // TODO(bugs.webrtc.org:12215): Remove default implementation + virtual std::unique_ptr Clone() const { + return nullptr; + } + // Only for use internally. 
virtual cricket::SessionDescription* description() = 0; virtual const cricket::SessionDescription* description() const = 0; diff --git a/api/jsep_ice_candidate.h b/api/jsep_ice_candidate.h index 4ee84cf79c..1a4247cb07 100644 --- a/api/jsep_ice_candidate.h +++ b/api/jsep_ice_candidate.h @@ -34,6 +34,8 @@ class RTC_EXPORT JsepIceCandidate : public IceCandidateInterface { JsepIceCandidate(const std::string& sdp_mid, int sdp_mline_index, const cricket::Candidate& candidate); + JsepIceCandidate(const JsepIceCandidate&) = delete; + JsepIceCandidate& operator=(const JsepIceCandidate&) = delete; ~JsepIceCandidate() override; // |err| may be null. bool Initialize(const std::string& sdp, SdpParseError* err); @@ -53,8 +55,6 @@ class RTC_EXPORT JsepIceCandidate : public IceCandidateInterface { std::string sdp_mid_; int sdp_mline_index_; cricket::Candidate candidate_; - - RTC_DISALLOW_COPY_AND_ASSIGN(JsepIceCandidate); }; // Implementation of IceCandidateCollection which stores JsepIceCandidates. @@ -64,6 +64,8 @@ class JsepCandidateCollection : public IceCandidateCollection { // Move constructor is defined so that a vector of JsepCandidateCollections // can be resized. JsepCandidateCollection(JsepCandidateCollection&& o); + // Returns a copy of the candidate collection. + JsepCandidateCollection Clone() const; size_t count() const override; bool HasCandidate(const IceCandidateInterface* candidate) const override; // Adds and takes ownership of the JsepIceCandidate. 
diff --git a/api/jsep_session_description.h b/api/jsep_session_description.h index 79e15e21fe..70ac9398a6 100644 --- a/api/jsep_session_description.h +++ b/api/jsep_session_description.h @@ -23,7 +23,6 @@ #include "api/jsep.h" #include "api/jsep_ice_candidate.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/deprecation.h" namespace cricket { class SessionDescription; @@ -49,6 +48,8 @@ class JsepSessionDescription : public SessionDescriptionInterface { const std::string& session_id, const std::string& session_version); + virtual std::unique_ptr Clone() const; + virtual cricket::SessionDescription* description() { return description_.get(); } diff --git a/api/media_stream_interface.h b/api/media_stream_interface.h index bd4a2c0292..8892ee5a0b 100644 --- a/api/media_stream_interface.h +++ b/api/media_stream_interface.h @@ -216,6 +216,11 @@ class AudioTrackSinkInterface { number_of_frames); } + // Returns the number of channels encoded by the sink. This can be less than + // the number_of_channels if down-mixing occur. A value of -1 means an unknown + // number. + virtual int NumPreferredChannels() const { return -1; } + protected: virtual ~AudioTrackSinkInterface() {} }; diff --git a/pc/media_stream_track.h b/api/media_stream_track.h similarity index 88% rename from pc/media_stream_track.h rename to api/media_stream_track.h index 358d89a25b..738f034143 100644 --- a/pc/media_stream_track.h +++ b/api/media_stream_track.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef PC_MEDIA_STREAM_TRACK_H_ -#define PC_MEDIA_STREAM_TRACK_H_ +#ifndef API_MEDIA_STREAM_TRACK_H_ +#define API_MEDIA_STREAM_TRACK_H_ #include @@ -38,6 +38,7 @@ class MediaStreamTrack : public Notifier { } return fire_on_change; } + void set_ended() { set_state(MediaStreamTrackInterface::TrackState::kEnded); } protected: explicit MediaStreamTrack(const std::string& id) @@ -53,10 +54,10 @@ class MediaStreamTrack : public Notifier { private: bool enabled_; - std::string id_; + const std::string id_; MediaStreamTrackInterface::TrackState state_; }; } // namespace webrtc -#endif // PC_MEDIA_STREAM_TRACK_H_ +#endif // API_MEDIA_STREAM_TRACK_H_ diff --git a/api/media_types.cc b/api/media_types.cc index 6bc693860d..3453ce3905 100644 --- a/api/media_types.cc +++ b/api/media_types.cc @@ -26,10 +26,12 @@ std::string MediaTypeToString(MediaType type) { return kMediaTypeVideo; case MEDIA_TYPE_DATA: return kMediaTypeData; + case MEDIA_TYPE_UNSUPPORTED: + // Unsupported media stores the m= differently. + RTC_NOTREACHED(); + return ""; } - FATAL(); - // Not reachable; avoids compile warning. 
- return ""; + RTC_CHECK_NOTREACHED(); } } // namespace cricket diff --git a/api/media_types.h b/api/media_types.h index 8c6ba3d1ed..b2ff08c0c3 100644 --- a/api/media_types.h +++ b/api/media_types.h @@ -20,7 +20,12 @@ namespace cricket { -enum MediaType { MEDIA_TYPE_AUDIO, MEDIA_TYPE_VIDEO, MEDIA_TYPE_DATA }; +enum MediaType { + MEDIA_TYPE_AUDIO, + MEDIA_TYPE_VIDEO, + MEDIA_TYPE_DATA, + MEDIA_TYPE_UNSUPPORTED +}; extern const char kMediaTypeAudio[]; extern const char kMediaTypeVideo[]; diff --git a/api/neteq/BUILD.gn b/api/neteq/BUILD.gn index 1ab02ec92b..4e85c4d268 100644 --- a/api/neteq/BUILD.gn +++ b/api/neteq/BUILD.gn @@ -23,8 +23,8 @@ rtc_source_set("neteq_api") { "../../rtc_base:rtc_base_approved", "../../system_wrappers:system_wrappers", "../audio_codecs:audio_codecs_api", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("custom_neteq_factory") { @@ -56,8 +56,8 @@ rtc_source_set("neteq_controller_api") { ":tick_timer", "../../rtc_base:rtc_base_approved", "../../system_wrappers:system_wrappers", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("default_neteq_controller_factory") { diff --git a/api/neteq/neteq.cc b/api/neteq/neteq.cc index 155ddf2cf3..e8ef4dbd39 100644 --- a/api/neteq/neteq.cc +++ b/api/neteq/neteq.cc @@ -30,7 +30,8 @@ std::string NetEq::Config::ToString() const { << ", min_delay_ms=" << min_delay_ms << ", enable_fast_accelerate=" << (enable_fast_accelerate ? "true" : "false") << ", enable_muted_state=" << (enable_muted_state ? "true" : "false") - << ", enable_rtx_handling=" << (enable_rtx_handling ? "true" : "false"); + << ", enable_rtx_handling=" << (enable_rtx_handling ? 
"true" : "false") + << ", extra_output_delay_ms=" << extra_output_delay_ms; return ss.str(); } diff --git a/api/neteq/neteq.h b/api/neteq/neteq.h index f62d3795f0..ea7079e369 100644 --- a/api/neteq/neteq.h +++ b/api/neteq/neteq.h @@ -36,7 +36,6 @@ struct NetEqNetworkStatistics { uint16_t preferred_buffer_size_ms; // Target buffer size in ms. uint16_t jitter_peaks_found; // 1 if adding extra delay due to peaky // jitter; 0 otherwise. - uint16_t packet_loss_rate; // Loss rate (network + late) in Q14. uint16_t expand_rate; // Fraction (of original stream) of synthesized // audio inserted through expansion (in Q14). uint16_t speech_expand_rate; // Fraction (of original stream) of synthesized @@ -49,7 +48,6 @@ struct NetEqNetworkStatistics { // decoding (in Q14). uint16_t secondary_discarded_rate; // Fraction of discarded FEC/RED data (in // Q14). - size_t added_zero_samples; // Number of zero samples added in "off" mode. // Statistics for packet waiting times, i.e., the time between a packet // arrives until it is decoded. int mean_waiting_time_ms; @@ -138,6 +136,10 @@ class NetEq { bool enable_rtx_handling = false; absl::optional codec_pair_id; bool for_test_no_time_stretching = false; // Use only for testing. + // Adds extra delay to the output of NetEq, without affecting jitter or + // loss behavior. This is mainly for testing. Value must be a non-negative + // multiple of 10 ms. + int extra_output_delay_ms = 0; }; enum ReturnCodes { kOK = 0, kFail = -1 }; @@ -212,11 +214,15 @@ class NetEq { // |data_| in |audio_frame| is not written, but should be interpreted as being // all zeros. For testing purposes, an override can be supplied in the // |action_override| argument, which will cause NetEq to take this action - // next, instead of the action it would normally choose. + // next, instead of the action it would normally choose. 
An optional output + // argument for fetching the current sample rate can be provided, which + // will return the same value as last_output_sample_rate_hz() but will avoid + // additional synchronization. // Returns kOK on success, or kFail in case of an error. virtual int GetAudio( AudioFrame* audio_frame, bool* muted, + int* current_sample_rate_hz = nullptr, absl::optional action_override = absl::nullopt) = 0; // Replaces the current set of decoders with the given one. @@ -270,6 +276,9 @@ class NetEq { // after the call. virtual int NetworkStatistics(NetEqNetworkStatistics* stats) = 0; + // Current values only, not resetting any state. + virtual NetEqNetworkStatistics CurrentNetworkStatistics() const = 0; + // Returns a copy of this class's lifetime statistics. These statistics are // never reset. virtual NetEqLifetimeStatistics GetLifetimeStatistics() const = 0; diff --git a/api/neteq/neteq_controller.h b/api/neteq/neteq_controller.h index 1d47eaca78..4c49a0c24a 100644 --- a/api/neteq/neteq_controller.h +++ b/api/neteq/neteq_controller.h @@ -97,6 +97,15 @@ class NetEqController { size_t sync_buffer_samples; }; + struct PacketArrivedInfo { + size_t packet_length_samples; + uint32_t main_timestamp; + uint16_t main_sequence_number; + bool is_cng_or_dtmf; + bool is_dtx; + bool buffer_flush; + }; + virtual ~NetEqController() = default; // Resets object to a clean state. @@ -152,16 +161,17 @@ class NetEqController { virtual void AddSampleMemory(int32_t value) = 0; // Returns the target buffer level in ms. - virtual int TargetLevelMs() = 0; + virtual int TargetLevelMs() const = 0; // Notify the NetEqController that a packet has arrived. Returns the relative // arrival delay, if it can be computed. 
- virtual absl::optional PacketArrived(bool last_cng_or_dtmf, - size_t packet_length_samples, + virtual absl::optional PacketArrived(int fs_hz, bool should_update_stats, - uint16_t main_sequence_number, - uint32_t main_timestamp, - int fs_hz) = 0; + const PacketArrivedInfo& info) = 0; + + // Notify the NetEqController that we are currently in muted state. + // TODO(ivoc): Make pure virtual when downstream is updated. + virtual void NotifyMutedState() {} // Returns true if a peak was found. virtual bool PeakFound() const = 0; diff --git a/api/numerics/BUILD.gn b/api/numerics/BUILD.gn new file mode 100644 index 0000000000..408dc5b9f1 --- /dev/null +++ b/api/numerics/BUILD.gn @@ -0,0 +1,41 @@ +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +import("../../webrtc.gni") + +rtc_library("numerics") { + visibility = [ "*" ] + + sources = [ + "samples_stats_counter.cc", + "samples_stats_counter.h", + ] + deps = [ + "..:array_view", + "../../rtc_base:checks", + "../../rtc_base:rtc_numerics", + "../../rtc_base:timeutils", + "../units:timestamp", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] +} + +if (rtc_include_tests) { + rtc_library("numerics_unittests") { + visibility = [ "*" ] + testonly = true + + sources = [ "samples_stats_counter_unittest.cc" ] + + deps = [ + ":numerics", + "../../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] + } +} diff --git a/api/numerics/DEPS b/api/numerics/DEPS new file mode 100644 index 0000000000..2d89d57557 --- /dev/null +++ b/api/numerics/DEPS @@ -0,0 +1,6 @@ +specific_include_rules = { + # Some internal headers are allowed even in API headers: + "samples_stats_counter\.h": [ + "+rtc_base/numerics/running_statistics.h", + ] +} diff --git a/rtc_base/numerics/samples_stats_counter.cc b/api/numerics/samples_stats_counter.cc similarity index 97% rename from rtc_base/numerics/samples_stats_counter.cc rename to api/numerics/samples_stats_counter.cc index 9b98a3181d..36871a6713 100644 --- a/rtc_base/numerics/samples_stats_counter.cc +++ b/api/numerics/samples_stats_counter.cc @@ -8,8 +8,9 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "rtc_base/numerics/samples_stats_counter.h" +#include "api/numerics/samples_stats_counter.h" +#include #include #include "absl/algorithm/container.h" diff --git a/rtc_base/numerics/samples_stats_counter.h b/api/numerics/samples_stats_counter.h similarity index 92% rename from rtc_base/numerics/samples_stats_counter.h rename to api/numerics/samples_stats_counter.h index a4ec443d31..283c1e4ed2 100644 --- a/rtc_base/numerics/samples_stats_counter.h +++ b/api/numerics/samples_stats_counter.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef RTC_BASE_NUMERICS_SAMPLES_STATS_COUNTER_H_ -#define RTC_BASE_NUMERICS_SAMPLES_STATS_COUNTER_H_ +#ifndef API_NUMERICS_SAMPLES_STATS_COUNTER_H_ +#define API_NUMERICS_SAMPLES_STATS_COUNTER_H_ #include @@ -45,6 +45,8 @@ class SamplesStatsCounter { // Returns if there are any values in O(1) time. bool IsEmpty() const { return samples_.empty(); } + // Returns the amount of samples added into counter in O(1) time. + int64_t NumSamples() const { return stats_.Size(); } // Returns min in O(1) time. This function may not be called if there are no // samples. 
@@ -98,7 +100,7 @@ class SamplesStatsCounter { } private: - RunningStatistics stats_; + webrtc_impl::RunningStatistics stats_; std::vector samples_; bool sorted_ = false; }; @@ -116,4 +118,4 @@ SamplesStatsCounter operator/(const SamplesStatsCounter& counter, double value); } // namespace webrtc -#endif // RTC_BASE_NUMERICS_SAMPLES_STATS_COUNTER_H_ +#endif // API_NUMERICS_SAMPLES_STATS_COUNTER_H_ diff --git a/rtc_base/numerics/samples_stats_counter_unittest.cc b/api/numerics/samples_stats_counter_unittest.cc similarity index 99% rename from rtc_base/numerics/samples_stats_counter_unittest.cc rename to api/numerics/samples_stats_counter_unittest.cc index 1221e9b2a5..1f9cabfb29 100644 --- a/rtc_base/numerics/samples_stats_counter_unittest.cc +++ b/api/numerics/samples_stats_counter_unittest.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "rtc_base/numerics/samples_stats_counter.h" +#include "api/numerics/samples_stats_counter.h" #include diff --git a/api/peer_connection_interface.cc b/api/peer_connection_interface.cc index 0c25405784..230731c42d 100644 --- a/api/peer_connection_interface.cc +++ b/api/peer_connection_interface.cc @@ -10,8 +10,7 @@ #include "api/peer_connection_interface.h" -#include "api/dtls_transport_interface.h" -#include "api/sctp_transport_interface.h" +#include namespace webrtc { @@ -53,27 +52,6 @@ RTCError PeerConnectionInterface::SetConfiguration( return RTCError(); } -RTCError PeerConnectionInterface::SetBitrate(const BitrateSettings& bitrate) { - BitrateParameters bitrate_parameters; - bitrate_parameters.min_bitrate_bps = bitrate.min_bitrate_bps; - bitrate_parameters.current_bitrate_bps = bitrate.start_bitrate_bps; - bitrate_parameters.max_bitrate_bps = bitrate.max_bitrate_bps; - return SetBitrate(bitrate_parameters); -} - -RTCError PeerConnectionInterface::SetBitrate( - const BitrateParameters& bitrate_parameters) { - BitrateSettings bitrate; - bitrate.min_bitrate_bps = 
bitrate_parameters.min_bitrate_bps; - bitrate.start_bitrate_bps = bitrate_parameters.current_bitrate_bps; - bitrate.max_bitrate_bps = bitrate_parameters.max_bitrate_bps; - return SetBitrate(bitrate); -} - -PeerConnectionInterface::BitrateParameters::BitrateParameters() = default; - -PeerConnectionInterface::BitrateParameters::~BitrateParameters() = default; - PeerConnectionDependencies::PeerConnectionDependencies( PeerConnectionObserver* observer_in) : observer(observer_in) {} @@ -98,14 +76,34 @@ PeerConnectionFactoryInterface::CreatePeerConnection( std::unique_ptr allocator, std::unique_ptr cert_generator, PeerConnectionObserver* observer) { - return nullptr; + PeerConnectionDependencies dependencies(observer); + dependencies.allocator = std::move(allocator); + dependencies.cert_generator = std::move(cert_generator); + auto result = + CreatePeerConnectionOrError(configuration, std::move(dependencies)); + if (!result.ok()) { + return nullptr; + } + return result.MoveValue(); } rtc::scoped_refptr PeerConnectionFactoryInterface::CreatePeerConnection( const PeerConnectionInterface::RTCConfiguration& configuration, PeerConnectionDependencies dependencies) { - return nullptr; + auto result = + CreatePeerConnectionOrError(configuration, std::move(dependencies)); + if (!result.ok()) { + return nullptr; + } + return result.MoveValue(); +} + +RTCErrorOr> +PeerConnectionFactoryInterface::CreatePeerConnectionOrError( + const PeerConnectionInterface::RTCConfiguration& configuration, + PeerConnectionDependencies dependencies) { + return RTCError(RTCErrorType::INTERNAL_ERROR); } RtpCapabilities PeerConnectionFactoryInterface::GetRtpSenderCapabilities( diff --git a/api/peer_connection_interface.h b/api/peer_connection_interface.h index 1d81de74d8..5499b7d87c 100644 --- a/api/peer_connection_interface.h +++ b/api/peer_connection_interface.h @@ -67,18 +67,25 @@ #ifndef API_PEER_CONNECTION_INTERFACE_H_ #define API_PEER_CONNECTION_INTERFACE_H_ +#include #include +#include #include 
#include #include +#include "absl/base/attributes.h" +#include "absl/types/optional.h" +#include "api/adaptation/resource.h" +#include "api/async_dns_resolver.h" #include "api/async_resolver_factory.h" #include "api/audio/audio_mixer.h" #include "api/audio_codecs/audio_decoder_factory.h" #include "api/audio_codecs/audio_encoder_factory.h" #include "api/audio_options.h" #include "api/call/call_factory_interface.h" +#include "api/candidate.h" #include "api/crypto/crypto_options.h" #include "api/data_channel_interface.h" #include "api/dtls_transport_interface.h" @@ -86,39 +93,50 @@ #include "api/ice_transport_interface.h" #include "api/jsep.h" #include "api/media_stream_interface.h" +#include "api/media_types.h" #include "api/neteq/neteq_factory.h" #include "api/network_state_predictor.h" #include "api/packet_socket_factory.h" #include "api/rtc_error.h" #include "api/rtc_event_log/rtc_event_log_factory_interface.h" #include "api/rtc_event_log_output.h" +#include "api/rtp_parameters.h" #include "api/rtp_receiver_interface.h" #include "api/rtp_sender_interface.h" #include "api/rtp_transceiver_interface.h" +#include "api/scoped_refptr.h" #include "api/sctp_transport_interface.h" +#include "api/set_local_description_observer_interface.h" #include "api/set_remote_description_observer_interface.h" #include "api/stats/rtc_stats_collector_callback.h" #include "api/stats_types.h" #include "api/task_queue/task_queue_factory.h" #include "api/transport/bitrate_settings.h" #include "api/transport/enums.h" -#include "api/transport/media/media_transport_interface.h" #include "api/transport/network_control.h" +#include "api/transport/sctp_transport_factory_interface.h" #include "api/transport/webrtc_key_value_config.h" #include "api/turn_customizer.h" +#include "api/video/video_bitrate_allocator_factory.h" +#include "call/rtp_transport_controller_send_factory_interface.h" #include "media/base/media_config.h" #include "media/base/media_engine.h" // TODO(bugs.webrtc.org/7447): We plan 
to provide a way to let applications // inject a PacketSocketFactory and/or NetworkManager, and not expose -// PortAllocator in the PeerConnection api. +// PortAllocator in the PeerConnection api. This will let us remove nogncheck. +#include "p2p/base/port.h" // nogncheck #include "p2p/base/port_allocator.h" // nogncheck #include "rtc_base/network.h" +#include "rtc_base/network_constants.h" +#include "rtc_base/network_monitor_factory.h" +#include "rtc_base/ref_count.h" #include "rtc_base/rtc_certificate.h" #include "rtc_base/rtc_certificate_generator.h" #include "rtc_base/socket_address.h" #include "rtc_base/ssl_certificate.h" #include "rtc_base/ssl_stream_adapter.h" #include "rtc_base/system/rtc_export.h" +#include "rtc_base/thread.h" namespace rtc { class Thread; @@ -401,12 +419,6 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // from consideration for gathering ICE candidates. bool disable_link_local_networks = false; - // If set to true, use RTP data channels instead of SCTP. - // TODO(deadbeef): Remove this. We no longer commit to supporting RTP data - // channels, though some applications are still working on moving off of - // them. - bool enable_rtp_data_channel = false; - // Minimum bitrate at which screencast video tracks will be encoded at. // This means adding padding bits up to this bitrate, which can help // when switching from a static scene to one with motion. @@ -613,46 +625,14 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // correctly. This flag will be deprecated soon. Do not rely on it. bool active_reset_srtp_params = false; - // DEPRECATED. Do not use. This option is ignored by peer connection. - // TODO(webrtc:9719): Delete this option. - bool use_media_transport = false; - - // DEPRECATED. Do not use. This option is ignored by peer connection. - // TODO(webrtc:9719): Delete this option. 
- bool use_media_transport_for_data_channels = false; - - // If MediaTransportFactory is provided in PeerConnectionFactory, this flag - // informs PeerConnection that it should use the DatagramTransportInterface - // for packets instead DTLS. It's invalid to set it to |true| if the - // MediaTransportFactory wasn't provided. - absl::optional use_datagram_transport; - - // If MediaTransportFactory is provided in PeerConnectionFactory, this flag - // informs PeerConnection that it should use the DatagramTransport's - // implementation of DataChannelTransportInterface for data channels instead - // of SCTP-DTLS. - absl::optional use_datagram_transport_for_data_channels; - - // If true, this PeerConnection will only use datagram transport for data - // channels when receiving an incoming offer that includes datagram - // transport parameters. It will not request use of a datagram transport - // when it creates the initial, outgoing offer. - // This setting only applies when |use_datagram_transport_for_data_channels| - // is true. - absl::optional use_datagram_transport_for_data_channels_receive_only; - // Defines advanced optional cryptographic settings related to SRTP and // frame encryption for native WebRTC. Setting this will overwrite any // settings set in PeerConnectionFactory (which is deprecated). absl::optional crypto_options; // Configure if we should include the SDP attribute extmap-allow-mixed in - // our offer. Although we currently do support this, it's not included in - // our offer by default due to a previous bug that caused the SDP parser to - // abort parsing if this attribute was present. This is fixed in Chrome 71. - // TODO(webrtc:9985): Change default to true once sufficient time has - // passed. - bool offer_extmap_allow_mixed = false; + // our offer on session level. + bool offer_extmap_allow_mixed = true; // TURN logging identifier. 
// This identifier is added to a TURN allocation @@ -666,8 +646,13 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // Whether network condition based codec switching is allowed. absl::optional allow_codec_switching; - bool enable_simulcast_stats = true; + // The delay before doing a usage histogram report for long-lived + // PeerConnections. Used for testing only. + absl::optional report_usage_pattern_delay_ms; + // The ping interval (ms) when the connection is stable and writable. This + // parameter overrides the default value in the ICE implementation if set. + absl::optional stable_writable_connection_ping_interval_ms; // // Don't forget to update operator== if adding something. // @@ -934,9 +919,28 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // Also, calling CreateDataChannel is the only way to get a data "m=" section // in SDP, so it should be done before CreateOffer is called, if the // application plans to use data channels. + virtual RTCErrorOr> + CreateDataChannelOrError(const std::string& label, + const DataChannelInit* config) { + return RTCError(RTCErrorType::INTERNAL_ERROR, "dummy function called"); + } + // TODO(crbug.com/788659): Remove "virtual" below and default implementation + // above once mock in Chrome is fixed. + ABSL_DEPRECATED("Use CreateDataChannelOrError") virtual rtc::scoped_refptr CreateDataChannel( const std::string& label, - const DataChannelInit* config) = 0; + const DataChannelInit* config) { + auto result = CreateDataChannelOrError(label, config); + if (!result.ok()) { + return nullptr; + } else { + return result.MoveValue(); + } + } + + // NOTE: For the following 6 methods, it's only safe to dereference the + // SessionDescriptionInterface on signaling_thread() (for example, calling + // ToString). // Returns the more recently applied description; "pending" if it exists, and // otherwise "current". See below. 
@@ -977,26 +981,66 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { const RTCOfferAnswerOptions& options) = 0; // Sets the local session description. - // The PeerConnection takes the ownership of |desc| even if it fails. - // The |observer| callback will be called when done. - // TODO(deadbeef): Change |desc| to be a unique_ptr, to make it clear - // that this method always takes ownership of it. + // + // According to spec, the local session description MUST be the same as was + // returned by CreateOffer() or CreateAnswer() or else the operation should + // fail. Our implementation however allows some amount of "SDP munging", but + // please note that this is HIGHLY DISCOURAGED. If you do not intent to munge + // SDP, the method below that doesn't take |desc| as an argument will create + // the offer or answer for you. + // + // The observer is invoked as soon as the operation completes, which could be + // before or after the SetLocalDescription() method has exited. + virtual void SetLocalDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer) {} + // Creates an offer or answer (depending on current signaling state) and sets + // it as the local session description. + // + // The observer is invoked as soon as the operation completes, which could be + // before or after the SetLocalDescription() method has exited. + virtual void SetLocalDescription( + rtc::scoped_refptr observer) {} + // Like SetLocalDescription() above, but the observer is invoked with a delay + // after the operation completes. This helps avoid recursive calls by the + // observer but also makes it possible for states to change in-between the + // operation completing and the observer getting called. This makes them racy + // for synchronizing peer connection states to the application. + // TODO(https://crbug.com/webrtc/11798): Delete these methods in favor of the + // ones taking SetLocalDescriptionObserverInterface as argument. 
virtual void SetLocalDescription(SetSessionDescriptionObserver* observer, SessionDescriptionInterface* desc) = 0; - // Implicitly creates an offer or answer (depending on the current signaling - // state) and performs SetLocalDescription() with the newly generated session - // description. - // TODO(hbos): Make pure virtual when implemented by downstream projects. virtual void SetLocalDescription(SetSessionDescriptionObserver* observer) {} + // Sets the remote session description. - // The PeerConnection takes the ownership of |desc| even if it fails. - // The |observer| callback will be called when done. - // TODO(hbos): Remove when Chrome implements the new signature. - virtual void SetRemoteDescription(SetSessionDescriptionObserver* observer, - SessionDescriptionInterface* desc) {} + // + // (Unlike "SDP munging" before SetLocalDescription(), modifying a remote + // offer or answer is allowed by the spec.) + // + // The observer is invoked as soon as the operation completes, which could be + // before or after the SetRemoteDescription() method has exited. virtual void SetRemoteDescription( std::unique_ptr desc, rtc::scoped_refptr observer) = 0; + // Like SetRemoteDescription() above, but the observer is invoked with a delay + // after the operation completes. This helps avoid recursive calls by the + // observer but also makes it possible for states to change in-between the + // operation completing and the observer getting called. This makes them racy + // for synchronizing peer connection states to the application. + // TODO(https://crbug.com/webrtc/11798): Delete this method in favor of the + // ones taking SetRemoteDescriptionObserverInterface as argument. + virtual void SetRemoteDescription(SetSessionDescriptionObserver* observer, + SessionDescriptionInterface* desc) {} + + // According to spec, we must only fire "negotiationneeded" if the Operations + // Chain is empty. 
This method takes care of validating an event previously + // generated with PeerConnectionObserver::OnNegotiationNeededEvent() to make + // sure that even if there was a delay (e.g. due to a PostTask) between the + // event being generated and the time of firing, the Operations Chain is empty + // and the event is still valid to be fired. + virtual bool ShouldFireNegotiationNeededEvent(uint32_t event_id) { + return true; + } virtual PeerConnectionInterface::RTCConfiguration GetConfiguration() = 0; @@ -1041,32 +1085,20 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // Removes a group of remote candidates from the ICE agent. Needed mainly for // continual gathering, to avoid an ever-growing list of candidates as - // networks come and go. + // networks come and go. Note that the candidates' transport_name must be set + // to the MID of the m= section that generated the candidate. + // TODO(bugs.webrtc.org/8395): Use IceCandidateInterface instead of + // cricket::Candidate, which would avoid the transport_name oddity. virtual bool RemoveIceCandidates( const std::vector& candidates) = 0; - // 0 <= min <= current <= max should hold for set parameters. - struct BitrateParameters { - BitrateParameters(); - ~BitrateParameters(); - - absl::optional min_bitrate_bps; - absl::optional current_bitrate_bps; - absl::optional max_bitrate_bps; - }; - // SetBitrate limits the bandwidth allocated for all RTP streams sent by // this PeerConnection. Other limitations might affect these limits and // are respected (for example "b=AS" in SDP). // // Setting |current_bitrate_bps| will reset the current bitrate estimate // to the provided value. - virtual RTCError SetBitrate(const BitrateSettings& bitrate); - - // TODO(nisse): Deprecated - use version above. These two default - // implementations require subclasses to implement one or the other - // of the methods. 
- virtual RTCError SetBitrate(const BitrateParameters& bitrate_parameters); + virtual RTCError SetBitrate(const BitrateSettings& bitrate) = 0; // Enable/disable playout of received audio streams. Enabled by default. Note // that even if playout is enabled, streams will only be played out if the @@ -1074,13 +1106,11 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // playout of the underlying audio device but starts a task which will poll // for audio data every 10ms to ensure that audio processing happens and the // audio statistics are updated. - // TODO(henrika): deprecate and remove this. virtual void SetAudioPlayout(bool playout) {} // Enable/disable recording of transmitted audio streams. Enabled by default. // Note that even if recording is enabled, streams will only be recorded if // the appropriate SDP is also applied. - // TODO(henrika): deprecate and remove this. virtual void SetAudioRecording(bool recording) {} // Looks up the DtlsTransport associated with a MID value. @@ -1118,6 +1148,14 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { return absl::nullopt; } + // When a resource is overused, the PeerConnection will try to reduce the load + // on the sysem, for example by reducing the resolution or frame rate of + // encoded streams. The Resource API allows injecting platform-specific usage + // measurements. The conditions to trigger kOveruse or kUnderuse are up to the + // implementation. + // TODO(hbos): Make pure virtual when implemented by downstream projects. + virtual void AddAdaptationResource(rtc::scoped_refptr resource) {} + // Start RtcEventLog using an existing output-sink. Takes ownership of // |output| and passes it on to Call, which will take the ownership. If the // operation fails the output will be closed and deallocated. 
The event log @@ -1142,6 +1180,14 @@ class RTC_EXPORT PeerConnectionInterface : public rtc::RefCountInterface { // thus the observer object can be safely destroyed. virtual void Close() = 0; + // The thread on which all PeerConnectionObserver callbacks will be invoked, + // as well as callbacks for other classes such as DataChannelObserver. + // + // Also the only thread on which it's safe to use SessionDescriptionInterface + // pointers. + // TODO(deadbeef): Make pure virtual when all subclasses implement it. + virtual rtc::Thread* signaling_thread() const { return nullptr; } + protected: // Dtor protected as objects shouldn't be deleted via this interface. ~PeerConnectionInterface() override = default; @@ -1170,7 +1216,17 @@ class PeerConnectionObserver { // Triggered when renegotiation is needed. For example, an ICE restart // has begun. - virtual void OnRenegotiationNeeded() = 0; + // TODO(hbos): Delete in favor of OnNegotiationNeededEvent() when downstream + // projects have migrated. + virtual void OnRenegotiationNeeded() {} + // Used to fire spec-compliant onnegotiationneeded events, which should only + // fire when the Operations Chain is empty. The observer is responsible for + // queuing a task (e.g. Chromium: jump to main thread) to maybe fire the + // event. The event identified using |event_id| must only fire if + // PeerConnection::ShouldFireNegotiationNeededEvent() returns true since it is + // possible for the event to become invalidated by operations subsequently + // chained. + virtual void OnNegotiationNeededEvent(uint32_t event_id) {} // Called any time the legacy IceConnectionState changes. // @@ -1293,6 +1349,10 @@ struct RTC_EXPORT PeerConnectionDependencies final { // packet_socket_factory, not both. 
std::unique_ptr allocator; std::unique_ptr packet_socket_factory; + // Factory for creating resolvers that look up hostnames in DNS + std::unique_ptr + async_dns_resolver_factory; + // Deprecated - use async_dns_resolver_factory std::unique_ptr async_resolver_factory; std::unique_ptr ice_transport_factory; std::unique_ptr cert_generator; @@ -1332,9 +1392,15 @@ struct RTC_EXPORT PeerConnectionFactoryDependencies final { std::unique_ptr network_state_predictor_factory; std::unique_ptr network_controller_factory; - std::unique_ptr media_transport_factory; + // This will only be used if CreatePeerConnection is called without a + // |port_allocator|, causing the default allocator and network manager to be + // used. + std::unique_ptr network_monitor_factory; std::unique_ptr neteq_factory; + std::unique_ptr sctp_factory; std::unique_ptr trials; + std::unique_ptr + transport_controller_send_factory; }; // PeerConnectionFactoryInterface is the factory interface used for creating @@ -1362,10 +1428,6 @@ class RTC_EXPORT PeerConnectionFactoryInterface // testing/debugging. bool disable_encryption = false; - // Deprecated. The only effect of setting this to true is that - // CreateDataChannel will fail, which is not that useful. - bool disable_sctp_data_channels = false; - // If set to true, any platform-supported network monitoring capability // won't be used, and instead networks will only be updated via polling. // @@ -1394,6 +1456,13 @@ class RTC_EXPORT PeerConnectionFactoryInterface // configuration and a PeerConnectionDependencies structure. // TODO(benwright): Make pure virtual once downstream mock PC factory classes // are updated. + virtual RTCErrorOr> + CreatePeerConnectionOrError( + const PeerConnectionInterface::RTCConfiguration& configuration, + PeerConnectionDependencies dependencies); + // Deprecated creator - does not return an error code on error. + // TODO(bugs.webrtc.org:12238): Deprecate and remove. 
+ ABSL_DEPRECATED("Use CreatePeerConnectionOrError") virtual rtc::scoped_refptr CreatePeerConnection( const PeerConnectionInterface::RTCConfiguration& configuration, PeerConnectionDependencies dependencies); @@ -1407,6 +1476,7 @@ class RTC_EXPORT PeerConnectionFactoryInterface // responsibility of the caller to delete it. It can be safely deleted after // Close has been called on the returned PeerConnection, which ensures no // more observer callbacks will be invoked. + ABSL_DEPRECATED("Use CreatePeerConnectionOrError") virtual rtc::scoped_refptr CreatePeerConnection( const PeerConnectionInterface::RTCConfiguration& configuration, std::unique_ptr allocator, diff --git a/api/proxy.cc b/api/proxy.cc deleted file mode 100644 index e668285ba2..0000000000 --- a/api/proxy.cc +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "api/proxy.h" - -namespace webrtc { -namespace internal { - -SynchronousMethodCall::SynchronousMethodCall(rtc::MessageHandler* proxy) - : proxy_(proxy) {} - -SynchronousMethodCall::~SynchronousMethodCall() = default; - -void SynchronousMethodCall::Invoke(const rtc::Location& posted_from, - rtc::Thread* t) { - if (t->IsCurrent()) { - proxy_->OnMessage(nullptr); - } else { - t->Post(posted_from, this, 0); - e_.Wait(rtc::Event::kForever); - } -} - -void SynchronousMethodCall::OnMessage(rtc::Message*) { - proxy_->OnMessage(nullptr); - e_.Set(); -} - -} // namespace internal -} // namespace webrtc diff --git a/api/ref_counted_base.h b/api/ref_counted_base.h index a1761db851..931cb20762 100644 --- a/api/ref_counted_base.h +++ b/api/ref_counted_base.h @@ -10,8 +10,9 @@ #ifndef API_REF_COUNTED_BASE_H_ #define API_REF_COUNTED_BASE_H_ +#include + #include "rtc_base/constructor_magic.h" -#include "rtc_base/ref_count.h" #include "rtc_base/ref_counter.h" namespace rtc { @@ -30,6 +31,10 @@ class RefCountedBase { } protected: + // Provided for internal webrtc subclasses for corner cases where it's + // necessary to know whether or not a reference is exclusively held. + bool HasOneRef() const { return ref_count_.HasOneRef(); } + virtual ~RefCountedBase() = default; private: @@ -38,6 +43,55 @@ class RefCountedBase { RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedBase); }; +// Template based version of `RefCountedBase` for simple implementations that do +// not need (or want) destruction via virtual destructor or the overhead of a +// vtable. +// +// To use: +// struct MyInt : public rtc::RefCountedNonVirtual { +// int foo_ = 0; +// }; +// +// rtc::scoped_refptr my_int(new MyInt()); +// +// sizeof(MyInt) on a 32 bit system would then be 8, int + refcount and no +// vtable generated. 
+template +class RefCountedNonVirtual { + public: + RefCountedNonVirtual() = default; + + void AddRef() const { ref_count_.IncRef(); } + RefCountReleaseStatus Release() const { + // If you run into this assert, T has virtual methods. There are two + // options: + // 1) The class doesn't actually need virtual methods, the type is complete + // so the virtual attribute(s) can be removed. + // 2) The virtual methods are a part of the design of the class. In this + // case you can consider using `RefCountedBase` instead or alternatively + // use `rtc::RefCountedObject`. + static_assert(!std::is_polymorphic::value, + "T has virtual methods. RefCountedBase is a better fit."); + const auto status = ref_count_.DecRef(); + if (status == RefCountReleaseStatus::kDroppedLastRef) { + delete static_cast(this); + } + return status; + } + + protected: + // Provided for internal webrtc subclasses for corner cases where it's + // necessary to know whether or not a reference is exclusively held. + bool HasOneRef() const { return ref_count_.HasOneRef(); } + + ~RefCountedNonVirtual() = default; + + private: + mutable webrtc::webrtc_impl::RefCounter ref_count_{0}; + + RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedNonVirtual); +}; + } // namespace rtc #endif // API_REF_COUNTED_BASE_H_ diff --git a/api/rtc_error.h b/api/rtc_error.h index b8cb7f0bcd..7cfd89ab75 100644 --- a/api/rtc_error.h +++ b/api/rtc_error.h @@ -11,9 +11,9 @@ #ifndef API_RTC_ERROR_H_ #define API_RTC_ERROR_H_ -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST #include -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST #include #include // For std::move. 
@@ -137,7 +137,7 @@ class RTC_EXPORT RTCError { RTCErrorDetailType error_detail() const { return error_detail_; } void set_error_detail(RTCErrorDetailType detail) { error_detail_ = detail; } - absl::optional sctp_cause_code() { return sctp_cause_code_; } + absl::optional sctp_cause_code() const { return sctp_cause_code_; } void set_sctp_cause_code(uint16_t cause_code) { sctp_cause_code_ = cause_code; } @@ -161,7 +161,7 @@ class RTC_EXPORT RTCError { RTC_EXPORT const char* ToString(RTCErrorType error); RTC_EXPORT const char* ToString(RTCErrorDetailType error); -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& stream, // no-presubmit-check TODO(webrtc:8982) RTCErrorType error) { @@ -173,7 +173,7 @@ inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) RTCErrorDetailType error) { return stream << ToString(error); } -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST // Helper macro that can be used by implementations to create an error with a // message and log it. 
|message| should be a string literal or movable diff --git a/api/rtc_event_log/BUILD.gn b/api/rtc_event_log/BUILD.gn index e853058e25..158dc06a7b 100644 --- a/api/rtc_event_log/BUILD.gn +++ b/api/rtc_event_log/BUILD.gn @@ -37,6 +37,7 @@ rtc_library("rtc_event_log_factory") { ":rtc_event_log", "../../rtc_base:checks", "../../rtc_base/system:rtc_export", + "../../system_wrappers:field_trial", "../task_queue", ] diff --git a/api/rtc_event_log/rtc_event.cc b/api/rtc_event_log/rtc_event.cc index 81e6a4e6da..631188b915 100644 --- a/api/rtc_event_log/rtc_event.cc +++ b/api/rtc_event_log/rtc_event.cc @@ -14,6 +14,6 @@ namespace webrtc { -RtcEvent::RtcEvent() : timestamp_us_(rtc::TimeMicros()) {} +RtcEvent::RtcEvent() : timestamp_us_(rtc::TimeMillis() * 1000) {} } // namespace webrtc diff --git a/api/rtc_event_log/rtc_event.h b/api/rtc_event_log/rtc_event.h index 101f78f255..51db8f0b4d 100644 --- a/api/rtc_event_log/rtc_event.h +++ b/api/rtc_event_log/rtc_event.h @@ -52,7 +52,8 @@ class RtcEvent { VideoSendStreamConfig, GenericPacketSent, GenericPacketReceived, - GenericAckReceived + GenericAckReceived, + FrameDecoded }; RtcEvent(); diff --git a/api/rtc_event_log/rtc_event_log_factory.cc b/api/rtc_event_log/rtc_event_log_factory.cc index 2013584399..fdf267b7ba 100644 --- a/api/rtc_event_log/rtc_event_log_factory.cc +++ b/api/rtc_event_log/rtc_event_log_factory.cc @@ -14,6 +14,7 @@ #include #include "rtc_base/checks.h" +#include "system_wrappers/include/field_trial.h" #ifdef WEBRTC_ENABLE_RTC_EVENT_LOG #include "logging/rtc_event_log/rtc_event_log_impl.h" @@ -29,6 +30,9 @@ RtcEventLogFactory::RtcEventLogFactory(TaskQueueFactory* task_queue_factory) std::unique_ptr RtcEventLogFactory::CreateRtcEventLog( RtcEventLog::EncodingType encoding_type) { #ifdef WEBRTC_ENABLE_RTC_EVENT_LOG + if (field_trial::IsEnabled("WebRTC-RtcEventLogKillSwitch")) { + return std::make_unique(); + } return std::make_unique(encoding_type, task_queue_factory_); #else return std::make_unique(); diff 
--git a/api/rtp_headers.cc b/api/rtp_headers.cc index bf973b6fe5..e0ad9eb26e 100644 --- a/api/rtp_headers.cc +++ b/api/rtp_headers.cc @@ -26,9 +26,7 @@ RTPHeaderExtension::RTPHeaderExtension() videoRotation(kVideoRotation_0), hasVideoContentType(false), videoContentType(VideoContentType::UNSPECIFIED), - has_video_timing(false), - has_frame_marking(false), - frame_marking({false, false, false, false, false, 0xFF, 0, 0}) {} + has_video_timing(false) {} RTPHeaderExtension::RTPHeaderExtension(const RTPHeaderExtension& other) = default; diff --git a/api/rtp_headers.h b/api/rtp_headers.h index 163347f675..cf3d909499 100644 --- a/api/rtp_headers.h +++ b/api/rtp_headers.h @@ -21,10 +21,8 @@ #include "api/units/timestamp.h" #include "api/video/color_space.h" #include "api/video/video_content_type.h" -#include "api/video/video_frame_marking.h" #include "api/video/video_rotation.h" #include "api/video/video_timing.h" -#include "common_types.h" // NOLINT(build/include) namespace webrtc { @@ -143,19 +141,15 @@ struct RTPHeaderExtension { bool has_video_timing; VideoSendTiming video_timing; - bool has_frame_marking; - FrameMarking frame_marking; - - PlayoutDelay playout_delay = {-1, -1}; + VideoPlayoutDelay playout_delay; // For identification of a stream when ssrc is not signaled. See - // https://tools.ietf.org/html/draft-ietf-avtext-rid-09 - // TODO(danilchap): Update url from draft to release version. + // https://tools.ietf.org/html/rfc8852 std::string stream_id; std::string repaired_stream_id; // For identifying the media section used to interpret this RTP packet. 
See - // https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38 + // https://tools.ietf.org/html/rfc8843 std::string mid; absl::optional color_space; diff --git a/api/rtp_packet_info.cc b/api/rtp_packet_info.cc index a9ebd9df48..db818f7657 100644 --- a/api/rtp_packet_info.cc +++ b/api/rtp_packet_info.cc @@ -16,7 +16,7 @@ namespace webrtc { RtpPacketInfo::RtpPacketInfo() - : ssrc_(0), rtp_timestamp_(0), receive_time_ms_(-1) {} + : ssrc_(0), rtp_timestamp_(0), receive_time_(Timestamp::MinusInfinity()) {} RtpPacketInfo::RtpPacketInfo( uint32_t ssrc, @@ -24,19 +24,19 @@ RtpPacketInfo::RtpPacketInfo( uint32_t rtp_timestamp, absl::optional audio_level, absl::optional absolute_capture_time, - int64_t receive_time_ms) + Timestamp receive_time) : ssrc_(ssrc), csrcs_(std::move(csrcs)), rtp_timestamp_(rtp_timestamp), audio_level_(audio_level), absolute_capture_time_(absolute_capture_time), - receive_time_ms_(receive_time_ms) {} + receive_time_(receive_time) {} RtpPacketInfo::RtpPacketInfo(const RTPHeader& rtp_header, - int64_t receive_time_ms) + Timestamp receive_time) : ssrc_(rtp_header.ssrc), rtp_timestamp_(rtp_header.timestamp), - receive_time_ms_(receive_time_ms) { + receive_time_(receive_time) { const auto& extension = rtp_header.extension; const auto csrcs_count = std::min(rtp_header.numCSRCs, kRtpCsrcSize); @@ -49,12 +49,31 @@ RtpPacketInfo::RtpPacketInfo(const RTPHeader& rtp_header, absolute_capture_time_ = extension.absolute_capture_time; } +RtpPacketInfo::RtpPacketInfo( + uint32_t ssrc, + std::vector csrcs, + uint32_t rtp_timestamp, + absl::optional audio_level, + absl::optional absolute_capture_time, + int64_t receive_time_ms) + : RtpPacketInfo(ssrc, + csrcs, + rtp_timestamp, + audio_level, + absolute_capture_time, + Timestamp::Millis(receive_time_ms)) {} +RtpPacketInfo::RtpPacketInfo(const RTPHeader& rtp_header, + int64_t receive_time_ms) + : RtpPacketInfo(rtp_header, Timestamp::Millis(receive_time_ms)) {} + bool operator==(const RtpPacketInfo& 
lhs, const RtpPacketInfo& rhs) { return (lhs.ssrc() == rhs.ssrc()) && (lhs.csrcs() == rhs.csrcs()) && (lhs.rtp_timestamp() == rhs.rtp_timestamp()) && (lhs.audio_level() == rhs.audio_level()) && (lhs.absolute_capture_time() == rhs.absolute_capture_time()) && - (lhs.receive_time_ms() == rhs.receive_time_ms()); + (lhs.receive_time() == rhs.receive_time() && + (lhs.local_capture_clock_offset() == + rhs.local_capture_clock_offset())); } } // namespace webrtc diff --git a/api/rtp_packet_info.h b/api/rtp_packet_info.h index 639ba32770..605620d638 100644 --- a/api/rtp_packet_info.h +++ b/api/rtp_packet_info.h @@ -17,6 +17,7 @@ #include "absl/types/optional.h" #include "api/rtp_headers.h" +#include "api/units/timestamp.h" #include "rtc_base/system/rtc_export.h" namespace webrtc { @@ -35,8 +36,18 @@ class RTC_EXPORT RtpPacketInfo { uint32_t rtp_timestamp, absl::optional audio_level, absl::optional absolute_capture_time, - int64_t receive_time_ms); + Timestamp receive_time); + + RtpPacketInfo(const RTPHeader& rtp_header, Timestamp receive_time); + // TODO(bugs.webrtc.org/12722): Deprecated, remove once downstream projects + // are updated. 
+ RtpPacketInfo(uint32_t ssrc, + std::vector csrcs, + uint32_t rtp_timestamp, + absl::optional audio_level, + absl::optional absolute_capture_time, + int64_t receive_time_ms); RtpPacketInfo(const RTPHeader& rtp_header, int64_t receive_time_ms); RtpPacketInfo(const RtpPacketInfo& other) = default; @@ -64,8 +75,19 @@ class RTC_EXPORT RtpPacketInfo { absolute_capture_time_ = value; } - int64_t receive_time_ms() const { return receive_time_ms_; } - void set_receive_time_ms(int64_t value) { receive_time_ms_ = value; } + const absl::optional& local_capture_clock_offset() const { + return local_capture_clock_offset_; + } + + void set_local_capture_clock_offset(const absl::optional& value) { + local_capture_clock_offset_ = value; + } + + Timestamp receive_time() const { return receive_time_; } + void set_receive_time(Timestamp value) { receive_time_ = value; } + // TODO(bugs.webrtc.org/12722): Deprecated, remove once downstream projects + // are updated. + int64_t receive_time_ms() const { return receive_time_.ms(); } private: // Fields from the RTP header: @@ -80,10 +102,19 @@ class RTC_EXPORT RtpPacketInfo { // Fields from the Absolute Capture Time header extension: // http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time + // To not be confused with |local_capture_clock_offset_|, the + // |estimated_capture_clock_offset| in |absolute_capture_time_| should + // represent the clock offset between a remote sender and the capturer, and + // thus equals to the corresponding values in the received RTP packets, + // subjected to possible interpolations. absl::optional absolute_capture_time_; + // Clock offset against capturer's clock. Should be derived from the estimated + // capture clock offset defined in the Absolute Capture Time header extension. + absl::optional local_capture_clock_offset_; + // Local |webrtc::Clock|-based timestamp of when the packet was received. 
- int64_t receive_time_ms_; + Timestamp receive_time_; }; bool operator==(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs); diff --git a/api/rtp_packet_info_unittest.cc b/api/rtp_packet_info_unittest.cc index fe79f6df3c..601d34f49e 100644 --- a/api/rtp_packet_info_unittest.cc +++ b/api/rtp_packet_info_unittest.cc @@ -37,7 +37,7 @@ TEST(RtpPacketInfoTest, Ssrc) { rhs = RtpPacketInfo(); EXPECT_NE(rhs.ssrc(), value); - rhs = RtpPacketInfo(value, {}, {}, {}, {}, {}); + rhs = RtpPacketInfo(value, {}, {}, {}, {}, Timestamp::Millis(0)); EXPECT_EQ(rhs.ssrc(), value); } @@ -64,7 +64,7 @@ TEST(RtpPacketInfoTest, Csrcs) { rhs = RtpPacketInfo(); EXPECT_NE(rhs.csrcs(), value); - rhs = RtpPacketInfo({}, value, {}, {}, {}, {}); + rhs = RtpPacketInfo({}, value, {}, {}, {}, Timestamp::Millis(0)); EXPECT_EQ(rhs.csrcs(), value); } @@ -91,7 +91,7 @@ TEST(RtpPacketInfoTest, RtpTimestamp) { rhs = RtpPacketInfo(); EXPECT_NE(rhs.rtp_timestamp(), value); - rhs = RtpPacketInfo({}, {}, value, {}, {}, {}); + rhs = RtpPacketInfo({}, {}, value, {}, {}, Timestamp::Millis(0)); EXPECT_EQ(rhs.rtp_timestamp(), value); } @@ -118,7 +118,7 @@ TEST(RtpPacketInfoTest, AudioLevel) { rhs = RtpPacketInfo(); EXPECT_NE(rhs.audio_level(), value); - rhs = RtpPacketInfo({}, {}, {}, value, {}, {}); + rhs = RtpPacketInfo({}, {}, {}, value, {}, Timestamp::Millis(0)); EXPECT_EQ(rhs.audio_level(), value); } @@ -145,12 +145,41 @@ TEST(RtpPacketInfoTest, AbsoluteCaptureTime) { rhs = RtpPacketInfo(); EXPECT_NE(rhs.absolute_capture_time(), value); - rhs = RtpPacketInfo({}, {}, {}, {}, value, {}); + rhs = RtpPacketInfo({}, {}, {}, {}, value, Timestamp::Millis(0)); EXPECT_EQ(rhs.absolute_capture_time(), value); } +TEST(RtpPacketInfoTest, LocalCaptureClockOffset) { + RtpPacketInfo lhs; + RtpPacketInfo rhs; + + EXPECT_TRUE(lhs == rhs); + EXPECT_FALSE(lhs != rhs); + + const absl::optional value = 10; + rhs.set_local_capture_clock_offset(value); + EXPECT_EQ(rhs.local_capture_clock_offset(), value); + + EXPECT_FALSE(lhs == 
rhs); + EXPECT_TRUE(lhs != rhs); + + lhs = rhs; + + EXPECT_TRUE(lhs == rhs); + EXPECT_FALSE(lhs != rhs); + + // Default local capture clock offset is null. + rhs = RtpPacketInfo(); + EXPECT_EQ(rhs.local_capture_clock_offset(), absl::nullopt); + + // Default local capture clock offset is null. + rhs = RtpPacketInfo({}, {}, {}, {}, AbsoluteCaptureTime{12, 34}, + Timestamp::Millis(0)); + EXPECT_EQ(rhs.local_capture_clock_offset(), absl::nullopt); +} + TEST(RtpPacketInfoTest, ReceiveTimeMs) { - const int64_t value = 8868963877546349045LL; + const Timestamp timestamp = Timestamp::Micros(8868963877546349045LL); RtpPacketInfo lhs; RtpPacketInfo rhs; @@ -158,8 +187,8 @@ TEST(RtpPacketInfoTest, ReceiveTimeMs) { EXPECT_TRUE(lhs == rhs); EXPECT_FALSE(lhs != rhs); - rhs.set_receive_time_ms(value); - EXPECT_EQ(rhs.receive_time_ms(), value); + rhs.set_receive_time(timestamp); + EXPECT_EQ(rhs.receive_time(), timestamp); EXPECT_FALSE(lhs == rhs); EXPECT_TRUE(lhs != rhs); @@ -170,10 +199,10 @@ TEST(RtpPacketInfoTest, ReceiveTimeMs) { EXPECT_FALSE(lhs != rhs); rhs = RtpPacketInfo(); - EXPECT_NE(rhs.receive_time_ms(), value); + EXPECT_NE(rhs.receive_time(), timestamp); - rhs = RtpPacketInfo({}, {}, {}, {}, {}, value); - EXPECT_EQ(rhs.receive_time_ms(), value); + rhs = RtpPacketInfo({}, {}, {}, {}, {}, timestamp); + EXPECT_EQ(rhs.receive_time(), timestamp); } } // namespace webrtc diff --git a/api/rtp_packet_infos_unittest.cc b/api/rtp_packet_infos_unittest.cc index ce502ac378..e83358fc17 100644 --- a/api/rtp_packet_infos_unittest.cc +++ b/api/rtp_packet_infos_unittest.cc @@ -27,9 +27,12 @@ RtpPacketInfos::vector_type ToVector(Iterator begin, Iterator end) { } // namespace TEST(RtpPacketInfosTest, BasicFunctionality) { - RtpPacketInfo p0(123, {1, 2}, 89, 5, AbsoluteCaptureTime{45, 78}, 7); - RtpPacketInfo p1(456, {3, 4}, 89, 4, AbsoluteCaptureTime{13, 21}, 1); - RtpPacketInfo p2(789, {5, 6}, 88, 1, AbsoluteCaptureTime{99, 78}, 7); + RtpPacketInfo p0(123, {1, 2}, 89, 5, 
AbsoluteCaptureTime{45, 78}, + Timestamp::Millis(7)); + RtpPacketInfo p1(456, {3, 4}, 89, 4, AbsoluteCaptureTime{13, 21}, + Timestamp::Millis(1)); + RtpPacketInfo p2(789, {5, 6}, 88, 1, AbsoluteCaptureTime{99, 78}, + Timestamp::Millis(7)); RtpPacketInfos x({p0, p1, p2}); @@ -52,9 +55,12 @@ TEST(RtpPacketInfosTest, BasicFunctionality) { } TEST(RtpPacketInfosTest, CopyShareData) { - RtpPacketInfo p0(123, {1, 2}, 89, 5, AbsoluteCaptureTime{45, 78}, 7); - RtpPacketInfo p1(456, {3, 4}, 89, 4, AbsoluteCaptureTime{13, 21}, 1); - RtpPacketInfo p2(789, {5, 6}, 88, 1, AbsoluteCaptureTime{99, 78}, 7); + RtpPacketInfo p0(123, {1, 2}, 89, 5, AbsoluteCaptureTime{45, 78}, + Timestamp::Millis(7)); + RtpPacketInfo p1(456, {3, 4}, 89, 4, AbsoluteCaptureTime{13, 21}, + Timestamp::Millis(1)); + RtpPacketInfo p2(789, {5, 6}, 88, 1, AbsoluteCaptureTime{99, 78}, + Timestamp::Millis(7)); RtpPacketInfos lhs({p0, p1, p2}); RtpPacketInfos rhs = lhs; diff --git a/api/rtp_parameters.cc b/api/rtp_parameters.cc index a05b2bfa7b..5ce6780753 100644 --- a/api/rtp_parameters.cc +++ b/api/rtp_parameters.cc @@ -18,6 +18,21 @@ namespace webrtc { +const char* DegradationPreferenceToString( + DegradationPreference degradation_preference) { + switch (degradation_preference) { + case DegradationPreference::DISABLED: + return "disabled"; + case DegradationPreference::MAINTAIN_FRAMERATE: + return "maintain-framerate"; + case DegradationPreference::MAINTAIN_RESOLUTION: + return "maintain-resolution"; + case DegradationPreference::BALANCED: + return "balanced"; + } + RTC_CHECK_NOTREACHED(); +} + const double kDefaultBitratePriority = 1.0; RtcpFeedback::RtcpFeedback() = default; @@ -105,9 +120,9 @@ constexpr char RtpExtension::kAbsoluteCaptureTimeUri[]; constexpr char RtpExtension::kVideoRotationUri[]; constexpr char RtpExtension::kVideoContentTypeUri[]; constexpr char RtpExtension::kVideoTimingUri[]; -constexpr char RtpExtension::kFrameMarkingUri[]; constexpr char RtpExtension::kGenericFrameDescriptorUri00[]; 
constexpr char RtpExtension::kDependencyDescriptorUri[]; +constexpr char RtpExtension::kVideoLayersAllocationUri[]; constexpr char RtpExtension::kTransportSequenceNumberUri[]; constexpr char RtpExtension::kTransportSequenceNumberV2Uri[]; constexpr char RtpExtension::kPlayoutDelayUri[]; @@ -115,6 +130,7 @@ constexpr char RtpExtension::kColorSpaceUri[]; constexpr char RtpExtension::kMidUri[]; constexpr char RtpExtension::kRidUri[]; constexpr char RtpExtension::kRepairedRidUri[]; +constexpr char RtpExtension::kVideoFrameTrackingIdUri[]; constexpr int RtpExtension::kMinId; constexpr int RtpExtension::kMaxId; @@ -144,71 +160,131 @@ bool RtpExtension::IsSupportedForVideo(absl::string_view uri) { uri == webrtc::RtpExtension::kVideoContentTypeUri || uri == webrtc::RtpExtension::kVideoTimingUri || uri == webrtc::RtpExtension::kMidUri || - uri == webrtc::RtpExtension::kFrameMarkingUri || uri == webrtc::RtpExtension::kGenericFrameDescriptorUri00 || uri == webrtc::RtpExtension::kDependencyDescriptorUri || uri == webrtc::RtpExtension::kColorSpaceUri || uri == webrtc::RtpExtension::kRidUri || - uri == webrtc::RtpExtension::kRepairedRidUri; + uri == webrtc::RtpExtension::kRepairedRidUri || + uri == webrtc::RtpExtension::kVideoLayersAllocationUri || + uri == webrtc::RtpExtension::kVideoFrameTrackingIdUri; } bool RtpExtension::IsEncryptionSupported(absl::string_view uri) { - return uri == webrtc::RtpExtension::kAudioLevelUri || - uri == webrtc::RtpExtension::kTimestampOffsetUri || -#if !defined(ENABLE_EXTERNAL_AUTH) - // TODO(jbauch): Figure out a way to always allow "kAbsSendTimeUri" - // here and filter out later if external auth is really used in - // srtpfilter. External auth is used by Chromium and replaces the - // extension header value of "kAbsSendTimeUri", so it must not be - // encrypted (which can't be done by Chromium). 
- uri == webrtc::RtpExtension::kAbsSendTimeUri || + return +#if defined(ENABLE_EXTERNAL_AUTH) + // TODO(jbauch): Figure out a way to always allow "kAbsSendTimeUri" + // here and filter out later if external auth is really used in + // srtpfilter. External auth is used by Chromium and replaces the + // extension header value of "kAbsSendTimeUri", so it must not be + // encrypted (which can't be done by Chromium). + uri != webrtc::RtpExtension::kAbsSendTimeUri && #endif - uri == webrtc::RtpExtension::kAbsoluteCaptureTimeUri || - uri == webrtc::RtpExtension::kVideoRotationUri || - uri == webrtc::RtpExtension::kTransportSequenceNumberUri || - uri == webrtc::RtpExtension::kTransportSequenceNumberV2Uri || - uri == webrtc::RtpExtension::kPlayoutDelayUri || - uri == webrtc::RtpExtension::kVideoContentTypeUri || - uri == webrtc::RtpExtension::kMidUri || - uri == webrtc::RtpExtension::kRidUri || - uri == webrtc::RtpExtension::kRepairedRidUri; + uri != webrtc::RtpExtension::kEncryptHeaderExtensionsUri; } -const RtpExtension* RtpExtension::FindHeaderExtensionByUri( +// Returns whether a header extension with the given URI exists. +// Note: This does not differentiate between encrypted and non-encrypted +// extensions, so use with care! +static bool HeaderExtensionWithUriExists( const std::vector& extensions, absl::string_view uri) { for (const auto& extension : extensions) { if (extension.uri == uri) { + return true; + } + } + return false; +} + +const RtpExtension* RtpExtension::FindHeaderExtensionByUri( + const std::vector& extensions, + absl::string_view uri, + Filter filter) { + const webrtc::RtpExtension* fallback_extension = nullptr; + for (const auto& extension : extensions) { + if (extension.uri != uri) { + continue; + } + + switch (filter) { + case kDiscardEncryptedExtension: + // We only accept an unencrypted extension. 
+ if (!extension.encrypt) { + return &extension; + } + break; + + case kPreferEncryptedExtension: + // We prefer an encrypted extension but we can fall back to an + // unencrypted extension. + if (extension.encrypt) { + return &extension; + } else { + fallback_extension = &extension; + } + break; + + case kRequireEncryptedExtension: + // We only accept an encrypted extension. + if (extension.encrypt) { + return &extension; + } + break; + } + } + + // Returning fallback extension (if any) + return fallback_extension; +} + +const RtpExtension* RtpExtension::FindHeaderExtensionByUri( + const std::vector& extensions, + absl::string_view uri) { + return FindHeaderExtensionByUri(extensions, uri, kPreferEncryptedExtension); +} + +const RtpExtension* RtpExtension::FindHeaderExtensionByUriAndEncryption( + const std::vector& extensions, + absl::string_view uri, + bool encrypt) { + for (const auto& extension : extensions) { + if (extension.uri == uri && extension.encrypt == encrypt) { return &extension; } } return nullptr; } -std::vector RtpExtension::FilterDuplicateNonEncrypted( - const std::vector& extensions) { +const std::vector RtpExtension::DeduplicateHeaderExtensions( + const std::vector& extensions, + Filter filter) { std::vector filtered; - for (auto extension = extensions.begin(); extension != extensions.end(); - ++extension) { - if (extension->encrypt) { - filtered.push_back(*extension); - continue; - } - // Only add non-encrypted extension if no encrypted with the same URI - // is also present... 
- if (std::any_of(extension + 1, extensions.end(), - [&](const RtpExtension& check) { - return extension->uri == check.uri; - })) { - continue; + // If we do not discard encrypted extensions, add them first + if (filter != kDiscardEncryptedExtension) { + for (const auto& extension : extensions) { + if (!extension.encrypt) { + continue; + } + if (!HeaderExtensionWithUriExists(filtered, extension.uri)) { + filtered.push_back(extension); + } } + } - // ...and has not been added before. - if (!FindHeaderExtensionByUri(filtered, extension->uri)) { - filtered.push_back(*extension); + // If we do not require encrypted extensions, add missing, non-encrypted + // extensions. + if (filter != kRequireEncryptedExtension) { + for (const auto& extension : extensions) { + if (extension.encrypt) { + continue; + } + if (!HeaderExtensionWithUriExists(filtered, extension.uri)) { + filtered.push_back(extension); + } } } + return filtered; } } // namespace webrtc diff --git a/api/rtp_parameters.h b/api/rtp_parameters.h index d7156db05b..a098bad6b0 100644 --- a/api/rtp_parameters.h +++ b/api/rtp_parameters.h @@ -92,6 +92,9 @@ enum class DegradationPreference { BALANCED, }; +RTC_EXPORT const char* DegradationPreferenceToString( + DegradationPreference degradation_preference); + RTC_EXPORT extern const double kDefaultBitratePriority; struct RTC_EXPORT RtcpFeedback { @@ -219,7 +222,7 @@ struct RTC_EXPORT RtpHeaderExtensionCapability { bool preferred_encrypt = false; // The direction of the extension. The kStopped value is only used with - // RtpTransceiverInterface::header_extensions_offered() and + // RtpTransceiverInterface::HeaderExtensionsToOffer() and // SetOfferedRtpHeaderExtensions(). RtpTransceiverDirection direction = RtpTransceiverDirection::kSendRecv; @@ -243,6 +246,18 @@ struct RTC_EXPORT RtpHeaderExtensionCapability { // RTP header extension, see RFC8285. 
struct RTC_EXPORT RtpExtension { + enum Filter { + // Encrypted extensions will be ignored and only non-encrypted extensions + // will be considered. + kDiscardEncryptedExtension, + // Encrypted extensions will be preferred but will fall back to + // non-encrypted extensions if necessary. + kPreferEncryptedExtension, + // Encrypted extensions will be required, so any non-encrypted extensions + // will be discarded. + kRequireEncryptedExtension, + }; + RtpExtension(); RtpExtension(absl::string_view uri, int id); RtpExtension(absl::string_view uri, int id, bool encrypt); @@ -257,17 +272,28 @@ struct RTC_EXPORT RtpExtension { // Return "true" if the given RTP header extension URI may be encrypted. static bool IsEncryptionSupported(absl::string_view uri); - // Returns the named header extension if found among all extensions, - // nullptr otherwise. + // Returns the header extension with the given URI or nullptr if not found. + static const RtpExtension* FindHeaderExtensionByUri( + const std::vector& extensions, + absl::string_view uri, + Filter filter); + ABSL_DEPRECATED( + "Use RtpExtension::FindHeaderExtensionByUri with filter argument") static const RtpExtension* FindHeaderExtensionByUri( const std::vector& extensions, absl::string_view uri); - // Return a list of RTP header extensions with the non-encrypted extensions - // removed if both the encrypted and non-encrypted extension is present for - // the same URI. - static std::vector FilterDuplicateNonEncrypted( - const std::vector& extensions); + // Returns the header extension with the given URI and encrypt parameter, + // if found, otherwise nullptr. + static const RtpExtension* FindHeaderExtensionByUriAndEncryption( + const std::vector& extensions, + absl::string_view uri, + bool encrypt); + + // Returns a list of extensions where any extension URI is unique. 
+ static const std::vector DeduplicateHeaderExtensions( + const std::vector& extensions, + Filter filter); // Encryption of Header Extensions, see RFC 6904 for details: // https://tools.ietf.org/html/rfc6904 @@ -307,10 +333,6 @@ struct RTC_EXPORT RtpExtension { static constexpr char kVideoTimingUri[] = "http://www.webrtc.org/experiments/rtp-hdrext/video-timing"; - // Header extension for video frame marking. - static constexpr char kFrameMarkingUri[] = - "http://tools.ietf.org/html/draft-ietf-avtext-framemarking-07"; - // Experimental codec agnostic frame descriptor. static constexpr char kGenericFrameDescriptorUri00[] = "http://www.webrtc.org/experiments/rtp-hdrext/" @@ -319,6 +341,10 @@ struct RTC_EXPORT RtpExtension { "https://aomediacodec.github.io/av1-rtp-spec/" "#dependency-descriptor-rtp-header-extension"; + // Experimental extension for signalling target bitrate per layer. + static constexpr char kVideoLayersAllocationUri[] = + "http://www.webrtc.org/experiments/rtp-hdrext/video-layers-allocation00"; + // Header extension for transport sequence number, see url for details: // http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions static constexpr char kTransportSequenceNumberUri[] = @@ -350,6 +376,15 @@ struct RTC_EXPORT RtpExtension { static constexpr char kRepairedRidUri[] = "urn:ietf:params:rtp-hdrext:sdes:repaired-rtp-stream-id"; + // Header extension to propagate webrtc::VideoFrame id field + static constexpr char kVideoFrameTrackingIdUri[] = + "http://www.webrtc.org/experiments/rtp-hdrext/video-frame-tracking-id"; + + // Header extension for Mixer-to-Client audio levels per CSRC as defined in + // https://tools.ietf.org/html/rfc6465 + static constexpr char kCsrcAudioLevelsUri[] = + "urn:ietf:params:rtp-hdrext:csrc-audio-level"; + // Inclusive min and max IDs for two-byte header extensions and one-byte // header extensions, per RFC8285 Section 4.2-4.3. 
static constexpr int kMinId = 1; @@ -462,6 +497,9 @@ struct RTC_EXPORT RtpEncodingParameters { // For video, scale the resolution down by this factor. absl::optional scale_resolution_down_by; + // https://w3c.github.io/webrtc-svc/#rtcrtpencodingparameters + absl::optional scalability_mode; + // For an RtpSender, set to true to cause this encoding to be encoded and // sent, and false for it not to be encoded and sent. This allows control // across multiple encodings of a sender for turning simulcast layers on and @@ -474,6 +512,10 @@ struct RTC_EXPORT RtpEncodingParameters { // Called "encodingId" in ORTC. std::string rid; + // Allow dynamic frame length changes for audio: + // https://w3c.github.io/webrtc-extensions/#dom-rtcrtpencodingparameters-adaptiveptime + bool adaptive_ptime = false; + bool operator==(const RtpEncodingParameters& o) const { return ssrc == o.ssrc && bitrate_priority == o.bitrate_priority && network_priority == o.network_priority && @@ -482,7 +524,8 @@ struct RTC_EXPORT RtpEncodingParameters { max_framerate == o.max_framerate && num_temporal_layers == o.num_temporal_layers && scale_resolution_down_by == o.scale_resolution_down_by && - active == o.active && rid == o.rid; + active == o.active && rid == o.rid && + adaptive_ptime == o.adaptive_ptime; } bool operator!=(const RtpEncodingParameters& o) const { return !(*this == o); diff --git a/api/rtp_parameters_unittest.cc b/api/rtp_parameters_unittest.cc index 5928cbda63..51ad426748 100644 --- a/api/rtp_parameters_unittest.cc +++ b/api/rtp_parameters_unittest.cc @@ -23,28 +23,249 @@ static const RtpExtension kExtension1(kExtensionUri1, 1); static const RtpExtension kExtension1Encrypted(kExtensionUri1, 10, true); static const RtpExtension kExtension2(kExtensionUri2, 2); -TEST(RtpExtensionTest, FilterDuplicateNonEncrypted) { +TEST(RtpExtensionTest, DeduplicateHeaderExtensions) { std::vector extensions; std::vector filtered; + extensions.clear(); + extensions.push_back(kExtension1); + 
extensions.push_back(kExtension1Encrypted); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ(std::vector{kExtension1}, filtered); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kPreferEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ(std::vector{kExtension1Encrypted}, filtered); + + extensions.clear(); extensions.push_back(kExtension1); extensions.push_back(kExtension1Encrypted); - filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ(std::vector{kExtension1Encrypted}, filtered); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ(std::vector{kExtension1}, filtered); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kPreferEncryptedExtension); EXPECT_EQ(1u, filtered.size()); EXPECT_EQ(std::vector{kExtension1Encrypted}, filtered); extensions.clear(); extensions.push_back(kExtension1Encrypted); extensions.push_back(kExtension1); - filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); EXPECT_EQ(1u, filtered.size()); EXPECT_EQ(std::vector{kExtension1Encrypted}, filtered); extensions.clear(); 
extensions.push_back(kExtension1); extensions.push_back(kExtension2); - filtered = RtpExtension::FilterDuplicateNonEncrypted(extensions); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + EXPECT_EQ(2u, filtered.size()); + EXPECT_EQ(extensions, filtered); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kPreferEncryptedExtension); EXPECT_EQ(2u, filtered.size()); EXPECT_EQ(extensions, filtered); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); + EXPECT_EQ(0u, filtered.size()); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension2); + extensions.push_back(kExtension1Encrypted); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kDiscardEncryptedExtension); + EXPECT_EQ(2u, filtered.size()); + EXPECT_EQ((std::vector{kExtension1, kExtension2}), filtered); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kPreferEncryptedExtension); + EXPECT_EQ(2u, filtered.size()); + EXPECT_EQ((std::vector{kExtension1Encrypted, kExtension2}), + filtered); + filtered = RtpExtension::DeduplicateHeaderExtensions( + extensions, RtpExtension::Filter::kRequireEncryptedExtension); + EXPECT_EQ(1u, filtered.size()); + EXPECT_EQ((std::vector{kExtension1Encrypted}), filtered); +} + +TEST(RtpExtensionTest, FindHeaderExtensionByUriAndEncryption) { + std::vector extensions; + + extensions.clear(); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, false)); + + extensions.clear(); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, false)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, true)); 
+ EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri2, false)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension2); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, false)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri2, false)); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri1, true)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUriAndEncryption( + extensions, kExtensionUri2, true)); +} + +TEST(RtpExtensionTest, FindHeaderExtensionByUri) { + std::vector extensions; + + extensions.clear(); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, 
kExtensionUri2, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1Encrypted); + extensions.push_back(kExtension1); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension2); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + 
RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kRequireEncryptedExtension)); + + extensions.clear(); + extensions.push_back(kExtension1); + extensions.push_back(kExtension2); + extensions.push_back(kExtension1Encrypted); + EXPECT_EQ(kExtension1, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(kExtension1Encrypted, + *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri1, + RtpExtension::Filter::kRequireEncryptedExtension)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kDiscardEncryptedExtension)); + EXPECT_EQ(kExtension2, *RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kPreferEncryptedExtension)); + EXPECT_EQ(nullptr, RtpExtension::FindHeaderExtensionByUri( + extensions, kExtensionUri2, + RtpExtension::Filter::kRequireEncryptedExtension)); } } // namespace webrtc diff --git a/api/rtp_receiver_interface.h b/api/rtp_receiver_interface.h index a15864e34a..327c9f2fee 100644 --- 
a/api/rtp_receiver_interface.h +++ b/api/rtp_receiver_interface.h @@ -22,11 +22,9 @@ #include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" #include "api/media_types.h" -#include "api/proxy.h" #include "api/rtp_parameters.h" #include "api/scoped_refptr.h" #include "api/transport/rtp/rtp_source.h" -#include "rtc_base/deprecation.h" #include "rtc_base/ref_count.h" #include "rtc_base/system/rtc_export.h" @@ -101,11 +99,13 @@ class RTC_EXPORT RtpReceiverInterface : public rtc::RefCountInterface { // before it is sent across the network. This will decrypt the entire frame // using the user provided decryption mechanism regardless of whether SRTP is // enabled or not. + // TODO(bugs.webrtc.org/12772): Remove. virtual void SetFrameDecryptor( rtc::scoped_refptr frame_decryptor); // Returns a pointer to the frame decryptor set previously by the // user. This can be used to update the state of the object. + // TODO(bugs.webrtc.org/12772): Remove. virtual rtc::scoped_refptr GetFrameDecryptor() const; // Sets a frame transformer between the depacketizer and the decoder to enable @@ -118,32 +118,6 @@ class RTC_EXPORT RtpReceiverInterface : public rtc::RefCountInterface { ~RtpReceiverInterface() override = default; }; -// Define proxy for RtpReceiverInterface. -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. 
-BEGIN_SIGNALING_PROXY_MAP(RtpReceiver) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_CONSTMETHOD0(rtc::scoped_refptr, track) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, dtls_transport) -PROXY_CONSTMETHOD0(std::vector, stream_ids) -PROXY_CONSTMETHOD0(std::vector>, - streams) -PROXY_CONSTMETHOD0(cricket::MediaType, media_type) -PROXY_CONSTMETHOD0(std::string, id) -PROXY_CONSTMETHOD0(RtpParameters, GetParameters) -PROXY_METHOD1(void, SetObserver, RtpReceiverObserverInterface*) -PROXY_METHOD1(void, SetJitterBufferMinimumDelay, absl::optional) -PROXY_CONSTMETHOD0(std::vector, GetSources) -PROXY_METHOD1(void, - SetFrameDecryptor, - rtc::scoped_refptr) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, - GetFrameDecryptor) -PROXY_METHOD1(void, - SetDepacketizerToDecoderFrameTransformer, - rtc::scoped_refptr) -END_PROXY_MAP() - } // namespace webrtc #endif // API_RTP_RECEIVER_INTERFACE_H_ diff --git a/api/rtp_sender_interface.h b/api/rtp_sender_interface.h index bdbd6dc645..9ffad68644 100644 --- a/api/rtp_sender_interface.h +++ b/api/rtp_sender_interface.h @@ -23,7 +23,6 @@ #include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" #include "api/media_types.h" -#include "api/proxy.h" #include "api/rtc_error.h" #include "api/rtp_parameters.h" #include "api/scoped_refptr.h" @@ -101,33 +100,6 @@ class RTC_EXPORT RtpSenderInterface : public rtc::RefCountInterface { ~RtpSenderInterface() override = default; }; -// Define proxy for RtpSenderInterface. -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. 
-BEGIN_SIGNALING_PROXY_MAP(RtpSender) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_METHOD1(bool, SetTrack, MediaStreamTrackInterface*) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, track) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, dtls_transport) -PROXY_CONSTMETHOD0(uint32_t, ssrc) -PROXY_CONSTMETHOD0(cricket::MediaType, media_type) -PROXY_CONSTMETHOD0(std::string, id) -PROXY_CONSTMETHOD0(std::vector, stream_ids) -PROXY_CONSTMETHOD0(std::vector, init_send_encodings) -PROXY_CONSTMETHOD0(RtpParameters, GetParameters) -PROXY_METHOD1(RTCError, SetParameters, const RtpParameters&) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, GetDtmfSender) -PROXY_METHOD1(void, - SetFrameEncryptor, - rtc::scoped_refptr) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, - GetFrameEncryptor) -PROXY_METHOD1(void, SetStreams, const std::vector&) -PROXY_METHOD1(void, - SetEncoderToPacketizerFrameTransformer, - rtc::scoped_refptr) -END_PROXY_MAP() - } // namespace webrtc #endif // API_RTP_SENDER_INTERFACE_H_ diff --git a/api/rtp_transceiver_interface.cc b/api/rtp_transceiver_interface.cc index d4e2b26e33..fd5085c336 100644 --- a/api/rtp_transceiver_interface.cc +++ b/api/rtp_transceiver_interface.cc @@ -25,6 +25,23 @@ RtpTransceiverInterface::fired_direction() const { return absl::nullopt; } +bool RtpTransceiverInterface::stopping() const { + return false; +} + +void RtpTransceiverInterface::Stop() { + StopInternal(); +} + +RTCError RtpTransceiverInterface::StopStandard() { + RTC_NOTREACHED() << "DEBUG: RtpTransceiverInterface::StopStandard called"; + return RTCError::OK(); +} + +void RtpTransceiverInterface::StopInternal() { + RTC_NOTREACHED() << "DEBUG: RtpTransceiverInterface::StopInternal called"; +} + RTCError RtpTransceiverInterface::SetCodecPreferences( rtc::ArrayView) { RTC_NOTREACHED() << "Not implemented"; @@ -41,4 +58,28 @@ RtpTransceiverInterface::HeaderExtensionsToOffer() const { return {}; } +webrtc::RTCError RtpTransceiverInterface::SetOfferedRtpHeaderExtensions( + rtc::ArrayView + 
header_extensions_to_offer) { + return webrtc::RTCError(webrtc::RTCErrorType::UNSUPPORTED_OPERATION); +} + +std::vector +RtpTransceiverInterface::HeaderExtensionsNegotiated() const { + return {}; +} + +// TODO(bugs.webrtc.org/11839) Remove default implementations when clients +// are updated. +void RtpTransceiverInterface::SetDirection( + RtpTransceiverDirection new_direction) { + SetDirectionWithError(new_direction); +} + +RTCError RtpTransceiverInterface::SetDirectionWithError( + RtpTransceiverDirection new_direction) { + RTC_NOTREACHED() << "Default implementation called"; + return RTCError::OK(); +} + } // namespace webrtc diff --git a/api/rtp_transceiver_interface.h b/api/rtp_transceiver_interface.h index 9dbafd46ec..4799c4b153 100644 --- a/api/rtp_transceiver_interface.h +++ b/api/rtp_transceiver_interface.h @@ -14,6 +14,7 @@ #include #include +#include "absl/base/attributes.h" #include "absl/types/optional.h" #include "api/array_view.h" #include "api/media_types.h" @@ -89,6 +90,16 @@ class RTC_EXPORT RtpTransceiverInterface : public rtc::RefCountInterface { // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-stopped virtual bool stopped() const = 0; + // The stopping attribute indicates that the user has indicated that the + // sender of this transceiver will stop sending, and that the receiver will + // no longer receive. It is always true if stopped() is true. + // If stopping() is true and stopped() is false, it means that the + // transceiver's stop() method has been called, but the negotiation with + // the other end for shutting down the transceiver is not yet done. + // https://w3c.github.io/webrtc-pc/#dfn-stopping-0 + // TODO(hta): Remove default implementation. + virtual bool stopping() const; + // The direction attribute indicates the preferred direction of this // transceiver, which will be used in calls to CreateOffer and CreateAnswer. 
// https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-direction @@ -99,7 +110,11 @@ class RTC_EXPORT RtpTransceiverInterface : public rtc::RefCountInterface { // CreateOffer and CreateAnswer mark the corresponding media descriptions as // sendrecv, sendonly, recvonly, or inactive. // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-direction - virtual void SetDirection(RtpTransceiverDirection new_direction) = 0; + // TODO(hta): Deprecate SetDirection without error and rename + // SetDirectionWithError to SetDirection, remove default implementations. + ABSL_DEPRECATED("Use SetDirectionWithError instead") + virtual void SetDirection(RtpTransceiverDirection new_direction); + virtual RTCError SetDirectionWithError(RtpTransceiverDirection new_direction); // The current_direction attribute indicates the current direction negotiated // for this transceiver. If this transceiver has never been represented in an @@ -114,10 +129,19 @@ class RTC_EXPORT RtpTransceiverInterface : public rtc::RefCountInterface { // Exposed in the public interface for use by Chromium. virtual absl::optional fired_direction() const; - // The Stop method irreversibly stops the RtpTransceiver. The sender of this - // transceiver will no longer send, the receiver will no longer receive. + // Initiates a stop of the transceiver. + // The stop is complete when stopped() returns true. + // A stopped transceiver can be reused for a different track. // https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-stop - virtual void Stop() = 0; + // TODO(hta): Rename to Stop() when users of the non-standard Stop() are + // updated. + virtual RTCError StopStandard(); + + // Stops a transceiver immediately, without waiting for signalling. + // This is an internal function, and is exposed for historical reasons. 
+ // https://w3c.github.io/webrtc-pc/#dfn-stop-the-rtcrtptransceiver + virtual void StopInternal(); + ABSL_DEPRECATED("Use StopStandard instead") virtual void Stop(); // The SetCodecPreferences method overrides the default codec preferences used // by WebRTC for this transceiver. @@ -133,6 +157,19 @@ class RTC_EXPORT RtpTransceiverInterface : public rtc::RefCountInterface { virtual std::vector HeaderExtensionsToOffer() const; + // Readonly attribute which is either empty if negotiation has not yet + // happened, or a vector of the negotiated header extensions. + // https://w3c.github.io/webrtc-extensions/#rtcrtptransceiver-interface + virtual std::vector HeaderExtensionsNegotiated() + const; + + // The SetOfferedRtpHeaderExtensions method modifies the next SDP negotiation + // so that it negotiates use of header extensions which are not kStopped. + // https://w3c.github.io/webrtc-extensions/#rtcrtptransceiver-interface + virtual webrtc::RTCError SetOfferedRtpHeaderExtensions( + rtc::ArrayView + header_extensions_to_offer); + protected: ~RtpTransceiverInterface() override = default; }; diff --git a/api/scoped_refptr.h b/api/scoped_refptr.h index fa4e83dbaf..4e3f0ebfc8 100644 --- a/api/scoped_refptr.h +++ b/api/scoped_refptr.h @@ -104,6 +104,7 @@ class scoped_refptr { T* get() const { return ptr_; } operator T*() const { return ptr_; } + T& operator*() const { return *ptr_; } T* operator->() const { return ptr_; } // Returns the (possibly null) raw pointer, and makes the scoped_refptr hold a diff --git a/api/sctp_transport_interface.h b/api/sctp_transport_interface.h index 6af0bfce34..7080889fcf 100644 --- a/api/sctp_transport_interface.h +++ b/api/sctp_transport_interface.h @@ -35,6 +35,8 @@ enum class SctpTransportState { // http://w3c.github.io/webrtc-pc/#rtcsctptransport-interface class RTC_EXPORT SctpTransportInformation { public: + SctpTransportInformation() = default; + SctpTransportInformation(const SctpTransportInformation&) = default; explicit
SctpTransportInformation(SctpTransportState state); SctpTransportInformation( SctpTransportState state, diff --git a/api/sequence_checker.h b/api/sequence_checker.h new file mode 100644 index 0000000000..5db7b9e4df --- /dev/null +++ b/api/sequence_checker.h @@ -0,0 +1,116 @@ +/* + * Copyright 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef API_SEQUENCE_CHECKER_H_ +#define API_SEQUENCE_CHECKER_H_ + +#include "rtc_base/checks.h" +#include "rtc_base/synchronization/sequence_checker_internal.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +// SequenceChecker is a helper class used to help verify that some methods +// of a class are called on the same task queue or thread. A +// SequenceChecker is bound to a task queue if the object is +// created on a task queue, or a thread otherwise. +// +// +// Example: +// class MyClass { +// public: +// void Foo() { +// RTC_DCHECK_RUN_ON(&sequence_checker_); +// ... (do stuff) ... +// } +// +// private: +// SequenceChecker sequence_checker_; +// } +// +// In Release mode, IsCurrent will always return true. +class RTC_LOCKABLE SequenceChecker +#if RTC_DCHECK_IS_ON + : public webrtc_sequence_checker_internal::SequenceCheckerImpl { + using Impl = webrtc_sequence_checker_internal::SequenceCheckerImpl; +#else + : public webrtc_sequence_checker_internal::SequenceCheckerDoNothing { + using Impl = webrtc_sequence_checker_internal::SequenceCheckerDoNothing; +#endif + public: + // Returns true if sequence checker is attached to the current sequence. + bool IsCurrent() const { return Impl::IsCurrent(); } + // Detaches checker from sequence to which it is attached.
Next attempt + // to do a check with this checker will result in attaching this checker + // to the sequence on which check was performed. + void Detach() { Impl::Detach(); } +}; + +} // namespace webrtc + +// RTC_RUN_ON/RTC_GUARDED_BY/RTC_DCHECK_RUN_ON macros allows to annotate +// variables are accessed from same thread/task queue. +// Using tools designed to check mutexes, it checks at compile time everywhere +// variable is access, there is a run-time dcheck thread/task queue is correct. +// +// class SequenceCheckerExample { +// public: +// int CalledFromPacer() RTC_RUN_ON(pacer_sequence_checker_) { +// return var2_; +// } +// +// void CallMeFromPacer() { +// RTC_DCHECK_RUN_ON(&pacer_sequence_checker_) +// << "Should be called from pacer"; +// CalledFromPacer(); +// } +// +// private: +// int pacer_var_ RTC_GUARDED_BY(pacer_sequence_checker_); +// SequenceChecker pacer_sequence_checker_; +// }; +// +// class TaskQueueExample { +// public: +// class Encoder { +// public: +// rtc::TaskQueueBase& Queue() { return encoder_queue_; } +// void Encode() { +// RTC_DCHECK_RUN_ON(&encoder_queue_); +// DoSomething(var_); +// } +// +// private: +// rtc::TaskQueueBase& encoder_queue_; +// Frame var_ RTC_GUARDED_BY(encoder_queue_); +// }; +// +// void Encode() { +// // Will fail at runtime when DCHECK is enabled: +// // encoder_->Encode(); +// // Will work: +// rtc::scoped_refptr encoder = encoder_; +// encoder_->Queue().PostTask([encoder] { encoder->Encode(); }); +// } +// +// private: +// rtc::scoped_refptr encoder_; +// } + +// Document if a function expected to be called from same thread/task queue. 
+#define RTC_RUN_ON(x) \ + RTC_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x)) + +#define RTC_DCHECK_RUN_ON(x) \ + webrtc::webrtc_sequence_checker_internal::SequenceCheckerScope \ + seq_check_scope(x); \ + RTC_DCHECK((x)->IsCurrent()) \ + << webrtc::webrtc_sequence_checker_internal::ExpectationToString(x) + +#endif // API_SEQUENCE_CHECKER_H_ diff --git a/rtc_base/synchronization/sequence_checker_unittest.cc b/api/sequence_checker_unittest.cc similarity index 88% rename from rtc_base/synchronization/sequence_checker_unittest.cc rename to api/sequence_checker_unittest.cc index a173a825bd..21a0894a8e 100644 --- a/rtc_base/synchronization/sequence_checker_unittest.cc +++ b/api/sequence_checker_unittest.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "rtc_base/synchronization/sequence_checker.h" +#include "api/sequence_checker.h" #include #include @@ -17,7 +17,6 @@ #include "rtc_base/event.h" #include "rtc_base/platform_thread.h" #include "rtc_base/task_queue_for_test.h" -#include "rtc_base/thread_checker.h" #include "test/gtest.h" namespace webrtc { @@ -31,7 +30,7 @@ class CompileTimeTestForGuardedBy { int CalledOnSequence() RTC_RUN_ON(sequence_checker_) { return guarded_; } void CallMeFromSequence() { - RTC_DCHECK_RUN_ON(&sequence_checker_) << "Should be called on sequence"; + RTC_DCHECK_RUN_ON(&sequence_checker_); guarded_ = 41; } @@ -41,21 +40,14 @@ class CompileTimeTestForGuardedBy { }; void RunOnDifferentThread(rtc::FunctionView run) { - struct Object { - static void Run(void* obj) { - auto* me = static_cast(obj); - me->run(); - me->thread_has_run_event.Set(); - } - - rtc::FunctionView run; - rtc::Event thread_has_run_event; - } object{run}; - - rtc::PlatformThread thread(&Object::Run, &object, "thread"); - thread.Start(); - EXPECT_TRUE(object.thread_has_run_event.Wait(1000)); - thread.Stop(); + rtc::Event thread_has_run_event; + rtc::PlatformThread::SpawnJoinable( + [&] { + run(); + 
thread_has_run_event.Set(); + }, + "thread"); + EXPECT_TRUE(thread_has_run_event.Wait(1000)); } } // namespace diff --git a/api/set_local_description_observer_interface.h b/api/set_local_description_observer_interface.h new file mode 100644 index 0000000000..90d000cd81 --- /dev/null +++ b/api/set_local_description_observer_interface.h @@ -0,0 +1,30 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_SET_LOCAL_DESCRIPTION_OBSERVER_INTERFACE_H_ +#define API_SET_LOCAL_DESCRIPTION_OBSERVER_INTERFACE_H_ + +#include "api/rtc_error.h" +#include "rtc_base/ref_count.h" + +namespace webrtc { + +// OnSetLocalDescriptionComplete() is invoked as soon as +// PeerConnectionInterface::SetLocalDescription() operation completes, allowing +// the observer to examine the effects of the operation without delay. +class SetLocalDescriptionObserverInterface : public rtc::RefCountInterface { + public: + // On success, |error.ok()| is true.
+ virtual void OnSetLocalDescriptionComplete(RTCError error) = 0; +}; + +} // namespace webrtc + +#endif // API_SET_LOCAL_DESCRIPTION_OBSERVER_INTERFACE_H_ diff --git a/api/stats/rtc_stats.h b/api/stats/rtc_stats.h index d45902e0a5..9290e803fa 100644 --- a/api/stats/rtc_stats.h +++ b/api/stats/rtc_stats.h @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -237,6 +238,9 @@ class RTCStatsMemberInterface { kSequenceUint64, // std::vector kSequenceDouble, // std::vector kSequenceString, // std::vector + + kMapStringUint64, // std::map + kMapStringDouble, // std::map }; virtual ~RTCStatsMemberInterface() {} @@ -319,6 +323,14 @@ class RTCStatsMember : public RTCStatsMemberInterface { std::string ValueToString() const override; std::string ValueToJson() const override; + template + inline T ValueOrDefault(U default_value) const { + if (is_defined()) { + return *(*this); + } + return default_value; + } + // Assignment operators. T& operator=(const T& value) { value_ = value; @@ -355,6 +367,13 @@ class RTCStatsMember : public RTCStatsMemberInterface { T value_; }; +namespace rtc_stats_internal { + +typedef std::map MapStringUint64; +typedef std::map MapStringDouble; + +} // namespace rtc_stats_internal + #define WEBRTC_DECLARE_RTCSTATSMEMBER(T) \ template <> \ RTC_EXPORT RTCStatsMemberInterface::Type RTCStatsMember::StaticType(); \ @@ -383,6 +402,8 @@ WEBRTC_DECLARE_RTCSTATSMEMBER(std::vector); WEBRTC_DECLARE_RTCSTATSMEMBER(std::vector); WEBRTC_DECLARE_RTCSTATSMEMBER(std::vector); WEBRTC_DECLARE_RTCSTATSMEMBER(std::vector); +WEBRTC_DECLARE_RTCSTATSMEMBER(rtc_stats_internal::MapStringUint64); +WEBRTC_DECLARE_RTCSTATSMEMBER(rtc_stats_internal::MapStringDouble); // Using inheritance just so that it's obvious from the member's declaration // whether it's standardized or not. 
@@ -447,6 +468,10 @@ extern template class RTC_EXPORT_TEMPLATE_DECLARE(RTC_EXPORT) RTCNonStandardStatsMember>; extern template class RTC_EXPORT_TEMPLATE_DECLARE(RTC_EXPORT) RTCNonStandardStatsMember>; +extern template class RTC_EXPORT_TEMPLATE_DECLARE(RTC_EXPORT) + RTCNonStandardStatsMember>; +extern template class RTC_EXPORT_TEMPLATE_DECLARE(RTC_EXPORT) + RTCNonStandardStatsMember>; } // namespace webrtc diff --git a/api/stats/rtc_stats_collector_callback.h b/api/stats/rtc_stats_collector_callback.h index c3e08245ea..506cc63e6f 100644 --- a/api/stats/rtc_stats_collector_callback.h +++ b/api/stats/rtc_stats_collector_callback.h @@ -17,7 +17,7 @@ namespace webrtc { -class RTCStatsCollectorCallback : public virtual rtc::RefCountInterface { +class RTCStatsCollectorCallback : public rtc::RefCountInterface { public: ~RTCStatsCollectorCallback() override = default; diff --git a/api/stats/rtc_stats_report.h b/api/stats/rtc_stats_report.h index dc15937690..0fe5ce91f9 100644 --- a/api/stats/rtc_stats_report.h +++ b/api/stats/rtc_stats_report.h @@ -19,9 +19,11 @@ #include #include +#include "api/ref_counted_base.h" #include "api/scoped_refptr.h" #include "api/stats/rtc_stats.h" -#include "rtc_base/ref_count.h" +// TODO(tommi): Remove this include after fixing iwyu issue in chromium. +// See: third_party/blink/renderer/platform/peerconnection/rtc_stats.cc #include "rtc_base/ref_counted_object.h" #include "rtc_base/system/rtc_export.h" @@ -29,7 +31,8 @@ namespace webrtc { // A collection of stats. // This is accessible as a map from |RTCStats::id| to |RTCStats|. -class RTC_EXPORT RTCStatsReport : public rtc::RefCountInterface { +class RTC_EXPORT RTCStatsReport final + : public rtc::RefCountedNonVirtual { public: typedef std::map> StatsMap; @@ -84,8 +87,8 @@ class RTC_EXPORT RTCStatsReport : public rtc::RefCountInterface { // Removes the stats object from the report, returning ownership of it or null // if there is no object with |id|. 
std::unique_ptr Take(const std::string& id); - // Takes ownership of all the stats in |victim|, leaving it empty. - void TakeMembersFrom(rtc::scoped_refptr victim); + // Takes ownership of all the stats in |other|, leaving it empty. + void TakeMembersFrom(rtc::scoped_refptr other); // Stats iterators. Stats are ordered lexicographically on |RTCStats::id|. ConstIterator begin() const; @@ -107,11 +110,11 @@ class RTC_EXPORT RTCStatsReport : public rtc::RefCountInterface { // listing all of its stats objects. std::string ToJson() const; - friend class rtc::RefCountedObject; + protected: + friend class rtc::RefCountedNonVirtual; + ~RTCStatsReport() = default; private: - ~RTCStatsReport() override; - int64_t timestamp_us_; StatsMap stats_; }; diff --git a/api/stats/rtcstats_objects.h b/api/stats/rtcstats_objects.h index 28d841db09..2030380918 100644 --- a/api/stats/rtcstats_objects.h +++ b/api/stats/rtcstats_objects.h @@ -13,6 +13,7 @@ #include +#include #include #include #include @@ -115,6 +116,7 @@ class RTC_EXPORT RTCCodecStats final : public RTCStats { RTCCodecStats(const RTCCodecStats& other); ~RTCCodecStats() override; + RTCStatsMember transport_id; RTCStatsMember payload_type; RTCStatsMember mime_type; RTCStatsMember clock_rate; @@ -134,7 +136,7 @@ class RTC_EXPORT RTCDataChannelStats final : public RTCStats { RTCStatsMember label; RTCStatsMember protocol; - RTCStatsMember datachannelid; + RTCStatsMember data_channel_identifier; // TODO(hbos): Support enum types? "RTCStatsMember"? RTCStatsMember state; RTCStatsMember messages_sent; @@ -160,6 +162,7 @@ class RTC_EXPORT RTCIceCandidatePairStats final : public RTCStats { // TODO(hbos): Support enum types? // "RTCStatsMember"? RTCStatsMember state; + // Obsolete: priority RTCStatsMember priority; RTCStatsMember nominated; // TODO(hbos): Collect this the way the spec describes it. 
We have a value for @@ -207,9 +210,11 @@ class RTC_EXPORT RTCIceCandidateStats : public RTCStats { ~RTCIceCandidateStats() override; RTCStatsMember transport_id; + // Obsolete: is_remote RTCStatsMember is_remote; RTCStatsMember network_type; RTCStatsMember ip; + RTCStatsMember address; RTCStatsMember port; RTCStatsMember protocol; RTCStatsMember relay_protocol; @@ -218,9 +223,6 @@ class RTC_EXPORT RTCIceCandidateStats : public RTCStats { RTCStatsMember priority; // TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/632723 RTCStatsMember url; - // TODO(hbos): |deleted = true| case is not supported by |RTCStatsCollector|. - // crbug.com/632723 - RTCStatsMember deleted; // = false protected: RTCIceCandidateStats(const std::string& id, @@ -373,34 +375,64 @@ class RTC_EXPORT RTCRTPStreamStats : public RTCStats { ~RTCRTPStreamStats() override; RTCStatsMember ssrc; - // TODO(hbos): Remote case not supported by |RTCStatsCollector|. - // crbug.com/657855, 657856 - RTCStatsMember is_remote; // = false - RTCStatsMember media_type; // renamed to kind. RTCStatsMember kind; + // Obsolete: track_id RTCStatsMember track_id; RTCStatsMember transport_id; RTCStatsMember codec_id; - // FIR and PLI counts are only defined for |media_type == "video"|. - RTCStatsMember fir_count; - RTCStatsMember pli_count; - // TODO(hbos): NACK count should be collected by |RTCStatsCollector| for both - // audio and video but is only defined in the "video" case. crbug.com/657856 - RTCStatsMember nack_count; - // TODO(hbos): Not collected by |RTCStatsCollector|. crbug.com/657854 - // SLI count is only defined for |media_type == "video"|. - RTCStatsMember sli_count; - RTCStatsMember qp_sum; + + // Obsolete + RTCStatsMember media_type; // renamed to kind. 
protected: RTCRTPStreamStats(const std::string& id, int64_t timestamp_us); RTCRTPStreamStats(std::string&& id, int64_t timestamp_us); }; +// https://www.w3.org/TR/webrtc-stats/#receivedrtpstats-dict* +class RTC_EXPORT RTCReceivedRtpStreamStats : public RTCRTPStreamStats { + public: + WEBRTC_RTCSTATS_DECL(); + + RTCReceivedRtpStreamStats(const RTCReceivedRtpStreamStats& other); + ~RTCReceivedRtpStreamStats() override; + + // TODO(hbos) The following fields need to be added and migrated + // both from RTCInboundRtpStreamStats and RTCRemoteInboundRtpStreamStats: + // packetsReceived, packetsDiscarded, packetsRepaired, burstPacketsLost, + // burstPacketDiscarded, burstLossCount, burstDiscardCount, burstLossRate, + // burstDiscardRate, gapLossRate, gapDiscardRate, framesDropped, + // partialFramesLost, fullFramesLost + // crbug.com/webrtc/12532 + RTCStatsMember jitter; + RTCStatsMember packets_lost; // Signed per RFC 3550 + + protected: + RTCReceivedRtpStreamStats(const std::string&& id, int64_t timestamp_us); + RTCReceivedRtpStreamStats(std::string&& id, int64_t timestamp_us); +}; + +// https://www.w3.org/TR/webrtc-stats/#sentrtpstats-dict* +class RTC_EXPORT RTCSentRtpStreamStats : public RTCRTPStreamStats { + public: + WEBRTC_RTCSTATS_DECL(); + + RTCSentRtpStreamStats(const RTCSentRtpStreamStats& other); + ~RTCSentRtpStreamStats() override; + + RTCStatsMember packets_sent; + RTCStatsMember bytes_sent; + + protected: + RTCSentRtpStreamStats(const std::string&& id, int64_t timestamp_us); + RTCSentRtpStreamStats(std::string&& id, int64_t timestamp_us); +}; + // https://w3c.github.io/webrtc-stats/#inboundrtpstats-dict* // TODO(hbos): Support the remote case |is_remote = true|. 
// https://bugs.webrtc.org/7065 -class RTC_EXPORT RTCInboundRTPStreamStats final : public RTCRTPStreamStats { +class RTC_EXPORT RTCInboundRTPStreamStats final + : public RTCReceivedRtpStreamStats { public: WEBRTC_RTCSTATS_DECL(); @@ -409,16 +441,25 @@ class RTC_EXPORT RTCInboundRTPStreamStats final : public RTCRTPStreamStats { RTCInboundRTPStreamStats(const RTCInboundRTPStreamStats& other); ~RTCInboundRTPStreamStats() override; + RTCStatsMember remote_id; RTCStatsMember packets_received; RTCStatsMember fec_packets_received; RTCStatsMember fec_packets_discarded; RTCStatsMember bytes_received; RTCStatsMember header_bytes_received; - RTCStatsMember packets_lost; // Signed per RFC 3550 RTCStatsMember last_packet_received_timestamp; - // TODO(hbos): Collect and populate this value for both "audio" and "video", - // currently not collected for "video". https://bugs.webrtc.org/7065 - RTCStatsMember jitter; + RTCStatsMember jitter_buffer_delay; + RTCStatsMember jitter_buffer_emitted_count; + RTCStatsMember total_samples_received; + RTCStatsMember concealed_samples; + RTCStatsMember silent_concealed_samples; + RTCStatsMember concealment_events; + RTCStatsMember inserted_samples_for_deceleration; + RTCStatsMember removed_samples_for_acceleration; + RTCStatsMember audio_level; + RTCStatsMember total_audio_energy; + RTCStatsMember total_samples_duration; + RTCStatsMember frames_received; // TODO(hbos): Collect and populate this value. https://bugs.webrtc.org/7065 RTCStatsMember round_trip_time; // TODO(hbos): Collect and populate this value. https://bugs.webrtc.org/7065 @@ -441,8 +482,13 @@ class RTC_EXPORT RTCInboundRTPStreamStats final : public RTCRTPStreamStats { RTCStatsMember gap_loss_rate; // TODO(hbos): Collect and populate this value. 
https://bugs.webrtc.org/7065 RTCStatsMember gap_discard_rate; + RTCStatsMember frame_width; + RTCStatsMember frame_height; + RTCStatsMember frame_bit_depth; + RTCStatsMember frames_per_second; RTCStatsMember frames_decoded; RTCStatsMember key_frames_decoded; + RTCStatsMember frames_dropped; RTCStatsMember total_decode_time; RTCStatsMember total_inter_frame_delay; RTCStatsMember total_squared_inter_frame_delay; @@ -453,6 +499,11 @@ class RTC_EXPORT RTCInboundRTPStreamStats final : public RTCRTPStreamStats { // TODO(hbos): This is only implemented for video; implement it for audio as // well. RTCStatsMember decoder_implementation; + // FIR and PLI counts are only defined for |media_type == "video"|. + RTCStatsMember fir_count; + RTCStatsMember pli_count; + RTCStatsMember nack_count; + RTCStatsMember qp_sum; }; // https://w3c.github.io/webrtc-stats/#outboundrtpstats-dict* @@ -490,10 +541,8 @@ class RTC_EXPORT RTCOutboundRTPStreamStats final : public RTCRTPStreamStats { // implement it for audio as well. RTCStatsMember total_packet_send_delay; // Enum type RTCQualityLimitationReason - // TODO(https://crbug.com/webrtc/10686): Also expose - // qualityLimitationDurations. Requires RTCStatsMember support for - // "record", see https://crbug.com/webrtc/10685. RTCStatsMember quality_limitation_reason; + RTCStatsMember> quality_limitation_durations; // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges RTCStatsMember quality_limitation_resolution_changes; // https://henbos.github.io/webrtc-provisional-stats/#dom-rtcoutboundrtpstreamstats-contenttype @@ -501,18 +550,16 @@ class RTC_EXPORT RTCOutboundRTPStreamStats final : public RTCRTPStreamStats { // TODO(hbos): This is only implemented for video; implement it for audio as // well. RTCStatsMember encoder_implementation; + // FIR and PLI counts are only defined for |media_type == "video"|. 
+ RTCStatsMember fir_count; + RTCStatsMember pli_count; + RTCStatsMember nack_count; + RTCStatsMember qp_sum; }; -// TODO(https://crbug.com/webrtc/10671): Refactor the stats dictionaries to have -// the same hierarchy as in the spec; implement RTCReceivedRtpStreamStats. -// Several metrics are shared between "outbound-rtp", "remote-inbound-rtp", -// "inbound-rtp" and "remote-outbound-rtp". In the spec there is a hierarchy of -// dictionaries that minimizes defining the same metrics in multiple places. -// From JavaScript this hierarchy is not observable and the spec's hierarchy is -// purely editorial. In C++ non-final classes in the hierarchy could be used to -// refer to different stats objects within the hierarchy. // https://w3c.github.io/webrtc-stats/#remoteinboundrtpstats-dict* -class RTC_EXPORT RTCRemoteInboundRtpStreamStats final : public RTCStats { +class RTC_EXPORT RTCRemoteInboundRtpStreamStats final + : public RTCReceivedRtpStreamStats { public: WEBRTC_RTCSTATS_DECL(); @@ -521,17 +568,6 @@ class RTC_EXPORT RTCRemoteInboundRtpStreamStats final : public RTCStats { RTCRemoteInboundRtpStreamStats(const RTCRemoteInboundRtpStreamStats& other); ~RTCRemoteInboundRtpStreamStats() override; - // In the spec RTCRemoteInboundRtpStreamStats inherits from RTCRtpStreamStats - // and RTCReceivedRtpStreamStats. The members here are listed based on where - // they are defined in the spec. 
- // RTCRtpStreamStats - RTCStatsMember ssrc; - RTCStatsMember kind; - RTCStatsMember transport_id; - RTCStatsMember codec_id; - // RTCReceivedRtpStreamStats - RTCStatsMember packets_lost; - RTCStatsMember jitter; // TODO(hbos): The following RTCReceivedRtpStreamStats metrics should also be // implemented: packetsReceived, packetsDiscarded, packetsRepaired, // burstPacketsLost, burstPacketsDiscarded, burstLossCount, burstDiscardCount, @@ -539,8 +575,25 @@ class RTC_EXPORT RTCRemoteInboundRtpStreamStats final : public RTCStats { // RTCRemoteInboundRtpStreamStats RTCStatsMember local_id; RTCStatsMember round_trip_time; - // TODO(hbos): The following RTCRemoteInboundRtpStreamStats metric should also - // be implemented: fractionLost. + RTCStatsMember fraction_lost; + RTCStatsMember total_round_trip_time; + RTCStatsMember round_trip_time_measurements; +}; + +// https://w3c.github.io/webrtc-stats/#remoteoutboundrtpstats-dict* +class RTC_EXPORT RTCRemoteOutboundRtpStreamStats final + : public RTCSentRtpStreamStats { + public: + WEBRTC_RTCSTATS_DECL(); + + RTCRemoteOutboundRtpStreamStats(const std::string& id, int64_t timestamp_us); + RTCRemoteOutboundRtpStreamStats(std::string&& id, int64_t timestamp_us); + RTCRemoteOutboundRtpStreamStats(const RTCRemoteOutboundRtpStreamStats& other); + ~RTCRemoteOutboundRtpStreamStats() override; + + RTCStatsMember local_id; + RTCStatsMember remote_timestamp; + RTCStatsMember reports_sent; }; // https://w3c.github.io/webrtc-stats/#dom-rtcmediasourcestats @@ -572,6 +625,8 @@ class RTC_EXPORT RTCAudioSourceStats final : public RTCMediaSourceStats { RTCStatsMember audio_level; RTCStatsMember total_audio_energy; RTCStatsMember total_samples_duration; + RTCStatsMember echo_return_loss; + RTCStatsMember echo_return_loss_enhancement; }; // https://w3c.github.io/webrtc-stats/#dom-rtcvideosourcestats @@ -586,7 +641,6 @@ class RTC_EXPORT RTCVideoSourceStats final : public RTCMediaSourceStats { RTCStatsMember width; RTCStatsMember height; - // 
TODO(hbos): Implement this metric. RTCStatsMember frames; RTCStatsMember frames_per_second; }; @@ -602,7 +656,9 @@ class RTC_EXPORT RTCTransportStats final : public RTCStats { ~RTCTransportStats() override; RTCStatsMember bytes_sent; + RTCStatsMember packets_sent; RTCStatsMember bytes_received; + RTCStatsMember packets_received; RTCStatsMember rtcp_transport_stats_id; // TODO(hbos): Support enum types? "RTCStatsMember"? RTCStatsMember dtls_state; diff --git a/api/stats_types.cc b/api/stats_types.cc index 7dcbd134a1..6fdc7e85a5 100644 --- a/api/stats_types.cc +++ b/api/stats_types.cc @@ -15,6 +15,7 @@ #include "absl/algorithm/container.h" #include "rtc_base/checks.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/string_encode.h" // TODO(tommi): Could we have a static map of value name -> expected type // and use this to RTC_DCHECK on correct usage (somewhat strongly typed values)? diff --git a/api/stats_types.h b/api/stats_types.h index c1922a8a22..d032462da6 100644 --- a/api/stats_types.h +++ b/api/stats_types.h @@ -21,11 +21,10 @@ #include #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/ref_count.h" -#include "rtc_base/string_encode.h" #include "rtc_base/system/rtc_export.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -344,7 +343,7 @@ class RTC_EXPORT StatsReport { const StatsValueName name; private: - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; mutable int ref_count_ RTC_GUARDED_BY(thread_checker_) = 0; const Type type_; @@ -447,7 +446,7 @@ class StatsCollection { private: Container list_; - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; }; } // namespace webrtc diff --git a/api/task_queue/BUILD.gn b/api/task_queue/BUILD.gn index 4c9f591ec1..1072057e3f 100644 --- a/api/task_queue/BUILD.gn +++ b/api/task_queue/BUILD.gn @@ -21,6 +21,8 @@ rtc_library("task_queue") { "../../rtc_base:checks", 
"../../rtc_base:macromagic", "../../rtc_base/system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:config", "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/strings", @@ -51,6 +53,8 @@ rtc_library("task_queue_test") { deps = [ "../../../webrtc_overrides:webrtc_component", "../../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", ] @@ -62,6 +66,8 @@ rtc_library("task_queue_test") { "../../rtc_base:timeutils", "../../rtc_base/task_utils:to_queued_task", "../../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", ] diff --git a/api/task_queue/task_queue_base.h b/api/task_queue/task_queue_base.h index 90b1efd31e..88419edd8f 100644 --- a/api/task_queue/task_queue_base.h +++ b/api/task_queue/task_queue_base.h @@ -27,12 +27,14 @@ class RTC_LOCKABLE RTC_EXPORT TaskQueueBase { // Starts destruction of the task queue. // On return ensures no task are running and no new tasks are able to start // on the task queue. - // Responsible for deallocation. Deallocation may happen syncrhoniously during + // Responsible for deallocation. Deallocation may happen synchronously during // Delete or asynchronously after Delete returns. // Code not running on the TaskQueue should not make any assumption when // TaskQueue is deallocated and thus should not call any methods after Delete. // Code running on the TaskQueue should not call Delete, but can assume // TaskQueue still exists and may call other methods, e.g. PostTask. + // Should be called on the same task queue or thread that this task queue + // was created on. virtual void Delete() = 0; // Schedules a task to execute. Tasks are executed in FIFO order. @@ -43,17 +45,20 @@ class RTC_LOCKABLE RTC_EXPORT TaskQueueBase { // TaskQueue or it may happen asynchronously after TaskQueue is deleted. 
// This may vary from one implementation to the next so assumptions about // lifetimes of pending tasks should not be made. + // May be called on any thread or task queue, including this task queue. virtual void PostTask(std::unique_ptr task) = 0; // Schedules a task to execute a specified number of milliseconds from when // the call is made. The precision should be considered as "best effort" // and in some cases, such as on Windows when all high precision timers have // been used up, can be off by as much as 15 millseconds. + // May be called on any thread or task queue, including this task queue. virtual void PostDelayedTask(std::unique_ptr task, uint32_t milliseconds) = 0; // Returns the task queue that is running the current thread. // Returns nullptr if this thread is not associated with any task queue. + // May be called on any thread or task queue, including this task queue. static TaskQueueBase* Current(); bool IsCurrent() const { return Current() == this; } diff --git a/api/task_queue/task_queue_test.cc b/api/task_queue/task_queue_test.cc index 3f638b7c69..0d411d2d9c 100644 --- a/api/task_queue/task_queue_test.cc +++ b/api/task_queue/task_queue_test.cc @@ -271,5 +271,10 @@ TEST_P(TaskQueueTest, PostTwoWithSharedUnprotectedState) { EXPECT_TRUE(done.Wait(1000)); } +// TaskQueueTest is a set of tests for any implementation of the TaskQueueBase. +// Tests are instantiated next to the concrete implementation(s). 
+// https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#creating-value-parameterized-abstract-tests +GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(TaskQueueTest); + } // namespace } // namespace webrtc diff --git a/api/test/DEPS b/api/test/DEPS index 1a02bf16e9..329076830c 100644 --- a/api/test/DEPS +++ b/api/test/DEPS @@ -8,15 +8,6 @@ specific_include_rules = { "dummy_peer_connection\.h": [ "+rtc_base/ref_counted_object.h", ], - "fake_constraints\.h": [ - "+rtc_base/string_encode.h", - ], - "loopback_media_transport\.h": [ - "+rtc_base/async_invoker.h", - "+rtc_base/critical_section.h", - "+rtc_base/thread.h", - "+rtc_base/thread_checker.h", - ], "neteq_factory_with_codecs\.h": [ "+system_wrappers/include/clock.h", ], diff --git a/api/test/OWNERS b/api/test/OWNERS index 770c299568..a7392abe31 100644 --- a/api/test/OWNERS +++ b/api/test/OWNERS @@ -1,2 +1,5 @@ +mbonadei@webrtc.org +sprang@webrtc.org srte@webrtc.org +titovartem@webrtc.org diff --git a/api/test/audio_quality_analyzer_interface.h b/api/test/audio_quality_analyzer_interface.h index 88392d7fd2..c1044795d1 100644 --- a/api/test/audio_quality_analyzer_interface.h +++ b/api/test/audio_quality_analyzer_interface.h @@ -14,7 +14,7 @@ #include #include "api/test/stats_observer_interface.h" -#include "api/test/track_id_stream_label_map.h" +#include "api/test/track_id_stream_info_map.h" namespace webrtc { namespace webrtc_pc_e2e { @@ -31,7 +31,7 @@ class AudioQualityAnalyzerInterface : public StatsObserverInterface { // stream_id matching. The caller is responsible for ensuring the // AnalyzerHelper outlives the instance of the AudioQualityAnalyzerInterface. virtual void Start(std::string test_case_name, - TrackIdStreamLabelMap* analyzer_helper) = 0; + TrackIdStreamInfoMap* analyzer_helper) = 0; // Will be called by the framework at the end of the test. The analyzer // has to finalize all its stats and it should report them. 
diff --git a/api/test/compile_all_headers.cc b/api/test/compile_all_headers.cc index d9cc82cbb0..5ecdcc1eb8 100644 --- a/api/test/compile_all_headers.cc +++ b/api/test/compile_all_headers.cc @@ -27,14 +27,18 @@ // "api/test/videocodec_test_fixture.h" // "api/test/videocodec_test_stats.h" +#include "api/test/dummy_peer_connection.h" #include "api/test/fake_frame_decryptor.h" #include "api/test/fake_frame_encryptor.h" -#include "api/test/fake_media_transport.h" -#include "api/test/loopback_media_transport.h" +#include "api/test/mock_async_dns_resolver.h" #include "api/test/mock_audio_mixer.h" +#include "api/test/mock_data_channel.h" #include "api/test/mock_frame_decryptor.h" #include "api/test/mock_frame_encryptor.h" +#include "api/test/mock_media_stream_interface.h" +#include "api/test/mock_peer_connection_factory_interface.h" #include "api/test/mock_peerconnectioninterface.h" +#include "api/test/mock_rtp_transceiver.h" #include "api/test/mock_rtpreceiver.h" #include "api/test/mock_rtpsender.h" #include "api/test/mock_transformable_video_frame.h" diff --git a/api/test/create_network_emulation_manager.h b/api/test/create_network_emulation_manager.h index c57c34874c..f444743786 100644 --- a/api/test/create_network_emulation_manager.h +++ b/api/test/create_network_emulation_manager.h @@ -1,4 +1,3 @@ - /* * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. * @@ -18,6 +17,7 @@ namespace webrtc { +// Returns a non-null NetworkEmulationManager instance. 
std::unique_ptr CreateNetworkEmulationManager( TimeMode mode = TimeMode::kRealTime); diff --git a/api/test/create_peerconnection_quality_test_fixture.cc b/api/test/create_peerconnection_quality_test_fixture.cc index 1e027bf31a..2d9d0821fc 100644 --- a/api/test/create_peerconnection_quality_test_fixture.cc +++ b/api/test/create_peerconnection_quality_test_fixture.cc @@ -13,6 +13,7 @@ #include #include +#include "api/test/time_controller.h" #include "test/pc/e2e/peer_connection_quality_test.h" namespace webrtc { @@ -21,11 +22,12 @@ namespace webrtc_pc_e2e { std::unique_ptr CreatePeerConnectionE2EQualityTestFixture( std::string test_case_name, + TimeController& time_controller, std::unique_ptr audio_quality_analyzer, std::unique_ptr video_quality_analyzer) { return std::make_unique( - std::move(test_case_name), std::move(audio_quality_analyzer), - std::move(video_quality_analyzer)); + std::move(test_case_name), time_controller, + std::move(audio_quality_analyzer), std::move(video_quality_analyzer)); } } // namespace webrtc_pc_e2e diff --git a/api/test/create_peerconnection_quality_test_fixture.h b/api/test/create_peerconnection_quality_test_fixture.h index 330d86de02..95b9ced5d2 100644 --- a/api/test/create_peerconnection_quality_test_fixture.h +++ b/api/test/create_peerconnection_quality_test_fixture.h @@ -15,19 +15,25 @@ #include "api/test/audio_quality_analyzer_interface.h" #include "api/test/peerconnection_quality_test_fixture.h" +#include "api/test/time_controller.h" #include "api/test/video_quality_analyzer_interface.h" namespace webrtc { namespace webrtc_pc_e2e { // API is in development. Can be changed/removed without notice. + // Create test fixture to establish test call between Alice and Bob. // During the test Alice will be caller and Bob will answer the call. // |test_case_name| is a name of test case, that will be used for all metrics // reporting. +// |time_controller| is used to manage all rtc::Thread's and TaskQueue +// instances. 
Instance of |time_controller| has to outlive created fixture. +// Returns a non-null PeerConnectionE2EQualityTestFixture instance. std::unique_ptr CreatePeerConnectionE2EQualityTestFixture( std::string test_case_name, + TimeController& time_controller, std::unique_ptr audio_quality_analyzer, std::unique_ptr video_quality_analyzer); diff --git a/api/test/create_time_controller.cc b/api/test/create_time_controller.cc index d3b046bd61..f7faeaab42 100644 --- a/api/test/create_time_controller.cc +++ b/api/test/create_time_controller.cc @@ -13,6 +13,8 @@ #include #include "call/call.h" +#include "call/rtp_transport_config.h" +#include "call/rtp_transport_controller_send_factory_interface.h" #include "test/time_controller/external_time_controller.h" #include "test/time_controller/simulated_time_controller.h" @@ -35,13 +37,23 @@ std::unique_ptr CreateTimeControllerBasedCallFactory( explicit TimeControllerBasedCallFactory(TimeController* time_controller) : time_controller_(time_controller) {} Call* CreateCall(const Call::Config& config) override { - return Call::Create(config, time_controller_->GetClock(), - time_controller_->CreateProcessThread("CallModules"), - time_controller_->CreateProcessThread("Pacer")); + if (!module_thread_) { + module_thread_ = SharedModuleThread::Create( + time_controller_->CreateProcessThread("CallModules"), + [this]() { module_thread_ = nullptr; }); + } + + RtpTransportConfig transportConfig = config.ExtractTransportConfig(); + + return Call::Create(config, time_controller_->GetClock(), module_thread_, + config.rtp_transport_controller_send_factory->Create( + transportConfig, time_controller_->GetClock(), + time_controller_->CreateProcessThread("Pacer"))); } private: TimeController* time_controller_; + rtc::scoped_refptr module_thread_; }; return std::make_unique(time_controller); } diff --git a/api/test/dummy_peer_connection.h b/api/test/dummy_peer_connection.h index 102b0684c0..80ae20c3c7 100644 --- a/api/test/dummy_peer_connection.h +++ 
b/api/test/dummy_peer_connection.h @@ -36,7 +36,7 @@ class DummyPeerConnection : public PeerConnectionInterface { bool AddStream(MediaStreamInterface* stream) override { return false; } void RemoveStream(MediaStreamInterface* stream) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } RTCErrorOr> AddTrack( @@ -100,24 +100,24 @@ class DummyPeerConnection : public PeerConnectionInterface { } void GetStats(RTCStatsCollectorCallback* callback) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } void GetStats( rtc::scoped_refptr selector, rtc::scoped_refptr callback) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } void GetStats( rtc::scoped_refptr selector, rtc::scoped_refptr callback) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } void ClearStatsCache() override {} - rtc::scoped_refptr CreateDataChannel( + RTCErrorOr> CreateDataChannelOrError( const std::string& label, const DataChannelInit* config) override { - return nullptr; + return RTCError(RTCErrorType::INTERNAL_ERROR, "Dummy function called"); } const SessionDescriptionInterface* local_description() const override { @@ -145,33 +145,33 @@ class DummyPeerConnection : public PeerConnectionInterface { return nullptr; } - void RestartIce() override { FATAL() << "Not implemented"; } + void RestartIce() override { RTC_CHECK_NOTREACHED(); } // Create a new offer. // The CreateSessionDescriptionObserver callback will be called when done. 
void CreateOffer(CreateSessionDescriptionObserver* observer, const RTCOfferAnswerOptions& options) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } void CreateAnswer(CreateSessionDescriptionObserver* observer, const RTCOfferAnswerOptions& options) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } void SetLocalDescription(SetSessionDescriptionObserver* observer, SessionDescriptionInterface* desc) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } void SetRemoteDescription(SetSessionDescriptionObserver* observer, SessionDescriptionInterface* desc) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } void SetRemoteDescription( std::unique_ptr desc, rtc::scoped_refptr observer) override { - FATAL() << "Not implemented"; + RTC_CHECK_NOTREACHED(); } PeerConnectionInterface::RTCConfiguration GetConfiguration() override { @@ -194,14 +194,8 @@ class DummyPeerConnection : public PeerConnectionInterface { return RTCError(RTCErrorType::UNSUPPORTED_OPERATION, "Not implemented"); } - RTCError SetBitrate(const BitrateParameters& bitrate_parameters) override { - return RTCError(RTCErrorType::UNSUPPORTED_OPERATION, "Not implemented"); - } - - void SetAudioPlayout(bool playout) override { FATAL() << "Not implemented"; } - void SetAudioRecording(bool recording) override { - FATAL() << "Not implemented"; - } + void SetAudioPlayout(bool playout) override { RTC_CHECK_NOTREACHED(); } + void SetAudioRecording(bool recording) override { RTC_CHECK_NOTREACHED(); } rtc::scoped_refptr LookupDtlsTransportByMid( const std::string& mid) override { @@ -239,9 +233,13 @@ class DummyPeerConnection : public PeerConnectionInterface { return false; } - void StopRtcEventLog() { FATAL() << "Not implemented"; } + void StopRtcEventLog() { RTC_CHECK_NOTREACHED(); } + + void Close() override {} - void Close() {} + rtc::Thread* signaling_thread() const override { + return rtc::Thread::Current(); + } }; static_assert( diff --git 
a/api/test/fake_datagram_transport.h b/api/test/fake_datagram_transport.h deleted file mode 100644 index 847b4d842a..0000000000 --- a/api/test/fake_datagram_transport.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2019 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef API_TEST_FAKE_DATAGRAM_TRANSPORT_H_ -#define API_TEST_FAKE_DATAGRAM_TRANSPORT_H_ - -#include -#include - -#include "api/transport/datagram_transport_interface.h" -#include "api/transport/media/media_transport_interface.h" - -namespace webrtc { - -// Maxmum size of datagrams sent by |FakeDatagramTransport|. -constexpr size_t kMaxFakeDatagramSize = 1000; - -// Fake datagram transport. Does not support making an actual connection -// or sending data. Only used for tests that need to stub out a transport. -class FakeDatagramTransport : public DatagramTransportInterface { - public: - FakeDatagramTransport( - const MediaTransportSettings& settings, - std::string transport_parameters, - const std::function& - are_parameters_compatible) - : settings_(settings), - transport_parameters_(transport_parameters), - are_parameters_compatible_(are_parameters_compatible) {} - - ~FakeDatagramTransport() override { RTC_DCHECK(!state_callback_); } - - void Connect(rtc::PacketTransportInternal* packet_transport) override { - packet_transport_ = packet_transport; - } - - CongestionControlInterface* congestion_control() override { - return nullptr; // Datagram interface doesn't provide this yet. 
- } - - void SetTransportStateCallback( - MediaTransportStateCallback* callback) override { - state_callback_ = callback; - } - - RTCError SendDatagram(rtc::ArrayView data, - DatagramId datagram_id) override { - return RTCError::OK(); - } - - size_t GetLargestDatagramSize() const override { - return kMaxFakeDatagramSize; - } - - void SetDatagramSink(DatagramSinkInterface* sink) override {} - - std::string GetTransportParameters() const override { - if (settings_.remote_transport_parameters) { - return *settings_.remote_transport_parameters; - } - return transport_parameters_; - } - - RTCError SetRemoteTransportParameters( - absl::string_view remote_parameters) override { - if (are_parameters_compatible_(GetTransportParameters(), - remote_parameters)) { - return RTCError::OK(); - } - return RTCError(RTCErrorType::UNSUPPORTED_PARAMETER, - "Incompatible remote transport parameters"); - } - - RTCError OpenChannel(int channel_id) override { - return RTCError(RTCErrorType::UNSUPPORTED_OPERATION); - } - - RTCError SendData(int channel_id, - const SendDataParams& params, - const rtc::CopyOnWriteBuffer& buffer) override { - return RTCError(RTCErrorType::UNSUPPORTED_OPERATION); - } - - RTCError CloseChannel(int channel_id) override { - return RTCError(RTCErrorType::UNSUPPORTED_OPERATION); - } - - void SetDataSink(DataChannelSink* /*sink*/) override {} - - bool IsReadyToSend() const override { return false; } - - rtc::PacketTransportInternal* packet_transport() { return packet_transport_; } - - void set_state(webrtc::MediaTransportState state) { - if (state_callback_) { - state_callback_->OnStateChanged(state); - } - } - - const MediaTransportSettings& settings() { return settings_; } - - private: - const MediaTransportSettings settings_; - const std::string transport_parameters_; - const std::function - are_parameters_compatible_; - - rtc::PacketTransportInternal* packet_transport_ = nullptr; - MediaTransportStateCallback* state_callback_ = nullptr; -}; - -} // namespace 
webrtc - -#endif // API_TEST_FAKE_DATAGRAM_TRANSPORT_H_ diff --git a/api/test/fake_media_transport.h b/api/test/fake_media_transport.h deleted file mode 100644 index 530394710a..0000000000 --- a/api/test/fake_media_transport.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef API_TEST_FAKE_MEDIA_TRANSPORT_H_ -#define API_TEST_FAKE_MEDIA_TRANSPORT_H_ - -#include -#include -#include -#include - -#include "absl/algorithm/container.h" -#include "api/test/fake_datagram_transport.h" -#include "api/transport/media/media_transport_interface.h" - -namespace webrtc { - -// Fake media transport factory creates fake media transport. -// Also creates fake datagram transport, since both media and datagram -// transports are created by |MediaTransportFactory|. 
-class FakeMediaTransportFactory : public MediaTransportFactory { - public: - explicit FakeMediaTransportFactory( - const absl::optional& transport_offer = "") - : transport_offer_(transport_offer) {} - ~FakeMediaTransportFactory() = default; - - std::string GetTransportName() const override { return "fake"; } - - RTCErrorOr> CreateMediaTransport( - rtc::PacketTransportInternal* packet_transport, - rtc::Thread* network_thread, - const MediaTransportSettings& settings) override { - return RTCError(RTCErrorType::UNSUPPORTED_OPERATION); - } - - RTCErrorOr> CreateMediaTransport( - rtc::Thread* network_thread, - const MediaTransportSettings& settings) override { - return RTCError(RTCErrorType::UNSUPPORTED_OPERATION); - } - - RTCErrorOr> - CreateDatagramTransport(rtc::Thread* network_thread, - const MediaTransportSettings& settings) override { - return std::unique_ptr( - new FakeDatagramTransport(settings, transport_offer_.value_or(""), - transport_parameters_comparison_)); - } - - void set_transport_parameters_comparison( - std::function comparison) { - transport_parameters_comparison_ = std::move(comparison); - } - - private: - const absl::optional transport_offer_; - std::function - transport_parameters_comparison_ = - [](absl::string_view local, absl::string_view remote) { - return local == remote; - }; -}; - -} // namespace webrtc - -#endif // API_TEST_FAKE_MEDIA_TRANSPORT_H_ diff --git a/api/test/frame_generator_interface.cc b/api/test/frame_generator_interface.cc new file mode 100644 index 0000000000..356fe3af53 --- /dev/null +++ b/api/test/frame_generator_interface.cc @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "api/test/frame_generator_interface.h" + +namespace webrtc { +namespace test { + +// static +const char* FrameGeneratorInterface::OutputTypeToString( + FrameGeneratorInterface::OutputType type) { + switch (type) { + case OutputType::kI420: + return "I420"; + case OutputType::kI420A: + return "I420A"; + case OutputType::kI010: + return "I010"; + case OutputType::kNV12: + return "NV12"; + default: + RTC_NOTREACHED(); + } +} + +} // namespace test +} // namespace webrtc diff --git a/api/test/frame_generator_interface.h b/api/test/frame_generator_interface.h index 691b6ee3f7..90e60debac 100644 --- a/api/test/frame_generator_interface.h +++ b/api/test/frame_generator_interface.h @@ -32,7 +32,8 @@ class FrameGeneratorInterface { absl::optional update_rect; }; - enum class OutputType { kI420, kI420A, kI010 }; + enum class OutputType { kI420, kI420A, kI010, kNV12 }; + static const char* OutputTypeToString(OutputType type); virtual ~FrameGeneratorInterface() = default; diff --git a/api/test/loopback_media_transport.cc b/api/test/loopback_media_transport.cc deleted file mode 100644 index 18ce93cd7e..0000000000 --- a/api/test/loopback_media_transport.cc +++ /dev/null @@ -1,373 +0,0 @@ -/* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "api/test/loopback_media_transport.h" - -#include - -#include "absl/algorithm/container.h" -#include "rtc_base/time_utils.h" - -namespace webrtc { - -namespace { - -constexpr size_t kLoopbackMaxDatagramSize = 1200; - -class WrapperDatagramTransport : public DatagramTransportInterface { - public: - explicit WrapperDatagramTransport(DatagramTransportInterface* wrapped) - : wrapped_(wrapped) {} - - // Datagram transport overrides. - void Connect(rtc::PacketTransportInternal* packet_transport) override { - return wrapped_->Connect(packet_transport); - } - - CongestionControlInterface* congestion_control() override { - return wrapped_->congestion_control(); - } - - void SetTransportStateCallback( - MediaTransportStateCallback* callback) override { - return wrapped_->SetTransportStateCallback(callback); - } - - RTCError SendDatagram(rtc::ArrayView data, - DatagramId datagram_id) override { - return wrapped_->SendDatagram(data, datagram_id); - } - - size_t GetLargestDatagramSize() const override { - return wrapped_->GetLargestDatagramSize(); - } - - void SetDatagramSink(DatagramSinkInterface* sink) override { - return wrapped_->SetDatagramSink(sink); - } - - std::string GetTransportParameters() const override { - return wrapped_->GetTransportParameters(); - } - - RTCError SetRemoteTransportParameters(absl::string_view parameters) override { - return wrapped_->SetRemoteTransportParameters(parameters); - } - - // Data channel overrides. 
- RTCError OpenChannel(int channel_id) override { - return wrapped_->OpenChannel(channel_id); - } - - RTCError SendData(int channel_id, - const SendDataParams& params, - const rtc::CopyOnWriteBuffer& buffer) override { - return wrapped_->SendData(channel_id, params, buffer); - } - - RTCError CloseChannel(int channel_id) override { - return wrapped_->CloseChannel(channel_id); - } - - void SetDataSink(DataChannelSink* sink) override { - wrapped_->SetDataSink(sink); - } - - bool IsReadyToSend() const override { return wrapped_->IsReadyToSend(); } - - private: - DatagramTransportInterface* wrapped_; -}; - -} // namespace - -WrapperMediaTransportFactory::WrapperMediaTransportFactory( - DatagramTransportInterface* wrapped_datagram_transport) - : wrapped_datagram_transport_(wrapped_datagram_transport) {} - -WrapperMediaTransportFactory::WrapperMediaTransportFactory( - MediaTransportFactory* wrapped) - : wrapped_factory_(wrapped) {} - -RTCErrorOr> -WrapperMediaTransportFactory::CreateMediaTransport( - rtc::PacketTransportInternal* packet_transport, - rtc::Thread* network_thread, - const MediaTransportSettings& settings) { - return RTCError(RTCErrorType::UNSUPPORTED_OPERATION); -} - -RTCErrorOr> -WrapperMediaTransportFactory::CreateDatagramTransport( - rtc::Thread* network_thread, - const MediaTransportSettings& settings) { - created_transport_count_++; - if (wrapped_factory_) { - return wrapped_factory_->CreateDatagramTransport(network_thread, settings); - } - return { - std::make_unique(wrapped_datagram_transport_)}; -} - -std::string WrapperMediaTransportFactory::GetTransportName() const { - if (wrapped_factory_) { - return wrapped_factory_->GetTransportName(); - } - return "wrapped-transport"; -} - -int WrapperMediaTransportFactory::created_transport_count() const { - return created_transport_count_; -} - -RTCErrorOr> -WrapperMediaTransportFactory::CreateMediaTransport( - rtc::Thread* network_thread, - const MediaTransportSettings& settings) { - return 
RTCError(RTCErrorType::UNSUPPORTED_OPERATION); -} - -MediaTransportPair::MediaTransportPair(rtc::Thread* thread) - : first_datagram_transport_(thread), - second_datagram_transport_(thread), - first_factory_(&first_datagram_transport_), - second_factory_(&second_datagram_transport_) { - first_datagram_transport_.Connect(&second_datagram_transport_); - second_datagram_transport_.Connect(&first_datagram_transport_); -} - -MediaTransportPair::~MediaTransportPair() = default; - -MediaTransportPair::LoopbackDataChannelTransport::LoopbackDataChannelTransport( - rtc::Thread* thread) - : thread_(thread) {} - -MediaTransportPair::LoopbackDataChannelTransport:: - ~LoopbackDataChannelTransport() { - RTC_CHECK(data_sink_ == nullptr); -} - -void MediaTransportPair::LoopbackDataChannelTransport::Connect( - LoopbackDataChannelTransport* other) { - other_ = other; -} - -RTCError MediaTransportPair::LoopbackDataChannelTransport::OpenChannel( - int channel_id) { - // No-op. No need to open channels for the loopback. 
- return RTCError::OK(); -} - -RTCError MediaTransportPair::LoopbackDataChannelTransport::SendData( - int channel_id, - const SendDataParams& params, - const rtc::CopyOnWriteBuffer& buffer) { - invoker_.AsyncInvoke(RTC_FROM_HERE, thread_, - [this, channel_id, params, buffer] { - other_->OnData(channel_id, params.type, buffer); - }); - return RTCError::OK(); -} - -RTCError MediaTransportPair::LoopbackDataChannelTransport::CloseChannel( - int channel_id) { - invoker_.AsyncInvoke(RTC_FROM_HERE, thread_, [this, channel_id] { - other_->OnRemoteCloseChannel(channel_id); - rtc::CritScope lock(&sink_lock_); - if (data_sink_) { - data_sink_->OnChannelClosed(channel_id); - } - }); - return RTCError::OK(); -} - -void MediaTransportPair::LoopbackDataChannelTransport::SetDataSink( - DataChannelSink* sink) { - rtc::CritScope lock(&sink_lock_); - data_sink_ = sink; - if (data_sink_ && ready_to_send_) { - data_sink_->OnReadyToSend(); - } -} - -bool MediaTransportPair::LoopbackDataChannelTransport::IsReadyToSend() const { - rtc::CritScope lock(&sink_lock_); - return ready_to_send_; -} - -void MediaTransportPair::LoopbackDataChannelTransport::FlushAsyncInvokes() { - invoker_.Flush(thread_); -} - -void MediaTransportPair::LoopbackDataChannelTransport::OnData( - int channel_id, - DataMessageType type, - const rtc::CopyOnWriteBuffer& buffer) { - rtc::CritScope lock(&sink_lock_); - if (data_sink_) { - data_sink_->OnDataReceived(channel_id, type, buffer); - } -} - -void MediaTransportPair::LoopbackDataChannelTransport::OnRemoteCloseChannel( - int channel_id) { - rtc::CritScope lock(&sink_lock_); - if (data_sink_) { - data_sink_->OnChannelClosing(channel_id); - data_sink_->OnChannelClosed(channel_id); - } -} - -void MediaTransportPair::LoopbackDataChannelTransport::OnReadyToSend( - bool ready_to_send) { - invoker_.AsyncInvoke(RTC_FROM_HERE, thread_, [this, ready_to_send] { - rtc::CritScope lock(&sink_lock_); - ready_to_send_ = ready_to_send; - // Propagate state to data channel sink, if 
present. - if (data_sink_ && ready_to_send_) { - data_sink_->OnReadyToSend(); - } - }); -} - -MediaTransportPair::LoopbackDatagramTransport::LoopbackDatagramTransport( - rtc::Thread* thread) - : thread_(thread), dc_transport_(thread) {} - -void MediaTransportPair::LoopbackDatagramTransport::Connect( - LoopbackDatagramTransport* other) { - other_ = other; - dc_transport_.Connect(&other->dc_transport_); -} - -void MediaTransportPair::LoopbackDatagramTransport::Connect( - rtc::PacketTransportInternal* packet_transport) { - if (state_after_connect_) { - SetState(*state_after_connect_); - } -} - -CongestionControlInterface* -MediaTransportPair::LoopbackDatagramTransport::congestion_control() { - return nullptr; -} - -void MediaTransportPair::LoopbackDatagramTransport::SetTransportStateCallback( - MediaTransportStateCallback* callback) { - RTC_DCHECK_RUN_ON(thread_); - state_callback_ = callback; - if (state_callback_) { - state_callback_->OnStateChanged(state_); - } -} - -RTCError MediaTransportPair::LoopbackDatagramTransport::SendDatagram( - rtc::ArrayView data, - DatagramId datagram_id) { - rtc::CopyOnWriteBuffer buffer; - buffer.SetData(data.data(), data.size()); - invoker_.AsyncInvoke( - RTC_FROM_HERE, thread_, [this, datagram_id, buffer = std::move(buffer)] { - RTC_DCHECK_RUN_ON(thread_); - other_->DeliverDatagram(std::move(buffer)); - if (sink_) { - DatagramAck ack; - ack.datagram_id = datagram_id; - ack.receive_timestamp = Timestamp::Micros(rtc::TimeMicros()); - sink_->OnDatagramAcked(ack); - } - }); - return RTCError::OK(); -} - -size_t MediaTransportPair::LoopbackDatagramTransport::GetLargestDatagramSize() - const { - return kLoopbackMaxDatagramSize; -} - -void MediaTransportPair::LoopbackDatagramTransport::SetDatagramSink( - DatagramSinkInterface* sink) { - RTC_DCHECK_RUN_ON(thread_); - sink_ = sink; -} - -std::string -MediaTransportPair::LoopbackDatagramTransport::GetTransportParameters() const { - return transport_parameters_; -} - -RTCError 
-MediaTransportPair::LoopbackDatagramTransport::SetRemoteTransportParameters( - absl::string_view remote_parameters) { - RTC_DCHECK_RUN_ON(thread_); - if (transport_parameters_comparison_(GetTransportParameters(), - remote_parameters)) { - return RTCError::OK(); - } - return RTCError(RTCErrorType::UNSUPPORTED_PARAMETER, - "Incompatible remote transport parameters"); -} - -RTCError MediaTransportPair::LoopbackDatagramTransport::OpenChannel( - int channel_id) { - return dc_transport_.OpenChannel(channel_id); -} - -RTCError MediaTransportPair::LoopbackDatagramTransport::SendData( - int channel_id, - const SendDataParams& params, - const rtc::CopyOnWriteBuffer& buffer) { - return dc_transport_.SendData(channel_id, params, buffer); -} - -RTCError MediaTransportPair::LoopbackDatagramTransport::CloseChannel( - int channel_id) { - return dc_transport_.CloseChannel(channel_id); -} - -void MediaTransportPair::LoopbackDatagramTransport::SetDataSink( - DataChannelSink* sink) { - dc_transport_.SetDataSink(sink); -} - -bool MediaTransportPair::LoopbackDatagramTransport::IsReadyToSend() const { - return dc_transport_.IsReadyToSend(); -} - -void MediaTransportPair::LoopbackDatagramTransport::SetState( - MediaTransportState state) { - invoker_.AsyncInvoke(RTC_FROM_HERE, thread_, [this, state] { - RTC_DCHECK_RUN_ON(thread_); - state_ = state; - if (state_callback_) { - state_callback_->OnStateChanged(state_); - } - }); - dc_transport_.OnReadyToSend(state == MediaTransportState::kWritable); -} - -void MediaTransportPair::LoopbackDatagramTransport::SetStateAfterConnect( - MediaTransportState state) { - state_after_connect_ = state; -} - -void MediaTransportPair::LoopbackDatagramTransport::FlushAsyncInvokes() { - dc_transport_.FlushAsyncInvokes(); -} - -void MediaTransportPair::LoopbackDatagramTransport::DeliverDatagram( - rtc::CopyOnWriteBuffer buffer) { - RTC_DCHECK_RUN_ON(thread_); - if (sink_) { - sink_->OnDatagramReceived(buffer); - } -} - -} // namespace webrtc diff --git 
a/api/test/loopback_media_transport.h b/api/test/loopback_media_transport.h deleted file mode 100644 index 468965ba31..0000000000 --- a/api/test/loopback_media_transport.h +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef API_TEST_LOOPBACK_MEDIA_TRANSPORT_H_ -#define API_TEST_LOOPBACK_MEDIA_TRANSPORT_H_ - -#include -#include -#include -#include - -#include "api/transport/datagram_transport_interface.h" -#include "api/transport/media/media_transport_interface.h" -#include "rtc_base/async_invoker.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" - -namespace webrtc { - -// Wrapper used to hand out unique_ptrs to loopback media -// transport without ownership changes to the underlying -// transport. -// It works in two modes: -// It can either wrap a factory, or it can wrap an existing interface. -// In the former mode, it delegates the work to the wrapped factory. -// In the latter mode, it always returns static instance of the transport -// interface. -// -// Example use: -// Factory wrap_static_interface = Wrapper(media_transport_interface); -// Factory wrap_factory = Wrapper(wrap_static_interface); -// The second factory may be created multiple times, and ownership may be passed -// to the client. 
The first factory counts the number of invocations of -// CreateMediaTransport(); -class WrapperMediaTransportFactory : public MediaTransportFactory { - public: - explicit WrapperMediaTransportFactory( - DatagramTransportInterface* wrapped_datagram_transport); - explicit WrapperMediaTransportFactory(MediaTransportFactory* wrapped); - - RTCErrorOr> CreateMediaTransport( - rtc::PacketTransportInternal* packet_transport, - rtc::Thread* network_thread, - const MediaTransportSettings& settings) override; - - RTCErrorOr> CreateMediaTransport( - rtc::Thread* network_thread, - const MediaTransportSettings& settings) override; - - RTCErrorOr> - CreateDatagramTransport(rtc::Thread* network_thread, - const MediaTransportSettings& settings) override; - - std::string GetTransportName() const override; - - int created_transport_count() const; - - private: - DatagramTransportInterface* wrapped_datagram_transport_ = nullptr; - MediaTransportFactory* wrapped_factory_ = nullptr; - int created_transport_count_ = 0; -}; - -// Contains two MediaTransportsInterfaces that are connected to each other. -// Currently supports audio only. 
-class MediaTransportPair { - public: - struct Stats { - int sent_audio_frames = 0; - int received_audio_frames = 0; - int sent_video_frames = 0; - int received_video_frames = 0; - }; - - explicit MediaTransportPair(rtc::Thread* thread); - ~MediaTransportPair(); - - DatagramTransportInterface* first_datagram_transport() { - return &first_datagram_transport_; - } - DatagramTransportInterface* second_datagram_transport() { - return &second_datagram_transport_; - } - - std::unique_ptr first_factory() { - return std::make_unique(&first_factory_); - } - - std::unique_ptr second_factory() { - return std::make_unique(&second_factory_); - } - - void SetState(MediaTransportState state) { - first_datagram_transport_.SetState(state); - second_datagram_transport_.SetState(state); - } - - void SetFirstState(MediaTransportState state) { - first_datagram_transport_.SetState(state); - } - - void SetSecondStateAfterConnect(MediaTransportState state) { - second_datagram_transport_.SetState(state); - } - - void SetFirstDatagramTransportParameters(const std::string& params) { - first_datagram_transport_.set_transport_parameters(params); - } - - void SetSecondDatagramTransportParameters(const std::string& params) { - second_datagram_transport_.set_transport_parameters(params); - } - - void SetFirstDatagramTransportParametersComparison( - std::function comparison) { - first_datagram_transport_.set_transport_parameters_comparison( - std::move(comparison)); - } - - void SetSecondDatagramTransportParametersComparison( - std::function comparison) { - second_datagram_transport_.set_transport_parameters_comparison( - std::move(comparison)); - } - - void FlushAsyncInvokes() { - first_datagram_transport_.FlushAsyncInvokes(); - second_datagram_transport_.FlushAsyncInvokes(); - } - - int first_factory_transport_count() const { - return first_factory_.created_transport_count(); - } - - int second_factory_transport_count() const { - return second_factory_.created_transport_count(); - } - - private: 
- class LoopbackDataChannelTransport : public DataChannelTransportInterface { - public: - explicit LoopbackDataChannelTransport(rtc::Thread* thread); - ~LoopbackDataChannelTransport() override; - - void Connect(LoopbackDataChannelTransport* other); - - RTCError OpenChannel(int channel_id) override; - - RTCError SendData(int channel_id, - const SendDataParams& params, - const rtc::CopyOnWriteBuffer& buffer) override; - - RTCError CloseChannel(int channel_id) override; - - bool IsReadyToSend() const override; - - void SetDataSink(DataChannelSink* sink) override; - - void OnReadyToSend(bool ready_to_send); - - void FlushAsyncInvokes(); - - private: - void OnData(int channel_id, - DataMessageType type, - const rtc::CopyOnWriteBuffer& buffer); - - void OnRemoteCloseChannel(int channel_id); - - rtc::Thread* const thread_; - rtc::CriticalSection sink_lock_; - DataChannelSink* data_sink_ RTC_GUARDED_BY(sink_lock_) = nullptr; - - bool ready_to_send_ RTC_GUARDED_BY(sink_lock_) = false; - - LoopbackDataChannelTransport* other_; - - rtc::AsyncInvoker invoker_; - }; - - class LoopbackDatagramTransport : public DatagramTransportInterface { - public: - explicit LoopbackDatagramTransport(rtc::Thread* thread); - - void Connect(LoopbackDatagramTransport* other); - - // Datagram transport overrides. - void Connect(rtc::PacketTransportInternal* packet_transport) override; - CongestionControlInterface* congestion_control() override; - void SetTransportStateCallback( - MediaTransportStateCallback* callback) override; - RTCError SendDatagram(rtc::ArrayView data, - DatagramId datagram_id) override; - size_t GetLargestDatagramSize() const override; - void SetDatagramSink(DatagramSinkInterface* sink) override; - std::string GetTransportParameters() const override; - RTCError SetRemoteTransportParameters( - absl::string_view remote_parameters) override; - - // Data channel overrides. 
- RTCError OpenChannel(int channel_id) override; - RTCError SendData(int channel_id, - const SendDataParams& params, - const rtc::CopyOnWriteBuffer& buffer) override; - RTCError CloseChannel(int channel_id) override; - void SetDataSink(DataChannelSink* sink) override; - bool IsReadyToSend() const override; - - // Loopback-specific functionality. - void SetState(MediaTransportState state); - - // When Connect() is called, the datagram transport will enter this state. - // This is useful for mimicking zero-RTT connectivity, for example. - void SetStateAfterConnect(MediaTransportState state); - void FlushAsyncInvokes(); - - void set_transport_parameters(const std::string& value) { - transport_parameters_ = value; - } - - void set_transport_parameters_comparison( - std::function comparison) { - thread_->Invoke( - RTC_FROM_HERE, [this, comparison = std::move(comparison)] { - RTC_DCHECK_RUN_ON(thread_); - transport_parameters_comparison_ = std::move(comparison); - }); - } - - private: - void DeliverDatagram(rtc::CopyOnWriteBuffer buffer); - - rtc::Thread* thread_; - LoopbackDataChannelTransport dc_transport_; - - MediaTransportState state_ RTC_GUARDED_BY(thread_) = - MediaTransportState::kPending; - DatagramSinkInterface* sink_ RTC_GUARDED_BY(thread_) = nullptr; - MediaTransportStateCallback* state_callback_ RTC_GUARDED_BY(thread_) = - nullptr; - LoopbackDatagramTransport* other_; - - std::string transport_parameters_; - std::function - transport_parameters_comparison_ RTC_GUARDED_BY(thread_) = - [](absl::string_view a, absl::string_view b) { return a == b; }; - - absl::optional state_after_connect_; - - rtc::AsyncInvoker invoker_; - }; - - LoopbackDatagramTransport first_datagram_transport_; - LoopbackDatagramTransport second_datagram_transport_; - WrapperMediaTransportFactory first_factory_; - WrapperMediaTransportFactory second_factory_; -}; - -} // namespace webrtc - -#endif // API_TEST_LOOPBACK_MEDIA_TRANSPORT_H_ diff --git 
a/api/test/loopback_media_transport_unittest.cc b/api/test/loopback_media_transport_unittest.cc deleted file mode 100644 index 464942992b..0000000000 --- a/api/test/loopback_media_transport_unittest.cc +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "api/test/loopback_media_transport.h" - -#include -#include -#include - -#include "test/gmock.h" - -namespace webrtc { - -namespace { - -class MockMediaTransportAudioSinkInterface - : public MediaTransportAudioSinkInterface { - public: - MOCK_METHOD(void, - OnData, - (uint64_t, MediaTransportEncodedAudioFrame), - (override)); -}; - -class MockMediaTransportVideoSinkInterface - : public MediaTransportVideoSinkInterface { - public: - MOCK_METHOD(void, - OnData, - (uint64_t, MediaTransportEncodedVideoFrame), - (override)); -}; - -class MockMediaTransportKeyFrameRequestCallback - : public MediaTransportKeyFrameRequestCallback { - public: - MOCK_METHOD(void, OnKeyFrameRequested, (uint64_t), (override)); -}; - -class MockDataChannelSink : public DataChannelSink { - public: - MOCK_METHOD(void, - OnDataReceived, - (int, DataMessageType, const rtc::CopyOnWriteBuffer&), - (override)); - MOCK_METHOD(void, OnChannelClosing, (int), (override)); - MOCK_METHOD(void, OnChannelClosed, (int), (override)); - MOCK_METHOD(void, OnReadyToSend, (), (override)); -}; - -class MockStateCallback : public MediaTransportStateCallback { - public: - MOCK_METHOD(void, OnStateChanged, (MediaTransportState), (override)); -}; - -} // namespace - -TEST(LoopbackMediaTransport, DataDeliveredToSink) { - std::unique_ptr thread = rtc::Thread::Create(); - 
thread->Start(); - MediaTransportPair transport_pair(thread.get()); - - MockDataChannelSink sink; - transport_pair.first_datagram_transport()->SetDataSink(&sink); - - const int channel_id = 1; - EXPECT_CALL( - sink, OnDataReceived( - channel_id, DataMessageType::kText, - ::testing::Property( - &rtc::CopyOnWriteBuffer::cdata, ::testing::StrEq("foo")))); - - SendDataParams params; - params.type = DataMessageType::kText; - rtc::CopyOnWriteBuffer buffer("foo"); - transport_pair.second_datagram_transport()->SendData(channel_id, params, - buffer); - - transport_pair.FlushAsyncInvokes(); - transport_pair.first_datagram_transport()->SetDataSink(nullptr); -} - -TEST(LoopbackMediaTransport, CloseDeliveredToSink) { - std::unique_ptr thread = rtc::Thread::Create(); - thread->Start(); - MediaTransportPair transport_pair(thread.get()); - - MockDataChannelSink first_sink; - transport_pair.first_datagram_transport()->SetDataSink(&first_sink); - - MockDataChannelSink second_sink; - transport_pair.second_datagram_transport()->SetDataSink(&second_sink); - - const int channel_id = 1; - { - ::testing::InSequence s; - EXPECT_CALL(second_sink, OnChannelClosing(channel_id)); - EXPECT_CALL(second_sink, OnChannelClosed(channel_id)); - EXPECT_CALL(first_sink, OnChannelClosed(channel_id)); - } - - transport_pair.first_datagram_transport()->CloseChannel(channel_id); - - transport_pair.FlushAsyncInvokes(); - transport_pair.first_datagram_transport()->SetDataSink(nullptr); - transport_pair.second_datagram_transport()->SetDataSink(nullptr); -} - -TEST(LoopbackMediaTransport, InitialStateDeliveredWhenCallbackSet) { - std::unique_ptr thread = rtc::Thread::Create(); - thread->Start(); - MediaTransportPair transport_pair(thread.get()); - - MockStateCallback state_callback; - EXPECT_CALL(state_callback, OnStateChanged(MediaTransportState::kPending)); - - thread->Invoke(RTC_FROM_HERE, [&transport_pair, &state_callback] { - transport_pair.first_datagram_transport()->SetTransportStateCallback( - 
&state_callback); - }); - transport_pair.FlushAsyncInvokes(); -} - -TEST(LoopbackMediaTransport, ChangedStateDeliveredWhenCallbackSet) { - std::unique_ptr thread = rtc::Thread::Create(); - thread->Start(); - MediaTransportPair transport_pair(thread.get()); - - transport_pair.SetState(MediaTransportState::kWritable); - transport_pair.FlushAsyncInvokes(); - - MockStateCallback state_callback; - - EXPECT_CALL(state_callback, OnStateChanged(MediaTransportState::kWritable)); - thread->Invoke(RTC_FROM_HERE, [&transport_pair, &state_callback] { - transport_pair.first_datagram_transport()->SetTransportStateCallback( - &state_callback); - }); - transport_pair.FlushAsyncInvokes(); -} - -TEST(LoopbackMediaTransport, StateChangeDeliveredToCallback) { - std::unique_ptr thread = rtc::Thread::Create(); - thread->Start(); - MediaTransportPair transport_pair(thread.get()); - - MockStateCallback state_callback; - - EXPECT_CALL(state_callback, OnStateChanged(MediaTransportState::kPending)); - EXPECT_CALL(state_callback, OnStateChanged(MediaTransportState::kWritable)); - thread->Invoke(RTC_FROM_HERE, [&transport_pair, &state_callback] { - transport_pair.first_datagram_transport()->SetTransportStateCallback( - &state_callback); - }); - transport_pair.SetState(MediaTransportState::kWritable); - transport_pair.FlushAsyncInvokes(); -} - -TEST(LoopbackMediaTransport, NotReadyToSendWhenDataSinkSet) { - std::unique_ptr thread = rtc::Thread::Create(); - thread->Start(); - MediaTransportPair transport_pair(thread.get()); - - MockDataChannelSink data_channel_sink; - EXPECT_CALL(data_channel_sink, OnReadyToSend()).Times(0); - - transport_pair.first_datagram_transport()->SetDataSink(&data_channel_sink); - transport_pair.FlushAsyncInvokes(); - transport_pair.first_datagram_transport()->SetDataSink(nullptr); -} - -TEST(LoopbackMediaTransport, ReadyToSendWhenDataSinkSet) { - std::unique_ptr thread = rtc::Thread::Create(); - thread->Start(); - MediaTransportPair transport_pair(thread.get()); - - 
transport_pair.SetState(MediaTransportState::kWritable); - transport_pair.FlushAsyncInvokes(); - - MockDataChannelSink data_channel_sink; - EXPECT_CALL(data_channel_sink, OnReadyToSend()); - - transport_pair.first_datagram_transport()->SetDataSink(&data_channel_sink); - transport_pair.FlushAsyncInvokes(); - transport_pair.first_datagram_transport()->SetDataSink(nullptr); -} - -TEST(LoopbackMediaTransport, StateChangeDeliveredToDataSink) { - std::unique_ptr thread = rtc::Thread::Create(); - thread->Start(); - MediaTransportPair transport_pair(thread.get()); - - MockDataChannelSink data_channel_sink; - EXPECT_CALL(data_channel_sink, OnReadyToSend()); - - transport_pair.first_datagram_transport()->SetDataSink(&data_channel_sink); - transport_pair.SetState(MediaTransportState::kWritable); - transport_pair.FlushAsyncInvokes(); - transport_pair.first_datagram_transport()->SetDataSink(nullptr); -} - -} // namespace webrtc diff --git a/api/test/mock_async_dns_resolver.h b/api/test/mock_async_dns_resolver.h new file mode 100644 index 0000000000..e863cac6e6 --- /dev/null +++ b/api/test/mock_async_dns_resolver.h @@ -0,0 +1,54 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_TEST_MOCK_ASYNC_DNS_RESOLVER_H_ +#define API_TEST_MOCK_ASYNC_DNS_RESOLVER_H_ + +#include +#include + +#include "api/async_dns_resolver.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockAsyncDnsResolverResult : public AsyncDnsResolverResult { + public: + MOCK_METHOD(bool, + GetResolvedAddress, + (int, rtc::SocketAddress*), + (const override)); + MOCK_METHOD(int, GetError, (), (const override)); +}; + +class MockAsyncDnsResolver : public AsyncDnsResolverInterface { + public: + MOCK_METHOD(void, + Start, + (const rtc::SocketAddress&, std::function), + (override)); + MOCK_METHOD(AsyncDnsResolverResult&, result, (), (const override)); +}; + +class MockAsyncDnsResolverFactory : public AsyncDnsResolverFactoryInterface { + public: + MOCK_METHOD(std::unique_ptr, + CreateAndResolve, + (const rtc::SocketAddress&, std::function), + (override)); + MOCK_METHOD(std::unique_ptr, + Create, + (), + (override)); +}; + +} // namespace webrtc + +#endif // API_TEST_MOCK_ASYNC_DNS_RESOLVER_H_ diff --git a/api/test/mock_data_channel.h b/api/test/mock_data_channel.h new file mode 100644 index 0000000000..9346ffd638 --- /dev/null +++ b/api/test/mock_data_channel.h @@ -0,0 +1,60 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_TEST_MOCK_DATA_CHANNEL_H_ +#define API_TEST_MOCK_DATA_CHANNEL_H_ + +#include + +#include "api/data_channel_interface.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockDataChannelInterface final + : public rtc::RefCountedObject { + public: + static rtc::scoped_refptr Create() { + return new MockDataChannelInterface(); + } + + MOCK_METHOD(void, + RegisterObserver, + (DataChannelObserver * observer), + (override)); + MOCK_METHOD(void, UnregisterObserver, (), (override)); + MOCK_METHOD(std::string, label, (), (const, override)); + MOCK_METHOD(bool, reliable, (), (const, override)); + MOCK_METHOD(bool, ordered, (), (const, override)); + MOCK_METHOD(uint16_t, maxRetransmitTime, (), (const, override)); + MOCK_METHOD(uint16_t, maxRetransmits, (), (const, override)); + MOCK_METHOD(absl::optional, maxRetransmitsOpt, (), (const, override)); + MOCK_METHOD(absl::optional, maxPacketLifeTime, (), (const, override)); + MOCK_METHOD(std::string, protocol, (), (const, override)); + MOCK_METHOD(bool, negotiated, (), (const, override)); + MOCK_METHOD(int, id, (), (const, override)); + MOCK_METHOD(Priority, priority, (), (const, override)); + MOCK_METHOD(DataState, state, (), (const, override)); + MOCK_METHOD(RTCError, error, (), (const, override)); + MOCK_METHOD(uint32_t, messages_sent, (), (const, override)); + MOCK_METHOD(uint64_t, bytes_sent, (), (const, override)); + MOCK_METHOD(uint32_t, messages_received, (), (const, override)); + MOCK_METHOD(uint64_t, bytes_received, (), (const, override)); + MOCK_METHOD(uint64_t, buffered_amount, (), (const, override)); + MOCK_METHOD(void, Close, (), (override)); + MOCK_METHOD(bool, Send, (const DataBuffer& buffer), (override)); + + protected: + MockDataChannelInterface() = default; +}; + +} // namespace webrtc + +#endif // API_TEST_MOCK_DATA_CHANNEL_H_ diff --git a/api/test/mock_media_stream_interface.h b/api/test/mock_media_stream_interface.h new file mode 100644 index 0000000000..29521e6e23 --- /dev/null +++ 
b/api/test/mock_media_stream_interface.h @@ -0,0 +1,89 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_TEST_MOCK_MEDIA_STREAM_INTERFACE_H_ +#define API_TEST_MOCK_MEDIA_STREAM_INTERFACE_H_ + +#include + +#include "api/media_stream_interface.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockAudioSource final + : public rtc::RefCountedObject { + public: + static rtc::scoped_refptr Create() { + return new MockAudioSource(); + } + + MOCK_METHOD(void, + RegisterObserver, + (ObserverInterface * observer), + (override)); + MOCK_METHOD(void, + UnregisterObserver, + (ObserverInterface * observer), + (override)); + MOCK_METHOD(SourceState, state, (), (const, override)); + MOCK_METHOD(bool, remote, (), (const, override)); + MOCK_METHOD(void, SetVolume, (double volume), (override)); + MOCK_METHOD(void, + RegisterAudioObserver, + (AudioObserver * observer), + (override)); + MOCK_METHOD(void, + UnregisterAudioObserver, + (AudioObserver * observer), + (override)); + MOCK_METHOD(void, AddSink, (AudioTrackSinkInterface * sink), (override)); + MOCK_METHOD(void, RemoveSink, (AudioTrackSinkInterface * sink), (override)); + MOCK_METHOD(const cricket::AudioOptions, options, (), (const, override)); + + private: + MockAudioSource() = default; +}; + +class MockAudioTrack final : public rtc::RefCountedObject { + public: + static rtc::scoped_refptr Create() { + return new MockAudioTrack(); + } + + MOCK_METHOD(void, + RegisterObserver, + (ObserverInterface * observer), + (override)); + MOCK_METHOD(void, + UnregisterObserver, + (ObserverInterface * observer), + (override)); + MOCK_METHOD(std::string, kind, 
(), (const, override)); + MOCK_METHOD(std::string, id, (), (const override)); + MOCK_METHOD(bool, enabled, (), (const, override)); + MOCK_METHOD(bool, set_enabled, (bool enable), (override)); + MOCK_METHOD(TrackState, state, (), (const, override)); + MOCK_METHOD(AudioSourceInterface*, GetSource, (), (const, override)); + MOCK_METHOD(void, AddSink, (AudioTrackSinkInterface * sink), (override)); + MOCK_METHOD(void, RemoveSink, (AudioTrackSinkInterface * sink), (override)); + MOCK_METHOD(bool, GetSignalLevel, (int* level), (override)); + MOCK_METHOD(rtc::scoped_refptr, + GetAudioProcessor, + (), + (override)); + + private: + MockAudioTrack() = default; +}; + +} // namespace webrtc + +#endif // API_TEST_MOCK_MEDIA_STREAM_INTERFACE_H_ diff --git a/api/test/mock_peer_connection_factory_interface.h b/api/test/mock_peer_connection_factory_interface.h new file mode 100644 index 0000000000..c2f2435fb8 --- /dev/null +++ b/api/test/mock_peer_connection_factory_interface.h @@ -0,0 +1,80 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_TEST_MOCK_PEER_CONNECTION_FACTORY_INTERFACE_H_ +#define API_TEST_MOCK_PEER_CONNECTION_FACTORY_INTERFACE_H_ + +#include +#include + +#include "api/peer_connection_interface.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockPeerConnectionFactoryInterface final + : public rtc::RefCountedObject { + public: + static rtc::scoped_refptr Create() { + return new MockPeerConnectionFactoryInterface(); + } + + MOCK_METHOD(void, SetOptions, (const Options&), (override)); + MOCK_METHOD(rtc::scoped_refptr, + CreatePeerConnection, + (const PeerConnectionInterface::RTCConfiguration&, + PeerConnectionDependencies), + (override)); + MOCK_METHOD(RTCErrorOr>, + CreatePeerConnectionOrError, + (const PeerConnectionInterface::RTCConfiguration&, + PeerConnectionDependencies), + (override)); + MOCK_METHOD(rtc::scoped_refptr, + CreatePeerConnection, + (const PeerConnectionInterface::RTCConfiguration&, + std::unique_ptr, + std::unique_ptr, + PeerConnectionObserver*), + (override)); + MOCK_METHOD(RtpCapabilities, + GetRtpSenderCapabilities, + (cricket::MediaType), + (const override)); + MOCK_METHOD(RtpCapabilities, + GetRtpReceiverCapabilities, + (cricket::MediaType), + (const override)); + MOCK_METHOD(rtc::scoped_refptr, + CreateLocalMediaStream, + (const std::string&), + (override)); + MOCK_METHOD(rtc::scoped_refptr, + CreateAudioSource, + (const cricket::AudioOptions&), + (override)); + MOCK_METHOD(rtc::scoped_refptr, + CreateVideoTrack, + (const std::string&, VideoTrackSourceInterface*), + (override)); + MOCK_METHOD(rtc::scoped_refptr, + CreateAudioTrack, + (const std::string&, AudioSourceInterface*), + (override)); + MOCK_METHOD(bool, StartAecDump, (FILE*, int64_t), (override)); + MOCK_METHOD(void, StopAecDump, (), (override)); + + protected: + MockPeerConnectionFactoryInterface() = default; +}; + +} // namespace webrtc + +#endif // API_TEST_MOCK_PEER_CONNECTION_FACTORY_INTERFACE_H_ diff --git a/api/test/mock_peerconnectioninterface.h 
b/api/test/mock_peerconnectioninterface.h index 6b247b7cee..b5d94238c8 100644 --- a/api/test/mock_peerconnectioninterface.h +++ b/api/test/mock_peerconnectioninterface.h @@ -100,8 +100,8 @@ class MockPeerConnectionInterface GetSctpTransport, (), (const override)); - MOCK_METHOD(rtc::scoped_refptr, - CreateDataChannel, + MOCK_METHOD(RTCErrorOr>, + CreateDataChannelOrError, (const std::string&, const DataChannelInit*), (override)); MOCK_METHOD(const SessionDescriptionInterface*, @@ -167,7 +167,6 @@ class MockPeerConnectionInterface (const std::vector&), (override)); MOCK_METHOD(RTCError, SetBitrate, (const BitrateSettings&), (override)); - MOCK_METHOD(RTCError, SetBitrate, (const BitrateParameters&), (override)); MOCK_METHOD(void, SetAudioPlayout, (bool), (override)); MOCK_METHOD(void, SetAudioRecording, (bool), (override)); MOCK_METHOD(rtc::scoped_refptr, diff --git a/api/test/mock_rtp_transceiver.h b/api/test/mock_rtp_transceiver.h new file mode 100644 index 0000000000..a0a08c4772 --- /dev/null +++ b/api/test/mock_rtp_transceiver.h @@ -0,0 +1,85 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_TEST_MOCK_RTP_TRANSCEIVER_H_ +#define API_TEST_MOCK_RTP_TRANSCEIVER_H_ + +#include +#include + +#include "api/rtp_transceiver_interface.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockRtpTransceiver final + : public rtc::RefCountedObject { + public: + static rtc::scoped_refptr Create() { + return new MockRtpTransceiver(); + } + + MOCK_METHOD(cricket::MediaType, media_type, (), (const, override)); + MOCK_METHOD(absl::optional, mid, (), (const, override)); + MOCK_METHOD(rtc::scoped_refptr, + sender, + (), + (const, override)); + MOCK_METHOD(rtc::scoped_refptr, + receiver, + (), + (const, override)); + MOCK_METHOD(bool, stopped, (), (const, override)); + MOCK_METHOD(bool, stopping, (), (const, override)); + MOCK_METHOD(RtpTransceiverDirection, direction, (), (const, override)); + MOCK_METHOD(void, + SetDirection, + (RtpTransceiverDirection new_direction), + (override)); + MOCK_METHOD(RTCError, + SetDirectionWithError, + (RtpTransceiverDirection new_direction), + (override)); + MOCK_METHOD(absl::optional, + current_direction, + (), + (const, override)); + MOCK_METHOD(absl::optional, + fired_direction, + (), + (const, override)); + MOCK_METHOD(RTCError, StopStandard, (), (override)); + MOCK_METHOD(void, StopInternal, (), (override)); + MOCK_METHOD(void, Stop, (), (override)); + MOCK_METHOD(RTCError, + SetCodecPreferences, + (rtc::ArrayView codecs), + (override)); + MOCK_METHOD(std::vector, + codec_preferences, + (), + (const, override)); + MOCK_METHOD(std::vector, + HeaderExtensionsToOffer, + (), + (const, override)); + MOCK_METHOD(webrtc::RTCError, + SetOfferedRtpHeaderExtensions, + (rtc::ArrayView + header_extensions_to_offer), + (override)); + + private: + MockRtpTransceiver() = default; +}; + +} // namespace webrtc + +#endif // API_TEST_MOCK_RTP_TRANSCEIVER_H_ diff --git a/api/test/mock_video_encoder.h b/api/test/mock_video_encoder.h index 26d758fd6a..11e0f64b3f 100644 --- a/api/test/mock_video_encoder.h +++ 
b/api/test/mock_video_encoder.h @@ -22,9 +22,7 @@ class MockEncodedImageCallback : public EncodedImageCallback { public: MOCK_METHOD(Result, OnEncodedImage, - (const EncodedImage& encodedImage, - const CodecSpecificInfo*, - const RTPFragmentationHeader*), + (const EncodedImage&, const CodecSpecificInfo*), (override)); MOCK_METHOD(void, OnDroppedFrame, (DropReason reason), (override)); }; diff --git a/api/test/neteq_simulator_factory.cc b/api/test/neteq_simulator_factory.cc index ea5be8517d..82b27e546d 100644 --- a/api/test/neteq_simulator_factory.cc +++ b/api/test/neteq_simulator_factory.cc @@ -21,6 +21,24 @@ namespace webrtc { namespace test { +namespace { +NetEqTestFactory::Config convertConfig( + const NetEqSimulatorFactory::Config& simulation_config, + absl::string_view replacement_audio_filename) { + NetEqTestFactory::Config config; + config.replacement_audio_file = std::string(replacement_audio_filename); + config.max_nr_packets_in_buffer = simulation_config.max_nr_packets_in_buffer; + config.initial_dummy_packets = simulation_config.initial_dummy_packets; + config.skip_get_audio_events = simulation_config.skip_get_audio_events; + config.field_trial_string = simulation_config.field_trial_string; + config.output_audio_filename = simulation_config.output_audio_filename; + config.pythonplot = simulation_config.python_plot_filename.has_value(); + config.plot_scripts_basename = simulation_config.python_plot_filename; + config.textlog = simulation_config.text_log_filename.has_value(); + config.textlog_filename = simulation_config.text_log_filename; + return config; +} +} // namespace NetEqSimulatorFactory::NetEqSimulatorFactory() : factory_(std::make_unique()) {} @@ -31,13 +49,8 @@ std::unique_ptr NetEqSimulatorFactory::CreateSimulatorFromFile( absl::string_view event_log_filename, absl::string_view replacement_audio_filename, Config simulation_config) { - NetEqTestFactory::Config config; - config.replacement_audio_file = std::string(replacement_audio_filename); - 
config.max_nr_packets_in_buffer = simulation_config.max_nr_packets_in_buffer; - config.initial_dummy_packets = simulation_config.initial_dummy_packets; - config.skip_get_audio_events = simulation_config.skip_get_audio_events; - config.field_trial_string = simulation_config.field_trial_string; - config.output_audio_filename = simulation_config.output_audio_filename; + NetEqTestFactory::Config config = + convertConfig(simulation_config, replacement_audio_filename); return factory_->InitializeTestFromFile( std::string(event_log_filename), simulation_config.neteq_factory, config); } @@ -47,12 +60,8 @@ NetEqSimulatorFactory::CreateSimulatorFromString( absl::string_view event_log_file_contents, absl::string_view replacement_audio_filename, Config simulation_config) { - NetEqTestFactory::Config config; - config.replacement_audio_file = std::string(replacement_audio_filename); - config.max_nr_packets_in_buffer = simulation_config.max_nr_packets_in_buffer; - config.initial_dummy_packets = simulation_config.initial_dummy_packets; - config.skip_get_audio_events = simulation_config.skip_get_audio_events; - config.field_trial_string = simulation_config.field_trial_string; + NetEqTestFactory::Config config = + convertConfig(simulation_config, replacement_audio_filename); return factory_->InitializeTestFromString( std::string(event_log_file_contents), simulation_config.neteq_factory, config); diff --git a/api/test/neteq_simulator_factory.h b/api/test/neteq_simulator_factory.h index b3c77b1404..2a716e665e 100644 --- a/api/test/neteq_simulator_factory.h +++ b/api/test/neteq_simulator_factory.h @@ -44,6 +44,10 @@ class NetEqSimulatorFactory { std::string field_trial_string; // A filename for the generated output audio file. absl::optional output_audio_filename; + // A filename for the python plot. + absl::optional python_plot_filename; + // A filename for the text log. + absl::optional text_log_filename; // A custom NetEqFactory can be used. 
NetEqFactory* neteq_factory = nullptr; }; diff --git a/api/test/network_emulation/BUILD.gn b/api/test/network_emulation/BUILD.gn index 5fda1e288a..a8044d7230 100644 --- a/api/test/network_emulation/BUILD.gn +++ b/api/test/network_emulation/BUILD.gn @@ -12,17 +12,41 @@ rtc_library("network_emulation") { visibility = [ "*" ] sources = [ + "cross_traffic.h", "network_emulation_interfaces.cc", "network_emulation_interfaces.h", ] deps = [ + "../..:array_view", "../../../rtc_base", "../../../rtc_base:checks", + "../../../rtc_base:ip_address", "../../../rtc_base:rtc_base_approved", + "../../../rtc_base:socket_address", + "../../numerics", + "../../task_queue", "../../units:data_rate", "../../units:data_size", + "../../units:time_delta", "../../units:timestamp", - "//third_party/abseil-cpp/absl/types:optional", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_library("create_cross_traffic") { + visibility = [ "*" ] + testonly = true + + sources = [ + "create_cross_traffic.cc", + "create_cross_traffic.h", + ] + + deps = [ + ":network_emulation", + "../..:network_emulation_manager_api", + "../../../rtc_base/task_utils:repeating_task", + "../../../test/network:emulated_network", ] } diff --git a/api/test/network_emulation/create_cross_traffic.cc b/api/test/network_emulation/create_cross_traffic.cc new file mode 100644 index 0000000000..36a535cec6 --- /dev/null +++ b/api/test/network_emulation/create_cross_traffic.cc @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "api/test/network_emulation/create_cross_traffic.h" + +#include + +#include "rtc_base/task_utils/repeating_task.h" +#include "test/network/cross_traffic.h" + +namespace webrtc { + +std::unique_ptr CreateRandomWalkCrossTraffic( + CrossTrafficRoute* traffic_route, + RandomWalkConfig config) { + return std::make_unique(config, traffic_route); +} + +std::unique_ptr CreatePulsedPeaksCrossTraffic( + CrossTrafficRoute* traffic_route, + PulsedPeaksConfig config) { + return std::make_unique(config, traffic_route); +} + +std::unique_ptr CreateFakeTcpCrossTraffic( + EmulatedRoute* send_route, + EmulatedRoute* ret_route, + FakeTcpConfig config) { + return std::make_unique(config, send_route, + ret_route); +} + +} // namespace webrtc diff --git a/api/test/network_emulation/create_cross_traffic.h b/api/test/network_emulation/create_cross_traffic.h new file mode 100644 index 0000000000..42fc855392 --- /dev/null +++ b/api/test/network_emulation/create_cross_traffic.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef API_TEST_NETWORK_EMULATION_CREATE_CROSS_TRAFFIC_H_ +#define API_TEST_NETWORK_EMULATION_CREATE_CROSS_TRAFFIC_H_ + +#include + +#include "api/test/network_emulation/cross_traffic.h" +#include "api/test/network_emulation_manager.h" + +namespace webrtc { + +// This API is still in development and can be changed without prior notice. 
+ +std::unique_ptr CreateRandomWalkCrossTraffic( + CrossTrafficRoute* traffic_route, + RandomWalkConfig config); + +std::unique_ptr CreatePulsedPeaksCrossTraffic( + CrossTrafficRoute* traffic_route, + PulsedPeaksConfig config); + +std::unique_ptr CreateFakeTcpCrossTraffic( + EmulatedRoute* send_route, + EmulatedRoute* ret_route, + FakeTcpConfig config); + +} // namespace webrtc + +#endif // API_TEST_NETWORK_EMULATION_CREATE_CROSS_TRAFFIC_H_ diff --git a/api/test/network_emulation/cross_traffic.h b/api/test/network_emulation/cross_traffic.h new file mode 100644 index 0000000000..85343e44d2 --- /dev/null +++ b/api/test/network_emulation/cross_traffic.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef API_TEST_NETWORK_EMULATION_CROSS_TRAFFIC_H_ +#define API_TEST_NETWORK_EMULATION_CROSS_TRAFFIC_H_ + +#include "api/task_queue/task_queue_base.h" +#include "api/test/network_emulation/network_emulation_interfaces.h" +#include "api/units/data_rate.h" +#include "api/units/data_size.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" + +namespace webrtc { + +// This API is still in development and can be changed without prior notice. + +// Represents the endpoint for cross traffic that is going through the network. +// It can be used to emulate unexpected network load. +class CrossTrafficRoute { + public: + virtual ~CrossTrafficRoute() = default; + + // Triggers sending of dummy packets with size |packet_size| bytes. + virtual void TriggerPacketBurst(size_t num_packets, size_t packet_size) = 0; + // Sends a packet over the nodes. 
The content of the packet is unspecified; + // only the size matters for the emulation purposes. + virtual void SendPacket(size_t packet_size) = 0; + // Sends a packet over the nodes and runs |action| when it has been delivered. + virtual void NetworkDelayedAction(size_t packet_size, + std::function action) = 0; +}; + +// Describes a way of generating cross traffic on some route. Used by +// NetworkEmulationManager to produce cross traffic during some period of time. +class CrossTrafficGenerator { + public: + virtual ~CrossTrafficGenerator() = default; + + // Time between Process calls. + virtual TimeDelta GetProcessInterval() const = 0; + + // Called periodically by NetworkEmulationManager. Generates traffic on the + // route. + virtual void Process(Timestamp at_time) = 0; +}; + +// Config of a cross traffic generator. Generated traffic rises and falls +// randomly. +struct RandomWalkConfig { + int random_seed = 1; + DataRate peak_rate = DataRate::KilobitsPerSec(100); + DataSize min_packet_size = DataSize::Bytes(200); + TimeDelta min_packet_interval = TimeDelta::Millis(1); + TimeDelta update_interval = TimeDelta::Millis(200); + double variance = 0.6; + double bias = -0.1; +}; + +// Config of a cross traffic generator. Generated traffic has the form of periodic +// peaks alternating with periods of silence.
+struct PulsedPeaksConfig { + DataRate peak_rate = DataRate::KilobitsPerSec(100); + DataSize min_packet_size = DataSize::Bytes(200); + TimeDelta min_packet_interval = TimeDelta::Millis(1); + TimeDelta send_duration = TimeDelta::Millis(100); + TimeDelta hold_duration = TimeDelta::Millis(2000); +}; + +struct FakeTcpConfig { + DataSize packet_size = DataSize::Bytes(1200); + DataSize send_limit = DataSize::PlusInfinity(); + TimeDelta process_interval = TimeDelta::Millis(200); + TimeDelta packet_timeout = TimeDelta::Seconds(1); +}; + +} // namespace webrtc + +#endif // API_TEST_NETWORK_EMULATION_CROSS_TRAFFIC_H_ diff --git a/api/test/network_emulation/network_emulation_interfaces.h b/api/test/network_emulation/network_emulation_interfaces.h index 0986df4a08..c8e6ed053e 100644 --- a/api/test/network_emulation/network_emulation_interfaces.h +++ b/api/test/network_emulation/network_emulation_interfaces.h @@ -10,7 +10,13 @@ #ifndef API_TEST_NETWORK_EMULATION_NETWORK_EMULATION_INTERFACES_H_ #define API_TEST_NETWORK_EMULATION_NETWORK_EMULATION_INTERFACES_H_ +#include +#include +#include + #include "absl/types/optional.h" +#include "api/array_view.h" +#include "api/numerics/samples_stats_counter.h" #include "api/units/data_rate.h" #include "api/units/data_size.h" #include "api/units/timestamp.h" @@ -56,40 +62,145 @@ class EmulatedNetworkReceiverInterface { virtual void OnPacketReceived(EmulatedIpPacket packet) = 0; }; -struct EmulatedNetworkStats { - int64_t packets_sent = 0; - DataSize bytes_sent = DataSize::Zero(); +class EmulatedNetworkOutgoingStats { + public: + virtual ~EmulatedNetworkOutgoingStats() = default; + + virtual int64_t PacketsSent() const = 0; + + virtual DataSize BytesSent() const = 0; + + // Returns the timestamped sizes of all sent packets if + // EmulatedEndpointConfig::stats_gatherming_mode was set to + // StatsGatheringMode::kDebug; otherwise, the returned value will be empty. + // Returned reference is valid until the next call to a non-const method. 
+ virtual const SamplesStatsCounter& SentPacketsSizeCounter() const = 0; + + virtual DataSize FirstSentPacketSize() const = 0; + + // Returns time of the first packet sent or infinite value if no packets were + // sent. + virtual Timestamp FirstPacketSentTime() const = 0; + + // Returns time of the last packet sent or infinite value if no packets were + // sent. + virtual Timestamp LastPacketSentTime() const = 0; + + // Returns average send rate. Requires that at least 2 packets were sent. + virtual DataRate AverageSendRate() const = 0; +}; + +class EmulatedNetworkIncomingStats { + public: + virtual ~EmulatedNetworkIncomingStats() = default; + // Total amount of packets received with or without destination. - int64_t packets_received = 0; + virtual int64_t PacketsReceived() const = 0; // Total amount of bytes in received packets. - DataSize bytes_received = DataSize::Zero(); + virtual DataSize BytesReceived() const = 0; + // Returns the timestamped sizes of all received packets if + // EmulatedEndpointConfig::stats_gathering_mode was set to + // StatsGatheringMode::kDebug; otherwise, the returned value will be empty. + // Returned reference is valid until the next call to a non-const method. + virtual const SamplesStatsCounter& ReceivedPacketsSizeCounter() const = 0; // Total amount of packets that were received, but no destination was found. - int64_t packets_dropped = 0; // Total amount of bytes in dropped packets.
- DataSize bytes_dropped = DataSize::Zero(); - - DataSize first_received_packet_size = DataSize::Zero(); - DataSize first_sent_packet_size = DataSize::Zero(); - - Timestamp first_packet_sent_time = Timestamp::PlusInfinity(); - Timestamp last_packet_sent_time = Timestamp::PlusInfinity(); - Timestamp first_packet_received_time = Timestamp::PlusInfinity(); - Timestamp last_packet_received_time = Timestamp::PlusInfinity(); - - DataRate AverageSendRate() const { - RTC_DCHECK_GE(packets_sent, 2); - return (bytes_sent - first_sent_packet_size) / - (last_packet_sent_time - first_packet_sent_time); - } - DataRate AverageReceiveRate() const { - RTC_DCHECK_GE(packets_received, 2); - return (bytes_received - first_received_packet_size) / - (last_packet_received_time - first_packet_received_time); - } + virtual DataSize BytesDropped() const = 0; + // Returns the timestamped sizes of all packets that were received, + // but no destination was found if + // EmulatedEndpointConfig::stats_gatherming_mode was set to + // StatsGatheringMode::kDebug; otherwise, the returned value will be empty. + // Returned reference is valid until the next call to a non-const method. + virtual const SamplesStatsCounter& DroppedPacketsSizeCounter() const = 0; + + virtual DataSize FirstReceivedPacketSize() const = 0; + + // Returns time of the first packet received or infinite value if no packets + // were received. + virtual Timestamp FirstPacketReceivedTime() const = 0; + + // Returns time of the last packet received or infinite value if no packets + // were received. + virtual Timestamp LastPacketReceivedTime() const = 0; + + virtual DataRate AverageReceiveRate() const = 0; +}; + +class EmulatedNetworkStats { + public: + virtual ~EmulatedNetworkStats() = default; + + // List of IP addresses that were used to send data considered in this stats + // object. 
+ virtual std::vector LocalAddresses() const = 0; + + virtual int64_t PacketsSent() const = 0; + + virtual DataSize BytesSent() const = 0; + // Returns the timestamped sizes of all sent packets if + // EmulatedEndpointConfig::stats_gatherming_mode was set to + // StatsGatheringMode::kDebug; otherwise, the returned value will be empty. + // Returned reference is valid until the next call to a non-const method. + virtual const SamplesStatsCounter& SentPacketsSizeCounter() const = 0; + // Returns the timestamped duration between packet was received on + // network interface and was dispatched to the network in microseconds if + // EmulatedEndpointConfig::stats_gatherming_mode was set to + // StatsGatheringMode::kDebug; otherwise, the returned value will be empty. + // Returned reference is valid until the next call to a non-const method. + virtual const SamplesStatsCounter& SentPacketsQueueWaitTimeUs() const = 0; + + virtual DataSize FirstSentPacketSize() const = 0; + // Returns time of the first packet sent or infinite value if no packets were + // sent. + virtual Timestamp FirstPacketSentTime() const = 0; + // Returns time of the last packet sent or infinite value if no packets were + // sent. + virtual Timestamp LastPacketSentTime() const = 0; + + virtual DataRate AverageSendRate() const = 0; + // Total amount of packets received regardless of the destination address. + virtual int64_t PacketsReceived() const = 0; + // Total amount of bytes in received packets. + virtual DataSize BytesReceived() const = 0; + // Returns the timestamped sizes of all received packets if + // EmulatedEndpointConfig::stats_gatherming_mode was set to + // StatsGatheringMode::kDebug; otherwise, the returned value will be empty. + // Returned reference is valid until the next call to a non-const method. + virtual const SamplesStatsCounter& ReceivedPacketsSizeCounter() const = 0; + // Total amount of packets that were received, but no destination was found. 
+ virtual int64_t PacketsDropped() const = 0; + // Total amount of bytes in dropped packets. + virtual DataSize BytesDropped() const = 0; + // Returns counter with timestamped sizes of all packets that were received, + // but no destination was found if + // EmulatedEndpointConfig::stats_gatherming_mode was set to + // StatsGatheringMode::kDebug; otherwise, the returned value will be empty. + // Returned reference is valid until the next call to a non-const method. + virtual const SamplesStatsCounter& DroppedPacketsSizeCounter() const = 0; + + virtual DataSize FirstReceivedPacketSize() const = 0; + // Returns time of the first packet received or infinite value if no packets + // were received. + virtual Timestamp FirstPacketReceivedTime() const = 0; + // Returns time of the last packet received or infinite value if no packets + // were received. + virtual Timestamp LastPacketReceivedTime() const = 0; + + virtual DataRate AverageReceiveRate() const = 0; + + virtual std::map> + OutgoingStatsPerDestination() const = 0; + + virtual std::map> + IncomingStatsPerSource() const = 0; }; // EmulatedEndpoint is an abstraction for network interface on device. Instances -// of this are created by NetworkEmulationManager::CreateEndpoint. +// of this are created by NetworkEmulationManager::CreateEndpoint and +// thread safe. class EmulatedEndpoint : public EmulatedNetworkReceiverInterface { public: // Send packet into network. @@ -111,14 +222,25 @@ class EmulatedEndpoint : public EmulatedNetworkReceiverInterface { // |desired_port| != 0 and is free or will be the one, selected by endpoint) // or absl::nullopt if desired_port in used. Also fails if there are no more // free ports to bind to. + // + // The Bind- and Unbind-methods must not be called from within a bound + // receiver's OnPacketReceived method. virtual absl::optional BindReceiver( uint16_t desired_port, EmulatedNetworkReceiverInterface* receiver) = 0; + // Unbinds receiver from the specified port. 
Does nothing if no receiver was + // bound before. After this method returns, no more packets can be delivered + // to the receiver, and it is safe to destroy it. virtual void UnbindReceiver(uint16_t port) = 0; + // Binds receiver that will accept all packets which arrived on any port + // for which there is no bound receiver. + virtual void BindDefaultReceiver( + EmulatedNetworkReceiverInterface* receiver) = 0; + // Unbinds default receiver. Does nothing if no default receiver was bound + // before. + virtual void UnbindDefaultReceiver() = 0; virtual rtc::IPAddress GetPeerLocalAddress() const = 0; - virtual EmulatedNetworkStats stats() = 0; - private: // Ensure that there can be no other subclass than EmulatedEndpointImpl. This // means that it's always safe to downcast EmulatedEndpoint instances to diff --git a/api/test/network_emulation_manager.cc b/api/test/network_emulation_manager.cc index 602c90aac1..9c148a069b 100644 --- a/api/test/network_emulation_manager.cc +++ b/api/test/network_emulation_manager.cc @@ -56,18 +56,20 @@ NetworkEmulationManager::SimulatedNetworkNode::Builder::packet_queue_length( } NetworkEmulationManager::SimulatedNetworkNode -NetworkEmulationManager::SimulatedNetworkNode::Builder::Build() const { +NetworkEmulationManager::SimulatedNetworkNode::Builder::Build( + uint64_t random_seed) const { RTC_CHECK(net_); - return Build(net_); + return Build(net_, random_seed); } NetworkEmulationManager::SimulatedNetworkNode NetworkEmulationManager::SimulatedNetworkNode::Builder::Build( - NetworkEmulationManager* net) const { + NetworkEmulationManager* net, + uint64_t random_seed) const { RTC_CHECK(net); RTC_CHECK(net_ == nullptr || net_ == net); SimulatedNetworkNode res; - auto behavior = std::make_unique(config_); + auto behavior = std::make_unique(config_, random_seed); res.simulation = behavior.get(); res.node = net->CreateEmulatedNode(std::move(behavior)); return res; diff --git a/api/test/network_emulation_manager.h
b/api/test/network_emulation_manager.h index 3e9cf113d2..ec51b290e0 100644 --- a/api/test/network_emulation_manager.h +++ b/api/test/network_emulation_manager.h @@ -11,9 +11,13 @@ #ifndef API_TEST_NETWORK_EMULATION_MANAGER_H_ #define API_TEST_NETWORK_EMULATION_MANAGER_H_ +#include #include +#include #include +#include "api/array_view.h" +#include "api/test/network_emulation/cross_traffic.h" #include "api/test/network_emulation/network_emulation_interfaces.h" #include "api/test/simulated_network.h" #include "api/test/time_controller.h" @@ -44,7 +48,16 @@ class EmulatedRoute; struct EmulatedEndpointConfig { enum class IpAddressFamily { kIpv4, kIpv6 }; + enum class StatsGatheringMode { + // Gather main network stats counters. + kDefault, + // kDefault + also gather per packet statistics. In this mode more memory + // will be used. + kDebug + }; + // If specified will be used to name endpoint for logging purposes. + absl::optional name = absl::nullopt; IpAddressFamily generated_ip_family = IpAddressFamily::kIpv4; // If specified will be used as IP address for endpoint node. Must be unique // among all created nodes. @@ -54,8 +67,48 @@ struct EmulatedEndpointConfig { bool start_as_enabled = true; // Network type which will be used to represent endpoint to WebRTC. rtc::AdapterType type = rtc::AdapterType::ADAPTER_TYPE_UNKNOWN; + StatsGatheringMode stats_gathering_mode = StatsGatheringMode::kDefault; + // Allow endpoint to send packets specifying source IP address different to + // the current endpoint IP address. If false endpoint will crash if attempt + // to send such packet will be done. + bool allow_send_packet_with_different_source_ip = false; + // Allow endpoint to receive packet with destination IP address different to + // the current endpoint IP address. If false endpoint will crash if such + // packet will arrive. 
+ bool allow_receive_packets_with_different_dest_ip = false; +}; + +struct EmulatedTURNServerConfig { + EmulatedEndpointConfig client_config; + EmulatedEndpointConfig peer_config; }; +// EmulatedTURNServer is an abstraction for a TURN server. +class EmulatedTURNServerInterface { + public: + struct IceServerConfig { + std::string username; + std::string password; + std::string url; + }; + + virtual ~EmulatedTURNServerInterface() {} + + // Get an IceServer configuration suitable to add to a PeerConnection. + virtual IceServerConfig GetIceServerConfig() const = 0; + + // Get non-null client endpoint, an endpoint that accepts TURN allocations. + // This shall typically be connected to one or more webrtc endpoint. + virtual EmulatedEndpoint* GetClientEndpoint() const = 0; + + // Returns socket address, which client should use to connect to TURN server + // and do TURN allocation. + virtual rtc::SocketAddress GetClientEndpointAddress() const = 0; + + // Get non-null peer endpoint, that is "connected to the internet". + // This shall typically be connected to another TURN server. + virtual EmulatedEndpoint* GetPeerEndpoint() const = 0; +}; // Provide interface to obtain all required objects to inject network emulation // layer into PeerConnection. Also contains information about network interfaces @@ -64,12 +117,24 @@ class EmulatedNetworkManagerInterface { public: virtual ~EmulatedNetworkManagerInterface() = default; + // Returns non-null pointer to thread that have to be used as network thread + // for WebRTC to properly setup network emulation. Returned thread is owned + // by EmulatedNetworkManagerInterface implementation. virtual rtc::Thread* network_thread() = 0; + // Returns non-null pointer to network manager that have to be injected into + // WebRTC to properly setup network emulation. Returned manager is owned by + // EmulatedNetworkManagerInterface implementation. 
virtual rtc::NetworkManager* network_manager() = 0; + // Returns list of endpoints that are associated with this instance. Pointers + // are guaranteed to be non-null and are owned by NetworkEmulationManager. + virtual std::vector endpoints() const = 0; - // Returns summarized network stats for endpoints for this manager. + // Passes summarized network stats for endpoints for this manager into + // specified |stats_callback|. Callback will be executed on network emulation + // internal task queue. virtual void GetStats( - std::function stats_callback) const = 0; + std::function)> stats_callback) + const = 0; }; enum class TimeMode { kRealTime, kSimulated }; @@ -98,8 +163,9 @@ class NetworkEmulationManager { Builder& capacity_Mbps(int link_capacity_Mbps); Builder& loss(double loss_rate); Builder& packet_queue_length(int max_queue_length_in_packets); - SimulatedNetworkNode Build() const; - SimulatedNetworkNode Build(NetworkEmulationManager* net) const; + SimulatedNetworkNode Build(uint64_t random_seed = 1) const; + SimulatedNetworkNode Build(NetworkEmulationManager* net, + uint64_t random_seed = 1) const; private: NetworkEmulationManager* const net_; @@ -109,11 +175,19 @@ class NetworkEmulationManager { virtual ~NetworkEmulationManager() = default; virtual TimeController* time_controller() = 0; + // Returns a mode in which underlying time controller operates. + virtual TimeMode time_mode() const = 0; // Creates an emulated network node, which represents single network in - // the emulated network layer. + // the emulated network layer. Uses default implementation on network behavior + // which can be configured with |config|. |random_seed| can be provided to + // alter randomization behavior. virtual EmulatedNetworkNode* CreateEmulatedNode( - BuiltInNetworkBehaviorConfig config) = 0; + BuiltInNetworkBehaviorConfig config, + uint64_t random_seed = 1) = 0; + // Creates an emulated network node, which represents single network in + // the emulated network layer. 
|network_behavior| determines how created node + // will forward incoming packets to the next receiver. virtual EmulatedNetworkNode* CreateEmulatedNode( std::unique_ptr network_behavior) = 0; @@ -160,9 +234,39 @@ class NetworkEmulationManager { virtual EmulatedRoute* CreateRoute( const std::vector& via_nodes) = 0; + // Creates a default route between endpoints going through specified network + // nodes. Default route is used for packet when there is no known route for + // packet's destination IP. + // + // This route is single direction only and describes how traffic that was + // sent by network interface |from| has to be delivered when routing + // was unspecified. The returned object can be used to remove created route. The + // route must contain at least one network node inside it. + // + // Assume that E{0-9} are endpoints and N{0-9} are network nodes, then + // creation of the route has to follow these rules: + // 1. A route consists of a source endpoint, an ordered list of one or + // more network nodes, and a destination endpoint. + // 2. If (E1, ..., E2) is a route, then E1 != E2. + // In other words, the source and the destination may not be the same. + // 3. Given two simultaneously existing routes (E1, ..., E2) and + // (E3, ..., E4), either E1 != E3 or E2 != E4. + // In other words, there may be at most one route from any given source + // endpoint to any given destination endpoint. + // 4. Given two simultaneously existing routes (E1, ..., N1, ..., E2) + // and (E3, ..., N2, ..., E4), either N1 != N2 or E2 != E4. + // In other words, a network node may not belong to two routes that lead + // to the same destination endpoint. + // 5. Any node N can belong to only one default route. + virtual EmulatedRoute* CreateDefaultRoute( + EmulatedEndpoint* from, + const std::vector& via_nodes, + EmulatedEndpoint* to) = 0; + + // Removes route previously created by CreateRoute(...).
// Caller mustn't call this function with route, that have been already - // removed earlier. + // removed earlier. Removing a route that is currently in use will lead to + // packets being dropped. virtual void ClearRoute(EmulatedRoute* route) = 0; // Creates a simulated TCP connection using |send_route| for traffic and @@ -172,6 +276,20 @@ class NetworkEmulationManager { virtual TcpMessageRoute* CreateTcpRoute(EmulatedRoute* send_route, EmulatedRoute* ret_route) = 0; + // Creates a route over the given |via_nodes|. Returns an object that can be + // used to emulate network load with cross traffic over the created route. + virtual CrossTrafficRoute* CreateCrossTrafficRoute( + const std::vector& via_nodes) = 0; + + // Starts generating cross traffic using given |generator|. Takes ownership + // over the generator. + virtual CrossTrafficGenerator* StartCrossTraffic( + std::unique_ptr generator) = 0; + + // Stops generating cross traffic that was started using given |generator|. + // The |generator| shouldn't be used after and the reference may be invalid. + virtual void StopCrossTraffic(CrossTrafficGenerator* generator) = 0; + // Creates EmulatedNetworkManagerInterface which can be used then to inject // network emulation layer into PeerConnection. |endpoints| - are available // network interfaces for PeerConnection. If endpoint is enabled, it will be @@ -180,6 +298,21 @@ class NetworkEmulationManager { virtual EmulatedNetworkManagerInterface* CreateEmulatedNetworkManagerInterface( const std::vector& endpoints) = 0; + + // Passes summarized network stats for specified |endpoints| into specified + // |stats_callback|. Callback will be executed on network emulation + // internal task queue. + virtual void GetStats( + rtc::ArrayView endpoints, + std::function)> + stats_callback) = 0; + + // Create a EmulatedTURNServer. + // The TURN server has 2 endpoints that need to be connected with routes, + // - GetClientEndpoint() - the endpoint that accepts TURN allocations. 
+ // - GetPeerEndpoint() - the endpoint that is "connected to the internet". + virtual EmulatedTURNServerInterface* CreateTURNServer( + EmulatedTURNServerConfig config) = 0; }; } // namespace webrtc diff --git a/api/test/peerconnection_quality_test_fixture.h b/api/test/peerconnection_quality_test_fixture.h index 8165443d36..8717e8f73d 100644 --- a/api/test/peerconnection_quality_test_fixture.h +++ b/api/test/peerconnection_quality_test_fixture.h @@ -32,8 +32,8 @@ #include "api/test/frame_generator_interface.h" #include "api/test/simulated_network.h" #include "api/test/stats_observer_interface.h" +#include "api/test/track_id_stream_info_map.h" #include "api/test/video_quality_analyzer_interface.h" -#include "api/transport/media/media_transport_interface.h" #include "api/transport/network_control.h" #include "api/units/time_delta.h" #include "api/video_codecs/video_decoder_factory.h" @@ -220,17 +220,24 @@ class PeerConnectionE2EQualityTestFixture { // was captured during the test for this video stream on sender side. // It is useful when generator is used as input. absl::optional input_dump_file_name; + // Used only if |input_dump_file_name| is set. Specifies the module for the + // video frames to be dumped. Modulo equals X means every Xth frame will be + // written to the dump file. The value must be greater than 0. + int input_dump_sampling_modulo = 1; // If specified this file will be used as output on the receiver side for // this stream. If multiple streams will be produced by input stream, // output files will be appended with indexes. The produced files contains // what was rendered for this video stream on receiver side. absl::optional output_dump_file_name; + // Used only if |output_dump_file_name| is set. Specifies the module for the + // video frames to be dumped. Modulo equals X means every Xth frame will be + // written to the dump file. The value must be greater than 0. 
+ int output_dump_sampling_modulo = 1; // If true will display input and output video on the user's screen. bool show_on_screen = false; // If specified, determines a sync group to which this video stream belongs. // According to bugs.webrtc.org/4762 WebRTC supports synchronization only - // for pair of single audio and single video stream. Framework won't do any - // enforcements on this field. + // for pair of single audio and single video stream. absl::optional sync_group; }; @@ -257,8 +264,7 @@ class PeerConnectionE2EQualityTestFixture { int sampling_frequency_in_hz = 48000; // If specified, determines a sync group to which this audio stream belongs. // According to bugs.webrtc.org/4762 WebRTC supports synchronization only - // for pair of single audio and single video stream. Framework won't do any - // enforcements on this field. + // for pair of single audio and single video stream. absl::optional sync_group; }; @@ -287,8 +293,6 @@ class PeerConnectionE2EQualityTestFixture { virtual PeerConfigurer* SetNetworkControllerFactory( std::unique_ptr network_controller_factory) = 0; - virtual PeerConfigurer* SetMediaTransportFactory( - std::unique_ptr media_transport_factory) = 0; virtual PeerConfigurer* SetVideoEncoderFactory( std::unique_ptr video_encoder_factory) = 0; virtual PeerConfigurer* SetVideoDecoderFactory( @@ -337,8 +341,8 @@ class PeerConnectionE2EQualityTestFixture { PeerConnectionInterface::RTCConfiguration configuration) = 0; // Set bitrate parameters on PeerConnection. This constraints will be // applied to all summed RTP streams for this peer. - virtual PeerConfigurer* SetBitrateParameters( - PeerConnectionInterface::BitrateParameters bitrate_params) = 0; + virtual PeerConfigurer* SetBitrateSettings( + BitrateSettings bitrate_settings) = 0; }; // Contains configuration for echo emulator. 
@@ -412,7 +416,14 @@ class PeerConnectionE2EQualityTestFixture { // Invoked by framework after peer connection factory and peer connection // itself will be created but before offer/answer exchange will be started. - virtual void Start(absl::string_view test_case_name) = 0; + // |test_case_name| is name of test case, that should be used to report all + // metrics. + // |reporter_helper| is a pointer to a class that will allow track_id to + // stream_id matching. The caller is responsible for ensuring the + // TrackIdStreamInfoMap will be valid from Start() to + // StopAndReportResults(). + virtual void Start(absl::string_view test_case_name, + const TrackIdStreamInfoMap* reporter_helper) = 0; // Invoked by framework after call is ended and peer connection factory and // peer connection are destroyed. @@ -448,6 +459,12 @@ class PeerConnectionE2EQualityTestFixture { virtual void AddPeer(rtc::Thread* network_thread, rtc::NetworkManager* network_manager, rtc::FunctionView configurer) = 0; + // Runs the media quality test, which includes setting up the call with + // configured participants, running it according to provided |run_params| and + // terminating it properly at the end. During call duration media quality + // metrics are gathered, which are then reported to stdout and (if configured) + // to the json/protobuf output file through the WebRTC perf test results + // reporting system. 
virtual void Run(RunParams run_params) = 0; // Returns real test duration - the time of test execution measured during diff --git a/api/test/simulated_network.h b/api/test/simulated_network.h index 0d5c6613a6..fcac51f4ea 100644 --- a/api/test/simulated_network.h +++ b/api/test/simulated_network.h @@ -19,7 +19,6 @@ #include #include "absl/types/optional.h" -#include "rtc_base/critical_section.h" #include "rtc_base/random.h" #include "rtc_base/thread_annotations.h" @@ -47,8 +46,7 @@ struct PacketDeliveryInfo { // for built-in network behavior that will be used by WebRTC if no custom // NetworkBehaviorInterface is provided. struct BuiltInNetworkBehaviorConfig { - BuiltInNetworkBehaviorConfig() {} - // Queue length in number of packets. + // Queue length in number of packets. size_t queue_length_packets = 0; // Delay in addition to capacity induced delay. int queue_delay_ms = 0; diff --git a/api/test/simulcast_test_fixture.h b/api/test/simulcast_test_fixture.h index 5270d13306..cd470703c3 100644 --- a/api/test/simulcast_test_fixture.h +++ b/api/test/simulcast_test_fixture.h @@ -34,6 +34,8 @@ class SimulcastTestFixture { virtual void TestSpatioTemporalLayers321PatternEncoder() = 0; virtual void TestStrideEncodeDecode() = 0; virtual void TestDecodeWidthHeightSet() = 0; + virtual void + TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() = 0; }; } // namespace test diff --git a/api/test/stats_observer_interface.h b/api/test/stats_observer_interface.h index 98c8dd937f..ea4d6c23db 100644 --- a/api/test/stats_observer_interface.h +++ b/api/test/stats_observer_interface.h @@ -11,9 +11,8 @@ #ifndef API_TEST_STATS_OBSERVER_INTERFACE_H_ #define API_TEST_STATS_OBSERVER_INTERFACE_H_ -#include - -#include "api/stats_types.h" +#include "absl/strings/string_view.h" +#include "api/stats/rtc_stats_report.h" namespace webrtc { namespace webrtc_pc_e2e { @@ -25,8 +24,9 @@ class StatsObserverInterface { // Method called when stats reports are available for the PeerConnection // 
identified by |pc_label|. - virtual void OnStatsReports(const std::string& pc_label, - const StatsReports& reports) = 0; + virtual void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) = 0; }; } // namespace webrtc_pc_e2e diff --git a/api/test/time_controller.h b/api/test/time_controller.h index 4d7f9e6c39..bd3192ddf2 100644 --- a/api/test/time_controller.h +++ b/api/test/time_controller.h @@ -46,6 +46,7 @@ class TimeController { const char* thread_name) = 0; // Creates an rtc::Thread instance. If |socket_server| is nullptr, a default // noop socket server is created. + // Returned thread is not null and started. virtual std::unique_ptr CreateThread( const std::string& name, std::unique_ptr socket_server = nullptr) = 0; @@ -59,6 +60,8 @@ class TimeController { // Waits until condition() == true, polling condition() in small time // intervals. + // Returns true if condition() was evaluated to true before |max_duration| + // elapsed and false otherwise. bool Wait(const std::function& condition, TimeDelta max_duration = TimeDelta::Seconds(5)); }; diff --git a/api/test/track_id_stream_info_map.h b/api/test/track_id_stream_info_map.h new file mode 100644 index 0000000000..bb73cfd997 --- /dev/null +++ b/api/test/track_id_stream_info_map.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_TEST_TRACK_ID_STREAM_INFO_MAP_H_ +#define API_TEST_TRACK_ID_STREAM_INFO_MAP_H_ + +#include "absl/strings/string_view.h" + +namespace webrtc { +namespace webrtc_pc_e2e { + +// Instances of |TrackIdStreamInfoMap| provide bookkeeping capabilities that +// are useful to associate stats reports track_ids to the remote stream info. +class TrackIdStreamInfoMap { + public: + virtual ~TrackIdStreamInfoMap() = default; + + // These methods must be called on the same thread where + // StatsObserverInterface::OnStatsReports is invoked. + + // Returns a reference to a stream label owned by the TrackIdStreamInfoMap. + // Precondition: |track_id| must be already mapped to stream label. + virtual absl::string_view GetStreamLabelFromTrackId( + absl::string_view track_id) const = 0; + + // Returns a reference to a sync group name owned by the TrackIdStreamInfoMap. + // Precondition: |track_id| must be already mapped to sync group. + virtual absl::string_view GetSyncGroupLabelFromTrackId( + absl::string_view track_id) const = 0; +}; + +} // namespace webrtc_pc_e2e +} // namespace webrtc + +#endif // API_TEST_TRACK_ID_STREAM_INFO_MAP_H_ diff --git a/api/test/track_id_stream_label_map.h b/api/test/track_id_stream_label_map.h deleted file mode 100644 index e8dc947ab1..0000000000 --- a/api/test/track_id_stream_label_map.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef API_TEST_TRACK_ID_STREAM_LABEL_MAP_H_ -#define API_TEST_TRACK_ID_STREAM_LABEL_MAP_H_ - -#include - -namespace webrtc { -namespace webrtc_pc_e2e { - -// Instances of |TrackIdStreamLabelMap| provide bookkeeping capabilities that -// are useful to associate stats reports track_ids to the remote stream_id. -class TrackIdStreamLabelMap { - public: - virtual ~TrackIdStreamLabelMap() = default; - - // This method must be called on the same thread where - // StatsObserverInterface::OnStatsReports is invoked. - // Returns a reference to a stream label owned by the TrackIdStreamLabelMap. - // Precondition: |track_id| must be already mapped to a stream_label. - virtual const std::string& GetStreamLabelFromTrackId( - const std::string& track_id) const = 0; -}; - -} // namespace webrtc_pc_e2e -} // namespace webrtc - -#endif // API_TEST_TRACK_ID_STREAM_LABEL_MAP_H_ diff --git a/api/test/video/function_video_encoder_factory.h b/api/test/video/function_video_encoder_factory.h index 40a187acf2..a452eee7c4 100644 --- a/api/test/video/function_video_encoder_factory.h +++ b/api/test/video/function_video_encoder_factory.h @@ -43,14 +43,6 @@ class FunctionVideoEncoderFactory final : public VideoEncoderFactory { return {}; } - CodecInfo QueryVideoEncoder( - const SdpVideoFormat& /* format */) const override { - CodecInfo codec_info; - codec_info.is_hardware_accelerated = false; - codec_info.has_internal_source = false; - return codec_info; - } - std::unique_ptr CreateVideoEncoder( const SdpVideoFormat& format) override { return create_(format); diff --git a/api/test/video_quality_analyzer_interface.h b/api/test/video_quality_analyzer_interface.h index 0d3f441534..4488e5abf4 100644 --- a/api/test/video_quality_analyzer_interface.h +++ b/api/test/video_quality_analyzer_interface.h @@ -14,7 +14,9 @@ #include #include +#include "absl/strings/string_view.h" #include "absl/types/optional.h" +#include "api/array_view.h" #include "api/test/stats_observer_interface.h" #include 
"api/video/encoded_image.h" #include "api/video/video_frame.h" @@ -76,42 +78,68 @@ class VideoQualityAnalyzerInterface : public StatsObserverInterface { // calculations. Analyzer can perform simple calculations on the calling // thread in each method, but should remember, that it is the same thread, // that is used in video pipeline. - virtual void Start(std::string test_case_name, int max_threads_count) {} + virtual void Start(std::string test_case_name, + rtc::ArrayView peer_names, + int max_threads_count) {} // Will be called when frame was generated from the input stream. + // |peer_name| is name of the peer on which side frame was captured. // Returns frame id, that will be set by framework to the frame. - virtual uint16_t OnFrameCaptured(const std::string& stream_label, + virtual uint16_t OnFrameCaptured(absl::string_view peer_name, + const std::string& stream_label, const VideoFrame& frame) = 0; // Will be called before calling the encoder. - virtual void OnFramePreEncode(const VideoFrame& frame) {} + // |peer_name| is name of the peer on which side frame came to encoder. + virtual void OnFramePreEncode(absl::string_view peer_name, + const VideoFrame& frame) {} // Will be called for each EncodedImage received from encoder. Single // VideoFrame can produce multiple EncodedImages. Each encoded image will // have id from VideoFrame. - virtual void OnFrameEncoded(uint16_t frame_id, + // |peer_name| is name of the peer on which side frame was encoded. + virtual void OnFrameEncoded(absl::string_view peer_name, + uint16_t frame_id, const EncodedImage& encoded_image, const EncoderStats& stats) {} // Will be called for each frame dropped by encoder. - virtual void OnFrameDropped(EncodedImageCallback::DropReason reason) {} + // |peer_name| is name of the peer on which side frame drop was detected. + virtual void OnFrameDropped(absl::string_view peer_name, + EncodedImageCallback::DropReason reason) {} // Will be called before calling the decoder. 
- virtual void OnFramePreDecode(uint16_t frame_id, + // |peer_name| is name of the peer on which side frame was received. + virtual void OnFramePreDecode(absl::string_view peer_name, + uint16_t frame_id, const EncodedImage& encoded_image) {} // Will be called after decoding the frame. - virtual void OnFrameDecoded(const VideoFrame& frame, + // |peer_name| is name of the peer on which side frame was decoded. + virtual void OnFrameDecoded(absl::string_view peer_name, + const VideoFrame& frame, const DecoderStats& stats) {} // Will be called when frame will be obtained from PeerConnection stack. - virtual void OnFrameRendered(const VideoFrame& frame) {} + // |peer_name| is name of the peer on which side frame was rendered. + virtual void OnFrameRendered(absl::string_view peer_name, + const VideoFrame& frame) {} // Will be called if encoder return not WEBRTC_VIDEO_CODEC_OK. // All available codes are listed in // modules/video_coding/include/video_error_codes.h - virtual void OnEncoderError(const VideoFrame& frame, int32_t error_code) {} + // |peer_name| is name of the peer on which side error acquired. + virtual void OnEncoderError(absl::string_view peer_name, + const VideoFrame& frame, + int32_t error_code) {} // Will be called if decoder return not WEBRTC_VIDEO_CODEC_OK. // All available codes are listed in // modules/video_coding/include/video_error_codes.h - virtual void OnDecoderError(uint16_t frame_id, int32_t error_code) {} + // |peer_name| is name of the peer on which side error acquired. + virtual void OnDecoderError(absl::string_view peer_name, + uint16_t frame_id, + int32_t error_code) {} // Will be called every time new stats reports are available for the // Peer Connection identified by |pc_label|. 
- void OnStatsReports(const std::string& pc_label, - const StatsReports& stats_reports) override {} + void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) override {} + + // Will be called before test adds new participant in the middle of a call. + virtual void RegisterParticipantInCall(absl::string_view peer_name) {} // Tells analyzer that analysis complete and it should calculate final // statistics. diff --git a/api/test/video_quality_test_fixture.h b/api/test/video_quality_test_fixture.h index ec07c23cd4..92c398aa54 100644 --- a/api/test/video_quality_test_fixture.h +++ b/api/test/video_quality_test_fixture.h @@ -22,6 +22,7 @@ #include "api/test/simulated_network.h" #include "api/transport/bitrate_settings.h" #include "api/transport/network_control.h" +#include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_config.h" #include "api/video_codecs/video_encoder_factory.h" @@ -31,60 +32,56 @@ namespace webrtc { class VideoQualityTestFixtureInterface { public: // Parameters are grouped into smaller structs to make it easier to set - // the desired elements and skip unused, using aggregate initialization. - // Unfortunately, C++11 (as opposed to C11) doesn't support unnamed structs, - // which makes the implementation of VideoQualityTest a bit uglier. + // the desired elements and skip unused. struct Params { - Params(); - ~Params(); struct CallConfig { - bool send_side_bwe; - bool generic_descriptor; + bool send_side_bwe = false; + bool generic_descriptor = false; BitrateConstraints call_bitrate_config; - int num_thumbnails; + int num_thumbnails = 0; // Indicates if secondary_(video|ss|screenshare) structures are used. 
- bool dual_video; + bool dual_video = false; } call; struct Video { - bool enabled; - size_t width; - size_t height; - int32_t fps; - int min_bitrate_bps; - int target_bitrate_bps; - int max_bitrate_bps; - bool suspend_below_min_bitrate; - std::string codec; - int num_temporal_layers; - int selected_tl; - int min_transmit_bps; - bool ulpfec; - bool flexfec; - bool automatic_scaling; + bool enabled = false; + size_t width = 640; + size_t height = 480; + int32_t fps = 30; + int min_bitrate_bps = 50; + int target_bitrate_bps = 800; + int max_bitrate_bps = 800; + bool suspend_below_min_bitrate = false; + std::string codec = "VP8"; + int num_temporal_layers = 1; + int selected_tl = -1; + int min_transmit_bps = 0; + bool ulpfec = false; + bool flexfec = false; + bool automatic_scaling = false; std::string clip_path; // "Generator" to generate frames instead. - size_t capture_device_index; + size_t capture_device_index = 0; SdpVideoFormat::Parameters sdp_params; - double encoder_overshoot_factor; + double encoder_overshoot_factor = 0.0; } video[2]; struct Audio { - bool enabled; - bool sync_video; - bool dtx; - bool use_real_adm; + bool enabled = false; + bool sync_video = false; + bool dtx = false; + bool use_real_adm = false; absl::optional ana_config; } audio; struct Screenshare { - bool enabled; - bool generate_slides; - int32_t slide_change_interval; - int32_t scroll_duration; + bool enabled = false; + bool generate_slides = false; + int32_t slide_change_interval = 10; + int32_t scroll_duration = 0; std::vector slides; } screenshare[2]; struct Analyzer { std::string test_label; - double avg_psnr_threshold; // (*) - double avg_ssim_threshold; // (*) - int test_durations_secs; + double avg_psnr_threshold = 0.0; // (*) + double avg_ssim_threshold = 0.0; // (*) + int test_durations_secs = 0; std::string graph_data_output_filename; std::string graph_title; } analyzer; @@ -95,14 +92,14 @@ class VideoQualityTestFixtureInterface { absl::optional config; struct SS { // 
Spatial scalability. std::vector streams; // If empty, one stream is assumed. - size_t selected_stream; - int num_spatial_layers; - int selected_sl; - InterLayerPredMode inter_layer_pred; + size_t selected_stream = 0; + int num_spatial_layers = 0; + int selected_sl = -1; + InterLayerPredMode inter_layer_pred = InterLayerPredMode::kOn; // If empty, bitrates are generated in VP9Impl automatically. std::vector spatial_layers; // If set, default parameters will be used instead of |streams|. - bool infer_streams; + bool infer_streams = false; } ss[2]; struct Logging { std::string rtc_event_log_name; diff --git a/api/test/videocodec_test_fixture.h b/api/test/videocodec_test_fixture.h index afb3f8a5e4..e0f804fe46 100644 --- a/api/test/videocodec_test_fixture.h +++ b/api/test/videocodec_test_fixture.h @@ -17,6 +17,7 @@ #include "api/test/videocodec_test_stats.h" #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_factory.h" +#include "media/base/h264_profile_level_id.h" #include "modules/video_coding/include/video_codec_interface.h" namespace webrtc { @@ -58,7 +59,7 @@ class VideoCodecTestFixture { class EncodedFrameChecker { public: virtual ~EncodedFrameChecker() = default; - virtual void CheckEncodedFrame(webrtc::VideoCodecType codec, + virtual void CheckEncodedFrame(VideoCodecType codec, const EncodedImage& encoded_frame) const = 0; }; @@ -87,6 +88,17 @@ class VideoCodecTestFixture { // Plain name of YUV file to process without file extension. std::string filename; + // Dimensions of test clip. Falls back to (codec_settings.width/height) if + // not set. + absl::optional clip_width; + absl::optional clip_height; + // Framerate of input clip. Defaults to 30fps if not set. + absl::optional clip_fps; + + // The resolution at which psnr/ssim comparisons should be made. Frames + // will be scaled to this size if different. + absl::optional reference_width; + absl::optional reference_height; // File to process. 
This must be a video file in the YUV format. std::string filepath; @@ -111,16 +123,16 @@ class VideoCodecTestFixture { bool encode_in_real_time = false; // Codec settings to use. - webrtc::VideoCodec codec_settings; + VideoCodec codec_settings; // Name of the codec being tested. std::string codec_name; // H.264 specific settings. struct H264CodecSettings { - H264::Profile profile = H264::kProfileConstrainedBaseline; + H264Profile profile = H264Profile::kProfileConstrainedBaseline; H264PacketizationMode packetization_mode = - webrtc::H264PacketizationMode::NonInterleaved; + H264PacketizationMode::NonInterleaved; } h264_codec_settings; // Custom checker that will be called for each frame. @@ -137,6 +149,9 @@ class VideoCodecTestFixture { bool save_encoded_ivf = false; bool save_decoded_y4m = false; } visualization_params; + + // Enables quality analysis for dropped frames. + bool analyze_quality_of_dropped_frames = false; }; virtual ~VideoCodecTestFixture() = default; diff --git a/api/test/videocodec_test_stats.cc b/api/test/videocodec_test_stats.cc index b2f88a4661..b973dc2d12 100644 --- a/api/test/videocodec_test_stats.cc +++ b/api/test/videocodec_test_stats.cc @@ -24,71 +24,91 @@ VideoCodecTestStats::FrameStatistics::FrameStatistics(size_t frame_number, std::string VideoCodecTestStats::FrameStatistics::ToString() const { rtc::StringBuilder ss; - ss << "frame_number " << frame_number; - ss << " decoded_width " << decoded_width; - ss << " decoded_height " << decoded_height; - ss << " spatial_idx " << spatial_idx; - ss << " temporal_idx " << temporal_idx; - ss << " inter_layer_predicted " << inter_layer_predicted; - ss << " non_ref_for_inter_layer_pred " << non_ref_for_inter_layer_pred; - ss << " frame_type " << static_cast(frame_type); - ss << " length_bytes " << length_bytes; - ss << " qp " << qp; - ss << " psnr " << psnr; - ss << " psnr_y " << psnr_y; - ss << " psnr_u " << psnr_u; - ss << " psnr_v " << psnr_v; - ss << " ssim " << ssim; - ss << " encode_time_us " 
<< encode_time_us; - ss << " decode_time_us " << decode_time_us; - ss << " rtp_timestamp " << rtp_timestamp; - ss << " target_bitrate_kbps " << target_bitrate_kbps; - ss << " target_framerate_fps " << target_framerate_fps; + for (const auto& entry : ToMap()) { + if (ss.size() > 0) { + ss << " "; + } + ss << entry.first << " " << entry.second; + } return ss.Release(); } +std::map VideoCodecTestStats::FrameStatistics::ToMap() + const { + std::map map; + map["frame_number"] = std::to_string(frame_number); + map["decoded_width"] = std::to_string(decoded_width); + map["decoded_height"] = std::to_string(decoded_height); + map["spatial_idx"] = std::to_string(spatial_idx); + map["temporal_idx"] = std::to_string(temporal_idx); + map["inter_layer_predicted"] = std::to_string(inter_layer_predicted); + map["non_ref_for_inter_layer_pred"] = + std::to_string(non_ref_for_inter_layer_pred); + map["frame_type"] = std::to_string(static_cast(frame_type)); + map["length_bytes"] = std::to_string(length_bytes); + map["qp"] = std::to_string(qp); + map["psnr"] = std::to_string(psnr); + map["psnr_y"] = std::to_string(psnr_y); + map["psnr_u"] = std::to_string(psnr_u); + map["psnr_v"] = std::to_string(psnr_v); + map["ssim"] = std::to_string(ssim); + map["encode_time_us"] = std::to_string(encode_time_us); + map["decode_time_us"] = std::to_string(decode_time_us); + map["rtp_timestamp"] = std::to_string(rtp_timestamp); + map["target_bitrate_kbps"] = std::to_string(target_bitrate_kbps); + map["target_framerate_fps"] = std::to_string(target_framerate_fps); + return map; +} + std::string VideoCodecTestStats::VideoStatistics::ToString( std::string prefix) const { rtc::StringBuilder ss; - ss << prefix << "target_bitrate_kbps: " << target_bitrate_kbps; - ss << "\n" << prefix << "input_framerate_fps: " << input_framerate_fps; - ss << "\n" << prefix << "spatial_idx: " << spatial_idx; - ss << "\n" << prefix << "temporal_idx: " << temporal_idx; - ss << "\n" << prefix << "width: " << width; - ss << "\n" 
<< prefix << "height: " << height; - ss << "\n" << prefix << "length_bytes: " << length_bytes; - ss << "\n" << prefix << "bitrate_kbps: " << bitrate_kbps; - ss << "\n" << prefix << "framerate_fps: " << framerate_fps; - ss << "\n" << prefix << "enc_speed_fps: " << enc_speed_fps; - ss << "\n" << prefix << "dec_speed_fps: " << dec_speed_fps; - ss << "\n" << prefix << "avg_delay_sec: " << avg_delay_sec; - ss << "\n" - << prefix << "max_key_frame_delay_sec: " << max_key_frame_delay_sec; - ss << "\n" - << prefix << "max_delta_frame_delay_sec: " << max_delta_frame_delay_sec; - ss << "\n" - << prefix << "time_to_reach_target_bitrate_sec: " - << time_to_reach_target_bitrate_sec; - ss << "\n" - << prefix << "avg_key_frame_size_bytes: " << avg_key_frame_size_bytes; - ss << "\n" - << prefix << "avg_delta_frame_size_bytes: " << avg_delta_frame_size_bytes; - ss << "\n" << prefix << "avg_qp: " << avg_qp; - ss << "\n" << prefix << "avg_psnr: " << avg_psnr; - ss << "\n" << prefix << "min_psnr: " << min_psnr; - ss << "\n" << prefix << "avg_ssim: " << avg_ssim; - ss << "\n" << prefix << "min_ssim: " << min_ssim; - ss << "\n" << prefix << "num_input_frames: " << num_input_frames; - ss << "\n" << prefix << "num_encoded_frames: " << num_encoded_frames; - ss << "\n" << prefix << "num_decoded_frames: " << num_decoded_frames; - ss << "\n" - << prefix - << "num_dropped_frames: " << num_input_frames - num_encoded_frames; - ss << "\n" << prefix << "num_key_frames: " << num_key_frames; - ss << "\n" << prefix << "num_spatial_resizes: " << num_spatial_resizes; - ss << "\n" << prefix << "max_nalu_size_bytes: " << max_nalu_size_bytes; + for (const auto& entry : ToMap()) { + if (ss.size() > 0) { + ss << "\n"; + } + ss << prefix << entry.first << ": " << entry.second; + } return ss.Release(); } +std::map VideoCodecTestStats::VideoStatistics::ToMap() + const { + std::map map; + map["target_bitrate_kbps"] = std::to_string(target_bitrate_kbps); + map["input_framerate_fps"] = 
std::to_string(input_framerate_fps); + map["spatial_idx"] = std::to_string(spatial_idx); + map["temporal_idx"] = std::to_string(temporal_idx); + map["width"] = std::to_string(width); + map["height"] = std::to_string(height); + map["length_bytes"] = std::to_string(length_bytes); + map["bitrate_kbps"] = std::to_string(bitrate_kbps); + map["framerate_fps"] = std::to_string(framerate_fps); + map["enc_speed_fps"] = std::to_string(enc_speed_fps); + map["dec_speed_fps"] = std::to_string(dec_speed_fps); + map["avg_delay_sec"] = std::to_string(avg_delay_sec); + map["max_key_frame_delay_sec"] = std::to_string(max_key_frame_delay_sec); + map["max_delta_frame_delay_sec"] = std::to_string(max_delta_frame_delay_sec); + map["time_to_reach_target_bitrate_sec"] = + std::to_string(time_to_reach_target_bitrate_sec); + map["avg_key_frame_size_bytes"] = std::to_string(avg_key_frame_size_bytes); + map["avg_delta_frame_size_bytes"] = + std::to_string(avg_delta_frame_size_bytes); + map["avg_qp"] = std::to_string(avg_qp); + map["avg_psnr"] = std::to_string(avg_psnr); + map["min_psnr"] = std::to_string(min_psnr); + map["avg_ssim"] = std::to_string(avg_ssim); + map["min_ssim"] = std::to_string(min_ssim); + map["num_input_frames"] = std::to_string(num_input_frames); + map["num_encoded_frames"] = std::to_string(num_encoded_frames); + map["num_decoded_frames"] = std::to_string(num_decoded_frames); + map["num_dropped_frames"] = + std::to_string(num_input_frames - num_encoded_frames); + map["num_key_frames"] = std::to_string(num_key_frames); + map["num_spatial_resizes"] = std::to_string(num_spatial_resizes); + map["max_nalu_size_bytes"] = std::to_string(max_nalu_size_bytes); + return map; +} + } // namespace test } // namespace webrtc diff --git a/api/test/videocodec_test_stats.h b/api/test/videocodec_test_stats.h index 63e15768dc..02a18a71d9 100644 --- a/api/test/videocodec_test_stats.h +++ b/api/test/videocodec_test_stats.h @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ 
-33,6 +34,9 @@ class VideoCodecTestStats { std::string ToString() const; + // Returns name -> value text map of frame statistics. + std::map ToMap() const; + size_t frame_number = 0; size_t rtp_timestamp = 0; @@ -67,6 +71,7 @@ class VideoCodecTestStats { int qp = -1; // Quality. + bool quality_analysis_successful = false; float psnr_y = 0.0f; float psnr_u = 0.0f; float psnr_v = 0.0f; @@ -77,6 +82,9 @@ class VideoCodecTestStats { struct VideoStatistics { std::string ToString(std::string prefix) const; + // Returns name -> value text map of video statistics. + std::map ToMap() const; + size_t target_bitrate_kbps = 0; float input_framerate_fps = 0.0f; diff --git a/api/transport/BUILD.gn b/api/transport/BUILD.gn index 0f07301fe4..30955273b0 100644 --- a/api/transport/BUILD.gn +++ b/api/transport/BUILD.gn @@ -14,10 +14,8 @@ rtc_library("bitrate_settings") { "bitrate_settings.cc", "bitrate_settings.h", ] - deps = [ - "../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", - ] + deps = [ "../../rtc_base/system:rtc_export" ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("enums") { @@ -35,12 +33,13 @@ rtc_library("network_control") { deps = [ ":webrtc_key_value_config", - "../../rtc_base:deprecation", "../rtc_event_log", "../units:data_rate", "../units:data_size", "../units:time_delta", "../units:timestamp", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/types:optional", ] @@ -49,10 +48,8 @@ rtc_library("network_control") { rtc_source_set("webrtc_key_value_config") { visibility = [ "*" ] sources = [ "webrtc_key_value_config.h" ] - deps = [ - "../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/strings", - ] + deps = [ "../../rtc_base/system:rtc_export" ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_library("field_trial_based_config") { @@ -64,26 +61,20 @@ rtc_library("field_trial_based_config") { deps = [ 
":webrtc_key_value_config", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } +# TODO(nisse): Rename? rtc_source_set("datagram_transport_interface") { visibility = [ "*" ] - sources = [ - "congestion_control_interface.h", - "data_channel_transport_interface.h", - "datagram_transport_interface.h", - ] + sources = [ "data_channel_transport_interface.h" ] deps = [ - ":network_control", "..:array_view", "..:rtc_error", "../../rtc_base:rtc_base_approved", - "../units:data_rate", - "../units:timestamp", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("goog_cc") { @@ -97,8 +88,13 @@ rtc_library("goog_cc") { ":webrtc_key_value_config", "..:network_state_predictor_api", "../../modules/congestion_controller/goog_cc", - "../../rtc_base:deprecation", ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] +} + +rtc_source_set("sctp_transport_factory_interface") { + visibility = [ "*" ] + sources = [ "sctp_transport_factory_interface.h" ] } rtc_source_set("stun_types") { @@ -109,10 +105,14 @@ rtc_source_set("stun_types") { ] deps = [ + "../../api:array_view", "../../rtc_base:checks", + "../../rtc_base:ip_address", "../../rtc_base:rtc_base", "../../rtc_base:rtc_base_approved", + "../../rtc_base:socket_address", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } if (rtc_include_tests) { @@ -147,8 +147,8 @@ if (rtc_include_tests) { deps = [ ":stun_types", "../../rtc_base", - "../../rtc_base:macromagic", "../../rtc_base:rtc_base_approved", + "../../rtc_base:socket_address", "../../test:test_support", "//testing/gtest", ] diff --git a/api/transport/congestion_control_interface.h b/api/transport/congestion_control_interface.h deleted file mode 100644 index 40552cb4ff..0000000000 --- a/api/transport/congestion_control_interface.h +++ /dev/null @@ -1,75 +0,0 @@ -/* Copyright 2018 The 
WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// This is EXPERIMENTAL interface for media and datagram transports. - -#ifndef API_TRANSPORT_CONGESTION_CONTROL_INTERFACE_H_ -#define API_TRANSPORT_CONGESTION_CONTROL_INTERFACE_H_ - -#include -#include -#include - -#include "api/transport/network_control.h" -#include "api/units/data_rate.h" - -namespace webrtc { - -// TODO(nisse): Defined together with MediaTransportInterface. But we should use -// types that aren't tied to media, so that MediaTransportInterface can depend -// on CongestionControlInterface, but not the other way around. -// api/transport/network_control.h may be a reasonable place. -class MediaTransportRttObserver; -struct MediaTransportAllocatedBitrateLimits; -struct MediaTransportTargetRateConstraints; - -// Defines congestion control feedback interface for media and datagram -// transports. -class CongestionControlInterface { - public: - virtual ~CongestionControlInterface() = default; - - // Updates allocation limits. - virtual void SetAllocatedBitrateLimits( - const MediaTransportAllocatedBitrateLimits& limits) = 0; - - // Sets starting rate. - virtual void SetTargetBitrateLimits( - const MediaTransportTargetRateConstraints& target_rate_constraints) = 0; - - // Intended for receive side. AddRttObserver registers an observer to be - // called for each RTT measurement, typically once per ACK. Before media - // transport is destructed the observer must be unregistered. - // - // TODO(sukhanov): Looks like AddRttObserver and RemoveRttObserver were - // never implemented for media transport, so keeping noop implementation. 
- virtual void AddRttObserver(MediaTransportRttObserver* observer) {} - virtual void RemoveRttObserver(MediaTransportRttObserver* observer) {} - - // Adds a target bitrate observer. Before media transport is destructed - // the observer must be unregistered (by calling - // RemoveTargetTransferRateObserver). - // A newly registered observer will be called back with the latest recorded - // target rate, if available. - virtual void AddTargetTransferRateObserver( - TargetTransferRateObserver* observer) = 0; - - // Removes an existing |observer| from observers. If observer was never - // registered, an error is logged and method does nothing. - virtual void RemoveTargetTransferRateObserver( - TargetTransferRateObserver* observer) = 0; - - // Returns the last known target transfer rate as reported to the above - // observers. - virtual absl::optional GetLatestTargetTransferRate() = 0; -}; - -} // namespace webrtc - -#endif // API_TRANSPORT_CONGESTION_CONTROL_INTERFACE_H_ diff --git a/api/transport/data_channel_transport_interface.h b/api/transport/data_channel_transport_interface.h index 671deffc6e..2b2f5d2e6d 100644 --- a/api/transport/data_channel_transport_interface.h +++ b/api/transport/data_channel_transport_interface.h @@ -35,8 +35,8 @@ enum class DataMessageType { // sent reliably and in-order, even if the data channel is configured for // unreliable delivery. struct SendDataParams { - SendDataParams(); - SendDataParams(const SendDataParams&); + SendDataParams() = default; + SendDataParams(const SendDataParams&) = default; DataMessageType type = DataMessageType::kText; @@ -47,15 +47,15 @@ struct SendDataParams { // If set, the maximum number of times this message may be // retransmitted by the transport before it is dropped. // Setting this value to zero disables retransmission. - // Must be non-negative. |max_rtx_count| and |max_rtx_ms| may not be set - // simultaneously. + // Valid values are in the range [0-UINT16_MAX]. 
+ // |max_rtx_count| and |max_rtx_ms| may not be set simultaneously. absl::optional max_rtx_count; // If set, the maximum number of milliseconds for which the transport // may retransmit this message before it is dropped. // Setting this value to zero disables retransmission. - // Must be non-negative. |max_rtx_count| and |max_rtx_ms| may not be set - // simultaneously. + // Valid values are in the range [0-UINT16_MAX]. + // |max_rtx_count| and |max_rtx_ms| may not be set simultaneously. absl::optional max_rtx_ms; }; @@ -88,7 +88,7 @@ class DataChannelSink { // Callback issued when the data channel becomes unusable (closed). // TODO(https://crbug.com/webrtc/10360): Make pure virtual when all // consumers updated. - virtual void OnTransportClosed() {} + virtual void OnTransportClosed(RTCError error) {} }; // Transport for data channels. diff --git a/api/transport/datagram_transport_interface.h b/api/transport/datagram_transport_interface.h deleted file mode 100644 index 01736b978d..0000000000 --- a/api/transport/datagram_transport_interface.h +++ /dev/null @@ -1,151 +0,0 @@ -/* Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// This is EXPERIMENTAL interface for media and datagram transports. 
- -#ifndef API_TRANSPORT_DATAGRAM_TRANSPORT_INTERFACE_H_ -#define API_TRANSPORT_DATAGRAM_TRANSPORT_INTERFACE_H_ - -#include -#include -#include - -#include "absl/types/optional.h" -#include "api/array_view.h" -#include "api/rtc_error.h" -#include "api/transport/congestion_control_interface.h" -#include "api/transport/data_channel_transport_interface.h" -#include "api/units/data_rate.h" -#include "api/units/timestamp.h" - -namespace rtc { -class PacketTransportInternal; -} // namespace rtc - -namespace webrtc { - -class MediaTransportStateCallback; - -typedef int64_t DatagramId; - -struct DatagramAck { - // |datagram_id| is same as passed in - // DatagramTransportInterface::SendDatagram. - DatagramId datagram_id; - - // The timestamp at which the remote peer received the identified datagram, - // according to that peer's clock. - Timestamp receive_timestamp = Timestamp::MinusInfinity(); -}; - -// All sink methods are called on network thread. -class DatagramSinkInterface { - public: - virtual ~DatagramSinkInterface() {} - - // Called when new packet is received. - virtual void OnDatagramReceived(rtc::ArrayView data) = 0; - - // Called when datagram is actually sent (datragram can be delayed due - // to congestion control or fusing). |datagram_id| is same as passed in - // DatagramTransportInterface::SendDatagram. - virtual void OnDatagramSent(DatagramId datagram_id) = 0; - - // Called when datagram is ACKed. - virtual void OnDatagramAcked(const DatagramAck& datagram_ack) = 0; - - // Called when a datagram is lost. - virtual void OnDatagramLost(DatagramId datagram_id) = 0; -}; - -// Datagram transport allows to send and receive unreliable packets (datagrams) -// and receive feedback from congestion control (via -// CongestionControlInterface). The idea is to send RTP packets as datagrams and -// have underlying implementation of datagram transport to use QUIC datagram -// protocol. 
-class DatagramTransportInterface : public DataChannelTransportInterface { - public: - virtual ~DatagramTransportInterface() = default; - - // Connect the datagram transport to the ICE transport. - // The implementation must be able to ignore incoming packets that don't - // belong to it. - virtual void Connect(rtc::PacketTransportInternal* packet_transport) = 0; - - // Returns congestion control feedback interface or nullptr if datagram - // transport does not implement congestion control. - // - // Note that right now datagram transport is used without congestion control, - // but we plan to use it in the future. - virtual CongestionControlInterface* congestion_control() = 0; - - // Sets a state observer callback. Before datagram transport is destroyed, the - // callback must be unregistered by setting it to nullptr. - // A newly registered callback will be called with the current state. - // Datagram transport does not invoke this callback concurrently. - virtual void SetTransportStateCallback( - MediaTransportStateCallback* callback) = 0; - - // Start asynchronous send of datagram. The status returned by this method - // only pertains to the synchronous operations (e.g. serialization / - // packetization), not to the asynchronous operation. - // - // Datagrams larger than GetLargestDatagramSize() will fail and return error. - // - // Datagrams are sent in FIFO order. - // - // |datagram_id| is only used in ACK/LOST notifications in - // DatagramSinkInterface and does not need to be unique. - virtual RTCError SendDatagram(rtc::ArrayView data, - DatagramId datagram_id) = 0; - - // Returns maximum size of datagram message, does not change. - // TODO(sukhanov): Because value may be undefined before connection setup - // is complete, consider returning error when called before connection is - // established. Currently returns hardcoded const, because integration - // prototype may call before connection is established. 
- virtual size_t GetLargestDatagramSize() const = 0; - - // Sets packet sink. Sink must be unset by calling - // SetDataTransportSink(nullptr) before the data transport is destroyed or - // before new sink is set. - virtual void SetDatagramSink(DatagramSinkInterface* sink) = 0; - - // Retrieves transport parameters for this datagram transport. May be called - // on either client- or server-perspective transports. - // - // For servers, the parameters represent what kind of connections and data the - // server is prepared to accept. This is generally a superset of acceptable - // parameters. - // - // For clients, the parameters echo the server configuration used to create - // the client, possibly removing any fields or parameters which the client - // does not understand. - virtual std::string GetTransportParameters() const = 0; - - // Sets remote transport parameters. |remote_params| is a serialized string - // of opaque parameters, understood by the datagram transport implementation. - // Returns an error if |remote_params| are not compatible with this transport. - // - // TODO(mellem): Make pure virtual. The default implementation maintains - // original negotiation behavior (negotiation falls back to RTP if the - // remote datagram transport fails to echo exactly the local parameters). 
- virtual RTCError SetRemoteTransportParameters( - absl::string_view remote_params) { - if (remote_params == GetTransportParameters()) { - return RTCError::OK(); - } - return RTCError(RTCErrorType::UNSUPPORTED_PARAMETER, - "Local and remote transport parameters do not match"); - } -}; - -} // namespace webrtc - -#endif // API_TRANSPORT_DATAGRAM_TRANSPORT_INTERFACE_H_ diff --git a/api/transport/goog_cc_factory.h b/api/transport/goog_cc_factory.h index b14d6dcd78..e12755d745 100644 --- a/api/transport/goog_cc_factory.h +++ b/api/transport/goog_cc_factory.h @@ -12,9 +12,9 @@ #define API_TRANSPORT_GOOG_CC_FACTORY_H_ #include +#include "absl/base/attributes.h" #include "api/network_state_predictor.h" #include "api/transport/network_control.h" -#include "rtc_base/deprecation.h" namespace webrtc { class RtcEventLog; @@ -31,8 +31,8 @@ class GoogCcNetworkControllerFactory : public NetworkControllerFactoryInterface { public: GoogCcNetworkControllerFactory() = default; - explicit RTC_DEPRECATED GoogCcNetworkControllerFactory( - RtcEventLog* event_log); + ABSL_DEPRECATED("") + explicit GoogCcNetworkControllerFactory(RtcEventLog* event_log); explicit GoogCcNetworkControllerFactory( NetworkStatePredictorFactoryInterface* network_state_predictor_factory); @@ -49,7 +49,8 @@ class GoogCcNetworkControllerFactory // Deprecated, use GoogCcFactoryConfig to enable feedback only mode instead. // Factory to create packet feedback only GoogCC, this can be used for // connections providing packet receive time feedback but no other reports. 
-class RTC_DEPRECATED GoogCcFeedbackNetworkControllerFactory +class ABSL_DEPRECATED("use GoogCcFactoryConfig instead") + GoogCcFeedbackNetworkControllerFactory : public GoogCcNetworkControllerFactory { public: explicit GoogCcFeedbackNetworkControllerFactory(RtcEventLog* event_log); diff --git a/api/transport/media/BUILD.gn b/api/transport/media/BUILD.gn deleted file mode 100644 index 24a364c2e5..0000000000 --- a/api/transport/media/BUILD.gn +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. -# -# Use of this source code is governed by a BSD-style license -# that can be found in the LICENSE file in the root of the source -# tree. An additional intellectual property rights grant can be found -# in the file PATENTS. All contributing project authors may -# be found in the AUTHORS file in the root of the source tree. - -import("../../../webrtc.gni") - -rtc_library("media_transport_interface") { - visibility = [ "*" ] - sources = [ - "media_transport_config.cc", - "media_transport_config.h", - "media_transport_interface.cc", - "media_transport_interface.h", - ] - deps = [ - ":audio_interfaces", - ":video_interfaces", - "..:datagram_transport_interface", - "..:network_control", - "../..:array_view", - "../..:rtc_error", - "../../..:webrtc_common", - "../../../rtc_base", - "../../../rtc_base:checks", - "../../../rtc_base:rtc_base_approved", - "../../../rtc_base:stringutils", - "../../units:data_rate", - "//third_party/abseil-cpp/absl/types:optional", - ] -} - -rtc_library("audio_interfaces") { - visibility = [ "*" ] - sources = [ - "audio_transport.cc", - "audio_transport.h", - ] - deps = [ "../..:array_view" ] -} - -rtc_library("video_interfaces") { - visibility = [ "*" ] - sources = [ - "video_transport.cc", - "video_transport.h", - ] - deps = [ "../../video:encoded_image" ] -} diff --git a/api/transport/media/audio_transport.cc b/api/transport/media/audio_transport.cc deleted file mode 100644 index 
0f5fe8bcf2..0000000000 --- a/api/transport/media/audio_transport.cc +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// This is EXPERIMENTAL interface for media transport. -// -// The goal is to refactor WebRTC code so that audio and video frames -// are sent / received through the media transport interface. This will -// enable different media transport implementations, including QUIC-based -// media transport. - -#include "api/transport/media/audio_transport.h" - -#include - -namespace webrtc { - -MediaTransportEncodedAudioFrame::~MediaTransportEncodedAudioFrame() {} - -MediaTransportEncodedAudioFrame::MediaTransportEncodedAudioFrame( - int sampling_rate_hz, - int starting_sample_index, - int samples_per_channel, - int sequence_number, - FrameType frame_type, - int payload_type, - std::vector encoded_data) - : sampling_rate_hz_(sampling_rate_hz), - starting_sample_index_(starting_sample_index), - samples_per_channel_(samples_per_channel), - sequence_number_(sequence_number), - frame_type_(frame_type), - payload_type_(payload_type), - encoded_data_(std::move(encoded_data)) {} - -MediaTransportEncodedAudioFrame& MediaTransportEncodedAudioFrame::operator=( - const MediaTransportEncodedAudioFrame&) = default; - -MediaTransportEncodedAudioFrame& MediaTransportEncodedAudioFrame::operator=( - MediaTransportEncodedAudioFrame&&) = default; - -MediaTransportEncodedAudioFrame::MediaTransportEncodedAudioFrame( - const MediaTransportEncodedAudioFrame&) = default; - -MediaTransportEncodedAudioFrame::MediaTransportEncodedAudioFrame( - MediaTransportEncodedAudioFrame&&) = default; - -} // 
namespace webrtc diff --git a/api/transport/media/audio_transport.h b/api/transport/media/audio_transport.h deleted file mode 100644 index dcbdcd7afe..0000000000 --- a/api/transport/media/audio_transport.h +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// This is EXPERIMENTAL interface for media transport. -// -// The goal is to refactor WebRTC code so that audio and video frames -// are sent / received through the media transport interface. This will -// enable different media transport implementations, including QUIC-based -// media transport. - -#ifndef API_TRANSPORT_MEDIA_AUDIO_TRANSPORT_H_ -#define API_TRANSPORT_MEDIA_AUDIO_TRANSPORT_H_ - -#include - -#include "api/array_view.h" - -namespace webrtc { - -// Represents encoded audio frame in any encoding (type of encoding is opaque). -// To avoid copying of encoded data use move semantics when passing by value. -class MediaTransportEncodedAudioFrame final { - public: - enum class FrameType { - // Normal audio frame (equivalent to webrtc::kAudioFrameSpeech). - kSpeech, - - // DTX frame (equivalent to webrtc::kAudioFrameCN). - kDiscontinuousTransmission, - // TODO(nisse): Mis-spelled version, update users, then delete. - kDiscountinuousTransmission = kDiscontinuousTransmission, - }; - - MediaTransportEncodedAudioFrame( - // Audio sampling rate, for example 48000. - int sampling_rate_hz, - - // Starting sample index of the frame, i.e. how many audio samples were - // before this frame since the beginning of the call or beginning of time - // in one channel (the starting point should not matter for NetEq). 
In - // WebRTC it is used as a timestamp of the frame. - // TODO(sukhanov): Starting_sample_index is currently adjusted on the - // receiver side in RTP path. Non-RTP implementations should preserve it. - // For NetEq initial offset should not matter so we should consider fixing - // RTP path. - int starting_sample_index, - - // Number of audio samples in audio frame in 1 channel. - int samples_per_channel, - - // Sequence number of the frame in the order sent, it is currently - // required by NetEq, but we can fix NetEq, because starting_sample_index - // should be enough. - int sequence_number, - - // If audio frame is a speech or discontinued transmission. - FrameType frame_type, - - // Opaque payload type. In RTP codepath payload type is stored in RTP - // header. In other implementations it should be simply passed through the - // wire -- it's needed for decoder. - int payload_type, - - // Vector with opaque encoded data. - std::vector encoded_data); - - ~MediaTransportEncodedAudioFrame(); - MediaTransportEncodedAudioFrame(const MediaTransportEncodedAudioFrame&); - MediaTransportEncodedAudioFrame& operator=( - const MediaTransportEncodedAudioFrame& other); - MediaTransportEncodedAudioFrame& operator=( - MediaTransportEncodedAudioFrame&& other); - MediaTransportEncodedAudioFrame(MediaTransportEncodedAudioFrame&&); - - // Getters. - int sampling_rate_hz() const { return sampling_rate_hz_; } - int starting_sample_index() const { return starting_sample_index_; } - int samples_per_channel() const { return samples_per_channel_; } - int sequence_number() const { return sequence_number_; } - - int payload_type() const { return payload_type_; } - FrameType frame_type() const { return frame_type_; } - - rtc::ArrayView encoded_data() const { return encoded_data_; } - - private: - int sampling_rate_hz_; - int starting_sample_index_; - int samples_per_channel_; - - // TODO(sukhanov): Refactor NetEq so we don't need sequence number. 
- // Having sample_index and samples_per_channel should be enough. - int sequence_number_; - - FrameType frame_type_; - - int payload_type_; - - std::vector encoded_data_; -}; - -// Interface for receiving encoded audio frames from MediaTransportInterface -// implementations. -class MediaTransportAudioSinkInterface { - public: - virtual ~MediaTransportAudioSinkInterface() = default; - - // Called when new encoded audio frame is received. - virtual void OnData(uint64_t channel_id, - MediaTransportEncodedAudioFrame frame) = 0; -}; - -} // namespace webrtc -#endif // API_TRANSPORT_MEDIA_AUDIO_TRANSPORT_H_ diff --git a/api/transport/media/media_transport_config.cc b/api/transport/media/media_transport_config.cc deleted file mode 100644 index b9b19cb6f0..0000000000 --- a/api/transport/media/media_transport_config.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "api/transport/media/media_transport_config.h" - -#include "rtc_base/checks.h" -#include "rtc_base/strings/string_builder.h" - -namespace webrtc { - -MediaTransportConfig::MediaTransportConfig(size_t rtp_max_packet_size) - : rtp_max_packet_size(rtp_max_packet_size) { - RTC_DCHECK_GT(rtp_max_packet_size, 0); -} - -std::string MediaTransportConfig::DebugString() const { - rtc::StringBuilder result; - result << "{rtp_max_packet_size: " << rtp_max_packet_size.value_or(0) << "}"; - return result.Release(); -} - -} // namespace webrtc diff --git a/api/transport/media/media_transport_config.h b/api/transport/media/media_transport_config.h deleted file mode 100644 index 7ef65453ae..0000000000 --- a/api/transport/media/media_transport_config.h +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#ifndef API_TRANSPORT_MEDIA_MEDIA_TRANSPORT_CONFIG_H_ -#define API_TRANSPORT_MEDIA_MEDIA_TRANSPORT_CONFIG_H_ - -#include -#include -#include - -#include "absl/types/optional.h" - -namespace webrtc { - -// Media transport config is made available to both transport and audio / video -// layers, but access to individual interfaces should not be open without -// necessity. -struct MediaTransportConfig { - // Default constructor for no-media transport scenarios. - MediaTransportConfig() = default; - - // Constructor for datagram transport scenarios. - explicit MediaTransportConfig(size_t rtp_max_packet_size); - - std::string DebugString() const; - - // If provided, limits RTP packet size (excludes ICE, IP or network overhead). 
- absl::optional rtp_max_packet_size; -}; - -} // namespace webrtc - -#endif // API_TRANSPORT_MEDIA_MEDIA_TRANSPORT_CONFIG_H_ diff --git a/api/transport/media/media_transport_interface.cc b/api/transport/media/media_transport_interface.cc deleted file mode 100644 index 323ddca689..0000000000 --- a/api/transport/media/media_transport_interface.cc +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// This is EXPERIMENTAL interface for media transport. -// -// The goal is to refactor WebRTC code so that audio and video frames -// are sent / received through the media transport interface. This will -// enable different media transport implementations, including QUIC-based -// media transport. 
- -#include "api/transport/media/media_transport_interface.h" - -#include -#include - -#include "api/transport/datagram_transport_interface.h" - -namespace webrtc { - -MediaTransportSettings::MediaTransportSettings() = default; -MediaTransportSettings::MediaTransportSettings(const MediaTransportSettings&) = - default; -MediaTransportSettings& MediaTransportSettings::operator=( - const MediaTransportSettings&) = default; -MediaTransportSettings::~MediaTransportSettings() = default; - -SendDataParams::SendDataParams() = default; -SendDataParams::SendDataParams(const SendDataParams&) = default; - -RTCErrorOr> -MediaTransportFactory::CreateMediaTransport( - rtc::PacketTransportInternal* packet_transport, - rtc::Thread* network_thread, - const MediaTransportSettings& settings) { - return std::unique_ptr(nullptr); -} - -RTCErrorOr> -MediaTransportFactory::CreateMediaTransport( - rtc::Thread* network_thread, - const MediaTransportSettings& settings) { - return std::unique_ptr(nullptr); -} - -RTCErrorOr> -MediaTransportFactory::CreateDatagramTransport( - rtc::Thread* network_thread, - const MediaTransportSettings& settings) { - return std::unique_ptr(nullptr); -} - -std::string MediaTransportFactory::GetTransportName() const { - return ""; -} - -MediaTransportInterface::MediaTransportInterface() = default; -MediaTransportInterface::~MediaTransportInterface() = default; - -absl::optional -MediaTransportInterface::GetTransportParametersOffer() const { - return absl::nullopt; -} - -void MediaTransportInterface::Connect( - rtc::PacketTransportInternal* packet_transport) {} - -void MediaTransportInterface::SetKeyFrameRequestCallback( - MediaTransportKeyFrameRequestCallback* callback) {} - -absl::optional -MediaTransportInterface::GetLatestTargetTransferRate() { - return absl::nullopt; -} - -void MediaTransportInterface::AddNetworkChangeCallback( - MediaTransportNetworkChangeCallback* callback) {} - -void MediaTransportInterface::RemoveNetworkChangeCallback( - 
MediaTransportNetworkChangeCallback* callback) {} - -void MediaTransportInterface::SetFirstAudioPacketReceivedObserver( - AudioPacketReceivedObserver* observer) {} - -void MediaTransportInterface::AddTargetTransferRateObserver( - TargetTransferRateObserver* observer) {} -void MediaTransportInterface::RemoveTargetTransferRateObserver( - TargetTransferRateObserver* observer) {} - -void MediaTransportInterface::AddRttObserver( - MediaTransportRttObserver* observer) {} -void MediaTransportInterface::RemoveRttObserver( - MediaTransportRttObserver* observer) {} - -size_t MediaTransportInterface::GetAudioPacketOverhead() const { - return 0; -} - -void MediaTransportInterface::SetAllocatedBitrateLimits( - const MediaTransportAllocatedBitrateLimits& limits) {} - -} // namespace webrtc diff --git a/api/transport/media/media_transport_interface.h b/api/transport/media/media_transport_interface.h deleted file mode 100644 index dbe68d344b..0000000000 --- a/api/transport/media/media_transport_interface.h +++ /dev/null @@ -1,320 +0,0 @@ -/* Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// This is EXPERIMENTAL interface for media transport. -// -// The goal is to refactor WebRTC code so that audio and video frames -// are sent / received through the media transport interface. This will -// enable different media transport implementations, including QUIC-based -// media transport. 
- -#ifndef API_TRANSPORT_MEDIA_MEDIA_TRANSPORT_INTERFACE_H_ -#define API_TRANSPORT_MEDIA_MEDIA_TRANSPORT_INTERFACE_H_ - -#include -#include -#include - -#include "absl/types/optional.h" -#include "api/array_view.h" -#include "api/rtc_error.h" -#include "api/transport/data_channel_transport_interface.h" -#include "api/transport/media/audio_transport.h" -#include "api/transport/media/video_transport.h" -#include "api/transport/network_control.h" -#include "api/units/data_rate.h" -#include "rtc_base/copy_on_write_buffer.h" -#include "rtc_base/network_route.h" - -namespace rtc { -class PacketTransportInternal; -class Thread; -} // namespace rtc - -namespace webrtc { - -class DatagramTransportInterface; -class RtcEventLog; - -class AudioPacketReceivedObserver { - public: - virtual ~AudioPacketReceivedObserver() = default; - - // Invoked for the first received audio packet on a given channel id. - // It will be invoked once for each channel id. - virtual void OnFirstAudioPacketReceived(int64_t channel_id) = 0; -}; - -// Used to configure stream allocations. -struct MediaTransportAllocatedBitrateLimits { - DataRate min_pacing_rate = DataRate::Zero(); - DataRate max_padding_bitrate = DataRate::Zero(); - DataRate max_total_allocated_bitrate = DataRate::Zero(); -}; - -// Used to configure target bitrate constraints. -// If the value is provided, the constraint is updated. -// If the value is omitted, the value is left unchanged. -struct MediaTransportTargetRateConstraints { - absl::optional min_bitrate; - absl::optional max_bitrate; - absl::optional starting_bitrate; -}; - -// A collection of settings for creation of media transport. 
-struct MediaTransportSettings final { - MediaTransportSettings(); - MediaTransportSettings(const MediaTransportSettings&); - MediaTransportSettings& operator=(const MediaTransportSettings&); - ~MediaTransportSettings(); - - // Group calls are not currently supported, in 1:1 call one side must set - // is_caller = true and another is_caller = false. - bool is_caller; - - // Must be set if a pre-shared key is used for the call. - // TODO(bugs.webrtc.org/9944): This should become zero buffer in the distant - // future. - absl::optional pre_shared_key; - - // If present, this is a config passed from the caller to the answerer in the - // offer. Each media transport knows how to understand its own parameters. - absl::optional remote_transport_parameters; - - // If present, provides the event log that media transport should use. - // Media transport does not own it. The lifetime of |event_log| will exceed - // the lifetime of the instance of MediaTransportInterface instance. - RtcEventLog* event_log = nullptr; -}; - -// Callback to notify about network route changes. -class MediaTransportNetworkChangeCallback { - public: - virtual ~MediaTransportNetworkChangeCallback() = default; - - // Called when the network route is changed, with the new network route. - virtual void OnNetworkRouteChanged( - const rtc::NetworkRoute& new_network_route) = 0; -}; - -// State of the media transport. Media transport begins in the pending state. -// It transitions to writable when it is ready to send media. It may transition -// back to pending if the connection is blocked. It may transition to closed at -// any time. Closed is terminal: a transport will never re-open once closed. -enum class MediaTransportState { - kPending, - kWritable, - kClosed, -}; - -// Callback invoked whenever the state of the media transport changes. -class MediaTransportStateCallback { - public: - virtual ~MediaTransportStateCallback() = default; - - // Invoked whenever the state of the media transport changes. 
- virtual void OnStateChanged(MediaTransportState state) = 0; -}; - -// Callback for RTT measurements on the receive side. -// TODO(nisse): Related interfaces: CallStatsObserver and RtcpRttStats. It's -// somewhat unclear what type of measurement is needed. It's used to configure -// NACK generation and playout buffer. Either raw measurement values or recent -// maximum would make sense for this use. Need consolidation of RTT signalling. -class MediaTransportRttObserver { - public: - virtual ~MediaTransportRttObserver() = default; - - // Invoked when a new RTT measurement is available, typically once per ACK. - virtual void OnRttUpdated(int64_t rtt_ms) = 0; -}; - -// Media transport interface for sending / receiving encoded audio/video frames -// and receiving bandwidth estimate update from congestion control. -class MediaTransportInterface : public DataChannelTransportInterface { - public: - MediaTransportInterface(); - virtual ~MediaTransportInterface(); - - // Retrieves callers config (i.e. media transport offer) that should be passed - // to the callee, before the call is connected. Such config is opaque to SDP - // (sdp just passes it through). The config is a binary blob, so SDP may - // choose to use base64 to serialize it (or any other approach that guarantees - // that the binary blob goes through). This should only be called for the - // caller's perspective. - // - // This may return an unset optional, which means that the given media - // transport is not supported / disabled and shouldn't be reported in SDP. - // - // It may also return an empty string, in which case the media transport is - // supported, but without any extra settings. - // TODO(psla): Make abstract. - virtual absl::optional GetTransportParametersOffer() const; - - // Connect the media transport to the ICE transport. - // The implementation must be able to ignore incoming packets that don't - // belong to it. - // TODO(psla): Make abstract. 
- virtual void Connect(rtc::PacketTransportInternal* packet_transport); - - // Start asynchronous send of audio frame. The status returned by this method - // only pertains to the synchronous operations (e.g. - // serialization/packetization), not to the asynchronous operation. - - virtual RTCError SendAudioFrame(uint64_t channel_id, - MediaTransportEncodedAudioFrame frame) = 0; - - // Start asynchronous send of video frame. The status returned by this method - // only pertains to the synchronous operations (e.g. - // serialization/packetization), not to the asynchronous operation. - virtual RTCError SendVideoFrame( - uint64_t channel_id, - const MediaTransportEncodedVideoFrame& frame) = 0; - - // Used by video sender to be notified on key frame requests. - virtual void SetKeyFrameRequestCallback( - MediaTransportKeyFrameRequestCallback* callback); - - // Requests a keyframe for the particular channel (stream). The caller should - // check that the keyframe is not present in a jitter buffer already (i.e. - // don't request a keyframe if there is one that you will get from the jitter - // buffer in a moment). - virtual RTCError RequestKeyFrame(uint64_t channel_id) = 0; - - // Sets audio sink. Sink must be unset by calling SetReceiveAudioSink(nullptr) - // before the media transport is destroyed or before new sink is set. - virtual void SetReceiveAudioSink(MediaTransportAudioSinkInterface* sink) = 0; - - // Registers a video sink. Before destruction of media transport, you must - // pass a nullptr. - virtual void SetReceiveVideoSink(MediaTransportVideoSinkInterface* sink) = 0; - - // Adds a target bitrate observer. Before media transport is destructed - // the observer must be unregistered (by calling - // RemoveTargetTransferRateObserver). - // A newly registered observer will be called back with the latest recorded - // target rate, if available. 
- virtual void AddTargetTransferRateObserver( - TargetTransferRateObserver* observer); - - // Removes an existing |observer| from observers. If observer was never - // registered, an error is logged and method does nothing. - virtual void RemoveTargetTransferRateObserver( - TargetTransferRateObserver* observer); - - // Sets audio packets observer, which gets informed about incoming audio - // packets. Before destruction, the observer must be unregistered by setting - // nullptr. - // - // This method may be temporary, when the multiplexer is implemented (or - // multiplexer may use it to demultiplex channel ids). - virtual void SetFirstAudioPacketReceivedObserver( - AudioPacketReceivedObserver* observer); - - // Intended for receive side. AddRttObserver registers an observer to be - // called for each RTT measurement, typically once per ACK. Before media - // transport is destructed the observer must be unregistered. - virtual void AddRttObserver(MediaTransportRttObserver* observer); - virtual void RemoveRttObserver(MediaTransportRttObserver* observer); - - // Returns the last known target transfer rate as reported to the above - // observers. - virtual absl::optional GetLatestTargetTransferRate(); - - // Gets the audio packet overhead in bytes. Returned overhead does not include - // transport overhead (ipv4/6, turn channeldata, tcp/udp, etc.). - // If the transport is capable of fusing packets together, this overhead - // might not be a very accurate number. - // TODO(nisse): Deprecated. - virtual size_t GetAudioPacketOverhead() const; - - // Corresponding observers for audio and video overhead. Before destruction, - // the observers must be unregistered by setting nullptr. - - // Registers an observer for network change events. If the network route is - // already established when the callback is added, |callback| will be called - // immediately with the current network route. Before media transport is - // destroyed, the callback must be removed. 
- virtual void AddNetworkChangeCallback( - MediaTransportNetworkChangeCallback* callback); - virtual void RemoveNetworkChangeCallback( - MediaTransportNetworkChangeCallback* callback); - - // Sets a state observer callback. Before media transport is destroyed, the - // callback must be unregistered by setting it to nullptr. - // A newly registered callback will be called with the current state. - // Media transport does not invoke this callback concurrently. - virtual void SetMediaTransportStateCallback( - MediaTransportStateCallback* callback) = 0; - - // Updates allocation limits. - // TODO(psla): Make abstract when downstream implementation implement it. - virtual void SetAllocatedBitrateLimits( - const MediaTransportAllocatedBitrateLimits& limits); - - // Sets starting rate. - // TODO(psla): Make abstract when downstream implementation implement it. - virtual void SetTargetBitrateLimits( - const MediaTransportTargetRateConstraints& target_rate_constraints) {} - - // TODO(sukhanov): RtcEventLogs. -}; - -// If media transport factory is set in peer connection factory, it will be -// used to create media transport for sending/receiving encoded frames and -// this transport will be used instead of default RTP/SRTP transport. -// -// Currently Media Transport negotiation is not supported in SDP. -// If application is using media transport, it must negotiate it before -// setting media transport factory in peer connection. -class MediaTransportFactory { - public: - virtual ~MediaTransportFactory() = default; - - // Creates media transport. - // - Does not take ownership of packet_transport or network_thread. - // - Does not support group calls, in 1:1 call one side must set - // is_caller = true and another is_caller = false. - virtual RTCErrorOr> - CreateMediaTransport(rtc::PacketTransportInternal* packet_transport, - rtc::Thread* network_thread, - const MediaTransportSettings& settings); - - // Creates a new Media Transport in a disconnected state. 
If the media - // transport for the caller is created, one can then call - // MediaTransportInterface::GetTransportParametersOffer on that new instance. - // TODO(psla): Make abstract. - virtual RTCErrorOr> - CreateMediaTransport(rtc::Thread* network_thread, - const MediaTransportSettings& settings); - - // Creates a new Datagram Transport in a disconnected state. If the datagram - // transport for the caller is created, one can then call - // DatagramTransportInterface::GetTransportParametersOffer on that new - // instance. - // - // TODO(sukhanov): Consider separating media and datagram transport factories. - // TODO(sukhanov): Move factory to a separate .h file. - virtual RTCErrorOr> - CreateDatagramTransport(rtc::Thread* network_thread, - const MediaTransportSettings& settings); - - // Gets a transport name which is supported by the implementation. - // Different factories should return different transport names, and at runtime - // it will be checked that different names were used. - // For example, "rtp" or "generic" may be returned by two different - // implementations. - // The value returned by this method must never change in the lifetime of the - // factory. - // TODO(psla): Make abstract. - virtual std::string GetTransportName() const; -}; - -} // namespace webrtc -#endif // API_TRANSPORT_MEDIA_MEDIA_TRANSPORT_INTERFACE_H_ diff --git a/api/transport/media/video_transport.cc b/api/transport/media/video_transport.cc deleted file mode 100644 index a6f5304048..0000000000 --- a/api/transport/media/video_transport.cc +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -// This is EXPERIMENTAL interface for media transport. -// -// The goal is to refactor WebRTC code so that audio and video frames -// are sent / received through the media transport interface. This will -// enable different media transport implementations, including QUIC-based -// media transport. - -#include "api/transport/media/video_transport.h" - -#include - -namespace webrtc { - -MediaTransportEncodedVideoFrame::MediaTransportEncodedVideoFrame() = default; - -MediaTransportEncodedVideoFrame::~MediaTransportEncodedVideoFrame() = default; - -MediaTransportEncodedVideoFrame::MediaTransportEncodedVideoFrame( - int64_t frame_id, - std::vector referenced_frame_ids, - int payload_type, - const webrtc::EncodedImage& encoded_image) - : payload_type_(payload_type), - encoded_image_(encoded_image), - frame_id_(frame_id), - referenced_frame_ids_(std::move(referenced_frame_ids)) {} - -MediaTransportEncodedVideoFrame& MediaTransportEncodedVideoFrame::operator=( - const MediaTransportEncodedVideoFrame&) = default; - -MediaTransportEncodedVideoFrame& MediaTransportEncodedVideoFrame::operator=( - MediaTransportEncodedVideoFrame&&) = default; - -MediaTransportEncodedVideoFrame::MediaTransportEncodedVideoFrame( - const MediaTransportEncodedVideoFrame& o) - : MediaTransportEncodedVideoFrame() { - *this = o; -} - -MediaTransportEncodedVideoFrame::MediaTransportEncodedVideoFrame( - MediaTransportEncodedVideoFrame&& o) - : MediaTransportEncodedVideoFrame() { - *this = std::move(o); -} - -} // namespace webrtc diff --git a/api/transport/media/video_transport.h b/api/transport/media/video_transport.h deleted file mode 100644 index affd2e0d38..0000000000 --- a/api/transport/media/video_transport.h +++ /dev/null @@ -1,101 +0,0 @@ -/* Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// This is EXPERIMENTAL interface for media transport. -// -// The goal is to refactor WebRTC code so that audio and video frames -// are sent / received through the media transport interface. This will -// enable different media transport implementations, including QUIC-based -// media transport. - -#ifndef API_TRANSPORT_MEDIA_VIDEO_TRANSPORT_H_ -#define API_TRANSPORT_MEDIA_VIDEO_TRANSPORT_H_ - -#include - -#include "api/video/encoded_image.h" - -namespace webrtc { - -// Represents encoded video frame, along with the codec information. -class MediaTransportEncodedVideoFrame final { - public: - MediaTransportEncodedVideoFrame(int64_t frame_id, - std::vector referenced_frame_ids, - int payload_type, - const webrtc::EncodedImage& encoded_image); - ~MediaTransportEncodedVideoFrame(); - MediaTransportEncodedVideoFrame(const MediaTransportEncodedVideoFrame&); - MediaTransportEncodedVideoFrame& operator=( - const MediaTransportEncodedVideoFrame& other); - MediaTransportEncodedVideoFrame& operator=( - MediaTransportEncodedVideoFrame&& other); - MediaTransportEncodedVideoFrame(MediaTransportEncodedVideoFrame&&); - - int payload_type() const { return payload_type_; } - const webrtc::EncodedImage& encoded_image() const { return encoded_image_; } - - int64_t frame_id() const { return frame_id_; } - const std::vector& referenced_frame_ids() const { - return referenced_frame_ids_; - } - - // Hack to workaround lack of ownership of the EncodedImage buffer. If we - // don't already own the underlying data, make a copy. - void Retain() { encoded_image_.Retain(); } - - private: - MediaTransportEncodedVideoFrame(); - - int payload_type_; - - // The buffer is not always owned by the encoded image. 
On the sender it means - // that it will need to make a copy using the Retain() method, if it wants to - // deliver it asynchronously. - webrtc::EncodedImage encoded_image_; - - // Frame id uniquely identifies a frame in a stream. It needs to be unique in - // a given time window (i.e. technically unique identifier for the lifetime of - // the connection is not needed, but you need to guarantee that remote side - // got rid of the previous frame_id if you plan to reuse it). - // - // It is required by a remote jitter buffer, and is the same as - // EncodedFrame::id::picture_id. - // - // This data must be opaque to the media transport, and media transport should - // itself not make any assumptions about what it is and its uniqueness. - int64_t frame_id_; - - // A single frame might depend on other frames. This is set of identifiers on - // which the current frame depends. - std::vector referenced_frame_ids_; -}; - -// Interface for receiving encoded video frames from MediaTransportInterface -// implementations. -class MediaTransportVideoSinkInterface { - public: - virtual ~MediaTransportVideoSinkInterface() = default; - - // Called when new encoded video frame is received. - virtual void OnData(uint64_t channel_id, - MediaTransportEncodedVideoFrame frame) = 0; -}; - -// Interface for video sender to be notified of received key frame request. -class MediaTransportKeyFrameRequestCallback { - public: - virtual ~MediaTransportKeyFrameRequestCallback() = default; - - // Called when a key frame request is received on the transport. 
- virtual void OnKeyFrameRequested(uint64_t channel_id) = 0; -}; - -} // namespace webrtc -#endif // API_TRANSPORT_MEDIA_VIDEO_TRANSPORT_H_ diff --git a/api/transport/network_control.h b/api/transport/network_control.h index 6fc1f7c0d1..c2b005e713 100644 --- a/api/transport/network_control.h +++ b/api/transport/network_control.h @@ -61,42 +61,42 @@ class NetworkControllerInterface { virtual ~NetworkControllerInterface() = default; // Called when network availabilty changes. - virtual NetworkControlUpdate OnNetworkAvailability(NetworkAvailability) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnNetworkAvailability( + NetworkAvailability) = 0; // Called when the receiving or sending endpoint changes address. - virtual NetworkControlUpdate OnNetworkRouteChange(NetworkRouteChange) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnNetworkRouteChange( + NetworkRouteChange) = 0; // Called periodically with a periodicy as specified by // NetworkControllerFactoryInterface::GetProcessInterval. - virtual NetworkControlUpdate OnProcessInterval(ProcessInterval) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnProcessInterval( + ProcessInterval) = 0; // Called when remotely calculated bitrate is received. - virtual NetworkControlUpdate OnRemoteBitrateReport(RemoteBitrateReport) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnRemoteBitrateReport( + RemoteBitrateReport) = 0; // Called round trip time has been calculated by protocol specific mechanisms. - virtual NetworkControlUpdate OnRoundTripTimeUpdate(RoundTripTimeUpdate) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnRoundTripTimeUpdate( + RoundTripTimeUpdate) = 0; // Called when a packet is sent on the network. 
- virtual NetworkControlUpdate OnSentPacket(SentPacket) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnSentPacket( + SentPacket) = 0; // Called when a packet is received from the remote client. - virtual NetworkControlUpdate OnReceivedPacket(ReceivedPacket) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnReceivedPacket( + ReceivedPacket) = 0; // Called when the stream specific configuration has been updated. - virtual NetworkControlUpdate OnStreamsConfig(StreamsConfig) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnStreamsConfig( + StreamsConfig) = 0; // Called when target transfer rate constraints has been changed. - virtual NetworkControlUpdate OnTargetRateConstraints(TargetRateConstraints) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnTargetRateConstraints( + TargetRateConstraints) = 0; // Called when a protocol specific calculation of packet loss has been made. - virtual NetworkControlUpdate OnTransportLossReport(TransportLossReport) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnTransportLossReport( + TransportLossReport) = 0; // Called with per packet feedback regarding receive time. - virtual NetworkControlUpdate OnTransportPacketsFeedback( - TransportPacketsFeedback) ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnTransportPacketsFeedback( + TransportPacketsFeedback) = 0; // Called with network state estimate updates. 
- virtual NetworkControlUpdate OnNetworkStateEstimate(NetworkStateEstimate) - ABSL_MUST_USE_RESULT = 0; + ABSL_MUST_USE_RESULT virtual NetworkControlUpdate OnNetworkStateEstimate( + NetworkStateEstimate) = 0; }; // NetworkControllerFactoryInterface is an interface for creating a network diff --git a/api/transport/network_types.cc b/api/transport/network_types.cc index 88b67b3a47..7451940151 100644 --- a/api/transport/network_types.cc +++ b/api/transport/network_types.cc @@ -48,7 +48,7 @@ std::vector TransportPacketsFeedback::ReceivedWithSendInfo() const { std::vector res; for (const PacketResult& fb : packet_feedbacks) { - if (fb.receive_time.IsFinite()) { + if (fb.IsReceived()) { res.push_back(fb); } } @@ -58,7 +58,7 @@ std::vector TransportPacketsFeedback::ReceivedWithSendInfo() std::vector TransportPacketsFeedback::LostWithSendInfo() const { std::vector res; for (const PacketResult& fb : packet_feedbacks) { - if (fb.receive_time.IsPlusInfinity()) { + if (!fb.IsReceived()) { res.push_back(fb); } } @@ -74,7 +74,7 @@ std::vector TransportPacketsFeedback::SortedByReceiveTime() const { std::vector res; for (const PacketResult& fb : packet_feedbacks) { - if (fb.receive_time.IsFinite()) { + if (fb.IsReceived()) { res.push_back(fb); } } diff --git a/api/transport/network_types.h b/api/transport/network_types.h index 10fc0beedf..4e96b0f12e 100644 --- a/api/transport/network_types.h +++ b/api/transport/network_types.h @@ -19,7 +19,6 @@ #include "api/units/data_size.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" -#include "rtc_base/deprecation.h" namespace webrtc { @@ -159,6 +158,8 @@ struct PacketResult { PacketResult(const PacketResult&); ~PacketResult(); + inline bool IsReceived() const { return !receive_time.IsPlusInfinity(); } + SentPacket sent_packet; Timestamp receive_time = Timestamp::PlusInfinity(); }; diff --git a/api/transport/rtp/BUILD.gn b/api/transport/rtp/BUILD.gn index b0849502c8..7b01169360 100644 --- a/api/transport/rtp/BUILD.gn +++ 
b/api/transport/rtp/BUILD.gn @@ -14,15 +14,20 @@ rtc_source_set("rtp_source") { deps = [ "../../../api:rtp_headers", "../../../rtc_base:checks", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("dependency_descriptor") { visibility = [ "*" ] - sources = [ "dependency_descriptor.h" ] - deps = [ + sources = [ + "dependency_descriptor.cc", + "dependency_descriptor.h", + ] + deps = [ "../../../rtc_base:checks" ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector", + "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] } diff --git a/api/transport/rtp/dependency_descriptor.cc b/api/transport/rtp/dependency_descriptor.cc new file mode 100644 index 0000000000..2a9b6d9a71 --- /dev/null +++ b/api/transport/rtp/dependency_descriptor.cc @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "api/transport/rtp/dependency_descriptor.h" + +#include "absl/container/inlined_vector.h" +#include "absl/strings/string_view.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +constexpr int DependencyDescriptor::kMaxSpatialIds; +constexpr int DependencyDescriptor::kMaxTemporalIds; +constexpr int DependencyDescriptor::kMaxTemplates; +constexpr int DependencyDescriptor::kMaxDecodeTargets; + +namespace webrtc_impl { + +absl::InlinedVector StringToDecodeTargetIndications( + absl::string_view symbols) { + absl::InlinedVector dtis; + dtis.reserve(symbols.size()); + for (char symbol : symbols) { + DecodeTargetIndication indication; + switch (symbol) { + case '-': + indication = DecodeTargetIndication::kNotPresent; + break; + case 'D': + indication = DecodeTargetIndication::kDiscardable; + break; + case 'R': + indication = DecodeTargetIndication::kRequired; + break; + case 'S': + indication = DecodeTargetIndication::kSwitch; + break; + default: + RTC_NOTREACHED(); + } + dtis.push_back(indication); + } + return dtis; +} + +} // namespace webrtc_impl +} // namespace webrtc diff --git a/api/transport/rtp/dependency_descriptor.h b/api/transport/rtp/dependency_descriptor.h index a488f56dfd..6967c83517 100644 --- a/api/transport/rtp/dependency_descriptor.h +++ b/api/transport/rtp/dependency_descriptor.h @@ -13,10 +13,12 @@ #include +#include #include #include #include "absl/container/inlined_vector.h" +#include "absl/strings/string_view.h" #include "absl/types/optional.h" namespace webrtc { @@ -52,6 +54,13 @@ enum class DecodeTargetIndication { }; struct FrameDependencyTemplate { + // Setters are named briefly to chain them when building the template. 
+ FrameDependencyTemplate& S(int spatial_layer); + FrameDependencyTemplate& T(int temporal_layer); + FrameDependencyTemplate& Dtis(absl::string_view dtis); + FrameDependencyTemplate& FrameDiffs(std::initializer_list diffs); + FrameDependencyTemplate& ChainDiffs(std::initializer_list diffs); + friend bool operator==(const FrameDependencyTemplate& lhs, const FrameDependencyTemplate& rhs) { return lhs.spatial_id == rhs.spatial_id && @@ -82,14 +91,18 @@ struct FrameDependencyStructure { int num_decode_targets = 0; int num_chains = 0; // If chains are used (num_chains > 0), maps decode target index into index of - // the chain protecting that target or |num_chains| value if decode target is - // not protected by a chain. + // the chain protecting that target. absl::InlinedVector decode_target_protected_by_chain; absl::InlinedVector resolutions; std::vector templates; }; struct DependencyDescriptor { + static constexpr int kMaxSpatialIds = 4; + static constexpr int kMaxTemporalIds = 8; + static constexpr int kMaxDecodeTargets = 32; + static constexpr int kMaxTemplates = 64; + bool first_packet_in_frame = true; bool last_packet_in_frame = true; int frame_number = 0; @@ -99,6 +112,37 @@ struct DependencyDescriptor { std::unique_ptr attached_structure; }; +// Below are implementation details. 
+namespace webrtc_impl { +absl::InlinedVector StringToDecodeTargetIndications( + absl::string_view indication_symbols); +} // namespace webrtc_impl + +inline FrameDependencyTemplate& FrameDependencyTemplate::S(int spatial_layer) { + this->spatial_id = spatial_layer; + return *this; +} +inline FrameDependencyTemplate& FrameDependencyTemplate::T(int temporal_layer) { + this->temporal_id = temporal_layer; + return *this; +} +inline FrameDependencyTemplate& FrameDependencyTemplate::Dtis( + absl::string_view dtis) { + this->decode_target_indications = + webrtc_impl::StringToDecodeTargetIndications(dtis); + return *this; +} +inline FrameDependencyTemplate& FrameDependencyTemplate::FrameDiffs( + std::initializer_list diffs) { + this->frame_diffs.assign(diffs.begin(), diffs.end()); + return *this; +} +inline FrameDependencyTemplate& FrameDependencyTemplate::ChainDiffs( + std::initializer_list diffs) { + this->chain_diffs.assign(diffs.begin(), diffs.end()); + return *this; +} + } // namespace webrtc #endif // API_TRANSPORT_RTP_DEPENDENCY_DESCRIPTOR_H_ diff --git a/api/transport/sctp_transport_factory_interface.h b/api/transport/sctp_transport_factory_interface.h new file mode 100644 index 0000000000..912be3a374 --- /dev/null +++ b/api/transport/sctp_transport_factory_interface.h @@ -0,0 +1,42 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_TRANSPORT_SCTP_TRANSPORT_FACTORY_INTERFACE_H_ +#define API_TRANSPORT_SCTP_TRANSPORT_FACTORY_INTERFACE_H_ + +#include + +// These classes are not part of the API, and are treated as opaque pointers. 
+namespace cricket { +class SctpTransportInternal; +} // namespace cricket + +namespace rtc { +class PacketTransportInternal; +} // namespace rtc + +namespace webrtc { + +// Factory class which can be used to allow fake SctpTransports to be injected +// for testing. An application is not intended to implement this interface nor +// 'cricket::SctpTransportInternal' because SctpTransportInternal is not +// guaranteed to remain stable in future WebRTC versions. +class SctpTransportFactoryInterface { + public: + virtual ~SctpTransportFactoryInterface() = default; + + // Create an SCTP transport using |channel| for the underlying transport. + virtual std::unique_ptr CreateSctpTransport( + rtc::PacketTransportInternal* channel) = 0; +}; + +} // namespace webrtc + +#endif // API_TRANSPORT_SCTP_TRANSPORT_FACTORY_INTERFACE_H_ diff --git a/api/transport/stun.cc b/api/transport/stun.cc index b083f15834..1b5bf0c409 100644 --- a/api/transport/stun.cc +++ b/api/transport/stun.cc @@ -11,8 +11,9 @@ #include "api/transport/stun.h" #include - #include +#include +#include #include #include @@ -25,8 +26,15 @@ using rtc::ByteBufferReader; using rtc::ByteBufferWriter; +namespace cricket { + namespace { +const int k127Utf8CharactersLengthInBytes = 508; +const int kDefaultMaxAttributeLength = 508; +const int kMessageIntegrityAttributeLength = 20; +const int kTheoreticalMaximumAttributeLength = 65535; + uint32_t ReduceTransactionId(const std::string& transaction_id) { RTC_DCHECK(transaction_id.length() == cricket::kStunTransactionIdLength || transaction_id.length() == @@ -40,9 +48,46 @@ uint32_t ReduceTransactionId(const std::string& transaction_id) { return result; } -} // namespace +// Check the maximum length of a BYTE_STRING attribute against specifications. +bool LengthValid(int type, int length) { + // "Less than 509 bytes" is intended to indicate a maximum of 127 + // UTF-8 characters, which may take up to 4 bytes per character. 
+ switch (type) { + case STUN_ATTR_USERNAME: + return length <= + k127Utf8CharactersLengthInBytes; // RFC 8489 section 14.3 + case STUN_ATTR_MESSAGE_INTEGRITY: + return length == + kMessageIntegrityAttributeLength; // RFC 8489 section 14.5 + case STUN_ATTR_REALM: + return length <= + k127Utf8CharactersLengthInBytes; // RFC 8489 section 14.9 + case STUN_ATTR_NONCE: + return length <= + k127Utf8CharactersLengthInBytes; // RFC 8489 section 14.10 + case STUN_ATTR_SOFTWARE: + return length <= + k127Utf8CharactersLengthInBytes; // RFC 8489 section 14.14 + case STUN_ATTR_ORIGIN: + // 0x802F is unassigned by IANA. + // RESPONSE-ORIGIN is defined in RFC 5780 section 7.3, but does not + // specify a maximum length. It's an URL, so return an arbitrary + // restriction. + return length <= kDefaultMaxAttributeLength; + case STUN_ATTR_DATA: + // No length restriction in RFC; it's the content of an UDP datagram, + // which in theory can be up to 65.535 bytes. + // TODO(bugs.webrtc.org/12179): Write a test to find the real limit. + return length <= kTheoreticalMaximumAttributeLength; + default: + // Return an arbitrary restriction for all other types. 
+ return length <= kTheoreticalMaximumAttributeLength; + } + RTC_NOTREACHED(); + return true; +} -namespace cricket { +} // namespace const char STUN_ERROR_REASON_TRY_ALTERNATE_SERVER[] = "Try Alternate Server"; const char STUN_ERROR_REASON_BAD_REQUEST[] = "Bad Request"; @@ -201,6 +246,31 @@ const StunUInt16ListAttribute* StunMessage::GetUnknownAttributes() const { GetAttribute(STUN_ATTR_UNKNOWN_ATTRIBUTES)); } +StunMessage::IntegrityStatus StunMessage::ValidateMessageIntegrity( + const std::string& password) { + password_ = password; + if (GetByteString(STUN_ATTR_MESSAGE_INTEGRITY)) { + if (ValidateMessageIntegrityOfType( + STUN_ATTR_MESSAGE_INTEGRITY, kStunMessageIntegritySize, + buffer_.c_str(), buffer_.size(), password)) { + integrity_ = IntegrityStatus::kIntegrityOk; + } else { + integrity_ = IntegrityStatus::kIntegrityBad; + } + } else if (GetByteString(STUN_ATTR_GOOG_MESSAGE_INTEGRITY_32)) { + if (ValidateMessageIntegrityOfType( + STUN_ATTR_GOOG_MESSAGE_INTEGRITY_32, kStunMessageIntegrity32Size, + buffer_.c_str(), buffer_.size(), password)) { + integrity_ = IntegrityStatus::kIntegrityOk; + } else { + integrity_ = IntegrityStatus::kIntegrityBad; + } + } else { + integrity_ = IntegrityStatus::kNoIntegrity; + } + return integrity_; +} + bool StunMessage::ValidateMessageIntegrity(const char* data, size_t size, const std::string& password) { @@ -308,11 +378,6 @@ bool StunMessage::AddMessageIntegrity(const std::string& password) { password.size()); } -bool StunMessage::AddMessageIntegrity(const char* key, size_t keylen) { - return AddMessageIntegrityOfType(STUN_ATTR_MESSAGE_INTEGRITY, - kStunMessageIntegritySize, key, keylen); -} - bool StunMessage::AddMessageIntegrity32(absl::string_view password) { return AddMessageIntegrityOfType(STUN_ATTR_GOOG_MESSAGE_INTEGRITY_32, kStunMessageIntegrity32Size, password.data(), @@ -350,6 +415,8 @@ bool StunMessage::AddMessageIntegrityOfType(int attr_type, // Insert correct HMAC into the attribute. 
msg_integrity_attr->CopyBytes(hmac, attr_size); + password_.assign(key, keylen); + integrity_ = IntegrityStatus::kIntegrityOk; return true; } @@ -428,6 +495,9 @@ bool StunMessage::AddFingerprint() { } bool StunMessage::Read(ByteBufferReader* buf) { + // Keep a copy of the buffer data around for later verification. + buffer_.assign(buf->Data(), buf->Length()); + if (!buf->ReadUInt16(&type_)) { return false; } @@ -555,7 +625,7 @@ StunAttributeValueType StunMessage::GetAttributeValueType(int type) const { return STUN_VALUE_BYTE_STRING; case STUN_ATTR_RETRANSMIT_COUNT: return STUN_VALUE_UINT32; - case STUN_ATTR_LAST_ICE_CHECK_RECEIVED: + case STUN_ATTR_GOOG_LAST_ICE_CHECK_RECEIVED: return STUN_VALUE_BYTE_STRING; case STUN_ATTR_GOOG_MISC_INFO: return STUN_VALUE_UINT16_LIST; @@ -993,6 +1063,10 @@ bool StunByteStringAttribute::Read(ByteBufferReader* buf) { } bool StunByteStringAttribute::Write(ByteBufferWriter* buf) const { + // Check that length is legal according to specs + if (!LengthValid(type(), length())) { + return false; + } buf->WriteBytes(bytes_, length()); WritePadding(buf); return true; @@ -1309,7 +1383,7 @@ StunMessage* TurnMessage::CreateNew() const { StunAttributeValueType IceMessage::GetAttributeValueType(int type) const { switch (type) { case STUN_ATTR_PRIORITY: - case STUN_ATTR_NETWORK_INFO: + case STUN_ATTR_GOOG_NETWORK_INFO: case STUN_ATTR_NOMINATION: return STUN_VALUE_UINT32; case STUN_ATTR_USE_CANDIDATE: diff --git a/api/transport/stun.h b/api/transport/stun.h index 51ca30653c..682a17a945 100644 --- a/api/transport/stun.h +++ b/api/transport/stun.h @@ -17,10 +17,13 @@ #include #include +#include #include #include #include +#include "absl/strings/string_view.h" +#include "api/array_view.h" #include "rtc_base/byte_buffer.h" #include "rtc_base/ip_address.h" #include "rtc_base/socket_address.h" @@ -133,7 +136,6 @@ class StunAddressAttribute; class StunAttribute; class StunByteStringAttribute; class StunErrorCodeAttribute; - class StunUInt16ListAttribute; 
class StunUInt32Attribute; class StunUInt64Attribute; @@ -148,15 +150,24 @@ class StunMessage { StunMessage(); virtual ~StunMessage(); + // The verification status of the message. This is checked on parsing, + // or set by AddMessageIntegrity. + enum class IntegrityStatus { + kNotSet, + kNoIntegrity, // Message-integrity attribute missing + kIntegrityOk, // Message-integrity checked OK + kIntegrityBad, // Message-integrity verification failed + }; + int type() const { return type_; } size_t length() const { return length_; } const std::string& transaction_id() const { return transaction_id_; } uint32_t reduced_transaction_id() const { return reduced_transaction_id_; } // Returns true if the message confirms to RFC3489 rather than - // RFC5389. The main difference between two version of the STUN + // RFC5389. The main difference between the two versions of the STUN // protocol is the presence of the magic cookie and different length - // of transaction ID. For outgoing packets version of the protocol + // of transaction ID. For outgoing packets the version of the protocol // is determined by the lengths of the transaction ID. bool IsLegacy() const; @@ -190,19 +201,27 @@ class StunMessage { // Remote all attributes and releases them. void ClearAttributes(); - // Validates that a raw STUN message has a correct MESSAGE-INTEGRITY value. - // This can't currently be done on a StunMessage, since it is affected by - // padding data (which we discard when reading a StunMessage). - static bool ValidateMessageIntegrity(const char* data, - size_t size, - const std::string& password); - static bool ValidateMessageIntegrity32(const char* data, - size_t size, - const std::string& password); + // Validates that a STUN message has a correct MESSAGE-INTEGRITY value. + // This uses the buffered raw-format message stored by Read(). + IntegrityStatus ValidateMessageIntegrity(const std::string& password); + + // Returns the current integrity status of the message. 
+ IntegrityStatus integrity() const { return integrity_; } + + // Shortcut for checking if integrity is verified. + bool IntegrityOk() const { + return integrity_ == IntegrityStatus::kIntegrityOk; + } + + // Returns the password attribute used to set or check the integrity. + // Can only be called after adding or checking the integrity. + std::string password() const { + RTC_DCHECK(integrity_ != IntegrityStatus::kNotSet); + return password_; + } // Adds a MESSAGE-INTEGRITY attribute that is valid for the current message. bool AddMessageIntegrity(const std::string& password); - bool AddMessageIntegrity(const char* key, size_t keylen); // Adds a STUN_ATTR_GOOG_MESSAGE_INTEGRITY_32 attribute that is valid for the // current message. @@ -243,6 +262,30 @@ class StunMessage { bool EqualAttributes(const StunMessage* other, std::function attribute_type_mask) const; + // Expose raw-buffer ValidateMessageIntegrity function for testing. + static bool ValidateMessageIntegrityForTesting(const char* data, + size_t size, + const std::string& password) { + return ValidateMessageIntegrity(data, size, password); + } + // Expose raw-buffer ValidateMessageIntegrity function for testing. + static bool ValidateMessageIntegrity32ForTesting( + const char* data, + size_t size, + const std::string& password) { + return ValidateMessageIntegrity32(data, size, password); + } + // Validates that a STUN message in byte buffer form + // has a correct MESSAGE-INTEGRITY value. + // These functions are not recommended and will be deprecated; use + // ValidateMessageIntegrity(password) on the parsed form instead. + static bool ValidateMessageIntegrity(const char* data, + size_t size, + const std::string& password); + static bool ValidateMessageIntegrity32(const char* data, + size_t size, + const std::string& password); + protected: // Verifies that the given attribute is allowed for this message. 
virtual StunAttributeValueType GetAttributeValueType(int type) const; @@ -268,6 +311,10 @@ class StunMessage { std::string transaction_id_; uint32_t reduced_transaction_id_; uint32_t stun_magic_cookie_; + // The original buffer for messages created by Read(). + std::string buffer_; + IntegrityStatus integrity_ = IntegrityStatus::kNotSet; + std::string password_; }; // Base class for all STUN/TURN attributes. @@ -667,11 +714,16 @@ enum IceAttributeType { STUN_ATTR_NOMINATION = 0xC001, // UInt32 // UInt32. The higher 16 bits are the network ID. The lower 16 bits are the // network cost. - STUN_ATTR_NETWORK_INFO = 0xC057, + STUN_ATTR_GOOG_NETWORK_INFO = 0xC057, // Experimental: Transaction ID of the last connectivity check received. - STUN_ATTR_LAST_ICE_CHECK_RECEIVED = 0xC058, + STUN_ATTR_GOOG_LAST_ICE_CHECK_RECEIVED = 0xC058, // Uint16List. Miscellaneous attributes for future extension. STUN_ATTR_GOOG_MISC_INFO = 0xC059, + // Obsolete. + STUN_ATTR_GOOG_OBSOLETE_1 = 0xC05A, + STUN_ATTR_GOOG_CONNECTION_ID = 0xC05B, // Not yet implemented. + STUN_ATTR_GOOG_DELTA = 0xC05C, // Not yet implemented. + STUN_ATTR_GOOG_DELTA_ACK = 0xC05D, // Not yet implemented. // MESSAGE-INTEGRITY truncated to 32-bit. STUN_ATTR_GOOG_MESSAGE_INTEGRITY_32 = 0xC060, }; diff --git a/api/transport/stun_unittest.cc b/api/transport/stun_unittest.cc index 0884b2ca1c..bf791f257d 100644 --- a/api/transport/stun_unittest.cc +++ b/api/transport/stun_unittest.cc @@ -1196,24 +1196,24 @@ TEST_F(StunTest, FailToReadRtcpPacket) { // Check our STUN message validation code against the RFC5769 test messages. TEST_F(StunTest, ValidateMessageIntegrity) { // Try the messages from RFC 5769. 
- EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kRfc5769SampleRequest), sizeof(kRfc5769SampleRequest), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kRfc5769SampleRequest), sizeof(kRfc5769SampleRequest), "InvalidPassword")); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kRfc5769SampleResponse), sizeof(kRfc5769SampleResponse), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kRfc5769SampleResponse), sizeof(kRfc5769SampleResponse), "InvalidPassword")); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kRfc5769SampleResponseIPv6), sizeof(kRfc5769SampleResponseIPv6), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kRfc5769SampleResponseIPv6), sizeof(kRfc5769SampleResponseIPv6), "InvalidPassword")); @@ -1222,40 +1222,40 @@ TEST_F(StunTest, ValidateMessageIntegrity) { ComputeStunCredentialHash(kRfc5769SampleMsgWithAuthUsername, kRfc5769SampleMsgWithAuthRealm, kRfc5769SampleMsgWithAuthPassword, &key); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kRfc5769SampleRequestLongTermAuth), sizeof(kRfc5769SampleRequestLongTermAuth), key)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kRfc5769SampleRequestLongTermAuth), sizeof(kRfc5769SampleRequestLongTermAuth), "InvalidPassword")); // Try some edge cases. 
- EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kStunMessageWithZeroLength), sizeof(kStunMessageWithZeroLength), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kStunMessageWithExcessLength), sizeof(kStunMessageWithExcessLength), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kStunMessageWithSmallLength), sizeof(kStunMessageWithSmallLength), kRfc5769SampleMsgPassword)); // Again, but with the lengths matching what is claimed in the headers. - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kStunMessageWithZeroLength), kStunHeaderSize + rtc::GetBE16(&kStunMessageWithZeroLength[2]), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kStunMessageWithExcessLength), kStunHeaderSize + rtc::GetBE16(&kStunMessageWithExcessLength[2]), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kStunMessageWithSmallLength), kStunHeaderSize + rtc::GetBE16(&kStunMessageWithSmallLength[2]), kRfc5769SampleMsgPassword)); // Check that a too-short HMAC doesn't cause buffer overflow. 
- EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(kStunMessageWithBadHmacAtEnd), sizeof(kStunMessageWithBadHmacAtEnd), kRfc5769SampleMsgPassword)); @@ -1268,8 +1268,8 @@ TEST_F(StunTest, ValidateMessageIntegrity) { if (i > 0) buf[i - 1] ^= 0x01; EXPECT_EQ(i >= sizeof(buf) - 8, - StunMessage::ValidateMessageIntegrity(buf, sizeof(buf), - kRfc5769SampleMsgPassword)); + StunMessage::ValidateMessageIntegrityForTesting( + buf, sizeof(buf), kRfc5769SampleMsgPassword)); } } @@ -1291,7 +1291,7 @@ TEST_F(StunTest, AddMessageIntegrity) { rtc::ByteBufferWriter buf1; EXPECT_TRUE(msg.Write(&buf1)); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(buf1.Data()), buf1.Length(), kRfc5769SampleMsgPassword)); @@ -1309,7 +1309,7 @@ TEST_F(StunTest, AddMessageIntegrity) { rtc::ByteBufferWriter buf3; EXPECT_TRUE(msg2.Write(&buf3)); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(buf3.Data()), buf3.Length(), kRfc5769SampleMsgPassword)); } @@ -1317,40 +1317,40 @@ TEST_F(StunTest, AddMessageIntegrity) { // Check our STUN message validation code against the RFC5769 test messages. TEST_F(StunTest, ValidateMessageIntegrity32) { // Try the messages from RFC 5769. - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity32( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kSampleRequestMI32), sizeof(kSampleRequestMI32), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kSampleRequestMI32), sizeof(kSampleRequestMI32), "InvalidPassword")); // Try some edge cases. 
- EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kStunMessageWithZeroLength), sizeof(kStunMessageWithZeroLength), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kStunMessageWithExcessLength), sizeof(kStunMessageWithExcessLength), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kStunMessageWithSmallLength), sizeof(kStunMessageWithSmallLength), kRfc5769SampleMsgPassword)); // Again, but with the lengths matching what is claimed in the headers. - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kStunMessageWithZeroLength), kStunHeaderSize + rtc::GetBE16(&kStunMessageWithZeroLength[2]), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kStunMessageWithExcessLength), kStunHeaderSize + rtc::GetBE16(&kStunMessageWithExcessLength[2]), kRfc5769SampleMsgPassword)); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kStunMessageWithSmallLength), kStunHeaderSize + rtc::GetBE16(&kStunMessageWithSmallLength[2]), kRfc5769SampleMsgPassword)); // Check that a too-short HMAC doesn't cause buffer overflow. 
- EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(kStunMessageWithBadHmacAtEnd), sizeof(kStunMessageWithBadHmacAtEnd), kRfc5769SampleMsgPassword)); @@ -1363,7 +1363,7 @@ TEST_F(StunTest, ValidateMessageIntegrity32) { if (i > 0) buf[i - 1] ^= 0x01; EXPECT_EQ(i >= sizeof(buf) - 8, - StunMessage::ValidateMessageIntegrity32( + StunMessage::ValidateMessageIntegrity32ForTesting( buf, sizeof(buf), kRfc5769SampleMsgPassword)); } } @@ -1384,7 +1384,7 @@ TEST_F(StunTest, AddMessageIntegrity32) { rtc::ByteBufferWriter buf1; EXPECT_TRUE(msg.Write(&buf1)); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity32( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(buf1.Data()), buf1.Length(), kRfc5769SampleMsgPassword)); @@ -1402,7 +1402,7 @@ TEST_F(StunTest, AddMessageIntegrity32) { rtc::ByteBufferWriter buf3; EXPECT_TRUE(msg2.Write(&buf3)); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity32( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(buf3.Data()), buf3.Length(), kRfc5769SampleMsgPassword)); } @@ -1420,14 +1420,14 @@ TEST_F(StunTest, AddMessageIntegrity32AndMessageIntegrity) { rtc::ByteBufferWriter buf1; EXPECT_TRUE(msg.Write(&buf1)); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity32( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(buf1.Data()), buf1.Length(), "password1")); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( + EXPECT_TRUE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(buf1.Data()), buf1.Length(), "password2")); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrity32ForTesting( reinterpret_cast(buf1.Data()), buf1.Length(), "password2")); - EXPECT_FALSE(StunMessage::ValidateMessageIntegrity( + EXPECT_FALSE(StunMessage::ValidateMessageIntegrityForTesting( reinterpret_cast(buf1.Data()), 
buf1.Length(), "password1")); } @@ -1903,4 +1903,16 @@ TEST_F(StunTest, IsStunMethod) { sizeof(kRfc5769SampleRequest))); } +TEST_F(StunTest, SizeRestrictionOnAttributes) { + StunMessage msg; + msg.SetType(STUN_BINDING_REQUEST); + msg.SetTransactionID("ABCDEFGH"); + auto long_username = StunAttribute::CreateByteString(STUN_ATTR_USERNAME); + std::string long_string(509, 'x'); + long_username->CopyBytes(long_string.c_str(), long_string.size()); + msg.AddAttribute(std::move(long_username)); + rtc::ByteBufferWriter out; + ASSERT_FALSE(msg.Write(&out)); +} + } // namespace cricket diff --git a/api/uma_metrics.h b/api/uma_metrics.h index 8436d4f9e5..a975b82aeb 100644 --- a/api/uma_metrics.h +++ b/api/uma_metrics.h @@ -8,42 +8,34 @@ * be found in the AUTHORS file in the root of the source tree. */ -// This file contains enums related to IPv4/IPv6 metrics. +// This file contains enums related to Chrome UMA histograms. See +// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md#requirements +// for requirements when adding or changing metrics. #ifndef API_UMA_METRICS_H_ #define API_UMA_METRICS_H_ -#include "rtc_base/ref_count.h" - namespace webrtc { -// Currently this contains information related to WebRTC network/transport -// information. - -// The difference between PeerConnectionEnumCounter and -// PeerConnectionMetricsName is that the "EnumCounter" is only counting the -// occurrences of events, while "Name" has a value associated with it which is -// used to form a histogram. - -// This enum is backed by Chromium's histograms.xml, -// chromium/src/tools/metrics/histograms/histograms.xml -// Existing values cannot be re-ordered and new enums must be added -// before kBoundary. +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. 
enum PeerConnectionAddressFamilyCounter { - kPeerConnection_IPv4, - kPeerConnection_IPv6, - kBestConnections_IPv4, - kBestConnections_IPv6, - kPeerConnectionAddressFamilyCounter_Max, + kPeerConnection_IPv4 = 0, + kPeerConnection_IPv6 = 1, + kBestConnections_IPv4 = 2, + kBestConnections_IPv6 = 3, + kPeerConnectionAddressFamilyCounter_Max }; // This enum defines types for UMA samples, which will have a range. +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum PeerConnectionMetricsName { - kNetworkInterfaces_IPv4, // Number of IPv4 interfaces. - kNetworkInterfaces_IPv6, // Number of IPv6 interfaces. - kTimeToConnect, // In milliseconds. - kLocalCandidates_IPv4, // Number of IPv4 local candidates. - kLocalCandidates_IPv6, // Number of IPv6 local candidates. + kNetworkInterfaces_IPv4 = 0, // Number of IPv4 interfaces. + kNetworkInterfaces_IPv6 = 1, // Number of IPv6 interfaces. + kTimeToConnect = 2, // In milliseconds. + kLocalCandidates_IPv4 = 3, // Number of IPv4 local candidates. + kLocalCandidates_IPv6 = 4, // Number of IPv6 local candidates. kPeerConnectionMetricsName_Max }; @@ -51,109 +43,180 @@ enum PeerConnectionMetricsName { // _. It is recorded based on the // type of candidate pair used when the PeerConnection first goes to a completed // state. When BUNDLE is enabled, only the first transport gets recorded. +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum IceCandidatePairType { // HostHost is deprecated. It was replaced with the set of types at the bottom // to report private or public host IP address. 
- kIceCandidatePairHostHost, - kIceCandidatePairHostSrflx, - kIceCandidatePairHostRelay, - kIceCandidatePairHostPrflx, - kIceCandidatePairSrflxHost, - kIceCandidatePairSrflxSrflx, - kIceCandidatePairSrflxRelay, - kIceCandidatePairSrflxPrflx, - kIceCandidatePairRelayHost, - kIceCandidatePairRelaySrflx, - kIceCandidatePairRelayRelay, - kIceCandidatePairRelayPrflx, - kIceCandidatePairPrflxHost, - kIceCandidatePairPrflxSrflx, - kIceCandidatePairPrflxRelay, + kIceCandidatePairHostHost = 0, + kIceCandidatePairHostSrflx = 1, + kIceCandidatePairHostRelay = 2, + kIceCandidatePairHostPrflx = 3, + kIceCandidatePairSrflxHost = 4, + kIceCandidatePairSrflxSrflx = 5, + kIceCandidatePairSrflxRelay = 6, + kIceCandidatePairSrflxPrflx = 7, + kIceCandidatePairRelayHost = 8, + kIceCandidatePairRelaySrflx = 9, + kIceCandidatePairRelayRelay = 10, + kIceCandidatePairRelayPrflx = 11, + kIceCandidatePairPrflxHost = 12, + kIceCandidatePairPrflxSrflx = 13, + kIceCandidatePairPrflxRelay = 14, // The following 9 types tell whether local and remote hosts have hostname, // private or public IP addresses. 
- kIceCandidatePairHostPrivateHostPrivate, - kIceCandidatePairHostPrivateHostPublic, - kIceCandidatePairHostPublicHostPrivate, - kIceCandidatePairHostPublicHostPublic, - kIceCandidatePairHostNameHostName, - kIceCandidatePairHostNameHostPrivate, - kIceCandidatePairHostNameHostPublic, - kIceCandidatePairHostPrivateHostName, - kIceCandidatePairHostPublicHostName, + kIceCandidatePairHostPrivateHostPrivate = 15, + kIceCandidatePairHostPrivateHostPublic = 16, + kIceCandidatePairHostPublicHostPrivate = 17, + kIceCandidatePairHostPublicHostPublic = 18, + kIceCandidatePairHostNameHostName = 19, + kIceCandidatePairHostNameHostPrivate = 20, + kIceCandidatePairHostNameHostPublic = 21, + kIceCandidatePairHostPrivateHostName = 22, + kIceCandidatePairHostPublicHostName = 23, kIceCandidatePairMax }; +// The difference between PeerConnectionEnumCounter and +// PeerConnectionMetricsName is that the "EnumCounter" is only counting the +// occurrences of events, while "Name" has a value associated with it which is +// used to form a histogram. + +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum KeyExchangeProtocolType { - kEnumCounterKeyProtocolDtls, - kEnumCounterKeyProtocolSdes, + kEnumCounterKeyProtocolDtls = 0, + kEnumCounterKeyProtocolSdes = 1, kEnumCounterKeyProtocolMax }; +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. 
enum KeyExchangeProtocolMedia { - kEnumCounterKeyProtocolMediaTypeDtlsAudio, - kEnumCounterKeyProtocolMediaTypeDtlsVideo, - kEnumCounterKeyProtocolMediaTypeDtlsData, - kEnumCounterKeyProtocolMediaTypeSdesAudio, - kEnumCounterKeyProtocolMediaTypeSdesVideo, - kEnumCounterKeyProtocolMediaTypeSdesData, + kEnumCounterKeyProtocolMediaTypeDtlsAudio = 0, + kEnumCounterKeyProtocolMediaTypeDtlsVideo = 1, + kEnumCounterKeyProtocolMediaTypeDtlsData = 2, + kEnumCounterKeyProtocolMediaTypeSdesAudio = 3, + kEnumCounterKeyProtocolMediaTypeSdesVideo = 4, + kEnumCounterKeyProtocolMediaTypeSdesData = 5, kEnumCounterKeyProtocolMediaTypeMax }; +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum SdpSemanticRequested { - kSdpSemanticRequestDefault, - kSdpSemanticRequestPlanB, - kSdpSemanticRequestUnifiedPlan, + kSdpSemanticRequestDefault = 0, + kSdpSemanticRequestPlanB = 1, + kSdpSemanticRequestUnifiedPlan = 2, kSdpSemanticRequestMax }; +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum SdpSemanticNegotiated { - kSdpSemanticNegotiatedNone, - kSdpSemanticNegotiatedPlanB, - kSdpSemanticNegotiatedUnifiedPlan, - kSdpSemanticNegotiatedMixed, + kSdpSemanticNegotiatedNone = 0, + kSdpSemanticNegotiatedPlanB = 1, + kSdpSemanticNegotiatedUnifiedPlan = 2, + kSdpSemanticNegotiatedMixed = 3, kSdpSemanticNegotiatedMax }; // Metric which records the format of the received SDP for tracking how much the // difference between Plan B and Unified Plan affect users. +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum SdpFormatReceived { // No audio or video tracks. This is worth special casing since it seems to be // the most common scenario (data-channel only). - kSdpFormatReceivedNoTracks, + kSdpFormatReceivedNoTracks = 0, // No more than one audio and one video track. 
Should be compatible with both // Plan B and Unified Plan endpoints. - kSdpFormatReceivedSimple, + kSdpFormatReceivedSimple = 1, // More than one audio track or more than one video track in the Plan B format // (e.g., one audio media section with multiple streams). - kSdpFormatReceivedComplexPlanB, + kSdpFormatReceivedComplexPlanB = 2, // More than one audio track or more than one video track in the Unified Plan // format (e.g., two audio media sections). - kSdpFormatReceivedComplexUnifiedPlan, + kSdpFormatReceivedComplexUnifiedPlan = 3, kSdpFormatReceivedMax }; // Metric for counting the outcome of adding an ICE candidate +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum AddIceCandidateResult { - kAddIceCandidateSuccess, - kAddIceCandidateFailClosed, - kAddIceCandidateFailNoRemoteDescription, - kAddIceCandidateFailNullCandidate, - kAddIceCandidateFailNotValid, - kAddIceCandidateFailNotReady, - kAddIceCandidateFailInAddition, - kAddIceCandidateFailNotUsable, + kAddIceCandidateSuccess = 0, + kAddIceCandidateFailClosed = 1, + kAddIceCandidateFailNoRemoteDescription = 2, + kAddIceCandidateFailNullCandidate = 3, + kAddIceCandidateFailNotValid = 4, + kAddIceCandidateFailNotReady = 5, + kAddIceCandidateFailInAddition = 6, + kAddIceCandidateFailNotUsable = 7, kAddIceCandidateMax }; // Metric for recording which api surface was used to enable simulcast. +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. enum SimulcastApiVersion { - kSimulcastApiVersionNone, - kSimulcastApiVersionLegacy, - kSimulcastApiVersionSpecCompliant, - kSimulcastApiVersionMax, + kSimulcastApiVersionNone = 0, + kSimulcastApiVersionLegacy = 1, + kSimulcastApiVersionSpecCompliant = 2, + kSimulcastApiVersionMax }; +// Metrics for reporting usage of BUNDLE. +// These values are persisted to logs. 
Entries should not be renumbered and +// numeric values should never be reused. +enum BundleUsage { + // There are no m-lines in the SDP, only a session description. + kBundleUsageEmpty = 0, + // Only a data channel is negotiated but BUNDLE is not negotiated. + kBundleUsageNoBundleDatachannelOnly = 1, + // BUNDLE is not negotiated and there is at most one m-line per media type, + kBundleUsageNoBundleSimple = 2, + // BUNDLE is not negotiated and there are multiple m-lines per media type, + kBundleUsageNoBundleComplex = 3, + // Only a data channel is negotiated and BUNDLE is negotiated. + kBundleUsageBundleDatachannelOnly = 4, + // BUNDLE is negotiated but there is at most one m-line per media type, + kBundleUsageBundleSimple = 5, + // BUNDLE is negotiated and there are multiple m-lines per media type, + kBundleUsageBundleComplex = 6, + // Legacy plan-b metrics. + kBundleUsageNoBundlePlanB = 7, + kBundleUsageBundlePlanB = 8, + kBundleUsageMax +}; + +// Metrics for reporting configured BUNDLE policy, mapping directly to +// https://w3c.github.io/webrtc-pc/#rtcbundlepolicy-enum +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. +enum BundlePolicyUsage { + kBundlePolicyUsageBalanced = 0, + kBundlePolicyUsageMaxBundle = 1, + kBundlePolicyUsageMaxCompat = 2, + kBundlePolicyUsageMax +}; + +// Metrics for provisional answers as described in +// https://datatracker.ietf.org/doc/html/rfc8829#section-4.1.10.1 +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. +enum ProvisionalAnswerUsage { + kProvisionalAnswerNotUsed = 0, + kProvisionalAnswerLocal = 1, + kProvisionalAnswerRemote = 2, + kProvisionalAnswerMax +}; + +// When adding new metrics please consider using the style described in +// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md#usage +// instead of the legacy enums used above. 
+ } // namespace webrtc #endif // API_UMA_METRICS_H_ diff --git a/api/units/data_rate.h b/api/units/data_rate.h index 5c8a61fd9c..98572123c5 100644 --- a/api/units/data_rate.h +++ b/api/units/data_rate.h @@ -11,9 +11,9 @@ #ifndef API_UNITS_DATA_RATE_H_ #define API_UNITS_DATA_RATE_H_ -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST #include // no-presubmit-check TODO(webrtc:8982) -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST #include #include @@ -142,13 +142,13 @@ inline std::string ToLogString(DataRate value) { return ToString(value); } -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& stream, // no-presubmit-check TODO(webrtc:8982) DataRate value) { return stream << ToString(value); } -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST } // namespace webrtc diff --git a/api/units/data_size.h b/api/units/data_size.h index 27a2a4e4dc..6817e24c26 100644 --- a/api/units/data_size.h +++ b/api/units/data_size.h @@ -11,9 +11,9 @@ #ifndef API_UNITS_DATA_SIZE_H_ #define API_UNITS_DATA_SIZE_H_ -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST #include // no-presubmit-check TODO(webrtc:8982) -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST #include #include @@ -53,13 +53,13 @@ inline std::string ToLogString(DataSize value) { return ToString(value); } -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& stream, // no-presubmit-check TODO(webrtc:8982) DataSize value) { return stream << ToString(value); } -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST } // namespace webrtc diff --git a/api/units/frequency.h b/api/units/frequency.h index 88912c64d5..8e9cc2b5f4 100644 --- a/api/units/frequency.h +++ b/api/units/frequency.h @@ -10,9 +10,9 @@ #ifndef API_UNITS_FREQUENCY_H_ #define API_UNITS_FREQUENCY_H_ -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST #include // no-presubmit-check TODO(webrtc:8982) -#endif // UNIT_TEST +#endif // 
WEBRTC_UNIT_TEST #include #include @@ -89,13 +89,13 @@ inline std::string ToLogString(Frequency value) { return ToString(value); } -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& stream, // no-presubmit-check TODO(webrtc:8982) Frequency value) { return stream << ToString(value); } -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST } // namespace webrtc #endif // API_UNITS_FREQUENCY_H_ diff --git a/api/units/time_delta.h b/api/units/time_delta.h index 173affcc56..6f1910379b 100644 --- a/api/units/time_delta.h +++ b/api/units/time_delta.h @@ -11,9 +11,9 @@ #ifndef API_UNITS_TIME_DELTA_H_ #define API_UNITS_TIME_DELTA_H_ -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST #include // no-presubmit-check TODO(webrtc:8982) -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST #include #include @@ -92,13 +92,13 @@ inline std::string ToLogString(TimeDelta value) { return ToString(value); } -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& stream, // no-presubmit-check TODO(webrtc:8982) TimeDelta value) { return stream << ToString(value); } -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST } // namespace webrtc diff --git a/api/units/timestamp.h b/api/units/timestamp.h index f83477e808..1e9f9d1dc5 100644 --- a/api/units/timestamp.h +++ b/api/units/timestamp.h @@ -11,9 +11,9 @@ #ifndef API_UNITS_TIMESTAMP_H_ #define API_UNITS_TIMESTAMP_H_ -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST #include // no-presubmit-check TODO(webrtc:8982) -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST #include #include @@ -125,13 +125,13 @@ inline std::string ToLogString(Timestamp value) { return ToString(value); } -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& stream, // no-presubmit-check TODO(webrtc:8982) Timestamp value) { return stream << ToString(value); } -#endif // 
UNIT_TEST +#endif // WEBRTC_UNIT_TEST } // namespace webrtc diff --git a/api/video/BUILD.gn b/api/video/BUILD.gn index 7f9b034192..ec90bc137e 100644 --- a/api/video/BUILD.gn +++ b/api/video/BUILD.gn @@ -21,7 +21,6 @@ rtc_library("video_rtp_headers") { "hdr_metadata.h", "video_content_type.cc", "video_content_type.h", - "video_frame_marking.h", "video_rotation.h", "video_timing.cc", "video_timing.h", @@ -31,6 +30,10 @@ rtc_library("video_rtp_headers") { "..:array_view", "../../rtc_base:rtc_base_approved", "../../rtc_base/system:rtc_export", + "../units:data_rate", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/types:optional", ] } @@ -38,6 +41,10 @@ rtc_library("video_rtp_headers") { rtc_library("video_frame") { visibility = [ "*" ] sources = [ + "i420_buffer.cc", + "i420_buffer.h", + "nv12_buffer.cc", + "nv12_buffer.h", "video_codec_type.h", "video_frame.cc", "video_frame.h", @@ -55,9 +62,11 @@ rtc_library("video_frame") { "..:scoped_refptr", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", + "../../rtc_base/memory:aligned_malloc", "../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", + "//third_party/libyuv", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } if (is_android) { @@ -66,31 +75,11 @@ if (is_android) { } } -rtc_source_set("recordable_encoded_frame") { - visibility = [ "*" ] - sources = [ "recordable_encoded_frame.h" ] - - deps = [ - ":encoded_image", - ":video_frame", - ":video_rtp_headers", - "..:array_view", - "..:scoped_refptr", - "../../rtc_base:refcount", - "../units:timestamp", - ] -} - -rtc_source_set("video_frame_type") { - visibility = [ "*" ] - sources = [ "video_frame_type.h" ] -} - -rtc_library("video_frame_i420") { +rtc_library("video_frame_i010") { visibility = [ "*" ] sources = [ - "i420_buffer.cc", - "i420_buffer.h", + "i010_buffer.cc", + "i010_buffer.h", ] deps = [ ":video_frame", @@ -99,29 +88,30 @@ 
rtc_library("video_frame_i420") { "../../rtc_base", "../../rtc_base:checks", "../../rtc_base/memory:aligned_malloc", - "../../rtc_base/system:rtc_export", "//third_party/libyuv", ] } -rtc_library("video_frame_i010") { +rtc_source_set("recordable_encoded_frame") { visibility = [ "*" ] - sources = [ - "i010_buffer.cc", - "i010_buffer.h", - ] + sources = [ "recordable_encoded_frame.h" ] + deps = [ + ":encoded_image", ":video_frame", - ":video_frame_i420", ":video_rtp_headers", + "..:array_view", "..:scoped_refptr", - "../../rtc_base", - "../../rtc_base:checks", - "../../rtc_base/memory:aligned_malloc", - "//third_party/libyuv", + "../../rtc_base:refcount", + "../units:timestamp", ] } +rtc_source_set("video_frame_type") { + visibility = [ "*" ] + sources = [ "video_frame_type.h" ] +} + rtc_library("encoded_image") { visibility = [ "*" ] sources = [ @@ -136,13 +126,11 @@ rtc_library("encoded_image") { "..:refcountedbase", "..:rtp_packet_info", "..:scoped_refptr", - "../..:webrtc_common", "../../rtc_base:checks", - "../../rtc_base:deprecation", "../../rtc_base:rtc_base_approved", "../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("encoded_frame") { @@ -155,6 +143,41 @@ rtc_library("encoded_frame") { deps = [ "../../modules/video_coding:encoded_frame" ] } +rtc_library("rtp_video_frame_assembler") { + visibility = [ "*" ] + sources = [ + "rtp_video_frame_assembler.cc", + "rtp_video_frame_assembler.h", + ] + + deps = [ + ":encoded_frame", + "../../modules/rtp_rtcp:rtp_rtcp", + "../../modules/rtp_rtcp:rtp_rtcp_format", + "../../modules/video_coding:video_coding", + "../../rtc_base:logging", + ] + + absl_deps = [ + "//third_party/abseil-cpp/absl/container:inlined_vector", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("rtp_video_frame_assembler_unittests") { + testonly = true + sources = [ "rtp_video_frame_assembler_unittests.cc" ] + + 
deps = [ + ":rtp_video_frame_assembler", + "..:array_view", + "../../modules/rtp_rtcp:rtp_packetizer_av1_test_helper", + "../../modules/rtp_rtcp:rtp_rtcp", + "../../modules/rtp_rtcp:rtp_rtcp_format", + "../../test:test_support", + ] +} + rtc_source_set("video_codec_constants") { visibility = [ "*" ] sources = [ "video_codec_constants.h" ] @@ -173,8 +196,15 @@ rtc_library("video_bitrate_allocation") { "../../rtc_base:safe_conversions", "../../rtc_base:stringutils", "../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_source_set("video_layers_allocation") { + visibility = [ "*" ] + sources = [ "video_layers_allocation.h" ] + deps = [ "../units:data_rate" ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ] } rtc_library("video_bitrate_allocator") { @@ -206,11 +236,12 @@ rtc_source_set("video_stream_decoder") { deps = [ ":encoded_frame", ":video_frame", + ":video_rtp_headers", "../task_queue", "../units:time_delta", "../video_codecs:video_codecs_api", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("video_stream_decoder_create") { @@ -237,7 +268,10 @@ rtc_library("video_adaptation") { "video_adaptation_reason.h", ] - deps = [ "../../rtc_base:checks" ] + deps = [ + "../../rtc_base:checks", + "../../rtc_base:stringutils", + ] } rtc_source_set("video_stream_encoder") { @@ -255,13 +289,16 @@ rtc_source_set("video_stream_encoder") { ":video_bitrate_allocator_factory", ":video_codec_constants", ":video_frame", + ":video_layers_allocation", "..:rtp_parameters", + "..:scoped_refptr", "../:fec_controller_api", "../:rtp_parameters", + "../adaptation:resource_adaptation_api", "../units:data_rate", "../video_codecs:video_codecs_api", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } 
rtc_source_set("video_frame_metadata") { @@ -274,29 +311,13 @@ rtc_source_set("video_frame_metadata") { "..:array_view", "../../modules/rtp_rtcp:rtp_video_header", "../transport/rtp:dependency_descriptor", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/types:optional", ] } -rtc_library("video_stream_encoder_create") { - visibility = [ "*" ] - sources = [ - "video_stream_encoder_create.cc", - "video_stream_encoder_create.h", - ] - - deps = [ - ":video_frame", - ":video_stream_encoder", - "../../api:scoped_refptr", - "../../video:video_stream_encoder_impl", - "../../video/adaptation:video_adaptation", - "../task_queue", - "../video_codecs:video_codecs_api", - ] -} - rtc_library("builtin_video_bitrate_allocator_factory") { visibility = [ "*" ] sources = [ @@ -308,14 +329,13 @@ rtc_library("builtin_video_bitrate_allocator_factory") { ":video_bitrate_allocation", ":video_bitrate_allocator", ":video_bitrate_allocator_factory", - "../../:webrtc_common", "../../api:scoped_refptr", "../../media:rtc_media_base", "../../modules/video_coding:video_coding_utility", - "../../modules/video_coding:webrtc_vp9_helpers", + "../../modules/video_coding/svc:svc_rate_allocator", "../video_codecs:video_codecs_api", - "//third_party/abseil-cpp/absl/base:core_headers", ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] } if (rtc_include_tests) { diff --git a/api/video/DEPS b/api/video/DEPS index 3af594cd8a..cf6770dce0 100644 --- a/api/video/DEPS +++ b/api/video/DEPS @@ -18,6 +18,10 @@ specific_include_rules = { "+rtc_base/memory/aligned_malloc.h", ], + "nv12_buffer\.h": [ + "+rtc_base/memory/aligned_malloc.h", + ], + "recordable_encoded_frame\.h": [ "+rtc_base/ref_count.h", ], @@ -36,4 +40,8 @@ specific_include_rules = { "video_stream_encoder_create.cc": [ "+video/video_stream_encoder.h", ], + + "rtp_video_frame_assembler.h": [ + "+modules/rtp_rtcp/source/rtp_packet_received.h", + ], } diff --git 
a/api/video/OWNERS b/api/video/OWNERS index 315f85e7d0..e4a16c360a 100644 --- a/api/video/OWNERS +++ b/api/video/OWNERS @@ -1,3 +1,4 @@ +brandtr@webrtc.org magjed@webrtc.org nisse@webrtc.org diff --git a/api/video/builtin_video_bitrate_allocator_factory.cc b/api/video/builtin_video_bitrate_allocator_factory.cc index 2966a024c7..4c24a0e75d 100644 --- a/api/video/builtin_video_bitrate_allocator_factory.cc +++ b/api/video/builtin_video_bitrate_allocator_factory.cc @@ -15,7 +15,7 @@ #include "absl/base/macros.h" #include "api/video/video_bitrate_allocator.h" #include "api/video_codecs/video_codec.h" -#include "modules/video_coding/codecs/vp9/svc_rate_allocator.h" +#include "modules/video_coding/svc/svc_rate_allocator.h" #include "modules/video_coding/utility/simulcast_rate_allocator.h" namespace webrtc { @@ -30,15 +30,13 @@ class BuiltinVideoBitrateAllocatorFactory std::unique_ptr CreateVideoBitrateAllocator( const VideoCodec& codec) override { - std::unique_ptr rate_allocator; switch (codec.codecType) { + case kVideoCodecAV1: case kVideoCodecVP9: - rate_allocator.reset(new SvcRateAllocator(codec)); - break; + return std::make_unique(codec); default: - rate_allocator.reset(new SimulcastRateAllocator(codec)); + return std::make_unique(codec); } - return rate_allocator; } }; diff --git a/api/video/encoded_frame.cc b/api/video/encoded_frame.cc index 26a794ec02..42d6b06b84 100644 --- a/api/video/encoded_frame.cc +++ b/api/video/encoded_frame.cc @@ -11,11 +11,9 @@ #include "api/video/encoded_frame.h" namespace webrtc { -namespace video_coding { bool EncodedFrame::delayed_by_retransmission() const { return 0; } -} // namespace video_coding } // namespace webrtc diff --git a/api/video/encoded_frame.h b/api/video/encoded_frame.h index f0a67a1ceb..5f046327fa 100644 --- a/api/video/encoded_frame.h +++ b/api/video/encoded_frame.h @@ -17,37 +17,6 @@ #include "modules/video_coding/encoded_frame.h" namespace webrtc { -namespace video_coding { - -// NOTE: This class is still under 
development and may change without notice. -struct VideoLayerFrameId { - // TODO(philipel): The default ctor is currently used internaly, but have a - // look if we can remove it. - VideoLayerFrameId() : picture_id(-1), spatial_layer(0) {} - VideoLayerFrameId(int64_t picture_id, uint8_t spatial_layer) - : picture_id(picture_id), spatial_layer(spatial_layer) {} - - bool operator==(const VideoLayerFrameId& rhs) const { - return picture_id == rhs.picture_id && spatial_layer == rhs.spatial_layer; - } - - bool operator!=(const VideoLayerFrameId& rhs) const { - return !(*this == rhs); - } - - bool operator<(const VideoLayerFrameId& rhs) const { - if (picture_id == rhs.picture_id) - return spatial_layer < rhs.spatial_layer; - return picture_id < rhs.picture_id; - } - - bool operator<=(const VideoLayerFrameId& rhs) const { return !(rhs < *this); } - bool operator>(const VideoLayerFrameId& rhs) const { return rhs < *this; } - bool operator>=(const VideoLayerFrameId& rhs) const { return rhs <= *this; } - - int64_t picture_id; - uint8_t spatial_layer; -}; // TODO(philipel): Remove webrtc::VCMEncodedFrame inheritance. // TODO(philipel): Move transport specific info out of EncodedFrame. @@ -73,19 +42,23 @@ class EncodedFrame : public webrtc::VCMEncodedFrame { bool is_keyframe() const { return num_references == 0; } - VideoLayerFrameId id; + void SetId(int64_t id) { id_ = id; } + int64_t Id() const { return id_; } // TODO(philipel): Add simple modify/access functions to prevent adding too // many |references|. size_t num_references = 0; int64_t references[kMaxFrameReferences]; - bool inter_layer_predicted = false; // Is this subframe the last one in the superframe (In RTP stream that would // mean that the last packet has a marker bit set). bool is_last_spatial_layer = true; + + private: + // The ID of the frame is determined from RTP level information. The IDs are + // used to describe order and dependencies between frames. 
+ int64_t id_ = -1; }; -} // namespace video_coding } // namespace webrtc #endif // API_VIDEO_ENCODED_FRAME_H_ diff --git a/api/video/encoded_image.cc b/api/video/encoded_image.cc index 13d57ef5ff..fc77b9415b 100644 --- a/api/video/encoded_image.cc +++ b/api/video/encoded_image.cc @@ -32,13 +32,13 @@ EncodedImageBuffer::~EncodedImageBuffer() { // static rtc::scoped_refptr EncodedImageBuffer::Create(size_t size) { - return new rtc::RefCountedObject(size); + return rtc::make_ref_counted(size); } // static rtc::scoped_refptr EncodedImageBuffer::Create( const uint8_t* data, size_t size) { - return new rtc::RefCountedObject(data, size); + return rtc::make_ref_counted(data, size); } const uint8_t* EncodedImageBuffer::data() const { @@ -61,26 +61,16 @@ void EncodedImageBuffer::Realloc(size_t size) { size_ = size; } -EncodedImage::EncodedImage() : EncodedImage(nullptr, 0, 0) {} +EncodedImage::EncodedImage() = default; EncodedImage::EncodedImage(EncodedImage&&) = default; EncodedImage::EncodedImage(const EncodedImage&) = default; -EncodedImage::EncodedImage(uint8_t* buffer, size_t size, size_t capacity) - : size_(size), buffer_(buffer), capacity_(capacity) {} - EncodedImage::~EncodedImage() = default; EncodedImage& EncodedImage::operator=(EncodedImage&&) = default; EncodedImage& EncodedImage::operator=(const EncodedImage&) = default; -void EncodedImage::Retain() { - if (buffer_) { - encoded_data_ = EncodedImageBuffer::Create(buffer_, size_); - buffer_ = nullptr; - } -} - void EncodedImage::SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms) { timing_.encode_start_ms = encode_start_ms; diff --git a/api/video/encoded_image.h b/api/video/encoded_image.h index d89095f467..dae4e3a60a 100644 --- a/api/video/encoded_image.h +++ b/api/video/encoded_image.h @@ -21,14 +21,11 @@ #include "api/scoped_refptr.h" #include "api/video/color_space.h" #include "api/video/video_codec_constants.h" -#include "api/video/video_codec_type.h" #include "api/video/video_content_type.h" 
#include "api/video/video_frame_type.h" #include "api/video/video_rotation.h" #include "api/video/video_timing.h" -#include "common_types.h" // NOLINT(build/include_directory) #include "rtc_base/checks.h" -#include "rtc_base/deprecation.h" #include "rtc_base/ref_count.h" #include "rtc_base/system/rtc_export.h" @@ -74,14 +71,11 @@ class RTC_EXPORT EncodedImage { public: EncodedImage(); EncodedImage(EncodedImage&&); - // Discouraged: potentially expensive. EncodedImage(const EncodedImage&); - EncodedImage(uint8_t* buffer, size_t length, size_t capacity); ~EncodedImage(); EncodedImage& operator=(EncodedImage&&); - // Discouraged: potentially expensive. EncodedImage& operator=(const EncodedImage&); // TODO(nisse): Change style to timestamp(), set_timestamp(), for consistency @@ -115,6 +109,15 @@ class RTC_EXPORT EncodedImage { color_space_ = color_space; } + // These methods along with the private member video_frame_tracking_id_ are + // meant for media quality testing purpose only. + absl::optional VideoFrameTrackingId() const { + return video_frame_tracking_id_; + } + void SetVideoFrameTrackingId(absl::optional tracking_id) { + video_frame_tracking_id_ = tracking_id; + } + const RtpPacketInfos& PacketInfos() const { return packet_infos_; } void SetPacketInfos(RtpPacketInfos packet_infos) { packet_infos_ = std::move(packet_infos); @@ -131,44 +134,26 @@ class RTC_EXPORT EncodedImage { RTC_DCHECK_LE(new_size, new_size == 0 ? 0 : capacity()); size_ = new_size; } - // TODO(nisse): Delete, provide only read-only access to the buffer. - size_t capacity() const { - return buffer_ ? capacity_ : (encoded_data_ ? 
encoded_data_->size() : 0); - } void SetEncodedData( rtc::scoped_refptr encoded_data) { encoded_data_ = encoded_data; size_ = encoded_data->size(); - buffer_ = nullptr; } void ClearEncodedData() { encoded_data_ = nullptr; size_ = 0; - buffer_ = nullptr; - capacity_ = 0; } rtc::scoped_refptr GetEncodedData() const { - RTC_DCHECK(buffer_ == nullptr); return encoded_data_; } - // TODO(nisse): Delete, provide only read-only access to the buffer. - uint8_t* data() { - return buffer_ ? buffer_ - : (encoded_data_ ? encoded_data_->data() : nullptr); - } const uint8_t* data() const { - return buffer_ ? buffer_ - : (encoded_data_ ? encoded_data_->data() : nullptr); + return encoded_data_ ? encoded_data_->data() : nullptr; } - // Hack to workaround lack of ownership of the encoded data. If we don't - // already own the underlying data, make an owned copy. - void Retain(); - uint32_t _encodedWidth = 0; uint32_t _encodedHeight = 0; // NTP time of the capture time in local timebase in milliseconds. @@ -178,13 +163,12 @@ class RTC_EXPORT EncodedImage { VideoFrameType _frameType = VideoFrameType::kVideoFrameDelta; VideoRotation rotation_ = kVideoRotation_0; VideoContentType content_type_ = VideoContentType::UNSPECIFIED; - bool _completeFrame = false; int qp_ = -1; // Quantizer value. // When an application indicates non-zero values here, it is taken as an // indication that all future frames will be constrained with those limits // until the application indicates a change again. - PlayoutDelay playout_delay_ = {-1, -1}; + VideoPlayoutDelay playout_delay_; struct Timing { uint8_t flags = VideoSendTiming::kInvalid; @@ -199,18 +183,17 @@ class RTC_EXPORT EncodedImage { } timing_; private: - // TODO(bugs.webrtc.org/9378): We're transitioning to always owning the - // encoded data. + size_t capacity() const { return encoded_data_ ? encoded_data_->size() : 0; } + rtc::scoped_refptr encoded_data_; - size_t size_; // Size of encoded frame data. 
- // Non-null when used with an un-owned buffer. - uint8_t* buffer_; - // Allocated size of _buffer; relevant only if it's non-null. - size_t capacity_; + size_t size_ = 0; // Size of encoded frame data. uint32_t timestamp_rtp_ = 0; absl::optional spatial_index_; std::map spatial_layer_frame_size_bytes_; absl::optional color_space_; + // This field is meant for media quality testing purpose only. When enabled it + // carries the webrtc::VideoFrame id field from the sender to the receiver. + absl::optional video_frame_tracking_id_; // Information about packets used to assemble this video frame. This is needed // by |SourceTracker| when the frame is delivered to the RTCRtpReceiver's // MediaStreamTrack, in order to implement getContributingSources(). See: diff --git a/api/video/i010_buffer.cc b/api/video/i010_buffer.cc index 7286676ded..74d37d1b57 100644 --- a/api/video/i010_buffer.cc +++ b/api/video/i010_buffer.cc @@ -56,8 +56,8 @@ I010Buffer::~I010Buffer() {} // static rtc::scoped_refptr I010Buffer::Create(int width, int height) { - return new rtc::RefCountedObject( - width, height, width, (width + 1) / 2, (width + 1) / 2); + return rtc::make_ref_counted(width, height, width, + (width + 1) / 2, (width + 1) / 2); } // static diff --git a/api/video/i420_buffer.cc b/api/video/i420_buffer.cc index 62fa1837ed..8783a4a313 100644 --- a/api/video/i420_buffer.cc +++ b/api/video/i420_buffer.cc @@ -60,7 +60,7 @@ I420Buffer::~I420Buffer() {} // static rtc::scoped_refptr I420Buffer::Create(int width, int height) { - return new rtc::RefCountedObject(width, height); + return rtc::make_ref_counted(width, height); } // static @@ -69,8 +69,8 @@ rtc::scoped_refptr I420Buffer::Create(int width, int stride_y, int stride_u, int stride_v) { - return new rtc::RefCountedObject(width, height, stride_y, - stride_u, stride_v); + return rtc::make_ref_counted(width, height, stride_y, stride_u, + stride_v); } // static @@ -215,9 +215,11 @@ void I420Buffer::CropAndScaleFrom(const 
I420BufferInterface& src, void I420Buffer::CropAndScaleFrom(const I420BufferInterface& src) { const int crop_width = - std::min(src.width(), width() * src.height() / height()); + height() > 0 ? std::min(src.width(), width() * src.height() / height()) + : src.width(); const int crop_height = - std::min(src.height(), height() * src.width() / width()); + width() > 0 ? std::min(src.height(), height() * src.width() / width()) + : src.height(); CropAndScaleFrom(src, (src.width() - crop_width) / 2, (src.height() - crop_height) / 2, crop_width, crop_height); diff --git a/api/video/nv12_buffer.cc b/api/video/nv12_buffer.cc new file mode 100644 index 0000000000..37d688b88b --- /dev/null +++ b/api/video/nv12_buffer.cc @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "api/video/nv12_buffer.h" + +#include "api/video/i420_buffer.h" +#include "rtc_base/checks.h" +#include "rtc_base/ref_counted_object.h" +#include "third_party/libyuv/include/libyuv/convert.h" +#include "third_party/libyuv/include/libyuv/scale.h" + +namespace webrtc { + +namespace { + +static const int kBufferAlignment = 64; + +int NV12DataSize(int height, int stride_y, int stride_uv) { + return stride_y * height + stride_uv * ((height + 1) / 2); +} + +} // namespace + +NV12Buffer::NV12Buffer(int width, int height) + : NV12Buffer(width, height, width, width + width % 2) {} + +NV12Buffer::NV12Buffer(int width, int height, int stride_y, int stride_uv) + : width_(width), + height_(height), + stride_y_(stride_y), + stride_uv_(stride_uv), + data_(static_cast( + AlignedMalloc(NV12DataSize(height_, stride_y_, stride_uv), + kBufferAlignment))) { + RTC_DCHECK_GT(width, 0); + RTC_DCHECK_GT(height, 0); + RTC_DCHECK_GE(stride_y, width); + RTC_DCHECK_GE(stride_uv, (width + width % 2)); +} + +NV12Buffer::~NV12Buffer() = default; + +// static +rtc::scoped_refptr NV12Buffer::Create(int width, int height) { + return rtc::make_ref_counted(width, height); +} + +// static +rtc::scoped_refptr NV12Buffer::Create(int width, + int height, + int stride_y, + int stride_uv) { + return rtc::make_ref_counted(width, height, stride_y, stride_uv); +} + +// static +rtc::scoped_refptr NV12Buffer::Copy( + const I420BufferInterface& i420_buffer) { + rtc::scoped_refptr buffer = + NV12Buffer::Create(i420_buffer.width(), i420_buffer.height()); + libyuv::I420ToNV12( + i420_buffer.DataY(), i420_buffer.StrideY(), i420_buffer.DataU(), + i420_buffer.StrideU(), i420_buffer.DataV(), i420_buffer.StrideV(), + buffer->MutableDataY(), buffer->StrideY(), buffer->MutableDataUV(), + buffer->StrideUV(), buffer->width(), buffer->height()); + return buffer; +} + +rtc::scoped_refptr NV12Buffer::ToI420() { + rtc::scoped_refptr i420_buffer = + I420Buffer::Create(width(), height()); + 
libyuv::NV12ToI420(DataY(), StrideY(), DataUV(), StrideUV(), + i420_buffer->MutableDataY(), i420_buffer->StrideY(), + i420_buffer->MutableDataU(), i420_buffer->StrideU(), + i420_buffer->MutableDataV(), i420_buffer->StrideV(), + width(), height()); + return i420_buffer; +} + +int NV12Buffer::width() const { + return width_; +} +int NV12Buffer::height() const { + return height_; +} + +int NV12Buffer::StrideY() const { + return stride_y_; +} +int NV12Buffer::StrideUV() const { + return stride_uv_; +} + +const uint8_t* NV12Buffer::DataY() const { + return data_.get(); +} + +const uint8_t* NV12Buffer::DataUV() const { + return data_.get() + UVOffset(); +} + +uint8_t* NV12Buffer::MutableDataY() { + return data_.get(); +} + +uint8_t* NV12Buffer::MutableDataUV() { + return data_.get() + UVOffset(); +} + +size_t NV12Buffer::UVOffset() const { + return stride_y_ * height_; +} + +void NV12Buffer::InitializeData() { + memset(data_.get(), 0, NV12DataSize(height_, stride_y_, stride_uv_)); +} + +void NV12Buffer::CropAndScaleFrom(const NV12BufferInterface& src, + int offset_x, + int offset_y, + int crop_width, + int crop_height) { + RTC_CHECK_LE(crop_width, src.width()); + RTC_CHECK_LE(crop_height, src.height()); + RTC_CHECK_LE(crop_width + offset_x, src.width()); + RTC_CHECK_LE(crop_height + offset_y, src.height()); + RTC_CHECK_GE(offset_x, 0); + RTC_CHECK_GE(offset_y, 0); + + // Make sure offset is even so that u/v plane becomes aligned. 
+ const int uv_offset_x = offset_x / 2; + const int uv_offset_y = offset_y / 2; + offset_x = uv_offset_x * 2; + offset_y = uv_offset_y * 2; + + const uint8_t* y_plane = src.DataY() + src.StrideY() * offset_y + offset_x; + const uint8_t* uv_plane = + src.DataUV() + src.StrideUV() * uv_offset_y + uv_offset_x * 2; + + int res = libyuv::NV12Scale(y_plane, src.StrideY(), uv_plane, src.StrideUV(), + crop_width, crop_height, MutableDataY(), + StrideY(), MutableDataUV(), StrideUV(), width(), + height(), libyuv::kFilterBox); + + RTC_DCHECK_EQ(res, 0); +} + +} // namespace webrtc diff --git a/api/video/nv12_buffer.h b/api/video/nv12_buffer.h new file mode 100644 index 0000000000..cb989e84b0 --- /dev/null +++ b/api/video/nv12_buffer.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_VIDEO_NV12_BUFFER_H_ +#define API_VIDEO_NV12_BUFFER_H_ + +#include +#include + +#include "api/scoped_refptr.h" +#include "api/video/video_frame_buffer.h" +#include "rtc_base/memory/aligned_malloc.h" +#include "rtc_base/system/rtc_export.h" + +namespace webrtc { + +// NV12 is a biplanar encoding format, with full-resolution Y and +// half-resolution interleved UV. More information can be found at +// http://msdn.microsoft.com/library/windows/desktop/dd206750.aspx#nv12. 
+class RTC_EXPORT NV12Buffer : public NV12BufferInterface { + public: + static rtc::scoped_refptr Create(int width, int height); + static rtc::scoped_refptr Create(int width, + int height, + int stride_y, + int stride_uv); + static rtc::scoped_refptr Copy( + const I420BufferInterface& i420_buffer); + + rtc::scoped_refptr ToI420() override; + + int width() const override; + int height() const override; + + int StrideY() const override; + int StrideUV() const override; + + const uint8_t* DataY() const override; + const uint8_t* DataUV() const override; + + uint8_t* MutableDataY(); + uint8_t* MutableDataUV(); + + // Sets all three planes to all zeros. Used to work around for + // quirks in memory checkers + // (https://bugs.chromium.org/p/libyuv/issues/detail?id=377) and + // ffmpeg (http://crbug.com/390941). + // TODO(nisse): Deprecated. Should be deleted if/when those issues + // are resolved in a better way. Or in the mean time, use SetBlack. + void InitializeData(); + + // Scale the cropped area of |src| to the size of |this| buffer, and + // write the result into |this|. 
+ void CropAndScaleFrom(const NV12BufferInterface& src, + int offset_x, + int offset_y, + int crop_width, + int crop_height); + + protected: + NV12Buffer(int width, int height); + NV12Buffer(int width, int height, int stride_y, int stride_uv); + + ~NV12Buffer() override; + + private: + size_t UVOffset() const; + + const int width_; + const int height_; + const int stride_y_; + const int stride_uv_; + const std::unique_ptr data_; +}; + +} // namespace webrtc + +#endif // API_VIDEO_NV12_BUFFER_H_ diff --git a/api/video/recordable_encoded_frame.h b/api/video/recordable_encoded_frame.h index db59964f26..b4ad83a344 100644 --- a/api/video/recordable_encoded_frame.h +++ b/api/video/recordable_encoded_frame.h @@ -26,8 +26,10 @@ class RecordableEncodedFrame { public: // Encoded resolution in pixels struct EncodedResolution { - unsigned width; - unsigned height; + bool empty() const { return width == 0 && height == 0; } + + unsigned width = 0; + unsigned height = 0; }; virtual ~RecordableEncodedFrame() = default; diff --git a/api/video/rtp_video_frame_assembler.cc b/api/video/rtp_video_frame_assembler.cc new file mode 100644 index 0000000000..8f3d04c30b --- /dev/null +++ b/api/video/rtp_video_frame_assembler.cc @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "api/video/rtp_video_frame_assembler.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/container/inlined_vector.h" +#include "absl/types/optional.h" +#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" +#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_generic.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_h264.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h" +#include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h" +#include "modules/video_coding/frame_object.h" +#include "modules/video_coding/packet_buffer.h" +#include "modules/video_coding/rtp_frame_reference_finder.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { +std::unique_ptr CreateDepacketizer( + RtpVideoFrameAssembler::PayloadFormat payload_format) { + switch (payload_format) { + case RtpVideoFrameAssembler::kRaw: + return std::make_unique(); + case RtpVideoFrameAssembler::kH264: + return std::make_unique(); + case RtpVideoFrameAssembler::kVp8: + return std::make_unique(); + case RtpVideoFrameAssembler::kVp9: + return std::make_unique(); + case RtpVideoFrameAssembler::kAv1: + return std::make_unique(); + case RtpVideoFrameAssembler::kGeneric: + return std::make_unique(); + } + RTC_NOTREACHED(); + return nullptr; +} +} // namespace + +class RtpVideoFrameAssembler::Impl { + public: + explicit Impl(std::unique_ptr depacketizer); + ~Impl() = default; + + FrameVector InsertPacket(const RtpPacketReceived& packet); + + private: + using RtpFrameVector = + absl::InlinedVector, 3>; + + RtpFrameVector AssembleFrames( + video_coding::PacketBuffer::InsertResult insert_result); + FrameVector FindReferences(RtpFrameVector 
frames); + FrameVector UpdateWithPadding(uint16_t seq_num); + bool ParseDependenciesDescriptorExtension(const RtpPacketReceived& rtp_packet, + RTPVideoHeader& video_header); + bool ParseGenericDescriptorExtension(const RtpPacketReceived& rtp_packet, + RTPVideoHeader& video_header); + void ClearOldData(uint16_t incoming_seq_num); + + std::unique_ptr video_structure_; + SeqNumUnwrapper frame_id_unwrapper_; + absl::optional video_structure_frame_id_; + std::unique_ptr depacketizer_; + video_coding::PacketBuffer packet_buffer_; + RtpFrameReferenceFinder reference_finder_; +}; + +RtpVideoFrameAssembler::Impl::Impl( + std::unique_ptr depacketizer) + : depacketizer_(std::move(depacketizer)), + packet_buffer_(/*start_buffer_size=*/2048, /*max_buffer_size=*/2048) {} + +RtpVideoFrameAssembler::FrameVector RtpVideoFrameAssembler::Impl::InsertPacket( + const RtpPacketReceived& rtp_packet) { + absl::optional parsed_payload = + depacketizer_->Parse(rtp_packet.PayloadBuffer()); + + if (parsed_payload == absl::nullopt) { + return {}; + } + + if (parsed_payload->video_payload.size() == 0) { + ClearOldData(rtp_packet.SequenceNumber()); + return UpdateWithPadding(rtp_packet.SequenceNumber()); + } + + if (rtp_packet.HasExtension()) { + if (!ParseDependenciesDescriptorExtension(rtp_packet, + parsed_payload->video_header)) { + return {}; + } + } else if (rtp_packet.HasExtension()) { + if (!ParseGenericDescriptorExtension(rtp_packet, + parsed_payload->video_header)) { + return {}; + } + } + + parsed_payload->video_header.is_last_packet_in_frame |= rtp_packet.Marker(); + + auto packet = std::make_unique( + rtp_packet, parsed_payload->video_header); + packet->video_payload = std::move(parsed_payload->video_payload); + + ClearOldData(rtp_packet.SequenceNumber()); + return FindReferences( + AssembleFrames(packet_buffer_.InsertPacket(std::move(packet)))); +} + +void RtpVideoFrameAssembler::Impl::ClearOldData(uint16_t incoming_seq_num) { + constexpr uint16_t kOldSeqNumThreshold = 2000; + 
uint16_t old_seq_num = incoming_seq_num - kOldSeqNumThreshold; + packet_buffer_.ClearTo(old_seq_num); + reference_finder_.ClearTo(old_seq_num); +} + +RtpVideoFrameAssembler::Impl::RtpFrameVector +RtpVideoFrameAssembler::Impl::AssembleFrames( + video_coding::PacketBuffer::InsertResult insert_result) { + video_coding::PacketBuffer::Packet* first_packet = nullptr; + std::vector> payloads; + RtpFrameVector result; + + for (auto& packet : insert_result.packets) { + if (packet->is_first_packet_in_frame()) { + first_packet = packet.get(); + payloads.clear(); + } + payloads.emplace_back(packet->video_payload); + + if (packet->is_last_packet_in_frame()) { + rtc::scoped_refptr bitstream = + depacketizer_->AssembleFrame(payloads); + + if (!bitstream) { + continue; + } + + const video_coding::PacketBuffer::Packet& last_packet = *packet; + result.push_back(std::make_unique( + first_packet->seq_num, // + last_packet.seq_num, // + last_packet.marker_bit, // + /*times_nacked=*/0, // + /*first_packet_received_time=*/0, // + /*last_packet_received_time=*/0, // + first_packet->timestamp, // + /*ntp_time_ms=*/0, // + /*timing=*/VideoSendTiming(), // + first_packet->payload_type, // + first_packet->codec(), // + last_packet.video_header.rotation, // + last_packet.video_header.content_type, // + first_packet->video_header, // + last_packet.video_header.color_space, // + /*packet_infos=*/RtpPacketInfos(), // + std::move(bitstream))); + } + } + + return result; +} + +RtpVideoFrameAssembler::FrameVector +RtpVideoFrameAssembler::Impl::FindReferences(RtpFrameVector frames) { + FrameVector res; + for (auto& frame : frames) { + auto complete_frames = reference_finder_.ManageFrame(std::move(frame)); + for (std::unique_ptr& complete_frame : complete_frames) { + res.push_back(std::move(complete_frame)); + } + } + return res; +} + +RtpVideoFrameAssembler::FrameVector +RtpVideoFrameAssembler::Impl::UpdateWithPadding(uint16_t seq_num) { + auto res = + 
FindReferences(AssembleFrames(packet_buffer_.InsertPadding(seq_num))); + auto ref_finder_update = reference_finder_.PaddingReceived(seq_num); + + res.insert(res.end(), std::make_move_iterator(ref_finder_update.begin()), + std::make_move_iterator(ref_finder_update.end())); + + return res; +} + +bool RtpVideoFrameAssembler::Impl::ParseDependenciesDescriptorExtension( + const RtpPacketReceived& rtp_packet, + RTPVideoHeader& video_header) { + webrtc::DependencyDescriptor dependency_descriptor; + + if (!rtp_packet.GetExtension( + video_structure_.get(), &dependency_descriptor)) { + // Descriptor is either malformed, or the template referenced is not in + // the `video_structure_` currently being held. + // TODO(bugs.webrtc.org/10342): Improve packet reordering behavior. + RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc() + << " Failed to parse dependency descriptor."; + return false; + } + + if (dependency_descriptor.attached_structure != nullptr && + !dependency_descriptor.first_packet_in_frame) { + RTC_LOG(LS_WARNING) << "ssrc: " << rtp_packet.Ssrc() + << "Invalid dependency descriptor: structure " + "attached to non first packet of a frame."; + return false; + } + + video_header.is_first_packet_in_frame = + dependency_descriptor.first_packet_in_frame; + video_header.is_last_packet_in_frame = + dependency_descriptor.last_packet_in_frame; + + int64_t frame_id = + frame_id_unwrapper_.Unwrap(dependency_descriptor.frame_number); + auto& generic_descriptor_info = video_header.generic.emplace(); + generic_descriptor_info.frame_id = frame_id; + generic_descriptor_info.spatial_index = + dependency_descriptor.frame_dependencies.spatial_id; + generic_descriptor_info.temporal_index = + dependency_descriptor.frame_dependencies.temporal_id; + + for (int fdiff : dependency_descriptor.frame_dependencies.frame_diffs) { + generic_descriptor_info.dependencies.push_back(frame_id - fdiff); + } + for (int cdiff : dependency_descriptor.frame_dependencies.chain_diffs) { + 
generic_descriptor_info.chain_diffs.push_back(frame_id - cdiff); + } + generic_descriptor_info.decode_target_indications = + dependency_descriptor.frame_dependencies.decode_target_indications; + if (dependency_descriptor.resolution) { + video_header.width = dependency_descriptor.resolution->Width(); + video_header.height = dependency_descriptor.resolution->Height(); + } + if (dependency_descriptor.active_decode_targets_bitmask.has_value()) { + generic_descriptor_info.active_decode_targets = + *dependency_descriptor.active_decode_targets_bitmask; + } + + // FrameDependencyStructure is sent in the dependency descriptor of the first + // packet of a key frame and is required to parse all subsequent packets until + // the next key frame. + if (dependency_descriptor.attached_structure) { + RTC_DCHECK(dependency_descriptor.first_packet_in_frame); + if (video_structure_frame_id_ > frame_id) { + RTC_LOG(LS_WARNING) + << "Arrived key frame with id " << frame_id << " and structure id " + << dependency_descriptor.attached_structure->structure_id + << " is older than the latest received key frame with id " + << *video_structure_frame_id_ << " and structure id " + << video_structure_->structure_id; + return false; + } + video_structure_ = std::move(dependency_descriptor.attached_structure); + video_structure_frame_id_ = frame_id; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + } else { + video_header.frame_type = VideoFrameType::kVideoFrameDelta; + } + return true; +} + +bool RtpVideoFrameAssembler::Impl::ParseGenericDescriptorExtension( + const RtpPacketReceived& rtp_packet, + RTPVideoHeader& video_header) { + RtpGenericFrameDescriptor generic_frame_descriptor; + if (!rtp_packet.GetExtension( + &generic_frame_descriptor)) { + return false; + } + + video_header.is_first_packet_in_frame = + generic_frame_descriptor.FirstPacketInSubFrame(); + video_header.is_last_packet_in_frame = + generic_frame_descriptor.LastPacketInSubFrame(); + + if 
(generic_frame_descriptor.FirstPacketInSubFrame()) { + video_header.frame_type = + generic_frame_descriptor.FrameDependenciesDiffs().empty() + ? VideoFrameType::kVideoFrameKey + : VideoFrameType::kVideoFrameDelta; + + auto& generic_descriptor_info = video_header.generic.emplace(); + int64_t frame_id = + frame_id_unwrapper_.Unwrap(generic_frame_descriptor.FrameId()); + generic_descriptor_info.frame_id = frame_id; + generic_descriptor_info.spatial_index = + generic_frame_descriptor.SpatialLayer(); + generic_descriptor_info.temporal_index = + generic_frame_descriptor.TemporalLayer(); + for (uint16_t fdiff : generic_frame_descriptor.FrameDependenciesDiffs()) { + generic_descriptor_info.dependencies.push_back(frame_id - fdiff); + } + } + video_header.width = generic_frame_descriptor.Width(); + video_header.height = generic_frame_descriptor.Height(); + return true; +} + +RtpVideoFrameAssembler::RtpVideoFrameAssembler(PayloadFormat payload_format) + : impl_(std::make_unique(CreateDepacketizer(payload_format))) {} + +RtpVideoFrameAssembler::~RtpVideoFrameAssembler() = default; + +RtpVideoFrameAssembler::FrameVector RtpVideoFrameAssembler::InsertPacket( + const RtpPacketReceived& packet) { + return impl_->InsertPacket(packet); +} + +} // namespace webrtc diff --git a/api/video/rtp_video_frame_assembler.h b/api/video/rtp_video_frame_assembler.h new file mode 100644 index 0000000000..353942bdc8 --- /dev/null +++ b/api/video/rtp_video_frame_assembler.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_VIDEO_RTP_VIDEO_FRAME_ASSEMBLER_H_ +#define API_VIDEO_RTP_VIDEO_FRAME_ASSEMBLER_H_ + +#include +#include + +#include "absl/container/inlined_vector.h" +#include "api/video/encoded_frame.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" + +namespace webrtc { +// The RtpVideoFrameAssembler takes RtpPacketReceived and assembles them into +// complete frames. A frame is considered complete when all packets of the frame +// has been received, the bitstream data has successfully extracted, an ID has +// been assigned, and all dependencies are known. Frame IDs are strictly +// monotonic in decode order, dependencies are expressed as frame IDs. +class RtpVideoFrameAssembler { + public: + // FrameVector is just a vector-like type of std::unique_ptr. + // The vector type may change without notice. + using FrameVector = absl::InlinedVector, 3>; + enum PayloadFormat { kRaw, kH264, kVp8, kVp9, kAv1, kGeneric }; + + explicit RtpVideoFrameAssembler(PayloadFormat payload_format); + RtpVideoFrameAssembler(const RtpVideoFrameAssembler& other) = delete; + RtpVideoFrameAssembler& operator=(const RtpVideoFrameAssembler& other) = + delete; + ~RtpVideoFrameAssembler(); + + // Typically when a packet is inserted zero or one frame is completed. In the + // case of RTP packets being inserted out of order then sometime multiple + // frames could be completed from a single packet, hence the 'FrameVector' + // return type. + FrameVector InsertPacket(const RtpPacketReceived& packet); + + private: + class Impl; + std::unique_ptr impl_; +}; + +} // namespace webrtc + +#endif // API_VIDEO_RTP_VIDEO_FRAME_ASSEMBLER_H_ diff --git a/api/video/rtp_video_frame_assembler_unittests.cc b/api/video/rtp_video_frame_assembler_unittests.cc new file mode 100644 index 0000000000..916a83cd73 --- /dev/null +++ b/api/video/rtp_video_frame_assembler_unittests.cc @@ -0,0 +1,495 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "api/array_view.h" +#include "api/video/rtp_video_frame_assembler.h" +#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" +#include "modules/rtp_rtcp/source/rtp_format.h" +#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" +#include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::testing::ElementsAreArray; +using ::testing::Eq; +using ::testing::IsEmpty; +using ::testing::Matches; +using ::testing::SizeIs; +using ::testing::UnorderedElementsAre; +using ::testing::UnorderedElementsAreArray; +using PayloadFormat = RtpVideoFrameAssembler::PayloadFormat; + +class PacketBuilder { + public: + explicit PacketBuilder(PayloadFormat format) + : format_(format), packet_to_send_(&extension_manager_) {} + + PacketBuilder& WithSeqNum(uint16_t seq_num) { + seq_num_ = seq_num; + return *this; + } + + PacketBuilder& WithPayload(rtc::ArrayView payload) { + payload_.assign(payload.begin(), payload.end()); + return *this; + } + + PacketBuilder& WithVideoHeader(const RTPVideoHeader& video_header) { + video_header_ = video_header; + return *this; + } + + template + PacketBuilder& WithExtension(int id, const Args&... 
args) { + extension_manager_.Register(id); + packet_to_send_.IdentifyExtensions(extension_manager_); + packet_to_send_.SetExtension(std::forward(args)...); + return *this; + } + + RtpPacketReceived Build() { + auto packetizer = + RtpPacketizer::Create(GetVideoCodecType(), payload_, {}, video_header_); + packetizer->NextPacket(&packet_to_send_); + packet_to_send_.SetSequenceNumber(seq_num_); + + RtpPacketReceived received(&extension_manager_); + received.Parse(packet_to_send_.Buffer()); + return received; + } + + private: + absl::optional GetVideoCodecType() { + switch (format_) { + case PayloadFormat::kRaw: { + return absl::nullopt; + } + case PayloadFormat::kH264: { + return kVideoCodecH264; + } + case PayloadFormat::kVp8: { + return kVideoCodecVP8; + } + case PayloadFormat::kVp9: { + return kVideoCodecVP9; + } + case PayloadFormat::kAv1: { + return kVideoCodecAV1; + } + case PayloadFormat::kGeneric: { + return kVideoCodecGeneric; + } + } + RTC_NOTREACHED(); + return absl::nullopt; + } + + const RtpVideoFrameAssembler::PayloadFormat format_; + uint16_t seq_num_ = 0; + std::vector payload_; + RTPVideoHeader video_header_; + RtpPacketReceived::ExtensionManager extension_manager_; + RtpPacketToSend packet_to_send_; +}; + +void AppendFrames(RtpVideoFrameAssembler::FrameVector from, + RtpVideoFrameAssembler::FrameVector& to) { + to.insert(to.end(), std::make_move_iterator(from.begin()), + std::make_move_iterator(from.end())); +} + +rtc::ArrayView References(const std::unique_ptr& frame) { + return rtc::MakeArrayView(frame->references, frame->num_references); +} + +rtc::ArrayView Payload(const std::unique_ptr& frame) { + return rtc::ArrayView(*frame->GetEncodedData()); +} + +TEST(RtpVideoFrameAssembler, Vp8Packetization) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kVp8); + + // When sending VP8 over RTP parts of the payload is actually inspected at the + // RTP level. 
It just so happen that the initial 'V' sets the keyframe bit + // (0x01) to the correct value. + uint8_t kKeyframePayload[] = "Vp8Keyframe"; + ASSERT_EQ(kKeyframePayload[0] & 0x01, 0); + + uint8_t kDeltaframePayload[] = "SomeFrame"; + ASSERT_EQ(kDeltaframePayload[0] & 0x01, 1); + + RtpVideoFrameAssembler::FrameVector frames; + + RTPVideoHeader video_header; + auto& vp8_header = + video_header.video_type_header.emplace(); + + vp8_header.pictureId = 10; + vp8_header.tl0PicIdx = 0; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kVp8) + .WithPayload(kKeyframePayload) + .WithVideoHeader(video_header) + .Build()), + frames); + + vp8_header.pictureId = 11; + vp8_header.tl0PicIdx = 1; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kVp8) + .WithPayload(kDeltaframePayload) + .WithVideoHeader(video_header) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(10)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kKeyframePayload)); + + EXPECT_THAT(frames[1]->Id(), Eq(11)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kDeltaframePayload)); +} + +TEST(RtpVideoFrameAssembler, Vp9Packetization) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kVp9); + RtpVideoFrameAssembler::FrameVector frames; + + uint8_t kPayload[] = "SomePayload"; + + RTPVideoHeader video_header; + auto& vp9_header = + video_header.video_type_header.emplace(); + vp9_header.InitRTPVideoHeaderVP9(); + + vp9_header.picture_id = 10; + vp9_header.tl0_pic_idx = 0; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kVp9) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .Build()), + frames); + + vp9_header.picture_id = 11; + vp9_header.tl0_pic_idx = 1; + vp9_header.inter_pic_predicted = true; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kVp9) + 
.WithPayload(kPayload) + .WithVideoHeader(video_header) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(10)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(11)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); +} + +TEST(RtpVideoFrameAssembler, Av1Packetization) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kAv1); + RtpVideoFrameAssembler::FrameVector frames; + + auto kKeyframePayload = + BuildAv1Frame({Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3}), + Av1Obu(kAv1ObuTypeFrame).WithPayload({4, 5, 6})}); + + auto kDeltaframePayload = + BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame).WithPayload({7, 8, 9})}); + + RTPVideoHeader video_header; + + video_header.frame_type = VideoFrameType::kVideoFrameKey; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kAv1) + .WithPayload(kKeyframePayload) + .WithVideoHeader(video_header) + .WithSeqNum(20) + .Build()), + frames); + + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kAv1) + .WithPayload(kDeltaframePayload) + .WithSeqNum(21) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(20)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kKeyframePayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(21)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kDeltaframePayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(20)); +} + +TEST(RtpVideoFrameAssembler, RawPacketizationDependencyDescriptorExtension) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kRaw); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = "SomePayload"; + + FrameDependencyStructure dependency_structure; + dependency_structure.num_decode_targets = 1; + 
dependency_structure.num_chains = 1; + dependency_structure.decode_target_protected_by_chain.push_back(0); + dependency_structure.templates.push_back( + FrameDependencyTemplate().S(0).T(0).Dtis("S").ChainDiffs({0})); + dependency_structure.templates.push_back( + FrameDependencyTemplate().S(0).T(0).Dtis("S").ChainDiffs({10}).FrameDiffs( + {10})); + + DependencyDescriptor dependency_descriptor; + + dependency_descriptor.frame_number = 10; + dependency_descriptor.frame_dependencies = dependency_structure.templates[0]; + dependency_descriptor.attached_structure = + std::make_unique(dependency_structure); + AppendFrames(assembler.InsertPacket( + PacketBuilder(PayloadFormat::kRaw) + .WithPayload(kPayload) + .WithExtension( + 1, dependency_structure, dependency_descriptor) + .Build()), + frames); + + dependency_descriptor.frame_number = 20; + dependency_descriptor.frame_dependencies = dependency_structure.templates[1]; + dependency_descriptor.attached_structure.reset(); + AppendFrames(assembler.InsertPacket( + PacketBuilder(PayloadFormat::kRaw) + .WithPayload(kPayload) + .WithExtension( + 1, dependency_structure, dependency_descriptor) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(10)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(20)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(10)); +} + +TEST(RtpVideoFrameAssembler, RawPacketizationGenericDescriptor00Extension) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kRaw); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = "SomePayload"; + + RtpGenericFrameDescriptor generic; + + generic.SetFirstPacketInSubFrame(true); + generic.SetLastPacketInSubFrame(true); + generic.SetFrameId(100); + AppendFrames( + assembler.InsertPacket( + PacketBuilder(PayloadFormat::kRaw) + 
.WithPayload(kPayload) + .WithExtension(1, generic) + .Build()), + frames); + + generic.SetFrameId(102); + generic.AddFrameDependencyDiff(2); + AppendFrames( + assembler.InsertPacket( + PacketBuilder(PayloadFormat::kRaw) + .WithPayload(kPayload) + .WithExtension(1, generic) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(100)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(102)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(100)); +} + +TEST(RtpVideoFrameAssembler, RawPacketizationGenericPayloadDescriptor) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = "SomePayload"; + + RTPVideoHeader video_header; + + video_header.frame_type = VideoFrameType::kVideoFrameKey; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(123) + .Build()), + frames); + + video_header.frame_type = VideoFrameType::kVideoFrameDelta; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(124) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[0]->Id(), Eq(123)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + EXPECT_THAT(frames[1]->Id(), Eq(124)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(123)); +} + +TEST(RtpVideoFrameAssembler, Padding) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + RtpVideoFrameAssembler::FrameVector frames; + uint8_t kPayload[] = "SomePayload"; + + RTPVideoHeader 
video_header; + + video_header.frame_type = VideoFrameType::kVideoFrameKey; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(123) + .Build()), + frames); + + video_header.frame_type = VideoFrameType::kVideoFrameDelta; + AppendFrames(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(125) + .Build()), + frames); + + ASSERT_THAT(frames, SizeIs(1)); + + EXPECT_THAT(frames[0]->Id(), Eq(123)); + EXPECT_THAT(Payload(frames[0]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[0]), IsEmpty()); + + // Padding packets have no bitstream data. An easy way to generate one is to + // build a normal packet and then simply remove the bitstream portion of the + // payload. + RtpPacketReceived padding_packet = PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(124) + .Build(); + // The payload descriptor is one byte, keep it. + padding_packet.SetPayloadSize(1); + + AppendFrames(assembler.InsertPacket(padding_packet), frames); + + ASSERT_THAT(frames, SizeIs(2)); + + EXPECT_THAT(frames[1]->Id(), Eq(125)); + EXPECT_THAT(Payload(frames[1]), ElementsAreArray(kPayload)); + EXPECT_THAT(References(frames[1]), UnorderedElementsAre(123)); +} + +TEST(RtpVideoFrameAssembler, ClearOldPackets) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + + // If we don't have a payload the packet will be counted as a padding packet. 
+ uint8_t kPayload[] = "DontCare"; + + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(0) + .Build()), + SizeIs(1)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(2000) + .Build()), + SizeIs(1)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(0) + .Build()), + SizeIs(0)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(1) + .Build()), + SizeIs(1)); +} + +TEST(RtpVideoFrameAssembler, ClearOldPacketsWithPadding) { + RtpVideoFrameAssembler assembler(RtpVideoFrameAssembler::kGeneric); + uint8_t kPayload[] = "DontCare"; + + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(0) + .Build()), + SizeIs(1)); + + // Padding packets have no bitstream data. An easy way to generate one is to + // build a normal packet and then simply remove the bitstream portion of the + // payload. + RtpPacketReceived padding_packet = PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(2000) + .Build(); + // The payload descriptor is one byte, keep it. 
+ padding_packet.SetPayloadSize(1); + EXPECT_THAT(assembler.InsertPacket(padding_packet), SizeIs(0)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(0) + .Build()), + SizeIs(0)); + + EXPECT_THAT(assembler.InsertPacket(PacketBuilder(PayloadFormat::kGeneric) + .WithPayload(kPayload) + .WithVideoHeader(video_header) + .WithSeqNum(1) + .Build()), + SizeIs(1)); +} + +} // namespace +} // namespace webrtc diff --git a/api/video/test/BUILD.gn b/api/video/test/BUILD.gn index 5f697a081c..1573e7848f 100644 --- a/api/video/test/BUILD.gn +++ b/api/video/test/BUILD.gn @@ -12,6 +12,7 @@ rtc_library("rtc_api_video_unittests") { testonly = true sources = [ "color_space_unittest.cc", + "nv12_buffer_unittest.cc", "video_adaptation_counters_unittest.cc", "video_bitrate_allocation_unittest.cc", ] @@ -20,9 +21,10 @@ rtc_library("rtc_api_video_unittests") { "..:video_bitrate_allocation", "..:video_frame", "..:video_rtp_headers", + "../../../test:frame_utils", "../../../test:test_support", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("mock_recordable_encoded_frame") { diff --git a/api/video/test/nv12_buffer_unittest.cc b/api/video/test/nv12_buffer_unittest.cc new file mode 100644 index 0000000000..d84adb5bf5 --- /dev/null +++ b/api/video/test/nv12_buffer_unittest.cc @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "api/video/nv12_buffer.h" + +#include "api/video/i420_buffer.h" +#include "test/frame_utils.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { + +namespace { +int GetY(rtc::scoped_refptr buf, int col, int row) { + return buf->DataY()[row * buf->StrideY() + col]; +} + +int GetU(rtc::scoped_refptr buf, int col, int row) { + return buf->DataUV()[(row / 2) * buf->StrideUV() + (col / 2) * 2]; +} + +int GetV(rtc::scoped_refptr buf, int col, int row) { + return buf->DataUV()[(row / 2) * buf->StrideUV() + (col / 2) * 2 + 1]; +} + +void FillNV12Buffer(rtc::scoped_refptr buf) { + const uint8_t Y = 1; + const uint8_t U = 2; + const uint8_t V = 3; + for (int row = 0; row < buf->height(); ++row) { + for (int col = 0; col < buf->width(); ++col) { + buf->MutableDataY()[row * buf->StrideY() + col] = Y; + } + } + // Fill interleaving UV values. + for (int row = 0; row < buf->ChromaHeight(); row++) { + for (int col = 0; col < buf->StrideUV(); col += 2) { + int uv_index = row * buf->StrideUV() + col; + buf->MutableDataUV()[uv_index] = U; + buf->MutableDataUV()[uv_index + 1] = V; + } + } +} + +} // namespace + +TEST(NV12BufferTest, InitialData) { + constexpr int stride_y = 3; + constexpr int stride_uv = 4; + constexpr int width = 3; + constexpr int height = 3; + + rtc::scoped_refptr nv12_buffer(NV12Buffer::Create(width, height)); + EXPECT_EQ(width, nv12_buffer->width()); + EXPECT_EQ(height, nv12_buffer->height()); + EXPECT_EQ(stride_y, nv12_buffer->StrideY()); + EXPECT_EQ(stride_uv, nv12_buffer->StrideUV()); + EXPECT_EQ(2, nv12_buffer->ChromaWidth()); + EXPECT_EQ(2, nv12_buffer->ChromaHeight()); +} + +TEST(NV12BufferTest, ReadPixels) { + constexpr int width = 3; + constexpr int height = 3; + + rtc::scoped_refptr nv12_buffer(NV12Buffer::Create(width, height)); + // Y = 1, U = 2, V = 3. 
+ FillNV12Buffer(nv12_buffer); + for (int row = 0; row < height; row++) { + for (int col = 0; col < width; col++) { + EXPECT_EQ(1, GetY(nv12_buffer, col, row)); + EXPECT_EQ(2, GetU(nv12_buffer, col, row)); + EXPECT_EQ(3, GetV(nv12_buffer, col, row)); + } + } +} + +TEST(NV12BufferTest, ToI420) { + constexpr int width = 3; + constexpr int height = 3; + constexpr int size_y = width * height; + constexpr int size_u = (width + 1) / 2 * (height + 1) / 2; + constexpr int size_v = (width + 1) / 2 * (height + 1) / 2; + rtc::scoped_refptr reference(I420Buffer::Create(width, height)); + memset(reference->MutableDataY(), 8, size_y); + memset(reference->MutableDataU(), 4, size_u); + memset(reference->MutableDataV(), 2, size_v); + + rtc::scoped_refptr nv12_buffer(NV12Buffer::Create(width, height)); + // Convert the reference buffer to NV12. + memset(nv12_buffer->MutableDataY(), 8, size_y); + // Interleaving u/v values. + for (int i = 0; i < size_u + size_v; i += 2) { + nv12_buffer->MutableDataUV()[i] = 4; + nv12_buffer->MutableDataUV()[i + 1] = 2; + } + // Confirm YUV values are as expected. 
+ for (int row = 0; row < height; row++) { + for (int col = 0; col < width; col++) { + EXPECT_EQ(8, GetY(nv12_buffer, col, row)); + EXPECT_EQ(4, GetU(nv12_buffer, col, row)); + EXPECT_EQ(2, GetV(nv12_buffer, col, row)); + } + } + + rtc::scoped_refptr i420_buffer(nv12_buffer->ToI420()); + EXPECT_EQ(height, i420_buffer->height()); + EXPECT_EQ(width, i420_buffer->width()); + EXPECT_TRUE(test::FrameBufsEqual(reference, i420_buffer)); +} + +} // namespace webrtc diff --git a/api/video/video_adaptation_counters.cc b/api/video/video_adaptation_counters.cc index 25e0bee1ff..df1769d5d4 100644 --- a/api/video/video_adaptation_counters.cc +++ b/api/video/video_adaptation_counters.cc @@ -10,6 +10,8 @@ #include "api/video/video_adaptation_counters.h" +#include "rtc_base/strings/string_builder.h" + namespace webrtc { bool VideoAdaptationCounters::operator==( @@ -30,4 +32,11 @@ VideoAdaptationCounters VideoAdaptationCounters::operator+( fps_adaptations + other.fps_adaptations); } +std::string VideoAdaptationCounters::ToString() const { + rtc::StringBuilder ss; + ss << "{ res=" << resolution_adaptations << " fps=" << fps_adaptations + << " }"; + return ss.Release(); +} + } // namespace webrtc diff --git a/api/video/video_adaptation_counters.h b/api/video/video_adaptation_counters.h index eff0baaa21..2dea902f2f 100644 --- a/api/video/video_adaptation_counters.h +++ b/api/video/video_adaptation_counters.h @@ -11,6 +11,8 @@ #ifndef API_VIDEO_VIDEO_ADAPTATION_COUNTERS_H_ #define API_VIDEO_VIDEO_ADAPTATION_COUNTERS_H_ +#include + #include "rtc_base/checks.h" namespace webrtc { @@ -33,6 +35,8 @@ struct VideoAdaptationCounters { VideoAdaptationCounters operator+(const VideoAdaptationCounters& other) const; + std::string ToString() const; + int resolution_adaptations; int fps_adaptations; }; diff --git a/api/video/video_bitrate_allocator.cc b/api/video/video_bitrate_allocator.cc index 8ad5f75244..f4e843b348 100644 --- a/api/video/video_bitrate_allocator.cc +++ 
b/api/video/video_bitrate_allocator.cc @@ -49,4 +49,6 @@ VideoBitrateAllocation VideoBitrateAllocator::Allocate( return GetAllocation(parameters.total_bitrate.bps(), parameters.framerate); } +void VideoBitrateAllocator::SetLegacyConferenceMode(bool enabled) {} + } // namespace webrtc diff --git a/api/video/video_bitrate_allocator.h b/api/video/video_bitrate_allocator.h index 04de04c1b0..fdc86dbc57 100644 --- a/api/video/video_bitrate_allocator.h +++ b/api/video/video_bitrate_allocator.h @@ -40,6 +40,10 @@ class VideoBitrateAllocator { virtual VideoBitrateAllocation Allocate( VideoBitrateAllocationParameters parameters); + + // Deprecated: Only used to work around issues with the legacy conference + // screenshare mode and shouldn't be needed by any subclasses. + virtual void SetLegacyConferenceMode(bool enabled); }; class VideoBitrateAllocationObserver { diff --git a/api/video/video_codec_constants.h b/api/video/video_codec_constants.h index 6b6feee4cb..5859f9b4cf 100644 --- a/api/video/video_codec_constants.h +++ b/api/video/video_codec_constants.h @@ -17,6 +17,7 @@ enum : int { kMaxEncoderBuffers = 8 }; enum : int { kMaxSimulcastStreams = 3 }; enum : int { kMaxSpatialLayers = 5 }; enum : int { kMaxTemporalStreams = 4 }; +enum : int { kMaxPreferredPixelFormats = 5 }; } // namespace webrtc diff --git a/api/video/video_frame.h b/api/video/video_frame.h index 08c939d916..e073fd5e42 100644 --- a/api/video/video_frame.h +++ b/api/video/video_frame.h @@ -134,11 +134,11 @@ class RTC_EXPORT VideoFrame { // Get frame size in pixels. uint32_t size() const; - // Get frame ID. Returns 0 if ID is not set. Not guarantee to be transferred - // from the sender to the receiver, but preserved on single side. The id + // Get frame ID. Returns 0 if ID is not set. Not guaranteed to be transferred + // from the sender to the receiver, but preserved on the sender side. 
The id // should be propagated between all frame modifications during its lifetime // from capturing to sending as encoded image. It is intended to be unique - // over a time window of a few minutes for peer connection, to which + // over a time window of a few minutes for the peer connection to which the // corresponding video stream belongs to. uint16_t id() const { return id_; } void set_id(uint16_t id) { id_ = id; } @@ -186,6 +186,16 @@ class RTC_EXPORT VideoFrame { color_space_ = color_space; } + // max_composition_delay_in_frames() is used in an experiment of a low-latency + // renderer algorithm see crbug.com/1138888. + absl::optional max_composition_delay_in_frames() const { + return max_composition_delay_in_frames_; + } + void set_max_composition_delay_in_frames( + absl::optional max_composition_delay_in_frames) { + max_composition_delay_in_frames_ = max_composition_delay_in_frames; + } + // Get render time in milliseconds. // TODO(nisse): Deprecated. Migrate all users to timestamp_us(). int64_t render_time_ms() const; @@ -255,6 +265,7 @@ class RTC_EXPORT VideoFrame { int64_t timestamp_us_; VideoRotation rotation_; absl::optional color_space_; + absl::optional max_composition_delay_in_frames_; // Updated since the last frame area. If present it means that the bounding // box of all the changes is within the rectangular area and is close to it. 
// If absent, it means that there's no information about the change at all and diff --git a/api/video/video_frame_buffer.cc b/api/video/video_frame_buffer.cc index b9fd9cd92a..7085010325 100644 --- a/api/video/video_frame_buffer.cc +++ b/api/video/video_frame_buffer.cc @@ -10,10 +10,26 @@ #include "api/video/video_frame_buffer.h" +#include "api/video/i420_buffer.h" +#include "api/video/nv12_buffer.h" #include "rtc_base/checks.h" namespace webrtc { +rtc::scoped_refptr VideoFrameBuffer::CropAndScale( + int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) { + rtc::scoped_refptr result = + I420Buffer::Create(scaled_width, scaled_height); + result->CropAndScaleFrom(*this->ToI420(), offset_x, offset_y, crop_width, + crop_height); + return result; +} + const I420BufferInterface* VideoFrameBuffer::GetI420() const { // Overridden by subclasses that can return an I420 buffer without any // conversion, in particular, I420BufferInterface. @@ -35,10 +51,40 @@ const I010BufferInterface* VideoFrameBuffer::GetI010() const { return static_cast(this); } +const NV12BufferInterface* VideoFrameBuffer::GetNV12() const { + RTC_CHECK(type() == Type::kNV12); + return static_cast(this); +} + +rtc::scoped_refptr VideoFrameBuffer::GetMappedFrameBuffer( + rtc::ArrayView types) { + RTC_CHECK(type() == Type::kNative); + return nullptr; +} + VideoFrameBuffer::Type I420BufferInterface::type() const { return Type::kI420; } +const char* VideoFrameBufferTypeToString(VideoFrameBuffer::Type type) { + switch (type) { + case VideoFrameBuffer::Type::kNative: + return "kNative"; + case VideoFrameBuffer::Type::kI420: + return "kI420"; + case VideoFrameBuffer::Type::kI420A: + return "kI420A"; + case VideoFrameBuffer::Type::kI444: + return "kI444"; + case VideoFrameBuffer::Type::kI010: + return "kI010"; + case VideoFrameBuffer::Type::kNV12: + return "kNV12"; + default: + RTC_NOTREACHED(); + } +} + int I420BufferInterface::ChromaWidth() const { return 
(width() + 1) / 2; } @@ -83,4 +129,29 @@ int I010BufferInterface::ChromaHeight() const { return (height() + 1) / 2; } +VideoFrameBuffer::Type NV12BufferInterface::type() const { + return Type::kNV12; +} + +int NV12BufferInterface::ChromaWidth() const { + return (width() + 1) / 2; +} + +int NV12BufferInterface::ChromaHeight() const { + return (height() + 1) / 2; +} + +rtc::scoped_refptr NV12BufferInterface::CropAndScale( + int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) { + rtc::scoped_refptr result = + NV12Buffer::Create(scaled_width, scaled_height); + result->CropAndScaleFrom(*this, offset_x, offset_y, crop_width, crop_height); + return result; +} + } // namespace webrtc diff --git a/api/video/video_frame_buffer.h b/api/video/video_frame_buffer.h index d87a4230a4..62adc204f6 100644 --- a/api/video/video_frame_buffer.h +++ b/api/video/video_frame_buffer.h @@ -13,6 +13,7 @@ #include +#include "api/array_view.h" #include "api/scoped_refptr.h" #include "rtc_base/ref_count.h" #include "rtc_base/system/rtc_export.h" @@ -23,6 +24,7 @@ class I420BufferInterface; class I420ABufferInterface; class I444BufferInterface; class I010BufferInterface; +class NV12BufferInterface; // Base class for frame buffers of different types of pixel format and storage. // The tag in type() indicates how the data is represented, and each type is @@ -50,6 +52,7 @@ class RTC_EXPORT VideoFrameBuffer : public rtc::RefCountInterface { kI420A, kI444, kI010, + kNV12, }; // This function specifies in what pixel format the data is stored in. @@ -72,18 +75,50 @@ class RTC_EXPORT VideoFrameBuffer : public rtc::RefCountInterface { // WebrtcVideoFrameAdapter in Chrome - it's I420 buffer backed by a shared // memory buffer. Therefore it must have type kNative. Yet, ToI420() // doesn't affect binary data at all. Another example is any I420A buffer. 
+ // TODO(https://crbug.com/webrtc/12021): Make this method non-virtual and + // behave as the other GetXXX methods below. virtual const I420BufferInterface* GetI420() const; + // A format specific scale function. Default implementation works by + // converting to I420. But more efficient implementations may override it, + // especially for kNative. + // First, the image is cropped to |crop_width| and |crop_height| and then + // scaled to |scaled_width| and |scaled_height|. + virtual rtc::scoped_refptr CropAndScale(int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height); + + // Alias for common use case. + rtc::scoped_refptr Scale(int scaled_width, + int scaled_height) { + return CropAndScale(0, 0, width(), height(), scaled_width, scaled_height); + } + // These functions should only be called if type() is of the correct type. // Calling with a different type will result in a crash. const I420ABufferInterface* GetI420A() const; const I444BufferInterface* GetI444() const; const I010BufferInterface* GetI010() const; + const NV12BufferInterface* GetNV12() const; + + // From a kNative frame, returns a VideoFrameBuffer with a pixel format in + // the list of types that is in the main memory with a pixel perfect + // conversion for encoding with a software encoder. Returns nullptr if the + // frame type is not supported, mapping is not possible, or if the kNative + // frame has not implemented this method. Only callable if type() is kNative. + virtual rtc::scoped_refptr GetMappedFrameBuffer( + rtc::ArrayView types); protected: ~VideoFrameBuffer() override {} }; +// Update when VideoFrameBuffer::Type is updated. +const char* VideoFrameBufferTypeToString(VideoFrameBuffer::Type type); + // This interface represents planar formats. 
class PlanarYuvBuffer : public VideoFrameBuffer { public: @@ -175,6 +210,49 @@ class I010BufferInterface : public PlanarYuv16BBuffer { ~I010BufferInterface() override {} }; +class BiplanarYuvBuffer : public VideoFrameBuffer { + public: + virtual int ChromaWidth() const = 0; + virtual int ChromaHeight() const = 0; + + // Returns the number of steps(in terms of Data*() return type) between + // successive rows for a given plane. + virtual int StrideY() const = 0; + virtual int StrideUV() const = 0; + + protected: + ~BiplanarYuvBuffer() override {} +}; + +class BiplanarYuv8Buffer : public BiplanarYuvBuffer { + public: + virtual const uint8_t* DataY() const = 0; + virtual const uint8_t* DataUV() const = 0; + + protected: + ~BiplanarYuv8Buffer() override {} +}; + +// Represents Type::kNV12. NV12 is full resolution Y and half-resolution +// interleved UV. +class RTC_EXPORT NV12BufferInterface : public BiplanarYuv8Buffer { + public: + Type type() const override; + + int ChromaWidth() const final; + int ChromaHeight() const final; + + rtc::scoped_refptr CropAndScale(int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) override; + + protected: + ~NV12BufferInterface() override {} +}; + } // namespace webrtc #endif // API_VIDEO_VIDEO_FRAME_BUFFER_H_ diff --git a/api/video/video_frame_marking.h b/api/video/video_frame_marking.h deleted file mode 100644 index 2a34852f1d..0000000000 --- a/api/video/video_frame_marking.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef API_VIDEO_VIDEO_FRAME_MARKING_H_ -#define API_VIDEO_VIDEO_FRAME_MARKING_H_ - -namespace webrtc { - -struct FrameMarking { - bool start_of_frame; - bool end_of_frame; - bool independent_frame; - bool discardable_frame; - bool base_layer_sync; - uint8_t temporal_id; - uint8_t layer_id; - uint8_t tl0_pic_idx; -}; - -} // namespace webrtc - -#endif // API_VIDEO_VIDEO_FRAME_MARKING_H_ diff --git a/api/video/video_layers_allocation.h b/api/video/video_layers_allocation.h new file mode 100644 index 0000000000..39734151ae --- /dev/null +++ b/api/video/video_layers_allocation.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_VIDEO_VIDEO_LAYERS_ALLOCATION_H_ +#define API_VIDEO_VIDEO_LAYERS_ALLOCATION_H_ + +#include + +#include "absl/container/inlined_vector.h" +#include "api/units/data_rate.h" + +namespace webrtc { + +// This struct contains additional stream-level information needed by a +// Selective Forwarding Middlebox to make relay decisions of RTP streams. 
+struct VideoLayersAllocation { + static constexpr int kMaxSpatialIds = 4; + static constexpr int kMaxTemporalIds = 4; + + friend bool operator==(const VideoLayersAllocation& lhs, + const VideoLayersAllocation& rhs) { + return lhs.rtp_stream_index == rhs.rtp_stream_index && + lhs.resolution_and_frame_rate_is_valid == + rhs.resolution_and_frame_rate_is_valid && + lhs.active_spatial_layers == rhs.active_spatial_layers; + } + + friend bool operator!=(const VideoLayersAllocation& lhs, + const VideoLayersAllocation& rhs) { + return !(lhs == rhs); + } + + struct SpatialLayer { + friend bool operator==(const SpatialLayer& lhs, const SpatialLayer& rhs) { + return lhs.rtp_stream_index == rhs.rtp_stream_index && + lhs.spatial_id == rhs.spatial_id && + lhs.target_bitrate_per_temporal_layer == + rhs.target_bitrate_per_temporal_layer && + lhs.width == rhs.width && lhs.height == rhs.height && + lhs.frame_rate_fps == rhs.frame_rate_fps; + } + + friend bool operator!=(const SpatialLayer& lhs, const SpatialLayer& rhs) { + return !(lhs == rhs); + } + int rtp_stream_index = 0; + // Index of the spatial layer per `rtp_stream_index`. + int spatial_id = 0; + // Target bitrate per decode target. + absl::InlinedVector + target_bitrate_per_temporal_layer; + + // These fields are only valid if `resolution_and_frame_rate_is_valid` is + // true + uint16_t width = 0; + uint16_t height = 0; + // Max frame rate used in any temporal layer of this spatial layer. + uint8_t frame_rate_fps = 0; + }; + + // Index of the rtp stream this allocation is sent on. Used for mapping + // a SpatialLayer to a rtp stream. 
+ int rtp_stream_index = 0; + bool resolution_and_frame_rate_is_valid = false; + absl::InlinedVector active_spatial_layers; +}; + +} // namespace webrtc + +#endif // API_VIDEO_VIDEO_LAYERS_ALLOCATION_H_ diff --git a/api/video/video_source_interface.h b/api/video/video_source_interface.h index b03d7c5483..8b5823fc27 100644 --- a/api/video/video_source_interface.h +++ b/api/video/video_source_interface.h @@ -12,6 +12,7 @@ #define API_VIDEO_VIDEO_SOURCE_INTERFACE_H_ #include +#include #include "absl/types/optional.h" #include "api/video/video_sink_interface.h" @@ -22,6 +23,15 @@ namespace rtc { // VideoSinkWants is used for notifying the source of properties a video frame // should have when it is delivered to a certain sink. struct RTC_EXPORT VideoSinkWants { + struct FrameSize { + FrameSize(int width, int height) : width(width), height(height) {} + FrameSize(const FrameSize&) = default; + ~FrameSize() = default; + + int width; + int height; + }; + VideoSinkWants(); VideoSinkWants(const VideoSinkWants&); ~VideoSinkWants(); @@ -49,8 +59,34 @@ struct RTC_EXPORT VideoSinkWants { // Note that this field is unrelated to any horizontal or vertical stride // requirements the encoder has on the incoming video frame buffers. int resolution_alignment = 1; + + // The resolutions that sink is configured to consume. If the sink is an + // encoder this is what the encoder is configured to encode. In singlecast we + // only encode one resolution, but in simulcast and SVC this can mean multiple + // resolutions per frame. + // + // The sink is always configured to consume a subset of the + // webrtc::VideoFrame's resolution. In the case of encoding, we usually encode + // at webrtc::VideoFrame's resolution but this may not always be the case due + // to scaleResolutionDownBy or turning off simulcast or SVC layers. + // + // For example, we may capture at 720p and due to adaptation (e.g. 
applying + // |max_pixel_count| constraints) create webrtc::VideoFrames of size 480p, but + // if we do scaleResolutionDownBy:2 then the only resolution we end up + // encoding is 240p. In this case we still need to provide webrtc::VideoFrames + // of size 480p but we can optimize internal buffers for 240p, avoiding + // downsampling to 480p if possible. + // + // Note that the |resolutions| can change while frames are in flight and + // should only be used as a hint when constructing the webrtc::VideoFrame. + std::vector resolutions; }; +inline bool operator==(const VideoSinkWants::FrameSize& a, + const VideoSinkWants::FrameSize& b) { + return a.width == b.width && a.height == b.height; +} + template class VideoSourceInterface { public: diff --git a/api/video/video_stream_decoder.h b/api/video/video_stream_decoder.h index 8f27fa4dbe..8d71dd300c 100644 --- a/api/video/video_stream_decoder.h +++ b/api/video/video_stream_decoder.h @@ -17,6 +17,7 @@ #include "api/units/time_delta.h" #include "api/video/encoded_frame.h" +#include "api/video/video_content_type.h" #include "api/video/video_frame.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_decoder_factory.h" @@ -29,22 +30,23 @@ class VideoStreamDecoderInterface { public: virtual ~Callbacks() = default; + struct FrameInfo { + absl::optional qp; + VideoContentType content_type; + }; + // Called when the VideoStreamDecoder enters a non-decodable state. virtual void OnNonDecodableState() = 0; - // Called with the last continuous frame. - virtual void OnContinuousUntil( - const video_coding::VideoLayerFrameId& key) = 0; + virtual void OnContinuousUntil(int64_t frame_id) {} - // Called with the decoded frame. 
- virtual void OnDecodedFrame(VideoFrame decodedImage, - absl::optional decode_time_ms, - absl::optional qp) = 0; + virtual void OnDecodedFrame(VideoFrame frame, + const FrameInfo& frame_info) = 0; }; virtual ~VideoStreamDecoderInterface() = default; - virtual void OnFrame(std::unique_ptr frame) = 0; + virtual void OnFrame(std::unique_ptr frame) = 0; virtual void SetMinPlayoutDelay(TimeDelta min_delay) = 0; virtual void SetMaxPlayoutDelay(TimeDelta max_delay) = 0; diff --git a/api/video/video_stream_decoder_create_unittest.cc b/api/video/video_stream_decoder_create_unittest.cc index 7b142a90b9..849a054a04 100644 --- a/api/video/video_stream_decoder_create_unittest.cc +++ b/api/video/video_stream_decoder_create_unittest.cc @@ -21,10 +21,9 @@ class NullCallbacks : public VideoStreamDecoderInterface::Callbacks { public: ~NullCallbacks() override = default; void OnNonDecodableState() override {} - void OnContinuousUntil(const video_coding::VideoLayerFrameId& key) override {} - void OnDecodedFrame(VideoFrame decodedImage, - absl::optional decode_time_ms, - absl::optional qp) override {} + void OnDecodedFrame(VideoFrame frame, + const VideoStreamDecoderInterface::Callbacks::FrameInfo& + frame_info) override {} }; TEST(VideoStreamDecoderCreate, CreateVideoStreamDecoder) { diff --git a/api/video/video_stream_encoder_create.cc b/api/video/video_stream_encoder_create.cc deleted file mode 100644 index 3a2ebe79e1..0000000000 --- a/api/video/video_stream_encoder_create.cc +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "api/video/video_stream_encoder_create.h" - -#include - -#include "video/adaptation/overuse_frame_detector.h" -#include "video/video_stream_encoder.h" - -namespace webrtc { - -std::unique_ptr CreateVideoStreamEncoder( - Clock* clock, - TaskQueueFactory* task_queue_factory, - uint32_t number_of_cores, - VideoStreamEncoderObserver* encoder_stats_observer, - const VideoStreamEncoderSettings& settings) { - return std::make_unique( - clock, number_of_cores, encoder_stats_observer, settings, - std::make_unique(encoder_stats_observer), - task_queue_factory); -} - -} // namespace webrtc diff --git a/api/video/video_stream_encoder_create.h b/api/video/video_stream_encoder_create.h deleted file mode 100644 index 3946b95f00..0000000000 --- a/api/video/video_stream_encoder_create.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef API_VIDEO_VIDEO_STREAM_ENCODER_CREATE_H_ -#define API_VIDEO_VIDEO_STREAM_ENCODER_CREATE_H_ - -#include - -#include - -#include "api/task_queue/task_queue_factory.h" -#include "api/video/video_frame.h" -#include "api/video/video_sink_interface.h" -#include "api/video/video_stream_encoder_interface.h" -#include "api/video/video_stream_encoder_observer.h" -#include "api/video/video_stream_encoder_settings.h" - -namespace webrtc { -// TODO(srte): Find a way to avoid this forward declaration. 
-class Clock; - -std::unique_ptr CreateVideoStreamEncoder( - Clock* clock, - TaskQueueFactory* task_queue_factory, - uint32_t number_of_cores, - VideoStreamEncoderObserver* encoder_stats_observer, - const VideoStreamEncoderSettings& settings); -} // namespace webrtc - -#endif // API_VIDEO_VIDEO_STREAM_ENCODER_CREATE_H_ diff --git a/api/video/video_stream_encoder_interface.h b/api/video/video_stream_encoder_interface.h index 8e1df0f858..34fa6421c4 100644 --- a/api/video/video_stream_encoder_interface.h +++ b/api/video/video_stream_encoder_interface.h @@ -13,10 +13,13 @@ #include +#include "api/adaptation/resource.h" #include "api/fec_controller_override.h" #include "api/rtp_parameters.h" // For DegradationPreference. +#include "api/scoped_refptr.h" #include "api/units/data_rate.h" #include "api/video/video_bitrate_allocator.h" +#include "api/video/video_layers_allocation.h" #include "api/video/video_sink_interface.h" #include "api/video/video_source_interface.h" #include "api/video_codecs/video_encoder.h" @@ -47,8 +50,23 @@ class VideoStreamEncoderInterface : public rtc::VideoSinkInterface { bool is_svc, VideoEncoderConfig::ContentType content_type, int min_transmit_bitrate_bps) = 0; + + virtual void OnBitrateAllocationUpdated( + const VideoBitrateAllocation& allocation) = 0; + + virtual void OnVideoLayersAllocationUpdated( + VideoLayersAllocation allocation) = 0; }; + // If the resource is overusing, the VideoStreamEncoder will try to reduce + // resolution or frame rate until no resource is overusing. + // TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor + // is moved to Call this method could be deleted altogether in favor of + // Call-level APIs only. + virtual void AddAdaptationResource(rtc::scoped_refptr resource) = 0; + virtual std::vector> + GetAdaptationResources() = 0; + // Sets the source that will provide video frames to the VideoStreamEncoder's // OnFrame method. 
|degradation_preference| control whether or not resolution // or frame rate may be reduced. The VideoStreamEncoder registers itself with @@ -99,11 +117,6 @@ class VideoStreamEncoderInterface : public rtc::VideoSinkInterface { int64_t round_trip_time_ms, double cwnd_reduce_ratio) = 0; - // Register observer for the bitrate allocation between the temporal - // and spatial layers. - virtual void SetBitrateAllocationObserver( - VideoBitrateAllocationObserver* bitrate_observer) = 0; - // Set a FecControllerOverride, through which the encoder may override // decisions made by FecController. virtual void SetFecControllerOverride( diff --git a/api/video/video_timing.h b/api/video/video_timing.h index 4cc75dd0b0..80320daa83 100644 --- a/api/video/video_timing.h +++ b/api/video/video_timing.h @@ -41,7 +41,7 @@ struct VideoSendTiming { uint16_t pacer_exit_delta_ms; uint16_t network_timestamp_delta_ms; uint16_t network2_timestamp_delta_ms; - uint8_t flags; + uint8_t flags = TimingFrameFlags::kInvalid; }; // Used to report precise timings of a 'timing frames'. Contains all important @@ -100,6 +100,30 @@ struct TimingFrameInfo { uint8_t flags; // Flags indicating validity and/or why tracing was triggered. }; +// Minimum and maximum playout delay values from capture to render. +// These are best effort values. +// +// A value < 0 indicates no change from previous valid value. +// +// min = max = 0 indicates that the receiver should try and render +// frame as soon as possible. +// +// min = x, max = y indicates that the receiver is free to adapt +// in the range (x, y) based on network jitter. +struct VideoPlayoutDelay { + VideoPlayoutDelay() = default; + VideoPlayoutDelay(int min_ms, int max_ms) : min_ms(min_ms), max_ms(max_ms) {} + int min_ms = -1; + int max_ms = -1; + + bool operator==(const VideoPlayoutDelay& rhs) const { + return min_ms == rhs.min_ms && max_ms == rhs.max_ms; + } +}; + +// TODO(bugs.webrtc.org/7660): Old name, delete after downstream use is updated. 
+using PlayoutDelay = VideoPlayoutDelay; + } // namespace webrtc #endif // API_VIDEO_VIDEO_TIMING_H_ diff --git a/api/video_codecs/BUILD.gn b/api/video_codecs/BUILD.gn index 21a5f6faa0..83d67fcac4 100644 --- a/api/video_codecs/BUILD.gn +++ b/api/video_codecs/BUILD.gn @@ -15,13 +15,16 @@ if (is_android) { rtc_library("video_codecs_api") { visibility = [ "*" ] sources = [ + "h264_profile_level_id.cc", + "h264_profile_level_id.h", "sdp_video_format.cc", "sdp_video_format.h", + "spatial_layer.cc", + "spatial_layer.h", "video_codec.cc", "video_codec.h", "video_decoder.cc", "video_decoder.h", - "video_decoder_factory.cc", "video_decoder_factory.h", "video_encoder.cc", "video_encoder.h", @@ -33,12 +36,14 @@ rtc_library("video_codecs_api") { "vp8_frame_config.h", "vp8_temporal_layers.cc", "vp8_temporal_layers.h", + "vp9_profile.cc", + "vp9_profile.h", ] deps = [ "..:fec_controller_api", "..:scoped_refptr", - "../..:webrtc_common", + "../../api:array_view", "../../modules/video_coding:codec_globals_headers", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", @@ -49,6 +54,8 @@ rtc_library("video_codecs_api") { "../video:video_codec_constants", "../video:video_frame", "../video:video_rtp_headers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/strings", @@ -100,8 +107,8 @@ rtc_library("builtin_video_encoder_factory") { "../../media:rtc_media_base", "../../rtc_base:checks", "../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_library("vp8_temporal_layers_factory") { @@ -134,8 +141,7 @@ rtc_library("rtc_software_fallback_wrappers") { deps = [ ":video_codecs_api", "..:fec_controller_api", - "../../api/video:video_frame_i420", - "../../media:rtc_h264_profile_id", + "../../api/video:video_frame", "../../media:rtc_media_base", 
"../../modules/video_coding:video_codec_interface", "../../modules/video_coding:video_coding_utility", @@ -148,6 +154,8 @@ rtc_library("rtc_software_fallback_wrappers") { "../video:video_bitrate_allocation", "../video:video_frame", "../video:video_rtp_headers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", diff --git a/api/video_codecs/builtin_video_encoder_factory.cc b/api/video_codecs/builtin_video_encoder_factory.cc index 6888daae48..9463a9cdf2 100644 --- a/api/video_codecs/builtin_video_encoder_factory.cc +++ b/api/video_codecs/builtin_video_encoder_factory.cc @@ -26,18 +26,6 @@ namespace webrtc { namespace { -bool IsFormatSupported(const std::vector& supported_formats, - const SdpVideoFormat& format) { - for (const SdpVideoFormat& supported_format : supported_formats) { - if (cricket::IsSameCodec(format.name, format.parameters, - supported_format.name, - supported_format.parameters)) { - return true; - } - } - return false; -} - // This class wraps the internal factory and adds simulcast. class BuiltinVideoEncoderFactory : public VideoEncoderFactory { public: @@ -47,11 +35,9 @@ class BuiltinVideoEncoderFactory : public VideoEncoderFactory { VideoEncoderFactory::CodecInfo QueryVideoEncoder( const SdpVideoFormat& format) const override { // Format must be one of the internal formats. - RTC_DCHECK(IsFormatSupported( - internal_encoder_factory_->GetSupportedFormats(), format)); + RTC_DCHECK( + format.IsCodecInList(internal_encoder_factory_->GetSupportedFormats())); VideoEncoderFactory::CodecInfo info; - info.has_internal_source = false; - info.is_hardware_accelerated = false; return info; } @@ -59,8 +45,8 @@ class BuiltinVideoEncoderFactory : public VideoEncoderFactory { const SdpVideoFormat& format) override { // Try creating internal encoder. 
std::unique_ptr internal_encoder; - if (IsFormatSupported(internal_encoder_factory_->GetSupportedFormats(), - format)) { + if (format.IsCodecInList( + internal_encoder_factory_->GetSupportedFormats())) { internal_encoder = std::make_unique( internal_encoder_factory_.get(), format); } diff --git a/api/video_codecs/h264_profile_level_id.cc b/api/video_codecs/h264_profile_level_id.cc new file mode 100644 index 0000000000..fa47758189 --- /dev/null +++ b/api/video_codecs/h264_profile_level_id.cc @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "api/video_codecs/h264_profile_level_id.h" + +#include +#include +#include + +#include "rtc_base/arraysize.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +namespace { + +const char kProfileLevelId[] = "profile-level-id"; + +// For level_idc=11 and profile_idc=0x42, 0x4D, or 0x58, the constraint set3 +// flag specifies if level 1b or level 1.1 is used. +const uint8_t kConstraintSet3Flag = 0x10; + +// Convert a string of 8 characters into a byte where the positions containing +// character c will have their bit set. For example, c = 'x', str = "x1xx0000" +// will return 0b10110000. constexpr is used so that the pattern table in +// kProfilePatterns is statically initialized. +constexpr uint8_t ByteMaskString(char c, const char (&str)[9]) { + return (str[0] == c) << 7 | (str[1] == c) << 6 | (str[2] == c) << 5 | + (str[3] == c) << 4 | (str[4] == c) << 3 | (str[5] == c) << 2 | + (str[6] == c) << 1 | (str[7] == c) << 0; +} + +// Class for matching bit patterns such as "x1xx0000" where 'x' is allowed to be +// either 0 or 1. 
+class BitPattern { + public: + explicit constexpr BitPattern(const char (&str)[9]) + : mask_(~ByteMaskString('x', str)), + masked_value_(ByteMaskString('1', str)) {} + + bool IsMatch(uint8_t value) const { return masked_value_ == (value & mask_); } + + private: + const uint8_t mask_; + const uint8_t masked_value_; +}; + +// Table for converting between profile_idc/profile_iop to H264Profile. +struct ProfilePattern { + const uint8_t profile_idc; + const BitPattern profile_iop; + const H264Profile profile; +}; + +// This is from https://tools.ietf.org/html/rfc6184#section-8.1. +constexpr ProfilePattern kProfilePatterns[] = { + {0x42, BitPattern("x1xx0000"), H264Profile::kProfileConstrainedBaseline}, + {0x4D, BitPattern("1xxx0000"), H264Profile::kProfileConstrainedBaseline}, + {0x58, BitPattern("11xx0000"), H264Profile::kProfileConstrainedBaseline}, + {0x42, BitPattern("x0xx0000"), H264Profile::kProfileBaseline}, + {0x58, BitPattern("10xx0000"), H264Profile::kProfileBaseline}, + {0x4D, BitPattern("0x0x0000"), H264Profile::kProfileMain}, + {0x64, BitPattern("00000000"), H264Profile::kProfileHigh}, + {0x64, BitPattern("00001100"), H264Profile::kProfileConstrainedHigh}}; + +struct LevelConstraint { + const int max_macroblocks_per_second; + const int max_macroblock_frame_size; + const H264Level level; +}; + +// This is from ITU-T H.264 (02/2016) Table A-1 – Level limits. 
+static constexpr LevelConstraint kLevelConstraints[] = { + {1485, 99, H264Level::kLevel1}, + {1485, 99, H264Level::kLevel1_b}, + {3000, 396, H264Level::kLevel1_1}, + {6000, 396, H264Level::kLevel1_2}, + {11880, 396, H264Level::kLevel1_3}, + {11880, 396, H264Level::kLevel2}, + {19800, 792, H264Level::kLevel2_1}, + {20250, 1620, H264Level::kLevel2_2}, + {40500, 1620, H264Level::kLevel3}, + {108000, 3600, H264Level::kLevel3_1}, + {216000, 5120, H264Level::kLevel3_2}, + {245760, 8192, H264Level::kLevel4}, + {245760, 8192, H264Level::kLevel4_1}, + {522240, 8704, H264Level::kLevel4_2}, + {589824, 22080, H264Level::kLevel5}, + {983040, 36864, H264Level::kLevel5_1}, + {2073600, 36864, H264Level::kLevel5_2}, +}; + +} // anonymous namespace + +absl::optional ParseH264ProfileLevelId(const char* str) { + // The string should consist of 3 bytes in hexadecimal format. + if (strlen(str) != 6u) + return absl::nullopt; + const uint32_t profile_level_id_numeric = strtol(str, nullptr, 16); + if (profile_level_id_numeric == 0) + return absl::nullopt; + + // Separate into three bytes. + const uint8_t level_idc = + static_cast(profile_level_id_numeric & 0xFF); + const uint8_t profile_iop = + static_cast((profile_level_id_numeric >> 8) & 0xFF); + const uint8_t profile_idc = + static_cast((profile_level_id_numeric >> 16) & 0xFF); + + // Parse level based on level_idc and constraint set 3 flag. + H264Level level_casted = static_cast(level_idc); + H264Level level; + + switch (level_casted) { + case H264Level::kLevel1_1: + level = (profile_iop & kConstraintSet3Flag) != 0 ? 
H264Level::kLevel1_b + : H264Level::kLevel1_1; + break; + case H264Level::kLevel1: + case H264Level::kLevel1_2: + case H264Level::kLevel1_3: + case H264Level::kLevel2: + case H264Level::kLevel2_1: + case H264Level::kLevel2_2: + case H264Level::kLevel3: + case H264Level::kLevel3_1: + case H264Level::kLevel3_2: + case H264Level::kLevel4: + case H264Level::kLevel4_1: + case H264Level::kLevel4_2: + case H264Level::kLevel5: + case H264Level::kLevel5_1: + case H264Level::kLevel5_2: + level = level_casted; + break; + default: + // Unrecognized level_idc. + return absl::nullopt; + } + + // Parse profile_idc/profile_iop into a Profile enum. + for (const ProfilePattern& pattern : kProfilePatterns) { + if (profile_idc == pattern.profile_idc && + pattern.profile_iop.IsMatch(profile_iop)) { + return H264ProfileLevelId(pattern.profile, level); + } + } + + // Unrecognized profile_idc/profile_iop combination. + return absl::nullopt; +} + +absl::optional H264SupportedLevel(int max_frame_pixel_count, + float max_fps) { + static const int kPixelsPerMacroblock = 16 * 16; + + for (int i = arraysize(kLevelConstraints) - 1; i >= 0; --i) { + const LevelConstraint& level_constraint = kLevelConstraints[i]; + if (level_constraint.max_macroblock_frame_size * kPixelsPerMacroblock <= + max_frame_pixel_count && + level_constraint.max_macroblocks_per_second <= + max_fps * level_constraint.max_macroblock_frame_size) { + return level_constraint.level; + } + } + + // No level supported. + return absl::nullopt; +} + +absl::optional ParseSdpForH264ProfileLevelId( + const SdpVideoFormat::Parameters& params) { + // TODO(magjed): The default should really be kProfileBaseline and kLevel1 + // according to the spec: https://tools.ietf.org/html/rfc6184#section-8.1. In + // order to not break backwards compatibility with older versions of WebRTC + // where external codecs don't have any parameters, use + // kProfileConstrainedBaseline kLevel3_1 instead. 
This workaround will only be + // done in an interim period to allow external clients to update their code. + // http://crbug/webrtc/6337. + static const H264ProfileLevelId kDefaultProfileLevelId( + H264Profile::kProfileConstrainedBaseline, H264Level::kLevel3_1); + + const auto profile_level_id_it = params.find(kProfileLevelId); + return (profile_level_id_it == params.end()) + ? kDefaultProfileLevelId + : ParseH264ProfileLevelId(profile_level_id_it->second.c_str()); +} + +absl::optional H264ProfileLevelIdToString( + const H264ProfileLevelId& profile_level_id) { + // Handle special case level == 1b. + if (profile_level_id.level == H264Level::kLevel1_b) { + switch (profile_level_id.profile) { + case H264Profile::kProfileConstrainedBaseline: + return {"42f00b"}; + case H264Profile::kProfileBaseline: + return {"42100b"}; + case H264Profile::kProfileMain: + return {"4d100b"}; + // Level 1b is not allowed for other profiles. + default: + return absl::nullopt; + } + } + + const char* profile_idc_iop_string; + switch (profile_level_id.profile) { + case H264Profile::kProfileConstrainedBaseline: + profile_idc_iop_string = "42e0"; + break; + case H264Profile::kProfileBaseline: + profile_idc_iop_string = "4200"; + break; + case H264Profile::kProfileMain: + profile_idc_iop_string = "4d00"; + break; + case H264Profile::kProfileConstrainedHigh: + profile_idc_iop_string = "640c"; + break; + case H264Profile::kProfileHigh: + profile_idc_iop_string = "6400"; + break; + // Unrecognized profile. + default: + return absl::nullopt; + } + + char str[7]; + snprintf(str, 7u, "%s%02x", profile_idc_iop_string, profile_level_id.level); + return {str}; +} + +bool H264IsSameProfile(const SdpVideoFormat::Parameters& params1, + const SdpVideoFormat::Parameters& params2) { + const absl::optional profile_level_id = + ParseSdpForH264ProfileLevelId(params1); + const absl::optional other_profile_level_id = + ParseSdpForH264ProfileLevelId(params2); + // Compare H264 profiles, but not levels. 
+ return profile_level_id && other_profile_level_id && + profile_level_id->profile == other_profile_level_id->profile; +} + +} // namespace webrtc diff --git a/api/video_codecs/h264_profile_level_id.h b/api/video_codecs/h264_profile_level_id.h new file mode 100644 index 0000000000..51d025cd7b --- /dev/null +++ b/api/video_codecs/h264_profile_level_id.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_VIDEO_CODECS_H264_PROFILE_LEVEL_ID_H_ +#define API_VIDEO_CODECS_H264_PROFILE_LEVEL_ID_H_ + +#include + +#include "absl/types/optional.h" +#include "api/video_codecs/sdp_video_format.h" +#include "rtc_base/system/rtc_export.h" + +namespace webrtc { + +enum class H264Profile { + kProfileConstrainedBaseline, + kProfileBaseline, + kProfileMain, + kProfileConstrainedHigh, + kProfileHigh, +}; + +// All values are equal to ten times the level number, except level 1b which is +// special. +enum class H264Level { + kLevel1_b = 0, + kLevel1 = 10, + kLevel1_1 = 11, + kLevel1_2 = 12, + kLevel1_3 = 13, + kLevel2 = 20, + kLevel2_1 = 21, + kLevel2_2 = 22, + kLevel3 = 30, + kLevel3_1 = 31, + kLevel3_2 = 32, + kLevel4 = 40, + kLevel4_1 = 41, + kLevel4_2 = 42, + kLevel5 = 50, + kLevel5_1 = 51, + kLevel5_2 = 52 +}; + +struct H264ProfileLevelId { + constexpr H264ProfileLevelId(H264Profile profile, H264Level level) + : profile(profile), level(level) {} + H264Profile profile; + H264Level level; +}; + +// Parse profile level id that is represented as a string of 3 hex bytes. +// Nothing will be returned if the string is not a recognized H264 +// profile level id. 
+absl::optional ParseH264ProfileLevelId(const char* str); + +// Parse profile level id that is represented as a string of 3 hex bytes +// contained in an SDP key-value map. A default profile level id will be +// returned if the profile-level-id key is missing. Nothing will be returned if +// the key is present but the string is invalid. +RTC_EXPORT absl::optional ParseSdpForH264ProfileLevelId( + const SdpVideoFormat::Parameters& params); + +// Given that a decoder supports up to a given frame size (in pixels) at up to a +// given number of frames per second, return the highest H.264 level where it +// can guarantee that it will be able to support all valid encoded streams that +// are within that level. +RTC_EXPORT absl::optional H264SupportedLevel( + int max_frame_pixel_count, + float max_fps); + +// Returns canonical string representation as three hex bytes of the profile +// level id, or returns nothing for invalid profile level ids. +RTC_EXPORT absl::optional H264ProfileLevelIdToString( + const H264ProfileLevelId& profile_level_id); + +// Returns true if the parameters have the same H264 profile (Baseline, High, +// etc). 
+RTC_EXPORT bool H264IsSameProfile(const SdpVideoFormat::Parameters& params1, + const SdpVideoFormat::Parameters& params2); + +} // namespace webrtc + +#endif // API_VIDEO_CODECS_H264_PROFILE_LEVEL_ID_H_ diff --git a/api/video_codecs/sdp_video_format.cc b/api/video_codecs/sdp_video_format.cc index f8901492ee..689c337ced 100644 --- a/api/video_codecs/sdp_video_format.cc +++ b/api/video_codecs/sdp_video_format.cc @@ -10,10 +10,57 @@ #include "api/video_codecs/sdp_video_format.h" +#include "absl/strings/match.h" +#include "api/video_codecs/h264_profile_level_id.h" +#include "api/video_codecs/video_codec.h" +#include "api/video_codecs/vp9_profile.h" +#include "rtc_base/checks.h" #include "rtc_base/strings/string_builder.h" namespace webrtc { +namespace { + +std::string H264GetPacketizationModeOrDefault( + const SdpVideoFormat::Parameters& params) { + constexpr char kH264FmtpPacketizationMode[] = "packetization-mode"; + const auto it = params.find(kH264FmtpPacketizationMode); + if (it != params.end()) { + return it->second; + } + // If packetization-mode is not present, default to "0". + // https://tools.ietf.org/html/rfc6184#section-6.2 + return "0"; +} + +bool H264IsSamePacketizationMode(const SdpVideoFormat::Parameters& left, + const SdpVideoFormat::Parameters& right) { + return H264GetPacketizationModeOrDefault(left) == + H264GetPacketizationModeOrDefault(right); +} + +// Some (video) codecs are actually families of codecs and rely on parameters +// to distinguish different incompatible family members. +bool IsSameCodecSpecific(const SdpVideoFormat& format1, + const SdpVideoFormat& format2) { + // The assumption when calling this function is that the two formats have the + // same name. 
+ RTC_DCHECK(absl::EqualsIgnoreCase(format1.name, format2.name)); + + VideoCodecType codec_type = PayloadStringToCodecType(format1.name); + switch (codec_type) { + case kVideoCodecH264: + return H264IsSameProfile(format1.parameters, format2.parameters) && + H264IsSamePacketizationMode(format1.parameters, + format2.parameters); + case kVideoCodecVP9: + return VP9IsSameProfile(format1.parameters, format2.parameters); + default: + return true; + } +} +} // namespace + SdpVideoFormat::SdpVideoFormat(const std::string& name) : name(name) {} SdpVideoFormat::SdpVideoFormat(const std::string& name, @@ -37,6 +84,23 @@ std::string SdpVideoFormat::ToString() const { return builder.str(); } +bool SdpVideoFormat::IsSameCodec(const SdpVideoFormat& other) const { + // Two codecs are considered the same if the name matches (case insensitive) + // and certain codec-specific parameters match. + return absl::EqualsIgnoreCase(name, other.name) && + IsSameCodecSpecific(*this, other); +} + +bool SdpVideoFormat::IsCodecInList( + rtc::ArrayView formats) const { + for (const auto& format : formats) { + if (IsSameCodec(format)) { + return true; + } + } + return false; +} + bool operator==(const SdpVideoFormat& a, const SdpVideoFormat& b) { return a.name == b.name && a.parameters == b.parameters; } diff --git a/api/video_codecs/sdp_video_format.h b/api/video_codecs/sdp_video_format.h index 97bb75489d..a1e23f4f9c 100644 --- a/api/video_codecs/sdp_video_format.h +++ b/api/video_codecs/sdp_video_format.h @@ -14,6 +14,7 @@ #include #include +#include "api/array_view.h" #include "rtc_base/system/rtc_export.h" namespace webrtc { @@ -32,6 +33,13 @@ struct RTC_EXPORT SdpVideoFormat { ~SdpVideoFormat(); + // Returns true if the SdpVideoFormats have the same names as well as codec + // specific parameters. Please note that two SdpVideoFormats can represent the + // same codec even though not all parameters are the same. 
+ bool IsSameCodec(const SdpVideoFormat& other) const; + bool IsCodecInList( + rtc::ArrayView formats) const; + std::string ToString() const; friend RTC_EXPORT bool operator==(const SdpVideoFormat& a, diff --git a/api/video_codecs/spatial_layer.cc b/api/video_codecs/spatial_layer.cc new file mode 100644 index 0000000000..25ccdfeb48 --- /dev/null +++ b/api/video_codecs/spatial_layer.cc @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "api/video_codecs/spatial_layer.h" + +namespace webrtc { + +bool SpatialLayer::operator==(const SpatialLayer& other) const { + return (width == other.width && height == other.height && + maxFramerate == other.maxFramerate && + numberOfTemporalLayers == other.numberOfTemporalLayers && + maxBitrate == other.maxBitrate && + targetBitrate == other.targetBitrate && + minBitrate == other.minBitrate && qpMax == other.qpMax && + active == other.active); +} + +} // namespace webrtc diff --git a/api/video_codecs/spatial_layer.h b/api/video_codecs/spatial_layer.h new file mode 100644 index 0000000000..5a1b425427 --- /dev/null +++ b/api/video_codecs/spatial_layer.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_VIDEO_CODECS_SPATIAL_LAYER_H_ +#define API_VIDEO_CODECS_SPATIAL_LAYER_H_ + +namespace webrtc { + +struct SpatialLayer { + bool operator==(const SpatialLayer& other) const; + bool operator!=(const SpatialLayer& other) const { return !(*this == other); } + + unsigned short width; // NOLINT(runtime/int) + unsigned short height; // NOLINT(runtime/int) + float maxFramerate; // fps. + unsigned char numberOfTemporalLayers; + unsigned int maxBitrate; // kilobits/sec. + unsigned int targetBitrate; // kilobits/sec. + unsigned int minBitrate; // kilobits/sec. + unsigned int qpMax; // minimum quality + bool active; // encoded and sent. +}; + +} // namespace webrtc +#endif // API_VIDEO_CODECS_SPATIAL_LAYER_H_ diff --git a/api/video_codecs/test/BUILD.gn b/api/video_codecs/test/BUILD.gn index 243b78267f..c082dbc562 100644 --- a/api/video_codecs/test/BUILD.gn +++ b/api/video_codecs/test/BUILD.gn @@ -13,6 +13,8 @@ if (rtc_include_tests) { testonly = true sources = [ "builtin_video_encoder_factory_unittest.cc", + "h264_profile_level_id_unittest.cc", + "sdp_video_format_unittest.cc", "video_decoder_software_fallback_wrapper_unittest.cc", "video_encoder_software_fallback_wrapper_unittest.cc", ] @@ -24,7 +26,6 @@ if (rtc_include_tests) { "../..:fec_controller_api", "../..:mock_video_encoder", "../../../api:scoped_refptr", - "../../../modules:module_api", "../../../modules/video_coding:video_codec_interface", "../../../modules/video_coding:video_coding_utility", "../../../modules/video_coding:webrtc_vp8", @@ -36,9 +37,9 @@ if (rtc_include_tests) { "../../video:encoded_image", "../../video:video_bitrate_allocation", "../../video:video_frame", - "../../video:video_frame_i420", "../../video:video_rtp_headers", "//testing/gtest", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } } diff --git a/api/video_codecs/test/h264_profile_level_id_unittest.cc b/api/video_codecs/test/h264_profile_level_id_unittest.cc new file mode 100644 index 
0000000000..47098d2682 --- /dev/null +++ b/api/video_codecs/test/h264_profile_level_id_unittest.cc @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "api/video_codecs/h264_profile_level_id.h" + +#include +#include + +#include "absl/types/optional.h" +#include "test/gtest.h" + +namespace webrtc { + +TEST(H264ProfileLevelId, TestParsingInvalid) { + // Malformed strings. + EXPECT_FALSE(ParseH264ProfileLevelId("")); + EXPECT_FALSE(ParseH264ProfileLevelId(" 42e01f")); + EXPECT_FALSE(ParseH264ProfileLevelId("4242e01f")); + EXPECT_FALSE(ParseH264ProfileLevelId("e01f")); + EXPECT_FALSE(ParseH264ProfileLevelId("gggggg")); + + // Invalid level. + EXPECT_FALSE(ParseH264ProfileLevelId("42e000")); + EXPECT_FALSE(ParseH264ProfileLevelId("42e00f")); + EXPECT_FALSE(ParseH264ProfileLevelId("42e0ff")); + + // Invalid profile. 
+ EXPECT_FALSE(ParseH264ProfileLevelId("42e11f")); + EXPECT_FALSE(ParseH264ProfileLevelId("58601f")); + EXPECT_FALSE(ParseH264ProfileLevelId("64e01f")); +} + +TEST(H264ProfileLevelId, TestParsingLevel) { + EXPECT_EQ(H264Level::kLevel3_1, ParseH264ProfileLevelId("42e01f")->level); + EXPECT_EQ(H264Level::kLevel1_1, ParseH264ProfileLevelId("42e00b")->level); + EXPECT_EQ(H264Level::kLevel1_b, ParseH264ProfileLevelId("42f00b")->level); + EXPECT_EQ(H264Level::kLevel4_2, ParseH264ProfileLevelId("42C02A")->level); + EXPECT_EQ(H264Level::kLevel5_2, ParseH264ProfileLevelId("640c34")->level); +} + +TEST(H264ProfileLevelId, TestParsingConstrainedBaseline) { + EXPECT_EQ(H264Profile::kProfileConstrainedBaseline, + ParseH264ProfileLevelId("42e01f")->profile); + EXPECT_EQ(H264Profile::kProfileConstrainedBaseline, + ParseH264ProfileLevelId("42C02A")->profile); + EXPECT_EQ(H264Profile::kProfileConstrainedBaseline, + ParseH264ProfileLevelId("4de01f")->profile); + EXPECT_EQ(H264Profile::kProfileConstrainedBaseline, + ParseH264ProfileLevelId("58f01f")->profile); +} + +TEST(H264ProfileLevelId, TestParsingBaseline) { + EXPECT_EQ(H264Profile::kProfileBaseline, + ParseH264ProfileLevelId("42a01f")->profile); + EXPECT_EQ(H264Profile::kProfileBaseline, + ParseH264ProfileLevelId("58A01F")->profile); +} + +TEST(H264ProfileLevelId, TestParsingMain) { + EXPECT_EQ(H264Profile::kProfileMain, + ParseH264ProfileLevelId("4D401f")->profile); +} + +TEST(H264ProfileLevelId, TestParsingHigh) { + EXPECT_EQ(H264Profile::kProfileHigh, + ParseH264ProfileLevelId("64001f")->profile); +} + +TEST(H264ProfileLevelId, TestParsingConstrainedHigh) { + EXPECT_EQ(H264Profile::kProfileConstrainedHigh, + ParseH264ProfileLevelId("640c1f")->profile); +} + +TEST(H264ProfileLevelId, TestSupportedLevel) { + EXPECT_EQ(H264Level::kLevel2_1, *H264SupportedLevel(640 * 480, 25)); + EXPECT_EQ(H264Level::kLevel3_1, *H264SupportedLevel(1280 * 720, 30)); + EXPECT_EQ(H264Level::kLevel4_2, *H264SupportedLevel(1920 * 1280, 60)); +} + +// 
Test supported level below level 1 requirements. +TEST(H264ProfileLevelId, TestSupportedLevelInvalid) { + EXPECT_FALSE(H264SupportedLevel(0, 0)); + // All levels support fps > 5. + EXPECT_FALSE(H264SupportedLevel(1280 * 720, 5)); + // All levels support frame sizes > 183 * 137. + EXPECT_FALSE(H264SupportedLevel(183 * 137, 30)); +} + +TEST(H264ProfileLevelId, TestToString) { + EXPECT_EQ("42e01f", *H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileConstrainedBaseline, + H264Level::kLevel3_1))); + EXPECT_EQ("42000a", *H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileBaseline, H264Level::kLevel1))); + EXPECT_EQ("4d001f", H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileMain, H264Level::kLevel3_1))); + EXPECT_EQ("640c2a", + *H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileConstrainedHigh, H264Level::kLevel4_2))); + EXPECT_EQ("64002a", *H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileHigh, H264Level::kLevel4_2))); +} + +TEST(H264ProfileLevelId, TestToStringLevel1b) { + EXPECT_EQ("42f00b", *H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileConstrainedBaseline, + H264Level::kLevel1_b))); + EXPECT_EQ("42100b", + *H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileBaseline, H264Level::kLevel1_b))); + EXPECT_EQ("4d100b", *H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileMain, H264Level::kLevel1_b))); +} + +TEST(H264ProfileLevelId, TestToStringRoundTrip) { + EXPECT_EQ("42e01f", + *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("42e01f"))); + EXPECT_EQ("42e01f", + *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("42E01F"))); + EXPECT_EQ("4d100b", + *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("4d100b"))); + EXPECT_EQ("4d100b", + *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("4D100B"))); + EXPECT_EQ("640c2a", + *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("640c2a"))); + 
EXPECT_EQ("640c2a", + *H264ProfileLevelIdToString(*ParseH264ProfileLevelId("640C2A"))); +} + +TEST(H264ProfileLevelId, TestToStringInvalid) { + EXPECT_FALSE(H264ProfileLevelIdToString( + H264ProfileLevelId(H264Profile::kProfileHigh, H264Level::kLevel1_b))); + EXPECT_FALSE(H264ProfileLevelIdToString(H264ProfileLevelId( + H264Profile::kProfileConstrainedHigh, H264Level::kLevel1_b))); + EXPECT_FALSE(H264ProfileLevelIdToString( + H264ProfileLevelId(static_cast(255), H264Level::kLevel3_1))); +} + +TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdEmpty) { + const absl::optional profile_level_id = + ParseSdpForH264ProfileLevelId(SdpVideoFormat::Parameters()); + EXPECT_TRUE(profile_level_id); + EXPECT_EQ(H264Profile::kProfileConstrainedBaseline, + profile_level_id->profile); + EXPECT_EQ(H264Level::kLevel3_1, profile_level_id->level); +} + +TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdConstrainedHigh) { + SdpVideoFormat::Parameters params; + params["profile-level-id"] = "640c2a"; + const absl::optional profile_level_id = + ParseSdpForH264ProfileLevelId(params); + EXPECT_TRUE(profile_level_id); + EXPECT_EQ(H264Profile::kProfileConstrainedHigh, profile_level_id->profile); + EXPECT_EQ(H264Level::kLevel4_2, profile_level_id->level); +} + +TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdInvalid) { + SdpVideoFormat::Parameters params; + params["profile-level-id"] = "foobar"; + EXPECT_FALSE(ParseSdpForH264ProfileLevelId(params)); +} + +} // namespace webrtc diff --git a/api/video_codecs/test/sdp_video_format_unittest.cc b/api/video_codecs/test/sdp_video_format_unittest.cc new file mode 100644 index 0000000000..d55816690e --- /dev/null +++ b/api/video_codecs/test/sdp_video_format_unittest.cc @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "api/video_codecs/sdp_video_format.h" + +#include + +#include "test/gtest.h" + +namespace webrtc { + +typedef SdpVideoFormat Sdp; +typedef SdpVideoFormat::Parameters Params; + +TEST(SdpVideoFormatTest, SameCodecNameNoParameters) { + EXPECT_TRUE(Sdp("H264").IsSameCodec(Sdp("h264"))); + EXPECT_TRUE(Sdp("VP8").IsSameCodec(Sdp("vp8"))); + EXPECT_TRUE(Sdp("Vp9").IsSameCodec(Sdp("vp9"))); + EXPECT_TRUE(Sdp("AV1").IsSameCodec(Sdp("Av1"))); +} +TEST(SdpVideoFormatTest, DifferentCodecNameNoParameters) { + EXPECT_FALSE(Sdp("H264").IsSameCodec(Sdp("VP8"))); + EXPECT_FALSE(Sdp("VP8").IsSameCodec(Sdp("VP9"))); + EXPECT_FALSE(Sdp("AV1").IsSameCodec(Sdp(""))); +} +TEST(SdpVideoFormatTest, SameCodecNameSameParameters) { + EXPECT_TRUE(Sdp("VP9").IsSameCodec(Sdp("VP9", Params{{"profile-id", "0"}}))); + EXPECT_TRUE(Sdp("VP9", Params{{"profile-id", "0"}}) + .IsSameCodec(Sdp("VP9", Params{{"profile-id", "0"}}))); + EXPECT_TRUE(Sdp("VP9", Params{{"profile-id", "2"}}) + .IsSameCodec(Sdp("VP9", Params{{"profile-id", "2"}}))); + EXPECT_TRUE( + Sdp("H264", Params{{"profile-level-id", "42e01f"}}) + .IsSameCodec(Sdp("H264", Params{{"profile-level-id", "42e01f"}}))); + EXPECT_TRUE( + Sdp("H264", Params{{"profile-level-id", "640c34"}}) + .IsSameCodec(Sdp("H264", Params{{"profile-level-id", "640c34"}}))); +} + +TEST(SdpVideoFormatTest, SameCodecNameDifferentParameters) { + EXPECT_FALSE(Sdp("VP9").IsSameCodec(Sdp("VP9", Params{{"profile-id", "2"}}))); + EXPECT_FALSE(Sdp("VP9", Params{{"profile-id", "0"}}) + .IsSameCodec(Sdp("VP9", Params{{"profile-id", "1"}}))); + EXPECT_FALSE(Sdp("VP9", Params{{"profile-id", "2"}}) + .IsSameCodec(Sdp("VP9", Params{{"profile-id", "0"}}))); + EXPECT_FALSE( + Sdp("H264", Params{{"profile-level-id", "42e01f"}}) + .IsSameCodec(Sdp("H264", 
Params{{"profile-level-id", "640c34"}}))); + EXPECT_FALSE( + Sdp("H264", Params{{"profile-level-id", "640c34"}}) + .IsSameCodec(Sdp("H264", Params{{"profile-level-id", "42f00b"}}))); +} + +TEST(SdpVideoFormatTest, DifferentCodecNameSameParameters) { + EXPECT_FALSE(Sdp("VP9", Params{{"profile-id", "0"}}) + .IsSameCodec(Sdp("H264", Params{{"profile-id", "0"}}))); + EXPECT_FALSE(Sdp("VP9", Params{{"profile-id", "2"}}) + .IsSameCodec(Sdp("VP8", Params{{"profile-id", "2"}}))); + EXPECT_FALSE( + Sdp("H264", Params{{"profile-level-id", "42e01f"}}) + .IsSameCodec(Sdp("VP9", Params{{"profile-level-id", "42e01f"}}))); + EXPECT_FALSE( + Sdp("H264", Params{{"profile-level-id", "640c34"}}) + .IsSameCodec(Sdp("VP8", Params{{"profile-level-id", "640c34"}}))); +} + +} // namespace webrtc diff --git a/api/video_codecs/test/video_encoder_software_fallback_wrapper_unittest.cc b/api/video_codecs/test/video_encoder_software_fallback_wrapper_unittest.cc index d2c3666d86..2d8b002f2d 100644 --- a/api/video_codecs/test/video_encoder_software_fallback_wrapper_unittest.cc +++ b/api/video_codecs/test/video_encoder_software_fallback_wrapper_unittest.cc @@ -29,7 +29,6 @@ #include "api/video/video_rotation.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" -#include "modules/include/module_common_types.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_error_codes.h" @@ -82,8 +81,7 @@ VideoEncoder::EncoderInfo GetEncoderInfoWithInternalSource( class FakeEncodedImageCallback : public EncodedImageCallback { public: Result OnEncodedImage(const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override { + const CodecSpecificInfo* codec_specific_info) override { ++callback_count_; return Result(Result::OK, callback_count_); } @@ -123,8 +121,7 @@ class 
VideoEncoderSoftwareFallbackWrapperTestBase : public ::testing::Test { last_video_frame_ = frame; if (encode_complete_callback_ && encode_return_code_ == WEBRTC_VIDEO_CODEC_OK) { - encode_complete_callback_->OnEncodedImage(EncodedImage(), nullptr, - nullptr); + encode_complete_callback_->OnEncodedImage(EncodedImage(), nullptr); } return encode_return_code_; } @@ -616,13 +613,13 @@ TEST_F(ForcedFallbackTestEnabled, FallbackIsEndedForNonValidSettings) { EncodeFrameAndVerifyLastName("libvpx"); // Re-initialize encoder with invalid setting, expect no fallback. - codec_.VP8()->numberOfTemporalLayers = 2; + codec_.numberOfSimulcastStreams = 2; InitEncode(kWidth, kHeight); EXPECT_EQ(1, fake_encoder_->init_encode_count_); EncodeFrameAndVerifyLastName("fake-encoder"); // Re-initialize encoder with valid setting. - codec_.VP8()->numberOfTemporalLayers = 1; + codec_.numberOfSimulcastStreams = 1; InitEncode(kWidth, kHeight); EXPECT_EQ(1, fake_encoder_->init_encode_count_); EncodeFrameAndVerifyLastName("libvpx"); @@ -856,11 +853,15 @@ class PreferTemporalLayersFallbackTest : public ::testing::Test { protected: void SetSupportsLayers(VideoEncoder::EncoderInfo* info, bool tl_enabled) { - info->fps_allocation[0].clear(); int num_layers = 1; if (tl_enabled) { num_layers = codec_settings.VP8()->numberOfTemporalLayers; } + SetNumLayers(info, num_layers); + } + + void SetNumLayers(VideoEncoder::EncoderInfo* info, int num_layers) { + info->fps_allocation[0].clear(); for (int i = 0; i < num_layers; ++i) { info->fps_allocation[0].push_back( VideoEncoder::EncoderInfo::kMaxFramerateFraction >> @@ -913,6 +914,15 @@ TEST_F(PreferTemporalLayersFallbackTest, UsesMainWhenNeitherSupportsTemporal) { EXPECT_EQ(wrapper_->GetEncoderInfo().implementation_name, "hw"); } +TEST_F(PreferTemporalLayersFallbackTest, UsesFallbackWhenLayersAreUndefined) { + codec_settings.VP8()->numberOfTemporalLayers = 2; + SetNumLayers(&hw_info_, 1); + SetNumLayers(&sw_info_, 0); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, + 
wrapper_->InitEncode(&codec_settings, kSettings)); + EXPECT_EQ(wrapper_->GetEncoderInfo().implementation_name, "sw"); +} + TEST_F(PreferTemporalLayersFallbackTest, PrimesEncoderOnSwitch) { codec_settings.VP8()->numberOfTemporalLayers = 2; // Both support temporal layers, will use main one. diff --git a/api/video_codecs/video_codec.cc b/api/video_codecs/video_codec.cc index d03082b91e..d05eb456fc 100644 --- a/api/video_codecs/video_codec.cc +++ b/api/video_codecs/video_codec.cc @@ -56,19 +56,8 @@ bool VideoCodecH264::operator==(const VideoCodecH264& other) const { numberOfTemporalLayers == other.numberOfTemporalLayers); } -bool SpatialLayer::operator==(const SpatialLayer& other) const { - return (width == other.width && height == other.height && - maxFramerate == other.maxFramerate && - numberOfTemporalLayers == other.numberOfTemporalLayers && - maxBitrate == other.maxBitrate && - targetBitrate == other.targetBitrate && - minBitrate == other.minBitrate && qpMax == other.qpMax && - active == other.active); -} - VideoCodec::VideoCodec() : codecType(kVideoCodecGeneric), - plType(0), width(0), height(0), startBitrate(0), @@ -83,6 +72,7 @@ VideoCodec::VideoCodec() mode(VideoCodecMode::kRealtimeVideo), expect_encode_from_texture(false), timing_frame_thresholds({0, 0}), + legacy_conference_mode(false), codec_specific_() {} VideoCodecVP8* VideoCodec::VP8() { @@ -130,6 +120,7 @@ const char* CodecTypeToPayloadString(VideoCodecType type) { case kVideoCodecGeneric: return kPayloadNameGeneric; } + RTC_CHECK_NOTREACHED(); } VideoCodecType PayloadStringToCodecType(const std::string& name) { diff --git a/api/video_codecs/video_codec.h b/api/video_codecs/video_codec.h index 330bbbce19..e1a8d06b21 100644 --- a/api/video_codecs/video_codec.h +++ b/api/video_codecs/video_codec.h @@ -16,10 +16,11 @@ #include +#include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "api/video/video_bitrate_allocation.h" #include "api/video/video_codec_type.h" -#include 
"common_types.h" // NOLINT(build/include) +#include "api/video_codecs/spatial_layer.h" #include "rtc_base/system/rtc_export.h" namespace webrtc { @@ -101,9 +102,16 @@ class RTC_EXPORT VideoCodec { public: VideoCodec(); + // Scalability mode as described in + // https://www.w3.org/TR/webrtc-svc/#scalabilitymodes* + // or value 'NONE' to indicate no scalability. + absl::string_view ScalabilityMode() const { return scalability_mode_; } + void SetScalabilityMode(absl::string_view scalability_mode) { + scalability_mode_ = std::string(scalability_mode); + } + // Public variables. TODO(hta): Make them private with accessors. VideoCodecType codecType; - unsigned char plType; // TODO(nisse): Change to int, for consistency. uint16_t width; @@ -121,7 +129,7 @@ class RTC_EXPORT VideoCodec { unsigned int qpMax; unsigned char numberOfSimulcastStreams; - SimulcastStream simulcastStream[kMaxSimulcastStreams]; + SpatialLayer simulcastStream[kMaxSimulcastStreams]; SpatialLayer spatialLayers[kMaxSpatialLayers]; VideoCodecMode mode; @@ -146,6 +154,9 @@ class RTC_EXPORT VideoCodec { uint16_t outlier_ratio_percent; } timing_frame_thresholds; + // Legacy Google conference mode flag for simulcast screenshare + bool legacy_conference_mode; + bool operator==(const VideoCodec& other) const = delete; bool operator!=(const VideoCodec& other) const = delete; @@ -164,6 +175,7 @@ class RTC_EXPORT VideoCodec { // TODO(hta): Consider replacing the union with a pointer type. // This will allow removing the VideoCodec* types from this file. 
VideoCodecUnion codec_specific_; + std::string scalability_mode_; }; } // namespace webrtc diff --git a/api/video_codecs/video_decoder.cc b/api/video_codecs/video_decoder.cc index b181323911..04673e6c31 100644 --- a/api/video_codecs/video_decoder.cc +++ b/api/video_codecs/video_decoder.cc @@ -10,6 +10,8 @@ #include "api/video_codecs/video_decoder.h" +#include "rtc_base/strings/string_builder.h" + namespace webrtc { int32_t DecodedImageCallback::Decoded(VideoFrame& decodedImage, @@ -24,12 +26,31 @@ void DecodedImageCallback::Decoded(VideoFrame& decodedImage, Decoded(decodedImage, decode_time_ms.value_or(-1)); } -bool VideoDecoder::PrefersLateDecoding() const { - return true; +VideoDecoder::DecoderInfo VideoDecoder::GetDecoderInfo() const { + DecoderInfo info; + info.implementation_name = ImplementationName(); + return info; } const char* VideoDecoder::ImplementationName() const { return "unknown"; } +std::string VideoDecoder::DecoderInfo::ToString() const { + char string_buf[2048]; + rtc::SimpleStringBuilder oss(string_buf); + + oss << "DecoderInfo { " + << "prefers_late_decoding = " + << "implementation_name = '" << implementation_name << "', " + << "is_hardware_accelerated = " + << (is_hardware_accelerated ? "true" : "false") << " }"; + return oss.str(); +} + +bool VideoDecoder::DecoderInfo::operator==(const DecoderInfo& rhs) const { + return is_hardware_accelerated == rhs.is_hardware_accelerated && + implementation_name == rhs.implementation_name; +} + } // namespace webrtc diff --git a/api/video_codecs/video_decoder.h b/api/video_codecs/video_decoder.h index 266d653693..04052de08b 100644 --- a/api/video_codecs/video_decoder.h +++ b/api/video_codecs/video_decoder.h @@ -42,6 +42,18 @@ class RTC_EXPORT DecodedImageCallback { class RTC_EXPORT VideoDecoder { public: + struct DecoderInfo { + // Descriptive name of the decoder implementation. + std::string implementation_name; + + // True if the decoder is backed by hardware acceleration. 
+ bool is_hardware_accelerated = false; + + std::string ToString() const; + bool operator==(const DecoderInfo& rhs) const; + bool operator!=(const DecoderInfo& rhs) const { return !(*this == rhs); } + }; + virtual ~VideoDecoder() {} virtual int32_t InitDecode(const VideoCodec* codec_settings, @@ -56,11 +68,9 @@ class RTC_EXPORT VideoDecoder { virtual int32_t Release() = 0; - // Returns true if the decoder prefer to decode frames late. - // That is, it can not decode infinite number of frames before the decoded - // frame is consumed. - virtual bool PrefersLateDecoding() const; + virtual DecoderInfo GetDecoderInfo() const; + // Deprecated, use GetDecoderInfo().implementation_name instead. virtual const char* ImplementationName() const; }; diff --git a/api/video_codecs/video_decoder_factory.h b/api/video_codecs/video_decoder_factory.h index e4d83c2465..0b6ea4f9f2 100644 --- a/api/video_codecs/video_decoder_factory.h +++ b/api/video_codecs/video_decoder_factory.h @@ -15,31 +15,51 @@ #include #include +#include "absl/types/optional.h" +#include "api/video_codecs/sdp_video_format.h" #include "rtc_base/system/rtc_export.h" namespace webrtc { class VideoDecoder; -struct SdpVideoFormat; // A factory that creates VideoDecoders. // NOTE: This class is still under development and may change without notice. class RTC_EXPORT VideoDecoderFactory { public: + struct CodecSupport { + bool is_supported = false; + bool is_power_efficient = false; + }; + // Returns a list of supported video formats in order of preference, to use // for signaling etc. virtual std::vector GetSupportedFormats() const = 0; + // Query whether the specifed format is supported or not and if it will be + // power efficient, which is currently interpreted as if there is support for + // hardware acceleration. + // See https://w3c.github.io/webrtc-svc/#scalabilitymodes* for a specification + // of valid values for |scalability_mode|. 
+ // NOTE: QueryCodecSupport is currently an experimental feature that is + // subject to change without notice. + virtual CodecSupport QueryCodecSupport( + const SdpVideoFormat& format, + absl::optional scalability_mode) const { + // Default implementation, query for supported formats and check if the + // specified format is supported. Returns false if scalability_mode is + // specified. + CodecSupport codec_support; + if (!scalability_mode) { + codec_support.is_supported = format.IsCodecInList(GetSupportedFormats()); + } + return codec_support; + } + // Creates a VideoDecoder for the specified format. virtual std::unique_ptr CreateVideoDecoder( const SdpVideoFormat& format) = 0; - // Note: Do not call or override this method! This method is a legacy - // workaround and is scheduled for removal without notice. - virtual std::unique_ptr LegacyCreateVideoDecoder( - const SdpVideoFormat& format, - const std::string& receive_stream_id); - virtual ~VideoDecoderFactory() {} }; diff --git a/api/video_codecs/video_decoder_software_fallback_wrapper.cc b/api/video_codecs/video_decoder_software_fallback_wrapper.cc index 128087f207..e5743b3a2e 100644 --- a/api/video_codecs/video_decoder_software_fallback_wrapper.cc +++ b/api/video_codecs/video_decoder_software_fallback_wrapper.cc @@ -50,8 +50,8 @@ class VideoDecoderSoftwareFallbackWrapper final : public VideoDecoder { DecodedImageCallback* callback) override; int32_t Release() override; - bool PrefersLateDecoding() const override; + DecoderInfo GetDecoderInfo() const override; const char* ImplementationName() const override; private: @@ -262,14 +262,23 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Release() { return status; } -bool VideoDecoderSoftwareFallbackWrapper::PrefersLateDecoding() const { - return active_decoder().PrefersLateDecoding(); +VideoDecoder::DecoderInfo VideoDecoderSoftwareFallbackWrapper::GetDecoderInfo() + const { + DecoderInfo info = active_decoder().GetDecoderInfo(); + if (decoder_type_ == 
DecoderType::kFallback) { + // Cached "A (fallback from B)" string. + info.implementation_name = fallback_implementation_name_; + } + return info; } const char* VideoDecoderSoftwareFallbackWrapper::ImplementationName() const { - return decoder_type_ == DecoderType::kFallback - ? fallback_implementation_name_.c_str() - : hw_decoder_->ImplementationName(); + if (decoder_type_ == DecoderType::kFallback) { + // Cached "A (fallback from B)" string. + return fallback_implementation_name_.c_str(); + } else { + return hw_decoder_->ImplementationName(); + } } VideoDecoder& VideoDecoderSoftwareFallbackWrapper::active_decoder() const { diff --git a/api/video_codecs/video_encoder.cc b/api/video_codecs/video_encoder.cc index 4427d6c1f1..a7e9d7487c 100644 --- a/api/video_codecs/video_encoder.cc +++ b/api/video_codecs/video_encoder.cc @@ -94,6 +94,7 @@ bool VideoEncoder::ResolutionBitrateLimits::operator==( VideoEncoder::EncoderInfo::EncoderInfo() : scaling_settings(VideoEncoder::ScalingSettings::kOff), requested_resolution_alignment(1), + apply_alignment_to_all_simulcast_layers(false), supports_native_handle(false), implementation_name("unknown"), has_trusted_rate_controller(false), @@ -102,7 +103,8 @@ VideoEncoder::EncoderInfo::EncoderInfo() fps_allocation{absl::InlinedVector( 1, kMaxFramerateFraction)}, - supports_simulcast(false) {} + supports_simulcast(false), + preferred_pixel_formats{VideoFrameBuffer::Type::kI420} {} VideoEncoder::EncoderInfo::EncoderInfo(const EncoderInfo&) = default; @@ -123,6 +125,8 @@ std::string VideoEncoder::EncoderInfo::ToString() const { oss << "min_pixels_per_frame = " << scaling_settings.min_pixels_per_frame << " }"; oss << ", requested_resolution_alignment = " << requested_resolution_alignment + << ", apply_alignment_to_all_simulcast_layers = " + << apply_alignment_to_all_simulcast_layers << ", supports_native_handle = " << supports_native_handle << ", implementation_name = '" << implementation_name << "'" @@ -131,8 +135,17 @@ std::string 
VideoEncoder::EncoderInfo::ToString() const { << ", is_hardware_accelerated = " << is_hardware_accelerated << ", has_internal_source = " << has_internal_source << ", fps_allocation = ["; + size_t num_spatial_layer_with_fps_allocation = 0; + for (size_t i = 0; i < kMaxSpatialLayers; ++i) { + if (!fps_allocation[i].empty()) { + num_spatial_layer_with_fps_allocation = i + 1; + } + } bool first = true; - for (size_t i = 0; i < fps_allocation->size(); ++i) { + for (size_t i = 0; i < num_spatial_layer_with_fps_allocation; ++i) { + if (fps_allocation[i].empty()) { + break; + } if (!first) { oss << ", "; } @@ -166,7 +179,15 @@ std::string VideoEncoder::EncoderInfo::ToString() const { } oss << "] " ", supports_simulcast = " - << supports_simulcast << "}"; + << supports_simulcast; + oss << ", preferred_pixel_formats = ["; + for (size_t i = 0; i < preferred_pixel_formats.size(); ++i) { + if (i > 0) + oss << ", "; + oss << VideoFrameBufferTypeToString(preferred_pixel_formats.at(i)); + } + oss << "]"; + oss << "}"; return oss.str(); } diff --git a/api/video_codecs/video_encoder.h b/api/video_codecs/video_encoder.h index 064dc8ffb5..caf069718b 100644 --- a/api/video_codecs/video_encoder.h +++ b/api/video_codecs/video_encoder.h @@ -30,13 +30,12 @@ namespace webrtc { -class RTPFragmentationHeader; // TODO(pbos): Expose these through a public (root) header or change these APIs. struct CodecSpecificInfo; constexpr int kDefaultMinPixelsPerFrame = 320 * 180; -class EncodedImageCallback { +class RTC_EXPORT EncodedImageCallback { public: virtual ~EncodedImageCallback() {} @@ -75,8 +74,7 @@ class EncodedImageCallback { // Callback function which is called when an image has been encoded. 
virtual Result OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) = 0; + const CodecSpecificInfo* codec_specific_info) = 0; virtual void OnDroppedFrame(DropReason reason) {} }; @@ -176,6 +174,15 @@ class RTC_EXPORT VideoEncoder { // requirements the encoder has on the incoming video frame buffers. int requested_resolution_alignment; + // Same as above but if true, each simulcast layer should also be divisible + // by |requested_resolution_alignment|. + // Note that scale factors |scale_resolution_down_by| may be adjusted so a + // common multiple is not too large to avoid largely cropped frames and + // possibly with an aspect ratio far from the original. + // Warning: large values of scale_resolution_down_by could be changed + // considerably, especially if |requested_resolution_alignment| is large. + bool apply_alignment_to_all_simulcast_layers; + // If true, encoder supports working with a native handle (e.g. texture // handle for hw codecs) rather than requiring a raw I420 buffer. bool supports_native_handle; @@ -247,6 +254,12 @@ class RTC_EXPORT VideoEncoder { // in such case the encoder should return // WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED. bool supports_simulcast; + + // The list of pixel formats preferred by the encoder. It is assumed that if + // the list is empty and supports_native_handle is false, then {I420} is the + // preferred pixel format. The order of the formats does not matter. + absl::InlinedVector + preferred_pixel_formats; }; struct RTC_EXPORT RateControlParameters { @@ -260,6 +273,9 @@ class RTC_EXPORT VideoEncoder { // Target bitrate, per spatial/temporal layer. // A target bitrate of 0bps indicates a layer should not be encoded at all. + VideoBitrateAllocation target_bitrate; + // Adjusted target bitrate, per spatial/temporal layer. May be lower or + // higher than the target depending on encoder behaviour. 
VideoBitrateAllocation bitrate; // Target framerate, in fps. A value <= 0.0 is invalid and should be // interpreted as framerate target not available. In this case the encoder @@ -348,7 +364,7 @@ class RTC_EXPORT VideoEncoder { // TODO(bugs.webrtc.org/10720): After updating downstream projects and posting // an announcement to discuss-webrtc, remove the three-parameters variant // and make the two-parameters variant pure-virtual. - /* RTC_DEPRECATED */ virtual int32_t InitEncode( + /* ABSL_DEPRECATED("bugs.webrtc.org/10720") */ virtual int32_t InitEncode( const VideoCodec* codec_settings, int32_t number_of_cores, size_t max_payload_size); @@ -368,7 +384,7 @@ class RTC_EXPORT VideoEncoder { // Return value : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise. virtual int32_t Release() = 0; - // Encode an I420 image (as a part of a video stream). The encoded image + // Encode an image (as a part of a video stream). The encoded image // will be returned to the user through the encode complete callback. 
// // Input: diff --git a/api/video_codecs/video_encoder_config.cc b/api/video_codecs/video_encoder_config.cc index 6efcbf2bdd..0321da24da 100644 --- a/api/video_codecs/video_encoder_config.cc +++ b/api/video_codecs/video_encoder_config.cc @@ -43,6 +43,7 @@ std::string VideoStream::ToString() const { ss << ", num_temporal_layers: " << num_temporal_layers.value_or(1); ss << ", bitrate_priority: " << bitrate_priority.value_or(0); ss << ", active: " << active; + ss << ", scale_down_by: " << scale_resolution_down_by; return ss.str(); } @@ -55,7 +56,9 @@ VideoEncoderConfig::VideoEncoderConfig() min_transmit_bitrate_bps(0), max_bitrate_bps(0), bitrate_priority(1.0), - number_of_streams(0) {} + number_of_streams(0), + legacy_conference_mode(false), + is_quality_scaling_allowed(false) {} VideoEncoderConfig::VideoEncoderConfig(VideoEncoderConfig&&) = default; diff --git a/api/video_codecs/video_encoder_config.h b/api/video_codecs/video_encoder_config.h index ef8db100a3..59163743a2 100644 --- a/api/video_codecs/video_encoder_config.h +++ b/api/video_codecs/video_encoder_config.h @@ -64,6 +64,8 @@ struct VideoStream { // between multiple streams. absl::optional bitrate_priority; + absl::optional scalability_mode; + // If this stream is enabled by the user, or not. bool active; }; @@ -176,6 +178,12 @@ class VideoEncoderConfig { // Max number of encoded VideoStreams to produce. size_t number_of_streams; + // Legacy Google conference mode flag for simulcast screenshare + bool legacy_conference_mode; + + // Indicates whether quality scaling can be used or not. + bool is_quality_scaling_allowed; + private: // Access to the copy constructor is private to force use of the Copy() // method for those exceptional cases where we do use it. 
diff --git a/api/video_codecs/video_encoder_factory.h b/api/video_codecs/video_encoder_factory.h index 630b7aa70c..c2d66cfa86 100644 --- a/api/video_codecs/video_encoder_factory.h +++ b/api/video_codecs/video_encoder_factory.h @@ -12,6 +12,7 @@ #define API_VIDEO_CODECS_VIDEO_ENCODER_FACTORY_H_ #include +#include #include #include "absl/types/optional.h" @@ -28,15 +29,17 @@ class VideoEncoderFactory { public: // TODO(magjed): Try to get rid of this struct. struct CodecInfo { - // |is_hardware_accelerated| is true if the encoders created by this factory - // of the given codec will use hardware support. - bool is_hardware_accelerated; // |has_internal_source| is true if encoders created by this factory of the // given codec will use internal camera sources, meaning that they don't // require/expect frames to be delivered via webrtc::VideoEncoder::Encode. // This flag is used as the internal_source parameter to // webrtc::ViEExternalCodec::RegisterExternalSendCodec. - bool has_internal_source; + bool has_internal_source = false; + }; + + struct CodecSupport { + bool is_supported = false; + bool is_power_efficient = false; }; // An injectable class that is continuously updated with encoding conditions @@ -73,8 +76,33 @@ class VideoEncoderFactory { // Returns information about how this format will be encoded. The specified // format must be one of the supported formats by this factory. - // TODO(magjed): Try to get rid of this method. - virtual CodecInfo QueryVideoEncoder(const SdpVideoFormat& format) const = 0; + + // TODO(magjed): Try to get rid of this method. Since is_hardware_accelerated + // is unused, only factories producing internal source encoders (in itself a + // deprecated feature) needs to override this method. 
+ virtual CodecInfo QueryVideoEncoder(const SdpVideoFormat& format) const { + return CodecInfo(); + } + + // Query whether the specifed format is supported or not and if it will be + // power efficient, which is currently interpreted as if there is support for + // hardware acceleration. + // See https://w3c.github.io/webrtc-svc/#scalabilitymodes* for a specification + // of valid values for |scalability_mode|. + // NOTE: QueryCodecSupport is currently an experimental feature that is + // subject to change without notice. + virtual CodecSupport QueryCodecSupport( + const SdpVideoFormat& format, + absl::optional scalability_mode) const { + // Default implementation, query for supported formats and check if the + // specified format is supported. Returns false if scalability_mode is + // specified. + CodecSupport codec_support; + if (!scalability_mode) { + codec_support.is_supported = format.IsCodecInList(GetSupportedFormats()); + } + return codec_support; + } // Creates a VideoEncoder for the specified format. 
virtual std::unique_ptr CreateVideoEncoder( diff --git a/api/video_codecs/video_encoder_software_fallback_wrapper.cc b/api/video_codecs/video_encoder_software_fallback_wrapper.cc index 354e8c25ac..bcce9dcd93 100644 --- a/api/video_codecs/video_encoder_software_fallback_wrapper.cc +++ b/api/video_codecs/video_encoder_software_fallback_wrapper.cc @@ -25,6 +25,7 @@ #include "api/video/video_frame.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" +#include "media/base/video_common.h" #include "modules/video_coding/include/video_error_codes.h" #include "modules/video_coding/utility/simulcast_utility.h" #include "rtc_base/checks.h" @@ -50,13 +51,12 @@ struct ForcedFallbackParams { return enable_resolution_based_switch && codec.codecType == kVideoCodecVP8 && codec.numberOfSimulcastStreams <= 1 && - codec.VP8().numberOfTemporalLayers == 1 && codec.width * codec.height <= max_pixels; } bool SupportsTemporalBasedSwitch(const VideoCodec& codec) const { return enable_temporal_based_switch && - SimulcastUtility::NumberOfTemporalLayers(codec, 0) > 1; + SimulcastUtility::NumberOfTemporalLayers(codec, 0) != 1; } bool enable_temporal_based_switch = false; @@ -162,6 +162,7 @@ class VideoEncoderSoftwareFallbackWrapper final : public VideoEncoder { case EncoderState::kForcedFallback: return fallback_encoder_.get(); } + RTC_CHECK_NOTREACHED(); } // Updates encoder with last observed parameters, such as callbacks, rates, @@ -345,7 +346,9 @@ int32_t VideoEncoderSoftwareFallbackWrapper::Encode( case EncoderState::kForcedFallback: return fallback_encoder_->Encode(frame, frame_types); } + RTC_CHECK_NOTREACHED(); } + int32_t VideoEncoderSoftwareFallbackWrapper::EncodeWithMainEncoder( const VideoFrame& frame, const std::vector* frame_types) { @@ -367,9 +370,12 @@ int32_t VideoEncoderSoftwareFallbackWrapper::EncodeWithMainEncoder( RTC_LOG(LS_ERROR) << "Failed to convert from to I420"; return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE; } - rtc::scoped_refptr 
dst_buffer = - I420Buffer::Create(codec_settings_.width, codec_settings_.height); - dst_buffer->ScaleFrom(*src_buffer); + rtc::scoped_refptr dst_buffer = + src_buffer->Scale(codec_settings_.width, codec_settings_.height); + if (!dst_buffer) { + RTC_LOG(LS_ERROR) << "Failed to scale video frame."; + return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE; + } VideoFrame scaled_frame = frame; scaled_frame.set_video_frame_buffer(dst_buffer); scaled_frame.set_update_rect(VideoFrame::UpdateRect{ @@ -412,6 +418,13 @@ VideoEncoder::EncoderInfo VideoEncoderSoftwareFallbackWrapper::GetEncoderInfo() EncoderInfo info = IsFallbackActive() ? fallback_encoder_info : default_encoder_info; + info.requested_resolution_alignment = cricket::LeastCommonMultiple( + fallback_encoder_info.requested_resolution_alignment, + default_encoder_info.requested_resolution_alignment); + info.apply_alignment_to_all_simulcast_layers = + fallback_encoder_info.apply_alignment_to_all_simulcast_layers || + default_encoder_info.apply_alignment_to_all_simulcast_layers; + if (fallback_params_.has_value()) { const auto settings = (encoder_state_ == EncoderState::kForcedFallback) ? fallback_encoder_info.scaling_settings @@ -456,7 +469,7 @@ bool VideoEncoderSoftwareFallbackWrapper::TryInitForcedFallbackEncoder() { } if (encoder_state_ == EncoderState::kMainEncoderUsed && - encoder_->GetEncoderInfo().fps_allocation[0].size() > 1) { + encoder_->GetEncoderInfo().fps_allocation[0].size() != 1) { // Primary encoder already supports temporal layers, use that instead. return true; } @@ -465,7 +478,7 @@ bool VideoEncoderSoftwareFallbackWrapper::TryInitForcedFallbackEncoder() { if (fallback_encoder_->InitEncode(&codec_settings_, encoder_settings_.value()) == WEBRTC_VIDEO_CODEC_OK) { - if (fallback_encoder_->GetEncoderInfo().fps_allocation[0].size() > 1) { + if (fallback_encoder_->GetEncoderInfo().fps_allocation[0].size() != 1) { // Fallback encoder available and supports temporal layers, use it! 
if (encoder_state_ == EncoderState::kMainEncoderUsed) { // Main encoder initialized but does not support temporal layers, diff --git a/media/base/vp9_profile.cc b/api/video_codecs/vp9_profile.cc similarity index 86% rename from media/base/vp9_profile.cc rename to api/video_codecs/vp9_profile.cc index cfecc5e545..5e2bd53a86 100644 --- a/media/base/vp9_profile.cc +++ b/api/video_codecs/vp9_profile.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "media/base/vp9_profile.h" +#include "api/video_codecs/vp9_profile.h" #include #include @@ -24,6 +24,8 @@ std::string VP9ProfileToString(VP9Profile profile) { switch (profile) { case VP9Profile::kProfile0: return "0"; + case VP9Profile::kProfile1: + return "1"; case VP9Profile::kProfile2: return "2"; } @@ -38,12 +40,13 @@ absl::optional StringToVP9Profile(const std::string& str) { switch (i.value()) { case 0: return VP9Profile::kProfile0; + case 1: + return VP9Profile::kProfile1; case 2: return VP9Profile::kProfile2; default: return absl::nullopt; } - return absl::nullopt; } absl::optional ParseSdpForVP9Profile( @@ -55,7 +58,7 @@ absl::optional ParseSdpForVP9Profile( return StringToVP9Profile(profile_str); } -bool IsSameVP9Profile(const SdpVideoFormat::Parameters& params1, +bool VP9IsSameProfile(const SdpVideoFormat::Parameters& params1, const SdpVideoFormat::Parameters& params2) { const absl::optional profile = ParseSdpForVP9Profile(params1); const absl::optional other_profile = diff --git a/api/video_codecs/vp9_profile.h b/api/video_codecs/vp9_profile.h new file mode 100644 index 0000000000..e632df437b --- /dev/null +++ b/api/video_codecs/vp9_profile.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 
2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_VIDEO_CODECS_VP9_PROFILE_H_ +#define API_VIDEO_CODECS_VP9_PROFILE_H_ + +#include + +#include "absl/types/optional.h" +#include "api/video_codecs/sdp_video_format.h" +#include "rtc_base/system/rtc_export.h" + +namespace webrtc { + +// Profile information for VP9 video. +extern RTC_EXPORT const char kVP9FmtpProfileId[]; + +enum class VP9Profile { + kProfile0, + kProfile1, + kProfile2, +}; + +// Helper functions to convert VP9Profile to std::string. Returns "0" by +// default. +RTC_EXPORT std::string VP9ProfileToString(VP9Profile profile); + +// Helper functions to convert std::string to VP9Profile. Returns null if given +// an invalid profile string. +absl::optional StringToVP9Profile(const std::string& str); + +// Parse profile that is represented as a string of single digit contained in an +// SDP key-value map. A default profile(kProfile0) will be returned if the +// profile key is missing. Nothing will be returned if the key is present but +// the string is invalid. +RTC_EXPORT absl::optional ParseSdpForVP9Profile( + const SdpVideoFormat::Parameters& params); + +// Returns true if the parameters have the same VP9 profile, or neither contains +// VP9 profile. 
+bool VP9IsSameProfile(const SdpVideoFormat::Parameters& params1, + const SdpVideoFormat::Parameters& params2); + +} // namespace webrtc + +#endif // API_VIDEO_CODECS_VP9_PROFILE_H_ diff --git a/api/video_track_source_proxy.h b/api/video_track_source_proxy.h deleted file mode 100644 index 528b7cf701..0000000000 --- a/api/video_track_source_proxy.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2012 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef API_VIDEO_TRACK_SOURCE_PROXY_H_ -#define API_VIDEO_TRACK_SOURCE_PROXY_H_ - -#include "api/media_stream_interface.h" -#include "api/proxy.h" - -namespace webrtc { - -// Makes sure the real VideoTrackSourceInterface implementation is destroyed on -// the signaling thread and marshals all method calls to the signaling thread. -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. 
-BEGIN_PROXY_MAP(VideoTrackSource) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_CONSTMETHOD0(SourceState, state) -PROXY_CONSTMETHOD0(bool, remote) -PROXY_CONSTMETHOD0(bool, is_screencast) -PROXY_CONSTMETHOD0(absl::optional, needs_denoising) -PROXY_METHOD1(bool, GetStats, Stats*) -PROXY_WORKER_METHOD2(void, - AddOrUpdateSink, - rtc::VideoSinkInterface*, - const rtc::VideoSinkWants&) -PROXY_WORKER_METHOD1(void, RemoveSink, rtc::VideoSinkInterface*) -PROXY_METHOD1(void, RegisterObserver, ObserverInterface*) -PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*) -PROXY_CONSTMETHOD0(bool, SupportsEncodedOutput) -PROXY_WORKER_METHOD0(void, GenerateKeyFrame) -PROXY_WORKER_METHOD1(void, - AddEncodedSink, - rtc::VideoSinkInterface*) -PROXY_WORKER_METHOD1(void, - RemoveEncodedSink, - rtc::VideoSinkInterface*) -END_PROXY_MAP() - -} // namespace webrtc - -#endif // API_VIDEO_TRACK_SOURCE_PROXY_H_ diff --git a/api/video_track_source_proxy_factory.h b/api/video_track_source_proxy_factory.h new file mode 100644 index 0000000000..974720d50b --- /dev/null +++ b/api/video_track_source_proxy_factory.h @@ -0,0 +1,28 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_VIDEO_TRACK_SOURCE_PROXY_FACTORY_H_ +#define API_VIDEO_TRACK_SOURCE_PROXY_FACTORY_H_ + +#include "api/media_stream_interface.h" + +namespace webrtc { + +// Creates a proxy source for |source| which makes sure the real +// VideoTrackSourceInterface implementation is destroyed on the signaling thread +// and marshals calls to |worker_thread| and |signaling_thread|. 
+rtc::scoped_refptr RTC_EXPORT +CreateVideoTrackSourceProxy(rtc::Thread* signaling_thread, + rtc::Thread* worker_thread, + VideoTrackSourceInterface* source); + +} // namespace webrtc + +#endif // API_VIDEO_TRACK_SOURCE_PROXY_FACTORY_H_ diff --git a/api/voip/BUILD.gn b/api/voip/BUILD.gn index 2c5f71c988..714490a526 100644 --- a/api/voip/BUILD.gn +++ b/api/voip/BUILD.gn @@ -13,12 +13,19 @@ rtc_source_set("voip_api") { sources = [ "voip_base.h", "voip_codec.h", + "voip_dtmf.h", "voip_engine.h", "voip_network.h", + "voip_statistics.h", + "voip_volume_control.h", ] deps = [ "..:array_view", "../audio_codecs:audio_codecs_api", + "../neteq:neteq_api", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/types:optional", ] } @@ -42,9 +49,21 @@ rtc_library("voip_engine_factory") { } if (rtc_include_tests) { + rtc_source_set("mock_voip_engine") { + testonly = true + visibility = [ "*" ] + sources = [ "test/mock_voip_engine.h" ] + deps = [ + ":voip_api", + "..:array_view", + "../../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + } + rtc_library("voip_engine_factory_unittests") { testonly = true - sources = [ "voip_engine_factory_unittest.cc" ] + sources = [ "test/voip_engine_factory_unittest.cc" ] deps = [ ":voip_engine_factory", "../../modules/audio_device:mock_audio_device", @@ -54,4 +73,13 @@ if (rtc_include_tests) { "../task_queue:default_task_queue_factory", ] } + + rtc_library("compile_all_headers") { + testonly = true + sources = [ "test/compile_all_headers.cc" ] + deps = [ + ":mock_voip_engine", + "../../test:test_support", + ] + } } diff --git a/api/voip/test/compile_all_headers.cc b/api/voip/test/compile_all_headers.cc new file mode 100644 index 0000000000..73a0f0d1c4 --- /dev/null +++ b/api/voip/test/compile_all_headers.cc @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This file verifies that all include files in this directory can be +// compiled without errors or other required includes. + +#include "api/voip/test/mock_voip_engine.h" diff --git a/api/voip/test/mock_voip_engine.h b/api/voip/test/mock_voip_engine.h new file mode 100644 index 0000000000..74b880d652 --- /dev/null +++ b/api/voip/test/mock_voip_engine.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_VOIP_TEST_MOCK_VOIP_ENGINE_H_ +#define API_VOIP_TEST_MOCK_VOIP_ENGINE_H_ + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "api/voip/voip_base.h" +#include "api/voip/voip_codec.h" +#include "api/voip/voip_dtmf.h" +#include "api/voip/voip_engine.h" +#include "api/voip/voip_network.h" +#include "api/voip/voip_statistics.h" +#include "api/voip/voip_volume_control.h" +#include "test/gmock.h" + +namespace webrtc { + +class MockVoipBase : public VoipBase { + public: + MOCK_METHOD(ChannelId, + CreateChannel, + (Transport*, absl::optional), + (override)); + MOCK_METHOD(VoipResult, ReleaseChannel, (ChannelId), (override)); + MOCK_METHOD(VoipResult, StartSend, (ChannelId), (override)); + MOCK_METHOD(VoipResult, StopSend, (ChannelId), (override)); + MOCK_METHOD(VoipResult, StartPlayout, (ChannelId), (override)); + MOCK_METHOD(VoipResult, StopPlayout, (ChannelId), (override)); +}; + +class MockVoipCodec : public VoipCodec { + public: + MOCK_METHOD(VoipResult, + SetSendCodec, + (ChannelId, int, const SdpAudioFormat&), + (override)); + MOCK_METHOD(VoipResult, + SetReceiveCodecs, + (ChannelId, (const std::map&)), + (override)); +}; + +class MockVoipDtmf : public VoipDtmf { + public: + MOCK_METHOD(VoipResult, + RegisterTelephoneEventType, + (ChannelId, int, int), + (override)); + MOCK_METHOD(VoipResult, + SendDtmfEvent, + (ChannelId, DtmfEvent, int), + (override)); +}; + +class MockVoipNetwork : public VoipNetwork { + public: + MOCK_METHOD(VoipResult, + ReceivedRTPPacket, + (ChannelId channel_id, rtc::ArrayView rtp_packet), + (override)); + MOCK_METHOD(VoipResult, + ReceivedRTCPPacket, + (ChannelId channel_id, rtc::ArrayView rtcp_packet), + (override)); +}; + +class MockVoipStatistics : public VoipStatistics { + public: + MOCK_METHOD(VoipResult, + GetIngressStatistics, + (ChannelId, IngressStatistics&), + (override)); + MOCK_METHOD(VoipResult, + GetChannelStatistics, + (ChannelId channel_id, ChannelStatistics&), + 
(override)); +}; + +class MockVoipVolumeControl : public VoipVolumeControl { + public: + MOCK_METHOD(VoipResult, SetInputMuted, (ChannelId, bool), (override)); + + MOCK_METHOD(VoipResult, + GetInputVolumeInfo, + (ChannelId, VolumeInfo&), + (override)); + MOCK_METHOD(VoipResult, + GetOutputVolumeInfo, + (ChannelId, VolumeInfo&), + (override)); +}; + +class MockVoipEngine : public VoipEngine { + public: + VoipBase& Base() override { return base_; } + VoipNetwork& Network() override { return network_; } + VoipCodec& Codec() override { return codec_; } + VoipDtmf& Dtmf() override { return dtmf_; } + VoipStatistics& Statistics() override { return statistics_; } + VoipVolumeControl& VolumeControl() override { return volume_; } + + // Direct access to underlying members are required for testing. + MockVoipBase base_; + MockVoipNetwork network_; + MockVoipCodec codec_; + MockVoipDtmf dtmf_; + MockVoipStatistics statistics_; + MockVoipVolumeControl volume_; +}; + +} // namespace webrtc + +#endif // API_VOIP_TEST_MOCK_VOIP_ENGINE_H_ diff --git a/api/voip/voip_engine_factory_unittest.cc b/api/voip/test/voip_engine_factory_unittest.cc similarity index 80% rename from api/voip/voip_engine_factory_unittest.cc rename to api/voip/test/voip_engine_factory_unittest.cc index d0b8438368..f967a0ba8f 100644 --- a/api/voip/voip_engine_factory_unittest.cc +++ b/api/voip/test/voip_engine_factory_unittest.cc @@ -24,11 +24,11 @@ namespace { // Create voip engine with mock modules as normal use case. 
TEST(VoipEngineFactoryTest, CreateEngineWithMockModules) { VoipEngineConfig config; - config.encoder_factory = new rtc::RefCountedObject(); - config.decoder_factory = new rtc::RefCountedObject(); + config.encoder_factory = rtc::make_ref_counted(); + config.decoder_factory = rtc::make_ref_counted(); config.task_queue_factory = CreateDefaultTaskQueueFactory(); config.audio_processing = - new rtc::RefCountedObject(); + rtc::make_ref_counted>(); config.audio_device_module = test::MockAudioDeviceModule::CreateNice(); auto voip_engine = CreateVoipEngine(std::move(config)); @@ -38,8 +38,8 @@ TEST(VoipEngineFactoryTest, CreateEngineWithMockModules) { // Create voip engine without setting audio processing as optional component. TEST(VoipEngineFactoryTest, UseNoAudioProcessing) { VoipEngineConfig config; - config.encoder_factory = new rtc::RefCountedObject(); - config.decoder_factory = new rtc::RefCountedObject(); + config.encoder_factory = rtc::make_ref_counted(); + config.decoder_factory = rtc::make_ref_counted(); config.task_queue_factory = CreateDefaultTaskQueueFactory(); config.audio_device_module = test::MockAudioDeviceModule::CreateNice(); diff --git a/api/voip/voip_base.h b/api/voip/voip_base.h index ef83b51ed8..d469ea4bd4 100644 --- a/api/voip/voip_base.h +++ b/api/voip/voip_base.h @@ -11,6 +11,7 @@ #ifndef API_VOIP_VOIP_BASE_H_ #define API_VOIP_VOIP_BASE_H_ +#include "absl/base/attributes.h" #include "absl/types/optional.h" namespace webrtc { @@ -35,6 +36,21 @@ class Transport; enum class ChannelId : int {}; +enum class ABSL_MUST_USE_RESULT VoipResult { + // kOk indicates the function was successfully invoked with no error. + kOk, + // kInvalidArgument indicates the caller specified an invalid argument, such + // as an invalid ChannelId. + kInvalidArgument, + // kFailedPrecondition indicates that the operation was failed due to not + // satisfying prerequisite such as not setting codec type before sending. 
+ kFailedPrecondition, + // kInternal is used to indicate various internal failures that are not the + // caller's fault. Further detail is commented on each function that uses this + // return value. + kInternal, +}; + class VoipBase { public: // Creates a channel. @@ -46,40 +62,48 @@ class VoipBase { // and injection for incoming RTP from remote endpoint is handled via // VoipNetwork interface. |local_ssrc| is optional and when local_ssrc is not // set, some random value will be used by voip engine. - // Returns value is optional as to indicate the failure to create channel. - virtual absl::optional CreateChannel( - Transport* transport, - absl::optional local_ssrc) = 0; + // Returns a ChannelId created for caller to handle subsequent Channel + // operations. + virtual ChannelId CreateChannel(Transport* transport, + absl::optional local_ssrc) = 0; // Releases |channel_id| that no longer has any use. - virtual void ReleaseChannel(ChannelId channel_id) = 0; + // Returns following VoipResult; + // kOk - |channel_id| is released. + // kInvalidArgument - |channel_id| is invalid. + // kInternal - Fails to stop audio output device. + virtual VoipResult ReleaseChannel(ChannelId channel_id) = 0; - // Starts sending on |channel_id|. This will start microphone if not started - // yet. Returns false if initialization has failed on selected microphone - // device. API is subject to expand to reflect error condition to application - // later. - virtual bool StartSend(ChannelId channel_id) = 0; + // Starts sending on |channel_id|. This starts microphone if not started yet. + // Returns following VoipResult; + // kOk - Channel successfully started to send. + // kInvalidArgument - |channel_id| is invalid. + // kFailedPrecondition - Missing prerequisite on VoipCodec::SetSendCodec. + // kInternal - initialization has failed on selected microphone. + virtual VoipResult StartSend(ChannelId channel_id) = 0; // Stops sending on |channel_id|. 
If this is the last active channel, it will // stop microphone input from underlying audio platform layer. - // Returns false if termination logic has failed on selected microphone - // device. API is subject to expand to reflect error condition to application - // later. - virtual bool StopSend(ChannelId channel_id) = 0; + // Returns following VoipResult; + // kOk - Channel successfully stopped to send. + // kInvalidArgument - |channel_id| is invalid. + // kInternal - Failed to stop the active microphone device. + virtual VoipResult StopSend(ChannelId channel_id) = 0; // Starts playing on speaker device for |channel_id|. // This will start underlying platform speaker device if not started. - // Returns false if initialization has failed - // on selected speaker device. API is subject to expand to reflect error - // condition to application later. - virtual bool StartPlayout(ChannelId channel_id) = 0; + // Returns following VoipResult; + // kOk - Channel successfully started to play out. + // kInvalidArgument - |channel_id| is invalid. + // kFailedPrecondition - Missing prerequisite on VoipCodec::SetReceiveCodecs. + // kInternal - Failed to initialize the selected speaker device. + virtual VoipResult StartPlayout(ChannelId channel_id) = 0; // Stops playing on speaker device for |channel_id|. - // If this is the last active channel playing, then it will stop speaker - // from the platform layer. - // Returns false if termination logic has failed on selected speaker device. - // API is subject to expand to reflect error condition to application later. - virtual bool StopPlayout(ChannelId channel_id) = 0; + // Returns following VoipResult; + // kOk - Channel successfully stopped to play out. + // kInvalidArgument - |channel_id| is invalid. 
+ virtual VoipResult StopPlayout(ChannelId channel_id) = 0; protected: virtual ~VoipBase() = default; diff --git a/api/voip/voip_codec.h b/api/voip/voip_codec.h index eb42c449d9..fec3827dbe 100644 --- a/api/voip/voip_codec.h +++ b/api/voip/voip_codec.h @@ -29,15 +29,21 @@ namespace webrtc { class VoipCodec { public: // Set encoder type here along with its payload type to use. - virtual void SetSendCodec(ChannelId channel_id, - int payload_type, - const SdpAudioFormat& encoder_spec) = 0; + // Returns following VoipResult; + // kOk - sending codec is set as provided. + // kInvalidArgument - |channel_id| is invalid. + virtual VoipResult SetSendCodec(ChannelId channel_id, + int payload_type, + const SdpAudioFormat& encoder_spec) = 0; // Set decoder payload type here. In typical offer and answer model, // this should be called after payload type has been agreed in media // session. Note that payload type can differ with same codec in each // direction. - virtual void SetReceiveCodecs( + // Returns following VoipResult; + // kOk - receiving codecs are set as provided. + // kInvalidArgument - |channel_id| is invalid. + virtual VoipResult SetReceiveCodecs( ChannelId channel_id, const std::map& decoder_specs) = 0; diff --git a/api/voip/voip_dtmf.h b/api/voip/voip_dtmf.h new file mode 100644 index 0000000000..a7367bed53 --- /dev/null +++ b/api/voip/voip_dtmf.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_VOIP_VOIP_DTMF_H_ +#define API_VOIP_VOIP_DTMF_H_ + +#include "api/voip/voip_base.h" + +namespace webrtc { + +// DTMF events and their event codes as defined in +// https://tools.ietf.org/html/rfc4733#section-7 +enum class DtmfEvent : uint8_t { + kDigitZero = 0, + kDigitOne, + kDigitTwo, + kDigitThree, + kDigitFour, + kDigitFive, + kDigitSix, + kDigitSeven, + kDigitEight, + kDigitNine, + kAsterisk, + kHash, + kLetterA, + kLetterB, + kLetterC, + kLetterD +}; + +// VoipDtmf interface provides DTMF related interfaces such +// as sending DTMF events to the remote endpoint. +class VoipDtmf { + public: + // Register the payload type and sample rate for DTMF (RFC 4733) payload. + // Must be called exactly once prior to calling SendDtmfEvent after payload + // type has been negotiated with remote. + // Returns following VoipResult; + // kOk - telephone event type is registered as provided. + // kInvalidArgument - |channel_id| is invalid. + virtual VoipResult RegisterTelephoneEventType(ChannelId channel_id, + int rtp_payload_type, + int sample_rate_hz) = 0; + + // Send DTMF named event as specified by + // https://tools.ietf.org/html/rfc4733#section-3.2 + // |duration_ms| specifies the duration of DTMF packets that will be emitted + // in place of real RTP packets instead. + // Must be called after RegisterTelephoneEventType and VoipBase::StartSend + // have been called. + // Returns following VoipResult; + // kOk - requested DTMF event is successfully scheduled. + // kInvalidArgument - |channel_id| is invalid. + // kFailedPrecondition - Missing prerequisite on RegisterTelephoneEventType + // or sending state. 
+ virtual VoipResult SendDtmfEvent(ChannelId channel_id, + DtmfEvent dtmf_event, + int duration_ms) = 0; + + protected: + virtual ~VoipDtmf() = default; +}; + +} // namespace webrtc + +#endif // API_VOIP_VOIP_DTMF_H_ diff --git a/api/voip/voip_engine.h b/api/voip/voip_engine.h index 81c97c02e5..d223f6ad6c 100644 --- a/api/voip/voip_engine.h +++ b/api/voip/voip_engine.h @@ -16,11 +16,14 @@ namespace webrtc { class VoipBase; class VoipCodec; class VoipNetwork; +class VoipDtmf; +class VoipStatistics; +class VoipVolumeControl; // VoipEngine is the main interface serving as the entry point for all VoIP // APIs. A single instance of VoipEngine should suffice the most of the need for // typical VoIP applications as it handles multiple media sessions including a -// specialized session type like ad-hoc mesh conferencing. Below example code +// specialized session type like ad-hoc conference. Below example code // describes the typical sequence of API usage. Each API header contains more // description on what the methods are used for. // @@ -35,36 +38,35 @@ class VoipNetwork; // config.audio_processing = AudioProcessingBuilder().Create(); // // auto voip_engine = CreateVoipEngine(std::move(config)); -// if (!voip_engine) return some_failure; // // auto& voip_base = voip_engine->Base(); // auto& voip_codec = voip_engine->Codec(); // auto& voip_network = voip_engine->Network(); // -// absl::optional channel = -// voip_base.CreateChannel(&app_transport_); -// if (!channel) return some_failure; +// ChannelId channel = voip_base.CreateChannel(&app_transport_); // // // After SDP offer/answer, set payload type and codecs that have been // // decided through SDP negotiation. -// voip_codec.SetSendCodec(*channel, ...); -// voip_codec.SetReceiveCodecs(*channel, ...); +// // VoipResult handling omitted here. +// voip_codec.SetSendCodec(channel, ...); +// voip_codec.SetReceiveCodecs(channel, ...); // // // Start sending and playing RTP on voip channel. 
-// voip_base.StartSend(*channel); -// voip_base.StartPlayout(*channel); +// // VoipResult handling omitted here. +// voip_base.StartSend(channel); +// voip_base.StartPlayout(channel); // // // Inject received RTP/RTCP through VoipNetwork interface. -// voip_network.ReceivedRTPPacket(*channel, ...); -// voip_network.ReceivedRTCPPacket(*channel, ...); +// // VoipResult handling omitted here. +// voip_network.ReceivedRTPPacket(channel, ...); +// voip_network.ReceivedRTCPPacket(channel, ...); // // // Stop and release voip channel. -// voip_base.StopSend(*channel); -// voip_base.StopPlayout(*channel); -// voip_base.ReleaseChannel(*channel); +// // VoipResult handling omitted here. +// voip_base.StopSend(channel); +// voip_base.StopPlayout(channel); +// voip_base.ReleaseChannel(channel); // -// Current VoipEngine defines three sub-API classes and is subject to expand in -// near future. class VoipEngine { public: virtual ~VoipEngine() = default; @@ -80,6 +82,16 @@ class VoipEngine { // VoipCodec provides codec configuration APIs for encoder and decoders. virtual VoipCodec& Codec() = 0; + + // VoipDtmf provides DTMF event APIs to register and send DTMF events. + virtual VoipDtmf& Dtmf() = 0; + + // VoipStatistics provides performance metrics around audio decoding module + // and jitter buffer (NetEq). + virtual VoipStatistics& Statistics() = 0; + + // VoipVolumeControl provides various input/output volume control. 
+ virtual VoipVolumeControl& VolumeControl() = 0; }; } // namespace webrtc diff --git a/api/voip/voip_engine_factory.cc b/api/voip/voip_engine_factory.cc index 6ac3c86214..88f63f9c92 100644 --- a/api/voip/voip_engine_factory.cc +++ b/api/voip/voip_engine_factory.cc @@ -27,18 +27,11 @@ std::unique_ptr CreateVoipEngine(VoipEngineConfig config) { RTC_DLOG(INFO) << "No audio processing functionality provided."; } - auto voip_core = std::make_unique(); - - if (!voip_core->Init(std::move(config.encoder_factory), - std::move(config.decoder_factory), - std::move(config.task_queue_factory), - std::move(config.audio_device_module), - std::move(config.audio_processing))) { - RTC_DLOG(LS_ERROR) << "Failed to initialize VoIP core."; - return nullptr; - } - - return voip_core; + return std::make_unique(std::move(config.encoder_factory), + std::move(config.decoder_factory), + std::move(config.task_queue_factory), + std::move(config.audio_device_module), + std::move(config.audio_processing)); } } // namespace webrtc diff --git a/api/voip/voip_engine_factory.h b/api/voip/voip_engine_factory.h index 658ebfac83..62fe8011a6 100644 --- a/api/voip/voip_engine_factory.h +++ b/api/voip/voip_engine_factory.h @@ -61,9 +61,6 @@ struct VoipEngineConfig { }; // Creates a VoipEngine instance with provided VoipEngineConfig. -// This could return nullptr if AudioDeviceModule (ADM) initialization fails -// during construction of VoipEngine which would render VoipEngine -// nonfunctional. std::unique_ptr CreateVoipEngine(VoipEngineConfig config); } // namespace webrtc diff --git a/api/voip/voip_network.h b/api/voip/voip_network.h index c49c7695b9..c820ca04a3 100644 --- a/api/voip/voip_network.h +++ b/api/voip/voip_network.h @@ -18,20 +18,22 @@ namespace webrtc { // VoipNetwork interface provides any network related interfaces such as // processing received RTP/RTCP packet from remote endpoint. This interface -// requires a ChannelId created via VoipBase interface. 
Note that using invalid -// (previously released) ChannelId will silently fail these API calls as it -// would have released underlying audio components. It's anticipated that caller -// may be using different thread for network I/O where released channel id is -// still used to input incoming RTP packets in which case we should silently -// ignore. The interface is subjected to expand as needed in near future. +// requires a ChannelId created via VoipBase interface. class VoipNetwork { public: // The data received from the network including RTP header is passed here. - virtual void ReceivedRTPPacket(ChannelId channel_id, - rtc::ArrayView rtp_packet) = 0; + // Returns following VoipResult; + // kOk - received RTP packet is processed. + // kInvalidArgument - |channel_id| is invalid. + virtual VoipResult ReceivedRTPPacket( + ChannelId channel_id, + rtc::ArrayView rtp_packet) = 0; // The data received from the network including RTCP header is passed here. - virtual void ReceivedRTCPPacket( + // Returns following VoipResult; + // kOk - received RTCP packet is processed. + // kInvalidArgument - |channel_id| is invalid. + virtual VoipResult ReceivedRTCPPacket( ChannelId channel_id, rtc::ArrayView rtcp_packet) = 0; diff --git a/api/voip/voip_statistics.h b/api/voip/voip_statistics.h new file mode 100644 index 0000000000..1b9b1646b9 --- /dev/null +++ b/api/voip/voip_statistics.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef API_VOIP_VOIP_STATISTICS_H_ +#define API_VOIP_VOIP_STATISTICS_H_ + +#include "api/neteq/neteq.h" +#include "api/voip/voip_base.h" + +namespace webrtc { + +struct IngressStatistics { + // Stats included from api/neteq/neteq.h. + NetEqLifetimeStatistics neteq_stats; + + // Represents the total duration in seconds of all samples that have been + // received. + // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalsamplesduration + double total_duration = 0.0; +}; + +// Remote statistics obtained via remote RTCP SR/RR report received. +struct RemoteRtcpStatistics { + // Jitter as defined in RFC 3550 [6.4.1] expressed in seconds. + double jitter = 0.0; + + // Cumulative packets lost as defined in RFC 3550 [6.4.1] + int64_t packets_lost = 0; + + // Fraction lost as defined in RFC 3550 [6.4.1] expressed as a floating + // point number. + double fraction_lost = 0.0; + + // https://w3c.github.io/webrtc-stats/#dom-rtcremoteinboundrtpstreamstats-roundtriptime + absl::optional round_trip_time; + + // Last time (not RTP timestamp) when RTCP report received in milliseconds. 
+ int64_t last_report_received_timestamp_ms; +}; + +struct ChannelStatistics { + // https://w3c.github.io/webrtc-stats/#dom-rtcsentrtpstreamstats-packetssent + uint64_t packets_sent = 0; + + // https://w3c.github.io/webrtc-stats/#dom-rtcsentrtpstreamstats-bytessent + uint64_t bytes_sent = 0; + + // https://w3c.github.io/webrtc-stats/#dom-rtcreceivedrtpstreamstats-packetsreceived + uint64_t packets_received = 0; + + // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-bytesreceived + uint64_t bytes_received = 0; + + // https://w3c.github.io/webrtc-stats/#dom-rtcreceivedrtpstreamstats-jitter + double jitter = 0.0; + + // https://w3c.github.io/webrtc-stats/#dom-rtcreceivedrtpstreamstats-packetslost + int64_t packets_lost = 0; + + // SSRC from remote media endpoint as indicated either by RTP header in RFC + // 3550 [5.1] or RTCP SSRC of sender in RFC 3550 [6.4.1]. + absl::optional remote_ssrc; + + absl::optional remote_rtcp; +}; + +// VoipStatistics interface provides the interfaces for querying metrics around +// the jitter buffer (NetEq) performance. +class VoipStatistics { + public: + // Gets the audio ingress statistics by |ingress_stats| reference. + // Returns following VoipResult; + // kOk - successfully set provided IngressStatistics reference. + // kInvalidArgument - |channel_id| is invalid. + virtual VoipResult GetIngressStatistics(ChannelId channel_id, + IngressStatistics& ingress_stats) = 0; + + // Gets the channel statistics by |channel_stats| reference. + // Returns following VoipResult; + // kOk - successfully set provided ChannelStatistics reference. + // kInvalidArgument - |channel_id| is invalid. 
+ virtual VoipResult GetChannelStatistics(ChannelId channel_id, + ChannelStatistics& channel_stats) = 0; + + protected: + virtual ~VoipStatistics() = default; +}; + +} // namespace webrtc + +#endif // API_VOIP_VOIP_STATISTICS_H_ diff --git a/api/voip/voip_volume_control.h b/api/voip/voip_volume_control.h new file mode 100644 index 0000000000..d91eabc5a9 --- /dev/null +++ b/api/voip/voip_volume_control.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef API_VOIP_VOIP_VOLUME_CONTROL_H_ +#define API_VOIP_VOIP_VOLUME_CONTROL_H_ + +#include "api/voip/voip_base.h" + +namespace webrtc { + +struct VolumeInfo { + // https://w3c.github.io/webrtc-stats/#dom-rtcaudiosourcestats-audiolevel + double audio_level = 0; + // https://w3c.github.io/webrtc-stats/#dom-rtcaudiosourcestats-totalaudioenergy + double total_energy = 0.0; + // https://w3c.github.io/webrtc-stats/#dom-rtcaudiosourcestats-totalsamplesduration + double total_duration = 0.0; +}; + +// VoipVolumeControl interface. +// +// This sub-API supports functions related to the input (microphone) and output +// (speaker) device. +// +// Caller must ensure that ChannelId is valid otherwise it will result in no-op +// with error logging. +class VoipVolumeControl { + public: + // Mute/unmutes the microphone input sample before encoding process. Note that + // mute doesn't affect audio input level and energy values as input sample is + // silenced after the measurement. + // Returns following VoipResult; + // kOk - input source muted or unmuted as provided by |enable|. + // kInvalidArgument - |channel_id| is invalid. 
+ virtual VoipResult SetInputMuted(ChannelId channel_id, bool enable) = 0; + + // Gets the microphone volume info via |volume_info| reference. + // Returns following VoipResult; + // kOk - successfully set provided input volume info. + // kInvalidArgument - |channel_id| is invalid. + virtual VoipResult GetInputVolumeInfo(ChannelId channel_id, + VolumeInfo& volume_info) = 0; + + // Gets the speaker volume info via |volume_info| reference. + // Returns following VoipResult; + // kOk - successfully set provided output volume info. + // kInvalidArgument - |channel_id| is invalid. + virtual VoipResult GetOutputVolumeInfo(ChannelId channel_id, + VolumeInfo& volume_info) = 0; + + protected: + virtual ~VoipVolumeControl() = default; +}; + +} // namespace webrtc + +#endif // API_VOIP_VOIP_VOLUME_CONTROL_H_ diff --git a/audio/BUILD.gn b/audio/BUILD.gn index 725128bb1a..200f9f4038 100644 --- a/audio/BUILD.gn +++ b/audio/BUILD.gn @@ -47,9 +47,11 @@ rtc_library("audio") { "../api:rtp_headers", "../api:rtp_parameters", "../api:scoped_refptr", + "../api:sequence_checker", "../api:transport_api", "../api/audio:aec3_factory", "../api/audio:audio_frame_api", + "../api/audio:audio_frame_processor", "../api/audio:audio_mixer_api", "../api/audio_codecs:audio_codecs_api", "../api/crypto:frame_decryptor_interface", @@ -67,17 +69,18 @@ rtc_library("audio") { "../common_audio:common_audio_c", "../logging:rtc_event_audio", "../logging:rtc_stream_config", + "../modules/async_audio_processing", "../modules/audio_coding", "../modules/audio_coding:audio_coding_module_typedefs", "../modules/audio_coding:audio_encoder_cng", "../modules/audio_coding:audio_network_adaptor_config", + "../modules/audio_coding:red", "../modules/audio_device", "../modules/audio_processing", "../modules/audio_processing:api", "../modules/audio_processing:audio_frame_proxies", "../modules/audio_processing:rms_level", "../modules/pacing", - "../modules/remote_bitrate_estimator", "../modules/rtp_rtcp", 
"../modules/rtp_rtcp:rtp_rtcp_format", "../modules/utility", @@ -88,13 +91,18 @@ rtc_library("audio") { "../rtc_base:rtc_base_approved", "../rtc_base:rtc_task_queue", "../rtc_base:safe_minmax", + "../rtc_base:threading", "../rtc_base/experiments:field_trial_parser", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/synchronization:mutex", + "../rtc_base/system:no_unique_address", + "../rtc_base/task_utils:pending_task_safety_flag", "../rtc_base/task_utils:to_queued_task", "../system_wrappers", "../system_wrappers:field_trial", "../system_wrappers:metrics", "utility:audio_frame_operations", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/types:optional", ] @@ -132,6 +140,7 @@ if (rtc_include_tests) { "mock_voe_channel_proxy.h", "remix_resample_unittest.cc", "test/audio_stats_test.cc", + "test/nack_test.cc", ] deps = [ ":audio", @@ -144,6 +153,7 @@ if (rtc_include_tests) { "../api/audio_codecs:audio_codecs_api", "../api/audio_codecs/opus:audio_decoder_opus", "../api/audio_codecs/opus:audio_encoder_opus", + "../api/crypto:frame_decryptor_interface", "../api/rtc_event_log", "../api/task_queue:default_task_queue_factory", "../api/units:time_delta", @@ -185,7 +195,7 @@ if (rtc_include_tests) { ] } - if (rtc_enable_protobuf) { + if (rtc_enable_protobuf && !build_with_chromium) { rtc_test("low_bandwidth_audio_test") { testonly = true @@ -202,6 +212,7 @@ if (rtc_include_tests) { "../api:network_emulation_manager_api", "../api:peer_connection_quality_test_fixture_api", "../api:simulated_network_api", + "../api:time_controller", "../call:simulated_network", "../common_audio", "../system_wrappers", @@ -212,8 +223,8 @@ if (rtc_include_tests) { "../test:test_support", "../test/pc/e2e:network_quality_metrics_reporter", "//testing/gtest", - "//third_party/abseil-cpp/absl/flags:flag", ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag" ] if (is_android) { deps += [ 
"//testing/android/native_test:native_test_native_code" ] } @@ -249,7 +260,7 @@ if (rtc_include_tests) { data += [ "${root_out_dir}/low_bandwidth_audio_test" ] } - if (is_linux || is_android) { + if (is_linux || is_chromeos || is_android) { data += [ "../tools_webrtc/audio_quality/linux/PolqaOem64", "../tools_webrtc/audio_quality/linux/pesq", @@ -271,30 +282,32 @@ if (rtc_include_tests) { } } - rtc_library("audio_perf_tests") { - testonly = true + if (!build_with_chromium) { + rtc_library("audio_perf_tests") { + testonly = true - sources = [ - "test/audio_bwe_integration_test.cc", - "test/audio_bwe_integration_test.h", - ] - deps = [ - "../api:simulated_network_api", - "../api/task_queue", - "../call:fake_network", - "../call:simulated_network", - "../common_audio", - "../rtc_base:rtc_base_approved", - "../rtc_base:task_queue_for_test", - "../system_wrappers", - "../test:field_trial", - "../test:fileutils", - "../test:test_common", - "../test:test_main", - "../test:test_support", - "//testing/gtest", - ] + sources = [ + "test/audio_bwe_integration_test.cc", + "test/audio_bwe_integration_test.h", + ] + deps = [ + "../api:simulated_network_api", + "../api/task_queue", + "../call:fake_network", + "../call:simulated_network", + "../common_audio", + "../rtc_base:rtc_base_approved", + "../rtc_base:task_queue_for_test", + "../system_wrappers", + "../test:field_trial", + "../test:fileutils", + "../test:test_common", + "../test:test_main", + "../test:test_support", + "//testing/gtest", + ] - data = [ "//resources/voice_engine/audio_dtx16.wav" ] + data = [ "//resources/voice_engine/audio_dtx16.wav" ] + } } } diff --git a/audio/DEPS b/audio/DEPS index 8bb1f80805..9b89dc39ab 100644 --- a/audio/DEPS +++ b/audio/DEPS @@ -2,6 +2,7 @@ include_rules = [ "+call", "+common_audio", "+logging/rtc_event_log", + "+modules/async_audio_processing", "+modules/audio_coding", "+modules/audio_device", "+modules/audio_mixer", @@ -10,7 +11,6 @@ include_rules = [ "+modules/bitrate_controller", 
"+modules/congestion_controller", "+modules/pacing", - "+modules/remote_bitrate_estimator", "+modules/rtp_rtcp", "+modules/utility", "+system_wrappers", diff --git a/audio/audio_level.cc b/audio/audio_level.cc index 06702b4c0d..7874b73f1c 100644 --- a/audio/audio_level.cc +++ b/audio/audio_level.cc @@ -22,7 +22,7 @@ AudioLevel::AudioLevel() AudioLevel::~AudioLevel() {} void AudioLevel::Reset() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); abs_max_ = 0; count_ = 0; current_level_full_range_ = 0; @@ -31,24 +31,24 @@ void AudioLevel::Reset() { } int16_t AudioLevel::LevelFullRange() const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return current_level_full_range_; } void AudioLevel::ResetLevelFullRange() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); abs_max_ = 0; count_ = 0; current_level_full_range_ = 0; } double AudioLevel::TotalEnergy() const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return total_energy_; } double AudioLevel::TotalDuration() const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return total_duration_; } @@ -63,7 +63,7 @@ void AudioLevel::ComputeLevel(const AudioFrame& audioFrame, double duration) { // Protect member access using a lock since this method is called on a // dedicated audio thread in the RecordedDataIsAvailable() callback. 
- rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); if (abs_value > abs_max_) abs_max_ = abs_value; diff --git a/audio/audio_level.h b/audio/audio_level.h index 430edb1703..acd1231fe2 100644 --- a/audio/audio_level.h +++ b/audio/audio_level.h @@ -11,7 +11,7 @@ #ifndef AUDIO_AUDIO_LEVEL_H_ #define AUDIO_AUDIO_LEVEL_H_ -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -59,14 +59,14 @@ class AudioLevel { private: enum { kUpdateFrequency = 10 }; - rtc::CriticalSection crit_sect_; + mutable Mutex mutex_; - int16_t abs_max_ RTC_GUARDED_BY(crit_sect_); - int16_t count_ RTC_GUARDED_BY(crit_sect_); - int16_t current_level_full_range_ RTC_GUARDED_BY(crit_sect_); + int16_t abs_max_ RTC_GUARDED_BY(mutex_); + int16_t count_ RTC_GUARDED_BY(mutex_); + int16_t current_level_full_range_ RTC_GUARDED_BY(mutex_); - double total_energy_ RTC_GUARDED_BY(crit_sect_) = 0.0; - double total_duration_ RTC_GUARDED_BY(crit_sect_) = 0.0; + double total_energy_ RTC_GUARDED_BY(mutex_) = 0.0; + double total_duration_ RTC_GUARDED_BY(mutex_) = 0.0; }; } // namespace voe diff --git a/audio/audio_receive_stream.cc b/audio/audio_receive_stream.cc index 6bc0d4137e..f243fa67db 100644 --- a/audio/audio_receive_stream.cc +++ b/audio/audio_receive_stream.cc @@ -18,12 +18,14 @@ #include "api/audio_codecs/audio_format.h" #include "api/call/audio_sink.h" #include "api/rtp_parameters.h" +#include "api/sequence_checker.h" #include "audio/audio_send_stream.h" #include "audio/audio_state.h" #include "audio/channel_receive.h" #include "audio/conversion.h" #include "call/rtp_config.h" #include "call/rtp_stream_receiver_controller_interface.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" @@ -68,7 +70,6 @@ namespace { std::unique_ptr CreateChannelReceive( Clock* clock, webrtc::AudioState* 
audio_state, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, const webrtc::AudioReceiveStream::Config& config, RtcEventLog* event_log) { @@ -76,67 +77,70 @@ std::unique_ptr CreateChannelReceive( internal::AudioState* internal_audio_state = static_cast(audio_state); return voe::CreateChannelReceive( - clock, module_process_thread, neteq_factory, - internal_audio_state->audio_device_module(), config.rtcp_send_transport, - event_log, config.rtp.local_ssrc, config.rtp.remote_ssrc, - config.jitter_buffer_max_packets, config.jitter_buffer_fast_accelerate, - config.jitter_buffer_min_delay_ms, + clock, neteq_factory, internal_audio_state->audio_device_module(), + config.rtcp_send_transport, event_log, config.rtp.local_ssrc, + config.rtp.remote_ssrc, config.jitter_buffer_max_packets, + config.jitter_buffer_fast_accelerate, config.jitter_buffer_min_delay_ms, config.jitter_buffer_enable_rtx_handling, config.decoder_factory, - config.codec_pair_id, config.frame_decryptor, config.crypto_options, - std::move(config.frame_transformer)); + config.codec_pair_id, std::move(config.frame_decryptor), + config.crypto_options, std::move(config.frame_transformer)); } } // namespace AudioReceiveStream::AudioReceiveStream( Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, PacketRouter* packet_router, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, const webrtc::AudioReceiveStream::Config& config, const rtc::scoped_refptr& audio_state, webrtc::RtcEventLog* event_log) : AudioReceiveStream(clock, - receiver_controller, packet_router, config, audio_state, event_log, CreateChannelReceive(clock, audio_state.get(), - module_process_thread, neteq_factory, config, event_log)) {} AudioReceiveStream::AudioReceiveStream( Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, PacketRouter* packet_router, const webrtc::AudioReceiveStream::Config& config, const rtc::scoped_refptr& audio_state, webrtc::RtcEventLog* 
event_log, std::unique_ptr channel_receive) - : audio_state_(audio_state), - channel_receive_(std::move(channel_receive)), - source_tracker_(clock) { + : config_(config), + audio_state_(audio_state), + source_tracker_(clock), + channel_receive_(std::move(channel_receive)) { RTC_LOG(LS_INFO) << "AudioReceiveStream: " << config.rtp.remote_ssrc; RTC_DCHECK(config.decoder_factory); RTC_DCHECK(config.rtcp_send_transport); RTC_DCHECK(audio_state_); RTC_DCHECK(channel_receive_); - module_process_thread_checker_.Detach(); + packet_sequence_checker_.Detach(); - RTC_DCHECK(receiver_controller); RTC_DCHECK(packet_router); // Configure bandwidth estimation. channel_receive_->RegisterReceiverCongestionControlObjects(packet_router); - // Register with transport. - rtp_stream_receiver_ = receiver_controller->CreateReceiver( - config.rtp.remote_ssrc, channel_receive_.get()); - ConfigureStream(this, config, true); + // When output is muted, ChannelReceive will directly notify the source + // tracker of "delivered" frames, so RtpReceiver information will continue to + // be updated. + channel_receive_->SetSourceTracker(&source_tracker_); + + // Complete configuration. + // TODO(solenberg): Config NACK history window (which is a packet count), + // using the actual packet size for the configured codec. + channel_receive_->SetNACKStatus(config.rtp.nack.rtp_history_ms != 0, + config.rtp.nack.rtp_history_ms / 20); + channel_receive_->SetReceiveCodecs(config.decoder_map); + // `frame_transformer` and `frame_decryptor` have been given to + // `channel_receive_` already. 
} AudioReceiveStream::~AudioReceiveStream() { @@ -147,10 +151,43 @@ AudioReceiveStream::~AudioReceiveStream() { channel_receive_->ResetReceiverCongestionControlObjects(); } -void AudioReceiveStream::Reconfigure( +void AudioReceiveStream::RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK(!rtp_stream_receiver_); + rtp_stream_receiver_ = receiver_controller->CreateReceiver( + config_.rtp.remote_ssrc, channel_receive_.get()); +} + +void AudioReceiveStream::UnregisterFromTransport() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_stream_receiver_.reset(); +} + +void AudioReceiveStream::ReconfigureForTesting( const webrtc::AudioReceiveStream::Config& config) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - ConfigureStream(this, config, false); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + + // SSRC can't be changed mid-stream. + RTC_DCHECK_EQ(config_.rtp.remote_ssrc, config.rtp.remote_ssrc); + RTC_DCHECK_EQ(config_.rtp.local_ssrc, config.rtp.local_ssrc); + + // Configuration parameters which cannot be changed. + RTC_DCHECK_EQ(config_.rtcp_send_transport, config.rtcp_send_transport); + // Decoder factory cannot be changed because it is configured at + // voe::Channel construction time. + RTC_DCHECK_EQ(config_.decoder_factory, config.decoder_factory); + + // TODO(solenberg): Config NACK history window (which is a packet count), + // using the actual packet size for the configured codec. 
+ RTC_DCHECK_EQ(config_.rtp.nack.rtp_history_ms, config.rtp.nack.rtp_history_ms) + << "Use SetUseTransportCcAndNackHistory"; + + RTC_DCHECK(config_.decoder_map == config.decoder_map) << "Use SetDecoderMap"; + RTC_DCHECK_EQ(config_.frame_transformer, config.frame_transformer) + << "Use SetDepacketizerToDecoderFrameTransformer"; + + config_ = config; } void AudioReceiveStream::Start() { @@ -173,7 +210,56 @@ void AudioReceiveStream::Stop() { audio_state()->RemoveReceivingStream(this); } -webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const { +bool AudioReceiveStream::IsRunning() const { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + return playing_; +} + +void AudioReceiveStream::SetDepacketizerToDecoderFrameTransformer( + rtc::scoped_refptr frame_transformer) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + channel_receive_->SetDepacketizerToDecoderFrameTransformer( + std::move(frame_transformer)); +} + +void AudioReceiveStream::SetDecoderMap( + std::map decoder_map) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + config_.decoder_map = std::move(decoder_map); + channel_receive_->SetReceiveCodecs(config_.decoder_map); +} + +void AudioReceiveStream::SetUseTransportCcAndNackHistory(bool use_transport_cc, + int history_ms) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_DCHECK_GE(history_ms, 0); + config_.rtp.transport_cc = use_transport_cc; + if (config_.rtp.nack.rtp_history_ms != history_ms) { + config_.rtp.nack.rtp_history_ms = history_ms; + // TODO(solenberg): Config NACK history window (which is a packet count), + // using the actual packet size for the configured codec. + channel_receive_->SetNACKStatus(history_ms != 0, history_ms / 20); + } +} + +void AudioReceiveStream::SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) { + // TODO(bugs.webrtc.org/11993): This is called via WebRtcAudioReceiveStream, + // expect to be called on the network thread. 
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_); + channel_receive_->SetFrameDecryptor(std::move(frame_decryptor)); +} + +void AudioReceiveStream::SetRtpExtensions( + std::vector extensions) { + // TODO(bugs.webrtc.org/11993): This is called via WebRtcAudioReceiveStream, + // expect to be called on the network thread. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + config_.rtp.extensions = std::move(extensions); +} + +webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats( + bool get_and_clear_legacy_stats) const { RTC_DCHECK_RUN_ON(&worker_thread_checker_); webrtc::AudioReceiveStream::Stats stats; stats.remote_ssrc = config_.rtp.remote_ssrc; @@ -192,6 +278,7 @@ webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const { call_stats.header_and_padding_bytes_rcvd; stats.packets_rcvd = call_stats.packetsReceived; stats.packets_lost = call_stats.cumulativeLost; + stats.nacks_sent = call_stats.nacks_sent; stats.capture_start_ntp_time_ms = call_stats.capture_start_ntp_time_ms_; stats.last_packet_received_timestamp_ms = call_stats.last_packet_received_timestamp_ms; @@ -210,7 +297,7 @@ webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const { rtc::TimeMillis()); // Get jitter buffer and total delay (alg + jitter + playout) stats. 
- auto ns = channel_receive_->GetNetworkStatistics(); + auto ns = channel_receive_->GetNetworkStatistics(get_and_clear_legacy_stats); stats.fec_packets_received = ns.fecPacketsReceived; stats.fec_packets_discarded = ns.fecPacketsDiscarded; stats.jitter_buffer_ms = ns.currentBufferSize; @@ -252,6 +339,14 @@ webrtc::AudioReceiveStream::Stats AudioReceiveStream::GetStats() const { stats.decoding_plc_cng = ds.decoded_plc_cng; stats.decoding_muted_output = ds.decoded_muted_output; + stats.last_sender_report_timestamp_ms = + call_stats.last_sender_report_timestamp_ms; + stats.last_sender_report_remote_timestamp_ms = + call_stats.last_sender_report_remote_timestamp_ms; + stats.sender_reports_packets_sent = call_stats.sender_reports_packets_sent; + stats.sender_reports_bytes_sent = call_stats.sender_reports_bytes_sent; + stats.sender_reports_reports_count = call_stats.sender_reports_reports_count; + return stats; } @@ -305,14 +400,10 @@ uint32_t AudioReceiveStream::id() const { } absl::optional AudioReceiveStream::GetInfo() const { - RTC_DCHECK_RUN_ON(&module_process_thread_checker_); - absl::optional info = channel_receive_->GetSyncInfo(); - - if (!info) - return absl::nullopt; - - info->current_delay_ms = channel_receive_->GetDelayEstimate(); - return info; + // TODO(bugs.webrtc.org/11993): This is called via RtpStreamsSynchronizer, + // expect to be called on the network thread. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + return channel_receive_->GetSyncInfo(); } bool AudioReceiveStream::GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp, @@ -329,13 +420,15 @@ void AudioReceiveStream::SetEstimatedPlayoutNtpTimestampMs( time_ms); } -void AudioReceiveStream::SetMinimumPlayoutDelay(int delay_ms) { - RTC_DCHECK_RUN_ON(&module_process_thread_checker_); +bool AudioReceiveStream::SetMinimumPlayoutDelay(int delay_ms) { + // TODO(bugs.webrtc.org/11993): This is called via RtpStreamsSynchronizer, + // expect to be called on the network thread. 
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_); return channel_receive_->SetMinimumPlayoutDelay(delay_ms); } void AudioReceiveStream::AssociateSendStream(AudioSendStream* send_stream) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); channel_receive_->SetAssociatedSendChannel( send_stream ? send_stream->GetChannel() : nullptr); associated_send_stream_ = send_stream; @@ -349,12 +442,22 @@ void AudioReceiveStream::DeliverRtcp(const uint8_t* packet, size_t length) { channel_receive_->ReceivedRTCPPacket(packet, length); } -void AudioReceiveStream::OnRtpPacket(const RtpPacketReceived& packet) { - // TODO(solenberg): Tests call this function on a network thread, libjingle - // calls on the worker thread. We should move towards always using a network - // thread. Then this check can be enabled. - // RTC_DCHECK(!thread_checker_.IsCurrent()); - channel_receive_->OnRtpPacket(packet); +void AudioReceiveStream::SetSyncGroup(const std::string& sync_group) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + config_.sync_group = sync_group; +} + +void AudioReceiveStream::SetLocalSsrc(uint32_t local_ssrc) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + // TODO(tommi): Consider storing local_ssrc in one place. 
+ config_.rtp.local_ssrc = local_ssrc; + channel_receive_->OnLocalSsrcChange(local_ssrc); +} + +uint32_t AudioReceiveStream::local_ssrc() const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK_EQ(config_.rtp.local_ssrc, channel_receive_->GetLocalSsrc()); + return config_.rtp.local_ssrc; } const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const { @@ -364,7 +467,7 @@ const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const { const AudioSendStream* AudioReceiveStream::GetAssociatedSendStreamForTesting() const { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); return associated_send_stream_; } @@ -373,50 +476,5 @@ internal::AudioState* AudioReceiveStream::audio_state() const { RTC_DCHECK(audio_state); return audio_state; } - -void AudioReceiveStream::ConfigureStream(AudioReceiveStream* stream, - const Config& new_config, - bool first_time) { - RTC_LOG(LS_INFO) << "AudioReceiveStream::ConfigureStream: " - << new_config.ToString(); - RTC_DCHECK(stream); - const auto& channel_receive = stream->channel_receive_; - const auto& old_config = stream->config_; - - // Configuration parameters which cannot be changed. - RTC_DCHECK(first_time || - old_config.rtp.remote_ssrc == new_config.rtp.remote_ssrc); - RTC_DCHECK(first_time || - old_config.rtcp_send_transport == new_config.rtcp_send_transport); - // Decoder factory cannot be changed because it is configured at - // voe::Channel construction time. - RTC_DCHECK(first_time || - old_config.decoder_factory == new_config.decoder_factory); - - if (!first_time) { - // SSRC can't be changed mid-stream. - RTC_DCHECK_EQ(old_config.rtp.local_ssrc, new_config.rtp.local_ssrc); - RTC_DCHECK_EQ(old_config.rtp.remote_ssrc, new_config.rtp.remote_ssrc); - } - - // TODO(solenberg): Config NACK history window (which is a packet count), - // using the actual packet size for the configured codec. 
- if (first_time || old_config.rtp.nack.rtp_history_ms != - new_config.rtp.nack.rtp_history_ms) { - channel_receive->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0, - new_config.rtp.nack.rtp_history_ms / 20); - } - if (first_time || old_config.decoder_map != new_config.decoder_map) { - channel_receive->SetReceiveCodecs(new_config.decoder_map); - } - - if (first_time || - old_config.frame_transformer != new_config.frame_transformer) { - channel_receive->SetDepacketizerToDecoderFrameTransformer( - new_config.frame_transformer); - } - - stream->config_ = new_config; -} } // namespace internal } // namespace webrtc diff --git a/audio/audio_receive_stream.h b/audio/audio_receive_stream.h index c197aa8833..61ebc2719f 100644 --- a/audio/audio_receive_stream.h +++ b/audio/audio_receive_stream.h @@ -11,18 +11,20 @@ #ifndef AUDIO_AUDIO_RECEIVE_STREAM_H_ #define AUDIO_AUDIO_RECEIVE_STREAM_H_ +#include #include +#include #include #include "api/audio/audio_mixer.h" #include "api/neteq/neteq_factory.h" #include "api/rtp_headers.h" +#include "api/sequence_checker.h" #include "audio/audio_state.h" #include "call/audio_receive_stream.h" #include "call/syncable.h" #include "modules/rtp_rtcp/source/source_tracker.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -45,9 +47,7 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream, public Syncable { public: AudioReceiveStream(Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, PacketRouter* packet_router, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, const webrtc::AudioReceiveStream::Config& config, const rtc::scoped_refptr& audio_state, @@ -55,31 +55,55 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream, // For unit tests, which need to supply a mock channel receive. 
AudioReceiveStream( Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, PacketRouter* packet_router, const webrtc::AudioReceiveStream::Config& config, const rtc::scoped_refptr& audio_state, webrtc::RtcEventLog* event_log, std::unique_ptr channel_receive); + + AudioReceiveStream() = delete; + AudioReceiveStream(const AudioReceiveStream&) = delete; + AudioReceiveStream& operator=(const AudioReceiveStream&) = delete; + + // Destruction happens on the worker thread. Prior to destruction the caller + // must ensure that a registration with the transport has been cleared. See + // `RegisterWithTransport` for details. + // TODO(tommi): As a further improvement to this, performing the full + // destruction on the network thread could be made the default. ~AudioReceiveStream() override; + // Called on the network thread to register/unregister with the network + // transport. + void RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller); + // If registration has previously been done (via `RegisterWithTransport`) then + // `UnregisterFromTransport` must be called prior to destruction, on the + // network thread. + void UnregisterFromTransport(); + // webrtc::AudioReceiveStream implementation. 
- void Reconfigure(const webrtc::AudioReceiveStream::Config& config) override; void Start() override; void Stop() override; - webrtc::AudioReceiveStream::Stats GetStats() const override; + const RtpConfig& rtp_config() const override { return config_.rtp; } + bool IsRunning() const override; + void SetDepacketizerToDecoderFrameTransformer( + rtc::scoped_refptr frame_transformer) + override; + void SetDecoderMap(std::map decoder_map) override; + void SetUseTransportCcAndNackHistory(bool use_transport_cc, + int history_ms) override; + void SetFrameDecryptor(rtc::scoped_refptr + frame_decryptor) override; + void SetRtpExtensions(std::vector extensions) override; + + webrtc::AudioReceiveStream::Stats GetStats( + bool get_and_clear_legacy_stats) const override; void SetSink(AudioSinkInterface* sink) override; void SetGain(float gain) override; bool SetBaseMinimumPlayoutDelayMs(int delay_ms) override; int GetBaseMinimumPlayoutDelayMs() const override; std::vector GetSources() const override; - // TODO(nisse): We don't formally implement RtpPacketSinkInterface, and this - // method shouldn't be needed. But it's currently used by the - // AudioReceiveStreamTest.ReceiveRtpPacket unittest. Figure out if that test - // shuld be refactored or deleted, and then delete this method. 
- void OnRtpPacket(const RtpPacketReceived& packet); - // AudioMixer::Source AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz, AudioFrame* audio_frame) override; @@ -93,33 +117,52 @@ class AudioReceiveStream final : public webrtc::AudioReceiveStream, int64_t* time_ms) const override; void SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms, int64_t time_ms) override; - void SetMinimumPlayoutDelay(int delay_ms) override; + bool SetMinimumPlayoutDelay(int delay_ms) override; void AssociateSendStream(AudioSendStream* send_stream); void DeliverRtcp(const uint8_t* packet, size_t length); + + void SetSyncGroup(const std::string& sync_group); + + void SetLocalSsrc(uint32_t local_ssrc); + + uint32_t local_ssrc() const; + + uint32_t remote_ssrc() const { + // The remote_ssrc member variable of config_ will never change and can be + // considered const. + return config_.rtp.remote_ssrc; + } + const webrtc::AudioReceiveStream::Config& config() const; const AudioSendStream* GetAssociatedSendStreamForTesting() const; - private: - static void ConfigureStream(AudioReceiveStream* stream, - const Config& new_config, - bool first_time); + // TODO(tommi): Remove this method. + void ReconfigureForTesting(const webrtc::AudioReceiveStream::Config& config); + private: AudioState* audio_state() const; - rtc::ThreadChecker worker_thread_checker_; - rtc::ThreadChecker module_process_thread_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_thread_checker_; + // TODO(bugs.webrtc.org/11993): This checker conceptually represents + // operations that belong to the network thread. The Call class is currently + // moving towards handling network packets on the network thread and while + // that work is ongoing, this checker may in practice represent the worker + // thread, but still serves as a mechanism of grouping together concepts + // that belong to the network thread. Once the packets are fully delivered + // on the network thread, this comment will be deleted. 
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; webrtc::AudioReceiveStream::Config config_; rtc::scoped_refptr audio_state_; - const std::unique_ptr channel_receive_; SourceTracker source_tracker_; - AudioSendStream* associated_send_stream_ = nullptr; + const std::unique_ptr channel_receive_; + AudioSendStream* associated_send_stream_ + RTC_GUARDED_BY(packet_sequence_checker_) = nullptr; bool playing_ RTC_GUARDED_BY(worker_thread_checker_) = false; - std::unique_ptr rtp_stream_receiver_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioReceiveStream); + std::unique_ptr rtp_stream_receiver_ + RTC_GUARDED_BY(packet_sequence_checker_); }; } // namespace internal } // namespace webrtc diff --git a/audio/audio_receive_stream_unittest.cc b/audio/audio_receive_stream_unittest.cc index 7759dd1e72..fb5f1cb876 100644 --- a/audio/audio_receive_stream_unittest.cc +++ b/audio/audio_receive_stream_unittest.cc @@ -36,6 +36,7 @@ namespace { using ::testing::_; using ::testing::FloatEq; +using ::testing::NiceMock; using ::testing::Return; AudioDecodingCallStats MakeAudioDecodeStatsForTest() { @@ -53,8 +54,6 @@ AudioDecodingCallStats MakeAudioDecodeStatsForTest() { const uint32_t kRemoteSsrc = 1234; const uint32_t kLocalSsrc = 5678; -const size_t kOneByteExtensionHeaderLength = 4; -const size_t kOneByteExtensionLength = 4; const int kAudioLevelId = 3; const int kTransportSequenceNumberId = 4; const int kJitterBufferDelay = -7; @@ -69,14 +68,13 @@ const std::pair kReceiveCodec = { 123, {"codec_name_recv", 96000, 0}}; const NetworkStatistics kNetworkStats = { - 123, 456, false, 789012, 3456, 123, 456, 789, 543, 123, - 432, 321, 123, 101, 0, {}, 789, 12, 345, 678, - 901, 0, -1, -1, -1, -1, 0, 0, 0, 0}; + 123, 456, false, 789012, 3456, 123, 456, 789, 543, 123, 432, 321, 123, + 101, 789, 12, 345, 678, 901, 0, -1, -1, 0, 0, 0, 0}; const AudioDecodingCallStats kAudioDecodeStats = MakeAudioDecodeStatsForTest(); struct ConfigHelper { explicit ConfigHelper(bool 
use_null_audio_processing) - : ConfigHelper(new rtc::RefCountedObject(), + : ConfigHelper(rtc::make_ref_counted(), use_null_audio_processing) {} ConfigHelper(rtc::scoped_refptr audio_mixer, @@ -89,9 +87,9 @@ struct ConfigHelper { config.audio_processing = use_null_audio_processing ? nullptr - : new rtc::RefCountedObject(); + : rtc::make_ref_counted>(); config.audio_device_module = - new rtc::RefCountedObject>(); + rtc::make_ref_counted>(); audio_state_ = AudioState::Create(config); channel_receive_ = new ::testing::StrictMock(); @@ -106,8 +104,7 @@ struct ConfigHelper { .WillRepeatedly(Invoke([](const std::map& codecs) { EXPECT_THAT(codecs, ::testing::IsEmpty()); })); - EXPECT_CALL(*channel_receive_, SetDepacketizerToDecoderFrameTransformer(_)) - .Times(1); + EXPECT_CALL(*channel_receive_, SetSourceTracker(_)); stream_config_.rtp.local_ssrc = kLocalSsrc; stream_config_.rtp.remote_ssrc = kRemoteSsrc; @@ -118,15 +115,16 @@ struct ConfigHelper { RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId)); stream_config_.rtcp_send_transport = &rtcp_send_transport_; stream_config_.decoder_factory = - new rtc::RefCountedObject; + rtc::make_ref_counted(); } std::unique_ptr CreateAudioReceiveStream() { - return std::unique_ptr( - new internal::AudioReceiveStream( - Clock::GetRealTimeClock(), &rtp_stream_receiver_controller_, - &packet_router_, stream_config_, audio_state_, &event_log_, - std::unique_ptr(channel_receive_))); + auto ret = std::make_unique( + Clock::GetRealTimeClock(), &packet_router_, stream_config_, + audio_state_, &event_log_, + std::unique_ptr(channel_receive_)); + ret->RegisterWithTransport(&rtp_stream_receiver_controller_); + return ret; } AudioReceiveStream::Config& config() { return stream_config_; } @@ -148,7 +146,7 @@ struct ConfigHelper { .WillOnce(Return(kTotalOutputEnergy)); EXPECT_CALL(*channel_receive_, GetTotalOutputDuration()) .WillOnce(Return(kTotalOutputDuration)); - EXPECT_CALL(*channel_receive_, GetNetworkStatistics()) + 
EXPECT_CALL(*channel_receive_, GetNetworkStatistics(_)) .WillOnce(Return(kNetworkStats)); EXPECT_CALL(*channel_receive_, GetDecodingCallStatistics()) .WillOnce(Return(kAudioDecodeStats)); @@ -169,45 +167,6 @@ struct ConfigHelper { MockTransport rtcp_send_transport_; }; -void BuildOneByteExtension(std::vector::iterator it, - int id, - uint32_t extension_value, - size_t value_length) { - const uint16_t kRtpOneByteHeaderExtensionId = 0xBEDE; - ByteWriter::WriteBigEndian(&(*it), kRtpOneByteHeaderExtensionId); - it += 2; - - ByteWriter::WriteBigEndian(&(*it), kOneByteExtensionLength / 4); - it += 2; - const size_t kExtensionDataLength = kOneByteExtensionLength - 1; - uint32_t shifted_value = extension_value - << (8 * (kExtensionDataLength - value_length)); - *it = (id << 4) + (static_cast(value_length) - 1); - ++it; - ByteWriter::WriteBigEndian(&(*it), - shifted_value); -} - -const std::vector CreateRtpHeaderWithOneByteExtension( - int extension_id, - uint32_t extension_value, - size_t value_length) { - std::vector header; - header.resize(webrtc::kRtpHeaderSize + kOneByteExtensionHeaderLength + - kOneByteExtensionLength); - header[0] = 0x80; // Version 2. - header[0] |= 0x10; // Set extension bit. - header[1] = 100; // Payload type. - header[1] |= 0x80; // Marker bit is set. - ByteWriter::WriteBigEndian(&header[2], 0x1234); // Sequence number. - ByteWriter::WriteBigEndian(&header[4], 0x5678); // Timestamp. - ByteWriter::WriteBigEndian(&header[8], 0x4321); // SSRC. - - BuildOneByteExtension(header.begin() + webrtc::kRtpHeaderSize, extension_id, - extension_value, value_length); - return header; -} - const std::vector CreateRtcpSenderReport() { std::vector packet; const size_t kRtcpSrLength = 28; // In bytes. 
@@ -239,27 +198,7 @@ TEST(AudioReceiveStreamTest, ConstructDestruct) { for (bool use_null_audio_processing : {false, true}) { ConfigHelper helper(use_null_audio_processing); auto recv_stream = helper.CreateAudioReceiveStream(); - } -} - -TEST(AudioReceiveStreamTest, ReceiveRtpPacket) { - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - helper.config().rtp.transport_cc = true; - auto recv_stream = helper.CreateAudioReceiveStream(); - const int kTransportSequenceNumberValue = 1234; - std::vector rtp_packet = CreateRtpHeaderWithOneByteExtension( - kTransportSequenceNumberId, kTransportSequenceNumberValue, 2); - constexpr int64_t packet_time_us = 5678000; - - RtpPacketReceived parsed_packet; - ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size())); - parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000); - - EXPECT_CALL(*helper.channel_receive(), - OnRtpPacket(::testing::Ref(parsed_packet))); - - recv_stream->OnRtpPacket(parsed_packet); + recv_stream->UnregisterFromTransport(); } } @@ -273,6 +212,7 @@ TEST(AudioReceiveStreamTest, ReceiveRtcpPacket) { ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size())) .WillOnce(Return()); recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size()); + recv_stream->UnregisterFromTransport(); } } @@ -281,7 +221,8 @@ TEST(AudioReceiveStreamTest, GetStats) { ConfigHelper helper(use_null_audio_processing); auto recv_stream = helper.CreateAudioReceiveStream(); helper.SetupMockForGetStats(); - AudioReceiveStream::Stats stats = recv_stream->GetStats(); + AudioReceiveStream::Stats stats = + recv_stream->GetStats(/*get_and_clear_legacy_stats=*/true); EXPECT_EQ(kRemoteSsrc, stats.remote_ssrc); EXPECT_EQ(kCallStats.payload_bytes_rcvd, stats.payload_bytes_rcvd); EXPECT_EQ(kCallStats.header_and_padding_bytes_rcvd, @@ -336,6 +277,7 @@ TEST(AudioReceiveStreamTest, GetStats) { EXPECT_EQ(kCallStats.capture_start_ntp_time_ms_, stats.capture_start_ntp_time_ms); 
EXPECT_EQ(kPlayoutNtpTimestampMs, stats.estimated_playout_ntp_timestamp_ms); + recv_stream->UnregisterFromTransport(); } } @@ -346,6 +288,7 @@ TEST(AudioReceiveStreamTest, SetGain) { EXPECT_CALL(*helper.channel_receive(), SetChannelOutputVolumeScaling(FloatEq(0.765f))); recv_stream->SetGain(0.765f); + recv_stream->UnregisterFromTransport(); } } @@ -377,14 +320,9 @@ TEST(AudioReceiveStreamTest, StreamsShouldBeAddedToMixerOnceOnStart) { // Stop stream before it is being destructed. recv_stream2->Stop(); - } -} -TEST(AudioReceiveStreamTest, ReconfigureWithSameConfig) { - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - auto recv_stream = helper.CreateAudioReceiveStream(); - recv_stream->Reconfigure(helper.config()); + recv_stream1->UnregisterFromTransport(); + recv_stream2->UnregisterFromTransport(); } } @@ -394,20 +332,32 @@ TEST(AudioReceiveStreamTest, ReconfigureWithUpdatedConfig) { auto recv_stream = helper.CreateAudioReceiveStream(); auto new_config = helper.config(); - new_config.rtp.nack.rtp_history_ms = 300 + 20; + new_config.rtp.extensions.clear(); new_config.rtp.extensions.push_back( RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId + 1)); new_config.rtp.extensions.push_back( RtpExtension(RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId + 1)); - new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1)); MockChannelReceive& channel_receive = *helper.channel_receive(); - EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1); + + // TODO(tommi, nisse): This applies new extensions to the internal config, + // but there's nothing that actually verifies that the changes take effect. + // In fact Call manages the extensions separately in Call::ReceiveRtpConfig + // and changing this config value (there seem to be a few copies), doesn't + // affect that logic. 
+ recv_stream->ReconfigureForTesting(new_config); + + new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1)); EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map)); + recv_stream->SetDecoderMap(new_config.decoder_map); + + EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1); + recv_stream->SetUseTransportCcAndNackHistory(new_config.rtp.transport_cc, + 300 + 20); - recv_stream->Reconfigure(new_config); + recv_stream->UnregisterFromTransport(); } } @@ -418,17 +368,23 @@ TEST(AudioReceiveStreamTest, ReconfigureWithFrameDecryptor) { auto new_config_0 = helper.config(); rtc::scoped_refptr mock_frame_decryptor_0( - new rtc::RefCountedObject()); + rtc::make_ref_counted()); new_config_0.frame_decryptor = mock_frame_decryptor_0; - recv_stream->Reconfigure(new_config_0); + // TODO(tommi): While this changes the internal config value, it doesn't + // actually change what frame_decryptor is used. WebRtcAudioReceiveStream + // recreates the whole instance in order to change this value. + // So, it's not clear if changing this post initialization needs to be + // supported. 
+ recv_stream->ReconfigureForTesting(new_config_0); auto new_config_1 = helper.config(); rtc::scoped_refptr mock_frame_decryptor_1( - new rtc::RefCountedObject()); + rtc::make_ref_counted()); new_config_1.frame_decryptor = mock_frame_decryptor_1; new_config_1.crypto_options.sframe.require_frame_encryption = true; - recv_stream->Reconfigure(new_config_1); + recv_stream->ReconfigureForTesting(new_config_1); + recv_stream->UnregisterFromTransport(); } } diff --git a/audio/audio_send_stream.cc b/audio/audio_send_stream.cc index cdeea1a107..62dd53d337 100644 --- a/audio/audio_send_stream.cc +++ b/audio/audio_send_stream.cc @@ -31,6 +31,7 @@ #include "logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h" #include "logging/rtc_event_log/rtc_stream_config.h" #include "modules/audio_coding/codecs/cng/audio_encoder_cng.h" +#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h" #include "modules/audio_processing/include/audio_processing.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "rtc_base/checks.h" @@ -101,7 +102,6 @@ AudioSendStream::AudioSendStream( const webrtc::AudioSendStream::Config& config, const rtc::scoped_refptr& audio_state, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, RtpTransportControllerSendInterface* rtp_transport, BitrateAllocatorInterface* bitrate_allocator, RtcEventLog* event_log, @@ -115,18 +115,19 @@ AudioSendStream::AudioSendStream( bitrate_allocator, event_log, suspended_rtp_state, - voe::CreateChannelSend(clock, - task_queue_factory, - module_process_thread, - config.send_transport, - rtcp_rtt_stats, - event_log, - config.frame_encryptor, - config.crypto_options, - config.rtp.extmap_allow_mixed, - config.rtcp_report_interval_ms, - config.rtp.ssrc, - config.frame_transformer)) {} + voe::CreateChannelSend( + clock, + task_queue_factory, + config.send_transport, + rtcp_rtt_stats, + event_log, + config.frame_encryptor, + config.crypto_options, + 
config.rtp.extmap_allow_mixed, + config.rtcp_report_interval_ms, + config.rtp.ssrc, + config.frame_transformer, + rtp_transport->transport_feedback_observer())) {} AudioSendStream::AudioSendStream( Clock* clock, @@ -139,14 +140,13 @@ AudioSendStream::AudioSendStream( const absl::optional& suspended_rtp_state, std::unique_ptr channel_send) : clock_(clock), - worker_queue_(rtp_transport->GetWorkerQueue()), - audio_send_side_bwe_(field_trial::IsEnabled("WebRTC-Audio-SendSideBwe")), + rtp_transport_queue_(rtp_transport->GetWorkerQueue()), allocate_audio_without_feedback_( field_trial::IsEnabled("WebRTC-Audio-ABWENoTWCC")), enable_audio_alr_probing_( !field_trial::IsDisabled("WebRTC-Audio-AlrProbing")), send_side_bwe_with_overhead_( - field_trial::IsEnabled("WebRTC-SendSideBwe-WithOverhead")), + !field_trial::IsDisabled("WebRTC-SendSideBwe-WithOverhead")), config_(Config(/*send_transport=*/nullptr)), audio_state_(audio_state), channel_send_(std::move(channel_send)), @@ -158,7 +158,7 @@ AudioSendStream::AudioSendStream( rtp_rtcp_module_(channel_send_->GetRtpRtcp()), suspended_rtp_state_(suspended_rtp_state) { RTC_LOG(LS_INFO) << "AudioSendStream: " << config.rtp.ssrc; - RTC_DCHECK(worker_queue_); + RTC_DCHECK(rtp_transport_queue_); RTC_DCHECK(audio_state_); RTC_DCHECK(channel_send_); RTC_DCHECK(bitrate_allocator_); @@ -166,31 +166,32 @@ AudioSendStream::AudioSendStream( RTC_DCHECK(rtp_rtcp_module_); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); ConfigureStream(config, true); - + UpdateCachedTargetAudioBitrateConstraints(); pacer_thread_checker_.Detach(); } AudioSendStream::~AudioSendStream() { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); RTC_LOG(LS_INFO) << "~AudioSendStream: " << config_.rtp.ssrc; RTC_DCHECK(!sending_); channel_send_->ResetSenderCongestionControlObjects(); // Blocking call to synchronize state with worker queue to ensure that there // are no pending tasks left that keeps references to audio. 
rtc::Event thread_sync_event; - worker_queue_->PostTask([&] { thread_sync_event.Set(); }); + rtp_transport_queue_->PostTask([&] { thread_sync_event.Set(); }); thread_sync_event.Wait(rtc::Event::kForever); } const webrtc::AudioSendStream::Config& AudioSendStream::GetConfig() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); return config_; } void AudioSendStream::Reconfigure( const webrtc::AudioSendStream::Config& new_config) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); ConfigureStream(new_config, false); } @@ -286,7 +287,7 @@ void AudioSendStream::ConfigureStream( RtcpBandwidthObserver* bandwidth_observer = nullptr; - if (audio_send_side_bwe_ && !allocate_audio_without_feedback_ && + if (!allocate_audio_without_feedback_ && new_ids.transport_sequence_number != 0) { rtp_rtcp_module_->RegisterRtpHeaderExtension( TransportSequenceNumber::kUri, new_ids.transport_sequence_number); @@ -344,25 +345,27 @@ void AudioSendStream::ConfigureStream( // Set currently known overhead (used in ANA, opus only). 
{ - rtc::CritScope cs(&overhead_per_packet_lock_); + MutexLock lock(&overhead_per_packet_lock_); UpdateOverheadForEncoder(); } channel_send_->CallEncoder([this](AudioEncoder* encoder) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); if (!encoder) { return; } - worker_queue_->PostTask( - [this, length_range = encoder->GetFrameLengthRange()] { - RTC_DCHECK_RUN_ON(worker_queue_); - frame_length_range_ = length_range; - }); + frame_length_range_ = encoder->GetFrameLengthRange(); + UpdateCachedTargetAudioBitrateConstraints(); }); if (sending_) { ReconfigureBitrateObserver(new_config); } + config_ = new_config; + if (!first_time) { + UpdateCachedTargetAudioBitrateConstraints(); + } } void AudioSendStream::Start() { @@ -377,13 +380,7 @@ void AudioSendStream::Start() { if (send_side_bwe_with_overhead_) rtp_transport_->IncludeOverheadInPacedSender(); rtp_rtcp_module_->SetAsPartOfAllocation(true); - rtc::Event thread_sync_event; - worker_queue_->PostTask([&] { - RTC_DCHECK_RUN_ON(worker_queue_); - ConfigureBitrateObserver(); - thread_sync_event.Set(); - }); - thread_sync_event.Wait(rtc::Event::kForever); + ConfigureBitrateObserver(); } else { rtp_rtcp_module_->SetAsPartOfAllocation(false); } @@ -394,7 +391,7 @@ void AudioSendStream::Start() { } void AudioSendStream::Stop() { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); if (!sending_) { return; } @@ -419,7 +416,7 @@ void AudioSendStream::SendAudioData(std::unique_ptr audio_frame) { // TODO(https://crbug.com/webrtc/10771): All "media-source" related stats // should move from send-streams to the local audio sources or tracks; a // send-stream should not be required to read the microphone audio levels. 
- rtc::CritScope cs(&audio_level_lock_); + MutexLock lock(&audio_level_lock_); audio_level_.ComputeLevel(*audio_frame, duration); } channel_send_->ProcessAndEncodeAudio(std::move(audio_frame)); @@ -429,14 +426,14 @@ bool AudioSendStream::SendTelephoneEvent(int payload_type, int payload_frequency, int event, int duration_ms) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); channel_send_->SetSendTelephoneEventPayloadType(payload_type, payload_frequency); return channel_send_->SendTelephoneEventOutband(event, duration_ms); } void AudioSendStream::SetMuted(bool muted) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); channel_send_->SetInputMute(muted); } @@ -446,7 +443,7 @@ webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const { webrtc::AudioSendStream::Stats AudioSendStream::GetStats( bool has_remote_tracks) const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); webrtc::AudioSendStream::Stats stats; stats.local_ssrc = config_.rtp.ssrc; stats.target_bitrate_bps = channel_send_->GetBitrate(); @@ -485,7 +482,7 @@ webrtc::AudioSendStream::Stats AudioSendStream::GetStats( } { - rtc::CritScope cs(&audio_level_lock_); + MutexLock lock(&audio_level_lock_); stats.audio_level = audio_level_.LevelFullRange(); stats.total_input_energy = audio_level_.TotalEnergy(); stats.total_input_duration = audio_level_.TotalDuration(); @@ -501,32 +498,35 @@ webrtc::AudioSendStream::Stats AudioSendStream::GetStats( stats.report_block_datas = std::move(call_stats.report_block_datas); + stats.nacks_rcvd = call_stats.nacks_rcvd; + return stats; } void AudioSendStream::DeliverRtcp(const uint8_t* packet, size_t length) { - // TODO(solenberg): Tests call this function on a network thread, libjingle - // calls on the worker thread. We should move towards always using a network - // thread. Then this check can be enabled. 
- // RTC_DCHECK(!worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); channel_send_->ReceivedRTCPPacket(packet, length); - worker_queue_->PostTask([&]() { + + { // Poll if overhead has changed, which it can do if ack triggers us to stop // sending mid/rid. - rtc::CritScope cs(&overhead_per_packet_lock_); + MutexLock lock(&overhead_per_packet_lock_); UpdateOverheadForEncoder(); - }); + } + UpdateCachedTargetAudioBitrateConstraints(); } uint32_t AudioSendStream::OnBitrateUpdated(BitrateAllocationUpdate update) { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); // Pick a target bitrate between the constraints. Overrules the allocator if // it 1) allocated a bitrate of zero to disable the stream or 2) allocated a // higher than max to allow for e.g. extra FEC. - auto constraints = GetMinMaxBitrateConstraints(); - update.target_bitrate.Clamp(constraints.min, constraints.max); - update.stable_target_bitrate.Clamp(constraints.min, constraints.max); + RTC_DCHECK(cached_constraints_.has_value()); + update.target_bitrate.Clamp(cached_constraints_->min, + cached_constraints_->max); + update.stable_target_bitrate.Clamp(cached_constraints_->min, + cached_constraints_->max); channel_send_->OnBitrateAllocation(update); @@ -537,13 +537,17 @@ uint32_t AudioSendStream::OnBitrateUpdated(BitrateAllocationUpdate update) { void AudioSendStream::SetTransportOverhead( int transport_overhead_per_packet_bytes) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - rtc::CritScope cs(&overhead_per_packet_lock_); - transport_overhead_per_packet_bytes_ = transport_overhead_per_packet_bytes; - UpdateOverheadForEncoder(); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + { + MutexLock lock(&overhead_per_packet_lock_); + transport_overhead_per_packet_bytes_ = transport_overhead_per_packet_bytes; + UpdateOverheadForEncoder(); + } + UpdateCachedTargetAudioBitrateConstraints(); } void AudioSendStream::UpdateOverheadForEncoder() { + 
RTC_DCHECK_RUN_ON(&worker_thread_checker_); size_t overhead_per_packet_bytes = GetPerPacketOverheadBytes(); if (overhead_per_packet_ == overhead_per_packet_bytes) { return; @@ -553,24 +557,16 @@ void AudioSendStream::UpdateOverheadForEncoder() { channel_send_->CallEncoder([&](AudioEncoder* encoder) { encoder->OnReceivedOverhead(overhead_per_packet_bytes); }); - auto update_task = [this, overhead_per_packet_bytes] { - RTC_DCHECK_RUN_ON(worker_queue_); - if (total_packet_overhead_bytes_ != overhead_per_packet_bytes) { - total_packet_overhead_bytes_ = overhead_per_packet_bytes; - if (registered_with_allocator_) { - ConfigureBitrateObserver(); - } + if (total_packet_overhead_bytes_ != overhead_per_packet_bytes) { + total_packet_overhead_bytes_ = overhead_per_packet_bytes; + if (registered_with_allocator_) { + ConfigureBitrateObserver(); } - }; - if (worker_queue_->IsCurrent()) { - update_task(); - } else { - worker_queue_->PostTask(update_task); } } size_t AudioSendStream::TestOnlyGetPerPacketOverheadBytes() const { - rtc::CritScope cs(&overhead_per_packet_lock_); + MutexLock lock(&overhead_per_packet_lock_); return GetPerPacketOverheadBytes(); } @@ -603,7 +599,6 @@ const internal::AudioState* AudioSendStream::audio_state() const { void AudioSendStream::StoreEncoderProperties(int sample_rate_hz, size_t num_channels) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); encoder_sample_rate_hz_ = sample_rate_hz; encoder_num_channels_ = num_channels; if (sending_) { @@ -638,15 +633,15 @@ bool AudioSendStream::SetupSendCodec(const Config& new_config) { if (new_config.audio_network_adaptor_config) { if (encoder->EnableAudioNetworkAdaptor( *new_config.audio_network_adaptor_config, event_log_)) { - RTC_DLOG(LS_INFO) << "Audio network adaptor enabled on SSRC " - << new_config.rtp.ssrc; + RTC_LOG(LS_INFO) << "Audio network adaptor enabled on SSRC " + << new_config.rtp.ssrc; } else { - RTC_DLOG(LS_INFO) << "Failed to enable Audio network adaptor on SSRC " - << new_config.rtp.ssrc; 
+ RTC_LOG(LS_INFO) << "Failed to enable Audio network adaptor on SSRC " + << new_config.rtp.ssrc; } } - // Wrap the encoder in a an AudioEncoderCNG, if VAD is enabled. + // Wrap the encoder in an AudioEncoderCNG, if VAD is enabled. if (spec.cng_payload_type) { AudioEncoderCngConfig cng_config; cng_config.num_channels = encoder->NumChannels(); @@ -659,10 +654,18 @@ bool AudioSendStream::SetupSendCodec(const Config& new_config) { new_config.send_codec_spec->format.clockrate_hz); } + // Wrap the encoder in a RED encoder, if RED is enabled. + if (spec.red_payload_type) { + AudioEncoderCopyRed::Config red_config; + red_config.payload_type = *spec.red_payload_type; + red_config.speech_encoder = std::move(encoder); + encoder = std::make_unique(std::move(red_config)); + } + // Set currently known overhead (used in ANA, opus only). // If overhead changes later, it will be updated in UpdateOverheadForEncoder. { - rtc::CritScope cs(&overhead_per_packet_lock_); + MutexLock lock(&overhead_per_packet_lock_); size_t overhead = GetPerPacketOverheadBytes(); if (overhead > 0) { encoder->OnReceivedOverhead(overhead); } @@ -726,21 +729,29 @@ void AudioSendStream::ReconfigureANA(const Config& new_config) { return; } if (new_config.audio_network_adaptor_config) { + // This lock needs to be acquired before CallEncoder, since it acquires + // another lock and we need to maintain the same order at all call sites to + // avoid deadlock.
+ MutexLock lock(&overhead_per_packet_lock_); + size_t overhead = GetPerPacketOverheadBytes(); channel_send_->CallEncoder([&](AudioEncoder* encoder) { if (encoder->EnableAudioNetworkAdaptor( *new_config.audio_network_adaptor_config, event_log_)) { - RTC_DLOG(LS_INFO) << "Audio network adaptor enabled on SSRC " - << new_config.rtp.ssrc; + RTC_LOG(LS_INFO) << "Audio network adaptor enabled on SSRC " + << new_config.rtp.ssrc; + if (overhead > 0) { + encoder->OnReceivedOverhead(overhead); + } } else { - RTC_DLOG(LS_INFO) << "Failed to enable Audio network adaptor on SSRC " - << new_config.rtp.ssrc; + RTC_LOG(LS_INFO) << "Failed to enable Audio network adaptor on SSRC " + << new_config.rtp.ssrc; } }); } else { channel_send_->CallEncoder( [&](AudioEncoder* encoder) { encoder->DisableAudioNetworkAdaptor(); }); - RTC_DLOG(LS_INFO) << "Audio network adaptor disabled on SSRC " - << new_config.rtp.ssrc; + RTC_LOG(LS_INFO) << "Audio network adaptor disabled on SSRC " + << new_config.rtp.ssrc; } } @@ -785,7 +796,6 @@ void AudioSendStream::ReconfigureCNG(const Config& new_config) { void AudioSendStream::ReconfigureBitrateObserver( const webrtc::AudioSendStream::Config& new_config) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); // Since the Config's default is for both of these to be -1, this test will // allow us to configure the bitrate observer if the new config has bitrate // limits set, but would only have us call RemoveBitrateObserver if we were @@ -793,8 +803,7 @@ void AudioSendStream::ReconfigureBitrateObserver( if (config_.min_bitrate_bps == new_config.min_bitrate_bps && config_.max_bitrate_bps == new_config.max_bitrate_bps && config_.bitrate_priority == new_config.bitrate_priority && - (TransportSeqNumId(config_) == TransportSeqNumId(new_config) || - !audio_send_side_bwe_) && + TransportSeqNumId(config_) == TransportSeqNumId(new_config) && config_.audio_network_adaptor_config == new_config.audio_network_adaptor_config) { return; @@ -805,20 +814,13 @@ void 
AudioSendStream::ReconfigureBitrateObserver( rtp_transport_->AccountForAudioPacketsInPacedSender(true); if (send_side_bwe_with_overhead_) rtp_transport_->IncludeOverheadInPacedSender(); - rtc::Event thread_sync_event; - worker_queue_->PostTask([&] { - RTC_DCHECK_RUN_ON(worker_queue_); - // We may get a callback immediately as the observer is registered, so - // make - // sure the bitrate limits in config_ are up-to-date. - config_.min_bitrate_bps = new_config.min_bitrate_bps; - config_.max_bitrate_bps = new_config.max_bitrate_bps; - - config_.bitrate_priority = new_config.bitrate_priority; - ConfigureBitrateObserver(); - thread_sync_event.Set(); - }); - thread_sync_event.Wait(rtc::Event::kForever); + // We may get a callback immediately as the observer is registered, so + // make sure the bitrate limits in config_ are up-to-date. + config_.min_bitrate_bps = new_config.min_bitrate_bps; + config_.max_bitrate_bps = new_config.max_bitrate_bps; + + config_.bitrate_priority = new_config.bitrate_priority; + ConfigureBitrateObserver(); rtp_rtcp_module_->SetAsPartOfAllocation(true); } else { rtp_transport_->AccountForAudioPacketsInPacedSender(false); @@ -831,6 +833,7 @@ void AudioSendStream::ConfigureBitrateObserver() { // This either updates the current observer or adds a new observer. // TODO(srte): Add overhead compensation here. 
auto constraints = GetMinMaxBitrateConstraints(); + RTC_DCHECK(constraints.has_value()); DataRate priority_bitrate = allocation_settings_.priority_bitrate; if (send_side_bwe_with_overhead_) { @@ -852,30 +855,41 @@ void AudioSendStream::ConfigureBitrateObserver() { if (allocation_settings_.priority_bitrate_raw) priority_bitrate = *allocation_settings_.priority_bitrate_raw; - bitrate_allocator_->AddObserver( - this, - MediaStreamAllocationConfig{ - constraints.min.bps(), constraints.max.bps(), 0, - priority_bitrate.bps(), true, - allocation_settings_.bitrate_priority.value_or( - config_.bitrate_priority)}); + rtp_transport_queue_->PostTask([this, constraints, priority_bitrate, + config_bitrate_priority = + config_.bitrate_priority] { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); + bitrate_allocator_->AddObserver( + this, + MediaStreamAllocationConfig{ + constraints->min.bps(), constraints->max.bps(), + 0, priority_bitrate.bps(), true, + allocation_settings_.bitrate_priority.value_or( + config_bitrate_priority)}); + }); registered_with_allocator_ = true; } void AudioSendStream::RemoveBitrateObserver() { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + registered_with_allocator_ = false; rtc::Event thread_sync_event; - worker_queue_->PostTask([this, &thread_sync_event] { - RTC_DCHECK_RUN_ON(worker_queue_); - registered_with_allocator_ = false; + rtp_transport_queue_->PostTask([this, &thread_sync_event] { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); bitrate_allocator_->RemoveObserver(this); thread_sync_event.Set(); }); thread_sync_event.Wait(rtc::Event::kForever); } -AudioSendStream::TargetAudioBitrateConstraints +absl::optional AudioSendStream::GetMinMaxBitrateConstraints() const { + if (config_.min_bitrate_bps < 0 || config_.max_bitrate_bps < 0) { + RTC_LOG(LS_WARNING) << "Config is invalid: min_bitrate_bps=" + << config_.min_bitrate_bps + << "; max_bitrate_bps=" << config_.max_bitrate_bps + << "; both expected greater or equal to 0"; + return absl::nullopt; + } 
TargetAudioBitrateConstraints constraints{ DataRate::BitsPerSec(config_.min_bitrate_bps), DataRate::BitsPerSec(config_.max_bitrate_bps)}; @@ -888,7 +902,11 @@ AudioSendStream::GetMinMaxBitrateConstraints() const { RTC_DCHECK_GE(constraints.min, DataRate::Zero()); RTC_DCHECK_GE(constraints.max, DataRate::Zero()); - RTC_DCHECK_GE(constraints.max, constraints.min); + if (constraints.max < constraints.min) { + RTC_LOG(LS_WARNING) << "TargetAudioBitrateConstraints::max is less than " + << "TargetAudioBitrateConstraints::min"; + return absl::nullopt; + } if (send_side_bwe_with_overhead_) { if (use_legacy_overhead_calculation_) { // OverheadPerPacket = Ipv4(20B) + UDP(8B) + SRTP(10B) + RTP(12) @@ -899,7 +917,10 @@ AudioSendStream::GetMinMaxBitrateConstraints() const { constraints.min += kMinOverhead; constraints.max += kMinOverhead; } else { - RTC_DCHECK(frame_length_range_); + if (!frame_length_range_.has_value()) { + RTC_LOG(LS_WARNING) << "frame_length_range_ is not set"; + return absl::nullopt; + } const DataSize kOverheadPerPacket = DataSize::Bytes(total_packet_overhead_bytes_); constraints.min += kOverheadPerPacket / frame_length_range_->second; @@ -913,5 +934,18 @@ void AudioSendStream::RegisterCngPayloadType(int payload_type, int clockrate_hz) { channel_send_->RegisterCngPayloadType(payload_type, clockrate_hz); } + +void AudioSendStream::UpdateCachedTargetAudioBitrateConstraints() { + absl::optional + new_constraints = GetMinMaxBitrateConstraints(); + if (!new_constraints.has_value()) { + return; + } + rtp_transport_queue_->PostTask([this, new_constraints]() { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); + cached_constraints_ = new_constraints; + }); +} + } // namespace internal } // namespace webrtc diff --git a/audio/audio_send_stream.h b/audio/audio_send_stream.h index 5f8a936aab..e0b15dc0c9 100644 --- a/audio/audio_send_stream.h +++ b/audio/audio_send_stream.h @@ -15,17 +15,17 @@ #include #include +#include "api/sequence_checker.h" #include "audio/audio_level.h" 
#include "audio/channel_send.h" #include "call/audio_send_stream.h" #include "call/audio_state.h" #include "call/bitrate_allocator.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" -#include "rtc_base/constructor_magic.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "rtc_base/experiments/struct_parameters_parser.h" #include "rtc_base/race_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" -#include "rtc_base/thread_checker.h" namespace webrtc { class RtcEventLog; @@ -58,7 +58,6 @@ class AudioSendStream final : public webrtc::AudioSendStream, const webrtc::AudioSendStream::Config& config, const rtc::scoped_refptr& audio_state, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, RtpTransportControllerSendInterface* rtp_transport, BitrateAllocatorInterface* bitrate_allocator, RtcEventLog* event_log, @@ -74,6 +73,11 @@ class AudioSendStream final : public webrtc::AudioSendStream, RtcEventLog* event_log, const absl::optional& suspended_rtp_state, std::unique_ptr channel_send); + + AudioSendStream() = delete; + AudioSendStream(const AudioSendStream&) = delete; + AudioSendStream& operator=(const AudioSendStream&) = delete; + ~AudioSendStream() override; // webrtc::AudioSendStream implementation. 
@@ -116,22 +120,29 @@ class AudioSendStream final : public webrtc::AudioSendStream, internal::AudioState* audio_state(); const internal::AudioState* audio_state() const; - void StoreEncoderProperties(int sample_rate_hz, size_t num_channels); - - void ConfigureStream(const Config& new_config, bool first_time); - bool SetupSendCodec(const Config& new_config); - bool ReconfigureSendCodec(const Config& new_config); - void ReconfigureANA(const Config& new_config); - void ReconfigureCNG(const Config& new_config); - void ReconfigureBitrateObserver(const Config& new_config); - - void ConfigureBitrateObserver() RTC_RUN_ON(worker_queue_); - void RemoveBitrateObserver(); + void StoreEncoderProperties(int sample_rate_hz, size_t num_channels) + RTC_RUN_ON(worker_thread_checker_); + + void ConfigureStream(const Config& new_config, bool first_time) + RTC_RUN_ON(worker_thread_checker_); + bool SetupSendCodec(const Config& new_config) + RTC_RUN_ON(worker_thread_checker_); + bool ReconfigureSendCodec(const Config& new_config) + RTC_RUN_ON(worker_thread_checker_); + void ReconfigureANA(const Config& new_config) + RTC_RUN_ON(worker_thread_checker_); + void ReconfigureCNG(const Config& new_config) + RTC_RUN_ON(worker_thread_checker_); + void ReconfigureBitrateObserver(const Config& new_config) + RTC_RUN_ON(worker_thread_checker_); + + void ConfigureBitrateObserver() RTC_RUN_ON(worker_thread_checker_); + void RemoveBitrateObserver() RTC_RUN_ON(worker_thread_checker_); // Returns bitrate constraints, maybe including overhead when enabled by // field trial. - TargetAudioBitrateConstraints GetMinMaxBitrateConstraints() const - RTC_RUN_ON(worker_queue_); + absl::optional GetMinMaxBitrateConstraints() + const RTC_RUN_ON(worker_thread_checker_); // Sets per-packet overhead on encoded (for ANA) based on current known values // of transport and packetization overheads. 
@@ -142,40 +153,48 @@ class AudioSendStream final : public webrtc::AudioSendStream, size_t GetPerPacketOverheadBytes() const RTC_EXCLUSIVE_LOCKS_REQUIRED(overhead_per_packet_lock_); - void RegisterCngPayloadType(int payload_type, int clockrate_hz); + void RegisterCngPayloadType(int payload_type, int clockrate_hz) + RTC_RUN_ON(worker_thread_checker_); + + void UpdateCachedTargetAudioBitrateConstraints() + RTC_RUN_ON(worker_thread_checker_); + Clock* clock_; - rtc::ThreadChecker worker_thread_checker_; - rtc::ThreadChecker pacer_thread_checker_; + SequenceChecker worker_thread_checker_; + SequenceChecker pacer_thread_checker_; rtc::RaceChecker audio_capture_race_checker_; - rtc::TaskQueue* worker_queue_; + rtc::TaskQueue* rtp_transport_queue_; - const bool audio_send_side_bwe_; const bool allocate_audio_without_feedback_; const bool force_no_audio_feedback_ = allocate_audio_without_feedback_; const bool enable_audio_alr_probing_; const bool send_side_bwe_with_overhead_; const AudioAllocationConfig allocation_settings_; - webrtc::AudioSendStream::Config config_; + webrtc::AudioSendStream::Config config_ + RTC_GUARDED_BY(worker_thread_checker_); rtc::scoped_refptr audio_state_; const std::unique_ptr channel_send_; RtcEventLog* const event_log_; const bool use_legacy_overhead_calculation_; - int encoder_sample_rate_hz_ = 0; - size_t encoder_num_channels_ = 0; - bool sending_ = false; - rtc::CriticalSection audio_level_lock_; + int encoder_sample_rate_hz_ RTC_GUARDED_BY(worker_thread_checker_) = 0; + size_t encoder_num_channels_ RTC_GUARDED_BY(worker_thread_checker_) = 0; + bool sending_ RTC_GUARDED_BY(worker_thread_checker_) = false; + mutable Mutex audio_level_lock_; // Keeps track of audio level, total audio energy and total samples duration. 
 // https://w3c.github.io/webrtc-stats/#dom-rtcaudiohandlerstats-totalaudioenergy - webrtc::voe::AudioLevel audio_level_; + webrtc::voe::AudioLevel audio_level_ RTC_GUARDED_BY(audio_level_lock_); BitrateAllocatorInterface* const bitrate_allocator_ - RTC_GUARDED_BY(worker_queue_); + RTC_GUARDED_BY(rtp_transport_queue_); + // Cached bitrate constraints; |cached_constraints_| must only be accessed + // from |rtp_transport_queue_|. + absl::optional + cached_constraints_ RTC_GUARDED_BY(rtp_transport_queue_) = absl::nullopt; RtpTransportControllerSendInterface* const rtp_transport_; - RtpRtcp* const rtp_rtcp_module_; + RtpRtcpInterface* const rtp_rtcp_module_; absl::optional const suspended_rtp_state_; // RFC 5285: Each distinct extension MUST have a unique ID. The value 0 is
size_t transport_overhead_per_packet_bytes_ RTC_GUARDED_BY(overhead_per_packet_lock_) = 0; - bool registered_with_allocator_ RTC_GUARDED_BY(worker_queue_) = false; - size_t total_packet_overhead_bytes_ RTC_GUARDED_BY(worker_queue_) = 0; + bool registered_with_allocator_ RTC_GUARDED_BY(worker_thread_checker_) = + false; + size_t total_packet_overhead_bytes_ RTC_GUARDED_BY(worker_thread_checker_) = + 0; absl::optional> frame_length_range_ - RTC_GUARDED_BY(worker_queue_); - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioSendStream); + RTC_GUARDED_BY(worker_thread_checker_); }; } // namespace internal } // namespace webrtc diff --git a/audio/audio_send_stream_tests.cc b/audio/audio_send_stream_tests.cc index d2ea99ce08..e3895039d8 100644 --- a/audio/audio_send_stream_tests.cc +++ b/audio/audio_send_stream_tests.cc @@ -188,17 +188,10 @@ class TransportWideSequenceNumberObserver : public AudioSendTest { }; TEST_F(AudioSendStreamCallTest, SendsTransportWideSequenceNumbersInFieldTrial) { - ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/"); TransportWideSequenceNumberObserver test(/*expect_sequence_number=*/true); RunBaseTest(&test); } -TEST_F(AudioSendStreamCallTest, - DoesNotSendTransportWideSequenceNumbersPerDefault) { - TransportWideSequenceNumberObserver test(/*expect_sequence_number=*/false); - RunBaseTest(&test); -} - TEST_F(AudioSendStreamCallTest, SendDtmf) { static const uint8_t kDtmfPayloadType = 120; static const int kDtmfPayloadFrequency = 8000; diff --git a/audio/audio_send_stream_unittest.cc b/audio/audio_send_stream_unittest.cc index 89f94cdb0c..357e08040c 100644 --- a/audio/audio_send_stream_unittest.cc +++ b/audio/audio_send_stream_unittest.cc @@ -45,8 +45,10 @@ using ::testing::_; using ::testing::AnyNumber; using ::testing::Eq; using ::testing::Field; +using ::testing::InSequence; using ::testing::Invoke; using ::testing::Ne; +using ::testing::NiceMock; using ::testing::Return; using ::testing::StrEq; @@ -119,7 +121,7 @@ std::unique_ptr 
SetupAudioEncoderMock( rtc::scoped_refptr SetupEncoderFactoryMock() { rtc::scoped_refptr factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); ON_CALL(*factory.get(), GetSupportedEncoders()) .WillByDefault(Return(std::vector( std::begin(kCodecSpecs), std::end(kCodecSpecs)))); @@ -152,7 +154,7 @@ struct ConfigHelper { audio_processing_( use_null_audio_processing ? nullptr - : new rtc::RefCountedObject()), + : rtc::make_ref_counted>()), bitrate_allocator_(&limit_observer_), worker_queue_(task_queue_factory_->CreateTaskQueue( "ConfigHelper_worker_queue", @@ -163,8 +165,7 @@ struct ConfigHelper { AudioState::Config config; config.audio_mixer = AudioMixerImpl::Create(); config.audio_processing = audio_processing_; - config.audio_device_module = - new rtc::RefCountedObject(); + config.audio_device_module = rtc::make_ref_counted(); audio_state_ = AudioState::Create(config); SetupDefaultChannelSend(audio_bwe_enabled); @@ -203,7 +204,7 @@ struct ConfigHelper { return *static_cast( stream_config_.encoder_factory.get()); } - MockRtpRtcp* rtp_rtcp() { return &rtp_rtcp_; } + MockRtpRtcpInterface* rtp_rtcp() { return &rtp_rtcp_; } MockChannelSend* channel_send() { return channel_send_; } RtpTransportControllerSendInterface* transport() { return &rtp_transport_; } @@ -332,7 +333,7 @@ struct ConfigHelper { ::testing::StrictMock bandwidth_observer_; ::testing::NiceMock event_log_; ::testing::NiceMock rtp_transport_; - ::testing::NiceMock rtp_rtcp_; + ::testing::NiceMock rtp_rtcp_; ::testing::NiceMock limit_observer_; BitrateAllocator bitrate_allocator_; // |worker_queue| is defined last to ensure all pending tasks are cancelled @@ -366,11 +367,13 @@ TEST(AudioSendStreamTest, ConfigToString) { config.rtp.c_name = kCName; config.min_bitrate_bps = 12000; config.max_bitrate_bps = 34000; + config.has_dscp = true; config.send_codec_spec = AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat); config.send_codec_spec->nack_enabled = true; 
config.send_codec_spec->transport_cc_enabled = false; config.send_codec_spec->cng_payload_type = 42; + config.send_codec_spec->red_payload_type = 43; config.encoder_factory = MockAudioEncoderFactory::CreateUnusedFactory(); config.rtp.extmap_allow_mixed = true; config.rtp.extensions.push_back( @@ -381,9 +384,10 @@ TEST(AudioSendStreamTest, ConfigToString) { "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], " "c_name: foo_name}, rtcp_report_interval_ms: 2500, " "send_transport: null, " - "min_bitrate_bps: 12000, max_bitrate_bps: 34000, " + "min_bitrate_bps: 12000, max_bitrate_bps: 34000, has " + "audio_network_adaptor_config: false, has_dscp: true, " "send_codec_spec: {nack_enabled: true, transport_cc_enabled: false, " - "cng_payload_type: 42, payload_type: 103, " + "cng_payload_type: 42, red_payload_type: 43, payload_type: 103, " "format: {name: isac, clockrate_hz: 16000, num_channels: 1, " "parameters: {}}}}", config.ToString()); @@ -417,7 +421,6 @@ TEST(AudioSendStreamTest, SetMuted) { } TEST(AudioSendStreamTest, AudioBweCorrectObjectsOnChannelProxy) { - ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/"); for (bool use_null_audio_processing : {false, true}) { ConfigHelper helper(true, true, use_null_audio_processing); auto send_stream = helper.CreateAudioSendStream(); @@ -519,14 +522,12 @@ TEST(AudioSendStreamTest, GetStatsAudioLevel) { TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) { for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(false, true, use_null_audio_processing); + ConfigHelper helper(true, true, use_null_audio_processing); helper.config().send_codec_spec = AudioSendStream::Config::SendCodecSpec(0, kOpusFormat); const std::string kAnaConfigString = "abcde"; const std::string kAnaReconfigString = "12345"; - helper.config().rtp.extensions.push_back(RtpExtension( - RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberId)); helper.config().audio_network_adaptor_config = 
kAnaConfigString; EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _)) @@ -553,6 +554,46 @@ TEST(AudioSendStreamTest, SendCodecAppliesAudioNetworkAdaptor) { } } +TEST(AudioSendStreamTest, AudioNetworkAdaptorReceivesOverhead) { + for (bool use_null_audio_processing : {false, true}) { + ConfigHelper helper(true, true, use_null_audio_processing); + helper.config().send_codec_spec = + AudioSendStream::Config::SendCodecSpec(0, kOpusFormat); + const std::string kAnaConfigString = "abcde"; + + EXPECT_CALL(helper.mock_encoder_factory(), MakeAudioEncoderMock(_, _, _, _)) + .WillOnce(Invoke( + [&kAnaConfigString](int payload_type, const SdpAudioFormat& format, + absl::optional codec_pair_id, + std::unique_ptr* return_value) { + auto mock_encoder = SetupAudioEncoderMock(payload_type, format); + InSequence s; + EXPECT_CALL( + *mock_encoder, + OnReceivedOverhead(Eq(kOverheadPerPacket.bytes()))) + .Times(2); + EXPECT_CALL(*mock_encoder, + EnableAudioNetworkAdaptor(StrEq(kAnaConfigString), _)) + .WillOnce(Return(true)); + // Note: Overhead is received AFTER ANA has been enabled. + EXPECT_CALL( + *mock_encoder, + OnReceivedOverhead(Eq(kOverheadPerPacket.bytes()))) + .WillOnce(Return()); + *return_value = std::move(mock_encoder); + })); + EXPECT_CALL(*helper.rtp_rtcp(), ExpectedPerPacketOverhead) + .WillRepeatedly(Return(kOverheadPerPacket.bytes())); + + auto send_stream = helper.CreateAudioSendStream(); + + auto stream_config = helper.config(); + stream_config.audio_network_adaptor_config = kAnaConfigString; + + send_stream->Reconfigure(stream_config); + } +} + // VAD is applied when codec is mono and the CNG frequency matches the codec // clock rate. 
TEST(AudioSendStreamTest, SendCodecCanApplyVad) { @@ -601,7 +642,6 @@ TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) { } TEST(AudioSendStreamTest, SSBweTargetInRangeRespected) { - ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/"); for (bool use_null_audio_processing : {false, true}) { ConfigHelper helper(true, true, use_null_audio_processing); auto send_stream = helper.CreateAudioSendStream(); @@ -620,7 +660,6 @@ TEST(AudioSendStreamTest, SSBweTargetInRangeRespected) { TEST(AudioSendStreamTest, SSBweFieldTrialMinRespected) { ScopedFieldTrials field_trials( - "WebRTC-Audio-SendSideBwe/Enabled/" "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/"); for (bool use_null_audio_processing : {false, true}) { ConfigHelper helper(true, true, use_null_audio_processing); @@ -638,7 +677,6 @@ TEST(AudioSendStreamTest, SSBweFieldTrialMinRespected) { TEST(AudioSendStreamTest, SSBweFieldTrialMaxRespected) { ScopedFieldTrials field_trials( - "WebRTC-Audio-SendSideBwe/Enabled/" "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/"); for (bool use_null_audio_processing : {false, true}) { ConfigHelper helper(true, true, use_null_audio_processing); @@ -656,8 +694,6 @@ TEST(AudioSendStreamTest, SSBweFieldTrialMaxRespected) { TEST(AudioSendStreamTest, SSBweWithOverhead) { ScopedFieldTrials field_trials( - "WebRTC-Audio-SendSideBwe/Enabled/" - "WebRTC-SendSideBwe-WithOverhead/Enabled/" "WebRTC-Audio-LegacyOverhead/Disabled/"); for (bool use_null_audio_processing : {false, true}) { ConfigHelper helper(true, true, use_null_audio_processing); @@ -679,8 +715,6 @@ TEST(AudioSendStreamTest, SSBweWithOverhead) { TEST(AudioSendStreamTest, SSBweWithOverheadMinRespected) { ScopedFieldTrials field_trials( - "WebRTC-Audio-SendSideBwe/Enabled/" - "WebRTC-SendSideBwe-WithOverhead/Enabled/" "WebRTC-Audio-LegacyOverhead/Disabled/" "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/"); for (bool use_null_audio_processing : {false, true}) { @@ -701,8 +735,6 @@ TEST(AudioSendStreamTest, 
SSBweWithOverheadMinRespected) { TEST(AudioSendStreamTest, SSBweWithOverheadMaxRespected) { ScopedFieldTrials field_trials( - "WebRTC-Audio-SendSideBwe/Enabled/" - "WebRTC-SendSideBwe-WithOverhead/Enabled/" "WebRTC-Audio-LegacyOverhead/Disabled/" "WebRTC-Audio-Allocation/min:6kbps,max:64kbps/"); for (bool use_null_audio_processing : {false, true}) { @@ -762,7 +794,6 @@ TEST(AudioSendStreamTest, DontRecreateEncoder) { } TEST(AudioSendStreamTest, ReconfigureTransportCcResetsFirst) { - ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/"); for (bool use_null_audio_processing : {false, true}) { ConfigHelper helper(false, true, use_null_audio_processing); auto send_stream = helper.CreateAudioSendStream(); @@ -891,7 +922,7 @@ TEST(AudioSendStreamTest, ReconfigureWithFrameEncryptor) { auto new_config = helper.config(); rtc::scoped_refptr mock_frame_encryptor_0( - new rtc::RefCountedObject()); + rtc::make_ref_counted()); new_config.frame_encryptor = mock_frame_encryptor_0; EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))) .Times(1); @@ -904,7 +935,7 @@ TEST(AudioSendStreamTest, ReconfigureWithFrameEncryptor) { // Updating frame encryptor to a new object should force a call to the // proxy. 
rtc::scoped_refptr mock_frame_encryptor_1( - new rtc::RefCountedObject()); + rtc::make_ref_counted()); new_config.frame_encryptor = mock_frame_encryptor_1; new_config.crypto_options.sframe.require_frame_encryption = true; EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))) diff --git a/audio/audio_state.cc b/audio/audio_state.cc index 73366e20a8..0e60f0372b 100644 --- a/audio/audio_state.cc +++ b/audio/audio_state.cc @@ -28,7 +28,9 @@ namespace internal { AudioState::AudioState(const AudioState::Config& config) : config_(config), - audio_transport_(config_.audio_mixer, config_.audio_processing.get()) { + audio_transport_(config_.audio_mixer, + config_.audio_processing.get(), + config_.async_audio_processing_factory.get()) { process_thread_checker_.Detach(); RTC_DCHECK(config_.audio_mixer); RTC_DCHECK(config_.audio_device_module); @@ -185,6 +187,6 @@ void AudioState::UpdateNullAudioPollerState() { rtc::scoped_refptr AudioState::Create( const AudioState::Config& config) { - return new rtc::RefCountedObject(config); + return rtc::make_ref_counted(config); } } // namespace webrtc diff --git a/audio/audio_state.h b/audio/audio_state.h index f696d5a8fe..89c748dc4e 100644 --- a/audio/audio_state.h +++ b/audio/audio_state.h @@ -15,13 +15,11 @@ #include #include +#include "api/sequence_checker.h" #include "audio/audio_transport_impl.h" #include "audio/null_audio_poller.h" #include "call/audio_state.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/ref_count.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -33,6 +31,11 @@ namespace internal { class AudioState : public webrtc::AudioState { public: explicit AudioState(const AudioState::Config& config); + + AudioState() = delete; + AudioState(const AudioState&) = delete; + AudioState& operator=(const AudioState&) = delete; + ~AudioState() override; AudioProcessing* audio_processing() override; @@ -62,8 +65,8 @@ class AudioState : public 
webrtc::AudioState { void UpdateAudioTransportWithSendingStreams(); void UpdateNullAudioPollerState(); - rtc::ThreadChecker thread_checker_; - rtc::ThreadChecker process_thread_checker_; + SequenceChecker thread_checker_; + SequenceChecker process_thread_checker_; const webrtc::AudioState::Config config_; bool recording_enabled_ = true; bool playout_enabled_ = true; @@ -83,8 +86,6 @@ class AudioState : public webrtc::AudioState { size_t num_channels = 0; }; std::map sending_streams_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioState); }; } // namespace internal } // namespace webrtc diff --git a/audio/audio_state_unittest.cc b/audio/audio_state_unittest.cc index 2bbe0fb0b7..5f07a7b339 100644 --- a/audio/audio_state_unittest.cc +++ b/audio/audio_state_unittest.cc @@ -11,6 +11,7 @@ #include "audio/audio_state.h" #include +#include #include #include "call/test/mock_audio_send_stream.h" @@ -26,28 +27,105 @@ namespace { using ::testing::_; using ::testing::Matcher; +using ::testing::NiceMock; +using ::testing::StrictMock; +using ::testing::Values; constexpr int kSampleRate = 16000; constexpr int kNumberOfChannels = 1; +struct FakeAsyncAudioProcessingHelper { + class FakeTaskQueue : public StrictMock { + public: + FakeTaskQueue() = default; + + void Delete() override { delete this; } + void PostTask(std::unique_ptr task) override { + std::move(task)->Run(); + } + MOCK_METHOD(void, + PostDelayedTask, + (std::unique_ptr task, uint32_t milliseconds), + (override)); + }; + + class FakeTaskQueueFactory : public TaskQueueFactory { + public: + FakeTaskQueueFactory() = default; + ~FakeTaskQueueFactory() override = default; + std::unique_ptr CreateTaskQueue( + absl::string_view name, + Priority priority) const override { + return std::unique_ptr( + new FakeTaskQueue()); + } + }; + + class MockAudioFrameProcessor : public AudioFrameProcessor { + public: + ~MockAudioFrameProcessor() override = default; + + MOCK_METHOD(void, ProcessCalled, ()); + MOCK_METHOD(void, SinkSet, ()); + 
MOCK_METHOD(void, SinkCleared, ()); + + void Process(std::unique_ptr frame) override { + ProcessCalled(); + sink_callback_(std::move(frame)); + } + + void SetSink(OnAudioFrameCallback sink_callback) override { + sink_callback_ = std::move(sink_callback); + if (sink_callback_ == nullptr) + SinkCleared(); + else + SinkSet(); + } + + private: + OnAudioFrameCallback sink_callback_; + }; + + NiceMock audio_frame_processor_; + FakeTaskQueueFactory task_queue_factory_; + + rtc::scoped_refptr CreateFactory() { + return rtc::make_ref_counted( + audio_frame_processor_, task_queue_factory_); + } +}; + struct ConfigHelper { - explicit ConfigHelper(bool use_null_audio_processing) + struct Params { + bool use_null_audio_processing; + bool use_async_audio_processing; + }; + + explicit ConfigHelper(const Params& params) : audio_mixer(AudioMixerImpl::Create()) { audio_state_config.audio_mixer = audio_mixer; audio_state_config.audio_processing = - use_null_audio_processing + params.use_null_audio_processing ? 
nullptr - : new rtc::RefCountedObject< - testing::NiceMock>(); + : rtc::make_ref_counted>(); audio_state_config.audio_device_module = - new rtc::RefCountedObject(); + rtc::make_ref_counted>(); + if (params.use_async_audio_processing) { + audio_state_config.async_audio_processing_factory = + async_audio_processing_helper_.CreateFactory(); + } } AudioState::Config& config() { return audio_state_config; } rtc::scoped_refptr mixer() { return audio_mixer; } + NiceMock& + mock_audio_frame_processor() { + return async_audio_processing_helper_.audio_frame_processor_; + } private: AudioState::Config audio_state_config; rtc::scoped_refptr audio_mixer; + FakeAsyncAudioProcessingHelper async_audio_processing_helper_; }; class FakeAudioSource : public AudioMixer::Source { @@ -93,184 +171,200 @@ std::vector ComputeChannelLevels(AudioFrame* audio_frame) { } } // namespace -TEST(AudioStateTest, Create) { - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - auto audio_state = AudioState::Create(helper.config()); - EXPECT_TRUE(audio_state.get()); - } +class AudioStateTest : public ::testing::TestWithParam {}; + +TEST_P(AudioStateTest, Create) { + ConfigHelper helper(GetParam()); + auto audio_state = AudioState::Create(helper.config()); + EXPECT_TRUE(audio_state.get()); } -TEST(AudioStateTest, ConstructDestruct) { - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - rtc::scoped_refptr audio_state( - new rtc::RefCountedObject(helper.config())); - } +TEST_P(AudioStateTest, ConstructDestruct) { + ConfigHelper helper(GetParam()); + rtc::scoped_refptr audio_state( + rtc::make_ref_counted(helper.config())); } -TEST(AudioStateTest, RecordedAudioArrivesAtSingleStream) { - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - rtc::scoped_refptr audio_state( - new rtc::RefCountedObject(helper.config())); - - MockAudioSendStream 
stream; - audio_state->AddSendingStream(&stream, 8000, 2); - - EXPECT_CALL( - stream, - SendAudioDataForMock(::testing::AllOf( - ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)), - ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u))))) - .WillOnce( - // Verify that channels are not swapped by default. - ::testing::Invoke([](AudioFrame* audio_frame) { - auto levels = ComputeChannelLevels(audio_frame); - EXPECT_LT(0u, levels[0]); - EXPECT_EQ(0u, levels[1]); - })); - MockAudioProcessing* ap = use_null_audio_processing - ? nullptr - : static_cast( - audio_state->audio_processing()); - if (ap) { - EXPECT_CALL(*ap, set_stream_delay_ms(0)); - EXPECT_CALL(*ap, set_stream_key_pressed(false)); - EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher(_))); - } +TEST_P(AudioStateTest, RecordedAudioArrivesAtSingleStream) { + ConfigHelper helper(GetParam()); - constexpr int kSampleRate = 16000; - constexpr size_t kNumChannels = 2; - auto audio_data = Create10msTestData(kSampleRate, kNumChannels); - uint32_t new_mic_level = 667; - audio_state->audio_transport()->RecordedDataIsAvailable( - &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, - kSampleRate, 0, 0, 0, false, new_mic_level); - EXPECT_EQ(667u, new_mic_level); + if (GetParam().use_async_audio_processing) { + EXPECT_CALL(helper.mock_audio_frame_processor(), SinkSet); + EXPECT_CALL(helper.mock_audio_frame_processor(), ProcessCalled); + EXPECT_CALL(helper.mock_audio_frame_processor(), SinkCleared); + } - audio_state->RemoveSendingStream(&stream); + rtc::scoped_refptr audio_state( + rtc::make_ref_counted(helper.config())); + + MockAudioSendStream stream; + audio_state->AddSendingStream(&stream, 8000, 2); + + EXPECT_CALL( + stream, + SendAudioDataForMock(::testing::AllOf( + ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(8000)), + ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(2u))))) + .WillOnce( + // Verify that channels are not swapped by default. 
+ ::testing::Invoke([](AudioFrame* audio_frame) { + auto levels = ComputeChannelLevels(audio_frame); + EXPECT_LT(0u, levels[0]); + EXPECT_EQ(0u, levels[1]); + })); + MockAudioProcessing* ap = + GetParam().use_null_audio_processing + ? nullptr + : static_cast(audio_state->audio_processing()); + if (ap) { + EXPECT_CALL(*ap, set_stream_delay_ms(0)); + EXPECT_CALL(*ap, set_stream_key_pressed(false)); + EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher(_))); } + + constexpr int kSampleRate = 16000; + constexpr size_t kNumChannels = 2; + auto audio_data = Create10msTestData(kSampleRate, kNumChannels); + uint32_t new_mic_level = 667; + audio_state->audio_transport()->RecordedDataIsAvailable( + &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, + kSampleRate, 0, 0, 0, false, new_mic_level); + EXPECT_EQ(667u, new_mic_level); + + audio_state->RemoveSendingStream(&stream); } -TEST(AudioStateTest, RecordedAudioArrivesAtMultipleStreams) { - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - rtc::scoped_refptr audio_state( - new rtc::RefCountedObject(helper.config())); - - MockAudioSendStream stream_1; - MockAudioSendStream stream_2; - audio_state->AddSendingStream(&stream_1, 8001, 2); - audio_state->AddSendingStream(&stream_2, 32000, 1); - - EXPECT_CALL( - stream_1, - SendAudioDataForMock(::testing::AllOf( - ::testing::Field(&AudioFrame::sample_rate_hz_, - ::testing::Eq(16000)), - ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u))))) - .WillOnce( - // Verify that there is output signal. - ::testing::Invoke([](AudioFrame* audio_frame) { - auto levels = ComputeChannelLevels(audio_frame); - EXPECT_LT(0u, levels[0]); - })); - EXPECT_CALL( - stream_2, - SendAudioDataForMock(::testing::AllOf( - ::testing::Field(&AudioFrame::sample_rate_hz_, - ::testing::Eq(16000)), - ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u))))) - .WillOnce( - // Verify that there is output signal. 
- ::testing::Invoke([](AudioFrame* audio_frame) { - auto levels = ComputeChannelLevels(audio_frame); - EXPECT_LT(0u, levels[0]); - })); - MockAudioProcessing* ap = - static_cast(audio_state->audio_processing()); - if (ap) { - EXPECT_CALL(*ap, set_stream_delay_ms(5)); - EXPECT_CALL(*ap, set_stream_key_pressed(true)); - EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher(_))); - } +TEST_P(AudioStateTest, RecordedAudioArrivesAtMultipleStreams) { + ConfigHelper helper(GetParam()); + + if (GetParam().use_async_audio_processing) { + EXPECT_CALL(helper.mock_audio_frame_processor(), SinkSet); + EXPECT_CALL(helper.mock_audio_frame_processor(), ProcessCalled); + EXPECT_CALL(helper.mock_audio_frame_processor(), SinkCleared); + } - constexpr int kSampleRate = 16000; - constexpr size_t kNumChannels = 1; - auto audio_data = Create10msTestData(kSampleRate, kNumChannels); - uint32_t new_mic_level = 667; - audio_state->audio_transport()->RecordedDataIsAvailable( - &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, - kSampleRate, 5, 0, 0, true, new_mic_level); - EXPECT_EQ(667u, new_mic_level); - - audio_state->RemoveSendingStream(&stream_1); - audio_state->RemoveSendingStream(&stream_2); + rtc::scoped_refptr audio_state( + rtc::make_ref_counted(helper.config())); + + MockAudioSendStream stream_1; + MockAudioSendStream stream_2; + audio_state->AddSendingStream(&stream_1, 8001, 2); + audio_state->AddSendingStream(&stream_2, 32000, 1); + + EXPECT_CALL( + stream_1, + SendAudioDataForMock(::testing::AllOf( + ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)), + ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u))))) + .WillOnce( + // Verify that there is output signal. 
+ ::testing::Invoke([](AudioFrame* audio_frame) { + auto levels = ComputeChannelLevels(audio_frame); + EXPECT_LT(0u, levels[0]); + })); + EXPECT_CALL( + stream_2, + SendAudioDataForMock(::testing::AllOf( + ::testing::Field(&AudioFrame::sample_rate_hz_, ::testing::Eq(16000)), + ::testing::Field(&AudioFrame::num_channels_, ::testing::Eq(1u))))) + .WillOnce( + // Verify that there is output signal. + ::testing::Invoke([](AudioFrame* audio_frame) { + auto levels = ComputeChannelLevels(audio_frame); + EXPECT_LT(0u, levels[0]); + })); + MockAudioProcessing* ap = + static_cast(audio_state->audio_processing()); + if (ap) { + EXPECT_CALL(*ap, set_stream_delay_ms(5)); + EXPECT_CALL(*ap, set_stream_key_pressed(true)); + EXPECT_CALL(*ap, ProcessStream(_, _, _, Matcher(_))); } + + constexpr int kSampleRate = 16000; + constexpr size_t kNumChannels = 1; + auto audio_data = Create10msTestData(kSampleRate, kNumChannels); + uint32_t new_mic_level = 667; + audio_state->audio_transport()->RecordedDataIsAvailable( + &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, + kSampleRate, 5, 0, 0, true, new_mic_level); + EXPECT_EQ(667u, new_mic_level); + + audio_state->RemoveSendingStream(&stream_1); + audio_state->RemoveSendingStream(&stream_2); } -TEST(AudioStateTest, EnableChannelSwap) { +TEST_P(AudioStateTest, EnableChannelSwap) { constexpr int kSampleRate = 16000; constexpr size_t kNumChannels = 2; - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - rtc::scoped_refptr audio_state( - new rtc::RefCountedObject(helper.config())); - - audio_state->SetStereoChannelSwapping(true); - - MockAudioSendStream stream; - audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels); - - EXPECT_CALL(stream, SendAudioDataForMock(_)) - .WillOnce( - // Verify that channels are swapped. 
- ::testing::Invoke([](AudioFrame* audio_frame) { - auto levels = ComputeChannelLevels(audio_frame); - EXPECT_EQ(0u, levels[0]); - EXPECT_LT(0u, levels[1]); - })); - - auto audio_data = Create10msTestData(kSampleRate, kNumChannels); - uint32_t new_mic_level = 667; - audio_state->audio_transport()->RecordedDataIsAvailable( - &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, - kSampleRate, 0, 0, 0, false, new_mic_level); - EXPECT_EQ(667u, new_mic_level); - - audio_state->RemoveSendingStream(&stream); + ConfigHelper helper(GetParam()); + + if (GetParam().use_async_audio_processing) { + EXPECT_CALL(helper.mock_audio_frame_processor(), SinkSet); + EXPECT_CALL(helper.mock_audio_frame_processor(), ProcessCalled); + EXPECT_CALL(helper.mock_audio_frame_processor(), SinkCleared); } + + rtc::scoped_refptr audio_state( + rtc::make_ref_counted(helper.config())); + + audio_state->SetStereoChannelSwapping(true); + + MockAudioSendStream stream; + audio_state->AddSendingStream(&stream, kSampleRate, kNumChannels); + + EXPECT_CALL(stream, SendAudioDataForMock(_)) + .WillOnce( + // Verify that channels are swapped. 
+ ::testing::Invoke([](AudioFrame* audio_frame) { + auto levels = ComputeChannelLevels(audio_frame); + EXPECT_EQ(0u, levels[0]); + EXPECT_LT(0u, levels[1]); + })); + + auto audio_data = Create10msTestData(kSampleRate, kNumChannels); + uint32_t new_mic_level = 667; + audio_state->audio_transport()->RecordedDataIsAvailable( + &audio_data[0], kSampleRate / 100, kNumChannels * 2, kNumChannels, + kSampleRate, 0, 0, 0, false, new_mic_level); + EXPECT_EQ(667u, new_mic_level); + + audio_state->RemoveSendingStream(&stream); } -TEST(AudioStateTest, - QueryingTransportForAudioShouldResultInGetAudioCallOnMixerSource) { - for (bool use_null_audio_processing : {false, true}) { - ConfigHelper helper(use_null_audio_processing); - auto audio_state = AudioState::Create(helper.config()); - - FakeAudioSource fake_source; - helper.mixer()->AddSource(&fake_source); - - EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _)) - .WillOnce( - ::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) { - audio_frame->sample_rate_hz_ = sample_rate_hz; - audio_frame->samples_per_channel_ = sample_rate_hz / 100; - audio_frame->num_channels_ = kNumberOfChannels; - return AudioMixer::Source::AudioFrameInfo::kNormal; - })); - - int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels]; - size_t n_samples_out; - int64_t elapsed_time_ms; - int64_t ntp_time_ms; - audio_state->audio_transport()->NeedMorePlayData( - kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels, - kSampleRate, audio_buffer, n_samples_out, &elapsed_time_ms, - &ntp_time_ms); - } +TEST_P(AudioStateTest, + QueryingTransportForAudioShouldResultInGetAudioCallOnMixerSource) { + ConfigHelper helper(GetParam()); + auto audio_state = AudioState::Create(helper.config()); + + FakeAudioSource fake_source; + helper.mixer()->AddSource(&fake_source); + + EXPECT_CALL(fake_source, GetAudioFrameWithInfo(_, _)) + .WillOnce( + ::testing::Invoke([](int sample_rate_hz, AudioFrame* audio_frame) { + audio_frame->sample_rate_hz_ = 
sample_rate_hz; + audio_frame->samples_per_channel_ = sample_rate_hz / 100; + audio_frame->num_channels_ = kNumberOfChannels; + return AudioMixer::Source::AudioFrameInfo::kNormal; + })); + + int16_t audio_buffer[kSampleRate / 100 * kNumberOfChannels]; + size_t n_samples_out; + int64_t elapsed_time_ms; + int64_t ntp_time_ms; + audio_state->audio_transport()->NeedMorePlayData( + kSampleRate / 100, kNumberOfChannels * 2, kNumberOfChannels, kSampleRate, + audio_buffer, n_samples_out, &elapsed_time_ms, &ntp_time_ms); } + +INSTANTIATE_TEST_SUITE_P(AudioStateTest, + AudioStateTest, + Values(ConfigHelper::Params({false, false}), + ConfigHelper::Params({true, false}), + ConfigHelper::Params({false, true}), + ConfigHelper::Params({true, true}))); + } // namespace test } // namespace webrtc diff --git a/audio/audio_transport_impl.cc b/audio/audio_transport_impl.cc index 7648fb948f..8710ced9b7 100644 --- a/audio/audio_transport_impl.cc +++ b/audio/audio_transport_impl.cc @@ -17,6 +17,7 @@ #include "audio/remix_resample.h" #include "audio/utility/audio_frame_operations.h" #include "call/audio_sender.h" +#include "modules/async_audio_processing/async_audio_processing.h" #include "modules/audio_processing/include/audio_frame_proxies.h" #include "rtc_base/checks.h" @@ -83,9 +84,19 @@ int Resample(const AudioFrame& frame, } } // namespace -AudioTransportImpl::AudioTransportImpl(AudioMixer* mixer, - AudioProcessing* audio_processing) - : audio_processing_(audio_processing), mixer_(mixer) { +AudioTransportImpl::AudioTransportImpl( + AudioMixer* mixer, + AudioProcessing* audio_processing, + AsyncAudioProcessing::Factory* async_audio_processing_factory) + : audio_processing_(audio_processing), + async_audio_processing_( + async_audio_processing_factory + ? 
async_audio_processing_factory->CreateAsyncAudioProcessing( + [this](std::unique_ptr frame) { + this->SendProcessedData(std::move(frame)); + }) + : nullptr), + mixer_(mixer) { RTC_DCHECK(mixer); } @@ -118,7 +129,7 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable( size_t send_num_channels = 0; bool swap_stereo_channels = false; { - rtc::CritScope lock(&capture_lock_); + MutexLock lock(&capture_lock_); send_sample_rate_hz = send_sample_rate_hz_; send_num_channels = send_num_channels_; swap_stereo_channels = swap_stereo_channels_; @@ -149,25 +160,36 @@ int32_t AudioTransportImpl::RecordedDataIsAvailable( // Copy frame and push to each sending stream. The copy is required since an // encoding task will be posted internally to each stream. { - rtc::CritScope lock(&capture_lock_); + MutexLock lock(&capture_lock_); typing_noise_detected_ = typing_detected; - - RTC_DCHECK_GT(audio_frame->samples_per_channel_, 0); - if (!audio_senders_.empty()) { - auto it = audio_senders_.begin(); - while (++it != audio_senders_.end()) { - std::unique_ptr audio_frame_copy(new AudioFrame()); - audio_frame_copy->CopyFrom(*audio_frame); - (*it)->SendAudioData(std::move(audio_frame_copy)); - } - // Send the original frame to the first stream w/o copying. 
- (*audio_senders_.begin())->SendAudioData(std::move(audio_frame)); - } } + RTC_DCHECK_GT(audio_frame->samples_per_channel_, 0); + if (async_audio_processing_) + async_audio_processing_->Process(std::move(audio_frame)); + else + SendProcessedData(std::move(audio_frame)); + return 0; } +void AudioTransportImpl::SendProcessedData( + std::unique_ptr audio_frame) { + RTC_DCHECK_GT(audio_frame->samples_per_channel_, 0); + MutexLock lock(&capture_lock_); + if (audio_senders_.empty()) + return; + + auto it = audio_senders_.begin(); + while (++it != audio_senders_.end()) { + auto audio_frame_copy = std::make_unique(); + audio_frame_copy->CopyFrom(*audio_frame); + (*it)->SendAudioData(std::move(audio_frame_copy)); + } + // Send the original frame to the first stream w/o copying. + (*audio_senders_.begin())->SendAudioData(std::move(audio_frame)); +} + // Mix all received streams, feed the result to the AudioProcessing module, then // resample the result to the requested output rate. int32_t AudioTransportImpl::NeedMorePlayData(const size_t nSamples, @@ -237,19 +259,19 @@ void AudioTransportImpl::PullRenderData(int bits_per_sample, void AudioTransportImpl::UpdateAudioSenders(std::vector senders, int send_sample_rate_hz, size_t send_num_channels) { - rtc::CritScope lock(&capture_lock_); + MutexLock lock(&capture_lock_); audio_senders_ = std::move(senders); send_sample_rate_hz_ = send_sample_rate_hz; send_num_channels_ = send_num_channels; } void AudioTransportImpl::SetStereoChannelSwapping(bool enable) { - rtc::CritScope lock(&capture_lock_); + MutexLock lock(&capture_lock_); swap_stereo_channels_ = enable; } bool AudioTransportImpl::typing_noise_detected() const { - rtc::CritScope lock(&capture_lock_); + MutexLock lock(&capture_lock_); return typing_noise_detected_; } } // namespace webrtc diff --git a/audio/audio_transport_impl.h b/audio/audio_transport_impl.h index 2d9b4cf3a1..f3ca2fa848 100644 --- a/audio/audio_transport_impl.h +++ b/audio/audio_transport_impl.h @@ -11,16 
+11,17 @@ #ifndef AUDIO_AUDIO_TRANSPORT_IMPL_H_ #define AUDIO_AUDIO_TRANSPORT_IMPL_H_ +#include #include #include "api/audio/audio_mixer.h" #include "api/scoped_refptr.h" #include "common_audio/resampler/include/push_resampler.h" +#include "modules/async_audio_processing/async_audio_processing.h" #include "modules/audio_device/include/audio_device.h" #include "modules/audio_processing/include/audio_processing.h" #include "modules/audio_processing/typing_detection.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -29,7 +30,15 @@ class AudioSender; class AudioTransportImpl : public AudioTransport { public: - AudioTransportImpl(AudioMixer* mixer, AudioProcessing* audio_processing); + AudioTransportImpl( + AudioMixer* mixer, + AudioProcessing* audio_processing, + AsyncAudioProcessing::Factory* async_audio_processing_factory); + + AudioTransportImpl() = delete; + AudioTransportImpl(const AudioTransportImpl&) = delete; + AudioTransportImpl& operator=(const AudioTransportImpl&) = delete; + ~AudioTransportImpl() override; int32_t RecordedDataIsAvailable(const void* audioSamples, @@ -67,11 +76,17 @@ class AudioTransportImpl : public AudioTransport { bool typing_noise_detected() const; private: + void SendProcessedData(std::unique_ptr audio_frame); + // Shared. AudioProcessing* audio_processing_ = nullptr; // Capture side. - rtc::CriticalSection capture_lock_; + + // Thread-safe. + const std::unique_ptr async_audio_processing_; + + mutable Mutex capture_lock_; std::vector audio_senders_ RTC_GUARDED_BY(capture_lock_); int send_sample_rate_hz_ RTC_GUARDED_BY(capture_lock_) = 8000; size_t send_num_channels_ RTC_GUARDED_BY(capture_lock_) = 1; @@ -81,12 +96,11 @@ class AudioTransportImpl : public AudioTransport { TypingDetection typing_detection_; // Render side. 
+ rtc::scoped_refptr mixer_; AudioFrame mixed_frame_; // Converts mixed audio to the audio device output rate. PushResampler render_resampler_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTransportImpl); }; } // namespace webrtc diff --git a/audio/channel_receive.cc b/audio/channel_receive.cc index 66b4bb11f5..57269cd193 100644 --- a/audio/channel_receive.cc +++ b/audio/channel_receive.cc @@ -10,8 +10,6 @@ #include "audio/channel_receive.h" -#include - #include #include #include @@ -22,6 +20,8 @@ #include "api/crypto/frame_decryptor_interface.h" #include "api/frame_transformer_interface.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/sequence_checker.h" +#include "api/task_queue/task_queue_base.h" #include "audio/audio_level.h" #include "audio/channel_receive_frame_transformer_delegate.h" #include "audio/channel_send.h" @@ -33,20 +33,23 @@ #include "modules/pacing/packet_router.h" #include "modules/rtp_rtcp/include/receive_statistics.h" #include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" -#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h" +#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h" +#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/utility/include/process_thread.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/format_macros.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_minmax.h" #include "rtc_base/race_checker.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" 
+#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/time_utils.h" #include "system_wrappers/include/metrics.h" @@ -78,12 +81,12 @@ AudioCodingModule::Config AcmConfig( return acm_config; } -class ChannelReceive : public ChannelReceiveInterface { +class ChannelReceive : public ChannelReceiveInterface, + public RtcpPacketTypeCounterObserver { public: // Used for receive streams. ChannelReceive( Clock* clock, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, AudioDeviceModule* audio_device_module, Transport* rtcp_send_transport, @@ -128,12 +131,13 @@ class ChannelReceive : public ChannelReceiveInterface { double GetTotalOutputDuration() const override; // Stats. - NetworkStatistics GetNetworkStatistics() const override; + NetworkStatistics GetNetworkStatistics( + bool get_and_clear_legacy_stats) const override; AudioDecodingCallStats GetDecodingCallStatistics() const override; // Audio+Video Sync. uint32_t GetDelayEstimate() const override; - void SetMinimumPlayoutDelay(int delayMs) override; + bool SetMinimumPlayoutDelay(int delayMs) override; bool GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp, int64_t* time_ms) const override; void SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms, @@ -161,6 +165,8 @@ class ChannelReceive : public ChannelReceiveInterface { int PreferredSampleRate() const override; + void SetSourceTracker(SourceTracker* source_tracker) override; + // Associate to a send channel. // Used for obtaining RTT for a receive-only channel. 
void SetAssociatedSendChannel(const ChannelSendInterface* channel) override; @@ -171,44 +177,55 @@ class ChannelReceive : public ChannelReceiveInterface { rtc::scoped_refptr frame_transformer) override; + void SetFrameDecryptor(rtc::scoped_refptr + frame_decryptor) override; + + void OnLocalSsrcChange(uint32_t local_ssrc) override; + uint32_t GetLocalSsrc() const override; + + void RtcpPacketTypesCounterUpdated( + uint32_t ssrc, + const RtcpPacketTypeCounter& packet_counter) override; + private: void ReceivePacket(const uint8_t* packet, size_t packet_length, - const RTPHeader& header); + const RTPHeader& header) + RTC_RUN_ON(worker_thread_checker_); int ResendPackets(const uint16_t* sequence_numbers, int length); - void UpdatePlayoutTimestamp(bool rtcp, int64_t now_ms); + void UpdatePlayoutTimestamp(bool rtcp, int64_t now_ms) + RTC_RUN_ON(worker_thread_checker_); int GetRtpTimestampRateHz() const; int64_t GetRTT() const; void OnReceivedPayloadData(rtc::ArrayView payload, - const RTPHeader& rtpHeader); + const RTPHeader& rtpHeader) + RTC_RUN_ON(worker_thread_checker_); void InitFrameTransformerDelegate( - rtc::scoped_refptr frame_transformer); - - bool Playing() const { - rtc::CritScope lock(&playing_lock_); - return playing_; - } + rtc::scoped_refptr frame_transformer) + RTC_RUN_ON(worker_thread_checker_); // Thread checkers document and lock usage of some methods to specific threads // we know about. The goal is to eventually split up voe::ChannelReceive into // parts with single-threaded semantics, and thereby reduce the need for // locks. - rtc::ThreadChecker worker_thread_checker_; - rtc::ThreadChecker module_process_thread_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_thread_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker network_thread_checker_; + + TaskQueueBase* const worker_thread_; + ScopedTaskSafety worker_safety_; + // Methods accessed from audio and video threads are checked for sequential- // only access. 
We don't necessarily own and control these threads, so thread // checkers cannot be used. E.g. Chromium may transfer "ownership" from one // audio thread to another, but access is still sequential. rtc::RaceChecker audio_thread_race_checker_; - rtc::RaceChecker video_capture_thread_race_checker_; - rtc::CriticalSection _callbackCritSect; - rtc::CriticalSection volume_settings_critsect_; + Mutex callback_mutex_; + Mutex volume_settings_mutex_; - rtc::CriticalSection playing_lock_; - bool playing_ RTC_GUARDED_BY(&playing_lock_) = false; + bool playing_ RTC_GUARDED_BY(worker_thread_checker_) = false; RtcEventLog* const event_log_; @@ -216,38 +233,38 @@ class ChannelReceive : public ChannelReceiveInterface { std::map payload_type_frequencies_; std::unique_ptr rtp_receive_statistics_; - std::unique_ptr _rtpRtcpModule; + std::unique_ptr rtp_rtcp_; const uint32_t remote_ssrc_; + SourceTracker* source_tracker_ = nullptr; // Info for GetSyncInfo is updated on network or worker thread, and queried on // the worker thread. - rtc::CriticalSection sync_info_lock_; absl::optional last_received_rtp_timestamp_ - RTC_GUARDED_BY(&sync_info_lock_); + RTC_GUARDED_BY(&worker_thread_checker_); absl::optional last_received_rtp_system_time_ms_ - RTC_GUARDED_BY(&sync_info_lock_); + RTC_GUARDED_BY(&worker_thread_checker_); // The AcmReceiver is thread safe, using its own lock. acm2::AcmReceiver acm_receiver_; AudioSinkInterface* audio_sink_ = nullptr; AudioLevel _outputAudioLevel; + Clock* const clock_; RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(ts_stats_lock_); // Timestamp of the audio pulled from NetEq. 
absl::optional jitter_buffer_playout_timestamp_; - rtc::CriticalSection video_sync_lock_; - uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_); + uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(worker_thread_checker_); absl::optional playout_timestamp_rtp_time_ms_ - RTC_GUARDED_BY(video_sync_lock_); - uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_); + RTC_GUARDED_BY(worker_thread_checker_); + uint32_t playout_delay_ms_ RTC_GUARDED_BY(worker_thread_checker_); absl::optional playout_timestamp_ntp_ - RTC_GUARDED_BY(video_sync_lock_); + RTC_GUARDED_BY(worker_thread_checker_); absl::optional playout_timestamp_ntp_time_ms_ - RTC_GUARDED_BY(video_sync_lock_); + RTC_GUARDED_BY(worker_thread_checker_); - rtc::CriticalSection ts_stats_lock_; + mutable Mutex ts_stats_lock_; std::unique_ptr rtp_ts_wraparound_handler_; // The rtp timestamp of the first played out audio frame. @@ -256,36 +273,64 @@ class ChannelReceive : public ChannelReceiveInterface { // frame. int64_t capture_start_ntp_time_ms_ RTC_GUARDED_BY(ts_stats_lock_); - // uses - ProcessThread* _moduleProcessThreadPtr; AudioDeviceModule* _audioDeviceModulePtr; - float _outputGain RTC_GUARDED_BY(volume_settings_critsect_); + float _outputGain RTC_GUARDED_BY(volume_settings_mutex_); - // An associated send channel. 
- rtc::CriticalSection assoc_send_channel_lock_; const ChannelSendInterface* associated_send_channel_ - RTC_GUARDED_BY(assoc_send_channel_lock_); + RTC_GUARDED_BY(network_thread_checker_); PacketRouter* packet_router_ = nullptr; - rtc::ThreadChecker construction_thread_; + SequenceChecker construction_thread_; // E2EE Audio Frame Decryption - rtc::scoped_refptr frame_decryptor_; + rtc::scoped_refptr frame_decryptor_ + RTC_GUARDED_BY(worker_thread_checker_); webrtc::CryptoOptions crypto_options_; - webrtc::AbsoluteCaptureTimeReceiver absolute_capture_time_receiver_; + webrtc::AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_ + RTC_GUARDED_BY(worker_thread_checker_); + + webrtc::CaptureClockOffsetUpdater capture_clock_offset_updater_; rtc::scoped_refptr frame_transformer_delegate_; + + // Counter that's used to control the frequency of reporting histograms + // from the `GetAudioFrameWithInfo` callback. + int audio_frame_interval_count_ RTC_GUARDED_BY(audio_thread_race_checker_) = + 0; + // Controls how many callbacks we let pass by before reporting callback stats. + // A value of 100 means 100 callbacks, each one of which represents 10ms worth + // of data, so the stats reporting frequency will be 1Hz (modulo failures). + constexpr static int kHistogramReportingInterval = 100; + + mutable Mutex rtcp_counter_mutex_; + RtcpPacketTypeCounter rtcp_packet_type_counter_ + RTC_GUARDED_BY(rtcp_counter_mutex_); }; void ChannelReceive::OnReceivedPayloadData( rtc::ArrayView payload, const RTPHeader& rtpHeader) { - if (!Playing()) { + if (!playing_) { // Avoid inserting into NetEQ when we are not playing. Count the // packet as discarded. + + // If we have a source_tracker_, tell it that the frame has been + // "delivered". Normally, this happens in AudioReceiveStream when audio + // frames are pulled out, but when playout is muted, nothing is pulling + // frames. 
The downside of this approach is that frames delivered this way + // won't be delayed for playout, and therefore will be unsynchronized with + // (a) audio delay when playing and (b) any audio/video synchronization. But + // the alternative is that muting playout also stops the SourceTracker from + // updating RtpSource information. + if (source_tracker_) { + RtpPacketInfos::vector_type packet_vector = { + RtpPacketInfo(rtpHeader, clock_->CurrentTime())}; + source_tracker_->OnFrameDelivered(RtpPacketInfos(packet_vector)); + } + return; } @@ -297,7 +342,7 @@ void ChannelReceive::OnReceivedPayloadData( } int64_t round_trip_time = 0; - _rtpRtcpModule->RTT(remote_ssrc_, &round_trip_time, NULL, NULL, NULL); + rtp_rtcp_->RTT(remote_ssrc_, &round_trip_time, NULL, NULL, NULL); std::vector nack_list = acm_receiver_.GetNackList(round_trip_time); if (!nack_list.empty()) { @@ -311,18 +356,20 @@ void ChannelReceive::InitFrameTransformerDelegate( rtc::scoped_refptr frame_transformer) { RTC_DCHECK(frame_transformer); RTC_DCHECK(!frame_transformer_delegate_); + RTC_DCHECK(worker_thread_->IsCurrent()); // Pass a callback to ChannelReceive::OnReceivedPayloadData, to be called by // the delegate to receive transformed audio. ChannelReceiveFrameTransformerDelegate::ReceiveFrameCallback receive_audio_callback = [this](rtc::ArrayView packet, const RTPHeader& header) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); OnReceivedPayloadData(packet, header); }; frame_transformer_delegate_ = - new rtc::RefCountedObject( + rtc::make_ref_counted( std::move(receive_audio_callback), std::move(frame_transformer), - rtc::Thread::Current()); + worker_thread_); frame_transformer_delegate_->Init(); } @@ -359,7 +406,7 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo( // scaling/panning, as that applies to the mix operation. // External recipients of the audio (e.g. via AudioTrack), will do their // own mixing/dynamic processing. 
- rtc::CritScope cs(&_callbackCritSect); + MutexLock lock(&callback_mutex_); if (audio_sink_) { AudioSinkInterface::Data data( audio_frame->data(), audio_frame->samples_per_channel_, @@ -371,7 +418,7 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo( float output_gain = 1.0f; { - rtc::CritScope cs(&volume_settings_critsect_); + MutexLock lock(&volume_settings_mutex_); output_gain = _outputGain; } @@ -403,7 +450,7 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo( (GetRtpTimestampRateHz() / 1000); { - rtc::CritScope lock(&ts_stats_lock_); + MutexLock lock(&ts_stats_lock_); // Compute ntp time. audio_frame->ntp_time_ms_ = ntp_estimator_.Estimate(audio_frame->timestamp_); @@ -417,17 +464,37 @@ AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo( } } - { - RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.TargetJitterBufferDelayMs", - acm_receiver_.TargetDelayMs()); - const int jitter_buffer_delay = acm_receiver_.FilteredCurrentDelayMs(); - rtc::CritScope lock(&video_sync_lock_); - RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDelayEstimateMs", - jitter_buffer_delay + playout_delay_ms_); - RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverJitterBufferDelayMs", - jitter_buffer_delay); - RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDeviceDelayMs", - playout_delay_ms_); + // Fill in local capture clock offset in |audio_frame->packet_infos_|. 
+ RtpPacketInfos::vector_type packet_infos; + for (auto& packet_info : audio_frame->packet_infos_) { + absl::optional local_capture_clock_offset; + if (packet_info.absolute_capture_time().has_value()) { + local_capture_clock_offset = + capture_clock_offset_updater_.AdjustEstimatedCaptureClockOffset( + packet_info.absolute_capture_time() + ->estimated_capture_clock_offset); + } + RtpPacketInfo new_packet_info(packet_info); + new_packet_info.set_local_capture_clock_offset(local_capture_clock_offset); + packet_infos.push_back(std::move(new_packet_info)); + } + audio_frame->packet_infos_ = RtpPacketInfos(packet_infos); + + ++audio_frame_interval_count_; + if (audio_frame_interval_count_ >= kHistogramReportingInterval) { + audio_frame_interval_count_ = 0; + worker_thread_->PostTask(ToQueuedTask(worker_safety_, [this]() { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.TargetJitterBufferDelayMs", + acm_receiver_.TargetDelayMs()); + const int jitter_buffer_delay = acm_receiver_.FilteredCurrentDelayMs(); + RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDelayEstimateMs", + jitter_buffer_delay + playout_delay_ms_); + RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverJitterBufferDelayMs", + jitter_buffer_delay); + RTC_HISTOGRAM_COUNTS_1000("WebRTC.Audio.ReceiverDeviceDelayMs", + playout_delay_ms_); + })); } return muted ? 
AudioMixer::Source::AudioFrameInfo::kMuted @@ -441,9 +508,12 @@ int ChannelReceive::PreferredSampleRate() const { acm_receiver_.last_output_sample_rate_hz()); } +void ChannelReceive::SetSourceTracker(SourceTracker* source_tracker) { + source_tracker_ = source_tracker; +} + ChannelReceive::ChannelReceive( Clock* clock, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, AudioDeviceModule* audio_device_module, Transport* rtcp_send_transport, @@ -459,7 +529,8 @@ ChannelReceive::ChannelReceive( rtc::scoped_refptr frame_decryptor, const webrtc::CryptoOptions& crypto_options, rtc::scoped_refptr frame_transformer) - : event_log_(rtc_event_log), + : worker_thread_(TaskQueueBase::Current()), + event_log_(rtc_event_log), rtp_receive_statistics_(ReceiveStatistics::Create(clock)), remote_ssrc_(remote_ssrc), acm_receiver_(AcmConfig(neteq_factory, @@ -468,25 +539,23 @@ ChannelReceive::ChannelReceive( jitter_buffer_max_packets, jitter_buffer_fast_playout)), _outputAudioLevel(), + clock_(clock), ntp_estimator_(clock), playout_timestamp_rtp_(0), playout_delay_ms_(0), rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()), capture_start_rtp_time_stamp_(-1), capture_start_ntp_time_ms_(-1), - _moduleProcessThreadPtr(module_process_thread), _audioDeviceModulePtr(audio_device_module), _outputGain(1.0f), associated_send_channel_(nullptr), frame_decryptor_(frame_decryptor), crypto_options_(crypto_options), - absolute_capture_time_receiver_(clock) { - // TODO(nisse): Use _moduleProcessThreadPtr instead? 
- module_process_thread_checker_.Detach(); - - RTC_DCHECK(module_process_thread); + absolute_capture_time_interpolator_(clock) { RTC_DCHECK(audio_device_module); + network_thread_checker_.Detach(); + acm_receiver_.ResetInitialDelay(); acm_receiver_.SetMinimumDelay(0); acm_receiver_.SetMaximumDelay(0); @@ -495,7 +564,7 @@ ChannelReceive::ChannelReceive( _outputAudioLevel.ResetLevelFullRange(); rtp_receive_statistics_->EnableRetransmitDetection(remote_ssrc_, true); - RtpRtcp::Configuration configuration; + RtpRtcpInterface::Configuration configuration; configuration.clock = clock; configuration.audio = true; configuration.receiver_only = true; @@ -503,61 +572,55 @@ ChannelReceive::ChannelReceive( configuration.receive_statistics = rtp_receive_statistics_.get(); configuration.event_log = event_log_; configuration.local_media_ssrc = local_ssrc; + configuration.rtcp_packet_type_counter_observer = this; if (frame_transformer) InitFrameTransformerDelegate(std::move(frame_transformer)); - _rtpRtcpModule = RtpRtcp::Create(configuration); - _rtpRtcpModule->SetSendingMediaStatus(false); - _rtpRtcpModule->SetRemoteSSRC(remote_ssrc_); - - _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE); + rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(configuration); + rtp_rtcp_->SetSendingMediaStatus(false); + rtp_rtcp_->SetRemoteSSRC(remote_ssrc_); // Ensure that RTCP is enabled for the created channel. - _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound); + rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound); } ChannelReceive::~ChannelReceive() { - RTC_DCHECK(construction_thread_.IsCurrent()); + RTC_DCHECK_RUN_ON(&construction_thread_); // Resets the delegate's callback to ChannelReceive::OnReceivedPayloadData. 
if (frame_transformer_delegate_) frame_transformer_delegate_->Reset(); StopPlayout(); - - if (_moduleProcessThreadPtr) - _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); } void ChannelReceive::SetSink(AudioSinkInterface* sink) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - rtc::CritScope cs(&_callbackCritSect); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + MutexLock lock(&callback_mutex_); audio_sink_ = sink; } void ChannelReceive::StartPlayout() { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - rtc::CritScope lock(&playing_lock_); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); playing_ = true; } void ChannelReceive::StopPlayout() { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - rtc::CritScope lock(&playing_lock_); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); playing_ = false; _outputAudioLevel.ResetLevelFullRange(); } absl::optional> ChannelReceive::GetReceiveCodec() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); return acm_receiver_.LastDecoder(); } void ChannelReceive::SetReceiveCodecs( const std::map& codecs) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); for (const auto& kv : codecs) { RTC_DCHECK_GE(kv.second.clockrate_hz, 1000); payload_type_frequencies_[kv.first] = kv.second.clockrate_hz; @@ -565,15 +628,15 @@ void ChannelReceive::SetReceiveCodecs( acm_receiver_.SetCodecs(codecs); } -// May be called on either worker thread or network thread. void ChannelReceive::OnRtpPacket(const RtpPacketReceived& packet) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + // TODO(bugs.webrtc.org/11993): Expect to be called exclusively on the + // network thread. 
Once that's done, the same applies to + // UpdatePlayoutTimestamp and int64_t now_ms = rtc::TimeMillis(); - { - rtc::CritScope cs(&sync_info_lock_); - last_received_rtp_timestamp_ = packet.Timestamp(); - last_received_rtp_system_time_ms_ = now_ms; - } + last_received_rtp_timestamp_ = packet.Timestamp(); + last_received_rtp_system_time_ms_ = now_ms; // Store playout timestamp for the received RTP packet UpdatePlayoutTimestamp(false, now_ms); @@ -592,9 +655,9 @@ void ChannelReceive::OnRtpPacket(const RtpPacketReceived& packet) { // Interpolates absolute capture timestamp RTP header extension. header.extension.absolute_capture_time = - absolute_capture_time_receiver_.OnReceivePacket( - AbsoluteCaptureTimeReceiver::GetSource(header.ssrc, - header.arrOfCSRCs), + absolute_capture_time_interpolator_.OnReceivePacket( + AbsoluteCaptureTimeInterpolator::GetSource(header.ssrc, + header.arrOfCSRCs), header.timestamp, rtc::saturated_cast(packet_copy.payload_type_frequency()), header.extension.absolute_capture_time); @@ -606,7 +669,7 @@ void ChannelReceive::ReceivePacket(const uint8_t* packet, size_t packet_length, const RTPHeader& header) { const uint8_t* payload = packet + header.headerLength; - assert(packet_length >= header.headerLength); + RTC_DCHECK_GE(packet_length, header.headerLength); size_t payload_length = packet_length - header.headerLength; size_t payload_data_length = payload_length - header.paddingLength; @@ -653,13 +716,16 @@ void ChannelReceive::ReceivePacket(const uint8_t* packet, } } -// May be called on either worker thread or network thread. void ChannelReceive::ReceivedRTCPPacket(const uint8_t* data, size_t length) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + // TODO(bugs.webrtc.org/11993): Expect to be called exclusively on the + // network thread. 
+ // Store playout timestamp for the received RTCP packet UpdatePlayoutTimestamp(true, rtc::TimeMillis()); // Deliver RTCP packet to RTP/RTCP module for parsing - _rtpRtcpModule->IncomingRtcpPacket(data, length); + rtp_rtcp_->IncomingRtcpPacket(data, length); int64_t rtt = GetRTT(); if (rtt == 0) { @@ -670,63 +736,70 @@ void ChannelReceive::ReceivedRTCPPacket(const uint8_t* data, size_t length) { uint32_t ntp_secs = 0; uint32_t ntp_frac = 0; uint32_t rtp_timestamp = 0; - if (0 != _rtpRtcpModule->RemoteNTP(&ntp_secs, &ntp_frac, NULL, NULL, - &rtp_timestamp)) { + if (rtp_rtcp_->RemoteNTP(&ntp_secs, &ntp_frac, + /*rtcp_arrival_time_secs=*/nullptr, + /*rtcp_arrival_time_frac=*/nullptr, + &rtp_timestamp) != 0) { // Waiting for RTCP. return; } { - rtc::CritScope lock(&ts_stats_lock_); + MutexLock lock(&ts_stats_lock_); ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp); + absl::optional remote_to_local_clock_offset_ms = + ntp_estimator_.EstimateRemoteToLocalClockOffsetMs(); + if (remote_to_local_clock_offset_ms.has_value()) { + capture_clock_offset_updater_.SetRemoteToLocalClockOffset( + Int64MsToQ32x32(*remote_to_local_clock_offset_ms)); + } } } int ChannelReceive::GetSpeechOutputLevelFullRange() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); return _outputAudioLevel.LevelFullRange(); } double ChannelReceive::GetTotalOutputEnergy() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); return _outputAudioLevel.TotalEnergy(); } double ChannelReceive::GetTotalOutputDuration() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); return _outputAudioLevel.TotalDuration(); } void ChannelReceive::SetChannelOutputVolumeScaling(float scaling) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - rtc::CritScope cs(&volume_settings_critsect_); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + MutexLock 
lock(&volume_settings_mutex_); _outputGain = scaling; } void ChannelReceive::RegisterReceiverCongestionControlObjects( PacketRouter* packet_router) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); RTC_DCHECK(packet_router); RTC_DCHECK(!packet_router_); constexpr bool remb_candidate = false; - packet_router->AddReceiveRtpModule(_rtpRtcpModule.get(), remb_candidate); + packet_router->AddReceiveRtpModule(rtp_rtcp_.get(), remb_candidate); packet_router_ = packet_router; } void ChannelReceive::ResetReceiverCongestionControlObjects() { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); RTC_DCHECK(packet_router_); - packet_router_->RemoveReceiveRtpModule(_rtpRtcpModule.get()); + packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get()); packet_router_ = nullptr; } CallReceiveStatistics ChannelReceive::GetRTCPStatistics() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - // --- RtcpStatistics + RTC_DCHECK_RUN_ON(&worker_thread_checker_); CallReceiveStatistics stats; - // The jitter statistics is updated for each received RTP packet and is - // based on received packets. + // The jitter statistics is updated for each received RTP packet and is based + // on received packets. RtpReceiveStats rtp_stats; StreamStatistician* statistician = rtp_receive_statistics_->GetStatistician(remote_ssrc_); @@ -737,10 +810,9 @@ CallReceiveStatistics ChannelReceive::GetRTCPStatistics() const { stats.cumulativeLost = rtp_stats.packets_lost; stats.jitterSamples = rtp_stats.jitter; - // --- RTT stats.rttMs = GetRTT(); - // --- Data counters + // Data counters. 
if (statistician) { stats.payload_bytes_rcvd = rtp_stats.packet_counter.payload_bytes; @@ -757,16 +829,38 @@ CallReceiveStatistics ChannelReceive::GetRTCPStatistics() const { stats.last_packet_received_timestamp_ms = absl::nullopt; } - // --- Timestamps { - rtc::CritScope lock(&ts_stats_lock_); + MutexLock lock(&rtcp_counter_mutex_); + stats.nacks_sent = rtcp_packet_type_counter_.nack_packets; + } + + // Timestamps. + { + MutexLock lock(&ts_stats_lock_); stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_; } + + absl::optional rtcp_sr_stats = + rtp_rtcp_->GetSenderReportStats(); + if (rtcp_sr_stats.has_value()) { + // Number of seconds since 1900 January 1 00:00 GMT (see + // https://tools.ietf.org/html/rfc868). + constexpr int64_t kNtpJan1970Millisecs = + 2208988800 * rtc::kNumMillisecsPerSec; + stats.last_sender_report_timestamp_ms = + rtcp_sr_stats->last_arrival_timestamp.ToMs() - kNtpJan1970Millisecs; + stats.last_sender_report_remote_timestamp_ms = + rtcp_sr_stats->last_remote_timestamp.ToMs() - kNtpJan1970Millisecs; + stats.sender_reports_packets_sent = rtcp_sr_stats->packets_sent; + stats.sender_reports_bytes_sent = rtcp_sr_stats->bytes_sent; + stats.sender_reports_reports_count = rtcp_sr_stats->reports_count; + } + return stats; } void ChannelReceive::SetNACKStatus(bool enable, int max_packets) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); // None of these functions can fail. if (enable) { rtp_receive_statistics_->SetMaxReorderingThreshold(max_packets); @@ -781,49 +875,83 @@ void ChannelReceive::SetNACKStatus(bool enable, int max_packets) { // Called when we are missing one or more packets. 
int ChannelReceive::ResendPackets(const uint16_t* sequence_numbers, int length) { - return _rtpRtcpModule->SendNACK(sequence_numbers, length); + return rtp_rtcp_->SendNACK(sequence_numbers, length); +} + +void ChannelReceive::RtcpPacketTypesCounterUpdated( + uint32_t ssrc, + const RtcpPacketTypeCounter& packet_counter) { + if (ssrc != remote_ssrc_) { + return; + } + MutexLock lock(&rtcp_counter_mutex_); + rtcp_packet_type_counter_ = packet_counter; } void ChannelReceive::SetAssociatedSendChannel( const ChannelSendInterface* channel) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - rtc::CritScope lock(&assoc_send_channel_lock_); + RTC_DCHECK_RUN_ON(&network_thread_checker_); associated_send_channel_ = channel; } void ChannelReceive::SetDepacketizerToDecoderFrameTransformer( rtc::scoped_refptr frame_transformer) { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); // Depending on when the channel is created, the transformer might be set // twice. Don't replace the delegate if it was already initialized. - if (!frame_transformer || frame_transformer_delegate_) + if (!frame_transformer || frame_transformer_delegate_) { + RTC_NOTREACHED() << "Not setting the transformer?"; return; + } + InitFrameTransformerDelegate(std::move(frame_transformer)); } -NetworkStatistics ChannelReceive::GetNetworkStatistics() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); +void ChannelReceive::SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) { + // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + frame_decryptor_ = std::move(frame_decryptor); +} + +void ChannelReceive::OnLocalSsrcChange(uint32_t local_ssrc) { + // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. 
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_); + rtp_rtcp_->SetLocalSsrc(local_ssrc); +} + +uint32_t ChannelReceive::GetLocalSsrc() const { + // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + return rtp_rtcp_->local_media_ssrc(); +} + +NetworkStatistics ChannelReceive::GetNetworkStatistics( + bool get_and_clear_legacy_stats) const { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); NetworkStatistics stats; - acm_receiver_.GetNetworkStatistics(&stats); + acm_receiver_.GetNetworkStatistics(&stats, get_and_clear_legacy_stats); return stats; } AudioDecodingCallStats ChannelReceive::GetDecodingCallStatistics() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); AudioDecodingCallStats stats; acm_receiver_.GetDecodingCallStatistics(&stats); return stats; } uint32_t ChannelReceive::GetDelayEstimate() const { - RTC_DCHECK(worker_thread_checker_.IsCurrent() || - module_process_thread_checker_.IsCurrent()); - rtc::CritScope lock(&video_sync_lock_); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + // Return the current jitter buffer delay + playout delay. return acm_receiver_.FilteredCurrentDelayMs() + playout_delay_ms_; } -void ChannelReceive::SetMinimumPlayoutDelay(int delay_ms) { - RTC_DCHECK(module_process_thread_checker_.IsCurrent()); +bool ChannelReceive::SetMinimumPlayoutDelay(int delay_ms) { + // TODO(bugs.webrtc.org/11993): This should run on the network thread. + // We get here via RtpStreamsSynchronizer. Once that's done, many (all?) of + // these locks aren't needed. + RTC_DCHECK_RUN_ON(&worker_thread_checker_); // Limit to range accepted by both VoE and ACM, so we're at least getting as // close as possible, instead of failing. 
delay_ms = rtc::SafeClamp(delay_ms, kVoiceEngineMinMinPlayoutDelayMs, @@ -831,34 +959,31 @@ void ChannelReceive::SetMinimumPlayoutDelay(int delay_ms) { if (acm_receiver_.SetMinimumDelay(delay_ms) != 0) { RTC_DLOG(LS_ERROR) << "SetMinimumPlayoutDelay() failed to set min playout delay"; + return false; } + return true; } bool ChannelReceive::GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp, int64_t* time_ms) const { - RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_); - { - rtc::CritScope lock(&video_sync_lock_); - if (!playout_timestamp_rtp_time_ms_) - return false; - *rtp_timestamp = playout_timestamp_rtp_; - *time_ms = playout_timestamp_rtp_time_ms_.value(); - return true; - } + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + if (!playout_timestamp_rtp_time_ms_) + return false; + *rtp_timestamp = playout_timestamp_rtp_; + *time_ms = playout_timestamp_rtp_time_ms_.value(); + return true; } void ChannelReceive::SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms, int64_t time_ms) { - RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_); - rtc::CritScope lock(&video_sync_lock_); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); playout_timestamp_ntp_ = ntp_timestamp_ms; playout_timestamp_ntp_time_ms_ = time_ms; } absl::optional ChannelReceive::GetCurrentEstimatedPlayoutNtpTimestampMs(int64_t now_ms) const { - RTC_DCHECK(worker_thread_checker_.IsCurrent()); - rtc::CritScope lock(&video_sync_lock_); + RTC_DCHECK_RUN_ON(&worker_thread_checker_); if (!playout_timestamp_ntp_ || !playout_timestamp_ntp_time_ms_) return absl::nullopt; @@ -875,25 +1000,36 @@ int ChannelReceive::GetBaseMinimumPlayoutDelayMs() const { } absl::optional ChannelReceive::GetSyncInfo() const { - RTC_DCHECK(module_process_thread_checker_.IsCurrent()); + // TODO(bugs.webrtc.org/11993): This should run on the network thread. + // We get here via RtpStreamsSynchronizer. Once that's done, many of + // these locks aren't needed. 
+ RTC_DCHECK_RUN_ON(&worker_thread_checker_); Syncable::Info info; - if (_rtpRtcpModule->RemoteNTP(&info.capture_time_ntp_secs, - &info.capture_time_ntp_frac, nullptr, nullptr, - &info.capture_time_source_clock) != 0) { + if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs, + &info.capture_time_ntp_frac, + /*rtcp_arrival_time_secs=*/nullptr, + /*rtcp_arrival_time_frac=*/nullptr, + &info.capture_time_source_clock) != 0) { return absl::nullopt; } - { - rtc::CritScope cs(&sync_info_lock_); - if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) { - return absl::nullopt; - } - info.latest_received_capture_timestamp = *last_received_rtp_timestamp_; - info.latest_receive_time_ms = *last_received_rtp_system_time_ms_; + + if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) { + return absl::nullopt; } + info.latest_received_capture_timestamp = *last_received_rtp_timestamp_; + info.latest_receive_time_ms = *last_received_rtp_system_time_ms_; + + int jitter_buffer_delay = acm_receiver_.FilteredCurrentDelayMs(); + info.current_delay_ms = jitter_buffer_delay + playout_delay_ms_; + return info; } +// RTC_RUN_ON(worker_thread_checker_) void ChannelReceive::UpdatePlayoutTimestamp(bool rtcp, int64_t now_ms) { + // TODO(bugs.webrtc.org/11993): Expect to be called exclusively on the + // network thread. Once that's done, we won't need video_sync_lock_. + jitter_buffer_playout_timestamp_ = acm_receiver_.GetPlayoutTimestamp(); if (!jitter_buffer_playout_timestamp_) { @@ -916,14 +1052,11 @@ void ChannelReceive::UpdatePlayoutTimestamp(bool rtcp, int64_t now_ms) { // Remove the playout delay. 
playout_timestamp -= (delay_ms * (GetRtpTimestampRateHz() / 1000)); - { - rtc::CritScope lock(&video_sync_lock_); - if (!rtcp && playout_timestamp != playout_timestamp_rtp_) { - playout_timestamp_rtp_ = playout_timestamp; - playout_timestamp_rtp_time_ms_ = now_ms; - } - playout_delay_ms_ = delay_ms; + if (!rtcp && playout_timestamp != playout_timestamp_rtp_) { + playout_timestamp_rtp_ = playout_timestamp; + playout_timestamp_rtp_time_ms_ = now_ms; } + playout_delay_ms_ = delay_ms; } int ChannelReceive::GetRtpTimestampRateHz() const { @@ -941,38 +1074,32 @@ int ChannelReceive::GetRtpTimestampRateHz() const { } int64_t ChannelReceive::GetRTT() const { - std::vector report_blocks; - _rtpRtcpModule->RemoteRTCPStat(&report_blocks); + RTC_DCHECK_RUN_ON(&network_thread_checker_); + std::vector report_blocks = + rtp_rtcp_->GetLatestReportBlockData(); - // TODO(nisse): Could we check the return value from the ->RTT() call below, - // instead of checking if we have any report blocks? if (report_blocks.empty()) { - rtc::CritScope lock(&assoc_send_channel_lock_); - // Tries to get RTT from an associated channel. + // Try fall back on an RTT from an associated channel. if (!associated_send_channel_) { return 0; } return associated_send_channel_->GetRTT(); } - int64_t rtt = 0; - int64_t avg_rtt = 0; - int64_t max_rtt = 0; - int64_t min_rtt = 0; // TODO(nisse): This method computes RTT based on sender reports, even though // a receive stream is not supposed to do that. 
- if (_rtpRtcpModule->RTT(remote_ssrc_, &rtt, &avg_rtt, &min_rtt, &max_rtt) != - 0) { - return 0; + for (const ReportBlockData& data : report_blocks) { + if (data.report_block().sender_ssrc == remote_ssrc_) { + return data.last_rtt_ms(); + } } - return rtt; + return 0; } } // namespace std::unique_ptr CreateChannelReceive( Clock* clock, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, AudioDeviceModule* audio_device_module, Transport* rtcp_send_transport, @@ -989,12 +1116,11 @@ std::unique_ptr CreateChannelReceive( const webrtc::CryptoOptions& crypto_options, rtc::scoped_refptr frame_transformer) { return std::make_unique( - clock, module_process_thread, neteq_factory, audio_device_module, - rtcp_send_transport, rtc_event_log, local_ssrc, remote_ssrc, - jitter_buffer_max_packets, jitter_buffer_fast_playout, - jitter_buffer_min_delay_ms, jitter_buffer_enable_rtx_handling, - decoder_factory, codec_pair_id, frame_decryptor, crypto_options, - std::move(frame_transformer)); + clock, neteq_factory, audio_device_module, rtcp_send_transport, + rtc_event_log, local_ssrc, remote_ssrc, jitter_buffer_max_packets, + jitter_buffer_fast_playout, jitter_buffer_min_delay_ms, + jitter_buffer_enable_rtx_handling, decoder_factory, codec_pair_id, + std::move(frame_decryptor), crypto_options, std::move(frame_transformer)); } } // namespace voe diff --git a/audio/channel_receive.h b/audio/channel_receive.h index bc02ff3023..deec49feaf 100644 --- a/audio/channel_receive.h +++ b/audio/channel_receive.h @@ -28,6 +28,7 @@ #include "call/rtp_packet_sink_interface.h" #include "call/syncable.h" #include "modules/audio_coding/include/audio_coding_module_typedefs.h" +#include "modules/rtp_rtcp/source/source_tracker.h" #include "system_wrappers/include/clock.h" // TODO(solenberg, nisse): This file contains a few NOLINT marks, to silence @@ -43,7 +44,6 @@ namespace webrtc { class AudioDeviceModule; class FrameDecryptorInterface; class PacketRouter; -class ProcessThread; class 
RateLimiter; class ReceiveStatistics; class RtcEventLog; @@ -57,13 +57,23 @@ struct CallReceiveStatistics { int64_t payload_bytes_rcvd = 0; int64_t header_and_padding_bytes_rcvd = 0; int packetsReceived; - // The capture ntp time (in local timebase) of the first played out audio + uint32_t nacks_sent = 0; + // The capture NTP time (in local timebase) of the first played out audio // frame. int64_t capture_start_ntp_time_ms_; // The timestamp at which the last packet was received, i.e. the time of the // local clock when it was received - not the RTP timestamp of that packet. // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-lastpacketreceivedtimestamp absl::optional last_packet_received_timestamp_ms; + // Remote outbound stats derived by the received RTCP sender reports. + // Note that the timestamps below correspond to the time elapsed since the + // Unix epoch. + // https://w3c.github.io/webrtc-stats/#remoteoutboundrtpstats-dict* + absl::optional last_sender_report_timestamp_ms; + absl::optional last_sender_report_remote_timestamp_ms; + uint32_t sender_reports_packets_sent = 0; + uint64_t sender_reports_bytes_sent = 0; + uint64_t sender_reports_reports_count = 0; }; namespace voe { @@ -99,12 +109,13 @@ class ChannelReceiveInterface : public RtpPacketSinkInterface { virtual double GetTotalOutputDuration() const = 0; // Stats. - virtual NetworkStatistics GetNetworkStatistics() const = 0; + virtual NetworkStatistics GetNetworkStatistics( + bool get_and_clear_legacy_stats) const = 0; virtual AudioDecodingCallStats GetDecodingCallStatistics() const = 0; // Audio+Video Sync. 
virtual uint32_t GetDelayEstimate() const = 0; - virtual void SetMinimumPlayoutDelay(int delay_ms) = 0; + virtual bool SetMinimumPlayoutDelay(int delay_ms) = 0; virtual bool GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp, int64_t* time_ms) const = 0; virtual void SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms, @@ -134,6 +145,10 @@ class ChannelReceiveInterface : public RtpPacketSinkInterface { virtual int PreferredSampleRate() const = 0; + // Sets the source tracker to notify about "delivered" packets when output is + // muted. + virtual void SetSourceTracker(SourceTracker* source_tracker) = 0; + // Associate to a send channel. // Used for obtaining RTT for a receive-only channel. virtual void SetAssociatedSendChannel( @@ -144,11 +159,16 @@ class ChannelReceiveInterface : public RtpPacketSinkInterface { virtual void SetDepacketizerToDecoderFrameTransformer( rtc::scoped_refptr frame_transformer) = 0; + + virtual void SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) = 0; + + virtual void OnLocalSsrcChange(uint32_t local_ssrc) = 0; + virtual uint32_t GetLocalSsrc() const = 0; }; std::unique_ptr CreateChannelReceive( Clock* clock, - ProcessThread* module_process_thread, NetEqFactory* neteq_factory, AudioDeviceModule* audio_device_module, Transport* rtcp_send_transport, diff --git a/audio/channel_receive_frame_transformer_delegate.cc b/audio/channel_receive_frame_transformer_delegate.cc index 261afbb100..7e617df780 100644 --- a/audio/channel_receive_frame_transformer_delegate.cc +++ b/audio/channel_receive_frame_transformer_delegate.cc @@ -47,7 +47,7 @@ class TransformableAudioFrame : public TransformableAudioFrameInterface { ChannelReceiveFrameTransformerDelegate::ChannelReceiveFrameTransformerDelegate( ReceiveFrameCallback receive_frame_callback, rtc::scoped_refptr frame_transformer, - rtc::Thread* channel_receive_thread) + TaskQueueBase* channel_receive_thread) : receive_frame_callback_(receive_frame_callback), 
frame_transformer_(std::move(frame_transformer)), channel_receive_thread_(channel_receive_thread) {} diff --git a/audio/channel_receive_frame_transformer_delegate.h b/audio/channel_receive_frame_transformer_delegate.h index 73112d10e3..f59834d24e 100644 --- a/audio/channel_receive_frame_transformer_delegate.h +++ b/audio/channel_receive_frame_transformer_delegate.h @@ -14,7 +14,8 @@ #include #include "api/frame_transformer_interface.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "api/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" #include "rtc_base/thread.h" @@ -31,7 +32,7 @@ class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback { ChannelReceiveFrameTransformerDelegate( ReceiveFrameCallback receive_frame_callback, rtc::scoped_refptr frame_transformer, - rtc::Thread* channel_receive_thread); + TaskQueueBase* channel_receive_thread); // Registers |this| as callback for |frame_transformer_|, to get the // transformed frames. 
@@ -61,12 +62,12 @@ class ChannelReceiveFrameTransformerDelegate : public TransformedFrameCallback { ~ChannelReceiveFrameTransformerDelegate() override = default; private: - SequenceChecker sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; ReceiveFrameCallback receive_frame_callback_ RTC_GUARDED_BY(sequence_checker_); rtc::scoped_refptr frame_transformer_ RTC_GUARDED_BY(sequence_checker_); - rtc::Thread* channel_receive_thread_; + TaskQueueBase* const channel_receive_thread_; }; } // namespace webrtc diff --git a/audio/channel_receive_frame_transformer_delegate_unittest.cc b/audio/channel_receive_frame_transformer_delegate_unittest.cc index e7f5a454b8..01aac45b24 100644 --- a/audio/channel_receive_frame_transformer_delegate_unittest.cc +++ b/audio/channel_receive_frame_transformer_delegate_unittest.cc @@ -41,9 +41,9 @@ class MockChannelReceive { TEST(ChannelReceiveFrameTransformerDelegateTest, RegisterTransformedFrameCallbackOnInit) { rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + rtc::make_ref_counted( ChannelReceiveFrameTransformerDelegate::ReceiveFrameCallback(), mock_frame_transformer, nullptr); EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameCallback); @@ -55,9 +55,9 @@ TEST(ChannelReceiveFrameTransformerDelegateTest, TEST(ChannelReceiveFrameTransformerDelegateTest, UnregisterTransformedFrameCallbackOnReset) { rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + rtc::make_ref_counted( ChannelReceiveFrameTransformerDelegate::ReceiveFrameCallback(), mock_frame_transformer, nullptr); EXPECT_CALL(*mock_frame_transformer, UnregisterTransformedFrameCallback); @@ -69,10 +69,10 @@ TEST(ChannelReceiveFrameTransformerDelegateTest, TEST(ChannelReceiveFrameTransformerDelegateTest, 
TransformRunsChannelReceiveCallback) { rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject>(); + rtc::make_ref_counted>(); MockChannelReceive mock_channel; rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + rtc::make_ref_counted( mock_channel.callback(), mock_frame_transformer, rtc::Thread::Current()); rtc::scoped_refptr callback; @@ -100,10 +100,10 @@ TEST(ChannelReceiveFrameTransformerDelegateTest, TEST(ChannelReceiveFrameTransformerDelegateTest, OnTransformedDoesNotRunChannelReceiveCallbackAfterReset) { rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject>(); + rtc::make_ref_counted>(); MockChannelReceive mock_channel; rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + rtc::make_ref_counted( mock_channel.callback(), mock_frame_transformer, rtc::Thread::Current()); diff --git a/audio/channel_send.cc b/audio/channel_send.cc index 3387f271ba..06e9238ce8 100644 --- a/audio/channel_send.cc +++ b/audio/channel_send.cc @@ -21,6 +21,7 @@ #include "api/call/transport.h" #include "api/crypto/frame_encryptor_interface.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/sequence_checker.h" #include "audio/channel_send_frame_transformer_delegate.h" #include "audio/utility/audio_frame_operations.h" #include "call/rtp_transport_controller_send_interface.h" @@ -29,6 +30,7 @@ #include "modules/audio_coding/include/audio_coding_module.h" #include "modules/audio_processing/rms_level.h" #include "modules/pacing/packet_router.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/utility/include/process_thread.h" #include "rtc_base/checks.h" #include "rtc_base/event.h" @@ -38,8 +40,8 @@ #include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/race_checker.h" #include "rtc_base/rate_limiter.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" -#include "rtc_base/thread_checker.h" #include "rtc_base/time_utils.h" #include "system_wrappers/include/clock.h" 
#include "system_wrappers/include/field_trial.h" @@ -54,13 +56,13 @@ constexpr int64_t kMaxRetransmissionWindowMs = 1000; constexpr int64_t kMinRetransmissionWindowMs = 30; class RtpPacketSenderProxy; -class TransportFeedbackProxy; class TransportSequenceNumberProxy; class VoERtcpObserver; class ChannelSend : public ChannelSendInterface, - public AudioPacketizationCallback { // receive encoded - // packets from the ACM + public AudioPacketizationCallback, // receive encoded + // packets from the ACM + public RtcpPacketTypeCounterObserver { public: // TODO(nisse): Make OnUplinkPacketLossRate public, and delete friend // declaration. @@ -68,7 +70,6 @@ class ChannelSend : public ChannelSendInterface, ChannelSend(Clock* clock, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, Transport* rtp_transport, RtcpRttStats* rtcp_rtt_stats, RtcEventLog* rtc_event_log, @@ -77,7 +78,8 @@ class ChannelSend : public ChannelSendInterface, bool extmap_allow_mixed, int rtcp_report_interval_ms, uint32_t ssrc, - rtc::scoped_refptr frame_transformer); + rtc::scoped_refptr frame_transformer, + TransportFeedbackObserver* feedback_observer); ~ChannelSend() override; @@ -106,7 +108,7 @@ class ChannelSend : public ChannelSendInterface, ANAStats GetANAStatistics() const override; // Used by AudioSendStream. - RtpRtcp* GetRtpRtcp() const override; + RtpRtcpInterface* GetRtpRtcp() const override; void RegisterCngPayloadType(int payload_type, int payload_frequency) override; @@ -149,6 +151,11 @@ class ChannelSend : public ChannelSendInterface, rtc::scoped_refptr frame_transformer) override; + // RtcpPacketTypeCounterObserver. + void RtcpPacketTypesCounterUpdated( + uint32_t ssrc, + const RtcpPacketTypeCounter& packet_counter) override; + private: // From AudioPacketizationCallback in the ACM int32_t SendData(AudioFrameType frameType, @@ -177,30 +184,29 @@ class ChannelSend : public ChannelSendInterface, // specific threads we know about. 
The goal is to eventually split up // voe::Channel into parts with single-threaded semantics, and thereby reduce // the need for locks. - rtc::ThreadChecker worker_thread_checker_; - rtc::ThreadChecker module_process_thread_checker_; + SequenceChecker worker_thread_checker_; // Methods accessed from audio and video threads are checked for sequential- // only access. We don't necessarily own and control these threads, so thread // checkers cannot be used. E.g. Chromium may transfer "ownership" from one // audio thread to another, but access is still sequential. rtc::RaceChecker audio_thread_race_checker_; - rtc::CriticalSection volume_settings_critsect_; + mutable Mutex volume_settings_mutex_; + const uint32_t ssrc_; bool sending_ RTC_GUARDED_BY(&worker_thread_checker_) = false; RtcEventLog* const event_log_; - std::unique_ptr _rtpRtcpModule; + std::unique_ptr rtp_rtcp_; std::unique_ptr rtp_sender_audio_; std::unique_ptr audio_coding_; uint32_t _timeStamp RTC_GUARDED_BY(encoder_queue_); // uses - ProcessThread* const _moduleProcessThreadPtr; RmsLevel rms_level_ RTC_GUARDED_BY(encoder_queue_); - bool input_mute_ RTC_GUARDED_BY(volume_settings_critsect_); + bool input_mute_ RTC_GUARDED_BY(volume_settings_mutex_); bool previous_frame_muted_ RTC_GUARDED_BY(encoder_queue_); // VoeRTP_RTCP // TODO(henrika): can today be accessed on the main thread and on the @@ -212,12 +218,11 @@ class ChannelSend : public ChannelSendInterface, PacketRouter* packet_router_ RTC_GUARDED_BY(&worker_thread_checker_) = nullptr; - const std::unique_ptr feedback_observer_proxy_; + TransportFeedbackObserver* const feedback_observer_; const std::unique_ptr rtp_packet_pacer_proxy_; const std::unique_ptr retransmission_rate_limiter_; - rtc::ThreadChecker construction_thread_; - + SequenceChecker construction_thread_; bool encoder_queue_is_active_ RTC_GUARDED_BY(encoder_queue_) = false; @@ -233,73 +238,42 @@ class ChannelSend : public ChannelSendInterface, rtc::scoped_refptr 
frame_transformer_delegate_ RTC_GUARDED_BY(encoder_queue_); - rtc::CriticalSection bitrate_crit_section_; - int configured_bitrate_bps_ RTC_GUARDED_BY(bitrate_crit_section_) = 0; + mutable Mutex bitrate_mutex_; + int configured_bitrate_bps_ RTC_GUARDED_BY(bitrate_mutex_) = 0; // Defined last to ensure that there are no running tasks when the other // members are destroyed. rtc::TaskQueue encoder_queue_; -}; - -const int kTelephoneEventAttenuationdB = 10; -class TransportFeedbackProxy : public TransportFeedbackObserver { - public: - TransportFeedbackProxy() : feedback_observer_(nullptr) { - pacer_thread_.Detach(); - network_thread_.Detach(); - } - - void SetTransportFeedbackObserver( - TransportFeedbackObserver* feedback_observer) { - RTC_DCHECK(thread_checker_.IsCurrent()); - rtc::CritScope lock(&crit_); - feedback_observer_ = feedback_observer; - } + const bool fixing_timestamp_stall_; - // Implements TransportFeedbackObserver. - void OnAddPacket(const RtpPacketSendInfo& packet_info) override { - RTC_DCHECK(pacer_thread_.IsCurrent()); - rtc::CritScope lock(&crit_); - if (feedback_observer_) - feedback_observer_->OnAddPacket(packet_info); - } - - void OnTransportFeedback(const rtcp::TransportFeedback& feedback) override { - RTC_DCHECK(network_thread_.IsCurrent()); - rtc::CritScope lock(&crit_); - if (feedback_observer_) - feedback_observer_->OnTransportFeedback(feedback); - } - - private: - rtc::CriticalSection crit_; - rtc::ThreadChecker thread_checker_; - rtc::ThreadChecker pacer_thread_; - rtc::ThreadChecker network_thread_; - TransportFeedbackObserver* feedback_observer_ RTC_GUARDED_BY(&crit_); + mutable Mutex rtcp_counter_mutex_; + RtcpPacketTypeCounter rtcp_packet_type_counter_ + RTC_GUARDED_BY(rtcp_counter_mutex_); }; +const int kTelephoneEventAttenuationdB = 10; + class RtpPacketSenderProxy : public RtpPacketSender { public: RtpPacketSenderProxy() : rtp_packet_pacer_(nullptr) {} void SetPacketPacer(RtpPacketSender* rtp_packet_pacer) { 
RTC_DCHECK(thread_checker_.IsCurrent()); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); rtp_packet_pacer_ = rtp_packet_pacer; } void EnqueuePackets( std::vector> packets) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); rtp_packet_pacer_->EnqueuePackets(std::move(packets)); } private: - rtc::ThreadChecker thread_checker_; - rtc::CriticalSection crit_; - RtpPacketSender* rtp_packet_pacer_ RTC_GUARDED_BY(&crit_); + SequenceChecker thread_checker_; + Mutex mutex_; + RtpPacketSender* rtp_packet_pacer_ RTC_GUARDED_BY(&mutex_); }; class VoERtcpObserver : public RtcpBandwidthObserver { @@ -309,12 +283,12 @@ class VoERtcpObserver : public RtcpBandwidthObserver { ~VoERtcpObserver() override {} void SetBandwidthObserver(RtcpBandwidthObserver* bandwidth_observer) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); bandwidth_observer_ = bandwidth_observer; } void OnReceivedEstimatedBitrate(uint32_t bitrate) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (bandwidth_observer_) { bandwidth_observer_->OnReceivedEstimatedBitrate(bitrate); } @@ -324,7 +298,7 @@ class VoERtcpObserver : public RtcpBandwidthObserver { int64_t rtt, int64_t now_ms) override { { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (bandwidth_observer_) { bandwidth_observer_->OnReceivedRtcpReceiverReport(report_blocks, rtt, now_ms); @@ -372,8 +346,8 @@ class VoERtcpObserver : public RtcpBandwidthObserver { ChannelSend* owner_; // Maps remote side ssrc to extended highest sequence number received. std::map extended_max_sequence_number_; - rtc::CriticalSection crit_; - RtcpBandwidthObserver* bandwidth_observer_ RTC_GUARDED_BY(crit_); + Mutex mutex_; + RtcpBandwidthObserver* bandwidth_observer_ RTC_GUARDED_BY(mutex_); }; int32_t ChannelSend::SendData(AudioFrameType frameType, @@ -388,9 +362,9 @@ int32_t ChannelSend::SendData(AudioFrameType frameType, // Asynchronously transform the payload before sending it. 
After the payload // is transformed, the delegate will call SendRtpAudio to send it. frame_transformer_delegate_->Transform( - frameType, payloadType, rtp_timestamp, _rtpRtcpModule->StartTimestamp(), + frameType, payloadType, rtp_timestamp, rtp_rtcp_->StartTimestamp(), payloadData, payloadSize, absolute_capture_timestamp_ms, - _rtpRtcpModule->SSRC()); + rtp_rtcp_->SSRC()); return 0; } return SendRtpAudio(frameType, payloadType, rtp_timestamp, payload, @@ -427,7 +401,7 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, // Encrypt the audio payload into the buffer. size_t bytes_written = 0; int encrypt_status = frame_encryptor_->Encrypt( - cricket::MEDIA_TYPE_AUDIO, _rtpRtcpModule->SSRC(), + cricket::MEDIA_TYPE_AUDIO, rtp_rtcp_->SSRC(), /*additional_data=*/nullptr, payload, encrypted_audio_payload, &bytes_written); if (encrypt_status != 0) { @@ -449,12 +423,12 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, // Push data from ACM to RTP/RTCP-module to deliver audio frame for // packetization. - if (!_rtpRtcpModule->OnSendingRtpFrame(rtp_timestamp, - // Leaving the time when this frame was - // received from the capture device as - // undefined for voice for now. - -1, payloadType, - /*force_sender_report=*/false)) { + if (!rtp_rtcp_->OnSendingRtpFrame(rtp_timestamp, + // Leaving the time when this frame was + // received from the capture device as + // undefined for voice for now. + -1, payloadType, + /*force_sender_report=*/false)) { return -1; } @@ -466,9 +440,8 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, // This call will trigger Transport::SendPacket() from the RTP/RTCP module. 
if (!rtp_sender_audio_->SendAudio( - frameType, payloadType, - rtp_timestamp + _rtpRtcpModule->StartTimestamp(), payload.data(), - payload.size(), absolute_capture_timestamp_ms)) { + frameType, payloadType, rtp_timestamp + rtp_rtcp_->StartTimestamp(), + payload.data(), payload.size(), absolute_capture_timestamp_ms)) { RTC_DLOG(LS_ERROR) << "ChannelSend::SendData() failed to send data to RTP/RTCP module"; return -1; @@ -480,7 +453,6 @@ int32_t ChannelSend::SendRtpAudio(AudioFrameType frameType, ChannelSend::ChannelSend( Clock* clock, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, Transport* rtp_transport, RtcpRttStats* rtcp_rtt_stats, RtcEventLog* rtc_event_log, @@ -489,16 +461,17 @@ ChannelSend::ChannelSend( bool extmap_allow_mixed, int rtcp_report_interval_ms, uint32_t ssrc, - rtc::scoped_refptr frame_transformer) - : event_log_(rtc_event_log), + rtc::scoped_refptr frame_transformer, + TransportFeedbackObserver* feedback_observer) + : ssrc_(ssrc), + event_log_(rtc_event_log), _timeStamp(0), // This is just an offset, RTP module will add it's own // random offset - _moduleProcessThreadPtr(module_process_thread), input_mute_(false), previous_frame_muted_(false), _includeAudioLevelIndication(false), rtcp_observer_(new VoERtcpObserver(this)), - feedback_observer_proxy_(new TransportFeedbackProxy()), + feedback_observer_(feedback_observer), rtp_packet_pacer_proxy_(new RtpPacketSenderProxy()), retransmission_rate_limiter_( new RateLimiter(clock, kMaxRetransmissionWindowMs)), @@ -506,15 +479,14 @@ ChannelSend::ChannelSend( crypto_options_(crypto_options), encoder_queue_(task_queue_factory->CreateTaskQueue( "AudioEncoder", - TaskQueueFactory::Priority::NORMAL)) { - RTC_DCHECK(module_process_thread); - module_process_thread_checker_.Detach(); - + TaskQueueFactory::Priority::NORMAL)), + fixing_timestamp_stall_( + !field_trial::IsDisabled("WebRTC-Audio-FixTimestampStall")) { 
audio_coding_.reset(AudioCodingModule::Create(AudioCodingModule::Config())); - RtpRtcp::Configuration configuration; + RtpRtcpInterface::Configuration configuration; configuration.bandwidth_callback = rtcp_observer_.get(); - configuration.transport_feedback_callback = feedback_observer_proxy_.get(); + configuration.transport_feedback_callback = feedback_observer_; configuration.clock = (clock ? clock : Clock::GetRealTimeClock()); configuration.audio = true; configuration.outgoing_transport = rtp_transport; @@ -527,19 +499,18 @@ ChannelSend::ChannelSend( retransmission_rate_limiter_.get(); configuration.extmap_allow_mixed = extmap_allow_mixed; configuration.rtcp_report_interval_ms = rtcp_report_interval_ms; + configuration.rtcp_packet_type_counter_observer = this; configuration.local_media_ssrc = ssrc; - _rtpRtcpModule = RtpRtcp::Create(configuration); - _rtpRtcpModule->SetSendingMediaStatus(false); - - rtp_sender_audio_ = std::make_unique( - configuration.clock, _rtpRtcpModule->RtpSender()); + rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(configuration); + rtp_rtcp_->SetSendingMediaStatus(false); - _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE); + rtp_sender_audio_ = std::make_unique(configuration.clock, + rtp_rtcp_->RtpSender()); // Ensure that RTCP is enabled by default for the created channel. 
- _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound); + rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound); int error = audio_coding_->RegisterTransportCallback(this); RTC_DCHECK_EQ(0, error); @@ -557,9 +528,6 @@ ChannelSend::~ChannelSend() { StopSend(); int error = audio_coding_->RegisterTransportCallback(NULL); RTC_DCHECK_EQ(0, error); - - if (_moduleProcessThreadPtr) - _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); } void ChannelSend::StartSend() { @@ -567,8 +535,8 @@ void ChannelSend::StartSend() { RTC_DCHECK(!sending_); sending_ = true; - _rtpRtcpModule->SetSendingMediaStatus(true); - int ret = _rtpRtcpModule->SetSendingStatus(true); + rtp_rtcp_->SetSendingMediaStatus(true); + int ret = rtp_rtcp_->SetSendingStatus(true); RTC_DCHECK_EQ(0, ret); // It is now OK to start processing on the encoder task queue. encoder_queue_.PostTask([this] { @@ -594,10 +562,10 @@ void ChannelSend::StopSend() { // Reset sending SSRC and sequence number and triggers direct transmission // of RTCP BYE - if (_rtpRtcpModule->SetSendingStatus(false) == -1) { + if (rtp_rtcp_->SetSendingStatus(false) == -1) { RTC_DLOG(LS_ERROR) << "StartSend() RTP/RTCP failed to stop sending"; } - _rtpRtcpModule->SetSendingMediaStatus(false); + rtp_rtcp_->SetSendingMediaStatus(false); } void ChannelSend::SetEncoder(int payload_type, @@ -608,8 +576,8 @@ void ChannelSend::SetEncoder(int payload_type, // The RTP/RTCP module needs to know the RTP timestamp rate (i.e. clockrate) // as well as some other things, so we collect this info and send it along. - _rtpRtcpModule->RegisterSendPayloadFrequency(payload_type, - encoder->RtpTimestampRateHz()); + rtp_rtcp_->RegisterSendPayloadFrequency(payload_type, + encoder->RtpTimestampRateHz()); rtp_sender_audio_->RegisterAudioPayload("audio", payload_type, encoder->RtpTimestampRateHz(), encoder->NumChannels(), 0); @@ -642,7 +610,7 @@ void ChannelSend::OnBitrateAllocation(BitrateAllocationUpdate update) { // rules. 
// RTC_DCHECK(worker_thread_checker_.IsCurrent() || // module_process_thread_checker_.IsCurrent()); - rtc::CritScope lock(&bitrate_crit_section_); + MutexLock lock(&bitrate_mutex_); CallEncoder([&](AudioEncoder* encoder) { encoder->OnReceivedUplinkAllocation(update); @@ -652,7 +620,7 @@ void ChannelSend::OnBitrateAllocation(BitrateAllocationUpdate update) { } int ChannelSend::GetBitrate() const { - rtc::CritScope lock(&bitrate_crit_section_); + MutexLock lock(&bitrate_mutex_); return configured_bitrate_bps_; } @@ -663,8 +631,10 @@ void ChannelSend::OnUplinkPacketLossRate(float packet_loss_rate) { } void ChannelSend::ReceivedRTCPPacket(const uint8_t* data, size_t length) { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + // Deliver RTCP packet to RTP/RTCP module for parsing - _rtpRtcpModule->IncomingRtcpPacket(data, length); + rtp_rtcp_->IncomingRtcpPacket(data, length); int64_t rtt = GetRTT(); if (rtt == 0) { @@ -685,12 +655,12 @@ void ChannelSend::ReceivedRTCPPacket(const uint8_t* data, size_t length) { void ChannelSend::SetInputMute(bool enable) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); - rtc::CritScope cs(&volume_settings_critsect_); + MutexLock lock(&volume_settings_mutex_); input_mute_ = enable; } bool ChannelSend::InputMute() const { - rtc::CritScope cs(&volume_settings_critsect_); + MutexLock lock(&volume_settings_mutex_); return input_mute_; } @@ -713,7 +683,7 @@ bool ChannelSend::SendTelephoneEventOutband(int event, int duration_ms) { void ChannelSend::RegisterCngPayloadType(int payload_type, int payload_frequency) { - _rtpRtcpModule->RegisterSendPayloadFrequency(payload_type, payload_frequency); + rtp_rtcp_->RegisterSendPayloadFrequency(payload_type, payload_frequency); rtp_sender_audio_->RegisterAudioPayload("CN", payload_type, payload_frequency, 1, 0); } @@ -723,7 +693,7 @@ void ChannelSend::SetSendTelephoneEventPayloadType(int payload_type, RTC_DCHECK_RUN_ON(&worker_thread_checker_); RTC_DCHECK_LE(0, payload_type); RTC_DCHECK_GE(127, payload_type); 
- _rtpRtcpModule->RegisterSendPayloadFrequency(payload_type, payload_frequency); + rtp_rtcp_->RegisterSendPayloadFrequency(payload_type, payload_frequency); rtp_sender_audio_->RegisterAudioPayload("telephone-event", payload_type, payload_frequency, 0, 0); } @@ -732,9 +702,9 @@ void ChannelSend::SetSendAudioLevelIndicationStatus(bool enable, int id) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); _includeAudioLevelIndication = enable; if (enable) { - _rtpRtcpModule->RegisterRtpHeaderExtension(AudioLevel::kUri, id); + rtp_rtcp_->RegisterRtpHeaderExtension(AudioLevel::kUri, id); } else { - _rtpRtcpModule->DeregisterSendRtpHeaderExtension(AudioLevel::kUri); + rtp_rtcp_->DeregisterSendRtpHeaderExtension(AudioLevel::kUri); } } @@ -743,31 +713,25 @@ void ChannelSend::RegisterSenderCongestionControlObjects( RtcpBandwidthObserver* bandwidth_observer) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); RtpPacketSender* rtp_packet_pacer = transport->packet_sender(); - TransportFeedbackObserver* transport_feedback_observer = - transport->transport_feedback_observer(); PacketRouter* packet_router = transport->packet_router(); RTC_DCHECK(rtp_packet_pacer); - RTC_DCHECK(transport_feedback_observer); RTC_DCHECK(packet_router); RTC_DCHECK(!packet_router_); rtcp_observer_->SetBandwidthObserver(bandwidth_observer); - feedback_observer_proxy_->SetTransportFeedbackObserver( - transport_feedback_observer); rtp_packet_pacer_proxy_->SetPacketPacer(rtp_packet_pacer); - _rtpRtcpModule->SetStorePacketsStatus(true, 600); + rtp_rtcp_->SetStorePacketsStatus(true, 600); constexpr bool remb_candidate = false; - packet_router->AddSendRtpModule(_rtpRtcpModule.get(), remb_candidate); + packet_router->AddSendRtpModule(rtp_rtcp_.get(), remb_candidate); packet_router_ = packet_router; } void ChannelSend::ResetSenderCongestionControlObjects() { RTC_DCHECK_RUN_ON(&worker_thread_checker_); RTC_DCHECK(packet_router_); - _rtpRtcpModule->SetStorePacketsStatus(false, 600); + rtp_rtcp_->SetStorePacketsStatus(false, 
600); rtcp_observer_->SetBandwidthObserver(nullptr); - feedback_observer_proxy_->SetTransportFeedbackObserver(nullptr); - packet_router_->RemoveSendRtpModule(_rtpRtcpModule.get()); + packet_router_->RemoveSendRtpModule(rtp_rtcp_.get()); packet_router_ = nullptr; rtp_packet_pacer_proxy_->SetPacketPacer(nullptr); } @@ -776,7 +740,7 @@ void ChannelSend::SetRTCP_CNAME(absl::string_view c_name) { RTC_DCHECK_RUN_ON(&worker_thread_checker_); // Note: SetCNAME() accepts a c string of length at most 255. const std::string c_name_limited(c_name.substr(0, 255)); - int ret = _rtpRtcpModule->SetCNAME(c_name_limited.c_str()) != 0; + int ret = rtp_rtcp_->SetCNAME(c_name_limited.c_str()) != 0; RTC_DCHECK_EQ(0, ret) << "SetRTCP_CNAME() failed to set RTCP CNAME"; } @@ -785,25 +749,20 @@ std::vector ChannelSend::GetRemoteRTCPReportBlocks() const { // Get the report blocks from the latest received RTCP Sender or Receiver // Report. Each element in the vector contains the sender's SSRC and a // report block according to RFC 3550. 
- std::vector rtcp_report_blocks; - - int ret = _rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks); - RTC_DCHECK_EQ(0, ret); - std::vector report_blocks; - - std::vector::const_iterator it = rtcp_report_blocks.begin(); - for (; it != rtcp_report_blocks.end(); ++it) { + for (const ReportBlockData& data : rtp_rtcp_->GetLatestReportBlockData()) { ReportBlock report_block; - report_block.sender_SSRC = it->sender_ssrc; - report_block.source_SSRC = it->source_ssrc; - report_block.fraction_lost = it->fraction_lost; - report_block.cumulative_num_packets_lost = it->packets_lost; + report_block.sender_SSRC = data.report_block().sender_ssrc; + report_block.source_SSRC = data.report_block().source_ssrc; + report_block.fraction_lost = data.report_block().fraction_lost; + report_block.cumulative_num_packets_lost = data.report_block().packets_lost; report_block.extended_highest_sequence_number = - it->extended_highest_sequence_number; - report_block.interarrival_jitter = it->jitter; - report_block.last_SR_timestamp = it->last_sender_report_timestamp; - report_block.delay_since_last_SR = it->delay_since_last_sender_report; + data.report_block().extended_highest_sequence_number; + report_block.interarrival_jitter = data.report_block().jitter; + report_block.last_SR_timestamp = + data.report_block().last_sender_report_timestamp; + report_block.delay_since_last_SR = + data.report_block().delay_since_last_sender_report; report_blocks.push_back(report_block); } return report_blocks; @@ -816,7 +775,7 @@ CallSendStatistics ChannelSend::GetRTCPStatistics() const { StreamDataCounters rtp_stats; StreamDataCounters rtx_stats; - _rtpRtcpModule->GetSendStreamDataCounters(&rtp_stats, &rtx_stats); + rtp_rtcp_->GetSendStreamDataCounters(&rtp_stats, &rtx_stats); stats.payload_bytes_sent = rtp_stats.transmitted.payload_bytes + rtx_stats.transmitted.payload_bytes; stats.header_and_padding_bytes_sent = @@ -829,11 +788,26 @@ CallSendStatistics ChannelSend::GetRTCPStatistics() const { 
stats.packetsSent = rtp_stats.transmitted.packets + rtx_stats.transmitted.packets; stats.retransmitted_packets_sent = rtp_stats.retransmitted.packets; - stats.report_block_datas = _rtpRtcpModule->GetLatestReportBlockData(); + stats.report_block_datas = rtp_rtcp_->GetLatestReportBlockData(); + + { + MutexLock lock(&rtcp_counter_mutex_); + stats.nacks_rcvd = rtcp_packet_type_counter_.nack_packets; + } return stats; } +void ChannelSend::RtcpPacketTypesCounterUpdated( + uint32_t ssrc, + const RtcpPacketTypeCounter& packet_counter) { + if (ssrc != ssrc_) { + return; + } + MutexLock lock(&rtcp_counter_mutex_); + rtcp_packet_type_counter_ = packet_counter; +} + void ChannelSend::ProcessAndEncodeAudio( std::unique_ptr audio_frame) { RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_); @@ -847,6 +821,10 @@ void ChannelSend::ProcessAndEncodeAudio( [this, audio_frame = std::move(audio_frame)]() mutable { RTC_DCHECK_RUN_ON(&encoder_queue_); if (!encoder_queue_is_active_) { + if (fixing_timestamp_stall_) { + _timeStamp += + static_cast(audio_frame->samples_per_channel_); + } return; } // Measure time between when the audio frame is added to the task queue @@ -894,30 +872,20 @@ ANAStats ChannelSend::GetANAStatistics() const { return audio_coding_->GetANAStats(); } -RtpRtcp* ChannelSend::GetRtpRtcp() const { - RTC_DCHECK(module_process_thread_checker_.IsCurrent()); - return _rtpRtcpModule.get(); +RtpRtcpInterface* ChannelSend::GetRtpRtcp() const { + return rtp_rtcp_.get(); } int64_t ChannelSend::GetRTT() const { - std::vector report_blocks; - _rtpRtcpModule->RemoteRTCPStat(&report_blocks); - + std::vector report_blocks = + rtp_rtcp_->GetLatestReportBlockData(); if (report_blocks.empty()) { return 0; } - int64_t rtt = 0; - int64_t avg_rtt = 0; - int64_t max_rtt = 0; - int64_t min_rtt = 0; // We don't know in advance the remote ssrc used by the other end's receiver - // reports, so use the SSRC of the first report block for calculating the RTT. 
- if (_rtpRtcpModule->RTT(report_blocks[0].sender_ssrc, &rtt, &avg_rtt, - &min_rtt, &max_rtt) != 0) { - return 0; - } - return rtt; + // reports, so use the first report block for the RTT. + return report_blocks.front().last_rtt_ms(); } void ChannelSend::SetFrameEncryptor( @@ -965,7 +933,7 @@ void ChannelSend::InitFrameTransformerDelegate( absolute_capture_timestamp_ms); }; frame_transformer_delegate_ = - new rtc::RefCountedObject( + rtc::make_ref_counted( std::move(send_audio_callback), std::move(frame_transformer), &encoder_queue_); frame_transformer_delegate_->Init(); @@ -976,7 +944,6 @@ void ChannelSend::InitFrameTransformerDelegate( std::unique_ptr CreateChannelSend( Clock* clock, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, Transport* rtp_transport, RtcpRttStats* rtcp_rtt_stats, RtcEventLog* rtc_event_log, @@ -985,12 +952,13 @@ std::unique_ptr CreateChannelSend( bool extmap_allow_mixed, int rtcp_report_interval_ms, uint32_t ssrc, - rtc::scoped_refptr frame_transformer) { + rtc::scoped_refptr frame_transformer, + TransportFeedbackObserver* feedback_observer) { return std::make_unique( - clock, task_queue_factory, module_process_thread, rtp_transport, - rtcp_rtt_stats, rtc_event_log, frame_encryptor, crypto_options, - extmap_allow_mixed, rtcp_report_interval_ms, ssrc, - std::move(frame_transformer)); + clock, task_queue_factory, rtp_transport, rtcp_rtt_stats, rtc_event_log, + frame_encryptor, crypto_options, extmap_allow_mixed, + rtcp_report_interval_ms, ssrc, std::move(frame_transformer), + feedback_observer); } } // namespace voe diff --git a/audio/channel_send.h b/audio/channel_send.h index cb3b99287b..67391af956 100644 --- a/audio/channel_send.h +++ b/audio/channel_send.h @@ -22,15 +22,13 @@ #include "api/function_view.h" #include "api/task_queue/task_queue_factory.h" #include "modules/rtp_rtcp/include/report_block_data.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" +#include 
"modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/rtp_sender_audio.h" namespace webrtc { class FrameEncryptorInterface; -class ProcessThread; class RtcEventLog; -class RtpRtcp; class RtpTransportControllerSendInterface; struct CallSendStatistics { @@ -47,6 +45,7 @@ struct CallSendStatistics { // ReportBlockData represents the latest Report Block that was received for // that pair. std::vector report_block_datas; + uint32_t nacks_rcvd; }; // See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details. @@ -97,7 +96,7 @@ class ChannelSendInterface { virtual void ProcessAndEncodeAudio( std::unique_ptr audio_frame) = 0; - virtual RtpRtcp* GetRtpRtcp() const = 0; + virtual RtpRtcpInterface* GetRtpRtcp() const = 0; // In RTP we currently rely on RTCP packets (|ReceivedRTCPPacket|) to inform // about RTT. @@ -127,7 +126,6 @@ class ChannelSendInterface { std::unique_ptr CreateChannelSend( Clock* clock, TaskQueueFactory* task_queue_factory, - ProcessThread* module_process_thread, Transport* rtp_transport, RtcpRttStats* rtcp_rtt_stats, RtcEventLog* rtc_event_log, @@ -136,7 +134,8 @@ std::unique_ptr CreateChannelSend( bool extmap_allow_mixed, int rtcp_report_interval_ms, uint32_t ssrc, - rtc::scoped_refptr frame_transformer); + rtc::scoped_refptr frame_transformer, + TransportFeedbackObserver* feedback_observer); } // namespace voe } // namespace webrtc diff --git a/audio/channel_send_frame_transformer_delegate.cc b/audio/channel_send_frame_transformer_delegate.cc index 53b573eb8b..72a459d897 100644 --- a/audio/channel_send_frame_transformer_delegate.cc +++ b/audio/channel_send_frame_transformer_delegate.cc @@ -77,7 +77,7 @@ void ChannelSendFrameTransformerDelegate::Reset() { frame_transformer_->UnregisterTransformedFrameCallback(); frame_transformer_ = nullptr; - rtc::CritScope lock(&send_lock_); + MutexLock lock(&send_lock_); send_frame_callback_ = SendFrameCallback(); } @@ -97,7 +97,7 @@ void 
ChannelSendFrameTransformerDelegate::Transform( void ChannelSendFrameTransformerDelegate::OnTransformedFrame( std::unique_ptr frame) { - rtc::CritScope lock(&send_lock_); + MutexLock lock(&send_lock_); if (!send_frame_callback_) return; rtc::scoped_refptr delegate = this; @@ -109,7 +109,7 @@ void ChannelSendFrameTransformerDelegate::OnTransformedFrame( void ChannelSendFrameTransformerDelegate::SendFrame( std::unique_ptr frame) const { - rtc::CritScope lock(&send_lock_); + MutexLock lock(&send_lock_); RTC_DCHECK_RUN_ON(encoder_queue_); if (!send_frame_callback_) return; diff --git a/audio/channel_send_frame_transformer_delegate.h b/audio/channel_send_frame_transformer_delegate.h index 5added7b31..9b7eb33b5c 100644 --- a/audio/channel_send_frame_transformer_delegate.h +++ b/audio/channel_send_frame_transformer_delegate.h @@ -14,10 +14,10 @@ #include #include "api/frame_transformer_interface.h" +#include "api/sequence_checker.h" #include "modules/audio_coding/include/audio_coding_module_typedefs.h" #include "rtc_base/buffer.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" namespace webrtc { @@ -72,7 +72,7 @@ class ChannelSendFrameTransformerDelegate : public TransformedFrameCallback { ~ChannelSendFrameTransformerDelegate() override = default; private: - rtc::CriticalSection send_lock_; + mutable Mutex send_lock_; SendFrameCallback send_frame_callback_ RTC_GUARDED_BY(send_lock_); rtc::scoped_refptr frame_transformer_; rtc::TaskQueue* encoder_queue_ RTC_GUARDED_BY(send_lock_); diff --git a/audio/channel_send_frame_transformer_delegate_unittest.cc b/audio/channel_send_frame_transformer_delegate_unittest.cc index e2f3647c0a..2ec78f8922 100644 --- a/audio/channel_send_frame_transformer_delegate_unittest.cc +++ b/audio/channel_send_frame_transformer_delegate_unittest.cc @@ -53,9 +53,9 @@ class MockChannelSend { 
TEST(ChannelSendFrameTransformerDelegateTest, RegisterTransformedFrameCallbackOnInit) { rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + rtc::make_ref_counted( ChannelSendFrameTransformerDelegate::SendFrameCallback(), mock_frame_transformer, nullptr); EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameCallback); @@ -67,9 +67,9 @@ TEST(ChannelSendFrameTransformerDelegateTest, TEST(ChannelSendFrameTransformerDelegateTest, UnregisterTransformedFrameCallbackOnReset) { rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + rtc::make_ref_counted( ChannelSendFrameTransformerDelegate::SendFrameCallback(), mock_frame_transformer, nullptr); EXPECT_CALL(*mock_frame_transformer, UnregisterTransformedFrameCallback); @@ -82,10 +82,10 @@ TEST(ChannelSendFrameTransformerDelegateTest, TransformRunsChannelSendCallback) { TaskQueueForTest channel_queue("channel_queue"); rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject>(); + rtc::make_ref_counted>(); MockChannelSend mock_channel; rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + rtc::make_ref_counted( mock_channel.callback(), mock_frame_transformer, &channel_queue); rtc::scoped_refptr callback; EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameCallback) @@ -112,10 +112,10 @@ TEST(ChannelSendFrameTransformerDelegateTest, OnTransformedDoesNotRunChannelSendCallbackAfterReset) { TaskQueueForTest channel_queue("channel_queue"); rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject>(); + rtc::make_ref_counted>(); MockChannelSend mock_channel; rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + rtc::make_ref_counted( mock_channel.callback(), mock_frame_transformer, &channel_queue); delegate->Reset(); diff --git a/audio/mock_voe_channel_proxy.h 
b/audio/mock_voe_channel_proxy.h index c0fcbc4872..ea2a2ac3f0 100644 --- a/audio/mock_voe_channel_proxy.h +++ b/audio/mock_voe_channel_proxy.h @@ -17,6 +17,7 @@ #include #include +#include "api/crypto/frame_decryptor_interface.h" #include "api/test/mock_frame_encryptor.h" #include "audio/channel_receive.h" #include "audio/channel_send.h" @@ -35,7 +36,10 @@ class MockChannelReceive : public voe::ChannelReceiveInterface { (override)); MOCK_METHOD(void, ResetReceiverCongestionControlObjects, (), (override)); MOCK_METHOD(CallReceiveStatistics, GetRTCPStatistics, (), (const, override)); - MOCK_METHOD(NetworkStatistics, GetNetworkStatistics, (), (const, override)); + MOCK_METHOD(NetworkStatistics, + GetNetworkStatistics, + (bool), + (const, override)); MOCK_METHOD(AudioDecodingCallStats, GetDecodingCallStatistics, (), @@ -56,6 +60,7 @@ class MockChannelReceive : public voe::ChannelReceiveInterface { (int sample_rate_hz, AudioFrame*), (override)); MOCK_METHOD(int, PreferredSampleRate, (), (const, override)); + MOCK_METHOD(void, SetSourceTracker, (SourceTracker*), (override)); MOCK_METHOD(void, SetAssociatedSendChannel, (const voe::ChannelSendInterface*), @@ -76,7 +81,7 @@ class MockChannelReceive : public voe::ChannelReceiveInterface { GetSyncInfo, (), (const, override)); - MOCK_METHOD(void, SetMinimumPlayoutDelay, (int delay_ms), (override)); + MOCK_METHOD(bool, SetMinimumPlayoutDelay, (int delay_ms), (override)); MOCK_METHOD(bool, SetBaseMinimumPlayoutDelayMs, (int delay_ms), (override)); MOCK_METHOD(int, GetBaseMinimumPlayoutDelayMs, (), (const, override)); MOCK_METHOD((absl::optional>), @@ -94,6 +99,13 @@ class MockChannelReceive : public voe::ChannelReceiveInterface { SetDepacketizerToDecoderFrameTransformer, (rtc::scoped_refptr frame_transformer), (override)); + MOCK_METHOD( + void, + SetFrameDecryptor, + (rtc::scoped_refptr frame_decryptor), + (override)); + MOCK_METHOD(void, OnLocalSsrcChange, (uint32_t local_ssrc), (override)); + MOCK_METHOD(uint32_t, 
GetLocalSsrc, (), (const, override)); }; class MockChannelSend : public voe::ChannelSendInterface { @@ -152,7 +164,7 @@ class MockChannelSend : public voe::ChannelSendInterface { ProcessAndEncodeAudio, (std::unique_ptr), (override)); - MOCK_METHOD(RtpRtcp*, GetRtpRtcp, (), (const, override)); + MOCK_METHOD(RtpRtcpInterface*, GetRtpRtcp, (), (const, override)); MOCK_METHOD(int, GetBitrate, (), (const, override)); MOCK_METHOD(int64_t, GetRTT, (), (const, override)); MOCK_METHOD(void, StartSend, (), (override)); diff --git a/audio/null_audio_poller.h b/audio/null_audio_poller.h index 97cd2c7e6c..47e67a91da 100644 --- a/audio/null_audio_poller.h +++ b/audio/null_audio_poller.h @@ -13,9 +13,9 @@ #include +#include "api/sequence_checker.h" #include "modules/audio_device/include/audio_device_defines.h" #include "rtc_base/message_handler.h" -#include "rtc_base/thread_checker.h" namespace webrtc { namespace internal { @@ -29,7 +29,7 @@ class NullAudioPoller final : public rtc::MessageHandler { void OnMessage(rtc::Message* msg) override; private: - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; AudioTransport* const audio_transport_; int64_t reschedule_at_; }; diff --git a/audio/remix_resample.cc b/audio/remix_resample.cc index 3694d34e40..178af622a1 100644 --- a/audio/remix_resample.cc +++ b/audio/remix_resample.cc @@ -56,9 +56,10 @@ void RemixAndResample(const int16_t* src_data, if (resampler->InitializeIfNeeded(sample_rate_hz, dst_frame->sample_rate_hz_, audio_ptr_num_channels) == -1) { - FATAL() << "InitializeIfNeeded failed: sample_rate_hz = " << sample_rate_hz - << ", dst_frame->sample_rate_hz_ = " << dst_frame->sample_rate_hz_ - << ", audio_ptr_num_channels = " << audio_ptr_num_channels; + RTC_FATAL() << "InitializeIfNeeded failed: sample_rate_hz = " + << sample_rate_hz << ", dst_frame->sample_rate_hz_ = " + << dst_frame->sample_rate_hz_ + << ", audio_ptr_num_channels = " << audio_ptr_num_channels; } // TODO(yujo): for muted input frames, don't 
resample. Either 1) allow @@ -70,9 +71,10 @@ void RemixAndResample(const int16_t* src_data, resampler->Resample(audio_ptr, src_length, dst_frame->mutable_data(), AudioFrame::kMaxDataSizeSamples); if (out_length == -1) { - FATAL() << "Resample failed: audio_ptr = " << audio_ptr - << ", src_length = " << src_length - << ", dst_frame->mutable_data() = " << dst_frame->mutable_data(); + RTC_FATAL() << "Resample failed: audio_ptr = " << audio_ptr + << ", src_length = " << src_length + << ", dst_frame->mutable_data() = " + << dst_frame->mutable_data(); } dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels; diff --git a/audio/test/audio_bwe_integration_test.cc b/audio/test/audio_bwe_integration_test.cc index eed7acb8de..f9953955df 100644 --- a/audio/test/audio_bwe_integration_test.cc +++ b/audio/test/audio_bwe_integration_test.cc @@ -160,9 +160,6 @@ using AudioBweIntegrationTest = CallTest; // TODO(tschumim): This test is flaky when run on android and mac. Re-enable the // test for when the issue is fixed. 
TEST_F(AudioBweIntegrationTest, DISABLED_NoBandwidthDropAfterDtx) { - webrtc::test::ScopedFieldTrials override_field_trials( - "WebRTC-Audio-SendSideBwe/Enabled/" - "WebRTC-SendSideBwe-WithOverhead/Enabled/"); NoBandwidthDropAfterDtx test; RunBaseTest(&test); } diff --git a/audio/test/audio_end_to_end_test.cc b/audio/test/audio_end_to_end_test.cc index 896b0f2dae..0d8529a913 100644 --- a/audio/test/audio_end_to_end_test.cc +++ b/audio/test/audio_end_to_end_test.cc @@ -92,6 +92,8 @@ void AudioEndToEndTest::ModifyAudioConfigs( {{"stereo", "1"}}); send_config->send_codec_spec = AudioSendStream::Config::SendCodecSpec( test::CallTest::kAudioSendPayloadType, kDefaultFormat); + send_config->min_bitrate_bps = 32000; + send_config->max_bitrate_bps = 32000; } void AudioEndToEndTest::OnAudioStreamsCreated( diff --git a/audio/test/audio_stats_test.cc b/audio/test/audio_stats_test.cc index c91183c66b..ea3327056b 100644 --- a/audio/test/audio_stats_test.cc +++ b/audio/test/audio_stats_test.cc @@ -65,7 +65,8 @@ class NoLossTest : public AudioEndToEndTest { EXPECT_FALSE(send_stats.apm_statistics.residual_echo_likelihood_recent_max); EXPECT_EQ(false, send_stats.typing_noise_detected); - AudioReceiveStream::Stats recv_stats = receive_stream()->GetStats(); + AudioReceiveStream::Stats recv_stats = + receive_stream()->GetStats(/*get_and_clear_legacy_stats=*/true); EXPECT_PRED2(IsNear, kBytesSent, recv_stats.payload_bytes_rcvd); EXPECT_PRED2(IsNear, kPacketsSent, recv_stats.packets_rcvd); EXPECT_EQ(0u, recv_stats.packets_lost); diff --git a/audio/test/low_bandwidth_audio_test.cc b/audio/test/low_bandwidth_audio_test.cc index 049b5e5150..50cf499920 100644 --- a/audio/test/low_bandwidth_audio_test.cc +++ b/audio/test/low_bandwidth_audio_test.cc @@ -8,6 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include "absl/flags/declare.h" #include "absl/flags/flag.h" #include "api/test/simulated_network.h" #include "audio/test/audio_end_to_end_test.h" diff --git a/audio/test/low_bandwidth_audio_test.py b/audio/test/low_bandwidth_audio_test.py index 51273f7486..9aaf30f364 100755 --- a/audio/test/low_bandwidth_audio_test.py +++ b/audio/test/low_bandwidth_audio_test.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """ This script is the wrapper that runs the low-bandwidth audio test. @@ -23,315 +22,352 @@ import subprocess import sys - SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir)) NO_TOOLS_ERROR_MESSAGE = ( - 'Could not find PESQ or POLQA at %s.\n' - '\n' - 'To fix this run:\n' - ' python %s %s\n' - '\n' - 'Note that these tools are Google-internal due to licensing, so in order to ' - 'use them you will have to get your own license and manually put them in the ' - 'right location.\n' - 'See https://cs.chromium.org/chromium/src/third_party/webrtc/tools_webrtc/' - 'download_tools.py?rcl=bbceb76f540159e2dba0701ac03c514f01624130&l=13') + 'Could not find PESQ or POLQA at %s.\n' + '\n' + 'To fix this run:\n' + ' python %s %s\n' + '\n' + 'Note that these tools are Google-internal due to licensing, so in order to ' + 'use them you will have to get your own license and manually put them in the ' + 'right location.\n' + 'See https://cs.chromium.org/chromium/src/third_party/webrtc/tools_webrtc/' + 'download_tools.py?rcl=bbceb76f540159e2dba0701ac03c514f01624130&l=13') def _LogCommand(command): - logging.info('Running %r', command) - return command + logging.info('Running %r', command) + return command def _ParseArgs(): - parser = argparse.ArgumentParser(description='Run low-bandwidth audio tests.') - parser.add_argument('build_dir', 
- help='Path to the build directory (e.g. out/Release).') - parser.add_argument('--remove', action='store_true', - help='Remove output audio files after testing.') - parser.add_argument('--android', action='store_true', - help='Perform the test on a connected Android device instead.') - parser.add_argument('--adb-path', help='Path to adb binary.', default='adb') - parser.add_argument('--num-retries', default='0', - help='Number of times to retry the test on Android.') - parser.add_argument('--isolated-script-test-perf-output', default=None, - help='Path to store perf results in histogram proto format.') - parser.add_argument('--extra-test-args', default=[], action='append', - help='Extra args to path to the test binary.') - - # Ignore Chromium-specific flags - parser.add_argument('--test-launcher-summary-output', - type=str, default=None) - args = parser.parse_args() - - return args + parser = argparse.ArgumentParser( + description='Run low-bandwidth audio tests.') + parser.add_argument('build_dir', + help='Path to the build directory (e.g. 
out/Release).') + parser.add_argument('--remove', + action='store_true', + help='Remove output audio files after testing.') + parser.add_argument( + '--android', + action='store_true', + help='Perform the test on a connected Android device instead.') + parser.add_argument('--adb-path', + help='Path to adb binary.', + default='adb') + parser.add_argument('--num-retries', + default='0', + help='Number of times to retry the test on Android.') + parser.add_argument( + '--isolated-script-test-perf-output', + default=None, + help='Path to store perf results in histogram proto format.') + parser.add_argument('--extra-test-args', + default=[], + action='append', + help='Extra args to path to the test binary.') + + # Ignore Chromium-specific flags + parser.add_argument('--test-launcher-summary-output', + type=str, + default=None) + args = parser.parse_args() + + return args def _GetPlatform(): - if sys.platform == 'win32': - return 'win' - elif sys.platform == 'darwin': - return 'mac' - elif sys.platform.startswith('linux'): - return 'linux' + if sys.platform == 'win32': + return 'win' + elif sys.platform == 'darwin': + return 'mac' + elif sys.platform.startswith('linux'): + return 'linux' def _GetExtension(): - return '.exe' if sys.platform == 'win32' else '' + return '.exe' if sys.platform == 'win32' else '' def _GetPathToTools(): - tools_dir = os.path.join(SRC_DIR, 'tools_webrtc') - toolchain_dir = os.path.join(tools_dir, 'audio_quality') + tools_dir = os.path.join(SRC_DIR, 'tools_webrtc') + toolchain_dir = os.path.join(tools_dir, 'audio_quality') - platform = _GetPlatform() - ext = _GetExtension() + platform = _GetPlatform() + ext = _GetExtension() - pesq_path = os.path.join(toolchain_dir, platform, 'pesq' + ext) - if not os.path.isfile(pesq_path): - pesq_path = None + pesq_path = os.path.join(toolchain_dir, platform, 'pesq' + ext) + if not os.path.isfile(pesq_path): + pesq_path = None - polqa_path = os.path.join(toolchain_dir, platform, 'PolqaOem64' + ext) - if not 
os.path.isfile(polqa_path): - polqa_path = None + polqa_path = os.path.join(toolchain_dir, platform, 'PolqaOem64' + ext) + if not os.path.isfile(polqa_path): + polqa_path = None - if (platform != 'mac' and not polqa_path) or not pesq_path: - logging.error(NO_TOOLS_ERROR_MESSAGE, - toolchain_dir, - os.path.join(tools_dir, 'download_tools.py'), - toolchain_dir) + if (platform != 'mac' and not polqa_path) or not pesq_path: + logging.error(NO_TOOLS_ERROR_MESSAGE, toolchain_dir, + os.path.join(tools_dir, 'download_tools.py'), + toolchain_dir) - return pesq_path, polqa_path + return pesq_path, polqa_path def ExtractTestRuns(lines, echo=False): - """Extracts information about tests from the output of a test runner. + """Extracts information about tests from the output of a test runner. Produces tuples (android_device, test_name, reference_file, degraded_file, cur_perf_results). """ - for line in lines: - if echo: - sys.stdout.write(line) - - # Output from Android has a prefix with the device name. - android_prefix_re = r'(?:I\b.+\brun_tests_on_device\((.+?)\)\s*)?' - test_re = r'^' + android_prefix_re + (r'TEST (\w+) ([^ ]+?) ([^\s]+)' - r' ?([^\s]+)?\s*$') - - match = re.search(test_re, line) + for line in lines: + if echo: + sys.stdout.write(line) + + # Output from Android has a prefix with the device name. + android_prefix_re = r'(?:I\b.+\brun_tests_on_device\((.+?)\)\s*)?' + test_re = r'^' + android_prefix_re + (r'TEST (\w+) ([^ ]+?) ([^\s]+)' + r' ?([^\s]+)?\s*$') + + match = re.search(test_re, line) + if match: + yield match.groups() + + +def _GetFile(file_path, + out_dir, + move=False, + android=False, + adb_prefix=('adb', )): + out_file_name = os.path.basename(file_path) + out_file_path = os.path.join(out_dir, out_file_name) + + if android: + # Pull the file from the connected Android device. + adb_command = adb_prefix + ('pull', file_path, out_dir) + subprocess.check_call(_LogCommand(adb_command)) + if move: + # Remove that file. 
+ adb_command = adb_prefix + ('shell', 'rm', file_path) + subprocess.check_call(_LogCommand(adb_command)) + elif os.path.abspath(file_path) != os.path.abspath(out_file_path): + if move: + shutil.move(file_path, out_file_path) + else: + shutil.copy(file_path, out_file_path) + + return out_file_path + + +def _RunPesq(executable_path, + reference_file, + degraded_file, + sample_rate_hz=16000): + directory = os.path.dirname(reference_file) + assert os.path.dirname(degraded_file) == directory + + # Analyze audio. + command = [ + executable_path, + '+%d' % sample_rate_hz, + os.path.basename(reference_file), + os.path.basename(degraded_file) + ] + # Need to provide paths in the current directory due to a bug in PESQ: + # On Mac, for some 'path/to/file.wav', if 'file.wav' is longer than + # 'path/to', PESQ crashes. + out = subprocess.check_output(_LogCommand(command), + cwd=directory, + stderr=subprocess.STDOUT) + + # Find the scores in stdout of PESQ. + match = re.search( + r'Prediction \(Raw MOS, MOS-LQO\):\s+=\s+([\d.]+)\s+([\d.]+)', out) if match: - yield match.groups() - - -def _GetFile(file_path, out_dir, move=False, - android=False, adb_prefix=('adb',)): - out_file_name = os.path.basename(file_path) - out_file_path = os.path.join(out_dir, out_file_name) - - if android: - # Pull the file from the connected Android device. - adb_command = adb_prefix + ('pull', file_path, out_dir) - subprocess.check_call(_LogCommand(adb_command)) - if move: - # Remove that file. 
- adb_command = adb_prefix + ('shell', 'rm', file_path) - subprocess.check_call(_LogCommand(adb_command)) - elif os.path.abspath(file_path) != os.path.abspath(out_file_path): - if move: - shutil.move(file_path, out_file_path) - else: - shutil.copy(file_path, out_file_path) - - return out_file_path - + raw_mos, _ = match.groups() -def _RunPesq(executable_path, reference_file, degraded_file, - sample_rate_hz=16000): - directory = os.path.dirname(reference_file) - assert os.path.dirname(degraded_file) == directory - - # Analyze audio. - command = [executable_path, '+%d' % sample_rate_hz, - os.path.basename(reference_file), - os.path.basename(degraded_file)] - # Need to provide paths in the current directory due to a bug in PESQ: - # On Mac, for some 'path/to/file.wav', if 'file.wav' is longer than - # 'path/to', PESQ crashes. - out = subprocess.check_output(_LogCommand(command), - cwd=directory, stderr=subprocess.STDOUT) - - # Find the scores in stdout of PESQ. - match = re.search( - r'Prediction \(Raw MOS, MOS-LQO\):\s+=\s+([\d.]+)\s+([\d.]+)', out) - if match: - raw_mos, _ = match.groups() - - return {'pesq_mos': (raw_mos, 'unitless')} - else: - logging.error('PESQ: %s', out.splitlines()[-1]) - return {} + return {'pesq_mos': (raw_mos, 'unitless')} + else: + logging.error('PESQ: %s', out.splitlines()[-1]) + return {} def _RunPolqa(executable_path, reference_file, degraded_file): - # Analyze audio. - command = [executable_path, '-q', '-LC', 'NB', - '-Ref', reference_file, '-Test', degraded_file] - process = subprocess.Popen(_LogCommand(command), - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = process.communicate() - - # Find the scores in stdout of POLQA. 
- match = re.search(r'\bMOS-LQO:\s+([\d.]+)', out) - - if process.returncode != 0 or not match: - if process.returncode == 2: - logging.warning('%s (2)', err.strip()) - logging.warning('POLQA license error, skipping test.') - else: - logging.error('%s (%d)', err.strip(), process.returncode) - return {} - - mos_lqo, = match.groups() - return {'polqa_mos_lqo': (mos_lqo, 'unitless')} + # Analyze audio. + command = [ + executable_path, '-q', '-LC', 'NB', '-Ref', reference_file, '-Test', + degraded_file + ] + process = subprocess.Popen(_LogCommand(command), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = process.communicate() + + # Find the scores in stdout of POLQA. + match = re.search(r'\bMOS-LQO:\s+([\d.]+)', out) + + if process.returncode != 0 or not match: + if process.returncode == 2: + logging.warning('%s (2)', err.strip()) + logging.warning('POLQA license error, skipping test.') + else: + logging.error('%s (%d)', err.strip(), process.returncode) + return {} + + mos_lqo, = match.groups() + return {'polqa_mos_lqo': (mos_lqo, 'unitless')} def _MergeInPerfResultsFromCcTests(histograms, run_perf_results_file): - from tracing.value import histogram_set + from tracing.value import histogram_set - cc_histograms = histogram_set.HistogramSet() - with open(run_perf_results_file, 'rb') as f: - contents = f.read() - if not contents: - return + cc_histograms = histogram_set.HistogramSet() + with open(run_perf_results_file, 'rb') as f: + contents = f.read() + if not contents: + return - cc_histograms.ImportProto(contents) + cc_histograms.ImportProto(contents) - histograms.Merge(cc_histograms) + histograms.Merge(cc_histograms) -Analyzer = collections.namedtuple('Analyzer', ['name', 'func', 'executable', - 'sample_rate_hz']) +Analyzer = collections.namedtuple( + 'Analyzer', ['name', 'func', 'executable', 'sample_rate_hz']) def _ConfigurePythonPath(args): - script_dir = os.path.dirname(os.path.realpath(__file__)) - checkout_root = os.path.abspath( - 
os.path.join(script_dir, os.pardir, os.pardir)) - - # TODO(https://crbug.com/1029452): Use a copy rule and add these from the out - # dir like for the third_party/protobuf code. - sys.path.insert(0, os.path.join(checkout_root, 'third_party', 'catapult', - 'tracing')) - - # The low_bandwidth_audio_perf_test gn rule will build the protobuf stub for - # python, so put it in the path for this script before we attempt to import - # it. - histogram_proto_path = os.path.join( - os.path.abspath(args.build_dir), 'pyproto', 'tracing', 'tracing', 'proto') - sys.path.insert(0, histogram_proto_path) - proto_stub_path = os.path.join(os.path.abspath(args.build_dir), 'pyproto') - sys.path.insert(0, proto_stub_path) - - # Fail early in case the proto hasn't been built. - try: - import histogram_pb2 - except ImportError as e: - logging.exception(e) - raise ImportError('Could not import histogram_pb2. You need to build the ' - 'low_bandwidth_audio_perf_test target before invoking ' - 'this script. Expected to find ' - 'histogram_pb2.py in %s.' % histogram_proto_path) + script_dir = os.path.dirname(os.path.realpath(__file__)) + checkout_root = os.path.abspath( + os.path.join(script_dir, os.pardir, os.pardir)) + + # TODO(https://crbug.com/1029452): Use a copy rule and add these from the out + # dir like for the third_party/protobuf code. + sys.path.insert( + 0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing')) + + # The low_bandwidth_audio_perf_test gn rule will build the protobuf stub for + # python, so put it in the path for this script before we attempt to import + # it. + histogram_proto_path = os.path.join(os.path.abspath(args.build_dir), + 'pyproto', 'tracing', 'tracing', + 'proto') + sys.path.insert(0, histogram_proto_path) + proto_stub_path = os.path.join(os.path.abspath(args.build_dir), 'pyproto') + sys.path.insert(0, proto_stub_path) + + # Fail early in case the proto hasn't been built. 
+ try: + import histogram_pb2 + except ImportError as e: + logging.exception(e) + raise ImportError( + 'Could not import histogram_pb2. You need to build the ' + 'low_bandwidth_audio_perf_test target before invoking ' + 'this script. Expected to find ' + 'histogram_pb2.py in %s.' % histogram_proto_path) def main(): - # pylint: disable=W0101 - logging.basicConfig(level=logging.INFO) - logging.info('Invoked with %s', str(sys.argv)) - - args = _ParseArgs() - - _ConfigurePythonPath(args) - - # Import catapult modules here after configuring the pythonpath. - from tracing.value import histogram_set - from tracing.value.diagnostics import reserved_infos - from tracing.value.diagnostics import generic_set - - pesq_path, polqa_path = _GetPathToTools() - if pesq_path is None: - return 1 - - out_dir = os.path.join(args.build_dir, '..') - if args.android: - test_command = [os.path.join(args.build_dir, 'bin', - 'run_low_bandwidth_audio_test'), - '-v', '--num-retries', args.num_retries] - else: - test_command = [os.path.join(args.build_dir, 'low_bandwidth_audio_test')] - - analyzers = [Analyzer('pesq', _RunPesq, pesq_path, 16000)] - # Check if POLQA can run at all, or skip the 48 kHz tests entirely. - example_path = os.path.join(SRC_DIR, 'resources', - 'voice_engine', 'audio_tiny48.wav') - if polqa_path and _RunPolqa(polqa_path, example_path, example_path): - analyzers.append(Analyzer('polqa', _RunPolqa, polqa_path, 48000)) - - histograms = histogram_set.HistogramSet() - for analyzer in analyzers: - # Start the test executable that produces audio files. - test_process = subprocess.Popen( - _LogCommand(test_command + [ + # pylint: disable=W0101 + logging.basicConfig(level=logging.INFO) + logging.info('Invoked with %s', str(sys.argv)) + + args = _ParseArgs() + + _ConfigurePythonPath(args) + + # Import catapult modules here after configuring the pythonpath. 
+ from tracing.value import histogram_set + from tracing.value.diagnostics import reserved_infos + from tracing.value.diagnostics import generic_set + + pesq_path, polqa_path = _GetPathToTools() + if pesq_path is None: + return 1 + + out_dir = os.path.join(args.build_dir, '..') + if args.android: + test_command = [ + os.path.join(args.build_dir, 'bin', + 'run_low_bandwidth_audio_test'), '-v', + '--num-retries', args.num_retries + ] + else: + test_command = [ + os.path.join(args.build_dir, 'low_bandwidth_audio_test') + ] + + analyzers = [Analyzer('pesq', _RunPesq, pesq_path, 16000)] + # Check if POLQA can run at all, or skip the 48 kHz tests entirely. + example_path = os.path.join(SRC_DIR, 'resources', 'voice_engine', + 'audio_tiny48.wav') + if polqa_path and _RunPolqa(polqa_path, example_path, example_path): + analyzers.append(Analyzer('polqa', _RunPolqa, polqa_path, 48000)) + + histograms = histogram_set.HistogramSet() + for analyzer in analyzers: + # Start the test executable that produces audio files. 
+ test_process = subprocess.Popen(_LogCommand(test_command + [ '--sample_rate_hz=%d' % analyzer.sample_rate_hz, '--test_case_prefix=%s' % analyzer.name, - ] + args.extra_test_args), - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - perf_results_file = None - try: - lines = iter(test_process.stdout.readline, '') - for result in ExtractTestRuns(lines, echo=True): - (android_device, test_name, reference_file, degraded_file, - perf_results_file) = result - - adb_prefix = (args.adb_path,) - if android_device: - adb_prefix += ('-s', android_device) - - reference_file = _GetFile(reference_file, out_dir, - android=args.android, adb_prefix=adb_prefix) - degraded_file = _GetFile(degraded_file, out_dir, move=True, - android=args.android, adb_prefix=adb_prefix) - - analyzer_results = analyzer.func(analyzer.executable, - reference_file, degraded_file) - for metric, (value, units) in analyzer_results.items(): - hist = histograms.CreateHistogram(metric, units, [value]) - user_story = generic_set.GenericSet([test_name]) - hist.diagnostics[reserved_infos.STORIES.name] = user_story - - # Output human readable results. 
- print 'RESULT %s: %s= %s %s' % (metric, test_name, value, units) - - if args.remove: - os.remove(reference_file) - os.remove(degraded_file) - finally: - test_process.terminate() - if perf_results_file: - perf_results_file = _GetFile(perf_results_file, out_dir, move=True, - android=args.android, adb_prefix=adb_prefix) - _MergeInPerfResultsFromCcTests(histograms, perf_results_file) - if args.remove: - os.remove(perf_results_file) - - if args.isolated_script_test_perf_output: - with open(args.isolated_script_test_perf_output, 'wb') as f: - f.write(histograms.AsProto().SerializeToString()) - - return test_process.wait() + ] + args.extra_test_args), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + perf_results_file = None + try: + lines = iter(test_process.stdout.readline, '') + for result in ExtractTestRuns(lines, echo=True): + (android_device, test_name, reference_file, degraded_file, + perf_results_file) = result + + adb_prefix = (args.adb_path, ) + if android_device: + adb_prefix += ('-s', android_device) + + reference_file = _GetFile(reference_file, + out_dir, + android=args.android, + adb_prefix=adb_prefix) + degraded_file = _GetFile(degraded_file, + out_dir, + move=True, + android=args.android, + adb_prefix=adb_prefix) + + analyzer_results = analyzer.func(analyzer.executable, + reference_file, degraded_file) + for metric, (value, units) in analyzer_results.items(): + hist = histograms.CreateHistogram(metric, units, [value]) + user_story = generic_set.GenericSet([test_name]) + hist.diagnostics[reserved_infos.STORIES.name] = user_story + + # Output human readable results. 
+ print 'RESULT %s: %s= %s %s' % (metric, test_name, value, + units) + + if args.remove: + os.remove(reference_file) + os.remove(degraded_file) + finally: + test_process.terminate() + if perf_results_file: + perf_results_file = _GetFile(perf_results_file, + out_dir, + move=True, + android=args.android, + adb_prefix=adb_prefix) + _MergeInPerfResultsFromCcTests(histograms, perf_results_file) + if args.remove: + os.remove(perf_results_file) + + if args.isolated_script_test_perf_output: + with open(args.isolated_script_test_perf_output, 'wb') as f: + f.write(histograms.AsProto().SerializeToString()) + + return test_process.wait() if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/audio/test/nack_test.cc b/audio/test/nack_test.cc new file mode 100644 index 0000000000..13cfe74a28 --- /dev/null +++ b/audio/test/nack_test.cc @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "audio/test/audio_end_to_end_test.h" +#include "system_wrappers/include/sleep.h" +#include "test/gtest.h" + +namespace webrtc { +namespace test { + +using NackTest = CallTest; + +TEST_F(NackTest, ShouldNackInLossyNetwork) { + class NackTest : public AudioEndToEndTest { + public: + const int kTestDurationMs = 2000; + const int64_t kRttMs = 30; + const int64_t kLossPercent = 30; + const int kNackHistoryMs = 1000; + + BuiltInNetworkBehaviorConfig GetNetworkPipeConfig() const override { + BuiltInNetworkBehaviorConfig pipe_config; + pipe_config.queue_delay_ms = kRttMs / 2; + pipe_config.loss_percent = kLossPercent; + return pipe_config; + } + + void ModifyAudioConfigs( + AudioSendStream::Config* send_config, + std::vector* receive_configs) override { + ASSERT_EQ(receive_configs->size(), 1U); + (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackHistoryMs; + AudioEndToEndTest::ModifyAudioConfigs(send_config, receive_configs); + } + + void PerformTest() override { SleepMs(kTestDurationMs); } + + void OnStreamsStopped() override { + AudioReceiveStream::Stats recv_stats = + receive_stream()->GetStats(/*get_and_clear_legacy_stats=*/true); + EXPECT_GT(recv_stats.nacks_sent, 0U); + AudioSendStream::Stats send_stats = send_stream()->GetStats(); + EXPECT_GT(send_stats.retransmitted_packets_sent, 0U); + EXPECT_GT(send_stats.nacks_rcvd, 0U); + } + } test; + + RunBaseTest(&test); +} + +} // namespace test +} // namespace webrtc diff --git a/audio/test/pc_low_bandwidth_audio_test.cc b/audio/test/pc_low_bandwidth_audio_test.cc index aafb65f15d..95a32238c5 100644 --- a/audio/test/pc_low_bandwidth_audio_test.cc +++ b/audio/test/pc_low_bandwidth_audio_test.cc @@ -10,12 +10,14 @@ #include +#include "absl/flags/declare.h" #include "absl/flags/flag.h" #include "api/test/create_network_emulation_manager.h" #include "api/test/create_peerconnection_quality_test_fixture.h" #include "api/test/network_emulation_manager.h" #include 
"api/test/peerconnection_quality_test_fixture.h" #include "api/test/simulated_network.h" +#include "api/test/time_controller.h" #include "call/simulated_network.h" #include "test/gtest.h" #include "test/pc/e2e/network_quality_metrics_reporter.h" @@ -70,12 +72,13 @@ CreateTwoNetworkLinks(NetworkEmulationManager* emulation, std::unique_ptr CreateTestFixture(const std::string& test_case_name, + TimeController& time_controller, std::pair network_links, rtc::FunctionView alice_configurer, rtc::FunctionView bob_configurer) { auto fixture = webrtc_pc_e2e::CreatePeerConnectionE2EQualityTestFixture( - test_case_name, /*audio_quality_analyzer=*/nullptr, + test_case_name, time_controller, /*audio_quality_analyzer=*/nullptr, /*video_quality_analyzer=*/nullptr); fixture->AddPeer(network_links.first->network_thread(), network_links.first->network_manager(), alice_configurer); @@ -127,7 +130,7 @@ TEST(PCLowBandwidthAudioTest, PCGoodNetworkHighBitrate) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( - GetMetricTestCaseName(), + GetMetricTestCaseName(), *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -154,7 +157,7 @@ TEST(PCLowBandwidthAudioTest, PC40kbpsNetwork) { config.queue_delay_ms = 400; config.loss_percent = 1; auto fixture = CreateTestFixture( - GetMetricTestCaseName(), + GetMetricTestCaseName(), *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { AudioConfig audio; diff --git a/audio/test/unittests/low_bandwidth_audio_test_test.py b/audio/test/unittests/low_bandwidth_audio_test_test.py index 7403663cd4..1b73269528 100755 --- a/audio/test/unittests/low_bandwidth_audio_test_test.py +++ b/audio/test/unittests/low_bandwidth_audio_test_test.py @@ -11,7 +11,6 @@ import unittest import sys - SCRIPT_DIR = 
os.path.dirname(os.path.abspath(__file__)) PARENT_DIR = os.path.join(SCRIPT_DIR, os.pardir) sys.path.append(PARENT_DIR) @@ -19,46 +18,51 @@ class TestExtractTestRuns(unittest.TestCase): - def _TestLog(self, log, *expected): - self.assertEqual( - tuple(low_bandwidth_audio_test.ExtractTestRuns(log.splitlines(True))), - expected) + def _TestLog(self, log, *expected): + self.assertEqual( + tuple( + low_bandwidth_audio_test.ExtractTestRuns( + log.splitlines(True))), expected) - def testLinux(self): - self._TestLog(LINUX_LOG, - (None, 'GoodNetworkHighBitrate', - '/webrtc/src/resources/voice_engine/audio_tiny16.wav', - '/webrtc/src/out/LowBandwidth_GoodNetworkHighBitrate.wav', None), - (None, 'Mobile2GNetwork', - '/webrtc/src/resources/voice_engine/audio_tiny16.wav', - '/webrtc/src/out/LowBandwidth_Mobile2GNetwork.wav', None), - (None, 'PCGoodNetworkHighBitrate', - '/webrtc/src/resources/voice_engine/audio_tiny16.wav', - '/webrtc/src/out/PCLowBandwidth_PCGoodNetworkHighBitrate.wav', - '/webrtc/src/out/PCLowBandwidth_perf_48.json'), - (None, 'PCMobile2GNetwork', - '/webrtc/src/resources/voice_engine/audio_tiny16.wav', - '/webrtc/src/out/PCLowBandwidth_PCMobile2GNetwork.wav', - '/webrtc/src/out/PCLowBandwidth_perf_48.json')) + def testLinux(self): + self._TestLog( + LINUX_LOG, + (None, 'GoodNetworkHighBitrate', + '/webrtc/src/resources/voice_engine/audio_tiny16.wav', + '/webrtc/src/out/LowBandwidth_GoodNetworkHighBitrate.wav', None), + (None, 'Mobile2GNetwork', + '/webrtc/src/resources/voice_engine/audio_tiny16.wav', + '/webrtc/src/out/LowBandwidth_Mobile2GNetwork.wav', None), + (None, 'PCGoodNetworkHighBitrate', + '/webrtc/src/resources/voice_engine/audio_tiny16.wav', + '/webrtc/src/out/PCLowBandwidth_PCGoodNetworkHighBitrate.wav', + '/webrtc/src/out/PCLowBandwidth_perf_48.json'), + (None, 'PCMobile2GNetwork', + '/webrtc/src/resources/voice_engine/audio_tiny16.wav', + '/webrtc/src/out/PCLowBandwidth_PCMobile2GNetwork.wav', + '/webrtc/src/out/PCLowBandwidth_perf_48.json')) 
- def testAndroid(self): - self._TestLog(ANDROID_LOG, - ('ddfa6149', 'Mobile2GNetwork', - '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav', - '/sdcard/chromium_tests_root/LowBandwidth_Mobile2GNetwork.wav', None), - ('TA99205CNO', 'GoodNetworkHighBitrate', - '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav', - '/sdcard/chromium_tests_root/LowBandwidth_GoodNetworkHighBitrate.wav', - None), - ('ddfa6149', 'PCMobile2GNetwork', - '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav', - '/sdcard/chromium_tests_root/PCLowBandwidth_PCMobile2GNetwork.wav', - '/sdcard/chromium_tests_root/PCLowBandwidth_perf_48.json'), - ('TA99205CNO', 'PCGoodNetworkHighBitrate', - '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav', - ('/sdcard/chromium_tests_root/' - 'PCLowBandwidth_PCGoodNetworkHighBitrate.wav'), - '/sdcard/chromium_tests_root/PCLowBandwidth_perf_48.json')) + def testAndroid(self): + self._TestLog(ANDROID_LOG, ( + 'ddfa6149', 'Mobile2GNetwork', + '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav', + '/sdcard/chromium_tests_root/LowBandwidth_Mobile2GNetwork.wav', + None + ), ( + 'TA99205CNO', 'GoodNetworkHighBitrate', + '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav', + '/sdcard/chromium_tests_root/LowBandwidth_GoodNetworkHighBitrate.wav', + None + ), ( + 'ddfa6149', 'PCMobile2GNetwork', + '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav', + '/sdcard/chromium_tests_root/PCLowBandwidth_PCMobile2GNetwork.wav', + '/sdcard/chromium_tests_root/PCLowBandwidth_perf_48.json' + ), ('TA99205CNO', 'PCGoodNetworkHighBitrate', + '/sdcard/chromium_tests_root/resources/voice_engine/audio_tiny16.wav', + ('/sdcard/chromium_tests_root/' + 'PCLowBandwidth_PCGoodNetworkHighBitrate.wav'), + '/sdcard/chromium_tests_root/PCLowBandwidth_perf_48.json')) LINUX_LOG = r'''\ @@ -233,6 +237,5 @@ def testAndroid(self): I 16.608s tear_down_device(TA99205CNO) Wrote device 
cache: /webrtc/src/out/debug-android/device_cache_TA99305CMO.json ''' - if __name__ == "__main__": - unittest.main() + unittest.main() diff --git a/audio/utility/BUILD.gn b/audio/utility/BUILD.gn index 54ca04698d..933553d81b 100644 --- a/audio/utility/BUILD.gn +++ b/audio/utility/BUILD.gn @@ -26,10 +26,10 @@ rtc_library("audio_frame_operations") { "../../api/audio:audio_frame_api", "../../common_audio", "../../rtc_base:checks", - "../../rtc_base:deprecation", "../../rtc_base:rtc_base_approved", "../../system_wrappers:field_trial", ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] } if (rtc_include_tests) { diff --git a/audio/utility/audio_frame_operations.cc b/audio/utility/audio_frame_operations.cc index a9d2cf1632..e13a09bace 100644 --- a/audio/utility/audio_frame_operations.cc +++ b/audio/utility/audio_frame_operations.cc @@ -169,10 +169,10 @@ void AudioFrameOperations::UpmixChannels(size_t target_number_of_channels, if (!frame->muted()) { // Up-mixing done in place. Going backwards through the frame ensure nothing // is irrevocably overwritten. + int16_t* frame_data = frame->mutable_data(); for (int i = frame->samples_per_channel_ - 1; i >= 0; i--) { for (size_t j = 0; j < target_number_of_channels; ++j) { - frame->mutable_data()[target_number_of_channels * i + j] = - frame->data()[i]; + frame_data[target_number_of_channels * i + j] = frame_data[i]; } } } diff --git a/audio/utility/audio_frame_operations.h b/audio/utility/audio_frame_operations.h index 65c310c489..2f1540bcf5 100644 --- a/audio/utility/audio_frame_operations.h +++ b/audio/utility/audio_frame_operations.h @@ -14,8 +14,8 @@ #include #include +#include "absl/base/attributes.h" #include "api/audio/audio_frame.h" -#include "rtc_base/deprecation.h" namespace webrtc { @@ -36,12 +36,14 @@ class AudioFrameOperations { // |frame.num_channels_| will be updated. This version checks for sufficient // buffer size and that |num_channels_| is mono. Use UpmixChannels // instead. 
TODO(bugs.webrtc.org/8649): remove. - RTC_DEPRECATED static int MonoToStereo(AudioFrame* frame); + ABSL_DEPRECATED("bugs.webrtc.org/8649") + static int MonoToStereo(AudioFrame* frame); // |frame.num_channels_| will be updated. This version checks that // |num_channels_| is stereo. Use DownmixChannels // instead. TODO(bugs.webrtc.org/8649): remove. - RTC_DEPRECATED static int StereoToMono(AudioFrame* frame); + ABSL_DEPRECATED("bugs.webrtc.org/8649") + static int StereoToMono(AudioFrame* frame); // Downmixes 4 channels |src_audio| to stereo |dst_audio|. This is an in-place // operation, meaning |src_audio| and |dst_audio| may point to the same diff --git a/audio/voip/BUILD.gn b/audio/voip/BUILD.gn index 60232d5144..5311d7242b 100644 --- a/audio/voip/BUILD.gn +++ b/audio/voip/BUILD.gn @@ -26,8 +26,9 @@ rtc_library("voip_core") { "../../modules/utility:utility", "../../rtc_base:criticalsection", "../../rtc_base:logging", - "//third_party/abseil-cpp/absl/types:optional", + "../../rtc_base/synchronization:mutex", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("audio_channel") { @@ -66,6 +67,7 @@ rtc_library("audio_ingress") { "../../api:transport_api", "../../api/audio:audio_mixer_api", "../../api/audio_codecs:audio_codecs_api", + "../../api/voip:voip_api", "../../modules/audio_coding", "../../modules/rtp_rtcp", "../../modules/rtp_rtcp:rtp_rtcp_format", @@ -74,8 +76,10 @@ rtc_library("audio_ingress") { "../../rtc_base:logging", "../../rtc_base:safe_minmax", "../../rtc_base:timeutils", + "../../rtc_base/synchronization:mutex", "../utility:audio_frame_operations", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("audio_egress") { @@ -85,6 +89,7 @@ rtc_library("audio_egress") { ] deps = [ "..:audio", + "../../api:sequence_checker", "../../api/audio_codecs:audio_codecs_api", "../../api/task_queue", "../../call:audio_sender_interface", @@ -93,8 +98,9 @@ rtc_library("audio_egress") { 
"../../modules/rtp_rtcp:rtp_rtcp_format", "../../rtc_base:logging", "../../rtc_base:rtc_task_queue", - "../../rtc_base:thread_checker", "../../rtc_base:timeutils", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:no_unique_address", "../utility:audio_frame_operations", ] } diff --git a/audio/voip/audio_channel.cc b/audio/voip/audio_channel.cc index b9ce7accd1..b4a50eec12 100644 --- a/audio/voip/audio_channel.cc +++ b/audio/voip/audio_channel.cc @@ -16,7 +16,7 @@ #include "api/audio_codecs/audio_format.h" #include "api/task_queue/task_queue_factory.h" #include "modules/rtp_rtcp/include/receive_statistics.h" -#include "rtc_base/critical_section.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" @@ -32,18 +32,16 @@ AudioChannel::AudioChannel( Transport* transport, uint32_t local_ssrc, TaskQueueFactory* task_queue_factory, - ProcessThread* process_thread, AudioMixer* audio_mixer, rtc::scoped_refptr decoder_factory) - : audio_mixer_(audio_mixer), process_thread_(process_thread) { + : audio_mixer_(audio_mixer) { RTC_DCHECK(task_queue_factory); - RTC_DCHECK(process_thread); RTC_DCHECK(audio_mixer); Clock* clock = Clock::GetRealTimeClock(); receive_statistics_ = ReceiveStatistics::Create(clock); - RtpRtcp::Configuration rtp_config; + RtpRtcpInterface::Configuration rtp_config; rtp_config.clock = clock; rtp_config.audio = true; rtp_config.receive_statistics = receive_statistics_.get(); @@ -51,14 +49,11 @@ AudioChannel::AudioChannel( rtp_config.outgoing_transport = transport; rtp_config.local_media_ssrc = local_ssrc; - rtp_rtcp_ = RtpRtcp::Create(rtp_config); + rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(rtp_config); rtp_rtcp_->SetSendingMediaStatus(false); rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound); - // ProcessThread periodically services RTP stack for RTCP. 
- process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE); - ingress_ = std::make_unique(rtp_rtcp_.get(), clock, receive_statistics_.get(), std::move(decoder_factory)); @@ -79,48 +74,101 @@ AudioChannel::~AudioChannel() { } audio_mixer_->RemoveSource(ingress_.get()); - process_thread_->DeRegisterModule(rtp_rtcp_.get()); + + // TODO(bugs.webrtc.org/11581): unclear if we still need to clear |egress_| + // here. + egress_.reset(); + ingress_.reset(); } -void AudioChannel::StartSend() { - egress_->StartSend(); +bool AudioChannel::StartSend() { + // If encoder has not been set, return false. + if (!egress_->StartSend()) { + return false; + } // Start sending with RTP stack if it has not been sending yet. - if (!rtp_rtcp_->Sending() && rtp_rtcp_->SetSendingStatus(true) != 0) { - RTC_DLOG(LS_ERROR) << "StartSend() RTP/RTCP failed to start sending"; + if (!rtp_rtcp_->Sending()) { + rtp_rtcp_->SetSendingStatus(true); } + return true; } void AudioChannel::StopSend() { egress_->StopSend(); - // If the channel is not playing and RTP stack is active then deactivate RTP - // stack. SetSendingStatus(false) triggers the transmission of RTCP BYE + // Deactivate RTP stack when both sending and receiving are stopped. + // SetSendingStatus(false) triggers the transmission of RTCP BYE // message to remote endpoint. - if (!IsPlaying() && rtp_rtcp_->Sending() && - rtp_rtcp_->SetSendingStatus(false) != 0) { - RTC_DLOG(LS_ERROR) << "StopSend() RTP/RTCP failed to stop sending"; + if (!ingress_->IsPlaying() && rtp_rtcp_->Sending()) { + rtp_rtcp_->SetSendingStatus(false); } } -void AudioChannel::StartPlay() { - ingress_->StartPlay(); +bool AudioChannel::StartPlay() { + // If decoders have not been set, return false. + if (!ingress_->StartPlay()) { + return false; + } // If RTP stack is not sending then start sending as in recv-only mode, RTCP // receiver report is expected. 
- if (!rtp_rtcp_->Sending() && rtp_rtcp_->SetSendingStatus(true) != 0) { - RTC_DLOG(LS_ERROR) << "StartPlay() RTP/RTCP failed to start sending"; + if (!rtp_rtcp_->Sending()) { + rtp_rtcp_->SetSendingStatus(true); } + return true; } void AudioChannel::StopPlay() { ingress_->StopPlay(); // Deactivate RTP stack only when both sending and receiving are stopped. - if (!IsSendingMedia() && rtp_rtcp_->Sending() && - rtp_rtcp_->SetSendingStatus(false) != 0) { - RTC_DLOG(LS_ERROR) << "StopPlay() RTP/RTCP failed to stop sending"; + if (!rtp_rtcp_->SendingMedia() && rtp_rtcp_->Sending()) { + rtp_rtcp_->SetSendingStatus(false); } } +IngressStatistics AudioChannel::GetIngressStatistics() { + IngressStatistics ingress_stats; + NetworkStatistics stats = ingress_->GetNetworkStatistics(); + ingress_stats.neteq_stats.total_samples_received = stats.totalSamplesReceived; + ingress_stats.neteq_stats.concealed_samples = stats.concealedSamples; + ingress_stats.neteq_stats.concealment_events = stats.concealmentEvents; + ingress_stats.neteq_stats.jitter_buffer_delay_ms = stats.jitterBufferDelayMs; + ingress_stats.neteq_stats.jitter_buffer_emitted_count = + stats.jitterBufferEmittedCount; + ingress_stats.neteq_stats.jitter_buffer_target_delay_ms = + stats.jitterBufferTargetDelayMs; + ingress_stats.neteq_stats.inserted_samples_for_deceleration = + stats.insertedSamplesForDeceleration; + ingress_stats.neteq_stats.removed_samples_for_acceleration = + stats.removedSamplesForAcceleration; + ingress_stats.neteq_stats.silent_concealed_samples = + stats.silentConcealedSamples; + ingress_stats.neteq_stats.fec_packets_received = stats.fecPacketsReceived; + ingress_stats.neteq_stats.fec_packets_discarded = stats.fecPacketsDiscarded; + ingress_stats.neteq_stats.delayed_packet_outage_samples = + stats.delayedPacketOutageSamples; + ingress_stats.neteq_stats.relative_packet_arrival_delay_ms = + stats.relativePacketArrivalDelayMs; + ingress_stats.neteq_stats.interruption_count = stats.interruptionCount; + 
ingress_stats.neteq_stats.total_interruption_duration_ms = + stats.totalInterruptionDurationMs; + ingress_stats.total_duration = ingress_->GetOutputTotalDuration(); + return ingress_stats; +} + +ChannelStatistics AudioChannel::GetChannelStatistics() { + ChannelStatistics channel_stat = ingress_->GetChannelStatistics(); + + StreamDataCounters rtp_stats, rtx_stats; + rtp_rtcp_->GetSendStreamDataCounters(&rtp_stats, &rtx_stats); + channel_stat.bytes_sent = + rtp_stats.transmitted.payload_bytes + rtx_stats.transmitted.payload_bytes; + channel_stat.packets_sent = + rtp_stats.transmitted.packets + rtx_stats.transmitted.packets; + + return channel_stat; +} + } // namespace webrtc diff --git a/audio/voip/audio_channel.h b/audio/voip/audio_channel.h index 8b6f1a8e59..7338d9faab 100644 --- a/audio/voip/audio_channel.h +++ b/audio/voip/audio_channel.h @@ -18,11 +18,10 @@ #include "api/task_queue/task_queue_factory.h" #include "api/voip/voip_base.h" +#include "api/voip/voip_statistics.h" #include "audio/voip/audio_egress.h" #include "audio/voip/audio_ingress.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" -#include "modules/utility/include/process_thread.h" -#include "rtc_base/critical_section.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "rtc_base/ref_count.h" namespace webrtc { @@ -35,7 +34,6 @@ class AudioChannel : public rtc::RefCountInterface { AudioChannel(Transport* transport, uint32_t local_ssrc, TaskQueueFactory* task_queue_factory, - ProcessThread* process_thread, AudioMixer* audio_mixer, rtc::scoped_refptr decoder_factory); ~AudioChannel() override; @@ -46,9 +44,11 @@ class AudioChannel : public rtc::RefCountInterface { ChannelId GetId() const { return id_; } // APIs to start/stop audio channel on each direction. - void StartSend(); + // StartSend/StartPlay returns false if encoder/decoders + // have not been set, respectively. 
+ bool StartSend(); void StopSend(); - void StartPlay(); + bool StartPlay(); void StopPlay(); // APIs relayed to AudioEgress. @@ -62,6 +62,13 @@ class AudioChannel : public rtc::RefCountInterface { absl::optional GetEncoderFormat() const { return egress_->GetEncoderFormat(); } + void RegisterTelephoneEventType(int rtp_payload_type, int sample_rate_hz) { + egress_->RegisterTelephoneEventType(rtp_payload_type, sample_rate_hz); + } + bool SendTelephoneEvent(int dtmf_event, int duration_ms) { + return egress_->SendTelephoneEvent(dtmf_event, duration_ms); + } + void SetMute(bool enable) { egress_->SetMute(enable); } // APIs relayed to AudioIngress. bool IsPlaying() const { return ingress_->IsPlaying(); } @@ -74,6 +81,35 @@ class AudioChannel : public rtc::RefCountInterface { void SetReceiveCodecs(const std::map& codecs) { ingress_->SetReceiveCodecs(codecs); } + IngressStatistics GetIngressStatistics(); + ChannelStatistics GetChannelStatistics(); + + // See comments on the methods used from AudioEgress and AudioIngress. + // Conversion to double is following what is done in + // DoubleAudioLevelFromIntAudioLevel method in rtc_stats_collector.cc to be + // consistent. + double GetInputAudioLevel() const { + return egress_->GetInputAudioLevel() / 32767.0; + } + double GetInputTotalEnergy() const { return egress_->GetInputTotalEnergy(); } + double GetInputTotalDuration() const { + return egress_->GetInputTotalDuration(); + } + double GetOutputAudioLevel() const { + return ingress_->GetOutputAudioLevel() / 32767.0; + } + double GetOutputTotalEnergy() const { + return ingress_->GetOutputTotalEnergy(); + } + double GetOutputTotalDuration() const { + return ingress_->GetOutputTotalDuration(); + } + + // Internal API for testing purpose. + void SendRTCPReportForTesting(RTCPPacketType type) { + int32_t result = rtp_rtcp_->SendRTCP(type); + RTC_DCHECK(result == 0); + } private: // ChannelId that this audio channel belongs for logging purpose. 
@@ -82,13 +118,10 @@ class AudioChannel : public rtc::RefCountInterface { // Synchronization is handled internally by AudioMixer. AudioMixer* audio_mixer_; - // Synchronization is handled internally by ProcessThread. - ProcessThread* process_thread_; - // Listed in order for safe destruction of AudioChannel object. // Synchronization for these are handled internally. std::unique_ptr receive_statistics_; - std::unique_ptr rtp_rtcp_; + std::unique_ptr rtp_rtcp_; std::unique_ptr ingress_; std::unique_ptr egress_; }; diff --git a/audio/voip/audio_egress.cc b/audio/voip/audio_egress.cc index a7bc202a41..1162824c9e 100644 --- a/audio/voip/audio_egress.cc +++ b/audio/voip/audio_egress.cc @@ -17,7 +17,7 @@ namespace webrtc { -AudioEgress::AudioEgress(RtpRtcp* rtp_rtcp, +AudioEgress::AudioEgress(RtpRtcpInterface* rtp_rtcp, Clock* clock, TaskQueueFactory* task_queue_factory) : rtp_rtcp_(rtp_rtcp), @@ -56,8 +56,13 @@ void AudioEgress::SetEncoder(int payload_type, audio_coding_->SetEncoder(std::move(encoder)); } -void AudioEgress::StartSend() { +bool AudioEgress::StartSend() { + if (!GetEncoderFormat()) { + RTC_DLOG(LS_WARNING) << "Send codec has not been set yet"; + return false; + } rtp_rtcp_->SetSendingMediaStatus(true); + return true; } void AudioEgress::StopSend() { @@ -75,6 +80,12 @@ void AudioEgress::SendAudioData(std::unique_ptr audio_frame) { return; } + double duration_seconds = + static_cast(audio_frame->samples_per_channel_) / + audio_frame->sample_rate_hz_; + + input_audio_level_.ComputeLevel(*audio_frame, duration_seconds); + AudioFrameOperations::Mute(audio_frame.get(), encoder_context_.previously_muted_, encoder_context_.mute_); diff --git a/audio/voip/audio_egress.h b/audio/voip/audio_egress.h index e5632cde32..a39c7e225a 100644 --- a/audio/voip/audio_egress.h +++ b/audio/voip/audio_egress.h @@ -15,15 +15,17 @@ #include #include "api/audio_codecs/audio_format.h" +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_factory.h" +#include 
"audio/audio_level.h" #include "audio/utility/audio_frame_operations.h" #include "call/audio_sender.h" #include "modules/audio_coding/include/audio_coding_module.h" #include "modules/rtp_rtcp/include/report_block_data.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/rtp_sender_audio.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" -#include "rtc_base/thread_checker.h" #include "rtc_base/time_utils.h" namespace webrtc { @@ -43,7 +45,7 @@ namespace webrtc { // smaller footprint. class AudioEgress : public AudioSender, public AudioPacketizationCallback { public: - AudioEgress(RtpRtcp* rtp_rtcp, + AudioEgress(RtpRtcpInterface* rtp_rtcp, Clock* clock, TaskQueueFactory* task_queue_factory); ~AudioEgress() override; @@ -58,8 +60,9 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback { // Start or stop sending operation of AudioEgress. This will start/stop // the RTP stack also causes encoder queue thread to start/stop - // processing input audio samples. - void StartSend(); + // processing input audio samples. StartSend will return false if + // a send codec has not been set. + bool StartSend(); void StopSend(); // Query the state of the RTP stack. This returns true if StartSend() @@ -72,7 +75,7 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback { // Retrieve current encoder format info. This returns encoder format set // by SetEncoder() and if encoder is not set, this will return nullopt. absl::optional GetEncoderFormat() const { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); return encoder_format_; } @@ -87,6 +90,16 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback { // otherwise false when the dtmf queue reached maximum of 20 events. 
bool SendTelephoneEvent(int dtmf_event, int duration_ms); + // See comments on LevelFullRange, TotalEnergy, TotalDuration from + // audio/audio_level.h. + int GetInputAudioLevel() const { return input_audio_level_.LevelFullRange(); } + double GetInputTotalEnergy() const { + return input_audio_level_.TotalEnergy(); + } + double GetInputTotalDuration() const { + return input_audio_level_.TotalDuration(); + } + // Implementation of AudioSender interface. void SendAudioData(std::unique_ptr audio_frame) override; @@ -99,17 +112,17 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback { private: void SetEncoderFormat(const SdpAudioFormat& encoder_format) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); encoder_format_ = encoder_format; } - rtc::CriticalSection lock_; + mutable Mutex lock_; // Current encoder format selected by caller. absl::optional encoder_format_ RTC_GUARDED_BY(lock_); // Synchronization is handled internally by RtpRtcp. - RtpRtcp* const rtp_rtcp_; + RtpRtcpInterface* const rtp_rtcp_; // Synchronization is handled internally by RTPSenderAudio. RTPSenderAudio rtp_sender_audio_; @@ -117,6 +130,9 @@ class AudioEgress : public AudioSender, public AudioPacketizationCallback { // Synchronization is handled internally by AudioCodingModule. const std::unique_ptr audio_coding_; + // Synchronization is handled internally by voe::AudioLevel. + voe::AudioLevel input_audio_level_; + // Struct that holds all variables used by encoder task queue. 
struct EncoderContext { // Offset used to mark rtp timestamp in sample rate unit in diff --git a/audio/voip/audio_ingress.cc b/audio/voip/audio_ingress.cc index fb43fcd753..8aa552bb28 100644 --- a/audio/voip/audio_ingress.cc +++ b/audio/voip/audio_ingress.cc @@ -17,7 +17,10 @@ #include "api/audio_codecs/audio_format.h" #include "audio/utility/audio_frame_operations.h" #include "modules/audio_coding/include/audio_coding_module.h" -#include "rtc_base/critical_section.h" +#include "modules/rtp_rtcp/source/byte_io.h" +#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" +#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h" +#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_minmax.h" @@ -36,7 +39,7 @@ AudioCodingModule::Config CreateAcmConfig( } // namespace AudioIngress::AudioIngress( - RtpRtcp* rtp_rtcp, + RtpRtcpInterface* rtp_rtcp, Clock* clock, ReceiveStatistics* receive_statistics, rtc::scoped_refptr decoder_factory) @@ -74,6 +77,12 @@ AudioMixer::Source::AudioFrameInfo AudioIngress::GetAudioFrameWithInfo( constexpr double kAudioSampleDurationSeconds = 0.01; output_audio_level_.ComputeLevel(*audio_frame, kAudioSampleDurationSeconds); + // If caller invoked StopPlay(), then mute the frame. + if (!playing_) { + AudioFrameOperations::Mute(audio_frame); + muted = true; + } + // Set first rtp timestamp with first audio frame with valid timestamp. if (first_rtp_timestamp_ < 0 && audio_frame->timestamp_ != 0) { first_rtp_timestamp_ = audio_frame->timestamp_; @@ -83,7 +92,7 @@ AudioMixer::Source::AudioFrameInfo AudioIngress::GetAudioFrameWithInfo( // Compute elapsed and NTP times. 
int64_t unwrap_timestamp; { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); unwrap_timestamp = timestamp_wrap_handler_.Unwrap(audio_frame->timestamp_); audio_frame->ntp_time_ms_ = @@ -104,10 +113,22 @@ AudioMixer::Source::AudioFrameInfo AudioIngress::GetAudioFrameWithInfo( : AudioMixer::Source::AudioFrameInfo::kNormal; } +bool AudioIngress::StartPlay() { + { + MutexLock lock(&lock_); + if (receive_codec_info_.empty()) { + RTC_DLOG(LS_WARNING) << "Receive codecs have not been set yet"; + return false; + } + } + playing_ = true; + return true; +} + void AudioIngress::SetReceiveCodecs( const std::map& codecs) { { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); for (const auto& kv : codecs) { receive_codec_info_[kv.first] = kv.second.clockrate_hz; } @@ -116,16 +137,12 @@ void AudioIngress::SetReceiveCodecs( } void AudioIngress::ReceivedRTPPacket(rtc::ArrayView rtp_packet) { - if (!IsPlaying()) { - return; - } - RtpPacketReceived rtp_packet_received; rtp_packet_received.Parse(rtp_packet.data(), rtp_packet.size()); // Set payload type's sampling rate before we feed it into ReceiveStatistics. { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); const auto& it = receive_codec_info_.find(rtp_packet_received.PayloadType()); // If sampling rate info is not available in our received codec set, it @@ -140,6 +157,12 @@ void AudioIngress::ReceivedRTPPacket(rtc::ArrayView rtp_packet) { rtp_packet_received.set_payload_type_frequency(it->second); } + // Track current remote SSRC. + if (rtp_packet_received.Ssrc() != remote_ssrc_) { + rtp_rtcp_->SetRemoteSSRC(rtp_packet_received.Ssrc()); + remote_ssrc_.store(rtp_packet_received.Ssrc()); + } + rtp_receive_statistics_->OnRtpPacket(rtp_packet_received); RTPHeader header; @@ -168,11 +191,28 @@ void AudioIngress::ReceivedRTPPacket(rtc::ArrayView rtp_packet) { void AudioIngress::ReceivedRTCPPacket( rtc::ArrayView rtcp_packet) { - // Deliver RTCP packet to RTP/RTCP module for parsing. 
+ rtcp::CommonHeader rtcp_header; + if (rtcp_header.Parse(rtcp_packet.data(), rtcp_packet.size()) && + (rtcp_header.type() == rtcp::SenderReport::kPacketType || + rtcp_header.type() == rtcp::ReceiverReport::kPacketType)) { + RTC_DCHECK_GE(rtcp_packet.size(), 8); + + uint32_t sender_ssrc = + ByteReader::ReadBigEndian(rtcp_packet.data() + 4); + + // If we don't have remote ssrc at this point, it's likely that remote + // endpoint is receive-only or it could have restarted the media. + if (sender_ssrc != remote_ssrc_) { + rtp_rtcp_->SetRemoteSSRC(sender_ssrc); + remote_ssrc_.store(sender_ssrc); + } + } + + // Deliver RTCP packet to RTP/RTCP module for parsing and processing. rtp_rtcp_->IncomingRtcpPacket(rtcp_packet.data(), rtcp_packet.size()); - int64_t rtt = GetRoundTripTime(); - if (rtt == -1) { + int64_t rtt = 0; + if (rtp_rtcp_->RTT(remote_ssrc_, &rtt, nullptr, nullptr, nullptr) != 0) { // Waiting for valid RTT. return; } @@ -185,35 +225,70 @@ void AudioIngress::ReceivedRTCPPacket( } { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp); } } -int64_t AudioIngress::GetRoundTripTime() { - const std::vector& report_data = - rtp_rtcp_->GetLatestReportBlockData(); +ChannelStatistics AudioIngress::GetChannelStatistics() { + ChannelStatistics channel_stats; - // If we do not have report block which means remote RTCP hasn't be received - // yet, return -1 as to indicate uninitialized value. - if (report_data.empty()) { - return -1; + // Get clockrate for current decoder ahead of jitter calculation. + uint32_t clockrate_hz = 0; + absl::optional> decoder = + acm_receiver_.LastDecoder(); + if (decoder) { + clockrate_hz = decoder->second.clockrate_hz; } - // We don't know in advance the remote SSRC used by the other end's receiver - // reports, so use the SSRC of the first report block as remote SSRC for now. 
- // TODO(natim@webrtc.org): handle the case where remote end is changing ssrc - // and update accordingly here. - const ReportBlockData& block_data = report_data[0]; - - const uint32_t sender_ssrc = block_data.report_block().sender_ssrc; + StreamStatistician* statistician = + rtp_receive_statistics_->GetStatistician(remote_ssrc_); + if (statistician) { + RtpReceiveStats stats = statistician->GetStats(); + channel_stats.packets_lost = stats.packets_lost; + channel_stats.packets_received = stats.packet_counter.packets; + channel_stats.bytes_received = stats.packet_counter.payload_bytes; + channel_stats.remote_ssrc = remote_ssrc_; + if (clockrate_hz > 0) { + channel_stats.jitter = static_cast(stats.jitter) / clockrate_hz; + } + } - if (sender_ssrc != remote_ssrc_.load()) { - remote_ssrc_.store(sender_ssrc); - rtp_rtcp_->SetRemoteSSRC(sender_ssrc); + // Get RTCP report using remote SSRC. + const std::vector& report_data = + rtp_rtcp_->GetLatestReportBlockData(); + for (const ReportBlockData& block_data : report_data) { + const RTCPReportBlock& rtcp_report = block_data.report_block(); + if (rtp_rtcp_->SSRC() != rtcp_report.source_ssrc || + remote_ssrc_ != rtcp_report.sender_ssrc) { + continue; + } + RemoteRtcpStatistics remote_stat; + remote_stat.packets_lost = rtcp_report.packets_lost; + remote_stat.fraction_lost = + static_cast(rtcp_report.fraction_lost) / (1 << 8); + if (clockrate_hz > 0) { + remote_stat.jitter = + static_cast(rtcp_report.jitter) / clockrate_hz; + } + if (block_data.has_rtt()) { + remote_stat.round_trip_time = + static_cast(block_data.last_rtt_ms()) / + rtc::kNumMillisecsPerSec; + } + remote_stat.last_report_received_timestamp_ms = + block_data.report_block_timestamp_utc_us() / + rtc::kNumMicrosecsPerMillisec; + channel_stats.remote_rtcp = remote_stat; + + // Receive only channel won't send any RTP packets. + if (!channel_stats.remote_ssrc.has_value()) { + channel_stats.remote_ssrc = remote_ssrc_; + } + break; } - return (block_data.has_rtt() ? 
block_data.last_rtt_ms() : -1); + return channel_stats; } } // namespace webrtc diff --git a/audio/voip/audio_ingress.h b/audio/voip/audio_ingress.h index 99766741d6..9a36a46563 100644 --- a/audio/voip/audio_ingress.h +++ b/audio/voip/audio_ingress.h @@ -17,18 +17,20 @@ #include #include +#include "absl/types/optional.h" #include "api/array_view.h" #include "api/audio/audio_mixer.h" #include "api/rtp_headers.h" #include "api/scoped_refptr.h" +#include "api/voip/voip_statistics.h" #include "audio/audio_level.h" #include "modules/audio_coding/acm2/acm_receiver.h" #include "modules/audio_coding/include/audio_coding_module.h" #include "modules/rtp_rtcp/include/receive_statistics.h" #include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "rtc_base/critical_section.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" namespace webrtc { @@ -44,14 +46,14 @@ namespace webrtc { // smaller footprint. class AudioIngress : public AudioMixer::Source { public: - AudioIngress(RtpRtcp* rtp_rtcp, + AudioIngress(RtpRtcpInterface* rtp_rtcp, Clock* clock, ReceiveStatistics* receive_statistics, rtc::scoped_refptr decoder_factory); ~AudioIngress() override; // Start or stop receiving operation of AudioIngress. - void StartPlay() { playing_ = true; } + bool StartPlay(); void StopPlay() { playing_ = false; output_audio_level_.ResetLevelFullRange(); @@ -68,29 +70,25 @@ class AudioIngress : public AudioMixer::Source { void ReceivedRTPPacket(rtc::ArrayView rtp_packet); void ReceivedRTCPPacket(rtc::ArrayView rtcp_packet); - // Retrieve highest speech output level in last 100 ms. Note that - // this isn't RMS but absolute raw audio level on int16_t sample unit. - // Therefore, the return value will vary between 0 ~ 0xFFFF. 
This type of - // value may be useful to be used for measuring active speaker gauge. - int GetSpeechOutputLevelFullRange() const { + // See comments on LevelFullRange, TotalEnergy, TotalDuration from + // audio/audio_level.h. + int GetOutputAudioLevel() const { return output_audio_level_.LevelFullRange(); } - - // Returns network round trip time (RTT) measued by RTCP exchange with - // remote media endpoint. RTT value -1 indicates that it's not initialized. - int64_t GetRoundTripTime(); + double GetOutputTotalEnergy() { return output_audio_level_.TotalEnergy(); } + double GetOutputTotalDuration() { + return output_audio_level_.TotalDuration(); + } NetworkStatistics GetNetworkStatistics() const { NetworkStatistics stats; - acm_receiver_.GetNetworkStatistics(&stats); - return stats; - } - AudioDecodingCallStats GetDecodingStatistics() const { - AudioDecodingCallStats stats; - acm_receiver_.GetDecodingCallStatistics(&stats); + acm_receiver_.GetNetworkStatistics(&stats, + /*get_and_clear_legacy_stats=*/false); return stats; } + ChannelStatistics GetChannelStatistics(); + // Implementation of AudioMixer::Source interface. AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo( int sampling_rate, @@ -122,8 +120,8 @@ class AudioIngress : public AudioMixer::Source { // Synchronizaton is handled internally by ReceiveStatistics. ReceiveStatistics* const rtp_receive_statistics_; - // Synchronizaton is handled internally by RtpRtcp. - RtpRtcp* const rtp_rtcp_; + // Synchronizaton is handled internally by RtpRtcpInterface. + RtpRtcpInterface* const rtp_rtcp_; // Synchronizaton is handled internally by acm2::AcmReceiver. acm2::AcmReceiver acm_receiver_; @@ -131,7 +129,7 @@ class AudioIngress : public AudioMixer::Source { // Synchronizaton is handled internally by voe::AudioLevel. 
voe::AudioLevel output_audio_level_; - rtc::CriticalSection lock_; + Mutex lock_; RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(lock_); diff --git a/audio/voip/test/BUILD.gn b/audio/voip/test/BUILD.gn index 39f100a3aa..132f448307 100644 --- a/audio/voip/test/BUILD.gn +++ b/audio/voip/test/BUILD.gn @@ -9,37 +9,51 @@ import("../../../webrtc.gni") if (rtc_include_tests) { - rtc_library("voip_core_unittests") { + rtc_source_set("mock_task_queue") { testonly = true - sources = [ "voip_core_unittest.cc" ] + visibility = [ "*" ] + sources = [ "mock_task_queue.h" ] deps = [ - "..:voip_core", - "../../../api/audio_codecs:builtin_audio_decoder_factory", - "../../../api/audio_codecs:builtin_audio_encoder_factory", - "../../../api/task_queue:default_task_queue_factory", - "../../../modules/audio_device:mock_audio_device", - "../../../modules/audio_processing:mocks", - "../../../test:audio_codec_mocks", - "../../../test:mock_transport", + "../../../api/task_queue:task_queue", "../../../test:test_support", ] } + if (!build_with_chromium) { + rtc_library("voip_core_unittests") { + testonly = true + sources = [ "voip_core_unittest.cc" ] + deps = [ + "..:voip_core", + "../../../api/audio_codecs:builtin_audio_decoder_factory", + "../../../api/audio_codecs:builtin_audio_encoder_factory", + "../../../api/task_queue:default_task_queue_factory", + "../../../modules/audio_device:mock_audio_device", + "../../../modules/audio_processing:mocks", + "../../../modules/utility:mock_process_thread", + "../../../test:audio_codec_mocks", + "../../../test:mock_transport", + "../../../test:test_support", + ] + } + } + rtc_library("audio_channel_unittests") { testonly = true sources = [ "audio_channel_unittest.cc" ] deps = [ + ":mock_task_queue", "..:audio_channel", "../../../api:transport_api", "../../../api/audio_codecs:builtin_audio_decoder_factory", "../../../api/audio_codecs:builtin_audio_encoder_factory", - "../../../api/task_queue:default_task_queue_factory", + 
"../../../api/task_queue:task_queue", "../../../modules/audio_mixer:audio_mixer_impl", "../../../modules/audio_mixer:audio_mixer_test_utils", + "../../../modules/rtp_rtcp:rtp_rtcp", "../../../modules/rtp_rtcp:rtp_rtcp_format", "../../../modules/utility", "../../../rtc_base:logging", - "../../../rtc_base:rtc_event", "../../../test:mock_transport", "../../../test:test_support", ] @@ -56,6 +70,7 @@ if (rtc_include_tests) { "../../../api/audio_codecs:builtin_audio_encoder_factory", "../../../api/task_queue:default_task_queue_factory", "../../../modules/audio_mixer:audio_mixer_test_utils", + "../../../modules/rtp_rtcp:rtp_rtcp", "../../../rtc_base:logging", "../../../rtc_base:rtc_event", "../../../test:mock_transport", @@ -72,6 +87,7 @@ if (rtc_include_tests) { "../../../api/audio_codecs:builtin_audio_encoder_factory", "../../../api/task_queue:default_task_queue_factory", "../../../modules/audio_mixer:audio_mixer_test_utils", + "../../../modules/rtp_rtcp:rtp_rtcp", "../../../modules/rtp_rtcp:rtp_rtcp_format", "../../../rtc_base:logging", "../../../rtc_base:rtc_event", diff --git a/audio/voip/test/audio_channel_unittest.cc b/audio/voip/test/audio_channel_unittest.cc index ce557823cb..a4f518c5bd 100644 --- a/audio/voip/test/audio_channel_unittest.cc +++ b/audio/voip/test/audio_channel_unittest.cc @@ -12,12 +12,11 @@ #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "api/call/transport.h" -#include "api/task_queue/default_task_queue_factory.h" +#include "api/task_queue/task_queue_factory.h" +#include "audio/voip/test/mock_task_queue.h" #include "modules/audio_mixer/audio_mixer_impl.h" #include "modules/audio_mixer/sine_wave_generator.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/utility/include/process_thread.h" -#include "rtc_base/event.h" #include "rtc_base/logging.h" #include "test/gmock.h" #include "test/gtest.h" @@ -28,6 +27,7 @@ namespace { using 
::testing::Invoke; using ::testing::NiceMock; +using ::testing::Return; using ::testing::Unused; constexpr uint64_t kStartTime = 123456789; @@ -41,30 +41,38 @@ class AudioChannelTest : public ::testing::Test { AudioChannelTest() : fake_clock_(kStartTime), wave_generator_(1000.0, kAudioLevel) { - process_thread_ = ProcessThread::Create("ModuleProcessThread"); + task_queue_factory_ = std::make_unique(&task_queue_); audio_mixer_ = AudioMixerImpl::Create(); - task_queue_factory_ = CreateDefaultTaskQueueFactory(); encoder_factory_ = CreateBuiltinAudioEncoderFactory(); decoder_factory_ = CreateBuiltinAudioDecoderFactory(); - } - void SetUp() override { - audio_channel_ = new rtc::RefCountedObject( - &transport_, kLocalSsrc, task_queue_factory_.get(), - process_thread_.get(), audio_mixer_.get(), decoder_factory_); - - audio_channel_->SetEncoder(kPcmuPayload, kPcmuFormat, - encoder_factory_->MakeAudioEncoder( - kPcmuPayload, kPcmuFormat, absl::nullopt)); - audio_channel_->SetReceiveCodecs({{kPcmuPayload, kPcmuFormat}}); - audio_channel_->StartSend(); - audio_channel_->StartPlay(); + // By default, run the queued task immediately. + ON_CALL(task_queue_, PostTask) + .WillByDefault( + Invoke([&](std::unique_ptr task) { task->Run(); })); } - void TearDown() override { - audio_channel_->StopSend(); - audio_channel_->StopPlay(); - audio_channel_ = nullptr; + void SetUp() override { audio_channel_ = CreateAudioChannel(kLocalSsrc); } + + void TearDown() override { audio_channel_ = nullptr; } + + rtc::scoped_refptr CreateAudioChannel(uint32_t ssrc) { + // Use same audio mixer here for simplicity sake as we are not checking + // audio activity of RTP in our testcases. If we need to do test on audio + // signal activity then we need to assign audio mixer for each channel. + // Also this uses the same transport object for different audio channel to + // simplify network routing logic. 
+ rtc::scoped_refptr audio_channel = + rtc::make_ref_counted( + &transport_, ssrc, task_queue_factory_.get(), audio_mixer_.get(), + decoder_factory_); + audio_channel->SetEncoder(kPcmuPayload, kPcmuFormat, + encoder_factory_->MakeAudioEncoder( + kPcmuPayload, kPcmuFormat, absl::nullopt)); + audio_channel->SetReceiveCodecs({{kPcmuPayload, kPcmuFormat}}); + audio_channel->StartSend(); + audio_channel->StartPlay(); + return audio_channel; } std::unique_ptr GetAudioFrame(int order) { @@ -80,11 +88,11 @@ class AudioChannelTest : public ::testing::Test { SimulatedClock fake_clock_; SineWaveGenerator wave_generator_; NiceMock transport_; + NiceMock task_queue_; std::unique_ptr task_queue_factory_; rtc::scoped_refptr audio_mixer_; rtc::scoped_refptr decoder_factory_; rtc::scoped_refptr encoder_factory_; - std::unique_ptr process_thread_; rtc::scoped_refptr audio_channel_; }; @@ -92,11 +100,9 @@ class AudioChannelTest : public ::testing::Test { // Resulted RTP packet is looped back into AudioChannel and gets decoded into // audio frame to see if it has some signal to indicate its validity. TEST_F(AudioChannelTest, PlayRtpByLocalLoop) { - rtc::Event event; auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) { audio_channel_->ReceivedRTPPacket( rtc::ArrayView(packet, length)); - event.Set(); return true; }; EXPECT_CALL(transport_, SendRtp).WillOnce(Invoke(loop_rtp)); @@ -105,8 +111,6 @@ TEST_F(AudioChannelTest, PlayRtpByLocalLoop) { audio_sender->SendAudioData(GetAudioFrame(0)); audio_sender->SendAudioData(GetAudioFrame(1)); - event.Wait(/*ms=*/1000); - AudioFrame empty_frame, audio_frame; empty_frame.Mute(); empty_frame.mutable_data(); // This will zero out the data. @@ -122,10 +126,8 @@ TEST_F(AudioChannelTest, PlayRtpByLocalLoop) { // Validate assigned local SSRC is resulted in RTP packet. 
TEST_F(AudioChannelTest, VerifyLocalSsrcAsAssigned) { RtpPacketReceived rtp; - rtc::Event event; auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) { rtp.Parse(packet, length); - event.Set(); return true; }; EXPECT_CALL(transport_, SendRtp).WillOnce(Invoke(loop_rtp)); @@ -134,10 +136,220 @@ TEST_F(AudioChannelTest, VerifyLocalSsrcAsAssigned) { audio_sender->SendAudioData(GetAudioFrame(0)); audio_sender->SendAudioData(GetAudioFrame(1)); - event.Wait(/*ms=*/1000); - EXPECT_EQ(rtp.Ssrc(), kLocalSsrc); } +// Check metrics after processing an RTP packet. +TEST_F(AudioChannelTest, TestIngressStatistics) { + auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) { + audio_channel_->ReceivedRTPPacket( + rtc::ArrayView(packet, length)); + return true; + }; + EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(loop_rtp)); + + auto audio_sender = audio_channel_->GetAudioSender(); + audio_sender->SendAudioData(GetAudioFrame(0)); + audio_sender->SendAudioData(GetAudioFrame(1)); + + AudioFrame audio_frame; + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + + absl::optional ingress_stats = + audio_channel_->GetIngressStatistics(); + EXPECT_TRUE(ingress_stats); + EXPECT_EQ(ingress_stats->neteq_stats.total_samples_received, 160ULL); + EXPECT_EQ(ingress_stats->neteq_stats.concealed_samples, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.concealment_events, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.inserted_samples_for_deceleration, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.removed_samples_for_acceleration, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.silent_concealed_samples, 0ULL); + // To extract the jitter buffer length in millisecond, jitter_buffer_delay_ms + // needs to be divided by jitter_buffer_emitted_count (number of samples). 
+ EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_delay_ms, 1600ULL); + EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_emitted_count, 160ULL); + EXPECT_GT(ingress_stats->neteq_stats.jitter_buffer_target_delay_ms, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.interruption_count, 0); + EXPECT_EQ(ingress_stats->neteq_stats.total_interruption_duration_ms, 0); + EXPECT_DOUBLE_EQ(ingress_stats->total_duration, 0.02); + + // Now without any RTP pending in jitter buffer pull more. + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + + // Send another RTP packet to intentionally break PLC. + audio_sender->SendAudioData(GetAudioFrame(2)); + audio_sender->SendAudioData(GetAudioFrame(3)); + + ingress_stats = audio_channel_->GetIngressStatistics(); + EXPECT_TRUE(ingress_stats); + EXPECT_EQ(ingress_stats->neteq_stats.total_samples_received, 320ULL); + EXPECT_EQ(ingress_stats->neteq_stats.concealed_samples, 168ULL); + EXPECT_EQ(ingress_stats->neteq_stats.concealment_events, 1ULL); + EXPECT_EQ(ingress_stats->neteq_stats.inserted_samples_for_deceleration, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.removed_samples_for_acceleration, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.silent_concealed_samples, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_delay_ms, 1600ULL); + EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_emitted_count, 160ULL); + EXPECT_GT(ingress_stats->neteq_stats.jitter_buffer_target_delay_ms, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.interruption_count, 0); + EXPECT_EQ(ingress_stats->neteq_stats.total_interruption_duration_ms, 0); + EXPECT_DOUBLE_EQ(ingress_stats->total_duration, 0.04); + + // Pull the last RTP packet. 
+ audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + + ingress_stats = audio_channel_->GetIngressStatistics(); + EXPECT_TRUE(ingress_stats); + EXPECT_EQ(ingress_stats->neteq_stats.total_samples_received, 480ULL); + EXPECT_EQ(ingress_stats->neteq_stats.concealed_samples, 168ULL); + EXPECT_EQ(ingress_stats->neteq_stats.concealment_events, 1ULL); + EXPECT_EQ(ingress_stats->neteq_stats.inserted_samples_for_deceleration, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.removed_samples_for_acceleration, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.silent_concealed_samples, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_delay_ms, 3200ULL); + EXPECT_EQ(ingress_stats->neteq_stats.jitter_buffer_emitted_count, 320ULL); + EXPECT_GT(ingress_stats->neteq_stats.jitter_buffer_target_delay_ms, 0ULL); + EXPECT_EQ(ingress_stats->neteq_stats.interruption_count, 0); + EXPECT_EQ(ingress_stats->neteq_stats.total_interruption_duration_ms, 0); + EXPECT_DOUBLE_EQ(ingress_stats->total_duration, 0.06); +} + +// Check ChannelStatistics metric after processing RTP and RTCP packets. +TEST_F(AudioChannelTest, TestChannelStatistics) { + auto loop_rtp = [&](const uint8_t* packet, size_t length, Unused) { + audio_channel_->ReceivedRTPPacket( + rtc::ArrayView(packet, length)); + return true; + }; + auto loop_rtcp = [&](const uint8_t* packet, size_t length) { + audio_channel_->ReceivedRTCPPacket( + rtc::ArrayView(packet, length)); + return true; + }; + EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(loop_rtp)); + EXPECT_CALL(transport_, SendRtcp).WillRepeatedly(Invoke(loop_rtcp)); + + // Simulate microphone giving audio frame (10 ms). This will trigger tranport + // to send RTP as handled in loop_rtp above. + auto audio_sender = audio_channel_->GetAudioSender(); + audio_sender->SendAudioData(GetAudioFrame(0)); + audio_sender->SendAudioData(GetAudioFrame(1)); + + // Simulate speaker requesting audio frame (10 ms). 
This will trigger VoIP + // engine to fetch audio samples from RTP packets stored in jitter buffer. + AudioFrame audio_frame; + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + + // Force sending RTCP SR report in order to have remote_rtcp field available + // in channel statistics. This will trigger tranport to send RTCP as handled + // in loop_rtcp above. + audio_channel_->SendRTCPReportForTesting(kRtcpSr); + + absl::optional channel_stats = + audio_channel_->GetChannelStatistics(); + EXPECT_TRUE(channel_stats); + + EXPECT_EQ(channel_stats->packets_sent, 1ULL); + EXPECT_EQ(channel_stats->bytes_sent, 160ULL); + + EXPECT_EQ(channel_stats->packets_received, 1ULL); + EXPECT_EQ(channel_stats->bytes_received, 160ULL); + EXPECT_EQ(channel_stats->jitter, 0); + EXPECT_EQ(channel_stats->packets_lost, 0); + EXPECT_EQ(channel_stats->remote_ssrc.value(), kLocalSsrc); + + EXPECT_TRUE(channel_stats->remote_rtcp.has_value()); + + EXPECT_EQ(channel_stats->remote_rtcp->jitter, 0); + EXPECT_EQ(channel_stats->remote_rtcp->packets_lost, 0); + EXPECT_EQ(channel_stats->remote_rtcp->fraction_lost, 0); + EXPECT_GT(channel_stats->remote_rtcp->last_report_received_timestamp_ms, 0); + EXPECT_FALSE(channel_stats->remote_rtcp->round_trip_time.has_value()); +} + +// Check ChannelStatistics RTT metric after processing RTP and RTCP packets +// using three audio channels where each represents media endpoint. +// +// 1) AC1 <- RTP/RTCP -> AC2 +// 2) AC1 <- RTP/RTCP -> AC3 +// +// During step 1), AC1 should be able to check RTT from AC2's SSRC. +// During step 2), AC1 should be able to check RTT from AC3's SSRC. +TEST_F(AudioChannelTest, RttIsAvailableAfterChangeOfRemoteSsrc) { + // Create AC2 and AC3. 
+ constexpr uint32_t kAc2Ssrc = 0xdeadbeef; + constexpr uint32_t kAc3Ssrc = 0xdeafbeef; + + auto ac_2 = CreateAudioChannel(kAc2Ssrc); + auto ac_3 = CreateAudioChannel(kAc3Ssrc); + + auto send_recv_rtp = [&](rtc::scoped_refptr rtp_sender, + rtc::scoped_refptr rtp_receiver) { + // Setup routing logic via transport_. + auto route_rtp = [&](const uint8_t* packet, size_t length, Unused) { + rtp_receiver->ReceivedRTPPacket(rtc::MakeArrayView(packet, length)); + return true; + }; + ON_CALL(transport_, SendRtp).WillByDefault(route_rtp); + + // This will trigger route_rtp callback via transport_. + rtp_sender->GetAudioSender()->SendAudioData(GetAudioFrame(0)); + rtp_sender->GetAudioSender()->SendAudioData(GetAudioFrame(1)); + + // Process received RTP in receiver. + AudioFrame audio_frame; + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + audio_mixer_->Mix(/*number_of_channels=*/1, &audio_frame); + + // Revert to default to avoid using reference in route_rtp lambda. + ON_CALL(transport_, SendRtp).WillByDefault(Return(true)); + }; + + auto send_recv_rtcp = [&](rtc::scoped_refptr rtcp_sender, + rtc::scoped_refptr rtcp_receiver) { + // Setup routing logic via transport_. + auto route_rtcp = [&](const uint8_t* packet, size_t length) { + rtcp_receiver->ReceivedRTCPPacket(rtc::MakeArrayView(packet, length)); + return true; + }; + ON_CALL(transport_, SendRtcp).WillByDefault(route_rtcp); + + // This will trigger route_rtcp callback via transport_. + rtcp_sender->SendRTCPReportForTesting(kRtcpSr); + + // Revert to default to avoid using reference in route_rtcp lambda. 
+ ON_CALL(transport_, SendRtcp).WillByDefault(Return(true)); + }; + + // AC1 <-- RTP/RTCP --> AC2 + send_recv_rtp(audio_channel_, ac_2); + send_recv_rtp(ac_2, audio_channel_); + send_recv_rtcp(audio_channel_, ac_2); + send_recv_rtcp(ac_2, audio_channel_); + + absl::optional channel_stats = + audio_channel_->GetChannelStatistics(); + ASSERT_TRUE(channel_stats); + EXPECT_EQ(channel_stats->remote_ssrc, kAc2Ssrc); + ASSERT_TRUE(channel_stats->remote_rtcp); + EXPECT_GT(channel_stats->remote_rtcp->round_trip_time, 0.0); + + // AC1 <-- RTP/RTCP --> AC3 + send_recv_rtp(audio_channel_, ac_3); + send_recv_rtp(ac_3, audio_channel_); + send_recv_rtcp(audio_channel_, ac_3); + send_recv_rtcp(ac_3, audio_channel_); + + channel_stats = audio_channel_->GetChannelStatistics(); + ASSERT_TRUE(channel_stats); + EXPECT_EQ(channel_stats->remote_ssrc, kAc3Ssrc); + ASSERT_TRUE(channel_stats->remote_rtcp); + EXPECT_GT(channel_stats->remote_rtcp->round_trip_time, 0.0); +} + } // namespace } // namespace webrtc diff --git a/audio/voip/test/audio_egress_unittest.cc b/audio/voip/test/audio_egress_unittest.cc index 3391265880..0692ef2df4 100644 --- a/audio/voip/test/audio_egress_unittest.cc +++ b/audio/voip/test/audio_egress_unittest.cc @@ -14,6 +14,7 @@ #include "api/task_queue/default_task_queue_factory.h" #include "modules/audio_mixer/sine_wave_generator.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" #include "test/gmock.h" @@ -27,27 +28,28 @@ using ::testing::Invoke; using ::testing::NiceMock; using ::testing::Unused; -std::unique_ptr CreateRtpStack(Clock* clock, - Transport* transport, - uint32_t remote_ssrc) { - RtpRtcp::Configuration rtp_config; +std::unique_ptr CreateRtpStack(Clock* clock, + Transport* transport, + uint32_t remote_ssrc) { + RtpRtcpInterface::Configuration rtp_config; rtp_config.clock = clock; rtp_config.audio = true; 
rtp_config.rtcp_report_interval_ms = 5000; rtp_config.outgoing_transport = transport; rtp_config.local_media_ssrc = remote_ssrc; - auto rtp_rtcp = RtpRtcp::Create(rtp_config); + auto rtp_rtcp = ModuleRtpRtcpImpl2::Create(rtp_config); rtp_rtcp->SetSendingMediaStatus(false); rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound); return rtp_rtcp; } +constexpr int16_t kAudioLevel = 3004; // Used for sine wave level. + // AudioEgressTest configures audio egress by using Rtp Stack, fake clock, // and task queue factory. Encoder factory is needed to create codec and // configure the RTP stack in audio egress. class AudioEgressTest : public ::testing::Test { public: - static constexpr int16_t kAudioLevel = 3004; // Used for sine wave level. static constexpr uint16_t kSeqNum = 12345; static constexpr uint64_t kStartTime = 123456789; static constexpr uint32_t kRemoteSsrc = 0xDEADBEEF; @@ -100,7 +102,7 @@ class AudioEgressTest : public ::testing::Test { SimulatedClock fake_clock_; NiceMock transport_; SineWaveGenerator wave_generator_; - std::unique_ptr rtp_rtcp_; + std::unique_ptr rtp_rtcp_; std::unique_ptr task_queue_factory_; rtc::scoped_refptr encoder_factory_; std::unique_ptr egress_; @@ -285,5 +287,37 @@ TEST_F(AudioEgressTest, SendDTMF) { EXPECT_EQ(dtmf_count, kExpected); } +TEST_F(AudioEgressTest, TestAudioInputLevelAndEnergyDuration) { + // Per audio_level's kUpdateFrequency, we need more than 10 audio samples to + // get audio level from input source. + constexpr int kExpected = 6; + rtc::Event event; + int rtp_count = 0; + auto rtp_sent = [&](const uint8_t* packet, size_t length, Unused) { + if (++rtp_count == kExpected) { + event.Set(); + } + return true; + }; + + EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(rtp_sent)); + + // Two 10 ms audio frames will result in rtp packet with ptime 20. 
+ for (size_t i = 0; i < kExpected * 2; i++) { + egress_->SendAudioData(GetAudioFrame(i)); + fake_clock_.AdvanceTimeMilliseconds(10); + } + + event.Wait(/*give_up_after_ms=*/1000); + EXPECT_EQ(rtp_count, kExpected); + + constexpr double kExpectedEnergy = 0.00016809565587789564; + constexpr double kExpectedDuration = 0.11999999999999998; + + EXPECT_EQ(egress_->GetInputAudioLevel(), kAudioLevel); + EXPECT_DOUBLE_EQ(egress_->GetInputTotalEnergy(), kExpectedEnergy); + EXPECT_DOUBLE_EQ(egress_->GetInputTotalDuration(), kExpectedDuration); +} + } // namespace } // namespace webrtc diff --git a/audio/voip/test/audio_ingress_unittest.cc b/audio/voip/test/audio_ingress_unittest.cc index bedb82e211..55ecfec695 100644 --- a/audio/voip/test/audio_ingress_unittest.cc +++ b/audio/voip/test/audio_ingress_unittest.cc @@ -15,6 +15,7 @@ #include "api/task_queue/default_task_queue_factory.h" #include "audio/voip/audio_egress.h" #include "modules/audio_mixer/sine_wave_generator.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" #include "test/gmock.h" @@ -38,14 +39,14 @@ class AudioIngressTest : public ::testing::Test { : fake_clock_(123456789), wave_generator_(1000.0, kAudioLevel) { receive_statistics_ = ReceiveStatistics::Create(&fake_clock_); - RtpRtcp::Configuration rtp_config; + RtpRtcpInterface::Configuration rtp_config; rtp_config.clock = &fake_clock_; rtp_config.audio = true; rtp_config.receive_statistics = receive_statistics_.get(); rtp_config.rtcp_report_interval_ms = 5000; rtp_config.outgoing_transport = &transport_; rtp_config.local_media_ssrc = 0xdeadc0de; - rtp_rtcp_ = RtpRtcp::Create(rtp_config); + rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(rtp_config); rtp_rtcp_->SetSendingMediaStatus(false); rtp_rtcp_->SetRTCPStatus(RtcpMode::kCompound); @@ -94,7 +95,7 @@ class AudioIngressTest : public ::testing::Test { SineWaveGenerator wave_generator_; NiceMock transport_; std::unique_ptr receive_statistics_; - 
std::unique_ptr rtp_rtcp_; + std::unique_ptr rtp_rtcp_; rtc::scoped_refptr encoder_factory_; rtc::scoped_refptr decoder_factory_; std::unique_ptr task_queue_factory_; @@ -133,9 +134,10 @@ TEST_F(AudioIngressTest, GetAudioFrameAfterRtpReceived) { EXPECT_EQ(audio_frame.elapsed_time_ms_, 0); } -TEST_F(AudioIngressTest, GetSpeechOutputLevelFullRange) { - // Per audio_level's kUpdateFrequency, we need 11 RTP to get audio level. - constexpr int kNumRtp = 11; +TEST_F(AudioIngressTest, TestSpeechOutputLevelAndEnergyDuration) { + // Per audio_level's kUpdateFrequency, we need more than 10 audio samples to + // get audio level from output source. + constexpr int kNumRtp = 6; int rtp_count = 0; rtc::Event event; auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) { @@ -150,15 +152,21 @@ TEST_F(AudioIngressTest, GetSpeechOutputLevelFullRange) { egress_->SendAudioData(GetAudioFrame(i)); fake_clock_.AdvanceTimeMilliseconds(10); } - event.Wait(/*ms=*/1000); + event.Wait(/*give_up_after_ms=*/1000); - for (int i = 0; i < kNumRtp; ++i) { + for (int i = 0; i < kNumRtp * 2; ++i) { AudioFrame audio_frame; EXPECT_EQ( ingress_->GetAudioFrameWithInfo(kPcmuFormat.clockrate_hz, &audio_frame), AudioMixer::Source::AudioFrameInfo::kNormal); } - EXPECT_EQ(ingress_->GetSpeechOutputLevelFullRange(), kAudioLevel); + EXPECT_EQ(ingress_->GetOutputAudioLevel(), kAudioLevel); + + constexpr double kExpectedEnergy = 0.00016809565587789564; + constexpr double kExpectedDuration = 0.11999999999999998; + + EXPECT_DOUBLE_EQ(ingress_->GetOutputTotalEnergy(), kExpectedEnergy); + EXPECT_DOUBLE_EQ(ingress_->GetOutputTotalDuration(), kExpectedDuration); } TEST_F(AudioIngressTest, PreferredSampleRate) { @@ -180,5 +188,48 @@ TEST_F(AudioIngressTest, PreferredSampleRate) { EXPECT_EQ(ingress_->PreferredSampleRate(), kPcmuFormat.clockrate_hz); } +// This test highlights the case where caller invokes StopPlay() which then +// AudioIngress should play silence frame afterwards. 
+TEST_F(AudioIngressTest, GetMutedAudioFrameAfterRtpReceivedAndStopPlay) { + // StopPlay before we start sending RTP packet with sine wave. + ingress_->StopPlay(); + + // Send 6 RTP packets to generate more than 100 ms audio sample to get + // valid speech level. + constexpr int kNumRtp = 6; + int rtp_count = 0; + rtc::Event event; + auto handle_rtp = [&](const uint8_t* packet, size_t length, Unused) { + ingress_->ReceivedRTPPacket(rtc::ArrayView(packet, length)); + if (++rtp_count == kNumRtp) { + event.Set(); + } + return true; + }; + EXPECT_CALL(transport_, SendRtp).WillRepeatedly(Invoke(handle_rtp)); + for (int i = 0; i < kNumRtp * 2; i++) { + egress_->SendAudioData(GetAudioFrame(i)); + fake_clock_.AdvanceTimeMilliseconds(10); + } + event.Wait(/*give_up_after_ms=*/1000); + + for (int i = 0; i < kNumRtp * 2; ++i) { + AudioFrame audio_frame; + EXPECT_EQ( + ingress_->GetAudioFrameWithInfo(kPcmuFormat.clockrate_hz, &audio_frame), + AudioMixer::Source::AudioFrameInfo::kMuted); + const int16_t* audio_data = audio_frame.data(); + size_t length = + audio_frame.samples_per_channel_ * audio_frame.num_channels_; + for (size_t j = 0; j < length; ++j) { + EXPECT_EQ(audio_data[j], 0); + } + } + + // Now we should still see valid speech output level as StopPlay won't affect + // the measurement. + EXPECT_EQ(ingress_->GetOutputAudioLevel(), kAudioLevel); +} + } // namespace } // namespace webrtc diff --git a/audio/voip/test/mock_task_queue.h b/audio/voip/test/mock_task_queue.h new file mode 100644 index 0000000000..c3553a21e7 --- /dev/null +++ b/audio/voip/test/mock_task_queue.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef AUDIO_VOIP_TEST_MOCK_TASK_QUEUE_H_ +#define AUDIO_VOIP_TEST_MOCK_TASK_QUEUE_H_ + +#include + +#include "api/task_queue/task_queue_factory.h" +#include "test/gmock.h" + +namespace webrtc { + +// MockTaskQueue enables immediate task run from global TaskQueueBase. +// It's necessary for some tests depending on TaskQueueBase internally. +class MockTaskQueue : public TaskQueueBase { + public: + MockTaskQueue() : current_(this) {} + + // Delete is deliberately defined as no-op as MockTaskQueue is expected to + // hold onto current global TaskQueueBase throughout the testing. + void Delete() override {} + + MOCK_METHOD(void, PostTask, (std::unique_ptr), (override)); + MOCK_METHOD(void, + PostDelayedTask, + (std::unique_ptr, uint32_t), + (override)); + + private: + CurrentTaskQueueSetter current_; +}; + +class MockTaskQueueFactory : public TaskQueueFactory { + public: + explicit MockTaskQueueFactory(MockTaskQueue* task_queue) + : task_queue_(task_queue) {} + + std::unique_ptr CreateTaskQueue( + absl::string_view name, + Priority priority) const override { + // Default MockTaskQueue::Delete is no-op, therefore it's safe to pass the + // raw pointer. 
+ return std::unique_ptr(task_queue_); + } + + private: + MockTaskQueue* task_queue_; +}; + +} // namespace webrtc + +#endif // AUDIO_VOIP_TEST_MOCK_TASK_QUEUE_H_ diff --git a/audio/voip/test/voip_core_unittest.cc b/audio/voip/test/voip_core_unittest.cc index c1969d6ed0..896d0d98bb 100644 --- a/audio/voip/test/voip_core_unittest.cc +++ b/audio/voip/test/voip_core_unittest.cc @@ -24,6 +24,9 @@ using ::testing::NiceMock; using ::testing::Return; constexpr int kPcmuPayload = 0; +constexpr int kPcmuSampleRateHz = 8000; +constexpr int kDtmfEventDurationMs = 1000; +constexpr DtmfEvent kDtmfEventCode = DtmfEvent::kDigitZero; class VoipCoreTest : public ::testing::Test { public: @@ -35,12 +38,12 @@ class VoipCoreTest : public ::testing::Test { auto encoder_factory = CreateBuiltinAudioEncoderFactory(); auto decoder_factory = CreateBuiltinAudioDecoderFactory(); rtc::scoped_refptr audio_processing = - new rtc::RefCountedObject(); + rtc::make_ref_counted>(); - voip_core_ = std::make_unique(); - voip_core_->Init(std::move(encoder_factory), std::move(decoder_factory), - CreateDefaultTaskQueueFactory(), audio_device_, - std::move(audio_processing)); + voip_core_ = std::make_unique( + std::move(encoder_factory), std::move(decoder_factory), + CreateDefaultTaskQueueFactory(), audio_device_, + std::move(audio_processing)); } std::unique_ptr voip_core_; @@ -60,13 +63,23 @@ TEST_F(VoipCoreTest, BasicVoipCoreOperation) { EXPECT_CALL(*audio_device_, StartPlayout()).WillOnce(Return(0)); auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de); - EXPECT_TRUE(channel); - voip_core_->SetSendCodec(*channel, kPcmuPayload, kPcmuFormat); - voip_core_->SetReceiveCodecs(*channel, {{kPcmuPayload, kPcmuFormat}}); + EXPECT_EQ(voip_core_->SetSendCodec(channel, kPcmuPayload, kPcmuFormat), + VoipResult::kOk); + EXPECT_EQ( + voip_core_->SetReceiveCodecs(channel, {{kPcmuPayload, kPcmuFormat}}), + VoipResult::kOk); - EXPECT_TRUE(voip_core_->StartSend(*channel)); - 
EXPECT_TRUE(voip_core_->StartPlayout(*channel)); + EXPECT_EQ(voip_core_->StartSend(channel), VoipResult::kOk); + EXPECT_EQ(voip_core_->StartPlayout(channel), VoipResult::kOk); + + EXPECT_EQ(voip_core_->RegisterTelephoneEventType(channel, kPcmuPayload, + kPcmuSampleRateHz), + VoipResult::kOk); + + EXPECT_EQ( + voip_core_->SendDtmfEvent(channel, kDtmfEventCode, kDtmfEventDurationMs), + VoipResult::kOk); // Program mock as operational that is ready to be stopped. EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(true)); @@ -74,26 +87,103 @@ TEST_F(VoipCoreTest, BasicVoipCoreOperation) { EXPECT_CALL(*audio_device_, StopRecording()).WillOnce(Return(0)); EXPECT_CALL(*audio_device_, StopPlayout()).WillOnce(Return(0)); - EXPECT_TRUE(voip_core_->StopSend(*channel)); - EXPECT_TRUE(voip_core_->StopPlayout(*channel)); - voip_core_->ReleaseChannel(*channel); + EXPECT_EQ(voip_core_->StopSend(channel), VoipResult::kOk); + EXPECT_EQ(voip_core_->StopPlayout(channel), VoipResult::kOk); + EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk); } TEST_F(VoipCoreTest, ExpectFailToUseReleasedChannelId) { auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de); - EXPECT_TRUE(channel); // Release right after creation. - voip_core_->ReleaseChannel(*channel); + EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk); // Now use released channel. - // These should be no-op. 
- voip_core_->SetSendCodec(*channel, kPcmuPayload, kPcmuFormat); - voip_core_->SetReceiveCodecs(*channel, {{kPcmuPayload, kPcmuFormat}}); + EXPECT_EQ(voip_core_->SetSendCodec(channel, kPcmuPayload, kPcmuFormat), + VoipResult::kInvalidArgument); + EXPECT_EQ( + voip_core_->SetReceiveCodecs(channel, {{kPcmuPayload, kPcmuFormat}}), + VoipResult::kInvalidArgument); + EXPECT_EQ(voip_core_->RegisterTelephoneEventType(channel, kPcmuPayload, + kPcmuSampleRateHz), + VoipResult::kInvalidArgument); + EXPECT_EQ(voip_core_->StartSend(channel), VoipResult::kInvalidArgument); + EXPECT_EQ(voip_core_->StartPlayout(channel), VoipResult::kInvalidArgument); + EXPECT_EQ( + voip_core_->SendDtmfEvent(channel, kDtmfEventCode, kDtmfEventDurationMs), + VoipResult::kInvalidArgument); +} + +TEST_F(VoipCoreTest, SendDtmfEventWithoutRegistering) { + // Program mock as non-operational and ready to start send. + EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(false)); + EXPECT_CALL(*audio_device_, InitRecording()).WillOnce(Return(0)); + EXPECT_CALL(*audio_device_, StartRecording()).WillOnce(Return(0)); + + auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de); + + EXPECT_EQ(voip_core_->SetSendCodec(channel, kPcmuPayload, kPcmuFormat), + VoipResult::kOk); + + EXPECT_EQ(voip_core_->StartSend(channel), VoipResult::kOk); + // Send Dtmf event without registering beforehand, thus payload + // type is not set and kFailedPrecondition is expected. + EXPECT_EQ( + voip_core_->SendDtmfEvent(channel, kDtmfEventCode, kDtmfEventDurationMs), + VoipResult::kFailedPrecondition); + + // Program mock as sending and is ready to be stopped. 
+ EXPECT_CALL(*audio_device_, Recording()).WillOnce(Return(true)); + EXPECT_CALL(*audio_device_, StopRecording()).WillOnce(Return(0)); + + EXPECT_EQ(voip_core_->StopSend(channel), VoipResult::kOk); + EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk); +} + +TEST_F(VoipCoreTest, SendDtmfEventWithoutStartSend) { + auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de); + + EXPECT_EQ(voip_core_->RegisterTelephoneEventType(channel, kPcmuPayload, + kPcmuSampleRateHz), + VoipResult::kOk); + + // Send Dtmf event without calling StartSend beforehand, thus + // Dtmf events cannot be sent and kFailedPrecondition is expected. + EXPECT_EQ( + voip_core_->SendDtmfEvent(channel, kDtmfEventCode, kDtmfEventDurationMs), + VoipResult::kFailedPrecondition); + + EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk); +} + +TEST_F(VoipCoreTest, StartSendAndPlayoutWithoutSettingCodec) { + auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de); + + // Call StartSend and StartPlayout without setting send/receive + // codec. Code should see that codecs aren't set and return false. + EXPECT_EQ(voip_core_->StartSend(channel), VoipResult::kFailedPrecondition); + EXPECT_EQ(voip_core_->StartPlayout(channel), VoipResult::kFailedPrecondition); + + EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk); +} + +TEST_F(VoipCoreTest, StopSendAndPlayoutWithoutStarting) { + auto channel = voip_core_->CreateChannel(&transport_, 0xdeadc0de); + + EXPECT_EQ(voip_core_->SetSendCodec(channel, kPcmuPayload, kPcmuFormat), + VoipResult::kOk); + EXPECT_EQ( + voip_core_->SetReceiveCodecs(channel, {{kPcmuPayload, kPcmuFormat}}), + VoipResult::kOk); + + // Call StopSend and StopPlayout without starting them in + // the first place. Should see that it is already in the + // stopped state and return true. 
+ EXPECT_EQ(voip_core_->StopSend(channel), VoipResult::kOk); + EXPECT_EQ(voip_core_->StopPlayout(channel), VoipResult::kOk); - EXPECT_FALSE(voip_core_->StartSend(*channel)); - EXPECT_FALSE(voip_core_->StartPlayout(*channel)); + EXPECT_EQ(voip_core_->ReleaseChannel(channel), VoipResult::kOk); } } // namespace diff --git a/audio/voip/voip_core.cc b/audio/voip/voip_core.cc index 3275f028cd..fd66379f4a 100644 --- a/audio/voip/voip_core.cc +++ b/audio/voip/voip_core.cc @@ -15,7 +15,6 @@ #include #include "api/audio_codecs/audio_format.h" -#include "rtc_base/critical_section.h" #include "rtc_base/logging.h" namespace webrtc { @@ -38,29 +37,33 @@ static constexpr int kMaxChannelId = 100000; } // namespace -bool VoipCore::Init(rtc::scoped_refptr encoder_factory, - rtc::scoped_refptr decoder_factory, - std::unique_ptr task_queue_factory, - rtc::scoped_refptr audio_device_module, - rtc::scoped_refptr audio_processing) { +VoipCore::VoipCore(rtc::scoped_refptr encoder_factory, + rtc::scoped_refptr decoder_factory, + std::unique_ptr task_queue_factory, + rtc::scoped_refptr audio_device_module, + rtc::scoped_refptr audio_processing) { encoder_factory_ = std::move(encoder_factory); decoder_factory_ = std::move(decoder_factory); task_queue_factory_ = std::move(task_queue_factory); audio_device_module_ = std::move(audio_device_module); - - process_thread_ = ProcessThread::Create("ModuleProcessThread"); + audio_processing_ = std::move(audio_processing); audio_mixer_ = AudioMixerImpl::Create(); - if (audio_processing) { - audio_processing_ = std::move(audio_processing); - AudioProcessing::Config apm_config = audio_processing_->GetConfig(); - apm_config.echo_canceller.enabled = true; - audio_processing_->ApplyConfig(apm_config); - } - // AudioTransportImpl depends on audio mixer and audio processing instances. 
audio_transport_ = std::make_unique( - audio_mixer_.get(), audio_processing_.get()); + audio_mixer_.get(), audio_processing_.get(), nullptr); +} + +bool VoipCore::InitializeIfNeeded() { + // |audio_device_module_| internally owns a lock and the whole logic here + // needs to be executed atomically once using another lock in VoipCore. + // Further changes in this method will need to make sure that no deadlock is + // introduced in the future. + MutexLock lock(&lock_); + + if (initialized_) { + return true; + } // Initialize ADM. if (audio_device_module_->Init() != 0) { @@ -73,7 +76,6 @@ bool VoipCore::Init(rtc::scoped_refptr encoder_factory, // recording device functioning (e.g webinar where only speaker is available). // It's also possible that there are other audio devices available that may // work. - // TODO(natim@webrtc.org): consider moving this part out of initialization. // Initialize default speaker device. if (audio_device_module_->SetPlayoutDevice(kAudioDeviceId) != 0) { @@ -114,13 +116,14 @@ bool VoipCore::Init(rtc::scoped_refptr encoder_factory, RTC_LOG(LS_WARNING) << "Unable to register audio callback."; } + initialized_ = true; + return true; } -absl::optional VoipCore::CreateChannel( - Transport* transport, - absl::optional local_ssrc) { - absl::optional channel; +ChannelId VoipCore::CreateChannel(Transport* transport, + absl::optional local_ssrc) { + ChannelId channel_id; // Set local ssrc to random if not set by caller. 
if (!local_ssrc) { @@ -128,16 +131,16 @@ absl::optional VoipCore::CreateChannel( local_ssrc = random.Rand(); } - rtc::scoped_refptr audio_channel = - new rtc::RefCountedObject( - transport, local_ssrc.value(), task_queue_factory_.get(), - process_thread_.get(), audio_mixer_.get(), decoder_factory_); + rtc::scoped_refptr channel = + rtc::make_ref_counted(transport, local_ssrc.value(), + task_queue_factory_.get(), + audio_mixer_.get(), decoder_factory_); { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); - channel = static_cast(next_channel_id_); - channels_[*channel] = audio_channel; + channel_id = static_cast(next_channel_id_); + channels_[channel_id] = channel; next_channel_id_++; if (next_channel_id_ >= kMaxChannelId) { next_channel_id_ = 0; @@ -145,41 +148,65 @@ absl::optional VoipCore::CreateChannel( } // Set ChannelId in audio channel for logging/debugging purpose. - audio_channel->SetId(*channel); + channel->SetId(channel_id); - return channel; + return channel_id; } -void VoipCore::ReleaseChannel(ChannelId channel) { +VoipResult VoipCore::ReleaseChannel(ChannelId channel_id) { // Destroy channel outside of the lock. - rtc::scoped_refptr audio_channel; + rtc::scoped_refptr channel; + + bool no_channels_after_release = false; + { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); - auto iter = channels_.find(channel); + auto iter = channels_.find(channel_id); if (iter != channels_.end()) { - audio_channel = std::move(iter->second); + channel = std::move(iter->second); channels_.erase(iter); } + + no_channels_after_release = channels_.empty(); + } + + VoipResult status_code = VoipResult::kOk; + if (!channel) { + RTC_LOG(LS_WARNING) << "Channel " << channel_id << " not found"; + status_code = VoipResult::kInvalidArgument; } - if (!audio_channel) { - RTC_LOG(LS_WARNING) << "Channel " << channel << " not found"; + + if (no_channels_after_release) { + // TODO(bugs.webrtc.org/11581): unclear if we still need to clear |channel| + // here. 
+ channel = nullptr; + + // Make sure to stop playout on ADM if it is playing. + if (audio_device_module_->Playing()) { + if (audio_device_module_->StopPlayout() != 0) { + RTC_LOG(LS_WARNING) << "StopPlayout failed"; + status_code = VoipResult::kInternal; + } + } } + + return status_code; } -rtc::scoped_refptr VoipCore::GetChannel(ChannelId channel) { - rtc::scoped_refptr audio_channel; +rtc::scoped_refptr VoipCore::GetChannel(ChannelId channel_id) { + rtc::scoped_refptr channel; { - rtc::CritScope lock(&lock_); - auto iter = channels_.find(channel); + MutexLock lock(&lock_); + auto iter = channels_.find(channel_id); if (iter != channels_.end()) { - audio_channel = iter->second; + channel = iter->second; } } - if (!audio_channel) { - RTC_LOG(LS_ERROR) << "Channel " << channel << " not found"; + if (!channel) { + RTC_LOG(LS_ERROR) << "Channel " << channel_id << " not found"; } - return audio_channel; + return channel; } bool VoipCore::UpdateAudioTransportWithSenders() { @@ -191,7 +218,7 @@ bool VoipCore::UpdateAudioTransportWithSenders() { int max_sampling_rate = 8000; size_t max_num_channels = 1; { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); // Reserve to prevent run time vector re-allocation. audio_senders.reserve(channels_.size()); for (auto kv : channels_) { @@ -217,6 +244,11 @@ bool VoipCore::UpdateAudioTransportWithSenders() { // Depending on availability of senders, turn on or off ADM recording. if (!audio_senders.empty()) { + // Initialize audio device module and default device if needed. 
+ if (!InitializeIfNeeded()) { + return false; + } + if (!audio_device_module_->Recording()) { if (audio_device_module_->InitRecording() != 0) { RTC_LOG(LS_ERROR) << "InitRecording failed"; @@ -237,112 +269,232 @@ bool VoipCore::UpdateAudioTransportWithSenders() { return true; } -bool VoipCore::StartSend(ChannelId channel) { - auto audio_channel = GetChannel(channel); - if (!audio_channel) { - return false; +VoipResult VoipCore::StartSend(ChannelId channel_id) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } - audio_channel->StartSend(); + if (!channel->StartSend()) { + return VoipResult::kFailedPrecondition; + } - return UpdateAudioTransportWithSenders(); + return UpdateAudioTransportWithSenders() ? VoipResult::kOk + : VoipResult::kInternal; } -bool VoipCore::StopSend(ChannelId channel) { - auto audio_channel = GetChannel(channel); - if (!audio_channel) { - return false; +VoipResult VoipCore::StopSend(ChannelId channel_id) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } - audio_channel->StopSend(); + channel->StopSend(); - return UpdateAudioTransportWithSenders(); + return UpdateAudioTransportWithSenders() ? VoipResult::kOk + : VoipResult::kInternal; } -bool VoipCore::StartPlayout(ChannelId channel) { - auto audio_channel = GetChannel(channel); - if (!audio_channel) { - return false; +VoipResult VoipCore::StartPlayout(ChannelId channel_id) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } - audio_channel->StartPlay(); + if (channel->IsPlaying()) { + return VoipResult::kOk; + } + + if (!channel->StartPlay()) { + return VoipResult::kFailedPrecondition; + } + + // Initialize audio device module and default device if needed. 
+ if (!InitializeIfNeeded()) { + return VoipResult::kInternal; + } if (!audio_device_module_->Playing()) { if (audio_device_module_->InitPlayout() != 0) { RTC_LOG(LS_ERROR) << "InitPlayout failed"; - return false; + return VoipResult::kInternal; } if (audio_device_module_->StartPlayout() != 0) { RTC_LOG(LS_ERROR) << "StartPlayout failed"; - return false; + return VoipResult::kInternal; } } - return true; + + return VoipResult::kOk; } -bool VoipCore::StopPlayout(ChannelId channel) { - auto audio_channel = GetChannel(channel); - if (!audio_channel) { - return false; +VoipResult VoipCore::StopPlayout(ChannelId channel_id) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } - audio_channel->StopPlay(); + channel->StopPlay(); - bool stop_device = true; - { - rtc::CritScope lock(&lock_); - for (auto kv : channels_) { - rtc::scoped_refptr& channel = kv.second; - if (channel->IsPlaying()) { - stop_device = false; - break; - } - } + return VoipResult::kOk; +} + +VoipResult VoipCore::ReceivedRTPPacket( + ChannelId channel_id, + rtc::ArrayView rtp_packet) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } - if (stop_device && audio_device_module_->Playing()) { - if (audio_device_module_->StopPlayout() != 0) { - RTC_LOG(LS_ERROR) << "StopPlayout failed"; - return false; - } + channel->ReceivedRTPPacket(rtp_packet); + + return VoipResult::kOk; +} + +VoipResult VoipCore::ReceivedRTCPPacket( + ChannelId channel_id, + rtc::ArrayView rtcp_packet) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } - return true; + + channel->ReceivedRTCPPacket(rtcp_packet); + + return VoipResult::kOk; } -void VoipCore::ReceivedRTPPacket(ChannelId channel, - rtc::ArrayView rtp_packet) { - // Failure to locate channel is logged internally in GetChannel. 
- if (auto audio_channel = GetChannel(channel)) { - audio_channel->ReceivedRTPPacket(rtp_packet); +VoipResult VoipCore::SetSendCodec(ChannelId channel_id, + int payload_type, + const SdpAudioFormat& encoder_format) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } + + auto encoder = encoder_factory_->MakeAudioEncoder( + payload_type, encoder_format, absl::nullopt); + channel->SetEncoder(payload_type, encoder_format, std::move(encoder)); + + return VoipResult::kOk; } -void VoipCore::ReceivedRTCPPacket(ChannelId channel, - rtc::ArrayView rtcp_packet) { - // Failure to locate channel is logged internally in GetChannel. - if (auto audio_channel = GetChannel(channel)) { - audio_channel->ReceivedRTCPPacket(rtcp_packet); +VoipResult VoipCore::SetReceiveCodecs( + ChannelId channel_id, + const std::map& decoder_specs) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } + + channel->SetReceiveCodecs(decoder_specs); + + return VoipResult::kOk; } -void VoipCore::SetSendCodec(ChannelId channel, - int payload_type, - const SdpAudioFormat& encoder_format) { - // Failure to locate channel is logged internally in GetChannel. - if (auto audio_channel = GetChannel(channel)) { - auto encoder = encoder_factory_->MakeAudioEncoder( - payload_type, encoder_format, absl::nullopt); - audio_channel->SetEncoder(payload_type, encoder_format, std::move(encoder)); +VoipResult VoipCore::RegisterTelephoneEventType(ChannelId channel_id, + int rtp_payload_type, + int sample_rate_hz) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } + + channel->RegisterTelephoneEventType(rtp_payload_type, sample_rate_hz); + + return VoipResult::kOk; } -void VoipCore::SetReceiveCodecs( - ChannelId channel, - const std::map& decoder_specs) { - // Failure to locate channel is logged internally in GetChannel. 
- if (auto audio_channel = GetChannel(channel)) { - audio_channel->SetReceiveCodecs(decoder_specs); +VoipResult VoipCore::SendDtmfEvent(ChannelId channel_id, + DtmfEvent dtmf_event, + int duration_ms) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; + } + + return (channel->SendTelephoneEvent(static_cast(dtmf_event), duration_ms) + ? VoipResult::kOk + : VoipResult::kFailedPrecondition); +} + +VoipResult VoipCore::GetIngressStatistics(ChannelId channel_id, + IngressStatistics& ingress_stats) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; + } + + ingress_stats = channel->GetIngressStatistics(); + + return VoipResult::kOk; +} + +VoipResult VoipCore::GetChannelStatistics(ChannelId channel_id, + ChannelStatistics& channel_stats) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; } + + channel_stats = channel->GetChannelStatistics(); + + return VoipResult::kOk; +} + +VoipResult VoipCore::SetInputMuted(ChannelId channel_id, bool enable) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; + } + + channel->SetMute(enable); + + return VoipResult::kOk; +} + +VoipResult VoipCore::GetInputVolumeInfo(ChannelId channel_id, + VolumeInfo& input_volume) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; + } + + input_volume.audio_level = channel->GetInputAudioLevel(); + input_volume.total_energy = channel->GetInputTotalEnergy(); + input_volume.total_duration = channel->GetInputTotalDuration(); + + return VoipResult::kOk; +} + +VoipResult VoipCore::GetOutputVolumeInfo(ChannelId channel_id, + VolumeInfo& output_volume) { + rtc::scoped_refptr channel = GetChannel(channel_id); + + if (!channel) { + return VoipResult::kInvalidArgument; + } + + 
output_volume.audio_level = channel->GetOutputAudioLevel(); + output_volume.total_energy = channel->GetOutputTotalEnergy(); + output_volume.total_duration = channel->GetOutputTotalDuration(); + + return VoipResult::kOk; } } // namespace webrtc diff --git a/audio/voip/voip_core.h b/audio/voip/voip_core.h index 08929d3afd..359e07272d 100644 --- a/audio/voip/voip_core.h +++ b/audio/voip/voip_core.h @@ -23,15 +23,17 @@ #include "api/task_queue/task_queue_factory.h" #include "api/voip/voip_base.h" #include "api/voip/voip_codec.h" +#include "api/voip/voip_dtmf.h" #include "api/voip/voip_engine.h" #include "api/voip/voip_network.h" +#include "api/voip/voip_statistics.h" +#include "api/voip/voip_volume_control.h" #include "audio/audio_transport_impl.h" #include "audio/voip/audio_channel.h" #include "modules/audio_device/include/audio_device.h" #include "modules/audio_mixer/audio_mixer_impl.h" #include "modules/audio_processing/include/audio_processing.h" -#include "modules/utility/include/process_thread.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -45,53 +47,90 @@ namespace webrtc { class VoipCore : public VoipEngine, public VoipBase, public VoipNetwork, - public VoipCodec { + public VoipCodec, + public VoipDtmf, + public VoipStatistics, + public VoipVolumeControl { public: + // Construct VoipCore with provided arguments. + // ProcessThread implementation can be injected by |process_thread| + // (mainly for testing purpose) and when set to nullptr, default + // implementation will be used. + VoipCore(rtc::scoped_refptr encoder_factory, + rtc::scoped_refptr decoder_factory, + std::unique_ptr task_queue_factory, + rtc::scoped_refptr audio_device_module, + rtc::scoped_refptr audio_processing); ~VoipCore() override = default; - // Initialize VoipCore components with provided arguments. 
- // Returns false only when |audio_device_module| fails to initialize which - // would presumably render further processing useless. - // TODO(natim@webrtc.org): Need to report audio device errors to user layer. - bool Init(rtc::scoped_refptr encoder_factory, - rtc::scoped_refptr decoder_factory, - std::unique_ptr task_queue_factory, - rtc::scoped_refptr audio_device_module, - rtc::scoped_refptr audio_processing); - // Implements VoipEngine interfaces. VoipBase& Base() override { return *this; } VoipNetwork& Network() override { return *this; } VoipCodec& Codec() override { return *this; } + VoipDtmf& Dtmf() override { return *this; } + VoipStatistics& Statistics() override { return *this; } + VoipVolumeControl& VolumeControl() override { return *this; } // Implements VoipBase interfaces. - absl::optional CreateChannel( - Transport* transport, - absl::optional local_ssrc) override; - void ReleaseChannel(ChannelId channel) override; - bool StartSend(ChannelId channel) override; - bool StopSend(ChannelId channel) override; - bool StartPlayout(ChannelId channel) override; - bool StopPlayout(ChannelId channel) override; + ChannelId CreateChannel(Transport* transport, + absl::optional local_ssrc) override; + VoipResult ReleaseChannel(ChannelId channel_id) override; + VoipResult StartSend(ChannelId channel_id) override; + VoipResult StopSend(ChannelId channel_id) override; + VoipResult StartPlayout(ChannelId channel_id) override; + VoipResult StopPlayout(ChannelId channel_id) override; // Implements VoipNetwork interfaces. - void ReceivedRTPPacket(ChannelId channel, - rtc::ArrayView rtp_packet) override; - void ReceivedRTCPPacket(ChannelId channel, - rtc::ArrayView rtcp_packet) override; + VoipResult ReceivedRTPPacket( + ChannelId channel_id, + rtc::ArrayView rtp_packet) override; + VoipResult ReceivedRTCPPacket( + ChannelId channel_id, + rtc::ArrayView rtcp_packet) override; // Implements VoipCodec interfaces. 
- void SetSendCodec(ChannelId channel, - int payload_type, - const SdpAudioFormat& encoder_format) override; - void SetReceiveCodecs( - ChannelId channel, + VoipResult SetSendCodec(ChannelId channel_id, + int payload_type, + const SdpAudioFormat& encoder_format) override; + VoipResult SetReceiveCodecs( + ChannelId channel_id, const std::map& decoder_specs) override; + // Implements VoipDtmf interfaces. + VoipResult RegisterTelephoneEventType(ChannelId channel_id, + int rtp_payload_type, + int sample_rate_hz) override; + VoipResult SendDtmfEvent(ChannelId channel_id, + DtmfEvent dtmf_event, + int duration_ms) override; + + // Implements VoipStatistics interfaces. + VoipResult GetIngressStatistics(ChannelId channel_id, + IngressStatistics& ingress_stats) override; + VoipResult GetChannelStatistics(ChannelId channe_id, + ChannelStatistics& channel_stats) override; + + // Implements VoipVolumeControl interfaces. + VoipResult SetInputMuted(ChannelId channel_id, bool enable) override; + VoipResult GetInputVolumeInfo(ChannelId channel_id, + VolumeInfo& volume_info) override; + VoipResult GetOutputVolumeInfo(ChannelId channel_id, + VolumeInfo& volume_info) override; + private: + // Initialize ADM and default audio device if needed. + // Returns true if ADM is successfully initialized or already in such state + // (e.g called more than once). Returns false when ADM fails to initialize + // which would presumably render further processing useless. Note that such + // failure won't necessarily succeed in next initialization attempt as it + // would mean changing the ADM implementation. From Android N and onwards, the + // mobile app may not be able to gain microphone access when in background + // mode. Therefore it would be better to delay the logic as late as possible. + bool InitializeIfNeeded(); + // Fetches the corresponding AudioChannel assigned with given |channel|. // Returns nullptr if not found. 
- rtc::scoped_refptr GetChannel(ChannelId channel); + rtc::scoped_refptr GetChannel(ChannelId channel_id); // Updates AudioTransportImpl with a new set of actively sending AudioSender // (AudioEgress). This needs to be invoked whenever StartSend/StopSend is @@ -104,7 +143,7 @@ class VoipCore : public VoipEngine, rtc::scoped_refptr decoder_factory_; std::unique_ptr task_queue_factory_; - // Synchronization is handled internally by AudioProessing. + // Synchronization is handled internally by AudioProcessing. // Must be placed before |audio_device_module_| for proper destruction. rtc::scoped_refptr audio_processing_; @@ -119,11 +158,7 @@ class VoipCore : public VoipEngine, // Synchronization is handled internally by AudioDeviceModule. rtc::scoped_refptr audio_device_module_; - // Synchronization is handled internally by ProcessThread. - // Must be placed before |channels_| for proper destruction. - std::unique_ptr process_thread_; - - rtc::CriticalSection lock_; + Mutex lock_; // Member to track a next ChannelId for new AudioChannel. int next_channel_id_ RTC_GUARDED_BY(lock_) = 0; @@ -132,6 +167,9 @@ class VoipCore : public VoipEngine, // ChannelId. std::unordered_map> channels_ RTC_GUARDED_BY(lock_); + + // Boolean flag to ensure initialization only occurs once. + bool initialized_ RTC_GUARDED_BY(lock_) = false; }; } // namespace webrtc diff --git a/build_overrides/build.gni b/build_overrides/build.gni index 669044db81..137b6a40b2 100644 --- a/build_overrides/build.gni +++ b/build_overrides/build.gni @@ -9,26 +9,23 @@ # Some non-Chromium builds don't support building java targets. enable_java_templates = true -# Some non-Chromium builds don't use Chromium's third_party/binutils. -linux_use_bundled_binutils_override = true - # Don't set this variable to true when building stadalone WebRTC, it is # only needed to support both WebRTC standalone and Chromium builds. build_with_chromium = false +# WebRTC checks out google_benchmark by default since it is always used. 
+checkout_google_benchmark = true + # Use our own suppressions files. asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc" lsan_suppressions_file = "//tools_webrtc/sanitizers/lsan_suppressions_webrtc.cc" tsan_suppressions_file = "//tools_webrtc/sanitizers/tsan_suppressions_webrtc.cc" -msan_blacklist_path = - rebase_path("//tools_webrtc/msan/blacklist.txt", root_build_dir) -ubsan_blacklist_path = - rebase_path("//tools_webrtc/ubsan/blacklist.txt", root_build_dir) -ubsan_vptr_blacklist_path = - rebase_path("//tools_webrtc/ubsan/vptr_blacklist.txt", root_build_dir) - -# Android lint suppressions file -lint_suppressions_file = "//tools_webrtc/android/suppressions.xml" +msan_ignorelist_path = + rebase_path("//tools_webrtc/msan/suppressions.txt", root_build_dir) +ubsan_ignorelist_path = + rebase_path("//tools_webrtc/ubsan/suppressions.txt", root_build_dir) +ubsan_vptr_ignorelist_path = + rebase_path("//tools_webrtc/ubsan/vptr_suppressions.txt", root_build_dir) # For Chromium, Android 32-bit non-component, non-clang builds hit a 4GiB size # limit, making them requiring symbol_level=2. WebRTC doesn't hit that problem @@ -37,7 +34,8 @@ ignore_elf32_limitations = true # Use bundled hermetic Xcode installation maintainted by Chromium, # except for local iOS builds where it's unsupported. -if (host_os == "mac") { +# Allow for mac cross compile on linux machines. +if (host_os == "mac" || host_os == "linux") { _result = exec_script("//build/mac/should_use_hermetic_xcode.py", [ target_os ], "value") @@ -46,3 +44,22 @@ if (host_os == "mac") { "hermetic toolchain if the minimum OS version is not met.") use_system_xcode = _result == 0 } + +declare_args() { + # WebRTC doesn't depend on //base from production code but only for testing + # purposes. In any case, it doesn't depend on //third_party/perfetto which + # is used for base tracing, so this feature is disabled. 
+ enable_base_tracing = false + use_perfetto_client_library = false + + # Limits the defined //third_party/android_deps targets to only "buildCompile" + # and "buildCompileNoDeps" targets. This is useful for third-party + # repositories which do not use JUnit tests. For instance, + # limit_android_deps == true removes "gn gen" requirement for + # //third_party/robolectric . + limit_android_deps = false + + # If true, it assumes that //third_party/abseil-cpp is an available + # dependency for googletest. + gtest_enable_absl_printers = true +} diff --git a/call/BUILD.gn b/call/BUILD.gn index a5e21b10cf..638eb0b910 100644 --- a/call/BUILD.gn +++ b/call/BUILD.gn @@ -8,6 +8,14 @@ import("../webrtc.gni") +rtc_library("version") { + sources = [ + "version.cc", + "version.h", + ] + visibility = [ ":*" ] +} + rtc_library("call_interfaces") { sources = [ "audio_receive_stream.cc", @@ -27,8 +35,10 @@ rtc_library("call_interfaces") { if (!build_with_mozilla) { sources += [ "audio_send_stream.cc" ] } + deps = [ ":audio_sender_interface", + ":receive_stream_interface", ":rtp_interfaces", ":video_stream_api", "../api:fec_controller_api", @@ -39,9 +49,10 @@ rtc_library("call_interfaces") { "../api:rtp_parameters", "../api:scoped_refptr", "../api:transport_api", + "../api/adaptation:resource_adaptation_api", + "../api/audio:audio_frame_processor", "../api/audio:audio_mixer_api", "../api/audio_codecs:audio_codecs_api", - "../api/crypto:frame_decryptor_interface", "../api/crypto:frame_encryptor_interface", "../api/crypto:options", "../api/neteq:neteq_api", @@ -49,7 +60,7 @@ rtc_library("call_interfaces") { "../api/transport:bitrate_settings", "../api/transport:network_control", "../api/transport:webrtc_key_value_config", - "../api/transport/rtp:rtp_source", + "../modules/async_audio_processing", "../modules/audio_device", "../modules/audio_processing", "../modules/audio_processing:api", @@ -61,6 +72,9 @@ rtc_library("call_interfaces") { "../rtc_base:checks", 
"../rtc_base:rtc_base_approved", "../rtc_base/network:sent_packet", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/functional:bind_front", "//third_party/abseil-cpp/absl/types:optional", ] } @@ -80,26 +94,35 @@ rtc_library("rtp_interfaces") { # client code gets updated. visibility = [ "*" ] sources = [ - "rtcp_packet_sink_interface.h", "rtp_config.cc", "rtp_config.h", "rtp_packet_sink_interface.h", "rtp_stream_receiver_controller_interface.h", + "rtp_transport_config.h", + "rtp_transport_controller_send_factory_interface.h", "rtp_transport_controller_send_interface.h", ] deps = [ "../api:array_view", "../api:fec_controller_api", "../api:frame_transformer_interface", + "../api:network_state_predictor_api", "../api:rtp_headers", "../api:rtp_parameters", "../api/crypto:options", "../api/rtc_event_log", "../api/transport:bitrate_settings", + "../api/transport:network_control", + "../api/transport:webrtc_key_value_config", "../api/units:timestamp", + "../common_video:frame_counts", "../modules/rtp_rtcp:rtp_rtcp_format", + "../modules/utility", "../rtc_base:checks", "../rtc_base:rtc_base_approved", + "../rtc_base:rtc_task_queue", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/types:optional", ] @@ -108,28 +131,26 @@ rtc_library("rtp_interfaces") { rtc_library("rtp_receiver") { visibility = [ "*" ] sources = [ - "rtcp_demuxer.cc", - "rtcp_demuxer.h", "rtp_demuxer.cc", "rtp_demuxer.h", - "rtp_rtcp_demuxer_helper.cc", - "rtp_rtcp_demuxer_helper.h", "rtp_stream_receiver_controller.cc", "rtp_stream_receiver_controller.h", "rtx_receive_stream.cc", "rtx_receive_stream.h", - "ssrc_binding_observer.h", ] deps = [ ":rtp_interfaces", "../api:array_view", "../api:rtp_headers", + "../api:sequence_checker", "../modules/rtp_rtcp", "../modules/rtp_rtcp:rtp_rtcp_format", "../rtc_base:checks", "../rtc_base:rtc_base_approved", - "//third_party/abseil-cpp/absl/types:optional", + "../rtc_base/containers:flat_map", + 
"../rtc_base/containers:flat_set", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("rtp_sender") { @@ -138,6 +159,7 @@ rtc_library("rtp_sender") { "rtp_payload_params.h", "rtp_transport_controller_send.cc", "rtp_transport_controller_send.h", + "rtp_transport_controller_send_factory.h", "rtp_video_sender.cc", "rtp_video_sender.h", "rtp_video_sender_interface.h", @@ -150,6 +172,7 @@ rtc_library("rtp_sender") { "../api:fec_controller_api", "../api:network_state_predictor_api", "../api:rtp_parameters", + "../api:sequence_checker", "../api:transport_api", "../api/rtc_event_log", "../api/transport:field_trial_based_config", @@ -160,6 +183,7 @@ rtc_library("rtp_sender") { "../api/units:time_delta", "../api/units:timestamp", "../api/video:video_frame", + "../api/video:video_layers_allocation", "../api/video:video_rtp_headers", "../api/video_codecs:video_codecs_api", "../logging:rtc_event_bwe", @@ -180,7 +204,10 @@ rtc_library("rtp_sender") { "../rtc_base:rate_limiter", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_task_queue", + "../rtc_base/synchronization:mutex", "../rtc_base/task_utils:repeating_task", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/strings:strings", @@ -203,8 +230,8 @@ rtc_library("bitrate_configurator") { "../api/units:data_rate", "../rtc_base:checks", "../rtc_base:rtc_base_approved", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("bitrate_allocator") { @@ -214,18 +241,19 @@ rtc_library("bitrate_allocator") { ] deps = [ "../api:bitrate_allocation", + "../api:sequence_checker", "../api/transport:network_control", "../api/units:data_rate", "../api/units:time_delta", "../rtc_base:checks", "../rtc_base:rtc_base_approved", "../rtc_base:safe_minmax", - "../rtc_base/synchronization:sequence_checker", + 
"../rtc_base/system:no_unique_address", "../system_wrappers", "../system_wrappers:field_trial", "../system_wrappers:metrics", - "//third_party/abseil-cpp/absl/algorithm:container", ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] } rtc_library("call") { @@ -249,12 +277,14 @@ rtc_library("call") { ":rtp_receiver", ":rtp_sender", ":simulated_network", + ":version", ":video_stream_api", "../api:array_view", "../api:callfactory_api", "../api:fec_controller_api", "../api:rtp_headers", "../api:rtp_parameters", + "../api:sequence_checker", "../api:simulated_network_api", "../api:transport_api", "../api/rtc_event_log", @@ -266,7 +296,6 @@ rtc_library("call") { "../logging:rtc_event_rtp_rtcp", "../logging:rtc_event_video", "../logging:rtc_stream_config", - "../modules:module_api", "../modules/congestion_controller", "../modules/pacing", "../modules/rtp_rtcp", @@ -280,16 +309,31 @@ rtc_library("call") { "../rtc_base:safe_minmax", "../rtc_base/experiments:field_trial_parser", "../rtc_base/network:sent_packet", - "../rtc_base/synchronization:rw_lock_wrapper", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/system:no_unique_address", + "../rtc_base/task_utils:pending_task_safety_flag", "../system_wrappers", "../system_wrappers:field_trial", "../system_wrappers:metrics", "../video", + "adaptation:resource_adaptation", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/functional:bind_front", "//third_party/abseil-cpp/absl/types:optional", ] } +rtc_source_set("receive_stream_interface") { + sources = [ "receive_stream.h" ] + deps = [ + "../api:frame_transformer_interface", + "../api:rtp_parameters", + "../api:scoped_refptr", + "../api/crypto:frame_decryptor_interface", + "../api/transport/rtp:rtp_source", + ] +} + rtc_library("video_stream_api") { sources = [ "video_receive_stream.cc", @@ -298,26 +342,28 @@ rtc_library("video_stream_api") { "video_send_stream.h", ] deps = [ + ":receive_stream_interface", ":rtp_interfaces", 
"../api:frame_transformer_interface", "../api:rtp_headers", "../api:rtp_parameters", + "../api:scoped_refptr", "../api:transport_api", - "../api/crypto:frame_decryptor_interface", + "../api/adaptation:resource_adaptation_api", "../api/crypto:frame_encryptor_interface", "../api/crypto:options", - "../api/transport/rtp:rtp_source", "../api/video:recordable_encoded_frame", "../api/video:video_frame", "../api/video:video_rtp_headers", "../api/video:video_stream_encoder", "../api/video_codecs:video_codecs_api", "../common_video", + "../common_video:frame_counts", "../modules/rtp_rtcp:rtp_rtcp_format", "../rtc_base:checks", "../rtc_base:rtc_base_approved", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("simulated_network") { @@ -326,6 +372,7 @@ rtc_library("simulated_network") { "simulated_network.h", ] deps = [ + "../api:sequence_checker", "../api:simulated_network_api", "../api/units:data_rate", "../api/units:data_size", @@ -333,9 +380,9 @@ rtc_library("simulated_network") { "../api/units:timestamp", "../rtc_base:checks", "../rtc_base:rtc_base_approved", - "../rtc_base/synchronization:sequence_checker", - "//third_party/abseil-cpp/absl/types:optional", + "../rtc_base/synchronization:mutex", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("simulated_packet_receiver") { @@ -356,147 +403,160 @@ rtc_library("fake_network") { ":simulated_network", ":simulated_packet_receiver", "../api:rtp_parameters", + "../api:sequence_checker", "../api:simulated_network_api", "../api:transport_api", "../modules/utility", "../rtc_base:checks", "../rtc_base:rtc_base_approved", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/synchronization:mutex", "../system_wrappers", ] } if (rtc_include_tests) { - rtc_library("call_tests") { - testonly = true + if (!build_with_chromium) { + rtc_library("call_tests") { + testonly = true - sources = [ - 
"bitrate_allocator_unittest.cc", - "bitrate_estimator_tests.cc", - "call_unittest.cc", - "flexfec_receive_stream_unittest.cc", - "receive_time_calculator_unittest.cc", - "rtcp_demuxer_unittest.cc", - "rtp_bitrate_configurator_unittest.cc", - "rtp_demuxer_unittest.cc", - "rtp_payload_params_unittest.cc", - "rtp_rtcp_demuxer_helper_unittest.cc", - "rtp_video_sender_unittest.cc", - "rtx_receive_stream_unittest.cc", - ] - deps = [ - ":bitrate_allocator", - ":bitrate_configurator", - ":call", - ":call_interfaces", - ":mock_rtp_interfaces", - ":rtp_interfaces", - ":rtp_receiver", - ":rtp_sender", - ":simulated_network", - "../api:array_view", - "../api:create_frame_generator", - "../api:mock_audio_mixer", - "../api:rtp_headers", - "../api:rtp_parameters", - "../api:transport_api", - "../api/audio_codecs:builtin_audio_decoder_factory", - "../api/rtc_event_log", - "../api/task_queue:default_task_queue_factory", - "../api/transport:field_trial_based_config", - "../api/video:video_frame", - "../api/video:video_rtp_headers", - "../audio", - "../modules/audio_device:mock_audio_device", - "../modules/audio_mixer", - "../modules/audio_mixer:audio_mixer_impl", - "../modules/audio_processing:mocks", - "../modules/congestion_controller", - "../modules/pacing", - "../modules/rtp_rtcp", - "../modules/rtp_rtcp:mock_rtp_rtcp", - "../modules/rtp_rtcp:rtp_rtcp_format", - "../modules/utility:mock_process_thread", - "../modules/video_coding", - "../modules/video_coding:codec_globals_headers", - "../modules/video_coding:video_codec_interface", - "../rtc_base:checks", - "../rtc_base:rate_limiter", - "../rtc_base:rtc_base_approved", - "../rtc_base:task_queue_for_test", - "../system_wrappers", - "../test:audio_codec_mocks", - "../test:direct_transport", - "../test:encoder_settings", - "../test:fake_video_codecs", - "../test:field_trial", - "../test:mock_frame_transformer", - "../test:mock_transport", - "../test:test_common", - "../test:test_support", - "../test:video_test_common", - 
"../test/time_controller:time_controller", - "../video", - "//test/scenario:scenario", - "//testing/gmock", - "//testing/gtest", - "//third_party/abseil-cpp/absl/container:inlined_vector", - "//third_party/abseil-cpp/absl/memory", - "//third_party/abseil-cpp/absl/types:optional", - ] - } + sources = [ + "bitrate_allocator_unittest.cc", + "bitrate_estimator_tests.cc", + "call_unittest.cc", + "flexfec_receive_stream_unittest.cc", + "receive_time_calculator_unittest.cc", + "rtp_bitrate_configurator_unittest.cc", + "rtp_demuxer_unittest.cc", + "rtp_payload_params_unittest.cc", + "rtp_video_sender_unittest.cc", + "rtx_receive_stream_unittest.cc", + ] + deps = [ + ":bitrate_allocator", + ":bitrate_configurator", + ":call", + ":call_interfaces", + ":mock_rtp_interfaces", + ":rtp_interfaces", + ":rtp_receiver", + ":rtp_sender", + ":simulated_network", + "../api:array_view", + "../api:create_frame_generator", + "../api:mock_audio_mixer", + "../api:rtp_headers", + "../api:rtp_parameters", + "../api:transport_api", + "../api/audio_codecs:builtin_audio_decoder_factory", + "../api/rtc_event_log", + "../api/task_queue:default_task_queue_factory", + "../api/test/video:function_video_factory", + "../api/transport:field_trial_based_config", + "../api/video:builtin_video_bitrate_allocator_factory", + "../api/video:video_frame", + "../api/video:video_rtp_headers", + "../audio", + "../modules:module_api", + "../modules/audio_device:mock_audio_device", + "../modules/audio_mixer", + "../modules/audio_mixer:audio_mixer_impl", + "../modules/audio_processing:mocks", + "../modules/congestion_controller", + "../modules/pacing", + "../modules/rtp_rtcp", + "../modules/rtp_rtcp:mock_rtp_rtcp", + "../modules/rtp_rtcp:rtp_rtcp_format", + "../modules/utility:mock_process_thread", + "../modules/video_coding", + "../modules/video_coding:codec_globals_headers", + "../modules/video_coding:video_codec_interface", + "../rtc_base:checks", + "../rtc_base:logging", + "../rtc_base:rate_limiter", + 
"../rtc_base:rtc_base_approved", + "../rtc_base:task_queue_for_test", + "../rtc_base/synchronization:mutex", + "../system_wrappers", + "../test:audio_codec_mocks", + "../test:direct_transport", + "../test:encoder_settings", + "../test:explicit_key_value_config", + "../test:fake_video_codecs", + "../test:field_trial", + "../test:mock_frame_transformer", + "../test:mock_transport", + "../test:test_common", + "../test:test_support", + "../test:video_test_common", + "../test/scenario", + "../test/time_controller:time_controller", + "../video", + "adaptation:resource_adaptation_test_utilities", + "//testing/gmock", + "//testing/gtest", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/container:inlined_vector", + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/types:optional", + "//third_party/abseil-cpp/absl/types:variant", + ] + } - rtc_library("call_perf_tests") { - testonly = true + rtc_library("call_perf_tests") { + testonly = true - sources = [ - "call_perf_tests.cc", - "rampup_tests.cc", - "rampup_tests.h", - ] - deps = [ - ":call_interfaces", - ":simulated_network", - ":video_stream_api", - "../api:rtc_event_log_output_file", - "../api:simulated_network_api", - "../api/audio_codecs:builtin_audio_encoder_factory", - "../api/rtc_event_log", - "../api/rtc_event_log:rtc_event_log_factory", - "../api/task_queue", - "../api/task_queue:default_task_queue_factory", - "../api/video:builtin_video_bitrate_allocator_factory", - "../api/video:video_bitrate_allocation", - "../api/video_codecs:video_codecs_api", - "../modules/audio_coding", - "../modules/audio_device", - "../modules/audio_device:audio_device_impl", - "../modules/audio_mixer:audio_mixer_impl", - "../modules/rtp_rtcp", - "../modules/rtp_rtcp:rtp_rtcp_format", - "../rtc_base", - "../rtc_base:checks", - "../rtc_base:rtc_base_approved", - "../rtc_base:task_queue_for_test", - "../rtc_base:task_queue_for_test", - "../rtc_base/task_utils:repeating_task", - "../system_wrappers", - 
"../system_wrappers:metrics", - "../test:direct_transport", - "../test:encoder_settings", - "../test:fake_video_codecs", - "../test:field_trial", - "../test:fileutils", - "../test:null_transport", - "../test:perf_test", - "../test:rtp_test_utils", - "../test:test_common", - "../test:test_support", - "../test:video_test_common", - "../video", - "//testing/gtest", - "//third_party/abseil-cpp/absl/flags:flag", - ] + sources = [ + "call_perf_tests.cc", + "rampup_tests.cc", + "rampup_tests.h", + ] + deps = [ + ":call_interfaces", + ":simulated_network", + ":video_stream_api", + "../api:rtc_event_log_output_file", + "../api:simulated_network_api", + "../api/audio_codecs:builtin_audio_encoder_factory", + "../api/rtc_event_log", + "../api/rtc_event_log:rtc_event_log_factory", + "../api/task_queue", + "../api/task_queue:default_task_queue_factory", + "../api/video:builtin_video_bitrate_allocator_factory", + "../api/video:video_bitrate_allocation", + "../api/video_codecs:video_codecs_api", + "../modules/audio_coding", + "../modules/audio_device", + "../modules/audio_device:audio_device_impl", + "../modules/audio_mixer:audio_mixer_impl", + "../modules/rtp_rtcp", + "../modules/rtp_rtcp:rtp_rtcp_format", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:rtc_base_approved", + "../rtc_base:task_queue_for_test", + "../rtc_base:task_queue_for_test", + "../rtc_base:threading", + "../rtc_base/synchronization:mutex", + "../rtc_base/task_utils:repeating_task", + "../system_wrappers", + "../system_wrappers:metrics", + "../test:direct_transport", + "../test:encoder_settings", + "../test:fake_video_codecs", + "../test:field_trial", + "../test:fileutils", + "../test:null_transport", + "../test:perf_test", + "../test:rtp_test_utils", + "../test:test_common", + "../test:test_support", + "../test:video_test_common", + "../video", + "//testing/gtest", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag" ] + } } # TODO(eladalon): This should be moved, as with the TODO for 
|rtp_interfaces|. @@ -554,7 +614,7 @@ if (rtc_include_tests) { "../system_wrappers", "../test:test_support", "//testing/gtest", - "//third_party/abseil-cpp/absl/algorithm:container", ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] } } diff --git a/call/DEPS b/call/DEPS index f823a7b9c3..2260ceaf53 100644 --- a/call/DEPS +++ b/call/DEPS @@ -1,6 +1,7 @@ include_rules = [ "+audio", "+logging/rtc_event_log", + "+modules/async_audio_processing", "+modules/audio_coding", "+modules/audio_device", "+modules/audio_mixer", @@ -17,11 +18,12 @@ include_rules = [ specific_include_rules = { "video_receive_stream\.h": [ - "+common_video/include", - "+media/base", + "+common_video/frame_counts.h", ], "video_send_stream\.h": [ - "+common_video/include", - "+media/base", + "+common_video", ], + "rtp_transport_controller_send_interface\.h": [ + "+common_video/frame_counts.h", + ] } diff --git a/call/OWNERS b/call/OWNERS index 1be02c2e4e..f863b939bc 100644 --- a/call/OWNERS +++ b/call/OWNERS @@ -2,3 +2,4 @@ mflodman@webrtc.org stefan@webrtc.org srte@webrtc.org terelius@webrtc.org +sprang@webrtc.org diff --git a/call/adaptation/BUILD.gn b/call/adaptation/BUILD.gn index 2a6933ebd5..10a46a3d43 100644 --- a/call/adaptation/BUILD.gn +++ b/call/adaptation/BUILD.gn @@ -10,10 +10,14 @@ import("../../webrtc.gni") rtc_library("resource_adaptation") { sources = [ + "adaptation_constraint.cc", + "adaptation_constraint.h", + "broadcast_resource_listener.cc", + "broadcast_resource_listener.h", + "degradation_preference_provider.cc", + "degradation_preference_provider.h", "encoder_settings.cc", "encoder_settings.h", - "resource.cc", - "resource.h", "resource_adaptation_processor.cc", "resource_adaptation_processor.h", "resource_adaptation_processor_interface.cc", @@ -30,6 +34,9 @@ rtc_library("resource_adaptation") { deps = [ "../../api:rtp_parameters", "../../api:scoped_refptr", + "../../api:sequence_checker", + "../../api/adaptation:resource_adaptation_api", + 
"../../api/task_queue:task_queue", "../../api/video:video_adaptation", "../../api/video:video_frame", "../../api/video:video_stream_encoder", @@ -39,9 +46,14 @@ rtc_library("resource_adaptation") { "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_task_queue", "../../rtc_base/experiments:balanced_degradation_settings", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:no_unique_address", + "../../rtc_base/task_utils:to_queued_task", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/types:optional", + "//third_party/abseil-cpp/absl/types:variant", ] } @@ -50,6 +62,7 @@ if (rtc_include_tests) { testonly = true sources = [ + "broadcast_resource_listener_unittest.cc", "resource_adaptation_processor_unittest.cc", "resource_unittest.cc", "video_source_restrictions_unittest.cc", @@ -60,35 +73,49 @@ if (rtc_include_tests) { ":resource_adaptation", ":resource_adaptation_test_utilities", "../../api:scoped_refptr", + "../../api/adaptation:resource_adaptation_api", "../../api/task_queue:default_task_queue_factory", "../../api/task_queue:task_queue", "../../api/video:video_adaptation", "../../api/video_codecs:video_codecs_api", "../../rtc_base:checks", + "../../rtc_base:gunit_helpers", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_task_queue", "../../rtc_base:task_queue_for_test", + "../../rtc_base/synchronization:mutex", "../../test:field_trial", "../../test:rtc_expect_death", "../../test:test_support", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("resource_adaptation_test_utilities") { testonly = true sources = [ + "test/fake_adaptation_constraint.cc", + "test/fake_adaptation_constraint.h", "test/fake_frame_rate_provider.cc", "test/fake_frame_rate_provider.h", "test/fake_resource.cc", "test/fake_resource.h", + 
"test/fake_video_stream_input_state_provider.cc", + "test/fake_video_stream_input_state_provider.h", + "test/mock_resource_listener.h", ] deps = [ ":resource_adaptation", + "../../api:scoped_refptr", + "../../api:sequence_checker", + "../../api/adaptation:resource_adaptation_api", + "../../api/task_queue:task_queue", "../../api/video:video_stream_encoder", "../../rtc_base:rtc_base_approved", + "../../rtc_base/task_utils:to_queued_task", "../../test:test_support", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } } diff --git a/call/adaptation/OWNERS b/call/adaptation/OWNERS index e4bec4aebb..b65c763efc 100644 --- a/call/adaptation/OWNERS +++ b/call/adaptation/OWNERS @@ -1,2 +1,3 @@ eshr@google.com hbos@webrtc.org +ilnik@webrtc.org diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCDefaultVideoDecoderFactory.h b/call/adaptation/adaptation_constraint.cc similarity index 62% rename from sdk/objc/Framework/Headers/WebRTC/RTCDefaultVideoDecoderFactory.h rename to call/adaptation/adaptation_constraint.cc index dc46f3f67b..d62bb74f87 100644 --- a/sdk/objc/Framework/Headers/WebRTC/RTCDefaultVideoDecoderFactory.h +++ b/call/adaptation/adaptation_constraint.cc @@ -1,5 +1,5 @@ /* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. + * Copyright 2020 The WebRTC Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,4 +8,10 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#import "components/video_codec/RTCDefaultVideoDecoderFactory.h" +#include "call/adaptation/adaptation_constraint.h" + +namespace webrtc { + +AdaptationConstraint::~AdaptationConstraint() {} + +} // namespace webrtc diff --git a/call/adaptation/adaptation_constraint.h b/call/adaptation/adaptation_constraint.h new file mode 100644 index 0000000000..9ad6414cd1 --- /dev/null +++ b/call/adaptation/adaptation_constraint.h @@ -0,0 +1,41 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_ +#define CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_ + +#include + +#include "api/adaptation/resource.h" +#include "call/adaptation/video_source_restrictions.h" +#include "call/adaptation/video_stream_input_state.h" + +namespace webrtc { + +// Adaptation constraints have the ability to prevent applying a proposed +// adaptation (expressed as restrictions before/after adaptation). +class AdaptationConstraint { + public: + virtual ~AdaptationConstraint(); + + virtual std::string Name() const = 0; + + // TODO(https://crbug.com/webrtc/11172): When we have multi-stream adaptation + // support, this interface needs to indicate which stream the adaptation + // applies to. 
+ virtual bool IsAdaptationUpAllowed( + const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions_before, + const VideoSourceRestrictions& restrictions_after) const = 0; +}; + +} // namespace webrtc + +#endif // CALL_ADAPTATION_ADAPTATION_CONSTRAINT_H_ diff --git a/call/adaptation/broadcast_resource_listener.cc b/call/adaptation/broadcast_resource_listener.cc new file mode 100644 index 0000000000..876d4c0bf6 --- /dev/null +++ b/call/adaptation/broadcast_resource_listener.cc @@ -0,0 +1,120 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "call/adaptation/broadcast_resource_listener.h" + +#include +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/synchronization/mutex.h" + +namespace webrtc { + +// The AdapterResource redirects resource usage measurements from its parent to +// a single ResourceListener. +class BroadcastResourceListener::AdapterResource : public Resource { + public: + explicit AdapterResource(std::string name) : name_(std::move(name)) {} + ~AdapterResource() override { RTC_DCHECK(!listener_); } + + // The parent is letting us know we have a usage neasurement. + void OnResourceUsageStateMeasured(ResourceUsageState usage_state) { + MutexLock lock(&lock_); + if (!listener_) + return; + listener_->OnResourceUsageStateMeasured(this, usage_state); + } + + // Resource implementation. 
+ std::string Name() const override { return name_; } + void SetResourceListener(ResourceListener* listener) override { + MutexLock lock(&lock_); + RTC_DCHECK(!listener_ || !listener); + listener_ = listener; + } + + private: + const std::string name_; + Mutex lock_; + ResourceListener* listener_ RTC_GUARDED_BY(lock_) = nullptr; +}; + +BroadcastResourceListener::BroadcastResourceListener( + rtc::scoped_refptr source_resource) + : source_resource_(source_resource), is_listening_(false) { + RTC_DCHECK(source_resource_); +} + +BroadcastResourceListener::~BroadcastResourceListener() { + RTC_DCHECK(!is_listening_); +} + +rtc::scoped_refptr BroadcastResourceListener::SourceResource() const { + return source_resource_; +} + +void BroadcastResourceListener::StartListening() { + MutexLock lock(&lock_); + RTC_DCHECK(!is_listening_); + source_resource_->SetResourceListener(this); + is_listening_ = true; +} + +void BroadcastResourceListener::StopListening() { + MutexLock lock(&lock_); + RTC_DCHECK(is_listening_); + RTC_DCHECK(adapters_.empty()); + source_resource_->SetResourceListener(nullptr); + is_listening_ = false; +} + +rtc::scoped_refptr +BroadcastResourceListener::CreateAdapterResource() { + MutexLock lock(&lock_); + RTC_DCHECK(is_listening_); + rtc::scoped_refptr adapter = + rtc::make_ref_counted(source_resource_->Name() + + "Adapter"); + adapters_.push_back(adapter); + return adapter; +} + +void BroadcastResourceListener::RemoveAdapterResource( + rtc::scoped_refptr resource) { + MutexLock lock(&lock_); + auto it = std::find(adapters_.begin(), adapters_.end(), resource); + RTC_DCHECK(it != adapters_.end()); + adapters_.erase(it); +} + +std::vector> +BroadcastResourceListener::GetAdapterResources() { + std::vector> resources; + MutexLock lock(&lock_); + for (const auto& adapter : adapters_) { + resources.push_back(adapter); + } + return resources; +} + +void BroadcastResourceListener::OnResourceUsageStateMeasured( + rtc::scoped_refptr resource, + ResourceUsageState 
usage_state) { + RTC_DCHECK_EQ(resource, source_resource_); + MutexLock lock(&lock_); + for (const auto& adapter : adapters_) { + adapter->OnResourceUsageStateMeasured(usage_state); + } +} + +} // namespace webrtc diff --git a/call/adaptation/broadcast_resource_listener.h b/call/adaptation/broadcast_resource_listener.h new file mode 100644 index 0000000000..2c5a5c703b --- /dev/null +++ b/call/adaptation/broadcast_resource_listener.h @@ -0,0 +1,75 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_ +#define CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_ + +#include + +#include "api/adaptation/resource.h" +#include "api/scoped_refptr.h" +#include "rtc_base/synchronization/mutex.h" + +namespace webrtc { + +// Responsible for forwarding 1 resource usage measurement to N listeners by +// creating N "adapter" resources. +// +// Example: +// If we have ResourceA, ResourceListenerX and ResourceListenerY we can create a +// BroadcastResourceListener that listens to ResourceA, use CreateAdapter() to +// spawn adapter resources ResourceX and ResourceY and let ResourceListenerX +// listen to ResourceX and ResourceListenerY listen to ResourceY. When ResourceA +// makes a measurement it will be echoed by both ResourceX and ResourceY. +// +// TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor is +// moved to call there will only be one ResourceAdaptationProcessor that needs +// to listen to the injected resources. When this is the case, delete this class +// and DCHECK that a Resource's listener is never overwritten. 
+class BroadcastResourceListener : public ResourceListener { + public: + explicit BroadcastResourceListener( + rtc::scoped_refptr source_resource); + ~BroadcastResourceListener() override; + + rtc::scoped_refptr SourceResource() const; + void StartListening(); + void StopListening(); + + // Creates a Resource that redirects any resource usage measurements that + // BroadcastResourceListener receives to its listener. + rtc::scoped_refptr CreateAdapterResource(); + + // Unregister the adapter from the BroadcastResourceListener; it will no + // longer receive resource usage measurement and will no longer be referenced. + // Use this to prevent memory leaks of old adapters. + void RemoveAdapterResource(rtc::scoped_refptr resource); + std::vector> GetAdapterResources(); + + // ResourceListener implementation. + void OnResourceUsageStateMeasured(rtc::scoped_refptr resource, + ResourceUsageState usage_state) override; + + private: + class AdapterResource; + friend class AdapterResource; + + const rtc::scoped_refptr source_resource_; + Mutex lock_; + bool is_listening_ RTC_GUARDED_BY(lock_); + // The AdapterResource unregisters itself prior to destruction, guaranteeing + // that these pointers are safe to use. + std::vector> adapters_ + RTC_GUARDED_BY(lock_); +}; + +} // namespace webrtc + +#endif // CALL_ADAPTATION_BROADCAST_RESOURCE_LISTENER_H_ diff --git a/call/adaptation/broadcast_resource_listener_unittest.cc b/call/adaptation/broadcast_resource_listener_unittest.cc new file mode 100644 index 0000000000..9cd80500c2 --- /dev/null +++ b/call/adaptation/broadcast_resource_listener_unittest.cc @@ -0,0 +1,121 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "call/adaptation/broadcast_resource_listener.h" + +#include "call/adaptation/test/fake_resource.h" +#include "call/adaptation/test/mock_resource_listener.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { + +using ::testing::_; +using ::testing::StrictMock; + +TEST(BroadcastResourceListenerTest, CreateAndRemoveAdapterResource) { + rtc::scoped_refptr source_resource = + FakeResource::Create("SourceResource"); + BroadcastResourceListener broadcast_resource_listener(source_resource); + broadcast_resource_listener.StartListening(); + + EXPECT_TRUE(broadcast_resource_listener.GetAdapterResources().empty()); + rtc::scoped_refptr adapter = + broadcast_resource_listener.CreateAdapterResource(); + StrictMock listener; + adapter->SetResourceListener(&listener); + EXPECT_EQ(std::vector>{adapter}, + broadcast_resource_listener.GetAdapterResources()); + + // The removed adapter is not referenced by the broadcaster. + broadcast_resource_listener.RemoveAdapterResource(adapter); + EXPECT_TRUE(broadcast_resource_listener.GetAdapterResources().empty()); + // The removed adapter is not forwarding measurements. + EXPECT_CALL(listener, OnResourceUsageStateMeasured(_, _)).Times(0); + source_resource->SetUsageState(ResourceUsageState::kOveruse); + // Cleanup. 
+ adapter->SetResourceListener(nullptr); + broadcast_resource_listener.StopListening(); +} + +TEST(BroadcastResourceListenerTest, AdapterNameIsBasedOnSourceResourceName) { + rtc::scoped_refptr source_resource = + FakeResource::Create("FooBarResource"); + BroadcastResourceListener broadcast_resource_listener(source_resource); + broadcast_resource_listener.StartListening(); + + rtc::scoped_refptr adapter = + broadcast_resource_listener.CreateAdapterResource(); + EXPECT_EQ("FooBarResourceAdapter", adapter->Name()); + + broadcast_resource_listener.RemoveAdapterResource(adapter); + broadcast_resource_listener.StopListening(); +} + +TEST(BroadcastResourceListenerTest, AdaptersForwardsUsageMeasurements) { + rtc::scoped_refptr source_resource = + FakeResource::Create("SourceResource"); + BroadcastResourceListener broadcast_resource_listener(source_resource); + broadcast_resource_listener.StartListening(); + + StrictMock destination_listener1; + StrictMock destination_listener2; + rtc::scoped_refptr adapter1 = + broadcast_resource_listener.CreateAdapterResource(); + adapter1->SetResourceListener(&destination_listener1); + rtc::scoped_refptr adapter2 = + broadcast_resource_listener.CreateAdapterResource(); + adapter2->SetResourceListener(&destination_listener2); + + // Expect kOveruse to be echoed. + EXPECT_CALL(destination_listener1, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([adapter1](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(adapter1, resource); + EXPECT_EQ(ResourceUsageState::kOveruse, usage_state); + }); + EXPECT_CALL(destination_listener2, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([adapter2](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(adapter2, resource); + EXPECT_EQ(ResourceUsageState::kOveruse, usage_state); + }); + source_resource->SetUsageState(ResourceUsageState::kOveruse); + + // Expect kUnderuse to be echoed. 
+ EXPECT_CALL(destination_listener1, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([adapter1](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(adapter1, resource); + EXPECT_EQ(ResourceUsageState::kUnderuse, usage_state); + }); + EXPECT_CALL(destination_listener2, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([adapter2](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(adapter2, resource); + EXPECT_EQ(ResourceUsageState::kUnderuse, usage_state); + }); + source_resource->SetUsageState(ResourceUsageState::kUnderuse); + + // Adapters have to be unregistered before they or the broadcaster is + // destroyed, ensuring safe use of raw pointers. + adapter1->SetResourceListener(nullptr); + adapter2->SetResourceListener(nullptr); + + broadcast_resource_listener.RemoveAdapterResource(adapter1); + broadcast_resource_listener.RemoveAdapterResource(adapter2); + broadcast_resource_listener.StopListening(); +} + +} // namespace webrtc diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCDefaultVideoEncoderFactory.h b/call/adaptation/degradation_preference_provider.cc similarity index 62% rename from sdk/objc/Framework/Headers/WebRTC/RTCDefaultVideoEncoderFactory.h rename to call/adaptation/degradation_preference_provider.cc index 7588ffb84a..c87e49f366 100644 --- a/sdk/objc/Framework/Headers/WebRTC/RTCDefaultVideoEncoderFactory.h +++ b/call/adaptation/degradation_preference_provider.cc @@ -1,5 +1,5 @@ /* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. + * Copyright 2020 The WebRTC Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,4 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#import "components/video_codec/RTCDefaultVideoEncoderFactory.h" +#include "call/adaptation/degradation_preference_provider.h" + +webrtc::DegradationPreferenceProvider::~DegradationPreferenceProvider() = + default; diff --git a/call/adaptation/degradation_preference_provider.h b/call/adaptation/degradation_preference_provider.h new file mode 100644 index 0000000000..1f75901cc5 --- /dev/null +++ b/call/adaptation/degradation_preference_provider.h @@ -0,0 +1,27 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_ +#define CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_ + +#include "api/rtp_parameters.h" + +namespace webrtc { + +class DegradationPreferenceProvider { + public: + virtual ~DegradationPreferenceProvider(); + + virtual DegradationPreference degradation_preference() const = 0; +}; + +} // namespace webrtc + +#endif // CALL_ADAPTATION_DEGRADATION_PREFERENCE_PROVIDER_H_ diff --git a/call/adaptation/resource.cc b/call/adaptation/resource.cc deleted file mode 100644 index a546450bc6..0000000000 --- a/call/adaptation/resource.cc +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2019 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "call/adaptation/resource.h" - -#include "absl/algorithm/container.h" -#include "rtc_base/checks.h" - -namespace webrtc { - -ResourceListener::~ResourceListener() {} - -Resource::Resource() - : encoder_queue_(nullptr), - resource_adaptation_queue_(nullptr), - usage_state_(absl::nullopt), - listener_(nullptr) {} - -Resource::~Resource() { - RTC_DCHECK(!listener_) - << "There is a listener depending on a Resource being destroyed."; -} - -void Resource::Initialize(rtc::TaskQueue* encoder_queue, - rtc::TaskQueue* resource_adaptation_queue) { - RTC_DCHECK(!encoder_queue_); - RTC_DCHECK(encoder_queue); - RTC_DCHECK(!resource_adaptation_queue_); - RTC_DCHECK(resource_adaptation_queue); - encoder_queue_ = encoder_queue; - resource_adaptation_queue_ = resource_adaptation_queue; -} - -void Resource::SetResourceListener(ResourceListener* listener) { - RTC_DCHECK(resource_adaptation_queue_); - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - // If you want to change listener you need to unregister the old listener by - // setting it to null first. 
- RTC_DCHECK(!listener_ || !listener) << "A listener is already set"; - listener_ = listener; -} - -absl::optional Resource::usage_state() const { - RTC_DCHECK(resource_adaptation_queue_); - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - return usage_state_; -} - -void Resource::ClearUsageState() { - RTC_DCHECK(resource_adaptation_queue_); - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - usage_state_ = absl::nullopt; -} - -bool Resource::IsAdaptationUpAllowed( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const { - return true; -} - -void Resource::OnAdaptationApplied( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) {} - -rtc::TaskQueue* Resource::encoder_queue() const { - return encoder_queue_; -} - -rtc::TaskQueue* Resource::resource_adaptation_queue() const { - return resource_adaptation_queue_; -} - -void Resource::OnResourceUsageStateMeasured(ResourceUsageState usage_state) { - RTC_DCHECK(resource_adaptation_queue_); - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - usage_state_ = usage_state; - if (!listener_) - return; - listener_->OnResourceUsageStateMeasured(this); -} - -} // namespace webrtc diff --git a/call/adaptation/resource.h b/call/adaptation/resource.h deleted file mode 100644 index 2ee0c720d2..0000000000 --- a/call/adaptation/resource.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2019 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef CALL_ADAPTATION_RESOURCE_H_ -#define CALL_ADAPTATION_RESOURCE_H_ - -#include -#include - -#include "absl/types/optional.h" -#include "api/scoped_refptr.h" -#include "call/adaptation/video_source_restrictions.h" -#include "call/adaptation/video_stream_input_state.h" -#include "rtc_base/ref_count.h" -#include "rtc_base/task_queue.h" - -namespace webrtc { - -class Resource; - -enum class ResourceUsageState { - // Action is needed to minimze the load on this resource. - kOveruse, - // Increasing the load on this resource is desired, if possible. - kUnderuse, -}; - -class ResourceListener { - public: - virtual ~ResourceListener(); - - // Informs the listener of a new measurement of resource usage. This means - // that |resource->usage_state()| is now up-to-date. - virtual void OnResourceUsageStateMeasured( - rtc::scoped_refptr resource) = 0; -}; - -class Resource : public rtc::RefCountInterface { - public: - // By default, usage_state() is null until a measurement is made. - Resource(); - ~Resource() override; - - void Initialize(rtc::TaskQueue* encoder_queue, - rtc::TaskQueue* resource_adaptation_queue); - - void SetResourceListener(ResourceListener* listener); - - absl::optional usage_state() const; - void ClearUsageState(); - - // This method allows the Resource to reject a proposed adaptation in the "up" - // direction if it predicts this would cause overuse of this resource. The - // default implementation unconditionally returns true (= allowed). 
- virtual bool IsAdaptationUpAllowed( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const; - virtual void OnAdaptationApplied( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource); - - virtual std::string name() const = 0; - - protected: - rtc::TaskQueue* encoder_queue() const; - rtc::TaskQueue* resource_adaptation_queue() const; - - // Updates the usage state and informs all registered listeners. - void OnResourceUsageStateMeasured(ResourceUsageState usage_state); - - private: - rtc::TaskQueue* encoder_queue_; - rtc::TaskQueue* resource_adaptation_queue_; - absl::optional usage_state_ - RTC_GUARDED_BY(resource_adaptation_queue_); - ResourceListener* listener_ RTC_GUARDED_BY(resource_adaptation_queue_); -}; - -} // namespace webrtc - -#endif // CALL_ADAPTATION_RESOURCE_H_ diff --git a/call/adaptation/resource_adaptation_processor.cc b/call/adaptation/resource_adaptation_processor.cc index 0224ac3bb2..741575ae38 100644 --- a/call/adaptation/resource_adaptation_processor.cc +++ b/call/adaptation/resource_adaptation_processor.cc @@ -11,339 +11,382 @@ #include "call/adaptation/resource_adaptation_processor.h" #include +#include #include #include "absl/algorithm/container.h" +#include "api/sequence_checker.h" +#include "api/video/video_adaptation_counters.h" +#include "call/adaptation/video_stream_adapter.h" +#include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/strings/string_builder.h" +#include "rtc_base/task_utils/to_queued_task.h" namespace webrtc { +ResourceAdaptationProcessor::ResourceListenerDelegate::ResourceListenerDelegate( + ResourceAdaptationProcessor* processor) + : task_queue_(nullptr), processor_(processor) {} + +void 
ResourceAdaptationProcessor::ResourceListenerDelegate::SetTaskQueue( + TaskQueueBase* task_queue) { + RTC_DCHECK(!task_queue_); + RTC_DCHECK(task_queue); + task_queue_ = task_queue; + RTC_DCHECK_RUN_ON(task_queue_); +} + +void ResourceAdaptationProcessor::ResourceListenerDelegate:: + OnProcessorDestroyed() { + RTC_DCHECK_RUN_ON(task_queue_); + processor_ = nullptr; +} + +void ResourceAdaptationProcessor::ResourceListenerDelegate:: + OnResourceUsageStateMeasured(rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + if (!task_queue_->IsCurrent()) { + task_queue_->PostTask(ToQueuedTask( + [this_ref = rtc::scoped_refptr(this), + resource, usage_state] { + this_ref->OnResourceUsageStateMeasured(resource, usage_state); + })); + return; + } + RTC_DCHECK_RUN_ON(task_queue_); + if (processor_) { + processor_->OnResourceUsageStateMeasured(resource, usage_state); + } +} + +ResourceAdaptationProcessor::MitigationResultAndLogMessage:: + MitigationResultAndLogMessage() + : result(MitigationResult::kAdaptationApplied), message() {} + +ResourceAdaptationProcessor::MitigationResultAndLogMessage:: + MitigationResultAndLogMessage(MitigationResult result, std::string message) + : result(result), message(std::move(message)) {} + ResourceAdaptationProcessor::ResourceAdaptationProcessor( - VideoStreamInputStateProvider* input_state_provider, - VideoStreamEncoderObserver* encoder_stats_observer) - : sequence_checker_(), - is_resource_adaptation_enabled_(false), - input_state_provider_(input_state_provider), - encoder_stats_observer_(encoder_stats_observer), + VideoStreamAdapter* stream_adapter) + : task_queue_(nullptr), + resource_listener_delegate_( + rtc::make_ref_counted(this)), resources_(), - degradation_preference_(DegradationPreference::DISABLED), - effective_degradation_preference_(DegradationPreference::DISABLED), - is_screenshare_(false), - stream_adapter_(std::make_unique()), + stream_adapter_(stream_adapter), last_reported_source_restrictions_(), - 
processing_in_progress_(false) { - sequence_checker_.Detach(); + previous_mitigation_results_() { + RTC_DCHECK(stream_adapter_); } ResourceAdaptationProcessor::~ResourceAdaptationProcessor() { - RTC_DCHECK_RUN_ON(&sequence_checker_); - RTC_DCHECK(!is_resource_adaptation_enabled_); - RTC_DCHECK(adaptation_listeners_.empty()) - << "There are listener(s) depending on a ResourceAdaptationProcessor " - << "being destroyed."; + RTC_DCHECK_RUN_ON(task_queue_); RTC_DCHECK(resources_.empty()) << "There are resource(s) attached to a ResourceAdaptationProcessor " << "being destroyed."; + stream_adapter_->RemoveRestrictionsListener(this); + resource_listener_delegate_->OnProcessorDestroyed(); } -void ResourceAdaptationProcessor::InitializeOnResourceAdaptationQueue() { - // Allows |sequence_checker_| to attach to the resource adaptation queue. - // The caller is responsible for ensuring that this is the current queue. - RTC_DCHECK_RUN_ON(&sequence_checker_); -} - -DegradationPreference ResourceAdaptationProcessor::degradation_preference() - const { - RTC_DCHECK_RUN_ON(&sequence_checker_); - return degradation_preference_; +void ResourceAdaptationProcessor::SetTaskQueue(TaskQueueBase* task_queue) { + RTC_DCHECK(!task_queue_); + RTC_DCHECK(task_queue); + task_queue_ = task_queue; + resource_listener_delegate_->SetTaskQueue(task_queue); + RTC_DCHECK_RUN_ON(task_queue_); + // Now that we have the queue we can attach as adaptation listener. 
+ stream_adapter_->AddRestrictionsListener(this); } -DegradationPreference -ResourceAdaptationProcessor::effective_degradation_preference() const { - RTC_DCHECK_RUN_ON(&sequence_checker_); - return effective_degradation_preference_; +void ResourceAdaptationProcessor::AddResourceLimitationsListener( + ResourceLimitationsListener* limitations_listener) { + RTC_DCHECK_RUN_ON(task_queue_); + RTC_DCHECK(std::find(resource_limitations_listeners_.begin(), + resource_limitations_listeners_.end(), + limitations_listener) == + resource_limitations_listeners_.end()); + resource_limitations_listeners_.push_back(limitations_listener); } -void ResourceAdaptationProcessor::StartResourceAdaptation() { - RTC_DCHECK_RUN_ON(&sequence_checker_); - if (is_resource_adaptation_enabled_) - return; - for (const auto& resource : resources_) { - resource->SetResourceListener(this); - } - is_resource_adaptation_enabled_ = true; +void ResourceAdaptationProcessor::RemoveResourceLimitationsListener( + ResourceLimitationsListener* limitations_listener) { + RTC_DCHECK_RUN_ON(task_queue_); + auto it = + std::find(resource_limitations_listeners_.begin(), + resource_limitations_listeners_.end(), limitations_listener); + RTC_DCHECK(it != resource_limitations_listeners_.end()); + resource_limitations_listeners_.erase(it); } -void ResourceAdaptationProcessor::StopResourceAdaptation() { - RTC_DCHECK_RUN_ON(&sequence_checker_); - if (!is_resource_adaptation_enabled_) - return; - for (const auto& resource : resources_) { - resource->SetResourceListener(nullptr); +void ResourceAdaptationProcessor::AddResource( + rtc::scoped_refptr resource) { + RTC_DCHECK(resource); + { + MutexLock crit(&resources_lock_); + RTC_DCHECK(absl::c_find(resources_, resource) == resources_.end()) + << "Resource \"" << resource->Name() << "\" was already registered."; + resources_.push_back(resource); } - is_resource_adaptation_enabled_ = false; -} - -void ResourceAdaptationProcessor::AddAdaptationListener( - 
ResourceAdaptationProcessorListener* adaptation_listener) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - RTC_DCHECK(std::find(adaptation_listeners_.begin(), - adaptation_listeners_.end(), - adaptation_listener) == adaptation_listeners_.end()); - adaptation_listeners_.push_back(adaptation_listener); -} - -void ResourceAdaptationProcessor::RemoveAdaptationListener( - ResourceAdaptationProcessorListener* adaptation_listener) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - auto it = std::find(adaptation_listeners_.begin(), - adaptation_listeners_.end(), adaptation_listener); - RTC_DCHECK(it != adaptation_listeners_.end()); - adaptation_listeners_.erase(it); + resource->SetResourceListener(resource_listener_delegate_); + RTC_LOG(INFO) << "Registered resource \"" << resource->Name() << "\"."; } -void ResourceAdaptationProcessor::AddResource( - rtc::scoped_refptr resource) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - // TODO(hbos): Allow adding resources while |is_resource_adaptation_enabled_| - // by registering as a listener of the resource on adding it. - RTC_DCHECK(!is_resource_adaptation_enabled_); - RTC_DCHECK(std::find(resources_.begin(), resources_.end(), resource) == - resources_.end()); - resources_.push_back(resource); +std::vector> +ResourceAdaptationProcessor::GetResources() const { + MutexLock crit(&resources_lock_); + return resources_; } void ResourceAdaptationProcessor::RemoveResource( rtc::scoped_refptr resource) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - // TODO(hbos): Allow removing resources while - // |is_resource_adaptation_enabled_| by unregistering as a listener of the - // resource on removing it. 
- RTC_DCHECK(!is_resource_adaptation_enabled_); - auto it = std::find(resources_.begin(), resources_.end(), resource); - RTC_DCHECK(it != resources_.end()); - resources_.erase(it); + RTC_DCHECK(resource); + RTC_LOG(INFO) << "Removing resource \"" << resource->Name() << "\"."; + resource->SetResourceListener(nullptr); + { + MutexLock crit(&resources_lock_); + auto it = absl::c_find(resources_, resource); + RTC_DCHECK(it != resources_.end()) << "Resource \"" << resource->Name() + << "\" was not a registered resource."; + resources_.erase(it); + } + RemoveLimitationsImposedByResource(std::move(resource)); } -void ResourceAdaptationProcessor::SetDegradationPreference( - DegradationPreference degradation_preference) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - degradation_preference_ = degradation_preference; - MaybeUpdateEffectiveDegradationPreference(); -} +void ResourceAdaptationProcessor::RemoveLimitationsImposedByResource( + rtc::scoped_refptr resource) { + if (!task_queue_->IsCurrent()) { + task_queue_->PostTask(ToQueuedTask( + [this, resource]() { RemoveLimitationsImposedByResource(resource); })); + return; + } + RTC_DCHECK_RUN_ON(task_queue_); + auto resource_adaptation_limits = + adaptation_limits_by_resources_.find(resource); + if (resource_adaptation_limits != adaptation_limits_by_resources_.end()) { + VideoStreamAdapter::RestrictionsWithCounters adaptation_limits = + resource_adaptation_limits->second; + adaptation_limits_by_resources_.erase(resource_adaptation_limits); + if (adaptation_limits_by_resources_.empty()) { + // Only the resource being removed was adapted so clear restrictions. 
+ stream_adapter_->ClearRestrictions(); + return; + } -void ResourceAdaptationProcessor::SetIsScreenshare(bool is_screenshare) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - is_screenshare_ = is_screenshare; - MaybeUpdateEffectiveDegradationPreference(); -} + VideoStreamAdapter::RestrictionsWithCounters most_limited = + FindMostLimitedResources().second; -void ResourceAdaptationProcessor::MaybeUpdateEffectiveDegradationPreference() { - RTC_DCHECK_RUN_ON(&sequence_checker_); - effective_degradation_preference_ = - (is_screenshare_ && - degradation_preference_ == DegradationPreference::BALANCED) - ? DegradationPreference::MAINTAIN_RESOLUTION - : degradation_preference_; - stream_adapter_->SetDegradationPreference(effective_degradation_preference_); - MaybeUpdateVideoSourceRestrictions(nullptr); -} + if (adaptation_limits.counters.Total() <= most_limited.counters.Total()) { + // The removed limitations were less limited than the most limited + // resource. Don't change the current restrictions. + return; + } -void ResourceAdaptationProcessor::ResetVideoSourceRestrictions() { - RTC_DCHECK_RUN_ON(&sequence_checker_); - stream_adapter_->ClearRestrictions(); - adaptations_counts_by_resource_.clear(); - MaybeUpdateVideoSourceRestrictions(nullptr); -} + // Apply the new most limited resource as the next restrictions. 
+ Adaptation adapt_to = stream_adapter_->GetAdaptationTo( + most_limited.counters, most_limited.restrictions); + RTC_DCHECK_EQ(adapt_to.status(), Adaptation::Status::kValid); + stream_adapter_->ApplyAdaptation(adapt_to, nullptr); -void ResourceAdaptationProcessor::MaybeUpdateVideoSourceRestrictions( - rtc::scoped_refptr reason) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - VideoSourceRestrictions new_source_restrictions = - FilterRestrictionsByDegradationPreference( - stream_adapter_->source_restrictions(), - effective_degradation_preference_); - if (last_reported_source_restrictions_ != new_source_restrictions) { - last_reported_source_restrictions_ = std::move(new_source_restrictions); - for (auto* adaptation_listener : adaptation_listeners_) { - adaptation_listener->OnVideoSourceRestrictionsUpdated( - last_reported_source_restrictions_, - stream_adapter_->adaptation_counters(), reason); - } - if (reason) { - UpdateResourceDegradationCounts(reason); - } + RTC_LOG(INFO) << "Most limited resource removed. Restoring restrictions to " + "next most limited restrictions: " + << most_limited.restrictions.ToString() << " with counters " + << most_limited.counters.ToString(); } } void ResourceAdaptationProcessor::OnResourceUsageStateMeasured( - rtc::scoped_refptr resource) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - RTC_DCHECK(resource->usage_state().has_value()); - switch (resource->usage_state().value()) { + rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + RTC_DCHECK_RUN_ON(task_queue_); + RTC_DCHECK(resource); + // |resource| could have been removed after signalling. 
+ { + MutexLock crit(&resources_lock_); + if (absl::c_find(resources_, resource) == resources_.end()) { + RTC_LOG(INFO) << "Ignoring signal from removed resource \"" + << resource->Name() << "\"."; + return; + } + } + MitigationResultAndLogMessage result_and_message; + switch (usage_state) { case ResourceUsageState::kOveruse: - OnResourceOveruse(resource); + result_and_message = OnResourceOveruse(resource); break; case ResourceUsageState::kUnderuse: - OnResourceUnderuse(resource); + result_and_message = OnResourceUnderuse(resource); break; } -} - -bool ResourceAdaptationProcessor::HasSufficientInputForAdaptation( - const VideoStreamInputState& input_state) const { - RTC_DCHECK_RUN_ON(&sequence_checker_); - return input_state.HasInputFrameSizeAndFramesPerSecond() && - (effective_degradation_preference_ != - DegradationPreference::MAINTAIN_RESOLUTION || - input_state.frames_per_second() >= kMinFrameRateFps); -} - -void ResourceAdaptationProcessor::OnResourceUnderuse( - rtc::scoped_refptr reason_resource) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - RTC_DCHECK(!processing_in_progress_); - processing_in_progress_ = true; - // Clear all usage states. In order to re-run adaptation logic, resources need - // to provide new resource usage measurements. - // TODO(hbos): Support not unconditionally clearing usage states by having the - // ResourceAdaptationProcessor check in on its resources at certain intervals. - for (const auto& resource : resources_) { - resource->ClearUsageState(); - } - VideoStreamInputState input_state = input_state_provider_->InputState(); - if (effective_degradation_preference_ == DegradationPreference::DISABLED || - !HasSufficientInputForAdaptation(input_state)) { - processing_in_progress_ = false; + // Maybe log the result of the operation. 
+ auto it = previous_mitigation_results_.find(resource.get()); + if (it != previous_mitigation_results_.end() && + it->second == result_and_message.result) { + // This resource has previously reported the same result and we haven't + // successfully adapted since - don't log to avoid spam. return; } - if (!IsResourceAllowedToAdaptUp(reason_resource)) { - processing_in_progress_ = false; - return; + RTC_LOG(INFO) << "Resource \"" << resource->Name() << "\" signalled " + << ResourceUsageStateToString(usage_state) << ". " + << result_and_message.message; + if (result_and_message.result == MitigationResult::kAdaptationApplied) { + previous_mitigation_results_.clear(); + } else { + previous_mitigation_results_.insert( + std::make_pair(resource.get(), result_and_message.result)); } - // Update video input states and encoder settings for accurate adaptation. - stream_adapter_->SetInput(input_state); +} + +ResourceAdaptationProcessor::MitigationResultAndLogMessage +ResourceAdaptationProcessor::OnResourceUnderuse( + rtc::scoped_refptr reason_resource) { + RTC_DCHECK_RUN_ON(task_queue_); // How can this stream be adapted up? Adaptation adaptation = stream_adapter_->GetAdaptationUp(); if (adaptation.status() != Adaptation::Status::kValid) { - processing_in_progress_ = false; - return; + rtc::StringBuilder message; + message << "Not adapting up because VideoStreamAdapter returned " + << Adaptation::StatusToString(adaptation.status()); + return MitigationResultAndLogMessage(MitigationResult::kRejectedByAdapter, + message.Release()); } - // Are all resources OK with this adaptation being applied? 
- VideoSourceRestrictions restrictions_before = - stream_adapter_->source_restrictions(); - VideoSourceRestrictions restrictions_after = - stream_adapter_->PeekNextRestrictions(adaptation); - if (!absl::c_all_of(resources_, [&input_state, &restrictions_before, - &restrictions_after, &reason_resource]( - rtc::scoped_refptr resource) { - return resource->IsAdaptationUpAllowed(input_state, restrictions_before, - restrictions_after, - reason_resource); - })) { - processing_in_progress_ = false; - return; + // Check that resource is most limited. + std::vector> most_limited_resources; + VideoStreamAdapter::RestrictionsWithCounters most_limited_restrictions; + std::tie(most_limited_resources, most_limited_restrictions) = + FindMostLimitedResources(); + + // If the most restricted resource is less limited than current restrictions + // then proceed with adapting up. + if (!most_limited_resources.empty() && + most_limited_restrictions.counters.Total() >= + stream_adapter_->adaptation_counters().Total()) { + // If |reason_resource| is not one of the most limiting resources then abort + // adaptation. + if (absl::c_find(most_limited_resources, reason_resource) == + most_limited_resources.end()) { + rtc::StringBuilder message; + message << "Resource \"" << reason_resource->Name() + << "\" was not the most limited resource."; + return MitigationResultAndLogMessage( + MitigationResult::kNotMostLimitedResource, message.Release()); + } + + if (most_limited_resources.size() > 1) { + // If there are multiple most limited resources, all must signal underuse + // before the adaptation is applied. + UpdateResourceLimitations(reason_resource, adaptation.restrictions(), + adaptation.counters()); + rtc::StringBuilder message; + message << "Resource \"" << reason_resource->Name() + << "\" was not the only most limited resource."; + return MitigationResultAndLogMessage( + MitigationResult::kSharedMostLimitedResource, message.Release()); + } } // Apply adaptation. 
- stream_adapter_->ApplyAdaptation(adaptation); - for (const auto& resource : resources_) { - resource->OnAdaptationApplied(input_state, restrictions_before, - restrictions_after, reason_resource); - } - // Update VideoSourceRestrictions based on adaptation. This also informs the - // |adaptation_listeners_|. - MaybeUpdateVideoSourceRestrictions(reason_resource); - processing_in_progress_ = false; + stream_adapter_->ApplyAdaptation(adaptation, reason_resource); + rtc::StringBuilder message; + message << "Adapted up successfully. Unfiltered adaptations: " + << stream_adapter_->adaptation_counters().ToString(); + return MitigationResultAndLogMessage(MitigationResult::kAdaptationApplied, + message.Release()); } -void ResourceAdaptationProcessor::OnResourceOveruse( +ResourceAdaptationProcessor::MitigationResultAndLogMessage +ResourceAdaptationProcessor::OnResourceOveruse( rtc::scoped_refptr reason_resource) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - RTC_DCHECK(!processing_in_progress_); - processing_in_progress_ = true; - // Clear all usage states. In order to re-run adaptation logic, resources need - // to provide new resource usage measurements. - // TODO(hbos): Support not unconditionally clearing usage states by having the - // ResourceAdaptationProcessor check in on its resources at certain intervals. - for (const auto& resource : resources_) { - resource->ClearUsageState(); - } - VideoStreamInputState input_state = input_state_provider_->InputState(); - if (!input_state.has_input()) { - processing_in_progress_ = false; - return; - } - if (effective_degradation_preference_ == DegradationPreference::DISABLED || - !HasSufficientInputForAdaptation(input_state)) { - processing_in_progress_ = false; - return; - } - // Update video input states and encoder settings for accurate adaptation. - stream_adapter_->SetInput(input_state); + RTC_DCHECK_RUN_ON(task_queue_); // How can this stream be adapted up? 
Adaptation adaptation = stream_adapter_->GetAdaptationDown(); - if (adaptation.min_pixel_limit_reached()) { - encoder_stats_observer_->OnMinPixelLimitReached(); + if (adaptation.status() == Adaptation::Status::kLimitReached) { + // Add resource as most limited. + VideoStreamAdapter::RestrictionsWithCounters restrictions; + std::tie(std::ignore, restrictions) = FindMostLimitedResources(); + UpdateResourceLimitations(reason_resource, restrictions.restrictions, + restrictions.counters); } if (adaptation.status() != Adaptation::Status::kValid) { - processing_in_progress_ = false; - return; + rtc::StringBuilder message; + message << "Not adapting down because VideoStreamAdapter returned " + << Adaptation::StatusToString(adaptation.status()); + return MitigationResultAndLogMessage(MitigationResult::kRejectedByAdapter, + message.Release()); } // Apply adaptation. - VideoSourceRestrictions restrictions_before = - stream_adapter_->source_restrictions(); - VideoSourceRestrictions restrictions_after = - stream_adapter_->PeekNextRestrictions(adaptation); - stream_adapter_->ApplyAdaptation(adaptation); - for (const auto& resource : resources_) { - resource->OnAdaptationApplied(input_state, restrictions_before, - restrictions_after, reason_resource); - } - // Update VideoSourceRestrictions based on adaptation. This also informs the - // |adaptation_listeners_|. - MaybeUpdateVideoSourceRestrictions(reason_resource); - processing_in_progress_ = false; + UpdateResourceLimitations(reason_resource, adaptation.restrictions(), + adaptation.counters()); + stream_adapter_->ApplyAdaptation(adaptation, reason_resource); + rtc::StringBuilder message; + message << "Adapted down successfully. 
Unfiltered adaptations: " + << stream_adapter_->adaptation_counters().ToString(); + return MitigationResultAndLogMessage(MitigationResult::kAdaptationApplied, + message.Release()); } -void ResourceAdaptationProcessor::TriggerAdaptationDueToFrameDroppedDueToSize( - rtc::scoped_refptr reason_resource) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - VideoAdaptationCounters counters_before = - stream_adapter_->adaptation_counters(); - OnResourceOveruse(reason_resource); - if (degradation_preference_ == DegradationPreference::BALANCED && - stream_adapter_->adaptation_counters().fps_adaptations > - counters_before.fps_adaptations) { - // Oops, we adapted frame rate. Adapt again, maybe it will adapt resolution! - // Though this is not guaranteed... - OnResourceOveruse(reason_resource); - } - if (stream_adapter_->adaptation_counters().resolution_adaptations > - counters_before.resolution_adaptations) { - encoder_stats_observer_->OnInitialQualityResolutionAdaptDown(); +std::pair>, + VideoStreamAdapter::RestrictionsWithCounters> +ResourceAdaptationProcessor::FindMostLimitedResources() const { + std::vector> most_limited_resources; + VideoStreamAdapter::RestrictionsWithCounters most_limited_restrictions{ + VideoSourceRestrictions(), VideoAdaptationCounters()}; + + for (const auto& resource_and_adaptation_limit_ : + adaptation_limits_by_resources_) { + const auto& restrictions_with_counters = + resource_and_adaptation_limit_.second; + if (restrictions_with_counters.counters.Total() > + most_limited_restrictions.counters.Total()) { + most_limited_restrictions = restrictions_with_counters; + most_limited_resources.clear(); + most_limited_resources.push_back(resource_and_adaptation_limit_.first); + } else if (most_limited_restrictions.counters == + restrictions_with_counters.counters) { + most_limited_resources.push_back(resource_and_adaptation_limit_.first); + } } + return std::make_pair(std::move(most_limited_resources), + most_limited_restrictions); } -void 
ResourceAdaptationProcessor::UpdateResourceDegradationCounts( - rtc::scoped_refptr resource) { - RTC_DCHECK_RUN_ON(&sequence_checker_); - RTC_DCHECK(resource); - int delta = stream_adapter_->adaptation_counters().Total(); - for (const auto& adaptations : adaptations_counts_by_resource_) { - delta -= adaptations.second; +void ResourceAdaptationProcessor::UpdateResourceLimitations( + rtc::scoped_refptr reason_resource, + const VideoSourceRestrictions& restrictions, + const VideoAdaptationCounters& counters) { + auto& adaptation_limits = adaptation_limits_by_resources_[reason_resource]; + if (adaptation_limits.restrictions == restrictions && + adaptation_limits.counters == counters) { + return; } + adaptation_limits = {restrictions, counters}; - // Default value is 0, inserts the value if missing. - adaptations_counts_by_resource_[resource] += delta; - RTC_DCHECK_GE(adaptations_counts_by_resource_[resource], 0); + std::map, VideoAdaptationCounters> limitations; + for (const auto& p : adaptation_limits_by_resources_) { + limitations.insert(std::make_pair(p.first, p.second.counters)); + } + for (auto limitations_listener : resource_limitations_listeners_) { + limitations_listener->OnResourceLimitationChanged(reason_resource, + limitations); + } } -bool ResourceAdaptationProcessor::IsResourceAllowedToAdaptUp( - rtc::scoped_refptr resource) const { - RTC_DCHECK_RUN_ON(&sequence_checker_); - RTC_DCHECK(resource); - const auto& adaptations = adaptations_counts_by_resource_.find(resource); - return adaptations != adaptations_counts_by_resource_.end() && - adaptations->second > 0; +void ResourceAdaptationProcessor::OnVideoSourceRestrictionsUpdated( + VideoSourceRestrictions restrictions, + const VideoAdaptationCounters& adaptation_counters, + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) { + RTC_DCHECK_RUN_ON(task_queue_); + if (reason) { + UpdateResourceLimitations(reason, unfiltered_restrictions, + adaptation_counters); + } else if 
(adaptation_counters.Total() == 0) { + // Adaptations are cleared. + adaptation_limits_by_resources_.clear(); + previous_mitigation_results_.clear(); + for (auto limitations_listener : resource_limitations_listeners_) { + limitations_listener->OnResourceLimitationChanged(nullptr, {}); + } + } } } // namespace webrtc diff --git a/call/adaptation/resource_adaptation_processor.h b/call/adaptation/resource_adaptation_processor.h index cf1e187026..c84d359fec 100644 --- a/call/adaptation/resource_adaptation_processor.h +++ b/call/adaptation/resource_adaptation_processor.h @@ -13,20 +13,23 @@ #include #include +#include +#include #include #include "absl/types/optional.h" +#include "api/adaptation/resource.h" #include "api/rtp_parameters.h" #include "api/scoped_refptr.h" +#include "api/task_queue/task_queue_base.h" +#include "api/video/video_adaptation_counters.h" #include "api/video/video_frame.h" #include "api/video/video_stream_encoder_observer.h" -#include "call/adaptation/resource.h" #include "call/adaptation/resource_adaptation_processor_interface.h" #include "call/adaptation/video_source_restrictions.h" #include "call/adaptation/video_stream_adapter.h" #include "call/adaptation/video_stream_input_state.h" #include "call/adaptation/video_stream_input_state_provider.h" -#include "rtc_base/synchronization/sequence_checker.h" namespace webrtc { @@ -45,111 +48,119 @@ namespace webrtc { // // The ResourceAdaptationProcessor is single-threaded. It may be constructed on // any thread but MUST subsequently be used and destroyed on a single sequence, -// i.e. the "resource adaptation task queue". +// i.e. the "resource adaptation task queue". Resources can be added and removed +// from any thread. 
class ResourceAdaptationProcessor : public ResourceAdaptationProcessorInterface, + public VideoSourceRestrictionsListener, public ResourceListener { public: - ResourceAdaptationProcessor( - VideoStreamInputStateProvider* input_state_provider, - VideoStreamEncoderObserver* encoder_stats_observer); + explicit ResourceAdaptationProcessor( + VideoStreamAdapter* video_stream_adapter); ~ResourceAdaptationProcessor() override; - void InitializeOnResourceAdaptationQueue() override; + void SetTaskQueue(TaskQueueBase* task_queue) override; // ResourceAdaptationProcessorInterface implementation. - DegradationPreference degradation_preference() const override; - DegradationPreference effective_degradation_preference() const override; - - void StartResourceAdaptation() override; - void StopResourceAdaptation() override; - void AddAdaptationListener( - ResourceAdaptationProcessorListener* adaptation_listener) override; - void RemoveAdaptationListener( - ResourceAdaptationProcessorListener* adaptation_listener) override; + void AddResourceLimitationsListener( + ResourceLimitationsListener* limitations_listener) override; + void RemoveResourceLimitationsListener( + ResourceLimitationsListener* limitations_listener) override; void AddResource(rtc::scoped_refptr resource) override; + std::vector> GetResources() const override; void RemoveResource(rtc::scoped_refptr resource) override; - void SetDegradationPreference( - DegradationPreference degradation_preference) override; - void SetIsScreenshare(bool is_screenshare) override; - void ResetVideoSourceRestrictions() override; - // ResourceListener implementation. // Triggers OnResourceUnderuse() or OnResourceOveruse(). - void OnResourceUsageStateMeasured( - rtc::scoped_refptr resource) override; + void OnResourceUsageStateMeasured(rtc::scoped_refptr resource, + ResourceUsageState usage_state) override; - // May trigger 1-2 adaptations. It is meant to reduce resolution but this is - // not guaranteed. 
It may adapt frame rate, which does not address the issue. - // TODO(hbos): Can we get rid of this? - void TriggerAdaptationDueToFrameDroppedDueToSize( - rtc::scoped_refptr reason_resource) override; + // VideoSourceRestrictionsListener implementation. + void OnVideoSourceRestrictionsUpdated( + VideoSourceRestrictions restrictions, + const VideoAdaptationCounters& adaptation_counters, + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) override; private: - bool HasSufficientInputForAdaptation( - const VideoStreamInputState& input_state) const; + // If resource usage measurements happens off the adaptation task queue, this + // class takes care of posting the measurement for the processor to handle it + // on the adaptation task queue. + class ResourceListenerDelegate : public rtc::RefCountInterface, + public ResourceListener { + public: + explicit ResourceListenerDelegate(ResourceAdaptationProcessor* processor); + + void SetTaskQueue(TaskQueueBase* task_queue); + void OnProcessorDestroyed(); + + // ResourceListener implementation. + void OnResourceUsageStateMeasured(rtc::scoped_refptr resource, + ResourceUsageState usage_state) override; + + private: + TaskQueueBase* task_queue_; + ResourceAdaptationProcessor* processor_ RTC_GUARDED_BY(task_queue_); + }; + + enum class MitigationResult { + kNotMostLimitedResource, + kSharedMostLimitedResource, + kRejectedByAdapter, + kAdaptationApplied, + }; + + struct MitigationResultAndLogMessage { + MitigationResultAndLogMessage(); + MitigationResultAndLogMessage(MitigationResult result, std::string message); + MitigationResult result; + std::string message; + }; // Performs the adaptation by getting the next target, applying it and // informing listeners of the new VideoSourceRestriction and adaptation // counters. 
- void OnResourceUnderuse(rtc::scoped_refptr reason_resource); - void OnResourceOveruse(rtc::scoped_refptr reason_resource); - - // Needs to be invoked any time |degradation_preference_| or |is_screenshare_| - // changes to ensure |effective_degradation_preference_| is up-to-date. - void MaybeUpdateEffectiveDegradationPreference(); - // If the filtered source restrictions are different than - // |last_reported_source_restrictions_|, inform the listeners. - void MaybeUpdateVideoSourceRestrictions(rtc::scoped_refptr reason); - // Updates the number of times the resource has degraded based on the latest - // degradation applied. - void UpdateResourceDegradationCounts(rtc::scoped_refptr resource); - // Returns true if a Resource has been overused in the pass and is responsible - // for creating a VideoSourceRestriction. The current algorithm counts the - // number of times the resource caused an adaptation and allows adapting up - // if that number is non-zero. This is consistent with how adaptation has - // traditionally been handled. - // TODO(crbug.com/webrtc/11553) Change this algorithm to look at the resources - // restrictions rather than just the counters. - bool IsResourceAllowedToAdaptUp(rtc::scoped_refptr resource) const; - - webrtc::SequenceChecker sequence_checker_; - bool is_resource_adaptation_enabled_ RTC_GUARDED_BY(sequence_checker_); + MitigationResultAndLogMessage OnResourceUnderuse( + rtc::scoped_refptr reason_resource); + MitigationResultAndLogMessage OnResourceOveruse( + rtc::scoped_refptr reason_resource); + + void UpdateResourceLimitations(rtc::scoped_refptr reason_resource, + const VideoSourceRestrictions& restrictions, + const VideoAdaptationCounters& counters) + RTC_RUN_ON(task_queue_); + + // Searches |adaptation_limits_by_resources_| for each resource with the + // highest total adaptation counts. Adaptation up may only occur if the + // resource performing the adaptation is the only most limited resource. 
This + // function returns the list of all most limited resources as well as the + // corresponding adaptation of that resource. + std::pair>, + VideoStreamAdapter::RestrictionsWithCounters> + FindMostLimitedResources() const RTC_RUN_ON(task_queue_); + + void RemoveLimitationsImposedByResource( + rtc::scoped_refptr resource); + + TaskQueueBase* task_queue_; + rtc::scoped_refptr resource_listener_delegate_; // Input and output. - VideoStreamInputStateProvider* const input_state_provider_ - RTC_GUARDED_BY(sequence_checker_); - VideoStreamEncoderObserver* const encoder_stats_observer_ - RTC_GUARDED_BY(sequence_checker_); - std::vector adaptation_listeners_ - RTC_GUARDED_BY(sequence_checker_); + mutable Mutex resources_lock_; std::vector> resources_ - RTC_GUARDED_BY(sequence_checker_); + RTC_GUARDED_BY(resources_lock_); + std::vector resource_limitations_listeners_ + RTC_GUARDED_BY(task_queue_); // Purely used for statistics, does not ensure mapped resources stay alive. - std::map adaptations_counts_by_resource_ - RTC_GUARDED_BY(sequence_checker_); - // Adaptation strategy settings. - DegradationPreference degradation_preference_ - RTC_GUARDED_BY(sequence_checker_); - DegradationPreference effective_degradation_preference_ - RTC_GUARDED_BY(sequence_checker_); - bool is_screenshare_ RTC_GUARDED_BY(sequence_checker_); + std::map, + VideoStreamAdapter::RestrictionsWithCounters> + adaptation_limits_by_resources_ RTC_GUARDED_BY(task_queue_); // Responsible for generating and applying possible adaptations. - const std::unique_ptr stream_adapter_ - RTC_GUARDED_BY(sequence_checker_); + VideoStreamAdapter* const stream_adapter_ RTC_GUARDED_BY(task_queue_); VideoSourceRestrictions last_reported_source_restrictions_ - RTC_GUARDED_BY(sequence_checker_); - // Prevents recursion. - // - // This is used to prevent triggering resource adaptation in the process of - // already handling resouce adaptation, since that could cause the same states - // to be modified in unexpected ways. 
Example: - // - // Resource::OnResourceUsageStateMeasured() -> - // ResourceAdaptationProcessor::OnResourceOveruse() -> - // Resource::OnAdaptationApplied() -> - // Resource::OnResourceUsageStateMeasured() -> - // ResourceAdaptationProcessor::OnResourceOveruse() // Boom, not allowed. - bool processing_in_progress_ RTC_GUARDED_BY(sequence_checker_); + RTC_GUARDED_BY(task_queue_); + // Keeps track of previous mitigation results per resource since the last + // successful adaptation. Used to avoid RTC_LOG spam. + std::map previous_mitigation_results_ + RTC_GUARDED_BY(task_queue_); }; } // namespace webrtc diff --git a/call/adaptation/resource_adaptation_processor_interface.cc b/call/adaptation/resource_adaptation_processor_interface.cc index 4e5251ce90..79f099b267 100644 --- a/call/adaptation/resource_adaptation_processor_interface.cc +++ b/call/adaptation/resource_adaptation_processor_interface.cc @@ -12,8 +12,9 @@ namespace webrtc { -ResourceAdaptationProcessorListener::~ResourceAdaptationProcessorListener() {} +ResourceAdaptationProcessorInterface::~ResourceAdaptationProcessorInterface() = + default; -ResourceAdaptationProcessorInterface::~ResourceAdaptationProcessorInterface() {} +ResourceLimitationsListener::~ResourceLimitationsListener() = default; } // namespace webrtc diff --git a/call/adaptation/resource_adaptation_processor_interface.h b/call/adaptation/resource_adaptation_processor_interface.h index d6295c4d75..8b1f94b73a 100644 --- a/call/adaptation/resource_adaptation_processor_interface.h +++ b/call/adaptation/resource_adaptation_processor_interface.h @@ -11,31 +11,32 @@ #ifndef CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_INTERFACE_H_ #define CALL_ADAPTATION_RESOURCE_ADAPTATION_PROCESSOR_INTERFACE_H_ +#include +#include + #include "absl/types/optional.h" +#include "api/adaptation/resource.h" #include "api/rtp_parameters.h" #include "api/scoped_refptr.h" +#include "api/task_queue/task_queue_base.h" #include "api/video/video_adaptation_counters.h" 
#include "api/video/video_frame.h" +#include "call/adaptation/adaptation_constraint.h" #include "call/adaptation/encoder_settings.h" -#include "call/adaptation/resource.h" #include "call/adaptation/video_source_restrictions.h" -#include "rtc_base/task_queue.h" namespace webrtc { -// The listener is responsible for carrying out the reconfiguration of the video -// source such that the VideoSourceRestrictions are fulfilled. -class ResourceAdaptationProcessorListener { +class ResourceLimitationsListener { public: - virtual ~ResourceAdaptationProcessorListener(); + virtual ~ResourceLimitationsListener(); - // The |restrictions| are filtered by degradation preference but not the - // |adaptation_counters|, which are currently only reported for legacy stats - // calculation purposes. - virtual void OnVideoSourceRestrictionsUpdated( - VideoSourceRestrictions restrictions, - const VideoAdaptationCounters& adaptation_counters, - rtc::scoped_refptr reason) = 0; + // The limitations on a resource were changed. This does not mean the current + // video restrictions have changed. + virtual void OnResourceLimitationChanged( + rtc::scoped_refptr resource, + const std::map, VideoAdaptationCounters>& + resource_limitations) = 0; }; // The Resource Adaptation Processor is responsible for reacting to resource @@ -46,43 +47,21 @@ class ResourceAdaptationProcessorInterface { public: virtual ~ResourceAdaptationProcessorInterface(); - virtual void InitializeOnResourceAdaptationQueue() = 0; - - virtual DegradationPreference degradation_preference() const = 0; - // Reinterprets "balanced + screenshare" as "maintain-resolution". - // TODO(hbos): Don't do this. This is not what "balanced" means. If the - // application wants to maintain resolution it should set that degradation - // preference rather than depend on non-standard behaviors. 
- virtual DegradationPreference effective_degradation_preference() const = 0; + virtual void SetTaskQueue(TaskQueueBase* task_queue) = 0; + virtual void AddResourceLimitationsListener( + ResourceLimitationsListener* limitations_listener) = 0; + virtual void RemoveResourceLimitationsListener( + ResourceLimitationsListener* limitations_listener) = 0; // Starts or stops listening to resources, effectively enabling or disabling - // processing. + // processing. May be called from anywhere. // TODO(https://crbug.com/webrtc/11172): Automatically register and unregister // with AddResource() and RemoveResource() instead. When the processor is // multi-stream aware, stream-specific resouces will get added and removed // over time. - virtual void StartResourceAdaptation() = 0; - virtual void StopResourceAdaptation() = 0; - virtual void AddAdaptationListener( - ResourceAdaptationProcessorListener* adaptation_listener) = 0; - virtual void RemoveAdaptationListener( - ResourceAdaptationProcessorListener* adaptation_listener) = 0; virtual void AddResource(rtc::scoped_refptr resource) = 0; + virtual std::vector> GetResources() const = 0; virtual void RemoveResource(rtc::scoped_refptr resource) = 0; - - virtual void SetDegradationPreference( - DegradationPreference degradation_preference) = 0; - virtual void SetIsScreenshare(bool is_screenshare) = 0; - virtual void ResetVideoSourceRestrictions() = 0; - - // May trigger one or more adaptations. It is meant to reduce resolution - - // useful if a frame was dropped due to its size - however, the implementation - // may not guarantee this (see resource_adaptation_processor.h). - // TODO(hbos): This is only part of the interface for backwards-compatiblity - // reasons. Can we replace this by something which actually satisfies the - // resolution constraints or get rid of it altogether? 
- virtual void TriggerAdaptationDueToFrameDroppedDueToSize( - rtc::scoped_refptr reason_resource) = 0; }; } // namespace webrtc diff --git a/call/adaptation/resource_adaptation_processor_unittest.cc b/call/adaptation/resource_adaptation_processor_unittest.cc index e94b3a99d7..5e4f44b221 100644 --- a/call/adaptation/resource_adaptation_processor_unittest.cc +++ b/call/adaptation/resource_adaptation_processor_unittest.cc @@ -10,15 +10,17 @@ #include "call/adaptation/resource_adaptation_processor.h" +#include "api/adaptation/resource.h" #include "api/scoped_refptr.h" #include "api/video/video_adaptation_counters.h" -#include "call/adaptation/resource.h" #include "call/adaptation/resource_adaptation_processor_interface.h" #include "call/adaptation/test/fake_frame_rate_provider.h" #include "call/adaptation/test/fake_resource.h" #include "call/adaptation/video_source_restrictions.h" #include "call/adaptation/video_stream_input_state_provider.h" #include "rtc_base/event.h" +#include "rtc_base/gunit.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "test/gtest.h" @@ -28,31 +30,42 @@ namespace { const int kDefaultFrameRate = 30; const int kDefaultFrameSize = 1280 * 720; +const int kDefaultTimeoutMs = 5000; -class ResourceAdaptationProcessorListenerForTesting - : public ResourceAdaptationProcessorListener { +class VideoSourceRestrictionsListenerForTesting + : public VideoSourceRestrictionsListener { public: - ResourceAdaptationProcessorListenerForTesting() + VideoSourceRestrictionsListenerForTesting() : restrictions_updated_count_(0), restrictions_(), adaptation_counters_(), reason_(nullptr) {} - ~ResourceAdaptationProcessorListenerForTesting() override {} + ~VideoSourceRestrictionsListenerForTesting() override {} size_t restrictions_updated_count() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); return restrictions_updated_count_; } - const VideoSourceRestrictions& restrictions() const { return restrictions_; } - const 
VideoAdaptationCounters& adaptation_counters() const { + VideoSourceRestrictions restrictions() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return restrictions_; + } + VideoAdaptationCounters adaptation_counters() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); return adaptation_counters_; } - rtc::scoped_refptr reason() const { return reason_; } + rtc::scoped_refptr reason() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return reason_; + } - // ResourceAdaptationProcessorListener implementation. + // VideoSourceRestrictionsListener implementation. void OnVideoSourceRestrictionsUpdated( VideoSourceRestrictions restrictions, const VideoAdaptationCounters& adaptation_counters, - rtc::scoped_refptr reason) override { + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) override { + RTC_DCHECK_RUN_ON(&sequence_checker_); ++restrictions_updated_count_; restrictions_ = restrictions; adaptation_counters_ = adaptation_counters; @@ -60,47 +73,35 @@ class ResourceAdaptationProcessorListenerForTesting } private: - size_t restrictions_updated_count_; - VideoSourceRestrictions restrictions_; - VideoAdaptationCounters adaptation_counters_; - rtc::scoped_refptr reason_; + SequenceChecker sequence_checker_; + size_t restrictions_updated_count_ RTC_GUARDED_BY(&sequence_checker_); + VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&sequence_checker_); + VideoAdaptationCounters adaptation_counters_ + RTC_GUARDED_BY(&sequence_checker_); + rtc::scoped_refptr reason_ RTC_GUARDED_BY(&sequence_checker_); }; class ResourceAdaptationProcessorTest : public ::testing::Test { public: ResourceAdaptationProcessorTest() - : resource_adaptation_queue_("ResourceAdaptationQueue"), - encoder_queue_("EncoderQueue"), - frame_rate_provider_(), + : frame_rate_provider_(), input_state_provider_(&frame_rate_provider_), - resource_(new FakeResource("FakeResource")), - other_resource_(new FakeResource("OtherFakeResource")), + 
resource_(FakeResource::Create("FakeResource")), + other_resource_(FakeResource::Create("OtherFakeResource")), + video_stream_adapter_( + std::make_unique(&input_state_provider_, + &frame_rate_provider_)), processor_(std::make_unique( - &input_state_provider_, - /*encoder_stats_observer=*/&frame_rate_provider_)) { - resource_->Initialize(&encoder_queue_, &resource_adaptation_queue_); - other_resource_->Initialize(&encoder_queue_, &resource_adaptation_queue_); - rtc::Event event; - resource_adaptation_queue_.PostTask([this, &event] { - processor_->InitializeOnResourceAdaptationQueue(); - processor_->AddAdaptationListener(&processor_listener_); - processor_->AddResource(resource_); - processor_->AddResource(other_resource_); - event.Set(); - }); - event.Wait(rtc::Event::kForever); + video_stream_adapter_.get())) { + processor_->SetTaskQueue(TaskQueueBase::Current()); + video_stream_adapter_->AddRestrictionsListener(&restrictions_listener_); + processor_->AddResource(resource_); + processor_->AddResource(other_resource_); } ~ResourceAdaptationProcessorTest() override { - rtc::Event event; - resource_adaptation_queue_.PostTask([this, &event] { - processor_->StopResourceAdaptation(); - processor_->RemoveResource(resource_); - processor_->RemoveResource(other_resource_); - processor_->RemoveAdaptationListener(&processor_listener_); - processor_.reset(); - event.Set(); - }); - event.Wait(rtc::Event::kForever); + if (processor_) { + DestroyProcessor(); + } } void SetInputStates(bool has_input, int fps, int frame_size) { @@ -117,53 +118,53 @@ class ResourceAdaptationProcessorTest : public ::testing::Test { : restrictions.max_pixels_per_frame().value_or(kDefaultFrameSize)); } + void DestroyProcessor() { + if (resource_) { + processor_->RemoveResource(resource_); + } + if (other_resource_) { + processor_->RemoveResource(other_resource_); + } + video_stream_adapter_->RemoveRestrictionsListener(&restrictions_listener_); + processor_.reset(); + } + + static void 
WaitUntilTaskQueueIdle() { + ASSERT_TRUE(rtc::Thread::Current()->ProcessMessages(0)); + } + protected: - TaskQueueForTest resource_adaptation_queue_; - TaskQueueForTest encoder_queue_; FakeFrameRateProvider frame_rate_provider_; VideoStreamInputStateProvider input_state_provider_; rtc::scoped_refptr resource_; rtc::scoped_refptr other_resource_; + std::unique_ptr video_stream_adapter_; std::unique_ptr processor_; - ResourceAdaptationProcessorListenerForTesting processor_listener_; + VideoSourceRestrictionsListenerForTesting restrictions_listener_; }; } // namespace TEST_F(ResourceAdaptationProcessorTest, DisabledByDefault) { - resource_adaptation_queue_.SendTask( - [this] { - EXPECT_EQ(DegradationPreference::DISABLED, - processor_->degradation_preference()); - EXPECT_EQ(DegradationPreference::DISABLED, - processor_->effective_degradation_preference()); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - processor_->StartResourceAdaptation(); - // Adaptation does not happen when disabled. - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(0u, processor_listener_.restrictions_updated_count()); - }, - RTC_FROM_HERE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + // Adaptation does not happen when disabled. + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count()); } TEST_F(ResourceAdaptationProcessorTest, InsufficientInput) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - // Adaptation does not happen if input is insufficient. - // When frame size is missing (OnFrameSizeObserved not called yet). - input_state_provider_.OnHasInputChanged(true); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(0u, processor_listener_.restrictions_updated_count()); - // When "has input" is missing. 
- SetInputStates(false, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(0u, processor_listener_.restrictions_updated_count()); - // Note: frame rate cannot be missing, if unset it is 0. - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + // Adaptation does not happen if input is insufficient. + // When frame size is missing (OnFrameSizeObserved not called yet). + input_state_provider_.OnHasInputChanged(true); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count()); + // When "has input" is missing. + SetInputStates(false, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count()); + // Note: frame rate cannot be missing, if unset it is 0. } // These tests verify that restrictions are applied, but not exactly how much @@ -172,273 +173,565 @@ TEST_F(ResourceAdaptationProcessorTest, InsufficientInput) { // restrictions. For that, see video_stream_adapter_unittest.cc. 
TEST_F(ResourceAdaptationProcessorTest, OveruseTriggersRestrictingResolutionInMaintainFrameRate) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - EXPECT_TRUE(processor_listener_.restrictions() - .max_pixels_per_frame() - .has_value()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count()); + EXPECT_TRUE( + restrictions_listener_.restrictions().max_pixels_per_frame().has_value()); } TEST_F(ResourceAdaptationProcessorTest, OveruseTriggersRestrictingFrameRateInMaintainResolution) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_RESOLUTION); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - EXPECT_TRUE( - processor_listener_.restrictions().max_frame_rate().has_value()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_RESOLUTION); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count()); + EXPECT_TRUE( + restrictions_listener_.restrictions().max_frame_rate().has_value()); } TEST_F(ResourceAdaptationProcessorTest, 
OveruseTriggersRestrictingFrameRateAndResolutionInBalanced) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference(DegradationPreference::BALANCED); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - // Adapting multiple times eventually resticts both frame rate and - // resolution. Exactly many times we need to adapt depends on - // BalancedDegradationSettings, VideoStreamAdapter and default input - // states. This test requires it to be achieved within 4 adaptations. - for (size_t i = 0; i < 4; ++i) { - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(i + 1, processor_listener_.restrictions_updated_count()); - RestrictSource(processor_listener_.restrictions()); - } - EXPECT_TRUE(processor_listener_.restrictions() - .max_pixels_per_frame() - .has_value()); - EXPECT_TRUE( - processor_listener_.restrictions().max_frame_rate().has_value()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::BALANCED); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + // Adapting multiple times eventually resticts both frame rate and + // resolution. Exactly many times we need to adapt depends on + // BalancedDegradationSettings, VideoStreamAdapter and default input + // states. This test requires it to be achieved within 4 adaptations. 
+ for (size_t i = 0; i < 4; ++i) { + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(i + 1, restrictions_listener_.restrictions_updated_count()); + RestrictSource(restrictions_listener_.restrictions()); + } + EXPECT_TRUE( + restrictions_listener_.restrictions().max_pixels_per_frame().has_value()); + EXPECT_TRUE( + restrictions_listener_.restrictions().max_frame_rate().has_value()); } TEST_F(ResourceAdaptationProcessorTest, AwaitingPreviousAdaptation) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - // If we don't restrict the source then adaptation will not happen again - // due to "awaiting previous adaptation". This prevents "double-adapt". - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count()); + // If we don't restrict the source then adaptation will not happen again + // due to "awaiting previous adaptation". This prevents "double-adapt". 
+ resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count()); } TEST_F(ResourceAdaptationProcessorTest, CannotAdaptUpWhenUnrestricted) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(0u, processor_listener_.restrictions_updated_count()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count()); } TEST_F(ResourceAdaptationProcessorTest, UnderuseTakesUsBackToUnrestricted) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - RestrictSource(processor_listener_.restrictions()); - resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(2u, processor_listener_.restrictions_updated_count()); - EXPECT_EQ(VideoSourceRestrictions(), - processor_listener_.restrictions()); - }, - RTC_FROM_HERE); -} - -TEST_F(ResourceAdaptationProcessorTest, ResourcesCanPreventAdaptingUp) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - // Adapt down so that we can adapt up. 
- resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - RestrictSource(processor_listener_.restrictions()); - // Adapting up is prevented. - resource_->set_is_adaptation_up_allowed(false); - resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count()); + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(2u, restrictions_listener_.restrictions_updated_count()); + EXPECT_EQ(VideoSourceRestrictions(), restrictions_listener_.restrictions()); } TEST_F(ResourceAdaptationProcessorTest, ResourcesCanNotAdaptUpIfNeverAdaptedDown) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - RestrictSource(processor_listener_.restrictions()); - - // Other resource signals under-use - other_resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count()); + 
RestrictSource(restrictions_listener_.restrictions()); + + // Other resource signals under-use + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count()); } TEST_F(ResourceAdaptationProcessorTest, ResourcesCanNotAdaptUpIfNotAdaptedDownAfterReset) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - - processor_->ResetVideoSourceRestrictions(); - EXPECT_EQ(0, processor_listener_.adaptation_counters().Total()); - other_resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1, processor_listener_.adaptation_counters().Total()); - RestrictSource(processor_listener_.restrictions()); - - // resource_ did not overuse after we reset the restrictions, so adapt - // up should be disallowed. - resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(1, processor_listener_.adaptation_counters().Total()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1u, restrictions_listener_.restrictions_updated_count()); + + video_stream_adapter_->ClearRestrictions(); + EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total()); + other_resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + + // resource_ did not overuse after we reset the restrictions, so adapt + // up should be disallowed. 
+ resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); +} + +TEST_F(ResourceAdaptationProcessorTest, OnlyMostLimitedResourceMayAdaptUp) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + other_resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + + // |other_resource_| is most limited, resource_ can't adapt up. + resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + + // |resource_| and |other_resource_| are now most limited, so both must + // signal underuse to adapt up. 
+ other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); } TEST_F(ResourceAdaptationProcessorTest, MultipleResourcesCanTriggerMultipleAdaptations) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1, processor_listener_.adaptation_counters().Total()); - RestrictSource(processor_listener_.restrictions()); - other_resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(2, processor_listener_.adaptation_counters().Total()); - RestrictSource(processor_listener_.restrictions()); - other_resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(3, processor_listener_.adaptation_counters().Total()); - RestrictSource(processor_listener_.restrictions()); - - resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(2, processor_listener_.adaptation_counters().Total()); - RestrictSource(processor_listener_.restrictions()); - // Does not trigger adaptation since resource has no adaptations left. 
- resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(2, processor_listener_.adaptation_counters().Total()); - RestrictSource(processor_listener_.restrictions()); - - other_resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(1, processor_listener_.adaptation_counters().Total()); - RestrictSource(processor_listener_.restrictions()); - other_resource_->set_usage_state(ResourceUsageState::kUnderuse); - EXPECT_EQ(0, processor_listener_.adaptation_counters().Total()); - RestrictSource(processor_listener_.restrictions()); - }, - RTC_FROM_HERE); + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + other_resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + other_resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(3, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + + // resource_ is not most limited so can't adapt from underuse. + resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(3, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + // resource_ is still not most limited so can't adapt from underuse. 
+ resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + + // However it will be after overuse + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(3, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + + // Now other_resource_ can't adapt up as it is not most restricted. + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(3, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + + // resource_ is limited at 3 adaptations and other_resource_ 2. + // With the most limited resource signalling underuse in the following + // order we get back to unrestricted video. + resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + // Both resource_ and other_resource_ are most limited. + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + // Again both are most limited. 
+ resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total()); } -TEST_F(ResourceAdaptationProcessorTest, AdaptingTriggersOnAdaptationApplied) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, resource_->num_adaptations_applied()); - }, - RTC_FROM_HERE); +TEST_F(ResourceAdaptationProcessorTest, + MostLimitedResourceAdaptationWorksAfterChangingDegradataionPreference) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + // Adapt down until we can't anymore. + resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + int last_total = restrictions_listener_.adaptation_counters().Total(); + + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_RESOLUTION); + // resource_ can not adapt up since we have never reduced FPS. 
+ resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(last_total, restrictions_listener_.adaptation_counters().Total()); + + other_resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(last_total + 1, + restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + // other_resource_ is most limited so should be able to adapt up. + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(last_total, restrictions_listener_.adaptation_counters().Total()); } -TEST_F(ResourceAdaptationProcessorTest, AdaptingClearsResourceUsageState) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(1u, processor_listener_.restrictions_updated_count()); - EXPECT_FALSE(resource_->usage_state().has_value()); - }, - RTC_FROM_HERE); +TEST_F(ResourceAdaptationProcessorTest, + AdaptsDownWhenOtherResourceIsAlwaysUnderused) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + // Does not trigger adapataion because there's no restriction. + EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total()); + + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kOveruse); + // Adapts down even if other resource asked for adapting up. + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + + RestrictSource(restrictions_listener_.restrictions()); + other_resource_->SetUsageState(ResourceUsageState::kUnderuse); + // Doesn't adapt up because adaptation is due to another resource. 
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); } TEST_F(ResourceAdaptationProcessorTest, - FailingAdaptingAlsoClearsResourceUsageState) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference(DegradationPreference::DISABLED); - processor_->StartResourceAdaptation(); - resource_->set_usage_state(ResourceUsageState::kOveruse); - EXPECT_EQ(0u, processor_listener_.restrictions_updated_count()); - EXPECT_FALSE(resource_->usage_state().has_value()); - }, - RTC_FROM_HERE); + TriggerOveruseNotOnAdaptationTaskQueue) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + + TaskQueueForTest resource_task_queue("ResourceTaskQueue"); + resource_task_queue.PostTask(ToQueuedTask( + [&]() { resource_->SetUsageState(ResourceUsageState::kOveruse); })); + + EXPECT_EQ_WAIT(1u, restrictions_listener_.restrictions_updated_count(), + kDefaultTimeoutMs); } TEST_F(ResourceAdaptationProcessorTest, - AdaptsDownWhenOtherResourceIsAlwaysUnderused) { - resource_adaptation_queue_.SendTask( - [this] { - processor_->SetDegradationPreference( - DegradationPreference::MAINTAIN_FRAMERATE); - processor_->StartResourceAdaptation(); - SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); - other_resource_->set_usage_state(ResourceUsageState::kUnderuse); - // Does not trigger adapataion because there's no restriction. - EXPECT_EQ(0, processor_listener_.adaptation_counters().Total()); - - RestrictSource(processor_listener_.restrictions()); - resource_->set_usage_state(ResourceUsageState::kOveruse); - // Adapts down even if other resource asked for adapting up. 
- EXPECT_EQ(1, processor_listener_.adaptation_counters().Total());
-
- RestrictSource(processor_listener_.restrictions());
- other_resource_->set_usage_state(ResourceUsageState::kUnderuse);
- // Doesn't adapt up because adaptation is due to another resource.
- EXPECT_EQ(1, processor_listener_.adaptation_counters().Total());
- RestrictSource(processor_listener_.restrictions());
- },
- RTC_FROM_HERE);
+ DestroyProcessorWhileResourceListenerDelegateHasTaskInFlight) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ // Wait for |resource_| to signal overuse first so we know that the delegate
+ // has passed it on to the processor's task queue.
+ rtc::Event resource_event;
+ TaskQueueForTest resource_task_queue("ResourceTaskQueue");
+ resource_task_queue.PostTask(ToQueuedTask([&]() {
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ resource_event.Set();
+ }));
+
+ EXPECT_TRUE(resource_event.Wait(kDefaultTimeoutMs));
+ // Now destroy the processor while handling the overuse is in flight.
+ DestroyProcessor();
+
+ // Because the processor was destroyed by the time the delegate's task ran,
+ // the overuse signal must not have been handled.
+ EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count());
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ ResourceOveruseIgnoredWhenSignalledDuringRemoval) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ rtc::Event overuse_event;
+ TaskQueueForTest resource_task_queue("ResourceTaskQueue");
+ // Queues task for |resource_| overuse while |processor_| is still listening. 
+ resource_task_queue.PostTask(ToQueuedTask([&]() { + resource_->SetUsageState(ResourceUsageState::kOveruse); + overuse_event.Set(); + })); + EXPECT_TRUE(overuse_event.Wait(kDefaultTimeoutMs)); + // Once we know the overuse task is queued, remove |resource_| so that + // |processor_| is not listening to it. + processor_->RemoveResource(resource_); + + // Runs the queued task so |processor_| gets signalled kOveruse from + // |resource_| even though |processor_| was not listening. + WaitUntilTaskQueueIdle(); + + // No restrictions should change even though |resource_| signaled |kOveruse|. + EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count()); + + // Delete |resource_| for cleanup. + resource_ = nullptr; +} + +TEST_F(ResourceAdaptationProcessorTest, + RemovingOnlyAdaptedResourceResetsAdaptation) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + RestrictSource(restrictions_listener_.restrictions()); + + processor_->RemoveResource(resource_); + EXPECT_EQ(0, restrictions_listener_.adaptation_counters().Total()); + + // Delete |resource_| for cleanup. 
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingMostLimitedResourceSetsAdaptationToNextLimitedLevel) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::BALANCED);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ VideoSourceRestrictions next_limited_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters next_limited_counters =
+ restrictions_listener_.adaptation_counters();
+
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ // Removing most limited |resource_| should revert us back to the next level.
+ processor_->RemoveResource(resource_);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ EXPECT_EQ(next_limited_restrictions, restrictions_listener_.restrictions());
+ EXPECT_EQ(next_limited_counters,
+ restrictions_listener_.adaptation_counters());
+
+ // Delete |resource_| for cleanup.
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingMostLimitedResourceSetsAdaptationIfInputStateUnchanged) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ VideoSourceRestrictions next_limited_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters next_limited_counters =
+ restrictions_listener_.adaptation_counters();
+
+ // Overuse twice and underuse once. 
After the underuse we don't restrict the
+ // source. Normally this would block future underuses.
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ resource_->SetUsageState(ResourceUsageState::kUnderuse);
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ // Removing most limited |resource_| should revert us to the next level, even
+ // though we did not call RestrictSource() after |resource_| was overused.
+ // Normally adaptation for MAINTAIN_FRAMERATE would be blocked here but for
+ // removal we allow this anyways.
+ processor_->RemoveResource(resource_);
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ EXPECT_EQ(next_limited_restrictions, restrictions_listener_.restrictions());
+ EXPECT_EQ(next_limited_counters,
+ restrictions_listener_.adaptation_counters());
+
+ // Delete |resource_| for cleanup. 
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingResourceNotMostLimitedHasNoEffectOnLimitations) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::BALANCED);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ VideoSourceRestrictions current_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters current_counters =
+ restrictions_listener_.adaptation_counters();
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ // Removing |other_resource_| which is not most limited has no effect.
+ processor_->RemoveResource(other_resource_);
+ EXPECT_EQ(current_restrictions, restrictions_listener_.restrictions());
+ EXPECT_EQ(current_counters, restrictions_listener_.adaptation_counters());
+
+ // Delete |other_resource_| for cleanup. 
+ other_resource_ = nullptr; +} + +TEST_F(ResourceAdaptationProcessorTest, + RemovingMostLimitedResourceAfterSwitchingDegradationPreferences) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + + other_resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + VideoSourceRestrictions next_limited_restrictions = + restrictions_listener_.restrictions(); + VideoAdaptationCounters next_limited_counters = + restrictions_listener_.adaptation_counters(); + + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_RESOLUTION); + resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total()); + + // Revert to |other_resource_| when removing |resource_| even though the + // degradation preference was different when it was overused. + processor_->RemoveResource(resource_); + EXPECT_EQ(next_limited_counters, + restrictions_listener_.adaptation_counters()); + + // After switching back to MAINTAIN_FRAMERATE, the next most limited settings + // are restored. + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + EXPECT_EQ(next_limited_restrictions, restrictions_listener_.restrictions()); + + // Delete |resource_| for cleanup. 
+ resource_ = nullptr;
+}
+
+TEST_F(ResourceAdaptationProcessorTest,
+ RemovingMostLimitedResourceSetsNextLimitationsInDisabled) {
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize);
+
+ other_resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ VideoSourceRestrictions next_limited_restrictions =
+ restrictions_listener_.restrictions();
+ VideoAdaptationCounters next_limited_counters =
+ restrictions_listener_.adaptation_counters();
+ EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total());
+ resource_->SetUsageState(ResourceUsageState::kOveruse);
+ RestrictSource(restrictions_listener_.restrictions());
+ EXPECT_EQ(2, restrictions_listener_.adaptation_counters().Total());
+
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::DISABLED);
+
+ // Revert to |other_resource_| when removing |resource_| even though the
+ // current degradation preference is disabled.
+ processor_->RemoveResource(resource_);
+
+ // After switching back to MAINTAIN_FRAMERATE, the next most limited settings
+ // are restored.
+ video_stream_adapter_->SetDegradationPreference(
+ DegradationPreference::MAINTAIN_FRAMERATE);
+ EXPECT_EQ(next_limited_restrictions, restrictions_listener_.restrictions());
+ EXPECT_EQ(next_limited_counters,
+ restrictions_listener_.adaptation_counters());
+
+ // Delete |resource_| for cleanup. 
+ resource_ = nullptr; +} + +TEST_F(ResourceAdaptationProcessorTest, + RemovedResourceSignalsIgnoredByProcessor) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + + processor_->RemoveResource(resource_); + resource_->SetUsageState(ResourceUsageState::kOveruse); + EXPECT_EQ(0u, restrictions_listener_.restrictions_updated_count()); + + // Delete |resource_| for cleanup. + resource_ = nullptr; +} + +TEST_F(ResourceAdaptationProcessorTest, + RemovingResourceWhenMultipleMostLimtedHasNoEffect) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + + other_resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + // Adapt |resource_| up and then down so that both resource's are most + // limited at 1 adaptation. + resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + resource_->SetUsageState(ResourceUsageState::kUnderuse); + RestrictSource(restrictions_listener_.restrictions()); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + + // Removing |resource_| has no effect since both |resource_| and + // |other_resource_| are most limited. + processor_->RemoveResource(resource_); + EXPECT_EQ(1, restrictions_listener_.adaptation_counters().Total()); + + // Delete |resource_| for cleanup. 
+ resource_ = nullptr; +} + +TEST_F(ResourceAdaptationProcessorTest, + ResourceOverusedAtLimitReachedWillShareMostLimited) { + video_stream_adapter_->SetDegradationPreference( + DegradationPreference::MAINTAIN_FRAMERATE); + SetInputStates(true, kDefaultFrameRate, kDefaultFrameSize); + + bool has_reached_min_pixels = false; + ON_CALL(frame_rate_provider_, OnMinPixelLimitReached()) + .WillByDefault(testing::Assign(&has_reached_min_pixels, true)); + + // Adapt 10 times, which should make us hit the limit. + for (int i = 0; i < 10; ++i) { + resource_->SetUsageState(ResourceUsageState::kOveruse); + RestrictSource(restrictions_listener_.restrictions()); + } + EXPECT_TRUE(has_reached_min_pixels); + auto last_update_count = restrictions_listener_.restrictions_updated_count(); + other_resource_->SetUsageState(ResourceUsageState::kOveruse); + // Now both |resource_| and |other_resource_| are most limited. Underuse of + // |resource_| will not adapt up. + resource_->SetUsageState(ResourceUsageState::kUnderuse); + EXPECT_EQ(last_update_count, + restrictions_listener_.restrictions_updated_count()); } } // namespace webrtc diff --git a/call/adaptation/resource_unittest.cc b/call/adaptation/resource_unittest.cc index 9436a02a64..a2291dfdce 100644 --- a/call/adaptation/resource_unittest.cc +++ b/call/adaptation/resource_unittest.cc @@ -8,14 +8,13 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "call/adaptation/resource.h" +#include "api/adaptation/resource.h" #include #include "api/scoped_refptr.h" #include "call/adaptation/test/fake_resource.h" -#include "rtc_base/event.h" -#include "rtc_base/task_queue_for_test.h" +#include "call/adaptation/test/mock_resource_listener.h" #include "test/gmock.h" #include "test/gtest.h" @@ -24,57 +23,33 @@ namespace webrtc { using ::testing::_; using ::testing::StrictMock; -class MockResourceListener : public ResourceListener { - public: - MOCK_METHOD(void, - OnResourceUsageStateMeasured, - (rtc::scoped_refptr resource), - (override)); -}; - class ResourceTest : public ::testing::Test { public: - ResourceTest() - : resource_adaptation_queue_("ResourceAdaptationQueue"), - encoder_queue_("EncoderQueue"), - fake_resource_(new FakeResource("FakeResource")) { - fake_resource_->Initialize(&encoder_queue_, &resource_adaptation_queue_); - } + ResourceTest() : fake_resource_(FakeResource::Create("FakeResource")) {} protected: - const std::unique_ptr task_queue_factory_; - TaskQueueForTest resource_adaptation_queue_; - TaskQueueForTest encoder_queue_; rtc::scoped_refptr fake_resource_; }; TEST_F(ResourceTest, RegisteringListenerReceivesCallbacks) { - resource_adaptation_queue_.SendTask( - [this] { - StrictMock resource_listener; - fake_resource_->SetResourceListener(&resource_listener); - EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_)) - .Times(1) - .WillOnce([](rtc::scoped_refptr resource) { - EXPECT_EQ(ResourceUsageState::kOveruse, resource->usage_state()); - }); - fake_resource_->set_usage_state(ResourceUsageState::kOveruse); - fake_resource_->SetResourceListener(nullptr); - }, - RTC_FROM_HERE); + StrictMock resource_listener; + fake_resource_->SetResourceListener(&resource_listener); + EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(ResourceUsageState::kOveruse, usage_state); + 
}); + fake_resource_->SetUsageState(ResourceUsageState::kOveruse); + fake_resource_->SetResourceListener(nullptr); } TEST_F(ResourceTest, UnregisteringListenerStopsCallbacks) { - resource_adaptation_queue_.SendTask( - [this] { - StrictMock resource_listener; - fake_resource_->SetResourceListener(&resource_listener); - fake_resource_->SetResourceListener(nullptr); - EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_)) - .Times(0); - fake_resource_->set_usage_state(ResourceUsageState::kOveruse); - }, - RTC_FROM_HERE); + StrictMock resource_listener; + fake_resource_->SetResourceListener(&resource_listener); + fake_resource_->SetResourceListener(nullptr); + EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_, _)).Times(0); + fake_resource_->SetUsageState(ResourceUsageState::kOveruse); } } // namespace webrtc diff --git a/call/adaptation/test/fake_adaptation_constraint.cc b/call/adaptation/test/fake_adaptation_constraint.cc new file mode 100644 index 0000000000..18b8e8b696 --- /dev/null +++ b/call/adaptation/test/fake_adaptation_constraint.cc @@ -0,0 +1,38 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "call/adaptation/test/fake_adaptation_constraint.h" + +#include + +namespace webrtc { + +FakeAdaptationConstraint::FakeAdaptationConstraint(std::string name) + : name_(std::move(name)), is_adaptation_up_allowed_(true) {} + +FakeAdaptationConstraint::~FakeAdaptationConstraint() = default; + +void FakeAdaptationConstraint::set_is_adaptation_up_allowed( + bool is_adaptation_up_allowed) { + is_adaptation_up_allowed_ = is_adaptation_up_allowed; +} + +std::string FakeAdaptationConstraint::Name() const { + return name_; +} + +bool FakeAdaptationConstraint::IsAdaptationUpAllowed( + const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions_before, + const VideoSourceRestrictions& restrictions_after) const { + return is_adaptation_up_allowed_; +} + +} // namespace webrtc diff --git a/call/adaptation/test/fake_adaptation_constraint.h b/call/adaptation/test/fake_adaptation_constraint.h new file mode 100644 index 0000000000..021e46a501 --- /dev/null +++ b/call/adaptation/test/fake_adaptation_constraint.h @@ -0,0 +1,41 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_ADAPTATION_TEST_FAKE_ADAPTATION_CONSTRAINT_H_ +#define CALL_ADAPTATION_TEST_FAKE_ADAPTATION_CONSTRAINT_H_ + +#include + +#include "call/adaptation/adaptation_constraint.h" + +namespace webrtc { + +class FakeAdaptationConstraint : public AdaptationConstraint { + public: + explicit FakeAdaptationConstraint(std::string name); + ~FakeAdaptationConstraint() override; + + void set_is_adaptation_up_allowed(bool is_adaptation_up_allowed); + + // AdaptationConstraint implementation. 
+ std::string Name() const override; + bool IsAdaptationUpAllowed( + const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions_before, + const VideoSourceRestrictions& restrictions_after) const override; + + private: + const std::string name_; + bool is_adaptation_up_allowed_; +}; + +} // namespace webrtc + +#endif // CALL_ADAPTATION_TEST_FAKE_ADAPTATION_CONSTRAINT_H_ diff --git a/call/adaptation/test/fake_resource.cc b/call/adaptation/test/fake_resource.cc index 4c0a129d04..d125468cb6 100644 --- a/call/adaptation/test/fake_resource.cc +++ b/call/adaptation/test/fake_resource.cc @@ -10,44 +10,35 @@ #include "call/adaptation/test/fake_resource.h" +#include #include +#include "rtc_base/ref_counted_object.h" + namespace webrtc { +// static +rtc::scoped_refptr FakeResource::Create(std::string name) { + return rtc::make_ref_counted(name); +} + FakeResource::FakeResource(std::string name) - : rtc::RefCountedObject(), - name_(std::move(name)), - is_adaptation_up_allowed_(true), - num_adaptations_applied_(0) {} + : Resource(), name_(std::move(name)), listener_(nullptr) {} FakeResource::~FakeResource() {} -void FakeResource::set_usage_state(ResourceUsageState usage_state) { - OnResourceUsageStateMeasured(usage_state); -} - -void FakeResource::set_is_adaptation_up_allowed(bool is_adaptation_up_allowed) { - is_adaptation_up_allowed_ = is_adaptation_up_allowed; -} - -size_t FakeResource::num_adaptations_applied() const { - return num_adaptations_applied_; +void FakeResource::SetUsageState(ResourceUsageState usage_state) { + if (listener_) { + listener_->OnResourceUsageStateMeasured(this, usage_state); + } } -bool FakeResource::IsAdaptationUpAllowed( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const { - return is_adaptation_up_allowed_; +std::string FakeResource::Name() const { + return name_; } -void 
FakeResource::OnAdaptationApplied( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) { - ++num_adaptations_applied_; +void FakeResource::SetResourceListener(ResourceListener* listener) { + listener_ = listener; } } // namespace webrtc diff --git a/call/adaptation/test/fake_resource.h b/call/adaptation/test/fake_resource.h index beaca54614..e88d97db7a 100644 --- a/call/adaptation/test/fake_resource.h +++ b/call/adaptation/test/fake_resource.h @@ -12,39 +12,31 @@ #define CALL_ADAPTATION_TEST_FAKE_RESOURCE_H_ #include +#include -#include "call/adaptation/resource.h" -#include "rtc_base/ref_counted_object.h" +#include "absl/types/optional.h" +#include "api/adaptation/resource.h" +#include "api/scoped_refptr.h" namespace webrtc { // Fake resource used for testing. -class FakeResource : public rtc::RefCountedObject { +class FakeResource : public Resource { public: + static rtc::scoped_refptr Create(std::string name); + explicit FakeResource(std::string name); ~FakeResource() override; - void set_usage_state(ResourceUsageState usage_state); - void set_is_adaptation_up_allowed(bool is_adaptation_up_allowed); - size_t num_adaptations_applied() const; + void SetUsageState(ResourceUsageState usage_state); // Resource implementation. 
- std::string name() const override { return name_; } - bool IsAdaptationUpAllowed( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const override; - void OnAdaptationApplied( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) override; + std::string Name() const override; + void SetResourceListener(ResourceListener* listener) override; private: const std::string name_; - bool is_adaptation_up_allowed_; - size_t num_adaptations_applied_; + ResourceListener* listener_; }; } // namespace webrtc diff --git a/call/adaptation/test/fake_video_stream_input_state_provider.cc b/call/adaptation/test/fake_video_stream_input_state_provider.cc new file mode 100644 index 0000000000..ce92dfb204 --- /dev/null +++ b/call/adaptation/test/fake_video_stream_input_state_provider.cc @@ -0,0 +1,35 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "call/adaptation/test/fake_video_stream_input_state_provider.h" + +namespace webrtc { + +FakeVideoStreamInputStateProvider::FakeVideoStreamInputStateProvider() + : VideoStreamInputStateProvider(nullptr) {} + +FakeVideoStreamInputStateProvider::~FakeVideoStreamInputStateProvider() = + default; + +void FakeVideoStreamInputStateProvider::SetInputState( + int input_pixels, + int input_fps, + int min_pixels_per_frame) { + fake_input_state_.set_has_input(true); + fake_input_state_.set_frame_size_pixels(input_pixels); + fake_input_state_.set_frames_per_second(input_fps); + fake_input_state_.set_min_pixels_per_frame(min_pixels_per_frame); +} + +VideoStreamInputState FakeVideoStreamInputStateProvider::InputState() { + return fake_input_state_; +} + +} // namespace webrtc diff --git a/call/adaptation/test/fake_video_stream_input_state_provider.h b/call/adaptation/test/fake_video_stream_input_state_provider.h new file mode 100644 index 0000000000..93f7dba7e6 --- /dev/null +++ b/call/adaptation/test/fake_video_stream_input_state_provider.h @@ -0,0 +1,32 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef CALL_ADAPTATION_TEST_FAKE_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_ +#define CALL_ADAPTATION_TEST_FAKE_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_ + +#include "call/adaptation/video_stream_input_state_provider.h" + +namespace webrtc { + +class FakeVideoStreamInputStateProvider : public VideoStreamInputStateProvider { + public: + FakeVideoStreamInputStateProvider(); + virtual ~FakeVideoStreamInputStateProvider(); + + void SetInputState(int input_pixels, int input_fps, int min_pixels_per_frame); + VideoStreamInputState InputState() override; + + private: + VideoStreamInputState fake_input_state_; +}; + +} // namespace webrtc + +#endif // CALL_ADAPTATION_TEST_FAKE_VIDEO_STREAM_INPUT_STATE_PROVIDER_H_ diff --git a/call/adaptation/test/mock_resource_listener.h b/call/adaptation/test/mock_resource_listener.h new file mode 100644 index 0000000000..f0f998f2e3 --- /dev/null +++ b/call/adaptation/test/mock_resource_listener.h @@ -0,0 +1,31 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef CALL_ADAPTATION_TEST_MOCK_RESOURCE_LISTENER_H_ +#define CALL_ADAPTATION_TEST_MOCK_RESOURCE_LISTENER_H_ + +#include "api/adaptation/resource.h" + +#include "test/gmock.h" + +namespace webrtc { + +class MockResourceListener : public ResourceListener { + public: + MOCK_METHOD(void, + OnResourceUsageStateMeasured, + (rtc::scoped_refptr resource, + ResourceUsageState usage_state), + (override)); +}; + +} // namespace webrtc + +#endif // CALL_ADAPTATION_TEST_MOCK_RESOURCE_LISTENER_H_ diff --git a/call/adaptation/video_source_restrictions.cc b/call/adaptation/video_source_restrictions.cc index 6fbdcb42a6..e9d6c26137 100644 --- a/call/adaptation/video_source_restrictions.cc +++ b/call/adaptation/video_source_restrictions.cc @@ -13,6 +13,7 @@ #include #include "rtc_base/checks.h" +#include "rtc_base/strings/string_builder.h" namespace webrtc { @@ -36,6 +37,19 @@ VideoSourceRestrictions::VideoSourceRestrictions( RTC_DCHECK(!max_frame_rate_.has_value() || max_frame_rate_.value() > 0.0); } +std::string VideoSourceRestrictions::ToString() const { + rtc::StringBuilder ss; + ss << "{"; + if (max_frame_rate_) + ss << " max_fps=" << max_frame_rate_.value(); + if (max_pixels_per_frame_) + ss << " max_pixels_per_frame=" << max_pixels_per_frame_.value(); + if (target_pixels_per_frame_) + ss << " target_pixels_per_frame=" << target_pixels_per_frame_.value(); + ss << " }"; + return ss.Release(); +} + const absl::optional& VideoSourceRestrictions::max_pixels_per_frame() const { return max_pixels_per_frame_; diff --git a/call/adaptation/video_source_restrictions.h b/call/adaptation/video_source_restrictions.h index 506bae6133..7f79a48e5d 100644 --- a/call/adaptation/video_source_restrictions.h +++ b/call/adaptation/video_source_restrictions.h @@ -11,6 +11,7 @@ #ifndef CALL_ADAPTATION_VIDEO_SOURCE_RESTRICTIONS_H_ #define CALL_ADAPTATION_VIDEO_SOURCE_RESTRICTIONS_H_ +#include #include #include "absl/types/optional.h" @@ -38,6 +39,8 @@ class VideoSourceRestrictions { return 
!(*this == rhs); } + std::string ToString() const; + // The source must produce a resolution less than or equal to // max_pixels_per_frame(). const absl::optional& max_pixels_per_frame() const; diff --git a/call/adaptation/video_stream_adapter.cc b/call/adaptation/video_stream_adapter.cc index 4ebe00fb0c..64e1a77786 100644 --- a/call/adaptation/video_stream_adapter.cc +++ b/call/adaptation/video_stream_adapter.cc @@ -15,8 +15,14 @@ #include #include "absl/types/optional.h" +#include "absl/types/variant.h" +#include "api/sequence_checker.h" +#include "api/video/video_adaptation_counters.h" #include "api/video/video_adaptation_reason.h" #include "api/video_codecs/video_encoder.h" +#include "call/adaptation/video_source_restrictions.h" +#include "call/adaptation/video_stream_input_state.h" +#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" @@ -27,13 +33,6 @@ const int kMinFrameRateFps = 2; namespace { -// Generate suggested higher and lower frame rates and resolutions, to be -// applied to the VideoSourceRestrictor. These are used in "maintain-resolution" -// and "maintain-framerate". The "balanced" degradation preference also makes -// use of BalancedDegradationPreference when generating suggestions. The -// VideoSourceRestrictor decidedes whether or not a proposed adaptation is -// valid. - // For frame rate, the steps we take are 2/3 (down) and 3/2 (up). int GetLowerFrameRateThan(int fps) { RTC_DCHECK(fps != std::numeric_limits::max()); @@ -46,21 +45,72 @@ int GetHigherFrameRateThan(int fps) { : std::numeric_limits::max(); } -// For resolution, the steps we take are 3/5 (down) and 5/3 (up). -// Notice the asymmetry of which restriction property is set depending on if -// we are adapting up or down: -// - VideoSourceRestrictor::DecreaseResolution() sets the max_pixels_per_frame() -// to the desired target and target_pixels_per_frame() to null. 
-// - VideoSourceRestrictor::IncreaseResolutionTo() sets the -// target_pixels_per_frame() to the desired target, and max_pixels_per_frame() -// is set according to VideoSourceRestrictor::GetIncreasedMaxPixelsWanted(). -int GetLowerResolutionThan(int pixel_count) { - RTC_DCHECK(pixel_count != std::numeric_limits::max()); - return (pixel_count * 3) / 5; +int GetIncreasedMaxPixelsWanted(int target_pixels) { + if (target_pixels == std::numeric_limits::max()) + return std::numeric_limits::max(); + // When we decrease resolution, we go down to at most 3/5 of current pixels. + // Thus to increase resolution, we need 3/5 to get back to where we started. + // When going up, the desired max_pixels_per_frame() has to be significantly + // higher than the target because the source's native resolutions might not + // match the target. We pick 12/5 of the target. + // + // (This value was historically 4 times the old target, which is (3/5)*4 of + // the new target - or 12/5 - assuming the target is adjusted according to + // the above steps.) 
+ RTC_DCHECK(target_pixels != std::numeric_limits::max()); + return (target_pixels * 12) / 5; +} + +bool CanDecreaseResolutionTo(int target_pixels, + int target_pixels_min, + const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions) { + int max_pixels_per_frame = + rtc::dchecked_cast(restrictions.max_pixels_per_frame().value_or( + std::numeric_limits::max())); + return target_pixels < max_pixels_per_frame && + target_pixels_min >= input_state.min_pixels_per_frame(); +} + +bool CanIncreaseResolutionTo(int target_pixels, + const VideoSourceRestrictions& restrictions) { + int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels); + int max_pixels_per_frame = + rtc::dchecked_cast(restrictions.max_pixels_per_frame().value_or( + std::numeric_limits::max())); + return max_pixels_wanted > max_pixels_per_frame; +} + +bool CanDecreaseFrameRateTo(int max_frame_rate, + const VideoSourceRestrictions& restrictions) { + const int fps_wanted = std::max(kMinFrameRateFps, max_frame_rate); + return fps_wanted < + rtc::dchecked_cast(restrictions.max_frame_rate().value_or( + std::numeric_limits::max())); +} + +bool CanIncreaseFrameRateTo(int max_frame_rate, + const VideoSourceRestrictions& restrictions) { + return max_frame_rate > + rtc::dchecked_cast(restrictions.max_frame_rate().value_or( + std::numeric_limits::max())); +} + +bool MinPixelLimitReached(const VideoStreamInputState& input_state) { + if (input_state.single_active_stream_pixels().has_value()) { + return GetLowerResolutionThan( + input_state.single_active_stream_pixels().value()) < + input_state.min_pixels_per_frame(); + } + return input_state.frame_size_pixels().has_value() && + GetLowerResolutionThan(input_state.frame_size_pixels().value()) < + input_state.min_pixels_per_frame(); } } // namespace +VideoSourceRestrictionsListener::~VideoSourceRestrictionsListener() = default; + VideoSourceRestrictions FilterRestrictionsByDegradationPreference( VideoSourceRestrictions 
source_restrictions, DegradationPreference degradation_preference) { @@ -82,26 +132,17 @@ VideoSourceRestrictions FilterRestrictionsByDegradationPreference( return source_restrictions; } -VideoAdaptationCounters FilterVideoAdaptationCountersByDegradationPreference( - VideoAdaptationCounters counters, - DegradationPreference degradation_preference) { - switch (degradation_preference) { - case DegradationPreference::BALANCED: - break; - case DegradationPreference::MAINTAIN_FRAMERATE: - counters.fps_adaptations = 0; - break; - case DegradationPreference::MAINTAIN_RESOLUTION: - counters.resolution_adaptations = 0; - break; - case DegradationPreference::DISABLED: - counters.resolution_adaptations = 0; - counters.fps_adaptations = 0; - break; - default: - RTC_NOTREACHED(); - } - return counters; +// For resolution, the steps we take are 3/5 (down) and 5/3 (up). +// Notice the asymmetry of which restriction property is set depending on if +// we are adapting up or down: +// - VideoSourceRestrictor::DecreaseResolution() sets the max_pixels_per_frame() +// to the desired target and target_pixels_per_frame() to null. +// - VideoSourceRestrictor::IncreaseResolutionTo() sets the +// target_pixels_per_frame() to the desired target, and max_pixels_per_frame() +// is set according to VideoSourceRestrictor::GetIncreasedMaxPixelsWanted(). +int GetLowerResolutionThan(int pixel_count) { + RTC_DCHECK(pixel_count != std::numeric_limits::max()); + return (pixel_count * 3) / 5; } // TODO(hbos): Use absl::optional<> instead? 
@@ -111,38 +152,37 @@ int GetHigherResolutionThan(int pixel_count) { : std::numeric_limits::max(); } -Adaptation::Step::Step(StepType type, int target) - : type(type), target(target) {} - -Adaptation::Adaptation(int validation_id, Step step) - : validation_id_(validation_id), - status_(Status::kValid), - step_(std::move(step)), - min_pixel_limit_reached_(false) {} +// static +const char* Adaptation::StatusToString(Adaptation::Status status) { + switch (status) { + case Adaptation::Status::kValid: + return "kValid"; + case Adaptation::Status::kLimitReached: + return "kLimitReached"; + case Adaptation::Status::kAwaitingPreviousAdaptation: + return "kAwaitingPreviousAdaptation"; + case Status::kInsufficientInput: + return "kInsufficientInput"; + case Status::kAdaptationDisabled: + return "kAdaptationDisabled"; + case Status::kRejectedByConstraint: + return "kRejectedByConstraint"; + } + RTC_CHECK_NOTREACHED(); +} Adaptation::Adaptation(int validation_id, - Step step, - bool min_pixel_limit_reached) + VideoSourceRestrictions restrictions, + VideoAdaptationCounters counters, + VideoStreamInputState input_state) : validation_id_(validation_id), status_(Status::kValid), - step_(std::move(step)), - min_pixel_limit_reached_(min_pixel_limit_reached) {} + input_state_(std::move(input_state)), + restrictions_(std::move(restrictions)), + counters_(std::move(counters)) {} Adaptation::Adaptation(int validation_id, Status invalid_status) - : validation_id_(validation_id), - status_(invalid_status), - step_(absl::nullopt), - min_pixel_limit_reached_(false) { - RTC_DCHECK_NE(status_, Status::kValid); -} - -Adaptation::Adaptation(int validation_id, - Status invalid_status, - bool min_pixel_limit_reached) - : validation_id_(validation_id), - status_(invalid_status), - step_(absl::nullopt), - min_pixel_limit_reached_(min_pixel_limit_reached) { + : validation_id_(validation_id), status_(invalid_status) { RTC_DCHECK_NE(status_, Status::kValid); } @@ -150,380 +190,552 @@ 
Adaptation::Status Adaptation::status() const { return status_; } -bool Adaptation::min_pixel_limit_reached() const { - return min_pixel_limit_reached_; +const VideoStreamInputState& Adaptation::input_state() const { + return input_state_; } -const Adaptation::Step& Adaptation::step() const { - RTC_DCHECK_EQ(status_, Status::kValid); - return step_.value(); +const VideoSourceRestrictions& Adaptation::restrictions() const { + return restrictions_; } -// VideoSourceRestrictor is responsible for keeping track of current -// VideoSourceRestrictions. -class VideoStreamAdapter::VideoSourceRestrictor { - public: - VideoSourceRestrictor() {} - - VideoSourceRestrictions source_restrictions() const { - return source_restrictions_; - } - const VideoAdaptationCounters& adaptation_counters() const { - return adaptations_; - } - void ClearRestrictions() { - source_restrictions_ = VideoSourceRestrictions(); - adaptations_ = VideoAdaptationCounters(); - } - - void set_min_pixels_per_frame(int min_pixels_per_frame) { - min_pixels_per_frame_ = min_pixels_per_frame; - } - - int min_pixels_per_frame() const { return min_pixels_per_frame_; } - - bool CanDecreaseResolutionTo(int target_pixels) { - int max_pixels_per_frame = rtc::dchecked_cast( - source_restrictions_.max_pixels_per_frame().value_or( - std::numeric_limits::max())); - return target_pixels < max_pixels_per_frame && - target_pixels >= min_pixels_per_frame_; - } - - bool CanIncreaseResolutionTo(int target_pixels) { - int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels); - int max_pixels_per_frame = rtc::dchecked_cast( - source_restrictions_.max_pixels_per_frame().value_or( - std::numeric_limits::max())); - return max_pixels_wanted > max_pixels_per_frame; - } - - bool CanDecreaseFrameRateTo(int max_frame_rate) { - const int fps_wanted = std::max(kMinFrameRateFps, max_frame_rate); - return fps_wanted < rtc::dchecked_cast( - source_restrictions_.max_frame_rate().value_or( - std::numeric_limits::max())); - } - - 
bool CanIncreaseFrameRateTo(int max_frame_rate) { - return max_frame_rate > rtc::dchecked_cast( - source_restrictions_.max_frame_rate().value_or( - std::numeric_limits::max())); - } - - void ApplyAdaptationStep(const Adaptation::Step& step, - DegradationPreference degradation_preference) { - switch (step.type) { - case Adaptation::StepType::kIncreaseResolution: - IncreaseResolutionTo(step.target); - break; - case Adaptation::StepType::kDecreaseResolution: - DecreaseResolutionTo(step.target); - break; - case Adaptation::StepType::kIncreaseFrameRate: - IncreaseFrameRateTo(step.target); - // TODO(https://crbug.com/webrtc/11222): Don't adapt in two steps. - // GetAdaptationUp() should tell us the correct value, but BALANCED - // logic in DecrementFramerate() makes it hard to predict whether this - // will be the last step. Remove the dependency on - // adaptation_counters(). - if (degradation_preference == DegradationPreference::BALANCED && - adaptation_counters().fps_adaptations == 0 && - step.target != std::numeric_limits::max()) { - RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting."; - IncreaseFrameRateTo(std::numeric_limits::max()); - } - break; - case Adaptation::StepType::kDecreaseFrameRate: - DecreaseFrameRateTo(step.target); - break; - } - } - - private: - static int GetIncreasedMaxPixelsWanted(int target_pixels) { - if (target_pixels == std::numeric_limits::max()) - return std::numeric_limits::max(); - // When we decrease resolution, we go down to at most 3/5 of current pixels. - // Thus to increase resolution, we need 3/5 to get back to where we started. - // When going up, the desired max_pixels_per_frame() has to be significantly - // higher than the target because the source's native resolutions might not - // match the target. We pick 12/5 of the target. - // - // (This value was historically 4 times the old target, which is (3/5)*4 of - // the new target - or 12/5 - assuming the target is adjusted according to - // the above steps.) 
- RTC_DCHECK(target_pixels != std::numeric_limits::max()); - return (target_pixels * 12) / 5; - } - - void DecreaseResolutionTo(int target_pixels) { - RTC_DCHECK(CanDecreaseResolutionTo(target_pixels)); - RTC_LOG(LS_INFO) << "Scaling down resolution, max pixels: " - << target_pixels; - source_restrictions_.set_max_pixels_per_frame( - target_pixels != std::numeric_limits::max() - ? absl::optional(target_pixels) - : absl::nullopt); - source_restrictions_.set_target_pixels_per_frame(absl::nullopt); - ++adaptations_.resolution_adaptations; - } - - void IncreaseResolutionTo(int target_pixels) { - RTC_DCHECK(CanIncreaseResolutionTo(target_pixels)); - int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels); - RTC_LOG(LS_INFO) << "Scaling up resolution, max pixels: " - << max_pixels_wanted; - source_restrictions_.set_max_pixels_per_frame( - max_pixels_wanted != std::numeric_limits::max() - ? absl::optional(max_pixels_wanted) - : absl::nullopt); - source_restrictions_.set_target_pixels_per_frame( - max_pixels_wanted != std::numeric_limits::max() - ? absl::optional(target_pixels) - : absl::nullopt); - --adaptations_.resolution_adaptations; - RTC_DCHECK_GE(adaptations_.resolution_adaptations, 0); - } - - void DecreaseFrameRateTo(int max_frame_rate) { - RTC_DCHECK(CanDecreaseFrameRateTo(max_frame_rate)); - max_frame_rate = std::max(kMinFrameRateFps, max_frame_rate); - RTC_LOG(LS_INFO) << "Scaling down framerate: " << max_frame_rate; - source_restrictions_.set_max_frame_rate( - max_frame_rate != std::numeric_limits::max() - ? absl::optional(max_frame_rate) - : absl::nullopt); - ++adaptations_.fps_adaptations; - } - - void IncreaseFrameRateTo(int max_frame_rate) { - RTC_DCHECK(CanIncreaseFrameRateTo(max_frame_rate)); - RTC_LOG(LS_INFO) << "Scaling up framerate: " << max_frame_rate; - source_restrictions_.set_max_frame_rate( - max_frame_rate != std::numeric_limits::max() - ? 
absl::optional(max_frame_rate) - : absl::nullopt); - --adaptations_.fps_adaptations; - RTC_DCHECK_GE(adaptations_.fps_adaptations, 0); - } - - // Needed by CanDecreaseResolutionTo(). - int min_pixels_per_frame_ = 0; - // Current State. - VideoSourceRestrictions source_restrictions_; - VideoAdaptationCounters adaptations_; -}; +const VideoAdaptationCounters& Adaptation::counters() const { + return counters_; +} -VideoStreamAdapter::VideoStreamAdapter() - : source_restrictor_(std::make_unique()), - balanced_settings_(), +VideoStreamAdapter::VideoStreamAdapter( + VideoStreamInputStateProvider* input_state_provider, + VideoStreamEncoderObserver* encoder_stats_observer) + : input_state_provider_(input_state_provider), + encoder_stats_observer_(encoder_stats_observer), adaptation_validation_id_(0), degradation_preference_(DegradationPreference::DISABLED), - input_state_(), - last_adaptation_request_(absl::nullopt) {} + awaiting_frame_size_change_(absl::nullopt) { + sequence_checker_.Detach(); + RTC_DCHECK(input_state_provider_); + RTC_DCHECK(encoder_stats_observer_); +} -VideoStreamAdapter::~VideoStreamAdapter() {} +VideoStreamAdapter::~VideoStreamAdapter() { + RTC_DCHECK(adaptation_constraints_.empty()) + << "There are constaint(s) attached to a VideoStreamAdapter being " + "destroyed."; +} VideoSourceRestrictions VideoStreamAdapter::source_restrictions() const { - return source_restrictor_->source_restrictions(); + RTC_DCHECK_RUN_ON(&sequence_checker_); + return current_restrictions_.restrictions; } const VideoAdaptationCounters& VideoStreamAdapter::adaptation_counters() const { - return source_restrictor_->adaptation_counters(); + RTC_DCHECK_RUN_ON(&sequence_checker_); + return current_restrictions_.counters; } void VideoStreamAdapter::ClearRestrictions() { + RTC_DCHECK_RUN_ON(&sequence_checker_); // Invalidate any previously returned Adaptation. 
+ RTC_LOG(INFO) << "Resetting restrictions"; ++adaptation_validation_id_; - source_restrictor_->ClearRestrictions(); - last_adaptation_request_.reset(); + current_restrictions_ = {VideoSourceRestrictions(), + VideoAdaptationCounters()}; + awaiting_frame_size_change_ = absl::nullopt; + BroadcastVideoRestrictionsUpdate(input_state_provider_->InputState(), + nullptr); +} + +void VideoStreamAdapter::AddRestrictionsListener( + VideoSourceRestrictionsListener* restrictions_listener) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(std::find(restrictions_listeners_.begin(), + restrictions_listeners_.end(), + restrictions_listener) == restrictions_listeners_.end()); + restrictions_listeners_.push_back(restrictions_listener); +} + +void VideoStreamAdapter::RemoveRestrictionsListener( + VideoSourceRestrictionsListener* restrictions_listener) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = std::find(restrictions_listeners_.begin(), + restrictions_listeners_.end(), restrictions_listener); + RTC_DCHECK(it != restrictions_listeners_.end()); + restrictions_listeners_.erase(it); +} + +void VideoStreamAdapter::AddAdaptationConstraint( + AdaptationConstraint* adaptation_constraint) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(std::find(adaptation_constraints_.begin(), + adaptation_constraints_.end(), + adaptation_constraint) == adaptation_constraints_.end()); + adaptation_constraints_.push_back(adaptation_constraint); +} + +void VideoStreamAdapter::RemoveAdaptationConstraint( + AdaptationConstraint* adaptation_constraint) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = std::find(adaptation_constraints_.begin(), + adaptation_constraints_.end(), adaptation_constraint); + RTC_DCHECK(it != adaptation_constraints_.end()); + adaptation_constraints_.erase(it); } void VideoStreamAdapter::SetDegradationPreference( DegradationPreference degradation_preference) { + RTC_DCHECK_RUN_ON(&sequence_checker_); if (degradation_preference_ == degradation_preference) 
return; // Invalidate any previously returned Adaptation. ++adaptation_validation_id_; - if (degradation_preference == DegradationPreference::BALANCED || - degradation_preference_ == DegradationPreference::BALANCED) { + bool balanced_switch = + degradation_preference == DegradationPreference::BALANCED || + degradation_preference_ == DegradationPreference::BALANCED; + degradation_preference_ = degradation_preference; + if (balanced_switch) { + // ClearRestrictions() calls BroadcastVideoRestrictionsUpdate(nullptr). ClearRestrictions(); + } else { + BroadcastVideoRestrictionsUpdate(input_state_provider_->InputState(), + nullptr); } - degradation_preference_ = degradation_preference; } -void VideoStreamAdapter::SetInput(VideoStreamInputState input_state) { - // Invalidate any previously returned Adaptation. +struct VideoStreamAdapter::RestrictionsOrStateVisitor { + Adaptation operator()(const RestrictionsWithCounters& r) const { + return Adaptation(adaptation_validation_id, r.restrictions, r.counters, + input_state); + } + Adaptation operator()(const Adaptation::Status& status) const { + RTC_DCHECK_NE(status, Adaptation::Status::kValid); + return Adaptation(adaptation_validation_id, status); + } + + const int adaptation_validation_id; + const VideoStreamInputState& input_state; +}; + +Adaptation VideoStreamAdapter::RestrictionsOrStateToAdaptation( + VideoStreamAdapter::RestrictionsOrState step_or_state, + const VideoStreamInputState& input_state) const { + RTC_DCHECK(!step_or_state.valueless_by_exception()); + return absl::visit( + RestrictionsOrStateVisitor{adaptation_validation_id_, input_state}, + step_or_state); +} + +Adaptation VideoStreamAdapter::GetAdaptationUp( + const VideoStreamInputState& input_state) const { + RestrictionsOrState step = GetAdaptationUpStep(input_state); + // If an adaptation proposed, check with the constraints that it is ok. 
+ if (absl::holds_alternative(step)) { + RestrictionsWithCounters restrictions = + absl::get(step); + for (const auto* constraint : adaptation_constraints_) { + if (!constraint->IsAdaptationUpAllowed(input_state, + current_restrictions_.restrictions, + restrictions.restrictions)) { + RTC_LOG(INFO) << "Not adapting up because constraint \"" + << constraint->Name() << "\" disallowed it"; + step = Adaptation::Status::kRejectedByConstraint; + } + } + } + return RestrictionsOrStateToAdaptation(step, input_state); +} + +Adaptation VideoStreamAdapter::GetAdaptationUp() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + VideoStreamInputState input_state = input_state_provider_->InputState(); ++adaptation_validation_id_; - input_state_ = input_state; - source_restrictor_->set_min_pixels_per_frame( - input_state_.min_pixels_per_frame()); + Adaptation adaptation = GetAdaptationUp(input_state); + return adaptation; } -Adaptation VideoStreamAdapter::GetAdaptationUp() const { - RTC_DCHECK_NE(degradation_preference_, DegradationPreference::DISABLED); - RTC_DCHECK(input_state_.HasInputFrameSizeAndFramesPerSecond()); +VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::GetAdaptationUpStep( + const VideoStreamInputState& input_state) const { + if (!HasSufficientInputForAdaptation(input_state)) { + return Adaptation::Status::kInsufficientInput; + } // Don't adapt if we're awaiting a previous adaptation to have an effect. 
- bool last_request_increased_resolution = - last_adaptation_request_ && last_adaptation_request_->step_type_ == - Adaptation::StepType::kIncreaseResolution; - if (last_request_increased_resolution && + if (awaiting_frame_size_change_ && + awaiting_frame_size_change_->pixels_increased && degradation_preference_ == DegradationPreference::MAINTAIN_FRAMERATE && - input_state_.frame_size_pixels().value() <= - last_adaptation_request_->input_pixel_count_) { - return Adaptation(adaptation_validation_id_, - Adaptation::Status::kAwaitingPreviousAdaptation); + input_state.frame_size_pixels().value() <= + awaiting_frame_size_change_->frame_size_pixels) { + return Adaptation::Status::kAwaitingPreviousAdaptation; } // Maybe propose targets based on degradation preference. switch (degradation_preference_) { case DegradationPreference::BALANCED: { // Attempt to increase target frame rate. - int target_fps = - balanced_settings_.MaxFps(input_state_.video_codec_type(), - input_state_.frame_size_pixels().value()); - if (source_restrictor_->CanIncreaseFrameRateTo(target_fps)) { - return Adaptation( - adaptation_validation_id_, - Adaptation::Step(Adaptation::StepType::kIncreaseFrameRate, - target_fps)); + RestrictionsOrState increase_frame_rate = + IncreaseFramerate(input_state, current_restrictions_); + if (absl::holds_alternative( + increase_frame_rate)) { + return increase_frame_rate; } - // Scale up resolution. + // else, increase resolution. ABSL_FALLTHROUGH_INTENDED; } case DegradationPreference::MAINTAIN_FRAMERATE: { // Attempt to increase pixel count. 
- int target_pixels = input_state_.frame_size_pixels().value(); - if (source_restrictor_->adaptation_counters().resolution_adaptations == - 1) { - RTC_LOG(LS_INFO) << "Removing resolution down-scaling setting."; - target_pixels = std::numeric_limits::max(); - } - target_pixels = GetHigherResolutionThan(target_pixels); - if (!source_restrictor_->CanIncreaseResolutionTo(target_pixels)) { - return Adaptation(adaptation_validation_id_, - Adaptation::Status::kLimitReached); - } - return Adaptation( - adaptation_validation_id_, - Adaptation::Step(Adaptation::StepType::kIncreaseResolution, - target_pixels)); + return IncreaseResolution(input_state, current_restrictions_); } case DegradationPreference::MAINTAIN_RESOLUTION: { // Scale up framerate. - int target_fps = input_state_.frames_per_second(); - if (source_restrictor_->adaptation_counters().fps_adaptations == 1) { - RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting."; - target_fps = std::numeric_limits::max(); - } - target_fps = GetHigherFrameRateThan(target_fps); - if (!source_restrictor_->CanIncreaseFrameRateTo(target_fps)) { - return Adaptation(adaptation_validation_id_, - Adaptation::Status::kLimitReached); - } - return Adaptation( - adaptation_validation_id_, - Adaptation::Step(Adaptation::StepType::kIncreaseFrameRate, - target_fps)); + return IncreaseFramerate(input_state, current_restrictions_); } case DegradationPreference::DISABLED: - RTC_NOTREACHED(); - return Adaptation(adaptation_validation_id_, - Adaptation::Status::kLimitReached); + return Adaptation::Status::kAdaptationDisabled; + } + RTC_CHECK_NOTREACHED(); +} + +Adaptation VideoStreamAdapter::GetAdaptationDown() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + VideoStreamInputState input_state = input_state_provider_->InputState(); + ++adaptation_validation_id_; + RestrictionsOrState restrictions_or_state = + GetAdaptationDownStep(input_state, current_restrictions_); + if (MinPixelLimitReached(input_state)) { + 
encoder_stats_observer_->OnMinPixelLimitReached(); + } + // Check for min_fps + if (degradation_preference_ == DegradationPreference::BALANCED && + absl::holds_alternative( + restrictions_or_state)) { + restrictions_or_state = AdaptIfFpsDiffInsufficient( + input_state, + absl::get(restrictions_or_state)); } + return RestrictionsOrStateToAdaptation(restrictions_or_state, input_state); } -Adaptation VideoStreamAdapter::GetAdaptationDown() const { - RTC_DCHECK_NE(degradation_preference_, DegradationPreference::DISABLED); - RTC_DCHECK(input_state_.HasInputFrameSizeAndFramesPerSecond()); +VideoStreamAdapter::RestrictionsOrState +VideoStreamAdapter::AdaptIfFpsDiffInsufficient( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& restrictions) const { + RTC_DCHECK_EQ(degradation_preference_, DegradationPreference::BALANCED); + int frame_size_pixels = input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value()); + absl::optional min_fps_diff = + balanced_settings_.MinFpsDiff(frame_size_pixels); + if (current_restrictions_.counters.fps_adaptations < + restrictions.counters.fps_adaptations && + min_fps_diff && input_state.frames_per_second() > 0) { + int fps_diff = input_state.frames_per_second() - + restrictions.restrictions.max_frame_rate().value(); + if (fps_diff < min_fps_diff.value()) { + return GetAdaptationDownStep(input_state, restrictions); + } + } + return restrictions; +} + +VideoStreamAdapter::RestrictionsOrState +VideoStreamAdapter::GetAdaptationDownStep( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions) const { + if (!HasSufficientInputForAdaptation(input_state)) { + return Adaptation::Status::kInsufficientInput; + } // Don't adapt if we're awaiting a previous adaptation to have an effect or // if we switched degradation preference. 
- bool last_request_decreased_resolution = - last_adaptation_request_ && last_adaptation_request_->step_type_ == - Adaptation::StepType::kDecreaseResolution; - if (last_request_decreased_resolution && + if (awaiting_frame_size_change_ && + !awaiting_frame_size_change_->pixels_increased && degradation_preference_ == DegradationPreference::MAINTAIN_FRAMERATE && - input_state_.frame_size_pixels().value() >= - last_adaptation_request_->input_pixel_count_) { - return Adaptation(adaptation_validation_id_, - Adaptation::Status::kAwaitingPreviousAdaptation); + input_state.frame_size_pixels().value() >= + awaiting_frame_size_change_->frame_size_pixels) { + return Adaptation::Status::kAwaitingPreviousAdaptation; } - // Maybe propose targets based on degradation preference. switch (degradation_preference_) { case DegradationPreference::BALANCED: { // Try scale down framerate, if lower. - int target_fps = - balanced_settings_.MinFps(input_state_.video_codec_type(), - input_state_.frame_size_pixels().value()); - if (source_restrictor_->CanDecreaseFrameRateTo(target_fps)) { - return Adaptation( - adaptation_validation_id_, - Adaptation::Step(Adaptation::StepType::kDecreaseFrameRate, - target_fps)); + RestrictionsOrState decrease_frame_rate = + DecreaseFramerate(input_state, current_restrictions); + if (absl::holds_alternative( + decrease_frame_rate)) { + return decrease_frame_rate; } - // Scale down resolution. + // else, decrease resolution. ABSL_FALLTHROUGH_INTENDED; } case DegradationPreference::MAINTAIN_FRAMERATE: { - // Scale down resolution. 
- int target_pixels = - GetLowerResolutionThan(input_state_.frame_size_pixels().value()); - bool min_pixel_limit_reached = - target_pixels < source_restrictor_->min_pixels_per_frame(); - if (!source_restrictor_->CanDecreaseResolutionTo(target_pixels)) { - return Adaptation(adaptation_validation_id_, - Adaptation::Status::kLimitReached, - min_pixel_limit_reached); - } - return Adaptation( - adaptation_validation_id_, - Adaptation::Step(Adaptation::StepType::kDecreaseResolution, - target_pixels), - min_pixel_limit_reached); + return DecreaseResolution(input_state, current_restrictions); } case DegradationPreference::MAINTAIN_RESOLUTION: { - int target_fps = GetLowerFrameRateThan(input_state_.frames_per_second()); - if (!source_restrictor_->CanDecreaseFrameRateTo(target_fps)) { - return Adaptation(adaptation_validation_id_, - Adaptation::Status::kLimitReached); - } - return Adaptation( - adaptation_validation_id_, - Adaptation::Step(Adaptation::StepType::kDecreaseFrameRate, - target_fps)); + return DecreaseFramerate(input_state, current_restrictions); } case DegradationPreference::DISABLED: - RTC_NOTREACHED(); - return Adaptation(adaptation_validation_id_, - Adaptation::Status::kLimitReached); + return Adaptation::Status::kAdaptationDisabled; } + RTC_CHECK_NOTREACHED(); } -VideoSourceRestrictions VideoStreamAdapter::PeekNextRestrictions( - const Adaptation& adaptation) const { - RTC_DCHECK_EQ(adaptation.validation_id_, adaptation_validation_id_); - if (adaptation.status() != Adaptation::Status::kValid) - return source_restrictor_->source_restrictions(); - VideoSourceRestrictor restrictor_copy = *source_restrictor_; - restrictor_copy.ApplyAdaptationStep(adaptation.step(), - degradation_preference_); - return restrictor_copy.source_restrictions(); +VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::DecreaseResolution( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions) { + int target_pixels = + 
GetLowerResolutionThan(input_state.frame_size_pixels().value()); + // Use single active stream if set, this stream could be lower than the input. + int target_pixels_min = + GetLowerResolutionThan(input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value())); + if (!CanDecreaseResolutionTo(target_pixels, target_pixels_min, input_state, + current_restrictions.restrictions)) { + return Adaptation::Status::kLimitReached; + } + RestrictionsWithCounters new_restrictions = current_restrictions; + RTC_LOG(LS_INFO) << "Scaling down resolution, max pixels: " << target_pixels; + new_restrictions.restrictions.set_max_pixels_per_frame( + target_pixels != std::numeric_limits::max() + ? absl::optional(target_pixels) + : absl::nullopt); + new_restrictions.restrictions.set_target_pixels_per_frame(absl::nullopt); + ++new_restrictions.counters.resolution_adaptations; + return new_restrictions; +} + +VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::DecreaseFramerate( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions) const { + int max_frame_rate; + if (degradation_preference_ == DegradationPreference::MAINTAIN_RESOLUTION) { + max_frame_rate = GetLowerFrameRateThan(input_state.frames_per_second()); + } else if (degradation_preference_ == DegradationPreference::BALANCED) { + int frame_size_pixels = input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value()); + max_frame_rate = balanced_settings_.MinFps(input_state.video_codec_type(), + frame_size_pixels); + } else { + RTC_NOTREACHED(); + max_frame_rate = GetLowerFrameRateThan(input_state.frames_per_second()); + } + if (!CanDecreaseFrameRateTo(max_frame_rate, + current_restrictions.restrictions)) { + return Adaptation::Status::kLimitReached; + } + RestrictionsWithCounters new_restrictions = current_restrictions; + max_frame_rate = std::max(kMinFrameRateFps, max_frame_rate); + RTC_LOG(LS_INFO) << "Scaling down 
framerate: " << max_frame_rate; + new_restrictions.restrictions.set_max_frame_rate( + max_frame_rate != std::numeric_limits::max() + ? absl::optional(max_frame_rate) + : absl::nullopt); + ++new_restrictions.counters.fps_adaptations; + return new_restrictions; } -void VideoStreamAdapter::ApplyAdaptation(const Adaptation& adaptation) { +VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::IncreaseResolution( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions) { + int target_pixels = input_state.frame_size_pixels().value(); + if (current_restrictions.counters.resolution_adaptations == 1) { + RTC_LOG(LS_INFO) << "Removing resolution down-scaling setting."; + target_pixels = std::numeric_limits::max(); + } + target_pixels = GetHigherResolutionThan(target_pixels); + if (!CanIncreaseResolutionTo(target_pixels, + current_restrictions.restrictions)) { + return Adaptation::Status::kLimitReached; + } + int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels); + RestrictionsWithCounters new_restrictions = current_restrictions; + RTC_LOG(LS_INFO) << "Scaling up resolution, max pixels: " + << max_pixels_wanted; + new_restrictions.restrictions.set_max_pixels_per_frame( + max_pixels_wanted != std::numeric_limits::max() + ? absl::optional(max_pixels_wanted) + : absl::nullopt); + new_restrictions.restrictions.set_target_pixels_per_frame( + max_pixels_wanted != std::numeric_limits::max() + ? 
absl::optional(target_pixels) + : absl::nullopt); + --new_restrictions.counters.resolution_adaptations; + RTC_DCHECK_GE(new_restrictions.counters.resolution_adaptations, 0); + return new_restrictions; +} + +VideoStreamAdapter::RestrictionsOrState VideoStreamAdapter::IncreaseFramerate( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions) const { + int max_frame_rate; + if (degradation_preference_ == DegradationPreference::MAINTAIN_RESOLUTION) { + max_frame_rate = GetHigherFrameRateThan(input_state.frames_per_second()); + } else if (degradation_preference_ == DegradationPreference::BALANCED) { + int frame_size_pixels = input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value()); + max_frame_rate = balanced_settings_.MaxFps(input_state.video_codec_type(), + frame_size_pixels); + // Temporary fix for cases when there are fewer framerate adaptation steps + // up than down. Make number of down/up steps equal. + if (max_frame_rate == std::numeric_limits::max() && + current_restrictions.counters.fps_adaptations > 1) { + // Do not unrestrict framerate to allow additional adaptation up steps. + RTC_LOG(LS_INFO) << "Modifying framerate due to remaining fps count."; + max_frame_rate -= current_restrictions.counters.fps_adaptations; + } + // In BALANCED, the max_frame_rate must be checked before proceeding. This + // is because the MaxFps might be the current Fps and so the balanced + // settings may want to scale up the resolution. 
+ if (!CanIncreaseFrameRateTo(max_frame_rate, + current_restrictions.restrictions)) { + return Adaptation::Status::kLimitReached; + } + } else { + RTC_NOTREACHED(); + max_frame_rate = GetHigherFrameRateThan(input_state.frames_per_second()); + } + if (current_restrictions.counters.fps_adaptations == 1) { + RTC_LOG(LS_INFO) << "Removing framerate down-scaling setting."; + max_frame_rate = std::numeric_limits::max(); + } + if (!CanIncreaseFrameRateTo(max_frame_rate, + current_restrictions.restrictions)) { + return Adaptation::Status::kLimitReached; + } + RTC_LOG(LS_INFO) << "Scaling up framerate: " << max_frame_rate; + RestrictionsWithCounters new_restrictions = current_restrictions; + new_restrictions.restrictions.set_max_frame_rate( + max_frame_rate != std::numeric_limits::max() + ? absl::optional(max_frame_rate) + : absl::nullopt); + --new_restrictions.counters.fps_adaptations; + RTC_DCHECK_GE(new_restrictions.counters.fps_adaptations, 0); + return new_restrictions; +} + +Adaptation VideoStreamAdapter::GetAdaptDownResolution() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + VideoStreamInputState input_state = input_state_provider_->InputState(); + switch (degradation_preference_) { + case DegradationPreference::DISABLED: + return RestrictionsOrStateToAdaptation( + Adaptation::Status::kAdaptationDisabled, input_state); + case DegradationPreference::MAINTAIN_RESOLUTION: + return RestrictionsOrStateToAdaptation(Adaptation::Status::kLimitReached, + input_state); + case DegradationPreference::MAINTAIN_FRAMERATE: + return GetAdaptationDown(); + case DegradationPreference::BALANCED: { + return RestrictionsOrStateToAdaptation( + GetAdaptDownResolutionStepForBalanced(input_state), input_state); + } + } + RTC_CHECK_NOTREACHED(); +} + +VideoStreamAdapter::RestrictionsOrState +VideoStreamAdapter::GetAdaptDownResolutionStepForBalanced( + const VideoStreamInputState& input_state) const { + // Adapt twice if the first adaptation did not decrease resolution. 
+ auto first_step = GetAdaptationDownStep(input_state, current_restrictions_); + if (!absl::holds_alternative(first_step)) { + return first_step; + } + auto first_restrictions = absl::get(first_step); + if (first_restrictions.counters.resolution_adaptations > + current_restrictions_.counters.resolution_adaptations) { + return first_step; + } + // We didn't decrease resolution so force it; amend a resolution resuction + // to the existing framerate reduction in |first_restrictions|. + auto second_step = DecreaseResolution(input_state, first_restrictions); + if (absl::holds_alternative(second_step)) { + return second_step; + } + // If the second step was not successful then settle for the first one. + return first_step; +} + +void VideoStreamAdapter::ApplyAdaptation( + const Adaptation& adaptation, + rtc::scoped_refptr resource) { + RTC_DCHECK_RUN_ON(&sequence_checker_); RTC_DCHECK_EQ(adaptation.validation_id_, adaptation_validation_id_); if (adaptation.status() != Adaptation::Status::kValid) return; // Remember the input pixels and fps of this adaptation. Used to avoid // adapting again before this adaptation has had an effect. - last_adaptation_request_.emplace(AdaptationRequest{ - input_state_.frame_size_pixels().value(), - input_state_.frames_per_second(), adaptation.step().type}); - // Adapt! 
- source_restrictor_->ApplyAdaptationStep(adaptation.step(), - degradation_preference_); + if (DidIncreaseResolution(current_restrictions_.restrictions, + adaptation.restrictions())) { + awaiting_frame_size_change_.emplace( + true, adaptation.input_state().frame_size_pixels().value()); + } else if (DidDecreaseResolution(current_restrictions_.restrictions, + adaptation.restrictions())) { + awaiting_frame_size_change_.emplace( + false, adaptation.input_state().frame_size_pixels().value()); + } else { + awaiting_frame_size_change_ = absl::nullopt; + } + current_restrictions_ = {adaptation.restrictions(), adaptation.counters()}; + BroadcastVideoRestrictionsUpdate(adaptation.input_state(), resource); +} + +Adaptation VideoStreamAdapter::GetAdaptationTo( + const VideoAdaptationCounters& counters, + const VideoSourceRestrictions& restrictions) { + // Adapts up/down from the current levels so counters are equal. + RTC_DCHECK_RUN_ON(&sequence_checker_); + VideoStreamInputState input_state = input_state_provider_->InputState(); + return Adaptation(adaptation_validation_id_, restrictions, counters, + input_state); +} + +void VideoStreamAdapter::BroadcastVideoRestrictionsUpdate( + const VideoStreamInputState& input_state, + const rtc::scoped_refptr& resource) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + VideoSourceRestrictions filtered = FilterRestrictionsByDegradationPreference( + source_restrictions(), degradation_preference_); + if (last_filtered_restrictions_ == filtered) { + return; + } + for (auto* restrictions_listener : restrictions_listeners_) { + restrictions_listener->OnVideoSourceRestrictionsUpdated( + filtered, current_restrictions_.counters, resource, + source_restrictions()); + } + last_video_source_restrictions_ = current_restrictions_.restrictions; + last_filtered_restrictions_ = filtered; +} + +bool VideoStreamAdapter::HasSufficientInputForAdaptation( + const VideoStreamInputState& input_state) const { + return 
input_state.HasInputFrameSizeAndFramesPerSecond() && + (degradation_preference_ != + DegradationPreference::MAINTAIN_RESOLUTION || + input_state.frames_per_second() >= kMinFrameRateFps); +} + +VideoStreamAdapter::AwaitingFrameSizeChange::AwaitingFrameSizeChange( + bool pixels_increased, + int frame_size_pixels) + : pixels_increased(pixels_increased), + frame_size_pixels(frame_size_pixels) {} + +absl::optional VideoStreamAdapter::GetSingleActiveLayerPixels( + const VideoCodec& codec) { + int num_active = 0; + absl::optional pixels; + if (codec.codecType == VideoCodecType::kVideoCodecVP9) { + for (int i = 0; i < codec.VP9().numberOfSpatialLayers; ++i) { + if (codec.spatialLayers[i].active) { + ++num_active; + pixels = codec.spatialLayers[i].width * codec.spatialLayers[i].height; + } + } + } else { + for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) { + if (codec.simulcastStream[i].active) { + ++num_active; + pixels = + codec.simulcastStream[i].width * codec.simulcastStream[i].height; + } + } + } + return (num_active > 1) ? 
absl::nullopt : pixels; } } // namespace webrtc diff --git a/call/adaptation/video_stream_adapter.h b/call/adaptation/video_stream_adapter.h index f313e6bed6..3c876b8970 100644 --- a/call/adaptation/video_stream_adapter.h +++ b/call/adaptation/video_stream_adapter.h @@ -12,18 +12,43 @@ #define CALL_ADAPTATION_VIDEO_STREAM_ADAPTER_H_ #include +#include +#include #include "absl/types/optional.h" +#include "absl/types/variant.h" +#include "api/adaptation/resource.h" #include "api/rtp_parameters.h" #include "api/video/video_adaptation_counters.h" -#include "call/adaptation/resource.h" +#include "api/video/video_stream_encoder_observer.h" +#include "call/adaptation/adaptation_constraint.h" +#include "call/adaptation/degradation_preference_provider.h" #include "call/adaptation/video_source_restrictions.h" #include "call/adaptation/video_stream_input_state.h" +#include "call/adaptation/video_stream_input_state_provider.h" #include "modules/video_coding/utility/quality_scaler.h" #include "rtc_base/experiments/balanced_degradation_settings.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { +// The listener is responsible for carrying out the reconfiguration of the video +// source such that the VideoSourceRestrictions are fulfilled. +class VideoSourceRestrictionsListener { + public: + virtual ~VideoSourceRestrictionsListener(); + + // The |restrictions| are filtered by degradation preference but not the + // |adaptation_counters|, which are currently only reported for legacy stats + // calculation purposes. 
+ virtual void OnVideoSourceRestrictionsUpdated( + VideoSourceRestrictions restrictions, + const VideoAdaptationCounters& adaptation_counters, + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) = 0; +}; + class VideoStreamAdapter; extern const int kMinFrameRateFps; @@ -32,15 +57,12 @@ VideoSourceRestrictions FilterRestrictionsByDegradationPreference( VideoSourceRestrictions source_restrictions, DegradationPreference degradation_preference); -VideoAdaptationCounters FilterVideoAdaptationCountersByDegradationPreference( - VideoAdaptationCounters counters, - DegradationPreference degradation_preference); - +int GetLowerResolutionThan(int pixel_count); int GetHigherResolutionThan(int pixel_count); -// Represents one step that the VideoStreamAdapter can take when adapting the -// VideoSourceRestrictions up or down. Or, if adaptation is not valid, provides -// a Status code indicating the reason for not adapting. +// Either represents the next VideoSourceRestrictions the VideoStreamAdapter +// will take, or provides a Status code indicating the reason for not adapting +// if the adaptation is not valid. class Adaptation final { public: enum class Status { @@ -54,51 +76,42 @@ class Adaptation final { // adaptation has not yet been reflected in the input resolution or frame // rate; adaptation is refused to avoid "double-adapting". kAwaitingPreviousAdaptation, + // Not enough input. + kInsufficientInput, + // Adaptation disabled via degradation preference. + kAdaptationDisabled, + // Adaptation up was rejected by a VideoAdaptationConstraint. + kRejectedByConstraint, }; - // The status of this Adaptation. To find out how this Adaptation affects - // VideoSourceRestrictions, see VideoStreamAdapter::PeekNextRestrictions(). + static const char* StatusToString(Status status); + Status status() const; - // Used for stats reporting. 
- bool min_pixel_limit_reached() const; + const VideoStreamInputState& input_state() const; + const VideoSourceRestrictions& restrictions() const; + const VideoAdaptationCounters& counters() const; private: - // The adapter needs to know about step type and step target in order to - // construct and perform an Adaptation, which is a detail we do not want to - // expose to the public interface. friend class VideoStreamAdapter; - enum class StepType { - kIncreaseResolution, - kDecreaseResolution, - kIncreaseFrameRate, - kDecreaseFrameRate, - }; - - struct Step { - Step(StepType type, int target); - const StepType type; - const int target; // Pixel or frame rate depending on |type|. - }; - - // Constructs with a valid adaptation Step. Status is kValid. - Adaptation(int validation_id, Step step); - Adaptation(int validation_id, Step step, bool min_pixel_limit_reached); + // Constructs with a valid adaptation. Status is kValid. + Adaptation(int validation_id, + VideoSourceRestrictions restrictions, + VideoAdaptationCounters counters, + VideoStreamInputState input_state); // Constructor when adaptation is not valid. Status MUST NOT be kValid. Adaptation(int validation_id, Status invalid_status); - Adaptation(int validation_id, - Status invalid_status, - bool min_pixel_limit_reached); - - const Step& step() const; // Only callable if |status_| is kValid. // An Adaptation can become invalidated if the state of VideoStreamAdapter is // modified before the Adaptation is applied. To guard against this, this ID // has to match VideoStreamAdapter::adaptation_validation_id_ when applied. + // TODO(https://crbug.com/webrtc/11700): Remove the validation_id_. const int validation_id_; const Status status_; - const absl::optional step_; // Only present if |status_| is kValid. - const bool min_pixel_limit_reached_; + // Input state when adaptation was made. 
+ const VideoStreamInputState input_state_; + const VideoSourceRestrictions restrictions_; + const VideoAdaptationCounters counters_; }; // Owns the VideoSourceRestriction for a single stream and is responsible for @@ -109,68 +122,146 @@ class Adaptation final { // 3. Modify the stream's restrictions in one of the valid ways. class VideoStreamAdapter { public: - VideoStreamAdapter(); + VideoStreamAdapter(VideoStreamInputStateProvider* input_state_provider, + VideoStreamEncoderObserver* encoder_stats_observer); ~VideoStreamAdapter(); VideoSourceRestrictions source_restrictions() const; const VideoAdaptationCounters& adaptation_counters() const; void ClearRestrictions(); + void AddRestrictionsListener( + VideoSourceRestrictionsListener* restrictions_listener); + void RemoveRestrictionsListener( + VideoSourceRestrictionsListener* restrictions_listener); + void AddAdaptationConstraint(AdaptationConstraint* adaptation_constraint); + void RemoveAdaptationConstraint(AdaptationConstraint* adaptation_constraint); + // TODO(hbos): Setting the degradation preference should not clear // restrictions! This is not defined in the spec and is unexpected, there is a // tiny risk that people would discover and rely on this behavior. void SetDegradationPreference(DegradationPreference degradation_preference); - // The adaptaiton logic depends on these inputs. - void SetInput(VideoStreamInputState input_state); // Returns an adaptation that we are guaranteed to be able to apply, or a // status code indicating the reason why we cannot adapt. - Adaptation GetAdaptationUp() const; - Adaptation GetAdaptationDown() const; - // Returns the restrictions that result from applying the adaptation, without - // actually applying it. If the adaptation is not valid, current restrictions - // are returned. - VideoSourceRestrictions PeekNextRestrictions( - const Adaptation& adaptation) const; - // Updates source_restrictions() based according to the Adaptation. 
- void ApplyAdaptation(const Adaptation& adaptation); + Adaptation GetAdaptationUp(); + Adaptation GetAdaptationDown(); + Adaptation GetAdaptationTo(const VideoAdaptationCounters& counters, + const VideoSourceRestrictions& restrictions); + // Tries to adapt the resolution one step. This is used for initial frame + // dropping. Does nothing if the degradation preference is not BALANCED or + // MAINTAIN_FRAMERATE. In the case of BALANCED, it will try twice to reduce + // the resolution. If it fails twice it gives up. + Adaptation GetAdaptDownResolution(); - private: - class VideoSourceRestrictor; - - // The input frame rate and resolution at the time of an adaptation in the - // direction described by |mode_| (up or down). - // TODO(https://crbug.com/webrtc/11393): Can this be renamed? Can this be - // merged with AdaptationTarget? - struct AdaptationRequest { - // The pixel count produced by the source at the time of the adaptation. - int input_pixel_count_; - // Framerate received from the source at the time of the adaptation. - int framerate_fps_; - // Degradation preference for the request. - Adaptation::StepType step_type_; + // Updates source_restrictions() the Adaptation. + void ApplyAdaptation(const Adaptation& adaptation, + rtc::scoped_refptr resource); + + struct RestrictionsWithCounters { + VideoSourceRestrictions restrictions; + VideoAdaptationCounters counters; }; - // Owner and modifier of the VideoSourceRestriction of this stream adaptor. 
- const std::unique_ptr source_restrictor_; + static absl::optional GetSingleActiveLayerPixels( + const VideoCodec& codec); + + private: + void BroadcastVideoRestrictionsUpdate( + const VideoStreamInputState& input_state, + const rtc::scoped_refptr& resource); + + bool HasSufficientInputForAdaptation(const VideoStreamInputState& input_state) + const RTC_RUN_ON(&sequence_checker_); + + using RestrictionsOrState = + absl::variant; + RestrictionsOrState GetAdaptationUpStep( + const VideoStreamInputState& input_state) const + RTC_RUN_ON(&sequence_checker_); + RestrictionsOrState GetAdaptationDownStep( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions) const + RTC_RUN_ON(&sequence_checker_); + RestrictionsOrState GetAdaptDownResolutionStepForBalanced( + const VideoStreamInputState& input_state) const + RTC_RUN_ON(&sequence_checker_); + RestrictionsOrState AdaptIfFpsDiffInsufficient( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& restrictions) const + RTC_RUN_ON(&sequence_checker_); + + Adaptation GetAdaptationUp(const VideoStreamInputState& input_state) const + RTC_RUN_ON(&sequence_checker_); + Adaptation GetAdaptationDown(const VideoStreamInputState& input_state) const + RTC_RUN_ON(&sequence_checker_); + + static RestrictionsOrState DecreaseResolution( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions); + static RestrictionsOrState IncreaseResolution( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions); + // Framerate methods are member functions because they need internal state + // if the degradation preference is BALANCED. 
+ RestrictionsOrState DecreaseFramerate( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions) const + RTC_RUN_ON(&sequence_checker_); + RestrictionsOrState IncreaseFramerate( + const VideoStreamInputState& input_state, + const RestrictionsWithCounters& current_restrictions) const + RTC_RUN_ON(&sequence_checker_); + + struct RestrictionsOrStateVisitor; + Adaptation RestrictionsOrStateToAdaptation( + RestrictionsOrState step_or_state, + const VideoStreamInputState& input_state) const + RTC_RUN_ON(&sequence_checker_); + + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_ + RTC_GUARDED_BY(&sequence_checker_); + // Gets the input state which is the basis of all adaptations. + // Thread safe. + VideoStreamInputStateProvider* input_state_provider_; + // Used to signal when min pixel limit has been reached. + VideoStreamEncoderObserver* const encoder_stats_observer_; // Decides the next adaptation target in DegradationPreference::BALANCED. const BalancedDegradationSettings balanced_settings_; // To guard against applying adaptations that have become invalidated, an // Adaptation that is applied has to have a matching validation ID. - int adaptation_validation_id_; + int adaptation_validation_id_ RTC_GUARDED_BY(&sequence_checker_); // When deciding the next target up or down, different strategies are used // depending on the DegradationPreference. // https://w3c.github.io/mst-content-hint/#dom-rtcdegradationpreference - DegradationPreference degradation_preference_; - VideoStreamInputState input_state_; - // The input frame rate, resolution and adaptation direction of the last - // ApplyAdaptationTarget(). Used to avoid adapting twice if a recent - // adaptation has not had an effect on the input frame rate or resolution yet. + DegradationPreference degradation_preference_ + RTC_GUARDED_BY(&sequence_checker_); + // Used to avoid adapting twice. Stores the resolution at the time of the last + // adaptation. 
// TODO(hbos): Can we implement a more general "cooldown" mechanism of // resources intead? If we already have adapted it seems like we should wait // a while before adapting again, so that we are not acting on usage // measurements that are made obsolete/unreliable by an "ongoing" adaptation. - absl::optional last_adaptation_request_; + struct AwaitingFrameSizeChange { + AwaitingFrameSizeChange(bool pixels_increased, int frame_size); + const bool pixels_increased; + const int frame_size_pixels; + }; + absl::optional awaiting_frame_size_change_ + RTC_GUARDED_BY(&sequence_checker_); + // The previous restrictions value. Starts as unrestricted. + VideoSourceRestrictions last_video_source_restrictions_ + RTC_GUARDED_BY(&sequence_checker_); + VideoSourceRestrictions last_filtered_restrictions_ + RTC_GUARDED_BY(&sequence_checker_); + + std::vector restrictions_listeners_ + RTC_GUARDED_BY(&sequence_checker_); + std::vector adaptation_constraints_ + RTC_GUARDED_BY(&sequence_checker_); + + RestrictionsWithCounters current_restrictions_ + RTC_GUARDED_BY(&sequence_checker_); }; } // namespace webrtc diff --git a/call/adaptation/video_stream_adapter_unittest.cc b/call/adaptation/video_stream_adapter_unittest.cc index 79247a7837..aba9cf1f29 100644 --- a/call/adaptation/video_stream_adapter_unittest.cc +++ b/call/adaptation/video_stream_adapter_unittest.cc @@ -14,12 +14,18 @@ #include #include "absl/types/optional.h" +#include "api/scoped_refptr.h" #include "api/video/video_adaptation_reason.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/video_encoder_config.h" +#include "call/adaptation/adaptation_constraint.h" #include "call/adaptation/encoder_settings.h" +#include "call/adaptation/test/fake_frame_rate_provider.h" +#include "call/adaptation/test/fake_resource.h" +#include "call/adaptation/test/fake_video_stream_input_state_provider.h" #include "call/adaptation/video_source_restrictions.h" +#include 
"call/adaptation/video_stream_input_state.h" #include "rtc_base/string_encode.h" #include "test/field_trial.h" #include "test/gmock.h" @@ -28,6 +34,11 @@ namespace webrtc { +using ::testing::_; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SaveArg; + namespace { const int kBalancedHighResolutionPixels = 1280 * 720; @@ -49,17 +60,6 @@ std::string BalancedFieldTrialConfig() { rtc::ToString(kBalancedHighFrameRateFps) + "/"; } -VideoStreamInputState InputState(int input_pixels, - int input_fps, - int min_pixels_per_frame) { - VideoStreamInputState input_state; - input_state.set_has_input(true); - input_state.set_frame_size_pixels(input_pixels); - input_state.set_frames_per_second(input_fps); - input_state.set_min_pixels_per_frame(min_pixels_per_frame); - return input_state; -} - // Responsible for adjusting the inputs to VideoStreamAdapter (SetInput), such // as pixels and frame rate, according to the most recent source restrictions. // This helps tests that apply adaptations multiple times: if the input is not @@ -68,15 +68,16 @@ VideoStreamInputState InputState(int input_pixels, class FakeVideoStream { public: FakeVideoStream(VideoStreamAdapter* adapter, + FakeVideoStreamInputStateProvider* provider, int input_pixels, int input_fps, int min_pixels_per_frame) : adapter_(adapter), + provider_(provider), input_pixels_(input_pixels), input_fps_(input_fps), min_pixels_per_frame_(min_pixels_per_frame) { - adapter_->SetInput( - InputState(input_pixels_, input_fps_, min_pixels_per_frame_)); + provider_->SetInputState(input_pixels_, input_fps_, min_pixels_per_frame_); } int input_pixels() const { return input_pixels_; } @@ -85,7 +86,7 @@ class FakeVideoStream { // Performs ApplyAdaptation() followed by SetInput() with input pixels and // frame rate adjusted according to the resulting restrictions. 
void ApplyAdaptation(Adaptation adaptation) { - adapter_->ApplyAdaptation(adaptation); + adapter_->ApplyAdaptation(adaptation, nullptr); // Update input pixels and fps according to the resulting restrictions. auto restrictions = adapter_->source_restrictions(); if (restrictions.target_pixels_per_frame().has_value()) { @@ -99,201 +100,240 @@ class FakeVideoStream { if (restrictions.max_frame_rate().has_value()) { input_fps_ = restrictions.max_frame_rate().value(); } - adapter_->SetInput( - InputState(input_pixels_, input_fps_, min_pixels_per_frame_)); + provider_->SetInputState(input_pixels_, input_fps_, min_pixels_per_frame_); } private: VideoStreamAdapter* adapter_; + FakeVideoStreamInputStateProvider* provider_; int input_pixels_; int input_fps_; int min_pixels_per_frame_; }; +class FakeVideoStreamAdapterListner : public VideoSourceRestrictionsListener { + public: + void OnVideoSourceRestrictionsUpdated( + VideoSourceRestrictions restrictions, + const VideoAdaptationCounters& adaptation_counters, + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) override { + calls_++; + last_restrictions_ = unfiltered_restrictions; + } + + int calls() const { return calls_; } + + VideoSourceRestrictions last_restrictions() const { + return last_restrictions_; + } + + private: + int calls_ = 0; + VideoSourceRestrictions last_restrictions_; +}; + +class MockAdaptationConstraint : public AdaptationConstraint { + public: + MOCK_METHOD(bool, + IsAdaptationUpAllowed, + (const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions_before, + const VideoSourceRestrictions& restrictions_after), + (const, override)); + + // MOCK_METHOD(std::string, Name, (), (const, override)); + std::string Name() const override { return "MockAdaptationConstraint"; } +}; + } // namespace -TEST(VideoStreamAdapterTest, NoRestrictionsByDefault) { - VideoStreamAdapter adapter; - EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions()); 
- EXPECT_EQ(0, adapter.adaptation_counters().Total()); +class VideoStreamAdapterTest : public ::testing::Test { + public: + VideoStreamAdapterTest() + : field_trials_(BalancedFieldTrialConfig()), + resource_(FakeResource::Create("FakeResource")), + adapter_(&input_state_provider_, &encoder_stats_observer_) {} + + protected: + webrtc::test::ScopedFieldTrials field_trials_; + FakeVideoStreamInputStateProvider input_state_provider_; + rtc::scoped_refptr resource_; + testing::StrictMock encoder_stats_observer_; + VideoStreamAdapter adapter_; +}; + +TEST_F(VideoStreamAdapterTest, NoRestrictionsByDefault) { + EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions()); + EXPECT_EQ(0, adapter_.adaptation_counters().Total()); } -TEST(VideoStreamAdapterTest, MaintainFramerate_DecreasesPixelsToThreeFifths) { +TEST_F(VideoStreamAdapterTest, MaintainFramerate_DecreasesPixelsToThreeFifths) { const int kInputPixels = 1280 * 720; - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - adapter.SetInput(InputState(kInputPixels, 30, kDefaultMinPixelsPerFrame)); - Adaptation adaptation = adapter.GetAdaptationDown(); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + input_state_provider_.SetInputState(kInputPixels, 30, + kDefaultMinPixelsPerFrame); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); - EXPECT_FALSE(adaptation.min_pixel_limit_reached()); - adapter.ApplyAdaptation(adaptation); + adapter_.ApplyAdaptation(adaptation, nullptr); EXPECT_EQ(static_cast((kInputPixels * 3) / 5), - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().target_pixels_per_frame()); - EXPECT_EQ(absl::nullopt, adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); + 
adapter_.source_restrictions().target_pixels_per_frame()); + EXPECT_EQ(absl::nullopt, adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); } -TEST(VideoStreamAdapterTest, MaintainFramerate_DecreasesPixelsToLimitReached) { +TEST_F(VideoStreamAdapterTest, + MaintainFramerate_DecreasesPixelsToLimitReached) { const int kMinPixelsPerFrame = 640 * 480; - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - adapter.SetInput(InputState(kMinPixelsPerFrame + 1, 30, kMinPixelsPerFrame)); + + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + input_state_provider_.SetInputState(kMinPixelsPerFrame + 1, 30, + kMinPixelsPerFrame); + EXPECT_CALL(encoder_stats_observer_, OnMinPixelLimitReached()); // Even though we are above kMinPixelsPerFrame, because adapting down would // have exceeded the limit, we are said to have reached the limit already. // This differs from the frame rate adaptation logic, which would have clamped // to the limit in the first step and reported kLimitReached in the second // step. - Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kLimitReached, adaptation.status()); - EXPECT_TRUE(adaptation.min_pixel_limit_reached()); } -TEST(VideoStreamAdapterTest, MaintainFramerate_IncreasePixelsToFiveThirds) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, MaintainFramerate_IncreasePixelsToFiveThirds) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Go down twice, ensuring going back up is still a restricted resolution. 
- fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations); int input_pixels = fake_stream.input_pixels(); // Go up once. The target is 5/3 and the max is 12/5 of the target. const int target = (input_pixels * 5) / 3; - fake_stream.ApplyAdaptation(adapter.GetAdaptationUp()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp()); EXPECT_EQ(static_cast((target * 12) / 5), - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); EXPECT_EQ(static_cast(target), - adapter.source_restrictions().target_pixels_per_frame()); - EXPECT_EQ(absl::nullopt, adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); + adapter_.source_restrictions().target_pixels_per_frame()); + EXPECT_EQ(absl::nullopt, adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); } -TEST(VideoStreamAdapterTest, MaintainFramerate_IncreasePixelsToUnrestricted) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, MaintainFramerate_IncreasePixelsToUnrestricted) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // We are unrestricted by default and should not be able to adapt up. 
EXPECT_EQ(Adaptation::Status::kLimitReached, - adapter.GetAdaptationUp().status()); + adapter_.GetAdaptationUp().status()); // If we go down once and then back up we should not have any restrictions. - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); - fake_stream.ApplyAdaptation(adapter.GetAdaptationUp()); - EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions()); - EXPECT_EQ(0, adapter.adaptation_counters().Total()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp()); + EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions()); + EXPECT_EQ(0, adapter_.adaptation_counters().Total()); } -TEST(VideoStreamAdapterTest, MaintainResolution_DecreasesFpsToTwoThirds) { +TEST_F(VideoStreamAdapterTest, MaintainResolution_DecreasesFpsToTwoThirds) { const int kInputFps = 30; - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - adapter.SetInput( - InputState(1280 * 720, kInputFps, kDefaultMinPixelsPerFrame)); - Adaptation adaptation = adapter.GetAdaptationDown(); + + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + input_state_provider_.SetInputState(1280 * 720, kInputFps, + kDefaultMinPixelsPerFrame); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); - adapter.ApplyAdaptation(adaptation); + adapter_.ApplyAdaptation(adaptation, nullptr); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().target_pixels_per_frame()); + adapter_.source_restrictions().target_pixels_per_frame()); EXPECT_EQ(static_cast((kInputFps * 2) / 3), - 
adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); } -TEST(VideoStreamAdapterTest, MaintainResolution_DecreasesFpsToLimitReached) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - FakeVideoStream fake_stream(&adapter, 1280 * 720, kMinFrameRateFps + 1, - kDefaultMinPixelsPerFrame); +TEST_F(VideoStreamAdapterTest, MaintainResolution_DecreasesFpsToLimitReached) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, + kMinFrameRateFps + 1, kDefaultMinPixelsPerFrame); // If we are not yet at the limit and the next step would exceed it, the step // is clamped such that we end up exactly on the limit. - Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); EXPECT_EQ(static_cast(kMinFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); // Having reached the limit, the next adaptation down is not valid. 
EXPECT_EQ(Adaptation::Status::kLimitReached, - adapter.GetAdaptationDown().status()); + adapter_.GetAdaptationDown().status()); } -TEST(VideoStreamAdapterTest, MaintainResolution_IncreaseFpsToThreeHalves) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, MaintainResolution_IncreaseFpsToThreeHalves) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Go down twice, ensuring going back up is still a restricted frame rate. - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(2, adapter.adaptation_counters().fps_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(2, adapter_.adaptation_counters().fps_adaptations); int input_fps = fake_stream.input_fps(); // Go up once. The target is 3/2 of the input. 
- Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().target_pixels_per_frame()); + adapter_.source_restrictions().target_pixels_per_frame()); EXPECT_EQ(static_cast((input_fps * 3) / 2), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); } -TEST(VideoStreamAdapterTest, MaintainResolution_IncreaseFpsToUnrestricted) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, MaintainResolution_IncreaseFpsToUnrestricted) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // We are unrestricted by default and should not be able to adapt up. EXPECT_EQ(Adaptation::Status::kLimitReached, - adapter.GetAdaptationUp().status()); + adapter_.GetAdaptationUp().status()); // If we go down once and then back up we should not have any restrictions. 
- fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); - fake_stream.ApplyAdaptation(adapter.GetAdaptationUp()); - EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions()); - EXPECT_EQ(0, adapter.adaptation_counters().Total()); -} - -TEST(VideoStreamAdapterTest, Balanced_DecreaseFrameRate) { - webrtc::test::ScopedFieldTrials balanced_field_trials( - BalancedFieldTrialConfig()); - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::BALANCED); - adapter.SetInput(InputState(kBalancedMediumResolutionPixels, - kBalancedHighFrameRateFps, - kDefaultMinPixelsPerFrame)); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp()); + EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions()); + EXPECT_EQ(0, adapter_.adaptation_counters().Total()); +} + +TEST_F(VideoStreamAdapterTest, Balanced_DecreaseFrameRate) { + adapter_.SetDegradationPreference(DegradationPreference::BALANCED); + input_state_provider_.SetInputState(kBalancedMediumResolutionPixels, + kBalancedHighFrameRateFps, + kDefaultMinPixelsPerFrame); // If our frame rate is higher than the frame rate associated with our // resolution we should try to adapt to the frame rate associated with our // resolution: kBalancedMediumFrameRateFps. 
- Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); - adapter.ApplyAdaptation(adaptation); + adapter_.ApplyAdaptation(adaptation, nullptr); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().target_pixels_per_frame()); + adapter_.source_restrictions().target_pixels_per_frame()); EXPECT_EQ(static_cast(kBalancedMediumFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(0, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); -} - -TEST(VideoStreamAdapterTest, Balanced_DecreaseResolution) { - webrtc::test::ScopedFieldTrials balanced_field_trials( - BalancedFieldTrialConfig()); - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::BALANCED); - FakeVideoStream fake_stream(&adapter, kBalancedHighResolutionPixels, - kBalancedHighFrameRateFps, - kDefaultMinPixelsPerFrame); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(0, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); +} + +TEST_F(VideoStreamAdapterTest, Balanced_DecreaseResolution) { + adapter_.SetDegradationPreference(DegradationPreference::BALANCED); + FakeVideoStream fake_stream( + &adapter_, &input_state_provider_, kBalancedHighResolutionPixels, + kBalancedHighFrameRateFps, kDefaultMinPixelsPerFrame); // If we are not below the current resolution's frame rate limit, we should // adapt resolution according to "maintain-framerate" logic (three fifths). // @@ -303,35 +343,35 @@ TEST(VideoStreamAdapterTest, Balanced_DecreaseResolution) { // does prevent the source from going higher, though, so it's technically not // a NO-OP. 
{ - Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); } EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().target_pixels_per_frame()); + adapter_.source_restrictions().target_pixels_per_frame()); EXPECT_EQ(static_cast(kBalancedHighFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(0, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(0, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); // Verify "maintain-framerate" logic the second time we adapt: Frame rate // restrictions remains the same and resolution goes down. 
{ - Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); } constexpr size_t kReducedPixelsFirstStep = static_cast((kBalancedHighResolutionPixels * 3) / 5); EXPECT_EQ(kReducedPixelsFirstStep, - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().target_pixels_per_frame()); + adapter_.source_restrictions().target_pixels_per_frame()); EXPECT_EQ(static_cast(kBalancedHighFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); // If we adapt again, because the balanced settings' proposed frame rate is // still kBalancedHighFrameRateFps, "maintain-framerate" will trigger again. 
static_assert(kReducedPixelsFirstStep > kBalancedMediumResolutionPixels, @@ -339,18 +379,18 @@ TEST(VideoStreamAdapterTest, Balanced_DecreaseResolution) { "balanced setting resolution"); constexpr size_t kReducedPixelsSecondStep = (kReducedPixelsFirstStep * 3) / 5; { - Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); } EXPECT_EQ(kReducedPixelsSecondStep, - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); EXPECT_EQ(absl::nullopt, - adapter.source_restrictions().target_pixels_per_frame()); + adapter_.source_restrictions().target_pixels_per_frame()); EXPECT_EQ(static_cast(kBalancedHighFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); } // Testing when to adapt frame rate and when to adapt resolution is quite @@ -360,14 +400,11 @@ TEST(VideoStreamAdapterTest, Balanced_DecreaseResolution) { // adapt up we don't do it in the reverse order. Instead we always try to adapt // frame rate first according to balanced settings' configs and only when the // frame rate is already achieved do we adjust the resolution. 
-TEST(VideoStreamAdapterTest, Balanced_IncreaseFrameRateAndResolution) { - webrtc::test::ScopedFieldTrials balanced_field_trials( - BalancedFieldTrialConfig()); - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::BALANCED); - FakeVideoStream fake_stream(&adapter, kBalancedHighResolutionPixels, - kBalancedHighFrameRateFps, - kDefaultMinPixelsPerFrame); +TEST_F(VideoStreamAdapterTest, Balanced_IncreaseFrameRateAndResolution) { + adapter_.SetDegradationPreference(DegradationPreference::BALANCED); + FakeVideoStream fake_stream( + &adapter_, &input_state_provider_, kBalancedHighResolutionPixels, + kBalancedHighFrameRateFps, kDefaultMinPixelsPerFrame); // The desired starting point of this test is having adapted frame rate twice. // This requires performing a number of adaptations. constexpr size_t kReducedPixelsFirstStep = @@ -385,41 +422,41 @@ TEST(VideoStreamAdapterTest, Balanced_IncreaseFrameRateAndResolution) { "settings' medium pixel configuration"); // The first adaptation should affect the frame rate: See // Balanced_DecreaseResolution for explanation why. - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); EXPECT_EQ(static_cast(kBalancedHighFrameRateFps), - adapter.source_restrictions().max_frame_rate()); + adapter_.source_restrictions().max_frame_rate()); // The next three adaptations affects the resolution, because we have to reach // kBalancedMediumResolutionPixels before a lower frame rate is considered by // BalancedDegradationSettings. The number three is derived from the // static_asserts above. 
- fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); EXPECT_EQ(kReducedPixelsFirstStep, - adapter.source_restrictions().max_pixels_per_frame()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); + adapter_.source_restrictions().max_pixels_per_frame()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); EXPECT_EQ(kReducedPixelsSecondStep, - adapter.source_restrictions().max_pixels_per_frame()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); + adapter_.source_restrictions().max_pixels_per_frame()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); EXPECT_EQ(kReducedPixelsThirdStep, - adapter.source_restrictions().max_pixels_per_frame()); + adapter_.source_restrictions().max_pixels_per_frame()); // Thus, the next adaptation will reduce frame rate to // kBalancedMediumFrameRateFps. - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); EXPECT_EQ(static_cast(kBalancedMediumFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(3, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(2, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(3, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(2, adapter_.adaptation_counters().fps_adaptations); // Adapt up! // While our resolution is in the medium-range, the frame rate associated with // the next resolution configuration up ("high") is kBalancedHighFrameRateFps // and "balanced" prefers adapting frame rate if not already applied. 
{ - Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); EXPECT_EQ(static_cast(kBalancedHighFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(3, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(3, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); } // Now that we have already achieved the next frame rate up, we act according // to "maintain-framerate". We go back up in resolution. Due to rounding @@ -429,63 +466,61 @@ TEST(VideoStreamAdapterTest, Balanced_IncreaseFrameRateAndResolution) { constexpr size_t kReducedPixelsSecondStepUp = (kReducedPixelsThirdStep * 5) / 3; { - Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); EXPECT_EQ(kReducedPixelsSecondStepUp, - adapter.source_restrictions().target_pixels_per_frame()); - EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().target_pixels_per_frame()); + EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); } // Now that our resolution is back in the high-range, the next frame rate to // try out is "unlimited". 
{ - Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); - EXPECT_EQ(absl::nullopt, adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(0, adapter.adaptation_counters().fps_adaptations); + EXPECT_EQ(absl::nullopt, adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(0, adapter_.adaptation_counters().fps_adaptations); } // Now only adapting resolution remains. constexpr size_t kReducedPixelsFirstStepUp = (kReducedPixelsSecondStepUp * 5) / 3; { - Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); EXPECT_EQ(kReducedPixelsFirstStepUp, - adapter.source_restrictions().target_pixels_per_frame()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(0, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().target_pixels_per_frame()); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(0, adapter_.adaptation_counters().fps_adaptations); } // The last step up should make us entirely unrestricted. 
{ - Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); - EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions()); - EXPECT_EQ(0, adapter.adaptation_counters().Total()); + EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions()); + EXPECT_EQ(0, adapter_.adaptation_counters().Total()); } } -TEST(VideoStreamAdapterTest, Balanced_LimitReached) { - webrtc::test::ScopedFieldTrials balanced_field_trials( - BalancedFieldTrialConfig()); - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::BALANCED); - FakeVideoStream fake_stream(&adapter, kBalancedLowResolutionPixels, - kBalancedLowFrameRateFps, - kDefaultMinPixelsPerFrame); +TEST_F(VideoStreamAdapterTest, Balanced_LimitReached) { + adapter_.SetDegradationPreference(DegradationPreference::BALANCED); + FakeVideoStream fake_stream( + &adapter_, &input_state_provider_, kBalancedLowResolutionPixels, + kBalancedLowFrameRateFps, kDefaultMinPixelsPerFrame); // Attempting to adapt up while unrestricted should result in kLimitReached. EXPECT_EQ(Adaptation::Status::kLimitReached, - adapter.GetAdaptationUp().status()); + adapter_.GetAdaptationUp().status()); // Adapting down once result in restricted frame rate, in this case we reach // the lowest possible frame rate immediately: kBalancedLowFrameRateFps. 
- fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); + EXPECT_CALL(encoder_stats_observer_, OnMinPixelLimitReached()).Times(2); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); EXPECT_EQ(static_cast(kBalancedLowFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); // Any further adaptation must follow "maintain-framerate" rules (these are // covered in more depth by the MaintainFramerate tests). This test does not // assert exactly how resolution is adjusted, only that resolution always @@ -494,241 +529,387 @@ TEST(VideoStreamAdapterTest, Balanced_LimitReached) { bool did_reach_limit = false; // If we have not reached the limit within 5 adaptations something is wrong... for (int i = 0; i < 5; i++) { - Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); if (adaptation.status() == Adaptation::Status::kLimitReached) { did_reach_limit = true; break; } EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); - EXPECT_LT(adapter.source_restrictions().max_pixels_per_frame().value(), + EXPECT_LT(adapter_.source_restrictions().max_pixels_per_frame().value(), previous_resolution); previous_resolution = - adapter.source_restrictions().max_pixels_per_frame().value(); + adapter_.source_restrictions().max_pixels_per_frame().value(); } EXPECT_TRUE(did_reach_limit); // Frame rate restrictions are the same as before. EXPECT_EQ(static_cast(kBalancedLowFrameRateFps), - adapter.source_restrictions().max_frame_rate()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + adapter_.source_restrictions().max_frame_rate()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); } // kAwaitingPreviousAdaptation is only supported in "maintain-framerate". 
-TEST(VideoStreamAdapterTest, MaintainFramerate_AwaitingPreviousAdaptationDown) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - adapter.SetInput(InputState(1280 * 720, 30, kDefaultMinPixelsPerFrame)); +TEST_F(VideoStreamAdapterTest, + MaintainFramerate_AwaitingPreviousAdaptationDown) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + input_state_provider_.SetInputState(1280 * 720, 30, + kDefaultMinPixelsPerFrame); // Adapt down once, but don't update the input. - adapter.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); + adapter_.ApplyAdaptation(adapter_.GetAdaptationDown(), nullptr); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); { // Having performed the adaptation, but not updated the input based on the // new restrictions, adapting again in the same direction will not work. - Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation, adaptation.status()); } } // kAwaitingPreviousAdaptation is only supported in "maintain-framerate". -TEST(VideoStreamAdapterTest, MaintainFramerate_AwaitingPreviousAdaptationUp) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, MaintainFramerate_AwaitingPreviousAdaptationUp) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Perform two adaptation down so that adapting up twice is possible. 
- fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(2, adapter.adaptation_counters().resolution_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(2, adapter_.adaptation_counters().resolution_adaptations); // Adapt up once, but don't update the input. - adapter.ApplyAdaptation(adapter.GetAdaptationUp()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); + adapter_.ApplyAdaptation(adapter_.GetAdaptationUp(), nullptr); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); { // Having performed the adaptation, but not updated the input based on the // new restrictions, adapting again in the same direction will not work. - Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation, adaptation.status()); } } -TEST(VideoStreamAdapterTest, - MaintainResolution_AdaptsUpAfterSwitchingDegradationPreference) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, + MaintainResolution_AdaptsUpAfterSwitchingDegradationPreference) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Adapt down in fps for later. 
- fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationUp()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); - EXPECT_EQ(0, adapter.adaptation_counters().resolution_adaptations); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); + EXPECT_EQ(0, adapter_.adaptation_counters().resolution_adaptations); // We should be able to adapt in framerate one last time after the change of // degradation preference. 
- adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - Adaptation adaptation = adapter.GetAdaptationUp(); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationUp()); - EXPECT_EQ(0, adapter.adaptation_counters().fps_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp()); + EXPECT_EQ(0, adapter_.adaptation_counters().fps_adaptations); } -TEST(VideoStreamAdapterTest, - MaintainFramerate_AdaptsUpAfterSwitchingDegradationPreference) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, + MaintainFramerate_AdaptsUpAfterSwitchingDegradationPreference) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Adapt down in resolution for later. 
- fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationUp()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); - EXPECT_EQ(0, adapter.adaptation_counters().fps_adaptations); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp()); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); + EXPECT_EQ(0, adapter_.adaptation_counters().fps_adaptations); // We should be able to adapt in framerate one last time after the change of // degradation preference. 
- adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - Adaptation adaptation = adapter.GetAdaptationUp(); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); - fake_stream.ApplyAdaptation(adapter.GetAdaptationUp()); - EXPECT_EQ(0, adapter.adaptation_counters().resolution_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationUp()); + EXPECT_EQ(0, adapter_.adaptation_counters().resolution_adaptations); } -TEST(VideoStreamAdapterTest, - PendingResolutionIncreaseAllowsAdaptUpAfterSwitchToMaintainResolution) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, + PendingResolutionIncreaseAllowsAdaptUpAfterSwitchToMaintainResolution) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Adapt fps down so we can adapt up later in the test. - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); // Apply adaptation up but don't update input. 
- adapter.ApplyAdaptation(adapter.GetAdaptationUp()); + adapter_.ApplyAdaptation(adapter_.GetAdaptationUp(), nullptr); EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation, - adapter.GetAdaptationUp().status()); + adapter_.GetAdaptationUp().status()); - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - Adaptation adaptation = adapter.GetAdaptationUp(); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); } -TEST(VideoStreamAdapterTest, - MaintainFramerate_AdaptsDownAfterSwitchingDegradationPreference) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, + MaintainFramerate_AdaptsDownAfterSwitchingDegradationPreference) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Adapt down once, should change FPS. - fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); // Adaptation down should apply after the degradation prefs change. 
- Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); } -TEST(VideoStreamAdapterTest, - MaintainResolution_AdaptsDownAfterSwitchingDegradationPreference) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F(VideoStreamAdapterTest, + MaintainResolution_AdaptsDownAfterSwitchingDegradationPreference) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Adapt down once, should change FPS. 
- fake_stream.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); + fake_stream.ApplyAdaptation(adapter_.GetAdaptationDown()); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - Adaptation adaptation = adapter.GetAdaptationDown(); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); fake_stream.ApplyAdaptation(adaptation); - EXPECT_EQ(1, adapter.adaptation_counters().fps_adaptations); - EXPECT_EQ(1, adapter.adaptation_counters().resolution_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().fps_adaptations); + EXPECT_EQ(1, adapter_.adaptation_counters().resolution_adaptations); } -TEST(VideoStreamAdapterTest, - PendingResolutionDecreaseAllowsAdaptDownAfterSwitchToMaintainResolution) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, +TEST_F( + VideoStreamAdapterTest, + PendingResolutionDecreaseAllowsAdaptDownAfterSwitchToMaintainResolution) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // Apply adaptation but don't update the input. 
- adapter.ApplyAdaptation(adapter.GetAdaptationDown()); + adapter_.ApplyAdaptation(adapter_.GetAdaptationDown(), nullptr); EXPECT_EQ(Adaptation::Status::kAwaitingPreviousAdaptation, - adapter.GetAdaptationDown().status()); - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - Adaptation adaptation = adapter.GetAdaptationDown(); + adapter_.GetAdaptationDown().status()); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); } -TEST(VideoStreamAdapterTest, PeekNextRestrictions) { - VideoStreamAdapter adapter; +TEST_F(VideoStreamAdapterTest, RestrictionBroadcasted) { + FakeVideoStreamAdapterListner listener; + adapter_.AddRestrictionsListener(&listener); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, + kDefaultMinPixelsPerFrame); + // Not broadcast on invalid ApplyAdaptation. + { + Adaptation adaptation = adapter_.GetAdaptationUp(); + adapter_.ApplyAdaptation(adaptation, nullptr); + EXPECT_EQ(0, listener.calls()); + } + + // Broadcast on ApplyAdaptation. + { + Adaptation adaptation = adapter_.GetAdaptationDown(); + fake_stream.ApplyAdaptation(adaptation); + EXPECT_EQ(1, listener.calls()); + EXPECT_EQ(adaptation.restrictions(), listener.last_restrictions()); + } + + // Broadcast on ClearRestrictions(). + adapter_.ClearRestrictions(); + EXPECT_EQ(2, listener.calls()); + EXPECT_EQ(VideoSourceRestrictions(), listener.last_restrictions()); +} + +TEST_F(VideoStreamAdapterTest, AdaptationHasNextRestrcitions) { // Any non-disabled DegradationPreference will do. 
- adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - FakeVideoStream fake_stream(&adapter, 1280 * 720, 30, + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, kDefaultMinPixelsPerFrame); // When adaptation is not possible. { - Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kLimitReached, adaptation.status()); - EXPECT_EQ(adapter.PeekNextRestrictions(adaptation), - adapter.source_restrictions()); + EXPECT_EQ(adaptation.restrictions(), adapter_.source_restrictions()); + EXPECT_EQ(0, adaptation.counters().Total()); } // When we adapt down. { - Adaptation adaptation = adapter.GetAdaptationDown(); + Adaptation adaptation = adapter_.GetAdaptationDown(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); - VideoSourceRestrictions next_restrictions = - adapter.PeekNextRestrictions(adaptation); fake_stream.ApplyAdaptation(adaptation); - EXPECT_EQ(next_restrictions, adapter.source_restrictions()); + EXPECT_EQ(adaptation.restrictions(), adapter_.source_restrictions()); + EXPECT_EQ(adaptation.counters(), adapter_.adaptation_counters()); } // When we adapt up. 
{ - Adaptation adaptation = adapter.GetAdaptationUp(); + Adaptation adaptation = adapter_.GetAdaptationUp(); EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); - VideoSourceRestrictions next_restrictions = - adapter.PeekNextRestrictions(adaptation); fake_stream.ApplyAdaptation(adaptation); - EXPECT_EQ(next_restrictions, adapter.source_restrictions()); + EXPECT_EQ(adaptation.restrictions(), adapter_.source_restrictions()); + EXPECT_EQ(adaptation.counters(), adapter_.adaptation_counters()); } } -TEST(VideoStreamAdapterTest, - SetDegradationPreferenceToOrFromBalancedClearsRestrictions) { - VideoStreamAdapter adapter; - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - adapter.SetInput(InputState(1280 * 720, 30, kDefaultMinPixelsPerFrame)); - adapter.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_NE(VideoSourceRestrictions(), adapter.source_restrictions()); - EXPECT_NE(0, adapter.adaptation_counters().Total()); +TEST_F(VideoStreamAdapterTest, + SetDegradationPreferenceToOrFromBalancedClearsRestrictions) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + input_state_provider_.SetInputState(1280 * 720, 30, + kDefaultMinPixelsPerFrame); + adapter_.ApplyAdaptation(adapter_.GetAdaptationDown(), nullptr); + EXPECT_NE(VideoSourceRestrictions(), adapter_.source_restrictions()); + EXPECT_NE(0, adapter_.adaptation_counters().Total()); // Changing from non-balanced to balanced clears the restrictions. - adapter.SetDegradationPreference(DegradationPreference::BALANCED); - EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions()); - EXPECT_EQ(0, adapter.adaptation_counters().Total()); + adapter_.SetDegradationPreference(DegradationPreference::BALANCED); + EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions()); + EXPECT_EQ(0, adapter_.adaptation_counters().Total()); // Apply adaptation again. 
- adapter.ApplyAdaptation(adapter.GetAdaptationDown()); - EXPECT_NE(VideoSourceRestrictions(), adapter.source_restrictions()); - EXPECT_NE(0, adapter.adaptation_counters().Total()); + adapter_.ApplyAdaptation(adapter_.GetAdaptationDown(), nullptr); + EXPECT_NE(VideoSourceRestrictions(), adapter_.source_restrictions()); + EXPECT_NE(0, adapter_.adaptation_counters().Total()); // Changing from balanced to non-balanced clears the restrictions. - adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - EXPECT_EQ(VideoSourceRestrictions(), adapter.source_restrictions()); - EXPECT_EQ(0, adapter.adaptation_counters().Total()); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + EXPECT_EQ(VideoSourceRestrictions(), adapter_.source_restrictions()); + EXPECT_EQ(0, adapter_.adaptation_counters().Total()); +} + +TEST_F(VideoStreamAdapterTest, + GetAdaptDownResolutionAdaptsResolutionInMaintainFramerate) { + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + input_state_provider_.SetInputState(1280 * 720, 30, + kDefaultMinPixelsPerFrame); + + auto adaptation = adapter_.GetAdaptDownResolution(); + EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); + EXPECT_EQ(1, adaptation.counters().resolution_adaptations); + EXPECT_EQ(0, adaptation.counters().fps_adaptations); +} + +TEST_F(VideoStreamAdapterTest, + GetAdaptDownResolutionReturnsWithStatusInDisabledAndMaintainResolution) { + adapter_.SetDegradationPreference(DegradationPreference::DISABLED); + input_state_provider_.SetInputState(1280 * 720, 30, + kDefaultMinPixelsPerFrame); + EXPECT_EQ(Adaptation::Status::kAdaptationDisabled, + adapter_.GetAdaptDownResolution().status()); + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); + EXPECT_EQ(Adaptation::Status::kLimitReached, + adapter_.GetAdaptDownResolution().status()); +} + +TEST_F(VideoStreamAdapterTest, + GetAdaptDownResolutionAdaptsFpsAndResolutionInBalanced) { + 
// Note: This test depends on BALANCED implementation, but with current + // implementation and input state settings, BALANCED will adapt resolution and + // frame rate once. + adapter_.SetDegradationPreference(DegradationPreference::BALANCED); + input_state_provider_.SetInputState(1280 * 720, 30, + kDefaultMinPixelsPerFrame); + + auto adaptation = adapter_.GetAdaptDownResolution(); + EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); + EXPECT_EQ(1, adaptation.counters().resolution_adaptations); + EXPECT_EQ(1, adaptation.counters().fps_adaptations); +} + +TEST_F( + VideoStreamAdapterTest, + GetAdaptDownResolutionAdaptsOnlyResolutionIfFpsAlreadyAdapterInBalanced) { + // Note: This test depends on BALANCED implementation, but with current + // implementation and input state settings, BALANCED will adapt resolution + // only. + adapter_.SetDegradationPreference(DegradationPreference::BALANCED); + input_state_provider_.SetInputState(1280 * 720, 5, kDefaultMinPixelsPerFrame); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, + kDefaultMinPixelsPerFrame); + + auto first_adaptation = adapter_.GetAdaptationDown(); + fake_stream.ApplyAdaptation(first_adaptation); + + auto adaptation = adapter_.GetAdaptDownResolution(); + EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); + EXPECT_EQ(1, adaptation.counters().resolution_adaptations); + EXPECT_EQ(first_adaptation.counters().fps_adaptations, + adaptation.counters().fps_adaptations); +} + +TEST_F(VideoStreamAdapterTest, + GetAdaptDownResolutionAdaptsOnlyFpsIfResolutionLowInBalanced) { + // Note: This test depends on BALANCED implementation, but with current + // implementation and input state settings, BALANCED will adapt resolution + // only. 
+ adapter_.SetDegradationPreference(DegradationPreference::BALANCED); + input_state_provider_.SetInputState(kDefaultMinPixelsPerFrame, 30, + kDefaultMinPixelsPerFrame); + + auto adaptation = adapter_.GetAdaptDownResolution(); + EXPECT_EQ(Adaptation::Status::kValid, adaptation.status()); + EXPECT_EQ(0, adaptation.counters().resolution_adaptations); + EXPECT_EQ(1, adaptation.counters().fps_adaptations); +} + +TEST_F(VideoStreamAdapterTest, + AdaptationDisabledStatusAlwaysWhenDegradationPreferenceDisabled) { + adapter_.SetDegradationPreference(DegradationPreference::DISABLED); + input_state_provider_.SetInputState(1280 * 720, 30, + kDefaultMinPixelsPerFrame); + EXPECT_EQ(Adaptation::Status::kAdaptationDisabled, + adapter_.GetAdaptationDown().status()); + EXPECT_EQ(Adaptation::Status::kAdaptationDisabled, + adapter_.GetAdaptationUp().status()); + EXPECT_EQ(Adaptation::Status::kAdaptationDisabled, + adapter_.GetAdaptDownResolution().status()); +} + +TEST_F(VideoStreamAdapterTest, AdaptationConstraintAllowsAdaptationsUp) { + testing::StrictMock adaptation_constraint; + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + adapter_.AddAdaptationConstraint(&adaptation_constraint); + input_state_provider_.SetInputState(1280 * 720, 30, + kDefaultMinPixelsPerFrame); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, + kDefaultMinPixelsPerFrame); + // Adapt down once so we can adapt up later. 
+ auto first_adaptation = adapter_.GetAdaptationDown(); + fake_stream.ApplyAdaptation(first_adaptation); + + EXPECT_CALL(adaptation_constraint, + IsAdaptationUpAllowed(_, first_adaptation.restrictions(), _)) + .WillOnce(Return(true)); + EXPECT_EQ(Adaptation::Status::kValid, adapter_.GetAdaptationUp().status()); + adapter_.RemoveAdaptationConstraint(&adaptation_constraint); +} + +TEST_F(VideoStreamAdapterTest, AdaptationConstraintDisallowsAdaptationsUp) { + testing::StrictMock adaptation_constraint; + adapter_.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); + adapter_.AddAdaptationConstraint(&adaptation_constraint); + input_state_provider_.SetInputState(1280 * 720, 30, + kDefaultMinPixelsPerFrame); + FakeVideoStream fake_stream(&adapter_, &input_state_provider_, 1280 * 720, 30, + kDefaultMinPixelsPerFrame); + // Adapt down once so we can adapt up later. + auto first_adaptation = adapter_.GetAdaptationDown(); + fake_stream.ApplyAdaptation(first_adaptation); + + EXPECT_CALL(adaptation_constraint, + IsAdaptationUpAllowed(_, first_adaptation.restrictions(), _)) + .WillOnce(Return(false)); + EXPECT_EQ(Adaptation::Status::kRejectedByConstraint, + adapter_.GetAdaptationUp().status()); + adapter_.RemoveAdaptationConstraint(&adaptation_constraint); } // Death tests. 
@@ -738,21 +919,25 @@ TEST(VideoStreamAdapterTest, TEST(VideoStreamAdapterDeathTest, SetDegradationPreferenceInvalidatesAdaptations) { - VideoStreamAdapter adapter; + FakeVideoStreamInputStateProvider input_state_provider; + testing::StrictMock encoder_stats_observer_; + VideoStreamAdapter adapter(&input_state_provider, &encoder_stats_observer_); adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_FRAMERATE); - adapter.SetInput(InputState(1280 * 720, 30, kDefaultMinPixelsPerFrame)); + input_state_provider.SetInputState(1280 * 720, 30, kDefaultMinPixelsPerFrame); Adaptation adaptation = adapter.GetAdaptationDown(); adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - EXPECT_DEATH(adapter.ApplyAdaptation(adaptation), ""); + EXPECT_DEATH(adapter.ApplyAdaptation(adaptation, nullptr), ""); } -TEST(VideoStreamAdapterDeathTest, SetInputInvalidatesAdaptations) { - VideoStreamAdapter adapter; +TEST(VideoStreamAdapterDeathTest, AdaptDownInvalidatesAdaptations) { + FakeVideoStreamInputStateProvider input_state_provider; + testing::StrictMock encoder_stats_observer_; + VideoStreamAdapter adapter(&input_state_provider, &encoder_stats_observer_); adapter.SetDegradationPreference(DegradationPreference::MAINTAIN_RESOLUTION); - adapter.SetInput(InputState(1280 * 720, 30, kDefaultMinPixelsPerFrame)); + input_state_provider.SetInputState(1280 * 720, 30, kDefaultMinPixelsPerFrame); Adaptation adaptation = adapter.GetAdaptationDown(); - adapter.SetInput(InputState(1280 * 720, 31, kDefaultMinPixelsPerFrame)); - EXPECT_DEATH(adapter.PeekNextRestrictions(adaptation), ""); + adapter.GetAdaptationDown(); + EXPECT_DEATH(adapter.ApplyAdaptation(adaptation, nullptr), ""); } #endif // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) diff --git a/call/adaptation/video_stream_input_state.cc b/call/adaptation/video_stream_input_state.cc index dc3315e6d0..9c0d475902 100644 --- a/call/adaptation/video_stream_input_state.cc +++ 
b/call/adaptation/video_stream_input_state.cc @@ -19,7 +19,8 @@ VideoStreamInputState::VideoStreamInputState() frame_size_pixels_(absl::nullopt), frames_per_second_(0), video_codec_type_(VideoCodecType::kVideoCodecGeneric), - min_pixels_per_frame_(kDefaultMinPixelsPerFrame) {} + min_pixels_per_frame_(kDefaultMinPixelsPerFrame), + single_active_stream_pixels_(absl::nullopt) {} void VideoStreamInputState::set_has_input(bool has_input) { has_input_ = has_input; @@ -43,6 +44,11 @@ void VideoStreamInputState::set_min_pixels_per_frame(int min_pixels_per_frame) { min_pixels_per_frame_ = min_pixels_per_frame; } +void VideoStreamInputState::set_single_active_stream_pixels( + absl::optional single_active_stream_pixels) { + single_active_stream_pixels_ = single_active_stream_pixels; +} + bool VideoStreamInputState::has_input() const { return has_input_; } @@ -63,6 +69,10 @@ int VideoStreamInputState::min_pixels_per_frame() const { return min_pixels_per_frame_; } +absl::optional VideoStreamInputState::single_active_stream_pixels() const { + return single_active_stream_pixels_; +} + bool VideoStreamInputState::HasInputFrameSizeAndFramesPerSecond() const { return has_input_ && frame_size_pixels_.has_value(); } diff --git a/call/adaptation/video_stream_input_state.h b/call/adaptation/video_stream_input_state.h index af0d7c78e9..191e22386a 100644 --- a/call/adaptation/video_stream_input_state.h +++ b/call/adaptation/video_stream_input_state.h @@ -27,12 +27,15 @@ class VideoStreamInputState { void set_frames_per_second(int frames_per_second); void set_video_codec_type(VideoCodecType video_codec_type); void set_min_pixels_per_frame(int min_pixels_per_frame); + void set_single_active_stream_pixels( + absl::optional single_active_stream_pixels); bool has_input() const; absl::optional frame_size_pixels() const; int frames_per_second() const; VideoCodecType video_codec_type() const; int min_pixels_per_frame() const; + absl::optional single_active_stream_pixels() const; bool 
HasInputFrameSizeAndFramesPerSecond() const; @@ -42,6 +45,7 @@ class VideoStreamInputState { int frames_per_second_; VideoCodecType video_codec_type_; int min_pixels_per_frame_; + absl::optional single_active_stream_pixels_; }; } // namespace webrtc diff --git a/call/adaptation/video_stream_input_state_provider.cc b/call/adaptation/video_stream_input_state_provider.cc index eac30bbfac..3261af39ea 100644 --- a/call/adaptation/video_stream_input_state_provider.cc +++ b/call/adaptation/video_stream_input_state_provider.cc @@ -10,36 +10,43 @@ #include "call/adaptation/video_stream_input_state_provider.h" +#include "call/adaptation/video_stream_adapter.h" + namespace webrtc { VideoStreamInputStateProvider::VideoStreamInputStateProvider( VideoStreamEncoderObserver* frame_rate_provider) : frame_rate_provider_(frame_rate_provider) {} +VideoStreamInputStateProvider::~VideoStreamInputStateProvider() {} + void VideoStreamInputStateProvider::OnHasInputChanged(bool has_input) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); input_state_.set_has_input(has_input); } void VideoStreamInputStateProvider::OnFrameSizeObserved(int frame_size_pixels) { RTC_DCHECK_GT(frame_size_pixels, 0); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); input_state_.set_frame_size_pixels(frame_size_pixels); } void VideoStreamInputStateProvider::OnEncoderSettingsChanged( EncoderSettings encoder_settings) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); input_state_.set_video_codec_type( encoder_settings.encoder_config().codec_type); input_state_.set_min_pixels_per_frame( encoder_settings.encoder_info().scaling_settings.min_pixels_per_frame); + input_state_.set_single_active_stream_pixels( + VideoStreamAdapter::GetSingleActiveLayerPixels( + encoder_settings.video_codec())); } VideoStreamInputState VideoStreamInputStateProvider::InputState() { // GetInputFrameRate() is thread-safe. 
int input_fps = frame_rate_provider_->GetInputFrameRate(); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); input_state_.set_frames_per_second(input_fps); return input_state_; } diff --git a/call/adaptation/video_stream_input_state_provider.h b/call/adaptation/video_stream_input_state_provider.h index 7093e97fdd..f4a3e0bfa0 100644 --- a/call/adaptation/video_stream_input_state_provider.h +++ b/call/adaptation/video_stream_input_state_provider.h @@ -14,7 +14,7 @@ #include "api/video/video_stream_encoder_observer.h" #include "call/adaptation/encoder_settings.h" #include "call/adaptation/video_stream_input_state.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -22,17 +22,18 @@ class VideoStreamInputStateProvider { public: VideoStreamInputStateProvider( VideoStreamEncoderObserver* frame_rate_provider); + virtual ~VideoStreamInputStateProvider(); void OnHasInputChanged(bool has_input); void OnFrameSizeObserved(int frame_size_pixels); void OnEncoderSettingsChanged(EncoderSettings encoder_settings); - VideoStreamInputState InputState(); + virtual VideoStreamInputState InputState(); private: - mutable rtc::CriticalSection crit_; + Mutex mutex_; VideoStreamEncoderObserver* const frame_rate_provider_; - VideoStreamInputState input_state_ RTC_GUARDED_BY(crit_); + VideoStreamInputState input_state_ RTC_GUARDED_BY(mutex_); }; } // namespace webrtc diff --git a/call/adaptation/video_stream_input_state_provider_unittest.cc b/call/adaptation/video_stream_input_state_provider_unittest.cc index 49c662c581..5da2ef21cd 100644 --- a/call/adaptation/video_stream_input_state_provider_unittest.cc +++ b/call/adaptation/video_stream_input_state_provider_unittest.cc @@ -28,6 +28,7 @@ TEST(VideoStreamInputStateProviderTest, DefaultValues) { EXPECT_EQ(0, input_state.frames_per_second()); EXPECT_EQ(VideoCodecType::kVideoCodecGeneric, input_state.video_codec_type()); EXPECT_EQ(kDefaultMinPixelsPerFrame, 
input_state.min_pixels_per_frame()); + EXPECT_EQ(absl::nullopt, input_state.single_active_stream_pixels()); } TEST(VideoStreamInputStateProviderTest, ValuesSet) { @@ -40,14 +41,22 @@ TEST(VideoStreamInputStateProviderTest, ValuesSet) { encoder_info.scaling_settings.min_pixels_per_frame = 1337; VideoEncoderConfig encoder_config; encoder_config.codec_type = VideoCodecType::kVideoCodecVP9; + VideoCodec video_codec; + video_codec.codecType = VideoCodecType::kVideoCodecVP8; + video_codec.numberOfSimulcastStreams = 2; + video_codec.simulcastStream[0].active = false; + video_codec.simulcastStream[1].active = true; + video_codec.simulcastStream[1].width = 111; + video_codec.simulcastStream[1].height = 222; input_state_provider.OnEncoderSettingsChanged(EncoderSettings( - std::move(encoder_info), std::move(encoder_config), VideoCodec())); + std::move(encoder_info), std::move(encoder_config), video_codec)); VideoStreamInputState input_state = input_state_provider.InputState(); EXPECT_EQ(true, input_state.has_input()); EXPECT_EQ(42, input_state.frame_size_pixels()); EXPECT_EQ(123, input_state.frames_per_second()); EXPECT_EQ(VideoCodecType::kVideoCodecVP9, input_state.video_codec_type()); EXPECT_EQ(1337, input_state.min_pixels_per_frame()); + EXPECT_EQ(111 * 222, input_state.single_active_stream_pixels()); } } // namespace webrtc diff --git a/call/audio_receive_stream.h b/call/audio_receive_stream.h index d4012bf7e3..8403e6bea0 100644 --- a/call/audio_receive_stream.h +++ b/call/audio_receive_stream.h @@ -20,17 +20,14 @@ #include "api/audio_codecs/audio_decoder_factory.h" #include "api/call/transport.h" #include "api/crypto/crypto_options.h" -#include "api/crypto/frame_decryptor_interface.h" -#include "api/frame_transformer_interface.h" #include "api/rtp_parameters.h" -#include "api/scoped_refptr.h" -#include "api/transport/rtp/rtp_source.h" +#include "call/receive_stream.h" #include "call/rtp_config.h" namespace webrtc { class AudioSinkInterface; -class AudioReceiveStream { 
+class AudioReceiveStream : public MediaReceiveStream { public: struct Stats { Stats(); @@ -42,6 +39,7 @@ class AudioReceiveStream { uint64_t fec_packets_received = 0; uint64_t fec_packets_discarded = 0; uint32_t packets_lost = 0; + uint32_t nacks_sent = 0; std::string codec_name; absl::optional codec_payload_type; uint32_t jitter_ms = 0; @@ -90,6 +88,13 @@ class AudioReceiveStream { int32_t total_interruption_duration_ms = 0; // https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-estimatedplayouttimestamp absl::optional estimated_playout_ntp_timestamp_ms; + // Remote outbound stats derived by the received RTCP sender reports. + // https://w3c.github.io/webrtc-stats/#remoteoutboundrtpstats-dict* + absl::optional last_sender_report_timestamp_ms; + absl::optional last_sender_report_remote_timestamp_ms; + uint32_t sender_reports_packets_sent = 0; + uint64_t sender_reports_bytes_sent = 0; + uint64_t sender_reports_reports_count = 0; }; struct Config { @@ -99,29 +104,14 @@ class AudioReceiveStream { std::string ToString() const; // Receive-stream specific RTP settings. - struct Rtp { + struct Rtp : public RtpConfig { Rtp(); ~Rtp(); std::string ToString() const; - // Synchronization source (stream identifier) to be received. - uint32_t remote_ssrc = 0; - - // Sender SSRC used for sending RTCP (such as receiver reports). - uint32_t local_ssrc = 0; - - // Enable feedback for send side bandwidth estimation. - // See - // https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions - // for details. - bool transport_cc = false; - // See NackConfig for description. NackConfig nack; - - // RTP header extensions used for the received stream. - std::vector extensions; } rtp; Transport* rtcp_send_transport = nullptr; @@ -150,24 +140,32 @@ class AudioReceiveStream { // An optional custom frame decryptor that allows the entire frame to be // decrypted in whatever way the caller choses. This is not required by // default. 
+ // TODO(tommi): Remove this member variable from the struct. It's not + // a part of the AudioReceiveStream state but rather a pass through + // variable. rtc::scoped_refptr frame_decryptor; // An optional frame transformer used by insertable streams to transform // encoded frames. + // TODO(tommi): Remove this member variable from the struct. It's not + // a part of the AudioReceiveStream state but rather a pass through + // variable. rtc::scoped_refptr frame_transformer; }; - // Reconfigure the stream according to the Configuration. - virtual void Reconfigure(const Config& config) = 0; + // Methods that support reconfiguring the stream post initialization. + virtual void SetDecoderMap(std::map decoder_map) = 0; + virtual void SetUseTransportCcAndNackHistory(bool use_transport_cc, + int history_ms) = 0; + // Set/change the rtp header extensions. Must be called on the packet + // delivery thread. + virtual void SetRtpExtensions(std::vector extensions) = 0; - // Starts stream activity. - // When a stream is active, it can receive, process and deliver packets. - virtual void Start() = 0; - // Stops stream activity. - // When a stream is stopped, it can't receive, process or deliver packets. - virtual void Stop() = 0; + // Returns true if the stream has been started. + virtual bool IsRunning() const = 0; - virtual Stats GetStats() const = 0; + virtual Stats GetStats(bool get_and_clear_legacy_stats) const = 0; + Stats GetStats() { return GetStats(/*get_and_clear_legacy_stats=*/true); } // Sets an audio sink that receives unmixed audio from the receive stream. // Ownership of the sink is managed by the caller. @@ -191,8 +189,6 @@ class AudioReceiveStream { // Returns current value of base minimum delay in milliseconds. 
virtual int GetBaseMinimumPlayoutDelayMs() const = 0; - virtual std::vector GetSources() const = 0; - protected: virtual ~AudioReceiveStream() {} }; diff --git a/call/audio_send_stream.cc b/call/audio_send_stream.cc index ddcba031a7..916336b929 100644 --- a/call/audio_send_stream.cc +++ b/call/audio_send_stream.cc @@ -12,7 +12,6 @@ #include -#include "rtc_base/string_encode.h" #include "rtc_base/strings/audio_format_to_string.h" #include "rtc_base/strings/string_builder.h" @@ -27,17 +26,19 @@ AudioSendStream::Config::Config(Transport* send_transport) AudioSendStream::Config::~Config() = default; std::string AudioSendStream::Config::ToString() const { - char buf[1024]; - rtc::SimpleStringBuilder ss(buf); + rtc::StringBuilder ss; ss << "{rtp: " << rtp.ToString(); ss << ", rtcp_report_interval_ms: " << rtcp_report_interval_ms; ss << ", send_transport: " << (send_transport ? "(Transport)" : "null"); ss << ", min_bitrate_bps: " << min_bitrate_bps; ss << ", max_bitrate_bps: " << max_bitrate_bps; + ss << ", has audio_network_adaptor_config: " + << (audio_network_adaptor_config ? "true" : "false"); + ss << ", has_dscp: " << (has_dscp ? "true" : "false"); ss << ", send_codec_spec: " << (send_codec_spec ? send_codec_spec->ToString() : ""); - ss << '}'; - return ss.str(); + ss << "}"; + return ss.Release(); } AudioSendStream::Config::Rtp::Rtp() = default; @@ -48,6 +49,12 @@ std::string AudioSendStream::Config::Rtp::ToString() const { char buf[1024]; rtc::SimpleStringBuilder ss(buf); ss << "{ssrc: " << ssrc; + if (!rid.empty()) { + ss << ", rid: " << rid; + } + if (!mid.empty()) { + ss << ", mid: " << mid; + } ss << ", extmap-allow-mixed: " << (extmap_allow_mixed ? "true" : "false"); ss << ", extensions: ["; for (size_t i = 0; i < extensions.size(); ++i) { @@ -75,6 +82,8 @@ std::string AudioSendStream::Config::SendCodecSpec::ToString() const { ss << ", transport_cc_enabled: " << (transport_cc_enabled ? "true" : "false"); ss << ", cng_payload_type: " << (cng_payload_type ? 
rtc::ToString(*cng_payload_type) : ""); + ss << ", red_payload_type: " + << (red_payload_type ? rtc::ToString(*red_payload_type) : ""); ss << ", payload_type: " << payload_type; ss << ", format: " << rtc::ToString(format); ss << '}'; @@ -86,6 +95,7 @@ bool AudioSendStream::Config::SendCodecSpec::operator==( if (nack_enabled == rhs.nack_enabled && transport_cc_enabled == rhs.transport_cc_enabled && cng_payload_type == rhs.cng_payload_type && + red_payload_type == rhs.red_payload_type && payload_type == rhs.payload_type && format == rhs.format && target_bitrate_bps == rhs.target_bitrate_bps) { return true; diff --git a/call/audio_send_stream.h b/call/audio_send_stream.h index 86cea38938..e084d4219d 100644 --- a/call/audio_send_stream.h +++ b/call/audio_send_stream.h @@ -70,6 +70,7 @@ class AudioSendStream : public AudioSender { // per-pair the ReportBlockData represents the latest Report Block that was // received for that pair. std::vector report_block_datas; + uint32_t nacks_rcvd = 0; }; struct Config { @@ -140,6 +141,7 @@ class AudioSendStream : public AudioSender { bool nack_enabled = false; bool transport_cc_enabled = false; absl::optional cng_payload_type; + absl::optional red_payload_type; // If unset, use the encoder's default target bitrate. absl::optional target_bitrate_bps; }; diff --git a/call/audio_state.h b/call/audio_state.h index 89267c5ab3..79fb5cf981 100644 --- a/call/audio_state.h +++ b/call/audio_state.h @@ -12,6 +12,7 @@ #include "api/audio/audio_mixer.h" #include "api/scoped_refptr.h" +#include "modules/async_audio_processing/async_audio_processing.h" #include "modules/audio_device/include/audio_device.h" #include "modules/audio_processing/include/audio_processing.h" #include "rtc_base/ref_count.h" @@ -37,6 +38,9 @@ class AudioState : public rtc::RefCountInterface { // TODO(solenberg): Temporary: audio device module. 
rtc::scoped_refptr audio_device_module; + + rtc::scoped_refptr + async_audio_processing_factory; }; virtual AudioProcessing* audio_processing() = 0; diff --git a/call/bitrate_allocator.h b/call/bitrate_allocator.h index 8d9a1adb0e..c0d664b6f0 100644 --- a/call/bitrate_allocator.h +++ b/call/bitrate_allocator.h @@ -20,8 +20,9 @@ #include #include "api/call/bitrate_allocation.h" +#include "api/sequence_checker.h" #include "api/transport/network_types.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" namespace webrtc { @@ -148,7 +149,7 @@ class BitrateAllocator : public BitrateAllocatorInterface { // video send stream. static uint8_t GetTransmissionMaxBitrateMultiplier(); - SequenceChecker sequenced_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequenced_checker_; LimitObserver* const limit_observer_ RTC_GUARDED_BY(&sequenced_checker_); // Stored in a list to keep track of the insertion order. std::vector allocatable_tracks_ diff --git a/call/bitrate_estimator_tests.cc b/call/bitrate_estimator_tests.cc index 50da12bbdf..4634f6e147 100644 --- a/call/bitrate_estimator_tests.cc +++ b/call/bitrate_estimator_tests.cc @@ -19,6 +19,7 @@ #include "rtc_base/checks.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/thread_annotations.h" #include "test/call_test.h" @@ -49,7 +50,7 @@ class LogObserver { class Callback : public rtc::LogSink { public: void OnLogMessage(const std::string& message) override { - rtc::CritScope lock(&crit_sect_); + MutexLock lock(&mutex_); // Ignore log lines that are due to missing AST extensions, these are // logged when we switch back from AST to TOF until the wrapping bitrate // estimator gives up on using AST. 
@@ -78,15 +79,15 @@ class LogObserver { bool Wait() { return done_.Wait(test::CallTest::kDefaultTimeoutMs); } void PushExpectedLogLine(const std::string& expected_log_line) { - rtc::CritScope lock(&crit_sect_); + MutexLock lock(&mutex_); expected_log_lines_.push_back(expected_log_line); } private: typedef std::list Strings; - rtc::CriticalSection crit_sect_; - Strings received_log_lines_ RTC_GUARDED_BY(crit_sect_); - Strings expected_log_lines_ RTC_GUARDED_BY(crit_sect_); + Mutex mutex_; + Strings received_log_lines_ RTC_GUARDED_BY(mutex_); + Strings expected_log_lines_ RTC_GUARDED_BY(mutex_); rtc::Event done_; }; @@ -190,7 +191,7 @@ class BitrateEstimatorTest : public test::CallTest { send_stream_->Start(); VideoReceiveStream::Decoder decoder; - decoder.decoder_factory = &decoder_factory_; + test_->receive_config_.decoder_factory = &decoder_factory_; decoder.payload_type = test_->GetVideoSendConfig()->rtp.payload_type; decoder.video_format = SdpVideoFormat(test_->GetVideoSendConfig()->rtp.payload_name); diff --git a/call/call.cc b/call/call.cc index 210f72d40c..fb1d7cd3bc 100644 --- a/call/call.cc +++ b/call/call.cc @@ -13,23 +13,29 @@ #include #include +#include #include #include #include #include #include +#include "absl/functional/bind_front.h" #include "absl/types/optional.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/sequence_checker.h" #include "api/transport/network_control.h" #include "audio/audio_receive_stream.h" #include "audio/audio_send_stream.h" #include "audio/audio_state.h" +#include "call/adaptation/broadcast_resource_listener.h" #include "call/bitrate_allocator.h" #include "call/flexfec_receive_stream_impl.h" #include "call/receive_time_calculator.h" #include "call/rtp_stream_receiver_controller.h" #include "call/rtp_transport_controller_send.h" +#include "call/rtp_transport_controller_send_factory.h" +#include "call/version.h" #include "logging/rtc_event_log/events/rtc_event_audio_receive_stream_config.h" #include 
"logging/rtc_event_log/events/rtc_event_rtcp_packet_incoming.h" #include "logging/rtc_event_log/events/rtc_event_rtp_packet_incoming.h" @@ -41,7 +47,7 @@ #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "modules/utility/include/process_thread.h" #include "modules/video_coding/fec_controller_default.h" #include "rtc_base/checks.h" @@ -49,8 +55,8 @@ #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" -#include "rtc_base/synchronization/rw_lock_wrapper.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/thread_annotations.h" #include "rtc_base/time_utils.h" #include "rtc_base/trace_event.h" @@ -75,12 +81,10 @@ bool SendPeriodicFeedback(const std::vector& extensions) { return true; } -// TODO(nisse): This really begs for a shared context struct. 
-bool UseSendSideBwe(const std::vector& extensions, - bool transport_cc) { - if (!transport_cc) +bool UseSendSideBwe(const ReceiveStream::RtpConfig& rtp) { + if (!rtp.transport_cc) return false; - for (const auto& extension : extensions) { + for (const auto& extension : rtp.extensions) { if (extension.uri == RtpExtension::kTransportSequenceNumberUri || extension.uri == RtpExtension::kTransportSequenceNumberV2Uri) return true; @@ -88,18 +92,6 @@ bool UseSendSideBwe(const std::vector& extensions, return false; } -bool UseSendSideBwe(const VideoReceiveStream::Config& config) { - return UseSendSideBwe(config.rtp.extensions, config.rtp.transport_cc); -} - -bool UseSendSideBwe(const AudioReceiveStream::Config& config) { - return UseSendSideBwe(config.rtp.extensions, config.rtp.transport_cc); -} - -bool UseSendSideBwe(const FlexfecReceiveStream::Config& config) { - return UseSendSideBwe(config.rtp_header_extensions, config.transport_cc); -} - const int* FindKeyByValue(const std::map& m, int v) { for (const auto& kv : m) { if (kv.second == v) @@ -152,11 +144,6 @@ std::unique_ptr CreateRtcLogStreamConfig( return rtclog_config; } -bool IsRtcp(const uint8_t* packet, size_t length) { - RtpUtility::RtpHeaderParser rtp_parser(packet, length); - return rtp_parser.RTCP(); -} - TaskQueueBase* GetCurrentTaskQueueOrThread() { TaskQueueBase* current = TaskQueueBase::Current(); if (!current) @@ -168,6 +155,47 @@ TaskQueueBase* GetCurrentTaskQueueOrThread() { namespace internal { +// Wraps an injected resource in a BroadcastResourceListener and handles adding +// and removing adapter resources to individual VideoSendStreams. 
+class ResourceVideoSendStreamForwarder { + public: + ResourceVideoSendStreamForwarder( + rtc::scoped_refptr resource) + : broadcast_resource_listener_(resource) { + broadcast_resource_listener_.StartListening(); + } + ~ResourceVideoSendStreamForwarder() { + RTC_DCHECK(adapter_resources_.empty()); + broadcast_resource_listener_.StopListening(); + } + + rtc::scoped_refptr Resource() const { + return broadcast_resource_listener_.SourceResource(); + } + + void OnCreateVideoSendStream(VideoSendStream* video_send_stream) { + RTC_DCHECK(adapter_resources_.find(video_send_stream) == + adapter_resources_.end()); + auto adapter_resource = + broadcast_resource_listener_.CreateAdapterResource(); + video_send_stream->AddAdaptationResource(adapter_resource); + adapter_resources_.insert( + std::make_pair(video_send_stream, adapter_resource)); + } + + void OnDestroyVideoSendStream(VideoSendStream* video_send_stream) { + auto it = adapter_resources_.find(video_send_stream); + RTC_DCHECK(it != adapter_resources_.end()); + broadcast_resource_listener_.RemoveAdapterResource(it->second); + adapter_resources_.erase(it); + } + + private: + BroadcastResourceListener broadcast_resource_listener_; + std::map> + adapter_resources_; +}; + class Call final : public webrtc::Call, public PacketReceiver, public RecoveredPacketReceiver, @@ -177,7 +205,7 @@ class Call final : public webrtc::Call, Call(Clock* clock, const Call::Config& config, std::unique_ptr transport_send, - std::unique_ptr module_process_thread, + rtc::scoped_refptr module_process_thread, TaskQueueFactory* task_queue_factory); ~Call() override; @@ -212,10 +240,17 @@ class Call final : public webrtc::Call, void DestroyFlexfecReceiveStream( FlexfecReceiveStream* receive_stream) override; + void AddAdaptationResource(rtc::scoped_refptr resource) override; + RtpTransportControllerSendInterface* GetTransportControllerSend() override; Stats GetStats() const override; + const WebRtcKeyValueConfig& trials() const override; + + 
TaskQueueBase* network_thread() const override; + TaskQueueBase* worker_thread() const override; + // Implements PacketReceiver. DeliveryStatus DeliverPacket(MediaType media_type, rtc::CopyOnWriteBuffer packet, @@ -229,6 +264,12 @@ class Call final : public webrtc::Call, void OnAudioTransportOverheadChanged( int transport_overhead_per_packet) override; + void OnLocalSsrcUpdated(webrtc::AudioReceiveStream& stream, + uint32_t local_ssrc) override; + + void OnUpdateSyncGroup(webrtc::AudioReceiveStream& stream, + const std::string& sync_group) override; + void OnSentPacket(const rtc::SentPacket& sent_packet) override; // Implements TargetTransferRateObserver, @@ -241,153 +282,186 @@ class Call final : public webrtc::Call, void SetClientBitratePreferences(const BitrateSettings& preferences) override; private: - DeliveryStatus DeliverRtcp(MediaType media_type, - const uint8_t* packet, - size_t length); + // Thread-compatible class that collects received packet stats and exposes + // them as UMA histograms on destruction. 
+ class ReceiveStats { + public: + explicit ReceiveStats(Clock* clock); + ~ReceiveStats(); + + void AddReceivedRtcpBytes(int bytes); + void AddReceivedAudioBytes(int bytes, webrtc::Timestamp arrival_time); + void AddReceivedVideoBytes(int bytes, webrtc::Timestamp arrival_time); + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + RateCounter received_bytes_per_second_counter_ + RTC_GUARDED_BY(sequence_checker_); + RateCounter received_audio_bytes_per_second_counter_ + RTC_GUARDED_BY(sequence_checker_); + RateCounter received_video_bytes_per_second_counter_ + RTC_GUARDED_BY(sequence_checker_); + RateCounter received_rtcp_bytes_per_second_counter_ + RTC_GUARDED_BY(sequence_checker_); + absl::optional first_received_rtp_audio_timestamp_ + RTC_GUARDED_BY(sequence_checker_); + absl::optional last_received_rtp_audio_timestamp_ + RTC_GUARDED_BY(sequence_checker_); + absl::optional first_received_rtp_video_timestamp_ + RTC_GUARDED_BY(sequence_checker_); + absl::optional last_received_rtp_video_timestamp_ + RTC_GUARDED_BY(sequence_checker_); + }; + + // Thread-compatible class that collects sent packet stats and exposes + // them as UMA histograms on destruction, provided SetFirstPacketTime was + // called with a non-empty packet timestamp before the destructor. 
+ class SendStats { + public: + explicit SendStats(Clock* clock); + ~SendStats(); + + void SetFirstPacketTime(absl::optional first_sent_packet_time); + void PauseSendAndPacerBitrateCounters(); + void AddTargetBitrateSample(uint32_t target_bitrate_bps); + void SetMinAllocatableRate(BitrateAllocationLimits limits); + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker destructor_sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + Clock* const clock_ RTC_GUARDED_BY(destructor_sequence_checker_); + AvgCounter estimated_send_bitrate_kbps_counter_ + RTC_GUARDED_BY(sequence_checker_); + AvgCounter pacer_bitrate_kbps_counter_ RTC_GUARDED_BY(sequence_checker_); + uint32_t min_allocated_send_bitrate_bps_ RTC_GUARDED_BY(sequence_checker_){ + 0}; + absl::optional first_sent_packet_time_ + RTC_GUARDED_BY(destructor_sequence_checker_); + }; + + void DeliverRtcp(MediaType media_type, rtc::CopyOnWriteBuffer packet) + RTC_RUN_ON(network_thread_); DeliveryStatus DeliverRtp(MediaType media_type, rtc::CopyOnWriteBuffer packet, - int64_t packet_time_us); - void ConfigureSync(const std::string& sync_group) - RTC_EXCLUSIVE_LOCKS_REQUIRED(receive_crit_); + int64_t packet_time_us) RTC_RUN_ON(worker_thread_); + void ConfigureSync(const std::string& sync_group) RTC_RUN_ON(worker_thread_); void NotifyBweOfReceivedPacket(const RtpPacketReceived& packet, MediaType media_type) - RTC_SHARED_LOCKS_REQUIRED(receive_crit_); + RTC_RUN_ON(worker_thread_); - void UpdateSendHistograms(Timestamp first_sent_packet) - RTC_EXCLUSIVE_LOCKS_REQUIRED(&bitrate_crit_); - void UpdateReceiveHistograms(); - void UpdateHistograms(); void UpdateAggregateNetworkState(); - void RegisterRateObserver(); - - rtc::TaskQueue* network_queue() const { - return transport_send_ptr_->GetWorkerQueue(); - } + // Ensure that necessary process threads are started, and any required + // callbacks have been registered. 
+ void EnsureStarted() RTC_RUN_ON(worker_thread_); Clock* const clock_; TaskQueueFactory* const task_queue_factory_; + TaskQueueBase* const worker_thread_; + TaskQueueBase* const network_thread_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker send_transport_sequence_checker_; const int num_cpu_cores_; - const std::unique_ptr module_process_thread_; + const rtc::scoped_refptr module_process_thread_; const std::unique_ptr call_stats_; const std::unique_ptr bitrate_allocator_; - Call::Config config_; - SequenceChecker configuration_sequence_checker_; - SequenceChecker network_sequence_checker_; + const Call::Config config_ RTC_GUARDED_BY(worker_thread_); + // Maps to config_.trials, can be used from any thread via `trials()`. + const WebRtcKeyValueConfig& trials_; - NetworkState audio_network_state_; - NetworkState video_network_state_; - bool aggregate_network_up_ RTC_GUARDED_BY(configuration_sequence_checker_); + NetworkState audio_network_state_ RTC_GUARDED_BY(worker_thread_); + NetworkState video_network_state_ RTC_GUARDED_BY(worker_thread_); + // TODO(bugs.webrtc.org/11993): Move aggregate_network_up_ over to the + // network thread. + bool aggregate_network_up_ RTC_GUARDED_BY(worker_thread_); - std::unique_ptr receive_crit_; // Audio, Video, and FlexFEC receive streams are owned by the client that // creates them. + // TODO(bugs.webrtc.org/11993): Move audio_receive_streams_, + // video_receive_streams_ and sync_stream_mapping_ over to the network thread. std::set audio_receive_streams_ - RTC_GUARDED_BY(receive_crit_); + RTC_GUARDED_BY(worker_thread_); std::set video_receive_streams_ - RTC_GUARDED_BY(receive_crit_); - + RTC_GUARDED_BY(worker_thread_); std::map sync_stream_mapping_ - RTC_GUARDED_BY(receive_crit_); + RTC_GUARDED_BY(worker_thread_); // TODO(nisse): Should eventually be injected at creation, // with a single object in the bundled case. 
- RtpStreamReceiverController audio_receiver_controller_; - RtpStreamReceiverController video_receiver_controller_; + RtpStreamReceiverController audio_receiver_controller_ + RTC_GUARDED_BY(worker_thread_); + RtpStreamReceiverController video_receiver_controller_ + RTC_GUARDED_BY(worker_thread_); // This extra map is used for receive processing which is // independent of media type. - // TODO(nisse): In the RTP transport refactoring, we should have a - // single mapping from ssrc to a more abstract receive stream, with - // accessor methods for all configuration we need at this level. - struct ReceiveRtpConfig { - explicit ReceiveRtpConfig(const webrtc::AudioReceiveStream::Config& config) - : extensions(config.rtp.extensions), - use_send_side_bwe(UseSendSideBwe(config)) {} - explicit ReceiveRtpConfig(const webrtc::VideoReceiveStream::Config& config) - : extensions(config.rtp.extensions), - use_send_side_bwe(UseSendSideBwe(config)) {} - explicit ReceiveRtpConfig(const FlexfecReceiveStream::Config& config) - : extensions(config.rtp_header_extensions), - use_send_side_bwe(UseSendSideBwe(config)) {} - - // Registered RTP header extensions for each stream. Note that RTP header - // extensions are negotiated per track ("m= line") in the SDP, but we have - // no notion of tracks at the Call level. We therefore store the RTP header - // extensions per SSRC instead, which leads to some storage overhead. - const RtpHeaderExtensionMap extensions; - // Set if both RTP extension the RTCP feedback message needed for - // send side BWE are negotiated. - const bool use_send_side_bwe; - }; - std::map receive_rtp_config_ - RTC_GUARDED_BY(receive_crit_); + // TODO(bugs.webrtc.org/11993): Move receive_rtp_config_ over to the + // network thread. + std::map receive_rtp_config_ + RTC_GUARDED_BY(worker_thread_); - std::unique_ptr send_crit_; // Audio and Video send streams are owned by the client that creates them. 
std::map audio_send_ssrcs_ - RTC_GUARDED_BY(send_crit_); + RTC_GUARDED_BY(worker_thread_); std::map video_send_ssrcs_ - RTC_GUARDED_BY(send_crit_); - std::set video_send_streams_ RTC_GUARDED_BY(send_crit_); + RTC_GUARDED_BY(worker_thread_); + std::set video_send_streams_ RTC_GUARDED_BY(worker_thread_); + // True if |video_send_streams_| is empty, false if not. The atomic variable + // is used to decide UMA send statistics behavior and enables avoiding a + // PostTask(). + std::atomic video_send_streams_empty_{true}; + + // Each forwarder wraps an adaptation resource that was added to the call. + std::vector> + adaptation_resource_forwarders_ RTC_GUARDED_BY(worker_thread_); using RtpStateMap = std::map; - RtpStateMap suspended_audio_send_ssrcs_ - RTC_GUARDED_BY(configuration_sequence_checker_); - RtpStateMap suspended_video_send_ssrcs_ - RTC_GUARDED_BY(configuration_sequence_checker_); + RtpStateMap suspended_audio_send_ssrcs_ RTC_GUARDED_BY(worker_thread_); + RtpStateMap suspended_video_send_ssrcs_ RTC_GUARDED_BY(worker_thread_); using RtpPayloadStateMap = std::map; RtpPayloadStateMap suspended_video_payload_states_ - RTC_GUARDED_BY(configuration_sequence_checker_); - - webrtc::RtcEventLog* event_log_; - - // The following members are only accessed (exclusively) from one thread and - // from the destructor, and therefore doesn't need any explicit - // synchronization. 
- RateCounter received_bytes_per_second_counter_; - RateCounter received_audio_bytes_per_second_counter_; - RateCounter received_video_bytes_per_second_counter_; - RateCounter received_rtcp_bytes_per_second_counter_; - absl::optional first_received_rtp_audio_ms_; - absl::optional last_received_rtp_audio_ms_; - absl::optional first_received_rtp_video_ms_; - absl::optional last_received_rtp_video_ms_; - - rtc::CriticalSection last_bandwidth_bps_crit_; - uint32_t last_bandwidth_bps_ RTC_GUARDED_BY(&last_bandwidth_bps_crit_); - // TODO(holmer): Remove this lock once BitrateController no longer calls - // OnNetworkChanged from multiple threads. - rtc::CriticalSection bitrate_crit_; - uint32_t min_allocated_send_bitrate_bps_ - RTC_GUARDED_BY(&network_sequence_checker_); - uint32_t configured_max_padding_bitrate_bps_ RTC_GUARDED_BY(&bitrate_crit_); - AvgCounter estimated_send_bitrate_kbps_counter_ - RTC_GUARDED_BY(&bitrate_crit_); - AvgCounter pacer_bitrate_kbps_counter_ RTC_GUARDED_BY(&bitrate_crit_); + RTC_GUARDED_BY(worker_thread_); + + webrtc::RtcEventLog* const event_log_; + + // TODO(bugs.webrtc.org/11993) ready to move stats access to the network + // thread. + ReceiveStats receive_stats_ RTC_GUARDED_BY(worker_thread_); + SendStats send_stats_ RTC_GUARDED_BY(send_transport_sequence_checker_); + // |last_bandwidth_bps_| and |configured_max_padding_bitrate_bps_| being + // atomic avoids a PostTask. The variables are used for stats gathering. + std::atomic last_bandwidth_bps_{0}; + std::atomic configured_max_padding_bitrate_bps_{0}; ReceiveSideCongestionController receive_side_cc_; const std::unique_ptr receive_time_calculator_; const std::unique_ptr video_send_delay_stats_; - const int64_t start_ms_; + const Timestamp start_of_call_; + + // Note that |task_safety_| needs to be at a greater scope than the task queue + // owned by |transport_send_| since calls might arrive on the network thread + // while Call is being deleted and the task queue is being torn down. 
+ const ScopedTaskSafety task_safety_; // Caches transport_send_.get(), to avoid racing with destructor. // Note that this is declared before transport_send_ to ensure that it is not // invalidated until no more tasks can be running on the transport_send_ task // queue. - RtpTransportControllerSendInterface* const transport_send_ptr_; + // For more details on the background of this member variable, see: + // https://webrtc-review.googlesource.com/c/src/+/63023/9/call/call.cc + // https://bugs.chromium.org/p/chromium/issues/detail?id=992640 + RtpTransportControllerSendInterface* const transport_send_ptr_ + RTC_GUARDED_BY(send_transport_sequence_checker_); // Declared last since it will issue callbacks from a task queue. Declaring it // last ensures that it is destroyed first and any running tasks are finished. - std::unique_ptr transport_send_; + const std::unique_ptr transport_send_; - bool is_target_rate_observer_registered_ - RTC_GUARDED_BY(&configuration_sequence_checker_) = false; + bool is_started_ RTC_GUARDED_BY(worker_thread_) = false; RTC_DISALLOW_COPY_AND_ASSIGN(Call); }; @@ -407,171 +481,196 @@ std::string Call::Stats::ToString(int64_t time_ms) const { } Call* Call::Create(const Call::Config& config) { - return Create(config, Clock::GetRealTimeClock(), - ProcessThread::Create("ModuleProcessThread"), + rtc::scoped_refptr call_thread = + SharedModuleThread::Create(ProcessThread::Create("ModuleProcessThread"), + nullptr); + return Create(config, Clock::GetRealTimeClock(), std::move(call_thread), ProcessThread::Create("PacerThread")); } Call* Call::Create(const Call::Config& config, Clock* clock, - std::unique_ptr call_thread, + rtc::scoped_refptr call_thread, std::unique_ptr pacer_thread) { RTC_DCHECK(config.task_queue_factory); + + RtpTransportControllerSendFactory transport_controller_factory_; + + RtpTransportConfig transportConfig = config.ExtractTransportConfig(); + return new internal::Call( clock, config, - std::make_unique( - clock, config.event_log, 
config.network_state_predictor_factory, - config.network_controller_factory, config.bitrate_config, - std::move(pacer_thread), config.task_queue_factory, config.trials), + transport_controller_factory_.Create(transportConfig, clock, + std::move(pacer_thread)), std::move(call_thread), config.task_queue_factory); } -// This method here to avoid subclasses has to implement this method. -// Call perf test will use Internal::Call::CreateVideoSendStream() to inject -// FecController. -VideoSendStream* Call::CreateVideoSendStream( - VideoSendStream::Config config, - VideoEncoderConfig encoder_config, - std::unique_ptr fec_controller) { - return nullptr; +Call* Call::Create(const Call::Config& config, + Clock* clock, + rtc::scoped_refptr call_thread, + std::unique_ptr + transportControllerSend) { + RTC_DCHECK(config.task_queue_factory); + return new internal::Call(clock, config, std::move(transportControllerSend), + std::move(call_thread), config.task_queue_factory); } -namespace internal { +class SharedModuleThread::Impl { + public: + Impl(std::unique_ptr process_thread, + std::function on_one_ref_remaining) + : module_thread_(std::move(process_thread)), + on_one_ref_remaining_(std::move(on_one_ref_remaining)) {} + + void EnsureStarted() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (started_) + return; + started_ = true; + module_thread_->Start(); + } -Call::Call(Clock* clock, - const Call::Config& config, - std::unique_ptr transport_send, - std::unique_ptr module_process_thread, - TaskQueueFactory* task_queue_factory) - : clock_(clock), - task_queue_factory_(task_queue_factory), - num_cpu_cores_(CpuInfo::DetectNumberOfCores()), - module_process_thread_(std::move(module_process_thread)), - call_stats_(new CallStats(clock_, GetCurrentTaskQueueOrThread())), - bitrate_allocator_(new BitrateAllocator(this)), - config_(config), - audio_network_state_(kNetworkDown), - video_network_state_(kNetworkDown), - aggregate_network_up_(false), - 
receive_crit_(RWLockWrapper::CreateRWLock()), - send_crit_(RWLockWrapper::CreateRWLock()), - event_log_(config.event_log), - received_bytes_per_second_counter_(clock_, nullptr, true), - received_audio_bytes_per_second_counter_(clock_, nullptr, true), - received_video_bytes_per_second_counter_(clock_, nullptr, true), - received_rtcp_bytes_per_second_counter_(clock_, nullptr, true), - last_bandwidth_bps_(0), - min_allocated_send_bitrate_bps_(0), - configured_max_padding_bitrate_bps_(0), - estimated_send_bitrate_kbps_counter_(clock_, nullptr, true), - pacer_bitrate_kbps_counter_(clock_, nullptr, true), - receive_side_cc_(clock_, transport_send->packet_router()), - receive_time_calculator_(ReceiveTimeCalculator::CreateFromFieldTrial()), - video_send_delay_stats_(new SendDelayStats(clock_)), - start_ms_(clock_->TimeInMilliseconds()), - transport_send_ptr_(transport_send.get()), - transport_send_(std::move(transport_send)) { - RTC_DCHECK(config.event_log != nullptr); - RTC_DCHECK(config.trials != nullptr); - network_sequence_checker_.Detach(); + ProcessThread* process_thread() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return module_thread_.get(); + } - call_stats_->RegisterStatsObserver(&receive_side_cc_); + void AddRef() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + ++ref_count_; + } - module_process_thread_->RegisterModule( - receive_side_cc_.GetRemoteBitrateEstimator(true), RTC_FROM_HERE); - module_process_thread_->RegisterModule(&receive_side_cc_, RTC_FROM_HERE); -} + rtc::RefCountReleaseStatus Release() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + --ref_count_; -Call::~Call() { - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + if (ref_count_ == 0) { + module_thread_->Stop(); + return rtc::RefCountReleaseStatus::kDroppedLastRef; + } - RTC_CHECK(audio_send_ssrcs_.empty()); - RTC_CHECK(video_send_ssrcs_.empty()); - RTC_CHECK(video_send_streams_.empty()); - RTC_CHECK(audio_receive_streams_.empty()); - RTC_CHECK(video_receive_streams_.empty()); + if 
(ref_count_ == 1 && on_one_ref_remaining_) { + auto moved_fn = std::move(on_one_ref_remaining_); + // NOTE: after this function returns, chances are that |this| has been + // deleted - do not touch any member variables. + // If the owner of the last reference implements a lambda that releases + // that last reference inside of the callback (which is legal according + // to this implementation), we will recursively enter Release() above, + // call Stop() and release the last reference. + moved_fn(); + } - module_process_thread_->Stop(); - module_process_thread_->DeRegisterModule( - receive_side_cc_.GetRemoteBitrateEstimator(true)); - module_process_thread_->DeRegisterModule(&receive_side_cc_); - call_stats_->DeregisterStatsObserver(&receive_side_cc_); + return rtc::RefCountReleaseStatus::kOtherRefsRemained; + } - absl::optional first_sent_packet_ms = - transport_send_->GetFirstPacketTime(); + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + mutable int ref_count_ RTC_GUARDED_BY(sequence_checker_) = 0; + std::unique_ptr const module_thread_; + std::function const on_one_ref_remaining_; + bool started_ = false; +}; - // Only update histograms after process threads have been shut down, so that - // they won't try to concurrently update stats. 
- if (first_sent_packet_ms) { - rtc::CritScope lock(&bitrate_crit_); - UpdateSendHistograms(*first_sent_packet_ms); - } +SharedModuleThread::SharedModuleThread( + std::unique_ptr process_thread, + std::function on_one_ref_remaining) + : impl_(std::make_unique(std::move(process_thread), + std::move(on_one_ref_remaining))) {} - UpdateReceiveHistograms(); - UpdateHistograms(); -} +SharedModuleThread::~SharedModuleThread() = default; -void Call::RegisterRateObserver() { - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); +// static - if (is_target_rate_observer_registered_) - return; +rtc::scoped_refptr SharedModuleThread::Create( + std::unique_ptr process_thread, + std::function on_one_ref_remaining) { + return new SharedModuleThread(std::move(process_thread), + std::move(on_one_ref_remaining)); +} - is_target_rate_observer_registered_ = true; +void SharedModuleThread::EnsureStarted() { + impl_->EnsureStarted(); +} - // This call seems to kick off a number of things, so probably better left - // off being kicked off on request rather than in the ctor. - transport_send_ptr_->RegisterTargetTransferRateObserver(this); +ProcessThread* SharedModuleThread::process_thread() { + return impl_->process_thread(); +} - module_process_thread_->Start(); +void SharedModuleThread::AddRef() const { + impl_->AddRef(); } -void Call::SetClientBitratePreferences(const BitrateSettings& preferences) { - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); - GetTransportControllerSend()->SetClientBitratePreferences(preferences); +rtc::RefCountReleaseStatus SharedModuleThread::Release() const { + auto ret = impl_->Release(); + if (ret == rtc::RefCountReleaseStatus::kDroppedLastRef) + delete this; + return ret; } -void Call::UpdateHistograms() { - RTC_HISTOGRAM_COUNTS_100000( - "WebRTC.Call.LifetimeInSeconds", - (clock_->TimeInMilliseconds() - start_ms_) / 1000); +// This method here to avoid subclasses has to implement this method. 
+// Call perf test will use Internal::Call::CreateVideoSendStream() to inject +// FecController. +VideoSendStream* Call::CreateVideoSendStream( + VideoSendStream::Config config, + VideoEncoderConfig encoder_config, + std::unique_ptr fec_controller) { + return nullptr; } -// Called from the dtor. -void Call::UpdateSendHistograms(Timestamp first_sent_packet) { - int64_t elapsed_sec = - (clock_->TimeInMilliseconds() - first_sent_packet.ms()) / 1000; - if (elapsed_sec < metrics::kMinRunTimeInSeconds) - return; - const int kMinRequiredPeriodicSamples = 5; - AggregatedStats send_bitrate_stats = - estimated_send_bitrate_kbps_counter_.ProcessAndGetStats(); - if (send_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) { - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.EstimatedSendBitrateInKbps", - send_bitrate_stats.average); - RTC_LOG(LS_INFO) << "WebRTC.Call.EstimatedSendBitrateInKbps, " - << send_bitrate_stats.ToString(); - } - AggregatedStats pacer_bitrate_stats = - pacer_bitrate_kbps_counter_.ProcessAndGetStats(); - if (pacer_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) { - RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.PacerBitrateInKbps", - pacer_bitrate_stats.average); - RTC_LOG(LS_INFO) << "WebRTC.Call.PacerBitrateInKbps, " - << pacer_bitrate_stats.ToString(); +namespace internal { + +Call::ReceiveStats::ReceiveStats(Clock* clock) + : received_bytes_per_second_counter_(clock, nullptr, false), + received_audio_bytes_per_second_counter_(clock, nullptr, false), + received_video_bytes_per_second_counter_(clock, nullptr, false), + received_rtcp_bytes_per_second_counter_(clock, nullptr, false) { + sequence_checker_.Detach(); +} + +void Call::ReceiveStats::AddReceivedRtcpBytes(int bytes) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (received_bytes_per_second_counter_.HasSample()) { + // First RTP packet has been received. 
+ received_bytes_per_second_counter_.Add(static_cast(bytes)); + received_rtcp_bytes_per_second_counter_.Add(static_cast(bytes)); } } -void Call::UpdateReceiveHistograms() { - if (first_received_rtp_audio_ms_) { +void Call::ReceiveStats::AddReceivedAudioBytes(int bytes, + webrtc::Timestamp arrival_time) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + received_bytes_per_second_counter_.Add(bytes); + received_audio_bytes_per_second_counter_.Add(bytes); + if (!first_received_rtp_audio_timestamp_) + first_received_rtp_audio_timestamp_ = arrival_time; + last_received_rtp_audio_timestamp_ = arrival_time; +} + +void Call::ReceiveStats::AddReceivedVideoBytes(int bytes, + webrtc::Timestamp arrival_time) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + received_bytes_per_second_counter_.Add(bytes); + received_video_bytes_per_second_counter_.Add(bytes); + if (!first_received_rtp_video_timestamp_) + first_received_rtp_video_timestamp_ = arrival_time; + last_received_rtp_video_timestamp_ = arrival_time; +} + +Call::ReceiveStats::~ReceiveStats() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (first_received_rtp_audio_timestamp_) { RTC_HISTOGRAM_COUNTS_100000( "WebRTC.Call.TimeReceivingAudioRtpPacketsInSeconds", - (*last_received_rtp_audio_ms_ - *first_received_rtp_audio_ms_) / 1000); + (*last_received_rtp_audio_timestamp_ - + *first_received_rtp_audio_timestamp_) + .seconds()); } - if (first_received_rtp_video_ms_) { + if (first_received_rtp_video_timestamp_) { RTC_HISTOGRAM_COUNTS_100000( "WebRTC.Call.TimeReceivingVideoRtpPacketsInSeconds", - (*last_received_rtp_video_ms_ - *first_received_rtp_video_ms_) / 1000); + (*last_received_rtp_video_timestamp_ - + *first_received_rtp_video_timestamp_) + .seconds()); } const int kMinRequiredPeriodicSamples = 5; AggregatedStats video_bytes_per_sec = @@ -608,17 +707,174 @@ void Call::UpdateReceiveHistograms() { } } +Call::SendStats::SendStats(Clock* clock) + : clock_(clock), + estimated_send_bitrate_kbps_counter_(clock, nullptr, true), + 
pacer_bitrate_kbps_counter_(clock, nullptr, true) { + destructor_sequence_checker_.Detach(); + sequence_checker_.Detach(); +} + +Call::SendStats::~SendStats() { + RTC_DCHECK_RUN_ON(&destructor_sequence_checker_); + if (!first_sent_packet_time_) + return; + + TimeDelta elapsed = clock_->CurrentTime() - *first_sent_packet_time_; + if (elapsed.seconds() < metrics::kMinRunTimeInSeconds) + return; + + const int kMinRequiredPeriodicSamples = 5; + AggregatedStats send_bitrate_stats = + estimated_send_bitrate_kbps_counter_.ProcessAndGetStats(); + if (send_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) { + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.EstimatedSendBitrateInKbps", + send_bitrate_stats.average); + RTC_LOG(LS_INFO) << "WebRTC.Call.EstimatedSendBitrateInKbps, " + << send_bitrate_stats.ToString(); + } + AggregatedStats pacer_bitrate_stats = + pacer_bitrate_kbps_counter_.ProcessAndGetStats(); + if (pacer_bitrate_stats.num_samples > kMinRequiredPeriodicSamples) { + RTC_HISTOGRAM_COUNTS_100000("WebRTC.Call.PacerBitrateInKbps", + pacer_bitrate_stats.average); + RTC_LOG(LS_INFO) << "WebRTC.Call.PacerBitrateInKbps, " + << pacer_bitrate_stats.ToString(); + } +} + +void Call::SendStats::SetFirstPacketTime( + absl::optional first_sent_packet_time) { + RTC_DCHECK_RUN_ON(&destructor_sequence_checker_); + first_sent_packet_time_ = first_sent_packet_time; +} + +void Call::SendStats::PauseSendAndPacerBitrateCounters() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + estimated_send_bitrate_kbps_counter_.ProcessAndPause(); + pacer_bitrate_kbps_counter_.ProcessAndPause(); +} + +void Call::SendStats::AddTargetBitrateSample(uint32_t target_bitrate_bps) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + estimated_send_bitrate_kbps_counter_.Add(target_bitrate_bps / 1000); + // Pacer bitrate may be higher than bitrate estimate if enforcing min + // bitrate. 
+ uint32_t pacer_bitrate_bps = + std::max(target_bitrate_bps, min_allocated_send_bitrate_bps_); + pacer_bitrate_kbps_counter_.Add(pacer_bitrate_bps / 1000); +} + +void Call::SendStats::SetMinAllocatableRate(BitrateAllocationLimits limits) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + min_allocated_send_bitrate_bps_ = limits.min_allocatable_rate.bps(); +} + +Call::Call(Clock* clock, + const Call::Config& config, + std::unique_ptr transport_send, + rtc::scoped_refptr module_process_thread, + TaskQueueFactory* task_queue_factory) + : clock_(clock), + task_queue_factory_(task_queue_factory), + worker_thread_(GetCurrentTaskQueueOrThread()), + // If |network_task_queue_| was set to nullptr, network related calls + // must be made on |worker_thread_| (i.e. they're one and the same). + network_thread_(config.network_task_queue_ ? config.network_task_queue_ + : worker_thread_), + num_cpu_cores_(CpuInfo::DetectNumberOfCores()), + module_process_thread_(std::move(module_process_thread)), + call_stats_(new CallStats(clock_, worker_thread_)), + bitrate_allocator_(new BitrateAllocator(this)), + config_(config), + trials_(*config.trials), + audio_network_state_(kNetworkDown), + video_network_state_(kNetworkDown), + aggregate_network_up_(false), + event_log_(config.event_log), + receive_stats_(clock_), + send_stats_(clock_), + receive_side_cc_(clock, + absl::bind_front(&PacketRouter::SendCombinedRtcpPacket, + transport_send->packet_router()), + absl::bind_front(&PacketRouter::SendRemb, + transport_send->packet_router()), + /*network_state_estimator=*/nullptr), + receive_time_calculator_(ReceiveTimeCalculator::CreateFromFieldTrial()), + video_send_delay_stats_(new SendDelayStats(clock_)), + start_of_call_(clock_->CurrentTime()), + transport_send_ptr_(transport_send.get()), + transport_send_(std::move(transport_send)) { + RTC_DCHECK(config.event_log != nullptr); + RTC_DCHECK(config.trials != nullptr); + RTC_DCHECK(network_thread_); + RTC_DCHECK(worker_thread_->IsCurrent()); + + 
send_transport_sequence_checker_.Detach(); + + // Do not remove this call; it is here to convince the compiler that the + // WebRTC source timestamp string needs to be in the final binary. + LoadWebRTCVersionInRegister(); + + call_stats_->RegisterStatsObserver(&receive_side_cc_); + + module_process_thread_->process_thread()->RegisterModule( + receive_side_cc_.GetRemoteBitrateEstimator(true), RTC_FROM_HERE); + module_process_thread_->process_thread()->RegisterModule(&receive_side_cc_, + RTC_FROM_HERE); +} + +Call::~Call() { + RTC_DCHECK_RUN_ON(worker_thread_); + + RTC_CHECK(audio_send_ssrcs_.empty()); + RTC_CHECK(video_send_ssrcs_.empty()); + RTC_CHECK(video_send_streams_.empty()); + RTC_CHECK(audio_receive_streams_.empty()); + RTC_CHECK(video_receive_streams_.empty()); + + module_process_thread_->process_thread()->DeRegisterModule( + receive_side_cc_.GetRemoteBitrateEstimator(true)); + module_process_thread_->process_thread()->DeRegisterModule(&receive_side_cc_); + call_stats_->DeregisterStatsObserver(&receive_side_cc_); + send_stats_.SetFirstPacketTime(transport_send_->GetFirstPacketTime()); + + RTC_HISTOGRAM_COUNTS_100000( + "WebRTC.Call.LifetimeInSeconds", + (clock_->CurrentTime() - start_of_call_).seconds()); +} + +void Call::EnsureStarted() { + if (is_started_) { + return; + } + is_started_ = true; + + call_stats_->EnsureStarted(); + + // This call seems to kick off a number of things, so probably better left + // off being kicked off on request rather than in the ctor. 
+ transport_send_->RegisterTargetTransferRateObserver(this); + + module_process_thread_->EnsureStarted(); + transport_send_->EnsureStarted(); +} + +void Call::SetClientBitratePreferences(const BitrateSettings& preferences) { + RTC_DCHECK_RUN_ON(worker_thread_); + GetTransportControllerSend()->SetClientBitratePreferences(preferences); +} + PacketReceiver* Call::Receiver() { - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); return this; } webrtc::AudioSendStream* Call::CreateAudioSendStream( const webrtc::AudioSendStream::Config& config) { TRACE_EVENT0("webrtc", "Call::CreateAudioSendStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); - RegisterRateObserver(); + EnsureStarted(); // Stream config is logged in AudioSendStream::ConfigureStream, as it may // change during the stream's lifetime. @@ -632,30 +888,28 @@ webrtc::AudioSendStream* Call::CreateAudioSendStream( AudioSendStream* send_stream = new AudioSendStream( clock_, config, config_.audio_state, task_queue_factory_, - module_process_thread_.get(), transport_send_ptr_, - bitrate_allocator_.get(), event_log_, call_stats_->AsRtcpRttStats(), - suspended_rtp_state); - { - WriteLockScoped write_lock(*send_crit_); - RTC_DCHECK(audio_send_ssrcs_.find(config.rtp.ssrc) == - audio_send_ssrcs_.end()); - audio_send_ssrcs_[config.rtp.ssrc] = send_stream; - } - { - ReadLockScoped read_lock(*receive_crit_); - for (AudioReceiveStream* stream : audio_receive_streams_) { - if (stream->config().rtp.local_ssrc == config.rtp.ssrc) { - stream->AssociateSendStream(send_stream); - } + transport_send_.get(), bitrate_allocator_.get(), event_log_, + call_stats_->AsRtcpRttStats(), suspended_rtp_state); + RTC_DCHECK(audio_send_ssrcs_.find(config.rtp.ssrc) == + audio_send_ssrcs_.end()); + audio_send_ssrcs_[config.rtp.ssrc] = send_stream; + + // TODO(bugs.webrtc.org/11993): call AssociateSendStream and + // UpdateAggregateNetworkState asynchronously on the network thread. 
+ for (AudioReceiveStream* stream : audio_receive_streams_) { + if (stream->local_ssrc() == config.rtp.ssrc) { + stream->AssociateSendStream(send_stream); } } + UpdateAggregateNetworkState(); + return send_stream; } void Call::DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) { TRACE_EVENT0("webrtc", "Call::DestroyAudioSendStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); RTC_DCHECK(send_stream != nullptr); send_stream->Stop(); @@ -664,49 +918,53 @@ void Call::DestroyAudioSendStream(webrtc::AudioSendStream* send_stream) { webrtc::internal::AudioSendStream* audio_send_stream = static_cast(send_stream); suspended_audio_send_ssrcs_[ssrc] = audio_send_stream->GetRtpState(); - { - WriteLockScoped write_lock(*send_crit_); - size_t num_deleted = audio_send_ssrcs_.erase(ssrc); - RTC_DCHECK_EQ(1, num_deleted); - } - { - ReadLockScoped read_lock(*receive_crit_); - for (AudioReceiveStream* stream : audio_receive_streams_) { - if (stream->config().rtp.local_ssrc == ssrc) { - stream->AssociateSendStream(nullptr); - } + + size_t num_deleted = audio_send_ssrcs_.erase(ssrc); + RTC_DCHECK_EQ(1, num_deleted); + + // TODO(bugs.webrtc.org/11993): call AssociateSendStream and + // UpdateAggregateNetworkState asynchronously on the network thread. 
+ for (AudioReceiveStream* stream : audio_receive_streams_) { + if (stream->local_ssrc() == ssrc) { + stream->AssociateSendStream(nullptr); } } + UpdateAggregateNetworkState(); + delete send_stream; } webrtc::AudioReceiveStream* Call::CreateAudioReceiveStream( const webrtc::AudioReceiveStream::Config& config) { TRACE_EVENT0("webrtc", "Call::CreateAudioReceiveStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); - RegisterRateObserver(); + RTC_DCHECK_RUN_ON(worker_thread_); + EnsureStarted(); event_log_->Log(std::make_unique( CreateRtcLogStreamConfig(config))); + AudioReceiveStream* receive_stream = new AudioReceiveStream( - clock_, &audio_receiver_controller_, transport_send_ptr_->packet_router(), - module_process_thread_.get(), config_.neteq_factory, config, + clock_, transport_send_->packet_router(), config_.neteq_factory, config, config_.audio_state, event_log_); - { - WriteLockScoped write_lock(*receive_crit_); - receive_rtp_config_.emplace(config.rtp.remote_ssrc, - ReceiveRtpConfig(config)); - audio_receive_streams_.insert(receive_stream); + audio_receive_streams_.insert(receive_stream); - ConfigureSync(config.sync_group); - } - { - ReadLockScoped read_lock(*send_crit_); - auto it = audio_send_ssrcs_.find(config.rtp.local_ssrc); - if (it != audio_send_ssrcs_.end()) { - receive_stream->AssociateSendStream(it->second); - } + // TODO(bugs.webrtc.org/11993): Make the registration on the network thread + // (asynchronously). The registration and `audio_receiver_controller_` need + // to live on the network thread. + receive_stream->RegisterWithTransport(&audio_receiver_controller_); + + // TODO(bugs.webrtc.org/11993): Update the below on the network thread. + // We could possibly set up the audio_receiver_controller_ association up + // as part of the async setup. 
+ receive_rtp_config_.emplace(config.rtp.remote_ssrc, receive_stream); + + ConfigureSync(config.sync_group); + + auto it = audio_send_ssrcs_.find(config.rtp.local_ssrc); + if (it != audio_send_ssrcs_.end()) { + receive_stream->AssociateSendStream(it->second); } + UpdateAggregateNetworkState(); return receive_stream; } @@ -714,27 +972,34 @@ webrtc::AudioReceiveStream* Call::CreateAudioReceiveStream( void Call::DestroyAudioReceiveStream( webrtc::AudioReceiveStream* receive_stream) { TRACE_EVENT0("webrtc", "Call::DestroyAudioReceiveStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); RTC_DCHECK(receive_stream != nullptr); webrtc::internal::AudioReceiveStream* audio_receive_stream = static_cast(receive_stream); - { - WriteLockScoped write_lock(*receive_crit_); - const AudioReceiveStream::Config& config = audio_receive_stream->config(); - uint32_t ssrc = config.rtp.remote_ssrc; - receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config)) - ->RemoveStream(ssrc); - audio_receive_streams_.erase(audio_receive_stream); - const std::string& sync_group = audio_receive_stream->config().sync_group; - const auto it = sync_stream_mapping_.find(sync_group); - if (it != sync_stream_mapping_.end() && - it->second == audio_receive_stream) { - sync_stream_mapping_.erase(it); - ConfigureSync(sync_group); - } - receive_rtp_config_.erase(ssrc); + + // TODO(bugs.webrtc.org/11993): Access the map, rtp config, call ConfigureSync + // and UpdateAggregateNetworkState on the network thread. The call to + // `UnregisterFromTransport` should also happen on the network thread. 
+ audio_receive_stream->UnregisterFromTransport(); + + uint32_t ssrc = audio_receive_stream->remote_ssrc(); + const AudioReceiveStream::Config& config = audio_receive_stream->config(); + receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config.rtp)) + ->RemoveStream(ssrc); + + audio_receive_streams_.erase(audio_receive_stream); + + const auto it = sync_stream_mapping_.find(config.sync_group); + if (it != sync_stream_mapping_.end() && it->second == audio_receive_stream) { + sync_stream_mapping_.erase(it); + ConfigureSync(config.sync_group); } + receive_rtp_config_.erase(ssrc); + UpdateAggregateNetworkState(); + // TODO(bugs.webrtc.org/11993): Consider if deleting |audio_receive_stream| + // on the network thread would be better or if we'd need to tear down the + // state in two phases. delete audio_receive_stream; } @@ -744,9 +1009,9 @@ webrtc::VideoSendStream* Call::CreateVideoSendStream( VideoEncoderConfig encoder_config, std::unique_ptr fec_controller) { TRACE_EVENT0("webrtc", "Call::CreateVideoSendStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); - RegisterRateObserver(); + EnsureStarted(); video_send_delay_stats_->AddSsrcs(config); for (size_t ssrc_index = 0; ssrc_index < config.rtp.ssrcs.size(); @@ -761,20 +1026,24 @@ webrtc::VideoSendStream* Call::CreateVideoSendStream( std::vector ssrcs = config.rtp.ssrcs; VideoSendStream* send_stream = new VideoSendStream( - clock_, num_cpu_cores_, module_process_thread_.get(), task_queue_factory_, - call_stats_->AsRtcpRttStats(), transport_send_ptr_, + clock_, num_cpu_cores_, task_queue_factory_, + call_stats_->AsRtcpRttStats(), transport_send_.get(), bitrate_allocator_.get(), video_send_delay_stats_.get(), event_log_, std::move(config), std::move(encoder_config), suspended_video_send_ssrcs_, suspended_video_payload_states_, std::move(fec_controller)); - { - WriteLockScoped write_lock(*send_crit_); - for (uint32_t ssrc : ssrcs) { - 
RTC_DCHECK(video_send_ssrcs_.find(ssrc) == video_send_ssrcs_.end()); - video_send_ssrcs_[ssrc] = send_stream; - } - video_send_streams_.insert(send_stream); + for (uint32_t ssrc : ssrcs) { + RTC_DCHECK(video_send_ssrcs_.find(ssrc) == video_send_ssrcs_.end()); + video_send_ssrcs_[ssrc] = send_stream; + } + video_send_streams_.insert(send_stream); + video_send_streams_empty_.store(false, std::memory_order_relaxed); + + // Forward resources that were previously added to the call to the new stream. + for (const auto& resource_forwarder : adaptation_resource_forwarders_) { + resource_forwarder->OnCreateVideoSendStream(send_stream); } + UpdateAggregateNetworkState(); return send_stream; @@ -783,6 +1052,7 @@ webrtc::VideoSendStream* Call::CreateVideoSendStream( webrtc::VideoSendStream* Call::CreateVideoSendStream( webrtc::VideoSendStream::Config config, VideoEncoderConfig encoder_config) { + RTC_DCHECK_RUN_ON(worker_thread_); if (config_.fec_controller_factory) { RTC_LOG(LS_INFO) << "External FEC Controller will be used."; } @@ -797,30 +1067,33 @@ webrtc::VideoSendStream* Call::CreateVideoSendStream( void Call::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) { TRACE_EVENT0("webrtc", "Call::DestroyVideoSendStream"); RTC_DCHECK(send_stream != nullptr); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); - - send_stream->Stop(); - - VideoSendStream* send_stream_impl = nullptr; - { - WriteLockScoped write_lock(*send_crit_); - auto it = video_send_ssrcs_.begin(); - while (it != video_send_ssrcs_.end()) { - if (it->second == static_cast(send_stream)) { - send_stream_impl = it->second; - video_send_ssrcs_.erase(it++); - } else { - ++it; - } - } - video_send_streams_.erase(send_stream_impl); - } - RTC_CHECK(send_stream_impl != nullptr); + RTC_DCHECK_RUN_ON(worker_thread_); + VideoSendStream* send_stream_impl = + static_cast(send_stream); VideoSendStream::RtpStateMap rtp_states; VideoSendStream::RtpPayloadStateMap rtp_payload_states; 
send_stream_impl->StopPermanentlyAndGetRtpStates(&rtp_states, &rtp_payload_states); + + auto it = video_send_ssrcs_.begin(); + while (it != video_send_ssrcs_.end()) { + if (it->second == static_cast(send_stream)) { + send_stream_impl = it->second; + video_send_ssrcs_.erase(it++); + } else { + ++it; + } + } + + // Stop forwarding resources to the stream being destroyed. + for (const auto& resource_forwarder : adaptation_resource_forwarders_) { + resource_forwarder->OnDestroyVideoSendStream(send_stream_impl); + } + video_send_streams_.erase(send_stream_impl); + if (video_send_streams_.empty()) + video_send_streams_empty_.store(true, std::memory_order_relaxed); + for (const auto& kv : rtp_states) { suspended_video_send_ssrcs_[kv.first] = kv.second; } @@ -829,43 +1102,45 @@ void Call::DestroyVideoSendStream(webrtc::VideoSendStream* send_stream) { } UpdateAggregateNetworkState(); + // TODO(tommi): consider deleting on the same thread as runs + // StopPermanentlyAndGetRtpStates. delete send_stream_impl; } webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream( webrtc::VideoReceiveStream::Config configuration) { TRACE_EVENT0("webrtc", "Call::CreateVideoReceiveStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); receive_side_cc_.SetSendPeriodicFeedback( SendPeriodicFeedback(configuration.rtp.extensions)); - RegisterRateObserver(); + EnsureStarted(); - TaskQueueBase* current = GetCurrentTaskQueueOrThread(); - RTC_CHECK(current); + // TODO(bugs.webrtc.org/11993): Move the registration between |receive_stream| + // and |video_receiver_controller_| out of VideoReceiveStream2 construction + // and set it up asynchronously on the network thread (the registration and + // |video_receiver_controller_| need to live on the network thread). 
VideoReceiveStream2* receive_stream = new VideoReceiveStream2( - task_queue_factory_, current, &video_receiver_controller_, num_cpu_cores_, - transport_send_ptr_->packet_router(), std::move(configuration), - module_process_thread_.get(), call_stats_.get(), clock_, - new VCMTiming(clock_)); + task_queue_factory_, this, num_cpu_cores_, + transport_send_->packet_router(), std::move(configuration), + call_stats_.get(), clock_, new VCMTiming(clock_)); + // TODO(bugs.webrtc.org/11993): Set this up asynchronously on the network + // thread. + receive_stream->RegisterWithTransport(&video_receiver_controller_); const webrtc::VideoReceiveStream::Config& config = receive_stream->config(); - { - WriteLockScoped write_lock(*receive_crit_); - if (config.rtp.rtx_ssrc) { - // We record identical config for the rtx stream as for the main - // stream. Since the transport_send_cc negotiation is per payload - // type, we may get an incorrect value for the rtx stream, but - // that is unlikely to matter in practice. - receive_rtp_config_.emplace(config.rtp.rtx_ssrc, - ReceiveRtpConfig(config)); - } - receive_rtp_config_.emplace(config.rtp.remote_ssrc, - ReceiveRtpConfig(config)); - video_receive_streams_.insert(receive_stream); - ConfigureSync(config.sync_group); + if (config.rtp.rtx_ssrc) { + // We record identical config for the rtx stream as for the main + // stream. Since the transport_send_cc negotiation is per payload + // type, we may get an incorrect value for the rtx stream, but + // that is unlikely to matter in practice. 
+ receive_rtp_config_.emplace(config.rtp.rtx_ssrc, receive_stream); } + receive_rtp_config_.emplace(config.rtp.remote_ssrc, receive_stream); + video_receive_streams_.insert(receive_stream); + ConfigureSync(config.sync_group); + receive_stream->SignalNetworkState(video_network_state_); UpdateAggregateNetworkState(); event_log_->Log(std::make_unique( @@ -876,24 +1151,25 @@ webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream( void Call::DestroyVideoReceiveStream( webrtc::VideoReceiveStream* receive_stream) { TRACE_EVENT0("webrtc", "Call::DestroyVideoReceiveStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); RTC_DCHECK(receive_stream != nullptr); VideoReceiveStream2* receive_stream_impl = static_cast(receive_stream); + // TODO(bugs.webrtc.org/11993): Unregister on the network thread. + receive_stream_impl->UnregisterFromTransport(); + const VideoReceiveStream::Config& config = receive_stream_impl->config(); - { - WriteLockScoped write_lock(*receive_crit_); - // Remove all ssrcs pointing to a receive stream. As RTX retransmits on a - // separate SSRC there can be either one or two. - receive_rtp_config_.erase(config.rtp.remote_ssrc); - if (config.rtp.rtx_ssrc) { - receive_rtp_config_.erase(config.rtp.rtx_ssrc); - } - video_receive_streams_.erase(receive_stream_impl); - ConfigureSync(config.sync_group); + + // Remove all ssrcs pointing to a receive stream. As RTX retransmits on a + // separate SSRC there can be either one or two. 
+ receive_rtp_config_.erase(config.rtp.remote_ssrc); + if (config.rtp.rtx_ssrc) { + receive_rtp_config_.erase(config.rtp.rtx_ssrc); } + video_receive_streams_.erase(receive_stream_impl); + ConfigureSync(config.sync_group); - receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config)) + receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config.rtp)) ->RemoveStream(config.rtp.remote_ssrc); UpdateAggregateNetworkState(); @@ -903,30 +1179,28 @@ void Call::DestroyVideoReceiveStream( FlexfecReceiveStream* Call::CreateFlexfecReceiveStream( const FlexfecReceiveStream::Config& config) { TRACE_EVENT0("webrtc", "Call::CreateFlexfecReceiveStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); RecoveredPacketReceiver* recovered_packet_receiver = this; FlexfecReceiveStreamImpl* receive_stream; - { - WriteLockScoped write_lock(*receive_crit_); - // Unlike the video and audio receive streams, - // FlexfecReceiveStream implements RtpPacketSinkInterface itself, - // and hence its constructor passes its |this| pointer to - // video_receiver_controller_->CreateStream(). Calling the - // constructor while holding |receive_crit_| ensures that we don't - // call OnRtpPacket until the constructor is finished and the - // object is in a valid state. - // TODO(nisse): Fix constructor so that it can be moved outside of - // this locked scope. - receive_stream = new FlexfecReceiveStreamImpl( - clock_, &video_receiver_controller_, config, recovered_packet_receiver, - call_stats_->AsRtcpRttStats(), module_process_thread_.get()); - - RTC_DCHECK(receive_rtp_config_.find(config.remote_ssrc) == - receive_rtp_config_.end()); - receive_rtp_config_.emplace(config.remote_ssrc, ReceiveRtpConfig(config)); - } + + // Unlike the video and audio receive streams, FlexfecReceiveStream implements + // RtpPacketSinkInterface itself, and hence its constructor passes its |this| + // pointer to video_receiver_controller_->CreateStream(). 
Calling the + // constructor while on the worker thread ensures that we don't call + // OnRtpPacket until the constructor is finished and the object is + // in a valid state, since OnRtpPacket runs on the same thread. + receive_stream = new FlexfecReceiveStreamImpl( + clock_, config, recovered_packet_receiver, call_stats_->AsRtcpRttStats()); + + // TODO(bugs.webrtc.org/11993): Set this up asynchronously on the network + // thread. + receive_stream->RegisterWithTransport(&video_receiver_controller_); + + RTC_DCHECK(receive_rtp_config_.find(config.rtp.remote_ssrc) == + receive_rtp_config_.end()); + receive_rtp_config_.emplace(config.rtp.remote_ssrc, receive_stream); // TODO(brandtr): Store config in RtcEventLog here. @@ -935,45 +1209,47 @@ FlexfecReceiveStream* Call::CreateFlexfecReceiveStream( void Call::DestroyFlexfecReceiveStream(FlexfecReceiveStream* receive_stream) { TRACE_EVENT0("webrtc", "Call::DestroyFlexfecReceiveStream"); - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); - RTC_DCHECK(receive_stream != nullptr); - { - WriteLockScoped write_lock(*receive_crit_); + FlexfecReceiveStreamImpl* receive_stream_impl = + static_cast(receive_stream); + // TODO(bugs.webrtc.org/11993): Unregister on the network thread. + receive_stream_impl->UnregisterFromTransport(); - const FlexfecReceiveStream::Config& config = receive_stream->GetConfig(); - uint32_t ssrc = config.remote_ssrc; - receive_rtp_config_.erase(ssrc); + RTC_DCHECK(receive_stream != nullptr); + const FlexfecReceiveStream::RtpConfig& rtp = receive_stream->rtp_config(); + receive_rtp_config_.erase(rtp.remote_ssrc); - // Remove all SSRCs pointing to the FlexfecReceiveStreamImpl to be - // destroyed. - receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(config)) - ->RemoveStream(ssrc); - } + // Remove all SSRCs pointing to the FlexfecReceiveStreamImpl to be + // destroyed. 
+ receive_side_cc_.GetRemoteBitrateEstimator(UseSendSideBwe(rtp)) + ->RemoveStream(rtp.remote_ssrc); delete receive_stream; } +void Call::AddAdaptationResource(rtc::scoped_refptr resource) { + RTC_DCHECK_RUN_ON(worker_thread_); + adaptation_resource_forwarders_.push_back( + std::make_unique(resource)); + const auto& resource_forwarder = adaptation_resource_forwarders_.back(); + for (VideoSendStream* send_stream : video_send_streams_) { + resource_forwarder->OnCreateVideoSendStream(send_stream); + } +} + RtpTransportControllerSendInterface* Call::GetTransportControllerSend() { - return transport_send_ptr_; + return transport_send_.get(); } Call::Stats Call::GetStats() const { - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); - - // TODO(tommi): The following stats are managed on the process thread: - // - pacer_delay_ms (PacedSender::Process) - // - rtt_ms - // - recv_bandwidth_bps - // These are delivered on the network TQ: - // - send_bandwidth_bps (see OnTargetTransferRate) - // - max_padding_bitrate_bps (see OnAllocationLimitsChanged) + RTC_DCHECK_RUN_ON(worker_thread_); Stats stats; // TODO(srte): It is unclear if we only want to report queues if network is // available. stats.pacer_delay_ms = - aggregate_network_up_ ? transport_send_ptr_->GetPacerQueuingDelayMs() : 0; + aggregate_network_up_ ? 
transport_send_->GetPacerQueuingDelayMs() : 0; stats.rtt_ms = call_stats_->LastProcessedRtt(); @@ -983,70 +1259,81 @@ Call::Stats Call::GetStats() const { receive_side_cc_.GetRemoteBitrateEstimator(false)->LatestEstimate( &ssrcs, &recv_bandwidth); stats.recv_bandwidth_bps = recv_bandwidth; + stats.send_bandwidth_bps = + last_bandwidth_bps_.load(std::memory_order_relaxed); + stats.max_padding_bitrate_bps = + configured_max_padding_bitrate_bps_.load(std::memory_order_relaxed); - { - rtc::CritScope cs(&last_bandwidth_bps_crit_); - stats.send_bandwidth_bps = last_bandwidth_bps_; - } + return stats; +} - { - rtc::CritScope cs(&bitrate_crit_); - stats.max_padding_bitrate_bps = configured_max_padding_bitrate_bps_; - } +const WebRtcKeyValueConfig& Call::trials() const { + return trials_; +} - return stats; +TaskQueueBase* Call::network_thread() const { + return network_thread_; +} + +TaskQueueBase* Call::worker_thread() const { + return worker_thread_; } void Call::SignalChannelNetworkState(MediaType media, NetworkState state) { - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); - switch (media) { - case MediaType::AUDIO: + RTC_DCHECK_RUN_ON(network_thread_); + RTC_DCHECK(media == MediaType::AUDIO || media == MediaType::VIDEO); + + auto closure = [this, media, state]() { + // TODO(bugs.webrtc.org/11993): Move this over to the network thread. + RTC_DCHECK_RUN_ON(worker_thread_); + if (media == MediaType::AUDIO) { audio_network_state_ = state; - break; - case MediaType::VIDEO: + } else { + RTC_DCHECK_EQ(media, MediaType::VIDEO); video_network_state_ = state; - break; - case MediaType::ANY: - case MediaType::DATA: - RTC_NOTREACHED(); - break; - } + } - UpdateAggregateNetworkState(); - { - ReadLockScoped read_lock(*receive_crit_); + // TODO(tommi): Is it necessary to always do this, including if there + // was no change in state? + UpdateAggregateNetworkState(); + + // TODO(tommi): Is it right to do this if media == AUDIO? 
for (VideoReceiveStream2* video_receive_stream : video_receive_streams_) { video_receive_stream->SignalNetworkState(video_network_state_); } + }; + + if (network_thread_ == worker_thread_) { + closure(); + } else { + // TODO(bugs.webrtc.org/11993): Remove workaround when we no longer need to + // post to the worker thread. + worker_thread_->PostTask(ToQueuedTask(task_safety_, std::move(closure))); } } void Call::OnAudioTransportOverheadChanged(int transport_overhead_per_packet) { - ReadLockScoped read_lock(*send_crit_); - for (auto& kv : audio_send_ssrcs_) { - kv.second->SetTransportOverhead(transport_overhead_per_packet); - } + RTC_DCHECK_RUN_ON(network_thread_); + worker_thread_->PostTask( + ToQueuedTask(task_safety_, [this, transport_overhead_per_packet]() { + // TODO(bugs.webrtc.org/11993): Move this over to the network thread. + RTC_DCHECK_RUN_ON(worker_thread_); + for (auto& kv : audio_send_ssrcs_) { + kv.second->SetTransportOverhead(transport_overhead_per_packet); + } + })); } void Call::UpdateAggregateNetworkState() { - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); + // TODO(bugs.webrtc.org/11993): Move this over to the network thread. 
+ // RTC_DCHECK_RUN_ON(network_thread_); - bool have_audio = false; - bool have_video = false; - { - ReadLockScoped read_lock(*send_crit_); - if (!audio_send_ssrcs_.empty()) - have_audio = true; - if (!video_send_ssrcs_.empty()) - have_video = true; - } - { - ReadLockScoped read_lock(*receive_crit_); - if (!audio_receive_streams_.empty()) - have_audio = true; - if (!video_receive_streams_.empty()) - have_video = true; - } + RTC_DCHECK_RUN_ON(worker_thread_); + + bool have_audio = + !audio_send_ssrcs_.empty() || !audio_receive_streams_.empty(); + bool have_video = + !video_send_ssrcs_.empty() || !video_receive_streams_.empty(); bool aggregate_network_up = ((have_video && video_network_state_ == kNetworkUp) || @@ -1063,74 +1350,82 @@ void Call::UpdateAggregateNetworkState() { } aggregate_network_up_ = aggregate_network_up; - transport_send_ptr_->OnNetworkAvailability(aggregate_network_up); + transport_send_->OnNetworkAvailability(aggregate_network_up); +} + +void Call::OnLocalSsrcUpdated(webrtc::AudioReceiveStream& stream, + uint32_t local_ssrc) { + RTC_DCHECK_RUN_ON(worker_thread_); + webrtc::internal::AudioReceiveStream& receive_stream = + static_cast(stream); + + receive_stream.SetLocalSsrc(local_ssrc); + auto it = audio_send_ssrcs_.find(local_ssrc); + receive_stream.AssociateSendStream(it != audio_send_ssrcs_.end() ? it->second + : nullptr); +} + +void Call::OnUpdateSyncGroup(webrtc::AudioReceiveStream& stream, + const std::string& sync_group) { + RTC_DCHECK_RUN_ON(worker_thread_); + webrtc::internal::AudioReceiveStream& receive_stream = + static_cast(stream); + receive_stream.SetSyncGroup(sync_group); + ConfigureSync(sync_group); } void Call::OnSentPacket(const rtc::SentPacket& sent_packet) { + // In production and with most tests, this method will be called on the + // network thread. However some test classes such as DirectTransport don't + // incorporate a network thread. 
This means that tests for RtpSenderEgress + // and ModuleRtpRtcpImpl2 that use DirectTransport, will call this method + // on a ProcessThread. This is alright as is since we forward the call to + // implementations that either just do a PostTask or use locking. video_send_delay_stats_->OnSentPacket(sent_packet.packet_id, clock_->TimeInMilliseconds()); - transport_send_ptr_->OnSentPacket(sent_packet); + transport_send_->OnSentPacket(sent_packet); } void Call::OnStartRateUpdate(DataRate start_rate) { - RTC_DCHECK(network_queue()->IsCurrent()); + RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_); bitrate_allocator_->UpdateStartRate(start_rate.bps()); } void Call::OnTargetTransferRate(TargetTransferRate msg) { - RTC_DCHECK(network_queue()->IsCurrent()); - RTC_DCHECK_RUN_ON(&network_sequence_checker_); - { - rtc::CritScope cs(&last_bandwidth_bps_crit_); - last_bandwidth_bps_ = msg.target_rate.bps(); - } + RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_); uint32_t target_bitrate_bps = msg.target_rate.bps(); // For controlling the rate of feedback messages. receive_side_cc_.OnBitrateChanged(target_bitrate_bps); bitrate_allocator_->OnNetworkEstimateChanged(msg); - // Ignore updates if bitrate is zero (the aggregate network state is down). - if (target_bitrate_bps == 0) { - rtc::CritScope lock(&bitrate_crit_); - estimated_send_bitrate_kbps_counter_.ProcessAndPause(); - pacer_bitrate_kbps_counter_.ProcessAndPause(); - return; - } - - bool sending_video; - { - ReadLockScoped read_lock(*send_crit_); - sending_video = !video_send_streams_.empty(); - } + last_bandwidth_bps_.store(target_bitrate_bps, std::memory_order_relaxed); - rtc::CritScope lock(&bitrate_crit_); - if (!sending_video) { - // Do not update the stats if we are not sending video. - estimated_send_bitrate_kbps_counter_.ProcessAndPause(); - pacer_bitrate_kbps_counter_.ProcessAndPause(); - return; + // Ignore updates if bitrate is zero (the aggregate network state is + // down) or if we're not sending video. 
+ // Using |video_send_streams_empty_| is racy but as the caller can't + // reasonably expect synchronize with changes in |video_send_streams_| (being + // on |send_transport_sequence_checker|), we can avoid a PostTask this way. + if (target_bitrate_bps == 0 || + video_send_streams_empty_.load(std::memory_order_relaxed)) { + send_stats_.PauseSendAndPacerBitrateCounters(); + } else { + send_stats_.AddTargetBitrateSample(target_bitrate_bps); } - estimated_send_bitrate_kbps_counter_.Add(target_bitrate_bps / 1000); - // Pacer bitrate may be higher than bitrate estimate if enforcing min bitrate. - uint32_t pacer_bitrate_bps = - std::max(target_bitrate_bps, min_allocated_send_bitrate_bps_); - pacer_bitrate_kbps_counter_.Add(pacer_bitrate_bps / 1000); } void Call::OnAllocationLimitsChanged(BitrateAllocationLimits limits) { - RTC_DCHECK(network_queue()->IsCurrent()); - RTC_DCHECK_RUN_ON(&network_sequence_checker_); + RTC_DCHECK_RUN_ON(&send_transport_sequence_checker_); transport_send_ptr_->SetAllocatedSendBitrateLimits(limits); - - min_allocated_send_bitrate_bps_ = limits.min_allocatable_rate.bps(); - - rtc::CritScope lock(&bitrate_crit_); - configured_max_padding_bitrate_bps_ = limits.max_padding_rate.bps(); + send_stats_.SetMinAllocatableRate(limits); + configured_max_padding_bitrate_bps_.store(limits.max_padding_rate.bps(), + std::memory_order_relaxed); } +// RTC_RUN_ON(worker_thread_) void Call::ConfigureSync(const std::string& sync_group) { + // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. // Set sync only if there was no previous one. 
if (sync_group.empty()) return; @@ -1180,60 +1475,62 @@ void Call::ConfigureSync(const std::string& sync_group) { } } -PacketReceiver::DeliveryStatus Call::DeliverRtcp(MediaType media_type, - const uint8_t* packet, - size_t length) { +// RTC_RUN_ON(network_thread_) +void Call::DeliverRtcp(MediaType media_type, rtc::CopyOnWriteBuffer packet) { TRACE_EVENT0("webrtc", "Call::DeliverRtcp"); - // TODO(pbos): Make sure it's a valid packet. - // Return DELIVERY_UNKNOWN_SSRC if it can be determined that - // there's no receiver of the packet. - if (received_bytes_per_second_counter_.HasSample()) { - // First RTP packet has been received. - received_bytes_per_second_counter_.Add(static_cast(length)); - received_rtcp_bytes_per_second_counter_.Add(static_cast(length)); - } - bool rtcp_delivered = false; - if (media_type == MediaType::ANY || media_type == MediaType::VIDEO) { - ReadLockScoped read_lock(*receive_crit_); - for (VideoReceiveStream2* stream : video_receive_streams_) { - if (stream->DeliverRtcp(packet, length)) - rtcp_delivered = true; - } - } - if (media_type == MediaType::ANY || media_type == MediaType::AUDIO) { - ReadLockScoped read_lock(*receive_crit_); - for (AudioReceiveStream* stream : audio_receive_streams_) { - stream->DeliverRtcp(packet, length); - rtcp_delivered = true; - } - } - if (media_type == MediaType::ANY || media_type == MediaType::VIDEO) { - ReadLockScoped read_lock(*send_crit_); - for (VideoSendStream* stream : video_send_streams_) { - stream->DeliverRtcp(packet, length); - rtcp_delivered = true; - } - } - if (media_type == MediaType::ANY || media_type == MediaType::AUDIO) { - ReadLockScoped read_lock(*send_crit_); - for (auto& kv : audio_send_ssrcs_) { - kv.second->DeliverRtcp(packet, length); - rtcp_delivered = true; - } - } - if (rtcp_delivered) { - event_log_->Log(std::make_unique( - rtc::MakeArrayView(packet, length))); - } + // TODO(bugs.webrtc.org/11993): This DCHECK is here just to maintain the + // invariant that currently the only call 
path to this function is via + // `PeerConnection::InitializeRtcpCallback()`. DeliverRtp on the other hand + // gets called via the channel classes and + // WebRtc[Audio|Video]Channel's `OnPacketReceived`. We'll remove the + // PeerConnection involvement as well as + // `JsepTransportController::OnRtcpPacketReceived_n` and `rtcp_handler` + // and make sure that the flow of packets is consistent from the + // `RtpTransport` class, via the *Channel and *Engine classes and into Call. + // This way we'll also know more about the context of the packet. + RTC_DCHECK_EQ(media_type, MediaType::ANY); + + // TODO(bugs.webrtc.org/11993): This should execute directly on the network + // thread. + worker_thread_->PostTask( + ToQueuedTask(task_safety_, [this, packet = std::move(packet)]() { + RTC_DCHECK_RUN_ON(worker_thread_); + + receive_stats_.AddReceivedRtcpBytes(static_cast(packet.size())); + bool rtcp_delivered = false; + for (VideoReceiveStream2* stream : video_receive_streams_) { + if (stream->DeliverRtcp(packet.cdata(), packet.size())) + rtcp_delivered = true; + } - return rtcp_delivered ? 
DELIVERY_OK : DELIVERY_PACKET_ERROR; + for (AudioReceiveStream* stream : audio_receive_streams_) { + stream->DeliverRtcp(packet.cdata(), packet.size()); + rtcp_delivered = true; + } + + for (VideoSendStream* stream : video_send_streams_) { + stream->DeliverRtcp(packet.cdata(), packet.size()); + rtcp_delivered = true; + } + + for (auto& kv : audio_send_ssrcs_) { + kv.second->DeliverRtcp(packet.cdata(), packet.size()); + rtcp_delivered = true; + } + + if (rtcp_delivered) { + event_log_->Log(std::make_unique( + rtc::MakeArrayView(packet.cdata(), packet.size()))); + } + })); } PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type, rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) { TRACE_EVENT0("webrtc", "Call::DeliverRtp"); + RTC_DCHECK_NE(media_type, MediaType::ANY); RtpPacketReceived parsed_packet; if (!parsed_packet.Parse(std::move(packet))) @@ -1246,9 +1543,9 @@ PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type, packet_time_us = receive_time_calculator_->ReconcileReceiveTimes( packet_time_us, rtc::TimeUTCMicros(), clock_->TimeInMicroseconds()); } - parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000); + parsed_packet.set_arrival_time(Timestamp::Micros(packet_time_us)); } else { - parsed_packet.set_arrival_time_ms(clock_->TimeInMilliseconds()); + parsed_packet.set_arrival_time(clock_->CurrentTime()); } // We might get RTP keep-alive packets in accordance with RFC6263 section 4.6. 
@@ -1259,21 +1556,20 @@ PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type, RTC_DCHECK(media_type == MediaType::AUDIO || media_type == MediaType::VIDEO || is_keep_alive_packet); - ReadLockScoped read_lock(*receive_crit_); auto it = receive_rtp_config_.find(parsed_packet.Ssrc()); if (it == receive_rtp_config_.end()) { RTC_LOG(LS_ERROR) << "receive_rtp_config_ lookup failed for ssrc " << parsed_packet.Ssrc(); // Destruction of the receive stream, including deregistering from the - // RtpDemuxer, is not protected by the |receive_crit_| lock. But - // deregistering in the |receive_rtp_config_| map is protected by that lock. - // So by not passing the packet on to demuxing in this case, we prevent - // incoming packets to be passed on via the demuxer to a receive stream - // which is being torned down. + // RtpDemuxer, is not protected by the |worker_thread_|. + // But deregistering in the |receive_rtp_config_| map is. So by not passing + // the packet on to demuxing in this case, we prevent incoming packets to be + // passed on via the demuxer to a receive stream which is being torned down. 
return DELIVERY_UNKNOWN_SSRC; } - parsed_packet.IdentifyExtensions(it->second.extensions); + parsed_packet.IdentifyExtensions( + RtpHeaderExtensionMap(it->second->rtp_config().extensions)); NotifyBweOfReceivedPacket(parsed_packet, media_type); @@ -1282,29 +1578,19 @@ PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type, int length = static_cast(parsed_packet.size()); if (media_type == MediaType::AUDIO) { if (audio_receiver_controller_.OnRtpPacket(parsed_packet)) { - received_bytes_per_second_counter_.Add(length); - received_audio_bytes_per_second_counter_.Add(length); + receive_stats_.AddReceivedAudioBytes(length, + parsed_packet.arrival_time()); event_log_->Log( std::make_unique(parsed_packet)); - const int64_t arrival_time_ms = parsed_packet.arrival_time_ms(); - if (!first_received_rtp_audio_ms_) { - first_received_rtp_audio_ms_.emplace(arrival_time_ms); - } - last_received_rtp_audio_ms_.emplace(arrival_time_ms); return DELIVERY_OK; } } else if (media_type == MediaType::VIDEO) { parsed_packet.set_payload_type_frequency(kVideoPayloadTypeFrequency); if (video_receiver_controller_.OnRtpPacket(parsed_packet)) { - received_bytes_per_second_counter_.Add(length); - received_video_bytes_per_second_counter_.Add(length); + receive_stats_.AddReceivedVideoBytes(length, + parsed_packet.arrival_time()); event_log_->Log( std::make_unique(parsed_packet)); - const int64_t arrival_time_ms = parsed_packet.arrival_time_ms(); - if (!first_received_rtp_video_ms_) { - first_received_rtp_video_ms_.emplace(arrival_time_ms); - } - last_received_rtp_video_ms_.emplace(arrival_time_ms); return DELIVERY_OK; } } @@ -1315,56 +1601,64 @@ PacketReceiver::DeliveryStatus Call::DeliverPacket( MediaType media_type, rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) { - RTC_DCHECK_RUN_ON(&configuration_sequence_checker_); - if (IsRtcp(packet.cdata(), packet.size())) - return DeliverRtcp(media_type, packet.cdata(), packet.size()); + if (IsRtcpPacket(packet)) { + 
RTC_DCHECK_RUN_ON(network_thread_); + DeliverRtcp(media_type, std::move(packet)); + return DELIVERY_OK; + } + RTC_DCHECK_RUN_ON(worker_thread_); return DeliverRtp(media_type, std::move(packet), packet_time_us); } void Call::OnRecoveredPacket(const uint8_t* packet, size_t length) { + // TODO(bugs.webrtc.org/11993): Expect to be called on the network thread. + // This method is called synchronously via |OnRtpPacket()| (see DeliverRtp) + // on the same thread. + RTC_DCHECK_RUN_ON(worker_thread_); RtpPacketReceived parsed_packet; if (!parsed_packet.Parse(packet, length)) return; parsed_packet.set_recovered(true); - ReadLockScoped read_lock(*receive_crit_); auto it = receive_rtp_config_.find(parsed_packet.Ssrc()); if (it == receive_rtp_config_.end()) { RTC_LOG(LS_ERROR) << "receive_rtp_config_ lookup failed for ssrc " << parsed_packet.Ssrc(); // Destruction of the receive stream, including deregistering from the - // RtpDemuxer, is not protected by the |receive_crit_| lock. But - // deregistering in the |receive_rtp_config_| map is protected by that lock. + // RtpDemuxer, is not protected by the |worker_thread_|. + // But deregistering in the |receive_rtp_config_| map is. // So by not passing the packet on to demuxing in this case, we prevent // incoming packets to be passed on via the demuxer to a receive stream // which is being torn down. return; } - parsed_packet.IdentifyExtensions(it->second.extensions); + parsed_packet.IdentifyExtensions( + RtpHeaderExtensionMap(it->second->rtp_config().extensions)); // TODO(brandtr): Update here when we support protecting audio packets too. 
parsed_packet.set_payload_type_frequency(kVideoPayloadTypeFrequency); video_receiver_controller_.OnRtpPacket(parsed_packet); } +// RTC_RUN_ON(worker_thread_) void Call::NotifyBweOfReceivedPacket(const RtpPacketReceived& packet, MediaType media_type) { auto it = receive_rtp_config_.find(packet.Ssrc()); - bool use_send_side_bwe = - (it != receive_rtp_config_.end()) && it->second.use_send_side_bwe; + bool use_send_side_bwe = (it != receive_rtp_config_.end()) && + UseSendSideBwe(it->second->rtp_config()); RTPHeader header; packet.GetHeader(&header); ReceivedPacket packet_msg; packet_msg.size = DataSize::Bytes(packet.payload_size()); - packet_msg.receive_time = Timestamp::Millis(packet.arrival_time_ms()); + packet_msg.receive_time = packet.arrival_time(); if (header.extension.hasAbsoluteSendTime) { packet_msg.send_time = header.extension.GetAbsoluteSendTimestamp(); } - transport_send_ptr_->OnReceivedPacket(packet_msg); + transport_send_->OnReceivedPacket(packet_msg); if (!use_send_side_bwe && header.extension.hasTransportSequenceNumber) { // Inconsistent configuration of send side BWE. Do nothing. 
@@ -1380,8 +1674,8 @@ void Call::NotifyBweOfReceivedPacket(const RtpPacketReceived& packet, if (media_type == MediaType::VIDEO || (use_send_side_bwe && header.extension.hasTransportSequenceNumber)) { receive_side_cc_.OnReceivedPacket( - packet.arrival_time_ms(), packet.payload_size() + packet.padding_size(), - header); + packet.arrival_time().ms(), + packet.payload_size() + packet.padding_size(), header); } } diff --git a/call/call.h b/call/call.h index 77cd3d2690..f6388c3c78 100644 --- a/call/call.h +++ b/call/call.h @@ -15,7 +15,9 @@ #include #include +#include "api/adaptation/resource.h" #include "api/media_types.h" +#include "api/task_queue/task_queue_base.h" #include "call/audio_receive_stream.h" #include "call/audio_send_stream.h" #include "call/call_config.h" @@ -28,9 +30,41 @@ #include "rtc_base/copy_on_write_buffer.h" #include "rtc_base/network/sent_packet.h" #include "rtc_base/network_route.h" +#include "rtc_base/ref_count.h" namespace webrtc { +// A restricted way to share the module process thread across multiple instances +// of Call that are constructed on the same worker thread (which is what the +// peer connection factory guarantees). +// SharedModuleThread supports a callback that is issued when only one reference +// remains, which is used to indicate to the original owner that the thread may +// be discarded. +class SharedModuleThread : public rtc::RefCountInterface { + protected: + SharedModuleThread(std::unique_ptr process_thread, + std::function on_one_ref_remaining); + friend class rtc::scoped_refptr; + ~SharedModuleThread() override; + + public: + // Allows injection of an externally created process thread. 
+ static rtc::scoped_refptr Create( + std::unique_ptr process_thread, + std::function on_one_ref_remaining); + + void EnsureStarted(); + + ProcessThread* process_thread(); + + private: + void AddRef() const override; + rtc::RefCountReleaseStatus Release() const override; + + class Impl; + mutable std::unique_ptr impl_; +}; + // A Call instance can contain several send and/or receive streams. All streams // are assumed to have the same remote endpoint and will share bitrate estimates // etc. @@ -51,8 +85,13 @@ class Call { static Call* Create(const Call::Config& config); static Call* Create(const Call::Config& config, Clock* clock, - std::unique_ptr call_thread, + rtc::scoped_refptr call_thread, std::unique_ptr pacer_thread); + static Call* Create(const Call::Config& config, + Clock* clock, + rtc::scoped_refptr call_thread, + std::unique_ptr + transportControllerSend); virtual AudioSendStream* CreateAudioSendStream( const AudioSendStream::Config& config) = 0; @@ -86,6 +125,11 @@ class Call { virtual void DestroyFlexfecReceiveStream( FlexfecReceiveStream* receive_stream) = 0; + // When a resource is overused, the Call will try to reduce the load on the + // sysem, for example by reducing the resolution or frame rate of encoded + // streams. + virtual void AddAdaptationResource(rtc::scoped_refptr resource) = 0; + // All received RTP and RTCP packets for the call should be inserted to this // PacketReceiver. The PacketReceiver pointer is valid as long as the // Call instance exists. @@ -111,11 +155,24 @@ class Call { virtual void OnAudioTransportOverheadChanged( int transport_overhead_per_packet) = 0; + // Called when a receive stream's local ssrc has changed and association with + // send streams needs to be updated. 
+ virtual void OnLocalSsrcUpdated(AudioReceiveStream& stream, + uint32_t local_ssrc) = 0; + + virtual void OnUpdateSyncGroup(AudioReceiveStream& stream, + const std::string& sync_group) = 0; + virtual void OnSentPacket(const rtc::SentPacket& sent_packet) = 0; virtual void SetClientBitratePreferences( const BitrateSettings& preferences) = 0; + virtual const WebRtcKeyValueConfig& trials() const = 0; + + virtual TaskQueueBase* network_thread() const = 0; + virtual TaskQueueBase* worker_thread() const = 0; + virtual ~Call() {} }; diff --git a/call/call_config.cc b/call/call_config.cc index b149c889ea..23b60ce436 100644 --- a/call/call_config.cc +++ b/call/call_config.cc @@ -14,12 +14,27 @@ namespace webrtc { -CallConfig::CallConfig(RtcEventLog* event_log) : event_log(event_log) { +CallConfig::CallConfig(RtcEventLog* event_log, + TaskQueueBase* network_task_queue /* = nullptr*/) + : event_log(event_log), network_task_queue_(network_task_queue) { RTC_DCHECK(event_log); } CallConfig::CallConfig(const CallConfig& config) = default; +RtpTransportConfig CallConfig::ExtractTransportConfig() const { + RtpTransportConfig transportConfig; + transportConfig.bitrate_config = bitrate_config; + transportConfig.event_log = event_log; + transportConfig.network_controller_factory = network_controller_factory; + transportConfig.network_state_predictor_factory = + network_state_predictor_factory; + transportConfig.task_queue_factory = task_queue_factory; + transportConfig.trials = trials; + + return transportConfig; +} + CallConfig::~CallConfig() = default; } // namespace webrtc diff --git a/call/call_config.h b/call/call_config.h index 205f7a48bb..ba6dec3ad6 100644 --- a/call/call_config.h +++ b/call/call_config.h @@ -19,6 +19,8 @@ #include "api/transport/network_control.h" #include "api/transport/webrtc_key_value_config.h" #include "call/audio_state.h" +#include "call/rtp_transport_config.h" +#include "call/rtp_transport_controller_send_factory_interface.h" namespace webrtc { @@ -26,8 
+28,13 @@ class AudioProcessing; class RtcEventLog; struct CallConfig { - explicit CallConfig(RtcEventLog* event_log); + // If |network_task_queue| is set to nullptr, Call will assume that network + // related callbacks will be made on the same TQ as the Call instance was + // constructed on. + explicit CallConfig(RtcEventLog* event_log, + TaskQueueBase* network_task_queue = nullptr); CallConfig(const CallConfig&); + RtpTransportConfig ExtractTransportConfig() const; ~CallConfig(); // Bitrate config used until valid bitrate estimates are calculated. Also @@ -42,7 +49,7 @@ struct CallConfig { // RtcEventLog to use for this call. Required. // Use webrtc::RtcEventLog::CreateNull() for a null implementation. - RtcEventLog* event_log = nullptr; + RtcEventLog* const event_log = nullptr; // FecController to use for this call. FecControllerFactoryInterface* fec_controller_factory = nullptr; @@ -63,6 +70,11 @@ struct CallConfig { // Key-value mapping of internal configurations to apply, // e.g. field trials. const WebRtcKeyValueConfig* trials = nullptr; + + TaskQueueBase* const network_task_queue_ = nullptr; + // RtpTransportControllerSend to use for this call. 
+ RtpTransportControllerSendFactoryInterface* + rtp_transport_controller_send_factory = nullptr; }; } // namespace webrtc diff --git a/call/call_factory.cc b/call/call_factory.cc index 6b4f419742..aeb3cbdaa7 100644 --- a/call/call_factory.cc +++ b/call/call_factory.cc @@ -14,11 +14,13 @@ #include #include +#include #include "absl/types/optional.h" #include "api/test/simulated_network.h" #include "call/call.h" #include "call/degraded_call.h" +#include "call/rtp_transport_config.h" #include "rtc_base/checks.h" #include "system_wrappers/include/field_trial.h" @@ -70,19 +72,44 @@ absl::optional ParseDegradationConfig( } } // namespace +CallFactory::CallFactory() { + call_thread_.Detach(); +} + Call* CallFactory::CreateCall(const Call::Config& config) { + RTC_DCHECK_RUN_ON(&call_thread_); absl::optional send_degradation_config = ParseDegradationConfig(true); absl::optional receive_degradation_config = ParseDegradationConfig(false); + RtpTransportConfig transportConfig = config.ExtractTransportConfig(); + if (send_degradation_config || receive_degradation_config) { - return new DegradedCall(std::unique_ptr(Call::Create(config)), - send_degradation_config, receive_degradation_config, - config.task_queue_factory); + return new DegradedCall( + std::unique_ptr(Call::Create( + config, Clock::GetRealTimeClock(), + SharedModuleThread::Create( + ProcessThread::Create("ModuleProcessThread"), nullptr), + config.rtp_transport_controller_send_factory->Create( + transportConfig, Clock::GetRealTimeClock(), + ProcessThread::Create("PacerThread")))), + send_degradation_config, receive_degradation_config, + config.task_queue_factory); + } + + if (!module_thread_) { + module_thread_ = SharedModuleThread::Create( + ProcessThread::Create("SharedModThread"), [this]() { + RTC_DCHECK_RUN_ON(&call_thread_); + module_thread_ = nullptr; + }); } - return Call::Create(config); + return Call::Create(config, Clock::GetRealTimeClock(), module_thread_, + 
config.rtp_transport_controller_send_factory->Create( + transportConfig, Clock::GetRealTimeClock(), + ProcessThread::Create("PacerThread"))); } std::unique_ptr CreateCallFactory() { diff --git a/call/call_factory.h b/call/call_factory.h index f0d695c915..469bec39e1 100644 --- a/call/call_factory.h +++ b/call/call_factory.h @@ -12,15 +12,25 @@ #define CALL_CALL_FACTORY_H_ #include "api/call/call_factory_interface.h" +#include "api/sequence_checker.h" #include "call/call.h" #include "call/call_config.h" +#include "rtc_base/system/no_unique_address.h" namespace webrtc { class CallFactory : public CallFactoryInterface { + public: + CallFactory(); + + private: ~CallFactory() override {} Call* CreateCall(const CallConfig& config) override; + + RTC_NO_UNIQUE_ADDRESS SequenceChecker call_thread_; + rtc::scoped_refptr module_thread_ + RTC_GUARDED_BY(call_thread_); }; } // namespace webrtc diff --git a/call/call_perf_tests.cc b/call/call_perf_tests.cc index 123be7da4c..c163ab2fe7 100644 --- a/call/call_perf_tests.cc +++ b/call/call_perf_tests.cc @@ -29,6 +29,7 @@ #include "modules/audio_mixer/audio_mixer_impl.h" #include "modules/rtp_rtcp/source/rtp_packet.h" #include "rtc_base/checks.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/thread.h" #include "rtc_base/thread_annotations.h" @@ -181,7 +182,6 @@ void CallPerfTest::TestAudioVideoSync(FecMode fec, std::unique_ptr audio_send_transport; std::unique_ptr video_send_transport; std::unique_ptr receive_transport; - test::NullTransport rtcp_send_transport; AudioSendStream* audio_send_stream; AudioReceiveStream* audio_receive_stream; @@ -270,7 +270,7 @@ void CallPerfTest::TestAudioVideoSync(FecMode fec, AudioReceiveStream::Config audio_recv_config; audio_recv_config.rtp.remote_ssrc = kAudioSendSsrc; audio_recv_config.rtp.local_ssrc = kAudioRecvSsrc; - audio_recv_config.rtcp_send_transport = &rtcp_send_transport; + audio_recv_config.rtcp_send_transport = 
receive_transport.get(); audio_recv_config.sync_group = kSyncGroup; audio_recv_config.decoder_factory = audio_decoder_factory_; audio_recv_config.decoder_map = { @@ -312,14 +312,18 @@ void CallPerfTest::TestAudioVideoSync(FecMode fec, DestroyStreams(); - video_send_transport.reset(); - audio_send_transport.reset(); - receive_transport.reset(); - sender_call_->DestroyAudioSendStream(audio_send_stream); receiver_call_->DestroyAudioReceiveStream(audio_receive_stream); DestroyCalls(); + // Call may post periodic rtcp packets to the transport on the process + // thread, thus transport should be destroyed after the call objects. + // Though transports keep pointers to the call objects, transports handle + // packets on the task_queue() and thus wouldn't create a race while current + // destruction happens in the same task as destruction of the call objects. + video_send_transport.reset(); + audio_send_transport.reset(); + receive_transport.reset(); }); observer->PrintResults(); @@ -336,27 +340,29 @@ void CallPerfTest::TestAudioVideoSync(FecMode fec, ToQueuedTask([to_delete = observer.release()]() { delete to_delete; })); } -TEST_F(CallPerfTest, PlaysOutAudioAndVideoInSyncWithoutClockDrift) { +TEST_F(CallPerfTest, Synchronization_PlaysOutAudioAndVideoWithoutClockDrift) { TestAudioVideoSync(FecMode::kOff, CreateOrder::kAudioFirst, DriftingClock::kNoDrift, DriftingClock::kNoDrift, DriftingClock::kNoDrift, "_video_no_drift"); } -TEST_F(CallPerfTest, PlaysOutAudioAndVideoInSyncWithVideoNtpDrift) { +TEST_F(CallPerfTest, Synchronization_PlaysOutAudioAndVideoWithVideoNtpDrift) { TestAudioVideoSync(FecMode::kOff, CreateOrder::kAudioFirst, DriftingClock::PercentsFaster(10.0f), DriftingClock::kNoDrift, DriftingClock::kNoDrift, "_video_ntp_drift"); } -TEST_F(CallPerfTest, PlaysOutAudioAndVideoInSyncWithAudioFasterThanVideoDrift) { +TEST_F(CallPerfTest, + Synchronization_PlaysOutAudioAndVideoWithAudioFasterThanVideoDrift) { TestAudioVideoSync(FecMode::kOff, CreateOrder::kAudioFirst,
DriftingClock::kNoDrift, DriftingClock::PercentsSlower(30.0f), DriftingClock::PercentsFaster(30.0f), "_audio_faster"); } -TEST_F(CallPerfTest, PlaysOutAudioAndVideoInSyncWithVideoFasterThanAudioDrift) { +TEST_F(CallPerfTest, + Synchronization_PlaysOutAudioAndVideoWithVideoFasterThanAudioDrift) { TestAudioVideoSync(FecMode::kOn, CreateOrder::kVideoFirst, DriftingClock::kNoDrift, DriftingClock::PercentsFaster(30.0f), @@ -409,7 +415,7 @@ void CallPerfTest::TestCaptureNtpTime( } void OnFrame(const VideoFrame& video_frame) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (video_frame.ntp_time_ms() <= 0) { // Haven't got enough RTCP SR in order to calculate the capture ntp // time. @@ -445,7 +451,7 @@ void CallPerfTest::TestCaptureNtpTime( } Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -488,7 +494,7 @@ void CallPerfTest::TestCaptureNtpTime( time_offset_ms_list_, "ms", true); } - rtc::CriticalSection crit_; + Mutex mutex_; const BuiltInNetworkBehaviorConfig net_config_; Clock* const clock_; int threshold_ms_; @@ -499,7 +505,7 @@ void CallPerfTest::TestCaptureNtpTime( bool rtp_start_timestamp_set_; uint32_t rtp_start_timestamp_; typedef std::map FrameCaptureTimeList; - FrameCaptureTimeList capture_time_list_ RTC_GUARDED_BY(&crit_); + FrameCaptureTimeList capture_time_list_ RTC_GUARDED_BY(&mutex_); std::vector time_offset_ms_list_; } test(net_config, threshold_ms, start_time_ms, run_time_ms); @@ -508,7 +514,7 @@ void CallPerfTest::TestCaptureNtpTime( // Flaky tests, disabled on Mac and Windows due to webrtc:8291. 
#if !(defined(WEBRTC_MAC) || defined(WEBRTC_WIN)) -TEST_F(CallPerfTest, CaptureNtpTimeWithNetworkDelay) { +TEST_F(CallPerfTest, Real_Estimated_CaptureNtpTimeWithNetworkDelay) { BuiltInNetworkBehaviorConfig net_config; net_config.queue_delay_ms = 100; // TODO(wu): lower the threshold as the calculation/estimatation becomes more @@ -519,7 +525,7 @@ TEST_F(CallPerfTest, CaptureNtpTimeWithNetworkDelay) { TestCaptureNtpTime(net_config, kThresholdMs, kStartTimeMs, kRunTimeMs); } -TEST_F(CallPerfTest, CaptureNtpTimeWithNetworkJitter) { +TEST_F(CallPerfTest, Real_Estimated_CaptureNtpTimeWithNetworkJitter) { BuiltInNetworkBehaviorConfig net_config; net_config.queue_delay_ms = 100; net_config.delay_standard_deviation_ms = 10; @@ -555,6 +561,18 @@ TEST_F(CallPerfTest, ReceivesCpuOveruseAndUnderuse) { // TODO(sprang): Add integration test for maintain-framerate mode? void OnSinkWantsChanged(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) override { + // The sink wants can change either because an adaptation happened (i.e. + // the pixels or frame rate changed) or for other reasons, such as encoded + // resolutions being communicated (happens whenever we capture a new frame + // size). In this test, we only care about adaptations. + bool did_adapt = + last_wants_.max_pixel_count != wants.max_pixel_count || + last_wants_.target_pixel_count != wants.target_pixel_count || + last_wants_.max_framerate_fps != wants.max_framerate_fps; + last_wants_ = wants; + if (!did_adapt) { + return; + } // At kStart expect CPU overuse. Then expect CPU underuse when the encoder // delay has been decreased. 
switch (test_phase_) { @@ -619,6 +637,9 @@ TEST_F(CallPerfTest, ReceivesCpuOveruseAndUnderuse) { kAdaptedDown, kAdaptedUp } test_phase_; + + private: + rtc::VideoSinkWants last_wants_; } test; RunBaseTest(&test); @@ -633,7 +654,8 @@ void CallPerfTest::TestMinTransmitBitrate(bool pad_to_min_bitrate) { static const int kAcceptableBitrateErrorMargin = 15; // +- 7 class BitrateObserver : public test::EndToEndTest { public: - explicit BitrateObserver(bool using_min_transmit_bitrate) + explicit BitrateObserver(bool using_min_transmit_bitrate, + TaskQueueBase* task_queue) : EndToEndTest(kLongTimeoutMs), send_stream_(nullptr), converged_(false), @@ -646,27 +668,31 @@ void CallPerfTest::TestMinTransmitBitrate(bool pad_to_min_bitrate) { ? kMaxAcceptableTransmitBitrate : (kMaxEncodeBitrateKbps + kAcceptableBitrateErrorMargin / 2)), - num_bitrate_observations_in_range_(0) {} + num_bitrate_observations_in_range_(0), + task_queue_(task_queue) {} private: // TODO(holmer): Run this with a timer instead of once per packet. 
Action OnSendRtp(const uint8_t* packet, size_t length) override { - VideoSendStream::Stats stats = send_stream_->GetStats(); - if (!stats.substreams.empty()) { - RTC_DCHECK_EQ(1, stats.substreams.size()); - int bitrate_kbps = - stats.substreams.begin()->second.total_bitrate_bps / 1000; - if (bitrate_kbps > min_acceptable_bitrate_ && - bitrate_kbps < max_acceptable_bitrate_) { - converged_ = true; - ++num_bitrate_observations_in_range_; - if (num_bitrate_observations_in_range_ == - kNumBitrateObservationsInRange) - observation_complete_.Set(); + task_queue_->PostTask(ToQueuedTask([this]() { + VideoSendStream::Stats stats = send_stream_->GetStats(); + + if (!stats.substreams.empty()) { + RTC_DCHECK_EQ(1, stats.substreams.size()); + int bitrate_kbps = + stats.substreams.begin()->second.total_bitrate_bps / 1000; + if (bitrate_kbps > min_acceptable_bitrate_ && + bitrate_kbps < max_acceptable_bitrate_) { + converged_ = true; + ++num_bitrate_observations_in_range_; + if (num_bitrate_observations_in_range_ == + kNumBitrateObservationsInRange) + observation_complete_.Set(); + } + if (converged_) + bitrate_kbps_list_.push_back(bitrate_kbps); } - if (converged_) - bitrate_kbps_list_.push_back(bitrate_kbps); - } + })); return SEND_PACKET; } @@ -703,17 +729,18 @@ void CallPerfTest::TestMinTransmitBitrate(bool pad_to_min_bitrate) { const int max_acceptable_bitrate_; int num_bitrate_observations_in_range_; std::vector bitrate_kbps_list_; - } test(pad_to_min_bitrate); + TaskQueueBase* task_queue_; + } test(pad_to_min_bitrate, task_queue()); fake_encoder_max_bitrate_ = kMaxEncodeBitrateKbps; RunBaseTest(&test); } -TEST_F(CallPerfTest, PadsToMinTransmitBitrate) { +TEST_F(CallPerfTest, Bitrate_Kbps_PadsToMinTransmitBitrate) { TestMinTransmitBitrate(true); } -TEST_F(CallPerfTest, NoPadWithoutMinTransmitBitrate) { +TEST_F(CallPerfTest, Bitrate_Kbps_NoPadWithoutMinTransmitBitrate) { TestMinTransmitBitrate(false); } @@ -729,6 +756,11 @@ TEST_F(CallPerfTest, 
MAYBE_KeepsHighBitrateWhenReconfiguringSender) { static const uint32_t kInitialBitrateKbps = 400; static const uint32_t kReconfigureThresholdKbps = 600; + // We get lower bitrate than expected by this test if the following field + // trial is enabled. + test::ScopedFieldTrials field_trials( + "WebRTC-SendSideBwe-WithOverhead/Disabled/"); + class VideoStreamFactory : public VideoEncoderConfig::VideoStreamFactoryInterface { public: @@ -749,7 +781,7 @@ TEST_F(CallPerfTest, MAYBE_KeepsHighBitrateWhenReconfiguringSender) { class BitrateObserver : public test::EndToEndTest, public test::FakeEncoder { public: - BitrateObserver() + explicit BitrateObserver(TaskQueueBase* task_queue) : EndToEndTest(kDefaultTimeoutMs), FakeEncoder(Clock::GetRealTimeClock()), encoder_inits_(0), @@ -758,7 +790,8 @@ TEST_F(CallPerfTest, MAYBE_KeepsHighBitrateWhenReconfiguringSender) { frame_generator_(nullptr), encoder_factory_(this), bitrate_allocator_factory_( - CreateBuiltinVideoBitrateAllocatorFactory()) {} + CreateBuiltinVideoBitrateAllocatorFactory()), + task_queue_(task_queue) {} int32_t InitEncode(const VideoCodec* config, const VideoEncoder::Settings& settings) override { @@ -808,7 +841,7 @@ TEST_F(CallPerfTest, MAYBE_KeepsHighBitrateWhenReconfiguringSender) { bitrate_allocator_factory_.get(); encoder_config->max_bitrate_bps = 2 * kReconfigureThresholdKbps * 1000; encoder_config->video_stream_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); encoder_config_ = encoder_config->Copy(); } @@ -828,7 +861,9 @@ TEST_F(CallPerfTest, MAYBE_KeepsHighBitrateWhenReconfiguringSender) { ASSERT_TRUE(time_to_reconfigure_.Wait(kDefaultTimeoutMs)) << "Timed out before receiving an initial high bitrate."; frame_generator_->ChangeResolution(kDefaultWidth * 2, kDefaultHeight * 2); - send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy()); + SendTask(RTC_FROM_HERE, task_queue_, [&]() { + send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy()); + }); EXPECT_TRUE(Wait()) << 
"Timed out while waiting for a couple of high bitrate estimates " "after reconfiguring the send stream."; @@ -843,7 +878,8 @@ TEST_F(CallPerfTest, MAYBE_KeepsHighBitrateWhenReconfiguringSender) { test::VideoEncoderProxyFactory encoder_factory_; std::unique_ptr bitrate_allocator_factory_; VideoEncoderConfig encoder_config_; - } test; + TaskQueueBase* task_queue_; + } test(task_queue()); RunBaseTest(&test); } @@ -1002,11 +1038,11 @@ void CallPerfTest::TestMinAudioVideoBitrate(int test_bitrate_from, // TODO(bugs.webrtc.org/8878) #if defined(WEBRTC_MAC) -#define MAYBE_MinVideoAndAudioBitrate DISABLED_MinVideoAndAudioBitrate +#define MAYBE_Min_Bitrate_VideoAndAudio DISABLED_Min_Bitrate_VideoAndAudio #else -#define MAYBE_MinVideoAndAudioBitrate MinVideoAndAudioBitrate +#define MAYBE_Min_Bitrate_VideoAndAudio Min_Bitrate_VideoAndAudio #endif -TEST_F(CallPerfTest, MAYBE_MinVideoAndAudioBitrate) { +TEST_F(CallPerfTest, MAYBE_Min_Bitrate_VideoAndAudio) { TestMinAudioVideoBitrate(110, 40, -10, 10000, 70000, 200000); } diff --git a/call/call_unittest.cc b/call/call_unittest.cc index 8afcf25121..92a037f157 100644 --- a/call/call_unittest.cc +++ b/call/call_unittest.cc @@ -20,13 +20,18 @@ #include "api/rtc_event_log/rtc_event_log.h" #include "api/task_queue/default_task_queue_factory.h" #include "api/test/mock_audio_mixer.h" +#include "api/test/video/function_video_encoder_factory.h" #include "api/transport/field_trial_based_config.h" +#include "api/video/builtin_video_bitrate_allocator_factory.h" #include "audio/audio_receive_stream.h" #include "audio/audio_send_stream.h" +#include "call/adaptation/test/fake_resource.h" +#include "call/adaptation/test/mock_resource_listener.h" #include "call/audio_state.h" #include "modules/audio_device/include/mock_audio_device.h" #include "modules/audio_processing/include/mock_audio_processing.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" +#include "modules/include/module.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" 
#include "test/fake_encoder.h" #include "test/gtest.h" #include "test/mock_audio_decoder_factory.h" @@ -35,18 +40,24 @@ namespace { +using ::testing::_; +using ::testing::Contains; +using ::testing::NiceMock; +using ::testing::StrictMock; + struct CallHelper { explicit CallHelper(bool use_null_audio_processing) { task_queue_factory_ = webrtc::CreateDefaultTaskQueueFactory(); webrtc::AudioState::Config audio_state_config; audio_state_config.audio_mixer = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); audio_state_config.audio_processing = use_null_audio_processing ? nullptr - : new rtc::RefCountedObject(); + : rtc::make_ref_counted< + NiceMock>(); audio_state_config.audio_device_module = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); webrtc::Call::Config config(&event_log_); config.audio_state = webrtc::AudioState::Create(audio_state_config); config.task_queue_factory = task_queue_factory_.get(); @@ -67,6 +78,20 @@ struct CallHelper { namespace webrtc { +namespace { + +rtc::scoped_refptr FindResourceWhoseNameContains( + const std::vector>& resources, + const std::string& name_contains) { + for (const auto& resource : resources) { + if (resource->Name().find(name_contains) != std::string::npos) + return resource; + } + return nullptr; +} + +} // namespace + TEST(CallTest, ConstructDestruct) { for (bool use_null_audio_processing : {false, true}) { CallHelper call(use_null_audio_processing); @@ -93,7 +118,7 @@ TEST(CallTest, CreateDestroy_AudioReceiveStream) { config.rtp.remote_ssrc = 42; config.rtcp_send_transport = &rtcp_send_transport; config.decoder_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); AudioReceiveStream* stream = call->CreateAudioReceiveStream(config); EXPECT_NE(stream, nullptr); call->DestroyAudioReceiveStream(stream); @@ -132,7 +157,7 @@ TEST(CallTest, CreateDestroy_AudioReceiveStreams) { MockTransport rtcp_send_transport; config.rtcp_send_transport = &rtcp_send_transport; config.decoder_factory = - new 
rtc::RefCountedObject(); + rtc::make_ref_counted(); std::list streams; for (int i = 0; i < 2; ++i) { for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) { @@ -162,7 +187,7 @@ TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_RecvFirst) { recv_config.rtp.local_ssrc = 777; recv_config.rtcp_send_transport = &rtcp_send_transport; recv_config.decoder_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config); EXPECT_NE(recv_stream, nullptr); @@ -201,7 +226,7 @@ TEST(CallTest, CreateDestroy_AssociateAudioSendReceiveStreams_SendFirst) { recv_config.rtp.local_ssrc = 777; recv_config.rtcp_send_transport = &rtcp_send_transport; recv_config.decoder_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); AudioReceiveStream* recv_stream = call->CreateAudioReceiveStream(recv_config); EXPECT_NE(recv_stream, nullptr); @@ -223,7 +248,7 @@ TEST(CallTest, CreateDestroy_FlexfecReceiveStream) { MockTransport rtcp_send_transport; FlexfecReceiveStream::Config config(&rtcp_send_transport); config.payload_type = 118; - config.remote_ssrc = 38837212; + config.rtp.remote_ssrc = 38837212; config.protected_media_ssrcs = {27273}; FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config); @@ -242,7 +267,7 @@ TEST(CallTest, CreateDestroy_FlexfecReceiveStreams) { for (int i = 0; i < 2; ++i) { for (uint32_t ssrc = 0; ssrc < 1234567; ssrc += 34567) { - config.remote_ssrc = ssrc; + config.rtp.remote_ssrc = ssrc; config.protected_media_ssrcs = {ssrc + 1}; FlexfecReceiveStream* stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); @@ -270,22 +295,22 @@ TEST(CallTest, MultipleFlexfecReceiveStreamsProtectingSingleVideoStream) { FlexfecReceiveStream* stream; std::list streams; - config.remote_ssrc = 838383; + config.rtp.remote_ssrc = 838383; stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); streams.push_back(stream); - 
config.remote_ssrc = 424993; + config.rtp.remote_ssrc = 424993; stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); streams.push_back(stream); - config.remote_ssrc = 99383; + config.rtp.remote_ssrc = 99383; stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); streams.push_back(stream); - config.remote_ssrc = 5548; + config.rtp.remote_ssrc = 5548; stream = call->CreateFlexfecReceiveStream(config); EXPECT_NE(stream, nullptr); streams.push_back(stream); @@ -321,8 +346,186 @@ TEST(CallTest, RecreatingAudioStreamWithSameSsrcReusesRtpState) { EXPECT_EQ(rtp_state1.capture_time_ms, rtp_state2.capture_time_ms); EXPECT_EQ(rtp_state1.last_timestamp_time_ms, rtp_state2.last_timestamp_time_ms); - EXPECT_EQ(rtp_state1.media_has_been_sent, rtp_state2.media_has_been_sent); } } +TEST(CallTest, AddAdaptationResourceAfterCreatingVideoSendStream) { + CallHelper call(true); + // Create a VideoSendStream. + test::FunctionVideoEncoderFactory fake_encoder_factory([]() { + return std::make_unique(Clock::GetRealTimeClock()); + }); + auto bitrate_allocator_factory = CreateBuiltinVideoBitrateAllocatorFactory(); + MockTransport send_transport; + VideoSendStream::Config config(&send_transport); + config.rtp.payload_type = 110; + config.rtp.ssrcs = {42}; + config.encoder_settings.encoder_factory = &fake_encoder_factory; + config.encoder_settings.bitrate_allocator_factory = + bitrate_allocator_factory.get(); + VideoEncoderConfig encoder_config; + encoder_config.max_bitrate_bps = 1337; + VideoSendStream* stream1 = + call->CreateVideoSendStream(config.Copy(), encoder_config.Copy()); + EXPECT_NE(stream1, nullptr); + config.rtp.ssrcs = {43}; + VideoSendStream* stream2 = + call->CreateVideoSendStream(config.Copy(), encoder_config.Copy()); + EXPECT_NE(stream2, nullptr); + // Add a fake resource. 
+ auto fake_resource = FakeResource::Create("FakeResource"); + call->AddAdaptationResource(fake_resource); + // An adapter resource mirroring the |fake_resource| should now be present on + // both streams. + auto injected_resource1 = FindResourceWhoseNameContains( + stream1->GetAdaptationResources(), fake_resource->Name()); + EXPECT_TRUE(injected_resource1); + auto injected_resource2 = FindResourceWhoseNameContains( + stream2->GetAdaptationResources(), fake_resource->Name()); + EXPECT_TRUE(injected_resource2); + // Overwrite the real resource listeners with mock ones to verify the signal + // gets through. + injected_resource1->SetResourceListener(nullptr); + StrictMock resource_listener1; + EXPECT_CALL(resource_listener1, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([injected_resource1](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(injected_resource1, resource); + EXPECT_EQ(ResourceUsageState::kOveruse, usage_state); + }); + injected_resource1->SetResourceListener(&resource_listener1); + injected_resource2->SetResourceListener(nullptr); + StrictMock resource_listener2; + EXPECT_CALL(resource_listener2, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([injected_resource2](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(injected_resource2, resource); + EXPECT_EQ(ResourceUsageState::kOveruse, usage_state); + }); + injected_resource2->SetResourceListener(&resource_listener2); + // The kOveruse signal should get to our resource listeners. + fake_resource->SetUsageState(ResourceUsageState::kOveruse); + call->DestroyVideoSendStream(stream1); + call->DestroyVideoSendStream(stream2); +} + +TEST(CallTest, AddAdaptationResourceBeforeCreatingVideoSendStream) { + CallHelper call(true); + // Add a fake resource. + auto fake_resource = FakeResource::Create("FakeResource"); + call->AddAdaptationResource(fake_resource); + // Create a VideoSendStream. 
+ test::FunctionVideoEncoderFactory fake_encoder_factory([]() { + return std::make_unique(Clock::GetRealTimeClock()); + }); + auto bitrate_allocator_factory = CreateBuiltinVideoBitrateAllocatorFactory(); + MockTransport send_transport; + VideoSendStream::Config config(&send_transport); + config.rtp.payload_type = 110; + config.rtp.ssrcs = {42}; + config.encoder_settings.encoder_factory = &fake_encoder_factory; + config.encoder_settings.bitrate_allocator_factory = + bitrate_allocator_factory.get(); + VideoEncoderConfig encoder_config; + encoder_config.max_bitrate_bps = 1337; + VideoSendStream* stream1 = + call->CreateVideoSendStream(config.Copy(), encoder_config.Copy()); + EXPECT_NE(stream1, nullptr); + config.rtp.ssrcs = {43}; + VideoSendStream* stream2 = + call->CreateVideoSendStream(config.Copy(), encoder_config.Copy()); + EXPECT_NE(stream2, nullptr); + // An adapter resource mirroring the |fake_resource| should be present on both + // streams. + auto injected_resource1 = FindResourceWhoseNameContains( + stream1->GetAdaptationResources(), fake_resource->Name()); + EXPECT_TRUE(injected_resource1); + auto injected_resource2 = FindResourceWhoseNameContains( + stream2->GetAdaptationResources(), fake_resource->Name()); + EXPECT_TRUE(injected_resource2); + // Overwrite the real resource listeners with mock ones to verify the signal + // gets through. 
+ injected_resource1->SetResourceListener(nullptr); + StrictMock resource_listener1; + EXPECT_CALL(resource_listener1, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([injected_resource1](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(injected_resource1, resource); + EXPECT_EQ(ResourceUsageState::kUnderuse, usage_state); + }); + injected_resource1->SetResourceListener(&resource_listener1); + injected_resource2->SetResourceListener(nullptr); + StrictMock resource_listener2; + EXPECT_CALL(resource_listener2, OnResourceUsageStateMeasured(_, _)) + .Times(1) + .WillOnce([injected_resource2](rtc::scoped_refptr resource, + ResourceUsageState usage_state) { + EXPECT_EQ(injected_resource2, resource); + EXPECT_EQ(ResourceUsageState::kUnderuse, usage_state); + }); + injected_resource2->SetResourceListener(&resource_listener2); + // The kUnderuse signal should get to our resource listeners. + fake_resource->SetUsageState(ResourceUsageState::kUnderuse); + call->DestroyVideoSendStream(stream1); + call->DestroyVideoSendStream(stream2); +} + +TEST(CallTest, SharedModuleThread) { + class SharedModuleThreadUser : public Module { + public: + SharedModuleThreadUser(ProcessThread* expected_thread, + rtc::scoped_refptr thread) + : expected_thread_(expected_thread), thread_(std::move(thread)) { + thread_->EnsureStarted(); + thread_->process_thread()->RegisterModule(this, RTC_FROM_HERE); + } + + ~SharedModuleThreadUser() override { + thread_->process_thread()->DeRegisterModule(this); + EXPECT_TRUE(thread_was_checked_); + } + + private: + int64_t TimeUntilNextProcess() override { return 1000; } + void Process() override {} + void ProcessThreadAttached(ProcessThread* process_thread) override { + if (!process_thread) { + // Being detached. 
+ return; + } + EXPECT_EQ(process_thread, expected_thread_); + thread_was_checked_ = true; + } + + bool thread_was_checked_ = false; + ProcessThread* const expected_thread_; + rtc::scoped_refptr thread_; + }; + + // Create our test instance and pass a lambda to it that gets executed when + // the reference count goes back to 1 - meaning |shared| again is the only + // reference, which means we can free the variable and deallocate the thread. + rtc::scoped_refptr shared; + shared = + SharedModuleThread::Create(ProcessThread::Create("MySharedProcessThread"), + [&shared]() { shared = nullptr; }); + ProcessThread* process_thread = shared->process_thread(); + + ASSERT_TRUE(shared.get()); + + { + // Create a couple of users of the thread. + // These instances are in a separate scope to trigger the callback to our + // lambda, which will run when these go out of scope. + SharedModuleThreadUser user1(process_thread, shared); + SharedModuleThreadUser user2(process_thread, shared); + } + + // The thread should now have been stopped and freed. 
+ EXPECT_FALSE(shared); +} + } // namespace webrtc diff --git a/call/degraded_call.cc b/call/degraded_call.cc index 9c8d2be508..5462085490 100644 --- a/call/degraded_call.cc +++ b/call/degraded_call.cc @@ -245,6 +245,11 @@ void DegradedCall::DestroyFlexfecReceiveStream( call_->DestroyFlexfecReceiveStream(receive_stream); } +void DegradedCall::AddAdaptationResource( + rtc::scoped_refptr resource) { + call_->AddAdaptationResource(std::move(resource)); +} + PacketReceiver* DegradedCall::Receiver() { if (receive_config_) { return this; @@ -261,6 +266,18 @@ Call::Stats DegradedCall::GetStats() const { return call_->GetStats(); } +const WebRtcKeyValueConfig& DegradedCall::trials() const { + return call_->trials(); +} + +TaskQueueBase* DegradedCall::network_thread() const { + return call_->network_thread(); +} + +TaskQueueBase* DegradedCall::worker_thread() const { + return call_->worker_thread(); +} + void DegradedCall::SignalChannelNetworkState(MediaType media, NetworkState state) { call_->SignalChannelNetworkState(media, state); @@ -271,6 +288,16 @@ void DegradedCall::OnAudioTransportOverheadChanged( call_->OnAudioTransportOverheadChanged(transport_overhead_per_packet); } +void DegradedCall::OnLocalSsrcUpdated(AudioReceiveStream& stream, + uint32_t local_ssrc) { + call_->OnLocalSsrcUpdated(stream, local_ssrc); +} + +void DegradedCall::OnUpdateSyncGroup(AudioReceiveStream& stream, + const std::string& sync_group) { + call_->OnUpdateSyncGroup(stream, sync_group); +} + void DegradedCall::OnSentPacket(const rtc::SentPacket& sent_packet) { if (send_config_) { // If we have a degraded send-transport, we have already notified call diff --git a/call/degraded_call.h b/call/degraded_call.h index 49230ca1ed..70dc126807 100644 --- a/call/degraded_call.h +++ b/call/degraded_call.h @@ -16,6 +16,7 @@ #include #include +#include #include "absl/types/optional.h" #include "api/call/transport.h" @@ -77,15 +78,26 @@ class DegradedCall : public Call, private PacketReceiver { void 
DestroyFlexfecReceiveStream( FlexfecReceiveStream* receive_stream) override; + void AddAdaptationResource(rtc::scoped_refptr resource) override; + PacketReceiver* Receiver() override; RtpTransportControllerSendInterface* GetTransportControllerSend() override; Stats GetStats() const override; + const WebRtcKeyValueConfig& trials() const override; + + TaskQueueBase* network_thread() const override; + TaskQueueBase* worker_thread() const override; + void SignalChannelNetworkState(MediaType media, NetworkState state) override; void OnAudioTransportOverheadChanged( int transport_overhead_per_packet) override; + void OnLocalSsrcUpdated(AudioReceiveStream& stream, + uint32_t local_ssrc) override; + void OnUpdateSyncGroup(AudioReceiveStream& stream, + const std::string& sync_group) override; void OnSentPacket(const rtc::SentPacket& sent_packet) override; protected: diff --git a/call/fake_network_pipe.cc b/call/fake_network_pipe.cc index 8844700e67..324a7bd793 100644 --- a/call/fake_network_pipe.cc +++ b/call/fake_network_pipe.cc @@ -122,17 +122,17 @@ FakeNetworkPipe::~FakeNetworkPipe() { } void FakeNetworkPipe::SetReceiver(PacketReceiver* receiver) { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); receiver_ = receiver; } void FakeNetworkPipe::AddActiveTransport(Transport* transport) { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); active_transports_[transport]++; } void FakeNetworkPipe::RemoveActiveTransport(Transport* transport) { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); auto it = active_transports_.find(transport); RTC_CHECK(it != active_transports_.end()); if (--(it->second) == 0) { @@ -186,7 +186,7 @@ PacketReceiver::DeliveryStatus FakeNetworkPipe::DeliverPacket( } void FakeNetworkPipe::SetClockOffset(int64_t offset_ms) { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); clock_offset_ms_ = offset_ms; } @@ -198,7 +198,7 @@ bool 
FakeNetworkPipe::EnqueuePacket(rtc::CopyOnWriteBuffer packet, bool is_rtcp, MediaType media_type, absl::optional packet_time_us) { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); int64_t time_now_us = clock_->TimeInMicroseconds(); return EnqueuePacket(NetworkPacket(std::move(packet), time_now_us, time_now_us, options, is_rtcp, media_type, @@ -209,7 +209,7 @@ bool FakeNetworkPipe::EnqueuePacket(rtc::CopyOnWriteBuffer packet, absl::optional options, bool is_rtcp, Transport* transport) { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); int64_t time_now_us = clock_->TimeInMicroseconds(); return EnqueuePacket(NetworkPacket(std::move(packet), time_now_us, time_now_us, options, is_rtcp, @@ -233,7 +233,7 @@ bool FakeNetworkPipe::EnqueuePacket(NetworkPacket&& net_packet) { } float FakeNetworkPipe::PercentageLoss() { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); if (sent_packets_ == 0) return 0; @@ -242,7 +242,7 @@ float FakeNetworkPipe::PercentageLoss() { } int FakeNetworkPipe::AverageDelay() { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); if (sent_packets_ == 0) return 0; @@ -251,12 +251,12 @@ int FakeNetworkPipe::AverageDelay() { } size_t FakeNetworkPipe::DroppedPackets() { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); return dropped_packets_; } size_t FakeNetworkPipe::SentPackets() { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); return sent_packets_; } @@ -264,7 +264,7 @@ void FakeNetworkPipe::Process() { int64_t time_now_us; std::queue packets_to_deliver; { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); time_now_us = clock_->TimeInMicroseconds(); if (time_now_us - last_log_time_us_ > kLogIntervalMs * 1000) { int64_t queueing_delay_us = 0; @@ -318,7 +318,7 @@ void FakeNetworkPipe::Process() { } } - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); while 
(!packets_to_deliver.empty()) { NetworkPacket packet = std::move(packets_to_deliver.front()); packets_to_deliver.pop(); @@ -354,7 +354,7 @@ void FakeNetworkPipe::DeliverNetworkPacket(NetworkPacket* packet) { } absl::optional FakeNetworkPipe::TimeUntilNextProcess() { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); absl::optional delivery_us = network_behavior_->NextDeliveryTimeUs(); if (delivery_us) { int64_t delay_us = *delivery_us - clock_->TimeInMicroseconds(); @@ -364,17 +364,17 @@ absl::optional FakeNetworkPipe::TimeUntilNextProcess() { } bool FakeNetworkPipe::HasReceiver() const { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); return receiver_ != nullptr; } void FakeNetworkPipe::DeliverPacketWithLock(NetworkPacket* packet) { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); DeliverNetworkPacket(packet); } void FakeNetworkPipe::ResetStats() { - rtc::CritScope crit(&process_lock_); + MutexLock lock(&process_lock_); dropped_packets_ = 0; sent_packets_ = 0; total_packet_delay_us_ = 0; diff --git a/call/fake_network_pipe.h b/call/fake_network_pipe.h index 24340a2f29..1e5bb513bf 100644 --- a/call/fake_network_pipe.h +++ b/call/fake_network_pipe.h @@ -24,7 +24,7 @@ #include "call/call.h" #include "call/simulated_packet_receiver.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -204,14 +204,14 @@ class FakeNetworkPipe : public SimulatedPacketReceiverInterface { Clock* const clock_; // |config_lock| guards the mostly constant things like the callbacks. - rtc::CriticalSection config_lock_; + mutable Mutex config_lock_; const std::unique_ptr network_behavior_; PacketReceiver* receiver_ RTC_GUARDED_BY(config_lock_); Transport* const global_transport_; // |process_lock| guards the data structures involved in delay and loss // processes, such as the packet queues. 
- rtc::CriticalSection process_lock_; + Mutex process_lock_; // Packets are added at the back of the deque, this makes the deque ordered // by increasing send time. The common case when removing packets from the // deque is removing early packets, which will be close to the front of the diff --git a/call/flexfec_receive_stream.h b/call/flexfec_receive_stream.h index 2f7438f9a4..72e544e7ec 100644 --- a/call/flexfec_receive_stream.h +++ b/call/flexfec_receive_stream.h @@ -19,11 +19,13 @@ #include "api/call/transport.h" #include "api/rtp_headers.h" #include "api/rtp_parameters.h" +#include "call/receive_stream.h" #include "call/rtp_packet_sink_interface.h" namespace webrtc { -class FlexfecReceiveStream : public RtpPacketSinkInterface { +class FlexfecReceiveStream : public RtpPacketSinkInterface, + public ReceiveStream { public: ~FlexfecReceiveStream() override = default; @@ -48,8 +50,7 @@ class FlexfecReceiveStream : public RtpPacketSinkInterface { // Payload type for FlexFEC. int payload_type = -1; - // SSRC for FlexFEC stream to be received. - uint32_t remote_ssrc = 0; + RtpConfig rtp; // Vector containing a single element, corresponding to the SSRC of the // media stream being protected by this FlexFEC stream. The vector MUST have @@ -59,26 +60,14 @@ class FlexfecReceiveStream : public RtpPacketSinkInterface { // protection. std::vector protected_media_ssrcs; - // SSRC for RTCP reports to be sent. - uint32_t local_ssrc = 0; - // What RTCP mode to use in the reports. RtcpMode rtcp_mode = RtcpMode::kCompound; // Transport for outgoing RTCP packets. Transport* rtcp_send_transport = nullptr; - - // |transport_cc| is true whenever the send-side BWE RTCP feedback message - // has been negotiated. This is a prerequisite for enabling send-side BWE. - bool transport_cc = false; - - // RTP header extensions that have been negotiated for this track. 
- std::vector rtp_header_extensions; }; virtual Stats GetStats() const = 0; - - virtual const Config& GetConfig() const = 0; }; } // namespace webrtc diff --git a/call/flexfec_receive_stream_impl.cc b/call/flexfec_receive_stream_impl.cc index 40005efe83..688efb7b5e 100644 --- a/call/flexfec_receive_stream_impl.cc +++ b/call/flexfec_receive_stream_impl.cc @@ -22,7 +22,6 @@ #include "call/rtp_stream_receiver_controller_interface.h" #include "modules/rtp_rtcp/include/flexfec_receiver.h" #include "modules/rtp_rtcp/include/receive_statistics.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/utility/include/process_thread.h" #include "rtc_base/checks.h" @@ -45,21 +44,21 @@ std::string FlexfecReceiveStream::Config::ToString() const { char buf[1024]; rtc::SimpleStringBuilder ss(buf); ss << "{payload_type: " << payload_type; - ss << ", remote_ssrc: " << remote_ssrc; - ss << ", local_ssrc: " << local_ssrc; + ss << ", remote_ssrc: " << rtp.remote_ssrc; + ss << ", local_ssrc: " << rtp.local_ssrc; ss << ", protected_media_ssrcs: ["; size_t i = 0; for (; i + 1 < protected_media_ssrcs.size(); ++i) ss << protected_media_ssrcs[i] << ", "; if (!protected_media_ssrcs.empty()) ss << protected_media_ssrcs[i]; - ss << "], transport_cc: " << (transport_cc ? "on" : "off"); - ss << ", rtp_header_extensions: ["; + ss << "], transport_cc: " << (rtp.transport_cc ? 
"on" : "off"); + ss << ", rtp.extensions: ["; i = 0; - for (; i + 1 < rtp_header_extensions.size(); ++i) - ss << rtp_header_extensions[i].ToString() << ", "; - if (!rtp_header_extensions.empty()) - ss << rtp_header_extensions[i].ToString(); + for (; i + 1 < rtp.extensions.size(); ++i) + ss << rtp.extensions[i].ToString() << ", "; + if (!rtp.extensions.empty()) + ss << rtp.extensions[i].ToString(); ss << "]}"; return ss.str(); } @@ -69,7 +68,7 @@ bool FlexfecReceiveStream::Config::IsCompleteAndEnabled() const { if (payload_type < 0) return false; // Do we have the necessary SSRC information? - if (remote_ssrc == 0) + if (rtp.remote_ssrc == 0) return false; // TODO(brandtr): Update this check when we support multistream protection. if (protected_media_ssrcs.size() != 1u) @@ -92,7 +91,7 @@ std::unique_ptr MaybeCreateFlexfecReceiver( } RTC_DCHECK_GE(config.payload_type, 0); RTC_DCHECK_LE(config.payload_type, 127); - if (config.remote_ssrc == 0) { + if (config.rtp.remote_ssrc == 0) { RTC_LOG(LS_WARNING) << "Invalid FlexFEC SSRC given. 
" "This FlexfecReceiveStream will therefore be useless."; @@ -115,35 +114,33 @@ std::unique_ptr MaybeCreateFlexfecReceiver( } RTC_DCHECK_EQ(1U, config.protected_media_ssrcs.size()); return std::unique_ptr(new FlexfecReceiver( - clock, config.remote_ssrc, config.protected_media_ssrcs[0], + clock, config.rtp.remote_ssrc, config.protected_media_ssrcs[0], recovered_packet_receiver)); } -std::unique_ptr CreateRtpRtcpModule( +std::unique_ptr CreateRtpRtcpModule( Clock* clock, ReceiveStatistics* receive_statistics, const FlexfecReceiveStreamImpl::Config& config, RtcpRttStats* rtt_stats) { - RtpRtcp::Configuration configuration; + RtpRtcpInterface::Configuration configuration; configuration.audio = false; configuration.receiver_only = true; configuration.clock = clock; configuration.receive_statistics = receive_statistics; configuration.outgoing_transport = config.rtcp_send_transport; configuration.rtt_stats = rtt_stats; - configuration.local_media_ssrc = config.local_ssrc; - return RtpRtcp::Create(configuration); + configuration.local_media_ssrc = config.rtp.local_ssrc; + return ModuleRtpRtcpImpl2::Create(configuration); } } // namespace FlexfecReceiveStreamImpl::FlexfecReceiveStreamImpl( Clock* clock, - RtpStreamReceiverControllerInterface* receiver_controller, const Config& config, RecoveredPacketReceiver* recovered_packet_receiver, - RtcpRttStats* rtt_stats, - ProcessThread* process_thread) + RtcpRttStats* rtt_stats) : config_(config), receiver_(MaybeCreateFlexfecReceiver(clock, config_, @@ -152,32 +149,38 @@ FlexfecReceiveStreamImpl::FlexfecReceiveStreamImpl( rtp_rtcp_(CreateRtpRtcpModule(clock, rtp_receive_statistics_.get(), config_, - rtt_stats)), - process_thread_(process_thread) { + rtt_stats)) { RTC_LOG(LS_INFO) << "FlexfecReceiveStreamImpl: " << config_.ToString(); + packet_sequence_checker_.Detach(); + // RTCP reporting. 
rtp_rtcp_->SetRTCPStatus(config_.rtcp_mode); - process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE); +} + +FlexfecReceiveStreamImpl::~FlexfecReceiveStreamImpl() { + RTC_LOG(LS_INFO) << "~FlexfecReceiveStreamImpl: " << config_.ToString(); +} + +void FlexfecReceiveStreamImpl::RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK(!rtp_stream_receiver_); + + if (!receiver_) + return; - // Register with transport. // TODO(nisse): OnRtpPacket in this class delegates all real work to - // |receiver_|. So maybe we don't need to implement RtpPacketSinkInterface + // `receiver_`. So maybe we don't need to implement RtpPacketSinkInterface // here at all, we'd then delete the OnRtpPacket method and instead register - // |receiver_| as the RtpPacketSinkInterface for this stream. - // TODO(nisse): Passing |this| from the constructor to the RtpDemuxer, before - // the object is fully initialized, is risky. But it works in this case - // because locking in our caller, Call::CreateFlexfecReceiveStream, ensures - // that the demuxer doesn't call OnRtpPacket before this object is fully - // constructed. Registering |receiver_| instead of |this| would solve this - // problem too. + // `receiver_` as the RtpPacketSinkInterface for this stream. 
rtp_stream_receiver_ = - receiver_controller->CreateReceiver(config_.remote_ssrc, this); + receiver_controller->CreateReceiver(config_.rtp.remote_ssrc, this); } -FlexfecReceiveStreamImpl::~FlexfecReceiveStreamImpl() { - RTC_LOG(LS_INFO) << "~FlexfecReceiveStreamImpl: " << config_.ToString(); - process_thread_->DeRegisterModule(rtp_rtcp_.get()); +void FlexfecReceiveStreamImpl::UnregisterFromTransport() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_stream_receiver_.reset(); } void FlexfecReceiveStreamImpl::OnRtpPacket(const RtpPacketReceived& packet) { @@ -187,7 +190,7 @@ void FlexfecReceiveStreamImpl::OnRtpPacket(const RtpPacketReceived& packet) { receiver_->OnRtpPacket(packet); // Do not report media packets in the RTCP RRs generated by |rtp_rtcp_|. - if (packet.Ssrc() == config_.remote_ssrc) { + if (packet.Ssrc() == config_.rtp.remote_ssrc) { rtp_receive_statistics_->OnRtpPacket(packet); } } @@ -198,9 +201,4 @@ FlexfecReceiveStreamImpl::Stats FlexfecReceiveStreamImpl::GetStats() const { return FlexfecReceiveStream::Stats(); } -const FlexfecReceiveStream::Config& FlexfecReceiveStreamImpl::GetConfig() - const { - return config_; -} - } // namespace webrtc diff --git a/call/flexfec_receive_stream_impl.h b/call/flexfec_receive_stream_impl.h index d4fdc7431a..285a33f7bb 100644 --- a/call/flexfec_receive_stream_impl.h +++ b/call/flexfec_receive_stream_impl.h @@ -15,12 +15,13 @@ #include "call/flexfec_receive_stream.h" #include "call/rtp_packet_sink_interface.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "rtc_base/system/no_unique_address.h" #include "system_wrappers/include/clock.h" namespace webrtc { class FlexfecReceiver; -class ProcessThread; class ReceiveStatistics; class RecoveredPacketReceiver; class RtcpRttStats; @@ -31,22 +32,37 @@ class RtpStreamReceiverInterface; class FlexfecReceiveStreamImpl : public FlexfecReceiveStream { public: - FlexfecReceiveStreamImpl( - Clock* clock, - RtpStreamReceiverControllerInterface* 
receiver_controller, - const Config& config, - RecoveredPacketReceiver* recovered_packet_receiver, - RtcpRttStats* rtt_stats, - ProcessThread* process_thread); + FlexfecReceiveStreamImpl(Clock* clock, + const Config& config, + RecoveredPacketReceiver* recovered_packet_receiver, + RtcpRttStats* rtt_stats); + // Destruction happens on the worker thread. Prior to destruction the caller + // must ensure that a registration with the transport has been cleared. See + // `RegisterWithTransport` for details. + // TODO(tommi): As a further improvement to this, performing the full + // destruction on the network thread could be made the default. ~FlexfecReceiveStreamImpl() override; + // Called on the network thread to register/unregister with the network + // transport. + void RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller); + // If registration has previously been done (via `RegisterWithTransport`) then + // `UnregisterFromTransport` must be called prior to destruction, on the + // network thread. + void UnregisterFromTransport(); + // RtpPacketSinkInterface. void OnRtpPacket(const RtpPacketReceived& packet) override; Stats GetStats() const override; - const Config& GetConfig() const override; + + // ReceiveStream impl. + const RtpConfig& rtp_config() const override { return config_.rtp; } private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; + // Config. const Config config_; @@ -55,10 +71,10 @@ class FlexfecReceiveStreamImpl : public FlexfecReceiveStream { // RTCP reporting. 
const std::unique_ptr rtp_receive_statistics_; - const std::unique_ptr rtp_rtcp_; - ProcessThread* process_thread_; + const std::unique_ptr rtp_rtcp_; - std::unique_ptr rtp_stream_receiver_; + std::unique_ptr rtp_stream_receiver_ + RTC_GUARDED_BY(packet_sequence_checker_); }; } // namespace webrtc diff --git a/call/flexfec_receive_stream_unittest.cc b/call/flexfec_receive_stream_unittest.cc index 5e8ee47433..312fe0c907 100644 --- a/call/flexfec_receive_stream_unittest.cc +++ b/call/flexfec_receive_stream_unittest.cc @@ -26,7 +26,6 @@ #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/utility/include/mock/mock_process_thread.h" #include "test/gmock.h" #include "test/gtest.h" #include "test/mock_transport.h" @@ -45,7 +44,7 @@ FlexfecReceiveStream::Config CreateDefaultConfig( Transport* rtcp_send_transport) { FlexfecReceiveStream::Config config(rtcp_send_transport); config.payload_type = kFlexfecPlType; - config.remote_ssrc = ByteReader::ReadBigEndian(kFlexfecSsrc); + config.rtp.remote_ssrc = ByteReader::ReadBigEndian(kFlexfecSsrc); config.protected_media_ssrcs = { ByteReader::ReadBigEndian(kMediaSsrc)}; EXPECT_TRUE(config.IsCompleteAndEnabled()); @@ -64,16 +63,16 @@ TEST(FlexfecReceiveStreamConfigTest, IsCompleteAndEnabled) { MockTransport rtcp_send_transport; FlexfecReceiveStream::Config config(&rtcp_send_transport); - config.local_ssrc = 18374743; + config.rtp.local_ssrc = 18374743; config.rtcp_mode = RtcpMode::kCompound; - config.transport_cc = true; - config.rtp_header_extensions.emplace_back(TransportSequenceNumber::kUri, 7); + config.rtp.transport_cc = true; + config.rtp.extensions.emplace_back(TransportSequenceNumber::kUri, 7); EXPECT_FALSE(config.IsCompleteAndEnabled()); config.payload_type = 123; EXPECT_FALSE(config.IsCompleteAndEnabled()); - config.remote_ssrc = 238423838; + config.rtp.remote_ssrc = 238423838; 
EXPECT_FALSE(config.IsCompleteAndEnabled()); config.protected_media_ssrcs.push_back(138989393); @@ -87,21 +86,20 @@ class FlexfecReceiveStreamTest : public ::testing::Test { protected: FlexfecReceiveStreamTest() : config_(CreateDefaultConfig(&rtcp_send_transport_)) { - EXPECT_CALL(process_thread_, RegisterModule(_, _)).Times(1); receive_stream_ = std::make_unique( - Clock::GetRealTimeClock(), &rtp_stream_receiver_controller_, config_, - &recovered_packet_receiver_, &rtt_stats_, &process_thread_); + Clock::GetRealTimeClock(), config_, &recovered_packet_receiver_, + &rtt_stats_); + receive_stream_->RegisterWithTransport(&rtp_stream_receiver_controller_); } ~FlexfecReceiveStreamTest() { - EXPECT_CALL(process_thread_, DeRegisterModule(_)).Times(1); + receive_stream_->UnregisterFromTransport(); } MockTransport rtcp_send_transport_; FlexfecReceiveStream::Config config_; MockRecoveredPacketReceiver recovered_packet_receiver_; MockRtcpRttStats rtt_stats_; - MockProcessThread process_thread_; RtpStreamReceiverController rtp_stream_receiver_controller_; std::unique_ptr receive_stream_; }; @@ -144,10 +142,10 @@ TEST_F(FlexfecReceiveStreamTest, RecoversPacket) { // clang-format on ::testing::StrictMock recovered_packet_receiver; - EXPECT_CALL(process_thread_, RegisterModule(_, _)).Times(1); - FlexfecReceiveStreamImpl receive_stream( - Clock::GetRealTimeClock(), &rtp_stream_receiver_controller_, config_, - &recovered_packet_receiver, &rtt_stats_, &process_thread_); + FlexfecReceiveStreamImpl receive_stream(Clock::GetRealTimeClock(), config_, + &recovered_packet_receiver, + &rtt_stats_); + receive_stream.RegisterWithTransport(&rtp_stream_receiver_controller_); EXPECT_CALL(recovered_packet_receiver, OnRecoveredPacket(_, kRtpHeaderSize + kPayloadLength[1])); @@ -155,7 +153,7 @@ TEST_F(FlexfecReceiveStreamTest, RecoversPacket) { receive_stream.OnRtpPacket(ParsePacket(kFlexfecPacket)); // Tear-down - EXPECT_CALL(process_thread_, DeRegisterModule(_)).Times(1); + 
receive_stream.UnregisterFromTransport(); } } // namespace webrtc diff --git a/call/packet_receiver.h b/call/packet_receiver.h index df57d8f4f4..13d3b84c90 100644 --- a/call/packet_receiver.h +++ b/call/packet_receiver.h @@ -10,11 +10,6 @@ #ifndef CALL_PACKET_RECEIVER_H_ #define CALL_PACKET_RECEIVER_H_ -#include -#include -#include -#include - #include "api/media_types.h" #include "rtc_base/copy_on_write_buffer.h" diff --git a/call/rampup_tests.cc b/call/rampup_tests.cc index 89fbe3dde7..bf136a5df9 100644 --- a/call/rampup_tests.cc +++ b/call/rampup_tests.cc @@ -160,7 +160,7 @@ void RampUpTester::ModifyVideoConfigs( encoder_config->number_of_streams = num_video_streams_; encoder_config->max_bitrate_bps = 2000000; encoder_config->video_stream_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); if (num_video_streams_ == 1) { // For single stream rampup until 1mbps expected_bitrate_bps_ = kSingleStreamTargetBps; @@ -295,16 +295,16 @@ void RampUpTester::ModifyFlexfecConfigs( return; RTC_DCHECK_EQ(1, num_flexfec_streams_); (*receive_configs)[0].payload_type = test::CallTest::kFlexfecPayloadType; - (*receive_configs)[0].remote_ssrc = test::CallTest::kFlexfecSendSsrc; + (*receive_configs)[0].rtp.remote_ssrc = test::CallTest::kFlexfecSendSsrc; (*receive_configs)[0].protected_media_ssrcs = {video_ssrcs_[0]}; - (*receive_configs)[0].local_ssrc = video_ssrcs_[0]; + (*receive_configs)[0].rtp.local_ssrc = video_ssrcs_[0]; if (extension_type_ == RtpExtension::kAbsSendTimeUri) { - (*receive_configs)[0].transport_cc = false; - (*receive_configs)[0].rtp_header_extensions.push_back( + (*receive_configs)[0].rtp.transport_cc = false; + (*receive_configs)[0].rtp.extensions.push_back( RtpExtension(extension_type_.c_str(), kAbsSendTimeExtensionId)); } else if (extension_type_ == RtpExtension::kTransportSequenceNumberUri) { - (*receive_configs)[0].transport_cc = true; - (*receive_configs)[0].rtp_header_extensions.push_back(RtpExtension( + 
(*receive_configs)[0].rtp.transport_cc = true; + (*receive_configs)[0].rtp.extensions.push_back(RtpExtension( extension_type_.c_str(), kTransportSequenceNumberExtensionId)); } } @@ -370,7 +370,10 @@ void RampUpTester::TriggerTestDone() { if (!send_stream_) return; - VideoSendStream::Stats send_stats = send_stream_->GetStats(); + VideoSendStream::Stats send_stats; + SendTask(RTC_FROM_HERE, task_queue_, + [&] { send_stats = send_stream_->GetStats(); }); + send_stream_ = nullptr; // To avoid dereferencing a bad pointer. size_t total_packets_sent = 0; @@ -663,7 +666,6 @@ TEST_F(RampUpTest, DISABLED_UpDownUpTransportSequenceNumberPacketLoss) { UpDownUpAudioVideoTransportSequenceNumberRtx #endif TEST_F(RampUpTest, MAYBE_UpDownUpAudioVideoTransportSequenceNumberRtx) { - test::ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/"); std::vector loss_rates = {0, 0, 0, 0}; RampUpDownUpTester test(3, 1, 0, kStartBitrateBps, RtpExtension::kTransportSequenceNumberUri, true, @@ -672,7 +674,6 @@ TEST_F(RampUpTest, MAYBE_UpDownUpAudioVideoTransportSequenceNumberRtx) { } TEST_F(RampUpTest, UpDownUpAudioTransportSequenceNumberRtx) { - test::ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/"); std::vector loss_rates = {0, 0, 0, 0}; RampUpDownUpTester test(0, 1, 0, kStartBitrateBps, RtpExtension::kTransportSequenceNumberUri, true, diff --git a/call/receive_stream.h b/call/receive_stream.h new file mode 100644 index 0000000000..0f59b37ae3 --- /dev/null +++ b/call/receive_stream.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef CALL_RECEIVE_STREAM_H_ +#define CALL_RECEIVE_STREAM_H_ + +#include + +#include "api/crypto/frame_decryptor_interface.h" +#include "api/frame_transformer_interface.h" +#include "api/media_types.h" +#include "api/scoped_refptr.h" +#include "api/transport/rtp/rtp_source.h" + +namespace webrtc { + +// Common base interface for MediaReceiveStream based classes and +// FlexfecReceiveStream. +class ReceiveStream { + public: + // Receive-stream specific RTP settings. + struct RtpConfig { + // Synchronization source (stream identifier) to be received. + // This member will not change mid-stream and can be assumed to be const + // post initialization. + uint32_t remote_ssrc = 0; + + // Sender SSRC used for sending RTCP (such as receiver reports). + // This value may change mid-stream and must be done on the same thread + // that the value is read on (i.e. packet delivery). + uint32_t local_ssrc = 0; + + // Enable feedback for send side bandwidth estimation. + // See + // https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions + // for details. + // This value may change mid-stream and must be done on the same thread + // that the value is read on (i.e. packet delivery). + bool transport_cc = false; + + // RTP header extensions used for the received stream. + // This value may change mid-stream and must be done on the same thread + // that the value is read on (i.e. packet delivery). + std::vector extensions; + }; + + // Called on the packet delivery thread since some members of the config may + // change mid-stream (e.g. the local ssrc). All mutation must also happen on + // the packet delivery thread. Return value can be assumed to + // only be used in the calling context (on the stack basically). + virtual const RtpConfig& rtp_config() const = 0; + + protected: + virtual ~ReceiveStream() {} +}; + +// Either an audio or video receive stream. +class MediaReceiveStream : public ReceiveStream { + public: + // Starts stream activity. 
+ // When a stream is active, it can receive, process and deliver packets. + virtual void Start() = 0; + + // Stops stream activity. Must be called to match with a previous call to + // `Start()`. When a stream has been stopped, it won't receive, decode, + // process or deliver packets to downstream objects such as callback pointers + // set in the config struct. + virtual void Stop() = 0; + + virtual void SetDepacketizerToDecoderFrameTransformer( + rtc::scoped_refptr + frame_transformer) = 0; + + virtual void SetFrameDecryptor( + rtc::scoped_refptr frame_decryptor) = 0; + + virtual std::vector GetSources() const = 0; +}; + +} // namespace webrtc + +#endif // CALL_RECEIVE_STREAM_H_ diff --git a/call/rtcp_demuxer.cc b/call/rtcp_demuxer.cc deleted file mode 100644 index 738109fa43..0000000000 --- a/call/rtcp_demuxer.cc +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "call/rtcp_demuxer.h" - -#include - -#include -#include - -#include "absl/types/optional.h" -#include "api/rtp_headers.h" -#include "call/rtcp_packet_sink_interface.h" -#include "call/rtp_rtcp_demuxer_helper.h" -#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "rtc_base/checks.h" - -namespace webrtc { - -RtcpDemuxer::RtcpDemuxer() = default; - -RtcpDemuxer::~RtcpDemuxer() { - RTC_DCHECK(ssrc_sinks_.empty()); - RTC_DCHECK(rsid_sinks_.empty()); - RTC_DCHECK(broadcast_sinks_.empty()); -} - -void RtcpDemuxer::AddSink(uint32_t sender_ssrc, RtcpPacketSinkInterface* sink) { - RTC_DCHECK(sink); - RTC_DCHECK(!ContainerHasKey(broadcast_sinks_, sink)); - RTC_DCHECK(!MultimapAssociationExists(ssrc_sinks_, sender_ssrc, sink)); - ssrc_sinks_.emplace(sender_ssrc, sink); -} - -void RtcpDemuxer::AddSink(const std::string& rsid, - RtcpPacketSinkInterface* sink) { - RTC_DCHECK(IsLegalRsidName(rsid)); - RTC_DCHECK(sink); - RTC_DCHECK(!ContainerHasKey(broadcast_sinks_, sink)); - RTC_DCHECK(!MultimapAssociationExists(rsid_sinks_, rsid, sink)); - rsid_sinks_.emplace(rsid, sink); -} - -void RtcpDemuxer::AddBroadcastSink(RtcpPacketSinkInterface* sink) { - RTC_DCHECK(sink); - RTC_DCHECK(!MultimapHasValue(ssrc_sinks_, sink)); - RTC_DCHECK(!MultimapHasValue(rsid_sinks_, sink)); - RTC_DCHECK(!ContainerHasKey(broadcast_sinks_, sink)); - broadcast_sinks_.push_back(sink); -} - -void RtcpDemuxer::RemoveSink(const RtcpPacketSinkInterface* sink) { - RTC_DCHECK(sink); - size_t removal_count = RemoveFromMultimapByValue(&ssrc_sinks_, sink) + - RemoveFromMultimapByValue(&rsid_sinks_, sink); - RTC_DCHECK_GT(removal_count, 0); -} - -void RtcpDemuxer::RemoveBroadcastSink(const RtcpPacketSinkInterface* sink) { - RTC_DCHECK(sink); - auto it = std::find(broadcast_sinks_.begin(), broadcast_sinks_.end(), sink); - RTC_DCHECK(it != broadcast_sinks_.end()); - broadcast_sinks_.erase(it); -} - -void RtcpDemuxer::OnRtcpPacket(rtc::ArrayView packet) { - // Perform sender-SSRC-based 
demuxing for packets with a sender-SSRC. - absl::optional sender_ssrc = ParseRtcpPacketSenderSsrc(packet); - if (sender_ssrc) { - auto it_range = ssrc_sinks_.equal_range(*sender_ssrc); - for (auto it = it_range.first; it != it_range.second; ++it) { - it->second->OnRtcpPacket(packet); - } - } - - // All packets, even those without a sender-SSRC, are broadcast to sinks - // which listen to broadcasts. - for (RtcpPacketSinkInterface* sink : broadcast_sinks_) { - sink->OnRtcpPacket(packet); - } -} - -void RtcpDemuxer::OnSsrcBoundToRsid(const std::string& rsid, uint32_t ssrc) { - // Record the new SSRC association for all of the sinks that were associated - // with the RSID. - auto it_range = rsid_sinks_.equal_range(rsid); - for (auto it = it_range.first; it != it_range.second; ++it) { - RtcpPacketSinkInterface* sink = it->second; - // Watch out for pre-existing SSRC-based associations. - if (!MultimapAssociationExists(ssrc_sinks_, ssrc, sink)) { - AddSink(ssrc, sink); - } - } - - // RSIDs are uniquely associated with SSRCs; no need to keep in memory - // the RSID-to-sink association of resolved RSIDs. - rsid_sinks_.erase(it_range.first, it_range.second); -} - -} // namespace webrtc diff --git a/call/rtcp_demuxer.h b/call/rtcp_demuxer.h deleted file mode 100644 index 494e0cea4b..0000000000 --- a/call/rtcp_demuxer.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef CALL_RTCP_DEMUXER_H_ -#define CALL_RTCP_DEMUXER_H_ - -#include -#include -#include - -#include "api/array_view.h" -#include "call/ssrc_binding_observer.h" - -namespace webrtc { - -class RtcpPacketSinkInterface; - -// This class represents the RTCP demuxing, for a single RTP session (i.e., one -// SSRC space, see RFC 7656). It isn't thread aware, leaving responsibility of -// multithreading issues to the user of this class. -class RtcpDemuxer : public SsrcBindingObserver { - public: - RtcpDemuxer(); - ~RtcpDemuxer() override; - - // Registers a sink. The sink will be notified of incoming RTCP packets with - // that sender-SSRC. The same sink can be registered for multiple SSRCs, and - // the same SSRC can have multiple sinks. Null pointer is not allowed. - // Sinks may be associated with both an SSRC and an RSID. - // Sinks may be registered as SSRC/RSID-specific or broadcast, but not both. - void AddSink(uint32_t sender_ssrc, RtcpPacketSinkInterface* sink); - - // Registers a sink. Once the RSID is resolved to an SSRC, the sink will be - // notified of all RTCP packets with that sender-SSRC. - // The same sink can be registered for multiple RSIDs, and - // the same RSID can have multiple sinks. Null pointer is not allowed. - // Sinks may be associated with both an SSRC and an RSID. - // Sinks may be registered as SSRC/RSID-specific or broadcast, but not both. - void AddSink(const std::string& rsid, RtcpPacketSinkInterface* sink); - - // Registers a sink. The sink will be notified of any incoming RTCP packet. - // Null pointer is not allowed. - // Sinks may be registered as SSRC/RSID-specific or broadcast, but not both. - void AddBroadcastSink(RtcpPacketSinkInterface* sink); - - // Undo previous AddSink() calls with the given sink. - void RemoveSink(const RtcpPacketSinkInterface* sink); - - // Undo AddBroadcastSink(). 
- void RemoveBroadcastSink(const RtcpPacketSinkInterface* sink); - - // Process a new RTCP packet and forward it to the appropriate sinks. - void OnRtcpPacket(rtc::ArrayView packet); - - // Implement SsrcBindingObserver - become notified whenever RSIDs resolve to - // an SSRC. - void OnSsrcBoundToRsid(const std::string& rsid, uint32_t ssrc) override; - - // TODO(eladalon): Add the ability to resolve RSIDs and inform observers, - // like in the RtpDemuxer case, once the relevant standard is finalized. - - private: - // Records the association SSRCs to sinks. - std::multimap ssrc_sinks_; - - // Records the association RSIDs to sinks. - std::multimap rsid_sinks_; - - // Sinks which will receive notifications of all incoming RTCP packets. - // Additional/removal of sinks is expected to be significantly less frequent - // than RTCP message reception; container chosen for iteration performance. - std::vector broadcast_sinks_; -}; - -} // namespace webrtc - -#endif // CALL_RTCP_DEMUXER_H_ diff --git a/call/rtcp_demuxer_unittest.cc b/call/rtcp_demuxer_unittest.cc deleted file mode 100644 index f3949ca78b..0000000000 --- a/call/rtcp_demuxer_unittest.cc +++ /dev/null @@ -1,507 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "call/rtcp_demuxer.h" - -#include - -#include - -#include "api/rtp_headers.h" -#include "call/rtcp_packet_sink_interface.h" -#include "modules/rtp_rtcp/source/rtcp_packet/bye.h" -#include "modules/rtp_rtcp/source/rtp_header_extensions.h" -#include "rtc_base/arraysize.h" -#include "rtc_base/buffer.h" -#include "rtc_base/checks.h" -#include "test/gmock.h" -#include "test/gtest.h" - -namespace webrtc { - -namespace { - -using ::testing::_; -using ::testing::AtLeast; -using ::testing::ElementsAreArray; -using ::testing::InSequence; -using ::testing::Matcher; -using ::testing::NiceMock; - -class MockRtcpPacketSink : public RtcpPacketSinkInterface { - public: - MOCK_METHOD(void, OnRtcpPacket, (rtc::ArrayView), (override)); -}; - -class RtcpDemuxerTest : public ::testing::Test { - protected: - ~RtcpDemuxerTest() { - for (auto* sink : sinks_to_tear_down_) { - demuxer_.RemoveSink(sink); - } - for (auto* sink : broadcast_sinks_to_tear_down_) { - demuxer_.RemoveBroadcastSink(sink); - } - } - - void AddSsrcSink(uint32_t ssrc, RtcpPacketSinkInterface* sink) { - demuxer_.AddSink(ssrc, sink); - sinks_to_tear_down_.insert(sink); - } - - void AddRsidSink(const std::string& rsid, RtcpPacketSinkInterface* sink) { - demuxer_.AddSink(rsid, sink); - sinks_to_tear_down_.insert(sink); - } - - void RemoveSink(RtcpPacketSinkInterface* sink) { - sinks_to_tear_down_.erase(sink); - demuxer_.RemoveSink(sink); - } - - void AddBroadcastSink(RtcpPacketSinkInterface* sink) { - demuxer_.AddBroadcastSink(sink); - broadcast_sinks_to_tear_down_.insert(sink); - } - - void RemoveBroadcastSink(RtcpPacketSinkInterface* sink) { - broadcast_sinks_to_tear_down_.erase(sink); - demuxer_.RemoveBroadcastSink(sink); - } - - RtcpDemuxer demuxer_; - std::set sinks_to_tear_down_; - std::set broadcast_sinks_to_tear_down_; -}; - -class RtcpDemuxerDeathTest : public RtcpDemuxerTest {}; - -// Produces a packet buffer representing an RTCP packet with a given SSRC, -// as it would look when sent over the 
wire. -// |distinguishing_string| allows different RTCP packets with the same SSRC -// to be distinguished. How this is set into the actual packet is -// unimportant, and depends on which RTCP message we choose to use. -rtc::Buffer CreateRtcpPacket(uint32_t ssrc, - const std::string& distinguishing_string = "") { - rtcp::Bye packet; - packet.SetSenderSsrc(ssrc); - if (distinguishing_string != "") { - // Actual way we use |distinguishing_string| is unimportant, so long - // as it ends up in the packet. - packet.SetReason(distinguishing_string); - } - return packet.Build(); -} - -static Matcher> SamePacketAs( - const rtc::Buffer& other) { - return ElementsAreArray(other.cbegin(), other.cend()); -} - -} // namespace - -TEST_F(RtcpDemuxerTest, OnRtcpPacketCalledOnCorrectSinkBySsrc) { - constexpr uint32_t ssrcs[] = {101, 202, 303}; - MockRtcpPacketSink sinks[arraysize(ssrcs)]; - for (size_t i = 0; i < arraysize(ssrcs); i++) { - AddSsrcSink(ssrcs[i], &sinks[i]); - } - - for (size_t i = 0; i < arraysize(ssrcs); i++) { - auto packet = CreateRtcpPacket(ssrcs[i]); - EXPECT_CALL(sinks[i], OnRtcpPacket(SamePacketAs(packet))).Times(1); - demuxer_.OnRtcpPacket(packet); - } -} - -TEST_F(RtcpDemuxerTest, OnRtcpPacketCalledOnResolvedRsidSink) { - // Set up some RSID sinks. - const std::string rsids[] = {"a", "b", "c"}; - MockRtcpPacketSink sinks[arraysize(rsids)]; - for (size_t i = 0; i < arraysize(rsids); i++) { - AddRsidSink(rsids[i], &sinks[i]); - } - - // Only resolve one of the sinks. - constexpr size_t resolved_sink_index = 0; - constexpr uint32_t ssrc = 345; - demuxer_.OnSsrcBoundToRsid(rsids[resolved_sink_index], ssrc); - - // The resolved sink gets notifications of RTCP messages with its SSRC. - auto packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sinks[resolved_sink_index], OnRtcpPacket(SamePacketAs(packet))) - .Times(1); - - // RTCP received; expected calls triggered. 
- demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, - SingleCallbackAfterResolutionOfAnRsidToAlreadyRegisteredSsrc) { - // Associate a sink with an SSRC. - MockRtcpPacketSink sink; - constexpr uint32_t ssrc = 999; - AddSsrcSink(ssrc, &sink); - - // Associate the same sink with an RSID. - const std::string rsid = "r"; - AddRsidSink(rsid, &sink); - - // Resolve the RSID to the aforementioned SSRC. - demuxer_.OnSsrcBoundToRsid(rsid, ssrc); - - // OnRtcpPacket still called only a single time for messages with this SSRC. - auto packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet))).Times(1); - demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, - OnRtcpPacketCalledOnAllBroadcastSinksForAllRtcpPackets) { - MockRtcpPacketSink sinks[3]; - for (MockRtcpPacketSink& sink : sinks) { - AddBroadcastSink(&sink); - } - - constexpr uint32_t ssrc = 747; - auto packet = CreateRtcpPacket(ssrc); - - for (MockRtcpPacketSink& sink : sinks) { - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet))).Times(1); - } - - // RTCP received; expected calls triggered. 
- demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, PacketsDeliveredInRightOrderToNonBroadcastSink) { - constexpr uint32_t ssrc = 101; - MockRtcpPacketSink sink; - AddSsrcSink(ssrc, &sink); - - std::vector packets; - for (size_t i = 0; i < 5; i++) { - packets.push_back(CreateRtcpPacket(ssrc, std::to_string(i))); - } - - InSequence sequence; - for (const auto& packet : packets) { - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet))).Times(1); - } - - for (const auto& packet : packets) { - demuxer_.OnRtcpPacket(packet); - } -} - -TEST_F(RtcpDemuxerTest, PacketsDeliveredInRightOrderToBroadcastSink) { - MockRtcpPacketSink sink; - AddBroadcastSink(&sink); - - std::vector packets; - for (size_t i = 0; i < 5; i++) { - constexpr uint32_t ssrc = 101; - packets.push_back(CreateRtcpPacket(ssrc, std::to_string(i))); - } - - InSequence sequence; - for (const auto& packet : packets) { - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet))).Times(1); - } - - for (const auto& packet : packets) { - demuxer_.OnRtcpPacket(packet); - } -} - -TEST_F(RtcpDemuxerTest, MultipleSinksMappedToSameSsrc) { - MockRtcpPacketSink sinks[3]; - constexpr uint32_t ssrc = 404; - for (auto& sink : sinks) { - AddSsrcSink(ssrc, &sink); - } - - // Reception of an RTCP packet associated with the shared SSRC triggers the - // callback on all of the sinks associated with it. - auto packet = CreateRtcpPacket(ssrc); - for (auto& sink : sinks) { - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet))); - } - - demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, SinkMappedToMultipleSsrcs) { - constexpr uint32_t ssrcs[] = {404, 505, 606}; - MockRtcpPacketSink sink; - for (uint32_t ssrc : ssrcs) { - AddSsrcSink(ssrc, &sink); - } - - // The sink which is associated with multiple SSRCs gets the callback - // triggered for each of those SSRCs. 
- for (uint32_t ssrc : ssrcs) { - auto packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet))); - demuxer_.OnRtcpPacket(packet); - } -} - -TEST_F(RtcpDemuxerTest, MultipleRsidsOnSameSink) { - // Sink associated with multiple sinks. - MockRtcpPacketSink sink; - const std::string rsids[] = {"a", "b", "c"}; - for (const auto& rsid : rsids) { - AddRsidSink(rsid, &sink); - } - - // RSIDs resolved to SSRCs. - uint32_t ssrcs[arraysize(rsids)]; - for (size_t i = 0; i < arraysize(rsids); i++) { - ssrcs[i] = 1000 + static_cast(i); - demuxer_.OnSsrcBoundToRsid(rsids[i], ssrcs[i]); - } - - // Set up packets to match those RSIDs/SSRCs. - std::vector packets; - for (size_t i = 0; i < arraysize(rsids); i++) { - packets.push_back(CreateRtcpPacket(ssrcs[i])); - } - - // The sink expects to receive all of the packets. - for (const auto& packet : packets) { - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet))).Times(1); - } - - // Packet demuxed correctly; OnRtcpPacket() triggered on sink. - for (const auto& packet : packets) { - demuxer_.OnRtcpPacket(packet); - } -} - -TEST_F(RtcpDemuxerTest, RsidUsedByMultipleSinks) { - MockRtcpPacketSink sinks[3]; - const std::string shared_rsid = "a"; - - for (MockRtcpPacketSink& sink : sinks) { - AddRsidSink(shared_rsid, &sink); - } - - constexpr uint32_t shared_ssrc = 888; - demuxer_.OnSsrcBoundToRsid(shared_rsid, shared_ssrc); - - auto packet = CreateRtcpPacket(shared_ssrc); - - for (MockRtcpPacketSink& sink : sinks) { - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet))).Times(1); - } - - demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, NoCallbackOnSsrcSinkRemovedBeforeFirstPacket) { - constexpr uint32_t ssrc = 404; - MockRtcpPacketSink sink; - AddSsrcSink(ssrc, &sink); - - RemoveSink(&sink); - - // The removed sink does not get callbacks. - auto packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sink, OnRtcpPacket(_)).Times(0); // Not called. 
- demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, NoCallbackOnSsrcSinkRemovedAfterFirstPacket) { - constexpr uint32_t ssrc = 404; - NiceMock sink; - AddSsrcSink(ssrc, &sink); - - auto before_packet = CreateRtcpPacket(ssrc); - demuxer_.OnRtcpPacket(before_packet); - - RemoveSink(&sink); - - // The removed sink does not get callbacks. - auto after_packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sink, OnRtcpPacket(_)).Times(0); // Not called. - demuxer_.OnRtcpPacket(after_packet); -} - -TEST_F(RtcpDemuxerTest, NoCallbackOnRsidSinkRemovedBeforeRsidResolution) { - const std::string rsid = "a"; - constexpr uint32_t ssrc = 404; - MockRtcpPacketSink sink; - AddRsidSink(rsid, &sink); - - // Removal before resolution. - RemoveSink(&sink); - demuxer_.OnSsrcBoundToRsid(rsid, ssrc); - - // The removed sink does not get callbacks. - auto packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sink, OnRtcpPacket(_)).Times(0); // Not called. - demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, NoCallbackOnRsidSinkRemovedAfterRsidResolution) { - const std::string rsid = "a"; - constexpr uint32_t ssrc = 404; - MockRtcpPacketSink sink; - AddRsidSink(rsid, &sink); - - // Removal after resolution. - demuxer_.OnSsrcBoundToRsid(rsid, ssrc); - RemoveSink(&sink); - - // The removed sink does not get callbacks. - auto packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sink, OnRtcpPacket(_)).Times(0); // Not called. - demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, NoCallbackOnBroadcastSinkRemovedBeforeFirstPacket) { - MockRtcpPacketSink sink; - AddBroadcastSink(&sink); - - RemoveBroadcastSink(&sink); - - // The removed sink does not get callbacks. - constexpr uint32_t ssrc = 404; - auto packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sink, OnRtcpPacket(_)).Times(0); // Not called. 
- demuxer_.OnRtcpPacket(packet); -} - -TEST_F(RtcpDemuxerTest, NoCallbackOnBroadcastSinkRemovedAfterFirstPacket) { - NiceMock sink; - AddBroadcastSink(&sink); - - constexpr uint32_t ssrc = 404; - auto before_packet = CreateRtcpPacket(ssrc); - demuxer_.OnRtcpPacket(before_packet); - - RemoveBroadcastSink(&sink); - - // The removed sink does not get callbacks. - auto after_packet = CreateRtcpPacket(ssrc); - EXPECT_CALL(sink, OnRtcpPacket(_)).Times(0); // Not called. - demuxer_.OnRtcpPacket(after_packet); -} - -// The RSID to SSRC mapping should be one-to-one. If we end up receiving -// two (or more) packets with the same SSRC, but different RSIDs, we guarantee -// remembering the first one; no guarantees are made about further associations. -TEST_F(RtcpDemuxerTest, FirstResolutionOfRsidNotForgotten) { - MockRtcpPacketSink sink; - const std::string rsid = "a"; - AddRsidSink(rsid, &sink); - - constexpr uint32_t ssrc_a = 111; // First resolution - guaranteed effective. - demuxer_.OnSsrcBoundToRsid(rsid, ssrc_a); - - constexpr uint32_t ssrc_b = 222; // Second resolution - no guarantees. 
- demuxer_.OnSsrcBoundToRsid(rsid, ssrc_b); - - auto packet_a = CreateRtcpPacket(ssrc_a); - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet_a))).Times(1); - demuxer_.OnRtcpPacket(packet_a); - - auto packet_b = CreateRtcpPacket(ssrc_b); - EXPECT_CALL(sink, OnRtcpPacket(SamePacketAs(packet_b))).Times(AtLeast(0)); - demuxer_.OnRtcpPacket(packet_b); -} - -#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) - -TEST_F(RtcpDemuxerDeathTest, RepeatedSsrcToSinkAssociationsDisallowed) { - MockRtcpPacketSink sink; - - constexpr uint32_t ssrc = 101; - AddSsrcSink(ssrc, &sink); - EXPECT_DEATH(AddSsrcSink(ssrc, &sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, RepeatedRsidToSinkAssociationsDisallowed) { - MockRtcpPacketSink sink; - - const std::string rsid = "z"; - AddRsidSink(rsid, &sink); - EXPECT_DEATH(AddRsidSink(rsid, &sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, RepeatedBroadcastSinkRegistrationDisallowed) { - MockRtcpPacketSink sink; - - AddBroadcastSink(&sink); - EXPECT_DEATH(AddBroadcastSink(&sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, SsrcSinkCannotAlsoBeRegisteredAsBroadcast) { - MockRtcpPacketSink sink; - - constexpr uint32_t ssrc = 101; - AddSsrcSink(ssrc, &sink); - EXPECT_DEATH(AddBroadcastSink(&sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, RsidSinkCannotAlsoBeRegisteredAsBroadcast) { - MockRtcpPacketSink sink; - - const std::string rsid = "z"; - AddRsidSink(rsid, &sink); - EXPECT_DEATH(AddBroadcastSink(&sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, BroadcastSinkCannotAlsoBeRegisteredAsSsrcSink) { - MockRtcpPacketSink sink; - - AddBroadcastSink(&sink); - constexpr uint32_t ssrc = 101; - EXPECT_DEATH(AddSsrcSink(ssrc, &sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, BroadcastSinkCannotAlsoBeRegisteredAsRsidSink) { - MockRtcpPacketSink sink; - - AddBroadcastSink(&sink); - const std::string rsid = "j"; - EXPECT_DEATH(AddRsidSink(rsid, &sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, MayNotCallRemoveSinkOnNeverAddedSink) { - 
MockRtcpPacketSink sink; - EXPECT_DEATH(RemoveSink(&sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, MayNotCallRemoveBroadcastSinkOnNeverAddedSink) { - MockRtcpPacketSink sink; - EXPECT_DEATH(RemoveBroadcastSink(&sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, RsidMustBeNonEmpty) { - MockRtcpPacketSink sink; - EXPECT_DEATH(AddRsidSink("", &sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, RsidMustBeAlphaNumeric) { - MockRtcpPacketSink sink; - EXPECT_DEATH(AddRsidSink("a_3", &sink), ""); -} - -TEST_F(RtcpDemuxerDeathTest, RsidMustNotExceedMaximumLength) { - MockRtcpPacketSink sink; - std::string rsid(BaseRtpStringExtension::kMaxValueSizeBytes + 1, 'a'); - EXPECT_DEATH(AddRsidSink(rsid, &sink), ""); -} - -#endif - -} // namespace webrtc diff --git a/call/rtcp_packet_sink_interface.h b/call/rtcp_packet_sink_interface.h deleted file mode 100644 index 8ea3f7d21c..0000000000 --- a/call/rtcp_packet_sink_interface.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#ifndef CALL_RTCP_PACKET_SINK_INTERFACE_H_ -#define CALL_RTCP_PACKET_SINK_INTERFACE_H_ - -#include "api/array_view.h" - -namespace webrtc { - -// This class represents a receiver of unparsed RTCP packets. -// TODO(eladalon): Replace this by demuxing over parsed rather than raw data. -// Whether this should be over an entire RTCP packet, or over RTCP blocks, -// is still under discussion. 
-class RtcpPacketSinkInterface { - public: - virtual ~RtcpPacketSinkInterface() = default; - virtual void OnRtcpPacket(rtc::ArrayView packet) = 0; -}; - -} // namespace webrtc - -#endif // CALL_RTCP_PACKET_SINK_INTERFACE_H_ diff --git a/call/rtp_demuxer.cc b/call/rtp_demuxer.cc index 3ab75c7f98..28962fd2eb 100644 --- a/call/rtp_demuxer.cc +++ b/call/rtp_demuxer.cc @@ -11,8 +11,6 @@ #include "call/rtp_demuxer.h" #include "call/rtp_packet_sink_interface.h" -#include "call/rtp_rtcp_demuxer_helper.h" -#include "call/ssrc_binding_observer.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "rtc_base/checks.h" @@ -20,10 +18,42 @@ #include "rtc_base/strings/string_builder.h" namespace webrtc { +namespace { + +template +size_t RemoveFromMultimapByValue(Container* multimap, const Value& value) { + size_t count = 0; + for (auto it = multimap->begin(); it != multimap->end();) { + if (it->second == value) { + it = multimap->erase(it); + ++count; + } else { + ++it; + } + } + return count; +} + +template +size_t RemoveFromMapByValue(Map* map, const Value& value) { + return EraseIf(*map, [&](const auto& elem) { return elem.second == value; }); +} + +} // namespace RtpDemuxerCriteria::RtpDemuxerCriteria() = default; RtpDemuxerCriteria::~RtpDemuxerCriteria() = default; +bool RtpDemuxerCriteria::operator==(const RtpDemuxerCriteria& other) const { + return this->mid == other.mid && this->rsid == other.rsid && + this->ssrcs == other.ssrcs && + this->payload_types == other.payload_types; +} + +bool RtpDemuxerCriteria::operator!=(const RtpDemuxerCriteria& other) const { + return !(*this == other); +} + std::string RtpDemuxerCriteria::ToString() const { rtc::StringBuilder sb; sb << "{mid: " << (mid.empty() ? 
"" : mid) @@ -62,7 +92,7 @@ std::string RtpDemuxer::DescribePacket(const RtpPacketReceived& packet) { return sb.Release(); } -RtpDemuxer::RtpDemuxer() = default; +RtpDemuxer::RtpDemuxer(bool use_mid /* = true*/) : use_mid_(use_mid) {} RtpDemuxer::~RtpDemuxer() { RTC_DCHECK(sink_by_mid_.empty()); @@ -70,7 +100,6 @@ RtpDemuxer::~RtpDemuxer() { RTC_DCHECK(sinks_by_pt_.empty()); RTC_DCHECK(sink_by_mid_and_rsid_.empty()); RTC_DCHECK(sink_by_rsid_.empty()); - RTC_DCHECK(ssrc_binding_observers_.empty()); } bool RtpDemuxer::AddSink(const RtpDemuxerCriteria& criteria, @@ -327,12 +356,7 @@ RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByMid(const std::string& mid, const auto it = sink_by_mid_.find(mid); if (it != sink_by_mid_.end()) { RtpPacketSinkInterface* sink = it->second; - bool notify = AddSsrcSinkBinding(ssrc, sink); - if (notify) { - for (auto* observer : ssrc_binding_observers_) { - observer->OnSsrcBoundToMid(mid, ssrc); - } - } + AddSsrcSinkBinding(ssrc, sink); return sink; } return nullptr; @@ -345,39 +369,22 @@ RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByMidRsid( const auto it = sink_by_mid_and_rsid_.find(std::make_pair(mid, rsid)); if (it != sink_by_mid_and_rsid_.end()) { RtpPacketSinkInterface* sink = it->second; - bool notify = AddSsrcSinkBinding(ssrc, sink); - if (notify) { - for (auto* observer : ssrc_binding_observers_) { - observer->OnSsrcBoundToMidRsid(mid, rsid, ssrc); - } - } + AddSsrcSinkBinding(ssrc, sink); return sink; } return nullptr; } -void RtpDemuxer::RegisterRsidResolutionObserver(SsrcBindingObserver* observer) { - RegisterSsrcBindingObserver(observer); -} RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByRsid(const std::string& rsid, uint32_t ssrc) { const auto it = sink_by_rsid_.find(rsid); if (it != sink_by_rsid_.end()) { RtpPacketSinkInterface* sink = it->second; - bool notify = AddSsrcSinkBinding(ssrc, sink); - if (notify) { - for (auto* observer : ssrc_binding_observers_) { - observer->OnSsrcBoundToRsid(rsid, ssrc); - } - } + 
AddSsrcSinkBinding(ssrc, sink); return sink; } return nullptr; } -void RtpDemuxer::DeregisterRsidResolutionObserver( - const SsrcBindingObserver* observer) { - DeregisterSsrcBindingObserver(observer); -} RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByPayloadType( uint8_t payload_type, @@ -388,25 +395,20 @@ RtpPacketSinkInterface* RtpDemuxer::ResolveSinkByPayloadType( const auto end = range.second; if (std::next(it) == end) { RtpPacketSinkInterface* sink = it->second; - bool notify = AddSsrcSinkBinding(ssrc, sink); - if (notify) { - for (auto* observer : ssrc_binding_observers_) { - observer->OnSsrcBoundToPayloadType(payload_type, ssrc); - } - } + AddSsrcSinkBinding(ssrc, sink); return sink; } } return nullptr; } -bool RtpDemuxer::AddSsrcSinkBinding(uint32_t ssrc, +void RtpDemuxer::AddSsrcSinkBinding(uint32_t ssrc, RtpPacketSinkInterface* sink) { if (sink_by_ssrc_.size() >= kMaxSsrcBindings) { RTC_LOG(LS_WARNING) << "New SSRC=" << ssrc << " sink binding ignored; limit of" << kMaxSsrcBindings << " bindings has been reached."; - return false; + return; } auto result = sink_by_ssrc_.emplace(ssrc, sink); @@ -415,31 +417,11 @@ bool RtpDemuxer::AddSsrcSinkBinding(uint32_t ssrc, if (inserted) { RTC_LOG(LS_INFO) << "Added sink = " << sink << " binding with SSRC=" << ssrc; - return true; - } - if (it->second != sink) { + } else if (it->second != sink) { RTC_LOG(LS_INFO) << "Updated sink = " << sink << " binding with SSRC=" << ssrc; it->second = sink; - return true; } - return false; -} - -void RtpDemuxer::RegisterSsrcBindingObserver(SsrcBindingObserver* observer) { - RTC_DCHECK(observer); - RTC_DCHECK(!ContainerHasKey(ssrc_binding_observers_, observer)); - - ssrc_binding_observers_.push_back(observer); -} - -void RtpDemuxer::DeregisterSsrcBindingObserver( - const SsrcBindingObserver* observer) { - RTC_DCHECK(observer); - auto it = std::find(ssrc_binding_observers_.begin(), - ssrc_binding_observers_.end(), observer); - RTC_DCHECK(it != ssrc_binding_observers_.end()); - 
ssrc_binding_observers_.erase(it); } } // namespace webrtc diff --git a/call/rtp_demuxer.h b/call/rtp_demuxer.h index dae7a53b70..fb65fce368 100644 --- a/call/rtp_demuxer.h +++ b/call/rtp_demuxer.h @@ -12,16 +12,17 @@ #define CALL_RTP_DEMUXER_H_ #include -#include #include #include #include +#include "rtc_base/containers/flat_map.h" +#include "rtc_base/containers/flat_set.h" + namespace webrtc { class RtpPacketReceived; class RtpPacketSinkInterface; -class SsrcBindingObserver; // This struct describes the criteria that will be used to match packets to a // specific sink. @@ -29,6 +30,9 @@ struct RtpDemuxerCriteria { RtpDemuxerCriteria(); ~RtpDemuxerCriteria(); + bool operator==(const RtpDemuxerCriteria& other) const; + bool operator!=(const RtpDemuxerCriteria& other) const; + // If not the empty string, will match packets with this MID. std::string mid; @@ -40,10 +44,10 @@ struct RtpDemuxerCriteria { std::string rsid; // Will match packets with any of these SSRCs. - std::set ssrcs; + flat_set ssrcs; // Will match packets with any of these payload types. - std::set payload_types; + flat_set payload_types; // Return string representation of demux criteria to facilitate logging std::string ToString() const; @@ -95,7 +99,7 @@ class RtpDemuxer { // relevant for demuxing. static std::string DescribePacket(const RtpPacketReceived& packet); - RtpDemuxer(); + explicit RtpDemuxer(bool use_mid = true); ~RtpDemuxer(); RtpDemuxer(const RtpDemuxer&) = delete; @@ -133,21 +137,6 @@ class RtpDemuxer { // if the packet was forwarded and false if the packet was dropped. bool OnRtpPacket(const RtpPacketReceived& packet); - // The Observer will be notified when an attribute (e.g., RSID, MID, etc.) is - // bound to an SSRC. - void RegisterSsrcBindingObserver(SsrcBindingObserver* observer); - // Deprecated: Use the above method. - void RegisterRsidResolutionObserver(SsrcBindingObserver* observer); - - // Undo a previous RegisterSsrcBindingObserver(). 
- void DeregisterSsrcBindingObserver(const SsrcBindingObserver* observer); - // Deprecated: Use the above method. - void DeregisterRsidResolutionObserver(const SsrcBindingObserver* observer); - - // Configure whether to look at the MID header extension when demuxing - // incoming RTP packets. By default this is enabled. - void set_use_mid(bool use_mid) { use_mid_ = use_mid; } - private: // Returns true if adding a sink with the given criteria would cause conflicts // with the existing criteria and should be rejected. @@ -181,35 +170,29 @@ class RtpDemuxer { // Note: Mappings are only modified by AddSink/RemoveSink (except for // SSRC mapping which receives all MID, payload type, or RSID to SSRC bindings // discovered when demuxing packets). - std::map sink_by_mid_; - std::map sink_by_ssrc_; + flat_map sink_by_mid_; + flat_map sink_by_ssrc_; std::multimap sinks_by_pt_; - std::map, RtpPacketSinkInterface*> + flat_map, RtpPacketSinkInterface*> sink_by_mid_and_rsid_; - std::map sink_by_rsid_; + flat_map sink_by_rsid_; // Tracks all the MIDs that have been identified in added criteria. Used to // determine if a packet should be dropped right away because the MID is // unknown. - std::set known_mids_; + flat_set known_mids_; // Records learned mappings of MID --> SSRC and RSID --> SSRC as packets are // received. // This is stored separately from the sink mappings because if a sink is // removed we want to still remember these associations. - std::map mid_by_ssrc_; - std::map rsid_by_ssrc_; - - // Adds a binding from the SSRC to the given sink. Returns true if there was - // not already a sink bound to the SSRC or if the sink replaced a different - // sink. Returns false if the binding was unchanged. - bool AddSsrcSinkBinding(uint32_t ssrc, RtpPacketSinkInterface* sink); + flat_map mid_by_ssrc_; + flat_map rsid_by_ssrc_; - // Observers which will be notified when an RSID association to an SSRC is - // resolved by this object. 
- std::vector ssrc_binding_observers_; + // Adds a binding from the SSRC to the given sink. + void AddSsrcSinkBinding(uint32_t ssrc, RtpPacketSinkInterface* sink); - bool use_mid_ = true; + const bool use_mid_; }; } // namespace webrtc diff --git a/call/rtp_demuxer_unittest.cc b/call/rtp_demuxer_unittest.cc index 59baafe9ff..a4abab73ed 100644 --- a/call/rtp_demuxer_unittest.cc +++ b/call/rtp_demuxer_unittest.cc @@ -14,7 +14,6 @@ #include #include -#include "call/ssrc_binding_observer.h" #include "call/test/mock_rtp_packet_sink_interface.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" @@ -31,39 +30,15 @@ namespace { using ::testing::_; using ::testing::AtLeast; -using ::testing::AtMost; using ::testing::InSequence; using ::testing::NiceMock; -class MockSsrcBindingObserver : public SsrcBindingObserver { - public: - MOCK_METHOD(void, - OnSsrcBoundToRsid, - (const std::string& rsid, uint32_t ssrc), - (override)); - MOCK_METHOD(void, - OnSsrcBoundToMid, - (const std::string& mid, uint32_t ssrc), - (override)); - MOCK_METHOD(void, - OnSsrcBoundToMidRsid, - (const std::string& mid, const std::string& rsid, uint32_t ssrc), - (override)); - MOCK_METHOD(void, - OnSsrcBoundToPayloadType, - (uint8_t payload_type, uint32_t ssrc), - (override)); -}; - class RtpDemuxerTest : public ::testing::Test { protected: ~RtpDemuxerTest() { for (auto* sink : sinks_to_tear_down_) { demuxer_.RemoveSink(sink); } - for (auto* observer : observers_to_tear_down_) { - demuxer_.DeregisterSsrcBindingObserver(observer); - } } // These are convenience methods for calling demuxer.AddSink with different @@ -111,20 +86,6 @@ class RtpDemuxerTest : public ::testing::Test { return demuxer_.RemoveSink(sink); } - // These are convenience methods for calling - // demuxer.{Register|Unregister}SsrcBindingObserver such that observers are - // automatically removed when the test finishes. 
- - void RegisterSsrcBindingObserver(SsrcBindingObserver* observer) { - demuxer_.RegisterSsrcBindingObserver(observer); - observers_to_tear_down_.insert(observer); - } - - void DeregisterSsrcBindingObserver(SsrcBindingObserver* observer) { - demuxer_.DeregisterSsrcBindingObserver(observer); - observers_to_tear_down_.erase(observer); - } - // The CreatePacket* methods are helpers for creating new RTP packets with // various attributes set. Tests should use the helper that provides the // minimum information needed to exercise the behavior under test. Tests also @@ -214,7 +175,6 @@ class RtpDemuxerTest : public ::testing::Test { RtpDemuxer demuxer_; std::set sinks_to_tear_down_; - std::set observers_to_tear_down_; uint16_t next_sequence_number_ = 1; }; @@ -756,73 +716,6 @@ TEST_F(RtpDemuxerTest, AssociatingByRsidAndBySsrcCannotTriggerDoubleCall) { EXPECT_TRUE(demuxer_.OnRtpPacket(*packet)); } -TEST_F(RtpDemuxerTest, ObserversNotifiedOfSsrcBoundToMid) { - const std::string mid = "v"; - constexpr uint32_t ssrc = 10; - - NiceMock sink; - AddSinkOnlyMid(mid, &sink); - - MockSsrcBindingObserver observer; - RegisterSsrcBindingObserver(&observer); - - auto packet = CreatePacketWithSsrcMid(ssrc, mid); - EXPECT_CALL(observer, OnSsrcBoundToMid(mid, ssrc)); - EXPECT_TRUE(demuxer_.OnRtpPacket(*packet)); -} - -TEST_F(RtpDemuxerTest, ObserversNotifiedOfSsrcBoundToRsid) { - const std::string rsid = "1"; - constexpr uint32_t ssrc = 111; - - // Only RSIDs which the demuxer knows may be resolved. - NiceMock sink; - AddSinkOnlyRsid(rsid, &sink); - - NiceMock rsid_resolution_observers[3]; - for (auto& observer : rsid_resolution_observers) { - RegisterSsrcBindingObserver(&observer); - EXPECT_CALL(observer, OnSsrcBoundToRsid(rsid, ssrc)).Times(1); - } - - // The expected calls to OnSsrcBoundToRsid() will be triggered by this. 
- auto packet = CreatePacketWithSsrcRsid(ssrc, rsid); - EXPECT_TRUE(demuxer_.OnRtpPacket(*packet)); -} - -TEST_F(RtpDemuxerTest, ObserversNotifiedOfSsrcBoundToMidRsid) { - const std::string mid = "v"; - const std::string rsid = "1"; - constexpr uint32_t ssrc = 10; - - NiceMock sink; - AddSinkBothMidRsid(mid, rsid, &sink); - - MockSsrcBindingObserver observer; - RegisterSsrcBindingObserver(&observer); - - auto packet = CreatePacketWithSsrcMidRsid(ssrc, mid, rsid); - EXPECT_CALL(observer, OnSsrcBoundToMidRsid(mid, rsid, ssrc)); - EXPECT_TRUE(demuxer_.OnRtpPacket(*packet)); -} - -TEST_F(RtpDemuxerTest, ObserversNotifiedOfSsrcBoundToPayloadType) { - constexpr uint8_t payload_type = 3; - constexpr uint32_t ssrc = 10; - - RtpDemuxerCriteria criteria; - criteria.payload_types = {payload_type}; - NiceMock sink; - AddSink(criteria, &sink); - - MockSsrcBindingObserver observer; - RegisterSsrcBindingObserver(&observer); - - auto packet = CreatePacketWithSsrc(ssrc); - packet->SetPayloadType(payload_type); - EXPECT_CALL(observer, OnSsrcBoundToPayloadType(payload_type, ssrc)); - EXPECT_TRUE(demuxer_.OnRtpPacket(*packet)); -} // If one sink is associated with SSRC x, and another sink with RSID y, then if // we receive a packet with both SSRC x and RSID y, route that to only the sink @@ -857,9 +750,6 @@ TEST_F(RtpDemuxerTest, NiceMock rsid_sink; AddSinkOnlyRsid(rsid, &rsid_sink); - NiceMock observer; - RegisterSsrcBindingObserver(&observer); - // The SSRC was mapped to an SSRC sink, but was even active (packets flowed // over it). auto packet = CreatePacketWithSsrcRsid(ssrc, rsid); @@ -870,7 +760,6 @@ TEST_F(RtpDemuxerTest, // is guaranteed. 
RemoveSink(&ssrc_sink); EXPECT_CALL(rsid_sink, OnRtpPacket(SamePacketAs(*packet))).Times(AtLeast(0)); - EXPECT_CALL(observer, OnSsrcBoundToRsid(rsid, ssrc)).Times(AtLeast(0)); EXPECT_TRUE(demuxer_.OnRtpPacket(*packet)); } @@ -1365,127 +1254,6 @@ TEST_F(RtpDemuxerTest, PacketWithMidAndUnknownRsidIsNotRoutedByPayloadType) { EXPECT_FALSE(demuxer_.OnRtpPacket(*packet)); } -// Observers are only notified of an SSRC binding to an RSID if we care about -// the RSID (i.e., have a sink added for that RSID). -TEST_F(RtpDemuxerTest, ObserversNotNotifiedOfUntrackedRsids) { - const std::string rsid = "1"; - constexpr uint32_t ssrc = 111; - - MockSsrcBindingObserver rsid_resolution_observers[3]; - for (auto& observer : rsid_resolution_observers) { - RegisterSsrcBindingObserver(&observer); - EXPECT_CALL(observer, OnSsrcBoundToRsid(_, _)).Times(0); - } - - // Since no sink is registered for this SSRC/RSID, expect the packet to not be - // routed and no observers notified of the SSRC -> RSID binding. - EXPECT_FALSE(demuxer_.OnRtpPacket(*CreatePacketWithSsrcRsid(ssrc, rsid))); -} - -// Ensure that observers are notified of SSRC bindings only once per unique -// binding source (e.g., SSRC -> MID, SSRC -> RSID, etc.) -TEST_F(RtpDemuxerTest, ObserversNotifiedOfSsrcBoundtoMidOnlyOnce) { - const std::string mid = "v"; - constexpr uint32_t ssrc = 10; - - NiceMock sink; - AddSinkOnlyMid(mid, &sink); - - MockSsrcBindingObserver observer; - RegisterSsrcBindingObserver(&observer); - - EXPECT_CALL(observer, OnSsrcBoundToMid(mid, ssrc)).Times(1); - - demuxer_.OnRtpPacket(*CreatePacketWithSsrcMid(ssrc, mid)); - demuxer_.OnRtpPacket(*CreatePacketWithSsrcMid(ssrc, mid)); -} - -// Ensure that when a new SSRC -> MID binding is discovered observers are also -// notified of that, even if there has already been an SSRC bound to the MID. 
-TEST_F(RtpDemuxerTest, ObserversNotifiedOfSsrcBoundtoMidWhenSsrcChanges) { - const std::string mid = "v"; - constexpr uint32_t ssrc1 = 10; - constexpr uint32_t ssrc2 = 11; - - NiceMock sink; - AddSinkOnlyMid(mid, &sink); - - MockSsrcBindingObserver observer; - RegisterSsrcBindingObserver(&observer); - - InSequence seq; - EXPECT_CALL(observer, OnSsrcBoundToMid(mid, ssrc1)).Times(1); - EXPECT_CALL(observer, OnSsrcBoundToMid(mid, ssrc2)).Times(1); - - auto p1 = CreatePacketWithSsrcMid(ssrc1, mid); - demuxer_.OnRtpPacket(*p1); - - auto p2 = CreatePacketWithSsrcMid(ssrc2, mid); - demuxer_.OnRtpPacket(*p2); -} - -TEST_F(RtpDemuxerTest, DeregisteredRsidObserversNotInformedOfResolutions) { - constexpr uint32_t ssrc = 111; - const std::string rsid = "a"; - NiceMock sink; - AddSinkOnlyRsid(rsid, &sink); - - // Register several, then deregister only one, to show that not all of the - // observers had been forgotten when one was removed. - MockSsrcBindingObserver observer_1; - MockSsrcBindingObserver observer_2_removed; - MockSsrcBindingObserver observer_3; - - RegisterSsrcBindingObserver(&observer_1); - RegisterSsrcBindingObserver(&observer_2_removed); - RegisterSsrcBindingObserver(&observer_3); - - DeregisterSsrcBindingObserver(&observer_2_removed); - - EXPECT_CALL(observer_1, OnSsrcBoundToRsid(rsid, ssrc)).Times(1); - EXPECT_CALL(observer_2_removed, OnSsrcBoundToRsid(_, _)).Times(0); - EXPECT_CALL(observer_3, OnSsrcBoundToRsid(rsid, ssrc)).Times(1); - - // The expected calls to OnSsrcBoundToRsid() will be triggered by this. 
- demuxer_.OnRtpPacket(*CreatePacketWithSsrcRsid(ssrc, rsid)); -} - -TEST_F(RtpDemuxerTest, - PacketFittingBothRsidSinkAndSsrcSinkTriggersResolutionCallbacks) { - constexpr uint32_t ssrc = 111; - NiceMock ssrc_sink; - AddSinkOnlySsrc(ssrc, &ssrc_sink); - - const std::string rsid = "a"; - NiceMock rsid_sink; - AddSinkOnlyRsid(rsid, &rsid_sink); - - MockSsrcBindingObserver observer; - RegisterSsrcBindingObserver(&observer); - - auto packet = CreatePacketWithSsrcRsid(ssrc, rsid); - EXPECT_CALL(observer, OnSsrcBoundToRsid(rsid, ssrc)).Times(1); - demuxer_.OnRtpPacket(*packet); -} - -TEST_F(RtpDemuxerTest, MaliciousPeerCannotCauseMemoryOveruse) { - const std::string mid = "v"; - - NiceMock sink; - AddSinkOnlyMid(mid, &sink); - - MockSsrcBindingObserver observer; - RegisterSsrcBindingObserver(&observer); - - EXPECT_CALL(observer, OnSsrcBoundToMid(_, _)) - .Times(AtMost(RtpDemuxer::kMaxSsrcBindings)); - - for (int i = 0; i < RtpDemuxer::kMaxSsrcBindings + 1; i++) { - auto packet = CreatePacketWithSsrcMid(i, mid); - EXPECT_TRUE(demuxer_.OnRtpPacket(*packet)); - } -} - #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) TEST_F(RtpDemuxerDeathTest, CriteriaMustBeNonEmpty) { @@ -1516,19 +1284,6 @@ TEST_F(RtpDemuxerDeathTest, MidMustNotExceedMaximumLength) { EXPECT_DEATH(AddSinkOnlyMid(mid, &sink), ""); } -TEST_F(RtpDemuxerDeathTest, - DoubleRegisterationOfSsrcBindingObserverDisallowed) { - MockSsrcBindingObserver observer; - RegisterSsrcBindingObserver(&observer); - EXPECT_DEATH(RegisterSsrcBindingObserver(&observer), ""); -} - -TEST_F(RtpDemuxerDeathTest, - DregisterationOfNeverRegisteredSsrcBindingObserverDisallowed) { - MockSsrcBindingObserver observer; - EXPECT_DEATH(DeregisterSsrcBindingObserver(&observer), ""); -} - #endif } // namespace diff --git a/call/rtp_payload_params.cc b/call/rtp_payload_params.cc index 635ed52c1a..c6a56a389e 100644 --- a/call/rtp_payload_params.cc +++ b/call/rtp_payload_params.cc @@ -36,6 +36,7 @@ void 
PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info, absl::optional spatial_index, RTPVideoHeader* rtp) { rtp->codec = info.codecType; + rtp->is_last_frame_in_picture = info.end_of_picture; switch (info.codecType) { case kVideoCodecVP8: { auto& vp8_header = rtp->video_type_header.emplace(); @@ -85,7 +86,7 @@ void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info, for (int i = 0; i < info.codecSpecific.VP9.num_ref_pics; ++i) { vp9_header.pid_diff[i] = info.codecSpecific.VP9.p_diff[i]; } - vp9_header.end_of_picture = info.codecSpecific.VP9.end_of_picture; + vp9_header.end_of_picture = info.end_of_picture; return; } case kVideoCodecH264: { @@ -93,15 +94,6 @@ void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info, h264_header.packetization_mode = info.codecSpecific.H264.packetization_mode; rtp->simulcastIdx = spatial_index.value_or(0); - rtp->frame_marking.temporal_id = kNoTemporalIdx; - if (info.codecSpecific.H264.temporal_idx != kNoTemporalIdx) { - rtp->frame_marking.temporal_id = info.codecSpecific.H264.temporal_idx; - rtp->frame_marking.layer_id = 0; - rtp->frame_marking.independent_frame = - info.codecSpecific.H264.idr_frame; - rtp->frame_marking.base_layer_sync = - info.codecSpecific.H264.base_layer_sync; - } return; } case kVideoCodecMultiplex: @@ -140,9 +132,9 @@ RtpPayloadParams::RtpPayloadParams(const uint32_t ssrc, generic_picture_id_experiment_( absl::StartsWith(trials.Lookup("WebRTC-GenericPictureId"), "Enabled")), - generic_descriptor_experiment_( - !absl::StartsWith(trials.Lookup("WebRTC-GenericDescriptor"), - "Disabled")) { + simulate_generic_vp9_( + absl::StartsWith(trials.Lookup("WebRTC-Vp9DependencyDescriptor"), + "Enabled")) { for (auto& spatial_layer : last_shared_frame_id_) spatial_layer.fill(-1); @@ -167,7 +159,7 @@ RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader( PopulateRtpWithCodecSpecifics(*codec_specific_info, image.SpatialIndex(), &rtp_video_header); } - rtp_video_header.frame_type = image._frameType, + 
rtp_video_header.frame_type = image._frameType; rtp_video_header.rotation = image.rotation_; rtp_video_header.content_type = image.content_type_; rtp_video_header.playout_delay = image.playout_delay_; @@ -176,6 +168,7 @@ RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader( rtp_video_header.color_space = image.ColorSpace() ? absl::make_optional(*image.ColorSpace()) : absl::nullopt; + rtp_video_header.video_frame_tracking_id = image.VideoFrameTrackingId(); SetVideoTiming(image, &rtp_video_header.video_timing); const bool is_keyframe = image._frameType == VideoFrameType::kVideoFrameKey; @@ -186,9 +179,8 @@ RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader( SetCodecSpecific(&rtp_video_header, first_frame_in_picture); - if (generic_descriptor_experiment_) - SetGeneric(codec_specific_info, shared_frame_id, is_keyframe, - &rtp_video_header); + SetGeneric(codec_specific_info, shared_frame_id, is_keyframe, + &rtp_video_header); return rtp_video_header; } @@ -237,14 +229,6 @@ void RtpPayloadParams::SetCodecSpecific(RTPVideoHeader* rtp_video_header, vp9_header.tl0_pic_idx = state_.tl0_pic_idx; } } - if (rtp_video_header->codec == kVideoCodecH264) { - if (rtp_video_header->frame_marking.temporal_id != kNoTemporalIdx) { - if (rtp_video_header->frame_marking.temporal_id == 0) { - ++state_.tl0_pic_idx; - } - rtp_video_header->frame_marking.tl0_pic_idx = state_.tl0_pic_idx; - } - } if (generic_picture_id_experiment_ && rtp_video_header->codec == kVideoCodecGeneric) { rtp_video_header->video_type_header.emplace() @@ -255,17 +239,17 @@ void RtpPayloadParams::SetCodecSpecific(RTPVideoHeader* rtp_video_header, RTPVideoHeader::GenericDescriptorInfo RtpPayloadParams::GenericDescriptorFromFrameInfo( const GenericFrameInfo& frame_info, - int64_t frame_id, - VideoFrameType frame_type) { + int64_t frame_id) { RTPVideoHeader::GenericDescriptorInfo generic; generic.frame_id = frame_id; generic.dependencies = dependencies_calculator_.FromBuffersUsage( - frame_type, frame_id, 
frame_info.encoder_buffers); + frame_id, frame_info.encoder_buffers); generic.chain_diffs = chains_calculator_.From(frame_id, frame_info.part_of_chain); generic.spatial_index = frame_info.spatial_id; generic.temporal_index = frame_info.temporal_id; generic.decode_target_indications = frame_info.decode_target_indications; + generic.active_decode_targets = frame_info.active_decode_targets; return generic; } @@ -280,9 +264,8 @@ void RtpPayloadParams::SetGeneric(const CodecSpecificInfo* codec_specific_info, chains_calculator_.Reset( codec_specific_info->generic_frame_info->part_of_chain); } - rtp_video_header->generic = - GenericDescriptorFromFrameInfo(*codec_specific_info->generic_frame_info, - frame_id, rtp_video_header->frame_type); + rtp_video_header->generic = GenericDescriptorFromFrameInfo( + *codec_specific_info->generic_frame_info, frame_id); return; } @@ -297,8 +280,13 @@ void RtpPayloadParams::SetGeneric(const CodecSpecificInfo* codec_specific_info, } return; case VideoCodecType::kVideoCodecVP9: + if (simulate_generic_vp9_ && codec_specific_info != nullptr) { + Vp9ToGeneric(codec_specific_info->codecSpecific.VP9, frame_id, + *rtp_video_header); + } + return; case VideoCodecType::kVideoCodecAV1: - // TODO(philipel): Implement VP9 and AV1 to generic descriptor. + // TODO(philipel): Implement AV1 to generic descriptor. 
return; case VideoCodecType::kVideoCodecH264: if (codec_specific_info) { @@ -419,6 +407,150 @@ void RtpPayloadParams::Vp8ToGeneric(const CodecSpecificInfoVP8& vp8_info, } } +FrameDependencyStructure RtpPayloadParams::MinimalisticVp9Structure( + const CodecSpecificInfoVP9& vp9) { + const int num_spatial_layers = vp9.num_spatial_layers; + const int num_temporal_layers = kMaxTemporalStreams; + FrameDependencyStructure structure; + structure.num_decode_targets = num_spatial_layers * num_temporal_layers; + structure.num_chains = num_spatial_layers; + structure.templates.reserve(num_spatial_layers * num_temporal_layers); + for (int sid = 0; sid < num_spatial_layers; ++sid) { + for (int tid = 0; tid < num_temporal_layers; ++tid) { + FrameDependencyTemplate a_template; + a_template.spatial_id = sid; + a_template.temporal_id = tid; + for (int s = 0; s < num_spatial_layers; ++s) { + for (int t = 0; t < num_temporal_layers; ++t) { + // Prefer kSwitch for indication frame is part of the decode target + // because RtpPayloadParams::Vp9ToGeneric uses that indication more + // often that kRequired, increasing chance custom dti need not to + // use more bits in dependency descriptor on the wire. + a_template.decode_target_indications.push_back( + sid <= s && tid <= t ? DecodeTargetIndication::kSwitch + : DecodeTargetIndication::kNotPresent); + } + } + a_template.frame_diffs.push_back(tid == 0 ? 
num_spatial_layers * + num_temporal_layers + : num_spatial_layers); + a_template.chain_diffs.assign(structure.num_chains, 1); + structure.templates.push_back(a_template); + + structure.decode_target_protected_by_chain.push_back(sid); + } + if (vp9.ss_data_available && vp9.spatial_layer_resolution_present) { + structure.resolutions.emplace_back(vp9.width[sid], vp9.height[sid]); + } + } + return structure; +} + +void RtpPayloadParams::Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info, + int64_t shared_frame_id, + RTPVideoHeader& rtp_video_header) { + const auto& vp9_header = + absl::get(rtp_video_header.video_type_header); + const int num_spatial_layers = vp9_header.num_spatial_layers; + const int num_temporal_layers = kMaxTemporalStreams; + + int spatial_index = + vp9_header.spatial_idx != kNoSpatialIdx ? vp9_header.spatial_idx : 0; + int temporal_index = + vp9_header.temporal_idx != kNoTemporalIdx ? vp9_header.temporal_idx : 0; + + if (spatial_index >= num_spatial_layers || + temporal_index >= num_temporal_layers || + num_spatial_layers > RtpGenericFrameDescriptor::kMaxSpatialLayers) { + // Prefer to generate no generic layering than an inconsistent one. + return; + } + + RTPVideoHeader::GenericDescriptorInfo& result = + rtp_video_header.generic.emplace(); + + result.frame_id = shared_frame_id; + result.spatial_index = spatial_index; + result.temporal_index = temporal_index; + + result.decode_target_indications.reserve(num_spatial_layers * + num_temporal_layers); + for (int sid = 0; sid < num_spatial_layers; ++sid) { + for (int tid = 0; tid < num_temporal_layers; ++tid) { + DecodeTargetIndication dti; + if (sid < spatial_index || tid < temporal_index) { + dti = DecodeTargetIndication::kNotPresent; + } else if (spatial_index != sid && + vp9_header.non_ref_for_inter_layer_pred) { + dti = DecodeTargetIndication::kNotPresent; + } else if (sid == spatial_index && tid == temporal_index) { + // Assume that if frame is decodable, all of its own layer is decodable. 
+ dti = DecodeTargetIndication::kSwitch; + } else if (sid == spatial_index && vp9_header.temporal_up_switch) { + dti = DecodeTargetIndication::kSwitch; + } else if (!vp9_header.inter_pic_predicted) { + // Key frame or spatial upswitch + dti = DecodeTargetIndication::kSwitch; + } else { + // Make no other assumptions. That should be safe, though suboptimal. + // To provide more accurate dti, encoder wrapper should fill in + // CodecSpecificInfo::generic_frame_info + dti = DecodeTargetIndication::kRequired; + } + result.decode_target_indications.push_back(dti); + } + } + + // Calculate frame dependencies. + static constexpr int kPictureDiffLimit = 128; + if (last_vp9_frame_id_.empty()) { + // Create the array only if it is ever used. + last_vp9_frame_id_.resize(kPictureDiffLimit); + } + if (vp9_header.inter_layer_predicted && spatial_index > 0) { + result.dependencies.push_back( + last_vp9_frame_id_[vp9_header.picture_id % kPictureDiffLimit] + [spatial_index - 1]); + } + if (vp9_header.inter_pic_predicted) { + for (size_t i = 0; i < vp9_header.num_ref_pics; ++i) { + // picture_id is 15 bit number that wraps around. Though undeflow may + // produce picture that exceeds 2^15, it is ok because in this + // code block only last 7 bits of the picture_id are used. + uint16_t depend_on = vp9_header.picture_id - vp9_header.pid_diff[i]; + result.dependencies.push_back( + last_vp9_frame_id_[depend_on % kPictureDiffLimit][spatial_index]); + } + } + last_vp9_frame_id_[vp9_header.picture_id % kPictureDiffLimit][spatial_index] = + shared_frame_id; + + // Calculate chains, asuming chain includes all frames with temporal_id = 0 + if (!vp9_header.inter_pic_predicted && !vp9_header.inter_layer_predicted) { + // Assume frames without dependencies also reset chains. 
+ for (int sid = spatial_index; sid < num_spatial_layers; ++sid) { + chain_last_frame_id_[sid] = -1; + } + } + result.chain_diffs.resize(num_spatial_layers); + for (int sid = 0; sid < num_spatial_layers; ++sid) { + if (chain_last_frame_id_[sid] == -1) { + result.chain_diffs[sid] = 0; + continue; + } + result.chain_diffs[sid] = shared_frame_id - chain_last_frame_id_[sid]; + } + + if (temporal_index == 0) { + chain_last_frame_id_[spatial_index] = shared_frame_id; + if (!vp9_header.non_ref_for_inter_layer_pred) { + for (int sid = spatial_index + 1; sid < num_spatial_layers; ++sid) { + chain_last_frame_id_[sid] = shared_frame_id; + } + } + } +} + void RtpPayloadParams::SetDependenciesVp8Deprecated( const CodecSpecificInfoVP8& vp8_info, int64_t shared_frame_id, diff --git a/call/rtp_payload_params.h b/call/rtp_payload_params.h index fa51efd7cc..da53cbc5c4 100644 --- a/call/rtp_payload_params.h +++ b/call/rtp_payload_params.h @@ -12,6 +12,7 @@ #define CALL_RTP_PAYLOAD_PARAMS_H_ #include +#include #include "absl/types/optional.h" #include "api/transport/webrtc_key_value_config.h" @@ -41,6 +42,14 @@ class RtpPayloadParams final { const CodecSpecificInfo* codec_specific_info, int64_t shared_frame_id); + // Returns structure that aligns with simulated generic info for VP9. + // The templates allow to produce valid dependency descriptor for any vp9 + // stream with up to 4 temporal layers. The set of the templates is not tuned + // for any paricular structure thus dependency descriptor would use more bytes + // on the wire than with tuned templates. 
+ static FrameDependencyStructure MinimalisticVp9Structure( + const CodecSpecificInfoVP9& vp9); + uint32_t ssrc() const; RtpPayloadState state() const; @@ -50,8 +59,7 @@ class RtpPayloadParams final { bool first_frame_in_picture); RTPVideoHeader::GenericDescriptorInfo GenericDescriptorFromFrameInfo( const GenericFrameInfo& frame_info, - int64_t frame_id, - VideoFrameType frame_type); + int64_t frame_id); void SetGeneric(const CodecSpecificInfo* codec_specific_info, int64_t frame_id, bool is_keyframe, @@ -62,6 +70,10 @@ class RtpPayloadParams final { bool is_keyframe, RTPVideoHeader* rtp_video_header); + void Vp9ToGeneric(const CodecSpecificInfoVP9& vp9_info, + int64_t shared_frame_id, + RTPVideoHeader& rtp_video_header); + void H264ToGeneric(const CodecSpecificInfoH264& h264_info, int64_t shared_frame_id, bool is_keyframe, @@ -95,6 +107,13 @@ class RtpPayloadParams final { std::array, RtpGenericFrameDescriptor::kMaxSpatialLayers> last_shared_frame_id_; + // circular buffer of frame ids for the last 128 vp9 pictures. + // ids for the `picture_id` are stored at the index `picture_id % 128`. + std::vector> + last_vp9_frame_id_; + // Last frame id for each chain + std::array + chain_last_frame_id_; // TODO(eladalon): When additional codecs are supported, // set kMaxCodecBuffersCount to the max() of these codecs' buffer count. 
@@ -114,7 +133,7 @@ class RtpPayloadParams final { RtpPayloadState state_; const bool generic_picture_id_experiment_; - const bool generic_descriptor_experiment_; + const bool simulate_generic_vp9_; }; } // namespace webrtc #endif // CALL_RTP_PAYLOAD_PARAMS_H_ diff --git a/call/rtp_payload_params_unittest.cc b/call/rtp_payload_params_unittest.cc index 75860a806e..7db38dbcb8 100644 --- a/call/rtp_payload_params_unittest.cc +++ b/call/rtp_payload_params_unittest.cc @@ -26,10 +26,12 @@ #include "modules/video_coding/codecs/vp8/include/vp8_globals.h" #include "modules/video_coding/codecs/vp9/include/vp9_globals.h" #include "modules/video_coding/include/video_codec_interface.h" +#include "test/explicit_key_value_config.h" #include "test/field_trial.h" #include "test/gmock.h" #include "test/gtest.h" +using ::testing::Each; using ::testing::ElementsAre; using ::testing::IsEmpty; using ::testing::SizeIs; @@ -103,7 +105,7 @@ TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) { codec_info.codecSpecific.VP9.num_spatial_layers = 3; codec_info.codecSpecific.VP9.first_frame_in_picture = true; codec_info.codecSpecific.VP9.temporal_idx = 2; - codec_info.codecSpecific.VP9.end_of_picture = false; + codec_info.end_of_picture = false; RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info, kDontCare); @@ -120,12 +122,11 @@ TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) { EXPECT_EQ(vp9_header.spatial_idx, encoded_image.SpatialIndex()); EXPECT_EQ(vp9_header.num_spatial_layers, codec_info.codecSpecific.VP9.num_spatial_layers); - EXPECT_EQ(vp9_header.end_of_picture, - codec_info.codecSpecific.VP9.end_of_picture); + EXPECT_EQ(vp9_header.end_of_picture, codec_info.end_of_picture); // Next spatial layer. 
codec_info.codecSpecific.VP9.first_frame_in_picture = false; - codec_info.codecSpecific.VP9.end_of_picture = true; + codec_info.end_of_picture = true; encoded_image.SetSpatialIndex(1); ColorSpace color_space( @@ -144,56 +145,7 @@ TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) { EXPECT_EQ(vp9_header.spatial_idx, encoded_image.SpatialIndex()); EXPECT_EQ(vp9_header.num_spatial_layers, codec_info.codecSpecific.VP9.num_spatial_layers); - EXPECT_EQ(vp9_header.end_of_picture, - codec_info.codecSpecific.VP9.end_of_picture); -} - -TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_H264) { - RtpPayloadState state; - state.picture_id = kPictureId; - state.tl0_pic_idx = kInitialTl0PicIdx1; - RtpPayloadParams params(kSsrc1, &state, FieldTrialBasedConfig()); - - EncodedImage encoded_image; - CodecSpecificInfo codec_info; - CodecSpecificInfoH264* h264info = &codec_info.codecSpecific.H264; - codec_info.codecType = kVideoCodecH264; - h264info->packetization_mode = H264PacketizationMode::SingleNalUnit; - h264info->temporal_idx = kNoTemporalIdx; - - RTPVideoHeader header = - params.GetRtpVideoHeader(encoded_image, &codec_info, 10); - - EXPECT_EQ(0, header.simulcastIdx); - EXPECT_EQ(kVideoCodecH264, header.codec); - const auto& h264 = absl::get(header.video_type_header); - EXPECT_EQ(H264PacketizationMode::SingleNalUnit, h264.packetization_mode); - - // test temporal param 1 - h264info->temporal_idx = 1; - h264info->base_layer_sync = true; - h264info->idr_frame = false; - - header = params.GetRtpVideoHeader(encoded_image, &codec_info, 20); - - EXPECT_EQ(kVideoCodecH264, header.codec); - EXPECT_EQ(header.frame_marking.tl0_pic_idx, kInitialTl0PicIdx1); - EXPECT_EQ(header.frame_marking.temporal_id, h264info->temporal_idx); - EXPECT_EQ(header.frame_marking.base_layer_sync, h264info->base_layer_sync); - EXPECT_EQ(header.frame_marking.independent_frame, h264info->idr_frame); - - // test temporal param 2 - h264info->temporal_idx = 0; - h264info->base_layer_sync = false; - 
h264info->idr_frame = true; - - header = params.GetRtpVideoHeader(encoded_image, &codec_info, 30); - - EXPECT_EQ(kVideoCodecH264, header.codec); - EXPECT_EQ(header.frame_marking.tl0_pic_idx, kInitialTl0PicIdx1 + 1); - EXPECT_EQ(header.frame_marking.temporal_id, h264info->temporal_idx); - EXPECT_EQ(header.frame_marking.base_layer_sync, h264info->base_layer_sync); - EXPECT_EQ(header.frame_marking.independent_frame, h264info->idr_frame); + EXPECT_EQ(vp9_header.end_of_picture, codec_info.end_of_picture); } TEST(RtpPayloadParamsTest, PictureIdIsSetForVp8) { @@ -350,8 +302,6 @@ TEST(RtpPayloadParamsTest, PictureIdForOldGenericFormat) { } TEST(RtpPayloadParamsTest, GenericDescriptorForGenericCodec) { - test::ScopedFieldTrials generic_picture_id( - "WebRTC-GenericDescriptor/Enabled/"); RtpPayloadState state{}; EncodedImage encoded_image; @@ -376,8 +326,6 @@ TEST(RtpPayloadParamsTest, GenericDescriptorForGenericCodec) { } TEST(RtpPayloadParamsTest, SetsGenericFromGenericFrameInfo) { - test::ScopedFieldTrials generic_picture_id( - "WebRTC-GenericDescriptor/Enabled/"); RtpPayloadState state; EncodedImage encoded_image; CodecSpecificInfo codec_info; @@ -426,9 +374,7 @@ class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test { enum LayerSync { kNoSync, kSync }; RtpPayloadParamsVp8ToGenericTest() - : generic_descriptor_field_trial_("WebRTC-GenericDescriptor/Enabled/"), - state_(), - params_(123, &state_, trials_config_) {} + : state_(), params_(123, &state_, trials_config_) {} void ConvertAndCheck(int temporal_index, int64_t shared_frame_id, @@ -464,7 +410,6 @@ class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test { } protected: - test::ScopedFieldTrials generic_descriptor_field_trial_; FieldTrialBasedConfig trials_config_; RtpPayloadState state_; RtpPayloadParams params_; @@ -518,14 +463,416 @@ TEST_F(RtpPayloadParamsVp8ToGenericTest, FrameIdGaps) { ConvertAndCheck(1, 20, VideoFrameType::kVideoFrameDelta, kNoSync, {10, 15}); } +class 
RtpPayloadParamsVp9ToGenericTest : public ::testing::Test { + protected: + RtpPayloadParamsVp9ToGenericTest() + : field_trials_("WebRTC-Vp9DependencyDescriptor/Enabled/") {} + + test::ExplicitKeyValueConfig field_trials_; + RtpPayloadState state_; +}; + +TEST_F(RtpPayloadParamsVp9ToGenericTest, NoScalability) { + RtpPayloadParams params(/*ssrc=*/123, &state_, field_trials_); + + EncodedImage encoded_image; + CodecSpecificInfo codec_info; + codec_info.codecType = kVideoCodecVP9; + codec_info.codecSpecific.VP9.num_spatial_layers = 1; + codec_info.codecSpecific.VP9.temporal_idx = kNoTemporalIdx; + codec_info.codecSpecific.VP9.first_frame_in_picture = true; + codec_info.end_of_picture = true; + + // Key frame. + encoded_image._frameType = VideoFrameType::kVideoFrameKey; + codec_info.codecSpecific.VP9.inter_pic_predicted = false; + codec_info.codecSpecific.VP9.num_ref_pics = 0; + RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info, + /*shared_frame_id=*/1); + + ASSERT_TRUE(header.generic); + EXPECT_EQ(header.generic->spatial_index, 0); + EXPECT_EQ(header.generic->temporal_index, 0); + EXPECT_EQ(header.generic->frame_id, 1); + ASSERT_THAT(header.generic->decode_target_indications, Not(IsEmpty())); + EXPECT_EQ(header.generic->decode_target_indications[0], + DecodeTargetIndication::kSwitch); + EXPECT_THAT(header.generic->dependencies, IsEmpty()); + EXPECT_THAT(header.generic->chain_diffs, ElementsAre(0)); + + // Delta frame. 
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta; + codec_info.codecSpecific.VP9.inter_pic_predicted = true; + codec_info.codecSpecific.VP9.num_ref_pics = 1; + codec_info.codecSpecific.VP9.p_diff[0] = 1; + header = params.GetRtpVideoHeader(encoded_image, &codec_info, + /*shared_frame_id=*/3); + + ASSERT_TRUE(header.generic); + EXPECT_EQ(header.generic->spatial_index, 0); + EXPECT_EQ(header.generic->temporal_index, 0); + EXPECT_EQ(header.generic->frame_id, 3); + ASSERT_THAT(header.generic->decode_target_indications, Not(IsEmpty())); + EXPECT_EQ(header.generic->decode_target_indications[0], + DecodeTargetIndication::kSwitch); + EXPECT_THAT(header.generic->dependencies, ElementsAre(1)); + // previous frame in the chain was frame#1, + EXPECT_THAT(header.generic->chain_diffs, ElementsAre(3 - 1)); +} + +TEST_F(RtpPayloadParamsVp9ToGenericTest, TemporalScalabilityWith2Layers) { + // Test with 2 temporal layers structure that is not used by webrtc: + // 1---3 5 + // / / / ... + // 0---2---4--- + RtpPayloadParams params(/*ssrc=*/123, &state_, field_trials_); + + EncodedImage image; + CodecSpecificInfo info; + info.codecType = kVideoCodecVP9; + info.codecSpecific.VP9.num_spatial_layers = 1; + info.codecSpecific.VP9.first_frame_in_picture = true; + info.end_of_picture = true; + + RTPVideoHeader headers[6]; + // Key frame. + image._frameType = VideoFrameType::kVideoFrameKey; + info.codecSpecific.VP9.inter_pic_predicted = false; + info.codecSpecific.VP9.num_ref_pics = 0; + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 0; + headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1); + + // Delta frames. 
+ info.codecSpecific.VP9.inter_pic_predicted = true; + image._frameType = VideoFrameType::kVideoFrameDelta; + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 1; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 1; + headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3); + + info.codecSpecific.VP9.temporal_up_switch = false; + info.codecSpecific.VP9.temporal_idx = 0; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 2; + headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5); + + info.codecSpecific.VP9.temporal_up_switch = false; + info.codecSpecific.VP9.temporal_idx = 1; + info.codecSpecific.VP9.num_ref_pics = 2; + info.codecSpecific.VP9.p_diff[0] = 1; + info.codecSpecific.VP9.p_diff[1] = 2; + headers[3] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/7); + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 0; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 2; + headers[4] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/9); + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 1; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 1; + headers[5] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/11); + + ASSERT_TRUE(headers[0].generic); + int num_decode_targets = headers[0].generic->decode_target_indications.size(); + ASSERT_GE(num_decode_targets, 2); + + for (int frame_idx = 0; frame_idx < 6; ++frame_idx) { + const RTPVideoHeader& header = headers[frame_idx]; + ASSERT_TRUE(header.generic); + EXPECT_EQ(header.generic->spatial_index, 0); + EXPECT_EQ(header.generic->temporal_index, frame_idx % 2); + EXPECT_EQ(header.generic->frame_id, 1 + 2 * frame_idx); + ASSERT_THAT(header.generic->decode_target_indications, + SizeIs(num_decode_targets)); + // 
Expect only T0 frames are needed for the 1st decode target. + if (header.generic->temporal_index == 0) { + EXPECT_NE(header.generic->decode_target_indications[0], + DecodeTargetIndication::kNotPresent); + } else { + EXPECT_EQ(header.generic->decode_target_indications[0], + DecodeTargetIndication::kNotPresent); + } + // Expect all frames are needed for the 2nd decode target. + EXPECT_NE(header.generic->decode_target_indications[1], + DecodeTargetIndication::kNotPresent); + } + + // Expect switch at every beginning of the pattern. + EXPECT_THAT(headers[0].generic->decode_target_indications, + Each(DecodeTargetIndication::kSwitch)); + EXPECT_THAT(headers[4].generic->decode_target_indications, + Each(DecodeTargetIndication::kSwitch)); + + EXPECT_THAT(headers[0].generic->dependencies, IsEmpty()); // T0, 1 + EXPECT_THAT(headers[1].generic->dependencies, ElementsAre(1)); // T1, 3 + EXPECT_THAT(headers[2].generic->dependencies, ElementsAre(1)); // T0, 5 + EXPECT_THAT(headers[3].generic->dependencies, ElementsAre(5, 3)); // T1, 7 + EXPECT_THAT(headers[4].generic->dependencies, ElementsAre(5)); // T0, 9 + EXPECT_THAT(headers[5].generic->dependencies, ElementsAre(9)); // T1, 11 + + EXPECT_THAT(headers[0].generic->chain_diffs, ElementsAre(0)); + EXPECT_THAT(headers[1].generic->chain_diffs, ElementsAre(2)); + EXPECT_THAT(headers[2].generic->chain_diffs, ElementsAre(4)); + EXPECT_THAT(headers[3].generic->chain_diffs, ElementsAre(2)); + EXPECT_THAT(headers[4].generic->chain_diffs, ElementsAre(4)); + EXPECT_THAT(headers[5].generic->chain_diffs, ElementsAre(2)); +} + +TEST_F(RtpPayloadParamsVp9ToGenericTest, TemporalScalabilityWith3Layers) { + // Test with 3 temporal layers structure that is not used by webrtc, but used + // by chromium: https://imgur.com/pURAGvp + RtpPayloadParams params(/*ssrc=*/123, &state_, field_trials_); + + EncodedImage image; + CodecSpecificInfo info; + info.codecType = kVideoCodecVP9; + info.codecSpecific.VP9.num_spatial_layers = 1; + 
info.codecSpecific.VP9.first_frame_in_picture = true; + info.end_of_picture = true; + + RTPVideoHeader headers[9]; + // Key frame. + image._frameType = VideoFrameType::kVideoFrameKey; + info.codecSpecific.VP9.inter_pic_predicted = false; + info.codecSpecific.VP9.num_ref_pics = 0; + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 0; + headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1); + + // Delta frames. + info.codecSpecific.VP9.inter_pic_predicted = true; + image._frameType = VideoFrameType::kVideoFrameDelta; + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 2; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 1; + headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3); + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 1; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 2; + headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5); + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 2; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 1; + headers[3] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/7); + + info.codecSpecific.VP9.temporal_up_switch = false; + info.codecSpecific.VP9.temporal_idx = 0; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 4; + headers[4] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/9); + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 2; + info.codecSpecific.VP9.num_ref_pics = 2; + info.codecSpecific.VP9.p_diff[0] = 1; + info.codecSpecific.VP9.p_diff[1] = 3; + headers[5] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/11); + + info.codecSpecific.VP9.temporal_up_switch = false; + 
info.codecSpecific.VP9.temporal_idx = 1; + info.codecSpecific.VP9.num_ref_pics = 2; + info.codecSpecific.VP9.p_diff[0] = 2; + info.codecSpecific.VP9.p_diff[1] = 4; + headers[6] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/13); + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 2; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 1; + headers[7] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/15); + + info.codecSpecific.VP9.temporal_up_switch = true; + info.codecSpecific.VP9.temporal_idx = 0; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 4; + headers[8] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/17); + + ASSERT_TRUE(headers[0].generic); + int num_decode_targets = headers[0].generic->decode_target_indications.size(); + ASSERT_GE(num_decode_targets, 3); + + for (int frame_idx = 0; frame_idx < 9; ++frame_idx) { + const RTPVideoHeader& header = headers[frame_idx]; + ASSERT_TRUE(header.generic); + EXPECT_EQ(header.generic->spatial_index, 0); + EXPECT_EQ(header.generic->frame_id, 1 + 2 * frame_idx); + ASSERT_THAT(header.generic->decode_target_indications, + SizeIs(num_decode_targets)); + // Expect only T0 frames are needed for the 1st decode target. + if (header.generic->temporal_index == 0) { + EXPECT_NE(header.generic->decode_target_indications[0], + DecodeTargetIndication::kNotPresent); + } else { + EXPECT_EQ(header.generic->decode_target_indications[0], + DecodeTargetIndication::kNotPresent); + } + // Expect only T0 and T1 frames are needed for the 2nd decode target. + if (header.generic->temporal_index <= 1) { + EXPECT_NE(header.generic->decode_target_indications[1], + DecodeTargetIndication::kNotPresent); + } else { + EXPECT_EQ(header.generic->decode_target_indications[1], + DecodeTargetIndication::kNotPresent); + } + // Expect all frames are needed for the 3rd decode target. 
+ EXPECT_NE(header.generic->decode_target_indications[2], + DecodeTargetIndication::kNotPresent); + } + + EXPECT_EQ(headers[0].generic->temporal_index, 0); + EXPECT_EQ(headers[1].generic->temporal_index, 2); + EXPECT_EQ(headers[2].generic->temporal_index, 1); + EXPECT_EQ(headers[3].generic->temporal_index, 2); + EXPECT_EQ(headers[4].generic->temporal_index, 0); + EXPECT_EQ(headers[5].generic->temporal_index, 2); + EXPECT_EQ(headers[6].generic->temporal_index, 1); + EXPECT_EQ(headers[7].generic->temporal_index, 2); + EXPECT_EQ(headers[8].generic->temporal_index, 0); + + // Expect switch at every beginning of the pattern. + EXPECT_THAT(headers[0].generic->decode_target_indications, + Each(DecodeTargetIndication::kSwitch)); + EXPECT_THAT(headers[8].generic->decode_target_indications, + Each(DecodeTargetIndication::kSwitch)); + + EXPECT_THAT(headers[0].generic->dependencies, IsEmpty()); // T0, 1 + EXPECT_THAT(headers[1].generic->dependencies, ElementsAre(1)); // T2, 3 + EXPECT_THAT(headers[2].generic->dependencies, ElementsAre(1)); // T1, 5 + EXPECT_THAT(headers[3].generic->dependencies, ElementsAre(5)); // T2, 7 + EXPECT_THAT(headers[4].generic->dependencies, ElementsAre(1)); // T0, 9 + EXPECT_THAT(headers[5].generic->dependencies, ElementsAre(9, 5)); // T2, 11 + EXPECT_THAT(headers[6].generic->dependencies, ElementsAre(9, 5)); // T1, 13 + EXPECT_THAT(headers[7].generic->dependencies, ElementsAre(13)); // T2, 15 + EXPECT_THAT(headers[8].generic->dependencies, ElementsAre(9)); // T0, 17 + + EXPECT_THAT(headers[0].generic->chain_diffs, ElementsAre(0)); + EXPECT_THAT(headers[1].generic->chain_diffs, ElementsAre(2)); + EXPECT_THAT(headers[2].generic->chain_diffs, ElementsAre(4)); + EXPECT_THAT(headers[3].generic->chain_diffs, ElementsAre(6)); + EXPECT_THAT(headers[4].generic->chain_diffs, ElementsAre(8)); + EXPECT_THAT(headers[5].generic->chain_diffs, ElementsAre(2)); + EXPECT_THAT(headers[6].generic->chain_diffs, ElementsAre(4)); + 
EXPECT_THAT(headers[7].generic->chain_diffs, ElementsAre(6)); + EXPECT_THAT(headers[8].generic->chain_diffs, ElementsAre(8)); +} + +TEST_F(RtpPayloadParamsVp9ToGenericTest, SpatialScalabilityKSvc) { + // 1---3-- + // | ... + // 0---2-- + RtpPayloadParams params(/*ssrc=*/123, &state_, field_trials_); + + EncodedImage image; + CodecSpecificInfo info; + info.codecType = kVideoCodecVP9; + info.codecSpecific.VP9.num_spatial_layers = 2; + info.codecSpecific.VP9.first_frame_in_picture = true; + + RTPVideoHeader headers[4]; + // Key frame. + image._frameType = VideoFrameType::kVideoFrameKey; + image.SetSpatialIndex(0); + info.codecSpecific.VP9.inter_pic_predicted = false; + info.codecSpecific.VP9.inter_layer_predicted = false; + info.codecSpecific.VP9.non_ref_for_inter_layer_pred = false; + info.codecSpecific.VP9.num_ref_pics = 0; + info.codecSpecific.VP9.first_frame_in_picture = true; + info.end_of_picture = false; + headers[0] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/1); + + image.SetSpatialIndex(1); + info.codecSpecific.VP9.inter_layer_predicted = true; + info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true; + info.codecSpecific.VP9.first_frame_in_picture = false; + info.end_of_picture = true; + headers[1] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/3); + + // Delta frames. 
+ info.codecSpecific.VP9.inter_pic_predicted = true; + image._frameType = VideoFrameType::kVideoFrameDelta; + info.codecSpecific.VP9.num_ref_pics = 1; + info.codecSpecific.VP9.p_diff[0] = 1; + + image.SetSpatialIndex(0); + info.codecSpecific.VP9.inter_layer_predicted = false; + info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true; + info.codecSpecific.VP9.first_frame_in_picture = true; + info.end_of_picture = false; + headers[2] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/5); + + image.SetSpatialIndex(1); + info.codecSpecific.VP9.inter_layer_predicted = false; + info.codecSpecific.VP9.non_ref_for_inter_layer_pred = true; + info.codecSpecific.VP9.first_frame_in_picture = false; + info.end_of_picture = true; + headers[3] = params.GetRtpVideoHeader(image, &info, /*shared_frame_id=*/7); + + ASSERT_TRUE(headers[0].generic); + int num_decode_targets = headers[0].generic->decode_target_indications.size(); + // Rely on implementation detail there are always kMaxTemporalStreams temporal + // layers assumed, in particular assume Decode Target#0 matches layer S0T0, + // and Decode Target#kMaxTemporalStreams matches layer S1T0. + ASSERT_EQ(num_decode_targets, kMaxTemporalStreams * 2); + + for (int frame_idx = 0; frame_idx < 4; ++frame_idx) { + const RTPVideoHeader& header = headers[frame_idx]; + ASSERT_TRUE(header.generic); + EXPECT_EQ(header.generic->spatial_index, frame_idx % 2); + EXPECT_EQ(header.generic->temporal_index, 0); + EXPECT_EQ(header.generic->frame_id, 1 + 2 * frame_idx); + ASSERT_THAT(header.generic->decode_target_indications, + SizeIs(num_decode_targets)); + } + + // Expect S0 key frame is switch for both Decode Targets. + EXPECT_EQ(headers[0].generic->decode_target_indications[0], + DecodeTargetIndication::kSwitch); + EXPECT_EQ(headers[0].generic->decode_target_indications[kMaxTemporalStreams], + DecodeTargetIndication::kSwitch); + // S1 key frame is only needed for the 2nd Decode Targets. 
+ EXPECT_EQ(headers[1].generic->decode_target_indications[0], + DecodeTargetIndication::kNotPresent); + EXPECT_NE(headers[1].generic->decode_target_indications[kMaxTemporalStreams], + DecodeTargetIndication::kNotPresent); + // Delta frames are only needed for their own Decode Targets. + EXPECT_NE(headers[2].generic->decode_target_indications[0], + DecodeTargetIndication::kNotPresent); + EXPECT_EQ(headers[2].generic->decode_target_indications[kMaxTemporalStreams], + DecodeTargetIndication::kNotPresent); + EXPECT_EQ(headers[3].generic->decode_target_indications[0], + DecodeTargetIndication::kNotPresent); + EXPECT_NE(headers[3].generic->decode_target_indications[kMaxTemporalStreams], + DecodeTargetIndication::kNotPresent); + + EXPECT_THAT(headers[0].generic->dependencies, IsEmpty()); // S0, 1 + EXPECT_THAT(headers[1].generic->dependencies, ElementsAre(1)); // S1, 3 + EXPECT_THAT(headers[2].generic->dependencies, ElementsAre(1)); // S0, 5 + EXPECT_THAT(headers[3].generic->dependencies, ElementsAre(3)); // S1, 7 + + EXPECT_THAT(headers[0].generic->chain_diffs, ElementsAre(0, 0)); + EXPECT_THAT(headers[1].generic->chain_diffs, ElementsAre(2, 2)); + EXPECT_THAT(headers[2].generic->chain_diffs, ElementsAre(4, 2)); + EXPECT_THAT(headers[3].generic->chain_diffs, ElementsAre(2, 4)); +} + class RtpPayloadParamsH264ToGenericTest : public ::testing::Test { public: enum LayerSync { kNoSync, kSync }; RtpPayloadParamsH264ToGenericTest() - : generic_descriptor_field_trial_("WebRTC-GenericDescriptor/Enabled/"), - state_(), - params_(123, &state_, trials_config_) {} + : state_(), params_(123, &state_, trials_config_) {} void ConvertAndCheck(int temporal_index, int64_t shared_frame_id, @@ -561,7 +908,6 @@ class RtpPayloadParamsH264ToGenericTest : public ::testing::Test { } protected: - test::ScopedFieldTrials generic_descriptor_field_trial_; FieldTrialBasedConfig trials_config_; RtpPayloadState state_; RtpPayloadParams params_; diff --git a/call/rtp_rtcp_demuxer_helper.cc 
b/call/rtp_rtcp_demuxer_helper.cc deleted file mode 100644 index 125169b077..0000000000 --- a/call/rtp_rtcp_demuxer_helper.cc +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "call/rtp_rtcp_demuxer_helper.h" - -#include "modules/rtp_rtcp/source/byte_io.h" -#include "modules/rtp_rtcp/source/rtcp_packet/bye.h" -#include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" -#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h" -#include "modules/rtp_rtcp/source/rtcp_packet/psfb.h" -#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h" -#include "modules/rtp_rtcp/source/rtcp_packet/rtpfb.h" -#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h" - -namespace webrtc { - -absl::optional ParseRtcpPacketSenderSsrc( - rtc::ArrayView packet) { - rtcp::CommonHeader header; - for (const uint8_t* next_packet = packet.begin(); next_packet < packet.end(); - next_packet = header.NextPacket()) { - if (!header.Parse(next_packet, packet.end() - next_packet)) { - return absl::nullopt; - } - - switch (header.type()) { - case rtcp::Bye::kPacketType: - case rtcp::ExtendedReports::kPacketType: - case rtcp::Psfb::kPacketType: - case rtcp::ReceiverReport::kPacketType: - case rtcp::Rtpfb::kPacketType: - case rtcp::SenderReport::kPacketType: { - // Sender SSRC at the beginning of the RTCP payload. 
- if (header.payload_size_bytes() >= sizeof(uint32_t)) { - const uint32_t ssrc_sender = - ByteReader::ReadBigEndian(header.payload()); - return ssrc_sender; - } else { - return absl::nullopt; - } - } - } - } - - return absl::nullopt; -} - -} // namespace webrtc diff --git a/call/rtp_rtcp_demuxer_helper.h b/call/rtp_rtcp_demuxer_helper.h deleted file mode 100644 index 6134d56143..0000000000 --- a/call/rtp_rtcp_demuxer_helper.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef CALL_RTP_RTCP_DEMUXER_HELPER_H_ -#define CALL_RTP_RTCP_DEMUXER_HELPER_H_ - -#include -#include - -#include "absl/types/optional.h" -#include "api/array_view.h" - -namespace webrtc { - -// TODO(eladalon): Remove this in the next CL. 
-template -bool MultimapAssociationExists(const Container& multimap, - const typename Container::key_type& key, - const typename Container::mapped_type& val) { - auto it_range = multimap.equal_range(key); - using Reference = typename Container::const_reference; - return std::any_of(it_range.first, it_range.second, - [val](Reference elem) { return elem.second == val; }); -} - -template -size_t RemoveFromMultimapByValue(Container* multimap, const Value& value) { - size_t count = 0; - for (auto it = multimap->begin(); it != multimap->end();) { - if (it->second == value) { - it = multimap->erase(it); - ++count; - } else { - ++it; - } - } - return count; -} - -template -size_t RemoveFromMapByValue(Map* map, const Value& value) { - size_t count = 0; - for (auto it = map->begin(); it != map->end();) { - if (it->second == value) { - it = map->erase(it); - ++count; - } else { - ++it; - } - } - return count; -} - -template -bool ContainerHasKey(const Container& c, const Key& k) { - return std::find(c.cbegin(), c.cend(), k) != c.cend(); -} - -// TODO(eladalon): Remove this in the next CL. 
-template -bool MultimapHasValue(const Container& c, - const typename Container::mapped_type& v) { - auto predicate = [v](const typename Container::value_type& it) { - return it.second == v; - }; - return std::any_of(c.cbegin(), c.cend(), predicate); -} - -template -bool MapHasValue(const Map& map, const typename Map::mapped_type& value) { - auto predicate = [value](const typename Map::value_type& it) { - return it.second == value; - }; - return std::any_of(map.cbegin(), map.cend(), predicate); -} - -template -bool MultimapHasKey(const Container& c, - const typename Container::key_type& key) { - auto it_range = c.equal_range(key); - return it_range.first != it_range.second; -} - -absl::optional ParseRtcpPacketSenderSsrc( - rtc::ArrayView packet); - -} // namespace webrtc - -#endif // CALL_RTP_RTCP_DEMUXER_HELPER_H_ diff --git a/call/rtp_rtcp_demuxer_helper_unittest.cc b/call/rtp_rtcp_demuxer_helper_unittest.cc deleted file mode 100644 index 17e6617fb0..0000000000 --- a/call/rtp_rtcp_demuxer_helper_unittest.cc +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "call/rtp_rtcp_demuxer_helper.h" - -#include - -#include - -#include "modules/rtp_rtcp/source/rtcp_packet/bye.h" -#include "modules/rtp_rtcp/source/rtcp_packet/extended_jitter_report.h" -#include "modules/rtp_rtcp/source/rtcp_packet/extended_reports.h" -#include "modules/rtp_rtcp/source/rtcp_packet/pli.h" -#include "modules/rtp_rtcp/source/rtcp_packet/rapid_resync_request.h" -#include "modules/rtp_rtcp/source/rtcp_packet/receiver_report.h" -#include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h" -#include "rtc_base/arraysize.h" -#include "rtc_base/buffer.h" -#include "test/gtest.h" - -namespace webrtc { - -namespace { -constexpr uint32_t kSsrc = 8374; -} // namespace - -TEST(RtpRtcpDemuxerHelperTest, ParseRtcpPacketSenderSsrc_ByePacket) { - webrtc::rtcp::Bye rtcp_packet; - rtcp_packet.SetSenderSsrc(kSsrc); - rtc::Buffer raw_packet = rtcp_packet.Build(); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc(raw_packet); - EXPECT_EQ(ssrc, kSsrc); -} - -TEST(RtpRtcpDemuxerHelperTest, - ParseRtcpPacketSenderSsrc_ExtendedReportsPacket) { - webrtc::rtcp::ExtendedReports rtcp_packet; - rtcp_packet.SetSenderSsrc(kSsrc); - rtc::Buffer raw_packet = rtcp_packet.Build(); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc(raw_packet); - EXPECT_EQ(ssrc, kSsrc); -} - -TEST(RtpRtcpDemuxerHelperTest, ParseRtcpPacketSenderSsrc_PsfbPacket) { - webrtc::rtcp::Pli rtcp_packet; // Psfb is abstract; use a subclass. 
- rtcp_packet.SetSenderSsrc(kSsrc); - rtc::Buffer raw_packet = rtcp_packet.Build(); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc(raw_packet); - EXPECT_EQ(ssrc, kSsrc); -} - -TEST(RtpRtcpDemuxerHelperTest, ParseRtcpPacketSenderSsrc_ReceiverReportPacket) { - webrtc::rtcp::ReceiverReport rtcp_packet; - rtcp_packet.SetSenderSsrc(kSsrc); - rtc::Buffer raw_packet = rtcp_packet.Build(); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc(raw_packet); - EXPECT_EQ(ssrc, kSsrc); -} - -TEST(RtpRtcpDemuxerHelperTest, ParseRtcpPacketSenderSsrc_RtpfbPacket) { - // Rtpfb is abstract; use a subclass. - webrtc::rtcp::RapidResyncRequest rtcp_packet; - rtcp_packet.SetSenderSsrc(kSsrc); - rtc::Buffer raw_packet = rtcp_packet.Build(); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc(raw_packet); - EXPECT_EQ(ssrc, kSsrc); -} - -TEST(RtpRtcpDemuxerHelperTest, ParseRtcpPacketSenderSsrc_SenderReportPacket) { - webrtc::rtcp::SenderReport rtcp_packet; - rtcp_packet.SetSenderSsrc(kSsrc); - rtc::Buffer raw_packet = rtcp_packet.Build(); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc(raw_packet); - EXPECT_EQ(ssrc, kSsrc); -} - -TEST(RtpRtcpDemuxerHelperTest, ParseRtcpPacketSenderSsrc_MalformedRtcpPacket) { - uint8_t garbage[100]; - memset(&garbage[0], 0, arraysize(garbage)); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc(garbage); - EXPECT_FALSE(ssrc); -} - -TEST(RtpRtcpDemuxerHelperTest, - ParseRtcpPacketSenderSsrc_RtcpMessageWithoutSenderSsrc) { - webrtc::rtcp::ExtendedJitterReport rtcp_packet; // Has no sender SSRC. 
- rtc::Buffer raw_packet = rtcp_packet.Build(); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc(raw_packet); - EXPECT_FALSE(ssrc); -} - -TEST(RtpRtcpDemuxerHelperTest, ParseRtcpPacketSenderSsrc_TruncatedRtcpMessage) { - webrtc::rtcp::Bye rtcp_packet; - rtcp_packet.SetSenderSsrc(kSsrc); - rtc::Buffer raw_packet = rtcp_packet.Build(); - - constexpr size_t rtcp_length_bytes = 8; - ASSERT_EQ(rtcp_length_bytes, raw_packet.size()); - - absl::optional ssrc = ParseRtcpPacketSenderSsrc( - rtc::ArrayView(raw_packet.data(), rtcp_length_bytes - 1)); - EXPECT_FALSE(ssrc); -} - -} // namespace webrtc diff --git a/call/rtp_stream_receiver_controller.cc b/call/rtp_stream_receiver_controller.cc index f440b426d6..7150b34bdb 100644 --- a/call/rtp_stream_receiver_controller.cc +++ b/call/rtp_stream_receiver_controller.cc @@ -37,11 +37,7 @@ RtpStreamReceiverController::Receiver::~Receiver() { controller_->RemoveSink(sink_); } -RtpStreamReceiverController::RtpStreamReceiverController() { - // At this level the demuxer is only configured to demux by SSRC, so don't - // worry about MIDs (MIDs are handled by upper layers). 
- demuxer_.set_use_mid(false); -} +RtpStreamReceiverController::RtpStreamReceiverController() {} RtpStreamReceiverController::~RtpStreamReceiverController() = default; @@ -52,19 +48,19 @@ RtpStreamReceiverController::CreateReceiver(uint32_t ssrc, } bool RtpStreamReceiverController::OnRtpPacket(const RtpPacketReceived& packet) { - rtc::CritScope cs(&lock_); + RTC_DCHECK_RUN_ON(&demuxer_sequence_); return demuxer_.OnRtpPacket(packet); } bool RtpStreamReceiverController::AddSink(uint32_t ssrc, RtpPacketSinkInterface* sink) { - rtc::CritScope cs(&lock_); + RTC_DCHECK_RUN_ON(&demuxer_sequence_); return demuxer_.AddSink(ssrc, sink); } size_t RtpStreamReceiverController::RemoveSink( const RtpPacketSinkInterface* sink) { - rtc::CritScope cs(&lock_); + RTC_DCHECK_RUN_ON(&demuxer_sequence_); return demuxer_.RemoveSink(sink); } diff --git a/call/rtp_stream_receiver_controller.h b/call/rtp_stream_receiver_controller.h index 045af3cf8d..284c9fa12f 100644 --- a/call/rtp_stream_receiver_controller.h +++ b/call/rtp_stream_receiver_controller.h @@ -12,9 +12,9 @@ #include +#include "api/sequence_checker.h" #include "call/rtp_demuxer.h" #include "call/rtp_stream_receiver_controller_interface.h" -#include "rtc_base/critical_section.h" namespace webrtc { @@ -58,13 +58,18 @@ class RtpStreamReceiverController RtpPacketSinkInterface* const sink_; }; - // TODO(nisse): Move to a TaskQueue for synchronization. When used - // by Call, we expect construction and all methods but OnRtpPacket - // to be called on the same thread, and OnRtpPacket to be called - // by a single, but possibly distinct, thread. But applications not - // using Call may have use threads differently. - rtc::CriticalSection lock_; - RtpDemuxer demuxer_ RTC_GUARDED_BY(&lock_); + // TODO(bugs.webrtc.org/11993): We expect construction and all methods to be + // called on the same thread/tq. Currently this is the worker thread + // (including OnRtpPacket) but a more natural fit would be the network thread. 
+ // Using a sequence checker to ensure that usage is correct but at the same + // time not require a specific thread/tq, an instance of this class + the + // associated functionality should be easily moved from one execution context + // to another (i.e. when network packets don't hop to the worker thread inside + // of Call). + SequenceChecker demuxer_sequence_; + // At this level the demuxer is only configured to demux by SSRC, so don't + // worry about MIDs (MIDs are handled by upper layers). + RtpDemuxer demuxer_ RTC_GUARDED_BY(&demuxer_sequence_){false /*use_mid*/}; }; } // namespace webrtc diff --git a/call/rtp_transport_config.h b/call/rtp_transport_config.h new file mode 100644 index 0000000000..9aa9f14c16 --- /dev/null +++ b/call/rtp_transport_config.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_RTP_TRANSPORT_CONFIG_H_ +#define CALL_RTP_TRANSPORT_CONFIG_H_ + +#include + +#include "api/network_state_predictor.h" +#include "api/rtc_event_log/rtc_event_log.h" +#include "api/transport/bitrate_settings.h" +#include "api/transport/network_control.h" +#include "api/transport/webrtc_key_value_config.h" +#include "modules/utility/include/process_thread.h" +#include "rtc_base/task_queue.h" + +namespace webrtc { + +struct RtpTransportConfig { + // Bitrate config used until valid bitrate estimates are calculated. Also + // used to cap total bitrate used. This comes from the remote connection. + BitrateConstraints bitrate_config; + + // RtcEventLog to use for this call. Required. + // Use webrtc::RtcEventLog::CreateNull() for a null implementation. 
+ RtcEventLog* event_log = nullptr; + + // Task Queue Factory to be used in this call. Required. + TaskQueueFactory* task_queue_factory = nullptr; + + // NetworkStatePredictor to use for this call. + NetworkStatePredictorFactoryInterface* network_state_predictor_factory = + nullptr; + + // Network controller factory to use for this call. + NetworkControllerFactoryInterface* network_controller_factory = nullptr; + + // Key-value mapping of internal configurations to apply, + // e.g. field trials. + const WebRtcKeyValueConfig* trials = nullptr; +}; +} // namespace webrtc + +#endif // CALL_RTP_TRANSPORT_CONFIG_H_ diff --git a/call/rtp_transport_controller_send.cc b/call/rtp_transport_controller_send.cc index 9baf164a60..f7b6b11fd7 100644 --- a/call/rtp_transport_controller_send.cc +++ b/call/rtp_transport_controller_send.cc @@ -64,6 +64,11 @@ bool IsEnabled(const WebRtcKeyValueConfig* trials, absl::string_view key) { return absl::StartsWith(trials->Lookup(key), "Enabled"); } +bool IsDisabled(const WebRtcKeyValueConfig* trials, absl::string_view key) { + RTC_DCHECK(trials != nullptr); + return absl::StartsWith(trials->Lookup(key), "Disabled"); +} + bool IsRelayed(const rtc::NetworkRoute& route) { return route.local.uses_turn() || route.remote.uses_turn(); } @@ -82,6 +87,7 @@ RtpTransportControllerSend::RtpTransportControllerSend( : clock_(clock), event_log_(event_log), bitrate_configurator_(bitrate_config), + pacer_started_(false), process_thread_(std::move(process_thread)), use_task_queue_pacer_(IsEnabled(trials, "WebRTC-TaskQueuePacer")), process_thread_pacer_(use_task_queue_pacer_ @@ -110,7 +116,7 @@ RtpTransportControllerSend::RtpTransportControllerSend( reset_feedback_on_route_change_( !IsEnabled(trials, "WebRTC-Bwe-NoFeedbackReset")), send_side_bwe_with_overhead_( - IsEnabled(trials, "WebRTC-SendSideBwe-WithOverhead")), + !IsDisabled(trials, "WebRTC-SendSideBwe-WithOverhead")), add_pacing_to_cwin_( IsEnabled(trials, "WebRTC-AddPacingToCongestionWindowPushback")), 
relay_bandwidth_cap_("relay_cap", DataRate::PlusInfinity()), @@ -130,15 +136,14 @@ RtpTransportControllerSend::RtpTransportControllerSend( pacer()->SetPacingRates( DataRate::BitsPerSec(bitrate_config.start_bitrate_bps), DataRate::Zero()); - if (!use_task_queue_pacer_) { - process_thread_->Start(); + if (absl::StartsWith(trials->Lookup("WebRTC-LazyPacerStart"), "Disabled")) { + EnsureStarted(); } } RtpTransportControllerSend::~RtpTransportControllerSend() { - if (!use_task_queue_pacer_) { - process_thread_->Stop(); - } + RTC_DCHECK(video_rtp_senders_.empty()); + process_thread_->Stop(); } RtpVideoSenderInterface* RtpTransportControllerSend::CreateRtpVideoSender( @@ -152,6 +157,7 @@ RtpVideoSenderInterface* RtpTransportControllerSend::CreateRtpVideoSender( std::unique_ptr fec_controller, const RtpSenderFrameEncryptionConfig& frame_encryption_config, rtc::scoped_refptr frame_transformer) { + RTC_DCHECK_RUN_ON(&main_thread_); video_rtp_senders_.push_back(std::make_unique( clock_, suspended_ssrcs, states, rtp_config, rtcp_report_interval_ms, send_transport, observers, @@ -165,6 +171,7 @@ RtpVideoSenderInterface* RtpTransportControllerSend::CreateRtpVideoSender( void RtpTransportControllerSend::DestroyRtpVideoSender( RtpVideoSenderInterface* rtp_video_sender) { + RTC_DCHECK_RUN_ON(&main_thread_); std::vector>::iterator it = video_rtp_senders_.end(); for (it = video_rtp_senders_.begin(); it != video_rtp_senders_.end(); ++it) { @@ -350,6 +357,7 @@ void RtpTransportControllerSend::OnNetworkRouteChanged( } } void RtpTransportControllerSend::OnNetworkAvailability(bool network_available) { + RTC_DCHECK_RUN_ON(&main_thread_); RTC_LOG(LS_VERBOSE) << "SignalNetworkState " << (network_available ? 
"Up" : "Down"); NetworkAvailability msg; @@ -466,6 +474,7 @@ RtpTransportControllerSend::ApplyOrLiftRelayCap(bool is_relayed) { void RtpTransportControllerSend::OnTransportOverheadChanged( size_t transport_overhead_bytes_per_packet) { + RTC_DCHECK_RUN_ON(&main_thread_); if (transport_overhead_bytes_per_packet >= kMaxOverheadBytes) { RTC_LOG(LS_ERROR) << "Transport overhead exceeds " << kMaxOverheadBytes; return; @@ -491,6 +500,17 @@ void RtpTransportControllerSend::IncludeOverheadInPacedSender() { pacer()->SetIncludeOverhead(); } +void RtpTransportControllerSend::EnsureStarted() { + if (!pacer_started_) { + pacer_started_ = true; + if (use_task_queue_pacer_) { + task_queue_pacer_->EnsureStarted(); + } else { + process_thread_->Start(); + } + } +} + void RtpTransportControllerSend::OnReceivedEstimatedBitrate(uint32_t bitrate) { RemoteBitrateReport msg; msg.receive_time = Timestamp::Millis(clock_->TimeInMilliseconds()); diff --git a/call/rtp_transport_controller_send.h b/call/rtp_transport_controller_send.h index e7310334cf..7455060945 100644 --- a/call/rtp_transport_controller_send.h +++ b/call/rtp_transport_controller_send.h @@ -18,6 +18,7 @@ #include #include "api/network_state_predictor.h" +#include "api/sequence_checker.h" #include "api/transport/network_control.h" #include "api/units/data_rate.h" #include "call/rtp_bitrate_configurator.h" @@ -62,6 +63,7 @@ class RtpTransportControllerSend final const WebRtcKeyValueConfig* trials); ~RtpTransportControllerSend() override; + // TODO(tommi): Change to std::unique_ptr<>. 
RtpVideoSenderInterface* CreateRtpVideoSender( std::map suspended_ssrcs, const std::map& @@ -110,6 +112,7 @@ class RtpTransportControllerSend final void AccountForAudioPacketsInPacedSender(bool account_for_audio) override; void IncludeOverheadInPacedSender() override; + void EnsureStarted() override; // Implements RtcpBandwidthObserver interface void OnReceivedEstimatedBitrate(uint32_t bitrate) override; @@ -147,10 +150,13 @@ class RtpTransportControllerSend final Clock* const clock_; RtcEventLog* const event_log_; + SequenceChecker main_thread_; PacketRouter packet_router_; - std::vector> video_rtp_senders_; + std::vector> video_rtp_senders_ + RTC_GUARDED_BY(&main_thread_); RtpBitrateConfigurator bitrate_configurator_; std::map network_routes_; + bool pacer_started_; const std::unique_ptr process_thread_; const bool use_task_queue_pacer_; std::unique_ptr process_thread_pacer_; diff --git a/call/rtp_transport_controller_send_factory.h b/call/rtp_transport_controller_send_factory.h new file mode 100644 index 0000000000..a857ca7e6f --- /dev/null +++ b/call/rtp_transport_controller_send_factory.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_ +#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_ + +#include +#include + +#include "call/rtp_transport_controller_send.h" +#include "call/rtp_transport_controller_send_factory_interface.h" + +namespace webrtc { +class RtpTransportControllerSendFactory + : public RtpTransportControllerSendFactoryInterface { + public: + std::unique_ptr Create( + const RtpTransportConfig& config, + Clock* clock, + std::unique_ptr process_thread) override { + return std::make_unique( + clock, config.event_log, config.network_state_predictor_factory, + config.network_controller_factory, config.bitrate_config, + std::move(process_thread), config.task_queue_factory, config.trials); + } + + virtual ~RtpTransportControllerSendFactory() {} +}; +} // namespace webrtc +#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_H_ diff --git a/call/rtp_transport_controller_send_factory_interface.h b/call/rtp_transport_controller_send_factory_interface.h new file mode 100644 index 0000000000..a0218532a1 --- /dev/null +++ b/call/rtp_transport_controller_send_factory_interface.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_ +#define CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_ + +#include + +#include "call/rtp_transport_config.h" +#include "call/rtp_transport_controller_send_interface.h" +#include "modules/utility/include/process_thread.h" + +namespace webrtc { +// A factory used for dependency injection on the send side of the transport +// controller. 
+class RtpTransportControllerSendFactoryInterface { + public: + virtual std::unique_ptr Create( + const RtpTransportConfig& config, + Clock* clock, + std::unique_ptr process_thread) = 0; + + virtual ~RtpTransportControllerSendFactoryInterface() {} +}; +} // namespace webrtc +#endif // CALL_RTP_TRANSPORT_CONTROLLER_SEND_FACTORY_INTERFACE_H_ diff --git a/call/rtp_transport_controller_send_interface.h b/call/rtp_transport_controller_send_interface.h index f073424968..2aa6d739da 100644 --- a/call/rtp_transport_controller_send_interface.h +++ b/call/rtp_transport_controller_send_interface.h @@ -26,6 +26,7 @@ #include "api/transport/bitrate_settings.h" #include "api/units/timestamp.h" #include "call/rtp_config.h" +#include "common_video/frame_counts.h" #include "modules/rtp_rtcp/include/report_block_data.h" #include "modules/rtp_rtcp/include/rtcp_statistics.h" #include "modules/rtp_rtcp/include/rtp_packet_sender.h" @@ -39,25 +40,18 @@ class TaskQueue; } // namespace rtc namespace webrtc { -class CallStatsObserver; class FrameEncryptorInterface; class TargetTransferRateObserver; class Transport; -class Module; -class PacedSender; class PacketRouter; class RtpVideoSenderInterface; -class RateLimiter; class RtcpBandwidthObserver; class RtpPacketSender; -class SendDelayStats; -class SendStatisticsProxy; struct RtpSenderObservers { RtcpRttStats* rtcp_rtt_stats; RtcpIntraFrameObserver* intra_frame_callback; RtcpLossNotificationObserver* rtcp_loss_notification_observer; - RtcpStatisticsCallback* rtcp_stats; ReportBlockDataObserver* report_block_data_observer; StreamDataCountersCallback* rtp_stats; BitrateStatisticsObserver* bitrate_observer; @@ -140,7 +134,13 @@ class RtpTransportControllerSendInterface { virtual int64_t GetPacerQueuingDelayMs() const = 0; virtual absl::optional GetFirstPacketTime() const = 0; virtual void EnablePeriodicAlrProbing(bool enable) = 0; + + // Called when a packet has been sent. 
+ // The call should arrive on the network thread, but may not in all cases + // (some tests don't adhere to this). Implementations today should not block + // the calling thread or make assumptions about the thread context. virtual void OnSentPacket(const rtc::SentPacket& sent_packet) = 0; + virtual void OnReceivedPacket(const ReceivedPacket& received_packet) = 0; virtual void SetSdpBitrateParameters( @@ -153,6 +153,8 @@ class RtpTransportControllerSendInterface { virtual void AccountForAudioPacketsInPacedSender(bool account_for_audio) = 0; virtual void IncludeOverheadInPacedSender() = 0; + + virtual void EnsureStarted() = 0; }; } // namespace webrtc diff --git a/call/rtp_video_sender.cc b/call/rtp_video_sender.cc index b6cb054488..7fad89b20b 100644 --- a/call/rtp_video_sender.cc +++ b/call/rtp_video_sender.cc @@ -22,21 +22,23 @@ #include "api/video_codecs/video_codec.h" #include "call/rtp_transport_controller_send_interface.h" #include "modules/pacing/packet_router.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/rtp_rtcp/source/rtp_sender.h" #include "modules/utility/include/process_thread.h" #include "modules/video_coding/include/video_codec_interface.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" +#include "rtc_base/task_queue.h" +#include "rtc_base/trace_event.h" namespace webrtc { namespace webrtc_internal_rtp_video_sender { RtpStreamSender::RtpStreamSender( - std::unique_ptr rtp_rtcp, + std::unique_ptr rtp_rtcp, std::unique_ptr sender_video, std::unique_ptr fec_generator) : rtp_rtcp(std::move(rtp_rtcp)), @@ -199,7 +201,7 @@ std::vector CreateRtpStreamSenders( const WebRtcKeyValueConfig& trials) { RTC_DCHECK_GT(rtp_config.ssrcs.size(), 0); - RtpRtcp::Configuration configuration; + RtpRtcpInterface::Configuration configuration; configuration.clock = clock; configuration.audio = false; 
configuration.receiver_only = false; @@ -215,7 +217,6 @@ std::vector CreateRtpStreamSenders( configuration.rtt_stats = observers.rtcp_rtt_stats; configuration.rtcp_packet_type_counter_observer = observers.rtcp_type_observer; - configuration.rtcp_statistics_callback = observers.rtcp_stats; configuration.report_block_data_observer = observers.report_block_data_observer; configuration.paced_sender = transport->packet_sender(); @@ -243,7 +244,6 @@ std::vector CreateRtpStreamSenders( std::unique_ptr fec_generator = MaybeCreateFecGenerator(clock, rtp_config, suspended_ssrcs, i, trials); configuration.fec_generator = fec_generator.get(); - video_config.fec_generator = fec_generator.get(); configuration.rtx_send_ssrc = rtp_config.GetRtxSsrcAssociatedWithMediaSsrc(rtp_config.ssrcs[i]); @@ -252,7 +252,8 @@ std::vector CreateRtpStreamSenders( configuration.need_rtp_packet_infos = rtp_config.lntf.enabled; - auto rtp_rtcp = RtpRtcp::Create(configuration); + std::unique_ptr rtp_rtcp( + ModuleRtpRtcpImpl2::Create(configuration)); rtp_rtcp->SetSendingStatus(false); rtp_rtcp->SetSendingMediaStatus(false); rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound); @@ -281,6 +282,7 @@ std::vector CreateRtpStreamSenders( video_config.fec_overhead_bytes = fec_generator->MaxPacketOverhead(); } video_config.frame_transformer = frame_transformer; + video_config.send_transport_queue = transport->GetWorkerQueue()->Get(); auto sender_video = std::make_unique(video_config); rtp_streams.emplace_back(std::move(rtp_rtcp), std::move(sender_video), std::move(fec_generator)); @@ -288,15 +290,6 @@ std::vector CreateRtpStreamSenders( return rtp_streams; } -DataRate CalculateOverheadRate(DataRate data_rate, - DataSize packet_size, - DataSize overhead_per_packet) { - Frequency packet_rate = data_rate / packet_size; - // TOSO(srte): We should not need to round to nearest whole packet per second - // rate here. 
- return packet_rate.RoundUpTo(Frequency::Hertz(1)) * overhead_per_packet; -} - absl::optional GetVideoCodecType(const RtpConfig& config) { if (config.raw_payload) { return absl::nullopt; @@ -308,6 +301,48 @@ bool TransportSeqNumExtensionConfigured(const RtpConfig& config) { return ext.uri == RtpExtension::kTransportSequenceNumberUri; }); } + +// Returns true when some coded video sequence can be decoded starting with +// this frame without requiring any previous frames. +// e.g. it is the same as a key frame when spatial scalability is not used. +// When spatial scalability is used, then it is true for layer frames of +// a key frame without inter-layer dependencies. +bool IsFirstFrameOfACodedVideoSequence( + const EncodedImage& encoded_image, + const CodecSpecificInfo* codec_specific_info) { + if (encoded_image._frameType != VideoFrameType::kVideoFrameKey) { + return false; + } + + if (codec_specific_info != nullptr) { + if (codec_specific_info->generic_frame_info.has_value()) { + // This function is used before + // `codec_specific_info->generic_frame_info->frame_diffs` are calculated, + // so need to use a more complicated way to check for presence of the + // dependencies. + return absl::c_none_of( + codec_specific_info->generic_frame_info->encoder_buffers, + [](const CodecBufferUsage& buffer) { return buffer.referenced; }); + } + + if (codec_specific_info->codecType == VideoCodecType::kVideoCodecVP8 || + codec_specific_info->codecType == VideoCodecType::kVideoCodecH264 || + codec_specific_info->codecType == VideoCodecType::kVideoCodecGeneric) { + // These codecs do not support intra picture dependencies, so a frame + // marked as a key frame should be a key frame. + return true; + } + } + + // Without dependencies described in generic format do an educated guess. + // It might be wrong for VP9 with spatial layer 0 skipped or higher spatial + // layer not depending on the spatial layer 0.
This corner case is unimportant + // for current usage of this helper function. + + // Use <= to accept both 0 (i.e. the first) and nullopt (i.e. the only). + return encoded_image.SpatialIndex() <= 0; +} + } // namespace RtpVideoSender::RtpVideoSender( @@ -325,18 +360,17 @@ RtpVideoSender::RtpVideoSender( FrameEncryptorInterface* frame_encryptor, const CryptoOptions& crypto_options, rtc::scoped_refptr frame_transformer) - : send_side_bwe_with_overhead_(absl::StartsWith( + : send_side_bwe_with_overhead_(!absl::StartsWith( field_trials_.Lookup("WebRTC-SendSideBwe-WithOverhead"), - "Enabled")), - account_for_packetization_overhead_(!absl::StartsWith( - field_trials_.Lookup("WebRTC-SubtractPacketizationOverhead"), - "Disabled")), - use_early_loss_detection_(!absl::StartsWith( - field_trials_.Lookup("WebRTC-UseEarlyLossDetection"), "Disabled")), + use_frame_rate_for_overhead_(absl::StartsWith( + field_trials_.Lookup("WebRTC-Video-UseFrameRateForOverhead"), + "Enabled")), has_packet_feedback_(TransportSeqNumExtensionConfigured(rtp_config)), + simulate_vp9_structure_(absl::StartsWith( + field_trials_.Lookup("WebRTC-Vp9DependencyDescriptor"), + "Enabled")), active_(false), - module_process_thread_(nullptr), suspended_ssrcs_(std::move(suspended_ssrcs)), fec_controller_(std::move(fec_controller)), fec_allowed_(true), @@ -364,7 +398,6 @@ RtpVideoSender::RtpVideoSender( RTC_DCHECK_EQ(rtp_config_.ssrcs.size(), rtp_streams_.size()); if (send_side_bwe_with_overhead_ && has_packet_feedback_) transport_->IncludeOverheadInPacedSender(); - module_process_thread_checker_.Detach(); // SSRCs are assumed to be sorted in the same order as |rtp_modules|. for (uint32_t ssrc : rtp_config_.ssrcs) { // Restore state if it previously existed. @@ -379,18 +412,6 @@ RtpVideoSender::RtpVideoSender( // RTP/RTCP initialization. 
- // We add the highest spatial layer first to ensure it'll be prioritized - // when sending padding, with the hope that the packet rate will be smaller, - // and that it's more important to protect than the lower layers. - - // TODO(nisse): Consider moving registration with PacketRouter last, after the - // modules are fully configured. - for (const RtpStreamSender& stream : rtp_streams_) { - constexpr bool remb_candidate = true; - transport->packet_router()->AddSendRtpModule(stream.rtp_rtcp.get(), - remb_candidate); - } - for (size_t i = 0; i < rtp_config_.extensions.size(); ++i) { const std::string& extension = rtp_config_.extensions[i].uri; int id = rtp_config_.extensions[i].id; @@ -431,33 +452,14 @@ RtpVideoSender::RtpVideoSender( } RtpVideoSender::~RtpVideoSender() { - for (const RtpStreamSender& stream : rtp_streams_) { - transport_->packet_router()->RemoveSendRtpModule(stream.rtp_rtcp.get()); - } + SetActiveModulesLocked( + std::vector(rtp_streams_.size(), /*active=*/false)); transport_->GetStreamFeedbackProvider()->DeRegisterStreamFeedbackObserver( this); } -void RtpVideoSender::RegisterProcessThread( - ProcessThread* module_process_thread) { - RTC_DCHECK_RUN_ON(&module_process_thread_checker_); - RTC_DCHECK(!module_process_thread_); - module_process_thread_ = module_process_thread; - - for (const RtpStreamSender& stream : rtp_streams_) { - module_process_thread_->RegisterModule(stream.rtp_rtcp.get(), - RTC_FROM_HERE); - } -} - -void RtpVideoSender::DeRegisterProcessThread() { - RTC_DCHECK_RUN_ON(&module_process_thread_checker_); - for (const RtpStreamSender& stream : rtp_streams_) - module_process_thread_->DeRegisterModule(stream.rtp_rtcp.get()); -} - void RtpVideoSender::SetActive(bool active) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (active_ == active) return; const std::vector active_modules(rtp_streams_.size(), active); @@ -465,7 +467,7 @@ void RtpVideoSender::SetActive(bool active) { } void RtpVideoSender::SetActiveModules(const 
std::vector active_modules) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return SetActiveModulesLocked(active_modules); } @@ -477,15 +479,34 @@ void RtpVideoSender::SetActiveModulesLocked( if (active_modules[i]) { active_ = true; } + + RtpRtcpInterface& rtp_module = *rtp_streams_[i].rtp_rtcp; + const bool was_active = rtp_module.SendingMedia(); + const bool should_be_active = active_modules[i]; + // Sends a kRtcpByeCode when going from true to false. - rtp_streams_[i].rtp_rtcp->SetSendingStatus(active_modules[i]); + rtp_module.SetSendingStatus(active_modules[i]); + + if (was_active && !should_be_active) { + // Disabling media, remove from packet router map to reduce size and + // prevent any stray packets in the pacer from asynchronously arriving + // to a disabled module. + transport_->packet_router()->RemoveSendRtpModule(&rtp_module); + } + // If set to false this module won't send media. - rtp_streams_[i].rtp_rtcp->SetSendingMediaStatus(active_modules[i]); + rtp_module.SetSendingMediaStatus(active_modules[i]); + + if (!was_active && should_be_active) { + // Turning on media, register with packet router. 
+ transport_->packet_router()->AddSendRtpModule(&rtp_module, + /*remb_candidate=*/true); + } } } bool RtpVideoSender::IsActive() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return IsActiveLocked(); } @@ -495,11 +516,10 @@ bool RtpVideoSender::IsActiveLocked() { EncodedImageCallback::Result RtpVideoSender::OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) { + const CodecSpecificInfo* codec_specific_info) { fec_controller_->UpdateWithEncodedData(encoded_image.size(), encoded_image._frameType); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RTC_DCHECK(!rtp_streams_.empty()); if (!active_) return Result(Result::ERROR_SEND_FAILED); @@ -537,19 +557,26 @@ EncodedImageCallback::Result RtpVideoSender::OnEncodedImage( rtp_streams_[stream_index].rtp_rtcp->ExpectedRetransmissionTimeMs(); } - if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) { + if (IsFirstFrameOfACodedVideoSequence(encoded_image, codec_specific_info)) { // If encoder adapter produce FrameDependencyStructure, pass it so that // dependency descriptor rtp header extension can be used. // If not supported, disable using dependency descriptor by passing nullptr. - rtp_streams_[stream_index].sender_video->SetVideoStructure( - (codec_specific_info && codec_specific_info->template_structure) - ? 
&*codec_specific_info->template_structure - : nullptr); + RTPSenderVideo& sender_video = *rtp_streams_[stream_index].sender_video; + if (codec_specific_info && codec_specific_info->template_structure) { + sender_video.SetVideoStructure(&*codec_specific_info->template_structure); + } else if (simulate_vp9_structure_ && codec_specific_info && + codec_specific_info->codecType == kVideoCodecVP9) { + FrameDependencyStructure structure = + RtpPayloadParams::MinimalisticVp9Structure( + codec_specific_info->codecSpecific.VP9); + sender_video.SetVideoStructure(&structure); + } else { + sender_video.SetVideoStructure(nullptr); + } } bool send_result = rtp_streams_[stream_index].sender_video->SendEncodedImage( rtp_config_.payload_type, codec_type_, rtp_timestamp, encoded_image, - fragmentation, params_[stream_index].GetRtpVideoHeader( encoded_image, codec_specific_info, shared_frame_id_), expected_retransmission_time_ms); @@ -573,7 +600,7 @@ EncodedImageCallback::Result RtpVideoSender::OnEncodedImage( void RtpVideoSender::OnBitrateAllocationUpdated( const VideoBitrateAllocation& bitrate) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (IsActiveLocked()) { if (rtp_streams_.size() == 1) { // If spatial scalability is enabled, it is covered by a single stream. 
@@ -598,6 +625,18 @@ void RtpVideoSender::OnBitrateAllocationUpdated( } } } +void RtpVideoSender::OnVideoLayersAllocationUpdated( + const VideoLayersAllocation& allocation) { + MutexLock lock(&mutex_); + if (IsActiveLocked()) { + for (size_t i = 0; i < rtp_streams_.size(); ++i) { + VideoLayersAllocation stream_allocation = allocation; + stream_allocation.rtp_stream_index = i; + rtp_streams_[i].sender_video->SetVideoLayersAllocation( + std::move(stream_allocation)); + } + } +} bool RtpVideoSender::NackEnabled() const { const bool nack_enabled = rtp_config_.nack.rtp_history_ms > 0; @@ -626,7 +665,7 @@ void RtpVideoSender::ConfigureSsrcs() { RTC_CHECK(ssrc_to_rtp_module_.empty()); for (size_t i = 0; i < rtp_config_.ssrcs.size(); ++i) { uint32_t ssrc = rtp_config_.ssrcs[i]; - RtpRtcp* const rtp_rtcp = rtp_streams_[i].rtp_rtcp.get(); + RtpRtcpInterface* const rtp_rtcp = rtp_streams_[i].rtp_rtcp.get(); // Restore RTP state if previous existed. auto it = suspended_ssrcs_.find(ssrc); @@ -643,7 +682,7 @@ void RtpVideoSender::ConfigureSsrcs() { RTC_DCHECK_EQ(rtp_config_.rtx.ssrcs.size(), rtp_config_.ssrcs.size()); for (size_t i = 0; i < rtp_config_.rtx.ssrcs.size(); ++i) { uint32_t ssrc = rtp_config_.rtx.ssrcs[i]; - RtpRtcp* const rtp_rtcp = rtp_streams_[i].rtp_rtcp.get(); + RtpRtcpInterface* const rtp_rtcp = rtp_streams_[i].rtp_rtcp.get(); auto it = suspended_ssrcs_.find(ssrc); if (it != suspended_ssrcs_.end()) rtp_rtcp->SetRtxState(it->second); @@ -716,7 +755,7 @@ std::map RtpVideoSender::GetRtpStates() const { std::map RtpVideoSender::GetRtpPayloadStates() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); std::map payload_states; for (const auto& param : params_) { payload_states[param.ssrc()] = param.state(); @@ -727,7 +766,7 @@ std::map RtpVideoSender::GetRtpPayloadStates() void RtpVideoSender::OnTransportOverheadChanged( size_t transport_overhead_bytes_per_packet) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); 
transport_overhead_bytes_per_packet_ = transport_overhead_bytes_per_packet; size_t max_rtp_packet_size = @@ -741,7 +780,7 @@ void RtpVideoSender::OnTransportOverheadChanged( void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update, int framerate) { // Substract overhead from bitrate. - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); size_t num_active_streams = 0; size_t overhead_bytes_per_packet = 0; for (const auto& stream : rtp_streams_) { @@ -760,8 +799,9 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update, rtp_config_.max_packet_size + transport_overhead_bytes_per_packet_); uint32_t payload_bitrate_bps = update.target_bitrate.bps(); if (send_side_bwe_with_overhead_ && has_packet_feedback_) { - DataRate overhead_rate = CalculateOverheadRate( - update.target_bitrate, max_total_packet_size, packet_overhead); + DataRate overhead_rate = + CalculateOverheadRate(update.target_bitrate, max_total_packet_size, + packet_overhead, Frequency::Hertz(framerate)); // TODO(srte): We probably should not accept 0 payload bitrate here. payload_bitrate_bps = rtc::saturated_cast(payload_bitrate_bps - overhead_rate.bps()); @@ -781,16 +821,13 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update, // since |fec_allowed_| may be toggled back on at any moment. } - uint32_t packetization_rate_bps = 0; - if (account_for_packetization_overhead_) { // Subtract packetization overhead from the encoder target. If target rate // is really low, cap the overhead at 50%. This also avoids the case where // |encoder_target_rate_bps_| is 0 due to encoder pause event while the // packetization rate is positive since packets are still flowing. 
- packetization_rate_bps = - std::min(GetPacketizationOverheadRate(), encoder_target_rate_bps_ / 2); - encoder_target_rate_bps_ -= packetization_rate_bps; - } + uint32_t packetization_rate_bps = + std::min(GetPacketizationOverheadRate(), encoder_target_rate_bps_ / 2); + encoder_target_rate_bps_ -= packetization_rate_bps; loss_mask_vector_.clear(); @@ -803,7 +840,7 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update, DataRate encoder_overhead_rate = CalculateOverheadRate( DataRate::BitsPerSec(encoder_target_rate_bps_), max_total_packet_size - DataSize::Bytes(overhead_bytes_per_packet), - packet_overhead); + packet_overhead, Frequency::Hertz(framerate)); encoder_overhead_rate_bps = std::min( encoder_overhead_rate.bps(), update.target_bitrate.bps() - encoder_target_rate_bps_); @@ -845,27 +882,27 @@ int RtpVideoSender::ProtectionRequest(const FecProtectionParams* delta_params, *sent_nack_rate_bps = 0; *sent_fec_rate_bps = 0; for (const RtpStreamSender& stream : rtp_streams_) { - if (stream.fec_generator) { - stream.fec_generator->SetProtectionParameters(*delta_params, *key_params); - *sent_fec_rate_bps += stream.fec_generator->CurrentFecRate().bps(); - } - *sent_video_rate_bps += stream.sender_video->VideoBitrateSent(); - *sent_nack_rate_bps += - stream.rtp_rtcp->GetSendRates()[RtpPacketMediaType::kRetransmission] - .bps(); + stream.rtp_rtcp->SetFecProtectionParams(*delta_params, *key_params); + + auto send_bitrate = stream.rtp_rtcp->GetSendRates(); + *sent_video_rate_bps += send_bitrate[RtpPacketMediaType::kVideo].bps(); + *sent_fec_rate_bps += + send_bitrate[RtpPacketMediaType::kForwardErrorCorrection].bps(); + *sent_nack_rate_bps += + send_bitrate[RtpPacketMediaType::kRetransmission].bps(); } return 0; } void RtpVideoSender::SetFecAllowed(bool fec_allowed) { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); fec_allowed_ = fec_allowed; } void RtpVideoSender::OnPacketFeedbackVector( std::vector packet_feedback_vector) { if 
(fec_controller_->UseLossVectorMask()) { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); for (const StreamPacketInfo& packet : packet_feedback_vector) { loss_mask_vector_.push_back(!packet.received); } @@ -874,36 +911,36 @@ void RtpVideoSender::OnPacketFeedbackVector( // Map from SSRC to all acked packets for that RTP module. std::map> acked_packets_per_ssrc; for (const StreamPacketInfo& packet : packet_feedback_vector) { - if (packet.received) { - acked_packets_per_ssrc[packet.ssrc].push_back(packet.rtp_sequence_number); + if (packet.received && packet.ssrc) { + acked_packets_per_ssrc[*packet.ssrc].push_back( + packet.rtp_sequence_number); } } - if (use_early_loss_detection_) { - // Map from SSRC to vector of RTP sequence numbers that are indicated as - // lost by feedback, without being trailed by any received packets. - std::map> early_loss_detected_per_ssrc; + // Map from SSRC to vector of RTP sequence numbers that are indicated as + // lost by feedback, without being trailed by any received packets. + std::map> early_loss_detected_per_ssrc; - for (const StreamPacketInfo& packet : packet_feedback_vector) { - if (!packet.received) { - // Last known lost packet, might not be detectable as lost by remote - // jitter buffer. - early_loss_detected_per_ssrc[packet.ssrc].push_back( - packet.rtp_sequence_number); - } else { - // Packet received, so any loss prior to this is already detectable. - early_loss_detected_per_ssrc.erase(packet.ssrc); - } + for (const StreamPacketInfo& packet : packet_feedback_vector) { + // Only include new media packets, not retransmissions/padding/fec. + if (!packet.received && packet.ssrc && !packet.is_retransmission) { + // Last known lost packet, might not be detectable as lost by remote + // jitter buffer. + early_loss_detected_per_ssrc[*packet.ssrc].push_back( + packet.rtp_sequence_number); + } else { + // Packet received, so any loss prior to this is already detectable. 
+ early_loss_detected_per_ssrc.erase(*packet.ssrc); } + } - for (const auto& kv : early_loss_detected_per_ssrc) { - const uint32_t ssrc = kv.first; - auto it = ssrc_to_rtp_module_.find(ssrc); - RTC_DCHECK(it != ssrc_to_rtp_module_.end()); - RTPSender* rtp_sender = it->second->RtpSender(); - for (uint16_t sequence_number : kv.second) { - rtp_sender->ReSendPacket(sequence_number); - } + for (const auto& kv : early_loss_detected_per_ssrc) { + const uint32_t ssrc = kv.first; + auto it = ssrc_to_rtp_module_.find(ssrc); + RTC_CHECK(it != ssrc_to_rtp_module_.end()); + RTPSender* rtp_sender = it->second->RtpSender(); + for (uint16_t sequence_number : kv.second) { + rtp_sender->ReSendPacket(sequence_number); } } @@ -911,8 +948,8 @@ void RtpVideoSender::OnPacketFeedbackVector( const uint32_t ssrc = kv.first; auto it = ssrc_to_rtp_module_.find(ssrc); if (it == ssrc_to_rtp_module_.end()) { - // Packets not for a media SSRC, so likely RTX or FEC. If so, ignore - // since there's no RTP history to clean up anyway. + // No media, likely FEC or padding. Ignore since there's no RTP history to + // clean up anyway. 
continue; } rtc::ArrayView rtp_sequence_numbers(kv.second); @@ -926,4 +963,19 @@ void RtpVideoSender::SetEncodingData(size_t width, fec_controller_->SetEncodingData(width, height, num_temporal_layers, rtp_config_.max_packet_size); } + +DataRate RtpVideoSender::CalculateOverheadRate(DataRate data_rate, + DataSize packet_size, + DataSize overhead_per_packet, + Frequency framerate) const { + Frequency packet_rate = data_rate / packet_size; + if (use_frame_rate_for_overhead_) { + framerate = std::max(framerate, Frequency::Hertz(1)); + DataSize frame_size = data_rate / framerate; + int packets_per_frame = ceil(frame_size / packet_size); + packet_rate = packets_per_frame * framerate; + } + return packet_rate.RoundUpTo(Frequency::Hertz(1)) * overhead_per_packet; +} + } // namespace webrtc diff --git a/call/rtp_video_sender.h b/call/rtp_video_sender.h index 58bb7f412e..991276fe79 100644 --- a/call/rtp_video_sender.h +++ b/call/rtp_video_sender.h @@ -22,6 +22,7 @@ #include "api/fec_controller.h" #include "api/fec_controller_override.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/sequence_checker.h" #include "api/transport/field_trial_based_config.h" #include "api/video_codecs/video_encoder.h" #include "call/rtp_config.h" @@ -29,29 +30,26 @@ #include "call/rtp_transport_controller_send_interface.h" #include "call/rtp_video_sender_interface.h" #include "modules/rtp_rtcp/include/flexfec_sender.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/rtp_rtcp/source/rtp_sender.h" #include "modules/rtp_rtcp/source/rtp_sender_video.h" #include "modules/rtp_rtcp/source/rtp_sequence_number_map.h" #include "modules/rtp_rtcp/source/rtp_video_header.h" -#include "modules/utility/include/process_thread.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/rate_limiter.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" namespace 
webrtc { class FrameEncryptorInterface; -class RTPFragmentationHeader; -class RtpRtcp; class RtpTransportControllerSendInterface; namespace webrtc_internal_rtp_video_sender { // RTP state for a single simulcast stream. Internal to the implementation of // RtpVideoSender. struct RtpStreamSender { - RtpStreamSender(std::unique_ptr rtp_rtcp, + RtpStreamSender(std::unique_ptr rtp_rtcp, std::unique_ptr sender_video, std::unique_ptr fec_generator); ~RtpStreamSender(); @@ -60,7 +58,7 @@ struct RtpStreamSender { RtpStreamSender& operator=(RtpStreamSender&&) = default; // Note: Needs pointer stability. - std::unique_ptr rtp_rtcp; + std::unique_ptr rtp_rtcp; std::unique_ptr sender_video; std::unique_ptr fec_generator; }; @@ -91,33 +89,24 @@ class RtpVideoSender : public RtpVideoSenderInterface, rtc::scoped_refptr frame_transformer); ~RtpVideoSender() override; - // RegisterProcessThread register |module_process_thread| with those objects - // that use it. Registration has to happen on the thread were - // |module_process_thread| was created (libjingle's worker thread). - // TODO(perkj): Replace the use of |module_process_thread| with a TaskQueue, - // maybe |worker_queue|. - void RegisterProcessThread(ProcessThread* module_process_thread) - RTC_LOCKS_EXCLUDED(crit_) override; - void DeRegisterProcessThread() RTC_LOCKS_EXCLUDED(crit_) override; - // RtpVideoSender will only route packets if being active, all packets will be // dropped otherwise. - void SetActive(bool active) RTC_LOCKS_EXCLUDED(crit_) override; + void SetActive(bool active) RTC_LOCKS_EXCLUDED(mutex_) override; // Sets the sending status of the rtp modules and appropriately sets the // payload router to active if any rtp modules are active. 
void SetActiveModules(const std::vector active_modules) - RTC_LOCKS_EXCLUDED(crit_) override; - bool IsActive() RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; + bool IsActive() RTC_LOCKS_EXCLUDED(mutex_) override; void OnNetworkAvailability(bool network_available) - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; std::map GetRtpStates() const - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; std::map GetRtpPayloadStates() const - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; void DeliverRtcp(const uint8_t* packet, size_t length) - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; // Implements webrtc::VCMProtectionCallback. int ProtectionRequest(const FecProtectionParams* delta_params, @@ -125,69 +114,71 @@ class RtpVideoSender : public RtpVideoSenderInterface, uint32_t* sent_video_rate_bps, uint32_t* sent_nack_rate_bps, uint32_t* sent_fec_rate_bps) - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; // Implements FecControllerOverride. - void SetFecAllowed(bool fec_allowed) RTC_LOCKS_EXCLUDED(crit_) override; + void SetFecAllowed(bool fec_allowed) RTC_LOCKS_EXCLUDED(mutex_) override; // Implements EncodedImageCallback. // Returns 0 if the packet was routed / sent, -1 otherwise. 
EncodedImageCallback::Result OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) - RTC_LOCKS_EXCLUDED(crit_) override; + const CodecSpecificInfo* codec_specific_info) + RTC_LOCKS_EXCLUDED(mutex_) override; void OnBitrateAllocationUpdated(const VideoBitrateAllocation& bitrate) - RTC_LOCKS_EXCLUDED(crit_) override; - + RTC_LOCKS_EXCLUDED(mutex_) override; + void OnVideoLayersAllocationUpdated( + const VideoLayersAllocation& layers) override; void OnTransportOverheadChanged(size_t transport_overhead_bytes_per_packet) - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; void OnBitrateUpdated(BitrateAllocationUpdate update, int framerate) - RTC_LOCKS_EXCLUDED(crit_) override; - uint32_t GetPayloadBitrateBps() const RTC_LOCKS_EXCLUDED(crit_) override; - uint32_t GetProtectionBitrateBps() const RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; + uint32_t GetPayloadBitrateBps() const RTC_LOCKS_EXCLUDED(mutex_) override; + uint32_t GetProtectionBitrateBps() const RTC_LOCKS_EXCLUDED(mutex_) override; void SetEncodingData(size_t width, size_t height, size_t num_temporal_layers) - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; std::vector GetSentRtpPacketInfos( uint32_t ssrc, rtc::ArrayView sequence_numbers) const - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; // From StreamFeedbackObserver. 
void OnPacketFeedbackVector( std::vector packet_feedback_vector) - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; private: - bool IsActiveLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + bool IsActiveLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void SetActiveModulesLocked(const std::vector active_modules) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); - void UpdateModuleSendingState() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void UpdateModuleSendingState() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void ConfigureProtection(); void ConfigureSsrcs(); void ConfigureRids(); bool NackEnabled() const; uint32_t GetPacketizationOverheadRate() const; + DataRate CalculateOverheadRate(DataRate data_rate, + DataSize packet_size, + DataSize overhead_per_packet, + Frequency framerate) const; const FieldTrialBasedConfig field_trials_; const bool send_side_bwe_with_overhead_; - const bool account_for_packetization_overhead_; - const bool use_early_loss_detection_; + const bool use_frame_rate_for_overhead_; const bool has_packet_feedback_; + const bool simulate_vp9_structure_; - // TODO(holmer): Remove crit_ once RtpVideoSender runs on the + // TODO(holmer): Remove mutex_ once RtpVideoSender runs on the // transport task queue. - rtc::CriticalSection crit_; - bool active_ RTC_GUARDED_BY(crit_); + mutable Mutex mutex_; + bool active_ RTC_GUARDED_BY(mutex_); - ProcessThread* module_process_thread_; - rtc::ThreadChecker module_process_thread_checker_; std::map suspended_ssrcs_; const std::unique_ptr fec_controller_; - bool fec_allowed_ RTC_GUARDED_BY(crit_); + bool fec_allowed_ RTC_GUARDED_BY(mutex_); // Rtp modules are assumed to be sorted in simulcast index order. const std::vector @@ -201,21 +192,21 @@ class RtpVideoSender : public RtpVideoSenderInterface, // rewrite the frame id), therefore |shared_frame_id| has to live in a place // where we are aware of all the different streams. 
int64_t shared_frame_id_ = 0; - std::vector params_ RTC_GUARDED_BY(crit_); + std::vector params_ RTC_GUARDED_BY(mutex_); - size_t transport_overhead_bytes_per_packet_ RTC_GUARDED_BY(crit_); + size_t transport_overhead_bytes_per_packet_ RTC_GUARDED_BY(mutex_); uint32_t protection_bitrate_bps_; uint32_t encoder_target_rate_bps_; - std::vector loss_mask_vector_ RTC_GUARDED_BY(crit_); + std::vector loss_mask_vector_ RTC_GUARDED_BY(mutex_); - std::vector frame_counts_ RTC_GUARDED_BY(crit_); + std::vector frame_counts_ RTC_GUARDED_BY(mutex_); FrameCountObserver* const frame_count_observer_; // Effectively const map from SSRC to RtpRtcp, for all media SSRCs. // This map is set at construction time and never changed, but it's // non-trivial to make it properly const. - std::map ssrc_to_rtp_module_; + std::map ssrc_to_rtp_module_; RTC_DISALLOW_COPY_AND_ASSIGN(RtpVideoSender); }; diff --git a/call/rtp_video_sender_interface.h b/call/rtp_video_sender_interface.h index bb72eb5996..a0b4baccb4 100644 --- a/call/rtp_video_sender_interface.h +++ b/call/rtp_video_sender_interface.h @@ -18,10 +18,10 @@ #include "api/array_view.h" #include "api/call/bitrate_allocation.h" #include "api/fec_controller_override.h" +#include "api/video/video_layers_allocation.h" #include "call/rtp_config.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_sequence_number_map.h" -#include "modules/utility/include/process_thread.h" #include "modules/video_coding/include/video_codec_interface.h" namespace webrtc { @@ -31,9 +31,6 @@ struct FecProtectionParams; class RtpVideoSenderInterface : public EncodedImageCallback, public FecControllerOverride { public: - virtual void RegisterProcessThread(ProcessThread* module_process_thread) = 0; - virtual void DeRegisterProcessThread() = 0; - // RtpVideoSender will only route packets if being active, all // packets will be dropped otherwise. 
virtual void SetActive(bool active) = 0; @@ -50,6 +47,8 @@ class RtpVideoSenderInterface : public EncodedImageCallback, virtual void OnBitrateAllocationUpdated( const VideoBitrateAllocation& bitrate) = 0; + virtual void OnVideoLayersAllocationUpdated( + const VideoLayersAllocation& allocation) = 0; virtual void OnBitrateUpdated(BitrateAllocationUpdate update, int framerate) = 0; virtual void OnTransportOverheadChanged( diff --git a/call/rtp_video_sender_unittest.cc b/call/rtp_video_sender_unittest.cc index 048fe1a7ec..334d97ccfa 100644 --- a/call/rtp_video_sender_unittest.cc +++ b/call/rtp_video_sender_unittest.cc @@ -30,7 +30,6 @@ #include "test/mock_transport.h" #include "test/scenario/scenario.h" #include "test/time_controller/simulated_time_controller.h" -#include "video/call_stats.h" #include "video/send_delay_stats.h" #include "video/send_statistics_proxy.h" @@ -62,7 +61,6 @@ class MockRtcpIntraFrameObserver : public RtcpIntraFrameObserver { RtpSenderObservers CreateObservers( RtcpRttStats* rtcp_rtt_stats, RtcpIntraFrameObserver* intra_frame_callback, - RtcpStatisticsCallback* rtcp_stats, ReportBlockDataObserver* report_block_data_observer, StreamDataCountersCallback* rtp_stats, BitrateStatisticsObserver* bitrate_observer, @@ -74,7 +72,6 @@ RtpSenderObservers CreateObservers( observers.rtcp_rtt_stats = rtcp_rtt_stats; observers.intra_frame_callback = intra_frame_callback; observers.rtcp_loss_notification_observer = nullptr; - observers.rtcp_stats = rtcp_stats; observers.report_block_data_observer = report_block_data_observer; observers.rtp_stats = rtp_stats; observers.bitrate_observer = bitrate_observer; @@ -108,6 +105,7 @@ VideoSendStream::Config CreateVideoSendStreamConfig( kTransportsSequenceExtensionId); config.rtp.extensions.emplace_back(RtpDependencyDescriptorExtension::kUri, kDependencyDescriptorExtensionId); + config.rtp.extmap_allow_mixed = true; return config; } @@ -136,21 +134,19 @@ class RtpVideoSenderTestFixture { 
time_controller_.CreateProcessThread("PacerThread"), time_controller_.GetTaskQueueFactory(), &field_trials_), - process_thread_(time_controller_.CreateProcessThread("test_thread")), - call_stats_(time_controller_.GetClock(), process_thread_.get()), stats_proxy_(time_controller_.GetClock(), config_, VideoEncoderConfig::ContentType::kRealtimeVideo), retransmission_rate_limiter_(time_controller_.GetClock(), kRetransmitWindowSizeMs) { + transport_controller_.EnsureStarted(); std::map suspended_ssrcs; router_ = std::make_unique( time_controller_.GetClock(), suspended_ssrcs, suspended_payload_states, config_.rtp, config_.rtcp_report_interval_ms, &transport_, - CreateObservers(&call_stats_, &encoder_feedback_, &stats_proxy_, - &stats_proxy_, &stats_proxy_, &stats_proxy_, - frame_count_observer, &stats_proxy_, &stats_proxy_, - &send_delay_stats_), + CreateObservers(nullptr, &encoder_feedback_, &stats_proxy_, + &stats_proxy_, &stats_proxy_, frame_count_observer, + &stats_proxy_, &stats_proxy_, &send_delay_stats_), &transport_controller_, &event_log_, &retransmission_rate_limiter_, std::make_unique(time_controller_.GetClock()), nullptr, CryptoOptions{}, frame_transformer); @@ -195,13 +191,18 @@ class RtpVideoSenderTestFixture { BitrateConstraints bitrate_config_; const FieldTrialBasedConfig field_trials_; RtpTransportControllerSend transport_controller_; - std::unique_ptr process_thread_; - // TODO(tommi): Use internal::CallStats. 
- CallStats call_stats_; SendStatisticsProxy stats_proxy_; RateLimiter retransmission_rate_limiter_; std::unique_ptr router_; }; + +BitrateAllocationUpdate CreateBitrateAllocationUpdate(int target_bitrate_bps) { + BitrateAllocationUpdate update; + update.target_bitrate = DataRate::BitsPerSec(target_bitrate_bps); + update.round_trip_time = TimeDelta::Zero(); + return update; +} + } // namespace TEST(RtpVideoSenderTest, SendOnOneModule) { @@ -213,24 +214,20 @@ TEST(RtpVideoSenderTest, SendOnOneModule) { encoded_image.SetEncodedData(EncodedImageBuffer::Create(&kPayload, 1)); RtpVideoSenderTestFixture test({kSsrc1}, {kRtxSsrc1}, kPayloadType, {}); - EXPECT_NE( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_NE(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); test.router()->SetActive(true); - EXPECT_EQ( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_EQ(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); test.router()->SetActive(false); - EXPECT_NE( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_NE(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); test.router()->SetActive(true); - EXPECT_EQ( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_EQ(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); } TEST(RtpVideoSenderTest, SendSimulcastSetActive) { @@ -249,27 +246,19 @@ TEST(RtpVideoSenderTest, SendSimulcastSetActive) { test.router()->SetActive(true); EXPECT_EQ(EncodedImageCallback::Result::OK, - test.router() - ->OnEncodedImage(encoded_image_1, &codec_info, nullptr) - .error); + 
test.router()->OnEncodedImage(encoded_image_1, &codec_info).error); EncodedImage encoded_image_2(encoded_image_1); encoded_image_2.SetSpatialIndex(1); EXPECT_EQ(EncodedImageCallback::Result::OK, - test.router() - ->OnEncodedImage(encoded_image_2, &codec_info, nullptr) - .error); + test.router()->OnEncodedImage(encoded_image_2, &codec_info).error); // Inactive. test.router()->SetActive(false); EXPECT_NE(EncodedImageCallback::Result::OK, - test.router() - ->OnEncodedImage(encoded_image_1, &codec_info, nullptr) - .error); + test.router()->OnEncodedImage(encoded_image_1, &codec_info).error); EXPECT_NE(EncodedImageCallback::Result::OK, - test.router() - ->OnEncodedImage(encoded_image_2, &codec_info, nullptr) - .error); + test.router()->OnEncodedImage(encoded_image_2, &codec_info).error); } // Tests how setting individual rtp modules to active affects the overall @@ -297,9 +286,7 @@ TEST(RtpVideoSenderTest, SendSimulcastSetActiveModules) { std::vector active_modules({true, false}); test.router()->SetActiveModules(active_modules); EXPECT_EQ(EncodedImageCallback::Result::OK, - test.router() - ->OnEncodedImage(encoded_image_1, &codec_info, nullptr) - .error); + test.router()->OnEncodedImage(encoded_image_1, &codec_info).error); // Setting both streams to inactive will turn the payload router to // inactive. @@ -308,13 +295,9 @@ TEST(RtpVideoSenderTest, SendSimulcastSetActiveModules) { // An incoming encoded image will not ask the module to send outgoing data // because the payload router is inactive. 
EXPECT_NE(EncodedImageCallback::Result::OK, - test.router() - ->OnEncodedImage(encoded_image_1, &codec_info, nullptr) - .error); + test.router()->OnEncodedImage(encoded_image_1, &codec_info).error); EXPECT_NE(EncodedImageCallback::Result::OK, - test.router() - ->OnEncodedImage(encoded_image_1, &codec_info, nullptr) - .error); + test.router()->OnEncodedImage(encoded_image_1, &codec_info).error); } TEST(RtpVideoSenderTest, CreateWithNoPreviousStates) { @@ -381,9 +364,8 @@ TEST(RtpVideoSenderTest, FrameCountCallbacks) { // No callbacks when not active. EXPECT_CALL(callback, FrameCountUpdated).Times(0); - EXPECT_NE( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_NE(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); ::testing::Mock::VerifyAndClearExpectations(&callback); test.router()->SetActive(true); @@ -391,9 +373,8 @@ TEST(RtpVideoSenderTest, FrameCountCallbacks) { FrameCounts frame_counts; EXPECT_CALL(callback, FrameCountUpdated(_, kSsrc1)) .WillOnce(SaveArg<0>(&frame_counts)); - EXPECT_EQ( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_EQ(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); EXPECT_EQ(1, frame_counts.key_frames); EXPECT_EQ(0, frame_counts.delta_frames); @@ -403,9 +384,8 @@ TEST(RtpVideoSenderTest, FrameCountCallbacks) { encoded_image._frameType = VideoFrameType::kVideoFrameDelta; EXPECT_CALL(callback, FrameCountUpdated(_, kSsrc1)) .WillOnce(SaveArg<0>(&frame_counts)); - EXPECT_EQ( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_EQ(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); EXPECT_EQ(1, frame_counts.key_frames); EXPECT_EQ(1, frame_counts.delta_frames); @@ -440,14 +420,12 @@ 
TEST(RtpVideoSenderTest, DoesNotRetrasmitAckedPackets) { transport_sequence_numbers.push_back(options.packet_id); return true; }); - EXPECT_EQ( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_EQ(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); encoded_image.SetTimestamp(2); encoded_image.capture_time_ms_ = 3; - EXPECT_EQ( - EncodedImageCallback::Result::OK, - test.router()->OnEncodedImage(encoded_image, nullptr, nullptr).error); + EXPECT_EQ(EncodedImageCallback::Result::OK, + test.router()->OnEncodedImage(encoded_image, nullptr).error); test.AdvanceTime(TimeDelta::Millis(33)); @@ -484,11 +462,13 @@ TEST(RtpVideoSenderTest, DoesNotRetrasmitAckedPackets) { lost_packet_feedback.rtp_sequence_number = rtp_sequence_numbers[0]; lost_packet_feedback.ssrc = kSsrc1; lost_packet_feedback.received = false; + lost_packet_feedback.is_retransmission = false; StreamFeedbackObserver::StreamPacketInfo received_packet_feedback; received_packet_feedback.rtp_sequence_number = rtp_sequence_numbers[1]; received_packet_feedback.ssrc = kSsrc1; received_packet_feedback.received = true; + received_packet_feedback.is_retransmission = false; test.router()->OnPacketFeedbackVector( {lost_packet_feedback, received_packet_feedback}); @@ -612,9 +592,7 @@ TEST(RtpVideoSenderTest, EarlyRetransmits) { EXPECT_EQ(rtp_packet.Ssrc(), kSsrc1); return true; }); - EXPECT_EQ(test.router() - ->OnEncodedImage(encoded_image, &codec_specific, nullptr) - .error, + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, EncodedImageCallback::Result::OK); test.AdvanceTime(TimeDelta::Millis(33)); @@ -634,9 +612,7 @@ TEST(RtpVideoSenderTest, EarlyRetransmits) { EXPECT_EQ(rtp_packet.Ssrc(), kSsrc2); return true; }); - EXPECT_EQ(test.router() - ->OnEncodedImage(encoded_image, &codec_specific, nullptr) - .error, + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, 
&codec_specific).error, EncodedImageCallback::Result::OK); test.AdvanceTime(TimeDelta::Millis(33)); @@ -664,11 +640,13 @@ TEST(RtpVideoSenderTest, EarlyRetransmits) { first_packet_feedback.rtp_sequence_number = frame1_rtp_sequence_number; first_packet_feedback.ssrc = kSsrc1; first_packet_feedback.received = false; + first_packet_feedback.is_retransmission = false; StreamFeedbackObserver::StreamPacketInfo second_packet_feedback; second_packet_feedback.rtp_sequence_number = frame2_rtp_sequence_number; second_packet_feedback.ssrc = kSsrc2; second_packet_feedback.received = true; + second_packet_feedback.is_retransmission = false; test.router()->OnPacketFeedbackVector( {first_packet_feedback, second_packet_feedback}); @@ -678,8 +656,6 @@ TEST(RtpVideoSenderTest, EarlyRetransmits) { } TEST(RtpVideoSenderTest, SupportsDependencyDescriptor) { - test::ScopedFieldTrials trials("WebRTC-GenericDescriptor/Enabled/"); - RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {}); test.router()->SetActive(true); @@ -707,9 +683,9 @@ TEST(RtpVideoSenderTest, SupportsDependencyDescriptor) { codec_specific.template_structure.emplace(); codec_specific.template_structure->num_decode_targets = 1; codec_specific.template_structure->templates = { - GenericFrameInfo::Builder().T(0).Dtis("S").Build(), - GenericFrameInfo::Builder().T(0).Dtis("S").Fdiffs({2}).Build(), - GenericFrameInfo::Builder().T(1).Dtis("D").Fdiffs({1}).Build(), + FrameDependencyTemplate().T(0).Dtis("S"), + FrameDependencyTemplate().T(0).Dtis("S").FrameDiffs({2}), + FrameDependencyTemplate().T(1).Dtis("D").FrameDiffs({1}), }; // Send two tiny images, mapping to single RTP packets. 
@@ -718,9 +694,7 @@ TEST(RtpVideoSenderTest, SupportsDependencyDescriptor) { codec_specific.generic_frame_info = GenericFrameInfo::Builder().T(0).Dtis("S").Build(); codec_specific.generic_frame_info->encoder_buffers = {{0, false, true}}; - EXPECT_EQ(test.router() - ->OnEncodedImage(encoded_image, &codec_specific, nullptr) - .error, + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, EncodedImageCallback::Result::OK); test.AdvanceTime(TimeDelta::Millis(33)); ASSERT_THAT(sent_packets, SizeIs(1)); @@ -733,9 +707,7 @@ TEST(RtpVideoSenderTest, SupportsDependencyDescriptor) { codec_specific.generic_frame_info = GenericFrameInfo::Builder().T(1).Dtis("D").Build(); codec_specific.generic_frame_info->encoder_buffers = {{0, true, false}}; - EXPECT_EQ(test.router() - ->OnEncodedImage(encoded_image, &codec_specific, nullptr) - .error, + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, EncodedImageCallback::Result::OK); test.AdvanceTime(TimeDelta::Millis(33)); ASSERT_THAT(sent_packets, SizeIs(2)); @@ -743,9 +715,118 @@ TEST(RtpVideoSenderTest, SupportsDependencyDescriptor) { sent_packets.back().HasExtension()); } -TEST(RtpVideoSenderTest, SupportsStoppingUsingDependencyDescriptor) { - test::ScopedFieldTrials trials("WebRTC-GenericDescriptor/Enabled/"); +TEST(RtpVideoSenderTest, SupportsDependencyDescriptorForVp9) { + RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {}); + test.router()->SetActive(true); + + RtpHeaderExtensionMap extensions; + extensions.Register( + kDependencyDescriptorExtensionId); + std::vector sent_packets; + ON_CALL(test.transport(), SendRtp) + .WillByDefault([&](const uint8_t* packet, size_t length, + const PacketOptions& options) { + sent_packets.emplace_back(&extensions); + EXPECT_TRUE(sent_packets.back().Parse(packet, length)); + return true; + }); + + const uint8_t kPayload[1] = {'a'}; + EncodedImage encoded_image; + encoded_image.SetTimestamp(1); + encoded_image.capture_time_ms_ = 2; 
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey; + encoded_image.SetEncodedData( + EncodedImageBuffer::Create(kPayload, sizeof(kPayload))); + CodecSpecificInfo codec_specific; + codec_specific.codecType = VideoCodecType::kVideoCodecVP9; + codec_specific.template_structure.emplace(); + codec_specific.template_structure->num_decode_targets = 2; + codec_specific.template_structure->templates = { + FrameDependencyTemplate().S(0).Dtis("SS"), + FrameDependencyTemplate().S(1).Dtis("-S").FrameDiffs({1}), + }; + + // Send two tiny images, each mapping to single RTP packet. + // Send in key frame for the base spatial layer. + codec_specific.generic_frame_info = + GenericFrameInfo::Builder().S(0).Dtis("SS").Build(); + codec_specific.generic_frame_info->encoder_buffers = {{0, false, true}}; + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, + EncodedImageCallback::Result::OK); + // Send in 2nd spatial layer. + codec_specific.template_structure = absl::nullopt; + codec_specific.generic_frame_info = + GenericFrameInfo::Builder().S(1).Dtis("-S").Build(); + codec_specific.generic_frame_info->encoder_buffers = {{0, true, false}, + {1, false, true}}; + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, + EncodedImageCallback::Result::OK); + + test.AdvanceTime(TimeDelta::Millis(33)); + ASSERT_THAT(sent_packets, SizeIs(2)); + EXPECT_TRUE(sent_packets[0].HasExtension()); + EXPECT_TRUE(sent_packets[1].HasExtension()); +} + +TEST(RtpVideoSenderTest, + SupportsDependencyDescriptorForVp9NotProvidedByEncoder) { + test::ScopedFieldTrials field_trials( + "WebRTC-Vp9DependencyDescriptor/Enabled/"); + RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {}); + test.router()->SetActive(true); + + RtpHeaderExtensionMap extensions; + extensions.Register( + kDependencyDescriptorExtensionId); + std::vector sent_packets; + ON_CALL(test.transport(), SendRtp) + .WillByDefault([&](const uint8_t* packet, size_t length, + const 
PacketOptions& options) { + sent_packets.emplace_back(&extensions); + EXPECT_TRUE(sent_packets.back().Parse(packet, length)); + return true; + }); + + const uint8_t kPayload[1] = {'a'}; + EncodedImage encoded_image; + encoded_image.SetTimestamp(1); + encoded_image.capture_time_ms_ = 2; + encoded_image._frameType = VideoFrameType::kVideoFrameKey; + encoded_image._encodedWidth = 320; + encoded_image._encodedHeight = 180; + encoded_image.SetEncodedData( + EncodedImageBuffer::Create(kPayload, sizeof(kPayload))); + + CodecSpecificInfo codec_specific; + codec_specific.codecType = VideoCodecType::kVideoCodecVP9; + codec_specific.codecSpecific.VP9.num_spatial_layers = 1; + codec_specific.codecSpecific.VP9.temporal_idx = kNoTemporalIdx; + codec_specific.codecSpecific.VP9.first_frame_in_picture = true; + codec_specific.end_of_picture = true; + codec_specific.codecSpecific.VP9.inter_pic_predicted = false; + + // Send two tiny images, each mapping to single RTP packet. + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, + EncodedImageCallback::Result::OK); + + // Send in 2nd picture. 
+ encoded_image._frameType = VideoFrameType::kVideoFrameDelta; + encoded_image.SetTimestamp(3000); + codec_specific.codecSpecific.VP9.inter_pic_predicted = true; + codec_specific.codecSpecific.VP9.num_ref_pics = 1; + codec_specific.codecSpecific.VP9.p_diff[0] = 1; + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, + EncodedImageCallback::Result::OK); + + test.AdvanceTime(TimeDelta::Millis(33)); + ASSERT_THAT(sent_packets, SizeIs(2)); + EXPECT_TRUE(sent_packets[0].HasExtension()); + EXPECT_TRUE(sent_packets[1].HasExtension()); +} + +TEST(RtpVideoSenderTest, SupportsStoppingUsingDependencyDescriptor) { RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {}); test.router()->SetActive(true); @@ -773,9 +854,9 @@ TEST(RtpVideoSenderTest, SupportsStoppingUsingDependencyDescriptor) { codec_specific.template_structure.emplace(); codec_specific.template_structure->num_decode_targets = 1; codec_specific.template_structure->templates = { - GenericFrameInfo::Builder().T(0).Dtis("S").Build(), - GenericFrameInfo::Builder().T(0).Dtis("S").Fdiffs({2}).Build(), - GenericFrameInfo::Builder().T(1).Dtis("D").Fdiffs({1}).Build(), + FrameDependencyTemplate().T(0).Dtis("S"), + FrameDependencyTemplate().T(0).Dtis("S").FrameDiffs({2}), + FrameDependencyTemplate().T(1).Dtis("D").FrameDiffs({1}), }; // Send two tiny images, mapping to single RTP packets. 
@@ -784,9 +865,7 @@ TEST(RtpVideoSenderTest, SupportsStoppingUsingDependencyDescriptor) { codec_specific.generic_frame_info = GenericFrameInfo::Builder().T(0).Dtis("S").Build(); codec_specific.generic_frame_info->encoder_buffers = {{0, false, true}}; - EXPECT_EQ(test.router() - ->OnEncodedImage(encoded_image, &codec_specific, nullptr) - .error, + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, EncodedImageCallback::Result::OK); test.AdvanceTime(TimeDelta::Millis(33)); ASSERT_THAT(sent_packets, SizeIs(1)); @@ -796,9 +875,7 @@ TEST(RtpVideoSenderTest, SupportsStoppingUsingDependencyDescriptor) { // Send in a new key frame without the support for the dependency descriptor. encoded_image._frameType = VideoFrameType::kVideoFrameKey; codec_specific.template_structure = absl::nullopt; - EXPECT_EQ(test.router() - ->OnEncodedImage(encoded_image, &codec_specific, nullptr) - .error, + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, EncodedImageCallback::Result::OK); test.AdvanceTime(TimeDelta::Millis(33)); ASSERT_THAT(sent_packets, SizeIs(2)); @@ -806,31 +883,73 @@ TEST(RtpVideoSenderTest, SupportsStoppingUsingDependencyDescriptor) { sent_packets.back().HasExtension()); } -TEST(RtpVideoSenderTest, CanSetZeroBitrateWithOverhead) { - test::ScopedFieldTrials trials("WebRTC-SendSideBwe-WithOverhead/Enabled/"); - RtpVideoSenderTestFixture test({kSsrc1}, {kRtxSsrc1}, kPayloadType, {}); - BitrateAllocationUpdate update; - update.target_bitrate = DataRate::Zero(); - update.packet_loss_ratio = 0; - update.round_trip_time = TimeDelta::Zero(); +TEST(RtpVideoSenderTest, + SupportsStoppingUsingDependencyDescriptorForVp8Simulcast) { + RtpVideoSenderTestFixture test({kSsrc1, kSsrc2}, {}, kPayloadType, {}); + test.router()->SetActive(true); - test.router()->OnBitrateUpdated(update, /*framerate*/ 0); -} + RtpHeaderExtensionMap extensions; + extensions.Register( + kDependencyDescriptorExtensionId); + std::vector sent_packets; + 
ON_CALL(test.transport(), SendRtp) + .WillByDefault([&](const uint8_t* packet, size_t length, + const PacketOptions& options) { + sent_packets.emplace_back(&extensions); + EXPECT_TRUE(sent_packets.back().Parse(packet, length)); + return true; + }); -TEST(RtpVideoSenderTest, CanSetZeroBitrateWithoutOverhead) { - RtpVideoSenderTestFixture test({kSsrc1}, {kRtxSsrc1}, kPayloadType, {}); + const uint8_t kPayload[1] = {'a'}; + EncodedImage encoded_image; + encoded_image.SetTimestamp(1); + encoded_image.capture_time_ms_ = 2; + encoded_image.SetEncodedData( + EncodedImageBuffer::Create(kPayload, sizeof(kPayload))); + // VP8 simulcast uses spatial index to communicate simulcast stream. + encoded_image.SetSpatialIndex(1); - BitrateAllocationUpdate update; - update.target_bitrate = DataRate::Zero(); - update.packet_loss_ratio = 0; - update.round_trip_time = TimeDelta::Zero(); + CodecSpecificInfo codec_specific; + codec_specific.codecType = VideoCodecType::kVideoCodecVP8; + codec_specific.template_structure.emplace(); + codec_specific.template_structure->num_decode_targets = 1; + codec_specific.template_structure->templates = { + FrameDependencyTemplate().T(0).Dtis("S")}; - test.router()->OnBitrateUpdated(update, /*framerate*/ 0); + // Send two tiny images, mapping to single RTP packets. + // Send in a key frame. + encoded_image._frameType = VideoFrameType::kVideoFrameKey; + codec_specific.generic_frame_info = + GenericFrameInfo::Builder().T(0).Dtis("S").Build(); + codec_specific.generic_frame_info->encoder_buffers = {{0, false, true}}; + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, + EncodedImageCallback::Result::OK); + test.AdvanceTime(TimeDelta::Millis(33)); + ASSERT_THAT(sent_packets, SizeIs(1)); + EXPECT_TRUE( + sent_packets.back().HasExtension()); + + // Send in a new key frame without the support for the dependency descriptor. 
+ encoded_image._frameType = VideoFrameType::kVideoFrameKey; + codec_specific.template_structure = absl::nullopt; + codec_specific.generic_frame_info = absl::nullopt; + EXPECT_EQ(test.router()->OnEncodedImage(encoded_image, &codec_specific).error, + EncodedImageCallback::Result::OK); + test.AdvanceTime(TimeDelta::Millis(33)); + ASSERT_THAT(sent_packets, SizeIs(2)); + EXPECT_FALSE( + sent_packets.back().HasExtension()); +} + +TEST(RtpVideoSenderTest, CanSetZeroBitrate) { + RtpVideoSenderTestFixture test({kSsrc1}, {kRtxSsrc1}, kPayloadType, {}); + test.router()->OnBitrateUpdated(CreateBitrateAllocationUpdate(0), + /*framerate*/ 0); } TEST(RtpVideoSenderTest, SimulcastSenderRegistersFrameTransformers) { rtc::scoped_refptr transformer = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); EXPECT_CALL(*transformer, RegisterTransformedFrameSinkCallback(_, kSsrc1)); EXPECT_CALL(*transformer, RegisterTransformedFrameSinkCallback(_, kSsrc2)); @@ -840,4 +959,41 @@ TEST(RtpVideoSenderTest, SimulcastSenderRegistersFrameTransformers) { EXPECT_CALL(*transformer, UnregisterTransformedFrameSinkCallback(kSsrc1)); EXPECT_CALL(*transformer, UnregisterTransformedFrameSinkCallback(kSsrc2)); } + +TEST(RtpVideoSenderTest, OverheadIsSubtractedFromTargetBitrate) { + test::ScopedFieldTrials field_trials( + "WebRTC-Video-UseFrameRateForOverhead/Enabled/"); + + // TODO(jakobi): RTP header size should not be hard coded. + constexpr uint32_t kRtpHeaderSizeBytes = 20; + constexpr uint32_t kTransportPacketOverheadBytes = 40; + constexpr uint32_t kOverheadPerPacketBytes = + kRtpHeaderSizeBytes + kTransportPacketOverheadBytes; + RtpVideoSenderTestFixture test({kSsrc1}, {}, kPayloadType, {}); + test.router()->OnTransportOverheadChanged(kTransportPacketOverheadBytes); + test.router()->SetActive(true); + + { + test.router()->OnBitrateUpdated(CreateBitrateAllocationUpdate(300000), + /*framerate*/ 15); + // 1 packet per frame. 
+ EXPECT_EQ(test.router()->GetPayloadBitrateBps(), + 300000 - kOverheadPerPacketBytes * 8 * 30); + } + { + test.router()->OnBitrateUpdated(CreateBitrateAllocationUpdate(150000), + /*framerate*/ 15); + // 1 packet per frame. + EXPECT_EQ(test.router()->GetPayloadBitrateBps(), + 150000 - kOverheadPerPacketBytes * 8 * 15); + } + { + test.router()->OnBitrateUpdated(CreateBitrateAllocationUpdate(1000000), + /*framerate*/ 30); + // 3 packets per frame. + EXPECT_EQ(test.router()->GetPayloadBitrateBps(), + 1000000 - kOverheadPerPacketBytes * 8 * 30 * 3); + } +} + } // namespace webrtc diff --git a/call/rtx_receive_stream.cc b/call/rtx_receive_stream.cc index 9e4a41bc8f..c0b138b416 100644 --- a/call/rtx_receive_stream.cc +++ b/call/rtx_receive_stream.cc @@ -64,7 +64,7 @@ void RtxReceiveStream::OnRtpPacket(const RtpPacketReceived& rtx_packet) { media_packet.SetSequenceNumber((payload[0] << 8) + payload[1]); media_packet.SetPayloadType(it->second); media_packet.set_recovered(true); - media_packet.set_arrival_time_ms(rtx_packet.arrival_time_ms()); + media_packet.set_arrival_time(rtx_packet.arrival_time()); // Skip the RTX header. 
rtc::ArrayView rtx_payload = payload.subview(kRtxHeaderSize); diff --git a/call/rtx_receive_stream_unittest.cc b/call/rtx_receive_stream_unittest.cc index 75086fef9c..b06990820f 100644 --- a/call/rtx_receive_stream_unittest.cc +++ b/call/rtx_receive_stream_unittest.cc @@ -194,9 +194,9 @@ TEST(RtxReceiveStreamTest, PropagatesArrivalTime) { RtxReceiveStream rtx_sink(&media_sink, PayloadTypeMapping(), kMediaSSRC); RtpPacketReceived rtx_packet(nullptr); EXPECT_TRUE(rtx_packet.Parse(rtc::ArrayView(kRtxPacket))); - rtx_packet.set_arrival_time_ms(123); - EXPECT_CALL(media_sink, - OnRtpPacket(Property(&RtpPacketReceived::arrival_time_ms, 123))); + rtx_packet.set_arrival_time(Timestamp::Millis(123)); + EXPECT_CALL(media_sink, OnRtpPacket(Property(&RtpPacketReceived::arrival_time, + Timestamp::Millis(123)))); rtx_sink.OnRtpPacket(rtx_packet); } diff --git a/call/simulated_network.cc b/call/simulated_network.cc index b298fdb4e2..f8a5bd893d 100644 --- a/call/simulated_network.cc +++ b/call/simulated_network.cc @@ -77,6 +77,7 @@ bool CoDelSimulation::DropDequeuedPacket(Timestamp now, } return false; } + RTC_CHECK_NOTREACHED(); } SimulatedNetwork::SimulatedNetwork(Config config, uint64_t random_seed) @@ -87,7 +88,7 @@ SimulatedNetwork::SimulatedNetwork(Config config, uint64_t random_seed) SimulatedNetwork::~SimulatedNetwork() = default; void SimulatedNetwork::SetConfig(const Config& config) { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); config_state_.config = config; // Shallow copy of the struct. 
double prob_loss = config.loss_percent / 100.0; if (config_state_.config.avg_burst_loss_length == -1) { @@ -113,12 +114,12 @@ void SimulatedNetwork::SetConfig(const Config& config) { void SimulatedNetwork::UpdateConfig( std::function config_modifier) { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); config_modifier(&config_state_.config); } void SimulatedNetwork::PauseTransmissionUntil(int64_t until_us) { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); config_state_.pause_transmission_until_us = until_us; } @@ -260,7 +261,7 @@ void SimulatedNetwork::UpdateCapacityQueue(ConfigState state, } SimulatedNetwork::ConfigState SimulatedNetwork::GetConfigState() const { - rtc::CritScope crit(&config_lock_); + MutexLock lock(&config_lock_); return config_state_; } diff --git a/call/simulated_network.h b/call/simulated_network.h index 2ff90ec284..68d066cb82 100644 --- a/call/simulated_network.h +++ b/call/simulated_network.h @@ -17,14 +17,14 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/test/simulated_network.h" #include "api/units/data_size.h" #include "api/units/timestamp.h" -#include "rtc_base/critical_section.h" #include "rtc_base/race_checker.h" #include "rtc_base/random.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" namespace webrtc { // Implementation of the CoDel active queue management algorithm. Loosely based @@ -96,7 +96,7 @@ class SimulatedNetwork : public SimulatedNetworkInterface { RTC_RUN_ON(&process_checker_); ConfigState GetConfigState() const; - rtc::CriticalSection config_lock_; + mutable Mutex config_lock_; // |process_checker_| guards the data structures involved in delay and loss // processes, such as the packet queues. 
diff --git a/call/ssrc_binding_observer.h b/call/ssrc_binding_observer.h deleted file mode 100644 index ada505610f..0000000000 --- a/call/ssrc_binding_observer.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#ifndef CALL_SSRC_BINDING_OBSERVER_H_ -#define CALL_SSRC_BINDING_OBSERVER_H_ - -#include - -namespace webrtc { - -// With newer versions of SDP, SSRC is often not explicitly signaled and must -// be learned on the fly. This happens by correlating packet SSRCs with included -// RTP extension headers like MID and RSID, or by receiving information from -// RTCP messages. -// SsrcBindingObservers will be notified when a new binding is learned, which -// can happen during call setup and/or during the call. 
-class SsrcBindingObserver { - public: - virtual ~SsrcBindingObserver() = default; - - virtual void OnSsrcBoundToRsid(const std::string& rsid, uint32_t ssrc) {} - - virtual void OnSsrcBoundToMid(const std::string& mid, uint32_t ssrc) {} - - virtual void OnSsrcBoundToMidRsid(const std::string& mid, - const std::string& rsid, - uint32_t ssrc) {} - - virtual void OnSsrcBoundToPayloadType(uint8_t payload_type, uint32_t ssrc) {} -}; - -} // namespace webrtc - -#endif // CALL_SSRC_BINDING_OBSERVER_H_ diff --git a/call/syncable.h b/call/syncable.h index 3bbe50c8d1..43b16a0720 100644 --- a/call/syncable.h +++ b/call/syncable.h @@ -37,7 +37,7 @@ class Syncable { virtual absl::optional GetInfo() const = 0; virtual bool GetPlayoutRtpTimestamp(uint32_t* rtp_timestamp, int64_t* time_ms) const = 0; - virtual void SetMinimumPlayoutDelay(int delay_ms) = 0; + virtual bool SetMinimumPlayoutDelay(int delay_ms) = 0; virtual void SetEstimatedPlayoutNtpTimestampMs(int64_t ntp_timestamp_ms, int64_t time_ms) = 0; }; diff --git a/call/test/mock_rtp_transport_controller_send.h b/call/test/mock_rtp_transport_controller_send.h index 308c087a40..b468aa6cb2 100644 --- a/call/test/mock_rtp_transport_controller_send.h +++ b/call/test/mock_rtp_transport_controller_send.h @@ -99,6 +99,7 @@ class MockRtpTransportControllerSend MOCK_METHOD(void, AccountForAudioPacketsInPacedSender, (bool), (override)); MOCK_METHOD(void, IncludeOverheadInPacedSender, (), (override)); MOCK_METHOD(void, OnReceivedPacket, (const ReceivedPacket&), (override)); + MOCK_METHOD(void, EnsureStarted, (), (override)); }; } // namespace webrtc #endif // CALL_TEST_MOCK_RTP_TRANSPORT_CONTROLLER_SEND_H_ diff --git a/call/version.cc b/call/version.cc new file mode 100644 index 0000000000..a76af47b41 --- /dev/null +++ b/call/version.cc @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "call/version.h" + +namespace webrtc { + +// The timestamp is always in UTC. +const char* const kSourceTimestamp = "WebRTC source stamp 2021-07-13T04:01:55"; + +void LoadWebRTCVersionInRegister() { + // Using volatile to instruct the compiler to not optimize `p` away even + // if it looks unused. + const char* volatile p = kSourceTimestamp; + static_cast(p); +} + +} // namespace webrtc diff --git a/call/version.h b/call/version.h new file mode 100644 index 0000000000..d476e0e108 --- /dev/null +++ b/call/version.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef CALL_VERSION_H_ +#define CALL_VERSION_H_ + +// LoadWebRTCVersionInRegistry is a helper function that loads the pointer to +// the WebRTC version string into a register. While this function doesn't do +// anything useful, it is needed in order to avoid that compiler optimizations +// remove the WebRTC version string from the final binary. 
+ +namespace webrtc { + +void LoadWebRTCVersionInRegister(); + +} // namespace webrtc + +#endif // CALL_VERSION_H_ diff --git a/call/video_receive_stream.cc b/call/video_receive_stream.cc index e0f3de366b..d0518b6e0d 100644 --- a/call/video_receive_stream.cc +++ b/call/video_receive_stream.cc @@ -14,10 +14,18 @@ namespace webrtc { +VideoReceiveStream::Decoder::Decoder(SdpVideoFormat video_format, + int payload_type) + : video_format(std::move(video_format)), payload_type(payload_type) {} VideoReceiveStream::Decoder::Decoder() : video_format("Unset") {} VideoReceiveStream::Decoder::Decoder(const Decoder&) = default; VideoReceiveStream::Decoder::~Decoder() = default; +bool VideoReceiveStream::Decoder::operator==(const Decoder& other) const { + return payload_type == other.payload_type && + video_format == other.video_format; +} + std::string VideoReceiveStream::Decoder::ToString() const { char buf[1024]; rtc::SimpleStringBuilder ss(buf); @@ -74,8 +82,10 @@ std::string VideoReceiveStream::Stats::ToString(int64_t time_ms) const { VideoReceiveStream::Config::Config(const Config&) = default; VideoReceiveStream::Config::Config(Config&&) = default; -VideoReceiveStream::Config::Config(Transport* rtcp_send_transport) - : rtcp_send_transport(rtcp_send_transport) {} +VideoReceiveStream::Config::Config(Transport* rtcp_send_transport, + VideoDecoderFactory* decoder_factory) + : decoder_factory(decoder_factory), + rtcp_send_transport(rtcp_send_transport) {} VideoReceiveStream::Config& VideoReceiveStream::Config::operator=(Config&&) = default; diff --git a/call/video_receive_stream.h b/call/video_receive_stream.h index 388c28be24..86e5052151 100644 --- a/call/video_receive_stream.h +++ b/call/video_receive_stream.h @@ -20,18 +20,17 @@ #include "api/call/transport.h" #include "api/crypto/crypto_options.h" -#include "api/crypto/frame_decryptor_interface.h" -#include "api/frame_transformer_interface.h" #include "api/rtp_headers.h" #include "api/rtp_parameters.h" -#include 
"api/transport/rtp/rtp_source.h" #include "api/video/recordable_encoded_frame.h" #include "api/video/video_content_type.h" #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" #include "api/video/video_timing.h" #include "api/video_codecs/sdp_video_format.h" +#include "call/receive_stream.h" #include "call/rtp_config.h" +#include "common_video/frame_counts.h" #include "modules/rtp_rtcp/include/rtcp_statistics.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" @@ -40,7 +39,7 @@ namespace webrtc { class RtpPacketSinkInterface; class VideoDecoderFactory; -class VideoReceiveStream { +class VideoReceiveStream : public MediaReceiveStream { public: // Class for handling moving in/out recording state. struct RecordingState { @@ -52,11 +51,6 @@ class VideoReceiveStream { // Callback stored from the VideoReceiveStream. The VideoReceiveStream // client should not interpret the attribute. std::function callback; - // Memento of internal state in VideoReceiveStream, recording wether - // we're currently causing generation of a keyframe from the sender. Needed - // to avoid sending double keyframe requests. The VideoReceiveStream client - // should not interpret the attribute. - bool keyframe_needed = false; // Memento of when a keyframe request was last sent. The VideoReceiveStream // client should not interpret the attribute. absl::optional last_keyframe_request_ms; @@ -65,15 +59,15 @@ class VideoReceiveStream { // TODO(mflodman) Move all these settings to VideoDecoder and move the // declaration to common_types.h. struct Decoder { + Decoder(SdpVideoFormat video_format, int payload_type); Decoder(); Decoder(const Decoder&); ~Decoder(); + + bool operator==(const Decoder& other) const; + std::string ToString() const; - // Ownership stays with WebrtcVideoEngine (delegated from PeerConnection). - // TODO(nisse): Move one level out, to VideoReceiveStream::Config, and later - // to the configuration of VideoStreamDecoder. 
- VideoDecoderFactory* decoder_factory = nullptr; SdpVideoFormat video_format; // Received RTP packets with this payload type will be sent to this decoder @@ -160,7 +154,8 @@ class VideoReceiveStream { public: Config() = delete; Config(Config&&); - explicit Config(Transport* rtcp_send_transport); + Config(Transport* rtcp_send_transport, + VideoDecoderFactory* decoder_factory = nullptr); Config& operator=(Config&&); Config& operator=(const Config&) = delete; ~Config(); @@ -173,18 +168,18 @@ class VideoReceiveStream { // Decoders for every payload that we can receive. std::vector decoders; + // Ownership stays with WebrtcVideoEngine (delegated from PeerConnection). + VideoDecoderFactory* decoder_factory = nullptr; + // Receive-stream specific RTP settings. - struct Rtp { + struct Rtp : public RtpConfig { Rtp(); Rtp(const Rtp&); ~Rtp(); std::string ToString() const; - // Synchronization source (stream identifier) to be received. - uint32_t remote_ssrc = 0; - - // Sender SSRC used for sending RTCP (such as receiver reports). - uint32_t local_ssrc = 0; + // See NackConfig for description. + NackConfig nack; // See RtcpMode for description. RtcpMode rtcp_mode = RtcpMode::kCompound; @@ -196,15 +191,9 @@ class VideoReceiveStream { bool receiver_reference_time_report = false; } rtcp_xr; - // See draft-holmer-rmcat-transport-wide-cc-extensions for details. - bool transport_cc = false; - // See LntfConfig for description. LntfConfig lntf; - // See NackConfig for description. - NackConfig nack; - // Payload types for ULPFEC and RED, respectively. int ulpfec_payload_type = -1; int red_payload_type = -1; @@ -215,6 +204,10 @@ class VideoReceiveStream { // Set if the stream is protected using FlexFEC. bool protected_by_flexfec = false; + // Optional callback sink to support additional packet handlers such as + // FlexFec. + RtpPacketSinkInterface* packet_sink_ = nullptr; + + // Map from rtx payload type -> media payload type. 
// For RTX to be enabled, both an SSRC and this mapping are needed. std::map rtx_associated_payload_types; @@ -224,9 +217,6 @@ class VideoReceiveStream { // meta data is expected to be present in generic frame descriptor // RTP header extension). std::set raw_payload_types; - - // RTP header extensions used for the received stream. - std::vector extensions; } rtp; // Transport for outgoing packets (RTCP). @@ -252,10 +242,6 @@ class VideoReceiveStream { // used for streaming instead of a real-time call. int target_delay_ms = 0; - // TODO(nisse): Used with VideoDecoderFactory::LegacyCreateVideoDecoder. - // Delete when that method is retired. - std::string stream_id; - // An optional custom frame decryptor that allows the entire frame to be // decrypted in whatever way the caller choses. This is not required by // default. @@ -267,25 +253,9 @@ class VideoReceiveStream { rtc::scoped_refptr frame_transformer; }; - // Starts stream activity. - // When a stream is active, it can receive, process and deliver packets. - virtual void Start() = 0; - // Stops stream activity. - // When a stream is stopped, it can't receive, process or deliver packets. - virtual void Stop() = 0; - // TODO(pbos): Add info on currently-received codec to Stats. virtual Stats GetStats() const = 0; - // RtpDemuxer only forwards a given RTP packet to one sink. However, some - // sinks, such as FlexFEC, might wish to be informed of all of the packets - // a given sink receives (or any set of sinks). They may do so by registering - // themselves as secondary sinks. - virtual void AddSecondarySink(RtpPacketSinkInterface* sink) = 0; - virtual void RemoveSecondarySink(const RtpPacketSinkInterface* sink) = 0; - - virtual std::vector GetSources() const = 0; - // Sets a base minimum for the playout delay. Base minimum delay sets lower // bound on minimum delay value determining lower bound on playout delay. 
// @@ -295,16 +265,6 @@ class VideoReceiveStream { // Returns current value of base minimum delay in milliseconds. virtual int GetBaseMinimumPlayoutDelayMs() const = 0; - // Allows a FrameDecryptor to be attached to a VideoReceiveStream after - // creation without resetting the decoder state. - virtual void SetFrameDecryptor( - rtc::scoped_refptr frame_decryptor) = 0; - - // Allows a frame transformer to be attached to a VideoReceiveStream after - // creation without resetting the decoder state. - virtual void SetDepacketizerToDecoderFrameTransformer( - rtc::scoped_refptr frame_transformer) = 0; - // Sets and returns recording state. The old state is moved out // of the video receive stream and returned to the caller, and |state| // is moved in. If the state's callback is set, it will be called with @@ -324,6 +284,16 @@ class VideoReceiveStream { virtual ~VideoReceiveStream() {} }; +class DEPRECATED_VideoReceiveStream : public VideoReceiveStream { + public: + // RtpDemuxer only forwards a given RTP packet to one sink. However, some + // sinks, such as FlexFEC, might wish to be informed of all of the packets + // a given sink receives (or any set of sinks). They may do so by registering + // themselves as secondary sinks. 
+ virtual void AddSecondarySink(RtpPacketSinkInterface* sink) = 0; + virtual void RemoveSecondarySink(const RtpPacketSinkInterface* sink) = 0; +}; + } // namespace webrtc #endif // CALL_VIDEO_RECEIVE_STREAM_H_ diff --git a/call/video_send_stream.cc b/call/video_send_stream.cc index a4b6744918..25513e4e4c 100644 --- a/call/video_send_stream.cc +++ b/call/video_send_stream.cc @@ -28,6 +28,7 @@ const char* StreamTypeToString(VideoSendStream::StreamStats::StreamType type) { case VideoSendStream::StreamStats::StreamType::kFlexfec: return "flexfec"; } + RTC_CHECK_NOTREACHED(); } } // namespace @@ -50,8 +51,13 @@ std::string VideoSendStream::StreamStats::ToString() const { ss << "retransmit_bps: " << retransmit_bitrate_bps << ", "; ss << "avg_delay_ms: " << avg_delay_ms << ", "; ss << "max_delay_ms: " << max_delay_ms << ", "; - ss << "cum_loss: " << rtcp_stats.packets_lost << ", "; - ss << "max_ext_seq: " << rtcp_stats.extended_highest_sequence_number << ", "; + if (report_block_data) { + ss << "cum_loss: " << report_block_data->report_block().packets_lost + << ", "; + ss << "max_ext_seq: " + << report_block_data->report_block().extended_highest_sequence_number + << ", "; + } ss << "nack: " << rtcp_packet_type_counts.nack_packets << ", "; ss << "fir: " << rtcp_packet_type_counts.fir_packets << ", "; ss << "pli: " << rtcp_packet_type_counts.pli_packets; diff --git a/call/video_send_stream.h b/call/video_send_stream.h index 392c955f47..42e6249fcd 100644 --- a/call/video_send_stream.h +++ b/call/video_send_stream.h @@ -18,10 +18,12 @@ #include #include "absl/types/optional.h" +#include "api/adaptation/resource.h" #include "api/call/transport.h" #include "api/crypto/crypto_options.h" #include "api/frame_transformer_interface.h" #include "api/rtp_parameters.h" +#include "api/scoped_refptr.h" #include "api/video/video_content_type.h" #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" @@ -29,6 +31,7 @@ #include 
"api/video/video_stream_encoder_settings.h" #include "api/video_codecs/video_encoder_config.h" #include "call/rtp_config.h" +#include "common_video/frame_counts.h" #include "common_video/include/quality_limitation_reason.h" #include "modules/rtp_rtcp/include/report_block_data.h" #include "modules/rtp_rtcp/include/rtcp_statistics.h" @@ -79,7 +82,6 @@ class VideoSendStream { uint64_t total_packet_send_delay_ms = 0; StreamDataCounters rtp_stats; RtcpPacketTypeCounter rtcp_packet_type_counts; - RtcpStatistics rtcp_stats; // A snapshot of the most recent Report Block with additional data of // interest to statistics. Used to implement RTCRemoteInboundRtpStreamStats. absl::optional report_block_data; @@ -105,6 +107,7 @@ class VideoSendStream { uint64_t total_encode_time_ms = 0; // https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodedbytestarget uint64_t total_encoded_bytes_target = 0; + uint32_t frames = 0; uint32_t frames_dropped_by_capturer = 0; uint32_t frames_dropped_by_encoder_queue = 0; uint32_t frames_dropped_by_rate_limiter = 0; @@ -215,6 +218,24 @@ class VideoSendStream { // When a stream is stopped, it can't receive, process or deliver packets. virtual void Stop() = 0; + // Accessor for determining if the stream is active. This is an inexpensive + // call that must be made on the same thread as `Start()` and `Stop()` methods + // are called on and will return `true` iff activity has been started either + // via `Start()` or `UpdateActiveSimulcastLayers()`. If activity is either + // stopped or is in the process of being stopped as a result of a call to + // either `Stop()` or `UpdateActiveSimulcastLayers()` where all layers were + // deactivated, the return value will be `false`. + virtual bool started() = 0; + + // If the resource is overusing, the VideoSendStream will try to reduce + // resolution or frame rate until no resource is overusing. 
+ // TODO(https://crbug.com/webrtc/11565): When the ResourceAdaptationProcessor + // is moved to Call this method could be deleted altogether in favor of + // Call-level APIs only. + virtual void AddAdaptationResource(rtc::scoped_refptr resource) = 0; + virtual std::vector> + GetAdaptationResources() = 0; + virtual void SetSource( rtc::VideoSourceInterface* source, const DegradationPreference& degradation_preference) = 0; diff --git a/common_audio/BUILD.gn b/common_audio/BUILD.gn index 72eed1f003..5b1e581410 100644 --- a/common_audio/BUILD.gn +++ b/common_audio/BUILD.gn @@ -54,10 +54,9 @@ rtc_library("common_audio") { "../rtc_base/system:arch", "../rtc_base/system:file_wrapper", "../system_wrappers", - "../system_wrappers:cpu_features_api", "third_party/ooura:fft_size_256", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] defines = [] @@ -67,6 +66,7 @@ rtc_library("common_audio") { if (current_cpu == "x86" || current_cpu == "x64") { deps += [ ":common_audio_sse2" ] + deps += [ ":common_audio_avx2" ] } } @@ -184,7 +184,6 @@ rtc_library("common_audio_c") { "../rtc_base:sanitizer", "../rtc_base/system:arch", "../system_wrappers", - "../system_wrappers:cpu_features_api", "third_party/ooura:fft_size_256", "third_party/spl_sqrt_floor", ] @@ -231,10 +230,11 @@ rtc_library("fir_filter_factory") { "../rtc_base:checks", "../rtc_base:rtc_base_approved", "../rtc_base/system:arch", - "../system_wrappers:cpu_features_api", + "../system_wrappers", ] if (current_cpu == "x86" || current_cpu == "x64") { deps += [ ":common_audio_sse2" ] + deps += [ ":common_audio_avx2" ] } if (rtc_build_with_neon) { deps += [ ":common_audio_neon" ] @@ -261,6 +261,31 @@ if (current_cpu == "x86" || current_cpu == "x64") { "../rtc_base/memory:aligned_malloc", ] } + + rtc_library("common_audio_avx2") { + sources = [ + "fir_filter_avx2.cc", + "fir_filter_avx2.h", + "resampler/sinc_resampler_avx2.cc", + ] + + if (is_win) { + cflags = [ 
"/arch:AVX2" ] + } else { + cflags = [ + "-mavx2", + "-mfma", + ] + } + + deps = [ + ":fir_filter", + ":sinc_resampler", + "../rtc_base:checks", + "../rtc_base:rtc_base_approved", + "../rtc_base/memory:aligned_malloc", + ] + } } if (rtc_build_with_neon) { @@ -310,7 +335,7 @@ if (rtc_build_with_neon) { } } -if (rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { rtc_test("common_audio_unittests") { visibility += webrtc_default_visibility testonly = true @@ -356,7 +381,7 @@ if (rtc_include_tests) { "../rtc_base:rtc_base_approved", "../rtc_base:rtc_base_tests_utils", "../rtc_base/system:arch", - "../system_wrappers:cpu_features_api", + "../system_wrappers", "../test:fileutils", "../test:rtc_expect_death", "../test:test_main", diff --git a/common_audio/OWNERS b/common_audio/OWNERS index 7c9c9af12a..4cb53169b3 100644 --- a/common_audio/OWNERS +++ b/common_audio/OWNERS @@ -1,2 +1,3 @@ henrik.lundin@webrtc.org -kwiberg@webrtc.org +minyue@webrtc.org +peah@webrtc.org diff --git a/common_audio/fir_filter_avx2.cc b/common_audio/fir_filter_avx2.cc new file mode 100644 index 0000000000..26468e2981 --- /dev/null +++ b/common_audio/fir_filter_avx2.cc @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "common_audio/fir_filter_avx2.h" + +#include +#include +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/memory/aligned_malloc.h" + +namespace webrtc { + +FIRFilterAVX2::FIRFilterAVX2(const float* unaligned_coefficients, + size_t unaligned_coefficients_length, + size_t max_input_length) + : // Closest higher multiple of eight. 
+ coefficients_length_((unaligned_coefficients_length + 7) & ~0x07), + state_length_(coefficients_length_ - 1), + coefficients_(static_cast( + AlignedMalloc(sizeof(float) * coefficients_length_, 32))), + state_(static_cast( + AlignedMalloc(sizeof(float) * (max_input_length + state_length_), + 32))) { + // Add zeros at the end of the coefficients. + RTC_DCHECK_GE(coefficients_length_, unaligned_coefficients_length); + size_t padding = coefficients_length_ - unaligned_coefficients_length; + memset(coefficients_.get(), 0, padding * sizeof(coefficients_[0])); + // The coefficients are reversed to compensate for the order in which the + // input samples are acquired (most recent last). + for (size_t i = 0; i < unaligned_coefficients_length; ++i) { + coefficients_[i + padding] = + unaligned_coefficients[unaligned_coefficients_length - i - 1]; + } + memset(state_.get(), 0, + (max_input_length + state_length_) * sizeof(state_[0])); +} + +FIRFilterAVX2::~FIRFilterAVX2() = default; + +void FIRFilterAVX2::Filter(const float* in, size_t length, float* out) { + RTC_DCHECK_GT(length, 0); + + memcpy(&state_[state_length_], in, length * sizeof(*in)); + + // Convolves the input signal |in| with the filter kernel |coefficients_| + // taking into account the previous state. + for (size_t i = 0; i < length; ++i) { + float* in_ptr = &state_[i]; + float* coef_ptr = coefficients_.get(); + + __m256 m_sum = _mm256_setzero_ps(); + __m256 m_in; + + // Depending on if the pointer is aligned with 32 bytes or not it is loaded + // differently. 
+ if (reinterpret_cast(in_ptr) & 0x1F) { + for (size_t j = 0; j < coefficients_length_; j += 8) { + m_in = _mm256_loadu_ps(in_ptr + j); + m_sum = _mm256_fmadd_ps(m_in, _mm256_load_ps(coef_ptr + j), m_sum); + } + } else { + for (size_t j = 0; j < coefficients_length_; j += 8) { + m_in = _mm256_load_ps(in_ptr + j); + m_sum = _mm256_fmadd_ps(m_in, _mm256_load_ps(coef_ptr + j), m_sum); + } + } + __m128 m128_sum = _mm_add_ps(_mm256_extractf128_ps(m_sum, 0), + _mm256_extractf128_ps(m_sum, 1)); + m128_sum = _mm_add_ps(_mm_movehl_ps(m128_sum, m128_sum), m128_sum); + _mm_store_ss(out + i, + _mm_add_ss(m128_sum, _mm_shuffle_ps(m128_sum, m128_sum, 1))); + } + + // Update current state. + memmove(state_.get(), &state_[length], state_length_ * sizeof(state_[0])); +} + +} // namespace webrtc diff --git a/common_audio/fir_filter_avx2.h b/common_audio/fir_filter_avx2.h new file mode 100644 index 0000000000..893b60bf6e --- /dev/null +++ b/common_audio/fir_filter_avx2.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef COMMON_AUDIO_FIR_FILTER_AVX2_H_ +#define COMMON_AUDIO_FIR_FILTER_AVX2_H_ + +#include + +#include + +#include "common_audio/fir_filter.h" +#include "rtc_base/memory/aligned_malloc.h" + +namespace webrtc { + +class FIRFilterAVX2 : public FIRFilter { + public: + FIRFilterAVX2(const float* coefficients, + size_t coefficients_length, + size_t max_input_length); + ~FIRFilterAVX2() override; + + void Filter(const float* in, size_t length, float* out) override; + + private: + const size_t coefficients_length_; + const size_t state_length_; + std::unique_ptr coefficients_; + std::unique_ptr state_; +}; + +} // namespace webrtc + +#endif // COMMON_AUDIO_FIR_FILTER_AVX2_H_ diff --git a/common_audio/fir_filter_factory.cc b/common_audio/fir_filter_factory.cc index 19528e312e..4bcf05245f 100644 --- a/common_audio/fir_filter_factory.cc +++ b/common_audio/fir_filter_factory.cc @@ -17,6 +17,7 @@ #if defined(WEBRTC_HAS_NEON) #include "common_audio/fir_filter_neon.h" #elif defined(WEBRTC_ARCH_X86_FAMILY) +#include "common_audio/fir_filter_avx2.h" #include "common_audio/fir_filter_sse.h" #include "system_wrappers/include/cpu_features_wrapper.h" // kSSE2, WebRtc_G... #endif @@ -34,18 +35,16 @@ FIRFilter* CreateFirFilter(const float* coefficients, FIRFilter* filter = nullptr; // If we know the minimum architecture at compile time, avoid CPU detection. #if defined(WEBRTC_ARCH_X86_FAMILY) -#if defined(__SSE2__) - filter = - new FIRFilterSSE2(coefficients, coefficients_length, max_input_length); -#else // x86 CPU detection required. 
- if (WebRtc_GetCPUInfo(kSSE2)) { + if (GetCPUInfo(kAVX2)) { + filter = + new FIRFilterAVX2(coefficients, coefficients_length, max_input_length); + } else if (GetCPUInfo(kSSE2)) { filter = new FIRFilterSSE2(coefficients, coefficients_length, max_input_length); } else { filter = new FIRFilterC(coefficients, coefficients_length); } -#endif #elif defined(WEBRTC_HAS_NEON) filter = new FIRFilterNEON(coefficients, coefficients_length, max_input_length); diff --git a/common_audio/resampler/include/resampler.h b/common_audio/resampler/include/resampler.h index 04c487b331..41940f9a12 100644 --- a/common_audio/resampler/include/resampler.h +++ b/common_audio/resampler/include/resampler.h @@ -90,8 +90,8 @@ class Resampler { size_t num_channels_; // Extra instance for stereo - Resampler* slave_left_; - Resampler* slave_right_; + Resampler* helper_left_; + Resampler* helper_right_; }; } // namespace webrtc diff --git a/common_audio/resampler/resampler.cc b/common_audio/resampler/resampler.cc index ce38ef56de..0fdb249052 100644 --- a/common_audio/resampler/resampler.cc +++ b/common_audio/resampler/resampler.cc @@ -37,8 +37,8 @@ Resampler::Resampler() my_out_frequency_khz_(0), my_mode_(kResamplerMode1To1), num_channels_(0), - slave_left_(nullptr), - slave_right_(nullptr) {} + helper_left_(nullptr), + helper_right_(nullptr) {} Resampler::Resampler(int inFreq, int outFreq, size_t num_channels) : Resampler() { @@ -61,11 +61,11 @@ Resampler::~Resampler() { if (out_buffer_) { free(out_buffer_); } - if (slave_left_) { - delete slave_left_; + if (helper_left_) { + delete helper_left_; } - if (slave_right_) { - delete slave_right_; + if (helper_right_) { + delete helper_right_; } } @@ -120,13 +120,13 @@ int Resampler::Reset(int inFreq, int outFreq, size_t num_channels) { free(out_buffer_); out_buffer_ = nullptr; } - if (slave_left_) { - delete slave_left_; - slave_left_ = nullptr; + if (helper_left_) { + delete helper_left_; + helper_left_ = nullptr; } - if (slave_right_) { - delete 
slave_right_; - slave_right_ = nullptr; + if (helper_right_) { + delete helper_right_; + helper_right_ = nullptr; } in_buffer_size_ = 0; @@ -140,8 +140,8 @@ int Resampler::Reset(int inFreq, int outFreq, size_t num_channels) { if (num_channels_ == 2) { // Create two mono resamplers. - slave_left_ = new Resampler(inFreq, outFreq, 1); - slave_right_ = new Resampler(inFreq, outFreq, 1); + helper_left_ = new Resampler(inFreq, outFreq, 1); + helper_right_ = new Resampler(inFreq, outFreq, 1); } // Now create the states we need. @@ -401,7 +401,7 @@ int Resampler::Push(const int16_t* samplesIn, size_t maxLen, size_t& outLen) { if (num_channels_ == 2) { - // Split up the signal and call the slave object for each channel + // Split up the signal and call the helper object for each channel int16_t* left = static_cast(malloc(lengthIn * sizeof(int16_t) / 2)); int16_t* right = @@ -422,10 +422,10 @@ int Resampler::Push(const int16_t* samplesIn, size_t actualOutLen_left = 0; size_t actualOutLen_right = 0; // Do resampling for right channel - res |= slave_left_->Push(left, lengthIn, out_left, maxLen / 2, - actualOutLen_left); - res |= slave_right_->Push(right, lengthIn, out_right, maxLen / 2, - actualOutLen_right); + res |= helper_left_->Push(left, lengthIn, out_left, maxLen / 2, + actualOutLen_left); + res |= helper_right_->Push(right, lengthIn, out_right, maxLen / 2, + actualOutLen_right); if (res || (actualOutLen_left != actualOutLen_right)) { free(left); free(right); @@ -916,7 +916,6 @@ int Resampler::Push(const int16_t* samplesIn, outLen = (lengthIn * 8) / 11; free(tmp_mem); return 0; - break; } return 0; } diff --git a/common_audio/resampler/sinc_resampler.cc b/common_audio/resampler/sinc_resampler.cc index 21707e9e4e..4fa78c5ede 100644 --- a/common_audio/resampler/sinc_resampler.cc +++ b/common_audio/resampler/sinc_resampler.cc @@ -122,28 +122,22 @@ double SincScaleFactor(double io_ratio) { const size_t SincResampler::kKernelSize; // If we know the minimum architecture at 
compile time, avoid CPU detection. -#if defined(WEBRTC_ARCH_X86_FAMILY) -#if defined(__SSE2__) -#define CONVOLVE_FUNC Convolve_SSE -void SincResampler::InitializeCPUSpecificFeatures() {} -#else -// x86 CPU detection required. Function will be set by -// InitializeCPUSpecificFeatures(). -// TODO(dalecurtis): Once Chrome moves to an SSE baseline this can be removed. -#define CONVOLVE_FUNC convolve_proc_ - void SincResampler::InitializeCPUSpecificFeatures() { - convolve_proc_ = WebRtc_GetCPUInfo(kSSE2) ? Convolve_SSE : Convolve_C; -} -#endif -#elif defined(WEBRTC_HAS_NEON) -#define CONVOLVE_FUNC Convolve_NEON -void SincResampler::InitializeCPUSpecificFeatures() {} +#if defined(WEBRTC_HAS_NEON) + convolve_proc_ = Convolve_NEON; +#elif defined(WEBRTC_ARCH_X86_FAMILY) + // Using AVX2 instead of SSE2 when AVX2 supported. + if (GetCPUInfo(kAVX2)) + convolve_proc_ = Convolve_AVX2; + else if (GetCPUInfo(kSSE2)) + convolve_proc_ = Convolve_SSE; + else + convolve_proc_ = Convolve_C; #else -// Unknown architecture. -#define CONVOLVE_FUNC Convolve_C -void SincResampler::InitializeCPUSpecificFeatures() {} + // Unknown architecture. + convolve_proc_ = Convolve_C; #endif +} SincResampler::SincResampler(double io_sample_rate_ratio, size_t request_frames, @@ -152,24 +146,20 @@ SincResampler::SincResampler(double io_sample_rate_ratio, read_cb_(read_cb), request_frames_(request_frames), input_buffer_size_(request_frames_ + kKernelSize), - // Create input buffers with a 16-byte alignment for SSE optimizations. + // Create input buffers with a 32-byte alignment for SIMD optimizations. 
kernel_storage_(static_cast( - AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))), + AlignedMalloc(sizeof(float) * kKernelStorageSize, 32))), kernel_pre_sinc_storage_(static_cast( - AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))), + AlignedMalloc(sizeof(float) * kKernelStorageSize, 32))), kernel_window_storage_(static_cast( - AlignedMalloc(sizeof(float) * kKernelStorageSize, 16))), + AlignedMalloc(sizeof(float) * kKernelStorageSize, 32))), input_buffer_(static_cast( - AlignedMalloc(sizeof(float) * input_buffer_size_, 16))), -#if defined(WEBRTC_ARCH_X86_FAMILY) && !defined(__SSE2__) + AlignedMalloc(sizeof(float) * input_buffer_size_, 32))), convolve_proc_(nullptr), -#endif r1_(input_buffer_.get()), r2_(input_buffer_.get() + kKernelSize / 2) { -#if defined(WEBRTC_ARCH_X86_FAMILY) && !defined(__SSE2__) InitializeCPUSpecificFeatures(); RTC_DCHECK(convolve_proc_); -#endif RTC_DCHECK_GT(request_frames_, 0); Flush(); RTC_DCHECK_GT(block_size_, kKernelSize); @@ -302,10 +292,10 @@ void SincResampler::Resample(size_t frames, float* destination) { const float* const k1 = kernel_ptr + offset_idx * kKernelSize; const float* const k2 = k1 + kKernelSize; - // Ensure |k1|, |k2| are 16-byte aligned for SIMD usage. Should always be - // true so long as kKernelSize is a multiple of 16. - RTC_DCHECK_EQ(0, reinterpret_cast(k1) % 16); - RTC_DCHECK_EQ(0, reinterpret_cast(k2) % 16); + // Ensure |k1|, |k2| are 32-byte aligned for SIMD usage. Should always be + // true so long as kKernelSize is a multiple of 32. + RTC_DCHECK_EQ(0, reinterpret_cast(k1) % 32); + RTC_DCHECK_EQ(0, reinterpret_cast(k2) % 32); // Initialize input pointer based on quantized |virtual_source_idx_|. 
const float* const input_ptr = r1_ + source_idx; @@ -314,7 +304,7 @@ void SincResampler::Resample(size_t frames, float* destination) { const double kernel_interpolation_factor = virtual_offset_idx - offset_idx; *destination++ = - CONVOLVE_FUNC(input_ptr, k1, k2, kernel_interpolation_factor); + convolve_proc_(input_ptr, k1, k2, kernel_interpolation_factor); // Advance the virtual index. virtual_source_idx_ += current_io_ratio; diff --git a/common_audio/resampler/sinc_resampler.h b/common_audio/resampler/sinc_resampler.h index 5181c18dac..a72a0c62c4 100644 --- a/common_audio/resampler/sinc_resampler.h +++ b/common_audio/resampler/sinc_resampler.h @@ -112,6 +112,10 @@ class SincResampler { const float* k1, const float* k2, double kernel_interpolation_factor); + static float Convolve_AVX2(const float* input_ptr, + const float* k1, + const float* k2, + double kernel_interpolation_factor); #elif defined(WEBRTC_HAS_NEON) static float Convolve_NEON(const float* input_ptr, const float* k1, @@ -155,13 +159,11 @@ class SincResampler { // TODO(ajm): Move to using a global static which must only be initialized // once by the user. We're not doing this initially, because we don't have // e.g. a LazyInstance helper in webrtc. -#if defined(WEBRTC_ARCH_X86_FAMILY) && !defined(__SSE2__) typedef float (*ConvolveProc)(const float*, const float*, const float*, double); ConvolveProc convolve_proc_; -#endif // Pointers to the various regions inside |input_buffer_|. See the diagram at // the top of the .cc file for more information. diff --git a/common_audio/resampler/sinc_resampler_avx2.cc b/common_audio/resampler/sinc_resampler_avx2.cc new file mode 100644 index 0000000000..3eb5d4a1b1 --- /dev/null +++ b/common_audio/resampler/sinc_resampler_avx2.cc @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include +#include +#include + +#include "common_audio/resampler/sinc_resampler.h" + +namespace webrtc { + +float SincResampler::Convolve_AVX2(const float* input_ptr, + const float* k1, + const float* k2, + double kernel_interpolation_factor) { + __m256 m_input; + __m256 m_sums1 = _mm256_setzero_ps(); + __m256 m_sums2 = _mm256_setzero_ps(); + + // Based on |input_ptr| alignment, we need to use loadu or load. Unrolling + // these loops has not been tested or benchmarked. + bool aligned_input = (reinterpret_cast(input_ptr) & 0x1F) == 0; + if (!aligned_input) { + for (size_t i = 0; i < kKernelSize; i += 8) { + m_input = _mm256_loadu_ps(input_ptr + i); + m_sums1 = _mm256_fmadd_ps(m_input, _mm256_load_ps(k1 + i), m_sums1); + m_sums2 = _mm256_fmadd_ps(m_input, _mm256_load_ps(k2 + i), m_sums2); + } + } else { + for (size_t i = 0; i < kKernelSize; i += 8) { + m_input = _mm256_load_ps(input_ptr + i); + m_sums1 = _mm256_fmadd_ps(m_input, _mm256_load_ps(k1 + i), m_sums1); + m_sums2 = _mm256_fmadd_ps(m_input, _mm256_load_ps(k2 + i), m_sums2); + } + } + + // Linearly interpolate the two "convolutions". + __m128 m128_sums1 = _mm_add_ps(_mm256_extractf128_ps(m_sums1, 0), + _mm256_extractf128_ps(m_sums1, 1)); + __m128 m128_sums2 = _mm_add_ps(_mm256_extractf128_ps(m_sums2, 0), + _mm256_extractf128_ps(m_sums2, 1)); + m128_sums1 = _mm_mul_ps( + m128_sums1, + _mm_set_ps1(static_cast(1.0 - kernel_interpolation_factor))); + m128_sums2 = _mm_mul_ps( + m128_sums2, _mm_set_ps1(static_cast(kernel_interpolation_factor))); + m128_sums1 = _mm_add_ps(m128_sums1, m128_sums2); + + // Sum components together. 
+ float result; + m128_sums2 = _mm_add_ps(_mm_movehl_ps(m128_sums1, m128_sums1), m128_sums1); + _mm_store_ss(&result, _mm_add_ss(m128_sums2, + _mm_shuffle_ps(m128_sums2, m128_sums2, 1))); + + return result; +} + +} // namespace webrtc diff --git a/common_audio/resampler/sinc_resampler_unittest.cc b/common_audio/resampler/sinc_resampler_unittest.cc index b067b23b88..92dff70131 100644 --- a/common_audio/resampler/sinc_resampler_unittest.cc +++ b/common_audio/resampler/sinc_resampler_unittest.cc @@ -23,7 +23,6 @@ #include #include "common_audio/resampler/sinusoidal_linear_chirp_source.h" -#include "rtc_base/stringize_macros.h" #include "rtc_base/system/arch.h" #include "rtc_base/time_utils.h" #include "system_wrappers/include/cpu_features_wrapper.h" @@ -116,22 +115,14 @@ TEST(SincResamplerTest, DISABLED_SetRatioBench) { printf("SetRatio() took %.2fms.\n", total_time_c_us / 1000); } -// Define platform independent function name for Convolve* tests. -#if defined(WEBRTC_ARCH_X86_FAMILY) -#define CONVOLVE_FUNC Convolve_SSE -#elif defined(WEBRTC_ARCH_ARM_V7) -#define CONVOLVE_FUNC Convolve_NEON -#endif - // Ensure various optimized Convolve() methods return the same value. Only run // this test if other optimized methods exist, otherwise the default Convolve() // will be tested by the parameterized SincResampler tests below. -#if defined(CONVOLVE_FUNC) TEST(SincResamplerTest, Convolve) { #if defined(WEBRTC_ARCH_X86_FAMILY) - ASSERT_TRUE(WebRtc_GetCPUInfo(kSSE2)); + ASSERT_TRUE(GetCPUInfo(kSSE2)); #elif defined(WEBRTC_ARCH_ARM_V7) - ASSERT_TRUE(WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON); + ASSERT_TRUE(GetCPUFeaturesARM() & kCPUFeatureNEON); #endif // Initialize a dummy resampler. 
@@ -148,7 +139,7 @@ TEST(SincResamplerTest, Convolve) { double result = resampler.Convolve_C( resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), kKernelInterpolationFactor); - double result2 = resampler.CONVOLVE_FUNC( + double result2 = resampler.convolve_proc_( resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), kKernelInterpolationFactor); EXPECT_NEAR(result2, result, kEpsilon); @@ -157,12 +148,11 @@ TEST(SincResamplerTest, Convolve) { result = resampler.Convolve_C( resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), kKernelInterpolationFactor); - result2 = resampler.CONVOLVE_FUNC( + result2 = resampler.convolve_proc_( resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), kKernelInterpolationFactor); EXPECT_NEAR(result2, result, kEpsilon); } -#endif // Benchmark for the various Convolve() methods. Make sure to build with // branding=Chrome so that RTC_DCHECKs are compiled out when benchmarking. @@ -190,46 +180,45 @@ TEST(SincResamplerTest, ConvolveBenchmark) { (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec; printf("Convolve_C took %.2fms.\n", total_time_c_us / 1000); -#if defined(CONVOLVE_FUNC) #if defined(WEBRTC_ARCH_X86_FAMILY) - ASSERT_TRUE(WebRtc_GetCPUInfo(kSSE2)); + ASSERT_TRUE(GetCPUInfo(kSSE2)); #elif defined(WEBRTC_ARCH_ARM_V7) - ASSERT_TRUE(WebRtc_GetCPUFeaturesARM() & kCPUFeatureNEON); + ASSERT_TRUE(GetCPUFeaturesARM() & kCPUFeatureNEON); #endif // Benchmark with unaligned input pointer. 
start = rtc::TimeNanos(); for (int j = 0; j < kConvolveIterations; ++j) { - resampler.CONVOLVE_FUNC( + resampler.convolve_proc_( resampler.kernel_storage_.get() + 1, resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), kKernelInterpolationFactor); } double total_time_optimized_unaligned_us = (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec; - printf(STRINGIZE(CONVOLVE_FUNC) "(unaligned) took %.2fms; which is %.2fx " - "faster than Convolve_C.\n", total_time_optimized_unaligned_us / 1000, - total_time_c_us / total_time_optimized_unaligned_us); + printf( + "convolve_proc_(unaligned) took %.2fms; which is %.2fx " + "faster than Convolve_C.\n", + total_time_optimized_unaligned_us / 1000, + total_time_c_us / total_time_optimized_unaligned_us); // Benchmark with aligned input pointer. start = rtc::TimeNanos(); for (int j = 0; j < kConvolveIterations; ++j) { - resampler.CONVOLVE_FUNC( + resampler.convolve_proc_( resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), resampler.kernel_storage_.get(), kKernelInterpolationFactor); } double total_time_optimized_aligned_us = (rtc::TimeNanos() - start) / rtc::kNumNanosecsPerMicrosec; - printf(STRINGIZE(CONVOLVE_FUNC) " (aligned) took %.2fms; which is %.2fx " - "faster than Convolve_C and %.2fx faster than " - STRINGIZE(CONVOLVE_FUNC) " (unaligned).\n", - total_time_optimized_aligned_us / 1000, - total_time_c_us / total_time_optimized_aligned_us, - total_time_optimized_unaligned_us / total_time_optimized_aligned_us); -#endif + printf( + "convolve_proc_ (aligned) took %.2fms; which is %.2fx " + "faster than Convolve_C and %.2fx faster than " + "convolve_proc_ (unaligned).\n", + total_time_optimized_aligned_us / 1000, + total_time_c_us / total_time_optimized_aligned_us, + total_time_optimized_unaligned_us / total_time_optimized_aligned_us); } -#undef CONVOLVE_FUNC - typedef std::tuple SincResamplerTestData; class SincResamplerTest : public ::testing::TestWithParam { @@ -352,7 +341,7 @@ 
INSTANTIATE_TEST_SUITE_P( std::make_tuple(16000, 44100, kResamplingRMSError, -62.54), std::make_tuple(22050, 44100, kResamplingRMSError, -73.53), std::make_tuple(32000, 44100, kResamplingRMSError, -63.32), - std::make_tuple(44100, 44100, kResamplingRMSError, -73.53), + std::make_tuple(44100, 44100, kResamplingRMSError, -73.52), std::make_tuple(48000, 44100, -15.01, -64.04), std::make_tuple(96000, 44100, -18.49, -25.51), std::make_tuple(192000, 44100, -20.50, -13.31), @@ -360,7 +349,7 @@ INSTANTIATE_TEST_SUITE_P( // To 48kHz std::make_tuple(8000, 48000, kResamplingRMSError, -63.43), std::make_tuple(11025, 48000, kResamplingRMSError, -62.61), - std::make_tuple(16000, 48000, kResamplingRMSError, -63.96), + std::make_tuple(16000, 48000, kResamplingRMSError, -63.95), std::make_tuple(22050, 48000, kResamplingRMSError, -62.42), std::make_tuple(32000, 48000, kResamplingRMSError, -64.04), std::make_tuple(44100, 48000, kResamplingRMSError, -62.63), diff --git a/common_audio/signal_processing/division_operations.c b/common_audio/signal_processing/division_operations.c index c6195e7999..4764ddfccd 100644 --- a/common_audio/signal_processing/division_operations.c +++ b/common_audio/signal_processing/division_operations.c @@ -98,8 +98,7 @@ int32_t WebRtcSpl_DivResultInQ31(int32_t num, int32_t den) return div; } -int32_t RTC_NO_SANITIZE("signed-integer-overflow") // bugs.webrtc.org/5486 -WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low) +int32_t WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low) { int16_t approx, tmp_hi, tmp_low, num_hi, num_low; int32_t tmpW32; @@ -111,8 +110,8 @@ WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low) tmpW32 = (den_hi * approx << 1) + ((den_low * approx >> 15) << 1); // tmpW32 = den * approx - tmpW32 = (int32_t)0x7fffffffL - tmpW32; // result in Q30 (tmpW32 = 2.0-(den*approx)) - // UBSan: 2147483647 - -2 cannot be represented in type 'int' + // result in Q30 (tmpW32 = 2.0-(den*approx)) + tmpW32 = 
(int32_t)((int64_t)0x7fffffffL - tmpW32); // Store tmpW32 in hi and low format tmp_hi = (int16_t)(tmpW32 >> 16); diff --git a/common_audio/signal_processing/include/signal_processing_library.h b/common_audio/signal_processing/include/signal_processing_library.h index 4ad92c4c2b..0c13071a27 100644 --- a/common_audio/signal_processing/include/signal_processing_library.h +++ b/common_audio/signal_processing/include/signal_processing_library.h @@ -228,6 +228,25 @@ int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length); int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length); #endif +// Returns both the minimum and maximum values of a 16-bit vector. +// +// Input: +// - vector : 16-bit input vector. +// - length : Number of samples in vector. +// Ouput: +// - max_val : Maximum sample value in |vector|. +// - min_val : Minimum sample value in |vector|. +void WebRtcSpl_MinMaxW16(const int16_t* vector, + size_t length, + int16_t* min_val, + int16_t* max_val); +#if defined(WEBRTC_HAS_NEON) +void WebRtcSpl_MinMaxW16Neon(const int16_t* vector, + size_t length, + int16_t* min_val, + int16_t* max_val); +#endif + // Returns the vector index to the largest absolute value of a 16-bit vector. // // Input: @@ -240,6 +259,17 @@ int32_t WebRtcSpl_MinValueW32_mips(const int32_t* vector, size_t length); // -32768 presenting an int16 absolute value of 32767). size_t WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length); +// Returns the element with the largest absolute value of a 16-bit vector. Note +// that this function can return a negative value. +// +// Input: +// - vector : 16-bit input vector. +// - length : Number of samples in vector. +// +// Return value : The element with the largest absolute value. Note that this +// may be a negative value. +int16_t WebRtcSpl_MaxAbsElementW16(const int16_t* vector, size_t length); + // Returns the vector index to the maximum sample value of a 16-bit vector. 
// // Input: diff --git a/common_audio/signal_processing/min_max_operations.c b/common_audio/signal_processing/min_max_operations.c index d249a02d40..1b9542e7ef 100644 --- a/common_audio/signal_processing/min_max_operations.c +++ b/common_audio/signal_processing/min_max_operations.c @@ -155,6 +155,15 @@ size_t WebRtcSpl_MaxAbsIndexW16(const int16_t* vector, size_t length) { return index; } +int16_t WebRtcSpl_MaxAbsElementW16(const int16_t* vector, size_t length) { + int16_t min_val, max_val; + WebRtcSpl_MinMaxW16(vector, length, &min_val, &max_val); + if (min_val == max_val || min_val < -max_val) { + return min_val; + } + return max_val; +} + // Index of maximum value in a word16 vector. size_t WebRtcSpl_MaxIndexW16(const int16_t* vector, size_t length) { size_t i = 0, index = 0; @@ -222,3 +231,26 @@ size_t WebRtcSpl_MinIndexW32(const int32_t* vector, size_t length) { return index; } + +// Finds both the minimum and maximum elements in an array of 16-bit integers. +void WebRtcSpl_MinMaxW16(const int16_t* vector, size_t length, + int16_t* min_val, int16_t* max_val) { +#if defined(WEBRTC_HAS_NEON) + return WebRtcSpl_MinMaxW16Neon(vector, length, min_val, max_val); +#else + int16_t minimum = WEBRTC_SPL_WORD16_MAX; + int16_t maximum = WEBRTC_SPL_WORD16_MIN; + size_t i = 0; + + RTC_DCHECK_GT(length, 0); + + for (i = 0; i < length; i++) { + if (vector[i] < minimum) + minimum = vector[i]; + if (vector[i] > maximum) + maximum = vector[i]; + } + *min_val = minimum; + *max_val = maximum; +#endif +} diff --git a/common_audio/signal_processing/min_max_operations_neon.c b/common_audio/signal_processing/min_max_operations_neon.c index 53217df7be..e5b4b7c71b 100644 --- a/common_audio/signal_processing/min_max_operations_neon.c +++ b/common_audio/signal_processing/min_max_operations_neon.c @@ -281,3 +281,53 @@ int32_t WebRtcSpl_MinValueW32Neon(const int32_t* vector, size_t length) { return minimum; } +// Finds both the minimum and maximum elements in an array of 16-bit integers. 
+void WebRtcSpl_MinMaxW16Neon(const int16_t* vector, size_t length, + int16_t* min_val, int16_t* max_val) { + int16_t minimum = WEBRTC_SPL_WORD16_MAX; + int16_t maximum = WEBRTC_SPL_WORD16_MIN; + size_t i = 0; + size_t residual = length & 0x7; + + RTC_DCHECK_GT(length, 0); + + const int16_t* p_start = vector; + int16x8_t min16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MAX); + int16x8_t max16x8 = vdupq_n_s16(WEBRTC_SPL_WORD16_MIN); + + // First part, unroll the loop 8 times. + for (i = 0; i < length - residual; i += 8) { + int16x8_t in16x8 = vld1q_s16(p_start); + min16x8 = vminq_s16(min16x8, in16x8); + max16x8 = vmaxq_s16(max16x8, in16x8); + p_start += 8; + } + +#if defined(WEBRTC_ARCH_ARM64) + minimum = vminvq_s16(min16x8); + maximum = vmaxvq_s16(max16x8); +#else + int16x4_t min16x4 = vmin_s16(vget_low_s16(min16x8), vget_high_s16(min16x8)); + min16x4 = vpmin_s16(min16x4, min16x4); + min16x4 = vpmin_s16(min16x4, min16x4); + + minimum = vget_lane_s16(min16x4, 0); + + int16x4_t max16x4 = vmax_s16(vget_low_s16(max16x8), vget_high_s16(max16x8)); + max16x4 = vpmax_s16(max16x4, max16x4); + max16x4 = vpmax_s16(max16x4, max16x4); + + maximum = vget_lane_s16(max16x4, 0); +#endif + + // Second part, do the remaining iterations (if any). 
+ for (i = residual; i > 0; i--) { + if (*p_start < minimum) + minimum = *p_start; + if (*p_start > maximum) + maximum = *p_start; + p_start++; + } + *min_val = minimum; + *max_val = maximum; +} diff --git a/common_audio/signal_processing/signal_processing_unittest.cc b/common_audio/signal_processing/signal_processing_unittest.cc index 3106c47d2d..9ec8590d6c 100644 --- a/common_audio/signal_processing/signal_processing_unittest.cc +++ b/common_audio/signal_processing/signal_processing_unittest.cc @@ -289,6 +289,12 @@ TEST(SplTest, MinMaxOperationsTest) { WebRtcSpl_MinValueW32(vector32, kVectorSize)); EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MinIndexW16(vector16, kVectorSize)); EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MinIndexW32(vector32, kVectorSize)); + EXPECT_EQ(WEBRTC_SPL_WORD16_MIN, + WebRtcSpl_MaxAbsElementW16(vector16, kVectorSize)); + int16_t min_value, max_value; + WebRtcSpl_MinMaxW16(vector16, kVectorSize, &min_value, &max_value); + EXPECT_EQ(WEBRTC_SPL_WORD16_MIN, min_value); + EXPECT_EQ(12334, max_value); // Test the cases where maximum values have to be caught // outside of the unrolled loops in ARM-Neon. @@ -306,6 +312,11 @@ TEST(SplTest, MinMaxOperationsTest) { EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxAbsIndexW16(vector16, kVectorSize)); EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxIndexW16(vector16, kVectorSize)); EXPECT_EQ(kVectorSize - 1, WebRtcSpl_MaxIndexW32(vector32, kVectorSize)); + EXPECT_EQ(WEBRTC_SPL_WORD16_MAX, + WebRtcSpl_MaxAbsElementW16(vector16, kVectorSize)); + WebRtcSpl_MinMaxW16(vector16, kVectorSize, &min_value, &max_value); + EXPECT_EQ(-29871, min_value); + EXPECT_EQ(WEBRTC_SPL_WORD16_MAX, max_value); // Test the cases where multiple maximum and minimum values are present. 
vector16[1] = WEBRTC_SPL_WORD16_MAX; @@ -332,6 +343,43 @@ TEST(SplTest, MinMaxOperationsTest) { EXPECT_EQ(1u, WebRtcSpl_MaxIndexW32(vector32, kVectorSize)); EXPECT_EQ(6u, WebRtcSpl_MinIndexW16(vector16, kVectorSize)); EXPECT_EQ(6u, WebRtcSpl_MinIndexW32(vector32, kVectorSize)); + EXPECT_EQ(WEBRTC_SPL_WORD16_MIN, + WebRtcSpl_MaxAbsElementW16(vector16, kVectorSize)); + WebRtcSpl_MinMaxW16(vector16, kVectorSize, &min_value, &max_value); + EXPECT_EQ(WEBRTC_SPL_WORD16_MIN, min_value); + EXPECT_EQ(WEBRTC_SPL_WORD16_MAX, max_value); + + // Test a one-element vector. + int16_t single_element_vector = 0; + EXPECT_EQ(0, WebRtcSpl_MaxAbsValueW16(&single_element_vector, 1)); + EXPECT_EQ(0, WebRtcSpl_MaxValueW16(&single_element_vector, 1)); + EXPECT_EQ(0, WebRtcSpl_MinValueW16(&single_element_vector, 1)); + EXPECT_EQ(0u, WebRtcSpl_MaxAbsIndexW16(&single_element_vector, 1)); + EXPECT_EQ(0u, WebRtcSpl_MaxIndexW16(&single_element_vector, 1)); + EXPECT_EQ(0u, WebRtcSpl_MinIndexW16(&single_element_vector, 1)); + EXPECT_EQ(0, WebRtcSpl_MaxAbsElementW16(&single_element_vector, 1)); + WebRtcSpl_MinMaxW16(&single_element_vector, 1, &min_value, &max_value); + EXPECT_EQ(0, min_value); + EXPECT_EQ(0, max_value); + + // Test a two-element vector with the values WEBRTC_SPL_WORD16_MIN and + // WEBRTC_SPL_WORD16_MAX. 
+ int16_t two_element_vector[2] = {WEBRTC_SPL_WORD16_MIN, + WEBRTC_SPL_WORD16_MAX}; + EXPECT_EQ(WEBRTC_SPL_WORD16_MAX, + WebRtcSpl_MaxAbsValueW16(two_element_vector, 2)); + EXPECT_EQ(WEBRTC_SPL_WORD16_MAX, + WebRtcSpl_MaxValueW16(two_element_vector, 2)); + EXPECT_EQ(WEBRTC_SPL_WORD16_MIN, + WebRtcSpl_MinValueW16(two_element_vector, 2)); + EXPECT_EQ(0u, WebRtcSpl_MaxAbsIndexW16(two_element_vector, 2)); + EXPECT_EQ(1u, WebRtcSpl_MaxIndexW16(two_element_vector, 2)); + EXPECT_EQ(0u, WebRtcSpl_MinIndexW16(two_element_vector, 2)); + EXPECT_EQ(WEBRTC_SPL_WORD16_MIN, + WebRtcSpl_MaxAbsElementW16(two_element_vector, 2)); + WebRtcSpl_MinMaxW16(two_element_vector, 2, &min_value, &max_value); + EXPECT_EQ(WEBRTC_SPL_WORD16_MIN, min_value); + EXPECT_EQ(WEBRTC_SPL_WORD16_MAX, max_value); } TEST(SplTest, VectorOperationsTest) { diff --git a/common_audio/signal_processing/splitting_filter.c b/common_audio/signal_processing/splitting_filter.c index 399433f0fb..b0d83f1388 100644 --- a/common_audio/signal_processing/splitting_filter.c +++ b/common_audio/signal_processing/splitting_filter.c @@ -44,9 +44,11 @@ static const uint16_t WebRtcSpl_kAllPassFilter2[3] = {21333, 49062, 63010}; // |data_length| // -void WebRtcSpl_AllPassQMF(int32_t* in_data, size_t data_length, - int32_t* out_data, const uint16_t* filter_coefficients, - int32_t* filter_state) +static void WebRtcSpl_AllPassQMF(int32_t* in_data, + size_t data_length, + int32_t* out_data, + const uint16_t* filter_coefficients, + int32_t* filter_state) { // The procedure is to filter the input with three first order all pass filters // (cascade operations). 
diff --git a/common_audio/smoothing_filter.h b/common_audio/smoothing_filter.h index e5f561ecf2..e96d52a6f7 100644 --- a/common_audio/smoothing_filter.h +++ b/common_audio/smoothing_filter.h @@ -14,7 +14,6 @@ #include #include "absl/types/optional.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -42,6 +41,11 @@ class SmoothingFilterImpl final : public SmoothingFilter { // will be set to |init_time_ms| first and can be changed through // |SetTimeConstantMs|. explicit SmoothingFilterImpl(int init_time_ms); + + SmoothingFilterImpl() = delete; + SmoothingFilterImpl(const SmoothingFilterImpl&) = delete; + SmoothingFilterImpl& operator=(const SmoothingFilterImpl&) = delete; + ~SmoothingFilterImpl() override; void AddSample(float sample) override; @@ -64,8 +68,6 @@ class SmoothingFilterImpl final : public SmoothingFilter { float alpha_; float state_; int64_t last_state_time_ms_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(SmoothingFilterImpl); }; } // namespace webrtc diff --git a/common_audio/third_party/ooura/BUILD.gn b/common_audio/third_party/ooura/BUILD.gn index 742f620044..0cdf98e591 100644 --- a/common_audio/third_party/ooura/BUILD.gn +++ b/common_audio/third_party/ooura/BUILD.gn @@ -16,7 +16,7 @@ rtc_library("fft_size_128") { ] deps = [ "../../../rtc_base/system:arch", - "../../../system_wrappers:cpu_features_api", + "../../../system_wrappers", ] cflags = [] diff --git a/common_audio/third_party/ooura/fft_size_128/ooura_fft.cc b/common_audio/third_party/ooura/fft_size_128/ooura_fft.cc index 2918374bba..693312012b 100644 --- a/common_audio/third_party/ooura/fft_size_128/ooura_fft.cc +++ b/common_audio/third_party/ooura/fft_size_128/ooura_fft.cc @@ -313,9 +313,17 @@ static void rftbsub_128_C(float* a) { } // namespace +OouraFft::OouraFft(bool sse2_available) { +#if defined(WEBRTC_ARCH_X86_FAMILY) + use_sse2_ = sse2_available; +#else + use_sse2_ = false; +#endif +} + OouraFft::OouraFft() { #if defined(WEBRTC_ARCH_X86_FAMILY) - use_sse2_ = 
(WebRtc_GetCPUInfo(kSSE2) != 0); + use_sse2_ = (GetCPUInfo(kSSE2) != 0); #else use_sse2_ = false; #endif diff --git a/common_audio/third_party/ooura/fft_size_128/ooura_fft.h b/common_audio/third_party/ooura/fft_size_128/ooura_fft.h index 0cdd6aa66f..8273dfe58e 100644 --- a/common_audio/third_party/ooura/fft_size_128/ooura_fft.h +++ b/common_audio/third_party/ooura/fft_size_128/ooura_fft.h @@ -38,6 +38,10 @@ void rftbsub_128_neon(float* a); class OouraFft { public: + // Ctor allowing the availability of SSE2 support to be specified. + explicit OouraFft(bool sse2_available); + + // Deprecated: This Ctor will soon be removed. OouraFft(); ~OouraFft(); void Fft(float* a) const; diff --git a/common_audio/wav_header.cc b/common_audio/wav_header.cc index d3dca9055d..ce119f1095 100644 --- a/common_audio/wav_header.cc +++ b/common_audio/wav_header.cc @@ -132,7 +132,7 @@ uint16_t MapWavFormatToHeaderField(WavFormat format) { case WavFormat::kWavFormatMuLaw: return 7; } - RTC_CHECK(false); + RTC_CHECK_NOTREACHED(); } WavFormat MapHeaderFieldToWavFormat(uint16_t format_header_value) { @@ -278,10 +278,8 @@ size_t GetFormatBytesPerSample(WavFormat format) { return 1; case WavFormat::kWavFormatIeeeFloat: return 4; - default: - RTC_CHECK(false); - return 2; } + RTC_CHECK_NOTREACHED(); } bool CheckWavParameters(size_t num_channels, diff --git a/common_audio/window_generator.h b/common_audio/window_generator.h index 0cbe24a402..c0a89c4f93 100644 --- a/common_audio/window_generator.h +++ b/common_audio/window_generator.h @@ -13,18 +13,17 @@ #include -#include "rtc_base/constructor_magic.h" - namespace webrtc { // Helper class with generators for various signal transform windows. 
class WindowGenerator { public: + WindowGenerator() = delete; + WindowGenerator(const WindowGenerator&) = delete; + WindowGenerator& operator=(const WindowGenerator&) = delete; + static void Hanning(int length, float* window); static void KaiserBesselDerived(float alpha, size_t length, float* window); - - private: - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WindowGenerator); }; } // namespace webrtc diff --git a/common_types.h b/common_types.h deleted file mode 100644 index cd63f5f72b..0000000000 --- a/common_types.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef COMMON_TYPES_H_ -#define COMMON_TYPES_H_ - -#include // For size_t - -#include - -namespace webrtc { - -struct FrameCounts { - FrameCounts() : key_frames(0), delta_frames(0) {} - int key_frames; - int delta_frames; -}; - -// Callback, used to notify an observer whenever frame counts have been updated. -class FrameCountObserver { - public: - virtual ~FrameCountObserver() {} - virtual void FrameCountUpdated(const FrameCounts& frame_counts, - uint32_t ssrc) = 0; -}; - -// ================================================================== -// Video specific types -// ================================================================== - -// TODO(magjed): Move this and other H264 related classes out to their own file. 
-namespace H264 { - -enum Profile { - kProfileConstrainedBaseline, - kProfileBaseline, - kProfileMain, - kProfileConstrainedHigh, - kProfileHigh, -}; - -} // namespace H264 - -struct SpatialLayer { - bool operator==(const SpatialLayer& other) const; - bool operator!=(const SpatialLayer& other) const { return !(*this == other); } - - unsigned short width; - unsigned short height; - float maxFramerate; // fps. - unsigned char numberOfTemporalLayers; - unsigned int maxBitrate; // kilobits/sec. - unsigned int targetBitrate; // kilobits/sec. - unsigned int minBitrate; // kilobits/sec. - unsigned int qpMax; // minimum quality - bool active; // encoded and sent. -}; - -// Simulcast is when the same stream is encoded multiple times with different -// settings such as resolution. -typedef SpatialLayer SimulcastStream; - -// Minimum and maximum playout delay values from capture to render. -// These are best effort values. -// -// A value < 0 indicates no change from previous valid value. -// -// min = max = 0 indicates that the receiver should try and render -// frame as soon as possible. -// -// min = x, max = y indicates that the receiver is free to adapt -// in the range (x, y) based on network jitter. -// -// Note: Given that this gets embedded in a union, it is up-to the owner to -// initialize these values. 
-struct PlayoutDelay { - PlayoutDelay(int min_ms, int max_ms) : min_ms(min_ms), max_ms(max_ms) {} - int min_ms; - int max_ms; - - static PlayoutDelay Noop() { return PlayoutDelay(-1, -1); } - - bool IsNoop() const { return min_ms == -1 && max_ms == -1; } - bool operator==(const PlayoutDelay& rhs) const { - return min_ms == rhs.min_ms && max_ms == rhs.max_ms; - } -}; - -} // namespace webrtc - -#endif // COMMON_TYPES_H_ diff --git a/common_video/BUILD.gn b/common_video/BUILD.gn index ddf4c2d495..8e5376725c 100644 --- a/common_video/BUILD.gn +++ b/common_video/BUILD.gn @@ -21,28 +21,27 @@ rtc_library("common_video") { "h264/h264_common.h", "h264/pps_parser.cc", "h264/pps_parser.h", - "h264/profile_level_id.h", "h264/sps_parser.cc", "h264/sps_parser.h", "h264/sps_vui_rewriter.cc", "h264/sps_vui_rewriter.h", - "i420_buffer_pool.cc", "include/bitrate_adjuster.h", - "include/i420_buffer_pool.h", "include/incoming_video_stream.h", "include/quality_limitation_reason.h", - "include/video_frame.h", "include/video_frame_buffer.h", + "include/video_frame_buffer_pool.h", "incoming_video_stream.cc", "libyuv/include/webrtc_libyuv.h", "libyuv/webrtc_libyuv.cc", "video_frame_buffer.cc", + "video_frame_buffer_pool.cc", "video_render_frames.cc", "video_render_frames.h", ] deps = [ "../api:scoped_refptr", + "../api:sequence_checker", "../api/task_queue", "../api/units:time_delta", "../api/units:timestamp", @@ -50,22 +49,28 @@ rtc_library("common_video") { "../api/video:video_bitrate_allocation", "../api/video:video_bitrate_allocator", "../api/video:video_frame", - "../api/video:video_frame_i420", "../api/video:video_rtp_headers", "../api/video_codecs:bitstream_parser_api", - "../media:rtc_h264_profile_id", + "../api/video_codecs:video_codecs_api", "../rtc_base", "../rtc_base:checks", "../rtc_base:rtc_task_queue", "../rtc_base:safe_minmax", + "../rtc_base/synchronization:mutex", "../rtc_base/system:rtc_export", "../system_wrappers:metrics", - 
"//third_party/abseil-cpp/absl/types:optional", "//third_party/libyuv", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } -if (rtc_include_tests) { +rtc_source_set("frame_counts") { + visibility = [ "*" ] + + sources = [ "frame_counts.h" ] +} + +if (rtc_include_tests && !build_with_chromium) { common_video_resources = [ "../resources/foreman_cif.yuv" ] if (is_ios) { @@ -84,24 +89,21 @@ if (rtc_include_tests) { "frame_rate_estimator_unittest.cc", "h264/h264_bitstream_parser_unittest.cc", "h264/pps_parser_unittest.cc", - "h264/profile_level_id_unittest.cc", "h264/sps_parser_unittest.cc", "h264/sps_vui_rewriter_unittest.cc", - "i420_buffer_pool_unittest.cc", "libyuv/libyuv_unittest.cc", + "video_frame_buffer_pool_unittest.cc", "video_frame_unittest.cc", ] deps = [ ":common_video", - "../:webrtc_common", "../api:scoped_refptr", "../api/units:time_delta", "../api/video:video_frame", "../api/video:video_frame_i010", - "../api/video:video_frame_i420", "../api/video:video_rtp_headers", - "../media:rtc_h264_profile_id", + "../api/video_codecs:video_codecs_api", "../rtc_base", "../rtc_base:checks", "../rtc_base:rtc_base_approved", @@ -116,6 +118,8 @@ if (rtc_include_tests) { "//third_party/libyuv", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + data = common_video_resources if (is_android) { deps += [ "//testing/android/native_test:native_test_support" ] diff --git a/common_video/bitrate_adjuster.cc b/common_video/bitrate_adjuster.cc index ca52ed9e69..c53c3a02f6 100644 --- a/common_video/bitrate_adjuster.cc +++ b/common_video/bitrate_adjuster.cc @@ -39,7 +39,7 @@ BitrateAdjuster::BitrateAdjuster(float min_adjusted_bitrate_pct, } void BitrateAdjuster::SetTargetBitrateBps(uint32_t bitrate_bps) { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); // If the change in target bitrate is large, update the adjusted bitrate // immediately since it's likely we have gained or lost a sizeable amount of // bandwidth and we'll want to respond 
quickly. @@ -58,22 +58,22 @@ void BitrateAdjuster::SetTargetBitrateBps(uint32_t bitrate_bps) { } uint32_t BitrateAdjuster::GetTargetBitrateBps() const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return target_bitrate_bps_; } uint32_t BitrateAdjuster::GetAdjustedBitrateBps() const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return adjusted_bitrate_bps_; } absl::optional BitrateAdjuster::GetEstimatedBitrateBps() { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return bitrate_tracker_.Rate(rtc::TimeMillis()); } void BitrateAdjuster::Update(size_t frame_size) { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); uint32_t current_time_ms = rtc::TimeMillis(); bitrate_tracker_.Update(frame_size, current_time_ms); UpdateBitrate(current_time_ms); @@ -100,7 +100,7 @@ uint32_t BitrateAdjuster::GetMaxAdjustedBitrateBps() const { // Only safe to call this after Update calls have stopped void BitrateAdjuster::Reset() { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); target_bitrate_bps_ = 0; adjusted_bitrate_bps_ = 0; last_adjusted_target_bitrate_bps_ = 0; diff --git a/common_video/frame_counts.h b/common_video/frame_counts.h new file mode 100644 index 0000000000..663fda4a2f --- /dev/null +++ b/common_video/frame_counts.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef COMMON_VIDEO_FRAME_COUNTS_H_ +#define COMMON_VIDEO_FRAME_COUNTS_H_ + +namespace webrtc { + +struct FrameCounts { + FrameCounts() : key_frames(0), delta_frames(0) {} + int key_frames; + int delta_frames; +}; + +// Callback, used to notify an observer whenever frame counts have been updated. +class FrameCountObserver { + public: + virtual ~FrameCountObserver() {} + virtual void FrameCountUpdated(const FrameCounts& frame_counts, + uint32_t ssrc) = 0; +}; + +} // namespace webrtc + +#endif // COMMON_VIDEO_FRAME_COUNTS_H_ diff --git a/common_video/generic_frame_descriptor/BUILD.gn b/common_video/generic_frame_descriptor/BUILD.gn index 05a4e2396c..ab97e887f2 100644 --- a/common_video/generic_frame_descriptor/BUILD.gn +++ b/common_video/generic_frame_descriptor/BUILD.gn @@ -19,6 +19,8 @@ rtc_library("generic_frame_descriptor") { "../../api/transport/rtp:dependency_descriptor", "../../api/video:video_codec_constants", "../../rtc_base:checks", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", diff --git a/common_video/generic_frame_descriptor/generic_frame_info.cc b/common_video/generic_frame_descriptor/generic_frame_info.cc index ef78fac7b0..af66bbaf67 100644 --- a/common_video/generic_frame_descriptor/generic_frame_info.cc +++ b/common_video/generic_frame_descriptor/generic_frame_info.cc @@ -15,33 +15,6 @@ namespace webrtc { -absl::InlinedVector -GenericFrameInfo::DecodeTargetInfo(absl::string_view indication_symbols) { - absl::InlinedVector decode_targets; - for (char symbol : indication_symbols) { - DecodeTargetIndication indication; - switch (symbol) { - case '-': - indication = DecodeTargetIndication::kNotPresent; - break; - case 'D': - indication = DecodeTargetIndication::kDiscardable; - break; - case 'R': - indication = DecodeTargetIndication::kRequired; - break; - case 'S': - indication = DecodeTargetIndication::kSwitch; - break; - 
default: - RTC_NOTREACHED(); - } - decode_targets.push_back(indication); - } - - return decode_targets; -} - GenericFrameInfo::GenericFrameInfo() = default; GenericFrameInfo::GenericFrameInfo(const GenericFrameInfo&) = default; GenericFrameInfo::~GenericFrameInfo() = default; @@ -65,19 +38,8 @@ GenericFrameInfo::Builder& GenericFrameInfo::Builder::S(int spatial_id) { GenericFrameInfo::Builder& GenericFrameInfo::Builder::Dtis( absl::string_view indication_symbols) { - info_.decode_target_indications = DecodeTargetInfo(indication_symbols); - return *this; -} - -GenericFrameInfo::Builder& GenericFrameInfo::Builder::Fdiffs( - std::initializer_list frame_diffs) { - info_.frame_diffs.assign(frame_diffs.begin(), frame_diffs.end()); - return *this; -} - -GenericFrameInfo::Builder& GenericFrameInfo::Builder::ChainDiffs( - std::initializer_list chain_diffs) { - info_.chain_diffs.assign(chain_diffs.begin(), chain_diffs.end()); + info_.decode_target_indications = + webrtc_impl::StringToDecodeTargetIndications(indication_symbols); return *this; } diff --git a/common_video/generic_frame_descriptor/generic_frame_info.h b/common_video/generic_frame_descriptor/generic_frame_info.h index 7ac3665afc..19f413b5d4 100644 --- a/common_video/generic_frame_descriptor/generic_frame_info.h +++ b/common_video/generic_frame_descriptor/generic_frame_info.h @@ -11,6 +11,7 @@ #ifndef COMMON_VIDEO_GENERIC_FRAME_DESCRIPTOR_GENERIC_FRAME_INFO_H_ #define COMMON_VIDEO_GENERIC_FRAME_DESCRIPTOR_GENERIC_FRAME_INFO_H_ +#include #include #include @@ -32,9 +33,6 @@ struct CodecBufferUsage { }; struct GenericFrameInfo : public FrameDependencyTemplate { - static absl::InlinedVector DecodeTargetInfo( - absl::string_view indication_symbols); - class Builder; GenericFrameInfo(); @@ -43,6 +41,7 @@ struct GenericFrameInfo : public FrameDependencyTemplate { absl::InlinedVector encoder_buffers; std::vector part_of_chain; + std::bitset<32> active_decode_targets = ~uint32_t{0}; }; class GenericFrameInfo::Builder { @@ 
-54,8 +53,6 @@ class GenericFrameInfo::Builder { Builder& T(int temporal_id); Builder& S(int spatial_id); Builder& Dtis(absl::string_view indication_symbols); - Builder& Fdiffs(std::initializer_list frame_diffs); - Builder& ChainDiffs(std::initializer_list chain_diffs); private: GenericFrameInfo info_; diff --git a/common_video/h264/OWNERS b/common_video/h264/OWNERS new file mode 100644 index 0000000000..361ed7e84a --- /dev/null +++ b/common_video/h264/OWNERS @@ -0,0 +1 @@ +ssilkin@webrtc.org diff --git a/common_video/h264/h264_bitstream_parser.cc b/common_video/h264/h264_bitstream_parser.cc index 5a75f48f88..3b41599fa0 100644 --- a/common_video/h264/h264_bitstream_parser.cc +++ b/common_video/h264/h264_bitstream_parser.cc @@ -28,11 +28,13 @@ const int kMaxQpValue = 51; namespace webrtc { -#define RETURN_ON_FAIL(x, res) \ - if (!(x)) { \ - RTC_LOG_F(LS_ERROR) << "FAILED: " #x; \ - return res; \ - } +#define RETURN_ON_FAIL(x, res) \ + do { \ + if (!(x)) { \ + RTC_LOG_F(LS_ERROR) << "FAILED: " #x; \ + return res; \ + } \ + } while (0) #define RETURN_INV_ON_FAIL(x) RETURN_ON_FAIL(x, kInvalidStream) @@ -62,64 +64,63 @@ H264BitstreamParser::Result H264BitstreamParser::ParseNonParameterSetNalu( uint32_t bits_tmp; // first_mb_in_slice: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); // slice_type: ue(v) uint32_t slice_type; - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&slice_type)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(slice_type)); // slice_type's 5..9 range is used to indicate that all slices of a picture // have the same value of slice_type % 5, we don't care about that, so we map // to the corresponding 0..4 range. 
slice_type %= 5; // pic_parameter_set_id: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); if (sps_->separate_colour_plane_flag == 1) { // colour_plane_id - RETURN_INV_ON_FAIL(slice_reader.ReadBits(&bits_tmp, 2)); + RETURN_INV_ON_FAIL(slice_reader.ReadBits(2, bits_tmp)); } // frame_num: u(v) // Represented by log2_max_frame_num bits. - RETURN_INV_ON_FAIL( - slice_reader.ReadBits(&bits_tmp, sps_->log2_max_frame_num)); + RETURN_INV_ON_FAIL(slice_reader.ReadBits(sps_->log2_max_frame_num, bits_tmp)); uint32_t field_pic_flag = 0; if (sps_->frame_mbs_only_flag == 0) { // field_pic_flag: u(1) - RETURN_INV_ON_FAIL(slice_reader.ReadBits(&field_pic_flag, 1)); + RETURN_INV_ON_FAIL(slice_reader.ReadBits(1, field_pic_flag)); if (field_pic_flag != 0) { // bottom_field_flag: u(1) - RETURN_INV_ON_FAIL(slice_reader.ReadBits(&bits_tmp, 1)); + RETURN_INV_ON_FAIL(slice_reader.ReadBits(1, bits_tmp)); } } if (is_idr) { // idr_pic_id: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } // pic_order_cnt_lsb: u(v) // Represented by sps_.log2_max_pic_order_cnt_lsb bits. 
if (sps_->pic_order_cnt_type == 0) { RETURN_INV_ON_FAIL( - slice_reader.ReadBits(&bits_tmp, sps_->log2_max_pic_order_cnt_lsb)); + slice_reader.ReadBits(sps_->log2_max_pic_order_cnt_lsb, bits_tmp)); if (pps_->bottom_field_pic_order_in_frame_present_flag && field_pic_flag == 0) { // delta_pic_order_cnt_bottom: se(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } } if (sps_->pic_order_cnt_type == 1 && !sps_->delta_pic_order_always_zero_flag) { // delta_pic_order_cnt[0]: se(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); if (pps_->bottom_field_pic_order_in_frame_present_flag && !field_pic_flag) { // delta_pic_order_cnt[1]: se(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } } if (pps_->redundant_pic_cnt_present_flag) { // redundant_pic_cnt: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } if (slice_type == H264::SliceType::kB) { // direct_spatial_mv_pred_flag: u(1) - RETURN_INV_ON_FAIL(slice_reader.ReadBits(&bits_tmp, 1)); + RETURN_INV_ON_FAIL(slice_reader.ReadBits(1, bits_tmp)); } switch (slice_type) { case H264::SliceType::kP: @@ -128,13 +129,13 @@ H264BitstreamParser::Result H264BitstreamParser::ParseNonParameterSetNalu( uint32_t num_ref_idx_active_override_flag; // num_ref_idx_active_override_flag: u(1) RETURN_INV_ON_FAIL( - slice_reader.ReadBits(&num_ref_idx_active_override_flag, 1)); + slice_reader.ReadBits(1, num_ref_idx_active_override_flag)); if (num_ref_idx_active_override_flag != 0) { // num_ref_idx_l0_active_minus1: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); if (slice_type 
== H264::SliceType::kB) { // num_ref_idx_l1_active_minus1: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } } break; @@ -158,20 +159,20 @@ H264BitstreamParser::Result H264BitstreamParser::ParseNonParameterSetNalu( // ref_pic_list_modification_flag_l0: u(1) uint32_t ref_pic_list_modification_flag_l0; RETURN_INV_ON_FAIL( - slice_reader.ReadBits(&ref_pic_list_modification_flag_l0, 1)); + slice_reader.ReadBits(1, ref_pic_list_modification_flag_l0)); if (ref_pic_list_modification_flag_l0) { uint32_t modification_of_pic_nums_idc; do { // modification_of_pic_nums_idc: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb( - &modification_of_pic_nums_idc)); + RETURN_INV_ON_FAIL( + slice_reader.ReadExponentialGolomb(modification_of_pic_nums_idc)); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { // abs_diff_pic_num_minus1: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } else if (modification_of_pic_nums_idc == 2) { // long_term_pic_num: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } } while (modification_of_pic_nums_idc != 3); } @@ -180,20 +181,20 @@ H264BitstreamParser::Result H264BitstreamParser::ParseNonParameterSetNalu( // ref_pic_list_modification_flag_l1: u(1) uint32_t ref_pic_list_modification_flag_l1; RETURN_INV_ON_FAIL( - slice_reader.ReadBits(&ref_pic_list_modification_flag_l1, 1)); + slice_reader.ReadBits(1, ref_pic_list_modification_flag_l1)); if (ref_pic_list_modification_flag_l1) { uint32_t modification_of_pic_nums_idc; do { // modification_of_pic_nums_idc: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb( - &modification_of_pic_nums_idc)); + RETURN_INV_ON_FAIL( + 
slice_reader.ReadExponentialGolomb(modification_of_pic_nums_idc)); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { // abs_diff_pic_num_minus1: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } else if (modification_of_pic_nums_idc == 2) { // long_term_pic_num: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } } while (modification_of_pic_nums_idc != 3); } @@ -215,35 +216,35 @@ H264BitstreamParser::Result H264BitstreamParser::ParseNonParameterSetNalu( if (is_idr) { // no_output_of_prior_pics_flag: u(1) // long_term_reference_flag: u(1) - RETURN_INV_ON_FAIL(slice_reader.ReadBits(&bits_tmp, 2)); + RETURN_INV_ON_FAIL(slice_reader.ReadBits(2, bits_tmp)); } else { // adaptive_ref_pic_marking_mode_flag: u(1) uint32_t adaptive_ref_pic_marking_mode_flag; RETURN_INV_ON_FAIL( - slice_reader.ReadBits(&adaptive_ref_pic_marking_mode_flag, 1)); + slice_reader.ReadBits(1, adaptive_ref_pic_marking_mode_flag)); if (adaptive_ref_pic_marking_mode_flag) { uint32_t memory_management_control_operation; do { // memory_management_control_operation: ue(v) RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb( - &memory_management_control_operation)); + memory_management_control_operation)); if (memory_management_control_operation == 1 || memory_management_control_operation == 3) { // difference_of_pic_nums_minus1: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } if (memory_management_control_operation == 2) { // long_term_pic_num: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } if (memory_management_control_operation == 3 || memory_management_control_operation == 6) { 
// long_term_frame_idx: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } if (memory_management_control_operation == 4) { // max_long_term_frame_idx_plus1: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } } while (memory_management_control_operation != 0); } @@ -252,12 +253,12 @@ H264BitstreamParser::Result H264BitstreamParser::ParseNonParameterSetNalu( if (pps_->entropy_coding_mode_flag && slice_type != H264::SliceType::kI && slice_type != H264::SliceType::kSi) { // cabac_init_idc: ue(v) - RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(&golomb_tmp)); + RETURN_INV_ON_FAIL(slice_reader.ReadExponentialGolomb(golomb_tmp)); } int32_t last_slice_qp_delta; RETURN_INV_ON_FAIL( - slice_reader.ReadSignedExponentialGolomb(&last_slice_qp_delta)); + slice_reader.ReadSignedExponentialGolomb(last_slice_qp_delta)); if (abs(last_slice_qp_delta) > kMaxAbsQpDeltaValue) { // Something has gone wrong, and the parsed value is invalid. 
RTC_LOG(LS_WARNING) << "Parsed QP value out of range."; @@ -275,14 +276,14 @@ void H264BitstreamParser::ParseSlice(const uint8_t* slice, size_t length) { sps_ = SpsParser::ParseSps(slice + H264::kNaluTypeSize, length - H264::kNaluTypeSize); if (!sps_) - RTC_LOG(LS_WARNING) << "Unable to parse SPS from H264 bitstream."; + RTC_DLOG(LS_WARNING) << "Unable to parse SPS from H264 bitstream."; break; } case H264::NaluType::kPps: { pps_ = PpsParser::ParsePps(slice + H264::kNaluTypeSize, length - H264::kNaluTypeSize); if (!pps_) - RTC_LOG(LS_WARNING) << "Unable to parse PPS from H264 bitstream."; + RTC_DLOG(LS_WARNING) << "Unable to parse PPS from H264 bitstream."; break; } case H264::NaluType::kAud: @@ -291,40 +292,29 @@ void H264BitstreamParser::ParseSlice(const uint8_t* slice, size_t length) { default: Result res = ParseNonParameterSetNalu(slice, length, nalu_type); if (res != kOk) - RTC_LOG(LS_INFO) << "Failed to parse bitstream. Error: " << res; + RTC_DLOG(LS_INFO) << "Failed to parse bitstream. 
Error: " << res; break; } } -void H264BitstreamParser::ParseBitstream(const uint8_t* bitstream, - size_t length) { +void H264BitstreamParser::ParseBitstream( + rtc::ArrayView bitstream) { std::vector nalu_indices = - H264::FindNaluIndices(bitstream, length); + H264::FindNaluIndices(bitstream.data(), bitstream.size()); for (const H264::NaluIndex& index : nalu_indices) - ParseSlice(&bitstream[index.payload_start_offset], index.payload_size); + ParseSlice(bitstream.data() + index.payload_start_offset, + index.payload_size); } -bool H264BitstreamParser::GetLastSliceQp(int* qp) const { +absl::optional H264BitstreamParser::GetLastSliceQp() const { if (!last_slice_qp_delta_ || !pps_) - return false; - const int parsed_qp = 26 + pps_->pic_init_qp_minus26 + *last_slice_qp_delta_; - if (parsed_qp < kMinQpValue || parsed_qp > kMaxQpValue) { + return absl::nullopt; + const int qp = 26 + pps_->pic_init_qp_minus26 + *last_slice_qp_delta_; + if (qp < kMinQpValue || qp > kMaxQpValue) { RTC_LOG(LS_ERROR) << "Parsed invalid QP from bitstream."; - return false; + return absl::nullopt; } - *qp = parsed_qp; - return true; -} - -void H264BitstreamParser::ParseBitstream( - rtc::ArrayView bitstream) { - ParseBitstream(bitstream.data(), bitstream.size()); -} - -absl::optional H264BitstreamParser::GetLastSliceQp() const { - int qp; - bool success = GetLastSliceQp(&qp); - return success ? absl::optional(qp) : absl::nullopt; + return qp; } } // namespace webrtc diff --git a/common_video/h264/h264_bitstream_parser.h b/common_video/h264/h264_bitstream_parser.h index 48190665f0..05427825ac 100644 --- a/common_video/h264/h264_bitstream_parser.h +++ b/common_video/h264/h264_bitstream_parser.h @@ -31,11 +31,6 @@ class H264BitstreamParser : public BitstreamParser { H264BitstreamParser(); ~H264BitstreamParser() override; - // These are here for backwards-compatability for the time being. 
- void ParseBitstream(const uint8_t* bitstream, size_t length); - bool GetLastSliceQp(int* qp) const; - - // New interface. void ParseBitstream(rtc::ArrayView bitstream) override; absl::optional GetLastSliceQp() const override; diff --git a/common_video/h264/h264_bitstream_parser_unittest.cc b/common_video/h264/h264_bitstream_parser_unittest.cc index 1509d67753..3f4f202af2 100644 --- a/common_video/h264/h264_bitstream_parser_unittest.cc +++ b/common_video/h264/h264_bitstream_parser_unittest.cc @@ -46,43 +46,39 @@ uint8_t kH264BitstreamNextImageSliceChunkCabac[] = { TEST(H264BitstreamParserTest, ReportsNoQpWithoutParsedSlices) { H264BitstreamParser h264_parser; - int qp; - EXPECT_FALSE(h264_parser.GetLastSliceQp(&qp)); + EXPECT_FALSE(h264_parser.GetLastSliceQp().has_value()); } TEST(H264BitstreamParserTest, ReportsNoQpWithOnlyParsedPpsAndSpsSlices) { H264BitstreamParser h264_parser; - h264_parser.ParseBitstream(kH264SpsPps, sizeof(kH264SpsPps)); - int qp; - EXPECT_FALSE(h264_parser.GetLastSliceQp(&qp)); + h264_parser.ParseBitstream(kH264SpsPps); + EXPECT_FALSE(h264_parser.GetLastSliceQp().has_value()); } TEST(H264BitstreamParserTest, ReportsLastSliceQpForImageSlices) { H264BitstreamParser h264_parser; - h264_parser.ParseBitstream(kH264BitstreamChunk, sizeof(kH264BitstreamChunk)); - int qp; - ASSERT_TRUE(h264_parser.GetLastSliceQp(&qp)); - EXPECT_EQ(35, qp); + h264_parser.ParseBitstream(kH264BitstreamChunk); + absl::optional qp = h264_parser.GetLastSliceQp(); + ASSERT_TRUE(qp.has_value()); + EXPECT_EQ(35, *qp); // Parse an additional image slice. 
- h264_parser.ParseBitstream(kH264BitstreamNextImageSliceChunk, - sizeof(kH264BitstreamNextImageSliceChunk)); - ASSERT_TRUE(h264_parser.GetLastSliceQp(&qp)); - EXPECT_EQ(37, qp); + h264_parser.ParseBitstream(kH264BitstreamNextImageSliceChunk); + qp = h264_parser.GetLastSliceQp(); + ASSERT_TRUE(qp.has_value()); + EXPECT_EQ(37, *qp); } TEST(H264BitstreamParserTest, ReportsLastSliceQpForCABACImageSlices) { H264BitstreamParser h264_parser; - h264_parser.ParseBitstream(kH264BitstreamChunkCabac, - sizeof(kH264BitstreamChunkCabac)); - int qp; - EXPECT_FALSE(h264_parser.GetLastSliceQp(&qp)); + h264_parser.ParseBitstream(kH264BitstreamChunkCabac); + EXPECT_FALSE(h264_parser.GetLastSliceQp().has_value()); // Parse an additional image slice. - h264_parser.ParseBitstream(kH264BitstreamNextImageSliceChunkCabac, - sizeof(kH264BitstreamNextImageSliceChunkCabac)); - ASSERT_TRUE(h264_parser.GetLastSliceQp(&qp)); - EXPECT_EQ(24, qp); + h264_parser.ParseBitstream(kH264BitstreamNextImageSliceChunkCabac); + absl::optional qp = h264_parser.GetLastSliceQp(); + ASSERT_TRUE(qp.has_value()); + EXPECT_EQ(24, *qp); } } // namespace webrtc diff --git a/common_video/h264/pps_parser.cc b/common_video/h264/pps_parser.cc index ae01652189..3d3725f95a 100644 --- a/common_video/h264/pps_parser.cc +++ b/common_video/h264/pps_parser.cc @@ -18,9 +18,11 @@ #include "rtc_base/checks.h" #define RETURN_EMPTY_ON_FAIL(x) \ - if (!(x)) { \ - return absl::nullopt; \ - } + do { \ + if (!(x)) { \ + return absl::nullopt; \ + } \ + } while (0) namespace { const int kMaxPicInitQpDeltaValue = 25; @@ -64,14 +66,14 @@ absl::optional PpsParser::ParsePpsIdFromSlice(const uint8_t* data, uint32_t golomb_tmp; // first_mb_in_slice: ue(v) - if (!slice_reader.ReadExponentialGolomb(&golomb_tmp)) + if (!slice_reader.ReadExponentialGolomb(golomb_tmp)) return absl::nullopt; // slice_type: ue(v) - if (!slice_reader.ReadExponentialGolomb(&golomb_tmp)) + if (!slice_reader.ReadExponentialGolomb(golomb_tmp)) return absl::nullopt; // 
pic_parameter_set_id: ue(v) uint32_t slice_pps_id; - if (!slice_reader.ReadExponentialGolomb(&slice_pps_id)) + if (!slice_reader.ReadExponentialGolomb(slice_pps_id)) return absl::nullopt; return slice_pps_id; } @@ -86,30 +88,29 @@ absl::optional PpsParser::ParseInternal( uint32_t golomb_ignored; // entropy_coding_mode_flag: u(1) uint32_t entropy_coding_mode_flag; - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(&entropy_coding_mode_flag, 1)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(1, entropy_coding_mode_flag)); pps.entropy_coding_mode_flag = entropy_coding_mode_flag != 0; // bottom_field_pic_order_in_frame_present_flag: u(1) uint32_t bottom_field_pic_order_in_frame_present_flag; RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadBits(&bottom_field_pic_order_in_frame_present_flag, 1)); + bit_buffer->ReadBits(1, bottom_field_pic_order_in_frame_present_flag)); pps.bottom_field_pic_order_in_frame_present_flag = bottom_field_pic_order_in_frame_present_flag != 0; // num_slice_groups_minus1: ue(v) uint32_t num_slice_groups_minus1; RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadExponentialGolomb(&num_slice_groups_minus1)); + bit_buffer->ReadExponentialGolomb(num_slice_groups_minus1)); if (num_slice_groups_minus1 > 0) { uint32_t slice_group_map_type; // slice_group_map_type: ue(v) RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadExponentialGolomb(&slice_group_map_type)); + bit_buffer->ReadExponentialGolomb(slice_group_map_type)); if (slice_group_map_type == 0) { for (uint32_t i_group = 0; i_group <= num_slice_groups_minus1; ++i_group) { // run_length_minus1[iGroup]: ue(v) - RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(golomb_ignored)); } } else if (slice_group_map_type == 1) { // TODO(sprang): Implement support for dispersed slice group map type. 
@@ -118,23 +119,21 @@ absl::optional PpsParser::ParseInternal( for (uint32_t i_group = 0; i_group <= num_slice_groups_minus1; ++i_group) { // top_left[iGroup]: ue(v) - RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(golomb_ignored)); // bottom_right[iGroup]: ue(v) - RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(golomb_ignored)); } } else if (slice_group_map_type == 3 || slice_group_map_type == 4 || slice_group_map_type == 5) { // slice_group_change_direction_flag: u(1) - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(&bits_tmp, 1)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(1, bits_tmp)); // slice_group_change_rate_minus1: ue(v) - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(golomb_ignored)); } else if (slice_group_map_type == 6) { // pic_size_in_map_units_minus1: ue(v) uint32_t pic_size_in_map_units_minus1; RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadExponentialGolomb(&pic_size_in_map_units_minus1)); + bit_buffer->ReadExponentialGolomb(pic_size_in_map_units_minus1)); uint32_t slice_group_id_bits = 0; uint32_t num_slice_groups = num_slice_groups_minus1 + 1; // If num_slice_groups is not a power of two an additional bit is required @@ -149,39 +148,39 @@ absl::optional PpsParser::ParseInternal( // slice_group_id[i]: u(v) // Represented by ceil(log2(num_slice_groups_minus1 + 1)) bits. 
RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadBits(&bits_tmp, slice_group_id_bits)); + bit_buffer->ReadBits(slice_group_id_bits, bits_tmp)); } } } // num_ref_idx_l0_default_active_minus1: ue(v) - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(golomb_ignored)); // num_ref_idx_l1_default_active_minus1: ue(v) - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(golomb_ignored)); // weighted_pred_flag: u(1) uint32_t weighted_pred_flag; - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(&weighted_pred_flag, 1)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(1, weighted_pred_flag)); pps.weighted_pred_flag = weighted_pred_flag != 0; // weighted_bipred_idc: u(2) - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(&pps.weighted_bipred_idc, 2)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(2, pps.weighted_bipred_idc)); // pic_init_qp_minus26: se(v) RETURN_EMPTY_ON_FAIL( - bit_buffer->ReadSignedExponentialGolomb(&pps.pic_init_qp_minus26)); + bit_buffer->ReadSignedExponentialGolomb(pps.pic_init_qp_minus26)); // Sanity-check parsed value if (pps.pic_init_qp_minus26 > kMaxPicInitQpDeltaValue || pps.pic_init_qp_minus26 < kMinPicInitQpDeltaValue) { RETURN_EMPTY_ON_FAIL(false); } // pic_init_qs_minus26: se(v) - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(golomb_ignored)); // chroma_qp_index_offset: se(v) - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadExponentialGolomb(golomb_ignored)); // deblocking_filter_control_present_flag: u(1) // constrained_intra_pred_flag: u(1) - RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(&bits_tmp, 2)); + RETURN_EMPTY_ON_FAIL(bit_buffer->ReadBits(2, bits_tmp)); // redundant_pic_cnt_present_flag: u(1) RETURN_EMPTY_ON_FAIL( - 
bit_buffer->ReadBits(&pps.redundant_pic_cnt_present_flag, 1)); + bit_buffer->ReadBits(1, pps.redundant_pic_cnt_present_flag)); return pps; } @@ -189,11 +188,15 @@ absl::optional PpsParser::ParseInternal( bool PpsParser::ParsePpsIdsInternal(rtc::BitBuffer* bit_buffer, uint32_t* pps_id, uint32_t* sps_id) { + if (pps_id == nullptr) + return false; // pic_parameter_set_id: ue(v) - if (!bit_buffer->ReadExponentialGolomb(pps_id)) + if (!bit_buffer->ReadExponentialGolomb(*pps_id)) + return false; + if (sps_id == nullptr) return false; // seq_parameter_set_id: ue(v) - if (!bit_buffer->ReadExponentialGolomb(sps_id)) + if (!bit_buffer->ReadExponentialGolomb(*sps_id)) return false; return true; } diff --git a/common_video/h264/profile_level_id.h b/common_video/h264/profile_level_id.h deleted file mode 100644 index 07b49e57c7..0000000000 --- a/common_video/h264/profile_level_id.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef COMMON_VIDEO_H264_PROFILE_LEVEL_ID_H_ -#define COMMON_VIDEO_H264_PROFILE_LEVEL_ID_H_ - -#include "media/base/h264_profile_level_id.h" - -// TODO(zhihuang): Delete this file once dependent applications switch to -// including "webrtc/media/base/h264_profile_level_id.h" directly. - -#endif // COMMON_VIDEO_H264_PROFILE_LEVEL_ID_H_ diff --git a/common_video/h264/profile_level_id_unittest.cc b/common_video/h264/profile_level_id_unittest.cc deleted file mode 100644 index 957b434a3c..0000000000 --- a/common_video/h264/profile_level_id_unittest.cc +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "common_video/h264/profile_level_id.h" - -#include -#include - -#include "absl/types/optional.h" -#include "media/base/h264_profile_level_id.h" -#include "test/gtest.h" - -namespace webrtc { -namespace H264 { - -TEST(H264ProfileLevelId, TestParsingInvalid) { - // Malformed strings. - EXPECT_FALSE(ParseProfileLevelId("")); - EXPECT_FALSE(ParseProfileLevelId(" 42e01f")); - EXPECT_FALSE(ParseProfileLevelId("4242e01f")); - EXPECT_FALSE(ParseProfileLevelId("e01f")); - EXPECT_FALSE(ParseProfileLevelId("gggggg")); - - // Invalid level. - EXPECT_FALSE(ParseProfileLevelId("42e000")); - EXPECT_FALSE(ParseProfileLevelId("42e00f")); - EXPECT_FALSE(ParseProfileLevelId("42e0ff")); - - // Invalid profile. 
- EXPECT_FALSE(ParseProfileLevelId("42e11f")); - EXPECT_FALSE(ParseProfileLevelId("58601f")); - EXPECT_FALSE(ParseProfileLevelId("64e01f")); -} - -TEST(H264ProfileLevelId, TestParsingLevel) { - EXPECT_EQ(kLevel3_1, ParseProfileLevelId("42e01f")->level); - EXPECT_EQ(kLevel1_1, ParseProfileLevelId("42e00b")->level); - EXPECT_EQ(kLevel1_b, ParseProfileLevelId("42f00b")->level); - EXPECT_EQ(kLevel4_2, ParseProfileLevelId("42C02A")->level); - EXPECT_EQ(kLevel5_2, ParseProfileLevelId("640c34")->level); -} - -TEST(H264ProfileLevelId, TestParsingConstrainedBaseline) { - EXPECT_EQ(kProfileConstrainedBaseline, - ParseProfileLevelId("42e01f")->profile); - EXPECT_EQ(kProfileConstrainedBaseline, - ParseProfileLevelId("42C02A")->profile); - EXPECT_EQ(kProfileConstrainedBaseline, - ParseProfileLevelId("4de01f")->profile); - EXPECT_EQ(kProfileConstrainedBaseline, - ParseProfileLevelId("58f01f")->profile); -} - -TEST(H264ProfileLevelId, TestParsingBaseline) { - EXPECT_EQ(kProfileBaseline, ParseProfileLevelId("42a01f")->profile); - EXPECT_EQ(kProfileBaseline, ParseProfileLevelId("58A01F")->profile); -} - -TEST(H264ProfileLevelId, TestParsingMain) { - EXPECT_EQ(kProfileMain, ParseProfileLevelId("4D401f")->profile); -} - -TEST(H264ProfileLevelId, TestParsingHigh) { - EXPECT_EQ(kProfileHigh, ParseProfileLevelId("64001f")->profile); -} - -TEST(H264ProfileLevelId, TestParsingConstrainedHigh) { - EXPECT_EQ(kProfileConstrainedHigh, ParseProfileLevelId("640c1f")->profile); -} - -TEST(H264ProfileLevelId, TestSupportedLevel) { - EXPECT_EQ(kLevel2_1, *SupportedLevel(640 * 480, 25)); - EXPECT_EQ(kLevel3_1, *SupportedLevel(1280 * 720, 30)); - EXPECT_EQ(kLevel4_2, *SupportedLevel(1920 * 1280, 60)); -} - -// Test supported level below level 1 requirements. -TEST(H264ProfileLevelId, TestSupportedLevelInvalid) { - EXPECT_FALSE(SupportedLevel(0, 0)); - // All levels support fps > 5. - EXPECT_FALSE(SupportedLevel(1280 * 720, 5)); - // All levels support frame sizes > 183 * 137. 
- EXPECT_FALSE(SupportedLevel(183 * 137, 30)); -} - -TEST(H264ProfileLevelId, TestToString) { - EXPECT_EQ("42e01f", *ProfileLevelIdToString(ProfileLevelId( - kProfileConstrainedBaseline, kLevel3_1))); - EXPECT_EQ("42000a", - *ProfileLevelIdToString(ProfileLevelId(kProfileBaseline, kLevel1))); - EXPECT_EQ("4d001f", - ProfileLevelIdToString(ProfileLevelId(kProfileMain, kLevel3_1))); - EXPECT_EQ("640c2a", *ProfileLevelIdToString( - ProfileLevelId(kProfileConstrainedHigh, kLevel4_2))); - EXPECT_EQ("64002a", - *ProfileLevelIdToString(ProfileLevelId(kProfileHigh, kLevel4_2))); -} - -TEST(H264ProfileLevelId, TestToStringLevel1b) { - EXPECT_EQ("42f00b", *ProfileLevelIdToString(ProfileLevelId( - kProfileConstrainedBaseline, kLevel1_b))); - EXPECT_EQ("42100b", *ProfileLevelIdToString( - ProfileLevelId(kProfileBaseline, kLevel1_b))); - EXPECT_EQ("4d100b", - *ProfileLevelIdToString(ProfileLevelId(kProfileMain, kLevel1_b))); -} - -TEST(H264ProfileLevelId, TestToStringRoundTrip) { - EXPECT_EQ("42e01f", *ProfileLevelIdToString(*ParseProfileLevelId("42e01f"))); - EXPECT_EQ("42e01f", *ProfileLevelIdToString(*ParseProfileLevelId("42E01F"))); - EXPECT_EQ("4d100b", *ProfileLevelIdToString(*ParseProfileLevelId("4d100b"))); - EXPECT_EQ("4d100b", *ProfileLevelIdToString(*ParseProfileLevelId("4D100B"))); - EXPECT_EQ("640c2a", *ProfileLevelIdToString(*ParseProfileLevelId("640c2a"))); - EXPECT_EQ("640c2a", *ProfileLevelIdToString(*ParseProfileLevelId("640C2A"))); -} - -TEST(H264ProfileLevelId, TestToStringInvalid) { - EXPECT_FALSE(ProfileLevelIdToString(ProfileLevelId(kProfileHigh, kLevel1_b))); - EXPECT_FALSE(ProfileLevelIdToString( - ProfileLevelId(kProfileConstrainedHigh, kLevel1_b))); - EXPECT_FALSE(ProfileLevelIdToString( - ProfileLevelId(static_cast(255), kLevel3_1))); -} - -TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdEmpty) { - const absl::optional profile_level_id = - ParseSdpProfileLevelId(CodecParameterMap()); - EXPECT_TRUE(profile_level_id); - 
EXPECT_EQ(kProfileConstrainedBaseline, profile_level_id->profile); - EXPECT_EQ(kLevel3_1, profile_level_id->level); -} - -TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdConstrainedHigh) { - CodecParameterMap params; - params["profile-level-id"] = "640c2a"; - const absl::optional profile_level_id = - ParseSdpProfileLevelId(params); - EXPECT_TRUE(profile_level_id); - EXPECT_EQ(kProfileConstrainedHigh, profile_level_id->profile); - EXPECT_EQ(kLevel4_2, profile_level_id->level); -} - -TEST(H264ProfileLevelId, TestParseSdpProfileLevelIdInvalid) { - CodecParameterMap params; - params["profile-level-id"] = "foobar"; - EXPECT_FALSE(ParseSdpProfileLevelId(params)); -} - -TEST(H264ProfileLevelId, TestGenerateProfileLevelIdForAnswerEmpty) { - CodecParameterMap answer_params; - GenerateProfileLevelIdForAnswer(CodecParameterMap(), CodecParameterMap(), - &answer_params); - EXPECT_TRUE(answer_params.empty()); -} - -TEST(H264ProfileLevelId, - TestGenerateProfileLevelIdForAnswerLevelSymmetryCapped) { - CodecParameterMap low_level; - low_level["profile-level-id"] = "42e015"; - CodecParameterMap high_level; - high_level["profile-level-id"] = "42e01f"; - - // Level asymmetry is not allowed; test that answer level is the lower of the - // local and remote levels. 
- CodecParameterMap answer_params; - GenerateProfileLevelIdForAnswer(low_level /* local_supported */, - high_level /* remote_offered */, - &answer_params); - EXPECT_EQ("42e015", answer_params["profile-level-id"]); - - CodecParameterMap answer_params2; - GenerateProfileLevelIdForAnswer(high_level /* local_supported */, - low_level /* remote_offered */, - &answer_params2); - EXPECT_EQ("42e015", answer_params2["profile-level-id"]); -} - -TEST(H264ProfileLevelId, - TestGenerateProfileLevelIdForAnswerConstrainedBaselineLevelAsymmetry) { - CodecParameterMap local_params; - local_params["profile-level-id"] = "42e01f"; - local_params["level-asymmetry-allowed"] = "1"; - CodecParameterMap remote_params; - remote_params["profile-level-id"] = "42e015"; - remote_params["level-asymmetry-allowed"] = "1"; - CodecParameterMap answer_params; - GenerateProfileLevelIdForAnswer(local_params, remote_params, &answer_params); - // When level asymmetry is allowed, we can answer a higher level than what was - // offered. - EXPECT_EQ("42e01f", answer_params["profile-level-id"]); -} - -} // namespace H264 -} // namespace webrtc diff --git a/common_video/h264/sps_parser.cc b/common_video/h264/sps_parser.cc index 3d78184e7a..f505928f29 100644 --- a/common_video/h264/sps_parser.cc +++ b/common_video/h264/sps_parser.cc @@ -71,14 +71,14 @@ absl::optional SpsParser::ParseSpsUpToVui( // profile_idc: u(8). We need it to determine if we need to read/skip chroma // formats. uint8_t profile_idc; - RETURN_EMPTY_ON_FAIL(buffer->ReadUInt8(&profile_idc)); + RETURN_EMPTY_ON_FAIL(buffer->ReadUInt8(profile_idc)); // constraint_set0_flag through constraint_set5_flag + reserved_zero_2bits // 1 bit each for the flags + 2 bits = 8 bits = 1 byte. 
RETURN_EMPTY_ON_FAIL(buffer->ConsumeBytes(1)); // level_idc: u(8) RETURN_EMPTY_ON_FAIL(buffer->ConsumeBytes(1)); // seq_parameter_set_id: ue(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&sps.id)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(sps.id)); sps.separate_colour_plane_flag = 0; // See if profile_idc has chroma format information. if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 || @@ -86,21 +86,20 @@ absl::optional SpsParser::ParseSpsUpToVui( profile_idc == 86 || profile_idc == 118 || profile_idc == 128 || profile_idc == 138 || profile_idc == 139 || profile_idc == 134) { // chroma_format_idc: ue(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&chroma_format_idc)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(chroma_format_idc)); if (chroma_format_idc == 3) { // separate_colour_plane_flag: u(1) - RETURN_EMPTY_ON_FAIL( - buffer->ReadBits(&sps.separate_colour_plane_flag, 1)); + RETURN_EMPTY_ON_FAIL(buffer->ReadBits(1, sps.separate_colour_plane_flag)); } // bit_depth_luma_minus8: ue(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(golomb_ignored)); // bit_depth_chroma_minus8: ue(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(golomb_ignored)); // qpprime_y_zero_transform_bypass_flag: u(1) RETURN_EMPTY_ON_FAIL(buffer->ConsumeBits(1)); // seq_scaling_matrix_present_flag: u(1) uint32_t seq_scaling_matrix_present_flag; - RETURN_EMPTY_ON_FAIL(buffer->ReadBits(&seq_scaling_matrix_present_flag, 1)); + RETURN_EMPTY_ON_FAIL(buffer->ReadBits(1, seq_scaling_matrix_present_flag)); if (seq_scaling_matrix_present_flag) { // Process the scaling lists just enough to be able to properly // skip over them, so we can still read the resolution on streams @@ -110,7 +109,7 @@ absl::optional SpsParser::ParseSpsUpToVui( // seq_scaling_list_present_flag[i] : u(1) uint32_t 
seq_scaling_list_present_flags; RETURN_EMPTY_ON_FAIL( - buffer->ReadBits(&seq_scaling_list_present_flags, 1)); + buffer->ReadBits(1, seq_scaling_list_present_flags)); if (seq_scaling_list_present_flags != 0) { int last_scale = 8; int next_scale = 8; @@ -120,7 +119,7 @@ absl::optional SpsParser::ParseSpsUpToVui( int32_t delta_scale; // delta_scale: se(v) RETURN_EMPTY_ON_FAIL( - buffer->ReadSignedExponentialGolomb(&delta_scale)); + buffer->ReadSignedExponentialGolomb(delta_scale)); RETURN_EMPTY_ON_FAIL(delta_scale >= kScalingDeltaMin && delta_scale <= kScaldingDeltaMax); next_scale = (last_scale + delta_scale + 256) % 256; @@ -140,18 +139,18 @@ absl::optional SpsParser::ParseSpsUpToVui( // log2_max_frame_num_minus4: ue(v) uint32_t log2_max_frame_num_minus4; - if (!buffer->ReadExponentialGolomb(&log2_max_frame_num_minus4) || + if (!buffer->ReadExponentialGolomb(log2_max_frame_num_minus4) || log2_max_frame_num_minus4 > kMaxLog2Minus4) { return OptionalSps(); } sps.log2_max_frame_num = log2_max_frame_num_minus4 + 4; // pic_order_cnt_type: ue(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&sps.pic_order_cnt_type)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(sps.pic_order_cnt_type)); if (sps.pic_order_cnt_type == 0) { // log2_max_pic_order_cnt_lsb_minus4: ue(v) uint32_t log2_max_pic_order_cnt_lsb_minus4; - if (!buffer->ReadExponentialGolomb(&log2_max_pic_order_cnt_lsb_minus4) || + if (!buffer->ReadExponentialGolomb(log2_max_pic_order_cnt_lsb_minus4) || log2_max_pic_order_cnt_lsb_minus4 > kMaxLog2Minus4) { return OptionalSps(); } @@ -159,22 +158,22 @@ absl::optional SpsParser::ParseSpsUpToVui( } else if (sps.pic_order_cnt_type == 1) { // delta_pic_order_always_zero_flag: u(1) RETURN_EMPTY_ON_FAIL( - buffer->ReadBits(&sps.delta_pic_order_always_zero_flag, 1)); + buffer->ReadBits(1, sps.delta_pic_order_always_zero_flag)); // offset_for_non_ref_pic: se(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&golomb_ignored)); + 
RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(golomb_ignored)); // offset_for_top_to_bottom_field: se(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(golomb_ignored)); // num_ref_frames_in_pic_order_cnt_cycle: ue(v) uint32_t num_ref_frames_in_pic_order_cnt_cycle; RETURN_EMPTY_ON_FAIL( - buffer->ReadExponentialGolomb(&num_ref_frames_in_pic_order_cnt_cycle)); + buffer->ReadExponentialGolomb(num_ref_frames_in_pic_order_cnt_cycle)); for (size_t i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; ++i) { // offset_for_ref_frame[i]: se(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&golomb_ignored)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(golomb_ignored)); } } // max_num_ref_frames: ue(v) - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&sps.max_num_ref_frames)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(sps.max_num_ref_frames)); // gaps_in_frame_num_value_allowed_flag: u(1) RETURN_EMPTY_ON_FAIL(buffer->ConsumeBits(1)); // @@ -185,13 +184,13 @@ absl::optional SpsParser::ParseSpsUpToVui( // // pic_width_in_mbs_minus1: ue(v) uint32_t pic_width_in_mbs_minus1; - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&pic_width_in_mbs_minus1)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(pic_width_in_mbs_minus1)); // pic_height_in_map_units_minus1: ue(v) uint32_t pic_height_in_map_units_minus1; RETURN_EMPTY_ON_FAIL( - buffer->ReadExponentialGolomb(&pic_height_in_map_units_minus1)); + buffer->ReadExponentialGolomb(pic_height_in_map_units_minus1)); // frame_mbs_only_flag: u(1) - RETURN_EMPTY_ON_FAIL(buffer->ReadBits(&sps.frame_mbs_only_flag, 1)); + RETURN_EMPTY_ON_FAIL(buffer->ReadBits(1, sps.frame_mbs_only_flag)); if (!sps.frame_mbs_only_flag) { // mb_adaptive_frame_field_flag: u(1) RETURN_EMPTY_ON_FAIL(buffer->ConsumeBits(1)); @@ -207,19 +206,18 @@ absl::optional SpsParser::ParseSpsUpToVui( uint32_t frame_crop_right_offset = 0; uint32_t 
frame_crop_top_offset = 0; uint32_t frame_crop_bottom_offset = 0; - RETURN_EMPTY_ON_FAIL(buffer->ReadBits(&frame_cropping_flag, 1)); + RETURN_EMPTY_ON_FAIL(buffer->ReadBits(1, frame_cropping_flag)); if (frame_cropping_flag) { // frame_crop_{left, right, top, bottom}_offset: ue(v) + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(frame_crop_left_offset)); RETURN_EMPTY_ON_FAIL( - buffer->ReadExponentialGolomb(&frame_crop_left_offset)); + buffer->ReadExponentialGolomb(frame_crop_right_offset)); + RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(frame_crop_top_offset)); RETURN_EMPTY_ON_FAIL( - buffer->ReadExponentialGolomb(&frame_crop_right_offset)); - RETURN_EMPTY_ON_FAIL(buffer->ReadExponentialGolomb(&frame_crop_top_offset)); - RETURN_EMPTY_ON_FAIL( - buffer->ReadExponentialGolomb(&frame_crop_bottom_offset)); + buffer->ReadExponentialGolomb(frame_crop_bottom_offset)); } // vui_parameters_present_flag: u(1) - RETURN_EMPTY_ON_FAIL(buffer->ReadBits(&sps.vui_params_present, 1)); + RETURN_EMPTY_ON_FAIL(buffer->ReadBits(1, sps.vui_params_present)); // Far enough! We don't use the rest of the SPS. 
diff --git a/common_video/h264/sps_vui_rewriter.cc b/common_video/h264/sps_vui_rewriter.cc index 1c420a9e4b..856b012b32 100644 --- a/common_video/h264/sps_vui_rewriter.cc +++ b/common_video/h264/sps_vui_rewriter.cc @@ -45,29 +45,31 @@ enum SpsValidEvent { kSpsRewrittenMax = 8 }; -#define RETURN_FALSE_ON_FAIL(x) \ - if (!(x)) { \ - RTC_LOG_F(LS_ERROR) << " (line:" << __LINE__ << ") FAILED: " #x; \ - return false; \ - } +#define RETURN_FALSE_ON_FAIL(x) \ + do { \ + if (!(x)) { \ + RTC_LOG_F(LS_ERROR) << " (line:" << __LINE__ << ") FAILED: " #x; \ + return false; \ + } \ + } while (0) #define COPY_UINT8(src, dest, tmp) \ do { \ - RETURN_FALSE_ON_FAIL((src)->ReadUInt8(&tmp)); \ + RETURN_FALSE_ON_FAIL((src)->ReadUInt8(tmp)); \ if (dest) \ RETURN_FALSE_ON_FAIL((dest)->WriteUInt8(tmp)); \ } while (0) #define COPY_EXP_GOLOMB(src, dest, tmp) \ do { \ - RETURN_FALSE_ON_FAIL((src)->ReadExponentialGolomb(&tmp)); \ + RETURN_FALSE_ON_FAIL((src)->ReadExponentialGolomb(tmp)); \ if (dest) \ RETURN_FALSE_ON_FAIL((dest)->WriteExponentialGolomb(tmp)); \ } while (0) #define COPY_BITS(src, dest, tmp, bits) \ do { \ - RETURN_FALSE_ON_FAIL((src)->ReadBits(&tmp, bits)); \ + RETURN_FALSE_ON_FAIL((src)->ReadBits(bits, tmp)); \ if (dest) \ RETURN_FALSE_ON_FAIL((dest)->WriteBits(tmp, bits)); \ } while (0) @@ -210,32 +212,23 @@ SpsVuiRewriter::ParseResult SpsVuiRewriter::ParseAndRewriteSps( return result; } -void SpsVuiRewriter::ParseOutgoingBitstreamAndRewriteSps( +rtc::Buffer SpsVuiRewriter::ParseOutgoingBitstreamAndRewrite( rtc::ArrayView buffer, - size_t num_nalus, - const size_t* nalu_offsets, - const size_t* nalu_lengths, - const webrtc::ColorSpace* color_space, - rtc::Buffer* output_buffer, - size_t* output_nalu_offsets, - size_t* output_nalu_lengths) { - // Allocate some extra space for potentially adding a missing VUI. 
- output_buffer->EnsureCapacity(buffer.size() + num_nalus * kMaxVuiSpsIncrease); - - const uint8_t* prev_nalu_ptr = buffer.data(); - size_t prev_nalu_length = 0; + const webrtc::ColorSpace* color_space) { + std::vector nalus = + H264::FindNaluIndices(buffer.data(), buffer.size()); - for (size_t i = 0; i < num_nalus; ++i) { - const uint8_t* nalu_ptr = buffer.data() + nalu_offsets[i]; - const size_t nalu_length = nalu_lengths[i]; + // Allocate some extra space for potentially adding a missing VUI. + rtc::Buffer output_buffer(/*size=*/0, /*capacity=*/buffer.size() + + nalus.size() * kMaxVuiSpsIncrease); + for (const H264::NaluIndex& nalu : nalus) { // Copy NAL unit start code. - const uint8_t* start_code_ptr = prev_nalu_ptr + prev_nalu_length; + const uint8_t* start_code_ptr = buffer.data() + nalu.start_offset; const size_t start_code_length = - (nalu_ptr - prev_nalu_ptr) - prev_nalu_length; - output_buffer->AppendData(start_code_ptr, start_code_length); - - bool updated_sps = false; + nalu.payload_start_offset - nalu.start_offset; + const uint8_t* nalu_ptr = buffer.data() + nalu.payload_start_offset; + const size_t nalu_length = nalu.payload_size; if (H264::ParseNaluType(nalu_ptr[0]) == H264::NaluType::kSps) { // Check if stream uses picture order count type 0, and if so rewrite it @@ -260,22 +253,20 @@ void SpsVuiRewriter::ParseOutgoingBitstreamAndRewriteSps( nalu_ptr + H264::kNaluTypeSize, nalu_length - H264::kNaluTypeSize, &sps, color_space, &output_nalu, Direction::kOutgoing); if (result == ParseResult::kVuiRewritten) { - updated_sps = true; - output_nalu_offsets[i] = output_buffer->size(); - output_nalu_lengths[i] = output_nalu.size(); - output_buffer->AppendData(output_nalu.data(), output_nalu.size()); + output_buffer.AppendData(start_code_ptr, start_code_length); + output_buffer.AppendData(output_nalu.data(), output_nalu.size()); + continue; } + } else if (H264::ParseNaluType(nalu_ptr[0]) == H264::NaluType::kAud) { + // Skip the access unit delimiter copy. 
+ continue; } - if (!updated_sps) { - output_nalu_offsets[i] = output_buffer->size(); - output_nalu_lengths[i] = nalu_length; - output_buffer->AppendData(nalu_ptr, nalu_length); - } - - prev_nalu_ptr = nalu_ptr; - prev_nalu_length = nalu_length; + // VUI wasn't rewritten and it is not an AUD; copy the NAL unit as is. + output_buffer.AppendData(start_code_ptr, start_code_length); + output_buffer.AppendData(nalu_ptr, nalu_length); } + return output_buffer; } namespace { @@ -380,7 +371,7 @@ bool CopyAndRewriteVui(const SpsParser::SpsState& sps, // bitstream_restriction_flag: u(1) uint32_t bitstream_restriction_flag; - RETURN_FALSE_ON_FAIL(source->ReadBits(&bitstream_restriction_flag, 1)); + RETURN_FALSE_ON_FAIL(source->ReadBits(1, bitstream_restriction_flag)); RETURN_FALSE_ON_FAIL(destination->WriteBits(1, 1)); if (bitstream_restriction_flag == 0) { // We're adding one from scratch. @@ -407,9 +398,9 @@ bool CopyAndRewriteVui(const SpsParser::SpsState& sps, // want, then we don't need to be rewriting.
uint32_t max_num_reorder_frames, max_dec_frame_buffering; RETURN_FALSE_ON_FAIL( - source->ReadExponentialGolomb(&max_num_reorder_frames)); + source->ReadExponentialGolomb(max_num_reorder_frames)); RETURN_FALSE_ON_FAIL( - source->ReadExponentialGolomb(&max_dec_frame_buffering)); + source->ReadExponentialGolomb(max_dec_frame_buffering)); RETURN_FALSE_ON_FAIL(destination->WriteExponentialGolomb(0)); RETURN_FALSE_ON_FAIL( destination->WriteExponentialGolomb(sps.max_num_ref_frames)); @@ -522,15 +513,15 @@ bool CopyOrRewriteVideoSignalTypeInfo( uint8_t colour_primaries = 3; // H264 default: unspecified uint8_t transfer_characteristics = 3; // H264 default: unspecified uint8_t matrix_coefficients = 3; // H264 default: unspecified - RETURN_FALSE_ON_FAIL(source->ReadBits(&video_signal_type_present_flag, 1)); + RETURN_FALSE_ON_FAIL(source->ReadBits(1, video_signal_type_present_flag)); if (video_signal_type_present_flag) { - RETURN_FALSE_ON_FAIL(source->ReadBits(&video_format, 3)); - RETURN_FALSE_ON_FAIL(source->ReadBits(&video_full_range_flag, 1)); - RETURN_FALSE_ON_FAIL(source->ReadBits(&colour_description_present_flag, 1)); + RETURN_FALSE_ON_FAIL(source->ReadBits(3, video_format)); + RETURN_FALSE_ON_FAIL(source->ReadBits(1, video_full_range_flag)); + RETURN_FALSE_ON_FAIL(source->ReadBits(1, colour_description_present_flag)); if (colour_description_present_flag) { - RETURN_FALSE_ON_FAIL(source->ReadUInt8(&colour_primaries)); - RETURN_FALSE_ON_FAIL(source->ReadUInt8(&transfer_characteristics)); - RETURN_FALSE_ON_FAIL(source->ReadUInt8(&matrix_coefficients)); + RETURN_FALSE_ON_FAIL(source->ReadUInt8(colour_primaries)); + RETURN_FALSE_ON_FAIL(source->ReadUInt8(transfer_characteristics)); + RETURN_FALSE_ON_FAIL(source->ReadUInt8(matrix_coefficients)); } } diff --git a/common_video/h264/sps_vui_rewriter.h b/common_video/h264/sps_vui_rewriter.h index 4cd4cb976d..311db30d50 100644 --- a/common_video/h264/sps_vui_rewriter.h +++ b/common_video/h264/sps_vui_rewriter.h @@ -50,20 
+50,11 @@ class SpsVuiRewriter : private SpsParser { rtc::Buffer* destination, Direction Direction); - // Parses NAL units from |buffer| based on |nalu_offsets| and |nalu_lengths| - // and rewrites VUI in SPS blocks if necessary. - // The result is written to |output_buffer| and modified NAL unit offsets - // and lenghts are written to |output_nalu_offsets| and |output_nalu_lenghts| - // to account for any added data. - static void ParseOutgoingBitstreamAndRewriteSps( + // Parses NAL units from |buffer|, strips AUD blocks and rewrites VUI in SPS + // blocks if necessary. + static rtc::Buffer ParseOutgoingBitstreamAndRewrite( rtc::ArrayView buffer, - size_t num_nalus, - const size_t* nalu_offsets, - const size_t* nalu_lengths, - const ColorSpace* color_space, - rtc::Buffer* output_buffer, - size_t* output_nalu_offsets, - size_t* output_nalu_lengths); + const ColorSpace* color_space); private: static ParseResult ParseAndRewriteSps( diff --git a/common_video/h264/sps_vui_rewriter_unittest.cc b/common_video/h264/sps_vui_rewriter_unittest.cc index e008948ce1..2907949e6c 100644 --- a/common_video/h264/sps_vui_rewriter_unittest.cc +++ b/common_video/h264/sps_vui_rewriter_unittest.cc @@ -36,6 +36,7 @@ static const size_t kWidth = 640; static const size_t kHeight = 480; static const uint8_t kStartSequence[] = {0x00, 0x00, 0x00, 0x01}; +static const uint8_t kAud[] = {H264::NaluType::kAud, 0x09, 0x10}; static const uint8_t kSpsNaluType[] = {H264::NaluType::kSps}; static const uint8_t kIdr1[] = {H264::NaluType::kIdr, 0xFF, 0x00, 0x00, 0x04}; static const uint8_t kIdr2[] = {H264::NaluType::kIdr, 0xFF, 0x00, 0x11}; @@ -396,36 +397,13 @@ TEST(SpsVuiRewriterOutgoingVuiTest, ParseOutgoingBitstreamOptimalVui) { GenerateFakeSps(kVuiNoFrameBuffering, &optimal_sps); rtc::Buffer buffer; - const size_t kNumNalus = 2; - size_t nalu_offsets[kNumNalus]; - size_t nalu_lengths[kNumNalus]; buffer.AppendData(kStartSequence); - nalu_offsets[0] = buffer.size(); - nalu_lengths[0] = 
optimal_sps.size(); buffer.AppendData(optimal_sps); buffer.AppendData(kStartSequence); - nalu_offsets[1] = buffer.size(); - nalu_lengths[1] = sizeof(kIdr1); buffer.AppendData(kIdr1); - rtc::Buffer modified_buffer; - size_t modified_nalu_offsets[kNumNalus]; - size_t modified_nalu_lengths[kNumNalus]; - - SpsVuiRewriter::ParseOutgoingBitstreamAndRewriteSps( - buffer, kNumNalus, nalu_offsets, nalu_lengths, nullptr, &modified_buffer, - modified_nalu_offsets, modified_nalu_lengths); - - EXPECT_THAT( - std::vector(modified_buffer.data(), - modified_buffer.data() + modified_buffer.size()), - ::testing::ElementsAreArray(buffer.data(), buffer.size())); - EXPECT_THAT(std::vector(modified_nalu_offsets, - modified_nalu_offsets + kNumNalus), - ::testing::ElementsAreArray(nalu_offsets, kNumNalus)); - EXPECT_THAT(std::vector(modified_nalu_lengths, - modified_nalu_lengths + kNumNalus), - ::testing::ElementsAreArray(nalu_lengths, kNumNalus)); + EXPECT_THAT(SpsVuiRewriter::ParseOutgoingBitstreamAndRewrite(buffer, nullptr), + ::testing::ElementsAreArray(buffer)); } TEST(SpsVuiRewriterOutgoingVuiTest, ParseOutgoingBitstreamNoVui) { @@ -435,61 +413,51 @@ TEST(SpsVuiRewriterOutgoingVuiTest, ParseOutgoingBitstreamNoVui) { GenerateFakeSps(kVuiNotPresent, &sps); rtc::Buffer buffer; - const size_t kNumNalus = 3; - size_t nalu_offsets[kNumNalus]; - size_t nalu_lengths[kNumNalus]; buffer.AppendData(kStartSequence); - nalu_offsets[0] = buffer.size(); - nalu_lengths[0] = sizeof(kIdr1); buffer.AppendData(kIdr1); buffer.AppendData(kStartSequence); - nalu_offsets[1] = buffer.size(); - nalu_lengths[1] = sizeof(kSpsNaluType) + sps.size(); buffer.AppendData(kSpsNaluType); buffer.AppendData(sps); buffer.AppendData(kStartSequence); - nalu_offsets[2] = buffer.size(); - nalu_lengths[2] = sizeof(kIdr2); buffer.AppendData(kIdr2); rtc::Buffer optimal_sps; GenerateFakeSps(kVuiNoFrameBuffering, &optimal_sps); rtc::Buffer expected_buffer; - size_t expected_nalu_offsets[kNumNalus]; - size_t 
expected_nalu_lengths[kNumNalus]; expected_buffer.AppendData(kStartSequence); - expected_nalu_offsets[0] = expected_buffer.size(); - expected_nalu_lengths[0] = sizeof(kIdr1); expected_buffer.AppendData(kIdr1); expected_buffer.AppendData(kStartSequence); - expected_nalu_offsets[1] = expected_buffer.size(); - expected_nalu_lengths[1] = sizeof(kSpsNaluType) + optimal_sps.size(); expected_buffer.AppendData(kSpsNaluType); expected_buffer.AppendData(optimal_sps); expected_buffer.AppendData(kStartSequence); - expected_nalu_offsets[2] = expected_buffer.size(); - expected_nalu_lengths[2] = sizeof(kIdr2); expected_buffer.AppendData(kIdr2); - rtc::Buffer modified_buffer; - size_t modified_nalu_offsets[kNumNalus]; - size_t modified_nalu_lengths[kNumNalus]; - - SpsVuiRewriter::ParseOutgoingBitstreamAndRewriteSps( - buffer, kNumNalus, nalu_offsets, nalu_lengths, nullptr, &modified_buffer, - modified_nalu_offsets, modified_nalu_lengths); - - EXPECT_THAT( - std::vector(modified_buffer.data(), - modified_buffer.data() + modified_buffer.size()), - ::testing::ElementsAreArray(expected_buffer.data(), - expected_buffer.size())); - EXPECT_THAT(std::vector(modified_nalu_offsets, - modified_nalu_offsets + kNumNalus), - ::testing::ElementsAreArray(expected_nalu_offsets, kNumNalus)); - EXPECT_THAT(std::vector(modified_nalu_lengths, - modified_nalu_lengths + kNumNalus), - ::testing::ElementsAreArray(expected_nalu_lengths, kNumNalus)); + EXPECT_THAT(SpsVuiRewriter::ParseOutgoingBitstreamAndRewrite(buffer, nullptr), + ::testing::ElementsAreArray(expected_buffer)); +} + +TEST(SpsVuiRewriterOutgoingAudTest, ParseOutgoingBitstreamWithAud) { + rtc::LogMessage::LogToDebug(rtc::LS_VERBOSE); + + rtc::Buffer optimal_sps; + GenerateFakeSps(kVuiNoFrameBuffering, &optimal_sps); + + rtc::Buffer buffer; + buffer.AppendData(kStartSequence); + buffer.AppendData(kAud); + buffer.AppendData(kStartSequence); + buffer.AppendData(optimal_sps); + buffer.AppendData(kStartSequence); + buffer.AppendData(kIdr1); + + 
rtc::Buffer expected_buffer; + expected_buffer.AppendData(kStartSequence); + expected_buffer.AppendData(optimal_sps); + expected_buffer.AppendData(kStartSequence); + expected_buffer.AppendData(kIdr1); + + EXPECT_THAT(SpsVuiRewriter::ParseOutgoingBitstreamAndRewrite(buffer, nullptr), + ::testing::ElementsAreArray(expected_buffer)); } } // namespace webrtc diff --git a/common_video/i420_buffer_pool.cc b/common_video/i420_buffer_pool.cc deleted file mode 100644 index d13da6a172..0000000000 --- a/common_video/i420_buffer_pool.cc +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "common_video/include/i420_buffer_pool.h" - -#include - -#include "rtc_base/checks.h" - -namespace webrtc { - -I420BufferPool::I420BufferPool() : I420BufferPool(false) {} -I420BufferPool::I420BufferPool(bool zero_initialize) - : I420BufferPool(zero_initialize, std::numeric_limits::max()) {} -I420BufferPool::I420BufferPool(bool zero_initialize, - size_t max_number_of_buffers) - : zero_initialize_(zero_initialize), - max_number_of_buffers_(max_number_of_buffers) {} -I420BufferPool::~I420BufferPool() = default; - -void I420BufferPool::Release() { - buffers_.clear(); -} - -bool I420BufferPool::Resize(size_t max_number_of_buffers) { - RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); - size_t used_buffers_count = 0; - for (const rtc::scoped_refptr& buffer : buffers_) { - // If the buffer is in use, the ref count will be >= 2, one from the list we - // are looping over and one from the application. 
If the ref count is 1, - // then the list we are looping over holds the only reference and it's safe - // to reuse. - if (!buffer->HasOneRef()) { - used_buffers_count++; - } - } - if (used_buffers_count > max_number_of_buffers) { - return false; - } - max_number_of_buffers_ = max_number_of_buffers; - - size_t buffers_to_purge = buffers_.size() - max_number_of_buffers_; - auto iter = buffers_.begin(); - while (iter != buffers_.end() && buffers_to_purge > 0) { - if ((*iter)->HasOneRef()) { - iter = buffers_.erase(iter); - buffers_to_purge--; - } else { - ++iter; - } - } - return true; -} - -rtc::scoped_refptr I420BufferPool::CreateBuffer(int width, - int height) { - // Default stride_y is width, default uv stride is width / 2 (rounding up). - return CreateBuffer(width, height, width, (width + 1) / 2, (width + 1) / 2); -} - -rtc::scoped_refptr I420BufferPool::CreateBuffer(int width, - int height, - int stride_y, - int stride_u, - int stride_v) { - RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); - // Release buffers with wrong resolution. - for (auto it = buffers_.begin(); it != buffers_.end();) { - const auto& buffer = *it; - if (buffer->width() != width || buffer->height() != height || - buffer->StrideY() != stride_y || buffer->StrideU() != stride_u || - buffer->StrideV() != stride_v) { - it = buffers_.erase(it); - } else { - ++it; - } - } - // Look for a free buffer. - for (const rtc::scoped_refptr& buffer : buffers_) { - // If the buffer is in use, the ref count will be >= 2, one from the list we - // are looping over and one from the application. If the ref count is 1, - // then the list we are looping over holds the only reference and it's safe - // to reuse. - if (buffer->HasOneRef()) - return buffer; - } - - if (buffers_.size() >= max_number_of_buffers_) - return nullptr; - // Allocate new buffer. 
- rtc::scoped_refptr buffer = - new PooledI420Buffer(width, height, stride_y, stride_u, stride_v); - if (zero_initialize_) - buffer->InitializeData(); - buffers_.push_back(buffer); - return buffer; -} - -} // namespace webrtc diff --git a/common_video/i420_buffer_pool_unittest.cc b/common_video/i420_buffer_pool_unittest.cc deleted file mode 100644 index 27503e5b8a..0000000000 --- a/common_video/i420_buffer_pool_unittest.cc +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "common_video/include/i420_buffer_pool.h" - -#include -#include - -#include "api/scoped_refptr.h" -#include "api/video/i420_buffer.h" -#include "api/video/video_frame_buffer.h" -#include "test/gtest.h" - -namespace webrtc { - -TEST(TestI420BufferPool, SimpleFrameReuse) { - I420BufferPool pool; - auto buffer = pool.CreateBuffer(16, 16); - EXPECT_EQ(16, buffer->width()); - EXPECT_EQ(16, buffer->height()); - // Extract non-refcounted pointers for testing. - const uint8_t* y_ptr = buffer->DataY(); - const uint8_t* u_ptr = buffer->DataU(); - const uint8_t* v_ptr = buffer->DataV(); - // Release buffer so that it is returned to the pool. - buffer = nullptr; - // Check that the memory is resued. 
- buffer = pool.CreateBuffer(16, 16); - EXPECT_EQ(y_ptr, buffer->DataY()); - EXPECT_EQ(u_ptr, buffer->DataU()); - EXPECT_EQ(v_ptr, buffer->DataV()); -} - -TEST(TestI420BufferPool, FrameReuseWithDefaultThenExplicitStride) { - I420BufferPool pool; - auto buffer = pool.CreateBuffer(15, 16); - EXPECT_EQ(15, buffer->width()); - EXPECT_EQ(16, buffer->height()); - // The default Y stride is width and UV stride is halfwidth (rounded up). - ASSERT_EQ(15, buffer->StrideY()); - ASSERT_EQ(8, buffer->StrideU()); - ASSERT_EQ(8, buffer->StrideV()); - // Extract non-refcounted pointers for testing. - const uint8_t* y_ptr = buffer->DataY(); - const uint8_t* u_ptr = buffer->DataU(); - const uint8_t* v_ptr = buffer->DataV(); - // Release buffer so that it is returned to the pool. - buffer = nullptr; - // Check that the memory is resued with explicit strides if they match the - // assumed default above. - buffer = pool.CreateBuffer(15, 16, 15, 8, 8); - EXPECT_EQ(y_ptr, buffer->DataY()); - EXPECT_EQ(u_ptr, buffer->DataU()); - EXPECT_EQ(v_ptr, buffer->DataV()); - EXPECT_EQ(15, buffer->width()); - EXPECT_EQ(16, buffer->height()); - EXPECT_EQ(15, buffer->StrideY()); - EXPECT_EQ(8, buffer->StrideU()); - EXPECT_EQ(8, buffer->StrideV()); -} - -TEST(TestI420BufferPool, FailToReuseWrongSize) { - // Set max frames to 1, just to make sure the first buffer is being released. - I420BufferPool pool(/*zero_initialize=*/false, 1); - auto buffer = pool.CreateBuffer(16, 16); - EXPECT_EQ(16, buffer->width()); - EXPECT_EQ(16, buffer->height()); - // Release buffer so that it is returned to the pool. - buffer = nullptr; - // Check that the pool doesn't try to reuse buffers of incorrect size. - buffer = pool.CreateBuffer(32, 16); - ASSERT_TRUE(buffer); - EXPECT_EQ(32, buffer->width()); - EXPECT_EQ(16, buffer->height()); -} - -TEST(TestI420BufferPool, FailToReuseWrongStride) { - // Set max frames to 1, just to make sure the first buffer is being released. 
- I420BufferPool pool(/*zero_initialize=*/false, 1); - auto buffer = pool.CreateBuffer(32, 32, 32, 16, 16); - // Make sure the stride was read correctly, for the rest of the test. - ASSERT_EQ(16, buffer->StrideU()); - ASSERT_EQ(16, buffer->StrideV()); - buffer = pool.CreateBuffer(32, 32, 32, 20, 20); - ASSERT_TRUE(buffer); - EXPECT_EQ(32, buffer->StrideY()); - EXPECT_EQ(20, buffer->StrideU()); - EXPECT_EQ(20, buffer->StrideV()); -} - -TEST(TestI420BufferPool, FrameValidAfterPoolDestruction) { - rtc::scoped_refptr buffer; - { - I420BufferPool pool; - buffer = pool.CreateBuffer(16, 16); - } - EXPECT_EQ(16, buffer->width()); - EXPECT_EQ(16, buffer->height()); - // Try to trigger use-after-free errors by writing to y-plane. - memset(buffer->MutableDataY(), 0xA5, 16 * buffer->StrideY()); -} - -TEST(TestI420BufferPool, MaxNumberOfBuffers) { - I420BufferPool pool(false, 1); - auto buffer1 = pool.CreateBuffer(16, 16); - EXPECT_NE(nullptr, buffer1.get()); - EXPECT_EQ(nullptr, pool.CreateBuffer(16, 16).get()); -} - -} // namespace webrtc diff --git a/common_video/include/bitrate_adjuster.h b/common_video/include/bitrate_adjuster.h index aea1872216..4b208307a1 100644 --- a/common_video/include/bitrate_adjuster.h +++ b/common_video/include/bitrate_adjuster.h @@ -15,8 +15,8 @@ #include #include "absl/types/optional.h" -#include "rtc_base/critical_section.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/system/rtc_export.h" #include "rtc_base/thread_annotations.h" @@ -60,29 +60,31 @@ class RTC_EXPORT BitrateAdjuster { bool IsWithinTolerance(uint32_t bitrate_bps, uint32_t target_bitrate_bps); // Returns smallest possible adjusted value. - uint32_t GetMinAdjustedBitrateBps() const RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + uint32_t GetMinAdjustedBitrateBps() const + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns largest possible adjusted value. 
- uint32_t GetMaxAdjustedBitrateBps() const RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + uint32_t GetMaxAdjustedBitrateBps() const + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void Reset(); void UpdateBitrate(uint32_t current_time_ms) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - rtc::CriticalSection crit_; + mutable Mutex mutex_; const float min_adjusted_bitrate_pct_; const float max_adjusted_bitrate_pct_; // The bitrate we want. - volatile uint32_t target_bitrate_bps_ RTC_GUARDED_BY(crit_); + volatile uint32_t target_bitrate_bps_ RTC_GUARDED_BY(mutex_); // The bitrate we use to get what we want. - volatile uint32_t adjusted_bitrate_bps_ RTC_GUARDED_BY(crit_); + volatile uint32_t adjusted_bitrate_bps_ RTC_GUARDED_BY(mutex_); // The target bitrate that the adjusted bitrate was computed from. - volatile uint32_t last_adjusted_target_bitrate_bps_ RTC_GUARDED_BY(crit_); + volatile uint32_t last_adjusted_target_bitrate_bps_ RTC_GUARDED_BY(mutex_); // Used to estimate bitrate. - RateStatistics bitrate_tracker_ RTC_GUARDED_BY(crit_); + RateStatistics bitrate_tracker_ RTC_GUARDED_BY(mutex_); // The last time we tried to adjust the bitrate. - uint32_t last_bitrate_update_time_ms_ RTC_GUARDED_BY(crit_); + uint32_t last_bitrate_update_time_ms_ RTC_GUARDED_BY(mutex_); // The number of frames since the last time we tried to adjust the bitrate. 
- uint32_t frames_since_last_update_ RTC_GUARDED_BY(crit_); + uint32_t frames_since_last_update_ RTC_GUARDED_BY(mutex_); }; } // namespace webrtc diff --git a/common_video/include/incoming_video_stream.h b/common_video/include/incoming_video_stream.h index 0dcd4efcbf..d616c5a2ec 100644 --- a/common_video/include/incoming_video_stream.h +++ b/common_video/include/incoming_video_stream.h @@ -13,13 +13,14 @@ #include +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_factory.h" #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" #include "common_video/video_render_frames.h" #include "rtc_base/race_checker.h" #include "rtc_base/task_queue.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { @@ -34,10 +35,10 @@ class IncomingVideoStream : public rtc::VideoSinkInterface { void OnFrame(const VideoFrame& video_frame) override; void Dequeue(); - rtc::ThreadChecker main_thread_checker_; + SequenceChecker main_thread_checker_; rtc::RaceChecker decoder_race_checker_; - VideoRenderFrames render_buffers_; // Only touched on the TaskQueue. 
+ VideoRenderFrames render_buffers_ RTC_GUARDED_BY(&incoming_render_queue_); rtc::VideoSinkInterface* const callback_; rtc::TaskQueue incoming_render_queue_; }; diff --git a/common_video/include/video_frame_buffer.h b/common_video/include/video_frame_buffer.h index bc70f34ec8..593464abe4 100644 --- a/common_video/include/video_frame_buffer.h +++ b/common_video/include/video_frame_buffer.h @@ -12,10 +12,10 @@ #define COMMON_VIDEO_INCLUDE_VIDEO_FRAME_BUFFER_H_ #include +#include #include "api/scoped_refptr.h" #include "api/video/video_frame_buffer.h" -#include "rtc_base/callback.h" #include "rtc_base/ref_counted_object.h" namespace webrtc { @@ -29,7 +29,7 @@ rtc::scoped_refptr WrapI420Buffer( int u_stride, const uint8_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used); + std::function no_longer_used); rtc::scoped_refptr WrapI444Buffer( int width, @@ -40,7 +40,7 @@ rtc::scoped_refptr WrapI444Buffer( int u_stride, const uint8_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used); + std::function no_longer_used); rtc::scoped_refptr WrapI420ABuffer( int width, @@ -53,7 +53,7 @@ rtc::scoped_refptr WrapI420ABuffer( int v_stride, const uint8_t* a_plane, int a_stride, - const rtc::Callback0& no_longer_used); + std::function no_longer_used); rtc::scoped_refptr WrapYuvBuffer( VideoFrameBuffer::Type type, @@ -65,7 +65,7 @@ rtc::scoped_refptr WrapYuvBuffer( int u_stride, const uint8_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used); + std::function no_longer_used); rtc::scoped_refptr WrapI010Buffer( int width, @@ -76,7 +76,7 @@ rtc::scoped_refptr WrapI010Buffer( int u_stride, const uint16_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used); + std::function no_longer_used); } // namespace webrtc diff --git a/common_video/include/i420_buffer_pool.h b/common_video/include/video_frame_buffer_pool.h similarity index 53% rename from common_video/include/i420_buffer_pool.h rename to common_video/include/video_frame_buffer_pool.h 
index 44f4821798..6af117577e 100644 --- a/common_video/include/i420_buffer_pool.h +++ b/common_video/include/video_frame_buffer_pool.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef COMMON_VIDEO_INCLUDE_I420_BUFFER_POOL_H_ -#define COMMON_VIDEO_INCLUDE_I420_BUFFER_POOL_H_ +#ifndef COMMON_VIDEO_INCLUDE_VIDEO_FRAME_BUFFER_POOL_H_ +#define COMMON_VIDEO_INCLUDE_VIDEO_FRAME_BUFFER_POOL_H_ #include @@ -17,36 +17,33 @@ #include "api/scoped_refptr.h" #include "api/video/i420_buffer.h" +#include "api/video/nv12_buffer.h" #include "rtc_base/race_checker.h" #include "rtc_base/ref_counted_object.h" namespace webrtc { -// Simple buffer pool to avoid unnecessary allocations of I420Buffer objects. -// The pool manages the memory of the I420Buffer returned from CreateBuffer. -// When the I420Buffer is destructed, the memory is returned to the pool for use -// by subsequent calls to CreateBuffer. If the resolution passed to CreateBuffer -// changes, old buffers will be purged from the pool. -// Note that CreateBuffer will crash if more than kMaxNumberOfFramesBeforeCrash -// are created. This is to prevent memory leaks where frames are not returned. -class I420BufferPool { +// Simple buffer pool to avoid unnecessary allocations of video frame buffers. +// The pool manages the memory of the I420Buffer/NV12Buffer returned from +// Create(I420|NV12)Buffer. When the buffer is destructed, the memory is +// returned to the pool for use by subsequent calls to Create(I420|NV12)Buffer. +// If the resolution passed to Create(I420|NV12)Buffer changes or requested +// pixel format changes, old buffers will be purged from the pool. +// Note that Create(I420|NV12)Buffer will crash if more than +// kMaxNumberOfFramesBeforeCrash are created. This is to prevent memory leaks +// where frames are not returned. 
+class VideoFrameBufferPool { public: - I420BufferPool(); - explicit I420BufferPool(bool zero_initialize); - I420BufferPool(bool zero_initialze, size_t max_number_of_buffers); - ~I420BufferPool(); + VideoFrameBufferPool(); + explicit VideoFrameBufferPool(bool zero_initialize); + VideoFrameBufferPool(bool zero_initialize, size_t max_number_of_buffers); + ~VideoFrameBufferPool(); // Returns a buffer from the pool. If no suitable buffer exist in the pool // and there are less than |max_number_of_buffers| pending, a buffer is // created. Returns null otherwise. - rtc::scoped_refptr CreateBuffer(int width, int height); - - // Returns a buffer from the pool with the explicitly specified stride. - rtc::scoped_refptr CreateBuffer(int width, - int height, - int stride_y, - int stride_u, - int stride_v); + rtc::scoped_refptr CreateI420Buffer(int width, int height); + rtc::scoped_refptr CreateNV12Buffer(int width, int height); // Changes the max amount of buffers in the pool to the new value. // Returns true if change was successful and false if the amount of already @@ -58,12 +55,11 @@ class I420BufferPool { void Release(); private: - // Explicitly use a RefCountedObject to get access to HasOneRef, - // needed by the pool to check exclusive access. - using PooledI420Buffer = rtc::RefCountedObject; + rtc::scoped_refptr + GetExistingBuffer(int width, int height, VideoFrameBuffer::Type type); rtc::RaceChecker race_checker_; - std::list> buffers_; + std::list> buffers_; // If true, newly allocated buffers are zero-initialized. Note that recycled // buffers are not zero'd before reuse. 
This is required of buffers used by // FFmpeg according to http://crbug.com/390941, which only requires it for the @@ -76,4 +72,4 @@ class I420BufferPool { } // namespace webrtc -#endif // COMMON_VIDEO_INCLUDE_I420_BUFFER_POOL_H_ +#endif // COMMON_VIDEO_INCLUDE_VIDEO_FRAME_BUFFER_POOL_H_ diff --git a/common_video/incoming_video_stream.cc b/common_video/incoming_video_stream.cc index d1f8beac5b..15c668e78e 100644 --- a/common_video/incoming_video_stream.cc +++ b/common_video/incoming_video_stream.cc @@ -42,7 +42,7 @@ void IncomingVideoStream::OnFrame(const VideoFrame& video_frame) { // into the lambda instead of copying it, but it doesn't work unless we change // OnFrame to take its frame argument by value instead of const reference. incoming_render_queue_.PostTask([this, video_frame = video_frame]() mutable { - RTC_DCHECK(incoming_render_queue_.IsCurrent()); + RTC_DCHECK_RUN_ON(&incoming_render_queue_); if (render_buffers_.AddFrame(std::move(video_frame)) == 1) Dequeue(); }); @@ -50,7 +50,7 @@ void IncomingVideoStream::OnFrame(const VideoFrame& video_frame) { void IncomingVideoStream::Dequeue() { TRACE_EVENT0("webrtc", "IncomingVideoStream::Dequeue"); - RTC_DCHECK(incoming_render_queue_.IsCurrent()); + RTC_DCHECK_RUN_ON(&incoming_render_queue_); absl::optional frame_to_render = render_buffers_.FrameToRender(); if (frame_to_render) callback_->OnFrame(*frame_to_render); diff --git a/common_video/libyuv/include/webrtc_libyuv.h b/common_video/libyuv/include/webrtc_libyuv.h index ba17577216..03c9ff49ca 100644 --- a/common_video/libyuv/include/webrtc_libyuv.h +++ b/common_video/libyuv/include/webrtc_libyuv.h @@ -32,17 +32,12 @@ enum class VideoType { kI420, kIYUV, kRGB24, - kABGR, kARGB, - kARGB4444, kRGB565, - kARGB1555, kYUY2, kYV12, kUYVY, kMJPEG, - kNV21, - kNV12, kBGRA, }; @@ -58,16 +53,6 @@ const double kPerfectPSNR = 48.0f; // video frame. 
size_t CalcBufferSize(VideoType type, int width, int height); -// TODO(mikhal): Add unit test for these two functions and determine location. -// Print VideoFrame to file -// Input: -// - frame : Reference to video frame. -// - file : pointer to file object. It is assumed that the file is -// already open for writing. -// Return value: 0 if OK, < 0 otherwise. -int PrintVideoFrame(const VideoFrame& frame, FILE* file); -int PrintVideoFrame(const I420BufferInterface& frame, FILE* file); - // Extract buffer from VideoFrame or I420BufferInterface (consecutive // planes, no stride) // Input: diff --git a/common_video/libyuv/libyuv_unittest.cc b/common_video/libyuv/libyuv_unittest.cc index 2a7992865a..62d9e87fa6 100644 --- a/common_video/libyuv/libyuv_unittest.cc +++ b/common_video/libyuv/libyuv_unittest.cc @@ -31,6 +31,38 @@ void Calc16ByteAlignedStride(int width, int* stride_y, int* stride_uv) { *stride_uv = 16 * ((width + 31) / 32); } +int PrintPlane(const uint8_t* buf, + int width, + int height, + int stride, + FILE* file) { + for (int i = 0; i < height; i++, buf += stride) { + if (fwrite(buf, 1, width, file) != static_cast(width)) + return -1; + } + return 0; +} + +int PrintVideoFrame(const I420BufferInterface& frame, FILE* file) { + int width = frame.width(); + int height = frame.height(); + int chroma_width = frame.ChromaWidth(); + int chroma_height = frame.ChromaHeight(); + + if (PrintPlane(frame.DataY(), width, height, frame.StrideY(), file) < 0) { + return -1; + } + if (PrintPlane(frame.DataU(), chroma_width, chroma_height, frame.StrideU(), + file) < 0) { + return -1; + } + if (PrintPlane(frame.DataV(), chroma_width, chroma_height, frame.StrideV(), + file) < 0) { + return -1; + } + return 0; +} + } // Anonymous namespace class TestLibYuv : public ::testing::Test { diff --git a/common_video/libyuv/webrtc_libyuv.cc b/common_video/libyuv/webrtc_libyuv.cc index 833001cf1c..cc6a71a61c 100644 --- a/common_video/libyuv/webrtc_libyuv.cc +++ 
b/common_video/libyuv/webrtc_libyuv.cc @@ -14,7 +14,6 @@ #include "api/video/i420_buffer.h" #include "common_video/include/video_frame_buffer.h" -#include "rtc_base/bind.h" #include "rtc_base/checks.h" #include "third_party/libyuv/include/libyuv.h" @@ -26,8 +25,6 @@ size_t CalcBufferSize(VideoType type, int width, int height) { size_t buffer_size = 0; switch (type) { case VideoType::kI420: - case VideoType::kNV12: - case VideoType::kNV21: case VideoType::kIYUV: case VideoType::kYV12: { int half_width = (width + 1) >> 1; @@ -35,9 +32,7 @@ size_t CalcBufferSize(VideoType type, int width, int height) { buffer_size = width * height + half_width * half_height * 2; break; } - case VideoType::kARGB4444: case VideoType::kRGB565: - case VideoType::kARGB1555: case VideoType::kYUY2: case VideoType::kUYVY: buffer_size = width * height * 2; @@ -56,43 +51,6 @@ size_t CalcBufferSize(VideoType type, int width, int height) { return buffer_size; } -static int PrintPlane(const uint8_t* buf, - int width, - int height, - int stride, - FILE* file) { - for (int i = 0; i < height; i++, buf += stride) { - if (fwrite(buf, 1, width, file) != static_cast(width)) - return -1; - } - return 0; -} - -// TODO(nisse): Belongs with the test code? 
-int PrintVideoFrame(const I420BufferInterface& frame, FILE* file) { - int width = frame.width(); - int height = frame.height(); - int chroma_width = frame.ChromaWidth(); - int chroma_height = frame.ChromaHeight(); - - if (PrintPlane(frame.DataY(), width, height, frame.StrideY(), file) < 0) { - return -1; - } - if (PrintPlane(frame.DataU(), chroma_width, chroma_height, frame.StrideU(), - file) < 0) { - return -1; - } - if (PrintPlane(frame.DataV(), chroma_width, chroma_height, frame.StrideV(), - file) < 0) { - return -1; - } - return 0; -} - -int PrintVideoFrame(const VideoFrame& frame, FILE* file) { - return PrintVideoFrame(*frame.video_frame_buffer()->ToI420(), file); -} - int ExtractBuffer(const rtc::scoped_refptr& input_frame, size_t size, uint8_t* buffer) { @@ -135,8 +93,6 @@ int ConvertVideoType(VideoType video_type) { return libyuv::FOURCC_YV12; case VideoType::kRGB24: return libyuv::FOURCC_24BG; - case VideoType::kABGR: - return libyuv::FOURCC_ABGR; case VideoType::kRGB565: return libyuv::FOURCC_RGBP; case VideoType::kYUY2: @@ -145,18 +101,10 @@ int ConvertVideoType(VideoType video_type) { return libyuv::FOURCC_UYVY; case VideoType::kMJPEG: return libyuv::FOURCC_MJPG; - case VideoType::kNV21: - return libyuv::FOURCC_NV21; - case VideoType::kNV12: - return libyuv::FOURCC_NV12; case VideoType::kARGB: return libyuv::FOURCC_ARGB; case VideoType::kBGRA: return libyuv::FOURCC_BGRA; - case VideoType::kARGB4444: - return libyuv::FOURCC_R444; - case VideoType::kARGB1555: - return libyuv::FOURCC_RGBO; } RTC_NOTREACHED(); return libyuv::FOURCC_ANY; @@ -175,10 +123,6 @@ int ConvertFromI420(const VideoFrame& src_frame, ConvertVideoType(dst_video_type)); } -// Helper functions for keeping references alive. 
-void KeepBufferRefs(rtc::scoped_refptr, - rtc::scoped_refptr) {} - rtc::scoped_refptr ScaleI420ABuffer( const I420ABufferInterface& buffer, int target_width, @@ -197,7 +141,8 @@ rtc::scoped_refptr ScaleI420ABuffer( yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(), yuv_buffer->DataV(), yuv_buffer->StrideV(), axx_buffer->DataY(), axx_buffer->StrideY(), - rtc::Bind(&KeepBufferRefs, yuv_buffer, axx_buffer)); + // To keep references alive. + [yuv_buffer, axx_buffer] {}); return merged_buffer; } diff --git a/common_video/video_frame_buffer.cc b/common_video/video_frame_buffer.cc index 823c5ad7a1..78a126419a 100644 --- a/common_video/video_frame_buffer.cc +++ b/common_video/video_frame_buffer.cc @@ -30,7 +30,7 @@ class WrappedYuvBuffer : public Base { int u_stride, const uint8_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used) + std::function no_longer_used) : width_(width), height_(height), y_plane_(y_plane), @@ -70,7 +70,7 @@ class WrappedYuvBuffer : public Base { const int y_stride_; const int u_stride_; const int v_stride_; - rtc::Callback0 no_longer_used_cb_; + std::function no_longer_used_cb_; }; // Template to implement a wrapped buffer for a I4??BufferInterface. 
@@ -87,7 +87,7 @@ class WrappedYuvaBuffer : public WrappedYuvBuffer { int v_stride, const uint8_t* a_plane, int a_stride, - const rtc::Callback0& no_longer_used) + std::function no_longer_used) : WrappedYuvBuffer(width, height, y_plane, @@ -136,7 +136,7 @@ class WrappedYuv16BBuffer : public Base { int u_stride, const uint16_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used) + std::function no_longer_used) : width_(width), height_(height), y_plane_(y_plane), @@ -176,7 +176,7 @@ class WrappedYuv16BBuffer : public Base { const int y_stride_; const int u_stride_; const int v_stride_; - rtc::Callback0 no_longer_used_cb_; + std::function no_longer_used_cb_; }; class I010BufferBase : public I010BufferInterface { @@ -206,9 +206,9 @@ rtc::scoped_refptr WrapI420Buffer( int u_stride, const uint8_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used) { + std::function no_longer_used) { return rtc::scoped_refptr( - new rtc::RefCountedObject>( + rtc::make_ref_counted>( width, height, y_plane, y_stride, u_plane, u_stride, v_plane, v_stride, no_longer_used)); } @@ -224,9 +224,9 @@ rtc::scoped_refptr WrapI420ABuffer( int v_stride, const uint8_t* a_plane, int a_stride, - const rtc::Callback0& no_longer_used) { + std::function no_longer_used) { return rtc::scoped_refptr( - new rtc::RefCountedObject>( + rtc::make_ref_counted>( width, height, y_plane, y_stride, u_plane, u_stride, v_plane, v_stride, a_plane, a_stride, no_longer_used)); } @@ -240,9 +240,9 @@ rtc::scoped_refptr WrapI444Buffer( int u_stride, const uint8_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used) { + std::function no_longer_used) { return rtc::scoped_refptr( - new rtc::RefCountedObject>( + rtc::make_ref_counted>( width, height, y_plane, y_stride, u_plane, u_stride, v_plane, v_stride, no_longer_used)); } @@ -257,7 +257,7 @@ rtc::scoped_refptr WrapYuvBuffer( int u_stride, const uint8_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used) { + std::function 
no_longer_used) { switch (type) { case VideoFrameBuffer::Type::kI420: return WrapI420Buffer(width, height, y_plane, y_stride, u_plane, u_stride, @@ -266,8 +266,7 @@ rtc::scoped_refptr WrapYuvBuffer( return WrapI444Buffer(width, height, y_plane, y_stride, u_plane, u_stride, v_plane, v_stride, no_longer_used); default: - FATAL() << "Unexpected frame buffer type."; - return nullptr; + RTC_CHECK_NOTREACHED(); } } @@ -280,9 +279,9 @@ rtc::scoped_refptr WrapI010Buffer( int u_stride, const uint16_t* v_plane, int v_stride, - const rtc::Callback0& no_longer_used) { + std::function no_longer_used) { return rtc::scoped_refptr( - new rtc::RefCountedObject>( + rtc::make_ref_counted>( width, height, y_plane, y_stride, u_plane, u_stride, v_plane, v_stride, no_longer_used)); } diff --git a/common_video/video_frame_buffer_pool.cc b/common_video/video_frame_buffer_pool.cc new file mode 100644 index 0000000000..d225370a4d --- /dev/null +++ b/common_video/video_frame_buffer_pool.cc @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "common_video/include/video_frame_buffer_pool.h" + +#include + +#include "rtc_base/checks.h" + +namespace webrtc { + +namespace { +bool HasOneRef(const rtc::scoped_refptr& buffer) { + // Cast to rtc::RefCountedObject is safe because this function is only called + // on locally created VideoFrameBuffers, which are either + // |rtc::RefCountedObject| or |rtc::RefCountedObject|. 
+ switch (buffer->type()) { + case VideoFrameBuffer::Type::kI420: { + return static_cast*>(buffer.get()) + ->HasOneRef(); + } + case VideoFrameBuffer::Type::kNV12: { + return static_cast*>(buffer.get()) + ->HasOneRef(); + } + default: + RTC_NOTREACHED(); + } + return false; +} + +} // namespace + +VideoFrameBufferPool::VideoFrameBufferPool() : VideoFrameBufferPool(false) {} + +VideoFrameBufferPool::VideoFrameBufferPool(bool zero_initialize) + : VideoFrameBufferPool(zero_initialize, + std::numeric_limits::max()) {} + +VideoFrameBufferPool::VideoFrameBufferPool(bool zero_initialize, + size_t max_number_of_buffers) + : zero_initialize_(zero_initialize), + max_number_of_buffers_(max_number_of_buffers) {} + +VideoFrameBufferPool::~VideoFrameBufferPool() = default; + +void VideoFrameBufferPool::Release() { + buffers_.clear(); +} + +bool VideoFrameBufferPool::Resize(size_t max_number_of_buffers) { + RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); + size_t used_buffers_count = 0; + for (const rtc::scoped_refptr& buffer : buffers_) { + // If the buffer is in use, the ref count will be >= 2, one from the list we + // are looping over and one from the application. If the ref count is 1, + // then the list we are looping over holds the only reference and it's safe + // to reuse. 
+ if (!HasOneRef(buffer)) { + used_buffers_count++; + } + } + if (used_buffers_count > max_number_of_buffers) { + return false; + } + max_number_of_buffers_ = max_number_of_buffers; + + size_t buffers_to_purge = buffers_.size() - max_number_of_buffers_; + auto iter = buffers_.begin(); + while (iter != buffers_.end() && buffers_to_purge > 0) { + if (HasOneRef(*iter)) { + iter = buffers_.erase(iter); + buffers_to_purge--; + } else { + ++iter; + } + } + return true; +} + +rtc::scoped_refptr VideoFrameBufferPool::CreateI420Buffer( + int width, + int height) { + RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); + + rtc::scoped_refptr existing_buffer = + GetExistingBuffer(width, height, VideoFrameBuffer::Type::kI420); + if (existing_buffer) { + // Cast is safe because the only way kI420 buffer is created is + // in the same function below, where |RefCountedObject| is + // created. + rtc::RefCountedObject* raw_buffer = + static_cast*>(existing_buffer.get()); + // Creates a new scoped_refptr, which is also pointing to the same + // RefCountedObject as buffer, increasing ref count. + return rtc::scoped_refptr(raw_buffer); + } + + if (buffers_.size() >= max_number_of_buffers_) + return nullptr; + // Allocate new buffer. + rtc::scoped_refptr buffer = + rtc::make_ref_counted(width, height); + + if (zero_initialize_) + buffer->InitializeData(); + + buffers_.push_back(buffer); + return buffer; +} + +rtc::scoped_refptr VideoFrameBufferPool::CreateNV12Buffer( + int width, + int height) { + RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); + + rtc::scoped_refptr existing_buffer = + GetExistingBuffer(width, height, VideoFrameBuffer::Type::kNV12); + if (existing_buffer) { + // Cast is safe because the only way kNV12 buffer is created is + // in the same function below, where |RefCountedObject| is + // created.
+ rtc::RefCountedObject* raw_buffer = + static_cast*>(existing_buffer.get()); + // Creates a new scoped_refptr, which is also pointing to the same + // RefCountedObject as buffer, increasing ref count. + return rtc::scoped_refptr(raw_buffer); + } + + if (buffers_.size() >= max_number_of_buffers_) + return nullptr; + // Allocate new buffer. + rtc::scoped_refptr buffer = + rtc::make_ref_counted(width, height); + + if (zero_initialize_) + buffer->InitializeData(); + + buffers_.push_back(buffer); + return buffer; +} + +rtc::scoped_refptr VideoFrameBufferPool::GetExistingBuffer( + int width, + int height, + VideoFrameBuffer::Type type) { + // Release buffers with wrong resolution or different type. + for (auto it = buffers_.begin(); it != buffers_.end();) { + const auto& buffer = *it; + if (buffer->width() != width || buffer->height() != height || + buffer->type() != type) { + it = buffers_.erase(it); + } else { + ++it; + } + } + // Look for a free buffer. + for (const rtc::scoped_refptr& buffer : buffers_) { + // If the buffer is in use, the ref count will be >= 2, one from the list we + // are looping over and one from the application. If the ref count is 1, + // then the list we are looping over holds the only reference and it's safe + // to reuse. + if (HasOneRef(buffer)) { + RTC_CHECK(buffer->type() == type); + return buffer; + } + } + return nullptr; +} + +} // namespace webrtc diff --git a/common_video/video_frame_buffer_pool_unittest.cc b/common_video/video_frame_buffer_pool_unittest.cc new file mode 100644 index 0000000000..eb9b73f1a2 --- /dev/null +++ b/common_video/video_frame_buffer_pool_unittest.cc @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "common_video/include/video_frame_buffer_pool.h" + +#include +#include + +#include "api/scoped_refptr.h" +#include "api/video/i420_buffer.h" +#include "api/video/video_frame_buffer.h" +#include "test/gtest.h" + +namespace webrtc { + +TEST(TestVideoFrameBufferPool, SimpleFrameReuse) { + VideoFrameBufferPool pool; + auto buffer = pool.CreateI420Buffer(16, 16); + EXPECT_EQ(16, buffer->width()); + EXPECT_EQ(16, buffer->height()); + // Extract non-refcounted pointers for testing. + const uint8_t* y_ptr = buffer->DataY(); + const uint8_t* u_ptr = buffer->DataU(); + const uint8_t* v_ptr = buffer->DataV(); + // Release buffer so that it is returned to the pool. + buffer = nullptr; + // Check that the memory is reused. + buffer = pool.CreateI420Buffer(16, 16); + EXPECT_EQ(y_ptr, buffer->DataY()); + EXPECT_EQ(u_ptr, buffer->DataU()); + EXPECT_EQ(v_ptr, buffer->DataV()); +} + +TEST(TestVideoFrameBufferPool, FailToReuseWrongSize) { + // Set max frames to 1, just to make sure the first buffer is being released. + VideoFrameBufferPool pool(/*zero_initialize=*/false, 1); + auto buffer = pool.CreateI420Buffer(16, 16); + EXPECT_EQ(16, buffer->width()); + EXPECT_EQ(16, buffer->height()); + // Release buffer so that it is returned to the pool. + buffer = nullptr; + // Check that the pool doesn't try to reuse buffers of incorrect size. + buffer = pool.CreateI420Buffer(32, 16); + ASSERT_TRUE(buffer); + EXPECT_EQ(32, buffer->width()); + EXPECT_EQ(16, buffer->height()); +} + +TEST(TestVideoFrameBufferPool, FrameValidAfterPoolDestruction) { + rtc::scoped_refptr buffer; + { + VideoFrameBufferPool pool; + buffer = pool.CreateI420Buffer(16, 16); + } + EXPECT_EQ(16, buffer->width()); + EXPECT_EQ(16, buffer->height()); + // Access buffer, so that ASAN could find any issues if buffer + // doesn't outlive the buffer pool. 
+ memset(buffer->MutableDataY(), 0xA5, 16 * buffer->StrideY()); +} + +TEST(TestVideoFrameBufferPool, MaxNumberOfBuffers) { + VideoFrameBufferPool pool(false, 1); + auto buffer = pool.CreateI420Buffer(16, 16); + EXPECT_NE(nullptr, buffer.get()); + EXPECT_EQ(nullptr, pool.CreateI420Buffer(16, 16).get()); +} + +TEST(TestVideoFrameBufferPool, ProducesNv12) { + VideoFrameBufferPool pool(false, 1); + auto buffer = pool.CreateNV12Buffer(16, 16); + EXPECT_NE(nullptr, buffer.get()); +} + +TEST(TestVideoFrameBufferPool, SwitchingPixelFormat) { + VideoFrameBufferPool pool(false, 1); + auto buffer = pool.CreateNV12Buffer(16, 16); + EXPECT_EQ(nullptr, pool.CreateNV12Buffer(16, 16).get()); + auto buffer2 = pool.CreateI420Buffer(16, 16); + EXPECT_NE(nullptr, buffer2.get()); + EXPECT_EQ(nullptr, pool.CreateI420Buffer(16, 16).get()); +} + +} // namespace webrtc diff --git a/common_video/video_frame_unittest.cc b/common_video/video_frame_unittest.cc index 225a7d3089..b82c14716c 100644 --- a/common_video/video_frame_unittest.cc +++ b/common_video/video_frame_unittest.cc @@ -15,7 +15,7 @@ #include "api/video/i010_buffer.h" #include "api/video/i420_buffer.h" -#include "rtc_base/bind.h" +#include "api/video/nv12_buffer.h" #include "rtc_base/time_utils.h" #include "test/fake_texture_frame.h" #include "test/frame_utils.h" @@ -157,6 +157,29 @@ rtc::scoped_refptr CreateGradient(VideoFrameBuffer::Type type, return I010Buffer::Copy(*buffer); } +rtc::scoped_refptr CreateNV12Gradient(int width, + int height) { + rtc::scoped_refptr buffer(NV12Buffer::Create(width, height)); + // Initialize with gradient, Y = 128(x/w + y/h), U = 256 x/w, V = 256 y/h + for (int x = 0; x < width; x++) { + for (int y = 0; y < height; y++) { + buffer->MutableDataY()[x + y * width] = + 128 * (x * height + y * width) / (width * height); + } + } + int chroma_width = buffer->ChromaWidth(); + int chroma_height = buffer->ChromaHeight(); + for (int x = 0; x < chroma_width; x++) { + for (int y = 0; y < chroma_height; y++) { 
+ buffer->MutableDataUV()[x * 2 + y * buffer->StrideUV()] = + 255 * x / (chroma_width - 1); + buffer->MutableDataUV()[x * 2 + 1 + y * buffer->StrideUV()] = + 255 * y / (chroma_height - 1); + } + } + return buffer; +} + // The offsets and sizes describe the rectangle extracted from the // original (gradient) frame, in relative coordinates where the // original frame correspond to the unit square, 0.0 <= x, y < 1.0. @@ -495,6 +518,35 @@ INSTANTIATE_TEST_SUITE_P(All, ::testing::Values(VideoFrameBuffer::Type::kI420, VideoFrameBuffer::Type::kI010)); +TEST(TestNV12Buffer, CropAndScale) { + const int kSourceWidth = 640; + const int kSourceHeight = 480; + const int kScaledWidth = 320; + const int kScaledHeight = 240; + const int kCropLeft = 40; + const int kCropTop = 30; + const int kCropRight = 0; + const int kCropBottom = 30; + + rtc::scoped_refptr buf = + CreateNV12Gradient(kSourceWidth, kSourceHeight); + + rtc::scoped_refptr scaled_buffer = buf->CropAndScale( + kCropLeft, kCropTop, kSourceWidth - kCropLeft - kCropRight, + kSourceHeight - kCropTop - kCropBottom, kScaledWidth, kScaledHeight); + + // Parameters to CheckCrop indicate what part of the source frame is in the + // scaled frame. + const float kOffsetX = (kCropLeft + 0.0) / kSourceWidth; + const float kOffsetY = (kCropTop + 0.0) / kSourceHeight; + const float kRelativeWidth = + (kSourceWidth - kCropLeft - kCropRight + 0.0) / kSourceWidth; + const float kRelativeHeight = + (kSourceHeight - kCropTop - kCropBottom + 0.0) / kSourceHeight; + CheckCrop(*scaled_buffer->ToI420(), kOffsetX, kOffsetY, kRelativeWidth, + kRelativeHeight); +} + class TestPlanarYuvBufferRotate : public ::testing::TestWithParam< std::tuple> {}; diff --git a/docs/bug-reporting.md b/docs/bug-reporting.md new file mode 100644 index 0000000000..4e5cbda3e2 --- /dev/null +++ b/docs/bug-reporting.md @@ -0,0 +1,170 @@ +There are a couple bug trackers relevant to WebRTC: + + * [crbug.com](https://crbug.com) -- for Chrome. 
+ + * [bugzilla.mozilla.org](https://bugzilla.mozilla.org/) -- for Firefox. + + * [bugs.webkit.org](https://bugs.webkit.org/) -- for Safari. + + * [developer.microsoft.com](https://developer.microsoft.com/en-us/microsoft-edge/platform/issues/) -- for Microsoft Edge. + + * [bugs.opera.com/wizard](https://bugs.opera.com/wizard/) -- for Opera. + + * [bugs.webrtc.org](http://bugs.webrtc.org) -- for WebRTC native code. + +Anyone with a [Google account][1] can file bugs in the Chrome and WebRTC trackers and they're continuously triaged by Chrome and WebRTC engineers. + + +### How to File a Good Bug Report + +#### Instructions + +* Identify which bug tracker to use: + + * If you're hitting a problem in Chrome, file the bug using the + [the Chromium issue wizard](https://chromiumbugs.appspot.com/?token=0) + Choose "Web Developer" and "API", then fill out the form. For the component choose + * Blink>GetUserMedia for camera/microphone issues + * Blink>MediaRecording for issues with the MediaRecorder API + * Blink>WebRTC for issues with the RTCPeerConnection API + This ensures the right people will look at your bug. + + * If you're a developer working with the native code, file the bug at + [this link][4]. + +* Include as much as possible from the data points listed below. + +#### Example Data Points + + * Version of the browser/app + + * For Chrome: copy/paste from **chrome://version** + + * For WebRTC native code: if applicable, include the branch (e.g. trunk) + and WebRTC revision (e.g. r8207) your application uses + + * Operating system (Windows, Mac, Linux, Android, iOS, etc.) and version + (e.g. Windows 7, OS X 10.9, Ubuntu 14, etc.) + + * Hardware platform/device model (e.g. PC, Mac, Samsung 4S, Nexus 7, iPhone + 5S, iPad Air 2 etc) + + * Camera and microphone model and version (if applicable) + + * For Chrome audio and video device issues, please run the tests at + . 
After the tests finish running, click the bug + icon at the top, download the report, and attach the report to the issue + tracker. + + * Web site URL + + * Reproduction steps: detailed information on how to reproduce the bug. If + applicable, please either attach or link to a minimal test page in + HTML+JavaScript. + + * For **crashes** + + * If you experience a crash while using Chrome, please include a crash ID + by following [these instructions][2]. + + * If you experience a crash while using WebRTC native code, please include + the full stacktrace. + + * For **functional** issues or **ICE** issues, in either Chrome or a native + application, please gather a [native log][5]. + + * For **connectivity** issues on Chrome, ensure **chrome://webrtc-internals** + is open in another tab before starting the call and while the call is in progress, + + * expand the **Create Dump** section, + + * click the **Download the PeerConnection updates and stats data** button. + You will be prompted to save the dump to your local machine. Please + attach that dump to the bug report. + + * For **audio quality** issues on Chrome, while the call is in progress, + + * please open **chrome://webrtc-internals** in another tab, + + * expand the **Create Dump** section, + + * fill in the **Enable diagnostic audio recordings** checkbox. You will be + prompted to save the recording to your local machine. After ending the + call, attach the recording to the bug. + + * For **echo** issues, please try to capture an audio recording from the + side that is _generating_ the echo, not the side that _hears_ the echo. + For example, if UserA and UserB are in a call, and UserA hears herself + speak, please obtain an audio recording from UserB. + + * For **regressions**, i.e. things that worked in one version and stopped working in + a later version, provide both versions.
If you know steps to reproduce you might + want to try [a bisect](https://www.chromium.org/developers/bisect-builds-py) to + identify the commit that changed the behaviour. + + * For **video problems**, e.g. artifacts or decoder failures, a rtpdump file + with the unencrypted RTP traffic. This can be replayed using the video_replay + tool from the rtc_tools directory. + + * For problems with the webcam, a dump or screenshot of the "Video Capture" tab + in chrome://media-internals. + +### Filing a Security Bug + +The WebRTC team takes security very seriously. If you find a vulnerability in +WebRTC, please file a [Chromium security bug][ChromeSecurity], even if the bug +only affects native WebRTC code and not Chromium. + +A history of fixed Chromium security bugs is best found via [security notes in +Stable Channel updates on the Google Chrome releases blog][ChromeSecurityBlog]. + +You can also find fixed, publicly visible [Type=Bug-Security][ChromeBugList] +bugs in the issue tracker (note: security bugs normally become publicly +visible 14 weeks after they are fixed). If there is a bug in WebRTC code +that Chromium isn’t using (such as the Java/ObjC wrappers for Android/iOS) +we will announce fixes separately on [discuss-webrtc][DiscussWebRTC]. + +[Tracking released security bug disclosures][WebRtcBugList]. + +Note that we will generally NOT merge security fixes backwards to any branches, +so if you’re using older branches it’s your responsibility to make sure the +relevant security fixes get merged. + + +### Receiving notifications about security bugs in Chrome + +To get automatic notifications about activity/comments in security bugs in +Chrome you need to be either explicitly cc:d on specific bugs (by someone who +has access to the bug) or be part of a special mailing list for all security bug +notifications.
To get on that list you have to apply to the Chrome Security +team, see more about this on the [Chrome Security page][ChromeSecurity] under +"How can I get access to Chromium vulnerabilities?" at the bottom of the page. + +Please note that Chrome's security-notify list will receive notifications about +all security bugs in Chrome and not just the WebRTC ones. Normally it shouldn't +be a problem to figure out whether an issue affects WebRTC since it will most +likely be tagged with one of the WebRTC-related components (one of Blink>WebRTC, +Blink>GetUserMedia, Blink>MediaStream, Blink>MediaRecording) or their sub- +components. + +Also note that access granted by the list will only apply to bugs of Type=Bug- +Security. Not all bugs with crashes, memory leaks and other potential +vulnerabilities are marked as Bug-Security though. You can read more about what +categories of bugs are deemed security bugs in the [Severity Guidelines for +Security Issues][SeverityGuidelines] and also on the [Security FAQ][SecurityFaq] +page. 
+ + +[1]: https://accounts.google.com/ +[2]: http://www.chromium.org/for-testers/bug-reporting-guidelines/reporting-crash-bug +[3]: https://code.google.com/p/chromium/issues/entry?template=Audio/Video%20Issue +[4]: https://bugs.chromium.org/p/webrtc/issues/entry +[5]: native-code/logging.md +[ChromeSecurity]: https://www.chromium.org/Home/chromium-security/reporting-security-bugs +[DiscussWebRTC]: https://groups.google.com/group/discuss-webrtc +[ChromeSecurityBlog]: https://chromereleases.googleblog.com/search/label/Stable%20updates +[ChromeBugList]: https://bugs.chromium.org/p/chromium/issues/list?can=1&q=Type%3DBug-Security+component%3ABlink%3EWebRTC+-status%3ADuplicate%2CWontfix&sort=-closed&colspec=ID+Pri+M+Component+Status+Owner+Summary+OS+Closed&x=m&y=releaseblock&cells=ids +[WebRtcBugList]: https://bugs.chromium.org/p/webrtc/issues/list?q=Type%3DBug-Security&can=1 +[ChromeSecurity]: https://www.chromium.org/Home/chromium-security +[SeverityGuidelines]: https://chromium.googlesource.com/chromium/src/+/master/docs/security/severity-guidelines.md +[SecurityFaq]: https://chromium.googlesource.com/chromium/src/+/master/docs/security/faq.md diff --git a/docs/faq.md b/docs/faq.md index ed9143812a..700fd15b9e 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -142,6 +142,16 @@ Please see [Getting Started][getting-started-link] and Yes, each Contributor must sign and return the [Contributor License Agreement][cla-link] +### How can I become a WebRTC committer? + +After 10-20 non-trivial patches you can apply for commit rights. If you are +writing a lot of patches you can also apply for try job access before then. +To apply, open a bug on https://bugs.chromium.org/p/webrtc/ specifying the +e-mail you will use to commit code to WebRTC and list all the relevant CLs to +show your previous contributions (even in case you are only requiring try job +access since it is still required a certain number of contributions for that +as well). 
+ [cla-link]: https://developers.google.com/open-source/cla/individual?hl=en @@ -231,7 +241,7 @@ Yes, you still have the right to redistribute and you still have a patent license for Google's patents that cover the code that Google released. -### What if my competitor uses the code and brings patent litigation against me for something unrelated to the code. Does he or she still have a patent license? +### What if my competitor uses the code and brings patent litigation against me for something unrelated to the code. Do they still have a patent license? -Yes, he/she still has the right to redistribute and he/she still has a patent +Yes, they still have the right to redistribute and they still have a patent license for Google's patents that cover the code that Google released. diff --git a/docs/native-code/development/index.md b/docs/native-code/development/index.md index 04393a9bb8..b19a15ca5e 100644 --- a/docs/native-code/development/index.md +++ b/docs/native-code/development/index.md @@ -98,6 +98,12 @@ configuration untouched (stored in the args.gn file), do: $ gn clean out/Default ``` +To build the fuzzers residing in the [test/fuzzers][fuzzers] directory, use +``` +$ gn gen out/fuzzers --args='use_libfuzzer=true optimize_for_fuzzing=true' +``` +Depending on the fuzzer additional arguments like `is_asan`, `is_msan` or `is_ubsan_security` might be required. + See the [GN][gn-doc] documentation for all available options. There are also more platform specific tips on the [Android][webrtc-android-development] and [iOS][webrtc-ios-development] instructions. @@ -113,6 +119,14 @@ For [Ninja][ninja] project files generated in `out/Default`: $ ninja -C out/Default ``` +To build everything in the generated folder (`out/Default`): + +``` +$ ninja all -C out/Default +``` + +See [Ninja build rules][ninja-build-rules] to read more about difference between `ninja` and `ninja all`. 
+ ## Using Another Build System @@ -134,10 +148,10 @@ $ git branch -r ``` To create a local branch tracking a remote release branch (in this example, -the 43 branch): +the branch corresponding to Chrome M80): ``` -$ git checkout -b my_branch refs/remotes/branch-heads/43 +$ git checkout -b my_branch refs/remotes/branch-heads/3987 $ gclient sync ``` @@ -159,11 +173,13 @@ $ # verify the current branch becomes REMOTE:origin/master The above is untested and unsupported, but it might help. -Commit log for the branch: [https://webrtc.googlesource.com/src/+log/branch-heads/43][m43-log] -To browse it: [https://webrtc.googlesource.com/src/+/branch-heads/43][m43] +Commit log for the branch: [https://webrtc.googlesource.com/src/+log/branch-heads/3987][m80-log] +To browse it: [https://webrtc.googlesource.com/src/+/branch-heads/3987][m80] For more details, read Chromium's [Working with Branches][chromium-work-branches] and [Working with Release Branches][chromium-work-release-branches] pages. +To find the branch corresponding to a Chrome release check the +[Chromium Dashboard](https://chromiumdash.appspot.com/branches). ## Contributing Patches @@ -254,6 +270,7 @@ Target name `turnserver`. Used for unit tests. [ninja]: https://ninja-build.org/ +[ninja-build-rules]: https://gn.googlesource.com/gn/+/master/docs/reference.md#the-all-and-default-rules [gn]: https://gn.googlesource.com/gn/+/master/README.md [gn-doc]: https://gn.googlesource.com/gn/+/master/docs/reference.md#IDE-options [webrtc-android-development]: https://webrtc.googlesource.com/src/+/refs/heads/master/docs/native-code/android/index.md @@ -264,5 +281,6 @@ Target name `turnserver`. Used for unit tests.
[depot-tools]: http://commondatastorage.googleapis.com/chrome-infra-docs/flat/depot_tools/docs/html/depot_tools_tutorial.html#_setting_up [rfc-5389]: https://tools.ietf.org/html/rfc5389 [rfc-5766]: https://tools.ietf.org/html/rfc5766 -[m43-log]: https://webrtc.googlesource.com/src/+log/branch-heads/43 -[m43]: https://webrtc.googlesource.com/src/+/branch-heads/43 +[m80-log]: https://webrtc.googlesource.com/src/+log/branch-heads/3987 +[m80]: https://webrtc.googlesource.com/src/+/branch-heads/3987 +[fuzzers]: https://chromium.googlesource.com/external/webrtc/+/refs/heads/master/test/fuzzers/ diff --git a/docs/native-code/ios/index.md b/docs/native-code/ios/index.md index 6c9d2de0e8..e2f6c3dfd6 100644 --- a/docs/native-code/ios/index.md +++ b/docs/native-code/ios/index.md @@ -173,7 +173,7 @@ a script is available [here][framework-script] To build the framework with bitcode support, pass the `--bitcode` flag to the script like so ``` -$ python build_ios_libs.py --bitcode +$ python tools_webrtc/ios/build_ios_libs.py --bitcode ``` The resulting framework can be found in out_ios_libs/. diff --git a/docs/native-code/logging.md b/docs/native-code/logging.md new file mode 100644 index 0000000000..1daadbe2b5 --- /dev/null +++ b/docs/native-code/logging.md @@ -0,0 +1,42 @@ +Native logs are often valuable in order to debug issues that can't be easily +reproduced. Following are instructions for gathering logs on various platforms. + +To enable native logs for a native application, you can either: + + * Use a debug build of WebRTC (a build where `NDEBUG` is not defined), + which will enable `INFO` logging by default. + + * Call `rtc::LogMessage::LogToDebug(rtc::LS_INFO)` within your application. + Or use `LS_VERBOSE` to enable `VERBOSE` logging. + +For the location of the log output on different platforms, see below. + +#### Android + +Logged to Android system log. 
Can be obtained using: + +~~~~ bash +adb logcat -s "libjingle" +~~~~ + +To enable the logging in a non-debug build from Java code, use +`Logging.enableLogToDebugOutput(Logging.Severity.LS_INFO)`. + +#### iOS + +Only logged to `stderr` by default. To log to a file, use `RTCFileLogger`. + +#### Mac + +For debug builds of WebRTC (builds where `NDEBUG` is not defined), logs to +`stderr`. To do this for release builds as well, set a boolean preference named +'logToStderr' to `true` for your application. Or, use `RTCFileLogger` to log to +a file. + +#### Windows + +Logs to the debugger and `stderr`. + +#### Linux/Other Platforms + +Logs to `stderr`. diff --git a/docs/native-code/rtp-hdrext/index.md b/docs/native-code/rtp-hdrext/index.md index c0c0b75aba..081a727c59 100644 --- a/docs/native-code/rtp-hdrext/index.md +++ b/docs/native-code/rtp-hdrext/index.md @@ -10,3 +10,4 @@ The following subpages define experiemental RTP header extensions: * [video-content-type](video-content-type/README.md) * [video-timing](video-timing/README.md) * [inband-cn](inband-cn/README.md) + * [video-layers-allocation00](video-layers-allocation00/README.md) diff --git a/docs/native-code/rtp-hdrext/video-frame-tracking-id/README.md b/docs/native-code/rtp-hdrext/video-frame-tracking-id/README.md new file mode 100644 index 0000000000..d1c609744e --- /dev/null +++ b/docs/native-code/rtp-hdrext/video-frame-tracking-id/README.md @@ -0,0 +1,27 @@ +# Video Frame Tracking Id + +The Video Frame Tracking Id extension is meant for media quality testing +purpose and shouldn't be used in production. It tracks webrtc::VideoFrame id +field from the sender to the receiver to gather referenced base media quality +metrics such as PSNR or SSIM. +Contact for more info. + +**Name:** "Video Frame Tracking Id" + +**Formal name:** + + +**Status:** This extension is defined to allow for media quality testing. It is +enabled by using a field trial and should only be used in a testing environment.
+ +### Data layout overview + 1-byte header + 2 bytes of data: + + 0              1 2 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | ID   | L=1 | video-frame-tracking-id | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +Notes: The extension should be present only in the first packet of each frame. +If attached to other packets it can be ignored. \ No newline at end of file diff --git a/docs/native-code/rtp-hdrext/video-layers-allocation00/README.md b/docs/native-code/rtp-hdrext/video-layers-allocation00/README.md new file mode 100644 index 0000000000..f367adab4c --- /dev/null +++ b/docs/native-code/rtp-hdrext/video-layers-allocation00/README.md @@ -0,0 +1,84 @@ +# Video Layers Allocation + +The goal of this extension is for a video sender to provide information about +the target bitrate, resolution and frame rate of each scalability layer in order +to aid a middle box to decide which layer to relay. + +**Name:** "Video layers allocation version 0" + +**Formal name:** + + +**Status:** This extension is defined here to allow for experimentation. + +In a conference scenario, a video from a single sender may be received by +several recipients with different downlink bandwidth constraints and UI +requirements. To allow this, a sender can send video with several scalability +layers and a middle box can choose a layer to relay for each receiver. + +This extension supports temporal layers, multiple spatial layers sent on a single +rtp stream (SVC), or independent spatial layers sent on multiple rtp streams +(Simulcast). + +## RTP header extension format + +### Data layout + +``` +// +-+-+-+-+-+-+-+-+ +// |RID| NS| sl_bm | +// +-+-+-+-+-+-+-+-+ +// Spatial layer bitmask |sl0_bm |sl1_bm | +// up to 2 bytes |---------------| +// when sl_bm == 0 |sl2_bm |sl3_bm | +// +-+-+-+-+-+-+-+-+ +// Number of temporal |#tl|#tl|#tl|#tl| +// layers per spatial layer :---------------: +// up to 4 bytes | ... 
| +// +-+-+-+-+-+-+-+-+ +// Target bitrate in kbps | | +// per temporal layer : ... : +// leb128 encoded | | +// +-+-+-+-+-+-+-+-+ +// Resolution and framerate | | +// 5 bytes per spatial layer + width-1 for + +// (optional) | rid=0, sid=0 | +// +---------------+ +// | | +// + height-1 for + +// | rid=0, sid=0 | +// +---------------+ +// | max framerate | +// +-+-+-+-+-+-+-+-+ +// : ... : +// +-+-+-+-+-+-+-+-+ +``` + +RID: RTP stream index this allocation is sent on, numbered from 0. 2 bits. + +NS: Number of RTP streams - 1. 2 bits, thus allowing up-to 4 RTP streams. + +sl_bm: BitMask of the active Spatial Layers when same for all RTP streams or 0 +otherwise. 4 bits thus allows up to 4 spatial layers per RTP streams. + +slX_bm: BitMask of the active Spatial Layers for RTP stream with index=X. +byte-aligned. When NS < 2, takes one byte, otherwise uses two bytes. + +\#tl: 2-bit value of number of temporal layers-1, thus allowing up-to 4 temporal +layer per spatial layer. One per spatial layer per RTP stream. values are stored +in (RTP stream id, spatial id) ascending order. zero-padded to byte alignment. + +Target bitrate in kbps. Values are stored using leb128 encoding. one value per +temporal layer. values are stored in (RTP stream id, spatial id, temporal id) +ascending order. All bitrates are total required bitrate to receive the +corresponding layer, i.e. in simulcast mode they include only corresponding +spatial layer, in full-svc all lower spatial layers are included. All lower +temporal layers are also included. + +Resolution and framerate. Optional. Presence is inferred from the rtp header +extension size. Encoded (width - 1), 16-bit, (height - 1), 16-bit, max frame +rate 8-bit per spatial layer per RTP stream. Values are stored in (RTP stream +id, spatial id) ascending order. + +An empty layer allocation (i.e. nothing sent on ssrc) is encoded as +special case with a single 0 byte. 
diff --git a/docs/release-notes.md b/docs/release-notes.md new file mode 100644 index 0000000000..5f77b9eb6e --- /dev/null +++ b/docs/release-notes.md @@ -0,0 +1,73 @@ +# Release notes +This document contains pointers to the WebRTC release notes for each Chrome release. The +release notes are posted to the [discuss-webrtc](https://groups.google.com/group/discuss-webrtc) +mailing list before the release. + +## Current release +To find out the current release and schedule, refer to the +[chromium dashboard](https://chromiumdash.appspot.com/schedule) + +## List of releases + * [M89 Release Notes](https://groups.google.com/g/discuss-webrtc/c/Zrsn2hi8FV0/m/KIbn0EZPBQAJ) + * [M88 Release Notes](https://groups.google.com/g/discuss-webrtc/c/A0FjOcTW2c0/m/UAv-veyPCAAJ) + * [M87 Release Notes](https://groups.google.com/g/discuss-webrtc/c/6VmKkCjRK0k/m/YyOTQyQ5AAAJ) + * [M86 Release Notes](https://groups.google.com/g/discuss-webrtc/c/pKCOpi9Llyc/m/QhZjyE02BgAJ) + * [M85 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/Qq3nsR2w2HU/7WGLPscPBwAJ) + * [M84 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/MRAV4jgHYV0/5019yB-HAwAJ) + * [M83 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/EieMDYtQ9sg/7po9fl8_AgAJ) + * NOTE: M82 release was cancelled due to cancellation of Chrome 82 release + * [M81 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/a5_zncyPc3Y/iirhUr6bCwAJ) + * [M80 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/Ozvbd0p7Q1Y/M4WN2cRKCwAJ) + * [M79 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/X8q5Ae9VKco/oEiGuteoBAAJ) + * [M78 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/qbX55tFk1o4/KgFA-ZksCwAJ) + * [M77 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/b1tdwrXKuHI/OH7oSL7OBwAJ) + * [M76 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/Y7TIuNbgP8M/UoXP-RuxAwAJ) + * [M75 Release 
Notes](https://groups.google.com/d/msg/discuss-webrtc/_jlUbYjv-hQ/mCtjlVyjAgAJ) + * [M74 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/cXEtXIIYrQs/R7y0yIK2AQAJ) + * [M73 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/l0gc3RjBhc0/FsMqOlOSBwAJ) + * [M72 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/3h4y0fimHwg/j6G4dTVvCAAJ) + * [M71 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/HUpIxlDlkSE/qR1nswqZCwAJ) + * [M70 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/6ImvPjWQvbE/AlCtGQnYBQAJ) + * [M69 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/i1Td6qhfKlQ/ryXly46JCwAJ) + * [M68 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/nDdDqIBtFBM/bf_0eknmAwAJ) + * [M67 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/8D5O5NAVzes/QxeMGr0rAwAJ) + * [M66 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/kG4DJSfP2ck/YlI0xyeLAgAJ) + * [M65 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/QJHpBnGQPKk/oKR0pSD-CgAJ) + * [M64 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/fIWg5n67xHo/QIhRnv6vBgAJ) + * [M63 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/qDtSDxoNSII/69b6fAkxAQAJ) + * [M62 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/DFRDTFJmO5g/Sz5zOz-KFQAJ) + * [M61 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/8gJyg8EFPdo/OxUdyMjXBwAJ) + * [M60 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/iw3c5xYXOUw/WF5QxRReBgAJ) + * [M59 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/ogPObezLpHw/hwVgcW57BgAJ) + * [M58 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/13BE3fbHcLU/bQJWNBihBgAJ) + * [M57 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/xXjeKbW_JYI/LIXzVrKWCwAJ) + * [M56 Release Notes](https://groups.google.com/d/msg/discuss-webrtc/DyeVS9IMTLc/1gUM7osoCwAJ) + * [M55 Release 
Notes](https://groups.google.com/d/msg/discuss-webrtc/BqqFMSR6s1E/rlPYFD0NCQAJ) + * [M54 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/S5yex8rNIjA/discussion) + * [M53 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/ism_KD14rzc/discussion) + * [M52 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/bDfxOA8XiJI/discussion) + * [M51 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/GdsmhrVaxdU/discussion) + * [M50 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/JuXLl5BJoJE/discussion) + * [M49 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/mcApW-3YADI/discussion) + * [M48 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/_5hL0HeBeEA/discussion) + * [M47 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/sq5CVmY69sc/discussion) + * [M46 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/uMWoBvCceSg/discussion) + * [M45 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/RZDCCUga1zc/discussion) + * [M44 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/mrLyXc6Y464/discussion) + * [M43 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/KiKykbMGW9w/discussion) + * [M42 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/PwuzgUypYos/discussion) + * [M41 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/aGsdjGtjIQA/discussion) + * [M40 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/vGW4O3QOyLM/discussion) + * [M39 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/Cv4g9jllrSE/discussion) + * [M38 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/ANnsPbI0PWg/discussion) + * [M37 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/Qt99-FXzKkU/discussion) + * [M36 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/JlU2ItCJuZU/discussion) + * [M35 Release 
Notes](https://groups.google.com/d/topic/discuss-webrtc/8Um1WESQ97g/discussion) + * [M34 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/Feg4ajTp2Gg/discussion) + * [M33 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/lAn7IvSIQ_g/discussion) + * [M32 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/AefA5Pg_xIU/discussion) + * [M31 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/0dReVX4BX3c/discussion) + * [M30 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/_zdJBwP4vNU/discussion) + * [M29 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/DytR3rKvmw4/discussion) + * [M28 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/CLa_9sYY6ek/discussion) + * [M27 Release Notes](https://groups.google.com/d/topic/discuss-webrtc/NDwzHExp9zM/discussion) diff --git a/examples/BUILD.gn b/examples/BUILD.gn index 2acb0dd70f..b109d903e5 100644 --- a/examples/BUILD.gn +++ b/examples/BUILD.gn @@ -27,6 +27,7 @@ group("examples") { ":AppRTCMobile", ":AppRTCMobile_test_apk", ":libwebrtc_unity", + "androidvoip", ] # TODO(sakal): We include some code from the tests. 
Remove this dependency @@ -44,7 +45,7 @@ group("examples") { deps += [ ":AppRTCMobile" ] } - if (is_linux || is_win) { + if (is_linux || is_chromeos || is_win) { deps += [ ":peerconnection_server", ":stunserver", @@ -87,7 +88,7 @@ if (is_android) { testonly = true apk_name = "AppRTCMobile" android_manifest = "androidapp/AndroidManifest.xml" - min_sdk_version = 16 + min_sdk_version = 21 target_sdk_version = 29 deps = [ @@ -129,6 +130,7 @@ if (is_android) { "androidapp/src/org/appspot/apprtc/util/AsyncHttpURLConnection.java", ] + resources_package = "org.appspot.apprtc" deps = [ ":AppRTCMobile_resources", "../rtc_base:base_java", @@ -180,15 +182,15 @@ if (is_android) { "androidapp/res/layout/fragment_call.xml", "androidapp/res/layout/fragment_hud.xml", "androidapp/res/menu/connect_menu.xml", - "androidapp/res/values/arrays.xml", - "androidapp/res/values/strings.xml", "androidapp/res/values-v17/styles.xml", "androidapp/res/values-v21/styles.xml", + "androidapp/res/values/arrays.xml", + "androidapp/res/values/strings.xml", "androidapp/res/xml/preferences.xml", ] - custom_package = "org.appspot.apprtc" # Needed for Bazel converter. + custom_package = "org.appspot.apprtc" resource_dirs = [ "androidapp/res" ] assert(resource_dirs != []) # Mark as used. 
} @@ -196,7 +198,7 @@ if (is_android) { rtc_instrumentation_test_apk("AppRTCMobile_test_apk") { apk_name = "AppRTCMobileTest" android_manifest = "androidtests/AndroidManifest.xml" - min_sdk_version = 16 + min_sdk_version = 21 target_sdk_version = 21 sources = [ @@ -207,7 +209,11 @@ if (is_android) { deps = [ ":AppRTCMobile_javalib", + "../sdk/android:base_java", + "../sdk/android:camera_java", "../sdk/android:libjingle_peerconnection_java", + "../sdk/android:peerconnection_java", + "../sdk/android:video_api_java", "../sdk/android:video_java", "//third_party/android_support_test_runner:runner_java", "//third_party/junit", @@ -227,13 +233,7 @@ if (is_ios || (is_mac && target_cpu != "x86")) { "objc/AppRTCMobile/common/ARDUtilities.m", ] public_configs = [ ":apprtc_common_config" ] - - if (is_ios) { - # iOS must use WebRTC.framework which is dynamically linked. - deps = [ "../sdk:framework_objc+link" ] - } else { - deps = [ "../sdk:mac_framework_objc+link" ] - } + deps = [ "../sdk:base_objc" ] } config("apprtc_signaling_config") { @@ -253,8 +253,6 @@ if (is_ios || (is_mac && target_cpu != "x86")) { "objc/AppRTCMobile/ARDAppClient.m", "objc/AppRTCMobile/ARDAppEngineClient.h", "objc/AppRTCMobile/ARDAppEngineClient.m", - "objc/AppRTCMobile/ARDBitrateTracker.h", - "objc/AppRTCMobile/ARDBitrateTracker.m", "objc/AppRTCMobile/ARDCaptureController.h", "objc/AppRTCMobile/ARDCaptureController.m", "objc/AppRTCMobile/ARDExternalSampleCapturer.h", @@ -292,18 +290,17 @@ if (is_ios || (is_mac && target_cpu != "x86")) { deps = [ ":apprtc_common", ":socketrocket", - ] - - if (is_ios) { - # iOS must use WebRTC.framework which is dynamically linked. 
- deps += [ - "../sdk:framework_objc+link", - "../sdk:ios_framework_bundle", - ] - } else { - deps += [ "../sdk:mac_framework_objc+link" ] - } - libs = [ + "../sdk:base_objc", + "../sdk:default_codec_factory_objc", + "../sdk:file_logger_objc", + "../sdk:helpers_objc", + "../sdk:mediaconstraints_objc", + "../sdk:peerconnectionfactory_base_objc", + "../sdk:videocapture_objc", + "../sdk:videoframebuffer_objc", + "../sdk:videosource_objc", + ] + frameworks = [ "CoreMedia.framework", "QuartzCore.framework", ] @@ -341,11 +338,21 @@ if (is_ios || (is_mac && target_cpu != "x86")) { deps = [ ":apprtc_common", ":apprtc_signaling", - "../sdk:framework_objc+link", - "../sdk:ios_framework_bundle", + "../sdk:audio_session_objc", + "../sdk:base_objc", + "../sdk:helpers_objc", + "../sdk:mediaconstraints_objc", + "../sdk:metal_objc", + "../sdk:peerconnectionfactory_base_objc", + "../sdk:peerconnectionfactory_base_objc", + "../sdk:videocapture_objc", + "../sdk:videocodec_objc", ] + if (rtc_ios_macos_use_opengl_rendering) { + deps += [ "../sdk:opengl_ui_objc" ] + } - libs = [ "AVFoundation.framework" ] + frameworks = [ "AVFoundation.framework" ] } ios_app_bundle("AppRTCMobile") { @@ -379,14 +386,18 @@ if (is_ios || (is_mac && target_cpu != "x86")) { if (rtc_apprtcmobile_broadcast_extension) { bundle_data("AppRTCMobileBroadcastUpload_extension_bundle") { testonly = true - public_deps = [ ":AppRTCMobileBroadcastUpload" ] # no-presubmit-check TODO(webrtc:8603) + public_deps = [ # no-presubmit-check TODO(webrtc:8603) + ":AppRTCMobileBroadcastUpload", # prevent code format + ] sources = [ "$root_out_dir/AppRTCMobileBroadcastUpload.appex" ] outputs = [ "{{bundle_contents_dir}}/Plugins/{{source_file_part}}" ] } bundle_data("AppRTCMobileBroadcastSetupUI_extension_bundle") { testonly = true - public_deps = [ ":AppRTCMobileBroadcastSetupUI" ] # no-presubmit-check TODO(webrtc:8603) + public_deps = [ # no-presubmit-check TODO(webrtc:8603) + ":AppRTCMobileBroadcastSetupUI", # prevent code format 
+ ] sources = [ "$root_out_dir/AppRTCMobileBroadcastSetupUI.appex" ] outputs = [ "{{bundle_contents_dir}}/Plugins/{{source_file_part}}" ] } @@ -404,7 +415,7 @@ if (is_ios || (is_mac && target_cpu != "x86")) { "../sdk:ios_framework_bundle", ] - libs = [ "ReplayKit.framework" ] + frameworks = [ "ReplayKit.framework" ] } ios_appex_bundle("AppRTCMobileBroadcastUpload") { @@ -428,7 +439,7 @@ if (is_ios || (is_mac && target_cpu != "x86")) { info_plist = "objc/AppRTCMobile/ios/broadcast_extension/BroadcastSetupUIInfo.plist" - libs = [ "ReplayKit.framework" ] + frameworks = [ "ReplayKit.framework" ] deps = [ ":AppRTCMobile_ios_bundle_data" ] } @@ -484,17 +495,18 @@ if (is_ios || (is_mac && target_cpu != "x86")) { "../modules/audio_processing:api", "../pc:libjingle_peerconnection", "../rtc_base", + "../rtc_base/synchronization:mutex", "../sdk:base_objc", "../sdk:default_codec_factory_objc", "../sdk:helpers_objc", + "../sdk:metal_objc", "../sdk:native_api", - "../sdk:ui_objc", "../sdk:videocapture_objc", "../sdk:videotoolbox_objc", ] - if (current_cpu == "arm64") { - deps += [ "../sdk:metal_objc" ] + if (rtc_ios_macos_use_opengl_rendering) { + deps += [ "../sdk:opengl_ui_objc" ] } } @@ -528,7 +540,15 @@ if (is_ios || (is_mac && target_cpu != "x86")) { deps = [ ":apprtc_common", ":apprtc_signaling", - "../sdk:mac_framework_objc+link", + "../sdk:base_objc", + "../sdk:helpers_objc", + "../sdk:mediaconstraints_objc", + "../sdk:metal_objc", + "../sdk:opengl_ui_objc", + "../sdk:peerconnectionfactory_base_objc", + "../sdk:peerconnectionfactory_base_objc", + "../sdk:videocapture_objc", + "../sdk:videocodec_objc", ] } @@ -542,7 +562,7 @@ if (is_ios || (is_mac && target_cpu != "x86")) { info_plist = "objc/AppRTCMobile/mac/Info.plist" - libs = [ "AppKit.framework" ] + frameworks = [ "AppKit.framework" ] ldflags = [ "-rpath", @@ -587,10 +607,10 @@ if (is_ios || (is_mac && target_cpu != "x86")) { configs += [ ":socketrocket_warning_config" ] public_configs = [ 
":socketrocket_include_config" ] - libs = [ + libs = [ "icucore" ] + frameworks = [ "CFNetwork.framework", "Security.framework", - "icucore", ] } @@ -613,14 +633,16 @@ if (is_ios || (is_mac && target_cpu != "x86")) { ":AppRTCMobile_lib", ":apprtc_signaling", "../rtc_base", - "../sdk:framework_objc+link", - "../sdk:ios_framework_bundle", + "../sdk:mediaconstraints_objc", + "../sdk:peerconnectionfactory_base_objc", + "../sdk:videocapture_objc", "//build/config/ios:xctest", "//third_party/ocmock", ] } - rtc_ios_xctest_test("apprtcmobile_tests") { + rtc_test("apprtcmobile_tests") { + is_xctest = true info_plist = "objc/AppRTCMobile/ios/Info.plist" sources = [ "objc/AppRTCMobile/tests/main.mm" ] deps = [ @@ -635,7 +657,7 @@ if (is_ios || (is_mac && target_cpu != "x86")) { } } -if (is_linux || is_win) { +if (is_linux || is_chromeos || is_win) { rtc_executable("peerconnection_client") { testonly = true sources = [ @@ -655,12 +677,15 @@ if (is_linux || is_win) { "../api:scoped_refptr", "../api/audio:audio_mixer_api", "../api/audio_codecs:audio_codecs_api", - "../api/video:video_frame_i420", + "../api/video:video_frame", "../api/video:video_rtp_headers", "../api/video_codecs:video_codecs_api", "../media:rtc_media_base", "../p2p:rtc_p2p", + "../pc:video_track_source", "../rtc_base:checks", + "../rtc_base:net_helpers", + "../rtc_base:threading", "../rtc_base/third_party/sigslot", "../system_wrappers:field_trial", "../test:field_trial", @@ -677,9 +702,12 @@ if (is_linux || is_win) { "peerconnection/client/main_wnd.h", ] configs += [ "//build/config/win:windowed" ] - deps += [ "../media:rtc_media_base" ] + deps += [ + "../media:rtc_media_base", + "../rtc_base:win32", + ] } - if (is_linux) { + if (is_linux || is_chromeos) { sources += [ "peerconnection/client/linux/main.cc", "peerconnection/client/linux/main_wnd.cc", @@ -732,6 +760,7 @@ if (is_linux || is_win) { "peerconnection/server/utils.h", ] deps = [ + "../rtc_base:checks", "../rtc_base:rtc_base_approved", 
"../system_wrappers:field_trial", "../test:field_trial", @@ -749,7 +778,11 @@ if (is_linux || is_win) { "../p2p:rtc_p2p", "../pc:rtc_pc", "../rtc_base", + "../rtc_base:ip_address", "../rtc_base:rtc_base_approved", + "../rtc_base:socket_address", + "../rtc_base:socket_server", + "../rtc_base:threading", ] } rtc_executable("stunserver") { @@ -761,6 +794,9 @@ if (is_linux || is_win) { "../pc:rtc_pc", "../rtc_base", "../rtc_base:rtc_base_approved", + "../rtc_base:socket_address", + "../rtc_base:socket_server", + "../rtc_base:threading", ] } } @@ -807,6 +843,7 @@ if (is_win || is_android) { "../modules/video_capture:video_capture_module", "../pc:libjingle_peerconnection", "../pc:peerconnection", + "../pc:video_track_source", "../rtc_base", "../test:platform_video_capturer", "../test:video_test_common", @@ -829,6 +866,7 @@ if (is_android) { "../sdk/android:camera_java", "../sdk/android:libjingle_peerconnection_java", "../sdk/android:peerconnection_java", + "../sdk/android:video_api_java", "../sdk/android:video_java", "//third_party/android_deps:com_android_support_support_annotations_java", ] @@ -859,7 +897,9 @@ if (is_android) { deps = [ ":AppRTCMobile_javalib", + "../sdk/android:peerconnection_java", "//base:base_java_test_support", + "//third_party/androidx:androidx_test_core_java", "//third_party/google-truth:google_truth_java", ] @@ -881,6 +921,8 @@ if (!build_with_chromium) { "../rtc_base", "../rtc_base:checks", "../rtc_base:rtc_base_approved", + "../rtc_base:socket_address", + "../rtc_base:threading", "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/flags:parse", ] diff --git a/examples/aarproject/OWNERS b/examples/aarproject/OWNERS index 3c4e54174e..cf092a316a 100644 --- a/examples/aarproject/OWNERS +++ b/examples/aarproject/OWNERS @@ -1 +1 @@ -sakal@webrtc.org +xalep@webrtc.org diff --git a/examples/aarproject/app/build.gradle b/examples/aarproject/app/build.gradle index dde0707ace..b4f2acdbdd 100644 --- 
a/examples/aarproject/app/build.gradle +++ b/examples/aarproject/app/build.gradle @@ -2,10 +2,9 @@ apply plugin: 'com.android.application' android { compileSdkVersion 27 - buildToolsVersion "27.0.1" defaultConfig { applicationId "org.appspot.apprtc" - minSdkVersion 16 + minSdkVersion 21 targetSdkVersion 21 versionCode 1 versionName "1.0" diff --git a/examples/aarproject/build.gradle b/examples/aarproject/build.gradle index 5a016efb7e..6780c439e1 100644 --- a/examples/aarproject/build.gradle +++ b/examples/aarproject/build.gradle @@ -7,7 +7,7 @@ buildscript { jcenter() } dependencies { - classpath 'com.android.tools.build:gradle:3.0.0-beta2' + classpath 'com.android.tools.build:gradle:4.0.0' // NOTE: Do not place your application dependencies here; they belong diff --git a/examples/androidapp/AndroidManifest.xml b/examples/androidapp/AndroidManifest.xml index 8a9035e782..c4e1e797d0 100644 --- a/examples/androidapp/AndroidManifest.xml +++ b/examples/androidapp/AndroidManifest.xml @@ -8,7 +8,7 @@ - + diff --git a/examples/androidapp/OWNERS b/examples/androidapp/OWNERS index 299e8b20ec..109bea2725 100644 --- a/examples/androidapp/OWNERS +++ b/examples/androidapp/OWNERS @@ -1,2 +1,2 @@ magjed@webrtc.org -sakal@webrtc.org +xalep@webrtc.org diff --git a/examples/androidapp/res/values/arrays.xml b/examples/androidapp/res/values/arrays.xml index e0e6ccbdc2..4a2948c875 100644 --- a/examples/androidapp/res/values/arrays.xml +++ b/examples/androidapp/res/values/arrays.xml @@ -34,6 +34,7 @@ VP9 H264 Baseline H264 High + AV1 diff --git a/examples/androidapp/src/org/appspot/apprtc/AppRTCAudioManager.java b/examples/androidapp/src/org/appspot/apprtc/AppRTCAudioManager.java index 7ae3d838dd..c32ab964ad 100644 --- a/examples/androidapp/src/org/appspot/apprtc/AppRTCAudioManager.java +++ b/examples/androidapp/src/org/appspot/apprtc/AppRTCAudioManager.java @@ -185,8 +185,8 @@ private AppRTCAudioManager(Context context) { // Note that, the sensor will not be active until start() has 
been called. proximitySensor = AppRTCProximitySensor.create(context, // This method will be called each time a state change is detected. - // Example: user holds his hand over the device (closer than ~5 cm), - // or removes his hand from the device. + // Example: user holds their hand over the device (closer than ~5 cm), + // or removes their hand from the device. this ::onProximitySensorChangedState); Log.d(TAG, "defaultAudioDevice: " + defaultAudioDevice); diff --git a/examples/androidapp/src/org/appspot/apprtc/CallActivity.java b/examples/androidapp/src/org/appspot/apprtc/CallActivity.java index d55a9704d3..10d2b6eca2 100644 --- a/examples/androidapp/src/org/appspot/apprtc/CallActivity.java +++ b/examples/androidapp/src/org/appspot/apprtc/CallActivity.java @@ -786,7 +786,7 @@ public void run() { } @Override - public void onRemoteDescription(final SessionDescription sdp) { + public void onRemoteDescription(final SessionDescription desc) { final long delta = System.currentTimeMillis() - callStartedTimeMs; runOnUiThread(new Runnable() { @Override @@ -795,8 +795,8 @@ public void run() { Log.e(TAG, "Received remote SDP for non-initilized peer connection."); return; } - logAndToast("Received remote " + sdp.type + ", delay=" + delta + "ms"); - peerConnectionClient.setRemoteDescription(sdp); + logAndToast("Received remote " + desc.type + ", delay=" + delta + "ms"); + peerConnectionClient.setRemoteDescription(desc); if (!signalingParameters.initiator) { logAndToast("Creating ANSWER..."); // Create answer. Answer SDP will be sent to offering client in @@ -856,17 +856,17 @@ public void onChannelError(final String description) { // All callbacks are invoked from peer connection client looper thread and // are routed to UI thread. 
@Override - public void onLocalDescription(final SessionDescription sdp) { + public void onLocalDescription(final SessionDescription desc) { final long delta = System.currentTimeMillis() - callStartedTimeMs; runOnUiThread(new Runnable() { @Override public void run() { if (appRtcClient != null) { - logAndToast("Sending " + sdp.type + ", delay=" + delta + "ms"); + logAndToast("Sending " + desc.type + ", delay=" + delta + "ms"); if (signalingParameters.initiator) { - appRtcClient.sendOfferSdp(sdp); + appRtcClient.sendOfferSdp(desc); } else { - appRtcClient.sendAnswerSdp(sdp); + appRtcClient.sendAnswerSdp(desc); } } if (peerConnectionParameters.videoMaxBitrate > 0) { diff --git a/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java b/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java index 2817afea01..b3282a6955 100644 --- a/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java +++ b/examples/androidapp/src/org/appspot/apprtc/PeerConnectionClient.java @@ -36,6 +36,7 @@ import java.util.regex.Pattern; import org.appspot.apprtc.AppRTCClient.SignalingParameters; import org.appspot.apprtc.RecordedAudioToFileController; +import org.webrtc.AddIceObserver; import org.webrtc.AudioSource; import org.webrtc.AudioTrack; import org.webrtc.CameraVideoCapturer; @@ -94,6 +95,8 @@ public class PeerConnectionClient { private static final String VIDEO_CODEC_H264 = "H264"; private static final String VIDEO_CODEC_H264_BASELINE = "H264 Baseline"; private static final String VIDEO_CODEC_H264_HIGH = "H264 High"; + private static final String VIDEO_CODEC_AV1 = "AV1"; + private static final String VIDEO_CODEC_AV1_SDP_CODEC_NAME = "AV1X"; private static final String AUDIO_CODEC_OPUS = "opus"; private static final String AUDIO_CODEC_ISAC = "ISAC"; private static final String VIDEO_CODEC_PARAM_START_BITRATE = "x-google-start-bitrate"; @@ -152,8 +155,7 @@ public class PeerConnectionClient { @Nullable private List queuedRemoteCandidates; private 
boolean isInitiator; - @Nullable - private SessionDescription localSdp; // either offer or answer SDP + @Nullable private SessionDescription localDescription; // either offer or answer description @Nullable private VideoCapturer videoCapturer; // enableVideo is set to true if video should be rendered and sent. @@ -825,7 +827,16 @@ public void addRemoteIceCandidate(final IceCandidate candidate) { if (queuedRemoteCandidates != null) { queuedRemoteCandidates.add(candidate); } else { - peerConnection.addIceCandidate(candidate); + peerConnection.addIceCandidate(candidate, new AddIceObserver() { + @Override + public void onAddSuccess() { + Log.d(TAG, "Candidate " + candidate + " successfully added."); + } + @Override + public void onAddFailure(String error) { + Log.d(TAG, "Candidate " + candidate + " addition failed: " + error); + } + }); } } }); @@ -843,25 +854,24 @@ public void removeRemoteIceCandidates(final IceCandidate[] candidates) { }); } - public void setRemoteDescription(final SessionDescription sdp) { + public void setRemoteDescription(final SessionDescription desc) { executor.execute(() -> { if (peerConnection == null || isError) { return; } - String sdpDescription = sdp.description; + String sdp = desc.description; if (preferIsac) { - sdpDescription = preferCodec(sdpDescription, AUDIO_CODEC_ISAC, true); + sdp = preferCodec(sdp, AUDIO_CODEC_ISAC, true); } if (isVideoCallEnabled()) { - sdpDescription = - preferCodec(sdpDescription, getSdpVideoCodecName(peerConnectionParameters), false); + sdp = preferCodec(sdp, getSdpVideoCodecName(peerConnectionParameters), false); } if (peerConnectionParameters.audioStartBitrate > 0) { - sdpDescription = setStartBitrate( - AUDIO_CODEC_OPUS, false, sdpDescription, peerConnectionParameters.audioStartBitrate); + sdp = setStartBitrate( + AUDIO_CODEC_OPUS, false, sdp, peerConnectionParameters.audioStartBitrate); } Log.d(TAG, "Set remote SDP."); - SessionDescription sdpRemote = new SessionDescription(sdp.type, sdpDescription); + 
SessionDescription sdpRemote = new SessionDescription(desc.type, sdp); peerConnection.setRemoteDescription(sdpObserver, sdpRemote); }); } @@ -978,6 +988,8 @@ private static String getSdpVideoCodecName(PeerConnectionParameters parameters) return VIDEO_CODEC_VP8; case VIDEO_CODEC_VP9: return VIDEO_CODEC_VP9; + case VIDEO_CODEC_AV1: + return VIDEO_CODEC_AV1_SDP_CODEC_NAME; case VIDEO_CODEC_H264_HIGH: case VIDEO_CODEC_H264_BASELINE: return VIDEO_CODEC_H264; @@ -1002,8 +1014,8 @@ private static String getFieldTrials(PeerConnectionParameters peerConnectionPara @SuppressWarnings("StringSplitter") private static String setStartBitrate( - String codec, boolean isVideoCodec, String sdpDescription, int bitrateKbps) { - String[] lines = sdpDescription.split("\r\n"); + String codec, boolean isVideoCodec, String sdp, int bitrateKbps) { + String[] lines = sdp.split("\r\n"); int rtpmapLineIndex = -1; boolean sdpFormatUpdated = false; String codecRtpMap = null; @@ -1021,7 +1033,7 @@ private static String setStartBitrate( } if (codecRtpMap == null) { Log.w(TAG, "No rtpmap for " + codec + " codec"); - return sdpDescription; + return sdp; } Log.d(TAG, "Found " + codec + " rtpmap " + codecRtpMap + " at " + lines[rtpmapLineIndex]); @@ -1112,12 +1124,12 @@ private static String joinString( return joinString(newLineParts, " ", false /* delimiterAtEnd */); } - private static String preferCodec(String sdpDescription, String codec, boolean isAudio) { - final String[] lines = sdpDescription.split("\r\n"); + private static String preferCodec(String sdp, String codec, boolean isAudio) { + final String[] lines = sdp.split("\r\n"); final int mLineIndex = findMediaDescriptionLine(isAudio, lines); if (mLineIndex == -1) { Log.w(TAG, "No mediaDescription line, so can't prefer " + codec); - return sdpDescription; + return sdp; } // A list with all the payload types with name |codec|. The payload types are integers in the // range 96-127, but they are stored as strings here. 
@@ -1132,12 +1144,12 @@ private static String preferCodec(String sdpDescription, String codec, boolean i } if (codecPayloadTypes.isEmpty()) { Log.w(TAG, "No payload types with name " + codec); - return sdpDescription; + return sdp; } final String newMLine = movePayloadTypesToFront(codecPayloadTypes, lines[mLineIndex]); if (newMLine == null) { - return sdpDescription; + return sdp; } Log.d(TAG, "Change media description from: " + lines[mLineIndex] + " to " + newMLine); lines[mLineIndex] = newMLine; @@ -1148,7 +1160,16 @@ private void drainCandidates() { if (queuedRemoteCandidates != null) { Log.d(TAG, "Add " + queuedRemoteCandidates.size() + " remote candidates"); for (IceCandidate candidate : queuedRemoteCandidates) { - peerConnection.addIceCandidate(candidate); + peerConnection.addIceCandidate(candidate, new AddIceObserver() { + @Override + public void onAddSuccess() { + Log.d(TAG, "Candidate " + candidate + " successfully added."); + } + @Override + public void onAddFailure(String error) { + Log.d(TAG, "Candidate " + candidate + " addition failed: " + error); + } + }); } queuedRemoteCandidates = null; } @@ -1295,31 +1316,33 @@ public void onRenegotiationNeeded() { @Override public void onAddTrack(final RtpReceiver receiver, final MediaStream[] mediaStreams) {} + + @Override + public void onRemoveTrack(final RtpReceiver receiver) {} } // Implementation detail: handle offer creation/signaling and answer setting, // as well as adding remote ICE candidates once the answer SDP is set. 
private class SDPObserver implements SdpObserver { @Override - public void onCreateSuccess(final SessionDescription origSdp) { - if (localSdp != null) { + public void onCreateSuccess(final SessionDescription desc) { + if (localDescription != null) { reportError("Multiple SDP create."); return; } - String sdpDescription = origSdp.description; + String sdp = desc.description; if (preferIsac) { - sdpDescription = preferCodec(sdpDescription, AUDIO_CODEC_ISAC, true); + sdp = preferCodec(sdp, AUDIO_CODEC_ISAC, true); } if (isVideoCallEnabled()) { - sdpDescription = - preferCodec(sdpDescription, getSdpVideoCodecName(peerConnectionParameters), false); + sdp = preferCodec(sdp, getSdpVideoCodecName(peerConnectionParameters), false); } - final SessionDescription sdp = new SessionDescription(origSdp.type, sdpDescription); - localSdp = sdp; + final SessionDescription newDesc = new SessionDescription(desc.type, sdp); + localDescription = newDesc; executor.execute(() -> { if (peerConnection != null && !isError) { - Log.d(TAG, "Set local SDP from " + sdp.type); - peerConnection.setLocalDescription(sdpObserver, sdp); + Log.d(TAG, "Set local SDP from " + desc.type); + peerConnection.setLocalDescription(sdpObserver, newDesc); } }); } @@ -1336,7 +1359,7 @@ public void onSetSuccess() { if (peerConnection.getRemoteDescription() == null) { // We've just set our local SDP so time to send it. Log.d(TAG, "Local SDP set succesfully"); - events.onLocalDescription(localSdp); + events.onLocalDescription(localDescription); } else { // We've just set remote description, so drain remote // and send local ICE candidates. @@ -1350,7 +1373,7 @@ public void onSetSuccess() { // We've just set our local SDP so time to send it, drain // remote and send local ICE candidates. 
Log.d(TAG, "Local SDP set succesfully"); - events.onLocalDescription(localSdp); + events.onLocalDescription(localDescription); drainCandidates(); } else { // We've just set remote SDP - do nothing for now - diff --git a/examples/androidapp/start_loopback_stubbed_camera_saved_video_out.py b/examples/androidapp/start_loopback_stubbed_camera_saved_video_out.py index 491af38f68..b1cf84611f 100644 --- a/examples/androidapp/start_loopback_stubbed_camera_saved_video_out.py +++ b/examples/androidapp/start_loopback_stubbed_camera_saved_video_out.py @@ -15,110 +15,113 @@ from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice + def main(): - parser = OptionParser() + parser = OptionParser() - parser.add_option('--devname', dest='devname', help='The device id') + parser.add_option('--devname', dest='devname', help='The device id') - parser.add_option( - '--videooutsave', - dest='videooutsave', - help='The path where to save the video out file on local computer') + parser.add_option( + '--videooutsave', + dest='videooutsave', + help='The path where to save the video out file on local computer') - parser.add_option( - '--videoout', - dest='videoout', - help='The path where to put the video out file') + parser.add_option('--videoout', + dest='videoout', + help='The path where to put the video out file') - parser.add_option( - '--videoout_width', - dest='videoout_width', - type='int', - help='The width for the video out file') + parser.add_option('--videoout_width', + dest='videoout_width', + type='int', + help='The width for the video out file') - parser.add_option( - '--videoout_height', - dest='videoout_height', - type='int', - help='The height for the video out file') + parser.add_option('--videoout_height', + dest='videoout_height', + type='int', + help='The height for the video out file') - parser.add_option( - '--videoin', - dest='videoin', - help='The path where to read input file instead of camera') + parser.add_option( + '--videoin', + dest='videoin', + 
help='The path where to read input file instead of camera') - parser.add_option( - '--call_length', - dest='call_length', - type='int', - help='The length of the call') + parser.add_option('--call_length', + dest='call_length', + type='int', + help='The length of the call') - (options, args) = parser.parse_args() + (options, args) = parser.parse_args() - print (options, args) + print(options, args) - devname = options.devname + devname = options.devname - videoin = options.videoin + videoin = options.videoin - videoout = options.videoout - videoout_width = options.videoout_width - videoout_height = options.videoout_height + videoout = options.videoout + videoout_width = options.videoout_width + videoout_height = options.videoout_height - videooutsave = options.videooutsave + videooutsave = options.videooutsave - call_length = options.call_length or 10 + call_length = options.call_length or 10 - room = ''.join(random.choice(string.ascii_letters + string.digits) - for _ in range(8)) + room = ''.join( + random.choice(string.ascii_letters + string.digits) for _ in range(8)) - # Delete output video file. - if videoout: - subprocess.check_call(['adb', '-s', devname, 'shell', 'rm', '-f', - videoout]) + # Delete output video file. 
+ if videoout: + subprocess.check_call( + ['adb', '-s', devname, 'shell', 'rm', '-f', videoout]) - device = MonkeyRunner.waitForConnection(2, devname) + device = MonkeyRunner.waitForConnection(2, devname) - extras = { - 'org.appspot.apprtc.USE_VALUES_FROM_INTENT': True, - 'org.appspot.apprtc.AUDIOCODEC': 'OPUS', - 'org.appspot.apprtc.LOOPBACK': True, - 'org.appspot.apprtc.VIDEOCODEC': 'VP8', - 'org.appspot.apprtc.CAPTURETOTEXTURE': False, - 'org.appspot.apprtc.CAMERA2': False, - 'org.appspot.apprtc.ROOMID': room} + extras = { + 'org.appspot.apprtc.USE_VALUES_FROM_INTENT': True, + 'org.appspot.apprtc.AUDIOCODEC': 'OPUS', + 'org.appspot.apprtc.LOOPBACK': True, + 'org.appspot.apprtc.VIDEOCODEC': 'VP8', + 'org.appspot.apprtc.CAPTURETOTEXTURE': False, + 'org.appspot.apprtc.CAMERA2': False, + 'org.appspot.apprtc.ROOMID': room + } - if videoin: - extras.update({'org.appspot.apprtc.VIDEO_FILE_AS_CAMERA': videoin}) + if videoin: + extras.update({'org.appspot.apprtc.VIDEO_FILE_AS_CAMERA': videoin}) - if videoout: - extras.update({ - 'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE': videoout, - 'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_WIDTH': videoout_width, - 'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_HEIGHT': videoout_height}) + if videoout: + extras.update({ + 'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE': + videoout, + 'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_WIDTH': + videoout_width, + 'org.appspot.apprtc.SAVE_REMOTE_VIDEO_TO_FILE_HEIGHT': + videoout_height + }) - print extras + print extras - device.startActivity(data='https://appr.tc', - action='android.intent.action.VIEW', - component='org.appspot.apprtc/.ConnectActivity', extras=extras) + device.startActivity(data='https://appr.tc', + action='android.intent.action.VIEW', + component='org.appspot.apprtc/.ConnectActivity', + extras=extras) - print 'Running a call for %d seconds' % call_length - for _ in xrange(call_length): - sys.stdout.write('.') - sys.stdout.flush() - time.sleep(1) - print '\nEnding 
call.' + print 'Running a call for %d seconds' % call_length + for _ in xrange(call_length): + sys.stdout.write('.') + sys.stdout.flush() + time.sleep(1) + print '\nEnding call.' - # Press back to end the call. Will end on both sides. - device.press('KEYCODE_BACK', MonkeyDevice.DOWN_AND_UP) + # Press back to end the call. Will end on both sides. + device.press('KEYCODE_BACK', MonkeyDevice.DOWN_AND_UP) - if videooutsave: - time.sleep(2) + if videooutsave: + time.sleep(2) - subprocess.check_call(['adb', '-s', devname, 'pull', - videoout, videooutsave]) + subprocess.check_call( + ['adb', '-s', devname, 'pull', videoout, videooutsave]) -if __name__ == '__main__': - main() +if __name__ == '__main__': + main() diff --git a/examples/androidjunit/OWNERS b/examples/androidjunit/OWNERS index 3c4e54174e..cf092a316a 100644 --- a/examples/androidjunit/OWNERS +++ b/examples/androidjunit/OWNERS @@ -1 +1 @@ -sakal@webrtc.org +xalep@webrtc.org diff --git a/examples/androidjunit/src/org/appspot/apprtc/BluetoothManagerTest.java b/examples/androidjunit/src/org/appspot/apprtc/BluetoothManagerTest.java index b97f1f0bf6..3060bd7a56 100644 --- a/examples/androidjunit/src/org/appspot/apprtc/BluetoothManagerTest.java +++ b/examples/androidjunit/src/org/appspot/apprtc/BluetoothManagerTest.java @@ -29,6 +29,7 @@ import android.content.IntentFilter; import android.media.AudioManager; import android.util.Log; +import androidx.test.core.app.ApplicationProvider; import java.util.ArrayList; import java.util.List; import org.appspot.apprtc.AppRTCBluetoothManager.State; @@ -36,7 +37,6 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.robolectric.RuntimeEnvironment; import org.robolectric.annotation.Config; import org.robolectric.shadows.ShadowLog; @@ -65,7 +65,7 @@ public class BluetoothManagerTest { @Before public void setUp() { ShadowLog.stream = System.out; - context = RuntimeEnvironment.application; + context = 
ApplicationProvider.getApplicationContext(); mockedAppRtcAudioManager = mock(AppRTCAudioManager.class); mockedAudioManager = mock(AudioManager.class); mockedBluetoothHeadset = mock(BluetoothHeadset.class); diff --git a/examples/androidnativeapi/AndroidManifest.xml b/examples/androidnativeapi/AndroidManifest.xml index f10f55a1b6..9257c4132e 100644 --- a/examples/androidnativeapi/AndroidManifest.xml +++ b/examples/androidnativeapi/AndroidManifest.xml @@ -2,7 +2,7 @@ - + diff --git a/examples/androidnativeapi/BUILD.gn b/examples/androidnativeapi/BUILD.gn index 9c114e859c..9253c0bcd9 100644 --- a/examples/androidnativeapi/BUILD.gn +++ b/examples/androidnativeapi/BUILD.gn @@ -5,7 +5,7 @@ if (is_android) { testonly = true apk_name = "androidnativeapi" android_manifest = "AndroidManifest.xml" - min_sdk_version = 19 + min_sdk_version = 21 target_sdk_version = 27 sources = [ @@ -16,6 +16,7 @@ if (is_android) { deps = [ ":resources", "//modules/audio_device:audio_device_java", + "//rtc_base:base_java", "//sdk/android:camera_java", "//sdk/android:surfaceviewrenderer_java", "//sdk/android:video_api_java", @@ -47,6 +48,7 @@ if (is_android) { deps = [ ":generated_jni", "../../api:scoped_refptr", + "../../rtc_base/synchronization:mutex", "//api:libjingle_peerconnection_api", "//api/rtc_event_log:rtc_event_log_factory", "//api/task_queue:default_task_queue_factory", diff --git a/examples/androidnativeapi/OWNERS b/examples/androidnativeapi/OWNERS index 3c4e54174e..cf092a316a 100644 --- a/examples/androidnativeapi/OWNERS +++ b/examples/androidnativeapi/OWNERS @@ -1 +1 @@ -sakal@webrtc.org +xalep@webrtc.org diff --git a/examples/androidnativeapi/jni/android_call_client.cc b/examples/androidnativeapi/jni/android_call_client.cc index 03968335d9..f38de24a3f 100644 --- a/examples/androidnativeapi/jni/android_call_client.cc +++ b/examples/androidnativeapi/jni/android_call_client.cc @@ -43,7 +43,7 @@ class AndroidCallClient::PCObserver : public webrtc::PeerConnectionObserver { void 
OnIceCandidate(const webrtc::IceCandidateInterface* candidate) override; private: - const AndroidCallClient* client_; + AndroidCallClient* const client_; }; namespace { @@ -88,7 +88,7 @@ void AndroidCallClient::Call(JNIEnv* env, const webrtc::JavaRef& remote_sink) { RTC_DCHECK_RUN_ON(&thread_checker_); - rtc::CritScope lock(&pc_mutex_); + webrtc::MutexLock lock(&pc_mutex_); if (call_started_) { RTC_LOG(LS_WARNING) << "Call already started."; return; @@ -112,7 +112,7 @@ void AndroidCallClient::Hangup(JNIEnv* env) { call_started_ = false; { - rtc::CritScope lock(&pc_mutex_); + webrtc::MutexLock lock(&pc_mutex_); if (pc_ != nullptr) { pc_->Close(); pc_ = nullptr; @@ -174,14 +174,14 @@ void AndroidCallClient::CreatePeerConnectionFactory() { } void AndroidCallClient::CreatePeerConnection() { - rtc::CritScope lock(&pc_mutex_); + webrtc::MutexLock lock(&pc_mutex_); webrtc::PeerConnectionInterface::RTCConfiguration config; config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan; // DTLS SRTP has to be disabled for loopback to work. 
config.enable_dtls_srtp = false; - pc_ = pcf_->CreatePeerConnection(config, nullptr /* port_allocator */, - nullptr /* cert_generator */, - pc_observer_.get()); + webrtc::PeerConnectionDependencies deps(pc_observer_.get()); + pc_ = pcf_->CreatePeerConnectionOrError(config, std::move(deps)).MoveValue(); + RTC_LOG(LS_INFO) << "PeerConnection created: " << pc_; rtc::scoped_refptr local_video_track = @@ -205,7 +205,7 @@ void AndroidCallClient::CreatePeerConnection() { } void AndroidCallClient::Connect() { - rtc::CritScope lock(&pc_mutex_); + webrtc::MutexLock lock(&pc_mutex_); pc_->CreateOffer(new rtc::RefCountedObject(pc_), webrtc::PeerConnectionInterface::RTCOfferAnswerOptions()); } @@ -240,7 +240,7 @@ void AndroidCallClient::PCObserver::OnIceGatheringChange( void AndroidCallClient::PCObserver::OnIceCandidate( const webrtc::IceCandidateInterface* candidate) { RTC_LOG(LS_INFO) << "OnIceCandidate: " << candidate->server_url(); - rtc::CritScope lock(&client_->pc_mutex_); + webrtc::MutexLock lock(&client_->pc_mutex_); RTC_DCHECK(client_->pc_ != nullptr); client_->pc_->AddIceCandidate(candidate); } diff --git a/examples/androidnativeapi/jni/android_call_client.h b/examples/androidnativeapi/jni/android_call_client.h index 13992f5960..c9153d09bd 100644 --- a/examples/androidnativeapi/jni/android_call_client.h +++ b/examples/androidnativeapi/jni/android_call_client.h @@ -18,8 +18,8 @@ #include "api/peer_connection_interface.h" #include "api/scoped_refptr.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/thread_checker.h" +#include "api/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "sdk/android/native_api/jni/scoped_java_ref.h" #include "sdk/android/native_api/video/video_source.h" @@ -46,7 +46,7 @@ class AndroidCallClient { void CreatePeerConnection() RTC_RUN_ON(thread_checker_); void Connect() RTC_RUN_ON(thread_checker_); - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; bool call_started_ 
RTC_GUARDED_BY(thread_checker_); @@ -66,7 +66,7 @@ class AndroidCallClient { rtc::scoped_refptr video_source_ RTC_GUARDED_BY(thread_checker_); - rtc::CriticalSection pc_mutex_; + webrtc::Mutex pc_mutex_; rtc::scoped_refptr pc_ RTC_GUARDED_BY(pc_mutex_); }; diff --git a/examples/androidtests/AndroidManifest.xml b/examples/androidtests/AndroidManifest.xml index dae2e980a6..8e995366dc 100644 --- a/examples/androidtests/AndroidManifest.xml +++ b/examples/androidtests/AndroidManifest.xml @@ -14,7 +14,7 @@ package="org.appspot.apprtc.test"> - + diff --git a/examples/androidtests/OWNERS b/examples/androidtests/OWNERS index 3c4e54174e..cf092a316a 100644 --- a/examples/androidtests/OWNERS +++ b/examples/androidtests/OWNERS @@ -1 +1 @@ -sakal@webrtc.org +xalep@webrtc.org diff --git a/examples/androidtests/gradle_project_test.py b/examples/androidtests/gradle_project_test.py index 7db5797ef2..097232d07f 100644 --- a/examples/androidtests/gradle_project_test.py +++ b/examples/androidtests/gradle_project_test.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """ This scripts tests creating an Android Studio project using the generate_gradle.py script and making a debug build using it. 
@@ -23,58 +22,59 @@ import sys import tempfile - SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir)) -GENERATE_GRADLE_SCRIPT = os.path.join(SRC_DIR, - 'build/android/gradle/generate_gradle.py') +GENERATE_GRADLE_SCRIPT = os.path.join( + SRC_DIR, 'build/android/gradle/generate_gradle.py') GRADLEW_BIN = os.path.join(SCRIPT_DIR, 'third_party/gradle/gradlew') def _RunCommand(argv, cwd=SRC_DIR, **kwargs): - logging.info('Running %r', argv) - subprocess.check_call(argv, cwd=cwd, **kwargs) + logging.info('Running %r', argv) + subprocess.check_call(argv, cwd=cwd, **kwargs) def _ParseArgs(): - parser = argparse.ArgumentParser( - description='Test generating Android gradle project.') - parser.add_argument('build_dir_android', - help='The path to the build directory for Android.') - parser.add_argument('--project_dir', - help='A temporary directory to put the output.') + parser = argparse.ArgumentParser( + description='Test generating Android gradle project.') + parser.add_argument('build_dir_android', + help='The path to the build directory for Android.') + parser.add_argument('--project_dir', + help='A temporary directory to put the output.') - args = parser.parse_args() - return args + args = parser.parse_args() + return args def main(): - logging.basicConfig(level=logging.INFO) - args = _ParseArgs() - - project_dir = args.project_dir - if not project_dir: - project_dir = tempfile.mkdtemp() - - output_dir = os.path.abspath(args.build_dir_android) - project_dir = os.path.abspath(project_dir) - - try: - env = os.environ.copy() - env['PATH'] = os.pathsep.join([ - os.path.join(SRC_DIR, 'third_party', 'depot_tools'), env.get('PATH', '') - ]) - _RunCommand([GENERATE_GRADLE_SCRIPT, '--output-directory', output_dir, - '--target', '//examples:AppRTCMobile', - '--project-dir', project_dir, - '--use-gradle-process-resources', '--split-projects'], - env=env) - _RunCommand([GRADLEW_BIN, 'assembleDebug'], 
project_dir) - finally: - # Do not delete temporary directory if user specified it manually. - if not args.project_dir: - shutil.rmtree(project_dir, True) + logging.basicConfig(level=logging.INFO) + args = _ParseArgs() + + project_dir = args.project_dir + if not project_dir: + project_dir = tempfile.mkdtemp() + + output_dir = os.path.abspath(args.build_dir_android) + project_dir = os.path.abspath(project_dir) + + try: + env = os.environ.copy() + env['PATH'] = os.pathsep.join([ + os.path.join(SRC_DIR, 'third_party', 'depot_tools'), + env.get('PATH', '') + ]) + _RunCommand([ + GENERATE_GRADLE_SCRIPT, '--output-directory', output_dir, + '--target', '//examples:AppRTCMobile', '--project-dir', + project_dir, '--use-gradle-process-resources', '--split-projects' + ], + env=env) + _RunCommand([GRADLEW_BIN, 'assembleDebug'], project_dir) + finally: + # Do not delete temporary directory if user specified it manually. + if not args.project_dir: + shutil.rmtree(project_dir, True) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/examples/androidtests/src/org/appspot/apprtc/test/PeerConnectionClientTest.java b/examples/androidtests/src/org/appspot/apprtc/test/PeerConnectionClientTest.java index 96a4178eec..99e0ff6531 100644 --- a/examples/androidtests/src/org/appspot/apprtc/test/PeerConnectionClientTest.java +++ b/examples/androidtests/src/org/appspot/apprtc/test/PeerConnectionClientTest.java @@ -77,9 +77,9 @@ public class PeerConnectionClientTest implements PeerConnectionEvents { private ExecutorService signalingExecutor; private boolean isClosed; private boolean isIceConnected; - private SessionDescription localSdp; + private SessionDescription localDesc; private List iceCandidates = new ArrayList<>(); - private final Object localSdpEvent = new Object(); + private final Object localDescEvent = new Object(); private final Object iceCandidateEvent = new Object(); private final Object iceConnectedEvent = new Object(); private final Object closeEvent = 
new Object(); @@ -133,11 +133,11 @@ public boolean waitForFramesRendered(int timeoutMs) throws InterruptedException // Peer connection events implementation. @Override - public void onLocalDescription(SessionDescription sdp) { - Log.d(TAG, "LocalSDP type: " + sdp.type); - synchronized (localSdpEvent) { - localSdp = sdp; - localSdpEvent.notifyAll(); + public void onLocalDescription(SessionDescription desc) { + Log.d(TAG, "Local description type: " + desc.type); + synchronized (localDescEvent) { + localDesc = desc; + localDescEvent.notifyAll(); } } @@ -211,15 +211,15 @@ public void onPeerConnectionError(String description) { public void onPeerConnectionStatsReady(StatsReport[] reports) {} // Helper wait functions. - private boolean waitForLocalSDP(int timeoutMs) throws InterruptedException { - synchronized (localSdpEvent) { + private boolean waitForLocalDescription(int timeoutMs) throws InterruptedException { + synchronized (localDescEvent) { final long endTimeMs = System.currentTimeMillis() + timeoutMs; - while (localSdp == null) { + while (localDesc == null) { final long waitTimeMs = endTimeMs - System.currentTimeMillis(); if (waitTimeMs < 0) { return false; } - localSdpEvent.wait(waitTimeMs); + localDescEvent.wait(waitTimeMs); } return true; } @@ -369,8 +369,8 @@ public void testSetLocalOfferMakesVideoFlowLocally() throws InterruptedException createParametersForVideoCall(VIDEO_CODEC_VP8), createCameraCapturer(false /* captureToTexture */)); - // Wait for local SDP and ice candidates set events. - assertTrue("Local SDP was not set.", waitForLocalSDP(WAIT_TIMEOUT)); + // Wait for local description and ice candidates set events. + assertTrue("Local description was not set.", waitForLocalDescription(WAIT_TIMEOUT)); assertTrue("ICE candidates were not generated.", waitForIceCandidates(WAIT_TIMEOUT)); // Check that local video frames were rendered. 
@@ -397,11 +397,11 @@ private void doLoopbackTest(PeerConnectionParameters parameters, VideoCapturer v } pcClient = createPeerConnectionClient(localRenderer, remoteRenderer, parameters, videoCapturer); - // Wait for local SDP, rename it to answer and set as remote SDP. - assertTrue("Local SDP was not set.", waitForLocalSDP(WAIT_TIMEOUT)); - SessionDescription remoteSdp = new SessionDescription( - SessionDescription.Type.fromCanonicalForm("answer"), localSdp.description); - pcClient.setRemoteDescription(remoteSdp); + // Wait for local description, change type to answer and set as remote description. + assertTrue("Local description was not set.", waitForLocalDescription(WAIT_TIMEOUT)); + SessionDescription remoteDescription = new SessionDescription( + SessionDescription.Type.fromCanonicalForm("answer"), localDesc.description); + pcClient.setRemoteDescription(remoteDescription); // Wait for ICE connection. assertTrue("ICE connection failure.", waitForIceConnected(ICE_CONNECTION_WAIT_TIMEOUT)); @@ -520,11 +520,11 @@ public void testCameraSwitch() throws InterruptedException { createParametersForVideoCall(VIDEO_CODEC_VP8), createCameraCapturer(false /* captureToTexture */)); - // Wait for local SDP, rename it to answer and set as remote SDP. - assertTrue("Local SDP was not set.", waitForLocalSDP(WAIT_TIMEOUT)); - SessionDescription remoteSdp = new SessionDescription( - SessionDescription.Type.fromCanonicalForm("answer"), localSdp.description); - pcClient.setRemoteDescription(remoteSdp); + // Wait for local description, set type to answer and set as remote description. + assertTrue("Local description was not set.", waitForLocalDescription(WAIT_TIMEOUT)); + SessionDescription remoteDescription = new SessionDescription( + SessionDescription.Type.fromCanonicalForm("answer"), localDesc.description); + pcClient.setRemoteDescription(remoteDescription); // Wait for ICE connection. 
assertTrue("ICE connection failure.", waitForIceConnected(ICE_CONNECTION_WAIT_TIMEOUT)); @@ -568,11 +568,11 @@ public void testVideoSourceRestart() throws InterruptedException { createParametersForVideoCall(VIDEO_CODEC_VP8), createCameraCapturer(false /* captureToTexture */)); - // Wait for local SDP, rename it to answer and set as remote SDP. - assertTrue("Local SDP was not set.", waitForLocalSDP(WAIT_TIMEOUT)); - SessionDescription remoteSdp = new SessionDescription( - SessionDescription.Type.fromCanonicalForm("answer"), localSdp.description); - pcClient.setRemoteDescription(remoteSdp); + // Wait for local description, set type to answer and set as remote description. + assertTrue("Local description was not set.", waitForLocalDescription(WAIT_TIMEOUT)); + SessionDescription remoteDescription = new SessionDescription( + SessionDescription.Type.fromCanonicalForm("answer"), localDesc.description); + pcClient.setRemoteDescription(remoteDescription); // Wait for ICE connection. assertTrue("ICE connection failure.", waitForIceConnected(ICE_CONNECTION_WAIT_TIMEOUT)); @@ -617,11 +617,11 @@ public void testCaptureFormatChange() throws InterruptedException { createParametersForVideoCall(VIDEO_CODEC_VP8), createCameraCapturer(false /* captureToTexture */)); - // Wait for local SDP, rename it to answer and set as remote SDP. - assertTrue("Local SDP was not set.", waitForLocalSDP(WAIT_TIMEOUT)); - SessionDescription remoteSdp = new SessionDescription( - SessionDescription.Type.fromCanonicalForm("answer"), localSdp.description); - pcClient.setRemoteDescription(remoteSdp); + // Wait for local description, set type to answer and set as remote description. + assertTrue("Local description was not set.", waitForLocalDescription(WAIT_TIMEOUT)); + SessionDescription remoteDescription = new SessionDescription( + SessionDescription.Type.fromCanonicalForm("answer"), localDesc.description); + pcClient.setRemoteDescription(remoteDescription); // Wait for ICE connection. 
assertTrue("ICE connection failure.", waitForIceConnected(ICE_CONNECTION_WAIT_TIMEOUT)); diff --git a/examples/androidvoip/AndroidManifest.xml b/examples/androidvoip/AndroidManifest.xml new file mode 100644 index 0000000000..106f71171d --- /dev/null +++ b/examples/androidvoip/AndroidManifest.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/androidvoip/BUILD.gn b/examples/androidvoip/BUILD.gn new file mode 100644 index 0000000000..66dde947ac --- /dev/null +++ b/examples/androidvoip/BUILD.gn @@ -0,0 +1,92 @@ +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("//webrtc.gni") + +if (is_android) { + rtc_android_apk("androidvoip") { + testonly = true + apk_name = "androidvoip" + android_manifest = "AndroidManifest.xml" + min_sdk_version = 21 + target_sdk_version = 27 + + sources = [ + "java/org/webrtc/examples/androidvoip/MainActivity.java", + "java/org/webrtc/examples/androidvoip/OnVoipClientTaskCompleted.java", + "java/org/webrtc/examples/androidvoip/VoipClient.java", + ] + + deps = [ + ":resources", + "//modules/audio_device:audio_device_java", + "//rtc_base:base_java", + "//sdk/android:base_java", + "//sdk/android:java_audio_device_module_java", + "//sdk/android:video_java", + "//third_party/androidx:androidx_core_core_java", + "//third_party/androidx:androidx_legacy_legacy_support_v4_java", + ] + + shared_libraries = [ ":examples_androidvoip_jni" ] + } + + generate_jni("generated_jni") { + testonly = true + sources = [ "java/org/webrtc/examples/androidvoip/VoipClient.java" ] + namespace = "webrtc_examples" + jni_generator_include = 
"//sdk/android/src/jni/jni_generator_helper.h" + } + + rtc_shared_library("examples_androidvoip_jni") { + testonly = true + sources = [ + "jni/android_voip_client.cc", + "jni/android_voip_client.h", + "jni/onload.cc", + ] + + suppressed_configs += [ "//build/config/android:hide_all_but_jni_onload" ] + configs += [ "//build/config/android:hide_all_but_jni" ] + + deps = [ + ":generated_jni", + "../../rtc_base:socket_address", + "../../rtc_base:socket_server", + "../../rtc_base:threading", + "//api:transport_api", + "//api/audio_codecs:audio_codecs_api", + "//api/audio_codecs:builtin_audio_decoder_factory", + "//api/audio_codecs:builtin_audio_encoder_factory", + "//api/task_queue:default_task_queue_factory", + "//api/voip:voip_api", + "//api/voip:voip_engine_factory", + "//modules/utility:utility", + "//rtc_base", + "//rtc_base/third_party/sigslot:sigslot", + "//sdk/android:native_api_audio_device_module", + "//sdk/android:native_api_base", + "//sdk/android:native_api_jni", + "//third_party/abseil-cpp/absl/memory:memory", + ] + } + + android_resources("resources") { + testonly = true + custom_package = "org.webrtc.examples.androidvoip" + sources = [ + "res/layout/activity_main.xml", + "res/values/colors.xml", + "res/values/strings.xml", + ] + + # Needed for Bazel converter. + resource_dirs = [ "res" ] + assert(resource_dirs != []) # Mark as used. 
+ } +} diff --git a/examples/androidvoip/DEPS b/examples/androidvoip/DEPS new file mode 100644 index 0000000000..edb714dd44 --- /dev/null +++ b/examples/androidvoip/DEPS @@ -0,0 +1,3 @@ +include_rules = [ + "+sdk/android/native_api", +] diff --git a/examples/androidvoip/OWNERS b/examples/androidvoip/OWNERS new file mode 100644 index 0000000000..e7d3200562 --- /dev/null +++ b/examples/androidvoip/OWNERS @@ -0,0 +1,2 @@ +natim@webrtc.org +xalep@webrtc.org diff --git a/examples/androidvoip/java/org/webrtc/examples/androidvoip/MainActivity.java b/examples/androidvoip/java/org/webrtc/examples/androidvoip/MainActivity.java new file mode 100644 index 0000000000..d06d6adf0d --- /dev/null +++ b/examples/androidvoip/java/org/webrtc/examples/androidvoip/MainActivity.java @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +package org.webrtc.examples.androidvoip; + +import android.Manifest.permission; +import android.app.Activity; +import android.app.AlertDialog; +import android.content.Context; +import android.content.pm.PackageManager; +import android.os.Bundle; +import android.view.Gravity; +import android.view.View; +import android.widget.AdapterView; +import android.widget.ArrayAdapter; +import android.widget.Button; +import android.widget.EditText; +import android.widget.RelativeLayout; +import android.widget.ScrollView; +import android.widget.Spinner; +import android.widget.Switch; +import android.widget.TextView; +import android.widget.Toast; +import android.widget.ToggleButton; +import androidx.core.app.ActivityCompat; +import androidx.core.content.ContextCompat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.webrtc.ContextUtils; + +public class MainActivity extends Activity implements OnVoipClientTaskCompleted { + private static final int NUM_SUPPORTED_CODECS = 6; + + private VoipClient voipClient; + private List supportedCodecs; + private boolean[] isDecoderSelected; + private Set selectedDecoders; + + private Toast toast; + private ScrollView scrollView; + private TextView localIPAddressTextView; + private EditText localPortNumberEditText; + private EditText remoteIPAddressEditText; + private EditText remotePortNumberEditText; + private Spinner encoderSpinner; + private Button decoderSelectionButton; + private TextView decodersTextView; + private ToggleButton sessionButton; + private RelativeLayout switchLayout; + private Switch sendSwitch; + private Switch playoutSwitch; + + @Override + protected void onCreate(Bundle savedInstance) { + ContextUtils.initialize(getApplicationContext()); + + super.onCreate(savedInstance); + setContentView(R.layout.activity_main); + + System.loadLibrary("examples_androidvoip_jni"); + + voipClient = new 
VoipClient(getApplicationContext(), this); + voipClient.getAndSetUpLocalIPAddress(); + voipClient.getAndSetUpSupportedCodecs(); + + isDecoderSelected = new boolean[NUM_SUPPORTED_CODECS]; + selectedDecoders = new HashSet<>(); + + toast = Toast.makeText(this, "", Toast.LENGTH_SHORT); + + scrollView = (ScrollView) findViewById(R.id.scroll_view); + localIPAddressTextView = (TextView) findViewById(R.id.local_ip_address_text_view); + localPortNumberEditText = (EditText) findViewById(R.id.local_port_number_edit_text); + remoteIPAddressEditText = (EditText) findViewById(R.id.remote_ip_address_edit_text); + remotePortNumberEditText = (EditText) findViewById(R.id.remote_port_number_edit_text); + encoderSpinner = (Spinner) findViewById(R.id.encoder_spinner); + decoderSelectionButton = (Button) findViewById(R.id.decoder_selection_button); + decodersTextView = (TextView) findViewById(R.id.decoders_text_view); + sessionButton = (ToggleButton) findViewById(R.id.session_button); + switchLayout = (RelativeLayout) findViewById(R.id.switch_layout); + sendSwitch = (Switch) findViewById(R.id.start_send_switch); + playoutSwitch = (Switch) findViewById(R.id.start_playout_switch); + + setUpSessionButton(); + setUpSendAndPlayoutSwitch(); + } + + private void setUpEncoderSpinner(List supportedCodecs) { + ArrayAdapter encoderAdapter = + new ArrayAdapter(this, android.R.layout.simple_spinner_item, supportedCodecs); + encoderAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item); + encoderSpinner.setAdapter(encoderAdapter); + encoderSpinner.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() { + @Override + public void onItemSelected(AdapterView parent, View view, int position, long id) { + voipClient.setEncoder((String) parent.getSelectedItem()); + } + @Override + public void onNothingSelected(AdapterView parent) {} + }); + } + + private List getSelectedDecoders() { + List decoders = new ArrayList<>(); + for (int i = 0; i < supportedCodecs.size(); i++) { 
+ if (selectedDecoders.contains(i)) { + decoders.add(supportedCodecs.get(i)); + } + } + return decoders; + } + + private void setUpDecoderSelectionButton(List supportedCodecs) { + decoderSelectionButton.setOnClickListener((view) -> { + AlertDialog.Builder dialogBuilder = new AlertDialog.Builder(this); + dialogBuilder.setTitle(R.string.dialog_title); + + // Populate multi choice items with supported decoders. + String[] supportedCodecsArray = supportedCodecs.toArray(new String[0]); + dialogBuilder.setMultiChoiceItems( + supportedCodecsArray, isDecoderSelected, (dialog, position, isChecked) -> { + if (isChecked) { + selectedDecoders.add(position); + } else if (!isChecked) { + selectedDecoders.remove(position); + } + }); + + // "Ok" button. + dialogBuilder.setPositiveButton(R.string.ok_label, (dialog, position) -> { + List decoders = getSelectedDecoders(); + String result = decoders.stream().collect(Collectors.joining(", ")); + if (result.isEmpty()) { + decodersTextView.setText(R.string.decoders_text_view_default); + } else { + decodersTextView.setText(result); + } + voipClient.setDecoders(decoders); + }); + + // "Dismiss" button. + dialogBuilder.setNegativeButton( + R.string.dismiss_label, (dialog, position) -> { dialog.dismiss(); }); + + // "Clear All" button. + dialogBuilder.setNeutralButton(R.string.clear_all_label, (dialog, position) -> { + Arrays.fill(isDecoderSelected, false); + selectedDecoders.clear(); + decodersTextView.setText(R.string.decoders_text_view_default); + }); + + AlertDialog dialog = dialogBuilder.create(); + dialog.show(); + }); + } + + private void setUpSessionButton() { + sessionButton.setOnCheckedChangeListener((button, isChecked) -> { + // Ask for permission on RECORD_AUDIO if not granted. 
+ if (ContextCompat.checkSelfPermission(this, permission.RECORD_AUDIO) + != PackageManager.PERMISSION_GRANTED) { + String[] sList = {permission.RECORD_AUDIO}; + ActivityCompat.requestPermissions(this, sList, 1); + } + + if (isChecked) { + // Order matters here, addresses have to be set before starting session + // before setting codec. + voipClient.setLocalAddress(localIPAddressTextView.getText().toString(), + Integer.parseInt(localPortNumberEditText.getText().toString())); + voipClient.setRemoteAddress(remoteIPAddressEditText.getText().toString(), + Integer.parseInt(remotePortNumberEditText.getText().toString())); + voipClient.startSession(); + voipClient.setEncoder((String) encoderSpinner.getSelectedItem()); + voipClient.setDecoders(getSelectedDecoders()); + } else { + voipClient.stopSession(); + } + }); + } + + private void setUpSendAndPlayoutSwitch() { + sendSwitch.setOnCheckedChangeListener((button, isChecked) -> { + if (isChecked) { + voipClient.startSend(); + } else { + voipClient.stopSend(); + } + }); + + playoutSwitch.setOnCheckedChangeListener((button, isChecked) -> { + if (isChecked) { + voipClient.startPlayout(); + } else { + voipClient.stopPlayout(); + } + }); + } + + private void setUpIPAddressEditTexts(String localIPAddress) { + if (localIPAddress.isEmpty()) { + showToast("Please check your network configuration"); + } else { + localIPAddressTextView.setText(localIPAddress); + // By default remote IP address is the same as local IP address. 
+ remoteIPAddressEditText.setText(localIPAddress); + } + } + + private void showToast(String message) { + if (toast != null) { + toast.cancel(); + toast = Toast.makeText(this, message, Toast.LENGTH_SHORT); + toast.setGravity(Gravity.TOP, 0, 200); + toast.show(); + } + } + + @Override + protected void onDestroy() { + voipClient.close(); + voipClient = null; + + super.onDestroy(); + } + + @Override + public void onGetLocalIPAddressCompleted(String localIPAddress) { + runOnUiThread(() -> { setUpIPAddressEditTexts(localIPAddress); }); + } + + @Override + public void onGetSupportedCodecsCompleted(List supportedCodecs) { + runOnUiThread(() -> { + this.supportedCodecs = supportedCodecs; + setUpEncoderSpinner(supportedCodecs); + setUpDecoderSelectionButton(supportedCodecs); + }); + } + + @Override + public void onVoipClientInitializationCompleted(boolean isSuccessful) { + runOnUiThread(() -> { + if (!isSuccessful) { + showToast("Error initializing audio device"); + } + }); + } + + @Override + public void onStartSessionCompleted(boolean isSuccessful) { + runOnUiThread(() -> { + if (isSuccessful) { + showToast("Session started"); + switchLayout.setVisibility(View.VISIBLE); + scrollView.post(() -> { scrollView.fullScroll(ScrollView.FOCUS_DOWN); }); + } else { + showToast("Failed to start session"); + } + }); + } + + @Override + public void onStopSessionCompleted(boolean isSuccessful) { + runOnUiThread(() -> { + if (isSuccessful) { + showToast("Session stopped"); + // Set listeners to null so the checked state can be changed programmatically. + sendSwitch.setOnCheckedChangeListener(null); + playoutSwitch.setOnCheckedChangeListener(null); + sendSwitch.setChecked(false); + playoutSwitch.setChecked(false); + // Redo the switch listener setup. 
+ setUpSendAndPlayoutSwitch(); + switchLayout.setVisibility(View.GONE); + } else { + showToast("Failed to stop session"); + } + }); + } + + @Override + public void onStartSendCompleted(boolean isSuccessful) { + runOnUiThread(() -> { + if (isSuccessful) { + showToast("Started sending"); + } else { + showToast("Error initializing microphone"); + } + }); + } + + @Override + public void onStopSendCompleted(boolean isSuccessful) { + runOnUiThread(() -> { + if (isSuccessful) { + showToast("Stopped sending"); + } else { + showToast("Microphone termination failed"); + } + }); + } + + @Override + public void onStartPlayoutCompleted(boolean isSuccessful) { + runOnUiThread(() -> { + if (isSuccessful) { + showToast("Started playout"); + } else { + showToast("Error initializing speaker"); + } + }); + } + + @Override + public void onStopPlayoutCompleted(boolean isSuccessful) { + runOnUiThread(() -> { + if (isSuccessful) { + showToast("Stopped playout"); + } else { + showToast("Speaker termination failed"); + } + }); + } + + @Override + public void onUninitializedVoipClient() { + runOnUiThread(() -> { showToast("Voip client is uninitialized"); }); + } +} diff --git a/examples/androidvoip/java/org/webrtc/examples/androidvoip/OnVoipClientTaskCompleted.java b/examples/androidvoip/java/org/webrtc/examples/androidvoip/OnVoipClientTaskCompleted.java new file mode 100644 index 0000000000..bb85e048bb --- /dev/null +++ b/examples/androidvoip/java/org/webrtc/examples/androidvoip/OnVoipClientTaskCompleted.java @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +package org.webrtc.examples.androidvoip; + +import java.util.List; + +public interface OnVoipClientTaskCompleted { + void onGetLocalIPAddressCompleted(String localIPAddress); + void onGetSupportedCodecsCompleted(List supportedCodecs); + void onVoipClientInitializationCompleted(boolean isSuccessful); + void onStartSessionCompleted(boolean isSuccessful); + void onStopSessionCompleted(boolean isSuccessful); + void onStartSendCompleted(boolean isSuccessful); + void onStopSendCompleted(boolean isSuccessful); + void onStartPlayoutCompleted(boolean isSuccessful); + void onStopPlayoutCompleted(boolean isSuccessful); + void onUninitializedVoipClient(); +} diff --git a/examples/androidvoip/java/org/webrtc/examples/androidvoip/VoipClient.java b/examples/androidvoip/java/org/webrtc/examples/androidvoip/VoipClient.java new file mode 100644 index 0000000000..69a993d344 --- /dev/null +++ b/examples/androidvoip/java/org/webrtc/examples/androidvoip/VoipClient.java @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +package org.webrtc.examples.androidvoip; + +import android.content.Context; +import android.os.Handler; +import android.os.HandlerThread; +import java.util.ArrayList; +import java.util.List; +import org.webrtc.CalledByNative; + +public class VoipClient { + private long nativeClient; + private OnVoipClientTaskCompleted listener; + + public VoipClient(Context applicationContext, OnVoipClientTaskCompleted listener) { + this.listener = listener; + nativeClient = nativeCreateClient(applicationContext, this); + } + + private boolean isInitialized() { + return nativeClient != 0; + } + + public void getAndSetUpSupportedCodecs() { + if (isInitialized()) { + nativeGetSupportedCodecs(nativeClient); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void getAndSetUpLocalIPAddress() { + if (isInitialized()) { + nativeGetLocalIPAddress(nativeClient); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void setEncoder(String encoder) { + if (isInitialized()) { + nativeSetEncoder(nativeClient, encoder); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void setDecoders(List decoders) { + if (isInitialized()) { + nativeSetDecoders(nativeClient, decoders); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void setLocalAddress(String ipAddress, int portNumber) { + if (isInitialized()) { + nativeSetLocalAddress(nativeClient, ipAddress, portNumber); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void setRemoteAddress(String ipAddress, int portNumber) { + if (isInitialized()) { + nativeSetRemoteAddress(nativeClient, ipAddress, portNumber); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void startSession() { + if (isInitialized()) { + nativeStartSession(nativeClient); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void stopSession() { + if (isInitialized()) { + nativeStopSession(nativeClient); + } else { + 
listener.onUninitializedVoipClient(); + } + } + + public void startSend() { + if (isInitialized()) { + nativeStartSend(nativeClient); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void stopSend() { + if (isInitialized()) { + nativeStopSend(nativeClient); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void startPlayout() { + if (isInitialized()) { + nativeStartPlayout(nativeClient); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void stopPlayout() { + if (isInitialized()) { + nativeStopPlayout(nativeClient); + } else { + listener.onUninitializedVoipClient(); + } + } + + public void close() { + nativeDelete(nativeClient); + nativeClient = 0; + } + + @CalledByNative + public void onGetLocalIPAddressCompleted(String localIPAddress) { + listener.onGetLocalIPAddressCompleted(localIPAddress); + } + + @CalledByNative + public void onGetSupportedCodecsCompleted(List supportedCodecs) { + listener.onGetSupportedCodecsCompleted(supportedCodecs); + } + + @CalledByNative + public void onStartSessionCompleted(boolean isSuccessful) { + listener.onStartSessionCompleted(isSuccessful); + } + + @CalledByNative + public void onStopSessionCompleted(boolean isSuccessful) { + listener.onStopSessionCompleted(isSuccessful); + } + + @CalledByNative + public void onStartSendCompleted(boolean isSuccessful) { + listener.onStartSendCompleted(isSuccessful); + } + + @CalledByNative + public void onStopSendCompleted(boolean isSuccessful) { + listener.onStopSendCompleted(isSuccessful); + } + + @CalledByNative + public void onStartPlayoutCompleted(boolean isSuccessful) { + listener.onStartPlayoutCompleted(isSuccessful); + } + + @CalledByNative + public void onStopPlayoutCompleted(boolean isSuccessful) { + listener.onStopPlayoutCompleted(isSuccessful); + } + + private static native long nativeCreateClient( + Context applicationContext, VoipClient javaVoipClient); + private static native void nativeGetSupportedCodecs(long 
nativeAndroidVoipClient); + private static native void nativeGetLocalIPAddress(long nativeAndroidVoipClient); + private static native void nativeSetEncoder(long nativeAndroidVoipClient, String encoder); + private static native void nativeSetDecoders(long nativeAndroidVoipClient, List decoders); + private static native void nativeSetLocalAddress( + long nativeAndroidVoipClient, String ipAddress, int portNumber); + private static native void nativeSetRemoteAddress( + long nativeAndroidVoipClient, String ipAddress, int portNumber); + private static native void nativeStartSession(long nativeAndroidVoipClient); + private static native void nativeStopSession(long nativeAndroidVoipClient); + private static native void nativeStartSend(long nativeAndroidVoipClient); + private static native void nativeStopSend(long nativeAndroidVoipClient); + private static native void nativeStartPlayout(long nativeAndroidVoipClient); + private static native void nativeStopPlayout(long nativeAndroidVoipClient); + private static native void nativeDelete(long nativeAndroidVoipClient); +} diff --git a/examples/androidvoip/jni/android_voip_client.cc b/examples/androidvoip/jni/android_voip_client.cc new file mode 100644 index 0000000000..95d3ed407f --- /dev/null +++ b/examples/androidvoip/jni/android_voip_client.cc @@ -0,0 +1,519 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "examples/androidvoip/jni/android_voip_client.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/memory/memory.h" +#include "api/audio_codecs/builtin_audio_decoder_factory.h" +#include "api/audio_codecs/builtin_audio_encoder_factory.h" +#include "api/task_queue/default_task_queue_factory.h" +#include "api/voip/voip_codec.h" +#include "api/voip/voip_engine_factory.h" +#include "api/voip/voip_network.h" +#include "examples/androidvoip/generated_jni/VoipClient_jni.h" +#include "rtc_base/logging.h" +#include "rtc_base/network.h" +#include "rtc_base/socket_server.h" +#include "sdk/android/native_api/audio_device_module/audio_device_android.h" +#include "sdk/android/native_api/jni/java_types.h" +#include "sdk/android/native_api/jni/jvm.h" +#include "sdk/android/native_api/jni/scoped_java_ref.h" + +namespace { + +#define RUN_ON_VOIP_THREAD(method, ...) \ + if (!voip_thread_->IsCurrent()) { \ + voip_thread_->PostTask( \ + RTC_FROM_HERE, \ + std::bind(&AndroidVoipClient::method, this, ##__VA_ARGS__)); \ + return; \ + } \ + RTC_DCHECK_RUN_ON(voip_thread_.get()); + +// Connects a UDP socket to a public address and returns the local +// address associated with it. Since it binds to the "any" address +// internally, it returns the default local address on a multi-homed +// endpoint. Implementation copied from +// BasicNetworkManager::QueryDefaultLocalAddress. 
+rtc::IPAddress QueryDefaultLocalAddress(int family) { + const char kPublicIPv4Host[] = "8.8.8.8"; + const char kPublicIPv6Host[] = "2001:4860:4860::8888"; + const int kPublicPort = 53; + std::unique_ptr thread = rtc::Thread::CreateWithSocketServer(); + + RTC_DCHECK(thread->socketserver() != nullptr); + RTC_DCHECK(family == AF_INET || family == AF_INET6); + + std::unique_ptr socket( + thread->socketserver()->CreateAsyncSocket(family, SOCK_DGRAM)); + if (!socket) { + RTC_LOG_ERR(LERROR) << "Socket creation failed"; + return rtc::IPAddress(); + } + + auto host = family == AF_INET ? kPublicIPv4Host : kPublicIPv6Host; + if (socket->Connect(rtc::SocketAddress(host, kPublicPort)) < 0) { + if (socket->GetError() != ENETUNREACH && + socket->GetError() != EHOSTUNREACH) { + RTC_LOG(LS_INFO) << "Connect failed with " << socket->GetError(); + } + return rtc::IPAddress(); + } + return socket->GetLocalAddress().ipaddr(); +} + +// Assigned payload type for supported built-in codecs. PCMU, PCMA, +// and G722 have set payload types. Whereas opus, ISAC, and ILBC +// have dynamic payload types. +enum class PayloadType : int { + kPcmu = 0, + kPcma = 8, + kG722 = 9, + kOpus = 96, + kIsac = 97, + kIlbc = 98, +}; + +// Returns the payload type corresponding to codec_name. Only +// supports the built-in codecs. 
+int GetPayloadType(const std::string& codec_name) { + RTC_DCHECK(codec_name == "PCMU" || codec_name == "PCMA" || + codec_name == "G722" || codec_name == "opus" || + codec_name == "ISAC" || codec_name == "ILBC"); + + if (codec_name == "PCMU") { + return static_cast(PayloadType::kPcmu); + } else if (codec_name == "PCMA") { + return static_cast(PayloadType::kPcma); + } else if (codec_name == "G722") { + return static_cast(PayloadType::kG722); + } else if (codec_name == "opus") { + return static_cast(PayloadType::kOpus); + } else if (codec_name == "ISAC") { + return static_cast(PayloadType::kIsac); + } else if (codec_name == "ILBC") { + return static_cast(PayloadType::kIlbc); + } + + RTC_NOTREACHED(); + return -1; +} + +} // namespace + +namespace webrtc_examples { + +void AndroidVoipClient::Init( + JNIEnv* env, + const webrtc::JavaParamRef& application_context) { + webrtc::VoipEngineConfig config; + config.encoder_factory = webrtc::CreateBuiltinAudioEncoderFactory(); + config.decoder_factory = webrtc::CreateBuiltinAudioDecoderFactory(); + config.task_queue_factory = webrtc::CreateDefaultTaskQueueFactory(); + config.audio_device_module = + webrtc::CreateJavaAudioDeviceModule(env, application_context.obj()); + config.audio_processing = webrtc::AudioProcessingBuilder().Create(); + + voip_thread_->Start(); + + // Due to consistent thread requirement on + // modules/audio_device/android/audio_device_template.h, + // code is invoked in the context of voip_thread_. 
+ voip_thread_->Invoke(RTC_FROM_HERE, [this, &config] { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + supported_codecs_ = config.encoder_factory->GetSupportedEncoders(); + env_ = webrtc::AttachCurrentThreadIfNeeded(); + voip_engine_ = webrtc::CreateVoipEngine(std::move(config)); + }); +} + +AndroidVoipClient::~AndroidVoipClient() { + voip_thread_->Invoke(RTC_FROM_HERE, [this] { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + JavaVM* jvm = nullptr; + env_->GetJavaVM(&jvm); + if (!jvm) { + RTC_LOG(LS_ERROR) << "Failed to retrieve JVM"; + return; + } + jint res = jvm->DetachCurrentThread(); + if (res != JNI_OK) { + RTC_LOG(LS_ERROR) << "DetachCurrentThread failed: " << res; + } + }); + + voip_thread_->Stop(); +} + +AndroidVoipClient* AndroidVoipClient::Create( + JNIEnv* env, + const webrtc::JavaParamRef& application_context, + const webrtc::JavaParamRef& j_voip_client) { + // Using `new` to access a non-public constructor. + auto voip_client = + absl::WrapUnique(new AndroidVoipClient(env, j_voip_client)); + voip_client->Init(env, application_context); + return voip_client.release(); +} + +void AndroidVoipClient::GetSupportedCodecs(JNIEnv* env) { + RUN_ON_VOIP_THREAD(GetSupportedCodecs, env); + + std::vector names; + for (const webrtc::AudioCodecSpec& spec : supported_codecs_) { + names.push_back(spec.format.name); + } + webrtc::ScopedJavaLocalRef (*convert_function)( + JNIEnv*, const std::string&) = &webrtc::NativeToJavaString; + Java_VoipClient_onGetSupportedCodecsCompleted( + env_, j_voip_client_, NativeToJavaList(env_, names, convert_function)); +} + +void AndroidVoipClient::GetLocalIPAddress(JNIEnv* env) { + RUN_ON_VOIP_THREAD(GetLocalIPAddress, env); + + std::string local_ip_address; + rtc::IPAddress ipv4_address = QueryDefaultLocalAddress(AF_INET); + if (!ipv4_address.IsNil()) { + local_ip_address = ipv4_address.ToString(); + } else { + rtc::IPAddress ipv6_address = QueryDefaultLocalAddress(AF_INET6); + if (!ipv6_address.IsNil()) { + local_ip_address = 
ipv6_address.ToString(); + } + } + Java_VoipClient_onGetLocalIPAddressCompleted( + env_, j_voip_client_, webrtc::NativeToJavaString(env_, local_ip_address)); +} + +void AndroidVoipClient::SetEncoder(const std::string& encoder) { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + return; + } + for (const webrtc::AudioCodecSpec& codec : supported_codecs_) { + if (codec.format.name == encoder) { + webrtc::VoipResult result = voip_engine_->Codec().SetSendCodec( + *channel_, GetPayloadType(codec.format.name), codec.format); + RTC_CHECK(result == webrtc::VoipResult::kOk); + return; + } + } +} + +void AndroidVoipClient::SetEncoder( + JNIEnv* env, + const webrtc::JavaParamRef& j_encoder_string) { + const std::string& chosen_encoder = + webrtc::JavaToNativeString(env, j_encoder_string); + voip_thread_->PostTask( + RTC_FROM_HERE, [this, chosen_encoder] { SetEncoder(chosen_encoder); }); +} + +void AndroidVoipClient::SetDecoders(const std::vector& decoders) { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + return; + } + std::map decoder_specs; + for (const webrtc::AudioCodecSpec& codec : supported_codecs_) { + if (std::find(decoders.begin(), decoders.end(), codec.format.name) != + decoders.end()) { + decoder_specs.insert({GetPayloadType(codec.format.name), codec.format}); + } + } + + webrtc::VoipResult result = + voip_engine_->Codec().SetReceiveCodecs(*channel_, decoder_specs); + RTC_CHECK(result == webrtc::VoipResult::kOk); +} + +void AndroidVoipClient::SetDecoders( + JNIEnv* env, + const webrtc::JavaParamRef& j_decoder_strings) { + const std::vector& chosen_decoders = + webrtc::JavaListToNativeVector( + env, j_decoder_strings, &webrtc::JavaToNativeString); + voip_thread_->PostTask( + RTC_FROM_HERE, [this, chosen_decoders] { SetDecoders(chosen_decoders); }); +} + +void AndroidVoipClient::SetLocalAddress(const std::string& ip_address, + 
const int port_number) { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + rtp_local_address_ = rtc::SocketAddress(ip_address, port_number); + rtcp_local_address_ = rtc::SocketAddress(ip_address, port_number + 1); +} + +void AndroidVoipClient::SetLocalAddress( + JNIEnv* env, + const webrtc::JavaParamRef& j_ip_address_string, + jint j_port_number_int) { + const std::string& ip_address = + webrtc::JavaToNativeString(env, j_ip_address_string); + voip_thread_->PostTask(RTC_FROM_HERE, [this, ip_address, j_port_number_int] { + SetLocalAddress(ip_address, j_port_number_int); + }); +} + +void AndroidVoipClient::SetRemoteAddress(const std::string& ip_address, + const int port_number) { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + rtp_remote_address_ = rtc::SocketAddress(ip_address, port_number); + rtcp_remote_address_ = rtc::SocketAddress(ip_address, port_number + 1); +} + +void AndroidVoipClient::SetRemoteAddress( + JNIEnv* env, + const webrtc::JavaParamRef& j_ip_address_string, + jint j_port_number_int) { + const std::string& ip_address = + webrtc::JavaToNativeString(env, j_ip_address_string); + voip_thread_->PostTask(RTC_FROM_HERE, [this, ip_address, j_port_number_int] { + SetRemoteAddress(ip_address, j_port_number_int); + }); +} + +void AndroidVoipClient::StartSession(JNIEnv* env) { + RUN_ON_VOIP_THREAD(StartSession, env); + + // CreateChannel guarantees to return valid channel id. 
+ channel_ = voip_engine_->Base().CreateChannel(this, absl::nullopt); + + rtp_socket_.reset(rtc::AsyncUDPSocket::Create(voip_thread_->socketserver(), + rtp_local_address_)); + if (!rtp_socket_) { + RTC_LOG_ERR(LERROR) << "Socket creation failed"; + Java_VoipClient_onStartSessionCompleted(env_, j_voip_client_, + /*isSuccessful=*/false); + return; + } + rtp_socket_->SignalReadPacket.connect( + this, &AndroidVoipClient::OnSignalReadRTPPacket); + + rtcp_socket_.reset(rtc::AsyncUDPSocket::Create(voip_thread_->socketserver(), + rtcp_local_address_)); + if (!rtcp_socket_) { + RTC_LOG_ERR(LERROR) << "Socket creation failed"; + Java_VoipClient_onStartSessionCompleted(env_, j_voip_client_, + /*isSuccessful=*/false); + return; + } + rtcp_socket_->SignalReadPacket.connect( + this, &AndroidVoipClient::OnSignalReadRTCPPacket); + Java_VoipClient_onStartSessionCompleted(env_, j_voip_client_, + /*isSuccessful=*/true); +} + +void AndroidVoipClient::StopSession(JNIEnv* env) { + RUN_ON_VOIP_THREAD(StopSession, env); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + Java_VoipClient_onStopSessionCompleted(env_, j_voip_client_, + /*isSuccessful=*/false); + return; + } + if (voip_engine_->Base().StopSend(*channel_) != webrtc::VoipResult::kOk || + voip_engine_->Base().StopPlayout(*channel_) != webrtc::VoipResult::kOk) { + Java_VoipClient_onStopSessionCompleted(env_, j_voip_client_, + /*isSuccessful=*/false); + return; + } + + rtp_socket_->Close(); + rtcp_socket_->Close(); + + webrtc::VoipResult result = voip_engine_->Base().ReleaseChannel(*channel_); + RTC_CHECK(result == webrtc::VoipResult::kOk); + + channel_ = absl::nullopt; + Java_VoipClient_onStopSessionCompleted(env_, j_voip_client_, + /*isSuccessful=*/true); +} + +void AndroidVoipClient::StartSend(JNIEnv* env) { + RUN_ON_VOIP_THREAD(StartSend, env); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + Java_VoipClient_onStartSendCompleted(env_, j_voip_client_, + 
/*isSuccessful=*/false); + return; + } + bool sending_started = + (voip_engine_->Base().StartSend(*channel_) == webrtc::VoipResult::kOk); + Java_VoipClient_onStartSendCompleted(env_, j_voip_client_, sending_started); +} + +void AndroidVoipClient::StopSend(JNIEnv* env) { + RUN_ON_VOIP_THREAD(StopSend, env); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + Java_VoipClient_onStopSendCompleted(env_, j_voip_client_, + /*isSuccessful=*/false); + return; + } + bool sending_stopped = + (voip_engine_->Base().StopSend(*channel_) == webrtc::VoipResult::kOk); + Java_VoipClient_onStopSendCompleted(env_, j_voip_client_, sending_stopped); +} + +void AndroidVoipClient::StartPlayout(JNIEnv* env) { + RUN_ON_VOIP_THREAD(StartPlayout, env); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + Java_VoipClient_onStartPlayoutCompleted(env_, j_voip_client_, + /*isSuccessful=*/false); + return; + } + bool playout_started = + (voip_engine_->Base().StartPlayout(*channel_) == webrtc::VoipResult::kOk); + Java_VoipClient_onStartPlayoutCompleted(env_, j_voip_client_, + playout_started); +} + +void AndroidVoipClient::StopPlayout(JNIEnv* env) { + RUN_ON_VOIP_THREAD(StopPlayout, env); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + Java_VoipClient_onStopPlayoutCompleted(env_, j_voip_client_, + /*isSuccessful=*/false); + return; + } + bool playout_stopped = + (voip_engine_->Base().StopPlayout(*channel_) == webrtc::VoipResult::kOk); + Java_VoipClient_onStopPlayoutCompleted(env_, j_voip_client_, playout_stopped); +} + +void AndroidVoipClient::Delete(JNIEnv* env) { + delete this; +} + +void AndroidVoipClient::SendRtpPacket(const std::vector& packet_copy) { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + if (!rtp_socket_->SendTo(packet_copy.data(), packet_copy.size(), + rtp_remote_address_, rtc::PacketOptions())) { + RTC_LOG(LS_ERROR) << "Failed to send RTP packet"; + } +} + +bool AndroidVoipClient::SendRtp(const uint8_t* 
packet, + size_t length, + const webrtc::PacketOptions& options) { + std::vector packet_copy(packet, packet + length); + voip_thread_->PostTask(RTC_FROM_HERE, + [this, packet_copy = std::move(packet_copy)] { + SendRtpPacket(packet_copy); + }); + return true; +} + +void AndroidVoipClient::SendRtcpPacket( + const std::vector& packet_copy) { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + if (!rtcp_socket_->SendTo(packet_copy.data(), packet_copy.size(), + rtcp_remote_address_, rtc::PacketOptions())) { + RTC_LOG(LS_ERROR) << "Failed to send RTCP packet"; + } +} + +bool AndroidVoipClient::SendRtcp(const uint8_t* packet, size_t length) { + std::vector packet_copy(packet, packet + length); + voip_thread_->PostTask(RTC_FROM_HERE, + [this, packet_copy = std::move(packet_copy)] { + SendRtcpPacket(packet_copy); + }); + return true; +} + +void AndroidVoipClient::ReadRTPPacket(const std::vector& packet_copy) { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + return; + } + webrtc::VoipResult result = voip_engine_->Network().ReceivedRTPPacket( + *channel_, + rtc::ArrayView(packet_copy.data(), packet_copy.size())); + RTC_CHECK(result == webrtc::VoipResult::kOk); +} + +void AndroidVoipClient::OnSignalReadRTPPacket(rtc::AsyncPacketSocket* socket, + const char* rtp_packet, + size_t size, + const rtc::SocketAddress& addr, + const int64_t& timestamp) { + std::vector packet_copy(rtp_packet, rtp_packet + size); + voip_thread_->PostTask(RTC_FROM_HERE, + [this, packet_copy = std::move(packet_copy)] { + ReadRTPPacket(packet_copy); + }); +} + +void AndroidVoipClient::ReadRTCPPacket( + const std::vector& packet_copy) { + RTC_DCHECK_RUN_ON(voip_thread_.get()); + + if (!channel_) { + RTC_LOG(LS_ERROR) << "Channel has not been created"; + return; + } + webrtc::VoipResult result = voip_engine_->Network().ReceivedRTCPPacket( + *channel_, + rtc::ArrayView(packet_copy.data(), packet_copy.size())); + RTC_CHECK(result == 
webrtc::VoipResult::kOk); +} + +void AndroidVoipClient::OnSignalReadRTCPPacket(rtc::AsyncPacketSocket* socket, + const char* rtcp_packet, + size_t size, + const rtc::SocketAddress& addr, + const int64_t& timestamp) { + std::vector packet_copy(rtcp_packet, rtcp_packet + size); + voip_thread_->PostTask(RTC_FROM_HERE, + [this, packet_copy = std::move(packet_copy)] { + ReadRTCPPacket(packet_copy); + }); +} + +static jlong JNI_VoipClient_CreateClient( + JNIEnv* env, + const webrtc::JavaParamRef& application_context, + const webrtc::JavaParamRef& j_voip_client) { + return webrtc::NativeToJavaPointer( + AndroidVoipClient::Create(env, application_context, j_voip_client)); +} + +} // namespace webrtc_examples diff --git a/examples/androidvoip/jni/android_voip_client.h b/examples/androidvoip/jni/android_voip_client.h new file mode 100644 index 0000000000..bfca7e8b79 --- /dev/null +++ b/examples/androidvoip/jni/android_voip_client.h @@ -0,0 +1,189 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef EXAMPLES_ANDROIDVOIP_JNI_ANDROID_VOIP_CLIENT_H_ +#define EXAMPLES_ANDROIDVOIP_JNI_ANDROID_VOIP_CLIENT_H_ + +#include + +#include +#include +#include + +#include "api/audio_codecs/audio_format.h" +#include "api/call/transport.h" +#include "api/voip/voip_base.h" +#include "api/voip/voip_engine.h" +#include "rtc_base/async_packet_socket.h" +#include "rtc_base/async_udp_socket.h" +#include "rtc_base/socket_address.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" +#include "sdk/android/native_api/jni/scoped_java_ref.h" + +namespace webrtc_examples { + +// AndroidVoipClient facilitates the use of the VoIP API defined in +// api/voip/voip_engine.h. One instance of AndroidVoipClient should +// suffice for most VoIP applications. AndroidVoipClient implements +// webrtc::Transport to send RTP/RTCP packets to the remote endpoint. +// It also creates methods (slots) for sockets to connect to in +// order to receive RTP/RTCP packets. AndroidVoipClient does all +// operations with rtc::Thread (voip_thread_), this is to comply +// with consistent thread usage requirement with ProcessThread used +// within VoipEngine, as well as providing asynchronicity to the +// caller. AndroidVoipClient is meant to be used by Java through JNI. +class AndroidVoipClient : public webrtc::Transport, + public sigslot::has_slots<> { + public: + // Returns a pointer to an AndroidVoipClient object. Clients should + // use this factory method to create AndroidVoipClient objects. The + // method will return a nullptr in case of initialization errors. + // It is the client's responsibility to delete the pointer when + // they are done with it (this class provides a Delete() method). 
+ static AndroidVoipClient* Create( + JNIEnv* env, + const webrtc::JavaParamRef& application_context, + const webrtc::JavaParamRef& j_voip_client); + + ~AndroidVoipClient() override; + + // Provides client with a Java List of Strings containing names of + // the built-in supported codecs through callback. + void GetSupportedCodecs(JNIEnv* env); + + // Provides client with a Java String of the default local IPv4 address + // through callback. If IPv4 address is not found, provide the default + // local IPv6 address. If IPv6 address is not found, provide an empty + // string. + void GetLocalIPAddress(JNIEnv* env); + + // Sets the encoder used by the VoIP API. + void SetEncoder(JNIEnv* env, + const webrtc::JavaParamRef& j_encoder_string); + + // Sets the decoders used by the VoIP API. + void SetDecoders(JNIEnv* env, + const webrtc::JavaParamRef& j_decoder_strings); + + // Sets two local/remote addresses, one for RTP packets, and another for + // RTCP packets. The RTP address will have IP address j_ip_address_string + // and port number j_port_number_int, the RTCP address will have IP address + // j_ip_address_string and port number j_port_number_int+1. + void SetLocalAddress(JNIEnv* env, + const webrtc::JavaParamRef& j_ip_address_string, + jint j_port_number_int); + void SetRemoteAddress( + JNIEnv* env, + const webrtc::JavaParamRef& j_ip_address_string, + jint j_port_number_int); + + // Starts a VoIP session, then calls a callback method with a boolean + // value indicating if the session has started successfully. The VoIP + // operations below can only be used after a session has already started. + void StartSession(JNIEnv* env); + + // Stops the current session, then calls a callback method with a + // boolean value indicating if the session has stopped successfully. 
+ void StopSession(JNIEnv* env); + + // Starts sending RTP/RTCP packets to the remote endpoint, then calls + // a callback method with a boolean value indicating if sending + // has started successfully. + void StartSend(JNIEnv* env); + + // Stops sending RTP/RTCP packets to the remote endpoint, then calls + // a callback method with a boolean value indicating if sending + // has stopped successfully. + void StopSend(JNIEnv* env); + + // Starts playing out the voice data received from the remote endpoint, + // then calls a callback method with a boolean value indicating if + // playout has started successfully. + void StartPlayout(JNIEnv* env); + + // Stops playing out the voice data received from the remote endpoint, + // then calls a callback method with a boolean value indicating if + // playout has stopped successfully. + void StopPlayout(JNIEnv* env); + + // Deletes this object. Used by client when they are done. + void Delete(JNIEnv* env); + + // Implementation for Transport. + bool SendRtp(const uint8_t* packet, + size_t length, + const webrtc::PacketOptions& options) override; + bool SendRtcp(const uint8_t* packet, size_t length) override; + + // Slots for sockets to connect to. + void OnSignalReadRTPPacket(rtc::AsyncPacketSocket* socket, + const char* rtp_packet, + size_t size, + const rtc::SocketAddress& addr, + const int64_t& timestamp); + void OnSignalReadRTCPPacket(rtc::AsyncPacketSocket* socket, + const char* rtcp_packet, + size_t size, + const rtc::SocketAddress& addr, + const int64_t& timestamp); + + private: + AndroidVoipClient(JNIEnv* env, + const webrtc::JavaParamRef& j_voip_client) + : voip_thread_(rtc::Thread::CreateWithSocketServer()), + j_voip_client_(env, j_voip_client) {} + + void Init(JNIEnv* env, + const webrtc::JavaParamRef& application_context); + + // Overloaded methods having native C++ variables as arguments. 
+ void SetEncoder(const std::string& encoder); + void SetDecoders(const std::vector& decoders); + void SetLocalAddress(const std::string& ip_address, const int port_number); + void SetRemoteAddress(const std::string& ip_address, const int port_number); + + // Methods to send and receive RTP/RTCP packets. Takes in a + // copy of a packet as a vector to prolong the lifetime of + // the packet as these methods will be called asynchronously. + void SendRtpPacket(const std::vector& packet_copy); + void SendRtcpPacket(const std::vector& packet_copy); + void ReadRTPPacket(const std::vector& packet_copy); + void ReadRTCPPacket(const std::vector& packet_copy); + + // Used to invoke operations and send/receive RTP/RTCP packets. + std::unique_ptr voip_thread_; + // Reference to the VoipClient java instance used to + // invoke callbacks when operations are finished. + webrtc::ScopedJavaGlobalRef j_voip_client_ + RTC_GUARDED_BY(voip_thread_); + // A list of AudioCodecSpec supported by the built-in + // encoder/decoder factories. + std::vector supported_codecs_ + RTC_GUARDED_BY(voip_thread_); + // A JNI context used by the voip_thread_. + JNIEnv* env_ RTC_GUARDED_BY(voip_thread_); + // The entry point to all VoIP APIs. + std::unique_ptr voip_engine_ RTC_GUARDED_BY(voip_thread_); + // Used by the VoIP API to facilitate a VoIP session. + absl::optional channel_ RTC_GUARDED_BY(voip_thread_); + // Members below are used for network related operations. 
+ std::unique_ptr rtp_socket_ RTC_GUARDED_BY(voip_thread_); + std::unique_ptr rtcp_socket_ + RTC_GUARDED_BY(voip_thread_); + rtc::SocketAddress rtp_local_address_ RTC_GUARDED_BY(voip_thread_); + rtc::SocketAddress rtcp_local_address_ RTC_GUARDED_BY(voip_thread_); + rtc::SocketAddress rtp_remote_address_ RTC_GUARDED_BY(voip_thread_); + rtc::SocketAddress rtcp_remote_address_ RTC_GUARDED_BY(voip_thread_); +}; + +} // namespace webrtc_examples + +#endif // EXAMPLES_ANDROIDVOIP_JNI_ANDROID_VOIP_CLIENT_H_ diff --git a/examples/androidvoip/jni/onload.cc b/examples/androidvoip/jni/onload.cc new file mode 100644 index 0000000000..b952de348b --- /dev/null +++ b/examples/androidvoip/jni/onload.cc @@ -0,0 +1,28 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include + +#include "rtc_base/ssl_adapter.h" +#include "sdk/android/native_api/base/init.h" + +namespace webrtc_examples { + +extern "C" jint JNIEXPORT JNICALL JNI_OnLoad(JavaVM* jvm, void* reserved) { + webrtc::InitAndroid(jvm); + RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()"; + return JNI_VERSION_1_6; +} + +extern "C" void JNIEXPORT JNICALL JNI_OnUnLoad(JavaVM* jvm, void* reserved) { + RTC_CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()"; +} + +} // namespace webrtc_examples diff --git a/examples/androidvoip/res/layout/activity_main.xml b/examples/androidvoip/res/layout/activity_main.xml new file mode 100644 index 0000000000..c7fa5a9b31 --- /dev/null +++ b/examples/androidvoip/res/layout/activity_main.xml @@ -0,0 +1,303 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ' - '') - - return self._NEW_LINE.join(html) - - def _BuildScoreTab(self, score_name, anchor_data): - """Builds the content of a tab.""" - # Find unique values. - scores = self._scores_data_frame[ - self._scores_data_frame.eval_score_name == score_name] - apm_configs = sorted(self._FindUniqueTuples(scores, ['apm_config'])) - test_data_gen_configs = sorted(self._FindUniqueTuples( - scores, ['test_data_gen', 'test_data_gen_params'])) - - html = [ - '
', - '
', - '
', - (''), - ] - - # Header. - html.append('') - for test_data_gen_info in test_data_gen_configs: - html.append(''.format( - self._FormatName(test_data_gen_info[0]), test_data_gen_info[1])) - html.append('') - - # Body. - html.append('') - for apm_config in apm_configs: - html.append('') - for test_data_gen_info in test_data_gen_configs: - dialog_id = self._ScoreStatsInspectorDialogId( - score_name, apm_config[0], test_data_gen_info[0], - test_data_gen_info[1]) + html = ['', 'Results'] + + # Add Material Design hosted libs. + html.append('') + html.append( + '') html.append( - ''.format( - dialog_id, self._BuildScoreTableCell( - score_name, test_data_gen_info[0], test_data_gen_info[1], - apm_config[0]))) - html.append('') - html.append('') - - html.append('
APM config / Test data generator{} {}
' + self._FormatName(apm_config[0]) + '{}
') - - html.append(self._BuildScoreStatsInspectorDialogs( - score_name, apm_configs, test_data_gen_configs, - anchor_data)) - - return self._NEW_LINE.join(html) - - def _BuildScoreTableCell(self, score_name, test_data_gen, - test_data_gen_params, apm_config): - """Builds the content of a table cell for a score table.""" - scores = self._SliceDataForScoreTableCell( - score_name, apm_config, test_data_gen, test_data_gen_params) - stats = self._ComputeScoreStats(scores) - - html = [] - items_id_prefix = ( - score_name + test_data_gen + test_data_gen_params + apm_config) - if stats['count'] == 1: - # Show the only available score. - item_id = hashlib.md5(items_id_prefix.encode('utf-8')).hexdigest() - html.append('
{1:f}
'.format( - item_id, scores['score'].mean())) - html.append('
{}' - '
'.format(item_id, 'single value')) - else: - # Show stats. - for stat_name in ['min', 'max', 'mean', 'std dev']: - item_id = hashlib.md5( - (items_id_prefix + stat_name).encode('utf-8')).hexdigest() - html.append('
{1:f}
'.format( - item_id, stats[stat_name])) - html.append('
{}' - '
'.format(item_id, stat_name)) - - return self._NEW_LINE.join(html) - - def _BuildScoreStatsInspectorDialogs( - self, score_name, apm_configs, test_data_gen_configs, anchor_data): - """Builds a set of score stats inspector dialogs.""" - html = [] - for apm_config in apm_configs: - for test_data_gen_info in test_data_gen_configs: - dialog_id = self._ScoreStatsInspectorDialogId( - score_name, apm_config[0], - test_data_gen_info[0], test_data_gen_info[1]) - - html.append(''.format(dialog_id)) - - # Content. - html.append('
') - html.append('
APM config preset: {}
' - 'Test data generator: {} ({})
'.format( - self._FormatName(apm_config[0]), - self._FormatName(test_data_gen_info[0]), - test_data_gen_info[1])) - html.append(self._BuildScoreStatsInspectorDialog( - score_name, apm_config[0], test_data_gen_info[0], - test_data_gen_info[1], anchor_data + (dialog_id,))) + '') + html.append('') + + # Embed custom JavaScript and CSS files. + html.append('') + html.append('') + + html.append('') + + return self._NEW_LINE.join(html) + + def _BuildBody(self): + """Builds the content of the section.""" + score_names = self._scores_data_frame[ + 'eval_score_name'].drop_duplicates().values.tolist() + + html = [ + ('
'), + '
', + '
', + 'APM QA results ({})'.format( + self._output_filepath), + '
', + ] + + # Tab selectors. + html.append('
') + for tab_index, score_name in enumerate(score_names): + is_active = tab_index == 0 + html.append('' + '{}'.format(tab_index, + ' is-active' if is_active else '', + self._FormatName(score_name))) html.append('
') - # Actions. - html.append('
') - html.append('') + html.append('
') + html.append( + '
') + + # Tabs content. + for tab_index, score_name in enumerate(score_names): + html.append('
'.format( + ' is-active' if is_active else '', tab_index)) + html.append('
') + html.append( + self._BuildScoreTab(score_name, ('s{}'.format(tab_index), ))) + html.append('
') + html.append('
') + + html.append('
') html.append('
') - html.append('
') - - return self._NEW_LINE.join(html) - - def _BuildScoreStatsInspectorDialog( - self, score_name, apm_config, test_data_gen, test_data_gen_params, - anchor_data): - """Builds one score stats inspector dialog.""" - scores = self._SliceDataForScoreTableCell( - score_name, apm_config, test_data_gen, test_data_gen_params) - - capture_render_pairs = sorted(self._FindUniqueTuples( - scores, ['capture', 'render'])) - echo_simulators = sorted(self._FindUniqueTuples(scores, ['echo_simulator'])) - - html = [''] - - # Header. - html.append('') - for echo_simulator in echo_simulators: - html.append('') - html.append('') - - # Body. - html.append('') - for row, (capture, render) in enumerate(capture_render_pairs): - html.append(''.format( - capture, render)) - for col, echo_simulator in enumerate(echo_simulators): - score_tuple = self._SliceDataForScoreStatsTableCell( - scores, capture, render, echo_simulator[0]) - cell_class = 'r{}c{}'.format(row, col) - html.append(''.format( - cell_class, self._BuildScoreStatsInspectorTableCell( - score_tuple, anchor_data + (cell_class,)))) - html.append('') - html.append('') - - html.append('
Capture-Render / Echo simulator' + self._FormatName(echo_simulator[0]) +'
{}
{}
{}
') - - # Placeholder for the audio inspector. - html.append('
') - - return self._NEW_LINE.join(html) - - def _BuildScoreStatsInspectorTableCell(self, score_tuple, anchor_data): - """Builds the content of a cell of a score stats inspector.""" - anchor = '&'.join(anchor_data) - html = [('
{}
' - '').format(score_tuple.score, anchor)] - - # Add all the available file paths as hidden data. - for field_name in score_tuple.keys(): - if field_name.endswith('_filepath'): - html.append(''.format( - field_name, score_tuple[field_name])) - - return self._NEW_LINE.join(html) - - def _SliceDataForScoreTableCell( - self, score_name, apm_config, test_data_gen, test_data_gen_params): - """Slices |self._scores_data_frame| to extract the data for a tab.""" - masks = [] - masks.append(self._scores_data_frame.eval_score_name == score_name) - masks.append(self._scores_data_frame.apm_config == apm_config) - masks.append(self._scores_data_frame.test_data_gen == test_data_gen) - masks.append( - self._scores_data_frame.test_data_gen_params == test_data_gen_params) - mask = functools.reduce((lambda i1, i2: i1 & i2), masks) - del masks - return self._scores_data_frame[mask] - - @classmethod - def _SliceDataForScoreStatsTableCell( - cls, scores, capture, render, echo_simulator): - """Slices |scores| to extract the data for a tab.""" - masks = [] - - masks.append(scores.capture == capture) - masks.append(scores.render == render) - masks.append(scores.echo_simulator == echo_simulator) - mask = functools.reduce((lambda i1, i2: i1 & i2), masks) - del masks - - sliced_data = scores[mask] - assert len(sliced_data) == 1, 'single score is expected' - return sliced_data.iloc[0] - - @classmethod - def _FindUniqueTuples(cls, data_frame, fields): - """Slices |data_frame| to a list of fields and finds unique tuples.""" - return data_frame[fields].drop_duplicates().values.tolist() - - @classmethod - def _ComputeScoreStats(cls, data_frame): - """Computes score stats.""" - scores = data_frame['score'] - return { - 'count': scores.count(), - 'min': scores.min(), - 'max': scores.max(), - 'mean': scores.mean(), - 'std dev': scores.std(), - } - - @classmethod - def _ScoreStatsInspectorDialogId(cls, score_name, apm_config, test_data_gen, - test_data_gen_params): - """Assigns a unique name to a 
dialog.""" - return 'score-stats-dialog-' + hashlib.md5( - 'score-stats-inspector-{}-{}-{}-{}'.format( - score_name, apm_config, test_data_gen, - test_data_gen_params).encode('utf-8')).hexdigest() - - @classmethod - def _Save(cls, output_filepath, html): - """Writes the HTML file. + # Add snackbar for notifications. + html.append( + '
' + '
' + '' + '
') + + return self._NEW_LINE.join(html) + + def _BuildScoreTab(self, score_name, anchor_data): + """Builds the content of a tab.""" + # Find unique values. + scores = self._scores_data_frame[ + self._scores_data_frame.eval_score_name == score_name] + apm_configs = sorted(self._FindUniqueTuples(scores, ['apm_config'])) + test_data_gen_configs = sorted( + self._FindUniqueTuples(scores, + ['test_data_gen', 'test_data_gen_params'])) + + html = [ + '
', + '
', + '
', + (''), + ] + + # Header. + html.append('') + for test_data_gen_info in test_data_gen_configs: + html.append(''.format( + self._FormatName(test_data_gen_info[0]), + test_data_gen_info[1])) + html.append('') + + # Body. + html.append('') + for apm_config in apm_configs: + html.append('') + for test_data_gen_info in test_data_gen_configs: + dialog_id = self._ScoreStatsInspectorDialogId( + score_name, apm_config[0], test_data_gen_info[0], + test_data_gen_info[1]) + html.append( + ''. + format( + dialog_id, + self._BuildScoreTableCell(score_name, + test_data_gen_info[0], + test_data_gen_info[1], + apm_config[0]))) + html.append('') + html.append('') + + html.append( + '
APM config / Test data generator{} {}
' + self._FormatName(apm_config[0]) + '{}
') + + html.append( + self._BuildScoreStatsInspectorDialogs(score_name, apm_configs, + test_data_gen_configs, + anchor_data)) + + return self._NEW_LINE.join(html) + + def _BuildScoreTableCell(self, score_name, test_data_gen, + test_data_gen_params, apm_config): + """Builds the content of a table cell for a score table.""" + scores = self._SliceDataForScoreTableCell(score_name, apm_config, + test_data_gen, + test_data_gen_params) + stats = self._ComputeScoreStats(scores) + + html = [] + items_id_prefix = (score_name + test_data_gen + test_data_gen_params + + apm_config) + if stats['count'] == 1: + # Show the only available score. + item_id = hashlib.md5(items_id_prefix.encode('utf-8')).hexdigest() + html.append('
{1:f}
'.format( + item_id, scores['score'].mean())) + html.append( + '
{}' + '
'.format(item_id, 'single value')) + else: + # Show stats. + for stat_name in ['min', 'max', 'mean', 'std dev']: + item_id = hashlib.md5( + (items_id_prefix + stat_name).encode('utf-8')).hexdigest() + html.append('
{1:f}
'.format( + item_id, stats[stat_name])) + html.append( + '
{}' + '
'.format(item_id, stat_name)) + + return self._NEW_LINE.join(html) + + def _BuildScoreStatsInspectorDialogs(self, score_name, apm_configs, + test_data_gen_configs, anchor_data): + """Builds a set of score stats inspector dialogs.""" + html = [] + for apm_config in apm_configs: + for test_data_gen_info in test_data_gen_configs: + dialog_id = self._ScoreStatsInspectorDialogId( + score_name, apm_config[0], test_data_gen_info[0], + test_data_gen_info[1]) + + html.append(''.format(dialog_id)) + + # Content. + html.append('
') + html.append( + '
APM config preset: {}
' + 'Test data generator: {} ({})
'. + format(self._FormatName(apm_config[0]), + self._FormatName(test_data_gen_info[0]), + test_data_gen_info[1])) + html.append( + self._BuildScoreStatsInspectorDialog( + score_name, apm_config[0], test_data_gen_info[0], + test_data_gen_info[1], anchor_data + (dialog_id, ))) + html.append('
') + + # Actions. + html.append('
') + html.append('') + html.append('
') + + html.append('
') + + return self._NEW_LINE.join(html) + + def _BuildScoreStatsInspectorDialog(self, score_name, apm_config, + test_data_gen, test_data_gen_params, + anchor_data): + """Builds one score stats inspector dialog.""" + scores = self._SliceDataForScoreTableCell(score_name, apm_config, + test_data_gen, + test_data_gen_params) + + capture_render_pairs = sorted( + self._FindUniqueTuples(scores, ['capture', 'render'])) + echo_simulators = sorted( + self._FindUniqueTuples(scores, ['echo_simulator'])) + + html = [ + '' + ] + + # Header. + html.append('') + for echo_simulator in echo_simulators: + html.append('') + html.append('') + + # Body. + html.append('') + for row, (capture, render) in enumerate(capture_render_pairs): + html.append(''.format( + capture, render)) + for col, echo_simulator in enumerate(echo_simulators): + score_tuple = self._SliceDataForScoreStatsTableCell( + scores, capture, render, echo_simulator[0]) + cell_class = 'r{}c{}'.format(row, col) + html.append(''.format( + cell_class, + self._BuildScoreStatsInspectorTableCell( + score_tuple, anchor_data + (cell_class, )))) + html.append('') + html.append('') + + html.append('
Capture-Render / Echo simulator' + self._FormatName(echo_simulator[0]) + '
{}
{}
{}
') + + # Placeholder for the audio inspector. + html.append('
') + + return self._NEW_LINE.join(html) + + def _BuildScoreStatsInspectorTableCell(self, score_tuple, anchor_data): + """Builds the content of a cell of a score stats inspector.""" + anchor = '&'.join(anchor_data) + html = [('
{}
' + '').format(score_tuple.score, anchor)] + + # Add all the available file paths as hidden data. + for field_name in score_tuple.keys(): + if field_name.endswith('_filepath'): + html.append( + ''.format( + field_name, score_tuple[field_name])) + + return self._NEW_LINE.join(html) + + def _SliceDataForScoreTableCell(self, score_name, apm_config, + test_data_gen, test_data_gen_params): + """Slices |self._scores_data_frame| to extract the data for a tab.""" + masks = [] + masks.append(self._scores_data_frame.eval_score_name == score_name) + masks.append(self._scores_data_frame.apm_config == apm_config) + masks.append(self._scores_data_frame.test_data_gen == test_data_gen) + masks.append(self._scores_data_frame.test_data_gen_params == + test_data_gen_params) + mask = functools.reduce((lambda i1, i2: i1 & i2), masks) + del masks + return self._scores_data_frame[mask] + + @classmethod + def _SliceDataForScoreStatsTableCell(cls, scores, capture, render, + echo_simulator): + """Slices |scores| to extract the data for a tab.""" + masks = [] + + masks.append(scores.capture == capture) + masks.append(scores.render == render) + masks.append(scores.echo_simulator == echo_simulator) + mask = functools.reduce((lambda i1, i2: i1 & i2), masks) + del masks + + sliced_data = scores[mask] + assert len(sliced_data) == 1, 'single score is expected' + return sliced_data.iloc[0] + + @classmethod + def _FindUniqueTuples(cls, data_frame, fields): + """Slices |data_frame| to a list of fields and finds unique tuples.""" + return data_frame[fields].drop_duplicates().values.tolist() + + @classmethod + def _ComputeScoreStats(cls, data_frame): + """Computes score stats.""" + scores = data_frame['score'] + return { + 'count': scores.count(), + 'min': scores.min(), + 'max': scores.max(), + 'mean': scores.mean(), + 'std dev': scores.std(), + } + + @classmethod + def _ScoreStatsInspectorDialogId(cls, score_name, apm_config, + test_data_gen, test_data_gen_params): + """Assigns a unique name to a 
dialog.""" + return 'score-stats-dialog-' + hashlib.md5( + 'score-stats-inspector-{}-{}-{}-{}'.format( + score_name, apm_config, test_data_gen, + test_data_gen_params).encode('utf-8')).hexdigest() + + @classmethod + def _Save(cls, output_filepath, html): + """Writes the HTML file. Args: output_filepath: output file path. html: string with the HTML content. """ - with open(output_filepath, 'w') as f: - f.write(html) + with open(output_filepath, 'w') as f: + f.write(html) - @classmethod - def _FormatName(cls, name): - """Formats a name. + @classmethod + def _FormatName(cls, name): + """Formats a name. Args: name: a string. @@ -399,4 +423,4 @@ def _FormatName(cls, name): Returns: A copy of name in which underscores and dashes are replaced with a space. """ - return re.sub(r'[_\-]', ' ', name) + return re.sub(r'[_\-]', ' ', name) diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/export_unittest.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/export_unittest.py index 264af7e994..412aa7c4e7 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/export_unittest.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/export_unittest.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Unit tests for the export module. """ @@ -27,60 +26,61 @@ class TestExport(unittest.TestCase): - """Unit tests for the export module. + """Unit tests for the export module. """ - _CLEAN_TMP_OUTPUT = True + _CLEAN_TMP_OUTPUT = True - def setUp(self): - """Creates temporary data to export.""" - self._tmp_path = tempfile.mkdtemp() + def setUp(self): + """Creates temporary data to export.""" + self._tmp_path = tempfile.mkdtemp() - # Run a fake experiment to produce data to export. 
- simulator = simulation.ApmModuleSimulator( - test_data_generator_factory=( - test_data_generation_factory.TestDataGeneratorFactory( - aechen_ir_database_path='', - noise_tracks_path='', - copy_with_identity=False)), - evaluation_score_factory=( - eval_scores_factory.EvaluationScoreWorkerFactory( - polqa_tool_bin_path=os.path.join( - os.path.dirname(os.path.abspath(__file__)), 'fake_polqa'), - echo_metric_tool_bin_path=None - )), - ap_wrapper=audioproc_wrapper.AudioProcWrapper( - audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH), - evaluator=evaluation.ApmModuleEvaluator()) - simulator.Run( - config_filepaths=['apm_configs/default.json'], - capture_input_filepaths=[ - os.path.join(self._tmp_path, 'pure_tone-440_1000.wav'), - os.path.join(self._tmp_path, 'pure_tone-880_1000.wav'), - ], - test_data_generator_names=['identity', 'white_noise'], - eval_score_names=['audio_level_peak', 'audio_level_mean'], - output_dir=self._tmp_path) + # Run a fake experiment to produce data to export. + simulator = simulation.ApmModuleSimulator( + test_data_generator_factory=( + test_data_generation_factory.TestDataGeneratorFactory( + aechen_ir_database_path='', + noise_tracks_path='', + copy_with_identity=False)), + evaluation_score_factory=( + eval_scores_factory.EvaluationScoreWorkerFactory( + polqa_tool_bin_path=os.path.join( + os.path.dirname(os.path.abspath(__file__)), + 'fake_polqa'), + echo_metric_tool_bin_path=None)), + ap_wrapper=audioproc_wrapper.AudioProcWrapper( + audioproc_wrapper.AudioProcWrapper. + DEFAULT_APM_SIMULATOR_BIN_PATH), + evaluator=evaluation.ApmModuleEvaluator()) + simulator.Run( + config_filepaths=['apm_configs/default.json'], + capture_input_filepaths=[ + os.path.join(self._tmp_path, 'pure_tone-440_1000.wav'), + os.path.join(self._tmp_path, 'pure_tone-880_1000.wav'), + ], + test_data_generator_names=['identity', 'white_noise'], + eval_score_names=['audio_level_peak', 'audio_level_mean'], + output_dir=self._tmp_path) - # Export results. 
- p = collect_data.InstanceArgumentsParser() - args = p.parse_args(['--output_dir', self._tmp_path]) - src_path = collect_data.ConstructSrcPath(args) - self._data_to_export = collect_data.FindScores(src_path, args) + # Export results. + p = collect_data.InstanceArgumentsParser() + args = p.parse_args(['--output_dir', self._tmp_path]) + src_path = collect_data.ConstructSrcPath(args) + self._data_to_export = collect_data.FindScores(src_path, args) - def tearDown(self): - """Recursively deletes temporary folders.""" - if self._CLEAN_TMP_OUTPUT: - shutil.rmtree(self._tmp_path) - else: - logging.warning(self.id() + ' did not clean the temporary path ' + ( - self._tmp_path)) + def tearDown(self): + """Recursively deletes temporary folders.""" + if self._CLEAN_TMP_OUTPUT: + shutil.rmtree(self._tmp_path) + else: + logging.warning(self.id() + ' did not clean the temporary path ' + + (self._tmp_path)) - def testCreateHtmlReport(self): - fn_out = os.path.join(self._tmp_path, 'results.html') - exporter = export.HtmlExport(fn_out) - exporter.Export(self._data_to_export) + def testCreateHtmlReport(self): + fn_out = os.path.join(self._tmp_path, 'results.html') + exporter = export.HtmlExport(fn_out) + exporter.Export(self._data_to_export) - document = pq.PyQuery(filename=fn_out) - self.assertIsInstance(document, pq.PyQuery) - # TODO(alessiob): Use PyQuery API to check the HTML file. + document = pq.PyQuery(filename=fn_out) + self.assertIsInstance(document, pq.PyQuery) + # TODO(alessiob): Use PyQuery API to check the HTML file. 
diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py index 01418d84fe..a7db7b4840 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/external_vad.py @@ -16,62 +16,60 @@ import tempfile try: - import numpy as np + import numpy as np except ImportError: - logging.critical('Cannot import the third-party Python package numpy') - sys.exit(1) + logging.critical('Cannot import the third-party Python package numpy') + sys.exit(1) from . import signal_processing -class ExternalVad(object): - def __init__(self, path_to_binary, name): - """Args: +class ExternalVad(object): + def __init__(self, path_to_binary, name): + """Args: path_to_binary: path to binary that accepts '-i ', '-o '. There must be one float value per 10ms audio name: a name to identify the external VAD. Used for saving the output as extvad_output-. 
""" - self._path_to_binary = path_to_binary - self.name = name - assert os.path.exists(self._path_to_binary), ( - self._path_to_binary) - self._vad_output = None + self._path_to_binary = path_to_binary + self.name = name + assert os.path.exists(self._path_to_binary), (self._path_to_binary) + self._vad_output = None - def Run(self, wav_file_path): - _signal = signal_processing.SignalProcessingUtils.LoadWav(wav_file_path) - if _signal.channels != 1: - raise NotImplementedError('Multiple-channel' - ' annotations not implemented') - if _signal.frame_rate != 48000: - raise NotImplementedError('Frame rates ' - 'other than 48000 not implemented') + def Run(self, wav_file_path): + _signal = signal_processing.SignalProcessingUtils.LoadWav( + wav_file_path) + if _signal.channels != 1: + raise NotImplementedError('Multiple-channel' + ' annotations not implemented') + if _signal.frame_rate != 48000: + raise NotImplementedError('Frame rates ' + 'other than 48000 not implemented') - tmp_path = tempfile.mkdtemp() - try: - output_file_path = os.path.join( - tmp_path, self.name + '_vad.tmp') - subprocess.call([ - self._path_to_binary, - '-i', wav_file_path, - '-o', output_file_path - ]) - self._vad_output = np.fromfile(output_file_path, np.float32) - except Exception as e: - logging.error('Error while running the ' + self.name + - ' VAD (' + e.message + ')') - finally: - if os.path.exists(tmp_path): - shutil.rmtree(tmp_path) + tmp_path = tempfile.mkdtemp() + try: + output_file_path = os.path.join(tmp_path, self.name + '_vad.tmp') + subprocess.call([ + self._path_to_binary, '-i', wav_file_path, '-o', + output_file_path + ]) + self._vad_output = np.fromfile(output_file_path, np.float32) + except Exception as e: + logging.error('Error while running the ' + self.name + ' VAD (' + + e.message + ')') + finally: + if os.path.exists(tmp_path): + shutil.rmtree(tmp_path) - def GetVadOutput(self): - assert self._vad_output is not None - return self._vad_output + def GetVadOutput(self): + 
assert self._vad_output is not None + return self._vad_output - @classmethod - def ConstructVadDict(cls, vad_paths, vad_names): - external_vads = {} - for path, name in zip(vad_paths, vad_names): - external_vads[name] = ExternalVad(path, name) - return external_vads + @classmethod + def ConstructVadDict(cls, vad_paths, vad_names): + external_vads = {} + for path, name in zip(vad_paths, vad_names): + external_vads[name] = ExternalVad(path, name) + return external_vads diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_external_vad.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_external_vad.py index 7c75e8f5c3..f679f8c94a 100755 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_external_vad.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_external_vad.py @@ -9,16 +9,17 @@ import argparse import numpy as np + def main(): - parser = argparse.ArgumentParser() - parser.add_argument('-i', required=True) - parser.add_argument('-o', required=True) + parser = argparse.ArgumentParser() + parser.add_argument('-i', required=True) + parser.add_argument('-o', required=True) - args = parser.parse_args() + args = parser.parse_args() - array = np.arange(100, dtype=np.float32) - array.tofile(open(args.o, 'w')) + array = np.arange(100, dtype=np.float32) + array.tofile(open(args.o, 'w')) if __name__ == '__main__': - main() + main() diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_polqa.cc b/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_polqa.cc index 62d8ebb84d..bae652e283 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_polqa.cc +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/fake_polqa.cc @@ -43,7 +43,7 @@ int main(int argc, char* argv[]) { return 0; } } - FATAL() << kErrorMessage; + RTC_FATAL() << 
kErrorMessage; } } // namespace test diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py index b1afe14454..f9125fa7f3 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Input mixer module. """ @@ -17,24 +16,24 @@ class ApmInputMixer(object): - """Class to mix a set of audio segments down to the APM input.""" + """Class to mix a set of audio segments down to the APM input.""" - _HARD_CLIPPING_LOG_MSG = 'hard clipping detected in the mixed signal' + _HARD_CLIPPING_LOG_MSG = 'hard clipping detected in the mixed signal' - def __init__(self): - pass + def __init__(self): + pass - @classmethod - def HardClippingLogMessage(cls): - """Returns the log message used when hard clipping is detected in the mix. + @classmethod + def HardClippingLogMessage(cls): + """Returns the log message used when hard clipping is detected in the mix. This method is mainly intended to be used by the unit tests. """ - return cls._HARD_CLIPPING_LOG_MSG + return cls._HARD_CLIPPING_LOG_MSG - @classmethod - def Mix(cls, output_path, capture_input_filepath, echo_filepath): - """Mixes capture and echo. + @classmethod + def Mix(cls, output_path, capture_input_filepath, echo_filepath): + """Mixes capture and echo. Creates the overall capture input for APM by mixing the "echo-free" capture signal with the echo signal (e.g., echo simulated via the @@ -58,38 +57,41 @@ def Mix(cls, output_path, capture_input_filepath, echo_filepath): Returns: Path to the mix audio track file. 
""" - if echo_filepath is None: - return capture_input_filepath - - # Build the mix output file name as a function of the echo file name. - # This ensures that if the internal parameters of the echo path simulator - # change, no erroneous cache hit occurs. - echo_file_name, _ = os.path.splitext(os.path.split(echo_filepath)[1]) - capture_input_file_name, _ = os.path.splitext( - os.path.split(capture_input_filepath)[1]) - mix_filepath = os.path.join(output_path, 'mix_capture_{}_{}.wav'.format( - capture_input_file_name, echo_file_name)) - - # Create the mix if not done yet. - mix = None - if not os.path.exists(mix_filepath): - echo_free_capture = signal_processing.SignalProcessingUtils.LoadWav( - capture_input_filepath) - echo = signal_processing.SignalProcessingUtils.LoadWav(echo_filepath) - - if signal_processing.SignalProcessingUtils.CountSamples(echo) < ( - signal_processing.SignalProcessingUtils.CountSamples( - echo_free_capture)): - raise exceptions.InputMixerException( - 'echo cannot be shorter than capture') - - mix = echo_free_capture.overlay(echo) - signal_processing.SignalProcessingUtils.SaveWav(mix_filepath, mix) - - # Check if hard clipping occurs. - if mix is None: - mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath) - if signal_processing.SignalProcessingUtils.DetectHardClipping(mix): - logging.warning(cls._HARD_CLIPPING_LOG_MSG) - - return mix_filepath + if echo_filepath is None: + return capture_input_filepath + + # Build the mix output file name as a function of the echo file name. + # This ensures that if the internal parameters of the echo path simulator + # change, no erroneous cache hit occurs. + echo_file_name, _ = os.path.splitext(os.path.split(echo_filepath)[1]) + capture_input_file_name, _ = os.path.splitext( + os.path.split(capture_input_filepath)[1]) + mix_filepath = os.path.join( + output_path, + 'mix_capture_{}_{}.wav'.format(capture_input_file_name, + echo_file_name)) + + # Create the mix if not done yet. 
+ mix = None + if not os.path.exists(mix_filepath): + echo_free_capture = signal_processing.SignalProcessingUtils.LoadWav( + capture_input_filepath) + echo = signal_processing.SignalProcessingUtils.LoadWav( + echo_filepath) + + if signal_processing.SignalProcessingUtils.CountSamples(echo) < ( + signal_processing.SignalProcessingUtils.CountSamples( + echo_free_capture)): + raise exceptions.InputMixerException( + 'echo cannot be shorter than capture') + + mix = echo_free_capture.overlay(echo) + signal_processing.SignalProcessingUtils.SaveWav(mix_filepath, mix) + + # Check if hard clipping occurs. + if mix is None: + mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath) + if signal_processing.SignalProcessingUtils.DetectHardClipping(mix): + logging.warning(cls._HARD_CLIPPING_LOG_MSG) + + return mix_filepath diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py index b212614199..4fd5e4f1ee 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_mixer_unittest.py @@ -5,21 +5,15 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Unit tests for the input mixer module. """ import logging import os import shutil -import sys import tempfile import unittest -SRC = os.path.abspath(os.path.join( - os.path.dirname((__file__)), os.pardir, os.pardir, os.pardir, os.pardir)) -sys.path.append(os.path.join(SRC, 'third_party', 'pymock')) - import mock from . import exceptions @@ -28,122 +22,119 @@ class TestApmInputMixer(unittest.TestCase): - """Unit tests for the ApmInputMixer class. + """Unit tests for the ApmInputMixer class. 
""" - # Audio track file names created in setUp(). - _FILENAMES = ['capture', 'echo_1', 'echo_2', 'shorter', 'longer'] - - # Target peak power level (dBFS) of each audio track file created in setUp(). - # These values are hand-crafted in order to make saturation happen when - # capture and echo_2 are mixed and the contrary for capture and echo_1. - # None means that the power is not changed. - _MAX_PEAK_POWER_LEVELS = [-10.0, -5.0, 0.0, None, None] - - # Audio track file durations in milliseconds. - _DURATIONS = [1000, 1000, 1000, 800, 1200] - - _SAMPLE_RATE = 48000 - - def setUp(self): - """Creates temporary data.""" - self._tmp_path = tempfile.mkdtemp() - - # Create audio track files. - self._audio_tracks = {} - for filename, peak_power, duration in zip( - self._FILENAMES, self._MAX_PEAK_POWER_LEVELS, self._DURATIONS): - audio_track_filepath = os.path.join(self._tmp_path, '{}.wav'.format( - filename)) - - # Create a pure tone with the target peak power level. - template = signal_processing.SignalProcessingUtils.GenerateSilence( - duration=duration, sample_rate=self._SAMPLE_RATE) - signal = signal_processing.SignalProcessingUtils.GeneratePureTone( - template) - if peak_power is not None: - signal = signal.apply_gain(-signal.max_dBFS + peak_power) - - signal_processing.SignalProcessingUtils.SaveWav( - audio_track_filepath, signal) - self._audio_tracks[filename] = { - 'filepath': audio_track_filepath, - 'num_samples': signal_processing.SignalProcessingUtils.CountSamples( - signal) - } - - def tearDown(self): - """Recursively deletes temporary folders.""" - shutil.rmtree(self._tmp_path) - - def testCheckMixSameDuration(self): - """Checks the duration when mixing capture and echo with same duration.""" - mix_filepath = input_mixer.ApmInputMixer.Mix( - self._tmp_path, - self._audio_tracks['capture']['filepath'], - self._audio_tracks['echo_1']['filepath']) - self.assertTrue(os.path.exists(mix_filepath)) - - mix = 
signal_processing.SignalProcessingUtils.LoadWav(mix_filepath) - self.assertEqual(self._audio_tracks['capture']['num_samples'], - signal_processing.SignalProcessingUtils.CountSamples(mix)) - - def testRejectShorterEcho(self): - """Rejects echo signals that are shorter than the capture signal.""" - try: - _ = input_mixer.ApmInputMixer.Mix( - self._tmp_path, - self._audio_tracks['capture']['filepath'], - self._audio_tracks['shorter']['filepath']) - self.fail('no exception raised') - except exceptions.InputMixerException: - pass - - def testCheckMixDurationWithLongerEcho(self): - """Checks the duration when mixing an echo longer than the capture.""" - mix_filepath = input_mixer.ApmInputMixer.Mix( - self._tmp_path, - self._audio_tracks['capture']['filepath'], - self._audio_tracks['longer']['filepath']) - self.assertTrue(os.path.exists(mix_filepath)) - - mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath) - self.assertEqual(self._audio_tracks['capture']['num_samples'], - signal_processing.SignalProcessingUtils.CountSamples(mix)) - - def testCheckOutputFileNamesConflict(self): - """Checks that different echo files lead to different output file names.""" - mix1_filepath = input_mixer.ApmInputMixer.Mix( - self._tmp_path, - self._audio_tracks['capture']['filepath'], - self._audio_tracks['echo_1']['filepath']) - self.assertTrue(os.path.exists(mix1_filepath)) - - mix2_filepath = input_mixer.ApmInputMixer.Mix( - self._tmp_path, - self._audio_tracks['capture']['filepath'], - self._audio_tracks['echo_2']['filepath']) - self.assertTrue(os.path.exists(mix2_filepath)) - - self.assertNotEqual(mix1_filepath, mix2_filepath) - - def testHardClippingLogExpected(self): - """Checks that hard clipping warning is raised when occurring.""" - logging.warning = mock.MagicMock(name='warning') - _ = input_mixer.ApmInputMixer.Mix( - self._tmp_path, - self._audio_tracks['capture']['filepath'], - self._audio_tracks['echo_2']['filepath']) - logging.warning.assert_called_once_with( - 
input_mixer.ApmInputMixer.HardClippingLogMessage()) - - def testHardClippingLogNotExpected(self): - """Checks that hard clipping warning is not raised when not occurring.""" - logging.warning = mock.MagicMock(name='warning') - _ = input_mixer.ApmInputMixer.Mix( - self._tmp_path, - self._audio_tracks['capture']['filepath'], - self._audio_tracks['echo_1']['filepath']) - self.assertNotIn( - mock.call(input_mixer.ApmInputMixer.HardClippingLogMessage()), - logging.warning.call_args_list) + # Audio track file names created in setUp(). + _FILENAMES = ['capture', 'echo_1', 'echo_2', 'shorter', 'longer'] + + # Target peak power level (dBFS) of each audio track file created in setUp(). + # These values are hand-crafted in order to make saturation happen when + # capture and echo_2 are mixed and the contrary for capture and echo_1. + # None means that the power is not changed. + _MAX_PEAK_POWER_LEVELS = [-10.0, -5.0, 0.0, None, None] + + # Audio track file durations in milliseconds. + _DURATIONS = [1000, 1000, 1000, 800, 1200] + + _SAMPLE_RATE = 48000 + + def setUp(self): + """Creates temporary data.""" + self._tmp_path = tempfile.mkdtemp() + + # Create audio track files. + self._audio_tracks = {} + for filename, peak_power, duration in zip(self._FILENAMES, + self._MAX_PEAK_POWER_LEVELS, + self._DURATIONS): + audio_track_filepath = os.path.join(self._tmp_path, + '{}.wav'.format(filename)) + + # Create a pure tone with the target peak power level. 
+ template = signal_processing.SignalProcessingUtils.GenerateSilence( + duration=duration, sample_rate=self._SAMPLE_RATE) + signal = signal_processing.SignalProcessingUtils.GeneratePureTone( + template) + if peak_power is not None: + signal = signal.apply_gain(-signal.max_dBFS + peak_power) + + signal_processing.SignalProcessingUtils.SaveWav( + audio_track_filepath, signal) + self._audio_tracks[filename] = { + 'filepath': + audio_track_filepath, + 'num_samples': + signal_processing.SignalProcessingUtils.CountSamples(signal) + } + + def tearDown(self): + """Recursively deletes temporary folders.""" + shutil.rmtree(self._tmp_path) + + def testCheckMixSameDuration(self): + """Checks the duration when mixing capture and echo with same duration.""" + mix_filepath = input_mixer.ApmInputMixer.Mix( + self._tmp_path, self._audio_tracks['capture']['filepath'], + self._audio_tracks['echo_1']['filepath']) + self.assertTrue(os.path.exists(mix_filepath)) + + mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath) + self.assertEqual( + self._audio_tracks['capture']['num_samples'], + signal_processing.SignalProcessingUtils.CountSamples(mix)) + + def testRejectShorterEcho(self): + """Rejects echo signals that are shorter than the capture signal.""" + try: + _ = input_mixer.ApmInputMixer.Mix( + self._tmp_path, self._audio_tracks['capture']['filepath'], + self._audio_tracks['shorter']['filepath']) + self.fail('no exception raised') + except exceptions.InputMixerException: + pass + + def testCheckMixDurationWithLongerEcho(self): + """Checks the duration when mixing an echo longer than the capture.""" + mix_filepath = input_mixer.ApmInputMixer.Mix( + self._tmp_path, self._audio_tracks['capture']['filepath'], + self._audio_tracks['longer']['filepath']) + self.assertTrue(os.path.exists(mix_filepath)) + + mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath) + self.assertEqual( + self._audio_tracks['capture']['num_samples'], + 
signal_processing.SignalProcessingUtils.CountSamples(mix)) + + def testCheckOutputFileNamesConflict(self): + """Checks that different echo files lead to different output file names.""" + mix1_filepath = input_mixer.ApmInputMixer.Mix( + self._tmp_path, self._audio_tracks['capture']['filepath'], + self._audio_tracks['echo_1']['filepath']) + self.assertTrue(os.path.exists(mix1_filepath)) + + mix2_filepath = input_mixer.ApmInputMixer.Mix( + self._tmp_path, self._audio_tracks['capture']['filepath'], + self._audio_tracks['echo_2']['filepath']) + self.assertTrue(os.path.exists(mix2_filepath)) + + self.assertNotEqual(mix1_filepath, mix2_filepath) + + def testHardClippingLogExpected(self): + """Checks that hard clipping warning is raised when occurring.""" + logging.warning = mock.MagicMock(name='warning') + _ = input_mixer.ApmInputMixer.Mix( + self._tmp_path, self._audio_tracks['capture']['filepath'], + self._audio_tracks['echo_2']['filepath']) + logging.warning.assert_called_once_with( + input_mixer.ApmInputMixer.HardClippingLogMessage()) + + def testHardClippingLogNotExpected(self): + """Checks that hard clipping warning is not raised when not occurring.""" + logging.warning = mock.MagicMock(name='warning') + _ = input_mixer.ApmInputMixer.Mix( + self._tmp_path, self._audio_tracks['capture']['filepath'], + self._audio_tracks['echo_1']['filepath']) + self.assertNotIn( + mock.call(input_mixer.ApmInputMixer.HardClippingLogMessage()), + logging.warning.call_args_list) diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_signal_creator.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_signal_creator.py index 1feec47b4c..b64fdcca89 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_signal_creator.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/input_signal_creator.py @@ -5,7 +5,6 @@ # tree. 
An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Input signal creator module. """ @@ -14,12 +13,12 @@ class InputSignalCreator(object): - """Input signal creator class. + """Input signal creator class. """ - @classmethod - def Create(cls, name, raw_params): - """Creates a input signal and its metadata. + @classmethod + def Create(cls, name, raw_params): + """Creates a input signal and its metadata. Args: name: Input signal creator name. @@ -28,29 +27,30 @@ def Create(cls, name, raw_params): Returns: (AudioSegment, dict) tuple. """ - try: - signal = {} - params = {} + try: + signal = {} + params = {} - if name == 'pure_tone': - params['frequency'] = float(raw_params[0]) - params['duration'] = int(raw_params[1]) - signal = cls._CreatePureTone(params['frequency'], params['duration']) - else: - raise exceptions.InputSignalCreatorException( - 'Invalid input signal creator name') + if name == 'pure_tone': + params['frequency'] = float(raw_params[0]) + params['duration'] = int(raw_params[1]) + signal = cls._CreatePureTone(params['frequency'], + params['duration']) + else: + raise exceptions.InputSignalCreatorException( + 'Invalid input signal creator name') - # Complete metadata. - params['signal'] = name + # Complete metadata. + params['signal'] = name - return signal, params - except (TypeError, AssertionError) as e: - raise exceptions.InputSignalCreatorException( - 'Invalid signal creator parameters: {}'.format(e)) + return signal, params + except (TypeError, AssertionError) as e: + raise exceptions.InputSignalCreatorException( + 'Invalid signal creator parameters: {}'.format(e)) - @classmethod - def _CreatePureTone(cls, frequency, duration): - """ + @classmethod + def _CreatePureTone(cls, frequency, duration): + """ Generates a pure tone at 48000 Hz. 
Args: @@ -60,8 +60,9 @@ def _CreatePureTone(cls, frequency, duration): Returns: AudioSegment instance. """ - assert 0 < frequency <= 24000 - assert duration > 0 - template = signal_processing.SignalProcessingUtils.GenerateSilence(duration) - return signal_processing.SignalProcessingUtils.GeneratePureTone( - template, frequency) + assert 0 < frequency <= 24000 + assert duration > 0 + template = signal_processing.SignalProcessingUtils.GenerateSilence( + duration) + return signal_processing.SignalProcessingUtils.GeneratePureTone( + template, frequency) diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py index fd731fd19a..e41637cd8d 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Signal processing utility module. 
""" @@ -16,44 +15,44 @@ import enum try: - import numpy as np + import numpy as np except ImportError: - logging.critical('Cannot import the third-party Python package numpy') - sys.exit(1) + logging.critical('Cannot import the third-party Python package numpy') + sys.exit(1) try: - import pydub - import pydub.generators + import pydub + import pydub.generators except ImportError: - logging.critical('Cannot import the third-party Python package pydub') - sys.exit(1) + logging.critical('Cannot import the third-party Python package pydub') + sys.exit(1) try: - import scipy.signal - import scipy.fftpack + import scipy.signal + import scipy.fftpack except ImportError: - logging.critical('Cannot import the third-party Python package scipy') - sys.exit(1) + logging.critical('Cannot import the third-party Python package scipy') + sys.exit(1) from . import exceptions class SignalProcessingUtils(object): - """Collection of signal processing utilities. + """Collection of signal processing utilities. """ - @enum.unique - class MixPadding(enum.Enum): - NO_PADDING = 0 - ZERO_PADDING = 1 - LOOP = 2 + @enum.unique + class MixPadding(enum.Enum): + NO_PADDING = 0 + ZERO_PADDING = 1 + LOOP = 2 - def __init__(self): - pass + def __init__(self): + pass - @classmethod - def LoadWav(cls, filepath, channels=1): - """Loads wav file. + @classmethod + def LoadWav(cls, filepath, channels=1): + """Loads wav file. Args: filepath: path to the wav audio track file to load. @@ -62,25 +61,26 @@ def LoadWav(cls, filepath, channels=1): Returns: AudioSegment instance. 
""" - if not os.path.exists(filepath): - logging.error('cannot find the <%s> audio track file', filepath) - raise exceptions.FileNotFoundError() - return pydub.AudioSegment.from_file( - filepath, format='wav', channels=channels) + if not os.path.exists(filepath): + logging.error('cannot find the <%s> audio track file', filepath) + raise exceptions.FileNotFoundError() + return pydub.AudioSegment.from_file(filepath, + format='wav', + channels=channels) - @classmethod - def SaveWav(cls, output_filepath, signal): - """Saves wav file. + @classmethod + def SaveWav(cls, output_filepath, signal): + """Saves wav file. Args: output_filepath: path to the wav audio track file to save. signal: AudioSegment instance. """ - return signal.export(output_filepath, format='wav') + return signal.export(output_filepath, format='wav') - @classmethod - def CountSamples(cls, signal): - """Number of samples per channel. + @classmethod + def CountSamples(cls, signal): + """Number of samples per channel. Args: signal: AudioSegment instance. @@ -88,14 +88,14 @@ def CountSamples(cls, signal): Returns: An integer. """ - number_of_samples = len(signal.get_array_of_samples()) - assert signal.channels > 0 - assert number_of_samples % signal.channels == 0 - return number_of_samples / signal.channels + number_of_samples = len(signal.get_array_of_samples()) + assert signal.channels > 0 + assert number_of_samples % signal.channels == 0 + return number_of_samples / signal.channels - @classmethod - def GenerateSilence(cls, duration=1000, sample_rate=48000): - """Generates silence. + @classmethod + def GenerateSilence(cls, duration=1000, sample_rate=48000): + """Generates silence. This method can also be used to create a template AudioSegment instance. A template can then be used with other Generate*() methods accepting an @@ -108,11 +108,11 @@ def GenerateSilence(cls, duration=1000, sample_rate=48000): Returns: AudioSegment instance. 
""" - return pydub.AudioSegment.silent(duration, sample_rate) + return pydub.AudioSegment.silent(duration, sample_rate) - @classmethod - def GeneratePureTone(cls, template, frequency=440.0): - """Generates a pure tone. + @classmethod + def GeneratePureTone(cls, template, frequency=440.0): + """Generates a pure tone. The pure tone is generated with the same duration and in the same format of the given template signal. @@ -124,21 +124,18 @@ def GeneratePureTone(cls, template, frequency=440.0): Return: AudioSegment instance. """ - if frequency > template.frame_rate >> 1: - raise exceptions.SignalProcessingException('Invalid frequency') + if frequency > template.frame_rate >> 1: + raise exceptions.SignalProcessingException('Invalid frequency') - generator = pydub.generators.Sine( - sample_rate=template.frame_rate, - bit_depth=template.sample_width * 8, - freq=frequency) + generator = pydub.generators.Sine(sample_rate=template.frame_rate, + bit_depth=template.sample_width * 8, + freq=frequency) - return generator.to_audio_segment( - duration=len(template), - volume=0.0) + return generator.to_audio_segment(duration=len(template), volume=0.0) - @classmethod - def GenerateWhiteNoise(cls, template): - """Generates white noise. + @classmethod + def GenerateWhiteNoise(cls, template): + """Generates white noise. The white noise is generated with the same duration and in the same format of the given template signal. @@ -149,33 +146,32 @@ def GenerateWhiteNoise(cls, template): Return: AudioSegment instance. 
""" - generator = pydub.generators.WhiteNoise( - sample_rate=template.frame_rate, - bit_depth=template.sample_width * 8) - return generator.to_audio_segment( - duration=len(template), - volume=0.0) - - @classmethod - def AudioSegmentToRawData(cls, signal): - samples = signal.get_array_of_samples() - if samples.typecode != 'h': - raise exceptions.SignalProcessingException('Unsupported samples type') - return np.array(signal.get_array_of_samples(), np.int16) - - @classmethod - def Fft(cls, signal, normalize=True): - if signal.channels != 1: - raise NotImplementedError('multiple-channel FFT not implemented') - x = cls.AudioSegmentToRawData(signal).astype(np.float32) - if normalize: - x /= max(abs(np.max(x)), 1.0) - y = scipy.fftpack.fft(x) - return y[:len(y) / 2] - - @classmethod - def DetectHardClipping(cls, signal, threshold=2): - """Detects hard clipping. + generator = pydub.generators.WhiteNoise( + sample_rate=template.frame_rate, + bit_depth=template.sample_width * 8) + return generator.to_audio_segment(duration=len(template), volume=0.0) + + @classmethod + def AudioSegmentToRawData(cls, signal): + samples = signal.get_array_of_samples() + if samples.typecode != 'h': + raise exceptions.SignalProcessingException( + 'Unsupported samples type') + return np.array(signal.get_array_of_samples(), np.int16) + + @classmethod + def Fft(cls, signal, normalize=True): + if signal.channels != 1: + raise NotImplementedError('multiple-channel FFT not implemented') + x = cls.AudioSegmentToRawData(signal).astype(np.float32) + if normalize: + x /= max(abs(np.max(x)), 1.0) + y = scipy.fftpack.fft(x) + return y[:len(y) / 2] + + @classmethod + def DetectHardClipping(cls, signal, threshold=2): + """Detects hard clipping. Hard clipping is simply detected by counting samples that touch either the lower or upper bound too many times in a row (according to |threshold|). 
@@ -189,32 +185,33 @@ def DetectHardClipping(cls, signal, threshold=2): Returns: True if hard clipping is detect, False otherwise. """ - if signal.channels != 1: - raise NotImplementedError('multiple-channel clipping not implemented') - if signal.sample_width != 2: # Note that signal.sample_width is in bytes. - raise exceptions.SignalProcessingException( - 'hard-clipping detection only supported for 16 bit samples') - samples = cls.AudioSegmentToRawData(signal) - - # Detect adjacent clipped samples. - samples_type_info = np.iinfo(samples.dtype) - mask_min = samples == samples_type_info.min - mask_max = samples == samples_type_info.max - - def HasLongSequence(vector, min_legth=threshold): - """Returns True if there are one or more long sequences of True flags.""" - seq_length = 0 - for b in vector: - seq_length = seq_length + 1 if b else 0 - if seq_length >= min_legth: - return True - return False - - return HasLongSequence(mask_min) or HasLongSequence(mask_max) - - @classmethod - def ApplyImpulseResponse(cls, signal, impulse_response): - """Applies an impulse response to a signal. + if signal.channels != 1: + raise NotImplementedError( + 'multiple-channel clipping not implemented') + if signal.sample_width != 2: # Note that signal.sample_width is in bytes. + raise exceptions.SignalProcessingException( + 'hard-clipping detection only supported for 16 bit samples') + samples = cls.AudioSegmentToRawData(signal) + + # Detect adjacent clipped samples. 
+ samples_type_info = np.iinfo(samples.dtype) + mask_min = samples == samples_type_info.min + mask_max = samples == samples_type_info.max + + def HasLongSequence(vector, min_legth=threshold): + """Returns True if there are one or more long sequences of True flags.""" + seq_length = 0 + for b in vector: + seq_length = seq_length + 1 if b else 0 + if seq_length >= min_legth: + return True + return False + + return HasLongSequence(mask_min) or HasLongSequence(mask_max) + + @classmethod + def ApplyImpulseResponse(cls, signal, impulse_response): + """Applies an impulse response to a signal. Args: signal: AudioSegment instance. @@ -223,44 +220,48 @@ def ApplyImpulseResponse(cls, signal, impulse_response): Returns: AudioSegment instance. """ - # Get samples. - assert signal.channels == 1, ( - 'multiple-channel recordings not supported') - samples = signal.get_array_of_samples() - - # Convolve. - logging.info('applying %d order impulse response to a signal lasting %d ms', - len(impulse_response), len(signal)) - convolved_samples = scipy.signal.fftconvolve( - in1=samples, - in2=impulse_response, - mode='full').astype(np.int16) - logging.info('convolution computed') - - # Cast. - convolved_samples = array.array(signal.array_type, convolved_samples) - - # Verify. - logging.debug('signal length: %d samples', len(samples)) - logging.debug('convolved signal length: %d samples', len(convolved_samples)) - assert len(convolved_samples) > len(samples) - - # Generate convolved signal AudioSegment instance. - convolved_signal = pydub.AudioSegment( - data=convolved_samples, - metadata={ - 'sample_width': signal.sample_width, - 'frame_rate': signal.frame_rate, - 'frame_width': signal.frame_width, - 'channels': signal.channels, - }) - assert len(convolved_signal) > len(signal) - - return convolved_signal - - @classmethod - def Normalize(cls, signal): - """Normalizes a signal. + # Get samples. 
+ assert signal.channels == 1, ( + 'multiple-channel recordings not supported') + samples = signal.get_array_of_samples() + + # Convolve. + logging.info( + 'applying %d order impulse response to a signal lasting %d ms', + len(impulse_response), len(signal)) + convolved_samples = scipy.signal.fftconvolve(in1=samples, + in2=impulse_response, + mode='full').astype( + np.int16) + logging.info('convolution computed') + + # Cast. + convolved_samples = array.array(signal.array_type, convolved_samples) + + # Verify. + logging.debug('signal length: %d samples', len(samples)) + logging.debug('convolved signal length: %d samples', + len(convolved_samples)) + assert len(convolved_samples) > len(samples) + + # Generate convolved signal AudioSegment instance. + convolved_signal = pydub.AudioSegment(data=convolved_samples, + metadata={ + 'sample_width': + signal.sample_width, + 'frame_rate': + signal.frame_rate, + 'frame_width': + signal.frame_width, + 'channels': signal.channels, + }) + assert len(convolved_signal) > len(signal) + + return convolved_signal + + @classmethod + def Normalize(cls, signal): + """Normalizes a signal. Args: signal: AudioSegment instance. @@ -268,11 +269,11 @@ def Normalize(cls, signal): Returns: An AudioSegment instance. """ - return signal.apply_gain(-signal.max_dBFS) + return signal.apply_gain(-signal.max_dBFS) - @classmethod - def Copy(cls, signal): - """Makes a copy os a signal. + @classmethod + def Copy(cls, signal): + """Makes a copy os a signal. Args: signal: AudioSegment instance. @@ -280,19 +281,21 @@ def Copy(cls, signal): Returns: An AudioSegment instance. """ - return pydub.AudioSegment( - data=signal.get_array_of_samples(), - metadata={ - 'sample_width': signal.sample_width, - 'frame_rate': signal.frame_rate, - 'frame_width': signal.frame_width, - 'channels': signal.channels, - }) - - @classmethod - def MixSignals(cls, signal, noise, target_snr=0.0, - pad_noise=MixPadding.NO_PADDING): - """Mixes |signal| and |noise| with a target SNR. 
+ return pydub.AudioSegment(data=signal.get_array_of_samples(), + metadata={ + 'sample_width': signal.sample_width, + 'frame_rate': signal.frame_rate, + 'frame_width': signal.frame_width, + 'channels': signal.channels, + }) + + @classmethod + def MixSignals(cls, + signal, + noise, + target_snr=0.0, + pad_noise=MixPadding.NO_PADDING): + """Mixes |signal| and |noise| with a target SNR. Mix |signal| and |noise| with a desired SNR by scaling |noise|. If the target SNR is +/- infinite, a copy of signal/noise is returned. @@ -312,45 +315,45 @@ def MixSignals(cls, signal, noise, target_snr=0.0, Returns: An AudioSegment instance. """ - # Handle infinite target SNR. - if target_snr == -np.Inf: - # Return a copy of noise. - logging.warning('SNR = -Inf, returning noise') - return cls.Copy(noise) - elif target_snr == np.Inf: - # Return a copy of signal. - logging.warning('SNR = +Inf, returning signal') - return cls.Copy(signal) - - # Check signal and noise power. - signal_power = float(signal.dBFS) - noise_power = float(noise.dBFS) - if signal_power == -np.Inf: - logging.error('signal has -Inf power, cannot mix') - raise exceptions.SignalProcessingException( - 'cannot mix a signal with -Inf power') - if noise_power == -np.Inf: - logging.error('noise has -Inf power, cannot mix') - raise exceptions.SignalProcessingException( - 'cannot mix a signal with -Inf power') - - # Mix. - gain_db = signal_power - noise_power - target_snr - signal_duration = len(signal) - noise_duration = len(noise) - if signal_duration <= noise_duration: - # Ignore |pad_noise|, |noise| is truncated if longer that |signal|, the - # mix will have the same length of |signal|. - return signal.overlay(noise.apply_gain(gain_db)) - elif pad_noise == cls.MixPadding.NO_PADDING: - # |signal| is longer than |noise|, but no padding is applied to |noise|. - # Truncate |signal|. 
- return noise.overlay(signal, gain_during_overlay=gain_db) - elif pad_noise == cls.MixPadding.ZERO_PADDING: - # TODO(alessiob): Check that this works as expected. - return signal.overlay(noise.apply_gain(gain_db)) - elif pad_noise == cls.MixPadding.LOOP: - # |signal| is longer than |noise|, extend |noise| by looping. - return signal.overlay(noise.apply_gain(gain_db), loop=True) - else: - raise exceptions.SignalProcessingException('invalid padding type') + # Handle infinite target SNR. + if target_snr == -np.Inf: + # Return a copy of noise. + logging.warning('SNR = -Inf, returning noise') + return cls.Copy(noise) + elif target_snr == np.Inf: + # Return a copy of signal. + logging.warning('SNR = +Inf, returning signal') + return cls.Copy(signal) + + # Check signal and noise power. + signal_power = float(signal.dBFS) + noise_power = float(noise.dBFS) + if signal_power == -np.Inf: + logging.error('signal has -Inf power, cannot mix') + raise exceptions.SignalProcessingException( + 'cannot mix a signal with -Inf power') + if noise_power == -np.Inf: + logging.error('noise has -Inf power, cannot mix') + raise exceptions.SignalProcessingException( + 'cannot mix a signal with -Inf power') + + # Mix. + gain_db = signal_power - noise_power - target_snr + signal_duration = len(signal) + noise_duration = len(noise) + if signal_duration <= noise_duration: + # Ignore |pad_noise|, |noise| is truncated if longer that |signal|, the + # mix will have the same length of |signal|. + return signal.overlay(noise.apply_gain(gain_db)) + elif pad_noise == cls.MixPadding.NO_PADDING: + # |signal| is longer than |noise|, but no padding is applied to |noise|. + # Truncate |signal|. + return noise.overlay(signal, gain_during_overlay=gain_db) + elif pad_noise == cls.MixPadding.ZERO_PADDING: + # TODO(alessiob): Check that this works as expected. 
+ return signal.overlay(noise.apply_gain(gain_db)) + elif pad_noise == cls.MixPadding.LOOP: + # |signal| is longer than |noise|, extend |noise| by looping. + return signal.overlay(noise.apply_gain(gain_db), loop=True) + else: + raise exceptions.SignalProcessingException('invalid padding type') diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing_unittest.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing_unittest.py index 30ada41fb9..881fb66800 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing_unittest.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing_unittest.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Unit tests for the signal_processing module. """ @@ -19,168 +18,166 @@ class TestSignalProcessing(unittest.TestCase): - """Unit tests for the signal_processing module. + """Unit tests for the signal_processing module. """ - def testMixSignals(self): - # Generate a template signal with which white noise can be generated. - silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000) - - # Generate two distinct AudioSegment instances with 1 second of white noise. - signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( - silence) - noise = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( - silence) - - # Extract samples. - signal_samples = signal.get_array_of_samples() - noise_samples = noise.get_array_of_samples() - - # Test target SNR -Inf (noise expected). - mix_neg_inf = signal_processing.SignalProcessingUtils.MixSignals( - signal, noise, -np.Inf) - self.assertTrue(len(noise), len(mix_neg_inf)) # Check duration. 
- mix_neg_inf_samples = mix_neg_inf.get_array_of_samples() - self.assertTrue( # Check samples. - all([x == y for x, y in zip(noise_samples, mix_neg_inf_samples)])) - - # Test target SNR 0.0 (different data expected). - mix_0 = signal_processing.SignalProcessingUtils.MixSignals( - signal, noise, 0.0) - self.assertTrue(len(signal), len(mix_0)) # Check duration. - self.assertTrue(len(noise), len(mix_0)) - mix_0_samples = mix_0.get_array_of_samples() - self.assertTrue( - any([x != y for x, y in zip(signal_samples, mix_0_samples)])) - self.assertTrue( - any([x != y for x, y in zip(noise_samples, mix_0_samples)])) - - # Test target SNR +Inf (signal expected). - mix_pos_inf = signal_processing.SignalProcessingUtils.MixSignals( - signal, noise, np.Inf) - self.assertTrue(len(signal), len(mix_pos_inf)) # Check duration. - mix_pos_inf_samples = mix_pos_inf.get_array_of_samples() - self.assertTrue( # Check samples. - all([x == y for x, y in zip(signal_samples, mix_pos_inf_samples)])) - - def testMixSignalsMinInfPower(self): - silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000) - signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( - silence) - - with self.assertRaises(exceptions.SignalProcessingException): - _ = signal_processing.SignalProcessingUtils.MixSignals( - signal, silence, 0.0) - - with self.assertRaises(exceptions.SignalProcessingException): - _ = signal_processing.SignalProcessingUtils.MixSignals( - silence, signal, 0.0) - - def testMixSignalNoiseDifferentLengths(self): - # Test signals. - shorter = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( - pydub.AudioSegment.silent(duration=1000, frame_rate=8000)) - longer = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( - pydub.AudioSegment.silent(duration=2000, frame_rate=8000)) - - # When the signal is shorter than the noise, the mix length always equals - # that of the signal regardless of whether padding is applied. 
- # No noise padding, length of signal less than that of noise. - mix = signal_processing.SignalProcessingUtils.MixSignals( - signal=shorter, - noise=longer, - pad_noise=signal_processing.SignalProcessingUtils.MixPadding.NO_PADDING) - self.assertEqual(len(shorter), len(mix)) - # With noise padding, length of signal less than that of noise. - mix = signal_processing.SignalProcessingUtils.MixSignals( - signal=shorter, - noise=longer, - pad_noise=signal_processing.SignalProcessingUtils.MixPadding.ZERO_PADDING) - self.assertEqual(len(shorter), len(mix)) - - # When the signal is longer than the noise, the mix length depends on - # whether padding is applied. - # No noise padding, length of signal greater than that of noise. - mix = signal_processing.SignalProcessingUtils.MixSignals( - signal=longer, - noise=shorter, - pad_noise=signal_processing.SignalProcessingUtils.MixPadding.NO_PADDING) - self.assertEqual(len(shorter), len(mix)) - # With noise padding, length of signal greater than that of noise. - mix = signal_processing.SignalProcessingUtils.MixSignals( - signal=longer, - noise=shorter, - pad_noise=signal_processing.SignalProcessingUtils.MixPadding.ZERO_PADDING) - self.assertEqual(len(longer), len(mix)) - - def testMixSignalNoisePaddingTypes(self): - # Test signals. - shorter = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( - pydub.AudioSegment.silent(duration=1000, frame_rate=8000)) - longer = signal_processing.SignalProcessingUtils.GeneratePureTone( - pydub.AudioSegment.silent(duration=2000, frame_rate=8000), 440.0) - - # Zero padding: expect pure tone only in 1-2s. - mix_zero_pad = signal_processing.SignalProcessingUtils.MixSignals( - signal=longer, - noise=shorter, - target_snr=-6, - pad_noise=signal_processing.SignalProcessingUtils.MixPadding.ZERO_PADDING) - - # Loop: expect pure tone plus noise in 1-2s. 
- mix_loop = signal_processing.SignalProcessingUtils.MixSignals( - signal=longer, - noise=shorter, - target_snr=-6, - pad_noise=signal_processing.SignalProcessingUtils.MixPadding.LOOP) - - def Energy(signal): - samples = signal_processing.SignalProcessingUtils.AudioSegmentToRawData( - signal).astype(np.float32) - return np.sum(samples * samples) - - e_mix_zero_pad = Energy(mix_zero_pad[-1000:]) - e_mix_loop = Energy(mix_loop[-1000:]) - self.assertLess(0, e_mix_zero_pad) - self.assertLess(e_mix_zero_pad, e_mix_loop) - - def testMixSignalSnr(self): - # Test signals. - tone_low = signal_processing.SignalProcessingUtils.GeneratePureTone( - pydub.AudioSegment.silent(duration=64, frame_rate=8000), 250.0) - tone_high = signal_processing.SignalProcessingUtils.GeneratePureTone( - pydub.AudioSegment.silent(duration=64, frame_rate=8000), 3000.0) - - def ToneAmplitudes(mix): - """Returns the amplitude of the coefficients #16 and #192, which + def testMixSignals(self): + # Generate a template signal with which white noise can be generated. + silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000) + + # Generate two distinct AudioSegment instances with 1 second of white noise. + signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( + silence) + noise = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( + silence) + + # Extract samples. + signal_samples = signal.get_array_of_samples() + noise_samples = noise.get_array_of_samples() + + # Test target SNR -Inf (noise expected). + mix_neg_inf = signal_processing.SignalProcessingUtils.MixSignals( + signal, noise, -np.Inf) + self.assertTrue(len(noise), len(mix_neg_inf)) # Check duration. + mix_neg_inf_samples = mix_neg_inf.get_array_of_samples() + self.assertTrue( # Check samples. + all([x == y for x, y in zip(noise_samples, mix_neg_inf_samples)])) + + # Test target SNR 0.0 (different data expected). 
+ mix_0 = signal_processing.SignalProcessingUtils.MixSignals( + signal, noise, 0.0) + self.assertTrue(len(signal), len(mix_0)) # Check duration. + self.assertTrue(len(noise), len(mix_0)) + mix_0_samples = mix_0.get_array_of_samples() + self.assertTrue( + any([x != y for x, y in zip(signal_samples, mix_0_samples)])) + self.assertTrue( + any([x != y for x, y in zip(noise_samples, mix_0_samples)])) + + # Test target SNR +Inf (signal expected). + mix_pos_inf = signal_processing.SignalProcessingUtils.MixSignals( + signal, noise, np.Inf) + self.assertTrue(len(signal), len(mix_pos_inf)) # Check duration. + mix_pos_inf_samples = mix_pos_inf.get_array_of_samples() + self.assertTrue( # Check samples. + all([x == y for x, y in zip(signal_samples, mix_pos_inf_samples)])) + + def testMixSignalsMinInfPower(self): + silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000) + signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( + silence) + + with self.assertRaises(exceptions.SignalProcessingException): + _ = signal_processing.SignalProcessingUtils.MixSignals( + signal, silence, 0.0) + + with self.assertRaises(exceptions.SignalProcessingException): + _ = signal_processing.SignalProcessingUtils.MixSignals( + silence, signal, 0.0) + + def testMixSignalNoiseDifferentLengths(self): + # Test signals. + shorter = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( + pydub.AudioSegment.silent(duration=1000, frame_rate=8000)) + longer = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( + pydub.AudioSegment.silent(duration=2000, frame_rate=8000)) + + # When the signal is shorter than the noise, the mix length always equals + # that of the signal regardless of whether padding is applied. + # No noise padding, length of signal less than that of noise. + mix = signal_processing.SignalProcessingUtils.MixSignals( + signal=shorter, + noise=longer, + pad_noise=signal_processing.SignalProcessingUtils.MixPadding. 
+ NO_PADDING) + self.assertEqual(len(shorter), len(mix)) + # With noise padding, length of signal less than that of noise. + mix = signal_processing.SignalProcessingUtils.MixSignals( + signal=shorter, + noise=longer, + pad_noise=signal_processing.SignalProcessingUtils.MixPadding. + ZERO_PADDING) + self.assertEqual(len(shorter), len(mix)) + + # When the signal is longer than the noise, the mix length depends on + # whether padding is applied. + # No noise padding, length of signal greater than that of noise. + mix = signal_processing.SignalProcessingUtils.MixSignals( + signal=longer, + noise=shorter, + pad_noise=signal_processing.SignalProcessingUtils.MixPadding. + NO_PADDING) + self.assertEqual(len(shorter), len(mix)) + # With noise padding, length of signal greater than that of noise. + mix = signal_processing.SignalProcessingUtils.MixSignals( + signal=longer, + noise=shorter, + pad_noise=signal_processing.SignalProcessingUtils.MixPadding. + ZERO_PADDING) + self.assertEqual(len(longer), len(mix)) + + def testMixSignalNoisePaddingTypes(self): + # Test signals. + shorter = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( + pydub.AudioSegment.silent(duration=1000, frame_rate=8000)) + longer = signal_processing.SignalProcessingUtils.GeneratePureTone( + pydub.AudioSegment.silent(duration=2000, frame_rate=8000), 440.0) + + # Zero padding: expect pure tone only in 1-2s. + mix_zero_pad = signal_processing.SignalProcessingUtils.MixSignals( + signal=longer, + noise=shorter, + target_snr=-6, + pad_noise=signal_processing.SignalProcessingUtils.MixPadding. + ZERO_PADDING) + + # Loop: expect pure tone plus noise in 1-2s. 
+ mix_loop = signal_processing.SignalProcessingUtils.MixSignals( + signal=longer, + noise=shorter, + target_snr=-6, + pad_noise=signal_processing.SignalProcessingUtils.MixPadding.LOOP) + + def Energy(signal): + samples = signal_processing.SignalProcessingUtils.AudioSegmentToRawData( + signal).astype(np.float32) + return np.sum(samples * samples) + + e_mix_zero_pad = Energy(mix_zero_pad[-1000:]) + e_mix_loop = Energy(mix_loop[-1000:]) + self.assertLess(0, e_mix_zero_pad) + self.assertLess(e_mix_zero_pad, e_mix_loop) + + def testMixSignalSnr(self): + # Test signals. + tone_low = signal_processing.SignalProcessingUtils.GeneratePureTone( + pydub.AudioSegment.silent(duration=64, frame_rate=8000), 250.0) + tone_high = signal_processing.SignalProcessingUtils.GeneratePureTone( + pydub.AudioSegment.silent(duration=64, frame_rate=8000), 3000.0) + + def ToneAmplitudes(mix): + """Returns the amplitude of the coefficients #16 and #192, which correspond to the tones at 250 and 3k Hz respectively.""" - mix_fft = np.absolute(signal_processing.SignalProcessingUtils.Fft(mix)) - return mix_fft[16], mix_fft[192] - - mix = signal_processing.SignalProcessingUtils.MixSignals( - signal=tone_low, - noise=tone_high, - target_snr=-6) - ampl_low, ampl_high = ToneAmplitudes(mix) - self.assertLess(ampl_low, ampl_high) - - mix = signal_processing.SignalProcessingUtils.MixSignals( - signal=tone_high, - noise=tone_low, - target_snr=-6) - ampl_low, ampl_high = ToneAmplitudes(mix) - self.assertLess(ampl_high, ampl_low) - - mix = signal_processing.SignalProcessingUtils.MixSignals( - signal=tone_low, - noise=tone_high, - target_snr=6) - ampl_low, ampl_high = ToneAmplitudes(mix) - self.assertLess(ampl_high, ampl_low) - - mix = signal_processing.SignalProcessingUtils.MixSignals( - signal=tone_high, - noise=tone_low, - target_snr=6) - ampl_low, ampl_high = ToneAmplitudes(mix) - self.assertLess(ampl_low, ampl_high) + mix_fft = np.absolute( + signal_processing.SignalProcessingUtils.Fft(mix)) + return 
mix_fft[16], mix_fft[192] + + mix = signal_processing.SignalProcessingUtils.MixSignals( + signal=tone_low, noise=tone_high, target_snr=-6) + ampl_low, ampl_high = ToneAmplitudes(mix) + self.assertLess(ampl_low, ampl_high) + + mix = signal_processing.SignalProcessingUtils.MixSignals( + signal=tone_high, noise=tone_low, target_snr=-6) + ampl_low, ampl_high = ToneAmplitudes(mix) + self.assertLess(ampl_high, ampl_low) + + mix = signal_processing.SignalProcessingUtils.MixSignals( + signal=tone_low, noise=tone_high, target_snr=6) + ampl_low, ampl_high = ToneAmplitudes(mix) + self.assertLess(ampl_high, ampl_low) + + mix = signal_processing.SignalProcessingUtils.MixSignals( + signal=tone_high, noise=tone_low, target_snr=6) + ampl_low, ampl_high = ToneAmplitudes(mix) + self.assertLess(ampl_low, ampl_high) diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py index 37db2efc27..fe30c9c44c 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """APM module simulator. """ @@ -25,85 +24,93 @@ class ApmModuleSimulator(object): - """Audio processing module (APM) simulator class. + """Audio processing module (APM) simulator class. 
""" - _TEST_DATA_GENERATOR_CLASSES = ( - test_data_generation.TestDataGenerator.REGISTERED_CLASSES) - _EVAL_SCORE_WORKER_CLASSES = eval_scores.EvaluationScore.REGISTERED_CLASSES - - _PREFIX_APM_CONFIG = 'apmcfg-' - _PREFIX_CAPTURE = 'capture-' - _PREFIX_RENDER = 'render-' - _PREFIX_ECHO_SIMULATOR = 'echosim-' - _PREFIX_TEST_DATA_GEN = 'datagen-' - _PREFIX_TEST_DATA_GEN_PARAMS = 'datagen_params-' - _PREFIX_SCORE = 'score-' - - def __init__(self, test_data_generator_factory, evaluation_score_factory, - ap_wrapper, evaluator, external_vads=None): - if external_vads is None: - external_vads = {} - self._test_data_generator_factory = test_data_generator_factory - self._evaluation_score_factory = evaluation_score_factory - self._audioproc_wrapper = ap_wrapper - self._evaluator = evaluator - self._annotator = annotations.AudioAnnotationsExtractor( - annotations.AudioAnnotationsExtractor.VadType.ENERGY_THRESHOLD | - annotations.AudioAnnotationsExtractor.VadType.WEBRTC_COMMON_AUDIO | - annotations.AudioAnnotationsExtractor.VadType.WEBRTC_APM, - external_vads - ) - - # Init. - self._test_data_generator_factory.SetOutputDirectoryPrefix( - self._PREFIX_TEST_DATA_GEN_PARAMS) - self._evaluation_score_factory.SetScoreFilenamePrefix( - self._PREFIX_SCORE) - - # Properties for each run. 
- self._base_output_path = None - self._output_cache_path = None - self._test_data_generators = None - self._evaluation_score_workers = None - self._config_filepaths = None - self._capture_input_filepaths = None - self._render_input_filepaths = None - self._echo_path_simulator_class = None - - @classmethod - def GetPrefixApmConfig(cls): - return cls._PREFIX_APM_CONFIG - - @classmethod - def GetPrefixCapture(cls): - return cls._PREFIX_CAPTURE - - @classmethod - def GetPrefixRender(cls): - return cls._PREFIX_RENDER - - @classmethod - def GetPrefixEchoSimulator(cls): - return cls._PREFIX_ECHO_SIMULATOR - - @classmethod - def GetPrefixTestDataGenerator(cls): - return cls._PREFIX_TEST_DATA_GEN - - @classmethod - def GetPrefixTestDataGeneratorParameters(cls): - return cls._PREFIX_TEST_DATA_GEN_PARAMS - - @classmethod - def GetPrefixScore(cls): - return cls._PREFIX_SCORE - - def Run(self, config_filepaths, capture_input_filepaths, - test_data_generator_names, eval_score_names, output_dir, - render_input_filepaths=None, echo_path_simulator_name=( - echo_path_simulation.NoEchoPathSimulator.NAME)): - """Runs the APM simulation. 
+ _TEST_DATA_GENERATOR_CLASSES = ( + test_data_generation.TestDataGenerator.REGISTERED_CLASSES) + _EVAL_SCORE_WORKER_CLASSES = eval_scores.EvaluationScore.REGISTERED_CLASSES + + _PREFIX_APM_CONFIG = 'apmcfg-' + _PREFIX_CAPTURE = 'capture-' + _PREFIX_RENDER = 'render-' + _PREFIX_ECHO_SIMULATOR = 'echosim-' + _PREFIX_TEST_DATA_GEN = 'datagen-' + _PREFIX_TEST_DATA_GEN_PARAMS = 'datagen_params-' + _PREFIX_SCORE = 'score-' + + def __init__(self, + test_data_generator_factory, + evaluation_score_factory, + ap_wrapper, + evaluator, + external_vads=None): + if external_vads is None: + external_vads = {} + self._test_data_generator_factory = test_data_generator_factory + self._evaluation_score_factory = evaluation_score_factory + self._audioproc_wrapper = ap_wrapper + self._evaluator = evaluator + self._annotator = annotations.AudioAnnotationsExtractor( + annotations.AudioAnnotationsExtractor.VadType.ENERGY_THRESHOLD + | annotations.AudioAnnotationsExtractor.VadType.WEBRTC_COMMON_AUDIO + | annotations.AudioAnnotationsExtractor.VadType.WEBRTC_APM, + external_vads) + + # Init. + self._test_data_generator_factory.SetOutputDirectoryPrefix( + self._PREFIX_TEST_DATA_GEN_PARAMS) + self._evaluation_score_factory.SetScoreFilenamePrefix( + self._PREFIX_SCORE) + + # Properties for each run. 
+ self._base_output_path = None + self._output_cache_path = None + self._test_data_generators = None + self._evaluation_score_workers = None + self._config_filepaths = None + self._capture_input_filepaths = None + self._render_input_filepaths = None + self._echo_path_simulator_class = None + + @classmethod + def GetPrefixApmConfig(cls): + return cls._PREFIX_APM_CONFIG + + @classmethod + def GetPrefixCapture(cls): + return cls._PREFIX_CAPTURE + + @classmethod + def GetPrefixRender(cls): + return cls._PREFIX_RENDER + + @classmethod + def GetPrefixEchoSimulator(cls): + return cls._PREFIX_ECHO_SIMULATOR + + @classmethod + def GetPrefixTestDataGenerator(cls): + return cls._PREFIX_TEST_DATA_GEN + + @classmethod + def GetPrefixTestDataGeneratorParameters(cls): + return cls._PREFIX_TEST_DATA_GEN_PARAMS + + @classmethod + def GetPrefixScore(cls): + return cls._PREFIX_SCORE + + def Run(self, + config_filepaths, + capture_input_filepaths, + test_data_generator_names, + eval_score_names, + output_dir, + render_input_filepaths=None, + echo_path_simulator_name=( + echo_path_simulation.NoEchoPathSimulator.NAME)): + """Runs the APM simulation. Initializes paths and required instances, then runs all the simulations. The render input can be optionally added. If added, the number of capture @@ -120,132 +127,140 @@ def Run(self, config_filepaths, capture_input_filepaths, echo_path_simulator_name: name of the echo path simulator to use when render input is provided. """ - assert render_input_filepaths is None or ( - len(capture_input_filepaths) == len(render_input_filepaths)), ( - 'render input set size not matching input set size') - assert render_input_filepaths is None or echo_path_simulator_name in ( - echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES), ( - 'invalid echo path simulator') - self._base_output_path = os.path.abspath(output_dir) - - # Output path used to cache the data shared across simulations. 
- self._output_cache_path = os.path.join(self._base_output_path, '_cache') - - # Instance test data generators. - self._test_data_generators = [self._test_data_generator_factory.GetInstance( - test_data_generators_class=( - self._TEST_DATA_GENERATOR_CLASSES[name])) for name in ( - test_data_generator_names)] - - # Instance evaluation score workers. - self._evaluation_score_workers = [ - self._evaluation_score_factory.GetInstance( - evaluation_score_class=self._EVAL_SCORE_WORKER_CLASSES[name]) for ( - name) in eval_score_names] - - # Set APM configuration file paths. - self._config_filepaths = self._CreatePathsCollection(config_filepaths) - - # Set probing signal file paths. - if render_input_filepaths is None: - # Capture input only. - self._capture_input_filepaths = self._CreatePathsCollection( - capture_input_filepaths) - self._render_input_filepaths = None - else: - # Set both capture and render input signals. - self._SetTestInputSignalFilePaths( - capture_input_filepaths, render_input_filepaths) - - # Set the echo path simulator class. - self._echo_path_simulator_class = ( - echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES[ - echo_path_simulator_name]) - - self._SimulateAll() - - def _SimulateAll(self): - """Runs all the simulations. + assert render_input_filepaths is None or ( + len(capture_input_filepaths) == len(render_input_filepaths)), ( + 'render input set size not matching input set size') + assert render_input_filepaths is None or echo_path_simulator_name in ( + echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES), ( + 'invalid echo path simulator') + self._base_output_path = os.path.abspath(output_dir) + + # Output path used to cache the data shared across simulations. + self._output_cache_path = os.path.join(self._base_output_path, + '_cache') + + # Instance test data generators. 
+ self._test_data_generators = [ + self._test_data_generator_factory.GetInstance( + test_data_generators_class=( + self._TEST_DATA_GENERATOR_CLASSES[name])) + for name in (test_data_generator_names) + ] + + # Instance evaluation score workers. + self._evaluation_score_workers = [ + self._evaluation_score_factory.GetInstance( + evaluation_score_class=self._EVAL_SCORE_WORKER_CLASSES[name]) + for (name) in eval_score_names + ] + + # Set APM configuration file paths. + self._config_filepaths = self._CreatePathsCollection(config_filepaths) + + # Set probing signal file paths. + if render_input_filepaths is None: + # Capture input only. + self._capture_input_filepaths = self._CreatePathsCollection( + capture_input_filepaths) + self._render_input_filepaths = None + else: + # Set both capture and render input signals. + self._SetTestInputSignalFilePaths(capture_input_filepaths, + render_input_filepaths) + + # Set the echo path simulator class. + self._echo_path_simulator_class = ( + echo_path_simulation.EchoPathSimulator. + REGISTERED_CLASSES[echo_path_simulator_name]) + + self._SimulateAll() + + def _SimulateAll(self): + """Runs all the simulations. Iterates over the combinations of APM configurations, probing signals, and test data generators. This method is mainly responsible for the creation of the cache and output directories required in order to call _Simulate(). """ - without_render_input = self._render_input_filepaths is None - - # Try different APM config files. - for config_name in self._config_filepaths: - config_filepath = self._config_filepaths[config_name] - - # Try different capture-render pairs. - for capture_input_name in self._capture_input_filepaths: - # Output path for the capture signal annotations. - capture_annotations_cache_path = os.path.join( - self._output_cache_path, - self._PREFIX_CAPTURE + capture_input_name) - data_access.MakeDirectory(capture_annotations_cache_path) - - # Capture. 
- capture_input_filepath = self._capture_input_filepaths[ - capture_input_name] - if not os.path.exists(capture_input_filepath): - # If the input signal file does not exist, try to create using the - # available input signal creators. - self._CreateInputSignal(capture_input_filepath) - assert os.path.exists(capture_input_filepath) - self._ExtractCaptureAnnotations( - capture_input_filepath, capture_annotations_cache_path) - - # Render and simulated echo path (optional). - render_input_filepath = None if without_render_input else ( - self._render_input_filepaths[capture_input_name]) - render_input_name = '(none)' if without_render_input else ( - self._ExtractFileName(render_input_filepath)) - echo_path_simulator = ( - echo_path_simulation_factory.EchoPathSimulatorFactory.GetInstance( - self._echo_path_simulator_class, render_input_filepath)) - - # Try different test data generators. - for test_data_generators in self._test_data_generators: - logging.info('APM config preset: <%s>, capture: <%s>, render: <%s>,' - 'test data generator: <%s>, echo simulator: <%s>', - config_name, capture_input_name, render_input_name, - test_data_generators.NAME, echo_path_simulator.NAME) - - # Output path for the generated test data. - test_data_cache_path = os.path.join( - capture_annotations_cache_path, - self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME) - data_access.MakeDirectory(test_data_cache_path) - logging.debug('test data cache path: <%s>', test_data_cache_path) - - # Output path for the echo simulator and APM input mixer output. - echo_test_data_cache_path = os.path.join( - test_data_cache_path, 'echosim-{}'.format( - echo_path_simulator.NAME)) - data_access.MakeDirectory(echo_test_data_cache_path) - logging.debug('echo test data cache path: <%s>', - echo_test_data_cache_path) - - # Full output path. 
- output_path = os.path.join( - self._base_output_path, - self._PREFIX_APM_CONFIG + config_name, - self._PREFIX_CAPTURE + capture_input_name, - self._PREFIX_RENDER + render_input_name, - self._PREFIX_ECHO_SIMULATOR + echo_path_simulator.NAME, - self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME) - data_access.MakeDirectory(output_path) - logging.debug('output path: <%s>', output_path) - - self._Simulate(test_data_generators, capture_input_filepath, - render_input_filepath, test_data_cache_path, - echo_test_data_cache_path, output_path, - config_filepath, echo_path_simulator) - - @staticmethod - def _CreateInputSignal(input_signal_filepath): - """Creates a missing input signal file. + without_render_input = self._render_input_filepaths is None + + # Try different APM config files. + for config_name in self._config_filepaths: + config_filepath = self._config_filepaths[config_name] + + # Try different capture-render pairs. + for capture_input_name in self._capture_input_filepaths: + # Output path for the capture signal annotations. + capture_annotations_cache_path = os.path.join( + self._output_cache_path, + self._PREFIX_CAPTURE + capture_input_name) + data_access.MakeDirectory(capture_annotations_cache_path) + + # Capture. + capture_input_filepath = self._capture_input_filepaths[ + capture_input_name] + if not os.path.exists(capture_input_filepath): + # If the input signal file does not exist, try to create using the + # available input signal creators. + self._CreateInputSignal(capture_input_filepath) + assert os.path.exists(capture_input_filepath) + self._ExtractCaptureAnnotations( + capture_input_filepath, capture_annotations_cache_path) + + # Render and simulated echo path (optional). + render_input_filepath = None if without_render_input else ( + self._render_input_filepaths[capture_input_name]) + render_input_name = '(none)' if without_render_input else ( + self._ExtractFileName(render_input_filepath)) + echo_path_simulator = (echo_path_simulation_factory. 
+ EchoPathSimulatorFactory.GetInstance( + self._echo_path_simulator_class, + render_input_filepath)) + + # Try different test data generators. + for test_data_generators in self._test_data_generators: + logging.info( + 'APM config preset: <%s>, capture: <%s>, render: <%s>,' + 'test data generator: <%s>, echo simulator: <%s>', + config_name, capture_input_name, render_input_name, + test_data_generators.NAME, echo_path_simulator.NAME) + + # Output path for the generated test data. + test_data_cache_path = os.path.join( + capture_annotations_cache_path, + self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME) + data_access.MakeDirectory(test_data_cache_path) + logging.debug('test data cache path: <%s>', + test_data_cache_path) + + # Output path for the echo simulator and APM input mixer output. + echo_test_data_cache_path = os.path.join( + test_data_cache_path, + 'echosim-{}'.format(echo_path_simulator.NAME)) + data_access.MakeDirectory(echo_test_data_cache_path) + logging.debug('echo test data cache path: <%s>', + echo_test_data_cache_path) + + # Full output path. + output_path = os.path.join( + self._base_output_path, + self._PREFIX_APM_CONFIG + config_name, + self._PREFIX_CAPTURE + capture_input_name, + self._PREFIX_RENDER + render_input_name, + self._PREFIX_ECHO_SIMULATOR + echo_path_simulator.NAME, + self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME) + data_access.MakeDirectory(output_path) + logging.debug('output path: <%s>', output_path) + + self._Simulate(test_data_generators, + capture_input_filepath, + render_input_filepath, test_data_cache_path, + echo_test_data_cache_path, output_path, + config_filepath, echo_path_simulator) + + @staticmethod + def _CreateInputSignal(input_signal_filepath): + """Creates a missing input signal file. The file name is parsed to extract input signal creator and params. 
If a creator is matched and the parameters are valid, a new signal is generated @@ -257,30 +272,33 @@ def _CreateInputSignal(input_signal_filepath): Raises: InputSignalCreatorException """ - filename = os.path.splitext(os.path.split(input_signal_filepath)[-1])[0] - filename_parts = filename.split('-') - - if len(filename_parts) < 2: - raise exceptions.InputSignalCreatorException( - 'Cannot parse input signal file name') - - signal, metadata = input_signal_creator.InputSignalCreator.Create( - filename_parts[0], filename_parts[1].split('_')) - - signal_processing.SignalProcessingUtils.SaveWav( - input_signal_filepath, signal) - data_access.Metadata.SaveFileMetadata(input_signal_filepath, metadata) - - def _ExtractCaptureAnnotations(self, input_filepath, output_path, - annotation_name=""): - self._annotator.Extract(input_filepath) - self._annotator.Save(output_path, annotation_name) - - def _Simulate(self, test_data_generators, clean_capture_input_filepath, - render_input_filepath, test_data_cache_path, - echo_test_data_cache_path, output_path, config_filepath, - echo_path_simulator): - """Runs a single set of simulation. 
+ filename = os.path.splitext( + os.path.split(input_signal_filepath)[-1])[0] + filename_parts = filename.split('-') + + if len(filename_parts) < 2: + raise exceptions.InputSignalCreatorException( + 'Cannot parse input signal file name') + + signal, metadata = input_signal_creator.InputSignalCreator.Create( + filename_parts[0], filename_parts[1].split('_')) + + signal_processing.SignalProcessingUtils.SaveWav( + input_signal_filepath, signal) + data_access.Metadata.SaveFileMetadata(input_signal_filepath, metadata) + + def _ExtractCaptureAnnotations(self, + input_filepath, + output_path, + annotation_name=""): + self._annotator.Extract(input_filepath) + self._annotator.Save(output_path, annotation_name) + + def _Simulate(self, test_data_generators, clean_capture_input_filepath, + render_input_filepath, test_data_cache_path, + echo_test_data_cache_path, output_path, config_filepath, + echo_path_simulator): + """Runs a single set of simulation. Simulates a given combination of APM configuration, probing signal, and test data generator. It iterates over the test data generator @@ -298,90 +316,92 @@ def _Simulate(self, test_data_generators, clean_capture_input_filepath, config_filepath: APM configuration file to test. echo_path_simulator: EchoPathSimulator instance. """ - # Generate pairs of noisy input and reference signal files. - test_data_generators.Generate( - input_signal_filepath=clean_capture_input_filepath, - test_data_cache_path=test_data_cache_path, - base_output_path=output_path) - - # Extract metadata linked to the clean input file (if any). - apm_input_metadata = None - try: - apm_input_metadata = data_access.Metadata.LoadFileMetadata( - clean_capture_input_filepath) - except IOError as e: - apm_input_metadata = {} - apm_input_metadata['test_data_gen_name'] = test_data_generators.NAME - apm_input_metadata['test_data_gen_config'] = None - - # For each test data pair, simulate a call and evaluate. 
- for config_name in test_data_generators.config_names: - logging.info(' - test data generator config: <%s>', config_name) - apm_input_metadata['test_data_gen_config'] = config_name - - # Paths to the test data generator output. - # Note that the reference signal does not depend on the render input - # which is optional. - noisy_capture_input_filepath = ( - test_data_generators.noisy_signal_filepaths[config_name]) - reference_signal_filepath = ( - test_data_generators.reference_signal_filepaths[config_name]) - - # Output path for the evaluation (e.g., APM output file). - evaluation_output_path = test_data_generators.apm_output_paths[ - config_name] - - # Paths to the APM input signals. - echo_path_filepath = echo_path_simulator.Simulate( - echo_test_data_cache_path) - apm_input_filepath = input_mixer.ApmInputMixer.Mix( - echo_test_data_cache_path, noisy_capture_input_filepath, - echo_path_filepath) - - # Extract annotations for the APM input mix. - apm_input_basepath, apm_input_filename = os.path.split( - apm_input_filepath) - self._ExtractCaptureAnnotations( - apm_input_filepath, apm_input_basepath, - os.path.splitext(apm_input_filename)[0] + '-') - - # Simulate a call using APM. - self._audioproc_wrapper.Run( - config_filepath=config_filepath, - capture_input_filepath=apm_input_filepath, - render_input_filepath=render_input_filepath, - output_path=evaluation_output_path) - - try: - # Evaluate. - self._evaluator.Run( - evaluation_score_workers=self._evaluation_score_workers, - apm_input_metadata=apm_input_metadata, - apm_output_filepath=self._audioproc_wrapper.output_filepath, - reference_input_filepath=reference_signal_filepath, - render_input_filepath=render_input_filepath, - output_path=evaluation_output_path, - ) - - # Save simulation metadata. 
- data_access.Metadata.SaveAudioTestDataPaths( - output_path=evaluation_output_path, - clean_capture_input_filepath=clean_capture_input_filepath, - echo_free_capture_filepath=noisy_capture_input_filepath, - echo_filepath=echo_path_filepath, - render_filepath=render_input_filepath, - capture_filepath=apm_input_filepath, - apm_output_filepath=self._audioproc_wrapper.output_filepath, - apm_reference_filepath=reference_signal_filepath, - apm_config_filepath=config_filepath, - ) - except exceptions.EvaluationScoreException as e: - logging.warning('the evaluation failed: %s', e.message) - continue - - def _SetTestInputSignalFilePaths(self, capture_input_filepaths, - render_input_filepaths): - """Sets input and render input file paths collections. + # Generate pairs of noisy input and reference signal files. + test_data_generators.Generate( + input_signal_filepath=clean_capture_input_filepath, + test_data_cache_path=test_data_cache_path, + base_output_path=output_path) + + # Extract metadata linked to the clean input file (if any). + apm_input_metadata = None + try: + apm_input_metadata = data_access.Metadata.LoadFileMetadata( + clean_capture_input_filepath) + except IOError as e: + apm_input_metadata = {} + apm_input_metadata['test_data_gen_name'] = test_data_generators.NAME + apm_input_metadata['test_data_gen_config'] = None + + # For each test data pair, simulate a call and evaluate. + for config_name in test_data_generators.config_names: + logging.info(' - test data generator config: <%s>', config_name) + apm_input_metadata['test_data_gen_config'] = config_name + + # Paths to the test data generator output. + # Note that the reference signal does not depend on the render input + # which is optional. + noisy_capture_input_filepath = ( + test_data_generators.noisy_signal_filepaths[config_name]) + reference_signal_filepath = ( + test_data_generators.reference_signal_filepaths[config_name]) + + # Output path for the evaluation (e.g., APM output file). 
+ evaluation_output_path = test_data_generators.apm_output_paths[ + config_name] + + # Paths to the APM input signals. + echo_path_filepath = echo_path_simulator.Simulate( + echo_test_data_cache_path) + apm_input_filepath = input_mixer.ApmInputMixer.Mix( + echo_test_data_cache_path, noisy_capture_input_filepath, + echo_path_filepath) + + # Extract annotations for the APM input mix. + apm_input_basepath, apm_input_filename = os.path.split( + apm_input_filepath) + self._ExtractCaptureAnnotations( + apm_input_filepath, apm_input_basepath, + os.path.splitext(apm_input_filename)[0] + '-') + + # Simulate a call using APM. + self._audioproc_wrapper.Run( + config_filepath=config_filepath, + capture_input_filepath=apm_input_filepath, + render_input_filepath=render_input_filepath, + output_path=evaluation_output_path) + + try: + # Evaluate. + self._evaluator.Run( + evaluation_score_workers=self._evaluation_score_workers, + apm_input_metadata=apm_input_metadata, + apm_output_filepath=self._audioproc_wrapper. + output_filepath, + reference_input_filepath=reference_signal_filepath, + render_input_filepath=render_input_filepath, + output_path=evaluation_output_path, + ) + + # Save simulation metadata. + data_access.Metadata.SaveAudioTestDataPaths( + output_path=evaluation_output_path, + clean_capture_input_filepath=clean_capture_input_filepath, + echo_free_capture_filepath=noisy_capture_input_filepath, + echo_filepath=echo_path_filepath, + render_filepath=render_input_filepath, + capture_filepath=apm_input_filepath, + apm_output_filepath=self._audioproc_wrapper. + output_filepath, + apm_reference_filepath=reference_signal_filepath, + apm_config_filepath=config_filepath, + ) + except exceptions.EvaluationScoreException as e: + logging.warning('the evaluation failed: %s', e.message) + continue + + def _SetTestInputSignalFilePaths(self, capture_input_filepaths, + render_input_filepaths): + """Sets input and render input file paths collections. 
Pairs the input and render input files by storing the file paths into two collections. The key is the file name of the input file. @@ -390,20 +410,20 @@ def _SetTestInputSignalFilePaths(self, capture_input_filepaths, capture_input_filepaths: list of file paths. render_input_filepaths: list of file paths. """ - self._capture_input_filepaths = {} - self._render_input_filepaths = {} - assert len(capture_input_filepaths) == len(render_input_filepaths) - for capture_input_filepath, render_input_filepath in zip( - capture_input_filepaths, render_input_filepaths): - name = self._ExtractFileName(capture_input_filepath) - self._capture_input_filepaths[name] = os.path.abspath( - capture_input_filepath) - self._render_input_filepaths[name] = os.path.abspath( - render_input_filepath) - - @classmethod - def _CreatePathsCollection(cls, filepaths): - """Creates a collection of file paths. + self._capture_input_filepaths = {} + self._render_input_filepaths = {} + assert len(capture_input_filepaths) == len(render_input_filepaths) + for capture_input_filepath, render_input_filepath in zip( + capture_input_filepaths, render_input_filepaths): + name = self._ExtractFileName(capture_input_filepath) + self._capture_input_filepaths[name] = os.path.abspath( + capture_input_filepath) + self._render_input_filepaths[name] = os.path.abspath( + render_input_filepath) + + @classmethod + def _CreatePathsCollection(cls, filepaths): + """Creates a collection of file paths. Given a list of file paths, makes a collection with one item for each file path. The value is absolute path, the key is the file name without @@ -415,12 +435,12 @@ def _CreatePathsCollection(cls, filepaths): Returns: A dict. 
""" - filepaths_collection = {} - for filepath in filepaths: - name = cls._ExtractFileName(filepath) - filepaths_collection[name] = os.path.abspath(filepath) - return filepaths_collection - - @classmethod - def _ExtractFileName(cls, filepath): - return os.path.splitext(os.path.split(filepath)[-1])[0] + filepaths_collection = {} + for filepath in filepaths: + name = cls._ExtractFileName(filepath) + filepaths_collection[name] = os.path.abspath(filepath) + return filepaths_collection + + @classmethod + def _ExtractFileName(cls, filepath): + return os.path.splitext(os.path.split(filepath)[-1])[0] diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation_unittest.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation_unittest.py index c39b12dd00..78ca17f589 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation_unittest.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/simulation_unittest.py @@ -5,21 +5,15 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Unit tests for the simulation module. """ import logging import os import shutil -import sys import tempfile import unittest -SRC = os.path.abspath(os.path.join( - os.path.dirname((__file__)), os.pardir, os.pardir, os.pardir, os.pardir)) -sys.path.append(os.path.join(SRC, 'third_party', 'pymock')) - import mock import pydub @@ -33,177 +27,177 @@ class TestApmModuleSimulator(unittest.TestCase): - """Unit tests for the ApmModuleSimulator class. + """Unit tests for the ApmModuleSimulator class. 
""" - def setUp(self): - """Create temporary folders and fake audio track.""" - self._output_path = tempfile.mkdtemp() - self._tmp_path = tempfile.mkdtemp() - - silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000) - fake_signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( - silence) - self._fake_audio_track_path = os.path.join(self._output_path, 'fake.wav') - signal_processing.SignalProcessingUtils.SaveWav( - self._fake_audio_track_path, fake_signal) - - def tearDown(self): - """Recursively delete temporary folders.""" - shutil.rmtree(self._output_path) - shutil.rmtree(self._tmp_path) - - def testSimulation(self): - # Instance dependencies to mock and inject. - ap_wrapper = audioproc_wrapper.AudioProcWrapper( - audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH) - evaluator = evaluation.ApmModuleEvaluator() - ap_wrapper.Run = mock.MagicMock(name='Run') - evaluator.Run = mock.MagicMock(name='Run') - - # Instance non-mocked dependencies. - test_data_generator_factory = ( - test_data_generation_factory.TestDataGeneratorFactory( - aechen_ir_database_path='', - noise_tracks_path='', - copy_with_identity=False)) - evaluation_score_factory = eval_scores_factory.EvaluationScoreWorkerFactory( - polqa_tool_bin_path=os.path.join( - os.path.dirname(__file__), 'fake_polqa'), - echo_metric_tool_bin_path=None - ) - - # Instance simulator. - simulator = simulation.ApmModuleSimulator( - test_data_generator_factory=test_data_generator_factory, - evaluation_score_factory=evaluation_score_factory, - ap_wrapper=ap_wrapper, - evaluator=evaluator, - external_vads={'fake': external_vad.ExternalVad(os.path.join( - os.path.dirname(__file__), 'fake_external_vad.py'), 'fake')} - ) - - # What to simulate. - config_files = ['apm_configs/default.json'] - input_files = [self._fake_audio_track_path] - test_data_generators = ['identity', 'white_noise'] - eval_scores = ['audio_level_mean', 'polqa'] - - # Run all simulations. 
- simulator.Run( - config_filepaths=config_files, - capture_input_filepaths=input_files, - test_data_generator_names=test_data_generators, - eval_score_names=eval_scores, - output_dir=self._output_path) - - # Check. - # TODO(alessiob): Once the TestDataGenerator classes can be configured by - # the client code (e.g., number of SNR pairs for the white noise test data - # generator), the exact number of calls to ap_wrapper.Run and evaluator.Run - # is known; use that with assertEqual. - min_number_of_simulations = len(config_files) * len(input_files) * len( - test_data_generators) - self.assertGreaterEqual(len(ap_wrapper.Run.call_args_list), - min_number_of_simulations) - self.assertGreaterEqual(len(evaluator.Run.call_args_list), - min_number_of_simulations) - - def testInputSignalCreation(self): - # Instance simulator. - simulator = simulation.ApmModuleSimulator( - test_data_generator_factory=( - test_data_generation_factory.TestDataGeneratorFactory( - aechen_ir_database_path='', - noise_tracks_path='', - copy_with_identity=False)), - evaluation_score_factory=( - eval_scores_factory.EvaluationScoreWorkerFactory( - polqa_tool_bin_path=os.path.join( - os.path.dirname(__file__), 'fake_polqa'), - echo_metric_tool_bin_path=None - )), - ap_wrapper=audioproc_wrapper.AudioProcWrapper( - audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH), - evaluator=evaluation.ApmModuleEvaluator()) - - # Inexistent input files to be silently created. - input_files = [ - os.path.join(self._tmp_path, 'pure_tone-440_1000.wav'), - os.path.join(self._tmp_path, 'pure_tone-1000_500.wav'), - ] - self.assertFalse(any([os.path.exists(input_file) for input_file in ( - input_files)])) - - # The input files are created during the simulation. 
- simulator.Run( - config_filepaths=['apm_configs/default.json'], - capture_input_filepaths=input_files, - test_data_generator_names=['identity'], - eval_score_names=['audio_level_peak'], - output_dir=self._output_path) - self.assertTrue(all([os.path.exists(input_file) for input_file in ( - input_files)])) - - def testPureToneGenerationWithTotalHarmonicDistorsion(self): - logging.warning = mock.MagicMock(name='warning') - - # Instance simulator. - simulator = simulation.ApmModuleSimulator( - test_data_generator_factory=( + def setUp(self): + """Create temporary folders and fake audio track.""" + self._output_path = tempfile.mkdtemp() + self._tmp_path = tempfile.mkdtemp() + + silence = pydub.AudioSegment.silent(duration=1000, frame_rate=48000) + fake_signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( + silence) + self._fake_audio_track_path = os.path.join(self._output_path, + 'fake.wav') + signal_processing.SignalProcessingUtils.SaveWav( + self._fake_audio_track_path, fake_signal) + + def tearDown(self): + """Recursively delete temporary folders.""" + shutil.rmtree(self._output_path) + shutil.rmtree(self._tmp_path) + + def testSimulation(self): + # Instance dependencies to mock and inject. + ap_wrapper = audioproc_wrapper.AudioProcWrapper( + audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH) + evaluator = evaluation.ApmModuleEvaluator() + ap_wrapper.Run = mock.MagicMock(name='Run') + evaluator.Run = mock.MagicMock(name='Run') + + # Instance non-mocked dependencies. 
+ test_data_generator_factory = ( test_data_generation_factory.TestDataGeneratorFactory( aechen_ir_database_path='', noise_tracks_path='', - copy_with_identity=False)), - evaluation_score_factory=( - eval_scores_factory.EvaluationScoreWorkerFactory( - polqa_tool_bin_path=os.path.join( - os.path.dirname(__file__), 'fake_polqa'), - echo_metric_tool_bin_path=None - )), - ap_wrapper=audioproc_wrapper.AudioProcWrapper( - audioproc_wrapper.AudioProcWrapper.DEFAULT_APM_SIMULATOR_BIN_PATH), - evaluator=evaluation.ApmModuleEvaluator()) - - # What to simulate. - config_files = ['apm_configs/default.json'] - input_files = [os.path.join(self._tmp_path, 'pure_tone-440_1000.wav')] - eval_scores = ['thd'] - - # Should work. - simulator.Run( - config_filepaths=config_files, - capture_input_filepaths=input_files, - test_data_generator_names=['identity'], - eval_score_names=eval_scores, - output_dir=self._output_path) - self.assertFalse(logging.warning.called) - - # Warning expected. - simulator.Run( - config_filepaths=config_files, - capture_input_filepaths=input_files, - test_data_generator_names=['white_noise'], # Not allowed with THD. - eval_score_names=eval_scores, - output_dir=self._output_path) - logging.warning.assert_called_with('the evaluation failed: %s', ( - 'The THD score cannot be used with any test data generator other than ' - '"identity"')) - - # # Init. - # generator = test_data_generation.IdentityTestDataGenerator('tmp') - # input_signal_filepath = os.path.join( - # self._test_data_cache_path, 'pure_tone-440_1000.wav') - - # # Check that the input signal is generated. - # self.assertFalse(os.path.exists(input_signal_filepath)) - # generator.Generate( - # input_signal_filepath=input_signal_filepath, - # test_data_cache_path=self._test_data_cache_path, - # base_output_path=self._base_output_path) - # self.assertTrue(os.path.exists(input_signal_filepath)) - - # # Check input signal properties. 
- # input_signal = signal_processing.SignalProcessingUtils.LoadWav( - # input_signal_filepath) - # self.assertEqual(1000, len(input_signal)) + copy_with_identity=False)) + evaluation_score_factory = eval_scores_factory.EvaluationScoreWorkerFactory( + polqa_tool_bin_path=os.path.join(os.path.dirname(__file__), + 'fake_polqa'), + echo_metric_tool_bin_path=None) + + # Instance simulator. + simulator = simulation.ApmModuleSimulator( + test_data_generator_factory=test_data_generator_factory, + evaluation_score_factory=evaluation_score_factory, + ap_wrapper=ap_wrapper, + evaluator=evaluator, + external_vads={ + 'fake': + external_vad.ExternalVad( + os.path.join(os.path.dirname(__file__), + 'fake_external_vad.py'), 'fake') + }) + + # What to simulate. + config_files = ['apm_configs/default.json'] + input_files = [self._fake_audio_track_path] + test_data_generators = ['identity', 'white_noise'] + eval_scores = ['audio_level_mean', 'polqa'] + + # Run all simulations. + simulator.Run(config_filepaths=config_files, + capture_input_filepaths=input_files, + test_data_generator_names=test_data_generators, + eval_score_names=eval_scores, + output_dir=self._output_path) + + # Check. + # TODO(alessiob): Once the TestDataGenerator classes can be configured by + # the client code (e.g., number of SNR pairs for the white noise test data + # generator), the exact number of calls to ap_wrapper.Run and evaluator.Run + # is known; use that with assertEqual. + min_number_of_simulations = len(config_files) * len(input_files) * len( + test_data_generators) + self.assertGreaterEqual(len(ap_wrapper.Run.call_args_list), + min_number_of_simulations) + self.assertGreaterEqual(len(evaluator.Run.call_args_list), + min_number_of_simulations) + + def testInputSignalCreation(self): + # Instance simulator. 
+ simulator = simulation.ApmModuleSimulator( + test_data_generator_factory=( + test_data_generation_factory.TestDataGeneratorFactory( + aechen_ir_database_path='', + noise_tracks_path='', + copy_with_identity=False)), + evaluation_score_factory=( + eval_scores_factory.EvaluationScoreWorkerFactory( + polqa_tool_bin_path=os.path.join(os.path.dirname(__file__), + 'fake_polqa'), + echo_metric_tool_bin_path=None)), + ap_wrapper=audioproc_wrapper.AudioProcWrapper( + audioproc_wrapper.AudioProcWrapper. + DEFAULT_APM_SIMULATOR_BIN_PATH), + evaluator=evaluation.ApmModuleEvaluator()) + + # Inexistent input files to be silently created. + input_files = [ + os.path.join(self._tmp_path, 'pure_tone-440_1000.wav'), + os.path.join(self._tmp_path, 'pure_tone-1000_500.wav'), + ] + self.assertFalse( + any([os.path.exists(input_file) for input_file in (input_files)])) + + # The input files are created during the simulation. + simulator.Run(config_filepaths=['apm_configs/default.json'], + capture_input_filepaths=input_files, + test_data_generator_names=['identity'], + eval_score_names=['audio_level_peak'], + output_dir=self._output_path) + self.assertTrue( + all([os.path.exists(input_file) for input_file in (input_files)])) + + def testPureToneGenerationWithTotalHarmonicDistorsion(self): + logging.warning = mock.MagicMock(name='warning') + + # Instance simulator. + simulator = simulation.ApmModuleSimulator( + test_data_generator_factory=( + test_data_generation_factory.TestDataGeneratorFactory( + aechen_ir_database_path='', + noise_tracks_path='', + copy_with_identity=False)), + evaluation_score_factory=( + eval_scores_factory.EvaluationScoreWorkerFactory( + polqa_tool_bin_path=os.path.join(os.path.dirname(__file__), + 'fake_polqa'), + echo_metric_tool_bin_path=None)), + ap_wrapper=audioproc_wrapper.AudioProcWrapper( + audioproc_wrapper.AudioProcWrapper. + DEFAULT_APM_SIMULATOR_BIN_PATH), + evaluator=evaluation.ApmModuleEvaluator()) + + # What to simulate. 
+ config_files = ['apm_configs/default.json'] + input_files = [os.path.join(self._tmp_path, 'pure_tone-440_1000.wav')] + eval_scores = ['thd'] + + # Should work. + simulator.Run(config_filepaths=config_files, + capture_input_filepaths=input_files, + test_data_generator_names=['identity'], + eval_score_names=eval_scores, + output_dir=self._output_path) + self.assertFalse(logging.warning.called) + + # Warning expected. + simulator.Run( + config_filepaths=config_files, + capture_input_filepaths=input_files, + test_data_generator_names=['white_noise'], # Not allowed with THD. + eval_score_names=eval_scores, + output_dir=self._output_path) + logging.warning.assert_called_with('the evaluation failed: %s', ( + 'The THD score cannot be used with any test data generator other than ' + '"identity"')) + + # # Init. + # generator = test_data_generation.IdentityTestDataGenerator('tmp') + # input_signal_filepath = os.path.join( + # self._test_data_cache_path, 'pure_tone-440_1000.wav') + + # # Check that the input signal is generated. + # self.assertFalse(os.path.exists(input_signal_filepath)) + # generator.Generate( + # input_signal_filepath=input_signal_filepath, + # test_data_cache_path=self._test_data_cache_path, + # base_output_path=self._base_output_path) + # self.assertTrue(os.path.exists(input_signal_filepath)) + + # # Check input signal properties. + # input_signal = signal_processing.SignalProcessingUtils.LoadWav( + # input_signal_filepath) + # self.assertEqual(1000, len(input_signal)) diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py index dac4328588..7e86faccec 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py @@ -5,7 +5,6 @@ # tree. 
An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Test data generators producing signals pairs intended to be used to test the APM module. Each pair consists of a noisy input and a reference signal. The former is used as APM input and it is generated by adding noise to a @@ -27,10 +26,10 @@ import sys try: - import scipy.io + import scipy.io except ImportError: - logging.critical('Cannot import the third-party Python package scipy') - sys.exit(1) + logging.critical('Cannot import the third-party Python package scipy') + sys.exit(1) from . import data_access from . import exceptions @@ -38,7 +37,7 @@ class TestDataGenerator(object): - """Abstract class responsible for the generation of noisy signals. + """Abstract class responsible for the generation of noisy signals. Given a clean signal, it generates two streams named noisy signal and reference. The former is the clean signal deteriorated by the noise source, @@ -50,24 +49,24 @@ class TestDataGenerator(object): An test data generator generates one or more pairs. """ - NAME = None - REGISTERED_CLASSES = {} - - def __init__(self, output_directory_prefix): - self._output_directory_prefix = output_directory_prefix - # Init dictionaries with one entry for each test data generator - # configuration (e.g., different SNRs). - # Noisy audio track files (stored separately in a cache folder). - self._noisy_signal_filepaths = None - # Path to be used for the APM simulation output files. - self._apm_output_paths = None - # Reference audio track files (stored separately in a cache folder). - self._reference_signal_filepaths = None - self.Clear() - - @classmethod - def RegisterClass(cls, class_to_register): - """Registers a TestDataGenerator implementation. 
+ NAME = None + REGISTERED_CLASSES = {} + + def __init__(self, output_directory_prefix): + self._output_directory_prefix = output_directory_prefix + # Init dictionaries with one entry for each test data generator + # configuration (e.g., different SNRs). + # Noisy audio track files (stored separately in a cache folder). + self._noisy_signal_filepaths = None + # Path to be used for the APM simulation output files. + self._apm_output_paths = None + # Reference audio track files (stored separately in a cache folder). + self._reference_signal_filepaths = None + self.Clear() + + @classmethod + def RegisterClass(cls, class_to_register): + """Registers a TestDataGenerator implementation. Decorator to automatically register the classes that extend TestDataGenerator. @@ -77,28 +76,28 @@ def RegisterClass(cls, class_to_register): class IdentityGenerator(TestDataGenerator): pass """ - cls.REGISTERED_CLASSES[class_to_register.NAME] = class_to_register - return class_to_register + cls.REGISTERED_CLASSES[class_to_register.NAME] = class_to_register + return class_to_register - @property - def config_names(self): - return self._noisy_signal_filepaths.keys() + @property + def config_names(self): + return self._noisy_signal_filepaths.keys() - @property - def noisy_signal_filepaths(self): - return self._noisy_signal_filepaths + @property + def noisy_signal_filepaths(self): + return self._noisy_signal_filepaths - @property - def apm_output_paths(self): - return self._apm_output_paths + @property + def apm_output_paths(self): + return self._apm_output_paths - @property - def reference_signal_filepaths(self): - return self._reference_signal_filepaths + @property + def reference_signal_filepaths(self): + return self._reference_signal_filepaths - def Generate( - self, input_signal_filepath, test_data_cache_path, base_output_path): - """Generates a set of noisy input and reference audiotrack file pairs. 
+ def Generate(self, input_signal_filepath, test_data_cache_path, + base_output_path): + """Generates a set of noisy input and reference audiotrack file pairs. This method initializes an empty set of pairs and calls the _Generate() method implemented in a concrete class. @@ -109,26 +108,26 @@ def Generate( files. base_output_path: base path where output is written. """ - self.Clear() - self._Generate( - input_signal_filepath, test_data_cache_path, base_output_path) + self.Clear() + self._Generate(input_signal_filepath, test_data_cache_path, + base_output_path) - def Clear(self): - """Clears the generated output path dictionaries. + def Clear(self): + """Clears the generated output path dictionaries. """ - self._noisy_signal_filepaths = {} - self._apm_output_paths = {} - self._reference_signal_filepaths = {} + self._noisy_signal_filepaths = {} + self._apm_output_paths = {} + self._reference_signal_filepaths = {} - def _Generate( - self, input_signal_filepath, test_data_cache_path, base_output_path): - """Abstract method to be implemented in each concrete class. + def _Generate(self, input_signal_filepath, test_data_cache_path, + base_output_path): + """Abstract method to be implemented in each concrete class. """ - raise NotImplementedError() + raise NotImplementedError() - def _AddNoiseSnrPairs(self, base_output_path, noisy_mix_filepaths, - snr_value_pairs): - """Adds noisy-reference signal pairs. + def _AddNoiseSnrPairs(self, base_output_path, noisy_mix_filepaths, + snr_value_pairs): + """Adds noisy-reference signal pairs. Args: base_output_path: noisy tracks base output path. @@ -136,22 +135,22 @@ def _AddNoiseSnrPairs(self, base_output_path, noisy_mix_filepaths, by noisy track name and SNR level. snr_value_pairs: list of SNR pairs. 
""" - for noise_track_name in noisy_mix_filepaths: - for snr_noisy, snr_refence in snr_value_pairs: - config_name = '{0}_{1:d}_{2:d}_SNR'.format( - noise_track_name, snr_noisy, snr_refence) - output_path = self._MakeDir(base_output_path, config_name) - self._AddNoiseReferenceFilesPair( - config_name=config_name, - noisy_signal_filepath=noisy_mix_filepaths[ - noise_track_name][snr_noisy], - reference_signal_filepath=noisy_mix_filepaths[ - noise_track_name][snr_refence], - output_path=output_path) - - def _AddNoiseReferenceFilesPair(self, config_name, noisy_signal_filepath, - reference_signal_filepath, output_path): - """Adds one noisy-reference signal pair. + for noise_track_name in noisy_mix_filepaths: + for snr_noisy, snr_refence in snr_value_pairs: + config_name = '{0}_{1:d}_{2:d}_SNR'.format( + noise_track_name, snr_noisy, snr_refence) + output_path = self._MakeDir(base_output_path, config_name) + self._AddNoiseReferenceFilesPair( + config_name=config_name, + noisy_signal_filepath=noisy_mix_filepaths[noise_track_name] + [snr_noisy], + reference_signal_filepath=noisy_mix_filepaths[ + noise_track_name][snr_refence], + output_path=output_path) + + def _AddNoiseReferenceFilesPair(self, config_name, noisy_signal_filepath, + reference_signal_filepath, output_path): + """Adds one noisy-reference signal pair. Args: config_name: name of the APM configuration. @@ -159,264 +158,275 @@ def _AddNoiseReferenceFilesPair(self, config_name, noisy_signal_filepath, reference_signal_filepath: path to reference audio track file. output_path: APM output path. 
""" - assert config_name not in self._noisy_signal_filepaths - self._noisy_signal_filepaths[config_name] = os.path.abspath( - noisy_signal_filepath) - self._apm_output_paths[config_name] = os.path.abspath(output_path) - self._reference_signal_filepaths[config_name] = os.path.abspath( - reference_signal_filepath) + assert config_name not in self._noisy_signal_filepaths + self._noisy_signal_filepaths[config_name] = os.path.abspath( + noisy_signal_filepath) + self._apm_output_paths[config_name] = os.path.abspath(output_path) + self._reference_signal_filepaths[config_name] = os.path.abspath( + reference_signal_filepath) - def _MakeDir(self, base_output_path, test_data_generator_config_name): - output_path = os.path.join( - base_output_path, - self._output_directory_prefix + test_data_generator_config_name) - data_access.MakeDirectory(output_path) - return output_path + def _MakeDir(self, base_output_path, test_data_generator_config_name): + output_path = os.path.join( + base_output_path, + self._output_directory_prefix + test_data_generator_config_name) + data_access.MakeDirectory(output_path) + return output_path @TestDataGenerator.RegisterClass class IdentityTestDataGenerator(TestDataGenerator): - """Generator that adds no noise. + """Generator that adds no noise. Both the noisy and the reference signals are the input signal. 
""" - NAME = 'identity' + NAME = 'identity' - def __init__(self, output_directory_prefix, copy_with_identity): - TestDataGenerator.__init__(self, output_directory_prefix) - self._copy_with_identity = copy_with_identity + def __init__(self, output_directory_prefix, copy_with_identity): + TestDataGenerator.__init__(self, output_directory_prefix) + self._copy_with_identity = copy_with_identity - @property - def copy_with_identity(self): - return self._copy_with_identity + @property + def copy_with_identity(self): + return self._copy_with_identity - def _Generate( - self, input_signal_filepath, test_data_cache_path, base_output_path): - config_name = 'default' - output_path = self._MakeDir(base_output_path, config_name) + def _Generate(self, input_signal_filepath, test_data_cache_path, + base_output_path): + config_name = 'default' + output_path = self._MakeDir(base_output_path, config_name) - if self._copy_with_identity: - input_signal_filepath_new = os.path.join( - test_data_cache_path, os.path.split(input_signal_filepath)[1]) - logging.info('copying ' + input_signal_filepath + ' to ' + ( - input_signal_filepath_new)) - shutil.copy(input_signal_filepath, input_signal_filepath_new) - input_signal_filepath = input_signal_filepath_new + if self._copy_with_identity: + input_signal_filepath_new = os.path.join( + test_data_cache_path, + os.path.split(input_signal_filepath)[1]) + logging.info('copying ' + input_signal_filepath + ' to ' + + (input_signal_filepath_new)) + shutil.copy(input_signal_filepath, input_signal_filepath_new) + input_signal_filepath = input_signal_filepath_new - self._AddNoiseReferenceFilesPair( - config_name=config_name, - noisy_signal_filepath=input_signal_filepath, - reference_signal_filepath=input_signal_filepath, - output_path=output_path) + self._AddNoiseReferenceFilesPair( + config_name=config_name, + noisy_signal_filepath=input_signal_filepath, + reference_signal_filepath=input_signal_filepath, + output_path=output_path) 
@TestDataGenerator.RegisterClass class WhiteNoiseTestDataGenerator(TestDataGenerator): - """Generator that adds white noise. + """Generator that adds white noise. """ - NAME = 'white_noise' - - # Each pair indicates the clean vs. noisy and reference vs. noisy SNRs. - # The reference (second value of each pair) always has a lower amount of noise - # - i.e., the SNR is 10 dB higher. - _SNR_VALUE_PAIRS = [ - [20, 30], # Smallest noise. - [10, 20], - [5, 15], - [0, 10], # Largest noise. - ] - - _NOISY_SIGNAL_FILENAME_TEMPLATE = 'noise_{0:d}_SNR.wav' - - def __init__(self, output_directory_prefix): - TestDataGenerator.__init__(self, output_directory_prefix) - - def _Generate( - self, input_signal_filepath, test_data_cache_path, base_output_path): - # Load the input signal. - input_signal = signal_processing.SignalProcessingUtils.LoadWav( - input_signal_filepath) - - # Create the noise track. - noise_signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( - input_signal) - - # Create the noisy mixes (once for each unique SNR value). - noisy_mix_filepaths = {} - snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair]) - for snr in snr_values: - noisy_signal_filepath = os.path.join( - test_data_cache_path, - self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(snr)) - - # Create and save if not done. - if not os.path.exists(noisy_signal_filepath): - # Create noisy signal. - noisy_signal = signal_processing.SignalProcessingUtils.MixSignals( - input_signal, noise_signal, snr) - - # Save. - signal_processing.SignalProcessingUtils.SaveWav( - noisy_signal_filepath, noisy_signal) - - # Add file to the collection of mixes. - noisy_mix_filepaths[snr] = noisy_signal_filepath - - # Add all the noisy-reference signal pairs. 
- for snr_noisy, snr_refence in self._SNR_VALUE_PAIRS: - config_name = '{0:d}_{1:d}_SNR'.format(snr_noisy, snr_refence) - output_path = self._MakeDir(base_output_path, config_name) - self._AddNoiseReferenceFilesPair( - config_name=config_name, - noisy_signal_filepath=noisy_mix_filepaths[snr_noisy], - reference_signal_filepath=noisy_mix_filepaths[snr_refence], - output_path=output_path) + NAME = 'white_noise' + + # Each pair indicates the clean vs. noisy and reference vs. noisy SNRs. + # The reference (second value of each pair) always has a lower amount of noise + # - i.e., the SNR is 10 dB higher. + _SNR_VALUE_PAIRS = [ + [20, 30], # Smallest noise. + [10, 20], + [5, 15], + [0, 10], # Largest noise. + ] + + _NOISY_SIGNAL_FILENAME_TEMPLATE = 'noise_{0:d}_SNR.wav' + + def __init__(self, output_directory_prefix): + TestDataGenerator.__init__(self, output_directory_prefix) + + def _Generate(self, input_signal_filepath, test_data_cache_path, + base_output_path): + # Load the input signal. + input_signal = signal_processing.SignalProcessingUtils.LoadWav( + input_signal_filepath) + + # Create the noise track. + noise_signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise( + input_signal) + + # Create the noisy mixes (once for each unique SNR value). + noisy_mix_filepaths = {} + snr_values = set( + [snr for pair in self._SNR_VALUE_PAIRS for snr in pair]) + for snr in snr_values: + noisy_signal_filepath = os.path.join( + test_data_cache_path, + self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(snr)) + + # Create and save if not done. + if not os.path.exists(noisy_signal_filepath): + # Create noisy signal. + noisy_signal = signal_processing.SignalProcessingUtils.MixSignals( + input_signal, noise_signal, snr) + + # Save. + signal_processing.SignalProcessingUtils.SaveWav( + noisy_signal_filepath, noisy_signal) + + # Add file to the collection of mixes. + noisy_mix_filepaths[snr] = noisy_signal_filepath + + # Add all the noisy-reference signal pairs. 
+ for snr_noisy, snr_refence in self._SNR_VALUE_PAIRS: + config_name = '{0:d}_{1:d}_SNR'.format(snr_noisy, snr_refence) + output_path = self._MakeDir(base_output_path, config_name) + self._AddNoiseReferenceFilesPair( + config_name=config_name, + noisy_signal_filepath=noisy_mix_filepaths[snr_noisy], + reference_signal_filepath=noisy_mix_filepaths[snr_refence], + output_path=output_path) # TODO(alessiob): remove comment when class implemented. # @TestDataGenerator.RegisterClass class NarrowBandNoiseTestDataGenerator(TestDataGenerator): - """Generator that adds narrow-band noise. + """Generator that adds narrow-band noise. """ - NAME = 'narrow_band_noise' + NAME = 'narrow_band_noise' - def __init__(self, output_directory_prefix): - TestDataGenerator.__init__(self, output_directory_prefix) + def __init__(self, output_directory_prefix): + TestDataGenerator.__init__(self, output_directory_prefix) - def _Generate( - self, input_signal_filepath, test_data_cache_path, base_output_path): - # TODO(alessiob): implement. - pass + def _Generate(self, input_signal_filepath, test_data_cache_path, + base_output_path): + # TODO(alessiob): implement. + pass @TestDataGenerator.RegisterClass class AdditiveNoiseTestDataGenerator(TestDataGenerator): - """Generator that adds noise loops. + """Generator that adds noise loops. This generator uses all the wav files in a given path (default: noise_tracks/) and mixes them to the clean speech with different target SNRs (hard-coded). """ - NAME = 'additive_noise' - _NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav' - - DEFAULT_NOISE_TRACKS_PATH = os.path.join( - os.path.dirname(__file__), os.pardir, 'noise_tracks') - - # TODO(alessiob): Make the list of SNR pairs customizable. - # Each pair indicates the clean vs. noisy and reference vs. noisy SNRs. - # The reference (second value of each pair) always has a lower amount of noise - # - i.e., the SNR is 10 dB higher. - _SNR_VALUE_PAIRS = [ - [20, 30], # Smallest noise. 
- [10, 20], - [5, 15], - [0, 10], # Largest noise. - ] - - def __init__(self, output_directory_prefix, noise_tracks_path): - TestDataGenerator.__init__(self, output_directory_prefix) - self._noise_tracks_path = noise_tracks_path - self._noise_tracks_file_names = [n for n in os.listdir( - self._noise_tracks_path) if n.lower().endswith('.wav')] - if len(self._noise_tracks_file_names) == 0: - raise exceptions.InitializationException( - 'No wav files found in the noise tracks path %s' % ( - self._noise_tracks_path)) - - def _Generate( - self, input_signal_filepath, test_data_cache_path, base_output_path): - """Generates test data pairs using environmental noise. + NAME = 'additive_noise' + _NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav' + + DEFAULT_NOISE_TRACKS_PATH = os.path.join(os.path.dirname(__file__), + os.pardir, 'noise_tracks') + + # TODO(alessiob): Make the list of SNR pairs customizable. + # Each pair indicates the clean vs. noisy and reference vs. noisy SNRs. + # The reference (second value of each pair) always has a lower amount of noise + # - i.e., the SNR is 10 dB higher. + _SNR_VALUE_PAIRS = [ + [20, 30], # Smallest noise. + [10, 20], + [5, 15], + [0, 10], # Largest noise. + ] + + def __init__(self, output_directory_prefix, noise_tracks_path): + TestDataGenerator.__init__(self, output_directory_prefix) + self._noise_tracks_path = noise_tracks_path + self._noise_tracks_file_names = [ + n for n in os.listdir(self._noise_tracks_path) + if n.lower().endswith('.wav') + ] + if len(self._noise_tracks_file_names) == 0: + raise exceptions.InitializationException( + 'No wav files found in the noise tracks path %s' % + (self._noise_tracks_path)) + + def _Generate(self, input_signal_filepath, test_data_cache_path, + base_output_path): + """Generates test data pairs using environmental noise. For each noise track and pair of SNR values, the following two audio tracks are created: the noisy signal and the reference signal. 
The former is obtained by mixing the (clean) input signal to the corresponding noise track enforcing the target SNR. """ - # Init. - snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair]) - - # Load the input signal. - input_signal = signal_processing.SignalProcessingUtils.LoadWav( - input_signal_filepath) - - noisy_mix_filepaths = {} - for noise_track_filename in self._noise_tracks_file_names: - # Load the noise track. - noise_track_name, _ = os.path.splitext(noise_track_filename) - noise_track_filepath = os.path.join( - self._noise_tracks_path, noise_track_filename) - if not os.path.exists(noise_track_filepath): - logging.error('cannot find the <%s> noise track', noise_track_filename) - raise exceptions.FileNotFoundError() - - noise_signal = signal_processing.SignalProcessingUtils.LoadWav( - noise_track_filepath) - - # Create the noisy mixes (once for each unique SNR value). - noisy_mix_filepaths[noise_track_name] = {} - for snr in snr_values: - noisy_signal_filepath = os.path.join( - test_data_cache_path, - self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(noise_track_name, snr)) - - # Create and save if not done. - if not os.path.exists(noisy_signal_filepath): - # Create noisy signal. - noisy_signal = signal_processing.SignalProcessingUtils.MixSignals( - input_signal, noise_signal, snr, - pad_noise=signal_processing.SignalProcessingUtils.MixPadding.LOOP) - - # Save. - signal_processing.SignalProcessingUtils.SaveWav( - noisy_signal_filepath, noisy_signal) - - # Add file to the collection of mixes. - noisy_mix_filepaths[noise_track_name][snr] = noisy_signal_filepath - - # Add all the noise-SNR pairs. - self._AddNoiseSnrPairs( - base_output_path, noisy_mix_filepaths, self._SNR_VALUE_PAIRS) + # Init. + snr_values = set( + [snr for pair in self._SNR_VALUE_PAIRS for snr in pair]) + + # Load the input signal. 
+ input_signal = signal_processing.SignalProcessingUtils.LoadWav( + input_signal_filepath) + + noisy_mix_filepaths = {} + for noise_track_filename in self._noise_tracks_file_names: + # Load the noise track. + noise_track_name, _ = os.path.splitext(noise_track_filename) + noise_track_filepath = os.path.join(self._noise_tracks_path, + noise_track_filename) + if not os.path.exists(noise_track_filepath): + logging.error('cannot find the <%s> noise track', + noise_track_filename) + raise exceptions.FileNotFoundError() + + noise_signal = signal_processing.SignalProcessingUtils.LoadWav( + noise_track_filepath) + + # Create the noisy mixes (once for each unique SNR value). + noisy_mix_filepaths[noise_track_name] = {} + for snr in snr_values: + noisy_signal_filepath = os.path.join( + test_data_cache_path, + self._NOISY_SIGNAL_FILENAME_TEMPLATE.format( + noise_track_name, snr)) + + # Create and save if not done. + if not os.path.exists(noisy_signal_filepath): + # Create noisy signal. + noisy_signal = signal_processing.SignalProcessingUtils.MixSignals( + input_signal, + noise_signal, + snr, + pad_noise=signal_processing.SignalProcessingUtils. + MixPadding.LOOP) + + # Save. + signal_processing.SignalProcessingUtils.SaveWav( + noisy_signal_filepath, noisy_signal) + + # Add file to the collection of mixes. + noisy_mix_filepaths[noise_track_name][ + snr] = noisy_signal_filepath + + # Add all the noise-SNR pairs. + self._AddNoiseSnrPairs(base_output_path, noisy_mix_filepaths, + self._SNR_VALUE_PAIRS) @TestDataGenerator.RegisterClass class ReverberationTestDataGenerator(TestDataGenerator): - """Generator that adds reverberation noise. + """Generator that adds reverberation noise. TODO(alessiob): Make this class more generic since the impulse response can be anything (not just reverberation); call it e.g., ConvolutionalNoiseTestDataGenerator. """ - NAME = 'reverberation' + NAME = 'reverberation' - _IMPULSE_RESPONSES = { - 'lecture': 'air_binaural_lecture_0_0_1.mat', # Long echo. 
- 'booth': 'air_binaural_booth_0_0_1.mat', # Short echo. - } - _MAX_IMPULSE_RESPONSE_LENGTH = None + _IMPULSE_RESPONSES = { + 'lecture': 'air_binaural_lecture_0_0_1.mat', # Long echo. + 'booth': 'air_binaural_booth_0_0_1.mat', # Short echo. + } + _MAX_IMPULSE_RESPONSE_LENGTH = None - # Each pair indicates the clean vs. noisy and reference vs. noisy SNRs. - # The reference (second value of each pair) always has a lower amount of noise - # - i.e., the SNR is 5 dB higher. - _SNR_VALUE_PAIRS = [ - [3, 8], # Smallest noise. - [-3, 2], # Largest noise. - ] + # Each pair indicates the clean vs. noisy and reference vs. noisy SNRs. + # The reference (second value of each pair) always has a lower amount of noise + # - i.e., the SNR is 5 dB higher. + _SNR_VALUE_PAIRS = [ + [3, 8], # Smallest noise. + [-3, 2], # Largest noise. + ] - _NOISE_TRACK_FILENAME_TEMPLATE = '{0}.wav' - _NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav' + _NOISE_TRACK_FILENAME_TEMPLATE = '{0}.wav' + _NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav' - def __init__(self, output_directory_prefix, aechen_ir_database_path): - TestDataGenerator.__init__(self, output_directory_prefix) - self._aechen_ir_database_path = aechen_ir_database_path + def __init__(self, output_directory_prefix, aechen_ir_database_path): + TestDataGenerator.__init__(self, output_directory_prefix) + self._aechen_ir_database_path = aechen_ir_database_path - def _Generate( - self, input_signal_filepath, test_data_cache_path, base_output_path): - """Generates test data pairs using reverberation noise. + def _Generate(self, input_signal_filepath, test_data_cache_path, + base_output_path): + """Generates test data pairs using reverberation noise. For each impulse response, one noise track is created. For each impulse response and pair of SNR values, the following 2 audio tracks are @@ -424,61 +434,64 @@ def _Generate( obtained by mixing the (clean) input signal to the corresponding noise track enforcing the target SNR. """ - # Init. 
- snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair]) - - # Load the input signal. - input_signal = signal_processing.SignalProcessingUtils.LoadWav( - input_signal_filepath) - - noisy_mix_filepaths = {} - for impulse_response_name in self._IMPULSE_RESPONSES: - noise_track_filename = self._NOISE_TRACK_FILENAME_TEMPLATE.format( - impulse_response_name) - noise_track_filepath = os.path.join( - test_data_cache_path, noise_track_filename) - noise_signal = None - try: - # Load noise track. - noise_signal = signal_processing.SignalProcessingUtils.LoadWav( - noise_track_filepath) - except exceptions.FileNotFoundError: - # Generate noise track by applying the impulse response. - impulse_response_filepath = os.path.join( - self._aechen_ir_database_path, - self._IMPULSE_RESPONSES[impulse_response_name]) - noise_signal = self._GenerateNoiseTrack( - noise_track_filepath, input_signal, impulse_response_filepath) - assert noise_signal is not None - - # Create the noisy mixes (once for each unique SNR value). - noisy_mix_filepaths[impulse_response_name] = {} - for snr in snr_values: - noisy_signal_filepath = os.path.join( - test_data_cache_path, - self._NOISY_SIGNAL_FILENAME_TEMPLATE.format( - impulse_response_name, snr)) - - # Create and save if not done. - if not os.path.exists(noisy_signal_filepath): - # Create noisy signal. - noisy_signal = signal_processing.SignalProcessingUtils.MixSignals( - input_signal, noise_signal, snr) - - # Save. - signal_processing.SignalProcessingUtils.SaveWav( - noisy_signal_filepath, noisy_signal) - - # Add file to the collection of mixes. - noisy_mix_filepaths[impulse_response_name][snr] = noisy_signal_filepath - - # Add all the noise-SNR pairs. - self._AddNoiseSnrPairs(base_output_path, noisy_mix_filepaths, - self._SNR_VALUE_PAIRS) - - def _GenerateNoiseTrack(self, noise_track_filepath, input_signal, + # Init. + snr_values = set( + [snr for pair in self._SNR_VALUE_PAIRS for snr in pair]) + + # Load the input signal. 
+ input_signal = signal_processing.SignalProcessingUtils.LoadWav( + input_signal_filepath) + + noisy_mix_filepaths = {} + for impulse_response_name in self._IMPULSE_RESPONSES: + noise_track_filename = self._NOISE_TRACK_FILENAME_TEMPLATE.format( + impulse_response_name) + noise_track_filepath = os.path.join(test_data_cache_path, + noise_track_filename) + noise_signal = None + try: + # Load noise track. + noise_signal = signal_processing.SignalProcessingUtils.LoadWav( + noise_track_filepath) + except exceptions.FileNotFoundError: + # Generate noise track by applying the impulse response. + impulse_response_filepath = os.path.join( + self._aechen_ir_database_path, + self._IMPULSE_RESPONSES[impulse_response_name]) + noise_signal = self._GenerateNoiseTrack( + noise_track_filepath, input_signal, + impulse_response_filepath) + assert noise_signal is not None + + # Create the noisy mixes (once for each unique SNR value). + noisy_mix_filepaths[impulse_response_name] = {} + for snr in snr_values: + noisy_signal_filepath = os.path.join( + test_data_cache_path, + self._NOISY_SIGNAL_FILENAME_TEMPLATE.format( + impulse_response_name, snr)) + + # Create and save if not done. + if not os.path.exists(noisy_signal_filepath): + # Create noisy signal. + noisy_signal = signal_processing.SignalProcessingUtils.MixSignals( + input_signal, noise_signal, snr) + + # Save. + signal_processing.SignalProcessingUtils.SaveWav( + noisy_signal_filepath, noisy_signal) + + # Add file to the collection of mixes. + noisy_mix_filepaths[impulse_response_name][ + snr] = noisy_signal_filepath + + # Add all the noise-SNR pairs. + self._AddNoiseSnrPairs(base_output_path, noisy_mix_filepaths, + self._SNR_VALUE_PAIRS) + + def _GenerateNoiseTrack(self, noise_track_filepath, input_signal, impulse_response_filepath): - """Generates noise track. + """Generates noise track. Generate a signal by convolving input_signal with the impulse response in impulse_response_filepath; then save to noise_track_filepath. 
@@ -491,21 +504,23 @@ def _GenerateNoiseTrack(self, noise_track_filepath, input_signal, Returns: AudioSegment instance. """ - # Load impulse response. - data = scipy.io.loadmat(impulse_response_filepath) - impulse_response = data['h_air'].flatten() - if self._MAX_IMPULSE_RESPONSE_LENGTH is not None: - logging.info('truncating impulse response from %d to %d samples', - len(impulse_response), self._MAX_IMPULSE_RESPONSE_LENGTH) - impulse_response = impulse_response[:self._MAX_IMPULSE_RESPONSE_LENGTH] - - # Apply impulse response. - processed_signal = ( - signal_processing.SignalProcessingUtils.ApplyImpulseResponse( - input_signal, impulse_response)) - - # Save. - signal_processing.SignalProcessingUtils.SaveWav( - noise_track_filepath, processed_signal) - - return processed_signal + # Load impulse response. + data = scipy.io.loadmat(impulse_response_filepath) + impulse_response = data['h_air'].flatten() + if self._MAX_IMPULSE_RESPONSE_LENGTH is not None: + logging.info('truncating impulse response from %d to %d samples', + len(impulse_response), + self._MAX_IMPULSE_RESPONSE_LENGTH) + impulse_response = impulse_response[:self. + _MAX_IMPULSE_RESPONSE_LENGTH] + + # Apply impulse response. + processed_signal = ( + signal_processing.SignalProcessingUtils.ApplyImpulseResponse( + input_signal, impulse_response)) + + # Save. + signal_processing.SignalProcessingUtils.SaveWav( + noise_track_filepath, processed_signal) + + return processed_signal diff --git a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_factory.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_factory.py index c80d150228..948888e775 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_factory.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_factory.py @@ -5,7 +5,6 @@ # tree. 
An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """TestDataGenerator factory class. """ @@ -16,15 +15,15 @@ class TestDataGeneratorFactory(object): - """Factory class used to create test data generators. + """Factory class used to create test data generators. Usage: Create a factory passing parameters to the ctor with which the generators will be produced. """ - def __init__(self, aechen_ir_database_path, noise_tracks_path, - copy_with_identity): - """Ctor. + def __init__(self, aechen_ir_database_path, noise_tracks_path, + copy_with_identity): + """Ctor. Args: aechen_ir_database_path: Path to the Aechen Impulse Response database. @@ -32,16 +31,16 @@ def __init__(self, aechen_ir_database_path, noise_tracks_path, copy_with_identity: Flag indicating whether the identity generator has to make copies of the clean speech input files. """ - self._output_directory_prefix = None - self._aechen_ir_database_path = aechen_ir_database_path - self._noise_tracks_path = noise_tracks_path - self._copy_with_identity = copy_with_identity + self._output_directory_prefix = None + self._aechen_ir_database_path = aechen_ir_database_path + self._noise_tracks_path = noise_tracks_path + self._copy_with_identity = copy_with_identity - def SetOutputDirectoryPrefix(self, prefix): - self._output_directory_prefix = prefix + def SetOutputDirectoryPrefix(self, prefix): + self._output_directory_prefix = prefix - def GetInstance(self, test_data_generators_class): - """Creates an TestDataGenerator instance given a class object. + def GetInstance(self, test_data_generators_class): + """Creates an TestDataGenerator instance given a class object. Args: test_data_generators_class: TestDataGenerator class object (not an @@ -50,22 +49,23 @@ def GetInstance(self, test_data_generators_class): Returns: TestDataGenerator instance. 
""" - if self._output_directory_prefix is None: - raise exceptions.InitializationException( - 'The output directory prefix for test data generators is not set') - logging.debug('factory producing %s', test_data_generators_class) + if self._output_directory_prefix is None: + raise exceptions.InitializationException( + 'The output directory prefix for test data generators is not set' + ) + logging.debug('factory producing %s', test_data_generators_class) - if test_data_generators_class == ( - test_data_generation.IdentityTestDataGenerator): - return test_data_generation.IdentityTestDataGenerator( - self._output_directory_prefix, self._copy_with_identity) - elif test_data_generators_class == ( - test_data_generation.ReverberationTestDataGenerator): - return test_data_generation.ReverberationTestDataGenerator( - self._output_directory_prefix, self._aechen_ir_database_path) - elif test_data_generators_class == ( - test_data_generation.AdditiveNoiseTestDataGenerator): - return test_data_generation.AdditiveNoiseTestDataGenerator( - self._output_directory_prefix, self._noise_tracks_path) - else: - return test_data_generators_class(self._output_directory_prefix) + if test_data_generators_class == ( + test_data_generation.IdentityTestDataGenerator): + return test_data_generation.IdentityTestDataGenerator( + self._output_directory_prefix, self._copy_with_identity) + elif test_data_generators_class == ( + test_data_generation.ReverberationTestDataGenerator): + return test_data_generation.ReverberationTestDataGenerator( + self._output_directory_prefix, self._aechen_ir_database_path) + elif test_data_generators_class == ( + test_data_generation.AdditiveNoiseTestDataGenerator): + return test_data_generation.AdditiveNoiseTestDataGenerator( + self._output_directory_prefix, self._noise_tracks_path) + else: + return test_data_generators_class(self._output_directory_prefix) diff --git 
a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py index b0d003dbe8..6d0cb79f5b 100644 --- a/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py +++ b/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Unit tests for the test_data_generation module. """ @@ -23,141 +22,143 @@ class TestTestDataGenerators(unittest.TestCase): - """Unit tests for the test_data_generation module. + """Unit tests for the test_data_generation module. """ - def setUp(self): - """Create temporary folders.""" - self._base_output_path = tempfile.mkdtemp() - self._test_data_cache_path = tempfile.mkdtemp() - self._fake_air_db_path = tempfile.mkdtemp() - - # Fake AIR DB impulse responses. - # TODO(alessiob): ReverberationTestDataGenerator will change to allow custom - # impulse responses. When changed, the coupling below between - # impulse_response_mat_file_names and - # ReverberationTestDataGenerator._IMPULSE_RESPONSES can be removed. 
- impulse_response_mat_file_names = [ - 'air_binaural_lecture_0_0_1.mat', - 'air_binaural_booth_0_0_1.mat', - ] - for impulse_response_mat_file_name in impulse_response_mat_file_names: - data = {'h_air': np.random.rand(1, 1000).astype('SetRuntimeSetting( + AudioProcessing::RuntimeSetting::CreateCaptureOutputUsedSetting( + setting.capture_output_used())); } } } // namespace webrtc diff --git a/modules/audio_processing/test/test_utils.cc b/modules/audio_processing/test/test_utils.cc index 37a20cee1e..839358d497 100644 --- a/modules/audio_processing/test/test_utils.cc +++ b/modules/audio_processing/test/test_utils.cc @@ -146,8 +146,7 @@ AudioProcessing::ChannelLayout LayoutFromChannels(size_t num_channels) { case 2: return AudioProcessing::kStereo; default: - RTC_CHECK(false); - return AudioProcessing::kMono; + RTC_CHECK_NOTREACHED(); } } diff --git a/modules/audio_processing/test/wav_based_simulator.cc b/modules/audio_processing/test/wav_based_simulator.cc index 75946fb3fa..e6a6fe92eb 100644 --- a/modules/audio_processing/test/wav_based_simulator.cc +++ b/modules/audio_processing/test/wav_based_simulator.cc @@ -14,6 +14,7 @@ #include +#include "modules/audio_processing/logging/apm_data_dumper.h" #include "modules/audio_processing/test/test_utils.h" #include "rtc_base/checks.h" #include "rtc_base/system/file_wrapper.h" @@ -43,8 +44,9 @@ WavBasedSimulator::GetCustomEventChain(const std::string& filename) { case '\n': break; default: - FATAL() << "Incorrect custom call order file, reverting to using the " - "default call order"; + RTC_FATAL() + << "Incorrect custom call order file, reverting to using the " + << "default call order"; return WavBasedSimulator::GetDefaultEventChain(); } @@ -105,12 +107,15 @@ void WavBasedSimulator::Process() { bool samples_left_to_process = true; int call_chain_index = 0; - int num_forward_chunks_processed = 0; + int capture_frames_since_init = 0; + constexpr int kInitIndex = 1; while (samples_left_to_process) { switch 
(call_chain_[call_chain_index]) { case SimulationEventType::kProcessStream: + SelectivelyToggleDataDumping(kInitIndex, capture_frames_since_init); + samples_left_to_process = HandleProcessStreamCall(); - ++num_forward_chunks_processed; + ++capture_frames_since_init; break; case SimulationEventType::kProcessReverseStream: if (settings_.reverse_input_filename) { @@ -118,7 +123,7 @@ void WavBasedSimulator::Process() { } break; default: - RTC_CHECK(false); + RTC_CHECK_NOTREACHED(); } call_chain_index = (call_chain_index + 1) % call_chain_.size(); @@ -127,6 +132,14 @@ void WavBasedSimulator::Process() { DetachAecDump(); } +void WavBasedSimulator::Analyze() { + std::cout << "Inits:" << std::endl; + std::cout << "1: -->" << std::endl; + std::cout << " Time:" << std::endl; + std::cout << " Capture: 0 s (0 frames) " << std::endl; + std::cout << " Render: 0 s (0 frames)" << std::endl; +} + bool WavBasedSimulator::HandleProcessStreamCall() { bool samples_left_to_process = buffer_reader_->Read(in_buf_.get()); if (samples_left_to_process) { diff --git a/modules/audio_processing/test/wav_based_simulator.h b/modules/audio_processing/test/wav_based_simulator.h index 3adbe7022c..ff88fd5535 100644 --- a/modules/audio_processing/test/wav_based_simulator.h +++ b/modules/audio_processing/test/wav_based_simulator.h @@ -14,7 +14,6 @@ #include #include "modules/audio_processing/test/audio_processing_simulator.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { namespace test { @@ -25,11 +24,20 @@ class WavBasedSimulator final : public AudioProcessingSimulator { WavBasedSimulator(const SimulationSettings& settings, rtc::scoped_refptr audio_processing, std::unique_ptr ap_builder); + + WavBasedSimulator() = delete; + WavBasedSimulator(const WavBasedSimulator&) = delete; + WavBasedSimulator& operator=(const WavBasedSimulator&) = delete; + ~WavBasedSimulator() override; // Processes the WAV input. 
void Process() override; + // Only analyzes the data for the simulation, instead of perform any + // processing. + void Analyze() override; + private: enum SimulationEventType { kProcessStream, @@ -46,8 +54,6 @@ class WavBasedSimulator final : public AudioProcessingSimulator { const std::string& filename); std::vector call_chain_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WavBasedSimulator); }; } // namespace test diff --git a/modules/audio_processing/transient/BUILD.gn b/modules/audio_processing/transient/BUILD.gn index 13e319f88e..5f9a13969a 100644 --- a/modules/audio_processing/transient/BUILD.gn +++ b/modules/audio_processing/transient/BUILD.gn @@ -14,10 +14,10 @@ rtc_source_set("transient_suppressor_api") { rtc_library("transient_suppressor_impl") { visibility = [ - "..:optionally_built_submodule_creators", + ":click_annotate", ":transient_suppression_test", ":transient_suppression_unittests", - ":click_annotate", + "..:optionally_built_submodule_creators", ] sources = [ "common.h", @@ -49,42 +49,44 @@ rtc_library("transient_suppressor_impl") { } if (rtc_include_tests) { - rtc_executable("click_annotate") { - testonly = true - sources = [ - "click_annotate.cc", - "file_utils.cc", - "file_utils.h", - ] - deps = [ - ":transient_suppressor_impl", - "..:audio_processing", - "../../../rtc_base/system:file_wrapper", - "../../../system_wrappers", - ] - } + if (!build_with_chromium) { + rtc_executable("click_annotate") { + testonly = true + sources = [ + "click_annotate.cc", + "file_utils.cc", + "file_utils.h", + ] + deps = [ + ":transient_suppressor_impl", + "..:audio_processing", + "../../../rtc_base/system:file_wrapper", + "../../../system_wrappers", + ] + } - rtc_executable("transient_suppression_test") { - testonly = true - sources = [ - "file_utils.cc", - "file_utils.h", - "transient_suppression_test.cc", - ] - deps = [ - ":transient_suppressor_impl", - "..:audio_processing", - "../../../common_audio", - "../../../rtc_base:rtc_base_approved", - 
"../../../rtc_base/system:file_wrapper", - "../../../system_wrappers", - "../../../test:fileutils", - "../../../test:test_support", - "../agc:level_estimation", - "//testing/gtest", - "//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", - ] + rtc_executable("transient_suppression_test") { + testonly = true + sources = [ + "file_utils.cc", + "file_utils.h", + "transient_suppression_test.cc", + ] + deps = [ + ":transient_suppressor_impl", + "..:audio_processing", + "../../../common_audio", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base/system:file_wrapper", + "../../../system_wrappers", + "../../../test:fileutils", + "../../../test:test_support", + "../agc:level_estimation", + "//testing/gtest", + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/flags:parse", + ] + } } rtc_library("transient_suppression_unittests") { diff --git a/modules/audio_processing/utility/cascaded_biquad_filter_unittest.cc b/modules/audio_processing/utility/cascaded_biquad_filter_unittest.cc index 989e362a49..ff7022dba4 100644 --- a/modules/audio_processing/utility/cascaded_biquad_filter_unittest.cc +++ b/modules/audio_processing/utility/cascaded_biquad_filter_unittest.cc @@ -103,7 +103,7 @@ TEST(CascadedBiquadFilter, TransparentConfiguration) { #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) // Verifies that the check of the lengths for the input and output works for the // non-in-place call. 
-TEST(CascadedBiquadFilter, InputSizeCheckVerification) { +TEST(CascadedBiquadFilterDeathTest, InputSizeCheckVerification) { const std::vector input = CreateInputWithIncreasingValues(10); std::vector output(input.size() - 1); diff --git a/modules/audio_processing/utility/pffft_wrapper_unittest.cc b/modules/audio_processing/utility/pffft_wrapper_unittest.cc index 9aed548934..2ad6849cd4 100644 --- a/modules/audio_processing/utility/pffft_wrapper_unittest.cc +++ b/modules/audio_processing/utility/pffft_wrapper_unittest.cc @@ -125,23 +125,24 @@ TEST(PffftTest, CreateWrapperWithValidSize) { #if !defined(NDEBUG) && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -class PffftInvalidSizeTest : public ::testing::Test, - public ::testing::WithParamInterface {}; +class PffftInvalidSizeDeathTest : public ::testing::Test, + public ::testing::WithParamInterface { +}; -TEST_P(PffftInvalidSizeTest, DoNotCreateRealWrapper) { +TEST_P(PffftInvalidSizeDeathTest, DoNotCreateRealWrapper) { size_t fft_size = GetParam(); ASSERT_FALSE(Pffft::IsValidFftSize(fft_size, Pffft::FftType::kReal)); EXPECT_DEATH(CreatePffftWrapper(fft_size, Pffft::FftType::kReal), ""); } -TEST_P(PffftInvalidSizeTest, DoNotCreateComplexWrapper) { +TEST_P(PffftInvalidSizeDeathTest, DoNotCreateComplexWrapper) { size_t fft_size = GetParam(); ASSERT_FALSE(Pffft::IsValidFftSize(fft_size, Pffft::FftType::kComplex)); EXPECT_DEATH(CreatePffftWrapper(fft_size, Pffft::FftType::kComplex), ""); } INSTANTIATE_TEST_SUITE_P(PffftTest, - PffftInvalidSizeTest, + PffftInvalidSizeDeathTest, ::testing::Values(17, 33, 65, diff --git a/modules/congestion_controller/BUILD.gn b/modules/congestion_controller/BUILD.gn index 6f2b853f8f..c0b064d9ed 100644 --- a/modules/congestion_controller/BUILD.gn +++ b/modules/congestion_controller/BUILD.gn @@ -22,12 +22,18 @@ rtc_library("congestion_controller") { sources = [ "include/receive_side_congestion_controller.h", "receive_side_congestion_controller.cc", + "remb_throttler.cc", + 
"remb_throttler.h", ] deps = [ "..:module_api", "../../api/transport:field_trial_based_config", "../../api/transport:network_control", + "../../api/units:data_rate", + "../../api/units:time_delta", + "../../api/units:timestamp", + "../../rtc_base/synchronization:mutex", "../pacing", "../remote_bitrate_estimator", "../rtp_rtcp:rtp_rtcp_format", @@ -38,13 +44,21 @@ rtc_library("congestion_controller") { } } -if (rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { rtc_library("congestion_controller_unittests") { testonly = true - sources = [ "receive_side_congestion_controller_unittest.cc" ] + sources = [ + "receive_side_congestion_controller_unittest.cc", + "remb_throttler_unittest.cc", + ] deps = [ ":congestion_controller", + "../../api/test/network_emulation", + "../../api/test/network_emulation:create_cross_traffic", + "../../api/units:data_rate", + "../../api/units:time_delta", + "../../api/units:timestamp", "../../system_wrappers", "../../test:test_support", "../../test/scenario", diff --git a/modules/congestion_controller/goog_cc/BUILD.gn b/modules/congestion_controller/goog_cc/BUILD.gn index fa95bc186c..ea20da87a3 100644 --- a/modules/congestion_controller/goog_cc/BUILD.gn +++ b/modules/congestion_controller/goog_cc/BUILD.gn @@ -31,7 +31,6 @@ rtc_library("goog_cc") { ":probe_controller", ":pushback_controller", "../..:module_api", - "../../..:webrtc_common", "../../../api:network_state_predictor_api", "../../../api/rtc_event_log", "../../../api/transport:field_trial_based_config", @@ -45,12 +44,13 @@ rtc_library("goog_cc") { "../../../logging:rtc_event_pacing", "../../../rtc_base:checks", "../../../rtc_base:logging", - "../../../rtc_base:macromagic", "../../../rtc_base/experiments:alr_experiment", "../../../rtc_base/experiments:field_trial_parser", "../../../rtc_base/experiments:rate_control_settings", "../../../system_wrappers", "../../remote_bitrate_estimator", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", 
"//third_party/abseil-cpp/absl/types:optional", ] @@ -64,8 +64,8 @@ rtc_library("link_capacity_estimator") { deps = [ "../../../api/units:data_rate", "../../../rtc_base:safe_minmax", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("pushback_controller") { @@ -79,6 +79,8 @@ rtc_library("pushback_controller") { "../../../api/units:data_size", "../../../rtc_base:checks", "../../../rtc_base/experiments:rate_control_settings", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -100,8 +102,8 @@ rtc_library("alr_detector") { "../../../rtc_base/experiments:alr_experiment", "../../../rtc_base/experiments:field_trial_parser", "../../pacing:interval_budget", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("estimators") { configs += [ ":bwe_test_logging" ] @@ -137,6 +139,8 @@ rtc_library("estimators") { "../../../rtc_base:safe_minmax", "../../../rtc_base/experiments:field_trial_parser", "../../remote_bitrate_estimator", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -153,6 +157,7 @@ rtc_library("loss_based_controller") { deps = [ "../../../api/rtc_event_log", "../../../api/transport:network_control", + "../../../api/transport:webrtc_key_value_config", "../../../api/units:data_rate", "../../../api/units:time_delta", "../../../api/units:timestamp", @@ -163,6 +168,8 @@ rtc_library("loss_based_controller") { "../../../system_wrappers:field_trial", "../../../system_wrappers:metrics", "../../remote_bitrate_estimator", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -173,6 +180,8 @@ rtc_library("delay_based_bwe") { sources = [ "delay_based_bwe.cc", "delay_based_bwe.h", + "inter_arrival_delta.cc", + "inter_arrival_delta.h", ] 
deps = [ @@ -180,8 +189,9 @@ rtc_library("delay_based_bwe") { "../../../api:network_state_predictor_api", "../../../api/rtc_event_log", "../../../api/transport:network_control", - "../../../api/transport:network_control", "../../../api/transport:webrtc_key_value_config", + "../../../api/units:time_delta", + "../../../api/units:timestamp", "../../../logging:rtc_event_bwe", "../../../rtc_base:checks", "../../../rtc_base:rtc_base_approved", @@ -189,6 +199,8 @@ rtc_library("delay_based_bwe") { "../../../system_wrappers:metrics", "../../pacing", "../../remote_bitrate_estimator", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -214,8 +226,10 @@ rtc_library("probe_controller") { "../../../rtc_base:macromagic", "../../../rtc_base:safe_conversions", "../../../rtc_base/experiments:field_trial_parser", - "../../../rtc_base/system:unused", "../../../system_wrappers:metrics", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -240,53 +254,58 @@ if (rtc_include_tests) { "../../../rtc_base:checks", "../../../test/logging:log_writer", "../../remote_bitrate_estimator", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } - rtc_library("goog_cc_unittests") { - testonly = true + if (!build_with_chromium) { + rtc_library("goog_cc_unittests") { + testonly = true - sources = [ - "acknowledged_bitrate_estimator_unittest.cc", - "alr_detector_unittest.cc", - "congestion_window_pushback_controller_unittest.cc", - "delay_based_bwe_unittest.cc", - "delay_based_bwe_unittest_helper.cc", - "delay_based_bwe_unittest_helper.h", - "goog_cc_network_control_unittest.cc", - "probe_bitrate_estimator_unittest.cc", - "probe_controller_unittest.cc", - "robust_throughput_estimator_unittest.cc", - "send_side_bandwidth_estimation_unittest.cc", - 
"trendline_estimator_unittest.cc", - ] - deps = [ - ":alr_detector", - ":delay_based_bwe", - ":estimators", - ":goog_cc", - ":loss_based_controller", - ":probe_controller", - ":pushback_controller", - "../../../api/rtc_event_log", - "../../../api/transport:field_trial_based_config", - "../../../api/transport:goog_cc", - "../../../api/transport:network_control", - "../../../api/transport:webrtc_key_value_config", - "../../../api/units:data_rate", - "../../../api/units:timestamp", - "../../../logging:mocks", - "../../../logging:rtc_event_bwe", - "../../../rtc_base:checks", - "../../../rtc_base:rtc_base_approved", - "../../../rtc_base:rtc_base_tests_utils", - "../../../rtc_base/experiments:alr_experiment", - "../../../system_wrappers", - "../../../test:field_trial", - "../../../test:test_support", - "../../../test/scenario", - "../../pacing", - "//testing/gmock", - ] + sources = [ + "acknowledged_bitrate_estimator_unittest.cc", + "alr_detector_unittest.cc", + "congestion_window_pushback_controller_unittest.cc", + "delay_based_bwe_unittest.cc", + "delay_based_bwe_unittest_helper.cc", + "delay_based_bwe_unittest_helper.h", + "goog_cc_network_control_unittest.cc", + "probe_bitrate_estimator_unittest.cc", + "probe_controller_unittest.cc", + "robust_throughput_estimator_unittest.cc", + "send_side_bandwidth_estimation_unittest.cc", + "trendline_estimator_unittest.cc", + ] + deps = [ + ":alr_detector", + ":delay_based_bwe", + ":estimators", + ":goog_cc", + ":loss_based_controller", + ":probe_controller", + ":pushback_controller", + "../../../api/rtc_event_log", + "../../../api/test/network_emulation", + "../../../api/test/network_emulation:create_cross_traffic", + "../../../api/transport:field_trial_based_config", + "../../../api/transport:goog_cc", + "../../../api/transport:network_control", + "../../../api/transport:webrtc_key_value_config", + "../../../api/units:data_rate", + "../../../api/units:timestamp", + "../../../logging:mocks", + "../../../logging:rtc_event_bwe", + 
"../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base:rtc_base_tests_utils", + "../../../rtc_base/experiments:alr_experiment", + "../../../system_wrappers", + "../../../test:explicit_key_value_config", + "../../../test:field_trial", + "../../../test:test_support", + "../../../test/scenario", + "../../pacing", + "//testing/gmock", + ] + } } } diff --git a/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_unittest.cc b/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_unittest.cc index 9031c5d272..e5b733b119 100644 --- a/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_unittest.cc +++ b/modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator_unittest.cc @@ -35,10 +35,12 @@ constexpr size_t kPayloadSize = 10; class MockBitrateEstimator : public BitrateEstimator { public: using BitrateEstimator::BitrateEstimator; - MOCK_METHOD3(Update, - void(Timestamp at_time, DataSize data_size, bool in_alr)); - MOCK_CONST_METHOD0(bitrate, absl::optional()); - MOCK_METHOD0(ExpectFastRateChange, void()); + MOCK_METHOD(void, + Update, + (Timestamp at_time, DataSize data_size, bool in_alr), + (override)); + MOCK_METHOD(absl::optional, bitrate, (), (const, override)); + MOCK_METHOD(void, ExpectFastRateChange, (), (override)); }; struct AcknowledgedBitrateEstimatorTestStates { diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe.cc b/modules/congestion_controller/goog_cc/delay_based_bwe.cc index 1c02301284..185b09d8ab 100644 --- a/modules/congestion_controller/goog_cc/delay_based_bwe.cc +++ b/modules/congestion_controller/goog_cc/delay_based_bwe.cc @@ -20,8 +20,10 @@ #include "absl/strings/match.h" #include "api/rtc_event_log/rtc_event.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/units/time_delta.h" #include "logging/rtc_event_log/events/rtc_event_bwe_update_delay_based.h" #include "modules/congestion_controller/goog_cc/trendline_estimator.h" +#include 
"modules/remote_bitrate_estimator/include/bwe_defines.h" #include "modules/remote_bitrate_estimator/test/bwe_test_logging.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" @@ -30,6 +32,11 @@ namespace webrtc { namespace { constexpr TimeDelta kStreamTimeOut = TimeDelta::Seconds(2); + +// Used with field trial "WebRTC-Bwe-NewInterArrivalDelta/Enabled/ +constexpr TimeDelta kSendTimeGroupLength = TimeDelta::Millis(5); + +// Used unless field trial "WebRTC-Bwe-NewInterArrivalDelta/Enabled/" constexpr int kTimestampGroupLengthMs = 5; constexpr int kAbsSendTimeFraction = 18; constexpr int kAbsSendTimeInterArrivalUpshift = 8; @@ -45,23 +52,8 @@ constexpr double kTimestampToMs = constexpr uint32_t kFixedSsrc = 0; } // namespace -constexpr char BweIgnoreSmallPacketsSettings::kKey[]; constexpr char BweSeparateAudioPacketsSettings::kKey[]; -BweIgnoreSmallPacketsSettings::BweIgnoreSmallPacketsSettings( - const WebRtcKeyValueConfig* key_value_config) { - Parser()->Parse( - key_value_config->Lookup(BweIgnoreSmallPacketsSettings::kKey)); -} - -std::unique_ptr -BweIgnoreSmallPacketsSettings::Parser() { - return StructParametersParser::Create("smoothing", &smoothing_factor, // - "fraction_large", &fraction_large, // - "large", &large_threshold, // - "small", &small_threshold); -} - BweSeparateAudioPacketsSettings::BweSeparateAudioPacketsSettings( const WebRtcKeyValueConfig* key_value_config) { Parser()->Parse( @@ -83,28 +75,17 @@ DelayBasedBwe::Result::Result() recovered_from_overuse(false), backoff_in_alr(false) {} -DelayBasedBwe::Result::Result(bool probe, DataRate target_bitrate) - : updated(true), - probe(probe), - target_bitrate(target_bitrate), - recovered_from_overuse(false), - backoff_in_alr(false) {} - DelayBasedBwe::DelayBasedBwe(const WebRtcKeyValueConfig* key_value_config, RtcEventLog* event_log, NetworkStatePredictor* network_state_predictor) : event_log_(event_log), key_value_config_(key_value_config), - ignore_small_(key_value_config), - 
fraction_large_packets_(0.5), separate_audio_(key_value_config), audio_packets_since_last_video_(0), last_video_packet_recv_time_(Timestamp::MinusInfinity()), network_state_predictor_(network_state_predictor), - video_inter_arrival_(), video_delay_detector_( new TrendlineEstimator(key_value_config_, network_state_predictor_)), - audio_inter_arrival_(), audio_delay_detector_( new TrendlineEstimator(key_value_config_, network_state_predictor_)), active_delay_detector_(video_delay_detector_.get()), @@ -114,15 +95,16 @@ DelayBasedBwe::DelayBasedBwe(const WebRtcKeyValueConfig* key_value_config, prev_bitrate_(DataRate::Zero()), has_once_detected_overuse_(false), prev_state_(BandwidthUsage::kBwNormal), + use_new_inter_arrival_delta_(absl::StartsWith( + key_value_config->Lookup("WebRTC-Bwe-NewInterArrivalDelta"), + "Enabled")), alr_limited_backoff_enabled_(absl::StartsWith( key_value_config->Lookup("WebRTC-Bwe-AlrLimitedBackoff"), "Enabled")) { - RTC_LOG(LS_INFO) << "Initialized DelayBasedBwe with small packet filtering " - << ignore_small_.Parser()->Encode() - << ", separate audio overuse detection" - << separate_audio_.Parser()->Encode() - << " and alr limited backoff " - << (alr_limited_backoff_enabled_ ? "enabled" : "disabled"); + RTC_LOG(LS_INFO) + << "Initialized DelayBasedBwe with separate audio overuse detection" + << separate_audio_.Parser()->Encode() << " and alr limited backoff " + << (alr_limited_backoff_enabled_ ? "enabled" : "disabled"); } DelayBasedBwe::~DelayBasedBwe() {} @@ -180,42 +162,31 @@ void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback, // Reset if the stream has timed out. 
if (last_seen_packet_.IsInfinite() || at_time - last_seen_packet_ > kStreamTimeOut) { - video_inter_arrival_.reset( - new InterArrival(kTimestampGroupTicks, kTimestampToMs, true)); + if (use_new_inter_arrival_delta_) { + video_inter_arrival_delta_ = + std::make_unique(kSendTimeGroupLength); + audio_inter_arrival_delta_ = + std::make_unique(kSendTimeGroupLength); + } else { + video_inter_arrival_ = std::make_unique( + kTimestampGroupTicks, kTimestampToMs, true); + audio_inter_arrival_ = std::make_unique( + kTimestampGroupTicks, kTimestampToMs, true); + } video_delay_detector_.reset( new TrendlineEstimator(key_value_config_, network_state_predictor_)); - audio_inter_arrival_.reset( - new InterArrival(kTimestampGroupTicks, kTimestampToMs, true)); audio_delay_detector_.reset( new TrendlineEstimator(key_value_config_, network_state_predictor_)); active_delay_detector_ = video_delay_detector_.get(); } last_seen_packet_ = at_time; - // Ignore "small" packets if many/most packets in the call are "large". The - // packet size may have a significant effect on the propagation delay, - // especially at low bandwidths. Variations in packet size will then show up - // as noise in the delay measurement. By default, we include all packets. - DataSize packet_size = packet_feedback.sent_packet.size; - if (!ignore_small_.small_threshold.IsZero()) { - double is_large = - static_cast(packet_size >= ignore_small_.large_threshold); - fraction_large_packets_ += - ignore_small_.smoothing_factor * (is_large - fraction_large_packets_); - if (packet_size <= ignore_small_.small_threshold && - fraction_large_packets_ >= ignore_small_.fraction_large) { - return; - } - } - // As an alternative to ignoring small packets, we can separate audio and // video packets for overuse detection. 
- InterArrival* inter_arrival_for_packet = video_inter_arrival_.get(); DelayIncreaseDetectorInterface* delay_detector_for_packet = video_delay_detector_.get(); if (separate_audio_.enabled) { if (packet_feedback.sent_packet.audio) { - inter_arrival_for_packet = audio_inter_arrival_.get(); delay_detector_for_packet = audio_delay_detector_.get(); audio_packets_since_last_video_++; if (audio_packets_since_last_video_ > separate_audio_.packet_threshold && @@ -230,29 +201,59 @@ void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback, active_delay_detector_ = video_delay_detector_.get(); } } + DataSize packet_size = packet_feedback.sent_packet.size; - uint32_t send_time_24bits = - static_cast( - ((static_cast(packet_feedback.sent_packet.send_time.ms()) - << kAbsSendTimeFraction) + - 500) / - 1000) & - 0x00FFFFFF; - // Shift up send time to use the full 32 bits that inter_arrival works with, - // so wrapping works properly. - uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift; - - uint32_t timestamp_delta = 0; - int64_t recv_delta_ms = 0; - int size_delta = 0; - bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas( - timestamp, packet_feedback.receive_time.ms(), at_time.ms(), - packet_size.bytes(), ×tamp_delta, &recv_delta_ms, &size_delta); - double send_delta_ms = (1000.0 * timestamp_delta) / (1 << kInterArrivalShift); - delay_detector_for_packet->Update(recv_delta_ms, send_delta_ms, - packet_feedback.sent_packet.send_time.ms(), - packet_feedback.receive_time.ms(), - packet_size.bytes(), calculated_deltas); + if (use_new_inter_arrival_delta_) { + TimeDelta send_delta = TimeDelta::Zero(); + TimeDelta recv_delta = TimeDelta::Zero(); + int size_delta = 0; + + InterArrivalDelta* inter_arrival_for_packet = + (separate_audio_.enabled && packet_feedback.sent_packet.audio) + ? 
video_inter_arrival_delta_.get() + : audio_inter_arrival_delta_.get(); + bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas( + packet_feedback.sent_packet.send_time, packet_feedback.receive_time, + at_time, packet_size.bytes(), &send_delta, &recv_delta, &size_delta); + + delay_detector_for_packet->Update( + recv_delta.ms(), send_delta.ms(), + packet_feedback.sent_packet.send_time.ms(), + packet_feedback.receive_time.ms(), packet_size.bytes(), + calculated_deltas); + } else { + InterArrival* inter_arrival_for_packet = + (separate_audio_.enabled && packet_feedback.sent_packet.audio) + ? video_inter_arrival_.get() + : audio_inter_arrival_.get(); + + uint32_t send_time_24bits = + static_cast( + ((static_cast(packet_feedback.sent_packet.send_time.ms()) + << kAbsSendTimeFraction) + + 500) / + 1000) & + 0x00FFFFFF; + // Shift up send time to use the full 32 bits that inter_arrival works with, + // so wrapping works properly. + uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift; + + uint32_t timestamp_delta = 0; + int64_t recv_delta_ms = 0; + int size_delta = 0; + + bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas( + timestamp, packet_feedback.receive_time.ms(), at_time.ms(), + packet_size.bytes(), ×tamp_delta, &recv_delta_ms, &size_delta); + double send_delta_ms = + (1000.0 * timestamp_delta) / (1 << kInterArrivalShift); + + delay_detector_for_packet->Update( + recv_delta_ms, send_delta_ms, + packet_feedback.sent_packet.send_time.ms(), + packet_feedback.receive_time.ms(), packet_size.bytes(), + calculated_deltas); + } } DataRate DelayBasedBwe::TriggerOveruse(Timestamp at_time, diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe.h b/modules/congestion_controller/goog_cc/delay_based_bwe.h index 25f5a3be72..85ce6eaa82 100644 --- a/modules/congestion_controller/goog_cc/delay_based_bwe.h +++ b/modules/congestion_controller/goog_cc/delay_based_bwe.h @@ -22,32 +22,16 @@ #include "api/transport/network_types.h" 
#include "api/transport/webrtc_key_value_config.h" #include "modules/congestion_controller/goog_cc/delay_increase_detector_interface.h" +#include "modules/congestion_controller/goog_cc/inter_arrival_delta.h" #include "modules/congestion_controller/goog_cc/probe_bitrate_estimator.h" #include "modules/remote_bitrate_estimator/aimd_rate_control.h" -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "modules/remote_bitrate_estimator/inter_arrival.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/experiments/struct_parameters_parser.h" #include "rtc_base/race_checker.h" namespace webrtc { class RtcEventLog; -struct BweIgnoreSmallPacketsSettings { - static constexpr char kKey[] = "WebRTC-BweIgnoreSmallPacketsFix"; - - BweIgnoreSmallPacketsSettings() = default; - explicit BweIgnoreSmallPacketsSettings( - const WebRtcKeyValueConfig* key_value_config); - - double smoothing_factor = 0.1; - double fraction_large = 1.0; - DataSize large_threshold = DataSize::Zero(); - DataSize small_threshold = DataSize::Zero(); - - std::unique_ptr Parser(); -}; - struct BweSeparateAudioPacketsSettings { static constexpr char kKey[] = "WebRTC-Bwe-SeparateAudioPackets"; @@ -66,7 +50,6 @@ class DelayBasedBwe { public: struct Result { Result(); - Result(bool probe, DataRate target_bitrate); ~Result() = default; bool updated; bool probe; @@ -78,6 +61,11 @@ class DelayBasedBwe { explicit DelayBasedBwe(const WebRtcKeyValueConfig* key_value_config, RtcEventLog* event_log, NetworkStatePredictor* network_state_predictor); + + DelayBasedBwe() = delete; + DelayBasedBwe(const DelayBasedBwe&) = delete; + DelayBasedBwe& operator=(const DelayBasedBwe&) = delete; + virtual ~DelayBasedBwe(); Result IncomingPacketFeedbackVector( @@ -109,19 +97,14 @@ class DelayBasedBwe { Timestamp at_time); // Updates the current remote rate estimate and returns true if a valid // estimate exists. 
- bool UpdateEstimate(Timestamp now, + bool UpdateEstimate(Timestamp at_time, absl::optional acked_bitrate, - DataRate* target_bitrate); + DataRate* target_rate); rtc::RaceChecker network_race_; RtcEventLog* const event_log_; const WebRtcKeyValueConfig* const key_value_config_; - // Filtering out small packets. Intention is to base the detection only - // on video packets even if we have TWCC sequence numbers for audio. - BweIgnoreSmallPacketsSettings ignore_small_; - double fraction_large_packets_; - // Alternatively, run two separate overuse detectors for audio and video, // and fall back to the audio one if we haven't seen a video packet in a // while. @@ -131,8 +114,10 @@ class DelayBasedBwe { NetworkStatePredictor* network_state_predictor_; std::unique_ptr video_inter_arrival_; + std::unique_ptr video_inter_arrival_delta_; std::unique_ptr video_delay_detector_; std::unique_ptr audio_inter_arrival_; + std::unique_ptr audio_inter_arrival_delta_; std::unique_ptr audio_delay_detector_; DelayIncreaseDetectorInterface* active_delay_detector_; @@ -142,8 +127,8 @@ class DelayBasedBwe { DataRate prev_bitrate_; bool has_once_detected_overuse_; BandwidthUsage prev_state_; + const bool use_new_inter_arrival_delta_; bool alr_limited_backoff_enabled_; - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DelayBasedBwe); }; } // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc index 7860c3d84d..06345c4d9b 100644 --- a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc +++ b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest.cc @@ -10,6 +10,8 @@ #include "modules/congestion_controller/goog_cc/delay_based_bwe.h" +#include + #include "api/transport/network_types.h" #include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h" #include "modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h" @@ -26,7 +28,15 @@ const 
PacedPacketInfo kPacingInfo1(1, kNumProbesCluster1, 4000); constexpr float kTargetUtilizationFraction = 0.95f; } // namespace -TEST_F(DelayBasedBweTest, ProbeDetection) { +INSTANTIATE_TEST_SUITE_P( + , + DelayBasedBweTest, + ::testing::Values("", "WebRTC-Bwe-NewInterArrivalDelta/Enabled/"), + [](::testing::TestParamInfo info) { + return info.param == "" ? "Default" : "NewInterArrival"; + }); + +TEST_P(DelayBasedBweTest, ProbeDetection) { int64_t now_ms = clock_.TimeInMilliseconds(); // First burst sent at 8 * 1000 / 10 = 800 kbps. @@ -48,7 +58,7 @@ TEST_F(DelayBasedBweTest, ProbeDetection) { EXPECT_GT(bitrate_observer_.latest_bitrate(), 1500000u); } -TEST_F(DelayBasedBweTest, ProbeDetectionNonPacedPackets) { +TEST_P(DelayBasedBweTest, ProbeDetectionNonPacedPackets) { int64_t now_ms = clock_.TimeInMilliseconds(); // First burst sent at 8 * 1000 / 10 = 800 kbps, but with every other packet // not being paced which could mess things up. @@ -65,7 +75,7 @@ TEST_F(DelayBasedBweTest, ProbeDetectionNonPacedPackets) { EXPECT_GT(bitrate_observer_.latest_bitrate(), 800000u); } -TEST_F(DelayBasedBweTest, ProbeDetectionFasterArrival) { +TEST_P(DelayBasedBweTest, ProbeDetectionFasterArrival) { int64_t now_ms = clock_.TimeInMilliseconds(); // First burst sent at 8 * 1000 / 10 = 800 kbps. // Arriving at 8 * 1000 / 5 = 1600 kbps. @@ -80,7 +90,7 @@ TEST_F(DelayBasedBweTest, ProbeDetectionFasterArrival) { EXPECT_FALSE(bitrate_observer_.updated()); } -TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrival) { +TEST_P(DelayBasedBweTest, ProbeDetectionSlowerArrival) { int64_t now_ms = clock_.TimeInMilliseconds(); // First burst sent at 8 * 1000 / 5 = 1600 kbps. // Arriving at 8 * 1000 / 7 = 1142 kbps. 
@@ -99,7 +109,7 @@ TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrival) { kTargetUtilizationFraction * 1140000u, 10000u); } -TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) { +TEST_P(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) { int64_t now_ms = clock_.TimeInMilliseconds(); // Burst sent at 8 * 1000 / 1 = 8000 kbps. // Arriving at 8 * 1000 / 2 = 4000 kbps. @@ -118,7 +128,7 @@ TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) { kTargetUtilizationFraction * 4000000u, 10000u); } -TEST_F(DelayBasedBweTest, GetExpectedBwePeriodMs) { +TEST_P(DelayBasedBweTest, GetExpectedBwePeriodMs) { auto default_interval = bitrate_estimator_->GetExpectedBwePeriod(); EXPECT_GT(default_interval.ms(), 0); CapacityDropTestHelper(1, true, 333, 0); @@ -127,45 +137,45 @@ TEST_F(DelayBasedBweTest, GetExpectedBwePeriodMs) { EXPECT_NE(interval.ms(), default_interval.ms()); } -TEST_F(DelayBasedBweTest, InitialBehavior) { +TEST_P(DelayBasedBweTest, InitialBehavior) { InitialBehaviorTestHelper(730000); } -TEST_F(DelayBasedBweTest, RateIncreaseReordering) { +TEST_P(DelayBasedBweTest, RateIncreaseReordering) { RateIncreaseReorderingTestHelper(730000); } -TEST_F(DelayBasedBweTest, RateIncreaseRtpTimestamps) { +TEST_P(DelayBasedBweTest, RateIncreaseRtpTimestamps) { RateIncreaseRtpTimestampsTestHelper(622); } -TEST_F(DelayBasedBweTest, CapacityDropOneStream) { +TEST_P(DelayBasedBweTest, CapacityDropOneStream) { CapacityDropTestHelper(1, false, 300, 0); } -TEST_F(DelayBasedBweTest, CapacityDropPosOffsetChange) { +TEST_P(DelayBasedBweTest, CapacityDropPosOffsetChange) { CapacityDropTestHelper(1, false, 867, 30000); } -TEST_F(DelayBasedBweTest, CapacityDropNegOffsetChange) { +TEST_P(DelayBasedBweTest, CapacityDropNegOffsetChange) { CapacityDropTestHelper(1, false, 933, -30000); } -TEST_F(DelayBasedBweTest, CapacityDropOneStreamWrap) { +TEST_P(DelayBasedBweTest, CapacityDropOneStreamWrap) { CapacityDropTestHelper(1, true, 333, 0); } 
-TEST_F(DelayBasedBweTest, TestTimestampGrouping) { +TEST_P(DelayBasedBweTest, TestTimestampGrouping) { TestTimestampGroupingTestHelper(); } -TEST_F(DelayBasedBweTest, TestShortTimeoutAndWrap) { +TEST_P(DelayBasedBweTest, TestShortTimeoutAndWrap) { // Simulate a client leaving and rejoining the call after 35 seconds. This // will make abs send time wrap, so if streams aren't timed out properly // the next 30 seconds of packets will be out of order. TestWrappingHelper(35); } -TEST_F(DelayBasedBweTest, TestLongTimeoutAndWrap) { +TEST_P(DelayBasedBweTest, TestLongTimeoutAndWrap) { // Simulate a client leaving and rejoining the call after some multiple of // 64 seconds later. This will cause a zero difference in abs send times due // to the wrap, but a big difference in arrival time, if streams aren't @@ -173,7 +183,7 @@ TEST_F(DelayBasedBweTest, TestLongTimeoutAndWrap) { TestWrappingHelper(10 * 64); } -TEST_F(DelayBasedBweTest, TestInitialOveruse) { +TEST_P(DelayBasedBweTest, TestInitialOveruse) { const DataRate kStartBitrate = DataRate::KilobitsPerSec(300); const DataRate kInitialCapacity = DataRate::KilobitsPerSec(200); const uint32_t kDummySsrc = 0; @@ -213,15 +223,16 @@ TEST_F(DelayBasedBweTest, TestInitialOveruse) { } class DelayBasedBweTestWithBackoffTimeoutExperiment : public DelayBasedBweTest { - public: - DelayBasedBweTestWithBackoffTimeoutExperiment() - : DelayBasedBweTest( - "WebRTC-BweAimdRateControlConfig/initial_backoff_interval:200ms/") { - } }; +INSTANTIATE_TEST_SUITE_P( + , + DelayBasedBweTestWithBackoffTimeoutExperiment, + ::testing::Values( + "WebRTC-BweAimdRateControlConfig/initial_backoff_interval:200ms/")); + // This test subsumes and improves DelayBasedBweTest.TestInitialOveruse above. 
-TEST_F(DelayBasedBweTestWithBackoffTimeoutExperiment, TestInitialOveruse) { +TEST_P(DelayBasedBweTestWithBackoffTimeoutExperiment, TestInitialOveruse) { const DataRate kStartBitrate = DataRate::KilobitsPerSec(300); const DataRate kInitialCapacity = DataRate::KilobitsPerSec(200); const uint32_t kDummySsrc = 0; diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc index 14bac1e455..946805ab8a 100644 --- a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc +++ b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.cc @@ -146,21 +146,7 @@ int64_t StreamGenerator::GenerateFrame(std::vector* packets, } // namespace test DelayBasedBweTest::DelayBasedBweTest() - : field_trial(), - clock_(100000000), - acknowledged_bitrate_estimator_( - AcknowledgedBitrateEstimatorInterface::Create(&field_trial_config_)), - probe_bitrate_estimator_(new ProbeBitrateEstimator(nullptr)), - bitrate_estimator_( - new DelayBasedBwe(&field_trial_config_, nullptr, nullptr)), - stream_generator_(new test::StreamGenerator(1e6, // Capacity. 
- clock_.TimeInMicroseconds())), - arrival_time_offset_ms_(0), - first_update_(true) {} - -DelayBasedBweTest::DelayBasedBweTest(const std::string& field_trial_string) - : field_trial( - std::make_unique(field_trial_string)), + : field_trial(std::make_unique(GetParam())), clock_(100000000), acknowledged_bitrate_estimator_( AcknowledgedBitrateEstimatorInterface::Create(&field_trial_config_)), diff --git a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h index 608cd6bc70..24e558c2d7 100644 --- a/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h +++ b/modules/congestion_controller/goog_cc/delay_based_bwe_unittest_helper.h @@ -113,10 +113,9 @@ class StreamGenerator { }; } // namespace test -class DelayBasedBweTest : public ::testing::Test { +class DelayBasedBweTest : public ::testing::TestWithParam { public: DelayBasedBweTest(); - explicit DelayBasedBweTest(const std::string& field_trial_string); ~DelayBasedBweTest() override; protected: @@ -176,9 +175,8 @@ class DelayBasedBweTest : public ::testing::Test { std::unique_ptr stream_generator_; int64_t arrival_time_offset_ms_; bool first_update_; - - RTC_DISALLOW_COPY_AND_ASSIGN(DelayBasedBweTest); }; + } // namespace webrtc #endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_DELAY_BASED_BWE_UNITTEST_HELPER_H_ diff --git a/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h b/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h index 8fe3f669bb..eaadb0d124 100644 --- a/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h +++ b/modules/congestion_controller/goog_cc/delay_increase_detector_interface.h @@ -12,7 +12,7 @@ #include -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" +#include "api/network_state_predictor.h" #include "rtc_base/constructor_magic.h" namespace webrtc { diff --git 
a/modules/congestion_controller/goog_cc/goog_cc_network_control.cc b/modules/congestion_controller/goog_cc/goog_cc_network_control.cc index b8be0982d6..2344f45a65 100644 --- a/modules/congestion_controller/goog_cc/goog_cc_network_control.cc +++ b/modules/congestion_controller/goog_cc/goog_cc_network_control.cc @@ -96,7 +96,8 @@ GoogCcNetworkController::GoogCcNetworkController(NetworkControllerConfig config, key_value_config_) : nullptr), bandwidth_estimation_( - std::make_unique(event_log_)), + std::make_unique(key_value_config_, + event_log_)), alr_detector_( std::make_unique(key_value_config_, config.event_log)), probe_bitrate_estimator_(new ProbeBitrateEstimator(config.event_log)), @@ -464,7 +465,7 @@ NetworkControlUpdate GoogCcNetworkController::OnTransportPacketsFeedback( expected_packets_since_last_loss_update_ += report.PacketsWithFeedback().size(); for (const auto& packet_feedback : report.PacketsWithFeedback()) { - if (packet_feedback.receive_time.IsInfinite()) + if (!packet_feedback.IsReceived()) lost_packets_since_last_loss_update_ += 1; } if (report.feedback_time > next_loss_update_) { diff --git a/modules/congestion_controller/goog_cc/goog_cc_network_control.h b/modules/congestion_controller/goog_cc/goog_cc_network_control.h index 1e4dcf62e1..6dd70c8969 100644 --- a/modules/congestion_controller/goog_cc/goog_cc_network_control.h +++ b/modules/congestion_controller/goog_cc/goog_cc_network_control.h @@ -33,7 +33,6 @@ #include "modules/congestion_controller/goog_cc/delay_based_bwe.h" #include "modules/congestion_controller/goog_cc/probe_controller.h" #include "modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/experiments/rate_control_settings.h" @@ -48,6 +47,11 @@ class GoogCcNetworkController : public NetworkControllerInterface { public: GoogCcNetworkController(NetworkControllerConfig config, GoogCcConfig goog_cc_config); 
+ + GoogCcNetworkController() = delete; + GoogCcNetworkController(const GoogCcNetworkController&) = delete; + GoogCcNetworkController& operator=(const GoogCcNetworkController&) = delete; + ~GoogCcNetworkController() override; // NetworkControllerInterface @@ -137,8 +141,6 @@ class GoogCcNetworkController : public NetworkControllerInterface { bool previously_in_alr_ = false; absl::optional current_data_window_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(GoogCcNetworkController); }; } // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc b/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc index 361da92ff2..7e8d7b9ac6 100644 --- a/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc +++ b/modules/congestion_controller/goog_cc/goog_cc_network_control_unittest.cc @@ -10,7 +10,10 @@ #include +#include "api/test/network_emulation/create_cross_traffic.h" +#include "api/test/network_emulation/cross_traffic.h" #include "api/transport/goog_cc_factory.h" +#include "api/units/data_rate.h" #include "logging/rtc_event_log/mock/mock_rtc_event_log.h" #include "test/field_trial.h" #include "test/gtest.h" @@ -121,6 +124,35 @@ void UpdatesTargetRateBasedOnLinkCapacity(std::string test_name = "") { truth->PrintRow(); EXPECT_NEAR(client->target_rate().kbps(), 90, 25); } + +DataRate RunRembDipScenario(std::string test_name) { + Scenario s(test_name); + NetworkSimulationConfig net_conf; + net_conf.bandwidth = DataRate::KilobitsPerSec(2000); + net_conf.delay = TimeDelta::Millis(50); + auto* client = s.CreateClient("send", [&](CallClientConfig* c) { + c->transport.rates.start_rate = DataRate::KilobitsPerSec(1000); + }); + auto send_net = {s.CreateSimulationNode(net_conf)}; + auto ret_net = {s.CreateSimulationNode(net_conf)}; + auto* route = s.CreateRoutes( + client, send_net, s.CreateClient("return", CallClientConfig()), ret_net); + s.CreateVideoStream(route->forward(), VideoStreamConfig()); + + 
s.RunFor(TimeDelta::Seconds(10)); + EXPECT_GT(client->send_bandwidth().kbps(), 1500); + + DataRate RembLimit = DataRate::KilobitsPerSec(250); + client->SetRemoteBitrate(RembLimit); + s.RunFor(TimeDelta::Seconds(1)); + EXPECT_EQ(client->send_bandwidth(), RembLimit); + + DataRate RembLimitLifted = DataRate::KilobitsPerSec(10000); + client->SetRemoteBitrate(RembLimitLifted); + s.RunFor(TimeDelta::Seconds(10)); + + return client->send_bandwidth(); +} } // namespace class GoogCcNetworkControllerTest : public ::testing::Test { @@ -546,8 +578,9 @@ DataRate AverageBitrateAfterCrossInducedLoss(std::string name) { s.RunFor(TimeDelta::Seconds(10)); for (int i = 0; i < 4; ++i) { // Sends TCP cross traffic inducing loss. - auto* tcp_traffic = - s.net()->StartFakeTcpCrossTraffic(send_net, ret_net, FakeTcpConfig()); + auto* tcp_traffic = s.net()->StartCrossTraffic(CreateFakeTcpCrossTraffic( + s.net()->CreateRoute(send_net), s.net()->CreateRoute(ret_net), + FakeTcpConfig())); s.RunFor(TimeDelta::Seconds(2)); // Allow the ccongestion controller to recover. s.net()->StopCrossTraffic(tcp_traffic); @@ -568,22 +601,21 @@ DataRate AverageBitrateAfterCrossInducedLoss(std::string name) { } TEST_F(GoogCcNetworkControllerTest, - NoLossBasedRecoversSlowerAfterCrossInducedLoss) { + LossBasedRecoversFasterAfterCrossInducedLoss) { // This test acts as a reference for the test below, showing that without the // trial, we have worse behavior. - DataRate average_bitrate = + DataRate average_bitrate_without_loss_based = AverageBitrateAfterCrossInducedLoss("googcc_unit/no_cross_loss_based"); - RTC_DCHECK_LE(average_bitrate, DataRate::KilobitsPerSec(650)); -} -TEST_F(GoogCcNetworkControllerTest, - LossBasedRecoversFasterAfterCrossInducedLoss) { // We recover bitrate better when subject to loss spikes from cross traffic // when loss based controller is used. 
ScopedFieldTrials trial("WebRTC-Bwe-LossBasedControl/Enabled/"); - DataRate average_bitrate = + SetUp(); + DataRate average_bitrate_with_loss_based = AverageBitrateAfterCrossInducedLoss("googcc_unit/cross_loss_based"); - RTC_DCHECK_GE(average_bitrate, DataRate::KilobitsPerSec(750)); + + EXPECT_GE(average_bitrate_with_loss_based, + average_bitrate_without_loss_based * 1.1); } TEST_F(GoogCcNetworkControllerTest, LossBasedEstimatorCapsRateAtModerateLoss) { @@ -698,7 +730,7 @@ TEST_F(GoogCcNetworkControllerTest, DetectsHighRateInSafeResetTrial) { {s.CreateSimulationNode(NetworkSimulationConfig())}); s.CreateVideoStream(route->forward(), VideoStreamConfig()); // Allow the controller to stabilize. - s.RunFor(TimeDelta::Millis(1000)); + s.RunFor(TimeDelta::Millis(2000)); EXPECT_NEAR(client->send_bandwidth().kbps(), kInitialLinkCapacity.kbps(), 50); s.ChangeRoute(route->forward(), {new_net}); // Allow new settings to propagate, but not probes to be received. @@ -836,7 +868,9 @@ TEST_F(GoogCcNetworkControllerTest, IsFairToTCP) { auto* route = s.CreateRoutes( client, send_net, s.CreateClient("return", CallClientConfig()), ret_net); s.CreateVideoStream(route->forward(), VideoStreamConfig()); - s.net()->StartFakeTcpCrossTraffic(send_net, ret_net, FakeTcpConfig()); + s.net()->StartCrossTraffic(CreateFakeTcpCrossTraffic( + s.net()->CreateRoute(send_net), s.net()->CreateRoute(ret_net), + FakeTcpConfig())); s.RunFor(TimeDelta::Seconds(10)); // Currently only testing for the upper limit as we in practice back out @@ -845,5 +879,18 @@ TEST_F(GoogCcNetworkControllerTest, IsFairToTCP) { EXPECT_LT(client->send_bandwidth().kbps(), 750); } +TEST(GoogCcScenario, FastRampupOnRembCapLiftedWithFieldTrial) { + ScopedFieldTrials trial("WebRTC-Bwe-ReceiverLimitCapsOnly/Enabled/"); + DataRate final_estimate = + RunRembDipScenario("googcc_unit/fast_rampup_on_remb_cap_lifted"); + EXPECT_GT(final_estimate.kbps(), 1500); +} + +TEST(GoogCcScenario, SlowRampupOnRembCapLifted) { + DataRate 
final_estimate = + RunRembDipScenario("googcc_unit/default_slow_rampup_on_remb_cap_lifted"); + EXPECT_LT(final_estimate.kbps(), 1000); +} + } // namespace test } // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/inter_arrival_delta.cc b/modules/congestion_controller/goog_cc/inter_arrival_delta.cc new file mode 100644 index 0000000000..791867db67 --- /dev/null +++ b/modules/congestion_controller/goog_cc/inter_arrival_delta.cc @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/congestion_controller/goog_cc/inter_arrival_delta.h" + +#include + +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +static constexpr TimeDelta kBurstDeltaThreshold = TimeDelta::Millis(5); +static constexpr TimeDelta kMaxBurstDuration = TimeDelta::Millis(100); +constexpr TimeDelta InterArrivalDelta::kArrivalTimeOffsetThreshold; + +InterArrivalDelta::InterArrivalDelta(TimeDelta send_time_group_length) + : send_time_group_length_(send_time_group_length), + current_timestamp_group_(), + prev_timestamp_group_(), + num_consecutive_reordered_packets_(0) {} + +bool InterArrivalDelta::ComputeDeltas(Timestamp send_time, + Timestamp arrival_time, + Timestamp system_time, + size_t packet_size, + TimeDelta* send_time_delta, + TimeDelta* arrival_time_delta, + int* packet_size_delta) { + bool calculated_deltas = false; + if (current_timestamp_group_.IsFirstPacket()) { + // We don't have enough data to update the filter, so we store it until we + // have two frames of data to process. 
+ current_timestamp_group_.send_time = send_time; + current_timestamp_group_.first_send_time = send_time; + current_timestamp_group_.first_arrival = arrival_time; + } else if (current_timestamp_group_.first_send_time > send_time) { + // Reordered packet. + return false; + } else if (NewTimestampGroup(arrival_time, send_time)) { + // First packet of a later send burst, the previous packets sample is ready. + if (prev_timestamp_group_.complete_time.IsFinite()) { + *send_time_delta = + current_timestamp_group_.send_time - prev_timestamp_group_.send_time; + *arrival_time_delta = current_timestamp_group_.complete_time - + prev_timestamp_group_.complete_time; + + TimeDelta system_time_delta = current_timestamp_group_.last_system_time - + prev_timestamp_group_.last_system_time; + + if (*arrival_time_delta - system_time_delta >= + kArrivalTimeOffsetThreshold) { + RTC_LOG(LS_WARNING) + << "The arrival time clock offset has changed (diff = " + << arrival_time_delta->ms() - system_time_delta.ms() + << " ms), resetting."; + Reset(); + return false; + } + if (*arrival_time_delta < TimeDelta::Zero()) { + // The group of packets has been reordered since receiving its local + // arrival timestamp. + ++num_consecutive_reordered_packets_; + if (num_consecutive_reordered_packets_ >= kReorderedResetThreshold) { + RTC_LOG(LS_WARNING) + << "Packets between send burst arrived out of order, resetting." + << " arrival_time_delta" << arrival_time_delta->ms() + << " send time delta " << send_time_delta->ms(); + Reset(); + } + return false; + } else { + num_consecutive_reordered_packets_ = 0; + } + *packet_size_delta = static_cast(current_timestamp_group_.size) - + static_cast(prev_timestamp_group_.size); + calculated_deltas = true; + } + prev_timestamp_group_ = current_timestamp_group_; + // The new timestamp is now the current frame. 
+ current_timestamp_group_.first_send_time = send_time; + current_timestamp_group_.send_time = send_time; + current_timestamp_group_.first_arrival = arrival_time; + current_timestamp_group_.size = 0; + } else { + current_timestamp_group_.send_time = + std::max(current_timestamp_group_.send_time, send_time); + } + // Accumulate the frame size. + current_timestamp_group_.size += packet_size; + current_timestamp_group_.complete_time = arrival_time; + current_timestamp_group_.last_system_time = system_time; + + return calculated_deltas; +} + +// Assumes that |timestamp| is not reordered compared to +// |current_timestamp_group_|. +bool InterArrivalDelta::NewTimestampGroup(Timestamp arrival_time, + Timestamp send_time) const { + if (current_timestamp_group_.IsFirstPacket()) { + return false; + } else if (BelongsToBurst(arrival_time, send_time)) { + return false; + } else { + return send_time - current_timestamp_group_.first_send_time > + send_time_group_length_; + } +} + +bool InterArrivalDelta::BelongsToBurst(Timestamp arrival_time, + Timestamp send_time) const { + RTC_DCHECK(current_timestamp_group_.complete_time.IsFinite()); + TimeDelta arrival_time_delta = + arrival_time - current_timestamp_group_.complete_time; + TimeDelta send_time_delta = send_time - current_timestamp_group_.send_time; + if (send_time_delta.IsZero()) + return true; + TimeDelta propagation_delta = arrival_time_delta - send_time_delta; + if (propagation_delta < TimeDelta::Zero() && + arrival_time_delta <= kBurstDeltaThreshold && + arrival_time - current_timestamp_group_.first_arrival < kMaxBurstDuration) + return true; + return false; +} + +void InterArrivalDelta::Reset() { + num_consecutive_reordered_packets_ = 0; + current_timestamp_group_ = SendTimeGroup(); + prev_timestamp_group_ = SendTimeGroup(); +} +} // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/inter_arrival_delta.h b/modules/congestion_controller/goog_cc/inter_arrival_delta.h new file mode 100644 index 
0000000000..28dc806249 --- /dev/null +++ b/modules/congestion_controller/goog_cc/inter_arrival_delta.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_CONGESTION_CONTROLLER_GOOG_CC_INTER_ARRIVAL_DELTA_H_ +#define MODULES_CONGESTION_CONTROLLER_GOOG_CC_INTER_ARRIVAL_DELTA_H_ + +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" + +namespace webrtc { + +// Helper class to compute the inter-arrival time delta and the size delta +// between two send bursts. This code is branched from +// modules/remote_bitrate_estimator/inter_arrival. +class InterArrivalDelta { + public: + // After this many packet groups received out of order InterArrival will + // reset, assuming that clocks have made a jump. + static constexpr int kReorderedResetThreshold = 3; + static constexpr TimeDelta kArrivalTimeOffsetThreshold = + TimeDelta::Seconds(3); + + // A send time group is defined as all packets with a send time which are at + // most send_time_group_length older than the first timestamp in that + // group. + explicit InterArrivalDelta(TimeDelta send_time_group_length); + + InterArrivalDelta() = delete; + InterArrivalDelta(const InterArrivalDelta&) = delete; + InterArrivalDelta& operator=(const InterArrivalDelta&) = delete; + + // This function returns true if a delta was computed, or false if the current + // group is still incomplete or if only one group has been completed. + // |send_time| is the send time. + // |arrival_time| is the time at which the packet arrived. + // |packet_size| is the size of the packet. 
+ // |send_time_delta| (output) is the computed send time delta. + // |arrival_time_delta| (output) is the computed arrival-time delta. + // |packet_size_delta| (output) is the computed size delta. + bool ComputeDeltas(Timestamp send_time, + Timestamp arrival_time, + Timestamp system_time, + size_t packet_size, + TimeDelta* send_time_delta, + TimeDelta* arrival_time_delta, + int* packet_size_delta); + + private: + struct SendTimeGroup { + SendTimeGroup() + : size(0), + first_send_time(Timestamp::MinusInfinity()), + send_time(Timestamp::MinusInfinity()), + first_arrival(Timestamp::MinusInfinity()), + complete_time(Timestamp::MinusInfinity()), + last_system_time(Timestamp::MinusInfinity()) {} + + bool IsFirstPacket() const { return complete_time.IsInfinite(); } + + size_t size; + Timestamp first_send_time; + Timestamp send_time; + Timestamp first_arrival; + Timestamp complete_time; + Timestamp last_system_time; + }; + + // Returns true if the last packet was the end of the current batch and the + // packet with |send_time| is the first of a new batch. 
+ bool NewTimestampGroup(Timestamp arrival_time, Timestamp send_time) const; + + bool BelongsToBurst(Timestamp arrival_time, Timestamp send_time) const; + + void Reset(); + + const TimeDelta send_time_group_length_; + SendTimeGroup current_timestamp_group_; + SendTimeGroup prev_timestamp_group_; + int num_consecutive_reordered_packets_; +}; +} // namespace webrtc + +#endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_INTER_ARRIVAL_DELTA_H_ diff --git a/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc b/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc index 1d2aab8521..c7f53c62f2 100644 --- a/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc +++ b/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.cc @@ -14,14 +14,18 @@ #include #include +#include "absl/strings/match.h" #include "api/units/data_rate.h" #include "api/units/time_delta.h" -#include "system_wrappers/include/field_trial.h" namespace webrtc { namespace { const char kBweLossBasedControl[] = "WebRTC-Bwe-LossBasedControl"; +// Expecting RTCP feedback to be sent with roughly 1s intervals, a 5s gap +// indicates a channel outage. +constexpr TimeDelta kMaxRtcpFeedbackInterval = TimeDelta::Millis(5000); + // Increase slower when RTT is high. double GetIncreaseFactor(const LossBasedControlConfig& config, TimeDelta rtt) { // Clamp the RTT @@ -32,7 +36,7 @@ double GetIncreaseFactor(const LossBasedControlConfig& config, TimeDelta rtt) { } auto rtt_range = config.increase_high_rtt.Get() - config.increase_low_rtt; if (rtt_range <= TimeDelta::Zero()) { - RTC_DCHECK(false); // Only on misconfiguration. + RTC_NOTREACHED(); // Only on misconfiguration. 
return config.min_increase_factor; } auto rtt_offset = rtt - config.increase_low_rtt; @@ -53,7 +57,7 @@ DataRate BitrateFromLoss(double loss, DataRate loss_bandwidth_balance, double exponent) { if (exponent <= 0) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return DataRate::Infinity(); } if (loss < 1e-5) @@ -65,16 +69,22 @@ double ExponentialUpdate(TimeDelta window, TimeDelta interval) { // Use the convention that exponential window length (which is really // infinite) is the time it takes to dampen to 1/e. if (window <= TimeDelta::Zero()) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return 1.0f; } return 1.0f - exp(interval / window * -1.0); } +bool IsEnabled(const webrtc::WebRtcKeyValueConfig& key_value_config, + absl::string_view name) { + return absl::StartsWith(key_value_config.Lookup(name), "Enabled"); +} + } // namespace -LossBasedControlConfig::LossBasedControlConfig() - : enabled(field_trial::IsEnabled(kBweLossBasedControl)), +LossBasedControlConfig::LossBasedControlConfig( + const WebRtcKeyValueConfig* key_value_config) + : enabled(IsEnabled(*key_value_config, kBweLossBasedControl)), min_increase_factor("min_incr", 1.02), max_increase_factor("max_incr", 1.08), increase_low_rtt("incr_low_rtt", TimeDelta::Millis(200)), @@ -88,26 +98,28 @@ LossBasedControlConfig::LossBasedControlConfig() DataRate::KilobitsPerSec(0.5)), loss_bandwidth_balance_decrease("balance_decr", DataRate::KilobitsPerSec(4)), + loss_bandwidth_balance_reset("balance_reset", + DataRate::KilobitsPerSec(0.1)), loss_bandwidth_balance_exponent("exponent", 0.5), allow_resets("resets", false), decrease_interval("decr_intvl", TimeDelta::Millis(300)), loss_report_timeout("timeout", TimeDelta::Millis(6000)) { - std::string trial_string = field_trial::FindFullName(kBweLossBasedControl); ParseFieldTrial( {&min_increase_factor, &max_increase_factor, &increase_low_rtt, &increase_high_rtt, &decrease_factor, &loss_window, &loss_max_window, &acknowledged_rate_max_window, &increase_offset, 
&loss_bandwidth_balance_increase, &loss_bandwidth_balance_decrease, - &loss_bandwidth_balance_exponent, &allow_resets, &decrease_interval, - &loss_report_timeout}, - trial_string); + &loss_bandwidth_balance_reset, &loss_bandwidth_balance_exponent, + &allow_resets, &decrease_interval, &loss_report_timeout}, + key_value_config->Lookup(kBweLossBasedControl)); } LossBasedControlConfig::LossBasedControlConfig(const LossBasedControlConfig&) = default; LossBasedControlConfig::~LossBasedControlConfig() = default; -LossBasedBandwidthEstimation::LossBasedBandwidthEstimation() - : config_(LossBasedControlConfig()), +LossBasedBandwidthEstimation::LossBasedBandwidthEstimation( + const WebRtcKeyValueConfig* key_value_config) + : config_(key_value_config), average_loss_(0), average_loss_max_(0), loss_based_bitrate_(DataRate::Zero()), @@ -122,12 +134,12 @@ void LossBasedBandwidthEstimation::UpdateLossStatistics( const std::vector& packet_results, Timestamp at_time) { if (packet_results.empty()) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return; } int loss_count = 0; for (const auto& pkt : packet_results) { - loss_count += pkt.receive_time.IsInfinite() ? 1 : 0; + loss_count += !pkt.IsReceived() ? 1 : 0; } last_loss_ratio_ = static_cast(loss_count) / packet_results.size(); const TimeDelta time_passed = last_loss_packet_report_.IsFinite() @@ -164,9 +176,14 @@ void LossBasedBandwidthEstimation::UpdateAcknowledgedBitrate( } } -void LossBasedBandwidthEstimation::Update(Timestamp at_time, - DataRate min_bitrate, - TimeDelta last_round_trip_time) { +DataRate LossBasedBandwidthEstimation::Update(Timestamp at_time, + DataRate min_bitrate, + DataRate wanted_bitrate, + TimeDelta last_round_trip_time) { + if (loss_based_bitrate_.IsZero()) { + loss_based_bitrate_ = wanted_bitrate; + } + // Only increase if loss has been low for some time. const double loss_estimate_for_increase = average_loss_max_; // Avoid multiple decreases from averaging over one loss spike. 
@@ -176,8 +193,15 @@ void LossBasedBandwidthEstimation::Update(Timestamp at_time, !has_decreased_since_last_loss_report_ && (at_time - time_last_decrease_ >= last_round_trip_time + config_.decrease_interval); + // If packet loss reports are too old, don't increase bitrate. + const bool loss_report_valid = + at_time - last_loss_packet_report_ < 1.2 * kMaxRtcpFeedbackInterval; - if (loss_estimate_for_increase < loss_increase_threshold()) { + if (loss_report_valid && config_.allow_resets && + loss_estimate_for_increase < loss_reset_threshold()) { + loss_based_bitrate_ = wanted_bitrate; + } else if (loss_report_valid && + loss_estimate_for_increase < loss_increase_threshold()) { // Increase bitrate by RTT-adaptive ratio. DataRate new_increased_bitrate = min_bitrate * GetIncreaseFactor(config_, last_round_trip_time) + @@ -203,14 +227,21 @@ void LossBasedBandwidthEstimation::Update(Timestamp at_time, loss_based_bitrate_ = new_decreased_bitrate; } } + return loss_based_bitrate_; } -void LossBasedBandwidthEstimation::Reset(DataRate bitrate) { +void LossBasedBandwidthEstimation::Initialize(DataRate bitrate) { loss_based_bitrate_ = bitrate; average_loss_ = 0; average_loss_max_ = 0; } +double LossBasedBandwidthEstimation::loss_reset_threshold() const { + return LossFromBitrate(loss_based_bitrate_, + config_.loss_bandwidth_balance_reset, + config_.loss_bandwidth_balance_exponent); +} + double LossBasedBandwidthEstimation::loss_increase_threshold() const { return LossFromBitrate(loss_based_bitrate_, config_.loss_bandwidth_balance_increase, @@ -226,14 +257,4 @@ double LossBasedBandwidthEstimation::loss_decrease_threshold() const { DataRate LossBasedBandwidthEstimation::decreased_bitrate() const { return config_.decrease_factor * acknowledged_bitrate_max_; } - -void LossBasedBandwidthEstimation::MaybeReset(DataRate bitrate) { - if (config_.allow_resets) - Reset(bitrate); -} - -void LossBasedBandwidthEstimation::SetInitialBitrate(DataRate bitrate) { - Reset(bitrate); -} - } // 
namespace webrtc diff --git a/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h b/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h index b63363cadd..20ff092e6f 100644 --- a/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h +++ b/modules/congestion_controller/goog_cc/loss_based_bandwidth_estimation.h @@ -14,6 +14,7 @@ #include #include "api/transport/network_types.h" +#include "api/transport/webrtc_key_value_config.h" #include "api/units/data_rate.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" @@ -22,7 +23,7 @@ namespace webrtc { struct LossBasedControlConfig { - LossBasedControlConfig(); + explicit LossBasedControlConfig(const WebRtcKeyValueConfig* key_value_config); LossBasedControlConfig(const LossBasedControlConfig&); LossBasedControlConfig& operator=(const LossBasedControlConfig&) = default; ~LossBasedControlConfig(); @@ -38,23 +39,34 @@ struct LossBasedControlConfig { FieldTrialParameter increase_offset; FieldTrialParameter loss_bandwidth_balance_increase; FieldTrialParameter loss_bandwidth_balance_decrease; + FieldTrialParameter loss_bandwidth_balance_reset; FieldTrialParameter loss_bandwidth_balance_exponent; FieldTrialParameter allow_resets; FieldTrialParameter decrease_interval; FieldTrialParameter loss_report_timeout; }; +// Estimates an upper BWE limit based on loss. +// It requires knowledge about lost packets and acknowledged bitrate. +// I.e., this class requires transport feedback. class LossBasedBandwidthEstimation { public: - LossBasedBandwidthEstimation(); - void Update(Timestamp at_time, - DataRate min_bitrate, - TimeDelta last_round_trip_time); + explicit LossBasedBandwidthEstimation( + const WebRtcKeyValueConfig* key_value_config); + // Returns the new estimate.
+ DataRate Update(Timestamp at_time, + DataRate min_bitrate, + DataRate wanted_bitrate, + TimeDelta last_round_trip_time); void UpdateAcknowledgedBitrate(DataRate acknowledged_bitrate, Timestamp at_time); - void MaybeReset(DataRate bitrate); - void SetInitialBitrate(DataRate bitrate); + void Initialize(DataRate bitrate); bool Enabled() const { return config_.enabled; } + // Returns true if LossBasedBandwidthEstimation is enabled and has + // received loss statistics. I.e., this class requires transport feedback. + bool InUse() const { + return Enabled() && last_loss_packet_report_.IsFinite(); + } void UpdateLossStatistics(const std::vector& packet_results, Timestamp at_time); DataRate GetEstimate() const { return loss_based_bitrate_; } @@ -64,9 +76,11 @@ class LossBasedBandwidthEstimation { void Reset(DataRate bitrate); double loss_increase_threshold() const; double loss_decrease_threshold() const; + double loss_reset_threshold() const; + DataRate decreased_bitrate() const; - LossBasedControlConfig config_; + const LossBasedControlConfig config_; double average_loss_; double average_loss_max_; DataRate loss_based_bitrate_; diff --git a/modules/congestion_controller/goog_cc/probe_controller.h b/modules/congestion_controller/goog_cc/probe_controller.h index 11e92b97ae..bcaa293209 100644 --- a/modules/congestion_controller/goog_cc/probe_controller.h +++ b/modules/congestion_controller/goog_cc/probe_controller.h @@ -16,6 +16,7 @@ #include #include +#include "absl/base/attributes.h" #include "absl/types/optional.h" #include "api/rtc_event_log/rtc_event_log.h" #include "api/transport/network_control.h" @@ -23,7 +24,6 @@ #include "api/units/data_rate.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/experiments/field_trial_parser.h" -#include "rtc_base/system/unused.h" namespace webrtc { @@ -63,7 +63,7 @@ class ProbeController { RtcEventLog* event_log); ~ProbeController(); - RTC_WARN_UNUSED_RESULT std::vector SetBitrates( + ABSL_MUST_USE_RESULT std::vector
SetBitrates( int64_t min_bitrate_bps, int64_t start_bitrate_bps, int64_t max_bitrate_bps, @@ -71,14 +71,14 @@ class ProbeController { // The total bitrate, as opposed to the max bitrate, is the sum of the // configured bitrates for all active streams. - RTC_WARN_UNUSED_RESULT std::vector + ABSL_MUST_USE_RESULT std::vector OnMaxTotalAllocatedBitrate(int64_t max_total_allocated_bitrate, int64_t at_time_ms); - RTC_WARN_UNUSED_RESULT std::vector OnNetworkAvailability( + ABSL_MUST_USE_RESULT std::vector OnNetworkAvailability( NetworkAvailability msg); - RTC_WARN_UNUSED_RESULT std::vector SetEstimatedBitrate( + ABSL_MUST_USE_RESULT std::vector SetEstimatedBitrate( int64_t bitrate_bps, int64_t at_time_ms); @@ -87,7 +87,7 @@ class ProbeController { void SetAlrStartTimeMs(absl::optional alr_start_time); void SetAlrEndedTimeMs(int64_t alr_end_time); - RTC_WARN_UNUSED_RESULT std::vector RequestProbe( + ABSL_MUST_USE_RESULT std::vector RequestProbe( int64_t at_time_ms); // Sets a new maximum probing bitrate, without generating a new probe cluster. @@ -97,7 +97,7 @@ class ProbeController { // created EXCEPT for |enable_periodic_alr_probing_|. 
void Reset(int64_t at_time_ms); - RTC_WARN_UNUSED_RESULT std::vector Process( + ABSL_MUST_USE_RESULT std::vector Process( int64_t at_time_ms); private: @@ -110,9 +110,9 @@ class ProbeController { kProbingComplete, }; - RTC_WARN_UNUSED_RESULT std::vector + ABSL_MUST_USE_RESULT std::vector InitiateExponentialProbing(int64_t at_time_ms); - RTC_WARN_UNUSED_RESULT std::vector InitiateProbing( + ABSL_MUST_USE_RESULT std::vector InitiateProbing( int64_t now_ms, std::vector bitrates_to_probe, bool probe_further); diff --git a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc index d2ae528404..c5f51df99b 100644 --- a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc +++ b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.cc @@ -19,6 +19,9 @@ #include "absl/strings/match.h" #include "api/rtc_event_log/rtc_event.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/transport/webrtc_key_value_config.h" +#include "api/units/data_rate.h" +#include "api/units/time_delta.h" #include "logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h" #include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "rtc_base/checks.h" @@ -153,19 +156,24 @@ DataRate LinkCapacityTracker::estimate() const { return DataRate::BitsPerSec(capacity_estimate_bps_); } -RttBasedBackoff::RttBasedBackoff() - : rtt_limit_("limit", TimeDelta::Seconds(3)), +RttBasedBackoff::RttBasedBackoff(const WebRtcKeyValueConfig* key_value_config) + : disabled_("Disabled"), + configured_limit_("limit", TimeDelta::Seconds(3)), drop_fraction_("fraction", 0.8), drop_interval_("interval", TimeDelta::Seconds(1)), bandwidth_floor_("floor", DataRate::KilobitsPerSec(5)), + rtt_limit_(TimeDelta::PlusInfinity()), // By initializing this to plus infinity, we make sure that we never // trigger rtt backoff unless packet feedback is enabled. 
last_propagation_rtt_update_(Timestamp::PlusInfinity()), last_propagation_rtt_(TimeDelta::Zero()), last_packet_sent_(Timestamp::MinusInfinity()) { - ParseFieldTrial( - {&rtt_limit_, &drop_fraction_, &drop_interval_, &bandwidth_floor_}, - field_trial::FindFullName("WebRTC-Bwe-MaxRttLimit")); + ParseFieldTrial({&disabled_, &configured_limit_, &drop_fraction_, + &drop_interval_, &bandwidth_floor_}, + key_value_config->Lookup("WebRTC-Bwe-MaxRttLimit")); + if (!disabled_) { + rtt_limit_ = configured_limit_.Get(); + } } void RttBasedBackoff::UpdatePropagationRtt(Timestamp at_time, @@ -186,8 +194,11 @@ TimeDelta RttBasedBackoff::CorrectedRtt(Timestamp at_time) const { RttBasedBackoff::~RttBasedBackoff() = default; -SendSideBandwidthEstimation::SendSideBandwidthEstimation(RtcEventLog* event_log) - : lost_packets_since_last_loss_update_(0), +SendSideBandwidthEstimation::SendSideBandwidthEstimation( + const WebRtcKeyValueConfig* key_value_config, + RtcEventLog* event_log) + : rtt_backoff_(key_value_config), + lost_packets_since_last_loss_update_(0), expected_packets_since_last_loss_update_(0), current_target_(DataRate::Zero()), last_logged_target_(DataRate::Zero()), @@ -214,7 +225,9 @@ SendSideBandwidthEstimation::SendSideBandwidthEstimation(RtcEventLog* event_log) last_rtc_event_log_(Timestamp::MinusInfinity()), low_loss_threshold_(kDefaultLowLossThreshold), high_loss_threshold_(kDefaultHighLossThreshold), - bitrate_threshold_(kDefaultBitrateThreshold) { + bitrate_threshold_(kDefaultBitrateThreshold), + loss_based_bandwidth_estimation_(key_value_config), + receiver_limit_caps_only_("Enabled") { RTC_DCHECK(event_log); if (BweLossExperimentIsEnabled()) { uint32_t bitrate_threshold_kbps; @@ -227,6 +240,8 @@ SendSideBandwidthEstimation::SendSideBandwidthEstimation(RtcEventLog* event_log) bitrate_threshold_ = DataRate::KilobitsPerSec(bitrate_threshold_kbps); } } + ParseFieldTrial({&receiver_limit_caps_only_}, + key_value_config->Lookup("WebRTC-Bwe-ReceiverLimitCapsOnly")); } 
SendSideBandwidthEstimation::~SendSideBandwidthEstimation() {} @@ -273,9 +288,6 @@ void SendSideBandwidthEstimation::SetSendBitrate(DataRate bitrate, RTC_DCHECK_GT(bitrate, DataRate::Zero()); // Reset to avoid being capped by the estimate. delay_based_limit_ = DataRate::PlusInfinity(); - if (loss_based_bandwidth_estimation_.Enabled()) { - loss_based_bandwidth_estimation_.MaybeReset(bitrate); - } UpdateTargetBitrate(bitrate, at_time); // Clear last sent bitrate history so the new value can be used directly // and not capped. @@ -298,7 +310,10 @@ int SendSideBandwidthEstimation::GetMinBitrate() const { } DataRate SendSideBandwidthEstimation::target_rate() const { - return std::max(min_bitrate_configured_, current_target_); + DataRate target = current_target_; + if (receiver_limit_caps_only_) + target = std::min(target, receiver_limit_); + return std::max(min_bitrate_configured_, target); } DataRate SendSideBandwidthEstimation::GetEstimatedLinkCapacity() const { @@ -340,8 +355,8 @@ void SendSideBandwidthEstimation::IncomingPacketFeedbackVector( } } -void SendSideBandwidthEstimation::UpdatePacketsLost(int packets_lost, - int number_of_packets, +void SendSideBandwidthEstimation::UpdatePacketsLost(int64_t packets_lost, + int64_t number_of_packets, Timestamp at_time) { last_loss_feedback_ = at_time; if (first_report_time_.IsInfinite()) @@ -349,21 +364,23 @@ void SendSideBandwidthEstimation::UpdatePacketsLost(int packets_lost, // Check sequence number diff and weight loss report if (number_of_packets > 0) { - // Accumulate reports. - lost_packets_since_last_loss_update_ += packets_lost; - expected_packets_since_last_loss_update_ += number_of_packets; + int64_t expected = + expected_packets_since_last_loss_update_ + number_of_packets; // Don't generate a loss rate until it can be based on enough packets. - if (expected_packets_since_last_loss_update_ < kLimitNumPackets) + if (expected < kLimitNumPackets) { + // Accumulate reports. 
+ expected_packets_since_last_loss_update_ = expected; + lost_packets_since_last_loss_update_ += packets_lost; return; + } has_decreased_since_last_fraction_loss_ = false; - int64_t lost_q8 = lost_packets_since_last_loss_update_ << 8; - int64_t expected = expected_packets_since_last_loss_update_; + int64_t lost_q8 = (lost_packets_since_last_loss_update_ + packets_lost) + << 8; last_fraction_loss_ = std::min(lost_q8 / expected, 255); // Reset accumulators. - lost_packets_since_last_loss_update_ = 0; expected_packets_since_last_loss_update_ = 0; last_loss_packet_report_ = at_time; @@ -443,7 +460,7 @@ void SendSideBandwidthEstimation::UpdateEstimate(Timestamp at_time) { if (delay_based_limit_.IsFinite()) new_bitrate = std::max(delay_based_limit_, new_bitrate); if (loss_based_bandwidth_estimation_.Enabled()) { - loss_based_bandwidth_estimation_.SetInitialBitrate(new_bitrate); + loss_based_bandwidth_estimation_.Initialize(new_bitrate); } if (new_bitrate != current_target_) { @@ -466,10 +483,10 @@ void SendSideBandwidthEstimation::UpdateEstimate(Timestamp at_time) { return; } - if (loss_based_bandwidth_estimation_.Enabled()) { - loss_based_bandwidth_estimation_.Update( - at_time, min_bitrate_history_.front().second, last_round_trip_time_); - DataRate new_bitrate = MaybeRampupOrBackoff(current_target_, at_time); + if (loss_based_bandwidth_estimation_.InUse()) { + DataRate new_bitrate = loss_based_bandwidth_estimation_.Update( + at_time, min_bitrate_history_.front().second, delay_based_limit_, + last_round_trip_time_); UpdateTargetBitrate(new_bitrate, at_time); return; } @@ -566,28 +583,11 @@ void SendSideBandwidthEstimation::UpdateMinHistory(Timestamp at_time) { min_bitrate_history_.push_back(std::make_pair(at_time, current_target_)); } -DataRate SendSideBandwidthEstimation::MaybeRampupOrBackoff(DataRate new_bitrate, - Timestamp at_time) { - // TODO(crodbro): reuse this code in UpdateEstimate instead of current - // inlining of very similar functionality. 
- const TimeDelta time_since_loss_packet_report = - at_time - last_loss_packet_report_; - if (time_since_loss_packet_report < 1.2 * kMaxRtcpFeedbackInterval) { - new_bitrate = min_bitrate_history_.front().second * 1.08; - new_bitrate += DataRate::BitsPerSec(1000); - } - return new_bitrate; -} - DataRate SendSideBandwidthEstimation::GetUpperLimit() const { - DataRate upper_limit = std::min(delay_based_limit_, receiver_limit_); - upper_limit = std::min(upper_limit, max_bitrate_configured_); - if (loss_based_bandwidth_estimation_.Enabled() && - loss_based_bandwidth_estimation_.GetEstimate() > DataRate::Zero()) { - upper_limit = - std::min(upper_limit, loss_based_bandwidth_estimation_.GetEstimate()); - } - return upper_limit; + DataRate upper_limit = delay_based_limit_; + if (!receiver_limit_caps_only_) + upper_limit = std::min(upper_limit, receiver_limit_); + return std::min(upper_limit, max_bitrate_configured_); } void SendSideBandwidthEstimation::MaybeLogLowBitrateWarning(DataRate bitrate, diff --git a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h index 241ec8c841..b97b940db0 100644 --- a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h +++ b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation.h @@ -21,6 +21,7 @@ #include "absl/types/optional.h" #include "api/transport/network_types.h" +#include "api/transport/webrtc_key_value_config.h" #include "api/units/data_rate.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" @@ -54,17 +55,19 @@ class LinkCapacityTracker { class RttBasedBackoff { public: - RttBasedBackoff(); + explicit RttBasedBackoff(const WebRtcKeyValueConfig* key_value_config); ~RttBasedBackoff(); void UpdatePropagationRtt(Timestamp at_time, TimeDelta propagation_rtt); TimeDelta CorrectedRtt(Timestamp at_time) const; - FieldTrialParameter rtt_limit_; + FieldTrialFlag disabled_; + FieldTrialParameter 
configured_limit_; FieldTrialParameter drop_fraction_; FieldTrialParameter drop_interval_; FieldTrialParameter bandwidth_floor_; public: + TimeDelta rtt_limit_; Timestamp last_propagation_rtt_update_; TimeDelta last_propagation_rtt_; Timestamp last_packet_sent_; @@ -73,7 +76,8 @@ class RttBasedBackoff { class SendSideBandwidthEstimation { public: SendSideBandwidthEstimation() = delete; - explicit SendSideBandwidthEstimation(RtcEventLog* event_log); + SendSideBandwidthEstimation(const WebRtcKeyValueConfig* key_value_config, + RtcEventLog* event_log); ~SendSideBandwidthEstimation(); void OnRouteChange(); @@ -95,8 +99,8 @@ class SendSideBandwidthEstimation { void UpdateDelayBasedEstimate(Timestamp at_time, DataRate bitrate); // Call when we receive a RTCP message with a ReceiveBlock. - void UpdatePacketsLost(int packets_lost, - int number_of_packets, + void UpdatePacketsLost(int64_t packets_lost, + int64_t number_of_packets, Timestamp at_time); // Call when we receive a RTCP message with a ReceiveBlock. @@ -127,8 +131,6 @@ class SendSideBandwidthEstimation { // min bitrate used during last kBweIncreaseIntervalMs. void UpdateMinHistory(Timestamp at_time); - DataRate MaybeRampupOrBackoff(DataRate new_bitrate, Timestamp at_time); - // Gets the upper limit for the target bitrate. This is the minimum of the // delay based limit, the receiver limit and the loss based controller limit. 
DataRate GetUpperLimit() const; @@ -188,6 +190,7 @@ class SendSideBandwidthEstimation { float high_loss_threshold_; DataRate bitrate_threshold_; LossBasedBandwidthEstimation loss_based_bandwidth_estimation_; + FieldTrialFlag receiver_limit_caps_only_; }; } // namespace webrtc #endif // MODULES_CONGESTION_CONTROLLER_GOOG_CC_SEND_SIDE_BANDWIDTH_ESTIMATION_H_ diff --git a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation_unittest.cc b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation_unittest.cc index 06e3925358..85ce401098 100644 --- a/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation_unittest.cc +++ b/modules/congestion_controller/goog_cc/send_side_bandwidth_estimation_unittest.cc @@ -13,6 +13,7 @@ #include "api/rtc_event_log/rtc_event.h" #include "logging/rtc_event_log/events/rtc_event_bwe_update_loss_based.h" #include "logging/rtc_event_log/mock/mock_rtc_event_log.h" +#include "test/explicit_key_value_config.h" #include "test/gmock.h" #include "test/gtest.h" @@ -36,7 +37,8 @@ MATCHER(LossBasedBweUpdateWithBitrateAndLossFraction, "") { void TestProbing(bool use_delay_based) { ::testing::NiceMock event_log; - SendSideBandwidthEstimation bwe(&event_log); + test::ExplicitKeyValueConfig key_value_config(""); + SendSideBandwidthEstimation bwe(&key_value_config, &event_log); int64_t now_ms = 0; bwe.SetMinMaxBitrate(DataRate::BitsPerSec(100000), DataRate::BitsPerSec(1500000)); @@ -88,7 +90,8 @@ TEST(SendSideBweTest, DoesntReapplyBitrateDecreaseWithoutFollowingRemb) { EXPECT_CALL(event_log, LogProxy(LossBasedBweUpdateWithBitrateAndLossFraction())) .Times(1); - SendSideBandwidthEstimation bwe(&event_log); + test::ExplicitKeyValueConfig key_value_config(""); + SendSideBandwidthEstimation bwe(&key_value_config, &event_log); static const int kMinBitrateBps = 100000; static const int kInitialBitrateBps = 1000000; int64_t now_ms = 1000; @@ -138,7 +141,8 @@ TEST(SendSideBweTest, 
DoesntReapplyBitrateDecreaseWithoutFollowingRemb) { TEST(SendSideBweTest, SettingSendBitrateOverridesDelayBasedEstimate) { ::testing::NiceMock event_log; - SendSideBandwidthEstimation bwe(&event_log); + test::ExplicitKeyValueConfig key_value_config(""); + SendSideBandwidthEstimation bwe(&key_value_config, &event_log); static const int kMinBitrateBps = 10000; static const int kMaxBitrateBps = 10000000; static const int kInitialBitrateBps = 300000; @@ -163,4 +167,17 @@ TEST(SendSideBweTest, SettingSendBitrateOverridesDelayBasedEstimate) { EXPECT_EQ(bwe.target_rate().bps(), kForcedHighBitrate); } +TEST(RttBasedBackoff, DefaultEnabled) { + test::ExplicitKeyValueConfig key_value_config(""); + RttBasedBackoff rtt_backoff(&key_value_config); + EXPECT_TRUE(rtt_backoff.rtt_limit_.IsFinite()); +} + +TEST(RttBasedBackoff, CanBeDisabled) { + test::ExplicitKeyValueConfig key_value_config( + "WebRTC-Bwe-MaxRttLimit/Disabled/"); + RttBasedBackoff rtt_backoff(&key_value_config); + EXPECT_TRUE(rtt_backoff.rtt_limit_.IsPlusInfinity()); +} + } // namespace webrtc diff --git a/modules/congestion_controller/goog_cc/trendline_estimator.cc b/modules/congestion_controller/goog_cc/trendline_estimator.cc index c04db7351d..1008badf6a 100644 --- a/modules/congestion_controller/goog_cc/trendline_estimator.cc +++ b/modules/congestion_controller/goog_cc/trendline_estimator.cc @@ -17,7 +17,7 @@ #include "absl/strings/match.h" #include "absl/types/optional.h" -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" +#include "api/network_state_predictor.h" #include "modules/remote_bitrate_estimator/test/bwe_test_logging.h" #include "rtc_base/checks.h" #include "rtc_base/experiments/struct_parameters_parser.h" diff --git a/modules/congestion_controller/goog_cc/trendline_estimator.h b/modules/congestion_controller/goog_cc/trendline_estimator.h index 2db2903412..75b971d187 100644 --- a/modules/congestion_controller/goog_cc/trendline_estimator.h +++ 
b/modules/congestion_controller/goog_cc/trendline_estimator.h @@ -20,7 +20,6 @@ #include "api/network_state_predictor.h" #include "api/transport/webrtc_key_value_config.h" #include "modules/congestion_controller/goog_cc/delay_increase_detector_interface.h" -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/experiments/struct_parameters_parser.h" diff --git a/modules/congestion_controller/include/receive_side_congestion_controller.h b/modules/congestion_controller/include/receive_side_congestion_controller.h index 4f13b4d549..84661c05b7 100644 --- a/modules/congestion_controller/include/receive_side_congestion_controller.h +++ b/modules/congestion_controller/include/receive_side_congestion_controller.h @@ -16,10 +16,12 @@ #include "api/transport/field_trial_based_config.h" #include "api/transport/network_control.h" +#include "api/units/data_rate.h" +#include "modules/congestion_controller/remb_throttler.h" #include "modules/include/module.h" +#include "modules/pacing/packet_router.h" #include "modules/remote_bitrate_estimator/remote_estimator_proxy.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { class RemoteBitrateEstimator; @@ -33,10 +35,10 @@ class RemoteBitrateObserver; class ReceiveSideCongestionController : public CallStatsObserver, public Module { public: - ReceiveSideCongestionController(Clock* clock, PacketRouter* packet_router); ReceiveSideCongestionController( Clock* clock, - PacketRouter* packet_router, + RemoteEstimatorProxy::TransportFeedbackSender feedback_sender, + RembThrottler::RembSender remb_sender, NetworkStateEstimator* network_state_estimator); ~ReceiveSideCongestionController() override {} @@ -57,6 +59,10 @@ class ReceiveSideCongestionController : public CallStatsObserver, // This is send bitrate, used to control the rate of feedback messages. 
void OnBitrateChanged(int bitrate_bps); + // Ensures the remote party is notified of the receive bitrate no larger than + // |bitrate| using RTCP REMB. + void SetMaxDesiredReceiveBitrate(DataRate bitrate); + // Implements Module. int64_t TimeUntilNextProcess() override; void Process() override; @@ -66,6 +72,11 @@ class ReceiveSideCongestionController : public CallStatsObserver, public: WrappingBitrateEstimator(RemoteBitrateObserver* observer, Clock* clock); + WrappingBitrateEstimator() = delete; + WrappingBitrateEstimator(const WrappingBitrateEstimator&) = delete; + WrappingBitrateEstimator& operator=(const WrappingBitrateEstimator&) = + delete; + ~WrappingBitrateEstimator() override; void IncomingPacket(int64_t arrival_time_ms, @@ -87,20 +98,19 @@ class ReceiveSideCongestionController : public CallStatsObserver, private: void PickEstimatorFromHeader(const RTPHeader& header) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); - void PickEstimator() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void PickEstimator() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); RemoteBitrateObserver* observer_; Clock* const clock_; - rtc::CriticalSection crit_sect_; + mutable Mutex mutex_; std::unique_ptr rbe_; bool using_absolute_send_time_; uint32_t packets_since_absolute_send_time_; int min_bitrate_bps_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WrappingBitrateEstimator); }; const FieldTrialBasedConfig field_trial_config_; + RembThrottler remb_throttler_; WrappingBitrateEstimator remote_bitrate_estimator_; RemoteEstimatorProxy remote_estimator_proxy_; }; diff --git a/modules/congestion_controller/pcc/BUILD.gn b/modules/congestion_controller/pcc/BUILD.gn index d0111725d2..38a3b8ad7c 100644 --- a/modules/congestion_controller/pcc/BUILD.gn +++ b/modules/congestion_controller/pcc/BUILD.gn @@ -37,8 +37,8 @@ rtc_library("pcc_controller") { "../../../api/units:timestamp", "../../../rtc_base:checks", "../../../rtc_base:rtc_base_approved", - 
"//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("monitor_interval") { @@ -94,11 +94,11 @@ rtc_library("bitrate_controller") { "../../../api/transport:network_control", "../../../api/units:data_rate", "../../../rtc_base:rtc_base_approved", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } -if (rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { rtc_library("pcc_unittests") { testonly = true sources = [ diff --git a/modules/congestion_controller/pcc/bitrate_controller_unittest.cc b/modules/congestion_controller/pcc/bitrate_controller_unittest.cc index 6693b7a833..957d99b1de 100644 --- a/modules/congestion_controller/pcc/bitrate_controller_unittest.cc +++ b/modules/congestion_controller/pcc/bitrate_controller_unittest.cc @@ -67,8 +67,10 @@ std::vector CreatePacketResults( class MockUtilityFunction : public PccUtilityFunctionInterface { public: - MOCK_CONST_METHOD1(Compute, - double(const PccMonitorInterval& monitor_interval)); + MOCK_METHOD(double, + Compute, + (const PccMonitorInterval& monitor_interval), + (const, override)); }; } // namespace diff --git a/modules/congestion_controller/pcc/monitor_interval.cc b/modules/congestion_controller/pcc/monitor_interval.cc index c8efd5b59a..6bc9f4a7ef 100644 --- a/modules/congestion_controller/pcc/monitor_interval.cc +++ b/modules/congestion_controller/pcc/monitor_interval.cc @@ -47,7 +47,7 @@ void PccMonitorInterval::OnPacketsFeedback( feedback_collection_done_ = true; return; } - if (packet_result.receive_time.IsInfinite()) { + if (!packet_result.IsReceived()) { lost_packets_sent_time_.push_back(packet_result.sent_packet.send_time); } else { received_packets_.push_back( diff --git a/modules/congestion_controller/pcc/rtt_tracker.cc b/modules/congestion_controller/pcc/rtt_tracker.cc index 0814912b49..af9dc8f11b 100644 --- 
a/modules/congestion_controller/pcc/rtt_tracker.cc +++ b/modules/congestion_controller/pcc/rtt_tracker.cc @@ -23,7 +23,7 @@ void RttTracker::OnPacketsFeedback( Timestamp feedback_received_time) { TimeDelta packet_rtt = TimeDelta::MinusInfinity(); for (const PacketResult& packet_result : packet_feedbacks) { - if (packet_result.receive_time.IsInfinite()) + if (!packet_result.IsReceived()) continue; packet_rtt = std::max( packet_rtt, diff --git a/modules/congestion_controller/receive_side_congestion_controller.cc b/modules/congestion_controller/receive_side_congestion_controller.cc index 7448ec28b2..61a126fbe3 100644 --- a/modules/congestion_controller/receive_side_congestion_controller.cc +++ b/modules/congestion_controller/receive_side_congestion_controller.cc @@ -10,6 +10,7 @@ #include "modules/congestion_controller/include/receive_side_congestion_controller.h" +#include "api/units/data_rate.h" #include "modules/pacing/packet_router.h" #include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h" @@ -38,45 +39,45 @@ void ReceiveSideCongestionController::WrappingBitrateEstimator::IncomingPacket( int64_t arrival_time_ms, size_t payload_size, const RTPHeader& header) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); PickEstimatorFromHeader(header); rbe_->IncomingPacket(arrival_time_ms, payload_size, header); } void ReceiveSideCongestionController::WrappingBitrateEstimator::Process() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); rbe_->Process(); } int64_t ReceiveSideCongestionController::WrappingBitrateEstimator:: TimeUntilNextProcess() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return rbe_->TimeUntilNextProcess(); } void ReceiveSideCongestionController::WrappingBitrateEstimator::OnRttUpdate( int64_t avg_rtt_ms, int64_t max_rtt_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); rbe_->OnRttUpdate(avg_rtt_ms, 
max_rtt_ms); } void ReceiveSideCongestionController::WrappingBitrateEstimator::RemoveStream( unsigned int ssrc) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); rbe_->RemoveStream(ssrc); } bool ReceiveSideCongestionController::WrappingBitrateEstimator::LatestEstimate( std::vector* ssrcs, unsigned int* bitrate_bps) const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return rbe_->LatestEstimate(ssrcs, bitrate_bps); } void ReceiveSideCongestionController::WrappingBitrateEstimator::SetMinBitrate( int min_bitrate_bps) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); rbe_->SetMinBitrate(min_bitrate_bps); min_bitrate_bps_ = min_bitrate_bps; } @@ -120,16 +121,13 @@ void ReceiveSideCongestionController::WrappingBitrateEstimator:: ReceiveSideCongestionController::ReceiveSideCongestionController( Clock* clock, - PacketRouter* packet_router) - : ReceiveSideCongestionController(clock, packet_router, nullptr) {} - -ReceiveSideCongestionController::ReceiveSideCongestionController( - Clock* clock, - PacketRouter* packet_router, + RemoteEstimatorProxy::TransportFeedbackSender feedback_sender, + RembThrottler::RembSender remb_sender, NetworkStateEstimator* network_state_estimator) - : remote_bitrate_estimator_(packet_router, clock), + : remb_throttler_(std::move(remb_sender), clock), + remote_bitrate_estimator_(&remb_throttler_, clock), remote_estimator_proxy_(clock, - packet_router, + std::move(feedback_sender), &field_trial_config_, network_state_estimator) {} @@ -186,4 +184,9 @@ void ReceiveSideCongestionController::Process() { remote_bitrate_estimator_.Process(); } +void ReceiveSideCongestionController::SetMaxDesiredReceiveBitrate( + DataRate bitrate) { + remb_throttler_.SetMaxDesiredReceiveBitrate(bitrate); +} + } // namespace webrtc diff --git a/modules/congestion_controller/receive_side_congestion_controller_unittest.cc b/modules/congestion_controller/receive_side_congestion_controller_unittest.cc index 95143f7175..5e03179f42 
100644 --- a/modules/congestion_controller/receive_side_congestion_controller_unittest.cc +++ b/modules/congestion_controller/receive_side_congestion_controller_unittest.cc @@ -10,6 +10,8 @@ #include "modules/congestion_controller/include/receive_side_congestion_controller.h" +#include "api/test/network_emulation/create_cross_traffic.h" +#include "api/test/network_emulation/cross_traffic.h" #include "modules/pacing/packet_router.h" #include "system_wrappers/include/clock.h" #include "test/gmock.h" @@ -18,10 +20,8 @@ using ::testing::_; using ::testing::AtLeast; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::SaveArg; -using ::testing::StrictMock; +using ::testing::ElementsAre; +using ::testing::MockFunction; namespace webrtc { @@ -35,32 +35,28 @@ uint32_t AbsSendTime(int64_t t, int64_t denom) { return (((t << 18) + (denom >> 1)) / denom) & 0x00fffffful; } -class MockPacketRouter : public PacketRouter { - public: - MOCK_METHOD2(OnReceiveBitrateChanged, - void(const std::vector& ssrcs, uint32_t bitrate)); -}; - const uint32_t kInitialBitrateBps = 60000; } // namespace namespace test { -TEST(ReceiveSideCongestionControllerTest, OnReceivedPacketWithAbsSendTime) { - StrictMock packet_router; +TEST(ReceiveSideCongestionControllerTest, SendsRembWithAbsSendTime) { + MockFunction>)> + feedback_sender; + MockFunction)> remb_sender; SimulatedClock clock_(123456); - ReceiveSideCongestionController controller(&clock_, &packet_router); + ReceiveSideCongestionController controller( + &clock_, feedback_sender.AsStdFunction(), remb_sender.AsStdFunction(), + nullptr); size_t payload_size = 1000; RTPHeader header; header.ssrc = 0x11eb21c; header.extension.hasAbsoluteSendTime = true; - std::vector ssrcs; - EXPECT_CALL(packet_router, OnReceiveBitrateChanged(_, _)) - .WillRepeatedly(SaveArg<0>(&ssrcs)); + EXPECT_CALL(remb_sender, Call(_, ElementsAre(header.ssrc))).Times(AtLeast(1)); for (int i = 0; i < 10; ++i) { clock_.AdvanceTimeMilliseconds((1000 * 
payload_size) / kInitialBitrateBps); @@ -68,9 +64,20 @@ TEST(ReceiveSideCongestionControllerTest, OnReceivedPacketWithAbsSendTime) { header.extension.absoluteSendTime = AbsSendTime(now_ms, 1000); controller.OnReceivedPacket(now_ms, payload_size, header); } +} + +TEST(ReceiveSideCongestionControllerTest, + SendsRembAfterSetMaxDesiredReceiveBitrate) { + MockFunction>)> + feedback_sender; + MockFunction)> remb_sender; + SimulatedClock clock_(123456); - ASSERT_EQ(1u, ssrcs.size()); - EXPECT_EQ(header.ssrc, ssrcs[0]); + ReceiveSideCongestionController controller( + &clock_, feedback_sender.AsStdFunction(), remb_sender.AsStdFunction(), + nullptr); + EXPECT_CALL(remb_sender, Call(123, _)); + controller.SetMaxDesiredReceiveBitrate(DataRate::BitsPerSec(123)); } TEST(ReceiveSideCongestionControllerTest, ConvergesToCapacity) { @@ -107,7 +114,9 @@ TEST(ReceiveSideCongestionControllerTest, IsFairToTCP) { VideoStreamConfig video; video.stream.packet_feedback = false; s.CreateVideoStream(route->forward(), video); - s.net()->StartFakeTcpCrossTraffic(send_net, ret_net, FakeTcpConfig()); + s.net()->StartCrossTraffic(CreateFakeTcpCrossTraffic( + s.net()->CreateRoute(send_net), s.net()->CreateRoute(ret_net), + FakeTcpConfig())); s.RunFor(TimeDelta::Seconds(30)); // For some reason we get outcompeted by TCP here, this should probably be // fixed and a lower bound should be added to the test. diff --git a/modules/congestion_controller/remb_throttler.cc b/modules/congestion_controller/remb_throttler.cc new file mode 100644 index 0000000000..fcc30af9a8 --- /dev/null +++ b/modules/congestion_controller/remb_throttler.cc @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/congestion_controller/remb_throttler.h" + +#include +#include + +namespace webrtc { + +namespace { +constexpr TimeDelta kRembSendInterval = TimeDelta::Millis(200); +} // namespace + +RembThrottler::RembThrottler(RembSender remb_sender, Clock* clock) + : remb_sender_(std::move(remb_sender)), + clock_(clock), + last_remb_time_(Timestamp::MinusInfinity()), + last_send_remb_bitrate_(DataRate::PlusInfinity()), + max_remb_bitrate_(DataRate::PlusInfinity()) {} + +void RembThrottler::OnReceiveBitrateChanged(const std::vector& ssrcs, + uint32_t bitrate_bps) { + DataRate receive_bitrate = DataRate::BitsPerSec(bitrate_bps); + Timestamp now = clock_->CurrentTime(); + { + MutexLock lock(&mutex_); + // % threshold for if we should send a new REMB asap. + const int64_t kSendThresholdPercent = 103; + if (receive_bitrate * kSendThresholdPercent / 100 > + last_send_remb_bitrate_ && + now < last_remb_time_ + kRembSendInterval) { + return; + } + last_remb_time_ = now; + last_send_remb_bitrate_ = receive_bitrate; + receive_bitrate = std::min(last_send_remb_bitrate_, max_remb_bitrate_); + } + remb_sender_(receive_bitrate.bps(), ssrcs); +} + +void RembThrottler::SetMaxDesiredReceiveBitrate(DataRate bitrate) { + Timestamp now = clock_->CurrentTime(); + { + MutexLock lock(&mutex_); + max_remb_bitrate_ = bitrate; + if (now - last_remb_time_ < kRembSendInterval && + !last_send_remb_bitrate_.IsZero() && + last_send_remb_bitrate_ <= max_remb_bitrate_) { + return; + } + } + remb_sender_(bitrate.bps(), /*ssrcs=*/{}); +} + +} // namespace webrtc diff --git a/modules/congestion_controller/remb_throttler.h b/modules/congestion_controller/remb_throttler.h new file mode 100644 index 0000000000..67c0280749 --- /dev/null +++ b/modules/congestion_controller/remb_throttler.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef MODULES_CONGESTION_CONTROLLER_REMB_THROTTLER_H_ +#define MODULES_CONGESTION_CONTROLLER_REMB_THROTTLER_H_ + +#include +#include + +#include "api/units/data_rate.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" +#include "modules/remote_bitrate_estimator/remote_estimator_proxy.h" +#include "rtc_base/synchronization/mutex.h" + +namespace webrtc { + +// RembThrottler is a helper class used for throttling RTCP REMB messages. +// Throttles small changes to the received BWE within 200ms. +class RembThrottler : public RemoteBitrateObserver { + public: + using RembSender = + std::function ssrcs)>; + RembThrottler(RembSender remb_sender, Clock* clock); + + // Ensures the remote party is notified of the receive bitrate no larger than + // |bitrate| using RTCP REMB. + void SetMaxDesiredReceiveBitrate(DataRate bitrate); + + // Implements RemoteBitrateObserver; + // Called every time there is a new bitrate estimate for a receive channel + // group. This call will trigger a new RTCP REMB packet if the bitrate + // estimate has decreased or if no RTCP REMB packet has been sent for + // a certain time interval. 
+ void OnReceiveBitrateChanged(const std::vector& ssrcs, + uint32_t bitrate_bps) override; + + private: + const RembSender remb_sender_; + Clock* const clock_; + mutable Mutex mutex_; + Timestamp last_remb_time_ RTC_GUARDED_BY(mutex_); + DataRate last_send_remb_bitrate_ RTC_GUARDED_BY(mutex_); + DataRate max_remb_bitrate_ RTC_GUARDED_BY(mutex_); +}; + +} // namespace webrtc +#endif // MODULES_CONGESTION_CONTROLLER_REMB_THROTTLER_H_ diff --git a/modules/congestion_controller/remb_throttler_unittest.cc b/modules/congestion_controller/remb_throttler_unittest.cc new file mode 100644 index 0000000000..3f8df8a7bb --- /dev/null +++ b/modules/congestion_controller/remb_throttler_unittest.cc @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "modules/congestion_controller/remb_throttler.h" + +#include + +#include "api/units/data_rate.h" +#include "api/units/time_delta.h" +#include "system_wrappers/include/clock.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { + +using ::testing::_; +using ::testing::MockFunction; + +TEST(RembThrottlerTest, CallRembSenderOnFirstReceiveBitrateChange) { + SimulatedClock clock(Timestamp::Zero()); + MockFunction)> remb_sender; + RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock); + + EXPECT_CALL(remb_sender, Call(12345, std::vector({1, 2, 3}))); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/12345); +} + +TEST(RembThrottlerTest, ThrottlesSmallReceiveBitrateDecrease) { + SimulatedClock clock(Timestamp::Zero()); + MockFunction)> remb_sender; + RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock); + + EXPECT_CALL(remb_sender, Call); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/12346); + clock.AdvanceTime(TimeDelta::Millis(100)); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/12345); + + EXPECT_CALL(remb_sender, Call(12345, _)); + clock.AdvanceTime(TimeDelta::Millis(101)); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/12345); +} + +TEST(RembThrottlerTest, DoNotThrottleLargeReceiveBitrateDecrease) { + SimulatedClock clock(Timestamp::Zero()); + MockFunction)> remb_sender; + RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock); + + EXPECT_CALL(remb_sender, Call(2345, _)); + EXPECT_CALL(remb_sender, Call(1234, _)); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/2345); + clock.AdvanceTime(TimeDelta::Millis(1)); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/1234); +} + +TEST(RembThrottlerTest, ThrottlesReceiveBitrateIncrease) { + SimulatedClock clock(Timestamp::Zero()); + MockFunction)> remb_sender; + RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock); + + 
EXPECT_CALL(remb_sender, Call); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/1234); + clock.AdvanceTime(TimeDelta::Millis(100)); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/2345); + + // Updates 200ms after previous callback is not throttled. + EXPECT_CALL(remb_sender, Call(2345, _)); + clock.AdvanceTime(TimeDelta::Millis(101)); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/2345); +} + +TEST(RembThrottlerTest, CallRembSenderOnSetMaxDesiredReceiveBitrate) { + SimulatedClock clock(Timestamp::Zero()); + MockFunction)> remb_sender; + RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock); + EXPECT_CALL(remb_sender, Call(1234, _)); + remb_throttler.SetMaxDesiredReceiveBitrate(DataRate::BitsPerSec(1234)); +} + +TEST(RembThrottlerTest, CallRembSenderWithMinOfMaxDesiredAndOnReceivedBitrate) { + SimulatedClock clock(Timestamp::Zero()); + MockFunction)> remb_sender; + RembThrottler remb_throttler(remb_sender.AsStdFunction(), &clock); + + EXPECT_CALL(remb_sender, Call(1234, _)); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/1234); + clock.AdvanceTime(TimeDelta::Millis(1)); + remb_throttler.SetMaxDesiredReceiveBitrate(DataRate::BitsPerSec(4567)); + + clock.AdvanceTime(TimeDelta::Millis(200)); + EXPECT_CALL(remb_sender, Call(4567, _)); + remb_throttler.OnReceiveBitrateChanged({1, 2, 3}, /*bitrate_bps=*/5678); +} + +} // namespace webrtc diff --git a/modules/congestion_controller/rtp/BUILD.gn b/modules/congestion_controller/rtp/BUILD.gn index b444f5495b..1a70447307 100644 --- a/modules/congestion_controller/rtp/BUILD.gn +++ b/modules/congestion_controller/rtp/BUILD.gn @@ -24,17 +24,18 @@ rtc_library("control_handler") { ] deps = [ + "../../../api:sequence_checker", "../../../api/transport:network_control", "../../../api/units:data_rate", "../../../api/units:data_size", "../../../api/units:time_delta", "../../../rtc_base:checks", "../../../rtc_base:safe_minmax", - 
"../../../rtc_base/synchronization:sequence_checker", + "../../../rtc_base/system:no_unique_address", "../../../system_wrappers:field_trial", "../../pacing", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] if (!build_with_mozilla) { deps += [ "../../../rtc_base" ] @@ -51,6 +52,7 @@ rtc_library("transport_feedback") { deps = [ "../..:module_api_public", + "../../../api:sequence_checker", "../../../api/transport:network_control", "../../../api/units:data_size", "../../../api/units:timestamp", @@ -58,9 +60,12 @@ rtc_library("transport_feedback") { "../../../rtc_base:checks", "../../../rtc_base:rtc_base_approved", "../../../rtc_base/network:sent_packet", + "../../../rtc_base/synchronization:mutex", "../../../system_wrappers", "../../../system_wrappers:field_trial", "../../rtp_rtcp:rtp_rtcp_format", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/types:optional", ] diff --git a/modules/congestion_controller/rtp/control_handler.h b/modules/congestion_controller/rtp/control_handler.h index 9cce0d72bf..1da6463219 100644 --- a/modules/congestion_controller/rtp/control_handler.h +++ b/modules/congestion_controller/rtp/control_handler.h @@ -14,12 +14,13 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/transport/network_types.h" #include "api/units/data_size.h" #include "api/units/time_delta.h" #include "modules/pacing/paced_sender.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" namespace webrtc { // This is used to observe the network controller state and route calls to @@ -46,7 +47,7 @@ class CongestionControlHandler { const bool disable_pacer_emergency_stop_; int64_t pacer_expected_queue_ms_ = 0; - SequenceChecker sequenced_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequenced_checker_; 
RTC_DISALLOW_COPY_AND_ASSIGN(CongestionControlHandler); }; } // namespace webrtc diff --git a/modules/congestion_controller/rtp/transport_feedback_adapter.h b/modules/congestion_controller/rtp/transport_feedback_adapter.h index b8148a252f..deb7925d77 100644 --- a/modules/congestion_controller/rtp/transport_feedback_adapter.h +++ b/modules/congestion_controller/rtp/transport_feedback_adapter.h @@ -16,14 +16,13 @@ #include #include +#include "api/sequence_checker.h" #include "api/transport/network_types.h" #include "modules/include/module_common_types_public.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "rtc_base/critical_section.h" #include "rtc_base/network/sent_packet.h" #include "rtc_base/network_route.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" namespace webrtc { diff --git a/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc b/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc index 1c74b196d8..933abd9bf0 100644 --- a/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc +++ b/modules/congestion_controller/rtp/transport_feedback_adapter_unittest.cc @@ -27,9 +27,9 @@ using ::testing::_; using ::testing::Invoke; namespace webrtc { -namespace webrtc_cc { namespace { +constexpr uint32_t kSsrc = 8492; const PacedPacketInfo kPacingInfo0(0, 5, 2000); const PacedPacketInfo kPacingInfo1(1, 8, 4000); const PacedPacketInfo kPacingInfo2(2, 14, 7000); @@ -49,8 +49,8 @@ void ComparePacketFeedbackVectors(const std::vector& truth, // equal. However, the difference must be the same for all x. 
TimeDelta arrival_time_delta = truth[0].receive_time - input[0].receive_time; for (size_t i = 0; i < len; ++i) { - RTC_CHECK(truth[i].receive_time.IsFinite()); - if (input[i].receive_time.IsFinite()) { + RTC_CHECK(truth[i].IsReceived()); + if (input[i].IsReceived()) { EXPECT_EQ(truth[i].receive_time - input[i].receive_time, arrival_time_delta); } @@ -77,16 +77,16 @@ PacketResult CreatePacket(int64_t receive_time_ms, return res; } -} // namespace - -namespace test { - class MockStreamFeedbackObserver : public webrtc::StreamFeedbackObserver { public: - MOCK_METHOD1(OnPacketFeedbackVector, - void(std::vector packet_feedback_vector)); + MOCK_METHOD(void, + OnPacketFeedbackVector, + (std::vector packet_feedback_vector), + (override)); }; +} // namespace + class TransportFeedbackAdapterTest : public ::testing::Test { public: TransportFeedbackAdapterTest() : clock_(0) {} @@ -106,7 +106,7 @@ class TransportFeedbackAdapterTest : public ::testing::Test { void OnSentPacket(const PacketResult& packet_feedback) { RtpPacketSendInfo packet_info; - packet_info.ssrc = kSsrc; + packet_info.media_ssrc = kSsrc; packet_info.transport_sequence_number = packet_feedback.sent_packet.sequence_number; packet_info.rtp_sequence_number = 0; @@ -120,8 +120,6 @@ class TransportFeedbackAdapterTest : public ::testing::Test { packet_feedback.sent_packet.send_time.ms(), rtc::PacketInfo())); } - static constexpr uint32_t kSsrc = 8492; - SimulatedClock clock_; std::unique_ptr adapter_; }; @@ -391,7 +389,7 @@ TEST_F(TransportFeedbackAdapterTest, IgnoreDuplicatePacketSentCalls) { // Add a packet and then mark it as sent. 
RtpPacketSendInfo packet_info; - packet_info.ssrc = kSsrc; + packet_info.media_ssrc = kSsrc; packet_info.transport_sequence_number = packet.sent_packet.sequence_number; packet_info.length = packet.sent_packet.size.bytes(); packet_info.pacing_info = packet.sent_packet.pacing_info; @@ -410,6 +408,4 @@ TEST_F(TransportFeedbackAdapterTest, IgnoreDuplicatePacketSentCalls) { EXPECT_FALSE(duplicate_packet.has_value()); } -} // namespace test -} // namespace webrtc_cc } // namespace webrtc diff --git a/modules/congestion_controller/rtp/transport_feedback_demuxer.cc b/modules/congestion_controller/rtp/transport_feedback_demuxer.cc index 045ba38cd3..6ab3ad80fa 100644 --- a/modules/congestion_controller/rtp/transport_feedback_demuxer.cc +++ b/modules/congestion_controller/rtp/transport_feedback_demuxer.cc @@ -18,7 +18,7 @@ static const size_t kMaxPacketsInHistory = 5000; void TransportFeedbackDemuxer::RegisterStreamFeedbackObserver( std::vector ssrcs, StreamFeedbackObserver* observer) { - rtc::CritScope cs(&observers_lock_); + MutexLock lock(&observers_lock_); RTC_DCHECK(observer); RTC_DCHECK(absl::c_find_if(observers_, [=](const auto& pair) { return pair.second == observer; @@ -28,7 +28,7 @@ void TransportFeedbackDemuxer::RegisterStreamFeedbackObserver( void TransportFeedbackDemuxer::DeRegisterStreamFeedbackObserver( StreamFeedbackObserver* observer) { - rtc::CritScope cs(&observers_lock_); + MutexLock lock(&observers_lock_); RTC_DCHECK(observer); const auto it = absl::c_find_if( observers_, [=](const auto& pair) { return pair.second == observer; }); @@ -37,16 +37,17 @@ void TransportFeedbackDemuxer::DeRegisterStreamFeedbackObserver( } void TransportFeedbackDemuxer::AddPacket(const RtpPacketSendInfo& packet_info) { - rtc::CritScope cs(&lock_); - if (packet_info.ssrc != 0) { - StreamFeedbackObserver::StreamPacketInfo info; - info.ssrc = packet_info.ssrc; - info.rtp_sequence_number = packet_info.rtp_sequence_number; - info.received = false; - history_.insert( - 
{seq_num_unwrapper_.Unwrap(packet_info.transport_sequence_number), - info}); - } + MutexLock lock(&lock_); + + StreamFeedbackObserver::StreamPacketInfo info; + info.ssrc = packet_info.media_ssrc; + info.rtp_sequence_number = packet_info.rtp_sequence_number; + info.received = false; + info.is_retransmission = + packet_info.packet_type == RtpPacketMediaType::kRetransmission; + history_.insert( + {seq_num_unwrapper_.Unwrap(packet_info.transport_sequence_number), info}); + while (history_.size() > kMaxPacketsInHistory) { history_.erase(history_.begin()); } @@ -56,7 +57,7 @@ void TransportFeedbackDemuxer::OnTransportFeedback( const rtcp::TransportFeedback& feedback) { std::vector stream_feedbacks; { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); for (const auto& packet : feedback.GetAllPackets()) { int64_t seq_num = seq_num_unwrapper_.UnwrapWithoutUpdate(packet.sequence_number()); @@ -71,7 +72,7 @@ void TransportFeedbackDemuxer::OnTransportFeedback( } } - rtc::CritScope cs(&observers_lock_); + MutexLock lock(&observers_lock_); for (auto& observer : observers_) { std::vector selected_feedback; for (const auto& packet_info : stream_feedbacks) { diff --git a/modules/congestion_controller/rtp/transport_feedback_demuxer.h b/modules/congestion_controller/rtp/transport_feedback_demuxer.h index bcd25d5835..634a37ea1a 100644 --- a/modules/congestion_controller/rtp/transport_feedback_demuxer.h +++ b/modules/congestion_controller/rtp/transport_feedback_demuxer.h @@ -16,7 +16,7 @@ #include "modules/include/module_common_types_public.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -32,7 +32,7 @@ class TransportFeedbackDemuxer : public StreamFeedbackProvider { void OnTransportFeedback(const rtcp::TransportFeedback& feedback); private: - rtc::CriticalSection lock_; + Mutex lock_; SequenceNumberUnwrapper seq_num_unwrapper_ RTC_GUARDED_BY(&lock_); std::map 
history_ RTC_GUARDED_BY(&lock_); @@ -40,7 +40,7 @@ class TransportFeedbackDemuxer : public StreamFeedbackProvider { // Maps a set of ssrcs to corresponding observer. Vectors are used rather than // set/map to ensure that the processing order is consistent independently of // the randomized ssrcs. - rtc::CriticalSection observers_lock_; + Mutex observers_lock_; std::vector, StreamFeedbackObserver*>> observers_ RTC_GUARDED_BY(&observers_lock_); }; diff --git a/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc b/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc index dce52de557..482f58d1bb 100644 --- a/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc +++ b/modules/congestion_controller/rtp/transport_feedback_demuxer_unittest.cc @@ -16,51 +16,81 @@ namespace webrtc { namespace { -using ::testing::_; +using ::testing::AllOf; +using ::testing::ElementsAre; +using ::testing::Field; +using PacketInfo = StreamFeedbackObserver::StreamPacketInfo; + static constexpr uint32_t kSsrc = 8492; class MockStreamFeedbackObserver : public webrtc::StreamFeedbackObserver { public: - MOCK_METHOD1(OnPacketFeedbackVector, - void(std::vector packet_feedback_vector)); + MOCK_METHOD(void, + OnPacketFeedbackVector, + (std::vector packet_feedback_vector), + (override)); }; RtpPacketSendInfo CreatePacket(uint32_t ssrc, - int16_t rtp_sequence_number, - int64_t transport_sequence_number) { + uint16_t rtp_sequence_number, + int64_t transport_sequence_number, + bool is_retransmission) { RtpPacketSendInfo res; - res.ssrc = ssrc; + res.media_ssrc = ssrc; res.transport_sequence_number = transport_sequence_number; res.rtp_sequence_number = rtp_sequence_number; + res.packet_type = is_retransmission ? 
RtpPacketMediaType::kRetransmission + : RtpPacketMediaType::kVideo; return res; } } // namespace + TEST(TransportFeedbackDemuxerTest, ObserverSanity) { TransportFeedbackDemuxer demuxer; MockStreamFeedbackObserver mock; demuxer.RegisterStreamFeedbackObserver({kSsrc}, &mock); - demuxer.AddPacket(CreatePacket(kSsrc, 55, 1)); - demuxer.AddPacket(CreatePacket(kSsrc, 56, 2)); - demuxer.AddPacket(CreatePacket(kSsrc, 57, 3)); + const uint16_t kRtpStartSeq = 55; + const int64_t kTransportStartSeq = 1; + demuxer.AddPacket(CreatePacket(kSsrc, kRtpStartSeq, kTransportStartSeq, + /*is_retransmit=*/false)); + demuxer.AddPacket(CreatePacket(kSsrc, kRtpStartSeq + 1, + kTransportStartSeq + 1, + /*is_retransmit=*/false)); + demuxer.AddPacket(CreatePacket( + kSsrc, kRtpStartSeq + 2, kTransportStartSeq + 2, /*is_retransmit=*/true)); rtcp::TransportFeedback feedback; - feedback.SetBase(1, 1000); - ASSERT_TRUE(feedback.AddReceivedPacket(1, 1000)); - ASSERT_TRUE(feedback.AddReceivedPacket(2, 2000)); - ASSERT_TRUE(feedback.AddReceivedPacket(3, 3000)); + feedback.SetBase(kTransportStartSeq, 1000); + ASSERT_TRUE(feedback.AddReceivedPacket(kTransportStartSeq, 1000)); + // Drop middle packet. 
+ ASSERT_TRUE(feedback.AddReceivedPacket(kTransportStartSeq + 2, 3000)); - EXPECT_CALL(mock, OnPacketFeedbackVector(_)).Times(1); + EXPECT_CALL( + mock, OnPacketFeedbackVector(ElementsAre( + AllOf(Field(&PacketInfo::received, true), + Field(&PacketInfo::ssrc, kSsrc), + Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq), + Field(&PacketInfo::is_retransmission, false)), + AllOf(Field(&PacketInfo::received, false), + Field(&PacketInfo::ssrc, kSsrc), + Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq + 1), + Field(&PacketInfo::is_retransmission, false)), + AllOf(Field(&PacketInfo::received, true), + Field(&PacketInfo::ssrc, kSsrc), + Field(&PacketInfo::rtp_sequence_number, kRtpStartSeq + 2), + Field(&PacketInfo::is_retransmission, true))))); demuxer.OnTransportFeedback(feedback); demuxer.DeRegisterStreamFeedbackObserver(&mock); - demuxer.AddPacket(CreatePacket(kSsrc, 58, 4)); + demuxer.AddPacket( + CreatePacket(kSsrc, kRtpStartSeq + 3, kTransportStartSeq + 3, false)); rtcp::TransportFeedback second_feedback; - second_feedback.SetBase(4, 4000); - ASSERT_TRUE(second_feedback.AddReceivedPacket(4, 4000)); + second_feedback.SetBase(kTransportStartSeq + 3, 4000); + ASSERT_TRUE(second_feedback.AddReceivedPacket(kTransportStartSeq + 3, 4000)); - EXPECT_CALL(mock, OnPacketFeedbackVector(_)).Times(0); + EXPECT_CALL(mock, OnPacketFeedbackVector).Times(0); demuxer.OnTransportFeedback(second_feedback); } } // namespace webrtc diff --git a/modules/desktop_capture/BUILD.gn b/modules/desktop_capture/BUILD.gn index 4f93c246fe..25b92bed45 100644 --- a/modules/desktop_capture/BUILD.gn +++ b/modules/desktop_capture/BUILD.gn @@ -11,6 +11,11 @@ import("//build/config/ui.gni") import("//tools/generate_stubs/rules.gni") import("../../webrtc.gni") +if (rtc_use_pipewire) { + assert(rtc_pipewire_version == "0.2" || rtc_pipewire_version == "0.3", + "Unsupported PipeWire version") +} + use_desktop_capture_differ_sse2 = current_cpu == "x86" || current_cpu == "x64" config("x11_config") { @@ 
-39,10 +44,12 @@ rtc_library("primitives") { "../../api:scoped_refptr", "../../rtc_base:checks", "../../rtc_base/system:rtc_export", + "//third_party/libyuv", ] if (!build_with_mozilla) { - deps += [ "../../rtc_base" ] # TODO(kjellander): Cleanup in bugs.webrtc.org/3806. + deps += [ "../../rtc_base" ] # TODO(kjellander): Cleanup in + # bugs.webrtc.org/3806. } } @@ -75,6 +82,9 @@ if (rtc_include_tests) { "window_finder_unittest.cc", ] public_configs = [ ":x11_config" ] + if (is_win) { + deps += [ "../../rtc_base:win32" ] + } } } @@ -106,6 +116,9 @@ if (rtc_include_tests) { "win/cursor_unittest_resources.rc", "win/screen_capture_utils_unittest.cc", "win/screen_capturer_win_directx_unittest.cc", + "win/test_support/test_window.cc", + "win/test_support/test_window.h", + "win/window_capture_utils_unittest.cc", ] } deps = [ @@ -113,18 +126,34 @@ if (rtc_include_tests) { ":desktop_capture_mock", ":primitives", "../../rtc_base:checks", + + # TODO(bugs.webrtc.org/9987): Remove this dep on rtc_base:rtc_base once + # rtc_base:threading is fully defined. 
+ "../../rtc_base:rtc_base", "../../rtc_base:rtc_base_approved", - "../../system_wrappers:cpu_features_api", + "../../rtc_base:threading", + "../../system_wrappers", "../../test:test_support", ] if (rtc_desktop_capture_supported) { sources += [ "screen_capturer_helper_unittest.cc", - "screen_capturer_mac_unittest.cc", "screen_capturer_unittest.cc", "window_capturer_unittest.cc", ] - deps += [ ":desktop_capture_mock" ] + if (is_mac) { + sources += [ "screen_capturer_mac_unittest.cc" ] + } + if (rtc_enable_win_wgc) { + sources += [ + "win/wgc_capture_source_unittest.cc", + "win/wgc_capturer_win_unittest.cc", + ] + } + deps += [ + ":desktop_capture_mock", + "../../system_wrappers:metrics", + ] public_configs = [ ":x11_config" ] } } @@ -139,7 +168,7 @@ if (rtc_include_tests) { "screen_drawer.h", ] - if (is_linux) { + if (is_linux || is_chromeos) { sources += [ "screen_drawer_linux.cc" ] } @@ -185,7 +214,7 @@ if (rtc_include_tests) { } } -if (is_linux) { +if (is_linux || is_chromeos) { if (rtc_use_pipewire) { pkg_config("gio") { packages = [ @@ -194,22 +223,41 @@ if (is_linux) { ] } - if (rtc_link_pipewire) { + if (rtc_pipewire_version == "0.3") { pkg_config("pipewire") { - packages = [ "libpipewire-0.2" ] + packages = [ "libpipewire-0.3" ] + if (!rtc_link_pipewire) { + ignore_libs = true + } } } else { + pkg_config("pipewire") { + packages = [ "libpipewire-0.2" ] + if (!rtc_link_pipewire) { + ignore_libs = true + } + } + } + + if (!rtc_link_pipewire) { # When libpipewire is not directly linked, use stubs to allow for dlopening of # the binary. 
generate_stubs("pipewire_stubs") { - configs = [ "../../:common_config" ] + configs = [ + "../../:common_config", + ":pipewire", + ] deps = [ "../../rtc_base" ] extra_header = "linux/pipewire_stub_header.fragment" logging_function = "RTC_LOG(LS_VERBOSE)" logging_include = "rtc_base/logging.h" output_name = "linux/pipewire_stubs" path_from_source = "modules/desktop_capture/linux" - sigs = [ "linux/pipewire.sigs" ] + if (rtc_pipewire_version == "0.3") { + sigs = [ "linux/pipewire03.sigs" ] + } else { + sigs = [ "linux/pipewire02.sigs" ] + } } } @@ -224,7 +272,8 @@ if (is_linux) { rtc_source_set("desktop_capture") { visibility = [ "*" ] - public_deps = [ ":desktop_capture_generic" ] # no-presubmit-check TODO(webrtc:8603) + public_deps = # no-presubmit-check TODO(webrtc:8603) + [ ":desktop_capture_generic" ] if (is_mac) { public_deps += [ ":desktop_capture_objc" ] } @@ -256,11 +305,10 @@ if (is_mac) { "../../rtc_base", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", - "../../rtc_base/synchronization:rw_lock_wrapper", "../../rtc_base/system:rtc_export", "../../sdk:helpers_objc", ] - libs = [ + frameworks = [ "AppKit.framework", "IOKit.framework", "IOSurface.framework", @@ -278,14 +326,14 @@ rtc_library("desktop_capture_generic") { sources = [ "blank_detector_desktop_capturer_wrapper.cc", "blank_detector_desktop_capturer_wrapper.h", - "capture_result_desktop_capturer_wrapper.cc", - "capture_result_desktop_capturer_wrapper.h", "cropped_desktop_frame.cc", "cropped_desktop_frame.h", "cropping_window_capturer.cc", "cropping_window_capturer.h", "desktop_and_cursor_composer.cc", "desktop_and_cursor_composer.h", + "desktop_capture_metrics_helper.cc", + "desktop_capture_metrics_helper.h", "desktop_capture_options.cc", "desktop_capture_options.h", "desktop_capturer.cc", @@ -355,7 +403,7 @@ rtc_library("desktop_capture_generic") { "window_capturer_linux.cc", ] - if (build_with_mozilla && is_linux) { + if (build_with_mozilla && (is_linux || is_chromeos)) { sources += [ 
"app_capturer_linux.cc", "linux/app_capturer_x11.cc", @@ -398,6 +446,7 @@ rtc_library("desktop_capture_generic") { "Xfixes", "Xrender", "Xrandr", + "Xtst", ] } @@ -409,6 +458,21 @@ rtc_library("desktop_capture_generic") { ] } + deps = [ + ":primitives", + "../../api:function_view", + "../../api:refcountedbase", + "../../api:scoped_refptr", + "../../api:sequence_checker", + "../../rtc_base", # TODO(kjellander): Cleanup in bugs.webrtc.org/3806. + "../../rtc_base:checks", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:arch", + "../../rtc_base/system:rtc_export", + "../../system_wrappers", + "../../system_wrappers:metrics", + ] + if (is_win) { sources += [ "cropping_window_capturer_win.cc", @@ -422,6 +486,8 @@ rtc_library("desktop_capture_generic") { "win/d3d_device.h", "win/desktop.cc", "win/desktop.h", + "win/desktop_capture_utils.cc", + "win/desktop_capture_utils.h", "win/display_configuration_monitor.cc", "win/display_configuration_monitor.h", "win/dxgi_adapter_duplicator.cc", @@ -457,6 +523,8 @@ rtc_library("desktop_capture_generic") { "win/selected_window_context.h", "win/window_capture_utils.cc", "win/window_capture_utils.h", + "win/window_capturer_win_gdi.cc", + "win/window_capturer_win_gdi.h", "window_capturer_win.cc", "window_finder_win.cc", "window_finder_win.h", @@ -465,23 +533,13 @@ rtc_library("desktop_capture_generic") { "d3d11.lib", "dxgi.lib", ] + deps += [ "../../rtc_base:win32" ] } - deps = [ - ":primitives", - "../../api:function_view", - "../../api:refcountedbase", - "../../api:scoped_refptr", - "../../rtc_base", # TODO(kjellander): Cleanup in bugs.webrtc.org/3806. 
- "../../rtc_base:checks", - "../../rtc_base/synchronization:rw_lock_wrapper", - "../../rtc_base/system:arch", - "../../rtc_base/system:rtc_export", - "../../system_wrappers", - "../../system_wrappers:cpu_features_api", - "../../system_wrappers:metrics", + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", ] if (rtc_use_x11_extensions) { @@ -502,23 +560,31 @@ rtc_library("desktop_capture_generic") { sources += [ "linux/base_capturer_pipewire.cc", "linux/base_capturer_pipewire.h", - "linux/screen_capturer_pipewire.cc", - "linux/screen_capturer_pipewire.h", - "linux/window_capturer_pipewire.cc", - "linux/window_capturer_pipewire.h", ] configs += [ ":pipewire_config", ":gio", + ":pipewire", ] - if (rtc_link_pipewire) { - configs += [ ":pipewire" ] - } else { + if (!rtc_link_pipewire) { deps += [ ":pipewire_stubs" ] } } + + if (rtc_enable_win_wgc) { + sources += [ + "win/wgc_capture_session.cc", + "win/wgc_capture_session.h", + "win/wgc_capture_source.cc", + "win/wgc_capture_source.h", + "win/wgc_capturer_win.cc", + "win/wgc_capturer_win.h", + "win/wgc_desktop_frame.cc", + "win/wgc_desktop_frame.h", + ] + } } if (use_desktop_capture_differ_sse2) { diff --git a/modules/desktop_capture/OWNERS b/modules/desktop_capture/OWNERS index eaa671cb70..79df492e69 100644 --- a/modules/desktop_capture/OWNERS +++ b/modules/desktop_capture/OWNERS @@ -1,2 +1,2 @@ jamiewalch@chromium.org -sergeyu@chromium.org +joedow@chromium.org diff --git a/modules/desktop_capture/capture_result_desktop_capturer_wrapper.cc b/modules/desktop_capture/capture_result_desktop_capturer_wrapper.cc deleted file mode 100644 index e1d4b993e0..0000000000 --- a/modules/desktop_capture/capture_result_desktop_capturer_wrapper.cc +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "modules/desktop_capture/capture_result_desktop_capturer_wrapper.h" - -#include -#include - -#include "rtc_base/checks.h" - -namespace webrtc { - -CaptureResultDesktopCapturerWrapper::CaptureResultDesktopCapturerWrapper( - std::unique_ptr base_capturer, - ResultObserver* observer) - : DesktopCapturerWrapper(std::move(base_capturer)), observer_(observer) { - RTC_DCHECK(observer_); -} - -CaptureResultDesktopCapturerWrapper::~CaptureResultDesktopCapturerWrapper() = - default; - -void CaptureResultDesktopCapturerWrapper::Start(Callback* callback) { - if ((callback_ == nullptr) != (callback == nullptr)) { - if (callback) { - callback_ = callback; - base_capturer_->Start(this); - } else { - base_capturer_->Start(nullptr); - } - } - callback_ = callback; -} - -void CaptureResultDesktopCapturerWrapper::OnCaptureResult( - Result result, - std::unique_ptr frame) { - observer_->Observe(&result, &frame); - callback_->OnCaptureResult(result, std::move(frame)); -} - -} // namespace webrtc diff --git a/modules/desktop_capture/capture_result_desktop_capturer_wrapper.h b/modules/desktop_capture/capture_result_desktop_capturer_wrapper.h deleted file mode 100644 index 6d1d49a5e3..0000000000 --- a/modules/desktop_capture/capture_result_desktop_capturer_wrapper.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef MODULES_DESKTOP_CAPTURE_CAPTURE_RESULT_DESKTOP_CAPTURER_WRAPPER_H_ -#define MODULES_DESKTOP_CAPTURE_CAPTURE_RESULT_DESKTOP_CAPTURER_WRAPPER_H_ - -#include - -#include "modules/desktop_capture/desktop_capturer.h" -#include "modules/desktop_capture/desktop_capturer_wrapper.h" -#include "modules/desktop_capture/desktop_frame.h" - -namespace webrtc { - -// A DesktopCapturerWrapper implementation to capture the result of -// |base_capturer|. Derived classes are expected to provide a ResultObserver -// implementation to observe the DesktopFrame returned by |base_capturer_|. -class CaptureResultDesktopCapturerWrapper : public DesktopCapturerWrapper, - public DesktopCapturer::Callback { - public: - using Callback = DesktopCapturer::Callback; - - // Provides a way to let derived classes or clients to modify the result - // returned by |base_capturer_|. - class ResultObserver { - public: - ResultObserver(); - virtual ~ResultObserver(); - - virtual void Observe(Result* result, - std::unique_ptr* frame) = 0; - }; - - // |observer| must outlive this instance and can be |this|. |observer| is - // guaranteed to be executed only after the constructor and before the - // destructor. - CaptureResultDesktopCapturerWrapper( - std::unique_ptr base_capturer, - ResultObserver* observer); - - ~CaptureResultDesktopCapturerWrapper() override; - - // DesktopCapturer implementations. - void Start(Callback* callback) final; - - private: - // DesktopCapturer::Callback implementation. 
- void OnCaptureResult(Result result, - std::unique_ptr frame) final; - - ResultObserver* const observer_; - Callback* callback_ = nullptr; -}; - -} // namespace webrtc - -#endif // MODULES_DESKTOP_CAPTURE_CAPTURE_RESULT_DESKTOP_CAPTURER_WRAPPER_H_ diff --git a/modules/desktop_capture/cropping_window_capturer_win.cc b/modules/desktop_capture/cropping_window_capturer_win.cc index 6e53ca3522..31ddbe1b33 100644 --- a/modules/desktop_capture/cropping_window_capturer_win.cc +++ b/modules/desktop_capture/cropping_window_capturer_win.cc @@ -118,7 +118,7 @@ struct TopWindowVerifierContext : public SelectedWindowContext { // firing an assert when enabled, report that the selected window isn't // topmost to avoid inadvertent capture of other windows. RTC_LOG(LS_ERROR) << "Failed to enumerate windows: " << lastError; - RTC_DCHECK(false); + RTC_NOTREACHED(); return false; } } @@ -130,6 +130,8 @@ class CroppingWindowCapturerWin : public CroppingWindowCapturer { public: explicit CroppingWindowCapturerWin(const DesktopCaptureOptions& options) : CroppingWindowCapturer(options), + enumerate_current_process_windows_( + options.enumerate_current_process_windows()), full_screen_window_detector_(options.full_screen_window_detector()) {} void CaptureFrame() override; @@ -148,19 +150,43 @@ class CroppingWindowCapturerWin : public CroppingWindowCapturer { WindowCaptureHelperWin window_capture_helper_; + bool enumerate_current_process_windows_; + rtc::scoped_refptr full_screen_window_detector_; }; void CroppingWindowCapturerWin::CaptureFrame() { DesktopCapturer* win_capturer = window_capturer(); if (win_capturer) { - // Update the list of available sources and override source to capture if - // FullScreenWindowDetector returns not zero + // Feed the actual list of windows into full screen window detector. 
if (full_screen_window_detector_) { full_screen_window_detector_->UpdateWindowListIfNeeded( - selected_window(), - [win_capturer](DesktopCapturer::SourceList* sources) { - return win_capturer->GetSourceList(sources); + selected_window(), [this](DesktopCapturer::SourceList* sources) { + // Get the list of top level windows, including ones with empty + // title. win_capturer_->GetSourceList can't be used here + // cause it filters out the windows with empty titles and + // it uses responsiveness check which could lead to performance + // issues. + SourceList result; + int window_list_flags = + enumerate_current_process_windows_ + ? GetWindowListFlags::kNone + : GetWindowListFlags::kIgnoreCurrentProcessWindows; + + if (!webrtc::GetWindowList(window_list_flags, &result)) + return false; + + // Filter out windows not visible on current desktop + auto it = std::remove_if( + result.begin(), result.end(), [this](const auto& source) { + HWND hwnd = reinterpret_cast(source.id); + return !window_capture_helper_ + .IsWindowVisibleOnCurrentDesktop(hwnd); + }); + result.erase(it, result.end()); + + sources->swap(result); + return true; }); } win_capturer->SelectSource(GetWindowToCapture()); diff --git a/modules/desktop_capture/desktop_and_cursor_composer.cc b/modules/desktop_capture/desktop_and_cursor_composer.cc index f282c1d500..69b8b40c73 100644 --- a/modules/desktop_capture/desktop_and_cursor_composer.cc +++ b/modules/desktop_capture/desktop_and_cursor_composer.cc @@ -207,7 +207,8 @@ void DesktopAndCursorComposer::OnCaptureResult( DesktopCapturer::Result result, std::unique_ptr frame) { if (frame && cursor_) { - if (frame->rect().Contains(cursor_position_) && + if (!frame->may_contain_cursor() && + frame->rect().Contains(cursor_position_) && !desktop_capturer_->IsOccluded(cursor_position_)) { DesktopVector relative_position = cursor_position_.subtract(frame->top_left()); @@ -228,6 +229,7 @@ void DesktopAndCursorComposer::OnCaptureResult( previous_cursor_rect_ = 
frame_with_cursor->cursor_rect(); cursor_changed_ = false; frame = std::move(frame_with_cursor); + frame->set_may_contain_cursor(true); } } diff --git a/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc b/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc index c9cb56d8c2..00253d38e2 100644 --- a/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc +++ b/modules/desktop_capture/desktop_and_cursor_composer_unittest.cc @@ -27,6 +27,8 @@ namespace webrtc { namespace { +const int kFrameXCoord = 100; +const int kFrameYCoord = 200; const int kScreenWidth = 100; const int kScreenHeight = 100; const int kCursorWidth = 10; @@ -249,11 +251,61 @@ TEST_F(DesktopAndCursorComposerTest, CursorShouldBeIgnoredIfNoFrameCaptured) { } } +TEST_F(DesktopAndCursorComposerTest, CursorShouldBeIgnoredIfFrameMayContainIt) { + // We can't use a shared frame because we need to detect modifications + // compared to a control. + std::unique_ptr control_frame(CreateTestFrame()); + control_frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord)); + + struct { + int x; + int y; + bool may_contain_cursor; + } tests[] = { + {100, 200, true}, + {100, 200, false}, + {150, 250, true}, + {150, 250, false}, + }; + + for (size_t i = 0; i < arraysize(tests); i++) { + SCOPED_TRACE(i); + + std::unique_ptr frame(CreateTestFrame()); + frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord)); + frame->set_may_contain_cursor(tests[i].may_contain_cursor); + fake_screen_->SetNextFrame(std::move(frame)); + + const DesktopVector abs_pos(tests[i].x, tests[i].y); + fake_cursor_->SetState(MouseCursorMonitor::INSIDE, abs_pos); + blender_.CaptureFrame(); + + // If the frame may already have contained the cursor, then |CaptureFrame()| + // should not have modified it, so it should be the same as the control. 
+ EXPECT_TRUE(frame_); + const DesktopVector rel_pos(abs_pos.subtract(control_frame->top_left())); + if (tests[i].may_contain_cursor) { + EXPECT_EQ( + *reinterpret_cast(frame_->GetFrameDataAtPos(rel_pos)), + *reinterpret_cast( + control_frame->GetFrameDataAtPos(rel_pos))); + + } else { + // |CaptureFrame()| should have modified the frame to have the cursor. + EXPECT_NE( + *reinterpret_cast(frame_->GetFrameDataAtPos(rel_pos)), + *reinterpret_cast( + control_frame->GetFrameDataAtPos(rel_pos))); + EXPECT_TRUE(frame_->may_contain_cursor()); + } + } +} + TEST_F(DesktopAndCursorComposerTest, CursorShouldBeIgnoredIfItIsOutOfDesktopFrame) { std::unique_ptr frame( SharedDesktopFrame::Wrap(CreateTestFrame())); - frame->set_top_left(DesktopVector(100, 200)); + frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord)); // The frame covers (100, 200) - (200, 300). struct { @@ -279,7 +331,7 @@ TEST_F(DesktopAndCursorComposerTest, TEST_F(DesktopAndCursorComposerTest, IsOccludedShouldBeConsidered) { std::unique_ptr frame( SharedDesktopFrame::Wrap(CreateTestFrame())); - frame->set_top_left(DesktopVector(100, 200)); + frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord)); // The frame covers (100, 200) - (200, 300). struct { @@ -304,7 +356,7 @@ TEST_F(DesktopAndCursorComposerTest, IsOccludedShouldBeConsidered) { TEST_F(DesktopAndCursorComposerTest, CursorIncluded) { std::unique_ptr frame( SharedDesktopFrame::Wrap(CreateTestFrame())); - frame->set_top_left(DesktopVector(100, 200)); + frame->set_top_left(DesktopVector(kFrameXCoord, kFrameYCoord)); // The frame covers (100, 200) - (200, 300). struct { diff --git a/modules/desktop_capture/desktop_capture_metrics_helper.cc b/modules/desktop_capture/desktop_capture_metrics_helper.cc new file mode 100644 index 0000000000..6b741ef4bb --- /dev/null +++ b/modules/desktop_capture/desktop_capture_metrics_helper.cc @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/desktop_capture_metrics_helper.h" + +#include "modules/desktop_capture/desktop_capture_types.h" +#include "system_wrappers/include/metrics.h" + +namespace webrtc { +namespace { +// This enum is logged via UMA so entries should not be reordered or have their +// values changed. This should also be kept in sync with the values in the +// DesktopCapturerId namespace. +enum class SequentialDesktopCapturerId { + kUnknown = 0, + kWgcCapturerWin = 1, + kScreenCapturerWinMagnifier = 2, + kWindowCapturerWinGdi = 3, + kScreenCapturerWinGdi = 4, + kScreenCapturerWinDirectx = 5, + kMaxValue = kScreenCapturerWinDirectx +}; +} // namespace + +void RecordCapturerImpl(uint32_t capturer_id) { + SequentialDesktopCapturerId sequential_id; + switch (capturer_id) { + case DesktopCapturerId::kWgcCapturerWin: + sequential_id = SequentialDesktopCapturerId::kWgcCapturerWin; + break; + case DesktopCapturerId::kScreenCapturerWinMagnifier: + sequential_id = SequentialDesktopCapturerId::kScreenCapturerWinMagnifier; + break; + case DesktopCapturerId::kWindowCapturerWinGdi: + sequential_id = SequentialDesktopCapturerId::kWindowCapturerWinGdi; + break; + case DesktopCapturerId::kScreenCapturerWinGdi: + sequential_id = SequentialDesktopCapturerId::kScreenCapturerWinGdi; + break; + case DesktopCapturerId::kScreenCapturerWinDirectx: + sequential_id = SequentialDesktopCapturerId::kScreenCapturerWinDirectx; + break; + case DesktopCapturerId::kUnknown: + default: + sequential_id = SequentialDesktopCapturerId::kUnknown; + } + RTC_HISTOGRAM_ENUMERATION( + "WebRTC.DesktopCapture.Win.DesktopCapturerImpl", + static_cast(sequential_id), + 
static_cast(SequentialDesktopCapturerId::kMaxValue)); +} + +} // namespace webrtc diff --git a/test/fuzzers/rtp_rtcp_demuxer_helper_fuzzer.cc b/modules/desktop_capture/desktop_capture_metrics_helper.h similarity index 57% rename from test/fuzzers/rtp_rtcp_demuxer_helper_fuzzer.cc rename to modules/desktop_capture/desktop_capture_metrics_helper.h index f7403b9567..37542b84bb 100644 --- a/test/fuzzers/rtp_rtcp_demuxer_helper_fuzzer.cc +++ b/modules/desktop_capture/desktop_capture_metrics_helper.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,16 +8,15 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include -#include +#ifndef MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METRICS_HELPER_H_ +#define MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METRICS_HELPER_H_ -#include "api/array_view.h" -#include "call/rtp_rtcp_demuxer_helper.h" +#include namespace webrtc { -void FuzzOneInput(const uint8_t* data, size_t size) { - ParseRtcpPacketSenderSsrc(rtc::MakeArrayView(data, size)); -} +void RecordCapturerImpl(uint32_t capturer_id); } // namespace webrtc + +#endif // MODULES_DESKTOP_CAPTURE_DESKTOP_CAPTURE_METRICS_HELPER_H_ diff --git a/modules/desktop_capture/desktop_capture_options.h b/modules/desktop_capture/desktop_capture_options.h index 521c80b5c5..a693803aa0 100644 --- a/modules/desktop_capture/desktop_capture_options.h +++ b/modules/desktop_capture/desktop_capture_options.h @@ -98,6 +98,24 @@ class RTC_EXPORT DesktopCaptureOptions { } #if defined(WEBRTC_WIN) + // Enumerating windows owned by the current process on Windows has some + // complications due to |GetWindowText*()| APIs potentially causing a + // deadlock (see the comments in the |GetWindowListHandler()| function in + // window_capture_utils.cc for more 
details on the deadlock). + // To avoid this issue, consumers can either ensure that the thread that runs + // their message loop never waits on |GetSourceList()|, or they can set this + // flag to false which will prevent windows running in the current process + // from being enumerated and included in the results. Consumers can still + // provide the WindowId for their own windows to |SelectSource()| and capture + // them. + bool enumerate_current_process_windows() const { + return enumerate_current_process_windows_; + } + void set_enumerate_current_process_windows( + bool enumerate_current_process_windows) { + enumerate_current_process_windows_ = enumerate_current_process_windows; + } + bool allow_use_magnification_api() const { return allow_use_magnification_api_; } @@ -126,7 +144,19 @@ class RTC_EXPORT DesktopCaptureOptions { void set_allow_cropping_window_capturer(bool allow) { allow_cropping_window_capturer_ = allow; } -#endif + +#if defined(RTC_ENABLE_WIN_WGC) + // This flag enables the WGC capturer for both window and screen capture. + // This capturer should offer similar or better performance than the cropping + // capturer without the disadvantages listed above. However, the WGC capturer + // is only available on Windows 10 version 1809 (Redstone 5) and up. This flag + // will have no affect on older versions. + // If set, and running a supported version of Win10, this flag will take + // precedence over the cropping, directx, and magnification flags. 
+ bool allow_wgc_capturer() const { return allow_wgc_capturer_; } + void set_allow_wgc_capturer(bool allow) { allow_wgc_capturer_ = allow; } +#endif // defined(RTC_ENABLE_WIN_WGC) +#endif // defined(WEBRTC_WIN) #if defined(WEBRTC_USE_PIPEWIRE) bool allow_pipewire() const { return allow_pipewire_; } @@ -146,9 +176,13 @@ class RTC_EXPORT DesktopCaptureOptions { rtc::scoped_refptr full_screen_window_detector_; #if defined(WEBRTC_WIN) + bool enumerate_current_process_windows_ = true; bool allow_use_magnification_api_ = false; bool allow_directx_capturer_ = false; bool allow_cropping_window_capturer_ = false; +#if defined(RTC_ENABLE_WIN_WGC) + bool allow_wgc_capturer_ = false; +#endif #endif #if defined(WEBRTC_USE_X11) bool use_update_notifications_ = false; diff --git a/modules/desktop_capture/desktop_capture_types.h b/modules/desktop_capture/desktop_capture_types.h index 5031cbf3ac..5f9966bb6d 100644 --- a/modules/desktop_capture/desktop_capture_types.h +++ b/modules/desktop_capture/desktop_capture_types.h @@ -36,8 +36,11 @@ const ScreenId kFullDesktopScreenId = -1; const ScreenId kInvalidScreenId = -2; -// An integer to attach to each DesktopFrame to differentiate the generator of -// the frame. +// Integers to attach to each DesktopFrame to differentiate the generator of +// the frame. The entries in this namespace should remain in sync with the +// SequentialDesktopCapturerId enum, which is logged via UMA. 
+// |kScreenCapturerWinGdi| and |kScreenCapturerWinDirectx| values are preserved +// to maintain compatibility namespace DesktopCapturerId { constexpr uint32_t CreateFourCC(char a, char b, char c, char d) { return ((static_cast(a)) | (static_cast(b) << 8) | @@ -45,6 +48,9 @@ constexpr uint32_t CreateFourCC(char a, char b, char c, char d) { } constexpr uint32_t kUnknown = 0; +constexpr uint32_t kWgcCapturerWin = 1; +constexpr uint32_t kScreenCapturerWinMagnifier = 2; +constexpr uint32_t kWindowCapturerWinGdi = 3; constexpr uint32_t kScreenCapturerWinGdi = CreateFourCC('G', 'D', 'I', ' '); constexpr uint32_t kScreenCapturerWinDirectx = CreateFourCC('D', 'X', 'G', 'I'); } // namespace DesktopCapturerId diff --git a/modules/desktop_capture/desktop_capturer.cc b/modules/desktop_capture/desktop_capturer.cc index 61926a6023..735aa4d530 100644 --- a/modules/desktop_capture/desktop_capturer.cc +++ b/modules/desktop_capture/desktop_capturer.cc @@ -20,6 +20,11 @@ #include "modules/desktop_capture/desktop_capture_options.h" #include "modules/desktop_capture/desktop_capturer_differ_wrapper.h" +#if defined(RTC_ENABLE_WIN_WGC) +#include "modules/desktop_capture/win/wgc_capturer_win.h" +#include "rtc_base/win/windows_version.h" +#endif // defined(RTC_ENABLE_WIN_WGC) + namespace webrtc { DesktopCapturer::~DesktopCapturer() = default; @@ -48,6 +53,13 @@ bool DesktopCapturer::IsOccluded(const DesktopVector& pos) { // static std::unique_ptr DesktopCapturer::CreateWindowCapturer( const DesktopCaptureOptions& options) { +#if defined(RTC_ENABLE_WIN_WGC) + if (options.allow_wgc_capturer() && + rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN10_RS5) { + return WgcCapturerWin::CreateRawWindowCapturer(options); + } +#endif // defined(RTC_ENABLE_WIN_WGC) + #if defined(WEBRTC_WIN) if (options.allow_cropping_window_capturer()) { return CroppingWindowCapturer::CreateCapturer(options); @@ -65,6 +77,13 @@ std::unique_ptr DesktopCapturer::CreateWindowCapturer( // static 
std::unique_ptr DesktopCapturer::CreateScreenCapturer( const DesktopCaptureOptions& options) { +#if defined(RTC_ENABLE_WIN_WGC) + if (options.allow_wgc_capturer() && + rtc::rtc_win::GetVersion() >= rtc::rtc_win::Version::VERSION_WIN10_RS5) { + return WgcCapturerWin::CreateRawScreenCapturer(options); + } +#endif // defined(RTC_ENABLE_WIN_WGC) + std::unique_ptr capturer = CreateRawScreenCapturer(options); if (capturer && options.detect_updated_region()) { capturer.reset(new DesktopCapturerDifferWrapper(std::move(capturer))); diff --git a/modules/desktop_capture/desktop_frame.cc b/modules/desktop_capture/desktop_frame.cc index fd10dd5d23..9e4a899fd2 100644 --- a/modules/desktop_capture/desktop_frame.cc +++ b/modules/desktop_capture/desktop_frame.cc @@ -19,6 +19,7 @@ #include "modules/desktop_capture/desktop_capture_types.h" #include "modules/desktop_capture/desktop_geometry.h" #include "rtc_base/checks.h" +#include "third_party/libyuv/include/libyuv/planar_functions.h" namespace webrtc { @@ -44,11 +45,9 @@ void DesktopFrame::CopyPixelsFrom(const uint8_t* src_buffer, RTC_CHECK(DesktopRect::MakeSize(size()).ContainsRect(dest_rect)); uint8_t* dest = GetFrameDataAtPos(dest_rect.top_left()); - for (int y = 0; y < dest_rect.height(); ++y) { - memcpy(dest, src_buffer, DesktopFrame::kBytesPerPixel * dest_rect.width()); - src_buffer += src_stride; - dest += stride(); - } + libyuv::CopyPlane(src_buffer, src_stride, dest, stride(), + DesktopFrame::kBytesPerPixel * dest_rect.width(), + dest_rect.height()); } void DesktopFrame::CopyPixelsFrom(const DesktopFrame& src_frame, @@ -158,11 +157,9 @@ BasicDesktopFrame::~BasicDesktopFrame() { // static DesktopFrame* BasicDesktopFrame::CopyOf(const DesktopFrame& frame) { DesktopFrame* result = new BasicDesktopFrame(frame.size()); - for (int y = 0; y < frame.size().height(); ++y) { - memcpy(result->data() + y * result->stride(), - frame.data() + y * frame.stride(), - frame.size().width() * kBytesPerPixel); - } + 
libyuv::CopyPlane(frame.data(), frame.stride(), result->data(), + result->stride(), frame.size().width() * kBytesPerPixel, + frame.size().height()); result->CopyFrameInfoFrom(frame); return result; } diff --git a/modules/desktop_capture/desktop_frame.h b/modules/desktop_capture/desktop_frame.h index 3a18b7852d..bc47cc50f2 100644 --- a/modules/desktop_capture/desktop_frame.h +++ b/modules/desktop_capture/desktop_frame.h @@ -29,7 +29,7 @@ const float kStandardDPI = 96.0f; // DesktopFrame represents a video frame captured from the screen. class RTC_EXPORT DesktopFrame { public: - // DesktopFrame objects always hold RGBA data. + // DesktopFrame objects always hold BGRA data. static const int kBytesPerPixel = 4; virtual ~DesktopFrame(); @@ -72,6 +72,15 @@ class RTC_EXPORT DesktopFrame { const DesktopVector& dpi() const { return dpi_; } void set_dpi(const DesktopVector& dpi) { dpi_ = dpi; } + // Indicates if this frame may have the mouse cursor in it. Capturers that + // support cursor capture may set this to true. If the cursor was + // outside of the captured area, this may be true even though the cursor is + // not in the image. + bool may_contain_cursor() const { return may_contain_cursor_; } + void set_may_contain_cursor(bool may_contain_cursor) { + may_contain_cursor_ = may_contain_cursor; + } + // Time taken to capture the frame in milliseconds. 
int64_t capture_time_ms() const { return capture_time_ms_; } void set_capture_time_ms(int64_t time_ms) { capture_time_ms_ = time_ms; } @@ -150,6 +159,7 @@ class RTC_EXPORT DesktopFrame { DesktopRegion updated_region_; DesktopVector top_left_; DesktopVector dpi_; + bool may_contain_cursor_ = false; int64_t capture_time_ms_; uint32_t capturer_id_; std::vector icc_profile_; diff --git a/modules/desktop_capture/desktop_region.cc b/modules/desktop_capture/desktop_region.cc index befbcc6f41..96f142d3dd 100644 --- a/modules/desktop_capture/desktop_region.cc +++ b/modules/desktop_capture/desktop_region.cc @@ -10,11 +10,11 @@ #include "modules/desktop_capture/desktop_region.h" -#include - #include #include +#include "rtc_base/checks.h" + namespace webrtc { DesktopRegion::RowSpan::RowSpan(int32_t left, int32_t right) @@ -109,7 +109,7 @@ void DesktopRegion::AddRect(const DesktopRect& rect) { // If the |top| falls in the middle of the |row| then split |row| into // two, at |top|, and leave |row| referring to the lower of the two, // ready to insert a new span into. - assert(top <= row->second->bottom); + RTC_DCHECK_LE(top, row->second->bottom); Rows::iterator new_row = rows_.insert( row, Rows::value_type(top, new Row(row->second->top, top))); row->second->top = top; @@ -148,7 +148,7 @@ void DesktopRegion::AddRects(const DesktopRect* rects, int count) { } void DesktopRegion::MergeWithPrecedingRow(Rows::iterator row) { - assert(row != rows_.end()); + RTC_DCHECK(row != rows_.end()); if (row != rows_.begin()) { Rows::iterator previous_row = row; @@ -230,7 +230,7 @@ void DesktopRegion::IntersectRows(const RowSpanSet& set1, RowSpanSet::const_iterator end1 = set1.end(); RowSpanSet::const_iterator it2 = set2.begin(); RowSpanSet::const_iterator end2 = set2.end(); - assert(it1 != end1 && it2 != end2); + RTC_DCHECK(it1 != end1 && it2 != end2); do { // Arrange for |it1| to always be the left-most of the spans. 
@@ -247,7 +247,7 @@ void DesktopRegion::IntersectRows(const RowSpanSet& set1, int32_t left = it2->left; int32_t right = std::min(it1->right, it2->right); - assert(left < right); + RTC_DCHECK_LT(left, right); output->push_back(RowSpan(left, right)); @@ -302,7 +302,7 @@ void DesktopRegion::Subtract(const DesktopRegion& region) { // If |top| falls in the middle of |row_a| then split |row_a| into two, at // |top|, and leave |row_a| referring to the lower of the two, ready to // subtract spans from. - assert(top <= row_a->second->bottom); + RTC_DCHECK_LE(top, row_a->second->bottom); Rows::iterator new_row = rows_.insert( row_a, Rows::value_type(top, new Row(row_a->second->top, top))); row_a->second->top = top; @@ -420,7 +420,7 @@ void DesktopRegion::AddSpanToRow(Row* row, int left, int right) { // Find the first span that ends at or after |left|. RowSpanSet::iterator start = std::lower_bound( row->spans.begin(), row->spans.end(), left, CompareSpanRight); - assert(start < row->spans.end()); + RTC_DCHECK(start < row->spans.end()); // Find the first span that starts after |right|. 
RowSpanSet::iterator end = @@ -467,7 +467,7 @@ bool DesktopRegion::IsSpanInRow(const Row& row, const RowSpan& span) { void DesktopRegion::SubtractRows(const RowSpanSet& set_a, const RowSpanSet& set_b, RowSpanSet* output) { - assert(!set_a.empty() && !set_b.empty()); + RTC_DCHECK(!set_a.empty() && !set_b.empty()); RowSpanSet::const_iterator it_b = set_b.begin(); @@ -503,7 +503,7 @@ DesktopRegion::Iterator::Iterator(const DesktopRegion& region) row_(region.rows_.begin()), previous_row_(region.rows_.end()) { if (!IsAtEnd()) { - assert(row_->second->spans.size() > 0); + RTC_DCHECK_GT(row_->second->spans.size(), 0); row_span_ = row_->second->spans.begin(); UpdateCurrentRect(); } @@ -516,7 +516,7 @@ bool DesktopRegion::Iterator::IsAtEnd() const { } void DesktopRegion::Iterator::Advance() { - assert(!IsAtEnd()); + RTC_DCHECK(!IsAtEnd()); while (true) { ++row_span_; @@ -524,7 +524,7 @@ void DesktopRegion::Iterator::Advance() { previous_row_ = row_; ++row_; if (row_ != region_.rows_.end()) { - assert(row_->second->spans.size() > 0); + RTC_DCHECK_GT(row_->second->spans.size(), 0); row_span_ = row_->second->spans.begin(); } } @@ -544,7 +544,7 @@ void DesktopRegion::Iterator::Advance() { break; } - assert(!IsAtEnd()); + RTC_DCHECK(!IsAtEnd()); UpdateCurrentRect(); } diff --git a/modules/desktop_capture/differ_block.cc b/modules/desktop_capture/differ_block.cc index dd9ab457e0..4f0c5430c9 100644 --- a/modules/desktop_capture/differ_block.cc +++ b/modules/desktop_capture/differ_block.cc @@ -35,7 +35,7 @@ bool VectorDifference(const uint8_t* image1, const uint8_t* image2) { // TODO(hclam): Implement a NEON version. diff_proc = &VectorDifference_C; #else - bool have_sse2 = WebRtc_GetCPUInfo(kSSE2) != 0; + bool have_sse2 = GetCPUInfo(kSSE2) != 0; // For x86 processors, check if SSE2 is supported. 
if (have_sse2 && kBlockSize == 32) { diff_proc = &VectorDifference_SSE2_W32; diff --git a/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc b/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc index 206791ca78..0b1ab7ed37 100644 --- a/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc +++ b/modules/desktop_capture/fallback_desktop_capturer_wrapper.cc @@ -14,8 +14,8 @@ #include +#include "api/sequence_checker.h" #include "rtc_base/checks.h" -#include "rtc_base/thread_checker.h" #include "system_wrappers/include/metrics.h" namespace webrtc { @@ -42,7 +42,7 @@ class SharedMemoryFactoryProxy : public SharedMemoryFactory { explicit SharedMemoryFactoryProxy(SharedMemoryFactory* factory); SharedMemoryFactory* factory_ = nullptr; - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; }; } // namespace diff --git a/modules/desktop_capture/full_screen_window_detector.h b/modules/desktop_capture/full_screen_window_detector.h index 46fb607b7d..ca30d95de4 100644 --- a/modules/desktop_capture/full_screen_window_detector.h +++ b/modules/desktop_capture/full_screen_window_detector.h @@ -32,7 +32,8 @@ namespace webrtc { // window using criteria provided by application specific // FullScreenApplicationHandler. 
-class FullScreenWindowDetector : public rtc::RefCountedBase { +class FullScreenWindowDetector + : public rtc::RefCountedNonVirtual { public: using ApplicationHandlerFactory = std::function( diff --git a/modules/desktop_capture/linux/base_capturer_pipewire.cc b/modules/desktop_capture/linux/base_capturer_pipewire.cc index 2640e93aa9..e5d001e476 100644 --- a/modules/desktop_capture/linux/base_capturer_pipewire.cc +++ b/modules/desktop_capture/linux/base_capturer_pipewire.cc @@ -14,8 +14,14 @@ #include #include #include +#if !PW_CHECK_VERSION(0, 3, 0) #include #include +#endif + +#include +#include +#include #include #include @@ -30,7 +36,11 @@ #include "modules/desktop_capture/linux/pipewire_stubs.h" using modules_desktop_capture_linux::InitializeStubs; -using modules_desktop_capture_linux::kModulePipewire; +#if PW_CHECK_VERSION(0, 3, 0) +using modules_desktop_capture_linux::kModulePipewire03; +#else +using modules_desktop_capture_linux::kModulePipewire02; +#endif using modules_desktop_capture_linux::StubPathMap; #endif // defined(WEBRTC_DLOPEN_PIPEWIRE) @@ -47,9 +57,156 @@ const char kScreenCastInterfaceName[] = "org.freedesktop.portal.ScreenCast"; const int kBytesPerPixel = 4; #if defined(WEBRTC_DLOPEN_PIPEWIRE) +#if PW_CHECK_VERSION(0, 3, 0) +const char kPipeWireLib[] = "libpipewire-0.3.so.0"; +#else const char kPipeWireLib[] = "libpipewire-0.2.so.1"; #endif +#endif + +// static +struct dma_buf_sync { + uint64_t flags; +}; +#define DMA_BUF_SYNC_READ (1 << 0) +#define DMA_BUF_SYNC_START (0 << 2) +#define DMA_BUF_SYNC_END (1 << 2) +#define DMA_BUF_BASE 'b' +#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) + +static void SyncDmaBuf(int fd, uint64_t start_or_end) { + struct dma_buf_sync sync = {0}; + + sync.flags = start_or_end | DMA_BUF_SYNC_READ; + + while (true) { + int ret; + ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync); + if (ret == -1 && errno == EINTR) { + continue; + } else if (ret == -1) { + RTC_LOG(LS_ERROR) << "Failed to synchronize DMA 
buffer: " + << g_strerror(errno); + break; + } else { + break; + } + } +} + +class ScopedBuf { + public: + ScopedBuf() {} + ScopedBuf(unsigned char* map, int map_size, bool is_dma_buf, int fd) + : map_(map), map_size_(map_size), is_dma_buf_(is_dma_buf), fd_(fd) {} + ~ScopedBuf() { + if (map_ != MAP_FAILED) { + if (is_dma_buf_) { + SyncDmaBuf(fd_, DMA_BUF_SYNC_END); + } + munmap(map_, map_size_); + } + } + + operator bool() { return map_ != MAP_FAILED; } + + void initialize(unsigned char* map, int map_size, bool is_dma_buf, int fd) { + map_ = map; + map_size_ = map_size; + is_dma_buf_ = is_dma_buf; + fd_ = fd; + } + + unsigned char* get() { return map_; } + + protected: + unsigned char* map_ = nullptr; + int map_size_; + bool is_dma_buf_; + int fd_; +}; + +template +class Scoped { + public: + Scoped() {} + explicit Scoped(T* val) { ptr_ = val; } + ~Scoped() { RTC_NOTREACHED(); } + + T* operator->() { return ptr_; } + + bool operator!() { return ptr_ == nullptr; } + + T* get() { return ptr_; } + + T** receive() { + RTC_CHECK(!ptr_); + return &ptr_; + } + + Scoped& operator=(T* val) { + ptr_ = val; + return *this; + } + + protected: + T* ptr_ = nullptr; +}; + +template <> +Scoped::~Scoped() { + if (ptr_) { + g_error_free(ptr_); + } +} + +template <> +Scoped::~Scoped() { + if (ptr_) { + g_free(ptr_); + } +} + +template <> +Scoped::~Scoped() { + if (ptr_) { + g_variant_unref(ptr_); + } +} + +template <> +Scoped::~Scoped() { + if (ptr_) { + g_variant_iter_free(ptr_); + } +} + +template <> +Scoped::~Scoped() { + if (ptr_) { + g_object_unref(ptr_); + } +} + +template <> +Scoped::~Scoped() { + if (ptr_) { + g_object_unref(ptr_); + } +} + +#if PW_CHECK_VERSION(0, 3, 0) +void BaseCapturerPipeWire::OnCoreError(void* data, + uint32_t id, + int seq, + int res, + const char* message) { + BaseCapturerPipeWire* that = static_cast(data); + RTC_DCHECK(that); + RTC_LOG(LS_ERROR) << "PipeWire remote error: " << message; +} +#else // static void 
BaseCapturerPipeWire::OnStateChanged(void* data, pw_remote_state old_state, @@ -64,7 +221,7 @@ void BaseCapturerPipeWire::OnStateChanged(void* data, break; case PW_REMOTE_STATE_CONNECTED: RTC_LOG(LS_INFO) << "PipeWire remote state: connected."; - that->CreateReceivingStream(); + that->pw_stream_ = that->CreateReceivingStream(); break; case PW_REMOTE_STATE_CONNECTING: RTC_LOG(LS_INFO) << "PipeWire remote state: connecting."; @@ -74,6 +231,7 @@ void BaseCapturerPipeWire::OnStateChanged(void* data, break; } } +#endif // static void BaseCapturerPipeWire::OnStreamStateChanged(void* data, @@ -83,6 +241,18 @@ void BaseCapturerPipeWire::OnStreamStateChanged(void* data, BaseCapturerPipeWire* that = static_cast(data); RTC_DCHECK(that); +#if PW_CHECK_VERSION(0, 3, 0) + switch (state) { + case PW_STREAM_STATE_ERROR: + RTC_LOG(LS_ERROR) << "PipeWire stream state error: " << error_message; + break; + case PW_STREAM_STATE_PAUSED: + case PW_STREAM_STATE_STREAMING: + case PW_STREAM_STATE_UNCONNECTED: + case PW_STREAM_STATE_CONNECTING: + break; + } +#else switch (state) { case PW_STREAM_STATE_ERROR: RTC_LOG(LS_ERROR) << "PipeWire stream state error: " << error_message; @@ -97,36 +267,74 @@ void BaseCapturerPipeWire::OnStreamStateChanged(void* data, case PW_STREAM_STATE_STREAMING: break; } +#endif } // static +#if PW_CHECK_VERSION(0, 3, 0) +void BaseCapturerPipeWire::OnStreamParamChanged(void* data, + uint32_t id, + const struct spa_pod* format) { +#else void BaseCapturerPipeWire::OnStreamFormatChanged(void* data, const struct spa_pod* format) { +#endif BaseCapturerPipeWire* that = static_cast(data); RTC_DCHECK(that); RTC_LOG(LS_INFO) << "PipeWire stream format changed."; +#if PW_CHECK_VERSION(0, 3, 0) + if (!format || id != SPA_PARAM_Format) { +#else if (!format) { pw_stream_finish_format(that->pw_stream_, /*res=*/0, /*params=*/nullptr, /*n_params=*/0); +#endif return; } +#if PW_CHECK_VERSION(0, 3, 0) + spa_format_video_raw_parse(format, &that->spa_video_format_); +#else 
that->spa_video_format_ = new spa_video_info_raw(); spa_format_video_raw_parse(format, that->spa_video_format_, &that->pw_type_->format_video); +#endif +#if PW_CHECK_VERSION(0, 3, 0) + auto width = that->spa_video_format_.size.width; + auto height = that->spa_video_format_.size.height; +#else auto width = that->spa_video_format_->size.width; auto height = that->spa_video_format_->size.height; +#endif auto stride = SPA_ROUND_UP_N(width * kBytesPerPixel, 4); auto size = height * stride; + that->desktop_size_ = DesktopSize(width, height); + uint8_t buffer[1024] = {}; auto builder = spa_pod_builder{buffer, sizeof(buffer)}; // Setup buffers and meta header for new format. - const struct spa_pod* params[2]; + const struct spa_pod* params[3]; +#if PW_CHECK_VERSION(0, 3, 0) + params[0] = reinterpret_cast(spa_pod_builder_add_object( + &builder, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers, + SPA_PARAM_BUFFERS_size, SPA_POD_Int(size), SPA_PARAM_BUFFERS_stride, + SPA_POD_Int(stride), SPA_PARAM_BUFFERS_buffers, + SPA_POD_CHOICE_RANGE_Int(8, 1, 32))); + params[1] = reinterpret_cast(spa_pod_builder_add_object( + &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type, + SPA_POD_Id(SPA_META_Header), SPA_PARAM_META_size, + SPA_POD_Int(sizeof(struct spa_meta_header)))); + params[2] = reinterpret_cast(spa_pod_builder_add_object( + &builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta, SPA_PARAM_META_type, + SPA_POD_Id(SPA_META_VideoCrop), SPA_PARAM_META_size, + SPA_POD_Int(sizeof(struct spa_meta_region)))); + pw_stream_update_params(that->pw_stream_, params, 3); +#else params[0] = reinterpret_cast(spa_pod_builder_object( &builder, // id to enumerate buffer requirements @@ -155,8 +363,18 @@ void BaseCapturerPipeWire::OnStreamFormatChanged(void* data, // Size: size of the metadata, specified as integer (i) ":", that->pw_core_type_->param_meta.size, "i", sizeof(struct spa_meta_header))); - - pw_stream_finish_format(that->pw_stream_, /*res=*/0, params, /*n_params=*/2); 
+ params[2] = reinterpret_cast(spa_pod_builder_object( + &builder, + // id to enumerate supported metadata + that->pw_core_type_->param.idMeta, that->pw_core_type_->param_meta.Meta, + // Type: specified as id or enum (I) + ":", that->pw_core_type_->param_meta.type, "I", + that->pw_core_type_->meta.VideoCrop, + // Size: size of the metadata, specified as integer (i) + ":", that->pw_core_type_->param_meta.size, "i", + sizeof(struct spa_meta_video_crop))); + pw_stream_finish_format(that->pw_stream_, /*res=*/0, params, /*n_params=*/3); +#endif } // static @@ -164,15 +382,26 @@ void BaseCapturerPipeWire::OnStreamProcess(void* data) { BaseCapturerPipeWire* that = static_cast(data); RTC_DCHECK(that); - pw_buffer* buf = nullptr; + struct pw_buffer* next_buffer; + struct pw_buffer* buffer = nullptr; + + next_buffer = pw_stream_dequeue_buffer(that->pw_stream_); + while (next_buffer) { + buffer = next_buffer; + next_buffer = pw_stream_dequeue_buffer(that->pw_stream_); + + if (next_buffer) { + pw_stream_queue_buffer(that->pw_stream_, buffer); + } + } - if (!(buf = pw_stream_dequeue_buffer(that->pw_stream_))) { + if (!buffer) { return; } - that->HandleBuffer(buf); + that->HandleBuffer(buffer); - pw_stream_queue_buffer(that->pw_stream_, buf); + pw_stream_queue_buffer(that->pw_stream_, buffer); } BaseCapturerPipeWire::BaseCapturerPipeWire(CaptureSourceType source_type) @@ -183,6 +412,7 @@ BaseCapturerPipeWire::~BaseCapturerPipeWire() { pw_thread_loop_stop(pw_main_loop_); } +#if !PW_CHECK_VERSION(0, 3, 0) if (pw_type_) { delete pw_type_; } @@ -190,30 +420,41 @@ BaseCapturerPipeWire::~BaseCapturerPipeWire() { if (spa_video_format_) { delete spa_video_format_; } +#endif if (pw_stream_) { pw_stream_destroy(pw_stream_); } +#if !PW_CHECK_VERSION(0, 3, 0) if (pw_remote_) { pw_remote_destroy(pw_remote_); } +#endif +#if PW_CHECK_VERSION(0, 3, 0) + if (pw_core_) { + pw_core_disconnect(pw_core_); + } + + if (pw_context_) { + pw_context_destroy(pw_context_); + } +#else if (pw_core_) { 
pw_core_destroy(pw_core_); } +#endif if (pw_main_loop_) { pw_thread_loop_destroy(pw_main_loop_); } +#if !PW_CHECK_VERSION(0, 3, 0) if (pw_loop_) { pw_loop_destroy(pw_loop_); } - - if (current_frame_) { - free(current_frame_); - } +#endif if (start_request_signal_id_) { g_dbus_connection_signal_unsubscribe(connection_, start_request_signal_id_); @@ -228,18 +469,16 @@ BaseCapturerPipeWire::~BaseCapturerPipeWire() { } if (session_handle_) { - GDBusMessage* message = g_dbus_message_new_method_call( - kDesktopBusName, session_handle_, kSessionInterfaceName, "Close"); - if (message) { - GError* error = nullptr; - g_dbus_connection_send_message(connection_, message, + Scoped message(g_dbus_message_new_method_call( + kDesktopBusName, session_handle_, kSessionInterfaceName, "Close")); + if (message.get()) { + Scoped error; + g_dbus_connection_send_message(connection_, message.get(), G_DBUS_SEND_MESSAGE_FLAGS_NONE, - /*out_serial=*/nullptr, &error); - if (error) { + /*out_serial=*/nullptr, error.receive()); + if (error.get()) { RTC_LOG(LS_ERROR) << "Failed to close the session: " << error->message; - g_error_free(error); } - g_object_unref(message); } } @@ -274,7 +513,11 @@ void BaseCapturerPipeWire::InitPipeWire() { StubPathMap paths; // Check if the PipeWire library is available. 
- paths[kModulePipewire].push_back(kPipeWireLib); +#if PW_CHECK_VERSION(0, 3, 0) + paths[kModulePipewire03].push_back(kPipeWireLib); +#else + paths[kModulePipewire02].push_back(kPipeWireLib); +#endif if (!InitializeStubs(paths)) { RTC_LOG(LS_ERROR) << "Failed to load the PipeWire library and symbols."; portal_init_failed_ = true; @@ -284,16 +527,46 @@ void BaseCapturerPipeWire::InitPipeWire() { pw_init(/*argc=*/nullptr, /*argc=*/nullptr); +#if PW_CHECK_VERSION(0, 3, 0) + pw_main_loop_ = pw_thread_loop_new("pipewire-main-loop", nullptr); + + pw_thread_loop_lock(pw_main_loop_); + + pw_context_ = + pw_context_new(pw_thread_loop_get_loop(pw_main_loop_), nullptr, 0); + if (!pw_context_) { + RTC_LOG(LS_ERROR) << "Failed to create PipeWire context"; + return; + } + + pw_core_ = pw_context_connect(pw_context_, nullptr, 0); + if (!pw_core_) { + RTC_LOG(LS_ERROR) << "Failed to connect PipeWire context"; + return; + } +#else pw_loop_ = pw_loop_new(/*properties=*/nullptr); pw_main_loop_ = pw_thread_loop_new(pw_loop_, "pipewire-main-loop"); + pw_thread_loop_lock(pw_main_loop_); + pw_core_ = pw_core_new(pw_loop_, /*properties=*/nullptr); pw_core_type_ = pw_core_get_type(pw_core_); pw_remote_ = pw_remote_new(pw_core_, nullptr, /*user_data_size=*/0); InitPipeWireTypes(); +#endif // Initialize event handlers, remote end and stream-related. 
+#if PW_CHECK_VERSION(0, 3, 0) + pw_core_events_.version = PW_VERSION_CORE_EVENTS; + pw_core_events_.error = &OnCoreError; + + pw_stream_events_.version = PW_VERSION_STREAM_EVENTS; + pw_stream_events_.state_changed = &OnStreamStateChanged; + pw_stream_events_.param_changed = &OnStreamParamChanged; + pw_stream_events_.process = &OnStreamProcess; +#else pw_remote_events_.version = PW_VERSION_REMOTE_EVENTS; pw_remote_events_.state_changed = &OnStateChanged; @@ -301,19 +574,33 @@ void BaseCapturerPipeWire::InitPipeWire() { pw_stream_events_.state_changed = &OnStreamStateChanged; pw_stream_events_.format_changed = &OnStreamFormatChanged; pw_stream_events_.process = &OnStreamProcess; +#endif + +#if PW_CHECK_VERSION(0, 3, 0) + pw_core_add_listener(pw_core_, &spa_core_listener_, &pw_core_events_, this); + pw_stream_ = CreateReceivingStream(); + if (!pw_stream_) { + RTC_LOG(LS_ERROR) << "Failed to create PipeWire stream"; + return; + } +#else pw_remote_add_listener(pw_remote_, &spa_remote_listener_, &pw_remote_events_, this); pw_remote_connect_fd(pw_remote_, pw_fd_); +#endif if (pw_thread_loop_start(pw_main_loop_) < 0) { RTC_LOG(LS_ERROR) << "Failed to start main PipeWire loop"; portal_init_failed_ = true; } + pw_thread_loop_unlock(pw_main_loop_); + RTC_LOG(LS_INFO) << "PipeWire remote opened."; } +#if !PW_CHECK_VERSION(0, 3, 0) void BaseCapturerPipeWire::InitPipeWireTypes() { spa_type_map* map = pw_core_type_->map; pw_type_ = new PipeWireType(); @@ -323,23 +610,44 @@ void BaseCapturerPipeWire::InitPipeWireTypes() { spa_type_format_video_map(map, &pw_type_->format_video); spa_type_video_format_map(map, &pw_type_->video_format); } +#endif -void BaseCapturerPipeWire::CreateReceivingStream() { +pw_stream* BaseCapturerPipeWire::CreateReceivingStream() { +#if !PW_CHECK_VERSION(0, 3, 0) + if (pw_remote_get_state(pw_remote_, nullptr) != PW_REMOTE_STATE_CONNECTED) { + RTC_LOG(LS_ERROR) << "Cannot create pipewire stream"; + return nullptr; + } +#endif spa_rectangle pwMinScreenBounds 
= spa_rectangle{1, 1}; - spa_rectangle pwScreenBounds = - spa_rectangle{static_cast(desktop_size_.width()), - static_cast(desktop_size_.height())}; - - spa_fraction pwFrameRateMin = spa_fraction{0, 1}; - spa_fraction pwFrameRateMax = spa_fraction{60, 1}; + spa_rectangle pwMaxScreenBounds = spa_rectangle{UINT32_MAX, UINT32_MAX}; pw_properties* reuseProps = pw_properties_new_string("pipewire.client.reuse=1"); - pw_stream_ = pw_stream_new(pw_remote_, "webrtc-consume-stream", reuseProps); +#if PW_CHECK_VERSION(0, 3, 0) + auto stream = pw_stream_new(pw_core_, "webrtc-consume-stream", reuseProps); +#else + auto stream = pw_stream_new(pw_remote_, "webrtc-consume-stream", reuseProps); +#endif uint8_t buffer[1024] = {}; const spa_pod* params[1]; spa_pod_builder builder = spa_pod_builder{buffer, sizeof(buffer)}; + +#if PW_CHECK_VERSION(0, 3, 0) + params[0] = reinterpret_cast(spa_pod_builder_add_object( + &builder, SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat, + SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), + SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw), + SPA_FORMAT_VIDEO_format, + SPA_POD_CHOICE_ENUM_Id(5, SPA_VIDEO_FORMAT_BGRx, SPA_VIDEO_FORMAT_RGBx, + SPA_VIDEO_FORMAT_RGBA, SPA_VIDEO_FORMAT_BGRx, + SPA_VIDEO_FORMAT_BGRA), + SPA_FORMAT_VIDEO_size, + SPA_POD_CHOICE_RANGE_Rectangle(&pwMinScreenBounds, &pwMinScreenBounds, + &pwMaxScreenBounds), + 0)); +#else params[0] = reinterpret_cast(spa_pod_builder_object( &builder, // id to enumerate formats @@ -349,69 +657,208 @@ void BaseCapturerPipeWire::CreateReceivingStream() { // then allowed formats are enumerated (e) and the format is undecided (u) // to allow negotiation ":", pw_type_->format_video.format, "Ieu", pw_type_->video_format.BGRx, - SPA_POD_PROP_ENUM(2, pw_type_->video_format.RGBx, - pw_type_->video_format.BGRx), + SPA_POD_PROP_ENUM( + 4, pw_type_->video_format.RGBx, pw_type_->video_format.BGRx, + pw_type_->video_format.RGBA, pw_type_->video_format.BGRA), // Video size: specified as rectangle 
(R), preferred size is specified as // first parameter, then allowed size is defined as range (r) from min and // max values and the format is undecided (u) to allow negotiation - ":", pw_type_->format_video.size, "Rru", &pwScreenBounds, 2, - &pwMinScreenBounds, &pwScreenBounds, - // Frame rate: specified as fraction (F) and set to minimum frame rate - // value - ":", pw_type_->format_video.framerate, "F", &pwFrameRateMin, - // Max frame rate: specified as fraction (F), preferred frame rate is set - // to maximum value, then allowed frame rate is defined as range (r) from - // min and max values and it is undecided (u) to allow negotiation - ":", pw_type_->format_video.max_framerate, "Fru", &pwFrameRateMax, 2, - &pwFrameRateMin, &pwFrameRateMax)); - - pw_stream_add_listener(pw_stream_, &spa_stream_listener_, &pw_stream_events_, + ":", pw_type_->format_video.size, "Rru", &pwMinScreenBounds, + SPA_POD_PROP_MIN_MAX(&pwMinScreenBounds, &pwMaxScreenBounds))); +#endif + + pw_stream_add_listener(stream, &spa_stream_listener_, &pw_stream_events_, this); +#if PW_CHECK_VERSION(0, 3, 0) + if (pw_stream_connect(stream, PW_DIRECTION_INPUT, pw_stream_node_id_, + PW_STREAM_FLAG_AUTOCONNECT, params, 1) != 0) { +#else pw_stream_flags flags = static_cast( - PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_INACTIVE | - PW_STREAM_FLAG_MAP_BUFFERS); - if (pw_stream_connect(pw_stream_, PW_DIRECTION_INPUT, /*port_path=*/nullptr, + PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_INACTIVE); + if (pw_stream_connect(stream, PW_DIRECTION_INPUT, /*port_path=*/nullptr, flags, params, /*n_params=*/1) != 0) { +#endif RTC_LOG(LS_ERROR) << "Could not connect receiving stream."; portal_init_failed_ = true; - return; + return nullptr; } + + return stream; } void BaseCapturerPipeWire::HandleBuffer(pw_buffer* buffer) { spa_buffer* spaBuffer = buffer->buffer; - void* src = nullptr; + ScopedBuf map; + uint8_t* src = nullptr; - if (!(src = spaBuffer->datas[0].data)) { + if (spaBuffer->datas[0].chunk->size == 0) { 
+ RTC_LOG(LS_ERROR) << "Failed to get video stream: Zero size."; return; } - uint32_t maxSize = spaBuffer->datas[0].maxsize; - int32_t srcStride = spaBuffer->datas[0].chunk->stride; - if (srcStride != (desktop_size_.width() * kBytesPerPixel)) { +#if PW_CHECK_VERSION(0, 3, 0) + if (spaBuffer->datas[0].type == SPA_DATA_MemFd || + spaBuffer->datas[0].type == SPA_DATA_DmaBuf) { +#else + if (spaBuffer->datas[0].type == pw_core_type_->data.MemFd || + spaBuffer->datas[0].type == pw_core_type_->data.DmaBuf) { +#endif + map.initialize( + static_cast( + mmap(nullptr, + spaBuffer->datas[0].maxsize + spaBuffer->datas[0].mapoffset, + PROT_READ, MAP_PRIVATE, spaBuffer->datas[0].fd, 0)), + spaBuffer->datas[0].maxsize + spaBuffer->datas[0].mapoffset, +#if PW_CHECK_VERSION(0, 3, 0) + spaBuffer->datas[0].type == SPA_DATA_DmaBuf, +#else + spaBuffer->datas[0].type == pw_core_type_->data.DmaBuf, +#endif + spaBuffer->datas[0].fd); + + if (!map) { + RTC_LOG(LS_ERROR) << "Failed to mmap the memory: " + << std::strerror(errno); + return; + } + +#if PW_CHECK_VERSION(0, 3, 0) + if (spaBuffer->datas[0].type == SPA_DATA_DmaBuf) { +#else + if (spaBuffer->datas[0].type == pw_core_type_->data.DmaBuf) { +#endif + SyncDmaBuf(spaBuffer->datas[0].fd, DMA_BUF_SYNC_START); + } + + src = SPA_MEMBER(map.get(), spaBuffer->datas[0].mapoffset, uint8_t); +#if PW_CHECK_VERSION(0, 3, 0) + } else if (spaBuffer->datas[0].type == SPA_DATA_MemPtr) { +#else + } else if (spaBuffer->datas[0].type == pw_core_type_->data.MemPtr) { +#endif + src = static_cast(spaBuffer->datas[0].data); + } + + if (!src) { + return; + } + +#if PW_CHECK_VERSION(0, 3, 0) + struct spa_meta_region* video_metadata = + static_cast(spa_buffer_find_meta_data( + spaBuffer, SPA_META_VideoCrop, sizeof(*video_metadata))); +#else + struct spa_meta_video_crop* video_metadata = + static_cast( + spa_buffer_find_meta(spaBuffer, pw_core_type_->meta.VideoCrop)); +#endif + + // Video size from metadata is bigger than an actual video stream size. 
+ // The metadata are wrong or we should up-scale the video...in both cases + // just quit now. +#if PW_CHECK_VERSION(0, 3, 0) + if (video_metadata && (video_metadata->region.size.width > + static_cast(desktop_size_.width()) || + video_metadata->region.size.height > + static_cast(desktop_size_.height()))) { +#else + if (video_metadata && (video_metadata->width > desktop_size_.width() || + video_metadata->height > desktop_size_.height())) { +#endif + RTC_LOG(LS_ERROR) << "Stream metadata sizes are wrong!"; + return; + } + + // Use video metadata when video size from metadata is set and smaller than + // video stream size, so we need to adjust it. + bool video_metadata_use = false; + +#if PW_CHECK_VERSION(0, 3, 0) + const struct spa_rectangle* video_metadata_size = + video_metadata ? &video_metadata->region.size : nullptr; +#else + const struct spa_meta_video_crop* video_metadata_size = video_metadata; +#endif + + if (video_metadata_size && video_metadata_size->width != 0 && + video_metadata_size->height != 0 && + (static_cast(video_metadata_size->width) < desktop_size_.width() || + static_cast(video_metadata_size->height) < + desktop_size_.height())) { + video_metadata_use = true; + } + + DesktopSize video_size_prev = video_size_; + if (video_metadata_use) { + video_size_ = + DesktopSize(video_metadata_size->width, video_metadata_size->height); + } else { + video_size_ = desktop_size_; + } + + webrtc::MutexLock lock(¤t_frame_lock_); + if (!current_frame_ || !video_size_.equals(video_size_prev)) { + current_frame_ = std::make_unique( + video_size_.width() * video_size_.height() * kBytesPerPixel); + } + + const int32_t dst_stride = video_size_.width() * kBytesPerPixel; + const int32_t src_stride = spaBuffer->datas[0].chunk->stride; + + if (src_stride != (desktop_size_.width() * kBytesPerPixel)) { RTC_LOG(LS_ERROR) << "Got buffer with stride different from screen stride: " - << srcStride + << src_stride << " != " << (desktop_size_.width() * kBytesPerPixel); 
portal_init_failed_ = true; + return; } - if (!current_frame_) { - current_frame_ = static_cast(malloc(maxSize)); - } - RTC_DCHECK(current_frame_ != nullptr); - - // If both sides decided to go with the RGBx format we need to convert it to - // BGRx to match color format expected by WebRTC. - if (spa_video_format_->format == pw_type_->video_format.RGBx) { - uint8_t* tempFrame = static_cast(malloc(maxSize)); - std::memcpy(tempFrame, src, maxSize); - ConvertRGBxToBGRx(tempFrame, maxSize); - std::memcpy(current_frame_, tempFrame, maxSize); - free(tempFrame); - } else { - std::memcpy(current_frame_, src, maxSize); + // Adjust source content based on metadata video position +#if PW_CHECK_VERSION(0, 3, 0) + if (video_metadata_use && + (video_metadata->region.position.y + video_size_.height() <= + desktop_size_.height())) { + src += src_stride * video_metadata->region.position.y; + } + const int x_offset = + video_metadata_use && + (video_metadata->region.position.x + video_size_.width() <= + desktop_size_.width()) + ? video_metadata->region.position.x * kBytesPerPixel + : 0; +#else + if (video_metadata_use && + (video_metadata->y + video_size_.height() <= desktop_size_.height())) { + src += src_stride * video_metadata->y; + } + + const int x_offset = + video_metadata_use && + (video_metadata->x + video_size_.width() <= desktop_size_.width()) + ? video_metadata->x * kBytesPerPixel + : 0; +#endif + + uint8_t* dst = current_frame_.get(); + for (int i = 0; i < video_size_.height(); ++i) { + // Adjust source content based on crop video position if needed + src += x_offset; + std::memcpy(dst, src, dst_stride); + // If both sides decided to go with the RGBx format we need to convert it to + // BGRx to match color format expected by WebRTC. 
+#if PW_CHECK_VERSION(0, 3, 0) + if (spa_video_format_.format == SPA_VIDEO_FORMAT_RGBx || + spa_video_format_.format == SPA_VIDEO_FORMAT_RGBA) { +#else + if (spa_video_format_->format == pw_type_->video_format.RGBx || + spa_video_format_->format == pw_type_->video_format.RGBA) { +#endif + ConvertRGBxToBGRx(dst, dst_stride); + } + src += src_stride - x_offset; + dst += dst_stride; } } @@ -441,14 +888,13 @@ void BaseCapturerPipeWire::OnProxyRequested(GObject* /*object*/, BaseCapturerPipeWire* that = static_cast(user_data); RTC_DCHECK(that); - GError* error = nullptr; - GDBusProxy *proxy = g_dbus_proxy_new_finish(result, &error); + Scoped error; + GDBusProxy* proxy = g_dbus_proxy_new_finish(result, error.receive()); if (!proxy) { - if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_CANCELLED)) + if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED)) return; RTC_LOG(LS_ERROR) << "Failed to create a proxy for the screen cast portal: " << error->message; - g_error_free(error); that->portal_init_failed_ = true; return; } @@ -462,38 +908,36 @@ void BaseCapturerPipeWire::OnProxyRequested(GObject* /*object*/, // static gchar* BaseCapturerPipeWire::PrepareSignalHandle(GDBusConnection* connection, const gchar* token) { - gchar* sender = g_strdup(g_dbus_connection_get_unique_name(connection) + 1); - for (int i = 0; sender[i]; i++) { - if (sender[i] == '.') { - sender[i] = '_'; + Scoped sender( + g_strdup(g_dbus_connection_get_unique_name(connection) + 1)); + for (int i = 0; sender.get()[i]; i++) { + if (sender.get()[i] == '.') { + sender.get()[i] = '_'; } } - gchar* handle = g_strconcat(kDesktopRequestObjectPath, "/", sender, "/", + gchar* handle = g_strconcat(kDesktopRequestObjectPath, "/", sender.get(), "/", token, /*end of varargs*/ nullptr); - g_free(sender); return handle; } void BaseCapturerPipeWire::SessionRequest() { GVariantBuilder builder; - gchar* variant_string; + Scoped variant_string; g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT); 
variant_string = g_strdup_printf("webrtc_session%d", g_random_int_range(0, G_MAXINT)); g_variant_builder_add(&builder, "{sv}", "session_handle_token", - g_variant_new_string(variant_string)); - g_free(variant_string); + g_variant_new_string(variant_string.get())); variant_string = g_strdup_printf("webrtc%d", g_random_int_range(0, G_MAXINT)); g_variant_builder_add(&builder, "{sv}", "handle_token", - g_variant_new_string(variant_string)); + g_variant_new_string(variant_string.get())); - portal_handle_ = PrepareSignalHandle(connection_, variant_string); + portal_handle_ = PrepareSignalHandle(connection_, variant_string.get()); session_request_signal_id_ = SetupRequestResponseSignal( portal_handle_, OnSessionRequestResponseSignal); - g_free(variant_string); RTC_LOG(LS_INFO) << "Screen cast session requested."; g_dbus_proxy_call( @@ -509,22 +953,21 @@ void BaseCapturerPipeWire::OnSessionRequested(GDBusProxy *proxy, BaseCapturerPipeWire* that = static_cast(user_data); RTC_DCHECK(that); - GError* error = nullptr; - GVariant* variant = g_dbus_proxy_call_finish(proxy, result, &error); + Scoped error; + Scoped variant( + g_dbus_proxy_call_finish(proxy, result, error.receive())); if (!variant) { - if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_CANCELLED)) + if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED)) return; RTC_LOG(LS_ERROR) << "Failed to create a screen cast session: " << error->message; - g_error_free(error); that->portal_init_failed_ = true; return; } RTC_LOG(LS_INFO) << "Initializing the screen cast session."; - gchar* handle = nullptr; - g_variant_get_child(variant, 0, "o", &handle); - g_variant_unref(variant); + Scoped handle; + g_variant_get_child(variant.get(), 0, "o", &handle); if (!handle) { RTC_LOG(LS_ERROR) << "Failed to initialize the screen cast session."; if (that->session_request_signal_id_) { @@ -536,8 +979,6 @@ void BaseCapturerPipeWire::OnSessionRequested(GDBusProxy *proxy, return; } - g_free(handle); - RTC_LOG(LS_INFO) << 
"Subscribing to the screen cast session."; } @@ -557,11 +998,11 @@ void BaseCapturerPipeWire::OnSessionRequestResponseSignal( << "Received response for the screen cast session subscription."; guint32 portal_response; - GVariant* response_data; - g_variant_get(parameters, "(u@a{sv})", &portal_response, &response_data); - g_variant_lookup(response_data, "session_handle", "s", + Scoped response_data; + g_variant_get(parameters, "(u@a{sv})", &portal_response, + response_data.receive()); + g_variant_lookup(response_data.get(), "session_handle", "s", &that->session_handle_); - g_variant_unref(response_data); if (!that->session_handle_ || portal_response) { RTC_LOG(LS_ERROR) @@ -575,23 +1016,40 @@ void BaseCapturerPipeWire::OnSessionRequestResponseSignal( void BaseCapturerPipeWire::SourcesRequest() { GVariantBuilder builder; - gchar* variant_string; + Scoped variant_string; g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT); // We want to record monitor content. - g_variant_builder_add(&builder, "{sv}", "types", - g_variant_new_uint32(capture_source_type_)); + g_variant_builder_add( + &builder, "{sv}", "types", + g_variant_new_uint32(static_cast(capture_source_type_))); // We don't want to allow selection of multiple sources. g_variant_builder_add(&builder, "{sv}", "multiple", g_variant_new_boolean(false)); + + Scoped variant( + g_dbus_proxy_get_cached_property(proxy_, "AvailableCursorModes")); + if (variant.get()) { + uint32_t modes = 0; + g_variant_get(variant.get(), "u", &modes); + // Request mouse cursor to be embedded as part of the stream, otherwise it + // is hidden by default. Make request only if this mode is advertised by + // the portal implementation. 
+ if (modes & + static_cast(BaseCapturerPipeWire::CursorMode::kEmbedded)) { + g_variant_builder_add(&builder, "{sv}", "cursor_mode", + g_variant_new_uint32(static_cast( + BaseCapturerPipeWire::CursorMode::kEmbedded))); + } + } + variant_string = g_strdup_printf("webrtc%d", g_random_int_range(0, G_MAXINT)); g_variant_builder_add(&builder, "{sv}", "handle_token", - g_variant_new_string(variant_string)); + g_variant_new_string(variant_string.get())); - sources_handle_ = PrepareSignalHandle(connection_, variant_string); + sources_handle_ = PrepareSignalHandle(connection_, variant_string.get()); sources_request_signal_id_ = SetupRequestResponseSignal( sources_handle_, OnSourcesRequestResponseSignal); - g_free(variant_string); RTC_LOG(LS_INFO) << "Requesting sources from the screen cast session."; g_dbus_proxy_call( @@ -608,22 +1066,21 @@ void BaseCapturerPipeWire::OnSourcesRequested(GDBusProxy *proxy, BaseCapturerPipeWire* that = static_cast(user_data); RTC_DCHECK(that); - GError* error = nullptr; - GVariant* variant = g_dbus_proxy_call_finish(proxy, result, &error); + Scoped error; + Scoped variant( + g_dbus_proxy_call_finish(proxy, result, error.receive())); if (!variant) { - if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_CANCELLED)) + if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED)) return; RTC_LOG(LS_ERROR) << "Failed to request the sources: " << error->message; - g_error_free(error); that->portal_init_failed_ = true; return; } RTC_LOG(LS_INFO) << "Sources requested from the screen cast session."; - gchar* handle = nullptr; - g_variant_get_child(variant, 0, "o", &handle); - g_variant_unref(variant); + Scoped handle; + g_variant_get_child(variant.get(), 0, "o", handle.receive()); if (!handle) { RTC_LOG(LS_ERROR) << "Failed to initialize the screen cast session."; if (that->sources_request_signal_id_) { @@ -635,8 +1092,6 @@ void BaseCapturerPipeWire::OnSourcesRequested(GDBusProxy *proxy, return; } - g_free(handle); - RTC_LOG(LS_INFO) << 
"Subscribed to sources signal."; } @@ -668,17 +1123,16 @@ void BaseCapturerPipeWire::OnSourcesRequestResponseSignal( void BaseCapturerPipeWire::StartRequest() { GVariantBuilder builder; - gchar* variant_string; + Scoped variant_string; g_variant_builder_init(&builder, G_VARIANT_TYPE_VARDICT); variant_string = g_strdup_printf("webrtc%d", g_random_int_range(0, G_MAXINT)); g_variant_builder_add(&builder, "{sv}", "handle_token", - g_variant_new_string(variant_string)); + g_variant_new_string(variant_string.get())); - start_handle_ = PrepareSignalHandle(connection_, variant_string); + start_handle_ = PrepareSignalHandle(connection_, variant_string.get()); start_request_signal_id_ = SetupRequestResponseSignal(start_handle_, OnStartRequestResponseSignal); - g_free(variant_string); // "Identifier for the application window", this is Wayland, so not "x11:...". const gchar parent_window[] = ""; @@ -698,23 +1152,22 @@ void BaseCapturerPipeWire::OnStartRequested(GDBusProxy *proxy, BaseCapturerPipeWire* that = static_cast(user_data); RTC_DCHECK(that); - GError* error = nullptr; - GVariant* variant = g_dbus_proxy_call_finish(proxy, result, &error); + Scoped error; + Scoped variant( + g_dbus_proxy_call_finish(proxy, result, error.receive())); if (!variant) { - if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_CANCELLED)) + if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED)) return; RTC_LOG(LS_ERROR) << "Failed to start the screen cast session: " << error->message; - g_error_free(error); that->portal_init_failed_ = true; return; } RTC_LOG(LS_INFO) << "Initializing the start of the screen cast session."; - gchar* handle = nullptr; - g_variant_get_child(variant, 0, "o", &handle); - g_variant_unref(variant); + Scoped handle; + g_variant_get_child(variant.get(), 0, "o", handle.receive()); if (!handle) { RTC_LOG(LS_ERROR) << "Failed to initialize the start of the screen cast session."; @@ -727,8 +1180,6 @@ void BaseCapturerPipeWire::OnStartRequested(GDBusProxy *proxy, 
return; } - g_free(handle); - RTC_LOG(LS_INFO) << "Subscribed to the start signal."; } @@ -746,9 +1197,10 @@ void BaseCapturerPipeWire::OnStartRequestResponseSignal( RTC_LOG(LS_INFO) << "Start signal received."; guint32 portal_response; - GVariant* response_data; - GVariantIter* iter = nullptr; - g_variant_get(parameters, "(u@a{sv})", &portal_response, &response_data); + Scoped response_data; + Scoped iter; + g_variant_get(parameters, "(u@a{sv})", &portal_response, + response_data.receive()); if (portal_response || !response_data) { RTC_LOG(LS_ERROR) << "Failed to start the screen cast session."; that->portal_init_failed_ = true; @@ -758,28 +1210,28 @@ void BaseCapturerPipeWire::OnStartRequestResponseSignal( // Array of PipeWire streams. See // https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.ScreenCast.xml // documentation for . - if (g_variant_lookup(response_data, "streams", "a(ua{sv})", &iter)) { - GVariant* variant; + if (g_variant_lookup(response_data.get(), "streams", "a(ua{sv})", + iter.receive())) { + Scoped variant; - while (g_variant_iter_next(iter, "@(ua{sv})", &variant)) { + while (g_variant_iter_next(iter.get(), "@(ua{sv})", variant.receive())) { guint32 stream_id; - gint32 width; - gint32 height; - GVariant* options; + guint32 type; + Scoped options; - g_variant_get(variant, "(u@a{sv})", &stream_id, &options); - RTC_DCHECK(options != nullptr); + g_variant_get(variant.get(), "(u@a{sv})", &stream_id, options.receive()); + RTC_DCHECK(options.get()); - g_variant_lookup(options, "size", "(ii)", &width, &height); + if (g_variant_lookup(options.get(), "source_type", "u", &type)) { + that->capture_source_type_ = + static_cast(type); + } - that->desktop_size_.set(width, height); + that->pw_stream_node_id_ = stream_id; - g_variant_unref(options); - g_variant_unref(variant); + break; } } - g_variant_iter_free(iter); - g_variant_unref(response_data); that->OpenPipeWireRemote(); } @@ -807,35 +1259,30 @@ void 
BaseCapturerPipeWire::OnOpenPipeWireRemoteRequested( BaseCapturerPipeWire* that = static_cast(user_data); RTC_DCHECK(that); - GError* error = nullptr; - GUnixFDList* outlist = nullptr; - GVariant* variant = g_dbus_proxy_call_with_unix_fd_list_finish( - proxy, &outlist, result, &error); + Scoped error; + Scoped outlist; + Scoped variant(g_dbus_proxy_call_with_unix_fd_list_finish( + proxy, outlist.receive(), result, error.receive())); if (!variant) { - if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_CANCELLED)) + if (g_error_matches(error.get(), G_IO_ERROR, G_IO_ERROR_CANCELLED)) return; RTC_LOG(LS_ERROR) << "Failed to open the PipeWire remote: " << error->message; - g_error_free(error); that->portal_init_failed_ = true; return; } gint32 index; - g_variant_get(variant, "(h)", &index); + g_variant_get(variant.get(), "(h)", &index); - if ((that->pw_fd_ = g_unix_fd_list_get(outlist, index, &error)) == -1) { + if ((that->pw_fd_ = + g_unix_fd_list_get(outlist.get(), index, error.receive())) == -1) { RTC_LOG(LS_ERROR) << "Failed to get file descriptor from the list: " << error->message; - g_error_free(error); - g_variant_unref(variant); that->portal_init_failed_ = true; return; } - g_variant_unref(variant); - g_object_unref(outlist); - that->InitPipeWire(); } @@ -854,15 +1301,18 @@ void BaseCapturerPipeWire::CaptureFrame() { return; } + webrtc::MutexLock lock(¤t_frame_lock_); if (!current_frame_) { callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr); return; } - std::unique_ptr result(new BasicDesktopFrame(desktop_size_)); + DesktopSize frame_size = video_size_; + + std::unique_ptr result(new BasicDesktopFrame(frame_size)); result->CopyPixelsFrom( - current_frame_, (desktop_size_.width() * kBytesPerPixel), - DesktopRect::MakeWH(desktop_size_.width(), desktop_size_.height())); + current_frame_.get(), (frame_size.width() * kBytesPerPixel), + DesktopRect::MakeWH(frame_size.width(), frame_size.height())); if (!result) { 
callback_->OnCaptureResult(Result::ERROR_TEMPORARY, nullptr); return; @@ -887,4 +1337,11 @@ bool BaseCapturerPipeWire::SelectSource(SourceId id) { return true; } +// static +std::unique_ptr BaseCapturerPipeWire::CreateRawCapturer( + const DesktopCaptureOptions& options) { + return std::make_unique( + BaseCapturerPipeWire::CaptureSourceType::kAny); +} + } // namespace webrtc diff --git a/modules/desktop_capture/linux/base_capturer_pipewire.h b/modules/desktop_capture/linux/base_capturer_pipewire.h index f28d7a558b..52264188a7 100644 --- a/modules/desktop_capture/linux/base_capturer_pipewire.h +++ b/modules/desktop_capture/linux/base_capturer_pipewire.h @@ -10,18 +10,23 @@ #ifndef MODULES_DESKTOP_CAPTURE_LINUX_BASE_CAPTURER_PIPEWIRE_H_ #define MODULES_DESKTOP_CAPTURE_LINUX_BASE_CAPTURER_PIPEWIRE_H_ - #include #define typeof __typeof__ #include #include +#if PW_CHECK_VERSION(0, 3, 0) +#include +#endif +#include "absl/types/optional.h" #include "modules/desktop_capture/desktop_capture_options.h" #include "modules/desktop_capture/desktop_capturer.h" #include "rtc_base/constructor_magic.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { +#if !PW_CHECK_VERSION(0, 3, 0) class PipeWireType { public: spa_type_media_type media_type; @@ -29,14 +34,31 @@ class PipeWireType { spa_type_format_video format_video; spa_type_video_format video_format; }; +#endif class BaseCapturerPipeWire : public DesktopCapturer { public: - enum CaptureSourceType { Screen = 1, Window }; + // Values are set based on source type property in + // xdg-desktop-portal/screencast + // https://github.com/flatpak/xdg-desktop-portal/blob/master/data/org.freedesktop.portal.ScreenCast.xml + enum class CaptureSourceType : uint32_t { + kScreen = 0b01, + kWindow = 0b10, + kAny = 0b11 + }; + + enum class CursorMode : uint32_t { + kHidden = 0b01, + kEmbedded = 0b10, + kMetadata = 0b100 + }; explicit BaseCapturerPipeWire(CaptureSourceType source_type); ~BaseCapturerPipeWire() override; + static 
std::unique_ptr CreateRawCapturer( + const DesktopCaptureOptions& options); + // DesktopCapturer interface. void Start(Callback* delegate) override; void CaptureFrame() override; @@ -45,6 +67,21 @@ class BaseCapturerPipeWire : public DesktopCapturer { private: // PipeWire types --> +#if PW_CHECK_VERSION(0, 3, 0) + struct pw_context* pw_context_ = nullptr; + struct pw_core* pw_core_ = nullptr; + struct pw_stream* pw_stream_ = nullptr; + struct pw_thread_loop* pw_main_loop_ = nullptr; + + spa_hook spa_core_listener_; + spa_hook spa_stream_listener_; + + // event handlers + pw_core_events pw_core_events_ = {}; + pw_stream_events pw_stream_events_ = {}; + + struct spa_video_info_raw spa_video_format_; +#else pw_core* pw_core_ = nullptr; pw_type* pw_core_type_ = nullptr; pw_stream* pw_stream_ = nullptr; @@ -60,11 +97,13 @@ class BaseCapturerPipeWire : public DesktopCapturer { pw_remote_events pw_remote_events_ = {}; spa_video_info_raw* spa_video_format_ = nullptr; +#endif + guint32 pw_stream_node_id_ = 0; gint32 pw_fd_ = -1; CaptureSourceType capture_source_type_ = - BaseCapturerPipeWire::CaptureSourceType::Screen; + BaseCapturerPipeWire::CaptureSourceType::kScreen; // <-- end of PipeWire types @@ -79,10 +118,12 @@ class BaseCapturerPipeWire : public DesktopCapturer { guint sources_request_signal_id_ = 0; guint start_request_signal_id_ = 0; + DesktopSize video_size_; DesktopSize desktop_size_ = {}; DesktopCaptureOptions options_ = {}; - uint8_t* current_frame_ = nullptr; + webrtc::Mutex current_frame_lock_; + std::unique_ptr current_frame_; Callback* callback_ = nullptr; bool portal_init_failed_ = false; @@ -91,21 +132,32 @@ class BaseCapturerPipeWire : public DesktopCapturer { void InitPipeWire(); void InitPipeWireTypes(); - void CreateReceivingStream(); + pw_stream* CreateReceivingStream(); void HandleBuffer(pw_buffer* buffer); void ConvertRGBxToBGRx(uint8_t* frame, uint32_t size); +#if PW_CHECK_VERSION(0, 3, 0) + static void OnCoreError(void* data, + uint32_t id, + 
int seq, + int res, + const char* message); + static void OnStreamParamChanged(void* data, + uint32_t id, + const struct spa_pod* format); +#else static void OnStateChanged(void* data, pw_remote_state old_state, pw_remote_state state, const char* error); + static void OnStreamFormatChanged(void* data, const struct spa_pod* format); +#endif static void OnStreamStateChanged(void* data, pw_stream_state old_state, pw_stream_state state, const char* error_message); - static void OnStreamFormatChanged(void* data, const struct spa_pod* format); static void OnStreamProcess(void* data); static void OnNewBuffer(void* data, uint32_t id); diff --git a/modules/desktop_capture/linux/pipewire.sigs b/modules/desktop_capture/linux/pipewire02.sigs similarity index 91% rename from modules/desktop_capture/linux/pipewire.sigs rename to modules/desktop_capture/linux/pipewire02.sigs index 3e21e9dc07..5ac3d1d22b 100644 --- a/modules/desktop_capture/linux/pipewire.sigs +++ b/modules/desktop_capture/linux/pipewire02.sigs @@ -26,6 +26,7 @@ void pw_remote_add_listener(pw_remote *remote, spa_hook *listener, const pw_remo int pw_remote_connect_fd(pw_remote *remote, int fd); void pw_remote_destroy(pw_remote *remote); pw_remote * pw_remote_new(pw_core *core, pw_properties *properties, size_t user_data_size); +enum pw_remote_state pw_remote_get_state(pw_remote *remote, const char **error); // stream.h void pw_stream_add_listener(pw_stream *stream, spa_hook *listener, const pw_stream_events *events, void *data); @@ -42,3 +43,5 @@ void pw_thread_loop_destroy(pw_thread_loop *loop); pw_thread_loop * pw_thread_loop_new(pw_loop *loop, const char *name); int pw_thread_loop_start(pw_thread_loop *loop); void pw_thread_loop_stop(pw_thread_loop *loop); +void pw_thread_loop_lock(struct pw_thread_loop *loop); +void pw_thread_loop_unlock(struct pw_thread_loop *loop); diff --git a/modules/desktop_capture/linux/pipewire03.sigs b/modules/desktop_capture/linux/pipewire03.sigs new file mode 100644 index 
0000000000..78d241f40c --- /dev/null +++ b/modules/desktop_capture/linux/pipewire03.sigs @@ -0,0 +1,46 @@ +// Copyright 2018 The WebRTC project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +//------------------------------------------------ +// Functions from PipeWire used in capturer code. +//------------------------------------------------ + +// core.h +int pw_core_disconnect(pw_core *core); + +// loop.h +void pw_loop_destroy(pw_loop *loop); +pw_loop * pw_loop_new(const spa_dict *props); + + +// pipewire.h +void pw_init(int *argc, char **argv[]); + +// properties.h +pw_properties * pw_properties_new_string(const char *args); + +// stream.h +void pw_stream_add_listener(pw_stream *stream, spa_hook *listener, const pw_stream_events *events, void *data); +int pw_stream_connect(pw_stream *stream, enum pw_direction direction, uint32_t target_id, enum pw_stream_flags flags, const spa_pod **params, uint32_t n_params); +pw_buffer *pw_stream_dequeue_buffer(pw_stream *stream); +void pw_stream_destroy(pw_stream *stream); +pw_stream * pw_stream_new(pw_core *core, const char *name, pw_properties *props); +int pw_stream_queue_buffer(pw_stream *stream, pw_buffer *buffer); +int pw_stream_set_active(pw_stream *stream, bool active); +int pw_stream_update_params(pw_stream *stream, const spa_pod **params, uint32_t n_params); + +// thread-loop.h +void pw_thread_loop_destroy(pw_thread_loop *loop); +pw_thread_loop * pw_thread_loop_new(const char *name, const spa_dict *props); +int pw_thread_loop_start(pw_thread_loop *loop); +void pw_thread_loop_stop(pw_thread_loop *loop); +void pw_thread_loop_lock(pw_thread_loop *loop); +void pw_thread_loop_unlock(pw_thread_loop *loop); +pw_loop * pw_thread_loop_get_loop(pw_thread_loop *loop); + + +// context.h +void pw_context_destroy(pw_context *context); +pw_context *pw_context_new(pw_loop *main_loop, pw_properties *props, size_t user_data_size); +pw_core * 
pw_context_connect(pw_context *context, pw_properties *properties, size_t user_data_size); diff --git a/modules/desktop_capture/linux/screen_capturer_pipewire.cc b/modules/desktop_capture/linux/screen_capturer_pipewire.cc deleted file mode 100644 index fe672140cc..0000000000 --- a/modules/desktop_capture/linux/screen_capturer_pipewire.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "modules/desktop_capture/linux/screen_capturer_pipewire.h" - -#include - - -namespace webrtc { - -ScreenCapturerPipeWire::ScreenCapturerPipeWire() - : BaseCapturerPipeWire(BaseCapturerPipeWire::CaptureSourceType::Screen) {} -ScreenCapturerPipeWire::~ScreenCapturerPipeWire() {} - -// static -std::unique_ptr -ScreenCapturerPipeWire::CreateRawScreenCapturer( - const DesktopCaptureOptions& options) { - return std::make_unique(); -} - -} // namespace webrtc diff --git a/modules/desktop_capture/linux/screen_capturer_pipewire.h b/modules/desktop_capture/linux/screen_capturer_pipewire.h deleted file mode 100644 index 66dcd680e0..0000000000 --- a/modules/desktop_capture/linux/screen_capturer_pipewire.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef MODULES_DESKTOP_CAPTURE_LINUX_SCREEN_CAPTURER_PIPEWIRE_H_ -#define MODULES_DESKTOP_CAPTURE_LINUX_SCREEN_CAPTURER_PIPEWIRE_H_ - -#include - -#include "modules/desktop_capture/linux/base_capturer_pipewire.h" - -namespace webrtc { - -class ScreenCapturerPipeWire : public BaseCapturerPipeWire { - public: - ScreenCapturerPipeWire(); - ~ScreenCapturerPipeWire() override; - - static std::unique_ptr CreateRawScreenCapturer( - const DesktopCaptureOptions& options); - - RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerPipeWire); -}; - -} // namespace webrtc - -#endif // MODULES_DESKTOP_CAPTURE_LINUX_SCREEN_CAPTURER_PIPEWIRE_H_ diff --git a/modules/desktop_capture/linux/shared_x_display.cc b/modules/desktop_capture/linux/shared_x_display.cc index c475db6e78..f0b35f62d3 100644 --- a/modules/desktop_capture/linux/shared_x_display.cc +++ b/modules/desktop_capture/linux/shared_x_display.cc @@ -11,6 +11,7 @@ #include "modules/desktop_capture/linux/shared_x_display.h" #include +#include #include @@ -86,4 +87,15 @@ void SharedXDisplay::ProcessPendingXEvents() { } } +void SharedXDisplay::IgnoreXServerGrabs() { + int test_event_base = 0; + int test_error_base = 0; + int major = 0; + int minor = 0; + if (XTestQueryExtension(display(), &test_event_base, &test_error_base, &major, + &minor)) { + XTestGrabControl(display(), true); + } +} + } // namespace webrtc diff --git a/modules/desktop_capture/linux/shared_x_display.h b/modules/desktop_capture/linux/shared_x_display.h index 98b6101904..dd52e456ca 100644 --- a/modules/desktop_capture/linux/shared_x_display.h +++ b/modules/desktop_capture/linux/shared_x_display.h @@ -18,6 +18,7 @@ #include "api/ref_counted_base.h" #include "api/scoped_refptr.h" #include "rtc_base/constructor_magic.h" +#include "rtc_base/system/rtc_export.h" // Including Xlib.h will involve evil defines (Bool, Status, True, False), which // easily conflict with other headers. 
@@ -27,7 +28,8 @@ typedef union _XEvent XEvent; namespace webrtc { // A ref-counted object to store XDisplay connection. -class SharedXDisplay : public rtc::RefCountedBase { +class RTC_EXPORT SharedXDisplay + : public rtc::RefCountedNonVirtual { public: class XEventHandler { public: @@ -37,9 +39,6 @@ class SharedXDisplay : public rtc::RefCountedBase { virtual bool HandleXEvent(const XEvent& event) = 0; }; - // Takes ownership of |display|. - explicit SharedXDisplay(Display* display); - // Creates a new X11 Display for the |display_name|. NULL is returned if X11 // connection failed. Equivalent to CreateDefault() when |display_name| is // empty. @@ -62,8 +61,13 @@ class SharedXDisplay : public rtc::RefCountedBase { // Processes pending XEvents, calling corresponding event handlers. void ProcessPendingXEvents(); + void IgnoreXServerGrabs(); + + ~SharedXDisplay(); + protected: - ~SharedXDisplay() override; + // Takes ownership of |display|. + explicit SharedXDisplay(Display* display); private: typedef std::map > EventHandlersMap; diff --git a/modules/desktop_capture/linux/window_capturer_pipewire.cc b/modules/desktop_capture/linux/window_capturer_pipewire.cc deleted file mode 100644 index b4559156dc..0000000000 --- a/modules/desktop_capture/linux/window_capturer_pipewire.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "modules/desktop_capture/linux/window_capturer_pipewire.h" - -#include - - -namespace webrtc { - -WindowCapturerPipeWire::WindowCapturerPipeWire() - : BaseCapturerPipeWire(BaseCapturerPipeWire::CaptureSourceType::Window) {} -WindowCapturerPipeWire::~WindowCapturerPipeWire() {} - -// static -std::unique_ptr -WindowCapturerPipeWire::CreateRawWindowCapturer( - const DesktopCaptureOptions& options) { - return std::make_unique(); -} - -} // namespace webrtc diff --git a/modules/desktop_capture/linux/window_capturer_pipewire.h b/modules/desktop_capture/linux/window_capturer_pipewire.h deleted file mode 100644 index 7f184ef299..0000000000 --- a/modules/desktop_capture/linux/window_capturer_pipewire.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef MODULES_DESKTOP_CAPTURE_LINUX_WINDOW_CAPTURER_PIPEWIRE_H_ -#define MODULES_DESKTOP_CAPTURE_LINUX_WINDOW_CAPTURER_PIPEWIRE_H_ - -#include - -#include "modules/desktop_capture/linux/base_capturer_pipewire.h" - -namespace webrtc { - -class WindowCapturerPipeWire : public BaseCapturerPipeWire { - public: - WindowCapturerPipeWire(); - ~WindowCapturerPipeWire() override; - - static std::unique_ptr CreateRawWindowCapturer( - const DesktopCaptureOptions& options); - - RTC_DISALLOW_COPY_AND_ASSIGN(WindowCapturerPipeWire); -}; - -} // namespace webrtc - -#endif // MODULES_DESKTOP_CAPTURE_LINUX_WINDOW_CAPTURER_PIPEWIRE_H_ diff --git a/modules/desktop_capture/linux/x_error_trap.cc b/modules/desktop_capture/linux/x_error_trap.cc index 53c907fc45..13233d8274 100644 --- a/modules/desktop_capture/linux/x_error_trap.cc +++ b/modules/desktop_capture/linux/x_error_trap.cc @@ -10,55 +10,40 @@ #include "modules/desktop_capture/linux/x_error_trap.h" -#include #include -#if defined(TOOLKIT_GTK) -#include -#endif // !defined(TOOLKIT_GTK) +#include "rtc_base/checks.h" namespace webrtc { namespace { -#if !defined(TOOLKIT_GTK) - // TODO(sergeyu): This code is not thread safe. Fix it. Bug 2202. 
static bool g_xserver_error_trap_enabled = false; static int g_last_xserver_error_code = 0; int XServerErrorHandler(Display* display, XErrorEvent* error_event) { - assert(g_xserver_error_trap_enabled); + RTC_DCHECK(g_xserver_error_trap_enabled); g_last_xserver_error_code = error_event->error_code; return 0; } -#endif // !defined(TOOLKIT_GTK) - } // namespace XErrorTrap::XErrorTrap(Display* display) : original_error_handler_(NULL), enabled_(true) { -#if defined(TOOLKIT_GTK) - gdk_error_trap_push(); -#else // !defined(TOOLKIT_GTK) - assert(!g_xserver_error_trap_enabled); + RTC_DCHECK(!g_xserver_error_trap_enabled); original_error_handler_ = XSetErrorHandler(&XServerErrorHandler); g_xserver_error_trap_enabled = true; g_last_xserver_error_code = 0; -#endif // !defined(TOOLKIT_GTK) } int XErrorTrap::GetLastErrorAndDisable() { enabled_ = false; -#if defined(TOOLKIT_GTK) - return gdk_error_trap_push(); -#else // !defined(TOOLKIT_GTK) - assert(g_xserver_error_trap_enabled); + RTC_DCHECK(g_xserver_error_trap_enabled); XSetErrorHandler(original_error_handler_); g_xserver_error_trap_enabled = false; return g_last_xserver_error_code; -#endif // !defined(TOOLKIT_GTK) } XErrorTrap::~XErrorTrap() { diff --git a/modules/desktop_capture/mac/desktop_configuration_monitor.cc b/modules/desktop_capture/mac/desktop_configuration_monitor.cc index e2225cd4a9..048a679ecc 100644 --- a/modules/desktop_capture/mac/desktop_configuration_monitor.cc +++ b/modules/desktop_capture/mac/desktop_configuration_monitor.cc @@ -21,7 +21,7 @@ DesktopConfigurationMonitor::DesktopConfigurationMonitor() { DesktopConfigurationMonitor::DisplaysReconfiguredCallback, this); if (err != kCGErrorSuccess) RTC_LOG(LS_ERROR) << "CGDisplayRegisterReconfigurationCallback " << err; - rtc::CritScope cs(&desktop_configuration_lock_); + MutexLock lock(&desktop_configuration_lock_); desktop_configuration_ = MacDesktopConfiguration::GetCurrent( MacDesktopConfiguration::TopLeftOrigin); } @@ -34,7 +34,7 @@ 
DesktopConfigurationMonitor::~DesktopConfigurationMonitor() { } MacDesktopConfiguration DesktopConfigurationMonitor::desktop_configuration() { - rtc::CritScope crit(&desktop_configuration_lock_); + MutexLock lock(&desktop_configuration_lock_); return desktop_configuration_; } @@ -64,7 +64,7 @@ void DesktopConfigurationMonitor::DisplaysReconfigured( reconfiguring_displays_.erase(display); if (reconfiguring_displays_.empty()) { - rtc::CritScope cs(&desktop_configuration_lock_); + MutexLock lock(&desktop_configuration_lock_); desktop_configuration_ = MacDesktopConfiguration::GetCurrent( MacDesktopConfiguration::TopLeftOrigin); } diff --git a/modules/desktop_capture/mac/desktop_configuration_monitor.h b/modules/desktop_capture/mac/desktop_configuration_monitor.h index 1ed4c6bbcf..aa0ebfbacc 100644 --- a/modules/desktop_capture/mac/desktop_configuration_monitor.h +++ b/modules/desktop_capture/mac/desktop_configuration_monitor.h @@ -19,21 +19,21 @@ #include "api/ref_counted_base.h" #include "modules/desktop_capture/mac/desktop_configuration.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { // The class provides functions to synchronize capturing and display // reconfiguring across threads, and the up-to-date MacDesktopConfiguration. -class DesktopConfigurationMonitor : public rtc::RefCountedBase { +class DesktopConfigurationMonitor final + : public rtc::RefCountedNonVirtual { public: DesktopConfigurationMonitor(); + ~DesktopConfigurationMonitor(); + // Returns the current desktop configuration. 
MacDesktopConfiguration desktop_configuration(); - protected: - ~DesktopConfigurationMonitor() override; - private: static void DisplaysReconfiguredCallback(CGDirectDisplayID display, CGDisplayChangeSummaryFlags flags, @@ -41,7 +41,7 @@ class DesktopConfigurationMonitor : public rtc::RefCountedBase { void DisplaysReconfigured(CGDirectDisplayID display, CGDisplayChangeSummaryFlags flags); - rtc::CriticalSection desktop_configuration_lock_; + Mutex desktop_configuration_lock_; MacDesktopConfiguration desktop_configuration_ RTC_GUARDED_BY(&desktop_configuration_lock_); std::set reconfiguring_displays_; diff --git a/modules/desktop_capture/mac/desktop_frame_provider.h b/modules/desktop_capture/mac/desktop_frame_provider.h index 4826f99e8e..f71959bda1 100644 --- a/modules/desktop_capture/mac/desktop_frame_provider.h +++ b/modules/desktop_capture/mac/desktop_frame_provider.h @@ -17,8 +17,8 @@ #include #include +#include "api/sequence_checker.h" #include "modules/desktop_capture/shared_desktop_frame.h" -#include "rtc_base/thread_checker.h" #include "sdk/objc/helpers/scoped_cftyperef.h" namespace webrtc { @@ -44,7 +44,7 @@ class DesktopFrameProvider { void Release(); private: - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; const bool allow_iosurface_; // Most recent IOSurface that contains a capture of matching display. 
diff --git a/modules/desktop_capture/mac/full_screen_mac_application_handler.cc b/modules/desktop_capture/mac/full_screen_mac_application_handler.cc index 9e6eacce85..36e16cbe54 100644 --- a/modules/desktop_capture/mac/full_screen_mac_application_handler.cc +++ b/modules/desktop_capture/mac/full_screen_mac_application_handler.cc @@ -14,6 +14,7 @@ #include #include #include "absl/strings/match.h" +#include "api/function_view.h" #include "modules/desktop_capture/mac/window_list_utils.h" namespace webrtc { @@ -59,17 +60,17 @@ class FullScreenMacApplicationHandler : public FullScreenApplicationHandler { title_predicate_(title_predicate), owner_pid_(GetWindowOwnerPid(sourceId)) {} + protected: + using CachePredicate = + rtc::FunctionView; + void InvalidateCacheIfNeeded(const DesktopCapturer::SourceList& source_list, - int64_t timestamp) const { - // Copy only sources with the same pid + int64_t timestamp, + CachePredicate predicate) const { if (timestamp != cache_timestamp_) { cache_sources_.clear(); std::copy_if(source_list.begin(), source_list.end(), - std::back_inserter(cache_sources_), - [&](const DesktopCapturer::Source& src) { - return src.id != GetSourceId() && - GetWindowOwnerPid(src.id) == owner_pid_; - }); + std::back_inserter(cache_sources_), predicate); cache_timestamp_ = timestamp; } } @@ -77,7 +78,11 @@ class FullScreenMacApplicationHandler : public FullScreenApplicationHandler { WindowId FindFullScreenWindowWithSamePid( const DesktopCapturer::SourceList& source_list, int64_t timestamp) const { - InvalidateCacheIfNeeded(source_list, timestamp); + InvalidateCacheIfNeeded(source_list, timestamp, + [&](const DesktopCapturer::Source& src) { + return src.id != GetSourceId() && + GetWindowOwnerPid(src.id) == owner_pid_; + }); if (cache_sources_.empty()) return kCGNullWindowID; @@ -119,7 +124,7 @@ class FullScreenMacApplicationHandler : public FullScreenApplicationHandler { : FindFullScreenWindowWithSamePid(source_list, timestamp); } - private: + protected: const 
TitlePredicate title_predicate_; const int owner_pid_; mutable int64_t cache_timestamp_ = 0; @@ -143,6 +148,52 @@ bool slide_show_title_predicate(const std::string& original_title, return false; } +class OpenOfficeApplicationHandler : public FullScreenMacApplicationHandler { + public: + OpenOfficeApplicationHandler(DesktopCapturer::SourceId sourceId) + : FullScreenMacApplicationHandler(sourceId, nullptr) {} + + DesktopCapturer::SourceId FindFullScreenWindow( + const DesktopCapturer::SourceList& source_list, + int64_t timestamp) const override { + InvalidateCacheIfNeeded(source_list, timestamp, + [&](const DesktopCapturer::Source& src) { + return GetWindowOwnerPid(src.id) == owner_pid_; + }); + + const auto original_window = GetSourceId(); + const std::string original_title = GetWindowTitle(original_window); + + // Check if we have only one document window, otherwise it's not possible + // to securely match a document window and a slide show window which has + // empty title. + if (std::any_of(cache_sources_.begin(), cache_sources_.end(), + [&original_title](const DesktopCapturer::Source& src) { + return src.title.length() && src.title != original_title; + })) { + return kCGNullWindowID; + } + + MacDesktopConfiguration desktop_config = + MacDesktopConfiguration::GetCurrent( + MacDesktopConfiguration::TopLeftOrigin); + + // Looking for slide show window, + // it must be a full screen window with empty title + const auto slide_show_window = std::find_if( + cache_sources_.begin(), cache_sources_.end(), [&](const auto& src) { + return src.title.empty() && + IsWindowFullScreen(desktop_config, src.id); + }); + + if (slide_show_window == cache_sources_.end()) { + return kCGNullWindowID; + } + + return slide_show_window->id; + } +}; + } // namespace std::unique_ptr @@ -154,6 +205,7 @@ CreateFullScreenMacApplicationHandler(DesktopCapturer::SourceId sourceId) { if (path_length > 0) { const char* last_slash = strrchr(buffer, '/'); const std::string name{last_slash ? 
last_slash + 1 : buffer}; + const std::string owner_name = GetWindowOwnerName(sourceId); FullScreenMacApplicationHandler::TitlePredicate predicate = nullptr; if (name.find("Google Chrome") == 0 || name == "Chromium") { predicate = equal_title_predicate; @@ -161,6 +213,8 @@ CreateFullScreenMacApplicationHandler(DesktopCapturer::SourceId sourceId) { predicate = slide_show_title_predicate; } else if (name == "Keynote") { predicate = equal_title_predicate; + } else if (owner_name == "OpenOffice") { + return std::make_unique(sourceId); } if (predicate) { diff --git a/modules/desktop_capture/mac/screen_capturer_mac.h b/modules/desktop_capture/mac/screen_capturer_mac.h index 8076e5b09a..68b8655b1c 100644 --- a/modules/desktop_capture/mac/screen_capturer_mac.h +++ b/modules/desktop_capture/mac/screen_capturer_mac.h @@ -16,6 +16,7 @@ #include #include +#include "api/sequence_checker.h" #include "modules/desktop_capture/desktop_capture_options.h" #include "modules/desktop_capture/desktop_capturer.h" #include "modules/desktop_capture/desktop_frame.h" @@ -27,7 +28,6 @@ #include "modules/desktop_capture/screen_capture_frame_queue.h" #include "modules/desktop_capture/screen_capturer_helper.h" #include "modules/desktop_capture/shared_desktop_frame.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -110,7 +110,7 @@ class ScreenCapturerMac final : public DesktopCapturer { DesktopFrameProvider desktop_frame_provider_; // Start, CaptureFrame and destructor have to called in the same thread. 
- rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerMac); }; diff --git a/modules/desktop_capture/mac/window_list_utils.cc b/modules/desktop_capture/mac/window_list_utils.cc index 67cf81c5ce..56d87ceaae 100644 --- a/modules/desktop_capture/mac/window_list_utils.cc +++ b/modules/desktop_capture/mac/window_list_utils.cc @@ -303,7 +303,7 @@ std::string GetWindowOwnerName(CFDictionaryRef window) { std::string GetWindowOwnerName(CGWindowID id) { std::string owner_name; if (GetWindowRef(id, [&owner_name](CFDictionaryRef window) { - owner_name = GetWindowOwnerPid(window); + owner_name = GetWindowOwnerName(window); })) { return owner_name; } diff --git a/modules/desktop_capture/mock_desktop_capturer_callback.h b/modules/desktop_capture/mock_desktop_capturer_callback.h index 659239ab9d..6530dc5542 100644 --- a/modules/desktop_capture/mock_desktop_capturer_callback.h +++ b/modules/desktop_capture/mock_desktop_capturer_callback.h @@ -22,9 +22,10 @@ class MockDesktopCapturerCallback : public DesktopCapturer::Callback { MockDesktopCapturerCallback(); ~MockDesktopCapturerCallback() override; - MOCK_METHOD2(OnCaptureResultPtr, - void(DesktopCapturer::Result result, - std::unique_ptr* frame)); + MOCK_METHOD(void, + OnCaptureResultPtr, + (DesktopCapturer::Result result, + std::unique_ptr* frame)); void OnCaptureResult(DesktopCapturer::Result result, std::unique_ptr frame) final; diff --git a/modules/desktop_capture/mouse_cursor.cc b/modules/desktop_capture/mouse_cursor.cc index 3b61e10a8b..e826552b0f 100644 --- a/modules/desktop_capture/mouse_cursor.cc +++ b/modules/desktop_capture/mouse_cursor.cc @@ -10,9 +10,8 @@ #include "modules/desktop_capture/mouse_cursor.h" -#include - #include "modules/desktop_capture/desktop_frame.h" +#include "rtc_base/checks.h" namespace webrtc { @@ -20,8 +19,8 @@ MouseCursor::MouseCursor() {} MouseCursor::MouseCursor(DesktopFrame* image, const DesktopVector& hotspot) : image_(image), 
hotspot_(hotspot) { - assert(0 <= hotspot_.x() && hotspot_.x() <= image_->size().width()); - assert(0 <= hotspot_.y() && hotspot_.y() <= image_->size().height()); + RTC_DCHECK(0 <= hotspot_.x() && hotspot_.x() <= image_->size().width()); + RTC_DCHECK(0 <= hotspot_.y() && hotspot_.y() <= image_->size().height()); } MouseCursor::~MouseCursor() {} diff --git a/modules/desktop_capture/mouse_cursor_monitor_unittest.cc b/modules/desktop_capture/mouse_cursor_monitor_unittest.cc index ee2dff32af..268e5e3475 100644 --- a/modules/desktop_capture/mouse_cursor_monitor_unittest.cc +++ b/modules/desktop_capture/mouse_cursor_monitor_unittest.cc @@ -65,7 +65,7 @@ TEST_F(MouseCursorMonitorTest, MAYBE(FromScreen)) { MouseCursorMonitor::CreateForScreen( DesktopCaptureOptions::CreateDefault(), webrtc::kFullDesktopScreenId)); - assert(capturer.get()); + RTC_DCHECK(capturer.get()); capturer->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION); capturer->Capture(); @@ -102,7 +102,7 @@ TEST_F(MouseCursorMonitorTest, MAYBE(FromWindow)) { std::unique_ptr capturer( MouseCursorMonitor::CreateForWindow( DesktopCaptureOptions::CreateDefault(), sources[i].id)); - assert(capturer.get()); + RTC_DCHECK(capturer.get()); capturer->Init(this, MouseCursorMonitor::SHAPE_AND_POSITION); capturer->Capture(); @@ -118,7 +118,7 @@ TEST_F(MouseCursorMonitorTest, MAYBE(ShapeOnly)) { MouseCursorMonitor::CreateForScreen( DesktopCaptureOptions::CreateDefault(), webrtc::kFullDesktopScreenId)); - assert(capturer.get()); + RTC_DCHECK(capturer.get()); capturer->Init(this, MouseCursorMonitor::SHAPE_ONLY); capturer->Capture(); diff --git a/modules/desktop_capture/mouse_cursor_monitor_win.cc b/modules/desktop_capture/mouse_cursor_monitor_win.cc index bf0d8534e3..5a10ee1251 100644 --- a/modules/desktop_capture/mouse_cursor_monitor_win.cc +++ b/modules/desktop_capture/mouse_cursor_monitor_win.cc @@ -77,7 +77,7 @@ MouseCursorMonitorWin::MouseCursorMonitorWin(ScreenId screen) callback_(NULL), mode_(SHAPE_AND_POSITION), 
desktop_dc_(NULL) { - assert(screen >= kFullDesktopScreenId); + RTC_DCHECK_GE(screen, kFullDesktopScreenId); memset(&last_cursor_, 0, sizeof(CURSORINFO)); } @@ -87,8 +87,8 @@ MouseCursorMonitorWin::~MouseCursorMonitorWin() { } void MouseCursorMonitorWin::Init(Callback* callback, Mode mode) { - assert(!callback_); - assert(callback); + RTC_DCHECK(!callback_); + RTC_DCHECK(callback); callback_ = callback; mode_ = mode; @@ -97,7 +97,7 @@ void MouseCursorMonitorWin::Init(Callback* callback, Mode mode) { } void MouseCursorMonitorWin::Capture() { - assert(callback_); + RTC_DCHECK(callback_); CURSORINFO cursor_info; cursor_info.cbSize = sizeof(CURSORINFO); @@ -158,7 +158,7 @@ void MouseCursorMonitorWin::Capture() { position = position.subtract(cropped_rect.top_left()); } } else { - assert(screen_ != kInvalidScreenId); + RTC_DCHECK_NE(screen_, kInvalidScreenId); DesktopRect rect = GetScreenRect(); if (inside) inside = rect.Contains(position); @@ -169,7 +169,7 @@ void MouseCursorMonitorWin::Capture() { } DesktopRect MouseCursorMonitorWin::GetScreenRect() { - assert(screen_ != kInvalidScreenId); + RTC_DCHECK_NE(screen_, kInvalidScreenId); if (screen_ == kFullDesktopScreenId) { return DesktopRect::MakeXYWH(GetSystemMetrics(SM_XVIRTUALSCREEN), GetSystemMetrics(SM_YVIRTUALSCREEN), diff --git a/modules/desktop_capture/screen_capturer_helper.cc b/modules/desktop_capture/screen_capturer_helper.cc index 8a23c88be6..e8bd3fc450 100644 --- a/modules/desktop_capture/screen_capturer_helper.cc +++ b/modules/desktop_capture/screen_capturer_helper.cc @@ -14,24 +14,19 @@ namespace webrtc { -ScreenCapturerHelper::ScreenCapturerHelper() - : invalid_region_lock_(RWLockWrapper::CreateRWLock()), log_grid_size_(0) {} - -ScreenCapturerHelper::~ScreenCapturerHelper() {} - void ScreenCapturerHelper::ClearInvalidRegion() { - WriteLockScoped scoped_invalid_region_lock(*invalid_region_lock_); + MutexLock scoped_invalid_region_lock(&invalid_region_mutex_); invalid_region_.Clear(); } void 
ScreenCapturerHelper::InvalidateRegion( const DesktopRegion& invalid_region) { - WriteLockScoped scoped_invalid_region_lock(*invalid_region_lock_); + MutexLock scoped_invalid_region_lock(&invalid_region_mutex_); invalid_region_.AddRegion(invalid_region); } void ScreenCapturerHelper::InvalidateScreen(const DesktopSize& size) { - WriteLockScoped scoped_invalid_region_lock(*invalid_region_lock_); + MutexLock scoped_invalid_region_lock(&invalid_region_mutex_); invalid_region_.AddRect(DesktopRect::MakeSize(size)); } @@ -39,7 +34,7 @@ void ScreenCapturerHelper::TakeInvalidRegion(DesktopRegion* invalid_region) { invalid_region->Clear(); { - WriteLockScoped scoped_invalid_region_lock(*invalid_region_lock_); + MutexLock scoped_invalid_region_lock(&invalid_region_mutex_); invalid_region->Swap(&invalid_region_); } @@ -79,7 +74,7 @@ static int UpToMultiple(int x, int n, int nMask) { void ScreenCapturerHelper::ExpandToGrid(const DesktopRegion& region, int log_grid_size, DesktopRegion* result) { - assert(log_grid_size >= 1); + RTC_DCHECK_GE(log_grid_size, 1); int grid_size = 1 << log_grid_size; int grid_size_mask = ~(grid_size - 1); diff --git a/modules/desktop_capture/screen_capturer_helper.h b/modules/desktop_capture/screen_capturer_helper.h index fc4c85b706..3e658605a1 100644 --- a/modules/desktop_capture/screen_capturer_helper.h +++ b/modules/desktop_capture/screen_capturer_helper.h @@ -16,7 +16,8 @@ #include "modules/desktop_capture/desktop_geometry.h" #include "modules/desktop_capture/desktop_region.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/synchronization/rw_lock_wrapper.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { @@ -26,8 +27,8 @@ namespace webrtc { // ScreenCapturer that owns it. class ScreenCapturerHelper { public: - ScreenCapturerHelper(); - ~ScreenCapturerHelper(); + ScreenCapturerHelper() = default; + ~ScreenCapturerHelper() = default; // Clear out the invalid region. 
void ClearInvalidRegion(); @@ -69,10 +70,10 @@ class ScreenCapturerHelper { // A region that has been manually invalidated (through InvalidateRegion). // These will be returned as dirty_region in the capture data during the next // capture. - DesktopRegion invalid_region_; + DesktopRegion invalid_region_ RTC_GUARDED_BY(invalid_region_mutex_); // A lock protecting |invalid_region_| across threads. - std::unique_ptr invalid_region_lock_; + Mutex invalid_region_mutex_; // The size of the most recently captured screen. DesktopSize size_most_recent_; @@ -80,7 +81,7 @@ class ScreenCapturerHelper { // The log (base 2) of the size of the grid to which the invalid region is // expanded. // If the value is <= 0, then the invalid region is not expanded to a grid. - int log_grid_size_; + int log_grid_size_ = 0; RTC_DISALLOW_COPY_AND_ASSIGN(ScreenCapturerHelper); }; diff --git a/modules/desktop_capture/screen_capturer_linux.cc b/modules/desktop_capture/screen_capturer_linux.cc index 82dbae4813..ed48b7d6d5 100644 --- a/modules/desktop_capture/screen_capturer_linux.cc +++ b/modules/desktop_capture/screen_capturer_linux.cc @@ -14,7 +14,7 @@ #include "modules/desktop_capture/desktop_capturer.h" #if defined(WEBRTC_USE_PIPEWIRE) -#include "modules/desktop_capture/linux/screen_capturer_pipewire.h" +#include "modules/desktop_capture/linux/base_capturer_pipewire.h" #endif // defined(WEBRTC_USE_PIPEWIRE) #if defined(WEBRTC_USE_X11) @@ -28,7 +28,7 @@ std::unique_ptr DesktopCapturer::CreateRawScreenCapturer( const DesktopCaptureOptions& options) { #if defined(WEBRTC_USE_PIPEWIRE) if (options.allow_pipewire() && DesktopCapturer::IsRunningUnderWayland()) { - return ScreenCapturerPipeWire::CreateRawScreenCapturer(options); + return BaseCapturerPipeWire::CreateRawCapturer(options); } #endif // defined(WEBRTC_USE_PIPEWIRE) diff --git a/modules/desktop_capture/screen_capturer_unittest.cc b/modules/desktop_capture/screen_capturer_unittest.cc index ea77069278..ba6b8bfe3d 100644 --- 
a/modules/desktop_capture/screen_capturer_unittest.cc +++ b/modules/desktop_capture/screen_capturer_unittest.cc @@ -99,7 +99,13 @@ ACTION_P(SaveUniquePtrArg, dest) { *dest = std::move(*arg1); } -TEST_F(ScreenCapturerTest, GetScreenListAndSelectScreen) { +// TODO(bugs.webrtc.org/12950): Re-enable when libc++ issue is fixed. +#if defined(WEBRTC_LINUX) && defined(MEMORY_SANITIZER) +#define MAYBE_GetScreenListAndSelectScreen DISABLED_GetScreenListAndSelectScreen +#else +#define MAYBE_GetScreenListAndSelectScreen GetScreenListAndSelectScreen +#endif +TEST_F(ScreenCapturerTest, MAYBE_GetScreenListAndSelectScreen) { webrtc::DesktopCapturer::SourceList screens; EXPECT_TRUE(capturer_->GetSourceList(&screens)); for (const auto& screen : screens) { diff --git a/modules/desktop_capture/screen_drawer_unittest.cc b/modules/desktop_capture/screen_drawer_unittest.cc index c38eee6991..2394260105 100644 --- a/modules/desktop_capture/screen_drawer_unittest.cc +++ b/modules/desktop_capture/screen_drawer_unittest.cc @@ -48,13 +48,12 @@ void TestScreenDrawerLock( ~Task() = default; - static void RunTask(void* me) { - Task* task = static_cast(me); - std::unique_ptr lock = task->ctor_(); + void RunTask() { + std::unique_ptr lock = ctor_(); ASSERT_TRUE(!!lock); - task->created_->store(true); + created_->store(true); // Wait for the main thread to get the signal of created_. - while (!task->ready_.load()) { + while (!ready_.load()) { SleepMs(1); } // At this point, main thread should begin to create a second lock. Though @@ -77,8 +76,8 @@ void TestScreenDrawerLock( const rtc::FunctionView()> ctor_; } task(&created, ready, ctor); - rtc::PlatformThread lock_thread(&Task::RunTask, &task, "lock_thread"); - lock_thread.Start(); + auto lock_thread = rtc::PlatformThread::SpawnJoinable( + [&task] { task.RunTask(); }, "lock_thread"); // Wait for the first lock in Task::RunTask() to be created. 
// TODO(zijiehe): Find a better solution to wait for the creation of the first @@ -95,7 +94,6 @@ void TestScreenDrawerLock( ASSERT_GT(kLockDurationMs, rtc::TimeMillis() - start_ms); ctor(); ASSERT_LE(kLockDurationMs, rtc::TimeMillis() - start_ms); - lock_thread.Stop(); } } // namespace diff --git a/modules/desktop_capture/shared_desktop_frame.h b/modules/desktop_capture/shared_desktop_frame.h index fd862d7f21..1f451b65df 100644 --- a/modules/desktop_capture/shared_desktop_frame.h +++ b/modules/desktop_capture/shared_desktop_frame.h @@ -23,7 +23,7 @@ namespace webrtc { // SharedDesktopFrame is a DesktopFrame that may have multiple instances all // sharing the same buffer. -class RTC_EXPORT SharedDesktopFrame : public DesktopFrame { +class RTC_EXPORT SharedDesktopFrame final : public DesktopFrame { public: ~SharedDesktopFrame() override; @@ -51,7 +51,7 @@ class RTC_EXPORT SharedDesktopFrame : public DesktopFrame { bool IsShared(); private: - typedef rtc::RefCountedObject> Core; + typedef rtc::FinalRefCountedObject> Core; SharedDesktopFrame(rtc::scoped_refptr core); diff --git a/modules/desktop_capture/win/d3d_device.cc b/modules/desktop_capture/win/d3d_device.cc index b220b138a5..3d46117501 100644 --- a/modules/desktop_capture/win/d3d_device.cc +++ b/modules/desktop_capture/win/d3d_device.cc @@ -12,6 +12,7 @@ #include +#include "modules/desktop_capture/win/desktop_capture_utils.h" #include "rtc_base/logging.h" namespace webrtc { @@ -38,17 +39,15 @@ bool D3dDevice::Initialize(const ComPtr& adapter) { nullptr, 0, D3D11_SDK_VERSION, d3d_device_.GetAddressOf(), &feature_level, context_.GetAddressOf()); if (error.Error() != S_OK || !d3d_device_ || !context_) { - RTC_LOG(LS_WARNING) << "D3D11CreateDeivce returns error " - << error.ErrorMessage() << " with code " - << error.Error(); + RTC_LOG(LS_WARNING) << "D3D11CreateDevice returned: " + << desktop_capture::utils::ComErrorToString(error); return false; } if (feature_level < D3D_FEATURE_LEVEL_11_0) { RTC_LOG(LS_WARNING) - 
<< "D3D11CreateDevice returns an instance without DirectX " - "11 support, level " - << feature_level << ". Following initialization may fail."; + << "D3D11CreateDevice returned an instance without DirectX 11 support, " + << "level " << feature_level << ". Following initialization may fail."; // D3D_FEATURE_LEVEL_11_0 is not officially documented on MSDN to be a // requirement of Dxgi duplicator APIs. } @@ -57,9 +56,9 @@ bool D3dDevice::Initialize(const ComPtr& adapter) { if (error.Error() != S_OK || !dxgi_device_) { RTC_LOG(LS_WARNING) << "ID3D11Device is not an implementation of IDXGIDevice, " - "this usually means the system does not support DirectX " - "11. Error " - << error.ErrorMessage() << " with code " << error.Error(); + << "this usually means the system does not support DirectX " + << "11. Error received: " + << desktop_capture::utils::ComErrorToString(error); return false; } @@ -73,7 +72,8 @@ std::vector D3dDevice::EnumDevices() { CreateDXGIFactory1(__uuidof(IDXGIFactory1), reinterpret_cast(factory.GetAddressOf())); if (error.Error() != S_OK || !factory) { - RTC_LOG(LS_WARNING) << "Cannot create IDXGIFactory1."; + RTC_LOG(LS_WARNING) << "Cannot create IDXGIFactory1: " + << desktop_capture::utils::ComErrorToString(error); return std::vector(); } @@ -90,9 +90,8 @@ std::vector D3dDevice::EnumDevices() { break; } else { RTC_LOG(LS_WARNING) - << "IDXGIFactory1::EnumAdapters returns an unexpected " - "error " - << error.ErrorMessage() << " with code " << error.Error(); + << "IDXGIFactory1::EnumAdapters returned an unexpected error: " + << desktop_capture::utils::ComErrorToString(error); } } return result; diff --git a/modules/desktop_capture/win/desktop_capture_utils.cc b/modules/desktop_capture/win/desktop_capture_utils.cc new file mode 100644 index 0000000000..476ddc4aba --- /dev/null +++ b/modules/desktop_capture/win/desktop_capture_utils.cc @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/win/desktop_capture_utils.h" + +#include "rtc_base/strings/string_builder.h" + +namespace webrtc { +namespace desktop_capture { +namespace utils { + +// Generates a human-readable string from a COM error. +std::string ComErrorToString(const _com_error& error) { + char buffer[1024]; + rtc::SimpleStringBuilder string_builder(buffer); + // Use _bstr_t to simplify the wchar to char conversion for ErrorMessage(). + _bstr_t error_message(error.ErrorMessage()); + string_builder.AppendFormat("HRESULT: 0x%08X, Message: %s", error.Error(), + static_cast(error_message)); + return string_builder.str(); +} + +} // namespace utils +} // namespace desktop_capture +} // namespace webrtc diff --git a/modules/desktop_capture/win/desktop_capture_utils.h b/modules/desktop_capture/win/desktop_capture_utils.h new file mode 100644 index 0000000000..ebf31419ce --- /dev/null +++ b/modules/desktop_capture/win/desktop_capture_utils.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_CAPTURE_UTILS_H_ +#define MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_CAPTURE_UTILS_H_ + +#include + +#include + +namespace webrtc { +namespace desktop_capture { +namespace utils { + +// Generates a human-readable string from a COM error. +std::string ComErrorToString(const _com_error& error); + +} // namespace utils +} // namespace desktop_capture +} // namespace webrtc + +#endif // MODULES_DESKTOP_CAPTURE_WIN_DESKTOP_CAPTURE_UTILS_H_ diff --git a/modules/desktop_capture/win/dxgi_adapter_duplicator.cc b/modules/desktop_capture/win/dxgi_adapter_duplicator.cc index e3f11ac30a..88ec4e25bf 100644 --- a/modules/desktop_capture/win/dxgi_adapter_duplicator.cc +++ b/modules/desktop_capture/win/dxgi_adapter_duplicator.cc @@ -15,6 +15,7 @@ #include +#include "modules/desktop_capture/win/desktop_capture_utils.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" @@ -53,17 +54,16 @@ bool DxgiAdapterDuplicator::DoInitialize() { } if (error.Error() == DXGI_ERROR_NOT_CURRENTLY_AVAILABLE) { - RTC_LOG(LS_WARNING) << "IDXGIAdapter::EnumOutputs returns " - "NOT_CURRENTLY_AVAILABLE. This may happen when " - "running in session 0."; + RTC_LOG(LS_WARNING) << "IDXGIAdapter::EnumOutputs returned " + << "NOT_CURRENTLY_AVAILABLE. 
This may happen when " + << "running in session 0."; break; } if (error.Error() != S_OK || !output) { - RTC_LOG(LS_WARNING) << "IDXGIAdapter::EnumOutputs returns an unexpected " - "result " - << error.ErrorMessage() << " with error code" - << error.Error(); + RTC_LOG(LS_WARNING) << "IDXGIAdapter::EnumOutputs returned an unexpected " + << "result: " + << desktop_capture::utils::ComErrorToString(error); continue; } @@ -75,16 +75,14 @@ bool DxgiAdapterDuplicator::DoInitialize() { error = output.As(&output1); if (error.Error() != S_OK || !output1) { RTC_LOG(LS_WARNING) - << "Failed to convert IDXGIOutput to IDXGIOutput1, " - "this usually means the system does not support " - "DirectX 11"; + << "Failed to convert IDXGIOutput to IDXGIOutput1, this usually " + << "means the system does not support DirectX 11"; continue; } DxgiOutputDuplicator duplicator(device_, output1, desc); if (!duplicator.Initialize()) { RTC_LOG(LS_WARNING) << "Failed to initialize DxgiOutputDuplicator on " - "output " - << i; + << "output " << i; continue; } diff --git a/modules/desktop_capture/win/dxgi_duplicator_controller.cc b/modules/desktop_capture/win/dxgi_duplicator_controller.cc index bdf495837e..4460ad94f2 100644 --- a/modules/desktop_capture/win/dxgi_duplicator_controller.cc +++ b/modules/desktop_capture/win/dxgi_duplicator_controller.cc @@ -85,14 +85,14 @@ void DxgiDuplicatorController::Release() { } bool DxgiDuplicatorController::IsSupported() { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); return Initialize(); } bool DxgiDuplicatorController::RetrieveD3dInfo(D3dInfo* info) { bool result = false; { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); result = Initialize(); *info = d3d_info_; } @@ -116,7 +116,7 @@ DxgiDuplicatorController::Result DxgiDuplicatorController::DuplicateMonitor( } DesktopVector DxgiDuplicatorController::dpi() { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); if (Initialize()) { return dpi_; } @@ -124,7 +124,7 @@ DesktopVector 
DxgiDuplicatorController::dpi() { } int DxgiDuplicatorController::ScreenCount() { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); if (Initialize()) { return ScreenCountUnlocked(); } @@ -133,7 +133,7 @@ int DxgiDuplicatorController::ScreenCount() { bool DxgiDuplicatorController::GetDeviceNames( std::vector* output) { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); if (Initialize()) { GetDeviceNamesUnlocked(output); return true; @@ -145,7 +145,7 @@ DxgiDuplicatorController::Result DxgiDuplicatorController::DoDuplicate( DxgiFrame* frame, int monitor_id) { RTC_DCHECK(frame); - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); // The dxgi components and APIs do not update the screen resolution without // a reinitialization. So we use the GetDC() function to retrieve the screen @@ -198,12 +198,12 @@ DxgiDuplicatorController::Result DxgiDuplicatorController::DoDuplicate( } void DxgiDuplicatorController::Unload() { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); Deinitialize(); } void DxgiDuplicatorController::Unregister(const Context* const context) { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); if (ContextExpired(context)) { // The Context has not been setup after a recent initialization, so it // should not been registered in duplicators. 
diff --git a/modules/desktop_capture/win/dxgi_duplicator_controller.h b/modules/desktop_capture/win/dxgi_duplicator_controller.h index a24e9781b3..5e714f35cf 100644 --- a/modules/desktop_capture/win/dxgi_duplicator_controller.h +++ b/modules/desktop_capture/win/dxgi_duplicator_controller.h @@ -25,7 +25,7 @@ #include "modules/desktop_capture/win/dxgi_adapter_duplicator.h" #include "modules/desktop_capture/win/dxgi_context.h" #include "modules/desktop_capture/win/dxgi_frame.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -142,95 +142,103 @@ class DxgiDuplicatorController { Result DoDuplicate(DxgiFrame* frame, int monitor_id); // Unload all the DXGI components and releases the resources. This function - // wraps Deinitialize() with |lock_|. + // wraps Deinitialize() with |mutex_|. void Unload(); // Unregisters Context from this instance and all DxgiAdapterDuplicator(s) // it owns. void Unregister(const Context* const context); - // All functions below should be called in |lock_| locked scope and should be + // All functions below should be called in |mutex_| locked scope and should be // after a successful Initialize(). // If current instance has not been initialized, executes DoInitialize() // function, and returns initialize result. Otherwise directly returns true. // This function may calls Deinitialize() if initialization failed. - bool Initialize(); + bool Initialize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Does the real initialization work, this function should only be called in // Initialize(). - bool DoInitialize(); + bool DoInitialize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Clears all COM components referred by this instance. So next Duplicate() // call will eventually initialize this instance again. - void Deinitialize(); + void Deinitialize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // A helper function to check whether a Context has been expired. 
- bool ContextExpired(const Context* const context) const; + bool ContextExpired(const Context* const context) const + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Updates Context if needed. - void Setup(Context* context); + void Setup(Context* context) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); bool DoDuplicateUnlocked(Context* context, int monitor_id, - SharedDesktopFrame* target); + SharedDesktopFrame* target) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Captures all monitors. - bool DoDuplicateAll(Context* context, SharedDesktopFrame* target); + bool DoDuplicateAll(Context* context, SharedDesktopFrame* target) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Captures one monitor. bool DoDuplicateOne(Context* context, int monitor_id, - SharedDesktopFrame* target); + SharedDesktopFrame* target) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // The minimum GetNumFramesCaptured() returned by |duplicators_|. - int64_t GetNumFramesCaptured() const; + int64_t GetNumFramesCaptured() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns a DesktopSize to cover entire |desktop_rect_|. - DesktopSize desktop_size() const; + DesktopSize desktop_size() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns the size of one screen. |id| should be >= 0. If system does not // support DXGI based capturer, or |id| is greater than the total screen count // of all the Duplicators, this function returns an empty DesktopRect. - DesktopRect ScreenRect(int id) const; + DesktopRect ScreenRect(int id) const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - int ScreenCountUnlocked() const; + int ScreenCountUnlocked() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void GetDeviceNamesUnlocked(std::vector* output) const; + void GetDeviceNamesUnlocked(std::vector* output) const + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns the desktop size of the selected screen |monitor_id|. Setting // |monitor_id| < 0 to return the entire screen size. 
- DesktopSize SelectedDesktopSize(int monitor_id) const; + DesktopSize SelectedDesktopSize(int monitor_id) const + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Retries DoDuplicateAll() for several times until GetNumFramesCaptured() is // large enough. Returns false if DoDuplicateAll() returns false, or // GetNumFramesCaptured() has never reached the requirement. // According to http://crbug.com/682112, dxgi capturer returns a black frame // during first several capture attempts. - bool EnsureFrameCaptured(Context* context, SharedDesktopFrame* target); + bool EnsureFrameCaptured(Context* context, SharedDesktopFrame* target) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Moves |desktop_rect_| and all underlying |duplicators_|, putting top left // corner of the desktop at (0, 0). This is necessary because DXGI_OUTPUT_DESC // may return negative coordinates. Called from DoInitialize() after all // DxgiAdapterDuplicator and DxgiOutputDuplicator instances are initialized. - void TranslateRect(); + void TranslateRect() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // The count of references which are now "living". std::atomic_int refcount_; // This lock must be locked whenever accessing any of the following objects. - rtc::CriticalSection lock_; + Mutex mutex_; // A self-incremented integer to compare with the one in Context. It ensures // a Context instance is always initialized after DxgiDuplicatorController. - int identity_ = 0; - DesktopRect desktop_rect_; - DesktopVector dpi_; - std::vector duplicators_; - D3dInfo d3d_info_; - DisplayConfigurationMonitor display_configuration_monitor_; + int identity_ RTC_GUARDED_BY(mutex_) = 0; + DesktopRect desktop_rect_ RTC_GUARDED_BY(mutex_); + DesktopVector dpi_ RTC_GUARDED_BY(mutex_); + std::vector duplicators_ RTC_GUARDED_BY(mutex_); + D3dInfo d3d_info_ RTC_GUARDED_BY(mutex_); + DisplayConfigurationMonitor display_configuration_monitor_ + RTC_GUARDED_BY(mutex_); // A number to indicate how many succeeded duplications have been performed. 
- uint32_t succeeded_duplications_ = 0; + uint32_t succeeded_duplications_ RTC_GUARDED_BY(mutex_) = 0; }; } // namespace webrtc diff --git a/modules/desktop_capture/win/dxgi_output_duplicator.cc b/modules/desktop_capture/win/dxgi_output_duplicator.cc index 2d56b9af36..65a0d77667 100644 --- a/modules/desktop_capture/win/dxgi_output_duplicator.cc +++ b/modules/desktop_capture/win/dxgi_output_duplicator.cc @@ -18,6 +18,7 @@ #include +#include "modules/desktop_capture/win/desktop_capture_utils.h" #include "modules/desktop_capture/win/dxgi_texture_mapping.h" #include "modules/desktop_capture/win/dxgi_texture_staging.h" #include "rtc_base/checks.h" @@ -103,9 +104,8 @@ bool DxgiOutputDuplicator::DuplicateOutput() { output_->DuplicateOutput(static_cast(device_.d3d_device()), duplication_.GetAddressOf()); if (error.Error() != S_OK || !duplication_) { - RTC_LOG(LS_WARNING) - << "Failed to duplicate output from IDXGIOutput1, error " - << error.ErrorMessage() << ", with code " << error.Error(); + RTC_LOG(LS_WARNING) << "Failed to duplicate output from IDXGIOutput1: " + << desktop_capture::utils::ComErrorToString(error); return false; } @@ -113,9 +113,8 @@ bool DxgiOutputDuplicator::DuplicateOutput() { duplication_->GetDesc(&desc_); if (desc_.ModeDesc.Format != DXGI_FORMAT_B8G8R8A8_UNORM) { RTC_LOG(LS_ERROR) << "IDXGIDuplicateOutput does not use RGBA (8 bit) " - "format, which is required by downstream components, " - "format is " - << desc_.ModeDesc.Format; + << "format, which is required by downstream components, " + << "format is " << desc_.ModeDesc.Format; return false; } @@ -123,7 +122,7 @@ bool DxgiOutputDuplicator::DuplicateOutput() { static_cast(desc_.ModeDesc.Height) != desktop_rect_.height()) { RTC_LOG(LS_ERROR) << "IDXGIDuplicateOutput does not return a same size as its " - "IDXGIOutput1, size returned by IDXGIDuplicateOutput is " + << "IDXGIOutput1, size returned by IDXGIDuplicateOutput is " << desc_.ModeDesc.Width << " x " << desc_.ModeDesc.Height << ", size 
returned by IDXGIOutput1 is " << desktop_rect_.width() << " x " << desktop_rect_.height(); @@ -140,9 +139,8 @@ bool DxgiOutputDuplicator::ReleaseFrame() { RTC_DCHECK(duplication_); _com_error error = duplication_->ReleaseFrame(); if (error.Error() != S_OK) { - RTC_LOG(LS_ERROR) << "Failed to release frame from IDXGIOutputDuplication, " - "error" - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to release frame from IDXGIOutputDuplication: " + << desktop_capture::utils::ComErrorToString(error); return false; } return true; @@ -166,8 +164,8 @@ bool DxgiOutputDuplicator::Duplicate(Context* context, _com_error error = duplication_->AcquireNextFrame( kAcquireTimeoutMs, &frame_info, resource.GetAddressOf()); if (error.Error() != S_OK && error.Error() != DXGI_ERROR_WAIT_TIMEOUT) { - RTC_LOG(LS_ERROR) << "Failed to capture frame, error " - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to capture frame: " + << desktop_capture::utils::ComErrorToString(error); return false; } @@ -269,11 +267,11 @@ bool DxgiOutputDuplicator::DoDetectUpdatedRegion( if (frame_info.TotalMetadataBufferSize == 0) { // This should not happen, since frame_info.AccumulatedFrames > 0. 
RTC_LOG(LS_ERROR) << "frame_info.AccumulatedFrames > 0, " - "but TotalMetadataBufferSize == 0"; + << "but TotalMetadataBufferSize == 0"; return false; } - if (metadata_.capacity() < frame_info.TotalMetadataBufferSize) { + if (metadata_.size() < frame_info.TotalMetadataBufferSize) { metadata_.clear(); // Avoid data copy metadata_.resize(frame_info.TotalMetadataBufferSize); } @@ -283,10 +281,10 @@ bool DxgiOutputDuplicator::DoDetectUpdatedRegion( reinterpret_cast(metadata_.data()); size_t move_rects_count = 0; _com_error error = duplication_->GetFrameMoveRects( - static_cast(metadata_.capacity()), move_rects, &buff_size); + static_cast(metadata_.size()), move_rects, &buff_size); if (error.Error() != S_OK) { - RTC_LOG(LS_ERROR) << "Failed to get move rectangles, error " - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to get move rectangles: " + << desktop_capture::utils::ComErrorToString(error); return false; } move_rects_count = buff_size / sizeof(DXGI_OUTDUPL_MOVE_RECT); @@ -294,11 +292,10 @@ bool DxgiOutputDuplicator::DoDetectUpdatedRegion( RECT* dirty_rects = reinterpret_cast(metadata_.data() + buff_size); size_t dirty_rects_count = 0; error = duplication_->GetFrameDirtyRects( - static_cast(metadata_.capacity()) - buff_size, dirty_rects, - &buff_size); + static_cast(metadata_.size()) - buff_size, dirty_rects, &buff_size); if (error.Error() != S_OK) { - RTC_LOG(LS_ERROR) << "Failed to get dirty rectangles, error " - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to get dirty rectangles: " + << desktop_capture::utils::ComErrorToString(error); return false; } dirty_rects_count = buff_size / sizeof(RECT); diff --git a/modules/desktop_capture/win/dxgi_output_duplicator.h b/modules/desktop_capture/win/dxgi_output_duplicator.h index 5395146042..3079d3967a 100644 --- a/modules/desktop_capture/win/dxgi_output_duplicator.h +++ b/modules/desktop_capture/win/dxgi_output_duplicator.h @@ -27,7 +27,6 
@@ #include "modules/desktop_capture/win/d3d_device.h" #include "modules/desktop_capture/win/dxgi_context.h" #include "modules/desktop_capture/win/dxgi_texture.h" -#include "rtc_base/critical_section.h" #include "rtc_base/thread_annotations.h" namespace webrtc { diff --git a/modules/desktop_capture/win/dxgi_texture.cc b/modules/desktop_capture/win/dxgi_texture.cc index 2919692c40..b8f5b81f90 100644 --- a/modules/desktop_capture/win/dxgi_texture.cc +++ b/modules/desktop_capture/win/dxgi_texture.cc @@ -15,6 +15,7 @@ #include #include "modules/desktop_capture/desktop_region.h" +#include "modules/desktop_capture/win/desktop_capture_utils.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" @@ -49,9 +50,8 @@ bool DxgiTexture::CopyFrom(const DXGI_OUTDUPL_FRAME_INFO& frame_info, __uuidof(ID3D11Texture2D), reinterpret_cast(texture.GetAddressOf())); if (error.Error() != S_OK || !texture) { - RTC_LOG(LS_ERROR) << "Failed to convert IDXGIResource to ID3D11Texture2D, " - "error " - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to convert IDXGIResource to ID3D11Texture2D: " + << desktop_capture::utils::ComErrorToString(error); return false; } diff --git a/modules/desktop_capture/win/dxgi_texture_mapping.cc b/modules/desktop_capture/win/dxgi_texture_mapping.cc index 9e138d1d6f..7ecf1adc61 100644 --- a/modules/desktop_capture/win/dxgi_texture_mapping.cc +++ b/modules/desktop_capture/win/dxgi_texture_mapping.cc @@ -14,6 +14,7 @@ #include #include +#include "modules/desktop_capture/win/desktop_capture_utils.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" @@ -36,9 +37,8 @@ bool DxgiTextureMapping::CopyFromTexture( if (error.Error() != S_OK) { *rect() = {0}; RTC_LOG(LS_ERROR) - << "Failed to map the IDXGIOutputDuplication to a bitmap, " - "error " - << error.ErrorMessage() << ", code " << error.Error(); + << "Failed to map the IDXGIOutputDuplication to a bitmap: " + << desktop_capture::utils::ComErrorToString(error); 
return false; } @@ -48,8 +48,8 @@ bool DxgiTextureMapping::CopyFromTexture( bool DxgiTextureMapping::DoRelease() { _com_error error = duplication_->UnMapDesktopSurface(); if (error.Error() != S_OK) { - RTC_LOG(LS_ERROR) << "Failed to unmap the IDXGIOutputDuplication, error " - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to unmap the IDXGIOutputDuplication: " + << desktop_capture::utils::ComErrorToString(error); return false; } return true; diff --git a/modules/desktop_capture/win/dxgi_texture_staging.cc b/modules/desktop_capture/win/dxgi_texture_staging.cc index 2bd1eb9a6f..17e8518a7d 100644 --- a/modules/desktop_capture/win/dxgi_texture_staging.cc +++ b/modules/desktop_capture/win/dxgi_texture_staging.cc @@ -15,6 +15,7 @@ #include #include +#include "modules/desktop_capture/win/desktop_capture_utils.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "system_wrappers/include/metrics.h" @@ -64,17 +65,15 @@ bool DxgiTextureStaging::InitializeStage(ID3D11Texture2D* texture) { _com_error error = device_.d3d_device()->CreateTexture2D( &desc, nullptr, stage_.GetAddressOf()); if (error.Error() != S_OK || !stage_) { - RTC_LOG(LS_ERROR) - << "Failed to create a new ID3D11Texture2D as stage, error " - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to create a new ID3D11Texture2D as stage: " + << desktop_capture::utils::ComErrorToString(error); return false; } error = stage_.As(&surface_); if (error.Error() != S_OK || !surface_) { - RTC_LOG(LS_ERROR) - << "Failed to convert ID3D11Texture2D to IDXGISurface, error " - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to convert ID3D11Texture2D to IDXGISurface: " + << desktop_capture::utils::ComErrorToString(error); return false; } @@ -110,8 +109,8 @@ bool DxgiTextureStaging::CopyFromTexture( _com_error error = surface_->Map(rect(), DXGI_MAP_READ); if (error.Error() != S_OK) { *rect() = {0}; - 
RTC_LOG(LS_ERROR) << "Failed to map the IDXGISurface to a bitmap, error " - << error.ErrorMessage() << ", code " << error.Error(); + RTC_LOG(LS_ERROR) << "Failed to map the IDXGISurface to a bitmap: " + << desktop_capture::utils::ComErrorToString(error); return false; } diff --git a/modules/desktop_capture/win/full_screen_win_application_handler.cc b/modules/desktop_capture/win/full_screen_win_application_handler.cc index 0b7e3fc437..dd21410b03 100644 --- a/modules/desktop_capture/win/full_screen_win_application_handler.cc +++ b/modules/desktop_capture/win/full_screen_win_application_handler.cc @@ -14,6 +14,9 @@ #include #include #include +#include "absl/strings/match.h" +#include "modules/desktop_capture/win/screen_capture_utils.h" +#include "modules/desktop_capture/win/window_capture_utils.h" #include "rtc_base/arraysize.h" #include "rtc_base/logging.h" // For RTC_LOG_GLE #include "rtc_base/string_utils.h" @@ -21,6 +24,25 @@ namespace webrtc { namespace { +// Utility function to verify that |window| has class name equal to |class_name| +bool CheckWindowClassName(HWND window, const wchar_t* class_name) { + const size_t classNameLength = wcslen(class_name); + + // https://docs.microsoft.com/en-us/windows/win32/api/winuser/ns-winuser-wndclassa + // says lpszClassName field in WNDCLASS is limited by 256 symbols, so we don't + // need to have a buffer bigger than that. 
+ constexpr size_t kMaxClassNameLength = 256; + WCHAR buffer[kMaxClassNameLength]; + + const int length = ::GetClassNameW(window, buffer, kMaxClassNameLength); + if (length <= 0) + return false; + + if (static_cast(length) != classNameLength) + return false; + return wcsncmp(buffer, class_name, classNameLength) == 0; +} + std::string WindowText(HWND window) { size_t len = ::GetWindowTextLength(window); if (len == 0) @@ -146,20 +168,7 @@ class FullScreenPowerPointHandler : public FullScreenApplicationHandler { } bool IsEditorWindow(HWND window) const { - constexpr WCHAR kScreenClassName[] = L"PPTFrameClass"; - constexpr size_t kScreenClassNameLength = arraysize(kScreenClassName) - 1; - - // We need to verify that window class is equal to |kScreenClassName|. - // To do that we need a buffer large enough to include a null terminated - // string one code point bigger than |kScreenClassName|. It will help us to - // check that size of class name string returned by GetClassNameW is equal - // to |kScreenClassNameLength| not being limited by size of buffer (case - // when |kScreenClassName| is a prefix for class name string). 
- WCHAR buffer[arraysize(kScreenClassName) + 3]; - const int length = ::GetClassNameW(window, buffer, arraysize(buffer)); - if (length != kScreenClassNameLength) - return false; - return wcsncmp(buffer, kScreenClassName, kScreenClassNameLength) == 0; + return CheckWindowClassName(window, L"PPTFrameClass"); } bool IsSlideShowWindow(HWND window) const { @@ -170,6 +179,74 @@ class FullScreenPowerPointHandler : public FullScreenApplicationHandler { } }; +class OpenOfficeApplicationHandler : public FullScreenApplicationHandler { + public: + explicit OpenOfficeApplicationHandler(DesktopCapturer::SourceId sourceId) + : FullScreenApplicationHandler(sourceId) {} + + DesktopCapturer::SourceId FindFullScreenWindow( + const DesktopCapturer::SourceList& window_list, + int64_t timestamp) const override { + if (window_list.empty()) + return 0; + + DWORD process_id = WindowProcessId(reinterpret_cast(GetSourceId())); + + DesktopCapturer::SourceList app_windows = + GetProcessWindows(window_list, process_id, nullptr); + + DesktopCapturer::SourceList document_windows; + std::copy_if( + app_windows.begin(), app_windows.end(), + std::back_inserter(document_windows), + [this](const DesktopCapturer::Source& x) { return IsEditorWindow(x); }); + + // Check if we have only one document window, otherwise it's not possible + // to securely match a document window and a slide show window which has + // empty title. + if (document_windows.size() != 1) { + return 0; + } + + // Check if document window has been selected as a source + if (document_windows.front().id != GetSourceId()) { + return 0; + } + + // Check if we have a slide show window. 
+ auto slide_show_window = + std::find_if(app_windows.begin(), app_windows.end(), + [this](const DesktopCapturer::Source& x) { + return IsSlideShowWindow(x); + }); + + if (slide_show_window == app_windows.end()) + return 0; + + return slide_show_window->id; + } + + private: + bool IsEditorWindow(const DesktopCapturer::Source& source) const { + if (source.title.empty()) { + return false; + } + + return CheckWindowClassName(reinterpret_cast(source.id), L"SALFRAME"); + } + + bool IsSlideShowWindow(const DesktopCapturer::Source& source) const { + // Check title size to filter out a Presenter Control window which shares + // window class with Slide Show window but has non empty title. + if (!source.title.empty()) { + return false; + } + + return CheckWindowClassName(reinterpret_cast(source.id), + L"SALTMPSUBFRAME"); + } +}; + std::wstring GetPathByWindowId(HWND window_id) { DWORD process_id = WindowProcessId(window_id); HANDLE process = @@ -193,13 +270,17 @@ std::wstring GetPathByWindowId(HWND window_id) { std::unique_ptr CreateFullScreenWinApplicationHandler(DesktopCapturer::SourceId source_id) { std::unique_ptr result; - std::wstring exe_path = GetPathByWindowId(reinterpret_cast(source_id)); + HWND hwnd = reinterpret_cast(source_id); + std::wstring exe_path = GetPathByWindowId(hwnd); std::wstring file_name = FileNameFromPath(exe_path); std::transform(file_name.begin(), file_name.end(), file_name.begin(), std::towupper); if (file_name == L"POWERPNT.EXE") { result = std::make_unique(source_id); + } else if (file_name == L"SOFFICE.BIN" && + absl::EndsWith(WindowText(hwnd), "OpenOffice Impress")) { + result = std::make_unique(source_id); } return result; diff --git a/modules/desktop_capture/win/scoped_gdi_object.h b/modules/desktop_capture/win/scoped_gdi_object.h index 56abe95a9e..d3ac9b9443 100644 --- a/modules/desktop_capture/win/scoped_gdi_object.h +++ b/modules/desktop_capture/win/scoped_gdi_object.h @@ -58,27 +58,29 @@ class ScopedGDIObject { template class 
DeleteObjectTraits { public: + DeleteObjectTraits() = delete; + DeleteObjectTraits(const DeleteObjectTraits&) = delete; + DeleteObjectTraits& operator=(const DeleteObjectTraits&) = delete; + // Closes the handle. static void Close(T handle) { if (handle) DeleteObject(handle); } - - private: - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DeleteObjectTraits); }; // The traits class that uses DestroyCursor() to close a handle. class DestroyCursorTraits { public: + DestroyCursorTraits() = delete; + DestroyCursorTraits(const DestroyCursorTraits&) = delete; + DestroyCursorTraits& operator=(const DestroyCursorTraits&) = delete; + // Closes the handle. static void Close(HCURSOR handle) { if (handle) DestroyCursor(handle); } - - private: - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DestroyCursorTraits); }; typedef ScopedGDIObject > ScopedBitmap; diff --git a/modules/desktop_capture/win/screen_capture_utils.cc b/modules/desktop_capture/win/screen_capture_utils.cc index 95f6d92059..53b6dd399c 100644 --- a/modules/desktop_capture/win/screen_capture_utils.cc +++ b/modules/desktop_capture/win/screen_capture_utils.cc @@ -16,7 +16,9 @@ #include #include "modules/desktop_capture/desktop_capturer.h" +#include "modules/desktop_capture/desktop_geometry.h" #include "rtc_base/checks.h" +#include "rtc_base/logging.h" #include "rtc_base/string_utils.h" #include "rtc_base/win32.h" @@ -36,12 +38,14 @@ bool GetScreenList(DesktopCapturer::SourceList* screens, enum_result = EnumDisplayDevicesW(NULL, device_index, &device, 0); // |enum_result| is 0 if we have enumerated all devices. - if (!enum_result) + if (!enum_result) { break; + } // We only care about active displays. 
- if (!(device.StateFlags & DISPLAY_DEVICE_ACTIVE)) + if (!(device.StateFlags & DISPLAY_DEVICE_ACTIVE)) { continue; + } screens->push_back({device_index, std::string()}); if (device_names) { @@ -51,7 +55,64 @@ bool GetScreenList(DesktopCapturer::SourceList* screens, return true; } -bool IsScreenValid(DesktopCapturer::SourceId screen, std::wstring* device_key) { +bool GetHmonitorFromDeviceIndex(const DesktopCapturer::SourceId device_index, + HMONITOR* hmonitor) { + // A device index of |kFullDesktopScreenId| or -1 represents all screens, an + // HMONITOR of 0 indicates the same. + if (device_index == kFullDesktopScreenId) { + *hmonitor = 0; + return true; + } + + std::wstring device_key; + if (!IsScreenValid(device_index, &device_key)) { + return false; + } + + DesktopRect screen_rect = GetScreenRect(device_index, device_key); + if (screen_rect.is_empty()) { + return false; + } + + RECT rect = {screen_rect.left(), screen_rect.top(), screen_rect.right(), + screen_rect.bottom()}; + + HMONITOR monitor = MonitorFromRect(&rect, MONITOR_DEFAULTTONULL); + if (monitor == NULL) { + RTC_LOG(LS_WARNING) << "No HMONITOR found for supplied device index."; + return false; + } + + *hmonitor = monitor; + return true; +} + +bool IsMonitorValid(const HMONITOR monitor) { + // An HMONITOR of 0 refers to a virtual monitor that spans all physical + // monitors. 
+ if (monitor == 0) { + return true; + } + + MONITORINFO monitor_info; + monitor_info.cbSize = sizeof(MONITORINFO); + return GetMonitorInfoA(monitor, &monitor_info); +} + +DesktopRect GetMonitorRect(const HMONITOR monitor) { + MONITORINFO monitor_info; + monitor_info.cbSize = sizeof(MONITORINFO); + if (!GetMonitorInfoA(monitor, &monitor_info)) { + return DesktopRect(); + } + + return DesktopRect::MakeLTRB( + monitor_info.rcMonitor.left, monitor_info.rcMonitor.top, + monitor_info.rcMonitor.right, monitor_info.rcMonitor.bottom); +} + +bool IsScreenValid(const DesktopCapturer::SourceId screen, + std::wstring* device_key) { if (screen == kFullDesktopScreenId) { *device_key = L""; return true; @@ -60,8 +121,9 @@ bool IsScreenValid(DesktopCapturer::SourceId screen, std::wstring* device_key) { DISPLAY_DEVICEW device; device.cb = sizeof(device); BOOL enum_result = EnumDisplayDevicesW(NULL, screen, &device, 0); - if (enum_result) + if (enum_result) { *device_key = device.DeviceKey; + } return !!enum_result; } @@ -73,7 +135,7 @@ DesktopRect GetFullscreenRect() { GetSystemMetrics(SM_CYVIRTUALSCREEN)); } -DesktopRect GetScreenRect(DesktopCapturer::SourceId screen, +DesktopRect GetScreenRect(const DesktopCapturer::SourceId screen, const std::wstring& device_key) { if (screen == kFullDesktopScreenId) { return GetFullscreenRect(); @@ -82,23 +144,26 @@ DesktopRect GetScreenRect(DesktopCapturer::SourceId screen, DISPLAY_DEVICEW device; device.cb = sizeof(device); BOOL result = EnumDisplayDevicesW(NULL, screen, &device, 0); - if (!result) + if (!result) { return DesktopRect(); + } // Verifies the device index still maps to the same display device, to make // sure we are capturing the same device when devices are added or removed. // DeviceKey is documented as reserved, but it actually contains the registry // key for the device and is unique for each monitor, while DeviceID is not. 
- if (device_key != device.DeviceKey) + if (device_key != device.DeviceKey) { return DesktopRect(); + } DEVMODEW device_mode; device_mode.dmSize = sizeof(device_mode); device_mode.dmDriverExtra = 0; result = EnumDisplaySettingsExW(device.DeviceName, ENUM_CURRENT_SETTINGS, &device_mode, 0); - if (!result) + if (!result) { return DesktopRect(); + } return DesktopRect::MakeXYWH( device_mode.dmPosition.x, device_mode.dmPosition.y, diff --git a/modules/desktop_capture/win/screen_capture_utils.h b/modules/desktop_capture/win/screen_capture_utils.h index 5c4c11d542..dc993dad25 100644 --- a/modules/desktop_capture/win/screen_capture_utils.h +++ b/modules/desktop_capture/win/screen_capture_utils.h @@ -27,11 +27,26 @@ namespace webrtc { bool GetScreenList(DesktopCapturer::SourceList* screens, std::vector* device_names = nullptr); +// Converts a device index (which are returned by |GetScreenList|) into an +// HMONITOR. +bool GetHmonitorFromDeviceIndex(const DesktopCapturer::SourceId device_index, + HMONITOR* hmonitor); + +// Returns true if |monitor| represents a valid display +// monitor. Consumers should recheck the validity of HMONITORs before use if a +// WM_DISPLAYCHANGE message has been received. +bool IsMonitorValid(const HMONITOR monitor); + +// Returns the rect of the monitor identified by |monitor|, relative to the +// primary display's top-left. On failure, returns an empty rect. +DesktopRect GetMonitorRect(const HMONITOR monitor); + // Returns true if |screen| is a valid screen. The screen device key is // returned through |device_key| if the screen is valid. The device key can be // used in GetScreenRect to verify the screen matches the previously obtained // id. -bool IsScreenValid(DesktopCapturer::SourceId screen, std::wstring* device_key); +bool IsScreenValid(const DesktopCapturer::SourceId screen, + std::wstring* device_key); // Get the rect of the entire system in system coordinate system. I.e. the // primary monitor always starts from (0, 0). 
@@ -40,7 +55,7 @@ DesktopRect GetFullscreenRect(); // Get the rect of the screen identified by |screen|, relative to the primary // display's top-left. If the screen device key does not match |device_key|, or // the screen does not exist, or any error happens, an empty rect is returned. -RTC_EXPORT DesktopRect GetScreenRect(DesktopCapturer::SourceId screen, +RTC_EXPORT DesktopRect GetScreenRect(const DesktopCapturer::SourceId screen, const std::wstring& device_key); } // namespace webrtc diff --git a/modules/desktop_capture/win/screen_capture_utils_unittest.cc b/modules/desktop_capture/win/screen_capture_utils_unittest.cc index a71c4f7610..80d1fb3242 100644 --- a/modules/desktop_capture/win/screen_capture_utils_unittest.cc +++ b/modules/desktop_capture/win/screen_capture_utils_unittest.cc @@ -13,7 +13,9 @@ #include #include +#include "modules/desktop_capture/desktop_capture_types.h" #include "modules/desktop_capture/desktop_capturer.h" +#include "rtc_base/logging.h" #include "test/gtest.h" namespace webrtc { @@ -29,4 +31,29 @@ TEST(ScreenCaptureUtilsTest, GetScreenList) { ASSERT_EQ(screens.size(), device_names.size()); } +TEST(ScreenCaptureUtilsTest, DeviceIndexToHmonitor) { + DesktopCapturer::SourceList screens; + ASSERT_TRUE(GetScreenList(&screens)); + if (screens.size() == 0) { + RTC_LOG(LS_INFO) << "Skip screen capture test on systems with no monitors."; + GTEST_SKIP(); + } + + HMONITOR hmonitor; + ASSERT_TRUE(GetHmonitorFromDeviceIndex(screens[0].id, &hmonitor)); + ASSERT_TRUE(IsMonitorValid(hmonitor)); +} + +TEST(ScreenCaptureUtilsTest, FullScreenDeviceIndexToHmonitor) { + HMONITOR hmonitor; + ASSERT_TRUE(GetHmonitorFromDeviceIndex(kFullDesktopScreenId, &hmonitor)); + ASSERT_EQ(hmonitor, static_cast(0)); + ASSERT_TRUE(IsMonitorValid(hmonitor)); +} + +TEST(ScreenCaptureUtilsTest, InvalidDeviceIndexToHmonitor) { + HMONITOR hmonitor; + ASSERT_FALSE(GetHmonitorFromDeviceIndex(kInvalidScreenId, &hmonitor)); +} + } // namespace webrtc diff --git 
a/modules/desktop_capture/win/screen_capturer_win_directx.cc b/modules/desktop_capture/win/screen_capturer_win_directx.cc index df3bee8f26..1556d7c787 100644 --- a/modules/desktop_capture/win/screen_capturer_win_directx.cc +++ b/modules/desktop_capture/win/screen_capturer_win_directx.cc @@ -16,12 +16,15 @@ #include #include +#include "modules/desktop_capture/desktop_capture_metrics_helper.h" +#include "modules/desktop_capture/desktop_capture_types.h" #include "modules/desktop_capture/desktop_frame.h" #include "modules/desktop_capture/win/screen_capture_utils.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/time_utils.h" #include "rtc_base/trace_event.h" +#include "system_wrappers/include/metrics.h" namespace webrtc { @@ -106,6 +109,7 @@ ScreenCapturerWinDirectx::~ScreenCapturerWinDirectx() = default; void ScreenCapturerWinDirectx::Start(Callback* callback) { RTC_DCHECK(!callback_); RTC_DCHECK(callback); + RecordCapturerImpl(DesktopCapturerId::kScreenCapturerWinDirectx); callback_ = callback; } @@ -169,8 +173,13 @@ void ScreenCapturerWinDirectx::CaptureFrame() { case DuplicateResult::SUCCEEDED: { std::unique_ptr frame = frames_.current_frame()->frame()->Share(); - frame->set_capture_time_ms((rtc::TimeNanos() - capture_start_time_nanos) / - rtc::kNumNanosecsPerMillisec); + + int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) / + rtc::kNumNanosecsPerMillisec; + RTC_HISTOGRAM_COUNTS_1000( + "WebRTC.DesktopCapture.Win.DirectXCapturerFrameTime", + capture_time_ms); + frame->set_capture_time_ms(capture_time_ms); frame->set_capturer_id(DesktopCapturerId::kScreenCapturerWinDirectx); // TODO(julien.isorce): http://crbug.com/945468. 
Set the icc profile on diff --git a/modules/desktop_capture/win/screen_capturer_win_gdi.cc b/modules/desktop_capture/win/screen_capturer_win_gdi.cc index bf6cb162a0..dc27344f82 100644 --- a/modules/desktop_capture/win/screen_capturer_win_gdi.cc +++ b/modules/desktop_capture/win/screen_capturer_win_gdi.cc @@ -12,7 +12,9 @@ #include +#include "modules/desktop_capture/desktop_capture_metrics_helper.h" #include "modules/desktop_capture/desktop_capture_options.h" +#include "modules/desktop_capture/desktop_capture_types.h" #include "modules/desktop_capture/desktop_frame.h" #include "modules/desktop_capture/desktop_frame_win.h" #include "modules/desktop_capture/desktop_region.h" @@ -24,6 +26,7 @@ #include "rtc_base/logging.h" #include "rtc_base/time_utils.h" #include "rtc_base/trace_event.h" +#include "system_wrappers/include/metrics.h" namespace webrtc { @@ -92,8 +95,12 @@ void ScreenCapturerWinGdi::CaptureFrame() { GetDeviceCaps(desktop_dc_, LOGPIXELSY))); frame->mutable_updated_region()->SetRect( DesktopRect::MakeSize(frame->size())); - frame->set_capture_time_ms((rtc::TimeNanos() - capture_start_time_nanos) / - rtc::kNumNanosecsPerMillisec); + + int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) / + rtc::kNumNanosecsPerMillisec; + RTC_HISTOGRAM_COUNTS_1000( + "WebRTC.DesktopCapture.Win.ScreenGdiCapturerFrameTime", capture_time_ms); + frame->set_capture_time_ms(capture_time_ms); frame->set_capturer_id(DesktopCapturerId::kScreenCapturerWinGdi); callback_->OnCaptureResult(Result::SUCCESS, std::move(frame)); } @@ -112,6 +119,7 @@ bool ScreenCapturerWinGdi::SelectSource(SourceId id) { void ScreenCapturerWinGdi::Start(Callback* callback) { RTC_DCHECK(!callback_); RTC_DCHECK(callback); + RecordCapturerImpl(DesktopCapturerId::kScreenCapturerWinGdi); callback_ = callback; diff --git a/modules/desktop_capture/win/screen_capturer_win_magnifier.cc b/modules/desktop_capture/win/screen_capturer_win_magnifier.cc index 1a7bbc18c8..214eb0e463 100644 --- 
a/modules/desktop_capture/win/screen_capturer_win_magnifier.cc +++ b/modules/desktop_capture/win/screen_capturer_win_magnifier.cc @@ -12,7 +12,9 @@ #include +#include "modules/desktop_capture/desktop_capture_metrics_helper.h" #include "modules/desktop_capture/desktop_capture_options.h" +#include "modules/desktop_capture/desktop_capture_types.h" #include "modules/desktop_capture/desktop_frame.h" #include "modules/desktop_capture/desktop_frame_win.h" #include "modules/desktop_capture/desktop_region.h" @@ -23,6 +25,7 @@ #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/time_utils.h" +#include "system_wrappers/include/metrics.h" namespace webrtc { @@ -62,6 +65,8 @@ ScreenCapturerWinMagnifier::~ScreenCapturerWinMagnifier() { void ScreenCapturerWinMagnifier::Start(Callback* callback) { RTC_DCHECK(!callback_); RTC_DCHECK(callback); + RecordCapturerImpl(DesktopCapturerId::kScreenCapturerWinMagnifier); + callback_ = callback; if (!InitializeMagnifier()) { @@ -115,8 +120,13 @@ void ScreenCapturerWinMagnifier::CaptureFrame() { GetDeviceCaps(desktop_dc_, LOGPIXELSY))); frame->mutable_updated_region()->SetRect( DesktopRect::MakeSize(frame->size())); - frame->set_capture_time_ms((rtc::TimeNanos() - capture_start_time_nanos) / - rtc::kNumNanosecsPerMillisec); + + int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) / + rtc::kNumNanosecsPerMillisec; + RTC_HISTOGRAM_COUNTS_1000( + "WebRTC.DesktopCapture.Win.MagnifierCapturerFrameTime", capture_time_ms); + frame->set_capture_time_ms(capture_time_ms); + frame->set_capturer_id(DesktopCapturerId::kScreenCapturerWinMagnifier); callback_->OnCaptureResult(Result::SUCCESS, std::move(frame)); } diff --git a/modules/desktop_capture/win/selected_window_context.cc b/modules/desktop_capture/win/selected_window_context.cc index 74459571ca..398ea1e53a 100644 --- a/modules/desktop_capture/win/selected_window_context.cc +++ b/modules/desktop_capture/win/selected_window_context.cc @@ -28,20 +28,19 @@ bool 
SelectedWindowContext::IsSelectedWindowValid() const { } bool SelectedWindowContext::IsWindowOwnedBySelectedWindow(HWND hwnd) const { - // This check works for drop-down menus & dialog pop-up windows. It doesn't - // work for context menus or tooltips, which are handled differently below. + // This check works for drop-down menus & dialog pop-up windows. if (GetAncestor(hwnd, GA_ROOTOWNER) == selected_window_) { return true; } - // Some pop-up windows aren't owned (e.g. context menus, tooltips); treat - // windows that belong to the same thread as owned. - DWORD enumerated_window_process_id = 0; - DWORD enumerated_window_thread_id = - GetWindowThreadProcessId(hwnd, &enumerated_window_process_id); - return enumerated_window_thread_id != 0 && - enumerated_window_process_id == selected_window_process_id_ && - enumerated_window_thread_id == selected_window_thread_id_; + // Assume that all other windows are unrelated to the selected window. + // This will cause some windows that are actually related to be missed, + // e.g. context menus and tool-tips, but avoids the risk of capturing + // unrelated windows. Using heuristics such as matching the thread and + // process Ids suffers from false-positives, e.g. in multi-document + // applications. + + return false; } bool SelectedWindowContext::IsWindowOverlappingSelectedWindow(HWND hwnd) const { diff --git a/modules/desktop_capture/win/test_support/test_window.cc b/modules/desktop_capture/win/test_support/test_window.cc new file mode 100644 index 0000000000..c07ff74aa5 --- /dev/null +++ b/modules/desktop_capture/win/test_support/test_window.cc @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/win/test_support/test_window.h" + +namespace webrtc { +namespace { + +const WCHAR kWindowClass[] = L"DesktopCaptureTestWindowClass"; +const int kWindowHeight = 200; +const int kWindowWidth = 300; + +LRESULT CALLBACK WindowProc(HWND hwnd, + UINT msg, + WPARAM w_param, + LPARAM l_param) { + switch (msg) { + case WM_PAINT: + PAINTSTRUCT paint_struct; + HDC hdc = BeginPaint(hwnd, &paint_struct); + + // Paint the window so the color is consistent and we can inspect the + // pixels in tests and know what to expect. + FillRect(hdc, &paint_struct.rcPaint, + CreateSolidBrush(RGB(kTestWindowRValue, kTestWindowGValue, + kTestWindowBValue))); + + EndPaint(hwnd, &paint_struct); + } + return DefWindowProc(hwnd, msg, w_param, l_param); +} + +} // namespace + +WindowInfo CreateTestWindow(const WCHAR* window_title, + const int height, + const int width, + const LONG extended_styles) { + WindowInfo info; + ::GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | + GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, + reinterpret_cast(&WindowProc), + &info.window_instance); + + WNDCLASSEXW wcex; + memset(&wcex, 0, sizeof(wcex)); + wcex.cbSize = sizeof(wcex); + wcex.style = CS_HREDRAW | CS_VREDRAW; + wcex.hInstance = info.window_instance; + wcex.lpfnWndProc = &WindowProc; + wcex.lpszClassName = kWindowClass; + info.window_class = ::RegisterClassExW(&wcex); + + // Use the default height and width if the caller did not supply the optional + // height and width parameters, or if they supplied invalid values. + int window_height = height <= 0 ? kWindowHeight : height; + int window_width = width <= 0 ? 
kWindowWidth : width; + info.hwnd = + ::CreateWindowExW(extended_styles, kWindowClass, window_title, + WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, + window_width, window_height, /*parent_window=*/nullptr, + /*menu_bar=*/nullptr, info.window_instance, + /*additional_params=*/nullptr); + + ::ShowWindow(info.hwnd, SW_SHOWNORMAL); + ::UpdateWindow(info.hwnd); + return info; +} + +void ResizeTestWindow(const HWND hwnd, const int width, const int height) { + // SWP_NOMOVE results in the x and y params being ignored. + ::SetWindowPos(hwnd, HWND_TOP, /*x-coord=*/0, /*y-coord=*/0, width, height, + SWP_SHOWWINDOW | SWP_NOMOVE); + ::UpdateWindow(hwnd); +} + +void MoveTestWindow(const HWND hwnd, const int x, const int y) { + // SWP_NOSIZE results in the width and height params being ignored. + ::SetWindowPos(hwnd, HWND_TOP, x, y, /*width=*/0, /*height=*/0, + SWP_SHOWWINDOW | SWP_NOSIZE); + ::UpdateWindow(hwnd); +} + +void MinimizeTestWindow(const HWND hwnd) { + ::ShowWindow(hwnd, SW_MINIMIZE); +} + +void UnminimizeTestWindow(const HWND hwnd) { + ::OpenIcon(hwnd); +} + +void DestroyTestWindow(WindowInfo info) { + ::DestroyWindow(info.hwnd); + ::UnregisterClass(MAKEINTATOM(info.window_class), info.window_instance); +} + +} // namespace webrtc diff --git a/modules/desktop_capture/win/test_support/test_window.h b/modules/desktop_capture/win/test_support/test_window.h new file mode 100644 index 0000000000..8701dc990b --- /dev/null +++ b/modules/desktop_capture/win/test_support/test_window.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef MODULES_DESKTOP_CAPTURE_WIN_TEST_SUPPORT_TEST_WINDOW_H_ +#define MODULES_DESKTOP_CAPTURE_WIN_TEST_SUPPORT_TEST_WINDOW_H_ + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include + +namespace webrtc { + +typedef unsigned char uint8_t; + +// Define an arbitrary color for the test window with unique R, G, and B values +// so consumers can verify captured content in tests. +const uint8_t kTestWindowRValue = 191; +const uint8_t kTestWindowGValue = 99; +const uint8_t kTestWindowBValue = 12; + +struct WindowInfo { + HWND hwnd; + HINSTANCE window_instance; + ATOM window_class; +}; + +WindowInfo CreateTestWindow(const WCHAR* window_title, + const int height = 0, + const int width = 0, + const LONG extended_styles = 0); + +void ResizeTestWindow(const HWND hwnd, const int width, const int height); + +void MoveTestWindow(const HWND hwnd, const int x, const int y); + +void MinimizeTestWindow(const HWND hwnd); + +void UnminimizeTestWindow(const HWND hwnd); + +void DestroyTestWindow(WindowInfo info); + +} // namespace webrtc + +#endif // MODULES_DESKTOP_CAPTURE_WIN_TEST_SUPPORT_TEST_WINDOW_H_ diff --git a/modules/desktop_capture/win/wgc_capture_session.cc b/modules/desktop_capture/win/wgc_capture_session.cc new file mode 100644 index 0000000000..48c56864b3 --- /dev/null +++ b/modules/desktop_capture/win/wgc_capture_session.cc @@ -0,0 +1,372 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/desktop_capture/win/wgc_capture_session.h" + +#include +#include +#include + +#include +#include +#include + +#include "modules/desktop_capture/win/wgc_desktop_frame.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/time_utils.h" +#include "rtc_base/win/create_direct3d_device.h" +#include "rtc_base/win/get_activation_factory.h" +#include "system_wrappers/include/metrics.h" + +using Microsoft::WRL::ComPtr; +namespace WGC = ABI::Windows::Graphics::Capture; + +namespace webrtc { +namespace { + +// We must use a BGRA pixel format that has 4 bytes per pixel, as required by +// the DesktopFrame interface. +const auto kPixelFormat = ABI::Windows::Graphics::DirectX::DirectXPixelFormat:: + DirectXPixelFormat_B8G8R8A8UIntNormalized; + +// We only want 1 buffer in our frame pool to reduce latency. If we had more, +// they would sit in the pool for longer and be stale by the time we are asked +// for a new frame. +const int kNumBuffers = 1; + +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. +enum class StartCaptureResult { + kSuccess = 0, + kSourceClosed = 1, + kAddClosedFailed = 2, + kDxgiDeviceCastFailed = 3, + kD3dDelayLoadFailed = 4, + kD3dDeviceCreationFailed = 5, + kFramePoolActivationFailed = 6, + kFramePoolCastFailed = 7, + kGetItemSizeFailed = 8, + kCreateFreeThreadedFailed = 9, + kCreateCaptureSessionFailed = 10, + kStartCaptureFailed = 11, + kMaxValue = kStartCaptureFailed +}; + +// These values are persisted to logs. Entries should not be renumbered and +// numeric values should never be reused. 
+enum class GetFrameResult { + kSuccess = 0, + kItemClosed = 1, + kTryGetNextFrameFailed = 2, + kFrameDropped = 3, + kGetSurfaceFailed = 4, + kDxgiInterfaceAccessFailed = 5, + kTexture2dCastFailed = 6, + kCreateMappedTextureFailed = 7, + kMapFrameFailed = 8, + kGetContentSizeFailed = 9, + kResizeMappedTextureFailed = 10, + kRecreateFramePoolFailed = 11, + kMaxValue = kRecreateFramePoolFailed +}; + +void RecordStartCaptureResult(StartCaptureResult error) { + RTC_HISTOGRAM_ENUMERATION( + "WebRTC.DesktopCapture.Win.WgcCaptureSessionStartResult", + static_cast(error), static_cast(StartCaptureResult::kMaxValue)); +} + +void RecordGetFrameResult(GetFrameResult error) { + RTC_HISTOGRAM_ENUMERATION( + "WebRTC.DesktopCapture.Win.WgcCaptureSessionGetFrameResult", + static_cast(error), static_cast(GetFrameResult::kMaxValue)); +} + +} // namespace + +WgcCaptureSession::WgcCaptureSession(ComPtr d3d11_device, + ComPtr item) + : d3d11_device_(std::move(d3d11_device)), item_(std::move(item)) {} +WgcCaptureSession::~WgcCaptureSession() = default; + +HRESULT WgcCaptureSession::StartCapture() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!is_capture_started_); + + if (item_closed_) { + RTC_LOG(LS_ERROR) << "The target source has been closed."; + RecordStartCaptureResult(StartCaptureResult::kSourceClosed); + return E_ABORT; + } + + RTC_DCHECK(d3d11_device_); + RTC_DCHECK(item_); + + // Listen for the Closed event, to detect if the source we are capturing is + // closed (e.g. application window is closed or monitor is disconnected). If + // it is, we should abort the capture. 
+ auto closed_handler = + Microsoft::WRL::Callback>( + this, &WgcCaptureSession::OnItemClosed); + EventRegistrationToken item_closed_token; + HRESULT hr = item_->add_Closed(closed_handler.Get(), &item_closed_token); + if (FAILED(hr)) { + RecordStartCaptureResult(StartCaptureResult::kAddClosedFailed); + return hr; + } + + ComPtr dxgi_device; + hr = d3d11_device_->QueryInterface(IID_PPV_ARGS(&dxgi_device)); + if (FAILED(hr)) { + RecordStartCaptureResult(StartCaptureResult::kDxgiDeviceCastFailed); + return hr; + } + + if (!ResolveCoreWinRTDirect3DDelayload()) { + RecordStartCaptureResult(StartCaptureResult::kD3dDelayLoadFailed); + return E_FAIL; + } + + hr = CreateDirect3DDeviceFromDXGIDevice(dxgi_device.Get(), &direct3d_device_); + if (FAILED(hr)) { + RecordStartCaptureResult(StartCaptureResult::kD3dDeviceCreationFailed); + return hr; + } + + ComPtr frame_pool_statics; + hr = GetActivationFactory< + ABI::Windows::Graphics::Capture::IDirect3D11CaptureFramePoolStatics, + RuntimeClass_Windows_Graphics_Capture_Direct3D11CaptureFramePool>( + &frame_pool_statics); + if (FAILED(hr)) { + RecordStartCaptureResult(StartCaptureResult::kFramePoolActivationFailed); + return hr; + } + + // Cast to FramePoolStatics2 so we can use CreateFreeThreaded and avoid the + // need to have a DispatcherQueue. We don't listen for the FrameArrived event, + // so there's no difference. 
+ ComPtr frame_pool_statics2; + hr = frame_pool_statics->QueryInterface(IID_PPV_ARGS(&frame_pool_statics2)); + if (FAILED(hr)) { + RecordStartCaptureResult(StartCaptureResult::kFramePoolCastFailed); + return hr; + } + + ABI::Windows::Graphics::SizeInt32 item_size; + hr = item_.Get()->get_Size(&item_size); + if (FAILED(hr)) { + RecordStartCaptureResult(StartCaptureResult::kGetItemSizeFailed); + return hr; + } + + previous_size_ = item_size; + + hr = frame_pool_statics2->CreateFreeThreaded(direct3d_device_.Get(), + kPixelFormat, kNumBuffers, + item_size, &frame_pool_); + if (FAILED(hr)) { + RecordStartCaptureResult(StartCaptureResult::kCreateFreeThreadedFailed); + return hr; + } + + hr = frame_pool_->CreateCaptureSession(item_.Get(), &session_); + if (FAILED(hr)) { + RecordStartCaptureResult(StartCaptureResult::kCreateCaptureSessionFailed); + return hr; + } + + hr = session_->StartCapture(); + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "Failed to start CaptureSession: " << hr; + RecordStartCaptureResult(StartCaptureResult::kStartCaptureFailed); + return hr; + } + + RecordStartCaptureResult(StartCaptureResult::kSuccess); + + is_capture_started_ = true; + return hr; +} + +HRESULT WgcCaptureSession::GetFrame( + std::unique_ptr* output_frame) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + + if (item_closed_) { + RTC_LOG(LS_ERROR) << "The target source has been closed."; + RecordGetFrameResult(GetFrameResult::kItemClosed); + return E_ABORT; + } + + RTC_DCHECK(is_capture_started_); + + ComPtr capture_frame; + HRESULT hr = frame_pool_->TryGetNextFrame(&capture_frame); + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "TryGetNextFrame failed: " << hr; + RecordGetFrameResult(GetFrameResult::kTryGetNextFrameFailed); + return hr; + } + + if (!capture_frame) { + RecordGetFrameResult(GetFrameResult::kFrameDropped); + return hr; + } + + // We need to get this CaptureFrame as an ID3D11Texture2D so that we can get + // the raw image data in the format required by the DesktopFrame interface. 
+ ComPtr + d3d_surface; + hr = capture_frame->get_Surface(&d3d_surface); + if (FAILED(hr)) { + RecordGetFrameResult(GetFrameResult::kGetSurfaceFailed); + return hr; + } + + ComPtr + direct3DDxgiInterfaceAccess; + hr = d3d_surface->QueryInterface(IID_PPV_ARGS(&direct3DDxgiInterfaceAccess)); + if (FAILED(hr)) { + RecordGetFrameResult(GetFrameResult::kDxgiInterfaceAccessFailed); + return hr; + } + + ComPtr texture_2D; + hr = direct3DDxgiInterfaceAccess->GetInterface(IID_PPV_ARGS(&texture_2D)); + if (FAILED(hr)) { + RecordGetFrameResult(GetFrameResult::kTexture2dCastFailed); + return hr; + } + + if (!mapped_texture_) { + hr = CreateMappedTexture(texture_2D); + if (FAILED(hr)) { + RecordGetFrameResult(GetFrameResult::kCreateMappedTextureFailed); + return hr; + } + } + + // We need to copy |texture_2D| into |mapped_texture_| as the latter has the + // D3D11_CPU_ACCESS_READ flag set, which lets us access the image data. + // Otherwise it would only be readable by the GPU. + ComPtr d3d_context; + d3d11_device_->GetImmediateContext(&d3d_context); + d3d_context->CopyResource(mapped_texture_.Get(), texture_2D.Get()); + + D3D11_MAPPED_SUBRESOURCE map_info; + hr = d3d_context->Map(mapped_texture_.Get(), /*subresource_index=*/0, + D3D11_MAP_READ, /*D3D11_MAP_FLAG_DO_NOT_WAIT=*/0, + &map_info); + if (FAILED(hr)) { + RecordGetFrameResult(GetFrameResult::kMapFrameFailed); + return hr; + } + + ABI::Windows::Graphics::SizeInt32 new_size; + hr = capture_frame->get_ContentSize(&new_size); + if (FAILED(hr)) { + RecordGetFrameResult(GetFrameResult::kGetContentSizeFailed); + return hr; + } + + // If the size has changed since the last capture, we must be sure to use + // the smaller dimensions. Otherwise we might overrun our buffer, or + // read stale data from the last frame. 
+ int image_height = std::min(previous_size_.Height, new_size.Height); + int image_width = std::min(previous_size_.Width, new_size.Width); + int row_data_length = image_width * DesktopFrame::kBytesPerPixel; + + // Make a copy of the data pointed to by |map_info.pData| so we are free to + // unmap our texture. + uint8_t* src_data = static_cast(map_info.pData); + std::vector image_data; + image_data.reserve(image_height * row_data_length); + uint8_t* image_data_ptr = image_data.data(); + for (int i = 0; i < image_height; i++) { + memcpy(image_data_ptr, src_data, row_data_length); + image_data_ptr += row_data_length; + src_data += map_info.RowPitch; + } + + // Transfer ownership of |image_data| to the output_frame. + DesktopSize size(image_width, image_height); + *output_frame = std::make_unique(size, row_data_length, + std::move(image_data)); + + d3d_context->Unmap(mapped_texture_.Get(), 0); + + // If the size changed, we must resize the texture and frame pool to fit the + // new size. + if (previous_size_.Height != new_size.Height || + previous_size_.Width != new_size.Width) { + hr = CreateMappedTexture(texture_2D, new_size.Width, new_size.Height); + if (FAILED(hr)) { + RecordGetFrameResult(GetFrameResult::kResizeMappedTextureFailed); + return hr; + } + + hr = frame_pool_->Recreate(direct3d_device_.Get(), kPixelFormat, + kNumBuffers, new_size); + if (FAILED(hr)) { + RecordGetFrameResult(GetFrameResult::kRecreateFramePoolFailed); + return hr; + } + } + + RecordGetFrameResult(GetFrameResult::kSuccess); + + previous_size_ = new_size; + return hr; +} + +HRESULT WgcCaptureSession::CreateMappedTexture( + ComPtr src_texture, + UINT width, + UINT height) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + + D3D11_TEXTURE2D_DESC src_desc; + src_texture->GetDesc(&src_desc); + D3D11_TEXTURE2D_DESC map_desc; + map_desc.Width = width == 0 ? src_desc.Width : width; + map_desc.Height = height == 0 ? 
src_desc.Height : height; + map_desc.MipLevels = src_desc.MipLevels; + map_desc.ArraySize = src_desc.ArraySize; + map_desc.Format = src_desc.Format; + map_desc.SampleDesc = src_desc.SampleDesc; + map_desc.Usage = D3D11_USAGE_STAGING; + map_desc.BindFlags = 0; + map_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ; + map_desc.MiscFlags = 0; + return d3d11_device_->CreateTexture2D(&map_desc, nullptr, &mapped_texture_); +} + +HRESULT WgcCaptureSession::OnItemClosed(WGC::IGraphicsCaptureItem* sender, + IInspectable* event_args) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + + RTC_LOG(LS_INFO) << "Capture target has been closed."; + item_closed_ = true; + is_capture_started_ = false; + + mapped_texture_ = nullptr; + session_ = nullptr; + frame_pool_ = nullptr; + direct3d_device_ = nullptr; + item_ = nullptr; + d3d11_device_ = nullptr; + + return S_OK; +} + +} // namespace webrtc diff --git a/modules/desktop_capture/win/wgc_capture_session.h b/modules/desktop_capture/win/wgc_capture_session.h new file mode 100644 index 0000000000..9f08b7cf2d --- /dev/null +++ b/modules/desktop_capture/win/wgc_capture_session.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SESSION_H_ +#define MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SESSION_H_ + +#include +#include +#include + +#include + +#include "api/sequence_checker.h" +#include "modules/desktop_capture/desktop_capture_options.h" +#include "modules/desktop_capture/win/wgc_capture_source.h" + +namespace webrtc { + +class WgcCaptureSession final { + public: + WgcCaptureSession( + Microsoft::WRL::ComPtr d3d11_device, + Microsoft::WRL::ComPtr< + ABI::Windows::Graphics::Capture::IGraphicsCaptureItem> item); + + // Disallow copy and assign. + WgcCaptureSession(const WgcCaptureSession&) = delete; + WgcCaptureSession& operator=(const WgcCaptureSession&) = delete; + + ~WgcCaptureSession(); + + HRESULT StartCapture(); + + // Returns a frame from the frame pool, if any are present. + HRESULT GetFrame(std::unique_ptr* output_frame); + + bool IsCaptureStarted() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return is_capture_started_; + } + + private: + // Initializes |mapped_texture_| with the properties of the |src_texture|, + // overrides the values of some necessary properties like the + // D3D11_CPU_ACCESS_READ flag. Also has optional parameters for what size + // |mapped_texture_| should be, if they aren't provided we will use the size + // of |src_texture|. + HRESULT CreateMappedTexture( + Microsoft::WRL::ComPtr src_texture, + UINT width = 0, + UINT height = 0); + + // Event handler for |item_|'s Closed event. + HRESULT OnItemClosed( + ABI::Windows::Graphics::Capture::IGraphicsCaptureItem* sender, + IInspectable* event_args); + + // A Direct3D11 Device provided by the caller. We use this to create an + // IDirect3DDevice, and also to create textures that will hold the image data. + Microsoft::WRL::ComPtr d3d11_device_; + + // This item represents what we are capturing, we use it to create the + // capture session, and also to listen for the Closed event. 
+ Microsoft::WRL::ComPtr + item_; + + // The IDirect3DDevice is necessary to instantiate the frame pool. + Microsoft::WRL::ComPtr< + ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice> + direct3d_device_; + + // The frame pool is where frames are deposited during capture, we retrieve + // them from here with TryGetNextFrame(). + Microsoft::WRL::ComPtr< + ABI::Windows::Graphics::Capture::IDirect3D11CaptureFramePool> + frame_pool_; + + // This texture holds the final image data. We made it a member so we can + // reuse it, instead of having to create a new texture every time we grab a + // frame. + Microsoft::WRL::ComPtr mapped_texture_; + + // This lets us know when the source has been resized, which is important + // because we must resize the framepool and our texture to be able to hold + // enough data for the frame. + ABI::Windows::Graphics::SizeInt32 previous_size_; + + // The capture session lets us set properties about the capture before it + // starts such as whether to capture the mouse cursor, and it lets us tell WGC + // to start capturing frames. + Microsoft::WRL::ComPtr< + ABI::Windows::Graphics::Capture::IGraphicsCaptureSession> + session_; + + bool item_closed_ = false; + bool is_capture_started_ = false; + + SequenceChecker sequence_checker_; +}; + +} // namespace webrtc + +#endif // MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SESSION_H_ diff --git a/modules/desktop_capture/win/wgc_capture_source.cc b/modules/desktop_capture/win/wgc_capture_source.cc new file mode 100644 index 0000000000..9786ca67b5 --- /dev/null +++ b/modules/desktop_capture/win/wgc_capture_source.cc @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/win/wgc_capture_source.h" + +#include +#include + +#include + +#include "modules/desktop_capture/win/screen_capture_utils.h" +#include "modules/desktop_capture/win/window_capture_utils.h" +#include "rtc_base/win/get_activation_factory.h" + +using Microsoft::WRL::ComPtr; +namespace WGC = ABI::Windows::Graphics::Capture; + +namespace webrtc { + +WgcCaptureSource::WgcCaptureSource(DesktopCapturer::SourceId source_id) + : source_id_(source_id) {} +WgcCaptureSource::~WgcCaptureSource() = default; + +bool WgcCaptureSource::IsCapturable() { + // If we can create a capture item, then we can capture it. Unfortunately, + // we can't cache this item because it may be created in a different COM + // apartment than where capture will eventually start from. + ComPtr item; + return SUCCEEDED(CreateCaptureItem(&item)); +} + +bool WgcCaptureSource::FocusOnSource() { + return false; +} + +HRESULT WgcCaptureSource::GetCaptureItem( + ComPtr* result) { + HRESULT hr = S_OK; + if (!item_) + hr = CreateCaptureItem(&item_); + + *result = item_; + return hr; +} + +WgcCaptureSourceFactory::~WgcCaptureSourceFactory() = default; + +WgcWindowSourceFactory::WgcWindowSourceFactory() = default; +WgcWindowSourceFactory::~WgcWindowSourceFactory() = default; + +std::unique_ptr WgcWindowSourceFactory::CreateCaptureSource( + DesktopCapturer::SourceId source_id) { + return std::make_unique(source_id); +} + +WgcScreenSourceFactory::WgcScreenSourceFactory() = default; +WgcScreenSourceFactory::~WgcScreenSourceFactory() = default; + +std::unique_ptr WgcScreenSourceFactory::CreateCaptureSource( + DesktopCapturer::SourceId source_id) { + return std::make_unique(source_id); +} + +WgcWindowSource::WgcWindowSource(DesktopCapturer::SourceId source_id) + : WgcCaptureSource(source_id) {} +WgcWindowSource::~WgcWindowSource() = default; + +DesktopVector 
WgcWindowSource::GetTopLeft() { + DesktopRect window_rect; + if (!GetWindowRect(reinterpret_cast(GetSourceId()), &window_rect)) + return DesktopVector(); + + return window_rect.top_left(); +} + +bool WgcWindowSource::IsCapturable() { + if (!IsWindowValidAndVisible(reinterpret_cast(GetSourceId()))) + return false; + + return WgcCaptureSource::IsCapturable(); +} + +bool WgcWindowSource::FocusOnSource() { + if (!IsWindowValidAndVisible(reinterpret_cast(GetSourceId()))) + return false; + + return ::BringWindowToTop(reinterpret_cast(GetSourceId())) && + ::SetForegroundWindow(reinterpret_cast(GetSourceId())); +} + +HRESULT WgcWindowSource::CreateCaptureItem( + ComPtr* result) { + if (!ResolveCoreWinRTDelayload()) + return E_FAIL; + + ComPtr interop; + HRESULT hr = GetActivationFactory< + IGraphicsCaptureItemInterop, + RuntimeClass_Windows_Graphics_Capture_GraphicsCaptureItem>(&interop); + if (FAILED(hr)) + return hr; + + ComPtr item; + hr = interop->CreateForWindow(reinterpret_cast(GetSourceId()), + IID_PPV_ARGS(&item)); + if (FAILED(hr)) + return hr; + + if (!item) + return E_HANDLE; + + *result = std::move(item); + return hr; +} + +WgcScreenSource::WgcScreenSource(DesktopCapturer::SourceId source_id) + : WgcCaptureSource(source_id) { + // Getting the HMONITOR could fail if the source_id is invalid. In that case, + // we leave hmonitor_ uninitialized and |IsCapturable()| will fail. 
+ HMONITOR hmon; + if (GetHmonitorFromDeviceIndex(GetSourceId(), &hmon)) + hmonitor_ = hmon; +} + +WgcScreenSource::~WgcScreenSource() = default; + +DesktopVector WgcScreenSource::GetTopLeft() { + if (!hmonitor_) + return DesktopVector(); + + return GetMonitorRect(*hmonitor_).top_left(); +} + +bool WgcScreenSource::IsCapturable() { + if (!hmonitor_) + return false; + + if (!IsMonitorValid(*hmonitor_)) + return false; + + return WgcCaptureSource::IsCapturable(); +} + +HRESULT WgcScreenSource::CreateCaptureItem( + ComPtr* result) { + if (!hmonitor_) + return E_ABORT; + + if (!ResolveCoreWinRTDelayload()) + return E_FAIL; + + ComPtr interop; + HRESULT hr = GetActivationFactory< + IGraphicsCaptureItemInterop, + RuntimeClass_Windows_Graphics_Capture_GraphicsCaptureItem>(&interop); + if (FAILED(hr)) + return hr; + + ComPtr item; + hr = interop->CreateForMonitor(*hmonitor_, IID_PPV_ARGS(&item)); + if (FAILED(hr)) + return hr; + + if (!item) + return E_HANDLE; + + *result = std::move(item); + return hr; +} + +} // namespace webrtc diff --git a/modules/desktop_capture/win/wgc_capture_source.h b/modules/desktop_capture/win/wgc_capture_source.h new file mode 100644 index 0000000000..135f92bb84 --- /dev/null +++ b/modules/desktop_capture/win/wgc_capture_source.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SOURCE_H_ +#define MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SOURCE_H_ + +#include +#include + +#include + +#include "absl/types/optional.h" +#include "modules/desktop_capture/desktop_capturer.h" +#include "modules/desktop_capture/desktop_geometry.h" + +namespace webrtc { + +// Abstract class to represent the source that WGC-based capturers capture +// from. Could represent an application window or a screen. Consumers should use +// the appropriate Wgc*SourceFactory class to create WgcCaptureSource objects +// of the appropriate type. +class WgcCaptureSource { + public: + explicit WgcCaptureSource(DesktopCapturer::SourceId source_id); + virtual ~WgcCaptureSource(); + + virtual DesktopVector GetTopLeft() = 0; + virtual bool IsCapturable(); + virtual bool FocusOnSource(); + HRESULT GetCaptureItem( + Microsoft::WRL::ComPtr< + ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>* result); + DesktopCapturer::SourceId GetSourceId() { return source_id_; } + + protected: + virtual HRESULT CreateCaptureItem( + Microsoft::WRL::ComPtr< + ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>* result) = 0; + + private: + Microsoft::WRL::ComPtr + item_; + const DesktopCapturer::SourceId source_id_; +}; + +class WgcCaptureSourceFactory { + public: + virtual ~WgcCaptureSourceFactory(); + + virtual std::unique_ptr CreateCaptureSource( + DesktopCapturer::SourceId) = 0; +}; + +class WgcWindowSourceFactory final : public WgcCaptureSourceFactory { + public: + WgcWindowSourceFactory(); + + // Disallow copy and assign. 
+ WgcWindowSourceFactory(const WgcWindowSourceFactory&) = delete; + WgcWindowSourceFactory& operator=(const WgcWindowSourceFactory&) = delete; + + ~WgcWindowSourceFactory() override; + + std::unique_ptr CreateCaptureSource( + DesktopCapturer::SourceId) override; +}; + +class WgcScreenSourceFactory final : public WgcCaptureSourceFactory { + public: + WgcScreenSourceFactory(); + + WgcScreenSourceFactory(const WgcScreenSourceFactory&) = delete; + WgcScreenSourceFactory& operator=(const WgcScreenSourceFactory&) = delete; + + ~WgcScreenSourceFactory() override; + + std::unique_ptr CreateCaptureSource( + DesktopCapturer::SourceId) override; +}; + +// Class for capturing application windows. +class WgcWindowSource final : public WgcCaptureSource { + public: + explicit WgcWindowSource(DesktopCapturer::SourceId source_id); + + WgcWindowSource(const WgcWindowSource&) = delete; + WgcWindowSource& operator=(const WgcWindowSource&) = delete; + + ~WgcWindowSource() override; + + DesktopVector GetTopLeft() override; + bool IsCapturable() override; + bool FocusOnSource() override; + + private: + HRESULT CreateCaptureItem( + Microsoft::WRL::ComPtr< + ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>* result) + override; +}; + +// Class for capturing screens/monitors/displays. +class WgcScreenSource final : public WgcCaptureSource { + public: + explicit WgcScreenSource(DesktopCapturer::SourceId source_id); + + WgcScreenSource(const WgcScreenSource&) = delete; + WgcScreenSource& operator=(const WgcScreenSource&) = delete; + + ~WgcScreenSource() override; + + DesktopVector GetTopLeft() override; + bool IsCapturable() override; + + private: + HRESULT CreateCaptureItem( + Microsoft::WRL::ComPtr< + ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>* result) + override; + + // To maintain compatibility with other capturers, this class accepts a + // device index as it's SourceId. However, WGC requires we use an HMONITOR to + // describe which screen to capture. 
So, we internally convert the supplied + // device index into an HMONITOR when |IsCapturable()| is called. + absl::optional hmonitor_; +}; + +} // namespace webrtc + +#endif // MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURE_SOURCE_H_ diff --git a/modules/desktop_capture/win/wgc_capture_source_unittest.cc b/modules/desktop_capture/win/wgc_capture_source_unittest.cc new file mode 100644 index 0000000000..a230e12578 --- /dev/null +++ b/modules/desktop_capture/win/wgc_capture_source_unittest.cc @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/win/wgc_capture_source.h" + +#include +#include + +#include + +#include "modules/desktop_capture/desktop_capture_types.h" +#include "modules/desktop_capture/desktop_geometry.h" +#include "modules/desktop_capture/win/screen_capture_utils.h" +#include "modules/desktop_capture/win/test_support/test_window.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/win/scoped_com_initializer.h" +#include "rtc_base/win/windows_version.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +const WCHAR kWindowTitle[] = L"WGC Capture Source Test Window"; + +const int kFirstXCoord = 25; +const int kFirstYCoord = 50; +const int kSecondXCoord = 50; +const int kSecondYCoord = 75; + +enum SourceType { kWindowSource = 0, kScreenSource = 1 }; + +} // namespace + +class WgcCaptureSourceTest : public ::testing::TestWithParam { + public: + void SetUp() override { + if (rtc::rtc_win::GetVersion() < rtc::rtc_win::Version::VERSION_WIN10_RS5) { + RTC_LOG(LS_INFO) + << "Skipping WgcCaptureSourceTests on Windows 
versions < RS5."; + GTEST_SKIP(); + } + + com_initializer_ = + std::make_unique(ScopedCOMInitializer::kMTA); + ASSERT_TRUE(com_initializer_->Succeeded()); + } + + void TearDown() override { + if (window_open_) { + DestroyTestWindow(window_info_); + } + } + + void SetUpForWindowSource() { + window_info_ = CreateTestWindow(kWindowTitle); + window_open_ = true; + source_id_ = reinterpret_cast(window_info_.hwnd); + source_factory_ = std::make_unique(); + } + + void SetUpForScreenSource() { + source_id_ = kFullDesktopScreenId; + source_factory_ = std::make_unique(); + } + + protected: + std::unique_ptr com_initializer_; + std::unique_ptr source_factory_; + std::unique_ptr source_; + DesktopCapturer::SourceId source_id_; + WindowInfo window_info_; + bool window_open_ = false; +}; + +// Window specific test +TEST_F(WgcCaptureSourceTest, WindowPosition) { + SetUpForWindowSource(); + source_ = source_factory_->CreateCaptureSource(source_id_); + ASSERT_TRUE(source_); + EXPECT_EQ(source_->GetSourceId(), source_id_); + + MoveTestWindow(window_info_.hwnd, kFirstXCoord, kFirstYCoord); + DesktopVector source_vector = source_->GetTopLeft(); + EXPECT_EQ(source_vector.x(), kFirstXCoord); + EXPECT_EQ(source_vector.y(), kFirstYCoord); + + MoveTestWindow(window_info_.hwnd, kSecondXCoord, kSecondYCoord); + source_vector = source_->GetTopLeft(); + EXPECT_EQ(source_vector.x(), kSecondXCoord); + EXPECT_EQ(source_vector.y(), kSecondYCoord); +} + +// Screen specific test +TEST_F(WgcCaptureSourceTest, ScreenPosition) { + SetUpForScreenSource(); + source_ = source_factory_->CreateCaptureSource(source_id_); + ASSERT_TRUE(source_); + EXPECT_EQ(source_id_, source_->GetSourceId()); + + DesktopRect screen_rect = GetFullscreenRect(); + DesktopVector source_vector = source_->GetTopLeft(); + EXPECT_EQ(source_vector.x(), screen_rect.left()); + EXPECT_EQ(source_vector.y(), screen_rect.top()); +} + +// Source agnostic test +TEST_P(WgcCaptureSourceTest, CreateSource) { + if (GetParam() == 
SourceType::kWindowSource) { + SetUpForWindowSource(); + } else { + SetUpForScreenSource(); + } + + source_ = source_factory_->CreateCaptureSource(source_id_); + ASSERT_TRUE(source_); + EXPECT_EQ(source_id_, source_->GetSourceId()); + EXPECT_TRUE(source_->IsCapturable()); + + Microsoft::WRL::ComPtr + item; + EXPECT_TRUE(SUCCEEDED(source_->GetCaptureItem(&item))); + EXPECT_TRUE(item); +} + +INSTANTIATE_TEST_SUITE_P(SourceAgnostic, + WgcCaptureSourceTest, + ::testing::Values(SourceType::kWindowSource, + SourceType::kScreenSource)); + +} // namespace webrtc diff --git a/modules/desktop_capture/win/wgc_capturer_win.cc b/modules/desktop_capture/win/wgc_capturer_win.cc new file mode 100644 index 0000000000..442c827a67 --- /dev/null +++ b/modules/desktop_capture/win/wgc_capturer_win.cc @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/desktop_capture/win/wgc_capturer_win.h" + +#include + +#include "modules/desktop_capture/desktop_capture_metrics_helper.h" +#include "modules/desktop_capture/desktop_capture_types.h" +#include "modules/desktop_capture/win/wgc_desktop_frame.h" +#include "rtc_base/logging.h" +#include "rtc_base/time_utils.h" +#include "system_wrappers/include/metrics.h" + +namespace WGC = ABI::Windows::Graphics::Capture; +using Microsoft::WRL::ComPtr; + +namespace webrtc { + +namespace { + +enum class WgcCapturerResult { + kSuccess = 0, + kNoDirect3dDevice = 1, + kNoSourceSelected = 2, + kItemCreationFailure = 3, + kSessionStartFailure = 4, + kGetFrameFailure = 5, + kFrameDropped = 6, + kMaxValue = kFrameDropped +}; + +void RecordWgcCapturerResult(WgcCapturerResult error) { + RTC_HISTOGRAM_ENUMERATION("WebRTC.DesktopCapture.Win.WgcCapturerResult", + static_cast(error), + static_cast(WgcCapturerResult::kMaxValue)); +} + +} // namespace + +WgcCapturerWin::WgcCapturerWin( + std::unique_ptr source_factory, + std::unique_ptr source_enumerator) + : source_factory_(std::move(source_factory)), + source_enumerator_(std::move(source_enumerator)) {} +WgcCapturerWin::~WgcCapturerWin() = default; + +// static +std::unique_ptr WgcCapturerWin::CreateRawWindowCapturer( + const DesktopCaptureOptions& options) { + return std::make_unique( + std::make_unique(), + std::make_unique( + options.enumerate_current_process_windows())); +} + +// static +std::unique_ptr WgcCapturerWin::CreateRawScreenCapturer( + const DesktopCaptureOptions& options) { + return std::make_unique( + std::make_unique(), + std::make_unique()); +} + +bool WgcCapturerWin::GetSourceList(SourceList* sources) { + return source_enumerator_->FindAllSources(sources); +} + +bool WgcCapturerWin::SelectSource(DesktopCapturer::SourceId id) { + capture_source_ = source_factory_->CreateCaptureSource(id); + return capture_source_->IsCapturable(); +} + +bool WgcCapturerWin::FocusOnSelectedSource() { + if (!capture_source_) 
+ return false; + + return capture_source_->FocusOnSource(); +} + +void WgcCapturerWin::Start(Callback* callback) { + RTC_DCHECK(!callback_); + RTC_DCHECK(callback); + RecordCapturerImpl(DesktopCapturerId::kWgcCapturerWin); + + callback_ = callback; + + // Create a Direct3D11 device to share amongst the WgcCaptureSessions. Many + // parameters are nullptr as the implemention uses defaults that work well for + // us. + HRESULT hr = D3D11CreateDevice( + /*adapter=*/nullptr, D3D_DRIVER_TYPE_HARDWARE, + /*software_rasterizer=*/nullptr, D3D11_CREATE_DEVICE_BGRA_SUPPORT, + /*feature_levels=*/nullptr, /*feature_levels_size=*/0, D3D11_SDK_VERSION, + &d3d11_device_, /*feature_level=*/nullptr, /*device_context=*/nullptr); + if (hr == DXGI_ERROR_UNSUPPORTED) { + // If a hardware device could not be created, use WARP which is a high speed + // software device. + hr = D3D11CreateDevice( + /*adapter=*/nullptr, D3D_DRIVER_TYPE_WARP, + /*software_rasterizer=*/nullptr, D3D11_CREATE_DEVICE_BGRA_SUPPORT, + /*feature_levels=*/nullptr, /*feature_levels_size=*/0, + D3D11_SDK_VERSION, &d3d11_device_, /*feature_level=*/nullptr, + /*device_context=*/nullptr); + } + + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "Failed to create D3D11Device: " << hr; + } +} + +void WgcCapturerWin::CaptureFrame() { + RTC_DCHECK(callback_); + + if (!capture_source_) { + RTC_LOG(LS_ERROR) << "Source hasn't been selected"; + callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT, + /*frame=*/nullptr); + RecordWgcCapturerResult(WgcCapturerResult::kNoSourceSelected); + return; + } + + if (!d3d11_device_) { + RTC_LOG(LS_ERROR) << "No D3D11D3evice, cannot capture."; + callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT, + /*frame=*/nullptr); + RecordWgcCapturerResult(WgcCapturerResult::kNoDirect3dDevice); + return; + } + + int64_t capture_start_time_nanos = rtc::TimeNanos(); + + HRESULT hr; + WgcCaptureSession* capture_session = nullptr; + std::map::iterator session_iter = + 
ongoing_captures_.find(capture_source_->GetSourceId()); + if (session_iter == ongoing_captures_.end()) { + ComPtr item; + hr = capture_source_->GetCaptureItem(&item); + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "Failed to create a GraphicsCaptureItem: " << hr; + callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT, + /*frame=*/nullptr); + RecordWgcCapturerResult(WgcCapturerResult::kItemCreationFailure); + return; + } + + std::pair::iterator, bool> + iter_success_pair = ongoing_captures_.emplace( + std::piecewise_construct, + std::forward_as_tuple(capture_source_->GetSourceId()), + std::forward_as_tuple(d3d11_device_, item)); + RTC_DCHECK(iter_success_pair.second); + capture_session = &iter_success_pair.first->second; + } else { + capture_session = &session_iter->second; + } + + if (!capture_session->IsCaptureStarted()) { + hr = capture_session->StartCapture(); + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "Failed to start capture: " << hr; + ongoing_captures_.erase(capture_source_->GetSourceId()); + callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT, + /*frame=*/nullptr); + RecordWgcCapturerResult(WgcCapturerResult::kSessionStartFailure); + return; + } + } + + std::unique_ptr frame; + hr = capture_session->GetFrame(&frame); + if (FAILED(hr)) { + RTC_LOG(LS_ERROR) << "GetFrame failed: " << hr; + ongoing_captures_.erase(capture_source_->GetSourceId()); + callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_PERMANENT, + /*frame=*/nullptr); + RecordWgcCapturerResult(WgcCapturerResult::kGetFrameFailure); + return; + } + + if (!frame) { + callback_->OnCaptureResult(DesktopCapturer::Result::ERROR_TEMPORARY, + /*frame=*/nullptr); + RecordWgcCapturerResult(WgcCapturerResult::kFrameDropped); + return; + } + + int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) / + rtc::kNumNanosecsPerMillisec; + RTC_HISTOGRAM_COUNTS_1000("WebRTC.DesktopCapture.Win.WgcCapturerFrameTime", + capture_time_ms); + 
frame->set_capture_time_ms(capture_time_ms); + frame->set_capturer_id(DesktopCapturerId::kWgcCapturerWin); + frame->set_may_contain_cursor(true); + frame->set_top_left(capture_source_->GetTopLeft()); + RecordWgcCapturerResult(WgcCapturerResult::kSuccess); + callback_->OnCaptureResult(DesktopCapturer::Result::SUCCESS, + std::move(frame)); +} + +bool WgcCapturerWin::IsSourceBeingCaptured(DesktopCapturer::SourceId id) { + std::map::iterator + session_iter = ongoing_captures_.find(id); + if (session_iter == ongoing_captures_.end()) + return false; + + return session_iter->second.IsCaptureStarted(); +} + +} // namespace webrtc diff --git a/modules/desktop_capture/win/wgc_capturer_win.h b/modules/desktop_capture/win/wgc_capturer_win.h new file mode 100644 index 0000000000..58f3fc318a --- /dev/null +++ b/modules/desktop_capture/win/wgc_capturer_win.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURER_WIN_H_ +#define MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURER_WIN_H_ + +#include +#include + +#include +#include + +#include "modules/desktop_capture/desktop_capture_options.h" +#include "modules/desktop_capture/desktop_capturer.h" +#include "modules/desktop_capture/win/screen_capture_utils.h" +#include "modules/desktop_capture/win/wgc_capture_session.h" +#include "modules/desktop_capture/win/wgc_capture_source.h" +#include "modules/desktop_capture/win/window_capture_utils.h" + +namespace webrtc { + +// WgcCapturerWin is initialized with an implementation of this base class, +// which it uses to find capturable sources of a particular type. 
This way, +// WgcCapturerWin can remain source-agnostic. +class SourceEnumerator { + public: + virtual ~SourceEnumerator() = default; + + virtual bool FindAllSources(DesktopCapturer::SourceList* sources) = 0; +}; + +class WindowEnumerator final : public SourceEnumerator { + public: + explicit WindowEnumerator(bool enumerate_current_process_windows) + : enumerate_current_process_windows_(enumerate_current_process_windows) {} + + WindowEnumerator(const WindowEnumerator&) = delete; + WindowEnumerator& operator=(const WindowEnumerator&) = delete; + + ~WindowEnumerator() override = default; + + bool FindAllSources(DesktopCapturer::SourceList* sources) override { + // WGC fails to capture windows with the WS_EX_TOOLWINDOW style, so we + // provide it as a filter to ensure windows with the style are not returned. + return window_capture_helper_.EnumerateCapturableWindows( + sources, enumerate_current_process_windows_, WS_EX_TOOLWINDOW); + } + + private: + WindowCaptureHelperWin window_capture_helper_; + bool enumerate_current_process_windows_; +}; + +class ScreenEnumerator final : public SourceEnumerator { + public: + ScreenEnumerator() = default; + + ScreenEnumerator(const ScreenEnumerator&) = delete; + ScreenEnumerator& operator=(const ScreenEnumerator&) = delete; + + ~ScreenEnumerator() override = default; + + bool FindAllSources(DesktopCapturer::SourceList* sources) override { + return webrtc::GetScreenList(sources); + } +}; + +// A capturer that uses the Window.Graphics.Capture APIs. It is suitable for +// both window and screen capture (but only one type per instance). Consumers +// should not instantiate this class directly, instead they should use +// |CreateRawWindowCapturer()| or |CreateRawScreenCapturer()| to receive a +// capturer appropriate for the type of source they want to capture. 
+class WgcCapturerWin : public DesktopCapturer { + public: + WgcCapturerWin(std::unique_ptr source_factory, + std::unique_ptr source_enumerator); + + WgcCapturerWin(const WgcCapturerWin&) = delete; + WgcCapturerWin& operator=(const WgcCapturerWin&) = delete; + + ~WgcCapturerWin() override; + + static std::unique_ptr CreateRawWindowCapturer( + const DesktopCaptureOptions& options); + + static std::unique_ptr CreateRawScreenCapturer( + const DesktopCaptureOptions& options); + + // DesktopCapturer interface. + bool GetSourceList(SourceList* sources) override; + bool SelectSource(SourceId id) override; + bool FocusOnSelectedSource() override; + void Start(Callback* callback) override; + void CaptureFrame() override; + + // Used in WgcCapturerTests. + bool IsSourceBeingCaptured(SourceId id); + + private: + // Factory to create a WgcCaptureSource for us whenever SelectSource is + // called. Initialized at construction with a source-specific implementation. + std::unique_ptr source_factory_; + + // The source enumerator helps us find capturable sources of the appropriate + // type. Initialized at construction with a source-specific implementation. + std::unique_ptr source_enumerator_; + + // The WgcCaptureSource represents the source we are capturing. It tells us + // if the source is capturable and it creates the GraphicsCaptureItem for us. + std::unique_ptr capture_source_; + + // A map of all the sources we are capturing and the associated + // WgcCaptureSession. Frames for the current source (indicated via + // SelectSource) will be retrieved from the appropriate session when + // requested via CaptureFrame. + // This helps us efficiently capture multiple sources (e.g. when consumers + // are trying to display a list of available capture targets with thumbnails). + std::map ongoing_captures_; + + // The callback that we deliver frames to, synchronously, before CaptureFrame + // returns. 
+ Callback* callback_ = nullptr; + + // A Direct3D11 device that is shared amongst the WgcCaptureSessions, who + // require one to perform the capture. + Microsoft::WRL::ComPtr<::ID3D11Device> d3d11_device_; +}; + +} // namespace webrtc + +#endif // MODULES_DESKTOP_CAPTURE_WIN_WGC_CAPTURER_WIN_H_ diff --git a/modules/desktop_capture/win/wgc_capturer_win_unittest.cc b/modules/desktop_capture/win/wgc_capturer_win_unittest.cc new file mode 100644 index 0000000000..ebfb576e63 --- /dev/null +++ b/modules/desktop_capture/win/wgc_capturer_win_unittest.cc @@ -0,0 +1,508 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/win/wgc_capturer_win.h" + +#include +#include +#include + +#include "modules/desktop_capture/desktop_capture_options.h" +#include "modules/desktop_capture/desktop_capture_types.h" +#include "modules/desktop_capture/desktop_capturer.h" +#include "modules/desktop_capture/win/test_support/test_window.h" +#include "modules/desktop_capture/win/window_capture_utils.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/thread.h" +#include "rtc_base/time_utils.h" +#include "rtc_base/win/scoped_com_initializer.h" +#include "rtc_base/win/windows_version.h" +#include "system_wrappers/include/metrics.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +const char kWindowThreadName[] = "wgc_capturer_test_window_thread"; +const WCHAR kWindowTitle[] = L"WGC Capturer Test Window"; + +const char kCapturerImplHistogram[] = + "WebRTC.DesktopCapture.Win.DesktopCapturerImpl"; + +const char kCapturerResultHistogram[] = + 
"WebRTC.DesktopCapture.Win.WgcCapturerResult"; +const int kSuccess = 0; +const int kSessionStartFailure = 4; + +const char kCaptureSessionResultHistogram[] = + "WebRTC.DesktopCapture.Win.WgcCaptureSessionStartResult"; +const int kSourceClosed = 1; + +const char kCaptureTimeHistogram[] = + "WebRTC.DesktopCapture.Win.WgcCapturerFrameTime"; + +const int kSmallWindowWidth = 200; +const int kSmallWindowHeight = 100; +const int kMediumWindowWidth = 300; +const int kMediumWindowHeight = 200; +const int kLargeWindowWidth = 400; +const int kLargeWindowHeight = 500; + +// The size of the image we capture is slightly smaller than the actual size of +// the window. +const int kWindowWidthSubtrahend = 14; +const int kWindowHeightSubtrahend = 7; + +// Custom message constants so we can direct our thread to close windows +// and quit running. +const UINT kNoOp = WM_APP; +const UINT kDestroyWindow = WM_APP + 1; +const UINT kQuitRunning = WM_APP + 2; + +enum CaptureType { kWindowCapture = 0, kScreenCapture = 1 }; + +} // namespace + +class WgcCapturerWinTest : public ::testing::TestWithParam, + public DesktopCapturer::Callback { + public: + void SetUp() override { + if (rtc::rtc_win::GetVersion() < rtc::rtc_win::Version::VERSION_WIN10_RS5) { + RTC_LOG(LS_INFO) + << "Skipping WgcCapturerWinTests on Windows versions < RS5."; + GTEST_SKIP(); + } + + com_initializer_ = + std::make_unique(ScopedCOMInitializer::kMTA); + EXPECT_TRUE(com_initializer_->Succeeded()); + } + + void SetUpForWindowCapture(int window_width = kMediumWindowWidth, + int window_height = kMediumWindowHeight) { + capturer_ = WgcCapturerWin::CreateRawWindowCapturer( + DesktopCaptureOptions::CreateDefault()); + CreateWindowOnSeparateThread(window_width, window_height); + StartWindowThreadMessageLoop(); + source_id_ = GetTestWindowIdFromSourceList(); + } + + void SetUpForScreenCapture() { + capturer_ = WgcCapturerWin::CreateRawScreenCapturer( + DesktopCaptureOptions::CreateDefault()); + source_id_ = 
GetScreenIdFromSourceList(); + } + + void TearDown() override { + if (window_open_) { + CloseTestWindow(); + } + } + + // The window must live on a separate thread so that we can run a message pump + // without blocking the test thread. This is necessary if we are interested in + // having GraphicsCaptureItem events (i.e. the Closed event) fire, and it more + // closely resembles how capture works in the wild. + void CreateWindowOnSeparateThread(int window_width, int window_height) { + window_thread_ = rtc::Thread::Create(); + window_thread_->SetName(kWindowThreadName, nullptr); + window_thread_->Start(); + window_thread_->Invoke(RTC_FROM_HERE, [this, window_width, + window_height]() { + window_thread_id_ = GetCurrentThreadId(); + window_info_ = + CreateTestWindow(kWindowTitle, window_height, window_width); + window_open_ = true; + + while (!IsWindowResponding(window_info_.hwnd)) { + RTC_LOG(LS_INFO) << "Waiting for test window to become responsive in " + "WgcWindowCaptureTest."; + } + + while (!IsWindowValidAndVisible(window_info_.hwnd)) { + RTC_LOG(LS_INFO) << "Waiting for test window to be visible in " + "WgcWindowCaptureTest."; + } + }); + + ASSERT_TRUE(window_thread_->RunningForTest()); + ASSERT_FALSE(window_thread_->IsCurrent()); + } + + void StartWindowThreadMessageLoop() { + window_thread_->PostTask(RTC_FROM_HERE, [this]() { + MSG msg; + BOOL gm; + while ((gm = ::GetMessage(&msg, NULL, 0, 0)) != 0 && gm != -1) { + ::DispatchMessage(&msg); + if (msg.message == kDestroyWindow) { + DestroyTestWindow(window_info_); + } + if (msg.message == kQuitRunning) { + PostQuitMessage(0); + } + } + }); + } + + void CloseTestWindow() { + ::PostThreadMessage(window_thread_id_, kDestroyWindow, 0, 0); + ::PostThreadMessage(window_thread_id_, kQuitRunning, 0, 0); + window_thread_->Stop(); + window_open_ = false; + } + + DesktopCapturer::SourceId GetTestWindowIdFromSourceList() { + // Frequently, the test window will not show up in GetSourceList because it + // was created too 
recently. Since we are confident the window will be found + // eventually we loop here until we find it. + intptr_t src_id; + do { + DesktopCapturer::SourceList sources; + EXPECT_TRUE(capturer_->GetSourceList(&sources)); + + auto it = std::find_if( + sources.begin(), sources.end(), + [&](const DesktopCapturer::Source& src) { + return src.id == reinterpret_cast(window_info_.hwnd); + }); + + src_id = it->id; + } while (src_id != reinterpret_cast(window_info_.hwnd)); + + return src_id; + } + + DesktopCapturer::SourceId GetScreenIdFromSourceList() { + DesktopCapturer::SourceList sources; + EXPECT_TRUE(capturer_->GetSourceList(&sources)); + EXPECT_GT(sources.size(), 0ULL); + return sources[0].id; + } + + void DoCapture() { + // Sometimes the first few frames are empty becaues the capture engine is + // still starting up. We also may drop a few frames when the window is + // resized or un-minimized. + do { + capturer_->CaptureFrame(); + } while (result_ == DesktopCapturer::Result::ERROR_TEMPORARY); + + EXPECT_EQ(result_, DesktopCapturer::Result::SUCCESS); + EXPECT_TRUE(frame_); + + EXPECT_GT(metrics::NumEvents(kCapturerResultHistogram, kSuccess), + successful_captures_); + ++successful_captures_; + } + + void ValidateFrame(int expected_width, int expected_height) { + EXPECT_EQ(frame_->size().width(), expected_width - kWindowWidthSubtrahend); + EXPECT_EQ(frame_->size().height(), + expected_height - kWindowHeightSubtrahend); + + // Verify the buffer contains as much data as it should, and that the right + // colors are found. + int data_length = frame_->stride() * frame_->size().height(); + + // The first and last pixel should have the same color because they will be + // from the border of the window. + // Pixels have 4 bytes of data so the whole pixel needs a uint32_t to fit. 
+ uint32_t first_pixel = static_cast(*frame_->data()); + uint32_t last_pixel = static_cast( + *(frame_->data() + data_length - DesktopFrame::kBytesPerPixel)); + EXPECT_EQ(first_pixel, last_pixel); + + // Let's also check a pixel from the middle of the content area, which the + // TestWindow will paint a consistent color for us to verify. + uint8_t* middle_pixel = frame_->data() + (data_length / 2); + + int sub_pixel_offset = DesktopFrame::kBytesPerPixel / 4; + EXPECT_EQ(*middle_pixel, kTestWindowBValue); + middle_pixel += sub_pixel_offset; + EXPECT_EQ(*middle_pixel, kTestWindowGValue); + middle_pixel += sub_pixel_offset; + EXPECT_EQ(*middle_pixel, kTestWindowRValue); + middle_pixel += sub_pixel_offset; + + // The window is opaque so we expect 0xFF for the Alpha channel. + EXPECT_EQ(*middle_pixel, 0xFF); + } + + // DesktopCapturer::Callback interface + // The capturer synchronously invokes this method before |CaptureFrame()| + // returns. + void OnCaptureResult(DesktopCapturer::Result result, + std::unique_ptr frame) override { + result_ = result; + frame_ = std::move(frame); + } + + protected: + std::unique_ptr com_initializer_; + DWORD window_thread_id_; + std::unique_ptr window_thread_; + WindowInfo window_info_; + intptr_t source_id_; + bool window_open_ = false; + DesktopCapturer::Result result_; + int successful_captures_ = 0; + std::unique_ptr frame_; + std::unique_ptr capturer_; +}; + +TEST_P(WgcCapturerWinTest, SelectValidSource) { + if (GetParam() == CaptureType::kWindowCapture) { + SetUpForWindowCapture(); + } else { + SetUpForScreenCapture(); + } + + EXPECT_TRUE(capturer_->SelectSource(source_id_)); +} + +TEST_P(WgcCapturerWinTest, SelectInvalidSource) { + if (GetParam() == CaptureType::kWindowCapture) { + capturer_ = WgcCapturerWin::CreateRawWindowCapturer( + DesktopCaptureOptions::CreateDefault()); + source_id_ = kNullWindowId; + } else { + capturer_ = WgcCapturerWin::CreateRawScreenCapturer( + DesktopCaptureOptions::CreateDefault()); + source_id_ = 
kInvalidScreenId; + } + + EXPECT_FALSE(capturer_->SelectSource(source_id_)); +} + +TEST_P(WgcCapturerWinTest, Capture) { + if (GetParam() == CaptureType::kWindowCapture) { + SetUpForWindowCapture(); + } else { + SetUpForScreenCapture(); + } + + EXPECT_TRUE(capturer_->SelectSource(source_id_)); + + capturer_->Start(this); + EXPECT_GE(metrics::NumEvents(kCapturerImplHistogram, + DesktopCapturerId::kWgcCapturerWin), + 1); + + DoCapture(); + EXPECT_GT(frame_->size().width(), 0); + EXPECT_GT(frame_->size().height(), 0); +} + +TEST_P(WgcCapturerWinTest, CaptureTime) { + if (GetParam() == CaptureType::kWindowCapture) { + SetUpForWindowCapture(); + } else { + SetUpForScreenCapture(); + } + + EXPECT_TRUE(capturer_->SelectSource(source_id_)); + capturer_->Start(this); + + int64_t start_time; + do { + start_time = rtc::TimeNanos(); + capturer_->CaptureFrame(); + } while (result_ == DesktopCapturer::Result::ERROR_TEMPORARY); + + int capture_time_ms = + (rtc::TimeNanos() - start_time) / rtc::kNumNanosecsPerMillisec; + EXPECT_TRUE(frame_); + + // The test may measure the time slightly differently than the capturer. So we + // just check if it's within 5 ms. + EXPECT_NEAR(frame_->capture_time_ms(), capture_time_ms, 5); + EXPECT_GE( + metrics::NumEvents(kCaptureTimeHistogram, frame_->capture_time_ms()), 1); +} + +INSTANTIATE_TEST_SUITE_P(SourceAgnostic, + WgcCapturerWinTest, + ::testing::Values(CaptureType::kWindowCapture, + CaptureType::kScreenCapture)); + +// Monitor specific tests. +TEST_F(WgcCapturerWinTest, FocusOnMonitor) { + SetUpForScreenCapture(); + EXPECT_TRUE(capturer_->SelectSource(0)); + + // You can't set focus on a monitor. 
+ EXPECT_FALSE(capturer_->FocusOnSelectedSource()); +} + +TEST_F(WgcCapturerWinTest, CaptureAllMonitors) { + SetUpForScreenCapture(); + EXPECT_TRUE(capturer_->SelectSource(kFullDesktopScreenId)); + + capturer_->Start(this); + DoCapture(); + EXPECT_GT(frame_->size().width(), 0); + EXPECT_GT(frame_->size().height(), 0); +} + +// Window specific tests. +TEST_F(WgcCapturerWinTest, FocusOnWindow) { + capturer_ = WgcCapturerWin::CreateRawWindowCapturer( + DesktopCaptureOptions::CreateDefault()); + window_info_ = CreateTestWindow(kWindowTitle); + source_id_ = GetScreenIdFromSourceList(); + + EXPECT_TRUE(capturer_->SelectSource(source_id_)); + EXPECT_TRUE(capturer_->FocusOnSelectedSource()); + + HWND hwnd = reinterpret_cast(source_id_); + EXPECT_EQ(hwnd, ::GetActiveWindow()); + EXPECT_EQ(hwnd, ::GetForegroundWindow()); + EXPECT_EQ(hwnd, ::GetFocus()); + DestroyTestWindow(window_info_); +} + +TEST_F(WgcCapturerWinTest, SelectMinimizedWindow) { + SetUpForWindowCapture(); + MinimizeTestWindow(reinterpret_cast(source_id_)); + EXPECT_FALSE(capturer_->SelectSource(source_id_)); + + UnminimizeTestWindow(reinterpret_cast(source_id_)); + EXPECT_TRUE(capturer_->SelectSource(source_id_)); +} + +TEST_F(WgcCapturerWinTest, SelectClosedWindow) { + SetUpForWindowCapture(); + EXPECT_TRUE(capturer_->SelectSource(source_id_)); + + CloseTestWindow(); + EXPECT_FALSE(capturer_->SelectSource(source_id_)); +} + +TEST_F(WgcCapturerWinTest, UnsupportedWindowStyle) { + // Create a window with the WS_EX_TOOLWINDOW style, which WGC does not + // support. 
+ window_info_ = CreateTestWindow(kWindowTitle, kMediumWindowWidth, + kMediumWindowHeight, WS_EX_TOOLWINDOW); + capturer_ = WgcCapturerWin::CreateRawWindowCapturer( + DesktopCaptureOptions::CreateDefault()); + DesktopCapturer::SourceList sources; + EXPECT_TRUE(capturer_->GetSourceList(&sources)); + auto it = std::find_if( + sources.begin(), sources.end(), [&](const DesktopCapturer::Source& src) { + return src.id == reinterpret_cast(window_info_.hwnd); + }); + + // We should not find the window, since we filter for unsupported styles. + EXPECT_EQ(it, sources.end()); + DestroyTestWindow(window_info_); +} + +TEST_F(WgcCapturerWinTest, IncreaseWindowSizeMidCapture) { + SetUpForWindowCapture(kSmallWindowWidth, kSmallWindowHeight); + EXPECT_TRUE(capturer_->SelectSource(source_id_)); + + capturer_->Start(this); + DoCapture(); + ValidateFrame(kSmallWindowWidth, kSmallWindowHeight); + + ResizeTestWindow(window_info_.hwnd, kSmallWindowWidth, kMediumWindowHeight); + DoCapture(); + // We don't expect to see the new size until the next capture, as the frame + // pool hadn't had a chance to resize yet to fit the new, larger image. + DoCapture(); + ValidateFrame(kSmallWindowWidth, kMediumWindowHeight); + + ResizeTestWindow(window_info_.hwnd, kLargeWindowWidth, kMediumWindowHeight); + DoCapture(); + DoCapture(); + ValidateFrame(kLargeWindowWidth, kMediumWindowHeight); +} + +TEST_F(WgcCapturerWinTest, ReduceWindowSizeMidCapture) { + SetUpForWindowCapture(kLargeWindowWidth, kLargeWindowHeight); + EXPECT_TRUE(capturer_->SelectSource(source_id_)); + + capturer_->Start(this); + DoCapture(); + ValidateFrame(kLargeWindowWidth, kLargeWindowHeight); + + ResizeTestWindow(window_info_.hwnd, kLargeWindowWidth, kMediumWindowHeight); + // We expect to see the new size immediately because the image data has shrunk + // and will fit in the existing buffer. 
+ DoCapture(); + ValidateFrame(kLargeWindowWidth, kMediumWindowHeight); + + ResizeTestWindow(window_info_.hwnd, kSmallWindowWidth, kMediumWindowHeight); + DoCapture(); + ValidateFrame(kSmallWindowWidth, kMediumWindowHeight); +} + +TEST_F(WgcCapturerWinTest, MinimizeWindowMidCapture) { + SetUpForWindowCapture(); + EXPECT_TRUE(capturer_->SelectSource(source_id_)); + + capturer_->Start(this); + + // Minmize the window and capture should continue but return temporary errors. + MinimizeTestWindow(window_info_.hwnd); + for (int i = 0; i < 10; ++i) { + capturer_->CaptureFrame(); + EXPECT_EQ(result_, DesktopCapturer::Result::ERROR_TEMPORARY); + } + + // Reopen the window and the capture should continue normally. + UnminimizeTestWindow(window_info_.hwnd); + DoCapture(); + // We can't verify the window size here because the test window does not + // repaint itself after it is unminimized, but capturing successfully is still + // a good test. +} + +TEST_F(WgcCapturerWinTest, CloseWindowMidCapture) { + SetUpForWindowCapture(); + EXPECT_TRUE(capturer_->SelectSource(source_id_)); + + capturer_->Start(this); + DoCapture(); + ValidateFrame(kMediumWindowWidth, kMediumWindowHeight); + + CloseTestWindow(); + + // We need to call GetMessage to trigger the Closed event and the capturer's + // event handler for it. If we are too early and the Closed event hasn't + // arrived yet we should keep trying until the capturer receives it and stops. + auto* wgc_capturer = static_cast(capturer_.get()); + while (wgc_capturer->IsSourceBeingCaptured(source_id_)) { + // Since the capturer handles the Closed message, there will be no message + // for us and GetMessage will hang, unless we send ourselves a message + // first. + ::PostThreadMessage(GetCurrentThreadId(), kNoOp, 0, 0); + MSG msg; + ::GetMessage(&msg, NULL, 0, 0); + ::DispatchMessage(&msg); + } + + // Occasionally, one last frame will have made it into the frame pool before + // the window closed. 
The first call will consume it, and in that case we need + // to make one more call to CaptureFrame. + capturer_->CaptureFrame(); + if (result_ == DesktopCapturer::Result::SUCCESS) + capturer_->CaptureFrame(); + + EXPECT_GE(metrics::NumEvents(kCapturerResultHistogram, kSessionStartFailure), + 1); + EXPECT_GE(metrics::NumEvents(kCaptureSessionResultHistogram, kSourceClosed), + 1); + EXPECT_EQ(result_, DesktopCapturer::Result::ERROR_PERMANENT); +} + +} // namespace webrtc diff --git a/modules/desktop_capture/win/wgc_desktop_frame.cc b/modules/desktop_capture/win/wgc_desktop_frame.cc new file mode 100644 index 0000000000..dd9009120b --- /dev/null +++ b/modules/desktop_capture/win/wgc_desktop_frame.cc @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/win/wgc_desktop_frame.h" + +#include + +namespace webrtc { + +WgcDesktopFrame::WgcDesktopFrame(DesktopSize size, + int stride, + std::vector&& image_data) + : DesktopFrame(size, stride, image_data.data(), nullptr), + image_data_(std::move(image_data)) {} + +WgcDesktopFrame::~WgcDesktopFrame() = default; + +} // namespace webrtc diff --git a/modules/desktop_capture/win/wgc_desktop_frame.h b/modules/desktop_capture/win/wgc_desktop_frame.h new file mode 100644 index 0000000000..0eca763f9e --- /dev/null +++ b/modules/desktop_capture/win/wgc_desktop_frame.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_DESKTOP_CAPTURE_WIN_WGC_DESKTOP_FRAME_H_ +#define MODULES_DESKTOP_CAPTURE_WIN_WGC_DESKTOP_FRAME_H_ + +#include +#include + +#include +#include + +#include "modules/desktop_capture/desktop_frame.h" +#include "modules/desktop_capture/desktop_geometry.h" + +namespace webrtc { + +// DesktopFrame implementation used by capturers that use the +// Windows.Graphics.Capture API. +class WgcDesktopFrame final : public DesktopFrame { + public: + // WgcDesktopFrame receives an rvalue reference to the |image_data| vector + // so that it can take ownership of it (and avoid a copy). + WgcDesktopFrame(DesktopSize size, + int stride, + std::vector&& image_data); + + WgcDesktopFrame(const WgcDesktopFrame&) = delete; + WgcDesktopFrame& operator=(const WgcDesktopFrame&) = delete; + + ~WgcDesktopFrame() override; + + private: + std::vector image_data_; +}; + +} // namespace webrtc + +#endif // MODULES_DESKTOP_CAPTURE_WIN_WGC_DESKTOP_FRAME_H_ diff --git a/modules/desktop_capture/win/window_capture_utils.cc b/modules/desktop_capture/win/window_capture_utils.cc index 226b564b64..aaaef0a80d 100644 --- a/modules/desktop_capture/win/window_capture_utils.cc +++ b/modules/desktop_capture/win/window_capture_utils.cc @@ -13,13 +13,135 @@ // Just for the DWMWINDOWATTRIBUTE enums (DWMWA_CLOAKED). 
#include +#include + #include "modules/desktop_capture/win/scoped_gdi_object.h" +#include "rtc_base/arraysize.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" +#include "rtc_base/string_utils.h" #include "rtc_base/win32.h" namespace webrtc { +namespace { + +struct GetWindowListParams { + GetWindowListParams(int flags, + LONG ex_style_filters, + DesktopCapturer::SourceList* result) + : ignore_untitled(flags & GetWindowListFlags::kIgnoreUntitled), + ignore_unresponsive(flags & GetWindowListFlags::kIgnoreUnresponsive), + ignore_current_process_windows( + flags & GetWindowListFlags::kIgnoreCurrentProcessWindows), + ex_style_filters(ex_style_filters), + result(result) {} + const bool ignore_untitled; + const bool ignore_unresponsive; + const bool ignore_current_process_windows; + const LONG ex_style_filters; + DesktopCapturer::SourceList* const result; +}; + +bool IsWindowOwnedByCurrentProcess(HWND hwnd) { + DWORD process_id; + GetWindowThreadProcessId(hwnd, &process_id); + return process_id == GetCurrentProcessId(); +} + +BOOL CALLBACK GetWindowListHandler(HWND hwnd, LPARAM param) { + GetWindowListParams* params = reinterpret_cast(param); + DesktopCapturer::SourceList* list = params->result; + + // Skip invisible and minimized windows + if (!IsWindowVisible(hwnd) || IsIconic(hwnd)) { + return TRUE; + } + + // Skip windows which are not presented in the taskbar, + // namely owned window if they don't have the app window style set + HWND owner = GetWindow(hwnd, GW_OWNER); + LONG exstyle = GetWindowLong(hwnd, GWL_EXSTYLE); + if (owner && !(exstyle & WS_EX_APPWINDOW)) { + return TRUE; + } + + // Filter out windows that match the extended styles the caller has specified, + // e.g. WS_EX_TOOLWINDOW for capturers that don't support overlay windows. 
+ if (exstyle & params->ex_style_filters) { + return TRUE; + } + + if (params->ignore_unresponsive && !IsWindowResponding(hwnd)) { + return TRUE; + } + + DesktopCapturer::Source window; + window.id = reinterpret_cast(hwnd); + + // GetWindowText* are potentially blocking operations if |hwnd| is + // owned by the current process. The APIs will send messages to the window's + // message loop, and if the message loop is waiting on this operation we will + // enter a deadlock. + // https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getwindowtexta#remarks + // + // To help consumers avoid this, there is a DesktopCaptureOption to ignore + // windows owned by the current process. Consumers should either ensure that + // the thread running their message loop never waits on this operation, or use + // the option to exclude these windows from the source list. + bool owned_by_current_process = IsWindowOwnedByCurrentProcess(hwnd); + if (owned_by_current_process && params->ignore_current_process_windows) { + return TRUE; + } + + // Even if consumers request to enumerate windows owned by the current + // process, we should not call GetWindowText* on unresponsive windows owned by + // the current process because we will hang. Unfortunately, we could still + // hang if the window becomes unresponsive after this check, hence the option + // to avoid these completely. + if (!owned_by_current_process || IsWindowResponding(hwnd)) { + const size_t kTitleLength = 500; + WCHAR window_title[kTitleLength] = L""; + if (GetWindowTextLength(hwnd) != 0 && + GetWindowTextW(hwnd, window_title, kTitleLength) > 0) { + window.title = rtc::ToUtf8(window_title); + } + } + + // Skip windows when we failed to convert the title or it is empty. + if (params->ignore_untitled && window.title.empty()) + return TRUE; + + // Capture the window class name, to allow specific window classes to be + // skipped. 
+ // + // https://docs.microsoft.com/en-us/windows/win32/api/winuser/ns-winuser-wndclassa + // says lpszClassName field in WNDCLASS is limited by 256 symbols, so we don't + // need to have a buffer bigger than that. + const size_t kMaxClassNameLength = 256; + WCHAR class_name[kMaxClassNameLength] = L""; + const int class_name_length = + GetClassNameW(hwnd, class_name, kMaxClassNameLength); + if (class_name_length < 1) + return TRUE; + + // Skip Program Manager window. + if (wcscmp(class_name, L"Progman") == 0) + return TRUE; + + // Skip Start button window on Windows Vista, Windows 7. + // On Windows 8, Windows 8.1, Windows 10 Start button is not a top level + // window, so it will not be examined here. + if (wcscmp(class_name, L"Button") == 0) + return TRUE; + + list->push_back(window); + + return TRUE; +} + +} // namespace + // Prefix used to match the window class for Chrome windows. const wchar_t kChromeWindowClassPrefix[] = L"Chrome_WidgetWin_"; @@ -157,6 +279,26 @@ bool IsWindowMaximized(HWND window, bool* result) { return true; } +bool IsWindowValidAndVisible(HWND window) { + return IsWindow(window) && IsWindowVisible(window) && !IsIconic(window); +} + +bool IsWindowResponding(HWND window) { + // 50ms is chosen in case the system is under heavy load, but it's also not + // too long to delay window enumeration considerably. + const UINT uTimeoutMs = 50; + return SendMessageTimeout(window, WM_NULL, 0, 0, SMTO_ABORTIFHUNG, uTimeoutMs, + nullptr); +} + +bool GetWindowList(int flags, + DesktopCapturer::SourceList* windows, + LONG ex_style_filters) { + GetWindowListParams params(flags, ex_style_filters, windows); + return ::EnumWindows(&GetWindowListHandler, + reinterpret_cast(¶ms)) != 0; +} + // WindowCaptureHelperWin implementation. WindowCaptureHelperWin::WindowCaptureHelperWin() { // Try to load dwmapi.dll dynamically since it is not available on XP. 
@@ -223,12 +365,13 @@ bool WindowCaptureHelperWin::IsWindowChromeNotification(HWND hwnd) { } // |content_rect| is preferred because, -// 1. WindowCapturerWin is using GDI capturer, which cannot capture DX output. +// 1. WindowCapturerWinGdi is using GDI capturer, which cannot capture DX +// output. // So ScreenCapturer should be used as much as possible to avoid // uncapturable cases. Note: lots of new applications are using DX output // (hardware acceleration) to improve the performance which cannot be -// captured by WindowCapturerWin. See bug http://crbug.com/741770. -// 2. WindowCapturerWin is still useful because we do not want to expose the +// captured by WindowCapturerWinGdi. See bug http://crbug.com/741770. +// 2. WindowCapturerWinGdi is still useful because we do not want to expose the // content on other windows if the target window is covered by them. // 3. Shadow and borders should not be considered as "content" on other // windows because they do not expose any useful information. @@ -288,8 +431,8 @@ bool WindowCaptureHelperWin::IsWindowOnCurrentDesktop(HWND hwnd) { } bool WindowCaptureHelperWin::IsWindowVisibleOnCurrentDesktop(HWND hwnd) { - return !::IsIconic(hwnd) && ::IsWindowVisible(hwnd) && - IsWindowOnCurrentDesktop(hwnd) && !IsWindowCloaked(hwnd); + return IsWindowValidAndVisible(hwnd) && IsWindowOnCurrentDesktop(hwnd) && + !IsWindowCloaked(hwnd); } // A cloaked window is composited but not visible to the user. @@ -303,11 +446,36 @@ bool WindowCaptureHelperWin::IsWindowCloaked(HWND hwnd) { int res = 0; if (dwm_get_window_attribute_func_(hwnd, DWMWA_CLOAKED, &res, sizeof(res)) != S_OK) { - // Cannot tell so assume not cloacked for backward compatibility. + // Cannot tell so assume not cloaked for backward compatibility. 
return false; } return res != 0; } +bool WindowCaptureHelperWin::EnumerateCapturableWindows( + DesktopCapturer::SourceList* results, + bool enumerate_current_process_windows, + LONG ex_style_filters) { + int flags = (GetWindowListFlags::kIgnoreUntitled | + GetWindowListFlags::kIgnoreUnresponsive); + if (!enumerate_current_process_windows) { + flags |= GetWindowListFlags::kIgnoreCurrentProcessWindows; + } + + if (!webrtc::GetWindowList(flags, results, ex_style_filters)) { + return false; + } + + for (auto it = results->begin(); it != results->end();) { + if (!IsWindowVisibleOnCurrentDesktop(reinterpret_cast(it->id))) { + it = results->erase(it); + } else { + ++it; + } + } + + return true; +} + } // namespace webrtc diff --git a/modules/desktop_capture/win/window_capture_utils.h b/modules/desktop_capture/win/window_capture_utils.h index 20a475510b..a6a295d068 100644 --- a/modules/desktop_capture/win/window_capture_utils.h +++ b/modules/desktop_capture/win/window_capture_utils.h @@ -15,6 +15,7 @@ #include #include +#include "modules/desktop_capture/desktop_capturer.h" #include "modules/desktop_capture/desktop_geometry.h" #include "rtc_base/constructor_magic.h" @@ -40,7 +41,7 @@ bool GetWindowRect(HWND window, DesktopRect* result); // This function should only be used by CroppingWindowCapturerWin. Instead a // DesktopRect CropWindowRect(const DesktopRect& rect) // should be added as a utility function to help CroppingWindowCapturerWin and -// WindowCapturerWin to crop out the borders or shadow according to their +// WindowCapturerWinGdi to crop out the borders or shadow according to their // scenarios. But this function is too generic and easy to be misused. bool GetCroppedWindowRect(HWND window, bool avoid_cropping_border, @@ -66,6 +67,33 @@ bool GetDcSize(HDC hdc, DesktopSize* size); // function returns false if native APIs fail. 
bool IsWindowMaximized(HWND window, bool* result); +// Checks that the HWND is for a valid window, that window's visibility state is +// visible, and that it is not minimized. +bool IsWindowValidAndVisible(HWND window); + +// Checks if a window responds to a message within 50ms. +bool IsWindowResponding(HWND window); + +enum GetWindowListFlags { + kNone = 0x00, + kIgnoreUntitled = 1 << 0, + kIgnoreUnresponsive = 1 << 1, + kIgnoreCurrentProcessWindows = 1 << 2, +}; + +// Retrieves the list of top-level windows on the screen. +// Some windows will be ignored: +// - Those that are invisible or minimized. +// - Program Manager & Start menu. +// - [with kIgnoreUntitled] windows with no title. +// - [with kIgnoreUnresponsive] windows that are unresponsive. +// - [with kIgnoreCurrentProcessWindows] windows owned by the current process. +// - Any windows with extended styles that match |ex_style_filters|. +// Returns false if native APIs failed. +bool GetWindowList(int flags, + DesktopCapturer::SourceList* windows, + LONG ex_style_filters = 0); + typedef HRESULT(WINAPI* DwmIsCompositionEnabledFunc)(BOOL* enabled); typedef HRESULT(WINAPI* DwmGetWindowAttributeFunc)(HWND hwnd, DWORD flag, @@ -85,6 +113,13 @@ class WindowCaptureHelperWin { bool IsWindowVisibleOnCurrentDesktop(HWND hwnd); bool IsWindowCloaked(HWND hwnd); + // The optional |ex_style_filters| parameter allows callers to provide + // extended window styles (e.g. WS_EX_TOOLWINDOW) and prevent windows that + // match from being included in |results|. 
+ bool EnumerateCapturableWindows(DesktopCapturer::SourceList* results, + bool enumerate_current_process_windows, + LONG ex_style_filters = 0); + private: HMODULE dwmapi_library_ = nullptr; DwmIsCompositionEnabledFunc func_ = nullptr; diff --git a/modules/desktop_capture/win/window_capture_utils_unittest.cc b/modules/desktop_capture/win/window_capture_utils_unittest.cc new file mode 100644 index 0000000000..4b426fc464 --- /dev/null +++ b/modules/desktop_capture/win/window_capture_utils_unittest.cc @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/win/window_capture_utils.h" + +#include +#include +#include +#include + +#include "modules/desktop_capture/desktop_capturer.h" +#include "modules/desktop_capture/win/test_support/test_window.h" +#include "rtc_base/thread.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +const char kWindowThreadName[] = "window_capture_utils_test_thread"; +const WCHAR kWindowTitle[] = L"Window Capture Utils Test"; + +std::unique_ptr SetUpUnresponsiveWindow(std::mutex& mtx, + WindowInfo& info) { + std::unique_ptr window_thread; + window_thread = rtc::Thread::Create(); + window_thread->SetName(kWindowThreadName, nullptr); + window_thread->Start(); + + window_thread->Invoke( + RTC_FROM_HERE, [&info]() { info = CreateTestWindow(kWindowTitle); }); + + // Intentionally create a deadlock to cause the window to become unresponsive. 
+ mtx.lock(); + window_thread->PostTask(RTC_FROM_HERE, [&mtx]() { + mtx.lock(); + mtx.unlock(); + }); + + return window_thread; +} + +} // namespace + +TEST(WindowCaptureUtilsTest, GetWindowList) { + WindowInfo info = CreateTestWindow(kWindowTitle); + DesktopCapturer::SourceList window_list; + ASSERT_TRUE(GetWindowList(GetWindowListFlags::kNone, &window_list)); + EXPECT_GT(window_list.size(), 0ULL); + EXPECT_NE(std::find_if(window_list.begin(), window_list.end(), + [&info](DesktopCapturer::Source window) { + return reinterpret_cast(window.id) == + info.hwnd; + }), + window_list.end()); + DestroyTestWindow(info); +} + +TEST(WindowCaptureUtilsTest, IncludeUnresponsiveWindows) { + std::mutex mtx; + WindowInfo info; + std::unique_ptr window_thread = + SetUpUnresponsiveWindow(mtx, info); + + EXPECT_FALSE(IsWindowResponding(info.hwnd)); + + DesktopCapturer::SourceList window_list; + ASSERT_TRUE(GetWindowList(GetWindowListFlags::kNone, &window_list)); + EXPECT_GT(window_list.size(), 0ULL); + EXPECT_NE(std::find_if(window_list.begin(), window_list.end(), + [&info](DesktopCapturer::Source window) { + return reinterpret_cast(window.id) == + info.hwnd; + }), + window_list.end()); + + mtx.unlock(); + window_thread->Invoke(RTC_FROM_HERE, + [&info]() { DestroyTestWindow(info); }); + window_thread->Stop(); +} + +TEST(WindowCaptureUtilsTest, IgnoreUnresponsiveWindows) { + std::mutex mtx; + WindowInfo info; + std::unique_ptr window_thread = + SetUpUnresponsiveWindow(mtx, info); + + EXPECT_FALSE(IsWindowResponding(info.hwnd)); + + DesktopCapturer::SourceList window_list; + ASSERT_TRUE( + GetWindowList(GetWindowListFlags::kIgnoreUnresponsive, &window_list)); + EXPECT_EQ(std::find_if(window_list.begin(), window_list.end(), + [&info](DesktopCapturer::Source window) { + return reinterpret_cast(window.id) == + info.hwnd; + }), + window_list.end()); + + mtx.unlock(); + window_thread->Invoke(RTC_FROM_HERE, + [&info]() { DestroyTestWindow(info); }); + window_thread->Stop(); +} + 
+TEST(WindowCaptureUtilsTest, IncludeUntitledWindows) { + WindowInfo info = CreateTestWindow(L""); + DesktopCapturer::SourceList window_list; + ASSERT_TRUE(GetWindowList(GetWindowListFlags::kNone, &window_list)); + EXPECT_GT(window_list.size(), 0ULL); + EXPECT_NE(std::find_if(window_list.begin(), window_list.end(), + [&info](DesktopCapturer::Source window) { + return reinterpret_cast(window.id) == + info.hwnd; + }), + window_list.end()); + DestroyTestWindow(info); +} + +TEST(WindowCaptureUtilsTest, IgnoreUntitledWindows) { + WindowInfo info = CreateTestWindow(L""); + DesktopCapturer::SourceList window_list; + ASSERT_TRUE(GetWindowList(GetWindowListFlags::kIgnoreUntitled, &window_list)); + EXPECT_EQ(std::find_if(window_list.begin(), window_list.end(), + [&info](DesktopCapturer::Source window) { + return reinterpret_cast(window.id) == + info.hwnd; + }), + window_list.end()); + DestroyTestWindow(info); +} + +TEST(WindowCaptureUtilsTest, IgnoreCurrentProcessWindows) { + WindowInfo info = CreateTestWindow(kWindowTitle); + DesktopCapturer::SourceList window_list; + ASSERT_TRUE(GetWindowList(GetWindowListFlags::kIgnoreCurrentProcessWindows, + &window_list)); + EXPECT_EQ(std::find_if(window_list.begin(), window_list.end(), + [&info](DesktopCapturer::Source window) { + return reinterpret_cast(window.id) == + info.hwnd; + }), + window_list.end()); + DestroyTestWindow(info); +} + +} // namespace webrtc diff --git a/modules/desktop_capture/win/window_capturer_win_gdi.cc b/modules/desktop_capture/win/window_capturer_win_gdi.cc new file mode 100644 index 0000000000..25677e9868 --- /dev/null +++ b/modules/desktop_capture/win/window_capturer_win_gdi.cc @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/desktop_capture/win/window_capturer_win_gdi.h" + +#include +#include +#include +#include +#include + +#include "modules/desktop_capture/cropped_desktop_frame.h" +#include "modules/desktop_capture/desktop_capture_metrics_helper.h" +#include "modules/desktop_capture/desktop_capture_types.h" +#include "modules/desktop_capture/desktop_capturer.h" +#include "modules/desktop_capture/desktop_frame_win.h" +#include "modules/desktop_capture/win/screen_capture_utils.h" +#include "modules/desktop_capture/win/selected_window_context.h" +#include "rtc_base/arraysize.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/string_utils.h" +#include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" +#include "rtc_base/win32.h" +#include "system_wrappers/include/metrics.h" + +namespace webrtc { + +// Used to pass input/output data during the EnumWindows call to collect +// owned/pop-up windows that should be captured. +struct OwnedWindowCollectorContext : public SelectedWindowContext { + OwnedWindowCollectorContext(HWND selected_window, + DesktopRect selected_window_rect, + WindowCaptureHelperWin* window_capture_helper, + std::vector* owned_windows) + : SelectedWindowContext(selected_window, + selected_window_rect, + window_capture_helper), + owned_windows(owned_windows) {} + + std::vector* owned_windows; +}; + +// Called via EnumWindows for each root window; adds owned/pop-up windows that +// should be captured to a vector it's passed. +BOOL CALLBACK OwnedWindowCollector(HWND hwnd, LPARAM param) { + OwnedWindowCollectorContext* context = + reinterpret_cast(param); + if (hwnd == context->selected_window()) { + // Windows are enumerated in top-down z-order, so we can stop enumerating + // upon reaching the selected window. + return FALSE; + } + + // Skip windows that aren't visible pop-up windows. 
+ if (!(GetWindowLong(hwnd, GWL_STYLE) & WS_POPUP) || + !context->window_capture_helper()->IsWindowVisibleOnCurrentDesktop( + hwnd)) { + return TRUE; + } + + // Owned windows that intersect the selected window should be captured. + if (context->IsWindowOwnedBySelectedWindow(hwnd) && + context->IsWindowOverlappingSelectedWindow(hwnd)) { + // Skip windows that draw shadows around menus. These "SysShadow" windows + // would otherwise be captured as solid black bars with no transparency + // gradient (since this capturer doesn't detect / respect variations in the + // window alpha channel). Any other semi-transparent owned windows will be + // captured fully-opaque. This seems preferable to excluding them (at least + // when they have content aside from a solid fill color / visual adornment; + // e.g. some tooltips have the transparent style set). + if (GetWindowLong(hwnd, GWL_EXSTYLE) & WS_EX_TRANSPARENT) { + const WCHAR kSysShadow[] = L"SysShadow"; + const size_t kClassLength = arraysize(kSysShadow); + WCHAR class_name[kClassLength]; + const int class_name_length = + GetClassNameW(hwnd, class_name, kClassLength); + if (class_name_length == kClassLength - 1 && + wcscmp(class_name, kSysShadow) == 0) { + return TRUE; + } + } + + context->owned_windows->push_back(hwnd); + } + + return TRUE; +} + +WindowCapturerWinGdi::WindowCapturerWinGdi( + bool enumerate_current_process_windows) + : enumerate_current_process_windows_(enumerate_current_process_windows) {} +WindowCapturerWinGdi::~WindowCapturerWinGdi() {} + +bool WindowCapturerWinGdi::GetSourceList(SourceList* sources) { + if (!window_capture_helper_.EnumerateCapturableWindows( + sources, enumerate_current_process_windows_)) + return false; + + std::map new_map; + for (const auto& item : *sources) { + HWND hwnd = reinterpret_cast(item.id); + new_map[hwnd] = window_size_map_[hwnd]; + } + window_size_map_.swap(new_map); + + return true; +} + +bool WindowCapturerWinGdi::SelectSource(SourceId id) { + HWND window = 
reinterpret_cast<HWND>(id);
+  if (!IsWindowValidAndVisible(window))
+    return false;
+
+  window_ = window;
+  // When a window is not in the map, window_size_map_[window] will create an
+  // item with DesktopSize (0, 0).
+  previous_size_ = window_size_map_[window];
+  return true;
+}
+
+bool WindowCapturerWinGdi::FocusOnSelectedSource() {
+  if (!window_)
+    return false;
+
+  if (!IsWindowValidAndVisible(window_))
+    return false;
+
+  return BringWindowToTop(window_) && SetForegroundWindow(window_);
+}
+
+bool WindowCapturerWinGdi::IsOccluded(const DesktopVector& pos) {
+  DesktopVector sys_pos = pos.add(GetFullscreenRect().top_left());
+  HWND hwnd =
+      reinterpret_cast<HWND>(window_finder_.GetWindowUnderPoint(sys_pos));
+
+  return hwnd != window_ &&
+         std::find(owned_windows_.begin(), owned_windows_.end(), hwnd) ==
+             owned_windows_.end();
+}
+
+void WindowCapturerWinGdi::Start(Callback* callback) {
+  RTC_DCHECK(!callback_);
+  RTC_DCHECK(callback);
+  RecordCapturerImpl(DesktopCapturerId::kWindowCapturerWinGdi);
+
+  callback_ = callback;
+}
+
+void WindowCapturerWinGdi::CaptureFrame() {
+  RTC_DCHECK(callback_);
+  int64_t capture_start_time_nanos = rtc::TimeNanos();
+
+  CaptureResults results = CaptureFrame(/*capture_owned_windows*/ true);
+
+  if (results.frame) {
+    int capture_time_ms = (rtc::TimeNanos() - capture_start_time_nanos) /
+                          rtc::kNumNanosecsPerMillisec;
+    RTC_HISTOGRAM_COUNTS_1000(
+        "WebRTC.DesktopCapture.Win.WindowGdiCapturerFrameTime",
+        capture_time_ms);
+    results.frame->set_capture_time_ms(capture_time_ms);
+    results.frame->set_capturer_id(DesktopCapturerId::kWindowCapturerWinGdi);
+  }
+
+  callback_->OnCaptureResult(results.result, std::move(results.frame));
+}
+
+WindowCapturerWinGdi::CaptureResults WindowCapturerWinGdi::CaptureFrame(
+    bool capture_owned_windows) {
+  TRACE_EVENT0("webrtc", "WindowCapturerWinGdi::CaptureFrame");
+
+  if (!window_) {
+    RTC_LOG(LS_ERROR) << "Window hasn't been selected: " << GetLastError();
+    return {Result::ERROR_PERMANENT, nullptr};
+
}
+
+  // Stop capturing if the window has been closed.
+  if (!IsWindow(window_)) {
+    RTC_LOG(LS_ERROR) << "Target window has been closed.";
+    return {Result::ERROR_PERMANENT, nullptr};
+  }
+
+  // Determine the window region excluding any resize border, and including
+  // any visible border if capturing an owned window / dialog. (Don't include
+  // any visible border for the selected window for consistency with
+  // CroppingWindowCapturerWin, which would expose a bit of the background
+  // through the partially-transparent border.)
+  const bool avoid_cropping_border = !capture_owned_windows;
+  DesktopRect cropped_rect;
+  DesktopRect original_rect;
+
+  if (!GetCroppedWindowRect(window_, avoid_cropping_border, &cropped_rect,
+                            &original_rect)) {
+    RTC_LOG(LS_WARNING) << "Failed to get drawable window area: "
+                        << GetLastError();
+    return {Result::ERROR_TEMPORARY, nullptr};
+  }
+
+  // Return a 1x1 black frame if the window is minimized or invisible on current
+  // desktop, to match behavior on Mac. Window can be temporarily invisible
+  // during the transition of full screen mode on/off.
+  if (original_rect.is_empty() ||
+      !window_capture_helper_.IsWindowVisibleOnCurrentDesktop(window_)) {
+    std::unique_ptr<DesktopFrame> frame(
+        new BasicDesktopFrame(DesktopSize(1, 1)));
+
+    previous_size_ = frame->size();
+    window_size_map_[window_] = previous_size_;
+    return {Result::SUCCESS, std::move(frame)};
+  }
+
+  HDC window_dc = GetWindowDC(window_);
+  if (!window_dc) {
+    RTC_LOG(LS_WARNING) << "Failed to get window DC: " << GetLastError();
+    return {Result::ERROR_TEMPORARY, nullptr};
+  }
+
+  DesktopRect unscaled_cropped_rect = cropped_rect;
+  double horizontal_scale = 1.0;
+  double vertical_scale = 1.0;
+
+  DesktopSize window_dc_size;
+  if (GetDcSize(window_dc, &window_dc_size)) {
+    // The |window_dc_size| is used to detect the scaling of the original
+    // window. If the application does not support high-DPI settings, it will
+    // be scaled by Windows according to the scaling setting.
+    // https://www.google.com/search?q=windows+scaling+settings&ie=UTF-8
+    // So the size of the |window_dc|, i.e. the bitmap we can retrieve from
+    // PrintWindow() or BitBlt() function, will be smaller than
+    // |original_rect| and |cropped_rect|. Part of the captured desktop frame
+    // will be black. See
+    // bug https://bugs.chromium.org/p/webrtc/issues/detail?id=8112 for
+    // details.
+
+    // If |window_dc_size| is smaller than |window_rect|, let's resize both
+    // |original_rect| and |cropped_rect| according to the scaling factor.
+    // This will adjust the width and height of the two rects.
+    horizontal_scale =
+        static_cast<double>(window_dc_size.width()) / original_rect.width();
+    vertical_scale =
+        static_cast<double>(window_dc_size.height()) / original_rect.height();
+    original_rect.Scale(horizontal_scale, vertical_scale);
+    cropped_rect.Scale(horizontal_scale, vertical_scale);
+
+    // Translate |cropped_rect| to the left so that its position within
+    // |original_rect| remains accurate after scaling.
+    // See crbug.com/1083527 for more info.
+    int translate_left = static_cast<int>(std::round(
+        (cropped_rect.left() - original_rect.left()) * (horizontal_scale - 1)));
+    int translate_top = static_cast<int>(std::round(
+        (cropped_rect.top() - original_rect.top()) * (vertical_scale - 1)));
+    cropped_rect.Translate(translate_left, translate_top);
+  }
+
+  std::unique_ptr<DesktopFrame> frame(
+      DesktopFrameWin::Create(original_rect.size(), nullptr, window_dc));
+  if (!frame.get()) {
+    RTC_LOG(LS_WARNING) << "Failed to create frame.";
+    ReleaseDC(window_, window_dc);
+    return {Result::ERROR_TEMPORARY, nullptr};
+  }
+
+  HDC mem_dc = CreateCompatibleDC(window_dc);
+  HGDIOBJ previous_object = SelectObject(mem_dc, frame->bitmap());
+  BOOL result = FALSE;
+
+  // When desktop composition (Aero) is enabled each window is rendered to a
+  // private buffer allowing BitBlt() to get the window content even if the
+  // window is occluded.
PrintWindow() is slower but lets rendering the window
+  // contents to an off-screen device context when Aero is not available.
+  // PrintWindow() is not supported by some applications.
+  //
+  // If Aero is enabled, we prefer BitBlt() because it's faster and avoids
+  // window flickering. Otherwise, we prefer PrintWindow() because BitBlt() may
+  // render occluding windows on top of the desired window.
+  //
+  // When composition is enabled the DC returned by GetWindowDC() doesn't always
+  // have window frame rendered correctly. Windows renders it only once and then
+  // caches the result between captures. We hack it around by calling
+  // PrintWindow() whenever window size changes, including the first time of
+  // capturing - it somehow affects what we get from BitBlt() on the subsequent
+  // captures.
+  //
+  // For Windows 8.1 and later, we want to always use PrintWindow when the
+  // cropping screen capturer falls back to the window capturer. I.e.
+  // on Windows 8.1 and later, PrintWindow is only used when the window is
+  // occluded. When the window is not occluded, it is much faster to capture
+  // the screen and to crop it to the window position and size.
+  if (rtc::IsWindows8OrLater()) {
+    // Special flag that makes PrintWindow to work on Windows 8.1 and later.
+    // Indeed certain apps (e.g. those using DirectComposition rendering) can't
+    // be captured using BitBlt or PrintWindow without this flag. Note that on
+    // Windows 8.0 this flag is not supported so the block below will fallback
+    // to the other call to PrintWindow. It seems to be very tricky to detect
+    // Windows 8.0 vs 8.1 so a try/fallback is more appropriate here.
+    const UINT flags = PW_RENDERFULLCONTENT;
+    result = PrintWindow(window_, mem_dc, flags);
+  }
+
+  if (!result && (!window_capture_helper_.IsAeroEnabled() ||
+                  !previous_size_.equals(frame->size()))) {
+    result = PrintWindow(window_, mem_dc, 0);
+  }
+
+  // Aero is enabled or PrintWindow() failed, use BitBlt.
+ if (!result) { + result = BitBlt(mem_dc, 0, 0, frame->size().width(), frame->size().height(), + window_dc, 0, 0, SRCCOPY); + } + + SelectObject(mem_dc, previous_object); + DeleteDC(mem_dc); + ReleaseDC(window_, window_dc); + + previous_size_ = frame->size(); + window_size_map_[window_] = previous_size_; + + frame->mutable_updated_region()->SetRect( + DesktopRect::MakeSize(frame->size())); + frame->set_top_left( + original_rect.top_left().subtract(GetFullscreenRect().top_left())); + + if (!result) { + RTC_LOG(LS_ERROR) << "Both PrintWindow() and BitBlt() failed."; + return {Result::ERROR_TEMPORARY, nullptr}; + } + + // Rect for the data is relative to the first pixel of the frame. + cropped_rect.Translate(-original_rect.left(), -original_rect.top()); + std::unique_ptr cropped_frame = + CreateCroppedDesktopFrame(std::move(frame), cropped_rect); + RTC_DCHECK(cropped_frame); + + if (capture_owned_windows) { + // If any owned/pop-up windows overlap the selected window, capture them + // and copy/composite their contents into the frame. 
+ owned_windows_.clear(); + OwnedWindowCollectorContext context(window_, unscaled_cropped_rect, + &window_capture_helper_, + &owned_windows_); + + if (context.IsSelectedWindowValid()) { + EnumWindows(OwnedWindowCollector, reinterpret_cast(&context)); + + if (!owned_windows_.empty()) { + if (!owned_window_capturer_) { + owned_window_capturer_ = std::make_unique( + enumerate_current_process_windows_); + } + + // Owned windows are stored in top-down z-order, so this iterates in + // reverse to capture / draw them in bottom-up z-order + for (auto it = owned_windows_.rbegin(); it != owned_windows_.rend(); + it++) { + HWND hwnd = *it; + if (owned_window_capturer_->SelectSource( + reinterpret_cast(hwnd))) { + CaptureResults results = owned_window_capturer_->CaptureFrame( + /*capture_owned_windows*/ false); + + if (results.result != DesktopCapturer::Result::SUCCESS) { + // Simply log any error capturing an owned/pop-up window without + // bubbling it up to the caller (an expected error here is that + // the owned/pop-up window was closed; any unexpected errors won't + // fail the outer capture). + RTC_LOG(LS_INFO) << "Capturing owned window failed (previous " + "error/warning pertained to that)"; + } else { + // Copy / composite the captured frame into the outer frame. This + // may no-op if they no longer intersect (if the owned window was + // moved outside the owner bounds since scheduled for capture.) 
+ cropped_frame->CopyIntersectingPixelsFrom( + *results.frame, horizontal_scale, vertical_scale); + } + } + } + } + } + } + + return {Result::SUCCESS, std::move(cropped_frame)}; +} + +// static +std::unique_ptr WindowCapturerWinGdi::CreateRawWindowCapturer( + const DesktopCaptureOptions& options) { + return std::unique_ptr( + new WindowCapturerWinGdi(options.enumerate_current_process_windows())); +} + +} // namespace webrtc diff --git a/modules/desktop_capture/win/window_capturer_win_gdi.h b/modules/desktop_capture/win/window_capturer_win_gdi.h new file mode 100644 index 0000000000..5091458a12 --- /dev/null +++ b/modules/desktop_capture/win/window_capturer_win_gdi.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURER_WIN_GDI_H_ +#define MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURER_WIN_GDI_H_ + +#include +#include +#include + +#include "modules/desktop_capture/desktop_capture_options.h" +#include "modules/desktop_capture/desktop_capturer.h" +#include "modules/desktop_capture/win/window_capture_utils.h" +#include "modules/desktop_capture/window_finder_win.h" + +namespace webrtc { + +class WindowCapturerWinGdi : public DesktopCapturer { + public: + explicit WindowCapturerWinGdi(bool enumerate_current_process_windows); + + // Disallow copy and assign + WindowCapturerWinGdi(const WindowCapturerWinGdi&) = delete; + WindowCapturerWinGdi& operator=(const WindowCapturerWinGdi&) = delete; + + ~WindowCapturerWinGdi() override; + + static std::unique_ptr CreateRawWindowCapturer( + const DesktopCaptureOptions& options); + + // DesktopCapturer interface. + void Start(Callback* callback) override; + void CaptureFrame() override; + bool GetSourceList(SourceList* sources) override; + bool SelectSource(SourceId id) override; + bool FocusOnSelectedSource() override; + bool IsOccluded(const DesktopVector& pos) override; + + private: + struct CaptureResults { + Result result; + std::unique_ptr frame; + }; + + CaptureResults CaptureFrame(bool capture_owned_windows); + + Callback* callback_ = nullptr; + + // HWND and HDC for the currently selected window or nullptr if window is not + // selected. + HWND window_ = nullptr; + + DesktopSize previous_size_; + + WindowCaptureHelperWin window_capture_helper_; + + bool enumerate_current_process_windows_; + + // This map is used to avoid flickering for the case when SelectWindow() calls + // are interleaved with Capture() calls. 
+  std::map<HWND, DesktopSize> window_size_map_;
+
+  WindowFinderWin window_finder_;
+
+  std::vector<HWND> owned_windows_;
+  std::unique_ptr<WindowCapturerWinGdi> owned_window_capturer_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_DESKTOP_CAPTURE_WIN_WINDOW_CAPTURER_WIN_GDI_H_
diff --git a/modules/desktop_capture/window_capturer_linux.cc b/modules/desktop_capture/window_capturer_linux.cc
index 41dbf836b0..2b142ae3b9 100644
--- a/modules/desktop_capture/window_capturer_linux.cc
+++ b/modules/desktop_capture/window_capturer_linux.cc
@@ -14,7 +14,7 @@
 #include "modules/desktop_capture/desktop_capturer.h"
 
 #if defined(WEBRTC_USE_PIPEWIRE)
-#include "modules/desktop_capture/linux/window_capturer_pipewire.h"
+#include "modules/desktop_capture/linux/base_capturer_pipewire.h"
 #endif  // defined(WEBRTC_USE_PIPEWIRE)
 
 #if defined(WEBRTC_USE_X11)
@@ -28,7 +28,7 @@ std::unique_ptr<DesktopCapturer> DesktopCapturer::CreateRawWindowCapturer(
     const DesktopCaptureOptions& options) {
 #if defined(WEBRTC_USE_PIPEWIRE)
   if (options.allow_pipewire() && DesktopCapturer::IsRunningUnderWayland()) {
-    return WindowCapturerPipeWire::CreateRawWindowCapturer(options);
+    return BaseCapturerPipeWire::CreateRawCapturer(options);
   }
 #endif  // defined(WEBRTC_USE_PIPEWIRE)
 
diff --git a/modules/desktop_capture/window_capturer_mac.mm b/modules/desktop_capture/window_capturer_mac.mm
index 96f89eb14b..cbbc500613 100644
--- a/modules/desktop_capture/window_capturer_mac.mm
+++ b/modules/desktop_capture/window_capturer_mac.mm
@@ -161,7 +161,19 @@ explicit WindowCapturerMac(
   if (full_screen_window_detector_) {
     full_screen_window_detector_->UpdateWindowListIfNeeded(
         window_id_, [](DesktopCapturer::SourceList* sources) {
-          return webrtc::GetWindowList(sources, true, false);
+          // Not using webrtc::GetWindowList(sources, true, false)
+          // as it doesn't allow to have in the result window with
+          // empty title along with titled window owned by the same pid.
+ return webrtc::GetWindowList( + [sources](CFDictionaryRef window) { + WindowId window_id = GetWindowId(window); + if (window_id != kNullWindowId) { + sources->push_back(DesktopCapturer::Source{window_id, GetWindowTitle(window)}); + } + return true; + }, + true, + false); }); CGWindowID full_screen_window = full_screen_window_detector_->FindFullScreenWindow(window_id_); diff --git a/modules/desktop_capture/window_capturer_null.cc b/modules/desktop_capture/window_capturer_null.cc index 66e76a50fb..e7c7b0a134 100644 --- a/modules/desktop_capture/window_capturer_null.cc +++ b/modules/desktop_capture/window_capturer_null.cc @@ -8,10 +8,9 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include - #include "modules/desktop_capture/desktop_capturer.h" #include "modules/desktop_capture/desktop_frame.h" +#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" namespace webrtc { @@ -49,8 +48,8 @@ bool WindowCapturerNull::SelectSource(SourceId id) { } void WindowCapturerNull::Start(Callback* callback) { - assert(!callback_); - assert(callback); + RTC_DCHECK(!callback_); + RTC_DCHECK(callback); callback_ = callback; } diff --git a/modules/desktop_capture/window_capturer_unittest.cc b/modules/desktop_capture/window_capturer_unittest.cc index 8a611e760a..519c04601b 100644 --- a/modules/desktop_capture/window_capturer_unittest.cc +++ b/modules/desktop_capture/window_capturer_unittest.cc @@ -44,7 +44,13 @@ class WindowCapturerTest : public ::testing::Test, }; // Verify that we can enumerate windows. 
-TEST_F(WindowCapturerTest, Enumerate) { +// TODO(bugs.webrtc.org/12950): Re-enable when libc++ issue is fixed +#if defined(WEBRTC_LINUX) && defined(MEMORY_SANITIZER) +#define MAYBE_Enumerate DISABLED_Enumerate +#else +#define MAYBE_Enumerate Enumerate +#endif +TEST_F(WindowCapturerTest, MAYBE_Enumerate) { DesktopCapturer::SourceList sources; EXPECT_TRUE(capturer_->GetSourceList(&sources)); @@ -54,8 +60,9 @@ TEST_F(WindowCapturerTest, Enumerate) { } } -// Flaky on Linux. See: crbug.com/webrtc/7830 -#if defined(WEBRTC_LINUX) +// Flaky on Linux. See: crbug.com/webrtc/7830. +// Failing on macOS 11: See bugs.webrtc.org/12801 +#if defined(WEBRTC_LINUX) || defined(WEBRTC_MAC) #define MAYBE_Capture DISABLED_Capture #else #define MAYBE_Capture Capture diff --git a/modules/desktop_capture/window_capturer_win.cc b/modules/desktop_capture/window_capturer_win.cc index 4e16c44ced..4bfa09f4d6 100644 --- a/modules/desktop_capture/window_capturer_win.cc +++ b/modules/desktop_capture/window_capturer_win.cc @@ -8,472 +8,16 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include - -#include - -#include "modules/desktop_capture/cropped_desktop_frame.h" +#include "modules/desktop_capture/desktop_capture_options.h" #include "modules/desktop_capture/desktop_capturer.h" -#include "modules/desktop_capture/desktop_frame_win.h" -#include "modules/desktop_capture/win/screen_capture_utils.h" -#include "modules/desktop_capture/win/selected_window_context.h" -#include "modules/desktop_capture/win/window_capture_utils.h" -#include "modules/desktop_capture/window_finder_win.h" -#include "rtc_base/arraysize.h" -#include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/logging.h" -#include "rtc_base/string_utils.h" -#include "rtc_base/trace_event.h" -#include "rtc_base/win32.h" +#include "modules/desktop_capture/win/window_capturer_win_gdi.h" namespace webrtc { -namespace { - -BOOL CALLBACK WindowsEnumerationHandler(HWND hwnd, LPARAM param) { - DesktopCapturer::SourceList* list = - reinterpret_cast(param); - - // Skip windows that are invisible, minimized, have no title, or are owned, - // unless they have the app window style set. - int len = GetWindowTextLength(hwnd); - HWND owner = GetWindow(hwnd, GW_OWNER); - LONG exstyle = GetWindowLong(hwnd, GWL_EXSTYLE); - if (len == 0 || IsIconic(hwnd) || !IsWindowVisible(hwnd) || - (owner && !(exstyle & WS_EX_APPWINDOW))) { - return TRUE; - } - // Skip unresponsive windows. Set timout with 50ms, in case system is under - // heavy load, the check can wait longer but wont' be too long to delay the - // the enumeration. - const UINT uTimeout = 50; // ms - if (!SendMessageTimeout(hwnd, WM_NULL, 0, 0, SMTO_ABORTIFHUNG, uTimeout, - nullptr)) { - return TRUE; - } - - // Skip the Program Manager window and the Start button. - const size_t kClassLength = 256; - WCHAR class_name[kClassLength]; - const int class_name_length = GetClassNameW(hwnd, class_name, kClassLength); - if (class_name_length < 1) - return TRUE; - - // Skip Program Manager window and the Start button. 
This is the same logic - // that's used in Win32WindowPicker in libjingle. Consider filtering other - // windows as well (e.g. toolbars). - if (wcscmp(class_name, L"Progman") == 0 || wcscmp(class_name, L"Button") == 0) - return TRUE; - - DesktopCapturer::Source window; - window.id = reinterpret_cast(hwnd); - - const size_t kTitleLength = 500; - WCHAR window_title[kTitleLength]; - // Truncate the title if it's longer than kTitleLength. - GetWindowTextW(hwnd, window_title, kTitleLength); - window.title = rtc::ToUtf8(window_title); - - // Skip windows when we failed to convert the title or it is empty. - if (window.title.empty()) - return TRUE; - - list->push_back(window); - - return TRUE; -} - -// Used to pass input/output data during the EnumWindows call to collect -// owned/pop-up windows that should be captured. -struct OwnedWindowCollectorContext : public SelectedWindowContext { - OwnedWindowCollectorContext(HWND selected_window, - DesktopRect selected_window_rect, - WindowCaptureHelperWin* window_capture_helper, - std::vector* owned_windows) - : SelectedWindowContext(selected_window, - selected_window_rect, - window_capture_helper), - owned_windows(owned_windows) {} - - std::vector* owned_windows; -}; - -// Called via EnumWindows for each root window; adds owned/pop-up windows that -// should be captured to a vector it's passed. -BOOL CALLBACK OwnedWindowCollector(HWND hwnd, LPARAM param) { - OwnedWindowCollectorContext* context = - reinterpret_cast(param); - if (hwnd == context->selected_window()) { - // Windows are enumerated in top-down z-order, so we can stop enumerating - // upon reaching the selected window. - return FALSE; - } - - // Skip windows that aren't visible pop-up windows. - if (!(GetWindowLong(hwnd, GWL_STYLE) & WS_POPUP) || - !context->window_capture_helper()->IsWindowVisibleOnCurrentDesktop( - hwnd)) { - return TRUE; - } - - // Owned windows that intersect the selected window should be captured. 
- if (context->IsWindowOwnedBySelectedWindow(hwnd) && - context->IsWindowOverlappingSelectedWindow(hwnd)) { - // Skip windows that draw shadows around menus. These "SysShadow" windows - // would otherwise be captured as solid black bars with no transparency - // gradient (since this capturer doesn't detect / respect variations in the - // window alpha channel). Any other semi-transparent owned windows will be - // captured fully-opaque. This seems preferable to excluding them (at least - // when they have content aside from a solid fill color / visual adornment; - // e.g. some tooltips have the transparent style set). - if (GetWindowLong(hwnd, GWL_EXSTYLE) & WS_EX_TRANSPARENT) { - const WCHAR kSysShadow[] = L"SysShadow"; - const size_t kClassLength = arraysize(kSysShadow); - WCHAR class_name[kClassLength]; - const int class_name_length = - GetClassNameW(hwnd, class_name, kClassLength); - if (class_name_length == kClassLength - 1 && - wcscmp(class_name, kSysShadow) == 0) { - return TRUE; - } - } - - context->owned_windows->push_back(hwnd); - } - - return TRUE; -} - -class WindowCapturerWin : public DesktopCapturer { - public: - WindowCapturerWin(); - ~WindowCapturerWin() override; - - // DesktopCapturer interface. - void Start(Callback* callback) override; - void CaptureFrame() override; - bool GetSourceList(SourceList* sources) override; - bool SelectSource(SourceId id) override; - bool FocusOnSelectedSource() override; - bool IsOccluded(const DesktopVector& pos) override; - - private: - struct CaptureResults { - Result result; - std::unique_ptr frame; - }; - - CaptureResults CaptureFrame(bool capture_owned_windows); - - Callback* callback_ = nullptr; - - // HWND and HDC for the currently selected window or nullptr if window is not - // selected. 
- HWND window_ = nullptr; - - DesktopSize previous_size_; - - WindowCaptureHelperWin window_capture_helper_; - - // This map is used to avoid flickering for the case when SelectWindow() calls - // are interleaved with Capture() calls. - std::map window_size_map_; - - WindowFinderWin window_finder_; - - std::vector owned_windows_; - std::unique_ptr owned_window_capturer_; - - RTC_DISALLOW_COPY_AND_ASSIGN(WindowCapturerWin); -}; - -WindowCapturerWin::WindowCapturerWin() {} -WindowCapturerWin::~WindowCapturerWin() {} - -bool WindowCapturerWin::GetSourceList(SourceList* sources) { - SourceList result; - LPARAM param = reinterpret_cast(&result); - // EnumWindows only enumerates root windows. - if (!EnumWindows(&WindowsEnumerationHandler, param)) - return false; - - for (auto it = result.begin(); it != result.end();) { - if (!window_capture_helper_.IsWindowVisibleOnCurrentDesktop( - reinterpret_cast(it->id))) { - it = result.erase(it); - } else { - ++it; - } - } - sources->swap(result); - - std::map new_map; - for (const auto& item : *sources) { - HWND hwnd = reinterpret_cast(item.id); - new_map[hwnd] = window_size_map_[hwnd]; - } - window_size_map_.swap(new_map); - - return true; -} - -bool WindowCapturerWin::SelectSource(SourceId id) { - HWND window = reinterpret_cast(id); - if (!IsWindow(window) || !IsWindowVisible(window) || IsIconic(window)) - return false; - window_ = window; - // When a window is not in the map, window_size_map_[window] will create an - // item with DesktopSize (0, 0). 
- previous_size_ = window_size_map_[window]; - return true; -} - -bool WindowCapturerWin::FocusOnSelectedSource() { - if (!window_) - return false; - - if (!IsWindow(window_) || !IsWindowVisible(window_) || IsIconic(window_)) - return false; - - return BringWindowToTop(window_) != FALSE && - SetForegroundWindow(window_) != FALSE; -} - -bool WindowCapturerWin::IsOccluded(const DesktopVector& pos) { - DesktopVector sys_pos = pos.add(GetFullscreenRect().top_left()); - HWND hwnd = - reinterpret_cast(window_finder_.GetWindowUnderPoint(sys_pos)); - - return hwnd != window_ && - std::find(owned_windows_.begin(), owned_windows_.end(), hwnd) == - owned_windows_.end(); -} - -void WindowCapturerWin::Start(Callback* callback) { - assert(!callback_); - assert(callback); - - callback_ = callback; -} - -void WindowCapturerWin::CaptureFrame() { - CaptureResults results = CaptureFrame(/*capture_owned_windows*/ true); - - callback_->OnCaptureResult(results.result, std::move(results.frame)); -} - -WindowCapturerWin::CaptureResults WindowCapturerWin::CaptureFrame( - bool capture_owned_windows) { - TRACE_EVENT0("webrtc", "WindowCapturerWin::CaptureFrame"); - - if (!window_) { - RTC_LOG(LS_ERROR) << "Window hasn't been selected: " << GetLastError(); - return {Result::ERROR_PERMANENT, nullptr}; - } - - // Stop capturing if the window has been closed. - if (!IsWindow(window_)) { - RTC_LOG(LS_ERROR) << "target window has been closed"; - return {Result::ERROR_PERMANENT, nullptr}; - } - - // Determine the window region excluding any resize border, and including - // any visible border if capturing an owned window / dialog. (Don't include - // any visible border for the selected window for consistency with - // CroppingWindowCapturerWin, which would expose a bit of the background - // through the partially-transparent border.) 
- const bool avoid_cropping_border = !capture_owned_windows; - DesktopRect cropped_rect; - DesktopRect original_rect; - - if (!GetCroppedWindowRect(window_, avoid_cropping_border, &cropped_rect, - &original_rect)) { - RTC_LOG(LS_WARNING) << "Failed to get drawable window area: " - << GetLastError(); - return {Result::ERROR_TEMPORARY, nullptr}; - } - - // Return a 1x1 black frame if the window is minimized or invisible on current - // desktop, to match behavior on mace. Window can be temporarily invisible - // during the transition of full screen mode on/off. - if (original_rect.is_empty() || - !window_capture_helper_.IsWindowVisibleOnCurrentDesktop(window_)) { - std::unique_ptr frame( - new BasicDesktopFrame(DesktopSize(1, 1))); - - previous_size_ = frame->size(); - window_size_map_[window_] = previous_size_; - return {Result::SUCCESS, std::move(frame)}; - } - - HDC window_dc = GetWindowDC(window_); - if (!window_dc) { - RTC_LOG(LS_WARNING) << "Failed to get window DC: " << GetLastError(); - return {Result::ERROR_TEMPORARY, nullptr}; - } - - DesktopRect unscaled_cropped_rect = cropped_rect; - double horizontal_scale = 1.0; - double vertical_scale = 1.0; - - DesktopSize window_dc_size; - if (GetDcSize(window_dc, &window_dc_size)) { - // The |window_dc_size| is used to detect the scaling of the original - // window. If the application does not support high-DPI settings, it will - // be scaled by Windows according to the scaling setting. - // https://www.google.com/search?q=windows+scaling+settings&ie=UTF-8 - // So the size of the |window_dc|, i.e. the bitmap we can retrieve from - // PrintWindow() or BitBlt() function, will be smaller than - // |original_rect| and |cropped_rect|. Part of the captured desktop frame - // will be black. See - // bug https://bugs.chromium.org/p/webrtc/issues/detail?id=8112 for - // details. - - // If |window_dc_size| is smaller than |window_rect|, let's resize both - // |original_rect| and |cropped_rect| according to the scaling factor. 
- horizontal_scale = - static_cast(window_dc_size.width()) / original_rect.width(); - vertical_scale = - static_cast(window_dc_size.height()) / original_rect.height(); - original_rect.Scale(horizontal_scale, vertical_scale); - cropped_rect.Scale(horizontal_scale, vertical_scale); - } - - std::unique_ptr frame( - DesktopFrameWin::Create(original_rect.size(), nullptr, window_dc)); - if (!frame.get()) { - RTC_LOG(LS_WARNING) << "Failed to create frame."; - ReleaseDC(window_, window_dc); - return {Result::ERROR_TEMPORARY, nullptr}; - } - - HDC mem_dc = CreateCompatibleDC(window_dc); - HGDIOBJ previous_object = SelectObject(mem_dc, frame->bitmap()); - BOOL result = FALSE; - - // When desktop composition (Aero) is enabled each window is rendered to a - // private buffer allowing BitBlt() to get the window content even if the - // window is occluded. PrintWindow() is slower but lets rendering the window - // contents to an off-screen device context when Aero is not available. - // PrintWindow() is not supported by some applications. - // - // If Aero is enabled, we prefer BitBlt() because it's faster and avoids - // window flickering. Otherwise, we prefer PrintWindow() because BitBlt() may - // render occluding windows on top of the desired window. - // - // When composition is enabled the DC returned by GetWindowDC() doesn't always - // have window frame rendered correctly. Windows renders it only once and then - // caches the result between captures. We hack it around by calling - // PrintWindow() whenever window size changes, including the first time of - // capturing - it somehow affects what we get from BitBlt() on the subsequent - // captures. - // - // For Windows 8.1 and later, we want to always use PrintWindow when the - // cropping screen capturer falls back to the window capturer. I.e. - // on Windows 8.1 and later, PrintWindow is only used when the window is - // occluded. 
When the window is not occluded, it is much faster to capture - // the screen and to crop it to the window position and size. - if (rtc::IsWindows8OrLater()) { - // Special flag that makes PrintWindow to work on Windows 8.1 and later. - // Indeed certain apps (e.g. those using DirectComposition rendering) can't - // be captured using BitBlt or PrintWindow without this flag. Note that on - // Windows 8.0 this flag is not supported so the block below will fallback - // to the other call to PrintWindow. It seems to be very tricky to detect - // Windows 8.0 vs 8.1 so a try/fallback is more approriate here. - const UINT flags = PW_RENDERFULLCONTENT; - result = PrintWindow(window_, mem_dc, flags); - } - - if (!result && (!window_capture_helper_.IsAeroEnabled() || - !previous_size_.equals(frame->size()))) { - result = PrintWindow(window_, mem_dc, 0); - } - - // Aero is enabled or PrintWindow() failed, use BitBlt. - if (!result) { - result = BitBlt(mem_dc, 0, 0, frame->size().width(), frame->size().height(), - window_dc, 0, 0, SRCCOPY); - } - - SelectObject(mem_dc, previous_object); - DeleteDC(mem_dc); - ReleaseDC(window_, window_dc); - - previous_size_ = frame->size(); - window_size_map_[window_] = previous_size_; - - frame->mutable_updated_region()->SetRect( - DesktopRect::MakeSize(frame->size())); - frame->set_top_left( - original_rect.top_left().subtract(GetFullscreenRect().top_left())); - - if (!result) { - RTC_LOG(LS_ERROR) << "Both PrintWindow() and BitBlt() failed."; - return {Result::ERROR_TEMPORARY, nullptr}; - } - - // Rect for the data is relative to the first pixel of the frame. - cropped_rect.Translate(-original_rect.left(), -original_rect.top()); - std::unique_ptr cropped_frame = - CreateCroppedDesktopFrame(std::move(frame), cropped_rect); - RTC_DCHECK(cropped_frame); - - if (capture_owned_windows) { - // If any owned/pop-up windows overlap the selected window, capture them - // and copy/composite their contents into the frame. 
- owned_windows_.clear(); - OwnedWindowCollectorContext context(window_, unscaled_cropped_rect, - &window_capture_helper_, - &owned_windows_); - - if (context.IsSelectedWindowValid()) { - EnumWindows(OwnedWindowCollector, reinterpret_cast(&context)); - - if (!owned_windows_.empty()) { - if (!owned_window_capturer_) { - owned_window_capturer_ = std::make_unique(); - } - - // Owned windows are stored in top-down z-order, so this iterates in - // reverse to capture / draw them in bottom-up z-order - for (auto it = owned_windows_.rbegin(); it != owned_windows_.rend(); - it++) { - HWND hwnd = *it; - if (owned_window_capturer_->SelectSource( - reinterpret_cast(hwnd))) { - CaptureResults results = owned_window_capturer_->CaptureFrame( - /*capture_owned_windows*/ false); - - if (results.result != DesktopCapturer::Result::SUCCESS) { - // Simply log any error capturing an owned/pop-up window without - // bubbling it up to the caller (an expected error here is that - // the owned/pop-up window was closed; any unexpected errors won't - // fail the outer capture). - RTC_LOG(LS_INFO) << "Capturing owned window failed (previous " - "error/warning pertained to that)"; - } else { - // Copy / composite the captured frame into the outer frame. This - // may no-op if they no longer intersect (if the owned window was - // moved outside the owner bounds since scheduled for capture.) 
- cropped_frame->CopyIntersectingPixelsFrom( - *results.frame, horizontal_scale, vertical_scale); - } - } - } - } - } - } - - return {Result::SUCCESS, std::move(cropped_frame)}; -} - -} // namespace - // static std::unique_ptr DesktopCapturer::CreateRawWindowCapturer( const DesktopCaptureOptions& options) { - return std::unique_ptr(new WindowCapturerWin()); + return WindowCapturerWinGdi::CreateRawWindowCapturer(options); } } // namespace webrtc diff --git a/modules/include/module_common_types.cc b/modules/include/module_common_types.cc deleted file mode 100644 index 86f753356d..0000000000 --- a/modules/include/module_common_types.cc +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "modules/include/module_common_types.h" - -#include - -#include -#include - -#include "rtc_base/numerics/safe_conversions.h" - -namespace webrtc { - -RTPFragmentationHeader::RTPFragmentationHeader() - : fragmentationVectorSize(0), - fragmentationOffset(nullptr), - fragmentationLength(nullptr) {} - -RTPFragmentationHeader::RTPFragmentationHeader(RTPFragmentationHeader&& other) - : RTPFragmentationHeader() { - swap(*this, other); -} - -RTPFragmentationHeader& RTPFragmentationHeader::operator=( - RTPFragmentationHeader&& other) { - swap(*this, other); - return *this; -} - -RTPFragmentationHeader::~RTPFragmentationHeader() { - delete[] fragmentationOffset; - delete[] fragmentationLength; -} - -void swap(RTPFragmentationHeader& a, RTPFragmentationHeader& b) { - using std::swap; - swap(a.fragmentationVectorSize, b.fragmentationVectorSize); - swap(a.fragmentationOffset, b.fragmentationOffset); - swap(a.fragmentationLength, b.fragmentationLength); -} - -void RTPFragmentationHeader::CopyFrom(const RTPFragmentationHeader& src) { - if (this == &src) { - return; - } - - if (src.fragmentationVectorSize != fragmentationVectorSize) { - // new size of vectors - - // delete old - delete[] fragmentationOffset; - fragmentationOffset = nullptr; - delete[] fragmentationLength; - fragmentationLength = nullptr; - - if (src.fragmentationVectorSize > 0) { - // allocate new - if (src.fragmentationOffset) { - fragmentationOffset = new size_t[src.fragmentationVectorSize]; - } - if (src.fragmentationLength) { - fragmentationLength = new size_t[src.fragmentationVectorSize]; - } - } - // set new size - fragmentationVectorSize = src.fragmentationVectorSize; - } - - if (src.fragmentationVectorSize > 0) { - // copy values - if (src.fragmentationOffset) { - memcpy(fragmentationOffset, src.fragmentationOffset, - src.fragmentationVectorSize * sizeof(size_t)); - } - if (src.fragmentationLength) { - memcpy(fragmentationLength, src.fragmentationLength, - src.fragmentationVectorSize * 
sizeof(size_t)); - } - } -} - -void RTPFragmentationHeader::Resize(size_t size) { - const uint16_t size16 = rtc::dchecked_cast(size); - if (fragmentationVectorSize < size16) { - uint16_t oldVectorSize = fragmentationVectorSize; - { - // offset - size_t* oldOffsets = fragmentationOffset; - fragmentationOffset = new size_t[size16]; - memset(fragmentationOffset + oldVectorSize, 0, - sizeof(size_t) * (size16 - oldVectorSize)); - // copy old values - memcpy(fragmentationOffset, oldOffsets, sizeof(size_t) * oldVectorSize); - delete[] oldOffsets; - } - // length - { - size_t* oldLengths = fragmentationLength; - fragmentationLength = new size_t[size16]; - memset(fragmentationLength + oldVectorSize, 0, - sizeof(size_t) * (size16 - oldVectorSize)); - memcpy(fragmentationLength, oldLengths, sizeof(size_t) * oldVectorSize); - delete[] oldLengths; - } - fragmentationVectorSize = size16; - } -} - -} // namespace webrtc diff --git a/modules/include/module_common_types.h b/modules/include/module_common_types.h index 3afd7b7d7a..7c9ef39cf0 100644 --- a/modules/include/module_common_types.h +++ b/modules/include/module_common_types.h @@ -11,44 +11,12 @@ #ifndef MODULES_INCLUDE_MODULE_COMMON_TYPES_H_ #define MODULES_INCLUDE_MODULE_COMMON_TYPES_H_ -#include #include #include -#include "rtc_base/system/rtc_export.h" - namespace webrtc { -class RTC_EXPORT RTPFragmentationHeader { - public: - RTPFragmentationHeader(); - RTPFragmentationHeader(const RTPFragmentationHeader&) = delete; - RTPFragmentationHeader(RTPFragmentationHeader&& other); - RTPFragmentationHeader& operator=(const RTPFragmentationHeader& other) = - delete; - RTPFragmentationHeader& operator=(RTPFragmentationHeader&& other); - ~RTPFragmentationHeader(); - - friend void swap(RTPFragmentationHeader& a, RTPFragmentationHeader& b); - - void CopyFrom(const RTPFragmentationHeader& src); - void VerifyAndAllocateFragmentationHeader(size_t size) { Resize(size); } - - void Resize(size_t size); - size_t Size() const { return 
fragmentationVectorSize; } - - size_t Offset(size_t index) const { return fragmentationOffset[index]; } - size_t Length(size_t index) const { return fragmentationLength[index]; } - - // TODO(danilchap): Move all members to private section, - // simplify by replacing raw arrays with single std::vector - uint16_t fragmentationVectorSize; // Number of fragmentations - size_t* fragmentationOffset; // Offset of pointer to data for each - // fragmentation - size_t* fragmentationLength; // Data size for each fragmentation -}; - // Interface used by the CallStats class to distribute call statistics. // Callbacks will be triggered as soon as the class has been registered to a // CallStats object using RegisterStatsObserver. diff --git a/modules/pacing/BUILD.gn b/modules/pacing/BUILD.gn index 6f65c33942..0787105f14 100644 --- a/modules/pacing/BUILD.gn +++ b/modules/pacing/BUILD.gn @@ -34,6 +34,7 @@ rtc_library("pacing") { ":interval_budget", "..:module_api", "../../api:function_view", + "../../api:sequence_checker", "../../api/rtc_event_log", "../../api/task_queue:task_queue", "../../api/transport:field_trial_based_config", @@ -49,7 +50,7 @@ rtc_library("pacing") { "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_task_queue", "../../rtc_base/experiments:field_trial_parser", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", "../../rtc_base/task_utils:to_queued_task", "../../system_wrappers", "../../system_wrappers:metrics", @@ -57,6 +58,8 @@ rtc_library("pacing") { "../rtp_rtcp", "../rtp_rtcp:rtp_rtcp_format", "../utility", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", @@ -90,6 +93,7 @@ if (rtc_include_tests) { deps = [ ":interval_budget", ":pacing", + "../../api/transport:network_control", "../../api/units:data_rate", "../../api/units:time_delta", "../../modules/utility:mock_process_thread", @@ -99,6 +103,7 @@ if 
(rtc_include_tests) { "../../rtc_base/experiments:alr_experiment", "../../system_wrappers", "../../system_wrappers:field_trial", + "../../test:explicit_key_value_config", "../../test:field_trial", "../../test:test_support", "../../test/time_controller:time_controller", diff --git a/modules/pacing/bitrate_prober.cc b/modules/pacing/bitrate_prober.cc index e7ce01d95c..381a54d50a 100644 --- a/modules/pacing/bitrate_prober.cc +++ b/modules/pacing/bitrate_prober.cc @@ -26,7 +26,7 @@ namespace { // The min probe packet size is scaled with the bitrate we're probing at. // This defines the max min probe packet size, meaning that on high bitrates // we have a min probe packet size of 200 bytes. -constexpr size_t kMinProbePacketSize = 200; +constexpr DataSize kMinProbePacketSize = DataSize::Bytes(200); constexpr TimeDelta kProbeClusterTimeout = TimeDelta::Seconds(5); @@ -37,13 +37,16 @@ BitrateProberConfig::BitrateProberConfig( : min_probe_packets_sent("min_probe_packets_sent", 5), min_probe_delta("min_probe_delta", TimeDelta::Millis(1)), min_probe_duration("min_probe_duration", TimeDelta::Millis(15)), - max_probe_delay("max_probe_delay", TimeDelta::Millis(3)) { - ParseFieldTrial({&min_probe_packets_sent, &min_probe_delta, - &min_probe_duration, &max_probe_delay}, - key_value_config->Lookup("WebRTC-Bwe-ProbingConfiguration")); - ParseFieldTrial({&min_probe_packets_sent, &min_probe_delta, - &min_probe_duration, &max_probe_delay}, - key_value_config->Lookup("WebRTC-Bwe-ProbingBehavior")); + max_probe_delay("max_probe_delay", TimeDelta::Millis(10)), + abort_delayed_probes("abort_delayed_probes", true) { + ParseFieldTrial( + {&min_probe_packets_sent, &min_probe_delta, &min_probe_duration, + &max_probe_delay, &abort_delayed_probes}, + key_value_config->Lookup("WebRTC-Bwe-ProbingConfiguration")); + ParseFieldTrial( + {&min_probe_packets_sent, &min_probe_delta, &min_probe_duration, + &max_probe_delay, &abort_delayed_probes}, + 
key_value_config->Lookup("WebRTC-Bwe-ProbingBehavior")); } BitrateProber::~BitrateProber() { @@ -74,12 +77,11 @@ void BitrateProber::SetEnabled(bool enable) { } } -void BitrateProber::OnIncomingPacket(size_t packet_size) { +void BitrateProber::OnIncomingPacket(DataSize packet_size) { // Don't initialize probing unless we have something large enough to start // probing. if (probing_state_ == ProbingState::kInactive && !clusters_.empty() && - packet_size >= - std::min(RecommendedMinProbeSize(), kMinProbePacketSize)) { + packet_size >= std::min(RecommendedMinProbeSize(), kMinProbePacketSize)) { // Send next probe right away. next_probe_time_ = Timestamp::MinusInfinity(); probing_state_ = ProbingState::kActive; @@ -125,7 +127,8 @@ Timestamp BitrateProber::NextProbeTime(Timestamp now) const { return Timestamp::PlusInfinity(); } - if (next_probe_time_.IsFinite() && + // Legacy behavior, just warn about late probe and return as if not probing. + if (!config_.abort_delayed_probes && next_probe_time_.IsFinite() && now - next_probe_time_ > config_.max_probe_delay.Get()) { RTC_DLOG(LS_WARNING) << "Probe delay too high" " (next_ms:" @@ -137,9 +140,24 @@ Timestamp BitrateProber::NextProbeTime(Timestamp now) const { return next_probe_time_; } -PacedPacketInfo BitrateProber::CurrentCluster() const { - RTC_DCHECK(!clusters_.empty()); - RTC_DCHECK(probing_state_ == ProbingState::kActive); +absl::optional BitrateProber::CurrentCluster(Timestamp now) { + if (clusters_.empty() || probing_state_ != ProbingState::kActive) { + return absl::nullopt; + } + + if (config_.abort_delayed_probes && next_probe_time_.IsFinite() && + now - next_probe_time_ > config_.max_probe_delay.Get()) { + RTC_DLOG(LS_WARNING) << "Probe delay too high" + " (next_ms:" + << next_probe_time_.ms() << ", now_ms: " << now.ms() + << "), discarding probe cluster."; + clusters_.pop(); + if (clusters_.empty()) { + probing_state_ = ProbingState::kSuspended; + return absl::nullopt; + } + } + PacedPacketInfo info = 
clusters_.front().pace_info; info.probe_cluster_bytes_sent = clusters_.front().sent_bytes; return info; @@ -148,15 +166,18 @@ PacedPacketInfo BitrateProber::CurrentCluster() const { // Probe size is recommended based on the probe bitrate required. We choose // a minimum of twice |kMinProbeDeltaMs| interval to allow scheduling to be // feasible. -size_t BitrateProber::RecommendedMinProbeSize() const { - RTC_DCHECK(!clusters_.empty()); - return clusters_.front().pace_info.send_bitrate_bps * 2 * - config_.min_probe_delta->ms() / (8 * 1000); +DataSize BitrateProber::RecommendedMinProbeSize() const { + if (clusters_.empty()) { + return DataSize::Zero(); + } + DataRate send_rate = + DataRate::BitsPerSec(clusters_.front().pace_info.send_bitrate_bps); + return 2 * send_rate * config_.min_probe_delta; } -void BitrateProber::ProbeSent(Timestamp now, size_t bytes) { +void BitrateProber::ProbeSent(Timestamp now, DataSize size) { RTC_DCHECK(probing_state_ == ProbingState::kActive); - RTC_DCHECK_GT(bytes, 0); + RTC_DCHECK(!size.IsZero()); if (!clusters_.empty()) { ProbeCluster* cluster = &clusters_.front(); @@ -164,7 +185,7 @@ void BitrateProber::ProbeSent(Timestamp now, size_t bytes) { RTC_DCHECK(cluster->started_at.IsInfinite()); cluster->started_at = now; } - cluster->sent_bytes += static_cast(bytes); + cluster->sent_bytes += size.bytes(); cluster->sent_probes += 1; next_probe_time_ = CalculateNextProbeTime(*cluster); if (cluster->sent_bytes >= cluster->pace_info.probe_cluster_min_bytes && @@ -178,8 +199,9 @@ void BitrateProber::ProbeSent(Timestamp now, size_t bytes) { clusters_.pop(); } - if (clusters_.empty()) + if (clusters_.empty()) { probing_state_ = ProbingState::kSuspended; + } } } diff --git a/modules/pacing/bitrate_prober.h b/modules/pacing/bitrate_prober.h index 3ebe26ac1f..5a89aac435 100644 --- a/modules/pacing/bitrate_prober.h +++ b/modules/pacing/bitrate_prober.h @@ -35,9 +35,11 @@ struct BitrateProberConfig { FieldTrialParameter min_probe_delta; // The minimum 
probing duration. FieldTrialParameter min_probe_duration; - // Maximum amount of time each probe can be delayed. Probe cluster is reset - // and retried from the start when this limit is reached. + // Maximum amount of time each probe can be delayed. FieldTrialParameter max_probe_delay; + // If NextProbeTime() is called with a delay higher than specified by + // |max_probe_delay|, abort it. + FieldTrialParameter abort_delayed_probes; }; // Note that this class isn't thread-safe by itself and therefore relies @@ -57,29 +59,29 @@ class BitrateProber { // Initializes a new probing session if the prober is allowed to probe. Does // not initialize the prober unless the packet size is large enough to probe // with. - void OnIncomingPacket(size_t packet_size); + void OnIncomingPacket(DataSize packet_size); // Create a cluster used to probe for |bitrate_bps| with |num_probes| number // of probes. void CreateProbeCluster(DataRate bitrate, Timestamp now, int cluster_id); - // Returns the at which the next probe should be sent to get accurate probing. - // If probing is not desired at this time, Timestamp::PlusInfinity() will be - // returned. + // Returns the time at which the next probe should be sent to get accurate + // probing. If probing is not desired at this time, Timestamp::PlusInfinity() + // will be returned. + // TODO(bugs.webrtc.org/11780): Remove |now| argument when old mode is gone. Timestamp NextProbeTime(Timestamp now) const; // Information about the current probing cluster. - PacedPacketInfo CurrentCluster() const; + absl::optional CurrentCluster(Timestamp now); // Returns the minimum number of bytes that the prober recommends for - // the next probe. - size_t RecommendedMinProbeSize() const; + // the next probe, or zero if not probing. + DataSize RecommendedMinProbeSize() const; // Called to report to the prober that a probe has been sent. In case of // multiple packets per probe, this call would be made at the end of sending - // the last packet in probe. 
|probe_size| is the total size of all packets - // in probe. - void ProbeSent(Timestamp now, size_t probe_size); + // the last packet in probe. |size| is the total size of all packets in probe. + void ProbeSent(Timestamp now, DataSize size); private: enum class ProbingState { diff --git a/modules/pacing/bitrate_prober_unittest.cc b/modules/pacing/bitrate_prober_unittest.cc index 62277a0d2f..5627db0519 100644 --- a/modules/pacing/bitrate_prober_unittest.cc +++ b/modules/pacing/bitrate_prober_unittest.cc @@ -12,6 +12,7 @@ #include +#include "test/explicit_key_value_config.h" #include "test/gtest.h" namespace webrtc { @@ -28,7 +29,7 @@ TEST(BitrateProberTest, VerifyStatesAndTimeBetweenProbes) { const DataRate kTestBitrate1 = DataRate::KilobitsPerSec(900); const DataRate kTestBitrate2 = DataRate::KilobitsPerSec(1800); const int kClusterSize = 5; - const int kProbeSize = 1000; + const DataSize kProbeSize = DataSize::Bytes(1000); const TimeDelta kMinProbeDuration = TimeDelta::Millis(15); prober.CreateProbeCluster(kTestBitrate1, now, 0); @@ -37,7 +38,7 @@ TEST(BitrateProberTest, VerifyStatesAndTimeBetweenProbes) { prober.OnIncomingPacket(kProbeSize); EXPECT_TRUE(prober.is_probing()); - EXPECT_EQ(0, prober.CurrentCluster().probe_cluster_id); + EXPECT_EQ(0, prober.CurrentCluster(now)->probe_cluster_id); // First packet should probe as soon as possible. EXPECT_EQ(Timestamp::MinusInfinity(), prober.NextProbeTime(now)); @@ -45,14 +46,13 @@ TEST(BitrateProberTest, VerifyStatesAndTimeBetweenProbes) { for (int i = 0; i < kClusterSize; ++i) { now = std::max(now, prober.NextProbeTime(now)); EXPECT_EQ(now, std::max(now, prober.NextProbeTime(now))); - EXPECT_EQ(0, prober.CurrentCluster().probe_cluster_id); + EXPECT_EQ(0, prober.CurrentCluster(now)->probe_cluster_id); prober.ProbeSent(now, kProbeSize); } EXPECT_GE(now - start_time, kMinProbeDuration); // Verify that the actual bitrate is withing 10% of the target. 
- DataRate bitrate = - DataSize::Bytes(kProbeSize * (kClusterSize - 1)) / (now - start_time); + DataRate bitrate = kProbeSize * (kClusterSize - 1) / (now - start_time); EXPECT_GT(bitrate, kTestBitrate1 * 0.9); EXPECT_LT(bitrate, kTestBitrate1 * 1.1); @@ -62,14 +62,14 @@ TEST(BitrateProberTest, VerifyStatesAndTimeBetweenProbes) { for (int i = 0; i < kClusterSize; ++i) { now = std::max(now, prober.NextProbeTime(now)); EXPECT_EQ(now, std::max(now, prober.NextProbeTime(now))); - EXPECT_EQ(1, prober.CurrentCluster().probe_cluster_id); + EXPECT_EQ(1, prober.CurrentCluster(now)->probe_cluster_id); prober.ProbeSent(now, kProbeSize); } // Verify that the actual bitrate is withing 10% of the target. TimeDelta duration = now - probe2_started; EXPECT_GE(duration, kMinProbeDuration); - bitrate = DataSize::Bytes(kProbeSize * (kClusterSize - 1)) / duration; + bitrate = (kProbeSize * (kClusterSize - 1)) / duration; EXPECT_GT(bitrate, kTestBitrate2 * 0.9); EXPECT_LT(bitrate, kTestBitrate2 * 1.1); @@ -80,6 +80,7 @@ TEST(BitrateProberTest, VerifyStatesAndTimeBetweenProbes) { TEST(BitrateProberTest, DoesntProbeWithoutRecentPackets) { const FieldTrialBasedConfig config; BitrateProber prober(config); + const DataSize kProbeSize = DataSize::Bytes(1000); Timestamp now = Timestamp::Zero(); EXPECT_EQ(prober.NextProbeTime(now), Timestamp::PlusInfinity()); @@ -87,19 +88,74 @@ TEST(BitrateProberTest, DoesntProbeWithoutRecentPackets) { prober.CreateProbeCluster(DataRate::KilobitsPerSec(900), now, 0); EXPECT_FALSE(prober.is_probing()); - prober.OnIncomingPacket(1000); + prober.OnIncomingPacket(kProbeSize); EXPECT_TRUE(prober.is_probing()); EXPECT_EQ(now, std::max(now, prober.NextProbeTime(now))); - prober.ProbeSent(now, 1000); - // Let time pass, no large enough packets put into prober. 
- now += TimeDelta::Seconds(6); + prober.ProbeSent(now, kProbeSize); +} + +TEST(BitrateProberTest, DoesntDiscardDelayedProbesInLegacyMode) { + const TimeDelta kMaxProbeDelay = TimeDelta::Millis(3); + const test::ExplicitKeyValueConfig trials( + "WebRTC-Bwe-ProbingBehavior/" + "abort_delayed_probes:0," + "max_probe_delay:3ms/"); + BitrateProber prober(trials); + const DataSize kProbeSize = DataSize::Bytes(1000); + + Timestamp now = Timestamp::Zero(); + prober.CreateProbeCluster(DataRate::KilobitsPerSec(900), now, 0); + prober.OnIncomingPacket(kProbeSize); + EXPECT_TRUE(prober.is_probing()); + EXPECT_EQ(prober.CurrentCluster(now)->probe_cluster_id, 0); + // Advance to first probe time and indicate sent probe. + now = std::max(now, prober.NextProbeTime(now)); + prober.ProbeSent(now, kProbeSize); + + // Advance time 1ms past timeout for the next probe. + Timestamp next_probe_time = prober.NextProbeTime(now); + EXPECT_GT(next_probe_time, now); + now += next_probe_time - now + kMaxProbeDelay + TimeDelta::Millis(1); + EXPECT_EQ(prober.NextProbeTime(now), Timestamp::PlusInfinity()); // Check that legacy behaviour where prober is reset in TimeUntilNextProbe is // no longer there. Probes are no longer retried if they are timed out. - prober.OnIncomingPacket(1000); + prober.OnIncomingPacket(kProbeSize); EXPECT_EQ(prober.NextProbeTime(now), Timestamp::PlusInfinity()); } +TEST(BitrateProberTest, DiscardsDelayedProbesWhenNotInLegacyMode) { + const TimeDelta kMaxProbeDelay = TimeDelta::Millis(3); + const test::ExplicitKeyValueConfig trials( + "WebRTC-Bwe-ProbingBehavior/" + "abort_delayed_probes:1," + "max_probe_delay:3ms/"); + BitrateProber prober(trials); + const DataSize kProbeSize = DataSize::Bytes(1000); + + Timestamp now = Timestamp::Zero(); + + // Add two probe clusters. 
+ prober.CreateProbeCluster(DataRate::KilobitsPerSec(900), now, /*id=*/0); + + prober.OnIncomingPacket(kProbeSize); + EXPECT_TRUE(prober.is_probing()); + EXPECT_EQ(prober.CurrentCluster(now)->probe_cluster_id, 0); + // Advance to first probe time and indicate sent probe. + now = std::max(now, prober.NextProbeTime(now)); + prober.ProbeSent(now, kProbeSize); + + // Advance time 1ms past timeout for the next probe. + Timestamp next_probe_time = prober.NextProbeTime(now); + EXPECT_GT(next_probe_time, now); + now += next_probe_time - now + kMaxProbeDelay + TimeDelta::Millis(1); + + // Still indicates the time we wanted to probe at. + EXPECT_EQ(prober.NextProbeTime(now), next_probe_time); + // First and only cluster removed due to timeout. + EXPECT_FALSE(prober.CurrentCluster(now).has_value()); +} + TEST(BitrateProberTest, DoesntInitializeProbingForSmallPackets) { const FieldTrialBasedConfig config; BitrateProber prober(config); @@ -107,7 +163,7 @@ TEST(BitrateProberTest, DoesntInitializeProbingForSmallPackets) { prober.SetEnabled(true); EXPECT_FALSE(prober.is_probing()); - prober.OnIncomingPacket(100); + prober.OnIncomingPacket(DataSize::Bytes(100)); EXPECT_FALSE(prober.is_probing()); } @@ -121,7 +177,7 @@ TEST(BitrateProberTest, VerifyProbeSizeOnHighBitrate) { /*cluster_id=*/0); // Probe size should ensure a minimum of 1 ms interval. EXPECT_GT(prober.RecommendedMinProbeSize(), - (kHighBitrate * TimeDelta::Millis(1)).bytes()); + kHighBitrate * TimeDelta::Millis(1)); } TEST(BitrateProberTest, MinumumNumberOfProbingPackets) { @@ -130,14 +186,14 @@ TEST(BitrateProberTest, MinumumNumberOfProbingPackets) { // Even when probing at a low bitrate we expect a minimum number // of packets to be sent. 
const DataRate kBitrate = DataRate::KilobitsPerSec(100); - const int kPacketSizeBytes = 1000; + const DataSize kPacketSize = DataSize::Bytes(1000); Timestamp now = Timestamp::Millis(0); prober.CreateProbeCluster(kBitrate, now, 0); - prober.OnIncomingPacket(kPacketSizeBytes); + prober.OnIncomingPacket(kPacketSize); for (int i = 0; i < 5; ++i) { EXPECT_TRUE(prober.is_probing()); - prober.ProbeSent(now, kPacketSizeBytes); + prober.ProbeSent(now, kPacketSize); } EXPECT_FALSE(prober.is_probing()); @@ -147,17 +203,17 @@ TEST(BitrateProberTest, ScaleBytesUsedForProbing) { const FieldTrialBasedConfig config; BitrateProber prober(config); const DataRate kBitrate = DataRate::KilobitsPerSec(10000); // 10 Mbps. - const int kPacketSizeBytes = 1000; - const int kExpectedBytesSent = (kBitrate * TimeDelta::Millis(15)).bytes(); + const DataSize kPacketSize = DataSize::Bytes(1000); + const DataSize kExpectedDataSent = kBitrate * TimeDelta::Millis(15); Timestamp now = Timestamp::Millis(0); prober.CreateProbeCluster(kBitrate, now, /*cluster_id=*/0); - prober.OnIncomingPacket(kPacketSizeBytes); - int bytes_sent = 0; - while (bytes_sent < kExpectedBytesSent) { + prober.OnIncomingPacket(kPacketSize); + DataSize data_sent = DataSize::Zero(); + while (data_sent < kExpectedDataSent) { ASSERT_TRUE(prober.is_probing()); - prober.ProbeSent(now, kPacketSizeBytes); - bytes_sent += kPacketSizeBytes; + prober.ProbeSent(now, kPacketSize); + data_sent += kPacketSize; } EXPECT_FALSE(prober.is_probing()); @@ -167,17 +223,17 @@ TEST(BitrateProberTest, HighBitrateProbing) { const FieldTrialBasedConfig config; BitrateProber prober(config); const DataRate kBitrate = DataRate::KilobitsPerSec(1000000); // 1 Gbps. 
- const int kPacketSizeBytes = 1000; - const int kExpectedBytesSent = (kBitrate * TimeDelta::Millis(15)).bytes(); + const DataSize kPacketSize = DataSize::Bytes(1000); + const DataSize kExpectedDataSent = kBitrate * TimeDelta::Millis(15); Timestamp now = Timestamp::Millis(0); prober.CreateProbeCluster(kBitrate, now, 0); - prober.OnIncomingPacket(kPacketSizeBytes); - int bytes_sent = 0; - while (bytes_sent < kExpectedBytesSent) { + prober.OnIncomingPacket(kPacketSize); + DataSize data_sent = DataSize::Zero(); + while (data_sent < kExpectedDataSent) { ASSERT_TRUE(prober.is_probing()); - prober.ProbeSent(now, kPacketSizeBytes); - bytes_sent += kPacketSizeBytes; + prober.ProbeSent(now, kPacketSize); + data_sent += kPacketSize; } EXPECT_FALSE(prober.is_probing()); @@ -187,9 +243,9 @@ TEST(BitrateProberTest, ProbeClusterTimeout) { const FieldTrialBasedConfig config; BitrateProber prober(config); const DataRate kBitrate = DataRate::KilobitsPerSec(300); - const int kSmallPacketSize = 20; + const DataSize kSmallPacketSize = DataSize::Bytes(20); // Expecting two probe clusters of 5 packets each. 
- const int kExpectedBytesSent = 20 * 2 * 5; + const DataSize kExpectedDataSent = kSmallPacketSize * 2 * 5; const TimeDelta kTimeout = TimeDelta::Millis(5000); Timestamp now = Timestamp::Millis(0); @@ -204,11 +260,11 @@ TEST(BitrateProberTest, ProbeClusterTimeout) { prober.CreateProbeCluster(kBitrate / 10, now, /*cluster_id=*/2); prober.OnIncomingPacket(kSmallPacketSize); EXPECT_TRUE(prober.is_probing()); - int bytes_sent = 0; - while (bytes_sent < kExpectedBytesSent) { + DataSize data_sent = DataSize::Zero(); + while (data_sent < kExpectedDataSent) { ASSERT_TRUE(prober.is_probing()); prober.ProbeSent(now, kSmallPacketSize); - bytes_sent += kSmallPacketSize; + data_sent += kSmallPacketSize; } EXPECT_FALSE(prober.is_probing()); diff --git a/modules/pacing/g3doc/index.md b/modules/pacing/g3doc/index.md new file mode 100644 index 0000000000..4187a8bd9b --- /dev/null +++ b/modules/pacing/g3doc/index.md @@ -0,0 +1,169 @@ + + + +# Paced Sending + +The paced sender, often referred to as just the "pacer", is a part of the WebRTC +RTP stack used primarily to smooth the flow of packets sent onto the network. + +## Background + +Consider a video stream at 5Mbps and 30fps. This would in an ideal world result +in each frame being ~21kB large and packetized into 18 RTP packets. While the +average bitrate over say a one second sliding window would be a correct 5Mbps, +on a shorter time scale it can be seen as a burst of 167Mbps every 33ms, each +followed by a 32ms silent period. Further, it is quite common that video +encoders overshoot the target frame size in case of sudden movement especially +dealing with screensharing. Frames being 10x or even 100x larger than the ideal +size is an all too real scenario. These packet bursts can cause several issues, +such as congesting networks and causing buffer bloat or even packet loss. Most +sessions have more than one media stream, e.g. a video and an audio track. 
If +you put a frame on the wire in one go, and those packets take 100ms to reach the +other side - that means you have now blocked any audio packets from reaching the +remote end in time as well. + +The paced sender solves this by having a buffer in which media is queued, and +then using a _leaky bucket_ algorithm to pace them onto the network. The buffer +contains separate fifo streams for all media tracks so that e.g. audio can be +prioritized over video - and equal prio streams can be sent in a round-robin +fashion to avoid any one stream blocking others. + +Since the pacer is in control of the bitrate sent on the wire, it is also used +to generate padding in cases where a minimum send rate is required - and to +generate packet trains if bitrate probing is used. + +## Life of a Packet + +The typical path for media packets when using the paced sender looks something +like this: + +1. `RTPSenderVideo` or `RTPSenderAudio` packetizes media into RTP packets. +2. The packets are sent to the [RTPSender] class for transmission. +3. The pacer is called via [RtpPacketSender] interface to enqueue the packet + batch. +4. The packets are put into a queue within the pacer awaiting opportune moments + to send them. +5. At a calculated time, the pacer calls the `PacingController::PacketSender()` + callback method, normally implemented by the [PacketRouter] class. +6. The router forwards the packet to the correct RTP module based on the + packet's SSRC, and in which the `RTPSenderEgress` class makes final time + stamping, potentially records it for retransmissions etc. +7. The packet is sent to the low-level `Transport` interface, after which it is + now out of scope. + +Asynchronously to this, the estimated available send bandwidth is determined - +and the target send rate is set on the `RtpPacketPacker` via the `void +SetPacingRates(DataRate pacing_rate, DataRate padding_rate)` method. 
+ +## Packet Prioritization + +The pacer prioritized packets based on two criteria: + +* Packet type, with most to least prioritized: + 1. Audio + 2. Retransmissions + 3. Video and FEC + 4. Padding +* Enqueue order + +The enqueue order is enforced on a per stream (SSRC) basis. Given equal +priority, the [RoundRobinPacketQueue] alternates between media streams to ensure +no stream needlessly blocks others. + +## Implementations + +There are currently two implementations of the paced sender (although they share +a large amount of logic via the `PacingController` class). The legacy +[PacedSender] uses a dedicated thread to poll the pacing controller at 5ms +intervals, and has a lock to protect internal state. The newer +[TaskQueuePacedSender] as the name implies uses a TaskQueue to both protect +state and schedule packet processing, the latter is dynamic based on actual send +rates and constraints. Avoid using the legacy PacedSender in new applications as +we are planning to remove it. + +## The Packet Router + +An adjacent component called [PacketRouter] is used to route packets coming out +of the pacer and into the correct RTP module. It has the following functions: + +* The `SendPacket` method looks up an RTP module with an SSRC corresponding to + the packet for further routing to the network. +* If send-side bandwidth estimation is used, it populates the transport-wide + sequence number extension. +* Generate padding. Modules supporting payload-based padding are prioritized, + with the last module to have sent media always being the first choice. +* Returns any generated FEC after having sent media. +* Forwards REMB and/or TransportFeedback messages to suitable RTP modules. + +At present the FEC is generated on a per SSRC basis, so is always returned from +an RTP module after sending media. Hopefully one day we will support covering +multiple streams with a single FlexFEC stream - and the packet router is the +likely place for that FEC generator to live. 
It may even be used for FEC padding +as an alternative to RTX. + +## The API + +The section outlines the classes and methods relevant to a few different use +cases of the pacer. + +### Packet sending + +For sending packets, use +`RtpPacketSender::EnqueuePackets(std::vector> +packets)` The pacer takes a `PacingController::PacketSender` as constructor +argument, this callback is used when it's time to actually send packets. + +### Send rates + +To control the send rate, use `void SetPacingRates(DataRate pacing_rate, +DataRate padding_rate)` If the packet queue becomes empty and the send rate +drops below `padding_rate`, the pacer will request padding packets from the +`PacketRouter`. + +In order to completely suspend/resume sending data (e.g. due to network +availability), use the `Pause()` and `Resume()` methods. + +The specified pacing rate may be overriden in some cases, e.g. due to extreme +encoder overshoot. Use `void SetQueueTimeLimit(TimeDelta limit)` to specify the +longest time you want packets to spend waiting in the pacer queue (pausing +excluded). The actual send rate may then be increased past the pacing_rate to +try to make the _average_ queue time less than that requested limit. The +rationale for this is that if the send queue is say longer than three seconds, +it's better to risk packet loss and then try to recover using a key-frame rather +than cause severe delays. + +### Bandwidth estimation + +If the bandwidth estimator supports bandwidth probing, it may request a cluster +of packets to be sent at a specified rate in order to gauge if this causes +increased delay/loss on the network. Use the `void CreateProbeCluster(DataRate +bitrate, int cluster_id)` method - packets sent via this `PacketRouter` will be +marked with the corresponding cluster_id in the attached `PacedPacketInfo` +struct. + +If congestion window pushback is used, the state can be updated using +`SetCongestionWindow()` and `UpdateOutstandingData()`. 
+ +A few more methods control how we pace: * `SetAccountForAudioPackets()` +determines if audio packets count into bandwidth consumed. * +`SetIncludeOverhead()` determines if the entire RTP packet size counts into +bandwidth used (otherwise just media payload). * `SetTransportOverhead()` sets +an additional data size consumed per packet, representing e.g. UDP/IP headers. + +### Stats + +Several methods are used to gather statistics in pacer state: + +* `OldestPacketWaitTime()` time since the oldest packet in the queue was + added. +* `QueueSizeData()` total bytes currently in the queue. +* `FirstSentPacketTime()` absolute time the first packet was sent. +* `ExpectedQueueTime()` total bytes in the queue divided by the send rate. + +[RTPSender]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/rtp_rtcp/source/rtp_sender.h;drc=77ee8542dd35d5143b5788ddf47fb7cdb96eb08e +[RtpPacketSender]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/rtp_rtcp/include/rtp_packet_sender.h;drc=ea55b0872f14faab23a4e5dbcb6956369c8ed5dc +[RtpPacketPacer]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/pacing/rtp_packet_pacer.h;drc=e7bc3a347760023dd4840cf6ebdd1e6c8592f4d7 +[PacketRouter]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/pacing/packet_router.h;drc=3d2210876e31d0bb5c7de88b27fd02ceb1f4e03e +[PacedSender]: https://source.chromium.org/chromium/chromium/src/+/master:media/cast/net/pacing/paced_sender.h;drc=df00acf8f3cea9a947e11dc687aa1147971a1883 +[TaskQueuePacedSender]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/pacing/task_queue_paced_sender.h;drc=5051693ada61bc7b78855c6fb3fa87a0394fa813 +[RoundRobinPacketQueue]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/pacing/round_robin_packet_queue.h;drc=b571ff48f8fe07678da5a854cd6c3f5dde02855f diff --git 
a/modules/pacing/paced_sender.cc b/modules/pacing/paced_sender.cc index 1d02fe95e4..51d3edc301 100644 --- a/modules/pacing/paced_sender.cc +++ b/modules/pacing/paced_sender.cc @@ -22,13 +22,15 @@ #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/clock.h" namespace webrtc { const int64_t PacedSender::kMaxQueueLengthMs = 2000; const float PacedSender::kDefaultPaceMultiplier = 2.5f; -PacedSender::PacedSender(Clock* clock, PacketRouter* packet_router, +PacedSender::PacedSender(Clock* clock, + PacketRouter* packet_router, RtcEventLog* event_log, const WebRtcKeyValueConfig* field_trials, ProcessThread* process_thread) @@ -39,10 +41,11 @@ PacedSender::PacedSender(Clock* clock, PacketRouter* packet_router, ? PacingController::ProcessMode::kDynamic : PacingController::ProcessMode::kPeriodic), pacing_controller_(clock, - static_cast(this), - event_log, field_trials, process_mode_), + packet_router, + event_log, + field_trials, + process_mode_), clock_(clock), - packet_router_(packet_router), process_thread_(process_thread) { if (process_thread_) process_thread_->RegisterModule(&module_proxy_, RTC_FROM_HERE); @@ -55,13 +58,13 @@ PacedSender::~PacedSender() { } void PacedSender::CreateProbeCluster(DataRate bitrate, int cluster_id) { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); return pacing_controller_.CreateProbeCluster(bitrate, cluster_id); } void PacedSender::Pause() { { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.Pause(); } @@ -74,7 +77,7 @@ void PacedSender::Pause() { void PacedSender::Resume() { { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.Resume(); } @@ -87,7 +90,7 @@ void PacedSender::Resume() { void PacedSender::SetCongestionWindow(DataSize congestion_window_size) { { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); 
pacing_controller_.SetCongestionWindow(congestion_window_size); } MaybeWakupProcessThread(); @@ -95,7 +98,7 @@ void PacedSender::SetCongestionWindow(DataSize congestion_window_size) { void PacedSender::UpdateOutstandingData(DataSize outstanding_data) { { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.UpdateOutstandingData(outstanding_data); } MaybeWakupProcessThread(); @@ -103,7 +106,7 @@ void PacedSender::UpdateOutstandingData(DataSize outstanding_data) { void PacedSender::SetPacingRates(DataRate pacing_rate, DataRate padding_rate) { { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.SetPacingRates(pacing_rate, padding_rate); } MaybeWakupProcessThread(); @@ -112,8 +115,16 @@ void PacedSender::SetPacingRates(DataRate pacing_rate, DataRate padding_rate) { void PacedSender::EnqueuePackets( std::vector> packets) { { - rtc::CritScope cs(&critsect_); + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("webrtc"), + "PacedSender::EnqueuePackets"); + MutexLock lock(&mutex_); for (auto& packet : packets) { + TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webrtc"), + "PacedSender::EnqueuePackets::Loop", "sequence_number", + packet->SequenceNumber(), "rtp_timestamp", + packet->Timestamp()); + + RTC_DCHECK_GE(packet->capture_time_ms(), 0); pacing_controller_.EnqueuePacket(std::move(packet)); } } @@ -121,42 +132,42 @@ void PacedSender::EnqueuePackets( } void PacedSender::SetAccountForAudioPackets(bool account_for_audio) { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.SetAccountForAudioPackets(account_for_audio); } void PacedSender::SetIncludeOverhead() { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.SetIncludeOverhead(); } void PacedSender::SetTransportOverhead(DataSize overhead_per_packet) { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.SetTransportOverhead(overhead_per_packet); } TimeDelta PacedSender::ExpectedQueueTime() const { - 
rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); return pacing_controller_.ExpectedQueueTime(); } DataSize PacedSender::QueueSizeData() const { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); return pacing_controller_.QueueSizeData(); } absl::optional PacedSender::FirstSentPacketTime() const { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); return pacing_controller_.FirstSentPacketTime(); } TimeDelta PacedSender::OldestPacketWaitTime() const { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); return pacing_controller_.OldestPacketWaitTime(); } int64_t PacedSender::TimeUntilNextProcess() { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); Timestamp next_send_time = pacing_controller_.NextSendTime(); TimeDelta sleep_time = @@ -168,7 +179,7 @@ int64_t PacedSender::TimeUntilNextProcess() { } void PacedSender::Process() { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.ProcessPackets(); } @@ -188,19 +199,10 @@ void PacedSender::MaybeWakupProcessThread() { void PacedSender::SetQueueTimeLimit(TimeDelta limit) { { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); pacing_controller_.SetQueueTimeLimit(limit); } MaybeWakupProcessThread(); } -void PacedSender::SendRtpPacket(std::unique_ptr packet, - const PacedPacketInfo& cluster_info) { - packet_router_->SendPacket(std::move(packet), cluster_info); -} - -std::vector> PacedSender::GeneratePadding( - DataSize size) { - return packet_router_->GeneratePadding(size.bytes()); -} } // namespace webrtc diff --git a/modules/pacing/paced_sender.h b/modules/pacing/paced_sender.h index 16137dfcd6..c819f3fb79 100644 --- a/modules/pacing/paced_sender.h +++ b/modules/pacing/paced_sender.h @@ -32,7 +32,7 @@ #include "modules/rtp_rtcp/include/rtp_packet_sender.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "modules/utility/include/process_thread.h" -#include "rtc_base/critical_section.h" +#include 
"rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -43,8 +43,7 @@ class RtcEventLog; // updating dependencies. class PacedSender : public Module, public RtpPacketPacer, - public RtpPacketSender, - private PacingController::PacketSender { + public RtpPacketSender { public: // Expected max pacer delay in ms. If ExpectedQueueTime() is higher than // this value, the packet producers should wait (eg drop frames rather than @@ -140,14 +139,6 @@ class PacedSender : public Module, // In dynamic process mode, refreshes the next process time. void MaybeWakupProcessThread(); - // Methods implementing PacedSenderController:PacketSender. - void SendRtpPacket(std::unique_ptr packet, - const PacedPacketInfo& cluster_info) override - RTC_EXCLUSIVE_LOCKS_REQUIRED(critsect_); - - std::vector> GeneratePadding( - DataSize size) override RTC_EXCLUSIVE_LOCKS_REQUIRED(critsect_); - // Private implementation of Module to not expose those implementation details // publicly and control when the class is registered/deregistered. class ModuleProxy : public Module { @@ -166,12 +157,11 @@ class PacedSender : public Module, PacedSender* const delegate_; } module_proxy_{this}; - rtc::CriticalSection critsect_; + mutable Mutex mutex_; const PacingController::ProcessMode process_mode_; - PacingController pacing_controller_ RTC_GUARDED_BY(critsect_); + PacingController pacing_controller_ RTC_GUARDED_BY(mutex_); Clock* const clock_; - PacketRouter* const packet_router_; ProcessThread* const process_thread_; }; } // namespace webrtc diff --git a/modules/pacing/paced_sender_unittest.cc b/modules/pacing/paced_sender_unittest.cc index 26d2eac413..53cc1c42ed 100644 --- a/modules/pacing/paced_sender_unittest.cc +++ b/modules/pacing/paced_sender_unittest.cc @@ -39,12 +39,15 @@ constexpr size_t kDefaultPacketSize = 234; // Mock callback implementing the raw api. 
class MockCallback : public PacketRouter { public: - MOCK_METHOD2(SendPacket, - void(std::unique_ptr packet, - const PacedPacketInfo& cluster_info)); - MOCK_METHOD1( - GeneratePadding, - std::vector>(size_t target_size_bytes)); + MOCK_METHOD(void, + SendPacket, + (std::unique_ptr packet, + const PacedPacketInfo& cluster_info), + (override)); + MOCK_METHOD(std::vector>, + GeneratePadding, + (DataSize target_size), + (override)); }; class ProcessModeTrials : public WebRtcKeyValueConfig { diff --git a/modules/pacing/pacing_controller.cc b/modules/pacing/pacing_controller.cc index b1f6e896a7..e0ace4e65e 100644 --- a/modules/pacing/pacing_controller.cc +++ b/modules/pacing/pacing_controller.cc @@ -39,6 +39,11 @@ constexpr TimeDelta kMaxElapsedTime = TimeDelta::Seconds(2); // time. Applies only to periodic mode. constexpr TimeDelta kMaxProcessingInterval = TimeDelta::Millis(30); +// Allow probes to be processed slightly ahead of inteded send time. Currently +// set to 1ms as this is intended to allow times be rounded down to the nearest +// millisecond. +constexpr TimeDelta kMaxEarlyProbeProcessing = TimeDelta::Millis(1); + constexpr int kFirstPriority = 0; bool IsDisabled(const WebRtcKeyValueConfig& field_trials, @@ -79,6 +84,7 @@ int GetPriorityForType(RtpPacketMediaType type) { // BWE high. 
return kFirstPriority + 4; } + RTC_CHECK_NOTREACHED(); } } // namespace @@ -106,8 +112,6 @@ PacingController::PacingController(Clock* clock, send_padding_if_silent_( IsEnabled(*field_trials_, "WebRTC-Pacer-PadInSilence")), pace_audio_(IsEnabled(*field_trials_, "WebRTC-Pacer-BlockAudio")), - small_first_probe_packet_( - IsEnabled(*field_trials_, "WebRTC-Pacer-SmallFirstProbePacket")), ignore_transport_overhead_( IsEnabled(*field_trials_, "WebRTC-Pacer-IgnoreTransportOverhead")), padding_target_duration_(GetDynamicPaddingTarget(*field_trials_)), @@ -289,13 +293,9 @@ TimeDelta PacingController::OldestPacketWaitTime() const { void PacingController::EnqueuePacketInternal( std::unique_ptr packet, int priority) { - prober_.OnIncomingPacket(packet->payload_size()); + prober_.OnIncomingPacket(DataSize::Bytes(packet->payload_size())); - // TODO(sprang): Make sure tests respect this, replace with DCHECK. Timestamp now = CurrentTime(); - if (packet->capture_time_ms() < 0) { - packet->set_capture_time_ms(now.ms()); - } if (mode_ == ProcessMode::kDynamic && packet_queue_.Empty() && NextSendTime() <= now) { @@ -306,7 +306,9 @@ void PacingController::EnqueuePacketInternal( } TimeDelta PacingController::UpdateTimeAndGetElapsed(Timestamp now) { - if (last_process_time_.IsMinusInfinity()) { + // If no previous processing, or last process was "in the future" because of + // early probe processing, then there is no elapsed time to add budget for. 
+ if (last_process_time_.IsMinusInfinity() || now < last_process_time_) { return TimeDelta::Zero(); } RTC_DCHECK_GE(now, last_process_time_); @@ -335,7 +337,7 @@ bool PacingController::ShouldSendKeepalive(Timestamp now) const { } Timestamp PacingController::NextSendTime() const { - Timestamp now = CurrentTime(); + const Timestamp now = CurrentTime(); if (paused_) { return last_send_time_ + kPausedProcessInterval; @@ -400,10 +402,15 @@ void PacingController::ProcessPackets() { Timestamp target_send_time = now; if (mode_ == ProcessMode::kDynamic) { target_send_time = NextSendTime(); + TimeDelta early_execute_margin = + prober_.is_probing() ? kMaxEarlyProbeProcessing : TimeDelta::Zero(); if (target_send_time.IsMinusInfinity()) { target_send_time = now; - } else if (now < target_send_time) { - // We are too early, abort and regroup! + } else if (now < target_send_time - early_execute_margin) { + // We are too early, but if queue is empty still allow draining some debt. + // Probing is allowed to be sent up to kMinSleepTime early. 
+ TimeDelta elapsed_time = UpdateTimeAndGetElapsed(now); + UpdateBudgetWithElapsedTime(elapsed_time); return; } @@ -438,7 +445,10 @@ void PacingController::ProcessPackets() { for (auto& packet : keepalive_packets) { keepalive_data_sent += DataSize::Bytes(packet->payload_size() + packet->padding_size()); - packet_sender_->SendRtpPacket(std::move(packet), PacedPacketInfo()); + packet_sender_->SendPacket(std::move(packet), PacedPacketInfo()); + for (auto& packet : packet_sender_->FetchFec()) { + EnqueuePacket(std::move(packet)); + } } OnPaddingSent(keepalive_data_sent); } @@ -481,13 +491,21 @@ void PacingController::ProcessPackets() { } bool first_packet_in_probe = false; - bool is_probing = prober_.is_probing(); PacedPacketInfo pacing_info; - absl::optional recommended_probe_size; + DataSize recommended_probe_size = DataSize::Zero(); + bool is_probing = prober_.is_probing(); if (is_probing) { - pacing_info = prober_.CurrentCluster(); - first_packet_in_probe = pacing_info.probe_cluster_bytes_sent == 0; - recommended_probe_size = DataSize::Bytes(prober_.RecommendedMinProbeSize()); + // Probe timing is sensitive, and handled explicitly by BitrateProber, so + // use actual send time rather than target. + pacing_info = prober_.CurrentCluster(now).value_or(PacedPacketInfo()); + if (pacing_info.probe_cluster_id != PacedPacketInfo::kNotAProbe) { + first_packet_in_probe = pacing_info.probe_cluster_bytes_sent == 0; + recommended_probe_size = prober_.RecommendedMinProbeSize(); + RTC_DCHECK_GT(recommended_probe_size, DataSize::Zero()); + } else { + // No valid probe cluster returned, probe might have timed out. + is_probing = false; + } } DataSize data_sent = DataSize::Zero(); @@ -495,7 +513,7 @@ void PacingController::ProcessPackets() { // The paused state is checked in the loop since it leaves the critical // section allowing the paused state to be changed from other code. 
while (!paused_) { - if (small_first_probe_packet_ && first_packet_in_probe) { + if (first_packet_in_probe) { // If first packet in probe, insert a small padding packet so we have a // more reliable start window for the rate estimation. auto padding = packet_sender_->GeneratePadding(DataSize::Bytes(1)); @@ -557,14 +575,21 @@ void PacingController::ProcessPackets() { packet_size += DataSize::Bytes(rtp_packet->headers_size()) + transport_overhead_per_packet_; } - packet_sender_->SendRtpPacket(std::move(rtp_packet), pacing_info); + packet_sender_->SendPacket(std::move(rtp_packet), pacing_info); + for (auto& packet : packet_sender_->FetchFec()) { + EnqueuePacket(std::move(packet)); + } data_sent += packet_size; // Send done, update send/process time to the target send time. OnPacketSent(packet_type, packet_size, target_send_time); - if (recommended_probe_size && data_sent > *recommended_probe_size) + + // If we are currently probing, we need to stop the send loop when we have + // reached the send target. + if (is_probing && data_sent >= recommended_probe_size) { break; + } if (mode_ == ProcessMode::kDynamic) { // Update target send time in case that are more packets that we are late @@ -583,14 +608,13 @@ void PacingController::ProcessPackets() { if (is_probing) { probing_send_failure_ = data_sent == DataSize::Zero(); if (!probing_send_failure_) { - prober_.ProbeSent(CurrentTime(), data_sent.bytes()); + prober_.ProbeSent(CurrentTime(), data_sent); } } } -DataSize PacingController::PaddingToAdd( - absl::optional recommended_probe_size, - DataSize data_sent) const { +DataSize PacingController::PaddingToAdd(DataSize recommended_probe_size, + DataSize data_sent) const { if (!packet_queue_.Empty()) { // Actual payload available, no need to add padding. 
return DataSize::Zero(); @@ -607,9 +631,9 @@ DataSize PacingController::PaddingToAdd( return DataSize::Zero(); } - if (recommended_probe_size) { - if (*recommended_probe_size > data_sent) { - return *recommended_probe_size - data_sent; + if (!recommended_probe_size.IsZero()) { + if (recommended_probe_size > data_sent) { + return recommended_probe_size - data_sent; } return DataSize::Zero(); } @@ -684,8 +708,9 @@ void PacingController::OnPaddingSent(DataSize data_sent) { if (data_sent > DataSize::Zero()) { UpdateBudgetWithSentData(data_sent); } - last_send_time_ = CurrentTime(); - last_process_time_ = CurrentTime(); + Timestamp now = CurrentTime(); + last_send_time_ = now; + last_process_time_ = now; } void PacingController::UpdateBudgetWithElapsedTime(TimeDelta delta) { diff --git a/modules/pacing/pacing_controller.h b/modules/pacing/pacing_controller.h index 20d2539e45..b0bdfb2e42 100644 --- a/modules/pacing/pacing_controller.h +++ b/modules/pacing/pacing_controller.h @@ -31,7 +31,6 @@ #include "modules/rtp_rtcp/include/rtp_packet_sender.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" -#include "rtc_base/critical_section.h" #include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/thread_annotations.h" @@ -55,8 +54,10 @@ class PacingController { class PacketSender { public: virtual ~PacketSender() = default; - virtual void SendRtpPacket(std::unique_ptr packet, - const PacedPacketInfo& cluster_info) = 0; + virtual void SendPacket(std::unique_ptr packet, + const PacedPacketInfo& cluster_info) = 0; + // Should be called after each call to SendPacket(). 
+ virtual std::vector> FetchFec() = 0; virtual std::vector> GeneratePadding( DataSize size) = 0; }; @@ -158,7 +159,7 @@ class PacingController { void UpdateBudgetWithElapsedTime(TimeDelta delta); void UpdateBudgetWithSentData(DataSize size); - DataSize PaddingToAdd(absl::optional recommended_probe_size, + DataSize PaddingToAdd(DataSize recommended_probe_size, DataSize data_sent) const; std::unique_ptr GetPendingPacket( @@ -181,7 +182,6 @@ class PacingController { const bool drain_large_queues_; const bool send_padding_if_silent_; const bool pace_audio_; - const bool small_first_probe_packet_; const bool ignore_transport_overhead_; // In dynamic mode, indicates the target size when requesting padding, // expressed as a duration in order to adjust for varying padding rate. diff --git a/modules/pacing/pacing_controller_unittest.cc b/modules/pacing/pacing_controller_unittest.cc index fa23da70a0..a953d5b439 100644 --- a/modules/pacing/pacing_controller_unittest.cc +++ b/modules/pacing/pacing_controller_unittest.cc @@ -20,6 +20,7 @@ #include "api/units/data_rate.h" #include "modules/pacing/packet_router.h" #include "system_wrappers/include/clock.h" +#include "test/explicit_key_value_config.h" #include "test/field_trial.h" #include "test/gmock.h" #include "test/gtest.h" @@ -69,8 +70,8 @@ std::unique_ptr BuildPacket(RtpPacketMediaType type, // methods that focus on core aspects. 
class MockPacingControllerCallback : public PacingController::PacketSender { public: - void SendRtpPacket(std::unique_ptr packet, - const PacedPacketInfo& cluster_info) override { + void SendPacket(std::unique_ptr packet, + const PacedPacketInfo& cluster_info) override { SendPacket(packet->Ssrc(), packet->SequenceNumber(), packet->capture_time_ms(), packet->packet_type() == RtpPacketMediaType::kRetransmission, @@ -90,24 +91,37 @@ class MockPacingControllerCallback : public PacingController::PacketSender { return ret; } - MOCK_METHOD5(SendPacket, - void(uint32_t ssrc, - uint16_t sequence_number, - int64_t capture_timestamp, - bool retransmission, - bool padding)); - MOCK_METHOD1(SendPadding, size_t(size_t target_size)); + MOCK_METHOD(void, + SendPacket, + (uint32_t ssrc, + uint16_t sequence_number, + int64_t capture_timestamp, + bool retransmission, + bool padding)); + MOCK_METHOD(std::vector>, + FetchFec, + (), + (override)); + MOCK_METHOD(size_t, SendPadding, (size_t target_size)); }; // Mock callback implementing the raw api. 
class MockPacketSender : public PacingController::PacketSender { public: - MOCK_METHOD2(SendRtpPacket, - void(std::unique_ptr packet, - const PacedPacketInfo& cluster_info)); - MOCK_METHOD1( - GeneratePadding, - std::vector>(DataSize target_size)); + MOCK_METHOD(void, + SendPacket, + (std::unique_ptr packet, + const PacedPacketInfo& cluster_info), + (override)); + MOCK_METHOD(std::vector>, + FetchFec, + (), + (override)); + + MOCK_METHOD(std::vector>, + GeneratePadding, + (DataSize target_size), + (override)); }; class PacingControllerPadding : public PacingController::PacketSender { @@ -116,11 +130,15 @@ class PacingControllerPadding : public PacingController::PacketSender { PacingControllerPadding() : padding_sent_(0), total_bytes_sent_(0) {} - void SendRtpPacket(std::unique_ptr packet, - const PacedPacketInfo& pacing_info) override { + void SendPacket(std::unique_ptr packet, + const PacedPacketInfo& pacing_info) override { total_bytes_sent_ += packet->payload_size(); } + std::vector> FetchFec() override { + return {}; + } + std::vector> GeneratePadding( DataSize target_size) override { size_t num_packets = @@ -147,11 +165,16 @@ class PacingControllerProbing : public PacingController::PacketSender { public: PacingControllerProbing() : packets_sent_(0), padding_sent_(0) {} - void SendRtpPacket(std::unique_ptr packet, - const PacedPacketInfo& pacing_info) override { + void SendPacket(std::unique_ptr packet, + const PacedPacketInfo& pacing_info) override { if (packet->packet_type() != RtpPacketMediaType::kPadding) { ++packets_sent_; } + last_pacing_info_ = pacing_info; + } + + std::vector> FetchFec() override { + return {}; } std::vector> GeneratePadding( @@ -173,12 +196,14 @@ class PacingControllerProbing : public PacingController::PacketSender { } int packets_sent() const { return packets_sent_; } - int padding_sent() const { return padding_sent_; } + int total_packets_sent() const { return packets_sent_ + padding_sent_; } + PacedPacketInfo last_pacing_info() 
const { return last_pacing_info_; } private: int packets_sent_; int padding_sent_; + PacedPacketInfo last_pacing_info_; }; class PacingControllerTest @@ -295,7 +320,7 @@ class PacingControllerTest } SimulatedClock clock_; - MockPacingControllerCallback callback_; + ::testing::NiceMock callback_; std::unique_ptr pacer_; }; @@ -1404,7 +1429,8 @@ TEST_P(PacingControllerTest, ProbingWithInsertedPackets) { EXPECT_NEAR((packets_sent - 1) * kPacketSize * 8000 / (clock_.TimeInMilliseconds() - start), kFirstClusterRate.bps(), kProbingErrorMargin.bps()); - EXPECT_EQ(0, packet_sender.padding_sent()); + // Probing always starts with a small padding packet. + EXPECT_EQ(1, packet_sender.padding_sent()); clock_.AdvanceTime(TimeUntilNextProcess()); start = clock_.TimeInMilliseconds(); @@ -1422,63 +1448,119 @@ TEST_P(PacingControllerTest, ProbingWithInsertedPackets) { TEST_P(PacingControllerTest, SkipsProbesWhenProcessIntervalTooLarge) { const size_t kPacketSize = 1200; const int kInitialBitrateBps = 300000; - uint32_t ssrc = 12346; - uint16_t sequence_number = 1234; + const uint32_t ssrc = 12346; + const int kProbeClusterId = 3; - PacingControllerProbing packet_sender; - pacer_ = std::make_unique(&clock_, &packet_sender, nullptr, - nullptr, GetParam()); - pacer_->SetPacingRates( - DataRate::BitsPerSec(kInitialBitrateBps * kPaceMultiplier), - DataRate::Zero()); + // Test with both legacy and new probe discard modes. + // TODO(bugs.webrtc.org/11780): Clean up when legacy is gone. + for (bool abort_delayed_probes : {false, true}) { + uint16_t sequence_number = 1234; - for (int i = 0; i < 10; ++i) { + PacingControllerProbing packet_sender; + + const test::ExplicitKeyValueConfig trials( + abort_delayed_probes ? 
"WebRTC-Bwe-ProbingBehavior/" + "abort_delayed_probes:1,max_probe_delay:2ms/" + : "WebRTC-Bwe-ProbingBehavior/" + "abort_delayed_probes:0,max_probe_delay:2ms/"); + pacer_ = std::make_unique(&clock_, &packet_sender, + nullptr, &trials, GetParam()); + pacer_->SetPacingRates( + DataRate::BitsPerSec(kInitialBitrateBps * kPaceMultiplier), + DataRate::BitsPerSec(kInitialBitrateBps)); + + for (int i = 0; i < 10; ++i) { + Send(RtpPacketMediaType::kVideo, ssrc, sequence_number++, + clock_.TimeInMilliseconds(), kPacketSize); + } + while (pacer_->QueueSizePackets() > 0) { + clock_.AdvanceTime(TimeUntilNextProcess()); + pacer_->ProcessPackets(); + } + + // Probe at a very high rate. + pacer_->CreateProbeCluster(DataRate::KilobitsPerSec(10000), // 10 Mbps. + /*cluster_id=*/kProbeClusterId); + // We need one packet to start the probe. Send(RtpPacketMediaType::kVideo, ssrc, sequence_number++, clock_.TimeInMilliseconds(), kPacketSize); - } - while (pacer_->QueueSizePackets() > 0) { + const int packets_sent_before_probe = packet_sender.packets_sent(); clock_.AdvanceTime(TimeUntilNextProcess()); pacer_->ProcessPackets(); - } + EXPECT_EQ(packet_sender.packets_sent(), packets_sent_before_probe + 1); - // Probe at a very high rate. - pacer_->CreateProbeCluster(DataRate::KilobitsPerSec(10000), // 10 Mbps. - /*cluster_id=*/3); - // We need one packet to start the probe. - Send(RtpPacketMediaType::kVideo, ssrc, sequence_number++, - clock_.TimeInMilliseconds(), kPacketSize); - const int packets_sent_before_probe = packet_sender.packets_sent(); - clock_.AdvanceTime(TimeUntilNextProcess()); - pacer_->ProcessPackets(); - EXPECT_EQ(packet_sender.packets_sent(), packets_sent_before_probe + 1); + // Figure out how long between probe packets. + Timestamp start_time = clock_.CurrentTime(); + clock_.AdvanceTime(TimeUntilNextProcess()); + TimeDelta time_between_probes = clock_.CurrentTime() - start_time; + // Advance that distance again + 1ms. 
+ clock_.AdvanceTime(time_between_probes); - // Figure out how long between probe packets. - Timestamp start_time = clock_.CurrentTime(); - clock_.AdvanceTime(TimeUntilNextProcess()); - TimeDelta time_between_probes = clock_.CurrentTime() - start_time; - // Advance that distance again + 1ms. - clock_.AdvanceTime(time_between_probes); + // Send second probe packet. + Send(RtpPacketMediaType::kVideo, ssrc, sequence_number++, + clock_.TimeInMilliseconds(), kPacketSize); + pacer_->ProcessPackets(); + EXPECT_EQ(packet_sender.packets_sent(), packets_sent_before_probe + 2); + PacedPacketInfo last_pacing_info = packet_sender.last_pacing_info(); + EXPECT_EQ(last_pacing_info.probe_cluster_id, kProbeClusterId); + + // We're exactly where we should be for the next probe. + const Timestamp probe_time = clock_.CurrentTime(); + EXPECT_EQ(pacer_->NextSendTime(), clock_.CurrentTime()); + + BitrateProberConfig probing_config(&trials); + EXPECT_GT(probing_config.max_probe_delay.Get(), TimeDelta::Zero()); + // Advance to within max probe delay, should still return same target. + clock_.AdvanceTime(probing_config.max_probe_delay.Get()); + EXPECT_EQ(pacer_->NextSendTime(), probe_time); + + // Too high probe delay, drop it! + clock_.AdvanceTime(TimeDelta::Micros(1)); + + int packets_sent_before_timeout = packet_sender.total_packets_sent(); + if (abort_delayed_probes) { + // Expected next process time is unchanged, but calling should not + // generate new packets. + EXPECT_EQ(pacer_->NextSendTime(), probe_time); + pacer_->ProcessPackets(); + EXPECT_EQ(packet_sender.total_packets_sent(), + packets_sent_before_timeout); - // Send second probe packet. - Send(RtpPacketMediaType::kVideo, ssrc, sequence_number++, - clock_.TimeInMilliseconds(), kPacketSize); - pacer_->ProcessPackets(); - EXPECT_EQ(packet_sender.packets_sent(), packets_sent_before_probe + 2); - - // We're exactly where we should be for the next probe. 
- const Timestamp probe_time = clock_.CurrentTime(); - EXPECT_EQ(pacer_->NextSendTime(), clock_.CurrentTime()); - - FieldTrialBasedConfig field_trial_config; - BitrateProberConfig probing_config(&field_trial_config); - EXPECT_GT(probing_config.max_probe_delay.Get(), TimeDelta::Zero()); - // Advance to within max probe delay, should still return same target. - clock_.AdvanceTime(probing_config.max_probe_delay.Get()); - EXPECT_EQ(pacer_->NextSendTime(), probe_time); - - // Too high probe delay, drop it! - clock_.AdvanceTime(TimeDelta::Micros(1)); - EXPECT_GT(pacer_->NextSendTime(), probe_time); + // Next packet sent is not part of probe. + if (PeriodicProcess()) { + do { + AdvanceTimeAndProcess(); + } while (packet_sender.total_packets_sent() == + packets_sent_before_timeout); + } else { + AdvanceTimeAndProcess(); + } + const int expected_probe_id = PacedPacketInfo::kNotAProbe; + EXPECT_EQ(packet_sender.last_pacing_info().probe_cluster_id, + expected_probe_id); + } else { + // Legacy behaviour, probe "aborted" so send time moved back. Next call to + // ProcessPackets() still results in packets being marked as part of probe + // cluster. + EXPECT_GT(pacer_->NextSendTime(), probe_time); + AdvanceTimeAndProcess(); + EXPECT_GT(packet_sender.total_packets_sent(), + packets_sent_before_timeout); + const int expected_probe_id = last_pacing_info.probe_cluster_id; + EXPECT_EQ(packet_sender.last_pacing_info().probe_cluster_id, + expected_probe_id); + + // Time between sent packets keeps being too large, but we still mark the + // packets as being part of the cluster. 
+ Timestamp a = clock_.CurrentTime(); + AdvanceTimeAndProcess(); + EXPECT_GT(packet_sender.total_packets_sent(), + packets_sent_before_timeout); + EXPECT_EQ(packet_sender.last_pacing_info().probe_cluster_id, + expected_probe_id); + EXPECT_GT(clock_.CurrentTime() - a, time_between_probes); + } + } } TEST_P(PacingControllerTest, ProbingWithPaddingSupport) { @@ -1571,7 +1653,7 @@ TEST_P(PacingControllerTest, ProbeClusterId) { // First probing cluster. EXPECT_CALL(callback, - SendRtpPacket(_, Field(&PacedPacketInfo::probe_cluster_id, 0))) + SendPacket(_, Field(&PacedPacketInfo::probe_cluster_id, 0))) .Times(5); for (int i = 0; i < 5; ++i) { @@ -1580,7 +1662,7 @@ TEST_P(PacingControllerTest, ProbeClusterId) { // Second probing cluster. EXPECT_CALL(callback, - SendRtpPacket(_, Field(&PacedPacketInfo::probe_cluster_id, 1))) + SendPacket(_, Field(&PacedPacketInfo::probe_cluster_id, 1))) .Times(5); for (int i = 0; i < 5; ++i) { @@ -1598,7 +1680,7 @@ TEST_P(PacingControllerTest, ProbeClusterId) { return padding_packets; }); bool non_probe_packet_seen = false; - EXPECT_CALL(callback, SendRtpPacket) + EXPECT_CALL(callback, SendPacket) .WillOnce([&](std::unique_ptr packet, const PacedPacketInfo& cluster_info) { EXPECT_EQ(cluster_info.probe_cluster_id, kNotAProbe); @@ -1628,23 +1710,23 @@ TEST_P(PacingControllerTest, OwnedPacketPrioritizedOnType) { ::testing::InSequence seq; EXPECT_CALL( callback, - SendRtpPacket(Pointee(Property(&RtpPacketToSend::Ssrc, kAudioSsrc)), _)); - EXPECT_CALL(callback, - SendRtpPacket( - Pointee(Property(&RtpPacketToSend::Ssrc, kVideoRtxSsrc)), _)); + SendPacket(Pointee(Property(&RtpPacketToSend::Ssrc, kAudioSsrc)), _)); + EXPECT_CALL( + callback, + SendPacket(Pointee(Property(&RtpPacketToSend::Ssrc, kVideoRtxSsrc)), _)); // FEC and video actually have the same priority, so will come out in // insertion order. 
- EXPECT_CALL(callback, - SendRtpPacket( - Pointee(Property(&RtpPacketToSend::Ssrc, kFlexFecSsrc)), _)); EXPECT_CALL( callback, - SendRtpPacket(Pointee(Property(&RtpPacketToSend::Ssrc, kVideoSsrc)), _)); + SendPacket(Pointee(Property(&RtpPacketToSend::Ssrc, kFlexFecSsrc)), _)); + EXPECT_CALL( + callback, + SendPacket(Pointee(Property(&RtpPacketToSend::Ssrc, kVideoSsrc)), _)); - EXPECT_CALL(callback, - SendRtpPacket( - Pointee(Property(&RtpPacketToSend::Ssrc, kVideoRtxSsrc)), _)); + EXPECT_CALL( + callback, + SendPacket(Pointee(Property(&RtpPacketToSend::Ssrc, kVideoRtxSsrc)), _)); while (pacer_->QueueSizePackets() > 0) { if (PeriodicProcess()) { @@ -1657,7 +1739,6 @@ TEST_P(PacingControllerTest, OwnedPacketPrioritizedOnType) { } TEST_P(PacingControllerTest, SmallFirstProbePacket) { - ScopedFieldTrials trial("WebRTC-Pacer-SmallFirstProbePacket/Enabled/"); MockPacketSender callback; pacer_ = std::make_unique(&clock_, &callback, nullptr, nullptr, GetParam()); @@ -1679,7 +1760,7 @@ TEST_P(PacingControllerTest, SmallFirstProbePacket) { size_t packets_sent = 0; bool media_seen = false; - EXPECT_CALL(callback, SendRtpPacket) + EXPECT_CALL(callback, SendPacket) .Times(::testing::AnyNumber()) .WillRepeatedly([&](std::unique_ptr packet, const PacedPacketInfo& cluster_info) { @@ -1817,7 +1898,7 @@ TEST_P(PacingControllerTest, for (bool account_for_audio : {false, true}) { uint16_t sequence_number = 1234; MockPacketSender callback; - EXPECT_CALL(callback, SendRtpPacket).Times(::testing::AnyNumber()); + EXPECT_CALL(callback, SendPacket).Times(::testing::AnyNumber()); pacer_ = std::make_unique(&clock_, &callback, nullptr, nullptr, GetParam()); pacer_->SetAccountForAudioPackets(account_for_audio); @@ -2025,6 +2106,35 @@ TEST_P(PacingControllerTest, PaddingTargetAccountsForPaddingRate) { AdvanceTimeAndProcess(); } +TEST_P(PacingControllerTest, SendsFecPackets) { + const uint32_t kSsrc = 12345; + const uint32_t kFlexSsrc = 54321; + uint16_t sequence_number = 1234; + uint16_t 
flexfec_sequence_number = 4321; + const size_t kPacketSize = 123; + + // Set pacing rate to 1000 packet/s, no padding. + pacer_->SetPacingRates( + DataSize::Bytes(1000 * kPacketSize) / TimeDelta::Seconds(1), + DataRate::Zero()); + + int64_t now = clock_.TimeInMilliseconds(); + Send(RtpPacketMediaType::kVideo, kSsrc, sequence_number, now, kPacketSize); + EXPECT_CALL(callback_, SendPacket(kSsrc, sequence_number, now, false, false)); + EXPECT_CALL(callback_, FetchFec).WillOnce([&]() { + EXPECT_CALL(callback_, SendPacket(kFlexSsrc, flexfec_sequence_number, now, + false, false)); + EXPECT_CALL(callback_, FetchFec); + std::vector> fec_packets; + fec_packets.push_back( + BuildPacket(RtpPacketMediaType::kForwardErrorCorrection, kFlexSsrc, + flexfec_sequence_number, now, kPacketSize)); + return fec_packets; + }); + AdvanceTimeAndProcess(); + AdvanceTimeAndProcess(); +} + INSTANTIATE_TEST_SUITE_P( WithAndWithoutIntervalBudget, PacingControllerTest, diff --git a/modules/pacing/packet_router.cc b/modules/pacing/packet_router.cc index fa64331493..3b1278e504 100644 --- a/modules/pacing/packet_router.cc +++ b/modules/pacing/packet_router.cc @@ -17,29 +17,21 @@ #include #include "absl/types/optional.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_packet.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" namespace webrtc { -namespace { - -constexpr int kRembSendIntervalMs = 200; - -} // namespace PacketRouter::PacketRouter() : PacketRouter(0) {} PacketRouter::PacketRouter(uint16_t start_transport_seq) : last_send_module_(nullptr), - last_remb_time_ms_(rtc::TimeMillis()), - last_send_bitrate_bps_(0), - bitrate_bps_(0), - max_bitrate_bps_(std::numeric_limits::max()), 
active_remb_module_(nullptr), transport_seq_(start_transport_seq) {} @@ -52,8 +44,9 @@ PacketRouter::~PacketRouter() { RTC_DCHECK(active_remb_module_ == nullptr); } -void PacketRouter::AddSendRtpModule(RtpRtcp* rtp_module, bool remb_candidate) { - rtc::CritScope cs(&modules_crit_); +void PacketRouter::AddSendRtpModule(RtpRtcpInterface* rtp_module, + bool remb_candidate) { + MutexLock lock(&modules_mutex_); AddSendRtpModuleToMap(rtp_module, rtp_module->SSRC()); if (absl::optional rtx_ssrc = rtp_module->RtxSsrc()) { @@ -72,7 +65,8 @@ void PacketRouter::AddSendRtpModule(RtpRtcp* rtp_module, bool remb_candidate) { } } -void PacketRouter::AddSendRtpModuleToMap(RtpRtcp* rtp_module, uint32_t ssrc) { +void PacketRouter::AddSendRtpModuleToMap(RtpRtcpInterface* rtp_module, + uint32_t ssrc) { RTC_DCHECK(send_modules_map_.find(ssrc) == send_modules_map_.end()); // Always keep the audio modules at the back of the list, so that when we // iterate over the modules in order to find one that can send padding we @@ -93,8 +87,8 @@ void PacketRouter::RemoveSendRtpModuleFromMap(uint32_t ssrc) { send_modules_map_.erase(kv); } -void PacketRouter::RemoveSendRtpModule(RtpRtcp* rtp_module) { - rtc::CritScope cs(&modules_crit_); +void PacketRouter::RemoveSendRtpModule(RtpRtcpInterface* rtp_module) { + MutexLock lock(&modules_mutex_); MaybeRemoveRembModuleCandidate(rtp_module, /* media_sender = */ true); RemoveSendRtpModuleFromMap(rtp_module->SSRC()); @@ -112,7 +106,7 @@ void PacketRouter::RemoveSendRtpModule(RtpRtcp* rtp_module) { void PacketRouter::AddReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender, bool remb_candidate) { - rtc::CritScope cs(&modules_crit_); + MutexLock lock(&modules_mutex_); RTC_DCHECK(std::find(rtcp_feedback_senders_.begin(), rtcp_feedback_senders_.end(), rtcp_sender) == rtcp_feedback_senders_.end()); @@ -126,7 +120,7 @@ void PacketRouter::AddReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender, void PacketRouter::RemoveReceiveRtpModule( 
RtcpFeedbackSenderInterface* rtcp_sender) { - rtc::CritScope cs(&modules_crit_); + MutexLock lock(&modules_mutex_); MaybeRemoveRembModuleCandidate(rtcp_sender, /* media_sender = */ false); auto it = std::find(rtcp_feedback_senders_.begin(), rtcp_feedback_senders_.end(), rtcp_sender); @@ -136,7 +130,11 @@ void PacketRouter::RemoveReceiveRtpModule( void PacketRouter::SendPacket(std::unique_ptr packet, const PacedPacketInfo& cluster_info) { - rtc::CritScope cs(&modules_crit_); + TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webrtc"), "PacketRouter::SendPacket", + "sequence_number", packet->SequenceNumber(), "rtp_timestamp", + packet->Timestamp()); + + MutexLock lock(&modules_mutex_); // With the new pacer code path, transport sequence numbers are only set here, // on the pacer thread. Therefore we don't need atomics/synchronization. if (packet->HasExtension()) { @@ -153,7 +151,7 @@ void PacketRouter::SendPacket(std::unique_ptr packet, return; } - RtpRtcp* rtp_module = kv->second; + RtpRtcpInterface* rtp_module = kv->second; if (!rtp_module->TrySendPacket(packet.get(), cluster_info)) { RTC_LOG(LS_WARNING) << "Failed to send packet, rejected by RTP module."; return; @@ -164,11 +162,26 @@ void PacketRouter::SendPacket(std::unique_ptr packet, // properties needed for payload based padding. Cache it for later use. 
last_send_module_ = rtp_module; } + + for (auto& packet : rtp_module->FetchFecPackets()) { + pending_fec_packets_.push_back(std::move(packet)); + } +} + +std::vector> PacketRouter::FetchFec() { + MutexLock lock(&modules_mutex_); + std::vector> fec_packets = + std::move(pending_fec_packets_); + pending_fec_packets_.clear(); + return fec_packets; } std::vector> PacketRouter::GeneratePadding( - size_t target_size_bytes) { - rtc::CritScope cs(&modules_crit_); + DataSize size) { + TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("webrtc"), + "PacketRouter::GeneratePadding", "bytes", size.bytes()); + + MutexLock lock(&modules_mutex_); // First try on the last rtp module to have sent media. This increases the // the chance that any payload based padding will be useful as it will be // somewhat distributed over modules according the packet rate, even if it @@ -178,122 +191,71 @@ std::vector> PacketRouter::GeneratePadding( std::vector> padding_packets; if (last_send_module_ != nullptr && last_send_module_->SupportsRtxPayloadPadding()) { - padding_packets = last_send_module_->GeneratePadding(target_size_bytes); - if (!padding_packets.empty()) { - return padding_packets; - } + padding_packets = last_send_module_->GeneratePadding(size.bytes()); } - // Iterate over all modules send module. Video modules will be at the front - // and so will be prioritized. This is important since audio packets may not - // be taken into account by the bandwidth estimator, e.g. in FF. - for (RtpRtcp* rtp_module : send_modules_list_) { - if (rtp_module->SupportsPadding()) { - padding_packets = rtp_module->GeneratePadding(target_size_bytes); - if (!padding_packets.empty()) { - last_send_module_ = rtp_module; - break; + if (padding_packets.empty()) { + // Iterate over all modules send module. Video modules will be at the front + // and so will be prioritized. This is important since audio packets may not + // be taken into account by the bandwidth estimator, e.g. in FF. 
+ for (RtpRtcpInterface* rtp_module : send_modules_list_) { + if (rtp_module->SupportsPadding()) { + padding_packets = rtp_module->GeneratePadding(size.bytes()); + if (!padding_packets.empty()) { + last_send_module_ = rtp_module; + break; + } } } } +#if RTC_TRACE_EVENTS_ENABLED + for (auto& packet : padding_packets) { + TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webrtc"), + "PacketRouter::GeneratePadding::Loop", "sequence_number", + packet->SequenceNumber(), "rtp_timestamp", + packet->Timestamp()); + } +#endif + return padding_packets; } uint16_t PacketRouter::CurrentTransportSequenceNumber() const { - rtc::CritScope lock(&modules_crit_); + MutexLock lock(&modules_mutex_); return transport_seq_ & 0xFFFF; } -void PacketRouter::OnReceiveBitrateChanged(const std::vector& ssrcs, - uint32_t bitrate_bps) { - // % threshold for if we should send a new REMB asap. - const int64_t kSendThresholdPercent = 97; - // TODO(danilchap): Remove receive_bitrate_bps variable and the cast - // when OnReceiveBitrateChanged takes bitrate as int64_t. - int64_t receive_bitrate_bps = static_cast(bitrate_bps); - - int64_t now_ms = rtc::TimeMillis(); - { - rtc::CritScope lock(&remb_crit_); - - // If we already have an estimate, check if the new total estimate is below - // kSendThresholdPercent of the previous estimate. - if (last_send_bitrate_bps_ > 0) { - int64_t new_remb_bitrate_bps = - last_send_bitrate_bps_ - bitrate_bps_ + receive_bitrate_bps; - - if (new_remb_bitrate_bps < - kSendThresholdPercent * last_send_bitrate_bps_ / 100) { - // The new bitrate estimate is less than kSendThresholdPercent % of the - // last report. Send a REMB asap. - last_remb_time_ms_ = now_ms - kRembSendIntervalMs; - } - } - bitrate_bps_ = receive_bitrate_bps; - - if (now_ms - last_remb_time_ms_ < kRembSendIntervalMs) { - return; - } - // NOTE: Updated if we intend to send the data; we might not have - // a module to actually send it. 
- last_remb_time_ms_ = now_ms; - last_send_bitrate_bps_ = receive_bitrate_bps; - // Cap the value to send in remb with configured value. - receive_bitrate_bps = std::min(receive_bitrate_bps, max_bitrate_bps_); - } - SendRemb(receive_bitrate_bps, ssrcs); -} - -void PacketRouter::SetMaxDesiredReceiveBitrate(int64_t bitrate_bps) { - RTC_DCHECK_GE(bitrate_bps, 0); - { - rtc::CritScope lock(&remb_crit_); - max_bitrate_bps_ = bitrate_bps; - if (rtc::TimeMillis() - last_remb_time_ms_ < kRembSendIntervalMs && - last_send_bitrate_bps_ > 0 && - last_send_bitrate_bps_ <= max_bitrate_bps_) { - // Recent measured bitrate is already below the cap. - return; - } - } - SendRemb(bitrate_bps, /*ssrcs=*/{}); -} - -bool PacketRouter::SendRemb(int64_t bitrate_bps, - const std::vector& ssrcs) { - rtc::CritScope lock(&modules_crit_); +void PacketRouter::SendRemb(int64_t bitrate_bps, std::vector ssrcs) { + MutexLock lock(&modules_mutex_); if (!active_remb_module_) { - return false; + return; } // The Add* and Remove* methods above ensure that REMB is disabled on all // other modules, because otherwise, they will send REMB with stale info. - active_remb_module_->SetRemb(bitrate_bps, ssrcs); - - return true; + active_remb_module_->SetRemb(bitrate_bps, std::move(ssrcs)); } -bool PacketRouter::SendCombinedRtcpPacket( +void PacketRouter::SendCombinedRtcpPacket( std::vector> packets) { - rtc::CritScope cs(&modules_crit_); + MutexLock lock(&modules_mutex_); // Prefer send modules. 
- for (RtpRtcp* rtp_module : send_modules_list_) { + for (RtpRtcpInterface* rtp_module : send_modules_list_) { if (rtp_module->RTCP() == RtcpMode::kOff) { continue; } rtp_module->SendCombinedRtcpPacket(std::move(packets)); - return true; + return; } if (rtcp_feedback_senders_.empty()) { - return false; + return; } auto* rtcp_sender = rtcp_feedback_senders_[0]; rtcp_sender->SendCombinedRtcpPacket(std::move(packets)); - return true; } void PacketRouter::AddRembModuleCandidate( diff --git a/modules/pacing/packet_router.h b/modules/pacing/packet_router.h index 40b3ad1407..7a6e24d7ea 100644 --- a/modules/pacing/packet_router.h +++ b/modules/pacing/packet_router.h @@ -21,109 +21,93 @@ #include #include "api/transport/network_types.h" +#include "modules/pacing/pacing_controller.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_packet.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { -class RtpRtcp; +class RtpRtcpInterface; // PacketRouter keeps track of rtp send modules to support the pacer. // In addition, it handles feedback messages, which are sent on a send // module if possible (sender report), otherwise on receive module // (receiver report). For the latter case, we also keep track of the // receive modules. 
-class PacketRouter : public RemoteBitrateObserver, - public TransportFeedbackSenderInterface { +class PacketRouter : public PacingController::PacketSender { public: PacketRouter(); explicit PacketRouter(uint16_t start_transport_seq); ~PacketRouter() override; - void AddSendRtpModule(RtpRtcp* rtp_module, bool remb_candidate); - void RemoveSendRtpModule(RtpRtcp* rtp_module); + void AddSendRtpModule(RtpRtcpInterface* rtp_module, bool remb_candidate); + void RemoveSendRtpModule(RtpRtcpInterface* rtp_module); void AddReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender, bool remb_candidate); void RemoveReceiveRtpModule(RtcpFeedbackSenderInterface* rtcp_sender); - virtual void SendPacket(std::unique_ptr packet, - const PacedPacketInfo& cluster_info); - - virtual std::vector> GeneratePadding( - size_t target_size_bytes); + void SendPacket(std::unique_ptr packet, + const PacedPacketInfo& cluster_info) override; + std::vector> FetchFec() override; + std::vector> GeneratePadding( + DataSize size) override; uint16_t CurrentTransportSequenceNumber() const; - // Called every time there is a new bitrate estimate for a receive channel - // group. This call will trigger a new RTCP REMB packet if the bitrate - // estimate has decreased or if no RTCP REMB packet has been sent for - // a certain time interval. - // Implements RtpReceiveBitrateUpdate. - void OnReceiveBitrateChanged(const std::vector& ssrcs, - uint32_t bitrate_bps) override; - - // Ensures remote party notified of the receive bitrate limit no larger than - // |bitrate_bps|. - void SetMaxDesiredReceiveBitrate(int64_t bitrate_bps); - // Send REMB feedback. - bool SendRemb(int64_t bitrate_bps, const std::vector& ssrcs); + void SendRemb(int64_t bitrate_bps, std::vector ssrcs); // Sends |packets| in one or more IP packets. 
- bool SendCombinedRtcpPacket( - std::vector> packets) override; + void SendCombinedRtcpPacket( + std::vector> packets); private: void AddRembModuleCandidate(RtcpFeedbackSenderInterface* candidate_module, bool media_sender) - RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_); void MaybeRemoveRembModuleCandidate( RtcpFeedbackSenderInterface* candidate_module, - bool media_sender) RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_crit_); - void UnsetActiveRembModule() RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_crit_); - void DetermineActiveRembModule() RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_crit_); - void AddSendRtpModuleToMap(RtpRtcp* rtp_module, uint32_t ssrc) - RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_crit_); + bool media_sender) RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_); + void UnsetActiveRembModule() RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_); + void DetermineActiveRembModule() RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_); + void AddSendRtpModuleToMap(RtpRtcpInterface* rtp_module, uint32_t ssrc) + RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_); void RemoveSendRtpModuleFromMap(uint32_t ssrc) - RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_crit_); - - rtc::CriticalSection modules_crit_; - // Ssrc to RtpRtcp module; - std::unordered_map send_modules_map_ - RTC_GUARDED_BY(modules_crit_); - std::list send_modules_list_ RTC_GUARDED_BY(modules_crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(modules_mutex_); + + mutable Mutex modules_mutex_; + // Ssrc to RtpRtcpInterface module; + std::unordered_map send_modules_map_ + RTC_GUARDED_BY(modules_mutex_); + std::list send_modules_list_ + RTC_GUARDED_BY(modules_mutex_); // The last module used to send media. - RtpRtcp* last_send_module_ RTC_GUARDED_BY(modules_crit_); + RtpRtcpInterface* last_send_module_ RTC_GUARDED_BY(modules_mutex_); // Rtcp modules of the rtp receivers. 
std::vector rtcp_feedback_senders_ - RTC_GUARDED_BY(modules_crit_); - - // TODO(eladalon): remb_crit_ only ever held from one function, and it's not - // clear if that function can actually be called from more than one thread. - rtc::CriticalSection remb_crit_; - // The last time a REMB was sent. - int64_t last_remb_time_ms_ RTC_GUARDED_BY(remb_crit_); - int64_t last_send_bitrate_bps_ RTC_GUARDED_BY(remb_crit_); - // The last bitrate update. - int64_t bitrate_bps_ RTC_GUARDED_BY(remb_crit_); - int64_t max_bitrate_bps_ RTC_GUARDED_BY(remb_crit_); + RTC_GUARDED_BY(modules_mutex_); // Candidates for the REMB module can be RTP sender/receiver modules, with // the sender modules taking precedence. std::vector sender_remb_candidates_ - RTC_GUARDED_BY(modules_crit_); + RTC_GUARDED_BY(modules_mutex_); std::vector receiver_remb_candidates_ - RTC_GUARDED_BY(modules_crit_); + RTC_GUARDED_BY(modules_mutex_); RtcpFeedbackSenderInterface* active_remb_module_ - RTC_GUARDED_BY(modules_crit_); + RTC_GUARDED_BY(modules_mutex_); + + uint64_t transport_seq_ RTC_GUARDED_BY(modules_mutex_); - uint64_t transport_seq_ RTC_GUARDED_BY(modules_crit_); + // TODO(bugs.webrtc.org/10809): Replace lock with a sequence checker once the + // process thread is gone. 
+ std::vector> pending_fec_packets_ + RTC_GUARDED_BY(modules_mutex_); RTC_DISALLOW_COPY_AND_ASSIGN(PacketRouter); }; diff --git a/modules/pacing/packet_router_unittest.cc b/modules/pacing/packet_router_unittest.cc index b8f16cb924..77fe5f9f8d 100644 --- a/modules/pacing/packet_router_unittest.cc +++ b/modules/pacing/packet_router_unittest.cc @@ -68,31 +68,25 @@ class PacketRouterTest : public ::testing::Test { }; TEST_F(PacketRouterTest, Sanity_NoModuleRegistered_GeneratePadding) { - constexpr size_t bytes = 300; + constexpr DataSize bytes = DataSize::Bytes(300); const PacedPacketInfo paced_info(1, kProbeMinProbes, kProbeMinBytes); EXPECT_TRUE(packet_router_.GeneratePadding(bytes).empty()); } -TEST_F(PacketRouterTest, Sanity_NoModuleRegistered_OnReceiveBitrateChanged) { - const std::vector ssrcs = {1, 2, 3}; - constexpr uint32_t bitrate_bps = 10000; - - packet_router_.OnReceiveBitrateChanged(ssrcs, bitrate_bps); -} TEST_F(PacketRouterTest, Sanity_NoModuleRegistered_SendRemb) { const std::vector ssrcs = {1, 2, 3}; constexpr uint32_t bitrate_bps = 10000; - - EXPECT_FALSE(packet_router_.SendRemb(bitrate_bps, ssrcs)); + // Expect not to crash + packet_router_.SendRemb(bitrate_bps, ssrcs); } TEST_F(PacketRouterTest, Sanity_NoModuleRegistered_SendTransportFeedback) { std::vector> feedback; feedback.push_back(std::make_unique()); - - EXPECT_FALSE(packet_router_.SendCombinedRtcpPacket(std::move(feedback))); + // Expect not to crash + packet_router_.SendCombinedRtcpPacket(std::move(feedback)); } TEST_F(PacketRouterTest, GeneratePaddingPrioritizesRtx) { @@ -101,12 +95,12 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesRtx) { const uint16_t kSsrc1 = 1234; const uint16_t kSsrc2 = 4567; - NiceMock rtp_1; + NiceMock rtp_1; ON_CALL(rtp_1, RtxSendStatus()).WillByDefault(Return(kRtxRedundantPayloads)); ON_CALL(rtp_1, SSRC()).WillByDefault(Return(kSsrc1)); ON_CALL(rtp_1, SupportsPadding).WillByDefault(Return(false)); - NiceMock rtp_2; + NiceMock rtp_2; ON_CALL(rtp_2, 
RtxSendStatus()).WillByDefault(Return(kRtxOff)); ON_CALL(rtp_2, SSRC()).WillByDefault(Return(kSsrc2)); ON_CALL(rtp_2, SupportsPadding).WillByDefault(Return(true)); @@ -122,7 +116,8 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesRtx) { return std::vector>( kExpectedPaddingPackets); }); - auto generated_padding = packet_router_.GeneratePadding(kPaddingSize); + auto generated_padding = + packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize)); EXPECT_EQ(generated_padding.size(), kExpectedPaddingPackets); packet_router_.RemoveSendRtpModule(&rtp_1); @@ -142,13 +137,13 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesVideo) { kExpectedPaddingPackets); }; - NiceMock audio_module; + NiceMock audio_module; ON_CALL(audio_module, RtxSendStatus()).WillByDefault(Return(kRtxOff)); ON_CALL(audio_module, SSRC()).WillByDefault(Return(kSsrc1)); ON_CALL(audio_module, SupportsPadding).WillByDefault(Return(true)); ON_CALL(audio_module, IsAudioConfigured).WillByDefault(Return(true)); - NiceMock video_module; + NiceMock video_module; ON_CALL(video_module, RtxSendStatus()).WillByDefault(Return(kRtxOff)); ON_CALL(video_module, SSRC()).WillByDefault(Return(kSsrc2)); ON_CALL(video_module, SupportsPadding).WillByDefault(Return(true)); @@ -159,7 +154,7 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesVideo) { packet_router_.AddSendRtpModule(&audio_module, false); EXPECT_CALL(audio_module, GeneratePadding(kPaddingSize)) .WillOnce(generate_padding); - packet_router_.GeneratePadding(kPaddingSize); + packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize)); // Add the video module, this should now be prioritized since we cannot // guarantee that audio packets will be included in the BWE. 
@@ -167,7 +162,7 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesVideo) { EXPECT_CALL(audio_module, GeneratePadding).Times(0); EXPECT_CALL(video_module, GeneratePadding(kPaddingSize)) .WillOnce(generate_padding); - packet_router_.GeneratePadding(kPaddingSize); + packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize)); // Remove and the add audio module again. Module order shouldn't matter; // video should still be prioritized. @@ -176,14 +171,14 @@ TEST_F(PacketRouterTest, GeneratePaddingPrioritizesVideo) { EXPECT_CALL(audio_module, GeneratePadding).Times(0); EXPECT_CALL(video_module, GeneratePadding(kPaddingSize)) .WillOnce(generate_padding); - packet_router_.GeneratePadding(kPaddingSize); + packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize)); // Remove and the video module, we should fall back to padding on the // audio module again. packet_router_.RemoveSendRtpModule(&video_module); EXPECT_CALL(audio_module, GeneratePadding(kPaddingSize)) .WillOnce(generate_padding); - packet_router_.GeneratePadding(kPaddingSize); + packet_router_.GeneratePadding(DataSize::Bytes(kPaddingSize)); packet_router_.RemoveSendRtpModule(&audio_module); } @@ -194,7 +189,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) { const uint16_t kSsrc3 = 8901; // First two rtp modules send media and have rtx. 
- NiceMock rtp_1; + NiceMock rtp_1; EXPECT_CALL(rtp_1, SSRC()).WillRepeatedly(Return(kSsrc1)); EXPECT_CALL(rtp_1, SupportsPadding).WillRepeatedly(Return(true)); EXPECT_CALL(rtp_1, SupportsRtxPayloadPadding).WillRepeatedly(Return(true)); @@ -205,7 +200,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) { ::testing::Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc1)), _)) .WillRepeatedly(Return(true)); - NiceMock rtp_2; + NiceMock rtp_2; EXPECT_CALL(rtp_2, SSRC()).WillRepeatedly(Return(kSsrc2)); EXPECT_CALL(rtp_2, SupportsPadding).WillRepeatedly(Return(true)); EXPECT_CALL(rtp_2, SupportsRtxPayloadPadding).WillRepeatedly(Return(true)); @@ -217,7 +212,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) { .WillRepeatedly(Return(true)); // Third module is sending media, but does not support rtx. - NiceMock rtp_3; + NiceMock rtp_3; EXPECT_CALL(rtp_3, SSRC()).WillRepeatedly(Return(kSsrc3)); EXPECT_CALL(rtp_3, SupportsPadding).WillRepeatedly(Return(true)); EXPECT_CALL(rtp_3, SupportsRtxPayloadPadding).WillRepeatedly(Return(false)); @@ -243,7 +238,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) { packets.push_back(BuildRtpPacket(kSsrc2)); return packets; }); - packet_router_.GeneratePadding(kPaddingBytes); + packet_router_.GeneratePadding(DataSize::Bytes(kPaddingBytes)); // Send media on first module. Padding should be sent on that module. packet_router_.SendPacket(BuildRtpPacket(kSsrc1), PacedPacketInfo()); @@ -255,7 +250,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) { packets.push_back(BuildRtpPacket(kSsrc1)); return packets; }); - packet_router_.GeneratePadding(kPaddingBytes); + packet_router_.GeneratePadding(DataSize::Bytes(kPaddingBytes)); // Send media on second module. Padding should be sent there. 
packet_router_.SendPacket(BuildRtpPacket(kSsrc2), PacedPacketInfo()); @@ -265,7 +260,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) { packet_router_.RemoveSendRtpModule(&rtp_2); // Send on and then remove all remaining modules. - RtpRtcp* last_send_module; + RtpRtcpInterface* last_send_module; EXPECT_CALL(rtp_1, GeneratePadding(kPaddingBytes)) .Times(1) .WillOnce([&](size_t target_size_bytes) { @@ -285,7 +280,7 @@ TEST_F(PacketRouterTest, PadsOnLastActiveMediaStream) { for (int i = 0; i < 2; ++i) { last_send_module = nullptr; - packet_router_.GeneratePadding(kPaddingBytes); + packet_router_.GeneratePadding(DataSize::Bytes(kPaddingBytes)); EXPECT_NE(last_send_module, nullptr); packet_router_.RemoveSendRtpModule(last_send_module); } @@ -297,7 +292,7 @@ TEST_F(PacketRouterTest, AllocatesTransportSequenceNumbers) { const uint16_t kSsrc1 = 1234; PacketRouter packet_router(kStartSeq - 1); - NiceMock rtp_1; + NiceMock rtp_1; EXPECT_CALL(rtp_1, SSRC()).WillRepeatedly(Return(kSsrc1)); EXPECT_CALL(rtp_1, TrySendPacket).WillRepeatedly(Return(true)); packet_router.AddSendRtpModule(&rtp_1, false); @@ -315,8 +310,8 @@ TEST_F(PacketRouterTest, AllocatesTransportSequenceNumbers) { } TEST_F(PacketRouterTest, SendTransportFeedback) { - NiceMock rtp_1; - NiceMock rtp_2; + NiceMock rtp_1; + NiceMock rtp_2; ON_CALL(rtp_1, RTCP()).WillByDefault(Return(RtcpMode::kCompound)); ON_CALL(rtp_2, RTCP()).WillByDefault(Return(RtcpMode::kCompound)); @@ -326,10 +321,10 @@ TEST_F(PacketRouterTest, SendTransportFeedback) { std::vector> feedback; feedback.push_back(std::make_unique()); - EXPECT_CALL(rtp_1, SendCombinedRtcpPacket).Times(1); + EXPECT_CALL(rtp_1, SendCombinedRtcpPacket); packet_router_.SendCombinedRtcpPacket(std::move(feedback)); packet_router_.RemoveSendRtpModule(&rtp_1); - EXPECT_CALL(rtp_2, SendCombinedRtcpPacket).Times(1); + EXPECT_CALL(rtp_2, SendCombinedRtcpPacket); std::vector> new_feedback; new_feedback.push_back(std::make_unique()); 
packet_router_.SendCombinedRtcpPacket(std::move(new_feedback)); @@ -338,7 +333,7 @@ TEST_F(PacketRouterTest, SendTransportFeedback) { TEST_F(PacketRouterTest, SendPacketWithoutTransportSequenceNumbers) { const uint16_t kSsrc1 = 1234; - NiceMock rtp_1; + NiceMock rtp_1; ON_CALL(rtp_1, SendingMedia).WillByDefault(Return(true)); ON_CALL(rtp_1, SSRC).WillByDefault(Return(kSsrc1)); packet_router_.AddSendRtpModule(&rtp_1, false); @@ -361,8 +356,8 @@ TEST_F(PacketRouterTest, SendPacketWithoutTransportSequenceNumbers) { } TEST_F(PacketRouterTest, SendPacketAssignsTransportSequenceNumbers) { - NiceMock rtp_1; - NiceMock rtp_2; + NiceMock rtp_1; + NiceMock rtp_2; const uint16_t kSsrc1 = 1234; const uint16_t kSsrc2 = 2345; @@ -405,8 +400,9 @@ TEST_F(PacketRouterTest, SendPacketAssignsTransportSequenceNumbers) { } #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -TEST_F(PacketRouterTest, DoubleRegistrationOfSendModuleDisallowed) { - NiceMock module; +using PacketRouterDeathTest = PacketRouterTest; +TEST_F(PacketRouterDeathTest, DoubleRegistrationOfSendModuleDisallowed) { + NiceMock module; constexpr bool remb_candidate = false; // Value irrelevant. packet_router_.AddSendRtpModule(&module, remb_candidate); @@ -416,8 +412,8 @@ TEST_F(PacketRouterTest, DoubleRegistrationOfSendModuleDisallowed) { packet_router_.RemoveSendRtpModule(&module); } -TEST_F(PacketRouterTest, DoubleRegistrationOfReceiveModuleDisallowed) { - NiceMock module; +TEST_F(PacketRouterDeathTest, DoubleRegistrationOfReceiveModuleDisallowed) { + NiceMock module; constexpr bool remb_candidate = false; // Value irrelevant. 
packet_router_.AddReceiveRtpModule(&module, remb_candidate); @@ -427,102 +423,23 @@ TEST_F(PacketRouterTest, DoubleRegistrationOfReceiveModuleDisallowed) { packet_router_.RemoveReceiveRtpModule(&module); } -TEST_F(PacketRouterTest, RemovalOfNeverAddedSendModuleDisallowed) { - NiceMock module; +TEST_F(PacketRouterDeathTest, RemovalOfNeverAddedSendModuleDisallowed) { + NiceMock module; EXPECT_DEATH(packet_router_.RemoveSendRtpModule(&module), ""); } -TEST_F(PacketRouterTest, RemovalOfNeverAddedReceiveModuleDisallowed) { - NiceMock module; +TEST_F(PacketRouterDeathTest, RemovalOfNeverAddedReceiveModuleDisallowed) { + NiceMock module; EXPECT_DEATH(packet_router_.RemoveReceiveRtpModule(&module), ""); } #endif // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -TEST(PacketRouterRembTest, LowerEstimateToSendRemb) { - rtc::ScopedFakeClock clock; - NiceMock rtp; - PacketRouter packet_router; - - packet_router.AddSendRtpModule(&rtp, true); - - uint32_t bitrate_estimate = 456; - const std::vector ssrcs = {1234}; - - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Call OnReceiveBitrateChanged twice to get a first estimate. - clock.AdvanceTime(TimeDelta::Millis(1000)); - EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Lower the estimate with more than 3% to trigger a call to SetRemb right - // away. 
- bitrate_estimate = bitrate_estimate - 100; - EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - packet_router.RemoveSendRtpModule(&rtp); -} - -TEST(PacketRouterRembTest, VerifyIncreasingAndDecreasing) { - rtc::ScopedFakeClock clock; - NiceMock rtp; - PacketRouter packet_router; - packet_router.AddSendRtpModule(&rtp, true); - - uint32_t bitrate_estimate[] = {456, 789}; - std::vector ssrcs = {1234, 5678}; - - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate[0]); - - // Call OnReceiveBitrateChanged twice to get a first estimate. - EXPECT_CALL(rtp, SetRemb(bitrate_estimate[0], ssrcs)).Times(1); - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate[0]); - - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate[1] + 100); - - // Lower the estimate to trigger a callback. - EXPECT_CALL(rtp, SetRemb(bitrate_estimate[1], ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate[1]); - - packet_router.RemoveSendRtpModule(&rtp); -} - -TEST(PacketRouterRembTest, NoRembForIncreasedBitrate) { - rtc::ScopedFakeClock clock; - NiceMock rtp; - PacketRouter packet_router; - packet_router.AddSendRtpModule(&rtp, true); - - uint32_t bitrate_estimate = 456; - std::vector ssrcs = {1234, 5678}; - - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Call OnReceiveBitrateChanged twice to get a first estimate. - EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs)).Times(1); - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Increased estimate shouldn't trigger a callback right away. - EXPECT_CALL(rtp, SetRemb(_, _)).Times(0); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate + 1); - - // Decreasing the estimate less than 3% shouldn't trigger a new callback. 
- EXPECT_CALL(rtp, SetRemb(_, _)).Times(0); - int lower_estimate = bitrate_estimate * 98 / 100; - packet_router.OnReceiveBitrateChanged(ssrcs, lower_estimate); - - packet_router.RemoveSendRtpModule(&rtp); -} - -TEST(PacketRouterRembTest, ChangeSendRtpModule) { +TEST(PacketRouterRembTest, ChangeSendRtpModuleChangeRembSender) { rtc::ScopedFakeClock clock; - NiceMock rtp_send; - NiceMock rtp_recv; + NiceMock rtp_send; + NiceMock rtp_recv; PacketRouter packet_router; packet_router.AddSendRtpModule(&rtp_send, true); packet_router.AddReceiveRtpModule(&rtp_recv, true); @@ -530,196 +447,23 @@ TEST(PacketRouterRembTest, ChangeSendRtpModule) { uint32_t bitrate_estimate = 456; std::vector ssrcs = {1234, 5678}; - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Call OnReceiveBitrateChanged twice to get a first estimate. - clock.AdvanceTime(TimeDelta::Millis(1000)); - EXPECT_CALL(rtp_send, SetRemb(bitrate_estimate, ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Decrease estimate to trigger a REMB. - bitrate_estimate = bitrate_estimate - 100; - EXPECT_CALL(rtp_send, SetRemb(bitrate_estimate, ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + EXPECT_CALL(rtp_send, SetRemb(bitrate_estimate, ssrcs)); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Remove the sending module -> should get remb on the second module. 
packet_router.RemoveSendRtpModule(&rtp_send); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - bitrate_estimate = bitrate_estimate - 100; - EXPECT_CALL(rtp_recv, SetRemb(bitrate_estimate, ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + EXPECT_CALL(rtp_recv, SetRemb(bitrate_estimate, ssrcs)); + packet_router.SendRemb(bitrate_estimate, ssrcs); packet_router.RemoveReceiveRtpModule(&rtp_recv); } -TEST(PacketRouterRembTest, OnlyOneRembForRepeatedOnReceiveBitrateChanged) { - rtc::ScopedFakeClock clock; - NiceMock rtp; - PacketRouter packet_router; - packet_router.AddSendRtpModule(&rtp, true); - - uint32_t bitrate_estimate = 456; - const std::vector ssrcs = {1234}; - - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Call OnReceiveBitrateChanged twice to get a first estimate. - clock.AdvanceTime(TimeDelta::Millis(1000)); - EXPECT_CALL(rtp, SetRemb(_, _)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Lower the estimate, should trigger a call to SetRemb right away. - bitrate_estimate = bitrate_estimate - 100; - EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Call OnReceiveBitrateChanged again, this should not trigger a new callback. 
- EXPECT_CALL(rtp, SetRemb(_, _)).Times(0); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - packet_router.RemoveSendRtpModule(&rtp); -} - -TEST(PacketRouterRembTest, SetMaxDesiredReceiveBitrateLimitsSetRemb) { - rtc::ScopedFakeClock clock; - PacketRouter packet_router; - clock.AdvanceTime(TimeDelta::Millis(1000)); - NiceMock remb_sender; - constexpr bool remb_candidate = true; - packet_router.AddSendRtpModule(&remb_sender, remb_candidate); - - const int64_t cap_bitrate = 100000; - EXPECT_CALL(remb_sender, SetRemb(Le(cap_bitrate), _)).Times(AtLeast(1)); - EXPECT_CALL(remb_sender, SetRemb(Gt(cap_bitrate), _)).Times(0); - - const std::vector ssrcs = {1234}; - packet_router.SetMaxDesiredReceiveBitrate(cap_bitrate); - packet_router.OnReceiveBitrateChanged(ssrcs, cap_bitrate + 5000); - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, cap_bitrate - 5000); - - // Test tear-down. - packet_router.RemoveSendRtpModule(&remb_sender); -} - -TEST(PacketRouterRembTest, - SetMaxDesiredReceiveBitrateTriggersRembWhenMoreRestrictive) { - rtc::ScopedFakeClock clock; - PacketRouter packet_router; - clock.AdvanceTime(TimeDelta::Millis(1000)); - NiceMock remb_sender; - constexpr bool remb_candidate = true; - packet_router.AddSendRtpModule(&remb_sender, remb_candidate); - - const int64_t measured_bitrate_bps = 150000; - const int64_t cap_bitrate_bps = measured_bitrate_bps - 5000; - const std::vector ssrcs = {1234}; - EXPECT_CALL(remb_sender, SetRemb(measured_bitrate_bps, _)); - packet_router.OnReceiveBitrateChanged(ssrcs, measured_bitrate_bps); - - EXPECT_CALL(remb_sender, SetRemb(cap_bitrate_bps, _)); - packet_router.SetMaxDesiredReceiveBitrate(cap_bitrate_bps); - - // Test tear-down. 
- packet_router.RemoveSendRtpModule(&remb_sender); -} - -TEST(PacketRouterRembTest, - SetMaxDesiredReceiveBitrateDoesNotTriggerRembWhenAsRestrictive) { - rtc::ScopedFakeClock clock; - PacketRouter packet_router; - clock.AdvanceTime(TimeDelta::Millis(1000)); - NiceMock remb_sender; - constexpr bool remb_candidate = true; - packet_router.AddSendRtpModule(&remb_sender, remb_candidate); - - const uint32_t measured_bitrate_bps = 150000; - const uint32_t cap_bitrate_bps = measured_bitrate_bps; - const std::vector ssrcs = {1234}; - EXPECT_CALL(remb_sender, SetRemb(measured_bitrate_bps, _)); - packet_router.OnReceiveBitrateChanged(ssrcs, measured_bitrate_bps); - - EXPECT_CALL(remb_sender, SetRemb(_, _)).Times(0); - packet_router.SetMaxDesiredReceiveBitrate(cap_bitrate_bps); - - // Test tear-down. - packet_router.RemoveSendRtpModule(&remb_sender); -} - -TEST(PacketRouterRembTest, - SetMaxDesiredReceiveBitrateDoesNotTriggerRembWhenLessRestrictive) { - rtc::ScopedFakeClock clock; - PacketRouter packet_router; - clock.AdvanceTime(TimeDelta::Millis(1000)); - NiceMock remb_sender; - constexpr bool remb_candidate = true; - packet_router.AddSendRtpModule(&remb_sender, remb_candidate); - - const uint32_t measured_bitrate_bps = 150000; - const uint32_t cap_bitrate_bps = measured_bitrate_bps + 500; - const std::vector ssrcs = {1234}; - EXPECT_CALL(remb_sender, SetRemb(measured_bitrate_bps, _)); - packet_router.OnReceiveBitrateChanged(ssrcs, measured_bitrate_bps); - - EXPECT_CALL(remb_sender, SetRemb(_, _)).Times(0); - packet_router.SetMaxDesiredReceiveBitrate(cap_bitrate_bps); - - // Test tear-down. 
- packet_router.RemoveSendRtpModule(&remb_sender); -} - -TEST(PacketRouterRembTest, - SetMaxDesiredReceiveBitrateTriggersRembWhenNoRecentMeasure) { - rtc::ScopedFakeClock clock; - PacketRouter packet_router; - clock.AdvanceTime(TimeDelta::Millis(1000)); - NiceMock remb_sender; - constexpr bool remb_candidate = true; - packet_router.AddSendRtpModule(&remb_sender, remb_candidate); - - const uint32_t measured_bitrate_bps = 150000; - const uint32_t cap_bitrate_bps = measured_bitrate_bps + 5000; - const std::vector ssrcs = {1234}; - EXPECT_CALL(remb_sender, SetRemb(measured_bitrate_bps, _)); - packet_router.OnReceiveBitrateChanged(ssrcs, measured_bitrate_bps); - clock.AdvanceTime(TimeDelta::Millis(1000)); - - EXPECT_CALL(remb_sender, SetRemb(cap_bitrate_bps, _)); - packet_router.SetMaxDesiredReceiveBitrate(cap_bitrate_bps); - - // Test tear-down. - packet_router.RemoveSendRtpModule(&remb_sender); -} - -TEST(PacketRouterRembTest, - SetMaxDesiredReceiveBitrateTriggersRembWhenNoMeasures) { - rtc::ScopedFakeClock clock; - PacketRouter packet_router; - clock.AdvanceTime(TimeDelta::Millis(1000)); - NiceMock remb_sender; - constexpr bool remb_candidate = true; - packet_router.AddSendRtpModule(&remb_sender, remb_candidate); - - // Set cap. - EXPECT_CALL(remb_sender, SetRemb(100000, _)).Times(1); - packet_router.SetMaxDesiredReceiveBitrate(100000); - // Increase cap. - EXPECT_CALL(remb_sender, SetRemb(200000, _)).Times(1); - packet_router.SetMaxDesiredReceiveBitrate(200000); - // Decrease cap. - EXPECT_CALL(remb_sender, SetRemb(150000, _)).Times(1); - packet_router.SetMaxDesiredReceiveBitrate(150000); - - // Test tear-down. - packet_router.RemoveSendRtpModule(&remb_sender); -} - // Only register receiving modules and make sure we fallback to trigger a REMB // packet on this one. 
TEST(PacketRouterRembTest, NoSendingRtpModule) { rtc::ScopedFakeClock clock; - NiceMock rtp; + NiceMock rtp; PacketRouter packet_router; packet_router.AddReceiveRtpModule(&rtp, true); @@ -727,25 +471,21 @@ TEST(PacketRouterRembTest, NoSendingRtpModule) { uint32_t bitrate_estimate = 456; const std::vector ssrcs = {1234}; - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); - - // Call OnReceiveBitrateChanged twice to get a first estimate. - clock.AdvanceTime(TimeDelta::Millis(1000)); - EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs)); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Lower the estimate to trigger a new packet REMB packet. - EXPECT_CALL(rtp, SetRemb(bitrate_estimate - 100, ssrcs)).Times(1); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate - 100); + EXPECT_CALL(rtp, SetRemb(bitrate_estimate, ssrcs)); + packet_router.SendRemb(bitrate_estimate, ssrcs); - EXPECT_CALL(rtp, UnsetRemb()).Times(1); + EXPECT_CALL(rtp, UnsetRemb()); packet_router.RemoveReceiveRtpModule(&rtp); } TEST(PacketRouterRembTest, NonCandidateSendRtpModuleNotUsedForRemb) { rtc::ScopedFakeClock clock; PacketRouter packet_router; - NiceMock module; + NiceMock module; constexpr bool remb_candidate = false; @@ -754,8 +494,7 @@ TEST(PacketRouterRembTest, NonCandidateSendRtpModuleNotUsedForRemb) { constexpr uint32_t bitrate_estimate = 456; const std::vector ssrcs = {1234}; EXPECT_CALL(module, SetRemb(_, _)).Times(0); - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Test tear-down packet_router.RemoveSendRtpModule(&module); @@ -764,7 +503,7 @@ TEST(PacketRouterRembTest, NonCandidateSendRtpModuleNotUsedForRemb) { TEST(PacketRouterRembTest, CandidateSendRtpModuleUsedForRemb) { rtc::ScopedFakeClock clock; PacketRouter 
packet_router; - NiceMock module; + NiceMock module; constexpr bool remb_candidate = true; @@ -772,9 +511,8 @@ TEST(PacketRouterRembTest, CandidateSendRtpModuleUsedForRemb) { constexpr uint32_t bitrate_estimate = 456; const std::vector ssrcs = {1234}; - EXPECT_CALL(module, SetRemb(bitrate_estimate, ssrcs)).Times(1); - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + EXPECT_CALL(module, SetRemb(bitrate_estimate, ssrcs)); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Test tear-down packet_router.RemoveSendRtpModule(&module); @@ -783,7 +521,7 @@ TEST(PacketRouterRembTest, CandidateSendRtpModuleUsedForRemb) { TEST(PacketRouterRembTest, NonCandidateReceiveRtpModuleNotUsedForRemb) { rtc::ScopedFakeClock clock; PacketRouter packet_router; - NiceMock module; + NiceMock module; constexpr bool remb_candidate = false; @@ -792,8 +530,7 @@ TEST(PacketRouterRembTest, NonCandidateReceiveRtpModuleNotUsedForRemb) { constexpr uint32_t bitrate_estimate = 456; const std::vector ssrcs = {1234}; EXPECT_CALL(module, SetRemb(_, _)).Times(0); - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Test tear-down packet_router.RemoveReceiveRtpModule(&module); @@ -802,7 +539,7 @@ TEST(PacketRouterRembTest, NonCandidateReceiveRtpModuleNotUsedForRemb) { TEST(PacketRouterRembTest, CandidateReceiveRtpModuleUsedForRemb) { rtc::ScopedFakeClock clock; PacketRouter packet_router; - NiceMock module; + NiceMock module; constexpr bool remb_candidate = true; @@ -810,9 +547,8 @@ TEST(PacketRouterRembTest, CandidateReceiveRtpModuleUsedForRemb) { constexpr uint32_t bitrate_estimate = 456; const std::vector ssrcs = {1234}; - EXPECT_CALL(module, SetRemb(bitrate_estimate, ssrcs)).Times(1); - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + EXPECT_CALL(module, 
SetRemb(bitrate_estimate, ssrcs)); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Test tear-down packet_router.RemoveReceiveRtpModule(&module); @@ -822,8 +558,8 @@ TEST(PacketRouterRembTest, SendCandidatePreferredOverReceiveCandidate_SendModuleAddedFirst) { rtc::ScopedFakeClock clock; PacketRouter packet_router; - NiceMock send_module; - NiceMock receive_module; + NiceMock send_module; + NiceMock receive_module; constexpr bool remb_candidate = true; @@ -835,11 +571,10 @@ TEST(PacketRouterRembTest, constexpr uint32_t bitrate_estimate = 456; const std::vector ssrcs = {1234}; - EXPECT_CALL(send_module, SetRemb(bitrate_estimate, ssrcs)).Times(1); + EXPECT_CALL(send_module, SetRemb(bitrate_estimate, ssrcs)); EXPECT_CALL(receive_module, SetRemb(_, _)).Times(0); - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Test tear-down packet_router.RemoveReceiveRtpModule(&receive_module); @@ -850,8 +585,8 @@ TEST(PacketRouterRembTest, SendCandidatePreferredOverReceiveCandidate_ReceiveModuleAddedFirst) { rtc::ScopedFakeClock clock; PacketRouter packet_router; - NiceMock send_module; - NiceMock receive_module; + NiceMock send_module; + NiceMock receive_module; constexpr bool remb_candidate = true; @@ -863,11 +598,11 @@ TEST(PacketRouterRembTest, constexpr uint32_t bitrate_estimate = 456; const std::vector ssrcs = {1234}; - EXPECT_CALL(send_module, SetRemb(bitrate_estimate, ssrcs)).Times(1); + EXPECT_CALL(send_module, SetRemb(bitrate_estimate, ssrcs)); EXPECT_CALL(receive_module, SetRemb(_, _)).Times(0); clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Test tear-down packet_router.RemoveReceiveRtpModule(&receive_module); @@ -877,8 +612,8 @@ TEST(PacketRouterRembTest, TEST(PacketRouterRembTest, ReceiveModuleTakesOverWhenLastSendModuleRemoved) { 
rtc::ScopedFakeClock clock; PacketRouter packet_router; - NiceMock send_module; - NiceMock receive_module; + NiceMock send_module; + NiceMock receive_module; constexpr bool remb_candidate = true; @@ -891,10 +626,8 @@ TEST(PacketRouterRembTest, ReceiveModuleTakesOverWhenLastSendModuleRemoved) { constexpr uint32_t bitrate_estimate = 456; const std::vector ssrcs = {1234}; EXPECT_CALL(send_module, SetRemb(_, _)).Times(0); - EXPECT_CALL(receive_module, SetRemb(bitrate_estimate, ssrcs)).Times(1); - - clock.AdvanceTime(TimeDelta::Millis(1000)); - packet_router.OnReceiveBitrateChanged(ssrcs, bitrate_estimate); + EXPECT_CALL(receive_module, SetRemb(bitrate_estimate, ssrcs)); + packet_router.SendRemb(bitrate_estimate, ssrcs); // Test tear-down packet_router.RemoveReceiveRtpModule(&receive_module); diff --git a/modules/pacing/round_robin_packet_queue.h b/modules/pacing/round_robin_packet_queue.h index 9446a8e174..cad555a1af 100644 --- a/modules/pacing/round_robin_packet_queue.h +++ b/modules/pacing/round_robin_packet_queue.h @@ -19,6 +19,7 @@ #include #include #include +#include #include "absl/types/optional.h" #include "api/transport/webrtc_key_value_config.h" @@ -163,7 +164,7 @@ class RoundRobinPacketQueue { std::multimap stream_priorities_; // A map of SSRCs to Streams. - std::map streams_; + std::unordered_map streams_; // The enqueue time of every packet currently in the queue. Used to figure out // the age of the oldest packet in the queue. 
diff --git a/modules/pacing/task_queue_paced_sender.cc b/modules/pacing/task_queue_paced_sender.cc index d460d60048..709718ff16 100644 --- a/modules/pacing/task_queue_paced_sender.cc +++ b/modules/pacing/task_queue_paced_sender.cc @@ -17,6 +17,7 @@ #include "rtc_base/event.h" #include "rtc_base/logging.h" #include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/trace_event.h" namespace webrtc { namespace { @@ -31,16 +32,15 @@ constexpr TimeDelta kMinTimeBetweenStatsUpdates = TimeDelta::Millis(1); TaskQueuePacedSender::TaskQueuePacedSender( Clock* clock, - PacketRouter* packet_router, + PacingController::PacketSender* packet_sender, RtcEventLog* event_log, const WebRtcKeyValueConfig* field_trials, TaskQueueFactory* task_queue_factory, TimeDelta hold_back_window) : clock_(clock), hold_back_window_(hold_back_window), - packet_router_(packet_router), pacing_controller_(clock, - static_cast(this), + packet_sender, event_log, field_trials, PacingController::ProcessMode::kDynamic), @@ -62,6 +62,14 @@ TaskQueuePacedSender::~TaskQueuePacedSender() { }); } +void TaskQueuePacedSender::EnsureStarted() { + task_queue_.PostTask([this]() { + RTC_DCHECK_RUN_ON(&task_queue_); + is_started_ = true; + MaybeProcessPackets(Timestamp::MinusInfinity()); + }); +} + void TaskQueuePacedSender::CreateProbeCluster(DataRate bitrate, int cluster_id) { task_queue_.PostTask([this, bitrate, cluster_id]() { @@ -122,9 +130,21 @@ void TaskQueuePacedSender::SetPacingRates(DataRate pacing_rate, void TaskQueuePacedSender::EnqueuePackets( std::vector> packets) { +#if RTC_TRACE_EVENTS_ENABLED + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("webrtc"), + "TaskQueuePacedSender::EnqueuePackets"); + for (auto& packet : packets) { + TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("webrtc"), + "TaskQueuePacedSender::EnqueuePackets::Loop", + "sequence_number", packet->SequenceNumber(), "rtp_timestamp", + packet->Timestamp()); + } +#endif + task_queue_.PostTask([this, packets_ = std::move(packets)]() mutable { 
RTC_DCHECK_RUN_ON(&task_queue_); for (auto& packet : packets_) { + RTC_DCHECK_GE(packet->capture_time_ms(), 0); pacing_controller_.EnqueuePacket(std::move(packet)); } MaybeProcessPackets(Timestamp::MinusInfinity()); @@ -176,11 +196,16 @@ TimeDelta TaskQueuePacedSender::OldestPacketWaitTime() const { return GetStats().oldest_packet_wait_time; } +void TaskQueuePacedSender::OnStatsUpdated(const Stats& stats) { + MutexLock lock(&stats_mutex_); + current_stats_ = stats; +} + void TaskQueuePacedSender::MaybeProcessPackets( Timestamp scheduled_process_time) { RTC_DCHECK_RUN_ON(&task_queue_); - if (is_shutdown_) { + if (is_shutdown_ || !is_started_) { return; } @@ -202,78 +227,102 @@ void TaskQueuePacedSender::MaybeProcessPackets( next_process_time = pacing_controller_.NextSendTime(); } - const TimeDelta min_sleep = pacing_controller_.IsProbing() - ? PacingController::kMinSleepTime - : hold_back_window_; - next_process_time = std::max(now + min_sleep, next_process_time); + absl::optional time_to_next_process; + if (pacing_controller_.IsProbing() && + next_process_time != next_process_time_) { + // If we're probing and there isn't already a wakeup scheduled for the next + // process time, always post a task and just round sleep time down to + // nearest millisecond. + if (next_process_time.IsMinusInfinity()) { + time_to_next_process = TimeDelta::Zero(); + } else { + time_to_next_process = + std::max(TimeDelta::Zero(), + (next_process_time - now).RoundDownTo(TimeDelta::Millis(1))); + } + } else if (next_process_time_.IsMinusInfinity() || + next_process_time <= next_process_time_ - hold_back_window_) { + // Schedule a new task since there is none currently scheduled + // (|next_process_time_| is infinite), or the new process time is at least + // one holdback window earlier than whatever is currently scheduled. 
+    time_to_next_process = std::max(next_process_time - now, hold_back_window_);
+  }
 
-  TimeDelta sleep_time = next_process_time - now;
-  if (next_process_time_.IsMinusInfinity() ||
-      next_process_time <=
-          next_process_time_ - PacingController::kMinSleepTime) {
+  if (time_to_next_process) {
+    // Set a new scheduled process time and post a delayed task.
     next_process_time_ = next_process_time;
     task_queue_.PostDelayedTask(
         [this, next_process_time]() { MaybeProcessPackets(next_process_time); },
-        sleep_time.ms());
+        time_to_next_process->ms());
   }
 
   MaybeUpdateStats(false);
 }
 
-std::vector<std::unique_ptr<RtpPacketToSend>>
-TaskQueuePacedSender::GeneratePadding(DataSize size) {
-  return packet_router_->GeneratePadding(size.bytes());
-}
-
-void TaskQueuePacedSender::SendRtpPacket(
-    std::unique_ptr<RtpPacketToSend> packet,
-    const PacedPacketInfo& cluster_info) {
-  packet_router_->SendPacket(std::move(packet), cluster_info);
-}
-
 void TaskQueuePacedSender::MaybeUpdateStats(bool is_scheduled_call) {
   if (is_shutdown_) {
+    if (is_scheduled_call) {
+      stats_update_scheduled_ = false;
+    }
     return;
   }
 
   Timestamp now = clock_->CurrentTime();
-  if (!is_scheduled_call &&
-      now - last_stats_time_ < kMinTimeBetweenStatsUpdates) {
-    // Too frequent unscheduled stats update, return early.
-    return;
+  if (is_scheduled_call) {
+    // Allow scheduled task to process packets to clear up any remaining debt
+    // level in an otherwise empty queue.
+    pacing_controller_.ProcessPackets();
+  } else {
+    if (now - last_stats_time_ < kMinTimeBetweenStatsUpdates) {
+      // Too frequent unscheduled stats update, return early.
+ return; + } } - rtc::CritScope cs(&stats_crit_); - current_stats_.expected_queue_time = pacing_controller_.ExpectedQueueTime(); - current_stats_.first_sent_packet_time = - pacing_controller_.FirstSentPacketTime(); - current_stats_.oldest_packet_wait_time = - pacing_controller_.OldestPacketWaitTime(); - current_stats_.queue_size = pacing_controller_.QueueSizeData(); + Stats new_stats; + new_stats.expected_queue_time = pacing_controller_.ExpectedQueueTime(); + new_stats.first_sent_packet_time = pacing_controller_.FirstSentPacketTime(); + new_stats.oldest_packet_wait_time = pacing_controller_.OldestPacketWaitTime(); + new_stats.queue_size = pacing_controller_.QueueSizeData(); + OnStatsUpdated(new_stats); + last_stats_time_ = now; bool pacer_drained = pacing_controller_.QueueSizePackets() == 0 && pacing_controller_.CurrentBufferLevel().IsZero(); // If there's anything interesting to get from the pacer and this is a - // scheduled call (no scheduled call in flight), post a new scheduled stats + // scheduled call (or no scheduled call in flight), post a new scheduled stats // update. - if (!pacer_drained && (is_scheduled_call || !stats_update_scheduled_)) { - task_queue_.PostDelayedTask( - [this]() { - RTC_DCHECK_RUN_ON(&task_queue_); - MaybeUpdateStats(true); - }, - kMaxTimeBetweenStatsUpdates.ms()); - stats_update_scheduled_ = true; - } else { + if (!pacer_drained) { + if (!stats_update_scheduled_) { + // There is no pending delayed task to update stats, add one. + // Treat this call as being scheduled in order to bootstrap scheduling + // loop. + stats_update_scheduled_ = true; + is_scheduled_call = true; + } + + // Only if on the scheduled call loop do we want to schedule a new delayed + // task. 
+ if (is_scheduled_call) { + task_queue_.PostDelayedTask( + [this]() { + RTC_DCHECK_RUN_ON(&task_queue_); + MaybeUpdateStats(true); + }, + kMaxTimeBetweenStatsUpdates.ms()); + } + } else if (is_scheduled_call) { + // This is a scheduled call, signing out since there's nothing interesting + // left to check. stats_update_scheduled_ = false; } } TaskQueuePacedSender::Stats TaskQueuePacedSender::GetStats() const { - rtc::CritScope cs(&stats_crit_); + MutexLock lock(&stats_mutex_); return current_stats_; } diff --git a/modules/pacing/task_queue_paced_sender.h b/modules/pacing/task_queue_paced_sender.h index 3241d3fb63..0673441e52 100644 --- a/modules/pacing/task_queue_paced_sender.h +++ b/modules/pacing/task_queue_paced_sender.h @@ -20,17 +20,16 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_factory.h" #include "api/units/data_size.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" #include "modules/include/module.h" #include "modules/pacing/pacing_controller.h" -#include "modules/pacing/packet_router.h" #include "modules/pacing/rtp_packet_pacer.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" #include "rtc_base/thread_annotations.h" @@ -38,9 +37,7 @@ namespace webrtc { class Clock; class RtcEventLog; -class TaskQueuePacedSender : public RtpPacketPacer, - public RtpPacketSender, - private PacingController::PacketSender { +class TaskQueuePacedSender : public RtpPacketPacer, public RtpPacketSender { public: // The |hold_back_window| parameter sets a lower bound on time to sleep if // there is currently a pacer queue and packets can't immediately be @@ -49,7 +46,7 @@ class TaskQueuePacedSender : public RtpPacketPacer, // TODO(bugs.webrtc.org/10809): Remove default value for hold_back_window. 
TaskQueuePacedSender( Clock* clock, - PacketRouter* packet_router, + PacingController::PacketSender* packet_sender, RtcEventLog* event_log, const WebRtcKeyValueConfig* field_trials, TaskQueueFactory* task_queue_factory, @@ -57,10 +54,13 @@ class TaskQueuePacedSender : public RtpPacketPacer, ~TaskQueuePacedSender() override; + // Ensure that necessary delayed tasks are scheduled. + void EnsureStarted(); + // Methods implementing RtpPacketSender. - // Adds the packet to the queue and calls PacketRouter::SendPacket() when - // it's time to send. + // Adds the packet to the queue and calls + // PacingController::PacketSender::SendPacket() when it's time to send. void EnqueuePackets( std::vector> packets) override; @@ -106,7 +106,8 @@ class TaskQueuePacedSender : public RtpPacketPacer, // specified by SetPacingRates() if needed to achieve this goal. void SetQueueTimeLimit(TimeDelta limit) override; - private: + protected: + // Exposed as protected for test. struct Stats { Stats() : oldest_packet_wait_time(TimeDelta::Zero()), @@ -117,7 +118,9 @@ class TaskQueuePacedSender : public RtpPacketPacer, TimeDelta expected_queue_time; absl::optional first_sent_packet_time; }; + virtual void OnStatsUpdated(const Stats& stats); + private: // Check if it is time to send packets, or schedule a delayed task if not. // Use Timestamp::MinusInfinity() to indicate that this call has _not_ // been scheduled by the pacing controller. If this is the case, check if @@ -125,21 +128,11 @@ class TaskQueuePacedSender : public RtpPacketPacer, // method again with desired (finite) scheduled process time. void MaybeProcessPackets(Timestamp scheduled_process_time); - // Methods implementing PacedSenderController:PacketSender. 
- - void SendRtpPacket(std::unique_ptr packet, - const PacedPacketInfo& cluster_info) override - RTC_RUN_ON(task_queue_); - - std::vector> GeneratePadding( - DataSize size) override RTC_RUN_ON(task_queue_); - void MaybeUpdateStats(bool is_scheduled_call) RTC_RUN_ON(task_queue_); Stats GetStats() const; Clock* const clock_; const TimeDelta hold_back_window_; - PacketRouter* const packet_router_ RTC_GUARDED_BY(task_queue_); PacingController pacing_controller_ RTC_GUARDED_BY(task_queue_); // We want only one (valid) delayed process task in flight at a time. @@ -159,13 +152,17 @@ class TaskQueuePacedSender : public RtpPacketPacer, // Last time stats were updated. Timestamp last_stats_time_ RTC_GUARDED_BY(task_queue_); + // Indicates if this task queue is started. If not, don't allow + // posting delayed tasks yet. + bool is_started_ RTC_GUARDED_BY(task_queue_) = false; + // Indicates if this task queue is shutting down. If so, don't allow // posting any more delayed tasks as that can cause the task queue to // never drain. 
bool is_shutdown_ RTC_GUARDED_BY(task_queue_); - rtc::CriticalSection stats_crit_; - Stats current_stats_ RTC_GUARDED_BY(stats_crit_); + mutable Mutex stats_mutex_; + Stats current_stats_ RTC_GUARDED_BY(stats_mutex_); rtc::TaskQueue task_queue_; }; diff --git a/modules/pacing/task_queue_paced_sender_unittest.cc b/modules/pacing/task_queue_paced_sender_unittest.cc index e93f776f38..3806ec28d2 100644 --- a/modules/pacing/task_queue_paced_sender_unittest.cc +++ b/modules/pacing/task_queue_paced_sender_unittest.cc @@ -10,12 +10,14 @@ #include "modules/pacing/task_queue_paced_sender.h" +#include #include #include #include #include #include +#include "api/transport/network_types.h" #include "modules/pacing/packet_router.h" #include "modules/utility/include/mock/mock_process_thread.h" #include "test/field_trial.h" @@ -38,13 +40,72 @@ constexpr size_t kDefaultPacketSize = 1234; class MockPacketRouter : public PacketRouter { public: - MOCK_METHOD2(SendPacket, - void(std::unique_ptr packet, - const PacedPacketInfo& cluster_info)); - MOCK_METHOD1( - GeneratePadding, - std::vector>(size_t target_size_bytes)); + MOCK_METHOD(void, + SendPacket, + (std::unique_ptr packet, + const PacedPacketInfo& cluster_info), + (override)); + MOCK_METHOD(std::vector>, + FetchFec, + (), + (override)); + MOCK_METHOD(std::vector>, + GeneratePadding, + (DataSize target_size), + (override)); }; + +class StatsUpdateObserver { + public: + StatsUpdateObserver() = default; + virtual ~StatsUpdateObserver() = default; + + virtual void OnStatsUpdated() = 0; +}; + +class TaskQueuePacedSenderForTest : public TaskQueuePacedSender { + public: + TaskQueuePacedSenderForTest(Clock* clock, + PacketRouter* packet_router, + RtcEventLog* event_log, + const WebRtcKeyValueConfig* field_trials, + TaskQueueFactory* task_queue_factory, + TimeDelta hold_back_window) + : TaskQueuePacedSender(clock, + packet_router, + event_log, + field_trials, + task_queue_factory, + hold_back_window) {} + + void OnStatsUpdated(const Stats& 
stats) override { + ++num_stats_updates_; + TaskQueuePacedSender::OnStatsUpdated(stats); + } + + size_t num_stats_updates_ = 0; +}; + +std::vector> GeneratePadding( + DataSize target_size) { + // 224 bytes is the max padding size for plain padding packets generated by + // RTPSender::GeneratePadding(). + const DataSize kMaxPaddingPacketSize = DataSize::Bytes(224); + DataSize padding_generated = DataSize::Zero(); + std::vector> padding_packets; + while (padding_generated < target_size) { + DataSize packet_size = + std::min(target_size - padding_generated, kMaxPaddingPacketSize); + padding_generated += packet_size; + auto padding_packet = + std::make_unique(/*extensions=*/nullptr); + padding_packet->set_packet_type(RtpPacketMediaType::kPadding); + padding_packet->SetPadding(packet_size.bytes()); + padding_packets.push_back(std::move(padding_packet)); + } + return padding_packets; +} + } // namespace namespace test { @@ -85,17 +146,18 @@ namespace test { TEST(TaskQueuePacedSenderTest, PacesPackets) { GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); MockPacketRouter packet_router; - TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, - /*event_log=*/nullptr, - /*field_trials=*/nullptr, - time_controller.GetTaskQueueFactory(), - PacingController::kMinSleepTime); + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + PacingController::kMinSleepTime); // Insert a number of packets, covering one second. 
static constexpr size_t kPacketsToSend = 42; pacer.SetPacingRates( DataRate::BitsPerSec(kDefaultPacketSize * 8 * kPacketsToSend), DataRate::Zero()); + pacer.EnsureStarted(); pacer.EnqueuePackets( GeneratePackets(RtpPacketMediaType::kVideo, kPacketsToSend)); @@ -124,17 +186,18 @@ namespace test { TEST(TaskQueuePacedSenderTest, ReschedulesProcessOnRateChange) { GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); MockPacketRouter packet_router; - TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, - /*event_log=*/nullptr, - /*field_trials=*/nullptr, - time_controller.GetTaskQueueFactory(), - PacingController::kMinSleepTime); + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + PacingController::kMinSleepTime); // Insert a number of packets to be sent 200ms apart. const size_t kPacketsPerSecond = 5; const DataRate kPacingRate = DataRate::BitsPerSec(kDefaultPacketSize * 8 * kPacketsPerSecond); pacer.SetPacingRates(kPacingRate, DataRate::Zero()); + pacer.EnsureStarted(); // Send some initial packets to be rid of any probes. 
EXPECT_CALL(packet_router, SendPacket).Times(kPacketsPerSecond); @@ -175,17 +238,18 @@ namespace test { TEST(TaskQueuePacedSenderTest, SendsAudioImmediately) { GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); MockPacketRouter packet_router; - TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, - /*event_log=*/nullptr, - /*field_trials=*/nullptr, - time_controller.GetTaskQueueFactory(), - PacingController::kMinSleepTime); + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + PacingController::kMinSleepTime); const DataRate kPacingDataRate = DataRate::KilobitsPerSec(125); const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize); const TimeDelta kPacketPacingTime = kPacketSize / kPacingDataRate; pacer.SetPacingRates(kPacingDataRate, DataRate::Zero()); + pacer.EnsureStarted(); // Add some initial video packets, only one should be sent. EXPECT_CALL(packet_router, SendPacket); @@ -207,11 +271,11 @@ namespace test { const TimeDelta kCoalescingWindow = TimeDelta::Millis(5); GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); MockPacketRouter packet_router; - TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, - /*event_log=*/nullptr, - /*field_trials=*/nullptr, - time_controller.GetTaskQueueFactory(), - kCoalescingWindow); + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + kCoalescingWindow); // Set rates so one packet adds one ms of buffer level. const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize); @@ -219,6 +283,7 @@ namespace test { const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime; pacer.SetPacingRates(kPacingDataRate, DataRate::Zero()); + pacer.EnsureStarted(); // Add 10 packets. 
The first should be sent immediately since the buffers // are clear. @@ -243,11 +308,11 @@ namespace test { const TimeDelta kCoalescingWindow = TimeDelta::Millis(5); GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); MockPacketRouter packet_router; - TaskQueuePacedSender pacer(time_controller.GetClock(), &packet_router, - /*event_log=*/nullptr, - /*field_trials=*/nullptr, - time_controller.GetTaskQueueFactory(), - kCoalescingWindow); + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + kCoalescingWindow); // Set rates so one packet adds one ms of buffer level. const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize); @@ -255,6 +320,7 @@ namespace test { const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime; pacer.SetPacingRates(kPacingDataRate, DataRate::Zero()); + pacer.EnsureStarted(); // Add 10 packets. The first should be sent immediately since the buffers // are clear. This will also trigger the probe to start. @@ -270,5 +336,258 @@ namespace test { time_controller.AdvanceTime(kCoalescingWindow - TimeDelta::Millis(1)); } + TEST(TaskQueuePacedSenderTest, RespectedMinTimeBetweenStatsUpdates) { + const TimeDelta kCoalescingWindow = TimeDelta::Millis(5); + GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); + MockPacketRouter packet_router; + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + kCoalescingWindow); + const DataRate kPacingDataRate = DataRate::KilobitsPerSec(300); + pacer.SetPacingRates(kPacingDataRate, DataRate::Zero()); + pacer.EnsureStarted(); + + const TimeDelta kMinTimeBetweenStatsUpdates = TimeDelta::Millis(1); + + // Nothing inserted, no stats updates yet. 
+ EXPECT_EQ(pacer.num_stats_updates_, 0u); + + // Insert one packet, stats should be updated. + pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 1)); + time_controller.AdvanceTime(TimeDelta::Zero()); + EXPECT_EQ(pacer.num_stats_updates_, 1u); + + // Advance time half of the min stats update interval, and trigger a + // refresh - stats should not be updated yet. + time_controller.AdvanceTime(kMinTimeBetweenStatsUpdates / 2); + pacer.EnqueuePackets({}); + time_controller.AdvanceTime(TimeDelta::Zero()); + EXPECT_EQ(pacer.num_stats_updates_, 1u); + + // Advance time the next half, now stats update is triggered. + time_controller.AdvanceTime(kMinTimeBetweenStatsUpdates / 2); + pacer.EnqueuePackets({}); + time_controller.AdvanceTime(TimeDelta::Zero()); + EXPECT_EQ(pacer.num_stats_updates_, 2u); + } + + TEST(TaskQueuePacedSenderTest, ThrottlesStatsUpdates) { + const TimeDelta kCoalescingWindow = TimeDelta::Millis(5); + GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); + MockPacketRouter packet_router; + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + kCoalescingWindow); + + // Set rates so one packet adds 10ms of buffer level. + const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize); + const TimeDelta kPacketPacingTime = TimeDelta::Millis(10); + const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime; + const TimeDelta kMinTimeBetweenStatsUpdates = TimeDelta::Millis(1); + const TimeDelta kMaxTimeBetweenStatsUpdates = TimeDelta::Millis(33); + + // Nothing inserted, no stats updates yet. + size_t num_expected_stats_updates = 0; + EXPECT_EQ(pacer.num_stats_updates_, num_expected_stats_updates); + pacer.SetPacingRates(kPacingDataRate, DataRate::Zero()); + pacer.EnsureStarted(); + time_controller.AdvanceTime(kMinTimeBetweenStatsUpdates); + // Updating pacing rates refreshes stats. 
+  EXPECT_EQ(pacer.num_stats_updates_, ++num_expected_stats_updates);
+
+  // Record time when we insert first packet, this triggers the scheduled
+  // stats updating.
+  Clock* const clock = time_controller.GetClock();
+  const Timestamp start_time = clock->CurrentTime();
+
+  while (clock->CurrentTime() - start_time <=
+         kMaxTimeBetweenStatsUpdates - kPacketPacingTime) {
+    // Enqueue packet, expect stats update.
+    pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 1));
+    time_controller.AdvanceTime(TimeDelta::Zero());
+    EXPECT_EQ(pacer.num_stats_updates_, ++num_expected_stats_updates);
+
+    // Advance time to halfway through pacing time, expect another stats
+    // update.
+    time_controller.AdvanceTime(kPacketPacingTime / 2);
+    pacer.EnqueuePackets({});
+    time_controller.AdvanceTime(TimeDelta::Zero());
+    EXPECT_EQ(pacer.num_stats_updates_, ++num_expected_stats_updates);
+
+    // Advance time the rest of the way.
+    time_controller.AdvanceTime(kPacketPacingTime / 2);
+  }
+
+  // At this point, the pacer queue is drained so there is no more interesting
+  // update to be made - but there is still a scheduled task that should run
+  // |kMaxTimeBetweenStatsUpdates| after the first update.
+  time_controller.AdvanceTime(start_time + kMaxTimeBetweenStatsUpdates -
+                              clock->CurrentTime());
+  EXPECT_EQ(pacer.num_stats_updates_, ++num_expected_stats_updates);
+
+  // Advance time a significant time - don't expect any more calls as stats
+  // updating does not happen when queue is drained.
+ time_controller.AdvanceTime(TimeDelta::Millis(400)); + EXPECT_EQ(pacer.num_stats_updates_, num_expected_stats_updates); + } + + TEST(TaskQueuePacedSenderTest, SchedulesProbeAtSetTime) { + ScopedFieldTrials trials("WebRTC-Bwe-ProbingBehavior/min_probe_delta:1ms/"); + GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); + MockPacketRouter packet_router; + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + PacingController::kMinSleepTime); + + // Set rates so one packet adds 4ms of buffer level. + const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize); + const TimeDelta kPacketPacingTime = TimeDelta::Millis(4); + const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime; + pacer.SetPacingRates(kPacingDataRate, /*padding_rate=*/DataRate::Zero()); + pacer.EnsureStarted(); + EXPECT_CALL(packet_router, FetchFec).WillRepeatedly([]() { + return std::vector>(); + }); + EXPECT_CALL(packet_router, GeneratePadding(_)) + .WillRepeatedly( + [](DataSize target_size) { return GeneratePadding(target_size); }); + + // Enqueue two packets, only the first is sent immediately and the next + // will be scheduled for sending in 4ms. + pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 2)); + const int kNotAProbe = PacedPacketInfo::kNotAProbe; + EXPECT_CALL( + packet_router, + SendPacket(_, ::testing::Field(&PacedPacketInfo::probe_cluster_id, + kNotAProbe))); + // Advance to less than 3ms before next packet send time. + time_controller.AdvanceTime(TimeDelta::Micros(1001)); + + // Trigger a probe at 4x the current pacing rate and insert the number of + // packets the probe needs. 
+  const DataRate kProbeRate = 2 * kPacingDataRate;
+  const int kProbeClusterId = 1;
+  pacer.CreateProbeCluster(kProbeRate, kProbeClusterId);
+
+  // Expected size for each probe in a cluster is twice the expected bits
+  // sent during min_probe_delta.
+  // Expect one additional call since probe always starts with a small padding packet.
+  const TimeDelta kProbeTimeDelta = TimeDelta::Millis(2);
+  const DataSize kProbeSize = kProbeRate * kProbeTimeDelta;
+  const size_t kNumPacketsInProbe =
+      (kProbeSize + kPacketSize - DataSize::Bytes(1)) / kPacketSize;
+  EXPECT_CALL(
+      packet_router,
+      SendPacket(_, ::testing::Field(&PacedPacketInfo::probe_cluster_id,
+                                     kProbeClusterId)))
+      .Times(kNumPacketsInProbe + 1);
+
+  pacer.EnqueuePackets(
+      GeneratePackets(RtpPacketMediaType::kVideo, kNumPacketsInProbe));
+  time_controller.AdvanceTime(TimeDelta::Zero());
+
+  // The pacer should have scheduled the next probe to be sent in
+  // kProbeTimeDelta. That there was an existing scheduled call less than
+  // PacingController::kMinSleepTime before this should not matter.
+
+  EXPECT_CALL(
+      packet_router,
+      SendPacket(_, ::testing::Field(&PacedPacketInfo::probe_cluster_id,
+                                     kProbeClusterId)))
+      .Times(AtLeast(1));
+  time_controller.AdvanceTime(TimeDelta::Millis(2));
+}
+
+TEST(TaskQueuePacedSenderTest, NoMinSleepTimeWhenProbing) {
+  // Set min_probe_delta to be less than kMinSleepTime (1ms).
+  const TimeDelta kMinProbeDelta = TimeDelta::Micros(100);
+  ScopedFieldTrials trials(
+      "WebRTC-Bwe-ProbingBehavior/min_probe_delta:100us/");
+  GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234));
+  MockPacketRouter packet_router;
+  TaskQueuePacedSenderForTest pacer(
+      time_controller.GetClock(), &packet_router,
+      /*event_log=*/nullptr,
+      /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(),
+      PacingController::kMinSleepTime);
+
+  // Set rates so one packet adds 4ms of buffer level.
+  const DataSize kPacketSize = DataSize::Bytes(kDefaultPacketSize);
+  const TimeDelta kPacketPacingTime = TimeDelta::Millis(4);
+  const DataRate kPacingDataRate = kPacketSize / kPacketPacingTime;
+  pacer.SetPacingRates(kPacingDataRate, /*padding_rate=*/DataRate::Zero());
+  pacer.EnsureStarted();
+  EXPECT_CALL(packet_router, FetchFec).WillRepeatedly([]() {
+    return std::vector<std::unique_ptr<RtpPacketToSend>>();
+  });
+  EXPECT_CALL(packet_router, GeneratePadding)
+      .WillRepeatedly(
+          [](DataSize target_size) { return GeneratePadding(target_size); });
+
+  // Set a high probe rate.
+  const int kProbeClusterId = 1;
+  DataRate kProbingRate = kPacingDataRate * 10;
+  pacer.CreateProbeCluster(kProbingRate, kProbeClusterId);
+
+  // Advance time less than PacingController::kMinSleepTime, probing packets
+  // for the first millisecond should be sent immediately. Min delta between
+  // probes is 2x 100us, meaning 4 times per ms we will get at least one call
+  // to SendPacket().
+  DataSize data_sent = DataSize::Zero();
+  EXPECT_CALL(
+      packet_router,
+      SendPacket(_, ::testing::Field(&PacedPacketInfo::probe_cluster_id,
+                                     kProbeClusterId)))
+      .Times(AtLeast(4))
+      .WillRepeatedly([&](std::unique_ptr<RtpPacketToSend> packet,
+                          const PacedPacketInfo&) {
+        data_sent +=
+            DataSize::Bytes(packet->payload_size() + packet->padding_size());
+      });
+
+  // Add one packet to kickstart probing, the rest will be padding packets.
+  pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 1));
+  time_controller.AdvanceTime(kMinProbeDelta);
+
+  // Verify the amount of probing data sent.
+  // Probe always starts with a small (1 byte) padding packet that's not
+  // counted into the probe rate here.
+ EXPECT_EQ(data_sent, + kProbingRate * TimeDelta::Millis(1) + DataSize::Bytes(1)); + } + + TEST(TaskQueuePacedSenderTest, NoStatsUpdatesBeforeStart) { + const TimeDelta kCoalescingWindow = TimeDelta::Millis(5); + GlobalSimulatedTimeController time_controller(Timestamp::Millis(1234)); + MockPacketRouter packet_router; + TaskQueuePacedSenderForTest pacer( + time_controller.GetClock(), &packet_router, + /*event_log=*/nullptr, + /*field_trials=*/nullptr, time_controller.GetTaskQueueFactory(), + kCoalescingWindow); + const DataRate kPacingDataRate = DataRate::KilobitsPerSec(300); + pacer.SetPacingRates(kPacingDataRate, DataRate::Zero()); + + const TimeDelta kMinTimeBetweenStatsUpdates = TimeDelta::Millis(1); + + // Nothing inserted, no stats updates yet. + EXPECT_EQ(pacer.num_stats_updates_, 0u); + + // Insert one packet, stats should not be updated. + pacer.EnqueuePackets(GeneratePackets(RtpPacketMediaType::kVideo, 1)); + time_controller.AdvanceTime(TimeDelta::Zero()); + EXPECT_EQ(pacer.num_stats_updates_, 0u); + + // Advance time of the min stats update interval, and trigger a + // refresh - stats should not be updated still. 
+ time_controller.AdvanceTime(kMinTimeBetweenStatsUpdates); + EXPECT_EQ(pacer.num_stats_updates_, 0u); + } } // namespace test } // namespace webrtc diff --git a/modules/remote_bitrate_estimator/BUILD.gn b/modules/remote_bitrate_estimator/BUILD.gn index d7b0397ea5..923f00a74c 100644 --- a/modules/remote_bitrate_estimator/BUILD.gn +++ b/modules/remote_bitrate_estimator/BUILD.gn @@ -9,7 +9,6 @@ import("../../webrtc.gni") rtc_library("remote_bitrate_estimator") { - visibility = [ "*" ] sources = [ "aimd_rate_control.cc", "aimd_rate_control.h", @@ -22,6 +21,8 @@ rtc_library("remote_bitrate_estimator") { "overuse_detector.h", "overuse_estimator.cc", "overuse_estimator.h", + "packet_arrival_map.cc", + "packet_arrival_map.h", "remote_bitrate_estimator_abs_send_time.cc", "remote_bitrate_estimator_abs_send_time.h", "remote_bitrate_estimator_single_stream.cc", @@ -46,6 +47,8 @@ rtc_library("remote_bitrate_estimator") { "../../api/transport:network_control", "../../api/transport:webrtc_key_value_config", "../../api/units:data_rate", + "../../api/units:data_size", + "../../api/units:time_delta", "../../api/units:timestamp", "../../modules:module_api", "../../modules:module_api_public", @@ -56,9 +59,12 @@ rtc_library("remote_bitrate_estimator") { "../../rtc_base:rtc_numerics", "../../rtc_base:safe_minmax", "../../rtc_base/experiments:field_trial_parser", + "../../rtc_base/synchronization:mutex", "../../system_wrappers", "../../system_wrappers:field_trial", "../../system_wrappers:metrics", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -72,10 +78,11 @@ if (!build_with_chromium) { "tools/bwe_rtp.h", ] deps = [ - ":remote_bitrate_estimator", "../../rtc_base:rtc_base_approved", "../../test:rtp_test_utils", - "../rtp_rtcp", + "../rtp_rtcp:rtp_rtcp_format", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/flags:parse", ] @@ -86,10 +93,10 @@ if (!build_with_chromium) { 
sources = [ "tools/rtp_to_text.cc" ] deps = [ ":bwe_rtp", - "../../modules/rtp_rtcp", "../../rtc_base:macromagic", "../../rtc_base:stringutils", "../../test:rtp_test_utils", + "../rtp_rtcp:rtp_rtcp_format", ] } } @@ -102,6 +109,7 @@ if (rtc_include_tests) { "aimd_rate_control_unittest.cc", "inter_arrival_unittest.cc", "overuse_detector_unittest.cc", + "packet_arrival_map_test.cc", "remote_bitrate_estimator_abs_send_time_unittest.cc", "remote_bitrate_estimator_single_stream_unittest.cc", "remote_bitrate_estimator_unittest_helper.cc", @@ -111,7 +119,6 @@ if (rtc_include_tests) { deps = [ ":remote_bitrate_estimator", "..:module_api_public", - "../..:webrtc_common", "../../api/transport:field_trial_based_config", "../../api/transport:mock_network_control", "../../api/transport:network_control", diff --git a/modules/remote_bitrate_estimator/aimd_rate_control.cc b/modules/remote_bitrate_estimator/aimd_rate_control.cc index da13176645..bf7119cc7d 100644 --- a/modules/remote_bitrate_estimator/aimd_rate_control.cc +++ b/modules/remote_bitrate_estimator/aimd_rate_control.cc @@ -78,7 +78,7 @@ AimdRateControl::AimdRateControl(const WebRtcKeyValueConfig* key_value_config, current_bitrate_(max_configured_bitrate_), latest_estimated_throughput_(current_bitrate_), link_capacity_(), - rate_control_state_(kRcHold), + rate_control_state_(RateControlState::kRcHold), time_last_bitrate_change_(Timestamp::MinusInfinity()), time_last_bitrate_decrease_(Timestamp::MinusInfinity()), time_first_throughput_estimate_(Timestamp::MinusInfinity()), @@ -280,10 +280,10 @@ void AimdRateControl::ChangeBitrate(const RateControlInput& input, 1.5 * estimated_throughput + DataRate::KilobitsPerSec(10); switch (rate_control_state_) { - case kRcHold: + case RateControlState::kRcHold: break; - case kRcIncrease: + case RateControlState::kRcIncrease: if (estimated_throughput > link_capacity_.UpperBound()) link_capacity_.Reset(); @@ -316,7 +316,7 @@ void AimdRateControl::ChangeBitrate(const RateControlInput& 
input, time_last_bitrate_change_ = at_time; break; - case kRcDecrease: { + case RateControlState::kRcDecrease: { DataRate decreased_bitrate = DataRate::PlusInfinity(); // Set bit rate to something slightly lower than the measured throughput @@ -356,13 +356,13 @@ void AimdRateControl::ChangeBitrate(const RateControlInput& input, bitrate_is_initialized_ = true; link_capacity_.OnOveruseDetected(estimated_throughput); // Stay on hold until the pipes are cleared. - rate_control_state_ = kRcHold; + rate_control_state_ = RateControlState::kRcHold; time_last_bitrate_change_ = at_time; time_last_bitrate_decrease_ = at_time; break; } default: - assert(false); + RTC_NOTREACHED(); } current_bitrate_ = ClampBitrate(new_bitrate.value_or(current_bitrate_)); @@ -403,21 +403,21 @@ void AimdRateControl::ChangeState(const RateControlInput& input, Timestamp at_time) { switch (input.bw_state) { case BandwidthUsage::kBwNormal: - if (rate_control_state_ == kRcHold) { + if (rate_control_state_ == RateControlState::kRcHold) { time_last_bitrate_change_ = at_time; - rate_control_state_ = kRcIncrease; + rate_control_state_ = RateControlState::kRcIncrease; } break; case BandwidthUsage::kBwOverusing: - if (rate_control_state_ != kRcDecrease) { - rate_control_state_ = kRcDecrease; + if (rate_control_state_ != RateControlState::kRcDecrease) { + rate_control_state_ = RateControlState::kRcDecrease; } break; case BandwidthUsage::kBwUnderusing: - rate_control_state_ = kRcHold; + rate_control_state_ = RateControlState::kRcHold; break; default: - assert(false); + RTC_NOTREACHED(); } } diff --git a/modules/remote_bitrate_estimator/aimd_rate_control.h b/modules/remote_bitrate_estimator/aimd_rate_control.h index c9e9470c58..3e0d541b60 100644 --- a/modules/remote_bitrate_estimator/aimd_rate_control.h +++ b/modules/remote_bitrate_estimator/aimd_rate_control.h @@ -65,6 +65,8 @@ class AimdRateControl { TimeDelta GetExpectedBandwidthPeriod() const; private: + enum class RateControlState { kRcHold, kRcIncrease, 
kRcDecrease }; + friend class GoogCcStatePrinter; // Update the target bitrate based on, among other things, the current rate // control state, the current target bitrate and the estimated throughput. diff --git a/modules/remote_bitrate_estimator/include/bwe_defines.h b/modules/remote_bitrate_estimator/include/bwe_defines.h index 40fbfe0052..b3ca1846f4 100644 --- a/modules/remote_bitrate_estimator/include/bwe_defines.h +++ b/modules/remote_bitrate_estimator/include/bwe_defines.h @@ -17,9 +17,6 @@ #include "api/network_state_predictor.h" #include "api/units/data_rate.h" -#define BWE_MAX(a, b) ((a) > (b) ? (a) : (b)) -#define BWE_MIN(a, b) ((a) < (b) ? (a) : (b)) - namespace webrtc { namespace congestion_controller { @@ -39,8 +36,6 @@ enum BweNames { kBweNamesMax = 4 }; -enum RateControlState { kRcHold, kRcIncrease, kRcDecrease }; - struct RateControlInput { RateControlInput(BandwidthUsage bw_state, const absl::optional& estimated_throughput); diff --git a/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h b/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h index c60c030e8d..ac937bbfe0 100644 --- a/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h +++ b/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h @@ -38,14 +38,6 @@ class RemoteBitrateObserver { virtual ~RemoteBitrateObserver() {} }; -class TransportFeedbackSenderInterface { - public: - virtual ~TransportFeedbackSenderInterface() = default; - - virtual bool SendCombinedRtcpPacket( - std::vector> packets) = 0; -}; - // TODO(holmer): Remove when all implementations have been updated. 
struct ReceiveBandwidthEstimatorStats {}; diff --git a/modules/remote_bitrate_estimator/inter_arrival.cc b/modules/remote_bitrate_estimator/inter_arrival.cc index b8e683b89a..a8cf47fbfe 100644 --- a/modules/remote_bitrate_estimator/inter_arrival.cc +++ b/modules/remote_bitrate_estimator/inter_arrival.cc @@ -37,9 +37,9 @@ bool InterArrival::ComputeDeltas(uint32_t timestamp, uint32_t* timestamp_delta, int64_t* arrival_time_delta_ms, int* packet_size_delta) { - assert(timestamp_delta != NULL); - assert(arrival_time_delta_ms != NULL); - assert(packet_size_delta != NULL); + RTC_DCHECK(timestamp_delta); + RTC_DCHECK(arrival_time_delta_ms); + RTC_DCHECK(packet_size_delta); bool calculated_deltas = false; if (current_timestamp_group_.IsFirstPacket()) { // We don't have enough data to update the filter, so we store it until we @@ -85,7 +85,7 @@ bool InterArrival::ComputeDeltas(uint32_t timestamp, } else { num_consecutive_reordered_packets_ = 0; } - assert(*arrival_time_delta_ms >= 0); + RTC_DCHECK_GE(*arrival_time_delta_ms, 0); *packet_size_delta = static_cast(current_timestamp_group_.size) - static_cast(prev_timestamp_group_.size); calculated_deltas = true; @@ -141,7 +141,7 @@ bool InterArrival::BelongsToBurst(int64_t arrival_time_ms, if (!burst_grouping_) { return false; } - assert(current_timestamp_group_.complete_time_ms >= 0); + RTC_DCHECK_GE(current_timestamp_group_.complete_time_ms, 0); int64_t arrival_time_delta_ms = arrival_time_ms - current_timestamp_group_.complete_time_ms; uint32_t timestamp_diff = timestamp - current_timestamp_group_.timestamp; diff --git a/modules/remote_bitrate_estimator/inter_arrival.h b/modules/remote_bitrate_estimator/inter_arrival.h index 1d84970deb..dbc630ff63 100644 --- a/modules/remote_bitrate_estimator/inter_arrival.h +++ b/modules/remote_bitrate_estimator/inter_arrival.h @@ -14,8 +14,6 @@ #include #include -#include "rtc_base/constructor_magic.h" - namespace webrtc { // Helper class to compute the inter-arrival time delta and the 
size delta @@ -35,6 +33,10 @@ class InterArrival { double timestamp_to_ms_coeff, bool enable_burst_grouping); + InterArrival() = delete; + InterArrival(const InterArrival&) = delete; + InterArrival& operator=(const InterArrival&) = delete; + // This function returns true if a delta was computed, or false if the current // group is still incomplete or if only one group has been completed. // |timestamp| is the timestamp. @@ -87,8 +89,6 @@ class InterArrival { double timestamp_to_ms_coeff_; bool burst_grouping_; int num_consecutive_reordered_packets_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(InterArrival); }; } // namespace webrtc diff --git a/modules/remote_bitrate_estimator/overuse_detector.cc b/modules/remote_bitrate_estimator/overuse_detector.cc index 44cbe50136..710b3b21d3 100644 --- a/modules/remote_bitrate_estimator/overuse_detector.cc +++ b/modules/remote_bitrate_estimator/overuse_detector.cc @@ -16,7 +16,6 @@ #include #include -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "modules/remote_bitrate_estimator/test/bwe_test_logging.h" #include "rtc_base/checks.h" #include "rtc_base/numerics/safe_minmax.h" diff --git a/modules/remote_bitrate_estimator/overuse_detector.h b/modules/remote_bitrate_estimator/overuse_detector.h index 1df6cab786..4e72e8e037 100644 --- a/modules/remote_bitrate_estimator/overuse_detector.h +++ b/modules/remote_bitrate_estimator/overuse_detector.h @@ -12,8 +12,8 @@ #include +#include "api/network_state_predictor.h" #include "api/transport/webrtc_key_value_config.h" -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "rtc_base/constructor_magic.h" namespace webrtc { diff --git a/modules/remote_bitrate_estimator/overuse_estimator.cc b/modules/remote_bitrate_estimator/overuse_estimator.cc index e97e06b0bc..3427d5880c 100644 --- a/modules/remote_bitrate_estimator/overuse_estimator.cc +++ b/modules/remote_bitrate_estimator/overuse_estimator.cc @@ -16,7 +16,7 @@ #include -#include 
"modules/remote_bitrate_estimator/include/bwe_defines.h" +#include "api/network_state_predictor.h" #include "modules/remote_bitrate_estimator/test/bwe_test_logging.h" #include "rtc_base/logging.h" @@ -110,7 +110,7 @@ void OveruseEstimator::Update(int64_t t_delta, bool positive_semi_definite = E_[0][0] + E_[1][1] >= 0 && E_[0][0] * E_[1][1] - E_[0][1] * E_[1][0] >= 0 && E_[0][0] >= 0; - assert(positive_semi_definite); + RTC_DCHECK(positive_semi_definite); if (!positive_semi_definite) { RTC_LOG(LS_ERROR) << "The over-use estimator's covariance matrix is no longer " diff --git a/modules/remote_bitrate_estimator/overuse_estimator.h b/modules/remote_bitrate_estimator/overuse_estimator.h index d5f675e996..a082d9d065 100644 --- a/modules/remote_bitrate_estimator/overuse_estimator.h +++ b/modules/remote_bitrate_estimator/overuse_estimator.h @@ -14,7 +14,7 @@ #include -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" +#include "api/network_state_predictor.h" #include "rtc_base/constructor_magic.h" namespace webrtc { diff --git a/modules/remote_bitrate_estimator/packet_arrival_map.cc b/modules/remote_bitrate_estimator/packet_arrival_map.cc new file mode 100644 index 0000000000..72696f6c80 --- /dev/null +++ b/modules/remote_bitrate_estimator/packet_arrival_map.cc @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "modules/remote_bitrate_estimator/packet_arrival_map.h" + +#include + +#include "rtc_base/numerics/safe_minmax.h" + +namespace webrtc { + +constexpr size_t PacketArrivalTimeMap::kMaxNumberOfPackets; + +void PacketArrivalTimeMap::AddPacket(int64_t sequence_number, + int64_t arrival_time_ms) { + if (!has_seen_packet_) { + // First packet. + has_seen_packet_ = true; + begin_sequence_number_ = sequence_number; + arrival_times.push_back(arrival_time_ms); + return; + } + + int64_t pos = sequence_number - begin_sequence_number_; + if (pos >= 0 && pos < static_cast(arrival_times.size())) { + // The packet is within the buffer - no need to expand it. + arrival_times[pos] = arrival_time_ms; + return; + } + + if (pos < 0) { + // The packet goes before the current buffer. Expand to add packet, but only + // if it fits within kMaxNumberOfPackets. + size_t missing_packets = -pos; + if (missing_packets + arrival_times.size() > kMaxNumberOfPackets) { + // Don't expand the buffer further, as that would remove newly received + // packets. + return; + } + + arrival_times.insert(arrival_times.begin(), missing_packets, 0); + arrival_times[0] = arrival_time_ms; + begin_sequence_number_ = sequence_number; + return; + } + + // The packet goes after the buffer. + + if (static_cast(pos) >= kMaxNumberOfPackets) { + // The buffer grows too large - old packets have to be removed. + size_t packets_to_remove = pos - kMaxNumberOfPackets + 1; + if (packets_to_remove >= arrival_times.size()) { + arrival_times.clear(); + begin_sequence_number_ = sequence_number; + pos = 0; + } else { + // Also trim the buffer to remove leading non-received packets, to + // ensure that the buffer only spans received packets. 
+ while (packets_to_remove < arrival_times.size() && + arrival_times[packets_to_remove] == 0) { + ++packets_to_remove; + } + + arrival_times.erase(arrival_times.begin(), + arrival_times.begin() + packets_to_remove); + begin_sequence_number_ += packets_to_remove; + pos -= packets_to_remove; + RTC_DCHECK_GE(pos, 0); + } + } + + // Packets can be received out-of-order. If this isn't the next expected + // packet, add enough placeholders to fill the gap. + size_t missing_gap_packets = pos - arrival_times.size(); + if (missing_gap_packets > 0) { + arrival_times.insert(arrival_times.end(), missing_gap_packets, 0); + } + RTC_DCHECK_EQ(arrival_times.size(), pos); + arrival_times.push_back(arrival_time_ms); + RTC_DCHECK_LE(arrival_times.size(), kMaxNumberOfPackets); +} + +void PacketArrivalTimeMap::RemoveOldPackets(int64_t sequence_number, + int64_t arrival_time_limit) { + while (!arrival_times.empty() && begin_sequence_number_ < sequence_number && + arrival_times.front() <= arrival_time_limit) { + arrival_times.pop_front(); + ++begin_sequence_number_; + } +} + +bool PacketArrivalTimeMap::has_received(int64_t sequence_number) const { + int64_t pos = sequence_number - begin_sequence_number_; + if (pos >= 0 && pos < static_cast(arrival_times.size()) && + arrival_times[pos] != 0) { + return true; + } + return false; +} + +void PacketArrivalTimeMap::EraseTo(int64_t sequence_number) { + if (sequence_number > begin_sequence_number_) { + size_t count = + std::min(static_cast(sequence_number - begin_sequence_number_), + arrival_times.size()); + + arrival_times.erase(arrival_times.begin(), arrival_times.begin() + count); + begin_sequence_number_ += count; + } +} + +int64_t PacketArrivalTimeMap::clamp(int64_t sequence_number) const { + return rtc::SafeClamp(sequence_number, begin_sequence_number(), + end_sequence_number()); +} + +} // namespace webrtc diff --git a/modules/remote_bitrate_estimator/packet_arrival_map.h b/modules/remote_bitrate_estimator/packet_arrival_map.h new file 
mode 100644 index 0000000000..10659e0f65 --- /dev/null +++ b/modules/remote_bitrate_estimator/packet_arrival_map.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_PACKET_ARRIVAL_MAP_H_ +#define MODULES_REMOTE_BITRATE_ESTIMATOR_PACKET_ARRIVAL_MAP_H_ + +#include +#include +#include + +#include "rtc_base/checks.h" + +namespace webrtc { + +// PacketArrivalTimeMap is an optimized map of packet sequence number to arrival +// time, limited in size to never exceed `kMaxNumberOfPackets`. It will grow as +// needed, and remove old packets, and will expand to allow earlier packets to +// be added (out-of-order). +// +// Not yet received packets have the arrival time zero. The queue will not span +// larger than necessary and the last packet should always be received. The +// first packet in the queue doesn't have to be received in case of receiving +// packets out-of-order. +class PacketArrivalTimeMap { + public: + // Impossible to request feedback older than what can be represented by 15 + // bits. + static constexpr size_t kMaxNumberOfPackets = (1 << 15); + + // Indicates if the packet with `sequence_number` has already been received. + bool has_received(int64_t sequence_number) const; + + // Returns the sequence number of the first entry in the map, i.e. the + // sequence number that a `begin()` iterator would represent. + int64_t begin_sequence_number() const { return begin_sequence_number_; } + + // Returns the sequence number of the element just after the map, i.e. the + // sequence number that an `end()` iterator would represent. 
+ int64_t end_sequence_number() const { + return begin_sequence_number_ + arrival_times.size(); + } + + // Returns an element by `sequence_number`, which must be valid, i.e. + // between [begin_sequence_number, end_sequence_number). + int64_t get(int64_t sequence_number) { + int64_t pos = sequence_number - begin_sequence_number_; + RTC_DCHECK(pos >= 0 && pos < static_cast(arrival_times.size())); + return arrival_times[pos]; + } + + // Clamps `sequence_number` between [begin_sequence_number, + // end_sequence_number]. + int64_t clamp(int64_t sequence_number) const; + + // Erases all elements from the beginning of the map until `sequence_number`. + void EraseTo(int64_t sequence_number); + + // Records the fact that a packet with `sequence_number` arrived at + // `arrival_time_ms`. + void AddPacket(int64_t sequence_number, int64_t arrival_time_ms); + + // Removes packets from the beginning of the map as long as they are received + // before `sequence_number` and with an age older than `arrival_time_limit` + void RemoveOldPackets(int64_t sequence_number, int64_t arrival_time_limit); + + private: + // Deque representing unwrapped sequence number -> time, where the index + + // `begin_sequence_number_` represents the packet's sequence number. + std::deque arrival_times; + + // The unwrapped sequence number for the first element in + // `arrival_times`. + int64_t begin_sequence_number_ = 0; + + // Indicates if this map has had any packet added to it. The first packet + // decides the initial sequence number. + bool has_seen_packet_ = false; +}; + +} // namespace webrtc + +#endif // MODULES_REMOTE_BITRATE_ESTIMATOR_PACKET_ARRIVAL_MAP_H_ diff --git a/modules/remote_bitrate_estimator/packet_arrival_map_test.cc b/modules/remote_bitrate_estimator/packet_arrival_map_test.cc new file mode 100644 index 0000000000..afc7038832 --- /dev/null +++ b/modules/remote_bitrate_estimator/packet_arrival_map_test.cc @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. 
All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "modules/remote_bitrate_estimator/packet_arrival_map.h" + +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +TEST(PacketArrivalMapTest, IsConsistentWhenEmpty) { + PacketArrivalTimeMap map; + + EXPECT_EQ(map.begin_sequence_number(), map.end_sequence_number()); + EXPECT_FALSE(map.has_received(0)); + EXPECT_EQ(map.clamp(-5), 0); + EXPECT_EQ(map.clamp(5), 0); +} + +TEST(PacketArrivalMapTest, InsertsFirstItemIntoMap) { + PacketArrivalTimeMap map; + + map.AddPacket(42, 10); + EXPECT_EQ(map.begin_sequence_number(), 42); + EXPECT_EQ(map.end_sequence_number(), 43); + + EXPECT_FALSE(map.has_received(41)); + EXPECT_TRUE(map.has_received(42)); + EXPECT_FALSE(map.has_received(44)); + + EXPECT_EQ(map.clamp(-100), 42); + EXPECT_EQ(map.clamp(42), 42); + EXPECT_EQ(map.clamp(100), 43); +} + +TEST(PacketArrivalMapTest, InsertsWithGaps) { + PacketArrivalTimeMap map; + + map.AddPacket(42, 10); + map.AddPacket(45, 11); + EXPECT_EQ(map.begin_sequence_number(), 42); + EXPECT_EQ(map.end_sequence_number(), 46); + + EXPECT_FALSE(map.has_received(41)); + EXPECT_TRUE(map.has_received(42)); + EXPECT_FALSE(map.has_received(43)); + EXPECT_FALSE(map.has_received(44)); + EXPECT_TRUE(map.has_received(45)); + EXPECT_FALSE(map.has_received(46)); + + EXPECT_EQ(map.get(42), 10); + EXPECT_EQ(map.get(43), 0); + EXPECT_EQ(map.get(44), 0); + EXPECT_EQ(map.get(45), 11); + + EXPECT_EQ(map.clamp(-100), 42); + EXPECT_EQ(map.clamp(44), 44); + EXPECT_EQ(map.clamp(100), 46); +} + +TEST(PacketArrivalMapTest, InsertsWithinBuffer) { + PacketArrivalTimeMap map; + + map.AddPacket(42, 10); + map.AddPacket(45, 11); + + 
map.AddPacket(43, 12); + map.AddPacket(44, 13); + + EXPECT_EQ(map.begin_sequence_number(), 42); + EXPECT_EQ(map.end_sequence_number(), 46); + + EXPECT_FALSE(map.has_received(41)); + EXPECT_TRUE(map.has_received(42)); + EXPECT_TRUE(map.has_received(43)); + EXPECT_TRUE(map.has_received(44)); + EXPECT_TRUE(map.has_received(45)); + EXPECT_FALSE(map.has_received(46)); + + EXPECT_EQ(map.get(42), 10); + EXPECT_EQ(map.get(43), 12); + EXPECT_EQ(map.get(44), 13); + EXPECT_EQ(map.get(45), 11); +} + +TEST(PacketArrivalMapTest, GrowsBufferAndRemoveOld) { + PacketArrivalTimeMap map; + + constexpr int64_t kLargeSeq = 42 + PacketArrivalTimeMap::kMaxNumberOfPackets; + map.AddPacket(42, 10); + map.AddPacket(43, 11); + map.AddPacket(44, 12); + map.AddPacket(45, 13); + map.AddPacket(kLargeSeq, 12); + + EXPECT_EQ(map.begin_sequence_number(), 43); + EXPECT_EQ(map.end_sequence_number(), kLargeSeq + 1); + EXPECT_EQ(static_cast(map.end_sequence_number() - + map.begin_sequence_number()), + PacketArrivalTimeMap::kMaxNumberOfPackets); + + EXPECT_FALSE(map.has_received(41)); + EXPECT_FALSE(map.has_received(42)); + EXPECT_TRUE(map.has_received(43)); + EXPECT_TRUE(map.has_received(44)); + EXPECT_TRUE(map.has_received(45)); + EXPECT_FALSE(map.has_received(46)); + EXPECT_TRUE(map.has_received(kLargeSeq)); + EXPECT_FALSE(map.has_received(kLargeSeq + 1)); +} + +TEST(PacketArrivalMapTest, GrowsBufferAndRemoveOldTrimsBeginning) { + PacketArrivalTimeMap map; + + constexpr int64_t kLargeSeq = 42 + PacketArrivalTimeMap::kMaxNumberOfPackets; + map.AddPacket(42, 10); + // Missing: 43, 44 + map.AddPacket(45, 13); + map.AddPacket(kLargeSeq, 12); + + EXPECT_EQ(map.begin_sequence_number(), 45); + EXPECT_EQ(map.end_sequence_number(), kLargeSeq + 1); + + EXPECT_FALSE(map.has_received(44)); + EXPECT_TRUE(map.has_received(45)); + EXPECT_FALSE(map.has_received(46)); + EXPECT_TRUE(map.has_received(kLargeSeq)); + EXPECT_FALSE(map.has_received(kLargeSeq + 1)); +} + +TEST(PacketArrivalMapTest, 
SequenceNumberJumpsDeletesAll) { + PacketArrivalTimeMap map; + + constexpr int64_t kLargeSeq = + 42 + 2 * PacketArrivalTimeMap::kMaxNumberOfPackets; + map.AddPacket(42, 10); + map.AddPacket(kLargeSeq, 12); + + EXPECT_EQ(map.begin_sequence_number(), kLargeSeq); + EXPECT_EQ(map.end_sequence_number(), kLargeSeq + 1); + + EXPECT_FALSE(map.has_received(42)); + EXPECT_TRUE(map.has_received(kLargeSeq)); + EXPECT_FALSE(map.has_received(kLargeSeq + 1)); +} + +TEST(PacketArrivalMapTest, ExpandsBeforeBeginning) { + PacketArrivalTimeMap map; + + map.AddPacket(42, 10); + map.AddPacket(-1000, 13); + + EXPECT_EQ(map.begin_sequence_number(), -1000); + EXPECT_EQ(map.end_sequence_number(), 43); + + EXPECT_FALSE(map.has_received(-1001)); + EXPECT_TRUE(map.has_received(-1000)); + EXPECT_FALSE(map.has_received(-999)); + EXPECT_TRUE(map.has_received(42)); + EXPECT_FALSE(map.has_received(43)); +} + +TEST(PacketArrivalMapTest, ExpandingBeforeBeginningKeepsReceived) { + PacketArrivalTimeMap map; + + map.AddPacket(42, 10); + constexpr int64_t kSmallSeq = + static_cast(42) - 2 * PacketArrivalTimeMap::kMaxNumberOfPackets; + map.AddPacket(kSmallSeq, 13); + + EXPECT_EQ(map.begin_sequence_number(), 42); + EXPECT_EQ(map.end_sequence_number(), 43); +} + +TEST(PacketArrivalMapTest, ErasesToRemoveElements) { + PacketArrivalTimeMap map; + + map.AddPacket(42, 10); + map.AddPacket(43, 11); + map.AddPacket(44, 12); + map.AddPacket(45, 13); + + map.EraseTo(44); + + EXPECT_EQ(map.begin_sequence_number(), 44); + EXPECT_EQ(map.end_sequence_number(), 46); + + EXPECT_FALSE(map.has_received(43)); + EXPECT_TRUE(map.has_received(44)); + EXPECT_TRUE(map.has_received(45)); + EXPECT_FALSE(map.has_received(46)); +} + +TEST(PacketArrivalMapTest, ErasesInEmptyMap) { + PacketArrivalTimeMap map; + + EXPECT_EQ(map.begin_sequence_number(), map.end_sequence_number()); + + map.EraseTo(map.end_sequence_number()); + EXPECT_EQ(map.begin_sequence_number(), map.end_sequence_number()); +} + +TEST(PacketArrivalMapTest, 
IsTolerantToWrongArgumentsForErase) { + PacketArrivalTimeMap map; + + map.AddPacket(42, 10); + map.AddPacket(43, 11); + + map.EraseTo(1); + + EXPECT_EQ(map.begin_sequence_number(), 42); + EXPECT_EQ(map.end_sequence_number(), 44); + + map.EraseTo(100); + + EXPECT_EQ(map.begin_sequence_number(), 44); + EXPECT_EQ(map.end_sequence_number(), 44); +} + +TEST(PacketArrivalMapTest, EraseAllRemembersBeginningSeqNbr) { + PacketArrivalTimeMap map; + + map.AddPacket(42, 10); + map.AddPacket(43, 11); + map.AddPacket(44, 12); + map.AddPacket(45, 13); + + map.EraseTo(46); + + map.AddPacket(50, 10); + + EXPECT_EQ(map.begin_sequence_number(), 46); + EXPECT_EQ(map.end_sequence_number(), 51); + + EXPECT_FALSE(map.has_received(45)); + EXPECT_FALSE(map.has_received(46)); + EXPECT_FALSE(map.has_received(47)); + EXPECT_FALSE(map.has_received(48)); + EXPECT_FALSE(map.has_received(49)); + EXPECT_TRUE(map.has_received(50)); + EXPECT_FALSE(map.has_received(51)); +} + +} // namespace +} // namespace webrtc diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc index b146d00a2b..ae960ab960 100644 --- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.cc @@ -13,17 +13,36 @@ #include #include +#include +#include #include "api/transport/field_trial_based_config.h" +#include "api/units/data_rate.h" +#include "api/units/data_size.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" +#include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/logging.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/metrics.h" namespace webrtc { namespace { + +constexpr TimeDelta 
kMinClusterDelta = TimeDelta::Millis(1); +constexpr TimeDelta kInitialProbingInterval = TimeDelta::Seconds(2); +constexpr int kTimestampGroupLengthMs = 5; +constexpr int kAbsSendTimeInterArrivalUpshift = 8; +constexpr int kInterArrivalShift = + RTPHeaderExtension::kAbsSendTimeFraction + kAbsSendTimeInterArrivalUpshift; +constexpr int kMinClusterSize = 4; +constexpr int kMaxProbePackets = 15; +constexpr int kExpectedNumberOfProbes = 3; +constexpr double kTimestampToMs = + 1000.0 / static_cast(1 << kInterArrivalShift); + absl::optional OptionalRateFromOptionalBps( absl::optional bitrate_bps) { if (bitrate_bps) { @@ -32,62 +51,48 @@ absl::optional OptionalRateFromOptionalBps( return absl::nullopt; } } -} // namespace - -enum { - kTimestampGroupLengthMs = 5, - kAbsSendTimeInterArrivalUpshift = 8, - kInterArrivalShift = RTPHeaderExtension::kAbsSendTimeFraction + - kAbsSendTimeInterArrivalUpshift, - kInitialProbingIntervalMs = 2000, - kMinClusterSize = 4, - kMaxProbePackets = 15, - kExpectedNumberOfProbes = 3 -}; - -static const double kTimestampToMs = - 1000.0 / static_cast(1 << kInterArrivalShift); template std::vector Keys(const std::map& map) { std::vector keys; keys.reserve(map.size()); - for (typename std::map::const_iterator it = map.begin(); - it != map.end(); ++it) { - keys.push_back(it->first); + for (const auto& kv_pair : map) { + keys.push_back(kv_pair.first); } return keys; } -uint32_t ConvertMsTo24Bits(int64_t time_ms) { - uint32_t time_24_bits = - static_cast(((static_cast(time_ms) - << RTPHeaderExtension::kAbsSendTimeFraction) + - 500) / - 1000) & - 0x00FFFFFF; - return time_24_bits; -} +} // namespace RemoteBitrateEstimatorAbsSendTime::~RemoteBitrateEstimatorAbsSendTime() = default; bool RemoteBitrateEstimatorAbsSendTime::IsWithinClusterBounds( - int send_delta_ms, + TimeDelta send_delta, const Cluster& cluster_aggregate) { if (cluster_aggregate.count == 0) return true; - float cluster_mean = cluster_aggregate.send_mean_ms / - 
static_cast(cluster_aggregate.count); - return fabs(static_cast(send_delta_ms) - cluster_mean) < 2.5f; + TimeDelta cluster_mean = + cluster_aggregate.send_mean / cluster_aggregate.count; + return (send_delta - cluster_mean).Abs() < TimeDelta::Micros(2'500); } -void RemoteBitrateEstimatorAbsSendTime::AddCluster(std::list* clusters, - Cluster* cluster) { - cluster->send_mean_ms /= static_cast(cluster->count); - cluster->recv_mean_ms /= static_cast(cluster->count); - cluster->mean_size /= cluster->count; - clusters->push_back(*cluster); +void RemoteBitrateEstimatorAbsSendTime::MaybeAddCluster( + const Cluster& cluster_aggregate, + std::list& clusters) { + if (cluster_aggregate.count < kMinClusterSize || + cluster_aggregate.send_mean <= TimeDelta::Zero() || + cluster_aggregate.recv_mean <= TimeDelta::Zero()) { + return; + } + + Cluster cluster; + cluster.send_mean = cluster_aggregate.send_mean / cluster_aggregate.count; + cluster.recv_mean = cluster_aggregate.recv_mean / cluster_aggregate.count; + cluster.mean_size = cluster_aggregate.mean_size / cluster_aggregate.count; + cluster.count = cluster_aggregate.count; + cluster.num_above_min_delta = cluster_aggregate.num_above_min_delta; + clusters.push_back(cluster); } RemoteBitrateEstimatorAbsSendTime::RemoteBitrateEstimatorAbsSendTime( @@ -95,91 +100,77 @@ RemoteBitrateEstimatorAbsSendTime::RemoteBitrateEstimatorAbsSendTime( Clock* clock) : clock_(clock), observer_(observer), - inter_arrival_(), - estimator_(), detector_(&field_trials_), - incoming_bitrate_(kBitrateWindowMs, 8000), - incoming_bitrate_initialized_(false), - total_probes_received_(0), - first_packet_time_ms_(-1), - last_update_ms_(-1), - uma_recorded_(false), remote_rate_(&field_trials_) { RTC_DCHECK(clock_); RTC_DCHECK(observer_); RTC_LOG(LS_INFO) << "RemoteBitrateEstimatorAbsSendTime: Instantiating."; } -void RemoteBitrateEstimatorAbsSendTime::ComputeClusters( - std::list* clusters) const { - Cluster current; - int64_t prev_send_time = -1; - int64_t 
prev_recv_time = -1; - for (std::list::const_iterator it = probes_.begin(); - it != probes_.end(); ++it) { - if (prev_send_time >= 0) { - int send_delta_ms = it->send_time_ms - prev_send_time; - int recv_delta_ms = it->recv_time_ms - prev_recv_time; - if (send_delta_ms >= 1 && recv_delta_ms >= 1) { - ++current.num_above_min_delta; +std::list +RemoteBitrateEstimatorAbsSendTime::ComputeClusters() const { + std::list clusters; + Cluster cluster_aggregate; + Timestamp prev_send_time = Timestamp::MinusInfinity(); + Timestamp prev_recv_time = Timestamp::MinusInfinity(); + for (const Probe& probe : probes_) { + if (prev_send_time.IsFinite()) { + TimeDelta send_delta = probe.send_time - prev_send_time; + TimeDelta recv_delta = probe.recv_time - prev_recv_time; + if (send_delta >= kMinClusterDelta && recv_delta >= kMinClusterDelta) { + ++cluster_aggregate.num_above_min_delta; } - if (!IsWithinClusterBounds(send_delta_ms, current)) { - if (current.count >= kMinClusterSize && current.send_mean_ms > 0.0f && - current.recv_mean_ms > 0.0f) { - AddCluster(clusters, ¤t); - } - current = Cluster(); + if (!IsWithinClusterBounds(send_delta, cluster_aggregate)) { + MaybeAddCluster(cluster_aggregate, clusters); + cluster_aggregate = Cluster(); } - current.send_mean_ms += send_delta_ms; - current.recv_mean_ms += recv_delta_ms; - current.mean_size += it->payload_size; - ++current.count; + cluster_aggregate.send_mean += send_delta; + cluster_aggregate.recv_mean += recv_delta; + cluster_aggregate.mean_size += probe.payload_size; + ++cluster_aggregate.count; } - prev_send_time = it->send_time_ms; - prev_recv_time = it->recv_time_ms; - } - if (current.count >= kMinClusterSize && current.send_mean_ms > 0.0f && - current.recv_mean_ms > 0.0f) { - AddCluster(clusters, ¤t); + prev_send_time = probe.send_time; + prev_recv_time = probe.recv_time; } + MaybeAddCluster(cluster_aggregate, clusters); + return clusters; } -std::list::const_iterator +const RemoteBitrateEstimatorAbsSendTime::Cluster* 
RemoteBitrateEstimatorAbsSendTime::FindBestProbe( const std::list& clusters) const { - int highest_probe_bitrate_bps = 0; - std::list::const_iterator best_it = clusters.end(); - for (std::list::const_iterator it = clusters.begin(); - it != clusters.end(); ++it) { - if (it->send_mean_ms == 0 || it->recv_mean_ms == 0) + DataRate highest_probe_bitrate = DataRate::Zero(); + const Cluster* best = nullptr; + for (const auto& cluster : clusters) { + if (cluster.send_mean == TimeDelta::Zero() || + cluster.recv_mean == TimeDelta::Zero()) { continue; - if (it->num_above_min_delta > it->count / 2 && - (it->recv_mean_ms - it->send_mean_ms <= 2.0f && - it->send_mean_ms - it->recv_mean_ms <= 5.0f)) { - int probe_bitrate_bps = - std::min(it->GetSendBitrateBps(), it->GetRecvBitrateBps()); - if (probe_bitrate_bps > highest_probe_bitrate_bps) { - highest_probe_bitrate_bps = probe_bitrate_bps; - best_it = it; + } + if (cluster.num_above_min_delta > cluster.count / 2 && + (cluster.recv_mean - cluster.send_mean <= TimeDelta::Millis(2) && + cluster.send_mean - cluster.recv_mean <= TimeDelta::Millis(5))) { + DataRate probe_bitrate = + std::min(cluster.SendBitrate(), cluster.RecvBitrate()); + if (probe_bitrate > highest_probe_bitrate) { + highest_probe_bitrate = probe_bitrate; + best = &cluster; } } else { - int send_bitrate_bps = it->mean_size * 8 * 1000 / it->send_mean_ms; - int recv_bitrate_bps = it->mean_size * 8 * 1000 / it->recv_mean_ms; - RTC_LOG(LS_INFO) << "Probe failed, sent at " << send_bitrate_bps - << " bps, received at " << recv_bitrate_bps - << " bps. Mean send delta: " << it->send_mean_ms - << " ms, mean recv delta: " << it->recv_mean_ms - << " ms, num probes: " << it->count; + RTC_LOG(LS_INFO) << "Probe failed, sent at " + << cluster.SendBitrate().bps() << " bps, received at " + << cluster.RecvBitrate().bps() + << " bps. 
Mean send delta: " << cluster.send_mean.ms() + << " ms, mean recv delta: " << cluster.recv_mean.ms() + << " ms, num probes: " << cluster.count; break; } } - return best_it; + return best; } RemoteBitrateEstimatorAbsSendTime::ProbeResult -RemoteBitrateEstimatorAbsSendTime::ProcessClusters(int64_t now_ms) { - std::list clusters; - ComputeClusters(&clusters); +RemoteBitrateEstimatorAbsSendTime::ProcessClusters(Timestamp now) { + std::list clusters = ComputeClusters(); if (clusters.empty()) { // If we reach the max number of probe packets and still have no clusters, // we will remove the oldest one. @@ -188,21 +179,18 @@ RemoteBitrateEstimatorAbsSendTime::ProcessClusters(int64_t now_ms) { return ProbeResult::kNoUpdate; } - std::list::const_iterator best_it = FindBestProbe(clusters); - if (best_it != clusters.end()) { - int probe_bitrate_bps = - std::min(best_it->GetSendBitrateBps(), best_it->GetRecvBitrateBps()); + if (const Cluster* best = FindBestProbe(clusters)) { + DataRate probe_bitrate = std::min(best->SendBitrate(), best->RecvBitrate()); // Make sure that a probe sent on a lower bitrate than our estimate can't // reduce the estimate. - if (IsBitrateImproving(probe_bitrate_bps)) { + if (IsBitrateImproving(probe_bitrate)) { RTC_LOG(LS_INFO) << "Probe successful, sent at " - << best_it->GetSendBitrateBps() << " bps, received at " - << best_it->GetRecvBitrateBps() - << " bps. Mean send delta: " << best_it->send_mean_ms - << " ms, mean recv delta: " << best_it->recv_mean_ms - << " ms, num probes: " << best_it->count; - remote_rate_.SetEstimate(DataRate::BitsPerSec(probe_bitrate_bps), - Timestamp::Millis(now_ms)); + << best->SendBitrate().bps() << " bps, received at " + << best->RecvBitrate().bps() + << " bps. 
Mean send delta: " << best->send_mean.ms() + << " ms, mean recv delta: " << best->recv_mean.ms() + << " ms, num probes: " << best->count; + remote_rate_.SetEstimate(probe_bitrate, now); return ProbeResult::kBitrateUpdated; } } @@ -215,11 +203,11 @@ RemoteBitrateEstimatorAbsSendTime::ProcessClusters(int64_t now_ms) { } bool RemoteBitrateEstimatorAbsSendTime::IsBitrateImproving( - int new_bitrate_bps) const { - bool initial_probe = !remote_rate_.ValidEstimate() && new_bitrate_bps > 0; - bool bitrate_above_estimate = - remote_rate_.ValidEstimate() && - new_bitrate_bps > remote_rate_.LatestEstimate().bps(); + DataRate probe_bitrate) const { + bool initial_probe = + !remote_rate_.ValidEstimate() && probe_bitrate > DataRate::Zero(); + bool bitrate_above_estimate = remote_rate_.ValidEstimate() && + probe_bitrate > remote_rate_.LatestEstimate(); return initial_probe || bitrate_above_estimate; } @@ -234,14 +222,15 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacket( "is missing absolute send time extension!"; return; } - IncomingPacketInfo(arrival_time_ms, header.extension.absoluteSendTime, - payload_size, header.ssrc); + IncomingPacketInfo(Timestamp::Millis(arrival_time_ms), + header.extension.absoluteSendTime, + DataSize::Bytes(payload_size), header.ssrc); } void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo( - int64_t arrival_time_ms, + Timestamp arrival_time, uint32_t send_time_24bits, - size_t payload_size, + DataSize payload_size, uint32_t ssrc) { RTC_CHECK(send_time_24bits < (1ul << 24)); if (!uma_recorded_) { @@ -252,15 +241,16 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo( // Shift up send time to use the full 32 bits that inter_arrival works with, // so wrapping works properly. 
uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift; - int64_t send_time_ms = static_cast(timestamp) * kTimestampToMs; + Timestamp send_time = + Timestamp::Millis(static_cast(timestamp) * kTimestampToMs); - int64_t now_ms = clock_->TimeInMilliseconds(); + Timestamp now = clock_->CurrentTime(); // TODO(holmer): SSRCs are only needed for REMB, should be broken out from // here. // Check if incoming bitrate estimate is valid, and if it needs to be reset. absl::optional incoming_bitrate = - incoming_bitrate_.Rate(arrival_time_ms); + incoming_bitrate_.Rate(arrival_time.ms()); if (incoming_bitrate) { incoming_bitrate_initialized_ = true; } else if (incoming_bitrate_initialized_) { @@ -270,74 +260,82 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo( incoming_bitrate_.Reset(); incoming_bitrate_initialized_ = false; } - incoming_bitrate_.Update(payload_size, arrival_time_ms); + incoming_bitrate_.Update(payload_size.bytes(), arrival_time.ms()); - if (first_packet_time_ms_ == -1) - first_packet_time_ms_ = now_ms; + if (first_packet_time_.IsInfinite()) { + first_packet_time_ = now; + } uint32_t ts_delta = 0; int64_t t_delta = 0; int size_delta = 0; bool update_estimate = false; - uint32_t target_bitrate_bps = 0; + DataRate target_bitrate = DataRate::Zero(); std::vector ssrcs; { - rtc::CritScope lock(&crit_); - - TimeoutStreams(now_ms); - RTC_DCHECK(inter_arrival_.get()); - RTC_DCHECK(estimator_.get()); - ssrcs_[ssrc] = now_ms; + MutexLock lock(&mutex_); + + TimeoutStreams(now); + RTC_DCHECK(inter_arrival_); + RTC_DCHECK(estimator_); + // TODO(danilchap): Replace 5 lines below with insert_or_assign when that + // c++17 function is available. + auto inserted = ssrcs_.insert(std::make_pair(ssrc, now)); + if (!inserted.second) { + // Already inserted, update. + inserted.first->second = now; + } // For now only try to detect probes while we don't have a valid estimate. 
// We currently assume that only packets larger than 200 bytes are paced by // the sender. - const size_t kMinProbePacketSize = 200; + static constexpr DataSize kMinProbePacketSize = DataSize::Bytes(200); if (payload_size > kMinProbePacketSize && (!remote_rate_.ValidEstimate() || - now_ms - first_packet_time_ms_ < kInitialProbingIntervalMs)) { + now - first_packet_time_ < kInitialProbingInterval)) { // TODO(holmer): Use a map instead to get correct order? if (total_probes_received_ < kMaxProbePackets) { - int send_delta_ms = -1; - int recv_delta_ms = -1; + TimeDelta send_delta = TimeDelta::Millis(-1); + TimeDelta recv_delta = TimeDelta::Millis(-1); if (!probes_.empty()) { - send_delta_ms = send_time_ms - probes_.back().send_time_ms; - recv_delta_ms = arrival_time_ms - probes_.back().recv_time_ms; + send_delta = send_time - probes_.back().send_time; + recv_delta = arrival_time - probes_.back().recv_time; } - RTC_LOG(LS_INFO) << "Probe packet received: send time=" << send_time_ms - << " ms, recv time=" << arrival_time_ms - << " ms, send delta=" << send_delta_ms - << " ms, recv delta=" << recv_delta_ms << " ms."; + RTC_LOG(LS_INFO) << "Probe packet received: send time=" + << send_time.ms() + << " ms, recv time=" << arrival_time.ms() + << " ms, send delta=" << send_delta.ms() + << " ms, recv delta=" << recv_delta.ms() << " ms."; } - probes_.push_back(Probe(send_time_ms, arrival_time_ms, payload_size)); + probes_.emplace_back(send_time, arrival_time, payload_size); ++total_probes_received_; // Make sure that a probe which updated the bitrate immediately has an // effect by calling the OnReceiveBitrateChanged callback. 
- if (ProcessClusters(now_ms) == ProbeResult::kBitrateUpdated) + if (ProcessClusters(now) == ProbeResult::kBitrateUpdated) update_estimate = true; } - if (inter_arrival_->ComputeDeltas(timestamp, arrival_time_ms, now_ms, - payload_size, &ts_delta, &t_delta, + if (inter_arrival_->ComputeDeltas(timestamp, arrival_time.ms(), now.ms(), + payload_size.bytes(), &ts_delta, &t_delta, &size_delta)) { double ts_delta_ms = (1000.0 * ts_delta) / (1 << kInterArrivalShift); estimator_->Update(t_delta, ts_delta_ms, size_delta, detector_.State(), - arrival_time_ms); + arrival_time.ms()); detector_.Detect(estimator_->offset(), ts_delta_ms, - estimator_->num_of_deltas(), arrival_time_ms); + estimator_->num_of_deltas(), arrival_time.ms()); } if (!update_estimate) { // Check if it's time for a periodic update or if we should update because // of an over-use. - if (last_update_ms_ == -1 || - now_ms - last_update_ms_ > remote_rate_.GetFeedbackInterval().ms()) { + if (last_update_.IsInfinite() || + now.ms() - last_update_.ms() > + remote_rate_.GetFeedbackInterval().ms()) { update_estimate = true; } else if (detector_.State() == BandwidthUsage::kBwOverusing) { absl::optional incoming_rate = - incoming_bitrate_.Rate(arrival_time_ms); + incoming_bitrate_.Rate(arrival_time.ms()); if (incoming_rate && remote_rate_.TimeToReduceFurther( - Timestamp::Millis(now_ms), - DataRate::BitsPerSec(*incoming_rate))) { + now, DataRate::BitsPerSec(*incoming_rate))) { update_estimate = true; } } @@ -348,18 +346,16 @@ void RemoteBitrateEstimatorAbsSendTime::IncomingPacketInfo( // We also have to update the estimate immediately if we are overusing // and the target bitrate is too high compared to what we are receiving. 
const RateControlInput input( - detector_.State(), - OptionalRateFromOptionalBps(incoming_bitrate_.Rate(arrival_time_ms))); - target_bitrate_bps = - remote_rate_.Update(&input, Timestamp::Millis(now_ms)) - .bps(); + detector_.State(), OptionalRateFromOptionalBps( + incoming_bitrate_.Rate(arrival_time.ms()))); + target_bitrate = remote_rate_.Update(&input, now); update_estimate = remote_rate_.ValidEstimate(); ssrcs = Keys(ssrcs_); } } if (update_estimate) { - last_update_ms_ = now_ms; - observer_->OnReceiveBitrateChanged(ssrcs, target_bitrate_bps); + last_update_ = now; + observer_->OnReceiveBitrateChanged(ssrcs, target_bitrate.bps()); } } @@ -370,9 +366,9 @@ int64_t RemoteBitrateEstimatorAbsSendTime::TimeUntilNextProcess() { return kDisabledModuleTime; } -void RemoteBitrateEstimatorAbsSendTime::TimeoutStreams(int64_t now_ms) { - for (Ssrcs::iterator it = ssrcs_.begin(); it != ssrcs_.end();) { - if ((now_ms - it->second) > kStreamTimeOutMs) { +void RemoteBitrateEstimatorAbsSendTime::TimeoutStreams(Timestamp now) { + for (auto it = ssrcs_.begin(); it != ssrcs_.end();) { + if (now - it->second > TimeDelta::Millis(kStreamTimeOutMs)) { ssrcs_.erase(it++); } else { ++it; @@ -380,23 +376,23 @@ void RemoteBitrateEstimatorAbsSendTime::TimeoutStreams(int64_t now_ms) { } if (ssrcs_.empty()) { // We can't update the estimate if we don't have any active streams. - inter_arrival_.reset( - new InterArrival((kTimestampGroupLengthMs << kInterArrivalShift) / 1000, - kTimestampToMs, true)); - estimator_.reset(new OveruseEstimator(OverUseDetectorOptions())); + inter_arrival_ = std::make_unique( + (kTimestampGroupLengthMs << kInterArrivalShift) / 1000, kTimestampToMs, + true); + estimator_ = std::make_unique(OverUseDetectorOptions()); // We deliberately don't reset the first_packet_time_ms_ here for now since // we only probe for bandwidth in the beginning of a call right now. 
} } void RemoteBitrateEstimatorAbsSendTime::OnRttUpdate(int64_t avg_rtt_ms, - int64_t max_rtt_ms) { - rtc::CritScope lock(&crit_); + int64_t /*max_rtt_ms*/) { + MutexLock lock(&mutex_); remote_rate_.SetRtt(TimeDelta::Millis(avg_rtt_ms)); } void RemoteBitrateEstimatorAbsSendTime::RemoveStream(uint32_t ssrc) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ssrcs_.erase(ssrc); } @@ -409,7 +405,7 @@ bool RemoteBitrateEstimatorAbsSendTime::LatestEstimate( // thread. RTC_DCHECK(ssrcs); RTC_DCHECK(bitrate_bps); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (!remote_rate_.ValidEstimate()) { return false; } @@ -425,7 +421,7 @@ bool RemoteBitrateEstimatorAbsSendTime::LatestEstimate( void RemoteBitrateEstimatorAbsSendTime::SetMinBitrate(int min_bitrate_bps) { // Called from both the configuration thread and the network thread. Shouldn't // be called from the network thread in the future. - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); remote_rate_.SetMinBitrate(DataRate::BitsPerSec(min_bitrate_bps)); } } // namespace webrtc diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h b/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h index 9fd4974116..4117382577 100644 --- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h @@ -21,61 +21,35 @@ #include "api/rtp_headers.h" #include "api/transport/field_trial_based_config.h" +#include "api/units/data_rate.h" +#include "api/units/data_size.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "modules/remote_bitrate_estimator/aimd_rate_control.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/remote_bitrate_estimator/inter_arrival.h" #include "modules/remote_bitrate_estimator/overuse_detector.h" #include "modules/remote_bitrate_estimator/overuse_estimator.h" 
#include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/race_checker.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" namespace webrtc { -struct Probe { - Probe(int64_t send_time_ms, int64_t recv_time_ms, size_t payload_size) - : send_time_ms(send_time_ms), - recv_time_ms(recv_time_ms), - payload_size(payload_size) {} - int64_t send_time_ms; - int64_t recv_time_ms; - size_t payload_size; -}; - -struct Cluster { - Cluster() - : send_mean_ms(0.0f), - recv_mean_ms(0.0f), - mean_size(0), - count(0), - num_above_min_delta(0) {} - - int GetSendBitrateBps() const { - RTC_CHECK_GT(send_mean_ms, 0.0f); - return mean_size * 8 * 1000 / send_mean_ms; - } - - int GetRecvBitrateBps() const { - RTC_CHECK_GT(recv_mean_ms, 0.0f); - return mean_size * 8 * 1000 / recv_mean_ms; - } - - float send_mean_ms; - float recv_mean_ms; - // TODO(holmer): Add some variance metric as well? 
- size_t mean_size; - int count; - int num_above_min_delta; -}; - class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator { public: RemoteBitrateEstimatorAbsSendTime(RemoteBitrateObserver* observer, Clock* clock); + + RemoteBitrateEstimatorAbsSendTime() = delete; + RemoteBitrateEstimatorAbsSendTime(const RemoteBitrateEstimatorAbsSendTime&) = + delete; + RemoteBitrateEstimatorAbsSendTime& operator=( + const RemoteBitrateEstimatorAbsSendTime&) = delete; + ~RemoteBitrateEstimatorAbsSendTime() override; void IncomingPacket(int64_t arrival_time_ms, @@ -94,32 +68,54 @@ class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator { void SetMinBitrate(int min_bitrate_bps) override; private: - typedef std::map Ssrcs; + struct Probe { + Probe(Timestamp send_time, Timestamp recv_time, DataSize payload_size) + : send_time(send_time), + recv_time(recv_time), + payload_size(payload_size) {} + + Timestamp send_time; + Timestamp recv_time; + DataSize payload_size; + }; + + struct Cluster { + DataRate SendBitrate() const { return mean_size / send_mean; } + DataRate RecvBitrate() const { return mean_size / recv_mean; } + + TimeDelta send_mean = TimeDelta::Zero(); + TimeDelta recv_mean = TimeDelta::Zero(); + // TODO(holmer): Add some variance metric as well? 
+ DataSize mean_size = DataSize::Zero(); + int count = 0; + int num_above_min_delta = 0; + }; + enum class ProbeResult { kBitrateUpdated, kNoUpdate }; - static bool IsWithinClusterBounds(int send_delta_ms, + static bool IsWithinClusterBounds(TimeDelta send_delta, const Cluster& cluster_aggregate); - static void AddCluster(std::list* clusters, Cluster* cluster); + static void MaybeAddCluster(const Cluster& cluster_aggregate, + std::list& clusters); - void IncomingPacketInfo(int64_t arrival_time_ms, + void IncomingPacketInfo(Timestamp arrival_time, uint32_t send_time_24bits, - size_t payload_size, + DataSize payload_size, uint32_t ssrc); - void ComputeClusters(std::list* clusters) const; + std::list ComputeClusters() const; - std::list::const_iterator FindBestProbe( - const std::list& clusters) const; + const Cluster* FindBestProbe(const std::list& clusters) const; // Returns true if a probe which changed the estimate was detected. - ProbeResult ProcessClusters(int64_t now_ms) - RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_); + ProbeResult ProcessClusters(Timestamp now) + RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_); - bool IsBitrateImproving(int probe_bitrate_bps) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_); + bool IsBitrateImproving(DataRate probe_bitrate) const + RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_); - void TimeoutStreams(int64_t now_ms) RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_); + void TimeoutStreams(Timestamp now) RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_); rtc::RaceChecker network_race_; Clock* const clock_; @@ -128,21 +124,17 @@ class RemoteBitrateEstimatorAbsSendTime : public RemoteBitrateEstimator { std::unique_ptr inter_arrival_; std::unique_ptr estimator_; OveruseDetector detector_; - RateStatistics incoming_bitrate_; - bool incoming_bitrate_initialized_; - std::vector recent_propagation_delta_ms_; - std::vector recent_update_time_ms_; + RateStatistics incoming_bitrate_{kBitrateWindowMs, 8000}; + bool incoming_bitrate_initialized_ = false; std::list probes_; - size_t 
total_probes_received_; - int64_t first_packet_time_ms_; - int64_t last_update_ms_; - bool uma_recorded_; - - rtc::CriticalSection crit_; - Ssrcs ssrcs_ RTC_GUARDED_BY(&crit_); - AimdRateControl remote_rate_ RTC_GUARDED_BY(&crit_); - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RemoteBitrateEstimatorAbsSendTime); + size_t total_probes_received_ = 0; + Timestamp first_packet_time_ = Timestamp::MinusInfinity(); + Timestamp last_update_ = Timestamp::MinusInfinity(); + bool uma_recorded_ = false; + + mutable Mutex mutex_; + std::map ssrcs_ RTC_GUARDED_BY(&mutex_); + AimdRateControl remote_rate_ RTC_GUARDED_BY(&mutex_); }; } // namespace webrtc diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc index db3bbe93c0..ddaa1de088 100644 --- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc @@ -95,7 +95,7 @@ void RemoteBitrateEstimatorSingleStream::IncomingPacket( uint32_t rtp_timestamp = header.timestamp + header.extension.transmissionTimeOffset; int64_t now_ms = clock_->TimeInMilliseconds(); - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); SsrcOveruseEstimatorMap::iterator it = overuse_detectors_.find(ssrc); if (it == overuse_detectors_.end()) { // This is a new SSRC. Adding to map. 
@@ -158,7 +158,7 @@ void RemoteBitrateEstimatorSingleStream::IncomingPacket( void RemoteBitrateEstimatorSingleStream::Process() { { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); UpdateEstimate(clock_->TimeInMilliseconds()); } last_process_time_ = clock_->TimeInMilliseconds(); @@ -168,7 +168,7 @@ int64_t RemoteBitrateEstimatorSingleStream::TimeUntilNextProcess() { if (last_process_time_ < 0) { return 0; } - rtc::CritScope cs_(&crit_sect_); + MutexLock lock_(&mutex_); RTC_DCHECK_GT(process_interval_ms_, 0); return last_process_time_ + process_interval_ms_ - clock_->TimeInMilliseconds(); @@ -217,12 +217,12 @@ void RemoteBitrateEstimatorSingleStream::UpdateEstimate(int64_t now_ms) { void RemoteBitrateEstimatorSingleStream::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); GetRemoteRate()->SetRtt(TimeDelta::Millis(avg_rtt_ms)); } void RemoteBitrateEstimatorSingleStream::RemoveStream(unsigned int ssrc) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); SsrcOveruseEstimatorMap::iterator it = overuse_detectors_.find(ssrc); if (it != overuse_detectors_.end()) { delete it->second; @@ -233,8 +233,8 @@ void RemoteBitrateEstimatorSingleStream::RemoveStream(unsigned int ssrc) { bool RemoteBitrateEstimatorSingleStream::LatestEstimate( std::vector* ssrcs, uint32_t* bitrate_bps) const { - rtc::CritScope cs(&crit_sect_); - assert(bitrate_bps); + MutexLock lock(&mutex_); + RTC_DCHECK(bitrate_bps); if (!remote_rate_->ValidEstimate()) { return false; } @@ -248,7 +248,7 @@ bool RemoteBitrateEstimatorSingleStream::LatestEstimate( void RemoteBitrateEstimatorSingleStream::GetSsrcs( std::vector* ssrcs) const { - assert(ssrcs); + RTC_DCHECK(ssrcs); ssrcs->resize(overuse_detectors_.size()); int i = 0; for (SsrcOveruseEstimatorMap::const_iterator it = overuse_detectors_.begin(); @@ -264,7 +264,7 @@ AimdRateControl* RemoteBitrateEstimatorSingleStream::GetRemoteRate() { } void 
RemoteBitrateEstimatorSingleStream::SetMinBitrate(int min_bitrate_bps) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); remote_rate_->SetMinBitrate(DataRate::BitsPerSec(min_bitrate_bps)); } diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h index a28109ce99..9fd2f9fc06 100644 --- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h @@ -21,9 +21,8 @@ #include "api/transport/field_trial_based_config.h" #include "modules/remote_bitrate_estimator/aimd_rate_control.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -35,6 +34,13 @@ class RemoteBitrateEstimatorSingleStream : public RemoteBitrateEstimator { public: RemoteBitrateEstimatorSingleStream(RemoteBitrateObserver* observer, Clock* clock); + + RemoteBitrateEstimatorSingleStream() = delete; + RemoteBitrateEstimatorSingleStream( + const RemoteBitrateEstimatorSingleStream&) = delete; + RemoteBitrateEstimatorSingleStream& operator=( + const RemoteBitrateEstimatorSingleStream&) = delete; + ~RemoteBitrateEstimatorSingleStream() override; void IncomingPacket(int64_t arrival_time_ms, @@ -54,29 +60,26 @@ class RemoteBitrateEstimatorSingleStream : public RemoteBitrateEstimator { typedef std::map SsrcOveruseEstimatorMap; // Triggers a new estimate calculation. 
- void UpdateEstimate(int64_t time_now) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + void UpdateEstimate(int64_t time_now) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void GetSsrcs(std::vector* ssrcs) const - RTC_SHARED_LOCKS_REQUIRED(crit_sect_); + RTC_SHARED_LOCKS_REQUIRED(mutex_); // Returns |remote_rate_| if the pointed to object exists, // otherwise creates it. - AimdRateControl* GetRemoteRate() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + AimdRateControl* GetRemoteRate() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); Clock* const clock_; const FieldTrialBasedConfig field_trials_; - SsrcOveruseEstimatorMap overuse_detectors_ RTC_GUARDED_BY(crit_sect_); - RateStatistics incoming_bitrate_ RTC_GUARDED_BY(crit_sect_); - uint32_t last_valid_incoming_bitrate_ RTC_GUARDED_BY(crit_sect_); - std::unique_ptr remote_rate_ RTC_GUARDED_BY(crit_sect_); - RemoteBitrateObserver* const observer_ RTC_GUARDED_BY(crit_sect_); - rtc::CriticalSection crit_sect_; + SsrcOveruseEstimatorMap overuse_detectors_ RTC_GUARDED_BY(mutex_); + RateStatistics incoming_bitrate_ RTC_GUARDED_BY(mutex_); + uint32_t last_valid_incoming_bitrate_ RTC_GUARDED_BY(mutex_); + std::unique_ptr remote_rate_ RTC_GUARDED_BY(mutex_); + RemoteBitrateObserver* const observer_ RTC_GUARDED_BY(mutex_); + mutable Mutex mutex_; int64_t last_process_time_; - int64_t process_interval_ms_ RTC_GUARDED_BY(crit_sect_); + int64_t process_interval_ms_ RTC_GUARDED_BY(mutex_); bool uma_recorded_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RemoteBitrateEstimatorSingleStream); }; } // namespace webrtc diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc index 5e117942c1..66f8ca053a 100644 --- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc +++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest_helper.cc @@ -46,7 +46,7 @@ RtpStream::RtpStream(int fps, next_rtcp_time_(rtcp_receive_time), 
rtp_timestamp_offset_(timestamp_offset), kNtpFracPerMs(4.294967296E6) { - assert(fps_ > 0); + RTC_DCHECK_GT(fps_, 0); } void RtpStream::set_rtp_timestamp_offset(uint32_t offset) { @@ -60,7 +60,7 @@ int64_t RtpStream::GenerateFrame(int64_t time_now_us, PacketList* packets) { if (time_now_us < next_rtp_time_) { return next_rtp_time_; } - assert(packets != NULL); + RTC_DCHECK(packets); size_t bits_per_frame = (bitrate_bps_ + fps_ / 2) / fps_; size_t n_packets = std::max((bits_per_frame + 4 * kMtu) / (8 * kMtu), 1u); @@ -173,9 +173,9 @@ void StreamGenerator::set_rtp_timestamp_offset(uint32_t ssrc, uint32_t offset) { // it possible to simulate different types of channels. int64_t StreamGenerator::GenerateFrame(RtpStream::PacketList* packets, int64_t time_now_us) { - assert(packets != NULL); - assert(packets->empty()); - assert(capacity_ > 0); + RTC_DCHECK(packets); + RTC_DCHECK(packets->empty()); + RTC_DCHECK_GT(capacity_, 0); StreamMap::iterator it = std::min_element(streams_.begin(), streams_.end(), RtpStream::Compare); (*it).second->GenerateFrame(time_now_us, packets); diff --git a/modules/remote_bitrate_estimator/remote_estimator_proxy.cc b/modules/remote_bitrate_estimator/remote_estimator_proxy.cc index f044721fe0..7764e60ef2 100644 --- a/modules/remote_bitrate_estimator/remote_estimator_proxy.cc +++ b/modules/remote_bitrate_estimator/remote_estimator_proxy.cc @@ -23,9 +23,6 @@ namespace webrtc { -// Impossible to request feedback older than what can be represented by 15 bits. -const int RemoteEstimatorProxy::kMaxNumberOfPackets = (1 << 15); - // The maximum allowed value for a timestamp in milliseconds. This is lower // than the numerical limit since we often convert to microseconds. 
static constexpr int64_t kMaxTimeMs = @@ -33,11 +30,11 @@ static constexpr int64_t kMaxTimeMs = RemoteEstimatorProxy::RemoteEstimatorProxy( Clock* clock, - TransportFeedbackSenderInterface* feedback_sender, + TransportFeedbackSender feedback_sender, const WebRtcKeyValueConfig* key_value_config, NetworkStateEstimator* network_state_estimator) : clock_(clock), - feedback_sender_(feedback_sender), + feedback_sender_(std::move(feedback_sender)), send_config_(key_value_config), last_process_time_ms_(-1), network_state_estimator_(network_state_estimator), @@ -54,6 +51,18 @@ RemoteEstimatorProxy::RemoteEstimatorProxy( RemoteEstimatorProxy::~RemoteEstimatorProxy() {} +void RemoteEstimatorProxy::MaybeCullOldPackets(int64_t sequence_number, + int64_t arrival_time_ms) { + if (periodic_window_start_seq_.has_value()) { + if (*periodic_window_start_seq_ >= + packet_arrival_times_.end_sequence_number()) { + // Start new feedback packet, cull old packets. + packet_arrival_times_.RemoveOldPackets( + sequence_number, arrival_time_ms - send_config_.back_window->ms()); + } + } +} + void RemoteEstimatorProxy::IncomingPacket(int64_t arrival_time_ms, size_t payload_size, const RTPHeader& header) { @@ -61,7 +70,7 @@ void RemoteEstimatorProxy::IncomingPacket(int64_t arrival_time_ms, RTC_LOG(LS_WARNING) << "Arrival time out of bounds: " << arrival_time_ms; return; } - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); media_ssrc_ = header.ssrc; int64_t seq = 0; @@ -69,39 +78,26 @@ void RemoteEstimatorProxy::IncomingPacket(int64_t arrival_time_ms, seq = unwrapper_.Unwrap(header.extension.transportSequenceNumber); if (send_periodic_feedback_) { - if (periodic_window_start_seq_ && - packet_arrival_times_.lower_bound(*periodic_window_start_seq_) == - packet_arrival_times_.end()) { - // Start new feedback packet, cull old packets. 
- for (auto it = packet_arrival_times_.begin(); - it != packet_arrival_times_.end() && it->first < seq && - arrival_time_ms - it->second >= send_config_.back_window->ms();) { - it = packet_arrival_times_.erase(it); - } - } + MaybeCullOldPackets(seq, arrival_time_ms); + if (!periodic_window_start_seq_ || seq < *periodic_window_start_seq_) { periodic_window_start_seq_ = seq; } } // We are only interested in the first time a packet is received. - if (packet_arrival_times_.find(seq) != packet_arrival_times_.end()) + if (packet_arrival_times_.has_received(seq)) { return; + } - packet_arrival_times_[seq] = arrival_time_ms; + packet_arrival_times_.AddPacket(seq, arrival_time_ms); // Limit the range of sequence numbers to send feedback for. - auto first_arrival_time_to_keep = packet_arrival_times_.lower_bound( - packet_arrival_times_.rbegin()->first - kMaxNumberOfPackets); - if (first_arrival_time_to_keep != packet_arrival_times_.begin()) { - packet_arrival_times_.erase(packet_arrival_times_.begin(), - first_arrival_time_to_keep); - if (send_periodic_feedback_) { - // |packet_arrival_times_| cannot be empty since we just added one - // element and the last element is not deleted. - RTC_DCHECK(!packet_arrival_times_.empty()); - periodic_window_start_seq_ = packet_arrival_times_.begin()->first; - } + if (!periodic_window_start_seq_.has_value() || + periodic_window_start_seq_.value() < + packet_arrival_times_.begin_sequence_number()) { + periodic_window_start_seq_ = + packet_arrival_times_.begin_sequence_number(); } if (header.extension.feedback_request) { @@ -113,8 +109,8 @@ void RemoteEstimatorProxy::IncomingPacket(int64_t arrival_time_ms, if (network_state_estimator_ && header.extension.hasAbsoluteSendTime) { PacketResult packet_result; packet_result.receive_time = Timestamp::Millis(arrival_time_ms); - // Ignore reordering of packets and assume they have approximately the same - // send time. 
+ // Ignore reordering of packets and assume they have approximately the + // same send time. abs_send_timestamp_ += std::max( header.extension.GetAbsoluteSendTimeDelta(previous_abs_send_time_), TimeDelta::Millis(0)); @@ -134,7 +130,7 @@ bool RemoteEstimatorProxy::LatestEstimate(std::vector* ssrcs, } int64_t RemoteEstimatorProxy::TimeUntilNextProcess() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (!send_periodic_feedback_) { // Wait a day until next process. return 24 * 60 * 60 * 1000; @@ -147,7 +143,7 @@ int64_t RemoteEstimatorProxy::TimeUntilNextProcess() { } void RemoteEstimatorProxy::Process() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (!send_periodic_feedback_) { return; } @@ -169,7 +165,7 @@ void RemoteEstimatorProxy::OnBitrateChanged(int bitrate_bps) { kTwccReportSize * 8.0 * 1000.0 / send_config_.min_interval->ms(); // Let TWCC reports occupy 5% of total bandwidth. - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); send_interval_ms_ = static_cast( 0.5 + kTwccReportSize * 8.0 * 1000.0 / rtc::SafeClamp(send_config_.bandwidth_fraction * bitrate_bps, @@ -178,14 +174,14 @@ void RemoteEstimatorProxy::OnBitrateChanged(int bitrate_bps) { void RemoteEstimatorProxy::SetSendPeriodicFeedback( bool send_periodic_feedback) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); send_periodic_feedback_ = send_periodic_feedback; } void RemoteEstimatorProxy::SendPeriodicFeedbacks() { - // |periodic_window_start_seq_| is the first sequence number to include in the - // current feedback packet. Some older may still be in the map, in case a - // reordering happens and we need to retransmit them. + // |periodic_window_start_seq_| is the first sequence number to include in + // the current feedback packet. Some older may still be in the map, in case + // a reordering happens and we need to retransmit them. 
if (!periodic_window_start_seq_) return; @@ -199,15 +195,17 @@ void RemoteEstimatorProxy::SendPeriodicFeedbacks() { } } - for (auto begin_iterator = - packet_arrival_times_.lower_bound(*periodic_window_start_seq_); - begin_iterator != packet_arrival_times_.cend(); - begin_iterator = - packet_arrival_times_.lower_bound(*periodic_window_start_seq_)) { - auto feedback_packet = std::make_unique(); - periodic_window_start_seq_ = BuildFeedbackPacket( - feedback_packet_count_++, media_ssrc_, *periodic_window_start_seq_, - begin_iterator, packet_arrival_times_.cend(), feedback_packet.get()); + int64_t packet_arrival_times_end_seq = + packet_arrival_times_.end_sequence_number(); + while (periodic_window_start_seq_ < packet_arrival_times_end_seq) { + auto feedback_packet = MaybeBuildFeedbackPacket( + /*include_timestamps=*/true, periodic_window_start_seq_.value(), + packet_arrival_times_end_seq, + /*is_periodic_update=*/true); + + if (feedback_packet == nullptr) { + break; + } RTC_DCHECK(feedback_sender_ != nullptr); @@ -217,10 +215,10 @@ void RemoteEstimatorProxy::SendPeriodicFeedbacks() { } packets.push_back(std::move(feedback_packet)); - feedback_sender_->SendCombinedRtcpPacket(std::move(packets)); - // Note: Don't erase items from packet_arrival_times_ after sending, in case - // they need to be re-sent after a reordering. Removal will be handled - // by OnPacketArrival once packets are too old. + feedback_sender_(std::move(packets)); + // Note: Don't erase items from packet_arrival_times_ after sending, in + // case they need to be re-sent after a reordering. Removal will be + // handled by OnPacketArrival once packets are too old. 
} } @@ -231,61 +229,79 @@ void RemoteEstimatorProxy::SendFeedbackOnRequest( return; } - auto feedback_packet = std::make_unique( - feedback_request.include_timestamps); - int64_t first_sequence_number = sequence_number - feedback_request.sequence_count + 1; - auto begin_iterator = - packet_arrival_times_.lower_bound(first_sequence_number); - auto end_iterator = packet_arrival_times_.upper_bound(sequence_number); - BuildFeedbackPacket(feedback_packet_count_++, media_ssrc_, - first_sequence_number, begin_iterator, end_iterator, - feedback_packet.get()); + auto feedback_packet = MaybeBuildFeedbackPacket( + feedback_request.include_timestamps, first_sequence_number, + sequence_number + 1, /*is_periodic_update=*/false); + + // This is called when a packet has just been added. + RTC_DCHECK(feedback_packet != nullptr); // Clear up to the first packet that is included in this feedback packet. - packet_arrival_times_.erase(packet_arrival_times_.begin(), begin_iterator); + packet_arrival_times_.EraseTo(first_sequence_number); RTC_DCHECK(feedback_sender_ != nullptr); std::vector> packets; packets.push_back(std::move(feedback_packet)); - feedback_sender_->SendCombinedRtcpPacket(std::move(packets)); + feedback_sender_(std::move(packets)); } -int64_t RemoteEstimatorProxy::BuildFeedbackPacket( - uint8_t feedback_packet_count, - uint32_t media_ssrc, - int64_t base_sequence_number, - std::map::const_iterator begin_iterator, - std::map::const_iterator end_iterator, - rtcp::TransportFeedback* feedback_packet) { - RTC_DCHECK(begin_iterator != end_iterator); - - // TODO(sprang): Measure receive times in microseconds and remove the - // conversions below. - feedback_packet->SetMediaSsrc(media_ssrc); - // Base sequence number is the expected first sequence number. This is known, - // but we might not have actually received it, so the base time shall be the - // time of the first received packet in the feedback. 
- feedback_packet->SetBase(static_cast(base_sequence_number & 0xFFFF), - begin_iterator->second * 1000); - feedback_packet->SetFeedbackSequenceNumber(feedback_packet_count); - int64_t next_sequence_number = base_sequence_number; - for (auto it = begin_iterator; it != end_iterator; ++it) { - if (!feedback_packet->AddReceivedPacket( - static_cast(it->first & 0xFFFF), it->second * 1000)) { - // If we can't even add the first seq to the feedback packet, we won't be - // able to build it at all. - RTC_CHECK(begin_iterator != it); +std::unique_ptr +RemoteEstimatorProxy::MaybeBuildFeedbackPacket( + bool include_timestamps, + int64_t begin_sequence_number_inclusive, + int64_t end_sequence_number_exclusive, + bool is_periodic_update) { + RTC_DCHECK_LT(begin_sequence_number_inclusive, end_sequence_number_exclusive); + + int64_t start_seq = + packet_arrival_times_.clamp(begin_sequence_number_inclusive); + + int64_t end_seq = packet_arrival_times_.clamp(end_sequence_number_exclusive); + + // Create the packet on demand, as it's not certain that there are packets + // in the range that have been received. + std::unique_ptr feedback_packet = nullptr; + + int64_t next_sequence_number = begin_sequence_number_inclusive; + for (int64_t seq = start_seq; seq < end_seq; ++seq) { + int64_t arrival_time_ms = packet_arrival_times_.get(seq); + if (arrival_time_ms == 0) { + // Packet not received. + continue; + } + + if (feedback_packet == nullptr) { + feedback_packet = + std::make_unique(include_timestamps); + // TODO(sprang): Measure receive times in microseconds and remove the + // conversions below. + feedback_packet->SetMediaSsrc(media_ssrc_); + // Base sequence number is the expected first sequence number. This is + // known, but we might not have actually received it, so the base time + // shall be the time of the first received packet in the feedback. 
+ feedback_packet->SetBase( + static_cast(begin_sequence_number_inclusive & 0xFFFF), + arrival_time_ms * 1000); + feedback_packet->SetFeedbackSequenceNumber(feedback_packet_count_++); + } + + if (!feedback_packet->AddReceivedPacket(static_cast(seq & 0xFFFF), + arrival_time_ms * 1000)) { // Could not add timestamp, feedback packet might be full. Return and // try again with a fresh packet. break; } - next_sequence_number = it->first + 1; + + next_sequence_number = seq + 1; + } + if (is_periodic_update) { + periodic_window_start_seq_ = next_sequence_number; } - return next_sequence_number; + return feedback_packet; } } // namespace webrtc diff --git a/modules/remote_bitrate_estimator/remote_estimator_proxy.h b/modules/remote_bitrate_estimator/remote_estimator_proxy.h index e11eb1fa7a..4f89409995 100644 --- a/modules/remote_bitrate_estimator/remote_estimator_proxy.h +++ b/modules/remote_bitrate_estimator/remote_estimator_proxy.h @@ -11,20 +11,22 @@ #ifndef MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_ESTIMATOR_PROXY_H_ #define MODULES_REMOTE_BITRATE_ESTIMATOR_REMOTE_ESTIMATOR_PROXY_H_ -#include +#include +#include +#include #include #include "api/transport/network_control.h" #include "api/transport/webrtc_key_value_config.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" -#include "rtc_base/critical_section.h" +#include "modules/remote_bitrate_estimator/packet_arrival_map.h" #include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/numerics/sequence_number_util.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { class Clock; -class PacketRouter; namespace rtcp { class TransportFeedback; } @@ -32,11 +34,14 @@ class TransportFeedback; // Class used when send-side BWE is enabled: This proxy is instantiated on the // receive side. It buffers a number of receive timestamps and then sends // transport feedback messages back too the send side. 
- class RemoteEstimatorProxy : public RemoteBitrateEstimator { public: + // Used for sending transport feedback messages when send side + // BWE is used. + using TransportFeedbackSender = std::function> packets)>; RemoteEstimatorProxy(Clock* clock, - TransportFeedbackSenderInterface* feedback_sender, + TransportFeedbackSender feedback_sender, const WebRtcKeyValueConfig* key_value_config, NetworkStateEstimator* network_state_estimator); ~RemoteEstimatorProxy() override; @@ -71,37 +76,51 @@ class RemoteEstimatorProxy : public RemoteBitrateEstimator { } }; - static const int kMaxNumberOfPackets; - + void MaybeCullOldPackets(int64_t sequence_number, int64_t arrival_time_ms) + RTC_EXCLUSIVE_LOCKS_REQUIRED(&lock_); void SendPeriodicFeedbacks() RTC_EXCLUSIVE_LOCKS_REQUIRED(&lock_); void SendFeedbackOnRequest(int64_t sequence_number, const FeedbackRequest& feedback_request) RTC_EXCLUSIVE_LOCKS_REQUIRED(&lock_); - static int64_t BuildFeedbackPacket( - uint8_t feedback_packet_count, - uint32_t media_ssrc, - int64_t base_sequence_number, - std::map::const_iterator - begin_iterator, // |begin_iterator| is inclusive. - std::map::const_iterator - end_iterator, // |end_iterator| is exclusive. - rtcp::TransportFeedback* feedback_packet); + + // Returns a Transport Feedback packet with information about as many packets + // that has been received between [`begin_sequence_number_incl`, + // `end_sequence_number_excl`) that can fit in it. If `is_periodic_update`, + // this represents sending a periodic feedback message, which will make it + // update the `periodic_window_start_seq_` variable with the first packet that + // was not included in the feedback packet, so that the next update can + // continue from that sequence number. + // + // If no incoming packets were added, nullptr is returned. + // + // `include_timestamps` decide if the returned TransportFeedback should + // include timestamps. 
+ std::unique_ptr MaybeBuildFeedbackPacket( + bool include_timestamps, + int64_t begin_sequence_number_inclusive, + int64_t end_sequence_number_exclusive, + bool is_periodic_update) RTC_EXCLUSIVE_LOCKS_REQUIRED(&lock_); Clock* const clock_; - TransportFeedbackSenderInterface* const feedback_sender_; + const TransportFeedbackSender feedback_sender_; const TransportWideFeedbackConfig send_config_; int64_t last_process_time_ms_; - rtc::CriticalSection lock_; + Mutex lock_; // |network_state_estimator_| may be null. NetworkStateEstimator* const network_state_estimator_ RTC_PT_GUARDED_BY(&lock_); uint32_t media_ssrc_ RTC_GUARDED_BY(&lock_); uint8_t feedback_packet_count_ RTC_GUARDED_BY(&lock_); SeqNumUnwrapper unwrapper_ RTC_GUARDED_BY(&lock_); + + // The next sequence number that should be the start sequence number during + // periodic reporting. Will be absl::nullopt before the first seen packet. absl::optional periodic_window_start_seq_ RTC_GUARDED_BY(&lock_); - // Map unwrapped seq -> time. - std::map packet_arrival_times_ RTC_GUARDED_BY(&lock_); + + // Packet arrival times, by sequence number. 
+ PacketArrivalTimeMap packet_arrival_times_ RTC_GUARDED_BY(&lock_); + int64_t send_interval_ms_ RTC_GUARDED_BY(&lock_); bool send_periodic_feedback_ RTC_GUARDED_BY(&lock_); diff --git a/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc b/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc index f7e8ffc9fc..296724fa71 100644 --- a/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc +++ b/modules/remote_bitrate_estimator/remote_estimator_proxy_unittest.cc @@ -16,8 +16,8 @@ #include "api/transport/field_trial_based_config.h" #include "api/transport/network_types.h" #include "api/transport/test/mock_network_control.h" -#include "modules/pacing/packet_router.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" +#include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "system_wrappers/include/clock.h" #include "test/gmock.h" #include "test/gtest.h" @@ -25,6 +25,7 @@ using ::testing::_; using ::testing::ElementsAre; using ::testing::Invoke; +using ::testing::MockFunction; using ::testing::Return; using ::testing::SizeIs; @@ -63,19 +64,12 @@ std::vector TimestampsMs( return timestamps; } -class MockTransportFeedbackSender : public TransportFeedbackSenderInterface { - public: - MOCK_METHOD1( - SendCombinedRtcpPacket, - bool(std::vector> feedback_packets)); -}; - class RemoteEstimatorProxyTest : public ::testing::Test { public: RemoteEstimatorProxyTest() : clock_(0), proxy_(&clock_, - &router_, + feedback_sender_.AsStdFunction(), &field_trial_config_, &network_state_estimator_) {} @@ -112,7 +106,8 @@ class RemoteEstimatorProxyTest : public ::testing::Test { FieldTrialBasedConfig field_trial_config_; SimulatedClock clock_; - ::testing::StrictMock router_; + MockFunction>)> + feedback_sender_; ::testing::NiceMock network_state_estimator_; RemoteEstimatorProxy proxy_; }; @@ -120,7 +115,7 @@ class RemoteEstimatorProxyTest : public ::testing::Test { TEST_F(RemoteEstimatorProxyTest, 
SendsSinglePacketFeedback) { IncomingPacket(kBaseSeq, kBaseTimeMs); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -133,7 +128,6 @@ TEST_F(RemoteEstimatorProxyTest, SendsSinglePacketFeedback) { ElementsAre(kBaseSeq)); EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs)); - return true; })); Process(); @@ -143,7 +137,7 @@ TEST_F(RemoteEstimatorProxyTest, DuplicatedPackets) { IncomingPacket(kBaseSeq, kBaseTimeMs); IncomingPacket(kBaseSeq, kBaseTimeMs + 1000); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -166,13 +160,13 @@ TEST_F(RemoteEstimatorProxyTest, FeedbackWithMissingStart) { // First feedback. IncomingPacket(kBaseSeq, kBaseTimeMs); IncomingPacket(kBaseSeq + 1, kBaseTimeMs + 1000); - EXPECT_CALL(router_, SendCombinedRtcpPacket).WillOnce(Return(true)); + EXPECT_CALL(feedback_sender_, Call); Process(); // Second feedback starts with a missing packet (DROP kBaseSeq + 2). 
IncomingPacket(kBaseSeq + 3, kBaseTimeMs + 3000); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -185,7 +179,6 @@ TEST_F(RemoteEstimatorProxyTest, FeedbackWithMissingStart) { ElementsAre(kBaseSeq + 3)); EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs + 3000)); - return true; })); Process(); @@ -196,7 +189,7 @@ TEST_F(RemoteEstimatorProxyTest, SendsFeedbackWithVaryingDeltas) { IncomingPacket(kBaseSeq + 1, kBaseTimeMs + kMaxSmallDeltaMs); IncomingPacket(kBaseSeq + 2, kBaseTimeMs + (2 * kMaxSmallDeltaMs) + 1); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -210,7 +203,6 @@ TEST_F(RemoteEstimatorProxyTest, SendsFeedbackWithVaryingDeltas) { EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs, kBaseTimeMs + kMaxSmallDeltaMs, kBaseTimeMs + (2 * kMaxSmallDeltaMs) + 1)); - return true; })); Process(); @@ -223,7 +215,7 @@ TEST_F(RemoteEstimatorProxyTest, SendsFragmentedFeedback) { IncomingPacket(kBaseSeq, kBaseTimeMs); IncomingPacket(kBaseSeq + 1, kBaseTimeMs + kTooLargeDelta); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -236,7 +228,6 @@ TEST_F(RemoteEstimatorProxyTest, SendsFragmentedFeedback) { ElementsAre(kBaseSeq)); EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs)); - return true; })) .WillOnce(Invoke( [](std::vector> feedback_packets) { @@ -250,7 +241,6 @@ TEST_F(RemoteEstimatorProxyTest, SendsFragmentedFeedback) { ElementsAre(kBaseSeq + 1)); EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs + kTooLargeDelta)); - return true; })); Process(); @@ -262,7 +252,7 @@ 
TEST_F(RemoteEstimatorProxyTest, HandlesReorderingAndWrap) { IncomingPacket(kBaseSeq, kBaseTimeMs); IncomingPacket(kLargeSeq, kBaseTimeMs + kDeltaMs); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [&](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -273,7 +263,6 @@ TEST_F(RemoteEstimatorProxyTest, HandlesReorderingAndWrap) { EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs + kDeltaMs, kBaseTimeMs)); - return true; })); Process(); @@ -292,7 +281,7 @@ TEST_F(RemoteEstimatorProxyTest, HandlesMalformedSequenceNumbers) { } // Only expect feedback for the last two packets. - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [&](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -305,7 +294,6 @@ TEST_F(RemoteEstimatorProxyTest, HandlesMalformedSequenceNumbers) { EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs + 28 * kDeltaMs, kBaseTimeMs + 29 * kDeltaMs)); - return true; })); Process(); @@ -323,7 +311,7 @@ TEST_F(RemoteEstimatorProxyTest, HandlesBackwardsWrappingSequenceNumbers) { } // Only expect feedback for the first two packets. 
- EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [&](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -335,7 +323,6 @@ TEST_F(RemoteEstimatorProxyTest, HandlesBackwardsWrappingSequenceNumbers) { ElementsAre(kBaseSeq + 40000, kBaseSeq)); EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs + kDeltaMs, kBaseTimeMs)); - return true; })); Process(); @@ -345,7 +332,7 @@ TEST_F(RemoteEstimatorProxyTest, ResendsTimestampsOnReordering) { IncomingPacket(kBaseSeq, kBaseTimeMs); IncomingPacket(kBaseSeq + 2, kBaseTimeMs + 2); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -358,14 +345,13 @@ TEST_F(RemoteEstimatorProxyTest, ResendsTimestampsOnReordering) { ElementsAre(kBaseSeq, kBaseSeq + 2)); EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs, kBaseTimeMs + 2)); - return true; })); Process(); IncomingPacket(kBaseSeq + 1, kBaseTimeMs + 1); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -378,7 +364,6 @@ TEST_F(RemoteEstimatorProxyTest, ResendsTimestampsOnReordering) { ElementsAre(kBaseSeq + 1, kBaseSeq + 2)); EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs + 1, kBaseTimeMs + 2)); - return true; })); Process(); @@ -389,7 +374,7 @@ TEST_F(RemoteEstimatorProxyTest, RemovesTimestampsOutOfScope) { IncomingPacket(kBaseSeq + 2, kBaseTimeMs); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -399,14 +384,13 @@ TEST_F(RemoteEstimatorProxyTest, RemovesTimestampsOutOfScope) { EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs)); - 
return true; })); Process(); IncomingPacket(kBaseSeq + 3, kTimeoutTimeMs); // kBaseSeq + 2 times out here. - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [&](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -416,7 +400,6 @@ TEST_F(RemoteEstimatorProxyTest, RemovesTimestampsOutOfScope) { EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kTimeoutTimeMs)); - return true; })); Process(); @@ -426,7 +409,7 @@ TEST_F(RemoteEstimatorProxyTest, RemovesTimestampsOutOfScope) { IncomingPacket(kBaseSeq, kBaseTimeMs - 1); IncomingPacket(kBaseSeq + 1, kTimeoutTimeMs - 1); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [&](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -439,7 +422,6 @@ TEST_F(RemoteEstimatorProxyTest, RemovesTimestampsOutOfScope) { EXPECT_THAT(TimestampsMs(*feedback_packet), ElementsAre(kBaseTimeMs - 1, kTimeoutTimeMs - 1, kTimeoutTimeMs)); - return true; })); Process(); @@ -495,7 +477,7 @@ TEST_F(RemoteEstimatorProxyOnRequestTest, TimeUntilNextProcessIsHigh) { TEST_F(RemoteEstimatorProxyOnRequestTest, ProcessDoesNotSendFeedback) { proxy_.SetSendPeriodicFeedback(false); IncomingPacket(kBaseSeq, kBaseTimeMs); - EXPECT_CALL(router_, SendCombinedRtcpPacket).Times(0); + EXPECT_CALL(feedback_sender_, Call).Times(0); Process(); } @@ -505,7 +487,7 @@ TEST_F(RemoteEstimatorProxyOnRequestTest, RequestSinglePacketFeedback) { IncomingPacket(kBaseSeq + 1, kBaseTimeMs + kMaxSmallDeltaMs); IncomingPacket(kBaseSeq + 2, kBaseTimeMs + 2 * kMaxSmallDeltaMs); - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -518,7 +500,6 @@ TEST_F(RemoteEstimatorProxyOnRequestTest, RequestSinglePacketFeedback) { ElementsAre(kBaseSeq + 3)); EXPECT_THAT(TimestampsMs(*feedback_packet), 
ElementsAre(kBaseTimeMs + 3 * kMaxSmallDeltaMs)); - return true; })); constexpr FeedbackRequest kSinglePacketFeedbackRequest = { @@ -534,7 +515,7 @@ TEST_F(RemoteEstimatorProxyOnRequestTest, RequestLastFivePacketFeedback) { IncomingPacket(kBaseSeq + i, kBaseTimeMs + i * kMaxSmallDeltaMs); } - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -552,7 +533,6 @@ TEST_F(RemoteEstimatorProxyOnRequestTest, RequestLastFivePacketFeedback) { kBaseTimeMs + 8 * kMaxSmallDeltaMs, kBaseTimeMs + 9 * kMaxSmallDeltaMs, kBaseTimeMs + 10 * kMaxSmallDeltaMs)); - return true; })); constexpr FeedbackRequest kFivePacketsFeedbackRequest = { @@ -570,7 +550,7 @@ TEST_F(RemoteEstimatorProxyOnRequestTest, IncomingPacket(kBaseSeq + i, kBaseTimeMs + i * kMaxSmallDeltaMs); } - EXPECT_CALL(router_, SendCombinedRtcpPacket) + EXPECT_CALL(feedback_sender_, Call) .WillOnce(Invoke( [](std::vector> feedback_packets) { rtcp::TransportFeedback* feedback_packet = @@ -585,7 +565,6 @@ TEST_F(RemoteEstimatorProxyOnRequestTest, ElementsAre(kBaseTimeMs + 6 * kMaxSmallDeltaMs, kBaseTimeMs + 8 * kMaxSmallDeltaMs, kBaseTimeMs + 10 * kMaxSmallDeltaMs)); - return true; })); constexpr FeedbackRequest kFivePacketsFeedbackRequest = { @@ -657,13 +636,7 @@ TEST_F(RemoteEstimatorProxyTest, SendTransportFeedbackAndNetworkStateUpdate) { AbsoluteSendTime::MsTo24Bits(kBaseTimeMs - 1))); EXPECT_CALL(network_state_estimator_, GetCurrentEstimate()) .WillOnce(Return(NetworkStateEstimate())); - EXPECT_CALL(router_, SendCombinedRtcpPacket) - .WillOnce( - [](std::vector> feedback_packets) { - EXPECT_THAT(feedback_packets, SizeIs(2)); - return true; - }); - + EXPECT_CALL(feedback_sender_, Call(SizeIs(2))); Process(); } diff --git a/modules/remote_bitrate_estimator/test/bwe_test_logging.cc b/modules/remote_bitrate_estimator/test/bwe_test_logging.cc index cf44fa070a..f99576f59a 100644 --- 
a/modules/remote_bitrate_estimator/test/bwe_test_logging.cc +++ b/modules/remote_bitrate_estimator/test/bwe_test_logging.cc @@ -61,27 +61,27 @@ Logging* Logging::GetInstance() { } void Logging::SetGlobalContext(uint32_t name) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); thread_map_[rtc::CurrentThreadId()].global_state.tag = ToString(name); } void Logging::SetGlobalContext(const std::string& name) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); thread_map_[rtc::CurrentThreadId()].global_state.tag = name; } void Logging::SetGlobalContext(const char* name) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); thread_map_[rtc::CurrentThreadId()].global_state.tag = name; } void Logging::SetGlobalEnable(bool enabled) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); thread_map_[rtc::CurrentThreadId()].global_state.enabled = enabled; } void Logging::Log(const char format[], ...) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId()); RTC_DCHECK(it != thread_map_.end()); const State& state = it->second.stack.top(); @@ -118,7 +118,7 @@ void Logging::Plot(int figure, double value, uint32_t ssrc, const std::string& alg_name) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId()); RTC_DCHECK(it != thread_map_.end()); const State& state = it->second.stack.top(); @@ -132,7 +132,7 @@ void Logging::PlotBar(int figure, const std::string& name, double value, int flow_id) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId()); RTC_DCHECK(it != thread_map_.end()); const State& state = it->second.stack.top(); @@ -145,7 +145,7 @@ void Logging::PlotBaselineBar(int figure, const std::string& name, double value, int flow_id) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ThreadMap::iterator it = 
thread_map_.find(rtc::CurrentThreadId()); RTC_DCHECK(it != thread_map_.end()); const State& state = it->second.stack.top(); @@ -161,7 +161,7 @@ void Logging::PlotErrorBar(int figure, double yhigh, const std::string& error_title, int flow_id) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId()); RTC_DCHECK(it != thread_map_.end()); const State& state = it->second.stack.top(); @@ -180,7 +180,7 @@ void Logging::PlotLimitErrorBar(int figure, double ymax, const std::string& limit_title, int flow_id) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId()); RTC_DCHECK(it != thread_map_.end()); const State& state = it->second.stack.top(); @@ -195,7 +195,7 @@ void Logging::PlotLabel(int figure, const std::string& title, const std::string& y_label, int num_flows) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId()); RTC_DCHECK(it != thread_map_.end()); const State& state = it->second.stack.top(); @@ -229,7 +229,7 @@ void Logging::State::MergePrevious(const State& previous) { void Logging::PushState(const std::string& append_to_tag, int64_t timestamp_ms, bool enabled) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); State new_state(append_to_tag, timestamp_ms, enabled); ThreadState* thread_state = &thread_map_[rtc::CurrentThreadId()]; std::stack* stack = &thread_state->stack; @@ -242,7 +242,7 @@ void Logging::PushState(const std::string& append_to_tag, } void Logging::PopState() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ThreadMap::iterator it = thread_map_.find(rtc::CurrentThreadId()); RTC_DCHECK(it != thread_map_.end()); std::stack* stack = &it->second.stack; diff --git a/modules/remote_bitrate_estimator/test/bwe_test_logging.h b/modules/remote_bitrate_estimator/test/bwe_test_logging.h index a399d0b694..079a7f888d 
100644 --- a/modules/remote_bitrate_estimator/test/bwe_test_logging.h +++ b/modules/remote_bitrate_estimator/test/bwe_test_logging.h @@ -129,7 +129,7 @@ #include #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #define BWE_TEST_LOGGING_GLOBAL_CONTEXT(name) \ do { \ @@ -263,10 +263,11 @@ class Logging { Context(uint32_t name, int64_t timestamp_ms, bool enabled); Context(const std::string& name, int64_t timestamp_ms, bool enabled); Context(const char* name, int64_t timestamp_ms, bool enabled); - ~Context(); - private: - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Context); + Context() = delete; + Context(const Context&) = delete; + Context& operator=(const Context&) = delete; + ~Context(); }; static Logging* GetInstance(); @@ -345,7 +346,7 @@ class Logging { bool enabled); void PopState(); - rtc::CriticalSection crit_sect_; + Mutex mutex_; ThreadMap thread_map_; RTC_DISALLOW_COPY_AND_ASSIGN(Logging); diff --git a/modules/remote_bitrate_estimator/tools/bwe_rtp.cc b/modules/remote_bitrate_estimator/tools/bwe_rtp.cc index c0b3a37ba5..403f81fd03 100644 --- a/modules/remote_bitrate_estimator/tools/bwe_rtp.cc +++ b/modules/remote_bitrate_estimator/tools/bwe_rtp.cc @@ -18,10 +18,8 @@ #include "absl/flags/flag.h" #include "absl/flags/parse.h" -#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_abs_send_time.h" -#include "modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h" +#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "test/rtp_file_reader.h" -#include "test/rtp_header_parser.h" ABSL_FLAG(std::string, extension_type, @@ -65,14 +63,11 @@ std::set SsrcFilter() { return ssrcs; } -std::unique_ptr ParseArgsAndSetupEstimator( +bool ParseArgsAndSetupRtpReader( int argc, char** argv, - webrtc::Clock* clock, - webrtc::RemoteBitrateObserver* observer, - std::unique_ptr* rtp_reader, - std::unique_ptr* estimator, - std::string* estimator_used) { + 
std::unique_ptr& rtp_reader, + webrtc::RtpHeaderExtensionMap& rtp_header_extensions) { absl::ParseCommandLine(argc, argv); std::string filename = InputFile(); @@ -84,16 +79,16 @@ std::unique_ptr ParseArgsAndSetupEstimator( fprintf(stderr, "\n"); if (filename.substr(filename.find_last_of('.')) == ".pcap") { fprintf(stderr, "Opening as pcap\n"); - rtp_reader->reset(webrtc::test::RtpFileReader::Create( + rtp_reader.reset(webrtc::test::RtpFileReader::Create( webrtc::test::RtpFileReader::kPcap, filename.c_str(), SsrcFilter())); } else { fprintf(stderr, "Opening as rtp\n"); - rtp_reader->reset(webrtc::test::RtpFileReader::Create( + rtp_reader.reset(webrtc::test::RtpFileReader::Create( webrtc::test::RtpFileReader::kRtpDump, filename.c_str())); } - if (!*rtp_reader) { + if (!rtp_reader) { fprintf(stderr, "Cannot open input file %s\n", filename.c_str()); - return nullptr; + return false; } fprintf(stderr, "Input file: %s\n\n", filename.c_str()); @@ -105,31 +100,10 @@ std::unique_ptr ParseArgsAndSetupEstimator( fprintf(stderr, "Extension: abs\n"); } else { fprintf(stderr, "Unknown extension type\n"); - return nullptr; + return false; } - // Setup the RTP header parser and the bitrate estimator. 
- auto parser = webrtc::RtpHeaderParser::CreateForTest(); - parser->RegisterRtpHeaderExtension(extension, ExtensionId()); - if (estimator) { - switch (extension) { - case webrtc::kRtpExtensionAbsoluteSendTime: { - estimator->reset( - new webrtc::RemoteBitrateEstimatorAbsSendTime(observer, clock)); - *estimator_used = "AbsoluteSendTimeRemoteBitrateEstimator"; - break; - } - case webrtc::kRtpExtensionTransmissionTimeOffset: { - estimator->reset( - new webrtc::RemoteBitrateEstimatorSingleStream(observer, clock)); - *estimator_used = "RemoteBitrateEstimator"; - break; - } - default: - assert(false); - return nullptr; - } - } + rtp_header_extensions.RegisterByType(ExtensionId(), extension); - return parser; + return true; } diff --git a/modules/remote_bitrate_estimator/tools/bwe_rtp.h b/modules/remote_bitrate_estimator/tools/bwe_rtp.h index 4285f926b5..3b161db37b 100644 --- a/modules/remote_bitrate_estimator/tools/bwe_rtp.h +++ b/modules/remote_bitrate_estimator/tools/bwe_rtp.h @@ -12,25 +12,14 @@ #define MODULES_REMOTE_BITRATE_ESTIMATOR_TOOLS_BWE_RTP_H_ #include -#include -namespace webrtc { -class Clock; -class RemoteBitrateEstimator; -class RemoteBitrateObserver; -class RtpHeaderParser; -namespace test { -class RtpFileReader; -} -} // namespace webrtc +#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" +#include "test/rtp_file_reader.h" -std::unique_ptr ParseArgsAndSetupEstimator( +bool ParseArgsAndSetupRtpReader( int argc, char** argv, - webrtc::Clock* clock, - webrtc::RemoteBitrateObserver* observer, - std::unique_ptr* rtp_reader, - std::unique_ptr* estimator, - std::string* estimator_used); + std::unique_ptr& rtp_reader, + webrtc::RtpHeaderExtensionMap& rtp_header_extensions); #endif // MODULES_REMOTE_BITRATE_ESTIMATOR_TOOLS_BWE_RTP_H_ diff --git a/modules/remote_bitrate_estimator/tools/rtp_to_text.cc b/modules/remote_bitrate_estimator/tools/rtp_to_text.cc index 7f1e009793..98f502a42e 100644 --- a/modules/remote_bitrate_estimator/tools/rtp_to_text.cc 
+++ b/modules/remote_bitrate_estimator/tools/rtp_to_text.cc @@ -13,17 +13,19 @@ #include #include "modules/remote_bitrate_estimator/tools/bwe_rtp.h" +#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" +#include "modules/rtp_rtcp/source/rtp_header_extensions.h" +#include "modules/rtp_rtcp/source/rtp_packet.h" #include "rtc_base/format_macros.h" #include "rtc_base/strings/string_builder.h" #include "test/rtp_file_reader.h" -#include "test/rtp_header_parser.h" int main(int argc, char* argv[]) { std::unique_ptr reader; - std::unique_ptr parser(ParseArgsAndSetupEstimator( - argc, argv, nullptr, nullptr, &reader, nullptr, nullptr)); - if (!parser) + webrtc::RtpHeaderExtensionMap rtp_header_extensions; + if (!ParseArgsAndSetupRtpReader(argc, argv, reader, rtp_header_extensions)) { return -1; + } bool arrival_time_only = (argc >= 5 && strncmp(argv[4], "-t", 2) == 0); @@ -35,11 +37,15 @@ int main(int argc, char* argv[]) { int non_zero_ts_offsets = 0; webrtc::test::RtpPacket packet; while (reader->NextPacket(&packet)) { - webrtc::RTPHeader header; - parser->Parse(packet.data, packet.length, &header); - if (header.extension.absoluteSendTime != 0) + webrtc::RtpPacket header(&rtp_header_extensions); + header.Parse(packet.data, packet.length); + uint32_t abs_send_time = 0; + if (header.GetExtension(&abs_send_time) && + abs_send_time != 0) ++non_zero_abs_send_time; - if (header.extension.transmissionTimeOffset != 0) + int32_t toffset = 0; + if (header.GetExtension(&toffset) && + toffset != 0) ++non_zero_ts_offsets; if (arrival_time_only) { rtc::StringBuilder ss; @@ -47,11 +53,9 @@ int main(int argc, char* argv[]) { fprintf(stdout, "%s\n", ss.str().c_str()); } else { fprintf(stdout, "%u %u %d %u %u %d %u %" RTC_PRIuS " %" RTC_PRIuS "\n", - header.sequenceNumber, header.timestamp, - header.extension.transmissionTimeOffset, - header.extension.absoluteSendTime, packet.time_ms, - header.markerBit, header.ssrc, packet.length, - packet.original_length); + 
header.SequenceNumber(), header.Timestamp(), toffset, + abs_send_time, packet.time_ms, header.Marker(), header.Ssrc(), + packet.length, packet.original_length); } ++packet_counter; } diff --git a/modules/rtp_rtcp/BUILD.gn b/modules/rtp_rtcp/BUILD.gn index eda27ba68d..778baf6e15 100644 --- a/modules/rtp_rtcp/BUILD.gn +++ b/modules/rtp_rtcp/BUILD.gn @@ -52,6 +52,8 @@ rtc_library("rtp_rtcp_format") { "source/rtp_packet.h", "source/rtp_packet_received.h", "source/rtp_packet_to_send.h", + "source/rtp_util.h", + "source/rtp_video_layers_allocation_extension.h", ] sources = [ "include/report_block_data.cc", @@ -95,29 +97,34 @@ rtc_library("rtp_rtcp_format") { "source/rtp_packet.cc", "source/rtp_packet_received.cc", "source/rtp_packet_to_send.cc", + "source/rtp_util.cc", + "source/rtp_video_layers_allocation_extension.cc", ] deps = [ - "..:module_api", "..:module_api_public", "../../api:array_view", "../../api:function_view", + "../../api:refcountedbase", "../../api:rtp_headers", "../../api:rtp_parameters", + "../../api:scoped_refptr", "../../api/audio_codecs:audio_codecs_api", "../../api/transport:network_control", "../../api/transport/rtp:dependency_descriptor", "../../api/units:time_delta", + "../../api/units:timestamp", "../../api/video:video_frame", + "../../api/video:video_layers_allocation", "../../api/video:video_rtp_headers", "../../common_video", "../../rtc_base:checks", - "../../rtc_base:deprecation", "../../rtc_base:divide_round", "../../rtc_base:rtc_base_approved", - "../../rtc_base/system:unused", "../../system_wrappers", "../video_coding:codec_globals_headers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", @@ -132,12 +139,15 @@ rtc_library("rtp_rtcp") { "include/flexfec_sender.h", "include/receive_statistics.h", "include/remote_ntp_time_estimator.h", - "include/rtp_rtcp.h", "include/ulpfec_receiver.h", - 
"source/absolute_capture_time_receiver.cc", - "source/absolute_capture_time_receiver.h", + "source/absolute_capture_time_interpolator.cc", + "source/absolute_capture_time_interpolator.h", "source/absolute_capture_time_sender.cc", "source/absolute_capture_time_sender.h", + "source/active_decode_targets_helper.cc", + "source/active_decode_targets_helper.h", + "source/capture_clock_offset_updater.cc", + "source/capture_clock_offset_updater.h", "source/create_video_rtp_depacketizer.cc", "source/create_video_rtp_depacketizer.h", "source/dtmf_queue.cc", @@ -156,6 +166,8 @@ rtc_library("rtp_rtcp") { "source/forward_error_correction_internal.h", "source/packet_loss_stats.cc", "source/packet_loss_stats.h", + "source/packet_sequencer.cc", + "source/packet_sequencer.h", "source/receive_statistics_impl.cc", "source/receive_statistics_impl.h", "source/remote_ntp_time_estimator.cc", @@ -184,10 +196,9 @@ rtc_library("rtp_rtcp") { "source/rtp_packetizer_av1.cc", "source/rtp_packetizer_av1.h", "source/rtp_rtcp_config.h", - "source/rtp_rtcp_impl.cc", - "source/rtp_rtcp_impl.h", "source/rtp_rtcp_impl2.cc", "source/rtp_rtcp_impl2.h", + "source/rtp_rtcp_interface.h", "source/rtp_sender.cc", "source/rtp_sender.h", "source/rtp_sender_audio.cc", @@ -240,10 +251,8 @@ rtc_library("rtp_rtcp") { deps = [ ":rtp_rtcp_format", ":rtp_video_header", - "..:module_api", "..:module_api_public", "..:module_fec_api", - "../..:webrtc_common", "../../api:array_view", "../../api:frame_transformer_interface", "../../api:function_view", @@ -252,6 +261,7 @@ rtc_library("rtp_rtcp") { "../../api:rtp_packet_info", "../../api:rtp_parameters", "../../api:scoped_refptr", + "../../api:sequence_checker", "../../api:transport_api", "../../api/audio_codecs:audio_codecs_api", "../../api/crypto:frame_encryptor_interface", @@ -271,6 +281,7 @@ rtc_library("rtp_rtcp") { "../../api/video:video_codec_constants", "../../api/video:video_frame", "../../api/video:video_frame_type", + "../../api/video:video_layers_allocation", 
"../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../call:rtp_interfaces", @@ -279,21 +290,27 @@ rtc_library("rtp_rtcp") { "../../logging:rtc_event_rtp_rtcp", "../../modules/audio_coding:audio_coding_module_typedefs", "../../rtc_base:checks", - "../../rtc_base:deprecation", "../../rtc_base:divide_round", "../../rtc_base:gtest_prod", "../../rtc_base:rate_limiter", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_numerics", "../../rtc_base:safe_minmax", + "../../rtc_base/containers:flat_map", "../../rtc_base/experiments:field_trial_parser", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:no_unique_address", + "../../rtc_base/task_utils:pending_task_safety_flag", + "../../rtc_base/task_utils:repeating_task", "../../rtc_base/task_utils:to_queued_task", "../../rtc_base/time:timestamp_extrapolator", + "../../rtc_base/containers:flat_map", "../../system_wrappers", "../../system_wrappers:metrics", "../remote_bitrate_estimator", "../video_coding:codec_globals_headers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/container:inlined_vector", @@ -304,6 +321,39 @@ rtc_library("rtp_rtcp") { ] } +rtc_source_set("rtp_rtcp_legacy") { + sources = [ + "include/rtp_rtcp.h", + "source/deprecated/deprecated_rtp_sender_egress.cc", + "source/deprecated/deprecated_rtp_sender_egress.h", + "source/rtp_rtcp_impl.cc", + "source/rtp_rtcp_impl.h", + ] + deps = [ + ":rtp_rtcp", + ":rtp_rtcp_format", + "..:module_api", + "..:module_fec_api", + "../../api:rtp_headers", + "../../api:transport_api", + "../../api/rtc_event_log", + "../../api/transport:field_trial_based_config", + "../../api/units:data_rate", + "../../api/video:video_bitrate_allocation", + "../../logging:rtc_event_rtp_rtcp", + "../../rtc_base:checks", + "../../rtc_base:gtest_prod", + 
"../../rtc_base:rtc_base_approved", + "../../rtc_base/synchronization:mutex", + "../../system_wrappers", + "../remote_bitrate_estimator", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + rtc_library("rtcp_transceiver") { visibility = [ "*" ] public = [ @@ -319,18 +369,21 @@ rtc_library("rtcp_transceiver") { deps = [ ":rtp_rtcp", ":rtp_rtcp_format", - "../../:webrtc_common", "../../api:array_view", "../../api:rtp_headers", "../../api:transport_api", "../../api/task_queue", + "../../api/units:timestamp", "../../api/video:video_bitrate_allocation", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", "../../rtc_base/task_utils:repeating_task", "../../rtc_base/task_utils:to_queued_task", "../../system_wrappers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/types:optional", ] @@ -343,12 +396,13 @@ rtc_library("rtp_video_header") { "source/rtp_video_header.h", ] deps = [ - "../../:webrtc_common", "../../api/transport/rtp:dependency_descriptor", "../../api/video:video_frame", "../../api/video:video_frame_type", "../../api/video:video_rtp_headers", "../../modules/video_coding:codec_globals_headers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/types:optional", "//third_party/abseil-cpp/absl/types:variant", @@ -364,7 +418,6 @@ rtc_library("fec_test_helper") { deps = [ ":rtp_rtcp", ":rtp_rtcp_format", - "..:module_api", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", ] @@ -386,27 +439,37 @@ rtc_library("mock_rtp_rtcp") { "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", "../../test:test_support", - "//third_party/abseil-cpp/absl/types:optional", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + 
+rtc_library("rtp_packetizer_av1_test_helper") { + testonly = true + sources = [ + "source/rtp_packetizer_av1_test_helper.cc", + "source/rtp_packetizer_av1_test_helper.h", ] } if (rtc_include_tests) { - rtc_executable("test_packet_masks_metrics") { - testonly = true + if (!build_with_chromium) { + rtc_executable("test_packet_masks_metrics") { + testonly = true - sources = [ - "test/testFec/average_residual_loss_xor_codes.h", - "test/testFec/test_packet_masks_metrics.cc", - ] + sources = [ + "test/testFec/average_residual_loss_xor_codes.h", + "test/testFec/test_packet_masks_metrics.cc", + ] - deps = [ - ":rtp_rtcp", - "../../test:fileutils", - "../../test:test_main", - "../../test:test_support", - "//testing/gtest", - ] - } # test_packet_masks_metrics + deps = [ + ":rtp_rtcp", + "../../test:fileutils", + "../../test:test_main", + "../../test:test_support", + "//testing/gtest", + ] + } # test_packet_masks_metrics + } rtc_library("rtp_rtcp_modules_tests") { testonly = true @@ -425,9 +488,11 @@ if (rtc_include_tests) { testonly = true sources = [ - "source/absolute_capture_time_receiver_unittest.cc", + "source/absolute_capture_time_interpolator_unittest.cc", "source/absolute_capture_time_sender_unittest.cc", + "source/active_decode_targets_helper_unittest.cc", "source/byte_io_unittest.cc", + "source/capture_clock_offset_updater_unittest.cc", "source/fec_private_tables_bursty_unittest.cc", "source/flexfec_header_reader_writer_unittest.cc", "source/flexfec_receiver_unittest.cc", @@ -483,10 +548,13 @@ if (rtc_include_tests) { "source/rtp_rtcp_impl2_unittest.cc", "source/rtp_rtcp_impl_unittest.cc", "source/rtp_sender_audio_unittest.cc", + "source/rtp_sender_egress_unittest.cc", "source/rtp_sender_unittest.cc", "source/rtp_sender_video_unittest.cc", "source/rtp_sequence_number_map_unittest.cc", + "source/rtp_util_unittest.cc", "source/rtp_utility_unittest.cc", + "source/rtp_video_layers_allocation_extension_unittest.cc", "source/source_tracker_unittest.cc", 
"source/time_util_unittest.cc", "source/ulpfec_generator_unittest.cc", @@ -503,10 +571,10 @@ if (rtc_include_tests) { ":fec_test_helper", ":mock_rtp_rtcp", ":rtcp_transceiver", + ":rtp_packetizer_av1_test_helper", ":rtp_rtcp", ":rtp_rtcp_format", - "..:module_api", - "../..:webrtc_common", + ":rtp_rtcp_legacy", "../../api:array_view", "../../api:libjingle_peerconnection_api", "../../api:mock_frame_encryptor", @@ -518,12 +586,15 @@ if (rtc_include_tests) { "../../api/rtc_event_log", "../../api/transport:field_trial_based_config", "../../api/transport/rtp:dependency_descriptor", + "../../api/units:data_size", + "../../api/units:time_delta", "../../api/units:timestamp", "../../api/video:encoded_image", "../../api/video:video_bitrate_allocation", "../../api/video:video_bitrate_allocator", "../../api/video:video_codec_constants", "../../api/video:video_frame", + "../../api/video:video_layers_allocation", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../call:rtp_receiver", @@ -537,6 +608,7 @@ if (rtc_include_tests) { "../../rtc_base:rtc_base_tests_utils", "../../rtc_base:rtc_numerics", "../../rtc_base:task_queue_for_test", + "../../rtc_base/task_utils:to_queued_task", "../../system_wrappers", "../../test:field_trial", "../../test:mock_frame_transformer", @@ -544,7 +616,10 @@ if (rtc_include_tests) { "../../test:rtp_test_utils", "../../test:test_common", "../../test:test_support", + "../../test/time_controller:time_controller", "../video_coding:codec_globals_headers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/memory", diff --git a/modules/rtp_rtcp/include/flexfec_receiver.h b/modules/rtp_rtcp/include/flexfec_receiver.h index 6df984f85a..b0caea68ff 100644 --- a/modules/rtp_rtcp/include/flexfec_receiver.h +++ b/modules/rtp_rtcp/include/flexfec_receiver.h @@ -15,11 +15,12 @@ #include +#include "api/sequence_checker.h" #include 
"modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/include/ulpfec_receiver.h" #include "modules/rtp_rtcp/source/forward_error_correction.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -69,7 +70,7 @@ class FlexfecReceiver { int64_t last_recovered_packet_ms_ RTC_GUARDED_BY(sequence_checker_); FecPacketCounter packet_counter_ RTC_GUARDED_BY(sequence_checker_); - SequenceChecker sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/include/flexfec_sender.h b/modules/rtp_rtcp/include/flexfec_sender.h index 7fe20181af..737593e04c 100644 --- a/modules/rtp_rtcp/include/flexfec_sender.h +++ b/modules/rtp_rtcp/include/flexfec_sender.h @@ -24,6 +24,7 @@ #include "modules/rtp_rtcp/source/video_fec_generator.h" #include "rtc_base/random.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -92,8 +93,8 @@ class FlexfecSender : public VideoFecGenerator { const RtpHeaderExtensionMap rtp_header_extension_map_; const size_t header_extensions_size_; - rtc::CriticalSection crit_; - RateStatistics fec_bitrate_ RTC_GUARDED_BY(crit_); + mutable Mutex mutex_; + RateStatistics fec_bitrate_ RTC_GUARDED_BY(mutex_); }; } // namespace webrtc diff --git a/modules/rtp_rtcp/include/receive_statistics.h b/modules/rtp_rtcp/include/receive_statistics.h index 4e6441340c..ce87b99a42 100644 --- a/modules/rtp_rtcp/include/receive_statistics.h +++ b/modules/rtp_rtcp/include/receive_statistics.h @@ -17,11 +17,9 @@ #include "absl/types/optional.h" #include "call/rtp_packet_sink_interface.h" -#include "modules/include/module.h" #include "modules/rtp_rtcp/include/rtcp_statistics.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include 
"modules/rtp_rtcp/source/rtcp_packet/report_block.h" -#include "rtc_base/deprecation.h" namespace webrtc { @@ -57,7 +55,12 @@ class ReceiveStatistics : public ReceiveStatisticsProvider, public: ~ReceiveStatistics() override = default; + // Returns a thread-safe instance of ReceiveStatistics. + // https://chromium.googlesource.com/chromium/src/+/lkgr/docs/threading_and_tasks.md#threading-lexicon static std::unique_ptr Create(Clock* clock); + // Returns a thread-compatible instance of ReceiveStatistics. + static std::unique_ptr CreateThreadCompatible( + Clock* clock); // Returns a pointer to the statistician of an ssrc. virtual StreamStatistician* GetStatistician(uint32_t ssrc) const = 0; diff --git a/modules/rtp_rtcp/include/rtcp_statistics.h b/modules/rtp_rtcp/include/rtcp_statistics.h index e26c475e31..de70c14943 100644 --- a/modules/rtp_rtcp/include/rtcp_statistics.h +++ b/modules/rtp_rtcp/include/rtcp_statistics.h @@ -17,22 +17,6 @@ namespace webrtc { -// Statistics for an RTCP channel -struct RtcpStatistics { - uint8_t fraction_lost = 0; - int32_t packets_lost = 0; // Defined as a 24 bit signed integer in RTCP - uint32_t extended_highest_sequence_number = 0; - uint32_t jitter = 0; -}; - -class RtcpStatisticsCallback { - public: - virtual ~RtcpStatisticsCallback() {} - - virtual void StatisticsUpdated(const RtcpStatistics& statistics, - uint32_t ssrc) = 0; -}; - // Statistics for RTCP packet types. 
struct RtcpPacketTypeCounter { RtcpPacketTypeCounter() diff --git a/modules/rtp_rtcp/include/rtp_header_extension_map.h b/modules/rtp_rtcp/include/rtp_header_extension_map.h index 360a619f82..72e5541d37 100644 --- a/modules/rtp_rtcp/include/rtp_header_extension_map.h +++ b/modules/rtp_rtcp/include/rtp_header_extension_map.h @@ -19,7 +19,6 @@ #include "api/rtp_parameters.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "rtc_base/checks.h" -#include "rtc_base/deprecation.h" namespace webrtc { @@ -51,10 +50,6 @@ class RtpHeaderExtensionMap { return ids_[type]; } - // TODO(danilchap): Remove use of the functions below. - RTC_DEPRECATED int32_t Register(RTPExtensionType type, int id) { - return RegisterByType(id, type) ? 0 : -1; - } int32_t Deregister(RTPExtensionType type); void Deregister(absl::string_view uri); diff --git a/modules/rtp_rtcp/include/rtp_rtcp.h b/modules/rtp_rtcp/include/rtp_rtcp.h index 2db523caaf..727fc6e649 100644 --- a/modules/rtp_rtcp/include/rtp_rtcp.h +++ b/modules/rtp_rtcp/include/rtp_rtcp.h @@ -12,456 +12,30 @@ #define MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_H_ #include -#include -#include -#include -#include -#include "absl/strings/string_view.h" -#include "absl/types/optional.h" -#include "api/frame_transformer_interface.h" -#include "api/scoped_refptr.h" -#include "api/transport/webrtc_key_value_config.h" -#include "api/video/video_bitrate_allocation.h" +#include "absl/base/attributes.h" #include "modules/include/module.h" -#include "modules/rtp_rtcp/include/receive_statistics.h" -#include "modules/rtp_rtcp/include/report_block_data.h" -#include "modules/rtp_rtcp/include/rtp_packet_sender.h" -#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "modules/rtp_rtcp/source/rtp_packet_to_send.h" -#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h" -#include "modules/rtp_rtcp/source/video_fec_generator.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/deprecation.h" +#include 
"modules/rtp_rtcp/source/rtp_rtcp_interface.h" namespace webrtc { -// Forward declarations. -class FrameEncryptorInterface; -class RateLimiter; -class ReceiveStatisticsProvider; -class RemoteBitrateEstimator; -class RtcEventLog; -class RTPSender; -class Transport; -class VideoBitrateAllocationObserver; - -namespace rtcp { -class TransportFeedback; -} - -class RtpRtcp : public Module, public RtcpFeedbackSenderInterface { +// DEPRECATED. Do not use. +class RtpRtcp : public Module, public RtpRtcpInterface { public: - struct Configuration { - Configuration() = default; - Configuration(Configuration&& rhs) = default; - - // True for a audio version of the RTP/RTCP module object false will create - // a video version. - bool audio = false; - bool receiver_only = false; - - // The clock to use to read time. If nullptr then system clock will be used. - Clock* clock = nullptr; - - ReceiveStatisticsProvider* receive_statistics = nullptr; - - // Transport object that will be called when packets are ready to be sent - // out on the network. - Transport* outgoing_transport = nullptr; - - // Called when the receiver requests an intra frame. - RtcpIntraFrameObserver* intra_frame_callback = nullptr; - - // Called when the receiver sends a loss notification. - RtcpLossNotificationObserver* rtcp_loss_notification_observer = nullptr; - - // Called when we receive a changed estimate from the receiver of out - // stream. - RtcpBandwidthObserver* bandwidth_callback = nullptr; - - NetworkStateEstimateObserver* network_state_estimate_observer = nullptr; - TransportFeedbackObserver* transport_feedback_callback = nullptr; - VideoBitrateAllocationObserver* bitrate_allocation_observer = nullptr; - RtcpRttStats* rtt_stats = nullptr; - RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer = nullptr; - // Called on receipt of RTCP report block from remote side. - // TODO(bugs.webrtc.org/10678): Remove RtcpStatisticsCallback in - // favor of ReportBlockDataObserver. 
- // TODO(bugs.webrtc.org/10679): Consider whether we want to use - // only getters or only callbacks. If we decide on getters, the - // ReportBlockDataObserver should also be removed in favor of - // GetLatestReportBlockData(). - RtcpStatisticsCallback* rtcp_statistics_callback = nullptr; - RtcpCnameCallback* rtcp_cname_callback = nullptr; - ReportBlockDataObserver* report_block_data_observer = nullptr; - - // Estimates the bandwidth available for a set of streams from the same - // client. - RemoteBitrateEstimator* remote_bitrate_estimator = nullptr; - - // Spread any bursts of packets into smaller bursts to minimize packet loss. - RtpPacketSender* paced_sender = nullptr; - - // Generates FEC packets. - // TODO(sprang): Wire up to RtpSenderEgress. - VideoFecGenerator* fec_generator = nullptr; - - BitrateStatisticsObserver* send_bitrate_observer = nullptr; - SendSideDelayObserver* send_side_delay_observer = nullptr; - RtcEventLog* event_log = nullptr; - SendPacketObserver* send_packet_observer = nullptr; - RateLimiter* retransmission_rate_limiter = nullptr; - StreamDataCountersCallback* rtp_stats_callback = nullptr; - - int rtcp_report_interval_ms = 0; - - // Update network2 instead of pacer_exit field of video timing extension. - bool populate_network2_timestamp = false; - - rtc::scoped_refptr frame_transformer; - - // E2EE Custom Video Frame Encryption - FrameEncryptorInterface* frame_encryptor = nullptr; - // Require all outgoing frames to be encrypted with a FrameEncryptor. - bool require_frame_encryption = false; - - // Corresponds to extmap-allow-mixed in SDP negotiation. - bool extmap_allow_mixed = false; - - // If true, the RTP sender will always annotate outgoing packets with - // MID and RID header extensions, if provided and negotiated. - // If false, the RTP sender will stop sending MID and RID header extensions, - // when it knows that the receiver is ready to demux based on SSRC. This is - // done by RTCP RR acking. 
- bool always_send_mid_and_rid = false; - - // If set, field trials are read from |field_trials|, otherwise - // defaults to webrtc::FieldTrialBasedConfig. - const WebRtcKeyValueConfig* field_trials = nullptr; - - // SSRCs for media and retransmission, respectively. - // FlexFec SSRC is fetched from |flexfec_sender|. - uint32_t local_media_ssrc = 0; - absl::optional rtx_send_ssrc; - - bool need_rtp_packet_infos = false; - - // If true, the RTP packet history will select RTX packets based on - // heuristics such as send time, retransmission count etc, in order to - // make padding potentially more useful. - // If false, the last packet will always be picked. This may reduce CPU - // overhead. - bool enable_rtx_padding_prioritization = true; - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(Configuration); - }; - - // Creates an RTP/RTCP module object using provided |configuration|. - static std::unique_ptr Create(const Configuration& configuration); - - // ************************************************************************** - // Receiver functions - // ************************************************************************** - - virtual void IncomingRtcpPacket(const uint8_t* incoming_packet, - size_t incoming_packet_length) = 0; - - virtual void SetRemoteSSRC(uint32_t ssrc) = 0; - - // ************************************************************************** - // Sender - // ************************************************************************** - - // Sets the maximum size of an RTP packet, including RTP headers. - virtual void SetMaxRtpPacketSize(size_t size) = 0; - - // Returns max RTP packet size. Takes into account RTP headers and - // FEC/ULP/RED overhead (when FEC is enabled). - virtual size_t MaxRtpPacketSize() const = 0; - - virtual void RegisterSendPayloadFrequency(int payload_type, - int payload_frequency) = 0; - - // Unregisters a send payload. - // |payload_type| - payload type of codec - // Returns -1 on failure else 0. 
- virtual int32_t DeRegisterSendPayload(int8_t payload_type) = 0; - - virtual void SetExtmapAllowMixed(bool extmap_allow_mixed) = 0; - - // (De)registers RTP header extension type and id. - // Returns -1 on failure else 0. - RTC_DEPRECATED virtual int32_t RegisterSendRtpHeaderExtension( - RTPExtensionType type, - uint8_t id) = 0; - // Register extension by uri, triggers CHECK on falure. - virtual void RegisterRtpHeaderExtension(absl::string_view uri, int id) = 0; - - virtual int32_t DeregisterSendRtpHeaderExtension(RTPExtensionType type) = 0; - virtual void DeregisterSendRtpHeaderExtension(absl::string_view uri) = 0; - - // Returns true if RTP module is send media, and any of the extensions - // required for bandwidth estimation is registered. - virtual bool SupportsPadding() const = 0; - // Same as SupportsPadding(), but additionally requires that - // SetRtxSendStatus() has been called with the kRtxRedundantPayloads option - // enabled. - virtual bool SupportsRtxPayloadPadding() const = 0; - - // Returns start timestamp. - virtual uint32_t StartTimestamp() const = 0; - - // Sets start timestamp. Start timestamp is set to a random value if this - // function is never called. - virtual void SetStartTimestamp(uint32_t timestamp) = 0; - - // Returns SequenceNumber. - virtual uint16_t SequenceNumber() const = 0; - - // Sets SequenceNumber, default is a random number. - virtual void SetSequenceNumber(uint16_t seq) = 0; - - virtual void SetRtpState(const RtpState& rtp_state) = 0; - virtual void SetRtxState(const RtpState& rtp_state) = 0; - virtual RtpState GetRtpState() const = 0; - virtual RtpState GetRtxState() const = 0; - - // Returns SSRC. - virtual uint32_t SSRC() const = 0; - - // Sets the value for sending in the RID (and Repaired) RTP header extension. - // RIDs are used to identify an RTP stream if SSRCs are not negotiated. - // If the RID and Repaired RID extensions are not registered, the RID will - // not be sent. 
- virtual void SetRid(const std::string& rid) = 0; - - // Sets the value for sending in the MID RTP header extension. - // The MID RTP header extension should be registered for this to do anything. - // Once set, this value can not be changed or removed. - virtual void SetMid(const std::string& mid) = 0; - - // Sets CSRC. - // |csrcs| - vector of CSRCs - virtual void SetCsrcs(const std::vector& csrcs) = 0; + // Instantiates a deprecated version of the RtpRtcp module. + static std::unique_ptr ABSL_DEPRECATED("") + Create(const Configuration& configuration) { + return DEPRECATED_Create(configuration); + } - // Turns on/off sending RTX (RFC 4588). The modes can be set as a combination - // of values of the enumerator RtxMode. - virtual void SetRtxSendStatus(int modes) = 0; - - // Returns status of sending RTX (RFC 4588). The returned value can be - // a combination of values of the enumerator RtxMode. - virtual int RtxSendStatus() const = 0; - - // Returns the SSRC used for RTX if set, otherwise a nullopt. - virtual absl::optional RtxSsrc() const = 0; - - // Sets the payload type to use when sending RTX packets. Note that this - // doesn't enable RTX, only the payload type is set. - virtual void SetRtxSendPayloadType(int payload_type, - int associated_payload_type) = 0; - - // Returns the FlexFEC SSRC, if there is one. - virtual absl::optional FlexfecSsrc() const = 0; - - // Sets sending status. Sends kRtcpByeCode when going from true to false. - // Returns -1 on failure else 0. - virtual int32_t SetSendingStatus(bool sending) = 0; - - // Returns current sending status. - virtual bool Sending() const = 0; - - // Starts/Stops media packets. On by default. - virtual void SetSendingMediaStatus(bool sending) = 0; - - // Returns current media sending status. - virtual bool SendingMedia() const = 0; - - // Returns whether audio is configured (i.e. Configuration::audio = true). 
- virtual bool IsAudioConfigured() const = 0; - - // Indicate that the packets sent by this module should be counted towards the - // bitrate estimate since the stream participates in the bitrate allocation. - virtual void SetAsPartOfAllocation(bool part_of_allocation) = 0; - - // TODO(sprang): Remove when all call sites have been moved to - // GetSendRates(). Fetches the current send bitrates in bits/s. - virtual void BitrateSent(uint32_t* total_rate, - uint32_t* video_rate, - uint32_t* fec_rate, - uint32_t* nack_rate) const = 0; - - // Returns bitrate sent (post-pacing) per packet type. - virtual RtpSendRates GetSendRates() const = 0; - - virtual RTPSender* RtpSender() = 0; - virtual const RTPSender* RtpSender() const = 0; - - // Record that a frame is about to be sent. Returns true on success, and false - // if the module isn't ready to send. - virtual bool OnSendingRtpFrame(uint32_t timestamp, - int64_t capture_time_ms, - int payload_type, - bool force_sender_report) = 0; - - // Try to send the provided packet. Returns true iff packet matches any of - // the SSRCs for this module (media/rtx/fec etc) and was forwarded to the - // transport. - virtual bool TrySendPacket(RtpPacketToSend* packet, - const PacedPacketInfo& pacing_info) = 0; - - virtual void OnPacketsAcknowledged( - rtc::ArrayView sequence_numbers) = 0; - - virtual std::vector> GeneratePadding( - size_t target_size_bytes) = 0; - - virtual std::vector GetSentRtpPacketInfos( - rtc::ArrayView sequence_numbers) const = 0; - - // Returns an expected per packet overhead representing the main RTP header, - // any CSRCs, and the registered header extensions that are expected on all - // packets (i.e. disregarding things like abs capture time which is only - // populated on a subset of packets, but counting MID/RID type extensions - // when we expect to send them). 
- virtual size_t ExpectedPerPacketOverhead() const = 0; - - // ************************************************************************** - // RTCP - // ************************************************************************** - - // Returns RTCP status. - virtual RtcpMode RTCP() const = 0; - - // Sets RTCP status i.e on(compound or non-compound)/off. - // |method| - RTCP method to use. - virtual void SetRTCPStatus(RtcpMode method) = 0; - - // Sets RTCP CName (i.e unique identifier). - // Returns -1 on failure else 0. - virtual int32_t SetCNAME(const char* cname) = 0; - - // Returns remote CName. - // Returns -1 on failure else 0. - virtual int32_t RemoteCNAME(uint32_t remote_ssrc, - char cname[RTCP_CNAME_SIZE]) const = 0; - - // Returns remote NTP. - // Returns -1 on failure else 0. - virtual int32_t RemoteNTP(uint32_t* received_ntp_secs, - uint32_t* received_ntp_frac, - uint32_t* rtcp_arrival_time_secs, - uint32_t* rtcp_arrival_time_frac, - uint32_t* rtcp_timestamp) const = 0; - - // Returns -1 on failure else 0. - virtual int32_t AddMixedCNAME(uint32_t ssrc, const char* cname) = 0; - - // Returns -1 on failure else 0. - virtual int32_t RemoveMixedCNAME(uint32_t ssrc) = 0; - - // Returns current RTT (round-trip time) estimate. - // Returns -1 on failure else 0. - virtual int32_t RTT(uint32_t remote_ssrc, - int64_t* rtt, - int64_t* avg_rtt, - int64_t* min_rtt, - int64_t* max_rtt) const = 0; - - // Returns the estimated RTT, with fallback to a default value. - virtual int64_t ExpectedRetransmissionTimeMs() const = 0; - - // Forces a send of a RTCP packet. Periodic SR and RR are triggered via the - // process function. - // Returns -1 on failure else 0. - virtual int32_t SendRTCP(RTCPPacketType rtcp_packet_type) = 0; - - // Returns statistics of the amount of data sent. - // Returns -1 on failure else 0. - virtual int32_t DataCountersRTP(size_t* bytes_sent, - uint32_t* packets_sent) const = 0; - - // Returns send statistics for the RTP and RTX stream. 
- virtual void GetSendStreamDataCounters( - StreamDataCounters* rtp_counters, - StreamDataCounters* rtx_counters) const = 0; - - // Returns received RTCP report block. - // Returns -1 on failure else 0. - // TODO(https://crbug.com/webrtc/10678): Remove this in favor of - // GetLatestReportBlockData(). - virtual int32_t RemoteRTCPStat( - std::vector* receive_blocks) const = 0; - // A snapshot of Report Blocks with additional data of interest to statistics. - // Within this list, the sender-source SSRC pair is unique and per-pair the - // ReportBlockData represents the latest Report Block that was received for - // that pair. - virtual std::vector GetLatestReportBlockData() const = 0; - - // (APP) Sets application specific data. - // Returns -1 on failure else 0. - virtual int32_t SetRTCPApplicationSpecificData(uint8_t sub_type, - uint32_t name, - const uint8_t* data, - uint16_t length) = 0; - // (XR) Sets Receiver Reference Time Report (RTTR) status. - virtual void SetRtcpXrRrtrStatus(bool enable) = 0; - - // Returns current Receiver Reference Time Report (RTTR) status. - virtual bool RtcpXrRrtrStatus() const = 0; - - // (REMB) Receiver Estimated Max Bitrate. - // Schedules sending REMB on next and following sender/receiver reports. - void SetRemb(int64_t bitrate_bps, std::vector ssrcs) override = 0; - // Stops sending REMB on next and following sender/receiver reports. - void UnsetRemb() override = 0; - - // (TMMBR) Temporary Max Media Bit Rate - virtual bool TMMBR() const = 0; - - virtual void SetTMMBRStatus(bool enable) = 0; - - // (NACK) - - // Sends a Negative acknowledgement packet. - // Returns -1 on failure else 0. - // TODO(philipel): Deprecate this and start using SendNack instead, mostly - // because we want a function that actually send NACK for the specified - // packets. - virtual int32_t SendNACK(const uint16_t* nack_list, uint16_t size) = 0; - - // Sends NACK for the packets specified. 
- // Note: This assumes the caller keeps track of timing and doesn't rely on - // the RTP module to do this. - virtual void SendNack(const std::vector& sequence_numbers) = 0; - - // Store the sent packets, needed to answer to a Negative acknowledgment - // requests. - virtual void SetStorePacketsStatus(bool enable, uint16_t numberToStore) = 0; - - // Returns true if the module is configured to store packets. - virtual bool StorePackets() const = 0; - - virtual void SetVideoBitrateAllocation( - const VideoBitrateAllocation& bitrate) = 0; - - // ************************************************************************** - // Video - // ************************************************************************** + static std::unique_ptr DEPRECATED_Create( + const Configuration& configuration); // Requests new key frame. // using PLI, https://tools.ietf.org/html/rfc4585#section-6.3.1.1 void SendPictureLossIndication() { SendRTCP(kRtcpPli); } // using FIR, https://tools.ietf.org/html/rfc5104#section-4.3.1.2 void SendFullIntraRequest() { SendRTCP(kRtcpFir); } - - // Sends a LossNotification RTCP message. - // Returns -1 on failure else 0. 
- virtual int32_t SendLossNotification(uint16_t last_decoded_seq_num, - uint16_t last_received_seq_num, - bool decodability_flag, - bool buffering_allowed) = 0; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/include/rtp_rtcp_defines.cc b/modules/rtp_rtcp/include/rtp_rtcp_defines.cc index ca128e708a..5aa41fccb3 100644 --- a/modules/rtp_rtcp/include/rtp_rtcp_defines.cc +++ b/modules/rtp_rtcp/include/rtp_rtcp_defines.cc @@ -44,6 +44,12 @@ bool IsLegalRsidName(absl::string_view name) { StreamDataCounters::StreamDataCounters() : first_packet_time_ms(-1) {} +RtpPacketCounter::RtpPacketCounter(const RtpPacket& packet) + : header_bytes(packet.headers_size()), + payload_bytes(packet.payload_size()), + padding_bytes(packet.padding_size()), + packets(1) {} + void RtpPacketCounter::AddPacket(const RtpPacket& packet) { ++packets; header_bytes += packet.headers_size(); diff --git a/modules/rtp_rtcp/include/rtp_rtcp_defines.h b/modules/rtp_rtcp/include/rtp_rtcp_defines.h index 049ff5c506..998a754cc0 100644 --- a/modules/rtp_rtcp/include/rtp_rtcp_defines.h +++ b/modules/rtp_rtcp/include/rtp_rtcp_defines.h @@ -33,6 +33,7 @@ namespace webrtc { class RtpPacket; +class RtpPacketToSend; namespace rtcp { class TransportFeedback; } @@ -56,6 +57,7 @@ enum RTPExtensionType : int { kRtpExtensionNone, kRtpExtensionTransmissionTimeOffset, kRtpExtensionAudioLevel, + kRtpExtensionCsrcAudioLevel, kRtpExtensionInbandComfortNoise, kRtpExtensionAbsoluteSendTime, kRtpExtensionAbsoluteCaptureTime, @@ -64,8 +66,8 @@ enum RTPExtensionType : int { kRtpExtensionTransportSequenceNumber02, kRtpExtensionPlayoutDelay, kRtpExtensionVideoContentType, + kRtpExtensionVideoLayersAllocation, kRtpExtensionVideoTiming, - kRtpExtensionFrameMarking, kRtpExtensionRtpStreamId, kRtpExtensionRepairedRtpStreamId, kRtpExtensionMid, @@ -73,6 +75,7 @@ enum RTPExtensionType : int { kRtpExtensionGenericFrameDescriptor = kRtpExtensionGenericFrameDescriptor00, kRtpExtensionGenericFrameDescriptor02, 
kRtpExtensionColorSpace, + kRtpExtensionVideoFrameTrackingId, kRtpExtensionNumberOfExtensions // Must be the last entity in the enum. }; @@ -91,7 +94,6 @@ enum RTCPPacketType : uint32_t { kRtcpTmmbr = 0x0100, kRtcpTmmbn = 0x0200, kRtcpSrReq = 0x0400, - kRtcpApp = 0x1000, kRtcpLossNotification = 0x2000, kRtcpRemb = 0x10000, kRtcpTransmissionTimeOffset = 0x20000, @@ -158,14 +160,12 @@ struct RtpState { timestamp(0), capture_time_ms(-1), last_timestamp_time_ms(-1), - media_has_been_sent(false), ssrc_has_acked(false) {} uint16_t sequence_number; uint32_t start_timestamp; uint32_t timestamp; int64_t capture_time_ms; int64_t last_timestamp_time_ms; - bool media_has_been_sent; bool ssrc_has_acked; }; @@ -228,8 +228,11 @@ struct RtpPacketSendInfo { RtpPacketSendInfo() = default; uint16_t transport_sequence_number = 0; + // TODO(bugs.webrtc.org/12713): Remove once downstream usage is gone. uint32_t ssrc = 0; - uint16_t rtp_sequence_number = 0; + absl::optional media_ssrc; + uint16_t rtp_sequence_number = 0; // Only valid if |media_ssrc| is set. + uint32_t rtp_timestamp = 0; size_t length = 0; absl::optional packet_type; PacedPacketInfo pacing_info; @@ -266,9 +269,13 @@ class RtcpFeedbackSenderInterface { class StreamFeedbackObserver { public: struct StreamPacketInfo { - uint32_t ssrc; - uint16_t rtp_sequence_number; bool received; + + // |rtp_sequence_number| and |is_retransmission| are only valid if |ssrc| + // is populated. 
+ absl::optional ssrc; + uint16_t rtp_sequence_number; + bool is_retransmission; }; virtual ~StreamFeedbackObserver() = default; @@ -299,6 +306,8 @@ struct RtpPacketCounter { RtpPacketCounter() : header_bytes(0), payload_bytes(0), padding_bytes(0), packets(0) {} + explicit RtpPacketCounter(const RtpPacket& packet); + void Add(const RtpPacketCounter& other) { header_bytes += other.header_bytes; payload_bytes += other.payload_bytes; @@ -468,5 +477,15 @@ class SendPacketObserver { int64_t capture_time_ms, uint32_t ssrc) = 0; }; + +// Interface for a class that can assign RTP sequence numbers for a packet +// to be sent. +class SequenceNumberAssigner { + public: + SequenceNumberAssigner() = default; + virtual ~SequenceNumberAssigner() = default; + + virtual void AssignSequenceNumber(RtpPacketToSend* packet) = 0; +}; } // namespace webrtc #endif // MODULES_RTP_RTCP_INCLUDE_RTP_RTCP_DEFINES_H_ diff --git a/modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h b/modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h index 5b1585fa0f..e9a7d52691 100644 --- a/modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h +++ b/modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h @@ -19,7 +19,7 @@ namespace webrtc { class MockRtcpRttStats : public RtcpRttStats { public: MOCK_METHOD(void, OnRttUpdate, (int64_t rtt), (override)); - MOCK_METHOD(int64_t, LastProcessedRtt, (), (const override)); + MOCK_METHOD(int64_t, LastProcessedRtt, (), (const, override)); }; } // namespace webrtc #endif // MODULES_RTP_RTCP_MOCKS_MOCK_RTCP_RTT_STATS_H_ diff --git a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h index 5a333fe847..a7707ecc19 100644 --- a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h +++ b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h @@ -20,22 +20,23 @@ #include "absl/types/optional.h" #include "api/video/video_bitrate_allocation.h" #include "modules/include/module.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include 
"modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "test/gmock.h" namespace webrtc { -class MockRtpRtcp : public RtpRtcp { +class MockRtpRtcpInterface : public RtpRtcpInterface { public: MOCK_METHOD(void, IncomingRtcpPacket, (const uint8_t* incoming_packet, size_t packet_length), (override)); MOCK_METHOD(void, SetRemoteSSRC, (uint32_t ssrc), (override)); + MOCK_METHOD(void, SetLocalSsrc, (uint32_t ssrc), (override)); MOCK_METHOD(void, SetMaxRtpPacketSize, (size_t size), (override)); - MOCK_METHOD(size_t, MaxRtpPacketSize, (), (const override)); + MOCK_METHOD(size_t, MaxRtpPacketSize, (), (const, override)); MOCK_METHOD(void, RegisterSendPayloadFrequency, (int payload_type, int frequency), @@ -45,10 +46,6 @@ class MockRtpRtcp : public RtpRtcp { (int8_t payload_type), (override)); MOCK_METHOD(void, SetExtmapAllowMixed, (bool extmap_allow_mixed), (override)); - MOCK_METHOD(int32_t, - RegisterSendRtpHeaderExtension, - (RTPExtensionType type, uint8_t id), - (override)); MOCK_METHOD(void, RegisterRtpHeaderExtension, (absl::string_view uri, int id), @@ -61,45 +58,32 @@ class MockRtpRtcp : public RtpRtcp { DeregisterSendRtpHeaderExtension, (absl::string_view uri), (override)); - MOCK_METHOD(bool, SupportsPadding, (), (const override)); - MOCK_METHOD(bool, SupportsRtxPayloadPadding, (), (const override)); - MOCK_METHOD(uint32_t, StartTimestamp, (), (const override)); + MOCK_METHOD(bool, SupportsPadding, (), (const, override)); + MOCK_METHOD(bool, SupportsRtxPayloadPadding, (), (const, override)); + MOCK_METHOD(uint32_t, StartTimestamp, (), (const, override)); MOCK_METHOD(void, SetStartTimestamp, (uint32_t timestamp), (override)); - MOCK_METHOD(uint16_t, SequenceNumber, (), (const override)); + MOCK_METHOD(uint16_t, SequenceNumber, (), (const, override)); MOCK_METHOD(void, SetSequenceNumber, (uint16_t seq), (override)); MOCK_METHOD(void, SetRtpState, (const RtpState& rtp_state), (override)); MOCK_METHOD(void, 
SetRtxState, (const RtpState& rtp_state), (override)); - MOCK_METHOD(RtpState, GetRtpState, (), (const override)); - MOCK_METHOD(RtpState, GetRtxState, (), (const override)); - MOCK_METHOD(uint32_t, SSRC, (), (const override)); + MOCK_METHOD(RtpState, GetRtpState, (), (const, override)); + MOCK_METHOD(RtpState, GetRtxState, (), (const, override)); + MOCK_METHOD(uint32_t, SSRC, (), (const, override)); MOCK_METHOD(void, SetRid, (const std::string& rid), (override)); MOCK_METHOD(void, SetMid, (const std::string& mid), (override)); - MOCK_METHOD(int32_t, CSRCs, (uint32_t csrcs[kRtpCsrcSize]), (const override)); MOCK_METHOD(void, SetCsrcs, (const std::vector& csrcs), (override)); MOCK_METHOD(void, SetRtxSendStatus, (int modes), (override)); - MOCK_METHOD(int, RtxSendStatus, (), (const override)); - MOCK_METHOD(absl::optional, RtxSsrc, (), (const override)); + MOCK_METHOD(int, RtxSendStatus, (), (const, override)); + MOCK_METHOD(absl::optional, RtxSsrc, (), (const, override)); MOCK_METHOD(void, SetRtxSendPayloadType, (int, int), (override)); - MOCK_METHOD(absl::optional, FlexfecSsrc, (), (const override)); - MOCK_METHOD((std::pair), RtxSendPayloadType, (), (const override)); + MOCK_METHOD(absl::optional, FlexfecSsrc, (), (const, override)); MOCK_METHOD(int32_t, SetSendingStatus, (bool sending), (override)); - MOCK_METHOD(bool, Sending, (), (const override)); + MOCK_METHOD(bool, Sending, (), (const, override)); MOCK_METHOD(void, SetSendingMediaStatus, (bool sending), (override)); - MOCK_METHOD(bool, SendingMedia, (), (const override)); - MOCK_METHOD(bool, IsAudioConfigured, (), (const override)); + MOCK_METHOD(bool, SendingMedia, (), (const, override)); + MOCK_METHOD(bool, IsAudioConfigured, (), (const, override)); MOCK_METHOD(void, SetAsPartOfAllocation, (bool), (override)); - MOCK_METHOD(void, - BitrateSent, - (uint32_t * total_rate, - uint32_t* video_rate, - uint32_t* fec_rate, - uint32_t* nack_rate), - (const override)); - MOCK_METHOD(RtpSendRates, GetSendRates, (), 
(const override)); - MOCK_METHOD(int, - EstimatedReceiveBandwidth, - (uint32_t * available_bandwidth), - (const override)); + MOCK_METHOD(RtpSendRates, GetSendRates, (), (const, override)); MOCK_METHOD(bool, OnSendingRtpFrame, (uint32_t, int64_t, int, bool), @@ -108,6 +92,15 @@ class MockRtpRtcp : public RtpRtcp { TrySendPacket, (RtpPacketToSend * packet, const PacedPacketInfo& pacing_info), (override)); + MOCK_METHOD(void, + SetFecProtectionParams, + (const FecProtectionParams& delta_params, + const FecProtectionParams& key_params), + (override)); + MOCK_METHOD(std::vector>, + FetchFecPackets, + (), + (override)); MOCK_METHOD(void, OnPacketsAcknowledged, (rtc::ArrayView), @@ -119,18 +112,14 @@ class MockRtpRtcp : public RtpRtcp { MOCK_METHOD(std::vector, GetSentRtpPacketInfos, (rtc::ArrayView sequence_numbers), - (const override)); - MOCK_METHOD(size_t, ExpectedPerPacketOverhead, (), (const override)); - MOCK_METHOD(RtcpMode, RTCP, (), (const override)); + (const, override)); + MOCK_METHOD(size_t, ExpectedPerPacketOverhead, (), (const, override)); + MOCK_METHOD(RtcpMode, RTCP, (), (const, override)); MOCK_METHOD(void, SetRTCPStatus, (RtcpMode method), (override)); MOCK_METHOD(int32_t, SetCNAME, (const char cname[RTCP_CNAME_SIZE]), (override)); - MOCK_METHOD(int32_t, - RemoteCNAME, - (uint32_t remote_ssrc, char cname[RTCP_CNAME_SIZE]), - (const override)); MOCK_METHOD(int32_t, RemoteNTP, (uint32_t * received_ntp_secs, @@ -138,12 +127,7 @@ class MockRtpRtcp : public RtpRtcp { uint32_t* rtcp_arrival_time_secs, uint32_t* rtcp_arrival_time_frac, uint32_t* rtcp_timestamp), - (const override)); - MOCK_METHOD(int32_t, - AddMixedCNAME, - (uint32_t ssrc, const char cname[RTCP_CNAME_SIZE]), - (override)); - MOCK_METHOD(int32_t, RemoveMixedCNAME, (uint32_t ssrc), (override)); + (const, override)); MOCK_METHOD(int32_t, RTT, (uint32_t remote_ssrc, @@ -151,39 +135,26 @@ class MockRtpRtcp : public RtpRtcp { int64_t* avg_rtt, int64_t* min_rtt, int64_t* max_rtt), - (const 
override)); - MOCK_METHOD(int64_t, ExpectedRetransmissionTimeMs, (), (const override)); + (const, override)); + MOCK_METHOD(int64_t, ExpectedRetransmissionTimeMs, (), (const, override)); MOCK_METHOD(int32_t, SendRTCP, (RTCPPacketType packet_type), (override)); - MOCK_METHOD(int32_t, - DataCountersRTP, - (size_t * bytes_sent, uint32_t* packets_sent), - (const override)); MOCK_METHOD(void, GetSendStreamDataCounters, (StreamDataCounters*, StreamDataCounters*), - (const override)); - MOCK_METHOD(int32_t, - RemoteRTCPStat, - (std::vector * receive_blocks), - (const override)); + (const, override)); MOCK_METHOD(std::vector, GetLatestReportBlockData, (), - (const override)); - MOCK_METHOD( - int32_t, - SetRTCPApplicationSpecificData, - (uint8_t sub_type, uint32_t name, const uint8_t* data, uint16_t length), - (override)); - MOCK_METHOD(void, SetRtcpXrRrtrStatus, (bool enable), (override)); - MOCK_METHOD(bool, RtcpXrRrtrStatus, (), (const override)); + (const, override)); + MOCK_METHOD(absl::optional, + GetSenderReportStats, + (), + (const, override)); MOCK_METHOD(void, SetRemb, (int64_t bitrate, std::vector ssrcs), (override)); MOCK_METHOD(void, UnsetRemb, (), (override)); - MOCK_METHOD(bool, TMMBR, (), (const override)); - MOCK_METHOD(void, SetTMMBRStatus, (bool enable), (override)); MOCK_METHOD(int32_t, SendNACK, (const uint16_t* nack_list, uint16_t size), @@ -196,7 +167,6 @@ class MockRtpRtcp : public RtpRtcp { SetStorePacketsStatus, (bool enable, uint16_t number_to_store), (override)); - MOCK_METHOD(bool, StorePackets, (), (const override)); MOCK_METHOD(void, SendCombinedRtcpPacket, (std::vector> rtcp_packets), @@ -208,20 +178,12 @@ class MockRtpRtcp : public RtpRtcp { bool decodability_flag, bool buffering_allowed), (override)); - MOCK_METHOD(void, Process, (), (override)); MOCK_METHOD(void, SetVideoBitrateAllocation, (const VideoBitrateAllocation&), (override)); MOCK_METHOD(RTPSender*, RtpSender, (), (override)); - MOCK_METHOD(const RTPSender*, RtpSender, (), (const 
override)); - - private: - // Mocking this method is currently not required and having a default - // implementation like - // MOCK_METHOD(int64_t, TimeUntilNextProcess, (), (override)) - // can be dangerous since it can cause a tight loop on a process thread. - int64_t TimeUntilNextProcess() override { return 0xffffffff; } + MOCK_METHOD(const RTPSender*, RtpSender, (), (const, override)); }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/absolute_capture_time_receiver.cc b/modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc similarity index 69% rename from modules/rtp_rtcp/source/absolute_capture_time_receiver.cc rename to modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc index 62f300d0e5..99fc030aca 100644 --- a/modules/rtp_rtcp/source/absolute_capture_time_receiver.cc +++ b/modules/rtp_rtcp/source/absolute_capture_time_interpolator.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h" +#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h" #include @@ -20,14 +20,12 @@ namespace { constexpr Timestamp kInvalidLastReceiveTime = Timestamp::MinusInfinity(); } // namespace -constexpr TimeDelta AbsoluteCaptureTimeReceiver::kInterpolationMaxInterval; +constexpr TimeDelta AbsoluteCaptureTimeInterpolator::kInterpolationMaxInterval; -AbsoluteCaptureTimeReceiver::AbsoluteCaptureTimeReceiver(Clock* clock) - : clock_(clock), - remote_to_local_clock_offset_(absl::nullopt), - last_receive_time_(kInvalidLastReceiveTime) {} +AbsoluteCaptureTimeInterpolator::AbsoluteCaptureTimeInterpolator(Clock* clock) + : clock_(clock), last_receive_time_(kInvalidLastReceiveTime) {} -uint32_t AbsoluteCaptureTimeReceiver::GetSource( +uint32_t AbsoluteCaptureTimeInterpolator::GetSource( uint32_t ssrc, rtc::ArrayView csrcs) { if (csrcs.empty()) { @@ -37,22 +35,15 @@ uint32_t AbsoluteCaptureTimeReceiver::GetSource( return csrcs[0]; } -void 
AbsoluteCaptureTimeReceiver::SetRemoteToLocalClockOffset( - absl::optional value_q32x32) { - rtc::CritScope cs(&crit_); - - remote_to_local_clock_offset_ = value_q32x32; -} - absl::optional -AbsoluteCaptureTimeReceiver::OnReceivePacket( +AbsoluteCaptureTimeInterpolator::OnReceivePacket( uint32_t source, uint32_t rtp_timestamp, uint32_t rtp_clock_frequency, const absl::optional& received_extension) { const Timestamp receive_time = clock_->CurrentTime(); - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); AbsoluteCaptureTime extension; if (received_extension == absl::nullopt) { @@ -81,13 +72,10 @@ AbsoluteCaptureTimeReceiver::OnReceivePacket( extension = *received_extension; } - extension.estimated_capture_clock_offset = AdjustEstimatedCaptureClockOffset( - extension.estimated_capture_clock_offset); - return extension; } -uint64_t AbsoluteCaptureTimeReceiver::InterpolateAbsoluteCaptureTimestamp( +uint64_t AbsoluteCaptureTimeInterpolator::InterpolateAbsoluteCaptureTimestamp( uint32_t rtp_timestamp, uint32_t rtp_clock_frequency, uint32_t last_rtp_timestamp, @@ -101,7 +89,7 @@ uint64_t AbsoluteCaptureTimeReceiver::InterpolateAbsoluteCaptureTimestamp( rtp_clock_frequency; } -bool AbsoluteCaptureTimeReceiver::ShouldInterpolateExtension( +bool AbsoluteCaptureTimeInterpolator::ShouldInterpolateExtension( Timestamp receive_time, uint32_t source, uint32_t rtp_timestamp, @@ -134,17 +122,4 @@ bool AbsoluteCaptureTimeReceiver::ShouldInterpolateExtension( return true; } -absl::optional -AbsoluteCaptureTimeReceiver::AdjustEstimatedCaptureClockOffset( - absl::optional received_value) const { - if (received_value == absl::nullopt || - remote_to_local_clock_offset_ == absl::nullopt) { - return absl::nullopt; - } - - // Do calculations as "unsigned" to make overflows deterministic. 
- return static_cast(*received_value) + - static_cast(*remote_to_local_clock_offset_); -} - } // namespace webrtc diff --git a/modules/rtp_rtcp/source/absolute_capture_time_receiver.h b/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h similarity index 59% rename from modules/rtp_rtcp/source/absolute_capture_time_receiver.h rename to modules/rtp_rtcp/source/absolute_capture_time_interpolator.h index ea55ab4d22..89d7f0850c 100644 --- a/modules/rtp_rtcp/source/absolute_capture_time_receiver.h +++ b/modules/rtp_rtcp/source/absolute_capture_time_interpolator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,21 +8,21 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_RECEIVER_H_ -#define MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_RECEIVER_H_ +#ifndef MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_INTERPOLATOR_H_ +#define MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_INTERPOLATOR_H_ #include "api/array_view.h" #include "api/rtp_headers.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" namespace webrtc { // -// Helper class for receiving the |AbsoluteCaptureTime| header extension. +// Helper class for interpolating the |AbsoluteCaptureTime| header extension. // // Supports the "timestamp interpolation" optimization: // A receiver SHOULD memorize the capture system (i.e. 
CSRC/SSRC), capture @@ -33,25 +33,17 @@ namespace webrtc { // // See: https://webrtc.org/experiments/rtp-hdrext/abs-capture-time/ // -class AbsoluteCaptureTimeReceiver { +class AbsoluteCaptureTimeInterpolator { public: static constexpr TimeDelta kInterpolationMaxInterval = TimeDelta::Millis(5000); - explicit AbsoluteCaptureTimeReceiver(Clock* clock); + explicit AbsoluteCaptureTimeInterpolator(Clock* clock); // Returns the source (i.e. SSRC or CSRC) of the capture system. static uint32_t GetSource(uint32_t ssrc, rtc::ArrayView csrcs); - // Sets the NTP clock offset between the sender system (which may be different - // from the capture system) and the local system. This information is normally - // provided by passing half the value of the Round-Trip Time estimation given - // by RTCP sender reports (see DLSR/DLRR). - // - // Note that the value must be in Q32.32-formatted fixed-point seconds. - void SetRemoteToLocalClockOffset(absl::optional value_q32x32); - // Returns a received header extension, an interpolated header extension, or // |absl::nullopt| if it's not possible to interpolate a header extension. 
absl::optional OnReceivePacket( @@ -73,28 +65,22 @@ class AbsoluteCaptureTimeReceiver { uint32_t source, uint32_t rtp_timestamp, uint32_t rtp_clock_frequency) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); - - absl::optional AdjustEstimatedCaptureClockOffset( - absl::optional received_value) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); Clock* const clock_; - rtc::CriticalSection crit_; - - absl::optional remote_to_local_clock_offset_ RTC_GUARDED_BY(crit_); + Mutex mutex_; - Timestamp last_receive_time_ RTC_GUARDED_BY(crit_); + Timestamp last_receive_time_ RTC_GUARDED_BY(mutex_); - uint32_t last_source_ RTC_GUARDED_BY(crit_); - uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(crit_); - uint32_t last_rtp_clock_frequency_ RTC_GUARDED_BY(crit_); - uint64_t last_absolute_capture_timestamp_ RTC_GUARDED_BY(crit_); + uint32_t last_source_ RTC_GUARDED_BY(mutex_); + uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(mutex_); + uint32_t last_rtp_clock_frequency_ RTC_GUARDED_BY(mutex_); + uint64_t last_absolute_capture_timestamp_ RTC_GUARDED_BY(mutex_); absl::optional last_estimated_capture_clock_offset_ - RTC_GUARDED_BY(crit_); -}; // AbsoluteCaptureTimeReceiver + RTC_GUARDED_BY(mutex_); +}; // AbsoluteCaptureTimeInterpolator } // namespace webrtc -#endif // MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_RECEIVER_H_ +#endif // MODULES_RTP_RTCP_SOURCE_ABSOLUTE_CAPTURE_TIME_INTERPOLATOR_H_ diff --git a/modules/rtp_rtcp/source/absolute_capture_time_receiver_unittest.cc b/modules/rtp_rtcp/source/absolute_capture_time_interpolator_unittest.cc similarity index 61% rename from modules/rtp_rtcp/source/absolute_capture_time_receiver_unittest.cc rename to modules/rtp_rtcp/source/absolute_capture_time_interpolator_unittest.cc index ecf256734d..6a312f9b43 100644 --- a/modules/rtp_rtcp/source/absolute_capture_time_receiver_unittest.cc +++ b/modules/rtp_rtcp/source/absolute_capture_time_interpolator_unittest.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the 
root of the source tree. */ -#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h" +#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h" #include "system_wrappers/include/ntp_time.h" #include "test/gmock.h" @@ -16,20 +16,21 @@ namespace webrtc { -TEST(AbsoluteCaptureTimeReceiverTest, GetSourceWithoutCsrcs) { +TEST(AbsoluteCaptureTimeInterpolatorTest, GetSourceWithoutCsrcs) { constexpr uint32_t kSsrc = 12; - EXPECT_EQ(AbsoluteCaptureTimeReceiver::GetSource(kSsrc, nullptr), kSsrc); + EXPECT_EQ(AbsoluteCaptureTimeInterpolator::GetSource(kSsrc, nullptr), kSsrc); } -TEST(AbsoluteCaptureTimeReceiverTest, GetSourceWithCsrcs) { +TEST(AbsoluteCaptureTimeInterpolatorTest, GetSourceWithCsrcs) { constexpr uint32_t kSsrc = 12; constexpr uint32_t kCsrcs[] = {34, 56, 78, 90}; - EXPECT_EQ(AbsoluteCaptureTimeReceiver::GetSource(kSsrc, kCsrcs), kCsrcs[0]); + EXPECT_EQ(AbsoluteCaptureTimeInterpolator::GetSource(kSsrc, kCsrcs), + kCsrcs[0]); } -TEST(AbsoluteCaptureTimeReceiverTest, ReceiveExtensionReturnsExtension) { +TEST(AbsoluteCaptureTimeInterpolatorTest, ReceiveExtensionReturnsExtension) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency = 64000; constexpr uint32_t kRtpTimestamp0 = 1020300000; @@ -40,20 +41,19 @@ TEST(AbsoluteCaptureTimeReceiverTest, ReceiveExtensionReturnsExtension) { AbsoluteCaptureTime{Int64MsToUQ32x32(9020), absl::nullopt}; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - receiver.SetRemoteToLocalClockOffset(0); - - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), kExtension0); - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp1, - kRtpClockFrequency, kExtension1), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp1, + kRtpClockFrequency, kExtension1), 
kExtension1); } -TEST(AbsoluteCaptureTimeReceiverTest, ReceiveNoExtensionReturnsNoExtension) { +TEST(AbsoluteCaptureTimeInterpolatorTest, + ReceiveNoExtensionReturnsNoExtension) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency = 64000; constexpr uint32_t kRtpTimestamp0 = 1020300000; @@ -62,20 +62,18 @@ TEST(AbsoluteCaptureTimeReceiverTest, ReceiveNoExtensionReturnsNoExtension) { static const absl::optional kExtension1 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); - - receiver.SetRemoteToLocalClockOffset(0); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), absl::nullopt); - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp1, - kRtpClockFrequency, kExtension1), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp1, + kRtpClockFrequency, kExtension1), absl::nullopt); } -TEST(AbsoluteCaptureTimeReceiverTest, InterpolateLaterPacketArrivingLater) { +TEST(AbsoluteCaptureTimeInterpolatorTest, InterpolateLaterPacketArrivingLater) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency = 64000; constexpr uint32_t kRtpTimestamp0 = 1020300000; @@ -87,15 +85,13 @@ TEST(AbsoluteCaptureTimeReceiverTest, InterpolateLaterPacketArrivingLater) { static const absl::optional kExtension2 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); - - receiver.SetRemoteToLocalClockOffset(0); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), kExtension0); - absl::optional extension = receiver.OnReceivePacket( + absl::optional extension = 
interpolator.OnReceivePacket( kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1); EXPECT_TRUE(extension.has_value()); EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), @@ -103,8 +99,8 @@ TEST(AbsoluteCaptureTimeReceiverTest, InterpolateLaterPacketArrivingLater) { EXPECT_EQ(extension->estimated_capture_clock_offset, kExtension0->estimated_capture_clock_offset); - extension = receiver.OnReceivePacket(kSource, kRtpTimestamp2, - kRtpClockFrequency, kExtension2); + extension = interpolator.OnReceivePacket(kSource, kRtpTimestamp2, + kRtpClockFrequency, kExtension2); EXPECT_TRUE(extension.has_value()); EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) + 40); @@ -112,7 +108,8 @@ TEST(AbsoluteCaptureTimeReceiverTest, InterpolateLaterPacketArrivingLater) { kExtension0->estimated_capture_clock_offset); } -TEST(AbsoluteCaptureTimeReceiverTest, InterpolateEarlierPacketArrivingLater) { +TEST(AbsoluteCaptureTimeInterpolatorTest, + InterpolateEarlierPacketArrivingLater) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency = 64000; constexpr uint32_t kRtpTimestamp0 = 1020300000; @@ -124,15 +121,13 @@ TEST(AbsoluteCaptureTimeReceiverTest, InterpolateEarlierPacketArrivingLater) { static const absl::optional kExtension2 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - receiver.SetRemoteToLocalClockOffset(0); - - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), kExtension0); - absl::optional extension = receiver.OnReceivePacket( + absl::optional extension = interpolator.OnReceivePacket( kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1); EXPECT_TRUE(extension.has_value()); 
EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), @@ -140,8 +135,8 @@ TEST(AbsoluteCaptureTimeReceiverTest, InterpolateEarlierPacketArrivingLater) { EXPECT_EQ(extension->estimated_capture_clock_offset, kExtension0->estimated_capture_clock_offset); - extension = receiver.OnReceivePacket(kSource, kRtpTimestamp2, - kRtpClockFrequency, kExtension2); + extension = interpolator.OnReceivePacket(kSource, kRtpTimestamp2, + kRtpClockFrequency, kExtension2); EXPECT_TRUE(extension.has_value()); EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) - 40); @@ -149,7 +144,7 @@ TEST(AbsoluteCaptureTimeReceiverTest, InterpolateEarlierPacketArrivingLater) { kExtension0->estimated_capture_clock_offset); } -TEST(AbsoluteCaptureTimeReceiverTest, +TEST(AbsoluteCaptureTimeInterpolatorTest, InterpolateLaterPacketArrivingLaterWithRtpTimestampWrapAround) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency = 64000; @@ -162,15 +157,13 @@ TEST(AbsoluteCaptureTimeReceiverTest, static const absl::optional kExtension2 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); - - receiver.SetRemoteToLocalClockOffset(0); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), kExtension0); - absl::optional extension = receiver.OnReceivePacket( + absl::optional extension = interpolator.OnReceivePacket( kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1); EXPECT_TRUE(extension.has_value()); EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), @@ -178,8 +171,8 @@ TEST(AbsoluteCaptureTimeReceiverTest, EXPECT_EQ(extension->estimated_capture_clock_offset, kExtension0->estimated_capture_clock_offset); - extension = receiver.OnReceivePacket(kSource, 
kRtpTimestamp2, - kRtpClockFrequency, kExtension2); + extension = interpolator.OnReceivePacket(kSource, kRtpTimestamp2, + kRtpClockFrequency, kExtension2); EXPECT_TRUE(extension.has_value()); EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) + 40); @@ -187,7 +180,7 @@ TEST(AbsoluteCaptureTimeReceiverTest, kExtension0->estimated_capture_clock_offset); } -TEST(AbsoluteCaptureTimeReceiverTest, +TEST(AbsoluteCaptureTimeInterpolatorTest, InterpolateEarlierPacketArrivingLaterWithRtpTimestampWrapAround) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency = 64000; @@ -200,15 +193,13 @@ TEST(AbsoluteCaptureTimeReceiverTest, static const absl::optional kExtension2 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - receiver.SetRemoteToLocalClockOffset(0); - - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), kExtension0); - absl::optional extension = receiver.OnReceivePacket( + absl::optional extension = interpolator.OnReceivePacket( kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1); EXPECT_TRUE(extension.has_value()); EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), @@ -216,8 +207,8 @@ TEST(AbsoluteCaptureTimeReceiverTest, EXPECT_EQ(extension->estimated_capture_clock_offset, kExtension0->estimated_capture_clock_offset); - extension = receiver.OnReceivePacket(kSource, kRtpTimestamp2, - kRtpClockFrequency, kExtension2); + extension = interpolator.OnReceivePacket(kSource, kRtpTimestamp2, + kRtpClockFrequency, kExtension2); EXPECT_TRUE(extension.has_value()); EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) - 40); @@ -225,51 +216,7 @@ 
TEST(AbsoluteCaptureTimeReceiverTest, kExtension0->estimated_capture_clock_offset); } -TEST(AbsoluteCaptureTimeReceiverTest, - SkipEstimatedCaptureClockOffsetIfRemoteToLocalClockOffsetIsUnknown) { - constexpr uint32_t kSource = 1337; - constexpr uint32_t kRtpClockFrequency = 64000; - constexpr uint32_t kRtpTimestamp0 = 1020300000; - constexpr uint32_t kRtpTimestamp1 = kRtpTimestamp0 + 1280; - constexpr uint32_t kRtpTimestamp2 = kRtpTimestamp0 + 2560; - static const absl::optional kExtension0 = - AbsoluteCaptureTime{Int64MsToUQ32x32(9000), Int64MsToQ32x32(-350)}; - static const absl::optional kExtension1 = absl::nullopt; - static const absl::optional kExtension2 = absl::nullopt; - static const absl::optional kRemoteToLocalClockOffset2 = - Int64MsToQ32x32(-7000007); - - SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); - - receiver.SetRemoteToLocalClockOffset(0); - - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), - kExtension0); - - receiver.SetRemoteToLocalClockOffset(absl::nullopt); - - absl::optional extension = receiver.OnReceivePacket( - kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1); - EXPECT_TRUE(extension.has_value()); - EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), - UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) + 20); - EXPECT_EQ(extension->estimated_capture_clock_offset, absl::nullopt); - - receiver.SetRemoteToLocalClockOffset(kRemoteToLocalClockOffset2); - - extension = receiver.OnReceivePacket(kSource, kRtpTimestamp2, - kRtpClockFrequency, kExtension2); - EXPECT_TRUE(extension.has_value()); - EXPECT_EQ(UQ32x32ToInt64Ms(extension->absolute_capture_timestamp), - UQ32x32ToInt64Ms(kExtension0->absolute_capture_timestamp) + 40); - EXPECT_EQ(extension->estimated_capture_clock_offset, - *kExtension0->estimated_capture_clock_offset + - *kRemoteToLocalClockOffset2); -} - -TEST(AbsoluteCaptureTimeReceiverTest, SkipInterpolateIfTooLate) { 
+TEST(AbsoluteCaptureTimeInterpolatorTest, SkipInterpolateIfTooLate) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency = 64000; constexpr uint32_t kRtpTimestamp0 = 1020300000; @@ -281,30 +228,28 @@ TEST(AbsoluteCaptureTimeReceiverTest, SkipInterpolateIfTooLate) { static const absl::optional kExtension2 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); - - receiver.SetRemoteToLocalClockOffset(0); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), kExtension0); - clock.AdvanceTime(AbsoluteCaptureTimeReceiver::kInterpolationMaxInterval); + clock.AdvanceTime(AbsoluteCaptureTimeInterpolator::kInterpolationMaxInterval); - EXPECT_TRUE(receiver + EXPECT_TRUE(interpolator .OnReceivePacket(kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1) .has_value()); clock.AdvanceTimeMilliseconds(1); - EXPECT_FALSE(receiver + EXPECT_FALSE(interpolator .OnReceivePacket(kSource, kRtpTimestamp2, kRtpClockFrequency, kExtension2) .has_value()); } -TEST(AbsoluteCaptureTimeReceiverTest, SkipInterpolateIfSourceChanged) { +TEST(AbsoluteCaptureTimeInterpolatorTest, SkipInterpolateIfSourceChanged) { constexpr uint32_t kSource0 = 1337; constexpr uint32_t kSource1 = 1338; constexpr uint32_t kRtpClockFrequency = 64000; @@ -315,21 +260,19 @@ TEST(AbsoluteCaptureTimeReceiverTest, SkipInterpolateIfSourceChanged) { static const absl::optional kExtension1 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - receiver.SetRemoteToLocalClockOffset(0); - - EXPECT_EQ(receiver.OnReceivePacket(kSource0, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource0, kRtpTimestamp0, + kRtpClockFrequency, 
kExtension0), kExtension0); - EXPECT_FALSE(receiver + EXPECT_FALSE(interpolator .OnReceivePacket(kSource1, kRtpTimestamp1, kRtpClockFrequency, kExtension1) .has_value()); } -TEST(AbsoluteCaptureTimeReceiverTest, +TEST(AbsoluteCaptureTimeInterpolatorTest, SkipInterpolateIfRtpClockFrequencyChanged) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency0 = 64000; @@ -341,21 +284,19 @@ TEST(AbsoluteCaptureTimeReceiverTest, static const absl::optional kExtension1 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); - - receiver.SetRemoteToLocalClockOffset(0); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency0, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency0, kExtension0), kExtension0); - EXPECT_FALSE(receiver + EXPECT_FALSE(interpolator .OnReceivePacket(kSource, kRtpTimestamp1, kRtpClockFrequency1, kExtension1) .has_value()); } -TEST(AbsoluteCaptureTimeReceiverTest, +TEST(AbsoluteCaptureTimeInterpolatorTest, SkipInterpolateIfRtpClockFrequencyIsInvalid) { constexpr uint32_t kSource = 1337; constexpr uint32_t kRtpClockFrequency = 0; @@ -366,21 +307,19 @@ TEST(AbsoluteCaptureTimeReceiverTest, static const absl::optional kExtension1 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - receiver.SetRemoteToLocalClockOffset(0); - - EXPECT_EQ(receiver.OnReceivePacket(kSource, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), kExtension0); - EXPECT_FALSE(receiver + EXPECT_FALSE(interpolator .OnReceivePacket(kSource, kRtpTimestamp1, kRtpClockFrequency, kExtension1) .has_value()); } -TEST(AbsoluteCaptureTimeReceiverTest, SkipInterpolateIsSticky) { +TEST(AbsoluteCaptureTimeInterpolatorTest, 
SkipInterpolateIsSticky) { constexpr uint32_t kSource0 = 1337; constexpr uint32_t kSource1 = 1338; constexpr uint32_t kSource2 = 1337; @@ -394,20 +333,18 @@ TEST(AbsoluteCaptureTimeReceiverTest, SkipInterpolateIsSticky) { static const absl::optional kExtension2 = absl::nullopt; SimulatedClock clock(0); - AbsoluteCaptureTimeReceiver receiver(&clock); - - receiver.SetRemoteToLocalClockOffset(0); + AbsoluteCaptureTimeInterpolator interpolator(&clock); - EXPECT_EQ(receiver.OnReceivePacket(kSource0, kRtpTimestamp0, - kRtpClockFrequency, kExtension0), + EXPECT_EQ(interpolator.OnReceivePacket(kSource0, kRtpTimestamp0, + kRtpClockFrequency, kExtension0), kExtension0); - EXPECT_FALSE(receiver + EXPECT_FALSE(interpolator .OnReceivePacket(kSource1, kRtpTimestamp1, kRtpClockFrequency, kExtension1) .has_value()); - EXPECT_FALSE(receiver + EXPECT_FALSE(interpolator .OnReceivePacket(kSource2, kRtpTimestamp2, kRtpClockFrequency, kExtension2) .has_value()); diff --git a/modules/rtp_rtcp/source/absolute_capture_time_sender.cc b/modules/rtp_rtcp/source/absolute_capture_time_sender.cc index f614c0c521..28266769ff 100644 --- a/modules/rtp_rtcp/source/absolute_capture_time_sender.cc +++ b/modules/rtp_rtcp/source/absolute_capture_time_sender.cc @@ -12,7 +12,7 @@ #include -#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h" +#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h" #include "system_wrappers/include/ntp_time.h" namespace webrtc { @@ -26,7 +26,7 @@ constexpr TimeDelta AbsoluteCaptureTimeSender::kInterpolationMaxInterval; constexpr TimeDelta AbsoluteCaptureTimeSender::kInterpolationMaxError; static_assert( - AbsoluteCaptureTimeReceiver::kInterpolationMaxInterval >= + AbsoluteCaptureTimeInterpolator::kInterpolationMaxInterval >= AbsoluteCaptureTimeSender::kInterpolationMaxInterval, "Receivers should be as willing to interpolate timestamps as senders."); @@ -36,7 +36,7 @@ AbsoluteCaptureTimeSender::AbsoluteCaptureTimeSender(Clock* clock) uint32_t 
AbsoluteCaptureTimeSender::GetSource( uint32_t ssrc, rtc::ArrayView csrcs) { - return AbsoluteCaptureTimeReceiver::GetSource(ssrc, csrcs); + return AbsoluteCaptureTimeInterpolator::GetSource(ssrc, csrcs); } absl::optional AbsoluteCaptureTimeSender::OnSendPacket( @@ -47,7 +47,7 @@ absl::optional AbsoluteCaptureTimeSender::OnSendPacket( absl::optional estimated_capture_clock_offset) { const Timestamp send_time = clock_->CurrentTime(); - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); if (!ShouldSendExtension(send_time, source, rtp_timestamp, rtp_clock_frequency, absolute_capture_timestamp, @@ -108,7 +108,7 @@ bool AbsoluteCaptureTimeSender::ShouldSendExtension( // Should if interpolation would introduce too much error. const uint64_t interpolated_absolute_capture_timestamp = - AbsoluteCaptureTimeReceiver::InterpolateAbsoluteCaptureTimestamp( + AbsoluteCaptureTimeInterpolator::InterpolateAbsoluteCaptureTimestamp( rtp_timestamp, rtp_clock_frequency, last_rtp_timestamp_, last_absolute_capture_timestamp_); const int64_t interpolation_error_ms = UQ32x32ToInt64Ms(std::min( diff --git a/modules/rtp_rtcp/source/absolute_capture_time_sender.h b/modules/rtp_rtcp/source/absolute_capture_time_sender.h index c45a2dc5b6..348a28370d 100644 --- a/modules/rtp_rtcp/source/absolute_capture_time_sender.h +++ b/modules/rtp_rtcp/source/absolute_capture_time_sender.h @@ -15,7 +15,7 @@ #include "api/rtp_headers.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" @@ -67,20 +67,20 @@ class AbsoluteCaptureTimeSender { uint32_t rtp_clock_frequency, uint64_t absolute_capture_timestamp, absl::optional estimated_capture_clock_offset) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); Clock* const clock_; - rtc::CriticalSection crit_; + Mutex mutex_; - Timestamp 
last_send_time_ RTC_GUARDED_BY(crit_); + Timestamp last_send_time_ RTC_GUARDED_BY(mutex_); - uint32_t last_source_ RTC_GUARDED_BY(crit_); - uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(crit_); - uint32_t last_rtp_clock_frequency_ RTC_GUARDED_BY(crit_); - uint64_t last_absolute_capture_timestamp_ RTC_GUARDED_BY(crit_); + uint32_t last_source_ RTC_GUARDED_BY(mutex_); + uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(mutex_); + uint32_t last_rtp_clock_frequency_ RTC_GUARDED_BY(mutex_); + uint64_t last_absolute_capture_timestamp_ RTC_GUARDED_BY(mutex_); absl::optional last_estimated_capture_clock_offset_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); }; // AbsoluteCaptureTimeSender } // namespace webrtc diff --git a/modules/rtp_rtcp/source/active_decode_targets_helper.cc b/modules/rtp_rtcp/source/active_decode_targets_helper.cc new file mode 100644 index 0000000000..71e7e8cf78 --- /dev/null +++ b/modules/rtp_rtcp/source/active_decode_targets_helper.cc @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/rtp_rtcp/source/active_decode_targets_helper.h" + +#include + +#include "api/array_view.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { + +// Returns mask of ids of chains previous frame is part of. +// Assumes for each chain frames are seen in order and no frame on any chain is +// missing. That assumptions allows a simple detection when previous frame is +// part of a chain. 
+std::bitset<32> LastSendOnChain(int frame_diff, + rtc::ArrayView chain_diffs) { + std::bitset<32> bitmask = 0; + for (size_t i = 0; i < chain_diffs.size(); ++i) { + if (frame_diff == chain_diffs[i]) { + bitmask.set(i); + } + } + return bitmask; +} + +// Returns bitmask with first `num` bits set to 1. +std::bitset<32> AllActive(size_t num) { + RTC_DCHECK_LE(num, 32); + return (~uint32_t{0}) >> (32 - num); +} + +// Returns bitmask of chains that protect at least one active decode target. +std::bitset<32> ActiveChains( + rtc::ArrayView decode_target_protected_by_chain, + int num_chains, + std::bitset<32> active_decode_targets) { + std::bitset<32> active_chains = 0; + for (size_t dt = 0; dt < decode_target_protected_by_chain.size(); ++dt) { + if (dt < active_decode_targets.size() && !active_decode_targets[dt]) { + continue; + } + int chain_idx = decode_target_protected_by_chain[dt]; + RTC_DCHECK_LT(chain_idx, num_chains); + active_chains.set(chain_idx); + } + return active_chains; +} + +} // namespace + +void ActiveDecodeTargetsHelper::OnFrame( + rtc::ArrayView decode_target_protected_by_chain, + std::bitset<32> active_decode_targets, + bool is_keyframe, + int64_t frame_id, + rtc::ArrayView chain_diffs) { + const int num_chains = chain_diffs.size(); + if (num_chains == 0) { + // Avoid printing the warning + // when already printed the warning for the same active decode targets, or + // when active_decode_targets are not changed from it's default value of + // all are active, including non-existent decode targets. + if (last_active_decode_targets_ != active_decode_targets && + !active_decode_targets.all()) { + RTC_LOG(LS_WARNING) << "No chains are configured, but some decode " + "targets might be inactive. 
Unsupported."; + } + last_active_decode_targets_ = active_decode_targets; + return; + } + const size_t num_decode_targets = decode_target_protected_by_chain.size(); + RTC_DCHECK_GT(num_decode_targets, 0); + std::bitset<32> all_decode_targets = AllActive(num_decode_targets); + // Default value for active_decode_targets is 'all are active', i.e. all bits + // are set. Default value is set before number of decode targets is known. + // It is up to this helper to make the value cleaner and unset unused bits. + active_decode_targets &= all_decode_targets; + + if (is_keyframe) { + // Key frame resets the state. + last_active_decode_targets_ = all_decode_targets; + last_active_chains_ = AllActive(num_chains); + unsent_on_chain_.reset(); + } else { + // Update state assuming previous frame was sent. + unsent_on_chain_ &= + ~LastSendOnChain(frame_id - last_frame_id_, chain_diffs); + } + // Save for the next call to OnFrame. + // Though usually `frame_id == last_frame_id_ + 1`, it might not be so when + // frame id space is shared by several simulcast rtp streams. + last_frame_id_ = frame_id; + + if (active_decode_targets == last_active_decode_targets_) { + return; + } + last_active_decode_targets_ = active_decode_targets; + + if (active_decode_targets.none()) { + RTC_LOG(LS_ERROR) << "It is invalid to produce a frame (" << frame_id + << ") while there are no active decode targets"; + return; + } + last_active_chains_ = ActiveChains(decode_target_protected_by_chain, + num_chains, active_decode_targets); + // Frames that are part of inactive chains might not be produced by the + // encoder. Thus stop sending `active_decode_target` bitmask when it is sent + // on all active chains rather than on all chains. 
+ unsent_on_chain_ = last_active_chains_; + RTC_DCHECK(!unsent_on_chain_.none()); +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/active_decode_targets_helper.h b/modules/rtp_rtcp/source/active_decode_targets_helper.h new file mode 100644 index 0000000000..13755e8d80 --- /dev/null +++ b/modules/rtp_rtcp/source/active_decode_targets_helper.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_RTP_RTCP_SOURCE_ACTIVE_DECODE_TARGETS_HELPER_H_ +#define MODULES_RTP_RTCP_SOURCE_ACTIVE_DECODE_TARGETS_HELPER_H_ + +#include + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" + +namespace webrtc { + +// Helper class that decides when active_decode_target_bitmask should be written +// into the dependency descriptor rtp header extension. +// See: https://aomediacodec.github.io/av1-rtp-spec/#a44-switching +// This class is thread-compatible +class ActiveDecodeTargetsHelper { + public: + ActiveDecodeTargetsHelper() = default; + ActiveDecodeTargetsHelper(const ActiveDecodeTargetsHelper&) = delete; + ActiveDecodeTargetsHelper& operator=(const ActiveDecodeTargetsHelper&) = + delete; + ~ActiveDecodeTargetsHelper() = default; + + // Decides if active decode target bitmask should be attached to the frame + // that is about to be sent. + void OnFrame(rtc::ArrayView decode_target_protected_by_chain, + std::bitset<32> active_decode_targets, + bool is_keyframe, + int64_t frame_id, + rtc::ArrayView chain_diffs); + + // Returns active decode target to attach to the dependency descriptor. 
+ absl::optional ActiveDecodeTargetsBitmask() const { + if (unsent_on_chain_.none()) + return absl::nullopt; + return last_active_decode_targets_.to_ulong(); + } + + std::bitset<32> ActiveChainsBitmask() const { return last_active_chains_; } + + private: + // `unsent_on_chain_[i]` indicates last active decode + // target bitmask wasn't attached to a packet on the chain with id `i`. + std::bitset<32> unsent_on_chain_ = 0; + std::bitset<32> last_active_decode_targets_ = 0; + std::bitset<32> last_active_chains_ = 0; + int64_t last_frame_id_ = 0; +}; + +} // namespace webrtc + +#endif // MODULES_RTP_RTCP_SOURCE_ACTIVE_DECODE_TARGETS_HELPER_H_ diff --git a/modules/rtp_rtcp/source/active_decode_targets_helper_unittest.cc b/modules/rtp_rtcp/source/active_decode_targets_helper_unittest.cc new file mode 100644 index 0000000000..6f64fd1418 --- /dev/null +++ b/modules/rtp_rtcp/source/active_decode_targets_helper_unittest.cc @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/rtp_rtcp/source/active_decode_targets_helper.h" + +#include + +#include "absl/types/optional.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { +constexpr std::bitset<32> kAll = ~uint32_t{0}; +} // namespace + +TEST(ActiveDecodeTargetsHelperTest, + ReturnsNulloptOnKeyFrameWhenAllDecodeTargetsAreActive) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 0}; + ActiveDecodeTargetsHelper helper; + int chain_diffs[] = {0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b11, + /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs); + + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); +} + +TEST(ActiveDecodeTargetsHelperTest, + ReturnsNulloptOnKeyFrameWhenAllDecodeTargetsAreActiveAfterDeltaFrame) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 0}; + ActiveDecodeTargetsHelper helper; + int chain_diffs_key[] = {0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b11, + /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key); + int chain_diffs_delta[] = {1}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta); + + ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b01u); + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b11, + /*is_keyframe=*/true, /*frame_id=*/3, chain_diffs_key); + + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); +} + +TEST(ActiveDecodeTargetsHelperTest, + ReturnsBitmaskOnKeyFrameWhenSomeDecodeTargetsAreInactive) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 0}; + ActiveDecodeTargetsHelper helper; + int chain_diffs[] = {0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs); + + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b01u); +} + +TEST(ActiveDecodeTargetsHelperTest, + 
ReturnsBitmaskOnKeyFrameWhenSomeDecodeTargetsAreInactiveAfterDeltaFrame) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 0}; + ActiveDecodeTargetsHelper helper; + int chain_diffs_key[] = {0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key); + int chain_diffs_delta[] = {1}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta); + + ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/true, /*frame_id=*/3, chain_diffs_key); + + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b01u); +} + +TEST(ActiveDecodeTargetsHelperTest, + ReturnsNulloptWhenActiveDecodeTargetsAreUnused) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 0}; + ActiveDecodeTargetsHelper helper; + int chain_diffs[] = {0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/kAll, + /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); + + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/kAll, + /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); +} + +TEST(ActiveDecodeTargetsHelperTest, + ReturnsNulloptOnDeltaFrameAfterSentOnKeyFrame) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 0}; + ActiveDecodeTargetsHelper helper; + int chain_diffs_key[] = {0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key); + int chain_diffs_delta[] = {1}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta); + + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 
absl::nullopt); +} + +TEST(ActiveDecodeTargetsHelperTest, ReturnsNewBitmaskOnDeltaFrame) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 0}; + ActiveDecodeTargetsHelper helper; + int chain_diffs_key[] = {0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b11, + /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key); + ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); + int chain_diffs_delta[] = {1}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta); + + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b01u); +} + +TEST(ActiveDecodeTargetsHelperTest, + ReturnsBitmaskWhenAllDecodeTargetsReactivatedOnDeltaFrame) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 0}; + ActiveDecodeTargetsHelper helper; + int chain_diffs_key[] = {0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/true, /*frame_id=*/1, chain_diffs_key); + ASSERT_NE(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); + int chain_diffs_delta[] = {1}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b01, + /*is_keyframe=*/false, /*frame_id=*/2, chain_diffs_delta); + ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); + + // Reactive all the decode targets + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/kAll, + /*is_keyframe=*/false, /*frame_id=*/3, chain_diffs_delta); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b11u); +} + +TEST(ActiveDecodeTargetsHelperTest, ReturnsNulloptAfterSentOnAllActiveChains) { + // Active decode targets (0 and 1) are protected by chains 1 and 2. 
+ const std::bitset<32> kSome = 0b011; + constexpr int kDecodeTargetProtectedByChain[] = {2, 1, 0}; + + ActiveDecodeTargetsHelper helper; + int chain_diffs_key[] = {0, 0, 0}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b111, + /*is_keyframe=*/true, + /*frame_id=*/0, chain_diffs_key); + ASSERT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); + + int chain_diffs_delta1[] = {1, 1, 1}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/kSome, + /*is_keyframe=*/false, + /*frame_id=*/1, chain_diffs_delta1); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b011u); + + int chain_diffs_delta2[] = {2, 2, 1}; // Previous frame was part of chain#2 + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/kSome, + /*is_keyframe=*/false, + /*frame_id=*/2, chain_diffs_delta2); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b011u); + + // active_decode_targets_bitmask was send on chains 1 and 2. It was never sent + // on chain 0, but chain 0 only protects inactive decode target#2 + int chain_diffs_delta3[] = {3, 1, 2}; // Previous frame was part of chain#1 + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/kSome, + /*is_keyframe=*/false, + /*frame_id=*/3, chain_diffs_delta3); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); +} + +TEST(ActiveDecodeTargetsHelperTest, ReturnsBitmaskWhenChanged) { + constexpr int kDecodeTargetProtectedByChain[] = {0, 1, 1}; + + ActiveDecodeTargetsHelper helper; + int chain_diffs_key[] = {0, 0}; + helper.OnFrame(kDecodeTargetProtectedByChain, /*active_decode_targets=*/0b111, + /*is_keyframe=*/true, + /*frame_id=*/0, chain_diffs_key); + int chain_diffs_delta1[] = {1, 1}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b011, + /*is_keyframe=*/false, + /*frame_id=*/1, chain_diffs_delta1); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b011u); + + int chain_diffs_delta2[] = {1, 2}; + 
helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b101, + /*is_keyframe=*/false, + /*frame_id=*/2, chain_diffs_delta2); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b101u); + + // active_decode_target_bitmask was send on chain0, but it was an old one. + int chain_diffs_delta3[] = {2, 1}; + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b101, + /*is_keyframe=*/false, + /*frame_id=*/3, chain_diffs_delta3); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), 0b101u); +} + +TEST(ActiveDecodeTargetsHelperTest, ReturnsNulloptWhenChainsAreNotUsed) { + const rtc::ArrayView kDecodeTargetProtectedByChain; + const rtc::ArrayView kNoChainDiffs; + + ActiveDecodeTargetsHelper helper; + helper.OnFrame(kDecodeTargetProtectedByChain, /*active_decode_targets=*/kAll, + /*is_keyframe=*/true, + /*frame_id=*/0, kNoChainDiffs); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); + + helper.OnFrame(kDecodeTargetProtectedByChain, + /*active_decode_targets=*/0b101, + /*is_keyframe=*/false, + /*frame_id=*/1, kNoChainDiffs); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); +} + +TEST(ActiveDecodeTargetsHelperTest, Supports32DecodeTargets) { + std::bitset<32> some; + std::vector decode_target_protected_by_chain(32); + for (int i = 0; i < 32; ++i) { + decode_target_protected_by_chain[i] = i; + some[i] = i % 2 == 0; + } + + ActiveDecodeTargetsHelper helper; + std::vector chain_diffs_key(32, 0); + helper.OnFrame(decode_target_protected_by_chain, + /*active_decode_targets=*/some, + /*is_keyframe=*/true, + /*frame_id=*/1, chain_diffs_key); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), some.to_ulong()); + std::vector chain_diffs_delta(32, 1); + helper.OnFrame(decode_target_protected_by_chain, + /*active_decode_targets=*/some, + /*is_keyframe=*/false, + /*frame_id=*/2, chain_diffs_delta); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), absl::nullopt); + helper.OnFrame(decode_target_protected_by_chain, + 
/*active_decode_targets=*/kAll, + /*is_keyframe=*/false, + /*frame_id=*/2, chain_diffs_delta); + EXPECT_EQ(helper.ActiveDecodeTargetsBitmask(), kAll.to_ulong()); +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/capture_clock_offset_updater.cc b/modules/rtp_rtcp/source/capture_clock_offset_updater.cc new file mode 100644 index 0000000000..a5b12cb422 --- /dev/null +++ b/modules/rtp_rtcp/source/capture_clock_offset_updater.cc @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h" + +namespace webrtc { + +absl::optional +CaptureClockOffsetUpdater::AdjustEstimatedCaptureClockOffset( + absl::optional remote_capture_clock_offset) const { + if (remote_capture_clock_offset == absl::nullopt || + remote_to_local_clock_offset_ == absl::nullopt) { + return absl::nullopt; + } + + // Do calculations as "unsigned" to make overflows deterministic. + return static_cast(*remote_capture_clock_offset) + + static_cast(*remote_to_local_clock_offset_); +} + +void CaptureClockOffsetUpdater::SetRemoteToLocalClockOffset( + absl::optional offset_q32x32) { + remote_to_local_clock_offset_ = offset_q32x32; +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/capture_clock_offset_updater.h b/modules/rtp_rtcp/source/capture_clock_offset_updater.h new file mode 100644 index 0000000000..71d3eb4831 --- /dev/null +++ b/modules/rtp_rtcp/source/capture_clock_offset_updater.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_RTP_RTCP_SOURCE_CAPTURE_CLOCK_OFFSET_UPDATER_H_ +#define MODULES_RTP_RTCP_SOURCE_CAPTURE_CLOCK_OFFSET_UPDATER_H_ + +#include + +#include "absl/types/optional.h" + +namespace webrtc { + +// +// Helper class for calculating the clock offset against the capturer's clock. +// +// This is achieved by adjusting the estimated capture clock offset in received +// Absolute Capture Time RTP header extension (see +// https://webrtc.org/experiments/rtp-hdrext/abs-capture-time/), which +// represents the clock offset between a remote sender and the capturer, by +// adding local-to-remote clock offset. + +class CaptureClockOffsetUpdater { + public: + // Adjusts remote_capture_clock_offset, which originates from Absolute Capture + // Time RTP header extension, to get the local clock offset against the + // capturer's clock. + absl::optional AdjustEstimatedCaptureClockOffset( + absl::optional remote_capture_clock_offset) const; + + // Sets the NTP clock offset between the sender system (which may be different + // from the capture system) and the local system. This information is normally + // provided by passing half the value of the Round-Trip Time estimation given + // by RTCP sender reports (see DLSR/DLRR). + // + // Note that the value must be in Q32.32-formatted fixed-point seconds. 
+ void SetRemoteToLocalClockOffset(absl::optional offset_q32x32); + + private: + absl::optional remote_to_local_clock_offset_; +}; + +} // namespace webrtc + +#endif // MODULES_RTP_RTCP_SOURCE_CAPTURE_CLOCK_OFFSET_UPDATER_H_ diff --git a/modules/rtp_rtcp/source/capture_clock_offset_updater_unittest.cc b/modules/rtp_rtcp/source/capture_clock_offset_updater_unittest.cc new file mode 100644 index 0000000000..43e1dd1379 --- /dev/null +++ b/modules/rtp_rtcp/source/capture_clock_offset_updater_unittest.cc @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h" + +#include "system_wrappers/include/ntp_time.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { + +TEST(AbsoluteCaptureTimeReceiverTest, + SkipEstimatedCaptureClockOffsetIfRemoteToLocalClockOffsetIsUnknown) { + static const absl::optional kRemoteCaptureClockOffset = + Int64MsToQ32x32(-350); + CaptureClockOffsetUpdater updater; + updater.SetRemoteToLocalClockOffset(absl::nullopt); + EXPECT_EQ( + updater.AdjustEstimatedCaptureClockOffset(kRemoteCaptureClockOffset), + absl::nullopt); +} + +TEST(AbsoluteCaptureTimeReceiverTest, + SkipEstimatedCaptureClockOffsetIfRemoteCaptureClockOffsetIsUnknown) { + static const absl::optional kCaptureClockOffsetNull = absl::nullopt; + CaptureClockOffsetUpdater updater; + updater.SetRemoteToLocalClockOffset(0); + EXPECT_EQ(updater.AdjustEstimatedCaptureClockOffset(kCaptureClockOffsetNull), + kCaptureClockOffsetNull); + + static const absl::optional kRemoteCaptureClockOffset = + Int64MsToQ32x32(-350); + EXPECT_EQ( + 
updater.AdjustEstimatedCaptureClockOffset(kRemoteCaptureClockOffset), + kRemoteCaptureClockOffset); +} + +TEST(AbsoluteCaptureTimeReceiverTest, EstimatedCaptureClockOffsetArithmetic) { + static const absl::optional kRemoteCaptureClockOffset = + Int64MsToQ32x32(-350); + static const absl::optional kRemoteToLocalClockOffset = + Int64MsToQ32x32(-7000007); + CaptureClockOffsetUpdater updater; + updater.SetRemoteToLocalClockOffset(kRemoteToLocalClockOffset); + EXPECT_THAT( + updater.AdjustEstimatedCaptureClockOffset(kRemoteCaptureClockOffset), + ::testing::Optional(::testing::Eq(*kRemoteCaptureClockOffset + + *kRemoteToLocalClockOffset))); +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc b/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc index 724ad8c42e..f1e4eddb4b 100644 --- a/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc +++ b/modules/rtp_rtcp/source/create_video_rtp_depacketizer.cc @@ -37,6 +37,7 @@ std::unique_ptr CreateVideoRtpDepacketizer( case kVideoCodecMultiplex: return std::make_unique(); } + RTC_CHECK_NOTREACHED(); } } // namespace webrtc diff --git a/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc b/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc new file mode 100644 index 0000000000..c542557526 --- /dev/null +++ b/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.cc @@ -0,0 +1,473 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h" + +#include +#include +#include + +#include "absl/strings/match.h" +#include "api/transport/field_trial_based_config.h" +#include "logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h" +#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { +constexpr uint32_t kTimestampTicksPerMs = 90; +constexpr int kSendSideDelayWindowMs = 1000; +constexpr int kBitrateStatisticsWindowMs = 1000; +constexpr size_t kRtpSequenceNumberMapMaxEntries = 1 << 13; + +bool IsDisabled(absl::string_view name, + const WebRtcKeyValueConfig* field_trials) { + FieldTrialBasedConfig default_trials; + auto& trials = field_trials ? *field_trials : default_trials; + return absl::StartsWith(trials.Lookup(name), "Disabled"); +} +} // namespace + +DEPRECATED_RtpSenderEgress::NonPacedPacketSender::NonPacedPacketSender( + DEPRECATED_RtpSenderEgress* sender) + : transport_sequence_number_(0), sender_(sender) {} +DEPRECATED_RtpSenderEgress::NonPacedPacketSender::~NonPacedPacketSender() = + default; + +void DEPRECATED_RtpSenderEgress::NonPacedPacketSender::EnqueuePackets( + std::vector> packets) { + for (auto& packet : packets) { + if (!packet->SetExtension( + ++transport_sequence_number_)) { + --transport_sequence_number_; + } + packet->ReserveExtension(); + packet->ReserveExtension(); + sender_->SendPacket(packet.get(), PacedPacketInfo()); + } +} + +DEPRECATED_RtpSenderEgress::DEPRECATED_RtpSenderEgress( + const RtpRtcpInterface::Configuration& config, + RtpPacketHistory* packet_history) + : ssrc_(config.local_media_ssrc), + rtx_ssrc_(config.rtx_send_ssrc), + flexfec_ssrc_(config.fec_generator ? 
config.fec_generator->FecSsrc() + : absl::nullopt), + populate_network2_timestamp_(config.populate_network2_timestamp), + send_side_bwe_with_overhead_( + !IsDisabled("WebRTC-SendSideBwe-WithOverhead", config.field_trials)), + clock_(config.clock), + packet_history_(packet_history), + transport_(config.outgoing_transport), + event_log_(config.event_log), + is_audio_(config.audio), + need_rtp_packet_infos_(config.need_rtp_packet_infos), + transport_feedback_observer_(config.transport_feedback_callback), + send_side_delay_observer_(config.send_side_delay_observer), + send_packet_observer_(config.send_packet_observer), + rtp_stats_callback_(config.rtp_stats_callback), + bitrate_callback_(config.send_bitrate_observer), + media_has_been_sent_(false), + force_part_of_allocation_(false), + timestamp_offset_(0), + max_delay_it_(send_delays_.end()), + sum_delays_ms_(0), + total_packet_send_delay_ms_(0), + send_rates_(kNumMediaTypes, + {kBitrateStatisticsWindowMs, RateStatistics::kBpsScale}), + rtp_sequence_number_map_(need_rtp_packet_infos_ + ? 
std::make_unique( + kRtpSequenceNumberMapMaxEntries) + : nullptr) {} + +void DEPRECATED_RtpSenderEgress::SendPacket( + RtpPacketToSend* packet, + const PacedPacketInfo& pacing_info) { + RTC_DCHECK(packet); + + const uint32_t packet_ssrc = packet->Ssrc(); + RTC_DCHECK(packet->packet_type().has_value()); + RTC_DCHECK(HasCorrectSsrc(*packet)); + int64_t now_ms = clock_->TimeInMilliseconds(); + + if (is_audio_) { +#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE + BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "AudioTotBitrate_kbps", now_ms, + GetSendRates().Sum().kbps(), packet_ssrc); + BWE_TEST_LOGGING_PLOT_WITH_SSRC( + 1, "AudioNackBitrate_kbps", now_ms, + GetSendRates()[RtpPacketMediaType::kRetransmission].kbps(), + packet_ssrc); +#endif + } else { +#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE + BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "VideoTotBitrate_kbps", now_ms, + GetSendRates().Sum().kbps(), packet_ssrc); + BWE_TEST_LOGGING_PLOT_WITH_SSRC( + 1, "VideoNackBitrate_kbps", now_ms, + GetSendRates()[RtpPacketMediaType::kRetransmission].kbps(), + packet_ssrc); +#endif + } + + PacketOptions options; + { + MutexLock lock(&lock_); + options.included_in_allocation = force_part_of_allocation_; + + if (need_rtp_packet_infos_ && + packet->packet_type() == RtpPacketToSend::Type::kVideo) { + RTC_DCHECK(rtp_sequence_number_map_); + // Last packet of a frame, add it to sequence number info map. + const uint32_t timestamp = packet->Timestamp() - timestamp_offset_; + bool is_first_packet_of_frame = packet->is_first_packet_of_frame(); + bool is_last_packet_of_frame = packet->Marker(); + + rtp_sequence_number_map_->InsertPacket( + packet->SequenceNumber(), + RtpSequenceNumberMap::Info(timestamp, is_first_packet_of_frame, + is_last_packet_of_frame)); + } + } + + // Bug webrtc:7859. While FEC is invoked from rtp_sender_video, and not after + // the pacer, these modifications of the header below are happening after the + // FEC protection packets are calculated. 
This will corrupt recovered packets + // at the same place. It's not an issue for extensions, which are present in + // all the packets (their content just may be incorrect on recovered packets). + // In case of VideoTimingExtension, since it's present not in every packet, + // data after rtp header may be corrupted if these packets are protected by + // the FEC. + int64_t diff_ms = now_ms - packet->capture_time_ms(); + if (packet->HasExtension()) { + packet->SetExtension(kTimestampTicksPerMs * diff_ms); + } + if (packet->HasExtension()) { + packet->SetExtension( + AbsoluteSendTime::MsTo24Bits(now_ms)); + } + + if (packet->HasExtension()) { + if (populate_network2_timestamp_) { + packet->set_network2_time_ms(now_ms); + } else { + packet->set_pacer_exit_time_ms(now_ms); + } + } + + const bool is_media = packet->packet_type() == RtpPacketMediaType::kAudio || + packet->packet_type() == RtpPacketMediaType::kVideo; + + // Downstream code actually uses this flag to distinguish between media and + // everything else. + options.is_retransmit = !is_media; + if (auto packet_id = packet->GetExtension()) { + options.packet_id = *packet_id; + options.included_in_feedback = true; + options.included_in_allocation = true; + AddPacketToTransportFeedback(*packet_id, *packet, pacing_info); + } + + options.additional_data = packet->additional_data(); + + if (packet->packet_type() != RtpPacketMediaType::kPadding && + packet->packet_type() != RtpPacketMediaType::kRetransmission) { + UpdateDelayStatistics(packet->capture_time_ms(), now_ms, packet_ssrc); + UpdateOnSendPacket(options.packet_id, packet->capture_time_ms(), + packet_ssrc); + } + + const bool send_success = SendPacketToNetwork(*packet, options, pacing_info); + + // Put packet in retransmission history or update pending status even if + // actual sending fails. 
+ if (is_media && packet->allow_retransmission()) { + packet_history_->PutRtpPacket(std::make_unique(*packet), + now_ms); + } else if (packet->retransmitted_sequence_number()) { + packet_history_->MarkPacketAsSent(*packet->retransmitted_sequence_number()); + } + + if (send_success) { + MutexLock lock(&lock_); + UpdateRtpStats(*packet); + media_has_been_sent_ = true; + } +} + +void DEPRECATED_RtpSenderEgress::ProcessBitrateAndNotifyObservers() { + if (!bitrate_callback_) + return; + + MutexLock lock(&lock_); + RtpSendRates send_rates = GetSendRatesLocked(); + bitrate_callback_->Notify( + send_rates.Sum().bps(), + send_rates[RtpPacketMediaType::kRetransmission].bps(), ssrc_); +} + +RtpSendRates DEPRECATED_RtpSenderEgress::GetSendRates() const { + MutexLock lock(&lock_); + return GetSendRatesLocked(); +} + +RtpSendRates DEPRECATED_RtpSenderEgress::GetSendRatesLocked() const { + const int64_t now_ms = clock_->TimeInMilliseconds(); + RtpSendRates current_rates; + for (size_t i = 0; i < kNumMediaTypes; ++i) { + RtpPacketMediaType type = static_cast(i); + current_rates[type] = + DataRate::BitsPerSec(send_rates_[i].Rate(now_ms).value_or(0)); + } + return current_rates; +} + +void DEPRECATED_RtpSenderEgress::GetDataCounters( + StreamDataCounters* rtp_stats, + StreamDataCounters* rtx_stats) const { + MutexLock lock(&lock_); + *rtp_stats = rtp_stats_; + *rtx_stats = rtx_rtp_stats_; +} + +void DEPRECATED_RtpSenderEgress::ForceIncludeSendPacketsInAllocation( + bool part_of_allocation) { + MutexLock lock(&lock_); + force_part_of_allocation_ = part_of_allocation; +} + +bool DEPRECATED_RtpSenderEgress::MediaHasBeenSent() const { + MutexLock lock(&lock_); + return media_has_been_sent_; +} + +void DEPRECATED_RtpSenderEgress::SetMediaHasBeenSent(bool media_sent) { + MutexLock lock(&lock_); + media_has_been_sent_ = media_sent; +} + +void DEPRECATED_RtpSenderEgress::SetTimestampOffset(uint32_t timestamp) { + MutexLock lock(&lock_); + timestamp_offset_ = timestamp; +} + +std::vector 
+DEPRECATED_RtpSenderEgress::GetSentRtpPacketInfos( + rtc::ArrayView sequence_numbers) const { + RTC_DCHECK(!sequence_numbers.empty()); + if (!need_rtp_packet_infos_) { + return std::vector(); + } + + std::vector results; + results.reserve(sequence_numbers.size()); + + MutexLock lock(&lock_); + for (uint16_t sequence_number : sequence_numbers) { + const auto& info = rtp_sequence_number_map_->Get(sequence_number); + if (!info) { + // The empty vector will be returned. We can delay the clearing + // of the vector until after we exit the critical section. + return std::vector(); + } + results.push_back(*info); + } + + return results; +} + +bool DEPRECATED_RtpSenderEgress::HasCorrectSsrc( + const RtpPacketToSend& packet) const { + switch (*packet.packet_type()) { + case RtpPacketMediaType::kAudio: + case RtpPacketMediaType::kVideo: + return packet.Ssrc() == ssrc_; + case RtpPacketMediaType::kRetransmission: + case RtpPacketMediaType::kPadding: + // Both padding and retransmission must be on either the media or the + // RTX stream. + return packet.Ssrc() == rtx_ssrc_ || packet.Ssrc() == ssrc_; + case RtpPacketMediaType::kForwardErrorCorrection: + // FlexFEC is on separate SSRC, ULPFEC uses media SSRC. + return packet.Ssrc() == ssrc_ || packet.Ssrc() == flexfec_ssrc_; + } + return false; +} + +void DEPRECATED_RtpSenderEgress::AddPacketToTransportFeedback( + uint16_t packet_id, + const RtpPacketToSend& packet, + const PacedPacketInfo& pacing_info) { + if (transport_feedback_observer_) { + size_t packet_size = packet.payload_size() + packet.padding_size(); + if (send_side_bwe_with_overhead_) { + packet_size = packet.size(); + } + + RtpPacketSendInfo packet_info; + // TODO(bugs.webrtc.org/12713): Remove once downstream usage is gone. 
+ packet_info.ssrc = ssrc_; + packet_info.media_ssrc = ssrc_; + packet_info.transport_sequence_number = packet_id; + packet_info.rtp_sequence_number = packet.SequenceNumber(); + packet_info.length = packet_size; + packet_info.pacing_info = pacing_info; + packet_info.packet_type = packet.packet_type(); + transport_feedback_observer_->OnAddPacket(packet_info); + } +} + +void DEPRECATED_RtpSenderEgress::UpdateDelayStatistics(int64_t capture_time_ms, + int64_t now_ms, + uint32_t ssrc) { + if (!send_side_delay_observer_ || capture_time_ms <= 0) + return; + + int avg_delay_ms = 0; + int max_delay_ms = 0; + uint64_t total_packet_send_delay_ms = 0; + { + MutexLock lock(&lock_); + // Compute the max and average of the recent capture-to-send delays. + // The time complexity of the current approach depends on the distribution + // of the delay values. This could be done more efficiently. + + // Remove elements older than kSendSideDelayWindowMs. + auto lower_bound = + send_delays_.lower_bound(now_ms - kSendSideDelayWindowMs); + for (auto it = send_delays_.begin(); it != lower_bound; ++it) { + if (max_delay_it_ == it) { + max_delay_it_ = send_delays_.end(); + } + sum_delays_ms_ -= it->second; + } + send_delays_.erase(send_delays_.begin(), lower_bound); + if (max_delay_it_ == send_delays_.end()) { + // Removed the previous max. Need to recompute. + RecomputeMaxSendDelay(); + } + + // Add the new element. 
+ RTC_DCHECK_GE(now_ms, 0); + RTC_DCHECK_LE(now_ms, std::numeric_limits::max() / 2); + RTC_DCHECK_GE(capture_time_ms, 0); + RTC_DCHECK_LE(capture_time_ms, std::numeric_limits::max() / 2); + int64_t diff_ms = now_ms - capture_time_ms; + RTC_DCHECK_GE(diff_ms, static_cast(0)); + RTC_DCHECK_LE(diff_ms, std::numeric_limits::max()); + int new_send_delay = rtc::dchecked_cast(now_ms - capture_time_ms); + SendDelayMap::iterator it; + bool inserted; + std::tie(it, inserted) = + send_delays_.insert(std::make_pair(now_ms, new_send_delay)); + if (!inserted) { + // TODO(terelius): If we have multiple delay measurements during the same + // millisecond then we keep the most recent one. It is not clear that this + // is the right decision, but it preserves an earlier behavior. + int previous_send_delay = it->second; + sum_delays_ms_ -= previous_send_delay; + it->second = new_send_delay; + if (max_delay_it_ == it && new_send_delay < previous_send_delay) { + RecomputeMaxSendDelay(); + } + } + if (max_delay_it_ == send_delays_.end() || + it->second >= max_delay_it_->second) { + max_delay_it_ = it; + } + sum_delays_ms_ += new_send_delay; + total_packet_send_delay_ms_ += new_send_delay; + total_packet_send_delay_ms = total_packet_send_delay_ms_; + + size_t num_delays = send_delays_.size(); + RTC_DCHECK(max_delay_it_ != send_delays_.end()); + max_delay_ms = rtc::dchecked_cast(max_delay_it_->second); + int64_t avg_ms = (sum_delays_ms_ + num_delays / 2) / num_delays; + RTC_DCHECK_GE(avg_ms, static_cast(0)); + RTC_DCHECK_LE(avg_ms, + static_cast(std::numeric_limits::max())); + avg_delay_ms = + rtc::dchecked_cast((sum_delays_ms_ + num_delays / 2) / num_delays); + } + send_side_delay_observer_->SendSideDelayUpdated( + avg_delay_ms, max_delay_ms, total_packet_send_delay_ms, ssrc); +} + +void DEPRECATED_RtpSenderEgress::RecomputeMaxSendDelay() { + max_delay_it_ = send_delays_.begin(); + for (auto it = send_delays_.begin(); it != send_delays_.end(); ++it) { + if (it->second >= 
max_delay_it_->second) { + max_delay_it_ = it; + } + } +} + +void DEPRECATED_RtpSenderEgress::UpdateOnSendPacket(int packet_id, + int64_t capture_time_ms, + uint32_t ssrc) { + if (!send_packet_observer_ || capture_time_ms <= 0 || packet_id == -1) { + return; + } + + send_packet_observer_->OnSendPacket(packet_id, capture_time_ms, ssrc); +} + +bool DEPRECATED_RtpSenderEgress::SendPacketToNetwork( + const RtpPacketToSend& packet, + const PacketOptions& options, + const PacedPacketInfo& pacing_info) { + int bytes_sent = -1; + if (transport_) { + bytes_sent = transport_->SendRtp(packet.data(), packet.size(), options) + ? static_cast(packet.size()) + : -1; + if (event_log_ && bytes_sent > 0) { + event_log_->Log(std::make_unique( + packet, pacing_info.probe_cluster_id)); + } + } + + if (bytes_sent <= 0) { + RTC_LOG(LS_WARNING) << "Transport failed to send packet."; + return false; + } + return true; +} + +void DEPRECATED_RtpSenderEgress::UpdateRtpStats(const RtpPacketToSend& packet) { + int64_t now_ms = clock_->TimeInMilliseconds(); + + StreamDataCounters* counters = + packet.Ssrc() == rtx_ssrc_ ? 
&rtx_rtp_stats_ : &rtp_stats_; + + if (counters->first_packet_time_ms == -1) { + counters->first_packet_time_ms = now_ms; + } + + if (packet.packet_type() == RtpPacketMediaType::kForwardErrorCorrection) { + counters->fec.AddPacket(packet); + } + + if (packet.packet_type() == RtpPacketMediaType::kRetransmission) { + counters->retransmitted.AddPacket(packet); + } + counters->transmitted.AddPacket(packet); + + RTC_DCHECK(packet.packet_type().has_value()); + send_rates_[static_cast(*packet.packet_type())].Update(packet.size(), + now_ms); + + if (rtp_stats_callback_) { + rtp_stats_callback_->DataCountersUpdated(*counters, packet.Ssrc()); + } +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h b/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h new file mode 100644 index 0000000000..742e7d5499 --- /dev/null +++ b/modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_RTP_RTCP_SOURCE_DEPRECATED_DEPRECATED_RTP_SENDER_EGRESS_H_ +#define MODULES_RTP_RTCP_SOURCE_DEPRECATED_DEPRECATED_RTP_SENDER_EGRESS_H_ + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/call/transport.h" +#include "api/rtc_event_log/rtc_event_log.h" +#include "api/units/data_rate.h" +#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "modules/rtp_rtcp/source/rtp_packet_history.h" +#include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" +#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h" +#include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +class DEPRECATED_RtpSenderEgress { + public: + // Helper class that redirects packets directly to the send part of this class + // without passing through an actual paced sender. + class NonPacedPacketSender : public RtpPacketSender { + public: + explicit NonPacedPacketSender(DEPRECATED_RtpSenderEgress* sender); + virtual ~NonPacedPacketSender(); + + void EnqueuePackets( + std::vector> packets) override; + + private: + uint16_t transport_sequence_number_; + DEPRECATED_RtpSenderEgress* const sender_; + }; + + DEPRECATED_RtpSenderEgress(const RtpRtcpInterface::Configuration& config, + RtpPacketHistory* packet_history); + ~DEPRECATED_RtpSenderEgress() = default; + + void SendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info) + RTC_LOCKS_EXCLUDED(lock_); + uint32_t Ssrc() const { return ssrc_; } + absl::optional RtxSsrc() const { return rtx_ssrc_; } + absl::optional FlexFecSsrc() const { return flexfec_ssrc_; } + + void ProcessBitrateAndNotifyObservers() RTC_LOCKS_EXCLUDED(lock_); + RtpSendRates GetSendRates() const RTC_LOCKS_EXCLUDED(lock_); + void GetDataCounters(StreamDataCounters* rtp_stats, + StreamDataCounters* rtx_stats) const + RTC_LOCKS_EXCLUDED(lock_); + + void 
ForceIncludeSendPacketsInAllocation(bool part_of_allocation) + RTC_LOCKS_EXCLUDED(lock_); + bool MediaHasBeenSent() const RTC_LOCKS_EXCLUDED(lock_); + void SetMediaHasBeenSent(bool media_sent) RTC_LOCKS_EXCLUDED(lock_); + void SetTimestampOffset(uint32_t timestamp) RTC_LOCKS_EXCLUDED(lock_); + + // For each sequence number in |sequence_number|, recall the last RTP packet + // which bore it - its timestamp and whether it was the first and/or last + // packet in that frame. If all of the given sequence numbers could be + // recalled, return a vector with all of them (in corresponding order). + // If any could not be recalled, return an empty vector. + std::vector GetSentRtpPacketInfos( + rtc::ArrayView sequence_numbers) const + RTC_LOCKS_EXCLUDED(lock_); + + private: + // Maps capture time in milliseconds to send-side delay in milliseconds. + // Send-side delay is the difference between transmission time and capture + // time. + typedef std::map SendDelayMap; + + RtpSendRates GetSendRatesLocked() const RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); + bool HasCorrectSsrc(const RtpPacketToSend& packet) const; + void AddPacketToTransportFeedback(uint16_t packet_id, + const RtpPacketToSend& packet, + const PacedPacketInfo& pacing_info); + void UpdateDelayStatistics(int64_t capture_time_ms, + int64_t now_ms, + uint32_t ssrc); + void RecomputeMaxSendDelay() RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); + void UpdateOnSendPacket(int packet_id, + int64_t capture_time_ms, + uint32_t ssrc); + // Sends packet on to |transport_|, leaving the RTP module. 
+ bool SendPacketToNetwork(const RtpPacketToSend& packet, + const PacketOptions& options, + const PacedPacketInfo& pacing_info); + void UpdateRtpStats(const RtpPacketToSend& packet) + RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); + + const uint32_t ssrc_; + const absl::optional rtx_ssrc_; + const absl::optional flexfec_ssrc_; + const bool populate_network2_timestamp_; + const bool send_side_bwe_with_overhead_; + Clock* const clock_; + RtpPacketHistory* const packet_history_; + Transport* const transport_; + RtcEventLog* const event_log_; + const bool is_audio_; + const bool need_rtp_packet_infos_; + + TransportFeedbackObserver* const transport_feedback_observer_; + SendSideDelayObserver* const send_side_delay_observer_; + SendPacketObserver* const send_packet_observer_; + StreamDataCountersCallback* const rtp_stats_callback_; + BitrateStatisticsObserver* const bitrate_callback_; + + mutable Mutex lock_; + bool media_has_been_sent_ RTC_GUARDED_BY(lock_); + bool force_part_of_allocation_ RTC_GUARDED_BY(lock_); + uint32_t timestamp_offset_ RTC_GUARDED_BY(lock_); + + SendDelayMap send_delays_ RTC_GUARDED_BY(lock_); + SendDelayMap::const_iterator max_delay_it_ RTC_GUARDED_BY(lock_); + // The sum of delays over a kSendSideDelayWindowMs sliding window. + int64_t sum_delays_ms_ RTC_GUARDED_BY(lock_); + uint64_t total_packet_send_delay_ms_ RTC_GUARDED_BY(lock_); + StreamDataCounters rtp_stats_ RTC_GUARDED_BY(lock_); + StreamDataCounters rtx_rtp_stats_ RTC_GUARDED_BY(lock_); + // One element per value in RtpPacketMediaType, with index matching value. + std::vector send_rates_ RTC_GUARDED_BY(lock_); + + // Maps sent packets' sequence numbers to a tuple consisting of: + // 1. The timestamp, without the randomizing offset mandated by the RFC. + // 2. Whether the packet was the first in its frame. + // 3. Whether the packet was the last in its frame. 
+ const std::unique_ptr rtp_sequence_number_map_ + RTC_GUARDED_BY(lock_); +}; + +} // namespace webrtc + +#endif // MODULES_RTP_RTCP_SOURCE_DEPRECATED_DEPRECATED_RTP_SENDER_EGRESS_H_ diff --git a/modules/rtp_rtcp/source/dtmf_queue.cc b/modules/rtp_rtcp/source/dtmf_queue.cc index 10e674789a..df06d2a2f3 100644 --- a/modules/rtp_rtcp/source/dtmf_queue.cc +++ b/modules/rtp_rtcp/source/dtmf_queue.cc @@ -24,7 +24,7 @@ DtmfQueue::DtmfQueue() {} DtmfQueue::~DtmfQueue() {} bool DtmfQueue::AddDtmf(const Event& event) { - rtc::CritScope lock(&dtmf_critsect_); + MutexLock lock(&dtmf_mutex_); if (queue_.size() >= kDtmfOutbandMax) { return false; } @@ -34,7 +34,7 @@ bool DtmfQueue::AddDtmf(const Event& event) { bool DtmfQueue::NextDtmf(Event* event) { RTC_DCHECK(event); - rtc::CritScope lock(&dtmf_critsect_); + MutexLock lock(&dtmf_mutex_); if (queue_.empty()) { return false; } @@ -45,7 +45,7 @@ bool DtmfQueue::NextDtmf(Event* event) { } bool DtmfQueue::PendingDtmf() const { - rtc::CritScope lock(&dtmf_critsect_); + MutexLock lock(&dtmf_mutex_); return !queue_.empty(); } } // namespace webrtc diff --git a/modules/rtp_rtcp/source/dtmf_queue.h b/modules/rtp_rtcp/source/dtmf_queue.h index adb93aa6fa..1d1867fd27 100644 --- a/modules/rtp_rtcp/source/dtmf_queue.h +++ b/modules/rtp_rtcp/source/dtmf_queue.h @@ -15,7 +15,7 @@ #include -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { class DtmfQueue { @@ -35,7 +35,7 @@ class DtmfQueue { bool PendingDtmf() const; private: - rtc::CriticalSection dtmf_critsect_; + mutable Mutex dtmf_mutex_; std::list queue_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/fec_test_helper.cc b/modules/rtp_rtcp/source/fec_test_helper.cc index f8579b48ff..b9ac25e4a8 100644 --- a/modules/rtp_rtcp/source/fec_test_helper.cc +++ b/modules/rtp_rtcp/source/fec_test_helper.cc @@ -57,7 +57,7 @@ ForwardErrorCorrection::PacketList MediaPacketGenerator::ConstructMediaPackets( media_packet->data.SetSize( 
random_->Rand(min_packet_size_, max_packet_size_)); - uint8_t* data = media_packet->data.data(); + uint8_t* data = media_packet->data.MutableData(); // Generate random values for the first 2 bytes data[0] = random_->Rand(); data[1] = random_->Rand(); @@ -88,7 +88,7 @@ ForwardErrorCorrection::PacketList MediaPacketGenerator::ConstructMediaPackets( // Last packet, set marker bit. ForwardErrorCorrection::Packet* media_packet = media_packets.back().get(); RTC_DCHECK(media_packet); - media_packet->data[1] |= 0x80; + media_packet->data.MutableData()[1] |= 0x80; next_seq_num_ = seq_num; @@ -122,7 +122,7 @@ std::unique_ptr AugmentedPacketGenerator::NextPacket( std::unique_ptr packet(new AugmentedPacket()); packet->data.SetSize(length + kRtpHeaderSize); - uint8_t* data = packet->data.data(); + uint8_t* data = packet->data.MutableData(); for (size_t i = 0; i < length; ++i) data[i + kRtpHeaderSize] = offset + i; packet->data.SetSize(length + kRtpHeaderSize); @@ -132,7 +132,7 @@ std::unique_ptr AugmentedPacketGenerator::NextPacket( packet->header.sequenceNumber = seq_num_; packet->header.timestamp = timestamp_; packet->header.ssrc = ssrc_; - WriteRtpHeader(packet->header, packet->data.data()); + WriteRtpHeader(packet->header, data); ++seq_num_; --num_packets_; @@ -171,8 +171,8 @@ std::unique_ptr FlexfecPacketGenerator::BuildFlexfecPacket( std::unique_ptr packet_with_rtp_header( new AugmentedPacket()); packet_with_rtp_header->data.SetSize(kRtpHeaderSize + packet.data.size()); - WriteRtpHeader(header, packet_with_rtp_header->data.data()); - memcpy(packet_with_rtp_header->data.data() + kRtpHeaderSize, + WriteRtpHeader(header, packet_with_rtp_header->data.MutableData()); + memcpy(packet_with_rtp_header->data.MutableData() + kRtpHeaderSize, packet.data.cdata(), packet.data.size()); return packet_with_rtp_header; @@ -184,19 +184,21 @@ UlpfecPacketGenerator::UlpfecPacketGenerator(uint32_t ssrc) RtpPacketReceived UlpfecPacketGenerator::BuildMediaRedPacket( const AugmentedPacket& 
packet, bool is_recovered) { - RtpPacketReceived red_packet; - // Copy RTP header. + // Create a temporary buffer used to wrap the media packet in RED. + rtc::CopyOnWriteBuffer red_buffer; const size_t kHeaderLength = packet.header.headerLength; - red_packet.Parse(packet.data.cdata(), kHeaderLength); - RTC_DCHECK_EQ(red_packet.headers_size(), kHeaderLength); - uint8_t* rtp_payload = - red_packet.AllocatePayload(packet.data.size() + 1 - kHeaderLength); - // Move payload type into rtp payload. - rtp_payload[0] = red_packet.PayloadType(); + // Append header. + red_buffer.SetData(packet.data.data(), kHeaderLength); + // Find payload type and add it as RED header. + uint8_t media_payload_type = red_buffer[1] & 0x7F; + red_buffer.AppendData({media_payload_type}); + // Append rest of payload/padding. + red_buffer.AppendData( + packet.data.Slice(kHeaderLength, packet.data.size() - kHeaderLength)); + + RtpPacketReceived red_packet; + RTC_CHECK(red_packet.Parse(std::move(red_buffer))); red_packet.SetPayloadType(kRedPayloadType); - // Copy the payload. - memcpy(rtp_payload + 1, packet.data.cdata() + kHeaderLength, - packet.data.size() - kHeaderLength); red_packet.set_recovered(is_recovered); return red_packet; diff --git a/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc b/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc index ab0dcb68ae..40426f16bf 100644 --- a/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc +++ b/modules/rtp_rtcp/source/flexfec_header_reader_writer.cc @@ -25,6 +25,11 @@ namespace { // Maximum number of media packets that can be protected in one batch. constexpr size_t kMaxMediaPackets = 48; // Since we are reusing ULPFEC masks. +// Maximum number of media packets tracked by FEC decoder. +// Maintain a sufficiently larger tracking window than |kMaxMediaPackets| +// to account for packet reordering in pacer/ network. 
+constexpr size_t kMaxTrackedMediaPackets = 4 * kMaxMediaPackets; + // Maximum number of FEC packets stored inside ForwardErrorCorrection. constexpr size_t kMaxFecPackets = kMaxMediaPackets; @@ -72,7 +77,7 @@ size_t FlexfecHeaderSize(size_t packet_mask_size) { } // namespace FlexfecHeaderReader::FlexfecHeaderReader() - : FecHeaderReader(kMaxMediaPackets, kMaxFecPackets) {} + : FecHeaderReader(kMaxTrackedMediaPackets, kMaxFecPackets) {} FlexfecHeaderReader::~FlexfecHeaderReader() = default; @@ -85,7 +90,7 @@ bool FlexfecHeaderReader::ReadFecHeader( RTC_LOG(LS_WARNING) << "Discarding truncated FlexFEC packet."; return false; } - uint8_t* const data = fec_packet->pkt->data.data(); + uint8_t* const data = fec_packet->pkt->data.MutableData(); bool r_bit = (data[0] & 0x80) != 0; if (r_bit) { RTC_LOG(LS_INFO) @@ -249,7 +254,7 @@ void FlexfecHeaderWriter::FinalizeFecHeader( const uint8_t* packet_mask, size_t packet_mask_size, ForwardErrorCorrection::Packet* fec_packet) const { - uint8_t* data = fec_packet->data.data(); + uint8_t* data = fec_packet->data.MutableData(); data[0] &= 0x7f; // Clear R bit. data[0] &= 0xbf; // Clear F bit. ByteWriter::WriteBigEndian(&data[8], kSsrcCount); @@ -260,8 +265,7 @@ void FlexfecHeaderWriter::FinalizeFecHeader( // // We treat the mask parts as unsigned integers with host order endianness // in order to simplify the bit shifting between bytes. - uint8_t* const written_packet_mask = - fec_packet->data.data() + kPacketMaskOffset; + uint8_t* const written_packet_mask = data + kPacketMaskOffset; if (packet_mask_size == kUlpfecPacketMaskSizeLBitSet) { // The packet mask is 48 bits long. 
uint16_t tmp_mask_part0 = diff --git a/modules/rtp_rtcp/source/flexfec_header_reader_writer_unittest.cc b/modules/rtp_rtcp/source/flexfec_header_reader_writer_unittest.cc index 1d86dd0eb4..4a24e90ec3 100644 --- a/modules/rtp_rtcp/source/flexfec_header_reader_writer_unittest.cc +++ b/modules/rtp_rtcp/source/flexfec_header_reader_writer_unittest.cc @@ -78,8 +78,9 @@ rtc::scoped_refptr WriteHeader(const uint8_t* packet_mask, FlexfecHeaderWriter writer; rtc::scoped_refptr written_packet(new Packet()); written_packet->data.SetSize(kMediaPacketLength); + uint8_t* data = written_packet->data.MutableData(); for (size_t i = 0; i < written_packet->data.size(); ++i) { - written_packet->data[i] = i; // Actual content doesn't matter. + data[i] = i; // Actual content doesn't matter. } writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, packet_mask, packet_mask_size, written_packet.get()); @@ -328,8 +329,9 @@ TEST(FlexfecHeaderWriterTest, FinalizesHeaderWithKBit0Set) { constexpr uint8_t kUlpfecPacketMask[] = {0x11, 0x02}; Packet written_packet; written_packet.data.SetSize(kMediaPacketLength); + uint8_t* data = written_packet.data.MutableData(); for (size_t i = 0; i < written_packet.data.size(); ++i) { - written_packet.data[i] = i; + data[i] = i; } FlexfecHeaderWriter writer; @@ -346,8 +348,9 @@ TEST(FlexfecHeaderWriterTest, FinalizesHeaderWithKBit1Set) { constexpr uint8_t kUlpfecPacketMask[] = {0x91, 0x02, 0x08, 0x44, 0x00, 0x84}; Packet written_packet; written_packet.data.SetSize(kMediaPacketLength); + uint8_t* data = written_packet.data.MutableData(); for (size_t i = 0; i < written_packet.data.size(); ++i) { - written_packet.data[i] = i; + data[i] = i; } FlexfecHeaderWriter writer; @@ -368,8 +371,9 @@ TEST(FlexfecHeaderWriterTest, FinalizesHeaderWithKBit2Set) { constexpr uint8_t kUlpfecPacketMask[] = {0x22, 0x22, 0x44, 0x44, 0x44, 0x41}; Packet written_packet; written_packet.data.SetSize(kMediaPacketLength); + uint8_t* data = written_packet.data.MutableData(); for (size_t i 
= 0; i < written_packet.data.size(); ++i) { - written_packet.data[i] = i; + data[i] = i; } FlexfecHeaderWriter writer; diff --git a/modules/rtp_rtcp/source/flexfec_receiver_unittest.cc b/modules/rtp_rtcp/source/flexfec_receiver_unittest.cc index 199d7860b2..7261280aef 100644 --- a/modules/rtp_rtcp/source/flexfec_receiver_unittest.cc +++ b/modules/rtp_rtcp/source/flexfec_receiver_unittest.cc @@ -165,10 +165,10 @@ TEST_F(FlexfecReceiverTest, FailsOnUnknownMediaSsrc) { PacketizeFrame(kNumMediaPackets, 0, &media_packets); auto& media_packet = media_packets.front(); // Corrupt the SSRC. - media_packet->data[8] = 0; - media_packet->data[9] = 1; - media_packet->data[10] = 2; - media_packet->data[11] = 3; + media_packet->data.MutableData()[8] = 0; + media_packet->data.MutableData()[9] = 1; + media_packet->data.MutableData()[10] = 2; + media_packet->data.MutableData()[11] = 3; EXPECT_FALSE(receiver_.AddReceivedPacket(ParsePacket(*media_packet))); } @@ -183,10 +183,10 @@ TEST_F(FlexfecReceiverTest, FailsOnUnknownFecSsrc) { const auto& media_packet = media_packets.front(); auto fec_packet = packet_generator_.BuildFlexfecPacket(*fec_packets.front()); // Corrupt the SSRC. - fec_packet->data[8] = 4; - fec_packet->data[9] = 5; - fec_packet->data[10] = 6; - fec_packet->data[11] = 7; + fec_packet->data.MutableData()[8] = 4; + fec_packet->data.MutableData()[9] = 5; + fec_packet->data.MutableData()[10] = 6; + fec_packet->data.MutableData()[11] = 7; std::unique_ptr received_packet = receiver_.AddReceivedPacket(ParsePacket(*media_packet)); @@ -374,7 +374,8 @@ TEST_F(FlexfecReceiverTest, RecoversFrom50PercentLoss) { TEST_F(FlexfecReceiverTest, DelayedFecPacketDoesHelp) { // These values need to be updated if the underlying erasure code // implementation changes. - const size_t kNumFrames = 48; + // Delay FEC packet by maximum number of media packets tracked by receiver. 
+ const size_t kNumFrames = 192; const size_t kNumMediaPacketsPerFrame = 1; const size_t kNumFecPackets = 1; @@ -412,14 +413,16 @@ TEST_F(FlexfecReceiverTest, DelayedFecPacketDoesHelp) { TEST_F(FlexfecReceiverTest, TooDelayedFecPacketDoesNotHelp) { // These values need to be updated if the underlying erasure code // implementation changes. - const size_t kNumFrames = 49; + // Delay FEC packet by one more than maximum number of media packets + // tracked by receiver. + const size_t kNumFrames = 193; const size_t kNumMediaPacketsPerFrame = 1; const size_t kNumFecPackets = 1; PacketList media_packets; PacketizeFrame(kNumMediaPacketsPerFrame, 0, &media_packets); PacketizeFrame(kNumMediaPacketsPerFrame, 1, &media_packets); - // Protect two first frames. + // Protect first two frames. std::list fec_packets = EncodeFec(media_packets, kNumFecPackets); for (size_t i = 2; i < kNumFrames; ++i) { PacketizeFrame(kNumMediaPacketsPerFrame, i, &media_packets); @@ -646,4 +649,58 @@ TEST_F(FlexfecReceiverTest, CalculatesNumberOfPackets) { EXPECT_EQ(1U, packet_counter.num_recovered_packets); } +TEST_F(FlexfecReceiverTest, DoesNotDecodeWrappedMediaSequenceUsingOldFec) { + const size_t kFirstFrameNumMediaPackets = 2; + const size_t kFirstFrameNumFecPackets = 1; + + PacketList media_packets; + PacketizeFrame(kFirstFrameNumMediaPackets, 0, &media_packets); + + // Protect first frame (sequences 0 and 1) with 1 FEC packet. + std::list fec_packets = + EncodeFec(media_packets, kFirstFrameNumFecPackets); + + // Generate enough media packets to simulate media sequence number wraparound. + // Use no FEC for these frames to make sure old FEC is not purged due to age. + const size_t kNumFramesSequenceWrapAround = + std::numeric_limits::max(); + const size_t kNumMediaPacketsPerFrame = 1; + + for (size_t i = 1; i <= kNumFramesSequenceWrapAround; ++i) { + PacketizeFrame(kNumMediaPacketsPerFrame, i, &media_packets); + } + + // Receive first (|kFirstFrameNumMediaPackets| + 192) media packets. 
+ // Simulate an old FEC packet by separating it from its encoded media + // packets by at least 192 packets. + auto media_it = media_packets.begin(); + for (size_t i = 0; i < (kFirstFrameNumMediaPackets + 192); i++) { + if (i == 1) { + // Drop the second packet of the first frame. + media_it++; + } else { + receiver_.OnRtpPacket(ParsePacket(**media_it++)); + } + } + + // Receive FEC packet. Although a protected packet was dropped, + // expect no recovery callback since it is delayed from first frame + // by more than 192 packets. + auto fec_it = fec_packets.begin(); + std::unique_ptr fec_packet_with_rtp_header = + packet_generator_.BuildFlexfecPacket(**fec_it); + receiver_.OnRtpPacket(ParsePacket(*fec_packet_with_rtp_header)); + + // Receive remaining media packets. + // NOTE: Because we sent enough to simulate wrap around, sequence 0 is + // received again, but is a different packet than the original first + // packet of first frame. + while (media_it != media_packets.end()) { + receiver_.OnRtpPacket(ParsePacket(**media_it++)); + } + + // Do not expect a recovery callback, the FEC packet is old + // and should not decode wrapped around media sequences. 
+} + } // namespace webrtc diff --git a/modules/rtp_rtcp/source/flexfec_sender.cc b/modules/rtp_rtcp/source/flexfec_sender.cc index 16a6f2603c..f1fe71d198 100644 --- a/modules/rtp_rtcp/source/flexfec_sender.cc +++ b/modules/rtp_rtcp/source/flexfec_sender.cc @@ -176,7 +176,7 @@ std::vector> FlexfecSender::GetFecPackets() { last_generated_packet_ms_ = now_ms; } - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); fec_bitrate_.Update(total_fec_data_bytes, now_ms); return fec_packets_to_send; @@ -188,7 +188,7 @@ size_t FlexfecSender::MaxPacketOverhead() const { } DataRate FlexfecSender::CurrentFecRate() const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return DataRate::BitsPerSec( fec_bitrate_.Rate(clock_->TimeInMilliseconds()).value_or(0)); } diff --git a/modules/rtp_rtcp/source/forward_error_correction.cc b/modules/rtp_rtcp/source/forward_error_correction.cc index 1812fbf037..da8025d3db 100644 --- a/modules/rtp_rtcp/source/forward_error_correction.cc +++ b/modules/rtp_rtcp/source/forward_error_correction.cc @@ -31,6 +31,8 @@ namespace webrtc { namespace { // Transport header size in bytes. Assume UDP/IPv4 as a reasonable minimum. constexpr size_t kTransportOverhead = 28; + +constexpr uint16_t kOldSequenceThreshold = 0x3fff; } // namespace ForwardErrorCorrection::Packet::Packet() : data(0), ref_count_(0) {} @@ -151,7 +153,7 @@ int ForwardErrorCorrection::EncodeFec(const PacketList& media_packets, } for (int i = 0; i < num_fec_packets; ++i) { generated_fec_packets_[i].data.EnsureCapacity(IP_PACKET_SIZE); - memset(generated_fec_packets_[i].data.data(), 0, IP_PACKET_SIZE); + memset(generated_fec_packets_[i].data.MutableData(), 0, IP_PACKET_SIZE); // Use this as a marker for untouched packets. 
generated_fec_packets_[i].data.SetSize(0); fec_packets->push_back(&generated_fec_packets_[i]); @@ -231,7 +233,7 @@ void ForwardErrorCorrection::GenerateFecPayloads( fec_packet->data.SetSize(fec_packet_length); } if (first_protected_packet) { - uint8_t* data = fec_packet->data.data(); + uint8_t* data = fec_packet->data.MutableData(); // Write P, X, CC, M, and PT recovery fields. // Note that bits 0, 1, and 16 are overwritten in FinalizeFecHeaders. memcpy(&data[0], &media_packet_data[0], 2); @@ -508,9 +510,6 @@ void ForwardErrorCorrection::InsertPacket( // This is important for keeping |received_fec_packets_| sorted, and may // also reduce the possibility of incorrect decoding due to sequence number // wrap-around. - // TODO(marpan/holmer): We should be able to improve detection/discarding of - // old FEC packets based on timestamp information or better sequence number - // thresholding (e.g., to distinguish between wrap-around and reordering). if (!received_fec_packets_.empty() && received_packet.ssrc == received_fec_packets_.front()->ssrc) { // It only makes sense to detect wrap-around when |received_packet| @@ -521,7 +520,7 @@ void ForwardErrorCorrection::InsertPacket( auto it = received_fec_packets_.begin(); while (it != received_fec_packets_.end()) { uint16_t seq_num_diff = MinDiff(received_packet.seq_num, (*it)->seq_num); - if (seq_num_diff > 0x3fff) { + if (seq_num_diff > kOldSequenceThreshold) { it = received_fec_packets_.erase(it); } else { // No need to keep iterating, since |received_fec_packets_| is sorted. @@ -567,11 +566,11 @@ bool ForwardErrorCorrection::StartPacketRecovery( // Copy bytes corresponding to minimum RTP header size. // Note that the sequence number and SSRC fields will be overwritten // at the end of packet recovery. - memcpy(recovered_packet->pkt->data.data(), fec_packet.pkt->data.cdata(), - kRtpHeaderSize); + memcpy(recovered_packet->pkt->data.MutableData(), + fec_packet.pkt->data.cdata(), kRtpHeaderSize); // Copy remaining FEC payload. 
if (fec_packet.protection_length > 0) { - memcpy(recovered_packet->pkt->data.data() + kRtpHeaderSize, + memcpy(recovered_packet->pkt->data.MutableData() + kRtpHeaderSize, fec_packet.pkt->data.cdata() + fec_packet.fec_header_size, fec_packet.protection_length); } @@ -581,7 +580,7 @@ bool ForwardErrorCorrection::StartPacketRecovery( bool ForwardErrorCorrection::FinishPacketRecovery( const ReceivedFecPacket& fec_packet, RecoveredPacket* recovered_packet) { - uint8_t* data = recovered_packet->pkt->data.data(); + uint8_t* data = recovered_packet->pkt->data.MutableData(); // Set the RTP version to 2. data[0] |= 0x80; // Set the 1st bit. data[0] &= 0xbf; // Clear the 2nd bit. @@ -603,7 +602,7 @@ bool ForwardErrorCorrection::FinishPacketRecovery( } void ForwardErrorCorrection::XorHeaders(const Packet& src, Packet* dst) { - uint8_t* dst_data = dst->data.data(); + uint8_t* dst_data = dst->data.MutableData(); const uint8_t* src_data = src.data.cdata(); // XOR the first 2 bytes of the header: V, P, X, CC, M, PT fields. dst_data[0] ^= src_data[0]; @@ -635,7 +634,7 @@ void ForwardErrorCorrection::XorPayloads(const Packet& src, if (dst_offset + payload_length > dst->data.size()) { dst->data.SetSize(dst_offset + payload_length); } - uint8_t* dst_data = dst->data.data(); + uint8_t* dst_data = dst->data.MutableData(); const uint8_t* src_data = src.data.cdata(); for (size_t i = 0; i < payload_length; ++i) { dst_data[dst_offset + i] ^= src_data[kRtpHeaderSize + i]; @@ -698,9 +697,10 @@ void ForwardErrorCorrection::AttemptRecovery( // this may allow additional packets to be recovered. // Restart for first FEC packet. fec_packet_it = received_fec_packets_.begin(); - } else if (packets_missing == 0) { - // Either all protected packets arrived or have been recovered. We can - // discard this FEC packet. 
+ } else if (packets_missing == 0 || + IsOldFecPacket(**fec_packet_it, recovered_packets)) { + // Either all protected packets arrived or have been recovered, or the FEC + // packet is old. We can discard this FEC packet. fec_packet_it = received_fec_packets_.erase(fec_packet_it); } else { fec_packet_it++; @@ -731,11 +731,28 @@ void ForwardErrorCorrection::DiscardOldRecoveredPackets( RTC_DCHECK_LE(recovered_packets->size(), max_media_packets); } -uint16_t ForwardErrorCorrection::ParseSequenceNumber(uint8_t* packet) { +bool ForwardErrorCorrection::IsOldFecPacket( + const ReceivedFecPacket& fec_packet, + const RecoveredPacketList* recovered_packets) { + if (recovered_packets->empty()) { + return false; + } + + const uint16_t back_recovered_seq_num = recovered_packets->back()->seq_num; + const uint16_t last_protected_seq_num = + fec_packet.protected_packets.back()->seq_num; + + // FEC packet is old if its last protected sequence number is much + // older than the latest protected sequence number received. + return (MinDiff(back_recovered_seq_num, last_protected_seq_num) > + kOldSequenceThreshold); +} + +uint16_t ForwardErrorCorrection::ParseSequenceNumber(const uint8_t* packet) { return (packet[2] << 8) + packet[3]; } -uint32_t ForwardErrorCorrection::ParseSsrc(uint8_t* packet) { +uint32_t ForwardErrorCorrection::ParseSsrc(const uint8_t* packet) { return (packet[8] << 24) + (packet[9] << 16) + (packet[10] << 8) + packet[11]; } diff --git a/modules/rtp_rtcp/source/forward_error_correction.h b/modules/rtp_rtcp/source/forward_error_correction.h index 566ce7428a..b97693d01f 100644 --- a/modules/rtp_rtcp/source/forward_error_correction.h +++ b/modules/rtp_rtcp/source/forward_error_correction.h @@ -235,8 +235,8 @@ class ForwardErrorCorrection { // TODO(brandtr): Remove these functions when the Packet classes // have been refactored. 
- static uint16_t ParseSequenceNumber(uint8_t* packet); - static uint32_t ParseSsrc(uint8_t* packet); + static uint16_t ParseSequenceNumber(const uint8_t* packet); + static uint32_t ParseSsrc(const uint8_t* packet); protected: ForwardErrorCorrection(std::unique_ptr fec_header_reader, @@ -330,6 +330,11 @@ class ForwardErrorCorrection { // for recovering lost packets. void DiscardOldRecoveredPackets(RecoveredPacketList* recovered_packets); + // Checks if the FEC packet is old enough and no longer relevant for + // recovering lost media packets. + bool IsOldFecPacket(const ReceivedFecPacket& fec_packet, + const RecoveredPacketList* recovered_packets); + // These SSRCs are only used by the decoder. const uint32_t ssrc_; const uint32_t protected_media_ssrc_; diff --git a/modules/rtp_rtcp/source/nack_rtx_unittest.cc b/modules/rtp_rtcp/source/nack_rtx_unittest.cc index 55e1e44ebe..fc035047b0 100644 --- a/modules/rtp_rtcp/source/nack_rtx_unittest.cc +++ b/modules/rtp_rtcp/source/nack_rtx_unittest.cc @@ -19,9 +19,9 @@ #include "call/rtp_stream_receiver_controller.h" #include "call/rtx_receive_stream.h" #include "modules/rtp_rtcp/include/receive_statistics.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/rtp_rtcp/source/rtp_sender_video.h" #include "rtc_base/rate_limiter.h" #include "test/gtest.h" @@ -63,7 +63,9 @@ class RtxLoopBackTransport : public webrtc::Transport { count_rtx_ssrc_(0), module_(NULL) {} - void SetSendModule(RtpRtcp* rtpRtcpModule) { module_ = rtpRtcpModule; } + void SetSendModule(RtpRtcpInterface* rtpRtcpModule) { + module_ = rtpRtcpModule; + } void DropEveryNthPacket(int n) { packet_loss_ = n; } @@ -109,7 +111,7 @@ class RtxLoopBackTransport : public webrtc::Transport { int consecutive_drop_end_; uint32_t rtx_ssrc_; int count_rtx_ssrc_; - RtpRtcp* module_; + 
RtpRtcpInterface* module_; RtpStreamReceiverController stream_receiver_controller_; std::set expected_sequence_numbers_; }; @@ -125,7 +127,7 @@ class RtpRtcpRtxNackTest : public ::testing::Test { ~RtpRtcpRtxNackTest() override {} void SetUp() override { - RtpRtcp::Configuration configuration; + RtpRtcpInterface::Configuration configuration; configuration.audio = false; configuration.clock = &fake_clock; receive_statistics_ = ReceiveStatistics::Create(&fake_clock); @@ -134,7 +136,7 @@ class RtpRtcpRtxNackTest : public ::testing::Test { configuration.retransmission_rate_limiter = &retransmission_rate_limiter_; configuration.local_media_ssrc = kTestSsrc; configuration.rtx_send_ssrc = kTestRtxSsrc; - rtp_rtcp_module_ = RtpRtcp::Create(configuration); + rtp_rtcp_module_ = ModuleRtpRtcpImpl2::Create(configuration); FieldTrialBasedConfig field_trials; RTPSenderVideo::Config video_config; video_config.clock = &fake_clock; @@ -209,14 +211,13 @@ class RtpRtcpRtxNackTest : public ::testing::Test { video_header.frame_type = VideoFrameType::kVideoFrameDelta; EXPECT_TRUE(rtp_sender_video_->SendVideo( kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp, - timestamp / 90, payload_data, nullptr, video_header, 0)); + timestamp / 90, payload_data, video_header, 0)); // Min required delay until retransmit = 5 + RTT ms (RTT = 0). fake_clock.AdvanceTimeMilliseconds(5); int length = BuildNackList(nack_list); if (length > 0) rtp_rtcp_module_->SendNACK(nack_list, length); fake_clock.AdvanceTimeMilliseconds(28); // 33ms - 5ms delay. - rtp_rtcp_module_->Process(); // Prepare next frame. 
timestamp += 3000; } @@ -224,7 +225,7 @@ class RtpRtcpRtxNackTest : public ::testing::Test { } std::unique_ptr receive_statistics_; - std::unique_ptr rtp_rtcp_module_; + std::unique_ptr rtp_rtcp_module_; std::unique_ptr rtp_sender_video_; RtxLoopBackTransport transport_; const std::map rtx_associated_payload_types_ = { @@ -259,11 +260,10 @@ TEST_F(RtpRtcpRtxNackTest, LongNackList) { video_header.frame_type = VideoFrameType::kVideoFrameDelta; EXPECT_TRUE(rtp_sender_video_->SendVideo( kPayloadType, VideoCodecType::kVideoCodecGeneric, timestamp, - timestamp / 90, payload_data, nullptr, video_header, 0)); + timestamp / 90, payload_data, video_header, 0)); // Prepare next frame. timestamp += 3000; fake_clock.AdvanceTimeMilliseconds(33); - rtp_rtcp_module_->Process(); } EXPECT_FALSE(transport_.expected_sequence_numbers_.empty()); EXPECT_FALSE(media_stream_.sequence_numbers_.empty()); diff --git a/modules/rtp_rtcp/source/packet_sequencer.cc b/modules/rtp_rtcp/source/packet_sequencer.cc new file mode 100644 index 0000000000..03ea9b8154 --- /dev/null +++ b/modules/rtp_rtcp/source/packet_sequencer.cc @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/rtp_rtcp/source/packet_sequencer.h" + +#include "rtc_base/checks.h" + +namespace webrtc { + +namespace { +// RED header is first byte of payload, if present. +constexpr size_t kRedForFecHeaderLength = 1; + +// Timestamps use a 90kHz clock. 
+constexpr uint32_t kTimestampTicksPerMs = 90; +} // namespace + +PacketSequencer::PacketSequencer(uint32_t media_ssrc, + uint32_t rtx_ssrc, + bool require_marker_before_media_padding, + Clock* clock) + : media_ssrc_(media_ssrc), + rtx_ssrc_(rtx_ssrc), + require_marker_before_media_padding_(require_marker_before_media_padding), + clock_(clock), + media_sequence_number_(0), + rtx_sequence_number_(0), + last_payload_type_(-1), + last_rtp_timestamp_(0), + last_capture_time_ms_(0), + last_timestamp_time_ms_(0), + last_packet_marker_bit_(false) {} + +bool PacketSequencer::Sequence(RtpPacketToSend& packet) { + if (packet.packet_type() == RtpPacketMediaType::kPadding && + !PopulatePaddingFields(packet)) { + // This padding packet can't be sent with current state, return without + // updating the sequence number. + return false; + } + + if (packet.Ssrc() == media_ssrc_) { + packet.SetSequenceNumber(media_sequence_number_++); + if (packet.packet_type() != RtpPacketMediaType::kPadding) { + UpdateLastPacketState(packet); + } + return true; + } + + RTC_DCHECK_EQ(packet.Ssrc(), rtx_ssrc_); + packet.SetSequenceNumber(rtx_sequence_number_++); + return true; +} + +void PacketSequencer::SetRtpState(const RtpState& state) { + media_sequence_number_ = state.sequence_number; + last_rtp_timestamp_ = state.timestamp; + last_capture_time_ms_ = state.capture_time_ms; + last_timestamp_time_ms_ = state.last_timestamp_time_ms; +} + +void PacketSequencer::PupulateRtpState(RtpState& state) const { + state.sequence_number = media_sequence_number_; + state.timestamp = last_rtp_timestamp_; + state.capture_time_ms = last_capture_time_ms_; + state.last_timestamp_time_ms = last_timestamp_time_ms_; +} + +void PacketSequencer::UpdateLastPacketState(const RtpPacketToSend& packet) { + // Remember marker bit to determine if padding can be inserted with + // sequence number following |packet|. 
+ last_packet_marker_bit_ = packet.Marker(); + // Remember media payload type to use in the padding packet if rtx is + // disabled. + if (packet.is_red()) { + RTC_DCHECK_GE(packet.payload_size(), kRedForFecHeaderLength); + last_payload_type_ = packet.PayloadBuffer()[0]; + } else { + last_payload_type_ = packet.PayloadType(); + } + // Save timestamps to generate timestamp field and extensions for the padding. + last_rtp_timestamp_ = packet.Timestamp(); + last_timestamp_time_ms_ = clock_->TimeInMilliseconds(); + last_capture_time_ms_ = packet.capture_time_ms(); +} + +bool PacketSequencer::PopulatePaddingFields(RtpPacketToSend& packet) { + if (packet.Ssrc() == media_ssrc_) { + if (last_payload_type_ == -1) { + return false; + } + + // Without RTX we can't send padding in the middle of frames. + // For audio marker bits doesn't mark the end of a frame and frames + // are usually a single packet, so for now we don't apply this rule + // for audio. + if (require_marker_before_media_padding_ && !last_packet_marker_bit_) { + return false; + } + + packet.SetTimestamp(last_rtp_timestamp_); + packet.set_capture_time_ms(last_capture_time_ms_); + packet.SetPayloadType(last_payload_type_); + return true; + } + + RTC_DCHECK_EQ(packet.Ssrc(), rtx_ssrc_); + if (packet.payload_size() > 0) { + // This is payload padding packet, don't update timestamp fields. + return true; + } + + packet.SetTimestamp(last_rtp_timestamp_); + packet.set_capture_time_ms(last_capture_time_ms_); + + // Only change the timestamp of padding packets sent over RTX. + // Padding only packets over RTP has to be sent as part of a media + // frame (and therefore the same timestamp). 
+ int64_t now_ms = clock_->TimeInMilliseconds(); + if (last_timestamp_time_ms_ > 0) { + packet.SetTimestamp(packet.Timestamp() + + (now_ms - last_timestamp_time_ms_) * + kTimestampTicksPerMs); + if (packet.capture_time_ms() > 0) { + packet.set_capture_time_ms(packet.capture_time_ms() + + (now_ms - last_timestamp_time_ms_)); + } + } + + return true; +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/packet_sequencer.h b/modules/rtp_rtcp/source/packet_sequencer.h new file mode 100644 index 0000000000..67255164f3 --- /dev/null +++ b/modules/rtp_rtcp/source/packet_sequencer.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_RTP_RTCP_SOURCE_PACKET_SEQUENCER_H_ +#define MODULES_RTP_RTCP_SOURCE_PACKET_SEQUENCER_H_ + +#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "system_wrappers/include/clock.h" + +namespace webrtc { + +// Helper class used to assign RTP sequence numbers and populate some fields for +// padding packets based on the last sequenced packets. +// This class is not thread safe, the caller must provide that. +class PacketSequencer { + public: + // If |require_marker_before_media_padding_| is true, padding packets on the + // media ssrc is not allowed unless the last sequenced media packet had the + // marker bit set (i.e. don't insert padding packets between the first and + // last packets of a video frame). 
+ PacketSequencer(uint32_t media_ssrc, + uint32_t rtx_ssrc, + bool require_marker_before_media_padding, + Clock* clock); + + // Assigns sequence number, and in the case of non-RTX padding also timestamps + // and payload type. + // Returns false if sequencing failed, which it can do for instance if the + // packet to squence is padding on the media ssrc, but the media is mid frame + // (the last marker bit is false). + bool Sequence(RtpPacketToSend& packet); + + void set_media_sequence_number(uint16_t sequence_number) { + media_sequence_number_ = sequence_number; + } + void set_rtx_sequence_number(uint16_t sequence_number) { + rtx_sequence_number_ = sequence_number; + } + + void SetRtpState(const RtpState& state); + void PupulateRtpState(RtpState& state) const; + + uint16_t media_sequence_number() const { return media_sequence_number_; } + uint16_t rtx_sequence_number() const { return rtx_sequence_number_; } + + private: + void UpdateLastPacketState(const RtpPacketToSend& packet); + bool PopulatePaddingFields(RtpPacketToSend& packet); + + const uint32_t media_ssrc_; + const uint32_t rtx_ssrc_; + const bool require_marker_before_media_padding_; + Clock* const clock_; + + uint16_t media_sequence_number_; + uint16_t rtx_sequence_number_; + + int8_t last_payload_type_; + uint32_t last_rtp_timestamp_; + int64_t last_capture_time_ms_; + int64_t last_timestamp_time_ms_; + bool last_packet_marker_bit_; +}; + +} // namespace webrtc + +#endif // MODULES_RTP_RTCP_SOURCE_PACKET_SEQUENCER_H_ diff --git a/modules/rtp_rtcp/source/receive_statistics_impl.cc b/modules/rtp_rtcp/source/receive_statistics_impl.cc index 0c47e08b1e..f5c3eafbf3 100644 --- a/modules/rtp_rtcp/source/receive_statistics_impl.cc +++ b/modules/rtp_rtcp/source/receive_statistics_impl.cc @@ -13,9 +13,11 @@ #include #include #include +#include #include #include "modules/remote_bitrate_estimator/test/bwe_test_logging.h" +#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h" #include 
"modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" #include "modules/rtp_rtcp/source/time_util.h" @@ -23,9 +25,14 @@ #include "system_wrappers/include/clock.h" namespace webrtc { +namespace { +constexpr int64_t kStatisticsTimeoutMs = 8000; +constexpr int64_t kStatisticsProcessIntervalMs = 1000; -const int64_t kStatisticsTimeoutMs = 8000; -const int64_t kStatisticsProcessIntervalMs = 1000; +// Number of seconds since 1900 January 1 00:00 GMT (see +// https://tools.ietf.org/html/rfc868). +constexpr int64_t kNtpJan1970Millisecs = 2'208'988'800'000; +} // namespace StreamStatistician::~StreamStatistician() {} @@ -34,10 +41,14 @@ StreamStatisticianImpl::StreamStatisticianImpl(uint32_t ssrc, int max_reordering_threshold) : ssrc_(ssrc), clock_(clock), + delta_internal_unix_epoch_ms_(clock_->CurrentNtpInMilliseconds() - + clock_->TimeInMilliseconds() - + kNtpJan1970Millisecs), incoming_bitrate_(kStatisticsProcessIntervalMs, RateStatistics::kBpsScale), max_reordering_threshold_(max_reordering_threshold), enable_retransmit_detection_(false), + cumulative_loss_is_capped_(false), jitter_q4_(0), cumulative_loss_(0), cumulative_loss_rtcp_offset_(0), @@ -100,7 +111,6 @@ bool StreamStatisticianImpl::UpdateOutOfOrder(const RtpPacketReceived& packet, } void StreamStatisticianImpl::UpdateCounters(const RtpPacketReceived& packet) { - rtc::CritScope cs(&stream_lock_); RTC_DCHECK_EQ(ssrc_, packet.Ssrc()); int64_t now_ms = clock_->TimeInMilliseconds(); @@ -159,47 +169,42 @@ void StreamStatisticianImpl::UpdateJitter(const RtpPacketReceived& packet, void StreamStatisticianImpl::SetMaxReorderingThreshold( int max_reordering_threshold) { - rtc::CritScope cs(&stream_lock_); max_reordering_threshold_ = max_reordering_threshold; } void StreamStatisticianImpl::EnableRetransmitDetection(bool enable) { - rtc::CritScope cs(&stream_lock_); enable_retransmit_detection_ = enable; } RtpReceiveStats StreamStatisticianImpl::GetStats() const { - 
rtc::CritScope cs(&stream_lock_); RtpReceiveStats stats; stats.packets_lost = cumulative_loss_; // TODO(nisse): Can we return a float instead? // Note: internal jitter value is in Q4 and needs to be scaled by 1/16. stats.jitter = jitter_q4_ >> 4; - stats.last_packet_received_timestamp_ms = - receive_counters_.last_packet_received_timestamp_ms; + if (receive_counters_.last_packet_received_timestamp_ms.has_value()) { + stats.last_packet_received_timestamp_ms = + *receive_counters_.last_packet_received_timestamp_ms + + delta_internal_unix_epoch_ms_; + } stats.packet_counter = receive_counters_.transmitted; return stats; } -bool StreamStatisticianImpl::GetActiveStatisticsAndReset( - RtcpStatistics* statistics) { - rtc::CritScope cs(&stream_lock_); - if (clock_->TimeInMilliseconds() - last_receive_time_ms_ >= - kStatisticsTimeoutMs) { +void StreamStatisticianImpl::MaybeAppendReportBlockAndReset( + std::vector& report_blocks) { + int64_t now_ms = clock_->TimeInMilliseconds(); + if (now_ms - last_receive_time_ms_ >= kStatisticsTimeoutMs) { // Not active. - return false; + return; } if (!ReceivedRtpPacket()) { - return false; + return; } - *statistics = CalculateRtcpStatistics(); - - return true; -} - -RtcpStatistics StreamStatisticianImpl::CalculateRtcpStatistics() { - RtcpStatistics stats; + report_blocks.emplace_back(); + rtcp::ReportBlock& stats = report_blocks.back(); + stats.SetMediaSsrc(ssrc_); // Calculate fraction lost. int64_t exp_since_last = received_seq_max_ - last_report_seq_max_; RTC_DCHECK_GE(exp_since_last, 0); @@ -207,41 +212,42 @@ RtcpStatistics StreamStatisticianImpl::CalculateRtcpStatistics() { int32_t lost_since_last = cumulative_loss_ - last_report_cumulative_loss_; if (exp_since_last > 0 && lost_since_last > 0) { // Scale 0 to 255, where 255 is 100% loss. 
- stats.fraction_lost = - static_cast(255 * lost_since_last / exp_since_last); - } else { - stats.fraction_lost = 0; + stats.SetFractionLost(255 * lost_since_last / exp_since_last); } - // TODO(danilchap): Ensure |stats.packets_lost| is clamped to fit in a signed - // 24-bit value. - stats.packets_lost = cumulative_loss_ + cumulative_loss_rtcp_offset_; - if (stats.packets_lost < 0) { + int packets_lost = cumulative_loss_ + cumulative_loss_rtcp_offset_; + if (packets_lost < 0) { // Clamp to zero. Work around to accomodate for senders that misbehave with // negative cumulative loss. - stats.packets_lost = 0; + packets_lost = 0; cumulative_loss_rtcp_offset_ = -cumulative_loss_; } - stats.extended_highest_sequence_number = - static_cast(received_seq_max_); + if (packets_lost > 0x7fffff) { + // Packets lost is a 24 bit signed field, and thus should be clamped, as + // described in https://datatracker.ietf.org/doc/html/rfc3550#appendix-A.3 + if (!cumulative_loss_is_capped_) { + cumulative_loss_is_capped_ = true; + RTC_LOG(LS_WARNING) << "Cumulative loss reached maximum value for ssrc " + << ssrc_; + } + packets_lost = 0x7fffff; + } + stats.SetCumulativeLost(packets_lost); + stats.SetExtHighestSeqNum(received_seq_max_); // Note: internal jitter value is in Q4 and needs to be scaled by 1/16. - stats.jitter = jitter_q4_ >> 4; + stats.SetJitter(jitter_q4_ >> 4); // Only for report blocks in RTCP SR and RR. 
last_report_cumulative_loss_ = cumulative_loss_; last_report_seq_max_ = received_seq_max_; - BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "cumulative_loss_pkts", - clock_->TimeInMilliseconds(), + BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "cumulative_loss_pkts", now_ms, cumulative_loss_, ssrc_); - BWE_TEST_LOGGING_PLOT_WITH_SSRC( - 1, "received_seq_max_pkts", clock_->TimeInMilliseconds(), - (received_seq_max_ - received_seq_first_), ssrc_); - - return stats; + BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "received_seq_max_pkts", now_ms, + (received_seq_max_ - received_seq_first_), + ssrc_); } absl::optional StreamStatisticianImpl::GetFractionLostInPercent() const { - rtc::CritScope cs(&stream_lock_); if (!ReceivedRtpPacket()) { return absl::nullopt; } @@ -257,12 +263,10 @@ absl::optional StreamStatisticianImpl::GetFractionLostInPercent() const { StreamDataCounters StreamStatisticianImpl::GetReceiveStreamDataCounters() const { - rtc::CritScope cs(&stream_lock_); return receive_counters_; } uint32_t StreamStatisticianImpl::BitrateReceived() const { - rtc::CritScope cs(&stream_lock_); return incoming_bitrate_.Rate(clock_->TimeInMilliseconds()).value_or(0); } @@ -295,21 +299,33 @@ bool StreamStatisticianImpl::IsRetransmitOfOldPacket( } std::unique_ptr ReceiveStatistics::Create(Clock* clock) { - return std::make_unique(clock); + return std::make_unique( + clock, [](uint32_t ssrc, Clock* clock, int max_reordering_threshold) { + return std::make_unique( + ssrc, clock, max_reordering_threshold); + }); +} + +std::unique_ptr ReceiveStatistics::CreateThreadCompatible( + Clock* clock) { + return std::make_unique( + clock, [](uint32_t ssrc, Clock* clock, int max_reordering_threshold) { + return std::make_unique( + ssrc, clock, max_reordering_threshold); + }); } -ReceiveStatisticsImpl::ReceiveStatisticsImpl(Clock* clock) +ReceiveStatisticsImpl::ReceiveStatisticsImpl( + Clock* clock, + std::function( + uint32_t ssrc, + Clock* clock, + int max_reordering_threshold)> stream_statistician_factory) : 
clock_(clock), - last_returned_ssrc_(0), + stream_statistician_factory_(std::move(stream_statistician_factory)), + last_returned_ssrc_idx_(0), max_reordering_threshold_(kDefaultMaxReorderingThreshold) {} -ReceiveStatisticsImpl::~ReceiveStatisticsImpl() { - while (!statisticians_.empty()) { - delete statisticians_.begin()->second; - statisticians_.erase(statisticians_.begin()); - } -} - void ReceiveStatisticsImpl::OnRtpPacket(const RtpPacketReceived& packet) { // StreamStatisticianImpl instance is created once and only destroyed when // this whole ReceiveStatisticsImpl is destroyed. StreamStatisticianImpl has @@ -318,34 +334,29 @@ void ReceiveStatisticsImpl::OnRtpPacket(const RtpPacketReceived& packet) { GetOrCreateStatistician(packet.Ssrc())->UpdateCounters(packet); } -StreamStatisticianImpl* ReceiveStatisticsImpl::GetStatistician( +StreamStatistician* ReceiveStatisticsImpl::GetStatistician( uint32_t ssrc) const { - rtc::CritScope cs(&receive_statistics_lock_); const auto& it = statisticians_.find(ssrc); if (it == statisticians_.end()) - return NULL; - return it->second; + return nullptr; + return it->second.get(); } -StreamStatisticianImpl* ReceiveStatisticsImpl::GetOrCreateStatistician( +StreamStatisticianImplInterface* ReceiveStatisticsImpl::GetOrCreateStatistician( uint32_t ssrc) { - rtc::CritScope cs(&receive_statistics_lock_); - StreamStatisticianImpl*& impl = statisticians_[ssrc]; + std::unique_ptr& impl = statisticians_[ssrc]; if (impl == nullptr) { // new element - impl = new StreamStatisticianImpl(ssrc, clock_, max_reordering_threshold_); + impl = + stream_statistician_factory_(ssrc, clock_, max_reordering_threshold_); + all_ssrcs_.push_back(ssrc); } - return impl; + return impl.get(); } void ReceiveStatisticsImpl::SetMaxReorderingThreshold( int max_reordering_threshold) { - std::map statisticians; - { - rtc::CritScope cs(&receive_statistics_lock_); - max_reordering_threshold_ = max_reordering_threshold; - statisticians = statisticians_; - } - for (auto& 
statistician : statisticians) { + max_reordering_threshold_ = max_reordering_threshold; + for (auto& statistician : statisticians_) { statistician.second->SetMaxReorderingThreshold(max_reordering_threshold); } } @@ -364,42 +375,18 @@ void ReceiveStatisticsImpl::EnableRetransmitDetection(uint32_t ssrc, std::vector ReceiveStatisticsImpl::RtcpReportBlocks( size_t max_blocks) { - std::map statisticians; - { - rtc::CritScope cs(&receive_statistics_lock_); - statisticians = statisticians_; - } std::vector result; - result.reserve(std::min(max_blocks, statisticians.size())); - auto add_report_block = [&result](uint32_t media_ssrc, - StreamStatisticianImpl* statistician) { - // Do we have receive statistics to send? - RtcpStatistics stats; - if (!statistician->GetActiveStatisticsAndReset(&stats)) - return; - result.emplace_back(); - rtcp::ReportBlock& block = result.back(); - block.SetMediaSsrc(media_ssrc); - block.SetFractionLost(stats.fraction_lost); - if (!block.SetCumulativeLost(stats.packets_lost)) { - RTC_LOG(LS_WARNING) << "Cumulative lost is oversized."; - result.pop_back(); - return; - } - block.SetExtHighestSeqNum(stats.extended_highest_sequence_number); - block.SetJitter(stats.jitter); - }; - - const auto start_it = statisticians.upper_bound(last_returned_ssrc_); - for (auto it = start_it; - result.size() < max_blocks && it != statisticians.end(); ++it) - add_report_block(it->first, it->second); - for (auto it = statisticians.begin(); - result.size() < max_blocks && it != start_it; ++it) - add_report_block(it->first, it->second); - - if (!result.empty()) - last_returned_ssrc_ = result.back().source_ssrc(); + result.reserve(std::min(max_blocks, all_ssrcs_.size())); + + size_t ssrc_idx = 0; + for (size_t i = 0; i < all_ssrcs_.size() && result.size() < max_blocks; ++i) { + ssrc_idx = (last_returned_ssrc_idx_ + i + 1) % all_ssrcs_.size(); + const uint32_t media_ssrc = all_ssrcs_[ssrc_idx]; + auto statistician_it = statisticians_.find(media_ssrc); + 
RTC_DCHECK(statistician_it != statisticians_.end()); + statistician_it->second->MaybeAppendReportBlockAndReset(result); + } + last_returned_ssrc_idx_ = ssrc_idx; return result; } diff --git a/modules/rtp_rtcp/source/receive_statistics_impl.h b/modules/rtp_rtcp/source/receive_statistics_impl.h index e352ae8787..1a70fe4ad7 100644 --- a/modules/rtp_rtcp/source/receive_statistics_impl.h +++ b/modules/rtp_rtcp/source/receive_statistics_impl.h @@ -12,98 +12,162 @@ #define MODULES_RTP_RTCP_SOURCE_RECEIVE_STATISTICS_IMPL_H_ #include -#include +#include +#include +#include #include #include "absl/types/optional.h" #include "modules/include/module_common_types_public.h" #include "modules/rtp_rtcp/include/receive_statistics.h" -#include "rtc_base/critical_section.h" +#include "modules/rtp_rtcp/source/rtcp_packet/report_block.h" +#include "rtc_base/containers/flat_map.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { -class StreamStatisticianImpl : public StreamStatistician { +// Extends StreamStatistician with methods needed by the implementation. +class StreamStatisticianImplInterface : public StreamStatistician { + public: + virtual ~StreamStatisticianImplInterface() = default; + virtual void MaybeAppendReportBlockAndReset( + std::vector& report_blocks) = 0; + virtual void SetMaxReorderingThreshold(int max_reordering_threshold) = 0; + virtual void EnableRetransmitDetection(bool enable) = 0; + virtual void UpdateCounters(const RtpPacketReceived& packet) = 0; +}; + +// Thread-compatible implementation of StreamStatisticianImplInterface. 
+class StreamStatisticianImpl : public StreamStatisticianImplInterface { public: StreamStatisticianImpl(uint32_t ssrc, Clock* clock, int max_reordering_threshold); ~StreamStatisticianImpl() override; + // Implements StreamStatistician RtpReceiveStats GetStats() const override; - - bool GetActiveStatisticsAndReset(RtcpStatistics* statistics); absl::optional GetFractionLostInPercent() const override; StreamDataCounters GetReceiveStreamDataCounters() const override; uint32_t BitrateReceived() const override; - void SetMaxReorderingThreshold(int max_reordering_threshold); - void EnableRetransmitDetection(bool enable); - + // Implements StreamStatisticianImplInterface + void MaybeAppendReportBlockAndReset( + std::vector& report_blocks) override; + void SetMaxReorderingThreshold(int max_reordering_threshold) override; + void EnableRetransmitDetection(bool enable) override; // Updates StreamStatistician for incoming packets. - void UpdateCounters(const RtpPacketReceived& packet); + void UpdateCounters(const RtpPacketReceived& packet) override; private: bool IsRetransmitOfOldPacket(const RtpPacketReceived& packet, - int64_t now_ms) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_); - RtcpStatistics CalculateRtcpStatistics() - RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_); - void UpdateJitter(const RtpPacketReceived& packet, int64_t receive_time_ms) - RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_); + int64_t now_ms) const; + void UpdateJitter(const RtpPacketReceived& packet, int64_t receive_time_ms); // Updates StreamStatistician for out of order packets. // Returns true if packet considered to be out of order. bool UpdateOutOfOrder(const RtpPacketReceived& packet, int64_t sequence_number, - int64_t now_ms) - RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_); + int64_t now_ms); // Checks if this StreamStatistician received any rtp packets. 
- bool ReceivedRtpPacket() const RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_) { - return received_seq_first_ >= 0; - } + bool ReceivedRtpPacket() const { return received_seq_first_ >= 0; } const uint32_t ssrc_; Clock* const clock_; - rtc::CriticalSection stream_lock_; - RateStatistics incoming_bitrate_ RTC_GUARDED_BY(&stream_lock_); + // Delta used to map internal timestamps to Unix epoch ones. + const int64_t delta_internal_unix_epoch_ms_; + RateStatistics incoming_bitrate_; // In number of packets or sequence numbers. - int max_reordering_threshold_ RTC_GUARDED_BY(&stream_lock_); - bool enable_retransmit_detection_ RTC_GUARDED_BY(&stream_lock_); + int max_reordering_threshold_; + bool enable_retransmit_detection_; + bool cumulative_loss_is_capped_; // Stats on received RTP packets. - uint32_t jitter_q4_ RTC_GUARDED_BY(&stream_lock_); + uint32_t jitter_q4_; // Cumulative loss according to RFC 3550, which may be negative (and often is, // if packets are reordered and there are non-RTX retransmissions). - int32_t cumulative_loss_ RTC_GUARDED_BY(&stream_lock_); + int32_t cumulative_loss_; // Offset added to outgoing rtcp reports, to make ensure that the reported // cumulative loss is non-negative. Reports with negative values confuse some // senders, in particular, our own loss-based bandwidth estimator. 
- int32_t cumulative_loss_rtcp_offset_ RTC_GUARDED_BY(&stream_lock_); + int32_t cumulative_loss_rtcp_offset_; - int64_t last_receive_time_ms_ RTC_GUARDED_BY(&stream_lock_); - uint32_t last_received_timestamp_ RTC_GUARDED_BY(&stream_lock_); - SequenceNumberUnwrapper seq_unwrapper_ RTC_GUARDED_BY(&stream_lock_); - int64_t received_seq_first_ RTC_GUARDED_BY(&stream_lock_); - int64_t received_seq_max_ RTC_GUARDED_BY(&stream_lock_); + int64_t last_receive_time_ms_; + uint32_t last_received_timestamp_; + SequenceNumberUnwrapper seq_unwrapper_; + int64_t received_seq_first_; + int64_t received_seq_max_; // Assume that the other side restarted when there are two sequential packets // with large jump from received_seq_max_. - absl::optional received_seq_out_of_order_ - RTC_GUARDED_BY(&stream_lock_); + absl::optional received_seq_out_of_order_; // Current counter values. - StreamDataCounters receive_counters_ RTC_GUARDED_BY(&stream_lock_); + StreamDataCounters receive_counters_; // Counter values when we sent the last report. - int32_t last_report_cumulative_loss_ RTC_GUARDED_BY(&stream_lock_); - int64_t last_report_seq_max_ RTC_GUARDED_BY(&stream_lock_); + int32_t last_report_cumulative_loss_; + int64_t last_report_seq_max_; }; -class ReceiveStatisticsImpl : public ReceiveStatistics { +// Thread-safe implementation of StreamStatisticianImplInterface. 
+class StreamStatisticianLocked : public StreamStatisticianImplInterface { public: - explicit ReceiveStatisticsImpl(Clock* clock); + StreamStatisticianLocked(uint32_t ssrc, + Clock* clock, + int max_reordering_threshold) + : impl_(ssrc, clock, max_reordering_threshold) {} + ~StreamStatisticianLocked() override = default; + + RtpReceiveStats GetStats() const override { + MutexLock lock(&stream_lock_); + return impl_.GetStats(); + } + absl::optional GetFractionLostInPercent() const override { + MutexLock lock(&stream_lock_); + return impl_.GetFractionLostInPercent(); + } + StreamDataCounters GetReceiveStreamDataCounters() const override { + MutexLock lock(&stream_lock_); + return impl_.GetReceiveStreamDataCounters(); + } + uint32_t BitrateReceived() const override { + MutexLock lock(&stream_lock_); + return impl_.BitrateReceived(); + } + void MaybeAppendReportBlockAndReset( + std::vector& report_blocks) override { + MutexLock lock(&stream_lock_); + impl_.MaybeAppendReportBlockAndReset(report_blocks); + } + void SetMaxReorderingThreshold(int max_reordering_threshold) override { + MutexLock lock(&stream_lock_); + return impl_.SetMaxReorderingThreshold(max_reordering_threshold); + } + void EnableRetransmitDetection(bool enable) override { + MutexLock lock(&stream_lock_); + return impl_.EnableRetransmitDetection(enable); + } + void UpdateCounters(const RtpPacketReceived& packet) override { + MutexLock lock(&stream_lock_); + return impl_.UpdateCounters(packet); + } + + private: + mutable Mutex stream_lock_; + StreamStatisticianImpl impl_ RTC_GUARDED_BY(&stream_lock_); +}; - ~ReceiveStatisticsImpl() override; +// Thread-compatible implementation. +class ReceiveStatisticsImpl : public ReceiveStatistics { + public: + ReceiveStatisticsImpl( + Clock* clock, + std::function( + uint32_t ssrc, + Clock* clock, + int max_reordering_threshold)> stream_statistician_factory); + ~ReceiveStatisticsImpl() override = default; // Implements ReceiveStatisticsProvider. 
std::vector RtcpReportBlocks(size_t max_blocks) override; @@ -112,22 +176,71 @@ class ReceiveStatisticsImpl : public ReceiveStatistics { void OnRtpPacket(const RtpPacketReceived& packet) override; // Implements ReceiveStatistics. - // Note: More specific return type for use in the implementation. - StreamStatisticianImpl* GetStatistician(uint32_t ssrc) const override; + StreamStatistician* GetStatistician(uint32_t ssrc) const override; void SetMaxReorderingThreshold(int max_reordering_threshold) override; void SetMaxReorderingThreshold(uint32_t ssrc, int max_reordering_threshold) override; void EnableRetransmitDetection(uint32_t ssrc, bool enable) override; private: - StreamStatisticianImpl* GetOrCreateStatistician(uint32_t ssrc); + StreamStatisticianImplInterface* GetOrCreateStatistician(uint32_t ssrc); Clock* const clock_; - rtc::CriticalSection receive_statistics_lock_; - uint32_t last_returned_ssrc_; - int max_reordering_threshold_ RTC_GUARDED_BY(receive_statistics_lock_); - std::map statisticians_ - RTC_GUARDED_BY(receive_statistics_lock_); + std::function( + uint32_t ssrc, + Clock* clock, + int max_reordering_threshold)> + stream_statistician_factory_; + // The index within `all_ssrcs_` that was last returned. + size_t last_returned_ssrc_idx_; + std::vector all_ssrcs_; + int max_reordering_threshold_; + flat_map> + statisticians_; }; + +// Thread-safe implementation wrapping access to ReceiveStatisticsImpl with a +// mutex. 
+class ReceiveStatisticsLocked : public ReceiveStatistics { + public: + explicit ReceiveStatisticsLocked( + Clock* clock, + std::function( + uint32_t ssrc, + Clock* clock, + int max_reordering_threshold)> stream_statitician_factory) + : impl_(clock, std::move(stream_statitician_factory)) {} + ~ReceiveStatisticsLocked() override = default; + std::vector RtcpReportBlocks(size_t max_blocks) override { + MutexLock lock(&receive_statistics_lock_); + return impl_.RtcpReportBlocks(max_blocks); + } + void OnRtpPacket(const RtpPacketReceived& packet) override { + MutexLock lock(&receive_statistics_lock_); + return impl_.OnRtpPacket(packet); + } + StreamStatistician* GetStatistician(uint32_t ssrc) const override { + MutexLock lock(&receive_statistics_lock_); + return impl_.GetStatistician(ssrc); + } + void SetMaxReorderingThreshold(int max_reordering_threshold) override { + MutexLock lock(&receive_statistics_lock_); + return impl_.SetMaxReorderingThreshold(max_reordering_threshold); + } + void SetMaxReorderingThreshold(uint32_t ssrc, + int max_reordering_threshold) override { + MutexLock lock(&receive_statistics_lock_); + return impl_.SetMaxReorderingThreshold(ssrc, max_reordering_threshold); + } + void EnableRetransmitDetection(uint32_t ssrc, bool enable) override { + MutexLock lock(&receive_statistics_lock_); + return impl_.EnableRetransmitDetection(ssrc, enable); + } + + private: + mutable Mutex receive_statistics_lock_; + ReceiveStatisticsImpl impl_ RTC_GUARDED_BY(&receive_statistics_lock_); +}; + } // namespace webrtc #endif // MODULES_RTP_RTCP_SOURCE_RECEIVE_STATISTICS_IMPL_H_ diff --git a/modules/rtp_rtcp/source/receive_statistics_unittest.cc b/modules/rtp_rtcp/source/receive_statistics_unittest.cc index 053460e2ba..d40a743469 100644 --- a/modules/rtp_rtcp/source/receive_statistics_unittest.cc +++ b/modules/rtp_rtcp/source/receive_statistics_unittest.cc @@ -65,10 +65,13 @@ void IncrementSequenceNumber(RtpPacketReceived* packet) { IncrementSequenceNumber(packet, 1); } 
-class ReceiveStatisticsTest : public ::testing::Test { +class ReceiveStatisticsTest : public ::testing::TestWithParam { public: ReceiveStatisticsTest() - : clock_(0), receive_statistics_(ReceiveStatistics::Create(&clock_)) { + : clock_(0), + receive_statistics_( + GetParam() ? ReceiveStatistics::Create(&clock_) + : ReceiveStatistics::CreateThreadCompatible(&clock_)) { packet1_ = CreateRtpPacket(kSsrc1, kPacketSize1); packet2_ = CreateRtpPacket(kSsrc2, kPacketSize2); } @@ -80,7 +83,14 @@ class ReceiveStatisticsTest : public ::testing::Test { RtpPacketReceived packet2_; }; -TEST_F(ReceiveStatisticsTest, TwoIncomingSsrcs) { +INSTANTIATE_TEST_SUITE_P(All, + ReceiveStatisticsTest, + ::testing::Bool(), + [](::testing::TestParamInfo info) { + return info.param ? "WithMutex" : "WithoutMutex"; + }); + +TEST_P(ReceiveStatisticsTest, TwoIncomingSsrcs) { receive_statistics_->OnRtpPacket(packet1_); IncrementSequenceNumber(&packet1_); receive_statistics_->OnRtpPacket(packet2_); @@ -133,7 +143,7 @@ TEST_F(ReceiveStatisticsTest, TwoIncomingSsrcs) { EXPECT_EQ(3u, counters.transmitted.packets); } -TEST_F(ReceiveStatisticsTest, +TEST_P(ReceiveStatisticsTest, RtcpReportBlocksReturnsMaxBlocksWhenThereAreMoreStatisticians) { RtpPacketReceived packet1 = CreateRtpPacket(kSsrc1, kPacketSize1); RtpPacketReceived packet2 = CreateRtpPacket(kSsrc2, kPacketSize1); @@ -147,7 +157,7 @@ TEST_F(ReceiveStatisticsTest, EXPECT_THAT(receive_statistics_->RtcpReportBlocks(2), SizeIs(2)); } -TEST_F(ReceiveStatisticsTest, +TEST_P(ReceiveStatisticsTest, RtcpReportBlocksReturnsAllObservedSsrcsWithMultipleCalls) { RtpPacketReceived packet1 = CreateRtpPacket(kSsrc1, kPacketSize1); RtpPacketReceived packet2 = CreateRtpPacket(kSsrc2, kPacketSize1); @@ -174,7 +184,7 @@ TEST_F(ReceiveStatisticsTest, UnorderedElementsAre(kSsrc1, kSsrc2, kSsrc3, kSsrc4)); } -TEST_F(ReceiveStatisticsTest, ActiveStatisticians) { +TEST_P(ReceiveStatisticsTest, ActiveStatisticians) { receive_statistics_->OnRtpPacket(packet1_); 
IncrementSequenceNumber(&packet1_); clock_.AdvanceTimeMilliseconds(1000); @@ -206,7 +216,7 @@ TEST_F(ReceiveStatisticsTest, ActiveStatisticians) { EXPECT_EQ(2u, counters.transmitted.packets); } -TEST_F(ReceiveStatisticsTest, +TEST_P(ReceiveStatisticsTest, DoesntCreateRtcpReportBlockUntilFirstReceivedPacketForSsrc) { // Creates a statistician object for the ssrc. receive_statistics_->EnableRetransmitDetection(kSsrc1, true); @@ -217,7 +227,7 @@ TEST_F(ReceiveStatisticsTest, EXPECT_EQ(1u, receive_statistics_->RtcpReportBlocks(3).size()); } -TEST_F(ReceiveStatisticsTest, GetReceiveStreamDataCounters) { +TEST_P(ReceiveStatisticsTest, GetReceiveStreamDataCounters) { receive_statistics_->OnRtpPacket(packet1_); StreamStatistician* statistician = receive_statistics_->GetStatistician(kSsrc1); @@ -233,7 +243,7 @@ TEST_F(ReceiveStatisticsTest, GetReceiveStreamDataCounters) { EXPECT_EQ(2u, counters.transmitted.packets); } -TEST_F(ReceiveStatisticsTest, SimpleLossComputation) { +TEST_P(ReceiveStatisticsTest, SimpleLossComputation) { packet1_.SetSequenceNumber(1); receive_statistics_->OnRtpPacket(packet1_); packet1_.SetSequenceNumber(3); @@ -256,7 +266,7 @@ TEST_F(ReceiveStatisticsTest, SimpleLossComputation) { EXPECT_EQ(20, statistician->GetFractionLostInPercent()); } -TEST_F(ReceiveStatisticsTest, LossComputationWithReordering) { +TEST_P(ReceiveStatisticsTest, LossComputationWithReordering) { packet1_.SetSequenceNumber(1); receive_statistics_->OnRtpPacket(packet1_); packet1_.SetSequenceNumber(3); @@ -279,7 +289,7 @@ TEST_F(ReceiveStatisticsTest, LossComputationWithReordering) { EXPECT_EQ(20, statistician->GetFractionLostInPercent()); } -TEST_F(ReceiveStatisticsTest, LossComputationWithDuplicates) { +TEST_P(ReceiveStatisticsTest, LossComputationWithDuplicates) { // Lose 2 packets, but also receive 1 duplicate. Should actually count as // only 1 packet being lost. 
packet1_.SetSequenceNumber(1); @@ -304,7 +314,7 @@ TEST_F(ReceiveStatisticsTest, LossComputationWithDuplicates) { EXPECT_EQ(20, statistician->GetFractionLostInPercent()); } -TEST_F(ReceiveStatisticsTest, LossComputationWithSequenceNumberWrapping) { +TEST_P(ReceiveStatisticsTest, LossComputationWithSequenceNumberWrapping) { // First, test loss computation over a period that included a sequence number // rollover. packet1_.SetSequenceNumber(0xfffd); @@ -344,7 +354,7 @@ TEST_F(ReceiveStatisticsTest, LossComputationWithSequenceNumberWrapping) { EXPECT_EQ(28, statistician->GetFractionLostInPercent()); } -TEST_F(ReceiveStatisticsTest, StreamRestartDoesntCountAsLoss) { +TEST_P(ReceiveStatisticsTest, StreamRestartDoesntCountAsLoss) { receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200); packet1_.SetSequenceNumber(0); @@ -377,7 +387,7 @@ TEST_F(ReceiveStatisticsTest, StreamRestartDoesntCountAsLoss) { EXPECT_EQ(0, statistician->GetFractionLostInPercent()); } -TEST_F(ReceiveStatisticsTest, CountsLossAfterStreamRestart) { +TEST_P(ReceiveStatisticsTest, CountsLossAfterStreamRestart) { receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200); packet1_.SetSequenceNumber(0); @@ -405,7 +415,7 @@ TEST_F(ReceiveStatisticsTest, CountsLossAfterStreamRestart) { EXPECT_EQ(0, statistician->GetFractionLostInPercent()); } -TEST_F(ReceiveStatisticsTest, StreamCanRestartAtSequenceNumberWrapAround) { +TEST_P(ReceiveStatisticsTest, StreamCanRestartAtSequenceNumberWrapAround) { receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200); packet1_.SetSequenceNumber(0xffff - 401); @@ -428,7 +438,7 @@ TEST_F(ReceiveStatisticsTest, StreamCanRestartAtSequenceNumberWrapAround) { EXPECT_EQ(1, report_blocks[0].cumulative_lost_signed()); } -TEST_F(ReceiveStatisticsTest, StreamRestartNeedsTwoConsecutivePackets) { +TEST_P(ReceiveStatisticsTest, StreamRestartNeedsTwoConsecutivePackets) { receive_statistics_->SetMaxReorderingThreshold(kSsrc1, 200); packet1_.SetSequenceNumber(400); @@ -458,7 +468,7 
@@ TEST_F(ReceiveStatisticsTest, StreamRestartNeedsTwoConsecutivePackets) { EXPECT_EQ(4u, report_blocks[0].extended_high_seq_num()); } -TEST_F(ReceiveStatisticsTest, WrapsAroundExtendedHighestSequenceNumber) { +TEST_P(ReceiveStatisticsTest, WrapsAroundExtendedHighestSequenceNumber) { packet1_.SetSequenceNumber(0xffff); receive_statistics_->OnRtpPacket(packet1_); @@ -503,8 +513,7 @@ TEST_F(ReceiveStatisticsTest, WrapsAroundExtendedHighestSequenceNumber) { EXPECT_EQ(0x20001u, report_blocks[0].extended_high_seq_num()); } -TEST_F(ReceiveStatisticsTest, StreamDataCounters) { - receive_statistics_ = ReceiveStatistics::Create(&clock_); +TEST_P(ReceiveStatisticsTest, StreamDataCounters) { receive_statistics_->EnableRetransmitDetection(kSsrc1, true); const size_t kHeaderLength = 20; @@ -554,9 +563,7 @@ TEST_F(ReceiveStatisticsTest, StreamDataCounters) { EXPECT_EQ(counters.retransmitted.packets, 1u); } -TEST_F(ReceiveStatisticsTest, LastPacketReceivedTimestamp) { - receive_statistics_ = ReceiveStatistics::Create(&clock_); - +TEST_P(ReceiveStatisticsTest, LastPacketReceivedTimestamp) { clock_.AdvanceTimeMilliseconds(42); receive_statistics_->OnRtpPacket(packet1_); StreamDataCounters counters = receive_statistics_->GetStatistician(kSsrc1) diff --git a/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc b/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc index 6fed7314c0..723064eeba 100644 --- a/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc +++ b/modules/rtp_rtcp/source/remote_ntp_time_estimator.cc @@ -15,6 +15,7 @@ #include "modules/rtp_rtcp/source/time_util.h" #include "rtc_base/logging.h" #include "system_wrappers/include/clock.h" +#include "system_wrappers/include/ntp_time.h" namespace webrtc { @@ -51,9 +52,8 @@ bool RemoteNtpTimeEstimator::UpdateRtcpTimestamp(int64_t rtt, // Update extrapolator with the new arrival time. // The extrapolator assumes the ntp time. 
- int64_t receiver_arrival_time_ms = - clock_->TimeInMilliseconds() + NtpOffsetMs(); - int64_t sender_send_time_ms = Clock::NtpToMs(ntp_secs, ntp_frac); + int64_t receiver_arrival_time_ms = clock_->CurrentNtpInMilliseconds(); + int64_t sender_send_time_ms = NtpTime(ntp_secs, ntp_frac).ToMs(); int64_t sender_arrival_time_ms = sender_send_time_ms + rtt / 2; int64_t remote_to_local_clocks_offset = receiver_arrival_time_ms - sender_arrival_time_ms; @@ -72,16 +72,7 @@ int64_t RemoteNtpTimeEstimator::Estimate(uint32_t rtp_timestamp) { int64_t receiver_capture_ntp_ms = sender_capture_ntp_ms + remote_to_local_clocks_offset; - // TODO(bugs.webrtc.org/11327): Clock::CurrentNtpInMilliseconds() was - // previously used to calculate the offset between the local and the remote - // clock. However, rtc::TimeMillis() + NtpOffsetMs() is now used as the local - // ntp clock value. To preserve the old behavior of this method, the return - // value is adjusted with the difference between the two local ntp clocks. 
int64_t now_ms = clock_->TimeInMilliseconds(); - int64_t offset_between_local_ntp_clocks = - clock_->CurrentNtpInMilliseconds() - now_ms - NtpOffsetMs(); - receiver_capture_ntp_ms += offset_between_local_ntp_clocks; - if (now_ms - last_timing_log_ms_ > kTimingLogIntervalMs) { RTC_LOG(LS_INFO) << "RTP timestamp: " << rtp_timestamp << " in NTP clock: " << sender_capture_ntp_ms @@ -89,6 +80,7 @@ int64_t RemoteNtpTimeEstimator::Estimate(uint32_t rtp_timestamp) { << receiver_capture_ntp_ms; last_timing_log_ms_ = now_ms; } + return receiver_capture_ntp_ms; } diff --git a/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc b/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc index 85f08483ea..73c3e9b9b8 100644 --- a/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc +++ b/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc @@ -10,7 +10,6 @@ #include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h" #include "absl/types/optional.h" -#include "modules/rtp_rtcp/source/time_util.h" #include "system_wrappers/include/clock.h" #include "system_wrappers/include/ntp_time.h" #include "test/gmock.h" @@ -43,9 +42,7 @@ class RemoteNtpTimeEstimatorTest : public ::testing::Test { kTimestampOffset; } - NtpTime GetRemoteNtpTime() { - return TimeMicrosToNtp(remote_clock_.TimeInMicroseconds()); - } + NtpTime GetRemoteNtpTime() { return remote_clock_.CurrentNtpTime(); } void SendRtcpSr() { uint32_t rtcp_timestamp = GetRemoteTimestamp(); diff --git a/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc b/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc index 5e762335ea..54f3555fc6 100644 --- a/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc +++ b/modules/rtp_rtcp/source/rtcp_packet/compound_packet.cc @@ -10,6 +10,9 @@ #include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h" +#include +#include + #include "rtc_base/checks.h" namespace webrtc { @@ -19,16 +22,16 @@ CompoundPacket::CompoundPacket() = default; 
CompoundPacket::~CompoundPacket() = default; -void CompoundPacket::Append(RtcpPacket* packet) { +void CompoundPacket::Append(std::unique_ptr packet) { RTC_CHECK(packet); - appended_packets_.push_back(packet); + appended_packets_.push_back(std::move(packet)); } bool CompoundPacket::Create(uint8_t* packet, size_t* index, size_t max_length, PacketReadyCallback callback) const { - for (RtcpPacket* appended : appended_packets_) { + for (const auto& appended : appended_packets_) { if (!appended->Create(packet, index, max_length, callback)) return false; } @@ -37,7 +40,7 @@ bool CompoundPacket::Create(uint8_t* packet, size_t CompoundPacket::BlockLength() const { size_t block_length = 0; - for (RtcpPacket* appended : appended_packets_) { + for (const auto& appended : appended_packets_) { block_length += appended->BlockLength(); } return block_length; diff --git a/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h b/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h index f521c7f921..8bee600692 100644 --- a/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h +++ b/modules/rtp_rtcp/source/rtcp_packet/compound_packet.h @@ -12,6 +12,7 @@ #ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMPOUND_PACKET_H_ #define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_COMPOUND_PACKET_H_ +#include #include #include "modules/rtp_rtcp/source/rtcp_packet.h" @@ -25,7 +26,7 @@ class CompoundPacket : public RtcpPacket { CompoundPacket(); ~CompoundPacket() override; - void Append(RtcpPacket* packet); + void Append(std::unique_ptr packet); // Size of this packet in bytes (i.e. total size of nested packets). 
size_t BlockLength() const override; @@ -36,7 +37,7 @@ class CompoundPacket : public RtcpPacket { PacketReadyCallback callback) const override; protected: - std::vector appended_packets_; + std::vector> appended_packets_; private: RTC_DISALLOW_COPY_AND_ASSIGN(CompoundPacket); diff --git a/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc b/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc index 02a4f11ac2..9348aee7e4 100644 --- a/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_packet/compound_packet_unittest.cc @@ -10,6 +10,9 @@ #include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h" +#include +#include + #include "modules/rtp_rtcp/source/rtcp_packet.h" #include "modules/rtp_rtcp/source/rtcp_packet/bye.h" #include "modules/rtp_rtcp/source/rtcp_packet/fir.h" @@ -38,14 +41,14 @@ const uint8_t kSeqNo = 13; TEST(RtcpCompoundPacketTest, AppendPacket) { CompoundPacket compound; - Fir fir; - fir.AddRequestTo(kRemoteSsrc, kSeqNo); + auto fir = std::make_unique(); + fir->AddRequestTo(kRemoteSsrc, kSeqNo); ReportBlock rb; - ReceiverReport rr; - rr.SetSenderSsrc(kSenderSsrc); - EXPECT_TRUE(rr.AddReportBlock(rb)); - compound.Append(&rr); - compound.Append(&fir); + auto rr = std::make_unique(); + rr->SetSenderSsrc(kSenderSsrc); + EXPECT_TRUE(rr->AddReportBlock(rb)); + compound.Append(std::move(rr)); + compound.Append(std::move(fir)); rtc::Buffer packet = compound.Build(); RtcpPacketParser parser; @@ -58,21 +61,22 @@ TEST(RtcpCompoundPacketTest, AppendPacket) { TEST(RtcpCompoundPacketTest, AppendPacketWithOwnAppendedPacket) { CompoundPacket root; - CompoundPacket leaf; - Fir fir; - fir.AddRequestTo(kRemoteSsrc, kSeqNo); - Bye bye; + auto leaf = std::make_unique(); + + auto fir = std::make_unique(); + fir->AddRequestTo(kRemoteSsrc, kSeqNo); + auto bye = std::make_unique(); ReportBlock rb; - ReceiverReport rr; - EXPECT_TRUE(rr.AddReportBlock(rb)); - leaf.Append(&rr); - leaf.Append(&fir); + auto 
rr = std::make_unique(); + EXPECT_TRUE(rr->AddReportBlock(rb)); + leaf->Append(std::move(rr)); + leaf->Append(std::move(fir)); - SenderReport sr; - root.Append(&sr); - root.Append(&bye); - root.Append(&leaf); + auto sr = std::make_unique(); + root.Append(std::move(sr)); + root.Append(std::move(bye)); + root.Append(std::move(leaf)); rtc::Buffer packet = root.Build(); RtcpPacketParser parser; @@ -86,14 +90,14 @@ TEST(RtcpCompoundPacketTest, AppendPacketWithOwnAppendedPacket) { TEST(RtcpCompoundPacketTest, BuildWithInputBuffer) { CompoundPacket compound; - Fir fir; - fir.AddRequestTo(kRemoteSsrc, kSeqNo); + auto fir = std::make_unique(); + fir->AddRequestTo(kRemoteSsrc, kSeqNo); ReportBlock rb; - ReceiverReport rr; - rr.SetSenderSsrc(kSenderSsrc); - EXPECT_TRUE(rr.AddReportBlock(rb)); - compound.Append(&rr); - compound.Append(&fir); + auto rr = std::make_unique(); + rr->SetSenderSsrc(kSenderSsrc); + EXPECT_TRUE(rr->AddReportBlock(rb)); + compound.Append(std::move(rr)); + compound.Append(std::move(fir)); const size_t kRrLength = 8; const size_t kReportBlockLength = 24; @@ -115,14 +119,14 @@ TEST(RtcpCompoundPacketTest, BuildWithInputBuffer) { TEST(RtcpCompoundPacketTest, BuildWithTooSmallBuffer_FragmentedSend) { CompoundPacket compound; - Fir fir; - fir.AddRequestTo(kRemoteSsrc, kSeqNo); + auto fir = std::make_unique(); + fir->AddRequestTo(kRemoteSsrc, kSeqNo); ReportBlock rb; - ReceiverReport rr; - rr.SetSenderSsrc(kSenderSsrc); - EXPECT_TRUE(rr.AddReportBlock(rb)); - compound.Append(&rr); - compound.Append(&fir); + auto rr = std::make_unique(); + rr->SetSenderSsrc(kSenderSsrc); + EXPECT_TRUE(rr->AddReportBlock(rb)); + compound.Append(std::move(rr)); + compound.Append(std::move(fir)); const size_t kRrLength = 8; const size_t kReportBlockLength = 24; diff --git a/modules/rtp_rtcp/source/rtcp_packet/extended_reports.h b/modules/rtp_rtcp/source/rtcp_packet/extended_reports.h index 9627aac959..6c804bbc7b 100644 --- a/modules/rtp_rtcp/source/rtcp_packet/extended_reports.h 
+++ b/modules/rtp_rtcp/source/rtcp_packet/extended_reports.h @@ -62,7 +62,6 @@ class ExtendedReports : public RtcpPacket { void ParseRrtrBlock(const uint8_t* block, uint16_t block_length); void ParseDlrrBlock(const uint8_t* block, uint16_t block_length); - void ParseVoipMetricBlock(const uint8_t* block, uint16_t block_length); void ParseTargetBitrateBlock(const uint8_t* block, uint16_t block_length); absl::optional rrtr_block_; diff --git a/modules/rtp_rtcp/source/rtcp_packet/loss_notification.h b/modules/rtp_rtcp/source/rtcp_packet/loss_notification.h index 2603a6715e..99f6d12da4 100644 --- a/modules/rtp_rtcp/source/rtcp_packet/loss_notification.h +++ b/modules/rtp_rtcp/source/rtcp_packet/loss_notification.h @@ -11,9 +11,9 @@ #ifndef MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_LOSS_NOTIFICATION_H_ #define MODULES_RTP_RTCP_SOURCE_RTCP_PACKET_LOSS_NOTIFICATION_H_ +#include "absl/base/attributes.h" #include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" #include "modules/rtp_rtcp/source/rtcp_packet/psfb.h" -#include "rtc_base/system/unused.h" namespace webrtc { namespace rtcp { @@ -29,14 +29,15 @@ class LossNotification : public Psfb { size_t BlockLength() const override; + ABSL_MUST_USE_RESULT bool Create(uint8_t* packet, size_t* index, size_t max_length, - PacketReadyCallback callback) const override - RTC_WARN_UNUSED_RESULT; + PacketReadyCallback callback) const override; // Parse assumes header is already parsed and validated. - bool Parse(const CommonHeader& packet) RTC_WARN_UNUSED_RESULT; + ABSL_MUST_USE_RESULT + bool Parse(const CommonHeader& packet); // Set all of the values transmitted by the loss notification message. // If the values may not be represented by a loss notification message, @@ -44,9 +45,10 @@ class LossNotification : public Psfb { // when |last_recieved| is ahead of |last_decoded| by more than 0x7fff. // This is because |last_recieved| is represented on the wire as a delta, // and only 15 bits are available for that delta. 
+ ABSL_MUST_USE_RESULT bool Set(uint16_t last_decoded, uint16_t last_received, - bool decodability_flag) RTC_WARN_UNUSED_RESULT; + bool decodability_flag); // RTP sequence number of the first packet belong to the last decoded // non-discardable frame. diff --git a/modules/rtp_rtcp/source/rtcp_receiver.cc b/modules/rtp_rtcp/source/rtcp_receiver.cc index bfe2667684..3ab78df17c 100644 --- a/modules/rtp_rtcp/source/rtcp_receiver.cc +++ b/modules/rtp_rtcp/source/rtcp_receiver.cc @@ -12,6 +12,7 @@ #include +#include #include #include #include @@ -38,6 +39,7 @@ #include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/rtp_rtcp/source/time_util.h" #include "modules/rtp_rtcp/source/tmmbr_help.h" #include "rtc_base/checks.h" @@ -63,24 +65,62 @@ const int64_t kRtcpMinFrameLengthMs = 17; // Maximum number of received RRTRs that will be stored. const size_t kMaxNumberOfStoredRrtrs = 300; -constexpr int32_t kDefaultVideoReportInterval = 1000; -constexpr int32_t kDefaultAudioReportInterval = 5000; +constexpr TimeDelta kDefaultVideoReportInterval = TimeDelta::Seconds(1); +constexpr TimeDelta kDefaultAudioReportInterval = TimeDelta::Seconds(5); + +// Returns true if the |timestamp| has exceeded the |interval * +// kRrTimeoutIntervals| period and was reset (set to PlusInfinity()). Returns +// false if the timer was either already reset or if it has not expired. 
+bool ResetTimestampIfExpired(const Timestamp now, + Timestamp& timestamp, + TimeDelta interval) { + if (timestamp.IsInfinite() || + now <= timestamp + interval * kRrTimeoutIntervals) { + return false; + } + + timestamp = Timestamp::PlusInfinity(); + return true; +} + +} // namespace -std::set GetRegisteredSsrcs(const RtpRtcp::Configuration& config) { - std::set ssrcs; - ssrcs.insert(config.local_media_ssrc); +constexpr size_t RTCPReceiver::RegisteredSsrcs::kMediaSsrcIndex; +constexpr size_t RTCPReceiver::RegisteredSsrcs::kMaxSsrcs; + +RTCPReceiver::RegisteredSsrcs::RegisteredSsrcs( + bool disable_sequence_checker, + const RtpRtcpInterface::Configuration& config) + : packet_sequence_checker_(disable_sequence_checker) { + packet_sequence_checker_.Detach(); + ssrcs_.push_back(config.local_media_ssrc); if (config.rtx_send_ssrc) { - ssrcs.insert(*config.rtx_send_ssrc); + ssrcs_.push_back(*config.rtx_send_ssrc); } if (config.fec_generator) { absl::optional flexfec_ssrc = config.fec_generator->FecSsrc(); if (flexfec_ssrc) { - ssrcs.insert(*flexfec_ssrc); + ssrcs_.push_back(*flexfec_ssrc); } } - return ssrcs; + // Ensure that the RegisteredSsrcs can inline the SSRCs. + RTC_DCHECK_LE(ssrcs_.size(), RTCPReceiver::RegisteredSsrcs::kMaxSsrcs); +} + +bool RTCPReceiver::RegisteredSsrcs::contains(uint32_t ssrc) const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + return absl::c_linear_search(ssrcs_, ssrc); +} + +uint32_t RTCPReceiver::RegisteredSsrcs::media_ssrc() const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + return ssrcs_[kMediaSsrcIndex]; +} + +void RTCPReceiver::RegisteredSsrcs::set_media_ssrc(uint32_t ssrc) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + ssrcs_[kMediaSsrcIndex] = ssrc; } -} // namespace struct RTCPReceiver::PacketInformation { uint32_t packet_type_flags = 0; // RTCPPacketTypeFlags bit field. 
@@ -98,76 +138,85 @@ struct RTCPReceiver::PacketInformation { std::unique_ptr loss_notification; }; -// Structure for handing TMMBR and TMMBN rtcp messages (RFC5104, section 3.5.4). -struct RTCPReceiver::TmmbrInformation { - struct TimedTmmbrItem { - rtcp::TmmbItem tmmbr_item; - int64_t last_updated_ms; - }; - - int64_t last_time_received_ms = 0; - - bool ready_for_delete = false; - - std::vector tmmbn; - std::map tmmbr; -}; - -// Structure for storing received RRTR RTCP messages (RFC3611, section 4.4). -struct RTCPReceiver::RrtrInformation { - RrtrInformation(uint32_t ssrc, - uint32_t received_remote_mid_ntp_time, - uint32_t local_receive_mid_ntp_time) - : ssrc(ssrc), - received_remote_mid_ntp_time(received_remote_mid_ntp_time), - local_receive_mid_ntp_time(local_receive_mid_ntp_time) {} - - uint32_t ssrc; - // Received NTP timestamp in compact representation. - uint32_t received_remote_mid_ntp_time; - // NTP time when the report was received in compact representation. - uint32_t local_receive_mid_ntp_time; -}; - -struct RTCPReceiver::LastFirStatus { - LastFirStatus(int64_t now_ms, uint8_t sequence_number) - : request_ms(now_ms), sequence_number(sequence_number) {} - int64_t request_ms; - uint8_t sequence_number; -}; +RTCPReceiver::RTCPReceiver(const RtpRtcpInterface::Configuration& config, + ModuleRtpRtcpImpl2* owner) + : clock_(config.clock), + receiver_only_(config.receiver_only), + rtp_rtcp_(owner), + main_ssrc_(config.local_media_ssrc), + registered_ssrcs_(false, config), + rtcp_bandwidth_observer_(config.bandwidth_callback), + rtcp_intra_frame_observer_(config.intra_frame_callback), + rtcp_loss_notification_observer_(config.rtcp_loss_notification_observer), + network_state_estimate_observer_(config.network_state_estimate_observer), + transport_feedback_observer_(config.transport_feedback_callback), + bitrate_allocation_observer_(config.bitrate_allocation_observer), + report_interval_(config.rtcp_report_interval_ms > 0 + ? 
TimeDelta::Millis(config.rtcp_report_interval_ms) + : (config.audio ? kDefaultAudioReportInterval + : kDefaultVideoReportInterval)), + // TODO(bugs.webrtc.org/10774): Remove fallback. + remote_ssrc_(0), + remote_sender_rtp_time_(0), + remote_sender_packet_count_(0), + remote_sender_octet_count_(0), + remote_sender_reports_count_(0), + xr_rrtr_status_(config.non_sender_rtt_measurement), + xr_rr_rtt_ms_(0), + oldest_tmmbr_info_ms_(0), + cname_callback_(config.rtcp_cname_callback), + report_block_data_observer_(config.report_block_data_observer), + packet_type_counter_observer_(config.rtcp_packet_type_counter_observer), + num_skipped_packets_(0), + last_skipped_packets_warning_ms_(clock_->TimeInMilliseconds()) { + RTC_DCHECK(owner); +} -RTCPReceiver::RTCPReceiver(const RtpRtcp::Configuration& config, +RTCPReceiver::RTCPReceiver(const RtpRtcpInterface::Configuration& config, ModuleRtpRtcp* owner) : clock_(config.clock), receiver_only_(config.receiver_only), rtp_rtcp_(owner), main_ssrc_(config.local_media_ssrc), - registered_ssrcs_(GetRegisteredSsrcs(config)), + registered_ssrcs_(true, config), rtcp_bandwidth_observer_(config.bandwidth_callback), rtcp_intra_frame_observer_(config.intra_frame_callback), rtcp_loss_notification_observer_(config.rtcp_loss_notification_observer), network_state_estimate_observer_(config.network_state_estimate_observer), transport_feedback_observer_(config.transport_feedback_callback), bitrate_allocation_observer_(config.bitrate_allocation_observer), - report_interval_ms_(config.rtcp_report_interval_ms > 0 - ? config.rtcp_report_interval_ms - : (config.audio ? kDefaultAudioReportInterval - : kDefaultVideoReportInterval)), + report_interval_(config.rtcp_report_interval_ms > 0 + ? TimeDelta::Millis(config.rtcp_report_interval_ms) + : (config.audio ? kDefaultAudioReportInterval + : kDefaultVideoReportInterval)), // TODO(bugs.webrtc.org/10774): Remove fallback. 
remote_ssrc_(0), remote_sender_rtp_time_(0), - xr_rrtr_status_(false), + remote_sender_packet_count_(0), + remote_sender_octet_count_(0), + remote_sender_reports_count_(0), + xr_rrtr_status_(config.non_sender_rtt_measurement), xr_rr_rtt_ms_(0), oldest_tmmbr_info_ms_(0), - last_received_rb_ms_(0), - last_increased_sequence_number_ms_(0), - stats_callback_(config.rtcp_statistics_callback), cname_callback_(config.rtcp_cname_callback), report_block_data_observer_(config.report_block_data_observer), packet_type_counter_observer_(config.rtcp_packet_type_counter_observer), num_skipped_packets_(0), last_skipped_packets_warning_ms_(clock_->TimeInMilliseconds()) { RTC_DCHECK(owner); + // Dear reader - if you're here because of this log statement and are + // wondering what this is about, chances are that you are using an instance + // of RTCPReceiver without using the webrtc APIs. This creates a bit of a + // problem for WebRTC because this class is a part of an internal + // implementation that is constantly changing and being improved. + // The intention of this log statement is to give a heads up that changes + // are coming and encourage you to use the public APIs or be prepared that + // things might break down the line as more changes land. A thing you could + // try out for now is to replace the `CustomSequenceChecker` in the header + // with a regular `SequenceChecker` and see if that triggers an + // error in your code. If it does, chances are you have your own threading + // model that is not the same as WebRTC internally has. + RTC_LOG(LS_INFO) << "************** !!!DEPRECATION WARNING!! **************"; } RTCPReceiver::~RTCPReceiver() {} @@ -184,68 +233,79 @@ void RTCPReceiver::IncomingPacket(rtc::ArrayView packet) { TriggerCallbacksFromRtcpPacket(packet_information); } +// This method is only used by test and legacy code, so we should be able to +// remove it soon. 
int64_t RTCPReceiver::LastReceivedReportBlockMs() const { - rtc::CritScope lock(&rtcp_receiver_lock_); - return last_received_rb_ms_; + MutexLock lock(&rtcp_receiver_lock_); + return last_received_rb_.IsFinite() ? last_received_rb_.ms() : 0; } void RTCPReceiver::SetRemoteSSRC(uint32_t ssrc) { - rtc::CritScope lock(&rtcp_receiver_lock_); + MutexLock lock(&rtcp_receiver_lock_); // New SSRC reset old reports. last_received_sr_ntp_.Reset(); remote_ssrc_ = ssrc; } +void RTCPReceiver::set_local_media_ssrc(uint32_t ssrc) { + registered_ssrcs_.set_media_ssrc(ssrc); +} + +uint32_t RTCPReceiver::local_media_ssrc() const { + return registered_ssrcs_.media_ssrc(); +} + uint32_t RTCPReceiver::RemoteSSRC() const { - rtc::CritScope lock(&rtcp_receiver_lock_); + MutexLock lock(&rtcp_receiver_lock_); return remote_ssrc_; } +void RTCPReceiver::RttStats::AddRtt(TimeDelta rtt) { + last_rtt_ = rtt; + if (rtt < min_rtt_) { + min_rtt_ = rtt; + } + if (rtt > max_rtt_) { + max_rtt_ = rtt; + } + sum_rtt_ += rtt; + ++num_rtts_; +} + int32_t RTCPReceiver::RTT(uint32_t remote_ssrc, int64_t* last_rtt_ms, int64_t* avg_rtt_ms, int64_t* min_rtt_ms, int64_t* max_rtt_ms) const { - rtc::CritScope lock(&rtcp_receiver_lock_); - - auto it = received_report_blocks_.find(main_ssrc_); - if (it == received_report_blocks_.end()) - return -1; + MutexLock lock(&rtcp_receiver_lock_); - auto it_info = it->second.find(remote_ssrc); - if (it_info == it->second.end()) - return -1; - - const ReportBlockData* report_block_data = &it_info->second; - - if (report_block_data->num_rtts() == 0) + auto it = rtts_.find(remote_ssrc); + if (it == rtts_.end()) { return -1; + } - if (last_rtt_ms) - *last_rtt_ms = report_block_data->last_rtt_ms(); + if (last_rtt_ms) { + *last_rtt_ms = it->second.last_rtt().ms(); + } if (avg_rtt_ms) { - *avg_rtt_ms = - report_block_data->sum_rtt_ms() / report_block_data->num_rtts(); + *avg_rtt_ms = it->second.average_rtt().ms(); } - if (min_rtt_ms) - *min_rtt_ms = report_block_data->min_rtt_ms(); 
+ if (min_rtt_ms) { + *min_rtt_ms = it->second.min_rtt().ms(); + } - if (max_rtt_ms) - *max_rtt_ms = report_block_data->max_rtt_ms(); + if (max_rtt_ms) { + *max_rtt_ms = it->second.max_rtt().ms(); + } return 0; } -void RTCPReceiver::SetRtcpXrRrtrStatus(bool enable) { - rtc::CritScope lock(&rtcp_receiver_lock_); - xr_rrtr_status_ = enable; -} - bool RTCPReceiver::GetAndResetXrRrRtt(int64_t* rtt_ms) { RTC_DCHECK(rtt_ms); - rtc::CritScope lock(&rtcp_receiver_lock_); + MutexLock lock(&rtcp_receiver_lock_); if (xr_rr_rtt_ms_ == 0) { return false; } @@ -254,12 +314,57 @@ bool RTCPReceiver::GetAndResetXrRrRtt(int64_t* rtt_ms) { return true; } +// Called regularly (1/sec) on the worker thread to do rtt calculations. +absl::optional RTCPReceiver::OnPeriodicRttUpdate( + Timestamp newer_than, + bool sending) { + // Running on the worker thread (same as construction thread). + absl::optional rtt; + + if (sending) { + // Check if we've received a report block within the last kRttUpdateInterval + // amount of time. + MutexLock lock(&rtcp_receiver_lock_); + if (last_received_rb_.IsInfinite() || last_received_rb_ > newer_than) { + TimeDelta max_rtt = TimeDelta::MinusInfinity(); + for (const auto& rtt_stats : rtts_) { + if (rtt_stats.second.last_rtt() > max_rtt) { + max_rtt = rtt_stats.second.last_rtt(); + } + } + if (max_rtt.IsFinite()) { + rtt = max_rtt; + } + } + + // Check for expired timers and if so, log and reset. + auto now = clock_->CurrentTime(); + if (RtcpRrTimeoutLocked(now)) { + RTC_LOG_F(LS_WARNING) << "Timeout: No RTCP RR received."; + } else if (RtcpRrSequenceNumberTimeoutLocked(now)) { + RTC_LOG_F(LS_WARNING) << "Timeout: No increase in RTCP RR extended " + "highest sequence number."; + } + } else { + // Report rtt from receiver. 
+ int64_t rtt_ms; + if (GetAndResetXrRrRtt(&rtt_ms)) { + rtt.emplace(TimeDelta::Millis(rtt_ms)); + } + } + + return rtt; +} + bool RTCPReceiver::NTP(uint32_t* received_ntp_secs, uint32_t* received_ntp_frac, uint32_t* rtcp_arrival_time_secs, uint32_t* rtcp_arrival_time_frac, - uint32_t* rtcp_timestamp) const { - rtc::CritScope lock(&rtcp_receiver_lock_); + uint32_t* rtcp_timestamp, + uint32_t* remote_sender_packet_count, + uint64_t* remote_sender_octet_count, + uint64_t* remote_sender_reports_count) const { + MutexLock lock(&rtcp_receiver_lock_); if (!last_received_sr_ntp_.Valid()) return false; @@ -268,7 +373,6 @@ bool RTCPReceiver::NTP(uint32_t* received_ntp_secs, *received_ntp_secs = remote_sender_ntp_time_.seconds(); if (received_ntp_frac) *received_ntp_frac = remote_sender_ntp_time_.fractions(); - // Rtp time from incoming SenderReport. if (rtcp_timestamp) *rtcp_timestamp = remote_sender_rtp_time_; @@ -279,20 +383,27 @@ bool RTCPReceiver::NTP(uint32_t* received_ntp_secs, if (rtcp_arrival_time_frac) *rtcp_arrival_time_frac = last_received_sr_ntp_.fractions(); + // Counters. 
+ if (remote_sender_packet_count) + *remote_sender_packet_count = remote_sender_packet_count_; + if (remote_sender_octet_count) + *remote_sender_octet_count = remote_sender_octet_count_; + if (remote_sender_reports_count) + *remote_sender_reports_count = remote_sender_reports_count_; + return true; } std::vector RTCPReceiver::ConsumeReceivedXrReferenceTimeInfo() { - rtc::CritScope lock(&rtcp_receiver_lock_); + MutexLock lock(&rtcp_receiver_lock_); const size_t last_xr_rtis_size = std::min( received_rrtrs_.size(), rtcp::ExtendedReports::kMaxNumberOfDlrrItems); std::vector last_xr_rtis; last_xr_rtis.reserve(last_xr_rtis_size); - const uint32_t now_ntp = - CompactNtp(TimeMicrosToNtp(clock_->TimeInMicroseconds())); + const uint32_t now_ntp = CompactNtp(clock_->CurrentNtpTime()); for (size_t i = 0; i < last_xr_rtis_size; ++i) { RrtrInformation& rrtr = received_rrtrs_.front(); @@ -305,29 +416,18 @@ RTCPReceiver::ConsumeReceivedXrReferenceTimeInfo() { return last_xr_rtis; } -// We can get multiple receive reports when we receive the report from a CE. 
-int32_t RTCPReceiver::StatisticsReceived( - std::vector* receive_blocks) const { - RTC_DCHECK(receive_blocks); - rtc::CritScope lock(&rtcp_receiver_lock_); - for (const auto& reports_per_receiver : received_report_blocks_) - for (const auto& report : reports_per_receiver.second) - receive_blocks->push_back(report.second.report_block()); - return 0; -} - std::vector RTCPReceiver::GetLatestReportBlockData() const { std::vector result; - rtc::CritScope lock(&rtcp_receiver_lock_); - for (const auto& reports_per_receiver : received_report_blocks_) - for (const auto& report : reports_per_receiver.second) - result.push_back(report.second); + MutexLock lock(&rtcp_receiver_lock_); + for (const auto& report : received_report_blocks_) { + result.push_back(report.second); + } return result; } bool RTCPReceiver::ParseCompoundPacket(rtc::ArrayView packet, PacketInformation* packet_information) { - rtc::CritScope lock(&rtcp_receiver_lock_); + MutexLock lock(&rtcp_receiver_lock_); CommonHeader rtcp_block; for (const uint8_t* next_block = packet.begin(); next_block != packet.end(); @@ -451,7 +551,10 @@ void RTCPReceiver::HandleSenderReport(const CommonHeader& rtcp_block, remote_sender_ntp_time_ = sender_report.ntp(); remote_sender_rtp_time_ = sender_report.rtp_timestamp(); - last_received_sr_ntp_ = TimeMicrosToNtp(clock_->TimeInMicroseconds()); + last_received_sr_ntp_ = clock_->CurrentNtpTime(); + remote_sender_packet_count_ = sender_report.sender_packet_count(); + remote_sender_octet_count_ = sender_report.sender_octet_count(); + remote_sender_reports_count_++; } else { // We will only store the send report from one source, but // we will store all the receive blocks. @@ -495,14 +598,13 @@ void RTCPReceiver::HandleReportBlock(const ReportBlock& report_block, // which the information in this reception report block pertains. // Filter out all report blocks that are not for us. 
- if (registered_ssrcs_.count(report_block.source_ssrc()) == 0) + if (!registered_ssrcs_.contains(report_block.source_ssrc())) return; - const Timestamp now = clock_->CurrentTime(); - last_received_rb_ms_ = now.ms(); + last_received_rb_ = clock_->CurrentTime(); ReportBlockData* report_block_data = - &received_report_blocks_[report_block.source_ssrc()][remote_ssrc]; + &received_report_blocks_[report_block.source_ssrc()]; RTCPReportBlock rtcp_report_block; rtcp_report_block.sender_ssrc = remote_ssrc; rtcp_report_block.source_ssrc = report_block.source_ssrc(); @@ -512,7 +614,7 @@ void RTCPReceiver::HandleReportBlock(const ReportBlock& report_block, report_block_data->report_block().extended_highest_sequence_number) { // We have successfully delivered new RTP packets to the remote side after // the last RR was sent from the remote side. - last_increased_sequence_number_ms_ = now.ms(); + last_increased_sequence_number_ = last_received_rb_; } rtcp_report_block.extended_highest_sequence_number = report_block.extended_high_seq_num(); @@ -538,13 +640,17 @@ void RTCPReceiver::HandleReportBlock(const ReportBlock& report_block, if (send_time_ntp != 0) { uint32_t delay_ntp = report_block.delay_since_last_sr(); // Local NTP time. - uint32_t receive_time_ntp = CompactNtp(TimeMicrosToNtp(now.us())); + uint32_t receive_time_ntp = + CompactNtp(clock_->ConvertTimestampToNtpTime(last_received_rb_)); // RTT in 1/(2^16) seconds. uint32_t rtt_ntp = receive_time_ntp - delay_ntp - send_time_ntp; // Convert to 1/1000 seconds (milliseconds). 
rtt_ms = CompactNtpRttToMs(rtt_ntp); report_block_data->AddRoundTripTimeSample(rtt_ms); + if (report_block.source_ssrc() == main_ssrc_) { + rtts_[remote_ssrc].AddRtt(TimeDelta::Millis(rtt_ms)); + } packet_information->rtt_ms = rtt_ms; } @@ -577,37 +683,22 @@ RTCPReceiver::TmmbrInformation* RTCPReceiver::GetTmmbrInformation( return &it->second; } +// These two methods (RtcpRrTimeout and RtcpRrSequenceNumberTimeout) only exist +// for tests and legacy code (rtp_rtcp_impl.cc). We should be able to to delete +// the methods and require that access to the locked variables only happens on +// the worker thread and thus no locking is needed. bool RTCPReceiver::RtcpRrTimeout() { - rtc::CritScope lock(&rtcp_receiver_lock_); - if (last_received_rb_ms_ == 0) - return false; - - int64_t time_out_ms = kRrTimeoutIntervals * report_interval_ms_; - if (clock_->TimeInMilliseconds() > last_received_rb_ms_ + time_out_ms) { - // Reset the timer to only trigger one log. - last_received_rb_ms_ = 0; - return true; - } - return false; + MutexLock lock(&rtcp_receiver_lock_); + return RtcpRrTimeoutLocked(clock_->CurrentTime()); } bool RTCPReceiver::RtcpRrSequenceNumberTimeout() { - rtc::CritScope lock(&rtcp_receiver_lock_); - if (last_increased_sequence_number_ms_ == 0) - return false; - - int64_t time_out_ms = kRrTimeoutIntervals * report_interval_ms_; - if (clock_->TimeInMilliseconds() > - last_increased_sequence_number_ms_ + time_out_ms) { - // Reset the timer to only trigger one log. 
- last_increased_sequence_number_ms_ = 0; - return true; - } - return false; + MutexLock lock(&rtcp_receiver_lock_); + return RtcpRrSequenceNumberTimeoutLocked(clock_->CurrentTime()); } bool RTCPReceiver::UpdateTmmbrTimers() { - rtc::CritScope lock(&rtcp_receiver_lock_); + MutexLock lock(&rtcp_receiver_lock_); int64_t now_ms = clock_->TimeInMilliseconds(); int64_t timeout_ms = now_ms - kTmmbrTimeoutIntervalMs; @@ -644,7 +735,7 @@ bool RTCPReceiver::UpdateTmmbrTimers() { } std::vector RTCPReceiver::BoundingSet(bool* tmmbr_owner) { - rtc::CritScope lock(&rtcp_receiver_lock_); + MutexLock lock(&rtcp_receiver_lock_); TmmbrInformation* tmmbr_info = GetTmmbrInformation(remote_ssrc_); if (!tmmbr_info) return std::vector(); @@ -662,7 +753,6 @@ void RTCPReceiver::HandleSdes(const CommonHeader& rtcp_block, } for (const rtcp::Sdes::Chunk& chunk : sdes.chunks()) { - received_cnames_[chunk.ssrc] = chunk.cname; if (cname_callback_) cname_callback_->OnCname(chunk.ssrc, chunk.cname); } @@ -718,15 +808,16 @@ void RTCPReceiver::HandleBye(const CommonHeader& rtcp_block) { } // Clear our lists. 
- for (auto& reports_per_receiver : received_report_blocks_) - reports_per_receiver.second.erase(bye.sender_ssrc()); + rtts_.erase(bye.sender_ssrc()); + EraseIf(received_report_blocks_, [&](const auto& elem) { + return elem.second.report_block().sender_ssrc == bye.sender_ssrc(); + }); TmmbrInformation* tmmbr_info = GetTmmbrInformation(bye.sender_ssrc()); if (tmmbr_info) tmmbr_info->ready_for_delete = true; last_fir_.erase(bye.sender_ssrc()); - received_cnames_.erase(bye.sender_ssrc()); auto it = received_rrtrs_ssrc_it_.find(bye.sender_ssrc()); if (it != received_rrtrs_ssrc_it_.end()) { received_rrtrs_.erase(it->second); @@ -758,8 +849,7 @@ void RTCPReceiver::HandleXr(const CommonHeader& rtcp_block, void RTCPReceiver::HandleXrReceiveReferenceTime(uint32_t sender_ssrc, const rtcp::Rrtr& rrtr) { uint32_t received_remote_mid_ntp_time = CompactNtp(rrtr.ntp()); - uint32_t local_receive_mid_ntp_time = - CompactNtp(TimeMicrosToNtp(clock_->TimeInMicroseconds())); + uint32_t local_receive_mid_ntp_time = CompactNtp(clock_->CurrentNtpTime()); auto it = received_rrtrs_ssrc_it_.find(sender_ssrc); if (it != received_rrtrs_ssrc_it_.end()) { @@ -778,7 +868,7 @@ void RTCPReceiver::HandleXrReceiveReferenceTime(uint32_t sender_ssrc, } void RTCPReceiver::HandleXrDlrrReportBlock(const rtcp::ReceiveTimeInfo& rti) { - if (registered_ssrcs_.count(rti.ssrc) == 0) // Not to us. + if (!registered_ssrcs_.contains(rti.ssrc)) // Not to us. return; // Caller should explicitly enable rtt calculation using extended reports. 
@@ -793,7 +883,7 @@ void RTCPReceiver::HandleXrDlrrReportBlock(const rtcp::ReceiveTimeInfo& rti) { return; uint32_t delay_ntp = rti.delay_since_last_rr; - uint32_t now_ntp = CompactNtp(TimeMicrosToNtp(clock_->TimeInMicroseconds())); + uint32_t now_ntp = CompactNtp(clock_->CurrentNtpTime()); uint32_t rtt_ntp = now_ntp - delay_ntp - send_time_ntp; xr_rr_rtt_ms_ = CompactNtpRttToMs(rtt_ntp); @@ -1001,14 +1091,7 @@ void RTCPReceiver::TriggerCallbacksFromRtcpPacket( // Might trigger a OnReceivedBandwidthEstimateUpdate. NotifyTmmbrUpdated(); } - uint32_t local_ssrc; - std::set registered_ssrcs; - { - // We don't want to hold this critsect when triggering the callbacks below. - rtc::CritScope lock(&rtcp_receiver_lock_); - local_ssrc = main_ssrc_; - registered_ssrcs = registered_ssrcs_; - } + if (!receiver_only_ && (packet_information.packet_type_flags & kRtcpSrReq)) { rtp_rtcp_->OnRequestSendReport(); } @@ -1035,7 +1118,7 @@ void RTCPReceiver::TriggerCallbacksFromRtcpPacket( RTC_LOG(LS_VERBOSE) << "Incoming FIR from SSRC " << packet_information.remote_ssrc; } - rtcp_intra_frame_observer_->OnReceivedIntraFrameRequest(local_ssrc); + rtcp_intra_frame_observer_->OnReceivedIntraFrameRequest(main_ssrc_); } } if (rtcp_loss_notification_observer_ && @@ -1043,7 +1126,7 @@ void RTCPReceiver::TriggerCallbacksFromRtcpPacket( rtcp::LossNotification* loss_notification = packet_information.loss_notification.get(); RTC_DCHECK(loss_notification); - if (loss_notification->media_ssrc() == local_ssrc) { + if (loss_notification->media_ssrc() == main_ssrc_) { rtcp_loss_notification_observer_->OnReceivedLossNotification( loss_notification->media_ssrc(), loss_notification->last_decoded(), loss_notification->last_received(), @@ -1075,8 +1158,8 @@ void RTCPReceiver::TriggerCallbacksFromRtcpPacket( (packet_information.packet_type_flags & kRtcpTransportFeedback)) { uint32_t media_source_ssrc = packet_information.transport_feedback->media_ssrc(); - if (media_source_ssrc == local_ssrc || - 
registered_ssrcs.find(media_source_ssrc) != registered_ssrcs.end()) { + if (media_source_ssrc == main_ssrc_ || + registered_ssrcs_.contains(media_source_ssrc)) { transport_feedback_observer_->OnTransportFeedback( *packet_information.transport_feedback); } @@ -1095,18 +1178,6 @@ void RTCPReceiver::TriggerCallbacksFromRtcpPacket( } if (!receiver_only_) { - if (stats_callback_) { - for (const auto& report_block : packet_information.report_blocks) { - RtcpStatistics stats; - stats.packets_lost = report_block.packets_lost; - stats.extended_highest_sequence_number = - report_block.extended_highest_sequence_number; - stats.fraction_lost = report_block.fraction_lost; - stats.jitter = report_block.jitter; - - stats_callback_->StatisticsUpdated(stats, report_block.source_ssrc); - } - } if (report_block_data_observer_) { for (const auto& report_block_data : packet_information.report_block_datas) { @@ -1117,22 +1188,8 @@ void RTCPReceiver::TriggerCallbacksFromRtcpPacket( } } -int32_t RTCPReceiver::CNAME(uint32_t remoteSSRC, - char cName[RTCP_CNAME_SIZE]) const { - RTC_DCHECK(cName); - - rtc::CritScope lock(&rtcp_receiver_lock_); - auto received_cname_it = received_cnames_.find(remoteSSRC); - if (received_cname_it == received_cnames_.end()) - return -1; - - size_t length = received_cname_it->second.copy(cName, RTCP_CNAME_SIZE - 1); - cName[length] = 0; - return 0; -} - std::vector RTCPReceiver::TmmbrReceived() { - rtc::CritScope lock(&rtcp_receiver_lock_); + MutexLock lock(&rtcp_receiver_lock_); std::vector candidates; int64_t now_ms = clock_->TimeInMilliseconds(); @@ -1152,4 +1209,13 @@ std::vector RTCPReceiver::TmmbrReceived() { return candidates; } +bool RTCPReceiver::RtcpRrTimeoutLocked(Timestamp now) { + return ResetTimestampIfExpired(now, last_received_rb_, report_interval_); +} + +bool RTCPReceiver::RtcpRrSequenceNumberTimeoutLocked(Timestamp now) { + return ResetTimestampIfExpired(now, last_increased_sequence_number_, + report_interval_); +} + } // namespace webrtc diff 
--git a/modules/rtp_rtcp/source/rtcp_receiver.h b/modules/rtp_rtcp/source/rtcp_receiver.h index ef41476903..fa9f367c9e 100644 --- a/modules/rtp_rtcp/source/rtcp_receiver.h +++ b/modules/rtp_rtcp/source/rtcp_receiver.h @@ -13,23 +13,29 @@ #include #include -#include #include #include #include "api/array_view.h" +#include "api/sequence_checker.h" #include "modules/rtp_rtcp/include/report_block_data.h" #include "modules/rtp_rtcp/include/rtcp_statistics.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_nack_stats.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" -#include "rtc_base/critical_section.h" +#include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" +#include "rtc_base/containers/flat_map.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/ntp_time.h" namespace webrtc { + +class ModuleRtpRtcpImpl2; class VideoBitrateAllocationObserver; + namespace rtcp { class CommonHeader; class ReportBlock; @@ -53,7 +59,12 @@ class RTCPReceiver final { virtual ~ModuleRtpRtcp() = default; }; - RTCPReceiver(const RtpRtcp::Configuration& config, ModuleRtpRtcp* owner); + RTCPReceiver(const RtpRtcpInterface::Configuration& config, + ModuleRtpRtcp* owner); + + RTCPReceiver(const RtpRtcpInterface::Configuration& config, + ModuleRtpRtcpImpl2* owner); + ~RTCPReceiver(); void IncomingPacket(const uint8_t* packet, size_t packet_size) { @@ -63,18 +74,30 @@ class RTCPReceiver final { int64_t LastReceivedReportBlockMs() const; + void set_local_media_ssrc(uint32_t ssrc); + uint32_t local_media_ssrc() const; + void SetRemoteSSRC(uint32_t ssrc); uint32_t RemoteSSRC() const; - // Get received cname. 
- int32_t CNAME(uint32_t remote_ssrc, char cname[RTCP_CNAME_SIZE]) const; + bool receiver_only() const { return receiver_only_; } // Get received NTP. + // The types for the arguments below derive from the specification: + // - `remote_sender_packet_count`: `RTCSentRtpStreamStats.packetsSent` [1] + // - `remote_sender_octet_count`: `RTCSentRtpStreamStats.bytesSent` [1] + // - `remote_sender_reports_count`: + // `RTCRemoteOutboundRtpStreamStats.reportsSent` [2] + // [1] https://www.w3.org/TR/webrtc-stats/#remoteoutboundrtpstats-dict* + // [2] https://www.w3.org/TR/webrtc-stats/#dom-rtcsentrtpstreamstats bool NTP(uint32_t* received_ntp_secs, uint32_t* received_ntp_frac, uint32_t* rtcp_arrival_time_secs, uint32_t* rtcp_arrival_time_frac, - uint32_t* rtcp_timestamp) const; + uint32_t* rtcp_timestamp, + uint32_t* remote_sender_packet_count, + uint64_t* remote_sender_octet_count, + uint64_t* remote_sender_reports_count) const; std::vector ConsumeReceivedXrReferenceTimeInfo(); @@ -85,15 +108,16 @@ class RTCPReceiver final { int64_t* min_rtt_ms, int64_t* max_rtt_ms) const; - void SetRtcpXrRrtrStatus(bool enable); bool GetAndResetXrRrRtt(int64_t* rtt_ms); - // Get statistics. - int32_t StatisticsReceived(std::vector* receiveBlocks) const; + // Called once per second on the worker thread to do rtt calculations. + // Returns an optional rtt value if one is available. + absl::optional OnPeriodicRttUpdate(Timestamp newer_than, + bool sending); + // A snapshot of Report Blocks with additional data of interest to statistics. - // Within this list, the sender-source SSRC pair is unique and per-pair the - // ReportBlockData represents the latest Report Block that was received for - // that pair. + // Within this list, the source SSRC is unique and ReportBlockData represents + // the latest Report Block that was received for that SSRC. 
std::vector GetLatestReportBlockData() const; // Returns true if we haven't received an RTCP RR for several RTCP @@ -114,14 +138,111 @@ class RTCPReceiver final { void NotifyTmmbrUpdated(); private: +#if RTC_DCHECK_IS_ON + class CustomSequenceChecker : public SequenceChecker { + public: + explicit CustomSequenceChecker(bool disable_checks) + : disable_checks_(disable_checks) {} + bool IsCurrent() const { + if (disable_checks_) + return true; + return SequenceChecker::IsCurrent(); + } + + private: + const bool disable_checks_; + }; +#else + class CustomSequenceChecker : public SequenceChecker { + public: + explicit CustomSequenceChecker(bool) {} + }; +#endif + + // A lightweight inlined set of local SSRCs. + class RegisteredSsrcs { + public: + static constexpr size_t kMediaSsrcIndex = 0; + static constexpr size_t kMaxSsrcs = 3; + // Initializes the set of registered local SSRCS by extracting them from the + // provided `config`. The `disable_sequence_checker` flag is a workaround + // to be able to use a sequence checker without breaking downstream + // code that currently doesn't follow the same threading rules as webrtc. + RegisteredSsrcs(bool disable_sequence_checker, + const RtpRtcpInterface::Configuration& config); + + // Indicates if `ssrc` is in the set of registered local SSRCs. + bool contains(uint32_t ssrc) const; + uint32_t media_ssrc() const; + void set_media_ssrc(uint32_t ssrc); + + private: + RTC_NO_UNIQUE_ADDRESS CustomSequenceChecker packet_sequence_checker_; + absl::InlinedVector ssrcs_ + RTC_GUARDED_BY(packet_sequence_checker_); + }; + struct PacketInformation; - struct TmmbrInformation; - struct RrtrInformation; - struct LastFirStatus; - // RTCP report blocks mapped by remote SSRC. - using ReportBlockDataMap = std::map; - // RTCP report blocks map mapped by source SSRC. - using ReportBlockMap = std::map; + + // Structure for handing TMMBR and TMMBN rtcp messages (RFC5104, + // section 3.5.4). 
+ struct TmmbrInformation { + struct TimedTmmbrItem { + rtcp::TmmbItem tmmbr_item; + int64_t last_updated_ms; + }; + + int64_t last_time_received_ms = 0; + + bool ready_for_delete = false; + + std::vector tmmbn; + std::map tmmbr; + }; + + // Structure for storing received RRTR RTCP messages (RFC3611, section 4.4). + struct RrtrInformation { + RrtrInformation(uint32_t ssrc, + uint32_t received_remote_mid_ntp_time, + uint32_t local_receive_mid_ntp_time) + : ssrc(ssrc), + received_remote_mid_ntp_time(received_remote_mid_ntp_time), + local_receive_mid_ntp_time(local_receive_mid_ntp_time) {} + + uint32_t ssrc; + // Received NTP timestamp in compact representation. + uint32_t received_remote_mid_ntp_time; + // NTP time when the report was received in compact representation. + uint32_t local_receive_mid_ntp_time; + }; + + struct LastFirStatus { + LastFirStatus(int64_t now_ms, uint8_t sequence_number) + : request_ms(now_ms), sequence_number(sequence_number) {} + int64_t request_ms; + uint8_t sequence_number; + }; + + class RttStats { + public: + RttStats() = default; + RttStats(const RttStats&) = default; + RttStats& operator=(const RttStats&) = default; + + void AddRtt(TimeDelta rtt); + + TimeDelta last_rtt() const { return last_rtt_; } + TimeDelta min_rtt() const { return min_rtt_; } + TimeDelta max_rtt() const { return max_rtt_; } + TimeDelta average_rtt() const { return sum_rtt_ / num_rtts_; } + + private: + TimeDelta last_rtt_ = TimeDelta::Zero(); + TimeDelta min_rtt_ = TimeDelta::PlusInfinity(); + TimeDelta max_rtt_ = TimeDelta::MinusInfinity(); + TimeDelta sum_rtt_ = TimeDelta::Zero(); + size_t num_rtts_ = 0; + }; bool ParseCompoundPacket(rtc::ArrayView packet, PacketInformation* packet_information); @@ -209,11 +330,18 @@ class RTCPReceiver final { PacketInformation* packet_information) RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_); + bool RtcpRrTimeoutLocked(Timestamp now) + RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_); + + bool 
RtcpRrSequenceNumberTimeoutLocked(Timestamp now) + RTC_EXCLUSIVE_LOCKS_REQUIRED(rtcp_receiver_lock_); + Clock* const clock_; const bool receiver_only_; ModuleRtpRtcp* const rtp_rtcp_; const uint32_t main_ssrc_; - const std::set registered_ssrcs_; + // The set of registered local SSRCs. + RegisteredSsrcs registered_ssrcs_; RtcpBandwidthObserver* const rtcp_bandwidth_observer_; RtcpIntraFrameObserver* const rtcp_intra_frame_observer_; @@ -221,9 +349,9 @@ class RTCPReceiver final { NetworkStateEstimateObserver* const network_state_estimate_observer_; TransportFeedbackObserver* const transport_feedback_observer_; VideoBitrateAllocationObserver* const bitrate_allocation_observer_; - const int report_interval_ms_; + const TimeDelta report_interval_; - rtc::CriticalSection rtcp_receiver_lock_; + mutable Mutex rtcp_receiver_lock_; uint32_t remote_ssrc_ RTC_GUARDED_BY(rtcp_receiver_lock_); // Received sender report. @@ -231,41 +359,44 @@ class RTCPReceiver final { uint32_t remote_sender_rtp_time_ RTC_GUARDED_BY(rtcp_receiver_lock_); // When did we receive the last send report. NtpTime last_received_sr_ntp_ RTC_GUARDED_BY(rtcp_receiver_lock_); + uint32_t remote_sender_packet_count_ RTC_GUARDED_BY(rtcp_receiver_lock_); + uint64_t remote_sender_octet_count_ RTC_GUARDED_BY(rtcp_receiver_lock_); + uint64_t remote_sender_reports_count_ RTC_GUARDED_BY(rtcp_receiver_lock_); // Received RRTR information in ascending receive time order. std::list received_rrtrs_ RTC_GUARDED_BY(rtcp_receiver_lock_); // Received RRTR information mapped by remote ssrc. - std::map::iterator> + flat_map::iterator> received_rrtrs_ssrc_it_ RTC_GUARDED_BY(rtcp_receiver_lock_); // Estimated rtt, zero when there is no valid estimate. - bool xr_rrtr_status_ RTC_GUARDED_BY(rtcp_receiver_lock_); + const bool xr_rrtr_status_; int64_t xr_rr_rtt_ms_; int64_t oldest_tmmbr_info_ms_ RTC_GUARDED_BY(rtcp_receiver_lock_); // Mapped by remote ssrc. 
- std::map tmmbr_infos_ + flat_map tmmbr_infos_ RTC_GUARDED_BY(rtcp_receiver_lock_); - ReportBlockMap received_report_blocks_ RTC_GUARDED_BY(rtcp_receiver_lock_); - std::map last_fir_ + // Round-Trip Time per remote sender ssrc. + flat_map rtts_ RTC_GUARDED_BY(rtcp_receiver_lock_); + + // Report blocks per local source ssrc. + flat_map received_report_blocks_ RTC_GUARDED_BY(rtcp_receiver_lock_); - std::map received_cnames_ + flat_map last_fir_ RTC_GUARDED_BY(rtcp_receiver_lock_); // The last time we received an RTCP Report block for this module. - int64_t last_received_rb_ms_ RTC_GUARDED_BY(rtcp_receiver_lock_); + Timestamp last_received_rb_ RTC_GUARDED_BY(rtcp_receiver_lock_) = + Timestamp::PlusInfinity(); // The time we last received an RTCP RR telling we have successfully // delivered RTP packet to the remote side. - int64_t last_increased_sequence_number_ms_; + Timestamp last_increased_sequence_number_ = Timestamp::PlusInfinity(); - RtcpStatisticsCallback* const stats_callback_; RtcpCnameCallback* const cname_callback_; - // TODO(hbos): Remove RtcpStatisticsCallback in favor of - // ReportBlockDataObserver; the ReportBlockData contains a superset of the - // RtcpStatistics data. 
ReportBlockDataObserver* const report_block_data_observer_; RtcpPacketTypeCounterObserver* const packet_type_counter_observer_; diff --git a/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc b/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc index f95219674b..3065534108 100644 --- a/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc @@ -11,6 +11,7 @@ #include "modules/rtp_rtcp/source/rtcp_receiver.h" #include +#include #include "api/array_view.h" #include "api/units/timestamp.h" @@ -50,6 +51,7 @@ using rtcp::ReceiveTimeInfo; using ::testing::_; using ::testing::AllOf; using ::testing::ElementsAreArray; +using ::testing::Eq; using ::testing::Field; using ::testing::InSequence; using ::testing::IsEmpty; @@ -85,14 +87,6 @@ class MockRtcpLossNotificationObserver : public RtcpLossNotificationObserver { (override)); }; -class MockRtcpCallbackImpl : public RtcpStatisticsCallback { - public: - MOCK_METHOD(void, - StatisticsUpdated, - (const RtcpStatistics&, uint32_t), - (override)); -}; - class MockCnameCallbackImpl : public RtcpCnameCallback { public: MOCK_METHOD(void, OnCname, (uint32_t, absl::string_view), (override)); @@ -161,8 +155,8 @@ struct ReceiverMocks { StrictMock rtp_rtcp_impl; }; -RtpRtcp::Configuration DefaultConfiguration(ReceiverMocks* mocks) { - RtpRtcp::Configuration config; +RtpRtcpInterface::Configuration DefaultConfiguration(ReceiverMocks* mocks) { + RtpRtcpInterface::Configuration config; config.clock = &mocks->clock; config.receiver_only = false; config.rtcp_packet_type_counter_observer = @@ -207,7 +201,8 @@ TEST(RtcpReceiverTest, InjectSrPacket) { RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); - EXPECT_FALSE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr)); + EXPECT_FALSE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr, + nullptr, nullptr, nullptr)); int64_t now = mocks.clock.TimeInMilliseconds(); 
rtcp::SenderReport sr; @@ -218,7 +213,8 @@ TEST(RtcpReceiverTest, InjectSrPacket) { OnReceivedRtcpReceiverReport(IsEmpty(), _, now)); receiver.IncomingPacket(sr.Build()); - EXPECT_TRUE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr)); + EXPECT_TRUE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, + nullptr, nullptr)); } TEST(RtcpReceiverTest, InjectSrPacketFromUnknownSender) { @@ -230,7 +226,7 @@ TEST(RtcpReceiverTest, InjectSrPacketFromUnknownSender) { rtcp::SenderReport sr; sr.SetSenderSsrc(kUnknownSenderSsrc); - // The parser will handle report blocks in Sender Report from other than his + // The parser will handle report blocks in Sender Report from other than their // expected peer. EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks); EXPECT_CALL(mocks.bandwidth_observer, @@ -238,7 +234,8 @@ TEST(RtcpReceiverTest, InjectSrPacketFromUnknownSender) { receiver.IncomingPacket(sr.Build()); // But will not flag that he's gotten sender information. - EXPECT_FALSE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr)); + EXPECT_FALSE(receiver.NTP(nullptr, nullptr, nullptr, nullptr, nullptr, + nullptr, nullptr, nullptr)); } TEST(RtcpReceiverTest, InjectSrPacketCalculatesRTT) { @@ -253,8 +250,7 @@ TEST(RtcpReceiverTest, InjectSrPacketCalculatesRTT) { int64_t rtt_ms = 0; EXPECT_EQ(-1, receiver.RTT(kSenderSsrc, &rtt_ms, nullptr, nullptr, nullptr)); - uint32_t sent_ntp = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime()); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); rtcp::SenderReport sr; @@ -285,8 +281,7 @@ TEST(RtcpReceiverTest, InjectSrPacketCalculatesNegativeRTTAsOne) { int64_t rtt_ms = 0; EXPECT_EQ(-1, receiver.RTT(kSenderSsrc, &rtt_ms, nullptr, nullptr, nullptr)); - uint32_t sent_ntp = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime()); 
mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); rtcp::SenderReport sr; @@ -316,8 +311,7 @@ TEST(RtcpReceiverTest, const uint32_t kDelayNtp = 123000; const int64_t kDelayMs = CompactNtpRttToMs(kDelayNtp); - uint32_t sent_ntp = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime()); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); rtcp::SenderReport sr; @@ -351,9 +345,7 @@ TEST(RtcpReceiverTest, InjectRrPacket) { OnReceivedRtcpReceiverReport(IsEmpty(), _, now)); receiver.IncomingPacket(rr.Build()); - std::vector report_blocks; - receiver.StatisticsReceived(&report_blocks); - EXPECT_TRUE(report_blocks.empty()); + EXPECT_THAT(receiver.GetLatestReportBlockData(), IsEmpty()); } TEST(RtcpReceiverTest, InjectRrPacketWithReportBlockNotToUsIgnored) { @@ -374,9 +366,7 @@ TEST(RtcpReceiverTest, InjectRrPacketWithReportBlockNotToUsIgnored) { receiver.IncomingPacket(rr.Build()); EXPECT_EQ(0, receiver.LastReceivedReportBlockMs()); - std::vector received_blocks; - receiver.StatisticsReceived(&received_blocks); - EXPECT_TRUE(received_blocks.empty()); + EXPECT_THAT(receiver.GetLatestReportBlockData(), IsEmpty()); } TEST(RtcpReceiverTest, InjectRrPacketWithOneReportBlock) { @@ -398,9 +388,7 @@ TEST(RtcpReceiverTest, InjectRrPacketWithOneReportBlock) { receiver.IncomingPacket(rr.Build()); EXPECT_EQ(now, receiver.LastReceivedReportBlockMs()); - std::vector received_blocks; - receiver.StatisticsReceived(&received_blocks); - EXPECT_EQ(1u, received_blocks.size()); + EXPECT_THAT(receiver.GetLatestReportBlockData(), SizeIs(1)); } TEST(RtcpReceiverTest, InjectSrPacketWithOneReportBlock) { @@ -422,9 +410,7 @@ TEST(RtcpReceiverTest, InjectSrPacketWithOneReportBlock) { receiver.IncomingPacket(sr.Build()); EXPECT_EQ(now, receiver.LastReceivedReportBlockMs()); - std::vector received_blocks; - receiver.StatisticsReceived(&received_blocks); - EXPECT_EQ(1u, received_blocks.size()); + 
EXPECT_THAT(receiver.GetLatestReportBlockData(), SizeIs(1)); } TEST(RtcpReceiverTest, InjectRrPacketWithTwoReportBlocks) { @@ -458,11 +444,12 @@ TEST(RtcpReceiverTest, InjectRrPacketWithTwoReportBlocks) { receiver.IncomingPacket(rr1.Build()); EXPECT_EQ(now, receiver.LastReceivedReportBlockMs()); - std::vector received_blocks; - receiver.StatisticsReceived(&received_blocks); - EXPECT_THAT(received_blocks, - UnorderedElementsAre(Field(&RTCPReportBlock::fraction_lost, 0), - Field(&RTCPReportBlock::fraction_lost, 10))); + EXPECT_THAT(receiver.GetLatestReportBlockData(), + UnorderedElementsAre( + Property(&ReportBlockData::report_block, + Field(&RTCPReportBlock::fraction_lost, 0)), + Property(&ReportBlockData::report_block, + Field(&RTCPReportBlock::fraction_lost, 10)))); // Insert next receiver report with same ssrc but new values. rtcp::ReportBlock rb3; @@ -491,25 +478,27 @@ TEST(RtcpReceiverTest, InjectRrPacketWithTwoReportBlocks) { OnReceivedRtcpReceiverReport(SizeIs(2), _, now)); receiver.IncomingPacket(rr2.Build()); - received_blocks.clear(); - receiver.StatisticsReceived(&received_blocks); - EXPECT_EQ(2u, received_blocks.size()); EXPECT_THAT( - received_blocks, + receiver.GetLatestReportBlockData(), UnorderedElementsAre( - AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc), - Field(&RTCPReportBlock::fraction_lost, kFracLost[0]), - Field(&RTCPReportBlock::packets_lost, kCumLost[0]), - Field(&RTCPReportBlock::extended_highest_sequence_number, - kSequenceNumbers[0])), - AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverExtraSsrc), - Field(&RTCPReportBlock::fraction_lost, kFracLost[1]), - Field(&RTCPReportBlock::packets_lost, kCumLost[1]), - Field(&RTCPReportBlock::extended_highest_sequence_number, - kSequenceNumbers[1])))); + Property( + &ReportBlockData::report_block, + AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc), + Field(&RTCPReportBlock::fraction_lost, kFracLost[0]), + Field(&RTCPReportBlock::packets_lost, kCumLost[0]), + 
Field(&RTCPReportBlock::extended_highest_sequence_number, + kSequenceNumbers[0]))), + Property( + &ReportBlockData::report_block, + AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverExtraSsrc), + Field(&RTCPReportBlock::fraction_lost, kFracLost[1]), + Field(&RTCPReportBlock::packets_lost, kCumLost[1]), + Field(&RTCPReportBlock::extended_highest_sequence_number, + kSequenceNumbers[1]))))); } -TEST(RtcpReceiverTest, InjectRrPacketsFromTwoRemoteSsrcs) { +TEST(RtcpReceiverTest, + InjectRrPacketsFromTwoRemoteSsrcsReturnsLatestReportBlock) { const uint32_t kSenderSsrc2 = 0x20304; const uint16_t kSequenceNumbers[] = {10, 12423}; const int32_t kCumLost[] = {13, 555}; @@ -536,15 +525,16 @@ TEST(RtcpReceiverTest, InjectRrPacketsFromTwoRemoteSsrcs) { EXPECT_EQ(now, receiver.LastReceivedReportBlockMs()); - std::vector received_blocks; - receiver.StatisticsReceived(&received_blocks); - EXPECT_EQ(1u, received_blocks.size()); - EXPECT_EQ(kSenderSsrc, received_blocks[0].sender_ssrc); - EXPECT_EQ(kReceiverMainSsrc, received_blocks[0].source_ssrc); - EXPECT_EQ(kFracLost[0], received_blocks[0].fraction_lost); - EXPECT_EQ(kCumLost[0], received_blocks[0].packets_lost); - EXPECT_EQ(kSequenceNumbers[0], - received_blocks[0].extended_highest_sequence_number); + EXPECT_THAT( + receiver.GetLatestReportBlockData(), + ElementsAre(Property( + &ReportBlockData::report_block, + AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc), + Field(&RTCPReportBlock::sender_ssrc, kSenderSsrc), + Field(&RTCPReportBlock::fraction_lost, kFracLost[0]), + Field(&RTCPReportBlock::packets_lost, kCumLost[0]), + Field(&RTCPReportBlock::extended_highest_sequence_number, + kSequenceNumbers[0]))))); rtcp::ReportBlock rb2; rb2.SetMediaSsrc(kReceiverMainSsrc); @@ -560,24 +550,17 @@ TEST(RtcpReceiverTest, InjectRrPacketsFromTwoRemoteSsrcs) { OnReceivedRtcpReceiverReport(SizeIs(1), _, now)); receiver.IncomingPacket(rr2.Build()); - received_blocks.clear(); - receiver.StatisticsReceived(&received_blocks); - 
ASSERT_EQ(2u, received_blocks.size()); EXPECT_THAT( - received_blocks, + receiver.GetLatestReportBlockData(), UnorderedElementsAre( - AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc), - Field(&RTCPReportBlock::sender_ssrc, kSenderSsrc), - Field(&RTCPReportBlock::fraction_lost, kFracLost[0]), - Field(&RTCPReportBlock::packets_lost, kCumLost[0]), - Field(&RTCPReportBlock::extended_highest_sequence_number, - kSequenceNumbers[0])), - AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc), - Field(&RTCPReportBlock::sender_ssrc, kSenderSsrc2), - Field(&RTCPReportBlock::fraction_lost, kFracLost[1]), - Field(&RTCPReportBlock::packets_lost, kCumLost[1]), - Field(&RTCPReportBlock::extended_highest_sequence_number, - kSequenceNumbers[1])))); + Property( + &ReportBlockData::report_block, + AllOf(Field(&RTCPReportBlock::source_ssrc, kReceiverMainSsrc), + Field(&RTCPReportBlock::sender_ssrc, kSenderSsrc2), + Field(&RTCPReportBlock::fraction_lost, kFracLost[1]), + Field(&RTCPReportBlock::packets_lost, kCumLost[1]), + Field(&RTCPReportBlock::extended_highest_sequence_number, + kSequenceNumbers[1]))))); } TEST(RtcpReceiverTest, GetRtt) { @@ -636,7 +619,7 @@ TEST(RtcpReceiverTest, InjectApp) { TEST(RtcpReceiverTest, InjectSdesWithOneChunk) { ReceiverMocks mocks; MockCnameCallbackImpl callback; - RtpRtcp::Configuration config = DefaultConfiguration(&mocks); + RtpRtcpInterface::Configuration config = DefaultConfiguration(&mocks); config.rtcp_cname_callback = &callback; RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); @@ -647,33 +630,6 @@ TEST(RtcpReceiverTest, InjectSdesWithOneChunk) { EXPECT_CALL(callback, OnCname(kSenderSsrc, StrEq(kCname))); receiver.IncomingPacket(sdes.Build()); - - char cName[RTCP_CNAME_SIZE]; - EXPECT_EQ(0, receiver.CNAME(kSenderSsrc, cName)); - EXPECT_EQ(0, strncmp(cName, kCname, RTCP_CNAME_SIZE)); -} - -TEST(RtcpReceiverTest, InjectByePacket_RemovesCname) { - ReceiverMocks mocks; - RTCPReceiver 
receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); - receiver.SetRemoteSSRC(kSenderSsrc); - - const char kCname[] = "alice@host"; - rtcp::Sdes sdes; - sdes.AddCName(kSenderSsrc, kCname); - - receiver.IncomingPacket(sdes.Build()); - - char cName[RTCP_CNAME_SIZE]; - EXPECT_EQ(0, receiver.CNAME(kSenderSsrc, cName)); - - // Verify that BYE removes the CNAME. - rtcp::Bye bye; - bye.SetSenderSsrc(kSenderSsrc); - - receiver.IncomingPacket(bye.Build()); - - EXPECT_EQ(-1, receiver.CNAME(kSenderSsrc, cName)); } TEST(RtcpReceiverTest, InjectByePacket_RemovesReportBlocks) { @@ -694,9 +650,7 @@ TEST(RtcpReceiverTest, InjectByePacket_RemovesReportBlocks) { EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport); receiver.IncomingPacket(rr.Build()); - std::vector received_blocks; - receiver.StatisticsReceived(&received_blocks); - EXPECT_EQ(2u, received_blocks.size()); + EXPECT_THAT(receiver.GetLatestReportBlockData(), SizeIs(2)); // Verify that BYE removes the report blocks. rtcp::Bye bye; @@ -704,18 +658,14 @@ TEST(RtcpReceiverTest, InjectByePacket_RemovesReportBlocks) { receiver.IncomingPacket(bye.Build()); - received_blocks.clear(); - receiver.StatisticsReceived(&received_blocks); - EXPECT_TRUE(received_blocks.empty()); + EXPECT_THAT(receiver.GetLatestReportBlockData(), IsEmpty()); // Inject packet again. 
EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks); EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport); receiver.IncomingPacket(rr.Build()); - received_blocks.clear(); - receiver.StatisticsReceived(&received_blocks); - EXPECT_EQ(2u, received_blocks.size()); + EXPECT_THAT(receiver.GetLatestReportBlockData(), SizeIs(2)); } TEST(RtcpReceiverTest, InjectByePacketRemovesReferenceTimeInfo) { @@ -837,11 +787,11 @@ TEST(RtcpReceiverTest, InjectExtendedReportsReceiverReferenceTimePacket) { TEST(RtcpReceiverTest, ExtendedReportsDlrrPacketNotToUsIgnored) { ReceiverMocks mocks; - RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); - receiver.SetRemoteSSRC(kSenderSsrc); - + auto config = DefaultConfiguration(&mocks); // Allow calculate rtt using dlrr/rrtr, simulating media receiver side. - receiver.SetRtcpXrRrtrStatus(true); + config.non_sender_rtt_measurement = true; + RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); + receiver.SetRemoteSSRC(kSenderSsrc); rtcp::ExtendedReports xr; xr.SetSenderSsrc(kSenderSsrc); @@ -855,12 +805,13 @@ TEST(RtcpReceiverTest, ExtendedReportsDlrrPacketNotToUsIgnored) { TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithSubBlock) { ReceiverMocks mocks; - RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); + auto config = DefaultConfiguration(&mocks); + config.non_sender_rtt_measurement = true; + RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); const uint32_t kLastRR = 0x12345; const uint32_t kDelay = 0x23456; - receiver.SetRtcpXrRrtrStatus(true); int64_t rtt_ms = 0; EXPECT_FALSE(receiver.GetAndResetXrRrRtt(&rtt_ms)); @@ -870,8 +821,7 @@ TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithSubBlock) { receiver.IncomingPacket(xr.Build()); - uint32_t compact_ntp_now = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t compact_ntp_now = CompactNtp(mocks.clock.CurrentNtpTime()); 
EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms)); uint32_t rtt_ntp = compact_ntp_now - kDelay - kLastRR; EXPECT_NEAR(CompactNtpRttToMs(rtt_ntp), rtt_ms, 1); @@ -879,12 +829,13 @@ TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithSubBlock) { TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithMultipleSubBlocks) { ReceiverMocks mocks; - RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); + auto config = DefaultConfiguration(&mocks); + config.non_sender_rtt_measurement = true; + RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); const uint32_t kLastRR = 0x12345; const uint32_t kDelay = 0x56789; - receiver.SetRtcpXrRrtrStatus(true); rtcp::ExtendedReports xr; xr.SetSenderSsrc(kSenderSsrc); @@ -894,8 +845,7 @@ TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithMultipleSubBlocks) { receiver.IncomingPacket(xr.Build()); - uint32_t compact_ntp_now = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t compact_ntp_now = CompactNtp(mocks.clock.CurrentNtpTime()); int64_t rtt_ms = 0; EXPECT_TRUE(receiver.GetAndResetXrRrRtt(&rtt_ms)); uint32_t rtt_ntp = compact_ntp_now - kDelay - kLastRR; @@ -904,11 +854,11 @@ TEST(RtcpReceiverTest, InjectExtendedReportsDlrrPacketWithMultipleSubBlocks) { TEST(RtcpReceiverTest, InjectExtendedReportsPacketWithMultipleReportBlocks) { ReceiverMocks mocks; - RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); + auto config = DefaultConfiguration(&mocks); + config.non_sender_rtt_measurement = true; + RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); - receiver.SetRtcpXrRrtrStatus(true); - rtcp::Rrtr rrtr; rtcp::ExtendedReports xr; xr.SetSenderSsrc(kSenderSsrc); @@ -926,11 +876,11 @@ TEST(RtcpReceiverTest, InjectExtendedReportsPacketWithMultipleReportBlocks) { TEST(RtcpReceiverTest, InjectExtendedReportsPacketWithUnknownReportBlock) { ReceiverMocks mocks; - RTCPReceiver 
receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); + auto config = DefaultConfiguration(&mocks); + config.non_sender_rtt_measurement = true; + RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); - receiver.SetRtcpXrRrtrStatus(true); - rtcp::Rrtr rrtr; rtcp::ExtendedReports xr; xr.SetSenderSsrc(kSenderSsrc); @@ -954,26 +904,27 @@ TEST(RtcpReceiverTest, InjectExtendedReportsPacketWithUnknownReportBlock) { TEST(RtcpReceiverTest, TestExtendedReportsRrRttInitiallyFalse) { ReceiverMocks mocks; - RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); + auto config = DefaultConfiguration(&mocks); + config.non_sender_rtt_measurement = true; + RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); - receiver.SetRtcpXrRrtrStatus(true); - int64_t rtt_ms; EXPECT_FALSE(receiver.GetAndResetXrRrRtt(&rtt_ms)); } TEST(RtcpReceiverTest, RttCalculatedAfterExtendedReportsDlrr) { ReceiverMocks mocks; - RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); + auto config = DefaultConfiguration(&mocks); + config.non_sender_rtt_measurement = true; + RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); Random rand(0x0123456789abcdef); const int64_t kRttMs = rand.Rand(1, 9 * 3600 * 1000); const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff); const int64_t kDelayMs = CompactNtpRttToMs(kDelayNtp); - receiver.SetRtcpXrRrtrStatus(true); - NtpTime now = TimeMicrosToNtp(mocks.clock.TimeInMicroseconds()); + NtpTime now = mocks.clock.CurrentNtpTime(); uint32_t sent_ntp = CompactNtp(now); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); @@ -990,17 +941,18 @@ TEST(RtcpReceiverTest, RttCalculatedAfterExtendedReportsDlrr) { TEST(RtcpReceiverTest, XrDlrrCalculatesNegativeRttAsOne) { ReceiverMocks mocks; - RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); + auto config = DefaultConfiguration(&mocks); + 
config.non_sender_rtt_measurement = true; + RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); Random rand(0x0123456789abcdef); const int64_t kRttMs = rand.Rand(-3600 * 1000, -1); const uint32_t kDelayNtp = rand.Rand(0, 0x7fffffff); const int64_t kDelayMs = CompactNtpRttToMs(kDelayNtp); - NtpTime now = TimeMicrosToNtp(mocks.clock.TimeInMicroseconds()); + NtpTime now = mocks.clock.CurrentNtpTime(); uint32_t sent_ntp = CompactNtp(now); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); - receiver.SetRtcpXrRrtrStatus(true); rtcp::ExtendedReports xr; xr.SetSenderSsrc(kSenderSsrc); @@ -1200,14 +1152,14 @@ TEST(RtcpReceiverTest, TmmbrPacketAccepted) { receiver.SetRemoteSSRC(kSenderSsrc); const uint32_t kBitrateBps = 30000; - rtcp::Tmmbr tmmbr; - tmmbr.SetSenderSsrc(kSenderSsrc); - tmmbr.AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, kBitrateBps, 0)); - rtcp::SenderReport sr; - sr.SetSenderSsrc(kSenderSsrc); + auto tmmbr = std::make_unique(); + tmmbr->SetSenderSsrc(kSenderSsrc); + tmmbr->AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, kBitrateBps, 0)); + auto sr = std::make_unique(); + sr->SetSenderSsrc(kSenderSsrc); rtcp::CompoundPacket compound; - compound.Append(&sr); - compound.Append(&tmmbr); + compound.Append(std::move(sr)); + compound.Append(std::move(tmmbr)); EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks); EXPECT_CALL(mocks.rtp_rtcp_impl, SetTmmbn(SizeIs(1))); @@ -1228,15 +1180,15 @@ TEST(RtcpReceiverTest, TmmbrPacketNotForUsIgnored) { receiver.SetRemoteSSRC(kSenderSsrc); const uint32_t kBitrateBps = 30000; - rtcp::Tmmbr tmmbr; - tmmbr.SetSenderSsrc(kSenderSsrc); - tmmbr.AddTmmbr(rtcp::TmmbItem(kNotToUsSsrc, kBitrateBps, 0)); + auto tmmbr = std::make_unique(); + tmmbr->SetSenderSsrc(kSenderSsrc); + tmmbr->AddTmmbr(rtcp::TmmbItem(kNotToUsSsrc, kBitrateBps, 0)); - rtcp::SenderReport sr; - sr.SetSenderSsrc(kSenderSsrc); + auto sr = std::make_unique(); + sr->SetSenderSsrc(kSenderSsrc); rtcp::CompoundPacket compound; - 
compound.Append(&sr); - compound.Append(&tmmbr); + compound.Append(std::move(sr)); + compound.Append(std::move(tmmbr)); EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks); EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport); @@ -1251,14 +1203,14 @@ TEST(RtcpReceiverTest, TmmbrPacketZeroRateIgnored) { RTCPReceiver receiver(DefaultConfiguration(&mocks), &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); - rtcp::Tmmbr tmmbr; - tmmbr.SetSenderSsrc(kSenderSsrc); - tmmbr.AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, 0, 0)); - rtcp::SenderReport sr; - sr.SetSenderSsrc(kSenderSsrc); + auto tmmbr = std::make_unique(); + tmmbr->SetSenderSsrc(kSenderSsrc); + tmmbr->AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, 0, 0)); + auto sr = std::make_unique(); + sr->SetSenderSsrc(kSenderSsrc); rtcp::CompoundPacket compound; - compound.Append(&sr); - compound.Append(&tmmbr); + compound.Append(std::move(sr)); + compound.Append(std::move(tmmbr)); EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks); EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport); @@ -1276,14 +1228,14 @@ TEST(RtcpReceiverTest, TmmbrThreeConstraintsTimeOut) { // Inject 3 packets "from" kSenderSsrc, kSenderSsrc+1, kSenderSsrc+2. // The times of arrival are starttime + 0, starttime + 5 and starttime + 10. 
for (uint32_t ssrc = kSenderSsrc; ssrc < kSenderSsrc + 3; ++ssrc) { - rtcp::Tmmbr tmmbr; - tmmbr.SetSenderSsrc(ssrc); - tmmbr.AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, 30000, 0)); - rtcp::SenderReport sr; - sr.SetSenderSsrc(ssrc); + auto tmmbr = std::make_unique(); + tmmbr->SetSenderSsrc(ssrc); + tmmbr->AddTmmbr(rtcp::TmmbItem(kReceiverMainSsrc, 30000, 0)); + auto sr = std::make_unique(); + sr->SetSenderSsrc(ssrc); rtcp::CompoundPacket compound; - compound.Append(&sr); - compound.Append(&tmmbr); + compound.Append(std::move(sr)); + compound.Append(std::move(tmmbr)); EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks); EXPECT_CALL(mocks.rtp_rtcp_impl, SetTmmbn); @@ -1295,60 +1247,24 @@ TEST(RtcpReceiverTest, TmmbrThreeConstraintsTimeOut) { mocks.clock.AdvanceTimeMilliseconds(5000); } // It is now starttime + 15. - std::vector candidate_set = receiver.TmmbrReceived(); - ASSERT_EQ(3u, candidate_set.size()); - EXPECT_EQ(30000U, candidate_set[0].bitrate_bps()); + EXPECT_THAT(receiver.TmmbrReceived(), + AllOf(SizeIs(3), + Each(Property(&rtcp::TmmbItem::bitrate_bps, Eq(30'000U))))); // We expect the timeout to be 25 seconds. Advance the clock by 12 // seconds, timing out the first packet. mocks.clock.AdvanceTimeMilliseconds(12000); - candidate_set = receiver.TmmbrReceived(); - ASSERT_EQ(2u, candidate_set.size()); - EXPECT_EQ(kSenderSsrc + 1, candidate_set[0].ssrc()); -} - -TEST(RtcpReceiverTest, Callbacks) { - ReceiverMocks mocks; - MockRtcpCallbackImpl callback; - RtpRtcp::Configuration config = DefaultConfiguration(&mocks); - config.rtcp_statistics_callback = &callback; - RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); - receiver.SetRemoteSSRC(kSenderSsrc); - - const uint8_t kFractionLoss = 3; - const uint32_t kCumulativeLoss = 7; - const uint32_t kJitter = 9; - const uint16_t kSequenceNumber = 1234; - - // First packet, all numbers should just propagate. 
- rtcp::ReportBlock rb1; - rb1.SetMediaSsrc(kReceiverMainSsrc); - rb1.SetExtHighestSeqNum(kSequenceNumber); - rb1.SetFractionLost(kFractionLoss); - rb1.SetCumulativeLost(kCumulativeLoss); - rb1.SetJitter(kJitter); - - rtcp::ReceiverReport rr1; - rr1.SetSenderSsrc(kSenderSsrc); - rr1.AddReportBlock(rb1); - EXPECT_CALL(callback, - StatisticsUpdated( - AllOf(Field(&RtcpStatistics::fraction_lost, kFractionLoss), - Field(&RtcpStatistics::packets_lost, kCumulativeLoss), - Field(&RtcpStatistics::extended_highest_sequence_number, - kSequenceNumber), - Field(&RtcpStatistics::jitter, kJitter)), - kReceiverMainSsrc)); - EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedRtcpReportBlocks); - EXPECT_CALL(mocks.bandwidth_observer, OnReceivedRtcpReceiverReport); - receiver.IncomingPacket(rr1.Build()); + EXPECT_THAT(receiver.TmmbrReceived(), + UnorderedElementsAre( + Property(&rtcp::TmmbItem::ssrc, Eq(kSenderSsrc + 1)), + Property(&rtcp::TmmbItem::ssrc, Eq(kSenderSsrc + 2)))); } TEST(RtcpReceiverTest, VerifyBlockAndTimestampObtainedFromReportBlockDataObserver) { ReceiverMocks mocks; MockReportBlockDataObserverImpl observer; - RtpRtcp::Configuration config = DefaultConfiguration(&mocks); + RtpRtcpInterface::Configuration config = DefaultConfiguration(&mocks); config.report_block_data_observer = &observer; RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); @@ -1397,7 +1313,7 @@ TEST(RtcpReceiverTest, TEST(RtcpReceiverTest, VerifyRttObtainedFromReportBlockDataObserver) { ReceiverMocks mocks; MockReportBlockDataObserverImpl observer; - RtpRtcp::Configuration config = DefaultConfiguration(&mocks); + RtpRtcpInterface::Configuration config = DefaultConfiguration(&mocks); config.report_block_data_observer = &observer; RTCPReceiver receiver(config, &mocks.rtp_rtcp_impl); receiver.SetRemoteSSRC(kSenderSsrc); @@ -1406,8 +1322,7 @@ TEST(RtcpReceiverTest, VerifyRttObtainedFromReportBlockDataObserver) { const uint32_t kDelayNtp = 123000; const int64_t kDelayMs = 
CompactNtpRttToMs(kDelayNtp); - uint32_t sent_ntp = - CompactNtp(TimeMicrosToNtp(mocks.clock.TimeInMicroseconds())); + uint32_t sent_ntp = CompactNtp(mocks.clock.CurrentNtpTime()); mocks.clock.AdvanceTimeMilliseconds(kRttMs + kDelayMs); rtcp::SenderReport sr; @@ -1598,19 +1513,19 @@ TEST(RtcpReceiverTest, HandlesInvalidTransportFeedback) { receiver.SetRemoteSSRC(kSenderSsrc); // Send a compound packet with a TransportFeedback followed by something else. - rtcp::TransportFeedback packet; - packet.SetMediaSsrc(kReceiverMainSsrc); - packet.SetSenderSsrc(kSenderSsrc); - packet.SetBase(1, 1000); - packet.AddReceivedPacket(1, 1000); + auto packet = std::make_unique(); + packet->SetMediaSsrc(kReceiverMainSsrc); + packet->SetSenderSsrc(kSenderSsrc); + packet->SetBase(1, 1000); + packet->AddReceivedPacket(1, 1000); static uint32_t kBitrateBps = 50000; - rtcp::Remb remb; - remb.SetSenderSsrc(kSenderSsrc); - remb.SetBitrateBps(kBitrateBps); + auto remb = std::make_unique(); + remb->SetSenderSsrc(kSenderSsrc); + remb->SetBitrateBps(kBitrateBps); rtcp::CompoundPacket compound; - compound.Append(&packet); - compound.Append(&remb); + compound.Append(std::move(packet)); + compound.Append(std::move(remb)); rtc::Buffer built_packet = compound.Build(); // Modify the TransportFeedback packet so that it is invalid. 
@@ -1639,10 +1554,10 @@ TEST(RtcpReceiverTest, Nack) { nack_set.insert(std::begin(kNackList1), std::end(kNackList1)); nack_set.insert(std::begin(kNackList23), std::end(kNackList23)); - rtcp::Nack nack1; - nack1.SetSenderSsrc(kSenderSsrc); - nack1.SetMediaSsrc(kReceiverMainSsrc); - nack1.SetPacketIds(kNackList1, arraysize(kNackList1)); + auto nack1 = std::make_unique(); + nack1->SetSenderSsrc(kSenderSsrc); + nack1->SetMediaSsrc(kReceiverMainSsrc); + nack1->SetPacketIds(kNackList1, arraysize(kNackList1)); EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedNack(ElementsAreArray(kNackList1))); @@ -1653,21 +1568,21 @@ TEST(RtcpReceiverTest, Nack) { arraysize(kNackList1)), Field(&RtcpPacketTypeCounter::unique_nack_requests, arraysize(kNackList1))))); - receiver.IncomingPacket(nack1.Build()); + receiver.IncomingPacket(nack1->Build()); - rtcp::Nack nack2; - nack2.SetSenderSsrc(kSenderSsrc); - nack2.SetMediaSsrc(kReceiverMainSsrc); - nack2.SetPacketIds(kNackList23, kNackListLength2); + auto nack2 = std::make_unique(); + nack2->SetSenderSsrc(kSenderSsrc); + nack2->SetMediaSsrc(kReceiverMainSsrc); + nack2->SetPacketIds(kNackList23, kNackListLength2); - rtcp::Nack nack3; - nack3.SetSenderSsrc(kSenderSsrc); - nack3.SetMediaSsrc(kReceiverMainSsrc); - nack3.SetPacketIds(kNackList23 + kNackListLength2, kNackListLength3); + auto nack3 = std::make_unique(); + nack3->SetSenderSsrc(kSenderSsrc); + nack3->SetMediaSsrc(kReceiverMainSsrc); + nack3->SetPacketIds(kNackList23 + kNackListLength2, kNackListLength3); rtcp::CompoundPacket two_nacks; - two_nacks.Append(&nack2); - two_nacks.Append(&nack3); + two_nacks.Append(std::move(nack2)); + two_nacks.Append(std::move(nack3)); EXPECT_CALL(mocks.rtp_rtcp_impl, OnReceivedNack(ElementsAreArray(kNackList23))); diff --git a/modules/rtp_rtcp/source/rtcp_sender.cc b/modules/rtp_rtcp/source/rtcp_sender.cc index c12fb68dc3..8f5e3b104c 100644 --- a/modules/rtp_rtcp/source/rtcp_sender.cc +++ b/modules/rtp_rtcp/source/rtcp_sender.cc @@ -16,7 +16,11 @@ #include 
#include +#include "absl/types/optional.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/rtp_headers.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "logging/rtc_event_log/events/rtc_event_rtcp_packet_outgoing.h" #include "modules/rtp_rtcp/source/rtcp_packet/app.h" #include "modules/rtp_rtcp/source/rtcp_packet/bye.h" @@ -34,10 +38,10 @@ #include "modules/rtp_rtcp/source/rtcp_packet/tmmbr.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/time_util.h" #include "modules/rtp_rtcp/source/tmmbr_help.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/trace_event.h" @@ -50,39 +54,10 @@ const uint32_t kRtcpAnyExtendedReports = kRtcpXrReceiverReferenceTime | kRtcpXrTargetBitrate; constexpr int32_t kDefaultVideoReportInterval = 1000; constexpr int32_t kDefaultAudioReportInterval = 5000; - -class PacketContainer : public rtcp::CompoundPacket { - public: - PacketContainer(Transport* transport, RtcEventLog* event_log) - : transport_(transport), event_log_(event_log) {} - ~PacketContainer() override { - for (RtcpPacket* packet : appended_packets_) - delete packet; - } - - size_t SendPackets(size_t max_payload_length) { - size_t bytes_sent = 0; - Build(max_payload_length, [&](rtc::ArrayView packet) { - if (transport_->SendRtcp(packet.data(), packet.size())) { - bytes_sent += packet.size(); - if (event_log_) { - event_log_->Log(std::make_unique(packet)); - } - } - }); - return bytes_sent; - } - - private: - Transport* transport_; - RtcEventLog* const event_log_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(PacketContainer); -}; +} // namespace // Helper to put several RTCP packets into lower layer datagram RTCP packet. 
-// Prefer to use this class instead of PacketContainer. -class PacketSender { +class RTCPSender::PacketSender { public: PacketSender(rtcp::RtcpPacket::PacketReadyCallback callback, size_t max_packet_size) @@ -105,8 +80,6 @@ class PacketSender { } } - bool IsEmpty() const { return index_ == 0; } - private: const rtcp::RtcpPacket::PacketReadyCallback callback_; const size_t max_packet_size_; @@ -114,8 +87,6 @@ class PacketSender { uint8_t buffer_[IP_PACKET_SIZE]; }; -} // namespace - RTCPSender::FeedbackState::FeedbackState() : packets_sent(0), media_bytes_sent(0), @@ -136,19 +107,38 @@ class RTCPSender::RtcpContext { RtcpContext(const FeedbackState& feedback_state, int32_t nack_size, const uint16_t* nack_list, - int64_t now_us) + Timestamp now) : feedback_state_(feedback_state), nack_size_(nack_size), nack_list_(nack_list), - now_us_(now_us) {} + now_(now) {} const FeedbackState& feedback_state_; const int32_t nack_size_; const uint16_t* nack_list_; - const int64_t now_us_; + const Timestamp now_; }; -RTCPSender::RTCPSender(const RtpRtcp::Configuration& config) +RTCPSender::Configuration RTCPSender::Configuration::FromRtpRtcpConfiguration( + const RtpRtcpInterface::Configuration& configuration) { + RTCPSender::Configuration result; + result.audio = configuration.audio; + result.local_media_ssrc = configuration.local_media_ssrc; + result.clock = configuration.clock; + result.outgoing_transport = configuration.outgoing_transport; + result.non_sender_rtt_measurement = configuration.non_sender_rtt_measurement; + result.event_log = configuration.event_log; + if (configuration.rtcp_report_interval_ms) { + result.rtcp_report_interval = + TimeDelta::Millis(configuration.rtcp_report_interval_ms); + } + result.receive_statistics = configuration.receive_statistics; + result.rtcp_packet_type_counter_observer = + configuration.rtcp_packet_type_counter_observer; + return result; +} + +RTCPSender::RTCPSender(Configuration config) : audio_(config.audio), 
ssrc_(config.local_media_ssrc), clock_(config.clock), @@ -156,15 +146,14 @@ RTCPSender::RTCPSender(const RtpRtcp::Configuration& config) method_(RtcpMode::kOff), event_log_(config.event_log), transport_(config.outgoing_transport), - report_interval_ms_(config.rtcp_report_interval_ms > 0 - ? config.rtcp_report_interval_ms - : (config.audio ? kDefaultAudioReportInterval - : kDefaultVideoReportInterval)), + report_interval_(config.rtcp_report_interval.value_or( + TimeDelta::Millis(config.audio ? kDefaultAudioReportInterval + : kDefaultVideoReportInterval))), + schedule_next_rtcp_send_evaluation_function_( + std::move(config.schedule_next_rtcp_send_evaluation_function)), sending_(false), - next_time_to_send_rtcp_(0), timestamp_offset_(0), last_rtp_timestamp_(0), - last_frame_capture_time_ms_(-1), remote_ssrc_(0), receive_statistics_(config.receive_statistics), @@ -176,12 +165,8 @@ RTCPSender::RTCPSender(const RtpRtcp::Configuration& config) packet_oh_send_(0), max_packet_size_(IP_PACKET_SIZE - 28), // IPv4 + UDP by default. 
- app_sub_type_(0), - app_name_(0), - app_data_(nullptr), - app_length_(0), - - xr_send_receiver_reference_time_enabled_(false), + xr_send_receiver_reference_time_enabled_( + config.non_sender_rtt_measurement), packet_type_counter_observer_(config.rtcp_packet_type_counter_observer), send_video_bitrate_allocation_(false), last_payload_type_(-1) { @@ -194,7 +179,6 @@ RTCPSender::RTCPSender(const RtpRtcp::Configuration& config) builders_[kRtcpFir] = &RTCPSender::BuildFIR; builders_[kRtcpRemb] = &RTCPSender::BuildREMB; builders_[kRtcpBye] = &RTCPSender::BuildBYE; - builders_[kRtcpApp] = &RTCPSender::BuildAPP; builders_[kRtcpLossNotification] = &RTCPSender::BuildLossNotification; builders_[kRtcpTmmbr] = &RTCPSender::BuildTMMBR; builders_[kRtcpTmmbn] = &RTCPSender::BuildTMMBN; @@ -205,31 +189,32 @@ RTCPSender::RTCPSender(const RtpRtcp::Configuration& config) RTCPSender::~RTCPSender() {} RtcpMode RTCPSender::Status() const { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); return method_; } void RTCPSender::SetRTCPStatus(RtcpMode new_method) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); - if (method_ == RtcpMode::kOff && new_method != RtcpMode::kOff) { + if (new_method == RtcpMode::kOff) { + next_time_to_send_rtcp_ = absl::nullopt; + } else if (method_ == RtcpMode::kOff) { // When switching on, reschedule the next packet - next_time_to_send_rtcp_ = - clock_->TimeInMilliseconds() + (report_interval_ms_ / 2); + SetNextRtcpSendEvaluationDuration(report_interval_ / 2); } method_ = new_method; } bool RTCPSender::Sending() const { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); return sending_; } -int32_t RTCPSender::SetSendingStatus(const FeedbackState& feedback_state, - bool sending) { +void RTCPSender::SetSendingStatus(const FeedbackState& feedback_state, + bool sending) { bool sendRTCPBye = false; { - rtc::CritScope 
lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); if (method_ != RtcpMode::kOff) { if (sending == false && sending_ == true) { @@ -239,9 +224,11 @@ int32_t RTCPSender::SetSendingStatus(const FeedbackState& feedback_state, } sending_ = sending; } - if (sendRTCPBye) - return SendRTCP(feedback_state, kRtcpBye); - return 0; + if (sendRTCPBye) { + if (SendRTCP(feedback_state, kRtcpBye) != 0) { + RTC_LOG(LS_WARNING) << "Failed to send RTCP BYE"; + } + } } int32_t RTCPSender::SendLossNotification(const FeedbackState& feedback_state, @@ -249,90 +236,112 @@ int32_t RTCPSender::SendLossNotification(const FeedbackState& feedback_state, uint16_t last_received_seq_num, bool decodability_flag, bool buffering_allowed) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + int32_t error_code = -1; + auto callback = [&](rtc::ArrayView packet) { + transport_->SendRtcp(packet.data(), packet.size()); + error_code = 0; + if (event_log_) { + event_log_->Log(std::make_unique(packet)); + } + }; + absl::optional sender; + { + MutexLock lock(&mutex_rtcp_sender_); - loss_notification_state_.last_decoded_seq_num = last_decoded_seq_num; - loss_notification_state_.last_received_seq_num = last_received_seq_num; - loss_notification_state_.decodability_flag = decodability_flag; + if (!loss_notification_.Set(last_decoded_seq_num, last_received_seq_num, + decodability_flag)) { + return -1; + } - SetFlag(kRtcpLossNotification, /*is_volatile=*/true); + SetFlag(kRtcpLossNotification, /*is_volatile=*/true); - if (buffering_allowed) { - // The loss notification will be batched with additional feedback messages. - return 0; + if (buffering_allowed) { + // The loss notification will be batched with additional feedback + // messages. 
+ return 0; + } + + sender.emplace(callback, max_packet_size_); + auto result = ComputeCompoundRTCPPacket( + feedback_state, RTCPPacketType::kRtcpLossNotification, 0, nullptr, + *sender); + if (result) { + return *result; + } } + sender->Send(); - return SendCompoundRTCPLocked( - feedback_state, {RTCPPacketType::kRtcpLossNotification}, 0, nullptr); + return error_code; } void RTCPSender::SetRemb(int64_t bitrate_bps, std::vector ssrcs) { RTC_CHECK_GE(bitrate_bps, 0); - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); remb_bitrate_ = bitrate_bps; remb_ssrcs_ = std::move(ssrcs); SetFlag(kRtcpRemb, /*is_volatile=*/false); // Send a REMB immediately if we have a new REMB. The frequency of REMBs is // throttled by the caller. - next_time_to_send_rtcp_ = clock_->TimeInMilliseconds(); + SetNextRtcpSendEvaluationDuration(TimeDelta::Zero()); } void RTCPSender::UnsetRemb() { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); // Stop sending REMB each report until it is reenabled and REMB data set. 
ConsumeFlag(kRtcpRemb, /*forced=*/true); } bool RTCPSender::TMMBR() const { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); return IsFlagPresent(RTCPPacketType::kRtcpTmmbr); } -void RTCPSender::SetTMMBRStatus(bool enable) { - rtc::CritScope lock(&critical_section_rtcp_sender_); - if (enable) { - SetFlag(RTCPPacketType::kRtcpTmmbr, false); - } else { - ConsumeFlag(RTCPPacketType::kRtcpTmmbr, true); - } -} - void RTCPSender::SetMaxRtpPacketSize(size_t max_packet_size) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); max_packet_size_ = max_packet_size; } void RTCPSender::SetTimestampOffset(uint32_t timestamp_offset) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); timestamp_offset_ = timestamp_offset; } void RTCPSender::SetLastRtpTime(uint32_t rtp_timestamp, - int64_t capture_time_ms, - int8_t payload_type) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + absl::optional capture_time, + absl::optional payload_type) { + MutexLock lock(&mutex_rtcp_sender_); // For compatibility with clients who don't set payload type correctly on all // calls. - if (payload_type != -1) { - last_payload_type_ = payload_type; + if (payload_type.has_value()) { + last_payload_type_ = *payload_type; } last_rtp_timestamp_ = rtp_timestamp; - if (capture_time_ms <= 0) { + if (!capture_time.has_value()) { // We don't currently get a capture time from VoiceEngine. 
- last_frame_capture_time_ms_ = clock_->TimeInMilliseconds(); + last_frame_capture_time_ = clock_->CurrentTime(); } else { - last_frame_capture_time_ms_ = capture_time_ms; + last_frame_capture_time_ = *capture_time; } } void RTCPSender::SetRtpClockRate(int8_t payload_type, int rtp_clock_rate_hz) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); rtp_clock_rates_khz_[payload_type] = rtp_clock_rate_hz / 1000; } +uint32_t RTCPSender::SSRC() const { + MutexLock lock(&mutex_rtcp_sender_); + return ssrc_; +} + +void RTCPSender::SetSsrc(uint32_t ssrc) { + MutexLock lock(&mutex_rtcp_sender_); + ssrc_ = ssrc; +} + void RTCPSender::SetRemoteSSRC(uint32_t ssrc) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); remote_ssrc_ = ssrc; } @@ -341,36 +350,11 @@ int32_t RTCPSender::SetCNAME(const char* c_name) { return -1; RTC_DCHECK_LT(strlen(c_name), RTCP_CNAME_SIZE); - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); cname_ = c_name; return 0; } -int32_t RTCPSender::AddMixedCNAME(uint32_t SSRC, const char* c_name) { - RTC_DCHECK(c_name); - RTC_DCHECK_LT(strlen(c_name), RTCP_CNAME_SIZE); - rtc::CritScope lock(&critical_section_rtcp_sender_); - // One spot is reserved for ssrc_/cname_. - // TODO(danilchap): Add support for more than 30 contributes by sending - // several sdes packets. 
- if (csrc_cnames_.size() >= rtcp::Sdes::kMaxNumberOfChunks - 1) - return -1; - - csrc_cnames_[SSRC] = c_name; - return 0; -} - -int32_t RTCPSender::RemoveMixedCNAME(uint32_t SSRC) { - rtc::CritScope lock(&critical_section_rtcp_sender_); - auto it = csrc_cnames_.find(SSRC); - - if (it == csrc_cnames_.end()) - return -1; - - csrc_cnames_.erase(it); - return 0; -} - bool RTCPSender::TimeToSendRTCPReport(bool sendKeyframeBeforeRTP) const { /* For audio we use a configurable interval (default: 5 seconds) @@ -430,32 +414,27 @@ bool RTCPSender::TimeToSendRTCPReport(bool sendKeyframeBeforeRTP) const { a value of the RTCP bandwidth below the intended average */ - int64_t now = clock_->TimeInMilliseconds(); - - rtc::CritScope lock(&critical_section_rtcp_sender_); + Timestamp now = clock_->CurrentTime(); + MutexLock lock(&mutex_rtcp_sender_); + RTC_DCHECK( + (method_ == RtcpMode::kOff && !next_time_to_send_rtcp_.has_value()) || + (method_ != RtcpMode::kOff && next_time_to_send_rtcp_.has_value())); if (method_ == RtcpMode::kOff) return false; if (!audio_ && sendKeyframeBeforeRTP) { // for video key-frames we want to send the RTCP before the large key-frame // if we have a 100 ms margin - now += RTCP_SEND_BEFORE_KEY_FRAME_MS; + now += RTCP_SEND_BEFORE_KEY_FRAME; } - if (now >= next_time_to_send_rtcp_) { - return true; - } else if (now < 0x0000ffff && - next_time_to_send_rtcp_ > 0xffff0000) { // 65 sec margin - // wrap - return true; - } - return false; + return now >= *next_time_to_send_rtcp_; } -std::unique_ptr RTCPSender::BuildSR(const RtcpContext& ctx) { +void RTCPSender::BuildSR(const RtcpContext& ctx, PacketSender& sender) { // Timestamp shouldn't be estimated before first media frame. - RTC_DCHECK_GE(last_frame_capture_time_ms_, 0); + RTC_DCHECK(last_frame_capture_time_.has_value()); // The timestamp of this RTCP packet should be estimated as the timestamp of // the frame being captured at this moment. 
We are calculating that // timestamp as the last frame's timestamp + the time since the last frame @@ -470,82 +449,71 @@ std::unique_ptr RTCPSender::BuildSR(const RtcpContext& ctx) { // when converted to milliseconds, uint32_t rtp_timestamp = timestamp_offset_ + last_rtp_timestamp_ + - ((ctx.now_us_ + 500) / 1000 - last_frame_capture_time_ms_) * rtp_rate; + ((ctx.now_.us() + 500) / 1000 - last_frame_capture_time_->ms()) * + rtp_rate; - rtcp::SenderReport* report = new rtcp::SenderReport(); - report->SetSenderSsrc(ssrc_); - report->SetNtp(TimeMicrosToNtp(ctx.now_us_)); - report->SetRtpTimestamp(rtp_timestamp); - report->SetPacketCount(ctx.feedback_state_.packets_sent); - report->SetOctetCount(ctx.feedback_state_.media_bytes_sent); - report->SetReportBlocks(CreateReportBlocks(ctx.feedback_state_)); - - return std::unique_ptr(report); + rtcp::SenderReport report; + report.SetSenderSsrc(ssrc_); + report.SetNtp(clock_->ConvertTimestampToNtpTime(ctx.now_)); + report.SetRtpTimestamp(rtp_timestamp); + report.SetPacketCount(ctx.feedback_state_.packets_sent); + report.SetOctetCount(ctx.feedback_state_.media_bytes_sent); + report.SetReportBlocks(CreateReportBlocks(ctx.feedback_state_)); + sender.AppendPacket(report); } -std::unique_ptr RTCPSender::BuildSDES( - const RtcpContext& ctx) { +void RTCPSender::BuildSDES(const RtcpContext& ctx, PacketSender& sender) { size_t length_cname = cname_.length(); RTC_CHECK_LT(length_cname, RTCP_CNAME_SIZE); - rtcp::Sdes* sdes = new rtcp::Sdes(); - sdes->AddCName(ssrc_, cname_); - - for (const auto& it : csrc_cnames_) - RTC_CHECK(sdes->AddCName(it.first, it.second)); - - return std::unique_ptr(sdes); + rtcp::Sdes sdes; + sdes.AddCName(ssrc_, cname_); + sender.AppendPacket(sdes); } -std::unique_ptr RTCPSender::BuildRR(const RtcpContext& ctx) { - rtcp::ReceiverReport* report = new rtcp::ReceiverReport(); - report->SetSenderSsrc(ssrc_); - report->SetReportBlocks(CreateReportBlocks(ctx.feedback_state_)); - - return std::unique_ptr(report); +void 
RTCPSender::BuildRR(const RtcpContext& ctx, PacketSender& sender) { + rtcp::ReceiverReport report; + report.SetSenderSsrc(ssrc_); + report.SetReportBlocks(CreateReportBlocks(ctx.feedback_state_)); + sender.AppendPacket(report); } -std::unique_ptr RTCPSender::BuildPLI(const RtcpContext& ctx) { - rtcp::Pli* pli = new rtcp::Pli(); - pli->SetSenderSsrc(ssrc_); - pli->SetMediaSsrc(remote_ssrc_); +void RTCPSender::BuildPLI(const RtcpContext& ctx, PacketSender& sender) { + rtcp::Pli pli; + pli.SetSenderSsrc(ssrc_); + pli.SetMediaSsrc(remote_ssrc_); ++packet_type_counter_.pli_packets; - - return std::unique_ptr(pli); + sender.AppendPacket(pli); } -std::unique_ptr RTCPSender::BuildFIR(const RtcpContext& ctx) { +void RTCPSender::BuildFIR(const RtcpContext& ctx, PacketSender& sender) { ++sequence_number_fir_; - rtcp::Fir* fir = new rtcp::Fir(); - fir->SetSenderSsrc(ssrc_); - fir->AddRequestTo(remote_ssrc_, sequence_number_fir_); + rtcp::Fir fir; + fir.SetSenderSsrc(ssrc_); + fir.AddRequestTo(remote_ssrc_, sequence_number_fir_); ++packet_type_counter_.fir_packets; - - return std::unique_ptr(fir); + sender.AppendPacket(fir); } -std::unique_ptr RTCPSender::BuildREMB( - const RtcpContext& ctx) { - rtcp::Remb* remb = new rtcp::Remb(); - remb->SetSenderSsrc(ssrc_); - remb->SetBitrateBps(remb_bitrate_); - remb->SetSsrcs(remb_ssrcs_); - - return std::unique_ptr(remb); +void RTCPSender::BuildREMB(const RtcpContext& ctx, PacketSender& sender) { + rtcp::Remb remb; + remb.SetSenderSsrc(ssrc_); + remb.SetBitrateBps(remb_bitrate_); + remb.SetSsrcs(remb_ssrcs_); + sender.AppendPacket(remb); } void RTCPSender::SetTargetBitrate(unsigned int target_bitrate) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); tmmbr_send_bps_ = target_bitrate; } -std::unique_ptr RTCPSender::BuildTMMBR( - const RtcpContext& ctx) { +void RTCPSender::BuildTMMBR(const RtcpContext& ctx, PacketSender& sender) { if (ctx.feedback_state_.receiver == nullptr) - return nullptr; + 
return; // Before sending the TMMBR check the received TMMBN, only an owner is // allowed to raise the bitrate: // * If the sender is an owner of the TMMBN -> send TMMBR @@ -554,7 +522,7 @@ std::unique_ptr RTCPSender::BuildTMMBR( // get current bounding set from RTCP receiver bool tmmbr_owner = false; - // holding critical_section_rtcp_sender_ while calling RTCPreceiver which + // holding mutex_rtcp_sender_ while calling RTCPreceiver which // will accuire criticalSectionRTCPReceiver_ is a potental deadlock but // since RTCPreceiver is not doing the reverse we should be fine std::vector candidates = @@ -565,7 +533,7 @@ std::unique_ptr RTCPSender::BuildTMMBR( if (candidate.bitrate_bps() == tmmbr_send_bps_ && candidate.packet_overhead() == packet_oh_send_) { // Do not send the same tuple. - return nullptr; + return; } } if (!tmmbr_owner) { @@ -579,65 +547,53 @@ std::unique_ptr RTCPSender::BuildTMMBR( tmmbr_owner = TMMBRHelp::IsOwner(bounding, ssrc_); if (!tmmbr_owner) { // Did not enter bounding set, no meaning to send this request. 
- return nullptr; + return; } } } if (!tmmbr_send_bps_) - return nullptr; + return; - rtcp::Tmmbr* tmmbr = new rtcp::Tmmbr(); - tmmbr->SetSenderSsrc(ssrc_); + rtcp::Tmmbr tmmbr; + tmmbr.SetSenderSsrc(ssrc_); rtcp::TmmbItem request; request.set_ssrc(remote_ssrc_); request.set_bitrate_bps(tmmbr_send_bps_); request.set_packet_overhead(packet_oh_send_); - tmmbr->AddTmmbr(request); - - return std::unique_ptr(tmmbr); + tmmbr.AddTmmbr(request); + sender.AppendPacket(tmmbr); } -std::unique_ptr RTCPSender::BuildTMMBN( - const RtcpContext& ctx) { - rtcp::Tmmbn* tmmbn = new rtcp::Tmmbn(); - tmmbn->SetSenderSsrc(ssrc_); +void RTCPSender::BuildTMMBN(const RtcpContext& ctx, PacketSender& sender) { + rtcp::Tmmbn tmmbn; + tmmbn.SetSenderSsrc(ssrc_); for (const rtcp::TmmbItem& tmmbr : tmmbn_to_send_) { if (tmmbr.bitrate_bps() > 0) { - tmmbn->AddTmmbr(tmmbr); + tmmbn.AddTmmbr(tmmbr); } } - - return std::unique_ptr(tmmbn); + sender.AppendPacket(tmmbn); } -std::unique_ptr RTCPSender::BuildAPP(const RtcpContext& ctx) { - rtcp::App* app = new rtcp::App(); - app->SetSenderSsrc(ssrc_); - app->SetSubType(app_sub_type_); - app->SetName(app_name_); - app->SetData(app_data_.get(), app_length_); - - return std::unique_ptr(app); +void RTCPSender::BuildAPP(const RtcpContext& ctx, PacketSender& sender) { + rtcp::App app; + app.SetSenderSsrc(ssrc_); + sender.AppendPacket(app); } -std::unique_ptr RTCPSender::BuildLossNotification( - const RtcpContext& ctx) { - auto loss_notification = std::make_unique( - loss_notification_state_.last_decoded_seq_num, - loss_notification_state_.last_received_seq_num, - loss_notification_state_.decodability_flag); - loss_notification->SetSenderSsrc(ssrc_); - loss_notification->SetMediaSsrc(remote_ssrc_); - return std::move(loss_notification); +void RTCPSender::BuildLossNotification(const RtcpContext& ctx, + PacketSender& sender) { + loss_notification_.SetSenderSsrc(ssrc_); + loss_notification_.SetMediaSsrc(remote_ssrc_); + sender.AppendPacket(loss_notification_); } 
-std::unique_ptr RTCPSender::BuildNACK( - const RtcpContext& ctx) { - rtcp::Nack* nack = new rtcp::Nack(); - nack->SetSenderSsrc(ssrc_); - nack->SetMediaSsrc(remote_ssrc_); - nack->SetPacketIds(ctx.nack_list_, ctx.nack_size_); +void RTCPSender::BuildNACK(const RtcpContext& ctx, PacketSender& sender) { + rtcp::Nack nack; + nack.SetSenderSsrc(ssrc_); + nack.SetMediaSsrc(remote_ssrc_); + nack.SetPacketIds(ctx.nack_list_, ctx.nack_size_); // Report stats. for (int idx = 0; idx < ctx.nack_size_; ++idx) { @@ -647,31 +603,29 @@ std::unique_ptr RTCPSender::BuildNACK( packet_type_counter_.unique_nack_requests = nack_stats_.unique_requests(); ++packet_type_counter_.nack_packets; - - return std::unique_ptr(nack); + sender.AppendPacket(nack); } -std::unique_ptr RTCPSender::BuildBYE(const RtcpContext& ctx) { - rtcp::Bye* bye = new rtcp::Bye(); - bye->SetSenderSsrc(ssrc_); - bye->SetCsrcs(csrcs_); - - return std::unique_ptr(bye); +void RTCPSender::BuildBYE(const RtcpContext& ctx, PacketSender& sender) { + rtcp::Bye bye; + bye.SetSenderSsrc(ssrc_); + bye.SetCsrcs(csrcs_); + sender.AppendPacket(bye); } -std::unique_ptr RTCPSender::BuildExtendedReports( - const RtcpContext& ctx) { - std::unique_ptr xr(new rtcp::ExtendedReports()); - xr->SetSenderSsrc(ssrc_); +void RTCPSender::BuildExtendedReports(const RtcpContext& ctx, + PacketSender& sender) { + rtcp::ExtendedReports xr; + xr.SetSenderSsrc(ssrc_); if (!sending_ && xr_send_receiver_reference_time_enabled_) { rtcp::Rrtr rrtr; - rrtr.SetNtp(TimeMicrosToNtp(ctx.now_us_)); - xr->SetRrtr(rrtr); + rrtr.SetNtp(clock_->ConvertTimestampToNtpTime(ctx.now_)); + xr.SetRrtr(rrtr); } for (const rtcp::ReceiveTimeInfo& rti : ctx.feedback_state_.last_xr_rtis) { - xr->AddDlrrItem(rti); + xr.AddDlrrItem(rti); } if (send_video_bitrate_allocation_) { @@ -686,75 +640,56 @@ std::unique_ptr RTCPSender::BuildExtendedReports( } } - xr->SetTargetBitrate(target_bitrate); + xr.SetTargetBitrate(target_bitrate); send_video_bitrate_allocation_ = false; } - - 
return std::move(xr); + sender.AppendPacket(xr); } int32_t RTCPSender::SendRTCP(const FeedbackState& feedback_state, - RTCPPacketType packetType, + RTCPPacketType packet_type, int32_t nack_size, const uint16_t* nack_list) { - return SendCompoundRTCP( - feedback_state, std::set(&packetType, &packetType + 1), - nack_size, nack_list); -} - -int32_t RTCPSender::SendCompoundRTCP( - const FeedbackState& feedback_state, - const std::set& packet_types, - int32_t nack_size, - const uint16_t* nack_list) { - PacketContainer container(transport_, event_log_); - size_t max_packet_size; - + int32_t error_code = -1; + auto callback = [&](rtc::ArrayView packet) { + if (transport_->SendRtcp(packet.data(), packet.size())) { + error_code = 0; + if (event_log_) { + event_log_->Log(std::make_unique(packet)); + } + } + }; + absl::optional sender; { - rtc::CritScope lock(&critical_section_rtcp_sender_); - auto result = ComputeCompoundRTCPPacket(feedback_state, packet_types, - nack_size, nack_list, &container); + MutexLock lock(&mutex_rtcp_sender_); + sender.emplace(callback, max_packet_size_); + auto result = ComputeCompoundRTCPPacket(feedback_state, packet_type, + nack_size, nack_list, *sender); if (result) { return *result; } - max_packet_size = max_packet_size_; } + sender->Send(); - size_t bytes_sent = container.SendPackets(max_packet_size); - return bytes_sent == 0 ? -1 : 0; -} - -int32_t RTCPSender::SendCompoundRTCPLocked( - const FeedbackState& feedback_state, - const std::set& packet_types, - int32_t nack_size, - const uint16_t* nack_list) { - PacketContainer container(transport_, event_log_); - auto result = ComputeCompoundRTCPPacket(feedback_state, packet_types, - nack_size, nack_list, &container); - if (result) { - return *result; - } - size_t bytes_sent = container.SendPackets(max_packet_size_); - return bytes_sent == 0 ? 
-1 : 0; + return error_code; } absl::optional RTCPSender::ComputeCompoundRTCPPacket( const FeedbackState& feedback_state, - const std::set& packet_types, + RTCPPacketType packet_type, int32_t nack_size, const uint16_t* nack_list, - rtcp::CompoundPacket* out_packet) { + PacketSender& sender) { if (method_ == RtcpMode::kOff) { RTC_LOG(LS_WARNING) << "Can't send rtcp if it is disabled."; return -1; } - // Add all flags as volatile. Non volatile entries will not be overwritten. - // All new volatile flags added will be consumed by the end of this call. - SetFlags(packet_types, true); + // Add the flag as volatile. Non volatile entries will not be overwritten. + // The new volatile flag will be consumed by the end of this call. + SetFlag(packet_type, true); // Prevent sending streams to send SR before any media has been sent. - const bool can_calculate_rtp_timestamp = (last_frame_capture_time_ms_ >= 0); + const bool can_calculate_rtp_timestamp = last_frame_capture_time_.has_value(); if (!can_calculate_rtp_timestamp) { bool consumed_sr_flag = ConsumeFlag(kRtcpSr); bool consumed_report_flag = sending_ && ConsumeFlag(kRtcpReport); @@ -774,39 +709,41 @@ absl::optional RTCPSender::ComputeCompoundRTCPPacket( // We need to send our NTP even if we haven't received any reports. 
RtcpContext context(feedback_state, nack_size, nack_list, - clock_->TimeInMicroseconds()); + clock_->CurrentTime()); PrepareReport(feedback_state); - std::unique_ptr packet_bye; + bool create_bye = false; auto it = report_flags_.begin(); while (it != report_flags_.end()) { - auto builder_it = builders_.find(it->type); - RTC_DCHECK(builder_it != builders_.end()) - << "Could not find builder for packet type " << it->type; + uint32_t rtcp_packet_type = it->type; + if (it->is_volatile) { report_flags_.erase(it++); } else { ++it; } - BuilderFunc func = builder_it->second; - std::unique_ptr packet = (this->*func)(context); - if (packet == nullptr) - return -1; // If there is a BYE, don't append now - save it and append it // at the end later. - if (builder_it->first == kRtcpBye) { - packet_bye = std::move(packet); + if (rtcp_packet_type == kRtcpBye) { + create_bye = true; + continue; + } + auto builder_it = builders_.find(rtcp_packet_type); + if (builder_it == builders_.end()) { + RTC_NOTREACHED() << "Could not find builder for packet type " + << rtcp_packet_type; } else { - out_packet->Append(packet.release()); + BuilderFunc func = builder_it->second; + (this->*func)(context, sender); } } // Append the BYE now at the end - if (packet_bye) { - out_packet->Append(packet_bye.release()); + if (create_bye) { + BuildBYE(context, sender); } if (packet_type_counter_observer_ != nullptr) { @@ -843,24 +780,25 @@ void RTCPSender::PrepareReport(const FeedbackState& feedback_state) { } // generate next time to send an RTCP report - int min_interval_ms = report_interval_ms_; + TimeDelta min_interval = report_interval_; if (!audio_ && sending_) { // Calculate bandwidth for video; 360 / send bandwidth in kbit/s. 
int send_bitrate_kbit = feedback_state.send_bitrate / 1000; if (send_bitrate_kbit != 0) { - min_interval_ms = 360000 / send_bitrate_kbit; - min_interval_ms = std::min(min_interval_ms, report_interval_ms_); + min_interval = std::min(TimeDelta::Millis(360000 / send_bitrate_kbit), + report_interval_); } } // The interval between RTCP packets is varied randomly over the // range [1/2,3/2] times the calculated interval. - int time_to_next = - random_.Rand(min_interval_ms * 1 / 2, min_interval_ms * 3 / 2); + int min_interval_int = rtc::dchecked_cast(min_interval.ms()); + TimeDelta time_to_next = TimeDelta::Millis( + random_.Rand(min_interval_int * 1 / 2, min_interval_int * 3 / 2)); - RTC_DCHECK_GT(time_to_next, 0); - next_time_to_send_rtcp_ = clock_->TimeInMilliseconds() + time_to_next; + RTC_DCHECK(!time_to_next.IsZero()); + SetNextRtcpSendEvaluationDuration(time_to_next); // RtcpSender expected to be used for sending either just sender reports // or just receiver reports. @@ -882,7 +820,7 @@ std::vector RTCPSender::CreateReportBlocks( if (!result.empty() && ((feedback_state.last_rr_ntp_secs != 0) || (feedback_state.last_rr_ntp_frac != 0))) { // Get our NTP as late as possible to avoid a race. 
- uint32_t now = CompactNtp(TimeMicrosToNtp(clock_->TimeInMicroseconds())); + uint32_t now = CompactNtp(clock_->CurrentNtpTime()); uint32_t receive_time = feedback_state.last_rr_ntp_secs & 0x0000FFFF; receive_time <<= 16; @@ -902,41 +840,12 @@ std::vector RTCPSender::CreateReportBlocks( void RTCPSender::SetCsrcs(const std::vector& csrcs) { RTC_DCHECK_LE(csrcs.size(), kRtpCsrcSize); - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); csrcs_ = csrcs; } -int32_t RTCPSender::SetApplicationSpecificData(uint8_t subType, - uint32_t name, - const uint8_t* data, - uint16_t length) { - if (length % 4 != 0) { - RTC_LOG(LS_ERROR) << "Failed to SetApplicationSpecificData."; - return -1; - } - rtc::CritScope lock(&critical_section_rtcp_sender_); - - SetFlag(kRtcpApp, true); - app_sub_type_ = subType; - app_name_ = name; - app_data_.reset(new uint8_t[length]); - app_length_ = length; - memcpy(app_data_.get(), data, length); - return 0; -} - -void RTCPSender::SendRtcpXrReceiverReferenceTime(bool enable) { - rtc::CritScope lock(&critical_section_rtcp_sender_); - xr_send_receiver_reference_time_enabled_ = enable; -} - -bool RTCPSender::RtcpXrReceiverReferenceTime() const { - rtc::CritScope lock(&critical_section_rtcp_sender_); - return xr_send_receiver_reference_time_enabled_; -} - void RTCPSender::SetTmmbn(std::vector bounding_set) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); tmmbn_to_send_ = std::move(bounding_set); SetFlag(kRtcpTmmbn, true); } @@ -949,12 +858,6 @@ void RTCPSender::SetFlag(uint32_t type, bool is_volatile) { } } -void RTCPSender::SetFlags(const std::set& types, - bool is_volatile) { - for (RTCPPacketType type : types) - SetFlag(type, is_volatile); -} - bool RTCPSender::IsFlagPresent(uint32_t type) const { return report_flags_.find(ReportFlag(type, false)) != report_flags_.end(); } @@ -978,7 +881,7 @@ bool RTCPSender::AllVolatileFlagsConsumed() const { void 
RTCPSender::SetVideoBitrateAllocation( const VideoBitrateAllocation& bitrate) { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); // Check if this allocation is first ever, or has a different set of // spatial/temporal layers signaled and enabled, if so trigger an rtcp report // as soon as possible. @@ -989,7 +892,7 @@ void RTCPSender::SetVideoBitrateAllocation( RTC_LOG(LS_INFO) << "Emitting TargetBitrate XR for SSRC " << ssrc_ << " with new layers enabled/disabled: " << video_bitrate_allocation_.ToString(); - next_time_to_send_rtcp_ = clock_->TimeInMilliseconds(); + SetNextRtcpSendEvaluationDuration(TimeDelta::Zero()); } else { video_bitrate_allocation_ = bitrate; } @@ -1026,7 +929,7 @@ void RTCPSender::SendCombinedRtcpPacket( size_t max_packet_size; uint32_t ssrc; { - rtc::CritScope lock(&critical_section_rtcp_sender_); + MutexLock lock(&mutex_rtcp_sender_); if (method_ == RtcpMode::kOff) { RTC_LOG(LS_WARNING) << "Can't send rtcp if it is disabled."; return; @@ -1050,4 +953,12 @@ void RTCPSender::SendCombinedRtcpPacket( sender.Send(); } +void RTCPSender::SetNextRtcpSendEvaluationDuration(TimeDelta duration) { + next_time_to_send_rtcp_ = clock_->CurrentTime() + duration; + // TODO(bugs.webrtc.org/11581): make unconditional once downstream consumers + // are using the callback method. 
+ if (schedule_next_rtcp_send_evaluation_function_) + schedule_next_rtcp_send_evaluation_function_(duration); +} + } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtcp_sender.h b/modules/rtp_rtcp/source/rtcp_sender.h index 61081d4c79..2d1c7da0fc 100644 --- a/modules/rtp_rtcp/source/rtcp_sender.h +++ b/modules/rtp_rtcp/source/rtcp_sender.h @@ -19,21 +19,22 @@ #include "absl/types/optional.h" #include "api/call/transport.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "api/video/video_bitrate_allocation.h" -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/rtp_rtcp/include/receive_statistics.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_nack_stats.h" #include "modules/rtp_rtcp/source/rtcp_packet.h" #include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" +#include "modules/rtp_rtcp/source/rtcp_packet/loss_notification.h" #include "modules/rtp_rtcp/source/rtcp_packet/report_block.h" #include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "rtc_base/random.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -43,6 +44,43 @@ class RtcEventLog; class RTCPSender final { public: + struct Configuration { + // TODO(bugs.webrtc.org/11581): Remove this temporary conversion utility + // once rtc_rtcp_impl.cc/h are gone. + static Configuration FromRtpRtcpConfiguration( + const RtpRtcpInterface::Configuration& config); + + // True for a audio version of the RTP/RTCP module object false will create + // a video version. 
+ bool audio = false; + // SSRCs for media and retransmission, respectively. + // FlexFec SSRC is fetched from |flexfec_sender|. + uint32_t local_media_ssrc = 0; + // The clock to use to read time. If nullptr then system clock will be used. + Clock* clock = nullptr; + // Transport object that will be called when packets are ready to be sent + // out on the network. + Transport* outgoing_transport = nullptr; + // Estimate RTT as non-sender as described in + // https://tools.ietf.org/html/rfc3611#section-4.4 and #section-4.5 + bool non_sender_rtt_measurement = false; + // Optional callback which, if specified, is used by RTCPSender to schedule + // the next time to evaluate if RTCP should be sent by means of + // TimeToSendRTCPReport/SendRTCP. + // The RTCPSender client still needs to call TimeToSendRTCPReport/SendRTCP + // to actually get RTCP sent. + // + // Note: It's recommended to use the callback to ensure program design that + // doesn't use polling. + // TODO(bugs.webrtc.org/11581): Make mandatory once downstream consumers + // have migrated to the callback solution. 
+ std::function schedule_next_rtcp_send_evaluation_function; + + RtcEventLog* event_log = nullptr; + absl::optional rtcp_report_interval; + ReceiveStatisticsProvider* receive_statistics = nullptr; + RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer = nullptr; + }; struct FeedbackState { FeedbackState(); FeedbackState(const FeedbackState&); @@ -64,260 +102,212 @@ class RTCPSender final { RTCPReceiver* receiver; }; - explicit RTCPSender(const RtpRtcp::Configuration& config); + explicit RTCPSender(Configuration config); + + RTCPSender() = delete; + RTCPSender(const RTCPSender&) = delete; + RTCPSender& operator=(const RTCPSender&) = delete; + virtual ~RTCPSender(); - RtcpMode Status() const RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - void SetRTCPStatus(RtcpMode method) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RtcpMode Status() const RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); + void SetRTCPStatus(RtcpMode method) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); - bool Sending() const RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - int32_t SetSendingStatus(const FeedbackState& feedback_state, - bool enabled) - RTC_LOCKS_EXCLUDED( - critical_section_rtcp_sender_); // combine the functions + bool Sending() const RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); + void SetSendingStatus(const FeedbackState& feedback_state, + bool enabled) + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); // combine the functions - int32_t SetNackStatus(bool enable) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + int32_t SetNackStatus(bool enable) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetTimestampOffset(uint32_t timestamp_offset) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); - // TODO(bugs.webrtc.org/6458): Remove default parameter value when all the - // depending projects are updated to correctly set payload type. 
void SetLastRtpTime(uint32_t rtp_timestamp, - int64_t capture_time_ms, - int8_t payload_type = -1) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + absl::optional capture_time, + absl::optional payload_type) + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetRtpClockRate(int8_t payload_type, int rtp_clock_rate_hz) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - - uint32_t SSRC() const { return ssrc_; } + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); - void SetRemoteSSRC(uint32_t ssrc) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + uint32_t SSRC() const; + void SetSsrc(uint32_t ssrc); - int32_t SetCNAME(const char* cName) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + void SetRemoteSSRC(uint32_t ssrc) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); - int32_t AddMixedCNAME(uint32_t SSRC, const char* c_name) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - - int32_t RemoveMixedCNAME(uint32_t SSRC) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + int32_t SetCNAME(const char* cName) RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); bool TimeToSendRTCPReport(bool sendKeyframeBeforeRTP = false) const - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); int32_t SendRTCP(const FeedbackState& feedback_state, RTCPPacketType packetType, int32_t nackSize = 0, const uint16_t* nackList = 0) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - - int32_t SendCompoundRTCP(const FeedbackState& feedback_state, - const std::set& packetTypes, - int32_t nackSize = 0, - const uint16_t* nackList = nullptr) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); int32_t SendLossNotification(const FeedbackState& feedback_state, uint16_t last_decoded_seq_num, uint16_t last_received_seq_num, bool decodability_flag, bool buffering_allowed) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetRemb(int64_t bitrate_bps, std::vector ssrcs) - 
RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); - void UnsetRemb() RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + void UnsetRemb() RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); - bool TMMBR() const RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - - void SetTMMBRStatus(bool enable) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + bool TMMBR() const RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetMaxRtpPacketSize(size_t max_packet_size) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetTmmbn(std::vector bounding_set) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - - int32_t SetApplicationSpecificData(uint8_t subType, - uint32_t name, - const uint8_t* data, - uint16_t length) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - - void SendRtcpXrReceiverReferenceTime(bool enable) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); - - bool RtcpXrReceiverReferenceTime() const - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetCsrcs(const std::vector& csrcs) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetTargetBitrate(unsigned int target_bitrate) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SetVideoBitrateAllocation(const VideoBitrateAllocation& bitrate) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); void SendCombinedRtcpPacket( std::vector> rtcp_packets) - RTC_LOCKS_EXCLUDED(critical_section_rtcp_sender_); + RTC_LOCKS_EXCLUDED(mutex_rtcp_sender_); private: class RtcpContext; - - int32_t SendCompoundRTCPLocked(const FeedbackState& feedback_state, - const std::set& packet_types, - int32_t nack_size, - const uint16_t* nack_list) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + class PacketSender; absl::optional 
ComputeCompoundRTCPPacket( const FeedbackState& feedback_state, - const std::set& packet_types, + RTCPPacketType packet_type, int32_t nack_size, const uint16_t* nack_list, - rtcp::CompoundPacket* out_packet) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + PacketSender& sender) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); // Determine which RTCP messages should be sent and setup flags. void PrepareReport(const FeedbackState& feedback_state) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); std::vector CreateReportBlocks( const FeedbackState& feedback_state) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - - std::unique_ptr BuildSR(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildRR(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildSDES(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildPLI(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildREMB(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildTMMBR(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildTMMBN(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildAPP(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildLossNotification( - const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildExtendedReports( - const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildBYE(const RtcpContext& context) - 
RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildFIR(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - std::unique_ptr BuildNACK(const RtcpContext& context) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + + void BuildSR(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildRR(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildSDES(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildPLI(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildREMB(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildTMMBR(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildTMMBN(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildAPP(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildLossNotification(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildExtendedReports(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildBYE(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildFIR(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + void BuildNACK(const RtcpContext& context, PacketSender& sender) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); + + // |duration| being TimeDelta::Zero() means schedule immediately. 
+ void SetNextRtcpSendEvaluationDuration(TimeDelta duration) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); - private: const bool audio_; - const uint32_t ssrc_; + // TODO(bugs.webrtc.org/11581): `mutex_rtcp_sender_` shouldn't be required if + // we consistently run network related operations on the network thread. + // This is currently not possible due to callbacks from the process thread in + // ModuleRtpRtcpImpl2. + uint32_t ssrc_ RTC_GUARDED_BY(mutex_rtcp_sender_); Clock* const clock_; - Random random_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - RtcpMode method_ RTC_GUARDED_BY(critical_section_rtcp_sender_); + Random random_ RTC_GUARDED_BY(mutex_rtcp_sender_); + RtcpMode method_ RTC_GUARDED_BY(mutex_rtcp_sender_); RtcEventLog* const event_log_; Transport* const transport_; - const int report_interval_ms_; + const TimeDelta report_interval_; + // Set from + // RTCPSender::Configuration::schedule_next_rtcp_send_evaluation_function. + const std::function + schedule_next_rtcp_send_evaluation_function_; - rtc::CriticalSection critical_section_rtcp_sender_; - bool sending_ RTC_GUARDED_BY(critical_section_rtcp_sender_); + mutable Mutex mutex_rtcp_sender_; + bool sending_ RTC_GUARDED_BY(mutex_rtcp_sender_); - int64_t next_time_to_send_rtcp_ RTC_GUARDED_BY(critical_section_rtcp_sender_); + absl::optional next_time_to_send_rtcp_ + RTC_GUARDED_BY(mutex_rtcp_sender_); - uint32_t timestamp_offset_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - int64_t last_frame_capture_time_ms_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); + uint32_t timestamp_offset_ RTC_GUARDED_BY(mutex_rtcp_sender_); + uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(mutex_rtcp_sender_); + absl::optional last_frame_capture_time_ + RTC_GUARDED_BY(mutex_rtcp_sender_); // SSRC that we receive on our RTP channel - uint32_t remote_ssrc_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - std::string cname_ 
RTC_GUARDED_BY(critical_section_rtcp_sender_); + uint32_t remote_ssrc_ RTC_GUARDED_BY(mutex_rtcp_sender_); + std::string cname_ RTC_GUARDED_BY(mutex_rtcp_sender_); ReceiveStatisticsProvider* receive_statistics_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); - std::map csrc_cnames_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); + RTC_GUARDED_BY(mutex_rtcp_sender_); // send CSRCs - std::vector csrcs_ RTC_GUARDED_BY(critical_section_rtcp_sender_); + std::vector csrcs_ RTC_GUARDED_BY(mutex_rtcp_sender_); // Full intra request - uint8_t sequence_number_fir_ RTC_GUARDED_BY(critical_section_rtcp_sender_); + uint8_t sequence_number_fir_ RTC_GUARDED_BY(mutex_rtcp_sender_); - // Loss Notification - struct LossNotificationState { - uint16_t last_decoded_seq_num; - uint16_t last_received_seq_num; - bool decodability_flag; - }; - LossNotificationState loss_notification_state_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); + rtcp::LossNotification loss_notification_ RTC_GUARDED_BY(mutex_rtcp_sender_); // REMB - int64_t remb_bitrate_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - std::vector remb_ssrcs_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); - - std::vector tmmbn_to_send_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); - uint32_t tmmbr_send_bps_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - uint32_t packet_oh_send_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - size_t max_packet_size_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - - // APP - uint8_t app_sub_type_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - uint32_t app_name_ RTC_GUARDED_BY(critical_section_rtcp_sender_); - std::unique_ptr app_data_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); - uint16_t app_length_ RTC_GUARDED_BY(critical_section_rtcp_sender_); + int64_t remb_bitrate_ RTC_GUARDED_BY(mutex_rtcp_sender_); + std::vector remb_ssrcs_ RTC_GUARDED_BY(mutex_rtcp_sender_); + + std::vector tmmbn_to_send_ RTC_GUARDED_BY(mutex_rtcp_sender_); + uint32_t tmmbr_send_bps_ 
RTC_GUARDED_BY(mutex_rtcp_sender_); + uint32_t packet_oh_send_ RTC_GUARDED_BY(mutex_rtcp_sender_); + size_t max_packet_size_ RTC_GUARDED_BY(mutex_rtcp_sender_); // True if sending of XR Receiver reference time report is enabled. - bool xr_send_receiver_reference_time_enabled_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); + const bool xr_send_receiver_reference_time_enabled_; RtcpPacketTypeCounterObserver* const packet_type_counter_observer_; - RtcpPacketTypeCounter packet_type_counter_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); + RtcpPacketTypeCounter packet_type_counter_ RTC_GUARDED_BY(mutex_rtcp_sender_); - RtcpNackStats nack_stats_ RTC_GUARDED_BY(critical_section_rtcp_sender_); + RtcpNackStats nack_stats_ RTC_GUARDED_BY(mutex_rtcp_sender_); VideoBitrateAllocation video_bitrate_allocation_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); - bool send_video_bitrate_allocation_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); + RTC_GUARDED_BY(mutex_rtcp_sender_); + bool send_video_bitrate_allocation_ RTC_GUARDED_BY(mutex_rtcp_sender_); - std::map rtp_clock_rates_khz_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); - int8_t last_payload_type_ RTC_GUARDED_BY(critical_section_rtcp_sender_); + std::map rtp_clock_rates_khz_ RTC_GUARDED_BY(mutex_rtcp_sender_); + int8_t last_payload_type_ RTC_GUARDED_BY(mutex_rtcp_sender_); absl::optional CheckAndUpdateLayerStructure( const VideoBitrateAllocation& bitrate) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); void SetFlag(uint32_t type, bool is_volatile) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); - void SetFlags(const std::set& types, bool is_volatile) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); bool IsFlagPresent(uint32_t type) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); bool 
ConsumeFlag(uint32_t type, bool forced = false) - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); bool AllVolatileFlagsConsumed() const - RTC_EXCLUSIVE_LOCKS_REQUIRED(critical_section_rtcp_sender_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_rtcp_sender_); struct ReportFlag { ReportFlag(uint32_t type, bool is_volatile) : type(type), is_volatile(is_volatile) {} @@ -327,15 +317,11 @@ class RTCPSender final { const bool is_volatile; }; - std::set report_flags_ - RTC_GUARDED_BY(critical_section_rtcp_sender_); + std::set report_flags_ RTC_GUARDED_BY(mutex_rtcp_sender_); - typedef std::unique_ptr (RTCPSender::*BuilderFunc)( - const RtcpContext&); + typedef void (RTCPSender::*BuilderFunc)(const RtcpContext&, PacketSender&); // Map from RTCPPacketType to builder. std::map builders_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RTCPSender); }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc index d187b16431..347be79398 100644 --- a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc @@ -14,12 +14,12 @@ #include #include "absl/base/macros.h" +#include "api/units/time_delta.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_packet/bye.h" #include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" -#include "modules/rtp_rtcp/source/time_util.h" #include "rtc_base/rate_limiter.h" #include "test/gmock.h" #include "test/gtest.h" @@ -28,7 +28,9 @@ using ::testing::_; using ::testing::ElementsAre; +using ::testing::Eq; using ::testing::Invoke; +using ::testing::Property; using ::testing::SizeIs; namespace webrtc { @@ -68,35 +70,52 @@ static const uint32_t kSenderSsrc = 0x11111111; static const uint32_t kRemoteSsrc = 0x22222222; static const 
uint32_t kStartRtpTimestamp = 0x34567; static const uint32_t kRtpTimestamp = 0x45678; + +std::unique_ptr CreateRtcpSender( + const RTCPSender::Configuration& config, + bool init_timestamps = true) { + auto rtcp_sender = std::make_unique(config); + rtcp_sender->SetRemoteSSRC(kRemoteSsrc); + if (init_timestamps) { + rtcp_sender->SetTimestampOffset(kStartRtpTimestamp); + rtcp_sender->SetLastRtpTime(kRtpTimestamp, config.clock->CurrentTime(), + /*payload_type=*/0); + } + return rtcp_sender; +} } // namespace class RtcpSenderTest : public ::testing::Test { protected: RtcpSenderTest() : clock_(1335900000), - receive_statistics_(ReceiveStatistics::Create(&clock_)), - retransmission_rate_limiter_(&clock_, 1000) { - RtpRtcp::Configuration configuration = GetDefaultConfig(); - rtp_rtcp_impl_.reset(new ModuleRtpRtcpImpl2(configuration)); - rtcp_sender_.reset(new RTCPSender(configuration)); - rtcp_sender_->SetRemoteSSRC(kRemoteSsrc); - rtcp_sender_->SetTimestampOffset(kStartRtpTimestamp); - rtcp_sender_->SetLastRtpTime(kRtpTimestamp, clock_.TimeInMilliseconds(), - /*payload_type=*/0); + receive_statistics_(ReceiveStatistics::Create(&clock_)) { + rtp_rtcp_impl_.reset(new ModuleRtpRtcpImpl2(GetDefaultRtpRtcpConfig())); } - RtpRtcp::Configuration GetDefaultConfig() { - RtpRtcp::Configuration configuration; + RTCPSender::Configuration GetDefaultConfig() { + RTCPSender::Configuration configuration; configuration.audio = false; configuration.clock = &clock_; configuration.outgoing_transport = &test_transport_; - configuration.retransmission_rate_limiter = &retransmission_rate_limiter_; - configuration.rtcp_report_interval_ms = 1000; + configuration.rtcp_report_interval = TimeDelta::Millis(1000); configuration.receive_statistics = receive_statistics_.get(); configuration.local_media_ssrc = kSenderSsrc; return configuration; } + RtpRtcpInterface::Configuration GetDefaultRtpRtcpConfig() { + RTCPSender::Configuration config = GetDefaultConfig(); + RtpRtcpInterface::Configuration result; 
+ result.audio = config.audio; + result.clock = config.clock; + result.outgoing_transport = config.outgoing_transport; + result.rtcp_report_interval_ms = config.rtcp_report_interval->ms(); + result.receive_statistics = config.receive_statistics; + result.local_media_ssrc = config.local_media_ssrc; + return result; + } + void InsertIncomingPacket(uint32_t remote_ssrc, uint16_t seq_num) { RtpPacketReceived packet; packet.SetSsrc(remote_ssrc); @@ -116,37 +135,39 @@ class RtcpSenderTest : public ::testing::Test { TestTransport test_transport_; std::unique_ptr receive_statistics_; std::unique_ptr rtp_rtcp_impl_; - std::unique_ptr rtcp_sender_; - RateLimiter retransmission_rate_limiter_; }; TEST_F(RtcpSenderTest, SetRtcpStatus) { - EXPECT_EQ(RtcpMode::kOff, rtcp_sender_->Status()); - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(RtcpMode::kReducedSize, rtcp_sender_->Status()); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + EXPECT_EQ(RtcpMode::kOff, rtcp_sender->Status()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + EXPECT_EQ(RtcpMode::kReducedSize, rtcp_sender->Status()); } TEST_F(RtcpSenderTest, SetSendingStatus) { - EXPECT_FALSE(rtcp_sender_->Sending()); - EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), true)); - EXPECT_TRUE(rtcp_sender_->Sending()); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + EXPECT_FALSE(rtcp_sender->Sending()); + rtcp_sender->SetSendingStatus(feedback_state(), true); + EXPECT_TRUE(rtcp_sender->Sending()); } TEST_F(RtcpSenderTest, NoPacketSentIfOff) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kOff); - EXPECT_EQ(-1, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSr)); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kOff); + EXPECT_EQ(-1, rtcp_sender->SendRTCP(feedback_state(), kRtcpSr)); } TEST_F(RtcpSenderTest, SendSr) { const uint32_t kPacketCount = 0x12345; const uint32_t kOctetCount = 0x23456; - 
rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState(); - rtcp_sender_->SetSendingStatus(feedback_state, true); + rtcp_sender->SetSendingStatus(feedback_state, true); feedback_state.packets_sent = kPacketCount; feedback_state.media_bytes_sent = kOctetCount; - NtpTime ntp = TimeMicrosToNtp(clock_.TimeInMicroseconds()); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr)); + NtpTime ntp = clock_.CurrentNtpTime(); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpSr)); EXPECT_EQ(1, parser()->sender_report()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->sender_report()->sender_ssrc()); EXPECT_EQ(ntp, parser()->sender_report()->ntp()); @@ -162,15 +183,16 @@ TEST_F(RtcpSenderTest, SendConsecutiveSrWithExactSlope) { const uint32_t kOctetCount = 0x23456; const int kTimeBetweenSRsUs = 10043; // Not exact value in milliseconds. const int kExtraPackets = 30; + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); // Make sure clock is not exactly at some milliseconds point. 
clock_.AdvanceTimeMicroseconds(kTimeBetweenSRsUs); - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState(); - rtcp_sender_->SetSendingStatus(feedback_state, true); + rtcp_sender->SetSendingStatus(feedback_state, true); feedback_state.packets_sent = kPacketCount; feedback_state.media_bytes_sent = kOctetCount; - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpSr)); EXPECT_EQ(1, parser()->sender_report()->num_packets()); NtpTime ntp1 = parser()->sender_report()->ntp(); uint32_t rtp1 = parser()->sender_report()->rtp_timestamp(); @@ -178,7 +200,7 @@ TEST_F(RtcpSenderTest, SendConsecutiveSrWithExactSlope) { // Send more SRs to ensure slope is always exact for different offsets for (int packets = 1; packets <= kExtraPackets; ++packets) { clock_.AdvanceTimeMicroseconds(kTimeBetweenSRsUs); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpSr)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpSr)); EXPECT_EQ(packets + 1, parser()->sender_report()->num_packets()); NtpTime ntp2 = parser()->sender_report()->ntp(); @@ -191,48 +213,47 @@ TEST_F(RtcpSenderTest, SendConsecutiveSrWithExactSlope) { } TEST_F(RtcpSenderTest, DoNotSendSrBeforeRtp) { - RtpRtcp::Configuration config; + RTCPSender::Configuration config; config.clock = &clock_; config.receive_statistics = receive_statistics_.get(); config.outgoing_transport = &test_transport_; - config.rtcp_report_interval_ms = 1000; + config.rtcp_report_interval = TimeDelta::Millis(1000); config.local_media_ssrc = kSenderSsrc; - rtcp_sender_.reset(new RTCPSender(config)); - rtcp_sender_->SetRemoteSSRC(kRemoteSsrc); - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - rtcp_sender_->SetSendingStatus(feedback_state(), true); + auto rtcp_sender = CreateRtcpSender(config, /*init_timestamps=*/false); + 
rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + rtcp_sender->SetSendingStatus(feedback_state(), true); // Sender Report shouldn't be send as an SR nor as a Report. - rtcp_sender_->SendRTCP(feedback_state(), kRtcpSr); + rtcp_sender->SendRTCP(feedback_state(), kRtcpSr); EXPECT_EQ(0, parser()->sender_report()->num_packets()); - rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport); + rtcp_sender->SendRTCP(feedback_state(), kRtcpReport); EXPECT_EQ(0, parser()->sender_report()->num_packets()); // Other packets (e.g. Pli) are allowed, even if useless. - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpPli)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli)); EXPECT_EQ(1, parser()->pli()->num_packets()); } TEST_F(RtcpSenderTest, DoNotSendCompundBeforeRtp) { - RtpRtcp::Configuration config; + RTCPSender::Configuration config; config.clock = &clock_; config.receive_statistics = receive_statistics_.get(); config.outgoing_transport = &test_transport_; - config.rtcp_report_interval_ms = 1000; + config.rtcp_report_interval = TimeDelta::Millis(1000); config.local_media_ssrc = kSenderSsrc; - rtcp_sender_.reset(new RTCPSender(config)); - rtcp_sender_->SetRemoteSSRC(kRemoteSsrc); - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - rtcp_sender_->SetSendingStatus(feedback_state(), true); + auto rtcp_sender = CreateRtcpSender(config, /*init_timestamps=*/false); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + rtcp_sender->SetSendingStatus(feedback_state(), true); // In compound mode no packets are allowed (e.g. Pli) because compound mode // should start with Sender Report. 
- EXPECT_EQ(-1, rtcp_sender_->SendRTCP(feedback_state(), kRtcpPli)); + EXPECT_EQ(-1, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli)); EXPECT_EQ(0, parser()->pli()->num_packets()); } TEST_F(RtcpSenderTest, SendRr) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr)); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpRr)); EXPECT_EQ(1, parser()->receiver_report()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->sender_ssrc()); EXPECT_EQ(0U, parser()->receiver_report()->report_blocks().size()); @@ -240,9 +261,10 @@ TEST_F(RtcpSenderTest, SendRr) { TEST_F(RtcpSenderTest, SendRrWithOneReportBlock) { const uint16_t kSeqNum = 11111; + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); InsertIncomingPacket(kRemoteSsrc, kSeqNum); - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr)); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpRr)); EXPECT_EQ(1, parser()->receiver_report()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->sender_ssrc()); ASSERT_EQ(1U, parser()->receiver_report()->report_blocks().size()); @@ -255,133 +277,87 @@ TEST_F(RtcpSenderTest, SendRrWithOneReportBlock) { TEST_F(RtcpSenderTest, SendRrWithTwoReportBlocks) { const uint16_t kSeqNum = 11111; + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); InsertIncomingPacket(kRemoteSsrc, kSeqNum); InsertIncomingPacket(kRemoteSsrc + 1, kSeqNum + 1); - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr)); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpRr)); EXPECT_EQ(1, parser()->receiver_report()->num_packets()); 
EXPECT_EQ(kSenderSsrc, parser()->receiver_report()->sender_ssrc()); - EXPECT_EQ(2U, parser()->receiver_report()->report_blocks().size()); - EXPECT_EQ(kRemoteSsrc, - parser()->receiver_report()->report_blocks()[0].source_ssrc()); - EXPECT_EQ(kRemoteSsrc + 1, - parser()->receiver_report()->report_blocks()[1].source_ssrc()); + EXPECT_THAT( + parser()->receiver_report()->report_blocks(), + UnorderedElementsAre( + Property(&rtcp::ReportBlock::source_ssrc, Eq(kRemoteSsrc)), + Property(&rtcp::ReportBlock::source_ssrc, Eq(kRemoteSsrc + 1)))); } TEST_F(RtcpSenderTest, SendSdes) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SetCNAME("alice@host")); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSdes)); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + EXPECT_EQ(0, rtcp_sender->SetCNAME("alice@host")); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpSdes)); EXPECT_EQ(1, parser()->sdes()->num_packets()); EXPECT_EQ(1U, parser()->sdes()->chunks().size()); EXPECT_EQ(kSenderSsrc, parser()->sdes()->chunks()[0].ssrc); EXPECT_EQ("alice@host", parser()->sdes()->chunks()[0].cname); } -TEST_F(RtcpSenderTest, SendSdesWithMaxChunks) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SetCNAME("alice@host")); - const char cname[] = "smith@host"; - for (size_t i = 0; i < 30; ++i) { - const uint32_t csrc = 0x1234 + i; - EXPECT_EQ(0, rtcp_sender_->AddMixedCNAME(csrc, cname)); - } - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSdes)); - EXPECT_EQ(1, parser()->sdes()->num_packets()); - EXPECT_EQ(31U, parser()->sdes()->chunks().size()); -} - TEST_F(RtcpSenderTest, SdesIncludedInCompoundPacket) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - EXPECT_EQ(0, rtcp_sender_->SetCNAME("alice@host")); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + auto rtcp_sender = 
CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + EXPECT_EQ(0, rtcp_sender->SetCNAME("alice@host")); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); EXPECT_EQ(1, parser()->receiver_report()->num_packets()); EXPECT_EQ(1, parser()->sdes()->num_packets()); EXPECT_EQ(1U, parser()->sdes()->chunks().size()); } TEST_F(RtcpSenderTest, SendBye) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpBye)); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpBye)); EXPECT_EQ(1, parser()->bye()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->bye()->sender_ssrc()); } TEST_F(RtcpSenderTest, StopSendingTriggersBye) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), true)); - EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), false)); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + rtcp_sender->SetSendingStatus(feedback_state(), true); + rtcp_sender->SetSendingStatus(feedback_state(), false); EXPECT_EQ(1, parser()->bye()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->bye()->sender_ssrc()); } -TEST_F(RtcpSenderTest, SendApp) { - const uint8_t kSubType = 30; - uint32_t name = 'n' << 24; - name += 'a' << 16; - name += 'm' << 8; - name += 'e'; - const uint8_t kData[] = {'t', 'e', 's', 't', 'd', 'a', 't', 'a'}; - EXPECT_EQ(0, rtcp_sender_->SetApplicationSpecificData(kSubType, name, kData, - sizeof(kData))); - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpApp)); - EXPECT_EQ(1, parser()->app()->num_packets()); - EXPECT_EQ(kSubType, parser()->app()->sub_type()); - EXPECT_EQ(name, parser()->app()->name()); - 
EXPECT_EQ(sizeof(kData), parser()->app()->data_size()); - EXPECT_EQ(0, memcmp(kData, parser()->app()->data(), sizeof(kData))); -} - -TEST_F(RtcpSenderTest, SendEmptyApp) { - const uint8_t kSubType = 30; - const uint32_t kName = 0x6E616D65; - - EXPECT_EQ( - 0, rtcp_sender_->SetApplicationSpecificData(kSubType, kName, nullptr, 0)); - - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpApp)); - EXPECT_EQ(1, parser()->app()->num_packets()); - EXPECT_EQ(kSubType, parser()->app()->sub_type()); - EXPECT_EQ(kName, parser()->app()->name()); - EXPECT_EQ(0U, parser()->app()->data_size()); -} - -TEST_F(RtcpSenderTest, SetInvalidApplicationSpecificData) { - const uint8_t kData[] = {'t', 'e', 's', 't', 'd', 'a', 't'}; - const uint16_t kInvalidDataLength = sizeof(kData) / sizeof(kData[0]); - EXPECT_EQ(-1, - rtcp_sender_->SetApplicationSpecificData( - 0, 0, kData, kInvalidDataLength)); // Should by multiple of 4. -} - TEST_F(RtcpSenderTest, SendFir) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpFir)); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpFir)); EXPECT_EQ(1, parser()->fir()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->fir()->sender_ssrc()); EXPECT_EQ(1U, parser()->fir()->requests().size()); EXPECT_EQ(kRemoteSsrc, parser()->fir()->requests()[0].ssrc); uint8_t seq = parser()->fir()->requests()[0].seq_nr; - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpFir)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpFir)); EXPECT_EQ(2, parser()->fir()->num_packets()); EXPECT_EQ(seq + 1, parser()->fir()->requests()[0].seq_nr); } TEST_F(RtcpSenderTest, SendPli) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpPli)); + auto 
rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli)); EXPECT_EQ(1, parser()->pli()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->pli()->sender_ssrc()); EXPECT_EQ(kRemoteSsrc, parser()->pli()->media_ssrc()); } TEST_F(RtcpSenderTest, SendNack) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); const uint16_t kList[] = {0, 1, 16}; - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpNack, - ABSL_ARRAYSIZE(kList), kList)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpNack, + ABSL_ARRAYSIZE(kList), kList)); EXPECT_EQ(1, parser()->nack()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->nack()->sender_ssrc()); EXPECT_EQ(kRemoteSsrc, parser()->nack()->media_ssrc()); @@ -389,14 +365,15 @@ TEST_F(RtcpSenderTest, SendNack) { } TEST_F(RtcpSenderTest, SendLossNotificationBufferingNotAllowed) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); constexpr uint16_t kLastDecoded = 0x1234; constexpr uint16_t kLastReceived = 0x4321; constexpr bool kDecodabilityFlag = true; constexpr bool kBufferingAllowed = false; - EXPECT_EQ(rtcp_sender_->SendLossNotification(feedback_state(), kLastDecoded, - kLastReceived, kDecodabilityFlag, - kBufferingAllowed), + EXPECT_EQ(rtcp_sender->SendLossNotification(feedback_state(), kLastDecoded, + kLastReceived, kDecodabilityFlag, + kBufferingAllowed), 0); EXPECT_EQ(parser()->processed_rtcp_packets(), 1u); EXPECT_EQ(parser()->loss_notification()->num_packets(), 1); @@ -405,14 +382,15 @@ TEST_F(RtcpSenderTest, SendLossNotificationBufferingNotAllowed) { } TEST_F(RtcpSenderTest, SendLossNotificationBufferingAllowed) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); + auto 
rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); constexpr uint16_t kLastDecoded = 0x1234; constexpr uint16_t kLastReceived = 0x4321; constexpr bool kDecodabilityFlag = true; constexpr bool kBufferingAllowed = true; - EXPECT_EQ(rtcp_sender_->SendLossNotification(feedback_state(), kLastDecoded, - kLastReceived, kDecodabilityFlag, - kBufferingAllowed), + EXPECT_EQ(rtcp_sender->SendLossNotification(feedback_state(), kLastDecoded, + kLastReceived, kDecodabilityFlag, + kBufferingAllowed), 0); // No RTCP messages sent yet. @@ -420,8 +398,8 @@ TEST_F(RtcpSenderTest, SendLossNotificationBufferingAllowed) { // Sending another messages triggers sending the LNTF messages as well. const uint16_t kList[] = {0, 1, 16}; - EXPECT_EQ(rtcp_sender_->SendRTCP(feedback_state(), kRtcpNack, - ABSL_ARRAYSIZE(kList), kList), + EXPECT_EQ(rtcp_sender->SendRTCP(feedback_state(), kRtcpNack, + ABSL_ARRAYSIZE(kList), kList), 0); // Exactly one packet was produced, and it contained both the buffered LNTF @@ -436,9 +414,10 @@ TEST_F(RtcpSenderTest, SendLossNotificationBufferingAllowed) { } TEST_F(RtcpSenderTest, RembNotIncludedBeforeSet) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); - rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr); + rtcp_sender->SendRTCP(feedback_state(), kRtcpRr); ASSERT_EQ(1, parser()->receiver_report()->num_packets()); EXPECT_EQ(0, parser()->remb()->num_packets()); @@ -447,15 +426,16 @@ TEST_F(RtcpSenderTest, RembNotIncludedBeforeSet) { TEST_F(RtcpSenderTest, RembNotIncludedAfterUnset) { const int64_t kBitrate = 261011; const std::vector kSsrcs = {kRemoteSsrc, kRemoteSsrc + 1}; - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - rtcp_sender_->SetRemb(kBitrate, kSsrcs); - rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + 
rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + rtcp_sender->SetRemb(kBitrate, kSsrcs); + rtcp_sender->SendRTCP(feedback_state(), kRtcpRr); ASSERT_EQ(1, parser()->receiver_report()->num_packets()); EXPECT_EQ(1, parser()->remb()->num_packets()); // Turn off REMB. rtcp_sender no longer should send it. - rtcp_sender_->UnsetRemb(); - rtcp_sender_->SendRTCP(feedback_state(), kRtcpRr); + rtcp_sender->UnsetRemb(); + rtcp_sender->SendRTCP(feedback_state(), kRtcpRr); ASSERT_EQ(2, parser()->receiver_report()->num_packets()); EXPECT_EQ(1, parser()->remb()->num_packets()); } @@ -463,10 +443,11 @@ TEST_F(RtcpSenderTest, RembNotIncludedAfterUnset) { TEST_F(RtcpSenderTest, SendRemb) { const int64_t kBitrate = 261011; const std::vector kSsrcs = {kRemoteSsrc, kRemoteSsrc + 1}; - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - rtcp_sender_->SetRemb(kBitrate, kSsrcs); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + rtcp_sender->SetRemb(kBitrate, kSsrcs); - rtcp_sender_->SendRTCP(feedback_state(), kRtcpRemb); + rtcp_sender->SendRTCP(feedback_state(), kRtcpRemb); EXPECT_EQ(1, parser()->remb()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->remb()->sender_ssrc()); @@ -478,25 +459,27 @@ TEST_F(RtcpSenderTest, SendRemb) { TEST_F(RtcpSenderTest, RembIncludedInEachCompoundPacketAfterSet) { const int kBitrate = 261011; const std::vector kSsrcs = {kRemoteSsrc, kRemoteSsrc + 1}; - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - rtcp_sender_->SetRemb(kBitrate, kSsrcs); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + rtcp_sender->SetRemb(kBitrate, kSsrcs); - rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport); + rtcp_sender->SendRTCP(feedback_state(), kRtcpReport); EXPECT_EQ(1, parser()->remb()->num_packets()); // REMB should be included in each compound packet. 
- rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport); + rtcp_sender->SendRTCP(feedback_state(), kRtcpReport); EXPECT_EQ(2, parser()->remb()->num_packets()); } TEST_F(RtcpSenderTest, SendXrWithDlrr) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState(); rtcp::ReceiveTimeInfo last_xr_rr; last_xr_rr.ssrc = 0x11111111; last_xr_rr.last_rr = 0x22222222; last_xr_rr.delay_since_last_rr = 0x33333333; feedback_state.last_xr_rtis.push_back(last_xr_rr); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpReport)); EXPECT_EQ(1, parser()->xr()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc()); ASSERT_THAT(parser()->xr()->dlrr().sub_blocks(), SizeIs(1)); @@ -508,7 +491,8 @@ TEST_F(RtcpSenderTest, SendXrWithDlrr) { TEST_F(RtcpSenderTest, SendXrWithMultipleDlrrSubBlocks) { const size_t kNumReceivers = 2; - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); RTCPSender::FeedbackState feedback_state = rtp_rtcp_impl_->GetFeedbackState(); for (size_t i = 0; i < kNumReceivers; ++i) { rtcp::ReceiveTimeInfo last_xr_rr; @@ -518,7 +502,7 @@ TEST_F(RtcpSenderTest, SendXrWithMultipleDlrrSubBlocks) { feedback_state.last_xr_rtis.push_back(last_xr_rr); } - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state, kRtcpReport)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state, kRtcpReport)); EXPECT_EQ(1, parser()->xr()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc()); ASSERT_THAT(parser()->xr()->dlrr().sub_blocks(), SizeIs(kNumReceivers)); @@ -533,11 +517,13 @@ TEST_F(RtcpSenderTest, SendXrWithMultipleDlrrSubBlocks) { } TEST_F(RtcpSenderTest, SendXrWithRrtr) { - 
rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), false)); - rtcp_sender_->SendRtcpXrReceiverReferenceTime(true); - NtpTime ntp = TimeMicrosToNtp(clock_.TimeInMicroseconds()); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + RTCPSender::Configuration config = GetDefaultConfig(); + config.non_sender_rtt_measurement = true; + auto rtcp_sender = CreateRtcpSender(config); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + rtcp_sender->SetSendingStatus(feedback_state(), false); + NtpTime ntp = clock_.CurrentNtpTime(); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); EXPECT_EQ(1, parser()->xr()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc()); EXPECT_FALSE(parser()->xr()->dlrr()); @@ -546,34 +532,36 @@ TEST_F(RtcpSenderTest, SendXrWithRrtr) { } TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfSending) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), true)); - rtcp_sender_->SendRtcpXrReceiverReferenceTime(true); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + RTCPSender::Configuration config = GetDefaultConfig(); + config.non_sender_rtt_measurement = true; + auto rtcp_sender = CreateRtcpSender(config); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + rtcp_sender->SetSendingStatus(feedback_state(), true); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); EXPECT_EQ(0, parser()->xr()->num_packets()); } TEST_F(RtcpSenderTest, TestNoXrRrtrSentIfNotEnabled) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - EXPECT_EQ(0, rtcp_sender_->SetSendingStatus(feedback_state(), false)); - rtcp_sender_->SendRtcpXrReceiverReferenceTime(false); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + RTCPSender::Configuration config = GetDefaultConfig(); + config.non_sender_rtt_measurement = false; + auto rtcp_sender = 
CreateRtcpSender(config); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + rtcp_sender->SetSendingStatus(feedback_state(), false); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); EXPECT_EQ(0, parser()->xr()->num_packets()); } TEST_F(RtcpSenderTest, TestRegisterRtcpPacketTypeObserver) { RtcpPacketTypeCounterObserverImpl observer; - RtpRtcp::Configuration config; + RTCPSender::Configuration config; config.clock = &clock_; config.receive_statistics = receive_statistics_.get(); config.outgoing_transport = &test_transport_; config.rtcp_packet_type_counter_observer = &observer; - config.rtcp_report_interval_ms = 1000; - rtcp_sender_.reset(new RTCPSender(config)); - - rtcp_sender_->SetRemoteSSRC(kRemoteSsrc); - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpPli)); + config.rtcp_report_interval = TimeDelta::Millis(1000); + auto rtcp_sender = CreateRtcpSender(config); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpPli)); EXPECT_EQ(1, parser()->pli()->num_packets()); EXPECT_EQ(kRemoteSsrc, observer.ssrc_); EXPECT_EQ(1U, observer.counter_.pli_packets); @@ -583,9 +571,10 @@ TEST_F(RtcpSenderTest, TestRegisterRtcpPacketTypeObserver) { TEST_F(RtcpSenderTest, SendTmmbr) { const unsigned int kBitrateBps = 312000; - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - rtcp_sender_->SetTargetBitrate(kBitrateBps); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpTmmbr)); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + rtcp_sender->SetTargetBitrate(kBitrateBps); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpTmmbr)); EXPECT_EQ(1, parser()->tmmbr()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->tmmbr()->sender_ssrc()); EXPECT_EQ(1U, parser()->tmmbr()->requests().size()); @@ -593,36 +582,19 @@ TEST_F(RtcpSenderTest, SendTmmbr) { // 
TODO(asapersson): tmmbr_item()->Overhead() looks broken, always zero. } -TEST_F(RtcpSenderTest, TmmbrIncludedInCompoundPacketIfEnabled) { - const unsigned int kBitrateBps = 312000; - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - EXPECT_FALSE(rtcp_sender_->TMMBR()); - rtcp_sender_->SetTMMBRStatus(true); - EXPECT_TRUE(rtcp_sender_->TMMBR()); - rtcp_sender_->SetTargetBitrate(kBitrateBps); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); - EXPECT_EQ(1, parser()->tmmbr()->num_packets()); - EXPECT_EQ(1U, parser()->tmmbr()->requests().size()); - // TMMBR should be included in each compound packet. - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); - EXPECT_EQ(2, parser()->tmmbr()->num_packets()); - - rtcp_sender_->SetTMMBRStatus(false); - EXPECT_FALSE(rtcp_sender_->TMMBR()); -} - TEST_F(RtcpSenderTest, SendTmmbn) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - rtcp_sender_->SetSendingStatus(feedback_state(), true); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + rtcp_sender->SetSendingStatus(feedback_state(), true); std::vector bounding_set; const uint32_t kBitrateBps = 32768000; const uint32_t kPacketOh = 40; const uint32_t kSourceSsrc = 12345; const rtcp::TmmbItem tmmbn(kSourceSsrc, kBitrateBps, kPacketOh); bounding_set.push_back(tmmbn); - rtcp_sender_->SetTmmbn(bounding_set); + rtcp_sender->SetTmmbn(bounding_set); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSr)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpSr)); EXPECT_EQ(1, parser()->sender_report()->num_packets()); EXPECT_EQ(1, parser()->tmmbn()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->tmmbn()->sender_ssrc()); @@ -639,33 +611,20 @@ TEST_F(RtcpSenderTest, SendTmmbn) { // See http://code.google.com/p/webrtc/issues/detail?id=468 for one // situation where this caused confusion. 
TEST_F(RtcpSenderTest, SendsTmmbnIfSetAndEmpty) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - rtcp_sender_->SetSendingStatus(feedback_state(), true); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + rtcp_sender->SetSendingStatus(feedback_state(), true); std::vector bounding_set; - rtcp_sender_->SetTmmbn(bounding_set); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpSr)); + rtcp_sender->SetTmmbn(bounding_set); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpSr)); EXPECT_EQ(1, parser()->sender_report()->num_packets()); EXPECT_EQ(1, parser()->tmmbn()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->tmmbn()->sender_ssrc()); EXPECT_EQ(0U, parser()->tmmbn()->items().size()); } -TEST_F(RtcpSenderTest, SendCompoundPliRemb) { - const int kBitrate = 261011; - std::vector ssrcs; - ssrcs.push_back(kRemoteSsrc); - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - rtcp_sender_->SetRemb(kBitrate, ssrcs); - std::set packet_types; - packet_types.insert(kRtcpRemb); - packet_types.insert(kRtcpPli); - EXPECT_EQ(0, rtcp_sender_->SendCompoundRTCP(feedback_state(), packet_types)); - EXPECT_EQ(1, parser()->remb()->num_packets()); - EXPECT_EQ(1, parser()->pli()->num_packets()); -} - // This test is written to verify that BYE is always the last packet -// type in a RTCP compoud packet. The rtcp_sender_ is recreated with +// type in a RTCP compoud packet. The rtcp_sender is recreated with // mock_transport, which is used to check for whether BYE at the end // of a RTCP compound packet. 
TEST_F(RtcpSenderTest, ByeMustBeLast) { @@ -690,28 +649,28 @@ TEST_F(RtcpSenderTest, ByeMustBeLast) { return true; })); - // Re-configure rtcp_sender_ with mock_transport_ - RtpRtcp::Configuration config; + // Re-configure rtcp_sender with mock_transport_ + RTCPSender::Configuration config; config.clock = &clock_; config.receive_statistics = receive_statistics_.get(); config.outgoing_transport = &mock_transport; - config.rtcp_report_interval_ms = 1000; + config.rtcp_report_interval = TimeDelta::Millis(1000); config.local_media_ssrc = kSenderSsrc; - rtcp_sender_.reset(new RTCPSender(config)); + auto rtcp_sender = CreateRtcpSender(config); - rtcp_sender_->SetRemoteSSRC(kRemoteSsrc); - rtcp_sender_->SetTimestampOffset(kStartRtpTimestamp); - rtcp_sender_->SetLastRtpTime(kRtpTimestamp, clock_.TimeInMilliseconds(), - /*payload_type=*/0); + rtcp_sender->SetTimestampOffset(kStartRtpTimestamp); + rtcp_sender->SetLastRtpTime(kRtpTimestamp, clock_.CurrentTime(), + /*payload_type=*/0); // Set up REMB info to be included with BYE. 
- rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - rtcp_sender_->SetRemb(1234, {}); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpBye)); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + rtcp_sender->SetRemb(1234, {}); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpBye)); } TEST_F(RtcpSenderTest, SendXrWithTargetBitrate) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); const size_t kNumSpatialLayers = 2; const size_t kNumTemporalLayers = 2; VideoBitrateAllocation allocation; @@ -720,9 +679,9 @@ TEST_F(RtcpSenderTest, SendXrWithTargetBitrate) { for (size_t tl = 0; tl < kNumTemporalLayers; ++tl) allocation.SetBitrate(sl, tl, start_bitrate_bps + (tl * 20000)); } - rtcp_sender_->SetVideoBitrateAllocation(allocation); + rtcp_sender->SetVideoBitrateAllocation(allocation); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); EXPECT_EQ(1, parser()->xr()->num_packets()); EXPECT_EQ(kSenderSsrc, parser()->xr()->sender_ssrc()); const absl::optional& target_bitrate = @@ -747,48 +706,50 @@ TEST_F(RtcpSenderTest, SendXrWithTargetBitrate) { TEST_F(RtcpSenderTest, SendImmediateXrWithTargetBitrate) { // Initialize. Send a first report right away. - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); clock_.AdvanceTimeMilliseconds(5); // Video bitrate allocation generated, save until next time we send a report. 
VideoBitrateAllocation allocation; allocation.SetBitrate(0, 0, 100000); - rtcp_sender_->SetVideoBitrateAllocation(allocation); + rtcp_sender->SetVideoBitrateAllocation(allocation); // First seen instance will be sent immediately. - EXPECT_TRUE(rtcp_sender_->TimeToSendRTCPReport(false)); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + EXPECT_TRUE(rtcp_sender->TimeToSendRTCPReport(false)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); clock_.AdvanceTimeMilliseconds(5); // Update bitrate of existing layer, does not quality for immediate sending. allocation.SetBitrate(0, 0, 150000); - rtcp_sender_->SetVideoBitrateAllocation(allocation); - EXPECT_FALSE(rtcp_sender_->TimeToSendRTCPReport(false)); + rtcp_sender->SetVideoBitrateAllocation(allocation); + EXPECT_FALSE(rtcp_sender->TimeToSendRTCPReport(false)); // A new spatial layer enabled, signal this as soon as possible. allocation.SetBitrate(1, 0, 200000); - rtcp_sender_->SetVideoBitrateAllocation(allocation); - EXPECT_TRUE(rtcp_sender_->TimeToSendRTCPReport(false)); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + rtcp_sender->SetVideoBitrateAllocation(allocation); + EXPECT_TRUE(rtcp_sender->TimeToSendRTCPReport(false)); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); clock_.AdvanceTimeMilliseconds(5); // Explicitly disable top layer. The same set of layers now has a bitrate // defined, but the explicit 0 indicates shutdown. Signal immediately. 
allocation.SetBitrate(1, 0, 0); - EXPECT_FALSE(rtcp_sender_->TimeToSendRTCPReport(false)); - rtcp_sender_->SetVideoBitrateAllocation(allocation); - EXPECT_TRUE(rtcp_sender_->TimeToSendRTCPReport(false)); + EXPECT_FALSE(rtcp_sender->TimeToSendRTCPReport(false)); + rtcp_sender->SetVideoBitrateAllocation(allocation); + EXPECT_TRUE(rtcp_sender->TimeToSendRTCPReport(false)); } TEST_F(RtcpSenderTest, SendTargetBitrateExplicitZeroOnStreamRemoval) { // Set up and send a bitrate allocation with two layers. - rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kCompound); VideoBitrateAllocation allocation; allocation.SetBitrate(0, 0, 100000); allocation.SetBitrate(1, 0, 200000); - rtcp_sender_->SetVideoBitrateAllocation(allocation); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + rtcp_sender->SetVideoBitrateAllocation(allocation); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); absl::optional target_bitrate = parser()->xr()->target_bitrate(); ASSERT_TRUE(target_bitrate); @@ -803,8 +764,8 @@ TEST_F(RtcpSenderTest, SendTargetBitrateExplicitZeroOnStreamRemoval) { // Create a new allocation, where the second stream is no longer available. 
VideoBitrateAllocation new_allocation; new_allocation.SetBitrate(0, 0, 150000); - rtcp_sender_->SetVideoBitrateAllocation(new_allocation); - EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpReport)); + rtcp_sender->SetVideoBitrateAllocation(new_allocation); + EXPECT_EQ(0, rtcp_sender->SendRTCP(feedback_state(), kRtcpReport)); target_bitrate = parser()->xr()->target_bitrate(); ASSERT_TRUE(target_bitrate); bitrates = target_bitrate->GetTargetBitrates(); @@ -818,15 +779,17 @@ TEST_F(RtcpSenderTest, SendTargetBitrateExplicitZeroOnStreamRemoval) { } TEST_F(RtcpSenderTest, DoesntSchedulesInitialReportWhenSsrcSetOnConstruction) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); - rtcp_sender_->SetRemoteSSRC(kRemoteSsrc); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); + rtcp_sender->SetRemoteSSRC(kRemoteSsrc); // New report should not have been scheduled yet. clock_.AdvanceTimeMilliseconds(100); - EXPECT_FALSE(rtcp_sender_->TimeToSendRTCPReport(false)); + EXPECT_FALSE(rtcp_sender->TimeToSendRTCPReport(false)); } TEST_F(RtcpSenderTest, SendsCombinedRtcpPacket) { - rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize); + auto rtcp_sender = CreateRtcpSender(GetDefaultConfig()); + rtcp_sender->SetRTCPStatus(RtcpMode::kReducedSize); std::vector> packets; auto transport_feedback = std::make_unique(); @@ -834,7 +797,7 @@ TEST_F(RtcpSenderTest, SendsCombinedRtcpPacket) { packets.push_back(std::move(transport_feedback)); auto remote_estimate = std::make_unique(); packets.push_back(std::move(remote_estimate)); - rtcp_sender_->SendCombinedRtcpPacket(std::move(packets)); + rtcp_sender->SendCombinedRtcpPacket(std::move(packets)); EXPECT_EQ(parser()->transport_feedback()->num_packets(), 1); EXPECT_EQ(parser()->transport_feedback()->sender_ssrc(), kSenderSsrc); diff --git a/modules/rtp_rtcp/source/rtcp_transceiver.cc b/modules/rtp_rtcp/source/rtcp_transceiver.cc index 1de581849b..41fa5e6206 100644 --- 
a/modules/rtp_rtcp/source/rtcp_transceiver.cc +++ b/modules/rtp_rtcp/source/rtcp_transceiver.cc @@ -14,6 +14,7 @@ #include #include +#include "api/units/timestamp.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "rtc_base/checks.h" #include "rtc_base/event.h" @@ -23,7 +24,8 @@ namespace webrtc { RtcpTransceiver::RtcpTransceiver(const RtcpTransceiverConfig& config) - : task_queue_(config.task_queue), + : clock_(config.clock), + task_queue_(config.task_queue), rtcp_transceiver_(std::make_unique(config)) { RTC_DCHECK(task_queue_); } @@ -82,9 +84,9 @@ void RtcpTransceiver::SetReadyToSend(bool ready) { void RtcpTransceiver::ReceivePacket(rtc::CopyOnWriteBuffer packet) { RTC_CHECK(rtcp_transceiver_); RtcpTransceiverImpl* ptr = rtcp_transceiver_.get(); - int64_t now_us = rtc::TimeMicros(); - task_queue_->PostTask(ToQueuedTask( - [ptr, packet, now_us] { ptr->ReceivePacket(packet, now_us); })); + Timestamp now = clock_->CurrentTime(); + task_queue_->PostTask( + ToQueuedTask([ptr, packet, now] { ptr->ReceivePacket(packet, now); })); } void RtcpTransceiver::SendCompoundPacket() { diff --git a/modules/rtp_rtcp/source/rtcp_transceiver.h b/modules/rtp_rtcp/source/rtcp_transceiver.h index 2d1f37cd44..52f4610716 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver.h +++ b/modules/rtp_rtcp/source/rtcp_transceiver.h @@ -20,6 +20,7 @@ #include "modules/rtp_rtcp/source/rtcp_transceiver_config.h" #include "modules/rtp_rtcp/source/rtcp_transceiver_impl.h" #include "rtc_base/copy_on_write_buffer.h" +#include "system_wrappers/include/clock.h" namespace webrtc { // @@ -93,6 +94,7 @@ class RtcpTransceiver : public RtcpFeedbackSenderInterface { void SendFullIntraRequest(std::vector ssrcs, bool new_request); private: + Clock* const clock_; TaskQueueBase* const task_queue_; std::unique_ptr rtcp_transceiver_; }; diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_config.h b/modules/rtp_rtcp/source/rtcp_transceiver_config.h index 2cbd1045d2..0501b9af7f 100644 
--- a/modules/rtp_rtcp/source/rtcp_transceiver_config.h +++ b/modules/rtp_rtcp/source/rtcp_transceiver_config.h @@ -17,6 +17,7 @@ #include "api/task_queue/task_queue_base.h" #include "api/video/video_bitrate_allocation.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "system_wrappers/include/clock.h" #include "system_wrappers/include/ntp_time.h" namespace webrtc { @@ -28,8 +29,8 @@ class MediaReceiverRtcpObserver { public: virtual ~MediaReceiverRtcpObserver() = default; - // All message handlers have default empty implementation. This way user needs - // to implement only those she is interested in. + // All message handlers have default empty implementation. This way users only + // need to implement the ones they are interested in. virtual void OnSenderReport(uint32_t sender_ssrc, NtpTime ntp_time, uint32_t rtp_time) {} @@ -61,6 +62,9 @@ struct RtcpTransceiverConfig { // Maximum packet size outgoing transport accepts. size_t max_packet_size = 1200; + // The clock to use when querying for the NTP time. Should be set. + Clock* clock = nullptr; + // Transport to send rtcp packets to. Should be set. 
Transport* outgoing_transport = nullptr; diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc b/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc index 0102616d59..5753ffd692 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc +++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc @@ -40,7 +40,7 @@ namespace webrtc { namespace { struct SenderReportTimes { - int64_t local_received_time_us; + Timestamp local_received_time; NtpTime remote_sent_time; }; @@ -92,9 +92,7 @@ RtcpTransceiverImpl::RtcpTransceiverImpl(const RtcpTransceiverConfig& config) : config_(config), ready_to_send_(config.initial_ready_to_send) { RTC_CHECK(config_.Validate()); if (ready_to_send_ && config_.schedule_periodic_compound_packets) { - config_.task_queue->PostTask(ToQueuedTask([this] { - SchedulePeriodicCompoundPackets(config_.initial_report_delay_ms); - })); + SchedulePeriodicCompoundPackets(config_.initial_report_delay_ms); } } @@ -133,13 +131,13 @@ void RtcpTransceiverImpl::SetReadyToSend(bool ready) { } void RtcpTransceiverImpl::ReceivePacket(rtc::ArrayView packet, - int64_t now_us) { + Timestamp now) { while (!packet.empty()) { rtcp::CommonHeader rtcp_block; if (!rtcp_block.Parse(packet.data(), packet.size())) return; - HandleReceivedPacket(rtcp_block, now_us); + HandleReceivedPacket(rtcp_block, now); // TODO(danilchap): Use packet.remove_prefix() when that function exists. 
packet = packet.subview(rtcp_block.packet_size()); @@ -228,16 +226,16 @@ void RtcpTransceiverImpl::SendFullIntraRequest( void RtcpTransceiverImpl::HandleReceivedPacket( const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us) { + Timestamp now) { switch (rtcp_packet_header.type()) { case rtcp::Bye::kPacketType: HandleBye(rtcp_packet_header); break; case rtcp::SenderReport::kPacketType: - HandleSenderReport(rtcp_packet_header, now_us); + HandleSenderReport(rtcp_packet_header, now); break; case rtcp::ExtendedReports::kPacketType: - HandleExtendedReports(rtcp_packet_header, now_us); + HandleExtendedReports(rtcp_packet_header, now); break; } } @@ -256,17 +254,14 @@ void RtcpTransceiverImpl::HandleBye( void RtcpTransceiverImpl::HandleSenderReport( const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us) { + Timestamp now) { rtcp::SenderReport sender_report; if (!sender_report.Parse(rtcp_packet_header)) return; RemoteSenderState& remote_sender = remote_senders_[sender_report.sender_ssrc()]; - absl::optional& last = - remote_sender.last_received_sender_report; - last.emplace(); - last->local_received_time_us = now_us; - last->remote_sent_time = sender_report.ntp(); + remote_sender.last_received_sender_report = + absl::optional({now, sender_report.ntp()}); for (MediaReceiverRtcpObserver* observer : remote_sender.observers) observer->OnSenderReport(sender_report.sender_ssrc(), sender_report.ntp(), @@ -275,26 +270,27 @@ void RtcpTransceiverImpl::HandleSenderReport( void RtcpTransceiverImpl::HandleExtendedReports( const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us) { + Timestamp now) { rtcp::ExtendedReports extended_reports; if (!extended_reports.Parse(rtcp_packet_header)) return; if (extended_reports.dlrr()) - HandleDlrr(extended_reports.dlrr(), now_us); + HandleDlrr(extended_reports.dlrr(), now); if (extended_reports.target_bitrate()) HandleTargetBitrate(*extended_reports.target_bitrate(), extended_reports.sender_ssrc()); } -void 
RtcpTransceiverImpl::HandleDlrr(const rtcp::Dlrr& dlrr, int64_t now_us) { +void RtcpTransceiverImpl::HandleDlrr(const rtcp::Dlrr& dlrr, Timestamp now) { if (!config_.non_sender_rtt_measurement || config_.rtt_observer == nullptr) return; // Delay and last_rr are transferred using 32bit compact ntp resolution. // Convert packet arrival time to same format through 64bit ntp format. - uint32_t receive_time_ntp = CompactNtp(TimeMicrosToNtp(now_us)); + uint32_t receive_time_ntp = + CompactNtp(config_.clock->ConvertTimestampToNtpTime(now)); for (const rtcp::ReceiveTimeInfo& rti : dlrr.sub_blocks()) { if (rti.ssrc != config_.feedback_ssrc) continue; @@ -353,13 +349,16 @@ void RtcpTransceiverImpl::SchedulePeriodicCompoundPackets(int64_t delay_ms) { void RtcpTransceiverImpl::CreateCompoundPacket(PacketSender* sender) { RTC_DCHECK(sender->IsEmpty()); const uint32_t sender_ssrc = config_.feedback_ssrc; - int64_t now_us = rtc::TimeMicros(); + Timestamp now = config_.clock->CurrentTime(); rtcp::ReceiverReport receiver_report; receiver_report.SetSenderSsrc(sender_ssrc); - receiver_report.SetReportBlocks(CreateReportBlocks(now_us)); - sender->AppendPacket(receiver_report); + receiver_report.SetReportBlocks(CreateReportBlocks(now)); + if (config_.rtcp_mode == RtcpMode::kCompound || + !receiver_report.report_blocks().empty()) { + sender->AppendPacket(receiver_report); + } - if (!config_.cname.empty()) { + if (!config_.cname.empty() && !sender->IsEmpty()) { rtcp::Sdes sdes; bool added = sdes.AddCName(config_.feedback_ssrc, config_.cname); RTC_DCHECK(added) << "Failed to add cname " << config_.cname @@ -377,7 +376,7 @@ void RtcpTransceiverImpl::CreateCompoundPacket(PacketSender* sender) { rtcp::ExtendedReports xr; rtcp::Rrtr rrtr; - rrtr.SetNtp(TimeMicrosToNtp(now_us)); + rrtr.SetNtp(config_.clock->ConvertTimestampToNtpTime(now)); xr.SetRrtr(rrtr); xr.SetSenderSsrc(sender_ssrc); @@ -428,7 +427,7 @@ void RtcpTransceiverImpl::SendImmediateFeedback( } std::vector 
RtcpTransceiverImpl::CreateReportBlocks( - int64_t now_us) { + Timestamp now) { if (!config_.receive_statistics) return {}; // TODO(danilchap): Support sending more than @@ -448,7 +447,7 @@ std::vector RtcpTransceiverImpl::CreateReportBlocks( *it->second.last_received_sender_report; last_sr = CompactNtp(last_sender_report.remote_sent_time); last_delay = SaturatedUsToCompactNtp( - now_us - last_sender_report.local_received_time_us); + now.us() - last_sender_report.local_received_time.us()); report_block.SetLastSr(last_sr); report_block.SetDelayLastSr(last_delay); } diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl.h b/modules/rtp_rtcp/source/rtcp_transceiver_impl.h index 6a6454662c..bcdee83e56 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver_impl.h +++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl.h @@ -18,6 +18,7 @@ #include "absl/types/optional.h" #include "api/array_view.h" +#include "api/units/timestamp.h" #include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" #include "modules/rtp_rtcp/source/rtcp_packet/remb.h" @@ -48,7 +49,7 @@ class RtcpTransceiverImpl { void SetReadyToSend(bool ready); - void ReceivePacket(rtc::ArrayView packet, int64_t now_us); + void ReceivePacket(rtc::ArrayView packet, Timestamp now); void SendCompoundPacket(); @@ -76,15 +77,15 @@ class RtcpTransceiverImpl { struct RemoteSenderState; void HandleReceivedPacket(const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us); + Timestamp now); // Individual rtcp packet handlers. void HandleBye(const rtcp::CommonHeader& rtcp_packet_header); void HandleSenderReport(const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us); + Timestamp now); void HandleExtendedReports(const rtcp::CommonHeader& rtcp_packet_header, - int64_t now_us); + Timestamp now); // Extended Reports blocks handlers. 
- void HandleDlrr(const rtcp::Dlrr& dlrr, int64_t now_us); + void HandleDlrr(const rtcp::Dlrr& dlrr, Timestamp now); void HandleTargetBitrate(const rtcp::TargetBitrate& target_bitrate, uint32_t remote_ssrc); @@ -97,7 +98,7 @@ class RtcpTransceiverImpl { void SendPeriodicCompoundPacket(); void SendImmediateFeedback(const rtcp::RtcpPacket& rtcp_packet); // Generate Report Blocks to be send in Sender or Receiver Report. - std::vector CreateReportBlocks(int64_t now_us); + std::vector CreateReportBlocks(Timestamp now); const RtcpTransceiverConfig config_; diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc b/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc index 727a9bca23..06e1083aa8 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc @@ -10,10 +10,14 @@ #include "modules/rtp_rtcp/source/rtcp_transceiver_impl.h" +#include +#include #include #include "absl/memory/memory.h" #include "api/rtp_headers.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "api/video/video_bitrate_allocation.h" #include "modules/rtp_rtcp/include/receive_statistics.h" #include "modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h" @@ -22,8 +26,9 @@ #include "modules/rtp_rtcp/source/rtcp_packet/compound_packet.h" #include "modules/rtp_rtcp/source/time_util.h" #include "rtc_base/event.h" -#include "rtc_base/fake_clock.h" #include "rtc_base/task_queue_for_test.h" +#include "rtc_base/time_utils.h" +#include "system_wrappers/include/clock.h" #include "test/gmock.h" #include "test/gtest.h" #include "test/mock_transport.h" @@ -33,6 +38,7 @@ namespace { using ::testing::_; using ::testing::ElementsAre; +using ::testing::NiceMock; using ::testing::Return; using ::testing::SizeIs; using ::testing::StrictMock; @@ -44,8 +50,10 @@ using ::webrtc::NtpTime; using ::webrtc::RtcpTransceiverConfig; using ::webrtc::RtcpTransceiverImpl; using ::webrtc::SaturatedUsToCompactNtp; +using 
::webrtc::SimulatedClock; using ::webrtc::TaskQueueForTest; -using ::webrtc::TimeMicrosToNtp; +using ::webrtc::TimeDelta; +using ::webrtc::Timestamp; using ::webrtc::VideoBitrateAllocation; using ::webrtc::rtcp::Bye; using ::webrtc::rtcp::CompoundPacket; @@ -55,15 +63,17 @@ using ::webrtc::test::RtcpPacketParser; class MockReceiveStatisticsProvider : public webrtc::ReceiveStatisticsProvider { public: - MOCK_METHOD1(RtcpReportBlocks, std::vector(size_t)); + MOCK_METHOD(std::vector, RtcpReportBlocks, (size_t), (override)); }; class MockMediaReceiverRtcpObserver : public webrtc::MediaReceiverRtcpObserver { public: - MOCK_METHOD3(OnSenderReport, void(uint32_t, NtpTime, uint32_t)); - MOCK_METHOD1(OnBye, void(uint32_t)); - MOCK_METHOD2(OnBitrateAllocation, - void(uint32_t, const VideoBitrateAllocation&)); + MOCK_METHOD(void, OnSenderReport, (uint32_t, NtpTime, uint32_t), (override)); + MOCK_METHOD(void, OnBye, (uint32_t), (override)); + MOCK_METHOD(void, + OnBitrateAllocation, + (uint32_t, const VideoBitrateAllocation&), + (override)); }; // Since some tests will need to wait for this period, make it small to avoid @@ -138,9 +148,11 @@ RtcpTransceiverConfig DefaultTestConfig() { } TEST(RtcpTransceiverImplTest, NeedToStopPeriodicTaskToDestroyOnTaskQueue) { + SimulatedClock clock(0); FakeRtcpTransport transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.task_queue = queue.Get(); config.schedule_periodic_compound_packets = true; config.outgoing_transport = &transport; @@ -157,10 +169,31 @@ TEST(RtcpTransceiverImplTest, NeedToStopPeriodicTaskToDestroyOnTaskQueue) { ASSERT_TRUE(done.Wait(/*milliseconds=*/1000)); } +TEST(RtcpTransceiverImplTest, CanBeDestroyedRightAfterCreation) { + SimulatedClock clock(0); + FakeRtcpTransport transport; + TaskQueueForTest queue("rtcp"); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + config.task_queue = queue.Get(); + 
config.schedule_periodic_compound_packets = true; + config.outgoing_transport = &transport; + + rtc::Event done; + queue.PostTask([&] { + RtcpTransceiverImpl rtcp_transceiver(config); + rtcp_transceiver.StopPeriodicTask(); + done.Set(); + }); + ASSERT_TRUE(done.Wait(/*milliseconds=*/1000)); +} + TEST(RtcpTransceiverImplTest, CanDestroyAfterTaskQueue) { + SimulatedClock clock(0); FakeRtcpTransport transport; auto* queue = new TaskQueueForTest("rtcp"); RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.task_queue = queue->Get(); config.schedule_periodic_compound_packets = true; config.outgoing_transport = &transport; @@ -173,9 +206,11 @@ TEST(RtcpTransceiverImplTest, CanDestroyAfterTaskQueue) { } TEST(RtcpTransceiverImplTest, DelaysSendingFirstCompondPacket) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); FakeRtcpTransport transport; RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &transport; config.initial_report_delay_ms = 10; config.task_queue = queue.Get(); @@ -198,9 +233,11 @@ TEST(RtcpTransceiverImplTest, DelaysSendingFirstCompondPacket) { } TEST(RtcpTransceiverImplTest, PeriodicallySendsPackets) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); FakeRtcpTransport transport; RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &transport; config.initial_report_delay_ms = 0; config.report_period_ms = kReportPeriodMs; @@ -232,9 +269,11 @@ TEST(RtcpTransceiverImplTest, PeriodicallySendsPackets) { } TEST(RtcpTransceiverImplTest, SendCompoundPacketDelaysPeriodicSendPackets) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); FakeRtcpTransport transport; RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &transport; config.initial_report_delay_ms = 0; config.report_period_ms = kReportPeriodMs; @@ -278,8 +317,10 @@ TEST(RtcpTransceiverImplTest, SendCompoundPacketDelaysPeriodicSendPackets) { } 
TEST(RtcpTransceiverImplTest, SendsNoRtcpWhenNetworkStateIsDown) { + SimulatedClock clock(0); MockTransport mock_transport; RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.initial_ready_to_send = false; config.outgoing_transport = &mock_transport; RtcpTransceiverImpl rtcp_transceiver(config); @@ -297,8 +338,10 @@ TEST(RtcpTransceiverImplTest, SendsNoRtcpWhenNetworkStateIsDown) { } TEST(RtcpTransceiverImplTest, SendsRtcpWhenNetworkStateIsUp) { + SimulatedClock clock(0); MockTransport mock_transport; RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.initial_ready_to_send = false; config.outgoing_transport = &mock_transport; RtcpTransceiverImpl rtcp_transceiver(config); @@ -318,9 +361,11 @@ TEST(RtcpTransceiverImplTest, SendsRtcpWhenNetworkStateIsUp) { } TEST(RtcpTransceiverImplTest, SendsPeriodicRtcpWhenNetworkStateIsUp) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); FakeRtcpTransport transport; RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; config.schedule_periodic_compound_packets = true; config.initial_ready_to_send = false; config.outgoing_transport = &transport; @@ -344,7 +389,9 @@ TEST(RtcpTransceiverImplTest, SendsPeriodicRtcpWhenNetworkStateIsUp) { TEST(RtcpTransceiverImplTest, SendsMinimalCompoundPacket) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.cname = "cname"; RtcpPacketParser rtcp_parser; @@ -365,9 +412,52 @@ TEST(RtcpTransceiverImplTest, SendsMinimalCompoundPacket) { EXPECT_EQ(rtcp_parser.sdes()->chunks()[0].cname, config.cname); } +TEST(RtcpTransceiverImplTest, AvoidsEmptyPacketsInReducedMode) { + MockTransport transport; + EXPECT_CALL(transport, SendRtcp).Times(0); + NiceMock receive_statistics; + SimulatedClock clock(0); + + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + 
config.outgoing_transport = &transport; + config.rtcp_mode = webrtc::RtcpMode::kReducedSize; + config.schedule_periodic_compound_packets = false; + config.receive_statistics = &receive_statistics; + RtcpTransceiverImpl rtcp_transceiver(config); + + rtcp_transceiver.SendCompoundPacket(); +} + +TEST(RtcpTransceiverImplTest, AvoidsEmptyReceiverReportsInReducedMode) { + RtcpPacketParser rtcp_parser; + RtcpParserTransport transport(&rtcp_parser); + NiceMock receive_statistics; + SimulatedClock clock(0); + + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + config.outgoing_transport = &transport; + config.rtcp_mode = webrtc::RtcpMode::kReducedSize; + config.schedule_periodic_compound_packets = false; + config.receive_statistics = &receive_statistics; + // Set it to produce something (RRTR) in the "periodic" rtcp packets. + config.non_sender_rtt_measurement = true; + RtcpTransceiverImpl rtcp_transceiver(config); + + // Rather than waiting for the right time to produce the periodic packet, + // trigger it manually. 
+ rtcp_transceiver.SendCompoundPacket(); + + EXPECT_EQ(rtcp_parser.receiver_report()->num_packets(), 0); + EXPECT_GT(rtcp_parser.xr()->num_packets(), 0); +} + TEST(RtcpTransceiverImplTest, SendsNoRembInitially) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -383,7 +473,9 @@ TEST(RtcpTransceiverImplTest, SendsNoRembInitially) { TEST(RtcpTransceiverImplTest, SetRembIncludesRembInNextCompoundPacket) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -402,7 +494,9 @@ TEST(RtcpTransceiverImplTest, SetRembIncludesRembInNextCompoundPacket) { TEST(RtcpTransceiverImplTest, SetRembUpdatesValuesToSend) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -427,7 +521,9 @@ TEST(RtcpTransceiverImplTest, SetRembUpdatesValuesToSend) { TEST(RtcpTransceiverImplTest, SetRembSendsImmediatelyIfSendRembOnChange) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.send_remb_on_change = true; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; @@ -453,7 +549,9 @@ TEST(RtcpTransceiverImplTest, SetRembSendsImmediatelyIfSendRembOnChange) { TEST(RtcpTransceiverImplTest, SetRembSendsImmediatelyIfSendRembOnChangeReducedSize) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.send_remb_on_change = true; config.rtcp_mode = webrtc::RtcpMode::kReducedSize; config.feedback_ssrc = kSenderSsrc; @@ 
-471,7 +569,9 @@ TEST(RtcpTransceiverImplTest, TEST(RtcpTransceiverImplTest, SetRembIncludesRembInAllCompoundPackets) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -489,7 +589,9 @@ TEST(RtcpTransceiverImplTest, SetRembIncludesRembInAllCompoundPackets) { TEST(RtcpTransceiverImplTest, SendsNoRembAfterUnset) { const uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -518,7 +620,9 @@ TEST(RtcpTransceiverImplTest, ReceiverReportUsesReceiveStatistics) { EXPECT_CALL(receive_statistics, RtcpReportBlocks(_)) .WillRepeatedly(Return(report_blocks)); + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -539,9 +643,12 @@ TEST(RtcpTransceiverImplTest, ReceiverReportUsesReceiveStatistics) { TEST(RtcpTransceiverImplTest, MultipleObserversOnSameSsrc) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer2); @@ -555,14 +662,17 @@ TEST(RtcpTransceiverImplTest, MultipleObserversOnSameSsrc) { EXPECT_CALL(observer1, OnSenderReport(kRemoteSsrc, kRemoteNtp, kRemoteRtp)); EXPECT_CALL(observer2, OnSenderReport(kRemoteSsrc, kRemoteNtp, kRemoteRtp)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + 
rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, DoesntCallsObserverAfterRemoved) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer2); @@ -574,15 +684,18 @@ TEST(RtcpTransceiverImplTest, DoesntCallsObserverAfterRemoved) { EXPECT_CALL(observer1, OnSenderReport(_, _, _)).Times(0); EXPECT_CALL(observer2, OnSenderReport(_, _, _)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnSenderReportBySenderSsrc) { const uint32_t kRemoteSsrc1 = 12345; const uint32_t kRemoteSsrc2 = 22345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2); @@ -596,15 +709,18 @@ TEST(RtcpTransceiverImplTest, CallsObserverOnSenderReportBySenderSsrc) { EXPECT_CALL(observer1, OnSenderReport(kRemoteSsrc1, kRemoteNtp, kRemoteRtp)); EXPECT_CALL(observer2, OnSenderReport(_, _, _)).Times(0); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnByeBySenderSsrc) { const uint32_t kRemoteSsrc1 = 12345; const uint32_t kRemoteSsrc2 = 22345; + SimulatedClock clock(0); StrictMock observer1; 
StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2); @@ -614,15 +730,18 @@ TEST(RtcpTransceiverImplTest, CallsObserverOnByeBySenderSsrc) { EXPECT_CALL(observer1, OnBye(kRemoteSsrc1)); EXPECT_CALL(observer2, OnBye(_)).Times(0); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnTargetBitrateBySenderSsrc) { const uint32_t kRemoteSsrc1 = 12345; const uint32_t kRemoteSsrc2 = 22345; + SimulatedClock clock(0); StrictMock observer1; StrictMock observer2; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc1, &observer1); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc2, &observer2); @@ -643,13 +762,16 @@ TEST(RtcpTransceiverImplTest, CallsObserverOnTargetBitrateBySenderSsrc) { bitrate_allocation.SetBitrate(1, 1, /*bitrate_bps=*/80000); EXPECT_CALL(observer1, OnBitrateAllocation(kRemoteSsrc1, bitrate_allocation)); EXPECT_CALL(observer2, OnBitrateAllocation(_, _)).Times(0); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, SkipsIncorrectTargetBitrateEntries) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); MockMediaReceiverRtcpObserver observer; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); 
rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer); webrtc::rtcp::TargetBitrate target_bitrate; @@ -665,46 +787,52 @@ TEST(RtcpTransceiverImplTest, SkipsIncorrectTargetBitrateEntries) { VideoBitrateAllocation expected_allocation; expected_allocation.SetBitrate(0, 0, /*bitrate_bps=*/10000); EXPECT_CALL(observer, OnBitrateAllocation(kRemoteSsrc, expected_allocation)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnByeBehindSenderReport) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); MockMediaReceiverRtcpObserver observer; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer); CompoundPacket compound; - SenderReport sr; - sr.SetSenderSsrc(kRemoteSsrc); - compound.Append(&sr); - Bye bye; - bye.SetSenderSsrc(kRemoteSsrc); - compound.Append(&bye); + auto sr = std::make_unique(); + sr->SetSenderSsrc(kRemoteSsrc); + compound.Append(std::move(sr)); + auto bye = std::make_unique(); + bye->SetSenderSsrc(kRemoteSsrc); + compound.Append(std::move(bye)); auto raw_packet = compound.Build(); EXPECT_CALL(observer, OnBye(kRemoteSsrc)); EXPECT_CALL(observer, OnSenderReport(kRemoteSsrc, _, _)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, CallsObserverOnByeBehindUnknownRtcpPacket) { const uint32_t kRemoteSsrc = 12345; + SimulatedClock clock(0); MockMediaReceiverRtcpObserver observer; - RtcpTransceiverImpl rtcp_transceiver(DefaultTestConfig()); + RtcpTransceiverConfig config = DefaultTestConfig(); + config.clock = &clock; + RtcpTransceiverImpl rtcp_transceiver(config); 
rtcp_transceiver.AddMediaReceiverRtcpObserver(kRemoteSsrc, &observer); CompoundPacket compound; // Use Application-Defined rtcp packet as unknown. - webrtc::rtcp::App app; - compound.Append(&app); - Bye bye; - bye.SetSenderSsrc(kRemoteSsrc); - compound.Append(&bye); + auto app = std::make_unique(); + compound.Append(std::move(app)); + auto bye = std::make_unique(); + bye->SetSenderSsrc(kRemoteSsrc); + compound.Append(std::move(bye)); auto raw_packet = compound.Build(); EXPECT_CALL(observer, OnBye(kRemoteSsrc)); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); } TEST(RtcpTransceiverImplTest, @@ -718,7 +846,9 @@ TEST(RtcpTransceiverImplTest, EXPECT_CALL(receive_statistics, RtcpReportBlocks(_)) .WillOnce(Return(statistics_report_blocks)); + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -732,7 +862,7 @@ TEST(RtcpTransceiverImplTest, sr.SetSenderSsrc(kRemoteSsrc1); sr.SetNtp(kRemoteNtp); auto raw_packet = sr.Build(); - rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0); + rtcp_transceiver.ReceivePacket(raw_packet, Timestamp::Micros(0)); // Trigger sending ReceiverReport. 
rtcp_transceiver.SendCompoundPacket(); @@ -755,7 +885,7 @@ TEST(RtcpTransceiverImplTest, WhenSendsReceiverReportCalculatesDelaySinceLastSenderReport) { const uint32_t kRemoteSsrc1 = 4321; const uint32_t kRemoteSsrc2 = 5321; - rtc::ScopedFakeClock clock; + std::vector statistics_report_blocks(2); statistics_report_blocks[0].SetMediaSsrc(kRemoteSsrc1); statistics_report_blocks[1].SetMediaSsrc(kRemoteSsrc2); @@ -763,7 +893,9 @@ TEST(RtcpTransceiverImplTest, EXPECT_CALL(receive_statistics, RtcpReportBlocks(_)) .WillOnce(Return(statistics_report_blocks)); + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -771,18 +903,19 @@ TEST(RtcpTransceiverImplTest, config.receive_statistics = &receive_statistics; RtcpTransceiverImpl rtcp_transceiver(config); - auto receive_sender_report = [&rtcp_transceiver](uint32_t remote_ssrc) { + auto receive_sender_report = [&rtcp_transceiver, + &clock](uint32_t remote_ssrc) { SenderReport sr; sr.SetSenderSsrc(remote_ssrc); auto raw_packet = sr.Build(); - rtcp_transceiver.ReceivePacket(raw_packet, rtc::TimeMicros()); + rtcp_transceiver.ReceivePacket(raw_packet, clock.CurrentTime()); }; receive_sender_report(kRemoteSsrc1); - clock.AdvanceTime(webrtc::TimeDelta::Millis(100)); + clock.AdvanceTime(TimeDelta::Millis(100)); receive_sender_report(kRemoteSsrc2); - clock.AdvanceTime(webrtc::TimeDelta::Millis(100)); + clock.AdvanceTime(TimeDelta::Millis(100)); // Trigger ReceiverReport back. 
rtcp_transceiver.SendCompoundPacket(); @@ -804,7 +937,9 @@ TEST(RtcpTransceiverImplTest, SendsNack) { const uint32_t kSenderSsrc = 1234; const uint32_t kRemoteSsrc = 4321; std::vector kMissingSequenceNumbers = {34, 37, 38}; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -823,7 +958,9 @@ TEST(RtcpTransceiverImplTest, SendsNack) { TEST(RtcpTransceiverImplTest, RequestKeyFrameWithPictureLossIndication) { const uint32_t kSenderSsrc = 1234; const uint32_t kRemoteSsrc = 4321; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -842,7 +979,9 @@ TEST(RtcpTransceiverImplTest, RequestKeyFrameWithPictureLossIndication) { TEST(RtcpTransceiverImplTest, RequestKeyFrameWithFullIntraRequest) { const uint32_t kSenderSsrc = 1234; const uint32_t kRemoteSsrcs[] = {4321, 5321}; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -859,7 +998,9 @@ TEST(RtcpTransceiverImplTest, RequestKeyFrameWithFullIntraRequest) { } TEST(RtcpTransceiverImplTest, RequestKeyFrameWithFirIncreaseSeqNoPerSsrc) { + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -889,7 +1030,9 @@ TEST(RtcpTransceiverImplTest, RequestKeyFrameWithFirIncreaseSeqNoPerSsrc) { } TEST(RtcpTransceiverImplTest, SendFirDoesNotIncreaseSeqNoIfOldRequest) { + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; 
RtcpParserTransport transport(&rtcp_parser); @@ -915,7 +1058,9 @@ TEST(RtcpTransceiverImplTest, SendFirDoesNotIncreaseSeqNoIfOldRequest) { TEST(RtcpTransceiverImplTest, KeyFrameRequestCreatesCompoundPacket) { const uint32_t kRemoteSsrcs[] = {4321}; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; // Turn periodic off to ensure sent rtcp packet is explicitly requested. config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -934,7 +1079,9 @@ TEST(RtcpTransceiverImplTest, KeyFrameRequestCreatesCompoundPacket) { TEST(RtcpTransceiverImplTest, KeyFrameRequestCreatesReducedSizePacket) { const uint32_t kRemoteSsrcs[] = {4321}; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; // Turn periodic off to ensure sent rtcp packet is explicitly requested. config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -953,8 +1100,9 @@ TEST(RtcpTransceiverImplTest, KeyFrameRequestCreatesReducedSizePacket) { TEST(RtcpTransceiverImplTest, SendsXrRrtrWhenEnabled) { const uint32_t kSenderSsrc = 4321; - rtc::ScopedFakeClock clock; + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; RtcpPacketParser rtcp_parser; @@ -964,7 +1112,7 @@ TEST(RtcpTransceiverImplTest, SendsXrRrtrWhenEnabled) { RtcpTransceiverImpl rtcp_transceiver(config); rtcp_transceiver.SendCompoundPacket(); - NtpTime ntp_time_now = TimeMicrosToNtp(rtc::TimeMicros()); + NtpTime ntp_time_now = clock.CurrentNtpTime(); EXPECT_EQ(rtcp_parser.xr()->num_packets(), 1); EXPECT_EQ(rtcp_parser.xr()->sender_ssrc(), kSenderSsrc); @@ -973,7 +1121,9 @@ TEST(RtcpTransceiverImplTest, SendsXrRrtrWhenEnabled) { } TEST(RtcpTransceiverImplTest, SendsNoXrRrtrWhenDisabled) { + SimulatedClock clock(0); RtcpTransceiverConfig config; + config.clock = &clock; config.schedule_periodic_compound_packets = false; 
RtcpPacketParser rtcp_parser; RtcpParserTransport transport(&rtcp_parser); @@ -991,9 +1141,11 @@ TEST(RtcpTransceiverImplTest, SendsNoXrRrtrWhenDisabled) { TEST(RtcpTransceiverImplTest, CalculatesRoundTripTimeOnDlrr) { const uint32_t kSenderSsrc = 4321; + SimulatedClock clock(0); MockRtcpRttStats rtt_observer; MockTransport null_transport; RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; config.outgoing_transport = &null_transport; @@ -1001,25 +1153,27 @@ TEST(RtcpTransceiverImplTest, CalculatesRoundTripTimeOnDlrr) { config.rtt_observer = &rtt_observer; RtcpTransceiverImpl rtcp_transceiver(config); - int64_t time_us = 12345678; + Timestamp time = Timestamp::Micros(12345678); webrtc::rtcp::ReceiveTimeInfo rti; rti.ssrc = kSenderSsrc; - rti.last_rr = CompactNtp(TimeMicrosToNtp(time_us)); + rti.last_rr = CompactNtp(clock.ConvertTimestampToNtpTime(time)); rti.delay_since_last_rr = SaturatedUsToCompactNtp(10 * 1000); webrtc::rtcp::ExtendedReports xr; xr.AddDlrrItem(rti); auto raw_packet = xr.Build(); EXPECT_CALL(rtt_observer, OnRttUpdate(100 /* rtt_ms */)); - rtcp_transceiver.ReceivePacket(raw_packet, time_us + 110 * 1000); + rtcp_transceiver.ReceivePacket(raw_packet, time + TimeDelta::Millis(110)); } TEST(RtcpTransceiverImplTest, IgnoresUnknownSsrcInDlrr) { const uint32_t kSenderSsrc = 4321; const uint32_t kUnknownSsrc = 4322; + SimulatedClock clock(0); MockRtcpRttStats rtt_observer; MockTransport null_transport; RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.schedule_periodic_compound_packets = false; config.outgoing_transport = &null_transport; @@ -1027,16 +1181,16 @@ TEST(RtcpTransceiverImplTest, IgnoresUnknownSsrcInDlrr) { config.rtt_observer = &rtt_observer; RtcpTransceiverImpl rtcp_transceiver(config); - int64_t time_us = 12345678; + Timestamp time = Timestamp::Micros(12345678); webrtc::rtcp::ReceiveTimeInfo rti; rti.ssrc 
= kUnknownSsrc; - rti.last_rr = CompactNtp(TimeMicrosToNtp(time_us)); + rti.last_rr = CompactNtp(clock.ConvertTimestampToNtpTime(time)); webrtc::rtcp::ExtendedReports xr; xr.AddDlrrItem(rti); auto raw_packet = xr.Build(); EXPECT_CALL(rtt_observer, OnRttUpdate(_)).Times(0); - rtcp_transceiver.ReceivePacket(raw_packet, time_us + 100000); + rtcp_transceiver.ReceivePacket(raw_packet, time + TimeDelta::Millis(100)); } } // namespace diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc b/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc index 5fb2aa55eb..290aa48ff4 100644 --- a/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc +++ b/modules/rtp_rtcp/source/rtcp_transceiver_unittest.cc @@ -18,6 +18,7 @@ #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "rtc_base/event.h" #include "rtc_base/task_queue_for_test.h" +#include "system_wrappers/include/clock.h" #include "test/gmock.h" #include "test/gtest.h" #include "test/mock_transport.h" @@ -34,6 +35,7 @@ using ::testing::NiceMock; using ::webrtc::MockTransport; using ::webrtc::RtcpTransceiver; using ::webrtc::RtcpTransceiverConfig; +using ::webrtc::SimulatedClock; using ::webrtc::TaskQueueForTest; using ::webrtc::rtcp::RemoteEstimate; using ::webrtc::rtcp::RtcpPacket; @@ -42,7 +44,10 @@ using ::webrtc::test::RtcpPacketParser; class MockMediaReceiverRtcpObserver : public webrtc::MediaReceiverRtcpObserver { public: - MOCK_METHOD3(OnSenderReport, void(uint32_t, webrtc::NtpTime, uint32_t)); + MOCK_METHOD(void, + OnSenderReport, + (uint32_t, webrtc::NtpTime, uint32_t), + (override)); }; constexpr int kTimeoutMs = 1000; @@ -54,9 +59,11 @@ void WaitPostedTasks(TaskQueueForTest* queue) { } TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOffTaskQueue) { + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); 
EXPECT_CALL(outgoing_transport, SendRtcp(_, _)) @@ -71,9 +78,11 @@ TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOffTaskQueue) { } TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOnTaskQueue) { + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); EXPECT_CALL(outgoing_transport, SendRtcp(_, _)) @@ -91,9 +100,11 @@ TEST(RtcpTransceiverTest, SendsRtcpOnTaskQueueWhenCreatedOnTaskQueue) { } TEST(RtcpTransceiverTest, CanBeDestroyedOnTaskQueue) { + SimulatedClock clock(0); NiceMock outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); auto rtcp_transceiver = std::make_unique(config); @@ -107,9 +118,11 @@ TEST(RtcpTransceiverTest, CanBeDestroyedOnTaskQueue) { } TEST(RtcpTransceiverTest, CanBeDestroyedWithoutBlocking) { + SimulatedClock clock(0); TaskQueueForTest queue("rtcp"); NiceMock outgoing_transport; RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); auto* rtcp_transceiver = new RtcpTransceiver(config); @@ -128,9 +141,11 @@ TEST(RtcpTransceiverTest, CanBeDestroyedWithoutBlocking) { } TEST(RtcpTransceiverTest, MaySendPacketsAfterDestructor) { // i.e. Be careful! + SimulatedClock clock(0); NiceMock outgoing_transport; // Must outlive queue below. 
TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); auto* rtcp_transceiver = new RtcpTransceiver(config); @@ -159,9 +174,11 @@ rtc::CopyOnWriteBuffer CreateSenderReport(uint32_t ssrc, uint32_t rtp_time) { TEST(RtcpTransceiverTest, DoesntPostToRtcpObserverAfterCallToRemove) { const uint32_t kRemoteSsrc = 1234; + SimulatedClock clock(0); MockTransport null_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &null_transport; config.task_queue = queue.Get(); RtcpTransceiver rtcp_transceiver(config); @@ -186,9 +203,11 @@ TEST(RtcpTransceiverTest, DoesntPostToRtcpObserverAfterCallToRemove) { TEST(RtcpTransceiverTest, RemoveMediaReceiverRtcpObserverIsNonBlocking) { const uint32_t kRemoteSsrc = 1234; + SimulatedClock clock(0); MockTransport null_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &null_transport; config.task_queue = queue.Get(); RtcpTransceiver rtcp_transceiver(config); @@ -210,9 +229,11 @@ TEST(RtcpTransceiverTest, RemoveMediaReceiverRtcpObserverIsNonBlocking) { } TEST(RtcpTransceiverTest, CanCallSendCompoundPacketFromAnyThread) { + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); @@ -239,9 +260,11 @@ TEST(RtcpTransceiverTest, CanCallSendCompoundPacketFromAnyThread) { } TEST(RtcpTransceiverTest, DoesntSendPacketsAfterStopCallback) { + SimulatedClock clock(0); NiceMock outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); config.schedule_periodic_compound_packets = true; @@ -260,9 +283,11 
@@ TEST(RtcpTransceiverTest, DoesntSendPacketsAfterStopCallback) { TEST(RtcpTransceiverTest, SendsCombinedRtcpPacketOnTaskQueue) { static constexpr uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); @@ -297,9 +322,11 @@ TEST(RtcpTransceiverTest, SendsCombinedRtcpPacketOnTaskQueue) { TEST(RtcpTransceiverTest, SendFrameIntraRequestDefaultsToNewRequest) { static constexpr uint32_t kSenderSsrc = 12345; + SimulatedClock clock(0); MockTransport outgoing_transport; TaskQueueForTest queue("rtcp"); RtcpTransceiverConfig config; + config.clock = &clock; config.feedback_ssrc = kSenderSsrc; config.outgoing_transport = &outgoing_transport; config.task_queue = queue.Get(); diff --git a/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc b/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc index 30dedb192f..3b09818576 100644 --- a/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc +++ b/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.cc @@ -10,6 +10,7 @@ #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" +#include #include #include "api/array_view.h" @@ -23,6 +24,7 @@ namespace webrtc { constexpr RTPExtensionType RtpDependencyDescriptorExtension::kId; constexpr char RtpDependencyDescriptorExtension::kUri[]; +constexpr std::bitset<32> RtpDependencyDescriptorExtension::kAllChainsAreActive; bool RtpDependencyDescriptorExtension::Parse( rtc::ArrayView data, @@ -34,16 +36,20 @@ bool RtpDependencyDescriptorExtension::Parse( size_t RtpDependencyDescriptorExtension::ValueSize( const FrameDependencyStructure& structure, + std::bitset<32> active_chains, const DependencyDescriptor& descriptor) { - RtpDependencyDescriptorWriter writer(/*data=*/{}, structure, descriptor); + 
RtpDependencyDescriptorWriter writer(/*data=*/{}, structure, active_chains, + descriptor); return DivideRoundUp(writer.ValueSizeBits(), 8); } bool RtpDependencyDescriptorExtension::Write( rtc::ArrayView data, const FrameDependencyStructure& structure, + std::bitset<32> active_chains, const DependencyDescriptor& descriptor) { - RtpDependencyDescriptorWriter writer(data, structure, descriptor); + RtpDependencyDescriptorWriter writer(data, structure, active_chains, + descriptor); return writer.Write(); } diff --git a/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h b/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h index b99230c56b..de16eeab2a 100644 --- a/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h +++ b/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h @@ -10,6 +10,7 @@ #ifndef MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_EXTENSION_H_ #define MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_EXTENSION_H_ +#include #include #include "api/array_view.h" @@ -34,10 +35,24 @@ class RtpDependencyDescriptorExtension { DependencyDescriptor* descriptor); static size_t ValueSize(const FrameDependencyStructure& structure, + const DependencyDescriptor& descriptor) { + return ValueSize(structure, kAllChainsAreActive, descriptor); + } + static size_t ValueSize(const FrameDependencyStructure& structure, + std::bitset<32> active_chains, const DependencyDescriptor& descriptor); static bool Write(rtc::ArrayView data, const FrameDependencyStructure& structure, + const DependencyDescriptor& descriptor) { + return Write(data, structure, kAllChainsAreActive, descriptor); + } + static bool Write(rtc::ArrayView data, + const FrameDependencyStructure& structure, + std::bitset<32> active_chains, const DependencyDescriptor& descriptor); + + private: + static constexpr std::bitset<32> kAllChainsAreActive = ~uint32_t{0}; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc 
b/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc index 244bef8579..974557ce6e 100644 --- a/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_dependency_descriptor_extension_unittest.cc @@ -26,11 +26,8 @@ TEST(RtpDependencyDescriptorExtensionTest, Writer3BytesForPerfectTemplate) { FrameDependencyStructure structure; structure.num_decode_targets = 2; structure.num_chains = 2; - structure.templates = {GenericFrameInfo::Builder() - .Dtis("SR") - .Fdiffs({1}) - .ChainDiffs({2, 2}) - .Build()}; + structure.templates = { + FrameDependencyTemplate().Dtis("SR").FrameDiffs({1}).ChainDiffs({2, 2})}; DependencyDescriptor descriptor; descriptor.frame_dependencies = structure.templates[0]; @@ -46,11 +43,8 @@ TEST(RtpDependencyDescriptorExtensionTest, WriteZeroInUnusedBits) { FrameDependencyStructure structure; structure.num_decode_targets = 2; structure.num_chains = 2; - structure.templates = {GenericFrameInfo::Builder() - .Dtis("SR") - .Fdiffs({1}) - .ChainDiffs({1, 1}) - .Build()}; + structure.templates = { + FrameDependencyTemplate().Dtis("SR").FrameDiffs({1}).ChainDiffs({1, 1})}; DependencyDescriptor descriptor; descriptor.frame_dependencies = structure.templates[0]; descriptor.frame_dependencies.frame_diffs = {2}; @@ -69,5 +63,75 @@ TEST(RtpDependencyDescriptorExtensionTest, WriteZeroInUnusedBits) { EXPECT_THAT(rtc::MakeArrayView(unused_bytes, num_unused_bytes), Each(0)); } +// In practice chain diff for inactive chain will grow uboundly because no +// frames are produced for it, that shouldn't block writing the extension. 
+TEST(RtpDependencyDescriptorExtensionTest, + TemplateMatchingSkipsInactiveChains) { + uint8_t buffer[3]; + FrameDependencyStructure structure; + structure.num_decode_targets = 2; + structure.num_chains = 2; + structure.templates = { + FrameDependencyTemplate().Dtis("SR").ChainDiffs({2, 2})}; + DependencyDescriptor descriptor; + descriptor.frame_dependencies = structure.templates[0]; + + // Set only 1st chain as active. + std::bitset<32> active_chains = 0b01; + descriptor.frame_dependencies.chain_diffs[1] = 1000; + + // Expect perfect template match since the only difference is for an inactive + // chain. Pefect template match consumes 3 bytes. + EXPECT_EQ(RtpDependencyDescriptorExtension::ValueSize( + structure, active_chains, descriptor), + 3u); + EXPECT_TRUE(RtpDependencyDescriptorExtension::Write( + buffer, structure, active_chains, descriptor)); +} + +TEST(RtpDependencyDescriptorExtensionTest, + AcceptsInvalidChainDiffForInactiveChainWhenChainsAreCustom) { + uint8_t buffer[256]; + FrameDependencyStructure structure; + structure.num_decode_targets = 2; + structure.num_chains = 2; + structure.templates = { + FrameDependencyTemplate().Dtis("SR").ChainDiffs({2, 2})}; + DependencyDescriptor descriptor; + descriptor.frame_dependencies = structure.templates[0]; + + // Set only 1st chain as active. + std::bitset<32> active_chains = 0b01; + // Set chain_diff different to the template to make it custom. + descriptor.frame_dependencies.chain_diffs[0] = 1; + // Set chain diff for inactive chain beyound limit of 255 max chain diff. + descriptor.frame_dependencies.chain_diffs[1] = 1000; + + // Because chains are custom, should use more than base 3 bytes. 
+ EXPECT_GT(RtpDependencyDescriptorExtension::ValueSize( + structure, active_chains, descriptor), + 3u); + EXPECT_TRUE(RtpDependencyDescriptorExtension::Write( + buffer, structure, active_chains, descriptor)); +} + +TEST(RtpDependencyDescriptorExtensionTest, FailsToWriteInvalidDescriptor) { + uint8_t buffer[256]; + FrameDependencyStructure structure; + structure.num_decode_targets = 2; + structure.num_chains = 2; + structure.templates = { + FrameDependencyTemplate().T(0).Dtis("SR").ChainDiffs({2, 2})}; + DependencyDescriptor descriptor; + descriptor.frame_dependencies = structure.templates[0]; + descriptor.frame_dependencies.temporal_id = 1; + + EXPECT_EQ( + RtpDependencyDescriptorExtension::ValueSize(structure, 0b11, descriptor), + 0u); + EXPECT_FALSE(RtpDependencyDescriptorExtension::Write(buffer, structure, 0b11, + descriptor)); +} + } // namespace } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc b/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc index 07b6a3b3c3..8f0cb349bc 100644 --- a/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc +++ b/modules/rtp_rtcp/source/rtp_dependency_descriptor_reader.cc @@ -18,13 +18,6 @@ #include "rtc_base/checks.h" namespace webrtc { -namespace { - -constexpr int kMaxTemporalId = 7; -constexpr int kMaxSpatialId = 3; -constexpr int kMaxTemplates = 64; - -} // namespace RtpDependencyDescriptorReader::RtpDependencyDescriptorReader( rtc::ArrayView raw_data, @@ -54,14 +47,14 @@ RtpDependencyDescriptorReader::RtpDependencyDescriptorReader( uint32_t RtpDependencyDescriptorReader::ReadBits(size_t bit_count) { uint32_t value = 0; - if (!buffer_.ReadBits(&value, bit_count)) + if (!buffer_.ReadBits(bit_count, value)) parsing_failed_ = true; return value; } uint32_t RtpDependencyDescriptorReader::ReadNonSymmetric(size_t num_values) { uint32_t value = 0; - if (!buffer_.ReadNonSymmetric(&value, num_values)) + if (!buffer_.ReadNonSymmetric(num_values, value)) parsing_failed_ = 
true; return value; } @@ -95,7 +88,7 @@ void RtpDependencyDescriptorReader::ReadTemplateLayers() { int spatial_id = 0; NextLayerIdc next_layer_idc; do { - if (templates.size() == kMaxTemplates) { + if (templates.size() == DependencyDescriptor::kMaxTemplates) { parsing_failed_ = true; break; } @@ -107,14 +100,14 @@ void RtpDependencyDescriptorReader::ReadTemplateLayers() { next_layer_idc = static_cast(ReadBits(2)); if (next_layer_idc == kNextTemporalLayer) { temporal_id++; - if (temporal_id > kMaxTemporalId) { + if (temporal_id >= DependencyDescriptor::kMaxTemporalIds) { parsing_failed_ = true; break; } } else if (next_layer_idc == kNextSpatialLayer) { temporal_id = 0; spatial_id++; - if (spatial_id > kMaxSpatialId) { + if (spatial_id >= DependencyDescriptor::kMaxSpatialIds) { parsing_failed_ = true; break; } @@ -153,7 +146,7 @@ void RtpDependencyDescriptorReader::ReadTemplateChains() { if (structure->num_chains == 0) return; for (int i = 0; i < structure->num_decode_targets; ++i) { - uint32_t protected_by_chain = ReadNonSymmetric(structure->num_chains + 1); + uint32_t protected_by_chain = ReadNonSymmetric(structure->num_chains); structure->decode_target_protected_by_chain.push_back(protected_by_chain); } for (FrameDependencyTemplate& frame_template : structure->templates) { @@ -198,9 +191,10 @@ void RtpDependencyDescriptorReader::ReadExtendedFields() { } void RtpDependencyDescriptorReader::ReadFrameDependencyDefinition() { - size_t template_index = (frame_dependency_template_id_ + kMaxTemplates - - structure_->structure_id) % - kMaxTemplates; + size_t template_index = + (frame_dependency_template_id_ + DependencyDescriptor::kMaxTemplates - + structure_->structure_id) % + DependencyDescriptor::kMaxTemplates; if (template_index >= structure_->templates.size()) { parsing_failed_ = true; diff --git a/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc b/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc index df5310b6ba..31df783064 100644 --- 
a/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc +++ b/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.cc @@ -9,6 +9,7 @@ */ #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h" +#include #include #include #include @@ -23,8 +24,6 @@ namespace webrtc { namespace { -constexpr int kMaxTemplates = 64; - enum class NextLayerIdc : uint64_t { kSameLayer = 0, kNextTemporal = 1, @@ -35,12 +34,8 @@ enum class NextLayerIdc : uint64_t { NextLayerIdc GetNextLayerIdc(const FrameDependencyTemplate& previous, const FrameDependencyTemplate& next) { - // TODO(danilchap): Move these constants to header shared between reader and - // writer. - static constexpr int kMaxSpatialId = 3; - static constexpr int kMaxTemporalId = 7; - RTC_DCHECK_LE(next.spatial_id, kMaxSpatialId); - RTC_DCHECK_LE(next.temporal_id, kMaxTemporalId); + RTC_DCHECK_LT(next.spatial_id, DependencyDescriptor::kMaxSpatialIds); + RTC_DCHECK_LT(next.temporal_id, DependencyDescriptor::kMaxTemporalIds); if (next.spatial_id == previous.spatial_id && next.temporal_id == previous.temporal_id) { @@ -61,14 +56,19 @@ NextLayerIdc GetNextLayerIdc(const FrameDependencyTemplate& previous, RtpDependencyDescriptorWriter::RtpDependencyDescriptorWriter( rtc::ArrayView data, const FrameDependencyStructure& structure, + std::bitset<32> active_chains, const DependencyDescriptor& descriptor) : descriptor_(descriptor), structure_(structure), + active_chains_(active_chains), bit_writer_(data.data(), data.size()) { FindBestTemplate(); } bool RtpDependencyDescriptorWriter::Write() { + if (build_failed_) { + return false; + } WriteMandatoryFields(); if (HasExtendedFields()) { WriteExtendedFields(); @@ -86,6 +86,9 @@ bool RtpDependencyDescriptorWriter::Write() { } int RtpDependencyDescriptorWriter::ValueSizeBits() const { + if (build_failed_) { + return 0; + } static constexpr int kMandatoryFields = 1 + 1 + 6 + 16; int value_size_bits = kMandatoryFields + best_template_.extra_size_bits; if 
(HasExtendedFields()) { @@ -114,8 +117,8 @@ int RtpDependencyDescriptorWriter::StructureSizeBits() const { structure_.num_chains, structure_.num_decode_targets + 1); if (structure_.num_chains > 0) { for (int protected_by : structure_.decode_target_protected_by_chain) { - bits += rtc::BitBufferWriter::SizeNonSymmetricBits( - protected_by, structure_.num_chains + 1); + bits += rtc::BitBufferWriter::SizeNonSymmetricBits(protected_by, + structure_.num_chains); } bits += 4 * structure_.templates.size() * structure_.num_chains; } @@ -134,8 +137,14 @@ RtpDependencyDescriptorWriter::CalculateMatch( result.need_custom_dtis = descriptor_.frame_dependencies.decode_target_indications != frame_template->decode_target_indications; - result.need_custom_chains = - descriptor_.frame_dependencies.chain_diffs != frame_template->chain_diffs; + result.need_custom_chains = false; + for (int i = 0; i < structure_.num_chains; ++i) { + if (active_chains_[i] && descriptor_.frame_dependencies.chain_diffs[i] != + frame_template->chain_diffs[i]) { + result.need_custom_chains = true; + break; + } + } result.extra_size_bits = 0; if (result.need_custom_fdiffs) { @@ -169,7 +178,10 @@ void RtpDependencyDescriptorWriter::FindBestTemplate() { frame_template.temporal_id; }; auto first = absl::c_find_if(templates, same_layer); - RTC_CHECK(first != templates.end()); + if (first == templates.end()) { + build_failed_ = true; + return; + } auto last = std::find_if_not(first, templates.end(), same_layer); best_template_ = CalculateMatch(first); @@ -201,7 +213,7 @@ bool RtpDependencyDescriptorWriter::HasExtendedFields() const { uint64_t RtpDependencyDescriptorWriter::TemplateId() const { return (best_template_.template_position - structure_.templates.begin() + structure_.structure_id) % - kMaxTemplates; + DependencyDescriptor::kMaxTemplates; } void RtpDependencyDescriptorWriter::WriteBits(uint64_t val, size_t bit_count) { @@ -217,9 +229,10 @@ void RtpDependencyDescriptorWriter::WriteNonSymmetric(uint32_t 
value, void RtpDependencyDescriptorWriter::WriteTemplateDependencyStructure() { RTC_DCHECK_GE(structure_.structure_id, 0); - RTC_DCHECK_LT(structure_.structure_id, kMaxTemplates); + RTC_DCHECK_LT(structure_.structure_id, DependencyDescriptor::kMaxTemplates); RTC_DCHECK_GT(structure_.num_decode_targets, 0); - RTC_DCHECK_LE(structure_.num_decode_targets, 1 << 5); + RTC_DCHECK_LE(structure_.num_decode_targets, + DependencyDescriptor::kMaxDecodeTargets); WriteBits(structure_.structure_id, 6); WriteBits(structure_.num_decode_targets - 1, 5); @@ -236,7 +249,7 @@ void RtpDependencyDescriptorWriter::WriteTemplateDependencyStructure() { void RtpDependencyDescriptorWriter::WriteTemplateLayers() { const auto& templates = structure_.templates; RTC_DCHECK(!templates.empty()); - RTC_DCHECK_LE(templates.size(), kMaxTemplates); + RTC_DCHECK_LE(templates.size(), DependencyDescriptor::kMaxTemplates); RTC_DCHECK_EQ(templates[0].spatial_id, 0); RTC_DCHECK_EQ(templates[0].temporal_id, 0); @@ -284,8 +297,8 @@ void RtpDependencyDescriptorWriter::WriteTemplateChains() { structure_.num_decode_targets); for (int protected_by : structure_.decode_target_protected_by_chain) { RTC_DCHECK_GE(protected_by, 0); - RTC_DCHECK_LE(protected_by, structure_.num_chains); - WriteNonSymmetric(protected_by, structure_.num_chains + 1); + RTC_DCHECK_LT(protected_by, structure_.num_chains); + WriteNonSymmetric(protected_by, structure_.num_chains); } for (const auto& frame_template : structure_.templates) { RTC_DCHECK_EQ(frame_template.chain_diffs.size(), structure_.num_chains); @@ -371,7 +384,9 @@ void RtpDependencyDescriptorWriter::WriteFrameFdiffs() { void RtpDependencyDescriptorWriter::WriteFrameChains() { RTC_DCHECK_EQ(descriptor_.frame_dependencies.chain_diffs.size(), structure_.num_chains); - for (int chain_diff : descriptor_.frame_dependencies.chain_diffs) { + for (int i = 0; i < structure_.num_chains; ++i) { + int chain_diff = + active_chains_[i] ? 
descriptor_.frame_dependencies.chain_diffs[i] : 0; RTC_DCHECK_GE(chain_diff, 0); RTC_DCHECK_LT(chain_diff, 1 << 8); WriteBits(chain_diff, 8); diff --git a/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h b/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h index 5a823b6e86..99fefecea6 100644 --- a/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h +++ b/modules/rtp_rtcp/source/rtp_dependency_descriptor_writer.h @@ -10,6 +10,7 @@ #ifndef MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_WRITER_H_ #define MODULES_RTP_RTCP_SOURCE_RTP_DEPENDENCY_DESCRIPTOR_WRITER_H_ +#include #include #include #include @@ -25,6 +26,7 @@ class RtpDependencyDescriptorWriter { // |descriptor| matches the |structure|. RtpDependencyDescriptorWriter(rtc::ArrayView data, const FrameDependencyStructure& structure, + std::bitset<32> active_chains, const DependencyDescriptor& descriptor); // Serializes DependencyDescriptor rtp header extension. @@ -77,6 +79,7 @@ class RtpDependencyDescriptorWriter { bool build_failed_ = false; const DependencyDescriptor& descriptor_; const FrameDependencyStructure& structure_; + std::bitset<32> active_chains_; rtc::BitBufferWriter bit_writer_; TemplateMatch best_template_; }; diff --git a/modules/rtp_rtcp/source/rtp_fec_unittest.cc b/modules/rtp_rtcp/source/rtp_fec_unittest.cc index eb559f2bd9..a90e61a731 100644 --- a/modules/rtp_rtcp/source/rtp_fec_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_fec_unittest.cc @@ -127,7 +127,7 @@ void RtpFecTest::ReceivedPackets( // For media packets, the sequence number is obtained from the // RTP header as written by MediaPacketGenerator::ConstructMediaPackets. 
received_packet->seq_num = - ByteReader::ReadBigEndian(&packet->data[2]); + ByteReader::ReadBigEndian(packet->data.data() + 2); } else { received_packet->ssrc = ForwardErrorCorrectionType::kFecSsrc; // For FEC packets, we simulate the sequence numbers differently @@ -222,10 +222,10 @@ TYPED_TEST(RtpFecTest, WillProtectMediaPacketsWithLargeSequenceNumberGap) { this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets); // Create |kMaxMediaPackets - 1| sequence number difference. - ByteWriter::WriteBigEndian(&this->media_packets_.front()->data[2], - 1); - ByteWriter::WriteBigEndian(&this->media_packets_.back()->data[2], - kMaxMediaPackets); + ByteWriter::WriteBigEndian( + this->media_packets_.front()->data.MutableData() + 2, 1); + ByteWriter::WriteBigEndian( + this->media_packets_.back()->data.MutableData() + 2, kMaxMediaPackets); EXPECT_EQ( 0, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor, @@ -245,10 +245,11 @@ TYPED_TEST(RtpFecTest, this->media_packet_generator_.ConstructMediaPackets(kNumMediaPackets); // Create |kMaxMediaPackets| sequence number difference. - ByteWriter::WriteBigEndian(&this->media_packets_.front()->data[2], - 1); - ByteWriter::WriteBigEndian(&this->media_packets_.back()->data[2], - kMaxMediaPackets + 1); + ByteWriter::WriteBigEndian( + this->media_packets_.front()->data.MutableData() + 2, 1); + ByteWriter::WriteBigEndian( + this->media_packets_.back()->data.MutableData() + 2, + kMaxMediaPackets + 1); EXPECT_EQ( -1, this->fec_.EncodeFec(this->media_packets_, kProtectionFactor, @@ -526,9 +527,9 @@ TEST_F(RtpFecTestFlexfecOnly, FecRecoveryWithSeqNumGapOneFrameNoRecovery) { // Overwrite the sequence numbers generated by ConstructMediaPackets, // to make sure that we do have a wrap. 
auto it = this->generated_fec_packets_.begin(); - ByteWriter::WriteBigEndian(&(*it)->data[2], 65535); + ByteWriter::WriteBigEndian(&(*it)->data.MutableData()[2], 65535); ++it; - ByteWriter::WriteBigEndian(&(*it)->data[2], 0); + ByteWriter::WriteBigEndian(&(*it)->data.MutableData()[2], 0); // Lose the last two media packets (seq# 65533, 65534). memset(this->media_loss_mask_, 0, sizeof(this->media_loss_mask_)); diff --git a/modules/rtp_rtcp/source/rtp_format.cc b/modules/rtp_rtcp/source/rtp_format.cc index 28f63f1109..7550b70f69 100644 --- a/modules/rtp_rtcp/source/rtp_format.cc +++ b/modules/rtp_rtcp/source/rtp_format.cc @@ -30,8 +30,7 @@ std::unique_ptr RtpPacketizer::Create( rtc::ArrayView payload, PayloadSizeLimits limits, // Codec-specific details. - const RTPVideoHeader& rtp_video_header, - const RTPFragmentationHeader* fragmentation) { + const RTPVideoHeader& rtp_video_header) { if (!type) { // Use raw packetizer. return std::make_unique(payload, limits); @@ -39,11 +38,10 @@ std::unique_ptr RtpPacketizer::Create( switch (*type) { case kVideoCodecH264: { - RTC_CHECK(fragmentation); const auto& h264 = absl::get(rtp_video_header.video_type_header); - return std::make_unique( - payload, limits, h264.packetization_mode, *fragmentation); + return std::make_unique(payload, limits, + h264.packetization_mode); } case kVideoCodecVP8: { const auto& vp8 = @@ -56,8 +54,9 @@ std::unique_ptr RtpPacketizer::Create( return std::make_unique(payload, limits, vp9); } case kVideoCodecAV1: - return std::make_unique(payload, limits, - rtp_video_header.frame_type); + return std::make_unique( + payload, limits, rtp_video_header.frame_type, + rtp_video_header.is_last_frame_in_picture); default: { return std::make_unique(payload, limits, rtp_video_header); diff --git a/modules/rtp_rtcp/source/rtp_format.h b/modules/rtp_rtcp/source/rtp_format.h index dca8285b62..b593f29b1d 100644 --- a/modules/rtp_rtcp/source/rtp_format.h +++ b/modules/rtp_rtcp/source/rtp_format.h @@ -18,7 +18,6 @@ 
#include "absl/types/optional.h" #include "api/array_view.h" -#include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/source/rtp_video_header.h" namespace webrtc { @@ -41,8 +40,7 @@ class RtpPacketizer { rtc::ArrayView payload, PayloadSizeLimits limits, // Codec-specific details. - const RTPVideoHeader& rtp_video_header, - const RTPFragmentationHeader* fragmentation); + const RTPVideoHeader& rtp_video_header); virtual ~RtpPacketizer() = default; diff --git a/modules/rtp_rtcp/source/rtp_format_h264.cc b/modules/rtp_rtcp/source/rtp_format_h264.cc index 6f19e38629..86f48582a7 100644 --- a/modules/rtp_rtcp/source/rtp_format_h264.cc +++ b/modules/rtp_rtcp/source/rtp_format_h264.cc @@ -25,7 +25,6 @@ #include "common_video/h264/pps_parser.h" #include "common_video/h264/sps_parser.h" #include "common_video/h264/sps_vui_rewriter.h" -#include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "rtc_base/checks.h" @@ -46,19 +45,18 @@ enum FuDefs : uint8_t { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 }; } // namespace -RtpPacketizerH264::RtpPacketizerH264( - rtc::ArrayView payload, - PayloadSizeLimits limits, - H264PacketizationMode packetization_mode, - const RTPFragmentationHeader& fragmentation) +RtpPacketizerH264::RtpPacketizerH264(rtc::ArrayView payload, + PayloadSizeLimits limits, + H264PacketizationMode packetization_mode) : limits_(limits), num_packets_left_(0) { // Guard against uninitialized memory in packetization_mode. 
RTC_CHECK(packetization_mode == H264PacketizationMode::NonInterleaved || packetization_mode == H264PacketizationMode::SingleNalUnit); - for (size_t i = 0; i < fragmentation.fragmentationVectorSize; ++i) { + for (const auto& nalu : + H264::FindNaluIndices(payload.data(), payload.size())) { input_fragments_.push_back( - payload.subview(fragmentation.Offset(i), fragmentation.Length(i))); + payload.subview(nalu.payload_start_offset, nalu.payload_size)); } if (!GeneratePackets(packetization_mode)) { @@ -179,7 +177,7 @@ size_t RtpPacketizerH264::PacketizeStapA(size_t fragment_index) { return fragment_size; } if (fragment_index == input_fragments_.size() - 1) { - // Last fragment, so StrapA might be the last packet. + // Last fragment, so STAP-A might be the last packet. return fragment_size + limits_.last_packet_reduction_len; } return fragment_size; diff --git a/modules/rtp_rtcp/source/rtp_format_h264.h b/modules/rtp_rtcp/source/rtp_format_h264.h index 4661dc2163..7c10dd5754 100644 --- a/modules/rtp_rtcp/source/rtp_format_h264.h +++ b/modules/rtp_rtcp/source/rtp_format_h264.h @@ -19,7 +19,6 @@ #include #include "api/array_view.h" -#include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/source/rtp_format.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "modules/video_coding/codecs/h264/include/h264_globals.h" @@ -34,8 +33,7 @@ class RtpPacketizerH264 : public RtpPacketizer { // The payload_data must be exactly one encoded H264 frame. 
RtpPacketizerH264(rtc::ArrayView payload, PayloadSizeLimits limits, - H264PacketizationMode packetization_mode, - const RTPFragmentationHeader& fragmentation); + H264PacketizationMode packetization_mode); ~RtpPacketizerH264() override; diff --git a/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc b/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc index bf9771ab9f..9f660b7a74 100644 --- a/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc @@ -13,9 +13,9 @@ #include #include +#include "absl/algorithm/container.h" #include "api/array_view.h" #include "common_video/h264/h264_common.h" -#include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h" #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" @@ -56,45 +56,61 @@ enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F }; // Bit masks for FU (A and B) headers. enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 }; -RTPFragmentationHeader CreateFragmentation(rtc::ArrayView sizes) { - RTPFragmentationHeader fragmentation; - fragmentation.VerifyAndAllocateFragmentationHeader(sizes.size()); - size_t offset = 0; - for (size_t i = 0; i < sizes.size(); ++i) { - fragmentation.fragmentationOffset[i] = offset; - fragmentation.fragmentationLength[i] = sizes[i]; - offset += sizes[i]; +// Creates Buffer that looks like nal unit of given size. +rtc::Buffer GenerateNalUnit(size_t size) { + RTC_CHECK_GT(size, 0); + rtc::Buffer buffer(size); + // Set some valid header. 
+ buffer[0] = kSlice; + for (size_t i = 1; i < size; ++i) { + buffer[i] = static_cast(i); } - return fragmentation; -} - -// Create fragmentation with single fragment of same size as |frame| -RTPFragmentationHeader NoFragmentation(rtc::ArrayView frame) { - size_t frame_size[] = {frame.size()}; - return CreateFragmentation(frame_size); + // Last byte shouldn't be 0, or it may be counted as part of next 4-byte start + // sequence. + buffer[size - 1] |= 0x10; + return buffer; } -// Create frame of given size. -rtc::Buffer CreateFrame(size_t frame_size) { - rtc::Buffer frame(frame_size); - // Set some valid header. - frame[0] = 0x01; - // Generate payload to detect when shifted payload was put into a packet. - for (size_t i = 1; i < frame_size; ++i) - frame[i] = static_cast(i); +// Create frame consisting of nalus of given size. +rtc::Buffer CreateFrame(std::initializer_list nalu_sizes) { + static constexpr int kStartCodeSize = 3; + rtc::Buffer frame(absl::c_accumulate(nalu_sizes, 0) + + kStartCodeSize * nalu_sizes.size()); + size_t offset = 0; + for (size_t nalu_size : nalu_sizes) { + EXPECT_GE(nalu_size, 1u); + // Insert nalu start code + frame[offset] = 0; + frame[offset + 1] = 0; + frame[offset + 2] = 1; + // Set some valid header. + frame[offset + 3] = 1; + // Fill payload avoiding accidental start codes + if (nalu_size > 1) { + memset(frame.data() + offset + 4, 0x3f, nalu_size - 1); + } + offset += (kStartCodeSize + nalu_size); + } return frame; } -// Create frame with size deduced from fragmentation. -rtc::Buffer CreateFrame(const RTPFragmentationHeader& fragmentation) { - size_t last_frame_index = fragmentation.fragmentationVectorSize - 1; - size_t frame_size = fragmentation.fragmentationOffset[last_frame_index] + - fragmentation.fragmentationLength[last_frame_index]; - rtc::Buffer frame = CreateFrame(frame_size); - // Set some headers. - // Tests can expect those are valid but shouln't rely on actual values. 
- for (size_t i = 0; i <= last_frame_index; ++i) { - frame[fragmentation.fragmentationOffset[i]] = i + 1; +// Create frame consisting of given nalus. +rtc::Buffer CreateFrame(rtc::ArrayView nalus) { + static constexpr int kStartCodeSize = 3; + int frame_size = 0; + for (const rtc::Buffer& nalu : nalus) { + frame_size += (kStartCodeSize + nalu.size()); + } + rtc::Buffer frame(frame_size); + size_t offset = 0; + for (const rtc::Buffer& nalu : nalus) { + // Insert nalu start code + frame[offset] = 0; + frame[offset + 1] = 0; + frame[offset + 2] = 1; + // Copy the nalu unit. + memcpy(frame.data() + offset + 3, nalu.data(), nalu.size()); + offset += (kStartCodeSize + nalu.size()); } return frame; } @@ -117,31 +133,28 @@ class RtpPacketizerH264ModeTest : public ::testing::TestWithParam {}; TEST_P(RtpPacketizerH264ModeTest, SingleNalu) { - const uint8_t frame[2] = {kIdr, 0xFF}; + const uint8_t frame[] = {0, 0, 1, kIdr, 0xFF}; - RtpPacketizerH264 packetizer(frame, kNoLimits, GetParam(), - NoFragmentation(frame)); + RtpPacketizerH264 packetizer(frame, kNoLimits, GetParam()); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(1)); - EXPECT_THAT(packets[0].payload(), ElementsAreArray(frame)); + EXPECT_THAT(packets[0].payload(), ElementsAre(kIdr, 0xFF)); } TEST_P(RtpPacketizerH264ModeTest, SingleNaluTwoPackets) { RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = kMaxPayloadSize; - const size_t fragment_sizes[] = {kMaxPayloadSize, 100}; - RTPFragmentationHeader fragmentation = CreateFragmentation(fragment_sizes); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer nalus[] = {GenerateNalUnit(kMaxPayloadSize), + GenerateNalUnit(100)}; + rtc::Buffer frame = CreateFrame(nalus); - RtpPacketizerH264 packetizer(frame, limits, GetParam(), fragmentation); + RtpPacketizerH264 packetizer(frame, limits, GetParam()); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(2)); - 
EXPECT_THAT(packets[0].payload(), - ElementsAreArray(frame.data(), kMaxPayloadSize)); - EXPECT_THAT(packets[1].payload(), - ElementsAreArray(frame.data() + kMaxPayloadSize, 100)); + EXPECT_THAT(packets[0].payload(), ElementsAreArray(nalus[0])); + EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[1])); } TEST_P(RtpPacketizerH264ModeTest, @@ -149,21 +162,18 @@ TEST_P(RtpPacketizerH264ModeTest, RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 200; limits.first_packet_reduction_len = 5; - const size_t fragments[] = {195, 200, 200}; - - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/195), + GenerateNalUnit(/*size=*/200), + GenerateNalUnit(/*size=*/200)}; + rtc::Buffer frame = CreateFrame(nalus); - RtpPacketizerH264 packetizer(frame, limits, GetParam(), fragmentation); + RtpPacketizerH264 packetizer(frame, limits, GetParam()); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(3)); - const uint8_t* next_fragment = frame.data(); - EXPECT_THAT(packets[0].payload(), ElementsAreArray(next_fragment, 195)); - next_fragment += 195; - EXPECT_THAT(packets[1].payload(), ElementsAreArray(next_fragment, 200)); - next_fragment += 200; - EXPECT_THAT(packets[2].payload(), ElementsAreArray(next_fragment, 200)); + EXPECT_THAT(packets[0].payload(), ElementsAreArray(nalus[0])); + EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[1])); + EXPECT_THAT(packets[2].payload(), ElementsAreArray(nalus[2])); } TEST_P(RtpPacketizerH264ModeTest, @@ -171,21 +181,18 @@ TEST_P(RtpPacketizerH264ModeTest, RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 200; limits.last_packet_reduction_len = 5; - const size_t fragments[] = {200, 200, 195}; - - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer nalus[] = 
{GenerateNalUnit(/*size=*/200), + GenerateNalUnit(/*size=*/200), + GenerateNalUnit(/*size=*/195)}; + rtc::Buffer frame = CreateFrame(nalus); - RtpPacketizerH264 packetizer(frame, limits, GetParam(), fragmentation); + RtpPacketizerH264 packetizer(frame, limits, GetParam()); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(3)); - const uint8_t* next_fragment = frame.data(); - EXPECT_THAT(packets[0].payload(), ElementsAreArray(next_fragment, 200)); - next_fragment += 200; - EXPECT_THAT(packets[1].payload(), ElementsAreArray(next_fragment, 200)); - next_fragment += 200; - EXPECT_THAT(packets[2].payload(), ElementsAreArray(next_fragment, 195)); + EXPECT_THAT(packets[0].payload(), ElementsAreArray(nalus[0])); + EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[1])); + EXPECT_THAT(packets[2].payload(), ElementsAreArray(nalus[2])); } TEST_P(RtpPacketizerH264ModeTest, @@ -194,10 +201,9 @@ TEST_P(RtpPacketizerH264ModeTest, limits.max_payload_len = 200; limits.first_packet_reduction_len = 20; limits.last_packet_reduction_len = 30; - rtc::Buffer frame = CreateFrame(150); + rtc::Buffer frame = CreateFrame({150}); - RtpPacketizerH264 packetizer(frame, limits, GetParam(), - NoFragmentation(frame)); + RtpPacketizerH264 packetizer(frame, limits, GetParam()); std::vector packets = FetchAllPackets(&packetizer); EXPECT_THAT(packets, SizeIs(1)); @@ -211,19 +217,19 @@ INSTANTIATE_TEST_SUITE_P( // Aggregation tests. 
TEST(RtpPacketizerH264Test, StapA) { - size_t fragments[] = {2, 2, 0x123}; + rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/2), + GenerateNalUnit(/*size=*/2), + GenerateNalUnit(/*size=*/0x123)}; + rtc::Buffer frame = CreateFrame(nalus); - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); - - RtpPacketizerH264 packetizer( - frame, kNoLimits, H264PacketizationMode::NonInterleaved, fragmentation); + RtpPacketizerH264 packetizer(frame, kNoLimits, + H264PacketizationMode::NonInterleaved); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(1)); auto payload = packets[0].payload(); EXPECT_EQ(payload.size(), - kNalHeaderSize + 3 * kLengthFieldLength + frame.size()); + kNalHeaderSize + 3 * kLengthFieldLength + 2 + 2 + 0x123); EXPECT_EQ(payload[0], kStapA); payload = payload.subview(kNalHeaderSize); @@ -231,29 +237,26 @@ TEST(RtpPacketizerH264Test, StapA) { EXPECT_THAT(payload.subview(0, kLengthFieldLength), ElementsAre(0, 2)); // Size. EXPECT_THAT(payload.subview(kLengthFieldLength, 2), - ElementsAreArray(frame.data(), 2)); + ElementsAreArray(nalus[0])); payload = payload.subview(kLengthFieldLength + 2); // 2nd fragment. EXPECT_THAT(payload.subview(0, kLengthFieldLength), ElementsAre(0, 2)); // Size. EXPECT_THAT(payload.subview(kLengthFieldLength, 2), - ElementsAreArray(frame.data() + 2, 2)); + ElementsAreArray(nalus[1])); payload = payload.subview(kLengthFieldLength + 2); // 3rd fragment. EXPECT_THAT(payload.subview(0, kLengthFieldLength), ElementsAre(0x1, 0x23)); // Size. - EXPECT_THAT(payload.subview(kLengthFieldLength), - ElementsAreArray(frame.data() + 4, 0x123)); + EXPECT_THAT(payload.subview(kLengthFieldLength), ElementsAreArray(nalus[2])); } TEST(RtpPacketizerH264Test, SingleNalUnitModeHasNoStapA) { // This is the same setup as for the StapA test. 
- size_t fragments[] = {2, 2, 0x123}; - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer frame = CreateFrame({2, 2, 0x123}); - RtpPacketizerH264 packetizer( - frame, kNoLimits, H264PacketizationMode::SingleNalUnit, fragmentation); + RtpPacketizerH264 packetizer(frame, kNoLimits, + H264PacketizationMode::SingleNalUnit); std::vector packets = FetchAllPackets(&packetizer); // The three fragments should be returned as three packets. @@ -269,23 +272,23 @@ TEST(RtpPacketizerH264Test, StapARespectsFirstPacketReduction) { limits.first_packet_reduction_len = 100; const size_t kFirstFragmentSize = limits.max_payload_len - limits.first_packet_reduction_len; - size_t fragments[] = {kFirstFragmentSize, 2, 2}; - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/kFirstFragmentSize), + GenerateNalUnit(/*size=*/2), + GenerateNalUnit(/*size=*/2)}; + rtc::Buffer frame = CreateFrame(nalus); - RtpPacketizerH264 packetizer( - frame, limits, H264PacketizationMode::NonInterleaved, fragmentation); + RtpPacketizerH264 packetizer(frame, limits, + H264PacketizationMode::NonInterleaved); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(2)); // Expect 1st packet is single nalu. - EXPECT_THAT(packets[0].payload(), - ElementsAreArray(frame.data(), kFirstFragmentSize)); + EXPECT_THAT(packets[0].payload(), ElementsAreArray(nalus[0])); // Expect 2nd packet is aggregate of last two fragments. 
- const uint8_t* tail = frame.data() + kFirstFragmentSize; - EXPECT_THAT(packets[1].payload(), ElementsAre(kStapA, // - 0, 2, tail[0], tail[1], // - 0, 2, tail[2], tail[3])); + EXPECT_THAT(packets[1].payload(), + ElementsAre(kStapA, // + 0, 2, nalus[1][0], nalus[1][1], // + 0, 2, nalus[2][0], nalus[2][1])); } TEST(RtpPacketizerH264Test, StapARespectsLastPacketReduction) { @@ -294,22 +297,23 @@ TEST(RtpPacketizerH264Test, StapARespectsLastPacketReduction) { limits.last_packet_reduction_len = 100; const size_t kLastFragmentSize = limits.max_payload_len - limits.last_packet_reduction_len; - size_t fragments[] = {2, 2, kLastFragmentSize}; - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/2), + GenerateNalUnit(/*size=*/2), + GenerateNalUnit(/*size=*/kLastFragmentSize)}; + rtc::Buffer frame = CreateFrame(nalus); - RtpPacketizerH264 packetizer( - frame, limits, H264PacketizationMode::NonInterleaved, fragmentation); + RtpPacketizerH264 packetizer(frame, limits, + H264PacketizationMode::NonInterleaved); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(2)); // Expect 1st packet is aggregate of 1st two fragments. - EXPECT_THAT(packets[0].payload(), ElementsAre(kStapA, // - 0, 2, frame[0], frame[1], // - 0, 2, frame[2], frame[3])); + EXPECT_THAT(packets[0].payload(), + ElementsAre(kStapA, // + 0, 2, nalus[0][0], nalus[0][1], // + 0, 2, nalus[1][0], nalus[1][1])); // Expect 2nd packet is single nalu. 
- EXPECT_THAT(packets[1].payload(), - ElementsAreArray(frame.data() + 4, kLastFragmentSize)); + EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[2])); } TEST(RtpPacketizerH264Test, TooSmallForStapAHeaders) { @@ -317,22 +321,23 @@ TEST(RtpPacketizerH264Test, TooSmallForStapAHeaders) { limits.max_payload_len = 1000; const size_t kLastFragmentSize = limits.max_payload_len - 3 * kLengthFieldLength - 4; - size_t fragments[] = {2, 2, kLastFragmentSize}; - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer nalus[] = {GenerateNalUnit(/*size=*/2), + GenerateNalUnit(/*size=*/2), + GenerateNalUnit(/*size=*/kLastFragmentSize)}; + rtc::Buffer frame = CreateFrame(nalus); - RtpPacketizerH264 packetizer( - frame, limits, H264PacketizationMode::NonInterleaved, fragmentation); + RtpPacketizerH264 packetizer(frame, limits, + H264PacketizationMode::NonInterleaved); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(2)); // Expect 1st packet is aggregate of 1st two fragments. - EXPECT_THAT(packets[0].payload(), ElementsAre(kStapA, // - 0, 2, frame[0], frame[1], // - 0, 2, frame[2], frame[3])); + EXPECT_THAT(packets[0].payload(), + ElementsAre(kStapA, // + 0, 2, nalus[0][0], nalus[0][1], // + 0, 2, nalus[1][0], nalus[1][1])); // Expect 2nd packet is single nalu. - EXPECT_THAT(packets[1].payload(), - ElementsAreArray(frame.data() + 4, kLastFragmentSize)); + EXPECT_THAT(packets[1].payload(), ElementsAreArray(nalus[2])); } // Fragmentation + aggregation. 
@@ -342,28 +347,29 @@ TEST(RtpPacketizerH264Test, MixedStapAFUA) { const size_t kFuaPayloadSize = 70; const size_t kFuaNaluSize = kNalHeaderSize + 2 * kFuaPayloadSize; const size_t kStapANaluSize = 20; - size_t fragments[] = {kFuaNaluSize, kStapANaluSize, kStapANaluSize}; - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer nalus[] = {GenerateNalUnit(kFuaNaluSize), + GenerateNalUnit(kStapANaluSize), + GenerateNalUnit(kStapANaluSize)}; + rtc::Buffer frame = CreateFrame(nalus); - RtpPacketizerH264 packetizer( - frame, limits, H264PacketizationMode::NonInterleaved, fragmentation); + RtpPacketizerH264 packetizer(frame, limits, + H264PacketizationMode::NonInterleaved); std::vector packets = FetchAllPackets(&packetizer); ASSERT_THAT(packets, SizeIs(3)); - const uint8_t* next_fragment = frame.data() + kNalHeaderSize; // First expect two FU-A packets. EXPECT_THAT(packets[0].payload().subview(0, kFuAHeaderSize), - ElementsAre(kFuA, FuDefs::kSBit | frame[0])); - EXPECT_THAT(packets[0].payload().subview(kFuAHeaderSize), - ElementsAreArray(next_fragment, kFuaPayloadSize)); - next_fragment += kFuaPayloadSize; + ElementsAre(kFuA, FuDefs::kSBit | nalus[0][0])); + EXPECT_THAT( + packets[0].payload().subview(kFuAHeaderSize), + ElementsAreArray(nalus[0].data() + kNalHeaderSize, kFuaPayloadSize)); EXPECT_THAT(packets[1].payload().subview(0, kFuAHeaderSize), - ElementsAre(kFuA, FuDefs::kEBit | frame[0])); - EXPECT_THAT(packets[1].payload().subview(kFuAHeaderSize), - ElementsAreArray(next_fragment, kFuaPayloadSize)); - next_fragment += kFuaPayloadSize; + ElementsAre(kFuA, FuDefs::kEBit | nalus[0][0])); + EXPECT_THAT( + packets[1].payload().subview(kFuAHeaderSize), + ElementsAreArray(nalus[0].data() + kNalHeaderSize + kFuaPayloadSize, + kFuaPayloadSize)); // Then expect one STAP-A packet with two nal units. 
EXPECT_THAT(packets[2].payload()[0], kStapA); @@ -371,13 +377,11 @@ TEST(RtpPacketizerH264Test, MixedStapAFUA) { EXPECT_THAT(payload.subview(0, kLengthFieldLength), ElementsAre(0, kStapANaluSize)); EXPECT_THAT(payload.subview(kLengthFieldLength, kStapANaluSize), - ElementsAreArray(next_fragment, kStapANaluSize)); + ElementsAreArray(nalus[1])); payload = payload.subview(kLengthFieldLength + kStapANaluSize); - next_fragment += kStapANaluSize; EXPECT_THAT(payload.subview(0, kLengthFieldLength), ElementsAre(0, kStapANaluSize)); - EXPECT_THAT(payload.subview(kLengthFieldLength), - ElementsAreArray(next_fragment, kStapANaluSize)); + EXPECT_THAT(payload.subview(kLengthFieldLength), ElementsAreArray(nalus[2])); } TEST(RtpPacketizerH264Test, LastFragmentFitsInSingleButNotLastPacket) { @@ -387,12 +391,10 @@ TEST(RtpPacketizerH264Test, LastFragmentFitsInSingleButNotLastPacket) { limits.last_packet_reduction_len = 20; limits.single_packet_reduction_len = 20; // Actual sizes, which triggered this bug. - size_t fragments[] = {20, 8, 18, 1161}; - RTPFragmentationHeader fragmentation = CreateFragmentation(fragments); - rtc::Buffer frame = CreateFrame(fragmentation); + rtc::Buffer frame = CreateFrame({20, 8, 18, 1161}); - RtpPacketizerH264 packetizer( - frame, limits, H264PacketizationMode::NonInterleaved, fragmentation); + RtpPacketizerH264 packetizer(frame, limits, + H264PacketizationMode::NonInterleaved); std::vector packets = FetchAllPackets(&packetizer); // Last packet has to be of correct size. @@ -406,11 +408,11 @@ TEST(RtpPacketizerH264Test, LastFragmentFitsInSingleButNotLastPacket) { // Returns sizes of the payloads excluding fua headers. 
std::vector TestFua(size_t frame_payload_size, const RtpPacketizer::PayloadSizeLimits& limits) { - rtc::Buffer frame = CreateFrame(kNalHeaderSize + frame_payload_size); + rtc::Buffer nalu[] = {GenerateNalUnit(kNalHeaderSize + frame_payload_size)}; + rtc::Buffer frame = CreateFrame(nalu); RtpPacketizerH264 packetizer(frame, limits, - H264PacketizationMode::NonInterleaved, - NoFragmentation(frame)); + H264PacketizationMode::NonInterleaved); std::vector packets = FetchAllPackets(&packetizer); EXPECT_GE(packets.size(), 2u); // Single packet indicates it is not FuA. @@ -429,7 +431,7 @@ std::vector TestFua(size_t frame_payload_size, // Clear S and E bits before testing all are duplicating same original header. fua_header.front() &= ~FuDefs::kSBit; fua_header.back() &= ~FuDefs::kEBit; - EXPECT_THAT(fua_header, Each(Eq((kFuA << 8) | frame[0]))); + EXPECT_THAT(fua_header, Each(Eq((kFuA << 8) | nalu[0][0]))); return payload_sizes; } @@ -488,11 +490,10 @@ TEST(RtpPacketizerH264Test, FUABig) { TEST(RtpPacketizerH264Test, RejectsOverlongDataInPacketizationMode0) { RtpPacketizer::PayloadSizeLimits limits; - rtc::Buffer frame = CreateFrame(kMaxPayloadSize + 1); - RTPFragmentationHeader fragmentation = NoFragmentation(frame); + rtc::Buffer frame = CreateFrame({kMaxPayloadSize + 1}); - RtpPacketizerH264 packetizer( - frame, limits, H264PacketizationMode::SingleNalUnit, fragmentation); + RtpPacketizerH264 packetizer(frame, limits, + H264PacketizationMode::SingleNalUnit); std::vector packets = FetchAllPackets(&packetizer); EXPECT_THAT(packets, IsEmpty()); diff --git a/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc b/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc index 35e7fe7ead..d83c3b03c9 100644 --- a/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_format_video_generic_unittest.cc @@ -16,7 +16,6 @@ #include #include "api/array_view.h" -#include "modules/include/module_common_types.h" #include 
"modules/rtp_rtcp/mocks/mock_rtp_rtcp.h" #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" diff --git a/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h b/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h index 03d4e58576..916d6577f1 100644 --- a/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h +++ b/modules/rtp_rtcp/source/rtp_format_vp8_test_helper.h @@ -10,10 +10,9 @@ // This file contains the class RtpFormatVp8TestHelper. The class is // responsible for setting up a fake VP8 bitstream according to the -// RTPVideoHeaderVP8 header, and partition information. After initialization, -// an RTPFragmentationHeader is provided so that the tester can create a -// packetizer. The packetizer can then be provided to this helper class, which -// will then extract all packets and compare to the expected outcome. +// RTPVideoHeaderVP8 header. The packetizer can then be provided to this helper +// class, which will then extract all packets and compare to the expected +// outcome. 
#ifndef MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_TEST_HELPER_H_ #define MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP8_TEST_HELPER_H_ diff --git a/modules/rtp_rtcp/source/rtp_header_extension_map.cc b/modules/rtp_rtcp/source/rtp_header_extension_map.cc index f59f9c4ebb..0b5ba474c7 100644 --- a/modules/rtp_rtcp/source/rtp_header_extension_map.cc +++ b/modules/rtp_rtcp/source/rtp_header_extension_map.cc @@ -13,6 +13,7 @@ #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" +#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h" #include "rtc_base/arraysize.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" @@ -33,6 +34,7 @@ constexpr ExtensionInfo CreateExtensionInfo() { constexpr ExtensionInfo kExtensions[] = { CreateExtensionInfo(), CreateExtensionInfo(), + CreateExtensionInfo(), CreateExtensionInfo(), CreateExtensionInfo(), CreateExtensionInfo(), @@ -40,8 +42,8 @@ constexpr ExtensionInfo kExtensions[] = { CreateExtensionInfo(), CreateExtensionInfo(), CreateExtensionInfo(), + CreateExtensionInfo(), CreateExtensionInfo(), - CreateExtensionInfo(), CreateExtensionInfo(), CreateExtensionInfo(), CreateExtensionInfo(), @@ -49,6 +51,7 @@ constexpr ExtensionInfo kExtensions[] = { CreateExtensionInfo(), CreateExtensionInfo(), CreateExtensionInfo(), + CreateExtensionInfo(), }; // Because of kRtpExtensionNone, NumberOfExtension is 1 bigger than the actual diff --git a/modules/rtp_rtcp/source/rtp_header_extensions.cc b/modules/rtp_rtcp/source/rtp_header_extensions.cc index fefe6c618f..1dd4f54759 100644 --- a/modules/rtp_rtcp/source/rtp_header_extensions.cc +++ b/modules/rtp_rtcp/source/rtp_header_extensions.cc @@ -13,6 +13,7 @@ #include #include +#include #include #include "modules/rtp_rtcp/include/rtp_cvo.h" @@ -186,6 +187,60 @@ bool AudioLevel::Write(rtc::ArrayView data, return true; } +// An RTP 
Header Extension for Mixer-to-Client Audio Level Indication +// +// https://tools.ietf.org/html/rfc6465 +// +// The form of the audio level extension block: +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ID | len=2 |0| level 1 |0| level 2 |0| level 3 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// Sample Audio Level Encoding Using the One-Byte Header Format +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ID | len=3 |0| level 1 |0| level 2 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |0| level 3 | 0 (pad) | ... | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// Sample Audio Level Encoding Using the Two-Byte Header Format +constexpr RTPExtensionType CsrcAudioLevel::kId; +constexpr uint8_t CsrcAudioLevel::kMaxValueSizeBytes; +constexpr const char CsrcAudioLevel::kUri[]; + +bool CsrcAudioLevel::Parse(rtc::ArrayView data, + std::vector* csrc_audio_levels) { + if (data.size() > kRtpCsrcSize) { + return false; + } + csrc_audio_levels->resize(data.size()); + for (size_t i = 0; i < data.size(); i++) { + (*csrc_audio_levels)[i] = data[i] & 0x7F; + } + return true; +} + +size_t CsrcAudioLevel::ValueSize( + rtc::ArrayView csrc_audio_levels) { + return csrc_audio_levels.size(); +} + +bool CsrcAudioLevel::Write(rtc::ArrayView data, + rtc::ArrayView csrc_audio_levels) { + RTC_CHECK_LE(csrc_audio_levels.size(), kRtpCsrcSize); + if (csrc_audio_levels.size() != data.size()) { + return false; + } + for (size_t i = 0; i < csrc_audio_levels.size(); i++) { + data[i] = csrc_audio_levels[i] & 0x7F; + } + return true; +} + // From RFC 5450: Transmission Time Offsets in RTP Streams. 
// // The transmission time is signaled to the receiver in-band using the @@ -371,7 +426,7 @@ constexpr uint8_t PlayoutDelayLimits::kValueSizeBytes; constexpr const char PlayoutDelayLimits::kUri[]; bool PlayoutDelayLimits::Parse(rtc::ArrayView data, - PlayoutDelay* playout_delay) { + VideoPlayoutDelay* playout_delay) { RTC_DCHECK(playout_delay); if (data.size() != 3) return false; @@ -386,7 +441,7 @@ bool PlayoutDelayLimits::Parse(rtc::ArrayView data, } bool PlayoutDelayLimits::Write(rtc::ArrayView data, - const PlayoutDelay& playout_delay) { + const VideoPlayoutDelay& playout_delay) { RTC_DCHECK_EQ(data.size(), 3); RTC_DCHECK_LE(0, playout_delay.min_ms); RTC_DCHECK_LE(playout_delay.min_ms, playout_delay.max_ms); @@ -525,86 +580,6 @@ bool VideoTimingExtension::Write(rtc::ArrayView data, return true; } -// Frame Marking. -// -// Meta-information about an RTP stream outside the encrypted media payload, -// useful for an RTP switch to do codec-agnostic selective forwarding -// without decrypting the payload. 
-// -// For non-scalable streams: -// 0 1 -// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 -// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -// | ID | L = 0 |S|E|I|D|0 0 0 0| -// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -// -// For scalable streams: -// 0 1 2 3 -// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -// | ID | L = 2 |S|E|I|D|B| TID | LID | TL0PICIDX | -// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - -constexpr RTPExtensionType FrameMarkingExtension::kId; -constexpr const char FrameMarkingExtension::kUri[]; - -bool FrameMarkingExtension::IsScalable(uint8_t temporal_id, uint8_t layer_id) { - return temporal_id != kNoTemporalIdx || layer_id != kNoSpatialIdx; -} - -bool FrameMarkingExtension::Parse(rtc::ArrayView data, - FrameMarking* frame_marking) { - RTC_DCHECK(frame_marking); - - if (data.size() != 1 && data.size() != 3) - return false; - - frame_marking->start_of_frame = (data[0] & 0x80) != 0; - frame_marking->end_of_frame = (data[0] & 0x40) != 0; - frame_marking->independent_frame = (data[0] & 0x20) != 0; - frame_marking->discardable_frame = (data[0] & 0x10) != 0; - - if (data.size() == 3) { - frame_marking->base_layer_sync = (data[0] & 0x08) != 0; - frame_marking->temporal_id = data[0] & 0x7; - frame_marking->layer_id = data[1]; - frame_marking->tl0_pic_idx = data[2]; - } else { - // non-scalable - frame_marking->base_layer_sync = false; - frame_marking->temporal_id = kNoTemporalIdx; - frame_marking->layer_id = kNoSpatialIdx; - frame_marking->tl0_pic_idx = 0; - } - return true; -} - -size_t FrameMarkingExtension::ValueSize(const FrameMarking& frame_marking) { - if (IsScalable(frame_marking.temporal_id, frame_marking.layer_id)) - return 3; - else - return 1; -} - -bool FrameMarkingExtension::Write(rtc::ArrayView data, - const FrameMarking& frame_marking) { - RTC_DCHECK_GE(data.size(), 1); - RTC_CHECK_LE(frame_marking.temporal_id, 0x07); - data[0] = 
frame_marking.start_of_frame ? 0x80 : 0x00; - data[0] |= frame_marking.end_of_frame ? 0x40 : 0x00; - data[0] |= frame_marking.independent_frame ? 0x20 : 0x00; - data[0] |= frame_marking.discardable_frame ? 0x10 : 0x00; - - if (IsScalable(frame_marking.temporal_id, frame_marking.layer_id)) { - RTC_DCHECK_EQ(data.size(), 3); - data[0] |= frame_marking.base_layer_sync ? 0x08 : 0x00; - data[0] |= frame_marking.temporal_id & 0x07; - data[1] = frame_marking.layer_id; - data[2] = frame_marking.tl0_pic_idx; - } - return true; -} - // Color space including HDR metadata as an optional field. // // RTP header extension to carry color space information and optionally HDR @@ -903,4 +878,32 @@ bool InbandComfortNoiseExtension::Write(rtc::ArrayView data, return true; } +// VideoFrameTrackingIdExtension +// +// 0 1 2 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ID | L=1 | video-frame-tracking-id | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +constexpr RTPExtensionType VideoFrameTrackingIdExtension::kId; +constexpr uint8_t VideoFrameTrackingIdExtension::kValueSizeBytes; +constexpr const char VideoFrameTrackingIdExtension::kUri[]; + +bool VideoFrameTrackingIdExtension::Parse(rtc::ArrayView data, + uint16_t* video_frame_tracking_id) { + if (data.size() != kValueSizeBytes) { + return false; + } + *video_frame_tracking_id = ByteReader::ReadBigEndian(data.data()); + return true; +} + +bool VideoFrameTrackingIdExtension::Write(rtc::ArrayView data, + uint16_t video_frame_tracking_id) { + RTC_DCHECK_EQ(data.size(), kValueSizeBytes); + ByteWriter::WriteBigEndian(data.data(), video_frame_tracking_id); + return true; +} + } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_header_extensions.h b/modules/rtp_rtcp/source/rtp_header_extensions.h index f4517bb513..b47824afdb 100644 --- a/modules/rtp_rtcp/source/rtp_header_extensions.h +++ b/modules/rtp_rtcp/source/rtp_header_extensions.h @@ -14,12 +14,12 @@ 
#include #include +#include #include "api/array_view.h" #include "api/rtp_headers.h" #include "api/video/color_space.h" #include "api/video/video_content_type.h" -#include "api/video/video_frame_marking.h" #include "api/video/video_rotation.h" #include "api/video/video_timing.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" @@ -78,6 +78,20 @@ class AudioLevel { uint8_t audio_level); }; +class CsrcAudioLevel { + public: + static constexpr RTPExtensionType kId = kRtpExtensionCsrcAudioLevel; + static constexpr uint8_t kMaxValueSizeBytes = 15; + static constexpr const char kUri[] = + "urn:ietf:params:rtp-hdrext:csrc-audio-level"; + + static bool Parse(rtc::ArrayView data, + std::vector* csrc_audio_levels); + static size_t ValueSize(rtc::ArrayView csrc_audio_levels); + static bool Write(rtc::ArrayView data, + rtc::ArrayView csrc_audio_levels); +}; + class TransmissionOffset { public: using value_type = int32_t; @@ -149,7 +163,7 @@ class VideoOrientation { class PlayoutDelayLimits { public: - using value_type = PlayoutDelay; + using value_type = VideoPlayoutDelay; static constexpr RTPExtensionType kId = kRtpExtensionPlayoutDelay; static constexpr uint8_t kValueSizeBytes = 3; static constexpr const char kUri[] = @@ -163,10 +177,10 @@ class PlayoutDelayLimits { static constexpr int kMaxMs = 0xfff * kGranularityMs; // 40950. 
static bool Parse(rtc::ArrayView data, - PlayoutDelay* playout_delay); - static size_t ValueSize(const PlayoutDelay&) { return kValueSizeBytes; } + VideoPlayoutDelay* playout_delay); + static size_t ValueSize(const VideoPlayoutDelay&) { return kValueSizeBytes; } static bool Write(rtc::ArrayView data, - const PlayoutDelay& playout_delay); + const VideoPlayoutDelay& playout_delay); }; class VideoContentTypeExtension { @@ -217,23 +231,6 @@ class VideoTimingExtension { uint8_t offset); }; -class FrameMarkingExtension { - public: - using value_type = FrameMarking; - static constexpr RTPExtensionType kId = kRtpExtensionFrameMarking; - static constexpr const char kUri[] = - "http://tools.ietf.org/html/draft-ietf-avtext-framemarking-07"; - - static bool Parse(rtc::ArrayView data, - FrameMarking* frame_marking); - static size_t ValueSize(const FrameMarking& frame_marking); - static bool Write(rtc::ArrayView data, - const FrameMarking& frame_marking); - - private: - static bool IsScalable(uint8_t temporal_id, uint8_t layer_id); -}; - class ColorSpaceExtension { public: using value_type = ColorSpace; @@ -325,5 +322,21 @@ class InbandComfortNoiseExtension { absl::optional level); }; +class VideoFrameTrackingIdExtension { + public: + using value_type = uint16_t; + static constexpr RTPExtensionType kId = kRtpExtensionVideoFrameTrackingId; + static constexpr uint8_t kValueSizeBytes = 2; + static constexpr const char kUri[] = + "http://www.webrtc.org/experiments/rtp-hdrext/video-frame-tracking-id"; + static bool Parse(rtc::ArrayView data, + uint16_t* video_frame_tracking_id); + static size_t ValueSize(uint16_t /*video_frame_tracking_id*/) { + return kValueSizeBytes; + } + static bool Write(rtc::ArrayView data, + uint16_t video_frame_tracking_id); +}; + } // namespace webrtc #endif // MODULES_RTP_RTCP_SOURCE_RTP_HEADER_EXTENSIONS_H_ diff --git a/modules/rtp_rtcp/source/rtp_packet.cc b/modules/rtp_rtcp/source/rtp_packet.cc index e054bb8306..8523637feb 100644 --- 
a/modules/rtp_rtcp/source/rtp_packet.cc +++ b/modules/rtp_rtcp/source/rtp_packet.cc @@ -27,6 +27,7 @@ constexpr size_t kFixedHeaderSize = 12; constexpr uint8_t kRtpVersion = 2; constexpr uint16_t kOneByteExtensionProfileId = 0xBEDE; constexpr uint16_t kTwoByteExtensionProfileId = 0x1000; +constexpr uint16_t kTwobyteExtensionProfileIdAppBitsFilter = 0xfff0; constexpr size_t kOneByteExtensionHeaderLength = 1; constexpr size_t kTwoByteExtensionHeaderLength = 2; constexpr size_t kDefaultPacketSize = 1500; @@ -70,8 +71,8 @@ RtpPacket::RtpPacket(const ExtensionManager* extensions, size_t capacity) RtpPacket::~RtpPacket() {} -void RtpPacket::IdentifyExtensions(const ExtensionManager& extensions) { - extensions_ = extensions; +void RtpPacket::IdentifyExtensions(ExtensionManager extensions) { + extensions_ = std::move(extensions); } bool RtpPacket::Parse(const uint8_t* buffer, size_t buffer_size) { @@ -111,8 +112,6 @@ std::vector RtpPacket::Csrcs() const { } void RtpPacket::CopyHeaderFrom(const RtpPacket& packet) { - RTC_DCHECK_GE(capacity(), packet.headers_size()); - marker_ = packet.marker_; payload_type_ = packet.payload_type_; sequence_number_ = packet.sequence_number_; @@ -186,9 +185,9 @@ void RtpPacket::ZeroMutableExtensions() { break; } case RTPExtensionType::kRtpExtensionAudioLevel: + case RTPExtensionType::kRtpExtensionCsrcAudioLevel: case RTPExtensionType::kRtpExtensionAbsoluteCaptureTime: case RTPExtensionType::kRtpExtensionColorSpace: - case RTPExtensionType::kRtpExtensionFrameMarking: case RTPExtensionType::kRtpExtensionGenericFrameDescriptor00: case RTPExtensionType::kRtpExtensionGenericFrameDescriptor02: case RTPExtensionType::kRtpExtensionMid: @@ -197,8 +196,10 @@ void RtpPacket::ZeroMutableExtensions() { case RTPExtensionType::kRtpExtensionRepairedRtpStreamId: case RTPExtensionType::kRtpExtensionRtpStreamId: case RTPExtensionType::kRtpExtensionVideoContentType: + case RTPExtensionType::kRtpExtensionVideoLayersAllocation: case 
RTPExtensionType::kRtpExtensionVideoRotation: - case RTPExtensionType::kRtpExtensionInbandComfortNoise: { + case RTPExtensionType::kRtpExtensionInbandComfortNoise: + case RTPExtensionType::kRtpExtensionVideoFrameTrackingId: { // Non-mutable extension. Don't change it. break; } @@ -465,16 +466,6 @@ bool RtpPacket::ParseBuffer(const uint8_t* buffer, size_t size) { } payload_offset_ = kFixedHeaderSize + number_of_crcs * 4; - if (has_padding) { - padding_size_ = buffer[size - 1]; - if (padding_size_ == 0) { - RTC_LOG(LS_WARNING) << "Padding was set, but padding size is zero"; - return false; - } - } else { - padding_size_ = 0; - } - extensions_size_ = 0; extension_entries_.clear(); if (has_extension) { @@ -500,7 +491,8 @@ bool RtpPacket::ParseBuffer(const uint8_t* buffer, size_t size) { return false; } if (profile != kOneByteExtensionProfileId && - profile != kTwoByteExtensionProfileId) { + (profile & kTwobyteExtensionProfileIdAppBitsFilter) != + kTwoByteExtensionProfileId) { RTC_LOG(LS_WARNING) << "Unsupported rtp extension " << profile; } else { size_t extension_header_length = profile == kOneByteExtensionProfileId @@ -554,6 +546,16 @@ bool RtpPacket::ParseBuffer(const uint8_t* buffer, size_t size) { payload_offset_ = extension_offset + extensions_capacity; } + if (has_padding && payload_offset_ < size) { + padding_size_ = buffer[size - 1]; + if (padding_size_ == 0) { + RTC_LOG(LS_WARNING) << "Padding was set, but padding size is zero"; + return false; + } + } else { + padding_size_ = 0; + } + if (payload_offset_ + padding_size_ > size) { return false; } diff --git a/modules/rtp_rtcp/source/rtp_packet.h b/modules/rtp_rtcp/source/rtp_packet.h index b3e67be7cf..e2e291cf5d 100644 --- a/modules/rtp_rtcp/source/rtp_packet.h +++ b/modules/rtp_rtcp/source/rtp_packet.h @@ -30,6 +30,8 @@ class RtpPacket { // packet creating and used if available in Parse function. 
// Adding and getting extensions will fail until |extensions| is // provided via constructor or IdentifyExtensions function. + // |*extensions| is only accessed during construction; the pointer is not + // stored. RtpPacket(); explicit RtpPacket(const ExtensionManager* extensions); RtpPacket(const RtpPacket&); @@ -49,7 +51,7 @@ class RtpPacket { bool Parse(rtc::CopyOnWriteBuffer packet); // Maps extensions id to their types. - void IdentifyExtensions(const ExtensionManager& extensions); + void IdentifyExtensions(ExtensionManager extensions); // Header. bool Marker() const { return marker_; } @@ -63,6 +65,7 @@ class RtpPacket { // Payload. size_t payload_size() const { return payload_size_; } + bool has_padding() const { return buffer_[0] & 0x20; } size_t padding_size() const { return padding_size_; } rtc::ArrayView payload() const { return rtc::MakeArrayView(data() + payload_offset_, payload_size_); @@ -112,6 +115,11 @@ class RtpPacket { bool HasExtension() const; bool HasExtension(ExtensionType type) const; + // Returns whether there is an associated id for the extension and thus it is + // possible to set the extension. + template + bool IsRegistered() const; + template bool GetExtension(FirstValue, Values...) const; @@ -178,8 +186,10 @@ class RtpPacket { uint16_t SetExtensionLengthMaybeAddZeroPadding(size_t extensions_offset); - uint8_t* WriteAt(size_t offset) { return buffer_.data() + offset; } - void WriteAt(size_t offset, uint8_t byte) { buffer_.data()[offset] = byte; } + uint8_t* WriteAt(size_t offset) { return buffer_.MutableData() + offset; } + void WriteAt(size_t offset, uint8_t byte) { + buffer_.MutableData()[offset] = byte; + } const uint8_t* ReadAt(size_t offset) const { return buffer_.data() + offset; } // Header. 
@@ -203,6 +213,11 @@ bool RtpPacket::HasExtension() const { return HasExtension(Extension::kId); } +template +bool RtpPacket::IsRegistered() const { + return extensions_.IsRegistered(Extension::kId); +} + template bool RtpPacket::GetExtension(FirstValue first, Values... values) const { auto raw = FindExtension(Extension::kId); diff --git a/modules/rtp_rtcp/source/rtp_packet_history.cc b/modules/rtp_rtcp/source/rtp_packet_history.cc index 58e971ff1d..5089933051 100644 --- a/modules/rtp_rtcp/source/rtp_packet_history.cc +++ b/modules/rtp_rtcp/source/rtp_packet_history.cc @@ -93,7 +93,7 @@ RtpPacketHistory::~RtpPacketHistory() {} void RtpPacketHistory::SetStorePacketsStatus(StorageMode mode, size_t number_to_store) { RTC_DCHECK_LE(number_to_store, kMaxCapacity); - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mode != StorageMode::kDisabled && mode_ != StorageMode::kDisabled) { RTC_LOG(LS_WARNING) << "Purging packet history in order to re-set status."; } @@ -103,12 +103,12 @@ void RtpPacketHistory::SetStorePacketsStatus(StorageMode mode, } RtpPacketHistory::StorageMode RtpPacketHistory::GetStorageMode() const { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); return mode_; } void RtpPacketHistory::SetRtt(int64_t rtt_ms) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); RTC_DCHECK_GE(rtt_ms, 0); rtt_ms_ = rtt_ms; // If storage is not disabled, packets will be removed after a timeout @@ -122,7 +122,7 @@ void RtpPacketHistory::SetRtt(int64_t rtt_ms) { void RtpPacketHistory::PutRtpPacket(std::unique_ptr packet, absl::optional send_time_ms) { RTC_DCHECK(packet); - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); int64_t now_ms = clock_->TimeInMilliseconds(); if (mode_ == StorageMode::kDisabled) { return; @@ -134,7 +134,7 @@ void RtpPacketHistory::PutRtpPacket(std::unique_ptr packet, // Store packet. 
const uint16_t rtp_seq_no = packet->SequenceNumber(); int packet_index = GetPacketIndex(rtp_seq_no); - if (packet_index >= 0u && + if (packet_index >= 0 && static_cast(packet_index) < packet_history_.size() && packet_history_[packet_index].packet_ != nullptr) { RTC_LOG(LS_WARNING) << "Duplicate packet inserted: " << rtp_seq_no; @@ -170,7 +170,7 @@ void RtpPacketHistory::PutRtpPacket(std::unique_ptr packet, std::unique_ptr RtpPacketHistory::GetPacketAndSetSendTime( uint16_t sequence_number) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mode_ == StorageMode::kDisabled) { return nullptr; } @@ -210,7 +210,7 @@ std::unique_ptr RtpPacketHistory::GetPacketAndMarkAsPending( uint16_t sequence_number, rtc::FunctionView(const RtpPacketToSend&)> encapsulate) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mode_ == StorageMode::kDisabled) { return nullptr; } @@ -241,7 +241,7 @@ std::unique_ptr RtpPacketHistory::GetPacketAndMarkAsPending( } void RtpPacketHistory::MarkPacketAsSent(uint16_t sequence_number) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mode_ == StorageMode::kDisabled) { return; } @@ -263,7 +263,7 @@ void RtpPacketHistory::MarkPacketAsSent(uint16_t sequence_number) { absl::optional RtpPacketHistory::GetPacketState( uint16_t sequence_number) const { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mode_ == StorageMode::kDisabled) { return absl::nullopt; } @@ -311,7 +311,7 @@ std::unique_ptr RtpPacketHistory::GetPayloadPaddingPacket() { std::unique_ptr RtpPacketHistory::GetPayloadPaddingPacket( rtc::FunctionView(const RtpPacketToSend&)> encapsulate) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mode_ == StorageMode::kDisabled) { return nullptr; } @@ -357,7 +357,7 @@ std::unique_ptr RtpPacketHistory::GetPayloadPaddingPacket( void RtpPacketHistory::CullAcknowledgedPackets( rtc::ArrayView sequence_numbers) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); for (uint16_t sequence_number : 
sequence_numbers) { int packet_index = GetPacketIndex(sequence_number); if (packet_index < 0 || @@ -369,7 +369,7 @@ void RtpPacketHistory::CullAcknowledgedPackets( } bool RtpPacketHistory::SetPendingTransmission(uint16_t sequence_number) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mode_ == StorageMode::kDisabled) { return false; } @@ -384,7 +384,7 @@ bool RtpPacketHistory::SetPendingTransmission(uint16_t sequence_number) { } void RtpPacketHistory::Clear() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); Reset(); } diff --git a/modules/rtp_rtcp/source/rtp_packet_history.h b/modules/rtp_rtcp/source/rtp_packet_history.h index db25b17a17..4a2bf91bd7 100644 --- a/modules/rtp_rtcp/source/rtp_packet_history.h +++ b/modules/rtp_rtcp/source/rtp_packet_history.h @@ -19,8 +19,7 @@ #include "api/function_view.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -63,6 +62,11 @@ class RtpPacketHistory { static constexpr int kPacketCullingDelayFactor = 3; RtpPacketHistory(Clock* clock, bool enable_padding_prio); + + RtpPacketHistory() = delete; + RtpPacketHistory(const RtpPacketHistory&) = delete; + RtpPacketHistory& operator=(const RtpPacketHistory&) = delete; + ~RtpPacketHistory(); // Set/get storage mode. Note that setting the state will clear the history, @@ -193,7 +197,7 @@ class RtpPacketHistory { Clock* const clock_; const bool enable_padding_prio_; - rtc::CriticalSection lock_; + mutable Mutex lock_; size_t number_to_store_ RTC_GUARDED_BY(lock_); StorageMode mode_ RTC_GUARDED_BY(lock_); int64_t rtt_ms_ RTC_GUARDED_BY(lock_); @@ -211,8 +215,6 @@ class RtpPacketHistory { // Objects from |packet_history_| ordered by "most likely to be useful", used // in GetPayloadPaddingPacket(). 
PacketPrioritySet padding_priority_ RTC_GUARDED_BY(lock_); - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RtpPacketHistory); }; } // namespace webrtc #endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKET_HISTORY_H_ diff --git a/modules/rtp_rtcp/source/rtp_packet_received.cc b/modules/rtp_rtcp/source/rtp_packet_received.cc index 56aea8eb5e..6b2cc76981 100644 --- a/modules/rtp_rtcp/source/rtp_packet_received.cc +++ b/modules/rtp_rtcp/source/rtp_packet_received.cc @@ -21,8 +21,10 @@ namespace webrtc { RtpPacketReceived::RtpPacketReceived() = default; -RtpPacketReceived::RtpPacketReceived(const ExtensionManager* extensions) - : RtpPacket(extensions) {} +RtpPacketReceived::RtpPacketReceived( + const ExtensionManager* extensions, + webrtc::Timestamp arrival_time /*= webrtc::Timestamp::MinusInfinity()*/) + : RtpPacket(extensions), arrival_time_(arrival_time) {} RtpPacketReceived::RtpPacketReceived(const RtpPacketReceived& packet) = default; RtpPacketReceived::RtpPacketReceived(RtpPacketReceived&& packet) = default; @@ -69,8 +71,6 @@ void RtpPacketReceived::GetHeader(RTPHeader* header) const { &header->extension.videoContentType); header->extension.has_video_timing = GetExtension(&header->extension.video_timing); - header->extension.has_frame_marking = - GetExtension(&header->extension.frame_marking); GetExtension(&header->extension.stream_id); GetExtension(&header->extension.repaired_stream_id); GetExtension(&header->extension.mid); diff --git a/modules/rtp_rtcp/source/rtp_packet_received.h b/modules/rtp_rtcp/source/rtp_packet_received.h index f5d317668c..431d3f52be 100644 --- a/modules/rtp_rtcp/source/rtp_packet_received.h +++ b/modules/rtp_rtcp/source/rtp_packet_received.h @@ -12,19 +12,26 @@ #include -#include +#include +#include "absl/base/attributes.h" #include "api/array_view.h" +#include "api/ref_counted_base.h" #include "api/rtp_headers.h" +#include "api/scoped_refptr.h" +#include "api/units/timestamp.h" #include "modules/rtp_rtcp/source/rtp_packet.h" -#include 
"system_wrappers/include/ntp_time.h" namespace webrtc { // Class to hold rtp packet with metadata for receiver side. +// The metadata is not parsed from the rtp packet, but may be derived from the +// data that is parsed from the rtp packet. class RtpPacketReceived : public RtpPacket { public: RtpPacketReceived(); - explicit RtpPacketReceived(const ExtensionManager* extensions); + explicit RtpPacketReceived( + const ExtensionManager* extensions, + webrtc::Timestamp arrival_time = webrtc::Timestamp::MinusInfinity()); RtpPacketReceived(const RtpPacketReceived& packet); RtpPacketReceived(RtpPacketReceived&& packet); @@ -39,12 +46,17 @@ class RtpPacketReceived : public RtpPacket { // Time in local time base as close as it can to packet arrived on the // network. - int64_t arrival_time_ms() const { return arrival_time_ms_; } - void set_arrival_time_ms(int64_t time) { arrival_time_ms_ = time; } + webrtc::Timestamp arrival_time() const { return arrival_time_; } + void set_arrival_time(webrtc::Timestamp time) { arrival_time_ = time; } - // Estimated from Timestamp() using rtcp Sender Reports. - NtpTime capture_ntp_time() const { return capture_time_; } - void set_capture_ntp_time(NtpTime time) { capture_time_ = time; } + ABSL_DEPRECATED("Use arrival_time() instead") + int64_t arrival_time_ms() const { + return arrival_time_.IsMinusInfinity() ? -1 : arrival_time_.ms(); + } + ABSL_DEPRECATED("Use set_arrival_time() instead") + void set_arrival_time_ms(int64_t time) { + arrival_time_ = webrtc::Timestamp::Millis(time); + } // Flag if packet was recovered via RTX or FEC. bool recovered() const { return recovered_; } @@ -55,21 +67,20 @@ class RtpPacketReceived : public RtpPacket { payload_type_frequency_ = value; } - // Additional data bound to the RTP packet for use in application code, - // outside of WebRTC. - rtc::ArrayView application_data() const { - return application_data_; + // An application can attach arbitrary data to an RTP packet using + // `additional_data`. 
The additional data does not affect WebRTC processing. + rtc::scoped_refptr additional_data() const { + return additional_data_; } - void set_application_data(rtc::ArrayView data) { - application_data_.assign(data.begin(), data.end()); + void set_additional_data(rtc::scoped_refptr data) { + additional_data_ = std::move(data); } private: - NtpTime capture_time_; - int64_t arrival_time_ms_ = 0; + webrtc::Timestamp arrival_time_ = Timestamp::MinusInfinity(); int payload_type_frequency_ = 0; bool recovered_ = false; - std::vector application_data_; + rtc::scoped_refptr additional_data_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_packet_to_send.h b/modules/rtp_rtcp/source/rtp_packet_to_send.h index 8997bce0d2..12341ef6cf 100644 --- a/modules/rtp_rtcp/source/rtp_packet_to_send.h +++ b/modules/rtp_rtcp/source/rtp_packet_to_send.h @@ -13,10 +13,12 @@ #include #include -#include +#include #include "absl/types/optional.h" #include "api/array_view.h" +#include "api/ref_counted_base.h" +#include "api/scoped_refptr.h" #include "api/video/video_timing.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" @@ -24,6 +26,8 @@ namespace webrtc { // Class to hold rtp packet with metadata for sender side. +// The metadata is not send over the wire, but packet sender may use it to +// create rtp header extensions or other data that is sent over the wire. class RtpPacketToSend : public RtpPacket { public: // RtpPacketToSend::Type is deprecated. Use RtpPacketMediaType directly. 
@@ -55,23 +59,22 @@ class RtpPacketToSend : public RtpPacket { void set_retransmitted_sequence_number(uint16_t sequence_number) { retransmitted_sequence_number_ = sequence_number; } - absl::optional retransmitted_sequence_number() { + absl::optional retransmitted_sequence_number() const { return retransmitted_sequence_number_; } void set_allow_retransmission(bool allow_retransmission) { allow_retransmission_ = allow_retransmission; } - bool allow_retransmission() { return allow_retransmission_; } + bool allow_retransmission() const { return allow_retransmission_; } - // Additional data bound to the RTP packet for use in application code, - // outside of WebRTC. - rtc::ArrayView application_data() const { - return application_data_; + // An application can attach arbitrary data to an RTP packet using + // `additional_data`. The additional data does not affect WebRTC processing. + rtc::scoped_refptr additional_data() const { + return additional_data_; } - - void set_application_data(rtc::ArrayView data) { - application_data_.assign(data.begin(), data.end()); + void set_additional_data(rtc::scoped_refptr data) { + additional_data_ = std::move(data); } void set_packetization_finish_time_ms(int64_t time) { @@ -108,14 +111,25 @@ class RtpPacketToSend : public RtpPacket { void set_is_key_frame(bool is_key_frame) { is_key_frame_ = is_key_frame; } bool is_key_frame() const { return is_key_frame_; } + // Indicates if packets should be protected by FEC (Forward Error Correction). 
+ void set_fec_protect_packet(bool protect) { fec_protect_packet_ = protect; } + bool fec_protect_packet() const { return fec_protect_packet_; } + + // Indicates if packet is using RED encapsulation, in accordance with + // https://tools.ietf.org/html/rfc2198 + void set_is_red(bool is_red) { is_red_ = is_red; } + bool is_red() const { return is_red_; } + private: int64_t capture_time_ms_ = 0; absl::optional packet_type_; bool allow_retransmission_ = false; absl::optional retransmitted_sequence_number_; - std::vector application_data_; + rtc::scoped_refptr additional_data_; bool is_first_packet_of_frame_ = false; bool is_key_frame_ = false; + bool fec_protect_packet_ = false; + bool is_red_ = false; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_packet_unittest.cc b/modules/rtp_rtcp/source/rtp_packet_unittest.cc index 74736a2ab7..8c5df1a0ad 100644 --- a/modules/rtp_rtcp/source/rtp_packet_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_packet_unittest.cc @@ -249,7 +249,7 @@ TEST(RtpPacketTest, CreateWithTwoByteHeaderExtensionFirst) { packet.SetTimestamp(kTimestamp); packet.SetSsrc(kSsrc); // Set extension that requires two-byte header. - PlayoutDelay playoutDelay = {30, 340}; + VideoPlayoutDelay playoutDelay = {30, 340}; ASSERT_TRUE(packet.SetExtension(playoutDelay)); packet.SetExtension(kTimeOffset); packet.SetExtension(kVoiceActive, kAudioLevel); @@ -273,7 +273,7 @@ TEST(RtpPacketTest, CreateWithTwoByteHeaderExtensionLast) { EXPECT_THAT(kPacketWithTOAndAL, ElementsAreArray(packet.data(), packet.size())); // Set extension that requires two-byte header. 
- PlayoutDelay playoutDelay = {30, 340}; + VideoPlayoutDelay playoutDelay = {30, 340}; ASSERT_TRUE(packet.SetExtension(playoutDelay)); EXPECT_THAT(kPacketWithTwoByteExtensionIdLast, ElementsAreArray(packet.data(), packet.size())); @@ -354,6 +354,35 @@ TEST(RtpPacketTest, CreateWithMaxSizeHeaderExtension) { EXPECT_EQ(read, kValue); } +TEST(RtpPacketTest, SetsRegisteredExtension) { + RtpPacketToSend::ExtensionManager extensions; + extensions.Register(kTransmissionOffsetExtensionId); + RtpPacketToSend packet(&extensions); + + EXPECT_TRUE(packet.IsRegistered()); + EXPECT_FALSE(packet.HasExtension()); + + // Try to set the extensions. + EXPECT_TRUE(packet.SetExtension(kTimeOffset)); + + EXPECT_TRUE(packet.HasExtension()); + EXPECT_EQ(packet.GetExtension(), kTimeOffset); +} + +TEST(RtpPacketTest, FailsToSetUnregisteredExtension) { + RtpPacketToSend::ExtensionManager extensions; + extensions.Register(kTransmissionOffsetExtensionId); + RtpPacketToSend packet(&extensions); + + EXPECT_FALSE(packet.IsRegistered()); + EXPECT_FALSE(packet.HasExtension()); + + EXPECT_FALSE(packet.SetExtension(42)); + + EXPECT_FALSE(packet.HasExtension()); + EXPECT_EQ(packet.GetExtension(), absl::nullopt); +} + TEST(RtpPacketTest, SetReservedExtensionsAfterPayload) { const size_t kPayloadSize = 4; RtpPacketToSend::ExtensionManager extensions; @@ -475,6 +504,76 @@ TEST(RtpPacketTest, ParseWithExtension) { EXPECT_EQ(0u, packet.padding_size()); } +TEST(RtpPacketTest, ParseHeaderOnly) { + // clang-format off + constexpr uint8_t kPaddingHeader[] = { + 0x80, 0x62, 0x35, 0x79, + 0x65, 0x43, 0x12, 0x78, + 0x12, 0x34, 0x56, 0x78}; + // clang-format on + + RtpPacket packet; + EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader))); + EXPECT_EQ(packet.PayloadType(), 0x62u); + EXPECT_EQ(packet.SequenceNumber(), 0x3579u); + EXPECT_EQ(packet.Timestamp(), 0x65431278u); + EXPECT_EQ(packet.Ssrc(), 0x12345678u); + + EXPECT_FALSE(packet.has_padding()); + EXPECT_EQ(packet.padding_size(), 0u); + 
EXPECT_EQ(packet.payload_size(), 0u); +} + +TEST(RtpPacketTest, ParseHeaderOnlyWithPadding) { + // clang-format off + constexpr uint8_t kPaddingHeader[] = { + 0xa0, 0x62, 0x35, 0x79, + 0x65, 0x43, 0x12, 0x78, + 0x12, 0x34, 0x56, 0x78}; + // clang-format on + + RtpPacket packet; + EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader))); + + EXPECT_TRUE(packet.has_padding()); + EXPECT_EQ(packet.padding_size(), 0u); + EXPECT_EQ(packet.payload_size(), 0u); +} + +TEST(RtpPacketTest, ParseHeaderOnlyWithExtensionAndPadding) { + // clang-format off + constexpr uint8_t kPaddingHeader[] = { + 0xb0, 0x62, 0x35, 0x79, + 0x65, 0x43, 0x12, 0x78, + 0x12, 0x34, 0x56, 0x78, + 0xbe, 0xde, 0x00, 0x01, + 0x11, 0x00, 0x00, 0x00}; + // clang-format on + + RtpHeaderExtensionMap extensions; + extensions.Register(1); + RtpPacket packet(&extensions); + EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader))); + EXPECT_TRUE(packet.has_padding()); + EXPECT_TRUE(packet.HasExtension()); + EXPECT_EQ(packet.padding_size(), 0u); +} + +TEST(RtpPacketTest, ParsePaddingOnlyPacket) { + // clang-format off + constexpr uint8_t kPaddingHeader[] = { + 0xa0, 0x62, 0x35, 0x79, + 0x65, 0x43, 0x12, 0x78, + 0x12, 0x34, 0x56, 0x78, + 0, 0, 3}; + // clang-format on + + RtpPacket packet; + EXPECT_TRUE(packet.Parse(rtc::CopyOnWriteBuffer(kPaddingHeader))); + EXPECT_TRUE(packet.has_padding()); + EXPECT_EQ(packet.padding_size(), 3u); +} + TEST(RtpPacketTest, GetExtensionWithoutParametersReturnsOptionalValue) { RtpPacket::ExtensionManager extensions; extensions.Register(kTransmissionOffsetExtensionId); diff --git a/modules/rtp_rtcp/source/rtp_packetizer_av1.cc b/modules/rtp_rtcp/source/rtp_packetizer_av1.cc index 909b1289ed..4408beed31 100644 --- a/modules/rtp_rtcp/source/rtp_packetizer_av1.cc +++ b/modules/rtp_rtcp/source/rtp_packetizer_av1.cc @@ -88,10 +88,12 @@ int MaxFragmentSize(int remaining_bytes) { RtpPacketizerAv1::RtpPacketizerAv1(rtc::ArrayView payload, RtpPacketizer::PayloadSizeLimits 
limits, - VideoFrameType frame_type) + VideoFrameType frame_type, + bool is_last_frame_in_picture) : frame_type_(frame_type), obus_(ParseObus(payload)), - packets_(Packetize(obus_, limits)) {} + packets_(Packetize(obus_, limits)), + is_last_frame_in_picture_(is_last_frame_in_picture) {} std::vector RtpPacketizerAv1::ParseObus( rtc::ArrayView payload) { @@ -414,11 +416,8 @@ bool RtpPacketizerAv1::NextPacket(RtpPacketToSend* packet) { kAggregationHeaderSize + next_packet.packet_size); ++packet_index_; - if (packet_index_ == packets_.size()) { - // TODO(danilchap): To support spatial scalability pass and use information - // if this frame is the last in the temporal unit. - packet->SetMarker(true); - } + bool is_last_packet_in_frame = packet_index_ == packets_.size(); + packet->SetMarker(is_last_packet_in_frame && is_last_frame_in_picture_); return true; } diff --git a/modules/rtp_rtcp/source/rtp_packetizer_av1.h b/modules/rtp_rtcp/source/rtp_packetizer_av1.h index 79fa6e02f9..520e746eac 100644 --- a/modules/rtp_rtcp/source/rtp_packetizer_av1.h +++ b/modules/rtp_rtcp/source/rtp_packetizer_av1.h @@ -26,7 +26,8 @@ class RtpPacketizerAv1 : public RtpPacketizer { public: RtpPacketizerAv1(rtc::ArrayView payload, PayloadSizeLimits limits, - VideoFrameType frame_type); + VideoFrameType frame_type, + bool is_last_frame_in_picture); ~RtpPacketizerAv1() override = default; size_t NumPackets() const override { return packets_.size() - packet_index_; } @@ -63,6 +64,7 @@ class RtpPacketizerAv1 : public RtpPacketizer { const VideoFrameType frame_type_; const std::vector obus_; const std::vector packets_; + const bool is_last_frame_in_picture_; size_t packet_index_ = 0; }; diff --git a/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc b/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc new file mode 100644 index 0000000000..3d62bcef44 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.cc @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2021 The WebRTC 
project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h" + +#include + +#include +#include + +namespace webrtc { + +Av1Obu::Av1Obu(uint8_t obu_type) : header_(obu_type | kAv1ObuSizePresentBit) {} + +Av1Obu& Av1Obu::WithExtension(uint8_t extension) { + extension_ = extension; + header_ |= kAv1ObuExtensionPresentBit; + return *this; +} +Av1Obu& Av1Obu::WithoutSize() { + header_ &= ~kAv1ObuSizePresentBit; + return *this; +} +Av1Obu& Av1Obu::WithPayload(std::vector payload) { + payload_ = std::move(payload); + return *this; +} + +std::vector BuildAv1Frame(std::initializer_list obus) { + std::vector raw; + for (const Av1Obu& obu : obus) { + raw.push_back(obu.header_); + if (obu.header_ & kAv1ObuExtensionPresentBit) { + raw.push_back(obu.extension_); + } + if (obu.header_ & kAv1ObuSizePresentBit) { + // write size in leb128 format. + size_t payload_size = obu.payload_.size(); + while (payload_size >= 0x80) { + raw.push_back(0x80 | (payload_size & 0x7F)); + payload_size >>= 7; + } + raw.push_back(payload_size); + } + raw.insert(raw.end(), obu.payload_.begin(), obu.payload_.end()); + } + return raw; +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h b/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h new file mode 100644 index 0000000000..04a902fe56 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_ +#define MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_ + +#include + +#include +#include +#include + +namespace webrtc { +// All obu types offset by 3 to take correct position in the obu_header. +constexpr uint8_t kAv1ObuTypeSequenceHeader = 1 << 3; +constexpr uint8_t kAv1ObuTypeTemporalDelimiter = 2 << 3; +constexpr uint8_t kAv1ObuTypeFrameHeader = 3 << 3; +constexpr uint8_t kAv1ObuTypeTileGroup = 4 << 3; +constexpr uint8_t kAv1ObuTypeMetadata = 5 << 3; +constexpr uint8_t kAv1ObuTypeFrame = 6 << 3; +constexpr uint8_t kAv1ObuTypeTileList = 8 << 3; +constexpr uint8_t kAv1ObuExtensionPresentBit = 0b0'0000'100; +constexpr uint8_t kAv1ObuSizePresentBit = 0b0'0000'010; +constexpr uint8_t kAv1ObuExtensionS1T1 = 0b001'01'000; + +class Av1Obu { + public: + explicit Av1Obu(uint8_t obu_type); + + Av1Obu& WithExtension(uint8_t extension); + Av1Obu& WithoutSize(); + Av1Obu& WithPayload(std::vector payload); + + private: + friend std::vector BuildAv1Frame(std::initializer_list obus); + uint8_t header_; + uint8_t extension_ = 0; + std::vector payload_; +}; + +std::vector BuildAv1Frame(std::initializer_list obus); + +} // namespace webrtc +#endif // MODULES_RTP_RTCP_SOURCE_RTP_PACKETIZER_AV1_TEST_HELPER_H_ diff --git a/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc b/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc index 0529e98129..2151a59295 100644 --- a/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_packetizer_av1_unittest.cc @@ -21,6 +21,7 @@ #include "api/scoped_refptr.h" #include "api/video/encoded_image.h" 
#include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_packetizer_av1_test_helper.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer_av1.h" #include "test/gmock.h" #include "test/gtest.h" @@ -35,17 +36,6 @@ using ::testing::Le; using ::testing::SizeIs; constexpr uint8_t kNewCodedVideoSequenceBit = 0b00'00'1000; -// All obu types offset by 3 to take correct position in the obu_header. -constexpr uint8_t kObuTypeSequenceHeader = 1 << 3; -constexpr uint8_t kObuTypeTemporalDelimiter = 2 << 3; -constexpr uint8_t kObuTypeFrameHeader = 3 << 3; -constexpr uint8_t kObuTypeTileGroup = 4 << 3; -constexpr uint8_t kObuTypeMetadata = 5 << 3; -constexpr uint8_t kObuTypeFrame = 6 << 3; -constexpr uint8_t kObuTypeTileList = 8 << 3; -constexpr uint8_t kObuExtensionPresentBit = 0b0'0000'100; -constexpr uint8_t kObuSizePresentBit = 0b0'0000'010; -constexpr uint8_t kObuExtensionS1T1 = 0b001'01'000; // Wrapper around rtp_packet to make it look like container of payload bytes. struct RtpPayload { @@ -88,9 +78,11 @@ class Av1Frame { std::vector Packetize( rtc::ArrayView payload, RtpPacketizer::PayloadSizeLimits limits, - VideoFrameType frame_type = VideoFrameType::kVideoFrameDelta) { + VideoFrameType frame_type = VideoFrameType::kVideoFrameDelta, + bool is_last_frame_in_picture = true) { // Run code under test. - RtpPacketizerAv1 packetizer(payload, limits, frame_type); + RtpPacketizerAv1 packetizer(payload, limits, frame_type, + is_last_frame_in_picture); // Convert result into structure that is easier to run expectation against. 
std::vector result(packetizer.NumPackets()); for (RtpPayload& rtp_payload : result) { @@ -107,135 +99,90 @@ Av1Frame ReassembleFrame(rtc::ArrayView rtp_payloads) { return Av1Frame(VideoRtpDepacketizerAv1().AssembleFrame(payloads)); } -class Obu { - public: - explicit Obu(uint8_t obu_type) : header_(obu_type | kObuSizePresentBit) { - EXPECT_EQ(obu_type & 0b0'1111'000, obu_type); - } - - Obu& WithExtension(uint8_t extension) { - extension_ = extension; - header_ |= kObuExtensionPresentBit; - return *this; - } - Obu& WithoutSize() { - header_ &= ~kObuSizePresentBit; - return *this; - } - Obu& WithPayload(std::vector payload) { - payload_ = std::move(payload); - return *this; - } - - private: - friend std::vector BuildAv1Frame(std::initializer_list obus); - uint8_t header_; - uint8_t extension_ = 0; - std::vector payload_; -}; - -std::vector BuildAv1Frame(std::initializer_list obus) { - std::vector raw; - for (const Obu& obu : obus) { - raw.push_back(obu.header_); - if (obu.header_ & kObuExtensionPresentBit) { - raw.push_back(obu.extension_); - } - if (obu.header_ & kObuSizePresentBit) { - // write size in leb128 format. 
- size_t payload_size = obu.payload_.size(); - while (payload_size >= 0x80) { - raw.push_back(0x80 | (payload_size & 0x7F)); - payload_size >>= 7; - } - raw.push_back(payload_size); - } - raw.insert(raw.end(), obu.payload_.begin(), obu.payload_.end()); - } - return raw; -} - TEST(RtpPacketizerAv1Test, PacketizeOneObuWithoutSizeAndExtension) { - auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithoutSize().WithPayload({1, 2, 3, 4, 5, 6, 7})}); + auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) + .WithoutSize() + .WithPayload({1, 2, 3, 4, 5, 6, 7})}); EXPECT_THAT(Packetize(kFrame, {}), ElementsAre(ElementsAre(0b00'01'0000, // aggregation header - kObuTypeFrame, 1, 2, 3, 4, 5, 6, 7))); + kAv1ObuTypeFrame, 1, 2, 3, 4, 5, 6, 7))); } TEST(RtpPacketizerAv1Test, PacketizeOneObuWithoutSizeWithExtension) { - auto kFrame = BuildAv1Frame({Obu(kObuTypeFrame) + auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) .WithoutSize() - .WithExtension(kObuExtensionS1T1) + .WithExtension(kAv1ObuExtensionS1T1) .WithPayload({2, 3, 4, 5, 6, 7})}); - EXPECT_THAT(Packetize(kFrame, {}), - ElementsAre(ElementsAre(0b00'01'0000, // aggregation header - kObuTypeFrame | kObuExtensionPresentBit, - kObuExtensionS1T1, 2, 3, 4, 5, 6, 7))); + EXPECT_THAT( + Packetize(kFrame, {}), + ElementsAre(ElementsAre(0b00'01'0000, // aggregation header + kAv1ObuTypeFrame | kAv1ObuExtensionPresentBit, + kAv1ObuExtensionS1T1, 2, 3, 4, 5, 6, 7))); } TEST(RtpPacketizerAv1Test, RemovesObuSizeFieldWithoutExtension) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17})}); + {Av1Obu(kAv1ObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17})}); EXPECT_THAT( Packetize(kFrame, {}), ElementsAre(ElementsAre(0b00'01'0000, // aggregation header - kObuTypeFrame, 11, 12, 13, 14, 15, 16, 17))); + kAv1ObuTypeFrame, 11, 12, 13, 14, 15, 16, 17))); } TEST(RtpPacketizerAv1Test, RemovesObuSizeFieldWithExtension) { - auto kFrame = BuildAv1Frame({Obu(kObuTypeFrame) - 
.WithExtension(kObuExtensionS1T1) + auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) + .WithExtension(kAv1ObuExtensionS1T1) .WithPayload({1, 2, 3, 4, 5, 6, 7})}); - EXPECT_THAT(Packetize(kFrame, {}), - ElementsAre(ElementsAre(0b00'01'0000, // aggregation header - kObuTypeFrame | kObuExtensionPresentBit, - kObuExtensionS1T1, 1, 2, 3, 4, 5, 6, 7))); + EXPECT_THAT( + Packetize(kFrame, {}), + ElementsAre(ElementsAre(0b00'01'0000, // aggregation header + kAv1ObuTypeFrame | kAv1ObuExtensionPresentBit, + kAv1ObuExtensionS1T1, 1, 2, 3, 4, 5, 6, 7))); } TEST(RtpPacketizerAv1Test, OmitsSizeForLastObuWhenThreeObusFitsIntoThePacket) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}), - Obu(kObuTypeMetadata).WithPayload({11, 12, 13, 14}), - Obu(kObuTypeFrame).WithPayload({21, 22, 23, 24, 25, 26})}); - EXPECT_THAT( - Packetize(kFrame, {}), - ElementsAre(ElementsAre(0b00'11'0000, // aggregation header - 7, kObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, // - 5, kObuTypeMetadata, 11, 12, 13, 14, // - kObuTypeFrame, 21, 22, 23, 24, 25, 26))); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}), + Av1Obu(kAv1ObuTypeMetadata).WithPayload({11, 12, 13, 14}), + Av1Obu(kAv1ObuTypeFrame).WithPayload({21, 22, 23, 24, 25, 26})}); + EXPECT_THAT(Packetize(kFrame, {}), + ElementsAre(ElementsAre( + 0b00'11'0000, // aggregation header + 7, kAv1ObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, // + 5, kAv1ObuTypeMetadata, 11, 12, 13, 14, // + kAv1ObuTypeFrame, 21, 22, 23, 24, 25, 26))); } TEST(RtpPacketizerAv1Test, UseSizeForAllObusWhenFourObusFitsIntoThePacket) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}), - Obu(kObuTypeMetadata).WithPayload({11, 12, 13, 14}), - Obu(kObuTypeFrameHeader).WithPayload({21, 22, 23}), - Obu(kObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})}); - EXPECT_THAT( - Packetize(kFrame, {}), - ElementsAre(ElementsAre(0b00'00'0000, // aggregation header - 7, 
kObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, // - 5, kObuTypeMetadata, 11, 12, 13, 14, // - 4, kObuTypeFrameHeader, 21, 22, 23, // - 7, kObuTypeTileGroup, 31, 32, 33, 34, 35, 36))); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6}), + Av1Obu(kAv1ObuTypeMetadata).WithPayload({11, 12, 13, 14}), + Av1Obu(kAv1ObuTypeFrameHeader).WithPayload({21, 22, 23}), + Av1Obu(kAv1ObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})}); + EXPECT_THAT(Packetize(kFrame, {}), + ElementsAre(ElementsAre( + 0b00'00'0000, // aggregation header + 7, kAv1ObuTypeSequenceHeader, 1, 2, 3, 4, 5, 6, // + 5, kAv1ObuTypeMetadata, 11, 12, 13, 14, // + 4, kAv1ObuTypeFrameHeader, 21, 22, 23, // + 7, kAv1ObuTypeTileGroup, 31, 32, 33, 34, 35, 36))); } TEST(RtpPacketizerAv1Test, DiscardsTemporalDelimiterAndTileListObu) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeTemporalDelimiter), Obu(kObuTypeMetadata), - Obu(kObuTypeTileList).WithPayload({1, 2, 3, 4, 5, 6}), - Obu(kObuTypeFrameHeader).WithPayload({21, 22, 23}), - Obu(kObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})}); + {Av1Obu(kAv1ObuTypeTemporalDelimiter), Av1Obu(kAv1ObuTypeMetadata), + Av1Obu(kAv1ObuTypeTileList).WithPayload({1, 2, 3, 4, 5, 6}), + Av1Obu(kAv1ObuTypeFrameHeader).WithPayload({21, 22, 23}), + Av1Obu(kAv1ObuTypeTileGroup).WithPayload({31, 32, 33, 34, 35, 36})}); EXPECT_THAT( Packetize(kFrame, {}), ElementsAre(ElementsAre(0b00'11'0000, // aggregation header 1, - kObuTypeMetadata, // - 4, kObuTypeFrameHeader, 21, 22, + kAv1ObuTypeMetadata, // + 4, kAv1ObuTypeFrameHeader, 21, 22, 23, // - kObuTypeTileGroup, 31, 32, 33, 34, 35, 36))); + kAv1ObuTypeTileGroup, 31, 32, 33, 34, 35, 36))); } TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPacketForceSplitObuHeader) { @@ -244,17 +191,17 @@ TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPacketForceSplitObuHeader) { const uint8_t kExpectPayload1[6] = { 0b01'10'0000, // aggregation_header 3, - kObuTypeFrameHeader | kObuExtensionPresentBit, - kObuExtensionS1T1, + 
kAv1ObuTypeFrameHeader | kAv1ObuExtensionPresentBit, + kAv1ObuExtensionS1T1, 21, // - kObuTypeTileGroup | kObuExtensionPresentBit}; + kAv1ObuTypeTileGroup | kAv1ObuExtensionPresentBit}; const uint8_t kExpectPayload2[6] = {0b10'01'0000, // aggregation_header - kObuExtensionS1T1, 11, 12, 13, 14}; - auto kFrame = BuildAv1Frame({Obu(kObuTypeFrameHeader) - .WithExtension(kObuExtensionS1T1) + kAv1ObuExtensionS1T1, 11, 12, 13, 14}; + auto kFrame = BuildAv1Frame({Av1Obu(kAv1ObuTypeFrameHeader) + .WithExtension(kAv1ObuExtensionS1T1) .WithPayload({21}), - Obu(kObuTypeTileGroup) - .WithExtension(kObuExtensionS1T1) + Av1Obu(kAv1ObuTypeTileGroup) + .WithExtension(kAv1ObuExtensionS1T1) .WithPayload({11, 12, 13, 14})}); RtpPacketizer::PayloadSizeLimits limits; @@ -267,7 +214,7 @@ TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPacketForceSplitObuHeader) { TEST(RtpPacketizerAv1Test, SetsNbitAtTheFirstPacketOfAKeyFrameWithSequenceHeader) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})}); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 6; auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameKey); @@ -278,8 +225,8 @@ TEST(RtpPacketizerAv1Test, TEST(RtpPacketizerAv1Test, DoesntSetNbitAtThePacketsOfAKeyFrameWithoutSequenceHeader) { - auto kFrame = - BuildAv1Frame({Obu(kObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7})}); + auto kFrame = BuildAv1Frame( + {Av1Obu(kAv1ObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 6; auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameKey); @@ -291,7 +238,7 @@ TEST(RtpPacketizerAv1Test, TEST(RtpPacketizerAv1Test, DoesntSetNbitAtThePacketsOfADeltaFrame) { // Even when that delta frame starts with a (redundant) sequence header. 
auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})}); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({1, 2, 3, 4, 5, 6, 7})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 6; auto packets = Packetize(kFrame, limits, VideoFrameType::kVideoFrameDelta); @@ -306,8 +253,9 @@ TEST(RtpPacketizerAv1Test, DoesntSetNbitAtThePacketsOfADeltaFrame) { // RtpDepacketizer always inserts obu_size fields in the output, use frame where // each obu has obu_size fields for more streight forward validation. TEST(RtpPacketizerAv1Test, SplitSingleObuIntoTwoPackets) { - auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})}); + auto kFrame = + BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) + .WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 8; @@ -320,7 +268,7 @@ TEST(RtpPacketizerAv1Test, SplitSingleObuIntoTwoPackets) { TEST(RtpPacketizerAv1Test, SplitSingleObuIntoManyPackets) { auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload(std::vector(1200, 27))}); + {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector(1200, 27))}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 100; @@ -332,12 +280,40 @@ TEST(RtpPacketizerAv1Test, SplitSingleObuIntoManyPackets) { EXPECT_THAT(ReassembleFrame(payloads), ElementsAreArray(kFrame)); } +TEST(RtpPacketizerAv1Test, SetMarkerBitForLastPacketInEndOfPictureFrame) { + auto kFrame = BuildAv1Frame( + {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector(200, 27))}); + + RtpPacketizer::PayloadSizeLimits limits; + limits.max_payload_len = 100; + auto payloads = Packetize(kFrame, limits, VideoFrameType::kVideoFrameDelta, + /*is_last_frame_in_picture=*/true); + ASSERT_THAT(payloads, SizeIs(3u)); + EXPECT_FALSE(payloads[0].rtp_packet.Marker()); + EXPECT_FALSE(payloads[1].rtp_packet.Marker()); + EXPECT_TRUE(payloads[2].rtp_packet.Marker()); +} + 
+TEST(RtpPacketizerAv1Test, DoesntSetMarkerBitForPacketsNotInEndOfPictureFrame) { + auto kFrame = BuildAv1Frame( + {Av1Obu(kAv1ObuTypeFrame).WithPayload(std::vector(200, 27))}); + + RtpPacketizer::PayloadSizeLimits limits; + limits.max_payload_len = 100; + auto payloads = Packetize(kFrame, limits, VideoFrameType::kVideoFrameDelta, + /*is_last_frame_in_picture=*/false); + ASSERT_THAT(payloads, SizeIs(3u)); + EXPECT_FALSE(payloads[0].rtp_packet.Marker()); + EXPECT_FALSE(payloads[1].rtp_packet.Marker()); + EXPECT_FALSE(payloads[2].rtp_packet.Marker()); +} + TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPackets) { // 2nd OBU is too large to fit into one packet, so its head would be in the // same packet as the 1st OBU. auto kFrame = BuildAv1Frame( - {Obu(kObuTypeSequenceHeader).WithPayload({11, 12}), - Obu(kObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7, 8, 9})}); + {Av1Obu(kAv1ObuTypeSequenceHeader).WithPayload({11, 12}), + Av1Obu(kAv1ObuTypeFrame).WithPayload({1, 2, 3, 4, 5, 6, 7, 8, 9})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 8; @@ -350,8 +326,9 @@ TEST(RtpPacketizerAv1Test, SplitTwoObusIntoTwoPackets) { TEST(RtpPacketizerAv1Test, SplitSingleObuIntoTwoPacketsBecauseOfSinglePacketLimit) { - auto kFrame = BuildAv1Frame( - {Obu(kObuTypeFrame).WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})}); + auto kFrame = + BuildAv1Frame({Av1Obu(kAv1ObuTypeFrame) + .WithPayload({11, 12, 13, 14, 15, 16, 17, 18, 19})}); RtpPacketizer::PayloadSizeLimits limits; limits.max_payload_len = 10; limits.single_packet_reduction_len = 8; diff --git a/modules/rtp_rtcp/source/rtp_rtcp_config.h b/modules/rtp_rtcp/source/rtp_rtcp_config.h index 6863c4c353..66caadd578 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_config.h +++ b/modules/rtp_rtcp/source/rtp_rtcp_config.h @@ -11,13 +11,15 @@ #ifndef MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_ #define MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_ +#include "api/units/time_delta.h" + // Configuration file for RTP utilities 
(RTPSender, RTPReceiver ...) namespace webrtc { -enum { kDefaultMaxReorderingThreshold = 50 }; // In sequence numbers. -enum { kRtcpMaxNackFields = 253 }; +constexpr int kDefaultMaxReorderingThreshold = 5; // In sequence numbers. +constexpr int kRtcpMaxNackFields = 253; -enum { RTCP_SEND_BEFORE_KEY_FRAME_MS = 100 }; -enum { RTCP_MAX_REPORT_BLOCKS = 31 }; // RFC 3550 page 37 +constexpr TimeDelta RTCP_SEND_BEFORE_KEY_FRAME = TimeDelta::Millis(100); +constexpr int RTCP_MAX_REPORT_BLOCKS = 31; // RFC 3550 page 37 } // namespace webrtc #endif // MODULES_RTP_RTCP_SOURCE_RTP_RTCP_CONFIG_H_ diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc index 0bd37ebdd7..3f985e213a 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc @@ -21,9 +21,12 @@ #include "api/transport/field_trial_based_config.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" +#include "modules/rtp_rtcp/source/rtcp_sender.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" +#include "system_wrappers/include/ntp_time.h" #ifdef _WIN32 // Disable warning C4355: 'this' : used in base member initializer list. @@ -39,7 +42,7 @@ const int64_t kDefaultExpectedRetransmissionTimeMs = 125; } // namespace ModuleRtpRtcpImpl::RtpSenderContext::RtpSenderContext( - const RtpRtcp::Configuration& config) + const RtpRtcpInterface::Configuration& config) : packet_history(config.clock, config.enable_rtx_padding_prioritization), packet_sender(config, &packet_history), non_paced_sender(&packet_sender), @@ -48,8 +51,17 @@ ModuleRtpRtcpImpl::RtpSenderContext::RtpSenderContext( &packet_history, config.paced_sender ? 
config.paced_sender : &non_paced_sender) {} +std::unique_ptr RtpRtcp::DEPRECATED_Create( + const Configuration& configuration) { + RTC_DCHECK(configuration.clock); + RTC_LOG(LS_ERROR) + << "*********** USING WebRTC INTERNAL IMPLEMENTATION DETAILS ***********"; + return std::make_unique(configuration); +} + ModuleRtpRtcpImpl::ModuleRtpRtcpImpl(const Configuration& configuration) - : rtcp_sender_(configuration), + : rtcp_sender_( + RTCPSender::Configuration::FromRtpRtcpConfiguration(configuration)), rtcp_receiver_(configuration, this), clock_(configuration.clock), last_bitrate_process_time_(clock_->TimeInMilliseconds()), @@ -114,20 +126,18 @@ void ModuleRtpRtcpImpl::Process() { // processed RTT for at least |kRtpRtcpRttProcessTimeMs| milliseconds. // Note that LastReceivedReportBlockMs() grabs a lock, so check // |process_rtt| first. - if (process_rtt && + if (process_rtt && rtt_stats_ != nullptr && rtcp_receiver_.LastReceivedReportBlockMs() > last_rtt_process_time_) { - std::vector receive_blocks; - rtcp_receiver_.StatisticsReceived(&receive_blocks); - int64_t max_rtt = 0; - for (std::vector::iterator it = receive_blocks.begin(); - it != receive_blocks.end(); ++it) { - int64_t rtt = 0; - rtcp_receiver_.RTT(it->sender_ssrc, &rtt, NULL, NULL, NULL); - max_rtt = (rtt > max_rtt) ? rtt : max_rtt; + int64_t max_rtt_ms = 0; + for (const auto& block : rtcp_receiver_.GetLatestReportBlockData()) { + if (block.last_rtt_ms() > max_rtt_ms) { + max_rtt_ms = block.last_rtt_ms(); + } } // Report the rtt. 
- if (rtt_stats_ && max_rtt != 0) - rtt_stats_->OnRttUpdate(max_rtt); + if (max_rtt_ms > 0) { + rtt_stats_->OnRttUpdate(max_rtt_ms); + } } // Verify receiver reports are delivered and the reported sequence number @@ -184,7 +194,7 @@ void ModuleRtpRtcpImpl::Process() { if (rtcp_sender_.TimeToSendRTCPReport()) rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); - if (TMMBR() && rtcp_receiver_.UpdateTmmbrTimers()) { + if (rtcp_sender_.TMMBR() && rtcp_receiver_.UpdateTmmbrTimers()) { rtcp_receiver_.NotifyTmmbrUpdated(); } } @@ -250,7 +260,6 @@ void ModuleRtpRtcpImpl::SetSequenceNumber(const uint16_t seq_num) { void ModuleRtpRtcpImpl::SetRtpState(const RtpState& rtp_state) { rtp_sender_->packet_generator.SetRtpState(rtp_state); - rtp_sender_->packet_sender.SetMediaHasBeenSent(rtp_state.media_has_been_sent); rtcp_sender_.SetTimestampOffset(rtp_state.start_timestamp); } @@ -260,7 +269,6 @@ void ModuleRtpRtcpImpl::SetRtxState(const RtpState& rtp_state) { RtpState ModuleRtpRtcpImpl::GetRtpState() const { RtpState state = rtp_sender_->packet_generator.GetRtpState(); - state.media_has_been_sent = rtp_sender_->packet_sender.MediaHasBeenSent(); return state; } @@ -306,8 +314,19 @@ RTCPSender::FeedbackState ModuleRtpRtcpImpl::GetFeedbackState() { } state.receiver = &rtcp_receiver_; - LastReceivedNTP(&state.last_rr_ntp_secs, &state.last_rr_ntp_frac, - &state.remote_sr); + uint32_t received_ntp_secs = 0; + uint32_t received_ntp_frac = 0; + state.remote_sr = 0; + if (rtcp_receiver_.NTP(&received_ntp_secs, &received_ntp_frac, + /*rtcp_arrival_time_secs=*/&state.last_rr_ntp_secs, + /*rtcp_arrival_time_frac=*/&state.last_rr_ntp_frac, + /*rtcp_timestamp=*/nullptr, + /*remote_sender_packet_count=*/nullptr, + /*remote_sender_octet_count=*/nullptr, + /*remote_sender_reports_count=*/nullptr)) { + state.remote_sr = ((received_ntp_secs & 0x0000ffff) << 16) + + ((received_ntp_frac & 0xffff0000) >> 16); + } state.last_xr_rtis = rtcp_receiver_.ConsumeReceivedXrReferenceTimeInfo(); @@ -320,9 
+339,7 @@ RTCPSender::FeedbackState ModuleRtpRtcpImpl::GetFeedbackState() { int32_t ModuleRtpRtcpImpl::SetSendingStatus(const bool sending) { if (rtcp_sender_.Sending() != sending) { // Sends RTCP BYE when going from true to false - if (rtcp_sender_.SetSendingStatus(GetFeedbackState(), sending) != 0) { - RTC_LOG(LS_WARNING) << "Failed to send RTCP BYE"; - } + rtcp_sender_.SetSendingStatus(GetFeedbackState(), sending); } return 0; } @@ -364,7 +381,16 @@ bool ModuleRtpRtcpImpl::OnSendingRtpFrame(uint32_t timestamp, if (!Sending()) return false; - rtcp_sender_.SetLastRtpTime(timestamp, capture_time_ms, payload_type); + // TODO(bugs.webrtc.org/12873): Migrate this method and it's users to use + // optional Timestamps. + absl::optional capture_time; + if (capture_time_ms > 0) { + capture_time = Timestamp::Millis(capture_time_ms); + } + absl::optional payload_type_optional; + if (payload_type >= 0) + payload_type_optional = payload_type; + rtcp_sender_.SetLastRtpTime(timestamp, capture_time, payload_type_optional); // Make sure an RTCP report isn't queued behind a key frame. if (rtcp_sender_.TimeToSendRTCPReport(force_sender_report)) rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); @@ -383,6 +409,17 @@ bool ModuleRtpRtcpImpl::TrySendPacket(RtpPacketToSend* packet, return true; } +void ModuleRtpRtcpImpl::SetFecProtectionParams(const FecProtectionParams&, + const FecProtectionParams&) { + // Deferred FEC not supported in deprecated RTP module. +} + +std::vector> +ModuleRtpRtcpImpl::FetchFecPackets() { + // Deferred FEC not supported in deprecated RTP module. 
+ return {}; +} + void ModuleRtpRtcpImpl::OnPacketsAcknowledged( rtc::ArrayView sequence_numbers) { RTC_DCHECK(rtp_sender_); @@ -450,19 +487,6 @@ int32_t ModuleRtpRtcpImpl::SetCNAME(const char* c_name) { return rtcp_sender_.SetCNAME(c_name); } -int32_t ModuleRtpRtcpImpl::AddMixedCNAME(uint32_t ssrc, const char* c_name) { - return rtcp_sender_.AddMixedCNAME(ssrc, c_name); -} - -int32_t ModuleRtpRtcpImpl::RemoveMixedCNAME(const uint32_t ssrc) { - return rtcp_sender_.RemoveMixedCNAME(ssrc); -} - -int32_t ModuleRtpRtcpImpl::RemoteCNAME(const uint32_t remote_ssrc, - char c_name[RTCP_CNAME_SIZE]) const { - return rtcp_receiver_.CNAME(remote_ssrc, c_name); -} - int32_t ModuleRtpRtcpImpl::RemoteNTP(uint32_t* received_ntpsecs, uint32_t* received_ntpfrac, uint32_t* rtcp_arrival_time_secs, @@ -470,7 +494,10 @@ int32_t ModuleRtpRtcpImpl::RemoteNTP(uint32_t* received_ntpsecs, uint32_t* rtcp_timestamp) const { return rtcp_receiver_.NTP(received_ntpsecs, received_ntpfrac, rtcp_arrival_time_secs, rtcp_arrival_time_frac, - rtcp_timestamp) + rtcp_timestamp, + /*remote_sender_packet_count=*/nullptr, + /*remote_sender_octet_count=*/nullptr, + /*remote_sender_reports_count=*/nullptr) ? 0 : -1; } @@ -510,47 +537,6 @@ int32_t ModuleRtpRtcpImpl::SendRTCP(RTCPPacketType packet_type) { return rtcp_sender_.SendRTCP(GetFeedbackState(), packet_type); } -int32_t ModuleRtpRtcpImpl::SetRTCPApplicationSpecificData( - const uint8_t sub_type, - const uint32_t name, - const uint8_t* data, - const uint16_t length) { - return rtcp_sender_.SetApplicationSpecificData(sub_type, name, data, length); -} - -void ModuleRtpRtcpImpl::SetRtcpXrRrtrStatus(bool enable) { - rtcp_receiver_.SetRtcpXrRrtrStatus(enable); - rtcp_sender_.SendRtcpXrReceiverReferenceTime(enable); -} - -bool ModuleRtpRtcpImpl::RtcpXrRrtrStatus() const { - return rtcp_sender_.RtcpXrReceiverReferenceTime(); -} - -// TODO(asapersson): Replace this method with the one below. 
-int32_t ModuleRtpRtcpImpl::DataCountersRTP(size_t* bytes_sent, - uint32_t* packets_sent) const { - StreamDataCounters rtp_stats; - StreamDataCounters rtx_stats; - rtp_sender_->packet_sender.GetDataCounters(&rtp_stats, &rtx_stats); - - if (bytes_sent) { - // TODO(http://crbug.com/webrtc/10525): Bytes sent should only include - // payload bytes, not header and padding bytes. - *bytes_sent = rtp_stats.transmitted.payload_bytes + - rtp_stats.transmitted.padding_bytes + - rtp_stats.transmitted.header_bytes + - rtx_stats.transmitted.payload_bytes + - rtx_stats.transmitted.padding_bytes + - rtx_stats.transmitted.header_bytes; - } - if (packets_sent) { - *packets_sent = - rtp_stats.transmitted.packets + rtx_stats.transmitted.packets; - } - return 0; -} - void ModuleRtpRtcpImpl::GetSendStreamDataCounters( StreamDataCounters* rtp_counters, StreamDataCounters* rtx_counters) const { @@ -558,16 +544,31 @@ void ModuleRtpRtcpImpl::GetSendStreamDataCounters( } // Received RTCP report. -int32_t ModuleRtpRtcpImpl::RemoteRTCPStat( - std::vector* receive_blocks) const { - return rtcp_receiver_.StatisticsReceived(receive_blocks); -} - std::vector ModuleRtpRtcpImpl::GetLatestReportBlockData() const { return rtcp_receiver_.GetLatestReportBlockData(); } +absl::optional +ModuleRtpRtcpImpl::GetSenderReportStats() const { + SenderReportStats stats; + uint32_t remote_timestamp_secs; + uint32_t remote_timestamp_frac; + uint32_t arrival_timestamp_secs; + uint32_t arrival_timestamp_frac; + if (rtcp_receiver_.NTP(&remote_timestamp_secs, &remote_timestamp_frac, + &arrival_timestamp_secs, &arrival_timestamp_frac, + /*rtcp_timestamp=*/nullptr, &stats.packets_sent, + &stats.bytes_sent, &stats.reports_count)) { + stats.last_remote_timestamp.Set(remote_timestamp_secs, + remote_timestamp_frac); + stats.last_arrival_timestamp.Set(arrival_timestamp_secs, + arrival_timestamp_frac); + return stats; + } + return absl::nullopt; +} + // (REMB) Receiver Estimated Max Bitrate. 
void ModuleRtpRtcpImpl::SetRemb(int64_t bitrate_bps, std::vector ssrcs) { @@ -582,12 +583,6 @@ void ModuleRtpRtcpImpl::SetExtmapAllowMixed(bool extmap_allow_mixed) { rtp_sender_->packet_generator.SetExtmapAllowMixed(extmap_allow_mixed); } -int32_t ModuleRtpRtcpImpl::RegisterSendRtpHeaderExtension( - const RTPExtensionType type, - const uint8_t id) { - return rtp_sender_->packet_generator.RegisterRtpHeaderExtension(type, id); -} - void ModuleRtpRtcpImpl::RegisterRtpHeaderExtension(absl::string_view uri, int id) { bool registered = @@ -604,15 +599,6 @@ void ModuleRtpRtcpImpl::DeregisterSendRtpHeaderExtension( rtp_sender_->packet_generator.DeregisterRtpHeaderExtension(uri); } -// (TMMBR) Temporary Max Media Bit Rate. -bool ModuleRtpRtcpImpl::TMMBR() const { - return rtcp_sender_.TMMBR(); -} - -void ModuleRtpRtcpImpl::SetTMMBRStatus(const bool enable) { - rtcp_sender_.SetTMMBRStatus(enable); -} - void ModuleRtpRtcpImpl::SetTmmbn(std::vector bounding_set) { rtcp_sender_.SetTmmbn(std::move(bounding_set)); } @@ -709,18 +695,9 @@ void ModuleRtpRtcpImpl::SetRemoteSSRC(const uint32_t ssrc) { rtcp_receiver_.SetRemoteSSRC(ssrc); } -// TODO(nisse): Delete video_rate amd fec_rate arguments. -void ModuleRtpRtcpImpl::BitrateSent(uint32_t* total_rate, - uint32_t* video_rate, - uint32_t* fec_rate, - uint32_t* nack_rate) const { - RtpSendRates send_rates = rtp_sender_->packet_sender.GetSendRates(); - *total_rate = send_rates.Sum().bps(); - if (video_rate) - *video_rate = 0; - if (fec_rate) - *fec_rate = 0; - *nack_rate = send_rates[RtpPacketMediaType::kRetransmission].bps(); +void ModuleRtpRtcpImpl::SetLocalSsrc(uint32_t local_ssrc) { + rtcp_receiver_.set_local_media_ssrc(local_ssrc); + rtcp_sender_.SetSsrc(local_ssrc); } RtpSendRates ModuleRtpRtcpImpl::GetSendRates() const { @@ -768,26 +745,9 @@ void ModuleRtpRtcpImpl::OnReceivedRtcpReportBlocks( } } -bool ModuleRtpRtcpImpl::LastReceivedNTP( - uint32_t* rtcp_arrival_time_secs, // When we got the last report. 
- uint32_t* rtcp_arrival_time_frac, - uint32_t* remote_sr) const { - // Remote SR: NTP inside the last received (mid 16 bits from sec and frac). - uint32_t ntp_secs = 0; - uint32_t ntp_frac = 0; - - if (!rtcp_receiver_.NTP(&ntp_secs, &ntp_frac, rtcp_arrival_time_secs, - rtcp_arrival_time_frac, NULL)) { - return false; - } - *remote_sr = - ((ntp_secs & 0x0000ffff) << 16) + ((ntp_frac & 0xffff0000) >> 16); - return true; -} - void ModuleRtpRtcpImpl::set_rtt_ms(int64_t rtt_ms) { { - rtc::CritScope cs(&critical_section_rtt_); + MutexLock lock(&mutex_rtt_); rtt_ms_ = rtt_ms; } if (rtp_sender_) { @@ -796,7 +756,7 @@ void ModuleRtpRtcpImpl::set_rtt_ms(int64_t rtt_ms) { } int64_t ModuleRtpRtcpImpl::rtt_ms() const { - rtc::CritScope cs(&critical_section_rtt_); + MutexLock lock(&mutex_rtt_); return rtt_ms_; } @@ -813,15 +773,4 @@ const RTPSender* ModuleRtpRtcpImpl::RtpSender() const { return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr; } -DataRate ModuleRtpRtcpImpl::SendRate() const { - RTC_DCHECK(rtp_sender_); - return rtp_sender_->packet_sender.GetSendRates().Sum(); -} - -DataRate ModuleRtpRtcpImpl::NackOverheadRate() const { - RTC_DCHECK(rtp_sender_); - return rtp_sender_->packet_sender - .GetSendRates()[RtpPacketMediaType::kRetransmission]; -} - } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h index 2d07060b52..b0e0b41c48 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h @@ -26,15 +26,15 @@ #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" // RTCPPacketType +#include "modules/rtp_rtcp/source/deprecated/deprecated_rtp_sender_egress.h" #include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h" #include "modules/rtp_rtcp/source/rtcp_receiver.h" #include "modules/rtp_rtcp/source/rtcp_sender.h" #include 
"modules/rtp_rtcp/source/rtp_packet_history.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "modules/rtp_rtcp/source/rtp_sender.h" -#include "modules/rtp_rtcp/source/rtp_sender_egress.h" -#include "rtc_base/critical_section.h" #include "rtc_base/gtest_prod_util.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -45,7 +45,8 @@ struct RTPVideoHeader; // DEPRECATED. class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { public: - explicit ModuleRtpRtcpImpl(const RtpRtcp::Configuration& configuration); + explicit ModuleRtpRtcpImpl( + const RtpRtcpInterface::Configuration& configuration); ~ModuleRtpRtcpImpl() override; // Returns the number of milliseconds until the module want a worker thread to @@ -62,6 +63,7 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { size_t incoming_packet_length) override; void SetRemoteSSRC(uint32_t ssrc) override; + void SetLocalSsrc(uint32_t ssrc) override; // Sender part. void RegisterSendPayloadFrequency(int payload_type, @@ -72,8 +74,6 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { void SetExtmapAllowMixed(bool extmap_allow_mixed) override; // Register RTP header extension. 
- int32_t RegisterSendRtpHeaderExtension(RTPExtensionType type, - uint8_t id) override; void RegisterRtpHeaderExtension(absl::string_view uri, int id) override; int32_t DeregisterSendRtpHeaderExtension(RTPExtensionType type) override; void DeregisterSendRtpHeaderExtension(absl::string_view uri) override; @@ -138,6 +138,11 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { bool TrySendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info) override; + void SetFecProtectionParams(const FecProtectionParams& delta_params, + const FecProtectionParams& key_params) override; + + std::vector> FetchFecPackets() override; + void OnPacketsAcknowledged( rtc::ArrayView sequence_numbers) override; @@ -160,10 +165,6 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { // Set RTCP CName. int32_t SetCNAME(const char* c_name) override; - // Get remote CName. - int32_t RemoteCNAME(uint32_t remote_ssrc, - char c_name[RTCP_CNAME_SIZE]) const override; - // Get remote NTP. int32_t RemoteNTP(uint32_t* received_ntp_secs, uint32_t* received_ntp_frac, @@ -171,10 +172,6 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { uint32_t* rtcp_arrival_time_frac, uint32_t* rtcp_timestamp) const override; - int32_t AddMixedCNAME(uint32_t ssrc, const char* c_name) override; - - int32_t RemoveMixedCNAME(uint32_t ssrc) override; - // Get RoundTripTime. int32_t RTT(uint32_t remote_ssrc, int64_t* rtt, @@ -188,32 +185,21 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { // Normal SR and RR are triggered via the process function. int32_t SendRTCP(RTCPPacketType rtcpPacketType) override; - // Statistics of the amount of data sent and received. 
- int32_t DataCountersRTP(size_t* bytes_sent, - uint32_t* packets_sent) const override; - void GetSendStreamDataCounters( StreamDataCounters* rtp_counters, StreamDataCounters* rtx_counters) const override; - // Get received RTCP report, report block. - int32_t RemoteRTCPStat( - std::vector* receive_blocks) const override; // A snapshot of the most recent Report Block with additional data of // interest to statistics. Used to implement RTCRemoteInboundRtpStreamStats. // Within this list, the ReportBlockData::RTCPReportBlock::source_ssrc(), // which is the SSRC of the corresponding outbound RTP stream, is unique. std::vector GetLatestReportBlockData() const override; + absl::optional GetSenderReportStats() const override; // (REMB) Receiver Estimated Max Bitrate. void SetRemb(int64_t bitrate_bps, std::vector ssrcs) override; void UnsetRemb() override; - // (TMMBR) Temporary Max Media Bit Rate. - bool TMMBR() const override; - - void SetTMMBRStatus(bool enable) override; - void SetTmmbn(std::vector bounding_set) override; size_t MaxRtpPacketSize() const override; @@ -232,37 +218,15 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { // requests. void SetStorePacketsStatus(bool enable, uint16_t number_to_store) override; - bool StorePackets() const override; - void SendCombinedRtcpPacket( std::vector> rtcp_packets) override; - // (APP) Application specific data. - int32_t SetRTCPApplicationSpecificData(uint8_t sub_type, - uint32_t name, - const uint8_t* data, - uint16_t length) override; - - // (XR) Receiver reference time report. - void SetRtcpXrRrtrStatus(bool enable) override; - - bool RtcpXrRrtrStatus() const override; - // Video part. 
int32_t SendLossNotification(uint16_t last_decoded_seq_num, uint16_t last_received_seq_num, bool decodability_flag, bool buffering_allowed) override; - bool LastReceivedNTP(uint32_t* NTPsecs, - uint32_t* NTPfrac, - uint32_t* remote_sr) const; - - void BitrateSent(uint32_t* total_rate, - uint32_t* video_rate, - uint32_t* fec_rate, - uint32_t* nackRate) const override; - RtpSendRates GetSendRates() const override; void OnReceivedNack( @@ -293,25 +257,25 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { RTCPReceiver* rtcp_receiver() { return &rtcp_receiver_; } const RTCPReceiver* rtcp_receiver() const { return &rtcp_receiver_; } - Clock* clock() const { return clock_; } + void SetMediaHasBeenSent(bool media_has_been_sent) { + rtp_sender_->packet_sender.SetMediaHasBeenSent(media_has_been_sent); + } - // TODO(sprang): Remove when usage is gone. - DataRate SendRate() const; - DataRate NackOverheadRate() const; + Clock* clock() const { return clock_; } private: FRIEND_TEST_ALL_PREFIXES(RtpRtcpImplTest, Rtt); FRIEND_TEST_ALL_PREFIXES(RtpRtcpImplTest, RttForReceiverOnly); struct RtpSenderContext { - explicit RtpSenderContext(const RtpRtcp::Configuration& config); + explicit RtpSenderContext(const RtpRtcpInterface::Configuration& config); // Storage of packets, for retransmissions and padding, if applicable. RtpPacketHistory packet_history; // Handles final time timestamping/stats/etc and handover to Transport. - RtpSenderEgress packet_sender; + DEPRECATED_RtpSenderEgress packet_sender; // If no paced sender configured, this class will be used to pass packets // from |packet_generator_| to |packet_sender_|. - RtpSenderEgress::NonPacedPacketSender non_paced_sender; + DEPRECATED_RtpSenderEgress::NonPacedPacketSender non_paced_sender; // Handles creation of RTP packets to be sent. 
RTPSender packet_generator; }; @@ -321,6 +285,12 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { bool TimeToSendFullNackList(int64_t now) const; + // Returns true if the module is configured to store packets. + bool StorePackets() const; + + // Returns current Receiver Reference Time Report (RTTR) status. + bool RtcpXrRrtrStatus() const; + std::unique_ptr rtp_sender_; RTCPSender rtcp_sender_; @@ -342,8 +312,8 @@ class ModuleRtpRtcpImpl : public RtpRtcp, public RTCPReceiver::ModuleRtpRtcp { RtcpRttStats* const rtt_stats_; // The processed RTT from RtcpRttStats. - rtc::CriticalSection critical_section_rtt_; - int64_t rtt_ms_; + mutable Mutex mutex_rtt_; + int64_t rtt_ms_ RTC_GUARDED_BY(mutex_rtt_); }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc index c8f10ac481..7fae1e3bd0 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2.cc @@ -19,11 +19,18 @@ #include #include +#include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/transport/field_trial_based_config.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/time_utils.h" +#include "system_wrappers/include/ntp_time.h" #ifdef _WIN32 // Disable warning C4355: 'this' : used in base member initializer list. 
@@ -32,42 +39,58 @@ namespace webrtc { namespace { -const int64_t kRtpRtcpMaxIdleTimeProcessMs = 5; -const int64_t kRtpRtcpRttProcessTimeMs = 1000; -const int64_t kRtpRtcpBitrateProcessTimeMs = 10; const int64_t kDefaultExpectedRetransmissionTimeMs = 125; + +constexpr TimeDelta kRttUpdateInterval = TimeDelta::Millis(1000); + +RTCPSender::Configuration AddRtcpSendEvaluationCallback( + RTCPSender::Configuration config, + std::function send_evaluation_callback) { + config.schedule_next_rtcp_send_evaluation_function = + std::move(send_evaluation_callback); + return config; +} + +int DelayMillisForDuration(TimeDelta duration) { + // TimeDelta::ms() rounds downwards sometimes which leads to too little time + // slept. Account for this, unless |duration| is exactly representable in + // millisecs. + return (duration.us() + rtc::kNumMillisecsPerSec - 1) / + rtc::kNumMicrosecsPerMillisec; +} } // namespace ModuleRtpRtcpImpl2::RtpSenderContext::RtpSenderContext( - const RtpRtcp::Configuration& config) + const RtpRtcpInterface::Configuration& config) : packet_history(config.clock, config.enable_rtx_padding_prioritization), packet_sender(config, &packet_history), - non_paced_sender(&packet_sender), + non_paced_sender(&packet_sender, this), packet_generator( config, &packet_history, config.paced_sender ? 
config.paced_sender : &non_paced_sender) {} - -std::unique_ptr RtpRtcp::Create(const Configuration& configuration) { - RTC_DCHECK(configuration.clock); - return std::make_unique(configuration); +void ModuleRtpRtcpImpl2::RtpSenderContext::AssignSequenceNumber( + RtpPacketToSend* packet) { + packet_generator.AssignSequenceNumber(packet); } ModuleRtpRtcpImpl2::ModuleRtpRtcpImpl2(const Configuration& configuration) - : rtcp_sender_(configuration), + : worker_queue_(TaskQueueBase::Current()), + rtcp_sender_(AddRtcpSendEvaluationCallback( + RTCPSender::Configuration::FromRtpRtcpConfiguration(configuration), + [this](TimeDelta duration) { + ScheduleRtcpSendEvaluation(duration); + })), rtcp_receiver_(configuration, this), clock_(configuration.clock), - last_bitrate_process_time_(clock_->TimeInMilliseconds()), - last_rtt_process_time_(clock_->TimeInMilliseconds()), - next_process_time_(clock_->TimeInMilliseconds() + - kRtpRtcpMaxIdleTimeProcessMs), packet_overhead_(28), // IPV4 UDP. nack_last_time_sent_full_ms_(0), nack_last_seq_number_sent_(0), remote_bitrate_(configuration.remote_bitrate_estimator), rtt_stats_(configuration.rtt_stats), rtt_ms_(0) { - process_thread_checker_.Detach(); + RTC_DCHECK(worker_queue_); + packet_sequence_checker_.Detach(); if (!configuration.receiver_only) { rtp_sender_ = std::make_unique(configuration); // Make sure rtcp sender use same timestamp offset as rtp sender. @@ -80,123 +103,27 @@ ModuleRtpRtcpImpl2::ModuleRtpRtcpImpl2(const Configuration& configuration) // webrtc::VideoSendStream::Config::Rtp::kDefaultMaxPacketSize. 
const size_t kTcpOverIpv4HeaderSize = 40; SetMaxRtpPacketSize(IP_PACKET_SIZE - kTcpOverIpv4HeaderSize); -} -ModuleRtpRtcpImpl2::~ModuleRtpRtcpImpl2() { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + if (rtt_stats_) { + rtt_update_task_ = RepeatingTaskHandle::DelayedStart( + worker_queue_, kRttUpdateInterval, [this]() { + PeriodicUpdate(); + return kRttUpdateInterval; + }); + } } -// Returns the number of milliseconds until the module want a worker thread -// to call Process. -int64_t ModuleRtpRtcpImpl2::TimeUntilNextProcess() { - RTC_DCHECK_RUN_ON(&process_thread_checker_); - return std::max(0, - next_process_time_ - clock_->TimeInMilliseconds()); +ModuleRtpRtcpImpl2::~ModuleRtpRtcpImpl2() { + RTC_DCHECK_RUN_ON(worker_queue_); + rtt_update_task_.Stop(); } -// Process any pending tasks such as timeouts (non time critical events). -void ModuleRtpRtcpImpl2::Process() { - RTC_DCHECK_RUN_ON(&process_thread_checker_); - const int64_t now = clock_->TimeInMilliseconds(); - // TODO(bugs.webrtc.org/11581): Figure out why we need to call Process() 200 - // times a second. - next_process_time_ = now + kRtpRtcpMaxIdleTimeProcessMs; - - if (rtp_sender_) { - if (now >= last_bitrate_process_time_ + kRtpRtcpBitrateProcessTimeMs) { - rtp_sender_->packet_sender.ProcessBitrateAndNotifyObservers(); - last_bitrate_process_time_ = now; - // TODO(bugs.webrtc.org/11581): Is this a bug? At the top of the function, - // next_process_time_ is incremented by 5ms, here we effectively do a - // std::min() of (now + 5ms, now + 10ms). Seems like this is a no-op? - next_process_time_ = - std::min(next_process_time_, now + kRtpRtcpBitrateProcessTimeMs); - } - } - - // TODO(bugs.webrtc.org/11581): We update the RTT once a second, whereas other - // things that run in this method are updated much more frequently. Move the - // RTT checking over to the worker thread, which matches better with where the - // stats are maintained. 
- bool process_rtt = now >= last_rtt_process_time_ + kRtpRtcpRttProcessTimeMs; - if (rtcp_sender_.Sending()) { - // Process RTT if we have received a report block and we haven't - // processed RTT for at least |kRtpRtcpRttProcessTimeMs| milliseconds. - // Note that LastReceivedReportBlockMs() grabs a lock, so check - // |process_rtt| first. - if (process_rtt && - rtcp_receiver_.LastReceivedReportBlockMs() > last_rtt_process_time_) { - std::vector receive_blocks; - rtcp_receiver_.StatisticsReceived(&receive_blocks); - int64_t max_rtt = 0; - for (std::vector::iterator it = receive_blocks.begin(); - it != receive_blocks.end(); ++it) { - int64_t rtt = 0; - rtcp_receiver_.RTT(it->sender_ssrc, &rtt, NULL, NULL, NULL); - max_rtt = (rtt > max_rtt) ? rtt : max_rtt; - } - // Report the rtt. - if (rtt_stats_ && max_rtt != 0) - rtt_stats_->OnRttUpdate(max_rtt); - } - - // Verify receiver reports are delivered and the reported sequence number - // is increasing. - // TODO(bugs.webrtc.org/11581): The timeout value needs to be checked every - // few seconds (see internals of RtcpRrTimeout). Here, we may be polling it - // a couple of hundred times a second, which isn't great since it grabs a - // lock. Note also that LastReceivedReportBlockMs() (called above) and - // RtcpRrTimeout() both grab the same lock and check the same timer, so - // it should be possible to consolidate that work somehow. 
- if (rtcp_receiver_.RtcpRrTimeout()) { - RTC_LOG_F(LS_WARNING) << "Timeout: No RTCP RR received."; - } else if (rtcp_receiver_.RtcpRrSequenceNumberTimeout()) { - RTC_LOG_F(LS_WARNING) << "Timeout: No increase in RTCP RR extended " - "highest sequence number."; - } - - if (remote_bitrate_ && rtcp_sender_.TMMBR()) { - unsigned int target_bitrate = 0; - std::vector ssrcs; - if (remote_bitrate_->LatestEstimate(&ssrcs, &target_bitrate)) { - if (!ssrcs.empty()) { - target_bitrate = target_bitrate / ssrcs.size(); - } - rtcp_sender_.SetTargetBitrate(target_bitrate); - } - } - } else { - // Report rtt from receiver. - if (process_rtt) { - int64_t rtt_ms; - if (rtt_stats_ && rtcp_receiver_.GetAndResetXrRrRtt(&rtt_ms)) { - rtt_stats_->OnRttUpdate(rtt_ms); - } - } - } - - // Get processed rtt. - if (process_rtt) { - last_rtt_process_time_ = now; - // TODO(bugs.webrtc.org/11581): Is this a bug? At the top of the function, - // next_process_time_ is incremented by 5ms, here we effectively do a - // std::min() of (now + 5ms, now + 1000ms). Seems like this is a no-op? - next_process_time_ = std::min( - next_process_time_, last_rtt_process_time_ + kRtpRtcpRttProcessTimeMs); - if (rtt_stats_) { - // Make sure we have a valid RTT before setting. 
- int64_t last_rtt = rtt_stats_->LastProcessedRtt(); - if (last_rtt >= 0) - set_rtt_ms(last_rtt); - } - } - - if (rtcp_sender_.TimeToSendRTCPReport()) - rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); - - if (TMMBR() && rtcp_receiver_.UpdateTmmbrTimers()) { - rtcp_receiver_.NotifyTmmbrUpdated(); - } +// static +std::unique_ptr ModuleRtpRtcpImpl2::Create( + const Configuration& configuration) { + RTC_DCHECK(configuration.clock); + RTC_DCHECK(TaskQueueBase::Current()); + return std::make_unique(configuration); } void ModuleRtpRtcpImpl2::SetRtxSendStatus(int mode) { @@ -226,6 +153,7 @@ absl::optional ModuleRtpRtcpImpl2::FlexfecSsrc() const { void ModuleRtpRtcpImpl2::IncomingRtcpPacket(const uint8_t* rtcp_packet, const size_t length) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); rtcp_receiver_.IncomingPacket(rtcp_packet, length); } @@ -260,7 +188,6 @@ void ModuleRtpRtcpImpl2::SetSequenceNumber(const uint16_t seq_num) { void ModuleRtpRtcpImpl2::SetRtpState(const RtpState& rtp_state) { rtp_sender_->packet_generator.SetRtpState(rtp_state); - rtp_sender_->packet_sender.SetMediaHasBeenSent(rtp_state.media_has_been_sent); rtcp_sender_.SetTimestampOffset(rtp_state.start_timestamp); } @@ -270,7 +197,6 @@ void ModuleRtpRtcpImpl2::SetRtxState(const RtpState& rtp_state) { RtpState ModuleRtpRtcpImpl2::GetRtpState() const { RtpState state = rtp_sender_->packet_generator.GetRtpState(); - state.media_has_been_sent = rtp_sender_->packet_sender.MediaHasBeenSent(); return state; } @@ -278,6 +204,12 @@ RtpState ModuleRtpRtcpImpl2::GetRtxState() const { return rtp_sender_->packet_generator.GetRtxRtpState(); } +uint32_t ModuleRtpRtcpImpl2::local_media_ssrc() const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK_EQ(rtcp_receiver_.local_media_ssrc(), rtcp_sender_.SSRC()); + return rtcp_receiver_.local_media_ssrc(); +} + void ModuleRtpRtcpImpl2::SetRid(const std::string& rid) { if (rtp_sender_) { rtp_sender_->packet_generator.SetRid(rid); @@ -300,6 +232,10 @@ void 
ModuleRtpRtcpImpl2::SetCsrcs(const std::vector& csrcs) { // TODO(pbos): Handle media and RTX streams separately (separate RTCP // feedbacks). RTCPSender::FeedbackState ModuleRtpRtcpImpl2::GetFeedbackState() { + // TODO(bugs.webrtc.org/11581): Called by potentially multiple threads. + // Mostly "Send*" methods. Make sure it's only called on the + // construction thread. + RTCPSender::FeedbackState state; // This is called also when receiver_only is true. Hence below // checks that rtp_sender_ exists. @@ -316,8 +252,19 @@ RTCPSender::FeedbackState ModuleRtpRtcpImpl2::GetFeedbackState() { } state.receiver = &rtcp_receiver_; - LastReceivedNTP(&state.last_rr_ntp_secs, &state.last_rr_ntp_frac, - &state.remote_sr); + uint32_t received_ntp_secs = 0; + uint32_t received_ntp_frac = 0; + state.remote_sr = 0; + if (rtcp_receiver_.NTP(&received_ntp_secs, &received_ntp_frac, + /*rtcp_arrival_time_secs=*/&state.last_rr_ntp_secs, + /*rtcp_arrival_time_frac=*/&state.last_rr_ntp_frac, + /*rtcp_timestamp=*/nullptr, + /*remote_sender_packet_count=*/nullptr, + /*remote_sender_octet_count=*/nullptr, + /*remote_sender_reports_count=*/nullptr)) { + state.remote_sr = ((received_ntp_secs & 0x0000ffff) << 16) + + ((received_ntp_frac & 0xffff0000) >> 16); + } state.last_xr_rtis = rtcp_receiver_.ConsumeReceivedXrReferenceTimeInfo(); @@ -330,9 +277,7 @@ RTCPSender::FeedbackState ModuleRtpRtcpImpl2::GetFeedbackState() { int32_t ModuleRtpRtcpImpl2::SetSendingStatus(const bool sending) { if (rtcp_sender_.Sending() != sending) { // Sends RTCP BYE when going from true to false - if (rtcp_sender_.SetSendingStatus(GetFeedbackState(), sending) != 0) { - RTC_LOG(LS_WARNING) << "Failed to send RTCP BYE"; - } + rtcp_sender_.SetSendingStatus(GetFeedbackState(), sending); } return 0; } @@ -374,7 +319,16 @@ bool ModuleRtpRtcpImpl2::OnSendingRtpFrame(uint32_t timestamp, if (!Sending()) return false; - rtcp_sender_.SetLastRtpTime(timestamp, capture_time_ms, payload_type); + // TODO(bugs.webrtc.org/12873): 
Migrate this method and it's users to use + // optional Timestamps. + absl::optional capture_time; + if (capture_time_ms > 0) { + capture_time = Timestamp::Millis(capture_time_ms); + } + absl::optional payload_type_optional; + if (payload_type >= 0) + payload_type_optional = payload_type; + rtcp_sender_.SetLastRtpTime(timestamp, capture_time, payload_type_optional); // Make sure an RTCP report isn't queued behind a key frame. if (rtcp_sender_.TimeToSendRTCPReport(force_sender_report)) rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); @@ -393,6 +347,31 @@ bool ModuleRtpRtcpImpl2::TrySendPacket(RtpPacketToSend* packet, return true; } +void ModuleRtpRtcpImpl2::SetFecProtectionParams( + const FecProtectionParams& delta_params, + const FecProtectionParams& key_params) { + RTC_DCHECK(rtp_sender_); + rtp_sender_->packet_sender.SetFecProtectionParameters(delta_params, + key_params); +} + +std::vector> +ModuleRtpRtcpImpl2::FetchFecPackets() { + RTC_DCHECK(rtp_sender_); + auto fec_packets = rtp_sender_->packet_sender.FetchFecPackets(); + if (!fec_packets.empty()) { + // Don't assign sequence numbers for FlexFEC packets. 
+ const bool generate_sequence_numbers = + !rtp_sender_->packet_sender.FlexFecSsrc().has_value(); + if (generate_sequence_numbers) { + for (auto& fec_packet : fec_packets) { + rtp_sender_->packet_generator.AssignSequenceNumber(fec_packet.get()); + } + } + } + return fec_packets; +} + void ModuleRtpRtcpImpl2::OnPacketsAcknowledged( rtc::ArrayView sequence_numbers) { RTC_DCHECK(rtp_sender_); @@ -460,19 +439,6 @@ int32_t ModuleRtpRtcpImpl2::SetCNAME(const char* c_name) { return rtcp_sender_.SetCNAME(c_name); } -int32_t ModuleRtpRtcpImpl2::AddMixedCNAME(uint32_t ssrc, const char* c_name) { - return rtcp_sender_.AddMixedCNAME(ssrc, c_name); -} - -int32_t ModuleRtpRtcpImpl2::RemoveMixedCNAME(const uint32_t ssrc) { - return rtcp_sender_.RemoveMixedCNAME(ssrc); -} - -int32_t ModuleRtpRtcpImpl2::RemoteCNAME(const uint32_t remote_ssrc, - char c_name[RTCP_CNAME_SIZE]) const { - return rtcp_receiver_.CNAME(remote_ssrc, c_name); -} - int32_t ModuleRtpRtcpImpl2::RemoteNTP(uint32_t* received_ntpsecs, uint32_t* received_ntpfrac, uint32_t* rtcp_arrival_time_secs, @@ -480,12 +446,18 @@ int32_t ModuleRtpRtcpImpl2::RemoteNTP(uint32_t* received_ntpsecs, uint32_t* rtcp_timestamp) const { return rtcp_receiver_.NTP(received_ntpsecs, received_ntpfrac, rtcp_arrival_time_secs, rtcp_arrival_time_frac, - rtcp_timestamp) + rtcp_timestamp, + /*remote_sender_packet_count=*/nullptr, + /*remote_sender_octet_count=*/nullptr, + /*remote_sender_reports_count=*/nullptr) ? 0 : -1; } -// Get RoundTripTime. +// TODO(tommi): Check if |avg_rtt_ms|, |min_rtt_ms|, |max_rtt_ms| params are +// actually used in practice (some callers ask for it but don't use it). It +// could be that only |rtt| is needed and if so, then the fast path could be to +// just call rtt_ms() and rely on the calculation being done periodically. 
int32_t ModuleRtpRtcpImpl2::RTT(const uint32_t remote_ssrc, int64_t* rtt, int64_t* avg_rtt, @@ -504,7 +476,7 @@ int64_t ModuleRtpRtcpImpl2::ExpectedRetransmissionTimeMs() const { if (expected_retransmission_time_ms > 0) { return expected_retransmission_time_ms; } - // No rtt available (|kRtpRtcpRttProcessTimeMs| not yet passed?), so try to + // No rtt available (|kRttUpdateInterval| not yet passed?), so try to // poll avg_rtt_ms directly from rtcp receiver. if (rtcp_receiver_.RTT(rtcp_receiver_.RemoteSSRC(), nullptr, &expected_retransmission_time_ms, nullptr, @@ -520,47 +492,6 @@ int32_t ModuleRtpRtcpImpl2::SendRTCP(RTCPPacketType packet_type) { return rtcp_sender_.SendRTCP(GetFeedbackState(), packet_type); } -int32_t ModuleRtpRtcpImpl2::SetRTCPApplicationSpecificData( - const uint8_t sub_type, - const uint32_t name, - const uint8_t* data, - const uint16_t length) { - return rtcp_sender_.SetApplicationSpecificData(sub_type, name, data, length); -} - -void ModuleRtpRtcpImpl2::SetRtcpXrRrtrStatus(bool enable) { - rtcp_receiver_.SetRtcpXrRrtrStatus(enable); - rtcp_sender_.SendRtcpXrReceiverReferenceTime(enable); -} - -bool ModuleRtpRtcpImpl2::RtcpXrRrtrStatus() const { - return rtcp_sender_.RtcpXrReceiverReferenceTime(); -} - -// TODO(asapersson): Replace this method with the one below. -int32_t ModuleRtpRtcpImpl2::DataCountersRTP(size_t* bytes_sent, - uint32_t* packets_sent) const { - StreamDataCounters rtp_stats; - StreamDataCounters rtx_stats; - rtp_sender_->packet_sender.GetDataCounters(&rtp_stats, &rtx_stats); - - if (bytes_sent) { - // TODO(http://crbug.com/webrtc/10525): Bytes sent should only include - // payload bytes, not header and padding bytes. 
- *bytes_sent = rtp_stats.transmitted.payload_bytes + - rtp_stats.transmitted.padding_bytes + - rtp_stats.transmitted.header_bytes + - rtx_stats.transmitted.payload_bytes + - rtx_stats.transmitted.padding_bytes + - rtx_stats.transmitted.header_bytes; - } - if (packets_sent) { - *packets_sent = - rtp_stats.transmitted.packets + rtx_stats.transmitted.packets; - } - return 0; -} - void ModuleRtpRtcpImpl2::GetSendStreamDataCounters( StreamDataCounters* rtp_counters, StreamDataCounters* rtx_counters) const { @@ -568,16 +499,31 @@ void ModuleRtpRtcpImpl2::GetSendStreamDataCounters( } // Received RTCP report. -int32_t ModuleRtpRtcpImpl2::RemoteRTCPStat( - std::vector* receive_blocks) const { - return rtcp_receiver_.StatisticsReceived(receive_blocks); -} - std::vector ModuleRtpRtcpImpl2::GetLatestReportBlockData() const { return rtcp_receiver_.GetLatestReportBlockData(); } +absl::optional +ModuleRtpRtcpImpl2::GetSenderReportStats() const { + SenderReportStats stats; + uint32_t remote_timestamp_secs; + uint32_t remote_timestamp_frac; + uint32_t arrival_timestamp_secs; + uint32_t arrival_timestamp_frac; + if (rtcp_receiver_.NTP(&remote_timestamp_secs, &remote_timestamp_frac, + &arrival_timestamp_secs, &arrival_timestamp_frac, + /*rtcp_timestamp=*/nullptr, &stats.packets_sent, + &stats.bytes_sent, &stats.reports_count)) { + stats.last_remote_timestamp.Set(remote_timestamp_secs, + remote_timestamp_frac); + stats.last_arrival_timestamp.Set(arrival_timestamp_secs, + arrival_timestamp_frac); + return stats; + } + return absl::nullopt; +} + // (REMB) Receiver Estimated Max Bitrate. 
void ModuleRtpRtcpImpl2::SetRemb(int64_t bitrate_bps, std::vector ssrcs) { @@ -592,12 +538,6 @@ void ModuleRtpRtcpImpl2::SetExtmapAllowMixed(bool extmap_allow_mixed) { rtp_sender_->packet_generator.SetExtmapAllowMixed(extmap_allow_mixed); } -int32_t ModuleRtpRtcpImpl2::RegisterSendRtpHeaderExtension( - const RTPExtensionType type, - const uint8_t id) { - return rtp_sender_->packet_generator.RegisterRtpHeaderExtension(type, id); -} - void ModuleRtpRtcpImpl2::RegisterRtpHeaderExtension(absl::string_view uri, int id) { bool registered = @@ -614,15 +554,6 @@ void ModuleRtpRtcpImpl2::DeregisterSendRtpHeaderExtension( rtp_sender_->packet_generator.DeregisterRtpHeaderExtension(uri); } -// (TMMBR) Temporary Max Media Bit Rate. -bool ModuleRtpRtcpImpl2::TMMBR() const { - return rtcp_sender_.TMMBR(); -} - -void ModuleRtpRtcpImpl2::SetTMMBRStatus(const bool enable) { - rtcp_sender_.SetTMMBRStatus(enable); -} - void ModuleRtpRtcpImpl2::SetTmmbn(std::vector bounding_set) { rtcp_sender_.SetTmmbn(std::move(bounding_set)); } @@ -719,21 +650,15 @@ void ModuleRtpRtcpImpl2::SetRemoteSSRC(const uint32_t ssrc) { rtcp_receiver_.SetRemoteSSRC(ssrc); } -// TODO(nisse): Delete video_rate amd fec_rate arguments. -void ModuleRtpRtcpImpl2::BitrateSent(uint32_t* total_rate, - uint32_t* video_rate, - uint32_t* fec_rate, - uint32_t* nack_rate) const { - RtpSendRates send_rates = rtp_sender_->packet_sender.GetSendRates(); - *total_rate = send_rates.Sum().bps(); - if (video_rate) - *video_rate = 0; - if (fec_rate) - *fec_rate = 0; - *nack_rate = send_rates[RtpPacketMediaType::kRetransmission].bps(); +void ModuleRtpRtcpImpl2::SetLocalSsrc(uint32_t local_ssrc) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtcp_receiver_.set_local_media_ssrc(local_ssrc); + rtcp_sender_.SetSsrc(local_ssrc); } RtpSendRates ModuleRtpRtcpImpl2::GetSendRates() const { + // Typically called on the `rtp_transport_queue_` owned by an + // RtpTransportControllerSendInterface instance. 
return rtp_sender_->packet_sender.GetSendRates(); } @@ -778,26 +703,10 @@ void ModuleRtpRtcpImpl2::OnReceivedRtcpReportBlocks( } } -bool ModuleRtpRtcpImpl2::LastReceivedNTP( - uint32_t* rtcp_arrival_time_secs, // When we got the last report. - uint32_t* rtcp_arrival_time_frac, - uint32_t* remote_sr) const { - // Remote SR: NTP inside the last received (mid 16 bits from sec and frac). - uint32_t ntp_secs = 0; - uint32_t ntp_frac = 0; - - if (!rtcp_receiver_.NTP(&ntp_secs, &ntp_frac, rtcp_arrival_time_secs, - rtcp_arrival_time_frac, NULL)) { - return false; - } - *remote_sr = - ((ntp_secs & 0x0000ffff) << 16) + ((ntp_frac & 0xffff0000) >> 16); - return true; -} - void ModuleRtpRtcpImpl2::set_rtt_ms(int64_t rtt_ms) { + RTC_DCHECK_RUN_ON(worker_queue_); { - rtc::CritScope cs(&critical_section_rtt_); + MutexLock lock(&mutex_rtt_); rtt_ms_ = rtt_ms; } if (rtp_sender_) { @@ -806,7 +715,7 @@ void ModuleRtpRtcpImpl2::set_rtt_ms(int64_t rtt_ms) { } int64_t ModuleRtpRtcpImpl2::rtt_ms() const { - rtc::CritScope cs(&critical_section_rtt_); + MutexLock lock(&mutex_rtt_); return rtt_ms_; } @@ -823,15 +732,72 @@ const RTPSender* ModuleRtpRtcpImpl2::RtpSender() const { return rtp_sender_ ? 
&rtp_sender_->packet_generator : nullptr; } -DataRate ModuleRtpRtcpImpl2::SendRate() const { - RTC_DCHECK(rtp_sender_); - return rtp_sender_->packet_sender.GetSendRates().Sum(); +void ModuleRtpRtcpImpl2::PeriodicUpdate() { + RTC_DCHECK_RUN_ON(worker_queue_); + + Timestamp check_since = clock_->CurrentTime() - kRttUpdateInterval; + absl::optional rtt = + rtcp_receiver_.OnPeriodicRttUpdate(check_since, rtcp_sender_.Sending()); + if (rtt) { + rtt_stats_->OnRttUpdate(rtt->ms()); + set_rtt_ms(rtt->ms()); + } } -DataRate ModuleRtpRtcpImpl2::NackOverheadRate() const { - RTC_DCHECK(rtp_sender_); - return rtp_sender_->packet_sender - .GetSendRates()[RtpPacketMediaType::kRetransmission]; +// RTC_RUN_ON(worker_queue_); +void ModuleRtpRtcpImpl2::MaybeSendRtcp() { + if (rtcp_sender_.TimeToSendRTCPReport()) + rtcp_sender_.SendRTCP(GetFeedbackState(), kRtcpReport); +} + +// TODO(bugs.webrtc.org/12889): Consider removing this function when the issue +// is resolved. +// RTC_RUN_ON(worker_queue_); +void ModuleRtpRtcpImpl2::MaybeSendRtcpAtOrAfterTimestamp( + Timestamp execution_time) { + Timestamp now = clock_->CurrentTime(); + if (now >= execution_time) { + MaybeSendRtcp(); + return; + } + + RTC_DLOG(LS_WARNING) + << "BUGBUG: Task queue scheduled delayed call too early."; + + ScheduleMaybeSendRtcpAtOrAfterTimestamp(execution_time, execution_time - now); +} + +void ModuleRtpRtcpImpl2::ScheduleRtcpSendEvaluation(TimeDelta duration) { + // We end up here under various sequences including the worker queue, and + // the RTCPSender lock is held. + // We're assuming that the fact that RTCPSender executes under other sequences + // than the worker queue on which it's created on implies that external + // synchronization is present and removes this activity before destruction. 
+ if (duration.IsZero()) { + worker_queue_->PostTask(ToQueuedTask(task_safety_, [this] { + RTC_DCHECK_RUN_ON(worker_queue_); + MaybeSendRtcp(); + })); + } else { + Timestamp execution_time = clock_->CurrentTime() + duration; + ScheduleMaybeSendRtcpAtOrAfterTimestamp(execution_time, duration); + } +} + +void ModuleRtpRtcpImpl2::ScheduleMaybeSendRtcpAtOrAfterTimestamp( + Timestamp execution_time, + TimeDelta duration) { + // We end up here under various sequences including the worker queue, and + // the RTCPSender lock is held. + // See note in ScheduleRtcpSendEvaluation about why |worker_queue_| can be + // accessed. + worker_queue_->PostDelayedTask( + ToQueuedTask(task_safety_, + [this, execution_time] { + RTC_DCHECK_RUN_ON(worker_queue_); + MaybeSendRtcpAtOrAfterTimestamp(execution_time); + }), + DelayMillisForDuration(duration)); } } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2.h b/modules/rtp_rtcp/source/rtp_rtcp_impl2.h index 67409c059f..0ad495593d 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl2.h +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2.h @@ -21,10 +21,12 @@ #include "absl/types/optional.h" #include "api/rtp_headers.h" +#include "api/sequence_checker.h" +#include "api/task_queue/task_queue_base.h" +#include "api/units/time_delta.h" #include "api/video/video_bitrate_allocation.h" #include "modules/include/module_fec_types.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" // RTCPPacketType #include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h" #include "modules/rtp_rtcp/source/rtcp_receiver.h" @@ -33,9 +35,13 @@ #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "modules/rtp_rtcp/source/rtp_sender.h" #include "modules/rtp_rtcp/source/rtp_sender_egress.h" -#include "rtc_base/critical_section.h" #include "rtc_base/gtest_prod_util.h" -#include 
"rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/repeating_task.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { @@ -43,18 +49,19 @@ class Clock; struct PacedPacketInfo; struct RTPVideoHeader; -class ModuleRtpRtcpImpl2 final : public RtpRtcp, +class ModuleRtpRtcpImpl2 final : public RtpRtcpInterface, public RTCPReceiver::ModuleRtpRtcp { public: - explicit ModuleRtpRtcpImpl2(const RtpRtcp::Configuration& configuration); + explicit ModuleRtpRtcpImpl2( + const RtpRtcpInterface::Configuration& configuration); ~ModuleRtpRtcpImpl2() override; - // Returns the number of milliseconds until the module want a worker thread to - // call Process. - int64_t TimeUntilNextProcess() override; - - // Process any pending tasks such as timeouts. - void Process() override; + // This method is provided to easy with migrating away from the + // RtpRtcp::Create factory method. Since this is an internal implementation + // detail though, creating an instance of ModuleRtpRtcpImpl2 directly should + // be fine. + static std::unique_ptr Create( + const Configuration& configuration); // Receiver part. @@ -64,6 +71,8 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, void SetRemoteSSRC(uint32_t ssrc) override; + void SetLocalSsrc(uint32_t local_ssrc) override; + // Sender part. void RegisterSendPayloadFrequency(int payload_type, int payload_frequency) override; @@ -72,9 +81,6 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, void SetExtmapAllowMixed(bool extmap_allow_mixed) override; - // Register RTP header extension. 
- int32_t RegisterSendRtpHeaderExtension(RTPExtensionType type, - uint8_t id) override; void RegisterRtpHeaderExtension(absl::string_view uri, int id) override; int32_t DeregisterSendRtpHeaderExtension(RTPExtensionType type) override; void DeregisterSendRtpHeaderExtension(absl::string_view uri) override; @@ -100,6 +106,11 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, uint32_t SSRC() const override { return rtcp_sender_.SSRC(); } + // Semantically identical to `SSRC()` but must be called on the packet + // delivery thread/tq and returns the ssrc that maps to + // RtpRtcpInterface::Configuration::local_media_ssrc. + uint32_t local_media_ssrc() const; + void SetRid(const std::string& rid) override; void SetMid(const std::string& mid) override; @@ -139,6 +150,11 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, bool TrySendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info) override; + void SetFecProtectionParams(const FecProtectionParams& delta_params, + const FecProtectionParams& key_params) override; + + std::vector> FetchFecPackets() override; + void OnPacketsAcknowledged( rtc::ArrayView sequence_numbers) override; @@ -161,10 +177,6 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, // Set RTCP CName. int32_t SetCNAME(const char* c_name) override; - // Get remote CName. - int32_t RemoteCNAME(uint32_t remote_ssrc, - char c_name[RTCP_CNAME_SIZE]) const override; - // Get remote NTP. int32_t RemoteNTP(uint32_t* received_ntp_secs, uint32_t* received_ntp_frac, @@ -172,10 +184,6 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, uint32_t* rtcp_arrival_time_frac, uint32_t* rtcp_timestamp) const override; - int32_t AddMixedCNAME(uint32_t ssrc, const char* c_name) override; - - int32_t RemoveMixedCNAME(uint32_t ssrc) override; - // Get RoundTripTime. 
int32_t RTT(uint32_t remote_ssrc, int64_t* rtt, @@ -186,35 +194,25 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, int64_t ExpectedRetransmissionTimeMs() const override; // Force a send of an RTCP packet. - // Normal SR and RR are triggered via the process function. + // Normal SR and RR are triggered via the task queue that's current when this + // object is created. int32_t SendRTCP(RTCPPacketType rtcpPacketType) override; - // Statistics of the amount of data sent and received. - int32_t DataCountersRTP(size_t* bytes_sent, - uint32_t* packets_sent) const override; - void GetSendStreamDataCounters( StreamDataCounters* rtp_counters, StreamDataCounters* rtx_counters) const override; - // Get received RTCP report, report block. - int32_t RemoteRTCPStat( - std::vector* receive_blocks) const override; // A snapshot of the most recent Report Block with additional data of // interest to statistics. Used to implement RTCRemoteInboundRtpStreamStats. // Within this list, the ReportBlockData::RTCPReportBlock::source_ssrc(), // which is the SSRC of the corresponding outbound RTP stream, is unique. std::vector GetLatestReportBlockData() const override; + absl::optional GetSenderReportStats() const override; // (REMB) Receiver Estimated Max Bitrate. void SetRemb(int64_t bitrate_bps, std::vector ssrcs) override; void UnsetRemb() override; - // (TMMBR) Temporary Max Media Bit Rate. - bool TMMBR() const override; - - void SetTMMBRStatus(bool enable) override; - void SetTmmbn(std::vector bounding_set) override; size_t MaxRtpPacketSize() const override; @@ -233,37 +231,15 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, // requests. void SetStorePacketsStatus(bool enable, uint16_t number_to_store) override; - bool StorePackets() const override; - void SendCombinedRtcpPacket( std::vector> rtcp_packets) override; - // (APP) Application specific data. 
- int32_t SetRTCPApplicationSpecificData(uint8_t sub_type, - uint32_t name, - const uint8_t* data, - uint16_t length) override; - - // (XR) Receiver reference time report. - void SetRtcpXrRrtrStatus(bool enable) override; - - bool RtcpXrRrtrStatus() const override; - // Video part. int32_t SendLossNotification(uint16_t last_decoded_seq_num, uint16_t last_received_seq_num, bool decodability_flag, bool buffering_allowed) override; - bool LastReceivedNTP(uint32_t* NTPsecs, - uint32_t* NTPfrac, - uint32_t* remote_sr) const; - - void BitrateSent(uint32_t* total_rate, - uint32_t* video_rate, - uint32_t* fec_rate, - uint32_t* nackRate) const override; - RtpSendRates GetSendRates() const override; void OnReceivedNack( @@ -278,34 +254,13 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, RTPSender* RtpSender() override; const RTPSender* RtpSender() const override; - protected: - bool UpdateRTCPReceiveInformationTimers(); - - RTPSender* rtp_sender() { - return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr; - } - const RTPSender* rtp_sender() const { - return rtp_sender_ ? &rtp_sender_->packet_generator : nullptr; - } - - RTCPSender* rtcp_sender() { return &rtcp_sender_; } - const RTCPSender* rtcp_sender() const { return &rtcp_sender_; } - - RTCPReceiver* rtcp_receiver() { return &rtcp_receiver_; } - const RTCPReceiver* rtcp_receiver() const { return &rtcp_receiver_; } - - Clock* clock() const { return clock_; } - - // TODO(sprang): Remove when usage is gone. 
- DataRate SendRate() const; - DataRate NackOverheadRate() const; - private: FRIEND_TEST_ALL_PREFIXES(RtpRtcpImpl2Test, Rtt); FRIEND_TEST_ALL_PREFIXES(RtpRtcpImpl2Test, RttForReceiverOnly); - struct RtpSenderContext { - explicit RtpSenderContext(const RtpRtcp::Configuration& config); + struct RtpSenderContext : public SequenceNumberAssigner { + explicit RtpSenderContext(const RtpRtcpInterface::Configuration& config); + void AssignSequenceNumber(RtpPacketToSend* packet) override; // Storage of packets, for retransmissions and padding, if applicable. RtpPacketHistory packet_history; // Handles final time timestamping/stats/etc and handover to Transport. @@ -322,19 +277,39 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, bool TimeToSendFullNackList(int64_t now) const; - SequenceChecker construction_thread_checker_; - SequenceChecker process_thread_checker_; + // Called on a timer, once a second, on the worker_queue_, to update the RTT, + // check if we need to send RTCP report, send TMMBR updates and fire events. + void PeriodicUpdate(); - std::unique_ptr rtp_sender_; + // Returns true if the module is configured to store packets. + bool StorePackets() const; + // Used from RtcpSenderMediator to maybe send rtcp. + void MaybeSendRtcp() RTC_RUN_ON(worker_queue_); + + // Called when |rtcp_sender_| informs of the next RTCP instant. The method may + // be called on various sequences, and is called under a RTCPSenderLock. + void ScheduleRtcpSendEvaluation(TimeDelta duration); + + // Helper method combating too early delayed calls from task queues. + // TODO(bugs.webrtc.org/12889): Consider removing this function when the issue + // is resolved. + void MaybeSendRtcpAtOrAfterTimestamp(Timestamp execution_time) + RTC_RUN_ON(worker_queue_); + + // Schedules a call to MaybeSendRtcpAtOrAfterTimestamp delayed by |duration|. 
+ void ScheduleMaybeSendRtcpAtOrAfterTimestamp(Timestamp execution_time, + TimeDelta duration); + + TaskQueueBase* const worker_queue_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; + + std::unique_ptr rtp_sender_; RTCPSender rtcp_sender_; RTCPReceiver rtcp_receiver_; Clock* const clock_; - int64_t last_bitrate_process_time_; - int64_t last_rtt_process_time_; - int64_t next_process_time_; uint16_t packet_overhead_; // Send side @@ -344,10 +319,13 @@ class ModuleRtpRtcpImpl2 final : public RtpRtcp, RemoteBitrateEstimator* const remote_bitrate_; RtcpRttStats* const rtt_stats_; + RepeatingTaskHandle rtt_update_task_ RTC_GUARDED_BY(worker_queue_); // The processed RTT from RtcpRttStats. - rtc::CriticalSection critical_section_rtt_; - int64_t rtt_ms_; + mutable Mutex mutex_rtt_; + int64_t rtt_ms_ RTC_GUARDED_BY(mutex_rtt_); + + RTC_NO_UNIQUE_ADDRESS ScopedTaskSafety task_safety_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc index 7627283e58..c8ab15de78 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl2_unittest.cc @@ -10,33 +10,60 @@ #include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include #include #include #include +#include +#include "absl/types/optional.h" #include "api/transport/field_trial_based_config.h" -#include "api/video_codecs/video_codec.h" +#include "api/units/time_delta.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_packet.h" #include "modules/rtp_rtcp/source/rtcp_packet/nack.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/rtp_sender_video.h" +#include "rtc_base/logging.h" #include "rtc_base/rate_limiter.h" +#include "rtc_base/strings/string_builder.h" #include "test/gmock.h" #include "test/gtest.h" #include 
"test/rtcp_packet_parser.h" #include "test/rtp_header_parser.h" +#include "test/run_loop.h" +#include "test/time_controller/simulated_time_controller.h" +using ::testing::AllOf; using ::testing::ElementsAre; +using ::testing::Eq; +using ::testing::Field; +using ::testing::Gt; +using ::testing::Not; +using ::testing::Optional; +using ::testing::SizeIs; namespace webrtc { namespace { -const uint32_t kSenderSsrc = 0x12345; -const uint32_t kReceiverSsrc = 0x23456; -const int64_t kOneWayNetworkDelayMs = 100; -const uint8_t kBaseLayerTid = 0; -const uint8_t kHigherLayerTid = 1; -const uint16_t kSequenceNumber = 100; +constexpr uint32_t kSenderSsrc = 0x12345; +constexpr uint32_t kReceiverSsrc = 0x23456; +constexpr TimeDelta kOneWayNetworkDelay = TimeDelta::Millis(100); +constexpr uint8_t kBaseLayerTid = 0; +constexpr uint8_t kHigherLayerTid = 1; +constexpr uint16_t kSequenceNumber = 100; +constexpr uint8_t kPayloadType = 100; +constexpr int kWidth = 320; +constexpr int kHeight = 100; +constexpr int kCaptureTimeMsToRtpTimestamp = 90; // 90 kHz clock. +constexpr TimeDelta kDefaultReportInterval = TimeDelta::Millis(1000); + +// RTP header extension ids. +enum : int { + kAbsoluteSendTimeExtensionId = 1, + kTransportSequenceNumberExtensionId, + kTransmissionOffsetExtensionId, +}; class RtcpRttStatsTestImpl : public RtcpRttStats { public: @@ -48,71 +75,150 @@ class RtcpRttStatsTestImpl : public RtcpRttStats { int64_t rtt_ms_; }; -class SendTransport : public Transport { +// TODO(bugs.webrtc.org/11581): remove inheritance once the ModuleRtpRtcpImpl2 +// Module/ProcessThread dependency is gone. 
+class SendTransport : public Transport, + public sim_time_impl::SimulatedSequenceRunner { public: - SendTransport() + SendTransport(TimeDelta delay, GlobalSimulatedTimeController* time_controller) : receiver_(nullptr), - clock_(nullptr), - delay_ms_(0), + time_controller_(time_controller), + delay_(delay), rtp_packets_sent_(0), - rtcp_packets_sent_(0) {} + rtcp_packets_sent_(0), + last_packet_(&header_extensions_) { + time_controller_->Register(this); + } + + ~SendTransport() { time_controller_->Unregister(this); } void SetRtpRtcpModule(ModuleRtpRtcpImpl2* receiver) { receiver_ = receiver; } - void SimulateNetworkDelay(int64_t delay_ms, SimulatedClock* clock) { - clock_ = clock; - delay_ms_ = delay_ms; - } + void SimulateNetworkDelay(TimeDelta delay) { delay_ = delay; } bool SendRtp(const uint8_t* data, size_t len, const PacketOptions& options) override { - RTPHeader header; - std::unique_ptr parser(RtpHeaderParser::CreateForTest()); - EXPECT_TRUE(parser->Parse(static_cast(data), len, &header)); + EXPECT_TRUE(last_packet_.Parse(data, len)); ++rtp_packets_sent_; - last_rtp_header_ = header; return true; } bool SendRtcp(const uint8_t* data, size_t len) override { test::RtcpPacketParser parser; parser.Parse(data, len); last_nack_list_ = parser.nack()->packet_ids(); - - if (clock_) { - clock_->AdvanceTimeMilliseconds(delay_ms_); - } - EXPECT_TRUE(receiver_); - receiver_->IncomingRtcpPacket(data, len); + Timestamp current_time = time_controller_->GetClock()->CurrentTime(); + Timestamp delivery_time = current_time + delay_; + rtcp_packets_.push_back( + Packet{delivery_time, std::vector(data, data + len)}); ++rtcp_packets_sent_; + RunReady(current_time); return true; } + + // sim_time_impl::SimulatedSequenceRunner + Timestamp GetNextRunTime() const override { + if (!rtcp_packets_.empty()) + return rtcp_packets_.front().send_time; + return Timestamp::PlusInfinity(); + } + void RunReady(Timestamp at_time) override { + while (!rtcp_packets_.empty() && + 
rtcp_packets_.front().send_time <= at_time) { + Packet packet = std::move(rtcp_packets_.front()); + rtcp_packets_.pop_front(); + EXPECT_TRUE(receiver_); + receiver_->IncomingRtcpPacket(packet.data.data(), packet.data.size()); + } + } + TaskQueueBase* GetAsTaskQueue() override { + return reinterpret_cast(this); + } + size_t NumRtcpSent() { return rtcp_packets_sent_; } + ModuleRtpRtcpImpl2* receiver_; - SimulatedClock* clock_; - int64_t delay_ms_; + GlobalSimulatedTimeController* const time_controller_; + TimeDelta delay_; int rtp_packets_sent_; size_t rtcp_packets_sent_; - RTPHeader last_rtp_header_; std::vector last_nack_list_; + RtpHeaderExtensionMap header_extensions_; + RtpPacketReceived last_packet_; + struct Packet { + Timestamp send_time; + std::vector data; + }; + std::deque rtcp_packets_; }; -class RtpRtcpModule : public RtcpPacketTypeCounterObserver { +struct TestConfig { + explicit TestConfig(bool with_overhead) : with_overhead(with_overhead) {} + + bool with_overhead = false; +}; + +class FieldTrialConfig : public WebRtcKeyValueConfig { + public: + static FieldTrialConfig GetFromTestConfig(const TestConfig& config) { + FieldTrialConfig trials; + trials.overhead_enabled_ = config.with_overhead; + return trials; + } + + FieldTrialConfig() : overhead_enabled_(false), max_padding_factor_(1200) {} + ~FieldTrialConfig() override {} + + void SetOverHeadEnabled(bool enabled) { overhead_enabled_ = enabled; } + void SetMaxPaddingFactor(double factor) { max_padding_factor_ = factor; } + + std::string Lookup(absl::string_view key) const override { + if (key == "WebRTC-LimitPaddingSize") { + char string_buf[32]; + rtc::SimpleStringBuilder ssb(string_buf); + ssb << "factor:" << max_padding_factor_; + return ssb.str(); + } else if (key == "WebRTC-SendSideBwe-WithOverhead") { + return overhead_enabled_ ? 
"Enabled" : "Disabled"; + } + return ""; + } + + private: + bool overhead_enabled_; + double max_padding_factor_; +}; + +class RtpRtcpModule : public RtcpPacketTypeCounterObserver, + public SendPacketObserver { public: - RtpRtcpModule(SimulatedClock* clock, bool is_sender) - : is_sender_(is_sender), - receive_statistics_(ReceiveStatistics::Create(clock)), - clock_(clock) { + struct SentPacket { + SentPacket(uint16_t packet_id, int64_t capture_time_ms, uint32_t ssrc) + : packet_id(packet_id), capture_time_ms(capture_time_ms), ssrc(ssrc) {} + uint16_t packet_id; + int64_t capture_time_ms; + uint32_t ssrc; + }; + + RtpRtcpModule(GlobalSimulatedTimeController* time_controller, + bool is_sender, + const FieldTrialConfig& trials) + : time_controller_(time_controller), + is_sender_(is_sender), + trials_(trials), + receive_statistics_( + ReceiveStatistics::Create(time_controller->GetClock())), + transport_(kOneWayNetworkDelay, time_controller) { CreateModuleImpl(); - transport_.SimulateNetworkDelay(kOneWayNetworkDelayMs, clock); } + TimeController* const time_controller_; const bool is_sender_; + const FieldTrialConfig& trials_; RtcpPacketTypeCounter packets_sent_; RtcpPacketTypeCounter packets_received_; std::unique_ptr receive_statistics_; SendTransport transport_; RtcpRttStatsTestImpl rtt_stats_; std::unique_ptr impl_; - int rtcp_report_interval_ms_ = 0; void RtcpPacketTypesCounterUpdated( uint32_t ssrc, @@ -120,6 +226,16 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver { counter_map_[ssrc] = packet_counter; } + void OnSendPacket(uint16_t packet_id, + int64_t capture_time_ms, + uint32_t ssrc) override { + last_sent_packet_.emplace(packet_id, capture_time_ms, ssrc); + } + + absl::optional last_sent_packet() const { + return last_sent_packet_; + } + RtcpPacketTypeCounter RtcpSent() { // RTCP counters for remote SSRC. return counter_map_[is_sender_ ? 
kReceiverSsrc : kSenderSsrc]; @@ -130,46 +246,64 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver { return counter_map_[impl_->SSRC()]; } int RtpSent() { return transport_.rtp_packets_sent_; } - uint16_t LastRtpSequenceNumber() { - return transport_.last_rtp_header_.sequenceNumber; - } + uint16_t LastRtpSequenceNumber() { return last_packet().SequenceNumber(); } std::vector LastNackListSent() { return transport_.last_nack_list_; } - void SetRtcpReportIntervalAndReset(int rtcp_report_interval_ms) { - rtcp_report_interval_ms_ = rtcp_report_interval_ms; + void SetRtcpReportIntervalAndReset(TimeDelta rtcp_report_interval) { + rtcp_report_interval_ = rtcp_report_interval; + CreateModuleImpl(); + } + const RtpPacketReceived& last_packet() { return transport_.last_packet_; } + void RegisterHeaderExtension(absl::string_view uri, int id) { + impl_->RegisterRtpHeaderExtension(uri, id); + transport_.header_extensions_.RegisterByUri(id, uri); + transport_.last_packet_.IdentifyExtensions(transport_.header_extensions_); + } + void ReinintWithFec(VideoFecGenerator* fec_generator) { + fec_generator_ = fec_generator; CreateModuleImpl(); } private: void CreateModuleImpl() { - RtpRtcp::Configuration config; + RtpRtcpInterface::Configuration config; config.audio = false; - config.clock = clock_; + config.clock = time_controller_->GetClock(); config.outgoing_transport = &transport_; config.receive_statistics = receive_statistics_.get(); config.rtcp_packet_type_counter_observer = this; config.rtt_stats = &rtt_stats_; - config.rtcp_report_interval_ms = rtcp_report_interval_ms_; + config.rtcp_report_interval_ms = rtcp_report_interval_.ms(); config.local_media_ssrc = is_sender_ ? 
kSenderSsrc : kReceiverSsrc; config.need_rtp_packet_infos = true; - + config.non_sender_rtt_measurement = true; + config.field_trials = &trials_; + config.send_packet_observer = this; + config.fec_generator = fec_generator_; impl_.reset(new ModuleRtpRtcpImpl2(config)); impl_->SetRemoteSSRC(is_sender_ ? kReceiverSsrc : kSenderSsrc); impl_->SetRTCPStatus(RtcpMode::kCompound); } - SimulatedClock* const clock_; std::map counter_map_; + absl::optional last_sent_packet_; + VideoFecGenerator* fec_generator_ = nullptr; + TimeDelta rtcp_report_interval_ = kDefaultReportInterval; }; } // namespace -class RtpRtcpImpl2Test : public ::testing::Test { +class RtpRtcpImpl2Test : public ::testing::TestWithParam { protected: RtpRtcpImpl2Test() - : clock_(133590000000000), - sender_(&clock_, /*is_sender=*/true), - receiver_(&clock_, /*is_sender=*/false) {} + : time_controller_(Timestamp::Micros(133590000000000)), + field_trials_(FieldTrialConfig::GetFromTestConfig(GetParam())), + sender_(&time_controller_, + /*is_sender=*/true, + field_trials_), + receiver_(&time_controller_, + /*is_sender=*/false, + field_trials_) {} void SetUp() override { // Send module. @@ -178,18 +312,12 @@ class RtpRtcpImpl2Test : public ::testing::Test { sender_.impl_->SetSequenceNumber(kSequenceNumber); sender_.impl_->SetStorePacketsStatus(true, 100); - FieldTrialBasedConfig field_trials; RTPSenderVideo::Config video_config; - video_config.clock = &clock_; + video_config.clock = time_controller_.GetClock(); video_config.rtp_sender = sender_.impl_->RtpSender(); - video_config.field_trials = &field_trials; + video_config.field_trials = &field_trials_; sender_video_ = std::make_unique(video_config); - memset(&codec_, 0, sizeof(VideoCodec)); - codec_.plType = 100; - codec_.width = 320; - codec_.height = 180; - // Receive module. 
EXPECT_EQ(0, receiver_.impl_->SetSendingStatus(false)); receiver_.impl_->SetSendingMediaStatus(false); @@ -198,21 +326,55 @@ class RtpRtcpImpl2Test : public ::testing::Test { receiver_.transport_.SetRtpRtcpModule(sender_.impl_.get()); } - SimulatedClock clock_; + void AdvanceTime(TimeDelta duration) { + time_controller_.AdvanceTime(duration); + } + + void ReinitWithFec(VideoFecGenerator* fec_generator, + absl::optional red_payload_type) { + sender_.ReinintWithFec(fec_generator); + EXPECT_EQ(0, sender_.impl_->SetSendingStatus(true)); + sender_.impl_->SetSendingMediaStatus(true); + sender_.impl_->SetSequenceNumber(kSequenceNumber); + sender_.impl_->SetStorePacketsStatus(true, 100); + receiver_.transport_.SetRtpRtcpModule(sender_.impl_.get()); + + RTPSenderVideo::Config video_config; + video_config.clock = time_controller_.GetClock(); + video_config.rtp_sender = sender_.impl_->RtpSender(); + video_config.field_trials = &field_trials_; + video_config.fec_overhead_bytes = fec_generator->MaxPacketOverhead(); + video_config.fec_type = fec_generator->GetFecType(); + video_config.red_payload_type = red_payload_type; + sender_video_ = std::make_unique(video_config); + } + + GlobalSimulatedTimeController time_controller_; + FieldTrialConfig field_trials_; RtpRtcpModule sender_; std::unique_ptr sender_video_; RtpRtcpModule receiver_; - VideoCodec codec_; - void SendFrame(const RtpRtcpModule* module, + bool SendFrame(const RtpRtcpModule* module, RTPSenderVideo* sender, uint8_t tid) { + int64_t now_ms = time_controller_.GetClock()->TimeInMilliseconds(); + return SendFrame( + module, sender, tid, + static_cast(now_ms * kCaptureTimeMsToRtpTimestamp), now_ms); + } + + bool SendFrame(const RtpRtcpModule* module, + RTPSenderVideo* sender, + uint8_t tid, + uint32_t rtp_timestamp, + int64_t capture_time_ms) { RTPVideoHeaderVP8 vp8_header = {}; vp8_header.temporalIdx = tid; RTPVideoHeader rtp_video_header; rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey; - 
rtp_video_header.width = codec_.width; - rtp_video_header.height = codec_.height; + rtp_video_header.width = kWidth; + rtp_video_header.height = kHeight; rtp_video_header.rotation = kVideoRotation_0; rtp_video_header.content_type = VideoContentType::UNSPECIFIED; rtp_video_header.playout_delay = {-1, -1}; @@ -223,9 +385,12 @@ class RtpRtcpImpl2Test : public ::testing::Test { rtp_video_header.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false}; const uint8_t payload[100] = {0}; - EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, codec_.plType, true)); - EXPECT_TRUE(sender->SendVideo(codec_.plType, VideoCodecType::kVideoCodecVP8, - 0, 0, payload, nullptr, rtp_video_header, 0)); + bool success = module->impl_->OnSendingRtpFrame(0, 0, kPayloadType, true); + + success &= sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8, + rtp_timestamp, capture_time_ms, payload, + rtp_video_header, 0); + return success; } void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) { @@ -242,19 +407,20 @@ class RtpRtcpImpl2Test : public ::testing::Test { } }; -TEST_F(RtpRtcpImpl2Test, RetransmitsAllLayers) { +TEST_P(RtpRtcpImpl2Test, RetransmitsAllLayers) { // Send frames. EXPECT_EQ(0, sender_.RtpSent()); - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); // kSequenceNumber - SendFrame(&sender_, sender_video_.get(), - kHigherLayerTid); // kSequenceNumber + 1 - SendFrame(&sender_, sender_video_.get(), - kNoTemporalIdx); // kSequenceNumber + 2 + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), + kBaseLayerTid)); // kSequenceNumber + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), + kHigherLayerTid)); // kSequenceNumber + 1 + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), + kNoTemporalIdx)); // kSequenceNumber + 2 EXPECT_EQ(3, sender_.RtpSent()); EXPECT_EQ(kSequenceNumber + 2, sender_.LastRtpSequenceNumber()); // Min required delay until retransmit = 5 + RTT ms (RTT = 0). 
- clock_.AdvanceTimeMilliseconds(5); + AdvanceTime(TimeDelta::Millis(5)); // Frame with kBaseLayerTid re-sent. IncomingRtcpNack(&sender_, kSequenceNumber); @@ -270,7 +436,7 @@ TEST_F(RtpRtcpImpl2Test, RetransmitsAllLayers) { EXPECT_EQ(kSequenceNumber + 2, sender_.LastRtpSequenceNumber()); } -TEST_F(RtpRtcpImpl2Test, Rtt) { +TEST_P(RtpRtcpImpl2Test, Rtt) { RtpPacketReceived packet; packet.SetTimestamp(1); packet.SetSequenceNumber(123); @@ -279,13 +445,14 @@ TEST_F(RtpRtcpImpl2Test, Rtt) { receiver_.receive_statistics_->OnRtpPacket(packet); // Send Frame before sending an SR. - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Sender module should send an SR. EXPECT_EQ(0, sender_.impl_->SendRTCP(kRtcpReport)); + AdvanceTime(kOneWayNetworkDelay); // Receiver module should send a RR with a response to the last received SR. - clock_.AdvanceTimeMilliseconds(1000); EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport)); + AdvanceTime(kOneWayNetworkDelay); // Verify RTT. int64_t rtt; @@ -294,10 +461,10 @@ TEST_F(RtpRtcpImpl2Test, Rtt) { int64_t max_rtt; EXPECT_EQ( 0, sender_.impl_->RTT(kReceiverSsrc, &rtt, &avg_rtt, &min_rtt, &max_rtt)); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, rtt, 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, avg_rtt, 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, min_rtt, 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, max_rtt, 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), rtt, 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), avg_rtt, 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), min_rtt, 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), max_rtt, 1); // No RTT from other ssrc. EXPECT_EQ(-1, sender_.impl_->RTT(kReceiverSsrc + 1, &rtt, &avg_rtt, &min_rtt, @@ -306,61 +473,51 @@ TEST_F(RtpRtcpImpl2Test, Rtt) { // Verify RTT from rtt_stats config. 
EXPECT_EQ(0, sender_.rtt_stats_.LastProcessedRtt()); EXPECT_EQ(0, sender_.impl_->rtt_ms()); - sender_.impl_->Process(); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, sender_.rtt_stats_.LastProcessedRtt(), - 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, sender_.impl_->rtt_ms(), 1); -} + AdvanceTime(TimeDelta::Millis(1000)); -TEST_F(RtpRtcpImpl2Test, SetRtcpXrRrtrStatus) { - EXPECT_FALSE(receiver_.impl_->RtcpXrRrtrStatus()); - receiver_.impl_->SetRtcpXrRrtrStatus(true); - EXPECT_TRUE(receiver_.impl_->RtcpXrRrtrStatus()); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), + sender_.rtt_stats_.LastProcessedRtt(), 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), sender_.impl_->rtt_ms(), 1); } -TEST_F(RtpRtcpImpl2Test, RttForReceiverOnly) { - receiver_.impl_->SetRtcpXrRrtrStatus(true); - +TEST_P(RtpRtcpImpl2Test, RttForReceiverOnly) { // Receiver module should send a Receiver time reference report (RTRR). EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport)); // Sender module should send a response to the last received RTRR (DLRR). - clock_.AdvanceTimeMilliseconds(1000); + AdvanceTime(TimeDelta::Millis(1000)); // Send Frame before sending a SR. - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); EXPECT_EQ(0, sender_.impl_->SendRTCP(kRtcpReport)); // Verify RTT. EXPECT_EQ(0, receiver_.rtt_stats_.LastProcessedRtt()); EXPECT_EQ(0, receiver_.impl_->rtt_ms()); - receiver_.impl_->Process(); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, + AdvanceTime(TimeDelta::Millis(1000)); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), receiver_.rtt_stats_.LastProcessedRtt(), 1); - EXPECT_NEAR(2 * kOneWayNetworkDelayMs, receiver_.impl_->rtt_ms(), 1); + EXPECT_NEAR(2 * kOneWayNetworkDelay.ms(), receiver_.impl_->rtt_ms(), 1); } -TEST_F(RtpRtcpImpl2Test, NoSrBeforeMedia) { +TEST_P(RtpRtcpImpl2Test, NoSrBeforeMedia) { // Ignore fake transport delays in this test. 
- sender_.transport_.SimulateNetworkDelay(0, &clock_); - receiver_.transport_.SimulateNetworkDelay(0, &clock_); - - sender_.impl_->Process(); - EXPECT_EQ(-1, sender_.RtcpSent().first_packet_time_ms); + sender_.transport_.SimulateNetworkDelay(TimeDelta::Millis(0)); + receiver_.transport_.SimulateNetworkDelay(TimeDelta::Millis(0)); + // Move ahead to the instant a rtcp is expected. // Verify no SR is sent before media has been sent, RR should still be sent // from the receiving module though. - clock_.AdvanceTimeMilliseconds(2000); - int64_t current_time = clock_.TimeInMilliseconds(); - sender_.impl_->Process(); - receiver_.impl_->Process(); + AdvanceTime(kDefaultReportInterval / 2); + int64_t current_time = time_controller_.GetClock()->TimeInMilliseconds(); EXPECT_EQ(-1, sender_.RtcpSent().first_packet_time_ms); EXPECT_EQ(receiver_.RtcpSent().first_packet_time_ms, current_time); - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + // RTCP should be triggered by the RTP send. + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); EXPECT_EQ(sender_.RtcpSent().first_packet_time_ms, current_time); } -TEST_F(RtpRtcpImpl2Test, RtcpPacketTypeCounter_Nack) { +TEST_P(RtpRtcpImpl2Test, RtcpPacketTypeCounter_Nack) { EXPECT_EQ(-1, receiver_.RtcpSent().first_packet_time_ms); EXPECT_EQ(-1, sender_.RtcpReceived().first_packet_time_ms); EXPECT_EQ(0U, sender_.RtcpReceived().nack_packets); @@ -370,6 +527,7 @@ TEST_F(RtpRtcpImpl2Test, RtcpPacketTypeCounter_Nack) { const uint16_t kNackLength = 1; uint16_t nack_list[kNackLength] = {123}; EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list, kNackLength)); + AdvanceTime(kOneWayNetworkDelay); EXPECT_EQ(1U, receiver_.RtcpSent().nack_packets); EXPECT_GT(receiver_.RtcpSent().first_packet_time_ms, -1); @@ -378,7 +536,7 @@ TEST_F(RtpRtcpImpl2Test, RtcpPacketTypeCounter_Nack) { EXPECT_GT(sender_.RtcpReceived().first_packet_time_ms, -1); } -TEST_F(RtpRtcpImpl2Test, AddStreamDataCounters) { +TEST_P(RtpRtcpImpl2Test, 
AddStreamDataCounters) { StreamDataCounters rtp; const int64_t kStartTimeMs = 1; rtp.first_packet_time_ms = kStartTimeMs; @@ -421,25 +579,25 @@ TEST_F(RtpRtcpImpl2Test, AddStreamDataCounters) { EXPECT_EQ(kStartTimeMs, sum.first_packet_time_ms); // Holds oldest time. } -TEST_F(RtpRtcpImpl2Test, SendsInitialNackList) { +TEST_P(RtpRtcpImpl2Test, SendsInitialNackList) { // Send module sends a NACK. const uint16_t kNackLength = 1; uint16_t nack_list[kNackLength] = {123}; EXPECT_EQ(0U, sender_.RtcpSent().nack_packets); // Send Frame before sending a compound RTCP that starts with SR. - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength)); EXPECT_EQ(1U, sender_.RtcpSent().nack_packets); EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123)); } -TEST_F(RtpRtcpImpl2Test, SendsExtendedNackList) { +TEST_P(RtpRtcpImpl2Test, SendsExtendedNackList) { // Send module sends a NACK. const uint16_t kNackLength = 1; uint16_t nack_list[kNackLength] = {123}; EXPECT_EQ(0U, sender_.RtcpSent().nack_packets); // Send Frame before sending a compound RTCP that starts with SR. - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength)); EXPECT_EQ(1U, sender_.RtcpSent().nack_packets); EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123)); @@ -457,33 +615,33 @@ TEST_F(RtpRtcpImpl2Test, SendsExtendedNackList) { EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(124)); } -TEST_F(RtpRtcpImpl2Test, ReSendsNackListAfterRttMs) { - sender_.transport_.SimulateNetworkDelay(0, &clock_); +TEST_P(RtpRtcpImpl2Test, ReSendsNackListAfterRttMs) { + sender_.transport_.SimulateNetworkDelay(TimeDelta::Millis(0)); // Send module sends a NACK. 
const uint16_t kNackLength = 2; uint16_t nack_list[kNackLength] = {123, 125}; EXPECT_EQ(0U, sender_.RtcpSent().nack_packets); // Send Frame before sending a compound RTCP that starts with SR. - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength)); EXPECT_EQ(1U, sender_.RtcpSent().nack_packets); EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123, 125)); // Same list not re-send, rtt interval has not passed. - const int kStartupRttMs = 100; - clock_.AdvanceTimeMilliseconds(kStartupRttMs); + const TimeDelta kStartupRtt = TimeDelta::Millis(100); + AdvanceTime(kStartupRtt); EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength)); EXPECT_EQ(1U, sender_.RtcpSent().nack_packets); // Rtt interval passed, full list sent. - clock_.AdvanceTimeMilliseconds(1); + AdvanceTime(TimeDelta::Millis(1)); EXPECT_EQ(0, sender_.impl_->SendNACK(nack_list, kNackLength)); EXPECT_EQ(2U, sender_.RtcpSent().nack_packets); EXPECT_THAT(sender_.LastNackListSent(), ElementsAre(123, 125)); } -TEST_F(RtpRtcpImpl2Test, UniqueNackRequests) { - receiver_.transport_.SimulateNetworkDelay(0, &clock_); +TEST_P(RtpRtcpImpl2Test, UniqueNackRequests) { + receiver_.transport_.SimulateNetworkDelay(TimeDelta::Millis(0)); EXPECT_EQ(0U, receiver_.RtcpSent().nack_packets); EXPECT_EQ(0U, receiver_.RtcpSent().nack_requests); EXPECT_EQ(0U, receiver_.RtcpSent().unique_nack_requests); @@ -505,8 +663,8 @@ TEST_F(RtpRtcpImpl2Test, UniqueNackRequests) { EXPECT_EQ(100, sender_.RtcpReceived().UniqueNackRequestsInPercent()); // Receive module sends new request with duplicated packets. 
- const int kStartupRttMs = 100; - clock_.AdvanceTimeMilliseconds(kStartupRttMs + 1); + const TimeDelta kStartupRtt = TimeDelta::Millis(100); + AdvanceTime(kStartupRtt + TimeDelta::Millis(1)); const uint16_t kNackLength2 = 4; uint16_t nack_list2[kNackLength2] = {11, 18, 20, 21}; EXPECT_EQ(0, receiver_.impl_->SendNACK(nack_list2, kNackLength2)); @@ -522,59 +680,54 @@ TEST_F(RtpRtcpImpl2Test, UniqueNackRequests) { EXPECT_EQ(75, sender_.RtcpReceived().UniqueNackRequestsInPercent()); } -TEST_F(RtpRtcpImpl2Test, ConfigurableRtcpReportInterval) { - const int kVideoReportInterval = 3000; +TEST_P(RtpRtcpImpl2Test, ConfigurableRtcpReportInterval) { + const TimeDelta kVideoReportInterval = TimeDelta::Millis(3000); // Recreate sender impl with new configuration, and redo setup. sender_.SetRtcpReportIntervalAndReset(kVideoReportInterval); SetUp(); - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Initial state - sender_.impl_->Process(); EXPECT_EQ(sender_.RtcpSent().first_packet_time_ms, -1); EXPECT_EQ(0u, sender_.transport_.NumRtcpSent()); // Move ahead to the last ms before a rtcp is expected, no action. - clock_.AdvanceTimeMilliseconds(kVideoReportInterval / 2 - 1); - sender_.impl_->Process(); + AdvanceTime(kVideoReportInterval / 2 - TimeDelta::Millis(1)); EXPECT_EQ(sender_.RtcpSent().first_packet_time_ms, -1); EXPECT_EQ(sender_.transport_.NumRtcpSent(), 0u); // Move ahead to the first rtcp. Send RTCP. - clock_.AdvanceTimeMilliseconds(1); - sender_.impl_->Process(); + AdvanceTime(TimeDelta::Millis(1)); EXPECT_GT(sender_.RtcpSent().first_packet_time_ms, -1); EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u); - SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); // Move ahead to the last possible second before second rtcp is expected. 
- clock_.AdvanceTimeMilliseconds(kVideoReportInterval * 1 / 2 - 1); - sender_.impl_->Process(); + AdvanceTime(kVideoReportInterval * 1 / 2 - TimeDelta::Millis(1)); EXPECT_EQ(sender_.transport_.NumRtcpSent(), 1u); // Move ahead into the range of second rtcp, the second rtcp may be sent. - clock_.AdvanceTimeMilliseconds(1); - sender_.impl_->Process(); + AdvanceTime(TimeDelta::Millis(1)); EXPECT_GE(sender_.transport_.NumRtcpSent(), 1u); - clock_.AdvanceTimeMilliseconds(kVideoReportInterval / 2); - sender_.impl_->Process(); + AdvanceTime(kVideoReportInterval / 2); EXPECT_GE(sender_.transport_.NumRtcpSent(), 1u); // Move out the range of second rtcp, the second rtcp must have been sent. - clock_.AdvanceTimeMilliseconds(kVideoReportInterval / 2); - sender_.impl_->Process(); + AdvanceTime(kVideoReportInterval / 2); EXPECT_EQ(sender_.transport_.NumRtcpSent(), 2u); } -TEST_F(RtpRtcpImpl2Test, StoresPacketInfoForSentPackets) { +TEST_P(RtpRtcpImpl2Test, StoresPacketInfoForSentPackets) { const uint32_t kStartTimestamp = 1u; SetUp(); sender_.impl_->SetStartTimestamp(kStartTimestamp); + sender_.impl_->SetSequenceNumber(1); + PacedPacketInfo pacing_info; RtpPacketToSend packet(nullptr); packet.set_packet_type(RtpPacketToSend::Type::kVideo); @@ -586,6 +739,7 @@ TEST_F(RtpRtcpImpl2Test, StoresPacketInfoForSentPackets) { packet.set_first_packet_of_frame(true); packet.SetMarker(true); sender_.impl_->TrySendPacket(&packet, pacing_info); + AdvanceTime(TimeDelta::Millis(1)); std::vector seqno_info = sender_.impl_->GetSentRtpPacketInfos(std::vector{1}); @@ -610,6 +764,8 @@ TEST_F(RtpRtcpImpl2Test, StoresPacketInfoForSentPackets) { packet.SetMarker(true); sender_.impl_->TrySendPacket(&packet, pacing_info); + AdvanceTime(TimeDelta::Millis(1)); + seqno_info = sender_.impl_->GetSentRtpPacketInfos(std::vector{2, 3, 4}); @@ -627,4 +783,302 @@ TEST_F(RtpRtcpImpl2Test, StoresPacketInfoForSentPackets) { /*is_last=*/1))); } +// Checks that the sender report stats are not available if no RTCP SR 
was sent. +TEST_P(RtpRtcpImpl2Test, SenderReportStatsNotAvailable) { + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Eq(absl::nullopt)); +} + +// Checks that the sender report stats are available if an RTCP SR was sent. +TEST_P(RtpRtcpImpl2Test, SenderReportStatsAvailable) { + // Send a frame in order to send an SR. + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + // Send an SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Not(Eq(absl::nullopt))); +} + +// Checks that the sender report stats are not available if an RTCP SR with an +// unexpected SSRC is received. +TEST_P(RtpRtcpImpl2Test, SenderReportStatsNotUpdatedWithUnexpectedSsrc) { + constexpr uint32_t kUnexpectedSenderSsrc = 0x87654321; + static_assert(kUnexpectedSenderSsrc != kSenderSsrc, ""); + // Forge a sender report and pass it to the receiver as if an RTCP SR were + // sent by an unexpected sender. + rtcp::SenderReport sr; + sr.SetSenderSsrc(kUnexpectedSenderSsrc); + sr.SetNtp({/*seconds=*/1u, /*fractions=*/1u << 31}); + sr.SetPacketCount(123u); + sr.SetOctetCount(456u); + auto raw_packet = sr.Build(); + receiver_.impl_->IncomingRtcpPacket(raw_packet.data(), raw_packet.size()); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Eq(absl::nullopt)); +} + +// Checks the stats derived from the last received RTCP SR are set correctly. +TEST_P(RtpRtcpImpl2Test, SenderReportStatsCheckStatsFromLastReport) { + using SenderReportStats = RtpRtcpInterface::SenderReportStats; + const NtpTime ntp(/*seconds=*/1u, /*fractions=*/1u << 31); + constexpr uint32_t kPacketCount = 123u; + constexpr uint32_t kOctetCount = 456u; + // Forge a sender report and pass it to the receiver as if an RTCP SR were + // sent by the sender. 
+ rtcp::SenderReport sr; + sr.SetSenderSsrc(kSenderSsrc); + sr.SetNtp(ntp); + sr.SetPacketCount(kPacketCount); + sr.SetOctetCount(kOctetCount); + auto raw_packet = sr.Build(); + receiver_.impl_->IncomingRtcpPacket(raw_packet.data(), raw_packet.size()); + + EXPECT_THAT( + receiver_.impl_->GetSenderReportStats(), + Optional(AllOf(Field(&SenderReportStats::last_remote_timestamp, Eq(ntp)), + Field(&SenderReportStats::packets_sent, Eq(kPacketCount)), + Field(&SenderReportStats::bytes_sent, Eq(kOctetCount))))); +} + +// Checks that the sender report stats count equals the number of sent RTCP SRs. +TEST_P(RtpRtcpImpl2Test, SenderReportStatsCount) { + using SenderReportStats = RtpRtcpInterface::SenderReportStats; + // Send a frame in order to send an SR. + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + // Send the first SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), + Optional(Field(&SenderReportStats::reports_count, Eq(1u)))); + // Send the second SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), + Optional(Field(&SenderReportStats::reports_count, Eq(2u)))); +} + +// Checks that the sender report stats include a valid arrival time if an RTCP +// SR was sent. +TEST_P(RtpRtcpImpl2Test, SenderReportStatsArrivalTimestampSet) { + // Send a frame in order to send an SR. + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + // Send an SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); + auto stats = receiver_.impl_->GetSenderReportStats(); + ASSERT_THAT(stats, Not(Eq(absl::nullopt))); + EXPECT_TRUE(stats->last_arrival_timestamp.Valid()); +} + +// Checks that the packet and byte counters from an RTCP SR are not zero once +// a frame is sent. 
+TEST_P(RtpRtcpImpl2Test, SenderReportStatsPacketByteCounters) { + using SenderReportStats = RtpRtcpInterface::SenderReportStats; + // Send a frame in order to send an SR. + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Gt(0)); + // Advance time otherwise the RTCP SR report will not include any packets + // generated by `SendFrame()`. + AdvanceTime(TimeDelta::Millis(1)); + // Send an SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + AdvanceTime(kOneWayNetworkDelay); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), + Optional(AllOf(Field(&SenderReportStats::packets_sent, Gt(0u)), + Field(&SenderReportStats::bytes_sent, Gt(0u))))); +} + +TEST_P(RtpRtcpImpl2Test, SendingVideoAdvancesSequenceNumber) { + const uint16_t sequence_number = sender_.impl_->SequenceNumber(); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Gt(0)); + EXPECT_EQ(sequence_number + 1, sender_.impl_->SequenceNumber()); +} + +TEST_P(RtpRtcpImpl2Test, SequenceNumberNotAdvancedWhenNotSending) { + const uint16_t sequence_number = sender_.impl_->SequenceNumber(); + sender_.impl_->SetSendingMediaStatus(false); + EXPECT_FALSE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Eq(0)); + EXPECT_EQ(sequence_number, sender_.impl_->SequenceNumber()); +} + +TEST_P(RtpRtcpImpl2Test, PaddingNotAllowedInMiddleOfFrame) { + constexpr size_t kPaddingSize = 100; + + // Can't send padding before media. + EXPECT_THAT(sender_.impl_->GeneratePadding(kPaddingSize), SizeIs(0u)); + + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + + // Padding is now ok. + EXPECT_THAT(sender_.impl_->GeneratePadding(kPaddingSize), SizeIs(Gt(0u))); + + // Send half a video frame. 
+ PacedPacketInfo pacing_info; + std::unique_ptr<RtpPacketToSend> packet = + sender_.impl_->RtpSender()->AllocatePacket(); + packet->set_packet_type(RtpPacketToSend::Type::kVideo); + packet->set_first_packet_of_frame(true); + packet->SetMarker(false); // Marker false - not last packet of frame. + sender_.impl_->RtpSender()->AssignSequenceNumber(packet.get()); + + EXPECT_TRUE(sender_.impl_->TrySendPacket(packet.get(), pacing_info)); + + // Padding not allowed in middle of frame. + EXPECT_THAT(sender_.impl_->GeneratePadding(kPaddingSize), SizeIs(0u)); + + packet = sender_.impl_->RtpSender()->AllocatePacket(); + packet->set_packet_type(RtpPacketToSend::Type::kVideo); + packet->set_first_packet_of_frame(true); + packet->SetMarker(true); + sender_.impl_->RtpSender()->AssignSequenceNumber(packet.get()); + + EXPECT_TRUE(sender_.impl_->TrySendPacket(packet.get(), pacing_info)); + + // Padding is OK again. + EXPECT_THAT(sender_.impl_->GeneratePadding(kPaddingSize), SizeIs(Gt(0u))); +} + +TEST_P(RtpRtcpImpl2Test, PaddingTimestampMatchesMedia) { + constexpr size_t kPaddingSize = 100; + const uint32_t kTimestamp = 123; + + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid, + kTimestamp, /*capture_time_ms=*/0)); + EXPECT_EQ(sender_.last_packet().Timestamp(), kTimestamp); + uint16_t media_seq = sender_.last_packet().SequenceNumber(); + + // Generate and send padding. + auto padding = sender_.impl_->GeneratePadding(kPaddingSize); + ASSERT_FALSE(padding.empty()); + for (auto& packet : padding) { + sender_.impl_->TrySendPacket(packet.get(), PacedPacketInfo()); + } + + // Verify we sent a new packet, but with the same timestamp.
+ EXPECT_NE(sender_.last_packet().SequenceNumber(), media_seq); + EXPECT_EQ(sender_.last_packet().Timestamp(), kTimestamp); +} + +TEST_P(RtpRtcpImpl2Test, AssignsTransportSequenceNumber) { + sender_.RegisterHeaderExtension(TransportSequenceNumber::kUri, + kTransportSequenceNumberExtensionId); + + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + uint16_t first_transport_seq = 0; + EXPECT_TRUE(sender_.last_packet().GetExtension<TransportSequenceNumber>( + &first_transport_seq)); + + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + uint16_t second_transport_seq = 0; + EXPECT_TRUE(sender_.last_packet().GetExtension<TransportSequenceNumber>( + &second_transport_seq)); + + EXPECT_EQ(first_transport_seq + 1, second_transport_seq); +} + +TEST_P(RtpRtcpImpl2Test, AssignsAbsoluteSendTime) { + sender_.RegisterHeaderExtension(AbsoluteSendTime::kUri, + kAbsoluteSendTimeExtensionId); + + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + EXPECT_NE(sender_.last_packet().GetExtension<AbsoluteSendTime>(), 0u); +} + +TEST_P(RtpRtcpImpl2Test, AssignsTransmissionTimeOffset) { + sender_.RegisterHeaderExtension(TransmissionOffset::kUri, + kTransmissionOffsetExtensionId); + + constexpr TimeDelta kOffset = TimeDelta::Millis(100); + // Transmission offset is calculated from difference between capture time + // and send time.
+ int64_t capture_time_ms = time_controller_.GetClock()->TimeInMilliseconds(); + time_controller_.AdvanceTime(kOffset); + + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid, + /*timestamp=*/0, capture_time_ms)); + EXPECT_EQ(sender_.last_packet().GetExtension<TransmissionOffset>(), + kOffset.ms() * kCaptureTimeMsToRtpTimestamp); +} + +TEST_P(RtpRtcpImpl2Test, PropagatesSentPacketInfo) { + sender_.RegisterHeaderExtension(TransportSequenceNumber::kUri, + kTransportSequenceNumberExtensionId); + int64_t now_ms = time_controller_.GetClock()->TimeInMilliseconds(); + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + EXPECT_THAT( + sender_.last_sent_packet(), + Optional( + AllOf(Field(&RtpRtcpModule::SentPacket::packet_id, + Eq(sender_.last_packet() + .GetExtension<TransportSequenceNumber>())), + Field(&RtpRtcpModule::SentPacket::capture_time_ms, Eq(now_ms)), + Field(&RtpRtcpModule::SentPacket::ssrc, Eq(kSenderSsrc))))); +} + +TEST_P(RtpRtcpImpl2Test, GeneratesFlexfec) { + constexpr int kFlexfecPayloadType = 118; + constexpr uint32_t kFlexfecSsrc = 17; + const char kNoMid[] = ""; + const std::vector<RtpExtension> kNoRtpExtensions; + const std::vector<RtpExtensionSize> kNoRtpExtensionSizes; + + // Make sure FlexFec sequence numbers start at a different point than media. + const uint16_t fec_start_seq = sender_.impl_->SequenceNumber() + 100; + RtpState start_state; + start_state.sequence_number = fec_start_seq; + FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexfecSsrc, kSenderSsrc, + kNoMid, kNoRtpExtensions, kNoRtpExtensionSizes, + &start_state, time_controller_.GetClock()); + ReinitWithFec(&flexfec_sender, /*red_payload_type=*/absl::nullopt); + + // Parameters selected to generate a single FEC packet per media packet. + FecProtectionParams params; + params.fec_rate = 15; + params.max_fec_frames = 1; + params.fec_mask_type = kFecMaskRandom; + sender_.impl_->SetFecProtectionParams(params, params); + + // Send a one packet frame, expect one media packet and one FEC packet.
+ EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Eq(2)); + + const RtpPacketReceived& fec_packet = sender_.last_packet(); + EXPECT_EQ(fec_packet.SequenceNumber(), fec_start_seq); + EXPECT_EQ(fec_packet.Ssrc(), kFlexfecSsrc); + EXPECT_EQ(fec_packet.PayloadType(), kFlexfecPayloadType); +} + +TEST_P(RtpRtcpImpl2Test, GeneratesUlpfec) { + constexpr int kUlpfecPayloadType = 118; + constexpr int kRedPayloadType = 119; + UlpfecGenerator ulpfec_sender(kRedPayloadType, kUlpfecPayloadType, + time_controller_.GetClock()); + ReinitWithFec(&ulpfec_sender, kRedPayloadType); + + // Parameters selected to generate a single FEC packet per media packet. + FecProtectionParams params; + params.fec_rate = 15; + params.max_fec_frames = 1; + params.fec_mask_type = kFecMaskRandom; + sender_.impl_->SetFecProtectionParams(params, params); + + // Send a one packet frame, expect one media packet and one FEC packet. + EXPECT_TRUE(SendFrame(&sender_, sender_video_.get(), kBaseLayerTid)); + ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Eq(2)); + + // Ulpfec is sent on the media ssrc, sharing the sequence number series. + const RtpPacketReceived& fec_packet = sender_.last_packet(); + EXPECT_EQ(fec_packet.SequenceNumber(), kSequenceNumber + 1); + EXPECT_EQ(fec_packet.Ssrc(), kSenderSsrc); + // The packets are encapsulated in RED packets, check that and that the RED + // header (first byte of payload) indicates the desired FEC payload type.
+ EXPECT_EQ(fec_packet.PayloadType(), kRedPayloadType); + EXPECT_EQ(fec_packet.payload()[0], kUlpfecPayloadType); +} + +INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, + RtpRtcpImpl2Test, + ::testing::Values(TestConfig{false}, + TestConfig{true})); + } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc index e2595664f6..ac05584e18 100644 --- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc @@ -15,7 +15,6 @@ #include #include "api/transport/field_trial_based_config.h" -#include "api/video_codecs/video_codec.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_packet.h" #include "modules/rtp_rtcp/source/rtcp_packet/nack.h" @@ -28,6 +27,11 @@ #include "test/rtp_header_parser.h" using ::testing::ElementsAre; +using ::testing::Eq; +using ::testing::Field; +using ::testing::Gt; +using ::testing::Not; +using ::testing::Optional; namespace webrtc { namespace { @@ -37,6 +41,9 @@ const int64_t kOneWayNetworkDelayMs = 100; const uint8_t kBaseLayerTid = 0; const uint8_t kHigherLayerTid = 1; const uint16_t kSequenceNumber = 100; +const uint8_t kPayloadType = 100; +const int kWidth = 320; +const int kHeight = 100; class RtcpRttStatsTestImpl : public RtcpRttStats { public: @@ -65,11 +72,10 @@ class SendTransport : public Transport { bool SendRtp(const uint8_t* data, size_t len, const PacketOptions& options) override { - RTPHeader header; - std::unique_ptr parser(RtpHeaderParser::CreateForTest()); - EXPECT_TRUE(parser->Parse(static_cast(data), len, &header)); + RtpPacket packet; + EXPECT_TRUE(packet.Parse(data, len)); ++rtp_packets_sent_; - last_rtp_header_ = header; + last_rtp_sequence_number_ = packet.SequenceNumber(); return true; } bool SendRtcp(const uint8_t* data, size_t len) override { @@ -91,7 +97,7 @@ class SendTransport : public Transport { int64_t delay_ms_; int rtp_packets_sent_; 
size_t rtcp_packets_sent_; - RTPHeader last_rtp_header_; + uint16_t last_rtp_sequence_number_; std::vector last_nack_list_; }; @@ -131,7 +137,7 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver { } int RtpSent() { return transport_.rtp_packets_sent_; } uint16_t LastRtpSequenceNumber() { - return transport_.last_rtp_header_.sequenceNumber; + return transport_.last_rtp_sequence_number_; } std::vector LastNackListSent() { return transport_.last_nack_list_; @@ -143,7 +149,7 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver { private: void CreateModuleImpl() { - RtpRtcp::Configuration config; + RtpRtcpInterface::Configuration config; config.audio = false; config.clock = clock_; config.outgoing_transport = &transport_; @@ -153,6 +159,7 @@ class RtpRtcpModule : public RtcpPacketTypeCounterObserver { config.rtcp_report_interval_ms = rtcp_report_interval_ms_; config.local_media_ssrc = is_sender_ ? kSenderSsrc : kReceiverSsrc; config.need_rtp_packet_infos = true; + config.non_sender_rtt_measurement = true; impl_.reset(new ModuleRtpRtcpImpl(config)); impl_->SetRemoteSSRC(is_sender_ ? kReceiverSsrc : kSenderSsrc); @@ -185,11 +192,6 @@ class RtpRtcpImplTest : public ::testing::Test { video_config.field_trials = &field_trials; sender_video_ = std::make_unique(video_config); - memset(&codec_, 0, sizeof(VideoCodec)); - codec_.plType = 100; - codec_.width = 320; - codec_.height = 180; - // Receive module. 
EXPECT_EQ(0, receiver_.impl_->SetSendingStatus(false)); receiver_.impl_->SetSendingMediaStatus(false); @@ -202,7 +204,6 @@ class RtpRtcpImplTest : public ::testing::Test { RtpRtcpModule sender_; std::unique_ptr sender_video_; RtpRtcpModule receiver_; - VideoCodec codec_; void SendFrame(const RtpRtcpModule* module, RTPSenderVideo* sender, @@ -211,8 +212,8 @@ class RtpRtcpImplTest : public ::testing::Test { vp8_header.temporalIdx = tid; RTPVideoHeader rtp_video_header; rtp_video_header.frame_type = VideoFrameType::kVideoFrameKey; - rtp_video_header.width = codec_.width; - rtp_video_header.height = codec_.height; + rtp_video_header.width = kWidth; + rtp_video_header.height = kHeight; rtp_video_header.rotation = kVideoRotation_0; rtp_video_header.content_type = VideoContentType::UNSPECIFIED; rtp_video_header.playout_delay = {-1, -1}; @@ -223,9 +224,9 @@ class RtpRtcpImplTest : public ::testing::Test { rtp_video_header.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false}; const uint8_t payload[100] = {0}; - EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, codec_.plType, true)); - EXPECT_TRUE(sender->SendVideo(codec_.plType, VideoCodecType::kVideoCodecVP8, - 0, 0, payload, nullptr, rtp_video_header, 0)); + EXPECT_TRUE(module->impl_->OnSendingRtpFrame(0, 0, kPayloadType, true)); + EXPECT_TRUE(sender->SendVideo(kPayloadType, VideoCodecType::kVideoCodecVP8, + 0, 0, payload, rtp_video_header, 0)); } void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) { @@ -312,15 +313,7 @@ TEST_F(RtpRtcpImplTest, Rtt) { EXPECT_NEAR(2 * kOneWayNetworkDelayMs, sender_.impl_->rtt_ms(), 1); } -TEST_F(RtpRtcpImplTest, SetRtcpXrRrtrStatus) { - EXPECT_FALSE(receiver_.impl_->RtcpXrRrtrStatus()); - receiver_.impl_->SetRtcpXrRrtrStatus(true); - EXPECT_TRUE(receiver_.impl_->RtcpXrRrtrStatus()); -} - TEST_F(RtpRtcpImplTest, RttForReceiverOnly) { - receiver_.impl_->SetRtcpXrRrtrStatus(true); - // Receiver module should send a Receiver time reference report (RTRR). 
EXPECT_EQ(0, receiver_.impl_->SendRTCP(kRtcpReport)); @@ -627,4 +620,102 @@ TEST_F(RtpRtcpImplTest, StoresPacketInfoForSentPackets) { /*is_last=*/1))); } +// Checks that the sender report stats are not available if no RTCP SR was sent. +TEST_F(RtpRtcpImplTest, SenderReportStatsNotAvailable) { + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Eq(absl::nullopt)); +} + +// Checks that the sender report stats are available if an RTCP SR was sent. +TEST_F(RtpRtcpImplTest, SenderReportStatsAvailable) { + // Send a frame in order to send an SR. + SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + // Send an SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Not(Eq(absl::nullopt))); +} + +// Checks that the sender report stats are not available if an RTCP SR with an +// unexpected SSRC is received. +TEST_F(RtpRtcpImplTest, SenderReportStatsNotUpdatedWithUnexpectedSsrc) { + constexpr uint32_t kUnexpectedSenderSsrc = 0x87654321; + static_assert(kUnexpectedSenderSsrc != kSenderSsrc, ""); + // Forge a sender report and pass it to the receiver as if an RTCP SR were + // sent by an unexpected sender. + rtcp::SenderReport sr; + sr.SetSenderSsrc(kUnexpectedSenderSsrc); + sr.SetNtp({/*seconds=*/1u, /*fractions=*/1u << 31}); + sr.SetPacketCount(123u); + sr.SetOctetCount(456u); + auto raw_packet = sr.Build(); + receiver_.impl_->IncomingRtcpPacket(raw_packet.data(), raw_packet.size()); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), Eq(absl::nullopt)); +} + +// Checks the stats derived from the last received RTCP SR are set correctly. 
+TEST_F(RtpRtcpImplTest, SenderReportStatsCheckStatsFromLastReport) { + using SenderReportStats = RtpRtcpInterface::SenderReportStats; + const NtpTime ntp(/*seconds=*/1u, /*fractions=*/1u << 31); + constexpr uint32_t kPacketCount = 123u; + constexpr uint32_t kOctetCount = 456u; + // Forge a sender report and pass it to the receiver as if an RTCP SR were + // sent by the sender. + rtcp::SenderReport sr; + sr.SetSenderSsrc(kSenderSsrc); + sr.SetNtp(ntp); + sr.SetPacketCount(kPacketCount); + sr.SetOctetCount(kOctetCount); + auto raw_packet = sr.Build(); + receiver_.impl_->IncomingRtcpPacket(raw_packet.data(), raw_packet.size()); + + EXPECT_THAT( + receiver_.impl_->GetSenderReportStats(), + Optional(AllOf(Field(&SenderReportStats::last_remote_timestamp, Eq(ntp)), + Field(&SenderReportStats::packets_sent, Eq(kPacketCount)), + Field(&SenderReportStats::bytes_sent, Eq(kOctetCount))))); +} + +// Checks that the sender report stats count equals the number of sent RTCP SRs. +TEST_F(RtpRtcpImplTest, SenderReportStatsCount) { + using SenderReportStats = RtpRtcpInterface::SenderReportStats; + // Send a frame in order to send an SR. + SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + // Send the first SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), + Optional(Field(&SenderReportStats::reports_count, Eq(1u)))); + // Send the second SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), + Optional(Field(&SenderReportStats::reports_count, Eq(2u)))); +} + +// Checks that the sender report stats include a valid arrival time if an RTCP +// SR was sent. +TEST_F(RtpRtcpImplTest, SenderReportStatsArrivalTimestampSet) { + // Send a frame in order to send an SR. + SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + // Send an SR. 
+ ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + auto stats = receiver_.impl_->GetSenderReportStats(); + ASSERT_THAT(stats, Not(Eq(absl::nullopt))); + EXPECT_TRUE(stats->last_arrival_timestamp.Valid()); +} + +// Checks that the packet and byte counters from an RTCP SR are not zero once +// a frame is sent. +TEST_F(RtpRtcpImplTest, SenderReportStatsPacketByteCounters) { + using SenderReportStats = RtpRtcpInterface::SenderReportStats; + // Send a frame in order to send an SR. + SendFrame(&sender_, sender_video_.get(), kBaseLayerTid); + ASSERT_THAT(sender_.transport_.rtp_packets_sent_, Gt(0)); + // Advance time otherwise the RTCP SR report will not include any packets + // generated by `SendFrame()`. + clock_.AdvanceTimeMilliseconds(1); + // Send an SR. + ASSERT_THAT(sender_.impl_->SendRTCP(kRtcpReport), Eq(0)); + EXPECT_THAT(receiver_.impl_->GetSenderReportStats(), + Optional(AllOf(Field(&SenderReportStats::packets_sent, Gt(0u)), + Field(&SenderReportStats::bytes_sent, Gt(0u))))); +} + } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_rtcp_interface.h b/modules/rtp_rtcp/source/rtp_rtcp_interface.h new file mode 100644 index 0000000000..dd5744ec54 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_rtcp_interface.h @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_RTP_RTCP_SOURCE_RTP_RTCP_INTERFACE_H_ +#define MODULES_RTP_RTCP_SOURCE_RTP_RTCP_INTERFACE_H_ + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/frame_transformer_interface.h" +#include "api/scoped_refptr.h" +#include "api/transport/webrtc_key_value_config.h" +#include "api/video/video_bitrate_allocation.h" +#include "modules/rtp_rtcp/include/receive_statistics.h" +#include "modules/rtp_rtcp/include/report_block_data.h" +#include "modules/rtp_rtcp/include/rtp_packet_sender.h" +#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_sequence_number_map.h" +#include "modules/rtp_rtcp/source/video_fec_generator.h" +#include "rtc_base/constructor_magic.h" +#include "system_wrappers/include/ntp_time.h" + +namespace webrtc { + +// Forward declarations. +class FrameEncryptorInterface; +class RateLimiter; +class RemoteBitrateEstimator; +class RtcEventLog; +class RTPSender; +class Transport; +class VideoBitrateAllocationObserver; + +class RtpRtcpInterface : public RtcpFeedbackSenderInterface { + public: + struct Configuration { + Configuration() = default; + Configuration(Configuration&& rhs) = default; + + // True for a audio version of the RTP/RTCP module object false will create + // a video version. + bool audio = false; + bool receiver_only = false; + + // The clock to use to read time. If nullptr then system clock will be used. + Clock* clock = nullptr; + + ReceiveStatisticsProvider* receive_statistics = nullptr; + + // Transport object that will be called when packets are ready to be sent + // out on the network. + Transport* outgoing_transport = nullptr; + + // Called when the receiver requests an intra frame. + RtcpIntraFrameObserver* intra_frame_callback = nullptr; + + // Called when the receiver sends a loss notification. 
+ RtcpLossNotificationObserver* rtcp_loss_notification_observer = nullptr; + + // Called when we receive a changed estimate from the receiver of out + // stream. + RtcpBandwidthObserver* bandwidth_callback = nullptr; + + NetworkStateEstimateObserver* network_state_estimate_observer = nullptr; + TransportFeedbackObserver* transport_feedback_callback = nullptr; + VideoBitrateAllocationObserver* bitrate_allocation_observer = nullptr; + RtcpRttStats* rtt_stats = nullptr; + RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer = nullptr; + // Called on receipt of RTCP report block from remote side. + // TODO(bugs.webrtc.org/10679): Consider whether we want to use + // only getters or only callbacks. If we decide on getters, the + // ReportBlockDataObserver should also be removed in favor of + // GetLatestReportBlockData(). + RtcpCnameCallback* rtcp_cname_callback = nullptr; + ReportBlockDataObserver* report_block_data_observer = nullptr; + + // Estimates the bandwidth available for a set of streams from the same + // client. + RemoteBitrateEstimator* remote_bitrate_estimator = nullptr; + + // Spread any bursts of packets into smaller bursts to minimize packet loss. + RtpPacketSender* paced_sender = nullptr; + + // Generates FEC packets. + // TODO(sprang): Wire up to RtpSenderEgress. + VideoFecGenerator* fec_generator = nullptr; + + BitrateStatisticsObserver* send_bitrate_observer = nullptr; + SendSideDelayObserver* send_side_delay_observer = nullptr; + RtcEventLog* event_log = nullptr; + SendPacketObserver* send_packet_observer = nullptr; + RateLimiter* retransmission_rate_limiter = nullptr; + StreamDataCountersCallback* rtp_stats_callback = nullptr; + + int rtcp_report_interval_ms = 0; + + // Update network2 instead of pacer_exit field of video timing extension. 
+ bool populate_network2_timestamp = false; + + rtc::scoped_refptr frame_transformer; + + // E2EE Custom Video Frame Encryption + FrameEncryptorInterface* frame_encryptor = nullptr; + // Require all outgoing frames to be encrypted with a FrameEncryptor. + bool require_frame_encryption = false; + + // Corresponds to extmap-allow-mixed in SDP negotiation. + bool extmap_allow_mixed = false; + + // If true, the RTP sender will always annotate outgoing packets with + // MID and RID header extensions, if provided and negotiated. + // If false, the RTP sender will stop sending MID and RID header extensions, + // when it knows that the receiver is ready to demux based on SSRC. This is + // done by RTCP RR acking. + bool always_send_mid_and_rid = false; + + // If set, field trials are read from |field_trials|, otherwise + // defaults to webrtc::FieldTrialBasedConfig. + const WebRtcKeyValueConfig* field_trials = nullptr; + + // SSRCs for media and retransmission, respectively. + // FlexFec SSRC is fetched from |flexfec_sender|. + uint32_t local_media_ssrc = 0; + absl::optional rtx_send_ssrc; + + bool need_rtp_packet_infos = false; + + // If true, the RTP packet history will select RTX packets based on + // heuristics such as send time, retransmission count etc, in order to + // make padding potentially more useful. + // If false, the last packet will always be picked. This may reduce CPU + // overhead. + bool enable_rtx_padding_prioritization = true; + + // Estimate RTT as non-sender as described in + // https://tools.ietf.org/html/rfc3611#section-4.4 and #section-4.5 + bool non_sender_rtt_measurement = false; + + private: + RTC_DISALLOW_COPY_AND_ASSIGN(Configuration); + }; + + // Stats for RTCP sender reports (SR) for a specific SSRC. + // Refer to https://tools.ietf.org/html/rfc3550#section-6.4.1. + struct SenderReportStats { + // Arrival NPT timestamp for the last received RTCP SR. 
+ NtpTime last_arrival_timestamp; + // Received (a.k.a., remote) NTP timestamp for the last received RTCP SR. + NtpTime last_remote_timestamp; + // Total number of RTP data packets transmitted by the sender since starting + // transmission up until the time this SR packet was generated. The count + // should be reset if the sender changes its SSRC identifier. + uint32_t packets_sent; + // Total number of payload octets (i.e., not including header or padding) + // transmitted in RTP data packets by the sender since starting transmission + // up until the time this SR packet was generated. The count should be reset + // if the sender changes its SSRC identifier. + uint64_t bytes_sent; + // Total number of RTCP SR blocks received. + // https://www.w3.org/TR/webrtc-stats/#dom-rtcremoteoutboundrtpstreamstats-reportssent. + uint64_t reports_count; + }; + + // ************************************************************************** + // Receiver functions + // ************************************************************************** + + virtual void IncomingRtcpPacket(const uint8_t* incoming_packet, + size_t incoming_packet_length) = 0; + + virtual void SetRemoteSSRC(uint32_t ssrc) = 0; + + // Called when the local ssrc changes (post initialization) for receive + // streams to match with send. Called on the packet receive thread/tq. + virtual void SetLocalSsrc(uint32_t ssrc) = 0; + + // ************************************************************************** + // Sender + // ************************************************************************** + + // Sets the maximum size of an RTP packet, including RTP headers. + virtual void SetMaxRtpPacketSize(size_t size) = 0; + + // Returns max RTP packet size. Takes into account RTP headers and + // FEC/ULP/RED overhead (when FEC is enabled). + virtual size_t MaxRtpPacketSize() const = 0; + + virtual void RegisterSendPayloadFrequency(int payload_type, + int payload_frequency) = 0; + + // Unregisters a send payload. 
+ // |payload_type| - payload type of codec + // Returns -1 on failure else 0. + virtual int32_t DeRegisterSendPayload(int8_t payload_type) = 0; + + virtual void SetExtmapAllowMixed(bool extmap_allow_mixed) = 0; + + // Register extension by uri, triggers CHECK on falure. + virtual void RegisterRtpHeaderExtension(absl::string_view uri, int id) = 0; + + virtual int32_t DeregisterSendRtpHeaderExtension(RTPExtensionType type) = 0; + virtual void DeregisterSendRtpHeaderExtension(absl::string_view uri) = 0; + + // Returns true if RTP module is send media, and any of the extensions + // required for bandwidth estimation is registered. + virtual bool SupportsPadding() const = 0; + // Same as SupportsPadding(), but additionally requires that + // SetRtxSendStatus() has been called with the kRtxRedundantPayloads option + // enabled. + virtual bool SupportsRtxPayloadPadding() const = 0; + + // Returns start timestamp. + virtual uint32_t StartTimestamp() const = 0; + + // Sets start timestamp. Start timestamp is set to a random value if this + // function is never called. + virtual void SetStartTimestamp(uint32_t timestamp) = 0; + + // Returns SequenceNumber. + virtual uint16_t SequenceNumber() const = 0; + + // Sets SequenceNumber, default is a random number. + virtual void SetSequenceNumber(uint16_t seq) = 0; + + virtual void SetRtpState(const RtpState& rtp_state) = 0; + virtual void SetRtxState(const RtpState& rtp_state) = 0; + virtual RtpState GetRtpState() const = 0; + virtual RtpState GetRtxState() const = 0; + + // Returns SSRC. + virtual uint32_t SSRC() const = 0; + + // Sets the value for sending in the RID (and Repaired) RTP header extension. + // RIDs are used to identify an RTP stream if SSRCs are not negotiated. + // If the RID and Repaired RID extensions are not registered, the RID will + // not be sent. + virtual void SetRid(const std::string& rid) = 0; + + // Sets the value for sending in the MID RTP header extension. 
+ // The MID RTP header extension should be registered for this to do anything. + // Once set, this value can not be changed or removed. + virtual void SetMid(const std::string& mid) = 0; + + // Sets CSRC. + // |csrcs| - vector of CSRCs + virtual void SetCsrcs(const std::vector& csrcs) = 0; + + // Turns on/off sending RTX (RFC 4588). The modes can be set as a combination + // of values of the enumerator RtxMode. + virtual void SetRtxSendStatus(int modes) = 0; + + // Returns status of sending RTX (RFC 4588). The returned value can be + // a combination of values of the enumerator RtxMode. + virtual int RtxSendStatus() const = 0; + + // Returns the SSRC used for RTX if set, otherwise a nullopt. + virtual absl::optional RtxSsrc() const = 0; + + // Sets the payload type to use when sending RTX packets. Note that this + // doesn't enable RTX, only the payload type is set. + virtual void SetRtxSendPayloadType(int payload_type, + int associated_payload_type) = 0; + + // Returns the FlexFEC SSRC, if there is one. + virtual absl::optional FlexfecSsrc() const = 0; + + // Sets sending status. Sends kRtcpByeCode when going from true to false. + // Returns -1 on failure else 0. + virtual int32_t SetSendingStatus(bool sending) = 0; + + // Returns current sending status. + virtual bool Sending() const = 0; + + // Starts/Stops media packets. On by default. + virtual void SetSendingMediaStatus(bool sending) = 0; + + // Returns current media sending status. + virtual bool SendingMedia() const = 0; + + // Returns whether audio is configured (i.e. Configuration::audio = true). + virtual bool IsAudioConfigured() const = 0; + + // Indicate that the packets sent by this module should be counted towards the + // bitrate estimate since the stream participates in the bitrate allocation. + virtual void SetAsPartOfAllocation(bool part_of_allocation) = 0; + + // Returns bitrate sent (post-pacing) per packet type. 
+ virtual RtpSendRates GetSendRates() const = 0; + + virtual RTPSender* RtpSender() = 0; + virtual const RTPSender* RtpSender() const = 0; + + // Record that a frame is about to be sent. Returns true on success, and false + // if the module isn't ready to send. + virtual bool OnSendingRtpFrame(uint32_t timestamp, + int64_t capture_time_ms, + int payload_type, + bool force_sender_report) = 0; + + // Try to send the provided packet. Returns true iff packet matches any of + // the SSRCs for this module (media/rtx/fec etc) and was forwarded to the + // transport. + virtual bool TrySendPacket(RtpPacketToSend* packet, + const PacedPacketInfo& pacing_info) = 0; + + // Update the FEC protection parameters to use for delta- and key-frames. + // Only used when deferred FEC is active. + virtual void SetFecProtectionParams( + const FecProtectionParams& delta_params, + const FecProtectionParams& key_params) = 0; + + // If deferred FEC generation is enabled, this method should be called after + // calling TrySendPacket(). Any generated FEC packets will be removed and + // returned from the FEC generator. + virtual std::vector> FetchFecPackets() = 0; + + virtual void OnPacketsAcknowledged( + rtc::ArrayView sequence_numbers) = 0; + + virtual std::vector> GeneratePadding( + size_t target_size_bytes) = 0; + + virtual std::vector GetSentRtpPacketInfos( + rtc::ArrayView sequence_numbers) const = 0; + + // Returns an expected per packet overhead representing the main RTP header, + // any CSRCs, and the registered header extensions that are expected on all + // packets (i.e. disregarding things like abs capture time which is only + // populated on a subset of packets, but counting MID/RID type extensions + // when we expect to send them). 
+ virtual size_t ExpectedPerPacketOverhead() const = 0; + + // ************************************************************************** + // RTCP + // ************************************************************************** + + // Returns RTCP status. + virtual RtcpMode RTCP() const = 0; + + // Sets RTCP status i.e on(compound or non-compound)/off. + // |method| - RTCP method to use. + virtual void SetRTCPStatus(RtcpMode method) = 0; + + // Sets RTCP CName (i.e unique identifier). + // Returns -1 on failure else 0. + virtual int32_t SetCNAME(const char* cname) = 0; + + // Returns remote NTP. + // Returns -1 on failure else 0. + virtual int32_t RemoteNTP(uint32_t* received_ntp_secs, + uint32_t* received_ntp_frac, + uint32_t* rtcp_arrival_time_secs, + uint32_t* rtcp_arrival_time_frac, + uint32_t* rtcp_timestamp) const = 0; + + // Returns current RTT (round-trip time) estimate. + // Returns -1 on failure else 0. + virtual int32_t RTT(uint32_t remote_ssrc, + int64_t* rtt, + int64_t* avg_rtt, + int64_t* min_rtt, + int64_t* max_rtt) const = 0; + + // Returns the estimated RTT, with fallback to a default value. + virtual int64_t ExpectedRetransmissionTimeMs() const = 0; + + // Forces a send of a RTCP packet. Periodic SR and RR are triggered via the + // process function. + // Returns -1 on failure else 0. + virtual int32_t SendRTCP(RTCPPacketType rtcp_packet_type) = 0; + + // Returns send statistics for the RTP and RTX stream. + virtual void GetSendStreamDataCounters( + StreamDataCounters* rtp_counters, + StreamDataCounters* rtx_counters) const = 0; + + // A snapshot of Report Blocks with additional data of interest to statistics. + // Within this list, the sender-source SSRC pair is unique and per-pair the + // ReportBlockData represents the latest Report Block that was received for + // that pair. + virtual std::vector GetLatestReportBlockData() const = 0; + // Returns stats based on the received RTCP SRs. 
+ virtual absl::optional GetSenderReportStats() const = 0; + + // (REMB) Receiver Estimated Max Bitrate. + // Schedules sending REMB on next and following sender/receiver reports. + void SetRemb(int64_t bitrate_bps, std::vector ssrcs) override = 0; + // Stops sending REMB on next and following sender/receiver reports. + void UnsetRemb() override = 0; + + // (NACK) + + // Sends a Negative acknowledgement packet. + // Returns -1 on failure else 0. + // TODO(philipel): Deprecate this and start using SendNack instead, mostly + // because we want a function that actually send NACK for the specified + // packets. + virtual int32_t SendNACK(const uint16_t* nack_list, uint16_t size) = 0; + + // Sends NACK for the packets specified. + // Note: This assumes the caller keeps track of timing and doesn't rely on + // the RTP module to do this. + virtual void SendNack(const std::vector& sequence_numbers) = 0; + + // Store the sent packets, needed to answer to a Negative acknowledgment + // requests. + virtual void SetStorePacketsStatus(bool enable, uint16_t numberToStore) = 0; + + virtual void SetVideoBitrateAllocation( + const VideoBitrateAllocation& bitrate) = 0; + + // ************************************************************************** + // Video + // ************************************************************************** + + // Requests new key frame. + // using PLI, https://tools.ietf.org/html/rfc4585#section-6.3.1.1 + void SendPictureLossIndication() { SendRTCP(kRtcpPli); } + // using FIR, https://tools.ietf.org/html/rfc5104#section-4.3.1.2 + void SendFullIntraRequest() { SendRTCP(kRtcpFir); } + + // Sends a LossNotification RTCP message. + // Returns -1 on failure else 0. 
+ virtual int32_t SendLossNotification(uint16_t last_decoded_seq_num, + uint16_t last_received_seq_num, + bool decodability_flag, + bool buffering_allowed) = 0; +}; + +} // namespace webrtc + +#endif // MODULES_RTP_RTCP_SOURCE_RTP_RTCP_INTERFACE_H_ diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc index 3023e59557..80c319f4f2 100644 --- a/modules/rtp_rtcp/source/rtp_sender.cc +++ b/modules/rtp_rtcp/source/rtp_sender.cc @@ -42,7 +42,6 @@ constexpr size_t kMaxPaddingLength = 224; constexpr size_t kMinAudioPaddingLength = 50; constexpr size_t kRtpHeaderLength = 12; constexpr uint16_t kMaxInitRtpSeqNumber = 32767; // 2^15 -1. -constexpr uint32_t kTimestampTicksPerMs = 90; // Min size needed to get payload padding from packet history. constexpr int kMinPayloadPaddingBytes = 50; @@ -105,10 +104,10 @@ bool IsNonVolatile(RTPExtensionType type) { switch (type) { case kRtpExtensionTransmissionTimeOffset: case kRtpExtensionAudioLevel: + case kRtpExtensionCsrcAudioLevel: case kRtpExtensionAbsoluteSendTime: case kRtpExtensionTransportSequenceNumber: case kRtpExtensionTransportSequenceNumber02: - case kRtpExtensionFrameMarking: case kRtpExtensionRtpStreamId: case kRtpExtensionMid: case kRtpExtensionGenericFrameDescriptor00: @@ -119,15 +118,18 @@ bool IsNonVolatile(RTPExtensionType type) { case kRtpExtensionVideoRotation: case kRtpExtensionPlayoutDelay: case kRtpExtensionVideoContentType: + case kRtpExtensionVideoLayersAllocation: case kRtpExtensionVideoTiming: case kRtpExtensionRepairedRtpStreamId: case kRtpExtensionColorSpace: + case kRtpExtensionVideoFrameTrackingId: return false; case kRtpExtensionNone: case kRtpExtensionNumberOfExtensions: RTC_NOTREACHED(); return false; } + RTC_CHECK_NOTREACHED(); } bool HasBweExtension(const RtpHeaderExtensionMap& extensions_map) { @@ -154,7 +156,7 @@ double GetMaxPaddingSizeFactor(const WebRtcKeyValueConfig* field_trials) { } // namespace -RTPSender::RTPSender(const RtpRtcp::Configuration& config, 
+RTPSender::RTPSender(const RtpRtcpInterface::Configuration& config, RtpPacketHistory* packet_history, RtpPacketSender* packet_sender) : clock_(config.clock), @@ -169,28 +171,25 @@ RTPSender::RTPSender(const RtpRtcp::Configuration& config, paced_sender_(packet_sender), sending_media_(true), // Default to sending media. max_packet_size_(IP_PACKET_SIZE - 28), // Default is IP-v4/UDP. - last_payload_type_(-1), rtp_header_extension_map_(config.extmap_allow_mixed), - max_media_packet_header_(kRtpHeaderSize), - max_padding_fec_packet_header_(kRtpHeaderSize), // RTP variables - sequence_number_forced_(false), + sequencer_(config.local_media_ssrc, + config.rtx_send_ssrc.value_or(config.local_media_ssrc), + /*require_marker_before_media_padding_=*/!config.audio, + config.clock), always_send_mid_and_rid_(config.always_send_mid_and_rid), ssrc_has_acked_(false), rtx_ssrc_has_acked_(false), - last_rtp_timestamp_(0), - capture_time_ms_(0), - last_timestamp_time_ms_(0), - last_packet_marker_bit_(false), csrcs_(), rtx_(kRtxOff), supports_bwe_extension_(false), retransmission_rate_limiter_(config.retransmission_rate_limiter) { + UpdateHeaderSizes(); // This random initialization is not intended to be cryptographic strong. timestamp_offset_ = random_.Rand(); // Random start, 16 bits. Can't be 0. 
- sequence_number_rtx_ = random_.Rand(1, kMaxInitRtpSeqNumber); - sequence_number_ = random_.Rand(1, kMaxInitRtpSeqNumber); + sequencer_.set_rtx_sequence_number(random_.Rand(1, kMaxInitRtpSeqNumber)); + sequencer_.set_media_sequence_number(random_.Rand(1, kMaxInitRtpSeqNumber)); RTC_DCHECK(paced_sender_); RTC_DCHECK(packet_history_); @@ -224,21 +223,12 @@ rtc::ArrayView RTPSender::AudioExtensionSizes() { } void RTPSender::SetExtmapAllowMixed(bool extmap_allow_mixed) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); rtp_header_extension_map_.SetExtmapAllowMixed(extmap_allow_mixed); } -int32_t RTPSender::RegisterRtpHeaderExtension(RTPExtensionType type, - uint8_t id) { - rtc::CritScope lock(&send_critsect_); - bool registered = rtp_header_extension_map_.RegisterByType(id, type); - supports_bwe_extension_ = HasBweExtension(rtp_header_extension_map_); - UpdateHeaderSizes(); - return registered ? 0 : -1; -} - bool RTPSender::RegisterRtpHeaderExtension(absl::string_view uri, int id) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); bool registered = rtp_header_extension_map_.RegisterByUri(id, uri); supports_bwe_extension_ = HasBweExtension(rtp_header_extension_map_); UpdateHeaderSizes(); @@ -246,12 +236,12 @@ bool RTPSender::RegisterRtpHeaderExtension(absl::string_view uri, int id) { } bool RTPSender::IsRtpHeaderExtensionRegistered(RTPExtensionType type) const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); return rtp_header_extension_map_.IsRegistered(type); } int32_t RTPSender::DeregisterRtpHeaderExtension(RTPExtensionType type) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); rtp_header_extension_map_.Deregister(type); supports_bwe_extension_ = HasBweExtension(rtp_header_extension_map_); UpdateHeaderSizes(); @@ -259,7 +249,7 @@ int32_t RTPSender::DeregisterRtpHeaderExtension(RTPExtensionType type) { } void RTPSender::DeregisterRtpHeaderExtension(absl::string_view uri) { 
- rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); rtp_header_extension_map_.Deregister(uri); supports_bwe_extension_ = HasBweExtension(rtp_header_extension_map_); UpdateHeaderSizes(); @@ -268,7 +258,7 @@ void RTPSender::DeregisterRtpHeaderExtension(absl::string_view uri) { void RTPSender::SetMaxRtpPacketSize(size_t max_packet_size) { RTC_DCHECK_GE(max_packet_size, 100); RTC_DCHECK_LE(max_packet_size, IP_PACKET_SIZE); - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); max_packet_size_ = max_packet_size; } @@ -277,18 +267,18 @@ size_t RTPSender::MaxRtpPacketSize() const { } void RTPSender::SetRtxStatus(int mode) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); rtx_ = mode; } int RTPSender::RtxStatus() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); return rtx_; } void RTPSender::SetRtxPayloadType(int payload_type, int associated_payload_type) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); RTC_DCHECK_LE(payload_type, 127); RTC_DCHECK_LE(associated_payload_type, 127); if (payload_type < 0) { @@ -339,6 +329,7 @@ int32_t RTPSender::ReSendPacket(uint16_t packet_id) { return -1; } packet->set_packet_type(RtpPacketMediaType::kRetransmission); + packet->set_fec_protect_packet(false); std::vector> packets; packets.emplace_back(std::move(packet)); paced_sender_->EnqueuePackets(std::move(packets)); @@ -347,7 +338,7 @@ int32_t RTPSender::ReSendPacket(uint16_t packet_id) { } void RTPSender::OnReceivedAckOnSsrc(int64_t extended_highest_sequence_number) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); bool update_required = !ssrc_has_acked_; ssrc_has_acked_ = true; if (update_required) { @@ -357,8 +348,12 @@ void RTPSender::OnReceivedAckOnSsrc(int64_t extended_highest_sequence_number) { void RTPSender::OnReceivedAckOnRtxSsrc( int64_t extended_highest_sequence_number) { - rtc::CritScope lock(&send_critsect_); + MutexLock 
lock(&send_mutex_); + bool update_required = !rtx_ssrc_has_acked_; rtx_ssrc_has_acked_ = true; + if (update_required) { + UpdateHeaderSizes(); + } } void RTPSender::OnReceivedNack( @@ -377,12 +372,12 @@ void RTPSender::OnReceivedNack( } bool RTPSender::SupportsPadding() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); return sending_media_ && supports_bwe_extension_; } bool RTPSender::SupportsRtxPayloadPadding() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); return sending_media_ && supports_bwe_extension_ && (rtx_ & kRtxRedundantPayloads); } @@ -424,14 +419,14 @@ std::vector> RTPSender::GeneratePadding( } } - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); if (!sending_media_) { return {}; } size_t padding_bytes_in_packet; const size_t max_payload_size = - max_packet_size_ - FecOrPaddingPacketMaxRtpHeaderLength(); + max_packet_size_ - max_padding_fec_packet_header_; if (audio_configured_) { // Allow smaller padding packets for audio. padding_bytes_in_packet = rtc::SafeClamp( @@ -450,23 +445,11 @@ std::vector> RTPSender::GeneratePadding( std::make_unique(&rtp_header_extension_map_); padding_packet->set_packet_type(RtpPacketMediaType::kPadding); padding_packet->SetMarker(false); - padding_packet->SetTimestamp(last_rtp_timestamp_); - padding_packet->set_capture_time_ms(capture_time_ms_); if (rtx_ == kRtxOff) { - if (last_payload_type_ == -1) { - break; - } - // Without RTX we can't send padding in the middle of frames. - // For audio marker bits doesn't mark the end of a frame and frames - // are usually a single packet, so for now we don't apply this rule - // for audio. 
- if (!audio_configured_ && !last_packet_marker_bit_) { + padding_packet->SetSsrc(ssrc_); + if (!sequencer_.Sequence(*padding_packet)) { break; } - - padding_packet->SetSsrc(ssrc_); - padding_packet->SetPayloadType(last_payload_type_); - padding_packet->SetSequenceNumber(sequence_number_++); } else { // Without abs-send-time or transport sequence number a media packet // must be sent before padding so that the timestamps used for @@ -477,21 +460,13 @@ std::vector> RTPSender::GeneratePadding( TransportSequenceNumber::kId))) { break; } - // Only change the timestamp of padding packets sent over RTX. - // Padding only packets over RTP has to be sent as part of a media - // frame (and therefore the same timestamp). - int64_t now_ms = clock_->TimeInMilliseconds(); - if (last_timestamp_time_ms_ > 0) { - padding_packet->SetTimestamp(padding_packet->Timestamp() + - (now_ms - last_timestamp_time_ms_) * - kTimestampTicksPerMs); - padding_packet->set_capture_time_ms(padding_packet->capture_time_ms() + - (now_ms - last_timestamp_time_ms_)); - } + RTC_DCHECK(rtx_ssrc_); padding_packet->SetSsrc(*rtx_ssrc_); - padding_packet->SetSequenceNumber(sequence_number_rtx_++); padding_packet->SetPayloadType(rtx_payload_type_map_.begin()->second); + if (!sequencer_.Sequence(*padding_packet)) { + break; + } } if (rtp_header_extension_map_.IsRegistered(TransportSequenceNumber::kId)) { @@ -547,24 +522,17 @@ void RTPSender::EnqueuePackets( } size_t RTPSender::FecOrPaddingPacketMaxRtpHeaderLength() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); return max_padding_fec_packet_header_; } size_t RTPSender::ExpectedPerPacketOverhead() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); return max_media_packet_header_; } -uint16_t RTPSender::AllocateSequenceNumber(uint16_t packets_to_send) { - rtc::CritScope lock(&send_critsect_); - uint16_t first_allocated_sequence_number = sequence_number_; - sequence_number_ += packets_to_send; - return 
first_allocated_sequence_number; -} - std::unique_ptr RTPSender::AllocatePacket() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); // TODO(danilchap): Find better motivator and value for extra capacity. // RtpPacketizer might slightly miscalulate needed size, // SRTP may benefit from extra space in the buffer and do encryption in place @@ -606,31 +574,31 @@ std::unique_ptr RTPSender::AllocatePacket() const { } bool RTPSender::AssignSequenceNumber(RtpPacketToSend* packet) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); if (!sending_media_) return false; - RTC_DCHECK(packet->Ssrc() == ssrc_); - packet->SetSequenceNumber(sequence_number_++); - - // Remember marker bit to determine if padding can be inserted with - // sequence number following |packet|. - last_packet_marker_bit_ = packet->Marker(); - // Remember payload type to use in the padding packet if rtx is disabled. - last_payload_type_ = packet->PayloadType(); - // Save timestamps to generate timestamp field and extensions for the padding. 
- last_rtp_timestamp_ = packet->Timestamp(); - last_timestamp_time_ms_ = clock_->TimeInMilliseconds(); - capture_time_ms_ = packet->capture_time_ms(); + return sequencer_.Sequence(*packet); +} + +bool RTPSender::AssignSequenceNumbersAndStoreLastPacketState( + rtc::ArrayView> packets) { + RTC_DCHECK(!packets.empty()); + MutexLock lock(&send_mutex_); + if (!sending_media_) + return false; + for (auto& packet : packets) { + sequencer_.Sequence(*packet); + } return true; } void RTPSender::SetSendingMediaStatus(bool enabled) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); sending_media_ = enabled; } bool RTPSender::SendingMedia() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); return sending_media_; } @@ -639,18 +607,18 @@ bool RTPSender::IsAudioConfigured() const { } void RTPSender::SetTimestampOffset(uint32_t timestamp) { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); timestamp_offset_ = timestamp; } uint32_t RTPSender::TimestampOffset() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); return timestamp_offset_; } void RTPSender::SetRid(const std::string& rid) { // RID is used in simulcast scenario when multiple layers share the same mid. - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); RTC_DCHECK_LE(rid.length(), RtpStreamId::kMaxValueSizeBytes); rid_ = rid; UpdateHeaderSizes(); @@ -658,7 +626,7 @@ void RTPSender::SetRid(const std::string& rid) { void RTPSender::SetMid(const std::string& mid) { // This is configured via the API. 
- rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); RTC_DCHECK_LE(mid.length(), RtpMid::kMaxValueSizeBytes); mid_ = mid; UpdateHeaderSizes(); @@ -666,7 +634,7 @@ void RTPSender::SetMid(const std::string& mid) { void RTPSender::SetCsrcs(const std::vector& csrcs) { RTC_DCHECK_LE(csrcs.size(), kRtpCsrcSize); - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); csrcs_ = csrcs; UpdateHeaderSizes(); } @@ -674,12 +642,11 @@ void RTPSender::SetCsrcs(const std::vector& csrcs) { void RTPSender::SetSequenceNumber(uint16_t seq) { bool updated_sequence_number = false; { - rtc::CritScope lock(&send_critsect_); - sequence_number_forced_ = true; - if (sequence_number_ != seq) { + MutexLock lock(&send_mutex_); + if (sequencer_.media_sequence_number() != seq) { updated_sequence_number = true; } - sequence_number_ = seq; + sequencer_.set_media_sequence_number(seq); } if (updated_sequence_number) { @@ -690,8 +657,8 @@ void RTPSender::SetSequenceNumber(uint16_t seq) { } uint16_t RTPSender::SequenceNumber() const { - rtc::CritScope lock(&send_critsect_); - return sequence_number_; + MutexLock lock(&send_mutex_); + return sequencer_.media_sequence_number(); } static void CopyHeaderAndExtensionsToRtxPacket(const RtpPacketToSend& packet, @@ -748,7 +715,7 @@ std::unique_ptr RTPSender::BuildRtxPacket( // Add original RTP header. { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); if (!sending_media_) return nullptr; @@ -764,12 +731,12 @@ std::unique_ptr RTPSender::BuildRtxPacket( rtx_packet->SetPayloadType(kv->second); - // Replace sequence number. - rtx_packet->SetSequenceNumber(sequence_number_rtx_++); - // Replace SSRC. rtx_packet->SetSsrc(*rtx_ssrc_); + // Replace sequence number. 
+ sequencer_.Sequence(*rtx_packet); + CopyHeaderAndExtensionsToRtxPacket(packet, rtx_packet.get()); // RTX packets are sent on an SSRC different from the main media, so the @@ -804,8 +771,8 @@ std::unique_ptr RTPSender::BuildRtxPacket( auto payload = packet.payload(); memcpy(rtx_payload + kRtxHeaderSize, payload.data(), payload.size()); - // Add original application data. - rtx_packet->set_application_data(packet.application_data()); + // Add original additional data. + rtx_packet->set_additional_data(packet.additional_data()); // Copy capture time so e.g. TransmissionOffset is correctly set. rtx_packet->set_capture_time_ms(packet.capture_time_ms()); @@ -814,52 +781,41 @@ std::unique_ptr RTPSender::BuildRtxPacket( } void RTPSender::SetRtpState(const RtpState& rtp_state) { - rtc::CritScope lock(&send_critsect_); - sequence_number_ = rtp_state.sequence_number; - sequence_number_forced_ = true; + MutexLock lock(&send_mutex_); + timestamp_offset_ = rtp_state.start_timestamp; - last_rtp_timestamp_ = rtp_state.timestamp; - capture_time_ms_ = rtp_state.capture_time_ms; - last_timestamp_time_ms_ = rtp_state.last_timestamp_time_ms; + sequencer_.SetRtpState(rtp_state); ssrc_has_acked_ = rtp_state.ssrc_has_acked; UpdateHeaderSizes(); } RtpState RTPSender::GetRtpState() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); RtpState state; - state.sequence_number = sequence_number_; state.start_timestamp = timestamp_offset_; - state.timestamp = last_rtp_timestamp_; - state.capture_time_ms = capture_time_ms_; - state.last_timestamp_time_ms = last_timestamp_time_ms_; state.ssrc_has_acked = ssrc_has_acked_; + sequencer_.PupulateRtpState(state); return state; } void RTPSender::SetRtxRtpState(const RtpState& rtp_state) { - rtc::CritScope lock(&send_critsect_); - sequence_number_rtx_ = rtp_state.sequence_number; + MutexLock lock(&send_mutex_); + sequencer_.set_rtx_sequence_number(rtp_state.sequence_number); rtx_ssrc_has_acked_ = rtp_state.ssrc_has_acked; } 
RtpState RTPSender::GetRtxRtpState() const { - rtc::CritScope lock(&send_critsect_); + MutexLock lock(&send_mutex_); RtpState state; - state.sequence_number = sequence_number_rtx_; + state.sequence_number = sequencer_.rtx_sequence_number(); state.start_timestamp = timestamp_offset_; state.ssrc_has_acked = rtx_ssrc_has_acked_; return state; } -int64_t RTPSender::LastTimestampTimeMs() const { - rtc::CritScope lock(&send_critsect_); - return last_timestamp_time_ms_; -} - void RTPSender::UpdateHeaderSizes() { const size_t rtp_header_length = kRtpHeaderLength + sizeof(uint32_t) * csrcs_.size(); @@ -869,10 +825,12 @@ void RTPSender::UpdateHeaderSizes() { rtp_header_extension_map_); // RtpStreamId and Mid are treated specially in that we check if they - // currently are being sent. RepairedRtpStreamId is still ignored since we - // assume RTX will not make up large enough bitrate to treat overhead - // differently. - const bool send_mid_rid = always_send_mid_and_rid_ || !ssrc_has_acked_; + // currently are being sent. RepairedRtpStreamId is ignored because it is sent + // instead of RtpStreamId on rtx packets and requires the same size. + const bool send_mid_rid_on_rtx = + rtx_ssrc_.has_value() && !rtx_ssrc_has_acked_; + const bool send_mid_rid = + always_send_mid_and_rid_ || !ssrc_has_acked_ || send_mid_rid_on_rtx; std::vector non_volatile_extensions; for (auto& extension : audio_configured_ ? AudioExtensionSizes() : VideoExtensionSizes()) { @@ -896,5 +854,9 @@ void RTPSender::UpdateHeaderSizes() { max_media_packet_header_ = rtp_header_length + RtpHeaderExtensionSize(non_volatile_extensions, rtp_header_extension_map_); + // Reserve extra bytes if packet might be resent in an rtx packet. 
+ if (rtx_ssrc_.has_value()) { + max_media_packet_header_ += kRtxHeaderSize; + } } } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender.h b/modules/rtp_rtcp/source/rtp_sender.h index a14c3ae1a8..fbf135049c 100644 --- a/modules/rtp_rtcp/source/rtp_sender.h +++ b/modules/rtp_rtcp/source/rtp_sender.h @@ -25,15 +25,14 @@ #include "modules/rtp_rtcp/include/flexfec_sender.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/include/rtp_packet_sender.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "modules/rtp_rtcp/source/packet_sequencer.h" #include "modules/rtp_rtcp/source/rtp_packet_history.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/deprecation.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "rtc_base/random.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -45,105 +44,133 @@ class RtpPacketToSend; class RTPSender { public: - RTPSender(const RtpRtcp::Configuration& config, + RTPSender(const RtpRtcpInterface::Configuration& config, RtpPacketHistory* packet_history, RtpPacketSender* packet_sender); + RTPSender() = delete; + RTPSender(const RTPSender&) = delete; + RTPSender& operator=(const RTPSender&) = delete; + ~RTPSender(); - void SetSendingMediaStatus(bool enabled); - bool SendingMedia() const; - bool IsAudioConfigured() const; + void SetSendingMediaStatus(bool enabled) RTC_LOCKS_EXCLUDED(send_mutex_); + bool SendingMedia() const RTC_LOCKS_EXCLUDED(send_mutex_); + bool IsAudioConfigured() const RTC_LOCKS_EXCLUDED(send_mutex_); - uint32_t TimestampOffset() const; - void SetTimestampOffset(uint32_t timestamp); + uint32_t TimestampOffset() const RTC_LOCKS_EXCLUDED(send_mutex_); + void SetTimestampOffset(uint32_t 
timestamp) RTC_LOCKS_EXCLUDED(send_mutex_); - void SetRid(const std::string& rid); + void SetRid(const std::string& rid) RTC_LOCKS_EXCLUDED(send_mutex_); - void SetMid(const std::string& mid); + void SetMid(const std::string& mid) RTC_LOCKS_EXCLUDED(send_mutex_); - uint16_t SequenceNumber() const; - void SetSequenceNumber(uint16_t seq); + uint16_t SequenceNumber() const RTC_LOCKS_EXCLUDED(send_mutex_); + void SetSequenceNumber(uint16_t seq) RTC_LOCKS_EXCLUDED(send_mutex_); - void SetCsrcs(const std::vector& csrcs); + void SetCsrcs(const std::vector& csrcs) + RTC_LOCKS_EXCLUDED(send_mutex_); - void SetMaxRtpPacketSize(size_t max_packet_size); + void SetMaxRtpPacketSize(size_t max_packet_size) + RTC_LOCKS_EXCLUDED(send_mutex_); - void SetExtmapAllowMixed(bool extmap_allow_mixed); + void SetExtmapAllowMixed(bool extmap_allow_mixed) + RTC_LOCKS_EXCLUDED(send_mutex_); // RTP header extension - int32_t RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id); - bool RegisterRtpHeaderExtension(absl::string_view uri, int id); - bool IsRtpHeaderExtensionRegistered(RTPExtensionType type) const; - int32_t DeregisterRtpHeaderExtension(RTPExtensionType type); - void DeregisterRtpHeaderExtension(absl::string_view uri); - - bool SupportsPadding() const; - bool SupportsRtxPayloadPadding() const; + bool RegisterRtpHeaderExtension(absl::string_view uri, int id) + RTC_LOCKS_EXCLUDED(send_mutex_); + bool IsRtpHeaderExtensionRegistered(RTPExtensionType type) const + RTC_LOCKS_EXCLUDED(send_mutex_); + int32_t DeregisterRtpHeaderExtension(RTPExtensionType type) + RTC_LOCKS_EXCLUDED(send_mutex_); + void DeregisterRtpHeaderExtension(absl::string_view uri) + RTC_LOCKS_EXCLUDED(send_mutex_); + + bool SupportsPadding() const RTC_LOCKS_EXCLUDED(send_mutex_); + bool SupportsRtxPayloadPadding() const RTC_LOCKS_EXCLUDED(send_mutex_); std::vector> GeneratePadding( size_t target_size_bytes, - bool media_has_been_sent); + bool media_has_been_sent) RTC_LOCKS_EXCLUDED(send_mutex_); // NACK. 
void OnReceivedNack(const std::vector& nack_sequence_numbers, - int64_t avg_rtt); + int64_t avg_rtt) RTC_LOCKS_EXCLUDED(send_mutex_); - int32_t ReSendPacket(uint16_t packet_id); + int32_t ReSendPacket(uint16_t packet_id) RTC_LOCKS_EXCLUDED(send_mutex_); // ACK. - void OnReceivedAckOnSsrc(int64_t extended_highest_sequence_number); - void OnReceivedAckOnRtxSsrc(int64_t extended_highest_sequence_number); + void OnReceivedAckOnSsrc(int64_t extended_highest_sequence_number) + RTC_LOCKS_EXCLUDED(send_mutex_); + void OnReceivedAckOnRtxSsrc(int64_t extended_highest_sequence_number) + RTC_LOCKS_EXCLUDED(send_mutex_); // RTX. - void SetRtxStatus(int mode); - int RtxStatus() const; - absl::optional RtxSsrc() const { return rtx_ssrc_; } + void SetRtxStatus(int mode) RTC_LOCKS_EXCLUDED(send_mutex_); + int RtxStatus() const RTC_LOCKS_EXCLUDED(send_mutex_); + absl::optional RtxSsrc() const RTC_LOCKS_EXCLUDED(send_mutex_) { + return rtx_ssrc_; + } - void SetRtxPayloadType(int payload_type, int associated_payload_type); + void SetRtxPayloadType(int payload_type, int associated_payload_type) + RTC_LOCKS_EXCLUDED(send_mutex_); // Size info for header extensions used by FEC packets. - static rtc::ArrayView FecExtensionSizes(); + static rtc::ArrayView FecExtensionSizes() + RTC_LOCKS_EXCLUDED(send_mutex_); // Size info for header extensions used by video packets. - static rtc::ArrayView VideoExtensionSizes(); + static rtc::ArrayView VideoExtensionSizes() + RTC_LOCKS_EXCLUDED(send_mutex_); // Size info for header extensions used by audio packets. - static rtc::ArrayView AudioExtensionSizes(); + static rtc::ArrayView AudioExtensionSizes() + RTC_LOCKS_EXCLUDED(send_mutex_); // Create empty packet, fills ssrc, csrcs and reserve place for header // extensions RtpSender updates before sending. - std::unique_ptr AllocatePacket() const; + std::unique_ptr AllocatePacket() const + RTC_LOCKS_EXCLUDED(send_mutex_); // Allocate sequence number for provided packet. 
// Save packet's fields to generate padding that doesn't break media stream. // Return false if sending was turned off. - bool AssignSequenceNumber(RtpPacketToSend* packet); + bool AssignSequenceNumber(RtpPacketToSend* packet) + RTC_LOCKS_EXCLUDED(send_mutex_); + // Same as AssignSequenceNumber(), but applies sequence numbers atomically to + // a batch of packets. + bool AssignSequenceNumbersAndStoreLastPacketState( + rtc::ArrayView> packets) + RTC_LOCKS_EXCLUDED(send_mutex_); // Maximum header overhead per fec/padding packet. - size_t FecOrPaddingPacketMaxRtpHeaderLength() const; + size_t FecOrPaddingPacketMaxRtpHeaderLength() const + RTC_LOCKS_EXCLUDED(send_mutex_); // Expected header overhead per media packet. - size_t ExpectedPerPacketOverhead() const; - uint16_t AllocateSequenceNumber(uint16_t packets_to_send); + size_t ExpectedPerPacketOverhead() const RTC_LOCKS_EXCLUDED(send_mutex_); // Including RTP headers. - size_t MaxRtpPacketSize() const; + size_t MaxRtpPacketSize() const RTC_LOCKS_EXCLUDED(send_mutex_); - uint32_t SSRC() const { return ssrc_; } + uint32_t SSRC() const RTC_LOCKS_EXCLUDED(send_mutex_) { return ssrc_; } - absl::optional FlexfecSsrc() const { return flexfec_ssrc_; } + absl::optional FlexfecSsrc() const RTC_LOCKS_EXCLUDED(send_mutex_) { + return flexfec_ssrc_; + } // Sends packet to |transport_| or to the pacer, depending on configuration. // TODO(bugs.webrtc.org/XXX): Remove in favor of EnqueuePackets(). - bool SendToNetwork(std::unique_ptr packet); + bool SendToNetwork(std::unique_ptr packet) + RTC_LOCKS_EXCLUDED(send_mutex_); // Pass a set of packets to RtpPacketSender instance, for paced or immediate // sending to the network. 
- void EnqueuePackets(std::vector> packets); - - void SetRtpState(const RtpState& rtp_state); - RtpState GetRtpState() const; - void SetRtxRtpState(const RtpState& rtp_state); - RtpState GetRtxRtpState() const; + void EnqueuePackets(std::vector> packets) + RTC_LOCKS_EXCLUDED(send_mutex_); - int64_t LastTimestampTimeMs() const; + void SetRtpState(const RtpState& rtp_state) RTC_LOCKS_EXCLUDED(send_mutex_); + RtpState GetRtpState() const RTC_LOCKS_EXCLUDED(send_mutex_); + void SetRtxRtpState(const RtpState& rtp_state) + RTC_LOCKS_EXCLUDED(send_mutex_); + RtpState GetRtxRtpState() const RTC_LOCKS_EXCLUDED(send_mutex_); private: std::unique_ptr BuildRtxPacket( @@ -151,10 +178,13 @@ class RTPSender { bool IsFecPacket(const RtpPacketToSend& packet) const; - void UpdateHeaderSizes() RTC_EXCLUSIVE_LOCKS_REQUIRED(send_critsect_); + void UpdateHeaderSizes() RTC_EXCLUSIVE_LOCKS_REQUIRED(send_mutex_); + + void UpdateLastPacketState(const RtpPacketToSend& packet) + RTC_EXCLUSIVE_LOCKS_REQUIRED(send_mutex_); Clock* const clock_; - Random random_ RTC_GUARDED_BY(send_critsect_); + Random random_ RTC_GUARDED_BY(send_mutex_); const bool audio_configured_; @@ -168,46 +198,35 @@ class RTPSender { RtpPacketHistory* const packet_history_; RtpPacketSender* const paced_sender_; - rtc::CriticalSection send_critsect_; + mutable Mutex send_mutex_; - bool sending_media_ RTC_GUARDED_BY(send_critsect_); + bool sending_media_ RTC_GUARDED_BY(send_mutex_); size_t max_packet_size_; - int8_t last_payload_type_ RTC_GUARDED_BY(send_critsect_); - - RtpHeaderExtensionMap rtp_header_extension_map_ - RTC_GUARDED_BY(send_critsect_); - size_t max_media_packet_header_ RTC_GUARDED_BY(send_critsect_); - size_t max_padding_fec_packet_header_ RTC_GUARDED_BY(send_critsect_); + RtpHeaderExtensionMap rtp_header_extension_map_ RTC_GUARDED_BY(send_mutex_); + size_t max_media_packet_header_ RTC_GUARDED_BY(send_mutex_); + size_t max_padding_fec_packet_header_ RTC_GUARDED_BY(send_mutex_); // RTP variables - uint32_t 
timestamp_offset_ RTC_GUARDED_BY(send_critsect_); - bool sequence_number_forced_ RTC_GUARDED_BY(send_critsect_); - uint16_t sequence_number_ RTC_GUARDED_BY(send_critsect_); - uint16_t sequence_number_rtx_ RTC_GUARDED_BY(send_critsect_); + uint32_t timestamp_offset_ RTC_GUARDED_BY(send_mutex_); + PacketSequencer sequencer_ RTC_GUARDED_BY(send_mutex_); // RID value to send in the RID or RepairedRID header extension. - std::string rid_ RTC_GUARDED_BY(send_critsect_); + std::string rid_ RTC_GUARDED_BY(send_mutex_); // MID value to send in the MID header extension. - std::string mid_ RTC_GUARDED_BY(send_critsect_); + std::string mid_ RTC_GUARDED_BY(send_mutex_); // Should we send MID/RID even when ACKed? (see below). const bool always_send_mid_and_rid_; // Track if any ACK has been received on the SSRC and RTX SSRC to indicate // when to stop sending the MID and RID header extensions. - bool ssrc_has_acked_ RTC_GUARDED_BY(send_critsect_); - bool rtx_ssrc_has_acked_ RTC_GUARDED_BY(send_critsect_); - uint32_t last_rtp_timestamp_ RTC_GUARDED_BY(send_critsect_); - int64_t capture_time_ms_ RTC_GUARDED_BY(send_critsect_); - int64_t last_timestamp_time_ms_ RTC_GUARDED_BY(send_critsect_); - bool last_packet_marker_bit_ RTC_GUARDED_BY(send_critsect_); - std::vector csrcs_ RTC_GUARDED_BY(send_critsect_); - int rtx_ RTC_GUARDED_BY(send_critsect_); + bool ssrc_has_acked_ RTC_GUARDED_BY(send_mutex_); + bool rtx_ssrc_has_acked_ RTC_GUARDED_BY(send_mutex_); + std::vector csrcs_ RTC_GUARDED_BY(send_mutex_); + int rtx_ RTC_GUARDED_BY(send_mutex_); // Mapping rtx_payload_type_map_[associated] = rtx. 
- std::map rtx_payload_type_map_ RTC_GUARDED_BY(send_critsect_); - bool supports_bwe_extension_ RTC_GUARDED_BY(send_critsect_); + std::map rtx_payload_type_map_ RTC_GUARDED_BY(send_mutex_); + bool supports_bwe_extension_ RTC_GUARDED_BY(send_mutex_); RateLimiter* const retransmission_rate_limiter_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RTPSender); }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.cc b/modules/rtp_rtcp/source/rtp_sender_audio.cc index c8d83db297..4d72211b7c 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio.cc +++ b/modules/rtp_rtcp/source/rtp_sender_audio.cc @@ -46,15 +46,22 @@ const char* FrameTypeToString(AudioFrameType frame_type) { case AudioFrameType::kAudioFrameCN: return "audio_cn"; } + RTC_CHECK_NOTREACHED(); } #endif +constexpr char kIncludeCaptureClockOffset[] = + "WebRTC-IncludeCaptureClockOffset"; + } // namespace RTPSenderAudio::RTPSenderAudio(Clock* clock, RTPSender* rtp_sender) : clock_(clock), rtp_sender_(rtp_sender), - absolute_capture_time_sender_(clock) { + absolute_capture_time_sender_(clock), + include_capture_clock_offset_( + absl::StartsWith(field_trials_.Lookup(kIncludeCaptureClockOffset), + "Enabled")) { RTC_DCHECK(clock_); } @@ -66,7 +73,7 @@ int32_t RTPSenderAudio::RegisterAudioPayload(absl::string_view payload_name, const size_t channels, const uint32_t rate) { if (absl::EqualsIgnoreCase(payload_name, "cn")) { - rtc::CritScope cs(&send_audio_critsect_); + MutexLock lock(&send_audio_mutex_); // we can have multiple CNG payload types switch (frequency) { case 8000: @@ -85,14 +92,14 @@ int32_t RTPSenderAudio::RegisterAudioPayload(absl::string_view payload_name, return -1; } } else if (absl::EqualsIgnoreCase(payload_name, "telephone-event")) { - rtc::CritScope cs(&send_audio_critsect_); + MutexLock lock(&send_audio_mutex_); // Don't add it to the list // we dont want to allow send with a DTMF payloadtype dtmf_payload_type_ = payload_type; dtmf_payload_freq_ = frequency; return 0; } else if 
(payload_name == "audio") { - rtc::CritScope cs(&send_audio_critsect_); + MutexLock lock(&send_audio_mutex_); encoder_rtp_timestamp_frequency_ = frequency; return 0; } @@ -100,7 +107,7 @@ int32_t RTPSenderAudio::RegisterAudioPayload(absl::string_view payload_name, } bool RTPSenderAudio::MarkerBit(AudioFrameType frame_type, int8_t payload_type) { - rtc::CritScope cs(&send_audio_critsect_); + MutexLock lock(&send_audio_mutex_); // for audio true for first packet in a speech burst bool marker_bit = false; if (last_payload_type_ != payload_type) { @@ -150,7 +157,7 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, return SendAudio(frame_type, payload_type, rtp_timestamp, payload_data, payload_size, // TODO(bugs.webrtc.org/10739) replace once plumbed. - /*absolute_capture_timestamp_ms=*/0); + /*absolute_capture_timestamp_ms=*/-1); } bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, @@ -174,7 +181,7 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, uint32_t dtmf_payload_freq = 0; absl::optional encoder_rtp_timestamp_frequency; { - rtc::CritScope cs(&send_audio_critsect_); + MutexLock lock(&send_audio_mutex_); audio_level_dbov = audio_level_dbov_; dtmf_payload_freq = dtmf_payload_freq_; encoder_rtp_timestamp_frequency = encoder_rtp_timestamp_frequency_; @@ -270,21 +277,26 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, packet->SetExtension( frame_type == AudioFrameType::kAudioFrameSpeech, audio_level_dbov); - // Send absolute capture time periodically in order to optimize and save - // network traffic. Missing absolute capture times can be interpolated on the - // receiving end if sending intervals are small enough. - auto absolute_capture_time = absolute_capture_time_sender_.OnSendPacket( - AbsoluteCaptureTimeSender::GetSource(packet->Ssrc(), packet->Csrcs()), - packet->Timestamp(), - // Replace missing value with 0 (invalid frequency), this will trigger - // absolute capture time sending. 
- encoder_rtp_timestamp_frequency.value_or(0), - Int64MsToUQ32x32(absolute_capture_timestamp_ms + NtpOffsetMs()), - /*estimated_capture_clock_offset=*/absl::nullopt); - if (absolute_capture_time) { - // It also checks that extension was registered during SDP negotiation. If - // not then setter won't do anything. - packet->SetExtension(*absolute_capture_time); + if (absolute_capture_timestamp_ms > 0) { + // Send absolute capture time periodically in order to optimize and save + // network traffic. Missing absolute capture times can be interpolated on + // the receiving end if sending intervals are small enough. + auto absolute_capture_time = absolute_capture_time_sender_.OnSendPacket( + AbsoluteCaptureTimeSender::GetSource(packet->Ssrc(), packet->Csrcs()), + packet->Timestamp(), + // Replace missing value with 0 (invalid frequency), this will trigger + // absolute capture time sending. + encoder_rtp_timestamp_frequency.value_or(0), + Int64MsToUQ32x32(clock_->ConvertTimestampToNtpTimeInMilliseconds( + absolute_capture_timestamp_ms)), + /*estimated_capture_clock_offset=*/ + include_capture_clock_offset_ ? absl::make_optional(0) : absl::nullopt); + if (absolute_capture_time) { + // It also checks that extension was registered during SDP negotiation. If + // not then setter won't do anything. 
+ packet->SetExtension( + *absolute_capture_time); + } } uint8_t* payload = packet->AllocatePayload(payload_size); @@ -296,7 +308,7 @@ bool RTPSenderAudio::SendAudio(AudioFrameType frame_type, return false; { - rtc::CritScope cs(&send_audio_critsect_); + MutexLock lock(&send_audio_mutex_); last_payload_type_ = payload_type; } TRACE_EVENT_ASYNC_END2("webrtc", "Audio", rtp_timestamp, "timestamp", @@ -316,7 +328,7 @@ int32_t RTPSenderAudio::SetAudioLevel(uint8_t level_dbov) { if (level_dbov > 127) { return -1; } - rtc::CritScope cs(&send_audio_critsect_); + MutexLock lock(&send_audio_mutex_); audio_level_dbov_ = level_dbov; return 0; } @@ -327,7 +339,7 @@ int32_t RTPSenderAudio::SendTelephoneEvent(uint8_t key, uint8_t level) { DtmfQueue::Event event; { - rtc::CritScope lock(&send_audio_critsect_); + MutexLock lock(&send_audio_mutex_); if (dtmf_payload_type_ < 0) { // TelephoneEvent payloadtype not configured return -1; diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.h b/modules/rtp_rtcp/source/rtp_sender_audio.h index c2d8074a60..6d61facc9a 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio.h +++ b/modules/rtp_rtcp/source/rtp_sender_audio.h @@ -17,13 +17,13 @@ #include #include "absl/strings/string_view.h" +#include "api/transport/field_trial_based_config.h" #include "modules/audio_coding/include/audio_coding_module_typedefs.h" #include "modules/rtp_rtcp/source/absolute_capture_time_sender.h" #include "modules/rtp_rtcp/source/dtmf_queue.h" #include "modules/rtp_rtcp/source/rtp_sender.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/one_time_event.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" @@ -32,6 +32,11 @@ namespace webrtc { class RTPSenderAudio { public: RTPSenderAudio(Clock* clock, RTPSender* rtp_sender); + + RTPSenderAudio() = delete; + RTPSenderAudio(const RTPSenderAudio&) = delete; + RTPSenderAudio& 
operator=(const RTPSenderAudio&) = delete; + ~RTPSenderAudio(); int32_t RegisterAudioPayload(absl::string_view payload_name, @@ -46,6 +51,8 @@ class RTPSenderAudio { const uint8_t* payload_data, size_t payload_size); + // `absolute_capture_timestamp_ms` and `Clock::CurrentTime` + // should be using the same epoch. bool SendAudio(AudioFrameType frame_type, int8_t payload_type, uint32_t rtp_timestamp, @@ -74,13 +81,13 @@ class RTPSenderAudio { Clock* const clock_ = nullptr; RTPSender* const rtp_sender_ = nullptr; - rtc::CriticalSection send_audio_critsect_; + Mutex send_audio_mutex_; // DTMF. bool dtmf_event_is_on_ = false; bool dtmf_event_first_packet_sent_ = false; - int8_t dtmf_payload_type_ RTC_GUARDED_BY(send_audio_critsect_) = -1; - uint32_t dtmf_payload_freq_ RTC_GUARDED_BY(send_audio_critsect_) = 8000; + int8_t dtmf_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1; + uint32_t dtmf_payload_freq_ RTC_GUARDED_BY(send_audio_mutex_) = 8000; uint32_t dtmf_timestamp_ = 0; uint32_t dtmf_length_samples_ = 0; int64_t dtmf_time_last_sent_ = 0; @@ -89,24 +96,25 @@ class RTPSenderAudio { DtmfQueue dtmf_queue_; // VAD detection, used for marker bit. 
- bool inband_vad_active_ RTC_GUARDED_BY(send_audio_critsect_) = false; - int8_t cngnb_payload_type_ RTC_GUARDED_BY(send_audio_critsect_) = -1; - int8_t cngwb_payload_type_ RTC_GUARDED_BY(send_audio_critsect_) = -1; - int8_t cngswb_payload_type_ RTC_GUARDED_BY(send_audio_critsect_) = -1; - int8_t cngfb_payload_type_ RTC_GUARDED_BY(send_audio_critsect_) = -1; - int8_t last_payload_type_ RTC_GUARDED_BY(send_audio_critsect_) = -1; + bool inband_vad_active_ RTC_GUARDED_BY(send_audio_mutex_) = false; + int8_t cngnb_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1; + int8_t cngwb_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1; + int8_t cngswb_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1; + int8_t cngfb_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1; + int8_t last_payload_type_ RTC_GUARDED_BY(send_audio_mutex_) = -1; // Audio level indication. // (https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/) - uint8_t audio_level_dbov_ RTC_GUARDED_BY(send_audio_critsect_) = 0; + uint8_t audio_level_dbov_ RTC_GUARDED_BY(send_audio_mutex_) = 0; OneTimeEvent first_packet_sent_; absl::optional encoder_rtp_timestamp_frequency_ - RTC_GUARDED_BY(send_audio_critsect_); + RTC_GUARDED_BY(send_audio_mutex_); AbsoluteCaptureTimeSender absolute_capture_time_sender_; - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RTPSenderAudio); + const FieldTrialBasedConfig field_trials_; + const bool include_capture_clock_offset_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc index 3e35f42bff..0221800ea8 100644 --- a/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_audio_unittest.cc @@ -18,7 +18,8 @@ #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/rtp_rtcp/source/time_util.h" +#include 
"modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "test/field_trial.h" #include "test/gmock.h" #include "test/gtest.h" @@ -67,56 +68,58 @@ class RtpSenderAudioTest : public ::testing::Test { public: RtpSenderAudioTest() : fake_clock_(kStartTime), - rtp_module_(RtpRtcp::Create([&] { - RtpRtcp::Configuration config; + rtp_module_(ModuleRtpRtcpImpl2::Create([&] { + RtpRtcpInterface::Configuration config; config.audio = true; config.clock = &fake_clock_; config.outgoing_transport = &transport_; config.local_media_ssrc = kSsrc; return config; }())), - rtp_sender_audio_(&fake_clock_, rtp_module_->RtpSender()) { + rtp_sender_audio_( + std::make_unique(&fake_clock_, + rtp_module_->RtpSender())) { rtp_module_->SetSequenceNumber(kSeqNum); } SimulatedClock fake_clock_; LoopbackTransportTest transport_; - std::unique_ptr rtp_module_; - RTPSenderAudio rtp_sender_audio_; + std::unique_ptr rtp_module_; + std::unique_ptr rtp_sender_audio_; }; TEST_F(RtpSenderAudioTest, SendAudio) { const char payload_name[] = "PAYLOAD_NAME"; const uint8_t payload_type = 127; - ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload( + ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( payload_name, payload_type, 48000, 0, 1500)); uint8_t payload[] = {47, 11, 32, 93, 89}; - ASSERT_TRUE(rtp_sender_audio_.SendAudio(AudioFrameType::kAudioFrameCN, - payload_type, 4321, payload, - sizeof(payload), - /*absolute_capture_timestamp_ms=*/0)); + ASSERT_TRUE( + rtp_sender_audio_->SendAudio(AudioFrameType::kAudioFrameCN, payload_type, + 4321, payload, sizeof(payload), + /*absolute_capture_timestamp_ms=*/0)); auto sent_payload = transport_.last_sent_packet().payload(); EXPECT_THAT(sent_payload, ElementsAreArray(payload)); } TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) { - EXPECT_EQ(0, rtp_sender_audio_.SetAudioLevel(kAudioLevel)); + EXPECT_EQ(0, rtp_sender_audio_->SetAudioLevel(kAudioLevel)); rtp_module_->RegisterRtpHeaderExtension(AudioLevel::kUri, kAudioLevelExtensionId); const char 
payload_name[] = "PAYLOAD_NAME"; const uint8_t payload_type = 127; - ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload( + ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( payload_name, payload_type, 48000, 0, 1500)); uint8_t payload[] = {47, 11, 32, 93, 89}; - ASSERT_TRUE(rtp_sender_audio_.SendAudio(AudioFrameType::kAudioFrameCN, - payload_type, 4321, payload, - sizeof(payload), - /*absolute_capture_timestamp_ms=*/0)); + ASSERT_TRUE( + rtp_sender_audio_->SendAudio(AudioFrameType::kAudioFrameCN, payload_type, + 4321, payload, sizeof(payload), + /*absolute_capture_timestamp_ms=*/0)); auto sent_payload = transport_.last_sent_packet().payload(); EXPECT_THAT(sent_payload, ElementsAreArray(payload)); @@ -133,11 +136,11 @@ TEST_F(RtpSenderAudioTest, SendAudioWithoutAbsoluteCaptureTime) { constexpr uint32_t kAbsoluteCaptureTimestampMs = 521; const char payload_name[] = "audio"; const uint8_t payload_type = 127; - ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload( + ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( payload_name, payload_type, 48000, 0, 1500)); uint8_t payload[] = {47, 11, 32, 93, 89}; - ASSERT_TRUE(rtp_sender_audio_.SendAudio( + ASSERT_TRUE(rtp_sender_audio_->SendAudio( AudioFrameType::kAudioFrameCN, payload_type, 4321, payload, sizeof(payload), kAbsoluteCaptureTimestampMs)); @@ -151,11 +154,11 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAbsoluteCaptureTime) { constexpr uint32_t kAbsoluteCaptureTimestampMs = 521; const char payload_name[] = "audio"; const uint8_t payload_type = 127; - ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload( + ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( payload_name, payload_type, 48000, 0, 1500)); uint8_t payload[] = {47, 11, 32, 93, 89}; - ASSERT_TRUE(rtp_sender_audio_.SendAudio( + ASSERT_TRUE(rtp_sender_audio_->SendAudio( AudioFrameType::kAudioFrameCN, payload_type, 4321, payload, sizeof(payload), kAbsoluteCaptureTimestampMs)); @@ -163,8 +166,49 @@ TEST_F(RtpSenderAudioTest, 
SendAudioWithAbsoluteCaptureTime) { transport_.last_sent_packet() .GetExtension(); EXPECT_TRUE(absolute_capture_time); - EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp, - Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs())); + EXPECT_EQ( + absolute_capture_time->absolute_capture_timestamp, + Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( + kAbsoluteCaptureTimestampMs))); + EXPECT_FALSE( + absolute_capture_time->estimated_capture_clock_offset.has_value()); +} + +// Essentially the same test as SendAudioWithAbsoluteCaptureTime but with a +// field trial. After the field trial is experimented, we will remove +// SendAudioWithAbsoluteCaptureTime. +TEST_F(RtpSenderAudioTest, + SendAudioWithAbsoluteCaptureTimeWithCaptureClockOffset) { + // Recreate rtp_sender_audio_ wieh new field trial. + test::ScopedFieldTrials field_trial( + "WebRTC-IncludeCaptureClockOffset/Enabled/"); + rtp_sender_audio_ = + std::make_unique(&fake_clock_, rtp_module_->RtpSender()); + + rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::kUri, + kAbsoluteCaptureTimeExtensionId); + constexpr uint32_t kAbsoluteCaptureTimestampMs = 521; + const char payload_name[] = "audio"; + const uint8_t payload_type = 127; + ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( + payload_name, payload_type, 48000, 0, 1500)); + uint8_t payload[] = {47, 11, 32, 93, 89}; + + ASSERT_TRUE(rtp_sender_audio_->SendAudio( + AudioFrameType::kAudioFrameCN, payload_type, 4321, payload, + sizeof(payload), kAbsoluteCaptureTimestampMs)); + + auto absolute_capture_time = + transport_.last_sent_packet() + .GetExtension(); + EXPECT_TRUE(absolute_capture_time); + EXPECT_EQ( + absolute_capture_time->absolute_capture_timestamp, + Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( + kAbsoluteCaptureTimestampMs))); + EXPECT_TRUE( + absolute_capture_time->estimated_capture_clock_offset.has_value()); + EXPECT_EQ(0, 
*absolute_capture_time->estimated_capture_clock_offset); } // As RFC4733, named telephone events are carried as part of the audio stream @@ -177,40 +221,40 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) { const char* kDtmfPayloadName = "telephone-event"; const uint32_t kPayloadFrequency = 8000; const uint8_t kPayloadType = 126; - ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload( + ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( kDtmfPayloadName, kPayloadType, kPayloadFrequency, 0, 0)); // For Telephone events, payload is not added to the registered payload list, // it will register only the payload used for audio stream. // Registering the payload again for audio stream with different payload name. const char* kPayloadName = "payload_name"; - ASSERT_EQ(0, rtp_sender_audio_.RegisterAudioPayload( + ASSERT_EQ(0, rtp_sender_audio_->RegisterAudioPayload( kPayloadName, kPayloadType, kPayloadFrequency, 1, 0)); // Start time is arbitrary. uint32_t capture_timestamp = fake_clock_.TimeInMilliseconds(); // DTMF event key=9, duration=500 and attenuationdB=10 - rtp_sender_audio_.SendTelephoneEvent(9, 500, 10); + rtp_sender_audio_->SendTelephoneEvent(9, 500, 10); // During start, it takes the starting timestamp as last sent timestamp. // The duration is calculated as the difference of current and last sent // timestamp. So for first call it will skip since the duration is zero. - ASSERT_TRUE(rtp_sender_audio_.SendAudio( + ASSERT_TRUE(rtp_sender_audio_->SendAudio( AudioFrameType::kEmptyFrame, kPayloadType, capture_timestamp, nullptr, 0, /*absolute_capture_time_ms=0*/ 0)); // DTMF Sample Length is (Frequency/1000) * Duration. // So in this case, it is (8000/1000) * 500 = 4000. // Sending it as two packets. 
- ASSERT_TRUE(rtp_sender_audio_.SendAudio(AudioFrameType::kEmptyFrame, - kPayloadType, - capture_timestamp + 2000, nullptr, 0, - /*absolute_capture_time_ms=0*/ 0)); + ASSERT_TRUE(rtp_sender_audio_->SendAudio(AudioFrameType::kEmptyFrame, + kPayloadType, + capture_timestamp + 2000, nullptr, 0, + /*absolute_capture_time_ms=0*/ 0)); // Marker Bit should be set to 1 for first packet. EXPECT_TRUE(transport_.last_sent_packet().Marker()); - ASSERT_TRUE(rtp_sender_audio_.SendAudio(AudioFrameType::kEmptyFrame, - kPayloadType, - capture_timestamp + 4000, nullptr, 0, - /*absolute_capture_time_ms=0*/ 0)); + ASSERT_TRUE(rtp_sender_audio_->SendAudio(AudioFrameType::kEmptyFrame, + kPayloadType, + capture_timestamp + 4000, nullptr, 0, + /*absolute_capture_time_ms=0*/ 0)); // Marker Bit should be set to 0 for rest of the packets. EXPECT_FALSE(transport_.last_sent_packet().Marker()); } diff --git a/modules/rtp_rtcp/source/rtp_sender_egress.cc b/modules/rtp_rtcp/source/rtp_sender_egress.cc index 6d5477be21..126b89c8c8 100644 --- a/modules/rtp_rtcp/source/rtp_sender_egress.cc +++ b/modules/rtp_rtcp/source/rtp_sender_egress.cc @@ -10,6 +10,7 @@ #include "modules/rtp_rtcp/source/rtp_sender_egress.h" +#include #include #include #include @@ -17,8 +18,8 @@ #include "absl/strings/match.h" #include "api/transport/field_trial_based_config.h" #include "logging/rtc_event_log/events/rtc_event_rtp_packet_outgoing.h" -#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/to_queued_task.h" namespace webrtc { namespace { @@ -26,48 +27,81 @@ constexpr uint32_t kTimestampTicksPerMs = 90; constexpr int kSendSideDelayWindowMs = 1000; constexpr int kBitrateStatisticsWindowMs = 1000; constexpr size_t kRtpSequenceNumberMapMaxEntries = 1 << 13; +constexpr TimeDelta kUpdateInterval = + TimeDelta::Millis(kBitrateStatisticsWindowMs); -bool IsEnabled(absl::string_view name, - const WebRtcKeyValueConfig* field_trials) { +bool 
IsTrialSetTo(const WebRtcKeyValueConfig* field_trials, + absl::string_view name, + absl::string_view value) { FieldTrialBasedConfig default_trials; auto& trials = field_trials ? *field_trials : default_trials; - return absl::StartsWith(trials.Lookup(name), "Enabled"); + return absl::StartsWith(trials.Lookup(name), value); } } // namespace RtpSenderEgress::NonPacedPacketSender::NonPacedPacketSender( - RtpSenderEgress* sender) - : transport_sequence_number_(0), sender_(sender) {} + RtpSenderEgress* sender, + SequenceNumberAssigner* sequence_number_assigner) + : transport_sequence_number_(0), + sender_(sender), + sequence_number_assigner_(sequence_number_assigner) { + RTC_DCHECK(sequence_number_assigner_); +} RtpSenderEgress::NonPacedPacketSender::~NonPacedPacketSender() = default; void RtpSenderEgress::NonPacedPacketSender::EnqueuePackets( std::vector> packets) { for (auto& packet : packets) { - if (!packet->SetExtension( - ++transport_sequence_number_)) { - --transport_sequence_number_; - } - packet->ReserveExtension(); - packet->ReserveExtension(); + PrepareForSend(packet.get()); sender_->SendPacket(packet.get(), PacedPacketInfo()); } + auto fec_packets = sender_->FetchFecPackets(); + if (!fec_packets.empty()) { + // Don't generate sequence numbers for flexfec, they are already running on + // an internally maintained sequence. 
+ const bool generate_sequence_numbers = !sender_->FlexFecSsrc().has_value(); + + for (auto& packet : fec_packets) { + if (generate_sequence_numbers) { + sequence_number_assigner_->AssignSequenceNumber(packet.get()); + } + PrepareForSend(packet.get()); + } + EnqueuePackets(std::move(fec_packets)); + } } -RtpSenderEgress::RtpSenderEgress(const RtpRtcp::Configuration& config, +void RtpSenderEgress::NonPacedPacketSender::PrepareForSend( + RtpPacketToSend* packet) { + if (!packet->SetExtension( + ++transport_sequence_number_)) { + --transport_sequence_number_; + } + packet->ReserveExtension(); + packet->ReserveExtension(); +} + +RtpSenderEgress::RtpSenderEgress(const RtpRtcpInterface::Configuration& config, RtpPacketHistory* packet_history) - : ssrc_(config.local_media_ssrc), + : worker_queue_(TaskQueueBase::Current()), + ssrc_(config.local_media_ssrc), rtx_ssrc_(config.rtx_send_ssrc), flexfec_ssrc_(config.fec_generator ? config.fec_generator->FecSsrc() : absl::nullopt), populate_network2_timestamp_(config.populate_network2_timestamp), send_side_bwe_with_overhead_( - IsEnabled("WebRTC-SendSideBwe-WithOverhead", config.field_trials)), + !IsTrialSetTo(config.field_trials, + "WebRTC-SendSideBwe-WithOverhead", + "Disabled")), clock_(config.clock), packet_history_(packet_history), transport_(config.outgoing_transport), event_log_(config.event_log), +#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE is_audio_(config.audio), +#endif need_rtp_packet_infos_(config.need_rtp_packet_infos), + fec_generator_(config.fec_generator), transport_feedback_observer_(config.transport_feedback_callback), send_side_delay_observer_(config.send_side_delay_observer), send_packet_observer_(config.send_packet_observer), @@ -84,54 +118,94 @@ RtpSenderEgress::RtpSenderEgress(const RtpRtcp::Configuration& config, rtp_sequence_number_map_(need_rtp_packet_infos_ ? 
std::make_unique( kRtpSequenceNumberMapMaxEntries) - : nullptr) {} + : nullptr) { + RTC_DCHECK(worker_queue_); + pacer_checker_.Detach(); + if (bitrate_callback_) { + update_task_ = RepeatingTaskHandle::DelayedStart(worker_queue_, + kUpdateInterval, [this]() { + PeriodicUpdate(); + return kUpdateInterval; + }); + } +} + +RtpSenderEgress::~RtpSenderEgress() { + RTC_DCHECK_RUN_ON(worker_queue_); + update_task_.Stop(); +} void RtpSenderEgress::SendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info) { + RTC_DCHECK_RUN_ON(&pacer_checker_); RTC_DCHECK(packet); - const uint32_t packet_ssrc = packet->Ssrc(); RTC_DCHECK(packet->packet_type().has_value()); RTC_DCHECK(HasCorrectSsrc(*packet)); - int64_t now_ms = clock_->TimeInMilliseconds(); + if (packet->packet_type() == RtpPacketMediaType::kRetransmission) { + RTC_DCHECK(packet->retransmitted_sequence_number().has_value()); + } + + const uint32_t packet_ssrc = packet->Ssrc(); + const int64_t now_ms = clock_->TimeInMilliseconds(); - if (is_audio_) { -#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE - BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "AudioTotBitrate_kbps", now_ms, - GetSendRates().Sum().kbps(), packet_ssrc); - BWE_TEST_LOGGING_PLOT_WITH_SSRC( - 1, "AudioNackBitrate_kbps", now_ms, - GetSendRates()[RtpPacketMediaType::kRetransmission].kbps(), - packet_ssrc); -#endif - } else { #if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE - BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "VideoTotBitrate_kbps", now_ms, - GetSendRates().Sum().kbps(), packet_ssrc); - BWE_TEST_LOGGING_PLOT_WITH_SSRC( - 1, "VideoNackBitrate_kbps", now_ms, - GetSendRates()[RtpPacketMediaType::kRetransmission].kbps(), - packet_ssrc); + worker_queue_->PostTask( + ToQueuedTask(task_safety_, [this, now_ms, packet_ssrc]() { + BweTestLoggingPlot(now_ms, packet_ssrc); + })); #endif + + if (need_rtp_packet_infos_ && + packet->packet_type() == RtpPacketToSend::Type::kVideo) { + worker_queue_->PostTask(ToQueuedTask( + task_safety_, + [this, packet_timestamp = packet->Timestamp(), + 
is_first_packet_of_frame = packet->is_first_packet_of_frame(), + is_last_packet_of_frame = packet->Marker(), + sequence_number = packet->SequenceNumber()]() { + RTC_DCHECK_RUN_ON(worker_queue_); + // Last packet of a frame, add it to sequence number info map. + const uint32_t timestamp = packet_timestamp - timestamp_offset_; + rtp_sequence_number_map_->InsertPacket( + sequence_number, + RtpSequenceNumberMap::Info(timestamp, is_first_packet_of_frame, + is_last_packet_of_frame)); + })); } - PacketOptions options; - { - rtc::CritScope lock(&lock_); - options.included_in_allocation = force_part_of_allocation_; + if (fec_generator_ && packet->fec_protect_packet()) { + // This packet should be protected by FEC, add it to packet generator. + RTC_DCHECK(fec_generator_); + RTC_DCHECK(packet->packet_type() == RtpPacketMediaType::kVideo); + absl::optional> + new_fec_params; + { + MutexLock lock(&lock_); + new_fec_params.swap(pending_fec_params_); + } + if (new_fec_params) { + fec_generator_->SetProtectionParameters(new_fec_params->first, + new_fec_params->second); + } + if (packet->is_red()) { + RtpPacketToSend unpacked_packet(*packet); + + const rtc::CopyOnWriteBuffer buffer = packet->Buffer(); + // Grab media payload type from RED header. + const size_t headers_size = packet->headers_size(); + unpacked_packet.SetPayloadType(buffer[headers_size]); - if (need_rtp_packet_infos_ && - packet->packet_type() == RtpPacketToSend::Type::kVideo) { - RTC_DCHECK(rtp_sequence_number_map_); - // Last packet of a frame, add it to sequence number info map. - const uint32_t timestamp = packet->Timestamp() - timestamp_offset_; - bool is_first_packet_of_frame = packet->is_first_packet_of_frame(); - bool is_last_packet_of_frame = packet->Marker(); - - rtp_sequence_number_map_->InsertPacket( - packet->SequenceNumber(), - RtpSequenceNumberMap::Info(timestamp, is_first_packet_of_frame, - is_last_packet_of_frame)); + // Copy the media payload into the unpacked buffer. 
+ uint8_t* payload_buffer = + unpacked_packet.SetPayloadSize(packet->payload_size() - 1); + std::copy(&packet->payload()[0] + 1, + &packet->payload()[0] + packet->payload_size(), payload_buffer); + + fec_generator_->AddPacketAndGenerateFec(unpacked_packet); + } else { + // If not RED encapsulated - we can just insert packet directly. + fec_generator_->AddPacketAndGenerateFec(*packet); } } @@ -163,6 +237,12 @@ void RtpSenderEgress::SendPacket(RtpPacketToSend* packet, const bool is_media = packet->packet_type() == RtpPacketMediaType::kAudio || packet->packet_type() == RtpPacketMediaType::kVideo; + PacketOptions options; + { + MutexLock lock(&lock_); + options.included_in_allocation = force_part_of_allocation_; + } + // Downstream code actually uses this flag to distinguish between media and // everything else. options.is_retransmit = !is_media; @@ -173,8 +253,7 @@ void RtpSenderEgress::SendPacket(RtpPacketToSend* packet, AddPacketToTransportFeedback(*packet_id, *packet, pacing_info); } - options.application_data.assign(packet->application_data().begin(), - packet->application_data().end()); + options.additional_data = packet->additional_data(); if (packet->packet_type() != RtpPacketMediaType::kPadding && packet->packet_type() != RtpPacketMediaType::kRetransmission) { @@ -195,30 +274,35 @@ void RtpSenderEgress::SendPacket(RtpPacketToSend* packet, } if (send_success) { - rtc::CritScope lock(&lock_); - UpdateRtpStats(*packet); + // |media_has_been_sent_| is used by RTPSender to figure out if it can send + // padding in the absence of transport-cc or abs-send-time. + // In those cases media must be sent first to set a reference timestamp. 
media_has_been_sent_ = true; - } -} - -void RtpSenderEgress::ProcessBitrateAndNotifyObservers() { - if (!bitrate_callback_) - return; - rtc::CritScope lock(&lock_); - RtpSendRates send_rates = GetSendRatesLocked(); - bitrate_callback_->Notify( - send_rates.Sum().bps(), - send_rates[RtpPacketMediaType::kRetransmission].bps(), ssrc_); + // TODO(sprang): Add support for FEC protecting all header extensions, add + // media packet to generator here instead. + + RTC_DCHECK(packet->packet_type().has_value()); + RtpPacketMediaType packet_type = *packet->packet_type(); + RtpPacketCounter counter(*packet); + size_t size = packet->size(); + worker_queue_->PostTask( + ToQueuedTask(task_safety_, [this, now_ms, packet_ssrc, packet_type, + counter = std::move(counter), size]() { + RTC_DCHECK_RUN_ON(worker_queue_); + UpdateRtpStats(now_ms, packet_ssrc, packet_type, std::move(counter), + size); + })); + } } RtpSendRates RtpSenderEgress::GetSendRates() const { - rtc::CritScope lock(&lock_); - return GetSendRatesLocked(); + MutexLock lock(&lock_); + const int64_t now_ms = clock_->TimeInMilliseconds(); + return GetSendRatesLocked(now_ms); } -RtpSendRates RtpSenderEgress::GetSendRatesLocked() const { - const int64_t now_ms = clock_->TimeInMilliseconds(); +RtpSendRates RtpSenderEgress::GetSendRatesLocked(int64_t now_ms) const { RtpSendRates current_rates; for (size_t i = 0; i < kNumMediaTypes; ++i) { RtpPacketMediaType type = static_cast(i); @@ -230,34 +314,37 @@ RtpSendRates RtpSenderEgress::GetSendRatesLocked() const { void RtpSenderEgress::GetDataCounters(StreamDataCounters* rtp_stats, StreamDataCounters* rtx_stats) const { - rtc::CritScope lock(&lock_); + // TODO(bugs.webrtc.org/11581): make sure rtx_rtp_stats_ and rtp_stats_ are + // only touched on the worker thread. 
+ MutexLock lock(&lock_); *rtp_stats = rtp_stats_; *rtx_stats = rtx_rtp_stats_; } void RtpSenderEgress::ForceIncludeSendPacketsInAllocation( bool part_of_allocation) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); force_part_of_allocation_ = part_of_allocation; } bool RtpSenderEgress::MediaHasBeenSent() const { - rtc::CritScope lock(&lock_); + RTC_DCHECK_RUN_ON(&pacer_checker_); return media_has_been_sent_; } void RtpSenderEgress::SetMediaHasBeenSent(bool media_sent) { - rtc::CritScope lock(&lock_); + RTC_DCHECK_RUN_ON(&pacer_checker_); media_has_been_sent_ = media_sent; } void RtpSenderEgress::SetTimestampOffset(uint32_t timestamp) { - rtc::CritScope lock(&lock_); + RTC_DCHECK_RUN_ON(worker_queue_); timestamp_offset_ = timestamp; } std::vector RtpSenderEgress::GetSentRtpPacketInfos( rtc::ArrayView sequence_numbers) const { + RTC_DCHECK_RUN_ON(worker_queue_); RTC_DCHECK(!sequence_numbers.empty()); if (!need_rtp_packet_infos_) { return std::vector(); @@ -266,7 +353,6 @@ std::vector RtpSenderEgress::GetSentRtpPacketInfos( std::vector results; results.reserve(sequence_numbers.size()); - rtc::CritScope cs(&lock_); for (uint16_t sequence_number : sequence_numbers) { const auto& info = rtp_sequence_number_map_->Get(sequence_number); if (!info) { @@ -280,6 +366,24 @@ std::vector RtpSenderEgress::GetSentRtpPacketInfos( return results; } +void RtpSenderEgress::SetFecProtectionParameters( + const FecProtectionParams& delta_params, + const FecProtectionParams& key_params) { + // TODO(sprang): Post task to pacer queue instead, one pacer is fully + // migrated to a task queue. 
+ MutexLock lock(&lock_); + pending_fec_params_.emplace(delta_params, key_params); +} + +std::vector> +RtpSenderEgress::FetchFecPackets() { + RTC_DCHECK_RUN_ON(&pacer_checker_); + if (fec_generator_) { + return fec_generator_->GetFecPackets(); + } + return {}; +} + bool RtpSenderEgress::HasCorrectSsrc(const RtpPacketToSend& packet) const { switch (*packet.packet_type()) { case RtpPacketMediaType::kAudio: @@ -308,12 +412,34 @@ void RtpSenderEgress::AddPacketToTransportFeedback( } RtpPacketSendInfo packet_info; - packet_info.ssrc = ssrc_; packet_info.transport_sequence_number = packet_id; - packet_info.rtp_sequence_number = packet.SequenceNumber(); + packet_info.rtp_timestamp = packet.Timestamp(); packet_info.length = packet_size; packet_info.pacing_info = pacing_info; packet_info.packet_type = packet.packet_type(); + + switch (*packet_info.packet_type) { + case RtpPacketMediaType::kAudio: + case RtpPacketMediaType::kVideo: + packet_info.media_ssrc = ssrc_; + packet_info.rtp_sequence_number = packet.SequenceNumber(); + break; + case RtpPacketMediaType::kRetransmission: + // For retransmissions, we're want to remove the original media packet + // if the rentrasmit arrives - so populate that in the packet info. + packet_info.media_ssrc = ssrc_; + packet_info.rtp_sequence_number = + *packet.retransmitted_sequence_number(); + break; + case RtpPacketMediaType::kPadding: + case RtpPacketMediaType::kForwardErrorCorrection: + // We're not interested in feedback about these packets being received + // or lost. + break; + } + // TODO(bugs.webrtc.org/12713): Remove once downstream usage is gone. 
+ packet_info.ssrc = packet_info.media_ssrc.value_or(0); + transport_feedback_observer_->OnAddPacket(packet_info); } } @@ -328,7 +454,7 @@ void RtpSenderEgress::UpdateDelayStatistics(int64_t capture_time_ms, int max_delay_ms = 0; uint64_t total_packet_send_delay_ms = 0; { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); // Compute the max and average of the recent capture-to-send delays. // The time complexity of the current approach depends on the distribution // of the delay values. This could be done more efficiently. @@ -434,32 +560,82 @@ bool RtpSenderEgress::SendPacketToNetwork(const RtpPacketToSend& packet, return true; } -void RtpSenderEgress::UpdateRtpStats(const RtpPacketToSend& packet) { - int64_t now_ms = clock_->TimeInMilliseconds(); +void RtpSenderEgress::UpdateRtpStats(int64_t now_ms, + uint32_t packet_ssrc, + RtpPacketMediaType packet_type, + RtpPacketCounter counter, + size_t packet_size) { + RTC_DCHECK_RUN_ON(worker_queue_); - StreamDataCounters* counters = - packet.Ssrc() == rtx_ssrc_ ? &rtx_rtp_stats_ : &rtp_stats_; + // TODO(bugs.webrtc.org/11581): send_rates_ should be touched only on the + // worker thread. + RtpSendRates send_rates; + { + MutexLock lock(&lock_); - if (counters->first_packet_time_ms == -1) { - counters->first_packet_time_ms = now_ms; - } + // TODO(bugs.webrtc.org/11581): make sure rtx_rtp_stats_ and rtp_stats_ are + // only touched on the worker thread. + StreamDataCounters* counters = + packet_ssrc == rtx_ssrc_ ? 
&rtx_rtp_stats_ : &rtp_stats_; + + if (counters->first_packet_time_ms == -1) { + counters->first_packet_time_ms = now_ms; + } + + if (packet_type == RtpPacketMediaType::kForwardErrorCorrection) { + counters->fec.Add(counter); + } else if (packet_type == RtpPacketMediaType::kRetransmission) { + counters->retransmitted.Add(counter); + } + counters->transmitted.Add(counter); - if (packet.packet_type() == RtpPacketMediaType::kForwardErrorCorrection) { - counters->fec.AddPacket(packet); + send_rates_[static_cast(packet_type)].Update(packet_size, now_ms); + if (bitrate_callback_) { + send_rates = GetSendRatesLocked(now_ms); + } + + if (rtp_stats_callback_) { + rtp_stats_callback_->DataCountersUpdated(*counters, packet_ssrc); + } } - if (packet.packet_type() == RtpPacketMediaType::kRetransmission) { - counters->retransmitted.AddPacket(packet); + // The bitrate_callback_ and rtp_stats_callback_ pointers in practice point + // to the same object, so these callbacks could be consolidated into one. 
+ if (bitrate_callback_) { + bitrate_callback_->Notify( + send_rates.Sum().bps(), + send_rates[RtpPacketMediaType::kRetransmission].bps(), ssrc_); } - counters->transmitted.AddPacket(packet); +} - RTC_DCHECK(packet.packet_type().has_value()); - send_rates_[static_cast(*packet.packet_type())].Update(packet.size(), - now_ms); +void RtpSenderEgress::PeriodicUpdate() { + RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK(bitrate_callback_); + RtpSendRates send_rates = GetSendRates(); + bitrate_callback_->Notify( + send_rates.Sum().bps(), + send_rates[RtpPacketMediaType::kRetransmission].bps(), ssrc_); +} - if (rtp_stats_callback_) { - rtp_stats_callback_->DataCountersUpdated(*counters, packet.Ssrc()); +#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE +void RtpSenderEgress::BweTestLoggingPlot(int64_t now_ms, uint32_t packet_ssrc) { + RTC_DCHECK_RUN_ON(worker_queue_); + + const auto rates = GetSendRates(); + if (is_audio_) { + BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "AudioTotBitrate_kbps", now_ms, + rates.Sum().kbps(), packet_ssrc); + BWE_TEST_LOGGING_PLOT_WITH_SSRC( + 1, "AudioNackBitrate_kbps", now_ms, + rates[RtpPacketMediaType::kRetransmission].kbps(), packet_ssrc); + } else { + BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "VideoTotBitrate_kbps", now_ms, + rates.Sum().kbps(), packet_ssrc); + BWE_TEST_LOGGING_PLOT_WITH_SSRC( + 1, "VideoNackBitrate_kbps", now_ms, + rates[RtpPacketMediaType::kRetransmission].kbps(), packet_ssrc); } } +#endif // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_egress.h b/modules/rtp_rtcp/source/rtp_sender_egress.h index c9ecde3be8..c767a1fe1b 100644 --- a/modules/rtp_rtcp/source/rtp_sender_egress.h +++ b/modules/rtp_rtcp/source/rtp_sender_egress.h @@ -13,19 +13,26 @@ #include #include +#include #include #include "absl/types/optional.h" #include "api/call/transport.h" #include "api/rtc_event_log/rtc_event_log.h" +#include "api/sequence_checker.h" +#include "api/task_queue/task_queue_base.h" #include 
"api/units/data_rate.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" +#include "modules/remote_bitrate_estimator/test/bwe_test_logging.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_packet_history.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/rtp_sequence_number_map.h" -#include "rtc_base/critical_section.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -36,20 +43,23 @@ class RtpSenderEgress { // without passing through an actual paced sender. class NonPacedPacketSender : public RtpPacketSender { public: - explicit NonPacedPacketSender(RtpSenderEgress* sender); + NonPacedPacketSender(RtpSenderEgress* sender, + SequenceNumberAssigner* sequence_number_assigner); virtual ~NonPacedPacketSender(); void EnqueuePackets( std::vector> packets) override; private: + void PrepareForSend(RtpPacketToSend* packet); uint16_t transport_sequence_number_; RtpSenderEgress* const sender_; + SequenceNumberAssigner* sequence_number_assigner_; }; - RtpSenderEgress(const RtpRtcp::Configuration& config, + RtpSenderEgress(const RtpRtcpInterface::Configuration& config, RtpPacketHistory* packet_history); - ~RtpSenderEgress() = default; + ~RtpSenderEgress(); void SendPacket(RtpPacketToSend* packet, const PacedPacketInfo& pacing_info) RTC_LOCKS_EXCLUDED(lock_); @@ -57,7 +67,6 @@ class RtpSenderEgress { absl::optional RtxSsrc() const { return rtx_ssrc_; } absl::optional FlexFecSsrc() const { return flexfec_ssrc_; } - void ProcessBitrateAndNotifyObservers() RTC_LOCKS_EXCLUDED(lock_); RtpSendRates GetSendRates() const RTC_LOCKS_EXCLUDED(lock_); void GetDataCounters(StreamDataCounters* 
rtp_stats, StreamDataCounters* rtx_stats) const @@ -78,13 +87,18 @@ class RtpSenderEgress { rtc::ArrayView sequence_numbers) const RTC_LOCKS_EXCLUDED(lock_); + void SetFecProtectionParameters(const FecProtectionParams& delta_params, + const FecProtectionParams& key_params); + std::vector> FetchFecPackets(); + private: // Maps capture time in milliseconds to send-side delay in milliseconds. // Send-side delay is the difference between transmission time and capture // time. typedef std::map SendDelayMap; - RtpSendRates GetSendRatesLocked() const RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); + RtpSendRates GetSendRatesLocked(int64_t now_ms) const + RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); bool HasCorrectSsrc(const RtpPacketToSend& packet) const; void AddPacketToTransportFeedback(uint16_t packet_id, const RtpPacketToSend& packet, @@ -100,9 +114,21 @@ class RtpSenderEgress { bool SendPacketToNetwork(const RtpPacketToSend& packet, const PacketOptions& options, const PacedPacketInfo& pacing_info); - void UpdateRtpStats(const RtpPacketToSend& packet) - RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); + void UpdateRtpStats(int64_t now_ms, + uint32_t packet_ssrc, + RtpPacketMediaType packet_type, + RtpPacketCounter counter, + size_t packet_size); +#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE + void BweTestLoggingPlot(int64_t now_ms, uint32_t packet_ssrc); +#endif + + // Called on a timer, once a second, on the worker_queue_. 
+ void PeriodicUpdate(); + + TaskQueueBase* const worker_queue_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker pacer_checker_; const uint32_t ssrc_; const absl::optional rtx_ssrc_; const absl::optional flexfec_ssrc_; @@ -112,8 +138,11 @@ class RtpSenderEgress { RtpPacketHistory* const packet_history_; Transport* const transport_; RtcEventLog* const event_log_; +#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE const bool is_audio_; +#endif const bool need_rtp_packet_infos_; + VideoFecGenerator* const fec_generator_ RTC_GUARDED_BY(pacer_checker_); TransportFeedbackObserver* const transport_feedback_observer_; SendSideDelayObserver* const send_side_delay_observer_; @@ -121,10 +150,10 @@ class RtpSenderEgress { StreamDataCountersCallback* const rtp_stats_callback_; BitrateStatisticsObserver* const bitrate_callback_; - rtc::CriticalSection lock_; - bool media_has_been_sent_ RTC_GUARDED_BY(lock_); + mutable Mutex lock_; + bool media_has_been_sent_ RTC_GUARDED_BY(pacer_checker_); bool force_part_of_allocation_ RTC_GUARDED_BY(lock_); - uint32_t timestamp_offset_ RTC_GUARDED_BY(lock_); + uint32_t timestamp_offset_ RTC_GUARDED_BY(worker_queue_); SendDelayMap send_delays_ RTC_GUARDED_BY(lock_); SendDelayMap::const_iterator max_delay_it_ RTC_GUARDED_BY(lock_); @@ -135,13 +164,17 @@ class RtpSenderEgress { StreamDataCounters rtx_rtp_stats_ RTC_GUARDED_BY(lock_); // One element per value in RtpPacketMediaType, with index matching value. std::vector send_rates_ RTC_GUARDED_BY(lock_); + absl::optional> + pending_fec_params_ RTC_GUARDED_BY(lock_); // Maps sent packets' sequence numbers to a tuple consisting of: // 1. The timestamp, without the randomizing offset mandated by the RFC. // 2. Whether the packet was the first in its frame. // 3. Whether the packet was the last in its frame. 
const std::unique_ptr rtp_sequence_number_map_ - RTC_GUARDED_BY(lock_); + RTC_GUARDED_BY(worker_queue_); + RepeatingTaskHandle update_task_ RTC_GUARDED_BY(worker_queue_); + ScopedTaskSafety task_safety_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc new file mode 100644 index 0000000000..4f3990cc3e --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_sender_egress_unittest.cc @@ -0,0 +1,982 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/rtp_rtcp/source/rtp_sender_egress.h" + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "api/call/transport.h" +#include "api/units/data_size.h" +#include "api/units/timestamp.h" +#include "logging/rtc_event_log/mock/mock_rtc_event_log.h" +#include "modules/rtp_rtcp/include/flexfec_sender.h" +#include "modules/rtp_rtcp/include/rtp_rtcp.h" +#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "modules/rtp_rtcp/source/rtp_header_extensions.h" +#include "modules/rtp_rtcp/source/rtp_packet_history.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" +#include "modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "test/gmock.h" +#include "test/gtest.h" +#include "test/time_controller/simulated_time_controller.h" + +namespace webrtc { +namespace { + +using ::testing::_; +using ::testing::Field; +using ::testing::NiceMock; +using ::testing::Optional; +using ::testing::StrictMock; + +constexpr Timestamp kStartTime = Timestamp::Millis(123456789); +constexpr int kDefaultPayloadType = 100; +constexpr int 
kFlexfectPayloadType = 110; +constexpr uint16_t kStartSequenceNumber = 33; +constexpr uint32_t kSsrc = 725242; +constexpr uint32_t kRtxSsrc = 12345; +constexpr uint32_t kFlexFecSsrc = 23456; +enum : int { + kTransportSequenceNumberExtensionId = 1, + kAbsoluteSendTimeExtensionId, + kTransmissionOffsetExtensionId, + kVideoTimingExtensionId, +}; + +struct TestConfig { + explicit TestConfig(bool with_overhead) : with_overhead(with_overhead) {} + bool with_overhead = false; +}; + +class MockSendPacketObserver : public SendPacketObserver { + public: + MOCK_METHOD(void, OnSendPacket, (uint16_t, int64_t, uint32_t), (override)); +}; + +class MockTransportFeedbackObserver : public TransportFeedbackObserver { + public: + MOCK_METHOD(void, OnAddPacket, (const RtpPacketSendInfo&), (override)); + MOCK_METHOD(void, + OnTransportFeedback, + (const rtcp::TransportFeedback&), + (override)); +}; + +class MockStreamDataCountersCallback : public StreamDataCountersCallback { + public: + MOCK_METHOD(void, + DataCountersUpdated, + (const StreamDataCounters& counters, uint32_t ssrc), + (override)); +}; + +class MockSendSideDelayObserver : public SendSideDelayObserver { + public: + MOCK_METHOD(void, + SendSideDelayUpdated, + (int, int, uint64_t, uint32_t), + (override)); +}; + +class FieldTrialConfig : public WebRtcKeyValueConfig { + public: + FieldTrialConfig() : overhead_enabled_(false) {} + ~FieldTrialConfig() override {} + + void SetOverHeadEnabled(bool enabled) { overhead_enabled_ = enabled; } + + std::string Lookup(absl::string_view key) const override { + if (key == "WebRTC-SendSideBwe-WithOverhead") { + return overhead_enabled_ ? 
"Enabled" : "Disabled"; + } + return ""; + } + + private: + bool overhead_enabled_; +}; + +struct TransmittedPacket { + TransmittedPacket(rtc::ArrayView data, + const PacketOptions& packet_options, + RtpHeaderExtensionMap* extensions) + : packet(extensions), options(packet_options) { + EXPECT_TRUE(packet.Parse(data)); + } + RtpPacketReceived packet; + PacketOptions options; +}; + +class TestTransport : public Transport { + public: + explicit TestTransport(RtpHeaderExtensionMap* extensions) + : total_data_sent_(DataSize::Zero()), extensions_(extensions) {} + bool SendRtp(const uint8_t* packet, + size_t length, + const PacketOptions& options) override { + total_data_sent_ += DataSize::Bytes(length); + last_packet_.emplace(rtc::MakeArrayView(packet, length), options, + extensions_); + return true; + } + + bool SendRtcp(const uint8_t*, size_t) override { RTC_CHECK_NOTREACHED(); } + + absl::optional last_packet() { return last_packet_; } + + private: + DataSize total_data_sent_; + absl::optional last_packet_; + RtpHeaderExtensionMap* const extensions_; +}; + +} // namespace + +class RtpSenderEgressTest : public ::testing::TestWithParam { + protected: + RtpSenderEgressTest() + : time_controller_(kStartTime), + clock_(time_controller_.GetClock()), + transport_(&header_extensions_), + packet_history_(clock_, /*enable_rtx_padding_prioritization=*/true), + sequence_number_(kStartSequenceNumber) { + trials_.SetOverHeadEnabled(GetParam().with_overhead); + } + + std::unique_ptr CreateRtpSenderEgress() { + return std::make_unique(DefaultConfig(), &packet_history_); + } + + RtpRtcp::Configuration DefaultConfig() { + RtpRtcp::Configuration config; + config.clock = clock_; + config.outgoing_transport = &transport_; + config.local_media_ssrc = kSsrc; + config.rtx_send_ssrc = kRtxSsrc; + config.fec_generator = nullptr; + config.event_log = &mock_rtc_event_log_; + config.send_packet_observer = &send_packet_observer_; + config.rtp_stats_callback = &mock_rtp_stats_callback_; + 
config.transport_feedback_callback = &feedback_observer_; + config.populate_network2_timestamp = false; + config.field_trials = &trials_; + return config; + } + + std::unique_ptr BuildRtpPacket(bool marker_bit, + int64_t capture_time_ms) { + auto packet = std::make_unique(&header_extensions_); + packet->SetSsrc(kSsrc); + packet->ReserveExtension(); + packet->ReserveExtension(); + packet->ReserveExtension(); + + packet->SetPayloadType(kDefaultPayloadType); + packet->set_packet_type(RtpPacketMediaType::kVideo); + packet->SetMarker(marker_bit); + packet->SetTimestamp(capture_time_ms * 90); + packet->set_capture_time_ms(capture_time_ms); + packet->SetSequenceNumber(sequence_number_++); + return packet; + } + + std::unique_ptr BuildRtpPacket() { + return BuildRtpPacket(/*marker_bit=*/true, clock_->CurrentTime().ms()); + } + + GlobalSimulatedTimeController time_controller_; + Clock* const clock_; + NiceMock mock_rtc_event_log_; + NiceMock mock_rtp_stats_callback_; + NiceMock send_packet_observer_; + NiceMock feedback_observer_; + RtpHeaderExtensionMap header_extensions_; + TestTransport transport_; + RtpPacketHistory packet_history_; + FieldTrialConfig trials_; + uint16_t sequence_number_; +}; + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverGetsCorrectByteCount) { + constexpr size_t kRtpOverheadBytesPerPacket = 12 + 8; + constexpr size_t kPayloadSize = 1400; + const uint16_t kTransportSequenceNumber = 17; + + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + const size_t expected_bytes = GetParam().with_overhead + ? 
kPayloadSize + kRtpOverheadBytesPerPacket + : kPayloadSize; + + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf( + Field(&RtpPacketSendInfo::media_ssrc, kSsrc), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber), + Field(&RtpPacketSendInfo::rtp_sequence_number, kStartSequenceNumber), + Field(&RtpPacketSendInfo::length, expected_bytes), + Field(&RtpPacketSendInfo::pacing_info, PacedPacketInfo())))); + + std::unique_ptr packet = BuildRtpPacket(); + packet->SetExtension(kTransportSequenceNumber); + packet->AllocatePayload(kPayloadSize); + + std::unique_ptr sender = CreateRtpSenderEgress(); + sender->SendPacket(packet.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, PacketOptionsIsRetransmitSetByPacketType) { + std::unique_ptr sender = CreateRtpSenderEgress(); + + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->set_packet_type(RtpPacketMediaType::kVideo); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + EXPECT_FALSE(transport_.last_packet()->options.is_retransmit); + + std::unique_ptr retransmission = BuildRtpPacket(); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission->set_retransmitted_sequence_number( + media_packet->SequenceNumber()); + sender->SendPacket(retransmission.get(), PacedPacketInfo()); + EXPECT_TRUE(transport_.last_packet()->options.is_retransmit); +} + +TEST_P(RtpSenderEgressTest, DoesnSetIncludedInAllocationByDefault) { + std::unique_ptr sender = CreateRtpSenderEgress(); + + std::unique_ptr packet = BuildRtpPacket(); + sender->SendPacket(packet.get(), PacedPacketInfo()); + EXPECT_FALSE(transport_.last_packet()->options.included_in_feedback); + EXPECT_FALSE(transport_.last_packet()->options.included_in_allocation); +} + +TEST_P(RtpSenderEgressTest, + SetsIncludedInFeedbackWhenTransportSequenceNumberExtensionIsRegistered) { + std::unique_ptr sender = CreateRtpSenderEgress(); + + 
header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + std::unique_ptr packet = BuildRtpPacket(); + sender->SendPacket(packet.get(), PacedPacketInfo()); + EXPECT_TRUE(transport_.last_packet()->options.included_in_feedback); +} + +TEST_P( + RtpSenderEgressTest, + SetsIncludedInAllocationWhenTransportSequenceNumberExtensionIsRegistered) { + std::unique_ptr sender = CreateRtpSenderEgress(); + + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + std::unique_ptr packet = BuildRtpPacket(); + sender->SendPacket(packet.get(), PacedPacketInfo()); + EXPECT_TRUE(transport_.last_packet()->options.included_in_allocation); +} + +TEST_P(RtpSenderEgressTest, + SetsIncludedInAllocationWhenForcedAsPartOfAllocation) { + std::unique_ptr sender = CreateRtpSenderEgress(); + sender->ForceIncludeSendPacketsInAllocation(true); + + std::unique_ptr packet = BuildRtpPacket(); + sender->SendPacket(packet.get(), PacedPacketInfo()); + EXPECT_FALSE(transport_.last_packet()->options.included_in_feedback); + EXPECT_TRUE(transport_.last_packet()->options.included_in_allocation); +} + +TEST_P(RtpSenderEgressTest, OnSendSideDelayUpdated) { + StrictMock send_side_delay_observer; + RtpRtcpInterface::Configuration config = DefaultConfig(); + config.send_side_delay_observer = &send_side_delay_observer; + auto sender = std::make_unique(config, &packet_history_); + + // Send packet with 10 ms send-side delay. The average, max and total should + // be 10 ms. + EXPECT_CALL(send_side_delay_observer, + SendSideDelayUpdated(10, 10, 10, kSsrc)); + int64_t capture_time_ms = clock_->TimeInMilliseconds(); + time_controller_.AdvanceTime(TimeDelta::Millis(10)); + sender->SendPacket(BuildRtpPacket(/*marker=*/true, capture_time_ms).get(), + PacedPacketInfo()); + + // Send another packet with 20 ms delay. The average, max and total should be + // 15, 20 and 30 ms respectively. 
+ EXPECT_CALL(send_side_delay_observer, + SendSideDelayUpdated(15, 20, 30, kSsrc)); + capture_time_ms = clock_->TimeInMilliseconds(); + time_controller_.AdvanceTime(TimeDelta::Millis(20)); + sender->SendPacket(BuildRtpPacket(/*marker=*/true, capture_time_ms).get(), + PacedPacketInfo()); + + // Send another packet at the same time, which replaces the last packet. + // Since this packet has 0 ms delay, the average is now 5 ms and max is 10 ms. + // The total counter stays the same though. + // TODO(terelius): Is is not clear that this is the right behavior. + EXPECT_CALL(send_side_delay_observer, SendSideDelayUpdated(5, 10, 30, kSsrc)); + capture_time_ms = clock_->TimeInMilliseconds(); + sender->SendPacket(BuildRtpPacket(/*marker=*/true, capture_time_ms).get(), + PacedPacketInfo()); + + // Send a packet 1 second later. The earlier packets should have timed + // out, so both max and average should be the delay of this packet. The total + // keeps increasing. + time_controller_.AdvanceTime(TimeDelta::Seconds(1)); + EXPECT_CALL(send_side_delay_observer, SendSideDelayUpdated(1, 1, 31, kSsrc)); + capture_time_ms = clock_->TimeInMilliseconds(); + time_controller_.AdvanceTime(TimeDelta::Millis(1)); + sender->SendPacket(BuildRtpPacket(/*marker=*/true, capture_time_ms).get(), + PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, WritesPacerExitToTimingExtension) { + std::unique_ptr sender = CreateRtpSenderEgress(); + header_extensions_.RegisterByUri(kVideoTimingExtensionId, + VideoTimingExtension::kUri); + + std::unique_ptr packet = BuildRtpPacket(); + packet->SetExtension(VideoSendTiming{}); + + const int kStoredTimeInMs = 100; + time_controller_.AdvanceTime(TimeDelta::Millis(kStoredTimeInMs)); + sender->SendPacket(packet.get(), PacedPacketInfo()); + ASSERT_TRUE(transport_.last_packet().has_value()); + + VideoSendTiming video_timing; + EXPECT_TRUE( + transport_.last_packet()->packet.GetExtension( + &video_timing)); + EXPECT_EQ(video_timing.pacer_exit_delta_ms, 
kStoredTimeInMs); +} + +TEST_P(RtpSenderEgressTest, WritesNetwork2ToTimingExtension) { + RtpRtcpInterface::Configuration rtp_config = DefaultConfig(); + rtp_config.populate_network2_timestamp = true; + auto sender = std::make_unique(rtp_config, &packet_history_); + header_extensions_.RegisterByUri(kVideoTimingExtensionId, + VideoTimingExtension::kUri); + + const uint16_t kPacerExitMs = 1234u; + std::unique_ptr packet = BuildRtpPacket(); + VideoSendTiming send_timing = {}; + send_timing.pacer_exit_delta_ms = kPacerExitMs; + packet->SetExtension(send_timing); + + const int kStoredTimeInMs = 100; + time_controller_.AdvanceTime(TimeDelta::Millis(kStoredTimeInMs)); + sender->SendPacket(packet.get(), PacedPacketInfo()); + ASSERT_TRUE(transport_.last_packet().has_value()); + + VideoSendTiming video_timing; + EXPECT_TRUE( + transport_.last_packet()->packet.GetExtension( + &video_timing)); + EXPECT_EQ(video_timing.network2_timestamp_delta_ms, kStoredTimeInMs); + EXPECT_EQ(video_timing.pacer_exit_delta_ms, kPacerExitMs); +} + +TEST_P(RtpSenderEgressTest, OnSendPacketUpdated) { + std::unique_ptr sender = CreateRtpSenderEgress(); + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + const uint16_t kTransportSequenceNumber = 1; + EXPECT_CALL(send_packet_observer_, + OnSendPacket(kTransportSequenceNumber, + clock_->TimeInMilliseconds(), kSsrc)); + std::unique_ptr packet = BuildRtpPacket(); + packet->SetExtension(kTransportSequenceNumber); + sender->SendPacket(packet.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, OnSendPacketNotUpdatedForRetransmits) { + std::unique_ptr sender = CreateRtpSenderEgress(); + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + const uint16_t kTransportSequenceNumber = 1; + EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(0); + std::unique_ptr packet = BuildRtpPacket(); + packet->SetExtension(kTransportSequenceNumber); + 
packet->set_packet_type(RtpPacketMediaType::kRetransmission); + packet->set_retransmitted_sequence_number(packet->SequenceNumber()); + sender->SendPacket(packet.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, ReportsFecRate) { + constexpr int kNumPackets = 10; + constexpr TimeDelta kTimeBetweenPackets = TimeDelta::Millis(33); + + std::unique_ptr sender = CreateRtpSenderEgress(); + DataSize total_fec_data_sent = DataSize::Zero(); + // Send some packets, alternating between media and FEC. + for (size_t i = 0; i < kNumPackets; ++i) { + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->set_packet_type(RtpPacketMediaType::kVideo); + media_packet->SetPayloadSize(500); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + + std::unique_ptr fec_packet = BuildRtpPacket(); + fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + fec_packet->SetPayloadSize(123); + sender->SendPacket(fec_packet.get(), PacedPacketInfo()); + total_fec_data_sent += DataSize::Bytes(fec_packet->size()); + + time_controller_.AdvanceTime(kTimeBetweenPackets); + } + + EXPECT_NEAR( + (sender->GetSendRates()[RtpPacketMediaType::kForwardErrorCorrection]) + .bps(), + (total_fec_data_sent / (kTimeBetweenPackets * kNumPackets)).bps(), 500); +} + +TEST_P(RtpSenderEgressTest, BitrateCallbacks) { + class MockBitrateStaticsObserver : public BitrateStatisticsObserver { + public: + MOCK_METHOD(void, Notify, (uint32_t, uint32_t, uint32_t), (override)); + } observer; + + RtpRtcpInterface::Configuration config = DefaultConfig(); + config.send_bitrate_observer = &observer; + auto sender = std::make_unique(config, &packet_history_); + + // Simulate kNumPackets sent with kPacketInterval intervals, with the + // number of packets selected so that we fill (but don't overflow) the one + // second averaging window. 
+ const TimeDelta kWindowSize = TimeDelta::Seconds(1); + const TimeDelta kPacketInterval = TimeDelta::Millis(20); + const int kNumPackets = (kWindowSize - kPacketInterval) / kPacketInterval; + + DataSize total_data_sent = DataSize::Zero(); + + // Send all but on of the packets, expect a call for each packet but don't + // verify bitrate yet (noisy measurements in the beginning). + for (int i = 0; i < kNumPackets; ++i) { + std::unique_ptr packet = BuildRtpPacket(); + packet->SetPayloadSize(500); + // Mark all packets as retransmissions - will cause total and retransmission + // rates to be equal. + packet->set_packet_type(RtpPacketMediaType::kRetransmission); + packet->set_retransmitted_sequence_number(packet->SequenceNumber()); + total_data_sent += DataSize::Bytes(packet->size()); + + EXPECT_CALL(observer, Notify(_, _, kSsrc)) + .WillOnce([&](uint32_t total_bitrate_bps, + uint32_t retransmission_bitrate_bps, uint32_t /*ssrc*/) { + TimeDelta window_size = i * kPacketInterval + TimeDelta::Millis(1); + // If there is just a single data point, there is no well defined + // averaging window so a bitrate of zero will be reported. + const double expected_bitrate_bps = + i == 0 ? 
0.0 : (total_data_sent / window_size).bps(); + EXPECT_NEAR(total_bitrate_bps, expected_bitrate_bps, 500); + EXPECT_NEAR(retransmission_bitrate_bps, expected_bitrate_bps, 500); + }); + + sender->SendPacket(packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(kPacketInterval); + } +} + +TEST_P(RtpSenderEgressTest, DoesNotPutNotRetransmittablePacketsInHistory) { + std::unique_ptr sender = CreateRtpSenderEgress(); + packet_history_.SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); + + std::unique_ptr packet = BuildRtpPacket(); + packet->set_allow_retransmission(false); + sender->SendPacket(packet.get(), PacedPacketInfo()); + EXPECT_FALSE( + packet_history_.GetPacketState(packet->SequenceNumber()).has_value()); +} + +TEST_P(RtpSenderEgressTest, PutsRetransmittablePacketsInHistory) { + std::unique_ptr sender = CreateRtpSenderEgress(); + packet_history_.SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); + + std::unique_ptr packet = BuildRtpPacket(); + packet->set_allow_retransmission(true); + sender->SendPacket(packet.get(), PacedPacketInfo()); + EXPECT_THAT( + packet_history_.GetPacketState(packet->SequenceNumber()), + Optional( + Field(&RtpPacketHistory::PacketState::pending_transmission, false))); +} + +TEST_P(RtpSenderEgressTest, DoesNotPutNonMediaInHistory) { + std::unique_ptr sender = CreateRtpSenderEgress(); + packet_history_.SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); + + // Non-media packets, even when marked as retransmittable, are not put into + // the packet history. 
+ std::unique_ptr retransmission = BuildRtpPacket(); + retransmission->set_allow_retransmission(true); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission->set_retransmitted_sequence_number( + retransmission->SequenceNumber()); + sender->SendPacket(retransmission.get(), PacedPacketInfo()); + EXPECT_FALSE(packet_history_.GetPacketState(retransmission->SequenceNumber()) + .has_value()); + + std::unique_ptr fec = BuildRtpPacket(); + fec->set_allow_retransmission(true); + fec->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + sender->SendPacket(fec.get(), PacedPacketInfo()); + EXPECT_FALSE( + packet_history_.GetPacketState(fec->SequenceNumber()).has_value()); + + std::unique_ptr padding = BuildRtpPacket(); + padding->set_allow_retransmission(true); + padding->set_packet_type(RtpPacketMediaType::kPadding); + sender->SendPacket(padding.get(), PacedPacketInfo()); + EXPECT_FALSE( + packet_history_.GetPacketState(padding->SequenceNumber()).has_value()); +} + +TEST_P(RtpSenderEgressTest, UpdatesSendStatusOfRetransmittedPackets) { + std::unique_ptr sender = CreateRtpSenderEgress(); + packet_history_.SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); + + // Send a packet, putting it in the history. + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->set_allow_retransmission(true); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + EXPECT_THAT( + packet_history_.GetPacketState(media_packet->SequenceNumber()), + Optional( + Field(&RtpPacketHistory::PacketState::pending_transmission, false))); + + // Simulate a retransmission, marking the packet as pending. 
+ std::unique_ptr retransmission = + packet_history_.GetPacketAndMarkAsPending(media_packet->SequenceNumber()); + retransmission->set_retransmitted_sequence_number( + media_packet->SequenceNumber()); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + EXPECT_THAT(packet_history_.GetPacketState(media_packet->SequenceNumber()), + Optional(Field( + &RtpPacketHistory::PacketState::pending_transmission, true))); + + // Simulate packet leaving pacer, the packet should be marked as non-pending. + sender->SendPacket(retransmission.get(), PacedPacketInfo()); + EXPECT_THAT( + packet_history_.GetPacketState(media_packet->SequenceNumber()), + Optional( + Field(&RtpPacketHistory::PacketState::pending_transmission, false))); +} + +TEST_P(RtpSenderEgressTest, StreamDataCountersCallbacks) { + std::unique_ptr sender = CreateRtpSenderEgress(); + + const RtpPacketCounter kEmptyCounter; + RtpPacketCounter expected_transmitted_counter; + RtpPacketCounter expected_retransmission_counter; + + // Send a media packet. + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->SetPayloadSize(6); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.payload_bytes += media_packet->payload_size(); + expected_transmitted_counter.header_bytes += media_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, + expected_retransmission_counter), + Field(&StreamDataCounters::fec, kEmptyCounter)), + kSsrc)); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + // Send a retransmission. Retransmissions are counted into both transmitted + // and retransmitted packet statistics. 
+ std::unique_ptr retransmission_packet = BuildRtpPacket(); + retransmission_packet->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission_packet->set_retransmitted_sequence_number( + retransmission_packet->SequenceNumber()); + media_packet->SetPayloadSize(7); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.payload_bytes += + retransmission_packet->payload_size(); + expected_transmitted_counter.header_bytes += + retransmission_packet->headers_size(); + + expected_retransmission_counter.packets += 1; + expected_retransmission_counter.payload_bytes += + retransmission_packet->payload_size(); + expected_retransmission_counter.header_bytes += + retransmission_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, + expected_retransmission_counter), + Field(&StreamDataCounters::fec, kEmptyCounter)), + kSsrc)); + sender->SendPacket(retransmission_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + // Send a padding packet. 
+ std::unique_ptr padding_packet = BuildRtpPacket(); + padding_packet->set_packet_type(RtpPacketMediaType::kPadding); + padding_packet->SetPadding(224); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.padding_bytes += padding_packet->padding_size(); + expected_transmitted_counter.header_bytes += padding_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated(AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, + expected_retransmission_counter), + Field(&StreamDataCounters::fec, kEmptyCounter)), + kSsrc)); + sender->SendPacket(padding_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); +} + +TEST_P(RtpSenderEgressTest, StreamDataCountersCallbacksFec) { + std::unique_ptr sender = CreateRtpSenderEgress(); + + const RtpPacketCounter kEmptyCounter; + RtpPacketCounter expected_transmitted_counter; + RtpPacketCounter expected_fec_counter; + + // Send a media packet. + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->SetPayloadSize(6); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.payload_bytes += media_packet->payload_size(); + expected_transmitted_counter.header_bytes += media_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated( + AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, kEmptyCounter), + Field(&StreamDataCounters::fec, expected_fec_counter)), + kSsrc)); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + // Send and FEC packet. FEC is counted into both transmitted and FEC packet + // statistics. 
+ std::unique_ptr fec_packet = BuildRtpPacket(); + fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + fec_packet->SetPayloadSize(6); + expected_transmitted_counter.packets += 1; + expected_transmitted_counter.payload_bytes += fec_packet->payload_size(); + expected_transmitted_counter.header_bytes += fec_packet->headers_size(); + + expected_fec_counter.packets += 1; + expected_fec_counter.payload_bytes += fec_packet->payload_size(); + expected_fec_counter.header_bytes += fec_packet->headers_size(); + + EXPECT_CALL( + mock_rtp_stats_callback_, + DataCountersUpdated( + AllOf(Field(&StreamDataCounters::transmitted, + expected_transmitted_counter), + Field(&StreamDataCounters::retransmitted, kEmptyCounter), + Field(&StreamDataCounters::fec, expected_fec_counter)), + kSsrc)); + sender->SendPacket(fec_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); +} + +TEST_P(RtpSenderEgressTest, UpdatesDataCounters) { + std::unique_ptr sender = CreateRtpSenderEgress(); + + const RtpPacketCounter kEmptyCounter; + + // Send a media packet. + std::unique_ptr media_packet = BuildRtpPacket(); + media_packet->SetPayloadSize(6); + sender->SendPacket(media_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + // Send an RTX retransmission packet. 
+ std::unique_ptr rtx_packet = BuildRtpPacket(); + rtx_packet->set_packet_type(RtpPacketMediaType::kRetransmission); + rtx_packet->SetSsrc(kRtxSsrc); + rtx_packet->SetPayloadSize(7); + rtx_packet->set_retransmitted_sequence_number(media_packet->SequenceNumber()); + sender->SendPacket(rtx_packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Zero()); + + StreamDataCounters rtp_stats; + StreamDataCounters rtx_stats; + sender->GetDataCounters(&rtp_stats, &rtx_stats); + + EXPECT_EQ(rtp_stats.transmitted.packets, 1u); + EXPECT_EQ(rtp_stats.transmitted.payload_bytes, media_packet->payload_size()); + EXPECT_EQ(rtp_stats.transmitted.padding_bytes, media_packet->padding_size()); + EXPECT_EQ(rtp_stats.transmitted.header_bytes, media_packet->headers_size()); + EXPECT_EQ(rtp_stats.retransmitted, kEmptyCounter); + EXPECT_EQ(rtp_stats.fec, kEmptyCounter); + + // Retransmissions are counted both into transmitted and retransmitted + // packet counts. + EXPECT_EQ(rtx_stats.transmitted.packets, 1u); + EXPECT_EQ(rtx_stats.transmitted.payload_bytes, rtx_packet->payload_size()); + EXPECT_EQ(rtx_stats.transmitted.padding_bytes, rtx_packet->padding_size()); + EXPECT_EQ(rtx_stats.transmitted.header_bytes, rtx_packet->headers_size()); + EXPECT_EQ(rtx_stats.retransmitted, rtx_stats.transmitted); + EXPECT_EQ(rtx_stats.fec, kEmptyCounter); +} + +TEST_P(RtpSenderEgressTest, SendPacketUpdatesExtensions) { + header_extensions_.RegisterByUri(kVideoTimingExtensionId, + VideoTimingExtension::kUri); + header_extensions_.RegisterByUri(kAbsoluteSendTimeExtensionId, + AbsoluteSendTime::kUri); + header_extensions_.RegisterByUri(kTransmissionOffsetExtensionId, + TransmissionOffset::kUri); + std::unique_ptr sender = CreateRtpSenderEgress(); + + std::unique_ptr packet = BuildRtpPacket(); + packet->set_packetization_finish_time_ms(clock_->TimeInMilliseconds()); + + const int32_t kDiffMs = 10; + time_controller_.AdvanceTime(TimeDelta::Millis(kDiffMs)); + + 
sender->SendPacket(packet.get(), PacedPacketInfo()); + + RtpPacketReceived received_packet = transport_.last_packet()->packet; + + EXPECT_EQ(received_packet.GetExtension(), kDiffMs * 90); + + EXPECT_EQ(received_packet.GetExtension(), + AbsoluteSendTime::MsTo24Bits(clock_->TimeInMilliseconds())); + + VideoSendTiming timing; + EXPECT_TRUE(received_packet.GetExtension(&timing)); + EXPECT_EQ(timing.pacer_exit_delta_ms, kDiffMs); +} + +TEST_P(RtpSenderEgressTest, SendPacketSetsPacketOptions) { + const uint16_t kPacketId = 42; + std::unique_ptr sender = CreateRtpSenderEgress(); + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + std::unique_ptr packet = BuildRtpPacket(); + packet->SetExtension(kPacketId); + EXPECT_CALL(send_packet_observer_, OnSendPacket); + sender->SendPacket(packet.get(), PacedPacketInfo()); + + PacketOptions packet_options = transport_.last_packet()->options; + + EXPECT_EQ(packet_options.packet_id, kPacketId); + EXPECT_TRUE(packet_options.included_in_allocation); + EXPECT_TRUE(packet_options.included_in_feedback); + EXPECT_FALSE(packet_options.is_retransmit); + + // Send another packet as retransmission, verify options are populated. 
+ std::unique_ptr retransmission = BuildRtpPacket(); + retransmission->SetExtension(kPacketId + 1); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission->set_retransmitted_sequence_number(packet->SequenceNumber()); + sender->SendPacket(retransmission.get(), PacedPacketInfo()); + EXPECT_TRUE(transport_.last_packet()->options.is_retransmit); +} + +TEST_P(RtpSenderEgressTest, SendPacketUpdatesStats) { + const size_t kPayloadSize = 1000; + StrictMock send_side_delay_observer; + + const rtc::ArrayView kNoRtpHeaderExtensionSizes; + FlexfecSender flexfec(kFlexfectPayloadType, kFlexFecSsrc, kSsrc, /*mid=*/"", + /*header_extensions=*/{}, kNoRtpHeaderExtensionSizes, + /*rtp_state=*/nullptr, time_controller_.GetClock()); + RtpRtcpInterface::Configuration config = DefaultConfig(); + config.fec_generator = &flexfec; + config.send_side_delay_observer = &send_side_delay_observer; + auto sender = std::make_unique(config, &packet_history_); + + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + const int64_t capture_time_ms = clock_->TimeInMilliseconds(); + + std::unique_ptr video_packet = BuildRtpPacket(); + video_packet->set_packet_type(RtpPacketMediaType::kVideo); + video_packet->SetPayloadSize(kPayloadSize); + video_packet->SetExtension(1); + + std::unique_ptr rtx_packet = BuildRtpPacket(); + rtx_packet->SetSsrc(kRtxSsrc); + rtx_packet->set_packet_type(RtpPacketMediaType::kRetransmission); + rtx_packet->set_retransmitted_sequence_number(video_packet->SequenceNumber()); + rtx_packet->SetPayloadSize(kPayloadSize); + rtx_packet->SetExtension(2); + + std::unique_ptr fec_packet = BuildRtpPacket(); + fec_packet->SetSsrc(kFlexFecSsrc); + fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + fec_packet->SetPayloadSize(kPayloadSize); + fec_packet->SetExtension(3); + + const int64_t kDiffMs = 25; + time_controller_.AdvanceTime(TimeDelta::Millis(kDiffMs)); + + 
EXPECT_CALL(send_side_delay_observer, + SendSideDelayUpdated(kDiffMs, kDiffMs, kDiffMs, kSsrc)); + EXPECT_CALL( + send_side_delay_observer, + SendSideDelayUpdated(kDiffMs, kDiffMs, 2 * kDiffMs, kFlexFecSsrc)); + + EXPECT_CALL(send_packet_observer_, OnSendPacket(1, capture_time_ms, kSsrc)); + + sender->SendPacket(video_packet.get(), PacedPacketInfo()); + + // Send packet observer not called for padding/retransmissions. + EXPECT_CALL(send_packet_observer_, OnSendPacket(2, _, _)).Times(0); + sender->SendPacket(rtx_packet.get(), PacedPacketInfo()); + + EXPECT_CALL(send_packet_observer_, + OnSendPacket(3, capture_time_ms, kFlexFecSsrc)); + sender->SendPacket(fec_packet.get(), PacedPacketInfo()); + + time_controller_.AdvanceTime(TimeDelta::Zero()); + StreamDataCounters rtp_stats; + StreamDataCounters rtx_stats; + sender->GetDataCounters(&rtp_stats, &rtx_stats); + EXPECT_EQ(rtp_stats.transmitted.packets, 2u); + EXPECT_EQ(rtp_stats.fec.packets, 1u); + EXPECT_EQ(rtx_stats.retransmitted.packets, 1u); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverWithRetransmission) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + std::unique_ptr retransmission = BuildRtpPacket(); + retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + retransmission->SetExtension( + kTransportSequenceNumber); + uint16_t retransmitted_seq = retransmission->SequenceNumber() - 2; + retransmission->set_retransmitted_sequence_number(retransmitted_seq); + + std::unique_ptr sender = CreateRtpSenderEgress(); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf( + Field(&RtpPacketSendInfo::media_ssrc, kSsrc), + Field(&RtpPacketSendInfo::rtp_sequence_number, retransmitted_seq), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(retransmission.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, 
TransportFeedbackObserverWithRtxRetransmission) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + std::unique_ptr rtx_retransmission = BuildRtpPacket(); + rtx_retransmission->SetSsrc(kRtxSsrc); + rtx_retransmission->SetExtension( + kTransportSequenceNumber); + rtx_retransmission->set_packet_type(RtpPacketMediaType::kRetransmission); + uint16_t rtx_retransmitted_seq = rtx_retransmission->SequenceNumber() - 2; + rtx_retransmission->set_retransmitted_sequence_number(rtx_retransmitted_seq); + + std::unique_ptr sender = CreateRtpSenderEgress(); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf( + Field(&RtpPacketSendInfo::media_ssrc, kSsrc), + Field(&RtpPacketSendInfo::rtp_sequence_number, rtx_retransmitted_seq), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(rtx_retransmission.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverPadding) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + std::unique_ptr padding = BuildRtpPacket(); + padding->SetPadding(224); + padding->set_packet_type(RtpPacketMediaType::kPadding); + padding->SetExtension(kTransportSequenceNumber); + + std::unique_ptr sender = CreateRtpSenderEgress(); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(padding.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverRtxPadding) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + std::unique_ptr rtx_padding = BuildRtpPacket(); + 
rtx_padding->SetPadding(224); + rtx_padding->SetSsrc(kRtxSsrc); + rtx_padding->set_packet_type(RtpPacketMediaType::kPadding); + rtx_padding->SetExtension(kTransportSequenceNumber); + + std::unique_ptr sender = CreateRtpSenderEgress(); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(rtx_padding.get(), PacedPacketInfo()); +} + +TEST_P(RtpSenderEgressTest, TransportFeedbackObserverFec) { + const uint16_t kTransportSequenceNumber = 17; + header_extensions_.RegisterByUri(kTransportSequenceNumberExtensionId, + TransportSequenceNumber::kUri); + + std::unique_ptr fec_packet = BuildRtpPacket(); + fec_packet->SetSsrc(kFlexFecSsrc); + fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); + fec_packet->SetExtension(kTransportSequenceNumber); + + const rtc::ArrayView kNoRtpHeaderExtensionSizes; + FlexfecSender flexfec(kFlexfectPayloadType, kFlexFecSsrc, kSsrc, /*mid=*/"", + /*header_extensions=*/{}, kNoRtpHeaderExtensionSizes, + /*rtp_state=*/nullptr, time_controller_.GetClock()); + RtpRtcpInterface::Configuration config = DefaultConfig(); + config.fec_generator = &flexfec; + auto sender = std::make_unique(config, &packet_history_); + EXPECT_CALL( + feedback_observer_, + OnAddPacket(AllOf(Field(&RtpPacketSendInfo::media_ssrc, absl::nullopt), + Field(&RtpPacketSendInfo::transport_sequence_number, + kTransportSequenceNumber)))); + sender->SendPacket(fec_packet.get(), PacedPacketInfo()); +} + +INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, + RtpSenderEgressTest, + ::testing::Values(TestConfig(false), + TestConfig(true))); + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc index 65e2e04ef4..e9be016143 100644 --- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc +++ 
b/modules/rtp_rtcp/source/rtp_sender_unittest.cc @@ -22,24 +22,26 @@ #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/include/rtp_packet_sender.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "modules/rtp_rtcp/source/rtp_format_video_generic.h" #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h" #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" -#include "modules/rtp_rtcp/source/rtp_sender_egress.h" #include "modules/rtp_rtcp/source/rtp_sender_video.h" #include "modules/rtp_rtcp/source/rtp_utility.h" +#include "modules/rtp_rtcp/source/video_fec_generator.h" #include "rtc_base/arraysize.h" +#include "rtc_base/logging.h" #include "rtc_base/rate_limiter.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "test/field_trial.h" #include "test/gmock.h" #include "test/gtest.h" #include "test/mock_transport.h" #include "test/rtp_header_parser.h" +#include "test/time_controller/simulated_time_controller.h" namespace webrtc { @@ -64,19 +66,19 @@ const uint16_t kSeqNum = 33; const uint32_t kSsrc = 725242; const uint32_t kRtxSsrc = 12345; const uint32_t kFlexFecSsrc = 45678; -const uint16_t kTransportSequenceNumber = 1; const uint64_t kStartTime = 123456789; const size_t kMaxPaddingSize = 224u; const uint8_t kPayloadData[] = {47, 11, 32, 93, 89}; const int64_t kDefaultExpectedRetransmissionTimeMs = 125; -const char kNoRid[] = ""; -const char kNoMid[] = ""; +const size_t kMaxPaddingLength = 224; // Value taken from rtp_sender.cc. +const uint32_t kTimestampTicksPerMs = 90; // 90kHz clock. 
using ::testing::_; using ::testing::AllOf; +using ::testing::AtLeast; using ::testing::Contains; using ::testing::Each; -using ::testing::ElementsAreArray; +using ::testing::ElementsAre; using ::testing::Eq; using ::testing::Field; using ::testing::Gt; @@ -87,152 +89,23 @@ using ::testing::Pointee; using ::testing::Property; using ::testing::Return; using ::testing::SizeIs; -using ::testing::StrictMock; - -uint64_t ConvertMsToAbsSendTime(int64_t time_ms) { - return (((time_ms << 18) + 500) / 1000) & 0x00ffffff; -} - -class LoopbackTransportTest : public webrtc::Transport { - public: - LoopbackTransportTest() : total_bytes_sent_(0) { - receivers_extensions_.Register( - kTransmissionTimeOffsetExtensionId); - receivers_extensions_.Register( - kAbsoluteSendTimeExtensionId); - receivers_extensions_.Register( - kTransportSequenceNumberExtensionId); - receivers_extensions_.Register(kVideoRotationExtensionId); - receivers_extensions_.Register(kAudioLevelExtensionId); - receivers_extensions_.Register( - kVideoTimingExtensionId); - receivers_extensions_.Register(kMidExtensionId); - receivers_extensions_.Register( - kGenericDescriptorId); - receivers_extensions_.Register(kRidExtensionId); - receivers_extensions_.Register( - kRepairedRidExtensionId); - } - - bool SendRtp(const uint8_t* data, - size_t len, - const PacketOptions& options) override { - last_options_ = options; - total_bytes_sent_ += len; - sent_packets_.push_back(RtpPacketReceived(&receivers_extensions_)); - EXPECT_TRUE(sent_packets_.back().Parse(data, len)); - return true; - } - bool SendRtcp(const uint8_t* data, size_t len) override { return false; } - const RtpPacketReceived& last_sent_packet() { return sent_packets_.back(); } - int packets_sent() { return sent_packets_.size(); } - - size_t total_bytes_sent_; - PacketOptions last_options_; - std::vector sent_packets_; - - private: - RtpHeaderExtensionMap receivers_extensions_; -}; - -MATCHER_P(SameRtcEventTypeAs, value, "") { - return value == arg->GetType(); 
-} - -struct TestConfig { - explicit TestConfig(bool with_overhead) : with_overhead(with_overhead) {} - bool with_overhead = false; -}; class MockRtpPacketPacer : public RtpPacketSender { public: MockRtpPacketPacer() {} virtual ~MockRtpPacketPacer() {} - MOCK_METHOD1(EnqueuePackets, - void(std::vector>)); - - MOCK_METHOD2(CreateProbeCluster, void(int bitrate_bps, int cluster_id)); - - MOCK_METHOD0(Pause, void()); - MOCK_METHOD0(Resume, void()); - MOCK_METHOD1(SetCongestionWindow, - void(absl::optional congestion_window_bytes)); - MOCK_METHOD1(UpdateOutstandingData, void(int64_t outstanding_bytes)); - MOCK_METHOD1(SetAccountForAudioPackets, void(bool account_for_audio)); -}; - -class MockSendSideDelayObserver : public SendSideDelayObserver { - public: - MOCK_METHOD4(SendSideDelayUpdated, void(int, int, uint64_t, uint32_t)); -}; - -class MockSendPacketObserver : public SendPacketObserver { - public: - MOCK_METHOD3(OnSendPacket, void(uint16_t, int64_t, uint32_t)); -}; - -class MockTransportFeedbackObserver : public TransportFeedbackObserver { - public: - MOCK_METHOD1(OnAddPacket, void(const RtpPacketSendInfo&)); - MOCK_METHOD1(OnTransportFeedback, void(const rtcp::TransportFeedback&)); -}; - -class StreamDataTestCallback : public StreamDataCountersCallback { - public: - StreamDataTestCallback() - : StreamDataCountersCallback(), ssrc_(0), counters_() {} - ~StreamDataTestCallback() override = default; - - void DataCountersUpdated(const StreamDataCounters& counters, - uint32_t ssrc) override { - ssrc_ = ssrc; - counters_ = counters; - } - - uint32_t ssrc_; - StreamDataCounters counters_; - - void MatchPacketCounter(const RtpPacketCounter& expected, - const RtpPacketCounter& actual) { - EXPECT_EQ(expected.payload_bytes, actual.payload_bytes); - EXPECT_EQ(expected.header_bytes, actual.header_bytes); - EXPECT_EQ(expected.padding_bytes, actual.padding_bytes); - EXPECT_EQ(expected.packets, actual.packets); - } - - void Matches(uint32_t ssrc, const StreamDataCounters& 
counters) { - EXPECT_EQ(ssrc, ssrc_); - MatchPacketCounter(counters.transmitted, counters_.transmitted); - MatchPacketCounter(counters.retransmitted, counters_.retransmitted); - EXPECT_EQ(counters.fec.packets, counters_.fec.packets); - } -}; - -// Mimics ModuleRtpRtcp::RtpSenderContext. -// TODO(sprang): Split up unit tests and test these components individually -// wherever possible. -struct RtpSenderContext { - explicit RtpSenderContext(const RtpRtcp::Configuration& config) - : packet_history_(config.clock, config.enable_rtx_padding_prioritization), - packet_sender_(config, &packet_history_), - non_paced_sender_(&packet_sender_), - packet_generator_( - config, - &packet_history_, - config.paced_sender ? config.paced_sender : &non_paced_sender_) {} - RtpPacketHistory packet_history_; - RtpSenderEgress packet_sender_; - RtpSenderEgress::NonPacedPacketSender non_paced_sender_; - RTPSender packet_generator_; + MOCK_METHOD(void, + EnqueuePackets, + (std::vector>), + (override)); }; class FieldTrialConfig : public WebRtcKeyValueConfig { public: - FieldTrialConfig() : overhead_enabled_(false), max_padding_factor_(1200) {} + FieldTrialConfig() : max_padding_factor_(1200) {} ~FieldTrialConfig() override {} - void SetOverHeadEnabled(bool enabled) { overhead_enabled_ = enabled; } void SetMaxPaddingFactor(double factor) { max_padding_factor_ = factor; } std::string Lookup(absl::string_view key) const override { @@ -241,24 +114,22 @@ class FieldTrialConfig : public WebRtcKeyValueConfig { rtc::SimpleStringBuilder ssb(string_buf); ssb << "factor:" << max_padding_factor_; return ssb.str(); - } else if (key == "WebRTC-SendSideBwe-WithOverhead") { - return overhead_enabled_ ? 
"Enabled" : "Disabled"; } return ""; } private: - bool overhead_enabled_; double max_padding_factor_; }; } // namespace -class RtpSenderTest : public ::testing::TestWithParam { +class RtpSenderTest : public ::testing::Test { protected: RtpSenderTest() - : fake_clock_(kStartTime), - retransmission_rate_limiter_(&fake_clock_, 1000), + : time_controller_(Timestamp::Millis(kStartTime)), + clock_(time_controller_.GetClock()), + retransmission_rate_limiter_(clock_, 1000), flexfec_sender_(0, kFlexFecSsrc, kSsrc, @@ -266,72 +137,67 @@ class RtpSenderTest : public ::testing::TestWithParam { std::vector(), std::vector(), nullptr, - &fake_clock_), + clock_), kMarkerBit(true) { - field_trials_.SetOverHeadEnabled(GetParam().with_overhead); } - void SetUp() override { SetUpRtpSender(true, false, false); } - - RTPSender* rtp_sender() { - RTC_DCHECK(rtp_sender_context_); - return &rtp_sender_context_->packet_generator_; - } + void SetUp() override { SetUpRtpSender(true, false, nullptr); } - RtpSenderEgress* rtp_egress() { - RTC_DCHECK(rtp_sender_context_); - return &rtp_sender_context_->packet_sender_; + void SetUpRtpSender(bool populate_network2, + bool always_send_mid_and_rid, + VideoFecGenerator* fec_generator) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.fec_generator = fec_generator; + config.populate_network2_timestamp = populate_network2; + config.always_send_mid_and_rid = always_send_mid_and_rid; + CreateSender(config); } - void SetUpRtpSender(bool pacer, - bool populate_network2, - bool always_send_mid_and_rid) { - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; + RtpRtcpInterface::Configuration GetDefaultConfig() { + RtpRtcpInterface::Configuration config; + config.clock = clock_; config.local_media_ssrc = kSsrc; config.rtx_send_ssrc = kRtxSsrc; - config.fec_generator = &flexfec_sender_; config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; 
config.retransmission_rate_limiter = &retransmission_rate_limiter_; - config.paced_sender = pacer ? &mock_paced_sender_ : nullptr; - config.populate_network2_timestamp = populate_network2; - config.rtp_stats_callback = &rtp_stats_callback_; - config.always_send_mid_and_rid = always_send_mid_and_rid; + config.paced_sender = &mock_paced_sender_; config.field_trials = &field_trials_; + return config; + } - rtp_sender_context_ = std::make_unique(config); - rtp_sender()->SetSequenceNumber(kSeqNum); - rtp_sender()->SetTimestampOffset(0); + void CreateSender(const RtpRtcpInterface::Configuration& config) { + packet_history_ = std::make_unique( + config.clock, config.enable_rtx_padding_prioritization); + rtp_sender_ = std::make_unique(config, packet_history_.get(), + config.paced_sender); + rtp_sender_->SetSequenceNumber(kSeqNum); + rtp_sender_->SetTimestampOffset(0); } - SimulatedClock fake_clock_; + GlobalSimulatedTimeController time_controller_; + Clock* const clock_; NiceMock mock_rtc_event_log_; MockRtpPacketPacer mock_paced_sender_; - StrictMock send_packet_observer_; - StrictMock feedback_observer_; RateLimiter retransmission_rate_limiter_; FlexfecSender flexfec_sender_; - std::unique_ptr rtp_sender_context_; + std::unique_ptr packet_history_; + std::unique_ptr rtp_sender_; - LoopbackTransportTest transport_; const bool kMarkerBit; FieldTrialConfig field_trials_; - StreamDataTestCallback rtp_stats_callback_; std::unique_ptr BuildRtpPacket(int payload_type, bool marker_bit, uint32_t timestamp, int64_t capture_time_ms) { - auto packet = rtp_sender()->AllocatePacket(); + auto packet = rtp_sender_->AllocatePacket(); packet->SetPayloadType(payload_type); packet->set_packet_type(RtpPacketMediaType::kVideo); packet->SetMarker(marker_bit); packet->SetTimestamp(timestamp); packet->set_capture_time_ms(capture_time_ms); - EXPECT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); + EXPECT_TRUE(rtp_sender_->AssignSequenceNumber(packet.get())); return packet; } @@ -344,22 
+210,26 @@ class RtpSenderTest : public ::testing::TestWithParam { packet->set_allow_retransmission(true); // Packet should be stored in a send bucket. - EXPECT_TRUE(rtp_sender()->SendToNetwork( - std::make_unique(*packet))); + EXPECT_TRUE( + rtp_sender_->SendToNetwork(std::make_unique(*packet))); return packet; } std::unique_ptr SendGenericPacket() { - const int64_t kCaptureTimeMs = fake_clock_.TimeInMilliseconds(); - return SendPacket(kCaptureTimeMs, sizeof(kPayloadData)); + const int64_t kCaptureTimeMs = clock_->TimeInMilliseconds(); + // Use maximum allowed size to catch corner cases when packet is dropped + // because of lack of capacity for the media packet, or for an rtx packet + // containing the media packet. + return SendPacket(kCaptureTimeMs, + /*payload_length=*/rtp_sender_->MaxRtpPacketSize() - + rtp_sender_->ExpectedPerPacketOverhead()); } size_t GenerateAndSendPadding(size_t target_size_bytes) { size_t generated_bytes = 0; - for (auto& packet : - rtp_sender()->GeneratePadding(target_size_bytes, true)) { + for (auto& packet : rtp_sender_->GeneratePadding(target_size_bytes, true)) { generated_bytes += packet->payload_size() + packet->padding_size(); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); + rtp_sender_->SendToNetwork(std::move(packet)); } return generated_bytes; } @@ -372,67 +242,56 @@ class RtpSenderTest : public ::testing::TestWithParam { // RTX needs to be able to read the source packets from the packet store. // Pick a number of packets to store big enough for any unit test. 
constexpr uint16_t kNumberOfPacketsToStore = 100; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, kNumberOfPacketsToStore); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); + rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload); + rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); } // Enable sending of the MID header extension for both the primary SSRC and // the RTX SSRC. void EnableMidSending(const std::string& mid) { - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionMid, kMidExtensionId); - rtp_sender()->SetMid(mid); + rtp_sender_->RegisterRtpHeaderExtension(RtpMid::kUri, kMidExtensionId); + rtp_sender_->SetMid(mid); } // Enable sending of the RSID header extension for the primary SSRC and the // RRSID header extension for the RTX SSRC. void EnableRidSending(const std::string& rid) { - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionRtpStreamId, - kRidExtensionId); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionRepairedRtpStreamId, - kRepairedRidExtensionId); - rtp_sender()->SetRid(rid); + rtp_sender_->RegisterRtpHeaderExtension(RtpStreamId::kUri, kRidExtensionId); + rtp_sender_->RegisterRtpHeaderExtension(RepairedRtpStreamId::kUri, + kRepairedRidExtensionId); + rtp_sender_->SetRid(rid); } }; -// TODO(pbos): Move tests over from WithoutPacer to RtpSenderTest as this is our -// default code path. -class RtpSenderTestWithoutPacer : public RtpSenderTest { - public: - void SetUp() override { SetUpRtpSender(false, false, false); } -}; - -TEST_P(RtpSenderTestWithoutPacer, AllocatePacketSetCsrc) { +TEST_F(RtpSenderTest, AllocatePacketSetCsrc) { // Configure rtp_sender with csrc. 
std::vector csrcs; csrcs.push_back(0x23456789); - rtp_sender()->SetCsrcs(csrcs); + rtp_sender_->SetCsrcs(csrcs); - auto packet = rtp_sender()->AllocatePacket(); + auto packet = rtp_sender_->AllocatePacket(); ASSERT_TRUE(packet); - EXPECT_EQ(rtp_sender()->SSRC(), packet->Ssrc()); + EXPECT_EQ(rtp_sender_->SSRC(), packet->Ssrc()); EXPECT_EQ(csrcs, packet->Csrcs()); } -TEST_P(RtpSenderTestWithoutPacer, AllocatePacketReserveExtensions) { +TEST_F(RtpSenderTest, AllocatePacketReserveExtensions) { // Configure rtp_sender with extensions. - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId)); - ASSERT_EQ(0, - rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId)); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionAudioLevel, kAudioLevelExtensionId)); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionVideoRotation, kVideoRotationExtensionId)); - - auto packet = rtp_sender()->AllocatePacket(); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransmissionOffset::kUri, kTransmissionTimeOffsetExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension(AudioLevel::kUri, + kAudioLevelExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + VideoOrientation::kUri, kVideoRotationExtensionId)); + + auto packet = rtp_sender_->AllocatePacket(); ASSERT_TRUE(packet); // Preallocate BWE extensions RtpSender set itself. 
@@ -444,998 +303,284 @@ TEST_P(RtpSenderTestWithoutPacer, AllocatePacketReserveExtensions) { EXPECT_FALSE(packet->HasExtension()); } -TEST_P(RtpSenderTestWithoutPacer, AssignSequenceNumberAdvanceSequenceNumber) { - auto packet = rtp_sender()->AllocatePacket(); - ASSERT_TRUE(packet); - const uint16_t sequence_number = rtp_sender()->SequenceNumber(); - - EXPECT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); - - EXPECT_EQ(sequence_number, packet->SequenceNumber()); - EXPECT_EQ(sequence_number + 1, rtp_sender()->SequenceNumber()); -} - -TEST_P(RtpSenderTestWithoutPacer, AssignSequenceNumberFailsOnNotSending) { - auto packet = rtp_sender()->AllocatePacket(); - ASSERT_TRUE(packet); - - rtp_sender()->SetSendingMediaStatus(false); - EXPECT_FALSE(rtp_sender()->AssignSequenceNumber(packet.get())); -} - -TEST_P(RtpSenderTestWithoutPacer, AssignSequenceNumberMayAllowPaddingOnVideo) { - constexpr size_t kPaddingSize = 100; - auto packet = rtp_sender()->AllocatePacket(); - ASSERT_TRUE(packet); - - ASSERT_TRUE(rtp_sender()->GeneratePadding(kPaddingSize, true).empty()); - packet->SetMarker(false); - ASSERT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); - // Packet without marker bit doesn't allow padding on video stream. - ASSERT_TRUE(rtp_sender()->GeneratePadding(kPaddingSize, true).empty()); - - packet->SetMarker(true); - ASSERT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); - // Packet with marker bit allows send padding. 
- ASSERT_FALSE(rtp_sender()->GeneratePadding(kPaddingSize, true).empty()); -} - -TEST_P(RtpSenderTest, AssignSequenceNumberAllowsPaddingOnAudio) { - MockTransport transport; - RtpRtcp::Configuration config; +TEST_F(RtpSenderTest, PaddingAlwaysAllowedOnAudio) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); config.audio = true; - config.clock = &fake_clock_; - config.outgoing_transport = &transport; - config.paced_sender = &mock_paced_sender_; - config.local_media_ssrc = kSsrc; - config.event_log = &mock_rtc_event_log_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); - - rtp_sender()->SetTimestampOffset(0); - - std::unique_ptr audio_packet = - rtp_sender()->AllocatePacket(); + CreateSender(config); + + std::unique_ptr audio_packet = rtp_sender_->AllocatePacket(); // Padding on audio stream allowed regardless of marker in the last packet. audio_packet->SetMarker(false); audio_packet->SetPayloadType(kPayload); - rtp_sender()->AssignSequenceNumber(audio_packet.get()); + rtp_sender_->AssignSequenceNumber(audio_packet.get()); const size_t kPaddingSize = 59; - EXPECT_CALL(transport, SendRtp(_, kPaddingSize + kRtpHeaderSize, _)) - .WillOnce(Return(true)); + + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property(&RtpPacketToSend::packet_type, + RtpPacketMediaType::kPadding)), + Pointee(Property(&RtpPacketToSend::padding_size, kPaddingSize)))))); EXPECT_EQ(kPaddingSize, GenerateAndSendPadding(kPaddingSize)); // Requested padding size is too small, will send a larger one. 
const size_t kMinPaddingSize = 50; - EXPECT_CALL(transport, SendRtp(_, kMinPaddingSize + kRtpHeaderSize, _)) - .WillOnce(Return(true)); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre( + AllOf(Pointee(Property(&RtpPacketToSend::packet_type, + RtpPacketMediaType::kPadding)), + Pointee(Property(&RtpPacketToSend::padding_size, + kMinPaddingSize)))))); EXPECT_EQ(kMinPaddingSize, GenerateAndSendPadding(kMinPaddingSize - 5)); } -TEST_P(RtpSenderTestWithoutPacer, AssignSequenceNumberSetPaddingTimestamps) { - constexpr size_t kPaddingSize = 100; - auto packet = rtp_sender()->AllocatePacket(); - ASSERT_TRUE(packet); - packet->SetMarker(true); - packet->SetTimestamp(kTimestamp); - - ASSERT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); - auto padding_packets = rtp_sender()->GeneratePadding(kPaddingSize, true); - - ASSERT_EQ(1u, padding_packets.size()); - // Verify padding packet timestamp. - EXPECT_EQ(kTimestamp, padding_packets[0]->Timestamp()); -} - -TEST_P(RtpSenderTestWithoutPacer, - TransportFeedbackObserverGetsCorrectByteCount) { - constexpr size_t kRtpOverheadBytesPerPacket = 12 + 8; - - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.transport_feedback_callback = &feedback_observer_; - config.event_log = &mock_rtc_event_log_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - config.field_trials = &field_trials_; - rtp_sender_context_ = std::make_unique(config); - - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - - const size_t expected_bytes = - GetParam().with_overhead - ? 
sizeof(kPayloadData) + kRtpOverheadBytesPerPacket - : sizeof(kPayloadData); - - EXPECT_CALL(feedback_observer_, - OnAddPacket(AllOf( - Field(&RtpPacketSendInfo::ssrc, rtp_sender()->SSRC()), - Field(&RtpPacketSendInfo::transport_sequence_number, - kTransportSequenceNumber), - Field(&RtpPacketSendInfo::rtp_sequence_number, - rtp_sender()->SequenceNumber()), - Field(&RtpPacketSendInfo::length, expected_bytes), - Field(&RtpPacketSendInfo::pacing_info, PacedPacketInfo())))) - .Times(1); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), - kRtpOverheadBytesPerPacket); - SendGenericPacket(); -} - -TEST_P(RtpSenderTestWithoutPacer, SendsPacketsWithTransportSequenceNumber) { - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.transport_feedback_callback = &feedback_observer_; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); - - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - - EXPECT_CALL(send_packet_observer_, - OnSendPacket(kTransportSequenceNumber, _, _)) - .Times(1); - - EXPECT_CALL(feedback_observer_, - OnAddPacket(AllOf( - Field(&RtpPacketSendInfo::ssrc, rtp_sender()->SSRC()), - Field(&RtpPacketSendInfo::transport_sequence_number, - kTransportSequenceNumber), - Field(&RtpPacketSendInfo::rtp_sequence_number, - rtp_sender()->SequenceNumber()), - Field(&RtpPacketSendInfo::pacing_info, PacedPacketInfo())))) - .Times(1); - - SendGenericPacket(); - - const auto& packet = transport_.last_sent_packet(); - uint16_t transport_seq_no; - ASSERT_TRUE(packet.GetExtension(&transport_seq_no)); - EXPECT_EQ(kTransportSequenceNumber, transport_seq_no); - EXPECT_EQ(transport_.last_options_.packet_id, transport_seq_no); - 
EXPECT_TRUE(transport_.last_options_.included_in_allocation); -} - -TEST_P(RtpSenderTestWithoutPacer, PacketOptionsNoRetransmission) { - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.transport_feedback_callback = &feedback_observer_; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); - - SendGenericPacket(); - - EXPECT_FALSE(transport_.last_options_.is_retransmit); -} - -TEST_P(RtpSenderTestWithoutPacer, - SetsIncludedInFeedbackWhenTransportSequenceNumberExtensionIsRegistered) { - SetUpRtpSender(false, false, false); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId); - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - SendGenericPacket(); - EXPECT_TRUE(transport_.last_options_.included_in_feedback); -} - -TEST_P( - RtpSenderTestWithoutPacer, - SetsIncludedInAllocationWhenTransportSequenceNumberExtensionIsRegistered) { - SetUpRtpSender(false, false, false); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId); - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - SendGenericPacket(); - EXPECT_TRUE(transport_.last_options_.included_in_allocation); -} - -TEST_P(RtpSenderTestWithoutPacer, - SetsIncludedInAllocationWhenForcedAsPartOfAllocation) { - SetUpRtpSender(false, false, false); - rtp_egress()->ForceIncludeSendPacketsInAllocation(true); - SendGenericPacket(); - EXPECT_FALSE(transport_.last_options_.included_in_feedback); - EXPECT_TRUE(transport_.last_options_.included_in_allocation); -} - -TEST_P(RtpSenderTestWithoutPacer, DoesnSetIncludedInAllocationByDefault) { - SetUpRtpSender(false, false, false); - SendGenericPacket(); - 
EXPECT_FALSE(transport_.last_options_.included_in_feedback); - EXPECT_FALSE(transport_.last_options_.included_in_allocation); -} - -TEST_P(RtpSenderTestWithoutPacer, OnSendSideDelayUpdated) { - StrictMock send_side_delay_observer_; - - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.send_side_delay_observer = &send_side_delay_observer_; - config.event_log = &mock_rtc_event_log_; - rtp_sender_context_ = std::make_unique(config); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - - const uint8_t kPayloadType = 127; - const absl::optional kCodecType = - VideoCodecType::kVideoCodecGeneric; - - const uint32_t kCaptureTimeMsToRtpTimestamp = 90; // 90 kHz clock - RTPVideoHeader video_header; - - // Send packet with 10 ms send-side delay. The average, max and total should - // be 10 ms. - EXPECT_CALL(send_side_delay_observer_, - SendSideDelayUpdated(10, 10, 10, kSsrc)) - .Times(1); - int64_t capture_time_ms = fake_clock_.TimeInMilliseconds(); - fake_clock_.AdvanceTimeMilliseconds(10); - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp, - capture_time_ms, kPayloadData, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - - // Send another packet with 20 ms delay. The average, max and total should be - // 15, 20 and 30 ms respectively. 
- EXPECT_CALL(send_side_delay_observer_, - SendSideDelayUpdated(15, 20, 30, kSsrc)) - .Times(1); - fake_clock_.AdvanceTimeMilliseconds(10); - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp, - capture_time_ms, kPayloadData, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - - // Send another packet at the same time, which replaces the last packet. - // Since this packet has 0 ms delay, the average is now 5 ms and max is 10 ms. - // The total counter stays the same though. - // TODO(terelius): Is is not clear that this is the right behavior. - EXPECT_CALL(send_side_delay_observer_, SendSideDelayUpdated(5, 10, 30, kSsrc)) - .Times(1); - capture_time_ms = fake_clock_.TimeInMilliseconds(); - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp, - capture_time_ms, kPayloadData, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - - // Send a packet 1 second later. The earlier packets should have timed - // out, so both max and average should be the delay of this packet. The total - // keeps increasing. 
- fake_clock_.AdvanceTimeMilliseconds(1000); - capture_time_ms = fake_clock_.TimeInMilliseconds(); - fake_clock_.AdvanceTimeMilliseconds(1); - EXPECT_CALL(send_side_delay_observer_, SendSideDelayUpdated(1, 1, 31, kSsrc)) - .Times(1); - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kPayloadType, kCodecType, capture_time_ms * kCaptureTimeMsToRtpTimestamp, - capture_time_ms, kPayloadData, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); -} - -TEST_P(RtpSenderTestWithoutPacer, OnSendPacketUpdated) { - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - EXPECT_CALL(send_packet_observer_, - OnSendPacket(kTransportSequenceNumber, _, _)) - .Times(1); - - SendGenericPacket(); -} - -TEST_P(RtpSenderTest, SendsPacketsWithTransportSequenceNumber) { - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.paced_sender = &mock_paced_sender_; - config.local_media_ssrc = kSsrc; - config.transport_feedback_callback = &feedback_observer_; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); - - rtp_sender()->SetSequenceNumber(kSeqNum); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - - EXPECT_CALL(send_packet_observer_, - OnSendPacket(kTransportSequenceNumber, _, _)) - .Times(1); - EXPECT_CALL(feedback_observer_, - OnAddPacket(AllOf( - Field(&RtpPacketSendInfo::ssrc, rtp_sender()->SSRC()), - Field(&RtpPacketSendInfo::transport_sequence_number, - kTransportSequenceNumber), - 
Field(&RtpPacketSendInfo::rtp_sequence_number, - rtp_sender()->SequenceNumber()), - Field(&RtpPacketSendInfo::pacing_info, PacedPacketInfo())))) - .Times(1); +TEST_F(RtpSenderTest, SendToNetworkForwardsPacketsToPacer) { + auto packet = BuildRtpPacket(kPayload, kMarkerBit, kTimestamp, 0); + int64_t now_ms = clock_->TimeInMilliseconds(); EXPECT_CALL( mock_paced_sender_, - EnqueuePackets(Contains(AllOf( + EnqueuePackets(ElementsAre(AllOf( Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), - Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)))))); - auto packet = SendGenericPacket(); - packet->set_packet_type(RtpPacketMediaType::kVideo); - // Transport sequence number is set by PacketRouter, before SendPacket(). - packet->SetExtension(kTransportSequenceNumber); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - uint16_t transport_seq_no; + Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)), + Pointee(Property(&RtpPacketToSend::capture_time_ms, now_ms)))))); EXPECT_TRUE( - transport_.last_sent_packet().GetExtension( - &transport_seq_no)); - EXPECT_EQ(kTransportSequenceNumber, transport_seq_no); - EXPECT_EQ(transport_.last_options_.packet_id, transport_seq_no); + rtp_sender_->SendToNetwork(std::make_unique(*packet))); } -TEST_P(RtpSenderTest, WritesPacerExitToTimingExtension) { - rtp_sender_context_->packet_history_.SetStorePacketsStatus( +TEST_F(RtpSenderTest, ReSendPacketForwardsPacketsToPacer) { + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 10); - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionVideoTiming, kVideoTimingExtensionId)); - int64_t capture_time_ms = fake_clock_.TimeInMilliseconds(); - auto packet = rtp_sender()->AllocatePacket(); - packet->SetPayloadType(kPayload); - packet->SetMarker(true); - packet->SetTimestamp(kTimestamp); - packet->set_capture_time_ms(capture_time_ms); - const VideoSendTiming kVideoTiming = {0u, 0u, 0u, 0u, 0u, 0u, true}; - 
packet->SetExtension(kVideoTiming); - EXPECT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); - size_t packet_size = packet->size(); - - const int kStoredTimeInMs = 100; - packet->set_packet_type(RtpPacketMediaType::kVideo); - packet->set_allow_retransmission(true); - EXPECT_CALL(mock_paced_sender_, EnqueuePackets(Contains(Pointee(Property( - &RtpPacketToSend::Ssrc, kSsrc))))); - EXPECT_TRUE( - rtp_sender()->SendToNetwork(std::make_unique(*packet))); - fake_clock_.AdvanceTimeMilliseconds(kStoredTimeInMs); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(1, transport_.packets_sent()); - EXPECT_EQ(packet_size, transport_.last_sent_packet().size()); - - VideoSendTiming video_timing; - EXPECT_TRUE(transport_.last_sent_packet().GetExtension( - &video_timing)); - EXPECT_EQ(kStoredTimeInMs, video_timing.pacer_exit_delta_ms); -} - -TEST_P(RtpSenderTest, WritesNetwork2ToTimingExtensionWithPacer) { - SetUpRtpSender(/*pacer=*/true, /*populate_network2=*/true, false); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionVideoTiming, kVideoTimingExtensionId)); - int64_t capture_time_ms = fake_clock_.TimeInMilliseconds(); - auto packet = rtp_sender()->AllocatePacket(); - packet->SetPayloadType(kPayload); - packet->SetMarker(true); - packet->SetTimestamp(kTimestamp); - packet->set_capture_time_ms(capture_time_ms); - const uint16_t kPacerExitMs = 1234u; - const VideoSendTiming kVideoTiming = {0u, 0u, 0u, kPacerExitMs, 0u, 0u, true}; - packet->SetExtension(kVideoTiming); - EXPECT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); - size_t packet_size = packet->size(); - - const int kStoredTimeInMs = 100; - - packet->set_packet_type(RtpPacketMediaType::kVideo); + int64_t now_ms = clock_->TimeInMilliseconds(); + auto packet = BuildRtpPacket(kPayload, kMarkerBit, kTimestamp, now_ms); + uint16_t seq_no = 
packet->SequenceNumber(); packet->set_allow_retransmission(true); - EXPECT_CALL(mock_paced_sender_, EnqueuePackets(Contains(Pointee(Property( - &RtpPacketToSend::Ssrc, kSsrc))))); - EXPECT_TRUE( - rtp_sender()->SendToNetwork(std::make_unique(*packet))); - fake_clock_.AdvanceTimeMilliseconds(kStoredTimeInMs); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - EXPECT_EQ(1, transport_.packets_sent()); - EXPECT_EQ(packet_size, transport_.last_sent_packet().size()); - - VideoSendTiming video_timing; - EXPECT_TRUE(transport_.last_sent_packet().GetExtension( - &video_timing)); - EXPECT_EQ(kStoredTimeInMs, video_timing.network2_timestamp_delta_ms); - EXPECT_EQ(kPacerExitMs, video_timing.pacer_exit_delta_ms); -} - -TEST_P(RtpSenderTest, WritesNetwork2ToTimingExtensionWithoutPacer) { - SetUpRtpSender(/*pacer=*/false, /*populate_network2=*/true, false); - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionVideoTiming, kVideoTimingExtensionId)); - auto packet = rtp_sender()->AllocatePacket(); - packet->SetMarker(true); - packet->set_capture_time_ms(fake_clock_.TimeInMilliseconds()); - const VideoSendTiming kVideoTiming = {0u, 0u, 0u, 0u, 0u, 0u, true}; - packet->SetExtension(kVideoTiming); - packet->set_allow_retransmission(true); - EXPECT_TRUE(rtp_sender()->AssignSequenceNumber(packet.get())); - packet->set_packet_type(RtpPacketMediaType::kVideo); - - const int kPropagateTimeMs = 10; - fake_clock_.AdvanceTimeMilliseconds(kPropagateTimeMs); + packet_history_->PutRtpPacket(std::move(packet), now_ms); - EXPECT_TRUE(rtp_sender()->SendToNetwork(std::move(packet))); - - EXPECT_EQ(1, transport_.packets_sent()); - absl::optional video_timing = - transport_.last_sent_packet().GetExtension(); - ASSERT_TRUE(video_timing); - EXPECT_EQ(kPropagateTimeMs, video_timing->network2_timestamp_delta_ms); -} - -TEST_P(RtpSenderTest, TrafficSmoothingWithExtensions) { - EXPECT_CALL(mock_rtc_event_log_, - 
LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing))); - - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId)); - EXPECT_EQ(0, - rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId)); - int64_t capture_time_ms = fake_clock_.TimeInMilliseconds(); - auto packet = - BuildRtpPacket(kPayload, kMarkerBit, kTimestamp, capture_time_ms); - size_t packet_size = packet->size(); - - const int kStoredTimeInMs = 100; - EXPECT_CALL( - mock_paced_sender_, - EnqueuePackets(Contains(AllOf( - Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), - Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)))))); - packet->set_packet_type(RtpPacketMediaType::kVideo); - packet->set_allow_retransmission(true); - EXPECT_TRUE( - rtp_sender()->SendToNetwork(std::make_unique(*packet))); - EXPECT_EQ(0, transport_.packets_sent()); - fake_clock_.AdvanceTimeMilliseconds(kStoredTimeInMs); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - // Process send bucket. Packet should now be sent. - EXPECT_EQ(1, transport_.packets_sent()); - EXPECT_EQ(packet_size, transport_.last_sent_packet().size()); - - webrtc::RTPHeader rtp_header; - transport_.last_sent_packet().GetHeader(&rtp_header); - - // Verify transmission time offset. 
- EXPECT_EQ(kStoredTimeInMs * 90, rtp_header.extension.transmissionTimeOffset); - uint64_t expected_send_time = - ConvertMsToAbsSendTime(fake_clock_.TimeInMilliseconds()); - EXPECT_EQ(expected_send_time, rtp_header.extension.absoluteSendTime); -} - -TEST_P(RtpSenderTest, TrafficSmoothingRetransmits) { - EXPECT_CALL(mock_rtc_event_log_, - LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing))); - - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId)); - EXPECT_EQ(0, - rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId)); - int64_t capture_time_ms = fake_clock_.TimeInMilliseconds(); - auto packet = - BuildRtpPacket(kPayload, kMarkerBit, kTimestamp, capture_time_ms); - size_t packet_size = packet->size(); - - // Packet should be stored in a send bucket. - EXPECT_CALL( - mock_paced_sender_, - EnqueuePackets(Contains(AllOf( - Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), - Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)))))); - packet->set_packet_type(RtpPacketMediaType::kVideo); - packet->set_allow_retransmission(true); - EXPECT_TRUE( - rtp_sender()->SendToNetwork(std::make_unique(*packet))); - // Immediately process send bucket and send packet. - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - EXPECT_EQ(1, transport_.packets_sent()); - - // Retransmit packet. 
- const int kStoredTimeInMs = 100; - fake_clock_.AdvanceTimeMilliseconds(kStoredTimeInMs); - - EXPECT_CALL(mock_rtc_event_log_, - LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing))); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - packet->set_retransmitted_sequence_number(kSeqNum); - EXPECT_CALL( - mock_paced_sender_, - EnqueuePackets(Contains(AllOf( - Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), - Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)))))); - EXPECT_EQ(static_cast(packet_size), rtp_sender()->ReSendPacket(kSeqNum)); - EXPECT_EQ(1, transport_.packets_sent()); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - // Process send bucket. Packet should now be sent. - EXPECT_EQ(2, transport_.packets_sent()); - EXPECT_EQ(packet_size, transport_.last_sent_packet().size()); - - webrtc::RTPHeader rtp_header; - transport_.last_sent_packet().GetHeader(&rtp_header); - - // Verify transmission time offset. - EXPECT_EQ(kStoredTimeInMs * 90, rtp_header.extension.transmissionTimeOffset); - uint64_t expected_send_time = - ConvertMsToAbsSendTime(fake_clock_.TimeInMilliseconds()); - EXPECT_EQ(expected_send_time, rtp_header.extension.absoluteSendTime); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), + Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)), + Pointee(Property(&RtpPacketToSend::capture_time_ms, now_ms)), + Pointee(Property(&RtpPacketToSend::packet_type, + RtpPacketMediaType::kRetransmission)))))); + EXPECT_TRUE(rtp_sender_->ReSendPacket(seq_no)); } // This test sends 1 regular video packet, then 4 padding packets, and then // 1 more regular packet. -TEST_P(RtpSenderTest, SendPadding) { - // Make all (non-padding) packets go to send queue. 
- EXPECT_CALL(mock_rtc_event_log_, - LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing))) - .Times(1 + 4 + 1); - - uint16_t seq_num = kSeqNum; - uint32_t timestamp = kTimestamp; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - size_t rtp_header_len = kRtpHeaderSize; - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId)); - rtp_header_len += 4; // 4 bytes extension. - EXPECT_EQ(0, - rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId)); - rtp_header_len += 4; // 4 bytes extension. - rtp_header_len += 4; // 4 extra bytes common to all extension headers. - - webrtc::RTPHeader rtp_header; - - int64_t capture_time_ms = fake_clock_.TimeInMilliseconds(); - auto packet = - BuildRtpPacket(kPayload, kMarkerBit, timestamp, capture_time_ms); - const uint32_t media_packet_timestamp = timestamp; - size_t packet_size = packet->size(); - int total_packets_sent = 0; - const int kStoredTimeInMs = 100; - - // Packet should be stored in a send bucket. - EXPECT_CALL( - mock_paced_sender_, - EnqueuePackets(Contains(AllOf( - Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), - Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)))))); - packet->set_packet_type(RtpPacketMediaType::kVideo); - packet->set_allow_retransmission(true); - EXPECT_TRUE( - rtp_sender()->SendToNetwork(std::make_unique(*packet))); - EXPECT_EQ(total_packets_sent, transport_.packets_sent()); - fake_clock_.AdvanceTimeMilliseconds(kStoredTimeInMs); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - ++seq_num; - - // Packet should now be sent. This test doesn't verify the regular video - // packet, since it is tested in another test. - EXPECT_EQ(++total_packets_sent, transport_.packets_sent()); - timestamp += 90 * kStoredTimeInMs; - - // Send padding 4 times, waiting 50 ms between each. 
- for (int i = 0; i < 4; ++i) { - const int kPaddingPeriodMs = 50; - const size_t kPaddingBytes = 100; - const size_t kMaxPaddingLength = 224; // Value taken from rtp_sender.cc. - // Padding will be forced to full packets. - EXPECT_EQ(kMaxPaddingLength, GenerateAndSendPadding(kPaddingBytes)); - - // Process send bucket. Padding should now be sent. - EXPECT_EQ(++total_packets_sent, transport_.packets_sent()); - EXPECT_EQ(kMaxPaddingLength + rtp_header_len, - transport_.last_sent_packet().size()); - - transport_.last_sent_packet().GetHeader(&rtp_header); - EXPECT_EQ(kMaxPaddingLength, rtp_header.paddingLength); - - // Verify sequence number and timestamp. The timestamp should be the same - // as the last media packet. - EXPECT_EQ(seq_num++, rtp_header.sequenceNumber); - EXPECT_EQ(media_packet_timestamp, rtp_header.timestamp); - // Verify transmission time offset. - int offset = timestamp - media_packet_timestamp; - EXPECT_EQ(offset, rtp_header.extension.transmissionTimeOffset); - uint64_t expected_send_time = - ConvertMsToAbsSendTime(fake_clock_.TimeInMilliseconds()); - EXPECT_EQ(expected_send_time, rtp_header.extension.absoluteSendTime); - fake_clock_.AdvanceTimeMilliseconds(kPaddingPeriodMs); - timestamp += 90 * kPaddingPeriodMs; +TEST_F(RtpSenderTest, SendPadding) { + constexpr int kNumPaddingPackets = 4; + EXPECT_CALL(mock_paced_sender_, EnqueuePackets); + std::unique_ptr media_packet = + SendPacket(/*capture_time_ms=*/clock_->TimeInMilliseconds(), + /*payload_size=*/100); + + // Wait 50 ms before generating each padding packet. + for (int i = 0; i < kNumPaddingPackets; ++i) { + time_controller_.AdvanceTime(TimeDelta::Millis(50)); + const size_t kPaddingTargetBytes = 100; // Request 100 bytes of padding. + + // Padding should be sent on the media ssrc, with a continous sequence + // number range. Size will be forced to full pack size and the timestamp + // shall be that of the last media packet. 
+ EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), + Pointee(Property(&RtpPacketToSend::SequenceNumber, + media_packet->SequenceNumber() + i + 1)), + Pointee(Property(&RtpPacketToSend::padding_size, + kMaxPaddingLength)), + Pointee(Property(&RtpPacketToSend::Timestamp, + media_packet->Timestamp())))))); + std::vector> padding_packets = + rtp_sender_->GeneratePadding(kPaddingTargetBytes, + /*media_has_been_sent=*/true); + ASSERT_THAT(padding_packets, SizeIs(1)); + rtp_sender_->SendToNetwork(std::move(padding_packets[0])); } // Send a regular video packet again. - capture_time_ms = fake_clock_.TimeInMilliseconds(); - packet = BuildRtpPacket(kPayload, kMarkerBit, timestamp, capture_time_ms); - packet_size = packet->size(); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property( + &RtpPacketToSend::SequenceNumber, + media_packet->SequenceNumber() + kNumPaddingPackets + 1)), + Pointee(Property(&RtpPacketToSend::Timestamp, + Gt(media_packet->Timestamp()))))))); - packet->set_packet_type(RtpPacketMediaType::kVideo); - packet->set_allow_retransmission(true); - EXPECT_CALL( - mock_paced_sender_, - EnqueuePackets(Contains(AllOf( - Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), - Pointee(Property(&RtpPacketToSend::SequenceNumber, seq_num)))))); - EXPECT_TRUE( - rtp_sender()->SendToNetwork(std::make_unique(*packet))); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - // Process send bucket. - EXPECT_EQ(++total_packets_sent, transport_.packets_sent()); - EXPECT_EQ(packet_size, transport_.last_sent_packet().size()); - transport_.last_sent_packet().GetHeader(&rtp_header); - - // Verify sequence number and timestamp. - EXPECT_EQ(seq_num, rtp_header.sequenceNumber); - EXPECT_EQ(timestamp, rtp_header.timestamp); - // Verify transmission time offset. This packet is sent without delay. 
- EXPECT_EQ(0, rtp_header.extension.transmissionTimeOffset); - uint64_t expected_send_time = - ConvertMsToAbsSendTime(fake_clock_.TimeInMilliseconds()); - EXPECT_EQ(expected_send_time, rtp_header.extension.absoluteSendTime); + std::unique_ptr next_media_packet = + SendPacket(/*capture_time_ms=*/clock_->TimeInMilliseconds(), + /*payload_size=*/100); } -TEST_P(RtpSenderTest, OnSendPacketUpdated) { - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - - EXPECT_CALL(send_packet_observer_, - OnSendPacket(kTransportSequenceNumber, _, _)) - .Times(1); +TEST_F(RtpSenderTest, NoPaddingAsFirstPacketWithoutBweExtensions) { + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), + IsEmpty()); - EXPECT_CALL( - mock_paced_sender_, - EnqueuePackets(Contains(AllOf( - Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), - Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)))))); - auto packet = SendGenericPacket(); - packet->set_packet_type(RtpPacketMediaType::kVideo); - packet->SetExtension(kTransportSequenceNumber); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - EXPECT_EQ(1, transport_.packets_sent()); -} - -TEST_P(RtpSenderTest, OnSendPacketNotUpdatedForRetransmits) { - EXPECT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - - EXPECT_CALL(send_packet_observer_, OnSendPacket(_, _, _)).Times(0); - - EXPECT_CALL( - mock_paced_sender_, - EnqueuePackets(Contains(AllOf( - Pointee(Property(&RtpPacketToSend::Ssrc, kSsrc)), - Pointee(Property(&RtpPacketToSend::SequenceNumber, kSeqNum)))))); - auto packet = SendGenericPacket(); 
- packet->set_packet_type(RtpPacketMediaType::kRetransmission); - packet->SetExtension(kTransportSequenceNumber); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - EXPECT_EQ(1, transport_.packets_sent()); - EXPECT_TRUE(transport_.last_options_.is_retransmit); -} - -TEST_P(RtpSenderTestWithoutPacer, SendGenericVideo) { - const uint8_t kPayloadType = 127; - const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - uint8_t payload[] = {47, 11, 32, 93, 89}; - - // Send keyframe - RTPVideoHeader video_header; - video_header.frame_type = VideoFrameType::kVideoFrameKey; - ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321, - payload, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - - auto sent_payload = transport_.last_sent_packet().payload(); - uint8_t generic_header = sent_payload[0]; - EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kKeyFrameBit); - EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kFirstPacketBit); - EXPECT_THAT(sent_payload.subview(1), ElementsAreArray(payload)); - - // Send delta frame - payload[0] = 13; - payload[1] = 42; - payload[4] = 13; - - video_header.frame_type = VideoFrameType::kVideoFrameDelta; - ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321, - payload, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - - sent_payload = transport_.last_sent_packet().payload(); - generic_header = sent_payload[0]; - EXPECT_FALSE(generic_header & RtpFormatVideoGeneric::kKeyFrameBit); - EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kFirstPacketBit); - EXPECT_THAT(sent_payload.subview(1), ElementsAreArray(payload)); + // Don't send padding before media even with RTX. 
+ EnableRtx(); + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), + IsEmpty()); } -TEST_P(RtpSenderTestWithoutPacer, SendRawVideo) { - const uint8_t kPayloadType = 111; - const uint8_t payload[] = {11, 22, 33, 44, 55}; - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); +TEST_F(RtpSenderTest, AllowPaddingAsFirstPacketOnRtxWithTransportCc) { + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); - // Send a frame. - RTPVideoHeader video_header; - video_header.frame_type = VideoFrameType::kVideoFrameKey; - ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, absl::nullopt, 1234, - 4321, payload, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); + // Padding can't be sent as first packet on media SSRC since we don't know + // what payload type to assign. + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), + IsEmpty()); - auto sent_payload = transport_.last_sent_packet().payload(); - EXPECT_THAT(sent_payload, ElementsAreArray(payload)); + // With transportcc padding can be sent as first packet on the RTX SSRC. 
+ EnableRtx(); + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), + Not(IsEmpty())); } -TEST_P(RtpSenderTest, SendFlexfecPackets) { - constexpr uint32_t kTimestamp = 1234; - constexpr int kMediaPayloadType = 127; - constexpr VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - constexpr int kFlexfecPayloadType = 118; - const std::vector kNoRtpExtensions; - const std::vector kNoRtpExtensionSizes; - FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexFecSsrc, kSsrc, kNoMid, - kNoRtpExtensions, kNoRtpExtensionSizes, - nullptr /* rtp_state */, &fake_clock_); - - // Reset |rtp_sender_| to use FlexFEC. - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.paced_sender = &mock_paced_sender_; - config.local_media_ssrc = kSsrc; - config.fec_generator = &flexfec_sender_; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); - - rtp_sender()->SetSequenceNumber(kSeqNum); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.fec_generator = &flexfec_sender; - video_config.fec_type = flexfec_sender.GetFecType(); - video_config.fec_overhead_bytes = flexfec_sender.MaxPacketOverhead(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); +TEST_F(RtpSenderTest, AllowPaddingAsFirstPacketOnRtxWithAbsSendTime) { + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); - // Parameters selected to generate a single FEC packet per media packet. 
- FecProtectionParams params; - params.fec_rate = 15; - params.max_fec_frames = 1; - params.fec_mask_type = kFecMaskRandom; - flexfec_sender.SetProtectionParameters(params, params); + // Padding can't be sent as first packet on media SSRC since we don't know + // what payload type to assign. + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), + IsEmpty()); - uint16_t flexfec_seq_num; - RTPVideoHeader video_header; - - std::unique_ptr media_packet; - std::unique_ptr fec_packet; - - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - for (auto& packet : packets) { - if (packet->packet_type() == RtpPacketMediaType::kVideo) { - EXPECT_EQ(packet->Ssrc(), kSsrc); - EXPECT_EQ(packet->SequenceNumber(), kSeqNum); - media_packet = std::move(packet); - } else { - EXPECT_EQ(packet->packet_type(), - RtpPacketMediaType::kForwardErrorCorrection); - EXPECT_EQ(packet->Ssrc(), kFlexFecSsrc); - fec_packet = std::move(packet); - } - } - }); - - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kMediaPayloadType, kCodecType, kTimestamp, - fake_clock_.TimeInMilliseconds(), kPayloadData, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - ASSERT_TRUE(media_packet != nullptr); - ASSERT_TRUE(fec_packet != nullptr); - - flexfec_seq_num = fec_packet->SequenceNumber(); - rtp_egress()->SendPacket(media_packet.get(), PacedPacketInfo()); - rtp_egress()->SendPacket(fec_packet.get(), PacedPacketInfo()); - - ASSERT_EQ(2, transport_.packets_sent()); - const RtpPacketReceived& sent_media_packet = transport_.sent_packets_[0]; - EXPECT_EQ(kMediaPayloadType, sent_media_packet.PayloadType()); - EXPECT_EQ(kSeqNum, sent_media_packet.SequenceNumber()); - EXPECT_EQ(kSsrc, sent_media_packet.Ssrc()); - const RtpPacketReceived& sent_flexfec_packet = transport_.sent_packets_[1]; - EXPECT_EQ(kFlexfecPayloadType, sent_flexfec_packet.PayloadType()); - 
EXPECT_EQ(flexfec_seq_num, sent_flexfec_packet.SequenceNumber()); - EXPECT_EQ(kFlexFecSsrc, sent_flexfec_packet.Ssrc()); + // With abs send time, padding can be sent as first packet on the RTX SSRC. + EnableRtx(); + EXPECT_THAT(rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/false), + Not(IsEmpty())); } -TEST_P(RtpSenderTestWithoutPacer, SendFlexfecPackets) { - constexpr uint32_t kTimestamp = 1234; - constexpr int kMediaPayloadType = 127; - constexpr VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - constexpr int kFlexfecPayloadType = 118; - const std::vector kNoRtpExtensions; - const std::vector kNoRtpExtensionSizes; - FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexFecSsrc, kSsrc, kNoMid, - kNoRtpExtensions, kNoRtpExtensionSizes, - nullptr /* rtp_state */, &fake_clock_); - - // Reset |rtp_sender_| to use FlexFEC. - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.fec_generator = &flexfec_sender; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); - - rtp_sender()->SetSequenceNumber(kSeqNum); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.fec_generator = &flexfec_sender; - video_config.fec_type = flexfec_sender.GetFecType(); - video_config.fec_overhead_bytes = flexfec_sender_.MaxPacketOverhead(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); +TEST_F(RtpSenderTest, UpdatesTimestampsOnPlainRtxPadding) { + EnableRtx(); + // Timestamps as set based on capture time in RtpSenderTest. 
+ const int64_t start_time = clock_->TimeInMilliseconds(); + const uint32_t start_timestamp = start_time * kTimestampTicksPerMs; - // Parameters selected to generate a single FEC packet per media packet. - FecProtectionParams params; - params.fec_rate = 15; - params.max_fec_frames = 1; - params.fec_mask_type = kFecMaskRandom; - flexfec_sender.SetProtectionParameters(params, params); + // Start by sending one media packet. + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property(&RtpPacketToSend::padding_size, 0u)), + Pointee(Property(&RtpPacketToSend::Timestamp, start_timestamp)), + Pointee(Property(&RtpPacketToSend::capture_time_ms, start_time)))))); + std::unique_ptr media_packet = + SendPacket(start_time, /*payload_size=*/600); + + // Advance time before sending padding. + const TimeDelta kTimeDiff = TimeDelta::Millis(17); + time_controller_.AdvanceTime(kTimeDiff); + + // Timestamps on padding should be offset from the sent media. + EXPECT_THAT( + rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/true), + Each(AllOf( + Pointee(Property(&RtpPacketToSend::padding_size, kMaxPaddingLength)), + Pointee(Property( + &RtpPacketToSend::Timestamp, + start_timestamp + (kTimestampTicksPerMs * kTimeDiff.ms()))), + Pointee(Property(&RtpPacketToSend::capture_time_ms, + start_time + kTimeDiff.ms()))))); +} + +TEST_F(RtpSenderTest, KeepsTimestampsOnPayloadPadding) { + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); + EnableRtx(); + // Timestamps as set based on capture time in RtpSenderTest. 
+ const int64_t start_time = clock_->TimeInMilliseconds(); + const uint32_t start_timestamp = start_time * kTimestampTicksPerMs; + const size_t kPayloadSize = 600; + const size_t kRtxHeaderSize = 2; - EXPECT_CALL(mock_rtc_event_log_, - LogProxy(SameRtcEventTypeAs(RtcEvent::Type::RtpPacketOutgoing))) - .Times(2); - RTPVideoHeader video_header; - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kMediaPayloadType, kCodecType, kTimestamp, - fake_clock_.TimeInMilliseconds(), kPayloadData, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - - ASSERT_EQ(2, transport_.packets_sent()); - const RtpPacketReceived& media_packet = transport_.sent_packets_[0]; - EXPECT_EQ(kMediaPayloadType, media_packet.PayloadType()); - EXPECT_EQ(kSsrc, media_packet.Ssrc()); - const RtpPacketReceived& flexfec_packet = transport_.sent_packets_[1]; - EXPECT_EQ(kFlexfecPayloadType, flexfec_packet.PayloadType()); - EXPECT_EQ(kFlexFecSsrc, flexfec_packet.Ssrc()); + // Start by sending one media packet and putting in the packet history. + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property(&RtpPacketToSend::padding_size, 0u)), + Pointee(Property(&RtpPacketToSend::Timestamp, start_timestamp)), + Pointee(Property(&RtpPacketToSend::capture_time_ms, start_time)))))); + std::unique_ptr media_packet = + SendPacket(start_time, kPayloadSize); + packet_history_->PutRtpPacket(std::move(media_packet), start_time); + + // Advance time before sending padding. + const TimeDelta kTimeDiff = TimeDelta::Millis(17); + time_controller_.AdvanceTime(kTimeDiff); + + // Timestamps on payload padding should be set to original. 
+ EXPECT_THAT( + rtp_sender_->GeneratePadding(/*target_size_bytes=*/100, + /*media_has_been_sent=*/true), + Each(AllOf( + Pointee(Property(&RtpPacketToSend::padding_size, 0u)), + Pointee(Property(&RtpPacketToSend::payload_size, + kPayloadSize + kRtxHeaderSize)), + Pointee(Property(&RtpPacketToSend::Timestamp, start_timestamp)), + Pointee(Property(&RtpPacketToSend::capture_time_ms, start_time))))); } // Test that the MID header extension is included on sent packets when // configured. -TEST_P(RtpSenderTestWithoutPacer, MidIncludedOnSentPackets) { +TEST_F(RtpSenderTest, MidIncludedOnSentPackets) { const char kMid[] = "mid"; - EnableMidSending(kMid); - // Send a couple packets. + // Send a couple packets, expect both packets to have the MID set. + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee( + Property(&RtpPacketToSend::GetExtension, kMid))))) + .Times(2); SendGenericPacket(); SendGenericPacket(); - - // Expect both packets to have the MID set. - ASSERT_EQ(2u, transport_.sent_packets_.size()); - for (const RtpPacketReceived& packet : transport_.sent_packets_) { - std::string mid; - ASSERT_TRUE(packet.GetExtension(&mid)); - EXPECT_EQ(kMid, mid); - } } -TEST_P(RtpSenderTestWithoutPacer, RidIncludedOnSentPackets) { +TEST_F(RtpSenderTest, RidIncludedOnSentPackets) { const char kRid[] = "f"; - EnableRidSending(kRid); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(Property( + &RtpPacketToSend::GetExtension, kRid))))); SendGenericPacket(); - - ASSERT_EQ(1u, transport_.sent_packets_.size()); - const RtpPacketReceived& packet = transport_.sent_packets_[0]; - std::string rid; - ASSERT_TRUE(packet.GetExtension(&rid)); - EXPECT_EQ(kRid, rid); } -TEST_P(RtpSenderTestWithoutPacer, RidIncludedOnRtxSentPackets) { +TEST_F(RtpSenderTest, RidIncludedOnRtxSentPackets) { const char kRid[] = "f"; - EnableRtx(); EnableRidSending(kRid); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + 
Property(&RtpPacketToSend::GetExtension, kRid), + Property(&RtpPacketToSend::HasExtension, + false)))))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); SendGenericPacket(); - ASSERT_EQ(1u, transport_.sent_packets_.size()); - const RtpPacketReceived& packet = transport_.sent_packets_[0]; - std::string rid; - ASSERT_TRUE(packet.GetExtension(&rid)); - EXPECT_EQ(kRid, rid); - rid = kNoRid; - EXPECT_FALSE(packet.HasExtension()); - - uint16_t packet_id = packet.SequenceNumber(); - rtp_sender()->ReSendPacket(packet_id); - ASSERT_EQ(2u, transport_.sent_packets_.size()); - const RtpPacketReceived& rtx_packet = transport_.sent_packets_[1]; - ASSERT_TRUE(rtx_packet.GetExtension(&rid)); - EXPECT_EQ(kRid, rid); - EXPECT_FALSE(rtx_packet.HasExtension()); + + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kRid), + Property(&RtpPacketToSend::HasExtension, false)))))); + rtp_sender_->ReSendPacket(kSeqNum); } -TEST_P(RtpSenderTestWithoutPacer, MidAndRidNotIncludedOnSentPacketsAfterAck) { +TEST_F(RtpSenderTest, MidAndRidNotIncludedOnSentPacketsAfterAck) { const char kMid[] = "mid"; const char kRid[] = "f"; @@ -1443,53 +588,48 @@ TEST_P(RtpSenderTestWithoutPacer, MidAndRidNotIncludedOnSentPacketsAfterAck) { EnableRidSending(kRid); // This first packet should include both MID and RID. + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, kRid)))))); auto first_built_packet = SendGenericPacket(); - - rtp_sender()->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); + rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); // The second packet should include neither since an ack was received. 
+ EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::HasExtension, false), + Property(&RtpPacketToSend::HasExtension, false)))))); SendGenericPacket(); - - ASSERT_EQ(2u, transport_.sent_packets_.size()); - - const RtpPacketReceived& first_packet = transport_.sent_packets_[0]; - std::string mid, rid; - ASSERT_TRUE(first_packet.GetExtension(&mid)); - EXPECT_EQ(kMid, mid); - ASSERT_TRUE(first_packet.GetExtension(&rid)); - EXPECT_EQ(kRid, rid); - - const RtpPacketReceived& second_packet = transport_.sent_packets_[1]; - EXPECT_FALSE(second_packet.HasExtension()); - EXPECT_FALSE(second_packet.HasExtension()); } -TEST_P(RtpSenderTestWithoutPacer, - MidAndRidAlwaysIncludedOnSentPacketsWhenConfigured) { - SetUpRtpSender(false, false, /*always_send_mid_and_rid=*/true); +TEST_F(RtpSenderTest, MidAndRidAlwaysIncludedOnSentPacketsWhenConfigured) { + SetUpRtpSender(false, /*always_send_mid_and_rid=*/true, nullptr); const char kMid[] = "mid"; const char kRid[] = "f"; EnableMidSending(kMid); EnableRidSending(kRid); // Send two media packets: one before and one after the ack. - auto first_packet = SendGenericPacket(); - rtp_sender()->OnReceivedAckOnSsrc(first_packet->SequenceNumber()); - SendGenericPacket(); - // Due to the configuration, both sent packets should contain MID and RID. 
- ASSERT_EQ(2u, transport_.sent_packets_.size()); - for (const RtpPacketReceived& packet : transport_.sent_packets_) { - EXPECT_EQ(packet.GetExtension(), kMid); - EXPECT_EQ(packet.GetExtension(), kRid); - } + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee( + AllOf(Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, kRid)))))) + .Times(2); + auto first_built_packet = SendGenericPacket(); + rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); + SendGenericPacket(); } // Test that the first RTX packet includes both MID and RRID even if the packet // being retransmitted did not have MID or RID. The MID and RID are needed on // the first packets for a given SSRC, and RTX packets are sent on a separate // SSRC. -TEST_P(RtpSenderTestWithoutPacer, MidAndRidIncludedOnFirstRtxPacket) { +TEST_F(RtpSenderTest, MidAndRidIncludedOnFirstRtxPacket) { const char kMid[] = "mid"; const char kRid[] = "f"; @@ -1498,30 +638,32 @@ TEST_P(RtpSenderTestWithoutPacer, MidAndRidIncludedOnFirstRtxPacket) { EnableRidSending(kRid); // This first packet will include both MID and RID. + EXPECT_CALL(mock_paced_sender_, EnqueuePackets); auto first_built_packet = SendGenericPacket(); - rtp_sender()->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); - - // The second packet will include neither since an ack was received. + rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); + + // The second packet will include neither since an ack was received, put + // it in the packet history for retransmission. + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto second_built_packet = SendGenericPacket(); // The first RTX packet should include MID and RRID. 
- ASSERT_LT(0, - rtp_sender()->ReSendPacket(second_built_packet->SequenceNumber())); - - ASSERT_EQ(3u, transport_.sent_packets_.size()); - - const RtpPacketReceived& rtx_packet = transport_.sent_packets_[2]; - std::string mid, rrid; - ASSERT_TRUE(rtx_packet.GetExtension(&mid)); - EXPECT_EQ(kMid, mid); - ASSERT_TRUE(rtx_packet.GetExtension(&rrid)); - EXPECT_EQ(kRid, rrid); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, + kRid)))))); + rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber()); } // Test that the RTX packets sent after receving an ACK on the RTX SSRC does // not include either MID or RRID even if the packet being retransmitted did // had a MID or RID. -TEST_P(RtpSenderTestWithoutPacer, MidAndRidNotIncludedOnRtxPacketsAfterAck) { +TEST_F(RtpSenderTest, MidAndRidNotIncludedOnRtxPacketsAfterAck) { const char kMid[] = "mid"; const char kRid[] = "f"; @@ -1530,41 +672,44 @@ TEST_P(RtpSenderTestWithoutPacer, MidAndRidNotIncludedOnRtxPacketsAfterAck) { EnableRidSending(kRid); // This first packet will include both MID and RID. + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto first_built_packet = SendGenericPacket(); - rtp_sender()->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); + rtp_sender_->OnReceivedAckOnSsrc(first_built_packet->SequenceNumber()); // The second packet will include neither since an ack was received. + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto second_built_packet = SendGenericPacket(); // The first RTX packet will include MID and RRID. 
- ASSERT_LT(0, - rtp_sender()->ReSendPacket(second_built_packet->SequenceNumber())); - - ASSERT_EQ(3u, transport_.sent_packets_.size()); - const RtpPacketReceived& first_rtx_packet = transport_.sent_packets_[2]; - - rtp_sender()->OnReceivedAckOnRtxSsrc(first_rtx_packet.SequenceNumber()); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + rtp_sender_->OnReceivedAckOnRtxSsrc(packets[0]->SequenceNumber()); + packet_history_->MarkPacketAsSent( + *packets[0]->retransmitted_sequence_number()); + }); + rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber()); // The second and third RTX packets should not include MID nor RRID. - ASSERT_LT(0, - rtp_sender()->ReSendPacket(first_built_packet->SequenceNumber())); - ASSERT_LT(0, - rtp_sender()->ReSendPacket(second_built_packet->SequenceNumber())); - - ASSERT_EQ(5u, transport_.sent_packets_.size()); - - const RtpPacketReceived& second_rtx_packet = transport_.sent_packets_[3]; - EXPECT_FALSE(second_rtx_packet.HasExtension()); - EXPECT_FALSE(second_rtx_packet.HasExtension()); - - const RtpPacketReceived& third_rtx_packet = transport_.sent_packets_[4]; - EXPECT_FALSE(third_rtx_packet.HasExtension()); - EXPECT_FALSE(third_rtx_packet.HasExtension()); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::HasExtension, false), + Property(&RtpPacketToSend::HasExtension, + false)))))) + .Times(2); + rtp_sender_->ReSendPacket(first_built_packet->SequenceNumber()); + rtp_sender_->ReSendPacket(second_built_packet->SequenceNumber()); } -TEST_P(RtpSenderTestWithoutPacer, - MidAndRidAlwaysIncludedOnRtxPacketsWhenConfigured) { - SetUpRtpSender(false, false, /*always_send_mid_and_rid=*/true); +TEST_F(RtpSenderTest, MidAndRidAlwaysIncludedOnRtxPacketsWhenConfigured) { + SetUpRtpSender(false, /*always_send_mid_and_rid=*/true, nullptr); const char kMid[] = "mid"; const char kRid[] = "f"; EnableRtx(); @@ -1572,63 +717,68 @@ 
TEST_P(RtpSenderTestWithoutPacer, EnableRidSending(kRid); // Send two media packets: one before and one after the ack. + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee( + AllOf(Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, kRid)))))) + .Times(2) + .WillRepeatedly( + [&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto media_packet1 = SendGenericPacket(); - rtp_sender()->OnReceivedAckOnSsrc(media_packet1->SequenceNumber()); + rtp_sender_->OnReceivedAckOnSsrc(media_packet1->SequenceNumber()); auto media_packet2 = SendGenericPacket(); // Send three RTX packets with different combinations of orders w.r.t. the // media and RTX acks. - ASSERT_LT(0, rtp_sender()->ReSendPacket(media_packet2->SequenceNumber())); - ASSERT_EQ(3u, transport_.sent_packets_.size()); - rtp_sender()->OnReceivedAckOnRtxSsrc( - transport_.sent_packets_[2].SequenceNumber()); - ASSERT_LT(0, rtp_sender()->ReSendPacket(media_packet1->SequenceNumber())); - ASSERT_LT(0, rtp_sender()->ReSendPacket(media_packet2->SequenceNumber())); - // Due to the configuration, all sent packets should contain MID // and either RID (media) or RRID (RTX). 
- ASSERT_EQ(5u, transport_.sent_packets_.size()); - for (const auto& packet : transport_.sent_packets_) { - EXPECT_EQ(packet.GetExtension(), kMid); - } - for (size_t i = 0; i < 2; ++i) { - const RtpPacketReceived& packet = transport_.sent_packets_[i]; - EXPECT_EQ(packet.GetExtension(), kRid); - } - for (size_t i = 2; i < transport_.sent_packets_.size(); ++i) { - const RtpPacketReceived& packet = transport_.sent_packets_[i]; - EXPECT_EQ(packet.GetExtension(), kRid); - } + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::GetExtension, kMid), + Property(&RtpPacketToSend::GetExtension, + kRid)))))) + .Times(3) + .WillRepeatedly( + [&](std::vector> packets) { + rtp_sender_->OnReceivedAckOnRtxSsrc(packets[0]->SequenceNumber()); + packet_history_->MarkPacketAsSent( + *packets[0]->retransmitted_sequence_number()); + }); + rtp_sender_->ReSendPacket(media_packet2->SequenceNumber()); + rtp_sender_->ReSendPacket(media_packet1->SequenceNumber()); + rtp_sender_->ReSendPacket(media_packet2->SequenceNumber()); } // Test that if the RtpState indicates an ACK has been received on that SSRC // then neither the MID nor RID header extensions will be sent. 
-TEST_P(RtpSenderTestWithoutPacer, - MidAndRidNotIncludedOnSentPacketsAfterRtpStateRestored) { +TEST_F(RtpSenderTest, MidAndRidNotIncludedOnSentPacketsAfterRtpStateRestored) { const char kMid[] = "mid"; const char kRid[] = "f"; EnableMidSending(kMid); EnableRidSending(kRid); - RtpState state = rtp_sender()->GetRtpState(); + RtpState state = rtp_sender_->GetRtpState(); EXPECT_FALSE(state.ssrc_has_acked); state.ssrc_has_acked = true; - rtp_sender()->SetRtpState(state); + rtp_sender_->SetRtpState(state); + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::HasExtension, false), + Property(&RtpPacketToSend::HasExtension, false)))))); SendGenericPacket(); - - ASSERT_EQ(1u, transport_.sent_packets_.size()); - const RtpPacketReceived& packet = transport_.sent_packets_[0]; - EXPECT_FALSE(packet.HasExtension()); - EXPECT_FALSE(packet.HasExtension()); } // Test that if the RTX RtpState indicates an ACK has been received on that // RTX SSRC then neither the MID nor RRID header extensions will be sent on // RTX packets. 
-TEST_P(RtpSenderTestWithoutPacer, - MidAndRridNotIncludedOnRtxPacketsAfterRtpStateRestored) { +TEST_F(RtpSenderTest, MidAndRridNotIncludedOnRtxPacketsAfterRtpStateRestored) { const char kMid[] = "mid"; const char kRid[] = "f"; @@ -1636,786 +786,270 @@ TEST_P(RtpSenderTestWithoutPacer, EnableMidSending(kMid); EnableRidSending(kRid); - RtpState rtx_state = rtp_sender()->GetRtxRtpState(); + RtpState rtx_state = rtp_sender_->GetRtxRtpState(); EXPECT_FALSE(rtx_state.ssrc_has_acked); rtx_state.ssrc_has_acked = true; - rtp_sender()->SetRtxRtpState(rtx_state); + rtp_sender_->SetRtxRtpState(rtx_state); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); auto built_packet = SendGenericPacket(); - ASSERT_LT(0, rtp_sender()->ReSendPacket(built_packet->SequenceNumber())); - - ASSERT_EQ(2u, transport_.sent_packets_.size()); - const RtpPacketReceived& rtx_packet = transport_.sent_packets_[1]; - EXPECT_FALSE(rtx_packet.HasExtension()); - EXPECT_FALSE(rtx_packet.HasExtension()); -} - -TEST_P(RtpSenderTest, FecOverheadRate) { - constexpr uint32_t kTimestamp = 1234; - constexpr int kMediaPayloadType = 127; - constexpr VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - constexpr int kFlexfecPayloadType = 118; - const std::vector kNoRtpExtensions; - const std::vector kNoRtpExtensionSizes; - FlexfecSender flexfec_sender(kFlexfecPayloadType, kFlexFecSsrc, kSsrc, kNoMid, - kNoRtpExtensions, kNoRtpExtensionSizes, - nullptr /* rtp_state */, &fake_clock_); - - // Reset |rtp_sender_| to use FlexFEC. 
- RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.paced_sender = &mock_paced_sender_; - config.local_media_ssrc = kSsrc; - config.fec_generator = &flexfec_sender; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); - - rtp_sender()->SetSequenceNumber(kSeqNum); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.fec_generator = &flexfec_sender; - video_config.fec_type = flexfec_sender.GetFecType(); - video_config.fec_overhead_bytes = flexfec_sender.MaxPacketOverhead(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - // Parameters selected to generate a single FEC packet per media packet. - FecProtectionParams params; - params.fec_rate = 15; - params.max_fec_frames = 1; - params.fec_mask_type = kFecMaskRandom; - flexfec_sender.SetProtectionParameters(params, params); - - constexpr size_t kNumMediaPackets = 10; - constexpr size_t kNumFecPackets = kNumMediaPackets; - constexpr int64_t kTimeBetweenPacketsMs = 10; - EXPECT_CALL(mock_paced_sender_, EnqueuePackets).Times(kNumMediaPackets); - for (size_t i = 0; i < kNumMediaPackets; ++i) { - RTPVideoHeader video_header; - - video_header.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video.SendVideo( - kMediaPayloadType, kCodecType, kTimestamp, - fake_clock_.TimeInMilliseconds(), kPayloadData, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - - fake_clock_.AdvanceTimeMilliseconds(kTimeBetweenPacketsMs); - } - constexpr size_t kRtpHeaderLength = 12; - constexpr size_t kFlexfecHeaderLength = 20; - constexpr size_t kGenericCodecHeaderLength = 1; - constexpr size_t kPayloadLength = 
sizeof(kPayloadData); - constexpr size_t kPacketLength = kRtpHeaderLength + kFlexfecHeaderLength + - kGenericCodecHeaderLength + kPayloadLength; - EXPECT_NEAR(kNumFecPackets * kPacketLength * 8 / - (kNumFecPackets * kTimeBetweenPacketsMs / 1000.0f), - flexfec_sender.CurrentFecRate().bps(), 500); -} - -TEST_P(RtpSenderTest, BitrateCallbacks) { - class TestCallback : public BitrateStatisticsObserver { - public: - TestCallback() - : BitrateStatisticsObserver(), - num_calls_(0), - ssrc_(0), - total_bitrate_(0), - retransmit_bitrate_(0) {} - ~TestCallback() override = default; - - void Notify(uint32_t total_bitrate, - uint32_t retransmit_bitrate, - uint32_t ssrc) override { - ++num_calls_; - ssrc_ = ssrc; - total_bitrate_ = total_bitrate; - retransmit_bitrate_ = retransmit_bitrate; - } - - uint32_t num_calls_; - uint32_t ssrc_; - uint32_t total_bitrate_; - uint32_t retransmit_bitrate_; - } callback; - - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.send_bitrate_observer = &callback; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); - - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - const uint8_t kPayloadType = 127; - - // Simulate kNumPackets sent with kPacketInterval ms intervals, with the - // number of packets selected so that we fill (but don't overflow) the one - // second averaging window. - const uint32_t kWindowSizeMs = 1000; - const uint32_t kPacketInterval = 20; - const uint32_t kNumPackets = - (kWindowSizeMs - kPacketInterval) / kPacketInterval; - // Overhead = 12 bytes RTP header + 1 byte generic header. 
- const uint32_t kPacketOverhead = 13; - - uint8_t payload[] = {47, 11, 32, 93, 89}; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 1); - uint32_t ssrc = rtp_sender()->SSRC(); - - // Initial process call so we get a new time window. - rtp_egress()->ProcessBitrateAndNotifyObservers(); - - // Send a few frames. - RTPVideoHeader video_header; - for (uint32_t i = 0; i < kNumPackets; ++i) { - video_header.frame_type = VideoFrameType::kVideoFrameKey; - ASSERT_TRUE(rtp_sender_video.SendVideo( - kPayloadType, kCodecType, 1234, 4321, payload, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - fake_clock_.AdvanceTimeMilliseconds(kPacketInterval); - } - - rtp_egress()->ProcessBitrateAndNotifyObservers(); - - // We get one call for every stats updated, thus two calls since both the - // stream stats and the retransmit stats are updated once. - EXPECT_EQ(2u, callback.num_calls_); - EXPECT_EQ(ssrc, callback.ssrc_); - const uint32_t kTotalPacketSize = kPacketOverhead + sizeof(payload); - // Bitrate measured over delta between last and first timestamp, plus one. 
- const uint32_t kExpectedWindowMs = kNumPackets * kPacketInterval + 1; - const uint32_t kExpectedBitsAccumulated = kTotalPacketSize * kNumPackets * 8; - const uint32_t kExpectedRateBps = - (kExpectedBitsAccumulated * 1000 + (kExpectedWindowMs / 2)) / - kExpectedWindowMs; - EXPECT_EQ(kExpectedRateBps, callback.total_bitrate_); -} -TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) { - const uint8_t kPayloadType = 127; - const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - FieldTrialBasedConfig field_trials; - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials; - RTPSenderVideo rtp_sender_video(video_config); - uint8_t payload[] = {47, 11, 32, 93, 89}; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 1); - uint32_t ssrc = rtp_sender()->SSRC(); - - // Send a frame. - RTPVideoHeader video_header; - video_header.frame_type = VideoFrameType::kVideoFrameKey; - ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321, - payload, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - StreamDataCounters expected; - expected.transmitted.payload_bytes = 6; - expected.transmitted.header_bytes = 12; - expected.transmitted.padding_bytes = 0; - expected.transmitted.packets = 1; - expected.retransmitted.payload_bytes = 0; - expected.retransmitted.header_bytes = 0; - expected.retransmitted.padding_bytes = 0; - expected.retransmitted.packets = 0; - expected.fec.packets = 0; - rtp_stats_callback_.Matches(ssrc, expected); - - // Retransmit a frame. 
- uint16_t seqno = rtp_sender()->SequenceNumber() - 1; - rtp_sender()->ReSendPacket(seqno); - expected.transmitted.payload_bytes = 12; - expected.transmitted.header_bytes = 24; - expected.transmitted.packets = 2; - expected.retransmitted.payload_bytes = 6; - expected.retransmitted.header_bytes = 12; - expected.retransmitted.padding_bytes = 0; - expected.retransmitted.packets = 1; - rtp_stats_callback_.Matches(ssrc, expected); - - // Send padding. - GenerateAndSendPadding(kMaxPaddingSize); - expected.transmitted.payload_bytes = 12; - expected.transmitted.header_bytes = 36; - expected.transmitted.padding_bytes = kMaxPaddingSize; - expected.transmitted.packets = 3; - rtp_stats_callback_.Matches(ssrc, expected); -} - -TEST_P(RtpSenderTestWithoutPacer, StreamDataCountersCallbacksUlpfec) { - const uint8_t kRedPayloadType = 96; - const uint8_t kUlpfecPayloadType = 97; - const uint8_t kPayloadType = 127; - const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; - FieldTrialBasedConfig field_trials; - UlpfecGenerator ulpfec_generator(kRedPayloadType, kUlpfecPayloadType, - &fake_clock_); - RTPSenderVideo::Config video_config; - video_config.clock = &fake_clock_; - video_config.rtp_sender = rtp_sender(); - video_config.field_trials = &field_trials; - video_config.red_payload_type = kRedPayloadType; - video_config.fec_generator = &ulpfec_generator; - video_config.fec_type = ulpfec_generator.GetFecType(); - video_config.fec_overhead_bytes = ulpfec_generator.MaxPacketOverhead(); - RTPSenderVideo rtp_sender_video(video_config); - uint8_t payload[] = {47, 11, 32, 93, 89}; - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 1); - uint32_t ssrc = rtp_sender()->SSRC(); - - RTPVideoHeader video_header; - StreamDataCounters expected; - - // Send ULPFEC. 
- FecProtectionParams fec_params; - fec_params.fec_mask_type = kFecMaskRandom; - fec_params.fec_rate = 1; - fec_params.max_fec_frames = 1; - ulpfec_generator.SetProtectionParameters(fec_params, fec_params); - video_header.frame_type = VideoFrameType::kVideoFrameDelta; - ASSERT_TRUE(rtp_sender_video.SendVideo(kPayloadType, kCodecType, 1234, 4321, - payload, nullptr, video_header, - kDefaultExpectedRetransmissionTimeMs)); - expected.transmitted.payload_bytes = 28; - expected.transmitted.header_bytes = 24; - expected.transmitted.packets = 2; - expected.fec.packets = 1; - rtp_stats_callback_.Matches(ssrc, expected); -} - -TEST_P(RtpSenderTestWithoutPacer, BytesReportedCorrectly) { - // XXX const char* kPayloadName = "GENERIC"; - const uint8_t kPayloadType = 127; - rtp_sender()->SetRtxPayloadType(kPayloadType - 1, kPayloadType); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - - SendGenericPacket(); - // Will send 2 full-size padding packets. - GenerateAndSendPadding(1); - GenerateAndSendPadding(1); - - StreamDataCounters rtp_stats; - StreamDataCounters rtx_stats; - rtp_egress()->GetDataCounters(&rtp_stats, &rtx_stats); - - // Payload - EXPECT_GT(rtp_stats.first_packet_time_ms, -1); - EXPECT_EQ(rtp_stats.transmitted.payload_bytes, sizeof(kPayloadData)); - EXPECT_EQ(rtp_stats.transmitted.header_bytes, 12u); - EXPECT_EQ(rtp_stats.transmitted.padding_bytes, 0u); - EXPECT_EQ(rtx_stats.transmitted.payload_bytes, 0u); - EXPECT_EQ(rtx_stats.transmitted.header_bytes, 24u); - EXPECT_EQ(rtx_stats.transmitted.padding_bytes, 2 * kMaxPaddingSize); - - EXPECT_EQ(rtp_stats.transmitted.TotalBytes(), - rtp_stats.transmitted.payload_bytes + - rtp_stats.transmitted.header_bytes + - rtp_stats.transmitted.padding_bytes); - EXPECT_EQ(rtx_stats.transmitted.TotalBytes(), - rtx_stats.transmitted.payload_bytes + - rtx_stats.transmitted.header_bytes + - rtx_stats.transmitted.padding_bytes); - - EXPECT_EQ( - transport_.total_bytes_sent_, - 
rtp_stats.transmitted.TotalBytes() + rtx_stats.transmitted.TotalBytes()); + EXPECT_CALL( + mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(AllOf( + Property(&RtpPacketToSend::HasExtension, false), + Property(&RtpPacketToSend::HasExtension, false)))))); + ASSERT_LT(0, rtp_sender_->ReSendPacket(built_packet->SequenceNumber())); } -TEST_P(RtpSenderTestWithoutPacer, RespectsNackBitrateLimit) { +TEST_F(RtpSenderTest, RespectsNackBitrateLimit) { const int32_t kPacketSize = 1400; const int32_t kNumPackets = 30; retransmission_rate_limiter_.SetMaxRate(kPacketSize * kNumPackets * 8); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, kNumPackets); - const uint16_t kStartSequenceNumber = rtp_sender()->SequenceNumber(); + const uint16_t kStartSequenceNumber = rtp_sender_->SequenceNumber(); std::vector sequence_numbers; for (int32_t i = 0; i < kNumPackets; ++i) { sequence_numbers.push_back(kStartSequenceNumber + i); - fake_clock_.AdvanceTimeMilliseconds(1); - SendPacket(fake_clock_.TimeInMilliseconds(), kPacketSize); + time_controller_.AdvanceTime(TimeDelta::Millis(1)); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(SizeIs(1))) + .WillOnce([&](std::vector> packets) { + packet_history_->PutRtpPacket(std::move(packets[0]), + clock_->TimeInMilliseconds()); + }); + SendPacket(clock_->TimeInMilliseconds(), kPacketSize); } - EXPECT_EQ(kNumPackets, transport_.packets_sent()); - fake_clock_.AdvanceTimeMilliseconds(1000 - kNumPackets); + time_controller_.AdvanceTime(TimeDelta::Millis(1000 - kNumPackets)); // Resending should work - brings the bandwidth up to the limit. // NACK bitrate is capped to the same bitrate as the encoder, since the max // protection overhead is 50% (see MediaOptimization::SetTargetRates). 
- rtp_sender()->OnReceivedNack(sequence_numbers, 0); - EXPECT_EQ(kNumPackets * 2, transport_.packets_sent()); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets(ElementsAre(Pointee(Property( + &RtpPacketToSend::packet_type, + RtpPacketMediaType::kRetransmission))))) + .Times(kNumPackets) + .WillRepeatedly( + [&](std::vector> packets) { + for (const auto& packet : packets) { + packet_history_->MarkPacketAsSent( + *packet->retransmitted_sequence_number()); + } + }); + rtp_sender_->OnReceivedNack(sequence_numbers, 0); // Must be at least 5ms in between retransmission attempts. - fake_clock_.AdvanceTimeMilliseconds(5); + time_controller_.AdvanceTime(TimeDelta::Millis(5)); // Resending should not work, bandwidth exceeded. - rtp_sender()->OnReceivedNack(sequence_numbers, 0); - EXPECT_EQ(kNumPackets * 2, transport_.packets_sent()); + EXPECT_CALL(mock_paced_sender_, EnqueuePackets).Times(0); + rtp_sender_->OnReceivedNack(sequence_numbers, 0); } -TEST_P(RtpSenderTest, UpdatingCsrcsUpdatedOverhead) { - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); +TEST_F(RtpSenderTest, UpdatingCsrcsUpdatedOverhead) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.rtx_send_ssrc = {}; + CreateSender(config); // Base RTP overhead is 12B. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); // Adding two csrcs adds 2*4 bytes to the header. 
- rtp_sender()->SetCsrcs({1, 2}); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 20u); + rtp_sender_->SetCsrcs({1, 2}); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 20u); } -TEST_P(RtpSenderTest, OnOverheadChanged) { - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); +TEST_F(RtpSenderTest, OnOverheadChanged) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.rtx_send_ssrc = {}; + CreateSender(config); // Base RTP overhead is 12B. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId); + rtp_sender_->RegisterRtpHeaderExtension(TransmissionOffset::kUri, + kTransmissionTimeOffsetExtensionId); // TransmissionTimeOffset extension has a size of 3B, but with the addition // of header index and rounding to 4 byte boundary we end up with 20B total. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 20u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 20u); } -TEST_P(RtpSenderTest, CountMidOnlyUntilAcked) { - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); +TEST_F(RtpSenderTest, CountMidOnlyUntilAcked) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.rtx_send_ssrc = {}; + CreateSender(config); // Base RTP overhead is 12B. 
- EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionMid, kMidExtensionId); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionRtpStreamId, - kRidExtensionId); + rtp_sender_->RegisterRtpHeaderExtension(RtpMid::kUri, kMidExtensionId); + rtp_sender_->RegisterRtpHeaderExtension(RtpStreamId::kUri, kRidExtensionId); // Counted only if set. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); - rtp_sender()->SetMid("foo"); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 36u); - rtp_sender()->SetRid("bar"); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 52u); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); + rtp_sender_->SetMid("foo"); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 36u); + rtp_sender_->SetRid("bar"); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 52u); // Ack received, mid/rid no longer sent. - rtp_sender()->OnReceivedAckOnSsrc(0); - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); + rtp_sender_->OnReceivedAckOnSsrc(0); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); } -TEST_P(RtpSenderTest, DontCountVolatileExtensionsIntoOverhead) { - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.retransmission_rate_limiter = &retransmission_rate_limiter_; - rtp_sender_context_ = std::make_unique(config); +TEST_F(RtpSenderTest, DontCountVolatileExtensionsIntoOverhead) { + RtpRtcpInterface::Configuration config = GetDefaultConfig(); + config.rtx_send_ssrc = {}; + CreateSender(config); // Base RTP overhead is 12B. 
- EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); - - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionInbandComfortNoise, 1); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionAbsoluteCaptureTime, 2); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionVideoRotation, 3); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionPlayoutDelay, 4); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionVideoContentType, 5); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionVideoTiming, 6); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionRepairedRtpStreamId, 7); - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionColorSpace, 8); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); + + rtp_sender_->RegisterRtpHeaderExtension(InbandComfortNoiseExtension::kUri, 1); + rtp_sender_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::kUri, + 2); + rtp_sender_->RegisterRtpHeaderExtension(VideoOrientation::kUri, 3); + rtp_sender_->RegisterRtpHeaderExtension(PlayoutDelayLimits::kUri, 4); + rtp_sender_->RegisterRtpHeaderExtension(VideoContentTypeExtension::kUri, 5); + rtp_sender_->RegisterRtpHeaderExtension(VideoTimingExtension::kUri, 6); + rtp_sender_->RegisterRtpHeaderExtension(RepairedRtpStreamId::kUri, 7); + rtp_sender_->RegisterRtpHeaderExtension(ColorSpaceExtension::kUri, 8); // Still only 12B counted since can't count on above being sent. - EXPECT_EQ(rtp_sender()->ExpectedPerPacketOverhead(), 12u); -} - -TEST_P(RtpSenderTest, SendPacketMatchesVideo) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kVideo); - - // Verify sent with correct SSRC. 
- packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kVideo); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); -} - -TEST_P(RtpSenderTest, SendPacketMatchesAudio) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kAudio); - - // Verify sent with correct SSRC. - packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kAudio); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); + EXPECT_EQ(rtp_sender_->ExpectedPerPacketOverhead(), 12u); } -TEST_P(RtpSenderTest, SendPacketMatchesRetransmissions) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - - // Verify sent with correct SSRC (non-RTX). - packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); - - // RTX retransmission. - packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetSsrc(kRtxSsrc); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 2); -} - -TEST_P(RtpSenderTest, SendPacketMatchesPadding) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kPadding); - - // Verify sent with correct SSRC (non-RTX). 
- packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kPadding); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); - - // RTX padding. - packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetSsrc(kRtxSsrc); - packet->set_packet_type(RtpPacketMediaType::kPadding); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 2); -} - -TEST_P(RtpSenderTest, SendPacketMatchesFlexfec) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - - // Verify sent with correct SSRC. - packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetSsrc(kFlexFecSsrc); - packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); -} - -TEST_P(RtpSenderTest, SendPacketMatchesUlpfec) { - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - - // Verify sent with correct SSRC. - packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetSsrc(kSsrc); - packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_EQ(transport_.packets_sent(), 1); -} - -TEST_P(RtpSenderTest, SendPacketHandlesRetransmissionHistory) { - rtp_sender_context_->packet_history_.SetStorePacketsStatus( +TEST_F(RtpSenderTest, SendPacketHandlesRetransmissionHistory) { + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 10); - // Build a media packet and send it. 
+ // Ignore calls to EnqueuePackets() for this test. + EXPECT_CALL(mock_paced_sender_, EnqueuePackets).WillRepeatedly(Return()); + + // Build a media packet and put in the packet history. std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); + BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); const uint16_t media_sequence_number = packet->SequenceNumber(); - packet->set_packet_type(RtpPacketMediaType::kVideo); packet->set_allow_retransmission(true); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); - // Simulate retransmission request. - fake_clock_.AdvanceTimeMilliseconds(30); - EXPECT_GT(rtp_sender()->ReSendPacket(media_sequence_number), 0); + // Simulate successful retransmission request. + time_controller_.AdvanceTime(TimeDelta::Millis(30)); + EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0)); // Packet already pending, retransmission not allowed. - fake_clock_.AdvanceTimeMilliseconds(30); - EXPECT_EQ(rtp_sender()->ReSendPacket(media_sequence_number), 0); - - // Packet exiting pacer, mark as not longer pending. - packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - EXPECT_NE(packet->SequenceNumber(), media_sequence_number); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - packet->SetSsrc(kRtxSsrc); - packet->set_retransmitted_sequence_number(media_sequence_number); - packet->set_allow_retransmission(false); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); + time_controller_.AdvanceTime(TimeDelta::Millis(30)); + EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Eq(0)); - // Retransmissions allowed again. - fake_clock_.AdvanceTimeMilliseconds(30); - EXPECT_GT(rtp_sender()->ReSendPacket(media_sequence_number), 0); + // Simulate packet exiting pacer, mark as not longer pending. 
+ packet_history_->MarkPacketAsSent(media_sequence_number); - // Retransmission of RTX packet should not be allowed. - EXPECT_EQ(rtp_sender()->ReSendPacket(packet->SequenceNumber()), 0); + // Retransmissions allowed again. + time_controller_.AdvanceTime(TimeDelta::Millis(30)); + EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0)); } -TEST_P(RtpSenderTest, SendPacketUpdatesExtensions) { - ASSERT_EQ(rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId), - 0); - ASSERT_EQ(rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId), - 0); - ASSERT_EQ(rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionVideoTiming, - kVideoTimingExtensionId), - 0); - - std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->set_packetization_finish_time_ms(fake_clock_.TimeInMilliseconds()); - - const int32_t kDiffMs = 10; - fake_clock_.AdvanceTimeMilliseconds(kDiffMs); - - packet->set_packet_type(RtpPacketMediaType::kVideo); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - const RtpPacketReceived& received_packet = transport_.last_sent_packet(); - - EXPECT_EQ(received_packet.GetExtension(), kDiffMs * 90); - - EXPECT_EQ(received_packet.GetExtension(), - AbsoluteSendTime::MsTo24Bits(fake_clock_.TimeInMilliseconds())); - - VideoSendTiming timing; - EXPECT_TRUE(received_packet.GetExtension(&timing)); - EXPECT_EQ(timing.pacer_exit_delta_ms, kDiffMs); -} +TEST_F(RtpSenderTest, MarksRetransmittedPackets) { + packet_history_->SetStorePacketsStatus( + RtpPacketHistory::StorageMode::kStoreAndCull, 10); -TEST_P(RtpSenderTest, SendPacketSetsPacketOptions) { - const uint16_t kPacketId = 42; - ASSERT_EQ(rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId), - 0); + // Build a media packet and put in the packet history. 
std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetExtension(kPacketId); - - packet->set_packet_type(RtpPacketMediaType::kVideo); - EXPECT_CALL(send_packet_observer_, OnSendPacket); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - - EXPECT_EQ(transport_.last_options_.packet_id, kPacketId); - EXPECT_TRUE(transport_.last_options_.included_in_allocation); - EXPECT_TRUE(transport_.last_options_.included_in_feedback); - EXPECT_FALSE(transport_.last_options_.is_retransmit); - - // Send another packet as retransmission, verify options are populated. - packet = BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - packet->SetExtension(kPacketId + 1); - packet->set_packet_type(RtpPacketMediaType::kRetransmission); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - EXPECT_TRUE(transport_.last_options_.is_retransmit); -} + BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); + const uint16_t media_sequence_number = packet->SequenceNumber(); + packet->set_allow_retransmission(true); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); -TEST_P(RtpSenderTest, SendPacketUpdatesStats) { - const size_t kPayloadSize = 1000; - - StrictMock send_side_delay_observer; - - RtpRtcp::Configuration config; - config.clock = &fake_clock_; - config.outgoing_transport = &transport_; - config.local_media_ssrc = kSsrc; - config.rtx_send_ssrc = kRtxSsrc; - config.fec_generator = &flexfec_sender_; - config.send_side_delay_observer = &send_side_delay_observer; - config.event_log = &mock_rtc_event_log_; - config.send_packet_observer = &send_packet_observer_; - rtp_sender_context_ = std::make_unique(config); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - - const int64_t capture_time_ms = fake_clock_.TimeInMilliseconds(); - - std::unique_ptr video_packet = - 
BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - video_packet->set_packet_type(RtpPacketMediaType::kVideo); - video_packet->SetPayloadSize(kPayloadSize); - video_packet->SetExtension(1); - - std::unique_ptr rtx_packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - rtx_packet->SetSsrc(kRtxSsrc); - rtx_packet->set_packet_type(RtpPacketMediaType::kRetransmission); - rtx_packet->SetPayloadSize(kPayloadSize); - rtx_packet->SetExtension(2); - - std::unique_ptr fec_packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); - fec_packet->SetSsrc(kFlexFecSsrc); - fec_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); - fec_packet->SetPayloadSize(kPayloadSize); - fec_packet->SetExtension(3); - - const int64_t kDiffMs = 25; - fake_clock_.AdvanceTimeMilliseconds(kDiffMs); - - EXPECT_CALL(send_side_delay_observer, - SendSideDelayUpdated(kDiffMs, kDiffMs, kDiffMs, kSsrc)); + // Expect a retransmission packet marked with which packet it is a + // retransmit of. EXPECT_CALL( - send_side_delay_observer, - SendSideDelayUpdated(kDiffMs, kDiffMs, 2 * kDiffMs, kFlexFecSsrc)); - - EXPECT_CALL(send_packet_observer_, OnSendPacket(1, capture_time_ms, kSsrc)); - - rtp_egress()->SendPacket(video_packet.get(), PacedPacketInfo()); - - // Send packet observer not called for padding/retransmissions. 
- EXPECT_CALL(send_packet_observer_, OnSendPacket(2, _, _)).Times(0); - rtp_egress()->SendPacket(rtx_packet.get(), PacedPacketInfo()); - - EXPECT_CALL(send_packet_observer_, - OnSendPacket(3, capture_time_ms, kFlexFecSsrc)); - rtp_egress()->SendPacket(fec_packet.get(), PacedPacketInfo()); - - StreamDataCounters rtp_stats; - StreamDataCounters rtx_stats; - rtp_egress()->GetDataCounters(&rtp_stats, &rtx_stats); - EXPECT_EQ(rtp_stats.transmitted.packets, 2u); - EXPECT_EQ(rtp_stats.fec.packets, 1u); - EXPECT_EQ(rtx_stats.retransmitted.packets, 1u); + mock_paced_sender_, + EnqueuePackets(ElementsAre(AllOf( + Pointee(Property(&RtpPacketToSend::packet_type, + RtpPacketMediaType::kRetransmission)), + Pointee(Property(&RtpPacketToSend::retransmitted_sequence_number, + Eq(media_sequence_number))))))); + EXPECT_THAT(rtp_sender_->ReSendPacket(media_sequence_number), Gt(0)); } -TEST_P(RtpSenderTest, GeneratedPaddingHasBweExtensions) { +TEST_F(RtpSenderTest, GeneratedPaddingHasBweExtensions) { // Min requested size in order to use RTX payload. 
const size_t kMinPaddingSize = 50; + EnableRtx(); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 1); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransmissionOffset::kUri, kTransmissionTimeOffsetExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId)); - ASSERT_EQ(0, - rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId)); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); - - // Send a payload packet first, to enable padding and populate the packet - // history. + // Put a packet in the history, in order to facilitate payload padding. std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); + BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); packet->set_allow_retransmission(true); packet->SetPayloadSize(kMinPaddingSize); packet->set_packet_type(RtpPacketMediaType::kVideo); - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); // Generate a plain padding packet, check that extensions are registered. 
std::vector> generated_packets = - rtp_sender()->GeneratePadding(/*target_size_bytes=*/1, true); + rtp_sender_->GeneratePadding(/*target_size_bytes=*/1, true); ASSERT_THAT(generated_packets, SizeIs(1)); auto& plain_padding = generated_packets.front(); EXPECT_GT(plain_padding->padding_size(), 0u); EXPECT_TRUE(plain_padding->HasExtension()); EXPECT_TRUE(plain_padding->HasExtension()); EXPECT_TRUE(plain_padding->HasExtension()); - - // Verify all header extensions have been written. - rtp_egress()->SendPacket(plain_padding.get(), PacedPacketInfo()); - const auto& sent_plain_padding = transport_.last_sent_packet(); - EXPECT_TRUE(sent_plain_padding.HasExtension()); - EXPECT_TRUE(sent_plain_padding.HasExtension()); - EXPECT_TRUE(sent_plain_padding.HasExtension()); - webrtc::RTPHeader rtp_header; - sent_plain_padding.GetHeader(&rtp_header); - EXPECT_TRUE(rtp_header.extension.hasAbsoluteSendTime); - EXPECT_TRUE(rtp_header.extension.hasTransmissionTimeOffset); - EXPECT_TRUE(rtp_header.extension.hasTransportSequenceNumber); + EXPECT_GT(plain_padding->padding_size(), 0u); // Generate a payload padding packets, check that extensions are registered. - generated_packets = rtp_sender()->GeneratePadding(kMinPaddingSize, true); + generated_packets = rtp_sender_->GeneratePadding(kMinPaddingSize, true); ASSERT_EQ(generated_packets.size(), 1u); auto& payload_padding = generated_packets.front(); EXPECT_EQ(payload_padding->padding_size(), 0u); EXPECT_TRUE(payload_padding->HasExtension()); EXPECT_TRUE(payload_padding->HasExtension()); EXPECT_TRUE(payload_padding->HasExtension()); - - // Verify all header extensions have been written. 
- rtp_egress()->SendPacket(payload_padding.get(), PacedPacketInfo()); - const auto& sent_payload_padding = transport_.last_sent_packet(); - EXPECT_TRUE(sent_payload_padding.HasExtension()); - EXPECT_TRUE(sent_payload_padding.HasExtension()); - EXPECT_TRUE(sent_payload_padding.HasExtension()); - sent_payload_padding.GetHeader(&rtp_header); - EXPECT_TRUE(rtp_header.extension.hasAbsoluteSendTime); - EXPECT_TRUE(rtp_header.extension.hasTransmissionTimeOffset); - EXPECT_TRUE(rtp_header.extension.hasTransportSequenceNumber); + EXPECT_GT(payload_padding->payload_size(), 0u); } -TEST_P(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { +TEST_F(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { // Min requested size in order to use RTX payload. const size_t kMinPaddingSize = 50; - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); + rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload); + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 1); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); const size_t kPayloadPacketSize = kMinPaddingSize; std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); + BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); packet->set_allow_retransmission(true); packet->SetPayloadSize(kPayloadPacketSize); packet->set_packet_type(RtpPacketMediaType::kVideo); - - // Send a dummy video packet so it ends up in the packet history. 
- EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); // Generated padding has large enough budget that the video packet should be // retransmitted as padding. std::vector> generated_packets = - rtp_sender()->GeneratePadding(kMinPaddingSize, true); + rtp_sender_->GeneratePadding(kMinPaddingSize, true); ASSERT_EQ(generated_packets.size(), 1u); auto& padding_packet = generated_packets.front(); EXPECT_EQ(padding_packet->packet_type(), RtpPacketMediaType::kPadding); @@ -2428,7 +1062,7 @@ TEST_P(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { size_t padding_bytes_generated = 0; generated_packets = - rtp_sender()->GeneratePadding(kPaddingBytesRequested, true); + rtp_sender_->GeneratePadding(kPaddingBytesRequested, true); EXPECT_EQ(generated_packets.size(), 1u); for (auto& packet : generated_packets) { EXPECT_EQ(packet->packet_type(), RtpPacketMediaType::kPadding); @@ -2441,29 +1075,28 @@ TEST_P(RtpSenderTest, GeneratePaddingResendsOldPacketsWithRtx) { EXPECT_EQ(padding_bytes_generated, kMaxPaddingSize); } -TEST_P(RtpSenderTest, LimitsPayloadPaddingSize) { +TEST_F(RtpSenderTest, LimitsPayloadPaddingSize) { // Limit RTX payload padding to 2x target size. 
const double kFactor = 2.0; field_trials_.SetMaxPaddingFactor(kFactor); - SetUpRtpSender(true, false, false); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + SetUpRtpSender(false, false, nullptr); + rtp_sender_->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); + rtp_sender_->SetRtxPayloadType(kRtxPayload, kPayload); + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 1); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); // Send a dummy video packet so it ends up in the packet history. const size_t kPayloadPacketSize = 1234u; std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); + BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); packet->set_allow_retransmission(true); packet->SetPayloadSize(kPayloadPacketSize); packet->set_packet_type(RtpPacketMediaType::kVideo); - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); // Smallest target size that will result in the sent packet being returned as // padding. @@ -2473,41 +1106,38 @@ TEST_P(RtpSenderTest, LimitsPayloadPaddingSize) { // Generated padding has large enough budget that the video packet should be // retransmitted as padding. 
EXPECT_THAT( - rtp_sender()->GeneratePadding(kMinTargerSizeForPayload, true), + rtp_sender_->GeneratePadding(kMinTargerSizeForPayload, true), AllOf(Not(IsEmpty()), Each(Pointee(Property(&RtpPacketToSend::padding_size, Eq(0u)))))); // If payload padding is > 2x requested size, plain padding is returned // instead. EXPECT_THAT( - rtp_sender()->GeneratePadding(kMinTargerSizeForPayload - 1, true), + rtp_sender_->GeneratePadding(kMinTargerSizeForPayload - 1, true), AllOf(Not(IsEmpty()), Each(Pointee(Property(&RtpPacketToSend::padding_size, Gt(0u)))))); } -TEST_P(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { - rtp_sender_context_->packet_history_.SetStorePacketsStatus( +TEST_F(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 1); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId)); - ASSERT_EQ(0, - rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionAbsoluteSendTime, kAbsoluteSendTimeExtensionId)); - ASSERT_EQ(0, rtp_sender()->RegisterRtpHeaderExtension( - kRtpExtensionTransportSequenceNumber, - kTransportSequenceNumberExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransmissionOffset::kUri, kTransmissionTimeOffsetExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + AbsoluteSendTime::kUri, kAbsoluteSendTimeExtensionId)); + ASSERT_TRUE(rtp_sender_->RegisterRtpHeaderExtension( + TransportSequenceNumber::kUri, kTransportSequenceNumberExtensionId)); const size_t kPayloadPacketSize = 1234; // Send a dummy video packet so it ends up in the packet history. Since we // are not using RTX, it should never be used as padding. 
std::unique_ptr packet = - BuildRtpPacket(kPayload, true, 0, fake_clock_.TimeInMilliseconds()); + BuildRtpPacket(kPayload, true, 0, clock_->TimeInMilliseconds()); packet->set_allow_retransmission(true); packet->SetPayloadSize(kPayloadPacketSize); packet->set_packet_type(RtpPacketMediaType::kVideo); - EXPECT_CALL(send_packet_observer_, OnSendPacket).Times(1); - rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); + packet_history_->PutRtpPacket(std::move(packet), + clock_->TimeInMilliseconds()); // Payload padding not available without RTX, only generate plain padding on // the media SSRC. @@ -2519,7 +1149,7 @@ TEST_P(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { (kPaddingBytesRequested + kMaxPaddingSize - 1) / kMaxPaddingSize; size_t padding_bytes_generated = 0; std::vector> padding_packets = - rtp_sender()->GeneratePadding(kPaddingBytesRequested, true); + rtp_sender_->GeneratePadding(kPaddingBytesRequested, true); EXPECT_EQ(padding_packets.size(), kExpectedNumPaddingPackets); for (auto& packet : padding_packets) { EXPECT_EQ(packet->packet_type(), RtpPacketMediaType::kPadding); @@ -2530,187 +1160,182 @@ TEST_P(RtpSenderTest, GeneratePaddingCreatesPurePaddingWithoutRtx) { EXPECT_TRUE(packet->HasExtension()); EXPECT_TRUE(packet->HasExtension()); EXPECT_TRUE(packet->HasExtension()); - - // Verify all header extensions are received. 
- rtp_egress()->SendPacket(packet.get(), PacedPacketInfo()); - webrtc::RTPHeader rtp_header; - transport_.last_sent_packet().GetHeader(&rtp_header); - EXPECT_TRUE(rtp_header.extension.hasAbsoluteSendTime); - EXPECT_TRUE(rtp_header.extension.hasTransmissionTimeOffset); - EXPECT_TRUE(rtp_header.extension.hasTransportSequenceNumber); } EXPECT_EQ(padding_bytes_generated, kExpectedNumPaddingPackets * kMaxPaddingSize); } -TEST_P(RtpSenderTest, SupportsPadding) { +TEST_F(RtpSenderTest, SupportsPadding) { bool kSendingMediaStats[] = {true, false}; bool kEnableRedundantPayloads[] = {true, false}; - RTPExtensionType kBweExtensionTypes[] = { - kRtpExtensionTransportSequenceNumber, - kRtpExtensionTransportSequenceNumber02, kRtpExtensionAbsoluteSendTime, - kRtpExtensionTransmissionTimeOffset}; + absl::string_view kBweExtensionUris[] = { + TransportSequenceNumber::kUri, TransportSequenceNumberV2::kUri, + AbsoluteSendTime::kUri, TransmissionOffset::kUri}; const int kExtensionsId = 7; for (bool sending_media : kSendingMediaStats) { - rtp_sender()->SetSendingMediaStatus(sending_media); + rtp_sender_->SetSendingMediaStatus(sending_media); for (bool redundant_payloads : kEnableRedundantPayloads) { int rtx_mode = kRtxRetransmitted; if (redundant_payloads) { rtx_mode |= kRtxRedundantPayloads; } - rtp_sender()->SetRtxStatus(rtx_mode); + rtp_sender_->SetRtxStatus(rtx_mode); - for (auto extension_type : kBweExtensionTypes) { - EXPECT_FALSE(rtp_sender()->SupportsPadding()); - rtp_sender()->RegisterRtpHeaderExtension(extension_type, kExtensionsId); + for (auto extension_uri : kBweExtensionUris) { + EXPECT_FALSE(rtp_sender_->SupportsPadding()); + rtp_sender_->RegisterRtpHeaderExtension(extension_uri, kExtensionsId); if (!sending_media) { - EXPECT_FALSE(rtp_sender()->SupportsPadding()); + EXPECT_FALSE(rtp_sender_->SupportsPadding()); } else { - EXPECT_TRUE(rtp_sender()->SupportsPadding()); + EXPECT_TRUE(rtp_sender_->SupportsPadding()); if (redundant_payloads) { - 
EXPECT_TRUE(rtp_sender()->SupportsRtxPayloadPadding()); + EXPECT_TRUE(rtp_sender_->SupportsRtxPayloadPadding()); } else { - EXPECT_FALSE(rtp_sender()->SupportsRtxPayloadPadding()); + EXPECT_FALSE(rtp_sender_->SupportsRtxPayloadPadding()); } } - rtp_sender()->DeregisterRtpHeaderExtension(extension_type); - EXPECT_FALSE(rtp_sender()->SupportsPadding()); + rtp_sender_->DeregisterRtpHeaderExtension(extension_uri); + EXPECT_FALSE(rtp_sender_->SupportsPadding()); } } } } -TEST_P(RtpSenderTest, SetsCaptureTimeAndPopulatesTransmissionOffset) { - rtp_sender()->RegisterRtpHeaderExtension(kRtpExtensionTransmissionTimeOffset, - kTransmissionTimeOffsetExtensionId); - - rtp_sender()->SetSendingMediaStatus(true); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); +TEST_F(RtpSenderTest, SetsCaptureTimeOnRtxRetransmissions) { + EnableRtx(); - const int64_t kMissingCaptureTimeMs = 0; - const uint32_t kTimestampTicksPerMs = 90; - const int64_t kOffsetMs = 10; + // Put a packet in the packet history, with current time as capture time. + const int64_t start_time_ms = clock_->TimeInMilliseconds(); + std::unique_ptr packet = + BuildRtpPacket(kPayload, kMarkerBit, start_time_ms, + /*capture_time_ms=*/start_time_ms); + packet->set_allow_retransmission(true); + packet_history_->PutRtpPacket(std::move(packet), start_time_ms); - auto packet = - BuildRtpPacket(kPayload, kMarkerBit, fake_clock_.TimeInMilliseconds(), - kMissingCaptureTimeMs); - packet->set_packet_type(RtpPacketMediaType::kVideo); - packet->ReserveExtension(); - packet->AllocatePayload(sizeof(kPayloadData)); + // Advance time, request an RTX retransmission. Capture timestamp should be + // preserved. 
+ time_controller_.AdvanceTime(TimeDelta::Millis(10)); - std::unique_ptr packet_to_pace; - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - EXPECT_EQ(packets.size(), 1u); - EXPECT_GT(packets[0]->capture_time_ms(), 0); - packet_to_pace = std::move(packets[0]); - }); + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee(Property( + &RtpPacketToSend::capture_time_ms, start_time_ms))))); + EXPECT_GT(rtp_sender_->ReSendPacket(kSeqNum), 0); +} - packet->set_allow_retransmission(true); - EXPECT_TRUE(rtp_sender()->SendToNetwork(std::move(packet))); +TEST_F(RtpSenderTest, ClearHistoryOnSequenceNumberCange) { + EnableRtx(); - fake_clock_.AdvanceTimeMilliseconds(kOffsetMs); + // Put a packet in the packet history. + const int64_t now_ms = clock_->TimeInMilliseconds(); + std::unique_ptr packet = + BuildRtpPacket(kPayload, kMarkerBit, now_ms, now_ms); + packet->set_allow_retransmission(true); + packet_history_->PutRtpPacket(std::move(packet), now_ms); - rtp_egress()->SendPacket(packet_to_pace.get(), PacedPacketInfo()); + EXPECT_TRUE(packet_history_->GetPacketState(kSeqNum)); - EXPECT_EQ(1, transport_.packets_sent()); - absl::optional transmission_time_extension = - transport_.sent_packets_.back().GetExtension(); - ASSERT_TRUE(transmission_time_extension.has_value()); - EXPECT_EQ(*transmission_time_extension, kOffsetMs * kTimestampTicksPerMs); + // Update the sequence number of the RTP module, verify packet has been + // removed. + rtp_sender_->SetSequenceNumber(rtp_sender_->SequenceNumber() - 1); + EXPECT_FALSE(packet_history_->GetPacketState(kSeqNum)); +} - // Retransmit packet. The RTX packet should get the same capture time as the - // original packet, so offset is delta from original packet to now. 
- fake_clock_.AdvanceTimeMilliseconds(kOffsetMs); +TEST_F(RtpSenderTest, IgnoresNackAfterDisablingMedia) { + const int64_t kRtt = 10; - std::unique_ptr rtx_packet_to_pace; - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - EXPECT_GT(packets[0]->capture_time_ms(), 0); - rtx_packet_to_pace = std::move(packets[0]); - }); + EnableRtx(); + packet_history_->SetRtt(kRtt); - EXPECT_GT(rtp_sender()->ReSendPacket(kSeqNum), 0); - rtp_egress()->SendPacket(rtx_packet_to_pace.get(), PacedPacketInfo()); + // Put a packet in the history. + const int64_t start_time_ms = clock_->TimeInMilliseconds(); + std::unique_ptr packet = + BuildRtpPacket(kPayload, kMarkerBit, start_time_ms, + /*capture_time_ms=*/start_time_ms); + packet->set_allow_retransmission(true); + packet_history_->PutRtpPacket(std::move(packet), start_time_ms); - EXPECT_EQ(2, transport_.packets_sent()); - transmission_time_extension = - transport_.sent_packets_.back().GetExtension(); - ASSERT_TRUE(transmission_time_extension.has_value()); - EXPECT_EQ(*transmission_time_extension, - 2 * kOffsetMs * kTimestampTicksPerMs); + // Disable media sending and try to retransmit the packet, it should fail. + rtp_sender_->SetSendingMediaStatus(false); + time_controller_.AdvanceTime(TimeDelta::Millis(kRtt)); + EXPECT_LT(rtp_sender_->ReSendPacket(kSeqNum), 0); } -TEST_P(RtpSenderTestWithoutPacer, ClearHistoryOnSequenceNumberCange) { +TEST_F(RtpSenderTest, DoesntFecProtectRetransmissions) { + // Set up retranmission without RTX, so that a plain copy of the old packet is + // re-sent instead. 
const int64_t kRtt = 10; - - rtp_sender()->SetSendingMediaStatus(true); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( + rtp_sender_->SetSendingMediaStatus(true); + rtp_sender_->SetRtxStatus(kRtxOff); + packet_history_->SetStorePacketsStatus( RtpPacketHistory::StorageMode::kStoreAndCull, 10); - rtp_sender_context_->packet_history_.SetRtt(kRtt); + packet_history_->SetRtt(kRtt); - // Send a packet and record its sequence numbers. - SendGenericPacket(); - ASSERT_EQ(1u, transport_.sent_packets_.size()); - const uint16_t packet_seqence_number = - transport_.sent_packets_.back().SequenceNumber(); - - // Advance time and make sure it can be retransmitted, even if we try to set - // the ssrc the what it already is. - rtp_sender()->SetSequenceNumber(rtp_sender()->SequenceNumber()); - fake_clock_.AdvanceTimeMilliseconds(kRtt); - EXPECT_GT(rtp_sender()->ReSendPacket(packet_seqence_number), 0); - - // Change the sequence number, then move the time and try to retransmit again. - // The old packet should now be gone. - rtp_sender()->SetSequenceNumber(rtp_sender()->SequenceNumber() - 1); - fake_clock_.AdvanceTimeMilliseconds(kRtt); - EXPECT_EQ(rtp_sender()->ReSendPacket(packet_seqence_number), 0); -} + // Put a fec protected packet in the history. + const int64_t start_time_ms = clock_->TimeInMilliseconds(); + std::unique_ptr packet = + BuildRtpPacket(kPayload, kMarkerBit, start_time_ms, + /*capture_time_ms=*/start_time_ms); + packet->set_allow_retransmission(true); + packet->set_fec_protect_packet(true); + packet_history_->PutRtpPacket(std::move(packet), start_time_ms); -TEST_P(RtpSenderTest, IgnoresNackAfterDisablingMedia) { - const int64_t kRtt = 10; + // Re-send packet, the retransmitted packet should not have the FEC protection + // flag set. 
+ EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(ElementsAre(Pointee( + Property(&RtpPacketToSend::fec_protect_packet, false))))); - rtp_sender()->SetSendingMediaStatus(true); - rtp_sender()->SetRtxStatus(kRtxRetransmitted | kRtxRedundantPayloads); - rtp_sender()->SetRtxPayloadType(kRtxPayload, kPayload); - rtp_sender_context_->packet_history_.SetStorePacketsStatus( - RtpPacketHistory::StorageMode::kStoreAndCull, 10); - rtp_sender_context_->packet_history_.SetRtt(kRtt); + time_controller_.AdvanceTime(TimeDelta::Millis(kRtt)); + EXPECT_GT(rtp_sender_->ReSendPacket(kSeqNum), 0); +} - // Send a packet so it is in the packet history. - std::unique_ptr packet_to_pace; - EXPECT_CALL(mock_paced_sender_, EnqueuePackets) - .WillOnce([&](std::vector> packets) { - packet_to_pace = std::move(packets[0]); - }); +TEST_F(RtpSenderTest, MarksPacketsWithKeyframeStatus) { + FieldTrialBasedConfig field_trials; + RTPSenderVideo::Config video_config; + video_config.clock = clock_; + video_config.rtp_sender = rtp_sender_.get(); + video_config.field_trials = &field_trials; + RTPSenderVideo rtp_sender_video(video_config); - SendGenericPacket(); - rtp_egress()->SendPacket(packet_to_pace.get(), PacedPacketInfo()); + const uint8_t kPayloadType = 127; + const absl::optional kCodecType = + VideoCodecType::kVideoCodecGeneric; - ASSERT_EQ(1u, transport_.sent_packets_.size()); + const uint32_t kCaptureTimeMsToRtpTimestamp = 90; // 90 kHz clock - // Disable media sending and try to retransmit the packet, it should fail. 
- rtp_sender()->SetSendingMediaStatus(false); - fake_clock_.AdvanceTimeMilliseconds(kRtt); - EXPECT_LT(rtp_sender()->ReSendPacket(kSeqNum), 0); -} + { + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(Each( + Pointee(Property(&RtpPacketToSend::is_key_frame, true))))) + .Times(AtLeast(1)); + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + int64_t capture_time_ms = clock_->TimeInMilliseconds(); + EXPECT_TRUE(rtp_sender_video.SendVideo( + kPayloadType, kCodecType, + capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms, + kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs)); -INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, - RtpSenderTest, - ::testing::Values(TestConfig{false}, - TestConfig{true})); + time_controller_.AdvanceTime(TimeDelta::Millis(33)); + } + + { + EXPECT_CALL(mock_paced_sender_, + EnqueuePackets(Each( + Pointee(Property(&RtpPacketToSend::is_key_frame, false))))) + .Times(AtLeast(1)); + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameDelta; + int64_t capture_time_ms = clock_->TimeInMilliseconds(); + EXPECT_TRUE(rtp_sender_video.SendVideo( + kPayloadType, kCodecType, + capture_time_ms * kCaptureTimeMsToRtpTimestamp, capture_time_ms, + kPayloadData, video_header, kDefaultExpectedRetransmissionTimeMs)); -INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, - RtpSenderTestWithoutPacer, - ::testing::Values(TestConfig{false}, - TestConfig{true})); + time_controller_.AdvanceTime(TimeDelta::Millis(33)); + } +} } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_video.cc b/modules/rtp_rtcp/source/rtp_sender_video.cc index 8dbcc90763..4919e3ebf4 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video.cc @@ -34,8 +34,10 @@ #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include 
"modules/rtp_rtcp/source/rtp_packet_to_send.h" +#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h" #include "modules/rtp_rtcp/source/time_util.h" #include "rtc_base/checks.h" +#include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/logging.h" #include "rtc_base/trace_event.h" @@ -44,6 +46,8 @@ namespace webrtc { namespace { constexpr size_t kRedForFecHeaderLength = 1; constexpr int64_t kMaxUnretransmittableFrameIntervalMs = 33 * 4; +constexpr char kIncludeCaptureClockOffset[] = + "WebRTC-IncludeCaptureClockOffset"; void BuildRedPayload(const RtpPacketToSend& media_packet, RtpPacketToSend* red_packet) { @@ -109,10 +113,35 @@ const char* FrameTypeToString(VideoFrameType frame_type) { } #endif -bool IsNoopDelay(const PlayoutDelay& delay) { +bool IsNoopDelay(const VideoPlayoutDelay& delay) { return delay.min_ms == -1 && delay.max_ms == -1; } +absl::optional LoadVideoPlayoutDelayOverride( + const WebRtcKeyValueConfig* key_value_config) { + RTC_DCHECK(key_value_config); + FieldTrialOptional playout_delay_min_ms("min_ms", absl::nullopt); + FieldTrialOptional playout_delay_max_ms("max_ms", absl::nullopt); + ParseFieldTrial({&playout_delay_max_ms, &playout_delay_min_ms}, + key_value_config->Lookup("WebRTC-ForceSendPlayoutDelay")); + return playout_delay_max_ms && playout_delay_min_ms + ? absl::make_optional(*playout_delay_min_ms, + *playout_delay_max_ms) + : absl::nullopt; +} + +// Some packets can be skipped and the stream can still be decoded. Those +// packets are less likely to be retransmitted if they are lost. +bool PacketWillLikelyBeRequestedForRestransmitionIfLost( + const RTPVideoHeader& video_header) { + return IsBaseLayer(video_header) && + !(video_header.generic.has_value() + ? 
absl::c_linear_search( + video_header.generic->decode_target_indications, + DecodeTargetIndication::kDiscardable) + : false); +} + } // namespace RTPSenderVideo::RTPSenderVideo(const Config& config) @@ -124,13 +153,13 @@ RTPSenderVideo::RTPSenderVideo(const Config& config) : (kRetransmitBaseLayer | kConditionallyRetransmitHigherLayers)), last_rotation_(kVideoRotation_0), transmit_color_space_next_frame_(false), + send_allocation_(SendVideoLayersAllocation::kDontSend), current_playout_delay_{-1, -1}, playout_delay_pending_(false), + forced_playout_delay_(LoadVideoPlayoutDelayOverride(config.field_trials)), red_payload_type_(config.red_payload_type), - fec_generator_(config.fec_generator), fec_type_(config.fec_type), fec_overhead_bytes_(config.fec_overhead_bytes), - video_bitrate_(1000, RateStatistics::kBpsScale), packetization_overhead_bitrate_(1000, RateStatistics::kBpsScale), frame_encryptor_(config.frame_encryptor), require_frame_encryption_(config.require_frame_encryption), @@ -140,12 +169,15 @@ RTPSenderVideo::RTPSenderVideo(const Config& config) absolute_capture_time_sender_(config.clock), frame_transformer_delegate_( config.frame_transformer - ? new rtc::RefCountedObject< - RTPSenderVideoFrameTransformerDelegate>( + ? 
rtc::make_ref_counted( this, config.frame_transformer, - rtp_sender_->SSRC()) - : nullptr) { + rtp_sender_->SSRC(), + config.send_transport_queue) + : nullptr), + include_capture_clock_offset_(absl::StartsWith( + config.field_trials->Lookup(kIncludeCaptureClockOffset), + "Enabled")) { if (frame_transformer_delegate_) frame_transformer_delegate_->Init(); } @@ -158,31 +190,16 @@ RTPSenderVideo::~RTPSenderVideo() { void RTPSenderVideo::LogAndSendToNetwork( std::vector> packets, size_t unpacketized_payload_size) { - int64_t now_ms = clock_->TimeInMilliseconds(); -#if BWE_TEST_LOGGING_COMPILE_TIME_ENABLE - if (fec_generator_) { - uint32_t fec_rate_kbps = fec_generator_->CurrentFecRate().kbps(); - for (const auto& packet : packets) { - if (packet->packet_type() == - RtpPacketMediaType::kForwardErrorCorrection) { - const uint32_t ssrc = packet->Ssrc(); - BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "VideoFecBitrate_kbps", now_ms, - fec_rate_kbps, ssrc); - } - } - } -#endif - { - rtc::CritScope cs(&stats_crit_); + MutexLock lock(&stats_mutex_); size_t packetized_payload_size = 0; for (const auto& packet : packets) { if (*packet->packet_type() == RtpPacketMediaType::kVideo) { - video_bitrate_.Update(packet->size(), now_ms); packetized_payload_size += packet->payload_size(); } } - // AV1 packetizer may produce less packetized bytes than unpacketized. + // AV1 and H264 packetizers may produce less packetized bytes than + // unpacketized. if (packetized_payload_size >= unpacketized_payload_size) { packetization_overhead_bitrate_.Update( packetized_payload_size - unpacketized_payload_size, @@ -219,11 +236,15 @@ void RTPSenderVideo::SetVideoStructure( frame_transformer_delegate_->SetVideoStructureUnderLock(video_structure); return; } - // Lock is being held by SetVideoStructure() caller. 
- SetVideoStructureUnderLock(video_structure); + SetVideoStructureInternal(video_structure); } -void RTPSenderVideo::SetVideoStructureUnderLock( +void RTPSenderVideo::SetVideoStructureAfterTransformation( + const FrameDependencyStructure* video_structure) { + SetVideoStructureInternal(video_structure); +} + +void RTPSenderVideo::SetVideoStructureInternal( const FrameDependencyStructure* video_structure) { RTC_DCHECK_RUNS_SERIALIZED(&send_checker_); if (video_structure == nullptr) { @@ -253,6 +274,33 @@ void RTPSenderVideo::SetVideoStructureUnderLock( video_structure_->structure_id = structure_id; } +void RTPSenderVideo::SetVideoLayersAllocation( + VideoLayersAllocation allocation) { + if (frame_transformer_delegate_) { + frame_transformer_delegate_->SetVideoLayersAllocationUnderLock( + std::move(allocation)); + return; + } + SetVideoLayersAllocationInternal(std::move(allocation)); +} + +void RTPSenderVideo::SetVideoLayersAllocationAfterTransformation( + VideoLayersAllocation allocation) { + SetVideoLayersAllocationInternal(std::move(allocation)); +} + +void RTPSenderVideo::SetVideoLayersAllocationInternal( + VideoLayersAllocation allocation) { + RTC_DCHECK_RUNS_SERIALIZED(&send_checker_); + if (!allocation_ || allocation.active_spatial_layers.size() > + allocation_->active_spatial_layers.size()) { + send_allocation_ = SendVideoLayersAllocation::kSendWithResolution; + } else if (send_allocation_ == SendVideoLayersAllocation::kDontSend) { + send_allocation_ = SendVideoLayersAllocation::kSendWithoutResolution; + } + allocation_ = std::move(allocation); +} + void RTPSenderVideo::AddRtpHeaderExtensions( const RTPVideoHeader& video_header, const absl::optional& absolute_capture_time, @@ -311,17 +359,10 @@ void RTPSenderVideo::AddRtpHeaderExtensions( packet->SetExtension(*absolute_capture_time); } - if (video_header.codec == kVideoCodecH264 && - video_header.frame_marking.temporal_id != kNoTemporalIdx) { - FrameMarking frame_marking = video_header.frame_marking; - 
frame_marking.start_of_frame = first_packet; - frame_marking.end_of_frame = last_packet; - packet->SetExtension(frame_marking); - } - if (video_header.generic) { bool extension_is_set = false; - if (video_structure_ != nullptr) { + if (packet->IsRegistered() && + video_structure_ != nullptr) { DependencyDescriptor descriptor; descriptor.first_packet_in_frame = first_packet; descriptor.last_packet_in_frame = last_packet; @@ -342,22 +383,33 @@ void RTPSenderVideo::AddRtpHeaderExtensions( descriptor.frame_dependencies.decode_target_indications.size(), video_structure_->num_decode_targets); - // To avoid extra structure copy, temporary share ownership of the - // video_structure with the dependency descriptor. + if (first_packet) { + descriptor.active_decode_targets_bitmask = + active_decode_targets_tracker_.ActiveDecodeTargetsBitmask(); + } + // VP9 mark all layer frames of the first picture as kVideoFrameKey, + // Structure should be attached to the descriptor to lowest spatial layer + // when inter layer dependency is used, i.e. L structures; or to all + // layers when inter layer dependency is not used, i.e. S structures. + // Distinguish these two cases by checking if there are any dependencies. if (video_header.frame_type == VideoFrameType::kVideoFrameKey && - first_packet) { + video_header.generic->dependencies.empty() && first_packet) { + // To avoid extra structure copy, temporary share ownership of the + // video_structure with the dependency descriptor. descriptor.attached_structure = absl::WrapUnique(video_structure_.get()); } extension_is_set = packet->SetExtension( - *video_structure_, descriptor); + *video_structure_, + active_decode_targets_tracker_.ActiveChainsBitmask(), descriptor); // Remove the temporary shared ownership. descriptor.attached_structure.release(); } // Do not use generic frame descriptor when dependency descriptor is stored. 
- if (!extension_is_set) { + if (packet->IsRegistered() && + !extension_is_set) { RtpGenericFrameDescriptor generic_descriptor; generic_descriptor.SetFirstPacketInSubFrame(first_packet); generic_descriptor.SetLastPacketInSubFrame(last_packet); @@ -386,6 +438,22 @@ void RTPSenderVideo::AddRtpHeaderExtensions( generic_descriptor); } } + + if (packet->IsRegistered() && + first_packet && + send_allocation_ != SendVideoLayersAllocation::kDontSend && + (video_header.frame_type == VideoFrameType::kVideoFrameKey || + PacketWillLikelyBeRequestedForRestransmitionIfLost(video_header))) { + VideoLayersAllocation allocation = allocation_.value(); + allocation.resolution_and_frame_rate_is_valid = + send_allocation_ == SendVideoLayersAllocation::kSendWithResolution; + packet->SetExtension(allocation); + } + + if (first_packet && video_header.video_frame_tracking_id) { + packet->SetExtension( + *video_header.video_frame_tracking_id); + } } bool RTPSenderVideo::SendVideo( @@ -394,9 +462,9 @@ bool RTPSenderVideo::SendVideo( uint32_t rtp_timestamp, int64_t capture_time_ms, rtc::ArrayView payload, - const RTPFragmentationHeader* fragmentation, RTPVideoHeader video_header, - absl::optional expected_retransmission_time_ms) { + absl::optional expected_retransmission_time_ms, + absl::optional estimated_capture_clock_offset_ms) { #if RTC_TRACE_EVENTS_ENABLED TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms, "Send", "type", FrameTypeToString(video_header.frame_type)); @@ -416,15 +484,34 @@ bool RTPSenderVideo::SendVideo( } MaybeUpdateCurrentPlayoutDelay(video_header); - if (video_header.frame_type == VideoFrameType::kVideoFrameKey && - !IsNoopDelay(current_playout_delay_)) { - // Force playout delay on key-frames, if set. - playout_delay_pending_ = true; + if (video_header.frame_type == VideoFrameType::kVideoFrameKey) { + if (!IsNoopDelay(current_playout_delay_)) { + // Force playout delay on key-frames, if set. 
+ playout_delay_pending_ = true; + } + if (allocation_) { + // Send the bitrate allocation on every key frame. + send_allocation_ = SendVideoLayersAllocation::kSendWithResolution; + } } + if (video_structure_ != nullptr && video_header.generic) { + active_decode_targets_tracker_.OnFrame( + video_structure_->decode_target_protected_by_chain, + video_header.generic->active_decode_targets, + video_header.frame_type == VideoFrameType::kVideoFrameKey, + video_header.generic->frame_id, video_header.generic->chain_diffs); + } + + const uint8_t temporal_id = GetTemporalId(video_header); + // No FEC protection for upper temporal layers, if used. + const bool use_fec = fec_type_.has_value() && + (temporal_id == 0 || temporal_id == kNoTemporalIdx); + // Maximum size of packet including rtp headers. // Extra space left in case packet will be resent using fec or rtx. - int packet_capacity = rtp_sender_->MaxRtpPacketSize() - FecPacketOverhead() - + int packet_capacity = rtp_sender_->MaxRtpPacketSize() - + (use_fec ? FecPacketOverhead() : 0) - (rtp_sender_->RtxStatus() ? kRtxHeaderSize : 0); std::unique_ptr single_packet = @@ -439,8 +526,11 @@ bool RTPSenderVideo::SendVideo( AbsoluteCaptureTimeSender::GetSource(single_packet->Ssrc(), single_packet->Csrcs()), single_packet->Timestamp(), kVideoPayloadTypeFrequency, - Int64MsToUQ32x32(single_packet->capture_time_ms() + NtpOffsetMs()), - /*estimated_capture_clock_offset=*/absl::nullopt); + Int64MsToUQ32x32( + clock_->ConvertTimestampToNtpTimeInMilliseconds(capture_time_ms)), + /*estimated_capture_clock_offset=*/ + include_capture_clock_offset_ ? estimated_capture_clock_offset_ms + : absl::nullopt); auto first_packet = std::make_unique(*single_packet); auto middle_packet = std::make_unique(*single_packet); @@ -482,8 +572,8 @@ bool RTPSenderVideo::SendVideo( first_packet->HasExtension() || first_packet->HasExtension(); - // Minimization of the vp8 descriptor may erase temporal_id, so save it. 
- const uint8_t temporal_id = GetTemporalId(video_header); + // Minimization of the vp8 descriptor may erase temporal_id, so use + // |temporal_id| rather than reference |video_header| beyond this point. if (has_generic_descriptor) { MinimizeDescriptor(&video_header); } @@ -522,8 +612,8 @@ bool RTPSenderVideo::SendVideo( "one is required since require_frame_encryptor is set"; } - std::unique_ptr packetizer = RtpPacketizer::Create( - codec_type, payload, limits, video_header, fragmentation); + std::unique_ptr packetizer = + RtpPacketizer::Create(codec_type, payload, limits, video_header); // TODO(bugs.webrtc.org/10714): retransmission_settings_ should generally be // replaced by expected_retransmission_time_ms.has_value(). For now, though, @@ -535,16 +625,6 @@ bool RTPSenderVideo::SendVideo( : false; const size_t num_packets = packetizer->NumPackets(); - size_t unpacketized_payload_size; - if (fragmentation && fragmentation->fragmentationVectorSize > 0) { - unpacketized_payload_size = 0; - for (uint16_t i = 0; i < fragmentation->fragmentationVectorSize; ++i) { - unpacketized_payload_size += fragmentation->fragmentationLength[i]; - } - } else { - unpacketized_payload_size = payload.size(); - } - if (num_packets == 0) return false; @@ -576,33 +656,27 @@ bool RTPSenderVideo::SendVideo( if (!packetizer->NextPacket(packet.get())) return false; RTC_DCHECK_LE(packet->payload_size(), expected_payload_capacity); - if (!rtp_sender_->AssignSequenceNumber(packet.get())) - return false; packet->set_allow_retransmission(allow_retransmission); + packet->set_is_key_frame(video_header.frame_type == + VideoFrameType::kVideoFrameKey); // Put packetization finish timestamp into extension. if (packet->HasExtension()) { packet->set_packetization_finish_time_ms(clock_->TimeInMilliseconds()); } - // No FEC protection for upper temporal layers, if used. 
- if (fec_type_.has_value() && - (temporal_id == 0 || temporal_id == kNoTemporalIdx)) { - if (fec_generator_) { - fec_generator_->AddPacketAndGenerateFec(*packet); - } else { - // TODO(sprang): When deferred FEC generation is enabled, just mark the - // packet as protected here. - } - } + packet->set_fec_protect_packet(use_fec); if (red_enabled()) { + // TODO(sprang): Consider packetizing directly into packets with the RED + // header already in place, to avoid this copy. std::unique_ptr red_packet(new RtpPacketToSend(*packet)); BuildRedPayload(*packet, red_packet.get()); red_packet->SetPayloadType(*red_payload_type_); + red_packet->set_is_red(true); - // Send |red_packet| instead of |packet| for allocated sequence number. + // Append |red_packet| instead of |packet| to output. red_packet->set_packet_type(RtpPacketMediaType::kVideo); red_packet->set_allow_retransmission(packet->allow_retransmission()); rtp_packets.emplace_back(std::move(red_packet)); @@ -623,20 +697,12 @@ bool RTPSenderVideo::SendVideo( } } - if (fec_generator_) { - // Fetch any FEC packets generated from the media frame and add them to - // the list of packets to send. - auto fec_packets = fec_generator_->GetFecPackets(); - const bool generate_sequence_numbers = !fec_generator_->FecSsrc(); - for (auto& fec_packet : fec_packets) { - if (generate_sequence_numbers) { - rtp_sender_->AssignSequenceNumber(fec_packet.get()); - } - rtp_packets.emplace_back(std::move(fec_packet)); - } + if (!rtp_sender_->AssignSequenceNumbersAndStoreLastPacketState(rtp_packets)) { + // Media not being sent. + return false; } - LogAndSendToNetwork(std::move(rtp_packets), unpacketized_payload_size); + LogAndSendToNetwork(std::move(rtp_packets), payload.size()); // Update details about the last sent frame. 
last_rotation_ = video_header.rotation; @@ -650,15 +716,11 @@ bool RTPSenderVideo::SendVideo( } if (video_header.frame_type == VideoFrameType::kVideoFrameKey || - (IsBaseLayer(video_header) && - !(video_header.generic.has_value() - ? absl::c_linear_search( - video_header.generic->decode_target_indications, - DecodeTargetIndication::kDiscardable) - : false))) { - // This frame has guaranteed delivery, no need to populate playout + PacketWillLikelyBeRequestedForRestransmitionIfLost(video_header)) { + // This frame will likely be delivered, no need to populate playout // delay extensions until it changes again. playout_delay_pending_ = false; + send_allocation_ = SendVideoLayersAllocation::kDontSend; } TRACE_EVENT_ASYNC_END1("webrtc", "Video", capture_time_ms, "timestamp", @@ -671,27 +733,21 @@ bool RTPSenderVideo::SendEncodedImage( absl::optional codec_type, uint32_t rtp_timestamp, const EncodedImage& encoded_image, - const RTPFragmentationHeader* fragmentation, RTPVideoHeader video_header, absl::optional expected_retransmission_time_ms) { if (frame_transformer_delegate_) { // The frame will be sent async once transformed. 
return frame_transformer_delegate_->TransformFrame( - payload_type, codec_type, rtp_timestamp, encoded_image, fragmentation, - video_header, expected_retransmission_time_ms); + payload_type, codec_type, rtp_timestamp, encoded_image, video_header, + expected_retransmission_time_ms); } return SendVideo(payload_type, codec_type, rtp_timestamp, - encoded_image.capture_time_ms_, encoded_image, fragmentation, - video_header, expected_retransmission_time_ms); -} - -uint32_t RTPSenderVideo::VideoBitrateSent() const { - rtc::CritScope cs(&stats_crit_); - return video_bitrate_.Rate(clock_->TimeInMilliseconds()).value_or(0); + encoded_image.capture_time_ms_, encoded_image, video_header, + expected_retransmission_time_ms); } uint32_t RTPSenderVideo::PacketizationOverheadBps() const { - rtc::CritScope cs(&stats_crit_); + MutexLock lock(&stats_mutex_); return packetization_overhead_bitrate_.Rate(clock_->TimeInMilliseconds()) .value_or(0); } @@ -703,7 +759,7 @@ bool RTPSenderVideo::AllowRetransmission( if (retransmission_settings == kRetransmitOff) return false; - rtc::CritScope cs(&stats_crit_); + MutexLock lock(&stats_mutex_); // Media packet storage. 
if ((retransmission_settings & kConditionallyRetransmitHigherLayers) && UpdateConditionalRetransmit(temporal_id, @@ -735,12 +791,7 @@ uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) { } uint8_t operator()(const absl::monostate&) { return kNoTemporalIdx; } }; - switch (header.codec) { - case kVideoCodecH264: - return header.frame_marking.temporal_id; - default: - return absl::visit(TemporalIdGetter(), header.video_type_header); - } + return absl::visit(TemporalIdGetter(), header.video_type_header); } bool RTPSenderVideo::UpdateConditionalRetransmit( @@ -791,12 +842,13 @@ bool RTPSenderVideo::UpdateConditionalRetransmit( void RTPSenderVideo::MaybeUpdateCurrentPlayoutDelay( const RTPVideoHeader& header) { - if (IsNoopDelay(header.playout_delay)) { + VideoPlayoutDelay requested_delay = + forced_playout_delay_.value_or(header.playout_delay); + + if (IsNoopDelay(requested_delay)) { return; } - PlayoutDelay requested_delay = header.playout_delay; - if (requested_delay.min_ms > PlayoutDelayLimits::kMaxMs || requested_delay.max_ms > PlayoutDelayLimits::kMaxMs) { RTC_DLOG(LS_ERROR) diff --git a/modules/rtp_rtcp/source/rtp_sender_video.h b/modules/rtp_rtcp/source/rtp_sender_video.h index bf5f181823..ba8d7e8360 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video.h +++ b/modules/rtp_rtcp/source/rtp_sender_video.h @@ -20,22 +20,24 @@ #include "api/array_view.h" #include "api/frame_transformer_interface.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/task_queue/task_queue_base.h" #include "api/transport/rtp/dependency_descriptor.h" #include "api/video/video_codec_type.h" #include "api/video/video_frame_type.h" -#include "modules/include/module_common_types.h" +#include "api/video/video_layers_allocation.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/absolute_capture_time_sender.h" +#include "modules/rtp_rtcp/source/active_decode_targets_helper.h" #include 
"modules/rtp_rtcp/source/rtp_rtcp_config.h" #include "modules/rtp_rtcp/source/rtp_sender.h" #include "modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h" #include "modules/rtp_rtcp/source/rtp_video_header.h" #include "modules/rtp_rtcp/source/video_fec_generator.h" -#include "rtc_base/critical_section.h" #include "rtc_base/one_time_event.h" #include "rtc_base/race_checker.h" #include "rtc_base/rate_statistics.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -69,8 +71,6 @@ class RTPSenderVideo { // expected to outlive the RTPSenderVideo object they are passed to. Clock* clock = nullptr; RTPSender* rtp_sender = nullptr; - FlexfecSender* flexfec_sender = nullptr; - VideoFecGenerator* fec_generator = nullptr; // Some FEC data is duplicated here in preparation of moving FEC to // the egress stage. absl::optional fec_type; @@ -81,6 +81,7 @@ class RTPSenderVideo { absl::optional red_payload_type; const WebRtcKeyValueConfig* field_trials = nullptr; rtc::scoped_refptr frame_transformer; + TaskQueueBase* send_transport_queue = nullptr; }; explicit RTPSenderVideo(const Config& config); @@ -88,22 +89,27 @@ class RTPSenderVideo { virtual ~RTPSenderVideo(); // expected_retransmission_time_ms.has_value() -> retransmission allowed. + // `capture_time_ms` and `clock::CurrentTime` should be using the same epoch. // Calls to this method is assumed to be externally serialized. + // |estimated_capture_clock_offset_ms| is an estimated clock offset between + // this sender and the original capturer, for this video packet. See + // http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time for more + // details. If the sender and the capture has the same clock, it is supposed + // to be zero valued, which is given as the default. 
bool SendVideo(int payload_type, absl::optional codec_type, uint32_t rtp_timestamp, int64_t capture_time_ms, rtc::ArrayView payload, - const RTPFragmentationHeader* fragmentation, RTPVideoHeader video_header, - absl::optional expected_retransmission_time_ms); + absl::optional expected_retransmission_time_ms, + absl::optional estimated_capture_clock_offset_ms = 0); bool SendEncodedImage( int payload_type, absl::optional codec_type, uint32_t rtp_timestamp, const EncodedImage& encoded_image, - const RTPFragmentationHeader* fragmentation, RTPVideoHeader video_header, absl::optional expected_retransmission_time_ms); @@ -113,14 +119,27 @@ class RTPSenderVideo { // All calls to SendVideo after this call must use video_header compatible // with the video_structure. void SetVideoStructure(const FrameDependencyStructure* video_structure); - void SetVideoStructureUnderLock( + // Should only be used by a RTPSenderVideoFrameTransformerDelegate and exists + // to ensure correct syncronization. + void SetVideoStructureAfterTransformation( const FrameDependencyStructure* video_structure); - uint32_t VideoBitrateSent() const; + // Sets current active VideoLayersAllocation. The allocation will be sent + // using the rtp video layers allocation extension. The allocation will be + // sent in full on every key frame. The allocation will be sent once on a + // none discardable delta frame per call to this method and will not contain + // resolution and frame rate. + void SetVideoLayersAllocation(VideoLayersAllocation allocation); + // Should only be used by a RTPSenderVideoFrameTransformerDelegate and exists + // to ensure correct syncronization. + void SetVideoLayersAllocationAfterTransformation( + VideoLayersAllocation allocation); // Returns the current packetization overhead rate, in bps. 
Note that this is // the payload overhead, eg the VP8 payload headers, not the RTP headers // or extension/ + // TODO(sprang): Consider moving this to RtpSenderEgress so it's in the same + // place as the other rate stats. uint32_t PacketizationOverheadBps() const; protected: @@ -141,6 +160,16 @@ class RTPSenderVideo { int64_t last_frame_time_ms; }; + enum class SendVideoLayersAllocation { + kSendWithResolution, + kSendWithoutResolution, + kDontSend + }; + + void SetVideoStructureInternal( + const FrameDependencyStructure* video_structure); + void SetVideoLayersAllocationInternal(VideoLayersAllocation allocation); + void AddRtpHeaderExtensions( const RTPVideoHeader& video_header, const absl::optional& absolute_capture_time, @@ -159,7 +188,7 @@ class RTPSenderVideo { bool UpdateConditionalRetransmit(uint8_t temporal_id, int64_t expected_retransmission_time_ms) - RTC_EXCLUSIVE_LOCKS_REQUIRED(stats_crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(stats_mutex_); void MaybeUpdateCurrentPlayoutDelay(const RTPVideoHeader& header) RTC_EXCLUSIVE_LOCKS_REQUIRED(send_checker_); @@ -177,28 +206,32 @@ class RTPSenderVideo { bool transmit_color_space_next_frame_ RTC_GUARDED_BY(send_checker_); std::unique_ptr video_structure_ RTC_GUARDED_BY(send_checker_); + absl::optional allocation_ + RTC_GUARDED_BY(send_checker_); + // Flag indicating if we should send |allocation_|. + SendVideoLayersAllocation send_allocation_ RTC_GUARDED_BY(send_checker_); // Current target playout delay. - PlayoutDelay current_playout_delay_ RTC_GUARDED_BY(send_checker_); - // Flag indicating if we need to propagate |current_playout_delay_| in order + VideoPlayoutDelay current_playout_delay_ RTC_GUARDED_BY(send_checker_); + // Flag indicating if we need to send |current_playout_delay_| in order // to guarantee it gets delivered. bool playout_delay_pending_; + // Set by the field trial WebRTC-ForceSendPlayoutDelay to override the playout + // delay of outgoing video frames. 
+ const absl::optional forced_playout_delay_; // Should never be held when calling out of this class. - rtc::CriticalSection crit_; + Mutex mutex_; const absl::optional red_payload_type_; - VideoFecGenerator* const fec_generator_; absl::optional fec_type_; const size_t fec_overhead_bytes_; // Per packet max FEC overhead. - rtc::CriticalSection stats_crit_; - // Bitrate used for video payload and RTP headers. - RateStatistics video_bitrate_ RTC_GUARDED_BY(stats_crit_); - RateStatistics packetization_overhead_bitrate_ RTC_GUARDED_BY(stats_crit_); + mutable Mutex stats_mutex_; + RateStatistics packetization_overhead_bitrate_ RTC_GUARDED_BY(stats_mutex_); std::map frame_stats_by_temporal_layer_ - RTC_GUARDED_BY(stats_crit_); + RTC_GUARDED_BY(stats_mutex_); OneTimeEvent first_frame_sent_; @@ -212,9 +245,14 @@ class RTPSenderVideo { const bool generic_descriptor_auth_experiment_; AbsoluteCaptureTimeSender absolute_capture_time_sender_; + // Tracks updates to the active decode targets and decides when active decode + // targets bitmask should be attached to the dependency descriptor. 
+ ActiveDecodeTargetsHelper active_decode_targets_tracker_; const rtc::scoped_refptr frame_transformer_delegate_; + + const bool include_capture_clock_offset_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc index 60740d3681..23e66bf757 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.cc @@ -21,15 +21,6 @@ namespace webrtc { namespace { -std::unique_ptr CreateFragmentationHeader( - const RTPFragmentationHeader* fragmentation_header) { - if (!fragmentation_header) - return nullptr; - auto ret = std::make_unique(); - ret->CopyFrom(*fragmentation_header); - return ret; -} - class TransformableVideoSenderFrame : public TransformableVideoFrameInterface { public: TransformableVideoSenderFrame( @@ -38,7 +29,6 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface { int payload_type, absl::optional codec_type, uint32_t rtp_timestamp, - const RTPFragmentationHeader* fragmentation_header, absl::optional expected_retransmission_time_ms, uint32_t ssrc) : encoded_data_(encoded_image.GetEncodedData()), @@ -50,9 +40,7 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface { timestamp_(rtp_timestamp), capture_time_ms_(encoded_image.capture_time_ms_), expected_retransmission_time_ms_(expected_retransmission_time_ms), - ssrc_(ssrc), - fragmentation_header_(CreateFragmentationHeader(fragmentation_header)) { - } + ssrc_(ssrc) {} ~TransformableVideoSenderFrame() override = default; @@ -83,10 +71,6 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface { absl::optional GetCodecType() const { return codec_type_; } int64_t GetCaptureTimeMs() const { return capture_time_ms_; } - RTPFragmentationHeader* GetFragmentationHeader() const { - return fragmentation_header_.get(); - 
} - const absl::optional& GetExpectedRetransmissionTimeMs() const { return expected_retransmission_time_ms_; } @@ -102,17 +86,18 @@ class TransformableVideoSenderFrame : public TransformableVideoFrameInterface { const int64_t capture_time_ms_; const absl::optional expected_retransmission_time_ms_; const uint32_t ssrc_; - const std::unique_ptr fragmentation_header_; }; } // namespace RTPSenderVideoFrameTransformerDelegate::RTPSenderVideoFrameTransformerDelegate( RTPSenderVideo* sender, rtc::scoped_refptr frame_transformer, - uint32_t ssrc) + uint32_t ssrc, + TaskQueueBase* send_transport_queue) : sender_(sender), frame_transformer_(std::move(frame_transformer)), - ssrc_(ssrc) {} + ssrc_(ssrc), + send_transport_queue_(send_transport_queue) {} void RTPSenderVideoFrameTransformerDelegate::Init() { frame_transformer_->RegisterTransformedFrameSinkCallback( @@ -124,24 +109,30 @@ bool RTPSenderVideoFrameTransformerDelegate::TransformFrame( absl::optional codec_type, uint32_t rtp_timestamp, const EncodedImage& encoded_image, - const RTPFragmentationHeader* fragmentation, RTPVideoHeader video_header, absl::optional expected_retransmission_time_ms) { - if (!encoder_queue_) - encoder_queue_ = TaskQueueBase::Current(); + if (!encoder_queue_) { + // Save the current task queue to post the transformed frame for sending + // once it is transformed. When there is no current task queue, i.e. + // encoding is done on an external thread (for example in the case of + // hardware encoders), use the send transport queue instead. + TaskQueueBase* current = TaskQueueBase::Current(); + encoder_queue_ = current ? 
current : send_transport_queue_; + } frame_transformer_->Transform(std::make_unique( encoded_image, video_header, payload_type, codec_type, rtp_timestamp, - fragmentation, expected_retransmission_time_ms, ssrc_)); + expected_retransmission_time_ms, ssrc_)); return true; } void RTPSenderVideoFrameTransformerDelegate::OnTransformedFrame( std::unique_ptr frame) { - rtc::CritScope lock(&sender_lock_); + MutexLock lock(&sender_lock_); - // The encoder queue gets destroyed after the sender; as long as the sender is - // alive, it's safe to post. - if (!sender_) + // The encoder queue normally gets destroyed after the sender; + // however, it might still be null by the time a previously queued frame + // arrives. + if (!sender_ || !encoder_queue_) return; rtc::scoped_refptr delegate = this; encoder_queue_->PostTask(ToQueuedTask( @@ -153,7 +144,7 @@ void RTPSenderVideoFrameTransformerDelegate::OnTransformedFrame( void RTPSenderVideoFrameTransformerDelegate::SendVideo( std::unique_ptr transformed_frame) const { RTC_CHECK(encoder_queue_->IsCurrent()); - rtc::CritScope lock(&sender_lock_); + MutexLock lock(&sender_lock_); if (!sender_) return; auto* transformed_video_frame = @@ -164,23 +155,29 @@ void RTPSenderVideoFrameTransformerDelegate::SendVideo( transformed_video_frame->GetTimestamp(), transformed_video_frame->GetCaptureTimeMs(), transformed_video_frame->GetData(), - transformed_video_frame->GetFragmentationHeader(), transformed_video_frame->GetHeader(), transformed_video_frame->GetExpectedRetransmissionTimeMs()); } void RTPSenderVideoFrameTransformerDelegate::SetVideoStructureUnderLock( const FrameDependencyStructure* video_structure) { - rtc::CritScope lock(&sender_lock_); + MutexLock lock(&sender_lock_); + RTC_CHECK(sender_); + sender_->SetVideoStructureAfterTransformation(video_structure); +} + +void RTPSenderVideoFrameTransformerDelegate::SetVideoLayersAllocationUnderLock( + VideoLayersAllocation allocation) { + MutexLock lock(&sender_lock_); RTC_CHECK(sender_); - 
sender_->SetVideoStructureUnderLock(video_structure); + sender_->SetVideoLayersAllocationAfterTransformation(std::move(allocation)); } void RTPSenderVideoFrameTransformerDelegate::Reset() { frame_transformer_->UnregisterTransformedFrameSinkCallback(ssrc_); frame_transformer_ = nullptr; { - rtc::CritScope lock(&sender_lock_); + MutexLock lock(&sender_lock_); sender_ = nullptr; } } diff --git a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h index 29ac9e4e1c..8573869296 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h +++ b/modules/rtp_rtcp/source/rtp_sender_video_frame_transformer_delegate.h @@ -16,7 +16,8 @@ #include "api/frame_transformer_interface.h" #include "api/scoped_refptr.h" #include "api/task_queue/task_queue_base.h" -#include "rtc_base/critical_section.h" +#include "api/video/video_layers_allocation.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -30,7 +31,8 @@ class RTPSenderVideoFrameTransformerDelegate : public TransformedFrameCallback { RTPSenderVideoFrameTransformerDelegate( RTPSenderVideo* sender, rtc::scoped_refptr frame_transformer, - uint32_t ssrc); + uint32_t ssrc, + TaskQueueBase* send_transport_queue); void Init(); @@ -39,7 +41,6 @@ class RTPSenderVideoFrameTransformerDelegate : public TransformedFrameCallback { absl::optional codec_type, uint32_t rtp_timestamp, const EncodedImage& encoded_image, - const RTPFragmentationHeader* fragmentation, RTPVideoHeader video_header, absl::optional expected_retransmission_time_ms); @@ -51,10 +52,16 @@ class RTPSenderVideoFrameTransformerDelegate : public TransformedFrameCallback { // Delegates the call to RTPSendVideo::SendVideo on the |encoder_queue_|. void SendVideo(std::unique_ptr frame) const; - // Delegates the call to RTPSendVideo::SendVideo under |sender_lock_|. 
+ // Delegates the call to RTPSendVideo::SetVideoStructureAfterTransformation + // under |sender_lock_|. void SetVideoStructureUnderLock( const FrameDependencyStructure* video_structure); + // Delegates the call to + // RTPSendVideo::SetVideoLayersAllocationAfterTransformation under + // |sender_lock_|. + void SetVideoLayersAllocationUnderLock(VideoLayersAllocation allocation); + // Unregisters and releases the |frame_transformer_| reference, and resets // |sender_| under lock. Called from RTPSenderVideo destructor to prevent the // |sender_| to dangle. @@ -64,11 +71,12 @@ class RTPSenderVideoFrameTransformerDelegate : public TransformedFrameCallback { ~RTPSenderVideoFrameTransformerDelegate() override = default; private: - rtc::CriticalSection sender_lock_; + mutable Mutex sender_lock_; RTPSenderVideo* sender_ RTC_GUARDED_BY(sender_lock_); rtc::scoped_refptr frame_transformer_; const uint32_t ssrc_; TaskQueueBase* encoder_queue_ = nullptr; + TaskQueueBase* send_transport_queue_; }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc index 80481dc2e5..ea727828cc 100644 --- a/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc +++ b/modules/rtp_rtcp/source/rtp_sender_video_unittest.cc @@ -24,7 +24,6 @@ #include "common_video/generic_frame_descriptor/generic_frame_info.h" #include "modules/rtp_rtcp/include/rtp_cvo.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h" @@ -33,7 +32,8 @@ #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/rtp_rtcp/source/time_util.h" 
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h" #include "rtc_base/arraysize.h" #include "rtc_base/rate_limiter.h" #include "rtc_base/task_queue_for_test.h" @@ -59,7 +59,6 @@ using ::testing::WithArgs; enum : int { // The first valid value is 1. kAbsoluteSendTimeExtensionId = 1, - kFrameMarkingExtensionId, kGenericDescriptorId, kDependencyDescriptorId, kTransmissionTimeOffsetExtensionId, @@ -67,7 +66,8 @@ enum : int { // The first valid value is 1. kVideoRotationExtensionId, kVideoTimingExtensionId, kAbsoluteCaptureTimeExtensionId, - kPlayoutDelayExtensionId + kPlayoutDelayExtensionId, + kVideoLayersAllocationExtensionId, }; constexpr int kPayload = 100; @@ -95,12 +95,12 @@ class LoopbackTransportTest : public webrtc::Transport { kGenericDescriptorId); receivers_extensions_.Register( kDependencyDescriptorId); - receivers_extensions_.Register( - kFrameMarkingExtensionId); receivers_extensions_.Register( kAbsoluteCaptureTimeExtensionId); receivers_extensions_.Register( kPlayoutDelayExtensionId); + receivers_extensions_.Register( + kVideoLayersAllocationExtensionId); } bool SendRtp(const uint8_t* data, @@ -126,13 +126,11 @@ class TestRtpSenderVideo : public RTPSenderVideo { public: TestRtpSenderVideo(Clock* clock, RTPSender* rtp_sender, - FlexfecSender* flexfec_sender, const WebRtcKeyValueConfig& field_trials) : RTPSenderVideo([&] { Config config; config.clock = clock; config.rtp_sender = rtp_sender; - config.fec_generator = flexfec_sender; config.field_trials = &field_trials; return config; }()) {} @@ -150,17 +148,25 @@ class TestRtpSenderVideo : public RTPSenderVideo { class FieldTrials : public WebRtcKeyValueConfig { public: explicit FieldTrials(bool use_send_side_bwe_with_overhead) - : use_send_side_bwe_with_overhead_(use_send_side_bwe_with_overhead) {} + : use_send_side_bwe_with_overhead_(use_send_side_bwe_with_overhead), + include_capture_clock_offset_(false) {} + + void 
set_include_capture_clock_offset(bool include_capture_clock_offset) { + include_capture_clock_offset_ = include_capture_clock_offset; + } std::string Lookup(absl::string_view key) const override { - return key == "WebRTC-SendSideBwe-WithOverhead" && - use_send_side_bwe_with_overhead_ - ? "Enabled" - : ""; + if (key == "WebRTC-SendSideBwe-WithOverhead") { + return use_send_side_bwe_with_overhead_ ? "Enabled" : ""; + } else if (key == "WebRTC-IncludeCaptureClockOffset") { + return include_capture_clock_offset_ ? "Enabled" : ""; + } + return ""; } private: bool use_send_side_bwe_with_overhead_; + bool include_capture_clock_offset_; }; class RtpSenderVideoTest : public ::testing::TestWithParam { @@ -169,8 +175,8 @@ class RtpSenderVideoTest : public ::testing::TestWithParam { : field_trials_(GetParam()), fake_clock_(kStartTime), retransmission_rate_limiter_(&fake_clock_, 1000), - rtp_module_(RtpRtcp::Create([&] { - RtpRtcp::Configuration config; + rtp_module_(ModuleRtpRtcpImpl2::Create([&] { + RtpRtcpInterface::Configuration config; config.clock = &fake_clock_; config.outgoing_transport = &transport_; config.retransmission_rate_limiter = &retransmission_rate_limiter_; @@ -178,10 +184,10 @@ class RtpSenderVideoTest : public ::testing::TestWithParam { config.local_media_ssrc = kSsrc; return config; }())), - rtp_sender_video_(&fake_clock_, - rtp_module_->RtpSender(), - nullptr, - field_trials_) { + rtp_sender_video_( + std::make_unique(&fake_clock_, + rtp_module_->RtpSender(), + field_trials_)) { rtp_module_->SetSequenceNumber(kSeqNum); rtp_module_->SetStartTimestamp(0); } @@ -190,13 +196,13 @@ class RtpSenderVideoTest : public ::testing::TestWithParam { int version); protected: - const RtpRtcp::Configuration config_; + const RtpRtcpInterface::Configuration config_; FieldTrials field_trials_; SimulatedClock fake_clock_; LoopbackTransportTest transport_; RateLimiter retransmission_rate_limiter_; - std::unique_ptr rtp_module_; - TestRtpSenderVideo rtp_sender_video_; + 
std::unique_ptr rtp_module_; + std::unique_ptr rtp_sender_video_; }; TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) { @@ -207,8 +213,8 @@ TEST_P(RtpSenderVideoTest, KeyFrameHasCVO) { RTPVideoHeader hdr; hdr.rotation = kVideoRotation_0; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); VideoRotation rotation; EXPECT_TRUE( @@ -233,9 +239,9 @@ TEST_P(RtpSenderVideoTest, TimingFrameHasPacketizationTimstampSet) { fake_clock_.AdvanceTimeMilliseconds(kPacketizationTimeMs); hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp, - kFrame, nullptr, hdr, - kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, kCaptureTimestamp, + kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); VideoSendTiming timing; EXPECT_TRUE(transport_.last_sent_packet().GetExtension( &timing)); @@ -252,15 +258,15 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenChanged) { RTPVideoHeader hdr; hdr.rotation = kVideoRotation_90; hdr.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video_.SendVideo( - kPayload, kType, kTimestamp, 0, kFrame, nullptr, hdr, - kDefaultExpectedRetransmissionTimeMs)); + EXPECT_TRUE( + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs)); hdr.rotation = kVideoRotation_0; hdr.frame_type = VideoFrameType::kVideoFrameDelta; - EXPECT_TRUE(rtp_sender_video_.SendVideo( - kPayload, kType, kTimestamp + 1, 0, kFrame, nullptr, hdr, - kDefaultExpectedRetransmissionTimeMs)); + EXPECT_TRUE( + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame, + hdr, kDefaultExpectedRetransmissionTimeMs)); VideoRotation rotation; EXPECT_TRUE( @@ -276,14 
+282,14 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) { RTPVideoHeader hdr; hdr.rotation = kVideoRotation_90; hdr.frame_type = VideoFrameType::kVideoFrameKey; - EXPECT_TRUE(rtp_sender_video_.SendVideo( - kPayload, kType, kTimestamp, 0, kFrame, nullptr, hdr, - kDefaultExpectedRetransmissionTimeMs)); + EXPECT_TRUE( + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs)); hdr.frame_type = VideoFrameType::kVideoFrameDelta; - EXPECT_TRUE(rtp_sender_video_.SendVideo( - kPayload, kType, kTimestamp + 1, 0, kFrame, nullptr, hdr, - kDefaultExpectedRetransmissionTimeMs)); + EXPECT_TRUE( + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame, + hdr, kDefaultExpectedRetransmissionTimeMs)); VideoRotation rotation; EXPECT_TRUE( @@ -291,43 +297,6 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) { EXPECT_EQ(kVideoRotation_90, rotation); } -TEST_P(RtpSenderVideoTest, CheckH264FrameMarking) { - uint8_t kFrame[kMaxPacketLength]; - rtp_module_->RegisterRtpHeaderExtension(FrameMarkingExtension::kUri, - kFrameMarkingExtensionId); - - RTPFragmentationHeader frag; - frag.VerifyAndAllocateFragmentationHeader(1); - frag.fragmentationOffset[0] = 0; - frag.fragmentationLength[0] = sizeof(kFrame); - - RTPVideoHeader hdr; - hdr.video_type_header.emplace().packetization_mode = - H264PacketizationMode::NonInterleaved; - hdr.codec = kVideoCodecH264; - hdr.frame_marking.temporal_id = kNoTemporalIdx; - hdr.frame_marking.tl0_pic_idx = 99; - hdr.frame_marking.base_layer_sync = true; - hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, &frag, - hdr, kDefaultExpectedRetransmissionTimeMs); - - FrameMarking fm; - EXPECT_FALSE( - transport_.last_sent_packet().GetExtension(&fm)); - - hdr.frame_marking.temporal_id = 0; - hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp + 1, 0, 
kFrame, &frag, - hdr, kDefaultExpectedRetransmissionTimeMs); - - EXPECT_TRUE( - transport_.last_sent_packet().GetExtension(&fm)); - EXPECT_EQ(hdr.frame_marking.temporal_id, fm.temporal_id); - EXPECT_EQ(hdr.frame_marking.tl0_pic_idx, fm.tl0_pic_idx); - EXPECT_EQ(hdr.frame_marking.base_layer_sync, fm.base_layer_sync); -} - // Make sure rotation is parsed correctly when the Camera (C) and Flip (F) bits // are set in the CVO byte. TEST_P(RtpSenderVideoTest, SendVideoWithCameraAndFlipCVO) { @@ -353,13 +322,13 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesGeneric) { RTPVideoHeader header; header.codec = kVideoCodecGeneric; - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kConditionallyRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); } @@ -369,25 +338,16 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) { header.video_type_header.emplace().packetization_mode = H264PacketizationMode::NonInterleaved; header.codec = kVideoCodecH264; - header.frame_marking.temporal_id = kNoTemporalIdx; - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs)); - 
EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kConditionallyRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); - - // Test higher level retransmit. - for (int tid = 0; tid <= kMaxTemporalStreams; ++tid) { - header.frame_marking.temporal_id = tid; - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( - header, kRetransmitHigherLayers | kRetransmitBaseLayer, - kDefaultExpectedRetransmissionTimeMs)); - } } TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) { @@ -396,19 +356,19 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) { auto& vp8_header = header.video_type_header.emplace(); vp8_header.temporalIdx = 0; - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitHigherLayers | kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kConditionallyRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitBaseLayer | 
kConditionallyRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); } @@ -421,13 +381,13 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8HigherLayers) { for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) { vp8_header.temporalIdx = tid; - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitHigherLayers | kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs)); } @@ -441,13 +401,13 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP9) { for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) { vp9_header.temporal_idx = tid; - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_FALSE(rtp_sender_video_.AllowRetransmission( + EXPECT_FALSE(rtp_sender_video_->AllowRetransmission( header, kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitHigherLayers, kDefaultExpectedRetransmissionTimeMs)); - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission( + EXPECT_TRUE(rtp_sender_video_->AllowRetransmission( header, kRetransmitHigherLayers | kRetransmitBaseLayer, kDefaultExpectedRetransmissionTimeMs)); } @@ -471,7 +431,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) { auto& vp8_header = 
header.video_type_header.emplace(); for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) { vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)]; - rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs); + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs); fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); } @@ -481,31 +441,34 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) { // will not be retransmitted. vp8_header.temporalIdx = 1; EXPECT_FALSE( - rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs)); + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs)); fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); EXPECT_FALSE( - rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs)); + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs)); fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); // The TL0 frame did not arrive. So allow retransmission. - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs)); + EXPECT_TRUE( + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs)); fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); // Insert a frame for TL2. We just had frame in TL1, so the next one there is // in three frames away. TL0 is still too far in the past. So, allow // retransmission. vp8_header.temporalIdx = 2; - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs)); + EXPECT_TRUE( + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs)); fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); // Another TL2, next in TL1 is two frames away. Allow again. - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs)); + EXPECT_TRUE( + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs)); fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); // Yet another TL2, next in TL1 is now only one frame away, so don't store // for retransmission. 
EXPECT_FALSE( - rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs)); + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs)); } TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) { @@ -527,7 +490,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) { for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) { vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)]; - rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs); + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs); fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs); } @@ -537,7 +500,8 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) { // layer, but that last frame in TL1 was a long time ago in absolute terms, // so allow retransmission anyway. vp8_header.temporalIdx = 1; - EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(header, kSettings, kRttMs)); + EXPECT_TRUE( + rtp_sender_video_->AllowRetransmission(header, kSettings, kRttMs)); } TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) { @@ -548,11 +512,11 @@ TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) { FrameDependencyStructure video_structure; video_structure.num_decode_targets = 2; video_structure.templates = { - GenericFrameInfo::Builder().S(0).T(0).Dtis("SS").Build(), - GenericFrameInfo::Builder().S(1).T(0).Dtis("-S").Build(), - GenericFrameInfo::Builder().S(1).T(1).Dtis("-D").Build(), + FrameDependencyTemplate().S(0).T(0).Dtis("SS"), + FrameDependencyTemplate().S(1).T(0).Dtis("-S"), + FrameDependencyTemplate().S(1).T(1).Dtis("-D"), }; - rtp_sender_video_.SetVideoStructure(&video_structure); + rtp_sender_video_->SetVideoStructure(&video_structure); // Send key frame. 
RTPVideoHeader hdr; @@ -563,8 +527,8 @@ TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) { generic.decode_target_indications = {DecodeTargetIndication::kSwitch, DecodeTargetIndication::kSwitch}; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); ASSERT_EQ(transport_.packets_sent(), 1); DependencyDescriptor descriptor_key; @@ -589,8 +553,8 @@ TEST_P(RtpSenderVideoTest, SendsDependencyDescriptorWhenVideoStructureIsSet) { generic.decode_target_indications = {DecodeTargetIndication::kNotPresent, DecodeTargetIndication::kRequired}; hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); EXPECT_EQ(transport_.packets_sent(), 2); DependencyDescriptor descriptor_delta; @@ -616,12 +580,11 @@ TEST_P(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) { FrameDependencyStructure video_structure; video_structure.num_decode_targets = 2; video_structure.num_chains = 1; - // First decode target is protected by the only chain, second one - is not. 
- video_structure.decode_target_protected_by_chain = {0, 1}; + video_structure.decode_target_protected_by_chain = {0, 0}; video_structure.templates = { - GenericFrameInfo::Builder().S(0).T(0).Dtis("SS").ChainDiffs({1}).Build(), + FrameDependencyTemplate().S(0).T(0).Dtis("SS").ChainDiffs({1}), }; - rtp_sender_video_.SetVideoStructure(&video_structure); + rtp_sender_video_->SetVideoStructure(&video_structure); RTPVideoHeader hdr; RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace(); @@ -630,8 +593,8 @@ TEST_P(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) { DecodeTargetIndication::kSwitch}; generic.chain_diffs = {2}; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); ASSERT_EQ(transport_.packets_sent(), 1); DependencyDescriptor descriptor_key; @@ -642,6 +605,40 @@ TEST_P(RtpSenderVideoTest, PropagatesChainDiffsIntoDependencyDescriptor) { ContainerEq(generic.chain_diffs)); } +TEST_P(RtpSenderVideoTest, + PropagatesActiveDecodeTargetsIntoDependencyDescriptor) { + const int64_t kFrameId = 100000; + uint8_t kFrame[100]; + rtp_module_->RegisterRtpHeaderExtension( + RtpDependencyDescriptorExtension::kUri, kDependencyDescriptorId); + FrameDependencyStructure video_structure; + video_structure.num_decode_targets = 2; + video_structure.num_chains = 1; + video_structure.decode_target_protected_by_chain = {0, 0}; + video_structure.templates = { + FrameDependencyTemplate().S(0).T(0).Dtis("SS").ChainDiffs({1}), + }; + rtp_sender_video_->SetVideoStructure(&video_structure); + + RTPVideoHeader hdr; + RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace(); + generic.frame_id = kFrameId; + generic.decode_target_indications = {DecodeTargetIndication::kSwitch, + DecodeTargetIndication::kSwitch}; + 
generic.active_decode_targets = 0b01; + generic.chain_diffs = {1}; + hdr.frame_type = VideoFrameType::kVideoFrameKey; + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + + ASSERT_EQ(transport_.packets_sent(), 1); + DependencyDescriptor descriptor_key; + ASSERT_TRUE(transport_.last_sent_packet() + .GetExtension( + nullptr, &descriptor_key)); + EXPECT_EQ(descriptor_key.active_decode_targets_bitmask, 0b01u); +} + TEST_P(RtpSenderVideoTest, SetDiffentVideoStructureAvoidsCollisionWithThePreviousStructure) { const int64_t kFrameId = 100000; @@ -651,14 +648,14 @@ TEST_P(RtpSenderVideoTest, FrameDependencyStructure video_structure1; video_structure1.num_decode_targets = 2; video_structure1.templates = { - GenericFrameInfo::Builder().S(0).T(0).Dtis("SS").Build(), - GenericFrameInfo::Builder().S(0).T(1).Dtis("D-").Build(), + FrameDependencyTemplate().S(0).T(0).Dtis("SS"), + FrameDependencyTemplate().S(0).T(1).Dtis("D-"), }; FrameDependencyStructure video_structure2; video_structure2.num_decode_targets = 2; video_structure2.templates = { - GenericFrameInfo::Builder().S(0).T(0).Dtis("SS").Build(), - GenericFrameInfo::Builder().S(0).T(1).Dtis("R-").Build(), + FrameDependencyTemplate().S(0).T(0).Dtis("SS"), + FrameDependencyTemplate().S(0).T(1).Dtis("R-"), }; // Send 1st key frame. @@ -668,9 +665,9 @@ TEST_P(RtpSenderVideoTest, generic.decode_target_indications = {DecodeTargetIndication::kSwitch, DecodeTargetIndication::kSwitch}; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_.SetVideoStructure(&video_structure1); - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SetVideoStructure(&video_structure1); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); // Parse 1st extension. 
ASSERT_EQ(transport_.packets_sent(), 1); DependencyDescriptor descriptor_key1; @@ -685,8 +682,8 @@ TEST_P(RtpSenderVideoTest, generic.decode_target_indications = {DecodeTargetIndication::kDiscardable, DecodeTargetIndication::kNotPresent}; hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); ASSERT_EQ(transport_.packets_sent(), 2); RtpPacket delta_packet = transport_.last_sent_packet(); @@ -696,9 +693,9 @@ TEST_P(RtpSenderVideoTest, generic.decode_target_indications = {DecodeTargetIndication::kSwitch, DecodeTargetIndication::kSwitch}; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_.SetVideoStructure(&video_structure2); - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SetVideoStructure(&video_structure2); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); // Parse the 2nd key frame. ASSERT_EQ(transport_.packets_sent(), 3); DependencyDescriptor descriptor_key2; @@ -741,7 +738,7 @@ TEST_P(RtpSenderVideoTest, FrameDependencyStructure video_structure; video_structure.num_decode_targets = 1; - video_structure.templates = {GenericFrameInfo::Builder().Dtis("S").Build()}; + video_structure.templates = {FrameDependencyTemplate().Dtis("S")}; rtp_sender_video.SetVideoStructure(&video_structure); // Send key frame. 
@@ -752,8 +749,8 @@ TEST_P(RtpSenderVideoTest, EXPECT_CALL(*encryptor, Encrypt(_, _, Not(IsEmpty()), ElementsAreArray(kFrame), _, _)); - rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); // Double check packet with the dependency descriptor is sent. ASSERT_EQ(transport_.packets_sent(), 1); EXPECT_TRUE(transport_.last_sent_packet() @@ -774,8 +771,8 @@ TEST_P(RtpSenderVideoTest, PopulateGenericFrameDescriptor) { generic.dependencies.push_back(kFrameId - 1); generic.dependencies.push_back(kFrameId - 500); hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); RtpGenericFrameDescriptor descriptor_wire; EXPECT_EQ(1, transport_.packets_sent()); @@ -808,9 +805,9 @@ void RtpSenderVideoTest:: RTPVideoHeader::GenericDescriptorInfo& generic = hdr.generic.emplace(); generic.frame_id = kFrameId; hdr.frame_type = VideoFrameType::kVideoFrameDelta; - rtp_sender_video_.SendVideo(kPayload, VideoCodecType::kVideoCodecVP8, - kTimestamp, 0, kFrame, nullptr, hdr, - kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, VideoCodecType::kVideoCodecVP8, + kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); ASSERT_EQ(transport_.packets_sent(), 1); // Expect only minimal 1-byte vp8 descriptor was generated. 
@@ -827,6 +824,214 @@ TEST_P(RtpSenderVideoTest, UsesMinimalVp8DescriptorWhenGenericFrameDescriptorExtensionIsUsed(1); } +TEST_P(RtpSenderVideoTest, VideoLayersAllocationWithResolutionSentOnKeyFrames) { + const size_t kFrameSize = 100; + uint8_t kFrame[kFrameSize]; + rtp_module_->RegisterRtpHeaderExtension( + RtpVideoLayersAllocationExtension::kUri, + kVideoLayersAllocationExtensionId); + + VideoLayersAllocation allocation; + VideoLayersAllocation::SpatialLayer layer; + layer.width = 360; + layer.height = 180; + layer.target_bitrate_per_temporal_layer.push_back( + DataRate::KilobitsPerSec(50)); + allocation.resolution_and_frame_rate_is_valid = true; + allocation.active_spatial_layers.push_back(layer); + rtp_sender_video_->SetVideoLayersAllocation(allocation); + + RTPVideoHeader hdr; + hdr.frame_type = VideoFrameType::kVideoFrameKey; + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + + VideoLayersAllocation sent_allocation; + EXPECT_TRUE( + transport_.last_sent_packet() + .GetExtension(&sent_allocation)); + EXPECT_THAT(sent_allocation.active_spatial_layers, ElementsAre(layer)); + + // Next key frame also has the allocation. 
+ rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + EXPECT_TRUE( + transport_.last_sent_packet() + .GetExtension(&sent_allocation)); +} + +TEST_P(RtpSenderVideoTest, + VideoLayersAllocationWithoutResolutionSentOnDeltaWhenUpdated) { + const size_t kFrameSize = 100; + uint8_t kFrame[kFrameSize]; + rtp_module_->RegisterRtpHeaderExtension( + RtpVideoLayersAllocationExtension::kUri, + kVideoLayersAllocationExtensionId); + + VideoLayersAllocation allocation; + VideoLayersAllocation::SpatialLayer layer; + layer.width = 360; + layer.height = 180; + allocation.resolution_and_frame_rate_is_valid = true; + layer.target_bitrate_per_temporal_layer.push_back( + DataRate::KilobitsPerSec(50)); + allocation.active_spatial_layers.push_back(layer); + rtp_sender_video_->SetVideoLayersAllocation(allocation); + + RTPVideoHeader hdr; + hdr.frame_type = VideoFrameType::kVideoFrameKey; + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + EXPECT_TRUE(transport_.last_sent_packet() + .HasExtension()); + + // No allocation sent on delta frame unless it has been updated. + hdr.frame_type = VideoFrameType::kVideoFrameDelta; + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + EXPECT_FALSE(transport_.last_sent_packet() + .HasExtension()); + + // Update the allocation. 
+ rtp_sender_video_->SetVideoLayersAllocation(allocation); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + + VideoLayersAllocation sent_allocation; + EXPECT_TRUE( + transport_.last_sent_packet() + .GetExtension(&sent_allocation)); + ASSERT_THAT(sent_allocation.active_spatial_layers, SizeIs(1)); + EXPECT_FALSE(sent_allocation.resolution_and_frame_rate_is_valid); + EXPECT_THAT(sent_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(1)); +} + +TEST_P(RtpSenderVideoTest, + VideoLayersAllocationWithResolutionSentOnDeltaWhenSpatialLayerAdded) { + const size_t kFrameSize = 100; + uint8_t kFrame[kFrameSize]; + rtp_module_->RegisterRtpHeaderExtension( + RtpVideoLayersAllocationExtension::kUri, + kVideoLayersAllocationExtensionId); + + VideoLayersAllocation allocation; + allocation.resolution_and_frame_rate_is_valid = true; + VideoLayersAllocation::SpatialLayer layer; + layer.width = 360; + layer.height = 180; + layer.spatial_id = 0; + layer.target_bitrate_per_temporal_layer.push_back( + DataRate::KilobitsPerSec(50)); + allocation.active_spatial_layers.push_back(layer); + rtp_sender_video_->SetVideoLayersAllocation(allocation); + + RTPVideoHeader hdr; + hdr.frame_type = VideoFrameType::kVideoFrameKey; + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + ASSERT_TRUE(transport_.last_sent_packet() + .HasExtension()); + + // Update the allocation. 
+ layer.width = 640; + layer.height = 320; + layer.spatial_id = 1; + layer.target_bitrate_per_temporal_layer.push_back( + DataRate::KilobitsPerSec(100)); + allocation.active_spatial_layers.push_back(layer); + rtp_sender_video_->SetVideoLayersAllocation(allocation); + hdr.frame_type = VideoFrameType::kVideoFrameDelta; + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + + VideoLayersAllocation sent_allocation; + EXPECT_TRUE( + transport_.last_sent_packet() + .GetExtension(&sent_allocation)); + EXPECT_THAT(sent_allocation.active_spatial_layers, SizeIs(2)); + EXPECT_TRUE(sent_allocation.resolution_and_frame_rate_is_valid); +} + +TEST_P(RtpSenderVideoTest, VideoLayersAllocationSentOnDeltaFramesOnlyOnUpdate) { + const size_t kFrameSize = 100; + uint8_t kFrame[kFrameSize]; + rtp_module_->RegisterRtpHeaderExtension( + RtpVideoLayersAllocationExtension::kUri, + kVideoLayersAllocationExtensionId); + + VideoLayersAllocation allocation; + VideoLayersAllocation::SpatialLayer layer; + layer.width = 360; + layer.height = 180; + layer.target_bitrate_per_temporal_layer.push_back( + DataRate::KilobitsPerSec(50)); + allocation.active_spatial_layers.push_back(layer); + rtp_sender_video_->SetVideoLayersAllocation(allocation); + + RTPVideoHeader hdr; + hdr.frame_type = VideoFrameType::kVideoFrameDelta; + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + + VideoLayersAllocation sent_allocation; + EXPECT_TRUE( + transport_.last_sent_packet() + .GetExtension(&sent_allocation)); + EXPECT_THAT(sent_allocation.active_spatial_layers, SizeIs(1)); + + // VideoLayersAllocation not sent on the next delta frame. + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + EXPECT_FALSE(transport_.last_sent_packet() + .HasExtension()); + + // Update allocation. 
VideoLayersAllocation should be sent on the next frame. + rtp_sender_video_->SetVideoLayersAllocation(allocation); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + EXPECT_TRUE( + transport_.last_sent_packet() + .GetExtension(&sent_allocation)); +} + +TEST_P(RtpSenderVideoTest, VideoLayersAllocationNotSentOnHigherTemporalLayers) { + const size_t kFrameSize = 100; + uint8_t kFrame[kFrameSize]; + rtp_module_->RegisterRtpHeaderExtension( + RtpVideoLayersAllocationExtension::kUri, + kVideoLayersAllocationExtensionId); + + VideoLayersAllocation allocation; + allocation.resolution_and_frame_rate_is_valid = true; + VideoLayersAllocation::SpatialLayer layer; + layer.width = 360; + layer.height = 180; + layer.target_bitrate_per_temporal_layer.push_back( + DataRate::KilobitsPerSec(50)); + allocation.active_spatial_layers.push_back(layer); + rtp_sender_video_->SetVideoLayersAllocation(allocation); + + RTPVideoHeader hdr; + hdr.frame_type = VideoFrameType::kVideoFrameDelta; + hdr.codec = VideoCodecType::kVideoCodecVP8; + auto& vp8_header = hdr.video_type_header.emplace(); + vp8_header.temporalIdx = 1; + + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + EXPECT_FALSE(transport_.last_sent_packet() + .HasExtension()); + + // Send a delta frame on tl0. 
+ vp8_header.temporalIdx = 0; + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + EXPECT_TRUE(transport_.last_sent_packet() + .HasExtension()); +} + TEST_P(RtpSenderVideoTest, AbsoluteCaptureTime) { constexpr int64_t kAbsoluteCaptureTimestampMs = 12345678; uint8_t kFrame[kMaxPacketLength]; @@ -835,20 +1040,65 @@ TEST_P(RtpSenderVideoTest, AbsoluteCaptureTime) { RTPVideoHeader hdr; hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, - kAbsoluteCaptureTimestampMs, kFrame, nullptr, hdr, - kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, + kAbsoluteCaptureTimestampMs, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); // It is expected that one and only one of the packets sent on this video - // frame has absolute capture time header extension. + // frame has absolute capture time header extension. And no absolute capture + // time header extensions include capture clock offset. int packets_with_abs_capture_time = 0; for (const RtpPacketReceived& packet : transport_.sent_packets()) { auto absolute_capture_time = packet.GetExtension(); if (absolute_capture_time) { ++packets_with_abs_capture_time; - EXPECT_EQ(absolute_capture_time->absolute_capture_timestamp, - Int64MsToUQ32x32(kAbsoluteCaptureTimestampMs + NtpOffsetMs())); + EXPECT_EQ( + absolute_capture_time->absolute_capture_timestamp, + Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( + kAbsoluteCaptureTimestampMs))); + EXPECT_FALSE( + absolute_capture_time->estimated_capture_clock_offset.has_value()); + } + } + EXPECT_EQ(packets_with_abs_capture_time, 1); +} + +// Essentially the same test as AbsoluteCaptureTime but with a field trial. +// After the field trial is experimented, we will remove AbsoluteCaptureTime. 
+TEST_P(RtpSenderVideoTest, AbsoluteCaptureTimeWithCaptureClockOffset) { + field_trials_.set_include_capture_clock_offset(true); + rtp_sender_video_ = std::make_unique( + &fake_clock_, rtp_module_->RtpSender(), field_trials_); + + constexpr int64_t kAbsoluteCaptureTimestampMs = 12345678; + uint8_t kFrame[kMaxPacketLength]; + rtp_module_->RegisterRtpHeaderExtension(AbsoluteCaptureTimeExtension::kUri, + kAbsoluteCaptureTimeExtensionId); + + RTPVideoHeader hdr; + const absl::optional kExpectedCaptureClockOffset = + absl::make_optional(1234); + hdr.frame_type = VideoFrameType::kVideoFrameKey; + rtp_sender_video_->SendVideo( + kPayload, kType, kTimestamp, kAbsoluteCaptureTimestampMs, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs, kExpectedCaptureClockOffset); + + // It is expected that one and only one of the packets sent on this video + // frame has absolute capture time header extension. And it includes capture + // clock offset. + int packets_with_abs_capture_time = 0; + for (const RtpPacketReceived& packet : transport_.sent_packets()) { + auto absolute_capture_time = + packet.GetExtension(); + if (absolute_capture_time) { + ++packets_with_abs_capture_time; + EXPECT_EQ( + absolute_capture_time->absolute_capture_timestamp, + Int64MsToUQ32x32(fake_clock_.ConvertTimestampToNtpTimeInMilliseconds( + kAbsoluteCaptureTimestampMs))); + EXPECT_EQ(kExpectedCaptureClockOffset, + absolute_capture_time->estimated_capture_clock_offset); } } EXPECT_EQ(packets_with_abs_capture_time, 1); @@ -860,7 +1110,7 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) { uint8_t kFrame[kPacketSize]; rtp_module_->RegisterRtpHeaderExtension(PlayoutDelayLimits::kUri, kPlayoutDelayExtensionId); - const PlayoutDelay kExpectedDelay = {10, 20}; + const VideoPlayoutDelay kExpectedDelay = {10, 20}; // Send initial key-frame without playout delay. 
RTPVideoHeader hdr; @@ -869,8 +1119,8 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) { auto& vp8_header = hdr.video_type_header.emplace(); vp8_header.temporalIdx = 0; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); EXPECT_FALSE( transport_.last_sent_packet().HasExtension()); @@ -878,39 +1128,88 @@ TEST_P(RtpSenderVideoTest, PopulatesPlayoutDelay) { hdr.playout_delay = kExpectedDelay; hdr.frame_type = VideoFrameType::kVideoFrameDelta; vp8_header.temporalIdx = 1; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); - PlayoutDelay received_delay = PlayoutDelay::Noop(); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); + VideoPlayoutDelay received_delay = VideoPlayoutDelay(); ASSERT_TRUE(transport_.last_sent_packet().GetExtension( &received_delay)); EXPECT_EQ(received_delay, kExpectedDelay); // Set playout delay on a non-discardable frame, the extension should still // be populated since dilvery wasn't guaranteed on the last one. - hdr.playout_delay = PlayoutDelay::Noop(); // Inidcates "no change". + hdr.playout_delay = VideoPlayoutDelay(); // Indicates "no change". vp8_header.temporalIdx = 0; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); ASSERT_TRUE(transport_.last_sent_packet().GetExtension( &received_delay)); EXPECT_EQ(received_delay, kExpectedDelay); // The next frame does not need the extensions since it's delivery has // already been guaranteed. 
- rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); EXPECT_FALSE( transport_.last_sent_packet().HasExtension()); // Insert key-frame, we need to refresh the state here. hdr.frame_type = VideoFrameType::kVideoFrameKey; - rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, nullptr, - hdr, kDefaultExpectedRetransmissionTimeMs); + rtp_sender_video_->SendVideo(kPayload, kType, kTimestamp, 0, kFrame, hdr, + kDefaultExpectedRetransmissionTimeMs); ASSERT_TRUE(transport_.last_sent_packet().GetExtension( &received_delay)); EXPECT_EQ(received_delay, kExpectedDelay); } +TEST_P(RtpSenderVideoTest, SendGenericVideo) { + const uint8_t kPayloadType = 127; + const VideoCodecType kCodecType = VideoCodecType::kVideoCodecGeneric; + const uint8_t kPayload[] = {47, 11, 32, 93, 89}; + + // Send keyframe. + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234, 4321, + kPayload, video_header, + absl::nullopt)); + + rtc::ArrayView sent_payload = + transport_.last_sent_packet().payload(); + uint8_t generic_header = sent_payload[0]; + EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kKeyFrameBit); + EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kFirstPacketBit); + EXPECT_THAT(sent_payload.subview(1), ElementsAreArray(kPayload)); + + // Send delta frame. 
+ const uint8_t kDeltaPayload[] = {13, 42, 32, 93, 13}; + video_header.frame_type = VideoFrameType::kVideoFrameDelta; + ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, kCodecType, 1234, 4321, + kDeltaPayload, video_header, + absl::nullopt)); + + sent_payload = transport_.last_sent_packet().payload(); + generic_header = sent_payload[0]; + EXPECT_FALSE(generic_header & RtpFormatVideoGeneric::kKeyFrameBit); + EXPECT_TRUE(generic_header & RtpFormatVideoGeneric::kFirstPacketBit); + EXPECT_THAT(sent_payload.subview(1), ElementsAreArray(kDeltaPayload)); +} + +TEST_P(RtpSenderVideoTest, SendRawVideo) { + const uint8_t kPayloadType = 111; + const uint8_t kPayload[] = {11, 22, 33, 44, 55}; + + // Send a frame. + RTPVideoHeader video_header; + video_header.frame_type = VideoFrameType::kVideoFrameKey; + ASSERT_TRUE(rtp_sender_video_->SendVideo(kPayloadType, absl::nullopt, 1234, + 4321, kPayload, video_header, + absl::nullopt)); + + rtc::ArrayView sent_payload = + transport_.last_sent_packet().payload(); + EXPECT_THAT(sent_payload, ElementsAreArray(kPayload)); +} + INSTANTIATE_TEST_SUITE_P(WithAndWithoutOverhead, RtpSenderVideoTest, ::testing::Bool()); @@ -920,8 +1219,8 @@ class RtpSenderVideoWithFrameTransformerTest : public ::testing::Test { RtpSenderVideoWithFrameTransformerTest() : fake_clock_(kStartTime), retransmission_rate_limiter_(&fake_clock_, 1000), - rtp_module_(RtpRtcp::Create([&] { - RtpRtcp::Configuration config; + rtp_module_(ModuleRtpRtcpImpl2::Create([&] { + RtpRtcpInterface::Configuration config; config.clock = &fake_clock_; config.outgoing_transport = &transport_; config.retransmission_rate_limiter = &retransmission_rate_limiter_; @@ -948,7 +1247,7 @@ class RtpSenderVideoWithFrameTransformerTest : public ::testing::Test { SimulatedClock fake_clock_; LoopbackTransportTest transport_; RateLimiter retransmission_rate_limiter_; - std::unique_ptr rtp_module_; + std::unique_ptr rtp_module_; }; std::unique_ptr CreateDefaultEncodedImage() { @@ 
-991,7 +1290,7 @@ TEST_F(RtpSenderVideoWithFrameTransformerTest, EXPECT_CALL(*mock_frame_transformer, Transform); rtp_sender_video->SendEncodedImage(kPayload, kType, kTimestamp, - *encoded_image, nullptr, video_header, + *encoded_image, video_header, kDefaultExpectedRetransmissionTimeMs); } @@ -1017,7 +1316,7 @@ TEST_F(RtpSenderVideoWithFrameTransformerTest, OnTransformedFrameSendsVideo) { encoder_queue.SendTask( [&] { rtp_sender_video->SendEncodedImage( - kPayload, kType, kTimestamp, *encoded_image, nullptr, video_header, + kPayload, kType, kTimestamp, *encoded_image, video_header, kDefaultExpectedRetransmissionTimeMs); }, RTC_FROM_HERE); @@ -1063,7 +1362,7 @@ TEST_F(RtpSenderVideoWithFrameTransformerTest, ElementsAre(DecodeTargetIndication::kSwitch)); }); rtp_sender_video->SendEncodedImage(kPayload, kType, kTimestamp, - *encoded_image, nullptr, video_header, + *encoded_image, video_header, kDefaultExpectedRetransmissionTimeMs); } diff --git a/modules/rtp_rtcp/source/rtp_util.cc b/modules/rtp_rtcp/source/rtp_util.cc new file mode 100644 index 0000000000..46c641ea2f --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_util.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/rtp_rtcp/source/rtp_util.h" + +#include +#include + +#include "api/array_view.h" + +namespace webrtc { +namespace { + +constexpr uint8_t kRtpVersion = 2; +constexpr size_t kMinRtpPacketLen = 12; +constexpr size_t kMinRtcpPacketLen = 4; + +bool HasCorrectRtpVersion(rtc::ArrayView packet) { + return packet[0] >> 6 == kRtpVersion; +} + +// For additional details, see http://tools.ietf.org/html/rfc5761#section-4 +bool PayloadTypeIsReservedForRtcp(uint8_t payload_type) { + return 64 <= payload_type && payload_type < 96; +} + +} // namespace + +bool IsRtpPacket(rtc::ArrayView packet) { + return packet.size() >= kMinRtpPacketLen && HasCorrectRtpVersion(packet) && + !PayloadTypeIsReservedForRtcp(packet[1] & 0x7F); +} + +bool IsRtcpPacket(rtc::ArrayView packet) { + return packet.size() >= kMinRtcpPacketLen && HasCorrectRtpVersion(packet) && + PayloadTypeIsReservedForRtcp(packet[1] & 0x7F); +} + +} // namespace webrtc diff --git a/api/video_codecs/video_decoder_factory.cc b/modules/rtp_rtcp/source/rtp_util.h similarity index 52% rename from api/video_codecs/video_decoder_factory.cc rename to modules/rtp_rtcp/source/rtp_util.h index 511a3c7e92..b85727bf47 100644 --- a/api/video_codecs/video_decoder_factory.cc +++ b/modules/rtp_rtcp/source/rtp_util.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,16 +8,18 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "api/video_codecs/video_decoder_factory.h" +#ifndef MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_ +#define MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_ -#include "api/video_codecs/video_decoder.h" +#include + +#include "api/array_view.h" namespace webrtc { -std::unique_ptr VideoDecoderFactory::LegacyCreateVideoDecoder( - const SdpVideoFormat& format, - const std::string& receive_stream_id) { - return CreateVideoDecoder(format); -} +bool IsRtcpPacket(rtc::ArrayView packet); +bool IsRtpPacket(rtc::ArrayView packet); } // namespace webrtc + +#endif // MODULES_RTP_RTCP_SOURCE_RTP_UTIL_H_ diff --git a/modules/rtp_rtcp/source/rtp_util_unittest.cc b/modules/rtp_rtcp/source/rtp_util_unittest.cc new file mode 100644 index 0000000000..8f980ecff1 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_util_unittest.cc @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/rtp_rtcp/source/rtp_util.h" + +#include "test/gmock.h" + +namespace webrtc { +namespace { + +TEST(RtpUtil, IsRtpPacket) { + constexpr uint8_t kMinimalisticRtpPacket[] = {0x80, 97, 0, 0, // + 0, 0, 0, 0, // + 0, 0, 0, 0}; + EXPECT_TRUE(IsRtpPacket(kMinimalisticRtpPacket)); + + constexpr uint8_t kWrongRtpVersion[] = {0xc0, 97, 0, 0, // + 0, 0, 0, 0, // + 0, 0, 0, 0}; + EXPECT_FALSE(IsRtpPacket(kWrongRtpVersion)); + + constexpr uint8_t kPacketWithPayloadForRtcp[] = {0x80, 200, 0, 0, // + 0, 0, 0, 0, // + 0, 0, 0, 0}; + EXPECT_FALSE(IsRtpPacket(kPacketWithPayloadForRtcp)); + + constexpr uint8_t kTooSmallRtpPacket[] = {0x80, 97, 0, 0, // + 0, 0, 0, 0, // + 0, 0, 0}; + EXPECT_FALSE(IsRtpPacket(kTooSmallRtpPacket)); + + EXPECT_FALSE(IsRtpPacket({})); +} + +TEST(RtpUtil, IsRtcpPacket) { + constexpr uint8_t kMinimalisticRtcpPacket[] = {0x80, 202, 0, 0}; + EXPECT_TRUE(IsRtcpPacket(kMinimalisticRtcpPacket)); + + constexpr uint8_t kWrongRtpVersion[] = {0xc0, 202, 0, 0}; + EXPECT_FALSE(IsRtcpPacket(kWrongRtpVersion)); + + constexpr uint8_t kPacketWithPayloadForRtp[] = {0x80, 225, 0, 0}; + EXPECT_FALSE(IsRtcpPacket(kPacketWithPayloadForRtp)); + + constexpr uint8_t kTooSmallRtcpPacket[] = {0x80, 202, 0}; + EXPECT_FALSE(IsRtcpPacket(kTooSmallRtcpPacket)); + + EXPECT_FALSE(IsRtcpPacket({})); +} + +} // namespace +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_utility.cc b/modules/rtp_rtcp/source/rtp_utility.cc index f76d7d0f0b..9b68f0dead 100644 --- a/modules/rtp_rtcp/source/rtp_utility.cc +++ b/modules/rtp_rtcp/source/rtp_utility.cc @@ -17,7 +17,6 @@ #include "api/array_view.h" #include "api/video/video_content_type.h" -#include "api/video/video_frame_marking.h" #include "api/video/video_rotation.h" #include "api/video/video_timing.h" #include "modules/rtp_rtcp/include/rtp_cvo.h" @@ -132,7 +131,7 @@ bool RtpHeaderParser::RTCP() const { } bool RtpHeaderParser::ParseRtcp(RTPHeader* header) const { - assert(header != NULL); + 
RTC_DCHECK(header); const ptrdiff_t length = _ptrRTPDataEnd - _ptrRTPDataBegin; if (length < kRtcpMinParseLength) { @@ -245,10 +244,6 @@ bool RtpHeaderParser::Parse(RTPHeader* header, header->extension.has_video_timing = false; header->extension.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false}; - header->extension.has_frame_marking = false; - header->extension.frame_marking = {false, false, false, false, - false, kNoTemporalIdx, 0, 0}; - if (X) { /* RTP header extension, RFC 3550. 0 1 2 3 @@ -369,6 +364,10 @@ void RtpHeaderParser::ParseOneByteExtensionHeader( header->extension.hasTransmissionTimeOffset = true; break; } + case kRtpExtensionCsrcAudioLevel: { + RTC_LOG(LS_WARNING) << "Csrc audio level extension not supported"; + return; + } case kRtpExtensionAudioLevel: { if (len != 0) { RTC_LOG(LS_WARNING) << "Incorrect audio level len: " << len; @@ -497,15 +496,10 @@ void RtpHeaderParser::ParseOneByteExtensionHeader( &header->extension.video_timing); break; } - case kRtpExtensionFrameMarking: { - if (!FrameMarkingExtension::Parse(rtc::MakeArrayView(ptr, len + 1), - &header->extension.frame_marking)) { - RTC_LOG(LS_WARNING) << "Incorrect frame marking len: " << len; - return; - } - header->extension.has_frame_marking = true; + case kRtpExtensionVideoLayersAllocation: + RTC_LOG(WARNING) << "VideoLayersAllocation extension unsupported by " + "rtp header parser."; break; - } case kRtpExtensionRtpStreamId: { std::string name(reinterpret_cast(ptr), len + 1); if (IsLegalRsidName(name)) { @@ -546,6 +540,10 @@ void RtpHeaderParser::ParseOneByteExtensionHeader( RTC_LOG(WARNING) << "Inband comfort noise extension unsupported by " "rtp header parser."; break; + case kRtpExtensionVideoFrameTrackingId: + RTC_LOG(WARNING) + << "VideoFrameTrackingId unsupported by rtp header parser."; + break; case kRtpExtensionNone: case kRtpExtensionNumberOfExtensions: { RTC_NOTREACHED() << "Invalid extension type: " << type; diff --git a/modules/rtp_rtcp/source/rtp_utility.h 
b/modules/rtp_rtcp/source/rtp_utility.h index cdda9ef119..cdfff4072f 100644 --- a/modules/rtp_rtcp/source/rtp_utility.h +++ b/modules/rtp_rtcp/source/rtp_utility.h @@ -15,6 +15,7 @@ #include +#include "absl/base/attributes.h" #include "absl/strings/string_view.h" #include "api/rtp_headers.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" @@ -34,6 +35,7 @@ class RtpHeaderParser { RtpHeaderParser(const uint8_t* rtpData, size_t rtpDataLength); ~RtpHeaderParser(); + ABSL_DEPRECATED("Use IsRtpPacket or IsRtcpPacket") bool RTCP() const; bool ParseRtcp(RTPHeader* header) const; bool Parse(RTPHeader* parsedPacket, diff --git a/modules/rtp_rtcp/source/rtp_video_header.h b/modules/rtp_rtcp/source/rtp_video_header.h index 5785ea9a54..c1be76fa4c 100644 --- a/modules/rtp_rtcp/source/rtp_video_header.h +++ b/modules/rtp_rtcp/source/rtp_video_header.h @@ -10,6 +10,7 @@ #ifndef MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_HEADER_H_ #define MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_HEADER_H_ +#include #include #include "absl/container/inlined_vector.h" @@ -19,11 +20,9 @@ #include "api/video/color_space.h" #include "api/video/video_codec_type.h" #include "api/video/video_content_type.h" -#include "api/video/video_frame_marking.h" #include "api/video/video_frame_type.h" #include "api/video/video_rotation.h" #include "api/video/video_timing.h" -#include "common_types.h" // NOLINT(build/include_directory) #include "modules/video_coding/codecs/h264/include/h264_globals.h" #include "modules/video_coding/codecs/vp8/include/vp8_globals.h" #include "modules/video_coding/codecs/vp9/include/vp9_globals.h" @@ -54,6 +53,7 @@ struct RTPVideoHeader { absl::InlinedVector decode_target_indications; absl::InlinedVector dependencies; absl::InlinedVector chain_diffs; + std::bitset<32> active_decode_targets = ~uint32_t{0}; }; RTPVideoHeader(); @@ -70,13 +70,16 @@ struct RTPVideoHeader { VideoContentType content_type = VideoContentType::UNSPECIFIED; bool is_first_packet_in_frame = false; bool 
is_last_packet_in_frame = false; + bool is_last_frame_in_picture = true; uint8_t simulcastIdx = 0; VideoCodecType codec = VideoCodecType::kVideoCodecGeneric; - PlayoutDelay playout_delay = {-1, -1}; + VideoPlayoutDelay playout_delay; VideoSendTiming video_timing; - FrameMarking frame_marking = {false, false, false, false, false, 0xFF, 0, 0}; absl::optional color_space; + // This field is meant for media quality testing purpose only. When enabled it + // carries the webrtc::VideoFrame id field from the sender to the receiver. + absl::optional video_frame_tracking_id; RTPVideoTypeHeader video_type_header; }; diff --git a/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc b/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc new file mode 100644 index 0000000000..93fb235dcd --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.cc @@ -0,0 +1,413 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h" + +#include +#include + +#include "absl/algorithm/container.h" +#include "api/video/video_layers_allocation.h" +#include "modules/rtp_rtcp/source/byte_io.h" +#include "rtc_base/checks.h" + +namespace webrtc { + +constexpr RTPExtensionType RtpVideoLayersAllocationExtension::kId; +constexpr const char RtpVideoLayersAllocationExtension::kUri[]; + +namespace { + +constexpr int kMaxNumRtpStreams = 4; + +// TODO(bugs.webrtc.org/12000): share Leb128 functions with av1 packetizer. +// Returns minimum number of bytes required to store `value`. 
+int Leb128Size(uint32_t value) { + int size = 0; + while (value >= 0x80) { + ++size; + value >>= 7; + } + return size + 1; +} + +// Returns number of bytes consumed. +int WriteLeb128(uint32_t value, uint8_t* buffer) { + int size = 0; + while (value >= 0x80) { + buffer[size] = 0x80 | (value & 0x7F); + ++size; + value >>= 7; + } + buffer[size] = value; + ++size; + return size; +} + +// Reads leb128 encoded value and advance read_at by number of bytes consumed. +// Sets read_at to nullptr on error. +uint64_t ReadLeb128(const uint8_t*& read_at, const uint8_t* end) { + uint64_t value = 0; + int fill_bits = 0; + while (read_at != end && fill_bits < 64 - 7) { + uint8_t leb128_byte = *read_at; + value |= uint64_t{leb128_byte & 0x7Fu} << fill_bits; + ++read_at; + fill_bits += 7; + if ((leb128_byte & 0x80) == 0) { + return value; + } + } + // Failed to find terminator leb128 byte. + read_at = nullptr; + return 0; +} + +bool AllocationIsValid(const VideoLayersAllocation& allocation) { + // Since all multivalue fields are stored in (rtp_stream_id, spatial_id) order + // assume `allocation.active_spatial_layers` is already sorted. It is simpler + // to assemble it in the sorted way than to resort during serialization. 
+ if (!absl::c_is_sorted( + allocation.active_spatial_layers, + [](const VideoLayersAllocation::SpatialLayer& lhs, + const VideoLayersAllocation::SpatialLayer& rhs) { + return std::make_tuple(lhs.rtp_stream_index, lhs.spatial_id) < + std::make_tuple(rhs.rtp_stream_index, rhs.spatial_id); + })) { + return false; + } + + int max_rtp_stream_idx = 0; + for (const auto& spatial_layer : allocation.active_spatial_layers) { + if (spatial_layer.rtp_stream_index < 0 || + spatial_layer.rtp_stream_index >= 4) { + return false; + } + if (spatial_layer.spatial_id < 0 || spatial_layer.spatial_id >= 4) { + return false; + } + if (spatial_layer.target_bitrate_per_temporal_layer.empty() || + spatial_layer.target_bitrate_per_temporal_layer.size() > 4) { + return false; + } + if (max_rtp_stream_idx < spatial_layer.rtp_stream_index) { + max_rtp_stream_idx = spatial_layer.rtp_stream_index; + } + if (allocation.resolution_and_frame_rate_is_valid) { + // TODO(danilchap): Add check width and height are no more than 0x10000 + // when width and height become larger type and thus would support maximum + // resolution. 
+ if (spatial_layer.width <= 0) { + return false; + } + if (spatial_layer.height <= 0) { + return false; + } + if (spatial_layer.frame_rate_fps > 255) { + return false; + } + } + } + if (allocation.rtp_stream_index < 0 || + (!allocation.active_spatial_layers.empty() && + allocation.rtp_stream_index > max_rtp_stream_idx)) { + return false; + } + return true; +} + +struct SpatialLayersBitmasks { + int max_rtp_stream_id = 0; + uint8_t spatial_layer_bitmask[kMaxNumRtpStreams] = {}; + bool bitmasks_are_the_same = true; +}; + +SpatialLayersBitmasks SpatialLayersBitmasksPerRtpStream( + const VideoLayersAllocation& allocation) { + RTC_DCHECK(AllocationIsValid(allocation)); + SpatialLayersBitmasks result; + for (const auto& layer : allocation.active_spatial_layers) { + result.spatial_layer_bitmask[layer.rtp_stream_index] |= + (1u << layer.spatial_id); + if (result.max_rtp_stream_id < layer.rtp_stream_index) { + result.max_rtp_stream_id = layer.rtp_stream_index; + } + } + for (int i = 1; i <= result.max_rtp_stream_id; ++i) { + if (result.spatial_layer_bitmask[i] != result.spatial_layer_bitmask[0]) { + result.bitmasks_are_the_same = false; + break; + } + } + return result; +} + +} // namespace + +// +-+-+-+-+-+-+-+-+ +// |RID| NS| sl_bm | +// +-+-+-+-+-+-+-+-+ +// Spatial layer bitmask |sl0_bm |sl1_bm | +// up to 2 bytes |---------------| +// when sl_bm == 0 |sl2_bm |sl3_bm | +// +-+-+-+-+-+-+-+-+ +// Number of temporal |#tl|#tl|#tl|#tl| +// layers per spatial layer :---------------: +// up to 4 bytes | ... | +// +-+-+-+-+-+-+-+-+ +// Target bitrate in kpbs | | +// per temporal layer : ... : +// leb128 encoded | | +// +-+-+-+-+-+-+-+-+ +// Resolution and framerate | | +// 5 bytes per spatial layer + width-1 for + +// (optional) | rid=0, sid=0 | +// +---------------+ +// | | +// + height-1 for + +// | rid=0, sid=0 | +// +---------------+ +// | max framerate | +// +-+-+-+-+-+-+-+-+ +// : ... 
: +// +-+-+-+-+-+-+-+-+ +// +// RID: RTP stream index this allocation is sent on, numbered from 0. 2 bits. +// NS: Number of RTP streams - 1. 2 bits, thus allowing up-to 4 RTP streams. +// sl_bm: BitMask of the active Spatial Layers when same for all RTP streams or +// 0 otherwise. 4 bits thus allows up to 4 spatial layers per RTP streams. +// slX_bm: BitMask of the active Spatial Layers for RTP stream with index=X. +// byte-aligned. When NS < 2, takes ones byte, otherwise uses two bytes. +// #tl: 2-bit value of number of temporal layers-1, thus allowing up-to 4 +// temporal layer per spatial layer. One per spatial layer per RTP stream. +// values are stored in (RTP stream id, spatial id) ascending order. +// zero-padded to byte alignment. +// Target bitrate in kbps. Values are stored using leb128 encoding. +// one value per temporal layer. values are stored in +// (RTP stream id, spatial id, temporal id) ascending order. +// All bitrates are total required bitrate to receive the corresponding +// layer, i.e. in simulcast mode they include only corresponding spatial +// layer, in full-svc all lower spatial layers are included. All lower +// temporal layers are also included. +// Resolution and framerate. +// Optional. Presense is infered from the rtp header extension size. +// Encoded (width - 1), 16-bit, (height - 1), 16-bit, max frame rate 8-bit +// per spatial layer per RTP stream. +// Values are stored in (RTP stream id, spatial id) ascending order. +// +// An empty layer allocation (i.e nothing sent on ssrc) is encoded as +// special case with a single 0 byte. 
+ +bool RtpVideoLayersAllocationExtension::Write( + rtc::ArrayView data, + const VideoLayersAllocation& allocation) { + RTC_DCHECK(AllocationIsValid(allocation)); + RTC_DCHECK_GE(data.size(), ValueSize(allocation)); + + if (allocation.active_spatial_layers.empty()) { + data[0] = 0; + return true; + } + + SpatialLayersBitmasks slb = SpatialLayersBitmasksPerRtpStream(allocation); + uint8_t* write_at = data.data(); + // First half of the header byte. + *write_at = (allocation.rtp_stream_index << 6); + // number of rtp stream - 1 is the same as the maximum rtp_stream_id. + *write_at |= slb.max_rtp_stream_id << 4; + if (slb.bitmasks_are_the_same) { + // Second half of the header byte. + *write_at |= slb.spatial_layer_bitmask[0]; + } else { + // spatial layer bitmasks when they are different for different RTP streams. + *++write_at = + (slb.spatial_layer_bitmask[0] << 4) | slb.spatial_layer_bitmask[1]; + if (slb.max_rtp_stream_id >= 2) { + *++write_at = + (slb.spatial_layer_bitmask[2] << 4) | slb.spatial_layer_bitmask[3]; + } + } + ++write_at; + + { // Number of temporal layers. + int bit_offset = 8; + *write_at = 0; + for (const auto& layer : allocation.active_spatial_layers) { + if (bit_offset == 0) { + bit_offset = 6; + *++write_at = 0; + } else { + bit_offset -= 2; + } + *write_at |= + ((layer.target_bitrate_per_temporal_layer.size() - 1) << bit_offset); + } + ++write_at; + } + + // Target bitrates. 
+ for (const auto& spatial_layer : allocation.active_spatial_layers) { + for (const DataRate& bitrate : + spatial_layer.target_bitrate_per_temporal_layer) { + write_at += WriteLeb128(bitrate.kbps(), write_at); + } + } + + if (allocation.resolution_and_frame_rate_is_valid) { + for (const auto& spatial_layer : allocation.active_spatial_layers) { + ByteWriter::WriteBigEndian(write_at, spatial_layer.width - 1); + write_at += 2; + ByteWriter::WriteBigEndian(write_at, spatial_layer.height - 1); + write_at += 2; + *write_at = spatial_layer.frame_rate_fps; + ++write_at; + } + } + RTC_DCHECK_EQ(write_at - data.data(), ValueSize(allocation)); + return true; +} + +bool RtpVideoLayersAllocationExtension::Parse( + rtc::ArrayView data, + VideoLayersAllocation* allocation) { + if (data.empty() || allocation == nullptr) { + return false; + } + + allocation->active_spatial_layers.clear(); + + const uint8_t* read_at = data.data(); + const uint8_t* const end = data.data() + data.size(); + + if (data.size() == 1 && *read_at == 0) { + allocation->rtp_stream_index = 0; + allocation->resolution_and_frame_rate_is_valid = true; + return true; + } + + // Header byte. + allocation->rtp_stream_index = *read_at >> 6; + int num_rtp_streams = 1 + ((*read_at >> 4) & 0b11); + uint8_t spatial_layers_bitmasks[kMaxNumRtpStreams]; + spatial_layers_bitmasks[0] = *read_at & 0b1111; + + if (spatial_layers_bitmasks[0] != 0) { + for (int i = 1; i < num_rtp_streams; ++i) { + spatial_layers_bitmasks[i] = spatial_layers_bitmasks[0]; + } + } else { + // Spatial layer bitmasks when they are different for different RTP streams. 
+ if (++read_at == end) { + return false; + } + spatial_layers_bitmasks[0] = *read_at >> 4; + spatial_layers_bitmasks[1] = *read_at & 0b1111; + if (num_rtp_streams > 2) { + if (++read_at == end) { + return false; + } + spatial_layers_bitmasks[2] = *read_at >> 4; + spatial_layers_bitmasks[3] = *read_at & 0b1111; + } + } + if (++read_at == end) { + return false; + } + + // Read number of temporal layers, + // Create `allocation->active_spatial_layers` while iterating though it. + int bit_offset = 8; + for (int stream_idx = 0; stream_idx < num_rtp_streams; ++stream_idx) { + for (int sid = 0; sid < VideoLayersAllocation::kMaxSpatialIds; ++sid) { + if ((spatial_layers_bitmasks[stream_idx] & (1 << sid)) == 0) { + continue; + } + + if (bit_offset == 0) { + bit_offset = 6; + if (++read_at == end) { + return false; + } + } else { + bit_offset -= 2; + } + int num_temporal_layers = 1 + ((*read_at >> bit_offset) & 0b11); + allocation->active_spatial_layers.emplace_back(); + auto& layer = allocation->active_spatial_layers.back(); + layer.rtp_stream_index = stream_idx; + layer.spatial_id = sid; + layer.target_bitrate_per_temporal_layer.resize(num_temporal_layers, + DataRate::Zero()); + } + } + if (++read_at == end) { + return false; + } + + // Target bitrates. + for (auto& layer : allocation->active_spatial_layers) { + for (DataRate& rate : layer.target_bitrate_per_temporal_layer) { + rate = DataRate::KilobitsPerSec(ReadLeb128(read_at, end)); + if (read_at == nullptr) { + return false; + } + } + } + + if (read_at == end) { + allocation->resolution_and_frame_rate_is_valid = false; + return true; + } + + if (read_at + 5 * allocation->active_spatial_layers.size() != end) { + // data is left, but it size is not what can be used for resolutions and + // framerates. 
+ return false; + } + allocation->resolution_and_frame_rate_is_valid = true; + for (auto& layer : allocation->active_spatial_layers) { + layer.width = 1 + ByteReader::ReadBigEndian(read_at); + read_at += 2; + layer.height = 1 + ByteReader::ReadBigEndian(read_at); + read_at += 2; + layer.frame_rate_fps = *read_at; + ++read_at; + } + return true; +} + +size_t RtpVideoLayersAllocationExtension::ValueSize( + const VideoLayersAllocation& allocation) { + if (allocation.active_spatial_layers.empty()) { + return 1; + } + size_t result = 1; // header + SpatialLayersBitmasks slb = SpatialLayersBitmasksPerRtpStream(allocation); + if (!slb.bitmasks_are_the_same) { + ++result; + if (slb.max_rtp_stream_id >= 2) { + ++result; + } + } + // 2 bits per active spatial layer, rounded up to full byte, i.e. + // 0.25 byte per active spatial layer. + result += (allocation.active_spatial_layers.size() + 3) / 4; + for (const auto& spatial_layer : allocation.active_spatial_layers) { + for (DataRate value : spatial_layer.target_bitrate_per_temporal_layer) { + result += Leb128Size(value.kbps()); + } + } + if (allocation.resolution_and_frame_rate_is_valid) { + result += 5 * allocation.active_spatial_layers.size(); + } + return result; +} + +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h b/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h new file mode 100644 index 0000000000..ff8ea2a7a2 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_ +#define MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_ + +#include "api/video/video_layers_allocation.h" +#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" + +namespace webrtc { + +// TODO(bugs.webrtc.org/12000): Note that this extensions is being developed and +// the wire format will likely change. +class RtpVideoLayersAllocationExtension { + public: + using value_type = VideoLayersAllocation; + static constexpr RTPExtensionType kId = kRtpExtensionVideoLayersAllocation; + static constexpr const char kUri[] = + "http://www.webrtc.org/experiments/rtp-hdrext/video-layers-allocation00"; + static bool Parse(rtc::ArrayView data, + VideoLayersAllocation* allocation); + static size_t ValueSize(const VideoLayersAllocation& allocation); + static bool Write(rtc::ArrayView data, + const VideoLayersAllocation& allocation); +}; + +} // namespace webrtc +#endif // MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_ diff --git a/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension_unittest.cc b/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension_unittest.cc new file mode 100644 index 0000000000..92e5673441 --- /dev/null +++ b/modules/rtp_rtcp/source/rtp_video_layers_allocation_extension_unittest.cc @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h" + +#include "api/video/video_layers_allocation.h" +#include "rtc_base/bit_buffer.h" +#include "rtc_base/buffer.h" + +#include "test/gmock.h" + +namespace webrtc { +namespace { + +TEST(RtpVideoLayersAllocationExtension, WriteEmptyLayersAllocationReturnsTrue) { + VideoLayersAllocation written_allocation; + rtc::Buffer buffer( + RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); +} + +TEST(RtpVideoLayersAllocationExtension, + CanWriteAndParseLayersAllocationWithZeroSpatialLayers) { + // We require the resolution_and_frame_rate_is_valid to be set to true in + // order to send an "empty" allocation. + VideoLayersAllocation written_allocation; + written_allocation.resolution_and_frame_rate_is_valid = true; + written_allocation.rtp_stream_index = 0; + + rtc::Buffer buffer( + RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); + + VideoLayersAllocation parsed_allocation; + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation)); + EXPECT_EQ(written_allocation, parsed_allocation); +} + +TEST(RtpVideoLayersAllocationExtension, + CanWriteAndParse2SpatialWith2TemporalLayers) { + VideoLayersAllocation written_allocation; + written_allocation.rtp_stream_index = 1; + written_allocation.active_spatial_layers = { + { + /*rtp_stream_index*/ 0, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ + {DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0, + }, + { + /*rtp_stream_index*/ 1, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ + {DataRate::KilobitsPerSec(100), DataRate::KilobitsPerSec(200)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0, + }, + }; + rtc::Buffer buffer( + 
RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); + VideoLayersAllocation parsed_allocation; + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation)); + EXPECT_EQ(written_allocation, parsed_allocation); +} + +TEST(RtpVideoLayersAllocationExtension, + CanWriteAndParseAllocationWithDifferentNumerOfSpatialLayers) { + VideoLayersAllocation written_allocation; + written_allocation.rtp_stream_index = 1; + written_allocation.active_spatial_layers = { + {/*rtp_stream_index*/ 0, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(50)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0}, + {/*rtp_stream_index*/ 1, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(100)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0}, + {/*rtp_stream_index*/ 1, + /*spatial_id*/ 1, + /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(200)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0}, + }; + rtc::Buffer buffer( + RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); + VideoLayersAllocation parsed_allocation; + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation)); + EXPECT_EQ(written_allocation, parsed_allocation); +} + +TEST(RtpVideoLayersAllocationExtension, + CanWriteAndParseAllocationWithSkippedLowerSpatialLayer) { + VideoLayersAllocation written_allocation; + written_allocation.rtp_stream_index = 1; + written_allocation.active_spatial_layers = { + {/*rtp_stream_index*/ 0, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(50)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0}, + {/*rtp_stream_index*/ 1, + /*spatial_id*/ 1, + /*target_bitrate_per_temporal_layer*/ 
{DataRate::KilobitsPerSec(200)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0}, + }; + rtc::Buffer buffer( + RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); + VideoLayersAllocation parsed_allocation; + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation)); + EXPECT_EQ(written_allocation, parsed_allocation); +} + +TEST(RtpVideoLayersAllocationExtension, + CanWriteAndParseAllocationWithSkippedRtpStreamIds) { + VideoLayersAllocation written_allocation; + written_allocation.rtp_stream_index = 2; + written_allocation.active_spatial_layers = { + {/*rtp_stream_index*/ 0, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(50)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0}, + {/*rtp_stream_index*/ 2, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(200)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0}, + }; + rtc::Buffer buffer( + RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); + VideoLayersAllocation parsed_allocation; + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation)); + EXPECT_EQ(written_allocation, parsed_allocation); +} + +TEST(RtpVideoLayersAllocationExtension, + CanWriteAndParseAllocationWithDifferentNumerOfTemporalLayers) { + VideoLayersAllocation written_allocation; + written_allocation.rtp_stream_index = 1; + written_allocation.active_spatial_layers = { + { + /*rtp_stream_index*/ 0, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ + {DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)}, + /*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0, + }, + { + /*rtp_stream_index*/ 1, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(100)}, + 
/*width*/ 0, + /*height*/ 0, + /*frame_rate_fps*/ 0, + }, + }; + rtc::Buffer buffer( + RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); + VideoLayersAllocation parsed_allocation; + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation)); + EXPECT_EQ(written_allocation, parsed_allocation); +} + +TEST(RtpVideoLayersAllocationExtension, + CanWriteAndParseAllocationWithResolution) { + VideoLayersAllocation written_allocation; + written_allocation.rtp_stream_index = 1; + written_allocation.resolution_and_frame_rate_is_valid = true; + written_allocation.active_spatial_layers = { + { + /*rtp_stream_index*/ 0, + /*spatial_id*/ 0, + /*target_bitrate_per_temporal_layer*/ + {DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)}, + /*width*/ 320, + /*height*/ 240, + /*frame_rate_fps*/ 8, + }, + { + /*rtp_stream_index*/ 1, + /*spatial_id*/ 1, + /*target_bitrate_per_temporal_layer*/ + {DataRate::KilobitsPerSec(100), DataRate::KilobitsPerSec(200)}, + /*width*/ 640, + /*height*/ 320, + /*frame_rate_fps*/ 30, + }, + }; + + rtc::Buffer buffer( + RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); + VideoLayersAllocation parsed_allocation; + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation)); + EXPECT_EQ(written_allocation, parsed_allocation); +} + +TEST(RtpVideoLayersAllocationExtension, + WriteEmptyAllocationCanHaveAnyRtpStreamIndex) { + VideoLayersAllocation written_allocation; + written_allocation.rtp_stream_index = 1; + rtc::Buffer buffer( + RtpVideoLayersAllocationExtension::ValueSize(written_allocation)); + EXPECT_TRUE( + RtpVideoLayersAllocationExtension::Write(buffer, written_allocation)); +} + +} // namespace +} // namespace webrtc diff --git a/modules/rtp_rtcp/source/source_tracker.cc 
b/modules/rtp_rtcp/source/source_tracker.cc index 22b887c5d2..d6c744512a 100644 --- a/modules/rtp_rtcp/source/source_tracker.cc +++ b/modules/rtp_rtcp/source/source_tracker.cc @@ -25,7 +25,7 @@ void SourceTracker::OnFrameDelivered(const RtpPacketInfos& packet_infos) { } int64_t now_ms = clock_->TimeInMilliseconds(); - rtc::CritScope lock_scope(&lock_); + MutexLock lock_scope(&lock_); for (const auto& packet_info : packet_infos) { for (uint32_t csrc : packet_info.csrcs()) { @@ -54,7 +54,7 @@ std::vector SourceTracker::GetSources() const { std::vector sources; int64_t now_ms = clock_->TimeInMilliseconds(); - rtc::CritScope lock_scope(&lock_); + MutexLock lock_scope(&lock_); PruneEntries(now_ms); diff --git a/modules/rtp_rtcp/source/source_tracker.h b/modules/rtp_rtcp/source/source_tracker.h index fcf99bf8b5..0c7627c41d 100644 --- a/modules/rtp_rtcp/source/source_tracker.h +++ b/modules/rtp_rtcp/source/source_tracker.h @@ -20,7 +20,7 @@ #include "absl/types/optional.h" #include "api/rtp_packet_infos.h" #include "api/transport/rtp/rtp_source.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" #include "system_wrappers/include/clock.h" @@ -116,7 +116,7 @@ class SourceTracker { void PruneEntries(int64_t now_ms) const RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); Clock* const clock_; - rtc::CriticalSection lock_; + mutable Mutex lock_; // Entries are stored in reverse chronological order (i.e. with the most // recently updated entries appearing first). 
Mutability is needed for timeout diff --git a/modules/rtp_rtcp/source/source_tracker_unittest.cc b/modules/rtp_rtcp/source/source_tracker_unittest.cc index 32f9f4b2a3..8514e8462d 100644 --- a/modules/rtp_rtcp/source/source_tracker_unittest.cc +++ b/modules/rtp_rtcp/source/source_tracker_unittest.cc @@ -111,7 +111,7 @@ class SourceTrackerRandomTest packet_infos.emplace_back(GenerateSsrc(), GenerateCsrcs(), GenerateRtpTimestamp(), GenerateAudioLevel(), GenerateAbsoluteCaptureTime(), - GenerateReceiveTimeMs()); + GenerateReceiveTime()); } return RtpPacketInfos(std::move(packet_infos)); @@ -192,8 +192,9 @@ class SourceTrackerRandomTest return value; } - int64_t GenerateReceiveTimeMs() { - return std::uniform_int_distribution()(generator_); + Timestamp GenerateReceiveTime() { + return Timestamp::Micros( + std::uniform_int_distribution()(generator_)); } const uint32_t ssrcs_count_; @@ -239,78 +240,156 @@ TEST(SourceTrackerTest, StartEmpty) { EXPECT_THAT(tracker.GetSources(), IsEmpty()); } -TEST(SourceTrackerTest, OnFrameDeliveredRecordsSources) { +TEST(SourceTrackerTest, OnFrameDeliveredRecordsSourcesDistinctSsrcs) { + constexpr uint32_t kSsrc1 = 10; + constexpr uint32_t kSsrc2 = 11; + constexpr uint32_t kCsrcs0 = 20; + constexpr uint32_t kCsrcs1 = 21; + constexpr uint32_t kCsrcs2 = 22; + constexpr uint32_t kRtpTimestamp0 = 40; + constexpr uint32_t kRtpTimestamp1 = 50; + constexpr absl::optional kAudioLevel0 = 50; + constexpr absl::optional kAudioLevel1 = 20; + constexpr absl::optional kAbsoluteCaptureTime = + AbsoluteCaptureTime{/*absolute_capture_timestamp=*/12, + /*estimated_capture_clock_offset=*/absl::nullopt}; + constexpr Timestamp kReceiveTime0 = Timestamp::Millis(60); + constexpr Timestamp kReceiveTime1 = Timestamp::Millis(70); + + SimulatedClock clock(1000000000000ULL); + SourceTracker tracker(&clock); + + tracker.OnFrameDelivered(RtpPacketInfos( + {RtpPacketInfo(kSsrc1, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0, + kAbsoluteCaptureTime, kReceiveTime0), + 
RtpPacketInfo(kSsrc2, {kCsrcs2}, kRtpTimestamp1, kAudioLevel1, + kAbsoluteCaptureTime, kReceiveTime1)})); + + int64_t timestamp_ms = clock.TimeInMilliseconds(); + constexpr RtpSource::Extensions extensions0 = {kAudioLevel0, + kAbsoluteCaptureTime}; + constexpr RtpSource::Extensions extensions1 = {kAudioLevel1, + kAbsoluteCaptureTime}; + + EXPECT_THAT(tracker.GetSources(), + ElementsAre(RtpSource(timestamp_ms, kSsrc2, RtpSourceType::SSRC, + kRtpTimestamp1, extensions1), + RtpSource(timestamp_ms, kCsrcs2, RtpSourceType::CSRC, + kRtpTimestamp1, extensions1), + RtpSource(timestamp_ms, kSsrc1, RtpSourceType::SSRC, + kRtpTimestamp0, extensions0), + RtpSource(timestamp_ms, kCsrcs1, RtpSourceType::CSRC, + kRtpTimestamp0, extensions0), + RtpSource(timestamp_ms, kCsrcs0, RtpSourceType::CSRC, + kRtpTimestamp0, extensions0))); +} + +TEST(SourceTrackerTest, OnFrameDeliveredRecordsSourcesSameSsrc) { constexpr uint32_t kSsrc = 10; constexpr uint32_t kCsrcs0 = 20; constexpr uint32_t kCsrcs1 = 21; - constexpr uint32_t kRtpTimestamp = 40; - constexpr absl::optional kAudioLevel = 50; + constexpr uint32_t kCsrcs2 = 22; + constexpr uint32_t kRtpTimestamp0 = 40; + constexpr uint32_t kRtpTimestamp1 = 45; + constexpr uint32_t kRtpTimestamp2 = 50; + constexpr absl::optional kAudioLevel0 = 50; + constexpr absl::optional kAudioLevel1 = 20; + constexpr absl::optional kAudioLevel2 = 10; constexpr absl::optional kAbsoluteCaptureTime = AbsoluteCaptureTime{/*absolute_capture_timestamp=*/12, /*estimated_capture_clock_offset=*/absl::nullopt}; - constexpr int64_t kReceiveTimeMs = 60; + constexpr Timestamp kReceiveTime0 = Timestamp::Millis(60); + constexpr Timestamp kReceiveTime1 = Timestamp::Millis(70); + constexpr Timestamp kReceiveTime2 = Timestamp::Millis(80); SimulatedClock clock(1000000000000ULL); SourceTracker tracker(&clock); tracker.OnFrameDelivered(RtpPacketInfos( - {RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs1}, kRtpTimestamp, kAudioLevel, - kAbsoluteCaptureTime, kReceiveTimeMs)})); + 
{RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0, + kAbsoluteCaptureTime, kReceiveTime0), + RtpPacketInfo(kSsrc, {kCsrcs2}, kRtpTimestamp1, kAudioLevel1, + kAbsoluteCaptureTime, kReceiveTime1), + RtpPacketInfo(kSsrc, {kCsrcs0}, kRtpTimestamp2, kAudioLevel2, + kAbsoluteCaptureTime, kReceiveTime2)})); int64_t timestamp_ms = clock.TimeInMilliseconds(); - constexpr RtpSource::Extensions extensions = {kAudioLevel, - kAbsoluteCaptureTime}; + constexpr RtpSource::Extensions extensions0 = {kAudioLevel0, + kAbsoluteCaptureTime}; + constexpr RtpSource::Extensions extensions1 = {kAudioLevel1, + kAbsoluteCaptureTime}; + constexpr RtpSource::Extensions extensions2 = {kAudioLevel2, + kAbsoluteCaptureTime}; EXPECT_THAT(tracker.GetSources(), ElementsAre(RtpSource(timestamp_ms, kSsrc, RtpSourceType::SSRC, - kRtpTimestamp, extensions), - RtpSource(timestamp_ms, kCsrcs1, RtpSourceType::CSRC, - kRtpTimestamp, extensions), + kRtpTimestamp2, extensions2), RtpSource(timestamp_ms, kCsrcs0, RtpSourceType::CSRC, - kRtpTimestamp, extensions))); + kRtpTimestamp2, extensions2), + RtpSource(timestamp_ms, kCsrcs2, RtpSourceType::CSRC, + kRtpTimestamp1, extensions1), + RtpSource(timestamp_ms, kCsrcs1, RtpSourceType::CSRC, + kRtpTimestamp0, extensions0))); } TEST(SourceTrackerTest, OnFrameDeliveredUpdatesSources) { - constexpr uint32_t kSsrc = 10; + constexpr uint32_t kSsrc1 = 10; + constexpr uint32_t kSsrc2 = 11; constexpr uint32_t kCsrcs0 = 20; constexpr uint32_t kCsrcs1 = 21; constexpr uint32_t kCsrcs2 = 22; constexpr uint32_t kRtpTimestamp0 = 40; constexpr uint32_t kRtpTimestamp1 = 41; + constexpr uint32_t kRtpTimestamp2 = 42; constexpr absl::optional kAudioLevel0 = 50; constexpr absl::optional kAudioLevel1 = absl::nullopt; + constexpr absl::optional kAudioLevel2 = 10; constexpr absl::optional kAbsoluteCaptureTime0 = AbsoluteCaptureTime{12, 34}; constexpr absl::optional kAbsoluteCaptureTime1 = AbsoluteCaptureTime{56, 78}; - constexpr int64_t kReceiveTimeMs0 = 60; - 
constexpr int64_t kReceiveTimeMs1 = 61; + constexpr absl::optional kAbsoluteCaptureTime2 = + AbsoluteCaptureTime{89, 90}; + constexpr Timestamp kReceiveTime0 = Timestamp::Millis(60); + constexpr Timestamp kReceiveTime1 = Timestamp::Millis(61); + constexpr Timestamp kReceiveTime2 = Timestamp::Millis(62); + + constexpr RtpSource::Extensions extensions0 = {kAudioLevel0, + kAbsoluteCaptureTime0}; + constexpr RtpSource::Extensions extensions1 = {kAudioLevel1, + kAbsoluteCaptureTime1}; + constexpr RtpSource::Extensions extensions2 = {kAudioLevel2, + kAbsoluteCaptureTime2}; SimulatedClock clock(1000000000000ULL); SourceTracker tracker(&clock); tracker.OnFrameDelivered(RtpPacketInfos( - {RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0, - kAbsoluteCaptureTime0, kReceiveTimeMs0)})); + {RtpPacketInfo(kSsrc1, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0, + kAbsoluteCaptureTime0, kReceiveTime0)})); int64_t timestamp_ms_0 = clock.TimeInMilliseconds(); + EXPECT_THAT( + tracker.GetSources(), + ElementsAre(RtpSource(timestamp_ms_0, kSsrc1, RtpSourceType::SSRC, + kRtpTimestamp0, extensions0), + RtpSource(timestamp_ms_0, kCsrcs1, RtpSourceType::CSRC, + kRtpTimestamp0, extensions0), + RtpSource(timestamp_ms_0, kCsrcs0, RtpSourceType::CSRC, + kRtpTimestamp0, extensions0))); - clock.AdvanceTimeMilliseconds(17); + // Deliver packets with updated sources. 
+ clock.AdvanceTimeMilliseconds(17); tracker.OnFrameDelivered(RtpPacketInfos( - {RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs2}, kRtpTimestamp1, kAudioLevel1, - kAbsoluteCaptureTime1, kReceiveTimeMs1)})); + {RtpPacketInfo(kSsrc1, {kCsrcs0, kCsrcs2}, kRtpTimestamp1, kAudioLevel1, + kAbsoluteCaptureTime1, kReceiveTime1)})); int64_t timestamp_ms_1 = clock.TimeInMilliseconds(); - constexpr RtpSource::Extensions extensions0 = {kAudioLevel0, - kAbsoluteCaptureTime0}; - constexpr RtpSource::Extensions extensions1 = {kAudioLevel1, - kAbsoluteCaptureTime1}; - EXPECT_THAT( tracker.GetSources(), - ElementsAre(RtpSource(timestamp_ms_1, kSsrc, RtpSourceType::SSRC, + ElementsAre(RtpSource(timestamp_ms_1, kSsrc1, RtpSourceType::SSRC, kRtpTimestamp1, extensions1), RtpSource(timestamp_ms_1, kCsrcs2, RtpSourceType::CSRC, kRtpTimestamp1, extensions1), @@ -318,6 +397,27 @@ TEST(SourceTrackerTest, OnFrameDeliveredUpdatesSources) { kRtpTimestamp1, extensions1), RtpSource(timestamp_ms_0, kCsrcs1, RtpSourceType::CSRC, kRtpTimestamp0, extensions0))); + + // Deliver more packets with updated csrcs and a new ssrc.
+ clock.AdvanceTimeMilliseconds(17); + tracker.OnFrameDelivered(RtpPacketInfos( + {RtpPacketInfo(kSsrc2, {kCsrcs0}, kRtpTimestamp2, kAudioLevel2, + kAbsoluteCaptureTime2, kReceiveTime2)})); + + int64_t timestamp_ms_2 = clock.TimeInMilliseconds(); + + EXPECT_THAT( + tracker.GetSources(), + ElementsAre(RtpSource(timestamp_ms_2, kSsrc2, RtpSourceType::SSRC, + kRtpTimestamp2, extensions2), + RtpSource(timestamp_ms_2, kCsrcs0, RtpSourceType::CSRC, + kRtpTimestamp2, extensions2), + RtpSource(timestamp_ms_1, kSsrc1, RtpSourceType::SSRC, + kRtpTimestamp1, extensions1), + RtpSource(timestamp_ms_1, kCsrcs2, RtpSourceType::CSRC, + kRtpTimestamp1, extensions1), + RtpSource(timestamp_ms_0, kCsrcs1, RtpSourceType::CSRC, + kRtpTimestamp0, extensions0))); } TEST(SourceTrackerTest, TimedOutSourcesAreRemoved) { @@ -333,21 +433,21 @@ TEST(SourceTrackerTest, TimedOutSourcesAreRemoved) { AbsoluteCaptureTime{12, 34}; constexpr absl::optional kAbsoluteCaptureTime1 = AbsoluteCaptureTime{56, 78}; - constexpr int64_t kReceiveTimeMs0 = 60; - constexpr int64_t kReceiveTimeMs1 = 61; + constexpr Timestamp kReceiveTime0 = Timestamp::Millis(60); + constexpr Timestamp kReceiveTime1 = Timestamp::Millis(61); SimulatedClock clock(1000000000000ULL); SourceTracker tracker(&clock); tracker.OnFrameDelivered(RtpPacketInfos( {RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs1}, kRtpTimestamp0, kAudioLevel0, - kAbsoluteCaptureTime0, kReceiveTimeMs0)})); + kAbsoluteCaptureTime0, kReceiveTime0)})); clock.AdvanceTimeMilliseconds(17); tracker.OnFrameDelivered(RtpPacketInfos( {RtpPacketInfo(kSsrc, {kCsrcs0, kCsrcs2}, kRtpTimestamp1, kAudioLevel1, - kAbsoluteCaptureTime1, kReceiveTimeMs1)})); + kAbsoluteCaptureTime1, kReceiveTime1)})); int64_t timestamp_ms_1 = clock.TimeInMilliseconds(); diff --git a/modules/rtp_rtcp/source/time_util.cc b/modules/rtp_rtcp/source/time_util.cc index b5b4f8bd98..fe0cfea11f 100644 --- a/modules/rtp_rtcp/source/time_util.cc +++ b/modules/rtp_rtcp/source/time_util.cc @@ -17,48 +17,6 @@ #include 
"rtc_base/time_utils.h" namespace webrtc { -namespace { - -int64_t NtpOffsetMsCalledOnce() { - constexpr int64_t kNtpJan1970Sec = 2208988800; - int64_t clock_time = rtc::TimeMillis(); - int64_t utc_time = rtc::TimeUTCMillis(); - return utc_time - clock_time + kNtpJan1970Sec * rtc::kNumMillisecsPerSec; -} - -} // namespace - -int64_t NtpOffsetMs() { - // Calculate the offset once. - static int64_t ntp_offset_ms = NtpOffsetMsCalledOnce(); - return ntp_offset_ms; -} - -NtpTime TimeMicrosToNtp(int64_t time_us) { - // Since this doesn't return a wallclock time, but only NTP representation - // of rtc::TimeMillis() clock, the exact offset doesn't matter. - // To simplify conversions between NTP and RTP time, this offset is - // limited to milliseconds in resolution. - int64_t time_ntp_us = time_us + NtpOffsetMs() * 1000; - RTC_DCHECK_GE(time_ntp_us, 0); // Time before year 1900 is unsupported. - - // TODO(danilchap): Convert both seconds and fraction together using int128 - // when that type is easily available. - // Currently conversion is done separetly for seconds and fraction of a second - // to avoid overflow. - - // Convert seconds to uint32 through uint64 for well-defined cast. - // Wrap around (will happen in 2036) is expected for ntp time. - uint32_t ntp_seconds = - static_cast(time_ntp_us / rtc::kNumMicrosecsPerSec); - - // Scale fractions of the second to ntp resolution. 
- constexpr int64_t kNtpInSecond = 1LL << 32; - int64_t us_fractions = time_ntp_us % rtc::kNumMicrosecsPerSec; - uint32_t ntp_fractions = - us_fractions * kNtpInSecond / rtc::kNumMicrosecsPerSec; - return NtpTime(ntp_seconds, ntp_fractions); -} uint32_t SaturatedUsToCompactNtp(int64_t us) { constexpr uint32_t kMaxCompactNtp = 0xFFFFFFFF; diff --git a/modules/rtp_rtcp/source/time_util.h b/modules/rtp_rtcp/source/time_util.h index 94b914310c..c883e5ca38 100644 --- a/modules/rtp_rtcp/source/time_util.h +++ b/modules/rtp_rtcp/source/time_util.h @@ -17,20 +17,6 @@ namespace webrtc { -// Converts time obtained using rtc::TimeMicros to ntp format. -// TimeMicrosToNtp guarantees difference of the returned values matches -// difference of the passed values. -// As a result TimeMicrosToNtp(rtc::TimeMicros()) doesn't guarantee to match -// system time. -// However, TimeMicrosToNtp Guarantees that returned NtpTime will be offsetted -// from rtc::TimeMicros() by integral number of milliseconds. -// Use NtpOffsetMs() to get that offset value. -NtpTime TimeMicrosToNtp(int64_t time_us); - -// Difference between Ntp time and local relative time returned by -// rtc::TimeMicros() -int64_t NtpOffsetMs(); - // Helper function for compact ntp representation: // RFC 3550, Section 4. Time Format. 
// Wallclock time is represented using the timestamp format of diff --git a/modules/rtp_rtcp/source/time_util_unittest.cc b/modules/rtp_rtcp/source/time_util_unittest.cc index 4b469bb956..6ff55dda55 100644 --- a/modules/rtp_rtcp/source/time_util_unittest.cc +++ b/modules/rtp_rtcp/source/time_util_unittest.cc @@ -9,34 +9,10 @@ */ #include "modules/rtp_rtcp/source/time_util.h" -#include "rtc_base/fake_clock.h" -#include "rtc_base/time_utils.h" -#include "system_wrappers/include/clock.h" #include "test/gtest.h" namespace webrtc { -TEST(TimeUtilTest, TimeMicrosToNtpDoesntChangeBetweenRuns) { - rtc::ScopedFakeClock clock; - // TimeMicrosToNtp is not pure: it behave differently between different - // execution of the program, but should behave same during same execution. - const int64_t time_us = 12345; - clock.SetTime(Timestamp::Micros(2)); - NtpTime time_ntp = TimeMicrosToNtp(time_us); - clock.SetTime(Timestamp::Micros(time_us)); - EXPECT_EQ(TimeMicrosToNtp(time_us), time_ntp); - clock.SetTime(Timestamp::Micros(1000000)); - EXPECT_EQ(TimeMicrosToNtp(time_us), time_ntp); -} - -TEST(TimeUtilTest, TimeMicrosToNtpKeepsIntervals) { - rtc::ScopedFakeClock clock; - NtpTime time_ntp1 = TimeMicrosToNtp(rtc::TimeMicros()); - clock.AdvanceTime(TimeDelta::Millis(20)); - NtpTime time_ntp2 = TimeMicrosToNtp(rtc::TimeMicros()); - EXPECT_EQ(time_ntp2.ToMs() - time_ntp1.ToMs(), 20); -} - TEST(TimeUtilTest, CompactNtp) { const uint32_t kNtpSec = 0x12345678; const uint32_t kNtpFrac = 0x23456789; diff --git a/modules/rtp_rtcp/source/ulpfec_generator.cc b/modules/rtp_rtcp/source/ulpfec_generator.cc index 265fa4d1ac..4873693164 100644 --- a/modules/rtp_rtcp/source/ulpfec_generator.cc +++ b/modules/rtp_rtcp/source/ulpfec_generator.cc @@ -22,7 +22,7 @@ #include "modules/rtp_rtcp/source/forward_error_correction_internal.h" #include "modules/rtp_rtcp/source/rtp_utility.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace 
webrtc { @@ -77,7 +77,7 @@ UlpfecGenerator::UlpfecGenerator(int red_payload_type, fec_(ForwardErrorCorrection::CreateUlpfec(kUnknownSsrc)), num_protected_frames_(0), min_num_media_packets_(1), - keyframe_in_process_(false), + media_contains_keyframe_(false), fec_bitrate_(/*max_window_size_ms=*/1000, RateStatistics::kBpsScale) {} // Used by FlexFecSender, payload types are unused. @@ -89,7 +89,7 @@ UlpfecGenerator::UlpfecGenerator(std::unique_ptr fec, fec_(std::move(fec)), num_protected_frames_(0), min_num_media_packets_(1), - keyframe_in_process_(false), + media_contains_keyframe_(false), fec_bitrate_(/*max_window_size_ms=*/1000, RateStatistics::kBpsScale) {} UlpfecGenerator::~UlpfecGenerator() = default; @@ -103,7 +103,7 @@ void UlpfecGenerator::SetProtectionParameters( RTC_DCHECK_LE(key_params.fec_rate, 255); // Store the new params and apply them for the next set of FEC packets being // produced. - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); pending_params_.emplace(delta_params, key_params); } @@ -111,8 +111,8 @@ void UlpfecGenerator::AddPacketAndGenerateFec(const RtpPacketToSend& packet) { RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); RTC_DCHECK(generated_fec_packets_.empty()); - if (media_packets_.empty()) { - rtc::CritScope cs(&crit_); + { + MutexLock lock(&mutex_); if (pending_params_) { current_params_ = *pending_params_; pending_params_.reset(); @@ -123,13 +123,12 @@ void UlpfecGenerator::AddPacketAndGenerateFec(const RtpPacketToSend& packet) { min_num_media_packets_ = 1; } } - - keyframe_in_process_ = packet.is_key_frame(); } - RTC_DCHECK_EQ(packet.is_key_frame(), keyframe_in_process_); - bool complete_frame = false; - const bool marker_bit = packet.Marker(); + if (packet.is_key_frame()) { + media_contains_keyframe_ = true; + } + const bool complete_frame = packet.Marker(); if (media_packets_.size() < kUlpfecMaxMediaPackets) { // Our packet masks can only protect up to |kUlpfecMaxMediaPackets| packets. 
auto fec_packet = std::make_unique(); @@ -142,9 +141,8 @@ void UlpfecGenerator::AddPacketAndGenerateFec(const RtpPacketToSend& packet) { last_media_packet_ = packet; } - if (marker_bit) { + if (complete_frame) { ++num_protected_frames_; - complete_frame = true; } auto params = CurrentParams(); @@ -154,7 +152,7 @@ void UlpfecGenerator::AddPacketAndGenerateFec(const RtpPacketToSend& packet) { // less than |kMaxExcessOverhead|, and // (2) at least |min_num_media_packets_| media packets is reached. if (complete_frame && - (num_protected_frames_ == params.max_fec_frames || + (num_protected_frames_ >= params.max_fec_frames || (ExcessOverheadBelowMax() && MinimumMediaPacketsReached()))) { // We are not using Unequal Protection feature of the parity erasure code. constexpr int kNumImportantPackets = 0; @@ -190,8 +188,8 @@ bool UlpfecGenerator::MinimumMediaPacketsReached() const { const FecProtectionParams& UlpfecGenerator::CurrentParams() const { RTC_DCHECK_RUNS_SERIALIZED(&race_checker_); - return keyframe_in_process_ ? current_params_.keyframe_params - : current_params_.delta_params; + return media_contains_keyframe_ ? 
current_params_.keyframe_params + : current_params_.delta_params; } size_t UlpfecGenerator::MaxPacketOverhead() const { @@ -230,19 +228,21 @@ std::vector> UlpfecGenerator::GetFecPackets() { total_fec_size_bytes += red_packet->size(); red_packet->set_packet_type(RtpPacketMediaType::kForwardErrorCorrection); red_packet->set_allow_retransmission(false); + red_packet->set_is_red(true); + red_packet->set_fec_protect_packet(false); fec_packets.push_back(std::move(red_packet)); } ResetState(); - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); fec_bitrate_.Update(total_fec_size_bytes, clock_->TimeInMilliseconds()); return fec_packets; } DataRate UlpfecGenerator::CurrentFecRate() const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return DataRate::BitsPerSec( fec_bitrate_.Rate(clock_->TimeInMilliseconds()).value_or(0)); } @@ -263,6 +263,7 @@ void UlpfecGenerator::ResetState() { last_media_packet_.reset(); generated_fec_packets_.clear(); num_protected_frames_ = 0; + media_contains_keyframe_ = false; } } // namespace webrtc diff --git a/modules/rtp_rtcp/source/ulpfec_generator.h b/modules/rtp_rtcp/source/ulpfec_generator.h index be59e4c9ea..934a1d5c38 100644 --- a/modules/rtp_rtcp/source/ulpfec_generator.h +++ b/modules/rtp_rtcp/source/ulpfec_generator.h @@ -21,9 +21,9 @@ #include "modules/include/module_fec_types.h" #include "modules/rtp_rtcp/source/forward_error_correction.h" #include "modules/rtp_rtcp/source/video_fec_generator.h" -#include "rtc_base/critical_section.h" #include "rtc_base/race_checker.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -59,6 +59,9 @@ class UlpfecGenerator : public VideoFecGenerator { absl::optional GetRtpState() override { return absl::nullopt; } + // Currently used protection params. + const FecProtectionParams& CurrentParams() const; + private: struct Params { Params(); @@ -90,8 +93,6 @@ class UlpfecGenerator : public VideoFecGenerator { // (e.g. 
(2k,2m) vs (k,m)) are generally more effective at recovering losses. bool MinimumMediaPacketsReached() const; - const FecProtectionParams& CurrentParams() const; - void ResetState(); const int red_payload_type_; @@ -110,11 +111,11 @@ class UlpfecGenerator : public VideoFecGenerator { int num_protected_frames_ RTC_GUARDED_BY(race_checker_); int min_num_media_packets_ RTC_GUARDED_BY(race_checker_); Params current_params_ RTC_GUARDED_BY(race_checker_); - bool keyframe_in_process_ RTC_GUARDED_BY(race_checker_); + bool media_contains_keyframe_ RTC_GUARDED_BY(race_checker_); - rtc::CriticalSection crit_; - absl::optional pending_params_ RTC_GUARDED_BY(crit_); - RateStatistics fec_bitrate_ RTC_GUARDED_BY(crit_); + mutable Mutex mutex_; + absl::optional pending_params_ RTC_GUARDED_BY(mutex_); + RateStatistics fec_bitrate_ RTC_GUARDED_BY(mutex_); }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/ulpfec_generator_unittest.cc b/modules/rtp_rtcp/source/ulpfec_generator_unittest.cc index db005ddb49..c07e81d4fc 100644 --- a/modules/rtp_rtcp/source/ulpfec_generator_unittest.cc +++ b/modules/rtp_rtcp/source/ulpfec_generator_unittest.cc @@ -217,4 +217,57 @@ TEST_F(UlpfecGeneratorTest, MixedMediaRtpHeaderLengths) { } } +TEST_F(UlpfecGeneratorTest, UpdatesProtectionParameters) { + const FecProtectionParams kKeyFrameParams = {25, /*max_fec_frames=*/2, + kFecMaskRandom}; + const FecProtectionParams kDeltaFrameParams = {25, /*max_fec_frames=*/5, + kFecMaskRandom}; + + ulpfec_generator_.SetProtectionParameters(kDeltaFrameParams, kKeyFrameParams); + + // No params applied yet. + EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 0); + + // Helper function to add a single-packet frame marked as either key-frame + // or delta-frame.
+ auto add_frame = [&](bool is_keyframe) { + packet_generator_.NewFrame(1); + std::unique_ptr packet = + packet_generator_.NextPacket(0, 10); + RtpPacketToSend rtp_packet(nullptr); + EXPECT_TRUE(rtp_packet.Parse(packet->data.data(), packet->data.size())); + rtp_packet.set_is_key_frame(is_keyframe); + ulpfec_generator_.AddPacketAndGenerateFec(rtp_packet); + }; + + // Add key-frame, keyframe params should apply, no FEC generated yet. + add_frame(true); + EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 2); + EXPECT_TRUE(ulpfec_generator_.GetFecPackets().empty()); + + // Add delta-frame, generated FEC packet. Params will not be updated until + // next added packet though. + add_frame(false); + EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 2); + EXPECT_FALSE(ulpfec_generator_.GetFecPackets().empty()); + + // Add delta-frame, now params get updated. + add_frame(false); + EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 5); + EXPECT_TRUE(ulpfec_generator_.GetFecPackets().empty()); + + // Add yet another delta-frame. + add_frame(false); + EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 5); + EXPECT_TRUE(ulpfec_generator_.GetFecPackets().empty()); + + // Add key-frame, params immediately switch to key-frame ones. The two + // buffered frames plus the key-frame is protected and fec emitted, + // even though the frame count is technically over the keyframe frame count + // threshold. 
+ add_frame(true); + EXPECT_EQ(ulpfec_generator_.CurrentParams().max_fec_frames, 2); + EXPECT_FALSE(ulpfec_generator_.GetFecPackets().empty()); +} + } // namespace webrtc diff --git a/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc b/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc index 261c8f739b..49f483dad6 100644 --- a/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc +++ b/modules/rtp_rtcp/source/ulpfec_header_reader_writer.cc @@ -24,6 +24,11 @@ namespace { // Maximum number of media packets that can be protected in one batch. constexpr size_t kMaxMediaPackets = 48; +// Maximum number of media packets tracked by FEC decoder. +// Maintain a sufficiently larger tracking window than |kMaxMediaPackets| +// to account for packet reordering in pacer/ network. +constexpr size_t kMaxTrackedMediaPackets = 4 * kMaxMediaPackets; + // Maximum number of FEC packets stored inside ForwardErrorCorrection. constexpr size_t kMaxFecPackets = kMaxMediaPackets; @@ -51,13 +56,13 @@ size_t UlpfecHeaderSize(size_t packet_mask_size) { } // namespace UlpfecHeaderReader::UlpfecHeaderReader() - : FecHeaderReader(kMaxMediaPackets, kMaxFecPackets) {} + : FecHeaderReader(kMaxTrackedMediaPackets, kMaxFecPackets) {} UlpfecHeaderReader::~UlpfecHeaderReader() = default; bool UlpfecHeaderReader::ReadFecHeader( ForwardErrorCorrection::ReceivedFecPacket* fec_packet) const { - uint8_t* data = fec_packet->pkt->data.data(); + uint8_t* data = fec_packet->pkt->data.MutableData(); if (fec_packet->pkt->data.size() < kPacketMaskOffset) { return false; // Truncated packet. } @@ -108,7 +113,7 @@ void UlpfecHeaderWriter::FinalizeFecHeader( const uint8_t* packet_mask, size_t packet_mask_size, ForwardErrorCorrection::Packet* fec_packet) const { - uint8_t* data = fec_packet->data.data(); + uint8_t* data = fec_packet->data.MutableData(); // Set E bit to zero. data[0] &= 0x7f; // Set L bit based on packet mask size. 
(Note that the packet mask diff --git a/modules/rtp_rtcp/source/ulpfec_header_reader_writer_unittest.cc b/modules/rtp_rtcp/source/ulpfec_header_reader_writer_unittest.cc index 19da2c87c0..a190a548e4 100644 --- a/modules/rtp_rtcp/source/ulpfec_header_reader_writer_unittest.cc +++ b/modules/rtp_rtcp/source/ulpfec_header_reader_writer_unittest.cc @@ -53,8 +53,9 @@ std::unique_ptr WriteHeader(const uint8_t* packet_mask, UlpfecHeaderWriter writer; std::unique_ptr written_packet(new Packet()); written_packet->data.SetSize(kMediaPacketLength); + uint8_t* data = written_packet->data.MutableData(); for (size_t i = 0; i < written_packet->data.size(); ++i) { - written_packet->data[i] = i; // Actual content doesn't matter. + data[i] = i; // Actual content doesn't matter. } writer.FinalizeFecHeader(kMediaSsrc, kMediaStartSeqNum, packet_mask, packet_mask_size, written_packet.get()); @@ -85,7 +86,8 @@ void VerifyHeaders(size_t expected_fec_header_size, EXPECT_EQ(written_packet.data.size() - expected_fec_header_size, read_packet.protection_length); EXPECT_EQ(0, memcmp(expected_packet_mask, - &read_packet.pkt->data[read_packet.packet_mask_offset], + read_packet.pkt->data.MutableData() + + read_packet.packet_mask_offset, read_packet.packet_mask_size)); // Verify that the call to ReadFecHeader did not tamper with the payload. 
EXPECT_EQ(0, memcmp(written_packet.data.data() + expected_fec_header_size, @@ -147,8 +149,9 @@ TEST(UlpfecHeaderWriterTest, FinalizesSmallHeader) { auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd); Packet written_packet; written_packet.data.SetSize(kMediaPacketLength); + uint8_t* data = written_packet.data.MutableData(); for (size_t i = 0; i < written_packet.data.size(); ++i) { - written_packet.data[i] = i; + data[i] = i; } UlpfecHeaderWriter writer; @@ -171,8 +174,9 @@ TEST(UlpfecHeaderWriterTest, FinalizesLargeHeader) { auto packet_mask = GeneratePacketMask(packet_mask_size, 0xabcd); Packet written_packet; written_packet.data.SetSize(kMediaPacketLength); + uint8_t* data = written_packet.data.MutableData(); for (size_t i = 0; i < written_packet.data.size(); ++i) { - written_packet.data[i] = i; + data[i] = i; } UlpfecHeaderWriter writer; diff --git a/modules/rtp_rtcp/source/ulpfec_receiver_impl.cc b/modules/rtp_rtcp/source/ulpfec_receiver_impl.cc index 4395d8ea6b..fdfa475186 100644 --- a/modules/rtp_rtcp/source/ulpfec_receiver_impl.cc +++ b/modules/rtp_rtcp/source/ulpfec_receiver_impl.cc @@ -37,12 +37,13 @@ UlpfecReceiverImpl::UlpfecReceiverImpl( fec_(ForwardErrorCorrection::CreateUlpfec(ssrc_)) {} UlpfecReceiverImpl::~UlpfecReceiverImpl() { + RTC_DCHECK_RUN_ON(&sequence_checker_); received_packets_.clear(); fec_->ResetState(&recovered_packets_); } FecPacketCounter UlpfecReceiverImpl::GetPacketCounter() const { - rtc::CritScope cs(&crit_sect_); + RTC_DCHECK_RUN_ON(&sequence_checker_); return packet_counter_; } @@ -77,6 +78,10 @@ FecPacketCounter UlpfecReceiverImpl::GetPacketCounter() const { bool UlpfecReceiverImpl::AddReceivedRedPacket( const RtpPacketReceived& rtp_packet, uint8_t ulpfec_payload_type) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + // TODO(bugs.webrtc.org/11993): We get here via Call::DeliverRtp, so should be + // moved to the network thread. 
+ if (rtp_packet.Ssrc() != ssrc_) { RTC_LOG(LS_WARNING) << "Received RED packet with different SSRC than expected; dropping."; @@ -87,7 +92,6 @@ bool UlpfecReceiverImpl::AddReceivedRedPacket( "packet size; dropping."; return false; } - rtc::CritScope cs(&crit_sect_); static constexpr uint8_t kRedHeaderLength = 1; @@ -128,18 +132,19 @@ bool UlpfecReceiverImpl::AddReceivedRedPacket( rtp_packet.Buffer().Slice(rtp_packet.headers_size() + kRedHeaderLength, rtp_packet.payload_size() - kRedHeaderLength); } else { - auto red_payload = rtp_packet.payload().subview(kRedHeaderLength); - received_packet->pkt->data.EnsureCapacity(rtp_packet.headers_size() + - red_payload.size()); + received_packet->pkt->data.EnsureCapacity(rtp_packet.size() - + kRedHeaderLength); // Copy RTP header. received_packet->pkt->data.SetData(rtp_packet.data(), rtp_packet.headers_size()); // Set payload type. - received_packet->pkt->data[1] &= 0x80; // Reset RED payload type. - received_packet->pkt->data[1] += payload_type; // Set media payload type. - // Copy payload data. - received_packet->pkt->data.AppendData(red_payload.data(), - red_payload.size()); + uint8_t& payload_type_byte = received_packet->pkt->data.MutableData()[1]; + payload_type_byte &= 0x80; // Reset RED payload type. + payload_type_byte += payload_type; // Set media payload type. + // Copy payload and padding data, after the RED header. + received_packet->pkt->data.AppendData( + rtp_packet.data() + rtp_packet.headers_size() + kRedHeaderLength, + rtp_packet.size() - rtp_packet.headers_size() - kRedHeaderLength); } if (received_packet->pkt->data.size() > 0) { @@ -150,7 +155,7 @@ bool UlpfecReceiverImpl::AddReceivedRedPacket( // TODO(nisse): Drop always-zero return value. 
int32_t UlpfecReceiverImpl::ProcessReceivedFec() { - crit_sect_.Enter(); + RTC_DCHECK_RUN_ON(&sequence_checker_); // If we iterate over |received_packets_| and it contains a packet that cause // us to recurse back to this function (for example a RED packet encapsulating @@ -167,10 +172,8 @@ int32_t UlpfecReceiverImpl::ProcessReceivedFec() { // Send received media packet to VCM. if (!received_packet->is_fec) { ForwardErrorCorrection::Packet* packet = received_packet->pkt; - crit_sect_.Leave(); recovered_packet_callback_->OnRecoveredPacket(packet->data.data(), packet->data.size()); - crit_sect_.Enter(); // Create a packet with the buffer to modify it. RtpPacketReceived rtp_packet; const uint8_t* const original_data = packet->data.cdata(); @@ -207,13 +210,10 @@ int32_t UlpfecReceiverImpl::ProcessReceivedFec() { // Set this flag first; in case the recovered packet carries a RED // header, OnRecoveredPacket will recurse back here. recovered_packet->returned = true; - crit_sect_.Leave(); recovered_packet_callback_->OnRecoveredPacket(packet->data.data(), packet->data.size()); - crit_sect_.Enter(); } - crit_sect_.Leave(); return 0; } diff --git a/modules/rtp_rtcp/source/ulpfec_receiver_impl.h b/modules/rtp_rtcp/source/ulpfec_receiver_impl.h index 9e4e5b8f0b..f59251f848 100644 --- a/modules/rtp_rtcp/source/ulpfec_receiver_impl.h +++ b/modules/rtp_rtcp/source/ulpfec_receiver_impl.h @@ -17,12 +17,13 @@ #include #include +#include "api/sequence_checker.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/include/ulpfec_receiver.h" #include "modules/rtp_rtcp/source/forward_error_correction.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/system/no_unique_address.h" namespace webrtc { @@ -44,17 +45,18 @@ class UlpfecReceiverImpl : public UlpfecReceiver { const uint32_t ssrc_; const RtpHeaderExtensionMap extensions_; 
- rtc::CriticalSection crit_sect_; - RecoveredPacketReceiver* recovered_packet_callback_; - std::unique_ptr fec_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + RecoveredPacketReceiver* const recovered_packet_callback_; + const std::unique_ptr fec_; // TODO(nisse): The AddReceivedRedPacket method adds one or two packets to // this list at a time, after which it is emptied by ProcessReceivedFec. It // will make things simpler to merge AddReceivedRedPacket and // ProcessReceivedFec into a single method, and we can then delete this list. std::vector> - received_packets_; - ForwardErrorCorrection::RecoveredPacketList recovered_packets_; - FecPacketCounter packet_counter_; + received_packets_ RTC_GUARDED_BY(&sequence_checker_); + ForwardErrorCorrection::RecoveredPacketList recovered_packets_ + RTC_GUARDED_BY(&sequence_checker_); + FecPacketCounter packet_counter_ RTC_GUARDED_BY(&sequence_checker_); }; } // namespace webrtc diff --git a/modules/rtp_rtcp/source/ulpfec_receiver_unittest.cc b/modules/rtp_rtcp/source/ulpfec_receiver_unittest.cc index 4d6aa3d2c9..53d363de67 100644 --- a/modules/rtp_rtcp/source/ulpfec_receiver_unittest.cc +++ b/modules/rtp_rtcp/source/ulpfec_receiver_unittest.cc @@ -158,7 +158,7 @@ void UlpfecReceiverTest::InjectGarbagePacketLength(size_t fec_garbage_offset) { std::list fec_packets; EncodeFec(media_packets, kNumFecPackets, &fec_packets); ByteWriter::WriteBigEndian( - &fec_packets.front()->data[fec_garbage_offset], 0x4711); + fec_packets.front()->data.MutableData() + fec_garbage_offset, 0x4711); // Inject first media packet, then first FEC packet, skipping the second media // packet to cause a recovery from the FEC packet. @@ -392,7 +392,7 @@ TEST_F(UlpfecReceiverTest, PacketNotDroppedTooEarly) { delayed_fec = fec_packets.front(); // Fill the FEC decoder. No packets should be dropped. 
- const size_t kNumMediaPacketsBatch2 = 46; + const size_t kNumMediaPacketsBatch2 = 191; std::list augmented_media_packets_batch2; ForwardErrorCorrection::PacketList media_packets_batch2; for (size_t i = 0; i < kNumMediaPacketsBatch2; ++i) { @@ -431,7 +431,7 @@ TEST_F(UlpfecReceiverTest, PacketDroppedWhenTooOld) { delayed_fec = fec_packets.front(); // Fill the FEC decoder and force the last packet to be dropped. - const size_t kNumMediaPacketsBatch2 = 48; + const size_t kNumMediaPacketsBatch2 = 192; std::list augmented_media_packets_batch2; ForwardErrorCorrection::PacketList media_packets_batch2; for (size_t i = 0; i < kNumMediaPacketsBatch2; ++i) { @@ -512,4 +512,31 @@ TEST_F(UlpfecReceiverTest, TruncatedPacketWithoutDataPastFirstBlock) { SurvivesMaliciousPacket(kPacket, sizeof(kPacket), 100); } +TEST_F(UlpfecReceiverTest, MediaWithPadding) { + const size_t kNumFecPackets = 1; + std::list augmented_media_packets; + ForwardErrorCorrection::PacketList media_packets; + PacketizeFrame(2, 0, &augmented_media_packets, &media_packets); + + // Append four bytes of padding to the first media packet. + const uint8_t kPadding[] = {0, 0, 0, 4}; + augmented_media_packets.front()->data.AppendData(kPadding); + augmented_media_packets.front()->data.MutableData()[0] |= 1 << 5; // P bit. 
+ augmented_media_packets.front()->header.paddingLength = 4; + + std::list fec_packets; + EncodeFec(media_packets, kNumFecPackets, &fec_packets); + + auto it = augmented_media_packets.begin(); + BuildAndAddRedMediaPacket(augmented_media_packets.front()); + + VerifyReconstructedMediaPacket(**it, 1); + EXPECT_EQ(0, receiver_fec_->ProcessReceivedFec()); + + BuildAndAddRedFecPacket(fec_packets.front()); + ++it; + VerifyReconstructedMediaPacket(**it, 1); + EXPECT_EQ(0, receiver_fec_->ProcessReceivedFec()); +} + } // namespace webrtc diff --git a/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc b/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc index 13788025c8..e87be031a8 100644 --- a/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc +++ b/modules/rtp_rtcp/source/video_rtp_depacketizer_h264.cc @@ -264,7 +264,7 @@ absl::optional ParseFuaNalu( uint8_t original_nal_header = fnri | original_nal_type; rtp_payload = rtp_payload.Slice(kNalHeaderSize, rtp_payload.size() - kNalHeaderSize); - rtp_payload[0] = original_nal_header; + rtp_payload.MutableData()[0] = original_nal_header; parsed_payload->video_payload = std::move(rtp_payload); } else { parsed_payload->video_payload = diff --git a/modules/rtp_rtcp/source/video_rtp_depacketizer_h264_unittest.cc b/modules/rtp_rtcp/source/video_rtp_depacketizer_h264_unittest.cc index d7e6147fd6..d335af0244 100644 --- a/modules/rtp_rtcp/source/video_rtp_depacketizer_h264_unittest.cc +++ b/modules/rtp_rtcp/source/video_rtp_depacketizer_h264_unittest.cc @@ -16,7 +16,6 @@ #include "absl/types/optional.h" #include "api/array_view.h" #include "common_video/h264/h264_common.h" -#include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h" #include "modules/rtp_rtcp/source/byte_io.h" #include "rtc_base/copy_on_write_buffer.h" diff --git a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h index a7573993f7..3d7cb3291d 100644 --- 
a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h +++ b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp8.h @@ -25,7 +25,7 @@ class VideoRtpDepacketizerVp8 : public VideoRtpDepacketizer { public: VideoRtpDepacketizerVp8() = default; VideoRtpDepacketizerVp8(const VideoRtpDepacketizerVp8&) = delete; - VideoRtpDepacketizerVp8& operator=(VideoRtpDepacketizerVp8&) = delete; + VideoRtpDepacketizerVp8& operator=(const VideoRtpDepacketizerVp8&) = delete; ~VideoRtpDepacketizerVp8() override = default; // Parses vp8 rtp payload descriptor. diff --git a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc index a719d7ab12..be05009807 100644 --- a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc +++ b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.cc @@ -40,12 +40,12 @@ constexpr int kFailedToParse = 0; bool ParsePictureId(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { uint32_t picture_id; uint32_t m_bit; - RETURN_FALSE_ON_ERROR(parser->ReadBits(&m_bit, 1)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(1, m_bit)); if (m_bit) { - RETURN_FALSE_ON_ERROR(parser->ReadBits(&picture_id, 15)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(15, picture_id)); vp9->max_picture_id = kMaxTwoBytePictureId; } else { - RETURN_FALSE_ON_ERROR(parser->ReadBits(&picture_id, 7)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(7, picture_id)); vp9->max_picture_id = kMaxOneBytePictureId; } vp9->picture_id = picture_id; @@ -60,10 +60,10 @@ bool ParsePictureId(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { // bool ParseLayerInfoCommon(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { uint32_t t, u_bit, s, d_bit; - RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3)); - RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1)); - RETURN_FALSE_ON_ERROR(parser->ReadBits(&s, 3)); - RETURN_FALSE_ON_ERROR(parser->ReadBits(&d_bit, 1)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(3, t)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(1, u_bit)); + 
RETURN_FALSE_ON_ERROR(parser->ReadBits(3, s)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(1, d_bit)); vp9->temporal_idx = t; vp9->temporal_up_switch = u_bit ? true : false; if (s >= kMaxSpatialLayers) @@ -84,7 +84,7 @@ bool ParseLayerInfoCommon(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { bool ParseLayerInfoNonFlexibleMode(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { uint8_t tl0picidx; - RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&tl0picidx)); + RETURN_FALSE_ON_ERROR(parser->ReadUInt8(tl0picidx)); vp9->tl0_pic_idx = tl0picidx; return true; } @@ -117,8 +117,8 @@ bool ParseRefIndices(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { return false; uint32_t p_diff; - RETURN_FALSE_ON_ERROR(parser->ReadBits(&p_diff, 7)); - RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_bit, 1)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(7, p_diff)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(1, n_bit)); vp9->pid_diff[vp9->num_ref_pics] = p_diff; uint32_t scaled_pid = vp9->picture_id; @@ -154,9 +154,9 @@ bool ParseRefIndices(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { // bool ParseSsData(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { uint32_t n_s, y_bit, g_bit; - RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_s, 3)); - RETURN_FALSE_ON_ERROR(parser->ReadBits(&y_bit, 1)); - RETURN_FALSE_ON_ERROR(parser->ReadBits(&g_bit, 1)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(3, n_s)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(1, y_bit)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(1, g_bit)); RETURN_FALSE_ON_ERROR(parser->ConsumeBits(3)); vp9->num_spatial_layers = n_s + 1; vp9->spatial_layer_resolution_present = y_bit ? 
true : false; @@ -164,20 +164,20 @@ bool ParseSsData(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { if (y_bit) { for (size_t i = 0; i < vp9->num_spatial_layers; ++i) { - RETURN_FALSE_ON_ERROR(parser->ReadUInt16(&vp9->width[i])); - RETURN_FALSE_ON_ERROR(parser->ReadUInt16(&vp9->height[i])); + RETURN_FALSE_ON_ERROR(parser->ReadUInt16(vp9->width[i])); + RETURN_FALSE_ON_ERROR(parser->ReadUInt16(vp9->height[i])); } } if (g_bit) { uint8_t n_g; - RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&n_g)); + RETURN_FALSE_ON_ERROR(parser->ReadUInt8(n_g)); vp9->gof.num_frames_in_gof = n_g; } for (size_t i = 0; i < vp9->gof.num_frames_in_gof; ++i) { uint32_t t, u_bit, r; - RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3)); - RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1)); - RETURN_FALSE_ON_ERROR(parser->ReadBits(&r, 2)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(3, t)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(1, u_bit)); + RETURN_FALSE_ON_ERROR(parser->ReadBits(2, r)); RETURN_FALSE_ON_ERROR(parser->ConsumeBits(2)); vp9->gof.temporal_idx[i] = t; vp9->gof.temporal_up_switch[i] = u_bit ? true : false; @@ -185,7 +185,7 @@ bool ParseSsData(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) { for (uint8_t p = 0; p < vp9->gof.num_ref_pics[i]; ++p) { uint8_t p_diff; - RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&p_diff)); + RETURN_FALSE_ON_ERROR(parser->ReadUInt8(p_diff)); vp9->gof.pid_diff[i][p] = p_diff; } } @@ -214,7 +214,7 @@ int VideoRtpDepacketizerVp9::ParseRtpPayload( // Parse mandatory first byte of payload descriptor. 
rtc::BitBuffer parser(rtp_payload.data(), rtp_payload.size()); uint8_t first_byte; - if (!parser.ReadUInt8(&first_byte)) { + if (!parser.ReadUInt8(first_byte)) { RTC_LOG(LS_ERROR) << "Payload length is zero."; return kFailedToParse; } diff --git a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h index c622cbc75e..4bb358a15f 100644 --- a/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h +++ b/modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h @@ -25,7 +25,7 @@ class VideoRtpDepacketizerVp9 : public VideoRtpDepacketizer { public: VideoRtpDepacketizerVp9() = default; VideoRtpDepacketizerVp9(const VideoRtpDepacketizerVp9&) = delete; - VideoRtpDepacketizerVp9& operator=(VideoRtpDepacketizerVp9&) = delete; + VideoRtpDepacketizerVp9& operator=(const VideoRtpDepacketizerVp9&) = delete; ~VideoRtpDepacketizerVp9() override = default; // Parses vp9 rtp payload descriptor. diff --git a/modules/rtp_rtcp/test/testFec/test_fec.cc b/modules/rtp_rtcp/test/testFec/test_fec.cc index db5ff15166..5ac8feca21 100644 --- a/modules/rtp_rtcp/test/testFec/test_fec.cc +++ b/modules/rtp_rtcp/test/testFec/test_fec.cc @@ -254,7 +254,7 @@ void RunTest(bool use_flexfec) { random.Rand(kMinPacketSize, kMaxPacketSize); media_packet->data.SetSize(packet_length); - uint8_t* data = media_packet->data.data(); + uint8_t* data = media_packet->data.MutableData(); // Generate random values for the first 2 bytes. 
data[0] = random.Rand(); data[1] = random.Rand(); @@ -285,7 +285,7 @@ void RunTest(bool use_flexfec) { media_packet_list.push_back(std::move(media_packet)); seq_num++; } - media_packet_list.back()->data[1] |= 0x80; + media_packet_list.back()->data.MutableData()[1] |= 0x80; ASSERT_EQ(0, fec->EncodeFec(media_packet_list, protection_factor, num_imp_packets, kUseUnequalProtection, @@ -312,8 +312,8 @@ void RunTest(bool use_flexfec) { received_packet->pkt = new ForwardErrorCorrection::Packet(); received_packet->pkt->data = media_packet->data; received_packet->ssrc = media_ssrc; - received_packet->seq_num = - ByteReader::ReadBigEndian(&media_packet->data[2]); + received_packet->seq_num = ByteReader::ReadBigEndian( + media_packet->data.data() + 2); received_packet->is_fec = false; received_packet_list.push_back(std::move(received_packet)); } diff --git a/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc b/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc index 44597b85bb..dffdf2ebf6 100644 --- a/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc +++ b/modules/rtp_rtcp/test/testFec/test_packet_masks_metrics.cc @@ -225,7 +225,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { } } // Check that we can only recover 1 packet. - assert(check_num_recovered == 1); + RTC_DCHECK_EQ(check_num_recovered, 1); // Update the state with the newly recovered media packet. state_tmp[jsel] = 0; } @@ -260,7 +260,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { } } } else { // Gilbert-Elliot model for burst model. - assert(loss_model_[k].loss_type == kBurstyLossModel); + RTC_DCHECK_EQ(loss_model_[k].loss_type, kBurstyLossModel); // Transition probabilities: from previous to current state. // Prob. of previous = lost --> current = received. double prob10 = 1.0 / burst_length; @@ -425,8 +425,8 @@ class FecPacketMaskMetricsTest : public ::testing::Test { } } } // Done with loop over total number of packets. 
- assert(num_media_packets_lost <= num_media_packets); - assert(num_packets_lost <= tot_num_packets && num_packets_lost > 0); + RTC_DCHECK_LE(num_media_packets_lost, num_media_packets); + RTC_DCHECK(num_packets_lost <= tot_num_packets && num_packets_lost > 0); double residual_loss = 0.0; // Only need to compute residual loss (number of recovered packets) for // configurations that have at least one media packet lost. @@ -445,7 +445,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { num_recovered_packets = num_media_packets_lost; } } - assert(num_recovered_packets <= num_media_packets); + RTC_DCHECK_LE(num_recovered_packets, num_media_packets); // Compute the residual loss. We only care about recovering media/source // packets, so residual loss is based on lost/recovered media packets. residual_loss =
int gap_loss = GapLoss(tot_num_packets, state.get()); - assert(gap_loss < kMaxGapSize); + RTC_DCHECK_LT(gap_loss, kMaxGapSize); int index = gap_loss * (2 * kMaxMediaPacketsTest) + num_packets_lost; - assert(index < kNumStatesDistribution); + RTC_DCHECK_LT(index, kNumStatesDistribution); metrics_code.residual_loss_per_loss_gap[index] += residual_loss; if (code_type == xor_random_code) { // The configuration density is only a function of the code length and @@ -492,8 +492,8 @@ class FecPacketMaskMetricsTest : public ::testing::Test { metrics_code.variance_residual_loss[k] - (metrics_code.average_residual_loss[k] * metrics_code.average_residual_loss[k]); - assert(metrics_code.variance_residual_loss[k] >= 0.0); - assert(metrics_code.average_residual_loss[k] > 0.0); + RTC_DCHECK_GE(metrics_code.variance_residual_loss[k], 0.0); + RTC_DCHECK_GT(metrics_code.average_residual_loss[k], 0.0); metrics_code.variance_residual_loss[k] = std::sqrt(metrics_code.variance_residual_loss[k]) / metrics_code.average_residual_loss[k]; @@ -509,7 +509,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { } else if (code_type == xor_bursty_code) { CopyMetrics(&kMetricsXorBursty[code_index], metrics_code); } else { - assert(false); + RTC_NOTREACHED(); } } @@ -588,7 +588,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { num_loss_models++; } } - assert(num_loss_models == kNumLossModels); + RTC_DCHECK_EQ(num_loss_models, kNumLossModels); } void SetCodeParams() { @@ -738,7 +738,7 @@ class FecPacketMaskMetricsTest : public ::testing::Test { code_index++; } } - assert(code_index == kNumberCodes); + RTC_DCHECK_EQ(code_index, kNumberCodes); return 0; } diff --git a/modules/utility/BUILD.gn b/modules/utility/BUILD.gn index 8710ed4729..aca7b1efdd 100644 --- a/modules/utility/BUILD.gn +++ b/modules/utility/BUILD.gn @@ -26,11 +26,12 @@ rtc_library("utility") { } if (is_ios) { - libs = [ "AVFoundation.framework" ] + frameworks = [ "AVFoundation.framework" ] } deps = [ 
"..:module_api", + "../../api:sequence_checker", "../../api/task_queue", "../../common_audio", "../../rtc_base:checks", diff --git a/modules/utility/include/jvm_android.h b/modules/utility/include/jvm_android.h index 3caab87761..693ee519ed 100644 --- a/modules/utility/include/jvm_android.h +++ b/modules/utility/include/jvm_android.h @@ -16,8 +16,8 @@ #include #include +#include "api/sequence_checker.h" #include "modules/utility/include/helpers_android.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -34,7 +34,7 @@ class JvmThreadConnector { ~JvmThreadConnector(); private: - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; bool attached_; }; @@ -111,7 +111,7 @@ class JNIEnvironment { std::string JavaToStdString(const jstring& j_string); private: - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; JNIEnv* const jni_; }; @@ -184,7 +184,7 @@ class JVM { private: JNIEnv* jni() const { return GetEnv(jvm_); } - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; JavaVM* const jvm_; }; diff --git a/modules/utility/source/process_thread_impl.cc b/modules/utility/source/process_thread_impl.cc index 3709306925..73fc23400b 100644 --- a/modules/utility/source/process_thread_impl.cc +++ b/modules/utility/source/process_thread_impl.cc @@ -48,7 +48,6 @@ ProcessThreadImpl::ProcessThreadImpl(const char* thread_name) ProcessThreadImpl::~ProcessThreadImpl() { RTC_DCHECK(thread_checker_.IsCurrent()); - RTC_DCHECK(!thread_.get()); RTC_DCHECK(!stop_); while (!delayed_tasks_.empty()) { @@ -69,10 +68,11 @@ void ProcessThreadImpl::Delete() { delete this; } -void ProcessThreadImpl::Start() { +// Doesn't need locking, because the contending thread isn't running. 
+void ProcessThreadImpl::Start() RTC_NO_THREAD_SAFETY_ANALYSIS { RTC_DCHECK(thread_checker_.IsCurrent()); - RTC_DCHECK(!thread_.get()); - if (thread_.get()) + RTC_DCHECK(thread_.empty()); + if (!thread_.empty()) return; RTC_DCHECK(!stop_); @@ -80,47 +80,84 @@ void ProcessThreadImpl::Start() { for (ModuleCallback& m : modules_) m.module->ProcessThreadAttached(this); - thread_.reset( - new rtc::PlatformThread(&ProcessThreadImpl::Run, this, thread_name_)); - thread_->Start(); + thread_ = rtc::PlatformThread::SpawnJoinable( + [this] { + CurrentTaskQueueSetter set_current(this); + while (Process()) { + } + }, + thread_name_); } void ProcessThreadImpl::Stop() { RTC_DCHECK(thread_checker_.IsCurrent()); - if (!thread_.get()) + if (thread_.empty()) return; { - rtc::CritScope lock(&lock_); + // Need to take lock, for synchronization with `thread_`. + MutexLock lock(&mutex_); stop_ = true; } wake_up_.Set(); + thread_.Finalize(); + + StopNoLocks(); +} - thread_->Stop(); +// No locking needed, since this is called after the contending thread is +// stopped. +void ProcessThreadImpl::StopNoLocks() RTC_NO_THREAD_SAFETY_ANALYSIS { + RTC_DCHECK(thread_.empty()); stop_ = false; - thread_.reset(); for (ModuleCallback& m : modules_) m.module->ProcessThreadAttached(nullptr); } void ProcessThreadImpl::WakeUp(Module* module) { // Allowed to be called on any thread. - { - rtc::CritScope lock(&lock_); - for (ModuleCallback& m : modules_) { - if (m.module == module) - m.next_callback = kCallProcessImmediately; + auto holds_mutex = [this] { + if (!IsCurrent()) { + return false; } + RTC_DCHECK_RUN_ON(this); + return holds_mutex_; + }; + if (holds_mutex()) { + // Avoid locking if called on the ProcessThread, via a module's Process), + WakeUpNoLocks(module); + } else { + MutexLock lock(&mutex_); + WakeUpInternal(module); } wake_up_.Set(); } +// Must be called only indirectly from Process, which already holds the lock. 
+void ProcessThreadImpl::WakeUpNoLocks(Module* module) + RTC_NO_THREAD_SAFETY_ANALYSIS { + RTC_DCHECK_RUN_ON(this); + WakeUpInternal(module); +} + +void ProcessThreadImpl::WakeUpInternal(Module* module) { + for (ModuleCallback& m : modules_) { + if (m.module == module) + m.next_callback = kCallProcessImmediately; + } +} + void ProcessThreadImpl::PostTask(std::unique_ptr task) { - // Allowed to be called on any thread. + // Allowed to be called on any thread, except from a module's Process method. + if (IsCurrent()) { + RTC_DCHECK_RUN_ON(this); + RTC_DCHECK(!holds_mutex_) << "Calling ProcessThread::PostTask from " + "Module::Process is not supported"; + } { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); queue_.push(task.release()); } wake_up_.Set(); @@ -131,7 +168,7 @@ void ProcessThreadImpl::PostDelayedTask(std::unique_ptr task, int64_t run_at_ms = rtc::TimeMillis() + milliseconds; bool recalculate_wakeup_time; { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); recalculate_wakeup_time = delayed_tasks_.empty() || run_at_ms < delayed_tasks_.top().run_at_ms; delayed_tasks_.emplace(run_at_ms, std::move(task)); @@ -143,13 +180,14 @@ void ProcessThreadImpl::PostDelayedTask(std::unique_ptr task, void ProcessThreadImpl::RegisterModule(Module* module, const rtc::Location& from) { + TRACE_EVENT0("webrtc", "ProcessThreadImpl::RegisterModule"); RTC_DCHECK(thread_checker_.IsCurrent()); RTC_DCHECK(module) << from.ToString(); #if RTC_DCHECK_IS_ON { // Catch programmer error. - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); for (const ModuleCallback& mc : modules_) { RTC_DCHECK(mc.module != module) << "Already registered here: " << mc.location.ToString() @@ -163,11 +201,11 @@ void ProcessThreadImpl::RegisterModule(Module* module, // Now that we know the module isn't in the list, we'll call out to notify // the module that it's attached to the worker thread. We don't hold // the lock while we make this call. 
- if (thread_.get()) + if (!thread_.empty()) module->ProcessThreadAttached(this); { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); modules_.push_back(ModuleCallback(module, from)); } @@ -182,7 +220,7 @@ void ProcessThreadImpl::DeRegisterModule(Module* module) { RTC_DCHECK(module); { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); modules_.remove_if( [&module](const ModuleCallback& m) { return m.module == module; }); } @@ -191,21 +229,13 @@ void ProcessThreadImpl::DeRegisterModule(Module* module) { module->ProcessThreadAttached(nullptr); } -// static -void ProcessThreadImpl::Run(void* obj) { - ProcessThreadImpl* impl = static_cast(obj); - CurrentTaskQueueSetter set_current(impl); - while (impl->Process()) { - } -} - bool ProcessThreadImpl::Process() { TRACE_EVENT1("webrtc", "ProcessThreadImpl", "name", thread_name_); int64_t now = rtc::TimeMillis(); int64_t next_checkpoint = now + (1000 * 60); - + RTC_DCHECK_RUN_ON(this); { - rtc::CritScope lock(&lock_); + MutexLock lock(&mutex_); if (stop_) return false; for (ModuleCallback& m : modules_) { @@ -216,6 +246,8 @@ bool ProcessThreadImpl::Process() { if (m.next_callback == 0) m.next_callback = GetNextCallbackTime(m.module, now); + // Set to true for the duration of the calls to modules' Process(). 
+ holds_mutex_ = true; if (m.next_callback <= now || m.next_callback == kCallProcessImmediately) { { @@ -230,6 +262,7 @@ bool ProcessThreadImpl::Process() { int64_t new_now = rtc::TimeMillis(); m.next_callback = GetNextCallbackTime(m.module, new_now); } + holds_mutex_ = false; if (m.next_callback < next_checkpoint) next_checkpoint = m.next_callback; @@ -248,11 +281,11 @@ bool ProcessThreadImpl::Process() { while (!queue_.empty()) { QueuedTask* task = queue_.front(); queue_.pop(); - lock_.Leave(); + mutex_.Unlock(); if (task->Run()) { delete task; } - lock_.Enter(); + mutex_.Lock(); } } diff --git a/modules/utility/source/process_thread_impl.h b/modules/utility/source/process_thread_impl.h index 24a72d91f5..5d22e37ca1 100644 --- a/modules/utility/source/process_thread_impl.h +++ b/modules/utility/source/process_thread_impl.h @@ -17,14 +17,13 @@ #include #include +#include "api/sequence_checker.h" #include "api/task_queue/queued_task.h" #include "modules/include/module.h" #include "modules/utility/include/process_thread.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "rtc_base/location.h" #include "rtc_base/platform_thread.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -45,7 +44,6 @@ class ProcessThreadImpl : public ProcessThread { void DeRegisterModule(Module* module) override; protected: - static void Run(void* obj); bool Process(); private: @@ -85,24 +83,32 @@ class ProcessThreadImpl : public ProcessThread { typedef std::list ModuleList; void Delete() override; + // The part of Stop processing that doesn't need any locking. + void StopNoLocks(); + void WakeUpNoLocks(Module* module); + void WakeUpInternal(Module* module) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - // Warning: For some reason, if |lock_| comes immediately before |modules_| - // with the current class layout, we will start to have mysterious crashes - // on Mac 10.9 debug. 
I (Tommi) suspect we're hitting some obscure alignemnt - // issues, but I haven't figured out what they are, if there are alignment - // requirements for mutexes on Mac or if there's something else to it. - // So be careful with changing the layout. - rtc::CriticalSection lock_; // Used to guard modules_, tasks_ and stop_. + // Members protected by this mutex are accessed on the constructor thread and + // on the spawned process thread, and locking is needed only while the process + // thread is running. + Mutex mutex_; - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; rtc::Event wake_up_; - // TODO(pbos): Remove unique_ptr and stop recreating the thread. - std::unique_ptr thread_; + rtc::PlatformThread thread_; - ModuleList modules_; + ModuleList modules_ RTC_GUARDED_BY(mutex_); + // Set to true when calling Process, to allow reentrant calls to WakeUp. + bool holds_mutex_ RTC_GUARDED_BY(this) = false; std::queue queue_; - std::priority_queue delayed_tasks_ RTC_GUARDED_BY(lock_); - bool stop_; + std::priority_queue delayed_tasks_ RTC_GUARDED_BY(mutex_); + // The `stop_` flag is modified only by the construction thread, protected by + // `thread_checker_`. It is read also by the spawned `thread_`. The latter + // thread must take `mutex_` before access, and for thread safety, the + // constructor thread needs to take `mutex_` when it modifies `stop_` and + // `thread_` is running. Annotations like RTC_GUARDED_BY doesn't support this + // usage pattern. 
+ bool stop_ RTC_GUARDED_BY(mutex_); const char* thread_name_; }; diff --git a/modules/utility/source/process_thread_impl_unittest.cc b/modules/utility/source/process_thread_impl_unittest.cc index 6f765369f4..1fef0b6740 100644 --- a/modules/utility/source/process_thread_impl_unittest.cc +++ b/modules/utility/source/process_thread_impl_unittest.cc @@ -37,9 +37,9 @@ static const int kEventWaitTimeout = 500; class MockModule : public Module { public: - MOCK_METHOD0(TimeUntilNextProcess, int64_t()); - MOCK_METHOD0(Process, void()); - MOCK_METHOD1(ProcessThreadAttached, void(ProcessThread*)); + MOCK_METHOD(int64_t, TimeUntilNextProcess, (), (override)); + MOCK_METHOD(void, Process, (), (override)); + MOCK_METHOD(void, ProcessThreadAttached, (ProcessThread*), (override)); }; class RaiseEventTask : public QueuedTask { diff --git a/modules/video_capture/BUILD.gn b/modules/video_capture/BUILD.gn index 1c02412264..3a5052dc7a 100644 --- a/modules/video_capture/BUILD.gn +++ b/modules/video_capture/BUILD.gn @@ -27,20 +27,18 @@ rtc_library("video_capture_module") { ] deps = [ - "..:module_api", "../../api:scoped_refptr", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../common_video", "../../media:rtc_media_base", "../../rtc_base:rtc_base_approved", "../../rtc_base:stringutils", - "../../rtc_base/synchronization:rw_lock_wrapper", + "../../rtc_base/synchronization:mutex", "../../system_wrappers", - "//third_party/abseil-cpp/absl/strings", "//third_party/libyuv", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } if (!build_with_chromium) { @@ -51,10 +49,11 @@ if (!build_with_chromium) { "../../api:scoped_refptr", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", + "../../rtc_base/synchronization:mutex", "../../system_wrappers", ] - if (is_linux) { + if (is_linux || is_chromeos) { sources = [ "linux/device_info_linux.cc", "linux/device_info_linux.h", @@ -76,7 +75,12 @@ if (!build_with_chromium) { 
"windows/video_capture_factory_windows.cc", ] - libs = [ "strmiids.lib" ] + libs = [ + "ole32.lib", + "oleaut32.lib", + "strmiids.lib", + "user32.lib", + ] if (build_with_mozilla) { sources += [ @@ -106,13 +110,13 @@ if (!build_with_chromium) { rtc_test("video_capture_tests") { sources = [ "test/video_capture_unittest.cc" ] ldflags = [] - if (is_linux || is_mac) { + if (is_linux || is_chromeos || is_mac) { ldflags += [ "-lpthread", "-lm", ] } - if (is_linux) { + if (is_linux || is_chromeos) { ldflags += [ "-lrt", "-lXext", @@ -125,19 +129,19 @@ if (!build_with_chromium) { ":video_capture_module", "../../api:scoped_refptr", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../common_video", "../../rtc_base:rtc_base_approved", + "../../rtc_base/synchronization:mutex", "../../system_wrappers", "../../test:frame_utils", + "../../test:test_main", "../../test:test_support", "../../test:video_test_common", "../utility", "//testing/gtest", "//third_party/abseil-cpp/absl/memory", ] - deps += [ "../../test:test_main" ] } } } diff --git a/modules/video_capture/device_info_impl.cc b/modules/video_capture/device_info_impl.cc index 91a72326cf..d5abb29407 100644 --- a/modules/video_capture/device_info_impl.cc +++ b/modules/video_capture/device_info_impl.cc @@ -25,34 +25,25 @@ namespace webrtc { namespace videocapturemodule { DeviceInfoImpl::DeviceInfoImpl() - : _apiLock(*RWLockWrapper::CreateRWLock()), - _lastUsedDeviceName(NULL), - _lastUsedDeviceNameLength(0) {} + : _lastUsedDeviceName(NULL), _lastUsedDeviceNameLength(0) {} DeviceInfoImpl::~DeviceInfoImpl(void) { - _apiLock.AcquireLockExclusive(); + MutexLock lock(&_apiLock); free(_lastUsedDeviceName); - _apiLock.ReleaseLockExclusive(); - - delete &_apiLock; } int32_t DeviceInfoImpl::NumberOfCapabilities(const char* deviceUniqueIdUTF8) { if (!deviceUniqueIdUTF8) return -1; - _apiLock.AcquireLockShared(); + MutexLock lock(&_apiLock); // Is it the same device that is 
asked for again. if (absl::EqualsIgnoreCase( deviceUniqueIdUTF8, absl::string_view(_lastUsedDeviceName, _lastUsedDeviceNameLength))) { - _apiLock.ReleaseLockShared(); return static_cast(_captureCapabilities.size()); } - // Need to get exclusive rights to create the new capability map. - _apiLock.ReleaseLockShared(); - WriteLockScoped cs2(_apiLock); int32_t ret = CreateCapabilityMap(deviceUniqueIdUTF8); return ret; @@ -61,22 +52,16 @@ int32_t DeviceInfoImpl::NumberOfCapabilities(const char* deviceUniqueIdUTF8) { int32_t DeviceInfoImpl::GetCapability(const char* deviceUniqueIdUTF8, const uint32_t deviceCapabilityNumber, VideoCaptureCapability& capability) { - assert(deviceUniqueIdUTF8 != NULL); + RTC_DCHECK(deviceUniqueIdUTF8); - ReadLockScoped cs(_apiLock); + MutexLock lock(&_apiLock); if (!absl::EqualsIgnoreCase( deviceUniqueIdUTF8, absl::string_view(_lastUsedDeviceName, _lastUsedDeviceNameLength))) { - _apiLock.ReleaseLockShared(); - _apiLock.AcquireLockExclusive(); if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) { - _apiLock.ReleaseLockExclusive(); - _apiLock.AcquireLockShared(); return -1; } - _apiLock.ReleaseLockExclusive(); - _apiLock.AcquireLockShared(); } // Make sure the number is valid @@ -98,17 +83,13 @@ int32_t DeviceInfoImpl::GetBestMatchedCapability( if (!deviceUniqueIdUTF8) return -1; - ReadLockScoped cs(_apiLock); + MutexLock lock(&_apiLock); if (!absl::EqualsIgnoreCase( deviceUniqueIdUTF8, absl::string_view(_lastUsedDeviceName, _lastUsedDeviceNameLength))) { - _apiLock.ReleaseLockShared(); - _apiLock.AcquireLockExclusive(); if (-1 == CreateCapabilityMap(deviceUniqueIdUTF8)) { return -1; } - _apiLock.ReleaseLockExclusive(); - _apiLock.AcquireLockShared(); } int32_t bestformatIndex = -1; diff --git a/modules/video_capture/device_info_impl.h b/modules/video_capture/device_info_impl.h index 37a457ce8a..4b47389609 100644 --- a/modules/video_capture/device_info_impl.h +++ b/modules/video_capture/device_info_impl.h @@ -18,7 +18,8 @@ #include 
"api/video/video_rotation.h" #include "modules/video_capture/video_capture.h" #include "modules/video_capture/video_capture_defines.h" -#include "rtc_base/synchronization/rw_lock_wrapper.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { namespace videocapturemodule { @@ -45,15 +46,16 @@ class DeviceInfoImpl : public VideoCaptureModule::DeviceInfo { * Fills the member variable _captureCapabilities with capabilities for the * given device name. */ - virtual int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) = 0; + virtual int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) + RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock) = 0; protected: // Data members typedef std::vector VideoCaptureCapabilities; - VideoCaptureCapabilities _captureCapabilities; - RWLockWrapper& _apiLock; - char* _lastUsedDeviceName; - uint32_t _lastUsedDeviceNameLength; + VideoCaptureCapabilities _captureCapabilities RTC_GUARDED_BY(_apiLock); + Mutex _apiLock; + char* _lastUsedDeviceName RTC_GUARDED_BY(_apiLock); + uint32_t _lastUsedDeviceNameLength RTC_GUARDED_BY(_apiLock); }; } // namespace videocapturemodule } // namespace webrtc diff --git a/modules/video_capture/linux/device_info_linux.cc b/modules/video_capture/linux/device_info_linux.cc index bac5d4078a..cde3b86d5c 100644 --- a/modules/video_capture/linux/device_info_linux.cc +++ b/modules/video_capture/linux/device_info_linux.cc @@ -42,16 +42,22 @@ int32_t DeviceInfoLinux::Init() { DeviceInfoLinux::~DeviceInfoLinux() {} uint32_t DeviceInfoLinux::NumberOfDevices() { - RTC_LOG(LS_INFO) << __FUNCTION__; - uint32_t count = 0; char device[20]; int fd = -1; + struct v4l2_capability cap; /* detect /dev/video [0-63]VideoCaptureModule entries */ for (int n = 0; n < 64; n++) { sprintf(device, "/dev/video%d", n); if ((fd = open(device, O_RDONLY)) != -1) { + // query device capabilities and make sure this is a video capture device + if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 || + 
!(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) { + close(fd); + continue; + } + close(fd); count++; } @@ -67,16 +73,21 @@ int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber, uint32_t deviceUniqueIdUTF8Length, char* /*productUniqueIdUTF8*/, uint32_t /*productUniqueIdUTF8Length*/) { - RTC_LOG(LS_INFO) << __FUNCTION__; - // Travel through /dev/video [0-63] uint32_t count = 0; char device[20]; int fd = -1; bool found = false; + struct v4l2_capability cap; for (int n = 0; n < 64; n++) { sprintf(device, "/dev/video%d", n); if ((fd = open(device, O_RDONLY)) != -1) { + // query device capabilities and make sure this is a video capture device + if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 || + !(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) { + close(fd); + continue; + } if (count == deviceNumber) { // Found the device found = true; @@ -92,7 +103,6 @@ int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber, return -1; // query device capabilities - struct v4l2_capability cap; if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) { RTC_LOG(LS_INFO) << "error in querying the device capability for device " << device << ". 
errno = " << errno; @@ -106,7 +116,7 @@ int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber, memset(deviceNameUTF8, 0, deviceNameLength); memcpy(cameraName, cap.card, sizeof(cap.card)); - if (deviceNameLength >= strlen(cameraName)) { + if (deviceNameLength > strlen(cameraName)) { memcpy(deviceNameUTF8, cameraName, strlen(cameraName)); } else { RTC_LOG(LS_INFO) << "buffer passed is too small"; @@ -116,7 +126,7 @@ int32_t DeviceInfoLinux::GetDeviceName(uint32_t deviceNumber, if (cap.bus_info[0] != 0) // may not available in all drivers { // copy device id - if (deviceUniqueIdUTF8Length >= strlen((const char*)cap.bus_info)) { + if (deviceUniqueIdUTF8Length > strlen((const char*)cap.bus_info)) { memset(deviceUniqueIdUTF8, 0, deviceUniqueIdUTF8Length); memcpy(deviceUniqueIdUTF8, cap.bus_info, strlen((const char*)cap.bus_info)); @@ -136,7 +146,7 @@ int32_t DeviceInfoLinux::CreateCapabilityMap(const char* deviceUniqueIdUTF8) { const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen((char*)deviceUniqueIdUTF8); - if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) { + if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) { RTC_LOG(LS_INFO) << "Device name too long"; return -1; } @@ -153,6 +163,11 @@ int32_t DeviceInfoLinux::CreateCapabilityMap(const char* deviceUniqueIdUTF8) { // query device capabilities struct v4l2_capability cap; if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0) { + // skip devices without video capture capability + if (!(cap.device_caps & V4L2_CAP_VIDEO_CAPTURE)) { + continue; + } + if (cap.bus_info[0] != 0) { if (strncmp((const char*)cap.bus_info, (const char*)deviceUniqueIdUTF8, strlen((const char*)deviceUniqueIdUTF8)) == diff --git a/modules/video_capture/linux/device_info_linux.h b/modules/video_capture/linux/device_info_linux.h index a320c36fde..304ae71230 100644 --- a/modules/video_capture/linux/device_info_linux.h +++ b/modules/video_capture/linux/device_info_linux.h @@ -33,13 +33,14 @@ class DeviceInfoLinux : public 
DeviceInfoImpl { * Fills the membervariable _captureCapabilities with capabilites for the * given device name. */ - int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override; + int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override + RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock); int32_t DisplayCaptureSettingsDialogBox(const char* /*deviceUniqueIdUTF8*/, const char* /*dialogTitleUTF8*/, void* /*parentWindow*/, uint32_t /*positionX*/, uint32_t /*positionY*/) override; - int32_t FillCapabilities(int fd); + int32_t FillCapabilities(int fd) RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock); int32_t Init() override; private: diff --git a/modules/video_capture/linux/video_capture_linux.cc b/modules/video_capture/linux/video_capture_linux.cc index 30865235b6..10f9713ec3 100644 --- a/modules/video_capture/linux/video_capture_linux.cc +++ b/modules/video_capture/linux/video_capture_linux.cc @@ -34,8 +34,7 @@ namespace webrtc { namespace videocapturemodule { rtc::scoped_refptr VideoCaptureImpl::Create( const char* deviceUniqueId) { - rtc::scoped_refptr implementation( - new rtc::RefCountedObject()); + auto implementation = rtc::make_ref_counted(); if (implementation->Init(deviceUniqueId) != 0) return nullptr; @@ -115,7 +114,7 @@ int32_t VideoCaptureModuleV4L2::StartCapture( } } - rtc::CritScope cs(&_captureCritSect); + MutexLock lock(&capture_lock_); // first open /dev/video device char device[20]; sprintf(device, "/dev/video%d", (int)_deviceId); @@ -241,12 +240,15 @@ int32_t VideoCaptureModuleV4L2::StartCapture( } // start capture thread; - if (!_captureThread) { + if (_captureThread.empty()) { quit_ = false; - _captureThread.reset( - new rtc::PlatformThread(VideoCaptureModuleV4L2::CaptureThread, this, - "CaptureThread", rtc::kHighPriority)); - _captureThread->Start(); + _captureThread = rtc::PlatformThread::SpawnJoinable( + [this] { + while (CaptureProcess()) { + } + }, + "CaptureThread", + rtc::ThreadAttributes().SetPriority(rtc::ThreadPriority::kHigh)); } // 
Needed to start UVC camera - from the uvcview application @@ -262,17 +264,16 @@ int32_t VideoCaptureModuleV4L2::StartCapture( } int32_t VideoCaptureModuleV4L2::StopCapture() { - if (_captureThread) { + if (!_captureThread.empty()) { { - rtc::CritScope cs(&_captureCritSect); + MutexLock lock(&capture_lock_); quit_ = true; } - // Make sure the capture thread stop stop using the critsect. - _captureThread->Stop(); - _captureThread.reset(); + // Make sure the capture thread stops using the mutex. + _captureThread.Finalize(); } - rtc::CritScope cs(&_captureCritSect); + MutexLock lock(&capture_lock_); if (_captureStarted) { _captureStarted = false; @@ -357,11 +358,6 @@ bool VideoCaptureModuleV4L2::CaptureStarted() { return _captureStarted; } -void VideoCaptureModuleV4L2::CaptureThread(void* obj) { - VideoCaptureModuleV4L2* capture = static_cast(obj); - while (capture->CaptureProcess()) { - } -} bool VideoCaptureModuleV4L2::CaptureProcess() { int retVal = 0; fd_set rSet; @@ -387,7 +383,7 @@ bool VideoCaptureModuleV4L2::CaptureProcess() { } { - rtc::CritScope cs(&_captureCritSect); + MutexLock lock(&capture_lock_); if (quit_) { return false; diff --git a/modules/video_capture/linux/video_capture_linux.h b/modules/video_capture/linux/video_capture_linux.h index ac9409e23a..fa06d72b8d 100644 --- a/modules/video_capture/linux/video_capture_linux.h +++ b/modules/video_capture/linux/video_capture_linux.h @@ -18,8 +18,8 @@ #include "modules/video_capture/video_capture_defines.h" #include "modules/video_capture/video_capture_impl.h" -#include "rtc_base/critical_section.h" #include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { namespace videocapturemodule { @@ -41,10 +41,9 @@ class VideoCaptureModuleV4L2 : public VideoCaptureImpl { bool AllocateVideoBuffers(); bool DeAllocateVideoBuffers(); - // TODO(pbos): Stop using unique_ptr and resetting the thread. 
- std::unique_ptr _captureThread; - rtc::CriticalSection _captureCritSect; - bool quit_ RTC_GUARDED_BY(_captureCritSect); + rtc::PlatformThread _captureThread; + Mutex capture_lock_; + bool quit_ RTC_GUARDED_BY(capture_lock_); int32_t _deviceId; int32_t _deviceFd; diff --git a/modules/video_capture/test/video_capture_unittest.cc b/modules/video_capture/test/video_capture_unittest.cc index be443e0820..e74a456cee 100644 --- a/modules/video_capture/test/video_capture_unittest.cc +++ b/modules/video_capture/test/video_capture_unittest.cc @@ -23,7 +23,7 @@ #include "common_video/libyuv/include/webrtc_libyuv.h" #include "modules/utility/include/process_thread.h" #include "modules/video_capture/video_capture_factory.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" #include "system_wrappers/include/sleep.h" #include "test/frame_utils.h" @@ -74,7 +74,7 @@ class TestVideoCaptureCallback } void OnFrame(const webrtc::VideoFrame& videoFrame) override { - rtc::CritScope cs(&capture_cs_); + webrtc::MutexLock lock(&capture_lock_); int height = videoFrame.height(); int width = videoFrame.width(); #if defined(WEBRTC_ANDROID) && WEBRTC_ANDROID @@ -106,38 +106,38 @@ class TestVideoCaptureCallback } void SetExpectedCapability(VideoCaptureCapability capability) { - rtc::CritScope cs(&capture_cs_); + webrtc::MutexLock lock(&capture_lock_); capability_ = capability; incoming_frames_ = 0; last_render_time_ms_ = 0; } int incoming_frames() { - rtc::CritScope cs(&capture_cs_); + webrtc::MutexLock lock(&capture_lock_); return incoming_frames_; } int timing_warnings() { - rtc::CritScope cs(&capture_cs_); + webrtc::MutexLock lock(&capture_lock_); return timing_warnings_; } VideoCaptureCapability capability() { - rtc::CritScope cs(&capture_cs_); + webrtc::MutexLock lock(&capture_lock_); return capability_; } bool CompareLastFrame(const webrtc::VideoFrame& frame) { - rtc::CritScope cs(&capture_cs_); + webrtc::MutexLock 
lock(&capture_lock_); return webrtc::test::FrameBufsEqual(last_frame_, frame.video_frame_buffer()); } void SetExpectedCaptureRotation(webrtc::VideoRotation rotation) { - rtc::CritScope cs(&capture_cs_); + webrtc::MutexLock lock(&capture_lock_); rotate_frame_ = rotation; } private: - rtc::CriticalSection capture_cs_; + webrtc::Mutex capture_lock_; VideoCaptureCapability capability_; int64_t last_render_time_ms_; int incoming_frames_; @@ -152,7 +152,7 @@ class VideoCaptureTest : public ::testing::Test { void SetUp() override { device_info_.reset(VideoCaptureFactory::CreateDeviceInfo()); - assert(device_info_.get()); + RTC_DCHECK(device_info_.get()); number_of_devices_ = device_info_->NumberOfDevices(); ASSERT_GT(number_of_devices_, 0u); } diff --git a/modules/video_capture/video_capture.h b/modules/video_capture/video_capture.h index 8d2a8f5514..0f60092d72 100644 --- a/modules/video_capture/video_capture.h +++ b/modules/video_capture/video_capture.h @@ -13,7 +13,6 @@ #include "api/video/video_rotation.h" #include "api/video/video_sink_interface.h" -#include "modules/include/module.h" #include "modules/video_capture/video_capture_defines.h" namespace webrtc { diff --git a/modules/video_capture/video_capture_impl.cc b/modules/video_capture/video_capture_impl.cc index 9d53a91157..6619d15924 100644 --- a/modules/video_capture/video_capture_impl.cc +++ b/modules/video_capture/video_capture_impl.cc @@ -96,12 +96,12 @@ VideoCaptureImpl::~VideoCaptureImpl() { void VideoCaptureImpl::RegisterCaptureDataCallback( rtc::VideoSinkInterface* dataCallBack) { - rtc::CritScope cs(&_apiCs); + MutexLock lock(&api_lock_); _dataCallBack = dataCallBack; } void VideoCaptureImpl::DeRegisterCaptureDataCallback() { - rtc::CritScope cs(&_apiCs); + MutexLock lock(&api_lock_); _dataCallBack = NULL; } int32_t VideoCaptureImpl::DeliverCapturedFrame(VideoFrame& captureFrame) { @@ -118,7 +118,7 @@ int32_t VideoCaptureImpl::IncomingFrame(uint8_t* videoFrame, size_t videoFrameLength, const 
VideoCaptureCapability& frameInfo, int64_t captureTime /*=0*/) { - rtc::CritScope cs(&_apiCs); + MutexLock lock(&api_lock_); const int32_t width = frameInfo.width; const int32_t height = frameInfo.height; @@ -223,7 +223,7 @@ int32_t VideoCaptureImpl::CaptureSettings( } int32_t VideoCaptureImpl::SetCaptureRotation(VideoRotation rotation) { - rtc::CritScope cs(&_apiCs); + MutexLock lock(&api_lock_); _rotateFrame = rotation; return 0; } diff --git a/modules/video_capture/video_capture_impl.h b/modules/video_capture/video_capture_impl.h index 197bfd387c..cbc99b76c1 100644 --- a/modules/video_capture/video_capture_impl.h +++ b/modules/video_capture/video_capture_impl.h @@ -25,7 +25,7 @@ #include "modules/video_capture/video_capture.h" #include "modules/video_capture/video_capture_config.h" #include "modules/video_capture/video_capture_defines.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -78,7 +78,7 @@ class VideoCaptureImpl : public VideoCaptureModule { ~VideoCaptureImpl() override; char* _deviceUniqueId; // current Device unique name; - rtc::CriticalSection _apiCs; + Mutex api_lock_; VideoCaptureCapability _requestedCapability; // Should be set by platform // dependent code in // StartCapture. diff --git a/modules/video_capture/windows/device_info_ds.cc b/modules/video_capture/windows/device_info_ds.cc index a163579bf1..3731dce8bc 100644 --- a/modules/video_capture/windows/device_info_ds.cc +++ b/modules/video_capture/windows/device_info_ds.cc @@ -72,10 +72,10 @@ DeviceInfoDS::DeviceInfoDS() // Details: hr = 0x80010106 <=> "Cannot change thread mode after it is // set". 
// - RTC_LOG(LS_INFO) << __FUNCTION__ - << ": CoInitializeEx(NULL, COINIT_APARTMENTTHREADED)" - " => RPC_E_CHANGED_MODE, error 0x" - << rtc::ToHex(hr); + RTC_DLOG(LS_INFO) << __FUNCTION__ + << ": CoInitializeEx(NULL, COINIT_APARTMENTTHREADED)" + " => RPC_E_CHANGED_MODE, error 0x" + << rtc::ToHex(hr); } } } @@ -99,7 +99,7 @@ int32_t DeviceInfoDS::Init() { return 0; } uint32_t DeviceInfoDS::NumberOfDevices() { - ReadLockScoped cs(_apiLock); + MutexLock lock(&_apiLock); return GetDeviceInfo(0, 0, 0, 0, 0, 0, 0); } @@ -110,7 +110,7 @@ int32_t DeviceInfoDS::GetDeviceName(uint32_t deviceNumber, uint32_t deviceUniqueIdUTF8Length, char* productUniqueIdUTF8, uint32_t productUniqueIdUTF8Length) { - ReadLockScoped cs(_apiLock); + MutexLock lock(&_apiLock); const int32_t result = GetDeviceInfo( deviceNumber, deviceNameUTF8, deviceNameLength, deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, productUniqueIdUTF8, productUniqueIdUTF8Length); @@ -203,7 +203,7 @@ int32_t DeviceInfoDS::GetDeviceInfo(uint32_t deviceNumber, } } if (deviceNameLength) { - RTC_LOG(LS_INFO) << __FUNCTION__ << " " << deviceNameUTF8; + RTC_DLOG(LS_INFO) << __FUNCTION__ << " " << deviceNameUTF8; } return index; } @@ -213,7 +213,7 @@ IBaseFilter* DeviceInfoDS::GetDeviceFilter(const char* deviceUniqueIdUTF8, uint32_t productUniqueIdUTF8Length) { const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen( (char*)deviceUniqueIdUTF8); // UTF8 is also NULL terminated - if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) { + if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) { RTC_LOG(LS_INFO) << "Device name too long"; return NULL; } @@ -287,7 +287,7 @@ IBaseFilter* DeviceInfoDS::GetDeviceFilter(const char* deviceUniqueIdUTF8, int32_t DeviceInfoDS::GetWindowsCapability( const int32_t capabilityIndex, VideoCaptureCapabilityWindows& windowsCapability) { - ReadLockScoped cs(_apiLock); + MutexLock lock(&_apiLock); if (capabilityIndex < 0 || static_cast(capabilityIndex) >= 
_captureCapabilitiesWindows.size()) { @@ -306,7 +306,7 @@ int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) const int32_t deviceUniqueIdUTF8Length = (int32_t)strlen((char*)deviceUniqueIdUTF8); - if (deviceUniqueIdUTF8Length > kVideoCaptureUniqueNameLength) { + if (deviceUniqueIdUTF8Length >= kVideoCaptureUniqueNameLength) { RTC_LOG(LS_INFO) << "Device name too long"; return -1; } @@ -380,7 +380,7 @@ int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) supportFORMAT_VideoInfo2 = true; VIDEOINFOHEADER2* h = reinterpret_cast(pmt->pbFormat); - assert(h); + RTC_DCHECK(h); foundInterlacedFormat |= h->dwInterlaceFlags & (AMINTERLACE_IsInterlaced | AMINTERLACE_DisplayModeBobOnly); @@ -418,7 +418,7 @@ int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) if (pmt->formattype == FORMAT_VideoInfo) { VIDEOINFOHEADER* h = reinterpret_cast(pmt->pbFormat); - assert(h); + RTC_DCHECK(h); capability.directShowCapabilityIndex = tmp; capability.width = h->bmiHeader.biWidth; capability.height = h->bmiHeader.biHeight; @@ -427,7 +427,7 @@ int32_t DeviceInfoDS::CreateCapabilityMap(const char* deviceUniqueIdUTF8) if (pmt->formattype == FORMAT_VideoInfo2) { VIDEOINFOHEADER2* h = reinterpret_cast(pmt->pbFormat); - assert(h); + RTC_DCHECK(h); capability.directShowCapabilityIndex = tmp; capability.width = h->bmiHeader.biWidth; capability.height = h->bmiHeader.biHeight; @@ -568,7 +568,7 @@ void DeviceInfoDS::GetProductId(const char* devicePath, // Find the second occurrence. 
pos = strchr(pos + 1, '&'); uint32_t bytesToCopy = (uint32_t)(pos - startPos); - if (pos && (bytesToCopy <= productUniqueIdUTF8Length) && + if (pos && (bytesToCopy < productUniqueIdUTF8Length) && bytesToCopy <= kVideoCaptureProductIdLength) { strncpy_s((char*)productUniqueIdUTF8, productUniqueIdUTF8Length, (char*)startPos, bytesToCopy); @@ -584,7 +584,7 @@ int32_t DeviceInfoDS::DisplayCaptureSettingsDialogBox( void* parentWindow, uint32_t positionX, uint32_t positionY) { - ReadLockScoped cs(_apiLock); + MutexLock lock(&_apiLock); HWND window = (HWND)parentWindow; IBaseFilter* filter = GetDeviceFilter(deviceUniqueIdUTF8, NULL, 0); diff --git a/modules/video_capture/windows/device_info_ds.h b/modules/video_capture/windows/device_info_ds.h index d782eb5415..2fda3257f4 100644 --- a/modules/video_capture/windows/device_info_ds.h +++ b/modules/video_capture/windows/device_info_ds.h @@ -85,7 +85,8 @@ class DeviceInfoDS : public DeviceInfoImpl { char* productUniqueIdUTF8, uint32_t productUniqueIdUTF8Length); - int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override; + int32_t CreateCapabilityMap(const char* deviceUniqueIdUTF8) override + RTC_EXCLUSIVE_LOCKS_REQUIRED(_apiLock); private: ICreateDevEnum* _dsDevEnum; diff --git a/modules/video_capture/windows/sink_filter_ds.cc b/modules/video_capture/windows/sink_filter_ds.cc index 9019b127cf..e4be7aa14f 100644 --- a/modules/video_capture/windows/sink_filter_ds.cc +++ b/modules/video_capture/windows/sink_filter_ds.cc @@ -58,7 +58,7 @@ class EnumPins : public IEnumPins { } STDMETHOD(Clone)(IEnumPins** pins) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return E_NOTIMPL; } @@ -83,7 +83,7 @@ class EnumPins : public IEnumPins { } STDMETHOD(Skip)(ULONG count) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return E_NOTIMPL; } @@ -274,7 +274,7 @@ class MediaTypesEnum : public IEnumMediaTypes { // IEnumMediaTypes STDMETHOD(Clone)(IEnumMediaTypes** pins) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return E_NOTIMPL; } @@ -364,7 
+364,7 @@ class MediaTypesEnum : public IEnumMediaTypes { } STDMETHOD(Skip)(ULONG count) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return E_NOTIMPL; } @@ -538,7 +538,7 @@ STDMETHODIMP CaptureInputPin::Connect(IPin* receive_pin, return VFW_E_NOT_STOPPED; if (receive_pin_) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return VFW_E_ALREADY_CONNECTED; } @@ -564,7 +564,7 @@ STDMETHODIMP CaptureInputPin::ReceiveConnection( RTC_DCHECK(Filter()->IsStopped()); if (receive_pin_) { - RTC_DCHECK(false); + RTC_NOTREACHED(); return VFW_E_ALREADY_CONNECTED; } diff --git a/modules/video_capture/windows/sink_filter_ds.h b/modules/video_capture/windows/sink_filter_ds.h index af264a937a..b0fabda3cd 100644 --- a/modules/video_capture/windows/sink_filter_ds.h +++ b/modules/video_capture/windows/sink_filter_ds.h @@ -17,10 +17,10 @@ #include #include +#include "api/sequence_checker.h" #include "modules/video_capture/video_capture_impl.h" #include "modules/video_capture/windows/help_functions_ds.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" namespace webrtc { namespace videocapturemodule { @@ -89,8 +89,8 @@ class CaptureInputPin : public IMemInputPin, public IPin { STDMETHOD(ReceiveCanBlock)() override; // clang-format on - rtc::ThreadChecker main_checker_; - rtc::ThreadChecker capture_checker_; + SequenceChecker main_checker_; + SequenceChecker capture_checker_; VideoCaptureCapability requested_capability_ RTC_GUARDED_BY(main_checker_); // Accessed on the main thread when Filter()->IsStopped() (capture thread not @@ -147,7 +147,7 @@ class CaptureSinkFilter : public IBaseFilter { virtual ~CaptureSinkFilter(); private: - rtc::ThreadChecker main_checker_; + SequenceChecker main_checker_; const rtc::scoped_refptr> input_pin_; VideoCaptureImpl* const capture_observer_; FILTER_INFO info_ RTC_GUARDED_BY(main_checker_) = {}; diff --git a/modules/video_capture/windows/video_capture_ds.cc b/modules/video_capture/windows/video_capture_ds.cc index 
615a1b56ea..1a1e51934d 100644 --- a/modules/video_capture/windows/video_capture_ds.cc +++ b/modules/video_capture/windows/video_capture_ds.cc @@ -57,7 +57,7 @@ VideoCaptureDS::~VideoCaptureDS() { int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8) { const int32_t nameLength = (int32_t)strlen((char*)deviceUniqueIdUTF8); - if (nameLength > kVideoCaptureUniqueNameLength) + if (nameLength >= kVideoCaptureUniqueNameLength) return -1; // Store the device name @@ -130,7 +130,7 @@ int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8) { } int32_t VideoCaptureDS::StartCapture(const VideoCaptureCapability& capability) { - rtc::CritScope cs(&_apiCs); + MutexLock lock(&api_lock_); if (capability != _requestedCapability) { DisconnectGraph(); @@ -148,7 +148,7 @@ int32_t VideoCaptureDS::StartCapture(const VideoCaptureCapability& capability) { } int32_t VideoCaptureDS::StopCapture() { - rtc::CritScope cs(&_apiCs); + MutexLock lock(&api_lock_); HRESULT hr = _mediaControl->Pause(); if (FAILED(hr)) { diff --git a/modules/video_capture/windows/video_capture_factory_windows.cc b/modules/video_capture/windows/video_capture_factory_windows.cc index ea9d31add9..34cc982d7e 100644 --- a/modules/video_capture/windows/video_capture_factory_windows.cc +++ b/modules/video_capture/windows/video_capture_factory_windows.cc @@ -27,8 +27,7 @@ rtc::scoped_refptr VideoCaptureImpl::Create( return nullptr; // TODO(tommi): Use Media Foundation implementation for Vista and up. - rtc::scoped_refptr capture( - new rtc::RefCountedObject()); + auto capture = rtc::make_ref_counted(); if (capture->Init(device_id) != 0) { return nullptr; } diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn index b084577243..50f2e8d836 100644 --- a/modules/video_coding/BUILD.gn +++ b/modules/video_coding/BUILD.gn @@ -6,6 +6,7 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
+import("//third_party/libaom/options.gni") import("../../webrtc.gni") rtc_library("encoded_frame") { @@ -19,9 +20,7 @@ rtc_library("encoded_frame") { ":video_codec_interface", "../../api/video:encoded_image", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", - "../../modules:module_api", "../../modules:module_api_public", "../../modules/rtp_rtcp:rtp_video_header", "../../rtc_base:checks", @@ -30,6 +29,8 @@ rtc_library("encoded_frame") { "../../rtc_base/experiments:rtt_mult_experiment", "../../rtc_base/system:rtc_export", "../../system_wrappers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional", "//third_party/abseil-cpp/absl/types:variant", ] @@ -44,6 +45,8 @@ rtc_library("chain_diff_calculator") { deps = [ "../../rtc_base:checks", "../../rtc_base:logging", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/types:optional", ] @@ -57,10 +60,11 @@ rtc_library("frame_dependencies_calculator") { deps = [ "../../api:array_view", - "../../api/video:video_frame_type", "../../common_video/generic_frame_descriptor", "../../rtc_base:checks", "../../rtc_base:logging", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/types:optional", @@ -68,24 +72,25 @@ rtc_library("frame_dependencies_calculator") { } rtc_library("nack_module") { - visibility = [ "*" ] sources = [ "histogram.cc", "histogram.h", - "nack_module.cc", - "nack_module.h", "nack_module2.cc", "nack_module2.h", ] deps = [ "..:module_api", + "../../api:sequence_checker", "../../api/units:time_delta", "../../api/units:timestamp", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_numerics", + "../../rtc_base:rtc_task_queue", "../../rtc_base/experiments:field_trial_parser", + "../../rtc_base/task_utils:pending_task_safety_flag", + 
"../../rtc_base/task_utils:repeating_task", "../../system_wrappers", "../../system_wrappers:field_trial", "../utility", @@ -94,23 +99,6 @@ rtc_library("nack_module") { rtc_library("video_coding") { visibility = [ "*" ] - deps = [ - "..:module_fec_api", - "../../api:array_view", - "../../api:scoped_refptr", - "../../api/video:encoded_image", - "../../api/video:video_adaptation", - "../../api/video:video_bitrate_allocation", - "../../api/video:video_bitrate_allocator_factory", - "../../rtc_base:deprecation", - "../../rtc_base/task_utils:to_queued_task", - "../../system_wrappers:field_trial", - "../../system_wrappers:metrics", - "../rtp_rtcp:rtp_video_header", - "//third_party/abseil-cpp/absl/base:core_headers", - "//third_party/abseil-cpp/absl/memory", - ] - sources = [ "codec_timer.cc", "codec_timer.h", @@ -141,8 +129,18 @@ rtc_library("video_coding") { "media_opt_util.h", "packet_buffer.cc", "packet_buffer.h", + "rtp_frame_id_only_ref_finder.cc", + "rtp_frame_id_only_ref_finder.h", "rtp_frame_reference_finder.cc", "rtp_frame_reference_finder.h", + "rtp_generic_ref_finder.cc", + "rtp_generic_ref_finder.h", + "rtp_seq_num_only_ref_finder.cc", + "rtp_seq_num_only_ref_finder.h", + "rtp_vp8_ref_finder.cc", + "rtp_vp8_ref_finder.h", + "rtp_vp9_ref_finder.cc", + "rtp_vp9_ref_finder.h", "rtt_filter.cc", "rtt_filter.h", "timestamp_map.cc", @@ -156,7 +154,7 @@ rtc_library("video_coding") { "video_receiver2.h", ] - deps += [ + deps = [ ":codec_globals_headers", ":encoded_frame", ":video_codec_interface", @@ -164,17 +162,25 @@ rtc_library("video_coding") { ":webrtc_vp9_helpers", "..:module_api", "..:module_api_public", + "..:module_fec_api", + "../../api:array_view", "../../api:fec_controller_api", "../../api:rtp_headers", "../../api:rtp_packet_info", + "../../api:scoped_refptr", + "../../api:sequence_checker", "../../api/units:data_rate", "../../api/units:time_delta", + "../../api/units:timestamp", "../../api/video:builtin_video_bitrate_allocator_factory", 
"../../api/video:encoded_frame", + "../../api/video:encoded_image", + "../../api/video:video_adaptation", "../../api/video:video_adaptation", + "../../api/video:video_bitrate_allocation", "../../api/video:video_bitrate_allocator", + "../../api/video:video_bitrate_allocator_factory", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_frame_type", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", @@ -184,20 +190,31 @@ rtc_library("video_coding") { "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_numerics", "../../rtc_base:rtc_task_queue", + "../../rtc_base:threading", "../../rtc_base/experiments:alr_experiment", "../../rtc_base/experiments:field_trial_parser", "../../rtc_base/experiments:jitter_upper_bound_experiment", "../../rtc_base/experiments:min_video_bitrate_experiment", "../../rtc_base/experiments:rate_control_settings", "../../rtc_base/experiments:rtt_mult_experiment", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:no_unique_address", "../../rtc_base/task_utils:repeating_task", + "../../rtc_base/task_utils:to_queued_task", "../../rtc_base/third_party/base64", "../../rtc_base/time:timestamp_extrapolator", "../../system_wrappers", + "../../system_wrappers:field_trial", + "../../system_wrappers:metrics", "../rtp_rtcp", "../rtp_rtcp:rtp_rtcp_format", + "../rtp_rtcp:rtp_video_header", + "codecs/av1:av1_svc_config", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/container:inlined_vector", + "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/types:optional", "//third_party/abseil-cpp/absl/types:variant", ] @@ -214,15 +231,14 @@ rtc_library("video_codec_interface") { ] deps = [ ":codec_globals_headers", - "..:module_api", "../../api/video:video_frame", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", 
"../../common_video", "../../common_video/generic_frame_descriptor", "../../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("video_coding_legacy") { @@ -255,9 +271,10 @@ rtc_library("video_coding_legacy") { ":video_coding", "..:module_api", "..:module_api_public", - "../../:webrtc_common", "../../api:rtp_headers", "../../api:rtp_packet_info", + "../../api:sequence_checker", + "../../api/units:timestamp", "../../api/video:encoded_image", "../../api/video:video_frame", "../../api/video:video_frame_type", @@ -269,11 +286,13 @@ rtc_library("video_coding_legacy") { "../../rtc_base:logging", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_event", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", "../../system_wrappers", "../rtp_rtcp:rtp_rtcp_format", "../rtp_rtcp:rtp_video_header", "../utility", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/types:optional", "//third_party/abseil-cpp/absl/types:variant", @@ -305,6 +324,8 @@ rtc_library("video_coding_utility") { "utility/ivf_file_reader.h", "utility/ivf_file_writer.cc", "utility/ivf_file_writer.h", + "utility/qp_parser.cc", + "utility/qp_parser.h", "utility/quality_scaler.cc", "utility/quality_scaler.h", "utility/simulcast_rate_allocator.cc", @@ -319,13 +340,15 @@ rtc_library("video_coding_utility") { deps = [ ":video_codec_interface", - "..:module_api", "../../api:scoped_refptr", + "../../api:sequence_checker", "../../api/video:encoded_frame", "../../api/video:encoded_image", "../../api/video:video_adaptation", "../../api/video:video_bitrate_allocation", "../../api/video:video_bitrate_allocator", + "../../api/video:video_codec_constants", + "../../api/video:video_frame", "../../api/video_codecs:video_codecs_api", "../../common_video", "../../modules/rtp_rtcp", @@ -338,13 +361,17 @@ 
rtc_library("video_coding_utility") { "../../rtc_base/experiments:quality_scaling_experiment", "../../rtc_base/experiments:rate_control_settings", "../../rtc_base/experiments:stable_target_rate_experiment", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", "../../rtc_base/system:arch", "../../rtc_base/system:file_wrapper", + "../../rtc_base/system:no_unique_address", "../../rtc_base/task_utils:repeating_task", "../../rtc_base/task_utils:to_queued_task", "../../system_wrappers:field_trial", "../rtp_rtcp:rtp_rtcp_format", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings:strings", "//third_party/abseil-cpp/absl/types:optional", ] } @@ -368,7 +395,6 @@ rtc_library("webrtc_h264") { ":video_coding_utility", "../../api/video:video_frame", "../../api/video:video_frame_i010", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../common_video", @@ -379,9 +405,11 @@ rtc_library("webrtc_h264") { "../../rtc_base/system:rtc_export", "../../system_wrappers:field_trial", "../../system_wrappers:metrics", + "//third_party/libyuv", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", - "//third_party/libyuv", ] if (rtc_use_h264) { @@ -410,30 +438,50 @@ rtc_library("webrtc_multiplex") { deps = [ ":video_codec_interface", ":video_coding_utility", - "..:module_api", "../../api:fec_controller_api", "../../api:scoped_refptr", "../../api/video:encoded_image", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../common_video", "../../media:rtc_media_base", "../../rtc_base", "../../rtc_base:checks", + "../../rtc_base/synchronization:mutex", "../rtp_rtcp:rtp_rtcp_format", ] } +# This target defines a bare-bones interface towards libvpx, used by the +# VP8 and VP9 wrappers below. 
+rtc_library("webrtc_libvpx_interface") { + visibility = [ "*" ] + sources = [ + "codecs/interface/libvpx_interface.cc", + "codecs/interface/libvpx_interface.h", + ] + deps = [ "../../rtc_base:checks" ] + if (rtc_build_libvpx) { + deps += [ rtc_libvpx_dir ] + } +} + +rtc_library("mock_libvpx_interface") { + testonly = true + sources = [ "codecs/interface/mock_libvpx_interface.h" ] + deps = [ + ":webrtc_libvpx_interface", + "../../test:test_support", + ] +} + # This target includes the internal SW codec. rtc_library("webrtc_vp8") { visibility = [ "*" ] poisonous = [ "software_video_codecs" ] sources = [ "codecs/vp8/include/vp8.h", - "codecs/vp8/libvpx_interface.cc", - "codecs/vp8/libvpx_interface.h", "codecs/vp8/libvpx_vp8_decoder.cc", "codecs/vp8/libvpx_vp8_decoder.h", "codecs/vp8/libvpx_vp8_encoder.cc", @@ -444,30 +492,31 @@ rtc_library("webrtc_vp8") { ":codec_globals_headers", ":video_codec_interface", ":video_coding_utility", + ":webrtc_libvpx_interface", ":webrtc_vp8_temporal_layers", - "..:module_api", - "../..:webrtc_common", "../../api:fec_controller_api", "../../api:scoped_refptr", "../../api/video:encoded_image", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../api/video_codecs:vp8_temporal_layers_factory", "../../common_video", "../../rtc_base:checks", - "../../rtc_base:deprecation", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_numerics", "../../rtc_base/experiments:cpu_speed_experiment", + "../../rtc_base/experiments:encoder_info_settings", "../../rtc_base/experiments:field_trial_parser", "../../rtc_base/experiments:rate_control_settings", "../../system_wrappers:field_trial", "../../system_wrappers:metrics", - "//third_party/abseil-cpp/absl/types:optional", "//third_party/libyuv", ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/types:optional", + ] if (rtc_build_libvpx) { deps += [ 
rtc_libvpx_dir ] } @@ -489,8 +538,6 @@ rtc_library("webrtc_vp8_temporal_layers") { ":codec_globals_headers", ":video_codec_interface", ":video_coding_utility", - "..:module_api", - "../..:webrtc_common", "../../api:fec_controller_api", "../../api/video_codecs:video_codecs_api", "../../rtc_base:checks", @@ -498,8 +545,8 @@ rtc_library("webrtc_vp8_temporal_layers") { "../../rtc_base:rtc_numerics", "../../system_wrappers:field_trial", "../../system_wrappers:metrics", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } # This target includes VP9 files that may be used for any VP9 codec, internal SW or external HW. @@ -507,23 +554,23 @@ rtc_library("webrtc_vp9_helpers") { sources = [ "codecs/vp9/svc_config.cc", "codecs/vp9/svc_config.h", - "codecs/vp9/svc_rate_allocator.cc", "codecs/vp9/svc_rate_allocator.h", ] deps = [ ":codec_globals_headers", ":video_codec_interface", - "../..:webrtc_common", "../../api/video:video_bitrate_allocation", "../../api/video:video_bitrate_allocator", "../../api/video:video_codec_constants", "../../api/video_codecs:video_codecs_api", "../../common_video", "../../rtc_base:checks", + "../../rtc_base:logging", "../../rtc_base/experiments:stable_target_rate_experiment", - "//third_party/abseil-cpp/absl/container:inlined_vector", + "svc:svc_rate_allocator", ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ] } rtc_library("webrtc_vp9") { @@ -531,34 +578,47 @@ rtc_library("webrtc_vp9") { poisonous = [ "software_video_codecs" ] sources = [ "codecs/vp9/include/vp9.h", + "codecs/vp9/libvpx_vp9_decoder.cc", + "codecs/vp9/libvpx_vp9_decoder.h", + "codecs/vp9/libvpx_vp9_encoder.cc", + "codecs/vp9/libvpx_vp9_encoder.h", "codecs/vp9/vp9.cc", "codecs/vp9/vp9_frame_buffer_pool.cc", "codecs/vp9/vp9_frame_buffer_pool.h", - "codecs/vp9/vp9_impl.cc", - "codecs/vp9/vp9_impl.h", ] deps = [ ":video_codec_interface", ":video_coding_utility", + ":webrtc_libvpx_interface", 
":webrtc_vp9_helpers", - "..:module_api", - "../..:webrtc_common", "../../api:fec_controller_api", + "../../api:refcountedbase", "../../api:scoped_refptr", + "../../api/transport:field_trial_based_config", + "../../api/transport:webrtc_key_value_config", "../../api/video:video_frame", "../../api/video:video_frame_i010", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../common_video", "../../media:rtc_media_base", - "../../media:rtc_vp9_profile", "../../rtc_base", "../../rtc_base:checks", + "../../rtc_base/experiments:encoder_info_settings", + "../../rtc_base/experiments:field_trial_parser", "../../rtc_base/experiments:rate_control_settings", + "../../rtc_base/synchronization:mutex", "../../system_wrappers:field_trial", "../rtp_rtcp:rtp_rtcp_format", + "svc:scalability_structures", + "svc:scalable_video_controller", + "//third_party/libyuv", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings:strings", ] if (rtc_build_libvpx) { deps += [ rtc_libvpx_dir ] @@ -599,7 +659,6 @@ if (rtc_include_tests) { "../../api/video_codecs:video_codecs_api", "../../media:rtc_audio_video", "../../media:rtc_media_base", - "../../modules:module_api", "../../rtc_base:rtc_base_approved", "../../sdk:native_api", "../../sdk:peerconnectionfactory_base_objc", @@ -610,6 +669,25 @@ if (rtc_include_tests) { } } + rtc_library("encoded_video_frame_producer") { + testonly = true + sources = [ + "codecs/test/encoded_video_frame_producer.cc", + "codecs/test/encoded_video_frame_producer.h", + ] + deps = [ + ":video_codec_interface", + "../../api:create_frame_generator", + "../../api:frame_generator_api", + "../../api/transport/rtp:dependency_descriptor", + "../../api/video:encoded_image", + "../../api/video:video_frame", + "../../api/video:video_frame_type", + "../../api/video_codecs:video_codecs_api", + "../../rtc_base:checks", + ] + } + 
rtc_library("simulcast_test_fixture_impl") { testonly = true sources = [ @@ -621,13 +699,11 @@ if (rtc_include_tests) { ":video_codec_interface", ":video_coding", ":video_coding_utility", - "../../:webrtc_common", "../../api:mock_video_decoder", "../../api:mock_video_encoder", "../../api:simulcast_test_fixture_api", "../../api/video:encoded_image", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../common_video", @@ -657,6 +733,7 @@ if (rtc_include_tests) { "../../api:create_frame_generator", "../../api:frame_generator_api", "../../api:scoped_refptr", + "../../api:sequence_checker", "../../api:videocodec_test_fixture_api", "../../api/task_queue", "../../api/video:builtin_video_bitrate_allocator_factory", @@ -665,22 +742,22 @@ if (rtc_include_tests) { "../../api/video:video_bitrate_allocator", "../../api/video:video_bitrate_allocator_factory", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../common_video", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_task_queue", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:no_unique_address", "../../rtc_base/task_utils:to_queued_task", "../../test:test_support", "../../test:video_test_common", "../../test:video_test_support", "../rtp_rtcp:rtp_rtcp_format", - "//third_party/abseil-cpp/absl/types:optional", "//third_party/libyuv", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } video_coding_modules_tests_resources = [] @@ -729,16 +806,15 @@ if (rtc_include_tests) { ":video_coding_utility", ":videocodec_test_stats_impl", ":webrtc_vp9_helpers", - "../..:webrtc_common", "../../api:array_view", "../../api:videocodec_test_fixture_api", "../../api/test/video:function_video_factory", + 
"../../api/transport:field_trial_based_config", "../../api/video:video_bitrate_allocation", "../../api/video_codecs:video_codecs_api", "../../call:video_stream_api", "../../common_video", "../../media:rtc_audio_video", - "../../media:rtc_h264_profile_id", "../../media:rtc_internal_video_codecs", "../../media:rtc_media_base", "../../rtc_base:checks", @@ -751,6 +827,9 @@ if (rtc_include_tests) { "../../test:test_support", "../../test:video_test_common", "../../test:video_test_support", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings:strings", "//third_party/abseil-cpp/absl/types:optional", ] } @@ -763,6 +842,7 @@ if (rtc_include_tests) { ] deps = [ "../../api:videocodec_test_fixture_api", + "../../api/numerics", "../../rtc_base:checks", "../../rtc_base:rtc_numerics", "../../rtc_base:stringutils", @@ -780,25 +860,34 @@ if (rtc_include_tests) { "codecs/multiplex/test/multiplex_adapter_unittest.cc", "codecs/test/video_encoder_decoder_instantiation_tests.cc", "codecs/test/videocodec_test_libvpx.cc", - "codecs/vp8/test/mock_libvpx_interface.h", "codecs/vp8/test/vp8_impl_unittest.cc", - "codecs/vp9/test/vp9_impl_unittest.cc", ] + + if (rtc_libvpx_build_vp9) { + sources += [ "codecs/vp9/test/vp9_impl_unittest.cc" ] + } + + # TODO(jianj): Fix crash on iOS and re-enable + if (enable_libaom && !is_ios) { + sources += [ "codecs/test/videocodec_test_libaom.cc" ] + } if (rtc_use_h264) { sources += [ "codecs/test/videocodec_test_openh264.cc" ] } deps = [ + ":encoded_video_frame_producer", + ":mock_libvpx_interface", ":video_codec_interface", ":video_codecs_test_framework", ":video_coding_utility", ":videocodec_test_impl", ":webrtc_h264", + ":webrtc_libvpx_interface", ":webrtc_multiplex", ":webrtc_vp8", ":webrtc_vp9", ":webrtc_vp9_helpers", - "../..:webrtc_common", "../../api:create_frame_generator", "../../api:create_videocodec_test_fixture_api", "../../api:frame_generator_api", @@ -810,26 +899,24 @@ if (rtc_include_tests) { 
"../../api/test/video:function_video_factory", "../../api/video:encoded_image", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../api/video_codecs:rtc_software_fallback_wrappers", "../../api/video_codecs:video_codecs_api", "../../common_video", "../../common_video/test:utilities", - "../../media:rtc_h264_profile_id", "../../media:rtc_internal_video_codecs", "../../media:rtc_media_base", "../../media:rtc_simulcast_encoder_adapter", - "../../media:rtc_vp9_profile", "../../rtc_base", + "../../test:explicit_key_value_config", "../../test:field_trial", "../../test:fileutils", "../../test:test_support", "../../test:video_test_common", "../rtp_rtcp:rtp_rtcp_format", - "//third_party/abseil-cpp/absl/types:optional", "//third_party/libyuv", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] data = video_coding_modules_tests_resources @@ -866,7 +953,6 @@ if (rtc_include_tests) { "codecs/vp8/libvpx_vp8_simulcast_test.cc", "codecs/vp8/screenshare_layers_unittest.cc", "codecs/vp9/svc_config_unittest.cc", - "codecs/vp9/svc_rate_allocator_unittest.cc", "decoding_state_unittest.cc", "fec_controller_unittest.cc", "frame_buffer2_unittest.cc", @@ -883,9 +969,12 @@ if (rtc_include_tests) { "packet_buffer_unittest.cc", "receiver_unittest.cc", "rtp_frame_reference_finder_unittest.cc", + "rtp_vp8_ref_finder_unittest.cc", + "rtp_vp9_ref_finder_unittest.cc", "session_info_unittest.cc", "test/stream_generator.cc", "test/stream_generator.h", + "timestamp_map_unittest.cc", "timing_unittest.cc", "unique_timestamp_counter_unittest.cc", "utility/decoded_frames_history_unittest.cc", @@ -893,8 +982,10 @@ if (rtc_include_tests) { "utility/framerate_controller_unittest.cc", "utility/ivf_file_reader_unittest.cc", "utility/ivf_file_writer_unittest.cc", + "utility/qp_parser_unittest.cc", "utility/quality_scaler_unittest.cc", "utility/simulcast_rate_allocator_unittest.cc", + 
"utility/vp9_uncompressed_header_parser_unittest.cc", "video_codec_initializer_unittest.cc", "video_receiver_unittest.cc", ] @@ -924,7 +1015,6 @@ if (rtc_include_tests) { ":webrtc_vp8_temporal_layers", ":webrtc_vp9", ":webrtc_vp9_helpers", - "..:module_api", "..:module_fec_api", "../../api:array_view", "../../api:create_simulcast_test_fixture_api", @@ -938,12 +1028,12 @@ if (rtc_include_tests) { "../../api/task_queue:default_task_queue_factory", "../../api/test/video:function_video_factory", "../../api/video:builtin_video_bitrate_allocator_factory", + "../../api/video:encoded_frame", "../../api/video:video_adaptation", "../../api/video:video_bitrate_allocation", "../../api/video:video_bitrate_allocator", "../../api/video:video_bitrate_allocator_factory", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_frame_type", "../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", @@ -960,6 +1050,7 @@ if (rtc_include_tests) { "../../rtc_base:rtc_task_queue", "../../rtc_base:task_queue_for_test", "../../rtc_base/experiments:jitter_upper_bound_experiment", + "../../rtc_base/synchronization:mutex", "../../system_wrappers", "../../system_wrappers:field_trial", "../../system_wrappers:metrics", @@ -974,6 +1065,11 @@ if (rtc_include_tests) { "../rtp_rtcp:rtp_rtcp_format", "../rtp_rtcp:rtp_video_header", "codecs/av1:video_coding_codecs_av1_tests", + "deprecated:nack_module", + "svc:scalability_structure_tests", + "svc:svc_rate_allocator_tests", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/types:optional", "//third_party/abseil-cpp/absl/types:variant", diff --git a/modules/video_coding/codecs/av1/BUILD.gn b/modules/video_coding/codecs/av1/BUILD.gn index e6b689b442..e7c901cc9a 100644 --- a/modules/video_coding/codecs/av1/BUILD.gn +++ b/modules/video_coding/codecs/av1/BUILD.gn @@ -9,14 +9,26 @@ import("//third_party/libaom/options.gni") import("../../../../webrtc.gni") 
+rtc_library("av1_svc_config") { + sources = [ + "av1_svc_config.cc", + "av1_svc_config.h", + ] + deps = [ + "../../../../api/video_codecs:video_codecs_api", + "../../../../rtc_base:checks", + "../../../../rtc_base:logging", + "../../svc:scalability_structures", + "../../svc:scalable_video_controller", + ] +} + rtc_library("libaom_av1_decoder") { visibility = [ "*" ] poisonous = [ "software_video_codecs" ] public = [ "libaom_av1_decoder.h" ] - deps = [ - "../../../../api/video_codecs:video_codecs_api", - "//third_party/abseil-cpp/absl/base:core_headers", - ] + deps = [ "../../../../api/video_codecs:video_codecs_api" ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] if (enable_libaom) { sources = [ "libaom_av1_decoder.cc" ] @@ -24,41 +36,31 @@ rtc_library("libaom_av1_decoder") { "../..:video_codec_interface", "../../../../api:scoped_refptr", "../../../../api/video:encoded_image", - "../../../../api/video:video_frame_i420", + "../../../../api/video:video_frame", "../../../../common_video", "../../../../rtc_base:logging", - "//third_party/abseil-cpp/absl/types:optional", "//third_party/libaom", "//third_party/libyuv", ] + absl_deps += [ "//third_party/abseil-cpp/absl/types:optional" ] } else { sources = [ "libaom_av1_decoder_absent.cc" ] } } -rtc_source_set("scalable_video_controller") { - sources = [ - "scalable_video_controller.h", - "scalable_video_controller_no_layering.cc", - "scalable_video_controller_no_layering.h", - ] - deps = [ - "../../../../api/transport/rtp:dependency_descriptor", - "../../../../common_video/generic_frame_descriptor", - "../../../../rtc_base:checks", - "//third_party/abseil-cpp/absl/container:inlined_vector", - "//third_party/abseil-cpp/absl/types:optional", - ] -} - rtc_library("libaom_av1_encoder") { visibility = [ "*" ] poisonous = [ "software_video_codecs" ] public = [ "libaom_av1_encoder.h" ] deps = [ - ":scalable_video_controller", "../../../../api/video_codecs:video_codecs_api", + 
"../../svc:scalability_structures", + "../../svc:scalable_video_controller", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/base:core_headers", + "//third_party/abseil-cpp/absl/types:optional", ] if (enable_libaom) { @@ -68,11 +70,9 @@ rtc_library("libaom_av1_encoder") { "../../../../api:scoped_refptr", "../../../../api/video:encoded_image", "../../../../api/video:video_frame", - "../../../../api/video:video_frame_i420", "../../../../common_video", "../../../../rtc_base:checks", "../../../../rtc_base:logging", - "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/libaom", ] } else { @@ -84,23 +84,31 @@ if (rtc_include_tests) { rtc_library("video_coding_codecs_av1_tests") { testonly = true + sources = [ "av1_svc_config_unittest.cc" ] + deps = [ + ":av1_svc_config", + "../../../../api/video_codecs:video_codecs_api", + "../../../../test:test_support", + ] + if (enable_libaom) { - sources = [ + sources += [ "libaom_av1_encoder_unittest.cc", "libaom_av1_unittest.cc", ] - deps = [ + deps += [ ":libaom_av1_decoder", ":libaom_av1_encoder", + "../..:encoded_video_frame_producer", "../..:video_codec_interface", - "../../../../api:create_frame_generator", - "../../../../api:frame_generator_api", "../../../../api:mock_video_encoder", - "../../../../api/video:video_frame_i420", - "../../../../api/video_codecs:video_codecs_api", - "../../../../test:test_support", - "//third_party/abseil-cpp/absl/types:optional", + "../../../../api/units:data_size", + "../../../../api/units:time_delta", + "../../../../api/video:video_frame", + "../../svc:scalability_structures", + "../../svc:scalable_video_controller", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } } } diff --git a/modules/video_coding/codecs/av1/av1_svc_config.cc b/modules/video_coding/codecs/av1/av1_svc_config.cc new file mode 100644 index 0000000000..b15443c563 --- /dev/null +++ b/modules/video_coding/codecs/av1/av1_svc_config.cc 
@@ -0,0 +1,74 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/codecs/av1/av1_svc_config.h" + +#include +#include +#include + +#include "modules/video_coding/svc/create_scalability_structure.h" +#include "modules/video_coding/svc/scalable_video_controller.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +bool SetAv1SvcConfig(VideoCodec& video_codec) { + RTC_DCHECK_EQ(video_codec.codecType, kVideoCodecAV1); + + if (video_codec.ScalabilityMode().empty()) { + RTC_LOG(LS_INFO) << "No scalability mode set."; + return false; + } + std::unique_ptr structure = + CreateScalabilityStructure(video_codec.ScalabilityMode()); + if (structure == nullptr) { + RTC_LOG(LS_INFO) << "Failed to create structure " + << video_codec.ScalabilityMode(); + return false; + } + ScalableVideoController::StreamLayersConfig info = structure->StreamConfig(); + for (int sl_idx = 0; sl_idx < info.num_spatial_layers; ++sl_idx) { + SpatialLayer& spatial_layer = video_codec.spatialLayers[sl_idx]; + spatial_layer.width = video_codec.width * info.scaling_factor_num[sl_idx] / + info.scaling_factor_den[sl_idx]; + spatial_layer.height = video_codec.height * + info.scaling_factor_num[sl_idx] / + info.scaling_factor_den[sl_idx]; + spatial_layer.maxFramerate = video_codec.maxFramerate; + spatial_layer.numberOfTemporalLayers = info.num_temporal_layers; + spatial_layer.active = true; + } + + if (info.num_spatial_layers == 1) { + SpatialLayer& spatial_layer = video_codec.spatialLayers[0]; + spatial_layer.minBitrate = video_codec.minBitrate; + spatial_layer.maxBitrate = 
video_codec.maxBitrate; + spatial_layer.targetBitrate = + (video_codec.minBitrate + video_codec.maxBitrate) / 2; + return true; + } + + for (int sl_idx = 0; sl_idx < info.num_spatial_layers; ++sl_idx) { + SpatialLayer& spatial_layer = video_codec.spatialLayers[sl_idx]; + // minBitrate and maxBitrate formulas are copied from vp9 settings and + // are not yet tuned for av1. + const int num_pixels = spatial_layer.width * spatial_layer.height; + int min_bitrate_kbps = (600.0 * std::sqrt(num_pixels) - 95'000.0) / 1000.0; + spatial_layer.minBitrate = std::max(min_bitrate_kbps, 20); + spatial_layer.maxBitrate = 50 + static_cast(1.6 * num_pixels / 1000.0); + spatial_layer.targetBitrate = + (spatial_layer.minBitrate + spatial_layer.maxBitrate) / 2; + } + return true; +} + +} // namespace webrtc diff --git a/modules/video_coding/codecs/av1/av1_svc_config.h b/modules/video_coding/codecs/av1/av1_svc_config.h new file mode 100644 index 0000000000..15d94e03a9 --- /dev/null +++ b/modules/video_coding/codecs/av1/av1_svc_config.h @@ -0,0 +1,22 @@ +/* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_ +#define MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_ + +#include "api/video_codecs/video_codec.h" + +namespace webrtc { + +// Fills `video_codec.spatialLayers` using other members. 
+bool SetAv1SvcConfig(VideoCodec& video_codec); + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_CODECS_AV1_AV1_SVC_CONFIG_H_ diff --git a/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc b/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc new file mode 100644 index 0000000000..e6035328da --- /dev/null +++ b/modules/video_coding/codecs/av1/av1_svc_config_unittest.cc @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/codecs/av1/av1_svc_config.h" + +#include "api/video_codecs/video_codec.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +TEST(Av1SvcConfigTest, RequireScalabilityMode) { + VideoCodec video_codec; + video_codec.codecType = kVideoCodecAV1; + + video_codec.SetScalabilityMode(""); + EXPECT_FALSE(SetAv1SvcConfig(video_codec)); + + video_codec.SetScalabilityMode("Unknown"); + EXPECT_FALSE(SetAv1SvcConfig(video_codec)); + + video_codec.SetScalabilityMode("NONE"); + EXPECT_TRUE(SetAv1SvcConfig(video_codec)); +} + +TEST(Av1SvcConfigTest, SetsActiveSpatialLayersFromScalabilityMode) { + VideoCodec video_codec; + video_codec.codecType = kVideoCodecAV1; + video_codec.SetScalabilityMode("L2T1"); + + EXPECT_TRUE(SetAv1SvcConfig(video_codec)); + + EXPECT_TRUE(video_codec.spatialLayers[0].active); + EXPECT_TRUE(video_codec.spatialLayers[1].active); + EXPECT_FALSE(video_codec.spatialLayers[2].active); +} + +TEST(Av1SvcConfigTest, ConfiguresDobuleResolutionRatioFromScalabilityMode) { + VideoCodec video_codec; + video_codec.codecType = kVideoCodecAV1; + video_codec.SetScalabilityMode("L2T1"); + 
video_codec.width = 1200; + video_codec.height = 800; + + EXPECT_TRUE(SetAv1SvcConfig(video_codec)); + + EXPECT_EQ(video_codec.spatialLayers[0].width, 600); + EXPECT_EQ(video_codec.spatialLayers[0].height, 400); + EXPECT_EQ(video_codec.spatialLayers[1].width, 1200); + EXPECT_EQ(video_codec.spatialLayers[1].height, 800); +} + +TEST(Av1SvcConfigTest, ConfiguresSmallResolutionRatioFromScalabilityMode) { + VideoCodec video_codec; + video_codec.codecType = kVideoCodecAV1; + // h mode uses 1.5:1 ratio + video_codec.SetScalabilityMode("L2T1h"); + video_codec.width = 1500; + video_codec.height = 900; + + EXPECT_TRUE(SetAv1SvcConfig(video_codec)); + + EXPECT_EQ(video_codec.spatialLayers[0].width, 1000); + EXPECT_EQ(video_codec.spatialLayers[0].height, 600); + EXPECT_EQ(video_codec.spatialLayers[1].width, 1500); + EXPECT_EQ(video_codec.spatialLayers[1].height, 900); +} + +TEST(Av1SvcConfigTest, CopiesFramrate) { + VideoCodec video_codec; + video_codec.codecType = kVideoCodecAV1; + // h mode uses 1.5:1 ratio + video_codec.SetScalabilityMode("L2T1"); + video_codec.maxFramerate = 27; + + EXPECT_TRUE(SetAv1SvcConfig(video_codec)); + + EXPECT_EQ(video_codec.spatialLayers[0].maxFramerate, 27); + EXPECT_EQ(video_codec.spatialLayers[1].maxFramerate, 27); +} + +TEST(Av1SvcConfigTest, SetsNumberOfTemporalLayers) { + VideoCodec video_codec; + video_codec.codecType = kVideoCodecAV1; + video_codec.SetScalabilityMode("L1T3"); + + EXPECT_TRUE(SetAv1SvcConfig(video_codec)); + + EXPECT_EQ(video_codec.spatialLayers[0].numberOfTemporalLayers, 3); +} + +TEST(Av1SvcConfigTest, CopiesMinMaxBitrateForSingleSpatialLayer) { + VideoCodec video_codec; + video_codec.codecType = kVideoCodecAV1; + video_codec.SetScalabilityMode("L1T3"); + video_codec.minBitrate = 100; + video_codec.maxBitrate = 500; + + EXPECT_TRUE(SetAv1SvcConfig(video_codec)); + + EXPECT_EQ(video_codec.spatialLayers[0].minBitrate, 100u); + EXPECT_EQ(video_codec.spatialLayers[0].maxBitrate, 500u); + 
EXPECT_LE(video_codec.spatialLayers[0].minBitrate, + video_codec.spatialLayers[0].targetBitrate); + EXPECT_LE(video_codec.spatialLayers[0].targetBitrate, + video_codec.spatialLayers[0].maxBitrate); +} + +TEST(Av1SvcConfigTest, SetsBitratesForMultipleSpatialLayers) { + VideoCodec video_codec; + video_codec.codecType = kVideoCodecAV1; + video_codec.SetScalabilityMode("L3T3"); + + EXPECT_TRUE(SetAv1SvcConfig(video_codec)); + + EXPECT_GT(video_codec.spatialLayers[0].minBitrate, 0u); + EXPECT_LE(video_codec.spatialLayers[0].minBitrate, + video_codec.spatialLayers[0].targetBitrate); + EXPECT_LE(video_codec.spatialLayers[0].targetBitrate, + video_codec.spatialLayers[0].maxBitrate); + + EXPECT_GT(video_codec.spatialLayers[1].minBitrate, 0u); + EXPECT_LE(video_codec.spatialLayers[1].minBitrate, + video_codec.spatialLayers[1].targetBitrate); + EXPECT_LE(video_codec.spatialLayers[1].targetBitrate, + video_codec.spatialLayers[1].maxBitrate); + + EXPECT_GT(video_codec.spatialLayers[2].minBitrate, 0u); + EXPECT_LE(video_codec.spatialLayers[2].minBitrate, + video_codec.spatialLayers[2].targetBitrate); + EXPECT_LE(video_codec.spatialLayers[2].targetBitrate, + video_codec.spatialLayers[2].maxBitrate); +} + +} // namespace +} // namespace webrtc diff --git a/modules/video_coding/codecs/av1/libaom_av1_decoder.cc b/modules/video_coding/codecs/av1/libaom_av1_decoder.cc index 122f214a5c..c187c72026 100644 --- a/modules/video_coding/codecs/av1/libaom_av1_decoder.cc +++ b/modules/video_coding/codecs/av1/libaom_av1_decoder.cc @@ -19,7 +19,7 @@ #include "api/video/i420_buffer.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_decoder.h" -#include "common_video/include/i420_buffer_pool.h" +#include "common_video/include/video_frame_buffer_pool.h" #include "modules/video_coding/include/video_error_codes.h" #include "rtc_base/logging.h" #include "third_party/libaom/source/libaom/aom/aom_decoder.h" @@ -53,11 +53,14 @@ class LibaomAv1Decoder final : public VideoDecoder 
{ int32_t Release() override; + DecoderInfo GetDecoderInfo() const override; + const char* ImplementationName() const override; + private: aom_codec_ctx_t context_; bool inited_; // Pool of memory buffers to store decoded image data for application access. - I420BufferPool buffer_pool_; + VideoFrameBufferPool buffer_pool_; DecodedImageCallback* decode_complete_callback_; }; @@ -127,7 +130,7 @@ int32_t LibaomAv1Decoder::Decode(const EncodedImage& encoded_image, // Return decoded frame data. int qp; - ret = aom_codec_control_(&context_, AOMD_GET_LAST_QUANTIZER, &qp); + ret = aom_codec_control(&context_, AOMD_GET_LAST_QUANTIZER, &qp); if (ret != AOM_CODEC_OK) { RTC_LOG(LS_WARNING) << "LibaomAv1Decoder::Decode returned " << ret << " on control AOME_GET_LAST_QUANTIZER."; @@ -136,7 +139,7 @@ int32_t LibaomAv1Decoder::Decode(const EncodedImage& encoded_image, // Allocate memory for decoded frame. rtc::scoped_refptr buffer = - buffer_pool_.CreateBuffer(decoded_image->d_w, decoded_image->d_h); + buffer_pool_.CreateI420Buffer(decoded_image->d_w, decoded_image->d_h); if (!buffer.get()) { // Pool has too many pending frames. 
RTC_LOG(LS_WARNING) << "LibaomAv1Decoder::Decode returned due to lack of" @@ -180,6 +183,17 @@ int32_t LibaomAv1Decoder::Release() { return WEBRTC_VIDEO_CODEC_OK; } +VideoDecoder::DecoderInfo LibaomAv1Decoder::GetDecoderInfo() const { + DecoderInfo info; + info.implementation_name = "libaom"; + info.is_hardware_accelerated = false; + return info; +} + +const char* LibaomAv1Decoder::ImplementationName() const { + return "libaom"; +} + } // namespace const bool kIsLibaomAv1DecoderSupported = true; diff --git a/modules/video_coding/codecs/av1/libaom_av1_encoder.cc b/modules/video_coding/codecs/av1/libaom_av1_encoder.cc index c34e0b8663..034709a989 100644 --- a/modules/video_coding/codecs/av1/libaom_av1_encoder.cc +++ b/modules/video_coding/codecs/av1/libaom_av1_encoder.cc @@ -17,16 +17,19 @@ #include #include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/types/optional.h" #include "api/scoped_refptr.h" #include "api/video/encoded_image.h" #include "api/video/i420_buffer.h" #include "api/video/video_frame.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" -#include "modules/video_coding/codecs/av1/scalable_video_controller.h" -#include "modules/video_coding/codecs/av1/scalable_video_controller_no_layering.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_error_codes.h" +#include "modules/video_coding/svc/create_scalability_structure.h" +#include "modules/video_coding/svc/scalable_video_controller.h" +#include "modules/video_coding/svc/scalable_video_controller_no_layering.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "third_party/libaom/source/libaom/aom/aom_codec.h" @@ -37,11 +40,10 @@ namespace webrtc { namespace { // Encoder configuration parameters -constexpr int kQpMax = 56; constexpr int kQpMin = 10; -constexpr int kUsageProfile = 1; // 0 = good quality; 1 = real-time. 
-constexpr int kMinQindex = 58; // Min qindex threshold for QP scaling. -constexpr int kMaxQindex = 180; // Max qindex threshold for QP scaling. +constexpr int kUsageProfile = AOM_USAGE_REALTIME; +constexpr int kMinQindex = 145; // Min qindex threshold for QP scaling. +constexpr int kMaxQindex = 205; // Max qindex threshold for QP scaling. constexpr int kBitDepth = 8; constexpr int kLagInFrames = 0; // No look ahead. constexpr int kRtpTicksPerSecond = 90000; @@ -49,21 +51,30 @@ constexpr float kMinimumFrameRate = 1.0; // Only positive speeds, range for real-time coding currently is: 6 - 8. // Lower means slower/better quality, higher means fastest/lower quality. -int GetCpuSpeed(int width, int height) { +int GetCpuSpeed(int width, int height, int number_of_cores) { // For smaller resolutions, use lower speed setting (get some coding gain at // the cost of increased encoding complexity). - if (width * height <= 320 * 180) + if (number_of_cores > 4 && width * height < 320 * 180) return 6; else if (width * height >= 1280 * 720) + return 9; + else if (width * height >= 640 * 360) return 8; else return 7; } +aom_superblock_size_t GetSuperblockSize(int width, int height, int threads) { + int resolution = width * height; + if (threads >= 4 && resolution >= 960 * 540 && resolution < 1920 * 1080) + return AOM_SUPERBLOCK_SIZE_64X64; + else + return AOM_SUPERBLOCK_SIZE_DYNAMIC; +} + class LibaomAv1Encoder final : public VideoEncoder { public: - explicit LibaomAv1Encoder( - std::unique_ptr svc_controller); + LibaomAv1Encoder(); ~LibaomAv1Encoder(); int InitEncode(const VideoCodec* codec_settings, @@ -82,9 +93,22 @@ class LibaomAv1Encoder final : public VideoEncoder { EncoderInfo GetEncoderInfo() const override; private: - const std::unique_ptr svc_controller_; + // Determine number of encoder threads to use. + int NumberOfThreads(int width, int height, int number_of_cores); + + bool SvcEnabled() const { return svc_params_.has_value(); } + // Fills svc_params_ memeber value. 
Returns false on error. + bool SetSvcParams(ScalableVideoController::StreamLayersConfig svc_config); + // Configures the encoder with layer for the next frame. + void SetSvcLayerId( + const ScalableVideoController::LayerFrameConfig& layer_frame); + // Configures the encoder which buffers next frame updates and can reference. + void SetSvcRefFrameConfig( + const ScalableVideoController::LayerFrameConfig& layer_frame); + + std::unique_ptr svc_controller_; bool inited_; - bool keyframe_required_; + absl::optional svc_params_; VideoCodec encoder_settings_; aom_image_t* frame_for_encode_; aom_codec_ctx_t ctx_; @@ -117,15 +141,10 @@ int32_t VerifyCodecSettings(const VideoCodec& codec_settings) { return WEBRTC_VIDEO_CODEC_OK; } -LibaomAv1Encoder::LibaomAv1Encoder( - std::unique_ptr svc_controller) - : svc_controller_(std::move(svc_controller)), - inited_(false), - keyframe_required_(true), +LibaomAv1Encoder::LibaomAv1Encoder() + : inited_(false), frame_for_encode_(nullptr), - encoded_image_callback_(nullptr) { - RTC_DCHECK(svc_controller_); -} + encoded_image_callback_(nullptr) {} LibaomAv1Encoder::~LibaomAv1Encoder() { Release(); @@ -154,10 +173,29 @@ int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings, "LibaomAv1Encoder."; return result; } + if (encoder_settings_.numberOfSimulcastStreams > 1) { + RTC_LOG(LS_WARNING) << "Simulcast is not implemented by LibaomAv1Encoder."; + return result; + } + absl::string_view scalability_mode = encoder_settings_.ScalabilityMode(); + if (scalability_mode.empty()) { + RTC_LOG(LS_WARNING) << "Scalability mode is not set, using 'NONE'."; + scalability_mode = "NONE"; + } + svc_controller_ = CreateScalabilityStructure(scalability_mode); + if (svc_controller_ == nullptr) { + RTC_LOG(LS_WARNING) << "Failed to set scalability mode " + << scalability_mode; + return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; + } + + if (!SetSvcParams(svc_controller_->StreamConfig())) { + return WEBRTC_VIDEO_CODEC_ERROR; + } // Initialize encoder 
configuration structure with default values aom_codec_err_t ret = - aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg_, 0); + aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg_, kUsageProfile); if (ret != AOM_CODEC_OK) { RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret << " on aom_codec_enc_config_default."; @@ -167,16 +205,22 @@ int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings, // Overwrite default config with input encoder settings & RTC-relevant values. cfg_.g_w = encoder_settings_.width; cfg_.g_h = encoder_settings_.height; - cfg_.g_threads = settings.number_of_cores; + cfg_.g_threads = + NumberOfThreads(cfg_.g_w, cfg_.g_h, settings.number_of_cores); cfg_.g_timebase.num = 1; cfg_.g_timebase.den = kRtpTicksPerSecond; cfg_.rc_target_bitrate = encoder_settings_.maxBitrate; // kilobits/sec. cfg_.g_input_bit_depth = kBitDepth; cfg_.kf_mode = AOM_KF_DISABLED; cfg_.rc_min_quantizer = kQpMin; - cfg_.rc_max_quantizer = kQpMax; + cfg_.rc_max_quantizer = encoder_settings_.qpMax; + cfg_.rc_undershoot_pct = 50; + cfg_.rc_overshoot_pct = 50; + cfg_.rc_buf_initial_sz = 600; + cfg_.rc_buf_optimal_sz = 600; + cfg_.rc_buf_sz = 1000; cfg_.g_usage = kUsageProfile; - + cfg_.g_error_resilient = 0; // Low-latency settings. 
cfg_.rc_end_usage = AOM_CBR; // Constant Bit Rate (CBR) mode cfg_.g_pass = AOM_RC_ONE_PASS; // One-pass rate control @@ -201,13 +245,20 @@ int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings, inited_ = true; // Set control parameters - ret = aom_codec_control(&ctx_, AOME_SET_CPUUSED, - GetCpuSpeed(cfg_.g_w, cfg_.g_h)); + ret = aom_codec_control( + &ctx_, AOME_SET_CPUUSED, + GetCpuSpeed(cfg_.g_w, cfg_.g_h, settings.number_of_cores)); if (ret != AOM_CODEC_OK) { RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret << " on control AV1E_SET_CPUUSED."; return WEBRTC_VIDEO_CODEC_ERROR; } + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_CDEF, 1); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_CDEF."; + return WEBRTC_VIDEO_CODEC_ERROR; + } ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_TPL_MODEL, 0); if (ret != AOM_CODEC_OK) { RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret @@ -220,21 +271,276 @@ int LibaomAv1Encoder::InitEncode(const VideoCodec* codec_settings, << " on control AV1E_SET_DELTAQ_MODE."; return WEBRTC_VIDEO_CODEC_ERROR; } + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_ORDER_HINT, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_ORDER_HINT."; + return WEBRTC_VIDEO_CODEC_ERROR; + } ret = aom_codec_control(&ctx_, AV1E_SET_AQ_MODE, 3); if (ret != AOM_CODEC_OK) { RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret << " on control AV1E_SET_AQ_MODE."; return WEBRTC_VIDEO_CODEC_ERROR; } + if (SvcEnabled()) { + ret = aom_codec_control(&ctx_, AV1E_SET_SVC_PARAMS, &*svc_params_); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAV1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_SVC_PARAMS."; + return false; + } + } + + ret = aom_codec_control(&ctx_, AOME_SET_MAX_INTRA_BITRATE_PCT, 300); + if 
(ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_MAX_INTRA_BITRATE_PCT."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + ret = aom_codec_control(&ctx_, AV1E_SET_COEFF_COST_UPD_FREQ, 3); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_COEFF_COST_UPD_FREQ."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + ret = aom_codec_control(&ctx_, AV1E_SET_MODE_COST_UPD_FREQ, 3); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_MODE_COST_UPD_FREQ."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + ret = aom_codec_control(&ctx_, AV1E_SET_MV_COST_UPD_FREQ, 3); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_MV_COST_UPD_FREQ."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + if (cfg_.g_threads == 4 && cfg_.g_w == 640 && + (cfg_.g_h == 360 || cfg_.g_h == 480)) { + ret = aom_codec_control(&ctx_, AV1E_SET_TILE_ROWS, + static_cast(log2(cfg_.g_threads))); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_TILE_ROWS."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + } else { + ret = aom_codec_control(&ctx_, AV1E_SET_TILE_COLUMNS, + static_cast(log2(cfg_.g_threads))); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_TILE_COLUMNS."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + } + + ret = aom_codec_control(&ctx_, AV1E_SET_ROW_MT, 1); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ROW_MT."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_OBMC, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned 
" << ret + << " on control AV1E_SET_ENABLE_OBMC."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_NOISE_SENSITIVITY, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_NOISE_SENSITIVITY."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_WARPED_MOTION, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_WARPED_MOTION."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_GLOBAL_MOTION, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_GLOBAL_MOTION."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_REF_FRAME_MVS, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_REF_FRAME_MVS."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = + aom_codec_control(&ctx_, AV1E_SET_SUPERBLOCK_SIZE, + GetSuperblockSize(cfg_.g_w, cfg_.g_h, cfg_.g_threads)); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_SUPERBLOCK_SIZE."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_CFL_INTRA, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_CFL_INTRA."; + return WEBRTC_VIDEO_CODEC_ERROR; + } - ScalableVideoController::StreamLayersConfig svc_config = - svc_controller_->StreamConfig(); - // TODO(danilchap): Configure SVC. 
- (void)svc_config; + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_SMOOTH_INTRA, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_SMOOTH_INTRA."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_ANGLE_DELTA, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_ANGLE_DELTA."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_ENABLE_FILTER_INTRA, 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AV1E_SET_ENABLE_FILTER_INTRA."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + ret = aom_codec_control(&ctx_, AV1E_SET_INTRA_DEFAULT_TX_ONLY, 1); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) + << "LibaomAv1Encoder::EncodeInit returned " << ret + << " on control AOM_CTRL_AV1E_SET_INTRA_DEFAULT_TX_ONLY."; + return WEBRTC_VIDEO_CODEC_ERROR; + } return WEBRTC_VIDEO_CODEC_OK; } +int LibaomAv1Encoder::NumberOfThreads(int width, + int height, + int number_of_cores) { + // Keep the number of encoder threads equal to the possible number of + // column/row tiles, which is (1, 2, 4, 8). See comments below for + // AV1E_SET_TILE_COLUMNS/ROWS. + if (width * height >= 640 * 360 && number_of_cores > 4) { + return 4; + } else if (width * height >= 320 * 180 && number_of_cores > 2) { + return 2; + } else { +// Use 2 threads for low res on ARM. +#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \ + defined(WEBRTC_ANDROID) + if (width * height >= 320 * 180 && number_of_cores > 2) { + return 2; + } +#endif + // 1 thread less than VGA. 
+ return 1; + } +} + +bool LibaomAv1Encoder::SetSvcParams( + ScalableVideoController::StreamLayersConfig svc_config) { + bool svc_enabled = + svc_config.num_spatial_layers > 1 || svc_config.num_temporal_layers > 1; + if (!svc_enabled) { + svc_params_ = absl::nullopt; + return true; + } + if (svc_config.num_spatial_layers < 1 || svc_config.num_spatial_layers > 4) { + RTC_LOG(LS_WARNING) << "Av1 supports up to 4 spatial layers. " + << svc_config.num_spatial_layers << " configured."; + return false; + } + if (svc_config.num_temporal_layers < 1 || + svc_config.num_temporal_layers > 8) { + RTC_LOG(LS_WARNING) << "Av1 supports up to 8 temporal layers. " + << svc_config.num_temporal_layers << " configured."; + return false; + } + aom_svc_params_t& svc_params = svc_params_.emplace(); + svc_params.number_spatial_layers = svc_config.num_spatial_layers; + svc_params.number_temporal_layers = svc_config.num_temporal_layers; + + int num_layers = + svc_config.num_spatial_layers * svc_config.num_temporal_layers; + for (int i = 0; i < num_layers; ++i) { + svc_params.min_quantizers[i] = kQpMin; + svc_params.max_quantizers[i] = encoder_settings_.qpMax; + } + + // Assume each temporal layer doubles framerate. 
+ for (int tid = 0; tid < svc_config.num_temporal_layers; ++tid) { + svc_params.framerate_factor[tid] = + 1 << (svc_config.num_temporal_layers - tid - 1); + } + + for (int sid = 0; sid < svc_config.num_spatial_layers; ++sid) { + svc_params.scaling_factor_num[sid] = svc_config.scaling_factor_num[sid]; + svc_params.scaling_factor_den[sid] = svc_config.scaling_factor_den[sid]; + } + + return true; +} + +void LibaomAv1Encoder::SetSvcLayerId( + const ScalableVideoController::LayerFrameConfig& layer_frame) { + aom_svc_layer_id_t layer_id = {}; + layer_id.spatial_layer_id = layer_frame.SpatialId(); + layer_id.temporal_layer_id = layer_frame.TemporalId(); + aom_codec_err_t ret = + aom_codec_control(&ctx_, AV1E_SET_SVC_LAYER_ID, &layer_id); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encode returned " << ret + << " on control AV1E_SET_SVC_LAYER_ID."; + } +} + +void LibaomAv1Encoder::SetSvcRefFrameConfig( + const ScalableVideoController::LayerFrameConfig& layer_frame) { + // Buffer name to use for each layer_frame.buffers position. In particular + // when there are 2 buffers are referenced, prefer name them last and golden, + // because av1 bitstream format has dedicated fields for these two names. 
+ // See last_frame_idx and golden_frame_idx in the av1 spec + // https://aomediacodec.github.io/av1-spec/av1-spec.pdf + static constexpr int kPreferedSlotName[] = {0, // Last + 3, // Golden + 1, 2, 4, 5, 6}; + static constexpr int kAv1NumBuffers = 8; + + aom_svc_ref_frame_config_t ref_frame_config = {}; + RTC_CHECK_LE(layer_frame.Buffers().size(), ABSL_ARRAYSIZE(kPreferedSlotName)); + for (size_t i = 0; i < layer_frame.Buffers().size(); ++i) { + const CodecBufferUsage& buffer = layer_frame.Buffers()[i]; + int slot_name = kPreferedSlotName[i]; + RTC_CHECK_GE(buffer.id, 0); + RTC_CHECK_LT(buffer.id, kAv1NumBuffers); + ref_frame_config.ref_idx[slot_name] = buffer.id; + if (buffer.referenced) { + ref_frame_config.reference[slot_name] = 1; + } + if (buffer.updated) { + ref_frame_config.refresh[buffer.id] = 1; + } + } + aom_codec_err_t ret = aom_codec_control(&ctx_, AV1E_SET_SVC_REF_FRAME_CONFIG, + &ref_frame_config); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encode returned " << ret + << " on control AV1_SET_SVC_REF_FRAME_CONFIG."; + } +} + int32_t LibaomAv1Encoder::RegisterEncodeCompleteCallback( EncodedImageCallback* encoded_image_callback) { encoded_image_callback_ = encoded_image_callback; @@ -262,12 +568,12 @@ int32_t LibaomAv1Encoder::Encode( return WEBRTC_VIDEO_CODEC_UNINITIALIZED; } - keyframe_required_ = + bool keyframe_required = frame_types != nullptr && absl::c_linear_search(*frame_types, VideoFrameType::kVideoFrameKey); std::vector layer_frames = - svc_controller_->NextFrameConfig(keyframe_required_); + svc_controller_->NextFrameConfig(keyframe_required); if (layer_frames.empty()) { RTC_LOG(LS_ERROR) << "SVCController returned no configuration for a frame."; @@ -277,9 +583,36 @@ int32_t LibaomAv1Encoder::Encode( // Convert input frame to I420, if needed. 
VideoFrame prepped_input_frame = frame; if (prepped_input_frame.video_frame_buffer()->type() != - VideoFrameBuffer::Type::kI420) { + VideoFrameBuffer::Type::kI420 && + prepped_input_frame.video_frame_buffer()->type() != + VideoFrameBuffer::Type::kI420A) { rtc::scoped_refptr converted_buffer( prepped_input_frame.video_frame_buffer()->ToI420()); + // The buffer should now be a mapped I420 or I420A format, but some buffer + // implementations incorrectly return the wrong buffer format, such as + // kNative. As a workaround to this, we perform ToI420() a second time. + // TODO(https://crbug.com/webrtc/12602): When Android buffers have a correct + // ToI420() implementaion, remove his workaround. + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString( + converted_buffer->type()) + << " image to I420. Can't encode frame."; + return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE; + } + if (converted_buffer->type() != VideoFrameBuffer::Type::kI420 && + converted_buffer->type() != VideoFrameBuffer::Type::kI420A) { + converted_buffer = converted_buffer->ToI420(); + RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 || + converted_buffer->type() == VideoFrameBuffer::Type::kI420A); + } + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString( + converted_buffer->type()) + << " image to I420. Can't encode frame."; + return WEBRTC_VIDEO_CODEC_ENCODER_FAILURE; + } prepped_input_frame = VideoFrame(converted_buffer, frame.timestamp(), frame.render_time_ms(), frame.rotation()); } @@ -299,14 +632,26 @@ int32_t LibaomAv1Encoder::Encode( const uint32_t duration = kRtpTicksPerSecond / static_cast(encoder_settings_.maxFramerate); - // TODO(danilchap): Remove this checks when layering is implemented. - RTC_DCHECK_EQ(layer_frames.size(), 1); - for (ScalableVideoController::LayerFrameConfig& layer_frame : layer_frames) { - aom_enc_frame_flags_t flags = - layer_frame.is_keyframe ? 
AOM_EFLAG_FORCE_KF : 0; + for (size_t i = 0; i < layer_frames.size(); ++i) { + ScalableVideoController::LayerFrameConfig& layer_frame = layer_frames[i]; + const bool end_of_picture = i == layer_frames.size() - 1; - // TODO(danilchap): configure buffers and layers based on - // `layer_frame.buffers` when layering is enabled. + aom_enc_frame_flags_t flags = + layer_frame.IsKeyframe() ? AOM_EFLAG_FORCE_KF : 0; + + if (SvcEnabled()) { + SetSvcLayerId(layer_frame); + SetSvcRefFrameConfig(layer_frame); + + aom_codec_err_t ret = + aom_codec_control(&ctx_, AV1E_SET_ERROR_RESILIENT_MODE, + layer_frame.TemporalId() > 0 ? 1 : 0); + if (ret != AOM_CODEC_OK) { + RTC_LOG(LS_WARNING) << "LibaomAv1Encoder::Encode returned " << ret + << " on control AV1E_SET_ERROR_RESILIENT_MODE."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + } // Encode a frame. aom_codec_err_t ret = aom_codec_encode(&ctx_, frame_for_encode_, @@ -319,7 +664,6 @@ int32_t LibaomAv1Encoder::Encode( // Get encoded image data. EncodedImage encoded_image; - encoded_image._completeFrame = true; aom_codec_iter_t iter = nullptr; int data_pkt_count = 0; while (const aom_codec_cx_pkt_t* pkt = @@ -330,24 +674,14 @@ int32_t LibaomAv1Encoder::Encode( "one data packet for an input video frame."; Release(); } - // TODO(bugs.webrtc.org/11174): Remove this hack when - // webrtc_pc_e2e::SingleProcessEncodedImageDataInjector not used or - // fixed not to assume that encoded image transfered as is. - const uint8_t* data = static_cast(pkt->data.frame.buf); - size_t size = pkt->data.frame.sz; - if (size > 2 && data[0] == 0b0'0010'010 && data[1] == 0) { - // Typically frame starts with a Temporal Delimter OBU of size 0 that - // is not need by any component in webrtc and discarded during rtp - // packetization. Before discarded it confuses test framework that - // assumes received encoded frame is exactly same as sent frame. 
- data += 2; - size -= 2; - } - encoded_image.SetEncodedData(EncodedImageBuffer::Create(data, size)); + encoded_image.SetEncodedData(EncodedImageBuffer::Create( + /*data=*/static_cast(pkt->data.frame.buf), + /*size=*/pkt->data.frame.sz)); - layer_frame.is_keyframe = - ((pkt->data.frame.flags & AOM_EFLAG_FORCE_KF) != 0); - encoded_image._frameType = layer_frame.is_keyframe + if ((pkt->data.frame.flags & AOM_EFLAG_FORCE_KF) != 0) { + layer_frame.Keyframe(); + } + encoded_image._frameType = layer_frame.IsKeyframe() ? VideoFrameType::kVideoFrameKey : VideoFrameType::kVideoFrameDelta; encoded_image.SetTimestamp(frame.timestamp()); @@ -356,8 +690,15 @@ int32_t LibaomAv1Encoder::Encode( encoded_image.content_type_ = VideoContentType::UNSPECIFIED; // If encoded image width/height info are added to aom_codec_cx_pkt_t, // use those values in lieu of the values in frame. - encoded_image._encodedHeight = frame.height(); - encoded_image._encodedWidth = frame.width(); + if (svc_params_) { + int n = svc_params_->scaling_factor_num[layer_frame.SpatialId()]; + int d = svc_params_->scaling_factor_den[layer_frame.SpatialId()]; + encoded_image._encodedWidth = cfg_.g_w * n / d; + encoded_image._encodedHeight = cfg_.g_h * n / d; + } else { + encoded_image._encodedWidth = cfg_.g_w; + encoded_image._encodedHeight = cfg_.g_h; + } encoded_image.timing_.flags = VideoSendTiming::kInvalid; int qp = -1; ret = aom_codec_control(&ctx_, AOME_GET_LAST_QUANTIZER, &qp); @@ -376,15 +717,28 @@ int32_t LibaomAv1Encoder::Encode( if (encoded_image.size() > 0) { CodecSpecificInfo codec_specific_info; codec_specific_info.codecType = kVideoCodecAV1; - bool is_keyframe = layer_frame.is_keyframe; + codec_specific_info.end_of_picture = end_of_picture; + bool is_keyframe = layer_frame.IsKeyframe(); codec_specific_info.generic_frame_info = svc_controller_->OnEncodeDone(std::move(layer_frame)); if (is_keyframe && codec_specific_info.generic_frame_info) { codec_specific_info.template_structure = 
svc_controller_->DependencyStructure(); + auto& resolutions = codec_specific_info.template_structure->resolutions; + if (SvcEnabled()) { + resolutions.resize(svc_params_->number_spatial_layers); + for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) { + int n = svc_params_->scaling_factor_num[sid]; + int d = svc_params_->scaling_factor_den[sid]; + resolutions[sid] = + RenderResolution(cfg_.g_w * n / d, cfg_.g_h * n / d); + } + } else { + resolutions = {RenderResolution(cfg_.g_w, cfg_.g_h)}; + } } encoded_image_callback_->OnEncodedImage(encoded_image, - &codec_specific_info, nullptr); + &codec_specific_info); } } @@ -407,14 +761,26 @@ void LibaomAv1Encoder::SetRates(const RateControlParameters& parameters) { return; } - // Check input target bit rate value. - uint32_t rc_target_bitrate_kbps = parameters.bitrate.get_sum_kbps(); - if (encoder_settings_.maxBitrate > 0) - RTC_DCHECK_LE(rc_target_bitrate_kbps, encoder_settings_.maxBitrate); - RTC_DCHECK_GE(rc_target_bitrate_kbps, encoder_settings_.minBitrate); - - // Set target bit rate. - cfg_.rc_target_bitrate = rc_target_bitrate_kbps; + svc_controller_->OnRatesUpdated(parameters.bitrate); + cfg_.rc_target_bitrate = parameters.bitrate.get_sum_kbps(); + + if (SvcEnabled()) { + for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) { + // libaom bitrate for spatial id S and temporal id T means bitrate + // of frames with spatial_id=S and temporal_id<=T + // while `parameters.bitrate` provdies bitrate of frames with + // spatial_id=S and temporal_id=T + int accumulated_bitrate_bps = 0; + for (int tid = 0; tid < svc_params_->number_temporal_layers; ++tid) { + int layer_index = sid * svc_params_->number_temporal_layers + tid; + accumulated_bitrate_bps += parameters.bitrate.GetBitrate(sid, tid); + // `svc_params.layer_target_bitrate` expects bitrate in kbps. 
+ svc_params_->layer_target_bitrate[layer_index] = + accumulated_bitrate_bps / 1000; + } + } + aom_codec_control(&ctx_, AV1E_SET_SVC_PARAMS, &*svc_params_); + } // Set frame rate to closest integer value. encoder_settings_.maxFramerate = @@ -435,6 +801,16 @@ VideoEncoder::EncoderInfo LibaomAv1Encoder::GetEncoderInfo() const { info.has_trusted_rate_controller = true; info.is_hardware_accelerated = false; info.scaling_settings = VideoEncoder::ScalingSettings(kMinQindex, kMaxQindex); + info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420}; + if (SvcEnabled()) { + for (int sid = 0; sid < svc_params_->number_spatial_layers; ++sid) { + info.fps_allocation[sid].resize(svc_params_->number_temporal_layers); + for (int tid = 0; tid < svc_params_->number_temporal_layers; ++tid) { + info.fps_allocation[sid][tid] = + encoder_settings_.maxFramerate / svc_params_->framerate_factor[tid]; + } + } + } return info; } @@ -443,13 +819,7 @@ VideoEncoder::EncoderInfo LibaomAv1Encoder::GetEncoderInfo() const { const bool kIsLibaomAv1EncoderSupported = true; std::unique_ptr CreateLibaomAv1Encoder() { - return std::make_unique( - std::make_unique()); -} - -std::unique_ptr CreateLibaomAv1Encoder( - std::unique_ptr svc_controller) { - return std::make_unique(std::move(svc_controller)); + return std::make_unique(); } } // namespace webrtc diff --git a/modules/video_coding/codecs/av1/libaom_av1_encoder.h b/modules/video_coding/codecs/av1/libaom_av1_encoder.h index c2f04e669c..4b0ee28d40 100644 --- a/modules/video_coding/codecs/av1/libaom_av1_encoder.h +++ b/modules/video_coding/codecs/av1/libaom_av1_encoder.h @@ -14,15 +14,12 @@ #include "absl/base/attributes.h" #include "api/video_codecs/video_encoder.h" -#include "modules/video_coding/codecs/av1/scalable_video_controller.h" namespace webrtc { ABSL_CONST_INIT extern const bool kIsLibaomAv1EncoderSupported; std::unique_ptr CreateLibaomAv1Encoder(); -std::unique_ptr CreateLibaomAv1Encoder( - std::unique_ptr controller); } // namespace 
webrtc diff --git a/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc b/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc index 6d1d0bbb24..96057a0ce2 100644 --- a/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc +++ b/modules/video_coding/codecs/av1/libaom_av1_encoder_unittest.cc @@ -11,15 +11,40 @@ #include "modules/video_coding/codecs/av1/libaom_av1_encoder.h" #include +#include +#include "absl/types/optional.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" +#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h" #include "modules/video_coding/include/video_error_codes.h" +#include "test/gmock.h" #include "test/gtest.h" namespace webrtc { namespace { +using ::testing::ElementsAre; +using ::testing::Field; +using ::testing::IsEmpty; +using ::testing::SizeIs; + +VideoCodec DefaultCodecSettings() { + VideoCodec codec_settings; + codec_settings.width = 320; + codec_settings.height = 180; + codec_settings.maxFramerate = 30; + codec_settings.maxBitrate = 1000; + codec_settings.qpMax = 63; + return codec_settings; +} + +VideoEncoder::Settings DefaultEncoderSettings() { + return VideoEncoder::Settings( + VideoEncoder::Capabilities(/*loss_notification=*/false), + /*number_of_cores=*/1, /*max_payload_size=*/1200); +} + TEST(LibaomAv1EncoderTest, CanCreate) { std::unique_ptr encoder = CreateLibaomAv1Encoder(); EXPECT_TRUE(encoder); @@ -28,17 +53,119 @@ TEST(LibaomAv1EncoderTest, CanCreate) { TEST(LibaomAv1EncoderTest, InitAndRelease) { std::unique_ptr encoder = CreateLibaomAv1Encoder(); ASSERT_TRUE(encoder); - VideoCodec codec_settings; - codec_settings.width = 1280; - codec_settings.height = 720; - codec_settings.maxFramerate = 30; - VideoEncoder::Capabilities capabilities(/*loss_notification=*/false); - VideoEncoder::Settings encoder_settings(capabilities, /*number_of_cores=*/1, - /*max_payload_size=*/1200); - EXPECT_EQ(encoder->InitEncode(&codec_settings, 
encoder_settings), + VideoCodec codec_settings = DefaultCodecSettings(); + EXPECT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), WEBRTC_VIDEO_CODEC_OK); EXPECT_EQ(encoder->Release(), WEBRTC_VIDEO_CODEC_OK); } +TEST(LibaomAv1EncoderTest, NoBitrateOnTopLayerRefecltedInActiveDecodeTargets) { + // Configure encoder with 2 temporal layers. + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.SetScalabilityMode("L1T2"); + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + + VideoEncoder::RateControlParameters rate_parameters; + rate_parameters.framerate_fps = 30; + rate_parameters.bitrate.SetBitrate(0, /*temporal_index=*/0, 300'000); + rate_parameters.bitrate.SetBitrate(0, /*temporal_index=*/1, 0); + encoder->SetRates(rate_parameters); + + std::vector encoded_frames = + EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode(); + ASSERT_THAT(encoded_frames, SizeIs(1)); + ASSERT_NE(encoded_frames[0].codec_specific_info.generic_frame_info, + absl::nullopt); + // Assuming L1T2 structure uses 1st decode target for T0 and 2nd decode target + // for T0+T1 frames, expect only 1st decode target is active. + EXPECT_EQ(encoded_frames[0] + .codec_specific_info.generic_frame_info->active_decode_targets, + 0b01); +} + +TEST(LibaomAv1EncoderTest, SetsEndOfPictureForLastFrameInTemporalUnit) { + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + // Configure encoder with 3 spatial layers. 
+ codec_settings.SetScalabilityMode("L3T1"); + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + + std::vector encoded_frames = + EncodedVideoFrameProducer(*encoder).SetNumInputFrames(2).Encode(); + ASSERT_THAT(encoded_frames, SizeIs(6)); + EXPECT_FALSE(encoded_frames[0].codec_specific_info.end_of_picture); + EXPECT_FALSE(encoded_frames[1].codec_specific_info.end_of_picture); + EXPECT_TRUE(encoded_frames[2].codec_specific_info.end_of_picture); + EXPECT_FALSE(encoded_frames[3].codec_specific_info.end_of_picture); + EXPECT_FALSE(encoded_frames[4].codec_specific_info.end_of_picture); + EXPECT_TRUE(encoded_frames[5].codec_specific_info.end_of_picture); +} + +TEST(LibaomAv1EncoderTest, CheckOddDimensionsWithSpatialLayers) { + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + // Configure encoder with 3 spatial layers. + codec_settings.SetScalabilityMode("L3T1"); + // Odd width and height values should not make encoder crash. 
+ codec_settings.width = 623; + codec_settings.height = 405; + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + EncodedVideoFrameProducer evfp(*encoder); + evfp.SetResolution(RenderResolution{623, 405}); + std::vector encoded_frames = + evfp.SetNumInputFrames(2).Encode(); + ASSERT_THAT(encoded_frames, SizeIs(6)); +} + +TEST(LibaomAv1EncoderTest, EncoderInfoProvidesFpsAllocation) { + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.SetScalabilityMode("L3T3"); + codec_settings.maxFramerate = 60; + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + + const auto& encoder_info = encoder->GetEncoderInfo(); + EXPECT_THAT(encoder_info.fps_allocation[0], ElementsAre(15, 30, 60)); + EXPECT_THAT(encoder_info.fps_allocation[1], ElementsAre(15, 30, 60)); + EXPECT_THAT(encoder_info.fps_allocation[2], ElementsAre(15, 30, 60)); + EXPECT_THAT(encoder_info.fps_allocation[3], IsEmpty()); +} + +TEST(LibaomAv1EncoderTest, PopulatesEncodedFrameSize) { + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + ASSERT_GT(codec_settings.width, 4); + // Configure encoder with 3 spatial layers. 
+ codec_settings.SetScalabilityMode("L3T1"); + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + + using Frame = EncodedVideoFrameProducer::EncodedFrame; + std::vector encoded_frames = + EncodedVideoFrameProducer(*encoder).SetNumInputFrames(1).Encode(); + EXPECT_THAT( + encoded_frames, + ElementsAre( + Field(&Frame::encoded_image, + AllOf(Field(&EncodedImage::_encodedWidth, + codec_settings.width / 4), + Field(&EncodedImage::_encodedHeight, + codec_settings.height / 4))), + Field(&Frame::encoded_image, + AllOf(Field(&EncodedImage::_encodedWidth, + codec_settings.width / 2), + Field(&EncodedImage::_encodedHeight, + codec_settings.height / 2))), + Field(&Frame::encoded_image, + AllOf(Field(&EncodedImage::_encodedWidth, codec_settings.width), + Field(&EncodedImage::_encodedHeight, + codec_settings.height))))); +} + } // namespace } // namespace webrtc diff --git a/modules/video_coding/codecs/av1/libaom_av1_unittest.cc b/modules/video_coding/codecs/av1/libaom_av1_unittest.cc index 4a549ea453..e63e0f8c94 100644 --- a/modules/video_coding/codecs/av1/libaom_av1_unittest.cc +++ b/modules/video_coding/codecs/av1/libaom_av1_unittest.cc @@ -11,97 +11,70 @@ #include #include +#include #include +#include +#include #include #include "absl/types/optional.h" -#include "api/test/create_frame_generator.h" -#include "api/test/frame_generator_interface.h" +#include "api/units/data_size.h" +#include "api/units/time_delta.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" #include "modules/video_coding/codecs/av1/libaom_av1_decoder.h" #include "modules/video_coding/codecs/av1/libaom_av1_encoder.h" +#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_error_codes.h" +#include "modules/video_coding/svc/create_scalability_structure.h" +#include 
"modules/video_coding/svc/scalable_video_controller.h" +#include "modules/video_coding/svc/scalable_video_controller_no_layering.h" #include "test/gmock.h" #include "test/gtest.h" namespace webrtc { namespace { +using ::testing::ContainerEq; +using ::testing::Each; using ::testing::ElementsAreArray; +using ::testing::Ge; using ::testing::IsEmpty; using ::testing::Not; using ::testing::NotNull; +using ::testing::Pointwise; +using ::testing::SizeIs; +using ::testing::Truly; +using ::testing::Values; // Use small resolution for this test to make it faster. constexpr int kWidth = 320; constexpr int kHeight = 180; constexpr int kFramerate = 30; -constexpr int kRtpTicksPerSecond = 90000; -class TestAv1Encoder { - public: - struct Encoded { - EncodedImage encoded_image; - CodecSpecificInfo codec_specific_info; - }; - - TestAv1Encoder() : encoder_(CreateLibaomAv1Encoder()) { - RTC_CHECK(encoder_); - VideoCodec codec_settings; - codec_settings.width = kWidth; - codec_settings.height = kHeight; - codec_settings.maxFramerate = kFramerate; - VideoEncoder::Settings encoder_settings( - VideoEncoder::Capabilities(/*loss_notification=*/false), - /*number_of_cores=*/1, /*max_payload_size=*/1200); - EXPECT_EQ(encoder_->InitEncode(&codec_settings, encoder_settings), - WEBRTC_VIDEO_CODEC_OK); - EXPECT_EQ(encoder_->RegisterEncodeCompleteCallback(&callback_), - WEBRTC_VIDEO_CODEC_OK); - } - // This class requires pointer stability and thus not copyable nor movable. - TestAv1Encoder(const TestAv1Encoder&) = delete; - TestAv1Encoder& operator=(const TestAv1Encoder&) = delete; - - void EncodeAndAppend(const VideoFrame& frame, std::vector* encoded) { - callback_.SetEncodeStorage(encoded); - std::vector frame_types = { - VideoFrameType::kVideoFrameDelta}; - EXPECT_EQ(encoder_->Encode(frame, &frame_types), WEBRTC_VIDEO_CODEC_OK); - // Prefer to crash checking nullptr rather than writing to random memory. 
- callback_.SetEncodeStorage(nullptr); - } - - private: - class EncoderCallback : public EncodedImageCallback { - public: - void SetEncodeStorage(std::vector* storage) { storage_ = storage; } - - private: - Result OnEncodedImage( - const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* /*fragmentation*/) override { - RTC_CHECK(storage_); - storage_->push_back({encoded_image, *codec_specific_info}); - return Result(Result::Error::OK); - } - - std::vector* storage_ = nullptr; - }; - - EncoderCallback callback_; - std::unique_ptr encoder_; -}; +VideoCodec DefaultCodecSettings() { + VideoCodec codec_settings; + codec_settings.SetScalabilityMode("NONE"); + codec_settings.width = kWidth; + codec_settings.height = kHeight; + codec_settings.maxFramerate = kFramerate; + codec_settings.maxBitrate = 1000; + codec_settings.qpMax = 63; + return codec_settings; +} +VideoEncoder::Settings DefaultEncoderSettings() { + return VideoEncoder::Settings( + VideoEncoder::Capabilities(/*loss_notification=*/false), + /*number_of_cores=*/1, /*max_payload_size=*/1200); +} class TestAv1Decoder { public: - TestAv1Decoder() { - decoder_ = CreateLibaomAv1Decoder(); + explicit TestAv1Decoder(int decoder_id) + : decoder_id_(decoder_id), decoder_(CreateLibaomAv1Decoder()) { if (decoder_ == nullptr) { - ADD_FAILURE() << "Failed to create a decoder"; + ADD_FAILURE() << "Failed to create a decoder#" << decoder_id_; return; } EXPECT_EQ(decoder_->InitDecode(/*codec_settings=*/nullptr, @@ -116,20 +89,17 @@ class TestAv1Decoder { void Decode(int64_t frame_id, const EncodedImage& image) { ASSERT_THAT(decoder_, NotNull()); - requested_ids_.push_back(frame_id); int32_t error = decoder_->Decode(image, /*missing_frames=*/false, /*render_time_ms=*/image.capture_time_ms_); if (error != WEBRTC_VIDEO_CODEC_OK) { ADD_FAILURE() << "Failed to decode frame id " << frame_id - << " with error code " << error; + << " with error code " << error << " by decoder#" 
+ << decoder_id_; return; } decoded_ids_.push_back(frame_id); } - const std::vector& requested_frame_ids() const { - return requested_ids_; - } const std::vector& decoded_frame_ids() const { return decoded_ids_; } size_t num_output_frames() const { return callback_.num_called(); } @@ -156,51 +126,208 @@ class TestAv1Decoder { int num_called_ = 0; }; - std::vector requested_ids_; + const int decoder_id_; std::vector decoded_ids_; DecoderCallback callback_; - std::unique_ptr decoder_; + const std::unique_ptr decoder_; }; -std::vector GenerateFrames(size_t num_frames) { - std::vector frames; - frames.reserve(num_frames); - - auto input_frame_generator = test::CreateSquareFrameGenerator( - kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420, - absl::nullopt); - uint32_t timestamp = 1000; - for (size_t i = 0; i < num_frames; ++i) { - frames.push_back( - VideoFrame::Builder() - .set_video_frame_buffer(input_frame_generator->NextFrame().buffer) - .set_timestamp_rtp(timestamp += kRtpTicksPerSecond / kFramerate) - .build()); - } - return frames; -} - TEST(LibaomAv1Test, EncodeDecode) { - TestAv1Decoder decoder; - TestAv1Encoder encoder; + TestAv1Decoder decoder(0); + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); - std::vector encoded_frames; - for (const VideoFrame& frame : GenerateFrames(/*num_frames=*/4)) { - encoder.EncodeAndAppend(frame, &encoded_frames); - } - for (size_t frame_idx = 0; frame_idx < encoded_frames.size(); ++frame_idx) { - decoder.Decode(static_cast(frame_idx), - encoded_frames[frame_idx].encoded_image); + std::vector encoded_frames = + EncodedVideoFrameProducer(*encoder).SetNumInputFrames(4).Encode(); + for (size_t frame_id = 0; frame_id < encoded_frames.size(); ++frame_id) { + decoder.Decode(static_cast(frame_id), + encoded_frames[frame_id].encoded_image); } // Check encoder 
produced some frames for decoder to decode. ASSERT_THAT(encoded_frames, Not(IsEmpty())); // Check decoder found all of them valid. - EXPECT_THAT(decoder.decoded_frame_ids(), - ElementsAreArray(decoder.requested_frame_ids())); + EXPECT_THAT(decoder.decoded_frame_ids(), SizeIs(encoded_frames.size())); // Check each of them produced an output frame. EXPECT_EQ(decoder.num_output_frames(), decoder.decoded_frame_ids().size()); } +struct LayerId { + friend bool operator==(const LayerId& lhs, const LayerId& rhs) { + return std::tie(lhs.spatial_id, lhs.temporal_id) == + std::tie(rhs.spatial_id, rhs.temporal_id); + } + friend bool operator<(const LayerId& lhs, const LayerId& rhs) { + return std::tie(lhs.spatial_id, lhs.temporal_id) < + std::tie(rhs.spatial_id, rhs.temporal_id); + } + friend std::ostream& operator<<(std::ostream& s, const LayerId& layer) { + return s << "S" << layer.spatial_id << "T" << layer.temporal_id; + } + + int spatial_id = 0; + int temporal_id = 0; +}; + +struct SvcTestParam { + std::string name; + int num_frames_to_generate; + std::map configured_bitrates; +}; + +class LibaomAv1SvcTest : public ::testing::TestWithParam {}; + +TEST_P(LibaomAv1SvcTest, EncodeAndDecodeAllDecodeTargets) { + size_t num_decode_targets = CreateScalabilityStructure(GetParam().name) + ->DependencyStructure() + .num_decode_targets; + + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.SetScalabilityMode(GetParam().name); + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + std::vector encoded_frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(GetParam().num_frames_to_generate) + .SetResolution({kWidth, kHeight}) + .Encode(); + + ASSERT_THAT( + encoded_frames, + Each(Truly([&](const EncodedVideoFrameProducer::EncodedFrame& frame) { + return frame.codec_specific_info.generic_frame_info && + frame.codec_specific_info.generic_frame_info + 
->decode_target_indications.size() == num_decode_targets; + }))); + + for (size_t dt = 0; dt < num_decode_targets; ++dt) { + TestAv1Decoder decoder(dt); + std::vector requested_ids; + for (int64_t frame_id = 0; + frame_id < static_cast(encoded_frames.size()); ++frame_id) { + const EncodedVideoFrameProducer::EncodedFrame& frame = + encoded_frames[frame_id]; + if (frame.codec_specific_info.generic_frame_info + ->decode_target_indications[dt] != + DecodeTargetIndication::kNotPresent) { + requested_ids.push_back(frame_id); + decoder.Decode(frame_id, frame.encoded_image); + } + } + + ASSERT_THAT(requested_ids, SizeIs(Ge(2u))); + // Check decoder found all of them valid. + EXPECT_THAT(decoder.decoded_frame_ids(), ContainerEq(requested_ids)) + << "Decoder#" << dt; + // Check each of them produced an output frame. + EXPECT_EQ(decoder.num_output_frames(), decoder.decoded_frame_ids().size()) + << "Decoder#" << dt; + } +} + +MATCHER(SameLayerIdAndBitrateIsNear, "") { + // First check if layer id is the same. + return std::get<0>(arg).first == std::get<1>(arg).first && + // check measured bitrate is not much lower than requested. + std::get<0>(arg).second >= std::get<1>(arg).second * 0.8 && + // check measured bitrate is not much larger than requested. + std::get<0>(arg).second <= std::get<1>(arg).second * 1.1; +} + +TEST_P(LibaomAv1SvcTest, SetRatesMatchMeasuredBitrate) { + const SvcTestParam param = GetParam(); + if (param.configured_bitrates.empty()) { + // Rates are not configured for this particular structure, skip the test. 
+ return; + } + constexpr TimeDelta kDuration = TimeDelta::Seconds(5); + + VideoBitrateAllocation allocation; + for (const auto& kv : param.configured_bitrates) { + allocation.SetBitrate(kv.first.spatial_id, kv.first.temporal_id, + kv.second.bps()); + } + + std::unique_ptr encoder = CreateLibaomAv1Encoder(); + ASSERT_TRUE(encoder); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.SetScalabilityMode(param.name); + codec_settings.maxBitrate = allocation.get_sum_kbps(); + codec_settings.maxFramerate = 30; + ASSERT_EQ(encoder->InitEncode(&codec_settings, DefaultEncoderSettings()), + WEBRTC_VIDEO_CODEC_OK); + + encoder->SetRates(VideoEncoder::RateControlParameters( + allocation, codec_settings.maxFramerate)); + + std::vector encoded_frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(codec_settings.maxFramerate * kDuration.seconds()) + .SetResolution({codec_settings.width, codec_settings.height}) + .SetFramerateFps(codec_settings.maxFramerate) + .Encode(); + + // Calculate size of each layer. + std::map layer_size; + for (const auto& frame : encoded_frames) { + ASSERT_TRUE(frame.codec_specific_info.generic_frame_info); + const auto& layer = *frame.codec_specific_info.generic_frame_info; + LayerId layer_id = {layer.spatial_id, layer.temporal_id}; + // This is almost same as + // layer_size[layer_id] += DataSize::Bytes(frame.encoded_image.size()); + // but avoids calling deleted default constructor for DataSize. + layer_size.emplace(layer_id, DataSize::Zero()).first->second += + DataSize::Bytes(frame.encoded_image.size()); + } + // Convert size of the layer into bitrate of that layer. 
+ std::vector> measured_bitrates; + for (const auto& kv : layer_size) { + measured_bitrates.emplace_back(kv.first, kv.second / kDuration); + } + EXPECT_THAT(measured_bitrates, Pointwise(SameLayerIdAndBitrateIsNear(), + param.configured_bitrates)); +} + +INSTANTIATE_TEST_SUITE_P( + Svc, + LibaomAv1SvcTest, + Values(SvcTestParam{"NONE", /*num_frames_to_generate=*/4}, + SvcTestParam{"L1T2", + /*num_frames_to_generate=*/4, + /*configured_bitrates=*/ + {{{0, 0}, DataRate::KilobitsPerSec(60)}, + {{0, 1}, DataRate::KilobitsPerSec(40)}}}, + SvcTestParam{"L1T3", /*num_frames_to_generate=*/8}, + SvcTestParam{"L2T1", + /*num_frames_to_generate=*/3, + /*configured_bitrates=*/ + {{{0, 0}, DataRate::KilobitsPerSec(30)}, + {{1, 0}, DataRate::KilobitsPerSec(70)}}}, + SvcTestParam{"L2T1h", + /*num_frames_to_generate=*/3, + /*configured_bitrates=*/ + {{{0, 0}, DataRate::KilobitsPerSec(30)}, + {{1, 0}, DataRate::KilobitsPerSec(70)}}}, + SvcTestParam{"L2T1_KEY", /*num_frames_to_generate=*/3}, + SvcTestParam{"L3T1", /*num_frames_to_generate=*/3}, + SvcTestParam{"L3T3", /*num_frames_to_generate=*/8}, + SvcTestParam{"S2T1", /*num_frames_to_generate=*/3}, + SvcTestParam{"S3T3", /*num_frames_to_generate=*/8}, + SvcTestParam{"L2T2", /*num_frames_to_generate=*/4}, + SvcTestParam{"L2T2_KEY", /*num_frames_to_generate=*/4}, + SvcTestParam{"L2T2_KEY_SHIFT", + /*num_frames_to_generate=*/4, + /*configured_bitrates=*/ + {{{0, 0}, DataRate::KilobitsPerSec(70)}, + {{0, 1}, DataRate::KilobitsPerSec(30)}, + {{1, 0}, DataRate::KilobitsPerSec(110)}, + {{1, 1}, DataRate::KilobitsPerSec(80)}}}), + [](const testing::TestParamInfo& info) { + return info.param.name; + }); + } // namespace } // namespace webrtc diff --git a/modules/video_coding/codecs/av1/scalable_video_controller.h b/modules/video_coding/codecs/av1/scalable_video_controller.h deleted file mode 100644 index dec985f282..0000000000 --- a/modules/video_coding/codecs/av1/scalable_video_controller.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 
(c) 2020 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#ifndef MODULES_VIDEO_CODING_CODECS_AV1_SCALABLE_VIDEO_CONTROLLER_H_ -#define MODULES_VIDEO_CODING_CODECS_AV1_SCALABLE_VIDEO_CONTROLLER_H_ - -#include - -#include "absl/container/inlined_vector.h" -#include "absl/types/optional.h" -#include "api/transport/rtp/dependency_descriptor.h" -#include "common_video/generic_frame_descriptor/generic_frame_info.h" - -namespace webrtc { - -// Controls how video should be encoded to be scalable. Outputs results as -// buffer usage configuration for encoder and enough details to communicate the -// scalability structure via dependency descriptor rtp header extension. -class ScalableVideoController { - public: - struct StreamLayersConfig { - int num_spatial_layers = 1; - int num_temporal_layers = 1; - }; - struct LayerFrameConfig { - // Id to match configuration returned by NextFrameConfig with - // (possibly modified) configuration passed back via OnEncoderDone. - // The meaning of the id is an implementation detail of - // the ScalableVideoController. - int id = 0; - - // Indication frame should be encoded as a key frame. In particular when - // `is_keyframe=true` property `CodecBufferUsage::referenced` should be - // ignored and treated as false. - bool is_keyframe = false; - - int spatial_id = 0; - int temporal_id = 0; - // Describes how encoder which buffers encoder allowed to reference and - // which buffers encoder should update. - absl::InlinedVector buffers; - }; - - virtual ~ScalableVideoController() = default; - - // Returns video structure description for encoder to configure itself. 
- virtual StreamLayersConfig StreamConfig() const = 0; - - // Returns video structure description in format compatible with - // dependency descriptor rtp header extension. - virtual FrameDependencyStructure DependencyStructure() const = 0; - - // When `restart` is true, first `LayerFrameConfig` should have `is_keyframe` - // set to true. - // Returned vector shouldn't be empty. - virtual std::vector NextFrameConfig(bool restart) = 0; - - // Returns configuration to pass to EncoderCallback. - virtual absl::optional OnEncodeDone( - LayerFrameConfig config) = 0; -}; - -} // namespace webrtc - -#endif // MODULES_VIDEO_CODING_CODECS_AV1_SCALABLE_VIDEO_CONTROLLER_H_ diff --git a/modules/video_coding/codecs/h264/h264.cc b/modules/video_coding/codecs/h264/h264.cc index be5b031e88..14e1691153 100644 --- a/modules/video_coding/codecs/h264/h264.cc +++ b/modules/video_coding/codecs/h264/h264.cc @@ -17,6 +17,7 @@ #include "absl/types/optional.h" #include "api/video_codecs/sdp_video_format.h" #include "media/base/media_constants.h" +#include "rtc_base/trace_event.h" #if defined(WEBRTC_USE_H264) #include "modules/video_coding/codecs/h264/h264_decoder_impl.h" @@ -45,11 +46,11 @@ bool IsH264CodecSupported() { } // namespace -SdpVideoFormat CreateH264Format(H264::Profile profile, - H264::Level level, +SdpVideoFormat CreateH264Format(H264Profile profile, + H264Level level, const std::string& packetization_mode) { const absl::optional profile_string = - H264::ProfileLevelIdToString(H264::ProfileLevelId(profile, level)); + H264ProfileLevelIdToString(H264ProfileLevelId(profile, level)); RTC_CHECK(profile_string); return SdpVideoFormat( cricket::kH264CodecName, @@ -65,6 +66,7 @@ void DisableRtcUseH264() { } std::vector SupportedH264Codecs() { + TRACE_EVENT0("webrtc", __func__); if (!IsH264CodecSupported()) return std::vector(); // We only support encoding Constrained Baseline Profile (CBP), but the @@ -76,12 +78,14 @@ std::vector SupportedH264Codecs() { // // We support both 
packetization modes 0 (mandatory) and 1 (optional, // preferred). - return { - CreateH264Format(H264::kProfileBaseline, H264::kLevel3_1, "1"), - CreateH264Format(H264::kProfileBaseline, H264::kLevel3_1, "0"), - CreateH264Format(H264::kProfileConstrainedBaseline, H264::kLevel3_1, "1"), - CreateH264Format(H264::kProfileConstrainedBaseline, H264::kLevel3_1, - "0")}; + return {CreateH264Format(H264Profile::kProfileBaseline, H264Level::kLevel3_1, + "1"), + CreateH264Format(H264Profile::kProfileBaseline, H264Level::kLevel3_1, + "0"), + CreateH264Format(H264Profile::kProfileConstrainedBaseline, + H264Level::kLevel3_1, "1"), + CreateH264Format(H264Profile::kProfileConstrainedBaseline, + H264Level::kLevel3_1, "0")}; } std::unique_ptr H264Encoder::Create( diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc index 33efa648ba..83f9a77614 100644 --- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc +++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc @@ -32,11 +32,10 @@ extern "C" { #include "common_video/include/video_frame_buffer.h" #include "modules/video_coding/codecs/h264/h264_color_space.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/keep_ref_until_done.h" #include "rtc_base/logging.h" #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" +#include "third_party/libyuv/include/libyuv/convert.h" namespace webrtc { @@ -55,6 +54,16 @@ enum H264DecoderImplEvent { kH264DecoderEventMax = 16, }; +struct ScopedPtrAVFreePacket { + void operator()(AVPacket* packet) { av_packet_free(&packet); } +}; +typedef std::unique_ptr ScopedAVPacket; + +ScopedAVPacket MakeScopedAVPacket() { + ScopedAVPacket packet(av_packet_alloc()); + return packet; +} + } // namespace int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context, @@ -104,7 +113,7 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context, // TODO(nisse): Delete 
that feature from the video pool, instead add // an explicit call to InitializeData here. rtc::scoped_refptr frame_buffer = - decoder->pool_.CreateBuffer(width, height); + decoder->ffmpeg_buffer_pool_.CreateI420Buffer(width, height); int y_size = width * height; int uv_size = frame_buffer->ChromaWidth() * frame_buffer->ChromaHeight(); @@ -151,10 +160,13 @@ void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) { } H264DecoderImpl::H264DecoderImpl() - : pool_(true), + : ffmpeg_buffer_pool_(true), decoded_image_callback_(nullptr), has_reported_init_(false), - has_reported_error_(false) {} + has_reported_error_(false), + preferred_output_format_(field_trial::IsEnabled("WebRTC-NV12Decode") + ? VideoFrameBuffer::Type::kNV12 + : VideoFrameBuffer::Type::kI420) {} H264DecoderImpl::~H264DecoderImpl() { Release(); @@ -200,7 +212,7 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings, // a pointer |this|. av_context_->opaque = this; - AVCodec* codec = avcodec_find_decoder(av_context_->codec_id); + const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id); if (!codec) { // This is an indication that FFmpeg has not been initialized or it has not // been compiled/initialized with the correct set of codecs. 
@@ -220,7 +232,8 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings, av_frame_.reset(av_frame_alloc()); if (codec_settings && codec_settings->buffer_pool_size) { - if (!pool_.Resize(*codec_settings->buffer_pool_size)) { + if (!ffmpeg_buffer_pool_.Resize(*codec_settings->buffer_pool_size) || + !output_buffer_pool_.Resize(*codec_settings->buffer_pool_size)) { return WEBRTC_VIDEO_CODEC_UNINITIALIZED; } } @@ -258,21 +271,25 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image, return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; } - AVPacket packet; - av_init_packet(&packet); + ScopedAVPacket packet = MakeScopedAVPacket(); + if (!packet) { + ReportError(); + return WEBRTC_VIDEO_CODEC_ERROR; + } // packet.data has a non-const type, but isn't modified by // avcodec_send_packet. - packet.data = const_cast(input_image.data()); + packet->data = const_cast(input_image.data()); if (input_image.size() > static_cast(std::numeric_limits::max())) { ReportError(); return WEBRTC_VIDEO_CODEC_ERROR; } - packet.size = static_cast(input_image.size()); + packet->size = static_cast(input_image.size()); int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000; // ms -> μs av_context_->reordered_opaque = frame_timestamp_us; - int result = avcodec_send_packet(av_context_.get(), &packet); + int result = avcodec_send_packet(av_context_.get(), packet.get()); + if (result < 0) { RTC_LOG(LS_ERROR) << "avcodec_send_packet error: " << result; ReportError(); @@ -290,20 +307,17 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image, // the input one. RTC_DCHECK_EQ(av_frame_->reordered_opaque, frame_timestamp_us); - absl::optional qp; // TODO(sakal): Maybe it is possible to get QP directly from FFmpeg. 
- h264_bitstream_parser_.ParseBitstream(input_image.data(), input_image.size()); - int qp_int; - if (h264_bitstream_parser_.GetLastSliceQp(&qp_int)) { - qp.emplace(qp_int); - } + h264_bitstream_parser_.ParseBitstream(input_image); + absl::optional qp = h264_bitstream_parser_.GetLastSliceQp(); // Obtain the |video_frame| containing the decoded image. VideoFrame* input_frame = static_cast(av_buffer_get_opaque(av_frame_->buf[0])); RTC_DCHECK(input_frame); - const webrtc::I420BufferInterface* i420_buffer = - input_frame->video_frame_buffer()->GetI420(); + rtc::scoped_refptr frame_buffer = + input_frame->video_frame_buffer(); + const webrtc::I420BufferInterface* i420_buffer = frame_buffer->GetI420(); // When needed, FFmpeg applies cropping by moving plane pointers and adjusting // frame width/height. Ensure that cropped buffers lie within the allocated @@ -326,11 +340,26 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image, i420_buffer->DataV() + i420_buffer->StrideV() * i420_buffer->height() / 2); - auto cropped_buffer = WrapI420Buffer( + rtc::scoped_refptr cropped_buffer = WrapI420Buffer( av_frame_->width, av_frame_->height, av_frame_->data[kYPlaneIndex], av_frame_->linesize[kYPlaneIndex], av_frame_->data[kUPlaneIndex], av_frame_->linesize[kUPlaneIndex], av_frame_->data[kVPlaneIndex], - av_frame_->linesize[kVPlaneIndex], rtc::KeepRefUntilDone(i420_buffer)); + av_frame_->linesize[kVPlaneIndex], + // To keep reference alive. 
+ [frame_buffer] {}); + + if (preferred_output_format_ == VideoFrameBuffer::Type::kNV12) { + const I420BufferInterface* cropped_i420 = cropped_buffer->GetI420(); + auto nv12_buffer = output_buffer_pool_.CreateNV12Buffer( + cropped_i420->width(), cropped_i420->height()); + libyuv::I420ToNV12(cropped_i420->DataY(), cropped_i420->StrideY(), + cropped_i420->DataU(), cropped_i420->StrideU(), + cropped_i420->DataV(), cropped_i420->StrideV(), + nv12_buffer->MutableDataY(), nv12_buffer->StrideY(), + nv12_buffer->MutableDataUV(), nv12_buffer->StrideUV(), + i420_buffer->width(), i420_buffer->height()); + cropped_buffer = nv12_buffer; + } // Pass on color space from input frame if explicitly specified. const ColorSpace& color_space = diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.h b/modules/video_coding/codecs/h264/h264_decoder_impl.h index 3c038e6425..bca482d8a9 100644 --- a/modules/video_coding/codecs/h264/h264_decoder_impl.h +++ b/modules/video_coding/codecs/h264/h264_decoder_impl.h @@ -44,7 +44,7 @@ extern "C" { } // extern "C" #include "common_video/h264/h264_bitstream_parser.h" -#include "common_video/include/i420_buffer_pool.h" +#include "common_video/include/video_frame_buffer_pool.h" namespace webrtc { @@ -92,7 +92,10 @@ class H264DecoderImpl : public H264Decoder { void ReportInit(); void ReportError(); - I420BufferPool pool_; + // Used by ffmpeg via |AVGetBuffer2()| to allocate I420 images. + VideoFrameBufferPool ffmpeg_buffer_pool_; + // Used to allocate NV12 images if NV12 output is preferred. + VideoFrameBufferPool output_buffer_pool_; std::unique_ptr av_context_; std::unique_ptr av_frame_; @@ -102,6 +105,9 @@ class H264DecoderImpl : public H264Decoder { bool has_reported_error_; webrtc::H264BitstreamParser h264_bitstream_parser_; + + // Decoder should produce this format if possible. 
+ const VideoFrameBuffer::Type preferred_output_format_; }; } // namespace webrtc diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc index af36dd9865..af0393976e 100644 --- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc +++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc @@ -16,6 +16,7 @@ #include "modules/video_coding/codecs/h264/h264_encoder_impl.h" +#include #include #include @@ -87,19 +88,15 @@ VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) { } // namespace // Helper method used by H264EncoderImpl::Encode. -// Copies the encoded bytes from |info| to |encoded_image| and updates the -// fragmentation information of |frag_header|. The |encoded_image->_buffer| may -// be deleted and reallocated if a bigger buffer is required. +// Copies the encoded bytes from |info| to |encoded_image|. The +// |encoded_image->_buffer| may be deleted and reallocated if a bigger buffer is +// required. // // After OpenH264 encoding, the encoded bytes are stored in |info| spread out // over a number of layers and "NAL units". Each NAL unit is a fragment starting // with the four-byte start code {0,0,0,1}. All of this data (including the -// start codes) is copied to the |encoded_image->_buffer| and the |frag_header| -// is updated to point to each fragment, with offsets and lengths set as to -// exclude the start codes. -static void RtpFragmentize(EncodedImage* encoded_image, - SFrameBSInfo* info, - RTPFragmentationHeader* frag_header) { +// start codes) is copied to the |encoded_image->_buffer|. +static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) { // Calculate minimum buffer size required to hold encoded data. size_t required_capacity = 0; size_t fragments_count = 0; @@ -114,12 +111,12 @@ static void RtpFragmentize(EncodedImage* encoded_image, } } // TODO(nisse): Use a cache or buffer pool to avoid allocation? 
- encoded_image->SetEncodedData(EncodedImageBuffer::Create(required_capacity)); + auto buffer = EncodedImageBuffer::Create(required_capacity); + encoded_image->SetEncodedData(buffer); // Iterate layers and NAL units, note each NAL unit as a fragment and copy // the data to |encoded_image->_buffer|. const uint8_t start_code[4] = {0, 0, 0, 1}; - frag_header->VerifyAndAllocateFragmentationHeader(fragments_count); size_t frag = 0; encoded_image->set_size(0); for (int layer = 0; layer < info->iLayerNum; ++layer) { @@ -134,15 +131,10 @@ static void RtpFragmentize(EncodedImage* encoded_image, RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]); RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]); RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]); - frag_header->fragmentationOffset[frag] = - encoded_image->size() + layer_len + sizeof(start_code); - frag_header->fragmentationLength[frag] = - layerInfo.pNalLengthInByte[nal] - sizeof(start_code); layer_len += layerInfo.pNalLengthInByte[nal]; } // Copy the entire layer's data (including start codes). - memcpy(encoded_image->data() + encoded_image->size(), layerInfo.pBsBuf, - layer_len); + memcpy(buffer->data() + encoded_image->size(), layerInfo.pBsBuf, layer_len); encoded_image->set_size(encoded_image->size() + layer_len); } } @@ -250,7 +242,8 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst, configurations_[i].frame_dropping_on = codec_.H264()->frameDroppingOn; configurations_[i].key_frame_interval = codec_.H264()->keyFrameInterval; configurations_[i].num_temporal_layers = - codec_.simulcastStream[idx].numberOfTemporalLayers; + std::max(codec_.H264()->numberOfTemporalLayers, + codec_.simulcastStream[idx].numberOfTemporalLayers); // Create downscaled image buffers. 
 if (i > 0) { @@ -284,7 +277,6 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst, CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width, codec_.simulcastStream[idx].height); encoded_images_[i].SetEncodedData(EncodedImageBuffer::Create(new_capacity)); - encoded_images_[i]._completeFrame = true; encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width; encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height; encoded_images_[i].set_size(0); @@ -383,8 +375,19 @@ int32_t H264EncoderImpl::Encode( return WEBRTC_VIDEO_CODEC_UNINITIALIZED; } - rtc::scoped_refptr frame_buffer = + rtc::scoped_refptr frame_buffer = input_frame.video_frame_buffer()->ToI420(); + // The buffer should now be a mapped I420 or I420A format, but some buffer + // implementations incorrectly return the wrong buffer format, such as + // kNative. As a workaround to this, we perform ToI420() a second time. + // TODO(https://crbug.com/webrtc/12602): When Android buffers have a correct + // ToI420() implementation, remove this workaround. + if (frame_buffer->type() != VideoFrameBuffer::Type::kI420 && + frame_buffer->type() != VideoFrameBuffer::Type::kI420A) { + frame_buffer = frame_buffer->ToI420(); + RTC_CHECK(frame_buffer->type() == VideoFrameBuffer::Type::kI420 || + frame_buffer->type() == VideoFrameBuffer::Type::kI420A); + } bool send_key_frame = false; for (size_t i = 0; i < configurations_.size(); ++i) { @@ -444,7 +447,7 @@ int32_t H264EncoderImpl::Encode( pictures_[i].iStride[0], pictures_[i].pData[1], pictures_[i].iStride[1], pictures_[i].pData[2], pictures_[i].iStride[2], configurations_[i].width, - configurations_[i].height, libyuv::kFilterBilinear); + configurations_[i].height, libyuv::kFilterBox); } if (!configurations_[i].sending) { @@ -485,16 +488,15 @@ int32_t H264EncoderImpl::Encode( // Split encoded image up into fragments. This also updates // |encoded_image_|. 
- RTPFragmentationHeader frag_header; - RtpFragmentize(&encoded_images_[i], &info, &frag_header); + RtpFragmentize(&encoded_images_[i], &info); // Encoder can skip frames to save bandwidth in which case // |encoded_images_[i]._length| == 0. if (encoded_images_[i].size() > 0) { // Parse QP. - h264_bitstream_parser_.ParseBitstream(encoded_images_[i].data(), - encoded_images_[i].size()); - h264_bitstream_parser_.GetLastSliceQp(&encoded_images_[i].qp_); + h264_bitstream_parser_.ParseBitstream(encoded_images_[i]); + encoded_images_[i].qp_ = + h264_bitstream_parser_.GetLastSliceQp().value_or(-1); // Deliver encoded image. CodecSpecificInfo codec_specific; @@ -518,7 +520,7 @@ int32_t H264EncoderImpl::Encode( } } encoded_image_callback_->OnEncodedImage(encoded_images_[i], - &codec_specific, &frag_header); + &codec_specific); } } return WEBRTC_VIDEO_CODEC_OK; @@ -555,6 +557,12 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const { // |uiIntraPeriod| - multiple of GOP size // |keyFrameInterval| - number of frames encoder_params.uiIntraPeriod = configurations_[i].key_frame_interval; + // Reuse SPS id if possible. This helps to avoid reset of chromium HW decoder + // on each key-frame. + // Note that WebRTC resets encoder on resolution change which makes all + // EParameterSetStrategy modes except INCREASING_ID (default) essentially + // equivalent to CONSTANT_ID. + encoder_params.eSpsPpsIdStrategy = SPS_LISTING; encoder_params.uiMaxNalSize = 0; // Threading model: use auto. // 0: auto (dynamic imp. internal encoder) @@ -572,7 +580,13 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const { encoder_params.iMaxBitrate; encoder_params.iTemporalLayerNum = configurations_[i].num_temporal_layers; if (encoder_params.iTemporalLayerNum > 1) { - encoder_params.iNumRefFrame = 1; + // iNumRefFrame specifies total number of reference buffers to allocate. 
+ // For N temporal layers we need at least (N - 1) buffers to store last + // encoded frames of all reference temporal layers. + // Note that there is no API in OpenH264 encoder to specify exact set of + // references to be used for prediction of a given frame. Encoder can + // theoretically use all available reference buffers. + encoder_params.iNumRefFrame = encoder_params.iTemporalLayerNum - 1; } RTC_LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "." << OPENH264_MINOR; @@ -625,6 +639,7 @@ VideoEncoder::EncoderInfo H264EncoderImpl::GetEncoderInfo() const { info.is_hardware_accelerated = false; info.has_internal_source = false; info.supports_simulcast = true; + info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420}; return info; } diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.h b/modules/video_coding/codecs/h264/h264_encoder_impl.h index ba996366a3..2a78b14311 100644 --- a/modules/video_coding/codecs/h264/h264_encoder_impl.h +++ b/modules/video_coding/codecs/h264/h264_encoder_impl.h @@ -72,7 +72,7 @@ class H264EncoderImpl : public H264Encoder { EncodedImageCallback* callback) override; void SetRates(const RateControlParameters& parameters) override; - // The result of encoding - an EncodedImage and RTPFragmentationHeader - are + // The result of encoding - an EncodedImage and CodecSpecificInfo - are passed to the encode complete callback. int32_t Encode(const VideoFrame& frame, const std::vector* frame_types) override; diff --git a/modules/video_coding/codecs/h264/include/h264.h b/modules/video_coding/codecs/h264/include/h264.h index 70ca817988..1f8f796064 100644 --- a/modules/video_coding/codecs/h264/include/h264.h +++ b/modules/video_coding/codecs/h264/include/h264.h @@ -27,8 +27,8 @@ struct SdpVideoFormat; // Creates an H264 SdpVideoFormat entry with specified paramters. 
RTC_EXPORT SdpVideoFormat -CreateH264Format(H264::Profile profile, - H264::Level level, +CreateH264Format(H264Profile profile, + H264Level level, const std::string& packetization_mode); // Set to disable the H.264 encoder/decoder implementations that are provided if diff --git a/modules/video_coding/codecs/vp8/libvpx_interface.cc b/modules/video_coding/codecs/interface/libvpx_interface.cc similarity index 50% rename from modules/video_coding/codecs/vp8/libvpx_interface.cc rename to modules/video_coding/codecs/interface/libvpx_interface.cc index 1a3df403ae..b24922f921 100644 --- a/modules/video_coding/codecs/vp8/libvpx_interface.cc +++ b/modules/video_coding/codecs/interface/libvpx_interface.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "modules/video_coding/codecs/vp8/libvpx_interface.h" +#include "modules/video_coding/codecs/interface/libvpx_interface.h" #include @@ -16,10 +16,10 @@ namespace webrtc { namespace { -class LibvpxVp8Facade : public LibvpxInterface { +class LibvpxFacade : public LibvpxInterface { public: - LibvpxVp8Facade() = default; - ~LibvpxVp8Facade() override = default; + LibvpxFacade() = default; + ~LibvpxFacade() override = default; vpx_image_t* img_alloc(vpx_image_t* img, vpx_img_fmt_t fmt, @@ -93,17 +93,45 @@ class LibvpxVp8Facade : public LibvpxInterface { return vpx_codec_control(ctx, VP8E_SET_ARNR_MAXFRAMES, param); case VP8E_SET_ARNR_STRENGTH: return vpx_codec_control(ctx, VP8E_SET_ARNR_STRENGTH, param); - case VP8E_SET_ARNR_TYPE: - RTC_NOTREACHED() << "VP8E_SET_ARNR_TYPE is deprecated."; - return VPX_CODEC_UNSUP_FEATURE; case VP8E_SET_CQ_LEVEL: return vpx_codec_control(ctx, VP8E_SET_CQ_LEVEL, param); case VP8E_SET_MAX_INTRA_BITRATE_PCT: return vpx_codec_control(ctx, VP8E_SET_MAX_INTRA_BITRATE_PCT, param); + case VP9E_SET_MAX_INTER_BITRATE_PCT: + return vpx_codec_control(ctx, VP9E_SET_MAX_INTER_BITRATE_PCT, param); case VP8E_SET_GF_CBR_BOOST_PCT: return vpx_codec_control(ctx, 
VP8E_SET_GF_CBR_BOOST_PCT, param); case VP8E_SET_SCREEN_CONTENT_MODE: return vpx_codec_control(ctx, VP8E_SET_SCREEN_CONTENT_MODE, param); + case VP9E_SET_GF_CBR_BOOST_PCT: + return vpx_codec_control(ctx, VP9E_SET_GF_CBR_BOOST_PCT, param); + case VP9E_SET_LOSSLESS: + return vpx_codec_control(ctx, VP9E_SET_LOSSLESS, param); + case VP9E_SET_FRAME_PARALLEL_DECODING: + return vpx_codec_control(ctx, VP9E_SET_FRAME_PARALLEL_DECODING, param); + case VP9E_SET_AQ_MODE: + return vpx_codec_control(ctx, VP9E_SET_AQ_MODE, param); + case VP9E_SET_FRAME_PERIODIC_BOOST: + return vpx_codec_control(ctx, VP9E_SET_FRAME_PERIODIC_BOOST, param); + case VP9E_SET_NOISE_SENSITIVITY: + return vpx_codec_control(ctx, VP9E_SET_NOISE_SENSITIVITY, param); + case VP9E_SET_MIN_GF_INTERVAL: + return vpx_codec_control(ctx, VP9E_SET_MIN_GF_INTERVAL, param); + case VP9E_SET_MAX_GF_INTERVAL: + return vpx_codec_control(ctx, VP9E_SET_MAX_GF_INTERVAL, param); + case VP9E_SET_TARGET_LEVEL: + return vpx_codec_control(ctx, VP9E_SET_TARGET_LEVEL, param); + case VP9E_SET_ROW_MT: + return vpx_codec_control(ctx, VP9E_SET_ROW_MT, param); + case VP9E_ENABLE_MOTION_VECTOR_UNIT_TEST: + return vpx_codec_control(ctx, VP9E_ENABLE_MOTION_VECTOR_UNIT_TEST, + param); + case VP9E_SET_SVC_INTER_LAYER_PRED: + return vpx_codec_control(ctx, VP9E_SET_SVC_INTER_LAYER_PRED, param); + case VP9E_SET_SVC_GF_TEMPORAL_REF: + return vpx_codec_control(ctx, VP9E_SET_SVC_GF_TEMPORAL_REF, param); + case VP9E_SET_POSTENCODE_DROP: + return vpx_codec_control(ctx, VP9E_SET_POSTENCODE_DROP, param); default: RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; } @@ -118,14 +146,41 @@ class LibvpxVp8Facade : public LibvpxInterface { return vpx_codec_control(ctx, VP8E_SET_FRAME_FLAGS, param); case VP8E_SET_TEMPORAL_LAYER_ID: return vpx_codec_control(ctx, VP8E_SET_TEMPORAL_LAYER_ID, param); + case VP9E_SET_SVC: + return vpx_codec_control(ctx, VP9E_SET_SVC, param); case VP8E_SET_CPUUSED: return vpx_codec_control(ctx, VP8E_SET_CPUUSED, 
param); case VP8E_SET_TOKEN_PARTITIONS: return vpx_codec_control(ctx, VP8E_SET_TOKEN_PARTITIONS, param); case VP8E_SET_TUNING: return vpx_codec_control(ctx, VP8E_SET_TUNING, param); + case VP9E_SET_TILE_COLUMNS: + return vpx_codec_control(ctx, VP9E_SET_TILE_COLUMNS, param); + case VP9E_SET_TILE_ROWS: + return vpx_codec_control(ctx, VP9E_SET_TILE_ROWS, param); + case VP9E_SET_TPL: + return vpx_codec_control(ctx, VP9E_SET_TPL, param); + case VP9E_SET_ALT_REF_AQ: + return vpx_codec_control(ctx, VP9E_SET_ALT_REF_AQ, param); + case VP9E_SET_TUNE_CONTENT: + return vpx_codec_control(ctx, VP9E_SET_TUNE_CONTENT, param); + case VP9E_SET_COLOR_SPACE: + return vpx_codec_control(ctx, VP9E_SET_COLOR_SPACE, param); + case VP9E_SET_COLOR_RANGE: + return vpx_codec_control(ctx, VP9E_SET_COLOR_RANGE, param); + case VP9E_SET_DELTA_Q_UV: + return vpx_codec_control(ctx, VP9E_SET_DELTA_Q_UV, param); + case VP9E_SET_DISABLE_OVERSHOOT_MAXQ_CBR: + return vpx_codec_control(ctx, VP9E_SET_DISABLE_OVERSHOOT_MAXQ_CBR, + param); + case VP9E_SET_DISABLE_LOOPFILTER: + return vpx_codec_control(ctx, VP9E_SET_DISABLE_LOOPFILTER, param); default: + if (param >= 0) { + // Might be intended for uint32_t but int literal used, try fallback. 
+ return codec_control(ctx, ctrl_id, static_cast<uint32_t>(param)); + } RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; } return VPX_CODEC_ERROR; @@ -139,6 +194,10 @@ class LibvpxVp8Facade : public LibvpxInterface { return vpx_codec_control(ctx, VP8E_GET_LAST_QUANTIZER, param); case VP8E_GET_LAST_QUANTIZER_64: return vpx_codec_control(ctx, VP8E_GET_LAST_QUANTIZER_64, param); + case VP9E_SET_RENDER_SIZE: + return vpx_codec_control(ctx, VP9E_SET_RENDER_SIZE, param); + case VP9E_GET_LEVEL: + return vpx_codec_control(ctx, VP9E_GET_LEVEL, param); default: RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; } @@ -151,6 +210,8 @@ class LibvpxVp8Facade : public LibvpxInterface { switch (ctrl_id) { case VP8E_SET_ROI_MAP: return vpx_codec_control(ctx, VP8E_SET_ROI_MAP, param); + case VP9E_SET_ROI_MAP: + return vpx_codec_control(ctx, VP9E_SET_ROI_MAP, param); default: RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; } @@ -163,6 +224,8 @@ class LibvpxVp8Facade : public LibvpxInterface { switch (ctrl_id) { case VP8E_SET_ACTIVEMAP: return vpx_codec_control(ctx, VP8E_SET_ACTIVEMAP, param); + case VP9E_GET_ACTIVEMAP: + return vpx_codec_control(ctx, VP9E_GET_ACTIVEMAP, param); default: RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; } @@ -181,6 +244,98 @@ class LibvpxVp8Facade : public LibvpxInterface { return VPX_CODEC_ERROR; } + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_extra_cfg_t* param) const override { + switch (ctrl_id) { + case VP9E_SET_SVC_PARAMETERS: + return vpx_codec_control_(ctx, VP9E_SET_SVC_PARAMETERS, param); + default: + RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; + } + return VPX_CODEC_ERROR; + } + + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_frame_drop_t* param) const override { + switch (ctrl_id) { + case VP9E_SET_SVC_FRAME_DROP_LAYER: + return vpx_codec_control_(ctx, VP9E_SET_SVC_FRAME_DROP_LAYER,
param); + default: + RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; + } + return VPX_CODEC_ERROR; + } + + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + void* param) const override { + switch (ctrl_id) { + case VP9E_SET_SVC_PARAMETERS: + return vpx_codec_control_(ctx, VP9E_SET_SVC_PARAMETERS, param); + case VP9E_REGISTER_CX_CALLBACK: + return vpx_codec_control_(ctx, VP9E_REGISTER_CX_CALLBACK, param); + default: + RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; + } + return VPX_CODEC_ERROR; + } + + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_layer_id_t* param) const override { + switch (ctrl_id) { + case VP9E_SET_SVC_LAYER_ID: + return vpx_codec_control_(ctx, VP9E_SET_SVC_LAYER_ID, param); + case VP9E_GET_SVC_LAYER_ID: + return vpx_codec_control_(ctx, VP9E_GET_SVC_LAYER_ID, param); + default: + RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; + } + return VPX_CODEC_ERROR; + } + + vpx_codec_err_t codec_control( + vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_ref_frame_config_t* param) const override { + switch (ctrl_id) { + case VP9E_SET_SVC_REF_FRAME_CONFIG: + return vpx_codec_control_(ctx, VP9E_SET_SVC_REF_FRAME_CONFIG, param); + case VP9E_GET_SVC_REF_FRAME_CONFIG: + return vpx_codec_control_(ctx, VP9E_GET_SVC_REF_FRAME_CONFIG, param); + default: + RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; + } + return VPX_CODEC_ERROR; + } + + vpx_codec_err_t codec_control( + vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_spatial_layer_sync_t* param) const override { + switch (ctrl_id) { + case VP9E_SET_SVC_SPATIAL_LAYER_SYNC: + return vpx_codec_control_(ctx, VP9E_SET_SVC_SPATIAL_LAYER_SYNC, param); + default: + RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; + } + return VPX_CODEC_ERROR; + } + + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + 
vpx_rc_funcs_t* param) const override { + switch (ctrl_id) { + case VP9E_SET_EXTERNAL_RATE_CONTROL: + return vpx_codec_control_(ctx, VP9E_SET_EXTERNAL_RATE_CONTROL, param); + default: + RTC_NOTREACHED() << "Unsupported libvpx ctrl_id: " << ctrl_id; + } + return VPX_CODEC_ERROR; + } + vpx_codec_err_t codec_encode(vpx_codec_ctx_t* ctx, const vpx_image_t* img, vpx_codec_pts_t pts, @@ -195,12 +350,24 @@ class LibvpxVp8Facade : public LibvpxInterface { vpx_codec_iter_t* iter) const override { return ::vpx_codec_get_cx_data(ctx, iter); } + + const char* codec_error_detail(vpx_codec_ctx_t* ctx) const override { + return ::vpx_codec_error_detail(ctx); + } + + const char* codec_error(vpx_codec_ctx_t* ctx) const override { + return ::vpx_codec_error(ctx); + } + + const char* codec_err_to_string(vpx_codec_err_t err) const override { + return ::vpx_codec_err_to_string(err); + } }; } // namespace -std::unique_ptr LibvpxInterface::CreateEncoder() { - return std::make_unique(); +std::unique_ptr LibvpxInterface::Create() { + return std::make_unique(); } } // namespace webrtc diff --git a/modules/video_coding/codecs/vp8/libvpx_interface.h b/modules/video_coding/codecs/interface/libvpx_interface.h similarity index 69% rename from modules/video_coding/codecs/vp8/libvpx_interface.h rename to modules/video_coding/codecs/interface/libvpx_interface.h index fe40dedeca..3dea24dd6d 100644 --- a/modules/video_coding/codecs/vp8/libvpx_interface.h +++ b/modules/video_coding/codecs/interface/libvpx_interface.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_INTERFACE_H_ -#define MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_INTERFACE_H_ +#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_ +#define MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_ #include @@ -22,7 +22,7 @@ namespace webrtc { -// This interface is a proxy to to the static libvpx functions, so that they +// This interface is a proxy to the static libvpx functions, so that they // can be mocked for testing. Currently supports VP8 encoder functions. // TODO(sprang): Extend this to VP8 decoder and VP9 encoder/decoder too. class LibvpxInterface { @@ -81,7 +81,29 @@ class LibvpxInterface { virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, vp8e_enc_control_id ctrl_id, vpx_scaling_mode* param) const = 0; - + virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_extra_cfg_t* param) const = 0; + virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_frame_drop_t* param) const = 0; + virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + void* param) const = 0; + virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_layer_id_t* param) const = 0; + virtual vpx_codec_err_t codec_control( + vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_ref_frame_config_t* param) const = 0; + virtual vpx_codec_err_t codec_control( + vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_spatial_layer_sync_t* param) const = 0; + virtual vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_rc_funcs_t* param) const = 0; virtual vpx_codec_err_t codec_encode(vpx_codec_ctx_t* ctx, const vpx_image_t* img, vpx_codec_pts_t pts, @@ -93,10 +115,14 @@ class LibvpxInterface { vpx_codec_ctx_t* ctx, vpx_codec_iter_t* iter) const = 0; + virtual const char* 
codec_error_detail(vpx_codec_ctx_t* ctx) const = 0; + virtual const char* codec_error(vpx_codec_ctx_t* ctx) const = 0; + virtual const char* codec_err_to_string(vpx_codec_err_t err) const = 0; + // Returns interface wrapping the actual libvpx functions. - static std::unique_ptr CreateEncoder(); + static std::unique_ptr Create(); }; } // namespace webrtc -#endif // MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_INTERFACE_H_ +#endif // MODULES_VIDEO_CODING_CODECS_INTERFACE_LIBVPX_INTERFACE_H_ diff --git a/modules/video_coding/codecs/interface/mock_libvpx_interface.h b/modules/video_coding/codecs/interface/mock_libvpx_interface.h new file mode 100644 index 0000000000..6dfe733dd0 --- /dev/null +++ b/modules/video_coding/codecs/interface/mock_libvpx_interface.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_ +#define MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_ + +#include "modules/video_coding/codecs/interface/libvpx_interface.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { + +class MockLibvpxInterface : public LibvpxInterface { + public: + MOCK_METHOD( + vpx_image_t*, + img_alloc, + (vpx_image_t*, vpx_img_fmt_t, unsigned int, unsigned int, unsigned int), + (const, override)); + MOCK_METHOD(vpx_image_t*, + img_wrap, + (vpx_image_t*, + vpx_img_fmt_t, + unsigned int, + unsigned int, + unsigned int, + unsigned char*), + (const, override)); + MOCK_METHOD(void, img_free, (vpx_image_t * img), (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_enc_config_set, + (vpx_codec_ctx_t*, const vpx_codec_enc_cfg_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_enc_config_default, + (vpx_codec_iface_t*, vpx_codec_enc_cfg_t*, unsigned int), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_enc_init, + (vpx_codec_ctx_t*, + vpx_codec_iface_t*, + const vpx_codec_enc_cfg_t*, + vpx_codec_flags_t), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_enc_init_multi, + (vpx_codec_ctx_t*, + vpx_codec_iface_t*, + vpx_codec_enc_cfg_t*, + int, + vpx_codec_flags_t, + vpx_rational_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_destroy, + (vpx_codec_ctx_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, uint32_t), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, int), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, int*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_roi_map*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + 
(vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_active_map*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_scaling_mode*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_extra_cfg_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_frame_drop_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, void*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_svc_layer_id_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, + vp8e_enc_control_id, + vpx_svc_ref_frame_config_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, + vp8e_enc_control_id, + vpx_svc_spatial_layer_sync_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_control, + (vpx_codec_ctx_t*, vp8e_enc_control_id, vpx_rc_funcs_t*), + (const, override)); + MOCK_METHOD(vpx_codec_err_t, + codec_encode, + (vpx_codec_ctx_t*, + const vpx_image_t*, + vpx_codec_pts_t, + uint64_t, + vpx_enc_frame_flags_t, + uint64_t), + (const, override)); + MOCK_METHOD(const vpx_codec_cx_pkt_t*, + codec_get_cx_data, + (vpx_codec_ctx_t*, vpx_codec_iter_t*), + (const, override)); + MOCK_METHOD(const char*, + codec_error_detail, + (vpx_codec_ctx_t*), + (const, override)); + MOCK_METHOD(const char*, codec_error, (vpx_codec_ctx_t*), (const, override)); + MOCK_METHOD(const char*, + codec_err_to_string, + (vpx_codec_err_t), + (const, override)); +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_CODECS_INTERFACE_MOCK_LIBVPX_INTERFACE_H_ diff --git a/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc b/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc index 
b48996cbcf..8740884f5b 100644 --- a/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc +++ b/modules/video_coding/codecs/multiplex/augmented_video_frame_buffer.cc @@ -54,4 +54,12 @@ int AugmentedVideoFrameBuffer::height() const { rtc::scoped_refptr AugmentedVideoFrameBuffer::ToI420() { return video_frame_buffer_->ToI420(); } + +const I420BufferInterface* AugmentedVideoFrameBuffer::GetI420() const { + // TODO(https://crbug.com/webrtc/12021): When AugmentedVideoFrameBuffer is + // updated to implement the buffer interfaces of relevant + // VideoFrameBuffer::Types, stop overriding GetI420() as a workaround to + // AugmentedVideoFrameBuffer not being the type that is returned by type(). + return video_frame_buffer_->GetI420(); +} } // namespace webrtc diff --git a/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h b/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h index c45ab3b2a4..d711cd07da 100644 --- a/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h +++ b/modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h @@ -45,6 +45,12 @@ class AugmentedVideoFrameBuffer : public VideoFrameBuffer { // Get the I140 Buffer from the underlying frame buffer rtc::scoped_refptr ToI420() final; + // Returns GetI420() of the underlying VideoFrameBuffer. + // TODO(hbos): AugmentedVideoFrameBuffer should not return a type (such as + // kI420) without also implementing that type's interface (i.e. + // I420BufferInterface). Either implement all possible Type's interfaces or + // return kNative. 
+ const I420BufferInterface* GetI420() const final; private: uint16_t augmenting_data_size_; diff --git a/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h b/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h index 9e718303b7..c43109e460 100644 --- a/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h +++ b/modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h @@ -21,7 +21,7 @@ #include "api/video_codecs/video_encoder_factory.h" #include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h" #include "modules/video_coding/include/video_codec_interface.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -57,8 +57,7 @@ class MultiplexEncoderAdapter : public VideoEncoder { EncodedImageCallback::Result OnEncodedImage( AlphaCodecStream stream_idx, const EncodedImage& encodedImage, - const CodecSpecificInfo* codecSpecificInfo, - const RTPFragmentationHeader* fragmentation); + const CodecSpecificInfo* codecSpecificInfo); private: // Wrapper class that redirects OnEncodedImage() calls. 
@@ -71,7 +70,7 @@ class MultiplexEncoderAdapter : public VideoEncoder { EncodedImageCallback* encoded_complete_callback_; std::map stashed_images_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); uint16_t picture_index_ = 0; std::vector multiplex_dummy_planes_; @@ -79,7 +78,7 @@ class MultiplexEncoderAdapter : public VideoEncoder { int key_frame_interval_; EncodedImage combined_image_; - rtc::CriticalSection crit_; + Mutex mutex_; const bool supports_augmented_data_; int augmenting_data_size_ = 0; diff --git a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc index cd39e72c29..2332fcddfb 100644 --- a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc +++ b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc @@ -17,14 +17,8 @@ #include "common_video/libyuv/include/webrtc_libyuv.h" #include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h" #include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h" -#include "rtc_base/keep_ref_until_done.h" #include "rtc_base/logging.h" -namespace { -void KeepBufferRefs(rtc::scoped_refptr, - rtc::scoped_refptr) {} -} // anonymous namespace - namespace webrtc { class MultiplexDecoderAdapter::AdapterDecodedImageCallback @@ -76,23 +70,26 @@ struct MultiplexDecoderAdapter::DecodedImageData { decoded_image_(decoded_image), decode_time_ms_(decode_time_ms), qp_(qp) {} + + DecodedImageData() = delete; + DecodedImageData(const DecodedImageData&) = delete; + DecodedImageData& operator=(const DecodedImageData&) = delete; + const AlphaCodecStream stream_idx_; VideoFrame decoded_image_; const absl::optional decode_time_ms_; const absl::optional qp_; - - private: - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DecodedImageData); }; struct MultiplexDecoderAdapter::AugmentingData { AugmentingData(std::unique_ptr augmenting_data, uint16_t data_size) : data_(std::move(augmenting_data)), 
size_(data_size) {} + AugmentingData() = delete; + AugmentingData(const AugmentingData&) = delete; + AugmentingData& operator=(const AugmentingData&) = delete; + std::unique_ptr data_; const uint16_t size_; - - private: - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AugmentingData); }; MultiplexDecoderAdapter::MultiplexDecoderAdapter( @@ -247,12 +244,12 @@ void MultiplexDecoderAdapter::MergeAlphaImages( yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(), yuv_buffer->DataV(), yuv_buffer->StrideV(), alpha_buffer->DataY(), alpha_buffer->StrideY(), - rtc::Bind(&KeepBufferRefs, yuv_buffer, alpha_buffer)); + // To keep references alive. + [yuv_buffer, alpha_buffer] {}); } if (supports_augmenting_data_) { - merged_buffer = rtc::scoped_refptr( - new rtc::RefCountedObject( - merged_buffer, std::move(augmenting_data), augmenting_data_length)); + merged_buffer = rtc::make_ref_counted( + merged_buffer, std::move(augmenting_data), augmenting_data_length); } VideoFrame merged_image = VideoFrame::Builder() diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h index 4a913fe502..9f9f39ce05 100644 --- a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h +++ b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h @@ -16,6 +16,7 @@ #include #include "api/video/encoded_image.h" +#include "api/video_codecs/video_codec.h" namespace webrtc { diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc index 492ff19ffa..db525b8f98 100644 --- a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc +++ b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc @@ -17,9 +17,7 @@ #include "common_video/include/video_frame_buffer.h" #include "common_video/libyuv/include/webrtc_libyuv.h" #include "media/base/video_common.h" -#include 
"modules/include/module_common_types.h" #include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h" -#include "rtc_base/keep_ref_until_done.h" #include "rtc_base/logging.h" namespace webrtc { @@ -35,12 +33,11 @@ class MultiplexEncoderAdapter::AdapterEncodedImageCallback EncodedImageCallback::Result OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override { + const CodecSpecificInfo* codec_specific_info) override { if (!adapter_) return Result(Result::OK); return adapter_->OnEncodedImage(stream_idx_, encoded_image, - codec_specific_info, fragmentation); + codec_specific_info); } private: @@ -103,6 +100,7 @@ int MultiplexEncoderAdapter::InitEncode( encoder_info_ = EncoderInfo(); encoder_info_.implementation_name = "MultiplexEncoderAdapter ("; encoder_info_.requested_resolution_alignment = 1; + encoder_info_.apply_alignment_to_all_simulcast_layers = false; // This needs to be false so that we can do the split in Encode(). encoder_info_.supports_native_handle = false; @@ -139,6 +137,10 @@ int MultiplexEncoderAdapter::InitEncode( encoder_info_.requested_resolution_alignment, encoder_impl_info.requested_resolution_alignment); + if (encoder_impl_info.apply_alignment_to_all_simulcast_layers) { + encoder_info_.apply_alignment_to_all_simulcast_layers = true; + } + encoder_info_.has_internal_source = false; encoders_.emplace_back(std::move(encoder)); @@ -155,20 +157,38 @@ int MultiplexEncoderAdapter::Encode( return WEBRTC_VIDEO_CODEC_UNINITIALIZED; } + // The input image is forwarded as-is, unless it is a native buffer and + // |supports_augmented_data_| is true in which case we need to map it in order + // to access the underlying AugmentedVideoFrameBuffer. 
+ VideoFrame forwarded_image = input_image; + if (supports_augmented_data_ && + forwarded_image.video_frame_buffer()->type() == + VideoFrameBuffer::Type::kNative) { + auto info = GetEncoderInfo(); + rtc::scoped_refptr mapped_buffer = + forwarded_image.video_frame_buffer()->GetMappedFrameBuffer( + info.preferred_pixel_formats); + if (!mapped_buffer) { + // Unable to map the buffer. + return WEBRTC_VIDEO_CODEC_ERROR; + } + forwarded_image.set_video_frame_buffer(std::move(mapped_buffer)); + } + std::vector adjusted_frame_types; if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) { adjusted_frame_types.push_back(VideoFrameType::kVideoFrameKey); } else { adjusted_frame_types.push_back(VideoFrameType::kVideoFrameDelta); } - const bool has_alpha = input_image.video_frame_buffer()->type() == + const bool has_alpha = forwarded_image.video_frame_buffer()->type() == VideoFrameBuffer::Type::kI420A; std::unique_ptr augmenting_data = nullptr; uint16_t augmenting_data_length = 0; AugmentedVideoFrameBuffer* augmented_video_frame_buffer = nullptr; if (supports_augmented_data_) { augmented_video_frame_buffer = static_cast( - input_image.video_frame_buffer().get()); + forwarded_image.video_frame_buffer().get()); augmenting_data_length = augmented_video_frame_buffer->GetAugmentingDataSize(); augmenting_data = @@ -180,10 +200,10 @@ int MultiplexEncoderAdapter::Encode( } { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); stashed_images_.emplace( std::piecewise_construct, - std::forward_as_tuple(input_image.timestamp()), + std::forward_as_tuple(forwarded_image.timestamp()), std::forward_as_tuple( picture_index_, has_alpha ? 
kAlphaCodecStreams : 1, std::move(augmenting_data), augmenting_data_length)); @@ -192,7 +212,8 @@ int MultiplexEncoderAdapter::Encode( ++picture_index_; // Encode YUV - int rv = encoders_[kYUVStream]->Encode(input_image, &adjusted_frame_types); + int rv = + encoders_[kYUVStream]->Encode(forwarded_image, &adjusted_frame_types); // If we do not receive an alpha frame, we send a single frame for this // |picture_index_|. The receiver will receive |frame_count| as 1 which @@ -201,24 +222,27 @@ int MultiplexEncoderAdapter::Encode( return rv; // Encode AXX - const I420ABufferInterface* yuva_buffer = + rtc::scoped_refptr frame_buffer = supports_augmented_data_ - ? augmented_video_frame_buffer->GetVideoFrameBuffer()->GetI420A() - : input_image.video_frame_buffer()->GetI420A(); + ? augmented_video_frame_buffer->GetVideoFrameBuffer() + : forwarded_image.video_frame_buffer(); + const I420ABufferInterface* yuva_buffer = frame_buffer->GetI420A(); rtc::scoped_refptr alpha_buffer = - WrapI420Buffer(input_image.width(), input_image.height(), + WrapI420Buffer(forwarded_image.width(), forwarded_image.height(), yuva_buffer->DataA(), yuva_buffer->StrideA(), multiplex_dummy_planes_.data(), yuva_buffer->StrideU(), multiplex_dummy_planes_.data(), yuva_buffer->StrideV(), - rtc::KeepRefUntilDone(input_image.video_frame_buffer())); - VideoFrame alpha_image = VideoFrame::Builder() - .set_video_frame_buffer(alpha_buffer) - .set_timestamp_rtp(input_image.timestamp()) - .set_timestamp_ms(input_image.render_time_ms()) - .set_rotation(input_image.rotation()) - .set_id(input_image.id()) - .set_packet_infos(input_image.packet_infos()) - .build(); + // To keep reference alive. 
+ [frame_buffer] {}); + VideoFrame alpha_image = + VideoFrame::Builder() + .set_video_frame_buffer(alpha_buffer) + .set_timestamp_rtp(forwarded_image.timestamp()) + .set_timestamp_ms(forwarded_image.render_time_ms()) + .set_rotation(forwarded_image.rotation()) + .set_id(forwarded_image.id()) + .set_packet_infos(forwarded_image.packet_infos()) + .build(); rv = encoders_[kAXXStream]->Encode(alpha_image, &adjusted_frame_types); return rv; } @@ -273,7 +297,7 @@ int MultiplexEncoderAdapter::Release() { } encoders_.clear(); adapter_callbacks_.clear(); - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); stashed_images_.clear(); return WEBRTC_VIDEO_CODEC_OK; @@ -286,8 +310,7 @@ VideoEncoder::EncoderInfo MultiplexEncoderAdapter::GetEncoderInfo() const { EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage( AlphaCodecStream stream_idx, const EncodedImage& encodedImage, - const CodecSpecificInfo* codecSpecificInfo, - const RTPFragmentationHeader* fragmentation) { + const CodecSpecificInfo* codecSpecificInfo) { // Save the image MultiplexImageComponent image_component; image_component.component_index = stream_idx; @@ -295,10 +318,7 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage( PayloadStringToCodecType(associated_format_.name); image_component.encoded_image = encodedImage; - // If we don't already own the buffer, make a copy. 
- image_component.encoded_image.Retain(); - - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); const auto& stashed_image_itr = stashed_images_.find(encodedImage.Timestamp()); const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1); @@ -324,8 +344,7 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage( CodecSpecificInfo codec_info = *codecSpecificInfo; codec_info.codecType = kVideoCodecMultiplex; - encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info, - fragmentation); + encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info); } stashed_images_.erase(stashed_images_.begin(), stashed_image_next_itr); diff --git a/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc b/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc index 770d8b596c..7ecb24a87c 100644 --- a/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc +++ b/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc @@ -38,7 +38,6 @@ #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_error_codes.h" -#include "rtc_base/keep_ref_until_done.h" #include "rtc_base/ref_counted_object.h" #include "test/gmock.h" #include "test/gtest.h" @@ -91,9 +90,9 @@ class TestMultiplexAdapter : public VideoCodecUnitTest, for (int i = 0; i < 16; i++) { data[i] = i; } - rtc::scoped_refptr augmented_video_frame_buffer = - new rtc::RefCountedObject( - video_buffer, std::move(data), 16); + auto augmented_video_frame_buffer = + rtc::make_ref_counted(video_buffer, + std::move(data), 16); return std::make_unique( VideoFrame::Builder() .set_video_frame_buffer(augmented_video_frame_buffer) @@ -112,7 +111,9 @@ class TestMultiplexAdapter : public VideoCodecUnitTest, yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(), yuv_buffer->StrideY(), yuv_buffer->DataU(), 
yuv_buffer->StrideU(), yuv_buffer->DataV(), yuv_buffer->StrideV(), yuv_buffer->DataY(), - yuv_buffer->StrideY(), rtc::KeepRefUntilDone(yuv_buffer)); + yuv_buffer->StrideY(), + // To keep reference alive. + [yuv_buffer] {}); return std::make_unique(VideoFrame::Builder() .set_video_frame_buffer(yuva_buffer) .set_timestamp_rtp(123) @@ -168,8 +169,7 @@ class TestMultiplexAdapter : public VideoCodecUnitTest, rtc::scoped_refptr axx_buffer = WrapI420Buffer( yuva_buffer->width(), yuva_buffer->height(), yuva_buffer->DataA(), yuva_buffer->StrideA(), yuva_buffer->DataU(), yuva_buffer->StrideU(), - yuva_buffer->DataV(), yuva_buffer->StrideV(), - rtc::KeepRefUntilDone(video_frame_buffer)); + yuva_buffer->DataV(), yuva_buffer->StrideV(), [video_frame_buffer] {}); return std::make_unique(VideoFrame::Builder() .set_video_frame_buffer(axx_buffer) .set_timestamp_rtp(123) diff --git a/modules/video_coding/codecs/test/encoded_video_frame_producer.cc b/modules/video_coding/codecs/test/encoded_video_frame_producer.cc new file mode 100644 index 0000000000..899826eee4 --- /dev/null +++ b/modules/video_coding/codecs/test/encoded_video_frame_producer.cc @@ -0,0 +1,77 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h" + +#include +#include + +#include "api/test/create_frame_generator.h" +#include "api/test/frame_generator_interface.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_frame.h" +#include "api/video/video_frame_type.h" +#include "api/video_codecs/video_encoder.h" +#include "modules/video_coding/include/video_codec_interface.h" +#include "modules/video_coding/include/video_error_codes.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +class EncoderCallback : public EncodedImageCallback { + public: + explicit EncoderCallback( + std::vector& output_frames) + : output_frames_(output_frames) {} + + private: + Result OnEncodedImage(const EncodedImage& encoded_image, + const CodecSpecificInfo* codec_specific_info) override { + output_frames_.push_back({encoded_image, *codec_specific_info}); + return Result(Result::Error::OK); + } + + std::vector& output_frames_; +}; + +} // namespace + +std::vector +EncodedVideoFrameProducer::Encode() { + std::unique_ptr frame_buffer_generator = + test::CreateSquareFrameGenerator( + resolution_.Width(), resolution_.Height(), + test::FrameGeneratorInterface::OutputType::kI420, absl::nullopt); + + std::vector encoded_frames; + EncoderCallback encoder_callback(encoded_frames); + RTC_CHECK_EQ(encoder_.RegisterEncodeCompleteCallback(&encoder_callback), + WEBRTC_VIDEO_CODEC_OK); + + uint32_t rtp_tick = 90000 / framerate_fps_; + for (int i = 0; i < num_input_frames_; ++i) { + VideoFrame frame = + VideoFrame::Builder() + .set_video_frame_buffer(frame_buffer_generator->NextFrame().buffer) + .set_timestamp_rtp(rtp_timestamp_) + .build(); + rtp_timestamp_ += rtp_tick; + RTC_CHECK_EQ(encoder_.Encode(frame, &next_frame_type_), + WEBRTC_VIDEO_CODEC_OK); + next_frame_type_[0] = VideoFrameType::kVideoFrameDelta; + } + + RTC_CHECK_EQ(encoder_.RegisterEncodeCompleteCallback(nullptr), + WEBRTC_VIDEO_CODEC_OK); + return 
encoded_frames; +} + +} // namespace webrtc diff --git a/modules/video_coding/codecs/test/encoded_video_frame_producer.h b/modules/video_coding/codecs/test/encoded_video_frame_producer.h new file mode 100644 index 0000000000..2216287b92 --- /dev/null +++ b/modules/video_coding/codecs/test/encoded_video_frame_producer.h @@ -0,0 +1,92 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_ +#define MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_ + +#include + +#include + +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/encoded_image.h" +#include "api/video_codecs/video_encoder.h" +#include "modules/video_coding/include/video_codec_interface.h" + +namespace webrtc { + +// Wrapper around VideoEncoder::Encode for convenient input (generates frames) +// and output (returns encoded frames instead of passing them to callback) +class EncodedVideoFrameProducer { + public: + struct EncodedFrame { + EncodedImage encoded_image; + CodecSpecificInfo codec_specific_info; + }; + + // `encoder` should be initialized, but shouldn't have `EncoderCallback` set. + explicit EncodedVideoFrameProducer(VideoEncoder& encoder) + : encoder_(encoder) {} + EncodedVideoFrameProducer(const EncodedVideoFrameProducer&) = delete; + EncodedVideoFrameProducer& operator=(const EncodedVideoFrameProducer&) = + delete; + + // Number of the input frames to pass to the encoder. + EncodedVideoFrameProducer& SetNumInputFrames(int value); + // Encode next frame as key frame. 
+ EncodedVideoFrameProducer& ForceKeyFrame(); + // Resolution of the input frames. + EncodedVideoFrameProducer& SetResolution(RenderResolution value); + + EncodedVideoFrameProducer& SetFramerateFps(int value); + + // Generates input video frames and encodes them with `encoder` provided in + // the constructor. Returns frame passed to the `OnEncodedImage` by wraping + // `EncodedImageCallback` underneath. + std::vector Encode(); + + private: + VideoEncoder& encoder_; + + uint32_t rtp_timestamp_ = 1000; + int num_input_frames_ = 1; + int framerate_fps_ = 30; + RenderResolution resolution_ = {320, 180}; + std::vector next_frame_type_ = { + VideoFrameType::kVideoFrameKey}; +}; + +inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetNumInputFrames( + int value) { + RTC_DCHECK_GT(value, 0); + num_input_frames_ = value; + return *this; +} + +inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::ForceKeyFrame() { + next_frame_type_ = {VideoFrameType::kVideoFrameKey}; + return *this; +} + +inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetResolution( + RenderResolution value) { + resolution_ = value; + return *this; +} + +inline EncodedVideoFrameProducer& EncodedVideoFrameProducer::SetFramerateFps( + int value) { + RTC_DCHECK_GT(value, 0); + framerate_fps_ = value; + return *this; +} + +} // namespace webrtc +#endif // MODULES_VIDEO_CODING_CODECS_TEST_ENCODED_VIDEO_FRAME_PRODUCER_H_ diff --git a/modules/video_coding/codecs/test/plot_webrtc_test_logs.py b/modules/video_coding/codecs/test/plot_webrtc_test_logs.py index a0cdc0637f..29e2d6f65a 100755 --- a/modules/video_coding/codecs/test/plot_webrtc_test_logs.py +++ b/modules/video_coding/codecs/test/plot_webrtc_test_logs.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Plots statistics from WebRTC integration test logs. 
Usage: $ python plot_webrtc_test_logs.py filename.txt @@ -52,43 +51,43 @@ # Settings. SETTINGS = [ - WIDTH, - HEIGHT, - FILENAME, - NUM_FRAMES, + WIDTH, + HEIGHT, + FILENAME, + NUM_FRAMES, ] # Settings, options for x-axis. X_SETTINGS = [ - CORES, - FRAMERATE, - DENOISING, - RESILIENCE, - ERROR_CONCEALMENT, - BITRATE, # TODO(asapersson): Needs to be last. + CORES, + FRAMERATE, + DENOISING, + RESILIENCE, + ERROR_CONCEALMENT, + BITRATE, # TODO(asapersson): Needs to be last. ] # Settings, options for subplots. SUBPLOT_SETTINGS = [ - CODEC_TYPE, - ENCODER_IMPLEMENTATION_NAME, - DECODER_IMPLEMENTATION_NAME, - CODEC_IMPLEMENTATION_NAME, + CODEC_TYPE, + ENCODER_IMPLEMENTATION_NAME, + DECODER_IMPLEMENTATION_NAME, + CODEC_IMPLEMENTATION_NAME, ] + X_SETTINGS # Results. RESULTS = [ - PSNR, - SSIM, - ENC_BITRATE, - NUM_DROPPED_FRAMES, - TIME_TO_TARGET, - ENCODE_SPEED_FPS, - DECODE_SPEED_FPS, - QP, - CPU_USAGE, - AVG_KEY_FRAME_SIZE, - AVG_DELTA_FRAME_SIZE, + PSNR, + SSIM, + ENC_BITRATE, + NUM_DROPPED_FRAMES, + TIME_TO_TARGET, + ENCODE_SPEED_FPS, + DECODE_SPEED_FPS, + QP, + CPU_USAGE, + AVG_KEY_FRAME_SIZE, + AVG_DELTA_FRAME_SIZE, ] METRICS_TO_PARSE = SETTINGS + SUBPLOT_SETTINGS + RESULTS @@ -102,7 +101,7 @@ def ParseSetting(filename, setting): - """Parses setting from file. + """Parses setting from file. Args: filename: The name of the file. @@ -111,36 +110,36 @@ def ParseSetting(filename, setting): Returns: A list holding parsed settings, e.g. ['width: 128.0', 'width: 160.0'] """ - settings = [] - - settings_file = open(filename) - while True: - line = settings_file.readline() - if not line: - break - if re.search(r'%s' % EVENT_START, line): - # Parse event. - parsed = {} - while True: + settings = [] + + settings_file = open(filename) + while True: line = settings_file.readline() if not line: - break - if re.search(r'%s' % EVENT_END, line): - # Add parsed setting to list. 
- if setting in parsed: - s = setting + ': ' + str(parsed[setting]) - if s not in settings: - settings.append(s) - break - - TryFindMetric(parsed, line) - - settings_file.close() - return settings + break + if re.search(r'%s' % EVENT_START, line): + # Parse event. + parsed = {} + while True: + line = settings_file.readline() + if not line: + break + if re.search(r'%s' % EVENT_END, line): + # Add parsed setting to list. + if setting in parsed: + s = setting + ': ' + str(parsed[setting]) + if s not in settings: + settings.append(s) + break + + TryFindMetric(parsed, line) + + settings_file.close() + return settings def ParseMetrics(filename, setting1, setting2): - """Parses metrics from file. + """Parses metrics from file. Args: filename: The name of the file. @@ -175,82 +174,82 @@ def ParseMetrics(filename, setting1, setting2): } } """ - metrics = {} - - # Parse events. - settings_file = open(filename) - while True: - line = settings_file.readline() - if not line: - break - if re.search(r'%s' % EVENT_START, line): - # Parse event. - parsed = {} - while True: + metrics = {} + + # Parse events. + settings_file = open(filename) + while True: line = settings_file.readline() if not line: - break - if re.search(r'%s' % EVENT_END, line): - # Add parsed values to metrics. - key1 = setting1 + ': ' + str(parsed[setting1]) - key2 = setting2 + ': ' + str(parsed[setting2]) - if key1 not in metrics: - metrics[key1] = {} - if key2 not in metrics[key1]: - metrics[key1][key2] = {} - - for label in parsed: - if label not in metrics[key1][key2]: - metrics[key1][key2][label] = [] - metrics[key1][key2][label].append(parsed[label]) - - break - - TryFindMetric(parsed, line) - - settings_file.close() - return metrics + break + if re.search(r'%s' % EVENT_START, line): + # Parse event. + parsed = {} + while True: + line = settings_file.readline() + if not line: + break + if re.search(r'%s' % EVENT_END, line): + # Add parsed values to metrics. 
+ key1 = setting1 + ': ' + str(parsed[setting1]) + key2 = setting2 + ': ' + str(parsed[setting2]) + if key1 not in metrics: + metrics[key1] = {} + if key2 not in metrics[key1]: + metrics[key1][key2] = {} + + for label in parsed: + if label not in metrics[key1][key2]: + metrics[key1][key2][label] = [] + metrics[key1][key2][label].append(parsed[label]) + + break + + TryFindMetric(parsed, line) + + settings_file.close() + return metrics def TryFindMetric(parsed, line): - for metric in METRICS_TO_PARSE: - name = metric[0] - label = metric[1] - if re.search(r'%s' % name, line): - found, value = GetMetric(name, line) - if found: - parsed[label] = value - return + for metric in METRICS_TO_PARSE: + name = metric[0] + label = metric[1] + if re.search(r'%s' % name, line): + found, value = GetMetric(name, line) + if found: + parsed[label] = value + return def GetMetric(name, string): - # Float (e.g. bitrate = 98.8253). - pattern = r'%s\s*[:=]\s*([+-]?\d+\.*\d*)' % name - m = re.search(r'%s' % pattern, string) - if m is not None: - return StringToFloat(m.group(1)) - - # Alphanumeric characters (e.g. codec type : VP8). - pattern = r'%s\s*[:=]\s*(\w+)' % name - m = re.search(r'%s' % pattern, string) - if m is not None: - return True, m.group(1) + # Float (e.g. bitrate = 98.8253). + pattern = r'%s\s*[:=]\s*([+-]?\d+\.*\d*)' % name + m = re.search(r'%s' % pattern, string) + if m is not None: + return StringToFloat(m.group(1)) + + # Alphanumeric characters (e.g. codec type : VP8). 
+ pattern = r'%s\s*[:=]\s*(\w+)' % name + m = re.search(r'%s' % pattern, string) + if m is not None: + return True, m.group(1) - return False, -1 + return False, -1 def StringToFloat(value): - try: - value = float(value) - except ValueError: - print "Not a float, skipped %s" % value - return False, -1 + try: + value = float(value) + except ValueError: + print "Not a float, skipped %s" % value + return False, -1 - return True, value + return True, value def Plot(y_metric, x_metric, metrics): - """Plots y_metric vs x_metric per key in metrics. + """Plots y_metric vs x_metric per key in metrics. For example: y_metric = 'PSNR (dB)' @@ -266,26 +265,31 @@ def Plot(y_metric, x_metric, metrics): }, } """ - for key in sorted(metrics): - data = metrics[key] - if y_metric not in data: - print "Failed to find metric: %s" % y_metric - continue + for key in sorted(metrics): + data = metrics[key] + if y_metric not in data: + print "Failed to find metric: %s" % y_metric + continue - y = numpy.array(data[y_metric]) - x = numpy.array(data[x_metric]) - if len(y) != len(x): - print "Length mismatch for %s, %s" % (y, x) - continue + y = numpy.array(data[y_metric]) + x = numpy.array(data[x_metric]) + if len(y) != len(x): + print "Length mismatch for %s, %s" % (y, x) + continue - label = y_metric + ' - ' + str(key) + label = y_metric + ' - ' + str(key) - plt.plot(x, y, label=label, linewidth=1.5, marker='o', markersize=5, - markeredgewidth=0.0) + plt.plot(x, + y, + label=label, + linewidth=1.5, + marker='o', + markersize=5, + markeredgewidth=0.0) def PlotFigure(settings, y_metrics, x_metric, metrics, title): - """Plots metrics in y_metrics list. One figure is plotted and each entry + """Plots metrics in y_metrics list. One figure is plotted and each entry in the list is plotted in a subplot (and sorted per settings). 
For example: @@ -295,136 +299,140 @@ def PlotFigure(settings, y_metrics, x_metric, metrics, title): """ - plt.figure() - plt.suptitle(title, fontsize='large', fontweight='bold') - settings.sort() - rows = len(settings) - cols = 1 - pos = 1 - while pos <= rows: - plt.rc('grid', color=GRID_COLOR) - ax = plt.subplot(rows, cols, pos) - plt.grid() - plt.setp(ax.get_xticklabels(), visible=(pos == rows), fontsize='large') - plt.setp(ax.get_yticklabels(), fontsize='large') - setting = settings[pos - 1] - Plot(y_metrics[pos - 1], x_metric, metrics[setting]) - if setting.startswith(WIDTH[1]): - plt.title(setting, fontsize='medium') - plt.legend(fontsize='large', loc='best') - pos += 1 - - plt.xlabel(x_metric, fontsize='large') - plt.subplots_adjust(left=0.06, right=0.98, bottom=0.05, top=0.94, hspace=0.08) + plt.figure() + plt.suptitle(title, fontsize='large', fontweight='bold') + settings.sort() + rows = len(settings) + cols = 1 + pos = 1 + while pos <= rows: + plt.rc('grid', color=GRID_COLOR) + ax = plt.subplot(rows, cols, pos) + plt.grid() + plt.setp(ax.get_xticklabels(), visible=(pos == rows), fontsize='large') + plt.setp(ax.get_yticklabels(), fontsize='large') + setting = settings[pos - 1] + Plot(y_metrics[pos - 1], x_metric, metrics[setting]) + if setting.startswith(WIDTH[1]): + plt.title(setting, fontsize='medium') + plt.legend(fontsize='large', loc='best') + pos += 1 + + plt.xlabel(x_metric, fontsize='large') + plt.subplots_adjust(left=0.06, + right=0.98, + bottom=0.05, + top=0.94, + hspace=0.08) def GetTitle(filename, setting): - title = '' - if setting != CODEC_IMPLEMENTATION_NAME[1] and setting != CODEC_TYPE[1]: - codec_types = ParseSetting(filename, CODEC_TYPE[1]) - for i in range(0, len(codec_types)): - title += codec_types[i] + ', ' + title = '' + if setting != CODEC_IMPLEMENTATION_NAME[1] and setting != CODEC_TYPE[1]: + codec_types = ParseSetting(filename, CODEC_TYPE[1]) + for i in range(0, len(codec_types)): + title += codec_types[i] + ', ' - if setting != 
CORES[1]: - cores = ParseSetting(filename, CORES[1]) - for i in range(0, len(cores)): - title += cores[i].split('.')[0] + ', ' + if setting != CORES[1]: + cores = ParseSetting(filename, CORES[1]) + for i in range(0, len(cores)): + title += cores[i].split('.')[0] + ', ' - if setting != FRAMERATE[1]: - framerate = ParseSetting(filename, FRAMERATE[1]) - for i in range(0, len(framerate)): - title += framerate[i].split('.')[0] + ', ' + if setting != FRAMERATE[1]: + framerate = ParseSetting(filename, FRAMERATE[1]) + for i in range(0, len(framerate)): + title += framerate[i].split('.')[0] + ', ' - if (setting != CODEC_IMPLEMENTATION_NAME[1] and - setting != ENCODER_IMPLEMENTATION_NAME[1]): - enc_names = ParseSetting(filename, ENCODER_IMPLEMENTATION_NAME[1]) - for i in range(0, len(enc_names)): - title += enc_names[i] + ', ' + if (setting != CODEC_IMPLEMENTATION_NAME[1] + and setting != ENCODER_IMPLEMENTATION_NAME[1]): + enc_names = ParseSetting(filename, ENCODER_IMPLEMENTATION_NAME[1]) + for i in range(0, len(enc_names)): + title += enc_names[i] + ', ' - if (setting != CODEC_IMPLEMENTATION_NAME[1] and - setting != DECODER_IMPLEMENTATION_NAME[1]): - dec_names = ParseSetting(filename, DECODER_IMPLEMENTATION_NAME[1]) - for i in range(0, len(dec_names)): - title += dec_names[i] + ', ' + if (setting != CODEC_IMPLEMENTATION_NAME[1] + and setting != DECODER_IMPLEMENTATION_NAME[1]): + dec_names = ParseSetting(filename, DECODER_IMPLEMENTATION_NAME[1]) + for i in range(0, len(dec_names)): + title += dec_names[i] + ', ' - filenames = ParseSetting(filename, FILENAME[1]) - title += filenames[0].split('_')[0] + filenames = ParseSetting(filename, FILENAME[1]) + title += filenames[0].split('_')[0] - num_frames = ParseSetting(filename, NUM_FRAMES[1]) - for i in range(0, len(num_frames)): - title += ' (' + num_frames[i].split('.')[0] + ')' + num_frames = ParseSetting(filename, NUM_FRAMES[1]) + for i in range(0, len(num_frames)): + title += ' (' + num_frames[i].split('.')[0] + ')' - return 
title + return title def ToString(input_list): - return ToStringWithoutMetric(input_list, ('', '')) + return ToStringWithoutMetric(input_list, ('', '')) def ToStringWithoutMetric(input_list, metric): - i = 1 - output_str = "" - for m in input_list: - if m != metric: - output_str = output_str + ("%s. %s\n" % (i, m[1])) - i += 1 - return output_str + i = 1 + output_str = "" + for m in input_list: + if m != metric: + output_str = output_str + ("%s. %s\n" % (i, m[1])) + i += 1 + return output_str def GetIdx(text_list): - return int(raw_input(text_list)) - 1 + return int(raw_input(text_list)) - 1 def main(): - filename = sys.argv[1] - - # Setup. - idx_metric = GetIdx("Choose metric:\n0. All\n%s" % ToString(RESULTS)) - if idx_metric == -1: - # Plot all metrics. One subplot for each metric. - # Per subplot: metric vs bitrate (per resolution). - cores = ParseSetting(filename, CORES[1]) - setting1 = CORES[1] - setting2 = WIDTH[1] - sub_keys = [cores[0]] * len(Y_METRICS) - y_metrics = Y_METRICS - x_metric = BITRATE[1] - else: - resolutions = ParseSetting(filename, WIDTH[1]) - idx = GetIdx("Select metric for x-axis:\n%s" % ToString(X_SETTINGS)) - if X_SETTINGS[idx] == BITRATE: - idx = GetIdx("Plot per:\n%s" % ToStringWithoutMetric(SUBPLOT_SETTINGS, - BITRATE)) - idx_setting = METRICS_TO_PARSE.index(SUBPLOT_SETTINGS[idx]) - # Plot one metric. One subplot for each resolution. - # Per subplot: metric vs bitrate (per setting). - setting1 = WIDTH[1] - setting2 = METRICS_TO_PARSE[idx_setting][1] - sub_keys = resolutions - y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys) - x_metric = BITRATE[1] + filename = sys.argv[1] + + # Setup. + idx_metric = GetIdx("Choose metric:\n0. All\n%s" % ToString(RESULTS)) + if idx_metric == -1: + # Plot all metrics. One subplot for each metric. + # Per subplot: metric vs bitrate (per resolution). 
+ cores = ParseSetting(filename, CORES[1]) + setting1 = CORES[1] + setting2 = WIDTH[1] + sub_keys = [cores[0]] * len(Y_METRICS) + y_metrics = Y_METRICS + x_metric = BITRATE[1] else: - # Plot one metric. One subplot for each resolution. - # Per subplot: metric vs setting (per bitrate). - setting1 = WIDTH[1] - setting2 = BITRATE[1] - sub_keys = resolutions - y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys) - x_metric = X_SETTINGS[idx][1] - - metrics = ParseMetrics(filename, setting1, setting2) - - # Stretch fig size. - figsize = plt.rcParams["figure.figsize"] - figsize[0] *= FIG_SIZE_SCALE_FACTOR_X - figsize[1] *= FIG_SIZE_SCALE_FACTOR_Y - plt.rcParams["figure.figsize"] = figsize - - PlotFigure(sub_keys, y_metrics, x_metric, metrics, - GetTitle(filename, setting2)) - - plt.show() + resolutions = ParseSetting(filename, WIDTH[1]) + idx = GetIdx("Select metric for x-axis:\n%s" % ToString(X_SETTINGS)) + if X_SETTINGS[idx] == BITRATE: + idx = GetIdx("Plot per:\n%s" % + ToStringWithoutMetric(SUBPLOT_SETTINGS, BITRATE)) + idx_setting = METRICS_TO_PARSE.index(SUBPLOT_SETTINGS[idx]) + # Plot one metric. One subplot for each resolution. + # Per subplot: metric vs bitrate (per setting). + setting1 = WIDTH[1] + setting2 = METRICS_TO_PARSE[idx_setting][1] + sub_keys = resolutions + y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys) + x_metric = BITRATE[1] + else: + # Plot one metric. One subplot for each resolution. + # Per subplot: metric vs setting (per bitrate). + setting1 = WIDTH[1] + setting2 = BITRATE[1] + sub_keys = resolutions + y_metrics = [RESULTS[idx_metric][1]] * len(sub_keys) + x_metric = X_SETTINGS[idx][1] + + metrics = ParseMetrics(filename, setting1, setting2) + + # Stretch fig size. 
+ figsize = plt.rcParams["figure.figsize"] + figsize[0] *= FIG_SIZE_SCALE_FACTOR_X + figsize[1] *= FIG_SIZE_SCALE_FACTOR_Y + plt.rcParams["figure.figsize"] = figsize + + PlotFigure(sub_keys, y_metrics, x_metric, metrics, + GetTitle(filename, setting2)) + + plt.show() if __name__ == '__main__': - main() + main() diff --git a/modules/video_coding/codecs/test/video_codec_unittest.cc b/modules/video_coding/codecs/test/video_codec_unittest.cc index c6cf1add94..ff09231b62 100644 --- a/modules/video_coding/codecs/test/video_codec_unittest.cc +++ b/modules/video_coding/codecs/test/video_codec_unittest.cc @@ -35,9 +35,8 @@ const VideoEncoder::Capabilities kCapabilities(false); EncodedImageCallback::Result VideoCodecUnitTest::FakeEncodeCompleteCallback::OnEncodedImage( const EncodedImage& frame, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) { - rtc::CritScope lock(&test_->encoded_frame_section_); + const CodecSpecificInfo* codec_specific_info) { + MutexLock lock(&test_->encoded_frame_section_); test_->encoded_frames_.push_back(frame); RTC_DCHECK(codec_specific_info); test_->codec_specific_infos_.push_back(*codec_specific_info); @@ -58,7 +57,7 @@ void VideoCodecUnitTest::FakeDecodeCompleteCallback::Decoded( VideoFrame& frame, absl::optional decode_time_ms, absl::optional qp) { - rtc::CritScope lock(&test_->decoded_frame_section_); + MutexLock lock(&test_->decoded_frame_section_); test_->decoded_frame_.emplace(frame); test_->decoded_qp_ = qp; test_->decoded_frame_event_.Set(); @@ -126,7 +125,7 @@ bool VideoCodecUnitTest::WaitForEncodedFrame( } void VideoCodecUnitTest::SetWaitForEncodedFramesThreshold(size_t num_frames) { - rtc::CritScope lock(&encoded_frame_section_); + MutexLock lock(&encoded_frame_section_); wait_for_encoded_frames_threshold_ = num_frames; } @@ -136,7 +135,7 @@ bool VideoCodecUnitTest::WaitForEncodedFrames( EXPECT_TRUE(encoded_frame_event_.Wait(kEncodeTimeoutMs)) << "Timed out while waiting for encoded 
frame."; // This becomes unsafe if there are multiple threads waiting for frames. - rtc::CritScope lock(&encoded_frame_section_); + MutexLock lock(&encoded_frame_section_); EXPECT_FALSE(encoded_frames_.empty()); EXPECT_FALSE(codec_specific_infos_.empty()); EXPECT_EQ(encoded_frames_.size(), codec_specific_infos_.size()); @@ -157,7 +156,7 @@ bool VideoCodecUnitTest::WaitForDecodedFrame(std::unique_ptr* frame, bool ret = decoded_frame_event_.Wait(kDecodeTimeoutMs); EXPECT_TRUE(ret) << "Timed out while waiting for a decoded frame."; // This becomes unsafe if there are multiple threads waiting for frames. - rtc::CritScope lock(&decoded_frame_section_); + MutexLock lock(&decoded_frame_section_); EXPECT_TRUE(decoded_frame_); if (decoded_frame_) { frame->reset(new VideoFrame(std::move(*decoded_frame_))); @@ -170,7 +169,7 @@ bool VideoCodecUnitTest::WaitForDecodedFrame(std::unique_ptr* frame, } size_t VideoCodecUnitTest::GetNumEncodedFrames() { - rtc::CritScope lock(&encoded_frame_section_); + MutexLock lock(&encoded_frame_section_); return encoded_frames_.size(); } diff --git a/modules/video_coding/codecs/test/video_codec_unittest.h b/modules/video_coding/codecs/test/video_codec_unittest.h index 1ce37a7ed5..adab3558aa 100644 --- a/modules/video_coding/codecs/test/video_codec_unittest.h +++ b/modules/video_coding/codecs/test/video_codec_unittest.h @@ -20,8 +20,8 @@ #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/utility/vp8_header_parser.h" #include "modules/video_coding/utility/vp9_uncompressed_header_parser.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "test/gtest.h" @@ -42,8 +42,7 @@ class VideoCodecUnitTest : public ::testing::Test { : test_(test) {} Result OnEncodedImage(const EncodedImage& frame, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation); + const 
CodecSpecificInfo* codec_specific_info); private: VideoCodecUnitTest* const test_; @@ -108,7 +107,7 @@ class VideoCodecUnitTest : public ::testing::Test { FakeDecodeCompleteCallback decode_complete_callback_; rtc::Event encoded_frame_event_; - rtc::CriticalSection encoded_frame_section_; + Mutex encoded_frame_section_; size_t wait_for_encoded_frames_threshold_; std::vector encoded_frames_ RTC_GUARDED_BY(encoded_frame_section_); @@ -116,7 +115,7 @@ class VideoCodecUnitTest : public ::testing::Test { RTC_GUARDED_BY(encoded_frame_section_); rtc::Event decoded_frame_event_; - rtc::CriticalSection decoded_frame_section_; + Mutex decoded_frame_section_; absl::optional decoded_frame_ RTC_GUARDED_BY(decoded_frame_section_); absl::optional decoded_qp_ RTC_GUARDED_BY(decoded_frame_section_); diff --git a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc index 7e92b360bd..dee5b1b939 100644 --- a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc +++ b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc @@ -20,15 +20,17 @@ #include #include +#include "absl/strings/str_replace.h" #include "absl/types/optional.h" #include "api/array_view.h" +#include "api/transport/field_trial_based_config.h" #include "api/video/video_bitrate_allocation.h" +#include "api/video_codecs/h264_profile_level_id.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_decoder.h" #include "api/video_codecs/video_encoder_config.h" #include "common_video/h264/h264_common.h" -#include "media/base/h264_profile_level_id.h" #include "media/base/media_constants.h" #include "media/engine/internal_decoder_factory.h" #include "media/engine/internal_encoder_factory.h" @@ -57,17 +59,18 @@ using VideoStatistics = VideoCodecTestStats::VideoStatistics; namespace { const int kBaseKeyFrameInterval = 3000; const double kBitratePriority = 1.0; 
-const int kMaxFramerateFps = 30; +const int kDefaultMaxFramerateFps = 30; const int kMaxQp = 56; void ConfigureSimulcast(VideoCodec* codec_settings) { + FieldTrialBasedConfig trials; const std::vector streams = cricket::GetSimulcastConfig( /*min_layer=*/1, codec_settings->numberOfSimulcastStreams, codec_settings->width, codec_settings->height, kBitratePriority, kMaxQp, - /* is_screenshare = */ false, true); + /* is_screenshare = */ false, true, trials); for (size_t i = 0; i < streams.size(); ++i) { - SimulcastStream* ss = &codec_settings->simulcastStream[i]; + SpatialLayer* ss = &codec_settings->simulcastStream[i]; ss->width = static_cast(streams[i].width); ss->height = static_cast(streams[i].height); ss->numberOfTemporalLayers = @@ -84,7 +87,7 @@ void ConfigureSvc(VideoCodec* codec_settings) { RTC_CHECK_EQ(kVideoCodecVP9, codec_settings->codecType); const std::vector layers = GetSvcConfig( - codec_settings->width, codec_settings->height, kMaxFramerateFps, + codec_settings->width, codec_settings->height, kDefaultMaxFramerateFps, /*first_active_layer=*/0, codec_settings->VP9()->numberOfSpatialLayers, codec_settings->VP9()->numberOfTemporalLayers, /* is_screen_sharing = */ false); @@ -125,6 +128,8 @@ std::string CodecSpecificToString(const VideoCodec& codec) { case kVideoCodecH264: ss << "frame_dropping: " << codec.H264().frameDroppingOn; ss << "\nkey_frame_interval: " << codec.H264().keyFrameInterval; + ss << "\nnum_temporal_layers: " + << static_cast(codec.H264().numberOfTemporalLayers); break; default: break; @@ -205,9 +210,14 @@ void VideoCodecTestFixtureImpl::Config::SetCodecSettings( codec_settings.VP9()->numberOfSpatialLayers = static_cast(num_spatial_layers); break; + case kVideoCodecAV1: + codec_settings.qpMax = 63; + break; case kVideoCodecH264: codec_settings.H264()->frameDroppingOn = frame_dropper_on; codec_settings.H264()->keyFrameInterval = kBaseKeyFrameInterval; + codec_settings.H264()->numberOfTemporalLayers = + static_cast(num_temporal_layers); 
break; default: break; @@ -230,6 +240,8 @@ size_t VideoCodecTestFixtureImpl::Config::NumberOfTemporalLayers() const { return codec_settings.VP8().numberOfTemporalLayers; } else if (codec_settings.codecType == kVideoCodecVP9) { return codec_settings.VP9().numberOfTemporalLayers; + } else if (codec_settings.codecType == kVideoCodecH264) { + return codec_settings.H264().numberOfTemporalLayers; } else { return 1; } @@ -274,8 +286,7 @@ std::string VideoCodecTestFixtureImpl::Config::ToString() const { if (codec_settings.numberOfSimulcastStreams > 1) { for (int i = 0; i < codec_settings.numberOfSimulcastStreams; ++i) { ss << "\n\n--> codec_settings.simulcastStream[" << i << "]"; - const SimulcastStream& simulcast_stream = - codec_settings.simulcastStream[i]; + const SpatialLayer& simulcast_stream = codec_settings.simulcastStream[i]; ss << "\nwidth: " << simulcast_stream.width; ss << "\nheight: " << simulcast_stream.height; ss << "\nnum_temporal_layers: " @@ -297,11 +308,11 @@ std::string VideoCodecTestFixtureImpl::Config::CodecName() const { name = CodecTypeToPayloadString(codec_settings.codecType); } if (codec_settings.codecType == kVideoCodecH264) { - if (h264_codec_settings.profile == H264::kProfileConstrainedHigh) { + if (h264_codec_settings.profile == H264Profile::kProfileConstrainedHigh) { return name + "-CHP"; } else { RTC_DCHECK_EQ(h264_codec_settings.profile, - H264::kProfileConstrainedBaseline); + H264Profile::kProfileConstrainedBaseline); return name + "-CBP"; } } @@ -404,8 +415,14 @@ void VideoCodecTestFixtureImpl::RunTest( // codecs on a task queue. 
TaskQueueForTest task_queue("VidProc TQ"); - SetUpAndInitObjects(&task_queue, rate_profiles[0].target_kbps, - rate_profiles[0].input_fps); + bool is_setup_succeeded = SetUpAndInitObjects( + &task_queue, rate_profiles[0].target_kbps, rate_profiles[0].input_fps); + EXPECT_TRUE(is_setup_succeeded); + if (!is_setup_succeeded) { + ReleaseAndCloseObjects(&task_queue); + return; + } + PrintSettings(&task_queue); ProcessAllFrames(&task_queue, rate_profiles); ReleaseAndCloseObjects(&task_queue); @@ -446,6 +463,8 @@ void VideoCodecTestFixtureImpl::ProcessAllFrames( } } + task_queue->PostTask([this] { processor_->Finalize(); }); + // Wait until we know that the last frame has been sent for encode. task_queue->SendTask([] {}, RTC_FROM_HERE); @@ -591,7 +610,7 @@ void VideoCodecTestFixtureImpl::VerifyVideoStatistic( } } -void VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() { +bool VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() { SdpVideoFormat::Parameters params; if (config_.codec_settings.codecType == kVideoCodecH264) { const char* packetization_mode = @@ -600,8 +619,8 @@ void VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() { ? 
"1" : "0"; params = {{cricket::kH264FmtpProfileLevelId, - *H264::ProfileLevelIdToString(H264::ProfileLevelId( - config_.h264_codec_settings.profile, H264::kLevel3_1))}, + *H264ProfileLevelIdToString(H264ProfileLevelId( + config_.h264_codec_settings.profile, H264Level::kLevel3_1))}, {cricket::kH264FmtpPacketizationMode, packetization_mode}}; } else { params = {}; @@ -610,6 +629,9 @@ void VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() { encoder_ = encoder_factory_->CreateVideoEncoder(format); EXPECT_TRUE(encoder_) << "Encoder not successfully created."; + if (encoder_ == nullptr) { + return false; + } const size_t num_simulcast_or_spatial_layers = std::max( config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers()); @@ -620,7 +642,12 @@ void VideoCodecTestFixtureImpl::CreateEncoderAndDecoder() { for (const auto& decoder : decoders_) { EXPECT_TRUE(decoder) << "Decoder not successfully created."; + if (decoder == nullptr) { + return false; + } } + + return true; } void VideoCodecTestFixtureImpl::DestroyEncoderAndDecoder() { @@ -632,7 +659,7 @@ VideoCodecTestStats& VideoCodecTestFixtureImpl::GetStats() { return stats_; } -void VideoCodecTestFixtureImpl::SetUpAndInitObjects( +bool VideoCodecTestFixtureImpl::SetUpAndInitObjects( TaskQueueForTest* task_queue, size_t initial_bitrate_kbps, double initial_framerate_fps) { @@ -640,26 +667,60 @@ void VideoCodecTestFixtureImpl::SetUpAndInitObjects( config_.codec_settings.startBitrate = static_cast(initial_bitrate_kbps); config_.codec_settings.maxFramerate = std::ceil(initial_framerate_fps); + int clip_width = config_.clip_width.value_or(config_.codec_settings.width); + int clip_height = config_.clip_height.value_or(config_.codec_settings.height); + // Create file objects for quality analysis. 
- source_frame_reader_.reset( - new YuvFrameReaderImpl(config_.filepath, config_.codec_settings.width, - config_.codec_settings.height)); + source_frame_reader_.reset(new YuvFrameReaderImpl( + config_.filepath, clip_width, clip_height, + config_.reference_width.value_or(clip_width), + config_.reference_height.value_or(clip_height), + YuvFrameReaderImpl::RepeatMode::kPingPong, config_.clip_fps, + config_.codec_settings.maxFramerate)); EXPECT_TRUE(source_frame_reader_->Init()); RTC_DCHECK(encoded_frame_writers_.empty()); RTC_DCHECK(decoded_frame_writers_.empty()); + stats_.Clear(); + + cpu_process_time_.reset(new CpuProcessTime(config_)); + + bool is_codec_created = false; + task_queue->SendTask( + [this, &is_codec_created]() { + is_codec_created = CreateEncoderAndDecoder(); + }, + RTC_FROM_HERE); + + if (!is_codec_created) { + return false; + } + + task_queue->SendTask( + [this]() { + processor_ = std::make_unique( + encoder_.get(), &decoders_, source_frame_reader_.get(), config_, + &stats_, &encoded_frame_writers_, + decoded_frame_writers_.empty() ? 
nullptr : &decoded_frame_writers_); + }, + RTC_FROM_HERE); + if (config_.visualization_params.save_encoded_ivf || config_.visualization_params.save_decoded_y4m) { + std::string encoder_name = GetCodecName(task_queue, /*is_encoder=*/true); + encoder_name = absl::StrReplaceAll(encoder_name, {{":", ""}, {" ", "-"}}); + const size_t num_simulcast_or_spatial_layers = std::max( config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers()); const size_t num_temporal_layers = config_.NumberOfTemporalLayers(); for (size_t simulcast_svc_idx = 0; simulcast_svc_idx < num_simulcast_or_spatial_layers; ++simulcast_svc_idx) { - const std::string output_filename_base = JoinFilename( - config_.output_path, FilenameWithParams(config_) + "_sl" + - std::to_string(simulcast_svc_idx)); + const std::string output_filename_base = + JoinFilename(config_.output_path, + FilenameWithParams(config_) + "_" + encoder_name + + "_sl" + std::to_string(simulcast_svc_idx)); if (config_.visualization_params.save_encoded_ivf) { for (size_t temporal_idx = 0; temporal_idx < num_temporal_layers; @@ -687,19 +748,7 @@ void VideoCodecTestFixtureImpl::SetUpAndInitObjects( } } - stats_.Clear(); - - cpu_process_time_.reset(new CpuProcessTime(config_)); - - task_queue->SendTask( - [this]() { - CreateEncoderAndDecoder(); - processor_ = std::make_unique( - encoder_.get(), &decoders_, source_frame_reader_.get(), config_, - &stats_, &encoded_frame_writers_, - decoded_frame_writers_.empty() ? 
nullptr : &decoded_frame_writers_); - }, - RTC_FROM_HERE); + return true; } void VideoCodecTestFixtureImpl::ReleaseAndCloseObjects( @@ -725,22 +774,32 @@ void VideoCodecTestFixtureImpl::ReleaseAndCloseObjects( decoded_frame_writers_.clear(); } +std::string VideoCodecTestFixtureImpl::GetCodecName( + TaskQueueForTest* task_queue, + bool is_encoder) const { + std::string codec_name; + task_queue->SendTask( + [this, is_encoder, &codec_name] { + if (is_encoder) { + codec_name = encoder_->GetEncoderInfo().implementation_name; + } else { + codec_name = decoders_.at(0)->ImplementationName(); + } + }, + RTC_FROM_HERE); + return codec_name; +} + void VideoCodecTestFixtureImpl::PrintSettings( TaskQueueForTest* task_queue) const { RTC_LOG(LS_INFO) << "==> Config"; RTC_LOG(LS_INFO) << config_.ToString(); RTC_LOG(LS_INFO) << "==> Codec names"; - std::string encoder_name; - std::string decoder_name; - task_queue->SendTask( - [this, &encoder_name, &decoder_name] { - encoder_name = encoder_->GetEncoderInfo().implementation_name; - decoder_name = decoders_.at(0)->ImplementationName(); - }, - RTC_FROM_HERE); - RTC_LOG(LS_INFO) << "enc_impl_name: " << encoder_name; - RTC_LOG(LS_INFO) << "dec_impl_name: " << decoder_name; + RTC_LOG(LS_INFO) << "enc_impl_name: " + << GetCodecName(task_queue, /*is_encoder=*/true); + RTC_LOG(LS_INFO) << "dec_impl_name: " + << GetCodecName(task_queue, /*is_encoder=*/false); } } // namespace test diff --git a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h index 3bbe50ecc3..005b7c0a8e 100644 --- a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h +++ b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.h @@ -59,9 +59,9 @@ class VideoCodecTestFixtureImpl : public VideoCodecTestFixture { private: class CpuProcessTime; - void CreateEncoderAndDecoder(); + bool CreateEncoderAndDecoder(); void DestroyEncoderAndDecoder(); - void 
SetUpAndInitObjects(TaskQueueForTest* task_queue, + bool SetUpAndInitObjects(TaskQueueForTest* task_queue, size_t initial_bitrate_kbps, double initial_framerate_fps); void ReleaseAndCloseObjects(TaskQueueForTest* task_queue); @@ -82,6 +82,7 @@ class VideoCodecTestFixtureImpl : public VideoCodecTestFixture { size_t target_bitrate_kbps, double input_framerate_fps); + std::string GetCodecName(TaskQueueForTest* task_queue, bool is_encoder) const; void PrintSettings(TaskQueueForTest* task_queue) const; // Codecs. diff --git a/modules/video_coding/codecs/test/videocodec_test_libaom.cc b/modules/video_coding/codecs/test/videocodec_test_libaom.cc new file mode 100644 index 0000000000..c3263e7134 --- /dev/null +++ b/modules/video_coding/codecs/test/videocodec_test_libaom.cc @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include + +#include "api/test/create_videocodec_test_fixture.h" +#include "api/test/video/function_video_encoder_factory.h" +#include "api/video_codecs/sdp_video_format.h" +#include "media/base/media_constants.h" +#include "media/engine/internal_decoder_factory.h" +#include "media/engine/internal_encoder_factory.h" +#include "media/engine/simulcast_encoder_adapter.h" +#include "test/gtest.h" +#include "test/testsupport/file_utils.h" + +namespace webrtc { +namespace test { +namespace { +// Test clips settings. 
+constexpr int kCifWidth = 352; +constexpr int kCifHeight = 288; +constexpr int kNumFramesLong = 300; + +VideoCodecTestFixture::Config CreateConfig(std::string filename) { + VideoCodecTestFixture::Config config; + config.filename = filename; + config.filepath = ResourcePath(config.filename, "yuv"); + config.num_frames = kNumFramesLong; + config.use_single_core = true; + return config; +} + +TEST(VideoCodecTestLibaom, HighBitrateAV1) { + auto config = CreateConfig("foreman_cif"); + config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true, + kCifWidth, kCifHeight); + config.codec_settings.SetScalabilityMode("NONE"); + config.num_frames = kNumFramesLong; + auto fixture = CreateVideoCodecTestFixture(config); + + std::vector rate_profiles = {{500, 30, 0}}; + + std::vector rc_thresholds = { + {12, 1, 0, 1, 0.3, 0.1, 0, 1}}; + + std::vector quality_thresholds = {{37, 34, 0.94, 0.915}}; + + fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr); +} + +TEST(VideoCodecTestLibaom, VeryLowBitrateAV1) { + auto config = CreateConfig("foreman_cif"); + config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true, + kCifWidth, kCifHeight); + config.codec_settings.SetScalabilityMode("NONE"); + auto fixture = CreateVideoCodecTestFixture(config); + + std::vector rate_profiles = {{50, 30, 0}}; + + std::vector rc_thresholds = { + {15, 8, 75, 2, 2, 2, 2, 1}}; + + std::vector quality_thresholds = {{28, 25, 0.70, 0.60}}; + + fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr); +} + +#if !defined(WEBRTC_ANDROID) +constexpr int kHdWidth = 1280; +constexpr int kHdHeight = 720; +TEST(VideoCodecTestLibaom, HdAV1) { + auto config = CreateConfig("ConferenceMotion_1280_720_50"); + config.SetCodecSettings(cricket::kAv1CodecName, 1, 1, 1, false, true, true, + kHdWidth, kHdHeight); + config.codec_settings.SetScalabilityMode("NONE"); + config.num_frames = kNumFramesLong; + auto fixture = 
CreateVideoCodecTestFixture(config); + + std::vector rate_profiles = {{1000, 50, 0}}; + + std::vector rc_thresholds = { + {13, 3, 0, 1, 0.3, 0.1, 0, 1}}; + + std::vector quality_thresholds = {{36, 31.7, 0.93, 0.87}}; + + fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr); +} +#endif + +} // namespace +} // namespace test +} // namespace webrtc diff --git a/modules/video_coding/codecs/test/videocodec_test_libvpx.cc b/modules/video_coding/codecs/test/videocodec_test_libvpx.cc index d94d803504..0eb0d5a284 100644 --- a/modules/video_coding/codecs/test/videocodec_test_libvpx.cc +++ b/modules/video_coding/codecs/test/videocodec_test_libvpx.cc @@ -222,21 +222,6 @@ TEST(VideoCodecTestLibvpx, HighBitrateVP8) { fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr); } -// The tests below are currently disabled for Android. For ARM, the encoder -// uses |cpu_speed| = 12, as opposed to default |cpu_speed| <= 6 for x86, -// which leads to significantly different quality. The quality and rate control -// settings in the tests below are defined for encoder speed setting -// |cpu_speed| <= ~6. A number of settings would need to be significantly -// modified for the |cpu_speed| = 12 case. For now, keep the tests below -// disabled on Android. Some quality parameter in the above test has been -// adjusted to also pass for |cpu_speed| <= 12. 
- -// TODO(webrtc:9267): Fails on iOS -#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) -#define MAYBE_ChangeBitrateVP8 DISABLED_ChangeBitrateVP8 -#else -#define MAYBE_ChangeBitrateVP8 ChangeBitrateVP8 -#endif TEST(VideoCodecTestLibvpx, MAYBE_ChangeBitrateVP8) { auto config = CreateConfig(); config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false, @@ -265,12 +250,6 @@ TEST(VideoCodecTestLibvpx, MAYBE_ChangeBitrateVP8) { fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr); } -// TODO(webrtc:9267): Fails on iOS -#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) -#define MAYBE_ChangeFramerateVP8 DISABLED_ChangeFramerateVP8 -#else -#define MAYBE_ChangeFramerateVP8 ChangeFramerateVP8 -#endif TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) { auto config = CreateConfig(); config.SetCodecSettings(cricket::kVp8CodecName, 1, 1, 1, true, true, false, @@ -286,7 +265,7 @@ TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) { #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) std::vector rc_thresholds = { - {10, 2, 60, 1, 0.3, 0.3, 0, 1}, + {10, 2.42, 60, 1, 0.3, 0.3, 0, 1}, {10, 2, 30, 1, 0.3, 0.3, 0, 0}, {10, 2, 10, 1, 0.3, 0.2, 0, 0}}; #else @@ -298,10 +277,10 @@ TEST(VideoCodecTestLibvpx, MAYBE_ChangeFramerateVP8) { #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) std::vector quality_thresholds = { - {31, 30, 0.85, 0.84}, {31.5, 30.5, 0.86, 0.84}, {30.5, 29, 0.83, 0.78}}; + {31, 30, 0.85, 0.84}, {31.4, 30.5, 0.86, 0.84}, {30.5, 29, 0.83, 0.78}}; #else std::vector quality_thresholds = { - {31, 30, 0.87, 0.86}, {32, 31, 0.89, 0.86}, {32, 30, 0.87, 0.82}}; + {31, 30, 0.87, 0.85}, {32, 31, 0.88, 0.85}, {32, 30, 0.87, 0.82}}; #endif fixture->RunTest(rate_profiles, &rc_thresholds, &quality_thresholds, nullptr); } @@ -358,7 +337,7 @@ TEST(VideoCodecTestLibvpx, MAYBE_MultiresVP8) { std::vector rate_profiles = {{1500, 30, 0}}; #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) std::vector rc_thresholds = 
{ - {4.1, 1.04, 6, 0.18, 0.14, 0.08, 0, 1}}; + {4.1, 1.04, 7, 0.18, 0.14, 0.08, 0, 1}}; #else std::vector rc_thresholds = { {5, 1, 5, 1, 0.3, 0.1, 0, 1}}; diff --git a/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc b/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc index 9f887160a4..978fd8856f 100644 --- a/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc +++ b/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc @@ -95,7 +95,7 @@ TEST(VideoCodecTestMediaCodec, DISABLED_ForemanCif500kbpsH264CHP) { const auto frame_checker = std::make_unique(); - config.h264_codec_settings.profile = H264::kProfileConstrainedHigh; + config.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh; config.encoded_frame_checker = frame_checker.get(); config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false, 352, 288); diff --git a/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc b/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc index e5d6d2aaad..aa0ff0b9c8 100644 --- a/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc +++ b/modules/video_coding/codecs/test/videocodec_test_stats_impl.cc @@ -179,20 +179,20 @@ VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic( VideoStatistics video_stat; float buffer_level_bits = 0.0f; - RunningStatistics buffer_level_sec; + webrtc_impl::RunningStatistics buffer_level_sec; - RunningStatistics key_frame_size_bytes; - RunningStatistics delta_frame_size_bytes; + webrtc_impl::RunningStatistics key_frame_size_bytes; + webrtc_impl::RunningStatistics delta_frame_size_bytes; - RunningStatistics frame_encoding_time_us; - RunningStatistics frame_decoding_time_us; + webrtc_impl::RunningStatistics frame_encoding_time_us; + webrtc_impl::RunningStatistics frame_decoding_time_us; - RunningStatistics psnr_y; - RunningStatistics psnr_u; - RunningStatistics psnr_v; - RunningStatistics psnr; - RunningStatistics ssim; - 
RunningStatistics qp; + webrtc_impl::RunningStatistics psnr_y; + webrtc_impl::RunningStatistics psnr_u; + webrtc_impl::RunningStatistics psnr_v; + webrtc_impl::RunningStatistics psnr; + webrtc_impl::RunningStatistics ssim; + webrtc_impl::RunningStatistics qp; size_t rtp_timestamp_first_frame = 0; size_t rtp_timestamp_prev_frame = 0; @@ -252,12 +252,6 @@ VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic( video_stat.height = std::max(video_stat.height, frame_stat.decoded_height); - psnr_y.AddSample(frame_stat.psnr_y); - psnr_u.AddSample(frame_stat.psnr_u); - psnr_v.AddSample(frame_stat.psnr_v); - psnr.AddSample(frame_stat.psnr); - ssim.AddSample(frame_stat.ssim); - if (video_stat.num_decoded_frames > 1) { if (last_successfully_decoded_frame.decoded_width != frame_stat.decoded_width || @@ -271,6 +265,14 @@ VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic( last_successfully_decoded_frame = frame_stat; } + if (frame_stat.quality_analysis_successful) { + psnr_y.AddSample(frame_stat.psnr_y); + psnr_u.AddSample(frame_stat.psnr_u); + psnr_v.AddSample(frame_stat.psnr_v); + psnr.AddSample(frame_stat.psnr); + ssim.AddSample(frame_stat.ssim); + } + if (video_stat.num_input_frames > 0) { if (video_stat.time_to_reach_target_bitrate_sec == 0.0f) { RTC_CHECK_GT(time_since_first_frame_sec, 0); @@ -329,10 +331,10 @@ VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic( ? 
1000000.0f / mean_decode_time_us : std::numeric_limits::max(); - auto MaxDelaySec = - [target_bitrate_kbps](const RunningStatistics& stats) { - return 8 * stats.GetMax().value_or(0) / 1000 / target_bitrate_kbps; - }; + auto MaxDelaySec = [target_bitrate_kbps]( + const webrtc_impl::RunningStatistics& stats) { + return 8 * stats.GetMax().value_or(0) / 1000 / target_bitrate_kbps; + }; video_stat.avg_delay_sec = buffer_level_sec.GetMean().value_or(0); video_stat.max_key_frame_delay_sec = MaxDelaySec(key_frame_size_bytes); diff --git a/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc b/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc index 0f02080f27..6df974362f 100644 --- a/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc +++ b/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc @@ -71,7 +71,7 @@ MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CHP) { const auto frame_checker = std::make_unique(); auto config = CreateConfig(); - config.h264_codec_settings.profile = H264::kProfileConstrainedHigh; + config.h264_codec_settings.profile = H264Profile::kProfileConstrainedHigh; config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false, 352, 288); config.encoded_frame_checker = frame_checker.get(); diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc index 8fbbe4a04e..23eadfc0db 100644 --- a/modules/video_coding/codecs/test/videoprocessor.cc +++ b/modules/video_coding/codecs/test/videoprocessor.cc @@ -41,8 +41,6 @@ namespace webrtc { namespace test { -using FrameStatistics = VideoCodecTestStats::FrameStatistics; - namespace { const int kMsToRtpTimestamp = kVideoPayloadTypeFrequency / 1000; const int kMaxBufferedInputFrames = 20; @@ -86,34 +84,9 @@ int GetElapsedTimeMicroseconds(int64_t start_ns, int64_t stop_ns) { return static_cast(diff_us); } -void ExtractI420BufferWithSize(const VideoFrame& image, - int width, - int 
height, - rtc::Buffer* buffer) { - if (image.width() != width || image.height() != height) { - EXPECT_DOUBLE_EQ(static_cast(width) / height, - static_cast(image.width()) / image.height()); - // Same aspect ratio, no cropping needed. - rtc::scoped_refptr scaled(I420Buffer::Create(width, height)); - scaled->ScaleFrom(*image.video_frame_buffer()->ToI420()); - - size_t length = - CalcBufferSize(VideoType::kI420, scaled->width(), scaled->height()); - buffer->SetSize(length); - RTC_CHECK_NE(ExtractBuffer(scaled, length, buffer->data()), -1); - return; - } - - // No resize. - size_t length = - CalcBufferSize(VideoType::kI420, image.width(), image.height()); - buffer->SetSize(length); - RTC_CHECK_NE(ExtractBuffer(image, length, buffer->data()), -1); -} - void CalculateFrameQuality(const I420BufferInterface& ref_buffer, const I420BufferInterface& dec_buffer, - FrameStatistics* frame_stat, + VideoCodecTestStats::FrameStatistics* frame_stat, bool calc_ssim) { if (ref_buffer.width() != dec_buffer.width() || ref_buffer.height() != dec_buffer.height()) { @@ -174,6 +147,7 @@ VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder, num_simulcast_or_spatial_layers_( std::max(config_.NumberOfSimulcastStreams(), config_.NumberOfSpatialLayers())), + analyze_frame_quality_(!config_.measure_cpu), stats_(stats), encoder_(encoder), decoders_(decoders), @@ -192,8 +166,9 @@ VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder, last_encoded_frame_num_(num_simulcast_or_spatial_layers_), first_decoded_frame_(num_simulcast_or_spatial_layers_, true), last_decoded_frame_num_(num_simulcast_or_spatial_layers_), - decoded_frame_buffer_(num_simulcast_or_spatial_layers_), - post_encode_time_ns_(0) { + last_decoded_frame_buffer_(num_simulcast_or_spatial_layers_), + post_encode_time_ns_(0), + is_finalized_(false) { // Sanity checks. 
RTC_CHECK(TaskQueueBase::Current()) << "VideoProcessor must be run on a task queue."; @@ -234,6 +209,10 @@ VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder, VideoProcessor::~VideoProcessor() { RTC_DCHECK_RUN_ON(&sequence_checker_); + if (!is_finalized_) { + Finalize(); + } + // Explicitly reset codecs, in case they don't do that themselves when they // go out of scope. RTC_CHECK_EQ(encoder_->Release(), WEBRTC_VIDEO_CODEC_OK); @@ -249,6 +228,8 @@ VideoProcessor::~VideoProcessor() { void VideoProcessor::ProcessFrame() { RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!is_finalized_); + const size_t frame_number = last_inputed_frame_num_++; // Get input frame and store for future quality calculation. @@ -270,7 +251,27 @@ void VideoProcessor::ProcessFrame() { if (input_frames_.size() == kMaxBufferedInputFrames) { input_frames_.erase(input_frames_.begin()); } - input_frames_.emplace(frame_number, input_frame); + + if (config_.reference_width != -1 && config_.reference_height != -1 && + (input_frame.width() != config_.reference_width || + input_frame.height() != config_.reference_height)) { + rtc::scoped_refptr scaled_buffer = I420Buffer::Create( + config_.codec_settings.width, config_.codec_settings.height); + scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420()); + + VideoFrame scaled_reference_frame = input_frame; + scaled_reference_frame.set_video_frame_buffer(scaled_buffer); + input_frames_.emplace(frame_number, scaled_reference_frame); + + if (config_.reference_width == config_.codec_settings.width && + config_.reference_height == config_.codec_settings.height) { + // Both encoding and comparison uses the same down-scale factor, reuse + // it for encoder below. 
+ input_frame = scaled_reference_frame; + } + } else { + input_frames_.emplace(frame_number, input_frame); + } } last_inputed_timestamp_ = timestamp; @@ -290,6 +291,14 @@ void VideoProcessor::ProcessFrame() { frame_stat->encode_start_ns = encode_start_ns; } + if (input_frame.width() != config_.codec_settings.width || + input_frame.height() != config_.codec_settings.height) { + rtc::scoped_refptr scaled_buffer = I420Buffer::Create( + config_.codec_settings.width, config_.codec_settings.height); + scaled_buffer->ScaleFrom(*input_frame.video_frame_buffer()->ToI420()); + input_frame.set_video_frame_buffer(scaled_buffer); + } + // Encode. const std::vector frame_types = (frame_number == 0) @@ -304,6 +313,8 @@ void VideoProcessor::ProcessFrame() { void VideoProcessor::SetRates(size_t bitrate_kbps, double framerate_fps) { RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!is_finalized_); + framerate_fps_ = framerate_fps; bitrate_allocation_ = bitrate_allocator_->Allocate(VideoBitrateAllocationParameters( @@ -390,13 +401,11 @@ void VideoProcessor::FrameEncoded( frame_stat->max_nalu_size_bytes = GetMaxNaluSizeBytes(encoded_image, config_); frame_stat->qp = encoded_image.qp_; - bool end_of_picture = false; if (codec_type == kVideoCodecVP9) { const CodecSpecificInfoVP9& vp9_info = codec_specific.codecSpecific.VP9; frame_stat->inter_layer_predicted = vp9_info.inter_layer_predicted; frame_stat->non_ref_for_inter_layer_pred = vp9_info.non_ref_for_inter_layer_pred; - end_of_picture = vp9_info.end_of_picture; } else { frame_stat->inter_layer_predicted = false; frame_stat->non_ref_for_inter_layer_pred = true; @@ -414,7 +423,7 @@ void VideoProcessor::FrameEncoded( if (config_.decode) { DecodeFrame(*encoded_image_for_decode, spatial_idx); - if (end_of_picture && num_spatial_layers > 1) { + if (codec_specific.end_of_picture && num_spatial_layers > 1) { // If inter-layer prediction is enabled and upper layer was dropped then // base layer should be passed to upper layer decoder. 
Otherwise decoder // won't be able to decode next superframe. @@ -460,6 +469,56 @@ void VideoProcessor::FrameEncoded( } } +void VideoProcessor::CalcFrameQuality(const I420BufferInterface& decoded_frame, + FrameStatistics* frame_stat) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + + const auto reference_frame = input_frames_.find(frame_stat->frame_number); + RTC_CHECK(reference_frame != input_frames_.cend()) + << "The codecs are either buffering too much, dropping too much, or " + "being too slow relative to the input frame rate."; + + // SSIM calculation is not optimized. Skip it in real-time mode. + const bool calc_ssim = !config_.encode_in_real_time; + CalculateFrameQuality(*reference_frame->second.video_frame_buffer()->ToI420(), + decoded_frame, frame_stat, calc_ssim); + + frame_stat->quality_analysis_successful = true; +} + +void VideoProcessor::WriteDecodedFrame(const I420BufferInterface& decoded_frame, + FrameWriter& frame_writer) { + int input_video_width = config_.codec_settings.width; + int input_video_height = config_.codec_settings.height; + + rtc::scoped_refptr scaled_buffer; + const I420BufferInterface* scaled_frame; + + if (decoded_frame.width() == input_video_width && + decoded_frame.height() == input_video_height) { + scaled_frame = &decoded_frame; + } else { + EXPECT_DOUBLE_EQ( + static_cast(input_video_width) / input_video_height, + static_cast(decoded_frame.width()) / decoded_frame.height()); + + scaled_buffer = I420Buffer::Create(input_video_width, input_video_height); + scaled_buffer->ScaleFrom(decoded_frame); + + scaled_frame = scaled_buffer; + } + + // Ensure there is no padding. 
+ RTC_CHECK_EQ(scaled_frame->StrideY(), input_video_width); + RTC_CHECK_EQ(scaled_frame->StrideU(), input_video_width / 2); + RTC_CHECK_EQ(scaled_frame->StrideV(), input_video_width / 2); + + RTC_CHECK_EQ(3 * input_video_width * input_video_height / 2, + frame_writer.FrameLength()); + + RTC_CHECK(frame_writer.WriteFrame(scaled_frame->DataY())); +} + void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame, size_t spatial_idx) { RTC_DCHECK_RUN_ON(&sequence_checker_); @@ -472,13 +531,24 @@ void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame, stats_->GetFrameWithTimestamp(decoded_frame.timestamp(), spatial_idx); const size_t frame_number = frame_stat->frame_number; - if (decoded_frame_writers_ && !first_decoded_frame_[spatial_idx]) { - // Fill drops with last decoded frame to make them look like freeze at - // playback and to keep decoded layers in sync. - for (size_t i = last_decoded_frame_num_[spatial_idx] + 1; i < frame_number; - ++i) { - RTC_CHECK(decoded_frame_writers_->at(spatial_idx) - ->WriteFrame(decoded_frame_buffer_[spatial_idx].data())); + if (!first_decoded_frame_[spatial_idx]) { + for (size_t dropped_frame_number = last_decoded_frame_num_[spatial_idx] + 1; + dropped_frame_number < frame_number; ++dropped_frame_number) { + FrameStatistics* dropped_frame_stat = + stats_->GetFrame(dropped_frame_number, spatial_idx); + + if (analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) { + // Calculate frame quality comparing input frame with last decoded one. + CalcFrameQuality(*last_decoded_frame_buffer_[spatial_idx], + dropped_frame_stat); + } + + if (decoded_frame_writers_ != nullptr) { + // Fill drops with last decoded frame to make them look like freeze at + // playback and to keep decoded layers in sync. 
+ WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx], + *decoded_frame_writers_->at(spatial_idx)); + } } } @@ -497,41 +567,40 @@ void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame, frame_stat->decoded_height = decoded_frame.height(); // Skip quality metrics calculation to not affect CPU usage. - if (!config_.measure_cpu) { - const auto reference_frame = input_frames_.find(frame_number); - RTC_CHECK(reference_frame != input_frames_.cend()) - << "The codecs are either buffering too much, dropping too much, or " - "being too slow relative the input frame rate."; - - // SSIM calculation is not optimized. Skip it in real-time mode. - const bool calc_ssim = !config_.encode_in_real_time; - CalculateFrameQuality( - *reference_frame->second.video_frame_buffer()->ToI420(), - *decoded_frame.video_frame_buffer()->ToI420(), frame_stat, calc_ssim); - - // Erase all buffered input frames that we have moved past for all - // simulcast/spatial layers. Never buffer more than - // |kMaxBufferedInputFrames| frames, to protect against long runs of - // consecutive frame drops for a particular layer. - const auto min_last_decoded_frame_num = std::min_element( - last_decoded_frame_num_.cbegin(), last_decoded_frame_num_.cend()); - const size_t min_buffered_frame_num = std::max( - 0, static_cast(frame_number) - kMaxBufferedInputFrames + 1); - RTC_CHECK(min_last_decoded_frame_num != last_decoded_frame_num_.cend()); - const auto input_frames_erase_before = input_frames_.lower_bound( - std::max(*min_last_decoded_frame_num, min_buffered_frame_num)); - input_frames_.erase(input_frames_.cbegin(), input_frames_erase_before); + if (analyze_frame_quality_ || decoded_frame_writers_) { + // Save last decoded frame to handle possible future drops. + rtc::scoped_refptr i420buffer = + decoded_frame.video_frame_buffer()->ToI420(); + + // Copy decoded frame to a buffer without padding/stride such that we can + // dump Y, U and V planes into a file in one shot. 
+ last_decoded_frame_buffer_[spatial_idx] = I420Buffer::Copy( + i420buffer->width(), i420buffer->height(), i420buffer->DataY(), + i420buffer->StrideY(), i420buffer->DataU(), i420buffer->StrideU(), + i420buffer->DataV(), i420buffer->StrideV()); } - if (decoded_frame_writers_) { - ExtractI420BufferWithSize(decoded_frame, config_.codec_settings.width, - config_.codec_settings.height, - &decoded_frame_buffer_[spatial_idx]); - RTC_CHECK_EQ(decoded_frame_buffer_[spatial_idx].size(), - decoded_frame_writers_->at(spatial_idx)->FrameLength()); - RTC_CHECK(decoded_frame_writers_->at(spatial_idx) - ->WriteFrame(decoded_frame_buffer_[spatial_idx].data())); + if (analyze_frame_quality_) { + CalcFrameQuality(*decoded_frame.video_frame_buffer()->ToI420(), frame_stat); } + + if (decoded_frame_writers_ != nullptr) { + WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx], + *decoded_frame_writers_->at(spatial_idx)); + } + + // Erase all buffered input frames that we have moved past for all + // simulcast/spatial layers. Never buffer more than + // |kMaxBufferedInputFrames| frames, to protect against long runs of + // consecutive frame drops for a particular layer. 
+ const auto min_last_decoded_frame_num = std::min_element( + last_decoded_frame_num_.cbegin(), last_decoded_frame_num_.cend()); + const size_t min_buffered_frame_num = + std::max(0, static_cast(frame_number) - kMaxBufferedInputFrames + 1); + RTC_CHECK(min_last_decoded_frame_num != last_decoded_frame_num_.cend()); + const auto input_frames_erase_before = input_frames_.lower_bound( + std::max(*min_last_decoded_frame_num, min_buffered_frame_num)); + input_frames_.erase(input_frames_.cbegin(), input_frames_erase_before); } void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image, @@ -571,16 +640,18 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe( } const size_t payload_size_bytes = base_image.size() + encoded_image.size(); - EncodedImage copied_image = encoded_image; - copied_image.SetEncodedData(EncodedImageBuffer::Create(payload_size_bytes)); + auto buffer = EncodedImageBuffer::Create(payload_size_bytes); if (base_image.size()) { RTC_CHECK(base_image.data()); - memcpy(copied_image.data(), base_image.data(), base_image.size()); + memcpy(buffer->data(), base_image.data(), base_image.size()); } - memcpy(copied_image.data() + base_image.size(), encoded_image.data(), + memcpy(buffer->data() + base_image.size(), encoded_image.data(), encoded_image.size()); - copied_image.set_size(payload_size_bytes); + EncodedImage copied_image = encoded_image; + copied_image.SetEncodedData(buffer); + if (base_image.size()) + copied_image._frameType = base_image._frameType; // Replace previous EncodedImage for this spatial layer. 
merged_encoded_frames_.at(spatial_idx) = std::move(copied_image); @@ -588,5 +659,41 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe( return &merged_encoded_frames_.at(spatial_idx); } +void VideoProcessor::Finalize() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!is_finalized_); + is_finalized_ = true; + + if (!(analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) && + decoded_frame_writers_ == nullptr) { + return; + } + + for (size_t spatial_idx = 0; spatial_idx < num_simulcast_or_spatial_layers_; + ++spatial_idx) { + if (first_decoded_frame_[spatial_idx]) { + continue; // No decoded frames on this spatial layer. + } + + for (size_t dropped_frame_number = last_decoded_frame_num_[spatial_idx] + 1; + dropped_frame_number < last_inputed_frame_num_; + ++dropped_frame_number) { + FrameStatistics* frame_stat = + stats_->GetFrame(dropped_frame_number, spatial_idx); + + RTC_DCHECK(!frame_stat->decoding_successful); + + if (analyze_frame_quality_ && config_.analyze_quality_of_dropped_frames) { + CalcFrameQuality(*last_decoded_frame_buffer_[spatial_idx], frame_stat); + } + + if (decoded_frame_writers_ != nullptr) { + WriteDecodedFrame(*last_decoded_frame_buffer_[spatial_idx], + *decoded_frame_writers_->at(spatial_idx)); + } + } + } +} + } // namespace test } // namespace webrtc diff --git a/modules/video_coding/codecs/test/videoprocessor.h b/modules/video_coding/codecs/test/videoprocessor.h index bed65bdb2e..d9e10f13bf 100644 --- a/modules/video_coding/codecs/test/videoprocessor.h +++ b/modules/video_coding/codecs/test/videoprocessor.h @@ -20,10 +20,12 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/task_queue/queued_task.h" #include "api/task_queue/task_queue_base.h" #include "api/test/videocodec_test_fixture.h" #include "api/video/encoded_image.h" +#include "api/video/i420_buffer.h" #include "api/video/video_bitrate_allocation.h" #include "api/video/video_bitrate_allocator.h" 
#include "api/video/video_frame.h" @@ -36,9 +38,8 @@ #include "rtc_base/buffer.h" #include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "test/testsupport/frame_reader.h" #include "test/testsupport/frame_writer.h" @@ -58,6 +59,7 @@ class VideoProcessor { // TODO(brandtr): Consider changing FrameWriterList to be a FrameWriterMap, // to be able to save different TLs separately. using FrameWriterList = std::vector>; + using FrameStatistics = VideoCodecTestStats::FrameStatistics; VideoProcessor(webrtc::VideoEncoder* encoder, VideoDecoderList* decoders, @@ -77,6 +79,11 @@ class VideoProcessor { // Updates the encoder with target rates. Must be called at least once. void SetRates(size_t bitrate_kbps, double framerate_fps); + // Signals processor to finalize frame processing and handle possible tail + // drops. If not called expelicitly, this will be called in dtor. It is + // unexpected to get ProcessFrame() or SetRates() calls after Finalize(). + void Finalize(); + private: class VideoProcessorEncodeCompleteCallback : public webrtc::EncodedImageCallback { @@ -91,8 +98,7 @@ class VideoProcessor { Result OnEncodedImage( const webrtc::EncodedImage& encoded_image, - const webrtc::CodecSpecificInfo* codec_specific_info, - const webrtc::RTPFragmentationHeader* fragmentation) override { + const webrtc::CodecSpecificInfo* codec_specific_info) override { RTC_CHECK(codec_specific_info); // Post the callback to the right task queue, if needed. 
@@ -115,7 +121,6 @@ class VideoProcessor { : video_processor_(video_processor), encoded_image_(encoded_image), codec_specific_info_(*codec_specific_info) { - encoded_image_.Retain(); } bool Run() override { @@ -183,9 +188,20 @@ class VideoProcessor { size_t simulcast_svc_idx, bool inter_layer_predicted) RTC_RUN_ON(sequence_checker_); - // Test input/output. - VideoCodecTestFixture::Config config_ RTC_GUARDED_BY(sequence_checker_); + void CalcFrameQuality(const I420BufferInterface& decoded_frame, + FrameStatistics* frame_stat); + + void WriteDecodedFrame(const I420BufferInterface& decoded_frame, + FrameWriter& frame_writer); + + void HandleTailDrops(); + + // Test config. + const VideoCodecTestFixture::Config config_; const size_t num_simulcast_or_spatial_layers_; + const bool analyze_frame_quality_; + + // Frame statistics. VideoCodecTestStatsImpl* const stats_; // Codecs. @@ -241,7 +257,7 @@ class VideoProcessor { // simulcast_svc_idx -> frame_number. std::vector last_decoded_frame_num_ RTC_GUARDED_BY(sequence_checker_); // simulcast_svc_idx -> buffer. - std::vector decoded_frame_buffer_ + std::vector> last_decoded_frame_buffer_ RTC_GUARDED_BY(sequence_checker_); // Time spent in frame encode callback. It is accumulated for layers and @@ -249,8 +265,11 @@ class VideoProcessor { // is substracted from measured encode time. Thus we get pure encode time. int64_t post_encode_time_ns_ RTC_GUARDED_BY(sequence_checker_); + // Indicates whether Finalize() was called or not. + bool is_finalized_ RTC_GUARDED_BY(sequence_checker_); + // This class must be operated on a TaskQueue. 
- SequenceChecker sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; RTC_DISALLOW_COPY_AND_ASSIGN(VideoProcessor); }; diff --git a/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/modules/video_coding/codecs/vp8/default_temporal_layers.cc index 83ea450d88..c84d9acb1c 100644 --- a/modules/video_coding/codecs/vp8/default_temporal_layers.cc +++ b/modules/video_coding/codecs/vp8/default_temporal_layers.cc @@ -27,10 +27,12 @@ namespace webrtc { DefaultTemporalLayers::PendingFrame::PendingFrame() = default; DefaultTemporalLayers::PendingFrame::PendingFrame( + uint32_t timestamp, bool expired, uint8_t updated_buffers_mask, const DependencyInfo& dependency_info) - : expired(expired), + : timestamp(timestamp), + expired(expired), updated_buffer_mask(updated_buffers_mask), dependency_info(dependency_info) {} @@ -96,8 +98,24 @@ uint8_t GetUpdatedBuffers(const Vp8FrameConfig& config) { } return flags; } + +size_t BufferToIndex(Vp8BufferReference buffer) { + switch (buffer) { + case Vp8FrameConfig::Vp8BufferReference::kLast: + return 0; + case Vp8FrameConfig::Vp8BufferReference::kGolden: + return 1; + case Vp8FrameConfig::Vp8BufferReference::kAltref: + return 2; + case Vp8FrameConfig::Vp8BufferReference::kNone: + RTC_CHECK_NOTREACHED(); + } +} + } // namespace +constexpr size_t DefaultTemporalLayers::kNumReferenceBuffers; + std::vector DefaultTemporalLayers::GetDependencyInfo(size_t num_layers) { // For indexing in the patterns described below (which temporal layers they @@ -225,11 +243,30 @@ DefaultTemporalLayers::GetDependencyInfo(size_t num_layers) { return {{"", {kNone, kNone, kNone}}}; } +std::bitset +DefaultTemporalLayers::DetermineStaticBuffers( + const std::vector& temporal_pattern) { + std::bitset buffers; + buffers.set(); + for (const DependencyInfo& info : temporal_pattern) { + uint8_t updated_buffers = GetUpdatedBuffers(info.frame_config); + + for (Vp8BufferReference buffer : kAllBuffers) { + if (static_cast(buffer) & 
updated_buffers) { + buffers.reset(BufferToIndex(buffer)); + } + } + } + return buffers; +} + DefaultTemporalLayers::DefaultTemporalLayers(int number_of_temporal_layers) : num_layers_(std::max(1, number_of_temporal_layers)), temporal_ids_(GetTemporalIds(num_layers_)), temporal_pattern_(GetDependencyInfo(num_layers_)), - pattern_idx_(kUninitializedPatternIndex) { + is_static_buffer_(DetermineStaticBuffers(temporal_pattern_)), + pattern_idx_(kUninitializedPatternIndex), + new_bitrates_bps_(std::vector(num_layers_, 0u)) { RTC_CHECK_GE(kMaxTemporalStreams, number_of_temporal_layers); RTC_CHECK_GE(number_of_temporal_layers, 0); RTC_CHECK_LE(number_of_temporal_layers, 4); @@ -238,25 +275,12 @@ DefaultTemporalLayers::DefaultTemporalLayers(int number_of_temporal_layers) // wrap at max(temporal_ids_.size(), temporal_pattern_.size()). RTC_DCHECK_LE(temporal_ids_.size(), temporal_pattern_.size()); -#if RTC_DCHECK_IS_ON - checker_ = TemporalLayersChecker::CreateTemporalLayersChecker( - Vp8TemporalLayersType::kFixedPattern, number_of_temporal_layers); -#endif + RTC_DCHECK( + checker_ = TemporalLayersChecker::CreateTemporalLayersChecker( + Vp8TemporalLayersType::kFixedPattern, number_of_temporal_layers)); // Always need to start with a keyframe, so pre-populate all frame counters. 
- for (Vp8BufferReference buffer : kAllBuffers) { - frames_since_buffer_refresh_[buffer] = 0; - } - - kf_buffers_ = {kAllBuffers.begin(), kAllBuffers.end()}; - for (const DependencyInfo& info : temporal_pattern_) { - uint8_t updated_buffers = GetUpdatedBuffers(info.frame_config); - - for (Vp8BufferReference buffer : kAllBuffers) { - if (static_cast(buffer) & updated_buffers) - kf_buffers_.erase(buffer); - } - } + frames_since_buffer_refresh_.fill(0); } DefaultTemporalLayers::~DefaultTemporalLayers() = default; @@ -340,12 +364,12 @@ bool DefaultTemporalLayers::IsSyncFrame(const Vp8FrameConfig& config) const { } if ((config.golden_buffer_flags & BufferFlags::kReference) && - kf_buffers_.find(Vp8BufferReference::kGolden) == kf_buffers_.end()) { + !is_static_buffer_[BufferToIndex(Vp8BufferReference::kGolden)]) { // Referencing a golden frame that contains a non-(base layer|key frame). return false; } if ((config.arf_buffer_flags & BufferFlags::kReference) && - kf_buffers_.find(Vp8BufferReference::kAltref) == kf_buffers_.end()) { + !is_static_buffer_[BufferToIndex(Vp8BufferReference::kAltref)]) { // Referencing an altref frame that contains a non-(base layer|key frame). return false; } @@ -372,8 +396,8 @@ Vp8FrameConfig DefaultTemporalLayers::NextFrameConfig(size_t stream_index, // Start of new pattern iteration, set up clear state by invalidating any // pending frames, so that we don't make an invalid reference to a buffer // containing data from a previous iteration. - for (auto& it : pending_frames_) { - it.second.expired = true; + for (auto& frame : pending_frames_) { + frame.expired = true; } } @@ -401,21 +425,19 @@ Vp8FrameConfig DefaultTemporalLayers::NextFrameConfig(size_t stream_index, // To prevent this data spill over into the next iteration, // the |pedning_frames_| map is reset in loops. If delay is constant, // the relative age should still be OK for the search order. 
- for (Vp8BufferReference buffer : kAllBuffers) { - ++frames_since_buffer_refresh_[buffer]; + for (size_t& n : frames_since_buffer_refresh_) { + ++n; } } // Add frame to set of pending frames, awaiting completion. - pending_frames_[timestamp] = - PendingFrame{false, GetUpdatedBuffers(tl_config), dependency_info}; + pending_frames_.emplace_back(timestamp, false, GetUpdatedBuffers(tl_config), + dependency_info); -#if RTC_DCHECK_IS_ON // Checker does not yet support encoder frame dropping, so validate flags // here before they can be dropped. // TODO(sprang): Update checker to support dropping. RTC_DCHECK(checker_->CheckTemporalConfig(first_frame, tl_config)); -#endif return tl_config; } @@ -426,10 +448,8 @@ void DefaultTemporalLayers::ValidateReferences(BufferFlags* flags, // if it also a dynamically updating one (buffers always just containing // keyframes are always safe to reference). if ((*flags & BufferFlags::kReference) && - kf_buffers_.find(ref) == kf_buffers_.end()) { - auto it = frames_since_buffer_refresh_.find(ref); - if (it == frames_since_buffer_refresh_.end() || - it->second >= pattern_idx_) { + !is_static_buffer_[BufferToIndex(ref)]) { + if (NumFramesSinceBufferRefresh(ref) >= pattern_idx_) { // No valid buffer state, or buffer contains frame that is older than the // current pattern. This reference is not valid, so remove it. 
*flags = static_cast(*flags & ~BufferFlags::kReference); @@ -446,17 +466,17 @@ void DefaultTemporalLayers::UpdateSearchOrder(Vp8FrameConfig* config) { if (config->last_buffer_flags & BufferFlags::kReference) { eligible_buffers.emplace_back( Vp8BufferReference::kLast, - frames_since_buffer_refresh_[Vp8BufferReference::kLast]); + NumFramesSinceBufferRefresh(Vp8BufferReference::kLast)); } if (config->golden_buffer_flags & BufferFlags::kReference) { eligible_buffers.emplace_back( Vp8BufferReference::kGolden, - frames_since_buffer_refresh_[Vp8BufferReference::kGolden]); + NumFramesSinceBufferRefresh(Vp8BufferReference::kGolden)); } if (config->arf_buffer_flags & BufferFlags::kReference) { eligible_buffers.emplace_back( Vp8BufferReference::kAltref, - frames_since_buffer_refresh_[Vp8BufferReference::kAltref]); + NumFramesSinceBufferRefresh(Vp8BufferReference::kAltref)); } std::sort(eligible_buffers.begin(), eligible_buffers.end(), @@ -476,6 +496,23 @@ void DefaultTemporalLayers::UpdateSearchOrder(Vp8FrameConfig* config) { } } +size_t DefaultTemporalLayers::NumFramesSinceBufferRefresh( + Vp8FrameConfig::Vp8BufferReference ref) const { + return frames_since_buffer_refresh_[BufferToIndex(ref)]; +} + +void DefaultTemporalLayers::ResetNumFramesSinceBufferRefresh( + Vp8FrameConfig::Vp8BufferReference ref) { + frames_since_buffer_refresh_[BufferToIndex(ref)] = 0; +} + +void DefaultTemporalLayers::CullPendingFramesBefore(uint32_t timestamp) { + while (!pending_frames_.empty() && + pending_frames_.front().timestamp != timestamp) { + pending_frames_.pop_front(); + } +} + void DefaultTemporalLayers::OnEncodeDone(size_t stream_index, uint32_t rtp_timestamp, size_t size_bytes, @@ -491,17 +528,15 @@ void DefaultTemporalLayers::OnEncodeDone(size_t stream_index, return; } - auto pending_frame = pending_frames_.find(rtp_timestamp); - RTC_DCHECK(pending_frame != pending_frames_.end()); - - PendingFrame& frame = pending_frame->second; + CullPendingFramesBefore(rtp_timestamp); + 
RTC_CHECK(!pending_frames_.empty()); + PendingFrame& frame = pending_frames_.front(); + RTC_DCHECK_EQ(frame.timestamp, rtp_timestamp); const Vp8FrameConfig& frame_config = frame.dependency_info.frame_config; -#if RTC_DCHECK_IS_ON if (is_keyframe) { // Signal key-frame so checker resets state. RTC_DCHECK(checker_->CheckTemporalConfig(true, frame_config)); } -#endif CodecSpecificInfoVP8& vp8_info = info->codecSpecific.VP8; if (num_layers_ == 1) { @@ -515,10 +550,10 @@ void DefaultTemporalLayers::OnEncodeDone(size_t stream_index, vp8_info.layerSync = true; // Keyframes are always sync frames. for (Vp8BufferReference buffer : kAllBuffers) { - if (kf_buffers_.find(buffer) != kf_buffers_.end()) { + if (is_static_buffer_[BufferToIndex(buffer)]) { // Update frame count of all kf-only buffers, regardless of state of // |pending_frames_|. - frames_since_buffer_refresh_[buffer] = 0; + ResetNumFramesSinceBufferRefresh(buffer); } else { // Key-frames update all buffers, this should be reflected when // updating state in FrameEncoded(). 
@@ -558,8 +593,9 @@ void DefaultTemporalLayers::OnEncodeDone(size_t stream_index, vp8_info.updatedBuffers[vp8_info.updatedBuffersCount++] = i; } - if (references || updates) + if (references || updates) { generic_frame_info.encoder_buffers.emplace_back(i, references, updates); + } } // The templates are always present on keyframes, and then refered to by @@ -578,19 +614,20 @@ void DefaultTemporalLayers::OnEncodeDone(size_t stream_index, if (!frame.expired) { for (Vp8BufferReference buffer : kAllBuffers) { if (frame.updated_buffer_mask & static_cast(buffer)) { - frames_since_buffer_refresh_[buffer] = 0; + ResetNumFramesSinceBufferRefresh(buffer); } } } - pending_frames_.erase(pending_frame); + pending_frames_.pop_front(); } void DefaultTemporalLayers::OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) { - auto pending_frame = pending_frames_.find(rtp_timestamp); - RTC_DCHECK(pending_frame != pending_frames_.end()); - pending_frames_.erase(pending_frame); + CullPendingFramesBefore(rtp_timestamp); + RTC_CHECK(!pending_frames_.empty()); + RTC_DCHECK_EQ(pending_frames_.front().timestamp, rtp_timestamp); + pending_frames_.pop_front(); } void DefaultTemporalLayers::OnPacketLossRateUpdate(float packet_loss_rate) {} @@ -608,58 +645,52 @@ FrameDependencyStructure DefaultTemporalLayers::GetTemplateStructure( FrameDependencyStructure template_structure; template_structure.num_decode_targets = num_layers; - using Builder = GenericFrameInfo::Builder; switch (num_layers) { case 1: { - template_structure.templates = { - Builder().T(0).Dtis("S").Build(), - Builder().T(0).Dtis("S").Fdiffs({1}).Build(), - }; + template_structure.templates.resize(2); + template_structure.templates[0].T(0).Dtis("S"); + template_structure.templates[1].T(0).Dtis("S").FrameDiffs({1}); return template_structure; } case 2: { - template_structure.templates = { - Builder().T(0).Dtis("SS").Build(), - Builder().T(0).Dtis("SS").Fdiffs({2}).Build(), - Builder().T(0).Dtis("SR").Fdiffs({2}).Build(), - 
Builder().T(1).Dtis("-S").Fdiffs({1}).Build(), - Builder().T(1).Dtis("-D").Fdiffs({1, 2}).Build(), - }; + template_structure.templates.resize(5); + template_structure.templates[0].T(0).Dtis("SS"); + template_structure.templates[1].T(0).Dtis("SS").FrameDiffs({2}); + template_structure.templates[2].T(0).Dtis("SR").FrameDiffs({2}); + template_structure.templates[3].T(1).Dtis("-S").FrameDiffs({1}); + template_structure.templates[4].T(1).Dtis("-D").FrameDiffs({2, 1}); return template_structure; } case 3: { if (field_trial::IsEnabled("WebRTC-UseShortVP8TL3Pattern")) { - template_structure.templates = { - Builder().T(0).Dtis("SSS").Build(), - Builder().T(0).Dtis("SSS").Fdiffs({4}).Build(), - Builder().T(1).Dtis("-DR").Fdiffs({2}).Build(), - Builder().T(2).Dtis("--S").Fdiffs({1}).Build(), - Builder().T(2).Dtis("--D").Fdiffs({1, 2}).Build(), - }; + template_structure.templates.resize(5); + template_structure.templates[0].T(0).Dtis("SSS"); + template_structure.templates[1].T(0).Dtis("SSS").FrameDiffs({4}); + template_structure.templates[2].T(1).Dtis("-DR").FrameDiffs({2}); + template_structure.templates[3].T(2).Dtis("--S").FrameDiffs({1}); + template_structure.templates[4].T(2).Dtis("--D").FrameDiffs({2, 1}); } else { - template_structure.templates = { - Builder().T(0).Dtis("SSS").Build(), - Builder().T(0).Dtis("SSS").Fdiffs({4}).Build(), - Builder().T(0).Dtis("SRR").Fdiffs({4}).Build(), - Builder().T(1).Dtis("-SS").Fdiffs({2}).Build(), - Builder().T(1).Dtis("-DS").Fdiffs({2, 4}).Build(), - Builder().T(2).Dtis("--D").Fdiffs({1}).Build(), - Builder().T(2).Dtis("--D").Fdiffs({1, 3}).Build(), - }; + template_structure.templates.resize(7); + template_structure.templates[0].T(0).Dtis("SSS"); + template_structure.templates[1].T(0).Dtis("SSS").FrameDiffs({4}); + template_structure.templates[2].T(0).Dtis("SRR").FrameDiffs({4}); + template_structure.templates[3].T(1).Dtis("-SS").FrameDiffs({2}); + template_structure.templates[4].T(1).Dtis("-DS").FrameDiffs({4, 2}); + 
template_structure.templates[5].T(2).Dtis("--D").FrameDiffs({1}); + template_structure.templates[6].T(2).Dtis("--D").FrameDiffs({3, 1}); } return template_structure; } case 4: { - template_structure.templates = { - Builder().T(0).Dtis("SSSS").Build(), - Builder().T(0).Dtis("SSSS").Fdiffs({8}).Build(), - Builder().T(1).Dtis("-SRR").Fdiffs({4}).Build(), - Builder().T(1).Dtis("-SRR").Fdiffs({4, 8}).Build(), - Builder().T(2).Dtis("--SR").Fdiffs({2}).Build(), - Builder().T(2).Dtis("--SR").Fdiffs({2, 4}).Build(), - Builder().T(3).Dtis("---D").Fdiffs({1}).Build(), - Builder().T(3).Dtis("---D").Fdiffs({1, 3}).Build(), - }; + template_structure.templates.resize(8); + template_structure.templates[0].T(0).Dtis("SSSS"); + template_structure.templates[1].T(0).Dtis("SSSS").FrameDiffs({8}); + template_structure.templates[2].T(1).Dtis("-SRR").FrameDiffs({4}); + template_structure.templates[3].T(1).Dtis("-SRR").FrameDiffs({4, 8}); + template_structure.templates[4].T(2).Dtis("--SR").FrameDiffs({2}); + template_structure.templates[5].T(2).Dtis("--SR").FrameDiffs({2, 4}); + template_structure.templates[6].T(3).Dtis("---D").FrameDiffs({1}); + template_structure.templates[7].T(3).Dtis("---D").FrameDiffs({1, 3}); return template_structure; } default: diff --git a/modules/video_coding/codecs/vp8/default_temporal_layers.h b/modules/video_coding/codecs/vp8/default_temporal_layers.h index 29cfcf0489..bc6574c54c 100644 --- a/modules/video_coding/codecs/vp8/default_temporal_layers.h +++ b/modules/video_coding/codecs/vp8/default_temporal_layers.h @@ -15,8 +15,9 @@ #include #include +#include +#include #include -#include #include #include #include @@ -53,13 +54,15 @@ class DefaultTemporalLayers final : public Vp8FrameBufferController { Vp8EncoderConfig UpdateConfiguration(size_t stream_index) override; + // Callbacks methods on frame completion. 
OnEncodeDone() or OnFrameDropped() + // should be called once for each NextFrameConfig() call (using the RTP + // timestamp as ID), and the calls MUST be in the same order. void OnEncodeDone(size_t stream_index, uint32_t rtp_timestamp, size_t size_bytes, bool is_keyframe, int qp, CodecSpecificInfo* info) override; - void OnFrameDropped(size_t stream_index, uint32_t rtp_timestamp) override; void OnPacketLossRateUpdate(float packet_loss_rate) override; @@ -70,57 +73,66 @@ class DefaultTemporalLayers final : public Vp8FrameBufferController { const VideoEncoder::LossNotification& loss_notification) override; private: + static constexpr size_t kNumReferenceBuffers = 3; // Last, golden, altref. struct DependencyInfo { DependencyInfo() = default; DependencyInfo(absl::string_view indication_symbols, Vp8FrameConfig frame_config) : decode_target_indications( - GenericFrameInfo::DecodeTargetInfo(indication_symbols)), + webrtc_impl::StringToDecodeTargetIndications(indication_symbols)), frame_config(frame_config) {} absl::InlinedVector decode_target_indications; Vp8FrameConfig frame_config; }; + struct PendingFrame { + PendingFrame(); + PendingFrame(uint32_t timestamp, + bool expired, + uint8_t updated_buffers_mask, + const DependencyInfo& dependency_info); + uint32_t timestamp = 0; + // Flag indicating if this frame has expired, ie it belongs to a previous + // iteration of the temporal pattern. + bool expired = false; + // Bitmask of Vp8BufferReference flags, indicating which buffers this frame + // updates. + uint8_t updated_buffer_mask = 0; + // The frame config returned by NextFrameConfig() for this frame. 
+ DependencyInfo dependency_info; + }; static std::vector GetDependencyInfo(size_t num_layers); + static std::bitset DetermineStaticBuffers( + const std::vector& temporal_pattern); bool IsSyncFrame(const Vp8FrameConfig& config) const; void ValidateReferences(Vp8FrameConfig::BufferFlags* flags, Vp8FrameConfig::Vp8BufferReference ref) const; void UpdateSearchOrder(Vp8FrameConfig* config); + size_t NumFramesSinceBufferRefresh( + Vp8FrameConfig::Vp8BufferReference ref) const; + void ResetNumFramesSinceBufferRefresh(Vp8FrameConfig::Vp8BufferReference ref); + void CullPendingFramesBefore(uint32_t timestamp); const size_t num_layers_; const std::vector temporal_ids_; const std::vector temporal_pattern_; - // Set of buffers that are never updated except by keyframes. - std::set kf_buffers_; + // Per reference buffer flag indicating if it is static, meaning it is only + // updated by key-frames. + const std::bitset is_static_buffer_; FrameDependencyStructure GetTemplateStructure(int num_layers) const; uint8_t pattern_idx_; // Updated cumulative bitrates, per temporal layer. absl::optional> new_bitrates_bps_; - struct PendingFrame { - PendingFrame(); - PendingFrame(bool expired, - uint8_t updated_buffers_mask, - const DependencyInfo& dependency_info); - // Flag indicating if this frame has expired, ie it belongs to a previous - // iteration of the temporal pattern. - bool expired = false; - // Bitmask of Vp8BufferReference flags, indicating which buffers this frame - // updates. - uint8_t updated_buffer_mask = 0; - // The frame config returned by NextFrameConfig() for this frame. - DependencyInfo dependency_info; - }; - // Map from rtp timestamp to pending frame status. Reset on pattern loop. - std::map pending_frames_; + // Status for each pending frame, in + std::deque pending_frames_; - // One counter per Vp8BufferReference, indicating number of frames since last + // One counter per reference buffer, indicating number of frames since last // refresh. 
For non-base-layer frames (ie golden, altref buffers), this is // reset when the pattern loops. - std::map - frames_since_buffer_refresh_; + std::array frames_since_buffer_refresh_; // Optional utility used to verify reference validity. std::unique_ptr checker_; diff --git a/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc b/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc index 64ad40ab76..a18ac40e7d 100644 --- a/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc +++ b/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc @@ -687,6 +687,25 @@ TEST_F(TemporalLayersTest, KeyFrame) { } } +TEST_F(TemporalLayersTest, SetsTlCountOnFirstConfigUpdate) { + // Create an instance and fetch config update without setting any rate. + constexpr int kNumLayers = 2; + DefaultTemporalLayers tl(kNumLayers); + Vp8EncoderConfig config = tl.UpdateConfiguration(0); + + // Config should indicate correct number of temporal layers, but zero bitrate. + ASSERT_TRUE(config.temporal_layer_config.has_value()); + EXPECT_EQ(config.temporal_layer_config->ts_number_layers, + uint32_t{kNumLayers}); + std::array + kZeroRate = {}; + EXPECT_EQ(config.temporal_layer_config->ts_target_bitrate, kZeroRate); + + // On second call, no new update. 
+ config = tl.UpdateConfiguration(0); + EXPECT_FALSE(config.temporal_layer_config.has_value()); +} + class TemporalLayersReferenceTest : public TemporalLayersTest, public ::testing::WithParamInterface { public: diff --git a/modules/video_coding/codecs/vp8/include/vp8.h b/modules/video_coding/codecs/vp8/include/vp8.h index 44efbeeb3b..d05c3a68d1 100644 --- a/modules/video_coding/codecs/vp8/include/vp8.h +++ b/modules/video_coding/codecs/vp8/include/vp8.h @@ -14,10 +14,10 @@ #include #include +#include "absl/base/attributes.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/vp8_frame_buffer_controller.h" #include "modules/video_coding/include/video_codec_interface.h" -#include "rtc_base/deprecation.h" namespace webrtc { @@ -40,7 +40,8 @@ class VP8Encoder { static std::unique_ptr Create(); static std::unique_ptr Create(Settings settings); - RTC_DEPRECATED static std::unique_ptr Create( + ABSL_DEPRECATED("") + static std::unique_ptr Create( std::unique_ptr frame_buffer_controller_factory); }; diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc index a3ee2c0c41..9d6ffdba90 100644 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc @@ -44,26 +44,44 @@ constexpr int kVp8ErrorPropagationTh = 30; constexpr long kDecodeDeadlineRealtime = 1; // NOLINT const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Config-Arm"; +const char kVp8PostProcFieldTrial[] = "WebRTC-VP8-Postproc-Config"; -void GetPostProcParamsFromFieldTrialGroup( - LibvpxVp8Decoder::DeblockParams* deblock_params) { - std::string group = - webrtc::field_trial::FindFullName(kVp8PostProcArmFieldTrial); - if (group.empty()) - return; +#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \ + defined(WEBRTC_ANDROID) +constexpr bool kIsArm = true; +#else +constexpr bool kIsArm = false; +#endif + +absl::optional DefaultDeblockParams() { + 
return LibvpxVp8Decoder::DeblockParams(/*max_level=*/8, + /*degrade_qp=*/60, + /*min_qp=*/30); +} + +absl::optional +GetPostProcParamsFromFieldTrialGroup() { + std::string group = webrtc::field_trial::FindFullName( + kIsArm ? kVp8PostProcArmFieldTrial : kVp8PostProcFieldTrial); + if (group.empty()) { + return DefaultDeblockParams(); + } LibvpxVp8Decoder::DeblockParams params; if (sscanf(group.c_str(), "Enabled-%d,%d,%d", ¶ms.max_level, - ¶ms.min_qp, ¶ms.degrade_qp) != 3) - return; + ¶ms.min_qp, ¶ms.degrade_qp) != 3) { + return DefaultDeblockParams(); + } - if (params.max_level < 0 || params.max_level > 16) - return; + if (params.max_level < 0 || params.max_level > 16) { + return DefaultDeblockParams(); + } - if (params.min_qp < 0 || params.degrade_qp <= params.min_qp) - return; + if (params.min_qp < 0 || params.degrade_qp <= params.min_qp) { + return DefaultDeblockParams(); + } - *deblock_params = params; + return params; } } // namespace @@ -97,8 +115,9 @@ class LibvpxVp8Decoder::QpSmoother { }; LibvpxVp8Decoder::LibvpxVp8Decoder() - : use_postproc_arm_( - webrtc::field_trial::IsEnabled(kVp8PostProcArmFieldTrial)), + : use_postproc_( + kIsArm ? webrtc::field_trial::IsEnabled(kVp8PostProcArmFieldTrial) + : true), buffer_pool_(false, 300 /* max_number_of_buffers*/), decode_complete_callback_(NULL), inited_(false), @@ -107,10 +126,12 @@ LibvpxVp8Decoder::LibvpxVp8Decoder() last_frame_width_(0), last_frame_height_(0), key_frame_required_(true), - qp_smoother_(use_postproc_arm_ ? new QpSmoother() : nullptr) { - if (use_postproc_arm_) - GetPostProcParamsFromFieldTrialGroup(&deblock_); -} + deblock_params_(use_postproc_ ? GetPostProcParamsFromFieldTrialGroup() + : absl::nullopt), + qp_smoother_(use_postproc_ ? new QpSmoother() : nullptr), + preferred_output_format_(field_trial::IsEnabled("WebRTC-NV12Decode") + ? 
VideoFrameBuffer::Type::kNV12 + : VideoFrameBuffer::Type::kI420) {} LibvpxVp8Decoder::~LibvpxVp8Decoder() { inited_ = true; // in order to do the actual release @@ -131,12 +152,7 @@ int LibvpxVp8Decoder::InitDecode(const VideoCodec* inst, int number_of_cores) { cfg.threads = 1; cfg.h = cfg.w = 0; // set after decode -#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \ - defined(WEBRTC_ANDROID) - vpx_codec_flags_t flags = use_postproc_arm_ ? VPX_CODEC_USE_POSTPROC : 0; -#else - vpx_codec_flags_t flags = VPX_CODEC_USE_POSTPROC; -#endif + vpx_codec_flags_t flags = use_postproc_ ? VPX_CODEC_USE_POSTPROC : 0; if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) { delete decoder_; @@ -174,63 +190,60 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image, } // Post process configurations. -#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \ - defined(WEBRTC_ANDROID) - if (use_postproc_arm_) { + if (use_postproc_) { vp8_postproc_cfg_t ppcfg; + // MFQE enabled to reduce key frame popping. ppcfg.post_proc_flag = VP8_MFQE; - // For low resolutions, use stronger deblocking filter. - int last_width_x_height = last_frame_width_ * last_frame_height_; - if (last_width_x_height > 0 && last_width_x_height <= 320 * 240) { - // Enable the deblock and demacroblocker based on qp thresholds. - RTC_DCHECK(qp_smoother_); - int qp = qp_smoother_->GetAvg(); - if (qp > deblock_.min_qp) { - int level = deblock_.max_level; - if (qp < deblock_.degrade_qp) { - // Use lower level. - level = deblock_.max_level * (qp - deblock_.min_qp) / - (deblock_.degrade_qp - deblock_.min_qp); + + if (kIsArm) { + RTC_DCHECK(deblock_params_.has_value()); + } + if (deblock_params_.has_value()) { + // For low resolutions, use stronger deblocking filter. + int last_width_x_height = last_frame_width_ * last_frame_height_; + if (last_width_x_height > 0 && last_width_x_height <= 320 * 240) { + // Enable the deblock and demacroblocker based on qp thresholds. 
+ RTC_DCHECK(qp_smoother_); + int qp = qp_smoother_->GetAvg(); + if (qp > deblock_params_->min_qp) { + int level = deblock_params_->max_level; + if (qp < deblock_params_->degrade_qp) { + // Use lower level. + level = deblock_params_->max_level * + (qp - deblock_params_->min_qp) / + (deblock_params_->degrade_qp - deblock_params_->min_qp); + } + // Deblocking level only affects VP8_DEMACROBLOCK. + ppcfg.deblocking_level = std::max(level, 1); + ppcfg.post_proc_flag |= VP8_DEBLOCK | VP8_DEMACROBLOCK; } - // Deblocking level only affects VP8_DEMACROBLOCK. - ppcfg.deblocking_level = std::max(level, 1); - ppcfg.post_proc_flag |= VP8_DEBLOCK | VP8_DEMACROBLOCK; } + } else { + // Non-arm with no explicit deblock params set. + ppcfg.post_proc_flag |= VP8_DEBLOCK; + // For VGA resolutions and lower, enable the demacroblocker postproc. + if (last_frame_width_ * last_frame_height_ <= 640 * 360) { + ppcfg.post_proc_flag |= VP8_DEMACROBLOCK; + } + // Strength of deblocking filter. Valid range:[0,16] + ppcfg.deblocking_level = 3; } + vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg); } -#else - vp8_postproc_cfg_t ppcfg; - // MFQE enabled to reduce key frame popping. - ppcfg.post_proc_flag = VP8_MFQE | VP8_DEBLOCK; - // For VGA resolutions and lower, enable the demacroblocker postproc. - if (last_frame_width_ * last_frame_height_ <= 640 * 360) { - ppcfg.post_proc_flag |= VP8_DEMACROBLOCK; - } - // Strength of deblocking filter. Valid range:[0,16] - ppcfg.deblocking_level = 3; - vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg); -#endif // Always start with a complete key frame. if (key_frame_required_) { if (input_image._frameType != VideoFrameType::kVideoFrameKey) return WEBRTC_VIDEO_CODEC_ERROR; - // We have a key frame - is it complete? - if (input_image._completeFrame) { - key_frame_required_ = false; - } else { - return WEBRTC_VIDEO_CODEC_ERROR; - } + key_frame_required_ = false; } // Restrict error propagation using key frame requests. // Reset on a key frame refresh. 
- if (input_image._frameType == VideoFrameType::kVideoFrameKey && - input_image._completeFrame) { + if (input_image._frameType == VideoFrameType::kVideoFrameKey) { propagation_cnt_ = -1; // Start count on first loss. - } else if ((!input_image._completeFrame || missing_frames) && - propagation_cnt_ == -1) { + } else if (missing_frames && propagation_cnt_ == -1) { propagation_cnt_ = 0; } if (propagation_cnt_ >= 0) { @@ -307,8 +320,39 @@ int LibvpxVp8Decoder::ReturnFrame( last_frame_width_ = img->d_w; last_frame_height_ = img->d_h; // Allocate memory for decoded image. - rtc::scoped_refptr buffer = - buffer_pool_.CreateBuffer(img->d_w, img->d_h); + rtc::scoped_refptr buffer; + + if (preferred_output_format_ == VideoFrameBuffer::Type::kNV12) { + // Convert instead of making a copy. + // Note: libvpx doesn't support creating NV12 image directly. + // Due to the bitstream structure such a change would just hide the + // conversion operation inside the decode call. + rtc::scoped_refptr nv12_buffer = + buffer_pool_.CreateNV12Buffer(img->d_w, img->d_h); + buffer = nv12_buffer; + if (nv12_buffer.get()) { + libyuv::I420ToNV12(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y], + img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U], + img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], + nv12_buffer->MutableDataY(), nv12_buffer->StrideY(), + nv12_buffer->MutableDataUV(), nv12_buffer->StrideUV(), + img->d_w, img->d_h); + } + } else { + rtc::scoped_refptr i420_buffer = + buffer_pool_.CreateI420Buffer(img->d_w, img->d_h); + buffer = i420_buffer; + if (i420_buffer.get()) { + libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y], + img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U], + img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], + i420_buffer->MutableDataY(), i420_buffer->StrideY(), + i420_buffer->MutableDataU(), i420_buffer->StrideU(), + i420_buffer->MutableDataV(), i420_buffer->StrideV(), + img->d_w, img->d_h); + } + } + if (!buffer.get()) { // Pool has too 
many pending frames. RTC_HISTOGRAM_BOOLEAN("WebRTC.Video.LibvpxVp8Decoder.TooManyPendingFrames", @@ -316,14 +360,6 @@ int LibvpxVp8Decoder::ReturnFrame( return WEBRTC_VIDEO_CODEC_NO_OUTPUT; } - libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y], - img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U], - img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], - buffer->MutableDataY(), buffer->StrideY(), - buffer->MutableDataU(), buffer->StrideU(), - buffer->MutableDataV(), buffer->StrideV(), img->d_w, - img->d_h); - VideoFrame decoded_image = VideoFrame::Builder() .set_video_frame_buffer(buffer) .set_timestamp_rtp(timestamp) @@ -357,6 +393,13 @@ int LibvpxVp8Decoder::Release() { return ret_val; } +VideoDecoder::DecoderInfo LibvpxVp8Decoder::GetDecoderInfo() const { + DecoderInfo info; + info.implementation_name = "libvpx"; + info.is_hardware_accelerated = false; + return info; +} + const char* LibvpxVp8Decoder::ImplementationName() const { return "libvpx"; } diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h index d9bfee81c1..60295e5d5d 100644 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h @@ -13,9 +13,10 @@ #include +#include "absl/types/optional.h" #include "api/video/encoded_image.h" #include "api/video_codecs/video_decoder.h" -#include "common_video/include/i420_buffer_pool.h" +#include "common_video/include/video_frame_buffer_pool.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/include/video_codec_interface.h" #include "vpx/vp8dx.h" @@ -37,12 +38,16 @@ class LibvpxVp8Decoder : public VideoDecoder { int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override; int Release() override; + DecoderInfo GetDecoderInfo() const override; const char* ImplementationName() const override; struct DeblockParams { - int max_level = 6; // Deblocking strength: [0, 16]. 
- int degrade_qp = 1; // If QP value is below, start lowering |max_level|. - int min_qp = 0; // If QP value is below, turn off deblocking. + DeblockParams() : max_level(6), degrade_qp(1), min_qp(0) {} + DeblockParams(int max_level, int degrade_qp, int min_qp) + : max_level(max_level), degrade_qp(degrade_qp), min_qp(min_qp) {} + int max_level; // Deblocking strength: [0, 16]. + int degrade_qp; // If QP value is below, start lowering |max_level|. + int min_qp; // If QP value is below, turn off deblocking. }; private: @@ -51,9 +56,9 @@ class LibvpxVp8Decoder : public VideoDecoder { uint32_t timeStamp, int qp, const webrtc::ColorSpace* explicit_color_space); - const bool use_postproc_arm_; + const bool use_postproc_; - I420BufferPool buffer_pool_; + VideoFrameBufferPool buffer_pool_; DecodedImageCallback* decode_complete_callback_; bool inited_; vpx_codec_ctx_t* decoder_; @@ -61,8 +66,11 @@ class LibvpxVp8Decoder : public VideoDecoder { int last_frame_width_; int last_frame_height_; bool key_frame_required_; - DeblockParams deblock_; + const absl::optional deblock_params_; const std::unique_ptr qp_smoother_; + + // Decoder should produce this format if possible. 
+ const VideoFrameBuffer::Type preferred_output_format_; }; } // namespace webrtc diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc index ac04bc3e50..a994193031 100644 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc @@ -21,6 +21,7 @@ #include #include +#include "absl/algorithm/container.h" #include "api/scoped_refptr.h" #include "api/video/video_content_type.h" #include "api/video/video_frame_buffer.h" @@ -44,12 +45,12 @@ namespace webrtc { namespace { #if defined(WEBRTC_IOS) -const char kVP8IosMaxNumberOfThreadFieldTrial[] = +constexpr char kVP8IosMaxNumberOfThreadFieldTrial[] = "WebRTC-VP8IosMaxNumberOfThread"; -const char kVP8IosMaxNumberOfThreadFieldTrialParameter[] = "max_thread"; +constexpr char kVP8IosMaxNumberOfThreadFieldTrialParameter[] = "max_thread"; #endif -const char kVp8ForcePartitionResilience[] = +constexpr char kVp8ForcePartitionResilience[] = "WebRTC-VP8-ForcePartitionResilience"; // QP is obtained from VP8-bitstream for HW, so the QP corresponds to the @@ -63,9 +64,6 @@ constexpr uint32_t kVp832ByteAlign = 32u; constexpr int kRtpTicksPerSecond = 90000; constexpr int kRtpTicksPerMs = kRtpTicksPerSecond / 1000; -constexpr double kLowRateFactor = 1.0; -constexpr double kHighRateFactor = 2.0; - // VP8 denoiser states. enum denoiserState : uint32_t { kDenoiserOff, @@ -77,15 +75,6 @@ enum denoiserState : uint32_t { kDenoiserOnAdaptive }; -// These settings correspond to the settings in vpx_codec_enc_cfg. 
-struct Vp8RateSettings { - uint32_t rc_undershoot_pct; - uint32_t rc_overshoot_pct; - uint32_t rc_buf_sz; - uint32_t rc_buf_optimal_sz; - uint32_t rc_dropframe_thresh; -}; - // Greatest common divisior int GCD(int a, int b) { int c = a % b; @@ -97,56 +86,6 @@ int GCD(int a, int b) { return b; } -uint32_t Interpolate(uint32_t low, - uint32_t high, - double bandwidth_headroom_factor) { - RTC_DCHECK_GE(bandwidth_headroom_factor, kLowRateFactor); - RTC_DCHECK_LE(bandwidth_headroom_factor, kHighRateFactor); - - // |factor| is between 0.0 and 1.0. - const double factor = bandwidth_headroom_factor - kLowRateFactor; - - return static_cast(((1.0 - factor) * low) + (factor * high) + 0.5); -} - -Vp8RateSettings GetRateSettings(double bandwidth_headroom_factor) { - static const Vp8RateSettings low_settings{1000u, 0u, 100u, 30u, 40u}; - static const Vp8RateSettings high_settings{100u, 15u, 1000u, 600u, 5u}; - - if (bandwidth_headroom_factor <= kLowRateFactor) { - return low_settings; - } else if (bandwidth_headroom_factor >= kHighRateFactor) { - return high_settings; - } - - Vp8RateSettings settings; - settings.rc_undershoot_pct = - Interpolate(low_settings.rc_undershoot_pct, - high_settings.rc_undershoot_pct, bandwidth_headroom_factor); - settings.rc_overshoot_pct = - Interpolate(low_settings.rc_overshoot_pct, high_settings.rc_overshoot_pct, - bandwidth_headroom_factor); - settings.rc_buf_sz = - Interpolate(low_settings.rc_buf_sz, high_settings.rc_buf_sz, - bandwidth_headroom_factor); - settings.rc_buf_optimal_sz = - Interpolate(low_settings.rc_buf_optimal_sz, - high_settings.rc_buf_optimal_sz, bandwidth_headroom_factor); - settings.rc_dropframe_thresh = - Interpolate(low_settings.rc_dropframe_thresh, - high_settings.rc_dropframe_thresh, bandwidth_headroom_factor); - return settings; -} - -void UpdateRateSettings(vpx_codec_enc_cfg_t* config, - const Vp8RateSettings& new_settings) { - config->rc_undershoot_pct = new_settings.rc_undershoot_pct; - config->rc_overshoot_pct = 
new_settings.rc_overshoot_pct; - config->rc_buf_sz = new_settings.rc_buf_sz; - config->rc_buf_optimal_sz = new_settings.rc_buf_optimal_sz; - config->rc_dropframe_thresh = new_settings.rc_dropframe_thresh; -} - static_assert(Vp8EncoderConfig::TemporalLayerConfig::kMaxPeriodicity == VPX_TS_MAX_PERIODICITY, "Vp8EncoderConfig::kMaxPeriodicity must be kept in sync with the " @@ -222,16 +161,63 @@ void ApplyVp8EncoderConfigToVpxConfig(const Vp8EncoderConfig& encoder_config, } } +bool IsCompatibleVideoFrameBufferType(VideoFrameBuffer::Type left, + VideoFrameBuffer::Type right) { + if (left == VideoFrameBuffer::Type::kI420 || + left == VideoFrameBuffer::Type::kI420A) { + // LibvpxVp8Encoder does not care about the alpha channel, I420A and I420 + // are considered compatible. + return right == VideoFrameBuffer::Type::kI420 || + right == VideoFrameBuffer::Type::kI420A; + } + return left == right; +} + +void SetRawImagePlanes(vpx_image_t* raw_image, VideoFrameBuffer* buffer) { + switch (buffer->type()) { + case VideoFrameBuffer::Type::kI420: + case VideoFrameBuffer::Type::kI420A: { + const I420BufferInterface* i420_buffer = buffer->GetI420(); + RTC_DCHECK(i420_buffer); + raw_image->planes[VPX_PLANE_Y] = + const_cast(i420_buffer->DataY()); + raw_image->planes[VPX_PLANE_U] = + const_cast(i420_buffer->DataU()); + raw_image->planes[VPX_PLANE_V] = + const_cast(i420_buffer->DataV()); + raw_image->stride[VPX_PLANE_Y] = i420_buffer->StrideY(); + raw_image->stride[VPX_PLANE_U] = i420_buffer->StrideU(); + raw_image->stride[VPX_PLANE_V] = i420_buffer->StrideV(); + break; + } + case VideoFrameBuffer::Type::kNV12: { + const NV12BufferInterface* nv12_buffer = buffer->GetNV12(); + RTC_DCHECK(nv12_buffer); + raw_image->planes[VPX_PLANE_Y] = + const_cast(nv12_buffer->DataY()); + raw_image->planes[VPX_PLANE_U] = + const_cast(nv12_buffer->DataUV()); + raw_image->planes[VPX_PLANE_V] = raw_image->planes[VPX_PLANE_U] + 1; + raw_image->stride[VPX_PLANE_Y] = nv12_buffer->StrideY(); + 
raw_image->stride[VPX_PLANE_U] = nv12_buffer->StrideUV(); + raw_image->stride[VPX_PLANE_V] = nv12_buffer->StrideUV(); + break; + } + default: + RTC_NOTREACHED(); + } +} + } // namespace std::unique_ptr VP8Encoder::Create() { - return std::make_unique(LibvpxInterface::CreateEncoder(), + return std::make_unique(LibvpxInterface::Create(), VP8Encoder::Settings()); } std::unique_ptr VP8Encoder::Create( VP8Encoder::Settings settings) { - return std::make_unique(LibvpxInterface::CreateEncoder(), + return std::make_unique(LibvpxInterface::Create(), std::move(settings)); } @@ -241,7 +227,7 @@ std::unique_ptr VP8Encoder::Create( VP8Encoder::Settings settings; settings.frame_buffer_controller_factory = std::move(frame_buffer_controller_factory); - return std::make_unique(LibvpxInterface::CreateEncoder(), + return std::make_unique(LibvpxInterface::Create(), std::move(settings)); } @@ -277,7 +263,6 @@ vpx_enc_frame_flags_t LibvpxVp8Encoder::EncodeFlags( LibvpxVp8Encoder::LibvpxVp8Encoder(std::unique_ptr interface, VP8Encoder::Settings settings) : libvpx_(std::move(interface)), - experimental_cpu_speed_config_arm_(CpuSpeedExperiment::GetConfigs()), rate_control_settings_(RateControlSettings::ParseFromFieldTrials()), frame_buffer_controller_factory_( std::move(settings.frame_buffer_controller_factory)), @@ -392,18 +377,12 @@ void LibvpxVp8Encoder::SetRates(const RateControlParameters& parameters) { UpdateVpxConfiguration(stream_idx); - if (rate_control_settings_.Vp8DynamicRateSettings()) { - // Tweak rate control settings based on available network headroom. 
- UpdateRateSettings( - &vpx_configs_[i], - GetRateSettings(parameters.bandwidth_allocation.bps() / - parameters.bitrate.get_sum_bps())); - } - vpx_codec_err_t err = libvpx_->codec_enc_config_set(&encoders_[i], &vpx_configs_[i]); if (err != VPX_CODEC_OK) { - RTC_LOG(LS_WARNING) << "Error configuring codec, error code: " << err; + RTC_LOG(LS_WARNING) << "Error configuring codec, error code: " << err + << ", details: " + << libvpx_->codec_error_detail(&encoders_[i]); } } } @@ -480,6 +459,10 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst, return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; } + // Use the previous pixel format to avoid extra image allocations. + vpx_img_fmt_t pixel_format = + raw_images_.empty() ? VPX_IMG_FMT_I420 : raw_images_[0].fmt; + int retVal = Release(); if (retVal < 0) { return retVal; @@ -537,9 +520,7 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst, downsampling_factors_[number_of_streams - 1].num = 1; downsampling_factors_[number_of_streams - 1].den = 1; } - for (int i = 0; i < number_of_streams; ++i) { - encoded_images_[i]._completeFrame = true; - } + // populate encoder configuration with default values if (libvpx_->codec_enc_config_default(vpx_codec_vp8_cx(), &vpx_configs_[0], 0)) { @@ -633,8 +614,8 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst, // Creating a wrapper to the image - setting image data to NULL. // Actual pointer will be set in encode. Setting align to 1, as it // is meaningless (no memory allocation is done here). - libvpx_->img_wrap(&raw_images_[0], VPX_IMG_FMT_I420, inst->width, - inst->height, 1, NULL); + libvpx_->img_wrap(&raw_images_[0], pixel_format, inst->width, inst->height, 1, + NULL); // Note the order we use is different from webm, we have lowest resolution // at position 0 and they have highest resolution at position 0. 
@@ -682,10 +663,9 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst, // Setting alignment to 32 - as that ensures at least 16 for all // planes (32 for Y, 16 for U,V). Libvpx sets the requested stride for // the y plane, but only half of it to the u and v planes. - libvpx_->img_alloc(&raw_images_[i], VPX_IMG_FMT_I420, - inst->simulcastStream[stream_idx].width, - inst->simulcastStream[stream_idx].height, - kVp832ByteAlign); + libvpx_->img_alloc( + &raw_images_[i], pixel_format, inst->simulcastStream[stream_idx].width, + inst->simulcastStream[stream_idx].height, kVp832ByteAlign); SetStreamState(stream_bitrates[stream_idx] > 0, stream_idx); vpx_configs_[i].rc_target_bitrate = stream_bitrates[stream_idx]; if (stream_bitrates[stream_idx] > 0) { @@ -712,14 +692,17 @@ int LibvpxVp8Encoder::GetCpuSpeed(int width, int height) { // On mobile platform, use a lower speed setting for lower resolutions for // CPUs with 4 or more cores. RTC_DCHECK_GT(number_of_cores_, 0); + if (experimental_cpu_speed_config_arm_ + .GetValue(width * height, number_of_cores_) + .has_value()) { + return experimental_cpu_speed_config_arm_ + .GetValue(width * height, number_of_cores_) + .value(); + } + if (number_of_cores_ <= 3) return -12; - if (experimental_cpu_speed_config_arm_) { - return CpuSpeedExperiment::GetValue(width * height, - *experimental_cpu_speed_config_arm_); - } - if (width * height <= 352 * 288) return -8; else if (width * height <= 640 * 480) @@ -879,7 +862,7 @@ size_t LibvpxVp8Encoder::SteadyStateSize(int sid, int tid) { const int encoder_id = encoders_.size() - 1 - sid; size_t bitrate_bps; float fps; - if (SimulcastUtility::IsConferenceModeScreenshare(codec_) || + if ((SimulcastUtility::IsConferenceModeScreenshare(codec_) && sid == 0) || vpx_configs_[encoder_id].ts_number_layers <= 1) { // In conference screenshare there's no defined per temporal layer bitrate // and framerate. 
@@ -994,51 +977,29 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame, flags[i] = send_key_frame ? VPX_EFLAG_FORCE_KF : EncodeFlags(tl_configs[i]); } - rtc::scoped_refptr input_image = - frame.video_frame_buffer()->ToI420(); - // Since we are extracting raw pointers from |input_image| to - // |raw_images_[0]|, the resolution of these frames must match. - RTC_DCHECK_EQ(input_image->width(), raw_images_[0].d_w); - RTC_DCHECK_EQ(input_image->height(), raw_images_[0].d_h); - - // Image in vpx_image_t format. - // Input image is const. VP8's raw image is not defined as const. - raw_images_[0].planes[VPX_PLANE_Y] = - const_cast(input_image->DataY()); - raw_images_[0].planes[VPX_PLANE_U] = - const_cast(input_image->DataU()); - raw_images_[0].planes[VPX_PLANE_V] = - const_cast(input_image->DataV()); - - raw_images_[0].stride[VPX_PLANE_Y] = input_image->StrideY(); - raw_images_[0].stride[VPX_PLANE_U] = input_image->StrideU(); - raw_images_[0].stride[VPX_PLANE_V] = input_image->StrideV(); - + // Scale and map buffers and set |raw_images_| to hold pointers to the result. + // Because |raw_images_| are set to hold pointers to the prepared buffers, we + // need to keep these buffers alive through reference counting until after + // encoding is complete. 
+ std::vector> prepared_buffers = + PrepareBuffers(frame.video_frame_buffer()); + if (prepared_buffers.empty()) { + return WEBRTC_VIDEO_CODEC_ERROR; + } struct CleanUpOnExit { - explicit CleanUpOnExit(vpx_image_t& raw_image) : raw_image_(raw_image) {} + explicit CleanUpOnExit( + vpx_image_t* raw_image, + std::vector> prepared_buffers) + : raw_image_(raw_image), + prepared_buffers_(std::move(prepared_buffers)) {} ~CleanUpOnExit() { - raw_image_.planes[VPX_PLANE_Y] = nullptr; - raw_image_.planes[VPX_PLANE_U] = nullptr; - raw_image_.planes[VPX_PLANE_V] = nullptr; + raw_image_->planes[VPX_PLANE_Y] = nullptr; + raw_image_->planes[VPX_PLANE_U] = nullptr; + raw_image_->planes[VPX_PLANE_V] = nullptr; } - vpx_image_t& raw_image_; - } clean_up_on_exit(raw_images_[0]); - - for (size_t i = 1; i < encoders_.size(); ++i) { - // Scale the image down a number of times by downsampling factor - libyuv::I420Scale( - raw_images_[i - 1].planes[VPX_PLANE_Y], - raw_images_[i - 1].stride[VPX_PLANE_Y], - raw_images_[i - 1].planes[VPX_PLANE_U], - raw_images_[i - 1].stride[VPX_PLANE_U], - raw_images_[i - 1].planes[VPX_PLANE_V], - raw_images_[i - 1].stride[VPX_PLANE_V], raw_images_[i - 1].d_w, - raw_images_[i - 1].d_h, raw_images_[i].planes[VPX_PLANE_Y], - raw_images_[i].stride[VPX_PLANE_Y], raw_images_[i].planes[VPX_PLANE_U], - raw_images_[i].stride[VPX_PLANE_U], raw_images_[i].planes[VPX_PLANE_V], - raw_images_[i].stride[VPX_PLANE_V], raw_images_[i].d_w, - raw_images_[i].d_h, libyuv::kFilterBilinear); - } + vpx_image_t* raw_image_; + std::vector> prepared_buffers_; + } clean_up_on_exit(&raw_images_[0], std::move(prepared_buffers)); if (send_key_frame) { // Adapt the size of the key frame when in screenshare with 1 temporal @@ -1076,7 +1037,7 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame, // would like to use the duration of the previous frame. Unfortunately the // rate control seems to be off with that setup. 
Using the average input // frame rate to calculate an average duration for now. - assert(codec_.maxFramerate > 0); + RTC_DCHECK_GT(codec_.maxFramerate, 0); uint32_t duration = kRtpTicksPerSecond / codec_.maxFramerate; int error = WEBRTC_VIDEO_CODEC_OK; @@ -1113,7 +1074,7 @@ void LibvpxVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, int stream_idx, int encoder_idx, uint32_t timestamp) { - assert(codec_specific != NULL); + RTC_DCHECK(codec_specific); codec_specific->codecType = kVideoCodecVP8; codec_specific->codecSpecific.VP8.keyIdx = kNoKeyIdx; // TODO(hlundin) populate this @@ -1122,9 +1083,25 @@ void LibvpxVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, int qp = 0; vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp); - frame_buffer_controller_->OnEncodeDone( - stream_idx, timestamp, encoded_images_[encoder_idx].size(), - (pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0, qp, codec_specific); + bool is_keyframe = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0; + frame_buffer_controller_->OnEncodeDone(stream_idx, timestamp, + encoded_images_[encoder_idx].size(), + is_keyframe, qp, codec_specific); + if (is_keyframe && codec_specific->template_structure != absl::nullopt) { + // Number of resolutions must match number of spatial layers, VP8 structures + // expected to use single spatial layer. Templates must be ordered by + // spatial_id, so assumption there is exactly one spatial layer is same as + // assumption last template uses spatial_id = 0. + // This check catches potential scenario where template_structure is shared + // across multiple vp8 streams and they are distinguished using spatial_id. + // Assigning single resolution doesn't support such scenario, i.e. assumes + // vp8 simulcast is sent using multiple ssrcs. 
+ RTC_DCHECK(!codec_specific->template_structure->templates.empty()); + RTC_DCHECK_EQ( + codec_specific->template_structure->templates.back().spatial_id, 0); + codec_specific->template_structure->resolutions = { + RenderResolution(pkt.data.frame.width[0], pkt.data.frame.height[0])}; + } } int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image, @@ -1198,7 +1175,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image, &qp_128); encoded_images_[encoder_idx].qp_ = qp_128; encoded_complete_callback_->OnEncodedImage(encoded_images_[encoder_idx], - &codec_specific, nullptr); + &codec_specific); const size_t steady_state_size = SteadyStateSize( stream_idx, codec_specific.codecSpecific.VP8.temporalIdx); if (qp_128 > variable_framerate_experiment_.steady_state_qp || @@ -1233,6 +1210,16 @@ VideoEncoder::EncoderInfo LibvpxVp8Encoder::GetEncoderInfo() const { if (!resolution_bitrate_limits_.empty()) { info.resolution_bitrate_limits = resolution_bitrate_limits_; } + if (encoder_info_override_.requested_resolution_alignment()) { + info.requested_resolution_alignment = + *encoder_info_override_.requested_resolution_alignment(); + info.apply_alignment_to_all_simulcast_layers = + encoder_info_override_.apply_alignment_to_all_simulcast_layers(); + } + if (!encoder_info_override_.resolution_bitrate_limits().empty()) { + info.resolution_bitrate_limits = + encoder_info_override_.resolution_bitrate_limits(); + } const bool enable_scaling = num_active_streams_ == 1 && @@ -1247,6 +1234,8 @@ VideoEncoder::EncoderInfo LibvpxVp8Encoder::GetEncoderInfo() const { info.scaling_settings.min_pixels_per_frame = rate_control_settings_.LibvpxVp8MinPixels().value(); } + info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420, + VideoFrameBuffer::Type::kNV12}; if (inited_) { // |encoder_idx| is libvpx index where 0 is highest resolution. 
@@ -1285,17 +1274,159 @@ int LibvpxVp8Encoder::RegisterEncodeCompleteCallback( return WEBRTC_VIDEO_CODEC_OK; } +void LibvpxVp8Encoder::MaybeUpdatePixelFormat(vpx_img_fmt fmt) { + RTC_DCHECK(!raw_images_.empty()); + if (raw_images_[0].fmt == fmt) { + RTC_DCHECK(std::all_of( + std::next(raw_images_.begin()), raw_images_.end(), + [fmt](const vpx_image_t& raw_img) { return raw_img.fmt == fmt; })) + << "Not all raw images had the right format!"; + return; + } + RTC_LOG(INFO) << "Updating vp8 encoder pixel format to " + << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420"); + for (size_t i = 0; i < raw_images_.size(); ++i) { + vpx_image_t& img = raw_images_[i]; + auto d_w = img.d_w; + auto d_h = img.d_h; + libvpx_->img_free(&img); + // First image is wrapping the input frame, the rest are allocated. + if (i == 0) { + libvpx_->img_wrap(&img, fmt, d_w, d_h, 1, NULL); + } else { + libvpx_->img_alloc(&img, fmt, d_w, d_h, kVp832ByteAlign); + } + } +} + +std::vector> +LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr buffer) { + RTC_DCHECK_EQ(buffer->width(), raw_images_[0].d_w); + RTC_DCHECK_EQ(buffer->height(), raw_images_[0].d_h); + absl::InlinedVector + supported_formats = {VideoFrameBuffer::Type::kI420, + VideoFrameBuffer::Type::kNV12}; + + rtc::scoped_refptr mapped_buffer; + if (buffer->type() != VideoFrameBuffer::Type::kNative) { + // |buffer| is already mapped. + mapped_buffer = buffer; + } else { + // Attempt to map to one of the supported formats. + mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats); + } + if (!mapped_buffer || + (absl::c_find(supported_formats, mapped_buffer->type()) == + supported_formats.end() && + mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) { + // Unknown pixel format or unable to map, convert to I420 and prepare that + // buffer instead to ensure Scale() is safe to use. 
+ auto converted_buffer = buffer->ToI420(); + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString(buffer->type()) + << " image to I420. Can't encode frame."; + return {}; + } + // The buffer should now be a mapped I420 or I420A format, but some buffer + // implementations incorrectly return the wrong buffer format, such as + // kNative. As a workaround to this, we perform ToI420() a second time. + // TODO(https://crbug.com/webrtc/12602): When Android buffers have a correct + // ToI420() implementaion, remove his workaround. + if (converted_buffer->type() != VideoFrameBuffer::Type::kI420 && + converted_buffer->type() != VideoFrameBuffer::Type::kI420A) { + converted_buffer = converted_buffer->ToI420(); + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString( + converted_buffer->type()) + << " image to I420. Can't encode frame."; + return {}; + } + RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 || + converted_buffer->type() == VideoFrameBuffer::Type::kI420A); + } + // Because |buffer| had to be converted, use |converted_buffer| instead... + buffer = mapped_buffer = converted_buffer; + } + + // Maybe update pixel format. + absl::InlinedVector + mapped_type = {mapped_buffer->type()}; + switch (mapped_buffer->type()) { + case VideoFrameBuffer::Type::kI420: + case VideoFrameBuffer::Type::kI420A: + MaybeUpdatePixelFormat(VPX_IMG_FMT_I420); + break; + case VideoFrameBuffer::Type::kNV12: + MaybeUpdatePixelFormat(VPX_IMG_FMT_NV12); + break; + default: + RTC_NOTREACHED(); + } + + // Prepare |raw_images_| from |mapped_buffer| and, if simulcast, scaled + // versions of |buffer|. + std::vector> prepared_buffers; + SetRawImagePlanes(&raw_images_[0], mapped_buffer); + prepared_buffers.push_back(mapped_buffer); + for (size_t i = 1; i < encoders_.size(); ++i) { + // Native buffers should implement optimized scaling and is the preferred + // buffer to scale. 
But if the buffer isn't native, it should be cheaper to + // scale from the previously prepared buffer which is smaller than |buffer|. + VideoFrameBuffer* buffer_to_scale = + buffer->type() == VideoFrameBuffer::Type::kNative + ? buffer.get() + : prepared_buffers.back().get(); + + auto scaled_buffer = + buffer_to_scale->Scale(raw_images_[i].d_w, raw_images_[i].d_h); + if (scaled_buffer->type() == VideoFrameBuffer::Type::kNative) { + auto mapped_scaled_buffer = + scaled_buffer->GetMappedFrameBuffer(mapped_type); + RTC_DCHECK(mapped_scaled_buffer) << "Unable to map the scaled buffer."; + if (!mapped_scaled_buffer) { + RTC_LOG(LS_ERROR) << "Failed to map scaled " + << VideoFrameBufferTypeToString(scaled_buffer->type()) + << " image to " + << VideoFrameBufferTypeToString(mapped_buffer->type()) + << ". Can't encode frame."; + return {}; + } + scaled_buffer = mapped_scaled_buffer; + } + if (!IsCompatibleVideoFrameBufferType(scaled_buffer->type(), + mapped_buffer->type())) { + RTC_LOG(LS_ERROR) << "When scaling " + << VideoFrameBufferTypeToString(buffer_to_scale->type()) + << ", the image was unexpectedly converted to " + << VideoFrameBufferTypeToString(scaled_buffer->type()) + << " instead of " + << VideoFrameBufferTypeToString(mapped_buffer->type()) + << ". 
Can't encode frame."; + RTC_NOTREACHED() << "Scaled buffer type " + << VideoFrameBufferTypeToString(scaled_buffer->type()) + << " is not compatible with mapped buffer type " + << VideoFrameBufferTypeToString(mapped_buffer->type()); + return {}; + } + SetRawImagePlanes(&raw_images_[i], scaled_buffer); + prepared_buffers.push_back(scaled_buffer); + } + return prepared_buffers; +} + // static LibvpxVp8Encoder::VariableFramerateExperiment LibvpxVp8Encoder::ParseVariableFramerateConfig(std::string group_name) { - FieldTrialFlag enabled = FieldTrialFlag("Enabled"); + FieldTrialFlag disabled = FieldTrialFlag("Disabled"); FieldTrialParameter framerate_limit("min_fps", 5.0); FieldTrialParameter qp("min_qp", 15); FieldTrialParameter undershoot_percentage("undershoot", 30); - ParseFieldTrial({&enabled, &framerate_limit, &qp, &undershoot_percentage}, + ParseFieldTrial({&disabled, &framerate_limit, &qp, &undershoot_percentage}, field_trial::FindFullName(group_name)); VariableFramerateExperiment config; - config.enabled = enabled.Get(); + config.enabled = !disabled.Get(); config.framerate_limit = framerate_limit.Get(); config.steady_state_qp = qp.Get(); config.steady_state_undershoot_percentage = undershoot_percentage.Get(); diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h index 8afcaae58f..ed80eacab2 100644 --- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h +++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h @@ -21,11 +21,12 @@ #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/vp8_frame_buffer_controller.h" #include "api/video_codecs/vp8_frame_config.h" +#include "modules/video_coding/codecs/interface/libvpx_interface.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" -#include "modules/video_coding/codecs/vp8/libvpx_interface.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/utility/framerate_controller.h" #include 
"rtc_base/experiments/cpu_speed_experiment.h" +#include "rtc_base/experiments/encoder_info_settings.h" #include "rtc_base/experiments/rate_control_settings.h" #include "vpx/vp8cx.h" #include "vpx/vpx_encoder.h" @@ -93,10 +94,18 @@ class LibvpxVp8Encoder : public VideoEncoder { bool UpdateVpxConfiguration(size_t stream_index); + void MaybeUpdatePixelFormat(vpx_img_fmt fmt); + // Prepares |raw_image_| to reference image data of |buffer|, or of mapped or + // scaled versions of |buffer|. Returns a list of buffers that got referenced + // as a result, allowing the caller to keep references to them until after + // encoding has finished. On failure to convert the buffer, an empty list is + // returned. + std::vector> PrepareBuffers( + rtc::scoped_refptr buffer); + const std::unique_ptr libvpx_; - const absl::optional> - experimental_cpu_speed_config_arm_; + const CpuSpeedExperiment experimental_cpu_speed_config_arm_; const RateControlSettings rate_control_settings_; EncodedImageCallback* encoded_complete_callback_ = nullptr; @@ -140,6 +149,8 @@ class LibvpxVp8Encoder : public VideoEncoder { int num_steady_state_frames_ = 0; FecControllerOverride* fec_controller_override_ = nullptr; + + const LibvpxVp8EncoderInfoSettings encoder_info_override_; }; } // namespace webrtc diff --git a/modules/video_coding/codecs/vp8/screenshare_layers.cc b/modules/video_coding/codecs/vp8/screenshare_layers.cc index 01858c6ee9..caccb4246c 100644 --- a/modules/video_coding/codecs/vp8/screenshare_layers.cc +++ b/modules/video_coding/codecs/vp8/screenshare_layers.cc @@ -36,6 +36,7 @@ constexpr int kMinTimeBetweenSyncs = kOneSecond90Khz * 2; constexpr int kMaxTimeBetweenSyncs = kOneSecond90Khz * 4; constexpr int kQpDeltaThresholdForSync = 8; constexpr int kMinBitrateKbpsForQpBoost = 500; +constexpr auto kSwitch = DecodeTargetIndication::kSwitch; } // namespace const double ScreenshareLayers::kMaxTL0FpsReduction = 2.5; @@ -319,8 +320,7 @@ void ScreenshareLayers::OnEncodeDone(size_t stream_index, 
if (number_of_temporal_layers_ == 1) { vp8_info.temporalIdx = kNoTemporalIdx; vp8_info.layerSync = false; - generic_frame_info.decode_target_indications = - GenericFrameInfo::DecodeTargetInfo("S"); + generic_frame_info.decode_target_indications = {kSwitch}; generic_frame_info.encoder_buffers.emplace_back( 0, /*referenced=*/!is_keyframe, /*updated=*/true); } else { @@ -344,8 +344,7 @@ void ScreenshareLayers::OnEncodeDone(size_t stream_index, active_layer_ = 1; info->template_structure = GetTemplateStructure(number_of_temporal_layers_); - generic_frame_info.decode_target_indications = - GenericFrameInfo::DecodeTargetInfo("SS"); + generic_frame_info.decode_target_indications = {kSwitch, kSwitch}; } else if (active_layer_ >= 0 && layers_[active_layer_].state == TemporalLayer::State::kKeyFrame) { layers_[active_layer_].state = TemporalLayer::State::kNormal; @@ -429,21 +428,18 @@ FrameDependencyStructure ScreenshareLayers::GetTemplateStructure( FrameDependencyStructure template_structure; template_structure.num_decode_targets = num_layers; - using Builder = GenericFrameInfo::Builder; switch (num_layers) { case 1: { - template_structure.templates = { - Builder().T(0).Dtis("S").Build(), - Builder().T(0).Dtis("S").Fdiffs({1}).Build(), - }; + template_structure.templates.resize(2); + template_structure.templates[0].T(0).Dtis("S"); + template_structure.templates[1].T(0).Dtis("S").FrameDiffs({1}); return template_structure; } case 2: { - template_structure.templates = { - Builder().T(0).Dtis("SS").Build(), - Builder().T(0).Dtis("SS").Fdiffs({1}).Build(), - Builder().T(1).Dtis("-S").Fdiffs({1}).Build(), - }; + template_structure.templates.resize(3); + template_structure.templates[0].T(0).Dtis("SS"); + template_structure.templates[1].T(0).Dtis("SS").FrameDiffs({1}); + template_structure.templates[2].T(1).Dtis("-S").FrameDiffs({1}); return template_structure; } default: diff --git a/modules/video_coding/codecs/vp8/screenshare_layers.h 
b/modules/video_coding/codecs/vp8/screenshare_layers.h index 5270ffe81c..39477f12f1 100644 --- a/modules/video_coding/codecs/vp8/screenshare_layers.h +++ b/modules/video_coding/codecs/vp8/screenshare_layers.h @@ -78,7 +78,7 @@ class ScreenshareLayers final : public Vp8FrameBufferController { DependencyInfo(absl::string_view indication_symbols, Vp8FrameConfig frame_config) : decode_target_indications( - GenericFrameInfo::DecodeTargetInfo(indication_symbols)), + webrtc_impl::StringToDecodeTargetIndications(indication_symbols)), frame_config(frame_config) {} absl::InlinedVector decode_target_indications; diff --git a/modules/video_coding/codecs/vp8/temporal_layers_checker.cc b/modules/video_coding/codecs/vp8/temporal_layers_checker.cc index 53a68bd5e8..5aebd2c526 100644 --- a/modules/video_coding/codecs/vp8/temporal_layers_checker.cc +++ b/modules/video_coding/codecs/vp8/temporal_layers_checker.cc @@ -29,6 +29,7 @@ TemporalLayersChecker::CreateTemporalLayersChecker(Vp8TemporalLayersType type, // Conference mode temporal layering for screen content in base stream. return std::make_unique(num_temporal_layers); } + RTC_CHECK_NOTREACHED(); } TemporalLayersChecker::TemporalLayersChecker(int num_temporal_layers) diff --git a/modules/video_coding/codecs/vp8/test/mock_libvpx_interface.h b/modules/video_coding/codecs/vp8/test/mock_libvpx_interface.h deleted file mode 100644 index dcff1e6a18..0000000000 --- a/modules/video_coding/codecs/vp8/test/mock_libvpx_interface.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef MODULES_VIDEO_CODING_CODECS_VP8_TEST_MOCK_LIBVPX_INTERFACE_H_ -#define MODULES_VIDEO_CODING_CODECS_VP8_TEST_MOCK_LIBVPX_INTERFACE_H_ - -#include "modules/video_coding/codecs/vp8/libvpx_interface.h" -#include "test/gmock.h" -#include "test/gtest.h" - -namespace webrtc { - -class MockLibvpxVp8Interface : public LibvpxInterface { - public: - MOCK_CONST_METHOD5(img_alloc, - vpx_image_t*(vpx_image_t*, - vpx_img_fmt_t, - unsigned int, - unsigned int, - unsigned int)); - MOCK_CONST_METHOD6(img_wrap, - vpx_image_t*(vpx_image_t*, - vpx_img_fmt_t, - unsigned int, - unsigned int, - unsigned int, - unsigned char*)); - MOCK_CONST_METHOD1(img_free, void(vpx_image_t* img)); - MOCK_CONST_METHOD2(codec_enc_config_set, - vpx_codec_err_t(vpx_codec_ctx_t*, - const vpx_codec_enc_cfg_t*)); - MOCK_CONST_METHOD3(codec_enc_config_default, - vpx_codec_err_t(vpx_codec_iface_t*, - vpx_codec_enc_cfg_t*, - unsigned int)); - MOCK_CONST_METHOD4(codec_enc_init, - vpx_codec_err_t(vpx_codec_ctx_t*, - vpx_codec_iface_t*, - const vpx_codec_enc_cfg_t*, - vpx_codec_flags_t)); - MOCK_CONST_METHOD6(codec_enc_init_multi, - vpx_codec_err_t(vpx_codec_ctx_t*, - vpx_codec_iface_t*, - vpx_codec_enc_cfg_t*, - int, - vpx_codec_flags_t, - vpx_rational_t*)); - MOCK_CONST_METHOD1(codec_destroy, vpx_codec_err_t(vpx_codec_ctx_t*)); - MOCK_CONST_METHOD3(codec_control, - vpx_codec_err_t(vpx_codec_ctx_t*, - vp8e_enc_control_id, - uint32_t)); - MOCK_CONST_METHOD3(codec_control, - vpx_codec_err_t(vpx_codec_ctx_t*, - vp8e_enc_control_id, - int)); - MOCK_CONST_METHOD3(codec_control, - vpx_codec_err_t(vpx_codec_ctx_t*, - vp8e_enc_control_id, - int*)); - MOCK_CONST_METHOD3(codec_control, - vpx_codec_err_t(vpx_codec_ctx_t*, - vp8e_enc_control_id, - vpx_roi_map*)); - MOCK_CONST_METHOD3(codec_control, - vpx_codec_err_t(vpx_codec_ctx_t*, - vp8e_enc_control_id, - vpx_active_map*)); - MOCK_CONST_METHOD3(codec_control, - vpx_codec_err_t(vpx_codec_ctx_t*, - vp8e_enc_control_id, - vpx_scaling_mode*)); - 
MOCK_CONST_METHOD6(codec_encode, - vpx_codec_err_t(vpx_codec_ctx_t*, - const vpx_image_t*, - vpx_codec_pts_t, - uint64_t, - vpx_enc_frame_flags_t, - uint64_t)); - MOCK_CONST_METHOD2(codec_get_cx_data, - const vpx_codec_cx_pkt_t*(vpx_codec_ctx_t*, - vpx_codec_iter_t*)); -}; - -} // namespace webrtc - -#endif // MODULES_VIDEO_CODING_CODECS_VP8_TEST_MOCK_LIBVPX_INTERFACE_H_ diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc index 51595260dd..047bf2acae 100644 --- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc +++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc @@ -20,19 +20,21 @@ #include "api/video_codecs/vp8_temporal_layers.h" #include "common_video/libyuv/include/webrtc_libyuv.h" #include "common_video/test/utilities.h" +#include "modules/video_coding/codecs/interface/mock_libvpx_interface.h" #include "modules/video_coding/codecs/test/video_codec_unittest.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h" -#include "modules/video_coding/codecs/vp8/test/mock_libvpx_interface.h" #include "modules/video_coding/utility/vp8_header_parser.h" #include "rtc_base/time_utils.h" #include "test/field_trial.h" +#include "test/mappable_native_buffer.h" #include "test/video_codec_settings.h" namespace webrtc { using ::testing::_; using ::testing::AllOf; +using ::testing::ElementsAre; using ::testing::ElementsAreArray; using ::testing::Field; using ::testing::Invoke; @@ -119,7 +121,7 @@ class TestVp8Impl : public VideoCodecUnitTest { TEST_F(TestVp8Impl, ErrorResilienceDisabledForNoTemporalLayers) { codec_settings_.simulcastStream[0].numberOfTemporalLayers = 1; - auto* const vpx = new NiceMock(); + auto* const vpx = new NiceMock(); LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), VP8Encoder::Settings()); EXPECT_CALL(*vpx, @@ -133,7 +135,7 @@ TEST_F(TestVp8Impl, 
DefaultErrorResilienceEnabledForTemporalLayers) { codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2; codec_settings_.VP8()->numberOfTemporalLayers = 2; - auto* const vpx = new NiceMock(); + auto* const vpx = new NiceMock(); LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), VP8Encoder::Settings()); EXPECT_CALL(*vpx, @@ -152,7 +154,7 @@ TEST_F(TestVp8Impl, codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2; codec_settings_.VP8()->numberOfTemporalLayers = 2; - auto* const vpx = new NiceMock(); + auto* const vpx = new NiceMock(); LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), VP8Encoder::Settings()); EXPECT_CALL(*vpx, @@ -165,7 +167,7 @@ TEST_F(TestVp8Impl, } TEST_F(TestVp8Impl, SetRates) { - auto* const vpx = new NiceMock(); + auto* const vpx = new NiceMock(); LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), VP8Encoder::Settings()); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, @@ -190,75 +192,51 @@ TEST_F(TestVp8Impl, SetRates) { bitrate_allocation, static_cast(codec_settings_.maxFramerate))); } -TEST_F(TestVp8Impl, DynamicSetRates) { - test::ScopedFieldTrials field_trials( - "WebRTC-VideoRateControl/vp8_dynamic_rate:true/"); - auto* const vpx = new NiceMock(); - LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), - VP8Encoder::Settings()); +TEST_F(TestVp8Impl, EncodeFrameAndRelease) { + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, - encoder.InitEncode(&codec_settings_, - VideoEncoder::Settings(kCapabilities, 1, 1000))); + encoder_->InitEncode(&codec_settings_, kSettings)); - const uint32_t kBitrateBps = 300000; - VideoEncoder::RateControlParameters rate_settings; - rate_settings.bitrate.SetBitrate(0, 0, kBitrateBps); - rate_settings.framerate_fps = - static_cast(codec_settings_.maxFramerate); + EncodedImage encoded_frame; + CodecSpecificInfo codec_specific_info; + EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info); - // Set rates with no headroom. 
- rate_settings.bandwidth_allocation = DataRate::BitsPerSec(kBitrateBps); - EXPECT_CALL( - *vpx, - codec_enc_config_set( - _, AllOf(Field(&vpx_codec_enc_cfg_t::rc_target_bitrate, - kBitrateBps / 1000), - Field(&vpx_codec_enc_cfg_t::rc_undershoot_pct, 1000u), - Field(&vpx_codec_enc_cfg_t::rc_overshoot_pct, 0u), - Field(&vpx_codec_enc_cfg_t::rc_buf_sz, 100u), - Field(&vpx_codec_enc_cfg_t::rc_buf_optimal_sz, 30u), - Field(&vpx_codec_enc_cfg_t::rc_dropframe_thresh, 40u)))) - .WillOnce(Return(VPX_CODEC_OK)); - encoder.SetRates(rate_settings); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED, + encoder_->Encode(NextInputFrame(), nullptr)); +} - // Set rates with max headroom. - rate_settings.bandwidth_allocation = DataRate::BitsPerSec(kBitrateBps * 2); - EXPECT_CALL( - *vpx, codec_enc_config_set( - _, AllOf(Field(&vpx_codec_enc_cfg_t::rc_target_bitrate, - kBitrateBps / 1000), - Field(&vpx_codec_enc_cfg_t::rc_undershoot_pct, 100u), - Field(&vpx_codec_enc_cfg_t::rc_overshoot_pct, 15u), - Field(&vpx_codec_enc_cfg_t::rc_buf_sz, 1000u), - Field(&vpx_codec_enc_cfg_t::rc_buf_optimal_sz, 600u), - Field(&vpx_codec_enc_cfg_t::rc_dropframe_thresh, 5u)))) - .WillOnce(Return(VPX_CODEC_OK)); - encoder.SetRates(rate_settings); +TEST_F(TestVp8Impl, EncodeNv12FrameSimulcast) { + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, + encoder_->InitEncode(&codec_settings_, kSettings)); - // Set rates with headroom half way. 
- rate_settings.bandwidth_allocation = - DataRate::BitsPerSec((3 * kBitrateBps) / 2); - EXPECT_CALL( - *vpx, - codec_enc_config_set( - _, AllOf(Field(&vpx_codec_enc_cfg_t::rc_target_bitrate, - kBitrateBps / 1000), - Field(&vpx_codec_enc_cfg_t::rc_undershoot_pct, 550u), - Field(&vpx_codec_enc_cfg_t::rc_overshoot_pct, 8u), - Field(&vpx_codec_enc_cfg_t::rc_buf_sz, 550u), - Field(&vpx_codec_enc_cfg_t::rc_buf_optimal_sz, 315u), - Field(&vpx_codec_enc_cfg_t::rc_dropframe_thresh, 23u)))) - .WillOnce(Return(VPX_CODEC_OK)); - encoder.SetRates(rate_settings); + EncodedImage encoded_frame; + CodecSpecificInfo codec_specific_info; + input_frame_generator_ = test::CreateSquareFrameGenerator( + kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12, + absl::nullopt); + EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info); + + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_UNINITIALIZED, + encoder_->Encode(NextInputFrame(), nullptr)); } -TEST_F(TestVp8Impl, EncodeFrameAndRelease) { +TEST_F(TestVp8Impl, EncodeI420FrameAfterNv12Frame) { EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->InitEncode(&codec_settings_, kSettings)); EncodedImage encoded_frame; CodecSpecificInfo codec_specific_info; + input_frame_generator_ = test::CreateSquareFrameGenerator( + kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12, + absl::nullopt); + EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info); + input_frame_generator_ = test::CreateSquareFrameGenerator( + kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420, + absl::nullopt); EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); @@ -286,6 +264,17 @@ TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) { EXPECT_EQ(kHeight, static_cast(encoded_frame._encodedHeight)); } +TEST_F(TestVp8Impl, + 
EncoderFillsResolutionInCodecAgnosticSectionOfCodecSpecificInfo) { + EncodedImage encoded_frame; + CodecSpecificInfo codec_specific_info; + EncodeAndWaitForFrame(NextInputFrame(), &encoded_frame, &codec_specific_info); + + ASSERT_TRUE(codec_specific_info.template_structure); + EXPECT_THAT(codec_specific_info.template_structure->resolutions, + ElementsAre(RenderResolution(kWidth, kHeight))); +} + TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) { VideoFrame input_frame = NextInputFrame(); EncodedImage encoded_frame; @@ -400,36 +389,6 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) { EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp()); } -#if defined(WEBRTC_ANDROID) -#define MAYBE_DecodeWithACompleteKeyFrame DISABLED_DecodeWithACompleteKeyFrame -#else -#define MAYBE_DecodeWithACompleteKeyFrame DecodeWithACompleteKeyFrame -#endif -TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) { - VideoFrame input_frame = NextInputFrame(); - EncodedImage encoded_frame; - CodecSpecificInfo codec_specific_info; - EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info); - - // Setting complete to false -> should return an error. - encoded_frame._completeFrame = false; - EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR, - decoder_->Decode(encoded_frame, false, -1)); - // Setting complete back to true. Forcing a delta frame. - encoded_frame._frameType = VideoFrameType::kVideoFrameDelta; - encoded_frame._completeFrame = true; - EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR, - decoder_->Decode(encoded_frame, false, -1)); - // Now setting a key frame. 
- encoded_frame._frameType = VideoFrameType::kVideoFrameKey; - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1)); - std::unique_ptr decoded_frame; - absl::optional decoded_qp; - ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp)); - ASSERT_TRUE(decoded_frame); - EXPECT_GT(I420PSNR(&input_frame, decoded_frame.get()), 36); -} - TEST_F(TestVp8Impl, EncoderWith2TemporalLayers) { codec_settings_.VP8()->numberOfTemporalLayers = 2; EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, @@ -514,7 +473,7 @@ TEST_F(TestVp8Impl, DontDropKeyframes) { } TEST_F(TestVp8Impl, KeepsTimestampOnReencode) { - auto* const vpx = new NiceMock(); + auto* const vpx = new NiceMock(); LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), VP8Encoder::Settings()); @@ -523,6 +482,7 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) { codec_settings_.maxBitrate = 1000; codec_settings_.mode = VideoCodecMode::kScreensharing; codec_settings_.VP8()->numberOfTemporalLayers = 2; + codec_settings_.legacy_conference_mode = true; EXPECT_CALL(*vpx, img_wrap(_, _, _, _, _, _)) .WillOnce(Invoke([](vpx_image_t* img, vpx_img_fmt_t fmt, unsigned int d_w, @@ -553,7 +513,7 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) { } TEST(LibvpxVp8EncoderTest, GetEncoderInfoReturnsStaticInformation) { - auto* const vpx = new NiceMock(); + auto* const vpx = new NiceMock(); LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), VP8Encoder::Settings()); @@ -564,11 +524,50 @@ TEST(LibvpxVp8EncoderTest, GetEncoderInfoReturnsStaticInformation) { EXPECT_FALSE(info.has_internal_source); EXPECT_TRUE(info.supports_simulcast); EXPECT_EQ(info.implementation_name, "libvpx"); + EXPECT_EQ(info.requested_resolution_alignment, 1); + EXPECT_THAT(info.preferred_pixel_formats, + testing::UnorderedElementsAre(VideoFrameBuffer::Type::kNV12, + VideoFrameBuffer::Type::kI420)); +} + +TEST(LibvpxVp8EncoderTest, RequestedResolutionAlignmentFromFieldTrial) { + test::ScopedFieldTrials field_trials( + "WebRTC-VP8-GetEncoderInfoOverride/" + 
"requested_resolution_alignment:10/"); + + auto* const vpx = new NiceMock(); + LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), + VP8Encoder::Settings()); + + EXPECT_EQ(encoder.GetEncoderInfo().requested_resolution_alignment, 10); + EXPECT_FALSE( + encoder.GetEncoderInfo().apply_alignment_to_all_simulcast_layers); + EXPECT_TRUE(encoder.GetEncoderInfo().resolution_bitrate_limits.empty()); +} + +TEST(LibvpxVp8EncoderTest, ResolutionBitrateLimitsFromFieldTrial) { + test::ScopedFieldTrials field_trials( + "WebRTC-VP8-GetEncoderInfoOverride/" + "frame_size_pixels:123|456|789," + "min_start_bitrate_bps:11000|22000|33000," + "min_bitrate_bps:44000|55000|66000," + "max_bitrate_bps:77000|88000|99000/"); + + auto* const vpx = new NiceMock(); + LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), + VP8Encoder::Settings()); + + EXPECT_THAT( + encoder.GetEncoderInfo().resolution_bitrate_limits, + ::testing::ElementsAre( + VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000}, + VideoEncoder::ResolutionBitrateLimits{456, 22000, 55000, 88000}, + VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000})); } TEST(LibvpxVp8EncoderTest, GetEncoderInfoReturnsEmptyResolutionBitrateLimitsByDefault) { - auto* const vpx = new NiceMock(); + auto* const vpx = new NiceMock(); LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), VP8Encoder::Settings()); @@ -588,7 +587,7 @@ TEST(LibvpxVp8EncoderTest, VP8Encoder::Settings settings; settings.resolution_bitrate_limits = resolution_bitrate_limits; - auto* const vpx = new NiceMock(); + auto* const vpx = new NiceMock(); LibvpxVp8Encoder encoder((std::unique_ptr(vpx)), std::move(settings)); @@ -653,6 +652,7 @@ TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationScreenshareLayers) { codec_settings_.simulcastStream[0].maxBitrate = kLegacyScreenshareTl1BitrateKbps; codec_settings_.simulcastStream[0].numberOfTemporalLayers = 2; + codec_settings_.legacy_conference_mode = true; EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->InitEncode(&codec_settings_, 
kSettings)); @@ -716,4 +716,61 @@ TEST_F(TestVp8Impl, GetEncoderInfoFpsAllocationSimulcastVideo) { ::testing::ElementsAreArray(expected_fps_allocation)); } +class TestVp8ImplForPixelFormat + : public TestVp8Impl, + public ::testing::WithParamInterface { + public: + TestVp8ImplForPixelFormat() : TestVp8Impl(), mappable_type_(GetParam()) {} + + protected: + VideoFrameBuffer::Type mappable_type_; +}; + +TEST_P(TestVp8ImplForPixelFormat, EncodeNativeFrameSimulcast) { + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); + + // Configure simulcast. + codec_settings_.numberOfSimulcastStreams = 3; + codec_settings_.simulcastStream[0] = { + kWidth / 4, kHeight / 4, kFramerateFps, 1, 4000, 3000, 2000, 80, true}; + codec_settings_.simulcastStream[1] = { + kWidth / 2, kHeight / 2, kFramerateFps, 1, 4000, 3000, 2000, 80, true}; + codec_settings_.simulcastStream[2] = { + kWidth, kHeight, kFramerateFps, 1, 4000, 3000, 2000, 80, true}; + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, + encoder_->InitEncode(&codec_settings_, kSettings)); + + // Create a zero-conversion NV12 frame (calling ToI420 on it crashes). + VideoFrame input_frame = + test::CreateMappableNativeFrame(1, mappable_type_, kWidth, kHeight); + + EncodedImage encoded_frame; + CodecSpecificInfo codec_specific_info; + EncodeAndWaitForFrame(input_frame, &encoded_frame, &codec_specific_info); + + // After encoding, we expect one mapping per simulcast layer. 
+ rtc::scoped_refptr mappable_buffer = + test::GetMappableNativeBufferFromVideoFrame(input_frame); + std::vector> mapped_buffers = + mappable_buffer->GetMappedFramedBuffers(); + ASSERT_EQ(mapped_buffers.size(), 3u); + EXPECT_EQ(mapped_buffers[0]->type(), mappable_type_); + EXPECT_EQ(mapped_buffers[0]->width(), kWidth); + EXPECT_EQ(mapped_buffers[0]->height(), kHeight); + EXPECT_EQ(mapped_buffers[1]->type(), mappable_type_); + EXPECT_EQ(mapped_buffers[1]->width(), kWidth / 2); + EXPECT_EQ(mapped_buffers[1]->height(), kHeight / 2); + EXPECT_EQ(mapped_buffers[2]->type(), mappable_type_); + EXPECT_EQ(mapped_buffers[2]->width(), kWidth / 4); + EXPECT_EQ(mapped_buffers[2]->height(), kHeight / 4); + EXPECT_FALSE(mappable_buffer->DidConvertToI420()); + + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release()); +} + +INSTANTIATE_TEST_SUITE_P(All, + TestVp8ImplForPixelFormat, + ::testing::Values(VideoFrameBuffer::Type::kI420, + VideoFrameBuffer::Type::kNV12)); + } // namespace webrtc diff --git a/modules/video_coding/codecs/vp9/include/vp9.h b/modules/video_coding/codecs/vp9/include/vp9.h index 8091cacec9..7cf1c2ebd1 100644 --- a/modules/video_coding/codecs/vp9/include/vp9.h +++ b/modules/video_coding/codecs/vp9/include/vp9.h @@ -25,6 +25,10 @@ namespace webrtc { // negotiate in SDP, in order of preference. std::vector SupportedVP9Codecs(); +// Returns a vector with all supported internal VP9 decode profiles in order of +// preference. These will be availble for receive-only connections. +std::vector SupportedVP9DecoderCodecs(); + class VP9Encoder : public VideoEncoder { public: // Deprecated. Returns default implementation using VP9 Profile 0. 
diff --git a/modules/video_coding/codecs/vp9/include/vp9_globals.h b/modules/video_coding/codecs/vp9/include/vp9_globals.h index c6853127ac..34aa0bc6cf 100644 --- a/modules/video_coding/codecs/vp9/include/vp9_globals.h +++ b/modules/video_coding/codecs/vp9/include/vp9_globals.h @@ -18,6 +18,7 @@ #include #include "modules/video_coding/codecs/interface/common_constants.h" +#include "rtc_base/checks.h" namespace webrtc { @@ -30,8 +31,8 @@ const size_t kMaxVp9RefPics = 3; const size_t kMaxVp9FramesInGof = 0xFF; // 8 bits const size_t kMaxVp9NumberOfSpatialLayers = 8; -const size_t kMinVp9SpatialLayerWidth = 320; -const size_t kMinVp9SpatialLayerHeight = 180; +const size_t kMinVp9SpatialLayerWidth = 240; +const size_t kMinVp9SpatialLayerHeight = 135; enum TemporalStructureMode { kTemporalStructureMode1, // 1 temporal layer structure - i.e., IPPP... @@ -131,7 +132,7 @@ struct GofInfoVP9 { pid_diff[7][1] = 2; break; default: - assert(false); + RTC_NOTREACHED(); } } diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc new file mode 100644 index 0000000000..3500ef5919 --- /dev/null +++ b/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.cc @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ * + */ + +#ifdef RTC_ENABLE_VP9 + +#include "modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h" + +#include + +#include "absl/strings/match.h" +#include "api/transport/field_trial_based_config.h" +#include "api/video/color_space.h" +#include "api/video/i010_buffer.h" +#include "common_video/include/video_frame_buffer.h" +#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "third_party/libyuv/include/libyuv/convert.h" +#include "vpx/vp8dx.h" +#include "vpx/vpx_decoder.h" + +namespace webrtc { +namespace { + +// Helper class for extracting VP9 colorspace. +ColorSpace ExtractVP9ColorSpace(vpx_color_space_t space_t, + vpx_color_range_t range_t, + unsigned int bit_depth) { + ColorSpace::PrimaryID primaries = ColorSpace::PrimaryID::kUnspecified; + ColorSpace::TransferID transfer = ColorSpace::TransferID::kUnspecified; + ColorSpace::MatrixID matrix = ColorSpace::MatrixID::kUnspecified; + switch (space_t) { + case VPX_CS_BT_601: + case VPX_CS_SMPTE_170: + primaries = ColorSpace::PrimaryID::kSMPTE170M; + transfer = ColorSpace::TransferID::kSMPTE170M; + matrix = ColorSpace::MatrixID::kSMPTE170M; + break; + case VPX_CS_SMPTE_240: + primaries = ColorSpace::PrimaryID::kSMPTE240M; + transfer = ColorSpace::TransferID::kSMPTE240M; + matrix = ColorSpace::MatrixID::kSMPTE240M; + break; + case VPX_CS_BT_709: + primaries = ColorSpace::PrimaryID::kBT709; + transfer = ColorSpace::TransferID::kBT709; + matrix = ColorSpace::MatrixID::kBT709; + break; + case VPX_CS_BT_2020: + primaries = ColorSpace::PrimaryID::kBT2020; + switch (bit_depth) { + case 8: + transfer = ColorSpace::TransferID::kBT709; + break; + case 10: + transfer = ColorSpace::TransferID::kBT2020_10; + break; + default: + RTC_NOTREACHED(); + break; + } + matrix = ColorSpace::MatrixID::kBT2020_NCL; + break; + case VPX_CS_SRGB: + primaries = ColorSpace::PrimaryID::kBT709; + transfer = ColorSpace::TransferID::kIEC61966_2_1; + matrix = 
ColorSpace::MatrixID::kBT709; + break; + default: + break; + } + + ColorSpace::RangeID range = ColorSpace::RangeID::kInvalid; + switch (range_t) { + case VPX_CR_STUDIO_RANGE: + range = ColorSpace::RangeID::kLimited; + break; + case VPX_CR_FULL_RANGE: + range = ColorSpace::RangeID::kFull; + break; + default: + break; + } + return ColorSpace(primaries, transfer, matrix, range); +} + +} // namespace + +LibvpxVp9Decoder::LibvpxVp9Decoder() + : LibvpxVp9Decoder(FieldTrialBasedConfig()) {} +LibvpxVp9Decoder::LibvpxVp9Decoder(const WebRtcKeyValueConfig& trials) + : decode_complete_callback_(nullptr), + inited_(false), + decoder_(nullptr), + key_frame_required_(true), + preferred_output_format_( + absl::StartsWith(trials.Lookup("WebRTC-NV12Decode"), "Enabled") + ? VideoFrameBuffer::Type::kNV12 + : VideoFrameBuffer::Type::kI420) {} + +LibvpxVp9Decoder::~LibvpxVp9Decoder() { + inited_ = true; // in order to do the actual release + Release(); + int num_buffers_in_use = libvpx_buffer_pool_.GetNumBuffersInUse(); + if (num_buffers_in_use > 0) { + // The frame buffers are reference counted and frames are exposed after + // decoding. There may be valid usage cases where previous frames are still + // referenced after ~LibvpxVp9Decoder that is not a leak. + RTC_LOG(LS_INFO) << num_buffers_in_use + << " Vp9FrameBuffers are still " + "referenced during ~LibvpxVp9Decoder."; + } +} + +int LibvpxVp9Decoder::InitDecode(const VideoCodec* inst, int number_of_cores) { + int ret_val = Release(); + if (ret_val < 0) { + return ret_val; + } + + if (decoder_ == nullptr) { + decoder_ = new vpx_codec_ctx_t; + } + vpx_codec_dec_cfg_t cfg; + memset(&cfg, 0, sizeof(cfg)); + +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + // We focus on webrtc fuzzing here, not libvpx itself. Use single thread for + // fuzzing, because: + // - libvpx's VP9 single thread decoder is more fuzzer friendly. It detects + // errors earlier than the multi-threads version. 
+ // - Make peak CPU usage under control (not depending on input) + cfg.threads = 1; +#else + if (!inst) { + // No config provided - don't know resolution to decode yet. + // Set thread count to one in the meantime. + cfg.threads = 1; + } else { + // We want to use multithreading when decoding high resolution videos. But + // not too many in order to avoid overhead when many stream are decoded + // concurrently. + // Set 2 thread as target for 1280x720 pixel count, and then scale up + // linearly from there - but cap at physical core count. + // For common resolutions this results in: + // 1 for 360p + // 2 for 720p + // 4 for 1080p + // 8 for 1440p + // 18 for 4K + int num_threads = + std::max(1, 2 * (inst->width * inst->height) / (1280 * 720)); + cfg.threads = std::min(number_of_cores, num_threads); + current_codec_ = *inst; + } +#endif + + num_cores_ = number_of_cores; + + vpx_codec_flags_t flags = 0; + if (vpx_codec_dec_init(decoder_, vpx_codec_vp9_dx(), &cfg, flags)) { + return WEBRTC_VIDEO_CODEC_MEMORY; + } + + if (!libvpx_buffer_pool_.InitializeVpxUsePool(decoder_)) { + return WEBRTC_VIDEO_CODEC_MEMORY; + } + + inited_ = true; + // Always start with a complete key frame. + key_frame_required_ = true; + if (inst && inst->buffer_pool_size) { + if (!libvpx_buffer_pool_.Resize(*inst->buffer_pool_size) || + !output_buffer_pool_.Resize(*inst->buffer_pool_size)) { + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + } + } + + vpx_codec_err_t status = + vpx_codec_control(decoder_, VP9D_SET_LOOP_FILTER_OPT, 1); + if (status != VPX_CODEC_OK) { + RTC_LOG(LS_ERROR) << "Failed to enable VP9D_SET_LOOP_FILTER_OPT. 
" + << vpx_codec_error(decoder_); + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + } + + return WEBRTC_VIDEO_CODEC_OK; +} + +int LibvpxVp9Decoder::Decode(const EncodedImage& input_image, + bool missing_frames, + int64_t /*render_time_ms*/) { + if (!inited_) { + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + } + if (decode_complete_callback_ == nullptr) { + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + } + + if (input_image._frameType == VideoFrameType::kVideoFrameKey) { + absl::optional frame_info = + vp9::ParseIntraFrameInfo(input_image.data(), input_image.size()); + if (frame_info) { + if (frame_info->frame_width != current_codec_.width || + frame_info->frame_height != current_codec_.height) { + // Resolution has changed, tear down and re-init a new decoder in + // order to get correct sizing. + Release(); + current_codec_.width = frame_info->frame_width; + current_codec_.height = frame_info->frame_height; + int reinit_status = InitDecode(¤t_codec_, num_cores_); + if (reinit_status != WEBRTC_VIDEO_CODEC_OK) { + RTC_LOG(LS_WARNING) << "Failed to re-init decoder."; + return reinit_status; + } + } + } else { + RTC_LOG(LS_WARNING) << "Failed to parse VP9 header from key-frame."; + } + } + + // Always start with a complete key frame. + if (key_frame_required_) { + if (input_image._frameType != VideoFrameType::kVideoFrameKey) + return WEBRTC_VIDEO_CODEC_ERROR; + key_frame_required_ = false; + } + vpx_codec_iter_t iter = nullptr; + vpx_image_t* img; + const uint8_t* buffer = input_image.data(); + if (input_image.size() == 0) { + buffer = nullptr; // Triggers full frame concealment. + } + // During decode libvpx may get and release buffers from + // |libvpx_buffer_pool_|. In practice libvpx keeps a few (~3-4) buffers alive + // at a time. + if (vpx_codec_decode(decoder_, buffer, + static_cast(input_image.size()), 0, + VPX_DL_REALTIME)) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + // |img->fb_priv| contains the image data, a reference counted Vp9FrameBuffer. 
+ // It may be released by libvpx during future vpx_codec_decode or + // vpx_codec_destroy calls. + img = vpx_codec_get_frame(decoder_, &iter); + int qp; + vpx_codec_err_t vpx_ret = + vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp); + RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK); + int ret = + ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace()); + if (ret != 0) { + return ret; + } + return WEBRTC_VIDEO_CODEC_OK; +} + +int LibvpxVp9Decoder::ReturnFrame( + const vpx_image_t* img, + uint32_t timestamp, + int qp, + const webrtc::ColorSpace* explicit_color_space) { + if (img == nullptr) { + // Decoder OK and nullptr image => No show frame. + return WEBRTC_VIDEO_CODEC_NO_OUTPUT; + } + + // This buffer contains all of |img|'s image data, a reference counted + // Vp9FrameBuffer. (libvpx is done with the buffers after a few + // vpx_codec_decode calls or vpx_codec_destroy). + rtc::scoped_refptr img_buffer = + static_cast(img->fb_priv); + + // The buffer can be used directly by the VideoFrame (without copy) by + // using a Wrapped*Buffer. + rtc::scoped_refptr img_wrapped_buffer; + switch (img->bit_depth) { + case 8: + if (img->fmt == VPX_IMG_FMT_I420) { + if (preferred_output_format_ == VideoFrameBuffer::Type::kNV12) { + rtc::scoped_refptr nv12_buffer = + output_buffer_pool_.CreateNV12Buffer(img->d_w, img->d_h); + if (!nv12_buffer.get()) { + // Buffer pool is full. + return WEBRTC_VIDEO_CODEC_NO_OUTPUT; + } + img_wrapped_buffer = nv12_buffer; + libyuv::I420ToNV12(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y], + img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U], + img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], + nv12_buffer->MutableDataY(), + nv12_buffer->StrideY(), + nv12_buffer->MutableDataUV(), + nv12_buffer->StrideUV(), img->d_w, img->d_h); + // No holding onto img_buffer as it's no longer needed and can be + // reused. 
+ } else { + img_wrapped_buffer = WrapI420Buffer( + img->d_w, img->d_h, img->planes[VPX_PLANE_Y], + img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U], + img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V], + img->stride[VPX_PLANE_V], + // WrappedI420Buffer's mechanism for allowing the release of its + // frame buffer is through a callback function. This is where we + // should release |img_buffer|. + [img_buffer] {}); + } + } else if (img->fmt == VPX_IMG_FMT_I444) { + img_wrapped_buffer = WrapI444Buffer( + img->d_w, img->d_h, img->planes[VPX_PLANE_Y], + img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U], + img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V], + img->stride[VPX_PLANE_V], + // WrappedI444Buffer's mechanism for allowing the release of its + // frame buffer is through a callback function. This is where we + // should release |img_buffer|. + [img_buffer] {}); + } else { + RTC_LOG(LS_ERROR) + << "Unsupported pixel format produced by the decoder: " + << static_cast(img->fmt); + return WEBRTC_VIDEO_CODEC_NO_OUTPUT; + } + break; + case 10: + img_wrapped_buffer = WrapI010Buffer( + img->d_w, img->d_h, + reinterpret_cast(img->planes[VPX_PLANE_Y]), + img->stride[VPX_PLANE_Y] / 2, + reinterpret_cast(img->planes[VPX_PLANE_U]), + img->stride[VPX_PLANE_U] / 2, + reinterpret_cast(img->planes[VPX_PLANE_V]), + img->stride[VPX_PLANE_V] / 2, [img_buffer] {}); + break; + default: + RTC_LOG(LS_ERROR) << "Unsupported bit depth produced by the decoder: " + << img->bit_depth; + return WEBRTC_VIDEO_CODEC_NO_OUTPUT; + } + + auto builder = VideoFrame::Builder() + .set_video_frame_buffer(img_wrapped_buffer) + .set_timestamp_rtp(timestamp); + if (explicit_color_space) { + builder.set_color_space(*explicit_color_space); + } else { + builder.set_color_space( + ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth)); + } + VideoFrame decoded_image = builder.build(); + + decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp); + return WEBRTC_VIDEO_CODEC_OK; +} + +int 
LibvpxVp9Decoder::RegisterDecodeCompleteCallback( + DecodedImageCallback* callback) { + decode_complete_callback_ = callback; + return WEBRTC_VIDEO_CODEC_OK; +} + +int LibvpxVp9Decoder::Release() { + int ret_val = WEBRTC_VIDEO_CODEC_OK; + + if (decoder_ != nullptr) { + if (inited_) { + // When a codec is destroyed libvpx will release any buffers of + // |libvpx_buffer_pool_| it is currently using. + if (vpx_codec_destroy(decoder_)) { + ret_val = WEBRTC_VIDEO_CODEC_MEMORY; + } + } + delete decoder_; + decoder_ = nullptr; + } + // Releases buffers from the pool. Any buffers not in use are deleted. Buffers + // still referenced externally are deleted once fully released, not returning + // to the pool. + libvpx_buffer_pool_.ClearPool(); + output_buffer_pool_.Release(); + inited_ = false; + return ret_val; +} + +VideoDecoder::DecoderInfo LibvpxVp9Decoder::GetDecoderInfo() const { + DecoderInfo info; + info.implementation_name = "libvpx"; + info.is_hardware_accelerated = false; + return info; +} + +const char* LibvpxVp9Decoder::ImplementationName() const { + return "libvpx"; +} + +} // namespace webrtc + +#endif // RTC_ENABLE_VP9 diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h b/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h new file mode 100644 index 0000000000..f26f42700c --- /dev/null +++ b/modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ * + */ + +#ifndef MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_ +#define MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_ + +#ifdef RTC_ENABLE_VP9 + +#include "api/transport/webrtc_key_value_config.h" +#include "api/video_codecs/video_decoder.h" +#include "common_video/include/video_frame_buffer_pool.h" +#include "modules/video_coding/codecs/vp9/include/vp9.h" +#include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h" +#include "vpx/vp8cx.h" + +namespace webrtc { + +class LibvpxVp9Decoder : public VP9Decoder { + public: + LibvpxVp9Decoder(); + explicit LibvpxVp9Decoder(const WebRtcKeyValueConfig& trials); + + virtual ~LibvpxVp9Decoder(); + + int InitDecode(const VideoCodec* inst, int number_of_cores) override; + + int Decode(const EncodedImage& input_image, + bool missing_frames, + int64_t /*render_time_ms*/) override; + + int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override; + + int Release() override; + + DecoderInfo GetDecoderInfo() const override; + const char* ImplementationName() const override; + + private: + int ReturnFrame(const vpx_image_t* img, + uint32_t timestamp, + int qp, + const webrtc::ColorSpace* explicit_color_space); + + // Memory pool used to share buffers between libvpx and webrtc. + Vp9FrameBufferPool libvpx_buffer_pool_; + // Buffer pool used to allocate additionally needed NV12 buffers. + VideoFrameBufferPool output_buffer_pool_; + DecodedImageCallback* decode_complete_callback_; + bool inited_; + vpx_codec_ctx_t* decoder_; + bool key_frame_required_; + VideoCodec current_codec_; + int num_cores_; + + // Decoder should produce this format if possible. 
+ const VideoFrameBuffer::Type preferred_output_format_; +}; +} // namespace webrtc + +#endif // RTC_ENABLE_VP9 + +#endif // MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_DECODER_H_ diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc similarity index 64% rename from modules/video_coding/codecs/vp9/vp9_impl.cc rename to modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc index 46f72b6e02..511e6df585 100644 --- a/modules/video_coding/codecs/vp9/vp9_impl.cc +++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -11,30 +11,36 @@ #ifdef RTC_ENABLE_VP9 -#include "modules/video_coding/codecs/vp9/vp9_impl.h" +#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h" #include #include #include #include +#include "absl/algorithm/container.h" #include "absl/memory/memory.h" +#include "absl/strings/match.h" #include "api/video/color_space.h" #include "api/video/i010_buffer.h" #include "common_video/include/video_frame_buffer.h" #include "common_video/libyuv/include/webrtc_libyuv.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/video_coding/codecs/vp9/svc_rate_allocator.h" +#include "modules/video_coding/svc/create_scalability_structure.h" +#include "modules/video_coding/svc/scalable_video_controller.h" +#include "modules/video_coding/svc/scalable_video_controller_no_layering.h" +#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h" #include "rtc_base/checks.h" +#include "rtc_base/experiments/field_trial_list.h" +#include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/experiments/rate_control_settings.h" -#include "rtc_base/keep_ref_until_done.h" #include 
"rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" #include "rtc_base/time_utils.h" #include "rtc_base/trace_event.h" -#include "system_wrappers/include/field_trial.h" +#include "third_party/libyuv/include/libyuv/convert.h" #include "vpx/vp8cx.h" -#include "vpx/vp8dx.h" -#include "vpx/vpx_decoder.h" #include "vpx/vpx_encoder.h" namespace webrtc { @@ -45,98 +51,17 @@ namespace { uint8_t kRefBufIdx[4] = {0, 0, 0, 1}; uint8_t kUpdBufIdx[4] = {0, 0, 1, 0}; -int kMaxNumTiles4kVideo = 8; - // Maximum allowed PID difference for differnet per-layer frame-rate case. const int kMaxAllowedPidDiff = 30; -constexpr double kLowRateFactor = 1.0; -constexpr double kHighRateFactor = 2.0; - -// These settings correspond to the settings in vpx_codec_enc_cfg. -struct Vp9RateSettings { - uint32_t rc_undershoot_pct; - uint32_t rc_overshoot_pct; - uint32_t rc_buf_sz; - uint32_t rc_buf_optimal_sz; - uint32_t rc_dropframe_thresh; -}; - -// Only positive speeds, range for real-time coding currently is: 5 - 8. -// Lower means slower/better quality, higher means fastest/lower quality. -int GetCpuSpeed(int width, int height) { -#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID) - return 8; -#else - // For smaller resolutions, use lower speed setting (get some coding gain at - // the cost of increased encoding complexity). - if (width * height <= 352 * 288) - return 5; - else - return 7; -#endif -} -// Helper class for extracting VP9 colorspace. 
-ColorSpace ExtractVP9ColorSpace(vpx_color_space_t space_t, - vpx_color_range_t range_t, - unsigned int bit_depth) { - ColorSpace::PrimaryID primaries = ColorSpace::PrimaryID::kUnspecified; - ColorSpace::TransferID transfer = ColorSpace::TransferID::kUnspecified; - ColorSpace::MatrixID matrix = ColorSpace::MatrixID::kUnspecified; - switch (space_t) { - case VPX_CS_BT_601: - case VPX_CS_SMPTE_170: - primaries = ColorSpace::PrimaryID::kSMPTE170M; - transfer = ColorSpace::TransferID::kSMPTE170M; - matrix = ColorSpace::MatrixID::kSMPTE170M; - break; - case VPX_CS_SMPTE_240: - primaries = ColorSpace::PrimaryID::kSMPTE240M; - transfer = ColorSpace::TransferID::kSMPTE240M; - matrix = ColorSpace::MatrixID::kSMPTE240M; - break; - case VPX_CS_BT_709: - primaries = ColorSpace::PrimaryID::kBT709; - transfer = ColorSpace::TransferID::kBT709; - matrix = ColorSpace::MatrixID::kBT709; - break; - case VPX_CS_BT_2020: - primaries = ColorSpace::PrimaryID::kBT2020; - switch (bit_depth) { - case 8: - transfer = ColorSpace::TransferID::kBT709; - break; - case 10: - transfer = ColorSpace::TransferID::kBT2020_10; - break; - default: - RTC_NOTREACHED(); - break; - } - matrix = ColorSpace::MatrixID::kBT2020_NCL; - break; - case VPX_CS_SRGB: - primaries = ColorSpace::PrimaryID::kBT709; - transfer = ColorSpace::TransferID::kIEC61966_2_1; - matrix = ColorSpace::MatrixID::kBT709; - break; - default: - break; - } - - ColorSpace::RangeID range = ColorSpace::RangeID::kInvalid; - switch (range_t) { - case VPX_CR_STUDIO_RANGE: - range = ColorSpace::RangeID::kLimited; - break; - case VPX_CR_FULL_RANGE: - range = ColorSpace::RangeID::kFull; - break; - default: - break; - } - return ColorSpace(primaries, transfer, matrix, range); -} +// TODO(ilink): Tune these thresholds further. +// Selected using ConverenceMotion_1280_720_50.yuv clip. +// No toggling observed on any link capacity from 100-2000kbps. +// HD was reached consistently when link capacity was 1500kbps. 
+// Set resolutions are a bit more conservative than svc_config.cc sets, e.g. +// for 300kbps resolution converged to 270p instead of 360p. +constexpr int kLowVp9QpThreshold = 149; +constexpr int kHighVp9QpThreshold = 205; std::pair GetActiveLayers( const VideoBitrateAllocation& allocation) { @@ -153,72 +78,130 @@ std::pair GetActiveLayers( return {0, 0}; } -uint32_t Interpolate(uint32_t low, - uint32_t high, - double bandwidth_headroom_factor) { - RTC_DCHECK_GE(bandwidth_headroom_factor, kLowRateFactor); - RTC_DCHECK_LE(bandwidth_headroom_factor, kHighRateFactor); - - // |factor| is between 0.0 and 1.0. - const double factor = bandwidth_headroom_factor - kLowRateFactor; +std::unique_ptr CreateVp9ScalabilityStructure( + const VideoCodec& codec) { + int num_spatial_layers = codec.VP9().numberOfSpatialLayers; + int num_temporal_layers = + std::max(1, int{codec.VP9().numberOfTemporalLayers}); + if (num_spatial_layers == 1 && num_temporal_layers == 1) { + return std::make_unique(); + } + + char name[20]; + rtc::SimpleStringBuilder ss(name); + if (codec.mode == VideoCodecMode::kScreensharing) { + // TODO(bugs.webrtc.org/11999): Compose names of the structures when they + // are implemented. + return nullptr; + } else if (codec.VP9().interLayerPred == InterLayerPredMode::kOn || + num_spatial_layers == 1) { + ss << "L" << num_spatial_layers << "T" << num_temporal_layers; + } else if (codec.VP9().interLayerPred == InterLayerPredMode::kOnKeyPic) { + ss << "L" << num_spatial_layers << "T" << num_temporal_layers << "_KEY"; + } else { + RTC_DCHECK_EQ(codec.VP9().interLayerPred, InterLayerPredMode::kOff); + ss << "S" << num_spatial_layers << "T" << num_temporal_layers; + } - return static_cast(((1.0 - factor) * low) + (factor * high) + 0.5); -} + // Check spatial ratio. 
+ if (num_spatial_layers > 1 && codec.spatialLayers[0].targetBitrate > 0) { + if (codec.width != codec.spatialLayers[num_spatial_layers - 1].width || + codec.height != codec.spatialLayers[num_spatial_layers - 1].height) { + RTC_LOG(LS_WARNING) + << "Top layer resolution expected to match overall resolution"; + return nullptr; + } + // Check if the ratio is one of the supported. + int numerator; + int denominator; + if (codec.spatialLayers[1].width == 2 * codec.spatialLayers[0].width) { + numerator = 1; + denominator = 2; + // no suffix for 1:2 ratio. + } else if (2 * codec.spatialLayers[1].width == + 3 * codec.spatialLayers[0].width) { + numerator = 2; + denominator = 3; + ss << "h"; + } else { + RTC_LOG(LS_WARNING) << "Unsupported scalability ratio " + << codec.spatialLayers[0].width << ":" + << codec.spatialLayers[1].width; + return nullptr; + } + // Validate ratio is consistent for all spatial layer transitions. + for (int sid = 1; sid < num_spatial_layers; ++sid) { + if (codec.spatialLayers[sid].width * numerator != + codec.spatialLayers[sid - 1].width * denominator || + codec.spatialLayers[sid].height * numerator != + codec.spatialLayers[sid - 1].height * denominator) { + RTC_LOG(LS_WARNING) << "Inconsistent scalability ratio " << numerator + << ":" << denominator; + return nullptr; + } + } + } -Vp9RateSettings GetRateSettings(double bandwidth_headroom_factor) { - static const Vp9RateSettings low_settings{100u, 0u, 100u, 33u, 40u}; - static const Vp9RateSettings high_settings{50u, 50u, 1000u, 700u, 5u}; - - if (bandwidth_headroom_factor <= kLowRateFactor) { - return low_settings; - } else if (bandwidth_headroom_factor >= kHighRateFactor) { - return high_settings; - } - - Vp9RateSettings settings; - settings.rc_undershoot_pct = - Interpolate(low_settings.rc_undershoot_pct, - high_settings.rc_undershoot_pct, bandwidth_headroom_factor); - settings.rc_overshoot_pct = - Interpolate(low_settings.rc_overshoot_pct, high_settings.rc_overshoot_pct, - 
bandwidth_headroom_factor); - settings.rc_buf_sz = - Interpolate(low_settings.rc_buf_sz, high_settings.rc_buf_sz, - bandwidth_headroom_factor); - settings.rc_buf_optimal_sz = - Interpolate(low_settings.rc_buf_optimal_sz, - high_settings.rc_buf_optimal_sz, bandwidth_headroom_factor); - settings.rc_dropframe_thresh = - Interpolate(low_settings.rc_dropframe_thresh, - high_settings.rc_dropframe_thresh, bandwidth_headroom_factor); - return settings; + auto scalability_structure_controller = CreateScalabilityStructure(name); + if (scalability_structure_controller == nullptr) { + RTC_LOG(LS_WARNING) << "Unsupported scalability structure " << name; + } else { + RTC_LOG(LS_INFO) << "Created scalability structure " << name; + } + return scalability_structure_controller; } -void UpdateRateSettings(vpx_codec_enc_cfg_t* config, - const Vp9RateSettings& new_settings) { - config->rc_undershoot_pct = new_settings.rc_undershoot_pct; - config->rc_overshoot_pct = new_settings.rc_overshoot_pct; - config->rc_buf_sz = new_settings.rc_buf_sz; - config->rc_buf_optimal_sz = new_settings.rc_buf_optimal_sz; - config->rc_dropframe_thresh = new_settings.rc_dropframe_thresh; +vpx_svc_ref_frame_config_t Vp9References( + rtc::ArrayView layers) { + vpx_svc_ref_frame_config_t ref_config = {}; + for (const ScalableVideoController::LayerFrameConfig& layer_frame : layers) { + const auto& buffers = layer_frame.Buffers(); + RTC_DCHECK_LE(buffers.size(), 3); + int sid = layer_frame.SpatialId(); + if (!buffers.empty()) { + ref_config.lst_fb_idx[sid] = buffers[0].id; + ref_config.reference_last[sid] = buffers[0].referenced; + if (buffers[0].updated) { + ref_config.update_buffer_slot[sid] |= (1 << buffers[0].id); + } + } + if (buffers.size() > 1) { + ref_config.gld_fb_idx[sid] = buffers[1].id; + ref_config.reference_golden[sid] = buffers[1].referenced; + if (buffers[1].updated) { + ref_config.update_buffer_slot[sid] |= (1 << buffers[1].id); + } + } + if (buffers.size() > 2) { + ref_config.alt_fb_idx[sid] = 
buffers[2].id; + ref_config.reference_alt_ref[sid] = buffers[2].referenced; + if (buffers[2].updated) { + ref_config.update_buffer_slot[sid] |= (1 << buffers[2].id); + } + } + } + // TODO(bugs.webrtc.org/11999): Fill ref_config.duration + return ref_config; } } // namespace -void VP9EncoderImpl::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt, - void* user_data) { - VP9EncoderImpl* enc = static_cast(user_data); +void LibvpxVp9Encoder::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt, + void* user_data) { + LibvpxVp9Encoder* enc = static_cast(user_data); enc->GetEncodedLayerFrame(pkt); } -VP9EncoderImpl::VP9EncoderImpl(const cricket::VideoCodec& codec) - : encoded_image_(), +LibvpxVp9Encoder::LibvpxVp9Encoder(const cricket::VideoCodec& codec, + std::unique_ptr interface, + const WebRtcKeyValueConfig& trials) + : libvpx_(std::move(interface)), + encoded_image_(), encoded_complete_callback_(nullptr), profile_( ParseSdpForVP9Profile(codec.params).value_or(VP9Profile::kProfile0)), inited_(false), timestamp_(0), - cpu_speed_(3), rc_max_intra_target_(0), encoder_(nullptr), config_(nullptr), @@ -230,46 +213,52 @@ VP9EncoderImpl::VP9EncoderImpl(const cricket::VideoCodec& codec) num_spatial_layers_(0), num_active_spatial_layers_(0), first_active_layer_(0), - layer_deactivation_requires_key_frame_( - field_trial::IsEnabled("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation")), + layer_deactivation_requires_key_frame_(absl::StartsWith( + trials.Lookup("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation"), + "Enabled")), is_svc_(false), inter_layer_pred_(InterLayerPredMode::kOn), external_ref_control_(false), // Set in InitEncode because of tests. 
- trusted_rate_controller_(RateControlSettings::ParseFromFieldTrials() - .LibvpxVp9TrustedRateController()), - dynamic_rate_settings_( - RateControlSettings::ParseFromFieldTrials().Vp9DynamicRateSettings()), + trusted_rate_controller_( + RateControlSettings::ParseFromKeyValueConfig(&trials) + .LibvpxVp9TrustedRateController()), layer_buffering_(false), full_superframe_drop_(true), first_frame_in_picture_(true), ss_info_needed_(false), force_all_active_layers_(false), + use_svc_controller_( + absl::StartsWith(trials.Lookup("WebRTC-Vp9DependencyDescriptor"), + "Enabled")), is_flexible_mode_(false), - variable_framerate_experiment_(ParseVariableFramerateConfig( - "WebRTC-VP9VariableFramerateScreenshare")), + variable_framerate_experiment_(ParseVariableFramerateConfig(trials)), variable_framerate_controller_( variable_framerate_experiment_.framerate_limit), + quality_scaler_experiment_(ParseQualityScalerConfig(trials)), + external_ref_ctrl_( + !absl::StartsWith(trials.Lookup("WebRTC-Vp9ExternalRefCtrl"), + "Disabled")), + performance_flags_(ParsePerformanceFlagsFromTrials(trials)), num_steady_state_frames_(0), config_changed_(true) { codec_ = {}; memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t)); } -VP9EncoderImpl::~VP9EncoderImpl() { +LibvpxVp9Encoder::~LibvpxVp9Encoder() { Release(); } -void VP9EncoderImpl::SetFecControllerOverride( - FecControllerOverride* fec_controller_override) { +void LibvpxVp9Encoder::SetFecControllerOverride(FecControllerOverride*) { // Ignored. 
} -int VP9EncoderImpl::Release() { +int LibvpxVp9Encoder::Release() { int ret_val = WEBRTC_VIDEO_CODEC_OK; if (encoder_ != nullptr) { if (inited_) { - if (vpx_codec_destroy(encoder_)) { + if (libvpx_->codec_destroy(encoder_)) { ret_val = WEBRTC_VIDEO_CODEC_MEMORY; } } @@ -281,20 +270,20 @@ int VP9EncoderImpl::Release() { config_ = nullptr; } if (raw_ != nullptr) { - vpx_img_free(raw_); + libvpx_->img_free(raw_); raw_ = nullptr; } inited_ = false; return ret_val; } -bool VP9EncoderImpl::ExplicitlyConfiguredSpatialLayers() const { +bool LibvpxVp9Encoder::ExplicitlyConfiguredSpatialLayers() const { // We check target_bitrate_bps of the 0th layer to see if the spatial layers // (i.e. bitrates) were explicitly configured. return codec_.spatialLayers[0].targetBitrate > 0; } -bool VP9EncoderImpl::SetSvcRates( +bool LibvpxVp9Encoder::SetSvcRates( const VideoBitrateAllocation& bitrate_allocation) { std::pair current_layers = GetActiveLayers(current_bitrate_allocation_); @@ -399,7 +388,6 @@ bool VP9EncoderImpl::SetSvcRates( expect_no_more_active_layers = seen_active_layer; } } - RTC_DCHECK_GT(num_active_spatial_layers_, 0); if (higher_layers_enabled && !force_key_frame_) { // Prohibit drop of all layers for the next frame, so newly enabled @@ -410,12 +398,24 @@ bool VP9EncoderImpl::SetSvcRates( force_all_active_layers_ = true; } + if (svc_controller_) { + VideoBitrateAllocation allocation; + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + for (int tid = 0; tid < num_temporal_layers_; ++tid) { + allocation.SetBitrate( + sid, tid, + config_->layer_target_bitrate[sid * num_temporal_layers_ + tid] * + 1000); + } + } + svc_controller_->OnRatesUpdated(allocation); + } current_bitrate_allocation_ = bitrate_allocation; config_changed_ = true; return true; } -void VP9EncoderImpl::SetRates(const RateControlParameters& parameters) { +void LibvpxVp9Encoder::SetRates(const RateControlParameters& parameters) { if (!inited_) { RTC_LOG(LS_WARNING) << "SetRates() calll while 
uninitialzied."; return; @@ -432,21 +432,14 @@ void VP9EncoderImpl::SetRates(const RateControlParameters& parameters) { codec_.maxFramerate = static_cast(parameters.framerate_fps + 0.5); - if (dynamic_rate_settings_) { - // Tweak rate control settings based on available network headroom. - UpdateRateSettings( - config_, GetRateSettings(parameters.bandwidth_allocation.bps() / - parameters.bitrate.get_sum_bps())); - } - bool res = SetSvcRates(parameters.bitrate); RTC_DCHECK(res) << "Failed to set new bitrate allocation"; config_changed_ = true; } // TODO(eladalon): s/inst/codec_settings/g. -int VP9EncoderImpl::InitEncode(const VideoCodec* inst, - const Settings& settings) { +int LibvpxVp9Encoder::InitEncode(const VideoCodec* inst, + const Settings& settings) { if (inst == nullptr) { return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; } @@ -471,6 +464,9 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst, return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; } + absl::optional previous_img_fmt = + raw_ ? absl::make_optional(raw_->fmt) : absl::nullopt; + int ret_val = Release(); if (ret_val < 0) { return ret_val; @@ -485,6 +481,7 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst, if (&codec_ != inst) { codec_ = *inst; } + memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t)); force_key_frame_ = true; pics_since_key_ = 0; @@ -496,14 +493,16 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst, num_temporal_layers_ = 1; } + if (use_svc_controller_) { + svc_controller_ = CreateVp9ScalabilityStructure(*inst); + } framerate_controller_ = std::vector( num_spatial_layers_, FramerateController(codec_.maxFramerate)); is_svc_ = (num_spatial_layers_ > 1 || num_temporal_layers_ > 1); - encoded_image_._completeFrame = true; // Populate encoder configuration with default values. 
- if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) { + if (libvpx_->codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) { return WEBRTC_VIDEO_CODEC_ERROR; } @@ -511,12 +510,17 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst, unsigned int bits_for_storage = 8; switch (profile_) { case VP9Profile::kProfile0: - img_fmt = VPX_IMG_FMT_I420; + img_fmt = previous_img_fmt.value_or(VPX_IMG_FMT_I420); bits_for_storage = 8; config_->g_bit_depth = VPX_BITS_8; config_->g_profile = 0; config_->g_input_bit_depth = 8; break; + case VP9Profile::kProfile1: + // Encoding of profile 1 is not implemented. It would require extended + // support for I444, I422, and I440 buffers. + RTC_NOTREACHED(); + break; case VP9Profile::kProfile2: img_fmt = VPX_IMG_FMT_I42016; bits_for_storage = 16; @@ -529,8 +533,8 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst, // Creating a wrapper to the image - setting image data to nullptr. Actual // pointer will be set in encode. Setting align to 1, as it is meaningless // (actual memory is not allocated). - raw_ = - vpx_img_wrap(nullptr, img_fmt, codec_.width, codec_.height, 1, nullptr); + raw_ = libvpx_->img_wrap(nullptr, img_fmt, codec_.width, codec_.height, 1, + nullptr); raw_->bit_depth = bits_for_storage; config_->g_w = codec_.width; @@ -562,13 +566,17 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst, // put some key-frames at will even in VPX_KF_DISABLED kf_mode. config_->kf_max_dist = inst->VP9().keyFrameInterval; config_->kf_min_dist = config_->kf_max_dist; - config_->rc_resize_allowed = inst->VP9().automaticResizeOn ? 1 : 0; + if (quality_scaler_experiment_.enabled) { + // In that experiment webrtc wide quality scaler is used instead of libvpx + // internal scaler. + config_->rc_resize_allowed = 0; + } else { + config_->rc_resize_allowed = inst->VP9().automaticResizeOn ? 1 : 0; + } // Determine number of threads based on the image size and #cores. 
config_->g_threads = NumberOfThreads(config_->g_w, config_->g_h, settings.number_of_cores); - cpu_speed_ = GetCpuSpeed(config_->g_w, config_->g_h); - is_flexible_mode_ = inst->VP9().flexibleMode; inter_layer_pred_ = inst->VP9().interLayerPred; @@ -582,18 +590,10 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst, // External reference control is required for different frame rate on spatial // layers because libvpx generates rtp incompatible references in this case. - external_ref_control_ = - !field_trial::IsDisabled("WebRTC-Vp9ExternalRefCtrl") || - (num_spatial_layers_ > 1 && - codec_.mode == VideoCodecMode::kScreensharing) || - inter_layer_pred_ == InterLayerPredMode::kOn; - // TODO(ilnik): Remove this workaround once external reference control works - // nicely with simulcast SVC mode. - // Simlucast SVC mode is currently only used in some tests and is impossible - // to trigger for users without using some field trials. - if (inter_layer_pred_ == InterLayerPredMode::kOff) { - external_ref_control_ = false; - } + external_ref_control_ = external_ref_ctrl_ || + (num_spatial_layers_ > 1 && + codec_.mode == VideoCodecMode::kScreensharing) || + inter_layer_pred_ == InterLayerPredMode::kOn; if (num_temporal_layers_ == 1) { gof_.SetGofInfoVP9(kTemporalStructureMode1); @@ -641,9 +641,9 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst, return InitAndSetControlSettings(inst); } -int VP9EncoderImpl::NumberOfThreads(int width, - int height, - int number_of_cores) { +int LibvpxVp9Encoder::NumberOfThreads(int width, + int height, + int number_of_cores) { // Keep the number of encoder threads equal to the possible number of column // tiles, which is (1, 2, 4, 8). See comments below for VP9E_SET_TILE_COLUMNS. 
if (width * height >= 1280 * 720 && number_of_cores > 4) { @@ -663,7 +663,7 @@ int VP9EncoderImpl::NumberOfThreads(int width, } } -int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) { +int LibvpxVp9Encoder::InitAndSetControlSettings(const VideoCodec* inst) { // Set QP-min/max per spatial and temporal layer. int tot_num_layers = num_spatial_layers_ * num_temporal_layers_; for (int i = 0; i < tot_num_layers; ++i) { @@ -671,7 +671,13 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) { svc_params_.min_quantizers[i] = config_->rc_min_quantizer; } config_->ss_number_layers = num_spatial_layers_; - if (ExplicitlyConfiguredSpatialLayers()) { + if (svc_controller_) { + auto stream_config = svc_controller_->StreamConfig(); + for (int i = 0; i < stream_config.num_spatial_layers; ++i) { + svc_params_.scaling_factor_num[i] = stream_config.scaling_factor_num[i]; + svc_params_.scaling_factor_den[i] = stream_config.scaling_factor_den[i]; + } + } else if (ExplicitlyConfiguredSpatialLayers()) { for (int i = 0; i < num_spatial_layers_; ++i) { const auto& layer = codec_.spatialLayers[i]; RTC_CHECK_GT(layer.width, 0); @@ -723,37 +729,54 @@ int VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) { return WEBRTC_VIDEO_CODEC_ERR_PARAMETER; } - const vpx_codec_err_t rv = vpx_codec_enc_init( + const vpx_codec_err_t rv = libvpx_->codec_enc_init( encoder_, vpx_codec_vp9_cx(), config_, config_->g_bit_depth == VPX_BITS_8 ? 0 : VPX_CODEC_USE_HIGHBITDEPTH); if (rv != VPX_CODEC_OK) { - RTC_LOG(LS_ERROR) << "Init error: " << vpx_codec_err_to_string(rv); + RTC_LOG(LS_ERROR) << "Init error: " << libvpx_->codec_err_to_string(rv); return WEBRTC_VIDEO_CODEC_UNINITIALIZED; } - vpx_codec_control(encoder_, VP8E_SET_CPUUSED, cpu_speed_); - vpx_codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT, - rc_max_intra_target_); - vpx_codec_control(encoder_, VP9E_SET_AQ_MODE, - inst->VP9().adaptiveQpMode ? 
3 : 0); - vpx_codec_control(encoder_, VP9E_SET_FRAME_PARALLEL_DECODING, 0); - vpx_codec_control(encoder_, VP9E_SET_SVC_GF_TEMPORAL_REF, 0); + UpdatePerformanceFlags(); + RTC_DCHECK_EQ(performance_flags_by_spatial_index_.size(), + static_cast(num_spatial_layers_)); + if (performance_flags_.use_per_layer_speed) { + for (int si = 0; si < num_spatial_layers_; ++si) { + svc_params_.speed_per_layer[si] = + performance_flags_by_spatial_index_[si].base_layer_speed; + svc_params_.loopfilter_ctrl[si] = + performance_flags_by_spatial_index_[si].deblock_mode; + } + } + + libvpx_->codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT, + rc_max_intra_target_); + libvpx_->codec_control(encoder_, VP9E_SET_AQ_MODE, + inst->VP9().adaptiveQpMode ? 3 : 0); + + libvpx_->codec_control(encoder_, VP9E_SET_FRAME_PARALLEL_DECODING, 0); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_GF_TEMPORAL_REF, 0); if (is_svc_) { - vpx_codec_control(encoder_, VP9E_SET_SVC, 1); - vpx_codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_); + libvpx_->codec_control(encoder_, VP9E_SET_SVC, 1); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_); + } + if (!is_svc_ || !performance_flags_.use_per_layer_speed) { + libvpx_->codec_control( + encoder_, VP8E_SET_CPUUSED, + performance_flags_by_spatial_index_.rbegin()->base_layer_speed); } if (num_spatial_layers_ > 1) { switch (inter_layer_pred_) { case InterLayerPredMode::kOn: - vpx_codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 0); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 0); break; case InterLayerPredMode::kOff: - vpx_codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 1); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 1); break; case InterLayerPredMode::kOnKeyPic: - vpx_codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 2); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 2); break; default: RTC_NOTREACHED(); @@ -790,46 +813,47 @@ int 
VP9EncoderImpl::InitAndSetControlSettings(const VideoCodec* inst) { svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh; } } - vpx_codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER, - &svc_drop_frame_); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER, + &svc_drop_frame_); } // Register callback for getting each spatial layer. vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = { - VP9EncoderImpl::EncoderOutputCodedPacketCallback, + LibvpxVp9Encoder::EncoderOutputCodedPacketCallback, reinterpret_cast(this)}; - vpx_codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK, - reinterpret_cast(&cbp)); + libvpx_->codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK, + reinterpret_cast(&cbp)); // Control function to set the number of column tiles in encoding a frame, in // log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns. // The number tile columns will be capped by the encoder based on image size // (minimum width of tile column is 256 pixels, maximum is 4096). - vpx_codec_control(encoder_, VP9E_SET_TILE_COLUMNS, (config_->g_threads >> 1)); + libvpx_->codec_control(encoder_, VP9E_SET_TILE_COLUMNS, + static_cast((config_->g_threads >> 1))); // Turn on row-based multithreading. - vpx_codec_control(encoder_, VP9E_SET_ROW_MT, 1); + libvpx_->codec_control(encoder_, VP9E_SET_ROW_MT, 1); #if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) && \ !defined(ANDROID) // Do not enable the denoiser on ARM since optimization is pending. // Denoiser is on by default on other platforms. - vpx_codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY, - inst->VP9().denoisingOn ? 1 : 0); + libvpx_->codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY, + inst->VP9().denoisingOn ? 1 : 0); #endif if (codec_.mode == VideoCodecMode::kScreensharing) { // Adjust internal parameters to screen content. 
- vpx_codec_control(encoder_, VP9E_SET_TUNE_CONTENT, 1); + libvpx_->codec_control(encoder_, VP9E_SET_TUNE_CONTENT, 1); } // Enable encoder skip of static/low content blocks. - vpx_codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1); + libvpx_->codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1); inited_ = true; config_changed_ = true; return WEBRTC_VIDEO_CODEC_OK; } -uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) { +uint32_t LibvpxVp9Encoder::MaxIntraTarget(uint32_t optimal_buffer_size) { // Set max to the optimal buffer level (normalized by target BR), // and scaled by a scale_par. // Max target size = scale_par * optimal_buffer_size * targetBR[Kbps]. @@ -844,8 +868,8 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) { return (target_pct < min_intra_size) ? min_intra_size : target_pct; } -int VP9EncoderImpl::Encode(const VideoFrame& input_image, - const std::vector* frame_types) { +int LibvpxVp9Encoder::Encode(const VideoFrame& input_image, + const std::vector* frame_types) { if (!inited_) { return WEBRTC_VIDEO_CODEC_UNINITIALIZED; } @@ -869,6 +893,13 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image, force_key_frame_ = true; } + if (svc_controller_) { + layer_frames_ = svc_controller_->NextFrameConfig(force_key_frame_); + if (layer_frames_.empty()) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + } + vpx_svc_layer_id_t layer_id = {0}; if (!force_key_frame_) { const size_t gof_idx = (pics_since_key_ + 1) % gof_.num_frames_in_gof; @@ -940,18 +971,64 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image, layer_id.spatial_layer_id = first_active_layer_; } - vpx_codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id); + if (svc_controller_) { + layer_id.spatial_layer_id = layer_frames_.front().SpatialId(); + layer_id.temporal_layer_id = layer_frames_.front().TemporalId(); + for (const auto& layer : layer_frames_) { + layer_id.temporal_layer_id_per_spatial[layer.SpatialId()] = + layer.TemporalId(); + } + } + 
+ if (is_svc_ && performance_flags_.use_per_layer_speed) { + // Update speed settings that might depend on temporal index. + bool speed_updated = false; + for (int sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) { + const int target_speed = + layer_id.temporal_layer_id_per_spatial[sl_idx] == 0 + ? performance_flags_by_spatial_index_[sl_idx].base_layer_speed + : performance_flags_by_spatial_index_[sl_idx].high_layer_speed; + if (svc_params_.speed_per_layer[sl_idx] != target_speed) { + svc_params_.speed_per_layer[sl_idx] = target_speed; + speed_updated = true; + } + } + if (speed_updated) { + libvpx_->codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_); + } + } + + libvpx_->codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id); if (num_spatial_layers_ > 1) { // Update frame dropping settings as they may change on per-frame basis. - vpx_codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER, - &svc_drop_frame_); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER, + &svc_drop_frame_); } if (config_changed_) { - if (vpx_codec_enc_config_set(encoder_, config_)) { + if (libvpx_->codec_enc_config_set(encoder_, config_)) { return WEBRTC_VIDEO_CODEC_ERROR; } + + if (!performance_flags_.use_per_layer_speed) { + // Not setting individual speeds per layer, find the highest active + // resolution instead and base the speed on that. 
+ for (int i = num_spatial_layers_ - 1; i >= 0; --i) { + if (config_->ss_target_bitrate[i] > 0) { + int width = (svc_params_.scaling_factor_num[i] * config_->g_w) / + svc_params_.scaling_factor_den[i]; + int height = (svc_params_.scaling_factor_num[i] * config_->g_h) / + svc_params_.scaling_factor_den[i]; + int speed = + std::prev(performance_flags_.settings_by_resolution.lower_bound( + width * height)) + ->second.base_layer_speed; + libvpx_->codec_control(encoder_, VP8E_SET_CPUUSED, speed); + break; + } + } + } config_changed_ = false; } @@ -964,21 +1041,22 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image, // doing this. input_image_ = &input_image; - // Keep reference to buffer until encode completes. - rtc::scoped_refptr i420_buffer; + // In case we need to map the buffer, |mapped_buffer| is used to keep it alive + // through reference counting until after encoding has finished. + rtc::scoped_refptr mapped_buffer; const I010BufferInterface* i010_buffer; rtc::scoped_refptr i010_copy; switch (profile_) { case VP9Profile::kProfile0: { - i420_buffer = input_image.video_frame_buffer()->ToI420(); - // Image in vpx_image_t format. - // Input image is const. VPX's raw image is not defined as const. 
- raw_->planes[VPX_PLANE_Y] = const_cast(i420_buffer->DataY()); - raw_->planes[VPX_PLANE_U] = const_cast(i420_buffer->DataU()); - raw_->planes[VPX_PLANE_V] = const_cast(i420_buffer->DataV()); - raw_->stride[VPX_PLANE_Y] = i420_buffer->StrideY(); - raw_->stride[VPX_PLANE_U] = i420_buffer->StrideU(); - raw_->stride[VPX_PLANE_V] = i420_buffer->StrideV(); + mapped_buffer = + PrepareBufferForProfile0(input_image.video_frame_buffer()); + if (!mapped_buffer) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + break; + } + case VP9Profile::kProfile1: { + RTC_NOTREACHED(); break; } case VP9Profile::kProfile2: { @@ -990,8 +1068,15 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image, break; } default: { - i010_copy = - I010Buffer::Copy(*input_image.video_frame_buffer()->ToI420()); + auto i420_buffer = input_image.video_frame_buffer()->ToI420(); + if (!i420_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString( + input_image.video_frame_buffer()->type()) + << " image to I420. 
Can't encode frame."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + i010_copy = I010Buffer::Copy(*i420_buffer); i010_buffer = i010_copy.get(); } } @@ -1013,7 +1098,11 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image, flags = VPX_EFLAG_FORCE_KF; } - if (external_ref_control_) { + if (svc_controller_) { + vpx_svc_ref_frame_config_t ref_config = Vp9References(layer_frames_); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, + &ref_config); + } else if (external_ref_control_) { vpx_svc_ref_frame_config_t ref_config = SetReferences(force_key_frame_, layer_id.spatial_layer_id); @@ -1025,7 +1114,8 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image, } } - vpx_codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, &ref_config); + libvpx_->codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG, + &ref_config); } first_frame_in_picture_ = true; @@ -1045,14 +1135,14 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image, .GetTargetRate()) : codec_.maxFramerate; uint32_t duration = static_cast(90000 / target_framerate_fps); - const vpx_codec_err_t rv = vpx_codec_encode(encoder_, raw_, timestamp_, - duration, flags, VPX_DL_REALTIME); + const vpx_codec_err_t rv = libvpx_->codec_encode( + encoder_, raw_, timestamp_, duration, flags, VPX_DL_REALTIME); if (rv != VPX_CODEC_OK) { - RTC_LOG(LS_ERROR) << "Encoding error: " << vpx_codec_err_to_string(rv) + RTC_LOG(LS_ERROR) << "Encoding error: " << libvpx_->codec_err_to_string(rv) << "\n" "Details: " - << vpx_codec_error(encoder_) << "\n" - << vpx_codec_error_detail(encoder_); + << libvpx_->codec_error(encoder_) << "\n" + << libvpx_->codec_error_detail(encoder_); return WEBRTC_VIDEO_CODEC_ERROR; } timestamp_ += duration; @@ -1065,10 +1155,10 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image, return WEBRTC_VIDEO_CODEC_OK; } -void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, - absl::optional* spatial_idx, - const vpx_codec_cx_pkt& pkt, - uint32_t timestamp) { +bool 
LibvpxVp9Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, + absl::optional* spatial_idx, + const vpx_codec_cx_pkt& pkt, + uint32_t timestamp) { RTC_CHECK(codec_specific != nullptr); codec_specific->codecType = kVideoCodecVP9; CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9); @@ -1083,7 +1173,7 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, } vpx_svc_layer_id_t layer_id = {0}; - vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); + libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); // Can't have keyframe with non-zero temporal layer. RTC_DCHECK(pics_since_key_ != 0 || layer_id.temporal_layer_id == 0); @@ -1179,14 +1269,45 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, } first_frame_in_picture_ = false; + + // Populate codec-agnostic section in the codec specific structure. + if (svc_controller_) { + auto it = absl::c_find_if( + layer_frames_, + [&](const ScalableVideoController::LayerFrameConfig& config) { + return config.SpatialId() == layer_id.spatial_layer_id; + }); + if (it == layer_frames_.end()) { + RTC_LOG(LS_ERROR) << "Encoder produced a frame for layer S" + << layer_id.spatial_layer_id << "T" + << layer_id.temporal_layer_id + << " that wasn't requested."; + return false; + } + codec_specific->generic_frame_info = svc_controller_->OnEncodeDone(*it); + if (is_key_frame) { + codec_specific->template_structure = + svc_controller_->DependencyStructure(); + auto& resolutions = codec_specific->template_structure->resolutions; + resolutions.resize(num_spatial_layers_); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + resolutions[sid] = RenderResolution( + /*width=*/codec_.width * svc_params_.scaling_factor_num[sid] / + svc_params_.scaling_factor_den[sid], + /*height=*/codec_.height * svc_params_.scaling_factor_num[sid] / + svc_params_.scaling_factor_den[sid]); + } + } + } + return true; } -void 
VP9EncoderImpl::FillReferenceIndices(const vpx_codec_cx_pkt& pkt, - const size_t pic_num, - const bool inter_layer_predicted, - CodecSpecificInfoVP9* vp9_info) { +void LibvpxVp9Encoder::FillReferenceIndices(const vpx_codec_cx_pkt& pkt, + const size_t pic_num, + const bool inter_layer_predicted, + CodecSpecificInfoVP9* vp9_info) { vpx_svc_layer_id_t layer_id = {0}; - vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); + libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); const bool is_key_frame = (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? true : false; @@ -1195,7 +1316,8 @@ void VP9EncoderImpl::FillReferenceIndices(const vpx_codec_cx_pkt& pkt, if (is_svc_) { vpx_svc_ref_frame_config_t enc_layer_conf = {{0}}; - vpx_codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG, &enc_layer_conf); + libvpx_->codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG, + &enc_layer_conf); int ref_buf_flags = 0; if (enc_layer_conf.reference_last[layer_id.spatial_layer_id]) { @@ -1300,17 +1422,18 @@ void VP9EncoderImpl::FillReferenceIndices(const vpx_codec_cx_pkt& pkt, static_cast(layer_id.temporal_layer_id)); } -void VP9EncoderImpl::UpdateReferenceBuffers(const vpx_codec_cx_pkt& pkt, - const size_t pic_num) { +void LibvpxVp9Encoder::UpdateReferenceBuffers(const vpx_codec_cx_pkt& pkt, + const size_t pic_num) { vpx_svc_layer_id_t layer_id = {0}; - vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); + libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); RefFrameBuffer frame_buf(pic_num, layer_id.spatial_layer_id, layer_id.temporal_layer_id); if (is_svc_) { vpx_svc_ref_frame_config_t enc_layer_conf = {{0}}; - vpx_codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG, &enc_layer_conf); + libvpx_->codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG, + &enc_layer_conf); const int update_buffer_slot = enc_layer_conf.update_buffer_slot[layer_id.spatial_layer_id]; @@ -1340,7 +1463,7 @@ void VP9EncoderImpl::UpdateReferenceBuffers(const 
vpx_codec_cx_pkt& pkt, } } -vpx_svc_ref_frame_config_t VP9EncoderImpl::SetReferences( +vpx_svc_ref_frame_config_t LibvpxVp9Encoder::SetReferences( bool is_key_pic, size_t first_active_spatial_layer_id) { // kRefBufIdx, kUpdBufIdx need to be updated to support longer GOFs. @@ -1434,16 +1557,16 @@ vpx_svc_ref_frame_config_t VP9EncoderImpl::SetReferences( return ref_config; } -int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { +void LibvpxVp9Encoder::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { RTC_DCHECK_EQ(pkt->kind, VPX_CODEC_CX_FRAME_PKT); if (pkt->data.frame.sz == 0) { // Ignore dropped frame. - return WEBRTC_VIDEO_CODEC_OK; + return; } vpx_svc_layer_id_t layer_id = {0}; - vpx_codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); + libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id); if (layer_buffering_) { // Deliver buffered low spatial layer frame. @@ -1467,12 +1590,15 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { encoded_image_._frameType = VideoFrameType::kVideoFrameKey; force_key_frame_ = false; } - RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity()); codec_specific_ = {}; absl::optional spatial_index; - PopulateCodecSpecific(&codec_specific_, &spatial_index, *pkt, - input_image_->timestamp()); + if (!PopulateCodecSpecific(&codec_specific_, &spatial_index, *pkt, + input_image_->timestamp())) { + // Drop the frame. 
+ encoded_image_.set_size(0); + return; + } encoded_image_.SetSpatialIndex(spatial_index); UpdateReferenceBuffers(*pkt, pics_since_key_); @@ -1484,7 +1610,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { encoded_image_._encodedWidth = pkt->data.frame.width[layer_id.spatial_layer_id]; int qp = -1; - vpx_codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp); + libvpx_->codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp); encoded_image_.qp_ = qp; if (!layer_buffering_) { @@ -1492,11 +1618,9 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) { num_active_spatial_layers_; DeliverBufferedFrame(end_of_picture); } - - return WEBRTC_VIDEO_CODEC_OK; } -void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) { +void LibvpxVp9Encoder::DeliverBufferedFrame(bool end_of_picture) { if (encoded_image_.size() > 0) { if (num_spatial_layers_ > 1) { // Restore frame dropping settings, as dropping may be temporary forbidden @@ -1506,17 +1630,10 @@ void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) { } } - codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture; + codec_specific_.end_of_picture = end_of_picture; - // No data partitioning in VP9, so 1 partition only. 
- int part_idx = 0; - RTPFragmentationHeader frag_info; - frag_info.VerifyAndAllocateFragmentationHeader(1); - frag_info.fragmentationOffset[part_idx] = 0; - frag_info.fragmentationLength[part_idx] = encoded_image_.size(); - - encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific_, - &frag_info); + encoded_complete_callback_->OnEncodedImage(encoded_image_, + &codec_specific_); if (codec_.mode == VideoCodecMode::kScreensharing) { const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0); @@ -1544,17 +1661,23 @@ void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) { } } -int VP9EncoderImpl::RegisterEncodeCompleteCallback( +int LibvpxVp9Encoder::RegisterEncodeCompleteCallback( EncodedImageCallback* callback) { encoded_complete_callback_ = callback; return WEBRTC_VIDEO_CODEC_OK; } -VideoEncoder::EncoderInfo VP9EncoderImpl::GetEncoderInfo() const { +VideoEncoder::EncoderInfo LibvpxVp9Encoder::GetEncoderInfo() const { EncoderInfo info; info.supports_native_handle = false; info.implementation_name = "libvpx"; - info.scaling_settings = VideoEncoder::ScalingSettings::kOff; + if (quality_scaler_experiment_.enabled && inited_ && + codec_.VP9().automaticResizeOn) { + info.scaling_settings = VideoEncoder::ScalingSettings( + quality_scaler_experiment_.low_qp, quality_scaler_experiment_.high_qp); + } else { + info.scaling_settings = VideoEncoder::ScalingSettings::kOff; + } info.has_trusted_rate_controller = trusted_rate_controller_; info.is_hardware_accelerated = false; info.has_internal_source = false; @@ -1586,11 +1709,19 @@ VideoEncoder::EncoderInfo VP9EncoderImpl::GetEncoderInfo() const { (sl_fps_fraction / decimator))); } } + if (profile_ == VP9Profile::kProfile0) { + info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420, + VideoFrameBuffer::Type::kNV12}; + } + } + if (!encoder_info_override_.resolution_bitrate_limits().empty()) { + info.resolution_bitrate_limits = + encoder_info_override_.resolution_bitrate_limits(); } 
return info; } -size_t VP9EncoderImpl::SteadyStateSize(int sid, int tid) { +size_t LibvpxVp9Encoder::SteadyStateSize(int sid, int tid) { const size_t bitrate_bps = current_bitrate_allocation_.GetBitrate( sid, tid == kNoTemporalIdx ? 0 : tid); const float fps = (codec_.mode == VideoCodecMode::kScreensharing) @@ -1606,8 +1737,9 @@ size_t VP9EncoderImpl::SteadyStateSize(int sid, int tid) { } // static -VP9EncoderImpl::VariableFramerateExperiment -VP9EncoderImpl::ParseVariableFramerateConfig(std::string group_name) { +LibvpxVp9Encoder::VariableFramerateExperiment +LibvpxVp9Encoder::ParseVariableFramerateConfig( + const WebRtcKeyValueConfig& trials) { FieldTrialFlag enabled = FieldTrialFlag("Enabled"); FieldTrialParameter framerate_limit("min_fps", 5.0); FieldTrialParameter qp("min_qp", 32); @@ -1616,7 +1748,7 @@ VP9EncoderImpl::ParseVariableFramerateConfig(std::string group_name) { "frames_before_steady_state", 5); ParseFieldTrial({&enabled, &framerate_limit, &qp, &undershoot_percentage, &frames_before_steady_state}, - field_trial::FindFullName(group_name)); + trials.Lookup("WebRTC-VP9VariableFramerateScreenshare")); VariableFramerateExperiment config; config.enabled = enabled.Get(); config.framerate_limit = framerate_limit.Get(); @@ -1627,231 +1759,206 @@ VP9EncoderImpl::ParseVariableFramerateConfig(std::string group_name) { return config; } -VP9DecoderImpl::VP9DecoderImpl() - : decode_complete_callback_(nullptr), - inited_(false), - decoder_(nullptr), - key_frame_required_(true) {} +// static +LibvpxVp9Encoder::QualityScalerExperiment +LibvpxVp9Encoder::ParseQualityScalerConfig(const WebRtcKeyValueConfig& trials) { + FieldTrialFlag disabled = FieldTrialFlag("Disabled"); + FieldTrialParameter low_qp("low_qp", kLowVp9QpThreshold); + FieldTrialParameter high_qp("hihg_qp", kHighVp9QpThreshold); + ParseFieldTrial({&disabled, &low_qp, &high_qp}, + trials.Lookup("WebRTC-VP9QualityScaler")); + QualityScalerExperiment config; + config.enabled = !disabled.Get(); + 
RTC_LOG(LS_INFO) << "Webrtc quality scaler for vp9 is " + << (config.enabled ? "enabled." : "disabled"); + config.low_qp = low_qp.Get(); + config.high_qp = high_qp.Get(); -VP9DecoderImpl::~VP9DecoderImpl() { - inited_ = true; // in order to do the actual release - Release(); - int num_buffers_in_use = frame_buffer_pool_.GetNumBuffersInUse(); - if (num_buffers_in_use > 0) { - // The frame buffers are reference counted and frames are exposed after - // decoding. There may be valid usage cases where previous frames are still - // referenced after ~VP9DecoderImpl that is not a leak. - RTC_LOG(LS_INFO) << num_buffers_in_use - << " Vp9FrameBuffers are still " - "referenced during ~VP9DecoderImpl."; - } + return config; } -int VP9DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) { - int ret_val = Release(); - if (ret_val < 0) { - return ret_val; - } - - if (decoder_ == nullptr) { - decoder_ = new vpx_codec_ctx_t; - } - vpx_codec_dec_cfg_t cfg; - memset(&cfg, 0, sizeof(cfg)); - -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - // We focus on webrtc fuzzing here, not libvpx itself. Use single thread for - // fuzzing, because: - // - libvpx's VP9 single thread decoder is more fuzzer friendly. It detects - // errors earlier than the multi-threads version. - // - Make peak CPU usage under control (not depending on input) - cfg.threads = 1; - (void)kMaxNumTiles4kVideo; // unused -#else - // We want to use multithreading when decoding high resolution videos. But, - // since we don't know resolution of input stream at this stage, we always - // enable it. 
- cfg.threads = std::min(number_of_cores, kMaxNumTiles4kVideo); -#endif - - vpx_codec_flags_t flags = 0; - if (vpx_codec_dec_init(decoder_, vpx_codec_vp9_dx(), &cfg, flags)) { - return WEBRTC_VIDEO_CODEC_MEMORY; - } +void LibvpxVp9Encoder::UpdatePerformanceFlags() { + const auto find_speed = [&](int min_pixel_count) { + RTC_DCHECK(!performance_flags_.settings_by_resolution.empty()); + auto it = + performance_flags_.settings_by_resolution.upper_bound(min_pixel_count); + return std::prev(it)->second; + }; - if (!frame_buffer_pool_.InitializeVpxUsePool(decoder_)) { - return WEBRTC_VIDEO_CODEC_MEMORY; - } - - inited_ = true; - // Always start with a complete key frame. - key_frame_required_ = true; - if (inst && inst->buffer_pool_size) { - if (!frame_buffer_pool_.Resize(*inst->buffer_pool_size)) { - return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + performance_flags_by_spatial_index_.clear(); + if (is_svc_) { + for (int si = 0; si < num_spatial_layers_; ++si) { + performance_flags_by_spatial_index_.push_back(find_speed( + codec_.spatialLayers[si].width * codec_.spatialLayers[si].height)); } + } else { + performance_flags_by_spatial_index_.push_back( + find_speed(codec_.width * codec_.height)); } - return WEBRTC_VIDEO_CODEC_OK; } -int VP9DecoderImpl::Decode(const EncodedImage& input_image, - bool missing_frames, - int64_t /*render_time_ms*/) { - if (!inited_) { - return WEBRTC_VIDEO_CODEC_UNINITIALIZED; - } - if (decode_complete_callback_ == nullptr) { - return WEBRTC_VIDEO_CODEC_UNINITIALIZED; - } - // Always start with a complete key frame. - if (key_frame_required_) { - if (input_image._frameType != VideoFrameType::kVideoFrameKey) - return WEBRTC_VIDEO_CODEC_ERROR; - // We have a key frame - is it complete? 
- if (input_image._completeFrame) { - key_frame_required_ = false; - } else { - return WEBRTC_VIDEO_CODEC_ERROR; +// static +LibvpxVp9Encoder::PerformanceFlags +LibvpxVp9Encoder::ParsePerformanceFlagsFromTrials( + const WebRtcKeyValueConfig& trials) { + struct Params : public PerformanceFlags::ParameterSet { + int min_pixel_count = 0; + }; + + FieldTrialStructList trials_list( + {FieldTrialStructMember("min_pixel_count", + [](Params* p) { return &p->min_pixel_count; }), + FieldTrialStructMember("high_layer_speed", + [](Params* p) { return &p->high_layer_speed; }), + FieldTrialStructMember("base_layer_speed", + [](Params* p) { return &p->base_layer_speed; }), + FieldTrialStructMember("deblock_mode", + [](Params* p) { return &p->deblock_mode; })}, + {}); + + FieldTrialFlag per_layer_speed("use_per_layer_speed"); + + ParseFieldTrial({&trials_list, &per_layer_speed}, + trials.Lookup("WebRTC-VP9-PerformanceFlags")); + + PerformanceFlags flags; + flags.use_per_layer_speed = per_layer_speed.Get(); + + constexpr int kMinSpeed = 1; + constexpr int kMaxSpeed = 9; + for (auto& f : trials_list.Get()) { + if (f.base_layer_speed < kMinSpeed || f.base_layer_speed > kMaxSpeed || + f.high_layer_speed < kMinSpeed || f.high_layer_speed > kMaxSpeed || + f.deblock_mode < 0 || f.deblock_mode > 2) { + RTC_LOG(LS_WARNING) << "Ignoring invalid performance flags: " + << "min_pixel_count = " << f.min_pixel_count + << ", high_layer_speed = " << f.high_layer_speed + << ", base_layer_speed = " << f.base_layer_speed + << ", deblock_mode = " << f.deblock_mode; + continue; } + flags.settings_by_resolution[f.min_pixel_count] = f; } - vpx_codec_iter_t iter = nullptr; - vpx_image_t* img; - const uint8_t* buffer = input_image.data(); - if (input_image.size() == 0) { - buffer = nullptr; // Triggers full frame concealment. - } - // During decode libvpx may get and release buffers from |frame_buffer_pool_|. - // In practice libvpx keeps a few (~3-4) buffers alive at a time. 
- if (vpx_codec_decode(decoder_, buffer, - static_cast(input_image.size()), 0, - VPX_DL_REALTIME)) { - return WEBRTC_VIDEO_CODEC_ERROR; - } - // |img->fb_priv| contains the image data, a reference counted Vp9FrameBuffer. - // It may be released by libvpx during future vpx_codec_decode or - // vpx_codec_destroy calls. - img = vpx_codec_get_frame(decoder_, &iter); - int qp; - vpx_codec_err_t vpx_ret = - vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp); - RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK); - int ret = - ReturnFrame(img, input_image.Timestamp(), qp, input_image.ColorSpace()); - if (ret != 0) { - return ret; - } - return WEBRTC_VIDEO_CODEC_OK; -} -int VP9DecoderImpl::ReturnFrame( - const vpx_image_t* img, - uint32_t timestamp, - int qp, - const webrtc::ColorSpace* explicit_color_space) { - if (img == nullptr) { - // Decoder OK and nullptr image => No show frame. - return WEBRTC_VIDEO_CODEC_NO_OUTPUT; - } - - // This buffer contains all of |img|'s image data, a reference counted - // Vp9FrameBuffer. (libvpx is done with the buffers after a few - // vpx_codec_decode calls or vpx_codec_destroy). - Vp9FrameBufferPool::Vp9FrameBuffer* img_buffer = - static_cast(img->fb_priv); - - // The buffer can be used directly by the VideoFrame (without copy) by - // using a Wrapped*Buffer. - rtc::scoped_refptr img_wrapped_buffer; - switch (img->bit_depth) { - case 8: - if (img->fmt == VPX_IMG_FMT_I420) { - img_wrapped_buffer = WrapI420Buffer( - img->d_w, img->d_h, img->planes[VPX_PLANE_Y], - img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U], - img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V], - img->stride[VPX_PLANE_V], - // WrappedI420Buffer's mechanism for allowing the release of its - // frame buffer is through a callback function. This is where we - // should release |img_buffer|. 
- rtc::KeepRefUntilDone(img_buffer)); - } else if (img->fmt == VPX_IMG_FMT_I444) { - img_wrapped_buffer = WrapI444Buffer( - img->d_w, img->d_h, img->planes[VPX_PLANE_Y], - img->stride[VPX_PLANE_Y], img->planes[VPX_PLANE_U], - img->stride[VPX_PLANE_U], img->planes[VPX_PLANE_V], - img->stride[VPX_PLANE_V], - // WrappedI444Buffer's mechanism for allowing the release of its - // frame buffer is through a callback function. This is where we - // should release |img_buffer|. - rtc::KeepRefUntilDone(img_buffer)); - } else { - RTC_LOG(LS_ERROR) - << "Unsupported pixel format produced by the decoder: " - << static_cast(img->fmt); - return WEBRTC_VIDEO_CODEC_NO_OUTPUT; - } - break; - case 10: - img_wrapped_buffer = WrapI010Buffer( - img->d_w, img->d_h, - reinterpret_cast(img->planes[VPX_PLANE_Y]), - img->stride[VPX_PLANE_Y] / 2, - reinterpret_cast(img->planes[VPX_PLANE_U]), - img->stride[VPX_PLANE_U] / 2, - reinterpret_cast(img->planes[VPX_PLANE_V]), - img->stride[VPX_PLANE_V] / 2, rtc::KeepRefUntilDone(img_buffer)); - break; - default: - RTC_LOG(LS_ERROR) << "Unsupported bit depth produced by the decoder: " - << img->bit_depth; - return WEBRTC_VIDEO_CODEC_NO_OUTPUT; + if (flags.settings_by_resolution.empty()) { + return GetDefaultPerformanceFlags(); } - auto builder = VideoFrame::Builder() - .set_video_frame_buffer(img_wrapped_buffer) - .set_timestamp_rtp(timestamp); - if (explicit_color_space) { - builder.set_color_space(*explicit_color_space); - } else { - builder.set_color_space( - ExtractVP9ColorSpace(img->cs, img->range, img->bit_depth)); - } - VideoFrame decoded_image = builder.build(); + return flags; +} - decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp); - return WEBRTC_VIDEO_CODEC_OK; +// static +LibvpxVp9Encoder::PerformanceFlags +LibvpxVp9Encoder::GetDefaultPerformanceFlags() { + PerformanceFlags flags; + flags.use_per_layer_speed = false; +#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID) + // Speed 8 on all 
layers for all resolutions. + flags.settings_by_resolution[0] = {8, 8, 0}; +#else + // For smaller resolutions, use lower speed setting (get some coding gain at + // the cost of increased encoding complexity). + flags.settings_by_resolution[0] = {5, 5, 0}; + + // Use speed 7 for QCIF and above. + flags.settings_by_resolution[352 * 288] = {7, 7, 0}; +#endif + return flags; } -int VP9DecoderImpl::RegisterDecodeCompleteCallback( - DecodedImageCallback* callback) { - decode_complete_callback_ = callback; - return WEBRTC_VIDEO_CODEC_OK; +void LibvpxVp9Encoder::MaybeRewrapRawWithFormat(const vpx_img_fmt fmt) { + if (!raw_) { + raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1, + nullptr); + } else if (raw_->fmt != fmt) { + RTC_LOG(INFO) << "Switching VP9 encoder pixel format to " + << (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420"); + libvpx_->img_free(raw_); + raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1, + nullptr); + } + // else no-op since the image is already in the right format. } -int VP9DecoderImpl::Release() { - int ret_val = WEBRTC_VIDEO_CODEC_OK; +rtc::scoped_refptr LibvpxVp9Encoder::PrepareBufferForProfile0( + rtc::scoped_refptr buffer) { + absl::InlinedVector + supported_formats = {VideoFrameBuffer::Type::kI420, + VideoFrameBuffer::Type::kNV12}; - if (decoder_ != nullptr) { - if (inited_) { - // When a codec is destroyed libvpx will release any buffers of - // |frame_buffer_pool_| it is currently using. - if (vpx_codec_destroy(decoder_)) { - ret_val = WEBRTC_VIDEO_CODEC_MEMORY; + rtc::scoped_refptr mapped_buffer; + if (buffer->type() != VideoFrameBuffer::Type::kNative) { + // |buffer| is already mapped. + mapped_buffer = buffer; + } else { + // Attempt to map to one of the supported formats. 
+ mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats); + } + if (!mapped_buffer || + (absl::c_find(supported_formats, mapped_buffer->type()) == + supported_formats.end() && + mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) { + // Unknown pixel format or unable to map, convert to I420 and prepare that + // buffer instead to ensure Scale() is safe to use. + auto converted_buffer = buffer->ToI420(); + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString(buffer->type()) + << " image to I420. Can't encode frame."; + return {}; + } + // The buffer should now be a mapped I420 or I420A format, but some buffer + // implementations incorrectly return the wrong buffer format, such as + // kNative. As a workaround to this, we perform ToI420() a second time. + // TODO(https://crbug.com/webrtc/12602): When Android buffers have a correct + // ToI420() implementation, remove this workaround. + if (converted_buffer->type() != VideoFrameBuffer::Type::kI420 && + converted_buffer->type() != VideoFrameBuffer::Type::kI420A) { + converted_buffer = converted_buffer->ToI420(); + if (!converted_buffer) { + RTC_LOG(LS_ERROR) << "Failed to convert " + << VideoFrameBufferTypeToString(buffer->type()) + << " image to I420. Can't encode frame."; + return {}; + } + RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 || + converted_buffer->type() == VideoFrameBuffer::Type::kI420A); + } + // Because |buffer| had to be converted, use |converted_buffer| instead. + buffer = mapped_buffer = converted_buffer; + } + + // Prepare |raw_| from |mapped_buffer|.
+ switch (mapped_buffer->type()) { + case VideoFrameBuffer::Type::kI420: + case VideoFrameBuffer::Type::kI420A: { + MaybeRewrapRawWithFormat(VPX_IMG_FMT_I420); + const I420BufferInterface* i420_buffer = mapped_buffer->GetI420(); + RTC_DCHECK(i420_buffer); + raw_->planes[VPX_PLANE_Y] = const_cast(i420_buffer->DataY()); + raw_->planes[VPX_PLANE_U] = const_cast(i420_buffer->DataU()); + raw_->planes[VPX_PLANE_V] = const_cast(i420_buffer->DataV()); + raw_->stride[VPX_PLANE_Y] = i420_buffer->StrideY(); + raw_->stride[VPX_PLANE_U] = i420_buffer->StrideU(); + raw_->stride[VPX_PLANE_V] = i420_buffer->StrideV(); + break; + } + case VideoFrameBuffer::Type::kNV12: { + MaybeRewrapRawWithFormat(VPX_IMG_FMT_NV12); + const NV12BufferInterface* nv12_buffer = mapped_buffer->GetNV12(); + RTC_DCHECK(nv12_buffer); + raw_->planes[VPX_PLANE_Y] = const_cast(nv12_buffer->DataY()); + raw_->planes[VPX_PLANE_U] = const_cast(nv12_buffer->DataUV()); + raw_->planes[VPX_PLANE_V] = raw_->planes[VPX_PLANE_U] + 1; + raw_->stride[VPX_PLANE_Y] = nv12_buffer->StrideY(); + raw_->stride[VPX_PLANE_U] = nv12_buffer->StrideUV(); + raw_->stride[VPX_PLANE_V] = nv12_buffer->StrideUV(); + break; + } + default: + RTC_NOTREACHED(); + } + return mapped_buffer; } } // namespace webrtc diff --git a/modules/video_coding/codecs/vp9/vp9_impl.h b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h similarity index 61% rename from modules/video_coding/codecs/vp9/vp9_impl.h rename to modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h index 2126044dcc..954c044c2c 100644 --- a/modules/video_coding/codecs/vp9/vp9_impl.h +++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
* * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -9,33 +9,37 @@ * */ -#ifndef MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_ -#define MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_ +#ifndef MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_ +#define MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_ #ifdef RTC_ENABLE_VP9 #include #include -#include #include #include "api/fec_controller_override.h" +#include "api/transport/webrtc_key_value_config.h" #include "api/video_codecs/video_encoder.h" -#include "media/base/vp9_profile.h" +#include "api/video_codecs/vp9_profile.h" +#include "common_video/include/video_frame_buffer_pool.h" +#include "modules/video_coding/codecs/interface/libvpx_interface.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h" +#include "modules/video_coding/svc/scalable_video_controller.h" #include "modules/video_coding/utility/framerate_controller.h" +#include "rtc_base/experiments/encoder_info_settings.h" #include "vpx/vp8cx.h" -#include "vpx/vpx_decoder.h" -#include "vpx/vpx_encoder.h" namespace webrtc { -class VP9EncoderImpl : public VP9Encoder { +class LibvpxVp9Encoder : public VP9Encoder { public: - explicit VP9EncoderImpl(const cricket::VideoCodec& codec); + LibvpxVp9Encoder(const cricket::VideoCodec& codec, + std::unique_ptr interface, + const WebRtcKeyValueConfig& trials); - ~VP9EncoderImpl() override; + ~LibvpxVp9Encoder() override; void SetFecControllerOverride( FecControllerOverride* fec_controller_override) override; @@ -61,7 +65,7 @@ class VP9EncoderImpl : public VP9Encoder { // Call encoder initialize function and set control settings. 
int InitAndSetControlSettings(const VideoCodec* inst); - void PopulateCodecSpecific(CodecSpecificInfo* codec_specific, + bool PopulateCodecSpecific(CodecSpecificInfo* codec_specific, absl::optional* spatial_idx, const vpx_codec_cx_pkt& pkt, uint32_t timestamp); @@ -78,7 +82,7 @@ class VP9EncoderImpl : public VP9Encoder { bool ExplicitlyConfiguredSpatialLayers() const; bool SetSvcRates(const VideoBitrateAllocation& bitrate_allocation); - virtual int GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt); + void GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt); // Callback function for outputting packets per spatial layer. static void EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt, @@ -98,6 +102,15 @@ class VP9EncoderImpl : public VP9Encoder { size_t SteadyStateSize(int sid, int tid); + void MaybeRewrapRawWithFormat(const vpx_img_fmt fmt); + // Prepares |raw_| to reference image data of |buffer|, or of mapped or scaled + // versions of |buffer|. Returns the buffer that got referenced as a result, + // allowing the caller to keep a reference to it until after encoding has + // finished. On failure to convert the buffer, null is returned. 
+ rtc::scoped_refptr PrepareBufferForProfile0( + rtc::scoped_refptr buffer); + + const std::unique_ptr libvpx_; EncodedImage encoded_image_; CodecSpecificInfo codec_specific_; EncodedImageCallback* encoded_complete_callback_; @@ -105,7 +118,6 @@ class VP9EncoderImpl : public VP9Encoder { const VP9Profile profile_; bool inited_; int64_t timestamp_; - int cpu_speed_; uint32_t rc_max_intra_target_; vpx_codec_ctx_t* encoder_; vpx_codec_enc_cfg_t* config_; @@ -125,7 +137,6 @@ class VP9EncoderImpl : public VP9Encoder { InterLayerPredMode inter_layer_pred_; bool external_ref_control_; const bool trusted_rate_controller_; - const bool dynamic_rate_settings_; bool layer_buffering_; const bool full_superframe_drop_; vpx_svc_frame_drop_t svc_drop_frame_; @@ -133,7 +144,9 @@ class VP9EncoderImpl : public VP9Encoder { VideoBitrateAllocation current_bitrate_allocation_; bool ss_info_needed_; bool force_all_active_layers_; + const bool use_svc_controller_; + std::unique_ptr svc_controller_; std::vector framerate_controller_; // Used for flexible mode. @@ -157,6 +170,7 @@ class VP9EncoderImpl : public VP9Encoder { size_t temporal_layer_id = 0; }; std::map ref_buf_; + std::vector layer_frames_; // Variable frame-rate related fields and methods. const struct VariableFramerateExperiment { @@ -173,46 +187,62 @@ class VP9EncoderImpl : public VP9Encoder { int frames_before_steady_state; } variable_framerate_experiment_; static VariableFramerateExperiment ParseVariableFramerateConfig( - std::string group_name); + const WebRtcKeyValueConfig& trials); FramerateController variable_framerate_controller_; + + const struct QualityScalerExperiment { + int low_qp; + int high_qp; + bool enabled; + } quality_scaler_experiment_; + static QualityScalerExperiment ParseQualityScalerConfig( + const WebRtcKeyValueConfig& trials); + const bool external_ref_ctrl_; + + // Flags that can affect speed vs quality tradeoff, and are configurable per + // resolution ranges.
+ struct PerformanceFlags { + // If false, a lookup will be made in |settings_by_resolution| based on the + // highest currently active resolution, and the overall speed then set + // to the |base_layer_speed| matching that entry. + // If true, each active resolution will have its speed and deblock_mode set + // based on its resolution, and the high layer speed configured for non + // base temporal layer frames. + bool use_per_layer_speed = false; + + struct ParameterSet { + int base_layer_speed = -1; // Speed setting for TL0. + int high_layer_speed = -1; // Speed setting for TL1-TL3. + // 0 = deblock all temporal layers (TL) + // 1 = disable deblock for top-most TL + // 2 = disable deblock for all TLs + int deblock_mode = 0; + }; + // Map from min pixel count to settings for that resolution and above. + // E.g. if you want some settings A if below wvga (640x360) and some other + // setting B at wvga and above, you'd use map {{0, A}, {230400, B}}. + std::map settings_by_resolution; + }; + // Performance flags, ordered by |min_pixel_count|. + const PerformanceFlags performance_flags_; + // Caching of |speed_configs_|, where index i maps to the resolution as + // specified in |codec_.spatialLayer[i]|. + std::vector + performance_flags_by_spatial_index_; + void UpdatePerformanceFlags(); + static PerformanceFlags ParsePerformanceFlagsFromTrials( + const WebRtcKeyValueConfig& trials); + static PerformanceFlags GetDefaultPerformanceFlags(); + int num_steady_state_frames_; // Only set config when this flag is set.
bool config_changed_; -}; - -class VP9DecoderImpl : public VP9Decoder { - public: - VP9DecoderImpl(); - - virtual ~VP9DecoderImpl(); - int InitDecode(const VideoCodec* inst, int number_of_cores) override; - - int Decode(const EncodedImage& input_image, - bool missing_frames, - int64_t /*render_time_ms*/) override; - - int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override; - - int Release() override; - - const char* ImplementationName() const override; - - private: - int ReturnFrame(const vpx_image_t* img, - uint32_t timestamp, - int qp, - const webrtc::ColorSpace* explicit_color_space); - - // Memory pool used to share buffers between libvpx and webrtc. - Vp9FrameBufferPool frame_buffer_pool_; - DecodedImageCallback* decode_complete_callback_; - bool inited_; - vpx_codec_ctx_t* decoder_; - bool key_frame_required_; + const LibvpxVp9EncoderInfoSettings encoder_info_override_; }; + } // namespace webrtc #endif // RTC_ENABLE_VP9 -#endif // MODULES_VIDEO_CODING_CODECS_VP9_VP9_IMPL_H_ +#endif // MODULES_VIDEO_CODING_CODECS_VP9_LIBVPX_VP9_ENCODER_H_ diff --git a/modules/video_coding/codecs/vp9/svc_config.cc b/modules/video_coding/codecs/vp9/svc_config.cc index e5d88bce21..cc7743ad25 100644 --- a/modules/video_coding/codecs/vp9/svc_config.cc +++ b/modules/video_coding/codecs/vp9/svc_config.cc @@ -16,6 +16,7 @@ #include "modules/video_coding/codecs/vp9/include/vp9_globals.h" #include "rtc_base/checks.h" +#include "rtc_base/logging.h" namespace webrtc { @@ -74,11 +75,23 @@ std::vector ConfigureSvcNormalVideo(size_t input_width, const size_t num_layers_fit_vert = static_cast( std::floor(1 + std::max(0.0f, std::log2(1.0f * input_height / kMinVp9SpatialLayerHeight)))); - num_spatial_layers = - std::min({num_spatial_layers, num_layers_fit_horz, num_layers_fit_vert}); + const size_t limited_num_spatial_layers = + std::min(num_layers_fit_horz, num_layers_fit_vert); + if (limited_num_spatial_layers < num_spatial_layers) { + RTC_LOG(LS_WARNING) << "Reducing 
number of spatial layers from "
+                        << num_spatial_layers << " to "
+                        << limited_num_spatial_layers
+                        << " due to low input resolution.";
+    num_spatial_layers = limited_num_spatial_layers;
+  }
 
   // First active layer must be configured.
   num_spatial_layers = std::max(num_spatial_layers, first_active_layer + 1);
 
+  // Ensure top layer is even enough.
+  int required_divisiblity = 1 << (num_spatial_layers - first_active_layer - 1);
+  input_width = input_width - input_width % required_divisiblity;
+  input_height = input_height - input_height % required_divisiblity;
+
   for (size_t sl_idx = first_active_layer; sl_idx < num_spatial_layers;
        ++sl_idx) {
     SpatialLayer spatial_layer = {0};
@@ -108,6 +121,19 @@ std::vector<SpatialLayer> ConfigureSvcNormalVideo(size_t input_width,
     spatial_layers.push_back(spatial_layer);
   }
 
+  // A workaround for situation when single HD layer is left with minBitrate
+  // about 500kbps. This would mean that there will always be at least 500kbps
+  // allocated to video regardless of how low is the actual BWE.
+  // Also, boost maxBitrate for the first layer to account for lost ability to
+  // predict from previous layers.
+  if (first_active_layer > 0) {
+    spatial_layers[0].minBitrate = kMinVp9SvcBitrateKbps;
+    // TODO(ilnik): tune this value or come up with a different formula to
+    // ensure that all singlecast configurations look good and not too much
+    // bitrate is added.
+    spatial_layers[0].maxBitrate *= 1.1;
+  }
+
   return spatial_layers;
 }
 
diff --git a/modules/video_coding/codecs/vp9/svc_config.h b/modules/video_coding/codecs/vp9/svc_config.h
index 9bd8b0e313..f6b562e189 100644
--- a/modules/video_coding/codecs/vp9/svc_config.h
+++ b/modules/video_coding/codecs/vp9/svc_config.h
@@ -14,7 +14,7 @@
 
 #include <vector>
 
-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video_codecs/spatial_layer.h"
 
 namespace webrtc {
 
diff --git a/modules/video_coding/codecs/vp9/svc_config_unittest.cc b/modules/video_coding/codecs/vp9/svc_config_unittest.cc
index abc67a22ff..1891628921 100644
--- a/modules/video_coding/codecs/vp9/svc_config_unittest.cc
+++ b/modules/video_coding/codecs/vp9/svc_config_unittest.cc
@@ -41,6 +41,32 @@ TEST(SvcConfig, AlwaysSendsAtLeastOneLayer) {
   EXPECT_EQ(spatial_layers.back().width, kMinVp9SpatialLayerWidth);
 }
 
+TEST(SvcConfig, EnforcesMinimalRequiredParity) {
+  const size_t max_num_spatial_layers = 3;
+  const size_t kOddSize = 1023;
+
+  std::vector<SpatialLayer> spatial_layers =
+      GetSvcConfig(kOddSize, kOddSize, 30,
+                   /*first_active_layer=*/1, max_num_spatial_layers, 1, false);
+  // Since there are 2 layers total (1, 2), divisibility by 2 is required.
+  EXPECT_EQ(spatial_layers.back().width, kOddSize - 1);
+  EXPECT_EQ(spatial_layers.back().height, kOddSize - 1);
+
+  spatial_layers =
+      GetSvcConfig(kOddSize, kOddSize, 30,
+                   /*first_active_layer=*/0, max_num_spatial_layers, 1, false);
+  // Since there are 3 layers total (0, 1, 2), divisibility by 4 is required.
+  EXPECT_EQ(spatial_layers.back().width, kOddSize - 3);
+  EXPECT_EQ(spatial_layers.back().height, kOddSize - 3);
+
+  spatial_layers =
+      GetSvcConfig(kOddSize, kOddSize, 30,
+                   /*first_active_layer=*/2, max_num_spatial_layers, 1, false);
+  // Since there is only 1 layer active (2), divisibility by 1 is required.
+ EXPECT_EQ(spatial_layers.back().width, kOddSize); + EXPECT_EQ(spatial_layers.back().width, kOddSize); +} + TEST(SvcConfig, SkipsInactiveLayers) { const size_t num_spatial_layers = 4; const size_t first_active_layer = 2; diff --git a/modules/video_coding/codecs/vp9/svc_rate_allocator.h b/modules/video_coding/codecs/vp9/svc_rate_allocator.h index a4e0c28cc0..fa53a155ab 100644 --- a/modules/video_coding/codecs/vp9/svc_rate_allocator.h +++ b/modules/video_coding/codecs/vp9/svc_rate_allocator.h @@ -11,52 +11,7 @@ #ifndef MODULES_VIDEO_CODING_CODECS_VP9_SVC_RATE_ALLOCATOR_H_ #define MODULES_VIDEO_CODING_CODECS_VP9_SVC_RATE_ALLOCATOR_H_ -#include -#include - -#include "absl/container/inlined_vector.h" -#include "api/video/video_bitrate_allocation.h" -#include "api/video/video_bitrate_allocator.h" -#include "api/video/video_codec_constants.h" -#include "api/video_codecs/video_codec.h" -#include "rtc_base/experiments/stable_target_rate_experiment.h" - -namespace webrtc { - -class SvcRateAllocator : public VideoBitrateAllocator { - public: - explicit SvcRateAllocator(const VideoCodec& codec); - - VideoBitrateAllocation Allocate( - VideoBitrateAllocationParameters parameters) override; - - static DataRate GetMaxBitrate(const VideoCodec& codec); - static DataRate GetPaddingBitrate(const VideoCodec& codec); - static absl::InlinedVector GetLayerStartBitrates( - const VideoCodec& codec); - - private: - VideoBitrateAllocation GetAllocationNormalVideo( - DataRate total_bitrate, - size_t first_active_layer, - size_t num_spatial_layers) const; - - VideoBitrateAllocation GetAllocationScreenSharing( - DataRate total_bitrate, - size_t first_active_layer, - size_t num_spatial_layers) const; - - // Returns the number of layers that are active and have enough bitrate to - // actually be enabled. 
- size_t FindNumEnabledLayers(DataRate target_rate) const; - - const VideoCodec codec_; - const StableTargetRateExperiment experiment_settings_; - const absl::InlinedVector - cumulative_layer_start_bitrates_; - size_t last_active_layer_count_; -}; - -} // namespace webrtc +// TODO(danilchap): Update dependent includes and remove this forwarding header. +#include "modules/video_coding/svc/svc_rate_allocator.h" #endif // MODULES_VIDEO_CODING_CODECS_VP9_SVC_RATE_ALLOCATOR_H_ diff --git a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc index d40cf23257..e96538427b 100644 --- a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc +++ b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc @@ -8,37 +8,93 @@ * be found in the AUTHORS file in the root of the source tree. */ +#include "absl/memory/memory.h" #include "api/test/create_frame_generator.h" #include "api/test/frame_generator_interface.h" +#include "api/test/mock_video_encoder.h" #include "api/video/color_space.h" #include "api/video/i420_buffer.h" #include "api/video_codecs/video_encoder.h" +#include "api/video_codecs/vp9_profile.h" #include "common_video/libyuv/include/webrtc_libyuv.h" -#include "media/base/vp9_profile.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "modules/video_coding/codecs/interface/libvpx_interface.h" +#include "modules/video_coding/codecs/interface/mock_libvpx_interface.h" +#include "modules/video_coding/codecs/test/encoded_video_frame_producer.h" #include "modules/video_coding/codecs/test/video_codec_unittest.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" +#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h" #include "modules/video_coding/codecs/vp9/svc_config.h" +#include "rtc_base/strings/string_builder.h" +#include "test/explicit_key_value_config.h" #include "test/field_trial.h" #include "test/gmock.h" #include "test/gtest.h" +#include 
"test/mappable_native_buffer.h" #include "test/video_codec_settings.h" namespace webrtc { +namespace { +using ::testing::_; +using ::testing::A; +using ::testing::AllOf; +using ::testing::An; +using ::testing::AnyNumber; +using ::testing::ByRef; +using ::testing::DoAll; +using ::testing::Each; +using ::testing::ElementsAre; using ::testing::ElementsAreArray; +using ::testing::Field; +using ::testing::IsEmpty; +using ::testing::Mock; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::SafeMatcherCast; +using ::testing::SaveArgPointee; +using ::testing::SetArgPointee; +using ::testing::SizeIs; +using ::testing::TypedEq; +using ::testing::UnorderedElementsAreArray; +using ::testing::WithArg; using EncoderInfo = webrtc::VideoEncoder::EncoderInfo; using FramerateFractions = absl::InlinedVector; -namespace { -const size_t kWidth = 1280; -const size_t kHeight = 720; +constexpr size_t kWidth = 1280; +constexpr size_t kHeight = 720; const VideoEncoder::Capabilities kCapabilities(false); const VideoEncoder::Settings kSettings(kCapabilities, /*number_of_cores=*/1, /*max_payload_size=*/0); + +VideoCodec DefaultCodecSettings() { + VideoCodec codec_settings; + webrtc::test::CodecSettings(kVideoCodecVP9, &codec_settings); + codec_settings.width = kWidth; + codec_settings.height = kHeight; + codec_settings.VP9()->numberOfTemporalLayers = 1; + codec_settings.VP9()->numberOfSpatialLayers = 1; + return codec_settings; +} + +void ConfigureSvc(VideoCodec& codec_settings, + int num_spatial_layers, + int num_temporal_layers = 1) { + codec_settings.VP9()->numberOfSpatialLayers = num_spatial_layers; + codec_settings.VP9()->numberOfTemporalLayers = num_temporal_layers; + codec_settings.VP9()->frameDroppingOn = false; + + std::vector layers = GetSvcConfig( + codec_settings.width, codec_settings.height, codec_settings.maxFramerate, + /*first_active_layer=*/0, num_spatial_layers, num_temporal_layers, false); + for (size_t i = 0; i < layers.size(); ++i) { + 
codec_settings.spatialLayers[i] = layers[i]; + } +} + } // namespace class TestVp9Impl : public VideoCodecUnitTest { @@ -58,75 +114,25 @@ class TestVp9Impl : public VideoCodecUnitTest { codec_settings->VP9()->numberOfTemporalLayers = 1; codec_settings->VP9()->numberOfSpatialLayers = 1; } +}; - void ExpectFrameWith(uint8_t temporal_idx) { - EncodedImage encoded_frame; - CodecSpecificInfo codec_specific_info; - ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); - EXPECT_EQ(temporal_idx, codec_specific_info.codecSpecific.VP9.temporal_idx); - } - - void ExpectFrameWith(size_t num_spatial_layers, - uint8_t temporal_idx, - bool temporal_up_switch, - uint8_t num_ref_pics, - const std::vector& p_diff) { - std::vector encoded_frame; - std::vector codec_specific; - ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_specific)); - for (size_t spatial_idx = 0; spatial_idx < num_spatial_layers; - ++spatial_idx) { - const CodecSpecificInfoVP9& vp9 = - codec_specific[spatial_idx].codecSpecific.VP9; - if (vp9.temporal_idx == kNoTemporalIdx) { - EXPECT_EQ(temporal_idx, 0); - } else { - EXPECT_EQ(vp9.temporal_idx, temporal_idx); - } - if (num_spatial_layers == 1) { - EXPECT_FALSE(encoded_frame[spatial_idx].SpatialIndex()); - } else { - EXPECT_EQ(encoded_frame[spatial_idx].SpatialIndex(), - static_cast(spatial_idx)); - } - EXPECT_EQ(vp9.temporal_up_switch, temporal_up_switch); - - // Ensure there are no duplicates in reference list. 
- std::vector vp9_p_diff(vp9.p_diff, - vp9.p_diff + vp9.num_ref_pics); - std::sort(vp9_p_diff.begin(), vp9_p_diff.end()); - EXPECT_EQ(std::unique(vp9_p_diff.begin(), vp9_p_diff.end()), - vp9_p_diff.end()); - - for (size_t ref_pic_num = 0; ref_pic_num < num_ref_pics; ++ref_pic_num) { - EXPECT_NE( - std::find(p_diff.begin(), p_diff.end(), vp9.p_diff[ref_pic_num]), - p_diff.end()); - } - } - } - - void ConfigureSvc(size_t num_spatial_layers, size_t num_temporal_layers = 1) { - codec_settings_.VP9()->numberOfSpatialLayers = - static_cast(num_spatial_layers); - codec_settings_.VP9()->numberOfTemporalLayers = num_temporal_layers; - codec_settings_.VP9()->frameDroppingOn = false; - - std::vector layers = - GetSvcConfig(codec_settings_.width, codec_settings_.height, - codec_settings_.maxFramerate, /*first_active_layer=*/0, - num_spatial_layers, num_temporal_layers, false); - for (size_t i = 0; i < layers.size(); ++i) { - codec_settings_.spatialLayers[i] = layers[i]; - } +class TestVp9ImplForPixelFormat + : public TestVp9Impl, + public ::testing::WithParamInterface< + test::FrameGeneratorInterface::OutputType> { + protected: + void SetUp() override { + input_frame_generator_ = test::CreateSquareFrameGenerator( + kWidth, kHeight, GetParam(), absl::optional()); + TestVp9Impl::SetUp(); } }; // Disabled on ios as flake, see https://crbug.com/webrtc/7057 #if defined(WEBRTC_IOS) -TEST_F(TestVp9Impl, DISABLED_EncodeDecode) { +TEST_P(TestVp9ImplForPixelFormat, DISABLED_EncodeDecode) { #else -TEST_F(TestVp9Impl, EncodeDecode) { +TEST_P(TestVp9ImplForPixelFormat, EncodeDecode) { #endif VideoFrame input_frame = NextInputFrame(); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr)); @@ -153,7 +159,32 @@ TEST_F(TestVp9Impl, EncodeDecode) { color_space.chroma_siting_vertical()); } -TEST_F(TestVp9Impl, DecodedColorSpaceFromBitstream) { +TEST_P(TestVp9ImplForPixelFormat, EncodeNativeBuffer) { + VideoFrame input_frame = NextInputFrame(); + // Replace the input frame with 
a fake native buffer of the same size and + // underlying pixel format. Do not allow ToI420() for non-I420 buffers, + // ensuring zero-conversion. + input_frame = test::CreateMappableNativeFrame( + input_frame.ntp_time_ms(), input_frame.video_frame_buffer()->type(), + input_frame.width(), input_frame.height()); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(input_frame, nullptr)); + EncodedImage encoded_frame; + CodecSpecificInfo codec_specific_info; + ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); + + // After encoding, we would expect a single mapping to have happened. + rtc::scoped_refptr mappable_buffer = + test::GetMappableNativeBufferFromVideoFrame(input_frame); + std::vector> mapped_buffers = + mappable_buffer->GetMappedFramedBuffers(); + ASSERT_EQ(mapped_buffers.size(), 1u); + EXPECT_EQ(mapped_buffers[0]->type(), mappable_buffer->mappable_type()); + EXPECT_EQ(mapped_buffers[0]->width(), input_frame.width()); + EXPECT_EQ(mapped_buffers[0]->height(), input_frame.height()); + EXPECT_FALSE(mappable_buffer->DidConvertToI420()); +} + +TEST_P(TestVp9ImplForPixelFormat, DecodedColorSpaceFromBitstream) { EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); EncodedImage encoded_frame; CodecSpecificInfo codec_specific_info; @@ -171,7 +202,7 @@ TEST_F(TestVp9Impl, DecodedColorSpaceFromBitstream) { EXPECT_FALSE(decoded_frame->color_space()->hdr_metadata()); } -TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) { +TEST_P(TestVp9ImplForPixelFormat, DecodedQpEqualsEncodedQp) { EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); EncodedImage encoded_frame; CodecSpecificInfo codec_specific_info; @@ -187,57 +218,166 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) { EXPECT_EQ(encoded_frame.qp_, *decoded_qp); } -TEST_F(TestVp9Impl, ParserQpEqualsEncodedQp) { +TEST_F(TestVp9Impl, SwitchInputPixelFormatsWithoutReconfigure) { EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); 
EncodedImage encoded_frame; CodecSpecificInfo codec_specific_info; ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); + // Change the input frame type from I420 to NV12, encoding should still work. + input_frame_generator_ = test::CreateSquareFrameGenerator( + kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kNV12, + absl::optional()); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); + ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); + + // Flipping back to I420, encoding should still work. + input_frame_generator_ = test::CreateSquareFrameGenerator( + kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420, + absl::optional()); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); + ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); +} + +TEST(Vp9ImplTest, ParserQpEqualsEncodedQp) { + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + encoder->InitEncode(&codec_settings, kSettings); + + std::vector frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(1) + .SetResolution({kWidth, kHeight}) + .Encode(); + ASSERT_THAT(frames, SizeIs(1)); + const auto& encoded_frame = frames.front().encoded_image; int qp = 0; ASSERT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp)); EXPECT_EQ(encoded_frame.qp_, qp); } -TEST_F(TestVp9Impl, EncoderWith2TemporalLayers) { - // Override default settings. - codec_settings_.VP9()->numberOfTemporalLayers = 2; - // Tl0PidIdx is only used in non-flexible mode. - codec_settings_.VP9()->flexibleMode = false; - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, - encoder_->InitEncode(&codec_settings_, kSettings)); - - // Temporal layer 0. 
- EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); - EncodedImage encoded_frame; - CodecSpecificInfo codec_specific_info; - ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info)); - EXPECT_EQ(0, codec_specific_info.codecSpecific.VP9.temporal_idx); - - // Temporal layer 1. - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); - ExpectFrameWith(1); +TEST(Vp9ImplTest, EncodeAttachesTemplateStructureWithSvcController) { + test::ScopedFieldTrials override_field_trials( + "WebRTC-Vp9DependencyDescriptor/Enabled/"); + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); + + std::vector frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(2) + .SetResolution({kWidth, kHeight}) + .Encode(); + + ASSERT_THAT(frames, SizeIs(2)); + EXPECT_TRUE(frames[0].codec_specific_info.template_structure); + EXPECT_TRUE(frames[0].codec_specific_info.generic_frame_info); + + EXPECT_FALSE(frames[1].codec_specific_info.template_structure); + EXPECT_TRUE(frames[1].codec_specific_info.generic_frame_info); +} - // Temporal layer 0. - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); - ExpectFrameWith(0); +TEST(Vp9ImplTest, EncoderWith2TemporalLayers) { + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.VP9()->numberOfTemporalLayers = 2; + // Tl0PidIdx is only used in non-flexible mode. 
+ codec_settings.VP9()->flexibleMode = false; + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); + + std::vector frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(4) + .SetResolution({kWidth, kHeight}) + .Encode(); + + ASSERT_THAT(frames, SizeIs(4)); + EXPECT_EQ(frames[0].codec_specific_info.codecSpecific.VP9.temporal_idx, 0); + EXPECT_EQ(frames[1].codec_specific_info.codecSpecific.VP9.temporal_idx, 1); + EXPECT_EQ(frames[2].codec_specific_info.codecSpecific.VP9.temporal_idx, 0); + EXPECT_EQ(frames[3].codec_specific_info.codecSpecific.VP9.temporal_idx, 1); +} - // Temporal layer 1. - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); - ExpectFrameWith(1); +TEST(Vp9ImplTest, EncodeTemporalLayersWithSvcController) { + test::ScopedFieldTrials override_field_trials( + "WebRTC-Vp9DependencyDescriptor/Enabled/"); + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.VP9()->numberOfTemporalLayers = 2; + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); + + std::vector frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(4) + .SetResolution({kWidth, kHeight}) + .Encode(); + + ASSERT_THAT(frames, SizeIs(4)); + EXPECT_EQ(frames[0].codec_specific_info.codecSpecific.VP9.temporal_idx, 0); + EXPECT_EQ(frames[1].codec_specific_info.codecSpecific.VP9.temporal_idx, 1); + EXPECT_EQ(frames[2].codec_specific_info.codecSpecific.VP9.temporal_idx, 0); + EXPECT_EQ(frames[3].codec_specific_info.codecSpecific.VP9.temporal_idx, 1); + // Verify codec agnostic part + ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info); + ASSERT_TRUE(frames[1].codec_specific_info.generic_frame_info); + ASSERT_TRUE(frames[2].codec_specific_info.generic_frame_info); + ASSERT_TRUE(frames[3].codec_specific_info.generic_frame_info); + EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->temporal_id, 0); 
+ EXPECT_EQ(frames[1].codec_specific_info.generic_frame_info->temporal_id, 1); + EXPECT_EQ(frames[2].codec_specific_info.generic_frame_info->temporal_id, 0); + EXPECT_EQ(frames[3].codec_specific_info.generic_frame_info->temporal_id, 1); } -TEST_F(TestVp9Impl, EncoderWith2SpatialLayers) { - codec_settings_.VP9()->numberOfSpatialLayers = 2; - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, - encoder_->InitEncode(&codec_settings_, kSettings)); +TEST(Vp9ImplTest, EncoderWith2SpatialLayers) { + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.VP9()->numberOfSpatialLayers = 2; + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); + + std::vector frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(1) + .SetResolution({kWidth, kHeight}) + .Encode(); + + ASSERT_THAT(frames, SizeIs(2)); + EXPECT_EQ(frames[0].encoded_image.SpatialIndex(), 0); + EXPECT_EQ(frames[1].encoded_image.SpatialIndex(), 1); +} - SetWaitForEncodedFramesThreshold(2); - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(NextInputFrame(), nullptr)); - std::vector encoded_frame; - std::vector codec_info; - ASSERT_TRUE(WaitForEncodedFrames(&encoded_frame, &codec_info)); - EXPECT_EQ(encoded_frame[0].SpatialIndex(), 0); - EXPECT_EQ(encoded_frame[1].SpatialIndex(), 1); +TEST(Vp9ImplTest, EncodeSpatialLayersWithSvcController) { + test::ScopedFieldTrials override_field_trials( + "WebRTC-Vp9DependencyDescriptor/Enabled/"); + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.VP9()->numberOfSpatialLayers = 2; + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); + + std::vector frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(2) + .SetResolution({kWidth, kHeight}) + .Encode(); + + ASSERT_THAT(frames, SizeIs(4)); + EXPECT_EQ(frames[0].encoded_image.SpatialIndex(), 0); + 
EXPECT_EQ(frames[1].encoded_image.SpatialIndex(), 1); + EXPECT_EQ(frames[2].encoded_image.SpatialIndex(), 0); + EXPECT_EQ(frames[3].encoded_image.SpatialIndex(), 1); + // Verify codec agnostic part + ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info); + ASSERT_TRUE(frames[1].codec_specific_info.generic_frame_info); + ASSERT_TRUE(frames[2].codec_specific_info.generic_frame_info); + ASSERT_TRUE(frames[3].codec_specific_info.generic_frame_info); + EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 0); + EXPECT_EQ(frames[1].codec_specific_info.generic_frame_info->spatial_id, 1); + EXPECT_EQ(frames[2].codec_specific_info.generic_frame_info->spatial_id, 0); + EXPECT_EQ(frames[3].codec_specific_info.generic_frame_info->spatial_id, 1); } TEST_F(TestVp9Impl, EncoderExplicitLayering) { @@ -300,7 +440,7 @@ TEST_F(TestVp9Impl, EnableDisableSpatialLayers) { const size_t num_spatial_layers = 3; const size_t num_frames_to_encode = 5; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); codec_settings_.VP9()->frameDroppingOn = true; EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, @@ -346,6 +486,124 @@ TEST_F(TestVp9Impl, EnableDisableSpatialLayers) { } } +TEST(Vp9ImplTest, EnableDisableSpatialLayersWithSvcController) { + test::ScopedFieldTrials override_field_trials( + "WebRTC-Vp9DependencyDescriptor/Enabled/"); + const int num_spatial_layers = 3; + // Configure encoder to produce 3 spatial layers. Encode frames of layer 0 + // then enable layer 1 and encode more frames and so on. + // Then disable layers one by one in the same way. + // Note: bit rate allocation is high to avoid frame dropping due to rate + // control, the encoder should always produce a frame. A dropped + // frame indicates a problem and the test will fail. 
+ std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + ConfigureSvc(codec_settings, num_spatial_layers); + codec_settings.VP9()->frameDroppingOn = true; + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); + + EncodedVideoFrameProducer producer(*encoder); + producer.SetResolution({kWidth, kHeight}); + + // Encode a key frame to validate all other frames are delta frames. + std::vector frames = + producer.SetNumInputFrames(1).Encode(); + ASSERT_THAT(frames, Not(IsEmpty())); + EXPECT_TRUE(frames[0].codec_specific_info.template_structure); + + const size_t num_frames_to_encode = 5; + + VideoBitrateAllocation bitrate_allocation; + for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) { + // Allocate high bit rate to avoid frame dropping due to rate control. + bitrate_allocation.SetBitrate( + sl_idx, 0, + codec_settings.spatialLayers[sl_idx].targetBitrate * 1000 * 2); + encoder->SetRates(VideoEncoder::RateControlParameters( + bitrate_allocation, codec_settings.maxFramerate)); + + frames = producer.SetNumInputFrames(num_frames_to_encode).Encode(); + // With (sl_idx+1) spatial layers expect (sl_idx+1) frames per input frame. + ASSERT_THAT(frames, SizeIs(num_frames_to_encode * (sl_idx + 1))); + for (size_t i = 0; i < frames.size(); ++i) { + EXPECT_TRUE(frames[i].codec_specific_info.generic_frame_info); + EXPECT_FALSE(frames[i].codec_specific_info.template_structure); + } + } + + for (int sl_idx = num_spatial_layers - 1; sl_idx > 0; --sl_idx) { + bitrate_allocation.SetBitrate(sl_idx, 0, 0); + encoder->SetRates(VideoEncoder::RateControlParameters( + bitrate_allocation, codec_settings.maxFramerate)); + + frames = producer.SetNumInputFrames(num_frames_to_encode).Encode(); + // With |sl_idx| spatial layer disabled, there are |sl_idx| spatial layers + // left. 
+ ASSERT_THAT(frames, SizeIs(num_frames_to_encode * sl_idx)); + for (size_t i = 0; i < frames.size(); ++i) { + EXPECT_TRUE(frames[i].codec_specific_info.generic_frame_info); + EXPECT_FALSE(frames[i].codec_specific_info.template_structure); + } + } +} + +MATCHER_P2(GenericLayerIs, spatial_id, temporal_id, "") { + if (arg.codec_specific_info.generic_frame_info == absl::nullopt) { + *result_listener << " miss generic_frame_info"; + return false; + } + const auto& layer = *arg.codec_specific_info.generic_frame_info; + if (layer.spatial_id != spatial_id || layer.temporal_id != temporal_id) { + *result_listener << " frame from layer (" << layer.spatial_id << ", " + << layer.temporal_id << ")"; + return false; + } + return true; +} + +TEST(Vp9ImplTest, SpatialUpswitchNotAtGOFBoundary) { + test::ScopedFieldTrials override_field_trials( + "WebRTC-Vp9DependencyDescriptor/Enabled/"); + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + ConfigureSvc(codec_settings, /*num_spatial_layers=*/3, + /*num_temporal_layers=*/3); + codec_settings.VP9()->frameDroppingOn = true; + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); + + EncodedVideoFrameProducer producer(*encoder); + producer.SetResolution({kWidth, kHeight}); + + // Disable all but spatial_layer = 0; + VideoBitrateAllocation bitrate_allocation; + int layer_bitrate_bps = codec_settings.spatialLayers[0].targetBitrate * 1000; + bitrate_allocation.SetBitrate(0, 0, layer_bitrate_bps); + bitrate_allocation.SetBitrate(0, 1, layer_bitrate_bps); + bitrate_allocation.SetBitrate(0, 2, layer_bitrate_bps); + encoder->SetRates(VideoEncoder::RateControlParameters( + bitrate_allocation, codec_settings.maxFramerate)); + EXPECT_THAT(producer.SetNumInputFrames(3).Encode(), + ElementsAre(GenericLayerIs(0, 0), GenericLayerIs(0, 2), + GenericLayerIs(0, 1))); + + // Upswitch to spatial_layer = 1 + layer_bitrate_bps = 
codec_settings.spatialLayers[1].targetBitrate * 1000; + bitrate_allocation.SetBitrate(1, 0, layer_bitrate_bps); + bitrate_allocation.SetBitrate(1, 1, layer_bitrate_bps); + bitrate_allocation.SetBitrate(1, 2, layer_bitrate_bps); + encoder->SetRates(VideoEncoder::RateControlParameters( + bitrate_allocation, codec_settings.maxFramerate)); + // Expect upswitch doesn't happen immediately since there is no S1 frame that + // S1T2 frame can reference. + EXPECT_THAT(producer.SetNumInputFrames(1).Encode(), + ElementsAre(GenericLayerIs(0, 2))); + // Expect spatial upswitch happens now, at T0 frame. + EXPECT_THAT(producer.SetNumInputFrames(1).Encode(), + ElementsAre(GenericLayerIs(0, 0), GenericLayerIs(1, 0))); +} + TEST_F(TestVp9Impl, DisableEnableBaseLayerTriggersKeyFrame) { // Configure encoder to produce N spatial layers. Encode frames for all // layers. Then disable all but the last layer. Then reenable all back again. @@ -356,7 +614,7 @@ TEST_F(TestVp9Impl, DisableEnableBaseLayerTriggersKeyFrame) { // Must not be multiple of temporal period to exercise all code paths. const size_t num_frames_to_encode = 5; - ConfigureSvc(num_spatial_layers, num_temporal_layers); + ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers); codec_settings_.VP9()->frameDroppingOn = false; codec_settings_.VP9()->flexibleMode = false; codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic; @@ -502,13 +760,134 @@ TEST_F(TestVp9Impl, DisableEnableBaseLayerTriggersKeyFrame) { } } +TEST(Vp9ImplTest, DisableEnableBaseLayerWithSvcControllerTriggersKeyFrame) { + // Configure encoder to produce N spatial layers. Encode frames for all + // layers. Then disable all but the last layer. Then reenable all back again. + test::ScopedFieldTrials override_field_trials( + "WebRTC-Vp9DependencyDescriptor/Enabled/"); + const size_t num_spatial_layers = 3; + const size_t num_temporal_layers = 3; + // Must not be multiple of temporal period to exercise all code paths. 
+ const size_t num_frames_to_encode = 5; + + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + ConfigureSvc(codec_settings, num_spatial_layers, num_temporal_layers); + codec_settings.VP9()->frameDroppingOn = false; + codec_settings.VP9()->flexibleMode = false; + codec_settings.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic; + codec_settings.mode = VideoCodecMode::kRealtimeVideo; + + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); + + VideoBitrateAllocation bitrate_allocation; + for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) { + for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) { + // Allocate high bit rate to avoid frame dropping due to rate control. + bitrate_allocation.SetBitrate( + sl_idx, tl_idx, + codec_settings.spatialLayers[sl_idx].targetBitrate * 1000 * 2); + } + } + encoder->SetRates(VideoEncoder::RateControlParameters( + bitrate_allocation, codec_settings.maxFramerate)); + + EncodedVideoFrameProducer producer(*encoder); + producer.SetResolution({kWidth, kHeight}); + + std::vector frames = + producer.SetNumInputFrames(num_frames_to_encode).Encode(); + ASSERT_THAT(frames, SizeIs(num_frames_to_encode * num_spatial_layers)); + + // Disable all but top spatial layer. + for (size_t sl_idx = 0; sl_idx < num_spatial_layers - 1; ++sl_idx) { + for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) { + bitrate_allocation.SetBitrate(sl_idx, tl_idx, 0); + } + } + encoder->SetRates(VideoEncoder::RateControlParameters( + bitrate_allocation, codec_settings.maxFramerate)); + + frames = producer.SetNumInputFrames(num_frames_to_encode).Encode(); + EXPECT_THAT(frames, SizeIs(num_frames_to_encode)); + for (const auto& frame : frames) { + // Expect no key-frames generated. 
+ EXPECT_FALSE(frame.codec_specific_info.template_structure); + ASSERT_TRUE(frame.codec_specific_info.generic_frame_info); + EXPECT_EQ(frame.codec_specific_info.generic_frame_info->spatial_id, 2); + } + + frames = producer.ForceKeyFrame().SetNumInputFrames(1).Encode(); + ASSERT_THAT(frames, SizeIs(1)); + // Key-frame should be produced. + EXPECT_EQ(frames[0].encoded_image._frameType, VideoFrameType::kVideoFrameKey); + ASSERT_TRUE(frames[0].codec_specific_info.template_structure); + ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info); + EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 2); + + frames = producer.SetNumInputFrames(num_frames_to_encode).Encode(); + ASSERT_THAT(frames, SizeIs(num_frames_to_encode)); + for (const auto& frame : frames) { + EXPECT_EQ(frame.encoded_image._frameType, VideoFrameType::kVideoFrameDelta); + EXPECT_FALSE(frame.codec_specific_info.template_structure); + ASSERT_TRUE(frame.codec_specific_info.generic_frame_info); + EXPECT_EQ(frame.codec_specific_info.generic_frame_info->spatial_id, 2); + } + + // Enable the second layer back. + // Allocate high bit rate to avoid frame dropping due to rate control. 
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) { + bitrate_allocation.SetBitrate( + 1, tl_idx, codec_settings.spatialLayers[0].targetBitrate * 1000 * 2); + } + encoder->SetRates(VideoEncoder::RateControlParameters( + bitrate_allocation, codec_settings.maxFramerate)); + + frames = producer.SetNumInputFrames(num_frames_to_encode).Encode(); + ASSERT_THAT(frames, SizeIs(num_frames_to_encode * 2)); + EXPECT_EQ(frames[0].encoded_image._frameType, VideoFrameType::kVideoFrameKey); + EXPECT_TRUE(frames[0].codec_specific_info.template_structure); + ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info); + EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 1); + for (size_t i = 1; i < frames.size(); ++i) { + EXPECT_EQ(frames[i].encoded_image._frameType, + VideoFrameType::kVideoFrameDelta); + EXPECT_FALSE(frames[i].codec_specific_info.template_structure); + ASSERT_TRUE(frames[i].codec_specific_info.generic_frame_info); + EXPECT_EQ(frames[i].codec_specific_info.generic_frame_info->spatial_id, + 1 + static_cast(i % 2)); + } + + // Enable the first layer back. + // Allocate high bit rate to avoid frame dropping due to rate control. 
+ for (size_t tl_idx = 0; tl_idx < num_temporal_layers; ++tl_idx) { + bitrate_allocation.SetBitrate( + 0, tl_idx, codec_settings.spatialLayers[1].targetBitrate * 1000 * 2); + } + encoder->SetRates(VideoEncoder::RateControlParameters( + bitrate_allocation, codec_settings.maxFramerate)); + + frames = producer.SetNumInputFrames(num_frames_to_encode).Encode(); + ASSERT_THAT(frames, SizeIs(num_frames_to_encode * 3)); + EXPECT_TRUE(frames[0].codec_specific_info.template_structure); + ASSERT_TRUE(frames[0].codec_specific_info.generic_frame_info); + EXPECT_EQ(frames[0].codec_specific_info.generic_frame_info->spatial_id, 0); + for (size_t i = 1; i < frames.size(); ++i) { + EXPECT_FALSE(frames[i].codec_specific_info.template_structure); + ASSERT_TRUE(frames[i].codec_specific_info.generic_frame_info); + EXPECT_EQ(frames[i].codec_specific_info.generic_frame_info->spatial_id, + static_cast(i % 3)); + } +} + TEST_F(TestVp9Impl, DisableEnableBaseLayerTriggersKeyFrameForScreenshare) { // Configure encoder to produce N spatial layers. Encode frames for all // layers. Then disable all but the last layer. Then reenable all back again. 
const size_t num_spatial_layers = 3; const size_t num_frames_to_encode = 5; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); codec_settings_.VP9()->frameDroppingOn = false; codec_settings_.mode = VideoCodecMode::kScreensharing; codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn; @@ -626,7 +1005,7 @@ TEST_F(TestVp9Impl, DisableEnableBaseLayerTriggersKeyFrameForScreenshare) { TEST_F(TestVp9Impl, EndOfPicture) { const size_t num_spatial_layers = 2; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->InitEncode(&codec_settings_, kSettings)); @@ -646,8 +1025,8 @@ TEST_F(TestVp9Impl, EndOfPicture) { std::vector frames; std::vector codec_specific; ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific)); - EXPECT_FALSE(codec_specific[0].codecSpecific.VP9.end_of_picture); - EXPECT_TRUE(codec_specific[1].codecSpecific.VP9.end_of_picture); + EXPECT_FALSE(codec_specific[0].end_of_picture); + EXPECT_TRUE(codec_specific[1].end_of_picture); // Encode only base layer. Check that end-of-superframe flag is // set on base layer frame. 
@@ -662,12 +1041,12 @@ TEST_F(TestVp9Impl, EndOfPicture) { ASSERT_TRUE(WaitForEncodedFrames(&frames, &codec_specific)); EXPECT_FALSE(frames[0].SpatialIndex()); - EXPECT_TRUE(codec_specific[0].codecSpecific.VP9.end_of_picture); + EXPECT_TRUE(codec_specific[0].end_of_picture); } TEST_F(TestVp9Impl, InterLayerPred) { const size_t num_spatial_layers = 2; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); codec_settings_.VP9()->frameDroppingOn = false; VideoBitrateAllocation bitrate_allocation; @@ -742,7 +1121,7 @@ TEST_F(TestVp9Impl, const size_t num_spatial_layers = 3; const size_t num_frames_to_encode = 2; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); codec_settings_.VP9()->frameDroppingOn = false; const std::vector inter_layer_pred_modes = { @@ -799,7 +1178,7 @@ TEST_F(TestVp9Impl, const size_t num_spatial_layers = 3; const size_t num_frames_to_encode = 2; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); codec_settings_.VP9()->frameDroppingOn = false; codec_settings_.VP9()->flexibleMode = false; @@ -854,7 +1233,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerInTheSameGof) { const size_t num_spatial_layers = 2; const size_t num_temporal_layers = 2; - ConfigureSvc(num_spatial_layers, num_temporal_layers); + ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers); codec_settings_.VP9()->frameDroppingOn = false; codec_settings_.VP9()->flexibleMode = false; @@ -926,7 +1305,7 @@ TEST_F(TestVp9Impl, EnablingDisablingUpperLayerAccrossGof) { const size_t num_spatial_layers = 2; const size_t num_temporal_layers = 2; - ConfigureSvc(num_spatial_layers, num_temporal_layers); + ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers); codec_settings_.VP9()->frameDroppingOn = false; codec_settings_.VP9()->flexibleMode = false; @@ -1006,7 +1385,7 @@ TEST_F(TestVp9Impl, EnablingNewLayerInScreenshareForcesAllLayersWithSS) { const 
size_t num_frames_to_encode_before_drop = 1; codec_settings_.maxFramerate = 30; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); codec_settings_.spatialLayers[0].maxFramerate = 5.0; // use 30 for the SL 1 instead of 10, so even if SL 0 frame is dropped due to // framerate capping we would still get back at least a middle layer. It @@ -1065,7 +1444,7 @@ TEST_F(TestVp9Impl, ScreenshareFrameDropping) { const int num_frames_to_detect_drops = 2; codec_settings_.maxFramerate = 30; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); // use 30 for the SL0 and SL1 because it simplifies the test. codec_settings_.spatialLayers[0].maxFramerate = 30.0; codec_settings_.spatialLayers[1].maxFramerate = 30.0; @@ -1155,7 +1534,7 @@ TEST_F(TestVp9Impl, RemovingLayerIsNotDelayedInScreenshareAndAddsSsInfo) { const size_t num_dropped_frames = 5; codec_settings_.maxFramerate = 30; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); codec_settings_.spatialLayers[0].maxFramerate = 5.0; // use 30 for the SL 1 instead of 5, so even if SL 0 frame is dropped due to // framerate capping we would still get back at least a middle layer. It @@ -1242,7 +1621,7 @@ TEST_F(TestVp9Impl, DisableNewLayerInVideoDelaysSsInfoTillTL0) { const size_t num_temporal_layers = 2; // Chosen by hand, the 2nd frame is dropped with configured per-layer max // framerate. 
- ConfigureSvc(num_spatial_layers, num_temporal_layers); + ConfigureSvc(codec_settings_, num_spatial_layers, num_temporal_layers); codec_settings_.VP9()->frameDroppingOn = false; codec_settings_.mode = VideoCodecMode::kRealtimeVideo; codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOnKeyPic; @@ -1301,7 +1680,7 @@ TEST_F(TestVp9Impl, DisableNewLayerInVideoDelaysSsInfoTillTL0) { TEST_F(TestVp9Impl, LowLayerMarkedAsRefIfHighLayerNotEncodedAndInterLayerPredIsEnabled) { - ConfigureSvc(3); + ConfigureSvc(codec_settings_, 3); codec_settings_.VP9()->frameDroppingOn = false; codec_settings_.VP9()->interLayerPred = InterLayerPredMode::kOn; @@ -1333,6 +1712,33 @@ TEST_F(TestVp9Impl, ScalabilityStructureIsAvailableInFlexibleMode) { EXPECT_TRUE(codec_specific_info.codecSpecific.VP9.ss_data_available); } +TEST_F(TestVp9Impl, Profile0PreferredPixelFormats) { + EXPECT_THAT(encoder_->GetEncoderInfo().preferred_pixel_formats, + testing::UnorderedElementsAre(VideoFrameBuffer::Type::kNV12, + VideoFrameBuffer::Type::kI420)); +} + +TEST_F(TestVp9Impl, EncoderInfoWithoutResolutionBitrateLimits) { + EXPECT_TRUE(encoder_->GetEncoderInfo().resolution_bitrate_limits.empty()); +} + +TEST_F(TestVp9Impl, EncoderInfoWithBitrateLimitsFromFieldTrial) { + test::ScopedFieldTrials field_trials( + "WebRTC-VP9-GetEncoderInfoOverride/" + "frame_size_pixels:123|456|789," + "min_start_bitrate_bps:11000|22000|33000," + "min_bitrate_bps:44000|55000|66000," + "max_bitrate_bps:77000|88000|99000/"); + SetUp(); + + EXPECT_THAT( + encoder_->GetEncoderInfo().resolution_bitrate_limits, + ::testing::ElementsAre( + VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000}, + VideoEncoder::ResolutionBitrateLimits{456, 22000, 55000, 88000}, + VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000})); +} + TEST_F(TestVp9Impl, EncoderInfoFpsAllocation) { const uint8_t kNumSpatialLayers = 3; const uint8_t kNumTemporalLayers = 3; @@ -1365,7 +1771,7 @@ TEST_F(TestVp9Impl, EncoderInfoFpsAllocation) 
{ expected_fps_allocation[1] = expected_fps_allocation[0]; expected_fps_allocation[2] = expected_fps_allocation[0]; EXPECT_THAT(encoder_->GetEncoderInfo().fps_allocation, - ::testing::ElementsAreArray(expected_fps_allocation)); + ElementsAreArray(expected_fps_allocation)); } TEST_F(TestVp9Impl, EncoderInfoFpsAllocationFlexibleMode) { @@ -1421,67 +1827,34 @@ TEST_F(TestVp9Impl, EncoderInfoFpsAllocationFlexibleMode) { ::testing::ElementsAreArray(expected_fps_allocation)); } -class TestVp9ImplWithLayering - : public TestVp9Impl, - public ::testing::WithParamInterface<::testing::tuple> { +class Vp9ImplWithLayeringTest + : public ::testing::TestWithParam> { protected: - TestVp9ImplWithLayering() - : num_spatial_layers_(::testing::get<0>(GetParam())), - num_temporal_layers_(::testing::get<1>(GetParam())) {} + Vp9ImplWithLayeringTest() + : num_spatial_layers_(std::get<0>(GetParam())), + num_temporal_layers_(std::get<1>(GetParam())), + override_field_trials_(std::get<2>(GetParam()) + ? "WebRTC-Vp9ExternalRefCtrl/Enabled/" + : "") {} const uint8_t num_spatial_layers_; const uint8_t num_temporal_layers_; + const test::ScopedFieldTrials override_field_trials_; }; -TEST_P(TestVp9ImplWithLayering, FlexibleMode) { +TEST_P(Vp9ImplWithLayeringTest, FlexibleMode) { // In flexible mode encoder wrapper obtains actual list of references from // encoder and writes it into RTP payload descriptor. Check that reference // list in payload descriptor matches the predefined one, which is used // in non-flexible mode. 
- codec_settings_.VP9()->flexibleMode = true; - codec_settings_.VP9()->frameDroppingOn = false; - codec_settings_.VP9()->numberOfSpatialLayers = num_spatial_layers_; - codec_settings_.VP9()->numberOfTemporalLayers = num_temporal_layers_; - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, - encoder_->InitEncode(&codec_settings_, kSettings)); - - GofInfoVP9 gof; - if (num_temporal_layers_ == 1) { - gof.SetGofInfoVP9(kTemporalStructureMode1); - } else if (num_temporal_layers_ == 2) { - gof.SetGofInfoVP9(kTemporalStructureMode2); - } else if (num_temporal_layers_ == 3) { - gof.SetGofInfoVP9(kTemporalStructureMode3); - } - - // Encode at least (num_frames_in_gof + 1) frames to verify references - // of non-key frame with gof_idx = 0. - for (size_t frame_num = 0; frame_num < gof.num_frames_in_gof + 1; - ++frame_num) { - SetWaitForEncodedFramesThreshold(num_spatial_layers_); - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, - encoder_->Encode(NextInputFrame(), nullptr)); - - const bool is_key_frame = frame_num == 0; - const size_t gof_idx = frame_num % gof.num_frames_in_gof; - const std::vector p_diff(std::begin(gof.pid_diff[gof_idx]), - std::end(gof.pid_diff[gof_idx])); - - ExpectFrameWith(num_spatial_layers_, gof.temporal_idx[gof_idx], - gof.temporal_up_switch[gof_idx], - is_key_frame ? 
0 : gof.num_ref_pics[gof_idx], p_diff); - } -} - -TEST_P(TestVp9ImplWithLayering, ExternalRefControl) { - test::ScopedFieldTrials override_field_trials( - "WebRTC-Vp9ExternalRefCtrl/Enabled/"); - codec_settings_.VP9()->flexibleMode = true; - codec_settings_.VP9()->frameDroppingOn = false; - codec_settings_.VP9()->numberOfSpatialLayers = num_spatial_layers_; - codec_settings_.VP9()->numberOfTemporalLayers = num_temporal_layers_; - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, - encoder_->InitEncode(&codec_settings_, kSettings)); + std::unique_ptr encoder = VP9Encoder::Create(); + VideoCodec codec_settings = DefaultCodecSettings(); + codec_settings.VP9()->flexibleMode = true; + codec_settings.VP9()->frameDroppingOn = false; + codec_settings.VP9()->numberOfSpatialLayers = num_spatial_layers_; + codec_settings.VP9()->numberOfTemporalLayers = num_temporal_layers_; + EXPECT_EQ(encoder->InitEncode(&codec_settings, kSettings), + WEBRTC_VIDEO_CODEC_OK); GofInfoVP9 gof; if (num_temporal_layers_ == 1) { @@ -1494,27 +1867,48 @@ TEST_P(TestVp9ImplWithLayering, ExternalRefControl) { // Encode at least (num_frames_in_gof + 1) frames to verify references // of non-key frame with gof_idx = 0. - for (size_t frame_num = 0; frame_num < gof.num_frames_in_gof + 1; - ++frame_num) { - SetWaitForEncodedFramesThreshold(num_spatial_layers_); - EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, - encoder_->Encode(NextInputFrame(), nullptr)); - - const bool is_key_frame = frame_num == 0; - const size_t gof_idx = frame_num % gof.num_frames_in_gof; - const std::vector p_diff(std::begin(gof.pid_diff[gof_idx]), - std::end(gof.pid_diff[gof_idx])); - - ExpectFrameWith(num_spatial_layers_, gof.temporal_idx[gof_idx], - gof.temporal_up_switch[gof_idx], - is_key_frame ? 
0 : gof.num_ref_pics[gof_idx], p_diff); + int num_input_frames = gof.num_frames_in_gof + 1; + std::vector frames = + EncodedVideoFrameProducer(*encoder) + .SetNumInputFrames(num_input_frames) + .SetResolution({kWidth, kHeight}) + .Encode(); + ASSERT_THAT(frames, SizeIs(num_input_frames * num_spatial_layers_)); + + for (size_t i = 0; i < frames.size(); ++i) { + const EncodedVideoFrameProducer::EncodedFrame& frame = frames[i]; + const size_t picture_idx = i / num_spatial_layers_; + const size_t gof_idx = picture_idx % gof.num_frames_in_gof; + + const CodecSpecificInfoVP9& vp9 = + frame.codec_specific_info.codecSpecific.VP9; + EXPECT_EQ(frame.encoded_image.SpatialIndex(), + num_spatial_layers_ == 1 + ? absl::nullopt + : absl::optional(i % num_spatial_layers_)) + << "Frame " << i; + EXPECT_EQ(vp9.temporal_idx, num_temporal_layers_ == 1 + ? kNoTemporalIdx + : gof.temporal_idx[gof_idx]) + << "Frame " << i; + EXPECT_EQ(vp9.temporal_up_switch, gof.temporal_up_switch[gof_idx]) + << "Frame " << i; + if (picture_idx == 0) { + EXPECT_EQ(vp9.num_ref_pics, 0) << "Frame " << i; + } else { + EXPECT_THAT(rtc::MakeArrayView(vp9.p_diff, vp9.num_ref_pics), + UnorderedElementsAreArray(gof.pid_diff[gof_idx], + gof.num_ref_pics[gof_idx])) + << "Frame " << i; + } } } INSTANTIATE_TEST_SUITE_P(All, - TestVp9ImplWithLayering, + Vp9ImplWithLayeringTest, ::testing::Combine(::testing::Values(1, 2, 3), - ::testing::Values(1, 2, 3))); + ::testing::Values(1, 2, 3), + ::testing::Bool())); class TestVp9ImplFrameDropping : public TestVp9Impl { protected: @@ -1712,7 +2106,7 @@ TEST_F(TestVp9Impl, ReenablingUpperLayerAfterKFWithInterlayerPredIsEnabled) { // Force low frame-rate, so all layers are present for all frames. 
codec_settings_.maxFramerate = 5; - ConfigureSvc(num_spatial_layers); + ConfigureSvc(codec_settings_, num_spatial_layers); EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->InitEncode(&codec_settings_, kSettings)); @@ -1774,4 +2168,304 @@ TEST_F(TestVp9Impl, ReenablingUpperLayerAfterKFWithInterlayerPredIsEnabled) { EXPECT_EQ(encoded_frames[0]._frameType, VideoFrameType::kVideoFrameDelta); } +TEST_F(TestVp9Impl, HandlesEmptyInitDecode) { + std::unique_ptr decoder = CreateDecoder(); + // Check that nullptr settings are ok for decoder. + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, + decoder->InitDecode(/*codec_settings=*/nullptr, 1)); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder->Release()); +} + +INSTANTIATE_TEST_SUITE_P( + TestVp9ImplForPixelFormat, + TestVp9ImplForPixelFormat, + ::testing::Values(test::FrameGeneratorInterface::OutputType::kI420, + test::FrameGeneratorInterface::OutputType::kNV12), + [](const auto& info) { + return test::FrameGeneratorInterface::OutputTypeToString(info.param); + }); + +// Helper function to populate an vpx_image_t instance with dimensions and +// potential image data. +std::function +GetWrapImageFunction(vpx_image_t* img) { + return [img](vpx_image_t* /*img*/, vpx_img_fmt_t fmt, unsigned int d_w, + unsigned int d_h, unsigned int /*stride_align*/, + unsigned char* img_data) { + img->fmt = fmt; + img->d_w = d_w; + img->d_h = d_h; + img->img_data = img_data; + return img; + }; +} + +TEST(Vp9SpeedSettingsTrialsTest, SvcExtraCfgNotPopulatedByDefault) { + test::ExplicitKeyValueConfig trials(""); + + // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise + // passed on to LibvpxVp9Encoder. + auto* const vpx = new NiceMock(); + LibvpxVp9Encoder encoder(cricket::VideoCodec(), + absl::WrapUnique(vpx), trials); + + VideoCodec settings = DefaultCodecSettings(); + // Configure 3 spatial and three temporal ayers. 
+ ConfigureSvc(settings, 3, 3); + vpx_image_t img; + + ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img)); + ON_CALL(*vpx, codec_enc_config_default) + .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) { + memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t)); + }), + Return(VPX_CODEC_OK))); + EXPECT_CALL(*vpx, + codec_control( + _, VP9E_SET_SVC_PARAMETERS, + SafeMatcherCast(AllOf( + Field(&vpx_svc_extra_cfg_t::speed_per_layer, Each(0)), + Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl, Each(0)))))); + + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings)); +} + +TEST(Vp9SpeedSettingsTrialsTest, NoSvcUsesGlobalSpeedFromTl0InLayerConfig) { + // TL0 speed 8 at >= 480x270, 5 if below that. + test::ExplicitKeyValueConfig trials( + "WebRTC-VP9-PerformanceFlags/" + "use_per_layer_speed," + "min_pixel_count:0|129600," + "base_layer_speed:4|8," + "high_layer_speed:5|9," + "deblock_mode:1|0/"); + + // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise + // passed on to LibvpxVp9Encoder. 
+ auto* const vpx = new NiceMock(); + LibvpxVp9Encoder encoder(cricket::VideoCodec(), + absl::WrapUnique(vpx), trials); + + VideoCodec settings = DefaultCodecSettings(); + settings.width = 480; + settings.height = 270; + vpx_image_t img; + + ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img)); + ON_CALL(*vpx, codec_enc_config_default) + .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) { + memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t)); + }), + Return(VPX_CODEC_OK))); + EXPECT_CALL(*vpx, codec_control(_, _, An())).Times(AnyNumber()); + + EXPECT_CALL(*vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS, + A())) + .Times(0); + + EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq(8))); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings)); + + encoder.Release(); + settings.width = 352; + settings.height = 216; + + EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq(4))); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings)); +} + +TEST(Vp9SpeedSettingsTrialsTest, + NoPerLayerFlagUsesGlobalSpeedFromTopLayerInConfig) { + // TL0 speed 8 at >= 480x270, 5 if below that. + test::ExplicitKeyValueConfig trials( + "WebRTC-VP9-PerformanceFlags/" + "min_pixel_count:0|129600," + "base_layer_speed:4|8," + "high_layer_speed:5|9," + "deblock_mode:1|0/"); + + // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise + // passed on to LibvpxVp9Encoder. 
+ auto* const vpx = new NiceMock(); + LibvpxVp9Encoder encoder(cricket::VideoCodec(), + absl::WrapUnique(vpx), trials); + + VideoCodec settings = DefaultCodecSettings(); + settings.width = 480; + settings.height = 270; + ConfigureSvc(settings, 2, 3); + vpx_image_t img; + + ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img)); + ON_CALL(*vpx, codec_enc_config_default) + .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) { + memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t)); + }), + Return(VPX_CODEC_OK))); + EXPECT_CALL(*vpx, codec_control(_, _, An())).Times(AnyNumber()); + + // Speed settings not populated when 'use_per_layer_speed' flag is absent. + EXPECT_CALL(*vpx, + codec_control( + _, VP9E_SET_SVC_PARAMETERS, + SafeMatcherCast(AllOf( + Field(&vpx_svc_extra_cfg_t::speed_per_layer, Each(0)), + Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl, Each(0)))))) + .Times(2); + + EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq(8))); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings)); + + encoder.Release(); + settings.width = 476; + settings.height = 268; + settings.spatialLayers[0].width = settings.width / 2; + settings.spatialLayers[0].height = settings.height / 2; + settings.spatialLayers[1].width = settings.width; + settings.spatialLayers[1].height = settings.height; + + EXPECT_CALL(*vpx, codec_control(_, VP8E_SET_CPUUSED, TypedEq(4))); + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings)); +} + +TEST(Vp9SpeedSettingsTrialsTest, PerLayerFlagsWithSvc) { + // Per-temporal and spatial layer speed settings: + // SL0: TL0 = speed 5, TL1/TL2 = speed 8. + // SL1/2: TL0 = speed 7, TL1/TL2 = speed 9. + // Deblocking-mode per spatial layer: + // SL0: mode 1, SL1/2: mode 0. 
+ test::ExplicitKeyValueConfig trials( + "WebRTC-VP9-PerformanceFlags/" + "use_per_layer_speed," + "min_pixel_count:0|129600," + "base_layer_speed:5|7," + "high_layer_speed:8|9," + "deblock_mode:1|0/"); + + // Keep a raw pointer for EXPECT calls and the like. Ownership is otherwise + // passed on to LibvpxVp9Encoder. + auto* const vpx = new NiceMock(); + LibvpxVp9Encoder encoder(cricket::VideoCodec(), + absl::WrapUnique(vpx), trials); + + VideoCodec settings = DefaultCodecSettings(); + const int kNumSpatialLayers = 3; + ConfigureSvc(settings, kNumSpatialLayers, /*num_temporal_layers=*/3); + vpx_image_t img; + + // Speed settings per spatial layer, for TL0. + const int kBaseTlSpeed[VPX_MAX_LAYERS] = {5, 7, 7}; + // Speed settings per spatial layer, for TL1, TL2. + const int kHighTlSpeed[VPX_MAX_LAYERS] = {8, 9, 9}; + // Loopfilter settings are handled within libvpx, so this array is valid for + // both TL0 and higher. + const int kLoopFilter[VPX_MAX_LAYERS] = {1, 0, 0}; + + ON_CALL(*vpx, img_wrap).WillByDefault(GetWrapImageFunction(&img)); + ON_CALL(*vpx, codec_enc_config_default) + .WillByDefault(DoAll(WithArg<1>([](vpx_codec_enc_cfg_t* cfg) { + memset(cfg, 0, sizeof(vpx_codec_enc_cfg_t)); + }), + Return(VPX_CODEC_OK))); + EXPECT_CALL( + *vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS, + SafeMatcherCast( + AllOf(Field(&vpx_svc_extra_cfg_t::speed_per_layer, + ElementsAreArray(kBaseTlSpeed)), + Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl, + ElementsAreArray(kLoopFilter)))))); + + // Capture the callback into the vp9 wrapper. 
+ vpx_codec_priv_output_cx_pkt_cb_pair_t callback_pointer = {}; + EXPECT_CALL(*vpx, codec_control(_, VP9E_REGISTER_CX_CALLBACK, A())) + .WillOnce(WithArg<2>([&](void* cbp) { + callback_pointer = + *reinterpret_cast(cbp); + return VPX_CODEC_OK; + })); + + EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder.InitEncode(&settings, kSettings)); + + MockEncodedImageCallback callback; + encoder.RegisterEncodeCompleteCallback(&callback); + auto frame_generator = test::CreateSquareFrameGenerator( + kWidth, kHeight, test::FrameGeneratorInterface::OutputType::kI420, 10); + Mock::VerifyAndClearExpectations(vpx); + + uint8_t data[1] = {0}; + vpx_codec_cx_pkt encoded_data = {}; + encoded_data.data.frame.buf = &data; + encoded_data.data.frame.sz = 1; + + const auto kImageOk = + EncodedImageCallback::Result(EncodedImageCallback::Result::OK); + + int spatial_id = 0; + int temporal_id = 0; + EXPECT_CALL(*vpx, + codec_control(_, VP9E_SET_SVC_LAYER_ID, A())) + .Times(AnyNumber()); + EXPECT_CALL(*vpx, + codec_control(_, VP9E_GET_SVC_LAYER_ID, A())) + .WillRepeatedly(WithArg<2>([&](vpx_svc_layer_id_t* layer_id) { + layer_id->spatial_layer_id = spatial_id; + layer_id->temporal_layer_id = temporal_id; + return VPX_CODEC_OK; + })); + vpx_svc_ref_frame_config_t stored_refs = {}; + ON_CALL(*vpx, codec_control(_, VP9E_SET_SVC_REF_FRAME_CONFIG, + A())) + .WillByDefault( + DoAll(SaveArgPointee<2>(&stored_refs), Return(VPX_CODEC_OK))); + ON_CALL(*vpx, codec_control(_, VP9E_GET_SVC_REF_FRAME_CONFIG, + A())) + .WillByDefault( + DoAll(SetArgPointee<2>(ByRef(stored_refs)), Return(VPX_CODEC_OK))); + + // First frame is keyframe. + encoded_data.data.frame.flags = VPX_FRAME_IS_KEY; + + // Default 3-layer temporal pattern: 0-2-1-2, then repeat and do two more. + for (int ti : {0, 2, 1, 2, 0, 2}) { + EXPECT_CALL(*vpx, codec_encode).WillOnce(Return(VPX_CODEC_OK)); + // No update expected if flags haven't changed, and they change we we move + // between base temporal layer and non-base temporal layer. 
+ if ((ti > 0) != (temporal_id > 0)) { + EXPECT_CALL(*vpx, codec_control( + _, VP9E_SET_SVC_PARAMETERS, + SafeMatcherCast(AllOf( + Field(&vpx_svc_extra_cfg_t::speed_per_layer, + ElementsAreArray(ti == 0 ? kBaseTlSpeed + : kHighTlSpeed)), + Field(&vpx_svc_extra_cfg_t::loopfilter_ctrl, + ElementsAreArray(kLoopFilter)))))); + } else { + EXPECT_CALL(*vpx, codec_control(_, VP9E_SET_SVC_PARAMETERS, + A())) + .Times(0); + } + + VideoFrame frame = + VideoFrame::Builder() + .set_video_frame_buffer(frame_generator->NextFrame().buffer) + .build(); + encoder.Encode(frame, nullptr); + + temporal_id = ti; + for (int si = 0; si < kNumSpatialLayers; ++si) { + spatial_id = si; + + EXPECT_CALL(callback, OnEncodedImage).WillOnce(Return(kImageOk)); + callback_pointer.output_cx_pkt(&encoded_data, callback_pointer.user_priv); + } + + encoded_data.data.frame.flags = 0; // Following frames are delta frames. + } +} + } // namespace webrtc diff --git a/modules/video_coding/codecs/vp9/vp9.cc b/modules/video_coding/codecs/vp9/vp9.cc index 527bce7729..d9caf0f039 100644 --- a/modules/video_coding/codecs/vp9/vp9.cc +++ b/modules/video_coding/codecs/vp9/vp9.cc @@ -12,8 +12,11 @@ #include +#include "api/transport/field_trial_based_config.h" #include "api/video_codecs/sdp_video_format.h" -#include "modules/video_coding/codecs/vp9/vp9_impl.h" +#include "api/video_codecs/vp9_profile.h" +#include "modules/video_coding/codecs/vp9/libvpx_vp9_decoder.h" +#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h" #include "rtc_base/checks.h" #include "vpx/vp8cx.h" #include "vpx/vp8dx.h" @@ -39,6 +42,22 @@ std::vector SupportedVP9Codecs() { cricket::kVp9CodecName, {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile2)}})); } + + return supported_formats; +#else + return std::vector(); +#endif +} + +std::vector SupportedVP9DecoderCodecs() { +#ifdef RTC_ENABLE_VP9 + std::vector supported_formats = SupportedVP9Codecs(); + // The WebRTC internal decoder supports VP9 profile 1. 
However, there's + // currently no way of sending VP9 profile 1 using the internal encoder. + // It would require extended support for I444, I422, and I440 buffers. + supported_formats.push_back(SdpVideoFormat( + cricket::kVp9CodecName, + {{kVP9FmtpProfileId, VP9ProfileToString(VP9Profile::kProfile1)}})); return supported_formats; #else return std::vector(); @@ -47,7 +66,9 @@ std::vector SupportedVP9Codecs() { std::unique_ptr VP9Encoder::Create() { #ifdef RTC_ENABLE_VP9 - return std::make_unique(cricket::VideoCodec()); + return std::make_unique(cricket::VideoCodec(), + LibvpxInterface::Create(), + FieldTrialBasedConfig()); #else RTC_NOTREACHED(); return nullptr; @@ -57,7 +78,8 @@ std::unique_ptr VP9Encoder::Create() { std::unique_ptr VP9Encoder::Create( const cricket::VideoCodec& codec) { #ifdef RTC_ENABLE_VP9 - return std::make_unique(codec); + return std::make_unique(codec, LibvpxInterface::Create(), + FieldTrialBasedConfig()); #else RTC_NOTREACHED(); return nullptr; @@ -66,7 +88,7 @@ std::unique_ptr VP9Encoder::Create( std::unique_ptr VP9Decoder::Create() { #ifdef RTC_ENABLE_VP9 - return std::make_unique(); + return std::make_unique(); #else RTC_NOTREACHED(); return nullptr; diff --git a/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc b/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc index 551ace22a2..d1f58b1bb8 100644 --- a/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc +++ b/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.cc @@ -15,7 +15,6 @@ #include "rtc_base/checks.h" #include "rtc_base/logging.h" -#include "rtc_base/ref_counted_object.h" #include "vpx/vpx_codec.h" #include "vpx/vpx_decoder.h" #include "vpx/vpx_frame_buffer.h" @@ -58,7 +57,7 @@ Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) { RTC_DCHECK_GT(min_size, 0); rtc::scoped_refptr available_buffer = nullptr; { - rtc::CritScope cs(&buffers_lock_); + MutexLock lock(&buffers_lock_); // Do we have a buffer we can recycle? 
for (const auto& buffer : allocated_buffers_) { if (buffer->HasOneRef()) { @@ -68,7 +67,7 @@ Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) { } // Otherwise create one. if (available_buffer == nullptr) { - available_buffer = new rtc::RefCountedObject(); + available_buffer = new Vp9FrameBuffer(); allocated_buffers_.push_back(available_buffer); if (allocated_buffers_.size() > max_num_buffers_) { RTC_LOG(LS_WARNING) @@ -91,7 +90,7 @@ Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) { int Vp9FrameBufferPool::GetNumBuffersInUse() const { int num_buffers_in_use = 0; - rtc::CritScope cs(&buffers_lock_); + MutexLock lock(&buffers_lock_); for (const auto& buffer : allocated_buffers_) { if (!buffer->HasOneRef()) ++num_buffers_in_use; @@ -100,7 +99,7 @@ int Vp9FrameBufferPool::GetNumBuffersInUse() const { } bool Vp9FrameBufferPool::Resize(size_t max_number_of_buffers) { - rtc::CritScope cs(&buffers_lock_); + MutexLock lock(&buffers_lock_); size_t used_buffers_count = 0; for (const auto& buffer : allocated_buffers_) { // If the buffer is in use, the ref count will be >= 2, one from the list we @@ -130,7 +129,7 @@ bool Vp9FrameBufferPool::Resize(size_t max_number_of_buffers) { } void Vp9FrameBufferPool::ClearPool() { - rtc::CritScope cs(&buffers_lock_); + MutexLock lock(&buffers_lock_); allocated_buffers_.clear(); } diff --git a/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h b/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h index 02d2b26273..bce10be4d9 100644 --- a/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h +++ b/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h @@ -16,10 +16,10 @@ #include +#include "api/ref_counted_base.h" #include "api/scoped_refptr.h" #include "rtc_base/buffer.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/ref_count.h" +#include "rtc_base/synchronization/mutex.h" struct vpx_codec_ctx; struct vpx_codec_frame_buffer; @@ -65,13 +65,14 @@ constexpr size_t kDefaultMaxNumBuffers = 68; // 
vpx_codec_destroy(decoder_ctx); class Vp9FrameBufferPool { public: - class Vp9FrameBuffer : public rtc::RefCountInterface { + class Vp9FrameBuffer final + : public rtc::RefCountedNonVirtual { public: uint8_t* GetData(); size_t GetDataSize() const; void SetSize(size_t size); - virtual bool HasOneRef() const = 0; + using rtc::RefCountedNonVirtual::HasOneRef; private: // Data as an easily resizable buffer. @@ -119,7 +120,7 @@ class Vp9FrameBufferPool { private: // Protects |allocated_buffers_|. - rtc::CriticalSection buffers_lock_; + mutable Mutex buffers_lock_; // All buffers, in use or ready to be recycled. std::vector> allocated_buffers_ RTC_GUARDED_BY(buffers_lock_); diff --git a/modules/video_coding/decoder_database.cc b/modules/video_coding/decoder_database.cc index 38a18baa6d..6aa332eb88 100644 --- a/modules/video_coding/decoder_database.cc +++ b/modules/video_coding/decoder_database.cc @@ -15,12 +15,8 @@ namespace webrtc { -VCMDecoderMapItem::VCMDecoderMapItem(VideoCodec* settings, - int number_of_cores, - bool require_key_frame) - : settings(settings), - number_of_cores(number_of_cores), - require_key_frame(require_key_frame) { +VCMDecoderMapItem::VCMDecoderMapItem(VideoCodec* settings, int number_of_cores) + : settings(settings), number_of_cores(number_of_cores) { RTC_DCHECK_GE(number_of_cores, 0); } @@ -33,7 +29,10 @@ VCMExtDecoderMapItem::VCMExtDecoderMapItem( VCMDecoderMapItem::~VCMDecoderMapItem() {} VCMDecoderDataBase::VCMDecoderDataBase() - : receive_codec_(), dec_map_(), dec_external_map_() {} + : current_payload_type_(0), + receive_codec_(), + dec_map_(), + dec_external_map_() {} VCMDecoderDataBase::~VCMDecoderDataBase() { ptr_decoder_.reset(); @@ -57,7 +56,6 @@ bool VCMDecoderDataBase::DeregisterExternalDecoder(uint8_t payload_type) { // Release it if it was registered and in use. 
ptr_decoder_.reset(); } - DeregisterReceiveCodec(payload_type); delete it->second; dec_external_map_.erase(it); return true; @@ -74,17 +72,23 @@ void VCMDecoderDataBase::RegisterExternalDecoder(VideoDecoder* external_decoder, dec_external_map_[payload_type] = ext_decoder; } -bool VCMDecoderDataBase::RegisterReceiveCodec(const VideoCodec* receive_codec, - int number_of_cores, - bool require_key_frame) { +bool VCMDecoderDataBase::IsExternalDecoderRegistered( + uint8_t payload_type) const { + return payload_type == current_payload_type_ || + FindExternalDecoderItem(payload_type); +} + +bool VCMDecoderDataBase::RegisterReceiveCodec(uint8_t payload_type, + const VideoCodec* receive_codec, + int number_of_cores) { if (number_of_cores < 0) { return false; } // If payload value already exists, erase old and insert new. - DeregisterReceiveCodec(receive_codec->plType); + DeregisterReceiveCodec(payload_type); VideoCodec* new_receive_codec = new VideoCodec(*receive_codec); - dec_map_[receive_codec->plType] = new VCMDecoderMapItem( - new_receive_codec, number_of_cores, require_key_frame); + dec_map_[payload_type] = + new VCMDecoderMapItem(new_receive_codec, number_of_cores); return true; } @@ -95,9 +99,10 @@ bool VCMDecoderDataBase::DeregisterReceiveCodec(uint8_t payload_type) { } delete it->second; dec_map_.erase(it); - if (receive_codec_.plType == payload_type) { + if (payload_type == current_payload_type_) { // This codec is currently in use. - memset(&receive_codec_, 0, sizeof(VideoCodec)); + receive_codec_ = {}; + current_payload_type_ = 0; } return true; } @@ -107,33 +112,32 @@ VCMGenericDecoder* VCMDecoderDataBase::GetDecoder( VCMDecodedFrameCallback* decoded_frame_callback) { RTC_DCHECK(decoded_frame_callback->UserReceiveCallback()); uint8_t payload_type = frame.PayloadType(); - if (payload_type == receive_codec_.plType || payload_type == 0) { + if (payload_type == current_payload_type_ || payload_type == 0) { return ptr_decoder_.get(); } // If decoder exists - delete. 
if (ptr_decoder_) { ptr_decoder_.reset(); - memset(&receive_codec_, 0, sizeof(VideoCodec)); + receive_codec_ = {}; + current_payload_type_ = 0; } ptr_decoder_ = CreateAndInitDecoder(frame, &receive_codec_); if (!ptr_decoder_) { return nullptr; } + current_payload_type_ = frame.PayloadType(); VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback(); - callback->OnIncomingPayloadType(receive_codec_.plType); + callback->OnIncomingPayloadType(current_payload_type_); if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback) < 0) { ptr_decoder_.reset(); - memset(&receive_codec_, 0, sizeof(VideoCodec)); + receive_codec_ = {}; + current_payload_type_ = 0; return nullptr; } return ptr_decoder_.get(); } -bool VCMDecoderDataBase::PrefersLateDecoding() const { - return ptr_decoder_ ? ptr_decoder_->PrefersLateDecoding() : true; -} - std::unique_ptr VCMDecoderDataBase::CreateAndInitDecoder( const VCMEncodedFrame& frame, VideoCodec* new_codec) const { @@ -175,7 +179,7 @@ std::unique_ptr VCMDecoderDataBase::CreateAndInitDecoder( RTC_LOG(LS_ERROR) << "Failed to initialize decoder. 
Error code: " << err; return nullptr; } - memcpy(new_codec, decoder_item->settings.get(), sizeof(VideoCodec)); + *new_codec = *decoder_item->settings.get(); return ptr_decoder; } diff --git a/modules/video_coding/decoder_database.h b/modules/video_coding/decoder_database.h index 8c96b41efd..81c68e4138 100644 --- a/modules/video_coding/decoder_database.h +++ b/modules/video_coding/decoder_database.h @@ -20,14 +20,11 @@ namespace webrtc { struct VCMDecoderMapItem { public: - VCMDecoderMapItem(VideoCodec* settings, - int number_of_cores, - bool require_key_frame); + VCMDecoderMapItem(VideoCodec* settings, int number_of_cores); ~VCMDecoderMapItem(); std::unique_ptr settings; int number_of_cores; - bool require_key_frame; }; struct VCMExtDecoderMapItem { @@ -47,10 +44,11 @@ class VCMDecoderDataBase { bool DeregisterExternalDecoder(uint8_t payload_type); void RegisterExternalDecoder(VideoDecoder* external_decoder, uint8_t payload_type); + bool IsExternalDecoderRegistered(uint8_t payload_type) const; - bool RegisterReceiveCodec(const VideoCodec* receive_codec, - int number_of_cores, - bool require_key_frame); + bool RegisterReceiveCodec(uint8_t payload_type, + const VideoCodec* receive_codec, + int number_of_cores); bool DeregisterReceiveCodec(uint8_t payload_type); // Returns a decoder specified by frame.PayloadType. The decoded frame @@ -62,10 +60,6 @@ class VCMDecoderDataBase { const VCMEncodedFrame& frame, VCMDecodedFrameCallback* decoded_frame_callback); - // Returns true if the currently active decoder prefer to decode frames late. - // That means that frames must be decoded near the render times stamp. - bool PrefersLateDecoding() const; - private: typedef std::map DecoderMap; typedef std::map ExternalDecoderMap; @@ -79,6 +73,7 @@ class VCMDecoderDataBase { const VCMExtDecoderMapItem* FindExternalDecoderItem( uint8_t payload_type) const; + uint8_t current_payload_type_; // Corresponding to receive_codec_. 
VideoCodec receive_codec_; std::unique_ptr ptr_decoder_; DecoderMap dec_map_; diff --git a/modules/video_coding/decoding_state.cc b/modules/video_coding/decoding_state.cc index a951358992..5e405cbd05 100644 --- a/modules/video_coding/decoding_state.cc +++ b/modules/video_coding/decoding_state.cc @@ -55,21 +55,22 @@ uint16_t VCMDecodingState::sequence_num() const { } bool VCMDecodingState::IsOldFrame(const VCMFrameBuffer* frame) const { - assert(frame != NULL); + RTC_DCHECK(frame); if (in_initial_state_) return false; return !IsNewerTimestamp(frame->Timestamp(), time_stamp_); } bool VCMDecodingState::IsOldPacket(const VCMPacket* packet) const { - assert(packet != NULL); + RTC_DCHECK(packet); if (in_initial_state_) return false; return !IsNewerTimestamp(packet->timestamp, time_stamp_); } void VCMDecodingState::SetState(const VCMFrameBuffer* frame) { - assert(frame != NULL && frame->GetHighSeqNum() >= 0); + RTC_DCHECK(frame); + RTC_CHECK_GE(frame->GetHighSeqNum(), 0); if (!UsingFlexibleMode(frame)) UpdateSyncState(frame); sequence_num_ = static_cast(frame->GetHighSeqNum()); @@ -150,7 +151,7 @@ bool VCMDecodingState::UpdateEmptyFrame(const VCMFrameBuffer* frame) { } void VCMDecodingState::UpdateOldPacket(const VCMPacket* packet) { - assert(packet != NULL); + RTC_DCHECK(packet); if (packet->timestamp == time_stamp_) { // Late packet belonging to the last decoded frame - make sure we update the // last decoded sequence number. @@ -204,7 +205,7 @@ bool VCMDecodingState::ContinuousFrame(const VCMFrameBuffer* frame) const { // - Sequence numbers. // Return true when in initial state. // Note that when a method is not applicable it will return false. - assert(frame != NULL); + RTC_DCHECK(frame); // A key frame is always considered continuous as it doesn't refer to any // frames and therefore won't introduce any errors even if prior frames are // missing. 
diff --git a/modules/video_coding/decoding_state.h b/modules/video_coding/decoding_state.h index b87fb2d034..ec972949d8 100644 --- a/modules/video_coding/decoding_state.h +++ b/modules/video_coding/decoding_state.h @@ -11,6 +11,7 @@ #ifndef MODULES_VIDEO_CODING_DECODING_STATE_H_ #define MODULES_VIDEO_CODING_DECODING_STATE_H_ +#include #include #include #include diff --git a/modules/video_coding/deprecated/BUILD.gn b/modules/video_coding/deprecated/BUILD.gn new file mode 100644 index 0000000000..487c0267d5 --- /dev/null +++ b/modules/video_coding/deprecated/BUILD.gn @@ -0,0 +1,34 @@ +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../../webrtc.gni") + +rtc_library("nack_module") { + sources = [ + "nack_module.cc", + "nack_module.h", + ] + + deps = [ + "..:nack_module", + "../..:module_api", + "../../../api/units:time_delta", + "../../../api/units:timestamp", + "../../../rtc_base:checks", + "../../../rtc_base:criticalsection", + "../../../rtc_base:logging", + "../../../rtc_base:macromagic", + "../../../rtc_base:rtc_numerics", + "../../../rtc_base/experiments:field_trial_parser", + "../../../rtc_base/synchronization:mutex", + "../../../system_wrappers", + "../../../system_wrappers:field_trial", + "../../utility", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] +} diff --git a/modules/video_coding/nack_module.cc b/modules/video_coding/deprecated/nack_module.cc similarity index 83% rename from modules/video_coding/nack_module.cc rename to modules/video_coding/deprecated/nack_module.cc index 838af1548b..f8cfd3440b 100644 --- a/modules/video_coding/nack_module.cc +++ 
b/modules/video_coding/deprecated/nack_module.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "modules/video_coding/nack_module.h" +#include "modules/video_coding/deprecated/nack_module.h" #include #include @@ -45,25 +45,25 @@ int64_t GetSendNackDelay() { } } // namespace -NackModule::NackInfo::NackInfo() +DEPRECATED_NackModule::NackInfo::NackInfo() : seq_num(0), send_at_seq_num(0), sent_at_time(-1), retries(0) {} -NackModule::NackInfo::NackInfo(uint16_t seq_num, - uint16_t send_at_seq_num, - int64_t created_at_time) +DEPRECATED_NackModule::NackInfo::NackInfo(uint16_t seq_num, + uint16_t send_at_seq_num, + int64_t created_at_time) : seq_num(seq_num), send_at_seq_num(send_at_seq_num), created_at_time(created_at_time), sent_at_time(-1), retries(0) {} -NackModule::BackoffSettings::BackoffSettings(TimeDelta min_retry, - TimeDelta max_rtt, - double base) +DEPRECATED_NackModule::BackoffSettings::BackoffSettings(TimeDelta min_retry, + TimeDelta max_rtt, + double base) : min_retry_interval(min_retry), max_rtt(max_rtt), base(base) {} -absl::optional -NackModule::BackoffSettings::ParseFromFieldTrials() { +absl::optional +DEPRECATED_NackModule::BackoffSettings::ParseFromFieldTrials() { // Matches magic number in RTPSender::OnReceivedNack(). const TimeDelta kDefaultMinRetryInterval = TimeDelta::Millis(5); // Upper bound on link-delay considered for exponential backoff. 
@@ -82,15 +82,16 @@ NackModule::BackoffSettings::ParseFromFieldTrials() { field_trial::FindFullName("WebRTC-ExponentialNackBackoff")); if (enabled) { - return NackModule::BackoffSettings(min_retry.Get(), max_rtt.Get(), - base.Get()); + return DEPRECATED_NackModule::BackoffSettings(min_retry.Get(), + max_rtt.Get(), base.Get()); } return absl::nullopt; } -NackModule::NackModule(Clock* clock, - NackSender* nack_sender, - KeyFrameRequestSender* keyframe_request_sender) +DEPRECATED_NackModule::DEPRECATED_NackModule( + Clock* clock, + NackSender* nack_sender, + KeyFrameRequestSender* keyframe_request_sender) : clock_(clock), nack_sender_(nack_sender), keyframe_request_sender_(keyframe_request_sender), @@ -106,14 +107,15 @@ NackModule::NackModule(Clock* clock, RTC_DCHECK(keyframe_request_sender_); } -int NackModule::OnReceivedPacket(uint16_t seq_num, bool is_keyframe) { +int DEPRECATED_NackModule::OnReceivedPacket(uint16_t seq_num, + bool is_keyframe) { return OnReceivedPacket(seq_num, is_keyframe, false); } -int NackModule::OnReceivedPacket(uint16_t seq_num, - bool is_keyframe, - bool is_recovered) { - rtc::CritScope lock(&crit_); +int DEPRECATED_NackModule::OnReceivedPacket(uint16_t seq_num, + bool is_keyframe, + bool is_recovered) { + MutexLock lock(&mutex_); // TODO(philipel): When the packet includes information whether it is // retransmitted or not, use that value instead. 
For // now set it to true, which will cause the reordering @@ -181,8 +183,8 @@ int NackModule::OnReceivedPacket(uint16_t seq_num, return 0; } -void NackModule::ClearUpTo(uint16_t seq_num) { - rtc::CritScope lock(&crit_); +void DEPRECATED_NackModule::ClearUpTo(uint16_t seq_num) { + MutexLock lock(&mutex_); nack_list_.erase(nack_list_.begin(), nack_list_.lower_bound(seq_num)); keyframe_list_.erase(keyframe_list_.begin(), keyframe_list_.lower_bound(seq_num)); @@ -190,28 +192,28 @@ void NackModule::ClearUpTo(uint16_t seq_num) { recovered_list_.lower_bound(seq_num)); } -void NackModule::UpdateRtt(int64_t rtt_ms) { - rtc::CritScope lock(&crit_); +void DEPRECATED_NackModule::UpdateRtt(int64_t rtt_ms) { + MutexLock lock(&mutex_); rtt_ms_ = rtt_ms; } -void NackModule::Clear() { - rtc::CritScope lock(&crit_); +void DEPRECATED_NackModule::Clear() { + MutexLock lock(&mutex_); nack_list_.clear(); keyframe_list_.clear(); recovered_list_.clear(); } -int64_t NackModule::TimeUntilNextProcess() { +int64_t DEPRECATED_NackModule::TimeUntilNextProcess() { return std::max(next_process_time_ms_ - clock_->TimeInMilliseconds(), 0); } -void NackModule::Process() { +void DEPRECATED_NackModule::Process() { if (nack_sender_) { std::vector nack_batch; { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); nack_batch = GetNackBatch(kTimeOnly); } @@ -236,7 +238,7 @@ void NackModule::Process() { } } -bool NackModule::RemovePacketsUntilKeyFrame() { +bool DEPRECATED_NackModule::RemovePacketsUntilKeyFrame() { while (!keyframe_list_.empty()) { auto it = nack_list_.lower_bound(*keyframe_list_.begin()); @@ -254,8 +256,8 @@ bool NackModule::RemovePacketsUntilKeyFrame() { return false; } -void NackModule::AddPacketsToNack(uint16_t seq_num_start, - uint16_t seq_num_end) { +void DEPRECATED_NackModule::AddPacketsToNack(uint16_t seq_num_start, + uint16_t seq_num_end) { // Remove old packets. 
auto it = nack_list_.lower_bound(seq_num_end - kMaxPacketAge); nack_list_.erase(nack_list_.begin(), it); @@ -289,7 +291,8 @@ void NackModule::AddPacketsToNack(uint16_t seq_num_start, } } -std::vector NackModule::GetNackBatch(NackFilterOptions options) { +std::vector DEPRECATED_NackModule::GetNackBatch( + NackFilterOptions options) { bool consider_seq_num = options != kTimeOnly; bool consider_timestamp = options != kSeqNumOnly; Timestamp now = clock_->CurrentTime(); @@ -334,13 +337,13 @@ std::vector NackModule::GetNackBatch(NackFilterOptions options) { return nack_batch; } -void NackModule::UpdateReorderingStatistics(uint16_t seq_num) { +void DEPRECATED_NackModule::UpdateReorderingStatistics(uint16_t seq_num) { RTC_DCHECK(AheadOf(newest_seq_num_, seq_num)); uint16_t diff = ReverseDiff(newest_seq_num_, seq_num); reordering_histogram_.Add(diff); } -int NackModule::WaitNumberOfPackets(float probability) const { +int DEPRECATED_NackModule::WaitNumberOfPackets(float probability) const { if (reordering_histogram_.NumValues() == 0) return 0; return reordering_histogram_.InverseCdf(probability); diff --git a/modules/video_coding/nack_module.h b/modules/video_coding/deprecated/nack_module.h similarity index 77% rename from modules/video_coding/nack_module.h rename to modules/video_coding/deprecated/nack_module.h index d4f705b351..2fac6ce128 100644 --- a/modules/video_coding/nack_module.h +++ b/modules/video_coding/deprecated/nack_module.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef MODULES_VIDEO_CODING_NACK_MODULE_H_ -#define MODULES_VIDEO_CODING_NACK_MODULE_H_ +#ifndef MODULES_VIDEO_CODING_DEPRECATED_NACK_MODULE_H_ +#define MODULES_VIDEO_CODING_DEPRECATED_NACK_MODULE_H_ #include @@ -17,22 +17,23 @@ #include #include +#include "absl/base/attributes.h" #include "api/units/time_delta.h" #include "modules/include/module.h" #include "modules/include/module_common_types.h" #include "modules/video_coding/histogram.h" -#include "rtc_base/critical_section.h" #include "rtc_base/numerics/sequence_number_util.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" namespace webrtc { -class NackModule : public Module { +class DEPRECATED_NackModule : public Module { public: - NackModule(Clock* clock, - NackSender* nack_sender, - KeyFrameRequestSender* keyframe_request_sender); + DEPRECATED_NackModule(Clock* clock, + NackSender* nack_sender, + KeyFrameRequestSender* keyframe_request_sender); int OnReceivedPacket(uint16_t seq_num, bool is_keyframe); int OnReceivedPacket(uint16_t seq_num, bool is_keyframe, bool is_recovered); @@ -79,24 +80,24 @@ class NackModule : public Module { }; void AddPacketsToNack(uint16_t seq_num_start, uint16_t seq_num_end) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Removes packets from the nack list until the next keyframe. Returns true // if packets were removed. - bool RemovePacketsUntilKeyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + bool RemovePacketsUntilKeyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); std::vector GetNackBatch(NackFilterOptions options) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Update the reordering distribution. 
void UpdateReorderingStatistics(uint16_t seq_num) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns how many packets we have to wait in order to receive the packet // with probability |probabilty| or higher. int WaitNumberOfPackets(float probability) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - rtc::CriticalSection crit_; + Mutex mutex_; Clock* const clock_; NackSender* const nack_sender_; KeyFrameRequestSender* const keyframe_request_sender_; @@ -105,15 +106,15 @@ class NackModule : public Module { // known thread (e.g. see |initialized_|). Those probably do not need // synchronized access. std::map> nack_list_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); std::set> keyframe_list_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); std::set> recovered_list_ - RTC_GUARDED_BY(crit_); - video_coding::Histogram reordering_histogram_ RTC_GUARDED_BY(crit_); - bool initialized_ RTC_GUARDED_BY(crit_); - int64_t rtt_ms_ RTC_GUARDED_BY(crit_); - uint16_t newest_seq_num_ RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); + video_coding::Histogram reordering_histogram_ RTC_GUARDED_BY(mutex_); + bool initialized_ RTC_GUARDED_BY(mutex_); + int64_t rtt_ms_ RTC_GUARDED_BY(mutex_); + uint16_t newest_seq_num_ RTC_GUARDED_BY(mutex_); // Only touched on the process thread. 
int64_t next_process_time_ms_; @@ -124,6 +125,8 @@ class NackModule : public Module { const absl::optional backoff_settings_; }; +using NackModule ABSL_DEPRECATED("") = DEPRECATED_NackModule; + } // namespace webrtc -#endif // MODULES_VIDEO_CODING_NACK_MODULE_H_ +#endif // MODULES_VIDEO_CODING_DEPRECATED_NACK_MODULE_H_ diff --git a/modules/video_coding/encoded_frame.cc b/modules/video_coding/encoded_frame.cc index 1e9e374c64..637a20cfc9 100644 --- a/modules/video_coding/encoded_frame.cc +++ b/modules/video_coding/encoded_frame.cc @@ -43,7 +43,6 @@ void VCMEncodedFrame::Reset() { _frameType = VideoFrameType::kVideoFrameDelta; _encodedWidth = 0; _encodedHeight = 0; - _completeFrame = false; _missingFrame = false; set_size(0); _codecSpecificInfo.codecType = kVideoCodecGeneric; @@ -135,20 +134,10 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) { } case kVideoCodecH264: { _codecSpecificInfo.codecType = kVideoCodecH264; - - // The following H264 codec specific data are not used elsewhere. - // Instead they are read directly from the frame marking extension. - // These codec specific data structures should be removed - // when frame marking is used. 
- _codecSpecificInfo.codecSpecific.H264.temporal_idx = kNoTemporalIdx; - if (header->frame_marking.temporal_id != kNoTemporalIdx) { - _codecSpecificInfo.codecSpecific.H264.temporal_idx = - header->frame_marking.temporal_id; - _codecSpecificInfo.codecSpecific.H264.base_layer_sync = - header->frame_marking.base_layer_sync; - _codecSpecificInfo.codecSpecific.H264.idr_frame = - header->frame_marking.independent_frame; - } + break; + } + case kVideoCodecAV1: { + _codecSpecificInfo.codecType = kVideoCodecAV1; break; } default: { diff --git a/modules/video_coding/encoded_frame.h b/modules/video_coding/encoded_frame.h index 261aae77aa..9cc769277d 100644 --- a/modules/video_coding/encoded_frame.h +++ b/modules/video_coding/encoded_frame.h @@ -21,7 +21,7 @@ namespace webrtc { -class RTC_EXPORT VCMEncodedFrame : protected EncodedImage { +class RTC_EXPORT VCMEncodedFrame : public EncodedImage { public: VCMEncodedFrame(); VCMEncodedFrame(const VCMEncodedFrame&); @@ -34,7 +34,9 @@ class RTC_EXPORT VCMEncodedFrame : protected EncodedImage { _renderTimeMs = renderTimeMs; } - void SetPlayoutDelay(PlayoutDelay playout_delay) { + VideoPlayoutDelay PlayoutDelay() const { return playout_delay_; } + + void SetPlayoutDelay(VideoPlayoutDelay playout_delay) { playout_delay_ = playout_delay; } @@ -50,7 +52,6 @@ class RTC_EXPORT VCMEncodedFrame : protected EncodedImage { using EncodedImage::GetEncodedData; using EncodedImage::NtpTimeMs; using EncodedImage::PacketInfos; - using EncodedImage::Retain; using EncodedImage::set_size; using EncodedImage::SetColorSpace; using EncodedImage::SetEncodedData; @@ -90,10 +91,6 @@ class RTC_EXPORT VCMEncodedFrame : protected EncodedImage { */ EncodedImage::Timing video_timing() const { return timing_; } EncodedImage::Timing* video_timing_mutable() { return &timing_; } - /** - * True if this frame is complete, false otherwise - */ - bool Complete() const { return _completeFrame; } /** * True if there's a frame missing before this frame */ diff --git 
a/modules/video_coding/fec_controller_default.cc b/modules/video_coding/fec_controller_default.cc index 97919f5315..827c853541 100644 --- a/modules/video_coding/fec_controller_default.cc +++ b/modules/video_coding/fec_controller_default.cc @@ -20,7 +20,6 @@ #include "system_wrappers/include/field_trial.h" namespace webrtc { -using rtc::CritScope; const float kProtectionOverheadRateThreshold = 0.5; @@ -54,7 +53,7 @@ void FecControllerDefault::SetEncodingData(size_t width, size_t height, size_t num_temporal_layers, size_t max_payload_size) { - CritScope lock(&crit_sect_); + MutexLock lock(&mutex_); loss_prot_logic_->UpdateFrameSize(width, height); loss_prot_logic_->UpdateNumLayers(num_temporal_layers); max_payload_size_ = max_payload_size; @@ -94,7 +93,7 @@ uint32_t FecControllerDefault::UpdateFecRates( FecProtectionParams delta_fec_params; FecProtectionParams key_fec_params; { - CritScope lock(&crit_sect_); + MutexLock lock(&mutex_); loss_prot_logic_->UpdateBitRate(target_bitrate_kbps); loss_prot_logic_->UpdateRtt(round_trip_time_ms); // Update frame rate for the loss protection logic class: frame rate should @@ -175,7 +174,7 @@ void FecControllerDefault::SetProtectionMethod(bool enable_fec, } else if (enable_fec) { method = media_optimization::kFec; } - CritScope lock(&crit_sect_); + MutexLock lock(&mutex_); loss_prot_logic_->SetMethod(method); } @@ -183,7 +182,7 @@ void FecControllerDefault::UpdateWithEncodedData( const size_t encoded_image_length, const VideoFrameType encoded_image_frametype) { const size_t encoded_length = encoded_image_length; - CritScope lock(&crit_sect_); + MutexLock lock(&mutex_); if (encoded_length > 0) { const bool delta_frame = encoded_image_frametype != VideoFrameType::kVideoFrameKey; diff --git a/modules/video_coding/fec_controller_default.h b/modules/video_coding/fec_controller_default.h index 02c0ec0d37..6b9e8eb8e5 100644 --- a/modules/video_coding/fec_controller_default.h +++ b/modules/video_coding/fec_controller_default.h @@ -20,7 
+20,7 @@ #include "api/fec_controller.h" #include "modules/video_coding/media_opt_util.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" @@ -54,10 +54,10 @@ class FecControllerDefault : public FecController { enum { kBitrateAverageWinMs = 1000 }; Clock* const clock_; VCMProtectionCallback* protection_callback_; - rtc::CriticalSection crit_sect_; + Mutex mutex_; std::unique_ptr loss_prot_logic_ - RTC_GUARDED_BY(crit_sect_); - size_t max_payload_size_ RTC_GUARDED_BY(crit_sect_); + RTC_GUARDED_BY(mutex_); + size_t max_payload_size_ RTC_GUARDED_BY(mutex_); RTC_DISALLOW_COPY_AND_ASSIGN(FecControllerDefault); const float overhead_threshold_; }; diff --git a/modules/video_coding/frame_buffer.cc b/modules/video_coding/frame_buffer.cc index 755acb2940..8f73e73bad 100644 --- a/modules/video_coding/frame_buffer.cc +++ b/modules/video_coding/frame_buffer.cc @@ -70,17 +70,12 @@ void VCMFrameBuffer::SetGofInfo(const GofInfoVP9& gof_info, size_t idx) { gof_info.temporal_up_switch[idx]; } -bool VCMFrameBuffer::IsSessionComplete() const { - TRACE_EVENT0("webrtc", "VCMFrameBuffer::IsSessionComplete"); - return _sessionInfo.complete(); -} - // Insert packet VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet, int64_t timeInMs, const FrameData& frame_data) { TRACE_EVENT0("webrtc", "VCMFrameBuffer::InsertPacket"); - assert(!(NULL == packet.dataPtr && packet.sizeBytes > 0)); + RTC_DCHECK(!(NULL == packet.dataPtr && packet.sizeBytes > 0)); if (packet.dataPtr != NULL) { _payloadType = packet.payloadType; } @@ -98,15 +93,16 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet, } } + size_t oldSize = encoded_image_buffer_ ? encoded_image_buffer_->size() : 0; uint32_t requiredSizeBytes = size() + packet.sizeBytes + (packet.insertStartCode ? 
kH264StartCodeLengthBytes : 0); - if (requiredSizeBytes > capacity()) { + if (requiredSizeBytes > oldSize) { const uint8_t* prevBuffer = data(); const uint32_t increments = requiredSizeBytes / kBufferIncStepSizeBytes + (requiredSizeBytes % kBufferIncStepSizeBytes > 0); - const uint32_t newSize = capacity() + increments * kBufferIncStepSizeBytes; + const uint32_t newSize = oldSize + increments * kBufferIncStepSizeBytes; if (newSize > kMaxJBFrameSizeBytes) { RTC_LOG(LS_ERROR) << "Failed to insert packet due to frame being too " "big."; @@ -133,7 +129,9 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet, if (packet.sizeBytes > 0) CopyCodecSpecific(&packet.video_header); - int retVal = _sessionInfo.InsertPacket(packet, data(), frame_data); + int retVal = _sessionInfo.InsertPacket( + packet, encoded_image_buffer_ ? encoded_image_buffer_->data() : nullptr, + frame_data); if (retVal == -1) { return kSizeError; } else if (retVal == -2) { @@ -232,19 +230,19 @@ void VCMFrameBuffer::SetState(VCMFrameBufferStateEnum state) { switch (state) { case kStateIncomplete: // we can go to this state from state kStateEmpty - assert(_state == kStateEmpty); + RTC_DCHECK_EQ(_state, kStateEmpty); // Do nothing, we received a packet break; case kStateComplete: - assert(_state == kStateEmpty || _state == kStateIncomplete); + RTC_DCHECK(_state == kStateEmpty || _state == kStateIncomplete); break; case kStateEmpty: // Should only be set to empty through Reset(). - assert(false); + RTC_NOTREACHED(); break; } _state = state; @@ -262,7 +260,6 @@ void VCMFrameBuffer::PrepareForDecode(bool continuous) { // Transfer frame information to EncodedFrame and create any codec // specific information. 
_frameType = _sessionInfo.FrameType(); - _completeFrame = _sessionInfo.complete(); _missingFrame = !continuous; } diff --git a/modules/video_coding/frame_buffer2.cc b/modules/video_coding/frame_buffer2.cc index 64d3699e01..80f9eb1814 100644 --- a/modules/video_coding/frame_buffer2.cc +++ b/modules/video_coding/frame_buffer2.cc @@ -63,7 +63,11 @@ FrameBuffer::FrameBuffer(Clock* clock, last_log_non_decoded_ms_(-kLogNonDecodedIntervalMs), add_rtt_to_playout_delay_( webrtc::field_trial::IsEnabled("WebRTC-AddRttToPlayoutDelay")), - rtt_mult_settings_(RttMultExperiment::GetRttMultValue()) { + rtt_mult_settings_(RttMultExperiment::GetRttMultValue()), + zero_playout_delay_max_decode_queue_size_("max_decode_queue_size", + kMaxFramesBuffered) { + ParseFieldTrial({&zero_playout_delay_max_decode_queue_size_}, + field_trial::FindFullName("WebRTC-ZeroPlayoutDelay")); callback_checker_.Detach(); } @@ -82,7 +86,7 @@ void FrameBuffer::NextFrame( int64_t latest_return_time_ms = clock_->TimeInMilliseconds() + max_wait_time_ms; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (stopped_) { return; } @@ -102,24 +106,30 @@ void FrameBuffer::StartWaitForNextFrameOnQueue() { RTC_DCHECK_RUN_ON(&callback_checker_); // If this task has not been cancelled, we did not get any new frames // while waiting. Continue with frame delivery. - rtc::CritScope lock(&crit_); - if (!frames_to_decode_.empty()) { - // We have frames, deliver! - frame_handler_(absl::WrapUnique(GetNextFrame()), kFrameFound); + std::unique_ptr frame; + std::function, ReturnReason)> + frame_handler; + { + MutexLock lock(&mutex_); + if (!frames_to_decode_.empty()) { + // We have frames, deliver! 
+ frame = absl::WrapUnique(GetNextFrame()); + timing_->SetLastDecodeScheduledTimestamp( + clock_->TimeInMilliseconds()); + } else if (clock_->TimeInMilliseconds() < latest_return_time_ms_) { + // If there's no frames to decode and there is still time left, it + // means that the frame buffer was cleared between creation and + // execution of this task. Continue waiting for the remaining time. + int64_t wait_ms = FindNextFrame(clock_->TimeInMilliseconds()); + return TimeDelta::Millis(wait_ms); + } + frame_handler = std::move(frame_handler_); CancelCallback(); - return TimeDelta::Zero(); // Ignored. - } else if (clock_->TimeInMilliseconds() >= latest_return_time_ms_) { - // We have timed out, signal this and stop repeating. - frame_handler_(nullptr, kTimeout); - CancelCallback(); - return TimeDelta::Zero(); // Ignored. - } else { - // If there's no frames to decode and there is still time left, it - // means that the frame buffer was cleared between creation and - // execution of this task. Continue waiting for the remaining time. - int64_t wait_ms = FindNextFrame(clock_->TimeInMilliseconds()); - return TimeDelta::Millis(wait_ms); } + // Deliver frame, if any. Otherwise signal timeout. + ReturnReason reason = frame ? kFrameFound : kTimeout; + frame_handler(std::move(frame), reason); + return TimeDelta::Zero(); // Ignored. }); } @@ -153,38 +163,44 @@ int64_t FrameBuffer::FindNextFrame(int64_t now_ms) { continue; } - // Only ever return all parts of a superframe. Therefore skip this - // frame if it's not a beginning of a superframe. - if (frame->inter_layer_predicted) { - continue; - } - // Gather all remaining frames for the same superframe. 
std::vector current_superframe; current_superframe.push_back(frame_it); bool last_layer_completed = frame_it->second.frame->is_last_spatial_layer; FrameMap::iterator next_frame_it = frame_it; - while (true) { + while (!last_layer_completed) { ++next_frame_it; - if (next_frame_it == frames_.end() || - next_frame_it->first.picture_id != frame->id.picture_id || - !next_frame_it->second.continuous) { + + if (next_frame_it == frames_.end() || !next_frame_it->second.frame) { break; } - // Check if the next frame has some undecoded references other than - // the previous frame in the same superframe. - size_t num_allowed_undecoded_refs = - (next_frame_it->second.frame->inter_layer_predicted) ? 1 : 0; - if (next_frame_it->second.num_missing_decodable > - num_allowed_undecoded_refs) { + + if (next_frame_it->second.frame->Timestamp() != frame->Timestamp() || + !next_frame_it->second.continuous) { break; } - // All frames in the superframe should have the same timestamp. - if (frame->Timestamp() != next_frame_it->second.frame->Timestamp()) { - RTC_LOG(LS_WARNING) << "Frames in a single superframe have different" - " timestamps. Skipping undecodable superframe."; - break; + + if (next_frame_it->second.num_missing_decodable > 0) { + bool has_inter_layer_dependency = false; + for (size_t i = 0; i < EncodedFrame::kMaxFrameReferences && + i < next_frame_it->second.frame->num_references; + ++i) { + if (next_frame_it->second.frame->references[i] >= frame_it->first) { + has_inter_layer_dependency = true; + break; + } + } + + // If the frame has an undecoded dependency that is not within the same + // temporal unit then this frame is not yet ready to be decoded. If it + // is within the same temporal unit then the not yet decoded dependency + // is just a lower spatial frame, which is ok. 
+ if (!has_inter_layer_dependency || + next_frame_it->second.num_missing_decodable > 1) { + break; + } } + current_superframe.push_back(next_frame_it); last_layer_completed = next_frame_it->second.frame->is_last_spatial_layer; } @@ -200,7 +216,11 @@ int64_t FrameBuffer::FindNextFrame(int64_t now_ms) { if (frame->RenderTime() == -1) { frame->SetRenderTime(timing_->RenderTimeMs(frame->Timestamp(), now_ms)); } - wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms); + bool too_many_frames_queued = + frames_.size() > zero_playout_delay_max_decode_queue_size_ ? true + : false; + wait_ms = timing_->MaxWaitingTime(frame->RenderTime(), now_ms, + too_many_frames_queued); // This will cause the frame buffer to prefer high framerate rather // than high resolution in the case of the decoder not decoding fast @@ -251,11 +271,11 @@ EncodedFrame* FrameBuffer::GetNextFrame() { // Remove decoded frame and all undecoded frames before it. if (stats_callback_) { - unsigned int dropped_frames = std::count_if( - frames_.begin(), frame_it, - [](const std::pair& frame) { - return frame.second.frame != nullptr; - }); + unsigned int dropped_frames = + std::count_if(frames_.begin(), frame_it, + [](const std::pair& frame) { + return frame.second.frame != nullptr; + }); if (dropped_frames > 0) { stats_callback_->OnDroppedFrames(dropped_frames); } @@ -329,19 +349,13 @@ bool FrameBuffer::HasBadRenderTiming(const EncodedFrame& frame, void FrameBuffer::SetProtectionMode(VCMVideoProtection mode) { TRACE_EVENT0("webrtc", "FrameBuffer::SetProtectionMode"); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); protection_mode_ = mode; } -void FrameBuffer::Start() { - TRACE_EVENT0("webrtc", "FrameBuffer::Start"); - rtc::CritScope lock(&crit_); - stopped_ = false; -} - void FrameBuffer::Stop() { TRACE_EVENT0("webrtc", "FrameBuffer::Stop"); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (stopped_) return; stopped_ = true; @@ -350,18 +364,23 @@ void FrameBuffer::Stop() { } void 
FrameBuffer::Clear() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ClearFramesAndHistory(); } +int FrameBuffer::Size() { + MutexLock lock(&mutex_); + return frames_.size(); +} + void FrameBuffer::UpdateRtt(int64_t rtt_ms) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); jitter_estimator_.UpdateRtt(rtt_ms); } bool FrameBuffer::ValidReferences(const EncodedFrame& frame) const { for (size_t i = 0; i < frame.num_references; ++i) { - if (frame.references[i] >= frame.id.picture_id) + if (frame.references[i] >= frame.Id()) return false; for (size_t j = i + 1; j < frame.num_references; ++j) { @@ -370,9 +389,6 @@ bool FrameBuffer::ValidReferences(const EncodedFrame& frame) const { } } - if (frame.inter_layer_predicted && frame.id.spatial_layer == 0) - return false; - return true; } @@ -384,136 +400,82 @@ void FrameBuffer::CancelCallback() { callback_checker_.Detach(); } -bool FrameBuffer::IsCompleteSuperFrame(const EncodedFrame& frame) { - if (frame.inter_layer_predicted) { - // Check that all previous spatial layers are already inserted. - VideoLayerFrameId id = frame.id; - RTC_DCHECK_GT(id.spatial_layer, 0); - --id.spatial_layer; - FrameMap::iterator prev_frame = frames_.find(id); - if (prev_frame == frames_.end() || !prev_frame->second.frame) - return false; - while (prev_frame->second.frame->inter_layer_predicted) { - if (prev_frame == frames_.begin()) - return false; - --prev_frame; - --id.spatial_layer; - if (!prev_frame->second.frame || - prev_frame->first.picture_id != id.picture_id || - prev_frame->first.spatial_layer != id.spatial_layer) { - return false; - } - } - } - - if (!frame.is_last_spatial_layer) { - // Check that all following spatial layers are already inserted. 
- VideoLayerFrameId id = frame.id; - ++id.spatial_layer; - FrameMap::iterator next_frame = frames_.find(id); - if (next_frame == frames_.end() || !next_frame->second.frame) - return false; - while (!next_frame->second.frame->is_last_spatial_layer) { - ++next_frame; - ++id.spatial_layer; - if (next_frame == frames_.end() || !next_frame->second.frame || - next_frame->first.picture_id != id.picture_id || - next_frame->first.spatial_layer != id.spatial_layer) { - return false; - } - } - } - - return true; -} - int64_t FrameBuffer::InsertFrame(std::unique_ptr frame) { TRACE_EVENT0("webrtc", "FrameBuffer::InsertFrame"); RTC_DCHECK(frame); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); - const VideoLayerFrameId& id = frame->id; - int64_t last_continuous_picture_id = - !last_continuous_frame_ ? -1 : last_continuous_frame_->picture_id; + int64_t last_continuous_frame_id = last_continuous_frame_.value_or(-1); if (!ValidReferences(*frame)) { - RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" - << id.picture_id << ":" - << static_cast(id.spatial_layer) - << ") has invalid frame references, dropping frame."; - return last_continuous_picture_id; + RTC_LOG(LS_WARNING) << "Frame " << frame->Id() + << " has invalid frame references, dropping frame."; + return last_continuous_frame_id; } if (frames_.size() >= kMaxFramesBuffered) { if (frame->is_keyframe()) { - RTC_LOG(LS_WARNING) << "Inserting keyframe (picture_id:spatial_id) (" - << id.picture_id << ":" - << static_cast(id.spatial_layer) - << ") but buffer is full, clearing" + RTC_LOG(LS_WARNING) << "Inserting keyframe " << frame->Id() + << " but buffer is full, clearing" " buffer and inserting the frame."; ClearFramesAndHistory(); } else { - RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" - << id.picture_id << ":" - << static_cast(id.spatial_layer) - << ") could not be inserted due to the frame " + RTC_LOG(LS_WARNING) << "Frame " << frame->Id() + << " could not be inserted due to the frame " 
"buffer being full, dropping frame."; - return last_continuous_picture_id; + return last_continuous_frame_id; } } auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId(); auto last_decoded_frame_timestamp = decoded_frames_history_.GetLastDecodedFrameTimestamp(); - if (last_decoded_frame && id <= *last_decoded_frame) { + if (last_decoded_frame && frame->Id() <= *last_decoded_frame) { if (AheadOf(frame->Timestamp(), *last_decoded_frame_timestamp) && frame->is_keyframe()) { - // If this frame has a newer timestamp but an earlier picture id then we - // assume there has been a jump in the picture id due to some encoder + // If this frame has a newer timestamp but an earlier frame id then we + // assume there has been a jump in the frame id due to some encoder // reconfiguration or some other reason. Even though this is not according // to spec we can still continue to decode from this frame if it is a // keyframe. RTC_LOG(LS_WARNING) - << "A jump in picture id was detected, clearing buffer."; + << "A jump in frame id was detected, clearing buffer."; ClearFramesAndHistory(); - last_continuous_picture_id = -1; + last_continuous_frame_id = -1; } else { - RTC_LOG(LS_WARNING) << "Frame with (picture_id:spatial_id) (" - << id.picture_id << ":" - << static_cast(id.spatial_layer) - << ") inserted after frame (" - << last_decoded_frame->picture_id << ":" - << static_cast(last_decoded_frame->spatial_layer) - << ") was handed off for decoding, dropping frame."; - return last_continuous_picture_id; + RTC_LOG(LS_WARNING) << "Frame " << frame->Id() << " inserted after frame " + << *last_decoded_frame + << " was handed off for decoding, dropping frame."; + return last_continuous_frame_id; } } // Test if inserting this frame would cause the order of the frames to become // ambiguous (covering more than half the interval of 2^16). This can happen - // when the picture id make large jumps mid stream. 
- if (!frames_.empty() && id < frames_.begin()->first && - frames_.rbegin()->first < id) { - RTC_LOG(LS_WARNING) - << "A jump in picture id was detected, clearing buffer."; + // when the frame id make large jumps mid stream. + if (!frames_.empty() && frame->Id() < frames_.begin()->first && + frames_.rbegin()->first < frame->Id()) { + RTC_LOG(LS_WARNING) << "A jump in frame id was detected, clearing buffer."; ClearFramesAndHistory(); - last_continuous_picture_id = -1; + last_continuous_frame_id = -1; } - auto info = frames_.emplace(id, FrameInfo()).first; + auto info = frames_.emplace(frame->Id(), FrameInfo()).first; if (info->second.frame) { - return last_continuous_picture_id; + return last_continuous_frame_id; } if (!UpdateFrameInfoWithIncomingFrame(*frame, info)) - return last_continuous_picture_id; + return last_continuous_frame_id; if (!frame->delayed_by_retransmission()) timing_->IncomingTimestamp(frame->Timestamp(), frame->ReceivedTime()); - if (stats_callback_ && IsCompleteSuperFrame(*frame)) { + // It can happen that a frame will be reported as fully received even if a + // lower spatial layer frame is missing. + if (stats_callback_ && frame->is_last_spatial_layer) { stats_callback_->OnCompleteFrame(frame->is_keyframe(), frame->size(), frame->contentType()); } @@ -523,13 +485,13 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr frame) { if (info->second.num_missing_continuous == 0) { info->second.continuous = true; PropagateContinuity(info); - last_continuous_picture_id = last_continuous_frame_->picture_id; + last_continuous_frame_id = *last_continuous_frame_; // Since we now have new continuous frames there might be a better frame // to return from NextFrame. 
if (callback_queue_) { callback_queue_->PostTask([this] { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (!callback_task_.Running()) return; RTC_CHECK(frame_handler_); @@ -539,7 +501,7 @@ int64_t FrameBuffer::InsertFrame(std::unique_ptr frame) { } } - return last_continuous_picture_id; + return last_continuous_frame_id; } void FrameBuffer::PropagateContinuity(FrameMap::iterator start) { @@ -592,8 +554,6 @@ void FrameBuffer::PropagateDecodability(const FrameInfo& info) { bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame, FrameMap::iterator info) { TRACE_EVENT0("webrtc", "FrameBuffer::UpdateFrameInfoWithIncomingFrame"); - const VideoLayerFrameId& id = frame.id; - auto last_decoded_frame = decoded_frames_history_.GetLastDecodedFrameId(); RTC_DCHECK(!last_decoded_frame || *last_decoded_frame < info->first); @@ -606,52 +566,34 @@ bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame, // so that |num_missing_continuous| and |num_missing_decodable| can be // decremented as frames become continuous/are decoded. struct Dependency { - VideoLayerFrameId id; + int64_t frame_id; bool continuous; }; std::vector not_yet_fulfilled_dependencies; // Find all dependencies that have not yet been fulfilled. for (size_t i = 0; i < frame.num_references; ++i) { - VideoLayerFrameId ref_key(frame.references[i], frame.id.spatial_layer); // Does |frame| depend on a frame earlier than the last decoded one? - if (last_decoded_frame && ref_key <= *last_decoded_frame) { + if (last_decoded_frame && frame.references[i] <= *last_decoded_frame) { // Was that frame decoded? If not, this |frame| will never become // decodable. 
- if (!decoded_frames_history_.WasDecoded(ref_key)) { + if (!decoded_frames_history_.WasDecoded(frame.references[i])) { int64_t now_ms = clock_->TimeInMilliseconds(); if (last_log_non_decoded_ms_ + kLogNonDecodedIntervalMs < now_ms) { RTC_LOG(LS_WARNING) - << "Frame with (picture_id:spatial_id) (" << id.picture_id << ":" - << static_cast(id.spatial_layer) - << ") depends on a non-decoded frame more previous than" - " the last decoded frame, dropping frame."; + << "Frame " << frame.Id() + << " depends on a non-decoded frame more previous than the last " + "decoded frame, dropping frame."; last_log_non_decoded_ms_ = now_ms; } return false; } } else { - auto ref_info = frames_.find(ref_key); + auto ref_info = frames_.find(frame.references[i]); bool ref_continuous = ref_info != frames_.end() && ref_info->second.continuous; - not_yet_fulfilled_dependencies.push_back({ref_key, ref_continuous}); - } - } - - // Does |frame| depend on the lower spatial layer? - if (frame.inter_layer_predicted) { - VideoLayerFrameId ref_key(frame.id.picture_id, frame.id.spatial_layer - 1); - auto ref_info = frames_.find(ref_key); - - bool lower_layer_decoded = - last_decoded_frame && *last_decoded_frame == ref_key; - bool lower_layer_continuous = - lower_layer_decoded || - (ref_info != frames_.end() && ref_info->second.continuous); - - if (!lower_layer_continuous || !lower_layer_decoded) { not_yet_fulfilled_dependencies.push_back( - {ref_key, lower_layer_continuous}); + {frame.references[i], ref_continuous}); } } @@ -662,7 +604,7 @@ bool FrameBuffer::UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame, if (dep.continuous) --info->second.num_missing_continuous; - frames_[dep.id].dependent_frames.push_back(id); + frames_[dep.frame_id].dependent_frames.push_back(frame.Id()); } return true; @@ -698,11 +640,11 @@ void FrameBuffer::UpdateTimingFrameInfo() { void FrameBuffer::ClearFramesAndHistory() { TRACE_EVENT0("webrtc", "FrameBuffer::ClearFramesAndHistory"); if (stats_callback_) { - 
unsigned int dropped_frames = std::count_if( - frames_.begin(), frames_.end(), - [](const std::pair& frame) { - return frame.second.frame != nullptr; - }); + unsigned int dropped_frames = + std::count_if(frames_.begin(), frames_.end(), + [](const std::pair& frame) { + return frame.second.frame != nullptr; + }); if (dropped_frames > 0) { stats_callback_->OnDroppedFrames(dropped_frames); } @@ -726,15 +668,14 @@ EncodedFrame* FrameBuffer::CombineAndDeleteFrames( } auto encoded_image_buffer = EncodedImageBuffer::Create(total_length); uint8_t* buffer = encoded_image_buffer->data(); - first_frame->SetSpatialLayerFrameSize(first_frame->id.spatial_layer, + first_frame->SetSpatialLayerFrameSize(first_frame->SpatialIndex().value_or(0), first_frame->size()); memcpy(buffer, first_frame->data(), first_frame->size()); buffer += first_frame->size(); // Spatial index of combined frame is set equal to spatial index of its top // spatial layer. - first_frame->SetSpatialIndex(last_frame->id.spatial_layer); - first_frame->id.spatial_layer = last_frame->id.spatial_layer; + first_frame->SetSpatialIndex(last_frame->SpatialIndex().value_or(0)); first_frame->video_timing_mutable()->network2_timestamp_ms = last_frame->video_timing().network2_timestamp_ms; @@ -744,8 +685,8 @@ EncodedFrame* FrameBuffer::CombineAndDeleteFrames( // Append all remaining frames to the first one. 
for (size_t i = 1; i < frames.size(); ++i) { EncodedFrame* next_frame = frames[i]; - first_frame->SetSpatialLayerFrameSize(next_frame->id.spatial_layer, - next_frame->size()); + first_frame->SetSpatialLayerFrameSize( + next_frame->SpatialIndex().value_or(0), next_frame->size()); memcpy(buffer, next_frame->data(), next_frame->size()); buffer += next_frame->size(); delete next_frame; diff --git a/modules/video_coding/frame_buffer2.h b/modules/video_coding/frame_buffer2.h index d824ddf4d0..c7d8fcd403 100644 --- a/modules/video_coding/frame_buffer2.h +++ b/modules/video_coding/frame_buffer2.h @@ -18,17 +18,18 @@ #include #include "absl/container/inlined_vector.h" +#include "api/sequence_checker.h" #include "api/video/encoded_frame.h" #include "modules/video_coding/include/video_coding_defines.h" #include "modules/video_coding/inter_frame_delay.h" #include "modules/video_coding/jitter_estimator.h" #include "modules/video_coding/utility/decoded_frames_history.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" +#include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/experiments/rtt_mult_experiment.h" #include "rtc_base/numerics/sequence_number_util.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" #include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/thread_annotations.h" @@ -50,11 +51,14 @@ class FrameBuffer { VCMTiming* timing, VCMReceiveStatisticsCallback* stats_callback); + FrameBuffer() = delete; + FrameBuffer(const FrameBuffer&) = delete; + FrameBuffer& operator=(const FrameBuffer&) = delete; + virtual ~FrameBuffer(); // Insert a frame into the frame buffer. Returns the picture id // of the last continuous frame or -1 if there is no continuous frame. - // TODO(philipel): Return a VideoLayerFrameId and not only the picture id. 
int64_t InsertFrame(std::unique_ptr frame); // Get the next frame for decoding. Will return at latest after @@ -71,10 +75,6 @@ class FrameBuffer { // implemented. void SetProtectionMode(VCMVideoProtection mode); - // Start the frame buffer, has no effect if the frame buffer is started. - // The frame buffer is started upon construction. - void Start(); - // Stop the frame buffer, causing any sleeping thread in NextFrame to // return immediately. void Stop(); @@ -85,6 +85,8 @@ class FrameBuffer { // Clears the FrameBuffer, removing all the buffered frames. void Clear(); + int Size(); + private: struct FrameInfo { FrameInfo(); @@ -93,7 +95,7 @@ class FrameBuffer { // Which other frames that have direct unfulfilled dependencies // on this frame. - absl::InlinedVector dependent_frames; + absl::InlinedVector dependent_frames; // A frame is continiuous if it has all its referenced/indirectly // referenced frames. @@ -113,45 +115,41 @@ class FrameBuffer { std::unique_ptr frame; }; - using FrameMap = std::map; + using FrameMap = std::map; // Check that the references of |frame| are valid. bool ValidReferences(const EncodedFrame& frame) const; - int64_t FindNextFrame(int64_t now_ms) RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); - EncodedFrame* GetNextFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + int64_t FindNextFrame(int64_t now_ms) RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + EncodedFrame* GetNextFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void StartWaitForNextFrameOnQueue() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); - void CancelCallback() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + void StartWaitForNextFrameOnQueue() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void CancelCallback() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Update all directly dependent and indirectly dependent frames and mark // them as continuous if all their references has been fulfilled. 
void PropagateContinuity(FrameMap::iterator start) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Marks the frame as decoded and updates all directly dependent frames. void PropagateDecodability(const FrameInfo& info) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Update the corresponding FrameInfo of |frame| and all FrameInfos that // |frame| references. // Return false if |frame| will never be decodable, true otherwise. bool UpdateFrameInfoWithIncomingFrame(const EncodedFrame& frame, FrameMap::iterator info) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); - - void UpdateJitterDelay() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void UpdateTimingFrameInfo() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + void UpdateJitterDelay() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - void ClearFramesAndHistory() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + void UpdateTimingFrameInfo() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - // Checks if the superframe, which current frame belongs to, is complete. - bool IsCompleteSuperFrame(const EncodedFrame& frame) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + void ClearFramesAndHistory() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); bool HasBadRenderTiming(const EncodedFrame& frame, int64_t now_ms) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // The cleaner solution would be to have the NextFrame function return a // vector of frames, but until the decoding pipeline can support decoding @@ -160,40 +158,44 @@ class FrameBuffer { EncodedFrame* CombineAndDeleteFrames( const std::vector& frames) const; - SequenceChecker construction_checker_; - SequenceChecker callback_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker construction_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker callback_checker_; // Stores only undecoded frames. 
- FrameMap frames_ RTC_GUARDED_BY(crit_); - DecodedFramesHistory decoded_frames_history_ RTC_GUARDED_BY(crit_); + FrameMap frames_ RTC_GUARDED_BY(mutex_); + DecodedFramesHistory decoded_frames_history_ RTC_GUARDED_BY(mutex_); - rtc::CriticalSection crit_; + Mutex mutex_; Clock* const clock_; - rtc::TaskQueue* callback_queue_ RTC_GUARDED_BY(crit_); - RepeatingTaskHandle callback_task_ RTC_GUARDED_BY(crit_); + rtc::TaskQueue* callback_queue_ RTC_GUARDED_BY(mutex_); + RepeatingTaskHandle callback_task_ RTC_GUARDED_BY(mutex_); std::function, ReturnReason)> - frame_handler_ RTC_GUARDED_BY(crit_); - int64_t latest_return_time_ms_ RTC_GUARDED_BY(crit_); - bool keyframe_required_ RTC_GUARDED_BY(crit_); - - VCMJitterEstimator jitter_estimator_ RTC_GUARDED_BY(crit_); - VCMTiming* const timing_ RTC_GUARDED_BY(crit_); - VCMInterFrameDelay inter_frame_delay_ RTC_GUARDED_BY(crit_); - absl::optional last_continuous_frame_ - RTC_GUARDED_BY(crit_); - std::vector frames_to_decode_ RTC_GUARDED_BY(crit_); - bool stopped_ RTC_GUARDED_BY(crit_); - VCMVideoProtection protection_mode_ RTC_GUARDED_BY(crit_); + frame_handler_ RTC_GUARDED_BY(mutex_); + int64_t latest_return_time_ms_ RTC_GUARDED_BY(mutex_); + bool keyframe_required_ RTC_GUARDED_BY(mutex_); + + VCMJitterEstimator jitter_estimator_ RTC_GUARDED_BY(mutex_); + VCMTiming* const timing_ RTC_GUARDED_BY(mutex_); + VCMInterFrameDelay inter_frame_delay_ RTC_GUARDED_BY(mutex_); + absl::optional last_continuous_frame_ RTC_GUARDED_BY(mutex_); + std::vector frames_to_decode_ RTC_GUARDED_BY(mutex_); + bool stopped_ RTC_GUARDED_BY(mutex_); + VCMVideoProtection protection_mode_ RTC_GUARDED_BY(mutex_); VCMReceiveStatisticsCallback* const stats_callback_; - int64_t last_log_non_decoded_ms_ RTC_GUARDED_BY(crit_); + int64_t last_log_non_decoded_ms_ RTC_GUARDED_BY(mutex_); const bool add_rtt_to_playout_delay_; // rtt_mult experiment settings. 
const absl::optional rtt_mult_settings_; - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(FrameBuffer); + // Maximum number of frames in the decode queue to allow pacing. If the + // queue grows beyond the max limit, pacing will be disabled and frames will + // be pushed to the decoder as soon as possible. This only has an effect + // when the low-latency rendering path is active, which is indicated by + // the frame's render time == 0. + FieldTrialParameter zero_playout_delay_max_decode_queue_size_; }; } // namespace video_coding diff --git a/modules/video_coding/frame_buffer2_unittest.cc b/modules/video_coding/frame_buffer2_unittest.cc index b4d663ee06..f2a0589411 100644 --- a/modules/video_coding/frame_buffer2_unittest.cc +++ b/modules/video_coding/frame_buffer2_unittest.cc @@ -56,7 +56,8 @@ class VCMTimingFake : public VCMTiming { } int64_t MaxWaitingTime(int64_t render_time_ms, - int64_t now_ms) const override { + int64_t now_ms, + bool too_many_frames_queued) const override { return render_time_ms - now_ms - kDecodeTime; } @@ -108,21 +109,26 @@ class FrameObjectFake : public EncodedFrame { class VCMReceiveStatisticsCallbackMock : public VCMReceiveStatisticsCallback { public: - MOCK_METHOD3(OnCompleteFrame, - void(bool is_keyframe, - size_t size_bytes, - VideoContentType content_type)); - MOCK_METHOD1(OnDroppedFrames, void(uint32_t frames_dropped)); - MOCK_METHOD1(OnDiscardedPacketsUpdated, void(int discarded_packets)); - MOCK_METHOD1(OnFrameCountsUpdated, void(const FrameCounts& frame_counts)); - MOCK_METHOD6(OnFrameBufferTimingsUpdated, - void(int max_decode_ms, - int current_delay_ms, - int target_delay_ms, - int jitter_buffer_ms, - int min_playout_delay_ms, - int render_delay_ms)); - MOCK_METHOD1(OnTimingFrameInfoUpdated, void(const TimingFrameInfo& info)); + MOCK_METHOD(void, + OnCompleteFrame, + (bool is_keyframe, + size_t size_bytes, + VideoContentType content_type), + (override)); + MOCK_METHOD(void, OnDroppedFrames, (uint32_t frames_dropped), (override)); + 
MOCK_METHOD(void, + OnFrameBufferTimingsUpdated, + (int max_decode_ms, + int current_delay_ms, + int target_delay_ms, + int jitter_buffer_ms, + int min_playout_delay_ms, + int render_delay_ms), + (override)); + MOCK_METHOD(void, + OnTimingFrameInfoUpdated, + (const TimingFrameInfo& info), + (override)); }; class TestFrameBuffer2 : public ::testing::Test { @@ -150,7 +156,6 @@ class TestFrameBuffer2 : public ::testing::Test { std::unique_ptr CreateFrame(uint16_t picture_id, uint8_t spatial_layer, int64_t ts_ms, - bool inter_layer_predicted, bool last_spatial_layer, size_t frame_size_bytes, T... refs) { @@ -160,12 +165,10 @@ class TestFrameBuffer2 : public ::testing::Test { {rtc::checked_cast(refs)...}}; auto frame = std::make_unique(); - frame->id.picture_id = picture_id; - frame->id.spatial_layer = spatial_layer; + frame->SetId(picture_id); frame->SetSpatialIndex(spatial_layer); frame->SetTimestamp(ts_ms * 90); frame->num_references = references.size(); - frame->inter_layer_predicted = inter_layer_predicted; frame->is_last_spatial_layer = last_spatial_layer; // Add some data to buffer. frame->SetEncodedData(EncodedImageBuffer::Create(frame_size_bytes)); @@ -178,18 +181,17 @@ class TestFrameBuffer2 : public ::testing::Test { int InsertFrame(uint16_t picture_id, uint8_t spatial_layer, int64_t ts_ms, - bool inter_layer_predicted, bool last_spatial_layer, size_t frame_size_bytes, T... 
refs) { - return buffer_->InsertFrame( - CreateFrame(picture_id, spatial_layer, ts_ms, inter_layer_predicted, - last_spatial_layer, frame_size_bytes, refs...)); + return buffer_->InsertFrame(CreateFrame(picture_id, spatial_layer, ts_ms, + last_spatial_layer, + frame_size_bytes, refs...)); } int InsertNackedFrame(uint16_t picture_id, int64_t ts_ms) { std::unique_ptr frame = - CreateFrame(picture_id, 0, ts_ms, false, true, kFrameSize); + CreateFrame(picture_id, 0, ts_ms, true, kFrameSize); frame->set_delayed_by_retransmission(true); return buffer_->InsertFrame(std::move(frame)); } @@ -198,7 +200,7 @@ class TestFrameBuffer2 : public ::testing::Test { time_task_queue_.PostTask([this, max_wait_time, keyframe_required]() { buffer_->NextFrame( max_wait_time, keyframe_required, &time_task_queue_, - [this](std::unique_ptr frame, + [this](std::unique_ptr frame, video_coding::FrameBuffer::ReturnReason reason) { if (reason != FrameBuffer::ReturnReason::kStopped) { frames_.emplace_back(std::move(frame)); @@ -213,8 +215,8 @@ class TestFrameBuffer2 : public ::testing::Test { void CheckFrame(size_t index, int picture_id, int spatial_layer) { ASSERT_LT(index, frames_.size()); ASSERT_TRUE(frames_[index]); - ASSERT_EQ(picture_id, frames_[index]->id.picture_id); - ASSERT_EQ(spatial_layer, frames_[index]->id.spatial_layer); + ASSERT_EQ(picture_id, frames_[index]->Id()); + ASSERT_EQ(spatial_layer, frames_[index]->SpatialIndex().value_or(0)); } void CheckFrameSize(size_t index, size_t size) { @@ -255,7 +257,7 @@ TEST_F(TestFrameBuffer2, WaitForFrame) { uint32_t ts = Rand(); ExtractFrame(50); - InsertFrame(pid, 0, ts, false, true, kFrameSize); + InsertFrame(pid, 0, ts, true, kFrameSize); time_controller_.AdvanceTime(TimeDelta::Millis(50)); CheckFrame(0, pid, 0); } @@ -264,8 +266,8 @@ TEST_F(TestFrameBuffer2, OneSuperFrame) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, false, kFrameSize); - InsertFrame(pid, 1, ts, true, true, kFrameSize); + 
InsertFrame(pid, 0, ts, false, kFrameSize); + InsertFrame(pid + 1, 1, ts, true, kFrameSize); ExtractFrame(); CheckFrame(0, pid, 1); @@ -275,9 +277,9 @@ TEST_F(TestFrameBuffer2, ZeroPlayoutDelay) { VCMTiming timing(time_controller_.GetClock()); buffer_.reset( new FrameBuffer(time_controller_.GetClock(), &timing, &stats_callback_)); - const PlayoutDelay kPlayoutDelayMs = {0, 0}; + const VideoPlayoutDelay kPlayoutDelayMs = {0, 0}; std::unique_ptr test_frame(new FrameObjectFake()); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetPlayoutDelay(kPlayoutDelayMs); buffer_->InsertFrame(std::move(test_frame)); ExtractFrame(0, false); @@ -291,8 +293,8 @@ TEST_F(TestFrameBuffer2, DISABLED_OneUnorderedSuperFrame) { uint32_t ts = Rand(); ExtractFrame(50); - InsertFrame(pid, 1, ts, true, true, kFrameSize); - InsertFrame(pid, 0, ts, false, false, kFrameSize); + InsertFrame(pid, 1, ts, true, kFrameSize); + InsertFrame(pid, 0, ts, false, kFrameSize); time_controller_.AdvanceTime(TimeDelta::Millis(0)); CheckFrame(0, pid, 0); @@ -308,11 +310,10 @@ TEST_F(TestFrameBuffer2, DISABLED_OneLayerStreamReordered) { CheckFrame(0, pid, 0); for (int i = 1; i < 10; i += 2) { ExtractFrame(50); - InsertFrame(pid + i + 1, 0, ts + (i + 1) * kFps10, false, true, kFrameSize, + InsertFrame(pid + i + 1, 0, ts + (i + 1) * kFps10, true, kFrameSize, pid + i); time_controller_.AdvanceTime(TimeDelta::Millis(kFps10)); - InsertFrame(pid + i, 0, ts + i * kFps10, false, true, kFrameSize, - pid + i - 1); + InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1); time_controller_.AdvanceTime(TimeDelta::Millis(kFps10)); ExtractFrame(); CheckFrame(i, pid + i, 0); @@ -329,9 +330,9 @@ TEST_F(TestFrameBuffer2, MissingFrame) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, true, kFrameSize); - InsertFrame(pid + 2, 0, ts, false, true, kFrameSize, pid); - InsertFrame(pid + 3, 0, ts, false, true, kFrameSize, pid + 1, pid + 2); + InsertFrame(pid, 0, ts, 
true, kFrameSize); + InsertFrame(pid + 2, 0, ts, true, kFrameSize, pid); + InsertFrame(pid + 3, 0, ts, true, kFrameSize, pid + 1, pid + 2); ExtractFrame(); ExtractFrame(); ExtractFrame(); @@ -345,12 +346,11 @@ TEST_F(TestFrameBuffer2, OneLayerStream) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, true, kFrameSize); + InsertFrame(pid, 0, ts, true, kFrameSize); ExtractFrame(); CheckFrame(0, pid, 0); for (int i = 1; i < 10; ++i) { - InsertFrame(pid + i, 0, ts + i * kFps10, false, true, kFrameSize, - pid + i - 1); + InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1); ExtractFrame(); time_controller_.AdvanceTime(TimeDelta::Millis(kFps10)); CheckFrame(i, pid + i, 0); @@ -361,13 +361,13 @@ TEST_F(TestFrameBuffer2, DropTemporalLayerSlowDecoder) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, true, kFrameSize); - InsertFrame(pid + 1, 0, ts + kFps20, false, true, kFrameSize, pid); + InsertFrame(pid, 0, ts, true, kFrameSize); + InsertFrame(pid + 1, 0, ts + kFps20, true, kFrameSize, pid); for (int i = 2; i < 10; i += 2) { uint32_t ts_tl0 = ts + i / 2 * kFps10; - InsertFrame(pid + i, 0, ts_tl0, false, true, kFrameSize, pid + i - 2); - InsertFrame(pid + i + 1, 0, ts_tl0 + kFps20, false, true, kFrameSize, - pid + i, pid + i - 1); + InsertFrame(pid + i, 0, ts_tl0, true, kFrameSize, pid + i - 2); + InsertFrame(pid + i + 1, 0, ts_tl0 + kFps20, true, kFrameSize, pid + i, + pid + i - 1); } EXPECT_CALL(stats_callback_, OnDroppedFrames(1)).Times(3); @@ -393,10 +393,10 @@ TEST_F(TestFrameBuffer2, DropFramesIfSystemIsStalled) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, true, kFrameSize); - InsertFrame(pid + 1, 0, ts + 1 * kFps10, false, true, kFrameSize, pid); - InsertFrame(pid + 2, 0, ts + 2 * kFps10, false, true, kFrameSize, pid + 1); - InsertFrame(pid + 3, 0, ts + 3 * kFps10, false, true, kFrameSize); + InsertFrame(pid, 0, ts, true, kFrameSize); + InsertFrame(pid + 
1, 0, ts + 1 * kFps10, true, kFrameSize, pid); + InsertFrame(pid + 2, 0, ts + 2 * kFps10, true, kFrameSize, pid + 1); + InsertFrame(pid + 3, 0, ts + 3 * kFps10, true, kFrameSize); ExtractFrame(); // Jump forward in time, simulating the system being stalled for some reason. @@ -413,10 +413,9 @@ TEST_F(TestFrameBuffer2, DroppedFramesCountedOnClear) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, true, kFrameSize); + InsertFrame(pid, 0, ts, true, kFrameSize); for (int i = 1; i < 5; ++i) { - InsertFrame(pid + i, 0, ts + i * kFps10, false, true, kFrameSize, - pid + i - 1); + InsertFrame(pid + i, 0, ts + i * kFps10, true, kFrameSize, pid + i - 1); } // All frames should be dropped when Clear is called. @@ -428,11 +427,11 @@ TEST_F(TestFrameBuffer2, InsertLateFrame) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, true, kFrameSize); + InsertFrame(pid, 0, ts, true, kFrameSize); ExtractFrame(); - InsertFrame(pid + 2, 0, ts, false, true, kFrameSize); + InsertFrame(pid + 2, 0, ts, true, kFrameSize); ExtractFrame(); - InsertFrame(pid + 1, 0, ts, false, true, kFrameSize, pid); + InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid); ExtractFrame(); CheckFrame(0, pid, 0); @@ -451,7 +450,7 @@ TEST_F(TestFrameBuffer2, ProtectionModeNackFEC) { InsertNackedFrame(pid, ts); InsertNackedFrame(pid + 1, ts + 100); InsertNackedFrame(pid + 2, ts + 200); - InsertFrame(pid + 3, 0, ts + 300, false, true, kFrameSize); + InsertFrame(pid + 3, 0, ts + 300, true, kFrameSize); ExtractFrame(); ExtractFrame(); ExtractFrame(); @@ -472,7 +471,7 @@ TEST_F(TestFrameBuffer2, ProtectionModeNack) { InsertNackedFrame(pid, ts); InsertNackedFrame(pid + 1, ts + 100); InsertNackedFrame(pid + 2, ts + 200); - InsertFrame(pid + 3, 0, ts + 300, false, true, kFrameSize); + InsertFrame(pid + 3, 0, ts + 300, true, kFrameSize); ExtractFrame(); ExtractFrame(); ExtractFrame(); @@ -486,51 +485,48 @@ TEST_F(TestFrameBuffer2, NoContinuousFrame) { uint16_t pid = 
Rand(); uint32_t ts = Rand(); - EXPECT_EQ(-1, InsertFrame(pid + 1, 0, ts, false, true, kFrameSize, pid)); + EXPECT_EQ(-1, InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid)); } TEST_F(TestFrameBuffer2, LastContinuousFrameSingleLayer) { uint16_t pid = Rand(); uint32_t ts = Rand(); - EXPECT_EQ(pid, InsertFrame(pid, 0, ts, false, true, kFrameSize)); - EXPECT_EQ(pid, InsertFrame(pid + 2, 0, ts, false, true, kFrameSize, pid + 1)); - EXPECT_EQ(pid + 2, InsertFrame(pid + 1, 0, ts, false, true, kFrameSize, pid)); - EXPECT_EQ(pid + 2, - InsertFrame(pid + 4, 0, ts, false, true, kFrameSize, pid + 3)); - EXPECT_EQ(pid + 5, InsertFrame(pid + 5, 0, ts, false, true, kFrameSize)); + EXPECT_EQ(pid, InsertFrame(pid, 0, ts, true, kFrameSize)); + EXPECT_EQ(pid, InsertFrame(pid + 2, 0, ts, true, kFrameSize, pid + 1)); + EXPECT_EQ(pid + 2, InsertFrame(pid + 1, 0, ts, true, kFrameSize, pid)); + EXPECT_EQ(pid + 2, InsertFrame(pid + 4, 0, ts, true, kFrameSize, pid + 3)); + EXPECT_EQ(pid + 5, InsertFrame(pid + 5, 0, ts, true, kFrameSize)); } TEST_F(TestFrameBuffer2, LastContinuousFrameTwoLayers) { uint16_t pid = Rand(); uint32_t ts = Rand(); - EXPECT_EQ(pid, InsertFrame(pid, 0, ts, false, false, kFrameSize)); - EXPECT_EQ(pid, InsertFrame(pid, 1, ts, true, true, kFrameSize)); - EXPECT_EQ(pid, InsertFrame(pid + 1, 1, ts, true, true, kFrameSize, pid)); - EXPECT_EQ(pid, - InsertFrame(pid + 2, 0, ts, false, false, kFrameSize, pid + 1)); - EXPECT_EQ(pid, InsertFrame(pid + 2, 1, ts, true, true, kFrameSize, pid + 1)); - EXPECT_EQ(pid, - InsertFrame(pid + 3, 0, ts, false, false, kFrameSize, pid + 2)); - EXPECT_EQ(pid + 3, - InsertFrame(pid + 1, 0, ts, false, false, kFrameSize, pid)); - EXPECT_EQ(pid + 3, - InsertFrame(pid + 3, 1, ts, true, true, kFrameSize, pid + 2)); + EXPECT_EQ(pid, InsertFrame(pid, 0, ts, false, kFrameSize)); + EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 1, ts, true, kFrameSize)); + EXPECT_EQ(pid + 1, + InsertFrame(pid + 3, 1, ts, true, kFrameSize, pid + 1, pid + 2)); + EXPECT_EQ(pid 
+ 1, InsertFrame(pid + 4, 0, ts, false, kFrameSize, pid + 2)); + EXPECT_EQ(pid + 1, + InsertFrame(pid + 5, 1, ts, true, kFrameSize, pid + 3, pid + 4)); + EXPECT_EQ(pid + 1, InsertFrame(pid + 6, 0, ts, false, kFrameSize, pid + 4)); + EXPECT_EQ(pid + 6, InsertFrame(pid + 2, 0, ts, false, kFrameSize, pid)); + EXPECT_EQ(pid + 7, + InsertFrame(pid + 7, 1, ts, true, kFrameSize, pid + 5, pid + 6)); } TEST_F(TestFrameBuffer2, PictureIdJumpBack) { uint16_t pid = Rand(); uint32_t ts = Rand(); - EXPECT_EQ(pid, InsertFrame(pid, 0, ts, false, true, kFrameSize)); - EXPECT_EQ(pid + 1, - InsertFrame(pid + 1, 0, ts + 1, false, true, kFrameSize, pid)); + EXPECT_EQ(pid, InsertFrame(pid, 0, ts, true, kFrameSize)); + EXPECT_EQ(pid + 1, InsertFrame(pid + 1, 0, ts + 1, true, kFrameSize, pid)); ExtractFrame(); CheckFrame(0, pid, 0); // Jump back in pid but increase ts. - EXPECT_EQ(pid - 1, InsertFrame(pid - 1, 0, ts + 2, false, true, kFrameSize)); + EXPECT_EQ(pid - 1, InsertFrame(pid - 1, 0, ts + 2, true, kFrameSize)); ExtractFrame(); ExtractFrame(); CheckFrame(1, pid - 1, 0); @@ -549,11 +545,9 @@ TEST_F(TestFrameBuffer2, StatsCallback) { { std::unique_ptr frame(new FrameObjectFake()); frame->SetEncodedData(EncodedImageBuffer::Create(kFrameSize)); - frame->id.picture_id = pid; - frame->id.spatial_layer = 0; + frame->SetId(pid); frame->SetTimestamp(ts); frame->num_references = 0; - frame->inter_layer_predicted = false; EXPECT_EQ(buffer_->InsertFrame(std::move(frame)), pid); } @@ -563,42 +557,42 @@ TEST_F(TestFrameBuffer2, StatsCallback) { } TEST_F(TestFrameBuffer2, ForwardJumps) { - EXPECT_EQ(5453, InsertFrame(5453, 0, 1, false, true, kFrameSize)); + EXPECT_EQ(5453, InsertFrame(5453, 0, 1, true, kFrameSize)); ExtractFrame(); - EXPECT_EQ(5454, InsertFrame(5454, 0, 1, false, true, kFrameSize, 5453)); + EXPECT_EQ(5454, InsertFrame(5454, 0, 1, true, kFrameSize, 5453)); ExtractFrame(); - EXPECT_EQ(15670, InsertFrame(15670, 0, 1, false, true, kFrameSize)); + EXPECT_EQ(15670, InsertFrame(15670, 
0, 1, true, kFrameSize)); ExtractFrame(); - EXPECT_EQ(29804, InsertFrame(29804, 0, 1, false, true, kFrameSize)); + EXPECT_EQ(29804, InsertFrame(29804, 0, 1, true, kFrameSize)); ExtractFrame(); - EXPECT_EQ(29805, InsertFrame(29805, 0, 1, false, true, kFrameSize, 29804)); + EXPECT_EQ(29805, InsertFrame(29805, 0, 1, true, kFrameSize, 29804)); ExtractFrame(); - EXPECT_EQ(29806, InsertFrame(29806, 0, 1, false, true, kFrameSize, 29805)); + EXPECT_EQ(29806, InsertFrame(29806, 0, 1, true, kFrameSize, 29805)); ExtractFrame(); - EXPECT_EQ(33819, InsertFrame(33819, 0, 1, false, true, kFrameSize)); + EXPECT_EQ(33819, InsertFrame(33819, 0, 1, true, kFrameSize)); ExtractFrame(); - EXPECT_EQ(41248, InsertFrame(41248, 0, 1, false, true, kFrameSize)); + EXPECT_EQ(41248, InsertFrame(41248, 0, 1, true, kFrameSize)); ExtractFrame(); } TEST_F(TestFrameBuffer2, DuplicateFrames) { - EXPECT_EQ(22256, InsertFrame(22256, 0, 1, false, true, kFrameSize)); + EXPECT_EQ(22256, InsertFrame(22256, 0, 1, true, kFrameSize)); ExtractFrame(); - EXPECT_EQ(22256, InsertFrame(22256, 0, 1, false, true, kFrameSize)); + EXPECT_EQ(22256, InsertFrame(22256, 0, 1, true, kFrameSize)); } // TODO(philipel): implement more unittests related to invalid references. 
TEST_F(TestFrameBuffer2, InvalidReferences) { - EXPECT_EQ(-1, InsertFrame(0, 0, 1000, false, true, kFrameSize, 2)); - EXPECT_EQ(1, InsertFrame(1, 0, 2000, false, true, kFrameSize)); + EXPECT_EQ(-1, InsertFrame(0, 0, 1000, true, kFrameSize, 2)); + EXPECT_EQ(1, InsertFrame(1, 0, 2000, true, kFrameSize)); ExtractFrame(); - EXPECT_EQ(2, InsertFrame(2, 0, 3000, false, true, kFrameSize, 1)); + EXPECT_EQ(2, InsertFrame(2, 0, 3000, true, kFrameSize, 1)); } TEST_F(TestFrameBuffer2, KeyframeRequired) { - EXPECT_EQ(1, InsertFrame(1, 0, 1000, false, true, kFrameSize)); - EXPECT_EQ(2, InsertFrame(2, 0, 2000, false, true, kFrameSize, 1)); - EXPECT_EQ(3, InsertFrame(3, 0, 3000, false, true, kFrameSize)); + EXPECT_EQ(1, InsertFrame(1, 0, 1000, true, kFrameSize)); + EXPECT_EQ(2, InsertFrame(2, 0, 2000, true, kFrameSize, 1)); + EXPECT_EQ(3, InsertFrame(3, 0, 3000, true, kFrameSize)); ExtractFrame(); ExtractFrame(0, true); ExtractFrame(); @@ -612,38 +606,38 @@ TEST_F(TestFrameBuffer2, KeyframeClearsFullBuffer) { const int kMaxBufferSize = 600; for (int i = 1; i <= kMaxBufferSize; ++i) - EXPECT_EQ(-1, InsertFrame(i, 0, i * 1000, false, true, kFrameSize, i - 1)); + EXPECT_EQ(-1, InsertFrame(i, 0, i * 1000, true, kFrameSize, i - 1)); ExtractFrame(); CheckNoFrame(0); EXPECT_EQ(kMaxBufferSize + 1, InsertFrame(kMaxBufferSize + 1, 0, (kMaxBufferSize + 1) * 1000, - false, true, kFrameSize)); + true, kFrameSize)); ExtractFrame(); CheckFrame(1, kMaxBufferSize + 1, 0); } TEST_F(TestFrameBuffer2, DontUpdateOnUndecodableFrame) { - InsertFrame(1, 0, 0, false, true, kFrameSize); + InsertFrame(1, 0, 0, true, kFrameSize); ExtractFrame(0, true); - InsertFrame(3, 0, 0, false, true, kFrameSize, 2, 0); - InsertFrame(3, 0, 0, false, true, kFrameSize, 0); - InsertFrame(2, 0, 0, false, true, kFrameSize); + InsertFrame(3, 0, 0, true, kFrameSize, 2, 0); + InsertFrame(3, 0, 0, true, kFrameSize, 0); + InsertFrame(2, 0, 0, true, kFrameSize); ExtractFrame(0, true); ExtractFrame(0, true); } 
TEST_F(TestFrameBuffer2, DontDecodeOlderTimestamp) { - InsertFrame(2, 0, 1, false, true, kFrameSize); - InsertFrame(1, 0, 2, false, true, + InsertFrame(2, 0, 1, true, kFrameSize); + InsertFrame(1, 0, 2, true, kFrameSize); // Older picture id but newer timestamp. ExtractFrame(0); ExtractFrame(0); CheckFrame(0, 1, 0); CheckNoFrame(1); - InsertFrame(3, 0, 4, false, true, kFrameSize); - InsertFrame(4, 0, 3, false, true, + InsertFrame(3, 0, 4, true, kFrameSize); + InsertFrame(4, 0, 3, true, kFrameSize); // Newer picture id but older timestamp. ExtractFrame(0); ExtractFrame(0); @@ -655,8 +649,8 @@ TEST_F(TestFrameBuffer2, CombineFramesToSuperframe) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, false, kFrameSize); - InsertFrame(pid, 1, ts, true, true, 2 * kFrameSize); + InsertFrame(pid, 0, ts, false, kFrameSize); + InsertFrame(pid + 1, 1, ts, true, 2 * kFrameSize, pid); ExtractFrame(0); ExtractFrame(0); CheckFrame(0, pid, 1); @@ -673,25 +667,25 @@ TEST_F(TestFrameBuffer2, HigherSpatialLayerNonDecodable) { uint16_t pid = Rand(); uint32_t ts = Rand(); - InsertFrame(pid, 0, ts, false, false, kFrameSize); - InsertFrame(pid, 1, ts, true, true, kFrameSize); + InsertFrame(pid, 0, ts, false, kFrameSize); + InsertFrame(pid + 1, 1, ts, true, kFrameSize, pid); ExtractFrame(0); CheckFrame(0, pid, 1); - InsertFrame(pid + 1, 1, ts + kFps20, false, true, kFrameSize, pid); - InsertFrame(pid + 2, 0, ts + kFps10, false, false, kFrameSize, pid); - InsertFrame(pid + 2, 1, ts + kFps10, true, true, kFrameSize, pid + 1); + InsertFrame(pid + 3, 1, ts + kFps20, true, kFrameSize, pid); + InsertFrame(pid + 4, 0, ts + kFps10, false, kFrameSize, pid); + InsertFrame(pid + 5, 1, ts + kFps10, true, kFrameSize, pid + 3, pid + 4); time_controller_.AdvanceTime(TimeDelta::Millis(1000)); - // Frame pid+1 is decodable but too late. - // In superframe pid+2 frame sid=0 is decodable, but frame sid=1 is not. 
- // Incorrect implementation might skip pid+1 frame and output undecodable - // pid+2 instead. + // Frame pid+3 is decodable but too late. + // In superframe pid+4 is decodable, but frame pid+5 is not. + // Incorrect implementation might skip pid+3 frame and output undecodable + // pid+5 instead. ExtractFrame(); ExtractFrame(); - CheckFrame(1, pid + 1, 1); - CheckFrame(2, pid + 2, 1); + CheckFrame(1, pid + 3, 1); + CheckFrame(2, pid + 4, 1); } } // namespace video_coding diff --git a/modules/video_coding/frame_dependencies_calculator.cc b/modules/video_coding/frame_dependencies_calculator.cc index 6de5081b94..7ca59f779a 100644 --- a/modules/video_coding/frame_dependencies_calculator.cc +++ b/modules/video_coding/frame_dependencies_calculator.cc @@ -17,14 +17,12 @@ #include "absl/algorithm/container.h" #include "absl/container/inlined_vector.h" #include "api/array_view.h" -#include "api/video/video_frame_type.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" namespace webrtc { absl::InlinedVector FrameDependenciesCalculator::FromBuffersUsage( - VideoFrameType frame_type, int64_t frame_id, rtc::ArrayView buffers_usage) { absl::InlinedVector dependencies; @@ -37,29 +35,28 @@ absl::InlinedVector FrameDependenciesCalculator::FromBuffersUsage( } std::set direct_depenendencies; std::set indirect_depenendencies; - if (frame_type == VideoFrameType::kVideoFrameDelta) { - for (const CodecBufferUsage& buffer_usage : buffers_usage) { - if (!buffer_usage.referenced) { - continue; - } - const BufferUsage& buffer = buffers_[buffer_usage.id]; - if (buffer.frame_id == absl::nullopt) { - RTC_LOG(LS_ERROR) << "Odd configuration: frame " << frame_id - << " references buffer #" << buffer_usage.id - << " that was never updated."; - continue; - } - direct_depenendencies.insert(*buffer.frame_id); - indirect_depenendencies.insert(buffer.dependencies.begin(), - buffer.dependencies.end()); + + for (const CodecBufferUsage& buffer_usage : buffers_usage) { + if
(!buffer_usage.referenced) { + continue; + } + const BufferUsage& buffer = buffers_[buffer_usage.id]; + if (buffer.frame_id == absl::nullopt) { + RTC_LOG(LS_ERROR) << "Odd configuration: frame " << frame_id + << " references buffer #" << buffer_usage.id + << " that was never updated."; + continue; } - // Reduce references: if frame #3 depends on frame #2 and #1, and frame #2 - // depends on frame #1, then frame #3 needs to depend just on frame #2. - // Though this set diff removes only 1 level of indirection, it seems - // enough for all currently used structures. - absl::c_set_difference(direct_depenendencies, indirect_depenendencies, - std::back_inserter(dependencies)); + direct_depenendencies.insert(*buffer.frame_id); + indirect_depenendencies.insert(buffer.dependencies.begin(), + buffer.dependencies.end()); } + // Reduce references: if frame #3 depends on frame #2 and #1, and frame #2 + // depends on frame #1, then frame #3 needs to depend just on frame #2. + // Though this set diff removes only 1 level of indirection, it seems + // enough for all currently used structures. + absl::c_set_difference(direct_depenendencies, indirect_depenendencies, + std::back_inserter(dependencies)); // Update buffers. for (const CodecBufferUsage& buffer_usage : buffers_usage) { diff --git a/modules/video_coding/frame_dependencies_calculator.h b/modules/video_coding/frame_dependencies_calculator.h index b70eddfc53..2c4a8502e1 100644 --- a/modules/video_coding/frame_dependencies_calculator.h +++ b/modules/video_coding/frame_dependencies_calculator.h @@ -18,7 +18,6 @@ #include "absl/container/inlined_vector.h" #include "absl/types/optional.h" #include "api/array_view.h" -#include "api/video/video_frame_type.h" #include "common_video/generic_frame_descriptor/generic_frame_info.h" namespace webrtc { @@ -33,7 +32,6 @@ class FrameDependenciesCalculator { // Calculates frame dependencies based on previous encoder buffer usage. 
absl::InlinedVector FromBuffersUsage( - VideoFrameType frame_type, int64_t frame_id, rtc::ArrayView buffers_usage); diff --git a/modules/video_coding/frame_dependencies_calculator_unittest.cc b/modules/video_coding/frame_dependencies_calculator_unittest.cc index 81f774b227..a09650401a 100644 --- a/modules/video_coding/frame_dependencies_calculator_unittest.cc +++ b/modules/video_coding/frame_dependencies_calculator_unittest.cc @@ -10,7 +10,6 @@ #include "modules/video_coding/frame_dependencies_calculator.h" -#include "api/video/video_frame_type.h" #include "common_video/generic_frame_descriptor/generic_frame_info.h" #include "test/gmock.h" #include "test/gtest.h" @@ -22,9 +21,6 @@ using ::testing::ElementsAre; using ::testing::IsEmpty; using ::testing::UnorderedElementsAre; -constexpr VideoFrameType kVideoFrameKey = VideoFrameType::kVideoFrameKey; -constexpr VideoFrameType kVideoFrameDelta = VideoFrameType::kVideoFrameDelta; - constexpr CodecBufferUsage ReferenceAndUpdate(int id) { return CodecBufferUsage(id, /*referenced=*/true, /*updated=*/true); } @@ -39,15 +35,11 @@ TEST(FrameDependenciesCalculatorTest, SingleLayer) { CodecBufferUsage pattern[] = {ReferenceAndUpdate(0)}; FrameDependenciesCalculator calculator; - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameKey, /*frame_id=*/1, pattern), - IsEmpty()); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/3, pattern), - ElementsAre(1)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/6, pattern), - ElementsAre(3)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern), IsEmpty()); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern), + ElementsAre(1)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern), + ElementsAre(3)); } TEST(FrameDependenciesCalculatorTest, TwoTemporalLayers) { @@ -61,30 +53,21 @@ TEST(FrameDependenciesCalculatorTest, TwoTemporalLayers) { CodecBufferUsage pattern3[] = {Reference(0), 
Reference(1)}; FrameDependenciesCalculator calculator; - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameKey, /*frame_id=*/1, pattern0), - IsEmpty()); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/2, pattern1), - ElementsAre(1)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/3, pattern2), - ElementsAre(1)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/4, pattern3), - UnorderedElementsAre(2, 3)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/5, pattern0), - ElementsAre(3)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/6, pattern1), - ElementsAre(5)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/7, pattern2), - ElementsAre(5)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/8, pattern3), - UnorderedElementsAre(6, 7)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty()); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1), + ElementsAre(1)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern2), + ElementsAre(1)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern3), + UnorderedElementsAre(2, 3)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0), + ElementsAre(3)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1), + ElementsAre(5)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/7, pattern2), + ElementsAre(5)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/8, pattern3), + UnorderedElementsAre(6, 7)); } TEST(FrameDependenciesCalculatorTest, ThreeTemporalLayers4FramePattern) { @@ -99,26 +82,19 @@ TEST(FrameDependenciesCalculatorTest, ThreeTemporalLayers4FramePattern) { CodecBufferUsage pattern3[] = {Reference(0), Reference(1), Reference(2)}; FrameDependenciesCalculator calculator; - EXPECT_THAT( - 
calculator.FromBuffersUsage(kVideoFrameKey, /*frame_id=*/1, pattern0), - IsEmpty()); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/2, pattern1), - ElementsAre(1)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/3, pattern2), - ElementsAre(1)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty()); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1), + ElementsAre(1)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern2), + ElementsAre(1)); // Note that frame#4 references buffer#0 that is updated by frame#1, // yet there is no direct dependency from frame#4 to frame#1. - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/4, pattern3), - UnorderedElementsAre(2, 3)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/5, pattern0), - ElementsAre(1)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/6, pattern1), - ElementsAre(5)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern3), + UnorderedElementsAre(2, 3)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0), + ElementsAre(1)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1), + ElementsAre(5)); } TEST(FrameDependenciesCalculatorTest, SimulcastWith2Layers) { @@ -129,24 +105,16 @@ TEST(FrameDependenciesCalculatorTest, SimulcastWith2Layers) { CodecBufferUsage pattern1[] = {ReferenceAndUpdate(1)}; FrameDependenciesCalculator calculator; - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameKey, /*frame_id=*/1, pattern0), - IsEmpty()); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameKey, /*frame_id=*/2, pattern1), - IsEmpty()); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/3, pattern0), - ElementsAre(1)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/4, pattern1), - ElementsAre(2)); - EXPECT_THAT( - 
calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/5, pattern0), - ElementsAre(3)); - EXPECT_THAT( - calculator.FromBuffersUsage(kVideoFrameDelta, /*frame_id=*/6, pattern1), - ElementsAre(4)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/1, pattern0), IsEmpty()); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/2, pattern1), IsEmpty()); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/3, pattern0), + ElementsAre(1)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/4, pattern1), + ElementsAre(2)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/5, pattern0), + ElementsAre(3)); + EXPECT_THAT(calculator.FromBuffersUsage(/*frame_id=*/6, pattern1), + ElementsAre(4)); } } // namespace diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc index cb83999c94..d226dcd013 100644 --- a/modules/video_coding/frame_object.cc +++ b/modules/video_coding/frame_object.cc @@ -17,10 +17,8 @@ #include "api/video/encoded_image.h" #include "api/video/video_timing.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" namespace webrtc { -namespace video_coding { RtpFrameObject::RtpFrameObject( uint16_t first_seq_num, uint16_t last_seq_num, @@ -39,7 +37,8 @@ RtpFrameObject::RtpFrameObject( const absl::optional& color_space, RtpPacketInfos packet_infos, rtc::scoped_refptr image_buffer) - : first_seq_num_(first_seq_num), + : image_buffer_(image_buffer), + first_seq_num_(first_seq_num), last_seq_num_(last_seq_num), last_packet_received_time_(last_packet_received_time), times_nacked_(times_nacked) { @@ -51,7 +50,6 @@ RtpFrameObject::RtpFrameObject( // TODO(philipel): Remove when encoded image is replaced by EncodedFrame. // VCMEncodedFrame members CopyCodecSpecific(&rtp_video_header_); - _completeFrame = true; _payloadType = payload_type; SetTimestamp(rtp_timestamp); ntp_time_ms_ = ntp_time_ms; @@ -61,7 +59,7 @@ RtpFrameObject::RtpFrameObject( // as of the first packet's. 
SetPlayoutDelay(rtp_video_header_.playout_delay); - SetEncodedData(std::move(image_buffer)); + SetEncodedData(image_buffer_); _encodedWidth = rtp_video_header_.width; _encodedHeight = rtp_video_header_.height; @@ -70,6 +68,7 @@ RtpFrameObject::RtpFrameObject( rotation_ = rotation; SetColorSpace(color_space); + SetVideoFrameTrackingId(rtp_video_header_.video_frame_tracking_id); content_type_ = content_type; if (timing.flags != VideoSendTiming::kInvalid) { // ntp_time_ms_ may be -1 if not estimated yet. This is not a problem, @@ -129,9 +128,4 @@ const RTPVideoHeader& RtpFrameObject::GetRtpVideoHeader() const { return rtp_video_header_; } -const FrameMarking& RtpFrameObject::GetFrameMarking() const { - return rtp_video_header_.frame_marking; -} - -} // namespace video_coding } // namespace webrtc diff --git a/modules/video_coding/frame_object.h b/modules/video_coding/frame_object.h index f7988763d3..c6f069f241 100644 --- a/modules/video_coding/frame_object.h +++ b/modules/video_coding/frame_object.h @@ -15,7 +15,6 @@ #include "api/video/encoded_frame.h" namespace webrtc { -namespace video_coding { class RtpFrameObject : public EncodedFrame { public: @@ -47,9 +46,12 @@ class RtpFrameObject : public EncodedFrame { int64_t RenderTime() const override; bool delayed_by_retransmission() const override; const RTPVideoHeader& GetRtpVideoHeader() const; - const FrameMarking& GetFrameMarking() const; + + uint8_t* mutable_data() { return image_buffer_->data(); } private: + // Reference for mutable access. 
+ rtc::scoped_refptr image_buffer_; RTPVideoHeader rtp_video_header_; VideoCodecType codec_type_; uint16_t first_seq_num_; @@ -61,7 +63,6 @@ class RtpFrameObject : public EncodedFrame { int times_nacked_; }; -} // namespace video_coding } // namespace webrtc #endif // MODULES_VIDEO_CODING_FRAME_OBJECT_H_ diff --git a/modules/video_coding/g3doc/index.md b/modules/video_coding/g3doc/index.md new file mode 100644 index 0000000000..6fdab6eb98 --- /dev/null +++ b/modules/video_coding/g3doc/index.md @@ -0,0 +1,177 @@ + + + +# Video coding in WebRTC + +## Introduction to layered video coding + +[Video coding][video-coding-wiki] is the process of encoding a stream of +uncompressed video frames into a compressed bitstream, whose bitrate is lower +than that of the original stream. + +### Block-based hybrid video coding + +All video codecs in WebRTC are based on the block-based hybrid video coding +paradigm, which entails prediction of the original video frame using either +[information from previously encoded frames][motion-compensation-wiki] or +information from previously encoded portions of the current frame, subtraction +of the prediction from the original video, and +[transform][transform-coding-wiki] and [quantization][quantization-wiki] of the +resulting difference. The output of the quantization process, quantized +transform coefficients, is losslessly [entropy coded][entropy-coding-wiki] along +with other encoder parameters (e.g., those related to the prediction process) +and then a reconstruction is constructed by inverse quantizing and inverse +transforming the quantized transform coefficients and adding the result to the +prediction. Finally, in-loop filtering is applied and the resulting +reconstruction is stored as a reference frame to be used to develop predictions +for future frames. 
+ +### Frame types + +When an encoded frame depends on previously encoded frames (i.e., it has one or +more inter-frame dependencies), the prior frames must be available at the +receiver before the current frame can be decoded. In order for a receiver to +start decoding an encoded bitstream, a frame which has no prior dependencies is +required. Such a frame is called a "key frame". For real-time-communications +encoding, key frames typically compress less efficiently than "delta frames" +(i.e., frames whose predictions are derived from previously encoded frames). + +### Single-layer coding + +In 1:1 calls, the encoded bitstream has a single recipient. Using end-to-end +bandwidth estimation, the target bitrate can thus be well tailored for the +intended recipient. The number of key frames can be kept to a minimum and the +compressibility of the stream can be maximized. One way of achieving this is by +using "single-layer coding", where each delta frame only depends on the frame +that was most recently encoded. + +### Scalable video coding + +In multiway conferences, on the other hand, the encoded bitstream has multiple +recipients each of whom may have different downlink bandwidths. In order to +tailor the encoded bitstreams to a heterogeneous network of receivers, +[scalable video coding][svc-wiki] can be used. The idea is to introduce +structure into the dependency graph of the encoded bitstream, such that _layers_ of +the full stream can be decoded using only available lower layers. This structure +allows for a [selective forwarding unit][sfu-webrtc-glossary] to discard upper +layers of the bitstream in order to achieve the intended downlink +bandwidth.
+ +There are multiple types of scalability: + +* _Temporal scalability_ are layers whose framerate (and bitrate) is lower than that of the upper layer(s) +* _Spatial scalability_ are layers whose resolution (and bitrate) is lower than that of the upper layer(s) +* _Quality scalability_ are layers whose bitrate is lower than that of the upper layer(s) + +WebRTC supports temporal scalability for `VP8`, `VP9` and `AV1`, and spatial +scalability for `VP9` and `AV1`. + +### Simulcast + +Simulcast is another approach for multiway conferencing, where multiple +_independent_ bitstreams are produced by the encoder. + +In cases where multiple encodings of the same source are required (e.g., uplink +transmission in a multiway call), spatial scalability with inter-layer +prediction generally offers superior coding efficiency compared with simulcast. +When a single encoding is required (e.g., downlink transmission in any call), +simulcast generally provides better coding efficiency for the upper spatial +layers. The `K-SVC` concept, where spatial inter-layer dependencies are only +used to encode key frames, for which inter-layer prediction is typically +significantly more effective than it is for delta frames, can be seen as a +compromise between full spatial scalability and simulcast. + +## Overview of implementation in `modules/video_coding` + +Given the general introduction to video coding above, we now describe some +specifics of the [`modules/video_coding`][modules-video-coding] folder in WebRTC. 
+ +### Built-in software codecs in [`modules/video_coding/codecs`][modules-video-coding-codecs] + +This folder contains WebRTC-specific classes that wrap software codec +implementations for different video coding standards: + +* [libaom][libaom-src] for [AV1][av1-spec] +* [libvpx][libvpx-src] for [VP8][vp8-spec] and [VP9][vp9-spec] +* [OpenH264][openh264-src] for [H.264 constrained baseline profile][h264-spec] + +Users of the library can also inject their own codecs, using the +[VideoEncoderFactory][video-encoder-factory-interface] and +[VideoDecoderFactory][video-decoder-factory-interface] interfaces. This is how +platform-supported codecs, such as hardware backed codecs, are implemented. + +### Video codec test framework in [`modules/video_coding/codecs/test`][modules-video-coding-codecs-test] + +This folder contains a test framework that can be used to evaluate video quality +performance of different video codec implementations. + +### SVC helper classes in [`modules/video_coding/svc`][modules-video-coding-svc] + +* [`ScalabilityStructure*`][scalabilitystructure] - different + [standardized scalability structures][scalability-structure-spec] +* [`ScalableVideoController`][scalablevideocontroller] - provides instructions to the video encoder how + to create a scalable stream +* [`SvcRateAllocator`][svcrateallocator] - bitrate allocation to different spatial and temporal + layers + +### Utility classes in [`modules/video_coding/utility`][modules-video-coding-utility] + +* [`FrameDropper`][framedropper] - drops incoming frames when encoder systematically + overshoots its target bitrate +* [`FramerateController`][frameratecontroller] - drops incoming frames to achieve a target framerate +* [`QpParser`][qpparser] - parses the quantization parameter from a bitstream +* [`QualityScaler`][qualityscaler] - signals when an encoder generates encoded frames whose + quantization parameter is outside the window of acceptable values +* 
[`SimulcastRateAllocator`][simulcastrateallocator] - bitrate allocation to simulcast layers + +### General helper classes in [`modules/video_coding`][modules-video-coding] + +* [`FecControllerDefault`][feccontrollerdefault] - provides a default implementation for rate + allocation to [forward error correction][fec-wiki] +* [`VideoCodecInitializer`][videocodecinitializer] - converts between different encoder configuration + structs + +### Receiver buffer classes in [`modules/video_coding`][modules-video-coding] + +* [`PacketBuffer`][packetbuffer] - (re-)combines RTP packets into frames +* [`RtpFrameReferenceFinder`][rtpframereferencefinder] - determines dependencies between frames based on information in the RTP header, payload header and RTP extensions +* [`FrameBuffer`][framebuffer] - order frames based on their dependencies to be fed to the decoder + +[video-coding-wiki]: https://en.wikipedia.org/wiki/Video_coding_format +[motion-compensation-wiki]: https://en.wikipedia.org/wiki/Motion_compensation +[transform-coding-wiki]: https://en.wikipedia.org/wiki/Transform_coding +[motion-vector-wiki]: https://en.wikipedia.org/wiki/Motion_vector +[mpeg-wiki]: https://en.wikipedia.org/wiki/Moving_Picture_Experts_Group +[svc-wiki]: https://en.wikipedia.org/wiki/Scalable_Video_Coding +[sfu-webrtc-glossary]: https://webrtcglossary.com/sfu/ +[libvpx-src]: https://chromium.googlesource.com/webm/libvpx/ +[libaom-src]: https://aomedia.googlesource.com/aom/ +[openh264-src]: https://github.com/cisco/openh264 +[vp8-spec]: https://tools.ietf.org/html/rfc6386 +[vp9-spec]: https://storage.googleapis.com/downloads.webmproject.org/docs/vp9/vp9-bitstream-specification-v0.6-20160331-draft.pdf +[av1-spec]: https://aomediacodec.github.io/av1-spec/ +[h264-spec]: https://www.itu.int/rec/T-REC-H.264-201906-I/en +[video-encoder-factory-interface]: 
https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video_codecs/video_encoder_factory.h;l=27;drc=afadfb24a5e608da6ae102b20b0add53a083dcf3 +[video-decoder-factory-interface]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video_codecs/video_decoder_factory.h;l=27;drc=49c293f03d8f593aa3aca282577fcb14daa63207 +[scalability-structure-spec]: https://w3c.github.io/webrtc-svc/#scalabilitymodes* +[fec-wiki]: https://en.wikipedia.org/wiki/Error_correction_code#Forward_error_correction +[entropy-coding-wiki]: https://en.wikipedia.org/wiki/Entropy_encoding +[modules-video-coding]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/ +[modules-video-coding-codecs]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/codecs/ +[modules-video-coding-codecs-test]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/codecs/test/ +[modules-video-coding-svc]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/svc/ +[modules-video-coding-utility]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/utility/ +[scalabilitystructure]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/svc/create_scalability_structure.h?q=CreateScalabilityStructure +[scalablevideocontroller]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/svc/scalable_video_controller.h?q=ScalableVideoController +[svcrateallocator]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/svc/svc_rate_allocator.h?q=SvcRateAllocator +[framedropper]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/utility/frame_dropper.h?q=FrameDropper 
+[frameratecontroller]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/utility/framerate_controller.h?q=FramerateController +[qpparser]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/utility/qp_parser.h?q=QpParser +[qualityscaler]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/utility/quality_scaler.h?q=QualityScaler +[simulcastrateallocator]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/utility/simulcast_rate_allocator.h?q=SimulcastRateAllocator +[feccontrollerdefault]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/fec_controller_default.h?q=FecControllerDefault +[videocodecinitializer]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/include/video_codec_initializer.h?q=VideoCodecInitializer +[packetbuffer]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/packet_buffer.h?q=PacketBuffer +[rtpframereferencefinder]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/rtp_frame_reference_finder.h?q=RtpFrameReferenceFinder +[framebuffer]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/frame_buffer2.h?q=FrameBuffer +[quantization-wiki]: https://en.wikipedia.org/wiki/Quantization_(signal_processing) diff --git a/modules/video_coding/generic_decoder.cc b/modules/video_coding/generic_decoder.cc index cfe16ed3f1..acb4307f3f 100644 --- a/modules/video_coding/generic_decoder.cc +++ b/modules/video_coding/generic_decoder.cc @@ -13,6 +13,7 @@ #include #include +#include #include "api/video/video_timing.h" #include "modules/video_coding/include/video_error_codes.h" @@ -31,12 +32,18 @@ 
VCMDecodedFrameCallback::VCMDecodedFrameCallback(VCMTiming* timing, : _clock(clock), _timing(timing), _timestampMap(kDecoderFrameMemoryLength), - _extra_decode_time("t", absl::nullopt) { + _extra_decode_time("t", absl::nullopt), + low_latency_renderer_enabled_("enabled", true), + low_latency_renderer_include_predecode_buffer_("include_predecode_buffer", + true) { ntp_offset_ = _clock->CurrentNtpInMilliseconds() - _clock->TimeInMilliseconds(); ParseFieldTrial({&_extra_decode_time}, field_trial::FindFullName("WebRTC-SlowDownDecoder")); + ParseFieldTrial({&low_latency_renderer_enabled_, + &low_latency_renderer_include_predecode_buffer_}, + field_trial::FindFullName("WebRTC-LowLatencyRenderer")); } VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {} @@ -84,16 +91,30 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage, "timestamp", decodedImage.timestamp()); // TODO(holmer): We should improve this so that we can handle multiple // callbacks from one call to Decode(). - VCMFrameInformation* frameInfo; + absl::optional frameInfo; + int timestamp_map_size = 0; + int dropped_frames = 0; { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); + int initial_timestamp_map_size = _timestampMap.Size(); frameInfo = _timestampMap.Pop(decodedImage.timestamp()); + timestamp_map_size = _timestampMap.Size(); + // _timestampMap.Pop() erases all frame upto the specified timestamp and + // return the frame info for this timestamp if it exists. Thus, the + // difference in the _timestampMap size before and after Pop() will show + // internally dropped frames. + dropped_frames = + initial_timestamp_map_size - timestamp_map_size - (frameInfo ? 
1 : 0); } - if (frameInfo == NULL) { + if (dropped_frames > 0) { + _receiveCallback->OnDroppedFrames(dropped_frames); + } + + if (!frameInfo) { RTC_LOG(LS_WARNING) << "Too many frames backed up in the decoder, dropping " - "this one."; - _receiveCallback->OnDroppedFrames(1); + "frame with timestamp " + << decodedImage.timestamp(); return; } @@ -101,13 +122,29 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage, decodedImage.set_packet_infos(frameInfo->packet_infos); decodedImage.set_rotation(frameInfo->rotation); - const Timestamp now = _clock->CurrentTime(); - RTC_DCHECK(frameInfo->decodeStart); - if (!decode_time_ms) { - decode_time_ms = (now - *frameInfo->decodeStart).ms(); + if (low_latency_renderer_enabled_) { + absl::optional max_composition_delay_in_frames = + _timing->MaxCompositionDelayInFrames(); + if (max_composition_delay_in_frames) { + // Subtract frames that are in flight. + if (low_latency_renderer_include_predecode_buffer_) { + *max_composition_delay_in_frames -= timestamp_map_size; + *max_composition_delay_in_frames = + std::max(0, *max_composition_delay_in_frames); + } + decodedImage.set_max_composition_delay_in_frames( + max_composition_delay_in_frames); + } } - _timing->StopDecodeTimer(*decode_time_ms, now.ms()); - decodedImage.set_processing_time({*frameInfo->decodeStart, now}); + + RTC_DCHECK(frameInfo->decodeStart); + const Timestamp now = _clock->CurrentTime(); + const TimeDelta decode_time = decode_time_ms + ? TimeDelta::Millis(*decode_time_ms) + : now - *frameInfo->decodeStart; + _timing->StopDecodeTimer(decode_time.ms(), now.ms()); + decodedImage.set_processing_time( + {*frameInfo->decodeStart, *frameInfo->decodeStart + decode_time}); // Report timing information. 
TimingFrameInfo timing_frame_info; @@ -161,7 +198,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage, decodedImage.set_timestamp_us(frameInfo->renderTimeMs * rtc::kNumMicrosecsPerMillisec); - _receiveCallback->FrameToRender(decodedImage, qp, *decode_time_ms, + _receiveCallback->FrameToRender(decodedImage, qp, decode_time.ms(), frameInfo->content_type); } @@ -171,18 +208,30 @@ void VCMDecodedFrameCallback::OnDecoderImplementationName( } void VCMDecodedFrameCallback::Map(uint32_t timestamp, - VCMFrameInformation* frameInfo) { - rtc::CritScope cs(&lock_); - _timestampMap.Add(timestamp, frameInfo); + const VCMFrameInformation& frameInfo) { + int dropped_frames = 0; + { + MutexLock lock(&lock_); + int initial_size = _timestampMap.Size(); + _timestampMap.Add(timestamp, frameInfo); + // If no frame is dropped, the new size should be |initial_size| + 1 + dropped_frames = (initial_size + 1) - _timestampMap.Size(); + } + if (dropped_frames > 0) { + _receiveCallback->OnDroppedFrames(dropped_frames); + } } -int32_t VCMDecodedFrameCallback::Pop(uint32_t timestamp) { - rtc::CritScope cs(&lock_); - if (_timestampMap.Pop(timestamp) == NULL) { - return VCM_GENERAL_ERROR; +void VCMDecodedFrameCallback::ClearTimestampMap() { + int dropped_frames = 0; + { + MutexLock lock(&lock_); + dropped_frames = _timestampMap.Size(); + _timestampMap.Clear(); + } + if (dropped_frames > 0) { + _receiveCallback->OnDroppedFrames(dropped_frames); } - _receiveCallback->OnDroppedFrames(1); - return VCM_OK; } VCMGenericDecoder::VCMGenericDecoder(std::unique_ptr decoder) @@ -190,8 +239,6 @@ VCMGenericDecoder::VCMGenericDecoder(std::unique_ptr decoder) VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder, bool isExternal) : _callback(NULL), - _frameInfos(), - _nextFrameInfoIdx(0), decoder_(decoder), _codecType(kVideoCodecGeneric), _isExternal(isExternal), @@ -212,51 +259,56 @@ int32_t VCMGenericDecoder::InitDecode(const VideoCodec* settings, _codecType = settings->codecType; int err = 
decoder_->InitDecode(settings, numberOfCores); - implementation_name_ = decoder_->ImplementationName(); - RTC_LOG(LS_INFO) << "Decoder implementation: " << implementation_name_; + decoder_info_ = decoder_->GetDecoderInfo(); + RTC_LOG(LS_INFO) << "Decoder implementation: " << decoder_info_.ToString(); + if (_callback) { + _callback->OnDecoderImplementationName( + decoder_info_.implementation_name.c_str()); + } return err; } int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, Timestamp now) { TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp", frame.Timestamp()); - _frameInfos[_nextFrameInfoIdx].decodeStart = now; - _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs(); - _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation(); - _frameInfos[_nextFrameInfoIdx].timing = frame.video_timing(); - _frameInfos[_nextFrameInfoIdx].ntp_time_ms = - frame.EncodedImage().ntp_time_ms_; - _frameInfos[_nextFrameInfoIdx].packet_infos = frame.PacketInfos(); + VCMFrameInformation frame_info; + frame_info.decodeStart = now; + frame_info.renderTimeMs = frame.RenderTimeMs(); + frame_info.rotation = frame.rotation(); + frame_info.timing = frame.video_timing(); + frame_info.ntp_time_ms = frame.EncodedImage().ntp_time_ms_; + frame_info.packet_infos = frame.PacketInfos(); // Set correctly only for key frames. Thus, use latest key frame // content type. If the corresponding key frame was lost, decode will fail // and content type will be ignored. 
if (frame.FrameType() == VideoFrameType::kVideoFrameKey) { - _frameInfos[_nextFrameInfoIdx].content_type = frame.contentType(); + frame_info.content_type = frame.contentType(); _last_keyframe_content_type = frame.contentType(); } else { - _frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type; + frame_info.content_type = _last_keyframe_content_type; } - _callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]); + _callback->Map(frame.Timestamp(), frame_info); - _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength; int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(), frame.RenderTimeMs()); - const char* new_implementation_name = decoder_->ImplementationName(); - if (new_implementation_name != implementation_name_) { - implementation_name_ = new_implementation_name; + VideoDecoder::DecoderInfo decoder_info = decoder_->GetDecoderInfo(); + if (decoder_info != decoder_info_) { RTC_LOG(LS_INFO) << "Changed decoder implementation to: " - << new_implementation_name; + << decoder_info.ToString(); + decoder_info_ = decoder_info; + _callback->OnDecoderImplementationName( + decoder_info.implementation_name.empty() + ? "unknown" + : decoder_info.implementation_name.c_str()); } - _callback->OnDecoderImplementationName(implementation_name_.c_str()); if (ret < WEBRTC_VIDEO_CODEC_OK) { RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp " << frame.Timestamp() << ", error code: " << ret; - _callback->Pop(frame.Timestamp()); - return ret; + _callback->ClearTimestampMap(); } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT) { - // No output - _callback->Pop(frame.Timestamp()); + // No output. 
+ _callback->ClearTimestampMap(); } return ret; } @@ -264,11 +316,12 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, Timestamp now) { int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback( VCMDecodedFrameCallback* callback) { _callback = callback; - return decoder_->RegisterDecodeCompleteCallback(callback); -} - -bool VCMGenericDecoder::PrefersLateDecoding() const { - return decoder_->PrefersLateDecoding(); + int32_t ret = decoder_->RegisterDecodeCompleteCallback(callback); + if (callback && !decoder_info_.implementation_name.empty()) { + callback->OnDecoderImplementationName( + decoder_info_.implementation_name.c_str()); + } + return ret; } } // namespace webrtc diff --git a/modules/video_coding/generic_decoder.h b/modules/video_coding/generic_decoder.h index 40fe667d65..8e79cb4e19 100644 --- a/modules/video_coding/generic_decoder.h +++ b/modules/video_coding/generic_decoder.h @@ -14,14 +14,14 @@ #include #include +#include "api/sequence_checker.h" #include "api/units/time_delta.h" #include "modules/video_coding/encoded_frame.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/timestamp_map.h" #include "modules/video_coding/timing.h" -#include "rtc_base/critical_section.h" #include "rtc_base/experiments/field_trial_parser.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -29,18 +29,6 @@ class VCMReceiveCallback; enum { kDecoderFrameMemoryLength = 10 }; -struct VCMFrameInformation { - int64_t renderTimeMs; - absl::optional decodeStart; - void* userData; - VideoRotation rotation; - VideoContentType content_type; - EncodedImage::Timing timing; - int64_t ntp_time_ms; - RtpPacketInfos packet_infos; - // ColorSpace is not stored here, as it might be modified by decoders. 
-}; - class VCMDecodedFrameCallback : public DecodedImageCallback { public: VCMDecodedFrameCallback(VCMTiming* timing, Clock* clock); @@ -56,11 +44,11 @@ class VCMDecodedFrameCallback : public DecodedImageCallback { void OnDecoderImplementationName(const char* implementation_name); - void Map(uint32_t timestamp, VCMFrameInformation* frameInfo); - int32_t Pop(uint32_t timestamp); + void Map(uint32_t timestamp, const VCMFrameInformation& frameInfo); + void ClearTimestampMap(); private: - rtc::ThreadChecker construction_thread_; + SequenceChecker construction_thread_; // Protect |_timestampMap|. Clock* const _clock; // This callback must be set before the decoder thread starts running @@ -70,11 +58,21 @@ class VCMDecodedFrameCallback : public DecodedImageCallback { // from the same thread, and therfore a lock is not required to access it. VCMReceiveCallback* _receiveCallback = nullptr; VCMTiming* _timing; - rtc::CriticalSection lock_; + Mutex lock_; VCMTimestampMap _timestampMap RTC_GUARDED_BY(lock_); int64_t ntp_offset_; // Set by the field trial WebRTC-SlowDownDecoder to simulate a slow decoder. FieldTrialOptional _extra_decode_time; + + // Set by the field trial WebRTC-LowLatencyRenderer. The parameter |enabled| + // determines if the low-latency renderer algorithm should be used for the + // case min playout delay=0 and max playout delay>0. + FieldTrialParameter low_latency_renderer_enabled_; + // Set by the field trial WebRTC-LowLatencyRenderer. The parameter + // |include_predecode_buffer| determines if the predecode buffer should be + // taken into account when calculating maximum number of frames in composition + // queue. 
+ FieldTrialParameter low_latency_renderer_include_predecode_buffer_; }; class VCMGenericDecoder { @@ -100,20 +98,17 @@ class VCMGenericDecoder { */ int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback); - bool PrefersLateDecoding() const; bool IsSameDecoder(VideoDecoder* decoder) const { return decoder_.get() == decoder; } private: VCMDecodedFrameCallback* _callback; - VCMFrameInformation _frameInfos[kDecoderFrameMemoryLength]; - uint32_t _nextFrameInfoIdx; std::unique_ptr decoder_; VideoCodecType _codecType; const bool _isExternal; VideoContentType _last_keyframe_content_type; - std::string implementation_name_; + VideoDecoder::DecoderInfo decoder_info_; }; } // namespace webrtc diff --git a/modules/video_coding/generic_decoder_unittest.cc b/modules/video_coding/generic_decoder_unittest.cc index 3e07a2a81c..a4cc5b0ded 100644 --- a/modules/video_coding/generic_decoder_unittest.cc +++ b/modules/video_coding/generic_decoder_unittest.cc @@ -16,8 +16,8 @@ #include "api/task_queue/default_task_queue_factory.h" #include "common_video/test/utilities.h" #include "modules/video_coding/timing.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" +#include "rtc_base/synchronization/mutex.h" #include "system_wrappers/include/clock.h" #include "test/fake_decoder.h" #include "test/gmock.h" @@ -33,7 +33,7 @@ class ReceiveCallback : public VCMReceiveCallback { int32_t decode_time_ms, VideoContentType content_type) override { { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); last_frame_ = videoFrame; } received_frame_event_.Set(); @@ -41,13 +41,13 @@ class ReceiveCallback : public VCMReceiveCallback { } absl::optional GetLastFrame() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); return last_frame_; } absl::optional WaitForFrame(int64_t wait_ms) { if (received_frame_event_.Wait(wait_ms)) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); return last_frame_; } else { return absl::nullopt; @@ -55,7 +55,7 @@ class 
ReceiveCallback : public VCMReceiveCallback { } private: - rtc::CriticalSection lock_; + Mutex lock_; rtc::Event received_frame_event_; absl::optional last_frame_ RTC_GUARDED_BY(lock_); }; @@ -115,5 +115,29 @@ TEST_F(GenericDecoderTest, PassesPacketInfosForDelayedDecoders) { EXPECT_EQ(decoded_frame->packet_infos().size(), 3U); } +TEST_F(GenericDecoderTest, MaxCompositionDelayNotSetByDefault) { + VCMEncodedFrame encoded_frame; + generic_decoder_.Decode(encoded_frame, clock_.CurrentTime()); + absl::optional decoded_frame = user_callback_.WaitForFrame(10); + ASSERT_TRUE(decoded_frame.has_value()); + EXPECT_FALSE(decoded_frame->max_composition_delay_in_frames()); +} + +TEST_F(GenericDecoderTest, MaxCompositionDelayActivatedByPlayoutDelay) { + VCMEncodedFrame encoded_frame; + // VideoReceiveStream2 would set MaxCompositionDelayInFrames if playout delay + // is specified as X,Y, where X=0, Y>0. + const VideoPlayoutDelay kPlayoutDelay = {0, 50}; + constexpr int kMaxCompositionDelayInFrames = 3; // ~50 ms at 60 fps. 
+ encoded_frame.SetPlayoutDelay(kPlayoutDelay); + timing_.SetMaxCompositionDelayInFrames( + absl::make_optional(kMaxCompositionDelayInFrames)); + generic_decoder_.Decode(encoded_frame, clock_.CurrentTime()); + absl::optional decoded_frame = user_callback_.WaitForFrame(10); + ASSERT_TRUE(decoded_frame.has_value()); + EXPECT_EQ(kMaxCompositionDelayInFrames, + decoded_frame->max_composition_delay_in_frames()); +} + } // namespace video_coding } // namespace webrtc diff --git a/modules/video_coding/h264_sps_pps_tracker.cc b/modules/video_coding/h264_sps_pps_tracker.cc index 3965b28e8e..4becdb7608 100644 --- a/modules/video_coding/h264_sps_pps_tracker.cc +++ b/modules/video_coding/h264_sps_pps_tracker.cc @@ -49,6 +49,7 @@ H264SpsPpsTracker::FixedBitstream H264SpsPpsTracker::CopyAndFixBitstream( RTPVideoHeader* video_header) { RTC_DCHECK(video_header); RTC_DCHECK(video_header->codec == kVideoCodecH264); + RTC_DCHECK_GT(bitstream.size(), 0); auto& h264_header = absl::get(video_header->video_type_header); @@ -128,7 +129,7 @@ H264SpsPpsTracker::FixedBitstream H264SpsPpsTracker::CopyAndFixBitstream( if (h264_header.packetization_type == kH264StapA) { const uint8_t* nalu_ptr = bitstream.data() + 1; - while (nalu_ptr < bitstream.data() + bitstream.size()) { + while (nalu_ptr < bitstream.data() + bitstream.size() - 1) { RTC_DCHECK(video_header->is_first_packet_in_frame); required_size += sizeof(start_code_h264); @@ -180,7 +181,7 @@ H264SpsPpsTracker::FixedBitstream H264SpsPpsTracker::CopyAndFixBitstream( // Copy the rest of the bitstream and insert start codes. if (h264_header.packetization_type == kH264StapA) { const uint8_t* nalu_ptr = bitstream.data() + 1; - while (nalu_ptr < bitstream.data() + bitstream.size()) { + while (nalu_ptr < bitstream.data() + bitstream.size() - 1) { fixed.bitstream.AppendData(start_code_h264); // The first two bytes describe the length of a segment. 
diff --git a/modules/video_coding/include/video_codec_interface.h b/modules/video_coding/include/video_codec_interface.h index c7b116f4ae..4737dde90f 100644 --- a/modules/video_coding/include/video_codec_interface.h +++ b/modules/video_coding/include/video_codec_interface.h @@ -13,12 +13,12 @@ #include +#include "absl/base/attributes.h" #include "absl/types/optional.h" #include "api/video/video_frame.h" #include "api/video_codecs/video_decoder.h" #include "api/video_codecs/video_encoder.h" #include "common_video/generic_frame_descriptor/generic_frame_info.h" -#include "modules/include/module_common_types.h" #include "modules/video_coding/codecs/h264/include/h264_globals.h" #include "modules/video_coding/codecs/vp9/include/vp9_globals.h" #include "modules/video_coding/include/video_error_codes.h" @@ -79,7 +79,7 @@ struct CodecSpecificInfoVP9 { uint8_t num_ref_pics; uint8_t p_diff[kMaxVp9RefPics]; - bool end_of_picture; + ABSL_DEPRECATED("") bool end_of_picture; }; static_assert(std::is_pod::value, ""); @@ -109,6 +109,7 @@ struct RTC_EXPORT CodecSpecificInfo { VideoCodecType codecType; CodecSpecificInfoUnion codecSpecific; + bool end_of_picture = true; absl::optional generic_frame_info; absl::optional template_structure; }; diff --git a/modules/video_coding/include/video_coding.h b/modules/video_coding/include/video_coding.h index acaa73bbf2..a7cb50ef9c 100644 --- a/modules/video_coding/include/video_coding.h +++ b/modules/video_coding/include/video_coding.h @@ -42,19 +42,16 @@ class VideoCodingModule : public Module { // needed. // // Input: + // - payload_type : RTP payload type // - receiveCodec : Settings for the codec to be registered. // - numberOfCores : Number of CPU cores that the decoder is allowed // to use. - // - requireKeyFrame : Set this to true if you don't want any delta - // frames - // to be decoded until the first key frame has been - // decoded. // // Return value : VCM_OK, on success. // < 0, on error. 
- virtual int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec, - int32_t numberOfCores, - bool requireKeyFrame = false) = 0; + virtual int32_t RegisterReceiveCodec(uint8_t payload_type, + const VideoCodec* receiveCodec, + int32_t numberOfCores) = 0; // Register an external decoder object. // diff --git a/modules/video_coding/include/video_coding_defines.h b/modules/video_coding/include/video_coding_defines.h index ff9b7d6a66..641e7121ef 100644 --- a/modules/video_coding/include/video_coding_defines.h +++ b/modules/video_coding/include/video_coding_defines.h @@ -41,9 +41,7 @@ enum { }; enum VCMVideoProtection { - kProtectionNone, kProtectionNack, - kProtectionFEC, kProtectionNackFEC, }; diff --git a/modules/video_coding/jitter_buffer.cc b/modules/video_coding/jitter_buffer.cc index 0873285f39..75142e93ee 100644 --- a/modules/video_coding/jitter_buffer.cc +++ b/modules/video_coding/jitter_buffer.cc @@ -153,7 +153,7 @@ VCMJitterBuffer::~VCMJitterBuffer() { } void VCMJitterBuffer::Start() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); running_ = true; num_consecutive_old_packets_ = 0; @@ -172,7 +172,7 @@ void VCMJitterBuffer::Start() { } void VCMJitterBuffer::Stop() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); running_ = false; last_decoded_state_.Reset(); @@ -181,12 +181,12 @@ void VCMJitterBuffer::Stop() { } bool VCMJitterBuffer::Running() const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return running_; } void VCMJitterBuffer::Flush() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); decodable_frames_.Reset(&free_frames_); incomplete_frames_.Reset(&free_frames_); last_decoded_state_.Reset(); // TODO(mikhal): sync reset. 
@@ -202,21 +202,20 @@ void VCMJitterBuffer::Flush() { } int VCMJitterBuffer::num_packets() const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return num_packets_; } int VCMJitterBuffer::num_duplicated_packets() const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return num_duplicated_packets_; } // Returns immediately or a |max_wait_time_ms| ms event hang waiting for a // complete frame, |max_wait_time_ms| decided by caller. VCMEncodedFrame* VCMJitterBuffer::NextCompleteFrame(uint32_t max_wait_time_ms) { - crit_sect_.Enter(); + MutexLock lock(&mutex_); if (!running_) { - crit_sect_.Leave(); return nullptr; } CleanUpOldOrEmptyFrames(); @@ -227,14 +226,13 @@ VCMEncodedFrame* VCMJitterBuffer::NextCompleteFrame(uint32_t max_wait_time_ms) { clock_->TimeInMilliseconds() + max_wait_time_ms; int64_t wait_time_ms = max_wait_time_ms; while (wait_time_ms > 0) { - crit_sect_.Leave(); + mutex_.Unlock(); const EventTypeWrapper ret = frame_event_->Wait(static_cast(wait_time_ms)); - crit_sect_.Enter(); + mutex_.Lock(); if (ret == kEventSignaled) { // Are we shutting down the jitter buffer? if (!running_) { - crit_sect_.Leave(); return nullptr; } // Finding oldest frame ready for decoder. 
@@ -252,16 +250,13 @@ VCMEncodedFrame* VCMJitterBuffer::NextCompleteFrame(uint32_t max_wait_time_ms) { } if (decodable_frames_.empty() || decodable_frames_.Front()->GetState() != kStateComplete) { - crit_sect_.Leave(); return nullptr; } - VCMEncodedFrame* encoded_frame = decodable_frames_.Front(); - crit_sect_.Leave(); - return encoded_frame; + return decodable_frames_.Front(); } VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); if (!running_) { return NULL; } @@ -303,8 +298,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) { last_decoded_state_.SetState(frame); DropPacketsFromNackList(last_decoded_state_.sequence_num()); - if ((*frame).IsSessionComplete()) - UpdateAveragePacketsPerFrame(frame->NumPackets()); + UpdateAveragePacketsPerFrame(frame->NumPackets()); return frame; } @@ -313,7 +307,7 @@ VCMEncodedFrame* VCMJitterBuffer::ExtractAndSetDecode(uint32_t timestamp) { // frames from within the jitter buffer. 
void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) { RTC_CHECK(frame != nullptr); - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); VCMFrameBuffer* frame_buffer = static_cast(frame); RecycleFrameBuffer(frame_buffer); } @@ -353,8 +347,8 @@ VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet, int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame, bool* retransmitted) const { - assert(retransmitted); - rtc::CritScope cs(&crit_sect_); + RTC_DCHECK(retransmitted); + MutexLock lock(&mutex_); const VCMFrameBuffer* frame_buffer = static_cast(frame); *retransmitted = (frame_buffer->GetNackCount() > 0); @@ -363,7 +357,7 @@ int64_t VCMJitterBuffer::LastPacketTime(const VCMEncodedFrame* frame, VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet, bool* retransmitted) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ++num_packets_; // Does this packet belong to an old frame? @@ -504,7 +498,7 @@ VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet, RecycleFrameBuffer(frame); return kFlushIndicator; default: - assert(false); + RTC_NOTREACHED(); } return buffer_state; } @@ -577,7 +571,7 @@ void VCMJitterBuffer::FindAndInsertContinuousFramesWithState( } uint32_t VCMJitterBuffer::EstimatedJitterMs() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); const double rtt_mult = 1.0f; return jitter_estimate_.GetJitterEstimate(rtt_mult, absl::nullopt); } @@ -585,9 +579,9 @@ uint32_t VCMJitterBuffer::EstimatedJitterMs() { void VCMJitterBuffer::SetNackSettings(size_t max_nack_list_size, int max_packet_age_to_nack, int max_incomplete_time_ms) { - rtc::CritScope cs(&crit_sect_); - assert(max_packet_age_to_nack >= 0); - assert(max_incomplete_time_ms_ >= 0); + MutexLock lock(&mutex_); + RTC_DCHECK_GE(max_packet_age_to_nack, 0); + RTC_DCHECK_GE(max_incomplete_time_ms_, 0); max_nack_list_size_ = max_nack_list_size; max_packet_age_to_nack_ = max_packet_age_to_nack; 
max_incomplete_time_ms_ = max_incomplete_time_ms; @@ -606,7 +600,7 @@ int VCMJitterBuffer::NonContinuousOrIncompleteDuration() { uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber( const VCMFrameBuffer& frame) const { - assert(frame.GetLowSeqNum() >= 0); + RTC_DCHECK_GE(frame.GetLowSeqNum(), 0); if (frame.HaveFirstPacket()) return frame.GetLowSeqNum(); @@ -616,7 +610,7 @@ uint16_t VCMJitterBuffer::EstimatedLowSequenceNumber( } std::vector VCMJitterBuffer::GetNackList(bool* request_key_frame) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); *request_key_frame = false; if (last_decoded_state_.in_initial_state()) { VCMFrameBuffer* next_frame = NextFrame(); @@ -827,7 +821,7 @@ void VCMJitterBuffer::UpdateAveragePacketsPerFrame(int current_number_packets) { } } -// Must be called under the critical section |crit_sect_|. +// Must be called under the critical section |mutex_|. void VCMJitterBuffer::CleanUpOldOrEmptyFrames() { decodable_frames_.CleanUpOldOrEmptyFrames(&last_decoded_state_, &free_frames_); @@ -838,13 +832,13 @@ void VCMJitterBuffer::CleanUpOldOrEmptyFrames() { } } -// Must be called from within |crit_sect_|. +// Must be called from within |mutex_|. bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const { return missing_sequence_numbers_.find(packet.seqNum) != missing_sequence_numbers_.end(); } -// Must be called under the critical section |crit_sect_|. Should never be +// Must be called under the critical section |mutex_|. Should never be // called with retransmitted frames, they must be filtered out before this // function is called. void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample, @@ -856,7 +850,7 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample, sample.frame_size, incomplete_frame); } -// Must be called under the critical section crit_sect_. Should never be +// Must be called under the critical section mutex_. 
Should never be // called with retransmitted frames, they must be filtered out before this // function is called. void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame, @@ -870,7 +864,7 @@ void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame, frame.size(), incomplete_frame); } -// Must be called under the critical section |crit_sect_|. Should never be +// Must be called under the critical section |mutex_|. Should never be // called with retransmitted frames, they must be filtered out before this // function is called. void VCMJitterBuffer::UpdateJitterEstimate(int64_t latest_packet_time_ms, diff --git a/modules/video_coding/jitter_buffer.h b/modules/video_coding/jitter_buffer.h index 2505845d4f..b15ca75ffa 100644 --- a/modules/video_coding/jitter_buffer.h +++ b/modules/video_coding/jitter_buffer.h @@ -28,7 +28,7 @@ #include "modules/video_coding/jitter_buffer_common.h" #include "modules/video_coding/jitter_estimator.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -143,66 +143,66 @@ class VCMJitterBuffer { VCMFrameBufferEnum GetFrame(const VCMPacket& packet, VCMFrameBuffer** frame, FrameList** frame_list) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns true if |frame| is continuous in |decoding_state|, not taking // decodable frames into account. bool IsContinuousInState(const VCMFrameBuffer& frame, const VCMDecodingState& decoding_state) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns true if |frame| is continuous in the |last_decoded_state_|, taking // all decodable frames into account. 
bool IsContinuous(const VCMFrameBuffer& frame) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Looks for frames in |incomplete_frames_| which are continuous in the // provided |decoded_state|. Starts the search from the timestamp of // |decoded_state|. void FindAndInsertContinuousFramesWithState( const VCMDecodingState& decoded_state) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Looks for frames in |incomplete_frames_| which are continuous in // |last_decoded_state_| taking all decodable frames into account. Starts // the search from |new_frame|. void FindAndInsertContinuousFrames(const VCMFrameBuffer& new_frame) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); - VCMFrameBuffer* NextFrame() const RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + VCMFrameBuffer* NextFrame() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns true if the NACK list was updated to cover sequence numbers up to // |sequence_number|. If false a key frame is needed to get into a state where // we can continue decoding. bool UpdateNackList(uint16_t sequence_number) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); bool TooLargeNackList() const; // Returns true if the NACK list was reduced without problem. If false a key // frame is needed to get into a state where we can continue decoding. - bool HandleTooLargeNackList() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + bool HandleTooLargeNackList() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); bool MissingTooOldPacket(uint16_t latest_sequence_number) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns true if the too old packets was successfully removed from the NACK // list. If false, a key frame is needed to get into a state where we can // continue decoding. 
bool HandleTooOldPackets(uint16_t latest_sequence_number) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Drops all packets in the NACK list up until |last_decoded_sequence_number|. void DropPacketsFromNackList(uint16_t last_decoded_sequence_number); // Gets an empty frame, creating a new frame if necessary (i.e. increases // jitter buffer size). - VCMFrameBuffer* GetEmptyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + VCMFrameBuffer* GetEmptyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Attempts to increase the size of the jitter buffer. Returns true on // success, false otherwise. - bool TryToIncreaseJitterBufferSize() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + bool TryToIncreaseJitterBufferSize() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Recycles oldest frames until a key frame is found. Used if jitter buffer is // completely full. Returns true if a key frame was found. - bool RecycleFramesUntilKeyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + bool RecycleFramesUntilKeyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Update rolling average of packets per frame. void UpdateAveragePacketsPerFrame(int current_number_packets_); // Cleans the frame list in the JB from old/empty frames. // Should only be called prior to actual use. - void CleanUpOldOrEmptyFrames() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + void CleanUpOldOrEmptyFrames() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Returns true if |packet| is likely to have been retransmitted. bool IsPacketRetransmitted(const VCMPacket& packet) const; @@ -217,35 +217,34 @@ class VCMJitterBuffer { unsigned int frame_size, bool incomplete_frame); - int NonContinuousOrIncompleteDuration() - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + int NonContinuousOrIncompleteDuration() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); uint16_t EstimatedLowSequenceNumber(const VCMFrameBuffer& frame) const; // Reset frame buffer and return it to free_frames_. 
void RecycleFrameBuffer(VCMFrameBuffer* frame) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); Clock* clock_; // If we are running (have started) or not. bool running_; - rtc::CriticalSection crit_sect_; + mutable Mutex mutex_; // Event to signal when we have a frame ready for decoder. std::unique_ptr frame_event_; // Number of allocated frames. int max_number_of_frames_; - UnorderedFrameList free_frames_ RTC_GUARDED_BY(crit_sect_); - FrameList decodable_frames_ RTC_GUARDED_BY(crit_sect_); - FrameList incomplete_frames_ RTC_GUARDED_BY(crit_sect_); - VCMDecodingState last_decoded_state_ RTC_GUARDED_BY(crit_sect_); + UnorderedFrameList free_frames_ RTC_GUARDED_BY(mutex_); + FrameList decodable_frames_ RTC_GUARDED_BY(mutex_); + FrameList incomplete_frames_ RTC_GUARDED_BY(mutex_); + VCMDecodingState last_decoded_state_ RTC_GUARDED_BY(mutex_); bool first_packet_since_reset_; // Number of packets in a row that have been too old. int num_consecutive_old_packets_; // Number of packets received. - int num_packets_ RTC_GUARDED_BY(crit_sect_); + int num_packets_ RTC_GUARDED_BY(mutex_); // Number of duplicated packets received. - int num_duplicated_packets_ RTC_GUARDED_BY(crit_sect_); + int num_duplicated_packets_ RTC_GUARDED_BY(mutex_); // Jitter estimation. // Filter for estimating jitter. 
diff --git a/modules/video_coding/jitter_buffer_unittest.cc b/modules/video_coding/jitter_buffer_unittest.cc index acfee8c6f7..752ceb835e 100644 --- a/modules/video_coding/jitter_buffer_unittest.cc +++ b/modules/video_coding/jitter_buffer_unittest.cc @@ -67,8 +67,7 @@ class TestBasicJitterBuffer : public ::testing::Test { video_header.is_first_packet_in_frame = true; video_header.frame_type = VideoFrameType::kVideoFrameDelta; packet_.reset(new VCMPacket(data_, size_, rtp_header, video_header, - /*ntp_time_ms=*/0, - clock_->TimeInMilliseconds())); + /*ntp_time_ms=*/0, clock_->CurrentTime())); } VCMEncodedFrame* DecodeCompleteFrame() { @@ -541,7 +540,7 @@ TEST_F(TestBasicJitterBuffer, TestReorderingWithPadding) { video_header.codec = kVideoCodecGeneric; video_header.frame_type = VideoFrameType::kEmptyFrame; VCMPacket empty_packet(data_, 0, rtp_header, video_header, - /*ntp_time_ms=*/0, clock_->TimeInMilliseconds()); + /*ntp_time_ms=*/0, clock_->CurrentTime()); EXPECT_EQ(kOldPacket, jitter_buffer_->InsertPacket(empty_packet, &retransmitted)); empty_packet.seqNum += 1; diff --git a/modules/video_coding/jitter_estimator.cc b/modules/video_coding/jitter_estimator.cc index 44e2a9811e..92a298c259 100644 --- a/modules/video_coding/jitter_estimator.cc +++ b/modules/video_coding/jitter_estimator.cc @@ -247,7 +247,7 @@ void VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS, hMh_sigma = deltaFSBytes * Mh[0] + Mh[1] + sigma; if ((hMh_sigma < 1e-9 && hMh_sigma >= 0) || (hMh_sigma > -1e-9 && hMh_sigma <= 0)) { - assert(false); + RTC_NOTREACHED(); return; } kalmanGain[0] = Mh[0] / hMh_sigma; @@ -276,11 +276,11 @@ void VCMJitterEstimator::KalmanEstimateChannel(int64_t frameDelayMS, kalmanGain[1] * deltaFSBytes * t01; // Covariance matrix, must be positive semi-definite. 
- assert(_thetaCov[0][0] + _thetaCov[1][1] >= 0 && - _thetaCov[0][0] * _thetaCov[1][1] - - _thetaCov[0][1] * _thetaCov[1][0] >= - 0 && - _thetaCov[0][0] >= 0); + RTC_DCHECK(_thetaCov[0][0] + _thetaCov[1][1] >= 0 && + _thetaCov[0][0] * _thetaCov[1][1] - + _thetaCov[0][1] * _thetaCov[1][0] >= + 0 && + _thetaCov[0][0] >= 0); } // Calculate difference in delay between a sample and the expected delay @@ -302,7 +302,7 @@ void VCMJitterEstimator::EstimateRandomJitter(double d_dT, _lastUpdateT = now; if (_alphaCount == 0) { - assert(false); + RTC_NOTREACHED(); return; } double alpha = @@ -428,7 +428,7 @@ double VCMJitterEstimator::GetFrameRate() const { double fps = 1000000.0 / fps_counter_.ComputeMean(); // Sanity check. - assert(fps >= 0.0); + RTC_DCHECK_GE(fps, 0.0); if (fps > kMaxFramerateEstimate) { fps = kMaxFramerateEstimate; } diff --git a/modules/video_coding/loss_notification_controller.h b/modules/video_coding/loss_notification_controller.h index a7a1fb9fe8..4d536ba4f9 100644 --- a/modules/video_coding/loss_notification_controller.h +++ b/modules/video_coding/loss_notification_controller.h @@ -17,8 +17,9 @@ #include "absl/types/optional.h" #include "api/array_view.h" +#include "api/sequence_checker.h" #include "modules/include/module_common_types.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" namespace webrtc { @@ -102,7 +103,7 @@ class LossNotificationController { // (Naturally, later frames must also be assemblable to be decodable.) 
std::set decodable_frame_ids_ RTC_GUARDED_BY(sequence_checker_); - SequenceChecker sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; }; } // namespace webrtc diff --git a/modules/video_coding/media_opt_util.cc b/modules/video_coding/media_opt_util.cc index b47eeb55d3..0136ae8ec9 100644 --- a/modules/video_coding/media_opt_util.cc +++ b/modules/video_coding/media_opt_util.cc @@ -87,10 +87,10 @@ VCMNackFecMethod::VCMNackFecMethod(int64_t lowRttNackThresholdMs, _lowRttNackMs(lowRttNackThresholdMs), _highRttNackMs(highRttNackThresholdMs), _maxFramesFec(1) { - assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1); - assert(highRttNackThresholdMs == -1 || - lowRttNackThresholdMs <= highRttNackThresholdMs); - assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1); + RTC_DCHECK(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1); + RTC_DCHECK(highRttNackThresholdMs == -1 || + lowRttNackThresholdMs <= highRttNackThresholdMs); + RTC_DCHECK(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1); _type = kNackFec; } @@ -384,7 +384,7 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) { indexTableKey = VCM_MIN(indexTableKey, kFecRateTableSize); // Check on table index - assert(indexTableKey < kFecRateTableSize); + RTC_DCHECK_LT(indexTableKey, kFecRateTableSize); // Protection factor for I frame codeRateKey = kFecRateTable[indexTableKey]; diff --git a/modules/video_coding/nack_module2.cc b/modules/video_coding/nack_module2.cc index 267eaebb7a..8a3a731ed0 100644 --- a/modules/video_coding/nack_module2.cc +++ b/modules/video_coding/nack_module2.cc @@ -14,10 +14,10 @@ #include #include "api/units/timestamp.h" -#include "modules/utility/include/process_thread.h" #include "rtc_base/checks.h" #include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/logging.h" +#include "rtc_base/task_queue.h" #include "system_wrappers/include/field_trial.h" namespace webrtc { @@ -27,8 +27,6 
@@ const int kMaxPacketAge = 10000; const int kMaxNackPackets = 1000; const int kDefaultRttMs = 100; const int kMaxNackRetries = 10; -const int kProcessFrequency = 50; -const int kProcessIntervalMs = 1000 / kProcessFrequency; const int kMaxReorderedPackets = 128; const int kNumReorderingBuckets = 10; const int kDefaultSendNackDelayMs = 0; @@ -45,6 +43,8 @@ int64_t GetSendNackDelay() { } } // namespace +constexpr TimeDelta NackModule2::kUpdateInterval; + NackModule2::NackInfo::NackInfo() : seq_num(0), send_at_seq_num(0), sent_at_time(-1), retries(0) {} @@ -88,32 +88,58 @@ NackModule2::BackoffSettings::ParseFromFieldTrials() { return absl::nullopt; } -NackModule2::NackModule2(Clock* clock, +NackModule2::NackModule2(TaskQueueBase* current_queue, + Clock* clock, NackSender* nack_sender, - KeyFrameRequestSender* keyframe_request_sender) - : clock_(clock), + KeyFrameRequestSender* keyframe_request_sender, + TimeDelta update_interval /*= kUpdateInterval*/) + : worker_thread_(current_queue), + update_interval_(update_interval), + clock_(clock), nack_sender_(nack_sender), keyframe_request_sender_(keyframe_request_sender), reordering_histogram_(kNumReorderingBuckets, kMaxReorderedPackets), initialized_(false), rtt_ms_(kDefaultRttMs), newest_seq_num_(0), - next_process_time_ms_(-1), send_nack_delay_ms_(GetSendNackDelay()), backoff_settings_(BackoffSettings::ParseFromFieldTrials()) { RTC_DCHECK(clock_); RTC_DCHECK(nack_sender_); RTC_DCHECK(keyframe_request_sender_); + RTC_DCHECK_GT(update_interval.ms(), 0); + RTC_DCHECK(worker_thread_); + RTC_DCHECK(worker_thread_->IsCurrent()); + + repeating_task_ = RepeatingTaskHandle::DelayedStart( + TaskQueueBase::Current(), update_interval_, + [this]() { + RTC_DCHECK_RUN_ON(worker_thread_); + std::vector nack_batch = GetNackBatch(kTimeOnly); + if (!nack_batch.empty()) { + // This batch of NACKs is triggered externally; there is no external + // initiator who can batch them with other feedback messages. 
+ nack_sender_->SendNack(nack_batch, /*buffering_allowed=*/false); + } + return update_interval_; + }, + clock_); +} + +NackModule2::~NackModule2() { + RTC_DCHECK_RUN_ON(worker_thread_); + repeating_task_.Stop(); } int NackModule2::OnReceivedPacket(uint16_t seq_num, bool is_keyframe) { + RTC_DCHECK_RUN_ON(worker_thread_); return OnReceivedPacket(seq_num, is_keyframe, false); } int NackModule2::OnReceivedPacket(uint16_t seq_num, bool is_keyframe, bool is_recovered) { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(worker_thread_); // TODO(philipel): When the packet includes information whether it is // retransmitted or not, use that value instead. For // now set it to true, which will cause the reordering @@ -182,61 +208,24 @@ int NackModule2::OnReceivedPacket(uint16_t seq_num, } void NackModule2::ClearUpTo(uint16_t seq_num) { - rtc::CritScope lock(&crit_); - nack_list_.erase(nack_list_.begin(), nack_list_.lower_bound(seq_num)); - keyframe_list_.erase(keyframe_list_.begin(), - keyframe_list_.lower_bound(seq_num)); - recovered_list_.erase(recovered_list_.begin(), - recovered_list_.lower_bound(seq_num)); + // Called via RtpVideoStreamReceiver2::FrameContinuous on the network thread. 
+ worker_thread_->PostTask(ToQueuedTask(task_safety_, [seq_num, this]() { + RTC_DCHECK_RUN_ON(worker_thread_); + nack_list_.erase(nack_list_.begin(), nack_list_.lower_bound(seq_num)); + keyframe_list_.erase(keyframe_list_.begin(), + keyframe_list_.lower_bound(seq_num)); + recovered_list_.erase(recovered_list_.begin(), + recovered_list_.lower_bound(seq_num)); + })); } void NackModule2::UpdateRtt(int64_t rtt_ms) { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(worker_thread_); rtt_ms_ = rtt_ms; } -void NackModule2::Clear() { - rtc::CritScope lock(&crit_); - nack_list_.clear(); - keyframe_list_.clear(); - recovered_list_.clear(); -} - -int64_t NackModule2::TimeUntilNextProcess() { - return std::max(next_process_time_ms_ - clock_->TimeInMilliseconds(), - 0); -} - -void NackModule2::Process() { - if (nack_sender_) { - std::vector nack_batch; - { - rtc::CritScope lock(&crit_); - nack_batch = GetNackBatch(kTimeOnly); - } - - if (!nack_batch.empty()) { - // This batch of NACKs is triggered externally; there is no external - // initiator who can batch them with other feedback messages. - nack_sender_->SendNack(nack_batch, /*buffering_allowed=*/false); - } - } - - // Update the next_process_time_ms_ in intervals to achieve - // the targeted frequency over time. Also add multiple intervals - // in case of a skip in time as to not make uneccessary - // calls to Process in order to catch up. - int64_t now_ms = clock_->TimeInMilliseconds(); - if (next_process_time_ms_ == -1) { - next_process_time_ms_ = now_ms + kProcessIntervalMs; - } else { - next_process_time_ms_ = next_process_time_ms_ + kProcessIntervalMs + - (now_ms - next_process_time_ms_) / - kProcessIntervalMs * kProcessIntervalMs; - } -} - bool NackModule2::RemovePacketsUntilKeyFrame() { + // Called on worker_thread_. 
while (!keyframe_list_.empty()) { auto it = nack_list_.lower_bound(*keyframe_list_.begin()); @@ -256,6 +245,7 @@ bool NackModule2::RemovePacketsUntilKeyFrame() { void NackModule2::AddPacketsToNack(uint16_t seq_num_start, uint16_t seq_num_end) { + // Called on worker_thread_. // Remove old packets. auto it = nack_list_.lower_bound(seq_num_end - kMaxPacketAge); nack_list_.erase(nack_list_.begin(), it); @@ -290,6 +280,8 @@ void NackModule2::AddPacketsToNack(uint16_t seq_num_start, } std::vector NackModule2::GetNackBatch(NackFilterOptions options) { + // Called on worker_thread_. + bool consider_seq_num = options != kTimeOnly; bool consider_timestamp = options != kSeqNumOnly; Timestamp now = clock_->CurrentTime(); @@ -335,12 +327,14 @@ std::vector NackModule2::GetNackBatch(NackFilterOptions options) { } void NackModule2::UpdateReorderingStatistics(uint16_t seq_num) { + // Running on worker_thread_. RTC_DCHECK(AheadOf(newest_seq_num_, seq_num)); uint16_t diff = ReverseDiff(newest_seq_num_, seq_num); reordering_histogram_.Add(diff); } int NackModule2::WaitNumberOfPackets(float probability) const { + // Called on worker_thread_; if (reordering_histogram_.NumValues() == 0) return 0; return reordering_histogram_.InverseCdf(probability); diff --git a/modules/video_coding/nack_module2.h b/modules/video_coding/nack_module2.h index 6518f32bb6..f58f886934 100644 --- a/modules/video_coding/nack_module2.h +++ b/modules/video_coding/nack_module2.h @@ -17,33 +17,38 @@ #include #include +#include "api/sequence_checker.h" #include "api/units/time_delta.h" -#include "modules/include/module.h" #include "modules/include/module_common_types.h" #include "modules/video_coding/histogram.h" -#include "rtc_base/critical_section.h" #include "rtc_base/numerics/sequence_number_util.h" +#include "rtc_base/task_queue.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/thread_annotations.h" #include 
"system_wrappers/include/clock.h" namespace webrtc { -class NackModule2 final : public Module { +// TODO(bugs.webrtc.org/11594): This class no longer implements the Module +// interface and therefore "NackModule" may not be a descriptive name anymore. +// Consider renaming to e.g. NackTracker or NackRequester. +class NackModule2 final { public: - NackModule2(Clock* clock, + static constexpr TimeDelta kUpdateInterval = TimeDelta::Millis(20); + + NackModule2(TaskQueueBase* current_queue, + Clock* clock, NackSender* nack_sender, - KeyFrameRequestSender* keyframe_request_sender); + KeyFrameRequestSender* keyframe_request_sender, + TimeDelta update_interval = kUpdateInterval); + ~NackModule2(); int OnReceivedPacket(uint16_t seq_num, bool is_keyframe); int OnReceivedPacket(uint16_t seq_num, bool is_keyframe, bool is_recovered); void ClearUpTo(uint16_t seq_num); void UpdateRtt(int64_t rtt_ms); - void Clear(); - - // Module implementation - int64_t TimeUntilNextProcess() override; - void Process() override; private: // Which fields to consider when deciding which packet to nack in @@ -79,24 +84,30 @@ class NackModule2 final : public Module { }; void AddPacketsToNack(uint16_t seq_num_start, uint16_t seq_num_end) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); // Removes packets from the nack list until the next keyframe. Returns true // if packets were removed. - bool RemovePacketsUntilKeyFrame() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + bool RemovePacketsUntilKeyFrame() + RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); std::vector GetNackBatch(NackFilterOptions options) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); // Update the reordering distribution. 
void UpdateReorderingStatistics(uint16_t seq_num) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); // Returns how many packets we have to wait in order to receive the packet // with probability |probabilty| or higher. int WaitNumberOfPackets(float probability) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_thread_); + + TaskQueueBase* const worker_thread_; + + // Used to regularly call SendNack if needed. + RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(worker_thread_); + const TimeDelta update_interval_; - rtc::CriticalSection crit_; Clock* const clock_; NackSender* const nack_sender_; KeyFrameRequestSender* const keyframe_request_sender_; @@ -105,23 +116,23 @@ class NackModule2 final : public Module { // known thread (e.g. see |initialized_|). Those probably do not need // synchronized access. std::map> nack_list_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(worker_thread_); std::set> keyframe_list_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(worker_thread_); std::set> recovered_list_ - RTC_GUARDED_BY(crit_); - video_coding::Histogram reordering_histogram_ RTC_GUARDED_BY(crit_); - bool initialized_ RTC_GUARDED_BY(crit_); - int64_t rtt_ms_ RTC_GUARDED_BY(crit_); - uint16_t newest_seq_num_ RTC_GUARDED_BY(crit_); - - // Only touched on the process thread. - int64_t next_process_time_ms_; + RTC_GUARDED_BY(worker_thread_); + video_coding::Histogram reordering_histogram_ RTC_GUARDED_BY(worker_thread_); + bool initialized_ RTC_GUARDED_BY(worker_thread_); + int64_t rtt_ms_ RTC_GUARDED_BY(worker_thread_); + uint16_t newest_seq_num_ RTC_GUARDED_BY(worker_thread_); // Adds a delay before send nack on packet received. const int64_t send_nack_delay_ms_; const absl::optional backoff_settings_; + + // Used to signal destruction to potentially pending tasks. 
+ ScopedTaskSafety task_safety_; }; } // namespace webrtc diff --git a/modules/video_coding/nack_module2_unittest.cc b/modules/video_coding/nack_module2_unittest.cc index ebc28ecb5a..acd1eead01 100644 --- a/modules/video_coding/nack_module2_unittest.cc +++ b/modules/video_coding/nack_module2_unittest.cc @@ -18,8 +18,12 @@ #include "system_wrappers/include/clock.h" #include "test/field_trial.h" #include "test/gtest.h" +#include "test/run_loop.h" namespace webrtc { +// TODO(bugs.webrtc.org/11594): Use the use the GlobalSimulatedTimeController +// instead of RunLoop. At the moment we mix use of the Clock and the underlying +// implementation of RunLoop, which is realtime. class TestNackModule2 : public ::testing::TestWithParam, public NackSender, public KeyFrameRequestSender { @@ -29,68 +33,116 @@ class TestNackModule2 : public ::testing::TestWithParam, field_trial_(GetParam() ? "WebRTC-ExponentialNackBackoff/enabled:true/" : "WebRTC-ExponentialNackBackoff/enabled:false/"), - nack_module_(clock_.get(), this, this), keyframes_requested_(0) {} - void SetUp() override { nack_module_.UpdateRtt(kDefaultRttMs); } + void SetUp() override {} void SendNack(const std::vector& sequence_numbers, bool buffering_allowed) override { sent_nacks_.insert(sent_nacks_.end(), sequence_numbers.begin(), sequence_numbers.end()); + if (waiting_for_send_nack_) { + waiting_for_send_nack_ = false; + loop_.Quit(); + } } void RequestKeyFrame() override { ++keyframes_requested_; } + void Flush() { + // nack_module.Process(); + loop_.Flush(); + } + + bool WaitForSendNack() { + if (timed_out_) { + RTC_NOTREACHED(); + return false; + } + + RTC_DCHECK(!waiting_for_send_nack_); + + waiting_for_send_nack_ = true; + loop_.PostDelayedTask( + [this]() { + timed_out_ = true; + loop_.Quit(); + }, + 1000); + + loop_.Run(); + + if (timed_out_) + return false; + + RTC_DCHECK(!waiting_for_send_nack_); + return true; + } + + NackModule2& CreateNackModule( + TimeDelta interval = NackModule2::kUpdateInterval) { + 
RTC_DCHECK(!nack_module_.get()); + nack_module_ = std::make_unique( + TaskQueueBase::Current(), clock_.get(), this, this, interval); + nack_module_->UpdateRtt(kDefaultRttMs); + return *nack_module_.get(); + } + static constexpr int64_t kDefaultRttMs = 20; + test::RunLoop loop_; std::unique_ptr clock_; test::ScopedFieldTrials field_trial_; - NackModule2 nack_module_; + std::unique_ptr nack_module_; std::vector sent_nacks_; int keyframes_requested_; + bool waiting_for_send_nack_ = false; + bool timed_out_ = false; }; TEST_P(TestNackModule2, NackOnePacket) { - nack_module_.OnReceivedPacket(1, false, false); - nack_module_.OnReceivedPacket(3, false, false); - EXPECT_EQ(1u, sent_nacks_.size()); + NackModule2& nack_module = CreateNackModule(); + nack_module.OnReceivedPacket(1, false, false); + nack_module.OnReceivedPacket(3, false, false); + ASSERT_EQ(1u, sent_nacks_.size()); EXPECT_EQ(2, sent_nacks_[0]); } TEST_P(TestNackModule2, WrappingSeqNum) { - nack_module_.OnReceivedPacket(0xfffe, false, false); - nack_module_.OnReceivedPacket(1, false, false); - EXPECT_EQ(2u, sent_nacks_.size()); + NackModule2& nack_module = CreateNackModule(); + nack_module.OnReceivedPacket(0xfffe, false, false); + nack_module.OnReceivedPacket(1, false, false); + ASSERT_EQ(2u, sent_nacks_.size()); EXPECT_EQ(0xffff, sent_nacks_[0]); EXPECT_EQ(0, sent_nacks_[1]); } TEST_P(TestNackModule2, WrappingSeqNumClearToKeyframe) { - nack_module_.OnReceivedPacket(0xfffe, false, false); - nack_module_.OnReceivedPacket(1, false, false); - EXPECT_EQ(2u, sent_nacks_.size()); + NackModule2& nack_module = CreateNackModule(TimeDelta::Millis(10)); + nack_module.OnReceivedPacket(0xfffe, false, false); + nack_module.OnReceivedPacket(1, false, false); + ASSERT_EQ(2u, sent_nacks_.size()); EXPECT_EQ(0xffff, sent_nacks_[0]); EXPECT_EQ(0, sent_nacks_[1]); sent_nacks_.clear(); - nack_module_.OnReceivedPacket(2, true, false); - EXPECT_EQ(0u, sent_nacks_.size()); + nack_module.OnReceivedPacket(2, true, false); + ASSERT_EQ(0u, 
sent_nacks_.size()); - nack_module_.OnReceivedPacket(501, true, false); - EXPECT_EQ(498u, sent_nacks_.size()); + nack_module.OnReceivedPacket(501, true, false); + ASSERT_EQ(498u, sent_nacks_.size()); for (int seq_num = 3; seq_num < 501; ++seq_num) EXPECT_EQ(seq_num, sent_nacks_[seq_num - 3]); sent_nacks_.clear(); - nack_module_.OnReceivedPacket(1001, false, false); + nack_module.OnReceivedPacket(1001, false, false); EXPECT_EQ(499u, sent_nacks_.size()); for (int seq_num = 502; seq_num < 1001; ++seq_num) EXPECT_EQ(seq_num, sent_nacks_[seq_num - 502]); sent_nacks_.clear(); clock_->AdvanceTimeMilliseconds(100); - nack_module_.Process(); - EXPECT_EQ(999u, sent_nacks_.size()); + ASSERT_TRUE(WaitForSendNack()); + ASSERT_EQ(999u, sent_nacks_.size()); EXPECT_EQ(0xffff, sent_nacks_[0]); EXPECT_EQ(0, sent_nacks_[1]); for (int seq_num = 3; seq_num < 501; ++seq_num) @@ -102,15 +154,15 @@ TEST_P(TestNackModule2, WrappingSeqNumClearToKeyframe) { // It will then clear all nacks up to the next keyframe (seq num 2), // thus removing 0xffff and 0 from the nack list. sent_nacks_.clear(); - nack_module_.OnReceivedPacket(1004, false, false); - EXPECT_EQ(2u, sent_nacks_.size()); + nack_module.OnReceivedPacket(1004, false, false); + ASSERT_EQ(2u, sent_nacks_.size()); EXPECT_EQ(1002, sent_nacks_[0]); EXPECT_EQ(1003, sent_nacks_[1]); sent_nacks_.clear(); clock_->AdvanceTimeMilliseconds(100); - nack_module_.Process(); - EXPECT_EQ(999u, sent_nacks_.size()); + ASSERT_TRUE(WaitForSendNack()); + ASSERT_EQ(999u, sent_nacks_.size()); for (int seq_num = 3; seq_num < 501; ++seq_num) EXPECT_EQ(seq_num, sent_nacks_[seq_num - 3]); for (int seq_num = 502; seq_num < 1001; ++seq_num) @@ -118,65 +170,39 @@ TEST_P(TestNackModule2, WrappingSeqNumClearToKeyframe) { // Adding packet 1007 will cause the nack module to overflow again, thus // clearing everything up to 501 which is the next keyframe. 
- nack_module_.OnReceivedPacket(1007, false, false); + nack_module.OnReceivedPacket(1007, false, false); sent_nacks_.clear(); clock_->AdvanceTimeMilliseconds(100); - nack_module_.Process(); - EXPECT_EQ(503u, sent_nacks_.size()); + ASSERT_TRUE(WaitForSendNack()); + ASSERT_EQ(503u, sent_nacks_.size()); for (int seq_num = 502; seq_num < 1001; ++seq_num) EXPECT_EQ(seq_num, sent_nacks_[seq_num - 502]); EXPECT_EQ(1005, sent_nacks_[501]); EXPECT_EQ(1006, sent_nacks_[502]); } -TEST_P(TestNackModule2, DontBurstOnTimeSkip) { - nack_module_.Process(); - clock_->AdvanceTimeMilliseconds(20); - EXPECT_EQ(0, nack_module_.TimeUntilNextProcess()); - nack_module_.Process(); - - clock_->AdvanceTimeMilliseconds(100); - EXPECT_EQ(0, nack_module_.TimeUntilNextProcess()); - nack_module_.Process(); - EXPECT_EQ(20, nack_module_.TimeUntilNextProcess()); - - clock_->AdvanceTimeMilliseconds(19); - EXPECT_EQ(1, nack_module_.TimeUntilNextProcess()); - clock_->AdvanceTimeMilliseconds(2); - nack_module_.Process(); - EXPECT_EQ(19, nack_module_.TimeUntilNextProcess()); - - clock_->AdvanceTimeMilliseconds(19); - EXPECT_EQ(0, nack_module_.TimeUntilNextProcess()); - nack_module_.Process(); - - clock_->AdvanceTimeMilliseconds(21); - EXPECT_EQ(0, nack_module_.TimeUntilNextProcess()); - nack_module_.Process(); - EXPECT_EQ(19, nack_module_.TimeUntilNextProcess()); -} - TEST_P(TestNackModule2, ResendNack) { - nack_module_.OnReceivedPacket(1, false, false); - nack_module_.OnReceivedPacket(3, false, false); + NackModule2& nack_module = CreateNackModule(TimeDelta::Millis(1)); + nack_module.OnReceivedPacket(1, false, false); + nack_module.OnReceivedPacket(3, false, false); size_t expected_nacks_sent = 1; - EXPECT_EQ(expected_nacks_sent, sent_nacks_.size()); + ASSERT_EQ(expected_nacks_sent, sent_nacks_.size()); EXPECT_EQ(2, sent_nacks_[0]); if (GetParam()) { // Retry has to wait at least 5ms by default. 
- nack_module_.UpdateRtt(1); + nack_module.UpdateRtt(1); clock_->AdvanceTimeMilliseconds(4); - nack_module_.Process(); // Too early. + Flush(); // Too early. EXPECT_EQ(expected_nacks_sent, sent_nacks_.size()); clock_->AdvanceTimeMilliseconds(1); - nack_module_.Process(); // Now allowed. + WaitForSendNack(); // Now allowed. EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size()); } else { - nack_module_.UpdateRtt(1); + nack_module.UpdateRtt(1); clock_->AdvanceTimeMilliseconds(1); - nack_module_.Process(); // Fast retransmit allowed. + WaitForSendNack(); // Fast retransmit allowed. EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size()); } @@ -185,7 +211,7 @@ TEST_P(TestNackModule2, ResendNack) { for (int i = 2; i < 10; ++i) { // Change RTT, above the 40ms max for exponential backoff. TimeDelta rtt = TimeDelta::Millis(160); // + (i * 10 - 40) - nack_module_.UpdateRtt(rtt.ms()); + nack_module.UpdateRtt(rtt.ms()); // RTT gets capped at 160ms in backoff calculations. TimeDelta expected_backoff_delay = @@ -193,26 +219,27 @@ TEST_P(TestNackModule2, ResendNack) { // Move to one millisecond before next allowed NACK. clock_->AdvanceTimeMilliseconds(expected_backoff_delay.ms() - 1); - nack_module_.Process(); + Flush(); EXPECT_EQ(expected_nacks_sent, sent_nacks_.size()); // Move to one millisecond after next allowed NACK. // After rather than on to avoid rounding errors. clock_->AdvanceTimeMilliseconds(2); - nack_module_.Process(); // Now allowed. + WaitForSendNack(); // Now allowed. EXPECT_EQ(++expected_nacks_sent, sent_nacks_.size()); } // Giving up after 10 tries. 
clock_->AdvanceTimeMilliseconds(3000); - nack_module_.Process(); + Flush(); EXPECT_EQ(expected_nacks_sent, sent_nacks_.size()); } TEST_P(TestNackModule2, ResendPacketMaxRetries) { - nack_module_.OnReceivedPacket(1, false, false); - nack_module_.OnReceivedPacket(3, false, false); - EXPECT_EQ(1u, sent_nacks_.size()); + NackModule2& nack_module = CreateNackModule(TimeDelta::Millis(1)); + nack_module.OnReceivedPacket(1, false, false); + nack_module.OnReceivedPacket(3, false, false); + ASSERT_EQ(1u, sent_nacks_.size()); EXPECT_EQ(2, sent_nacks_[0]); int backoff_factor = 1; @@ -220,111 +247,124 @@ TEST_P(TestNackModule2, ResendPacketMaxRetries) { // Exponential backoff, so that we don't reject NACK because of time. clock_->AdvanceTimeMilliseconds(backoff_factor * kDefaultRttMs); backoff_factor *= 2; - nack_module_.Process(); + WaitForSendNack(); EXPECT_EQ(retries + 1, sent_nacks_.size()); } clock_->AdvanceTimeMilliseconds(backoff_factor * kDefaultRttMs); - nack_module_.Process(); + Flush(); EXPECT_EQ(10u, sent_nacks_.size()); } TEST_P(TestNackModule2, TooLargeNackList) { - nack_module_.OnReceivedPacket(0, false, false); - nack_module_.OnReceivedPacket(1001, false, false); + NackModule2& nack_module = CreateNackModule(); + nack_module.OnReceivedPacket(0, false, false); + nack_module.OnReceivedPacket(1001, false, false); EXPECT_EQ(1000u, sent_nacks_.size()); EXPECT_EQ(0, keyframes_requested_); - nack_module_.OnReceivedPacket(1003, false, false); + nack_module.OnReceivedPacket(1003, false, false); EXPECT_EQ(1000u, sent_nacks_.size()); EXPECT_EQ(1, keyframes_requested_); - nack_module_.OnReceivedPacket(1004, false, false); + nack_module.OnReceivedPacket(1004, false, false); EXPECT_EQ(1000u, sent_nacks_.size()); EXPECT_EQ(1, keyframes_requested_); } TEST_P(TestNackModule2, TooLargeNackListWithKeyFrame) { - nack_module_.OnReceivedPacket(0, false, false); - nack_module_.OnReceivedPacket(1, true, false); - nack_module_.OnReceivedPacket(1001, false, false); + NackModule2& 
nack_module = CreateNackModule(); + nack_module.OnReceivedPacket(0, false, false); + nack_module.OnReceivedPacket(1, true, false); + nack_module.OnReceivedPacket(1001, false, false); EXPECT_EQ(999u, sent_nacks_.size()); EXPECT_EQ(0, keyframes_requested_); - nack_module_.OnReceivedPacket(1003, false, false); + nack_module.OnReceivedPacket(1003, false, false); EXPECT_EQ(1000u, sent_nacks_.size()); EXPECT_EQ(0, keyframes_requested_); - nack_module_.OnReceivedPacket(1005, false, false); + nack_module.OnReceivedPacket(1005, false, false); EXPECT_EQ(1000u, sent_nacks_.size()); EXPECT_EQ(1, keyframes_requested_); } TEST_P(TestNackModule2, ClearUpTo) { - nack_module_.OnReceivedPacket(0, false, false); - nack_module_.OnReceivedPacket(100, false, false); + NackModule2& nack_module = CreateNackModule(TimeDelta::Millis(1)); + nack_module.OnReceivedPacket(0, false, false); + nack_module.OnReceivedPacket(100, false, false); EXPECT_EQ(99u, sent_nacks_.size()); sent_nacks_.clear(); clock_->AdvanceTimeMilliseconds(100); - nack_module_.ClearUpTo(50); - nack_module_.Process(); - EXPECT_EQ(50u, sent_nacks_.size()); + nack_module.ClearUpTo(50); + WaitForSendNack(); + ASSERT_EQ(50u, sent_nacks_.size()); EXPECT_EQ(50, sent_nacks_[0]); } TEST_P(TestNackModule2, ClearUpToWrap) { - nack_module_.OnReceivedPacket(0xfff0, false, false); - nack_module_.OnReceivedPacket(0xf, false, false); + NackModule2& nack_module = CreateNackModule(); + nack_module.OnReceivedPacket(0xfff0, false, false); + nack_module.OnReceivedPacket(0xf, false, false); EXPECT_EQ(30u, sent_nacks_.size()); sent_nacks_.clear(); clock_->AdvanceTimeMilliseconds(100); - nack_module_.ClearUpTo(0); - nack_module_.Process(); - EXPECT_EQ(15u, sent_nacks_.size()); + nack_module.ClearUpTo(0); + WaitForSendNack(); + ASSERT_EQ(15u, sent_nacks_.size()); EXPECT_EQ(0, sent_nacks_[0]); } TEST_P(TestNackModule2, PacketNackCount) { - EXPECT_EQ(0, nack_module_.OnReceivedPacket(0, false, false)); - EXPECT_EQ(0, nack_module_.OnReceivedPacket(2, 
false, false)); - EXPECT_EQ(1, nack_module_.OnReceivedPacket(1, false, false)); + NackModule2& nack_module = CreateNackModule(TimeDelta::Millis(1)); + EXPECT_EQ(0, nack_module.OnReceivedPacket(0, false, false)); + EXPECT_EQ(0, nack_module.OnReceivedPacket(2, false, false)); + EXPECT_EQ(1, nack_module.OnReceivedPacket(1, false, false)); sent_nacks_.clear(); - nack_module_.UpdateRtt(100); - EXPECT_EQ(0, nack_module_.OnReceivedPacket(5, false, false)); + nack_module.UpdateRtt(100); + EXPECT_EQ(0, nack_module.OnReceivedPacket(5, false, false)); clock_->AdvanceTimeMilliseconds(100); - nack_module_.Process(); + WaitForSendNack(); + EXPECT_EQ(4u, sent_nacks_.size()); + clock_->AdvanceTimeMilliseconds(125); - nack_module_.Process(); - EXPECT_EQ(3, nack_module_.OnReceivedPacket(3, false, false)); - EXPECT_EQ(3, nack_module_.OnReceivedPacket(4, false, false)); - EXPECT_EQ(0, nack_module_.OnReceivedPacket(4, false, false)); + WaitForSendNack(); + + EXPECT_EQ(6u, sent_nacks_.size()); + + EXPECT_EQ(3, nack_module.OnReceivedPacket(3, false, false)); + EXPECT_EQ(3, nack_module.OnReceivedPacket(4, false, false)); + EXPECT_EQ(0, nack_module.OnReceivedPacket(4, false, false)); } TEST_P(TestNackModule2, NackListFullAndNoOverlapWithKeyframes) { + NackModule2& nack_module = CreateNackModule(); const int kMaxNackPackets = 1000; const unsigned int kFirstGap = kMaxNackPackets - 20; const unsigned int kSecondGap = 200; uint16_t seq_num = 0; - nack_module_.OnReceivedPacket(seq_num++, true, false); + nack_module.OnReceivedPacket(seq_num++, true, false); seq_num += kFirstGap; - nack_module_.OnReceivedPacket(seq_num++, true, false); + nack_module.OnReceivedPacket(seq_num++, true, false); EXPECT_EQ(kFirstGap, sent_nacks_.size()); sent_nacks_.clear(); seq_num += kSecondGap; - nack_module_.OnReceivedPacket(seq_num, true, false); + nack_module.OnReceivedPacket(seq_num, true, false); EXPECT_EQ(kSecondGap, sent_nacks_.size()); } TEST_P(TestNackModule2, HandleFecRecoveredPacket) { - 
nack_module_.OnReceivedPacket(1, false, false); - nack_module_.OnReceivedPacket(4, false, true); + NackModule2& nack_module = CreateNackModule(); + nack_module.OnReceivedPacket(1, false, false); + nack_module.OnReceivedPacket(4, false, true); EXPECT_EQ(0u, sent_nacks_.size()); - nack_module_.OnReceivedPacket(5, false, false); + nack_module.OnReceivedPacket(5, false, false); EXPECT_EQ(2u, sent_nacks_.size()); } TEST_P(TestNackModule2, SendNackWithoutDelay) { - nack_module_.OnReceivedPacket(0, false, false); - nack_module_.OnReceivedPacket(100, false, false); + NackModule2& nack_module = CreateNackModule(); + nack_module.OnReceivedPacket(0, false, false); + nack_module.OnReceivedPacket(100, false, false); EXPECT_EQ(99u, sent_nacks_.size()); } @@ -339,7 +379,7 @@ class TestNackModule2WithFieldTrial : public ::testing::Test, TestNackModule2WithFieldTrial() : nack_delay_field_trial_("WebRTC-SendNackDelayMs/10/"), clock_(new SimulatedClock(0)), - nack_module_(clock_.get(), this, this), + nack_module_(TaskQueueBase::Current(), clock_.get(), this, this), keyframes_requested_(0) {} void SendNack(const std::vector& sequence_numbers, diff --git a/modules/video_coding/nack_module_unittest.cc b/modules/video_coding/nack_module_unittest.cc index ab1c76f1b5..f91eb750f0 100644 --- a/modules/video_coding/nack_module_unittest.cc +++ b/modules/video_coding/nack_module_unittest.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "modules/video_coding/nack_module.h" +#include "modules/video_coding/deprecated/nack_module.h" #include #include @@ -45,7 +45,7 @@ class TestNackModule : public ::testing::TestWithParam, static constexpr int64_t kDefaultRttMs = 20; std::unique_ptr clock_; test::ScopedFieldTrials field_trial_; - NackModule nack_module_; + DEPRECATED_NackModule nack_module_; std::vector sent_nacks_; int keyframes_requested_; }; @@ -352,7 +352,7 @@ class TestNackModuleWithFieldTrial : public ::testing::Test, test::ScopedFieldTrials nack_delay_field_trial_; std::unique_ptr clock_; - NackModule nack_module_; + DEPRECATED_NackModule nack_module_; std::vector sent_nacks_; int keyframes_requested_; }; diff --git a/modules/video_coding/packet.cc b/modules/video_coding/packet.cc index 0c4a658b8f..324248ab36 100644 --- a/modules/video_coding/packet.cc +++ b/modules/video_coding/packet.cc @@ -34,7 +34,7 @@ VCMPacket::VCMPacket(const uint8_t* ptr, const RTPHeader& rtp_header, const RTPVideoHeader& videoHeader, int64_t ntp_time_ms, - int64_t receive_time_ms) + Timestamp receive_time) : payloadType(rtp_header.payloadType), timestamp(rtp_header.timestamp), ntp_time_ms_(ntp_time_ms), @@ -47,7 +47,7 @@ VCMPacket::VCMPacket(const uint8_t* ptr, insertStartCode(videoHeader.codec == kVideoCodecH264 && videoHeader.is_first_packet_in_frame), video_header(videoHeader), - packet_info(rtp_header, receive_time_ms) { + packet_info(rtp_header, receive_time) { if (is_first_packet_in_frame() && markerBit) { completeNALU = kNaluComplete; } else if (is_first_packet_in_frame()) { diff --git a/modules/video_coding/packet.h b/modules/video_coding/packet.h index f157e10898..9aa2d5ce08 100644 --- a/modules/video_coding/packet.h +++ b/modules/video_coding/packet.h @@ -17,6 +17,7 @@ #include "absl/types/optional.h" #include "api/rtp_headers.h" #include "api/rtp_packet_info.h" +#include "api/units/timestamp.h" #include "api/video/video_frame_type.h" #include 
"modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h" #include "modules/rtp_rtcp/source/rtp_video_header.h" @@ -41,7 +42,7 @@ class VCMPacket { const RTPHeader& rtp_header, const RTPVideoHeader& video_header, int64_t ntp_time_ms, - int64_t receive_time_ms); + Timestamp receive_time); ~VCMPacket(); diff --git a/modules/video_coding/packet_buffer.cc b/modules/video_coding/packet_buffer.cc index 5db3c0f670..c98ae00389 100644 --- a/modules/video_coding/packet_buffer.cc +++ b/modules/video_coding/packet_buffer.cc @@ -30,41 +30,26 @@ #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/mod_ops.h" -#include "system_wrappers/include/clock.h" -#include "system_wrappers/include/field_trial.h" namespace webrtc { namespace video_coding { PacketBuffer::Packet::Packet(const RtpPacketReceived& rtp_packet, - const RTPVideoHeader& video_header, - int64_t ntp_time_ms, - int64_t receive_time_ms) + const RTPVideoHeader& video_header) : marker_bit(rtp_packet.Marker()), payload_type(rtp_packet.PayloadType()), seq_num(rtp_packet.SequenceNumber()), timestamp(rtp_packet.Timestamp()), - ntp_time_ms(ntp_time_ms), times_nacked(-1), - video_header(video_header), - packet_info(rtp_packet.Ssrc(), - rtp_packet.Csrcs(), - rtp_packet.Timestamp(), - /*audio_level=*/absl::nullopt, - rtp_packet.GetExtension(), - receive_time_ms) {} - -PacketBuffer::PacketBuffer(Clock* clock, - size_t start_buffer_size, - size_t max_buffer_size) - : clock_(clock), - max_size_(max_buffer_size), + video_header(video_header) {} + +PacketBuffer::PacketBuffer(size_t start_buffer_size, size_t max_buffer_size) + : max_size_(max_buffer_size), first_seq_num_(0), first_packet_received_(false), is_cleared_to_first_seq_num_(false), buffer_(start_buffer_size), - sps_pps_idr_is_h264_keyframe_( - field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) { + sps_pps_idr_is_h264_keyframe_(false) { RTC_DCHECK_LE(start_buffer_size, max_buffer_size); // Buffer size must always be a power of 2. 
RTC_DCHECK((start_buffer_size & (start_buffer_size - 1)) == 0); @@ -78,7 +63,6 @@ PacketBuffer::~PacketBuffer() { PacketBuffer::InsertResult PacketBuffer::InsertPacket( std::unique_ptr packet) { PacketBuffer::InsertResult result; - rtc::CritScope lock(&crit_); uint16_t seq_num = packet->seq_num; size_t index = seq_num % buffer_.size(); @@ -112,20 +96,12 @@ PacketBuffer::InsertResult PacketBuffer::InsertPacket( // Clear the buffer, delete payload, and return false to signal that a // new keyframe is needed. RTC_LOG(LS_WARNING) << "Clear PacketBuffer and request key frame."; - Clear(); + ClearInternal(); result.buffer_cleared = true; return result; } } - int64_t now_ms = clock_->TimeInMilliseconds(); - last_received_packet_ms_ = now_ms; - if (packet->video_header.frame_type == VideoFrameType::kVideoFrameKey || - last_received_keyframe_rtp_timestamp_ == packet->timestamp) { - last_received_keyframe_packet_ms_ = now_ms; - last_received_keyframe_rtp_timestamp_ = packet->timestamp; - } - packet->continuous = false; buffer_[index] = std::move(packet); @@ -136,7 +112,6 @@ PacketBuffer::InsertResult PacketBuffer::InsertPacket( } void PacketBuffer::ClearTo(uint16_t seq_num) { - rtc::CritScope lock(&crit_); // We have already cleared past this sequence number, no need to do anything. 
if (is_cleared_to_first_seq_num_ && AheadOf(first_seq_num_, seq_num)) { @@ -173,35 +148,29 @@ void PacketBuffer::ClearTo(uint16_t seq_num) { } void PacketBuffer::Clear() { - rtc::CritScope lock(&crit_); - for (auto& entry : buffer_) { - entry = nullptr; - } - - first_packet_received_ = false; - is_cleared_to_first_seq_num_ = false; - last_received_packet_ms_.reset(); - last_received_keyframe_packet_ms_.reset(); - newest_inserted_seq_num_.reset(); - missing_packets_.clear(); + ClearInternal(); } PacketBuffer::InsertResult PacketBuffer::InsertPadding(uint16_t seq_num) { PacketBuffer::InsertResult result; - rtc::CritScope lock(&crit_); UpdateMissingPackets(seq_num); result.packets = FindFrames(static_cast(seq_num + 1)); return result; } -absl::optional PacketBuffer::LastReceivedPacketMs() const { - rtc::CritScope lock(&crit_); - return last_received_packet_ms_; +void PacketBuffer::ForceSpsPpsIdrIsH264Keyframe() { + sps_pps_idr_is_h264_keyframe_ = true; } -absl::optional PacketBuffer::LastReceivedKeyframePacketMs() const { - rtc::CritScope lock(&crit_); - return last_received_keyframe_packet_ms_; +void PacketBuffer::ClearInternal() { + for (auto& entry : buffer_) { + entry = nullptr; + } + + first_packet_received_ = false; + is_cleared_to_first_seq_num_ = false; + newest_inserted_seq_num_.reset(); + missing_packets_.clear(); } bool PacketBuffer::ExpandBufferSize() { @@ -359,15 +328,10 @@ std::vector> PacketBuffer::FindFrames( VideoFrameType::kVideoFrameDelta; } - // With IPPP, if this is not a keyframe, make sure there are no gaps - // in the packet sequence numbers up until this point. - const uint8_t h264tid = - buffer_[start_index] != nullptr - ? 
buffer_[start_index]->video_header.frame_marking.temporal_id - : kNoTemporalIdx; - if (h264tid == kNoTemporalIdx && !is_h264_keyframe && - missing_packets_.upper_bound(start_seq_num) != - missing_packets_.begin()) { + // If this is not a keyframe, make sure there are no gaps in the packet + // sequence numbers up until this point. + if (!is_h264_keyframe && missing_packets_.upper_bound(start_seq_num) != + missing_packets_.begin()) { return found_frames; } } diff --git a/modules/video_coding/packet_buffer.h b/modules/video_coding/packet_buffer.h index c480e37239..f4dbe31266 100644 --- a/modules/video_coding/packet_buffer.h +++ b/modules/video_coding/packet_buffer.h @@ -18,14 +18,13 @@ #include "absl/base/attributes.h" #include "api/rtp_packet_info.h" +#include "api/units/timestamp.h" #include "api/video/encoded_image.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_video_header.h" #include "rtc_base/copy_on_write_buffer.h" -#include "rtc_base/critical_section.h" #include "rtc_base/numerics/sequence_number_util.h" #include "rtc_base/thread_annotations.h" -#include "system_wrappers/include/clock.h" namespace webrtc { namespace video_coding { @@ -35,9 +34,7 @@ class PacketBuffer { struct Packet { Packet() = default; Packet(const RtpPacketReceived& rtp_packet, - const RTPVideoHeader& video_header, - int64_t ntp_time_ms, - int64_t receive_time_ms); + const RTPVideoHeader& video_header); Packet(const Packet&) = delete; Packet(Packet&&) = delete; Packet& operator=(const Packet&) = delete; @@ -62,14 +59,10 @@ class PacketBuffer { uint8_t payload_type = 0; uint16_t seq_num = 0; uint32_t timestamp = 0; - // NTP time of the capture time in local timebase in milliseconds. 
- int64_t ntp_time_ms = -1; int times_nacked = -1; rtc::CopyOnWriteBuffer video_payload; RTPVideoHeader video_header; - - RtpPacketInfo packet_info; }; struct InsertResult { std::vector> packets; @@ -79,69 +72,54 @@ class PacketBuffer { }; // Both |start_buffer_size| and |max_buffer_size| must be a power of 2. - PacketBuffer(Clock* clock, size_t start_buffer_size, size_t max_buffer_size); + PacketBuffer(size_t start_buffer_size, size_t max_buffer_size); ~PacketBuffer(); - InsertResult InsertPacket(std::unique_ptr packet) - ABSL_MUST_USE_RESULT; - InsertResult InsertPadding(uint16_t seq_num) ABSL_MUST_USE_RESULT; + ABSL_MUST_USE_RESULT InsertResult + InsertPacket(std::unique_ptr packet); + ABSL_MUST_USE_RESULT InsertResult InsertPadding(uint16_t seq_num); void ClearTo(uint16_t seq_num); void Clear(); - // Timestamp (not RTP timestamp) of the last received packet/keyframe packet. - absl::optional LastReceivedPacketMs() const; - absl::optional LastReceivedKeyframePacketMs() const; + void ForceSpsPpsIdrIsH264Keyframe(); private: - Clock* const clock_; + void ClearInternal(); // Tries to expand the buffer. - bool ExpandBufferSize() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + bool ExpandBufferSize(); // Test if all previous packets has arrived for the given sequence number. - bool PotentialNewFrame(uint16_t seq_num) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + bool PotentialNewFrame(uint16_t seq_num) const; // Test if all packets of a frame has arrived, and if so, returns packets to // create frames. - std::vector> FindFrames(uint16_t seq_num) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); - - void UpdateMissingPackets(uint16_t seq_num) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + std::vector> FindFrames(uint16_t seq_num); - rtc::CriticalSection crit_; + void UpdateMissingPackets(uint16_t seq_num); // buffer_.size() and max_size_ must always be a power of two. const size_t max_size_; // The fist sequence number currently in the buffer. 
- uint16_t first_seq_num_ RTC_GUARDED_BY(crit_); + uint16_t first_seq_num_; // If the packet buffer has received its first packet. - bool first_packet_received_ RTC_GUARDED_BY(crit_); + bool first_packet_received_; // If the buffer is cleared to |first_seq_num_|. - bool is_cleared_to_first_seq_num_ RTC_GUARDED_BY(crit_); + bool is_cleared_to_first_seq_num_; // Buffer that holds the the inserted packets and information needed to // determine continuity between them. - std::vector> buffer_ RTC_GUARDED_BY(crit_); - - // Timestamp of the last received packet/keyframe packet. - absl::optional last_received_packet_ms_ RTC_GUARDED_BY(crit_); - absl::optional last_received_keyframe_packet_ms_ - RTC_GUARDED_BY(crit_); - absl::optional last_received_keyframe_rtp_timestamp_ - RTC_GUARDED_BY(crit_); + std::vector> buffer_; - absl::optional newest_inserted_seq_num_ RTC_GUARDED_BY(crit_); - std::set> missing_packets_ - RTC_GUARDED_BY(crit_); + absl::optional newest_inserted_seq_num_; + std::set> missing_packets_; // Indicates if we should require SPS, PPS, and IDR for a particular // RTP timestamp to treat the corresponding frame as a keyframe. 
- const bool sps_pps_idr_is_h264_keyframe_; + bool sps_pps_idr_is_h264_keyframe_; }; } // namespace video_coding diff --git a/modules/video_coding/packet_buffer_unittest.cc b/modules/video_coding/packet_buffer_unittest.cc index 242fff2526..97012618f3 100644 --- a/modules/video_coding/packet_buffer_unittest.cc +++ b/modules/video_coding/packet_buffer_unittest.cc @@ -19,7 +19,6 @@ #include "common_video/h264/h264_common.h" #include "modules/video_coding/frame_object.h" #include "rtc_base/random.h" -#include "system_wrappers/include/clock.h" #include "test/field_trial.h" #include "test/gmock.h" #include "test/gtest.h" @@ -100,11 +99,7 @@ void PrintTo(const PacketBufferInsertResult& result, std::ostream* os) { class PacketBufferTest : public ::testing::Test { protected: - explicit PacketBufferTest(std::string field_trials = "") - : scoped_field_trials_(field_trials), - rand_(0x7732213), - clock_(0), - packet_buffer_(&clock_, kStartSize, kMaxSize) {} + PacketBufferTest() : rand_(0x7732213), packet_buffer_(kStartSize, kMaxSize) {} uint16_t Rand() { return rand_.Rand(); } @@ -133,9 +128,7 @@ class PacketBufferTest : public ::testing::Test { packet_buffer_.InsertPacket(std::move(packet))); } - const test::ScopedFieldTrials scoped_field_trials_; Random rand_; - SimulatedClock clock_; PacketBuffer packet_buffer_; }; @@ -391,10 +384,11 @@ TEST_F(PacketBufferTest, InsertPacketAfterSequenceNumberWrapAround) { class PacketBufferH264Test : public PacketBufferTest { protected: explicit PacketBufferH264Test(bool sps_pps_idr_is_keyframe) - : PacketBufferTest(sps_pps_idr_is_keyframe - ? 
"WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/" - : ""), - sps_pps_idr_is_keyframe_(sps_pps_idr_is_keyframe) {} + : PacketBufferTest(), sps_pps_idr_is_keyframe_(sps_pps_idr_is_keyframe) { + if (sps_pps_idr_is_keyframe) { + packet_buffer_.ForceSpsPpsIdrIsH264Keyframe(); + } + } PacketBufferInsertResult InsertH264( uint16_t seq_num, // packet sequence number @@ -617,67 +611,6 @@ TEST_F(PacketBufferTest, ContinuousSeqNumDoubleMarkerBit) { EXPECT_THAT(Insert(3, kKeyFrame, kNotFirst, kLast).packets, IsEmpty()); } -TEST_F(PacketBufferTest, PacketTimestamps) { - absl::optional packet_ms; - absl::optional packet_keyframe_ms; - - packet_ms = packet_buffer_.LastReceivedPacketMs(); - packet_keyframe_ms = packet_buffer_.LastReceivedKeyframePacketMs(); - EXPECT_FALSE(packet_ms); - EXPECT_FALSE(packet_keyframe_ms); - - int64_t keyframe_ms = clock_.TimeInMilliseconds(); - Insert(100, kKeyFrame, kFirst, kLast, {}, /*timestamp=*/1000); - packet_ms = packet_buffer_.LastReceivedPacketMs(); - packet_keyframe_ms = packet_buffer_.LastReceivedKeyframePacketMs(); - EXPECT_TRUE(packet_ms); - EXPECT_TRUE(packet_keyframe_ms); - EXPECT_EQ(keyframe_ms, *packet_ms); - EXPECT_EQ(keyframe_ms, *packet_keyframe_ms); - - clock_.AdvanceTimeMilliseconds(100); - int64_t delta_ms = clock_.TimeInMilliseconds(); - Insert(101, kDeltaFrame, kFirst, kLast, {}, /*timestamp=*/2000); - packet_ms = packet_buffer_.LastReceivedPacketMs(); - packet_keyframe_ms = packet_buffer_.LastReceivedKeyframePacketMs(); - EXPECT_TRUE(packet_ms); - EXPECT_TRUE(packet_keyframe_ms); - EXPECT_EQ(delta_ms, *packet_ms); - EXPECT_EQ(keyframe_ms, *packet_keyframe_ms); - - packet_buffer_.Clear(); - packet_ms = packet_buffer_.LastReceivedPacketMs(); - packet_keyframe_ms = packet_buffer_.LastReceivedKeyframePacketMs(); - EXPECT_FALSE(packet_ms); - EXPECT_FALSE(packet_keyframe_ms); -} - -TEST_F(PacketBufferTest, - LastReceivedKeyFrameReturnsReceiveTimeOfALastReceivedPacketOfAKeyFrame) { - clock_.AdvanceTimeMilliseconds(100); - 
Insert(/*seq_num=*/100, kKeyFrame, kFirst, kNotLast, {}, /*timestamp=*/1000); - EXPECT_EQ(packet_buffer_.LastReceivedKeyframePacketMs(), - clock_.TimeInMilliseconds()); - - clock_.AdvanceTimeMilliseconds(100); - Insert(/*seq_num=*/102, kDeltaFrame, kNotFirst, kLast, {}, - /*timestamp=*/1000); - EXPECT_EQ(packet_buffer_.LastReceivedKeyframePacketMs(), - clock_.TimeInMilliseconds()); - - clock_.AdvanceTimeMilliseconds(100); - Insert(/*seq_num=*/101, kDeltaFrame, kNotFirst, kNotLast, {}, - /*timestamp=*/1000); - EXPECT_EQ(packet_buffer_.LastReceivedKeyframePacketMs(), - clock_.TimeInMilliseconds()); - - clock_.AdvanceTimeMilliseconds(100); - Insert(/*seq_num=*/103, kDeltaFrame, kFirst, kNotLast, {}, - /*timestamp=*/2000); - EXPECT_EQ(packet_buffer_.LastReceivedKeyframePacketMs(), - clock_.TimeInMilliseconds() - 100); -} - TEST_F(PacketBufferTest, IncomingCodecChange) { auto packet = std::make_unique(); packet->video_header.is_first_packet_in_frame = true; diff --git a/modules/video_coding/receiver.cc b/modules/video_coding/receiver.cc index 2db4e211b1..8e8f0e1ee2 100644 --- a/modules/video_coding/receiver.cc +++ b/modules/video_coding/receiver.cc @@ -141,7 +141,8 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms, uint16_t new_max_wait_time = static_cast(VCM_MAX(available_wait_time, 0)); uint32_t wait_time_ms = rtc::saturated_cast( - timing_->MaxWaitingTime(render_time_ms, clock_->TimeInMilliseconds())); + timing_->MaxWaitingTime(render_time_ms, clock_->TimeInMilliseconds(), + /*too_many_frames_queued=*/false)); if (new_max_wait_time < wait_time_ms) { // We're not allowed to wait until the frame is supposed to be rendered, // waiting as long as we're allowed to avoid busy looping, and then return @@ -161,18 +162,6 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms, frame->SetRenderTime(render_time_ms); TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", frame->Timestamp(), "SetRenderTS", "render_time", frame->RenderTimeMs()); - 
if (!frame->Complete()) { - // Update stats for incomplete frames. - bool retransmitted = false; - const int64_t last_packet_time_ms = - jitter_buffer_.LastPacketTime(frame, &retransmitted); - if (last_packet_time_ms >= 0 && !retransmitted) { - // We don't want to include timestamps which have suffered from - // retransmission here, since we compensate with extra retransmission - // delay within the jitter estimate. - timing_->IncomingTimestamp(frame_timestamp, last_packet_time_ms); - } - } return frame; } diff --git a/modules/video_coding/receiver.h b/modules/video_coding/receiver.h index 64a157f05e..8f6b041a5a 100644 --- a/modules/video_coding/receiver.h +++ b/modules/video_coding/receiver.h @@ -20,7 +20,6 @@ #include "modules/video_coding/jitter_buffer.h" #include "modules/video_coding/packet.h" #include "modules/video_coding/timing.h" -#include "rtc_base/critical_section.h" namespace webrtc { diff --git a/modules/video_coding/receiver_unittest.cc b/modules/video_coding/receiver_unittest.cc index 2585056023..b2d5bc6f03 100644 --- a/modules/video_coding/receiver_unittest.cc +++ b/modules/video_coding/receiver_unittest.cc @@ -30,18 +30,14 @@ namespace webrtc { class TestVCMReceiver : public ::testing::Test { protected: TestVCMReceiver() - : clock_(new SimulatedClock(0)), - timing_(clock_.get()), - receiver_(&timing_, clock_.get()) { - stream_generator_.reset( - new StreamGenerator(0, clock_->TimeInMilliseconds())); - } - - virtual void SetUp() {} + : clock_(0), + timing_(&clock_), + receiver_(&timing_, &clock_), + stream_generator_(0, clock_.TimeInMilliseconds()) {} int32_t InsertPacket(int index) { VCMPacket packet; - bool packet_available = stream_generator_->GetPacket(&packet, index); + bool packet_available = stream_generator_.GetPacket(&packet, index); EXPECT_TRUE(packet_available); if (!packet_available) return kGeneralError; // Return here to avoid crashes below. 
@@ -50,7 +46,7 @@ class TestVCMReceiver : public ::testing::Test { int32_t InsertPacketAndPop(int index) { VCMPacket packet; - bool packet_available = stream_generator_->PopPacket(&packet, index); + bool packet_available = stream_generator_.PopPacket(&packet, index); EXPECT_TRUE(packet_available); if (!packet_available) return kGeneralError; // Return here to avoid crashes below. @@ -59,18 +55,18 @@ class TestVCMReceiver : public ::testing::Test { int32_t InsertFrame(VideoFrameType frame_type, bool complete) { int num_of_packets = complete ? 1 : 2; - stream_generator_->GenerateFrame( + stream_generator_.GenerateFrame( frame_type, (frame_type != VideoFrameType::kEmptyFrame) ? num_of_packets : 0, (frame_type == VideoFrameType::kEmptyFrame) ? 1 : 0, - clock_->TimeInMilliseconds()); + clock_.TimeInMilliseconds()); int32_t ret = InsertPacketAndPop(0); if (!complete) { // Drop the second packet. VCMPacket packet; - stream_generator_->PopPacket(&packet, 0); + stream_generator_.PopPacket(&packet, 0); } - clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs); + clock_.AdvanceTimeMilliseconds(kDefaultFramePeriodMs); return ret; } @@ -82,10 +78,10 @@ class TestVCMReceiver : public ::testing::Test { return true; } - std::unique_ptr clock_; + SimulatedClock clock_; VCMTiming timing_; VCMReceiver receiver_; - std::unique_ptr stream_generator_; + StreamGenerator stream_generator_; }; TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) { @@ -97,7 +93,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_Empty) { kMaxNonDecodableDuration); EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError); // Advance time until it's time to decode the key frame. 
- clock_->AdvanceTimeMilliseconds(kMinDelayMs); + clock_.AdvanceTimeMilliseconds(kMinDelayMs); EXPECT_TRUE(DecodeNextFrame()); bool request_key_frame = false; std::vector nack_list = receiver_.NackList(&request_key_frame); @@ -129,7 +125,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) { receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, kMaxNonDecodableDuration); timing_.set_min_playout_delay(kMinDelayMs); - int64_t key_frame_inserted = clock_->TimeInMilliseconds(); + int64_t key_frame_inserted = clock_.TimeInMilliseconds(); EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError); // Insert an incomplete frame. EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError); @@ -138,8 +134,8 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_OneIncomplete) { EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError); } // Advance time until it's time to decode the key frame. - clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() - - key_frame_inserted); + clock_.AdvanceTimeMilliseconds(kMinDelayMs - clock_.TimeInMilliseconds() - + key_frame_inserted); EXPECT_TRUE(DecodeNextFrame()); // Make sure we get a key frame request. bool request_key_frame = false; @@ -157,7 +153,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) { receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, kMaxNonDecodableDuration); timing_.set_min_playout_delay(kMinDelayMs); - int64_t key_frame_inserted = clock_->TimeInMilliseconds(); + int64_t key_frame_inserted = clock_.TimeInMilliseconds(); EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError); // Insert an incomplete frame. EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError); @@ -167,8 +163,8 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger) { EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, true), kNoError); } // Advance time until it's time to decode the key frame. 
- clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() - - key_frame_inserted); + clock_.AdvanceTimeMilliseconds(kMinDelayMs - clock_.TimeInMilliseconds() - + key_frame_inserted); EXPECT_TRUE(DecodeNextFrame()); // Make sure we don't get a key frame request since we haven't generated // enough frames. @@ -187,7 +183,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) { receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, kMaxNonDecodableDuration); timing_.set_min_playout_delay(kMinDelayMs); - int64_t key_frame_inserted = clock_->TimeInMilliseconds(); + int64_t key_frame_inserted = clock_.TimeInMilliseconds(); EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError); // Insert enough frames to have too long non-decodable sequence, except that // we don't have any losses. @@ -197,8 +193,8 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_NoTrigger2) { // Insert an incomplete frame. EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError); // Advance time until it's time to decode the key frame. - clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() - - key_frame_inserted); + clock_.AdvanceTimeMilliseconds(kMinDelayMs - clock_.TimeInMilliseconds() - + key_frame_inserted); EXPECT_TRUE(DecodeNextFrame()); // Make sure we don't get a key frame request since the non-decodable duration // is only one frame. @@ -217,7 +213,7 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) { receiver_.SetNackSettings(kMaxNackListSize, kMaxPacketAgeToNack, kMaxNonDecodableDuration); timing_.set_min_playout_delay(kMinDelayMs); - int64_t key_frame_inserted = clock_->TimeInMilliseconds(); + int64_t key_frame_inserted = clock_.TimeInMilliseconds(); EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError); // Insert an incomplete frame. 
EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameDelta, false), kNoError); @@ -227,8 +223,8 @@ TEST_F(TestVCMReceiver, NonDecodableDuration_KeyFrameAfterIncompleteFrames) { } EXPECT_GE(InsertFrame(VideoFrameType::kVideoFrameKey, true), kNoError); // Advance time until it's time to decode the key frame. - clock_->AdvanceTimeMilliseconds(kMinDelayMs - clock_->TimeInMilliseconds() - - key_frame_inserted); + clock_.AdvanceTimeMilliseconds(kMinDelayMs - clock_.TimeInMilliseconds() - + key_frame_inserted); EXPECT_TRUE(DecodeNextFrame()); // Make sure we don't get a key frame request since we have a key frame // in the list. @@ -367,7 +363,6 @@ class FrameInjectEvent : public EventWrapper { class VCMReceiverTimingTest : public ::testing::Test { protected: VCMReceiverTimingTest() - : clock_(&stream_generator_, &receiver_), stream_generator_(0, clock_.TimeInMilliseconds()), timing_(&clock_), diff --git a/modules/video_coding/rtp_frame_id_only_ref_finder.cc b/modules/video_coding/rtp_frame_id_only_ref_finder.cc new file mode 100644 index 0000000000..9f3d5bb296 --- /dev/null +++ b/modules/video_coding/rtp_frame_id_only_ref_finder.cc @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/video_coding/rtp_frame_id_only_ref_finder.h" + +#include + +#include "rtc_base/logging.h" + +namespace webrtc { + +RtpFrameReferenceFinder::ReturnVector RtpFrameIdOnlyRefFinder::ManageFrame( + std::unique_ptr frame, + int frame_id) { + frame->SetSpatialIndex(0); + frame->SetId(unwrapper_.Unwrap(frame_id & (kFrameIdLength - 1))); + frame->num_references = + frame->frame_type() == VideoFrameType::kVideoFrameKey ? 0 : 1; + frame->references[0] = frame->Id() - 1; + + RtpFrameReferenceFinder::ReturnVector res; + res.push_back(std::move(frame)); + return res; +} + +} // namespace webrtc diff --git a/modules/video_coding/rtp_frame_id_only_ref_finder.h b/modules/video_coding/rtp_frame_id_only_ref_finder.h new file mode 100644 index 0000000000..1df4870c5b --- /dev/null +++ b/modules/video_coding/rtp_frame_id_only_ref_finder.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_ +#define MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_ + +#include + +#include "absl/container/inlined_vector.h" +#include "modules/video_coding/frame_object.h" +#include "modules/video_coding/rtp_frame_reference_finder.h" +#include "rtc_base/numerics/sequence_number_util.h" + +namespace webrtc { + +class RtpFrameIdOnlyRefFinder { + public: + RtpFrameIdOnlyRefFinder() = default; + + RtpFrameReferenceFinder::ReturnVector ManageFrame( + std::unique_ptr frame, + int frame_id); + + private: + static constexpr int kFrameIdLength = 1 << 15; + SeqNumUnwrapper unwrapper_; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_RTP_FRAME_ID_ONLY_REF_FINDER_H_ diff --git a/modules/video_coding/rtp_frame_reference_finder.cc b/modules/video_coding/rtp_frame_reference_finder.cc index bdef991b8b..a44b76bf15 100644 --- a/modules/video_coding/rtp_frame_reference_finder.cc +++ b/modules/video_coding/rtp_frame_reference_finder.cc @@ -10,897 +10,180 @@ #include "modules/video_coding/rtp_frame_reference_finder.h" -#include -#include +#include -#include "absl/base/macros.h" #include "absl/types/variant.h" #include "modules/video_coding/frame_object.h" -#include "modules/video_coding/packet_buffer.h" -#include "rtc_base/checks.h" -#include "rtc_base/logging.h" +#include "modules/video_coding/rtp_frame_id_only_ref_finder.h" +#include "modules/video_coding/rtp_generic_ref_finder.h" +#include "modules/video_coding/rtp_seq_num_only_ref_finder.h" +#include "modules/video_coding/rtp_vp8_ref_finder.h" +#include "modules/video_coding/rtp_vp9_ref_finder.h" namespace webrtc { -namespace video_coding { - -RtpFrameReferenceFinder::RtpFrameReferenceFinder( - OnCompleteFrameCallback* frame_callback) - : RtpFrameReferenceFinder(frame_callback, 0) {} - -RtpFrameReferenceFinder::RtpFrameReferenceFinder( - OnCompleteFrameCallback* frame_callback, - int64_t picture_id_offset) - : last_picture_id_(-1), - current_ss_idx_(0), 
- cleared_to_seq_num_(-1), - frame_callback_(frame_callback), - picture_id_offset_(picture_id_offset) {} - -RtpFrameReferenceFinder::~RtpFrameReferenceFinder() = default; - -void RtpFrameReferenceFinder::ManageFrame( - std::unique_ptr frame) { - // If we have cleared past this frame, drop it. - if (cleared_to_seq_num_ != -1 && - AheadOf(cleared_to_seq_num_, frame->first_seq_num())) { - return; - } - - FrameDecision decision = ManageFrameInternal(frame.get()); - - switch (decision) { - case kStash: - if (stashed_frames_.size() > kMaxStashedFrames) - stashed_frames_.pop_back(); - stashed_frames_.push_front(std::move(frame)); - break; - case kHandOff: - HandOffFrame(std::move(frame)); - RetryStashedFrames(); - break; - case kDrop: - break; - } -} - -void RtpFrameReferenceFinder::RetryStashedFrames() { - bool complete_frame = false; - do { - complete_frame = false; - for (auto frame_it = stashed_frames_.begin(); - frame_it != stashed_frames_.end();) { - FrameDecision decision = ManageFrameInternal(frame_it->get()); - - switch (decision) { - case kStash: - ++frame_it; - break; - case kHandOff: - complete_frame = true; - HandOffFrame(std::move(*frame_it)); - ABSL_FALLTHROUGH_INTENDED; - case kDrop: - frame_it = stashed_frames_.erase(frame_it); - } - } - } while (complete_frame); -} - -void RtpFrameReferenceFinder::HandOffFrame( +namespace internal { +class RtpFrameReferenceFinderImpl { + public: + RtpFrameReferenceFinderImpl() = default; + + RtpFrameReferenceFinder::ReturnVector ManageFrame( + std::unique_ptr frame); + RtpFrameReferenceFinder::ReturnVector PaddingReceived(uint16_t seq_num); + void ClearTo(uint16_t seq_num); + + private: + using RefFinder = absl::variant; + + template + T& GetRefFinderAs(); + RefFinder ref_finder_; +}; + +RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinderImpl::ManageFrame( std::unique_ptr frame) { - frame->id.picture_id += picture_id_offset_; - for (size_t i = 0; i < frame->num_references; ++i) { - frame->references[i] += 
picture_id_offset_; - } - - frame_callback_->OnCompleteFrame(std::move(frame)); -} - -RtpFrameReferenceFinder::FrameDecision -RtpFrameReferenceFinder::ManageFrameInternal(RtpFrameObject* frame) { - if (const absl::optional& - generic_descriptor = frame->GetRtpVideoHeader().generic) { - return ManageFrameGeneric(frame, *generic_descriptor); - } - - switch (frame->codec_type()) { - case kVideoCodecVP8: - return ManageFrameVp8(frame); - case kVideoCodecVP9: - return ManageFrameVp9(frame); - case kVideoCodecH264: - return ManageFrameH264(frame); - case kVideoCodecGeneric: - if (auto* generic_header = absl::get_if( - &frame->GetRtpVideoHeader().video_type_header)) { - return ManageFramePidOrSeqNum(frame, generic_header->picture_id); - } - ABSL_FALLTHROUGH_INTENDED; - default: - return ManageFramePidOrSeqNum(frame, kNoPictureId); - } -} - -void RtpFrameReferenceFinder::PaddingReceived(uint16_t seq_num) { - auto clean_padding_to = - stashed_padding_.lower_bound(seq_num - kMaxPaddingAge); - stashed_padding_.erase(stashed_padding_.begin(), clean_padding_to); - stashed_padding_.insert(seq_num); - UpdateLastPictureIdWithPadding(seq_num); - RetryStashedFrames(); -} - -void RtpFrameReferenceFinder::ClearTo(uint16_t seq_num) { - cleared_to_seq_num_ = seq_num; - - auto it = stashed_frames_.begin(); - while (it != stashed_frames_.end()) { - if (AheadOf(cleared_to_seq_num_, (*it)->first_seq_num())) { - it = stashed_frames_.erase(it); - } else { - ++it; - } - } -} - -void RtpFrameReferenceFinder::UpdateLastPictureIdWithPadding(uint16_t seq_num) { - auto gop_seq_num_it = last_seq_num_gop_.upper_bound(seq_num); - - // If this padding packet "belongs" to a group of pictures that we don't track - // anymore, do nothing. - if (gop_seq_num_it == last_seq_num_gop_.begin()) - return; - --gop_seq_num_it; - - // Calculate the next contiuous sequence number and search for it in - // the padding packets we have stashed. 
- uint16_t next_seq_num_with_padding = gop_seq_num_it->second.second + 1; - auto padding_seq_num_it = - stashed_padding_.lower_bound(next_seq_num_with_padding); - - // While there still are padding packets and those padding packets are - // continuous, then advance the "last-picture-id-with-padding" and remove - // the stashed padding packet. - while (padding_seq_num_it != stashed_padding_.end() && - *padding_seq_num_it == next_seq_num_with_padding) { - gop_seq_num_it->second.second = next_seq_num_with_padding; - ++next_seq_num_with_padding; - padding_seq_num_it = stashed_padding_.erase(padding_seq_num_it); - } - - // In the case where the stream has been continuous without any new keyframes - // for a while there is a risk that new frames will appear to be older than - // the keyframe they belong to due to wrapping sequence number. In order - // to prevent this we advance the picture id of the keyframe every so often. - if (ForwardDiff(gop_seq_num_it->first, seq_num) > 10000) { - auto save = gop_seq_num_it->second; - last_seq_num_gop_.clear(); - last_seq_num_gop_[seq_num] = save; - } -} - -RtpFrameReferenceFinder::FrameDecision -RtpFrameReferenceFinder::ManageFrameGeneric( - RtpFrameObject* frame, - const RTPVideoHeader::GenericDescriptorInfo& descriptor) { - frame->id.picture_id = descriptor.frame_id; - frame->id.spatial_layer = descriptor.spatial_index; - - if (EncodedFrame::kMaxFrameReferences < descriptor.dependencies.size()) { - RTC_LOG(LS_WARNING) << "Too many dependencies in generic descriptor."; - return kDrop; - } - - frame->num_references = descriptor.dependencies.size(); - for (size_t i = 0; i < descriptor.dependencies.size(); ++i) - frame->references[i] = descriptor.dependencies[i]; - - return kHandOff; -} - -RtpFrameReferenceFinder::FrameDecision -RtpFrameReferenceFinder::ManageFramePidOrSeqNum(RtpFrameObject* frame, - int picture_id) { - // If |picture_id| is specified then we use that to set the frame references, - // otherwise we use sequence 
number. - if (picture_id != kNoPictureId) { - frame->id.picture_id = unwrapper_.Unwrap(picture_id & 0x7FFF); - frame->num_references = - frame->frame_type() == VideoFrameType::kVideoFrameKey ? 0 : 1; - frame->references[0] = frame->id.picture_id - 1; - return kHandOff; - } - - if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { - last_seq_num_gop_.insert(std::make_pair( - frame->last_seq_num(), - std::make_pair(frame->last_seq_num(), frame->last_seq_num()))); - } - - // We have received a frame but not yet a keyframe, stash this frame. - if (last_seq_num_gop_.empty()) - return kStash; - - // Clean up info for old keyframes but make sure to keep info - // for the last keyframe. - auto clean_to = last_seq_num_gop_.lower_bound(frame->last_seq_num() - 100); - for (auto it = last_seq_num_gop_.begin(); - it != clean_to && last_seq_num_gop_.size() > 1;) { - it = last_seq_num_gop_.erase(it); - } - - // Find the last sequence number of the last frame for the keyframe - // that this frame indirectly references. - auto seq_num_it = last_seq_num_gop_.upper_bound(frame->last_seq_num()); - if (seq_num_it == last_seq_num_gop_.begin()) { - RTC_LOG(LS_WARNING) << "Generic frame with packet range [" - << frame->first_seq_num() << ", " - << frame->last_seq_num() - << "] has no GoP, dropping frame."; - return kDrop; - } - seq_num_it--; - - // Make sure the packet sequence numbers are continuous, otherwise stash - // this frame. - uint16_t last_picture_id_gop = seq_num_it->second.first; - uint16_t last_picture_id_with_padding_gop = seq_num_it->second.second; - if (frame->frame_type() == VideoFrameType::kVideoFrameDelta) { - uint16_t prev_seq_num = frame->first_seq_num() - 1; - - if (prev_seq_num != last_picture_id_with_padding_gop) - return kStash; - } - - RTC_DCHECK(AheadOrAt(frame->last_seq_num(), seq_num_it->first)); - - // Since keyframes can cause reordering we can't simply assign the - // picture id according to some incrementing counter. 
- frame->id.picture_id = frame->last_seq_num(); - frame->num_references = - frame->frame_type() == VideoFrameType::kVideoFrameDelta; - frame->references[0] = rtp_seq_num_unwrapper_.Unwrap(last_picture_id_gop); - if (AheadOf(frame->id.picture_id, last_picture_id_gop)) { - seq_num_it->second.first = frame->id.picture_id; - seq_num_it->second.second = frame->id.picture_id; - } - - UpdateLastPictureIdWithPadding(frame->id.picture_id); - frame->id.picture_id = rtp_seq_num_unwrapper_.Unwrap(frame->id.picture_id); - return kHandOff; -} - -RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp8( - RtpFrameObject* frame) { - const RTPVideoHeader& video_header = frame->GetRtpVideoHeader(); - const RTPVideoHeaderVP8& codec_header = - absl::get(video_header.video_type_header); - - if (codec_header.pictureId == kNoPictureId || - codec_header.temporalIdx == kNoTemporalIdx || - codec_header.tl0PicIdx == kNoTl0PicIdx) { - return ManageFramePidOrSeqNum(frame, codec_header.pictureId); - } - - // Protect against corrupted packets with arbitrary large temporal idx. - if (codec_header.temporalIdx >= kMaxTemporalLayers) - return kDrop; - - frame->id.picture_id = codec_header.pictureId & 0x7FFF; - - if (last_picture_id_ == -1) - last_picture_id_ = frame->id.picture_id; - - // Clean up info about not yet received frames that are too old. - uint16_t old_picture_id = - Subtract(frame->id.picture_id, kMaxNotYetReceivedFrames); - auto clean_frames_to = not_yet_received_frames_.lower_bound(old_picture_id); - not_yet_received_frames_.erase(not_yet_received_frames_.begin(), - clean_frames_to); - // Avoid re-adding picture ids that were just erased. - if (AheadOf(old_picture_id, last_picture_id_)) { - last_picture_id_ = old_picture_id; - } - // Find if there has been a gap in fully received frames and save the picture - // id of those frames in |not_yet_received_frames_|. 
- if (AheadOf(frame->id.picture_id, last_picture_id_)) { - do { - last_picture_id_ = Add(last_picture_id_, 1); - not_yet_received_frames_.insert(last_picture_id_); - } while (last_picture_id_ != frame->id.picture_id); - } - - int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(codec_header.tl0PicIdx & 0xFF); - - // Clean up info for base layers that are too old. - int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxLayerInfo; - auto clean_layer_info_to = layer_info_.lower_bound(old_tl0_pic_idx); - layer_info_.erase(layer_info_.begin(), clean_layer_info_to); - - if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { - if (codec_header.temporalIdx != 0) { - return kDrop; - } - frame->num_references = 0; - layer_info_[unwrapped_tl0].fill(-1); - UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx); - return kHandOff; - } - - auto layer_info_it = layer_info_.find( - codec_header.temporalIdx == 0 ? unwrapped_tl0 - 1 : unwrapped_tl0); - - // If we don't have the base layer frame yet, stash this frame. - if (layer_info_it == layer_info_.end()) - return kStash; - - // A non keyframe base layer frame has been received, copy the layer info - // from the previous base layer frame and set a reference to the previous - // base layer frame. - if (codec_header.temporalIdx == 0) { - layer_info_it = - layer_info_.emplace(unwrapped_tl0, layer_info_it->second).first; - frame->num_references = 1; - int64_t last_pid_on_layer = layer_info_it->second[0]; - - // Is this an old frame that has already been used to update the state? If - // so, drop it. - if (AheadOrAt(last_pid_on_layer, - frame->id.picture_id)) { - return kDrop; - } - - frame->references[0] = last_pid_on_layer; - UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx); - return kHandOff; - } - - // Layer sync frame, this frame only references its base layer frame. 
- if (codec_header.layerSync) { - frame->num_references = 1; - int64_t last_pid_on_layer = layer_info_it->second[codec_header.temporalIdx]; - - // Is this an old frame that has already been used to update the state? If - // so, drop it. - if (last_pid_on_layer != -1 && - AheadOrAt(last_pid_on_layer, - frame->id.picture_id)) { - return kDrop; - } - - frame->references[0] = layer_info_it->second[0]; - UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx); - return kHandOff; - } - - // Find all references for this frame. - frame->num_references = 0; - for (uint8_t layer = 0; layer <= codec_header.temporalIdx; ++layer) { - // If we have not yet received a previous frame on this temporal layer, - // stash this frame. - if (layer_info_it->second[layer] == -1) - return kStash; - - // If the last frame on this layer is ahead of this frame it means that - // a layer sync frame has been received after this frame for the same - // base layer frame, drop this frame. - if (AheadOf(layer_info_it->second[layer], - frame->id.picture_id)) { - return kDrop; - } - - // If we have not yet received a frame between this frame and the referenced - // frame then we have to wait for that frame to be completed first. 
- auto not_received_frame_it = - not_yet_received_frames_.upper_bound(layer_info_it->second[layer]); - if (not_received_frame_it != not_yet_received_frames_.end() && - AheadOf(frame->id.picture_id, - *not_received_frame_it)) { - return kStash; - } - - if (!(AheadOf(frame->id.picture_id, - layer_info_it->second[layer]))) { - RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->id.picture_id - << " and packet range [" << frame->first_seq_num() - << ", " << frame->last_seq_num() - << "] already received, " - " dropping frame."; - return kDrop; - } - - ++frame->num_references; - frame->references[layer] = layer_info_it->second[layer]; - } - - UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx); - return kHandOff; -} - -void RtpFrameReferenceFinder::UpdateLayerInfoVp8(RtpFrameObject* frame, - int64_t unwrapped_tl0, - uint8_t temporal_idx) { - auto layer_info_it = layer_info_.find(unwrapped_tl0); - - // Update this layer info and newer. - while (layer_info_it != layer_info_.end()) { - if (layer_info_it->second[temporal_idx] != -1 && - AheadOf(layer_info_it->second[temporal_idx], - frame->id.picture_id)) { - // The frame was not newer, then no subsequent layer info have to be - // update. - break; - } - - layer_info_it->second[temporal_idx] = frame->id.picture_id; - ++unwrapped_tl0; - layer_info_it = layer_info_.find(unwrapped_tl0); - } - not_yet_received_frames_.erase(frame->id.picture_id); - - UnwrapPictureIds(frame); -} - -RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp9( - RtpFrameObject* frame) { const RTPVideoHeader& video_header = frame->GetRtpVideoHeader(); - const RTPVideoHeaderVP9& codec_header = - absl::get(video_header.video_type_header); - - if (codec_header.picture_id == kNoPictureId || - codec_header.temporal_idx == kNoTemporalIdx) { - return ManageFramePidOrSeqNum(frame, codec_header.picture_id); - } - - // Protect against corrupted packets with arbitrary large temporal idx. 
- if (codec_header.temporal_idx >= kMaxTemporalLayers || - codec_header.spatial_idx >= kMaxSpatialLayers) - return kDrop; - - frame->id.spatial_layer = codec_header.spatial_idx; - frame->inter_layer_predicted = codec_header.inter_layer_predicted; - frame->id.picture_id = codec_header.picture_id & 0x7FFF; - - if (last_picture_id_ == -1) - last_picture_id_ = frame->id.picture_id; - if (codec_header.flexible_mode) { - if (codec_header.num_ref_pics > EncodedFrame::kMaxFrameReferences) { - return kDrop; - } - frame->num_references = codec_header.num_ref_pics; - for (size_t i = 0; i < frame->num_references; ++i) { - frame->references[i] = Subtract(frame->id.picture_id, - codec_header.pid_diff[i]); - } - - UnwrapPictureIds(frame); - return kHandOff; + if (video_header.generic.has_value()) { + return GetRefFinderAs().ManageFrame( + std::move(frame), *video_header.generic); } - if (codec_header.tl0_pic_idx == kNoTl0PicIdx) { - RTC_LOG(LS_WARNING) << "TL0PICIDX is expected to be present in " - "non-flexible mode."; - return kDrop; - } - - GofInfo* info; - int64_t unwrapped_tl0 = - tl0_unwrapper_.Unwrap(codec_header.tl0_pic_idx & 0xFF); - if (codec_header.ss_data_available) { - if (codec_header.temporal_idx != 0) { - RTC_LOG(LS_WARNING) << "Received scalability structure on a non base " - "layer frame. 
Scalability structure ignored."; - } else { - if (codec_header.gof.num_frames_in_gof > kMaxVp9FramesInGof) { - return kDrop; - } - - for (size_t i = 0; i < codec_header.gof.num_frames_in_gof; ++i) { - if (codec_header.gof.num_ref_pics[i] > kMaxVp9RefPics) { - return kDrop; + switch (frame->codec_type()) { + case kVideoCodecVP8: { + const RTPVideoHeaderVP8& vp8_header = + absl::get(video_header.video_type_header); + + if (vp8_header.temporalIdx == kNoTemporalIdx || + vp8_header.tl0PicIdx == kNoTl0PicIdx) { + if (vp8_header.pictureId == kNoPictureId) { + return GetRefFinderAs().ManageFrame( + std::move(frame)); } - } - GofInfoVP9 gof = codec_header.gof; - if (gof.num_frames_in_gof == 0) { - RTC_LOG(LS_WARNING) << "Number of frames in GOF is zero. Assume " - "that stream has only one temporal layer."; - gof.SetGofInfoVP9(kTemporalStructureMode1); + return GetRefFinderAs().ManageFrame( + std::move(frame), vp8_header.pictureId); } - current_ss_idx_ = Add(current_ss_idx_, 1); - scalability_structures_[current_ss_idx_] = gof; - scalability_structures_[current_ss_idx_].pid_start = frame->id.picture_id; - gof_info_.emplace(unwrapped_tl0, - GofInfo(&scalability_structures_[current_ss_idx_], - frame->id.picture_id)); + return GetRefFinderAs().ManageFrame(std::move(frame)); } + case kVideoCodecVP9: { + const RTPVideoHeaderVP9& vp9_header = + absl::get(video_header.video_type_header); - const auto gof_info_it = gof_info_.find(unwrapped_tl0); - if (gof_info_it == gof_info_.end()) - return kStash; - - info = &gof_info_it->second; - - if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { - frame->num_references = 0; - FrameReceivedVp9(frame->id.picture_id, info); - UnwrapPictureIds(frame); - return kHandOff; - } - } else if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { - if (frame->id.spatial_layer == 0) { - RTC_LOG(LS_WARNING) << "Received keyframe without scalability structure"; - return kDrop; - } - const auto gof_info_it = gof_info_.find(unwrapped_tl0); - if 
(gof_info_it == gof_info_.end()) - return kStash; + if (vp9_header.temporal_idx == kNoTemporalIdx) { + if (vp9_header.picture_id == kNoPictureId) { + return GetRefFinderAs().ManageFrame( + std::move(frame)); + } - info = &gof_info_it->second; + return GetRefFinderAs().ManageFrame( + std::move(frame), vp9_header.picture_id); + } - if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { - frame->num_references = 0; - FrameReceivedVp9(frame->id.picture_id, info); - UnwrapPictureIds(frame); - return kHandOff; + return GetRefFinderAs().ManageFrame(std::move(frame)); } - } else { - auto gof_info_it = gof_info_.find( - (codec_header.temporal_idx == 0) ? unwrapped_tl0 - 1 : unwrapped_tl0); - - // Gof info for this frame is not available yet, stash this frame. - if (gof_info_it == gof_info_.end()) - return kStash; + case kVideoCodecGeneric: { + if (auto* generic_header = absl::get_if( + &video_header.video_type_header)) { + return GetRefFinderAs().ManageFrame( + std::move(frame), generic_header->picture_id); + } - if (codec_header.temporal_idx == 0) { - gof_info_it = gof_info_ - .emplace(unwrapped_tl0, GofInfo(gof_info_it->second.gof, - frame->id.picture_id)) - .first; + return GetRefFinderAs().ManageFrame( + std::move(frame)); } - - info = &gof_info_it->second; - } - - // Clean up info for base layers that are too old. - int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxGofSaved; - auto clean_gof_info_to = gof_info_.lower_bound(old_tl0_pic_idx); - gof_info_.erase(gof_info_.begin(), clean_gof_info_to); - - FrameReceivedVp9(frame->id.picture_id, info); - - // Make sure we don't miss any frame that could potentially have the - // up switch flag set. - if (MissingRequiredFrameVp9(frame->id.picture_id, *info)) - return kStash; - - if (codec_header.temporal_up_switch) - up_switch_.emplace(frame->id.picture_id, codec_header.temporal_idx); - - // Clean out old info about up switch frames. 
- uint16_t old_picture_id = Subtract(frame->id.picture_id, 50); - auto up_switch_erase_to = up_switch_.lower_bound(old_picture_id); - up_switch_.erase(up_switch_.begin(), up_switch_erase_to); - - size_t diff = ForwardDiff(info->gof->pid_start, - frame->id.picture_id); - size_t gof_idx = diff % info->gof->num_frames_in_gof; - - if (info->gof->num_ref_pics[gof_idx] > EncodedFrame::kMaxFrameReferences) { - return kDrop; - } - // Populate references according to the scalability structure. - frame->num_references = info->gof->num_ref_pics[gof_idx]; - for (size_t i = 0; i < frame->num_references; ++i) { - frame->references[i] = Subtract( - frame->id.picture_id, info->gof->pid_diff[gof_idx][i]); - - // If this is a reference to a frame earlier than the last up switch point, - // then ignore this reference. - if (UpSwitchInIntervalVp9(frame->id.picture_id, codec_header.temporal_idx, - frame->references[i])) { - --frame->num_references; + default: { + return GetRefFinderAs().ManageFrame( + std::move(frame)); } } - - // Override GOF references. - if (!codec_header.inter_pic_predicted) { - frame->num_references = 0; - } - - UnwrapPictureIds(frame); - return kHandOff; } -bool RtpFrameReferenceFinder::MissingRequiredFrameVp9(uint16_t picture_id, - const GofInfo& info) { - size_t diff = - ForwardDiff(info.gof->pid_start, picture_id); - size_t gof_idx = diff % info.gof->num_frames_in_gof; - size_t temporal_idx = info.gof->temporal_idx[gof_idx]; - - if (temporal_idx >= kMaxTemporalLayers) { - RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers - << " temporal " - "layers are supported."; - return true; - } - - // For every reference this frame has, check if there is a frame missing in - // the interval (|ref_pid|, |picture_id|) in any of the lower temporal - // layers. If so, we are missing a required frame. 
- uint8_t num_references = info.gof->num_ref_pics[gof_idx]; - for (size_t i = 0; i < num_references; ++i) { - uint16_t ref_pid = - Subtract(picture_id, info.gof->pid_diff[gof_idx][i]); - for (size_t l = 0; l < temporal_idx; ++l) { - auto missing_frame_it = missing_frames_for_layer_[l].lower_bound(ref_pid); - if (missing_frame_it != missing_frames_for_layer_[l].end() && - AheadOf(picture_id, *missing_frame_it)) { - return true; - } - } +RtpFrameReferenceFinder::ReturnVector +RtpFrameReferenceFinderImpl::PaddingReceived(uint16_t seq_num) { + if (auto* ref_finder = absl::get_if(&ref_finder_)) { + return ref_finder->PaddingReceived(seq_num); } - return false; + return {}; } -void RtpFrameReferenceFinder::FrameReceivedVp9(uint16_t picture_id, - GofInfo* info) { - int last_picture_id = info->last_picture_id; - size_t gof_size = std::min(info->gof->num_frames_in_gof, kMaxVp9FramesInGof); - - // If there is a gap, find which temporal layer the missing frames - // belong to and add the frame as missing for that temporal layer. - // Otherwise, remove this frame from the set of missing frames. 
- if (AheadOf(picture_id, last_picture_id)) { - size_t diff = ForwardDiff(info->gof->pid_start, - last_picture_id); - size_t gof_idx = diff % gof_size; - - last_picture_id = Add(last_picture_id, 1); - while (last_picture_id != picture_id) { - gof_idx = (gof_idx + 1) % gof_size; - RTC_CHECK(gof_idx < kMaxVp9FramesInGof); - - size_t temporal_idx = info->gof->temporal_idx[gof_idx]; - if (temporal_idx >= kMaxTemporalLayers) { - RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers - << " temporal " - "layers are supported."; - return; - } - - missing_frames_for_layer_[temporal_idx].insert(last_picture_id); - last_picture_id = Add(last_picture_id, 1); +void RtpFrameReferenceFinderImpl::ClearTo(uint16_t seq_num) { + struct ClearToVisitor { + void operator()(absl::monostate& ref_finder) {} + void operator()(RtpGenericFrameRefFinder& ref_finder) {} + void operator()(RtpFrameIdOnlyRefFinder& ref_finder) {} + void operator()(RtpSeqNumOnlyRefFinder& ref_finder) { + ref_finder.ClearTo(seq_num); } - - info->last_picture_id = last_picture_id; - } else { - size_t diff = - ForwardDiff(info->gof->pid_start, picture_id); - size_t gof_idx = diff % gof_size; - RTC_CHECK(gof_idx < kMaxVp9FramesInGof); - - size_t temporal_idx = info->gof->temporal_idx[gof_idx]; - if (temporal_idx >= kMaxTemporalLayers) { - RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers - << " temporal " - "layers are supported."; - return; + void operator()(RtpVp8RefFinder& ref_finder) { + ref_finder.ClearTo(seq_num); + } + void operator()(RtpVp9RefFinder& ref_finder) { + ref_finder.ClearTo(seq_num); } + uint16_t seq_num; + }; - missing_frames_for_layer_[temporal_idx].erase(picture_id); - } + absl::visit(ClearToVisitor{seq_num}, ref_finder_); } -bool RtpFrameReferenceFinder::UpSwitchInIntervalVp9(uint16_t picture_id, - uint8_t temporal_idx, - uint16_t pid_ref) { - for (auto up_switch_it = up_switch_.upper_bound(pid_ref); - up_switch_it != up_switch_.end() && - AheadOf(picture_id, up_switch_it->first); - 
++up_switch_it) { - if (up_switch_it->second < temporal_idx) - return true; +template +T& RtpFrameReferenceFinderImpl::GetRefFinderAs() { + if (auto* ref_finder = absl::get_if(&ref_finder_)) { + return *ref_finder; } - - return false; -} - -void RtpFrameReferenceFinder::UnwrapPictureIds(RtpFrameObject* frame) { - for (size_t i = 0; i < frame->num_references; ++i) - frame->references[i] = unwrapper_.Unwrap(frame->references[i]); - frame->id.picture_id = unwrapper_.Unwrap(frame->id.picture_id); + return ref_finder_.emplace(); } -RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameH264( - RtpFrameObject* frame) { - const FrameMarking& rtp_frame_marking = frame->GetFrameMarking(); - - uint8_t tid = rtp_frame_marking.temporal_id; - bool blSync = rtp_frame_marking.base_layer_sync; - - if (tid == kNoTemporalIdx) - return ManageFramePidOrSeqNum(std::move(frame), kNoPictureId); - - // Protect against corrupted packets with arbitrary large temporal idx. - if (tid >= kMaxTemporalLayers) - return kDrop; - - frame->id.picture_id = frame->last_seq_num(); - - if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { - // For H264, use last_seq_num_gop_ to simply store last picture id - // as a pair of unpadded and padded sequence numbers. - if (last_seq_num_gop_.empty()) { - last_seq_num_gop_.insert(std::make_pair( - 0, std::make_pair(frame->id.picture_id, frame->id.picture_id))); - } - } - - // Stash if we have no keyframe yet. - if (last_seq_num_gop_.empty()) - return kStash; - - // Check for gap in sequence numbers. Store in |not_yet_received_seq_num_|. 
- if (frame->frame_type() == VideoFrameType::kVideoFrameDelta) { - uint16_t last_pic_id_padded = last_seq_num_gop_.begin()->second.second; - if (AheadOf(frame->id.picture_id, last_pic_id_padded)) { - do { - last_pic_id_padded = last_pic_id_padded + 1; - not_yet_received_seq_num_.insert(last_pic_id_padded); - } while (last_pic_id_padded != frame->id.picture_id); - } - } - - int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(rtp_frame_marking.tl0_pic_idx); - - // Clean up info for base layers that are too old. - int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxLayerInfo; - auto clean_layer_info_to = layer_info_.lower_bound(old_tl0_pic_idx); - layer_info_.erase(layer_info_.begin(), clean_layer_info_to); +} // namespace internal - // Clean up info about not yet received frames that are too old. - uint16_t old_picture_id = frame->id.picture_id - kMaxNotYetReceivedFrames * 2; - auto clean_frames_to = not_yet_received_seq_num_.lower_bound(old_picture_id); - not_yet_received_seq_num_.erase(not_yet_received_seq_num_.begin(), - clean_frames_to); +RtpFrameReferenceFinder::RtpFrameReferenceFinder() + : RtpFrameReferenceFinder(0) {} - if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { - frame->num_references = 0; - layer_info_[unwrapped_tl0].fill(-1); - UpdateDataH264(frame, unwrapped_tl0, tid); - return kHandOff; - } - - auto layer_info_it = - layer_info_.find(tid == 0 ? unwrapped_tl0 - 1 : unwrapped_tl0); - - // Stash if we have no base layer frame yet. - if (layer_info_it == layer_info_.end()) - return kStash; - - // Base layer frame. Copy layer info from previous base layer frame. - if (tid == 0) { - layer_info_it = - layer_info_.insert(std::make_pair(unwrapped_tl0, layer_info_it->second)) - .first; - frame->num_references = 1; - frame->references[0] = layer_info_it->second[0]; - UpdateDataH264(frame, unwrapped_tl0, tid); - return kHandOff; - } - - // This frame only references its base layer frame. 
- if (blSync) { - frame->num_references = 1; - frame->references[0] = layer_info_it->second[0]; - UpdateDataH264(frame, unwrapped_tl0, tid); - return kHandOff; - } - - // Find all references for general frame. - frame->num_references = 0; - for (uint8_t layer = 0; layer <= tid; ++layer) { - // Stash if we have not yet received frames on this temporal layer. - if (layer_info_it->second[layer] == -1) - return kStash; - - // Drop if the last frame on this layer is ahead of this frame. A layer sync - // frame was received after this frame for the same base layer frame. - uint16_t last_frame_in_layer = layer_info_it->second[layer]; - if (AheadOf(last_frame_in_layer, frame->id.picture_id)) - return kDrop; - - // Stash and wait for missing frame between this frame and the reference - auto not_received_seq_num_it = - not_yet_received_seq_num_.upper_bound(last_frame_in_layer); - if (not_received_seq_num_it != not_yet_received_seq_num_.end() && - AheadOf(frame->id.picture_id, *not_received_seq_num_it)) { - return kStash; - } +RtpFrameReferenceFinder::RtpFrameReferenceFinder( + int64_t picture_id_offset) + : picture_id_offset_(picture_id_offset), + impl_(std::make_unique()) {} - if (!(AheadOf(frame->id.picture_id, last_frame_in_layer))) { - RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->id.picture_id - << " and packet range [" << frame->first_seq_num() - << ", " << frame->last_seq_num() - << "] already received, " - " dropping frame."; - return kDrop; - } +RtpFrameReferenceFinder::~RtpFrameReferenceFinder() = default; - ++frame->num_references; - frame->references[layer] = last_frame_in_layer; +RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinder::ManageFrame( + std::unique_ptr frame) { + // If we have cleared past this frame, drop it. 
+ if (cleared_to_seq_num_ != -1 && + AheadOf(cleared_to_seq_num_, frame->first_seq_num())) { + return {}; } - UpdateDataH264(frame, unwrapped_tl0, tid); - return kHandOff; + auto frames = impl_->ManageFrame(std::move(frame)); + AddPictureIdOffset(frames); + return frames; } -void RtpFrameReferenceFinder::UpdateLastPictureIdWithPaddingH264() { - auto seq_num_it = last_seq_num_gop_.begin(); - - // Check if next sequence number is in a stashed padding packet. - uint16_t next_padded_seq_num = seq_num_it->second.second + 1; - auto padding_seq_num_it = stashed_padding_.lower_bound(next_padded_seq_num); - - // Check for more consecutive padding packets to increment - // the "last-picture-id-with-padding" and remove the stashed packets. - while (padding_seq_num_it != stashed_padding_.end() && - *padding_seq_num_it == next_padded_seq_num) { - seq_num_it->second.second = next_padded_seq_num; - ++next_padded_seq_num; - padding_seq_num_it = stashed_padding_.erase(padding_seq_num_it); - } +RtpFrameReferenceFinder::ReturnVector RtpFrameReferenceFinder::PaddingReceived( + uint16_t seq_num) { + auto frames = impl_->PaddingReceived(seq_num); + AddPictureIdOffset(frames); + return frames; } -void RtpFrameReferenceFinder::UpdateLayerInfoH264(RtpFrameObject* frame, - int64_t unwrapped_tl0, - uint8_t temporal_idx) { - auto layer_info_it = layer_info_.find(unwrapped_tl0); - - // Update this layer info and newer. - while (layer_info_it != layer_info_.end()) { - if (layer_info_it->second[temporal_idx] != -1 && - AheadOf(layer_info_it->second[temporal_idx], - frame->id.picture_id)) { - // Not a newer frame. No subsequent layer info needs update. 
- break; - } - - layer_info_it->second[temporal_idx] = frame->id.picture_id; - ++unwrapped_tl0; - layer_info_it = layer_info_.find(unwrapped_tl0); - } - - for (size_t i = 0; i < frame->num_references; ++i) - frame->references[i] = rtp_seq_num_unwrapper_.Unwrap(frame->references[i]); - frame->id.picture_id = rtp_seq_num_unwrapper_.Unwrap(frame->id.picture_id); +void RtpFrameReferenceFinder::ClearTo(uint16_t seq_num) { + cleared_to_seq_num_ = seq_num; + impl_->ClearTo(seq_num); } -void RtpFrameReferenceFinder::UpdateDataH264(RtpFrameObject* frame, - int64_t unwrapped_tl0, - uint8_t temporal_idx) { - // Update last_seq_num_gop_ entry for last picture id. - auto seq_num_it = last_seq_num_gop_.begin(); - uint16_t last_pic_id = seq_num_it->second.first; - if (AheadOf(frame->id.picture_id, last_pic_id)) { - seq_num_it->second.first = frame->id.picture_id; - seq_num_it->second.second = frame->id.picture_id; - } - UpdateLastPictureIdWithPaddingH264(); - - UpdateLayerInfoH264(frame, unwrapped_tl0, temporal_idx); - - // Remove any current packets from |not_yet_received_seq_num_|. 
- uint16_t last_seq_num_padded = seq_num_it->second.second; - for (uint16_t n = frame->first_seq_num(); AheadOrAt(last_seq_num_padded, n); - ++n) { - not_yet_received_seq_num_.erase(n); +void RtpFrameReferenceFinder::AddPictureIdOffset(ReturnVector& frames) { + for (auto& frame : frames) { + frame->SetId(frame->Id() + picture_id_offset_); + for (size_t i = 0; i < frame->num_references; ++i) { + frame->references[i] += picture_id_offset_; + } } } -} // namespace video_coding } // namespace webrtc diff --git a/modules/video_coding/rtp_frame_reference_finder.h b/modules/video_coding/rtp_frame_reference_finder.h index d9c7c72d1e..d2447773a3 100644 --- a/modules/video_coding/rtp_frame_reference_finder.h +++ b/modules/video_coding/rtp_frame_reference_finder.h @@ -11,215 +11,50 @@ #ifndef MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_ #define MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_ -#include -#include -#include #include -#include -#include -#include "modules/include/module_common_types_public.h" -#include "modules/rtp_rtcp/source/rtp_video_header.h" -#include "modules/video_coding/codecs/vp9/include/vp9_globals.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/numerics/sequence_number_util.h" -#include "rtc_base/thread_annotations.h" +#include "modules/video_coding/frame_object.h" namespace webrtc { -namespace video_coding { - -class EncodedFrame; -class RtpFrameObject; - -// A complete frame is a frame which has received all its packets and all its -// references are known. 
-class OnCompleteFrameCallback { - public: - virtual ~OnCompleteFrameCallback() {} - virtual void OnCompleteFrame(std::unique_ptr frame) = 0; -}; +namespace internal { +class RtpFrameReferenceFinderImpl; +} // namespace internal class RtpFrameReferenceFinder { public: - explicit RtpFrameReferenceFinder(OnCompleteFrameCallback* frame_callback); - explicit RtpFrameReferenceFinder(OnCompleteFrameCallback* frame_callback, - int64_t picture_id_offset); + using ReturnVector = absl::InlinedVector, 3>; + + RtpFrameReferenceFinder(); + explicit RtpFrameReferenceFinder(int64_t picture_id_offset); ~RtpFrameReferenceFinder(); - // Manage this frame until: - // - We have all information needed to determine its references, after - // which |frame_callback_| is called with the completed frame, or - // - We have too many stashed frames (determined by |kMaxStashedFrames|) - // so we drop this frame, or - // - It gets cleared by ClearTo, which also means we drop it. - void ManageFrame(std::unique_ptr frame); + // The RtpFrameReferenceFinder will hold onto the frame until: + // - the required information to determine its references has been received, + // in which case it (and possibly other) frames are returned, or + // - There are too many stashed frames (determined by |kMaxStashedFrames|), + // in which case it gets dropped, or + // - It gets cleared by ClearTo, in which case its dropped. + // - The frame is old, in which case it also gets dropped. + ReturnVector ManageFrame(std::unique_ptr frame); // Notifies that padding has been received, which the reference finder // might need to calculate the references of a frame. - void PaddingReceived(uint16_t seq_num); + ReturnVector PaddingReceived(uint16_t seq_num); // Clear all stashed frames that include packets older than |seq_num|. 
void ClearTo(uint16_t seq_num); private: - static const uint16_t kPicIdLength = 1 << 15; - static const uint8_t kMaxTemporalLayers = 5; - static const int kMaxLayerInfo = 50; - static const int kMaxStashedFrames = 100; - static const int kMaxNotYetReceivedFrames = 100; - static const int kMaxGofSaved = 50; - static const int kMaxPaddingAge = 100; - - enum FrameDecision { kStash, kHandOff, kDrop }; - - struct GofInfo { - GofInfo(GofInfoVP9* gof, uint16_t last_picture_id) - : gof(gof), last_picture_id(last_picture_id) {} - GofInfoVP9* gof; - uint16_t last_picture_id; - }; - - // Find the relevant group of pictures and update its "last-picture-id-with - // padding" sequence number. - void UpdateLastPictureIdWithPadding(uint16_t seq_num); - - // Retry stashed frames until no more complete frames are found. - void RetryStashedFrames(); - - void HandOffFrame(std::unique_ptr frame); - - FrameDecision ManageFrameInternal(RtpFrameObject* frame); - - FrameDecision ManageFrameGeneric( - RtpFrameObject* frame, - const RTPVideoHeader::GenericDescriptorInfo& descriptor); - - // Find references for frames with no or very limited information in the - // descriptor. If |picture_id| is unspecified then packet sequence numbers - // will be used to determine the references of the frames. - FrameDecision ManageFramePidOrSeqNum(RtpFrameObject* frame, int picture_id); - - // Find references for Vp8 frames - FrameDecision ManageFrameVp8(RtpFrameObject* frame); - - // Updates necessary layer info state used to determine frame references for - // Vp8. - void UpdateLayerInfoVp8(RtpFrameObject* frame, - int64_t unwrapped_tl0, - uint8_t temporal_idx); - - // Find references for Vp9 frames - FrameDecision ManageFrameVp9(RtpFrameObject* frame); - - // Check if we are missing a frame necessary to determine the references - // for this frame. - bool MissingRequiredFrameVp9(uint16_t picture_id, const GofInfo& info); - - // Updates which frames that have been received. 
If there is a gap, - // missing frames will be added to |missing_frames_for_layer_| or - // if this is an already missing frame then it will be removed. - void FrameReceivedVp9(uint16_t picture_id, GofInfo* info); - - // Check if there is a frame with the up-switch flag set in the interval - // (|pid_ref|, |picture_id|) with temporal layer smaller than |temporal_idx|. - bool UpSwitchInIntervalVp9(uint16_t picture_id, - uint8_t temporal_idx, - uint16_t pid_ref); - - // Unwrap |frame|s picture id and its references to 16 bits. - void UnwrapPictureIds(RtpFrameObject* frame); - - // Find references for H264 frames - FrameDecision ManageFrameH264(RtpFrameObject* frame); - - // Update "last-picture-id-with-padding" sequence number for H264. - void UpdateLastPictureIdWithPaddingH264(); - - // Update H264 layer info state used to determine frame references. - void UpdateLayerInfoH264(RtpFrameObject* frame, - int64_t unwrapped_tl0, - uint8_t temporal_idx); - - // Update H264 state for decodeable frames. - void UpdateDataH264(RtpFrameObject* frame, - int64_t unwrapped_tl0, - uint8_t temporal_idx); - - // For every group of pictures, hold two sequence numbers. The first being - // the sequence number of the last packet of the last completed frame, and - // the second being the sequence number of the last packet of the last - // completed frame advanced by any potential continuous packets of padding. - std::map, - DescendingSeqNumComp> - last_seq_num_gop_; - - // Save the last picture id in order to detect when there is a gap in frames - // that have not yet been fully received. - int last_picture_id_; - - // Padding packets that have been received but that are not yet continuous - // with any group of pictures. - std::set> stashed_padding_; - - // Frames earlier than the last received frame that have not yet been - // fully received. 
- std::set> - not_yet_received_frames_; - - // Sequence numbers of frames earlier than the last received frame that - // have not yet been fully received. - std::set> not_yet_received_seq_num_; - - // Frames that have been fully received but didn't have all the information - // needed to determine their references. - std::deque> stashed_frames_; - - // Holds the information about the last completed frame for a given temporal - // layer given an unwrapped Tl0 picture index. - std::map> layer_info_; - - // Where the current scalability structure is in the - // |scalability_structures_| array. - uint8_t current_ss_idx_; - - // Holds received scalability structures. - std::array scalability_structures_; - - // Holds the the Gof information for a given unwrapped TL0 picture index. - std::map gof_info_; - - // Keep track of which picture id and which temporal layer that had the - // up switch flag set. - std::map> - up_switch_; - - // For every temporal layer, keep a set of which frames that are missing. - std::array>, - kMaxTemporalLayers> - missing_frames_for_layer_; - - // How far frames have been cleared by sequence number. A frame will be - // cleared if it contains a packet with a sequence number older than - // |cleared_to_seq_num_|. - int cleared_to_seq_num_; - - OnCompleteFrameCallback* frame_callback_; - - // Unwrapper used to unwrap generic RTP streams. In a generic stream we derive - // a picture id from the packet sequence number. - SeqNumUnwrapper rtp_seq_num_unwrapper_; - - // Unwrapper used to unwrap VP8/VP9 streams which have their picture id - // specified. - SeqNumUnwrapper unwrapper_; - - SeqNumUnwrapper tl0_unwrapper_; + void AddPictureIdOffset(ReturnVector& frames); + // How far frames have been cleared out of the buffer by RTP sequence number. + // A frame will be cleared if it contains a packet with a sequence number + // older than |cleared_to_seq_num_|. 
+ int cleared_to_seq_num_ = -1; const int64_t picture_id_offset_; + std::unique_ptr impl_; }; -} // namespace video_coding } // namespace webrtc #endif // MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_ diff --git a/modules/video_coding/rtp_frame_reference_finder_unittest.cc b/modules/video_coding/rtp_frame_reference_finder_unittest.cc index 9ded6bcb96..a5b0fc49ce 100644 --- a/modules/video_coding/rtp_frame_reference_finder_unittest.cc +++ b/modules/video_coding/rtp_frame_reference_finder_unittest.cc @@ -24,7 +24,6 @@ #include "test/gtest.h" namespace webrtc { -namespace video_coding { namespace { std::unique_ptr CreateFrame( @@ -32,13 +31,11 @@ std::unique_ptr CreateFrame( uint16_t seq_num_end, bool keyframe, VideoCodecType codec, - const RTPVideoTypeHeader& video_type_header, - const FrameMarking& frame_markings) { + const RTPVideoTypeHeader& video_type_header) { RTPVideoHeader video_header; video_header.frame_type = keyframe ? VideoFrameType::kVideoFrameKey : VideoFrameType::kVideoFrameDelta; video_header.video_type_header = video_type_header; - video_header.frame_marking = frame_markings; // clang-format off return std::make_unique( @@ -63,28 +60,29 @@ std::unique_ptr CreateFrame( } } // namespace -class TestRtpFrameReferenceFinder : public ::testing::Test, - public OnCompleteFrameCallback { +class TestRtpFrameReferenceFinder : public ::testing::Test { protected: TestRtpFrameReferenceFinder() : rand_(0x8739211), - reference_finder_(new RtpFrameReferenceFinder(this)), + reference_finder_(std::make_unique()), frames_from_callback_(FrameComp()) {} uint16_t Rand() { return rand_.Rand(); } - void OnCompleteFrame(std::unique_ptr frame) override { - int64_t pid = frame->id.picture_id; - uint16_t sidx = frame->id.spatial_layer; - auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx)); - if (frame_it != frames_from_callback_.end()) { - ADD_FAILURE() << "Already received frame with (pid:sidx): (" << pid << ":" - << sidx << ")"; - return; - } + void 
OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frames) { + for (auto& frame : frames) { + int64_t pid = frame->Id(); + uint16_t sidx = *frame->SpatialIndex(); + auto frame_it = frames_from_callback_.find(std::make_pair(pid, sidx)); + if (frame_it != frames_from_callback_.end()) { + ADD_FAILURE() << "Already received frame with (pid:sidx): (" << pid + << ":" << sidx << ")"; + return; + } - frames_from_callback_.insert( - std::make_pair(std::make_pair(pid, sidx), std::move(frame))); + frames_from_callback_.insert( + std::make_pair(std::make_pair(pid, sidx), std::move(frame))); + } } void InsertGeneric(uint16_t seq_num_start, @@ -92,101 +90,20 @@ class TestRtpFrameReferenceFinder : public ::testing::Test, bool keyframe) { std::unique_ptr frame = CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecGeneric, - RTPVideoTypeHeader(), FrameMarking()); + RTPVideoTypeHeader()); - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } - void InsertVp8(uint16_t seq_num_start, - uint16_t seq_num_end, - bool keyframe, - int32_t pid = kNoPictureId, - uint8_t tid = kNoTemporalIdx, - int32_t tl0 = kNoTl0PicIdx, - bool sync = false) { - RTPVideoHeaderVP8 vp8_header{}; - vp8_header.pictureId = pid % (1 << 15); - vp8_header.temporalIdx = tid; - vp8_header.tl0PicIdx = tl0; - vp8_header.layerSync = sync; - + void InsertH264(uint16_t seq_num_start, uint16_t seq_num_end, bool keyframe) { std::unique_ptr frame = - CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecVP8, - vp8_header, FrameMarking()); - - reference_finder_->ManageFrame(std::move(frame)); - } - - void InsertVp9Gof(uint16_t seq_num_start, - uint16_t seq_num_end, - bool keyframe, - int32_t pid = kNoPictureId, - uint8_t sid = kNoSpatialIdx, - uint8_t tid = kNoTemporalIdx, - int32_t tl0 = kNoTl0PicIdx, - bool up_switch = false, - bool inter_pic_predicted = true, - GofInfoVP9* ss = nullptr) { - RTPVideoHeaderVP9 vp9_header{}; - 
vp9_header.flexible_mode = false; - vp9_header.picture_id = pid % (1 << 15); - vp9_header.temporal_idx = tid; - vp9_header.spatial_idx = sid; - vp9_header.tl0_pic_idx = tl0; - vp9_header.temporal_up_switch = up_switch; - vp9_header.inter_pic_predicted = inter_pic_predicted && !keyframe; - if (ss != nullptr) { - vp9_header.ss_data_available = true; - vp9_header.gof = *ss; - } - - std::unique_ptr frame = - CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecVP9, - vp9_header, FrameMarking()); - - reference_finder_->ManageFrame(std::move(frame)); - } - - void InsertVp9Flex(uint16_t seq_num_start, - uint16_t seq_num_end, - bool keyframe, - int32_t pid = kNoPictureId, - uint8_t sid = kNoSpatialIdx, - uint8_t tid = kNoTemporalIdx, - bool inter = false, - std::vector refs = std::vector()) { - RTPVideoHeaderVP9 vp9_header{}; - vp9_header.inter_layer_predicted = inter; - vp9_header.flexible_mode = true; - vp9_header.picture_id = pid % (1 << 15); - vp9_header.temporal_idx = tid; - vp9_header.spatial_idx = sid; - vp9_header.tl0_pic_idx = kNoTl0PicIdx; - vp9_header.num_ref_pics = refs.size(); - for (size_t i = 0; i < refs.size(); ++i) - vp9_header.pid_diff[i] = refs[i]; - - std::unique_ptr frame = - CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecVP9, - vp9_header, FrameMarking()); - reference_finder_->ManageFrame(std::move(frame)); + CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecH264, + RTPVideoTypeHeader()); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } - void InsertH264(uint16_t seq_num_start, - uint16_t seq_num_end, - bool keyframe, - uint8_t tid = kNoTemporalIdx, - int32_t tl0 = kNoTl0PicIdx, - bool sync = false) { - FrameMarking frame_marking{}; - frame_marking.temporal_id = tid; - frame_marking.tl0_pic_idx = tl0; - frame_marking.base_layer_sync = sync; - - std::unique_ptr frame = - CreateFrame(seq_num_start, seq_num_end, keyframe, kVideoCodecH264, - RTPVideoTypeHeader(), frame_marking); - 
reference_finder_->ManageFrame(std::move(frame)); + void InsertPadding(uint16_t seq_num) { + OnCompleteFrames(reference_finder_->PaddingReceived(seq_num)); } // Check if a frame with picture id |pid| and spatial index |sidx| has been @@ -219,16 +136,6 @@ class TestRtpFrameReferenceFinder : public ::testing::Test, CheckReferences(pid, 0, refs...); } - template - void CheckReferencesVp8(int64_t pid, T... refs) const { - CheckReferences(pid, 0, refs...); - } - - template - void CheckReferencesVp9(int64_t pid, uint8_t sidx, T... refs) const { - CheckReferences(pid, sidx, refs...); - } - template void CheckReferencesH264(int64_t pid, T... refs) const { CheckReferences(pid, 0, refs...); @@ -263,7 +170,7 @@ TEST_F(TestRtpFrameReferenceFinder, PaddingPackets) { InsertGeneric(sn, sn, true); InsertGeneric(sn + 2, sn + 2, false); EXPECT_EQ(1UL, frames_from_callback_.size()); - reference_finder_->PaddingReceived(sn + 1); + InsertPadding(sn + 1); EXPECT_EQ(2UL, frames_from_callback_.size()); } @@ -271,8 +178,8 @@ TEST_F(TestRtpFrameReferenceFinder, PaddingPacketsReordered) { uint16_t sn = Rand(); InsertGeneric(sn, sn, true); - reference_finder_->PaddingReceived(sn + 1); - reference_finder_->PaddingReceived(sn + 4); + InsertPadding(sn + 1); + InsertPadding(sn + 4); InsertGeneric(sn + 2, sn + 3, false); EXPECT_EQ(2UL, frames_from_callback_.size()); @@ -284,12 +191,12 @@ TEST_F(TestRtpFrameReferenceFinder, PaddingPacketsReorderedMultipleKeyframes) { uint16_t sn = Rand(); InsertGeneric(sn, sn, true); - reference_finder_->PaddingReceived(sn + 1); - reference_finder_->PaddingReceived(sn + 4); + InsertPadding(sn + 1); + InsertPadding(sn + 4); InsertGeneric(sn + 2, sn + 3, false); InsertGeneric(sn + 5, sn + 5, true); - reference_finder_->PaddingReceived(sn + 6); - reference_finder_->PaddingReceived(sn + 9); + InsertPadding(sn + 6); + InsertPadding(sn + 9); InsertGeneric(sn + 7, sn + 8, false); EXPECT_EQ(4UL, frames_from_callback_.size()); @@ -308,12 +215,6 @@ 
TEST_F(TestRtpFrameReferenceFinder, AdvanceSavedKeyframe) { EXPECT_EQ(6UL, frames_from_callback_.size()); } -TEST_F(TestRtpFrameReferenceFinder, AdvanceSavedKeyframeBigJump) { - InsertVp9Flex(0, 0, true); - InsertVp9Flex(1, 1, true); - reference_finder_->PaddingReceived(32768); -} - TEST_F(TestRtpFrameReferenceFinder, ClearTo) { uint16_t sn = Rand(); @@ -332,1106 +233,6 @@ TEST_F(TestRtpFrameReferenceFinder, ClearTo) { EXPECT_EQ(3UL, frames_from_callback_.size()); } -TEST_F(TestRtpFrameReferenceFinder, Vp8NoPictureId) { - uint16_t sn = Rand(); - - InsertVp8(sn, sn + 2, true); - ASSERT_EQ(1UL, frames_from_callback_.size()); - - InsertVp8(sn + 3, sn + 4, false); - ASSERT_EQ(2UL, frames_from_callback_.size()); - - InsertVp8(sn + 5, sn + 8, false); - ASSERT_EQ(3UL, frames_from_callback_.size()); - - InsertVp8(sn + 9, sn + 9, false); - ASSERT_EQ(4UL, frames_from_callback_.size()); - - InsertVp8(sn + 10, sn + 11, false); - ASSERT_EQ(5UL, frames_from_callback_.size()); - - InsertVp8(sn + 12, sn + 12, true); - ASSERT_EQ(6UL, frames_from_callback_.size()); - - InsertVp8(sn + 13, sn + 17, false); - ASSERT_EQ(7UL, frames_from_callback_.size()); - - InsertVp8(sn + 18, sn + 18, false); - ASSERT_EQ(8UL, frames_from_callback_.size()); - - InsertVp8(sn + 19, sn + 20, false); - ASSERT_EQ(9UL, frames_from_callback_.size()); - - InsertVp8(sn + 21, sn + 21, false); - - ASSERT_EQ(10UL, frames_from_callback_.size()); - CheckReferencesVp8(sn + 2); - CheckReferencesVp8(sn + 4, sn + 2); - CheckReferencesVp8(sn + 8, sn + 4); - CheckReferencesVp8(sn + 9, sn + 8); - CheckReferencesVp8(sn + 11, sn + 9); - CheckReferencesVp8(sn + 12); - CheckReferencesVp8(sn + 17, sn + 12); - CheckReferencesVp8(sn + 18, sn + 17); - CheckReferencesVp8(sn + 20, sn + 18); - CheckReferencesVp8(sn + 21, sn + 20); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8NoPictureIdReordered) { - uint16_t sn = 0xfffa; - - InsertVp8(sn, sn + 2, true); - InsertVp8(sn + 3, sn + 4, false); - InsertVp8(sn + 5, sn + 8, false); - 
InsertVp8(sn + 9, sn + 9, false); - InsertVp8(sn + 10, sn + 11, false); - InsertVp8(sn + 12, sn + 12, true); - InsertVp8(sn + 13, sn + 17, false); - InsertVp8(sn + 18, sn + 18, false); - InsertVp8(sn + 19, sn + 20, false); - InsertVp8(sn + 21, sn + 21, false); - - ASSERT_EQ(10UL, frames_from_callback_.size()); - CheckReferencesVp8(sn + 2); - CheckReferencesVp8(sn + 4, sn + 2); - CheckReferencesVp8(sn + 8, sn + 4); - CheckReferencesVp8(sn + 9, sn + 8); - CheckReferencesVp8(sn + 11, sn + 9); - CheckReferencesVp8(sn + 12); - CheckReferencesVp8(sn + 17, sn + 12); - CheckReferencesVp8(sn + 18, sn + 17); - CheckReferencesVp8(sn + 20, sn + 18); - CheckReferencesVp8(sn + 21, sn + 20); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8KeyFrameReferences) { - uint16_t sn = Rand(); - InsertVp8(sn, sn, true); - - ASSERT_EQ(1UL, frames_from_callback_.size()); - CheckReferencesVp8(sn); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8RepeatedFrame_0) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 1); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 0, 2); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 0, 2); - - ASSERT_EQ(2UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8RepeatedFrameLayerSync_01) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 1); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 1, 1, true); - ASSERT_EQ(2UL, frames_from_callback_.size()); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 1, 1, true); - - ASSERT_EQ(2UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8RepeatedFrame_01) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 1); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 0, 2, true); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 0, 3); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 0, 4); 
- InsertVp8(sn + 3, sn + 3, false, pid + 3, 0, 4); - - ASSERT_EQ(4UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid + 1); - CheckReferencesVp8(pid + 3, pid + 2); -} - -// Test with 1 temporal layer. -TEST_F(TestRtpFrameReferenceFinder, Vp8TemporalLayers_0) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 1); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 0, 2); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 0, 3); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 0, 4); - - ASSERT_EQ(4UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid + 1); - CheckReferencesVp8(pid + 3, pid + 2); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8DuplicateTl1Frames) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 0); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 1, 0, true); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 0, 1); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 1, 1); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 1, 1); - InsertVp8(sn + 4, sn + 4, false, pid + 4, 0, 2); - InsertVp8(sn + 5, sn + 5, false, pid + 5, 1, 2); - - ASSERT_EQ(6UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid); - CheckReferencesVp8(pid + 3, pid + 1, pid + 2); - CheckReferencesVp8(pid + 4, pid + 2); - CheckReferencesVp8(pid + 5, pid + 3, pid + 4); -} - -// Test with 1 temporal layer. 
-TEST_F(TestRtpFrameReferenceFinder, Vp8TemporalLayersReordering_0) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 1); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 0, 2); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 0, 4); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 0, 3); - InsertVp8(sn + 5, sn + 5, false, pid + 5, 0, 6); - InsertVp8(sn + 6, sn + 6, false, pid + 6, 0, 7); - InsertVp8(sn + 4, sn + 4, false, pid + 4, 0, 5); - - ASSERT_EQ(7UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid + 1); - CheckReferencesVp8(pid + 3, pid + 2); - CheckReferencesVp8(pid + 4, pid + 3); - CheckReferencesVp8(pid + 5, pid + 4); - CheckReferencesVp8(pid + 6, pid + 5); -} - -// Test with 2 temporal layers in a 01 pattern. -TEST_F(TestRtpFrameReferenceFinder, Vp8TemporalLayers_01) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 255); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 1, 255, true); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 0, 0); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 1, 0); - - ASSERT_EQ(4UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid); - CheckReferencesVp8(pid + 3, pid + 1, pid + 2); -} - -// Test with 2 temporal layers in a 01 pattern. 
-TEST_F(TestRtpFrameReferenceFinder, Vp8TemporalLayersReordering_01) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn + 1, sn + 1, false, pid + 1, 1, 255, true); - InsertVp8(sn, sn, true, pid, 0, 255); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 1, 0); - InsertVp8(sn + 5, sn + 5, false, pid + 5, 1, 1); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 0, 0); - InsertVp8(sn + 4, sn + 4, false, pid + 4, 0, 1); - InsertVp8(sn + 6, sn + 6, false, pid + 6, 0, 2); - InsertVp8(sn + 7, sn + 7, false, pid + 7, 1, 2); - - ASSERT_EQ(8UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid); - CheckReferencesVp8(pid + 3, pid + 1, pid + 2); - CheckReferencesVp8(pid + 4, pid + 2); - CheckReferencesVp8(pid + 5, pid + 3, pid + 4); - CheckReferencesVp8(pid + 6, pid + 4); - CheckReferencesVp8(pid + 7, pid + 5, pid + 6); -} - -// Test with 3 temporal layers in a 0212 pattern. -TEST_F(TestRtpFrameReferenceFinder, Vp8TemporalLayers_0212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 55); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 2, 55, true); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 1, 55, true); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 2, 55); - InsertVp8(sn + 4, sn + 4, false, pid + 4, 0, 56); - InsertVp8(sn + 5, sn + 5, false, pid + 5, 2, 56); - InsertVp8(sn + 6, sn + 6, false, pid + 6, 1, 56); - InsertVp8(sn + 7, sn + 7, false, pid + 7, 2, 56); - InsertVp8(sn + 8, sn + 8, false, pid + 8, 0, 57); - InsertVp8(sn + 9, sn + 9, false, pid + 9, 2, 57, true); - InsertVp8(sn + 10, sn + 10, false, pid + 10, 1, 57, true); - InsertVp8(sn + 11, sn + 11, false, pid + 11, 2, 57); - - ASSERT_EQ(12UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid); - CheckReferencesVp8(pid + 3, pid, pid + 1, pid + 2); - CheckReferencesVp8(pid + 4, pid); - CheckReferencesVp8(pid + 5, pid + 2, pid + 
3, pid + 4); - CheckReferencesVp8(pid + 6, pid + 2, pid + 4); - CheckReferencesVp8(pid + 7, pid + 4, pid + 5, pid + 6); - CheckReferencesVp8(pid + 8, pid + 4); - CheckReferencesVp8(pid + 9, pid + 8); - CheckReferencesVp8(pid + 10, pid + 8); - CheckReferencesVp8(pid + 11, pid + 8, pid + 9, pid + 10); -} - -// Test with 3 temporal layers in a 0212 pattern. -TEST_F(TestRtpFrameReferenceFinder, Vp8TemporalLayersMissingFrame_0212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 55, false); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 1, 55, true); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 2, 55, false); - - ASSERT_EQ(2UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 2, pid); -} - -// Test with 3 temporal layers in a 0212 pattern. -TEST_F(TestRtpFrameReferenceFinder, Vp8TemporalLayersReordering_0212) { - uint16_t pid = 126; - uint16_t sn = Rand(); - - InsertVp8(sn + 1, sn + 1, false, pid + 1, 2, 55, true); - InsertVp8(sn, sn, true, pid, 0, 55, false); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 1, 55, true); - InsertVp8(sn + 4, sn + 4, false, pid + 4, 0, 56, false); - InsertVp8(sn + 5, sn + 5, false, pid + 5, 2, 56, false); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 2, 55, false); - InsertVp8(sn + 7, sn + 7, false, pid + 7, 2, 56, false); - InsertVp8(sn + 9, sn + 9, false, pid + 9, 2, 57, true); - InsertVp8(sn + 6, sn + 6, false, pid + 6, 1, 56, false); - InsertVp8(sn + 8, sn + 8, false, pid + 8, 0, 57, false); - InsertVp8(sn + 11, sn + 11, false, pid + 11, 2, 57, false); - InsertVp8(sn + 10, sn + 10, false, pid + 10, 1, 57, true); - - ASSERT_EQ(12UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid); - CheckReferencesVp8(pid + 3, pid, pid + 1, pid + 2); - CheckReferencesVp8(pid + 4, pid); - CheckReferencesVp8(pid + 5, pid + 2, pid + 3, pid + 4); - CheckReferencesVp8(pid + 6, pid + 2, pid + 4); - 
CheckReferencesVp8(pid + 7, pid + 4, pid + 5, pid + 6); - CheckReferencesVp8(pid + 8, pid + 4); - CheckReferencesVp8(pid + 9, pid + 8); - CheckReferencesVp8(pid + 10, pid + 8); - CheckReferencesVp8(pid + 11, pid + 8, pid + 9, pid + 10); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8InsertManyFrames_0212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - const int keyframes_to_insert = 50; - const int frames_per_keyframe = 120; // Should be a multiple of 4. - uint8_t tl0 = 128; - - for (int k = 0; k < keyframes_to_insert; ++k) { - InsertVp8(sn, sn, true, pid, 0, tl0, false); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 2, tl0, true); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 1, tl0, true); - InsertVp8(sn + 3, sn + 3, false, pid + 3, 2, tl0, false); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid); - CheckReferencesVp8(pid + 3, pid, pid + 1, pid + 2); - frames_from_callback_.clear(); - ++tl0; - - for (int f = 4; f < frames_per_keyframe; f += 4) { - uint16_t sf = sn + f; - int64_t pidf = pid + f; - - InsertVp8(sf, sf, false, pidf, 0, tl0, false); - InsertVp8(sf + 1, sf + 1, false, pidf + 1, 2, tl0, false); - InsertVp8(sf + 2, sf + 2, false, pidf + 2, 1, tl0, false); - InsertVp8(sf + 3, sf + 3, false, pidf + 3, 2, tl0, false); - CheckReferencesVp8(pidf, pidf - 4); - CheckReferencesVp8(pidf + 1, pidf, pidf - 1, pidf - 2); - CheckReferencesVp8(pidf + 2, pidf, pidf - 2); - CheckReferencesVp8(pidf + 3, pidf, pidf + 1, pidf + 2); - frames_from_callback_.clear(); - ++tl0; - } - - pid += frames_per_keyframe; - sn += frames_per_keyframe; - } -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8LayerSync) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp8(sn, sn, true, pid, 0, 0, false); - InsertVp8(sn + 1, sn + 1, false, pid + 1, 1, 0, true); - InsertVp8(sn + 2, sn + 2, false, pid + 2, 0, 1, false); - ASSERT_EQ(3UL, frames_from_callback_.size()); - - InsertVp8(sn + 4, sn + 4, false, pid + 4, 0, 2, false); - 
InsertVp8(sn + 5, sn + 5, false, pid + 5, 1, 2, true); - InsertVp8(sn + 6, sn + 6, false, pid + 6, 0, 3, false); - InsertVp8(sn + 7, sn + 7, false, pid + 7, 1, 3, false); - - ASSERT_EQ(7UL, frames_from_callback_.size()); - CheckReferencesVp8(pid); - CheckReferencesVp8(pid + 1, pid); - CheckReferencesVp8(pid + 2, pid); - CheckReferencesVp8(pid + 4, pid + 2); - CheckReferencesVp8(pid + 5, pid + 4); - CheckReferencesVp8(pid + 6, pid + 4); - CheckReferencesVp8(pid + 7, pid + 6, pid + 5); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8Tl1SyncFrameAfterTl1Frame) { - InsertVp8(1000, 1000, true, 1, 0, 247, true); - InsertVp8(1001, 1001, false, 3, 0, 248, false); - InsertVp8(1002, 1002, false, 4, 1, 248, false); // Will be dropped - InsertVp8(1003, 1003, false, 5, 1, 248, true); // due to this frame. - - ASSERT_EQ(3UL, frames_from_callback_.size()); - CheckReferencesVp8(1); - CheckReferencesVp8(3, 1); - CheckReferencesVp8(5, 3); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp8DetectMissingFrame_0212) { - InsertVp8(1, 1, true, 1, 0, 1, false); - InsertVp8(2, 2, false, 2, 2, 1, true); - InsertVp8(3, 3, false, 3, 1, 1, true); - InsertVp8(4, 4, false, 4, 2, 1, false); - - InsertVp8(6, 6, false, 6, 2, 2, false); - InsertVp8(7, 7, false, 7, 1, 2, false); - InsertVp8(8, 8, false, 8, 2, 2, false); - ASSERT_EQ(4UL, frames_from_callback_.size()); - - InsertVp8(5, 5, false, 5, 0, 2, false); - ASSERT_EQ(8UL, frames_from_callback_.size()); - - CheckReferencesVp8(1); - CheckReferencesVp8(2, 1); - CheckReferencesVp8(3, 1); - CheckReferencesVp8(4, 3, 2, 1); - - CheckReferencesVp8(5, 1); - CheckReferencesVp8(6, 5, 4, 3); - CheckReferencesVp8(7, 5, 3); - CheckReferencesVp8(8, 7, 6, 5); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofInsertOneFrame) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode1); - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - - CheckReferencesVp9(pid, 0); -} - -TEST_F(TestRtpFrameReferenceFinder, 
Vp9NoPictureIdReordered) { - uint16_t sn = 0xfffa; - - InsertVp9Gof(sn, sn + 2, true); - InsertVp9Gof(sn + 3, sn + 4, false); - InsertVp9Gof(sn + 9, sn + 9, false); - InsertVp9Gof(sn + 5, sn + 8, false); - InsertVp9Gof(sn + 12, sn + 12, true); - InsertVp9Gof(sn + 10, sn + 11, false); - InsertVp9Gof(sn + 13, sn + 17, false); - InsertVp9Gof(sn + 19, sn + 20, false); - InsertVp9Gof(sn + 21, sn + 21, false); - InsertVp9Gof(sn + 18, sn + 18, false); - - ASSERT_EQ(10UL, frames_from_callback_.size()); - CheckReferencesVp9(sn + 2, 0); - CheckReferencesVp9(sn + 4, 0, sn + 2); - CheckReferencesVp9(sn + 8, 0, sn + 4); - CheckReferencesVp9(sn + 9, 0, sn + 8); - CheckReferencesVp9(sn + 11, 0, sn + 9); - CheckReferencesVp9(sn + 12, 0); - CheckReferencesVp9(sn + 17, 0, sn + 12); - CheckReferencesVp9(sn + 18, 0, sn + 17); - CheckReferencesVp9(sn + 20, 0, sn + 18); - CheckReferencesVp9(sn + 21, 0, sn + 20); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTemporalLayers_0) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer. 
- - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 0, 1, false); - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 0, 2, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 0, 3, false); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 4, false); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 0, 5, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 0, 6, false); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 0, 7, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 8, false); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 0, 9, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 0, 10, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 0, 11, false); - InsertVp9Gof(sn + 12, sn + 12, false, pid + 12, 0, 0, 12, false); - InsertVp9Gof(sn + 13, sn + 13, false, pid + 13, 0, 0, 13, false); - InsertVp9Gof(sn + 14, sn + 14, false, pid + 14, 0, 0, 14, false); - InsertVp9Gof(sn + 15, sn + 15, false, pid + 15, 0, 0, 15, false); - InsertVp9Gof(sn + 16, sn + 16, false, pid + 16, 0, 0, 16, false); - InsertVp9Gof(sn + 17, sn + 17, false, pid + 17, 0, 0, 17, false); - InsertVp9Gof(sn + 18, sn + 18, false, pid + 18, 0, 0, 18, false); - InsertVp9Gof(sn + 19, sn + 19, false, pid + 19, 0, 0, 19, false); - - ASSERT_EQ(20UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid + 1); - CheckReferencesVp9(pid + 3, 0, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid + 3); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 5); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 7); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 9); - CheckReferencesVp9(pid + 11, 0, pid + 10); - CheckReferencesVp9(pid + 12, 0, pid + 11); - CheckReferencesVp9(pid + 13, 0, pid + 12); - CheckReferencesVp9(pid + 14, 0, 
pid + 13); - CheckReferencesVp9(pid + 15, 0, pid + 14); - CheckReferencesVp9(pid + 16, 0, pid + 15); - CheckReferencesVp9(pid + 17, 0, pid + 16); - CheckReferencesVp9(pid + 18, 0, pid + 17); - CheckReferencesVp9(pid + 19, 0, pid + 18); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofSpatialLayers_2) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer. - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 0, 1, false, true); - // Not inter_pic_predicted because it's the first frame with this layer. - InsertVp9Gof(sn + 2, sn + 2, false, pid + 1, 1, 0, 1, false, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 2, 0, 0, 1, false, true); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 2, 1, 0, 1, false, true); - - ASSERT_EQ(5UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 1, 1); - CheckReferencesVp9(pid + 2, 0, pid + 1); - CheckReferencesVp9(pid + 2, 1, pid + 1); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTemporalLayersReordered_0) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode1); // Only 1 spatial layer. 
- - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 0, 2, false); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 0, 1, false); - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 4, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 0, 3, false); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 0, 5, false); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 0, 7, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 0, 6, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 8, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 0, 10, false); - InsertVp9Gof(sn + 13, sn + 13, false, pid + 13, 0, 0, 13, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 0, 11, false); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 0, 9, false); - InsertVp9Gof(sn + 16, sn + 16, false, pid + 16, 0, 0, 16, false); - InsertVp9Gof(sn + 14, sn + 14, false, pid + 14, 0, 0, 14, false); - InsertVp9Gof(sn + 15, sn + 15, false, pid + 15, 0, 0, 15, false); - InsertVp9Gof(sn + 12, sn + 12, false, pid + 12, 0, 0, 12, false); - InsertVp9Gof(sn + 17, sn + 17, false, pid + 17, 0, 0, 17, false); - InsertVp9Gof(sn + 19, sn + 19, false, pid + 19, 0, 0, 19, false); - InsertVp9Gof(sn + 18, sn + 18, false, pid + 18, 0, 0, 18, false); - - ASSERT_EQ(20UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid + 1); - CheckReferencesVp9(pid + 3, 0, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid + 3); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 5); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 7); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 9); - CheckReferencesVp9(pid + 11, 0, pid + 10); - CheckReferencesVp9(pid + 12, 0, pid + 11); - CheckReferencesVp9(pid + 13, 0, pid + 12); - CheckReferencesVp9(pid + 14, 0, 
pid + 13); - CheckReferencesVp9(pid + 15, 0, pid + 14); - CheckReferencesVp9(pid + 16, 0, pid + 15); - CheckReferencesVp9(pid + 17, 0, pid + 16); - CheckReferencesVp9(pid + 18, 0, pid + 17); - CheckReferencesVp9(pid + 19, 0, pid + 18); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofSkipFramesTemporalLayers_01) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode2); // 0101 pattern - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 1, 0, false); - // Skip GOF with tl0 1 - InsertVp9Gof(sn + 4, sn + 4, true, pid + 4, 0, 0, 2, false, true, &ss); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 1, 2, false); - // Skip GOF with tl0 3 - // Skip GOF with tl0 4 - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 0, 5, false, true, &ss); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 1, 5, false); - - ASSERT_EQ(6UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 4, 0); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 10); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofSkipFramesTemporalLayers_0212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode3); // 02120212 pattern - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 2, 0, false); - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 1, 0, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 2, 0, false); - - ASSERT_EQ(4UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 3, 0, pid + 2); - - // Skip frames with tl0 = 1 - - InsertVp9Gof(sn + 8, sn + 8, true, pid + 8, 0, 0, 2, false, false, &ss); - 
InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 2, 2, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 1, 2, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 2, 2, false); - - ASSERT_EQ(8UL, frames_from_callback_.size()); - CheckReferencesVp9(pid + 8, 0); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 10); - - // Now insert frames with tl0 = 1 - InsertVp9Gof(sn + 4, sn + 4, true, pid + 4, 0, 0, 1, false, true, &ss); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 2, 1, false); - - ASSERT_EQ(9UL, frames_from_callback_.size()); - CheckReferencesVp9(pid + 4, 0); - - // Rest of frames belonging to tl0 = 1 - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 2, 1, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 1, 1, true); // up-switch - - ASSERT_EQ(12UL, frames_from_callback_.size()); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 4); - CheckReferencesVp9(pid + 7, 0, pid + 6); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTemporalLayers_01) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode2); // 0101 pattern - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 1, 0, false); - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 0, 1, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 1, 1, false); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 2, false); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 1, 2, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 0, 3, false); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 1, 3, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 4, false); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 1, 4, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 0, 5, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 1, 5, 
false); - InsertVp9Gof(sn + 12, sn + 12, false, pid + 12, 0, 0, 6, false); - InsertVp9Gof(sn + 13, sn + 13, false, pid + 13, 0, 1, 6, false); - InsertVp9Gof(sn + 14, sn + 14, false, pid + 14, 0, 0, 7, false); - InsertVp9Gof(sn + 15, sn + 15, false, pid + 15, 0, 1, 7, false); - InsertVp9Gof(sn + 16, sn + 16, false, pid + 16, 0, 0, 8, false); - InsertVp9Gof(sn + 17, sn + 17, false, pid + 17, 0, 1, 8, false); - InsertVp9Gof(sn + 18, sn + 18, false, pid + 18, 0, 0, 9, false); - InsertVp9Gof(sn + 19, sn + 19, false, pid + 19, 0, 1, 9, false); - - ASSERT_EQ(20UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 3, 0, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid + 2); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 4); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 6); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 10); - CheckReferencesVp9(pid + 12, 0, pid + 10); - CheckReferencesVp9(pid + 13, 0, pid + 12); - CheckReferencesVp9(pid + 14, 0, pid + 12); - CheckReferencesVp9(pid + 15, 0, pid + 14); - CheckReferencesVp9(pid + 16, 0, pid + 14); - CheckReferencesVp9(pid + 17, 0, pid + 16); - CheckReferencesVp9(pid + 18, 0, pid + 16); - CheckReferencesVp9(pid + 19, 0, pid + 18); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTemporalLayersReordered_01) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode2); // 01 pattern - - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 1, 0, false); - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 0, 1, false); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 2, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 1, 1, false); - InsertVp9Gof(sn + 5, 
sn + 5, false, pid + 5, 0, 1, 2, false); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 1, 3, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 0, 3, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 0, 5, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 4, false); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 1, 4, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 1, 5, false); - InsertVp9Gof(sn + 13, sn + 13, false, pid + 13, 0, 1, 6, false); - InsertVp9Gof(sn + 16, sn + 16, false, pid + 16, 0, 0, 8, false); - InsertVp9Gof(sn + 12, sn + 12, false, pid + 12, 0, 0, 6, false); - InsertVp9Gof(sn + 14, sn + 14, false, pid + 14, 0, 0, 7, false); - InsertVp9Gof(sn + 17, sn + 17, false, pid + 17, 0, 1, 8, false); - InsertVp9Gof(sn + 19, sn + 19, false, pid + 19, 0, 1, 9, false); - InsertVp9Gof(sn + 15, sn + 15, false, pid + 15, 0, 1, 7, false); - InsertVp9Gof(sn + 18, sn + 18, false, pid + 18, 0, 0, 9, false); - - ASSERT_EQ(20UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 3, 0, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid + 2); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 4); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 6); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 10); - CheckReferencesVp9(pid + 12, 0, pid + 10); - CheckReferencesVp9(pid + 13, 0, pid + 12); - CheckReferencesVp9(pid + 14, 0, pid + 12); - CheckReferencesVp9(pid + 15, 0, pid + 14); - CheckReferencesVp9(pid + 16, 0, pid + 14); - CheckReferencesVp9(pid + 17, 0, pid + 16); - CheckReferencesVp9(pid + 18, 0, pid + 16); - CheckReferencesVp9(pid + 19, 0, pid + 18); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTemporalLayers_0212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - 
GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 2, 0, false); - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 1, 0, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 2, 0, false); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 1, false); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 2, 1, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 1, 1, false); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 2, 1, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 2, false); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 2, 2, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 1, 2, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 2, 2, false); - InsertVp9Gof(sn + 12, sn + 12, false, pid + 12, 0, 0, 3, false); - InsertVp9Gof(sn + 13, sn + 13, false, pid + 13, 0, 2, 3, false); - InsertVp9Gof(sn + 14, sn + 14, false, pid + 14, 0, 1, 3, false); - InsertVp9Gof(sn + 15, sn + 15, false, pid + 15, 0, 2, 3, false); - InsertVp9Gof(sn + 16, sn + 16, false, pid + 16, 0, 0, 4, false); - InsertVp9Gof(sn + 17, sn + 17, false, pid + 17, 0, 2, 4, false); - InsertVp9Gof(sn + 18, sn + 18, false, pid + 18, 0, 1, 4, false); - InsertVp9Gof(sn + 19, sn + 19, false, pid + 19, 0, 2, 4, false); - - ASSERT_EQ(20UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 3, 0, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 4); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 4); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 10); - CheckReferencesVp9(pid + 12, 0, pid + 8); - 
CheckReferencesVp9(pid + 13, 0, pid + 12); - CheckReferencesVp9(pid + 14, 0, pid + 12); - CheckReferencesVp9(pid + 15, 0, pid + 14); - CheckReferencesVp9(pid + 16, 0, pid + 12); - CheckReferencesVp9(pid + 17, 0, pid + 16); - CheckReferencesVp9(pid + 18, 0, pid + 16); - CheckReferencesVp9(pid + 19, 0, pid + 18); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTemporalLayersReordered_0212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern - - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 1, 0, false); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 2, 0, false); - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 2, 0, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 1, 1, false); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 2, 1, false); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 1, false); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 2, 2, false); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 2, 1, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 2, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 2, 2, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 1, 2, false); - InsertVp9Gof(sn + 13, sn + 13, false, pid + 13, 0, 2, 3, false); - InsertVp9Gof(sn + 12, sn + 12, false, pid + 12, 0, 0, 3, false); - InsertVp9Gof(sn + 14, sn + 14, false, pid + 14, 0, 1, 3, false); - InsertVp9Gof(sn + 16, sn + 16, false, pid + 16, 0, 0, 4, false); - InsertVp9Gof(sn + 15, sn + 15, false, pid + 15, 0, 2, 3, false); - InsertVp9Gof(sn + 17, sn + 17, false, pid + 17, 0, 2, 4, false); - InsertVp9Gof(sn + 19, sn + 19, false, pid + 19, 0, 2, 4, false); - InsertVp9Gof(sn + 18, sn + 18, false, pid + 18, 0, 1, 4, false); - - ASSERT_EQ(20UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid); - 
CheckReferencesVp9(pid + 3, 0, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 4); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 4); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 10); - CheckReferencesVp9(pid + 12, 0, pid + 8); - CheckReferencesVp9(pid + 13, 0, pid + 12); - CheckReferencesVp9(pid + 14, 0, pid + 12); - CheckReferencesVp9(pid + 15, 0, pid + 14); - CheckReferencesVp9(pid + 16, 0, pid + 12); - CheckReferencesVp9(pid + 17, 0, pid + 16); - CheckReferencesVp9(pid + 18, 0, pid + 16); - CheckReferencesVp9(pid + 19, 0, pid + 18); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTemporalLayersUpSwitch_02120212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode4); // 02120212 pattern - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 2, 0, false); - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 1, 0, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 2, 0, false); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 1, false); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 2, 1, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 1, 1, true); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 2, 1, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 2, true); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 2, 2, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 1, 2, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 2, 2, true); - InsertVp9Gof(sn + 12, sn + 12, false, pid + 12, 0, 0, 3, false); - InsertVp9Gof(sn + 13, sn + 13, false, pid + 13, 0, 2, 3, false); - InsertVp9Gof(sn + 14, sn + 14, false, pid + 14, 0, 1, 3, false); - InsertVp9Gof(sn + 15, sn + 15, false, pid + 15, 0, 2, 3, false); - 
- ASSERT_EQ(16UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 3, 0, pid + 1, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid); - CheckReferencesVp9(pid + 5, 0, pid + 3, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 2, pid + 4); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 4); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 9, pid + 10); - CheckReferencesVp9(pid + 12, 0, pid + 8); - CheckReferencesVp9(pid + 13, 0, pid + 11, pid + 12); - CheckReferencesVp9(pid + 14, 0, pid + 10, pid + 12); - CheckReferencesVp9(pid + 15, 0, pid + 13, pid + 14); -} - -TEST_F(TestRtpFrameReferenceFinder, - Vp9GofTemporalLayersUpSwitchReordered_02120212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode4); // 02120212 pattern - - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 2, 0, false); - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 1, false); - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 1, 0, false); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 2, 1, false); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 2, 0, false); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 2, 1, false); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 2, 2, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 1, 1, true); - InsertVp9Gof(sn + 12, sn + 12, false, pid + 12, 0, 0, 3, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 1, 2, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 2, true); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 2, 2, true); - InsertVp9Gof(sn + 13, sn + 13, false, pid + 13, 0, 2, 3, false); - InsertVp9Gof(sn + 15, sn + 15, false, pid + 15, 0, 2, 3, false); - 
InsertVp9Gof(sn + 14, sn + 14, false, pid + 14, 0, 1, 3, false); - - ASSERT_EQ(16UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 3, 0, pid + 1, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid); - CheckReferencesVp9(pid + 5, 0, pid + 3, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 2, pid + 4); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 4); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 9, pid + 10); - CheckReferencesVp9(pid + 12, 0, pid + 8); - CheckReferencesVp9(pid + 13, 0, pid + 11, pid + 12); - CheckReferencesVp9(pid + 14, 0, pid + 10, pid + 12); - CheckReferencesVp9(pid + 15, 0, pid + 13, pid + 14); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTemporalLayersReordered_01_0212) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode2); // 01 pattern - - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 1, 0, false); - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 3, sn + 3, false, pid + 3, 0, 1, 1, false); - InsertVp9Gof(sn + 6, sn + 6, false, pid + 6, 0, 1, 2, false); - ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern - InsertVp9Gof(sn + 4, sn + 4, false, pid + 4, 0, 0, 2, false, true, &ss); - InsertVp9Gof(sn + 2, sn + 2, false, pid + 2, 0, 0, 1, false); - InsertVp9Gof(sn + 5, sn + 5, false, pid + 5, 0, 2, 2, false); - InsertVp9Gof(sn + 8, sn + 8, false, pid + 8, 0, 0, 3, false); - InsertVp9Gof(sn + 10, sn + 10, false, pid + 10, 0, 1, 3, false); - InsertVp9Gof(sn + 7, sn + 7, false, pid + 7, 0, 2, 2, false); - InsertVp9Gof(sn + 11, sn + 11, false, pid + 11, 0, 2, 3, false); - InsertVp9Gof(sn + 9, sn + 9, false, pid + 9, 0, 2, 3, false); - - ASSERT_EQ(12UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - 
CheckReferencesVp9(pid + 1, 0, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 3, 0, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid); - CheckReferencesVp9(pid + 5, 0, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 4); - CheckReferencesVp9(pid + 7, 0, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 4); - CheckReferencesVp9(pid + 9, 0, pid + 8); - CheckReferencesVp9(pid + 10, 0, pid + 8); - CheckReferencesVp9(pid + 11, 0, pid + 10); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9FlexibleModeOneFrame) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp9Flex(sn, sn, true, pid, 0, 0, false); - - ASSERT_EQ(1UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9FlexibleModeTwoSpatialLayers) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp9Flex(sn, sn, true, pid, 0, 0, false); - InsertVp9Flex(sn + 1, sn + 1, true, pid, 1, 0, true); - InsertVp9Flex(sn + 2, sn + 2, false, pid + 1, 1, 0, false, {1}); - InsertVp9Flex(sn + 3, sn + 3, false, pid + 2, 0, 0, false, {2}); - InsertVp9Flex(sn + 4, sn + 4, false, pid + 2, 1, 0, false, {1}); - InsertVp9Flex(sn + 5, sn + 5, false, pid + 3, 1, 0, false, {1}); - InsertVp9Flex(sn + 6, sn + 6, false, pid + 4, 0, 0, false, {2}); - InsertVp9Flex(sn + 7, sn + 7, false, pid + 4, 1, 0, false, {1}); - InsertVp9Flex(sn + 8, sn + 8, false, pid + 5, 1, 0, false, {1}); - InsertVp9Flex(sn + 9, sn + 9, false, pid + 6, 0, 0, false, {2}); - InsertVp9Flex(sn + 10, sn + 10, false, pid + 6, 1, 0, false, {1}); - InsertVp9Flex(sn + 11, sn + 11, false, pid + 7, 1, 0, false, {1}); - InsertVp9Flex(sn + 12, sn + 12, false, pid + 8, 0, 0, false, {2}); - InsertVp9Flex(sn + 13, sn + 13, false, pid + 8, 1, 0, false, {1}); - - ASSERT_EQ(14UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid, 1); - CheckReferencesVp9(pid + 1, 1, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 2, 1, pid + 1); - 
CheckReferencesVp9(pid + 3, 1, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid + 2); - CheckReferencesVp9(pid + 4, 1, pid + 3); - CheckReferencesVp9(pid + 5, 1, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 4); - CheckReferencesVp9(pid + 6, 1, pid + 5); - CheckReferencesVp9(pid + 7, 1, pid + 6); - CheckReferencesVp9(pid + 8, 0, pid + 6); - CheckReferencesVp9(pid + 8, 1, pid + 7); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9FlexibleModeTwoSpatialLayersReordered) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - - InsertVp9Flex(sn + 1, sn + 1, true, pid, 1, 0, true); - InsertVp9Flex(sn + 2, sn + 2, false, pid + 1, 1, 0, false, {1}); - InsertVp9Flex(sn, sn, true, pid, 0, 0, false); - InsertVp9Flex(sn + 4, sn + 4, false, pid + 2, 1, 0, false, {1}); - InsertVp9Flex(sn + 5, sn + 5, false, pid + 3, 1, 0, false, {1}); - InsertVp9Flex(sn + 3, sn + 3, false, pid + 2, 0, 0, false, {2}); - InsertVp9Flex(sn + 7, sn + 7, false, pid + 4, 1, 0, false, {1}); - InsertVp9Flex(sn + 6, sn + 6, false, pid + 4, 0, 0, false, {2}); - InsertVp9Flex(sn + 8, sn + 8, false, pid + 5, 1, 0, false, {1}); - InsertVp9Flex(sn + 9, sn + 9, false, pid + 6, 0, 0, false, {2}); - InsertVp9Flex(sn + 11, sn + 11, false, pid + 7, 1, 0, false, {1}); - InsertVp9Flex(sn + 10, sn + 10, false, pid + 6, 1, 0, false, {1}); - InsertVp9Flex(sn + 13, sn + 13, false, pid + 8, 1, 0, false, {1}); - InsertVp9Flex(sn + 12, sn + 12, false, pid + 8, 0, 0, false, {2}); - - ASSERT_EQ(14UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid, 1); - CheckReferencesVp9(pid + 1, 1, pid); - CheckReferencesVp9(pid + 2, 0, pid); - CheckReferencesVp9(pid + 2, 1, pid + 1); - CheckReferencesVp9(pid + 3, 1, pid + 2); - CheckReferencesVp9(pid + 4, 0, pid + 2); - CheckReferencesVp9(pid + 4, 1, pid + 3); - CheckReferencesVp9(pid + 5, 1, pid + 4); - CheckReferencesVp9(pid + 6, 0, pid + 4); - CheckReferencesVp9(pid + 6, 1, pid + 5); - CheckReferencesVp9(pid + 7, 1, pid + 6); - CheckReferencesVp9(pid + 
8, 0, pid + 6); - CheckReferencesVp9(pid + 8, 1, pid + 7); -} - -TEST_F(TestRtpFrameReferenceFinder, WrappingFlexReference) { - InsertVp9Flex(0, 0, false, 0, 0, 0, false, {1}); - - ASSERT_EQ(1UL, frames_from_callback_.size()); - const EncodedFrame& frame = *frames_from_callback_.begin()->second; - ASSERT_EQ(frame.id.picture_id - frame.references[0], 1); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofPidJump) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode3); - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1000, 0, 0, 1); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTl0Jump) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode3); - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 125, true, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 0, 0, false, true, &ss); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofTidTooHigh) { - // Same as RtpFrameReferenceFinder::kMaxTemporalLayers. 
- const int kMaxTemporalLayers = 5; - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.SetGofInfoVP9(kTemporalStructureMode2); - ss.temporal_idx[1] = kMaxTemporalLayers; - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 0, 1); - - ASSERT_EQ(1UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); -} - -TEST_F(TestRtpFrameReferenceFinder, Vp9GofZeroFrames) { - uint16_t pid = Rand(); - uint16_t sn = Rand(); - GofInfoVP9 ss; - ss.num_frames_in_gof = 0; - - InsertVp9Gof(sn, sn, true, pid, 0, 0, 0, false, false, &ss); - InsertVp9Gof(sn + 1, sn + 1, false, pid + 1, 0, 0, 1); - - ASSERT_EQ(2UL, frames_from_callback_.size()); - CheckReferencesVp9(pid, 0); - CheckReferencesVp9(pid + 1, 0, pid); -} - TEST_F(TestRtpFrameReferenceFinder, H264KeyFrameReferences) { uint16_t sn = Rand(); InsertH264(sn, sn, true); @@ -1440,53 +241,46 @@ TEST_F(TestRtpFrameReferenceFinder, H264KeyFrameReferences) { CheckReferencesH264(sn); } -// Test with 1 temporal layer. 
-TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayers_0) { - uint16_t sn = Rand(); +TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrap) { + uint16_t sn = 0xFFFF; - InsertH264(sn, sn, true, 0, 1); - InsertH264(sn + 1, sn + 1, false, 0, 2); - InsertH264(sn + 2, sn + 2, false, 0, 3); - InsertH264(sn + 3, sn + 3, false, 0, 4); + InsertH264(sn - 1, sn - 1, true); + InsertH264(sn, sn, false); + InsertH264(sn + 1, sn + 1, false); + InsertH264(sn + 2, sn + 2, false); ASSERT_EQ(4UL, frames_from_callback_.size()); - CheckReferencesH264(sn); + CheckReferencesH264(sn - 1); + CheckReferencesH264(sn, sn - 1); CheckReferencesH264(sn + 1, sn); CheckReferencesH264(sn + 2, sn + 1); - CheckReferencesH264(sn + 3, sn + 2); } -TEST_F(TestRtpFrameReferenceFinder, H264DuplicateTl1Frames) { +TEST_F(TestRtpFrameReferenceFinder, H264Frames) { uint16_t sn = Rand(); - InsertH264(sn, sn, true, 0, 0); - InsertH264(sn + 1, sn + 1, false, 1, 0, true); - InsertH264(sn + 2, sn + 2, false, 0, 1); - InsertH264(sn + 3, sn + 3, false, 1, 1); - InsertH264(sn + 3, sn + 3, false, 1, 1); - InsertH264(sn + 4, sn + 4, false, 0, 2); - InsertH264(sn + 5, sn + 5, false, 1, 2); + InsertH264(sn, sn, true); + InsertH264(sn + 1, sn + 1, false); + InsertH264(sn + 2, sn + 2, false); + InsertH264(sn + 3, sn + 3, false); - ASSERT_EQ(6UL, frames_from_callback_.size()); + ASSERT_EQ(4UL, frames_from_callback_.size()); CheckReferencesH264(sn); CheckReferencesH264(sn + 1, sn); - CheckReferencesH264(sn + 2, sn); - CheckReferencesH264(sn + 3, sn + 1, sn + 2); - CheckReferencesH264(sn + 4, sn + 2); - CheckReferencesH264(sn + 5, sn + 3, sn + 4); + CheckReferencesH264(sn + 2, sn + 1); + CheckReferencesH264(sn + 3, sn + 2); } -// Test with 1 temporal layer. 
-TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersReordering_0) { +TEST_F(TestRtpFrameReferenceFinder, H264Reordering) { uint16_t sn = Rand(); - InsertH264(sn, sn, true, 0, 1); - InsertH264(sn + 1, sn + 1, false, 0, 2); - InsertH264(sn + 3, sn + 3, false, 0, 4); - InsertH264(sn + 2, sn + 2, false, 0, 3); - InsertH264(sn + 5, sn + 5, false, 0, 6); - InsertH264(sn + 6, sn + 6, false, 0, 7); - InsertH264(sn + 4, sn + 4, false, 0, 5); + InsertH264(sn, sn, true); + InsertH264(sn + 1, sn + 1, false); + InsertH264(sn + 3, sn + 3, false); + InsertH264(sn + 2, sn + 2, false); + InsertH264(sn + 5, sn + 5, false); + InsertH264(sn + 6, sn + 6, false); + InsertH264(sn + 4, sn + 4, false); ASSERT_EQ(7UL, frames_from_callback_.size()); CheckReferencesH264(sn); @@ -1498,258 +292,13 @@ TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersReordering_0) { CheckReferencesH264(sn + 6, sn + 5); } -// Test with 2 temporal layers in a 01 pattern. -TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayers_01) { - uint16_t sn = Rand(); - - InsertH264(sn, sn, true, 0, 255); - InsertH264(sn + 1, sn + 1, false, 1, 255, true); - InsertH264(sn + 2, sn + 2, false, 0, 0); - InsertH264(sn + 3, sn + 3, false, 1, 0); - - ASSERT_EQ(4UL, frames_from_callback_.size()); - CheckReferencesH264(sn); - CheckReferencesH264(sn + 1, sn); - CheckReferencesH264(sn + 2, sn); - CheckReferencesH264(sn + 3, sn + 1, sn + 2); -} - -TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersMultiSn_01) { - uint16_t sn = Rand(); - - InsertH264(sn, sn + 3, true, 0, 255); - InsertH264(sn + 4, sn + 5, false, 1, 255, true); - InsertH264(sn + 6, sn + 8, false, 0, 0); - InsertH264(sn + 9, sn + 9, false, 1, 0); - - ASSERT_EQ(4UL, frames_from_callback_.size()); - CheckReferencesH264(sn + 3); - CheckReferencesH264(sn + 5, sn + 3); - CheckReferencesH264(sn + 8, sn + 3); - CheckReferencesH264(sn + 9, sn + 5, sn + 8); -} - -// Test with 2 temporal layers in a 01 pattern. 
-TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersReordering_01) { - uint16_t sn = Rand(); - - InsertH264(sn + 1, sn + 1, false, 1, 255, true); - InsertH264(sn, sn, true, 0, 255); - InsertH264(sn + 3, sn + 3, false, 1, 0); - InsertH264(sn + 5, sn + 5, false, 1, 1); - InsertH264(sn + 2, sn + 2, false, 0, 0); - InsertH264(sn + 4, sn + 4, false, 0, 1); - InsertH264(sn + 6, sn + 6, false, 0, 2); - InsertH264(sn + 7, sn + 7, false, 1, 2); - - ASSERT_EQ(8UL, frames_from_callback_.size()); - CheckReferencesH264(sn); - CheckReferencesH264(sn + 1, sn); - CheckReferencesH264(sn + 2, sn); - CheckReferencesH264(sn + 3, sn + 1, sn + 2); - CheckReferencesH264(sn + 4, sn + 2); - CheckReferencesH264(sn + 5, sn + 3, sn + 4); - CheckReferencesH264(sn + 6, sn + 4); - CheckReferencesH264(sn + 7, sn + 5, sn + 6); -} - -// Test with 3 temporal layers in a 0212 pattern. -TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayers_0212) { - uint16_t sn = Rand(); - - InsertH264(sn, sn, true, 0, 55); - InsertH264(sn + 1, sn + 1, false, 2, 55, true); - InsertH264(sn + 2, sn + 2, false, 1, 55, true); - InsertH264(sn + 3, sn + 3, false, 2, 55); - InsertH264(sn + 4, sn + 4, false, 0, 56); - InsertH264(sn + 5, sn + 5, false, 2, 56, true); - InsertH264(sn + 6, sn + 6, false, 1, 56, true); - InsertH264(sn + 7, sn + 7, false, 2, 56); - InsertH264(sn + 8, sn + 8, false, 0, 57); - InsertH264(sn + 9, sn + 9, false, 2, 57, true); - InsertH264(sn + 10, sn + 10, false, 1, 57, true); - InsertH264(sn + 11, sn + 11, false, 2, 57); - - ASSERT_EQ(12UL, frames_from_callback_.size()); - CheckReferencesH264(sn); - CheckReferencesH264(sn + 1, sn); - CheckReferencesH264(sn + 2, sn); - CheckReferencesH264(sn + 3, sn, sn + 1, sn + 2); - CheckReferencesH264(sn + 4, sn); - CheckReferencesH264(sn + 5, sn + 4); - CheckReferencesH264(sn + 6, sn + 4); - CheckReferencesH264(sn + 7, sn + 4, sn + 5, sn + 6); - CheckReferencesH264(sn + 8, sn + 4); - CheckReferencesH264(sn + 9, sn + 8); - CheckReferencesH264(sn + 10, sn + 8); 
- CheckReferencesH264(sn + 11, sn + 8, sn + 9, sn + 10); -} - -// Test with 3 temporal layers in a 0212 pattern. -TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersMissingFrame_0212) { - uint16_t sn = Rand(); - - InsertH264(sn, sn, true, 0, 55, false); - InsertH264(sn + 2, sn + 2, false, 1, 55, true); - InsertH264(sn + 3, sn + 3, false, 2, 55, false); - - ASSERT_EQ(2UL, frames_from_callback_.size()); - CheckReferencesH264(sn); - CheckReferencesH264(sn + 2, sn); -} - -// Test with 3 temporal layers in a 0212 pattern. -TEST_F(TestRtpFrameReferenceFinder, H264TemporalLayersReordering_0212) { - uint16_t sn = Rand(); - - InsertH264(sn + 1, sn + 1, false, 2, 55, true); - InsertH264(sn, sn, true, 0, 55, false); - InsertH264(sn + 2, sn + 2, false, 1, 55, true); - InsertH264(sn + 4, sn + 4, false, 0, 56, false); - InsertH264(sn + 5, sn + 5, false, 2, 56, false); - InsertH264(sn + 3, sn + 3, false, 2, 55, false); - InsertH264(sn + 7, sn + 7, false, 2, 56, false); - InsertH264(sn + 9, sn + 9, false, 2, 57, true); - InsertH264(sn + 6, sn + 6, false, 1, 56, false); - InsertH264(sn + 8, sn + 8, false, 0, 57, false); - InsertH264(sn + 11, sn + 11, false, 2, 57, false); - InsertH264(sn + 10, sn + 10, false, 1, 57, true); - - ASSERT_EQ(12UL, frames_from_callback_.size()); - CheckReferencesH264(sn); - CheckReferencesH264(sn + 1, sn); - CheckReferencesH264(sn + 2, sn); - CheckReferencesH264(sn + 3, sn, sn + 1, sn + 2); - CheckReferencesH264(sn + 4, sn); - CheckReferencesH264(sn + 5, sn + 2, sn + 3, sn + 4); - CheckReferencesH264(sn + 6, sn + 2, sn + 4); - CheckReferencesH264(sn + 7, sn + 4, sn + 5, sn + 6); - CheckReferencesH264(sn + 8, sn + 4); - CheckReferencesH264(sn + 9, sn + 8); - CheckReferencesH264(sn + 10, sn + 8); - CheckReferencesH264(sn + 11, sn + 8, sn + 9, sn + 10); -} - -TEST_F(TestRtpFrameReferenceFinder, H264InsertManyFrames_0212) { - uint16_t sn = Rand(); - - const int keyframes_to_insert = 50; - const int frames_per_keyframe = 120; // Should be a multiple of 4. 
- uint8_t tl0 = 128; - - for (int k = 0; k < keyframes_to_insert; ++k) { - InsertH264(sn, sn, true, 0, tl0, false); - InsertH264(sn + 1, sn + 1, false, 2, tl0, true); - InsertH264(sn + 2, sn + 2, false, 1, tl0, true); - InsertH264(sn + 3, sn + 3, false, 2, tl0, false); - CheckReferencesH264(sn); - CheckReferencesH264(sn + 1, sn); - CheckReferencesH264(sn + 2, sn); - CheckReferencesH264(sn + 3, sn, sn + 1, sn + 2); - frames_from_callback_.clear(); - ++tl0; - - for (int f = 4; f < frames_per_keyframe; f += 4) { - uint16_t sf = sn + f; - - InsertH264(sf, sf, false, 0, tl0, false); - InsertH264(sf + 1, sf + 1, false, 2, tl0, false); - InsertH264(sf + 2, sf + 2, false, 1, tl0, false); - InsertH264(sf + 3, sf + 3, false, 2, tl0, false); - CheckReferencesH264(sf, sf - 4); - CheckReferencesH264(sf + 1, sf, sf - 1, sf - 2); - CheckReferencesH264(sf + 2, sf, sf - 2); - CheckReferencesH264(sf + 3, sf, sf + 1, sf + 2); - frames_from_callback_.clear(); - ++tl0; - } - - sn += frames_per_keyframe; - } -} - -TEST_F(TestRtpFrameReferenceFinder, H264LayerSync) { - uint16_t sn = Rand(); - - InsertH264(sn, sn, true, 0, 0, false); - InsertH264(sn + 1, sn + 1, false, 1, 0, true); - InsertH264(sn + 2, sn + 2, false, 0, 1, false); - ASSERT_EQ(3UL, frames_from_callback_.size()); - - InsertH264(sn + 4, sn + 4, false, 0, 2, false); - InsertH264(sn + 5, sn + 5, false, 1, 2, true); - InsertH264(sn + 6, sn + 6, false, 0, 3, false); - InsertH264(sn + 7, sn + 7, false, 1, 3, false); - - ASSERT_EQ(7UL, frames_from_callback_.size()); - CheckReferencesH264(sn); - CheckReferencesH264(sn + 1, sn); - CheckReferencesH264(sn + 2, sn); - CheckReferencesH264(sn + 4, sn + 2); - CheckReferencesH264(sn + 5, sn + 4); - CheckReferencesH264(sn + 6, sn + 4); - CheckReferencesH264(sn + 7, sn + 6, sn + 5); -} - -TEST_F(TestRtpFrameReferenceFinder, H264Tl1SyncFrameAfterTl1Frame) { - InsertH264(1000, 1000, true, 0, 247, true); - InsertH264(1001, 1001, false, 0, 248, false); - InsertH264(1002, 1002, false, 1, 248, 
false); // Will be dropped - InsertH264(1003, 1003, false, 1, 248, true); // due to this frame. - - ASSERT_EQ(3UL, frames_from_callback_.size()); - CheckReferencesH264(1000); - CheckReferencesH264(1001, 1000); - CheckReferencesH264(1003, 1001); -} - -TEST_F(TestRtpFrameReferenceFinder, H264DetectMissingFrame_0212) { - InsertH264(1, 1, true, 0, 1, false); - InsertH264(2, 2, false, 2, 1, true); - InsertH264(3, 3, false, 1, 1, true); - InsertH264(4, 4, false, 2, 1, false); - - InsertH264(6, 6, false, 2, 2, false); - InsertH264(7, 7, false, 1, 2, false); - InsertH264(8, 8, false, 2, 2, false); - ASSERT_EQ(4UL, frames_from_callback_.size()); - - InsertH264(5, 5, false, 0, 2, false); - ASSERT_EQ(8UL, frames_from_callback_.size()); - - CheckReferencesH264(1); - CheckReferencesH264(2, 1); - CheckReferencesH264(3, 1); - CheckReferencesH264(4, 3, 2, 1); - - CheckReferencesH264(5, 1); - CheckReferencesH264(6, 5, 4, 3); - CheckReferencesH264(7, 5, 3); - CheckReferencesH264(8, 7, 6, 5); -} - -TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrap) { - uint16_t sn = 0xFFFF; - - InsertH264(sn - 1, sn - 1, true, 0, 1); - InsertH264(sn, sn, false, 0, 2); - InsertH264(sn + 1, sn + 1, false, 0, 3); - InsertH264(sn + 2, sn + 2, false, 0, 4); - - ASSERT_EQ(4UL, frames_from_callback_.size()); - CheckReferencesH264(sn - 1); - CheckReferencesH264(sn, sn - 1); - CheckReferencesH264(sn + 1, sn); - CheckReferencesH264(sn + 2, sn + 1); -} - TEST_F(TestRtpFrameReferenceFinder, H264SequenceNumberWrapMulti) { uint16_t sn = 0xFFFF; - InsertH264(sn - 3, sn - 2, true, 0, 1); - InsertH264(sn - 1, sn + 1, false, 0, 2); - InsertH264(sn + 2, sn + 3, false, 0, 3); - InsertH264(sn + 4, sn + 7, false, 0, 4); + InsertH264(sn - 3, sn - 2, true); + InsertH264(sn - 1, sn + 1, false); + InsertH264(sn + 2, sn + 3, false); + InsertH264(sn + 4, sn + 7, false); ASSERT_EQ(4UL, frames_from_callback_.size()); CheckReferencesH264(sn - 2); @@ -1758,35 +307,16 @@ TEST_F(TestRtpFrameReferenceFinder, 
H264SequenceNumberWrapMulti) { CheckReferencesH264(sn + 7, sn + 3); } -TEST_F(TestRtpFrameReferenceFinder, H264Tl0PicIdxWrap) { - int numTl0Wraps = 1000; - int64_t sn = Rand(); - - for (int i = 0; i < numTl0Wraps; i++) { - for (int tl0 = 0; tl0 < 256; tl0 += 16, sn += 16) { - InsertH264(sn, sn, true, 0, tl0); - reference_finder_->ClearTo(sn); // Too many stashed frames cause errors. - - for (int k = 1; k < 8; k++) { - InsertH264(sn + k, sn + k, false, 0, tl0 + k); - } - - // Skip a TL0 index. - for (int k = 9; k < 16; k++) { - InsertH264(sn + k, sn + k, false, 0, tl0 + k); - } +TEST_F(TestRtpFrameReferenceFinder, Av1FrameNoDependencyDescriptor) { + uint16_t sn = 0xFFFF; + std::unique_ptr frame = + CreateFrame(/*seq_num_start=*/sn, /*seq_num_end=*/sn, /*keyframe=*/true, + kVideoCodecAV1, RTPVideoTypeHeader()); - ASSERT_EQ(8UL, frames_from_callback_.size()); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); - CheckReferencesH264(sn); - for (int k = 1; k < 8; k++) { - CheckReferencesH264(sn + k, sn + k - 1); - } - - frames_from_callback_.clear(); - } - } + ASSERT_EQ(1UL, frames_from_callback_.size()); + CheckReferencesGeneric(sn); } -} // namespace video_coding } // namespace webrtc diff --git a/modules/video_coding/rtp_generic_ref_finder.cc b/modules/video_coding/rtp_generic_ref_finder.cc new file mode 100644 index 0000000000..87fff9c26f --- /dev/null +++ b/modules/video_coding/rtp_generic_ref_finder.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/video_coding/rtp_generic_ref_finder.h" + +#include + +#include "rtc_base/logging.h" + +namespace webrtc { + +RtpFrameReferenceFinder::ReturnVector RtpGenericFrameRefFinder::ManageFrame( + std::unique_ptr frame, + const RTPVideoHeader::GenericDescriptorInfo& descriptor) { + // Frame IDs are unwrapped in the RtpVideoStreamReceiver, no need to unwrap + // them here. + frame->SetId(descriptor.frame_id); + frame->SetSpatialIndex(descriptor.spatial_index); + + RtpFrameReferenceFinder::ReturnVector res; + if (EncodedFrame::kMaxFrameReferences < descriptor.dependencies.size()) { + RTC_LOG(LS_WARNING) << "Too many dependencies in generic descriptor."; + return res; + } + + frame->num_references = descriptor.dependencies.size(); + for (size_t i = 0; i < descriptor.dependencies.size(); ++i) { + frame->references[i] = descriptor.dependencies[i]; + } + + res.push_back(std::move(frame)); + return res; +} + +} // namespace webrtc diff --git a/modules/video_coding/rtp_generic_ref_finder.h b/modules/video_coding/rtp_generic_ref_finder.h new file mode 100644 index 0000000000..87d7b59406 --- /dev/null +++ b/modules/video_coding/rtp_generic_ref_finder.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_ +#define MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_ + +#include + +#include "modules/video_coding/frame_object.h" +#include "modules/video_coding/rtp_frame_reference_finder.h" + +namespace webrtc { + +class RtpGenericFrameRefFinder { + public: + RtpGenericFrameRefFinder() = default; + + RtpFrameReferenceFinder::ReturnVector ManageFrame( + std::unique_ptr frame, + const RTPVideoHeader::GenericDescriptorInfo& descriptor); +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_RTP_GENERIC_REF_FINDER_H_ diff --git a/modules/video_coding/rtp_seq_num_only_ref_finder.cc b/modules/video_coding/rtp_seq_num_only_ref_finder.cc new file mode 100644 index 0000000000..4381cf0952 --- /dev/null +++ b/modules/video_coding/rtp_seq_num_only_ref_finder.cc @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/video_coding/rtp_seq_num_only_ref_finder.h" + +#include + +#include "rtc_base/logging.h" + +namespace webrtc { + +RtpFrameReferenceFinder::ReturnVector RtpSeqNumOnlyRefFinder::ManageFrame( + std::unique_ptr frame) { + FrameDecision decision = ManageFrameInternal(frame.get()); + + RtpFrameReferenceFinder::ReturnVector res; + switch (decision) { + case kStash: + if (stashed_frames_.size() > kMaxStashedFrames) + stashed_frames_.pop_back(); + stashed_frames_.push_front(std::move(frame)); + return res; + case kHandOff: + res.push_back(std::move(frame)); + RetryStashedFrames(res); + return res; + case kDrop: + return res; + } + + return res; +} + +RtpSeqNumOnlyRefFinder::FrameDecision +RtpSeqNumOnlyRefFinder::ManageFrameInternal(RtpFrameObject* frame) { + if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { + last_seq_num_gop_.insert(std::make_pair( + frame->last_seq_num(), + std::make_pair(frame->last_seq_num(), frame->last_seq_num()))); + } + + // We have received a frame but not yet a keyframe, stash this frame. + if (last_seq_num_gop_.empty()) + return kStash; + + // Clean up info for old keyframes but make sure to keep info + // for the last keyframe. + auto clean_to = last_seq_num_gop_.lower_bound(frame->last_seq_num() - 100); + for (auto it = last_seq_num_gop_.begin(); + it != clean_to && last_seq_num_gop_.size() > 1;) { + it = last_seq_num_gop_.erase(it); + } + + // Find the last sequence number of the last frame for the keyframe + // that this frame indirectly references. + auto seq_num_it = last_seq_num_gop_.upper_bound(frame->last_seq_num()); + if (seq_num_it == last_seq_num_gop_.begin()) { + RTC_LOG(LS_WARNING) << "Generic frame with packet range [" + << frame->first_seq_num() << ", " + << frame->last_seq_num() + << "] has no GoP, dropping frame."; + return kDrop; + } + seq_num_it--; + + // Make sure the packet sequence numbers are continuous, otherwise stash + // this frame. 
+ uint16_t last_picture_id_gop = seq_num_it->second.first; + uint16_t last_picture_id_with_padding_gop = seq_num_it->second.second; + if (frame->frame_type() == VideoFrameType::kVideoFrameDelta) { + uint16_t prev_seq_num = frame->first_seq_num() - 1; + + if (prev_seq_num != last_picture_id_with_padding_gop) + return kStash; + } + + RTC_DCHECK(AheadOrAt(frame->last_seq_num(), seq_num_it->first)); + + // Since keyframes can cause reordering we can't simply assign the + // picture id according to some incrementing counter. + frame->SetId(frame->last_seq_num()); + frame->num_references = + frame->frame_type() == VideoFrameType::kVideoFrameDelta; + frame->references[0] = rtp_seq_num_unwrapper_.Unwrap(last_picture_id_gop); + if (AheadOf(frame->Id(), last_picture_id_gop)) { + seq_num_it->second.first = frame->Id(); + seq_num_it->second.second = frame->Id(); + } + + UpdateLastPictureIdWithPadding(frame->Id()); + frame->SetSpatialIndex(0); + frame->SetId(rtp_seq_num_unwrapper_.Unwrap(frame->Id())); + return kHandOff; +} + +void RtpSeqNumOnlyRefFinder::RetryStashedFrames( + RtpFrameReferenceFinder::ReturnVector& res) { + bool complete_frame = false; + do { + complete_frame = false; + for (auto frame_it = stashed_frames_.begin(); + frame_it != stashed_frames_.end();) { + FrameDecision decision = ManageFrameInternal(frame_it->get()); + + switch (decision) { + case kStash: + ++frame_it; + break; + case kHandOff: + complete_frame = true; + res.push_back(std::move(*frame_it)); + ABSL_FALLTHROUGH_INTENDED; + case kDrop: + frame_it = stashed_frames_.erase(frame_it); + } + } + } while (complete_frame); +} + +void RtpSeqNumOnlyRefFinder::UpdateLastPictureIdWithPadding(uint16_t seq_num) { + auto gop_seq_num_it = last_seq_num_gop_.upper_bound(seq_num); + + // If this padding packet "belongs" to a group of pictures that we don't track + // anymore, do nothing. 
+ if (gop_seq_num_it == last_seq_num_gop_.begin()) + return; + --gop_seq_num_it; + + // Calculate the next contiuous sequence number and search for it in + // the padding packets we have stashed. + uint16_t next_seq_num_with_padding = gop_seq_num_it->second.second + 1; + auto padding_seq_num_it = + stashed_padding_.lower_bound(next_seq_num_with_padding); + + // While there still are padding packets and those padding packets are + // continuous, then advance the "last-picture-id-with-padding" and remove + // the stashed padding packet. + while (padding_seq_num_it != stashed_padding_.end() && + *padding_seq_num_it == next_seq_num_with_padding) { + gop_seq_num_it->second.second = next_seq_num_with_padding; + ++next_seq_num_with_padding; + padding_seq_num_it = stashed_padding_.erase(padding_seq_num_it); + } + + // In the case where the stream has been continuous without any new keyframes + // for a while there is a risk that new frames will appear to be older than + // the keyframe they belong to due to wrapping sequence number. In order + // to prevent this we advance the picture id of the keyframe every so often. 
+ if (ForwardDiff(gop_seq_num_it->first, seq_num) > 10000) { + auto save = gop_seq_num_it->second; + last_seq_num_gop_.clear(); + last_seq_num_gop_[seq_num] = save; + } +} + +RtpFrameReferenceFinder::ReturnVector RtpSeqNumOnlyRefFinder::PaddingReceived( + uint16_t seq_num) { + auto clean_padding_to = + stashed_padding_.lower_bound(seq_num - kMaxPaddingAge); + stashed_padding_.erase(stashed_padding_.begin(), clean_padding_to); + stashed_padding_.insert(seq_num); + UpdateLastPictureIdWithPadding(seq_num); + RtpFrameReferenceFinder::ReturnVector res; + RetryStashedFrames(res); + return res; +} + +void RtpSeqNumOnlyRefFinder::ClearTo(uint16_t seq_num) { + auto it = stashed_frames_.begin(); + while (it != stashed_frames_.end()) { + if (AheadOf(seq_num, (*it)->first_seq_num())) { + it = stashed_frames_.erase(it); + } else { + ++it; + } + } +} + +} // namespace webrtc diff --git a/modules/video_coding/rtp_seq_num_only_ref_finder.h b/modules/video_coding/rtp_seq_num_only_ref_finder.h new file mode 100644 index 0000000000..ef3c022111 --- /dev/null +++ b/modules/video_coding/rtp_seq_num_only_ref_finder.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_ +#define MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_ + +#include +#include +#include +#include +#include + +#include "absl/container/inlined_vector.h" +#include "modules/video_coding/frame_object.h" +#include "modules/video_coding/rtp_frame_reference_finder.h" +#include "rtc_base/numerics/sequence_number_util.h" + +namespace webrtc { + +class RtpSeqNumOnlyRefFinder { + public: + RtpSeqNumOnlyRefFinder() = default; + + RtpFrameReferenceFinder::ReturnVector ManageFrame( + std::unique_ptr frame); + RtpFrameReferenceFinder::ReturnVector PaddingReceived(uint16_t seq_num); + void ClearTo(uint16_t seq_num); + + private: + static constexpr int kMaxStashedFrames = 100; + static constexpr int kMaxPaddingAge = 100; + + enum FrameDecision { kStash, kHandOff, kDrop }; + + FrameDecision ManageFrameInternal(RtpFrameObject* frame); + void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res); + void UpdateLastPictureIdWithPadding(uint16_t seq_num); + + // For every group of pictures, hold two sequence numbers. The first being + // the sequence number of the last packet of the last completed frame, and + // the second being the sequence number of the last packet of the last + // completed frame advanced by any potential continuous packets of padding. + std::map, + DescendingSeqNumComp> + last_seq_num_gop_; + + // Padding packets that have been received but that are not yet continuous + // with any group of pictures. + std::set> stashed_padding_; + + // Frames that have been fully received but didn't have all the information + // needed to determine their references. + std::deque> stashed_frames_; + + // Unwrapper used to unwrap generic RTP streams. In a generic stream we derive + // a picture id from the packet sequence number. 
+ SeqNumUnwrapper rtp_seq_num_unwrapper_; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_RTP_SEQ_NUM_ONLY_REF_FINDER_H_ diff --git a/modules/video_coding/rtp_vp8_ref_finder.cc b/modules/video_coding/rtp_vp8_ref_finder.cc new file mode 100644 index 0000000000..b448b23308 --- /dev/null +++ b/modules/video_coding/rtp_vp8_ref_finder.cc @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/rtp_vp8_ref_finder.h" + +#include + +#include "rtc_base/logging.h" + +namespace webrtc { + +RtpFrameReferenceFinder::ReturnVector RtpVp8RefFinder::ManageFrame( + std::unique_ptr frame) { + FrameDecision decision = ManageFrameInternal(frame.get()); + + RtpFrameReferenceFinder::ReturnVector res; + switch (decision) { + case kStash: + if (stashed_frames_.size() > kMaxStashedFrames) + stashed_frames_.pop_back(); + stashed_frames_.push_front(std::move(frame)); + return res; + case kHandOff: + res.push_back(std::move(frame)); + RetryStashedFrames(res); + return res; + case kDrop: + return res; + } + + return res; +} + +RtpVp8RefFinder::FrameDecision RtpVp8RefFinder::ManageFrameInternal( + RtpFrameObject* frame) { + const RTPVideoHeader& video_header = frame->GetRtpVideoHeader(); + const RTPVideoHeaderVP8& codec_header = + absl::get(video_header.video_type_header); + + // Protect against corrupted packets with arbitrary large temporal idx. 
+ if (codec_header.temporalIdx >= kMaxTemporalLayers) + return kDrop; + + frame->SetSpatialIndex(0); + frame->SetId(codec_header.pictureId & 0x7FFF); + + if (last_picture_id_ == -1) + last_picture_id_ = frame->Id(); + + // Clean up info about not yet received frames that are too old. + uint16_t old_picture_id = + Subtract(frame->Id(), kMaxNotYetReceivedFrames); + auto clean_frames_to = not_yet_received_frames_.lower_bound(old_picture_id); + not_yet_received_frames_.erase(not_yet_received_frames_.begin(), + clean_frames_to); + // Avoid re-adding picture ids that were just erased. + if (AheadOf(old_picture_id, last_picture_id_)) { + last_picture_id_ = old_picture_id; + } + // Find if there has been a gap in fully received frames and save the picture + // id of those frames in |not_yet_received_frames_|. + if (AheadOf(frame->Id(), last_picture_id_)) { + do { + last_picture_id_ = Add(last_picture_id_, 1); + not_yet_received_frames_.insert(last_picture_id_); + } while (last_picture_id_ != frame->Id()); + } + + int64_t unwrapped_tl0 = tl0_unwrapper_.Unwrap(codec_header.tl0PicIdx & 0xFF); + + // Clean up info for base layers that are too old. + int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxLayerInfo; + auto clean_layer_info_to = layer_info_.lower_bound(old_tl0_pic_idx); + layer_info_.erase(layer_info_.begin(), clean_layer_info_to); + + if (frame->frame_type() == VideoFrameType::kVideoFrameKey) { + if (codec_header.temporalIdx != 0) { + return kDrop; + } + frame->num_references = 0; + layer_info_[unwrapped_tl0].fill(-1); + UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx); + return kHandOff; + } + + auto layer_info_it = layer_info_.find( + codec_header.temporalIdx == 0 ? unwrapped_tl0 - 1 : unwrapped_tl0); + + // If we don't have the base layer frame yet, stash this frame. 
+ if (layer_info_it == layer_info_.end()) + return kStash; + + // A non keyframe base layer frame has been received, copy the layer info + // from the previous base layer frame and set a reference to the previous + // base layer frame. + if (codec_header.temporalIdx == 0) { + layer_info_it = + layer_info_.emplace(unwrapped_tl0, layer_info_it->second).first; + frame->num_references = 1; + int64_t last_pid_on_layer = layer_info_it->second[0]; + + // Is this an old frame that has already been used to update the state? If + // so, drop it. + if (AheadOrAt(last_pid_on_layer, frame->Id())) { + return kDrop; + } + + frame->references[0] = last_pid_on_layer; + UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx); + return kHandOff; + } + + // Layer sync frame, this frame only references its base layer frame. + if (codec_header.layerSync) { + frame->num_references = 1; + int64_t last_pid_on_layer = layer_info_it->second[codec_header.temporalIdx]; + + // Is this an old frame that has already been used to update the state? If + // so, drop it. + if (last_pid_on_layer != -1 && + AheadOrAt(last_pid_on_layer, frame->Id())) { + return kDrop; + } + + frame->references[0] = layer_info_it->second[0]; + UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx); + return kHandOff; + } + + // Find all references for this frame. + frame->num_references = 0; + for (uint8_t layer = 0; layer <= codec_header.temporalIdx; ++layer) { + // If we have not yet received a previous frame on this temporal layer, + // stash this frame. + if (layer_info_it->second[layer] == -1) + return kStash; + + // If the last frame on this layer is ahead of this frame it means that + // a layer sync frame has been received after this frame for the same + // base layer frame, drop this frame. 
+ if (AheadOf(layer_info_it->second[layer], + frame->Id())) { + return kDrop; + } + + // If we have not yet received a frame between this frame and the referenced + // frame then we have to wait for that frame to be completed first. + auto not_received_frame_it = + not_yet_received_frames_.upper_bound(layer_info_it->second[layer]); + if (not_received_frame_it != not_yet_received_frames_.end() && + AheadOf(frame->Id(), + *not_received_frame_it)) { + return kStash; + } + + if (!(AheadOf(frame->Id(), + layer_info_it->second[layer]))) { + RTC_LOG(LS_WARNING) << "Frame with picture id " << frame->Id() + << " and packet range [" << frame->first_seq_num() + << ", " << frame->last_seq_num() + << "] already received, " + " dropping frame."; + return kDrop; + } + + ++frame->num_references; + frame->references[layer] = layer_info_it->second[layer]; + } + + UpdateLayerInfoVp8(frame, unwrapped_tl0, codec_header.temporalIdx); + return kHandOff; +} + +void RtpVp8RefFinder::UpdateLayerInfoVp8(RtpFrameObject* frame, + int64_t unwrapped_tl0, + uint8_t temporal_idx) { + auto layer_info_it = layer_info_.find(unwrapped_tl0); + + // Update this layer info and newer. + while (layer_info_it != layer_info_.end()) { + if (layer_info_it->second[temporal_idx] != -1 && + AheadOf(layer_info_it->second[temporal_idx], + frame->Id())) { + // The frame was not newer, then no subsequent layer info have to be + // update. 
+ break; + } + + layer_info_it->second[temporal_idx] = frame->Id(); + ++unwrapped_tl0; + layer_info_it = layer_info_.find(unwrapped_tl0); + } + not_yet_received_frames_.erase(frame->Id()); + + UnwrapPictureIds(frame); +} + +void RtpVp8RefFinder::RetryStashedFrames( + RtpFrameReferenceFinder::ReturnVector& res) { + bool complete_frame = false; + do { + complete_frame = false; + for (auto frame_it = stashed_frames_.begin(); + frame_it != stashed_frames_.end();) { + FrameDecision decision = ManageFrameInternal(frame_it->get()); + + switch (decision) { + case kStash: + ++frame_it; + break; + case kHandOff: + complete_frame = true; + res.push_back(std::move(*frame_it)); + ABSL_FALLTHROUGH_INTENDED; + case kDrop: + frame_it = stashed_frames_.erase(frame_it); + } + } + } while (complete_frame); +} + +void RtpVp8RefFinder::UnwrapPictureIds(RtpFrameObject* frame) { + for (size_t i = 0; i < frame->num_references; ++i) + frame->references[i] = unwrapper_.Unwrap(frame->references[i]); + frame->SetId(unwrapper_.Unwrap(frame->Id())); +} + +void RtpVp8RefFinder::ClearTo(uint16_t seq_num) { + auto it = stashed_frames_.begin(); + while (it != stashed_frames_.end()) { + if (AheadOf(seq_num, (*it)->first_seq_num())) { + it = stashed_frames_.erase(it); + } else { + ++it; + } + } +} + +} // namespace webrtc diff --git a/modules/video_coding/rtp_vp8_ref_finder.h b/modules/video_coding/rtp_vp8_ref_finder.h new file mode 100644 index 0000000000..0a6cd7e10d --- /dev/null +++ b/modules/video_coding/rtp_vp8_ref_finder.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_ +#define MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_ + +#include +#include +#include +#include + +#include "absl/container/inlined_vector.h" +#include "modules/video_coding/frame_object.h" +#include "modules/video_coding/rtp_frame_reference_finder.h" +#include "rtc_base/numerics/sequence_number_util.h" + +namespace webrtc { + +class RtpVp8RefFinder { + public: + RtpVp8RefFinder() = default; + + RtpFrameReferenceFinder::ReturnVector ManageFrame( + std::unique_ptr frame); + void ClearTo(uint16_t seq_num); + + private: + static constexpr int kFrameIdLength = 1 << 15; + static constexpr int kMaxLayerInfo = 50; + static constexpr int kMaxNotYetReceivedFrames = 100; + static constexpr int kMaxStashedFrames = 100; + static constexpr int kMaxTemporalLayers = 5; + + enum FrameDecision { kStash, kHandOff, kDrop }; + + FrameDecision ManageFrameInternal(RtpFrameObject* frame); + void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res); + void UpdateLayerInfoVp8(RtpFrameObject* frame, + int64_t unwrapped_tl0, + uint8_t temporal_idx); + void UnwrapPictureIds(RtpFrameObject* frame); + + // Save the last picture id in order to detect when there is a gap in frames + // that have not yet been fully received. + int last_picture_id_ = -1; + + // Frames earlier than the last received frame that have not yet been + // fully received. + std::set> + not_yet_received_frames_; + + // Frames that have been fully received but didn't have all the information + // needed to determine their references. + std::deque> stashed_frames_; + + // Holds the information about the last completed frame for a given temporal + // layer given an unwrapped Tl0 picture index. + std::map> layer_info_; + + // Unwrapper used to unwrap VP8/VP9 streams which have their picture id + // specified. 
+ SeqNumUnwrapper unwrapper_; + + SeqNumUnwrapper tl0_unwrapper_; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_RTP_VP8_REF_FINDER_H_ diff --git a/modules/video_coding/rtp_vp8_ref_finder_unittest.cc b/modules/video_coding/rtp_vp8_ref_finder_unittest.cc new file mode 100644 index 0000000000..a77149a89b --- /dev/null +++ b/modules/video_coding/rtp_vp8_ref_finder_unittest.cc @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/rtp_vp8_ref_finder.h" + +#include +#include + +#include "modules/video_coding/frame_object.h" +#include "test/gmock.h" +#include "test/gtest.h" + +using ::testing::Contains; +using ::testing::Eq; +using ::testing::Matcher; +using ::testing::Matches; +using ::testing::SizeIs; +using ::testing::UnorderedElementsAreArray; + +namespace webrtc { +namespace { + +MATCHER_P2(HasIdAndRefs, id, refs, "") { + return Matches(Eq(id))(arg->Id()) && + Matches(UnorderedElementsAreArray(refs))( + rtc::ArrayView(arg->references, arg->num_references)); +} + +Matcher>&> +HasFrameWithIdAndRefs(int64_t frame_id, const std::vector& refs) { + return Contains(HasIdAndRefs(frame_id, refs)); +} + +class Frame { + public: + Frame& AsKeyFrame(bool is_keyframe = true) { + is_keyframe_ = is_keyframe; + return *this; + } + + Frame& Pid(int pid) { + picture_id_ = pid; + return *this; + } + + Frame& Tid(int tid) { + temporal_id_ = tid; + return *this; + } + + Frame& Tl0(int tl0) { + tl0_idx_ = tl0; + return *this; + } + + Frame& AsSync(bool is_sync = true) { + sync = is_sync; + return *this; + } + + operator std::unique_ptr() { + RTPVideoHeaderVP8 
vp8_header{};
+    vp8_header.pictureId = *picture_id_;
+    vp8_header.temporalIdx = *temporal_id_;
+    vp8_header.tl0PicIdx = *tl0_idx_;
+    vp8_header.layerSync = sync;
+
+    RTPVideoHeader video_header;
+    video_header.frame_type = is_keyframe_ ? VideoFrameType::kVideoFrameKey
+                                           : VideoFrameType::kVideoFrameDelta;
+    video_header.video_type_header = vp8_header;
+    // clang-format off
+    return std::make_unique<RtpFrameObject>(
+        /*seq_num_start=*/0,
+        /*seq_num_end=*/0,
+        /*markerBit=*/true,
+        /*times_nacked=*/0,
+        /*first_packet_received_time=*/0,
+        /*last_packet_received_time=*/0,
+        /*rtp_timestamp=*/0,
+        /*ntp_time_ms=*/0,
+        VideoSendTiming(),
+        /*payload_type=*/0,
+        kVideoCodecVP8,
+        kVideoRotation_0,
+        VideoContentType::UNSPECIFIED,
+        video_header,
+        /*color_space=*/absl::nullopt,
+        RtpPacketInfos(),
+        EncodedImageBuffer::Create(/*size=*/0));
+    // clang-format on
+  }
+
+ private:
+  bool is_keyframe_ = false;
+  absl::optional<int> picture_id_;
+  absl::optional<int> temporal_id_;
+  absl::optional<int> tl0_idx_;
+  bool sync = false;
+};
+
+}  // namespace
+
+class RtpVp8RefFinderTest : public ::testing::Test {
+ protected:
+  RtpVp8RefFinderTest() : ref_finder_(std::make_unique<RtpVp8RefFinder>()) {}
+
+  void Insert(std::unique_ptr<RtpFrameObject> frame) {
+    for (auto& f : ref_finder_->ManageFrame(std::move(frame))) {
+      frames_.push_back(std::move(f));
+    }
+  }
+
+  std::unique_ptr<RtpVp8RefFinder> ref_finder_;
+  std::vector<std::unique_ptr<EncodedFrame>> frames_;
+};
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrame_0) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+  Insert(Frame().Pid(1).Tid(0).Tl0(2));
+  Insert(Frame().Pid(1).Tid(0).Tl0(2));
+
+  EXPECT_THAT(frames_, SizeIs(2));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrameLayerSync_01) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+  Insert(Frame().Pid(1).Tid(1).Tl0(1).AsSync());
+  Insert(Frame().Pid(1).Tid(1).Tl0(1).AsSync());
+
+  EXPECT_THAT(frames_, SizeIs(2));
+  EXPECT_THAT(frames_,
HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8RepeatedFrame_01) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+  Insert(Frame().Pid(1).Tid(0).Tl0(2).AsSync());
+  Insert(Frame().Pid(2).Tid(0).Tl0(3));
+  Insert(Frame().Pid(3).Tid(0).Tl0(4));
+  Insert(Frame().Pid(3).Tid(0).Tl0(4));
+
+  EXPECT_THAT(frames_, SizeIs(4));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {2}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_0) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+  Insert(Frame().Pid(1).Tid(0).Tl0(2));
+
+  EXPECT_THAT(frames_, SizeIs(2));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8DuplicateTl1Frames) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(0).AsKeyFrame());
+  Insert(Frame().Pid(1).Tid(1).Tl0(0).AsSync());
+  Insert(Frame().Pid(2).Tid(0).Tl0(1));
+  Insert(Frame().Pid(3).Tid(1).Tl0(1));
+  Insert(Frame().Pid(3).Tid(1).Tl0(1));
+  Insert(Frame().Pid(4).Tid(0).Tl0(2));
+  Insert(Frame().Pid(5).Tid(1).Tl0(2));
+
+  EXPECT_THAT(frames_, SizeIs(6));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3, 4}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_0) {
+  Insert(Frame().Pid(1).Tid(0).Tl0(2));
+  Insert(Frame().Pid(0).Tid(0).Tl0(1).AsKeyFrame());
+  Insert(Frame().Pid(3).Tid(0).Tl0(4));
+  Insert(Frame().Pid(2).Tid(0).Tl0(3));
+  Insert(Frame().Pid(5).Tid(0).Tl0(6));
+  Insert(Frame().Pid(6).Tid(0).Tl0(7));
+
Insert(Frame().Pid(4).Tid(0).Tl0(5));
+
+  EXPECT_THAT(frames_, SizeIs(7));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {2}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {3}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {4}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {5}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_01) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(255).AsKeyFrame());
+  Insert(Frame().Pid(1).Tid(1).Tl0(255).AsSync());
+  Insert(Frame().Pid(2).Tid(0).Tl0(0));
+  Insert(Frame().Pid(3).Tid(1).Tl0(0));
+
+  EXPECT_THAT(frames_, SizeIs(4));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_01) {
+  Insert(Frame().Pid(1).Tid(1).Tl0(255).AsSync());
+  Insert(Frame().Pid(0).Tid(0).Tl0(255).AsKeyFrame());
+  Insert(Frame().Pid(3).Tid(1).Tl0(0));
+  Insert(Frame().Pid(5).Tid(1).Tl0(1));
+  Insert(Frame().Pid(2).Tid(0).Tl0(0));
+  Insert(Frame().Pid(4).Tid(0).Tl0(1));
+  Insert(Frame().Pid(6).Tid(0).Tl0(2));
+  Insert(Frame().Pid(7).Tid(1).Tl0(2));
+
+  EXPECT_THAT(frames_, SizeIs(8));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1, 2}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3, 4}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {4}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {5, 6}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayers_0212) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(55).AsKeyFrame());
+
Insert(Frame().Pid(1).Tid(2).Tl0(55).AsSync());
+  Insert(Frame().Pid(2).Tid(1).Tl0(55).AsSync());
+  Insert(Frame().Pid(3).Tid(2).Tl0(55));
+  Insert(Frame().Pid(4).Tid(0).Tl0(56));
+  Insert(Frame().Pid(5).Tid(2).Tl0(56));
+  Insert(Frame().Pid(6).Tid(1).Tl0(56));
+  Insert(Frame().Pid(7).Tid(2).Tl0(56));
+  Insert(Frame().Pid(8).Tid(0).Tl0(57));
+  Insert(Frame().Pid(9).Tid(2).Tl0(57).AsSync());
+  Insert(Frame().Pid(10).Tid(1).Tl0(57).AsSync());
+  Insert(Frame().Pid(11).Tid(2).Tl0(57));
+
+  EXPECT_THAT(frames_, SizeIs(12));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {0, 1, 2}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {2, 3, 4}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {2, 4}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {4, 5, 6}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(8, {4}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(9, {8}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {8}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {8, 9, 10}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersMissingFrame_0212) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(55).AsKeyFrame());
+  Insert(Frame().Pid(2).Tid(1).Tl0(55).AsSync());
+  Insert(Frame().Pid(3).Tid(2).Tl0(55));
+
+  EXPECT_THAT(frames_, SizeIs(2));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+}
+
+// Test with 3 temporal layers in a 0212 pattern.
+TEST_F(RtpVp8RefFinderTest, Vp8TemporalLayersReordering_0212) {
+  Insert(Frame().Pid(127).Tid(2).Tl0(55).AsSync());
+  Insert(Frame().Pid(126).Tid(0).Tl0(55).AsKeyFrame());
+  Insert(Frame().Pid(128).Tid(1).Tl0(55).AsSync());
+  Insert(Frame().Pid(130).Tid(0).Tl0(56));
+  Insert(Frame().Pid(131).Tid(2).Tl0(56));
+  Insert(Frame().Pid(129).Tid(2).Tl0(55));
+  Insert(Frame().Pid(133).Tid(2).Tl0(56));
+  Insert(Frame().Pid(135).Tid(2).Tl0(57).AsSync());
+  Insert(Frame().Pid(132).Tid(1).Tl0(56));
+  Insert(Frame().Pid(134).Tid(0).Tl0(57));
+  Insert(Frame().Pid(137).Tid(2).Tl0(57));
+  Insert(Frame().Pid(136).Tid(1).Tl0(57).AsSync());
+
+  EXPECT_THAT(frames_, SizeIs(12));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(126, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(127, {126}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(128, {126}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(129, {126, 127, 128}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(130, {126}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(131, {128, 129, 130}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(132, {128, 130}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(133, {130, 131, 132}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(134, {130}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(135, {134}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(136, {134}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(137, {134, 135, 136}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8LayerSync) {
+  Insert(Frame().Pid(0).Tid(0).Tl0(0).AsKeyFrame());
+  Insert(Frame().Pid(1).Tid(1).Tl0(0).AsSync());
+  Insert(Frame().Pid(2).Tid(0).Tl0(1));
+  Insert(Frame().Pid(4).Tid(0).Tl0(2));
+  Insert(Frame().Pid(5).Tid(1).Tl0(2).AsSync());
+  Insert(Frame().Pid(6).Tid(0).Tl0(3));
+  Insert(Frame().Pid(7).Tid(1).Tl0(3));
+
+  EXPECT_THAT(frames_, SizeIs(7));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {0}));
+
EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {2}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {4}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {4}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {5, 6}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8Tl1SyncFrameAfterTl1Frame) {
+  Insert(Frame().Pid(1).Tid(0).Tl0(247).AsKeyFrame().AsSync());
+  Insert(Frame().Pid(3).Tid(0).Tl0(248));
+  Insert(Frame().Pid(4).Tid(1).Tl0(248));
+  Insert(Frame().Pid(5).Tid(1).Tl0(248).AsSync());
+
+  EXPECT_THAT(frames_, SizeIs(3));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {3}));
+}
+
+TEST_F(RtpVp8RefFinderTest, Vp8DetectMissingFrame_0212) {
+  Insert(Frame().Pid(1).Tid(0).Tl0(1).AsKeyFrame());
+  Insert(Frame().Pid(2).Tid(2).Tl0(1).AsSync());
+  Insert(Frame().Pid(3).Tid(1).Tl0(1).AsSync());
+  Insert(Frame().Pid(4).Tid(2).Tl0(1));
+  Insert(Frame().Pid(6).Tid(2).Tl0(2));
+  Insert(Frame().Pid(7).Tid(1).Tl0(2));
+  Insert(Frame().Pid(8).Tid(2).Tl0(2));
+  Insert(Frame().Pid(5).Tid(0).Tl0(2));
+
+  EXPECT_THAT(frames_, SizeIs(8));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(2, {1}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(3, {1}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(4, {1, 2, 3}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {1}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {3, 4, 5}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(7, {3, 5}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(8, {5, 6, 7}));
+}
+
+}  // namespace webrtc
diff --git a/modules/video_coding/rtp_vp9_ref_finder.cc b/modules/video_coding/rtp_vp9_ref_finder.cc
new file mode 100644
index 0000000000..b44bb2500d
--- /dev/null
+++ b/modules/video_coding/rtp_vp9_ref_finder.cc
@@ -0,0 +1,353 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/rtp_vp9_ref_finder.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "rtc_base/logging.h"
+
+namespace webrtc {
+
+RtpFrameReferenceFinder::ReturnVector RtpVp9RefFinder::ManageFrame(
+    std::unique_ptr<RtpFrameObject> frame) {
+  FrameDecision decision = ManageFrameInternal(frame.get());
+
+  RtpFrameReferenceFinder::ReturnVector res;
+  switch (decision) {
+    case kStash:
+      if (stashed_frames_.size() > kMaxStashedFrames)
+        stashed_frames_.pop_back();
+      stashed_frames_.push_front(std::move(frame));
+      return res;
+    case kHandOff:
+      res.push_back(std::move(frame));
+      RetryStashedFrames(res);
+      return res;
+    case kDrop:
+      return res;
+  }
+
+  return res;
+}
+
+RtpVp9RefFinder::FrameDecision RtpVp9RefFinder::ManageFrameInternal(
+    RtpFrameObject* frame) {
+  const RTPVideoHeader& video_header = frame->GetRtpVideoHeader();
+  const RTPVideoHeaderVP9& codec_header =
+      absl::get<RTPVideoHeaderVP9>(video_header.video_type_header);
+
+  // Protect against corrupted packets with arbitrary large temporal idx.
+  if (codec_header.temporal_idx >= kMaxTemporalLayers ||
+      codec_header.spatial_idx >= kMaxSpatialLayers)
+    return kDrop;
+
+  frame->SetSpatialIndex(codec_header.spatial_idx);
+  frame->SetId(codec_header.picture_id & (kFrameIdLength - 1));
+
+  if (last_picture_id_ == -1)
+    last_picture_id_ = frame->Id();
+
+  if (codec_header.flexible_mode) {
+    if (codec_header.num_ref_pics > EncodedFrame::kMaxFrameReferences) {
+      return kDrop;
+    }
+    frame->num_references = codec_header.num_ref_pics;
+    for (size_t i = 0; i < frame->num_references; ++i) {
+      frame->references[i] =
+          Subtract<kFrameIdLength>(frame->Id(), codec_header.pid_diff[i]);
+    }
+
+    FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+    return kHandOff;
+  }
+
+  if (codec_header.tl0_pic_idx == kNoTl0PicIdx) {
+    RTC_LOG(LS_WARNING) << "TL0PICIDX is expected to be present in "
+                           "non-flexible mode.";
+    return kDrop;
+  }
+
+  GofInfo* info;
+  int64_t unwrapped_tl0 =
+      tl0_unwrapper_.Unwrap(codec_header.tl0_pic_idx & 0xFF);
+  if (codec_header.ss_data_available) {
+    if (codec_header.temporal_idx != 0) {
+      RTC_LOG(LS_WARNING) << "Received scalability structure on a non base "
+                             "layer frame. Scalability structure ignored.";
+    } else {
+      if (codec_header.gof.num_frames_in_gof > kMaxVp9FramesInGof) {
+        return kDrop;
+      }
+
+      for (size_t i = 0; i < codec_header.gof.num_frames_in_gof; ++i) {
+        if (codec_header.gof.num_ref_pics[i] > kMaxVp9RefPics) {
+          return kDrop;
+        }
+      }
+
+      GofInfoVP9 gof = codec_header.gof;
+      if (gof.num_frames_in_gof == 0) {
+        RTC_LOG(LS_WARNING) << "Number of frames in GOF is zero.
Assume "
+                               "that stream has only one temporal layer.";
+        gof.SetGofInfoVP9(kTemporalStructureMode1);
+      }
+
+      current_ss_idx_ = Add<kMaxGofSaved>(current_ss_idx_, 1);
+      scalability_structures_[current_ss_idx_] = gof;
+      scalability_structures_[current_ss_idx_].pid_start = frame->Id();
+      gof_info_.emplace(
+          unwrapped_tl0,
+          GofInfo(&scalability_structures_[current_ss_idx_], frame->Id()));
+    }
+
+    const auto gof_info_it = gof_info_.find(unwrapped_tl0);
+    if (gof_info_it == gof_info_.end())
+      return kStash;
+
+    info = &gof_info_it->second;
+
+    if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+      frame->num_references = 0;
+      FrameReceivedVp9(frame->Id(), info);
+      FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+      return kHandOff;
+    }
+  } else if (frame->frame_type() == VideoFrameType::kVideoFrameKey) {
+    if (frame->SpatialIndex() == 0) {
+      RTC_LOG(LS_WARNING) << "Received keyframe without scalability structure";
+      return kDrop;
+    }
+    const auto gof_info_it = gof_info_.find(unwrapped_tl0);
+    if (gof_info_it == gof_info_.end())
+      return kStash;
+
+    info = &gof_info_it->second;
+
+    frame->num_references = 0;
+    FrameReceivedVp9(frame->Id(), info);
+    FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+    return kHandOff;
+  } else {
+    auto gof_info_it = gof_info_.find(
+        (codec_header.temporal_idx == 0) ? unwrapped_tl0 - 1 : unwrapped_tl0);
+
+    // Gof info for this frame is not available yet, stash this frame.
+    if (gof_info_it == gof_info_.end())
+      return kStash;
+
+    if (codec_header.temporal_idx == 0) {
+      gof_info_it = gof_info_
+                        .emplace(unwrapped_tl0,
+                                 GofInfo(gof_info_it->second.gof, frame->Id()))
+                        .first;
+    }
+
+    info = &gof_info_it->second;
+  }
+
+  // Clean up info for base layers that are too old.
+  int64_t old_tl0_pic_idx = unwrapped_tl0 - kMaxGofSaved;
+  auto clean_gof_info_to = gof_info_.lower_bound(old_tl0_pic_idx);
+  gof_info_.erase(gof_info_.begin(), clean_gof_info_to);
+
+  FrameReceivedVp9(frame->Id(), info);
+
+  // Make sure we don't miss any frame that could potentially have the
+  // up switch flag set.
+  if (MissingRequiredFrameVp9(frame->Id(), *info))
+    return kStash;
+
+  if (codec_header.temporal_up_switch)
+    up_switch_.emplace(frame->Id(), codec_header.temporal_idx);
+
+  // Clean out old info about up switch frames.
+  uint16_t old_picture_id = Subtract<kFrameIdLength>(frame->Id(), 50);
+  auto up_switch_erase_to = up_switch_.lower_bound(old_picture_id);
+  up_switch_.erase(up_switch_.begin(), up_switch_erase_to);
+
+  size_t diff =
+      ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start, frame->Id());
+  size_t gof_idx = diff % info->gof->num_frames_in_gof;
+
+  if (info->gof->num_ref_pics[gof_idx] > EncodedFrame::kMaxFrameReferences) {
+    return kDrop;
+  }
+  // Populate references according to the scalability structure.
+  frame->num_references = info->gof->num_ref_pics[gof_idx];
+  for (size_t i = 0; i < frame->num_references; ++i) {
+    frame->references[i] =
+        Subtract<kFrameIdLength>(frame->Id(), info->gof->pid_diff[gof_idx][i]);
+
+    // If this is a reference to a frame earlier than the last up switch point,
+    // then ignore this reference.
+    if (UpSwitchInIntervalVp9(frame->Id(), codec_header.temporal_idx,
+                              frame->references[i])) {
+      --frame->num_references;
+    }
+  }
+
+  // Override GOF references.
+  if (!codec_header.inter_pic_predicted) {
+    frame->num_references = 0;
+  }
+
+  FlattenFrameIdAndRefs(frame, codec_header.inter_layer_predicted);
+  return kHandOff;
+}
+
+bool RtpVp9RefFinder::MissingRequiredFrameVp9(uint16_t picture_id,
+                                              const GofInfo& info) {
+  size_t diff =
+      ForwardDiff<uint16_t, kFrameIdLength>(info.gof->pid_start, picture_id);
+  size_t gof_idx = diff % info.gof->num_frames_in_gof;
+  size_t temporal_idx = info.gof->temporal_idx[gof_idx];
+
+  if (temporal_idx >= kMaxTemporalLayers) {
+    RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+                        << " temporal "
+                           "layers are supported.";
+    return true;
+  }
+
+  // For every reference this frame has, check if there is a frame missing in
+  // the interval (|ref_pid|, |picture_id|) in any of the lower temporal
+  // layers. If so, we are missing a required frame.
+  uint8_t num_references = info.gof->num_ref_pics[gof_idx];
+  for (size_t i = 0; i < num_references; ++i) {
+    uint16_t ref_pid =
+        Subtract<kFrameIdLength>(picture_id, info.gof->pid_diff[gof_idx][i]);
+    for (size_t l = 0; l < temporal_idx; ++l) {
+      auto missing_frame_it = missing_frames_for_layer_[l].lower_bound(ref_pid);
+      if (missing_frame_it != missing_frames_for_layer_[l].end() &&
+          AheadOf<uint16_t, kFrameIdLength>(picture_id, *missing_frame_it)) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+void RtpVp9RefFinder::FrameReceivedVp9(uint16_t picture_id, GofInfo* info) {
+  int last_picture_id = info->last_picture_id;
+  size_t gof_size = std::min(info->gof->num_frames_in_gof, kMaxVp9FramesInGof);
+
+  // If there is a gap, find which temporal layer the missing frames
+  // belong to and add the frame as missing for that temporal layer.
+  // Otherwise, remove this frame from the set of missing frames.
+  if (AheadOf<uint16_t, kFrameIdLength>(picture_id, last_picture_id)) {
+    size_t diff = ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start,
+                                                        last_picture_id);
+    size_t gof_idx = diff % gof_size;
+
+    last_picture_id = Add<kFrameIdLength>(last_picture_id, 1);
+    while (last_picture_id != picture_id) {
+      gof_idx = (gof_idx + 1) % gof_size;
+      RTC_CHECK(gof_idx < kMaxVp9FramesInGof);
+
+      size_t temporal_idx = info->gof->temporal_idx[gof_idx];
+      if (temporal_idx >= kMaxTemporalLayers) {
+        RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+                            << " temporal "
+                               "layers are supported.";
+        return;
+      }
+
+      missing_frames_for_layer_[temporal_idx].insert(last_picture_id);
+      last_picture_id = Add<kFrameIdLength>(last_picture_id, 1);
+    }
+
+    info->last_picture_id = last_picture_id;
+  } else {
+    size_t diff =
+        ForwardDiff<uint16_t, kFrameIdLength>(info->gof->pid_start, picture_id);
+    size_t gof_idx = diff % gof_size;
+    RTC_CHECK(gof_idx < kMaxVp9FramesInGof);
+
+    size_t temporal_idx = info->gof->temporal_idx[gof_idx];
+    if (temporal_idx >= kMaxTemporalLayers) {
+      RTC_LOG(LS_WARNING) << "At most " << kMaxTemporalLayers
+                          << " temporal "
+                             "layers are supported.";
+      return;
+    }
+
+    missing_frames_for_layer_[temporal_idx].erase(picture_id);
+  }
+}
+
+bool RtpVp9RefFinder::UpSwitchInIntervalVp9(uint16_t picture_id,
+                                            uint8_t temporal_idx,
+                                            uint16_t pid_ref) {
+  for (auto up_switch_it = up_switch_.upper_bound(pid_ref);
+       up_switch_it != up_switch_.end() &&
+       AheadOf<uint16_t, kFrameIdLength>(picture_id, up_switch_it->first);
+       ++up_switch_it) {
+    if (up_switch_it->second < temporal_idx)
+      return true;
+  }
+
+  return false;
+}
+
+void RtpVp9RefFinder::RetryStashedFrames(
+    RtpFrameReferenceFinder::ReturnVector& res) {
+  bool complete_frame = false;
+  do {
+    complete_frame = false;
+    for (auto frame_it = stashed_frames_.begin();
+         frame_it != stashed_frames_.end();) {
+      FrameDecision decision = ManageFrameInternal(frame_it->get());
+
+      switch (decision) {
+        case kStash:
+          ++frame_it;
+          break;
+        case kHandOff:
+          complete_frame = true;
+          res.push_back(std::move(*frame_it));
+          ABSL_FALLTHROUGH_INTENDED;
+        case
kDrop:
+          frame_it = stashed_frames_.erase(frame_it);
+      }
+    }
+  } while (complete_frame);
+}
+
+void RtpVp9RefFinder::FlattenFrameIdAndRefs(RtpFrameObject* frame,
+                                            bool inter_layer_predicted) {
+  for (size_t i = 0; i < frame->num_references; ++i) {
+    frame->references[i] =
+        unwrapper_.Unwrap(frame->references[i]) * kMaxSpatialLayers +
+        *frame->SpatialIndex();
+  }
+  frame->SetId(unwrapper_.Unwrap(frame->Id()) * kMaxSpatialLayers +
+               *frame->SpatialIndex());
+
+  if (inter_layer_predicted &&
+      frame->num_references + 1 <= EncodedFrame::kMaxFrameReferences) {
+    frame->references[frame->num_references] = frame->Id() - 1;
+    ++frame->num_references;
+  }
+}
+
+void RtpVp9RefFinder::ClearTo(uint16_t seq_num) {
+  auto it = stashed_frames_.begin();
+  while (it != stashed_frames_.end()) {
+    if (AheadOf<uint16_t>(seq_num, (*it)->first_seq_num())) {
+      it = stashed_frames_.erase(it);
+    } else {
+      ++it;
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/video_coding/rtp_vp9_ref_finder.h b/modules/video_coding/rtp_vp9_ref_finder.h
new file mode 100644
index 0000000000..81008fea88
--- /dev/null
+++ b/modules/video_coding/rtp_vp9_ref_finder.h
@@ -0,0 +1,100 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
+#define MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+
+#include "absl/container/inlined_vector.h"
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_frame_reference_finder.h"
+#include "rtc_base/numerics/sequence_number_util.h"
+
+namespace webrtc {
+
+class RtpVp9RefFinder {
+ public:
+  RtpVp9RefFinder() = default;
+
+  RtpFrameReferenceFinder::ReturnVector ManageFrame(
+      std::unique_ptr<RtpFrameObject> frame);
+  void ClearTo(uint16_t seq_num);
+
+ private:
+  static constexpr int kFrameIdLength = 1 << 15;
+  static constexpr int kMaxGofSaved = 50;
+  static constexpr int kMaxLayerInfo = 50;
+  static constexpr int kMaxNotYetReceivedFrames = 100;
+  static constexpr int kMaxStashedFrames = 100;
+  static constexpr int kMaxTemporalLayers = 5;
+
+  enum FrameDecision { kStash, kHandOff, kDrop };
+
+  struct GofInfo {
+    GofInfo(GofInfoVP9* gof, uint16_t last_picture_id)
+        : gof(gof), last_picture_id(last_picture_id) {}
+    GofInfoVP9* gof;
+    uint16_t last_picture_id;
+  };
+
+  FrameDecision ManageFrameInternal(RtpFrameObject* frame);
+  void RetryStashedFrames(RtpFrameReferenceFinder::ReturnVector& res);
+
+  bool MissingRequiredFrameVp9(uint16_t picture_id, const GofInfo& info);
+
+  void FrameReceivedVp9(uint16_t picture_id, GofInfo* info);
+  bool UpSwitchInIntervalVp9(uint16_t picture_id,
+                             uint8_t temporal_idx,
+                             uint16_t pid_ref);
+
+  void FlattenFrameIdAndRefs(RtpFrameObject* frame, bool inter_layer_predicted);
+
+  // Save the last picture id in order to detect when there is a gap in frames
+  // that have not yet been fully received.
+  int last_picture_id_ = -1;
+
+  // Frames that have been fully received but didn't have all the information
+  // needed to determine their references.
+  std::deque<std::unique_ptr<RtpFrameObject>> stashed_frames_;
+
+  // Where the current scalability structure is in the
+  // |scalability_structures_| array.
+  uint8_t current_ss_idx_ = 0;
+
+  // Holds received scalability structures.
+  std::array<GofInfoVP9, kMaxGofSaved> scalability_structures_;
+
+  // Holds the the Gof information for a given unwrapped TL0 picture index.
+  std::map<int64_t, GofInfo> gof_info_;
+
+  // Keep track of which picture id and which temporal layer that had the
+  // up switch flag set.
+  std::map<uint16_t, uint8_t, DescendingSeqNumComp<uint16_t, kFrameIdLength>>
+      up_switch_;
+
+  // For every temporal layer, keep a set of which frames that are missing.
+  std::array<std::set<uint16_t, DescendingSeqNumComp<uint16_t, kFrameIdLength>>,
+             kMaxTemporalLayers>
+      missing_frames_for_layer_;
+
+  // Unwrapper used to unwrap VP8/VP9 streams which have their picture id
+  // specified.
+  SeqNumUnwrapper<uint16_t, kFrameIdLength> unwrapper_;
+
+  SeqNumUnwrapper<uint8_t> tl0_unwrapper_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_VIDEO_CODING_RTP_VP9_REF_FINDER_H_
diff --git a/modules/video_coding/rtp_vp9_ref_finder_unittest.cc b/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
new file mode 100644
index 0000000000..6de7ce106f
--- /dev/null
+++ b/modules/video_coding/rtp_vp9_ref_finder_unittest.cc
@@ -0,0 +1,705 @@
+/*
+ *  Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <algorithm>
+#include <vector>
+
+#include "modules/video_coding/frame_object.h"
+#include "modules/video_coding/rtp_vp9_ref_finder.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+
+using ::testing::Contains;
+using ::testing::Matcher;
+using ::testing::MatcherInterface;
+using ::testing::Matches;
+using ::testing::MatchResultListener;
+using ::testing::Pointee;
+using ::testing::Property;
+using ::testing::UnorderedElementsAreArray;
+
+namespace webrtc {
+
+namespace {
+class Frame {
+ public:
+  Frame& SeqNum(uint16_t start, uint16_t end) {
+    seq_num_start = start;
+    seq_num_end = end;
+    return *this;
+  }
+
+  Frame& AsKeyFrame(bool is_keyframe = true) {
+    keyframe = is_keyframe;
+    return *this;
+  }
+
+  Frame& Pid(int pid) {
+    picture_id = pid;
+    return *this;
+  }
+
+  Frame& SidAndTid(int sid, int tid) {
+    spatial_id = sid;
+    temporal_id = tid;
+    return *this;
+  }
+
+  Frame& Tl0(int tl0) {
+    tl0_idx = tl0;
+    return *this;
+  }
+
+  Frame& AsUpswitch(bool is_up = true) {
+    up_switch = is_up;
+    return *this;
+  }
+
+  Frame& AsInterLayer(bool is_inter_layer = true) {
+    inter_layer = is_inter_layer;
+    return *this;
+  }
+
+  Frame& NotAsInterPic(bool is_inter_pic = false) {
+    inter_pic = is_inter_pic;
+    return *this;
+  }
+
+  Frame& Gof(GofInfoVP9* ss) {
+    scalability_structure = ss;
+    return *this;
+  }
+
+  Frame& FlexRefs(const std::vector<uint8_t>& refs) {
+    flex_refs = refs;
+    return *this;
+  }
+
+  operator std::unique_ptr<RtpFrameObject>() {
+    RTPVideoHeaderVP9 vp9_header{};
+    vp9_header.picture_id = *picture_id;
+    vp9_header.temporal_idx = *temporal_id;
+    vp9_header.spatial_idx = *spatial_id;
+    if (tl0_idx.has_value()) {
+      RTC_DCHECK(flex_refs.empty());
+      vp9_header.flexible_mode = false;
+      vp9_header.tl0_pic_idx = *tl0_idx;
+    } else {
+      vp9_header.flexible_mode = true;
+      vp9_header.num_ref_pics = flex_refs.size();
+      for (size_t i = 0; i < flex_refs.size(); ++i) {
+        vp9_header.pid_diff[i] = flex_refs.at(i);
+      }
+    }
+    vp9_header.temporal_up_switch = up_switch;
+
vp9_header.inter_layer_predicted = inter_layer;
+    vp9_header.inter_pic_predicted = inter_pic && !keyframe;
+    if (scalability_structure != nullptr) {
+      vp9_header.ss_data_available = true;
+      vp9_header.gof = *scalability_structure;
+    }
+
+    RTPVideoHeader video_header;
+    video_header.frame_type = keyframe ? VideoFrameType::kVideoFrameKey
+                                       : VideoFrameType::kVideoFrameDelta;
+    video_header.video_type_header = vp9_header;
+    // clang-format off
+    return std::make_unique<RtpFrameObject>(
+        seq_num_start,
+        seq_num_end,
+        /*markerBit=*/true,
+        /*times_nacked=*/0,
+        /*first_packet_received_time=*/0,
+        /*last_packet_received_time=*/0,
+        /*rtp_timestamp=*/0,
+        /*ntp_time_ms=*/0,
+        VideoSendTiming(),
+        /*payload_type=*/0,
+        kVideoCodecVP9,
+        kVideoRotation_0,
+        VideoContentType::UNSPECIFIED,
+        video_header,
+        /*color_space=*/absl::nullopt,
+        RtpPacketInfos(),
+        EncodedImageBuffer::Create(/*size=*/0));
+    // clang-format on
+  }
+
+ private:
+  uint16_t seq_num_start = 0;
+  uint16_t seq_num_end = 0;
+  bool keyframe = false;
+  absl::optional<int> picture_id;
+  absl::optional<int> spatial_id;
+  absl::optional<int> temporal_id;
+  absl::optional<int> tl0_idx;
+  bool up_switch = false;
+  bool inter_layer = false;
+  bool inter_pic = true;
+  GofInfoVP9* scalability_structure = nullptr;
+  std::vector<uint8_t> flex_refs;
+};
+
+using FrameVector = std::vector<std::unique_ptr<EncodedFrame>>;
+
+// Would have been nice to use the MATCHER_P3 macro instead, but when used it
+// fails to infer the type of the vector if not explicitly given in the
+class HasFrameMatcher : public MatcherInterface<const FrameVector&> {
+ public:
+  explicit HasFrameMatcher(int64_t frame_id,
+                           const std::vector<int64_t>& expected_refs)
+      : frame_id_(frame_id),
+        expected_refs_(expected_refs) {}
+
+  bool MatchAndExplain(const FrameVector& frames,
+                       MatchResultListener* result_listener) const override {
+    auto it = std::find_if(frames.begin(), frames.end(),
+                           [this](const std::unique_ptr<EncodedFrame>& f) {
+                             return f->Id() == frame_id_;
+                           });
+    if (it == frames.end()) {
+      if (result_listener->IsInterested()) {
+        *result_listener
<< "No frame with frame_id:" << frame_id_;
+      }
+      return false;
+    }
+
+    rtc::ArrayView<int64_t> actual_refs((*it)->references,
+                                        (*it)->num_references);
+    if (!Matches(UnorderedElementsAreArray(expected_refs_))(actual_refs)) {
+      if (result_listener->IsInterested()) {
+        *result_listener << "Frame with frame_id:" << frame_id_ << " and "
+                         << actual_refs.size() << " references { ";
+        for (auto r : actual_refs) {
+          *result_listener << r << " ";
+        }
+        *result_listener << "}";
+      }
+      return false;
+    }
+
+    return true;
+  }
+
+  void DescribeTo(std::ostream* os) const override {
+    *os << "frame with frame_id:" << frame_id_ << " and "
+        << expected_refs_.size() << " references { ";
+    for (auto r : expected_refs_) {
+      *os << r << " ";
+    }
+    *os << "}";
+  }
+
+ private:
+  const int64_t frame_id_;
+  const std::vector<int64_t> expected_refs_;
+};
+
+}  // namespace
+
+class RtpVp9RefFinderTest : public ::testing::Test {
+ protected:
+  RtpVp9RefFinderTest() : ref_finder_(std::make_unique<RtpVp9RefFinder>()) {}
+
+  void Insert(std::unique_ptr<RtpFrameObject> frame) {
+    for (auto& f : ref_finder_->ManageFrame(std::move(frame))) {
+      frames_.push_back(std::move(f));
+    }
+  }
+
+  std::unique_ptr<RtpVp9RefFinder> ref_finder_;
+  FrameVector frames_;
+};
+
+Matcher<const FrameVector&> HasFrameWithIdAndRefs(int64_t frame_id,
+                                                  std::vector<int64_t> refs) {
+  return MakeMatcher(new HasFrameMatcher(frame_id, refs));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofInsertOneFrame) {
+  GofInfoVP9 ss;
+  ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+  Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+
+  EXPECT_EQ(frames_.size(), 1UL);
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_0) {
+  GofInfoVP9 ss;
+  ss.SetGofInfoVP9(kTemporalStructureMode1);  // Only 1 spatial layer.
+
+  Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+  Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+
+  EXPECT_EQ(frames_.size(), 2UL);
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofSpatialLayers_2) {
+  GofInfoVP9 ss;
+  ss.SetGofInfoVP9(kTemporalStructureMode1);  // Only 1 spatial layer.
+
+  Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss));
+  Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1));
+  Insert(Frame().Pid(2).SidAndTid(1, 0).Tl0(1).NotAsInterPic());
+  Insert(Frame().Pid(3).SidAndTid(0, 0).Tl0(2));
+  Insert(Frame().Pid(3).SidAndTid(1, 0).Tl0(2));
+
+  EXPECT_EQ(frames_.size(), 5UL);
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10}));
+  EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11}));
+}
+
+TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_0) {
+  GofInfoVP9 ss;
+  ss.SetGofInfoVP9(kTemporalStructureMode1);  // Only 1 spatial layer.
+ + Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1)); + Insert(Frame().Pid(2).SidAndTid(1, 0).Tl0(1).NotAsInterPic()); + Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).AsKeyFrame().Gof(&ss)); + Insert(Frame().Pid(3).SidAndTid(0, 0).Tl0(2)); + Insert(Frame().Pid(3).SidAndTid(1, 0).Tl0(2)); + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(3)); + Insert(Frame().Pid(5).SidAndTid(1, 0).Tl0(4)); + Insert(Frame().Pid(4).SidAndTid(1, 0).Tl0(3)); + Insert(Frame().Pid(5).SidAndTid(0, 0).Tl0(4)); + + EXPECT_EQ(frames_.size(), 9UL); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {5})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {15})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(26, {21})); +} + +TEST_F(RtpVp9RefFinderTest, GofSkipFramesTemporalLayers_01) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode2); // 0101 pattern + + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0)); + // Skip GOF with tl0 1 + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2).AsKeyFrame().Gof(&ss)); + Insert(Frame().Pid(5).SidAndTid(0, 1).Tl0(2)); + // Skip GOF with tl0 3 + // Skip GOF with tl0 4 + Insert(Frame().Pid(10).SidAndTid(0, 0).Tl0(5).Gof(&ss)); + Insert(Frame().Pid(11).SidAndTid(0, 1).Tl0(5)); + + ASSERT_EQ(6UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50})); +} + 
+TEST_F(RtpVp9RefFinderTest, GofSkipFramesTemporalLayers_0212) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode3); // 02120212 pattern + + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0)); + Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0)); + + ASSERT_EQ(4UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10})); + + // Skip frames with tl0 = 1 + + Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2)); + Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2)); + Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2)); + + ASSERT_EQ(8UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50})); + + // Now insert frames with tl0 = 1 + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1).AsKeyFrame().Gof(&ss)); + Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1)); + + ASSERT_EQ(9UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {})); + + Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1)); + Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1)); + + ASSERT_EQ(12UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30})); +} + +TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_01) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode2); // 0101 pattern + + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + 
Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0)); + Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1)); + Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1)); + + ASSERT_EQ(4UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10})); +} + +TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_01) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode2); // 01 pattern + + Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0)); + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1)); + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2)); + Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1)); + Insert(Frame().Pid(5).SidAndTid(0, 1).Tl0(2)); + Insert(Frame().Pid(7).SidAndTid(0, 1).Tl0(3)); + Insert(Frame().Pid(6).SidAndTid(0, 0).Tl0(3)); + Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(4)); + Insert(Frame().Pid(9).SidAndTid(0, 1).Tl0(4)); + + ASSERT_EQ(10UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {30})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40})); +} + +TEST_F(RtpVp9RefFinderTest, GofTemporalLayers_0212) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern + + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(2).SidAndTid(0, 
1).Tl0(0)); + Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1)); + Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1)); + Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1)); + Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1)); + + ASSERT_EQ(8UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30})); +} + +TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_0212) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern + + Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0)); + Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1)); + Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1)); + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1)); + Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2)); + Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1)); + Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2)); + Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2)); + Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2)); + + ASSERT_EQ(12UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, 
{30})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50})); +} + +TEST_F(RtpVp9RefFinderTest, GofTemporalLayersUpSwitch_02120212) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode4); // 02120212 pattern + + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0)); + Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1)); + Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1)); + Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1).AsUpswitch()); + Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1)); + Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2).AsUpswitch()); + Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2)); + Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2)); + Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2).AsUpswitch()); + Insert(Frame().Pid(12).SidAndTid(0, 0).Tl0(3)); + Insert(Frame().Pid(13).SidAndTid(0, 2).Tl0(3)); + Insert(Frame().Pid(14).SidAndTid(0, 1).Tl0(3)); + Insert(Frame().Pid(15).SidAndTid(0, 2).Tl0(3)); + + ASSERT_EQ(16UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {5, 10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {15, 20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {10, 20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {45, 50})); + 
EXPECT_THAT(frames_, HasFrameWithIdAndRefs(60, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(65, {55, 60})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(70, {50, 60})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(75, {65, 70})); +} + +TEST_F(RtpVp9RefFinderTest, GofTemporalLayersUpSwitchReordered_02120212) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode4); // 02120212 pattern + + Insert(Frame().Pid(1).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(1)); + Insert(Frame().Pid(2).SidAndTid(0, 1).Tl0(0)); + Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(1)); + Insert(Frame().Pid(3).SidAndTid(0, 2).Tl0(0)); + Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(1)); + Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(2)); + Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(1).AsUpswitch()); + Insert(Frame().Pid(12).SidAndTid(0, 0).Tl0(3)); + Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(2)); + Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(2).AsUpswitch()); + Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(2).AsUpswitch()); + Insert(Frame().Pid(13).SidAndTid(0, 2).Tl0(3)); + Insert(Frame().Pid(15).SidAndTid(0, 2).Tl0(3)); + Insert(Frame().Pid(14).SidAndTid(0, 1).Tl0(3)); + + ASSERT_EQ(16UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {5, 10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {15, 20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {10, 20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {45, 
50})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(60, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(65, {55, 60})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(70, {50, 60})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(75, {65, 70})); +} + +TEST_F(RtpVp9RefFinderTest, GofTemporalLayersReordered_01_0212) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode2); // 01 pattern + + Insert(Frame().Pid(1).SidAndTid(0, 1).Tl0(0)); + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(3).SidAndTid(0, 1).Tl0(1)); + Insert(Frame().Pid(6).SidAndTid(0, 1).Tl0(2)); + ss.SetGofInfoVP9(kTemporalStructureMode3); // 0212 pattern + Insert(Frame().Pid(4).SidAndTid(0, 0).Tl0(2).Gof(&ss)); + Insert(Frame().Pid(2).SidAndTid(0, 0).Tl0(1)); + Insert(Frame().Pid(5).SidAndTid(0, 2).Tl0(2)); + Insert(Frame().Pid(8).SidAndTid(0, 0).Tl0(3)); + Insert(Frame().Pid(10).SidAndTid(0, 1).Tl0(3)); + Insert(Frame().Pid(7).SidAndTid(0, 2).Tl0(2)); + Insert(Frame().Pid(11).SidAndTid(0, 2).Tl0(3)); + Insert(Frame().Pid(9).SidAndTid(0, 2).Tl0(3)); + + ASSERT_EQ(12UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(15, {10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(25, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(30, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(35, {30})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(40, {20})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(45, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(50, {40})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(55, {50})); +} + +TEST_F(RtpVp9RefFinderTest, FlexibleModeOneFrame) { + Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame()); + + ASSERT_EQ(1UL, frames_.size()); + EXPECT_THAT(frames_, 
HasFrameWithIdAndRefs(0, {})); +} + +TEST_F(RtpVp9RefFinderTest, FlexibleModeTwoSpatialLayers) { + Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame()); + Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame().AsInterLayer()); + Insert(Frame().Pid(1).SidAndTid(1, 0).FlexRefs({1})); + Insert(Frame().Pid(2).SidAndTid(0, 0).FlexRefs({2})); + Insert(Frame().Pid(2).SidAndTid(1, 0).FlexRefs({1})); + Insert(Frame().Pid(3).SidAndTid(1, 0).FlexRefs({1})); + Insert(Frame().Pid(4).SidAndTid(0, 0).FlexRefs({2})); + Insert(Frame().Pid(4).SidAndTid(1, 0).FlexRefs({1})); + + ASSERT_EQ(8UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {1})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {6})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, {10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16})); +} + +TEST_F(RtpVp9RefFinderTest, FlexibleModeTwoSpatialLayersReordered) { + Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame().AsInterLayer()); + Insert(Frame().Pid(1).SidAndTid(1, 0).FlexRefs({1})); + Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame()); + Insert(Frame().Pid(2).SidAndTid(1, 0).FlexRefs({1})); + Insert(Frame().Pid(3).SidAndTid(1, 0).FlexRefs({1})); + Insert(Frame().Pid(2).SidAndTid(0, 0).FlexRefs({2})); + Insert(Frame().Pid(4).SidAndTid(1, 0).FlexRefs({1})); + Insert(Frame().Pid(4).SidAndTid(0, 0).FlexRefs({2})); + + ASSERT_EQ(8UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(1, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(6, {1})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(10, {0})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(11, {6})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(16, {11})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(20, 
{10})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(21, {16})); +} + +TEST_F(RtpVp9RefFinderTest, WrappingFlexReference) { + Insert(Frame().Pid(0).SidAndTid(0, 0).FlexRefs({1})); + + ASSERT_EQ(1UL, frames_.size()); + const EncodedFrame& frame = *frames_[0]; + + ASSERT_EQ(frame.Id() - frame.references[0], 5); +} + +TEST_F(RtpVp9RefFinderTest, GofPidJump) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode3); + + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(1000).SidAndTid(0, 0).Tl0(1)); +} + +TEST_F(RtpVp9RefFinderTest, GofTl0Jump) { + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode3); + + Insert(Frame() + .Pid(0) + .SidAndTid(0, 0) + .Tl0(125) + .AsUpswitch() + .AsKeyFrame() + .NotAsInterPic() + .Gof(&ss)); + Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(0).Gof(&ss)); +} + +TEST_F(RtpVp9RefFinderTest, GofTidTooHigh) { + const int kMaxTemporalLayers = 5; + GofInfoVP9 ss; + ss.SetGofInfoVP9(kTemporalStructureMode2); + ss.temporal_idx[1] = kMaxTemporalLayers; + + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(1)); + + ASSERT_EQ(1UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); +} + +TEST_F(RtpVp9RefFinderTest, GofZeroFrames) { + GofInfoVP9 ss; + ss.num_frames_in_gof = 0; + + Insert(Frame().Pid(0).SidAndTid(0, 0).Tl0(0).AsKeyFrame().NotAsInterPic().Gof( + &ss)); + Insert(Frame().Pid(1).SidAndTid(0, 0).Tl0(1)); + + ASSERT_EQ(2UL, frames_.size()); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(0, {})); + EXPECT_THAT(frames_, HasFrameWithIdAndRefs(5, {0})); +} + +TEST_F(RtpVp9RefFinderTest, SpatialIndex) { + Insert(Frame().Pid(0).SidAndTid(0, 0).AsKeyFrame()); + Insert(Frame().Pid(0).SidAndTid(1, 0).AsKeyFrame()); + Insert(Frame().Pid(0).SidAndTid(2, 0).AsKeyFrame()); + + ASSERT_EQ(3UL, frames_.size()); + EXPECT_THAT(frames_, + Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 
0)))); + EXPECT_THAT(frames_, + Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 1)))); + EXPECT_THAT(frames_, + Contains(Pointee(Property(&EncodedFrame::SpatialIndex, 2)))); +} + +} // namespace webrtc diff --git a/modules/video_coding/session_info.cc b/modules/video_coding/session_info.cc index e51d293607..477bbbe209 100644 --- a/modules/video_coding/session_info.cc +++ b/modules/video_coding/session_info.cc @@ -49,7 +49,7 @@ void VCMSessionInfo::UpdateDataPointers(const uint8_t* old_base_ptr, const uint8_t* new_base_ptr) { for (PacketIterator it = packets_.begin(); it != packets_.end(); ++it) if ((*it).dataPtr != NULL) { - assert(old_base_ptr != NULL && new_base_ptr != NULL); + RTC_DCHECK(old_base_ptr != NULL && new_base_ptr != NULL); (*it).dataPtr = new_base_ptr + ((*it).dataPtr - old_base_ptr); } } @@ -95,8 +95,6 @@ int VCMSessionInfo::TemporalId() const { return absl::get( packets_.front().video_header.video_type_header) .temporal_idx; - } else if (packets_.front().video_header.codec == kVideoCodecH264) { - return packets_.front().video_header.frame_marking.temporal_id; } else { return kNoTemporalIdx; } @@ -113,8 +111,6 @@ bool VCMSessionInfo::LayerSync() const { return absl::get( packets_.front().video_header.video_type_header) .temporal_up_switch; - } else if (packets_.front().video_header.codec == kVideoCodecH264) { - return packets_.front().video_header.frame_marking.base_layer_sync; } else { return false; } @@ -131,8 +127,6 @@ int VCMSessionInfo::Tl0PicId() const { return absl::get( packets_.front().video_header.video_type_header) .tl0_pic_idx; - } else if (packets_.front().video_header.codec == kVideoCodecH264) { - return packets_.front().video_header.frame_marking.tl0_pic_idx; } else { return kNoTl0PicIdx; } @@ -354,7 +348,7 @@ VCMSessionInfo::PacketIterator VCMSessionInfo::FindNextPartitionBeginning( VCMSessionInfo::PacketIterator VCMSessionInfo::FindPartitionEnd( PacketIterator it) const { - assert((*it).codec() == kVideoCodecVP8); + 
RTC_DCHECK_EQ((*it).codec(), kVideoCodecVP8); PacketIterator prev_it = it; const int partition_id = absl::get((*it).video_header.video_type_header) diff --git a/modules/video_coding/svc/BUILD.gn b/modules/video_coding/svc/BUILD.gn new file mode 100644 index 0000000000..2eb25025c1 --- /dev/null +++ b/modules/video_coding/svc/BUILD.gn @@ -0,0 +1,111 @@ +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../../webrtc.gni") + +rtc_source_set("scalable_video_controller") { + sources = [ + "scalable_video_controller.h", + "scalable_video_controller_no_layering.cc", + "scalable_video_controller_no_layering.h", + ] + deps = [ + "../../../api/transport/rtp:dependency_descriptor", + "../../../api/video:video_bitrate_allocation", + "../../../common_video/generic_frame_descriptor", + "../../../rtc_base:checks", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/container:inlined_vector", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_source_set("scalability_structures") { + sources = [ + "create_scalability_structure.cc", + "create_scalability_structure.h", + "scalability_structure_full_svc.cc", + "scalability_structure_full_svc.h", + "scalability_structure_key_svc.cc", + "scalability_structure_key_svc.h", + "scalability_structure_l2t2_key_shift.cc", + "scalability_structure_l2t2_key_shift.h", + "scalability_structure_simulcast.cc", + "scalability_structure_simulcast.h", + ] + deps = [ + ":scalable_video_controller", + "../../../api/transport/rtp:dependency_descriptor", + "../../../api/video:video_bitrate_allocation", + "../../../common_video/generic_frame_descriptor", + "../../../rtc_base:checks", + 
"../../../rtc_base:logging", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/base:core_headers", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_source_set("svc_rate_allocator") { + sources = [ + "svc_rate_allocator.cc", + "svc_rate_allocator.h", + ] + deps = [ + ":scalability_structures", + "../../../api/video:video_bitrate_allocation", + "../../../api/video:video_bitrate_allocator", + "../../../api/video:video_codec_constants", + "../../../api/video_codecs:video_codecs_api", + "../../../rtc_base:checks", + "../../../rtc_base/experiments:stable_target_rate_experiment", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector" ] +} + +if (rtc_include_tests) { + rtc_source_set("scalability_structure_tests") { + testonly = true + sources = [ + "scalability_structure_full_svc_unittest.cc", + "scalability_structure_key_svc_unittest.cc", + "scalability_structure_l2t2_key_shift_unittest.cc", + "scalability_structure_test_helpers.cc", + "scalability_structure_test_helpers.h", + "scalability_structure_unittest.cc", + ] + deps = [ + ":scalability_structures", + ":scalable_video_controller", + "..:chain_diff_calculator", + "..:frame_dependencies_calculator", + "../../../api:array_view", + "../../../api/transport/rtp:dependency_descriptor", + "../../../api/video:video_bitrate_allocation", + "../../../api/video:video_frame_type", + "../../../common_video/generic_frame_descriptor", + "../../../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + } + + rtc_source_set("svc_rate_allocator_tests") { + testonly = true + sources = [ "svc_rate_allocator_unittest.cc" ] + deps = [ + ":svc_rate_allocator", + "..:webrtc_vp9_helpers", + "../../../rtc_base:checks", + "../../../test:field_trial", + "../../../test:test_support", + ] + } +} diff --git a/modules/video_coding/svc/create_scalability_structure.cc b/modules/video_coding/svc/create_scalability_structure.cc new 
file mode 100644 index 0000000000..39710d82ff --- /dev/null +++ b/modules/video_coding/svc/create_scalability_structure.cc @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "modules/video_coding/svc/create_scalability_structure.h" + +#include + +#include "absl/strings/string_view.h" +#include "modules/video_coding/svc/scalability_structure_full_svc.h" +#include "modules/video_coding/svc/scalability_structure_key_svc.h" +#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h" +#include "modules/video_coding/svc/scalability_structure_simulcast.h" +#include "modules/video_coding/svc/scalable_video_controller.h" +#include "modules/video_coding/svc/scalable_video_controller_no_layering.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace { + +struct NamedStructureFactory { + absl::string_view name; + // Use function pointer to make NamedStructureFactory trivially destructible. + std::unique_ptr (*factory)(); +}; + +// Wrap std::make_unique function to have correct return type. 
+template +std::unique_ptr Create() { + return std::make_unique(); +} + +template +std::unique_ptr CreateH() { + // 1.5:1 scaling, see https://w3c.github.io/webrtc-svc/#scalabilitymodes* + typename T::ScalingFactor factor; + factor.num = 2; + factor.den = 3; + return std::make_unique(factor); +} + +constexpr NamedStructureFactory kFactories[] = { + {"NONE", Create}, + {"L1T2", Create}, + {"L1T3", Create}, + {"L2T1", Create}, + {"L2T1h", CreateH}, + {"L2T1_KEY", Create}, + {"L2T2", Create}, + {"L2T2_KEY", Create}, + {"L2T2_KEY_SHIFT", Create}, + {"L2T3_KEY", Create}, + {"L3T1", Create}, + {"L3T3", Create}, + {"L3T3_KEY", Create}, + {"S2T1", Create}, + {"S3T3", Create}, +}; + +} // namespace + +std::unique_ptr CreateScalabilityStructure( + absl::string_view name) { + RTC_DCHECK(!name.empty()); + for (const auto& entry : kFactories) { + if (entry.name == name) { + return entry.factory(); + } + } + return nullptr; +} + +} // namespace webrtc diff --git a/modules/video_coding/svc/create_scalability_structure.h b/modules/video_coding/svc/create_scalability_structure.h new file mode 100644 index 0000000000..9a14221fd2 --- /dev/null +++ b/modules/video_coding/svc/create_scalability_structure.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_ +#define MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_ + +#include +#include + +#include "absl/strings/string_view.h" +#include "modules/video_coding/svc/scalable_video_controller.h" + +namespace webrtc { + +// Creates a structure by name according to +// https://w3c.github.io/webrtc-svc/#scalabilitymodes* +// Returns nullptr for unknown name. +std::unique_ptr CreateScalabilityStructure( + absl::string_view name); + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_SVC_CREATE_SCALABILITY_STRUCTURE_H_ diff --git a/modules/video_coding/svc/scalability_structure_full_svc.cc b/modules/video_coding/svc/scalability_structure_full_svc.cc new file mode 100644 index 0000000000..b89de99330 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_full_svc.cc @@ -0,0 +1,398 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "modules/video_coding/svc/scalability_structure_full_svc.h" + +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +constexpr int ScalabilityStructureFullSvc::kMaxNumSpatialLayers; +constexpr int ScalabilityStructureFullSvc::kMaxNumTemporalLayers; +constexpr absl::string_view ScalabilityStructureFullSvc::kFramePatternNames[]; + +ScalabilityStructureFullSvc::ScalabilityStructureFullSvc( + int num_spatial_layers, + int num_temporal_layers, + ScalingFactor resolution_factor) + : num_spatial_layers_(num_spatial_layers), + num_temporal_layers_(num_temporal_layers), + resolution_factor_(resolution_factor), + active_decode_targets_( + (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) { + RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers); + RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers); +} + +ScalabilityStructureFullSvc::~ScalabilityStructureFullSvc() = default; + +ScalabilityStructureFullSvc::StreamLayersConfig +ScalabilityStructureFullSvc::StreamConfig() const { + StreamLayersConfig result; + result.num_spatial_layers = num_spatial_layers_; + result.num_temporal_layers = num_temporal_layers_; + result.scaling_factor_num[num_spatial_layers_ - 1] = 1; + result.scaling_factor_den[num_spatial_layers_ - 1] = 1; + for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) { + result.scaling_factor_num[sid - 1] = + resolution_factor_.num * result.scaling_factor_num[sid]; + result.scaling_factor_den[sid - 1] = + resolution_factor_.den * result.scaling_factor_den[sid]; + } + return result; +} + +bool ScalabilityStructureFullSvc::TemporalLayerIsActive(int tid) const { + if (tid >= num_temporal_layers_) { + return false; + } + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (DecodeTargetIsActive(sid, tid)) { + return true; + } + } + return false; +} 
+ +DecodeTargetIndication ScalabilityStructureFullSvc::Dti( + int sid, + int tid, + const LayerFrameConfig& config) { + if (sid < config.SpatialId() || tid < config.TemporalId()) { + return DecodeTargetIndication::kNotPresent; + } + if (sid == config.SpatialId()) { + if (tid == 0) { + RTC_DCHECK_EQ(config.TemporalId(), 0); + return DecodeTargetIndication::kSwitch; + } + if (tid == config.TemporalId()) { + return DecodeTargetIndication::kDiscardable; + } + if (tid > config.TemporalId()) { + RTC_DCHECK_GT(tid, config.TemporalId()); + return DecodeTargetIndication::kSwitch; + } + } + RTC_DCHECK_GT(sid, config.SpatialId()); + RTC_DCHECK_GE(tid, config.TemporalId()); + if (config.IsKeyframe() || config.Id() == kKey) { + return DecodeTargetIndication::kSwitch; + } + return DecodeTargetIndication::kRequired; +} + +ScalabilityStructureFullSvc::FramePattern +ScalabilityStructureFullSvc::NextPattern() const { + switch (last_pattern_) { + case kNone: + return kKey; + case kDeltaT2B: + return kDeltaT0; + case kDeltaT2A: + if (TemporalLayerIsActive(1)) { + return kDeltaT1; + } + return kDeltaT0; + case kDeltaT1: + if (TemporalLayerIsActive(2)) { + return kDeltaT2B; + } + return kDeltaT0; + case kKey: + case kDeltaT0: + if (TemporalLayerIsActive(2)) { + return kDeltaT2A; + } + if (TemporalLayerIsActive(1)) { + return kDeltaT1; + } + return kDeltaT0; + } + RTC_NOTREACHED(); + return kNone; +} + +std::vector +ScalabilityStructureFullSvc::NextFrameConfig(bool restart) { + std::vector configs; + if (active_decode_targets_.none()) { + last_pattern_ = kNone; + return configs; + } + configs.reserve(num_spatial_layers_); + + if (last_pattern_ == kNone || restart) { + can_reference_t0_frame_for_spatial_id_.reset(); + last_pattern_ = kNone; + } + FramePattern current_pattern = NextPattern(); + + absl::optional spatial_dependency_buffer_id; + switch (current_pattern) { + case kDeltaT0: + case kKey: + // Disallow temporal references cross T0 on higher temporal layers. 
+ can_reference_t1_frame_for_spatial_id_.reset(); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/0)) { + // Next frame from the spatial layer `sid` shouldn't depend on + // potentially old previous frame from the spatial layer `sid`. + can_reference_t0_frame_for_spatial_id_.reset(sid); + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = configs.back(); + config.Id(current_pattern).S(sid).T(0); + + if (spatial_dependency_buffer_id) { + config.Reference(*spatial_dependency_buffer_id); + } else if (current_pattern == kKey) { + config.Keyframe(); + } + + if (can_reference_t0_frame_for_spatial_id_[sid]) { + config.ReferenceAndUpdate(BufferIndex(sid, /*tid=*/0)); + } else { + // TODO(bugs.webrtc.org/11999): Propagate chain restart on delta frame + // to ChainDiffCalculator + config.Update(BufferIndex(sid, /*tid=*/0)); + } + + can_reference_t0_frame_for_spatial_id_.set(sid); + spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/0); + } + break; + case kDeltaT1: + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/1) || + !can_reference_t0_frame_for_spatial_id_[sid]) { + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = configs.back(); + config.Id(current_pattern).S(sid).T(1); + // Temporal reference. + config.Reference(BufferIndex(sid, /*tid=*/0)); + // Spatial reference unless this is the lowest active spatial layer. + if (spatial_dependency_buffer_id) { + config.Reference(*spatial_dependency_buffer_id); + } + // No frame reference top layer frame, so no need save it into a buffer. 
+ if (num_temporal_layers_ > 2 || sid < num_spatial_layers_ - 1) { + config.Update(BufferIndex(sid, /*tid=*/1)); + } + spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/1); + } + break; + case kDeltaT2A: + case kDeltaT2B: + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/2) || + !can_reference_t0_frame_for_spatial_id_[sid]) { + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = configs.back(); + config.Id(current_pattern).S(sid).T(2); + // Temporal reference. + if (current_pattern == kDeltaT2B && + can_reference_t1_frame_for_spatial_id_[sid]) { + config.Reference(BufferIndex(sid, /*tid=*/1)); + } else { + config.Reference(BufferIndex(sid, /*tid=*/0)); + } + // Spatial reference unless this is the lowest active spatial layer. + if (spatial_dependency_buffer_id) { + config.Reference(*spatial_dependency_buffer_id); + } + // No frame reference top layer frame, so no need save it into a buffer. + if (sid < num_spatial_layers_ - 1) { + config.Update(BufferIndex(sid, /*tid=*/2)); + } + spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/2); + } + break; + case kNone: + RTC_NOTREACHED(); + break; + } + + if (configs.empty() && !restart) { + RTC_LOG(LS_WARNING) << "Failed to generate configuration for L" + << num_spatial_layers_ << "T" << num_temporal_layers_ + << " with active decode targets " + << active_decode_targets_.to_string('-').substr( + active_decode_targets_.size() - + num_spatial_layers_ * num_temporal_layers_) + << " and transition from " + << kFramePatternNames[last_pattern_] << " to " + << kFramePatternNames[current_pattern] + << ". 
Resetting."; + return NextFrameConfig(/*restart=*/true); + } + + return configs; +} + +GenericFrameInfo ScalabilityStructureFullSvc::OnEncodeDone( + const LayerFrameConfig& config) { + // When encoder drops all frames for a temporal unit, it is better to reuse + // old temporal pattern rather than switch to next one, thus switch to next + // pattern defered here from the `NextFrameConfig`. + // In particular creating VP9 references rely on this behavior. + last_pattern_ = static_cast(config.Id()); + if (config.TemporalId() == 1) { + can_reference_t1_frame_for_spatial_id_.set(config.SpatialId()); + } + + GenericFrameInfo frame_info; + frame_info.spatial_id = config.SpatialId(); + frame_info.temporal_id = config.TemporalId(); + frame_info.encoder_buffers = config.Buffers(); + frame_info.decode_target_indications.reserve(num_spatial_layers_ * + num_temporal_layers_); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + for (int tid = 0; tid < num_temporal_layers_; ++tid) { + frame_info.decode_target_indications.push_back(Dti(sid, tid, config)); + } + } + if (config.TemporalId() == 0) { + frame_info.part_of_chain.resize(num_spatial_layers_); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + frame_info.part_of_chain[sid] = config.SpatialId() <= sid; + } + } else { + frame_info.part_of_chain.assign(num_spatial_layers_, false); + } + frame_info.active_decode_targets = active_decode_targets_; + return frame_info; +} + +void ScalabilityStructureFullSvc::OnRatesUpdated( + const VideoBitrateAllocation& bitrates) { + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + // Enable/disable spatial layers independetely. + bool active = true; + for (int tid = 0; tid < num_temporal_layers_; ++tid) { + // To enable temporal layer, require bitrates for lower temporal layers. 
+ active = active && bitrates.GetBitrate(sid, tid) > 0; + SetDecodeTargetIsActive(sid, tid, active); + } + } +} + +FrameDependencyStructure ScalabilityStructureL1T2::DependencyStructure() const { + FrameDependencyStructure structure; + structure.num_decode_targets = 2; + structure.num_chains = 1; + structure.decode_target_protected_by_chain = {0, 0}; + structure.templates.resize(3); + structure.templates[0].T(0).Dtis("SS").ChainDiffs({0}); + structure.templates[1].T(0).Dtis("SS").ChainDiffs({2}).FrameDiffs({2}); + structure.templates[2].T(1).Dtis("-D").ChainDiffs({1}).FrameDiffs({1}); + return structure; +} + +FrameDependencyStructure ScalabilityStructureL1T3::DependencyStructure() const { + FrameDependencyStructure structure; + structure.num_decode_targets = 3; + structure.num_chains = 1; + structure.decode_target_protected_by_chain = {0, 0, 0}; + structure.templates.resize(5); + structure.templates[0].T(0).Dtis("SSS").ChainDiffs({0}); + structure.templates[1].T(0).Dtis("SSS").ChainDiffs({4}).FrameDiffs({4}); + structure.templates[2].T(1).Dtis("-DS").ChainDiffs({2}).FrameDiffs({2}); + structure.templates[3].T(2).Dtis("--D").ChainDiffs({1}).FrameDiffs({1}); + structure.templates[4].T(2).Dtis("--D").ChainDiffs({3}).FrameDiffs({1}); + return structure; +} + +FrameDependencyStructure ScalabilityStructureL2T1::DependencyStructure() const { + FrameDependencyStructure structure; + structure.num_decode_targets = 2; + structure.num_chains = 2; + structure.decode_target_protected_by_chain = {0, 1}; + structure.templates.resize(4); + structure.templates[0].S(0).Dtis("SR").ChainDiffs({2, 1}).FrameDiffs({2}); + structure.templates[1].S(0).Dtis("SS").ChainDiffs({0, 0}); + structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({2, 1}); + structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({1}); + return structure; +} + +FrameDependencyStructure ScalabilityStructureL2T2::DependencyStructure() const { + FrameDependencyStructure structure; + 
structure.num_decode_targets = 4; + structure.num_chains = 2; + structure.decode_target_protected_by_chain = {0, 0, 1, 1}; + structure.templates.resize(6); + auto& templates = structure.templates; + templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0}); + templates[1].S(0).T(0).Dtis("SSRR").ChainDiffs({4, 3}).FrameDiffs({4}); + templates[2].S(0).T(1).Dtis("-D-R").ChainDiffs({2, 1}).FrameDiffs({2}); + templates[3].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1}); + templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({4, 1}); + templates[5].S(1).T(1).Dtis("---D").ChainDiffs({3, 2}).FrameDiffs({2, 1}); + return structure; +} + +FrameDependencyStructure ScalabilityStructureL3T1::DependencyStructure() const { + FrameDependencyStructure structure; + structure.num_decode_targets = 3; + structure.num_chains = 3; + structure.decode_target_protected_by_chain = {0, 1, 2}; + auto& templates = structure.templates; + templates.resize(6); + templates[0].S(0).Dtis("SRR").ChainDiffs({3, 2, 1}).FrameDiffs({3}); + templates[1].S(0).Dtis("SSS").ChainDiffs({0, 0, 0}); + templates[2].S(1).Dtis("-SR").ChainDiffs({1, 1, 1}).FrameDiffs({3, 1}); + templates[3].S(1).Dtis("-SS").ChainDiffs({1, 1, 1}).FrameDiffs({1}); + templates[4].S(2).Dtis("--S").ChainDiffs({2, 1, 1}).FrameDiffs({3, 1}); + templates[5].S(2).Dtis("--S").ChainDiffs({2, 1, 1}).FrameDiffs({1}); + return structure; +} + +FrameDependencyStructure ScalabilityStructureL3T3::DependencyStructure() const { + FrameDependencyStructure structure; + structure.num_decode_targets = 9; + structure.num_chains = 3; + structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2}; + auto& t = structure.templates; + t.resize(15); + // Templates are shown in the order frames following them appear in the + // stream, but in `structure.templates` array templates are sorted by + // (`spatial_id`, `temporal_id`) since that is a dependency descriptor + // requirement. Indexes are written in hex for nicer alignment. 
+ t[0x1].S(0).T(0).Dtis("SSSSSSSSS").ChainDiffs({0, 0, 0}); + t[0x6].S(1).T(0).Dtis("---SSSSSS").ChainDiffs({1, 1, 1}).FrameDiffs({1}); + t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({1}); + t[0x3].S(0).T(2).Dtis("--D--R--R").ChainDiffs({3, 2, 1}).FrameDiffs({3}); + t[0x8].S(1).T(2).Dtis("-----D--R").ChainDiffs({4, 3, 2}).FrameDiffs({3, 1}); + t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3, 1}); + t[0x2].S(0).T(1).Dtis("-DS-RR-RR").ChainDiffs({6, 5, 4}).FrameDiffs({6}); + t[0x7].S(1).T(1).Dtis("----DS-RR").ChainDiffs({7, 6, 5}).FrameDiffs({6, 1}); + t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6, 1}); + t[0x4].S(0).T(2).Dtis("--D--R--R").ChainDiffs({9, 8, 7}).FrameDiffs({3}); + t[0x9].S(1).T(2).Dtis("-----D--R").ChainDiffs({10, 9, 8}).FrameDiffs({3, 1}); + t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3, 1}); + t[0x0].S(0).T(0).Dtis("SSSRRRRRR").ChainDiffs({12, 11, 10}).FrameDiffs({12}); + t[0x5].S(1).T(0).Dtis("---SSSRRR").ChainDiffs({1, 1, 1}).FrameDiffs({12, 1}); + t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({12, 1}); + return structure; +} + +} // namespace webrtc diff --git a/modules/video_coding/svc/scalability_structure_full_svc.h b/modules/video_coding/svc/scalability_structure_full_svc.h new file mode 100644 index 0000000000..a3cad0af8a --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_full_svc.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_ +#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_ + +#include +#include + +#include "api/transport/rtp/dependency_descriptor.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" +#include "modules/video_coding/svc/scalable_video_controller.h" + +namespace webrtc { + +class ScalabilityStructureFullSvc : public ScalableVideoController { + public: + struct ScalingFactor { + int num = 1; + int den = 2; + }; + ScalabilityStructureFullSvc(int num_spatial_layers, + int num_temporal_layers, + ScalingFactor resolution_factor); + ~ScalabilityStructureFullSvc() override; + + StreamLayersConfig StreamConfig() const override; + + std::vector NextFrameConfig(bool restart) override; + GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override; + void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override; + + private: + enum FramePattern { + kNone, + kKey, + kDeltaT2A, + kDeltaT1, + kDeltaT2B, + kDeltaT0, + }; + static constexpr absl::string_view kFramePatternNames[] = { + "None", "Key", "DeltaT2A", "DeltaT1", "DeltaT2B", "DeltaT0"}; + static constexpr int kMaxNumSpatialLayers = 3; + static constexpr int kMaxNumTemporalLayers = 3; + + // Index of the buffer to store last frame for layer (`sid`, `tid`) + int BufferIndex(int sid, int tid) const { + return tid * num_spatial_layers_ + sid; + } + bool DecodeTargetIsActive(int sid, int tid) const { + return active_decode_targets_[sid * num_temporal_layers_ + tid]; + } + void SetDecodeTargetIsActive(int sid, int tid, bool value) { + active_decode_targets_.set(sid * num_temporal_layers_ + tid, value); + } + FramePattern NextPattern() const; + bool TemporalLayerIsActive(int tid) const; + static DecodeTargetIndication Dti(int sid, + int tid, + const LayerFrameConfig& frame); + + const int num_spatial_layers_; + const int num_temporal_layers_; + const ScalingFactor resolution_factor_; + + FramePattern 
last_pattern_ = kNone; + std::bitset can_reference_t0_frame_for_spatial_id_ = 0; + std::bitset can_reference_t1_frame_for_spatial_id_ = 0; + std::bitset<32> active_decode_targets_; +}; + +// T1 0 0 +// / / / ... +// T0 0---0---0-- +// Time-> 0 1 2 3 4 +class ScalabilityStructureL1T2 : public ScalabilityStructureFullSvc { + public: + explicit ScalabilityStructureL1T2(ScalingFactor resolution_factor = {}) + : ScalabilityStructureFullSvc(1, 2, resolution_factor) {} + ~ScalabilityStructureL1T2() override = default; + + FrameDependencyStructure DependencyStructure() const override; +}; + +// T2 0 0 0 0 +// | / | / +// T1 / 0 / 0 ... +// |_/ |_/ +// T0 0-------0------ +// Time-> 0 1 2 3 4 5 6 7 +class ScalabilityStructureL1T3 : public ScalabilityStructureFullSvc { + public: + explicit ScalabilityStructureL1T3(ScalingFactor resolution_factor = {}) + : ScalabilityStructureFullSvc(1, 3, resolution_factor) {} + ~ScalabilityStructureL1T3() override = default; + + FrameDependencyStructure DependencyStructure() const override; +}; + +// S1 0--0--0- +// | | | ... +// S0 0--0--0- +class ScalabilityStructureL2T1 : public ScalabilityStructureFullSvc { + public: + explicit ScalabilityStructureL2T1(ScalingFactor resolution_factor = {}) + : ScalabilityStructureFullSvc(2, 1, resolution_factor) {} + ~ScalabilityStructureL2T1() override = default; + + FrameDependencyStructure DependencyStructure() const override; +}; + +// S1T1 0 0 +// /| /| / +// S1T0 0-+-0-+-0 +// | | | | | ... +// S0T1 | 0 | 0 | +// |/ |/ |/ +// S0T0 0---0---0-- +// Time-> 0 1 2 3 4 +class ScalabilityStructureL2T2 : public ScalabilityStructureFullSvc { + public: + explicit ScalabilityStructureL2T2(ScalingFactor resolution_factor = {}) + : ScalabilityStructureFullSvc(2, 2, resolution_factor) {} + ~ScalabilityStructureL2T2() override = default; + + FrameDependencyStructure DependencyStructure() const override; +}; + +// S2 0-0-0- +// | | | +// S1 0-0-0-... 
+// | | | +// S0 0-0-0- +// Time-> 0 1 2 +class ScalabilityStructureL3T1 : public ScalabilityStructureFullSvc { + public: + explicit ScalabilityStructureL3T1(ScalingFactor resolution_factor = {}) + : ScalabilityStructureFullSvc(3, 1, resolution_factor) {} + ~ScalabilityStructureL3T1() override = default; + + FrameDependencyStructure DependencyStructure() const override; +}; + +// https://www.w3.org/TR/webrtc-svc/#L3T3* +class ScalabilityStructureL3T3 : public ScalabilityStructureFullSvc { + public: + explicit ScalabilityStructureL3T3(ScalingFactor resolution_factor = {}) + : ScalabilityStructureFullSvc(3, 3, resolution_factor) {} + ~ScalabilityStructureL3T3() override = default; + + FrameDependencyStructure DependencyStructure() const override; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_FULL_SVC_H_ diff --git a/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc b/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc new file mode 100644 index 0000000000..9ccbe21f75 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_full_svc_unittest.cc @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "modules/video_coding/svc/scalability_structure_full_svc.h" + +#include + +#include "modules/video_coding/svc/scalability_structure_test_helpers.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::testing::IsEmpty; +using ::testing::SizeIs; + +TEST(ScalabilityStructureL3T3Test, SkipS1T1FrameKeepsStructureValid) { + ScalabilityStructureL3T3 structure; + ScalabilityStructureWrapper wrapper(structure); + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3)); + auto frames = wrapper.GenerateFrames(/*num_temporal_units=*/1); + EXPECT_THAT(frames, SizeIs(2)); + EXPECT_EQ(frames[0].temporal_id, 0); + + frames = wrapper.GenerateFrames(/*num_temporal_units=*/1); + EXPECT_THAT(frames, SizeIs(2)); + EXPECT_EQ(frames[0].temporal_id, 2); + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/0)); + frames = wrapper.GenerateFrames(/*num_temporal_units=*/1); + EXPECT_THAT(frames, SizeIs(1)); + EXPECT_EQ(frames[0].temporal_id, 1); + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3)); + // Rely on checks inside GenerateFrames frame references are valid. + frames = wrapper.GenerateFrames(/*num_temporal_units=*/1); + EXPECT_THAT(frames, SizeIs(2)); + EXPECT_EQ(frames[0].temporal_id, 2); +} + +TEST(ScalabilityStructureL3T3Test, SkipT1FrameByEncoderKeepsReferencesValid) { + std::vector frames; + ScalabilityStructureL3T3 structure; + ScalabilityStructureWrapper wrapper(structure); + + // 1st 2 temporal units (T0 and T2) + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + // Simulate T1 frame dropped by the encoder, + // i.e. retrieve config, but skip calling OnEncodeDone. 
+ structure.NextFrameConfig(/*restart=*/false); + // one more temporal units (T2) + wrapper.GenerateFrames(/*num_temporal_units=*/1, frames); + + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +TEST(ScalabilityStructureL3T3Test, + SkippingFrameReusePreviousFrameConfiguration) { + std::vector frames; + ScalabilityStructureL3T3 structure; + ScalabilityStructureWrapper wrapper(structure); + + // 1st 2 temporal units (T0 and T2) + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + ASSERT_THAT(frames, SizeIs(6)); + ASSERT_EQ(frames[0].temporal_id, 0); + ASSERT_EQ(frames[3].temporal_id, 2); + + // Simulate a frame dropped by the encoder, + // i.e. retrieve config, but skip calling OnEncodeDone. + structure.NextFrameConfig(/*restart=*/false); + // two more temporal unit, expect temporal pattern continues + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + ASSERT_THAT(frames, SizeIs(12)); + // Expect temporal pattern continues as if there were no dropped frames. + EXPECT_EQ(frames[6].temporal_id, 1); + EXPECT_EQ(frames[9].temporal_id, 2); +} + +TEST(ScalabilityStructureL3T3Test, SwitchSpatialLayerBeforeT1Frame) { + ScalabilityStructureL3T3 structure; + ScalabilityStructureWrapper wrapper(structure); + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0)); + EXPECT_THAT(wrapper.GenerateFrames(1), SizeIs(1)); + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2)); + auto frames = wrapper.GenerateFrames(1); + ASSERT_THAT(frames, SizeIs(1)); + EXPECT_THAT(frames[0].frame_diffs, IsEmpty()); + EXPECT_EQ(frames[0].temporal_id, 0); +} + +} // namespace +} // namespace webrtc diff --git a/modules/video_coding/svc/scalability_structure_key_svc.cc b/modules/video_coding/svc/scalability_structure_key_svc.cc new file mode 100644 index 0000000000..1cee80e84b --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_key_svc.cc @@ -0,0 +1,377 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "modules/video_coding/svc/scalability_structure_key_svc.h" + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_bitrate_allocation.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" +#include "modules/video_coding/svc/scalable_video_controller.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +constexpr int ScalabilityStructureKeySvc::kMaxNumSpatialLayers; +constexpr int ScalabilityStructureKeySvc::kMaxNumTemporalLayers; + +ScalabilityStructureKeySvc::ScalabilityStructureKeySvc(int num_spatial_layers, + int num_temporal_layers) + : num_spatial_layers_(num_spatial_layers), + num_temporal_layers_(num_temporal_layers), + active_decode_targets_( + (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) { + // There is no point to use this structure without spatial scalability. 
+ RTC_DCHECK_GT(num_spatial_layers, 1); + RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers); + RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers); +} + +ScalabilityStructureKeySvc::~ScalabilityStructureKeySvc() = default; + +ScalableVideoController::StreamLayersConfig +ScalabilityStructureKeySvc::StreamConfig() const { + StreamLayersConfig result; + result.num_spatial_layers = num_spatial_layers_; + result.num_temporal_layers = num_temporal_layers_; + result.scaling_factor_num[num_spatial_layers_ - 1] = 1; + result.scaling_factor_den[num_spatial_layers_ - 1] = 1; + for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) { + result.scaling_factor_num[sid - 1] = 1; + result.scaling_factor_den[sid - 1] = 2 * result.scaling_factor_den[sid]; + } + return result; +} + +bool ScalabilityStructureKeySvc::TemporalLayerIsActive(int tid) const { + if (tid >= num_temporal_layers_) { + return false; + } + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (DecodeTargetIsActive(sid, tid)) { + return true; + } + } + return false; +} + +DecodeTargetIndication ScalabilityStructureKeySvc::Dti( + int sid, + int tid, + const LayerFrameConfig& config) { + if (config.IsKeyframe() || config.Id() == kKey) { + RTC_DCHECK_EQ(config.TemporalId(), 0); + return sid < config.SpatialId() ? DecodeTargetIndication::kNotPresent + : DecodeTargetIndication::kSwitch; + } + + if (sid != config.SpatialId() || tid < config.TemporalId()) { + return DecodeTargetIndication::kNotPresent; + } + if (tid == config.TemporalId() && tid > 0) { + return DecodeTargetIndication::kDiscardable; + } + return DecodeTargetIndication::kSwitch; +} + +std::vector +ScalabilityStructureKeySvc::KeyframeConfig() { + std::vector configs; + configs.reserve(num_spatial_layers_); + absl::optional spatial_dependency_buffer_id; + spatial_id_is_enabled_.reset(); + // Disallow temporal references cross T0 on higher temporal layers. 
+ can_reference_t1_frame_for_spatial_id_.reset(); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/0)) { + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = configs.back(); + config.Id(kKey).S(sid).T(0); + + if (spatial_dependency_buffer_id) { + config.Reference(*spatial_dependency_buffer_id); + } else { + config.Keyframe(); + } + config.Update(BufferIndex(sid, /*tid=*/0)); + + spatial_id_is_enabled_.set(sid); + spatial_dependency_buffer_id = BufferIndex(sid, /*tid=*/0); + } + return configs; +} + +std::vector +ScalabilityStructureKeySvc::T0Config() { + std::vector configs; + configs.reserve(num_spatial_layers_); + // Disallow temporal references cross T0 on higher temporal layers. + can_reference_t1_frame_for_spatial_id_.reset(); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/0)) { + spatial_id_is_enabled_.reset(sid); + continue; + } + configs.emplace_back(); + configs.back().Id(kDeltaT0).S(sid).T(0).ReferenceAndUpdate( + BufferIndex(sid, /*tid=*/0)); + } + return configs; +} + +std::vector +ScalabilityStructureKeySvc::T1Config() { + std::vector configs; + configs.reserve(num_spatial_layers_); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/1)) { + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = configs.back(); + config.Id(kDeltaT1).S(sid).T(1).Reference(BufferIndex(sid, /*tid=*/0)); + if (num_temporal_layers_ > 2) { + config.Update(BufferIndex(sid, /*tid=*/1)); + } + } + return configs; +} + +std::vector +ScalabilityStructureKeySvc::T2Config(FramePattern pattern) { + std::vector configs; + configs.reserve(num_spatial_layers_); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/2)) { + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = 
configs.back(); + config.Id(pattern).S(sid).T(2); + if (can_reference_t1_frame_for_spatial_id_[sid]) { + config.Reference(BufferIndex(sid, /*tid=*/1)); + } else { + config.Reference(BufferIndex(sid, /*tid=*/0)); + } + } + return configs; +} + +ScalabilityStructureKeySvc::FramePattern +ScalabilityStructureKeySvc::NextPattern(FramePattern last_pattern) const { + switch (last_pattern) { + case kNone: + return kKey; + case kDeltaT2B: + return kDeltaT0; + case kDeltaT2A: + if (TemporalLayerIsActive(1)) { + return kDeltaT1; + } + return kDeltaT0; + case kDeltaT1: + if (TemporalLayerIsActive(2)) { + return kDeltaT2B; + } + return kDeltaT0; + case kDeltaT0: + case kKey: + if (TemporalLayerIsActive(2)) { + return kDeltaT2A; + } + if (TemporalLayerIsActive(1)) { + return kDeltaT1; + } + return kDeltaT0; + } + RTC_NOTREACHED(); + return kNone; +} + +std::vector +ScalabilityStructureKeySvc::NextFrameConfig(bool restart) { + if (active_decode_targets_.none()) { + last_pattern_ = kNone; + return {}; + } + + if (restart) { + last_pattern_ = kNone; + } + + FramePattern current_pattern = NextPattern(last_pattern_); + switch (current_pattern) { + case kKey: + return KeyframeConfig(); + case kDeltaT0: + return T0Config(); + case kDeltaT1: + return T1Config(); + case kDeltaT2A: + case kDeltaT2B: + return T2Config(current_pattern); + case kNone: + break; + } + RTC_NOTREACHED(); + return {}; +} + +GenericFrameInfo ScalabilityStructureKeySvc::OnEncodeDone( + const LayerFrameConfig& config) { + // When encoder drops all frames for a temporal unit, it is better to reuse + // old temporal pattern rather than switch to next one, thus switch to next + // pattern defered here from the `NextFrameConfig`. + // In particular creating VP9 references rely on this behavior. 
+ last_pattern_ = static_cast(config.Id()); + if (config.TemporalId() == 1) { + can_reference_t1_frame_for_spatial_id_.set(config.SpatialId()); + } + + GenericFrameInfo frame_info; + frame_info.spatial_id = config.SpatialId(); + frame_info.temporal_id = config.TemporalId(); + frame_info.encoder_buffers = config.Buffers(); + frame_info.decode_target_indications.reserve(num_spatial_layers_ * + num_temporal_layers_); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + for (int tid = 0; tid < num_temporal_layers_; ++tid) { + frame_info.decode_target_indications.push_back(Dti(sid, tid, config)); + } + } + frame_info.part_of_chain.assign(num_spatial_layers_, false); + if (config.IsKeyframe() || config.Id() == kKey) { + RTC_DCHECK_EQ(config.TemporalId(), 0); + for (int sid = config.SpatialId(); sid < num_spatial_layers_; ++sid) { + frame_info.part_of_chain[sid] = true; + } + } else if (config.TemporalId() == 0) { + frame_info.part_of_chain[config.SpatialId()] = true; + } + frame_info.active_decode_targets = active_decode_targets_; + return frame_info; +} + +void ScalabilityStructureKeySvc::OnRatesUpdated( + const VideoBitrateAllocation& bitrates) { + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + // Enable/disable spatial layers independetely. + bool active = bitrates.GetBitrate(sid, /*tid=*/0) > 0; + SetDecodeTargetIsActive(sid, /*tid=*/0, active); + if (!spatial_id_is_enabled_[sid] && active) { + // Key frame is required to reenable any spatial layer. + last_pattern_ = kNone; + } + + for (int tid = 1; tid < num_temporal_layers_; ++tid) { + // To enable temporal layer, require bitrates for lower temporal layers. 
+ active = active && bitrates.GetBitrate(sid, tid) > 0; + SetDecodeTargetIsActive(sid, tid, active); + } + } +} + +ScalabilityStructureL2T1Key::~ScalabilityStructureL2T1Key() = default; + +FrameDependencyStructure ScalabilityStructureL2T1Key::DependencyStructure() + const { + FrameDependencyStructure structure; + structure.num_decode_targets = 2; + structure.num_chains = 2; + structure.decode_target_protected_by_chain = {0, 1}; + structure.templates.resize(4); + structure.templates[0].S(0).Dtis("S-").ChainDiffs({2, 1}).FrameDiffs({2}); + structure.templates[1].S(0).Dtis("SS").ChainDiffs({0, 0}); + structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 2}).FrameDiffs({2}); + structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 1}).FrameDiffs({1}); + return structure; +} + +ScalabilityStructureL2T2Key::~ScalabilityStructureL2T2Key() = default; + +FrameDependencyStructure ScalabilityStructureL2T2Key::DependencyStructure() + const { + FrameDependencyStructure structure; + structure.num_decode_targets = 4; + structure.num_chains = 2; + structure.decode_target_protected_by_chain = {0, 0, 1, 1}; + structure.templates.resize(6); + auto& templates = structure.templates; + templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0}); + templates[1].S(0).T(0).Dtis("SS--").ChainDiffs({4, 3}).FrameDiffs({4}); + templates[2].S(0).T(1).Dtis("-D--").ChainDiffs({2, 1}).FrameDiffs({2}); + templates[3].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1}); + templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 4}).FrameDiffs({4}); + templates[5].S(1).T(1).Dtis("---D").ChainDiffs({3, 2}).FrameDiffs({2}); + return structure; +} + +ScalabilityStructureL2T3Key::~ScalabilityStructureL2T3Key() = default; + +FrameDependencyStructure ScalabilityStructureL2T3Key::DependencyStructure() + const { + FrameDependencyStructure structure; + structure.num_decode_targets = 6; + structure.num_chains = 2; + structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1}; + auto& templates = 
structure.templates; + templates.resize(10); + templates[0].S(0).T(0).Dtis("SSSSSS").ChainDiffs({0, 0}); + templates[1].S(0).T(0).Dtis("SSS---").ChainDiffs({8, 7}).FrameDiffs({8}); + templates[2].S(0).T(1).Dtis("-DS---").ChainDiffs({4, 3}).FrameDiffs({4}); + templates[3].S(0).T(2).Dtis("--D---").ChainDiffs({2, 1}).FrameDiffs({2}); + templates[4].S(0).T(2).Dtis("--D---").ChainDiffs({6, 5}).FrameDiffs({2}); + templates[5].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 1}).FrameDiffs({1}); + templates[6].S(1).T(0).Dtis("---SSS").ChainDiffs({1, 8}).FrameDiffs({8}); + templates[7].S(1).T(1).Dtis("----DS").ChainDiffs({5, 4}).FrameDiffs({4}); + templates[8].S(1).T(2).Dtis("-----D").ChainDiffs({3, 2}).FrameDiffs({2}); + templates[9].S(1).T(2).Dtis("-----D").ChainDiffs({7, 6}).FrameDiffs({2}); + return structure; +} + +ScalabilityStructureL3T3Key::~ScalabilityStructureL3T3Key() = default; + +FrameDependencyStructure ScalabilityStructureL3T3Key::DependencyStructure() + const { + FrameDependencyStructure structure; + structure.num_decode_targets = 9; + structure.num_chains = 3; + structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2}; + auto& t = structure.templates; + t.resize(15); + // Templates are shown in the order frames following them appear in the + // stream, but in `structure.templates` array templates are sorted by + // (`spatial_id`, `temporal_id`) since that is a dependency descriptor + // requirement. Indexes are written in hex for nicer alignment. 
+ t[0x0].S(0).T(0).Dtis("SSSSSSSSS").ChainDiffs({0, 0, 0}); + t[0x5].S(1).T(0).Dtis("---SSSSSS").ChainDiffs({1, 1, 1}).FrameDiffs({1}); + t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 1}).FrameDiffs({1}); + t[0x3].S(0).T(2).Dtis("--D------").ChainDiffs({3, 2, 1}).FrameDiffs({3}); + t[0x8].S(1).T(2).Dtis("-----D---").ChainDiffs({4, 3, 2}).FrameDiffs({3}); + t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3}); + t[0x2].S(0).T(1).Dtis("-DS------").ChainDiffs({6, 5, 4}).FrameDiffs({6}); + t[0x7].S(1).T(1).Dtis("----DS---").ChainDiffs({7, 6, 5}).FrameDiffs({6}); + t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6}); + t[0x4].S(0).T(2).Dtis("--D------").ChainDiffs({9, 8, 7}).FrameDiffs({3}); + t[0x9].S(1).T(2).Dtis("-----D---").ChainDiffs({10, 9, 8}).FrameDiffs({3}); + t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3}); + t[0x1].S(0).T(0).Dtis("SSS------").ChainDiffs({12, 11, 10}).FrameDiffs({12}); + t[0x6].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 12, 11}).FrameDiffs({12}); + t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 12}).FrameDiffs({12}); + return structure; +} + +} // namespace webrtc diff --git a/modules/video_coding/svc/scalability_structure_key_svc.h b/modules/video_coding/svc/scalability_structure_key_svc.h new file mode 100644 index 0000000000..b66f6f83e4 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_key_svc.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_ +#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_ + +#include +#include + +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_bitrate_allocation.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" +#include "modules/video_coding/svc/scalable_video_controller.h" + +namespace webrtc { + +class ScalabilityStructureKeySvc : public ScalableVideoController { + public: + ScalabilityStructureKeySvc(int num_spatial_layers, int num_temporal_layers); + ~ScalabilityStructureKeySvc() override; + + StreamLayersConfig StreamConfig() const override; + + std::vector NextFrameConfig(bool restart) override; + GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override; + void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override; + + private: + enum FramePattern : int { + kNone, + kKey, + kDeltaT0, + kDeltaT2A, + kDeltaT1, + kDeltaT2B, + }; + static constexpr int kMaxNumSpatialLayers = 3; + static constexpr int kMaxNumTemporalLayers = 3; + + // Index of the buffer to store last frame for layer (`sid`, `tid`) + int BufferIndex(int sid, int tid) const { + return tid * num_spatial_layers_ + sid; + } + bool DecodeTargetIsActive(int sid, int tid) const { + return active_decode_targets_[sid * num_temporal_layers_ + tid]; + } + void SetDecodeTargetIsActive(int sid, int tid, bool value) { + active_decode_targets_.set(sid * num_temporal_layers_ + tid, value); + } + bool TemporalLayerIsActive(int tid) const; + static DecodeTargetIndication Dti(int sid, + int tid, + const LayerFrameConfig& config); + + std::vector KeyframeConfig(); + std::vector T0Config(); + std::vector T1Config(); + std::vector T2Config(FramePattern pattern); + + FramePattern NextPattern(FramePattern last_pattern) const; + + const int num_spatial_layers_; + const int num_temporal_layers_; + + FramePattern last_pattern_ = kNone; + std::bitset spatial_id_is_enabled_; + 
std::bitset can_reference_t1_frame_for_spatial_id_; + std::bitset<32> active_decode_targets_; +}; + +// S1 0--0--0- +// | ... +// S0 0--0--0- +class ScalabilityStructureL2T1Key : public ScalabilityStructureKeySvc { + public: + ScalabilityStructureL2T1Key() : ScalabilityStructureKeySvc(2, 1) {} + ~ScalabilityStructureL2T1Key() override; + + FrameDependencyStructure DependencyStructure() const override; +}; + +// S1T1 0 0 +// / / / +// S1T0 0---0---0 +// | ... +// S0T1 | 0 0 +// |/ / / +// S0T0 0---0---0 +// Time-> 0 1 2 3 4 +class ScalabilityStructureL2T2Key : public ScalabilityStructureKeySvc { + public: + ScalabilityStructureL2T2Key() : ScalabilityStructureKeySvc(2, 2) {} + ~ScalabilityStructureL2T2Key() override; + + FrameDependencyStructure DependencyStructure() const override; +}; + +class ScalabilityStructureL2T3Key : public ScalabilityStructureKeySvc { + public: + ScalabilityStructureL2T3Key() : ScalabilityStructureKeySvc(2, 3) {} + ~ScalabilityStructureL2T3Key() override; + + FrameDependencyStructure DependencyStructure() const override; +}; + +class ScalabilityStructureL3T3Key : public ScalabilityStructureKeySvc { + public: + ScalabilityStructureL3T3Key() : ScalabilityStructureKeySvc(3, 3) {} + ~ScalabilityStructureL3T3Key() override; + + FrameDependencyStructure DependencyStructure() const override; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_KEY_SVC_H_ diff --git a/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc b/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc new file mode 100644 index 0000000000..5f923bb487 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_key_svc_unittest.cc @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "modules/video_coding/svc/scalability_structure_key_svc.h" + +#include + +#include "api/array_view.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" +#include "modules/video_coding/svc/scalability_structure_test_helpers.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::testing::ElementsAre; +using ::testing::IsEmpty; +using ::testing::SizeIs; + +TEST(ScalabilityStructureL3T3KeyTest, + SkipingT1FrameOnOneSpatialLayerKeepsStructureValid) { + ScalabilityStructureL3T3Key structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3)); + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + EXPECT_THAT(frames, SizeIs(4)); + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/1)); + wrapper.GenerateFrames(/*num_temporal_units=*/1, frames); + EXPECT_THAT(frames, SizeIs(5)); + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3)); + wrapper.GenerateFrames(/*num_temporal_units=*/1, frames); + ASSERT_THAT(frames, SizeIs(7)); + + EXPECT_EQ(frames[0].temporal_id, 0); + EXPECT_EQ(frames[1].temporal_id, 0); + EXPECT_EQ(frames[2].temporal_id, 2); + EXPECT_EQ(frames[3].temporal_id, 2); + EXPECT_EQ(frames[4].temporal_id, 1); + EXPECT_EQ(frames[5].temporal_id, 2); + EXPECT_EQ(frames[6].temporal_id, 2); + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +TEST(ScalabilityStructureL3T3KeyTest, + SkipT1FrameByEncoderKeepsReferencesValid) { + std::vector frames; + ScalabilityStructureL3T3Key structure; + ScalabilityStructureWrapper wrapper(structure); + + // 1st 2 temporal units (T0 and T2) + 
wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + // Simulate T1 frame dropped by the encoder, + // i.e. retrieve config, but skip calling OnEncodeDone. + structure.NextFrameConfig(/*restart=*/false); + // one more temporal unit. + wrapper.GenerateFrames(/*num_temporal_units=*/1, frames); + + EXPECT_THAT(frames, SizeIs(9)); + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +TEST(ScalabilityStructureL3T3KeyTest, + SkippingFrameReusePreviousFrameConfiguration) { + std::vector frames; + ScalabilityStructureL3T3Key structure; + ScalabilityStructureWrapper wrapper(structure); + + // 1st 2 temporal units (T0 and T2) + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + ASSERT_THAT(frames, SizeIs(6)); + ASSERT_EQ(frames[0].temporal_id, 0); + ASSERT_EQ(frames[3].temporal_id, 2); + + // Simulate a frame dropped by the encoder, + // i.e. retrieve config, but skip calling OnEncodeDone. + structure.NextFrameConfig(/*restart=*/false); + // two more temporal unit, expect temporal pattern continues + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + ASSERT_THAT(frames, SizeIs(12)); + // Expect temporal pattern continues as if there were no dropped frames. + EXPECT_EQ(frames[6].temporal_id, 1); + EXPECT_EQ(frames[9].temporal_id, 2); +} + +TEST(ScalabilityStructureL3T3KeyTest, SkippingKeyFrameTriggersNewKeyFrame) { + std::vector frames; + ScalabilityStructureL3T3Key structure; + ScalabilityStructureWrapper wrapper(structure); + + // Ask for a key frame config, but do not return any frames + structure.NextFrameConfig(/*restart=*/false); + + // Ask for more frames, expect they start with a key frame. 
+ wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + ASSERT_THAT(frames, SizeIs(6)); + ASSERT_EQ(frames[0].temporal_id, 0); + ASSERT_EQ(frames[3].temporal_id, 2); + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +TEST(ScalabilityStructureL3T3KeyTest, + SkippingT2FrameAndDisablingT2LayerProduceT1AsNextFrame) { + std::vector frames; + ScalabilityStructureL3T3Key structure; + ScalabilityStructureWrapper wrapper(structure); + + wrapper.GenerateFrames(/*num_temporal_units=*/1, frames); + // Ask for next (T2) frame config, but do not return any frames + auto config = structure.NextFrameConfig(/*restart=*/false); + ASSERT_THAT(config, Not(IsEmpty())); + ASSERT_EQ(config.front().TemporalId(), 2); + + // Disable T2 layer, + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2)); + // Expect instead of reusing unused config, T1 config is generated. + config = structure.NextFrameConfig(/*restart=*/false); + ASSERT_THAT(config, Not(IsEmpty())); + EXPECT_EQ(config.front().TemporalId(), 1); +} + +TEST(ScalabilityStructureL3T3KeyTest, EnableT2LayerWhileProducingT1Frame) { + std::vector frames; + ScalabilityStructureL3T3Key structure; + ScalabilityStructureWrapper wrapper(structure); + + // Disable T2 layer, + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2)); + + // Generate the key frame. + wrapper.GenerateFrames(/*num_temporal_units=*/1, frames); + ASSERT_THAT(frames, SizeIs(3)); + EXPECT_EQ(frames[0].temporal_id, 0); + + // Ask for next (T1) frame config, but do not return any frames yet. + auto config = structure.NextFrameConfig(/*restart=*/false); + ASSERT_THAT(config, Not(IsEmpty())); + ASSERT_EQ(config.front().TemporalId(), 1); + + // Reenable T2 layer. + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/3, /*s1=*/3, /*s2=*/3)); + + // Finish encoding previously requested config. 
+ for (auto layer_config : config) { + GenericFrameInfo info = structure.OnEncodeDone(layer_config); + EXPECT_EQ(info.temporal_id, 1); + frames.push_back(info); + } + ASSERT_THAT(frames, SizeIs(6)); + + // Generate more frames, expect T2 pattern resumes. + wrapper.GenerateFrames(/*num_temporal_units=*/4, frames); + ASSERT_THAT(frames, SizeIs(18)); + EXPECT_EQ(frames[6].temporal_id, 2); + EXPECT_EQ(frames[9].temporal_id, 0); + EXPECT_EQ(frames[12].temporal_id, 2); + EXPECT_EQ(frames[15].temporal_id, 1); + + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +TEST(ScalabilityStructureL3T3KeyTest, + ReenablingSpatialLayerBeforeMissedT0FrameDoesntTriggerAKeyFrame) { + ScalabilityStructureL3T3Key structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2)); + wrapper.GenerateFrames(1, frames); + EXPECT_THAT(frames, SizeIs(2)); + // Drop a spatial layer. + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0)); + wrapper.GenerateFrames(1, frames); + EXPECT_THAT(frames, SizeIs(3)); + // Reenable a spatial layer before T0 frame is encoded. + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2)); + wrapper.GenerateFrames(1, frames); + EXPECT_THAT(frames, SizeIs(5)); + + EXPECT_EQ(frames[0].temporal_id, 0); + EXPECT_EQ(frames[1].temporal_id, 0); + EXPECT_EQ(frames[2].temporal_id, 1); + EXPECT_EQ(frames[3].temporal_id, 0); + EXPECT_EQ(frames[4].temporal_id, 0); + EXPECT_THAT(frames[3].frame_diffs, SizeIs(1)); + EXPECT_THAT(frames[4].frame_diffs, SizeIs(1)); + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +TEST(ScalabilityStructureL3T3KeyTest, ReenablingSpatialLayerTriggersKeyFrame) { + ScalabilityStructureL3T3Key structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + // Start with all spatial layers enabled. 
+ structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2)); + wrapper.GenerateFrames(3, frames); + EXPECT_THAT(frames, SizeIs(9)); + // Drop a spatial layer. Two remaining spatial layers should just continue. + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0, /*s2=*/2)); + wrapper.GenerateFrames(2, frames); + EXPECT_THAT(frames, SizeIs(13)); + // Reenable spatial layer, expect a full restart. + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2, /*s2=*/2)); + wrapper.GenerateFrames(1, frames); + ASSERT_THAT(frames, SizeIs(16)); + + // First 3 temporal units with all spatial layers enabled. + EXPECT_EQ(frames[0].temporal_id, 0); + EXPECT_EQ(frames[3].temporal_id, 1); + EXPECT_EQ(frames[6].temporal_id, 0); + // 2 temporal units with spatial layer 1 disabled. + EXPECT_EQ(frames[9].spatial_id, 0); + EXPECT_EQ(frames[9].temporal_id, 1); + EXPECT_EQ(frames[10].spatial_id, 2); + EXPECT_EQ(frames[10].temporal_id, 1); + // T0 frames were encoded while spatial layer 1 is disabled. + EXPECT_EQ(frames[11].spatial_id, 0); + EXPECT_EQ(frames[11].temporal_id, 0); + EXPECT_EQ(frames[12].spatial_id, 2); + EXPECT_EQ(frames[12].temporal_id, 0); + // Key frame to reenable spatial layer 1. + EXPECT_THAT(frames[13].frame_diffs, IsEmpty()); + EXPECT_THAT(frames[14].frame_diffs, ElementsAre(1)); + EXPECT_THAT(frames[15].frame_diffs, ElementsAre(1)); + EXPECT_EQ(frames[13].temporal_id, 0); + EXPECT_EQ(frames[14].temporal_id, 0); + EXPECT_EQ(frames[15].temporal_id, 0); + auto all_frames = rtc::MakeArrayView(frames.data(), frames.size()); + EXPECT_TRUE(wrapper.FrameReferencesAreValid(all_frames.subview(0, 13))); + // Frames starting from the frame#13 should not reference any earlier frames. 
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid(all_frames.subview(13))); +} + +} // namespace +} // namespace webrtc diff --git a/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc b/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc new file mode 100644 index 0000000000..c53ff8f07b --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_l2t2_key_shift.cc @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h" + +#include +#include + +#include "absl/base/macros.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { + +DecodeTargetIndication +Dti(int sid, int tid, const ScalableVideoController::LayerFrameConfig& config) { + if (config.IsKeyframe()) { + RTC_DCHECK_EQ(config.TemporalId(), 0); + return sid < config.SpatialId() ? 
DecodeTargetIndication::kNotPresent + : DecodeTargetIndication::kSwitch; + } + + if (sid != config.SpatialId() || tid < config.TemporalId()) { + return DecodeTargetIndication::kNotPresent; + } + if (tid == config.TemporalId() && tid > 0) { + return DecodeTargetIndication::kDiscardable; + } + return DecodeTargetIndication::kSwitch; +} + +} // namespace + +constexpr int ScalabilityStructureL2T2KeyShift::kNumSpatialLayers; +constexpr int ScalabilityStructureL2T2KeyShift::kNumTemporalLayers; + +ScalabilityStructureL2T2KeyShift::~ScalabilityStructureL2T2KeyShift() = default; + +ScalableVideoController::StreamLayersConfig +ScalabilityStructureL2T2KeyShift::StreamConfig() const { + StreamLayersConfig result; + result.num_spatial_layers = 2; + result.num_temporal_layers = 2; + result.scaling_factor_num[0] = 1; + result.scaling_factor_den[0] = 2; + return result; +} + +FrameDependencyStructure ScalabilityStructureL2T2KeyShift::DependencyStructure() + const { + FrameDependencyStructure structure; + structure.num_decode_targets = 4; + structure.num_chains = 2; + structure.decode_target_protected_by_chain = {0, 0, 1, 1}; + structure.templates.resize(7); + auto& templates = structure.templates; + templates[0].S(0).T(0).Dtis("SSSS").ChainDiffs({0, 0}); + templates[1].S(0).T(0).Dtis("SS--").ChainDiffs({2, 1}).FrameDiffs({2}); + templates[2].S(0).T(0).Dtis("SS--").ChainDiffs({4, 1}).FrameDiffs({4}); + templates[3].S(0).T(1).Dtis("-D--").ChainDiffs({2, 3}).FrameDiffs({2}); + templates[4].S(1).T(0).Dtis("--SS").ChainDiffs({1, 1}).FrameDiffs({1}); + templates[5].S(1).T(0).Dtis("--SS").ChainDiffs({3, 4}).FrameDiffs({4}); + templates[6].S(1).T(1).Dtis("---D").ChainDiffs({1, 2}).FrameDiffs({2}); + return structure; +} + +std::vector +ScalabilityStructureL2T2KeyShift::NextFrameConfig(bool restart) { + std::vector configs; + configs.reserve(2); + if (restart) { + next_pattern_ = kKey; + } + + // Buffer0 keeps latest S0T0 frame, + // Buffer1 keeps latest S1T0 frame. 
+ switch (next_pattern_) { + case kKey: + if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) { + configs.emplace_back(); + configs.back().S(0).T(0).Update(0).Keyframe(); + } + if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) { + configs.emplace_back(); + configs.back().S(1).T(0).Update(1); + if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) { + configs.back().Reference(0); + } else { + configs.back().Keyframe(); + } + } + next_pattern_ = kDelta0; + break; + case kDelta0: + if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) { + configs.emplace_back(); + configs.back().S(0).T(0).ReferenceAndUpdate(0); + } + if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/1)) { + configs.emplace_back(); + configs.back().S(1).T(1).Reference(1); + } + if (configs.empty() && DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) { + configs.emplace_back(); + configs.back().S(1).T(0).ReferenceAndUpdate(1); + } + next_pattern_ = kDelta1; + break; + case kDelta1: + if (DecodeTargetIsActive(/*sid=*/0, /*tid=*/1)) { + configs.emplace_back(); + configs.back().S(0).T(1).Reference(0); + } + if (DecodeTargetIsActive(/*sid=*/1, /*tid=*/0)) { + configs.emplace_back(); + configs.back().S(1).T(0).ReferenceAndUpdate(1); + } + if (configs.empty() && DecodeTargetIsActive(/*sid=*/0, /*tid=*/0)) { + configs.emplace_back(); + configs.back().S(0).T(0).ReferenceAndUpdate(0); + } + next_pattern_ = kDelta0; + break; + } + + RTC_DCHECK(!configs.empty() || active_decode_targets_.none()); + return configs; +} + +GenericFrameInfo ScalabilityStructureL2T2KeyShift::OnEncodeDone( + const LayerFrameConfig& config) { + GenericFrameInfo frame_info; + frame_info.spatial_id = config.SpatialId(); + frame_info.temporal_id = config.TemporalId(); + frame_info.encoder_buffers = config.Buffers(); + for (int sid = 0; sid < kNumSpatialLayers; ++sid) { + for (int tid = 0; tid < kNumTemporalLayers; ++tid) { + frame_info.decode_target_indications.push_back(Dti(sid, tid, config)); + } + } + if (config.IsKeyframe()) { + frame_info.part_of_chain = {true, 
true}; + } else if (config.TemporalId() == 0) { + frame_info.part_of_chain = {config.SpatialId() == 0, + config.SpatialId() == 1}; + } else { + frame_info.part_of_chain = {false, false}; + } + return frame_info; +} + +void ScalabilityStructureL2T2KeyShift::OnRatesUpdated( + const VideoBitrateAllocation& bitrates) { + for (int sid = 0; sid < kNumSpatialLayers; ++sid) { + // Enable/disable spatial layers independetely. + bool active = bitrates.GetBitrate(sid, /*tid=*/0) > 0; + if (!DecodeTargetIsActive(sid, /*tid=*/0) && active) { + // Key frame is required to reenable any spatial layer. + next_pattern_ = kKey; + } + + SetDecodeTargetIsActive(sid, /*tid=*/0, active); + SetDecodeTargetIsActive(sid, /*tid=*/1, + active && bitrates.GetBitrate(sid, /*tid=*/1) > 0); + } +} + +} // namespace webrtc diff --git a/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h b/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h new file mode 100644 index 0000000000..26d1afcb29 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_l2t2_key_shift.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_ +#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_ + +#include + +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_bitrate_allocation.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" +#include "modules/video_coding/svc/scalable_video_controller.h" + +namespace webrtc { + +// S1T1 0 0 +// / / / +// S1T0 0---0---0 +// | ... 
+// S0T1 | 0 0 +// | / / +// S0T0 0-0---0-- +// Time-> 0 1 2 3 4 +class ScalabilityStructureL2T2KeyShift : public ScalableVideoController { + public: + ~ScalabilityStructureL2T2KeyShift() override; + + StreamLayersConfig StreamConfig() const override; + FrameDependencyStructure DependencyStructure() const override; + + std::vector NextFrameConfig(bool restart) override; + GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override; + void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override; + + private: + enum FramePattern { + kKey, + kDelta0, + kDelta1, + }; + + static constexpr int kNumSpatialLayers = 2; + static constexpr int kNumTemporalLayers = 2; + + bool DecodeTargetIsActive(int sid, int tid) const { + return active_decode_targets_[sid * kNumTemporalLayers + tid]; + } + void SetDecodeTargetIsActive(int sid, int tid, bool value) { + active_decode_targets_.set(sid * kNumTemporalLayers + tid, value); + } + + FramePattern next_pattern_ = kKey; + std::bitset<32> active_decode_targets_ = 0b1111; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_L2T2_KEY_SHIFT_H_ diff --git a/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc b/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc new file mode 100644 index 0000000000..40fecf1812 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_l2t2_key_shift_unittest.cc @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "modules/video_coding/svc/scalability_structure_l2t2_key_shift.h" + +#include + +#include "api/array_view.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" +#include "modules/video_coding/svc/scalability_structure_test_helpers.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::testing::ElementsAre; +using ::testing::IsEmpty; +using ::testing::SizeIs; + +// S1T1 3 7 +// / / +// S1T0 1---5---9 +// | +// S0T1 | 4 8 +// | / / +// S0T0 0-2---6 +// Time-> 0 1 2 3 4 +TEST(ScalabilityStructureL2T2KeyShiftTest, DecodeTargetsAreEnabledByDefault) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + wrapper.GenerateFrames(/*num_temporal_units=*/5, frames); + ASSERT_THAT(frames, SizeIs(10)); + + EXPECT_EQ(frames[0].spatial_id, 0); + EXPECT_EQ(frames[1].spatial_id, 1); + EXPECT_EQ(frames[2].spatial_id, 0); + EXPECT_EQ(frames[3].spatial_id, 1); + EXPECT_EQ(frames[4].spatial_id, 0); + EXPECT_EQ(frames[5].spatial_id, 1); + EXPECT_EQ(frames[6].spatial_id, 0); + EXPECT_EQ(frames[7].spatial_id, 1); + EXPECT_EQ(frames[8].spatial_id, 0); + EXPECT_EQ(frames[9].spatial_id, 1); + + // spatial_id = 0 has the temporal shift. + EXPECT_EQ(frames[0].temporal_id, 0); + EXPECT_EQ(frames[2].temporal_id, 0); + EXPECT_EQ(frames[4].temporal_id, 1); + EXPECT_EQ(frames[6].temporal_id, 0); + EXPECT_EQ(frames[8].temporal_id, 1); + + // spatial_id = 1 hasn't temporal shift. + EXPECT_EQ(frames[1].temporal_id, 0); + EXPECT_EQ(frames[3].temporal_id, 1); + EXPECT_EQ(frames[5].temporal_id, 0); + EXPECT_EQ(frames[7].temporal_id, 1); + EXPECT_EQ(frames[9].temporal_id, 0); + + // Key frame diff. 
+ EXPECT_THAT(frames[0].frame_diffs, IsEmpty()); + EXPECT_THAT(frames[1].frame_diffs, ElementsAre(1)); + // S0T0 frame diffs + EXPECT_THAT(frames[2].frame_diffs, ElementsAre(2)); + EXPECT_THAT(frames[6].frame_diffs, ElementsAre(4)); + // S1T0 frame diffs + EXPECT_THAT(frames[5].frame_diffs, ElementsAre(4)); + EXPECT_THAT(frames[9].frame_diffs, ElementsAre(4)); + // T1 frames refer T0 frame of same spatial layer which is 2 frame ids away. + EXPECT_THAT(frames[3].frame_diffs, ElementsAre(2)); + EXPECT_THAT(frames[4].frame_diffs, ElementsAre(2)); + EXPECT_THAT(frames[7].frame_diffs, ElementsAre(2)); + EXPECT_THAT(frames[8].frame_diffs, ElementsAre(2)); +} + +// S1T0 1---4---7 +// | +// S0T1 | 3 6 +// | / / +// S0T0 0-2---5-- +// Time-> 0 1 2 3 4 +TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS1T1Layer) { + ScalabilityStructureL2T2KeyShift structure; + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/1)); + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + wrapper.GenerateFrames(/*num_temporal_units=*/5, frames); + ASSERT_THAT(frames, SizeIs(8)); + + EXPECT_EQ(frames[0].spatial_id, 0); + EXPECT_EQ(frames[1].spatial_id, 1); + EXPECT_EQ(frames[2].spatial_id, 0); + EXPECT_EQ(frames[3].spatial_id, 0); + EXPECT_EQ(frames[4].spatial_id, 1); + EXPECT_EQ(frames[5].spatial_id, 0); + EXPECT_EQ(frames[6].spatial_id, 0); + EXPECT_EQ(frames[7].spatial_id, 1); + + // spatial_id = 0 has the temporal shift. + EXPECT_EQ(frames[0].temporal_id, 0); + EXPECT_EQ(frames[2].temporal_id, 0); + EXPECT_EQ(frames[3].temporal_id, 1); + EXPECT_EQ(frames[5].temporal_id, 0); + EXPECT_EQ(frames[6].temporal_id, 1); + + // spatial_id = 1 has single temporal layer. 
+ EXPECT_EQ(frames[1].temporal_id, 0); + EXPECT_EQ(frames[4].temporal_id, 0); + EXPECT_EQ(frames[5].temporal_id, 0); +} + +// S1T1 3 | +// / | +// S1T0 1---5+--7 +// | | +// S0T1 | 4| +// | / | +// S0T0 0-2--+6---8 +// Time-> 0 1 2 3 4 5 +TEST(ScalabilityStructureL2T2KeyShiftTest, DisableT1LayersAfterFewFrames) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + wrapper.GenerateFrames(/*num_temporal_units=*/3, frames); + EXPECT_THAT(frames, SizeIs(6)); + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/1)); + wrapper.GenerateFrames(/*num_temporal_units=*/3, frames); + ASSERT_THAT(frames, SizeIs(9)); + + // Skip validation before T1 was disabled as that is covered by the test + // where no layers are disabled. + EXPECT_EQ(frames[6].spatial_id, 0); + EXPECT_EQ(frames[7].spatial_id, 1); + EXPECT_EQ(frames[8].spatial_id, 0); + + EXPECT_EQ(frames[6].temporal_id, 0); + EXPECT_EQ(frames[7].temporal_id, 0); + EXPECT_EQ(frames[8].temporal_id, 0); + + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +// S1T1 1 3 +// / / +// S1T0 0---2 +// Time-> 0 1 2 3 4 5 +TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS0FromTheStart) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2)); + wrapper.GenerateFrames(/*num_temporal_units=*/4, frames); + EXPECT_THAT(frames, SizeIs(4)); + + EXPECT_EQ(frames[0].spatial_id, 1); + EXPECT_EQ(frames[1].spatial_id, 1); + EXPECT_EQ(frames[2].spatial_id, 1); + EXPECT_EQ(frames[3].spatial_id, 1); + + EXPECT_EQ(frames[0].temporal_id, 0); + EXPECT_EQ(frames[1].temporal_id, 1); + EXPECT_EQ(frames[2].temporal_id, 0); + EXPECT_EQ(frames[3].temporal_id, 1); + + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +// S1T1 3 |6 8 +// / / / +// S1T0 1---5+--7 +// | | +// S0T1 | 4| +// | / | +// S0T0 0-2 | +// Time-> 0 1 2 3 
4 5 +TEST(ScalabilityStructureL2T2KeyShiftTest, DisableS0AfterFewFrames) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + wrapper.GenerateFrames(/*num_temporal_units=*/3, frames); + EXPECT_THAT(frames, SizeIs(6)); + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/2)); + wrapper.GenerateFrames(/*num_temporal_units=*/3, frames); + ASSERT_THAT(frames, SizeIs(9)); + + // Expect frame[6] is delta frame. + EXPECT_THAT(frames[6].frame_diffs, ElementsAre(1)); + // Skip validation before S0 was disabled as that should be covered by + // test where no layers are disabled. + EXPECT_EQ(frames[6].spatial_id, 1); + EXPECT_EQ(frames[7].spatial_id, 1); + EXPECT_EQ(frames[8].spatial_id, 1); + + EXPECT_EQ(frames[6].temporal_id, 1); + EXPECT_EQ(frames[7].temporal_id, 0); + EXPECT_EQ(frames[8].temporal_id, 1); + + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +// S1T1 3| | 8 +// / | | / +// S1T0 1 | |6 +// | | || +// S0T1 | |4|| +// | / || +// S0T0 0-2| |5-7 +// Time-> 0 1 2 3 4 5 +TEST(ScalabilityStructureL2T2KeyShiftTest, ReenableS1TriggersKeyFrame) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + EXPECT_THAT(frames, SizeIs(4)); + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/0)); + wrapper.GenerateFrames(/*num_temporal_units=*/1, frames); + EXPECT_THAT(frames, SizeIs(5)); + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/2, /*s1=*/2)); + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + ASSERT_THAT(frames, SizeIs(9)); + + EXPECT_THAT(frames[4].spatial_id, 0); + EXPECT_THAT(frames[4].temporal_id, 1); + + // Expect frame[5] to be a key frame. 
+ EXPECT_TRUE(wrapper.FrameReferencesAreValid( + rtc::MakeArrayView(frames.data() + 5, 4))); + + EXPECT_THAT(frames[5].spatial_id, 0); + EXPECT_THAT(frames[6].spatial_id, 1); + EXPECT_THAT(frames[7].spatial_id, 0); + EXPECT_THAT(frames[8].spatial_id, 1); + + // S0 should do temporal shift after the key frame. + EXPECT_THAT(frames[5].temporal_id, 0); + EXPECT_THAT(frames[7].temporal_id, 0); + + // No temporal shift for the top spatial layer. + EXPECT_THAT(frames[6].temporal_id, 0); + EXPECT_THAT(frames[8].temporal_id, 1); +} + +TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS0T0FromTheStart) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/0)); + wrapper.GenerateFrames(/*num_temporal_units=*/3, frames); + ASSERT_THAT(frames, SizeIs(3)); + + EXPECT_EQ(frames[0].spatial_id, 0); + EXPECT_EQ(frames[1].spatial_id, 0); + EXPECT_EQ(frames[2].spatial_id, 0); + + EXPECT_EQ(frames[0].temporal_id, 0); + EXPECT_EQ(frames[1].temporal_id, 0); + EXPECT_EQ(frames[2].temporal_id, 0); + + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +// S1T1 3| +// / | +// S1T0 1 | +// | | +// S0T1 | | +// | | +// S0T0 0-2+4-5-6 +// Time-> 0 1 2 3 4 +TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS0T0AfterFewFrames) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + EXPECT_THAT(frames, SizeIs(4)); + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/1, /*s1=*/0)); + wrapper.GenerateFrames(/*num_temporal_units=*/3, frames); + ASSERT_THAT(frames, SizeIs(7)); + + EXPECT_EQ(frames[4].spatial_id, 0); + EXPECT_EQ(frames[5].spatial_id, 0); + EXPECT_EQ(frames[6].spatial_id, 0); + + EXPECT_EQ(frames[4].temporal_id, 0); + EXPECT_EQ(frames[5].temporal_id, 0); + EXPECT_EQ(frames[6].temporal_id, 0); + + 
EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS1T0FromTheStart) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/1)); + wrapper.GenerateFrames(/*num_temporal_units=*/3, frames); + ASSERT_THAT(frames, SizeIs(3)); + + EXPECT_EQ(frames[0].spatial_id, 1); + EXPECT_EQ(frames[1].spatial_id, 1); + EXPECT_EQ(frames[2].spatial_id, 1); + + EXPECT_EQ(frames[0].temporal_id, 0); + EXPECT_EQ(frames[1].temporal_id, 0); + EXPECT_EQ(frames[2].temporal_id, 0); + + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +// S1T1 3| +// / | +// S1T0 1--+4-5-6 +// | | +// S0T1 | | +// | | +// S0T0 0-2| +// Time-> 0 1 2 3 4 +TEST(ScalabilityStructureL2T2KeyShiftTest, EnableOnlyS1T0AfterFewFrames) { + ScalabilityStructureL2T2KeyShift structure; + ScalabilityStructureWrapper wrapper(structure); + std::vector frames; + + wrapper.GenerateFrames(/*num_temporal_units=*/2, frames); + EXPECT_THAT(frames, SizeIs(4)); + structure.OnRatesUpdated(EnableTemporalLayers(/*s0=*/0, /*s1=*/1)); + wrapper.GenerateFrames(/*num_temporal_units=*/3, frames); + ASSERT_THAT(frames, SizeIs(7)); + + EXPECT_EQ(frames[4].spatial_id, 1); + EXPECT_EQ(frames[5].spatial_id, 1); + EXPECT_EQ(frames[6].spatial_id, 1); + + EXPECT_EQ(frames[4].temporal_id, 0); + EXPECT_EQ(frames[5].temporal_id, 0); + EXPECT_EQ(frames[6].temporal_id, 0); + + EXPECT_TRUE(wrapper.FrameReferencesAreValid(frames)); +} + +} // namespace +} // namespace webrtc diff --git a/modules/video_coding/svc/scalability_structure_simulcast.cc b/modules/video_coding/svc/scalability_structure_simulcast.cc new file mode 100644 index 0000000000..c236066736 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_simulcast.cc @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "modules/video_coding/svc/scalability_structure_simulcast.h" + +#include +#include + +#include "absl/base/macros.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { + +DecodeTargetIndication +Dti(int sid, int tid, const ScalableVideoController::LayerFrameConfig& config) { + if (sid != config.SpatialId() || tid < config.TemporalId()) { + return DecodeTargetIndication::kNotPresent; + } + if (tid == 0) { + RTC_DCHECK_EQ(config.TemporalId(), 0); + return DecodeTargetIndication::kSwitch; + } + if (tid == config.TemporalId()) { + return DecodeTargetIndication::kDiscardable; + } + RTC_DCHECK_GT(tid, config.TemporalId()); + return DecodeTargetIndication::kSwitch; +} + +} // namespace + +constexpr int ScalabilityStructureSimulcast::kMaxNumSpatialLayers; +constexpr int ScalabilityStructureSimulcast::kMaxNumTemporalLayers; + +ScalabilityStructureSimulcast::ScalabilityStructureSimulcast( + int num_spatial_layers, + int num_temporal_layers) + : num_spatial_layers_(num_spatial_layers), + num_temporal_layers_(num_temporal_layers), + active_decode_targets_( + (uint32_t{1} << (num_spatial_layers * num_temporal_layers)) - 1) { + RTC_DCHECK_LE(num_spatial_layers, kMaxNumSpatialLayers); + RTC_DCHECK_LE(num_temporal_layers, kMaxNumTemporalLayers); +} + +ScalabilityStructureSimulcast::~ScalabilityStructureSimulcast() = default; + +ScalableVideoController::StreamLayersConfig +ScalabilityStructureSimulcast::StreamConfig() const { + StreamLayersConfig result; + result.num_spatial_layers = num_spatial_layers_; + result.num_temporal_layers = 
num_temporal_layers_; + result.scaling_factor_num[num_spatial_layers_ - 1] = 1; + result.scaling_factor_den[num_spatial_layers_ - 1] = 1; + for (int sid = num_spatial_layers_ - 1; sid > 0; --sid) { + result.scaling_factor_num[sid - 1] = 1; + result.scaling_factor_den[sid - 1] = 2 * result.scaling_factor_den[sid]; + } + return result; +} + +bool ScalabilityStructureSimulcast::TemporalLayerIsActive(int tid) const { + if (tid >= num_temporal_layers_) { + return false; + } + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (DecodeTargetIsActive(sid, tid)) { + return true; + } + } + return false; +} + +ScalabilityStructureSimulcast::FramePattern +ScalabilityStructureSimulcast::NextPattern() const { + switch (last_pattern_) { + case kNone: + case kDeltaT2B: + return kDeltaT0; + case kDeltaT2A: + if (TemporalLayerIsActive(1)) { + return kDeltaT1; + } + return kDeltaT0; + case kDeltaT1: + if (TemporalLayerIsActive(2)) { + return kDeltaT2B; + } + return kDeltaT0; + case kDeltaT0: + if (TemporalLayerIsActive(2)) { + return kDeltaT2A; + } + if (TemporalLayerIsActive(1)) { + return kDeltaT1; + } + return kDeltaT0; + } + RTC_NOTREACHED(); + return kDeltaT0; +} + +std::vector +ScalabilityStructureSimulcast::NextFrameConfig(bool restart) { + std::vector configs; + if (active_decode_targets_.none()) { + last_pattern_ = kNone; + return configs; + } + configs.reserve(num_spatial_layers_); + + if (last_pattern_ == kNone || restart) { + can_reference_t0_frame_for_spatial_id_.reset(); + last_pattern_ = kNone; + } + FramePattern current_pattern = NextPattern(); + + switch (current_pattern) { + case kDeltaT0: + // Disallow temporal references cross T0 on higher temporal layers. + can_reference_t1_frame_for_spatial_id_.reset(); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/0)) { + // Next frame from the spatial layer `sid` shouldn't depend on + // potentially old previous frame from the spatial layer `sid`. 
+ can_reference_t0_frame_for_spatial_id_.reset(sid); + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = configs.back(); + config.Id(current_pattern).S(sid).T(0); + + if (can_reference_t0_frame_for_spatial_id_[sid]) { + config.ReferenceAndUpdate(BufferIndex(sid, /*tid=*/0)); + } else { + config.Keyframe().Update(BufferIndex(sid, /*tid=*/0)); + } + can_reference_t0_frame_for_spatial_id_.set(sid); + } + break; + case kDeltaT1: + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/1) || + !can_reference_t0_frame_for_spatial_id_[sid]) { + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = configs.back(); + config.Id(current_pattern) + .S(sid) + .T(1) + .Reference(BufferIndex(sid, /*tid=*/0)); + // Save frame only if there is a higher temporal layer that may need it. + if (num_temporal_layers_ > 2) { + config.Update(BufferIndex(sid, /*tid=*/1)); + } + } + break; + case kDeltaT2A: + case kDeltaT2B: + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + if (!DecodeTargetIsActive(sid, /*tid=*/2) || + !can_reference_t0_frame_for_spatial_id_[sid]) { + continue; + } + configs.emplace_back(); + ScalableVideoController::LayerFrameConfig& config = configs.back(); + config.Id(current_pattern).S(sid).T(2); + if (can_reference_t1_frame_for_spatial_id_[sid]) { + config.Reference(BufferIndex(sid, /*tid=*/1)); + } else { + config.Reference(BufferIndex(sid, /*tid=*/0)); + } + } + break; + case kNone: + RTC_NOTREACHED(); + break; + } + + return configs; +} + +GenericFrameInfo ScalabilityStructureSimulcast::OnEncodeDone( + const LayerFrameConfig& config) { + last_pattern_ = static_cast(config.Id()); + if (config.TemporalId() == 1) { + can_reference_t1_frame_for_spatial_id_.set(config.SpatialId()); + } + GenericFrameInfo frame_info; + frame_info.spatial_id = config.SpatialId(); + frame_info.temporal_id = config.TemporalId(); + frame_info.encoder_buffers = 
config.Buffers(); + frame_info.decode_target_indications.reserve(num_spatial_layers_ * + num_temporal_layers_); + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + for (int tid = 0; tid < num_temporal_layers_; ++tid) { + frame_info.decode_target_indications.push_back(Dti(sid, tid, config)); + } + } + frame_info.part_of_chain.assign(num_spatial_layers_, false); + if (config.TemporalId() == 0) { + frame_info.part_of_chain[config.SpatialId()] = true; + } + frame_info.active_decode_targets = active_decode_targets_; + return frame_info; +} + +void ScalabilityStructureSimulcast::OnRatesUpdated( + const VideoBitrateAllocation& bitrates) { + for (int sid = 0; sid < num_spatial_layers_; ++sid) { + // Enable/disable spatial layers independetely. + bool active = true; + for (int tid = 0; tid < num_temporal_layers_; ++tid) { + // To enable temporal layer, require bitrates for lower temporal layers. + active = active && bitrates.GetBitrate(sid, tid) > 0; + SetDecodeTargetIsActive(sid, tid, active); + } + } +} + +FrameDependencyStructure ScalabilityStructureS2T1::DependencyStructure() const { + FrameDependencyStructure structure; + structure.num_decode_targets = 2; + structure.num_chains = 2; + structure.decode_target_protected_by_chain = {0, 1}; + structure.templates.resize(4); + structure.templates[0].S(0).Dtis("S-").ChainDiffs({2, 1}).FrameDiffs({2}); + structure.templates[1].S(0).Dtis("S-").ChainDiffs({0, 0}); + structure.templates[2].S(1).Dtis("-S").ChainDiffs({1, 2}).FrameDiffs({2}); + structure.templates[3].S(1).Dtis("-S").ChainDiffs({1, 0}); + return structure; +} + +FrameDependencyStructure ScalabilityStructureS3T3::DependencyStructure() const { + FrameDependencyStructure structure; + structure.num_decode_targets = 9; + structure.num_chains = 3; + structure.decode_target_protected_by_chain = {0, 0, 0, 1, 1, 1, 2, 2, 2}; + auto& t = structure.templates; + t.resize(15); + // Templates are shown in the order frames following them appear in the + // stream, but in 
`structure.templates` array templates are sorted by + // (`spatial_id`, `temporal_id`) since that is a dependency descriptor + // requirement. Indexes are written in hex for nicer alignment. + t[0x1].S(0).T(0).Dtis("SSS------").ChainDiffs({0, 0, 0}); + t[0x6].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 0, 0}); + t[0xB].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 0}); + t[0x3].S(0).T(2).Dtis("--D------").ChainDiffs({3, 2, 1}).FrameDiffs({3}); + t[0x8].S(1).T(2).Dtis("-----D---").ChainDiffs({4, 3, 2}).FrameDiffs({3}); + t[0xD].S(2).T(2).Dtis("--------D").ChainDiffs({5, 4, 3}).FrameDiffs({3}); + t[0x2].S(0).T(1).Dtis("-DS------").ChainDiffs({6, 5, 4}).FrameDiffs({6}); + t[0x7].S(1).T(1).Dtis("----DS---").ChainDiffs({7, 6, 5}).FrameDiffs({6}); + t[0xC].S(2).T(1).Dtis("-------DS").ChainDiffs({8, 7, 6}).FrameDiffs({6}); + t[0x4].S(0).T(2).Dtis("--D------").ChainDiffs({9, 8, 7}).FrameDiffs({3}); + t[0x9].S(1).T(2).Dtis("-----D---").ChainDiffs({10, 9, 8}).FrameDiffs({3}); + t[0xE].S(2).T(2).Dtis("--------D").ChainDiffs({11, 10, 9}).FrameDiffs({3}); + t[0x0].S(0).T(0).Dtis("SSS------").ChainDiffs({12, 11, 10}).FrameDiffs({12}); + t[0x5].S(1).T(0).Dtis("---SSS---").ChainDiffs({1, 12, 11}).FrameDiffs({12}); + t[0xA].S(2).T(0).Dtis("------SSS").ChainDiffs({2, 1, 12}).FrameDiffs({12}); + return structure; +} + +} // namespace webrtc diff --git a/modules/video_coding/svc/scalability_structure_simulcast.h b/modules/video_coding/svc/scalability_structure_simulcast.h new file mode 100644 index 0000000000..7b57df2985 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_simulcast.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_ +#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_ + +#include + +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_bitrate_allocation.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" +#include "modules/video_coding/svc/scalable_video_controller.h" + +namespace webrtc { + +// Scalability structure with multiple independent spatial layers each with the +// same temporal layering. +class ScalabilityStructureSimulcast : public ScalableVideoController { + public: + ScalabilityStructureSimulcast(int num_spatial_layers, + int num_temporal_layers); + ~ScalabilityStructureSimulcast() override; + + StreamLayersConfig StreamConfig() const override; + std::vector NextFrameConfig(bool restart) override; + GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override; + void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override; + + private: + enum FramePattern { + kNone, + kDeltaT2A, + kDeltaT1, + kDeltaT2B, + kDeltaT0, + }; + static constexpr int kMaxNumSpatialLayers = 3; + static constexpr int kMaxNumTemporalLayers = 3; + + // Index of the buffer to store last frame for layer (`sid`, `tid`) + int BufferIndex(int sid, int tid) const { + return tid * num_spatial_layers_ + sid; + } + bool DecodeTargetIsActive(int sid, int tid) const { + return active_decode_targets_[sid * num_temporal_layers_ + tid]; + } + void SetDecodeTargetIsActive(int sid, int tid, bool value) { + active_decode_targets_.set(sid * num_temporal_layers_ + tid, value); + } + FramePattern NextPattern() const; + bool TemporalLayerIsActive(int tid) const; + + const int num_spatial_layers_; + const int num_temporal_layers_; + + FramePattern last_pattern_ = kNone; + std::bitset can_reference_t0_frame_for_spatial_id_ = 0; + std::bitset 
can_reference_t1_frame_for_spatial_id_ = 0; + std::bitset<32> active_decode_targets_; +}; + +// S1 0--0--0- +// ... +// S0 0--0--0- +class ScalabilityStructureS2T1 : public ScalabilityStructureSimulcast { + public: + ScalabilityStructureS2T1() : ScalabilityStructureSimulcast(2, 1) {} + ~ScalabilityStructureS2T1() override = default; + + FrameDependencyStructure DependencyStructure() const override; +}; + +class ScalabilityStructureS3T3 : public ScalabilityStructureSimulcast { + public: + ScalabilityStructureS3T3() : ScalabilityStructureSimulcast(3, 3) {} + ~ScalabilityStructureS3T3() override = default; + + FrameDependencyStructure DependencyStructure() const override; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_SIMULCAST_H_ diff --git a/modules/video_coding/svc/scalability_structure_test_helpers.cc b/modules/video_coding/svc/scalability_structure_test_helpers.cc new file mode 100644 index 0000000000..aeb4d88f1a --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_test_helpers.cc @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "modules/video_coding/svc/scalability_structure_test_helpers.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_bitrate_allocation.h" +#include "modules/video_coding/chain_diff_calculator.h" +#include "modules/video_coding/frame_dependencies_calculator.h" +#include "modules/video_coding/svc/scalable_video_controller.h" +#include "test/gtest.h" + +namespace webrtc { + +VideoBitrateAllocation EnableTemporalLayers(int s0, int s1, int s2) { + VideoBitrateAllocation bitrate; + for (int tid = 0; tid < s0; ++tid) { + bitrate.SetBitrate(0, tid, 1'000'000); + } + for (int tid = 0; tid < s1; ++tid) { + bitrate.SetBitrate(1, tid, 1'000'000); + } + for (int tid = 0; tid < s2; ++tid) { + bitrate.SetBitrate(2, tid, 1'000'000); + } + return bitrate; +} + +void ScalabilityStructureWrapper::GenerateFrames( + int num_temporal_units, + std::vector& frames) { + for (int i = 0; i < num_temporal_units; ++i) { + for (auto& layer_frame : + structure_controller_.NextFrameConfig(/*restart=*/false)) { + int64_t frame_id = ++frame_id_; + bool is_keyframe = layer_frame.IsKeyframe(); + + GenericFrameInfo frame_info = + structure_controller_.OnEncodeDone(layer_frame); + if (is_keyframe) { + chain_diff_calculator_.Reset(frame_info.part_of_chain); + } + frame_info.chain_diffs = + chain_diff_calculator_.From(frame_id, frame_info.part_of_chain); + for (int64_t base_frame_id : frame_deps_calculator_.FromBuffersUsage( + frame_id, frame_info.encoder_buffers)) { + frame_info.frame_diffs.push_back(frame_id - base_frame_id); + } + + frames.push_back(std::move(frame_info)); + } + } +} + +bool ScalabilityStructureWrapper::FrameReferencesAreValid( + rtc::ArrayView frames) const { + bool valid = true; + // VP9 and AV1 supports up to 8 buffers. Expect no more buffers are not used. 
+ std::bitset<8> buffer_contains_frame; + for (size_t i = 0; i < frames.size(); ++i) { + const GenericFrameInfo& frame = frames[i]; + for (const CodecBufferUsage& buffer_usage : frame.encoder_buffers) { + if (buffer_usage.id < 0 || buffer_usage.id >= 8) { + ADD_FAILURE() << "Invalid buffer id " << buffer_usage.id + << " for frame#" << i + << ". Up to 8 buffers are supported."; + valid = false; + continue; + } + if (buffer_usage.referenced && !buffer_contains_frame[buffer_usage.id]) { + ADD_FAILURE() << "buffer " << buffer_usage.id << " for frame#" << i + << " was reference before updated."; + valid = false; + } + if (buffer_usage.updated) { + buffer_contains_frame.set(buffer_usage.id); + } + } + for (int fdiff : frame.frame_diffs) { + if (fdiff <= 0 || static_cast(fdiff) > i) { + ADD_FAILURE() << "Invalid frame diff " << fdiff << " for frame#" << i; + valid = false; + } + } + } + return valid; +} + +} // namespace webrtc diff --git a/modules/video_coding/svc/scalability_structure_test_helpers.h b/modules/video_coding/svc/scalability_structure_test_helpers.h new file mode 100644 index 0000000000..d183be4766 --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_test_helpers.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_ +#define MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_ + +#include + +#include + +#include "api/array_view.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_bitrate_allocation.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" +#include "modules/video_coding/chain_diff_calculator.h" +#include "modules/video_coding/frame_dependencies_calculator.h" +#include "modules/video_coding/svc/scalable_video_controller.h" + +namespace webrtc { + +// Creates bitrate allocation with non-zero bitrate for given number of temporal +// layers for each spatial layer. +VideoBitrateAllocation EnableTemporalLayers(int s0, int s1 = 0, int s2 = 0); + +class ScalabilityStructureWrapper { + public: + explicit ScalabilityStructureWrapper(ScalableVideoController& structure) + : structure_controller_(structure) {} + + std::vector GenerateFrames(int num_temporal_units) { + std::vector frames; + GenerateFrames(num_temporal_units, frames); + return frames; + } + void GenerateFrames(int num_temporal_units, + std::vector& frames); + + // Returns false and ADD_FAILUREs for frames with invalid references. + // In particular validates no frame frame reference to frame before frames[0]. + // In error messages frames are indexed starting with 0. 
+ bool FrameReferencesAreValid( + rtc::ArrayView frames) const; + + private: + ScalableVideoController& structure_controller_; + FrameDependenciesCalculator frame_deps_calculator_; + ChainDiffCalculator chain_diff_calculator_; + int64_t frame_id_ = 0; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_SVC_SCALABILITY_STRUCTURE_TEST_HELPERS_H_ diff --git a/modules/video_coding/svc/scalability_structure_unittest.cc b/modules/video_coding/svc/scalability_structure_unittest.cc new file mode 100644 index 0000000000..8bd933be5d --- /dev/null +++ b/modules/video_coding/svc/scalability_structure_unittest.cc @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "modules/video_coding/svc/create_scalability_structure.h" +#include "modules/video_coding/svc/scalability_structure_test_helpers.h" +#include "modules/video_coding/svc/scalable_video_controller.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::testing::AllOf; +using ::testing::Contains; +using ::testing::Each; +using ::testing::Field; +using ::testing::Ge; +using ::testing::IsEmpty; +using ::testing::Le; +using ::testing::Lt; +using ::testing::Not; +using ::testing::SizeIs; +using ::testing::TestWithParam; +using ::testing::Values; + +struct SvcTestParam { + friend std::ostream& operator<<(std::ostream& os, const SvcTestParam& param) { + return os << param.name; + } + + std::string name; + int num_temporal_units; +}; + +class ScalabilityStructureTest : public TestWithParam {}; + +TEST_P(ScalabilityStructureTest, + NumberOfDecodeTargetsAndChainsAreInRangeAndConsistent) { + FrameDependencyStructure structure = + CreateScalabilityStructure(GetParam().name)->DependencyStructure(); + EXPECT_GT(structure.num_decode_targets, 0); + EXPECT_LE(structure.num_decode_targets, + DependencyDescriptor::kMaxDecodeTargets); + EXPECT_GE(structure.num_chains, 0); + EXPECT_LE(structure.num_chains, structure.num_decode_targets); + if (structure.num_chains == 0) { + EXPECT_THAT(structure.decode_target_protected_by_chain, IsEmpty()); + } else { + EXPECT_THAT(structure.decode_target_protected_by_chain, + AllOf(SizeIs(structure.num_decode_targets), Each(Ge(0)), + Each(Lt(structure.num_chains)))); + } + EXPECT_THAT(structure.templates, + SizeIs(Lt(size_t{DependencyDescriptor::kMaxTemplates}))); +} + +TEST_P(ScalabilityStructureTest, TemplatesAreSortedByLayerId) { + FrameDependencyStructure structure = + CreateScalabilityStructure(GetParam().name)->DependencyStructure(); + 
ASSERT_THAT(structure.templates, Not(IsEmpty())); + const auto& first_templates = structure.templates.front(); + EXPECT_EQ(first_templates.spatial_id, 0); + EXPECT_EQ(first_templates.temporal_id, 0); + for (size_t i = 1; i < structure.templates.size(); ++i) { + const auto& prev_template = structure.templates[i - 1]; + const auto& next_template = structure.templates[i]; + if (next_template.spatial_id == prev_template.spatial_id && + next_template.temporal_id == prev_template.temporal_id) { + // Same layer, next_layer_idc == 0 + } else if (next_template.spatial_id == prev_template.spatial_id && + next_template.temporal_id == prev_template.temporal_id + 1) { + // Next temporal layer, next_layer_idc == 1 + } else if (next_template.spatial_id == prev_template.spatial_id + 1 && + next_template.temporal_id == 0) { + // Next spatial layer, next_layer_idc == 2 + } else { + // everything else is invalid. + ADD_FAILURE() << "Invalid templates order. Template #" << i + << " with layer (" << next_template.spatial_id << "," + << next_template.temporal_id + << ") follows template with layer (" + << prev_template.spatial_id << "," + << prev_template.temporal_id << ")."; + } + } +} + +TEST_P(ScalabilityStructureTest, TemplatesMatchNumberOfDecodeTargetsAndChains) { + FrameDependencyStructure structure = + CreateScalabilityStructure(GetParam().name)->DependencyStructure(); + EXPECT_THAT( + structure.templates, + Each(AllOf(Field(&FrameDependencyTemplate::decode_target_indications, + SizeIs(structure.num_decode_targets)), + Field(&FrameDependencyTemplate::chain_diffs, + SizeIs(structure.num_chains))))); +} + +TEST_P(ScalabilityStructureTest, FrameInfoMatchesFrameDependencyStructure) { + std::unique_ptr svc_controller = + CreateScalabilityStructure(GetParam().name); + FrameDependencyStructure structure = svc_controller->DependencyStructure(); + std::vector frame_infos = + ScalabilityStructureWrapper(*svc_controller) + .GenerateFrames(GetParam().num_temporal_units); + for (size_t 
frame_id = 0; frame_id < frame_infos.size(); ++frame_id) { + const auto& frame = frame_infos[frame_id]; + EXPECT_GE(frame.spatial_id, 0) << " for frame " << frame_id; + EXPECT_GE(frame.temporal_id, 0) << " for frame " << frame_id; + EXPECT_THAT(frame.decode_target_indications, + SizeIs(structure.num_decode_targets)) + << " for frame " << frame_id; + EXPECT_THAT(frame.part_of_chain, SizeIs(structure.num_chains)) + << " for frame " << frame_id; + } +} + +TEST_P(ScalabilityStructureTest, ThereIsAPerfectTemplateForEachFrame) { + std::unique_ptr svc_controller = + CreateScalabilityStructure(GetParam().name); + FrameDependencyStructure structure = svc_controller->DependencyStructure(); + std::vector frame_infos = + ScalabilityStructureWrapper(*svc_controller) + .GenerateFrames(GetParam().num_temporal_units); + for (size_t frame_id = 0; frame_id < frame_infos.size(); ++frame_id) { + EXPECT_THAT(structure.templates, Contains(frame_infos[frame_id])) + << " for frame " << frame_id; + } +} + +TEST_P(ScalabilityStructureTest, FrameDependsOnSameOrLowerLayer) { + std::unique_ptr svc_controller = + CreateScalabilityStructure(GetParam().name); + std::vector frame_infos = + ScalabilityStructureWrapper(*svc_controller) + .GenerateFrames(GetParam().num_temporal_units); + int64_t num_frames = frame_infos.size(); + + for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) { + const auto& frame = frame_infos[frame_id]; + for (int frame_diff : frame.frame_diffs) { + int64_t base_frame_id = frame_id - frame_diff; + const auto& base_frame = frame_infos[base_frame_id]; + EXPECT_GE(frame.spatial_id, base_frame.spatial_id) + << "Frame " << frame_id << " depends on frame " << base_frame_id; + EXPECT_GE(frame.temporal_id, base_frame.temporal_id) + << "Frame " << frame_id << " depends on frame " << base_frame_id; + } + } +} + +TEST_P(ScalabilityStructureTest, NoFrameDependsOnDiscardableOrNotPresent) { + std::unique_ptr svc_controller = + CreateScalabilityStructure(GetParam().name); + 
std::vector frame_infos = + ScalabilityStructureWrapper(*svc_controller) + .GenerateFrames(GetParam().num_temporal_units); + int64_t num_frames = frame_infos.size(); + FrameDependencyStructure structure = svc_controller->DependencyStructure(); + + for (int dt = 0; dt < structure.num_decode_targets; ++dt) { + for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) { + const auto& frame = frame_infos[frame_id]; + if (frame.decode_target_indications[dt] == + DecodeTargetIndication::kNotPresent) { + continue; + } + for (int frame_diff : frame.frame_diffs) { + int64_t base_frame_id = frame_id - frame_diff; + const auto& base_frame = frame_infos[base_frame_id]; + EXPECT_NE(base_frame.decode_target_indications[dt], + DecodeTargetIndication::kNotPresent) + << "Frame " << frame_id << " depends on frame " << base_frame_id + << " that is not part of decode target#" << dt; + EXPECT_NE(base_frame.decode_target_indications[dt], + DecodeTargetIndication::kDiscardable) + << "Frame " << frame_id << " depends on frame " << base_frame_id + << " that is discardable for decode target#" << dt; + } + } + } +} + +TEST_P(ScalabilityStructureTest, NoFrameDependsThroughSwitchIndication) { + std::unique_ptr svc_controller = + CreateScalabilityStructure(GetParam().name); + FrameDependencyStructure structure = svc_controller->DependencyStructure(); + std::vector frame_infos = + ScalabilityStructureWrapper(*svc_controller) + .GenerateFrames(GetParam().num_temporal_units); + int64_t num_frames = frame_infos.size(); + std::vector> full_deps(num_frames); + + // For each frame calculate set of all frames it depends on, both directly and + // indirectly. 
+ for (int64_t frame_id = 0; frame_id < num_frames; ++frame_id) { + std::set all_base_frames; + for (int frame_diff : frame_infos[frame_id].frame_diffs) { + int64_t base_frame_id = frame_id - frame_diff; + all_base_frames.insert(base_frame_id); + const auto& indirect = full_deps[base_frame_id]; + all_base_frames.insert(indirect.begin(), indirect.end()); + } + full_deps[frame_id] = std::move(all_base_frames); + } + + // Now check the switch indication: frames after the switch indication mustn't + // depend on any addition frames before the switch indications. + for (int dt = 0; dt < structure.num_decode_targets; ++dt) { + for (int64_t switch_frame_id = 0; switch_frame_id < num_frames; + ++switch_frame_id) { + if (frame_infos[switch_frame_id].decode_target_indications[dt] != + DecodeTargetIndication::kSwitch) { + continue; + } + for (int64_t later_frame_id = switch_frame_id + 1; + later_frame_id < num_frames; ++later_frame_id) { + if (frame_infos[later_frame_id].decode_target_indications[dt] == + DecodeTargetIndication::kNotPresent) { + continue; + } + for (int frame_diff : frame_infos[later_frame_id].frame_diffs) { + int64_t early_frame_id = later_frame_id - frame_diff; + if (early_frame_id < switch_frame_id) { + EXPECT_THAT(full_deps[switch_frame_id], Contains(early_frame_id)) + << "For decode target #" << dt << " frame " << later_frame_id + << " depends on the frame " << early_frame_id + << " that switch indication frame " << switch_frame_id + << " doesn't directly on indirectly depend on."; + } + } + } + } + } +} + +TEST_P(ScalabilityStructureTest, ProduceNoFrameForDisabledLayers) { + std::unique_ptr svc_controller = + CreateScalabilityStructure(GetParam().name); + ScalableVideoController::StreamLayersConfig structure = + svc_controller->StreamConfig(); + + VideoBitrateAllocation all_bitrates; + for (int sid = 0; sid < structure.num_spatial_layers; ++sid) { + for (int tid = 0; tid < structure.num_temporal_layers; ++tid) { + all_bitrates.SetBitrate(sid, tid, 
100'000); + } + } + + svc_controller->OnRatesUpdated(all_bitrates); + ScalabilityStructureWrapper wrapper(*svc_controller); + std::vector frames = + wrapper.GenerateFrames(GetParam().num_temporal_units); + + for (int sid = 0; sid < structure.num_spatial_layers; ++sid) { + for (int tid = 0; tid < structure.num_temporal_layers; ++tid) { + // When all layers were enabled, expect there was a frame for each layer. + EXPECT_THAT(frames, + Contains(AllOf(Field(&GenericFrameInfo::spatial_id, sid), + Field(&GenericFrameInfo::temporal_id, tid)))) + << "For layer (" << sid << "," << tid << ")"; + // Restore bitrates for all layers before disabling single layer. + VideoBitrateAllocation bitrates = all_bitrates; + bitrates.SetBitrate(sid, tid, 0); + svc_controller->OnRatesUpdated(bitrates); + // With layer (sid, tid) disabled, expect no frames are produced for it. + EXPECT_THAT( + wrapper.GenerateFrames(GetParam().num_temporal_units), + Not(Contains(AllOf(Field(&GenericFrameInfo::spatial_id, sid), + Field(&GenericFrameInfo::temporal_id, tid))))) + << "For layer (" << sid << "," << tid << ")"; + } + } +} + +INSTANTIATE_TEST_SUITE_P( + Svc, + ScalabilityStructureTest, + Values(SvcTestParam{"NONE", /*num_temporal_units=*/3}, + SvcTestParam{"L1T2", /*num_temporal_units=*/4}, + SvcTestParam{"L1T3", /*num_temporal_units=*/8}, + SvcTestParam{"L2T1", /*num_temporal_units=*/3}, + SvcTestParam{"L2T1_KEY", /*num_temporal_units=*/3}, + SvcTestParam{"L3T1", /*num_temporal_units=*/3}, + SvcTestParam{"L3T3", /*num_temporal_units=*/8}, + SvcTestParam{"S2T1", /*num_temporal_units=*/3}, + SvcTestParam{"S3T3", /*num_temporal_units=*/8}, + SvcTestParam{"L2T2", /*num_temporal_units=*/4}, + SvcTestParam{"L2T2_KEY", /*num_temporal_units=*/4}, + SvcTestParam{"L2T2_KEY_SHIFT", /*num_temporal_units=*/4}, + SvcTestParam{"L2T3_KEY", /*num_temporal_units=*/8}, + SvcTestParam{"L3T3_KEY", /*num_temporal_units=*/8}), + [](const testing::TestParamInfo& info) { + return info.param.name; + }); + +} // namespace 
+} // namespace webrtc diff --git a/modules/video_coding/svc/scalable_video_controller.h b/modules/video_coding/svc/scalable_video_controller.h new file mode 100644 index 0000000000..d2d8486863 --- /dev/null +++ b/modules/video_coding/svc/scalable_video_controller.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_ +#define MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_ + +#include + +#include "absl/container/inlined_vector.h" +#include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_bitrate_allocation.h" +#include "common_video/generic_frame_descriptor/generic_frame_info.h" + +namespace webrtc { + +// Controls how video should be encoded to be scalable. Outputs results as +// buffer usage configuration for encoder and enough details to communicate the +// scalability structure via dependency descriptor rtp header extension. +class ScalableVideoController { + public: + struct StreamLayersConfig { + int num_spatial_layers = 1; + int num_temporal_layers = 1; + // Spatial layers scaling. Frames with spatial_id = i expected to be encoded + // with original_resolution * scaling_factor_num[i] / scaling_factor_den[i]. + int scaling_factor_num[DependencyDescriptor::kMaxSpatialIds] = {1, 1, 1, 1}; + int scaling_factor_den[DependencyDescriptor::kMaxSpatialIds] = {1, 1, 1, 1}; + }; + class LayerFrameConfig { + public: + // Builders/setters. 
+ LayerFrameConfig& Id(int value); + LayerFrameConfig& Keyframe(); + LayerFrameConfig& S(int value); + LayerFrameConfig& T(int value); + LayerFrameConfig& Reference(int buffer_id); + LayerFrameConfig& Update(int buffer_id); + LayerFrameConfig& ReferenceAndUpdate(int buffer_id); + + // Getters. + int Id() const { return id_; } + bool IsKeyframe() const { return is_keyframe_; } + int SpatialId() const { return spatial_id_; } + int TemporalId() const { return temporal_id_; } + const absl::InlinedVector& Buffers() + const { + return buffers_; + } + + private: + // Id to match configuration returned by NextFrameConfig with + // (possibly modified) configuration passed back via OnEncoderDone. + // The meaning of the id is an implementation detail of + // the ScalableVideoController. + int id_ = 0; + + // Indicates the frame should be encoded as a key frame. In particular when + // `is_keyframe=true` property `CodecBufferUsage::referenced` should be + // ignored and treated as false. + bool is_keyframe_ = false; + + int spatial_id_ = 0; + int temporal_id_ = 0; + // Describes which buffers the encoder is allowed to reference and + // which buffers the encoder should update. + absl::InlinedVector buffers_; + }; + + virtual ~ScalableVideoController() = default; + + // Returns video structure description for encoder to configure itself. + virtual StreamLayersConfig StreamConfig() const = 0; + + // Returns video structure description in format compatible with + // dependency descriptor rtp header extension. + virtual FrameDependencyStructure DependencyStructure() const = 0; + + // Notifies Controller with updated bitrates per layer. In particular notifies + // when certain layers should be disabled. + // Controller shouldn't produce LayerFrameConfig for disabled layers. + virtual void OnRatesUpdated(const VideoBitrateAllocation& bitrates) = 0; + + // When `restart` is true, first `LayerFrameConfig` should have `is_keyframe` + // set to true.
+ // Returned vector shouldn't be empty. + virtual std::vector NextFrameConfig(bool restart) = 0; + + // Returns configuration to pass to EncoderCallback. + virtual GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) = 0; +}; + +// Below are implementation details. +inline ScalableVideoController::LayerFrameConfig& +ScalableVideoController::LayerFrameConfig::Id(int value) { + id_ = value; + return *this; +} +inline ScalableVideoController::LayerFrameConfig& +ScalableVideoController::LayerFrameConfig::Keyframe() { + is_keyframe_ = true; + return *this; +} +inline ScalableVideoController::LayerFrameConfig& +ScalableVideoController::LayerFrameConfig::S(int value) { + spatial_id_ = value; + return *this; +} +inline ScalableVideoController::LayerFrameConfig& +ScalableVideoController::LayerFrameConfig::T(int value) { + temporal_id_ = value; + return *this; +} +inline ScalableVideoController::LayerFrameConfig& +ScalableVideoController::LayerFrameConfig::Reference(int buffer_id) { + buffers_.emplace_back(buffer_id, /*referenced=*/true, /*updated=*/false); + return *this; +} +inline ScalableVideoController::LayerFrameConfig& +ScalableVideoController::LayerFrameConfig::Update(int buffer_id) { + buffers_.emplace_back(buffer_id, /*referenced=*/false, /*updated=*/true); + return *this; +} +inline ScalableVideoController::LayerFrameConfig& +ScalableVideoController::LayerFrameConfig::ReferenceAndUpdate(int buffer_id) { + buffers_.emplace_back(buffer_id, /*referenced=*/true, /*updated=*/true); + return *this; +} + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_H_ diff --git a/modules/video_coding/codecs/av1/scalable_video_controller_no_layering.cc b/modules/video_coding/svc/scalable_video_controller_no_layering.cc similarity index 51% rename from modules/video_coding/codecs/av1/scalable_video_controller_no_layering.cc rename to modules/video_coding/svc/scalable_video_controller_no_layering.cc index 6b63ca4328..3934e57804 100644 --- 
a/modules/video_coding/codecs/av1/scalable_video_controller_no_layering.cc +++ b/modules/video_coding/svc/scalable_video_controller_no_layering.cc @@ -7,7 +7,7 @@ * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ -#include "modules/video_coding/codecs/av1/scalable_video_controller_no_layering.h" +#include "modules/video_coding/svc/scalable_video_controller_no_layering.h" #include #include @@ -32,38 +32,56 @@ FrameDependencyStructure ScalableVideoControllerNoLayering::DependencyStructure() const { FrameDependencyStructure structure; structure.num_decode_targets = 1; - FrameDependencyTemplate a_template; - a_template.decode_target_indications = {DecodeTargetIndication::kSwitch}; - structure.templates.push_back(a_template); + structure.num_chains = 1; + structure.decode_target_protected_by_chain = {0}; + + FrameDependencyTemplate key_frame; + key_frame.decode_target_indications = {DecodeTargetIndication::kSwitch}; + key_frame.chain_diffs = {0}; + structure.templates.push_back(key_frame); + + FrameDependencyTemplate delta_frame; + delta_frame.decode_target_indications = {DecodeTargetIndication::kSwitch}; + delta_frame.chain_diffs = {1}; + delta_frame.frame_diffs = {1}; + structure.templates.push_back(delta_frame); + return structure; } std::vector ScalableVideoControllerNoLayering::NextFrameConfig(bool restart) { - if (restart) { - start_ = true; + if (!enabled_) { + return {}; } std::vector result(1); - result[0].id = 0; - result[0].is_keyframe = start_; - result[0].buffers = {{/*id=*/0, /*references=*/!start_, /*updates=*/true}}; - + if (restart || start_) { + result[0].Id(0).Keyframe().Update(0); + } else { + result[0].Id(0).ReferenceAndUpdate(0); + } start_ = false; return result; } -absl::optional -ScalableVideoControllerNoLayering::OnEncodeDone(LayerFrameConfig config) { - RTC_DCHECK_EQ(config.id, 0); - absl::optional frame_info(absl::in_place); - frame_info->encoder_buffers = 
std::move(config.buffers); - if (config.is_keyframe) { - for (auto& buffer : frame_info->encoder_buffers) { +GenericFrameInfo ScalableVideoControllerNoLayering::OnEncodeDone( + const LayerFrameConfig& config) { + RTC_DCHECK_EQ(config.Id(), 0); + GenericFrameInfo frame_info; + frame_info.encoder_buffers = config.Buffers(); + if (config.IsKeyframe()) { + for (auto& buffer : frame_info.encoder_buffers) { buffer.referenced = false; } } - frame_info->decode_target_indications = {DecodeTargetIndication::kSwitch}; + frame_info.decode_target_indications = {DecodeTargetIndication::kSwitch}; + frame_info.part_of_chain = {true}; return frame_info; } +void ScalableVideoControllerNoLayering::OnRatesUpdated( + const VideoBitrateAllocation& bitrates) { + enabled_ = bitrates.GetBitrate(0, 0) > 0; +} + } // namespace webrtc diff --git a/modules/video_coding/codecs/av1/scalable_video_controller_no_layering.h b/modules/video_coding/svc/scalable_video_controller_no_layering.h similarity index 65% rename from modules/video_coding/codecs/av1/scalable_video_controller_no_layering.h rename to modules/video_coding/svc/scalable_video_controller_no_layering.h index ad730989af..6d66b61c8b 100644 --- a/modules/video_coding/codecs/av1/scalable_video_controller_no_layering.h +++ b/modules/video_coding/svc/scalable_video_controller_no_layering.h @@ -7,14 +7,15 @@ * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef MODULES_VIDEO_CODING_CODECS_AV1_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_ -#define MODULES_VIDEO_CODING_CODECS_AV1_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_ +#ifndef MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_ +#define MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_ #include #include "api/transport/rtp/dependency_descriptor.h" +#include "api/video/video_bitrate_allocation.h" #include "common_video/generic_frame_descriptor/generic_frame_info.h" -#include "modules/video_coding/codecs/av1/scalable_video_controller.h" +#include "modules/video_coding/svc/scalable_video_controller.h" namespace webrtc { @@ -26,13 +27,14 @@ class ScalableVideoControllerNoLayering : public ScalableVideoController { FrameDependencyStructure DependencyStructure() const override; std::vector NextFrameConfig(bool restart) override; - absl::optional OnEncodeDone( - LayerFrameConfig config) override; + GenericFrameInfo OnEncodeDone(const LayerFrameConfig& config) override; + void OnRatesUpdated(const VideoBitrateAllocation& bitrates) override; private: bool start_ = true; + bool enabled_ = true; }; } // namespace webrtc -#endif // MODULES_VIDEO_CODING_CODECS_AV1_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_ +#endif // MODULES_VIDEO_CODING_SVC_SCALABLE_VIDEO_CONTROLLER_NO_LAYERING_H_ diff --git a/modules/video_coding/codecs/vp9/svc_rate_allocator.cc b/modules/video_coding/svc/svc_rate_allocator.cc similarity index 81% rename from modules/video_coding/codecs/vp9/svc_rate_allocator.cc rename to modules/video_coding/svc/svc_rate_allocator.cc index cc9a0d8997..a51bdb05dd 100644 --- a/modules/video_coding/codecs/vp9/svc_rate_allocator.cc +++ b/modules/video_coding/svc/svc_rate_allocator.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "modules/video_coding/codecs/vp9/svc_rate_allocator.h" +#include "modules/video_coding/svc/svc_rate_allocator.h" #include #include @@ -17,40 +17,38 @@ #include #include "absl/container/inlined_vector.h" +#include "modules/video_coding/svc/create_scalability_structure.h" #include "rtc_base/checks.h" namespace webrtc { namespace { -const float kSpatialLayeringRateScalingFactor = 0.55f; -const float kTemporalLayeringRateScalingFactor = 0.55f; +constexpr float kSpatialLayeringRateScalingFactor = 0.55f; +constexpr float kTemporalLayeringRateScalingFactor = 0.55f; -// Returns numberOfSpatialLayers if no layers are active. -size_t GetFirstActiveLayer(const VideoCodec& codec) { - RTC_DCHECK_EQ(codec.codecType, kVideoCodecVP9); - RTC_DCHECK_GT(codec.VP9().numberOfSpatialLayers, 0u); - size_t layer = 0; - for (; layer < codec.VP9().numberOfSpatialLayers; ++layer) { - if (codec.spatialLayers[layer].active) { +struct ActiveSpatialLayers { + size_t first = 0; + size_t num = 0; +}; + +ActiveSpatialLayers GetActiveSpatialLayers(const VideoCodec& codec, + size_t num_spatial_layers) { + ActiveSpatialLayers active; + for (active.first = 0; active.first < num_spatial_layers; ++active.first) { + if (codec.spatialLayers[active.first].active) { break; } } - return layer; -} -static size_t GetNumActiveSpatialLayers(const VideoCodec& codec) { - RTC_DCHECK_EQ(codec.codecType, kVideoCodecVP9); - RTC_DCHECK_GT(codec.VP9().numberOfSpatialLayers, 0u); - - const size_t first_active_layer = GetFirstActiveLayer(codec); - size_t last_active_layer = first_active_layer; - for (; last_active_layer < codec.VP9().numberOfSpatialLayers; - ++last_active_layer) { + size_t last_active_layer = active.first; + for (; last_active_layer < num_spatial_layers; ++last_active_layer) { if (!codec.spatialLayers[last_active_layer].active) { break; } } - return last_active_layer - first_active_layer; + active.num = last_active_layer - active.first; + + return active; } std::vector AdjustAndVerify( @@ 
-140,7 +138,8 @@ DataRate FindLayerTogglingThreshold(const VideoCodec& codec, } } upper_bound += DataRate::KilobitsPerSec( - codec.spatialLayers[num_active_layers - 1].minBitrate); + codec.spatialLayers[first_active_layer + num_active_layers - 1] + .minBitrate); // Do a binary search until upper and lower bound is the highest bitrate for // |num_active_layers| - 1 layers and lowest bitrate for |num_active_layers| @@ -172,16 +171,39 @@ DataRate FindLayerTogglingThreshold(const VideoCodec& codec, } // namespace +SvcRateAllocator::NumLayers SvcRateAllocator::GetNumLayers( + const VideoCodec& codec) { + NumLayers layers; + if (!codec.ScalabilityMode().empty()) { + if (auto structure = CreateScalabilityStructure(codec.ScalabilityMode())) { + ScalableVideoController::StreamLayersConfig config = + structure->StreamConfig(); + layers.spatial = config.num_spatial_layers; + layers.temporal = config.num_temporal_layers; + return layers; + } + } + if (codec.codecType == kVideoCodecVP9) { + layers.spatial = codec.VP9().numberOfSpatialLayers; + layers.temporal = codec.VP9().numberOfTemporalLayers; + return layers; + } + layers.spatial = 1; + layers.temporal = 1; + return layers; +} + SvcRateAllocator::SvcRateAllocator(const VideoCodec& codec) : codec_(codec), + num_layers_(GetNumLayers(codec)), experiment_settings_(StableTargetRateExperiment::ParseFromFieldTrials()), cumulative_layer_start_bitrates_(GetLayerStartBitrates(codec)), last_active_layer_count_(0) { - RTC_DCHECK_EQ(codec.codecType, kVideoCodecVP9); - RTC_DCHECK_GT(codec.VP9().numberOfSpatialLayers, 0u); - RTC_DCHECK_GT(codec.VP9().numberOfTemporalLayers, 0u); - for (size_t layer_idx = 0; layer_idx < codec.VP9().numberOfSpatialLayers; - ++layer_idx) { + RTC_DCHECK_GT(num_layers_.spatial, 0); + RTC_DCHECK_LE(num_layers_.spatial, kMaxSpatialLayers); + RTC_DCHECK_GT(num_layers_.temporal, 0); + RTC_DCHECK_LE(num_layers_.temporal, 3); + for (size_t layer_idx = 0; layer_idx < num_layers_.spatial; ++layer_idx) { // Verify min 
<= target <= max. if (codec.spatialLayers[layer_idx].active) { RTC_DCHECK_GT(codec.spatialLayers[layer_idx].maxBitrate, 0); @@ -204,16 +226,16 @@ VideoBitrateAllocation SvcRateAllocator::Allocate( } if (codec_.spatialLayers[0].targetBitrate == 0) { - // Delegate rate distribution to VP9 encoder wrapper if bitrate thresholds + // Delegate rate distribution to encoder wrapper if bitrate thresholds // are not set. VideoBitrateAllocation bitrate_allocation; bitrate_allocation.SetBitrate(0, 0, total_bitrate.bps()); return bitrate_allocation; } - const size_t first_active_layer = GetFirstActiveLayer(codec_); - const size_t num_active_layers = GetNumActiveSpatialLayers(codec_); - size_t num_spatial_layers = num_active_layers; + const ActiveSpatialLayers active_layers = + GetActiveSpatialLayers(codec_, num_layers_.spatial); + size_t num_spatial_layers = active_layers.num; if (num_spatial_layers == 0) { return VideoBitrateAllocation(); // All layers are deactivated. @@ -248,13 +270,13 @@ VideoBitrateAllocation SvcRateAllocator::Allocate( VideoBitrateAllocation allocation; if (codec_.mode == VideoCodecMode::kRealtimeVideo) { - allocation = GetAllocationNormalVideo(total_bitrate, first_active_layer, + allocation = GetAllocationNormalVideo(total_bitrate, active_layers.first, num_spatial_layers); } else { - allocation = GetAllocationScreenSharing(total_bitrate, first_active_layer, + allocation = GetAllocationScreenSharing(total_bitrate, active_layers.first, num_spatial_layers); } - allocation.set_bw_limited(num_spatial_layers < num_active_layers); + allocation.set_bw_limited(num_spatial_layers < active_layers.num); return allocation; } @@ -278,25 +300,24 @@ VideoBitrateAllocation SvcRateAllocator::GetAllocationNormalVideo( VideoBitrateAllocation bitrate_allocation; - const size_t num_temporal_layers = codec_.VP9().numberOfTemporalLayers; for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) { std::vector temporal_layer_rates = - SplitBitrate(num_temporal_layers, 
spatial_layer_rates[sl_idx], + SplitBitrate(num_layers_.temporal, spatial_layer_rates[sl_idx], kTemporalLayeringRateScalingFactor); // Distribute rate across temporal layers. Allocate more bits to lower // layers since they are used for prediction of higher layers and their // references are far apart. - if (num_temporal_layers == 1) { + if (num_layers_.temporal == 1) { bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 0, temporal_layer_rates[0].bps()); - } else if (num_temporal_layers == 2) { + } else if (num_layers_.temporal == 2) { bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 0, temporal_layer_rates[1].bps()); bitrate_allocation.SetBitrate(sl_idx + first_active_layer, 1, temporal_layer_rates[0].bps()); } else { - RTC_CHECK_EQ(num_temporal_layers, 3); + RTC_CHECK_EQ(num_layers_.temporal, 3); // In case of three temporal layers the high layer has two frames and the // middle layer has one frame within GOP (in between two consecutive low // layer frames). Thus high layer requires more bits (comparing pure @@ -382,13 +403,14 @@ size_t SvcRateAllocator::FindNumEnabledLayers(DataRate target_rate) const { } DataRate SvcRateAllocator::GetMaxBitrate(const VideoCodec& codec) { - const size_t first_active_layer = GetFirstActiveLayer(codec); - const size_t num_spatial_layers = GetNumActiveSpatialLayers(codec); + const NumLayers num_layers = GetNumLayers(codec); + const ActiveSpatialLayers active_layers = + GetActiveSpatialLayers(codec, num_layers.spatial); DataRate max_bitrate = DataRate::Zero(); - for (size_t sl_idx = 0; sl_idx < num_spatial_layers; ++sl_idx) { + for (size_t sl_idx = 0; sl_idx < active_layers.num; ++sl_idx) { max_bitrate += DataRate::KilobitsPerSec( - codec.spatialLayers[first_active_layer + sl_idx].maxBitrate); + codec.spatialLayers[active_layers.first + sl_idx].maxBitrate); } if (codec.maxBitrate != 0) { @@ -411,12 +433,13 @@ DataRate SvcRateAllocator::GetPaddingBitrate(const VideoCodec& codec) { absl::InlinedVector 
SvcRateAllocator::GetLayerStartBitrates(const VideoCodec& codec) { absl::InlinedVector start_bitrates; - const size_t first_active_layer = GetFirstActiveLayer(codec); - const size_t num_layers = GetNumActiveSpatialLayers(codec); + const NumLayers num_layers = GetNumLayers(codec); + const ActiveSpatialLayers active_layers = + GetActiveSpatialLayers(codec, num_layers.spatial); DataRate last_rate = DataRate::Zero(); - for (size_t i = 1; i <= num_layers; ++i) { + for (size_t i = 1; i <= active_layers.num; ++i) { DataRate layer_toggling_rate = - FindLayerTogglingThreshold(codec, first_active_layer, i); + FindLayerTogglingThreshold(codec, active_layers.first, i); start_bitrates.push_back(layer_toggling_rate); RTC_DCHECK_LE(last_rate, layer_toggling_rate); last_rate = layer_toggling_rate; diff --git a/modules/video_coding/svc/svc_rate_allocator.h b/modules/video_coding/svc/svc_rate_allocator.h new file mode 100644 index 0000000000..bd75fca284 --- /dev/null +++ b/modules/video_coding/svc/svc_rate_allocator.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_ +#define MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_ + +#include +#include + +#include "absl/container/inlined_vector.h" +#include "api/video/video_bitrate_allocation.h" +#include "api/video/video_bitrate_allocator.h" +#include "api/video/video_codec_constants.h" +#include "api/video_codecs/video_codec.h" +#include "rtc_base/experiments/stable_target_rate_experiment.h" + +namespace webrtc { + +class SvcRateAllocator : public VideoBitrateAllocator { + public: + explicit SvcRateAllocator(const VideoCodec& codec); + + VideoBitrateAllocation Allocate( + VideoBitrateAllocationParameters parameters) override; + + static DataRate GetMaxBitrate(const VideoCodec& codec); + static DataRate GetPaddingBitrate(const VideoCodec& codec); + static absl::InlinedVector GetLayerStartBitrates( + const VideoCodec& codec); + + private: + struct NumLayers { + size_t spatial = 1; + size_t temporal = 1; + }; + + static NumLayers GetNumLayers(const VideoCodec& codec); + VideoBitrateAllocation GetAllocationNormalVideo( + DataRate total_bitrate, + size_t first_active_layer, + size_t num_spatial_layers) const; + + VideoBitrateAllocation GetAllocationScreenSharing( + DataRate total_bitrate, + size_t first_active_layer, + size_t num_spatial_layers) const; + + // Returns the number of layers that are active and have enough bitrate to + // actually be enabled. 
+ size_t FindNumEnabledLayers(DataRate target_rate) const; + + const VideoCodec codec_; + const NumLayers num_layers_; + const StableTargetRateExperiment experiment_settings_; + const absl::InlinedVector + cumulative_layer_start_bitrates_; + size_t last_active_layer_count_; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_SVC_SVC_RATE_ALLOCATOR_H_ diff --git a/modules/video_coding/codecs/vp9/svc_rate_allocator_unittest.cc b/modules/video_coding/svc/svc_rate_allocator_unittest.cc similarity index 85% rename from modules/video_coding/codecs/vp9/svc_rate_allocator_unittest.cc rename to modules/video_coding/svc/svc_rate_allocator_unittest.cc index daa0c52e09..fd22acd85d 100644 --- a/modules/video_coding/codecs/vp9/svc_rate_allocator_unittest.cc +++ b/modules/video_coding/svc/svc_rate_allocator_unittest.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "modules/video_coding/codecs/vp9/svc_rate_allocator.h" +#include "modules/video_coding/svc/svc_rate_allocator.h" #include #include @@ -270,6 +270,97 @@ TEST(SvcRateAllocatorTest, FindLayerTogglingThreshold) { EXPECT_EQ(layer_start_bitrates[2], kThreeLayerMinRate); } +TEST(SvcRateAllocatorTest, SupportsAv1) { + VideoCodec codec; + codec.width = 640; + codec.height = 360; + codec.codecType = kVideoCodecAV1; + codec.SetScalabilityMode("L3T3"); + codec.spatialLayers[0].active = true; + codec.spatialLayers[0].minBitrate = 30; + codec.spatialLayers[0].targetBitrate = 51; + codec.spatialLayers[0].maxBitrate = 73; + codec.spatialLayers[1].active = true; + codec.spatialLayers[1].minBitrate = 49; + codec.spatialLayers[1].targetBitrate = 64; + codec.spatialLayers[1].maxBitrate = 97; + codec.spatialLayers[2].active = true; + codec.spatialLayers[2].minBitrate = 193; + codec.spatialLayers[2].targetBitrate = 305; + codec.spatialLayers[2].maxBitrate = 418; + + SvcRateAllocator allocator(codec); + + VideoBitrateAllocation allocation = + 
allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30)); + + EXPECT_GT(allocation.GetSpatialLayerSum(0), 0u); + EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u); + EXPECT_GT(allocation.GetSpatialLayerSum(2), 0u); +} + +TEST(SvcRateAllocatorTest, SupportsAv1WithSkippedLayer) { + VideoCodec codec; + codec.width = 640; + codec.height = 360; + codec.codecType = kVideoCodecAV1; + codec.SetScalabilityMode("L3T3"); + codec.spatialLayers[0].active = false; + codec.spatialLayers[0].minBitrate = 30; + codec.spatialLayers[0].targetBitrate = 51; + codec.spatialLayers[0].maxBitrate = 73; + codec.spatialLayers[1].active = true; + codec.spatialLayers[1].minBitrate = 49; + codec.spatialLayers[1].targetBitrate = 64; + codec.spatialLayers[1].maxBitrate = 97; + codec.spatialLayers[2].active = true; + codec.spatialLayers[2].minBitrate = 193; + codec.spatialLayers[2].targetBitrate = 305; + codec.spatialLayers[2].maxBitrate = 418; + + SvcRateAllocator allocator(codec); + + VideoBitrateAllocation allocation = + allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30)); + + EXPECT_EQ(allocation.GetSpatialLayerSum(0), 0u); + EXPECT_GT(allocation.GetSpatialLayerSum(1), 0u); + EXPECT_GT(allocation.GetSpatialLayerSum(2), 0u); +} + +TEST(SvcRateAllocatorTest, UsesScalabilityModeToGetNumberOfLayers) { + VideoCodec codec; + codec.width = 640; + codec.height = 360; + codec.codecType = kVideoCodecAV1; + codec.SetScalabilityMode("L2T2"); + codec.spatialLayers[0].active = true; + codec.spatialLayers[0].minBitrate = 30; + codec.spatialLayers[0].targetBitrate = 51; + codec.spatialLayers[0].maxBitrate = 73; + codec.spatialLayers[1].active = true; + codec.spatialLayers[1].minBitrate = 49; + codec.spatialLayers[1].targetBitrate = 64; + codec.spatialLayers[1].maxBitrate = 97; + codec.spatialLayers[2].active = true; + codec.spatialLayers[2].minBitrate = 193; + codec.spatialLayers[2].targetBitrate = 305; + codec.spatialLayers[2].maxBitrate = 418; + + SvcRateAllocator allocator(codec); + 
VideoBitrateAllocation allocation = + allocator.Allocate(VideoBitrateAllocationParameters(1'000'000, 30)); + + // Expect bitrates for 2 temporal layers. + EXPECT_TRUE(allocation.HasBitrate(1, /*temporal_index=*/0)); + EXPECT_TRUE(allocation.HasBitrate(1, /*temporal_index=*/1)); + EXPECT_FALSE(allocation.HasBitrate(1, /*temporal_index=*/2)); + + // Expect codec.spatialLayers[2].active is ignored because scalability mode uses + just 2 spatial layers. + EXPECT_EQ(allocation.GetSpatialLayerSum(2), 0u); +} + class SvcRateAllocatorTestParametrizedContentType : public ::testing::Test, public ::testing::WithParamInterface { diff --git a/modules/video_coding/timestamp_map.cc b/modules/video_coding/timestamp_map.cc index d93293704d..f6fb81815a 100644 --- a/modules/video_coding/timestamp_map.cc +++ b/modules/video_coding/timestamp_map.cc @@ -24,7 +24,7 @@ VCMTimestampMap::VCMTimestampMap(size_t capacity) VCMTimestampMap::~VCMTimestampMap() {} -void VCMTimestampMap::Add(uint32_t timestamp, VCMFrameInformation* data) { +void VCMTimestampMap::Add(uint32_t timestamp, const VCMFrameInformation& data) { ring_buffer_[next_add_idx_].timestamp = timestamp; ring_buffer_[next_add_idx_].data = data; next_add_idx_ = (next_add_idx_ + 1) % capacity_; @@ -35,18 +35,18 @@ void VCMTimestampMap::Add(uint32_t timestamp, VCMFrameInformation* data) { -VCMFrameInformation* VCMTimestampMap::Pop(uint32_t timestamp) { +absl::optional VCMTimestampMap::Pop(uint32_t timestamp) { while (!IsEmpty()) { if (ring_buffer_[next_pop_idx_].timestamp == timestamp) { // Found start time for this timestamp. 
- VCMFrameInformation* data = ring_buffer_[next_pop_idx_].data; - ring_buffer_[next_pop_idx_].data = nullptr; + const VCMFrameInformation& data = ring_buffer_[next_pop_idx_].data; + ring_buffer_[next_pop_idx_].timestamp = 0; next_pop_idx_ = (next_pop_idx_ + 1) % capacity_; return data; } else if (IsNewerTimestamp(ring_buffer_[next_pop_idx_].timestamp, timestamp)) { // The timestamp we are looking for is not in the list. - return nullptr; + return absl::nullopt; } // Not in this position, check next (and forget this position). @@ -54,10 +54,26 @@ VCMFrameInformation* VCMTimestampMap::Pop(uint32_t timestamp) { } // Could not find matching timestamp in list. - return nullptr; + return absl::nullopt; } bool VCMTimestampMap::IsEmpty() const { return (next_add_idx_ == next_pop_idx_); } + +size_t VCMTimestampMap::Size() const { + // The maximum number of elements in the list is |capacity_| - 1. The list is + // empty if the add and pop indices are equal. + return next_add_idx_ >= next_pop_idx_ + ? 
next_add_idx_ - next_pop_idx_ + : next_add_idx_ + capacity_ - next_pop_idx_; +} + +void VCMTimestampMap::Clear() { + while (!IsEmpty()) { + ring_buffer_[next_pop_idx_].timestamp = 0; + next_pop_idx_ = (next_pop_idx_ + 1) % capacity_; + } +} + } // namespace webrtc diff --git a/modules/video_coding/timestamp_map.h b/modules/video_coding/timestamp_map.h index c85666c9aa..dc20a0551c 100644 --- a/modules/video_coding/timestamp_map.h +++ b/modules/video_coding/timestamp_map.h @@ -13,22 +13,42 @@ #include +#include "absl/types/optional.h" +#include "api/rtp_packet_infos.h" +#include "api/units/timestamp.h" +#include "api/video/encoded_image.h" +#include "api/video/video_content_type.h" +#include "api/video/video_rotation.h" +#include "api/video/video_timing.h" + namespace webrtc { -struct VCMFrameInformation; +struct VCMFrameInformation { + int64_t renderTimeMs; + absl::optional decodeStart; + void* userData; + VideoRotation rotation; + VideoContentType content_type; + EncodedImage::Timing timing; + int64_t ntp_time_ms; + RtpPacketInfos packet_infos; + // ColorSpace is not stored here, as it might be modified by decoders. +}; class VCMTimestampMap { public: explicit VCMTimestampMap(size_t capacity); ~VCMTimestampMap(); - void Add(uint32_t timestamp, VCMFrameInformation* data); - VCMFrameInformation* Pop(uint32_t timestamp); + void Add(uint32_t timestamp, const VCMFrameInformation& data); + absl::optional Pop(uint32_t timestamp); + size_t Size() const; + void Clear(); private: struct TimestampDataTuple { uint32_t timestamp; - VCMFrameInformation* data; + VCMFrameInformation data; }; bool IsEmpty() const; diff --git a/modules/video_coding/timestamp_map_unittest.cc b/modules/video_coding/timestamp_map_unittest.cc new file mode 100644 index 0000000000..5e90786b95 --- /dev/null +++ b/modules/video_coding/timestamp_map_unittest.cc @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/timestamp_map.h" + +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace video_coding { +namespace { +constexpr int kTimestampMapSize = 6; +constexpr int kTimestamp1 = 1; +constexpr int kTimestamp2 = 2; +constexpr int kNoExistingTimestamp3 = 3; +constexpr int kTimestamp4 = 4; +constexpr int kTimestamp5 = 5; +constexpr int kTimestamp6 = 6; +constexpr int kTimestamp7 = 7; +constexpr int64_t kRenderTime1 = 1000; +constexpr int64_t kRenderTime2 = 2000; +constexpr int64_t kRenderTime4 = 4000; +constexpr int64_t kRenderTime5 = 5000; +constexpr int64_t kRenderTime6 = 6000; +constexpr int64_t kRenderTime7 = 7000; +} // namespace + +class VcmTimestampMapTest : public ::testing::Test { + protected: + VcmTimestampMapTest() : _timestampMap(kTimestampMapSize) {} + + void SetUp() override { + _timestampMap.Add(kTimestamp1, VCMFrameInformation({kRenderTime1})); + _timestampMap.Add(kTimestamp2, VCMFrameInformation({kRenderTime2})); + _timestampMap.Add(kTimestamp4, VCMFrameInformation({kRenderTime4})); + } + + VCMTimestampMap _timestampMap; +}; + +TEST_F(VcmTimestampMapTest, PopExistingFrameInfo) { + EXPECT_EQ(_timestampMap.Size(), 3u); + auto frameInfo = _timestampMap.Pop(kTimestamp1); + ASSERT_TRUE(frameInfo); + EXPECT_EQ(frameInfo->renderTimeMs, kRenderTime1); + frameInfo = _timestampMap.Pop(kTimestamp2); + ASSERT_TRUE(frameInfo); + EXPECT_EQ(frameInfo->renderTimeMs, kRenderTime2); + frameInfo = _timestampMap.Pop(kTimestamp4); + ASSERT_TRUE(frameInfo); + EXPECT_EQ(frameInfo->renderTimeMs, kRenderTime4); +} + +TEST_F(VcmTimestampMapTest, PopNonexistingClearsOlderFrameInfos) { + auto 
frameInfo = _timestampMap.Pop(kNoExistingTimestamp3); + EXPECT_FALSE(frameInfo); + EXPECT_EQ(_timestampMap.Size(), 1u); +} + +TEST_F(VcmTimestampMapTest, SizeIsIncrementedWhenAddingNewFrameInfo) { + EXPECT_EQ(_timestampMap.Size(), 3u); + _timestampMap.Add(kTimestamp5, VCMFrameInformation({kRenderTime5})); + EXPECT_EQ(_timestampMap.Size(), 4u); + _timestampMap.Add(kTimestamp6, VCMFrameInformation({kRenderTime6})); + EXPECT_EQ(_timestampMap.Size(), 5u); +} + +TEST_F(VcmTimestampMapTest, SizeIsDecreasedWhenPoppingFrameInfo) { + EXPECT_EQ(_timestampMap.Size(), 3u); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp1)); + EXPECT_EQ(_timestampMap.Size(), 2u); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp2)); + EXPECT_EQ(_timestampMap.Size(), 1u); + EXPECT_FALSE(_timestampMap.Pop(kNoExistingTimestamp3)); + EXPECT_EQ(_timestampMap.Size(), 1u); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp4)); + EXPECT_EQ(_timestampMap.Size(), 0u); +} + +TEST_F(VcmTimestampMapTest, ClearEmptiesMap) { + EXPECT_EQ(_timestampMap.Size(), 3u); + _timestampMap.Clear(); + EXPECT_EQ(_timestampMap.Size(), 0u); + // Clear empty map does nothing. + _timestampMap.Clear(); + EXPECT_EQ(_timestampMap.Size(), 0u); +} + +TEST_F(VcmTimestampMapTest, PopLastAddedClearsMap) { + EXPECT_EQ(_timestampMap.Size(), 3u); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp4)); + EXPECT_EQ(_timestampMap.Size(), 0u); +} + +TEST_F(VcmTimestampMapTest, OldestIsDiscardedIfMapGetsFull) { + EXPECT_EQ(_timestampMap.Size(), 3u); + _timestampMap.Add(kTimestamp5, VCMFrameInformation({kRenderTime5})); + EXPECT_EQ(_timestampMap.Size(), 4u); + _timestampMap.Add(kTimestamp6, VCMFrameInformation({kRenderTime6})); + EXPECT_EQ(_timestampMap.Size(), 5u); + _timestampMap.Add(kTimestamp7, VCMFrameInformation({kRenderTime7})); + // Size is not incremented since the oldest element is discarded. 
+ EXPECT_EQ(_timestampMap.Size(), 5u); + EXPECT_FALSE(_timestampMap.Pop(kTimestamp1)); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp2)); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp4)); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp5)); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp6)); + EXPECT_TRUE(_timestampMap.Pop(kTimestamp7)); + EXPECT_EQ(_timestampMap.Size(), 0u); +} + +} // namespace video_coding +} // namespace webrtc diff --git a/modules/video_coding/timing.cc b/modules/video_coding/timing.cc index c62c848c09..ea1b59cad7 100644 --- a/modules/video_coding/timing.cc +++ b/modules/video_coding/timing.cc @@ -14,16 +14,18 @@ #include +#include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/time/timestamp_extrapolator.h" #include "system_wrappers/include/clock.h" +#include "system_wrappers/include/field_trial.h" namespace webrtc { -VCMTiming::VCMTiming(Clock* clock, VCMTiming* master_timing) +VCMTiming::VCMTiming(Clock* clock) : clock_(clock), - master_(false), - ts_extrapolator_(), - codec_timer_(new VCMCodecTimer()), + ts_extrapolator_(std::make_unique( + clock_->TimeInMilliseconds())), + codec_timer_(std::make_unique()), render_delay_ms_(kDefaultRenderDelayMs), min_playout_delay_ms_(0), max_playout_delay_ms_(10000), @@ -31,25 +33,20 @@ VCMTiming::VCMTiming(Clock* clock, VCMTiming* master_timing) current_delay_ms_(0), prev_frame_timestamp_(0), timing_frame_info_(), - num_decoded_frames_(0) { - if (master_timing == NULL) { - master_ = true; - ts_extrapolator_ = new TimestampExtrapolator(clock_->TimeInMilliseconds()); - } else { - ts_extrapolator_ = master_timing->ts_extrapolator_; - } -} - -VCMTiming::~VCMTiming() { - if (master_) { - delete ts_extrapolator_; - } + num_decoded_frames_(0), + low_latency_renderer_enabled_("enabled", true), + zero_playout_delay_min_pacing_("min_pacing", TimeDelta::Millis(0)), + last_decode_scheduled_ts_(0) { + ParseFieldTrial({&low_latency_renderer_enabled_}, + field_trial::FindFullName("WebRTC-LowLatencyRenderer")); + 
ParseFieldTrial({&zero_playout_delay_min_pacing_}, + field_trial::FindFullName("WebRTC-ZeroPlayoutDelay")); } void VCMTiming::Reset() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ts_extrapolator_->Reset(clock_->TimeInMilliseconds()); - codec_timer_.reset(new VCMCodecTimer()); + codec_timer_ = std::make_unique(); render_delay_ms_ = kDefaultRenderDelayMs; min_playout_delay_ms_ = 0; jitter_delay_ms_ = 0; @@ -58,32 +55,32 @@ void VCMTiming::Reset() { } void VCMTiming::set_render_delay(int render_delay_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); render_delay_ms_ = render_delay_ms; } void VCMTiming::set_min_playout_delay(int min_playout_delay_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); min_playout_delay_ms_ = min_playout_delay_ms; } int VCMTiming::min_playout_delay() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return min_playout_delay_ms_; } void VCMTiming::set_max_playout_delay(int max_playout_delay_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); max_playout_delay_ms_ = max_playout_delay_ms; } int VCMTiming::max_playout_delay() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return max_playout_delay_ms_; } void VCMTiming::SetJitterDelay(int jitter_delay_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); if (jitter_delay_ms != jitter_delay_ms_) { jitter_delay_ms_ = jitter_delay_ms; // When in initial state, set current delay to minimum delay. 
@@ -94,7 +91,7 @@ void VCMTiming::SetJitterDelay(int jitter_delay_ms) { } void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); int target_delay_ms = TargetDelayInternal(); if (current_delay_ms_ == 0) { @@ -135,7 +132,7 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) { void VCMTiming::UpdateCurrentDelay(int64_t render_time_ms, int64_t actual_decode_time_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); uint32_t target_delay_ms = TargetDelayInternal(); int64_t delayed_ms = actual_decode_time_ms - @@ -158,29 +155,41 @@ void VCMTiming::StopDecodeTimer(uint32_t /*time_stamp*/, } void VCMTiming::StopDecodeTimer(int32_t decode_time_ms, int64_t now_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); codec_timer_->AddTiming(decode_time_ms, now_ms); - assert(decode_time_ms >= 0); + RTC_DCHECK_GE(decode_time_ms, 0); ++num_decoded_frames_; } void VCMTiming::IncomingTimestamp(uint32_t time_stamp, int64_t now_ms) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); ts_extrapolator_->Update(now_ms, time_stamp); } int64_t VCMTiming::RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms) const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return RenderTimeMsInternal(frame_timestamp, now_ms); } +void VCMTiming::SetLastDecodeScheduledTimestamp( + int64_t last_decode_scheduled_ts) { + MutexLock lock(&mutex_); + last_decode_scheduled_ts_ = last_decode_scheduled_ts; +} + int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp, int64_t now_ms) const { - if (min_playout_delay_ms_ == 0 && max_playout_delay_ms_ == 0) { - // Render as soon as possible. 
+ constexpr int kLowLatencyRendererMaxPlayoutDelayMs = 500; + if (min_playout_delay_ms_ == 0 && + (max_playout_delay_ms_ == 0 || + (low_latency_renderer_enabled_ && + max_playout_delay_ms_ <= kLowLatencyRendererMaxPlayoutDelayMs))) { + // Render as soon as possible or with low-latency renderer algorithm. return 0; } + // Note that TimestampExtrapolator::ExtrapolateLocalTime is not a const + // method; it mutates the object's wraparound state. int64_t estimated_complete_time_ms = ts_extrapolator_->ExtrapolateLocalTime(frame_timestamp); if (estimated_complete_time_ms == -1) { @@ -196,22 +205,37 @@ int64_t VCMTiming::RenderTimeMsInternal(uint32_t frame_timestamp, int VCMTiming::RequiredDecodeTimeMs() const { const int decode_time_ms = codec_timer_->RequiredDecodeTimeMs(); - assert(decode_time_ms >= 0); + RTC_DCHECK_GE(decode_time_ms, 0); return decode_time_ms; } int64_t VCMTiming::MaxWaitingTime(int64_t render_time_ms, - int64_t now_ms) const { - rtc::CritScope cs(&crit_sect_); - - const int64_t max_wait_time_ms = - render_time_ms - now_ms - RequiredDecodeTimeMs() - render_delay_ms_; - - return max_wait_time_ms; + int64_t now_ms, + bool too_many_frames_queued) const { + MutexLock lock(&mutex_); + + if (render_time_ms == 0 && zero_playout_delay_min_pacing_->us() > 0 && + min_playout_delay_ms_ == 0 && max_playout_delay_ms_ > 0) { + // |render_time_ms| == 0 indicates that the frame should be decoded and + // rendered as soon as possible. However, the decoder can be choked if too + // many frames are sent at once. Therefore, limit the interframe delay to + // |zero_playout_delay_min_pacing_| unless too many frames are queued in + // which case the frames are sent to the decoder at once. + if (too_many_frames_queued) { + return 0; + } + int64_t earliest_next_decode_start_time = + last_decode_scheduled_ts_ + zero_playout_delay_min_pacing_->ms(); + int64_t max_wait_time_ms = now_ms >= earliest_next_decode_start_time + ? 
0 + : earliest_next_decode_start_time - now_ms; + return max_wait_time_ms; + } + return render_time_ms - now_ms - RequiredDecodeTimeMs() - render_delay_ms_; } int VCMTiming::TargetVideoDelay() const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return TargetDelayInternal(); } @@ -226,7 +250,7 @@ bool VCMTiming::GetTimings(int* max_decode_ms, int* jitter_buffer_ms, int* min_playout_delay_ms, int* render_delay_ms) const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); *max_decode_ms = RequiredDecodeTimeMs(); *current_delay_ms = current_delay_ms_; *target_delay_ms = TargetDelayInternal(); @@ -237,13 +261,24 @@ bool VCMTiming::GetTimings(int* max_decode_ms, } void VCMTiming::SetTimingFrameInfo(const TimingFrameInfo& info) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); timing_frame_info_.emplace(info); } absl::optional VCMTiming::GetTimingFrameInfo() { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return timing_frame_info_; } +void VCMTiming::SetMaxCompositionDelayInFrames( + absl::optional max_composition_delay_in_frames) { + MutexLock lock(&mutex_); + max_composition_delay_in_frames_ = max_composition_delay_in_frames; +} + +absl::optional VCMTiming::MaxCompositionDelayInFrames() const { + MutexLock lock(&mutex_); + return max_composition_delay_in_frames_; +} + } // namespace webrtc diff --git a/modules/video_coding/timing.h b/modules/video_coding/timing.h index c9efcb13b0..7f891e4b9b 100644 --- a/modules/video_coding/timing.h +++ b/modules/video_coding/timing.h @@ -14,10 +14,13 @@ #include #include "absl/types/optional.h" +#include "api/units/time_delta.h" #include "api/video/video_timing.h" #include "modules/video_coding/codec_timer.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/experiments/field_trial_parser.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" +#include "rtc_base/time/timestamp_extrapolator.h" namespace webrtc { @@ -26,10 +29,8 @@ class 
TimestampExtrapolator; class VCMTiming { public: - // The primary timing component should be passed - // if this is the dual timing component. - explicit VCMTiming(Clock* clock, VCMTiming* master_timing = NULL); - virtual ~VCMTiming(); + explicit VCMTiming(Clock* clock); + virtual ~VCMTiming() = default; // Resets the timing to the initial state. void Reset(); @@ -81,8 +82,15 @@ class VCMTiming { virtual int64_t RenderTimeMs(uint32_t frame_timestamp, int64_t now_ms) const; // Returns the maximum time in ms that we can wait for a frame to become - // complete before we must pass it to the decoder. - virtual int64_t MaxWaitingTime(int64_t render_time_ms, int64_t now_ms) const; + // complete before we must pass it to the decoder. render_time_ms==0 indicates + // that the frames should be processed as quickly as possible, with possibly + // only a small delay added to make sure that the decoder is not overloaded. + // In this case, the parameter too_many_frames_queued is used to signal that + // the decode queue is full and that the frame should be decoded as soon as + // possible. + virtual int64_t MaxWaitingTime(int64_t render_time_ms, + int64_t now_ms, + bool too_many_frames_queued) const; // Returns the current target delay which is required delay + decode time + // render delay. @@ -100,34 +108,57 @@ class VCMTiming { void SetTimingFrameInfo(const TimingFrameInfo& info); absl::optional GetTimingFrameInfo(); + void SetMaxCompositionDelayInFrames( + absl::optional max_composition_delay_in_frames); + absl::optional MaxCompositionDelayInFrames() const; + + // Updates the last time a frame was scheduled for decoding. 
+ void SetLastDecodeScheduledTimestamp(int64_t last_decode_scheduled_ts); + enum { kDefaultRenderDelayMs = 10 }; enum { kDelayMaxChangeMsPerS = 100 }; protected: - int RequiredDecodeTimeMs() const RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + int RequiredDecodeTimeMs() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); int64_t RenderTimeMsInternal(uint32_t frame_timestamp, int64_t now_ms) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); - int TargetDelayInternal() const RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + int TargetDelayInternal() const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); private: - rtc::CriticalSection crit_sect_; + mutable Mutex mutex_; Clock* const clock_; - bool master_ RTC_GUARDED_BY(crit_sect_); - TimestampExtrapolator* ts_extrapolator_ RTC_GUARDED_BY(crit_sect_); - std::unique_ptr codec_timer_ RTC_GUARDED_BY(crit_sect_); - int render_delay_ms_ RTC_GUARDED_BY(crit_sect_); + const std::unique_ptr ts_extrapolator_ + RTC_PT_GUARDED_BY(mutex_); + std::unique_ptr codec_timer_ RTC_GUARDED_BY(mutex_) + RTC_PT_GUARDED_BY(mutex_); + int render_delay_ms_ RTC_GUARDED_BY(mutex_); // Best-effort playout delay range for frames from capture to render. // The receiver tries to keep the delay between |min_playout_delay_ms_| // and |max_playout_delay_ms_| taking the network jitter into account. // A special case is where min_playout_delay_ms_ = max_playout_delay_ms_ = 0, // in which case the receiver tries to play the frames as they arrive. 
- int min_playout_delay_ms_ RTC_GUARDED_BY(crit_sect_); - int max_playout_delay_ms_ RTC_GUARDED_BY(crit_sect_); - int jitter_delay_ms_ RTC_GUARDED_BY(crit_sect_); - int current_delay_ms_ RTC_GUARDED_BY(crit_sect_); - uint32_t prev_frame_timestamp_ RTC_GUARDED_BY(crit_sect_); - absl::optional timing_frame_info_ RTC_GUARDED_BY(crit_sect_); - size_t num_decoded_frames_ RTC_GUARDED_BY(crit_sect_); + int min_playout_delay_ms_ RTC_GUARDED_BY(mutex_); + int max_playout_delay_ms_ RTC_GUARDED_BY(mutex_); + int jitter_delay_ms_ RTC_GUARDED_BY(mutex_); + int current_delay_ms_ RTC_GUARDED_BY(mutex_); + uint32_t prev_frame_timestamp_ RTC_GUARDED_BY(mutex_); + absl::optional timing_frame_info_ RTC_GUARDED_BY(mutex_); + size_t num_decoded_frames_ RTC_GUARDED_BY(mutex_); + // Set by the field trial WebRTC-LowLatencyRenderer. The parameter enabled + // determines if the low-latency renderer algorithm should be used for the + // case min playout delay=0 and max playout delay>0. + FieldTrialParameter low_latency_renderer_enabled_ + RTC_GUARDED_BY(mutex_); + absl::optional max_composition_delay_in_frames_ RTC_GUARDED_BY(mutex_); + // Set by the field trial WebRTC-ZeroPlayoutDelay. The parameter min_pacing + // determines the minimum delay between frames scheduled for decoding that is + // used when min playout delay=0 and max playout delay>=0. + FieldTrialParameter zero_playout_delay_min_pacing_ + RTC_GUARDED_BY(mutex_); + // Timestamp at which the last frame was scheduled to be sent to the decoder. + // Used only when the RTP header extension playout delay is set to min=0 ms + // which is indicated by a render time set to 0. 
+ int64_t last_decode_scheduled_ts_ RTC_GUARDED_BY(mutex_); }; } // namespace webrtc diff --git a/modules/video_coding/timing_unittest.cc b/modules/video_coding/timing_unittest.cc index ee86605fb6..cc87a3b4e0 100644 --- a/modules/video_coding/timing_unittest.cc +++ b/modules/video_coding/timing_unittest.cc @@ -11,6 +11,7 @@ #include "modules/video_coding/timing.h" #include "system_wrappers/include/clock.h" +#include "test/field_trial.h" #include "test/gtest.h" namespace webrtc { @@ -18,7 +19,7 @@ namespace { const int kFps = 25; } // namespace -TEST(ReceiverTiming, Tests) { +TEST(ReceiverTimingTest, JitterDelay) { SimulatedClock clock(0); VCMTiming timing(&clock); timing.Reset(); @@ -35,7 +36,7 @@ TEST(ReceiverTiming, Tests) { timing.set_render_delay(0); uint32_t wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); // First update initializes the render time. Since we have no decode delay // we get wait_time_ms = renderTime - now - renderDelay = jitter. EXPECT_EQ(jitter_delay_ms, wait_time_ms); @@ -47,7 +48,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); // Since we gradually increase the delay we only get 100 ms every second. EXPECT_EQ(jitter_delay_ms - 10, wait_time_ms); @@ -56,7 +57,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); EXPECT_EQ(jitter_delay_ms, wait_time_ms); // Insert frames without jitter, verify that this gives the exact wait time. 
@@ -69,7 +70,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); EXPECT_EQ(jitter_delay_ms, wait_time_ms); // Add decode time estimates for 1 second. @@ -84,7 +85,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); EXPECT_EQ(jitter_delay_ms, wait_time_ms); const int kMinTotalDelayMs = 200; @@ -96,7 +97,7 @@ TEST(ReceiverTiming, Tests) { timing.set_render_delay(kRenderDelayMs); wait_time_ms = timing.MaxWaitingTime( timing.RenderTimeMs(timestamp, clock.TimeInMilliseconds()), - clock.TimeInMilliseconds()); + clock.TimeInMilliseconds(), /*too_many_frames_queued=*/false); // We should at least have kMinTotalDelayMs - decodeTime (10) - renderTime // (10) to wait. EXPECT_EQ(kMinTotalDelayMs - kDecodeTimeMs - kRenderDelayMs, wait_time_ms); @@ -110,7 +111,7 @@ TEST(ReceiverTiming, Tests) { timing.UpdateCurrentDelay(timestamp); } -TEST(ReceiverTiming, WrapAround) { +TEST(ReceiverTimingTest, TimestampWrapAround) { SimulatedClock clock(0); VCMTiming timing(&clock); // Provoke a wrap-around. The fifth frame will have wrapped at 25 fps. @@ -127,4 +128,155 @@ TEST(ReceiverTiming, WrapAround) { } } +TEST(ReceiverTimingTest, MaxWaitingTimeIsZeroForZeroRenderTime) { + // This is the default path when the RTP playout delay header extension is set + // to min==0. + constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us. 
+ constexpr int64_t kTimeDeltaMs = 1000.0 / 60.0; + constexpr int64_t kZeroRenderTimeMs = 0; + SimulatedClock clock(kStartTimeUs); + VCMTiming timing(&clock); + timing.Reset(); + for (int i = 0; i < 10; ++i) { + clock.AdvanceTimeMilliseconds(kTimeDeltaMs); + int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + } + // Another frame submitted at the same time also returns a negative max + // waiting time. + int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + // MaxWaitingTime should be less than zero even if there's a burst of frames. + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + EXPECT_LT(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); +} + +TEST(ReceiverTimingTest, MaxWaitingTimeZeroDelayPacingExperiment) { + // The minimum pacing is enabled by a field trial and active if the RTP + // playout delay header extension is set to min==0. + constexpr int64_t kMinPacingMs = 3; + test::ScopedFieldTrials override_field_trials( + "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/"); + constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us. + constexpr int64_t kTimeDeltaMs = 1000.0 / 60.0; + constexpr int64_t kZeroRenderTimeMs = 0; + SimulatedClock clock(kStartTimeUs); + VCMTiming timing(&clock); + timing.Reset(); + // MaxWaitingTime() returns zero for evenly spaced video frames. 
+ for (int i = 0; i < 10; ++i) { + clock.AdvanceTimeMilliseconds(kTimeDeltaMs); + int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + timing.SetLastDecodeScheduledTimestamp(now_ms); + } + // Another frame submitted at the same time is paced according to the field + // trial setting. + int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + kMinPacingMs); + // If there's a burst of frames, the wait time is calculated based on next + // decode time. + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + kMinPacingMs); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + kMinPacingMs); + // Allow a few ms to pass, this should be subtracted from the MaxWaitingTime. + constexpr int64_t kTwoMs = 2; + clock.AdvanceTimeMilliseconds(kTwoMs); + now_ms = clock.TimeInMilliseconds(); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + kMinPacingMs - kTwoMs); + // A frame is decoded at the current time, the wait time should be restored to + // pacing delay. + timing.SetLastDecodeScheduledTimestamp(now_ms); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + kMinPacingMs); +} + +TEST(ReceiverTimingTest, DefaultMaxWaitingTimeUnaffectedByPacingExperiment) { + // The minimum pacing is enabled by a field trial but should not have any + // effect if render_time_ms is greater than 0; + test::ScopedFieldTrials override_field_trials( + "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/"); + constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us. 
+ constexpr int64_t kTimeDeltaMs = 1000.0 / 60.0; + SimulatedClock clock(kStartTimeUs); + VCMTiming timing(&clock); + timing.Reset(); + clock.AdvanceTimeMilliseconds(kTimeDeltaMs); + int64_t now_ms = clock.TimeInMilliseconds(); + int64_t render_time_ms = now_ms + 30; + // Estimate the internal processing delay from the first frame. + int64_t estimated_processing_delay = + (render_time_ms - now_ms) - + timing.MaxWaitingTime(render_time_ms, now_ms, + /*too_many_frames_queued=*/false); + EXPECT_GT(estimated_processing_delay, 0); + + // Any other frame submitted at the same time should be scheduled according to + // its render time. + for (int i = 0; i < 5; ++i) { + render_time_ms += kTimeDeltaMs; + EXPECT_EQ(timing.MaxWaitingTime(render_time_ms, now_ms, + /*too_many_frames_queued=*/false), + render_time_ms - now_ms - estimated_processing_delay); + } +} + +TEST(ReceiverTimingTest, MaxWaitingTimeReturnsZeroIfTooManyFramesQueuedIsTrue) { + // The minimum pacing is enabled by a field trial and active if the RTP + // playout delay header extension is set to min==0. + constexpr int64_t kMinPacingMs = 3; + test::ScopedFieldTrials override_field_trials( + "WebRTC-ZeroPlayoutDelay/min_pacing:3ms/"); + constexpr int64_t kStartTimeUs = 3.15e13; // About one year in us. + constexpr int64_t kTimeDeltaMs = 1000.0 / 60.0; + constexpr int64_t kZeroRenderTimeMs = 0; + SimulatedClock clock(kStartTimeUs); + VCMTiming timing(&clock); + timing.Reset(); + // MaxWaitingTime() returns zero for evenly spaced video frames. + for (int i = 0; i < 10; ++i) { + clock.AdvanceTimeMilliseconds(kTimeDeltaMs); + int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + 0); + timing.SetLastDecodeScheduledTimestamp(now_ms); + } + // Another frame submitted at the same time is paced according to the field + // trial setting. 
+ int64_t now_ms = clock.TimeInMilliseconds(); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/false), + kMinPacingMs); + // MaxWaitingTime returns 0 even if there's a burst of frames if + // too_many_frames_queued is set to true. + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/true), + 0); + EXPECT_EQ(timing.MaxWaitingTime(kZeroRenderTimeMs, now_ms, + /*too_many_frames_queued=*/true), + 0); +} + } // namespace webrtc diff --git a/modules/video_coding/utility/decoded_frames_history.cc b/modules/video_coding/utility/decoded_frames_history.cc index d15cf26d8d..005bb26ea6 100644 --- a/modules/video_coding/utility/decoded_frames_history.cc +++ b/modules/video_coding/utility/decoded_frames_history.cc @@ -18,89 +18,63 @@ namespace webrtc { namespace video_coding { -DecodedFramesHistory::LayerHistory::LayerHistory() = default; -DecodedFramesHistory::LayerHistory::~LayerHistory() = default; - DecodedFramesHistory::DecodedFramesHistory(size_t window_size) - : window_size_(window_size) {} + : buffer_(window_size) {} DecodedFramesHistory::~DecodedFramesHistory() = default; -void DecodedFramesHistory::InsertDecoded(const VideoLayerFrameId& frameid, - uint32_t timestamp) { - last_decoded_frame_ = frameid; +void DecodedFramesHistory::InsertDecoded(int64_t frame_id, uint32_t timestamp) { + last_decoded_frame_ = frame_id; last_decoded_frame_timestamp_ = timestamp; - if (static_cast(layers_.size()) < frameid.spatial_layer + 1) { - size_t old_size = layers_.size(); - layers_.resize(frameid.spatial_layer + 1); - - for (size_t i = old_size; i < layers_.size(); ++i) - layers_[i].buffer.resize(window_size_); - - layers_[frameid.spatial_layer].last_picture_id = frameid.picture_id; - layers_[frameid.spatial_layer] - .buffer[PictureIdToIndex(frameid.picture_id)] = true; - return; - } - - int new_index = PictureIdToIndex(frameid.picture_id); - LayerHistory& history = layers_[frameid.spatial_layer]; + int 
new_index = FrameIdToIndex(frame_id); - RTC_DCHECK(history.last_picture_id < frameid.picture_id); + RTC_DCHECK(last_frame_id_ < frame_id); - // Clears expired values from the cyclic buffer. - if (history.last_picture_id) { - int64_t id_jump = frameid.picture_id - *history.last_picture_id; - int last_index = PictureIdToIndex(*history.last_picture_id); + // Clears expired values from the cyclic buffer_. + if (last_frame_id_) { + int64_t id_jump = frame_id - *last_frame_id_; + int last_index = FrameIdToIndex(*last_frame_id_); - if (id_jump >= window_size_) { - std::fill(history.buffer.begin(), history.buffer.end(), false); + if (id_jump >= static_cast(buffer_.size())) { + std::fill(buffer_.begin(), buffer_.end(), false); } else if (new_index > last_index) { - std::fill(history.buffer.begin() + last_index + 1, - history.buffer.begin() + new_index, false); - } else { - std::fill(history.buffer.begin() + last_index + 1, history.buffer.end(), - false); - std::fill(history.buffer.begin(), history.buffer.begin() + new_index, + std::fill(buffer_.begin() + last_index + 1, buffer_.begin() + new_index, false); + } else { + std::fill(buffer_.begin() + last_index + 1, buffer_.end(), false); + std::fill(buffer_.begin(), buffer_.begin() + new_index, false); } } - history.buffer[new_index] = true; - history.last_picture_id = frameid.picture_id; + buffer_[new_index] = true; + last_frame_id_ = frame_id; } -bool DecodedFramesHistory::WasDecoded(const VideoLayerFrameId& frameid) { - // Unseen before spatial layer. - if (static_cast(layers_.size()) < frameid.spatial_layer + 1) - return false; - - LayerHistory& history = layers_[frameid.spatial_layer]; - - if (!history.last_picture_id) +bool DecodedFramesHistory::WasDecoded(int64_t frame_id) { + if (!last_frame_id_) return false; - // Reference to the picture_id out of the stored history should happen. 
- if (frameid.picture_id <= *history.last_picture_id - window_size_) { - RTC_LOG(LS_WARNING) << "Referencing a frame out of the history window. " + // Reference to a frame_id outside the stored window should not happen. + if (frame_id <= *last_frame_id_ - static_cast(buffer_.size())) { + RTC_LOG(LS_WARNING) << "Referencing a frame out of the window. " "Assuming it was undecoded to avoid artifacts."; return false; } - if (frameid.picture_id > history.last_picture_id) + if (frame_id > last_frame_id_) return false; - return history.buffer[PictureIdToIndex(frameid.picture_id)]; + return buffer_[FrameIdToIndex(frame_id)]; } void DecodedFramesHistory::Clear() { - layers_.clear(); last_decoded_frame_timestamp_.reset(); last_decoded_frame_.reset(); + std::fill(buffer_.begin(), buffer_.end(), false); + last_frame_id_.reset(); } -absl::optional -DecodedFramesHistory::GetLastDecodedFrameId() { +absl::optional DecodedFramesHistory::GetLastDecodedFrameId() { return last_decoded_frame_; } @@ -108,9 +82,9 @@ absl::optional DecodedFramesHistory::GetLastDecodedFrameTimestamp() { return last_decoded_frame_timestamp_; } -int DecodedFramesHistory::PictureIdToIndex(int64_t frame_id) const { - int m = frame_id % window_size_; - return m >= 0 ? m : m + window_size_; +int DecodedFramesHistory::FrameIdToIndex(int64_t frame_id) const { + int m = frame_id % buffer_.size(); + return m >= 0 ? m : m + buffer_.size(); } } // namespace video_coding diff --git a/modules/video_coding/utility/decoded_frames_history.h b/modules/video_coding/utility/decoded_frames_history.h index 7cbe1f5cfc..06008dc22e 100644 --- a/modules/video_coding/utility/decoded_frames_history.h +++ b/modules/video_coding/utility/decoded_frames_history.h @@ -27,31 +27,23 @@ class DecodedFramesHistory { // window_size - how much frames back to the past are actually remembered. explicit DecodedFramesHistory(size_t window_size); ~DecodedFramesHistory(); - // Called for each decoded frame. Assumes picture id's are non-decreasing. 
- void InsertDecoded(const VideoLayerFrameId& frameid, uint32_t timestamp); - // Query if the following (picture_id, spatial_id) pair was inserted before. - // Should be at most less by window_size-1 than the last inserted picture id. - bool WasDecoded(const VideoLayerFrameId& frameid); + // Called for each decoded frame. Assumes frame id's are non-decreasing. + void InsertDecoded(int64_t frame_id, uint32_t timestamp); + // Query if the following frame_id was inserted before. + // Should be at most less by window_size-1 than the last inserted frame id. + bool WasDecoded(int64_t frame_id); void Clear(); - absl::optional GetLastDecodedFrameId(); + absl::optional GetLastDecodedFrameId(); absl::optional GetLastDecodedFrameTimestamp(); private: - struct LayerHistory { - LayerHistory(); - ~LayerHistory(); - // Cyclic bitset buffer. Stores last known |window_size| bits. - std::vector buffer; - absl::optional last_picture_id; - }; - - int PictureIdToIndex(int64_t frame_id) const; - - const int window_size_; - std::vector layers_; - absl::optional last_decoded_frame_; + int FrameIdToIndex(int64_t frame_id) const; + + std::vector buffer_; + absl::optional last_frame_id_; + absl::optional last_decoded_frame_; absl::optional last_decoded_frame_timestamp_; }; diff --git a/modules/video_coding/utility/decoded_frames_history_unittest.cc b/modules/video_coding/utility/decoded_frames_history_unittest.cc index ccf393d403..ac09a42053 100644 --- a/modules/video_coding/utility/decoded_frames_history_unittest.cc +++ b/modules/video_coding/utility/decoded_frames_history_unittest.cc @@ -20,125 +20,93 @@ constexpr int kHistorySize = 1 << 13; TEST(DecodedFramesHistory, RequestOnEmptyHistory) { DecodedFramesHistory history(kHistorySize); - EXPECT_EQ(history.WasDecoded({1234, 0}), false); + EXPECT_EQ(history.WasDecoded(1234), false); } TEST(DecodedFramesHistory, FindsLastDecodedFrame) { DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); - 
EXPECT_EQ(history.WasDecoded({1234, 0}), true); + history.InsertDecoded(1234, 0); + EXPECT_EQ(history.WasDecoded(1234), true); } TEST(DecodedFramesHistory, FindsPreviousFrame) { DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); - history.InsertDecoded({1235, 0}, 0); - EXPECT_EQ(history.WasDecoded({1234, 0}), true); + history.InsertDecoded(1234, 0); + history.InsertDecoded(1235, 0); + EXPECT_EQ(history.WasDecoded(1234), true); } TEST(DecodedFramesHistory, ReportsMissingFrame) { DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); - history.InsertDecoded({1236, 0}, 0); - EXPECT_EQ(history.WasDecoded({1235, 0}), false); + history.InsertDecoded(1234, 0); + history.InsertDecoded(1236, 0); + EXPECT_EQ(history.WasDecoded(1235), false); } TEST(DecodedFramesHistory, ClearsHistory) { DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); + history.InsertDecoded(1234, 0); history.Clear(); - EXPECT_EQ(history.WasDecoded({1234, 0}), false); + EXPECT_EQ(history.WasDecoded(1234), false); EXPECT_EQ(history.GetLastDecodedFrameId(), absl::nullopt); EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), absl::nullopt); } -TEST(DecodedFramesHistory, HandlesMultipleLayers) { - DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); - history.InsertDecoded({1234, 1}, 0); - history.InsertDecoded({1235, 0}, 0); - history.InsertDecoded({1236, 0}, 0); - history.InsertDecoded({1236, 1}, 0); - EXPECT_EQ(history.WasDecoded({1235, 0}), true); - EXPECT_EQ(history.WasDecoded({1235, 1}), false); -} - -TEST(DecodedFramesHistory, HandlesNewLayer) { - DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); - history.InsertDecoded({1234, 1}, 0); - history.InsertDecoded({1235, 0}, 0); - history.InsertDecoded({1235, 1}, 0); - history.InsertDecoded({1236, 0}, 0); - history.InsertDecoded({1236, 1}, 0); - EXPECT_EQ(history.WasDecoded({1234, 2}), false); -} - 
-TEST(DecodedFramesHistory, HandlesSkippedLayer) { - DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); - history.InsertDecoded({1234, 2}, 0); - history.InsertDecoded({1235, 0}, 0); - history.InsertDecoded({1235, 1}, 0); - EXPECT_EQ(history.WasDecoded({1234, 1}), false); - EXPECT_EQ(history.WasDecoded({1235, 1}), true); -} - TEST(DecodedFramesHistory, HandlesBigJumpInPictureId) { DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); - history.InsertDecoded({1235, 0}, 0); - history.InsertDecoded({1236, 0}, 0); - history.InsertDecoded({1236 + kHistorySize / 2, 0}, 0); - EXPECT_EQ(history.WasDecoded({1234, 0}), true); - EXPECT_EQ(history.WasDecoded({1237, 0}), false); + history.InsertDecoded(1234, 0); + history.InsertDecoded(1235, 0); + history.InsertDecoded(1236, 0); + history.InsertDecoded(1236 + kHistorySize / 2, 0); + EXPECT_EQ(history.WasDecoded(1234), true); + EXPECT_EQ(history.WasDecoded(1237), false); } TEST(DecodedFramesHistory, ForgetsTooOldHistory) { DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({1234, 0}, 0); - history.InsertDecoded({1235, 0}, 0); - history.InsertDecoded({1236, 0}, 0); - history.InsertDecoded({1236 + kHistorySize * 2, 0}, 0); - EXPECT_EQ(history.WasDecoded({1234, 0}), false); - EXPECT_EQ(history.WasDecoded({1237, 0}), false); + history.InsertDecoded(1234, 0); + history.InsertDecoded(1235, 0); + history.InsertDecoded(1236, 0); + history.InsertDecoded(1236 + kHistorySize * 2, 0); + EXPECT_EQ(history.WasDecoded(1234), false); + EXPECT_EQ(history.WasDecoded(1237), false); } TEST(DecodedFramesHistory, ReturnsLastDecodedFrameId) { DecodedFramesHistory history(kHistorySize); EXPECT_EQ(history.GetLastDecodedFrameId(), absl::nullopt); - history.InsertDecoded({1234, 0}, 0); - EXPECT_EQ(history.GetLastDecodedFrameId(), VideoLayerFrameId(1234, 0)); - history.InsertDecoded({1235, 0}, 0); - EXPECT_EQ(history.GetLastDecodedFrameId(), VideoLayerFrameId(1235, 0)); + 
history.InsertDecoded(1234, 0); + EXPECT_EQ(history.GetLastDecodedFrameId(), 1234); + history.InsertDecoded(1235, 0); + EXPECT_EQ(history.GetLastDecodedFrameId(), 1235); } TEST(DecodedFramesHistory, ReturnsLastDecodedFrameTimestamp) { DecodedFramesHistory history(kHistorySize); EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), absl::nullopt); - history.InsertDecoded({1234, 0}, 12345); + history.InsertDecoded(1234, 12345); EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), 12345u); - history.InsertDecoded({1235, 0}, 12366); + history.InsertDecoded(1235, 12366); EXPECT_EQ(history.GetLastDecodedFrameTimestamp(), 12366u); } TEST(DecodedFramesHistory, NegativePictureIds) { DecodedFramesHistory history(kHistorySize); - history.InsertDecoded({-1234, 0}, 12345); - history.InsertDecoded({-1233, 0}, 12366); - EXPECT_EQ(history.GetLastDecodedFrameId()->picture_id, -1233); + history.InsertDecoded(-1234, 12345); + history.InsertDecoded(-1233, 12366); + EXPECT_EQ(*history.GetLastDecodedFrameId(), -1233); - history.InsertDecoded({-1, 0}, 12377); - history.InsertDecoded({0, 0}, 12388); - EXPECT_EQ(history.GetLastDecodedFrameId()->picture_id, 0); + history.InsertDecoded(-1, 12377); + history.InsertDecoded(0, 12388); + EXPECT_EQ(*history.GetLastDecodedFrameId(), 0); - history.InsertDecoded({1, 0}, 12399); - EXPECT_EQ(history.GetLastDecodedFrameId()->picture_id, 1); + history.InsertDecoded(1, 12399); + EXPECT_EQ(*history.GetLastDecodedFrameId(), 1); - EXPECT_EQ(history.WasDecoded({-1234, 0}), true); - EXPECT_EQ(history.WasDecoded({-1, 0}), true); - EXPECT_EQ(history.WasDecoded({0, 0}), true); - EXPECT_EQ(history.WasDecoded({1, 0}), true); + EXPECT_EQ(history.WasDecoded(-1234), true); + EXPECT_EQ(history.WasDecoded(-1), true); + EXPECT_EQ(history.WasDecoded(0), true); + EXPECT_EQ(history.WasDecoded(1), true); } } // namespace diff --git a/modules/video_coding/utility/frame_dropper.h b/modules/video_coding/utility/frame_dropper.h index 50a8d58e66..014b5dd7aa 100644 --- 
a/modules/video_coding/utility/frame_dropper.h +++ b/modules/video_coding/utility/frame_dropper.h @@ -44,7 +44,7 @@ class FrameDropper { // Input: // - framesize_bytes : The size of the latest frame returned // from the encoder. - // - delta_frame : True if the encoder returned a key frame. + // - delta_frame : True if the encoder returned a delta frame. void Fill(size_t framesize_bytes, bool delta_frame); void Leak(uint32_t input_framerate); diff --git a/modules/video_coding/utility/ivf_file_reader.cc b/modules/video_coding/utility/ivf_file_reader.cc index 9667bb7cec..f326c8cb53 100644 --- a/modules/video_coding/utility/ivf_file_reader.cc +++ b/modules/video_coding/utility/ivf_file_reader.cc @@ -27,6 +27,7 @@ constexpr int kCodecTypeBytesCount = 4; constexpr uint8_t kFileHeaderStart[kCodecTypeBytesCount] = {'D', 'K', 'I', 'F'}; constexpr uint8_t kVp8Header[kCodecTypeBytesCount] = {'V', 'P', '8', '0'}; constexpr uint8_t kVp9Header[kCodecTypeBytesCount] = {'V', 'P', '9', '0'}; +constexpr uint8_t kAv1Header[kCodecTypeBytesCount] = {'A', 'V', '0', '1'}; constexpr uint8_t kH264Header[kCodecTypeBytesCount] = {'H', '2', '6', '4'}; } // namespace @@ -163,14 +164,13 @@ absl::optional IvfFileReader::NextFrame() { image.SetTimestamp(static_cast(current_timestamp)); } image.SetEncodedData(payload); - image.SetSpatialIndex(static_cast(layer_sizes.size())); + image.SetSpatialIndex(static_cast(layer_sizes.size()) - 1); for (size_t i = 0; i < layer_sizes.size(); ++i) { image.SetSpatialLayerFrameSize(static_cast(i), layer_sizes[i]); } if (is_first_frame) { image._frameType = VideoFrameType::kVideoFrameKey; } - image._completeFrame = true; return image; } @@ -191,6 +191,9 @@ absl::optional IvfFileReader::ParseCodecType(uint8_t* buffer, if (memcmp(&buffer[start_pos], kVp9Header, kCodecTypeBytesCount) == 0) { return VideoCodecType::kVideoCodecVP9; } + if (memcmp(&buffer[start_pos], kAv1Header, kCodecTypeBytesCount) == 0) { + return VideoCodecType::kVideoCodecAV1; + } if 
(memcmp(&buffer[start_pos], kH264Header, kCodecTypeBytesCount) == 0) { return VideoCodecType::kVideoCodecH264; } diff --git a/modules/video_coding/utility/ivf_file_reader.h b/modules/video_coding/utility/ivf_file_reader.h index eb5a21d55d..5e0634f9fd 100644 --- a/modules/video_coding/utility/ivf_file_reader.h +++ b/modules/video_coding/utility/ivf_file_reader.h @@ -16,6 +16,7 @@ #include "absl/types/optional.h" #include "api/video/encoded_image.h" +#include "api/video_codecs/video_codec.h" #include "rtc_base/system/file_wrapper.h" namespace webrtc { diff --git a/modules/video_coding/utility/ivf_file_reader_unittest.cc b/modules/video_coding/utility/ivf_file_reader_unittest.cc index 6ff580511b..c9cf14674b 100644 --- a/modules/video_coding/utility/ivf_file_reader_unittest.cc +++ b/modules/video_coding/utility/ivf_file_reader_unittest.cc @@ -83,7 +83,7 @@ class IvfFileReaderTest : public ::testing::Test { bool use_capture_tims_ms, int spatial_layers_count) { ASSERT_TRUE(frame); - EXPECT_EQ(frame->SpatialIndex(), spatial_layers_count); + EXPECT_EQ(frame->SpatialIndex(), spatial_layers_count - 1); if (use_capture_tims_ms) { EXPECT_EQ(frame->capture_time_ms_, static_cast(frame_index)); EXPECT_EQ(frame->Timestamp(), static_cast(90 * frame_index)); @@ -145,6 +145,16 @@ TEST_F(IvfFileReaderTest, BasicVP9FileMsTimestamp) { ValidateContent(kVideoCodecVP9, true, 1); } +TEST_F(IvfFileReaderTest, BasicAv1FileNtpTimestamp) { + CreateTestFile(kVideoCodecAV1, false, 1); + ValidateContent(kVideoCodecAV1, false, 1); +} + +TEST_F(IvfFileReaderTest, BasicAv1FileMsTimestamp) { + CreateTestFile(kVideoCodecAV1, true, 1); + ValidateContent(kVideoCodecAV1, true, 1); +} + TEST_F(IvfFileReaderTest, BasicH264FileNtpTimestamp) { CreateTestFile(kVideoCodecH264, false, 1); ValidateContent(kVideoCodecH264, false, 1); @@ -165,6 +175,11 @@ TEST_F(IvfFileReaderTest, MultilayerVP9FileNtpTimestamp) { ValidateContent(kVideoCodecVP9, false, 3); } +TEST_F(IvfFileReaderTest, MultilayerAv1FileNtpTimestamp) 
{ + CreateTestFile(kVideoCodecAV1, false, 3); + ValidateContent(kVideoCodecAV1, false, 3); +} + TEST_F(IvfFileReaderTest, MultilayerH264FileNtpTimestamp) { CreateTestFile(kVideoCodecH264, false, 3); ValidateContent(kVideoCodecH264, false, 3); diff --git a/modules/video_coding/utility/ivf_file_writer.cc b/modules/video_coding/utility/ivf_file_writer.cc index 46b8e87ba3..496da894a1 100644 --- a/modules/video_coding/utility/ivf_file_writer.cc +++ b/modules/video_coding/utility/ivf_file_writer.cc @@ -75,6 +75,12 @@ bool IvfFileWriter::WriteHeader() { ivf_header[10] = '9'; ivf_header[11] = '0'; break; + case kVideoCodecAV1: + ivf_header[8] = 'A'; + ivf_header[9] = 'V'; + ivf_header[10] = '0'; + ivf_header[11] = '1'; + break; case kVideoCodecH264: ivf_header[8] = 'H'; ivf_header[9] = '2'; diff --git a/modules/video_coding/utility/ivf_file_writer.h b/modules/video_coding/utility/ivf_file_writer.h index 5de67acdb2..140b9c06ff 100644 --- a/modules/video_coding/utility/ivf_file_writer.h +++ b/modules/video_coding/utility/ivf_file_writer.h @@ -17,6 +17,7 @@ #include #include "api/video/encoded_image.h" +#include "api/video/video_codec_type.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/system/file_wrapper.h" #include "rtc_base/time_utils.h" diff --git a/modules/video_coding/utility/ivf_file_writer_unittest.cc b/modules/video_coding/utility/ivf_file_writer_unittest.cc index 49e0459ba6..8e781a7b22 100644 --- a/modules/video_coding/utility/ivf_file_writer_unittest.cc +++ b/modules/video_coding/utility/ivf_file_writer_unittest.cc @@ -147,6 +147,16 @@ TEST_F(IvfFileWriterTest, WritesBasicVP9FileMsTimestamp) { RunBasicFileStructureTest(kVideoCodecVP9, fourcc, true); } +TEST_F(IvfFileWriterTest, WritesBasicAv1FileNtpTimestamp) { + const uint8_t fourcc[4] = {'A', 'V', '0', '1'}; + RunBasicFileStructureTest(kVideoCodecAV1, fourcc, false); +} + +TEST_F(IvfFileWriterTest, WritesBasicAv1FileMsTimestamp) { + const uint8_t fourcc[4] = {'A', 'V', '0', '1'}; + 
RunBasicFileStructureTest(kVideoCodecAV1, fourcc, true); +} + TEST_F(IvfFileWriterTest, WritesBasicH264FileNtpTimestamp) { const uint8_t fourcc[4] = {'H', '2', '6', '4'}; RunBasicFileStructureTest(kVideoCodecH264, fourcc, false); diff --git a/modules/video_coding/utility/qp_parser.cc b/modules/video_coding/utility/qp_parser.cc new file mode 100644 index 0000000000..18f225447d --- /dev/null +++ b/modules/video_coding/utility/qp_parser.cc @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/utility/qp_parser.h" + +#include "modules/video_coding/utility/vp8_header_parser.h" +#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h" + +namespace webrtc { + +absl::optional QpParser::Parse(VideoCodecType codec_type, + size_t spatial_idx, + const uint8_t* frame_data, + size_t frame_size) { + if (frame_data == nullptr || frame_size == 0 || + spatial_idx >= kMaxSimulcastStreams) { + return absl::nullopt; + } + + if (codec_type == kVideoCodecVP8) { + int qp = -1; + if (vp8::GetQp(frame_data, frame_size, &qp)) { + return qp; + } + } else if (codec_type == kVideoCodecVP9) { + int qp = -1; + if (vp9::GetQp(frame_data, frame_size, &qp)) { + return qp; + } + } else if (codec_type == kVideoCodecH264) { + return h264_parsers_[spatial_idx].Parse(frame_data, frame_size); + } + + return absl::nullopt; +} + +absl::optional QpParser::H264QpParser::Parse( + const uint8_t* frame_data, + size_t frame_size) { + MutexLock lock(&mutex_); + bitstream_parser_.ParseBitstream( + rtc::ArrayView(frame_data, frame_size)); + return bitstream_parser_.GetLastSliceQp(); +} + +} // 
namespace webrtc diff --git a/modules/video_coding/utility/qp_parser.h b/modules/video_coding/utility/qp_parser.h new file mode 100644 index 0000000000..f132ff9337 --- /dev/null +++ b/modules/video_coding/utility/qp_parser.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_ +#define MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_ + +#include "absl/types/optional.h" +#include "api/video/video_codec_constants.h" +#include "api/video/video_codec_type.h" +#include "common_video/h264/h264_bitstream_parser.h" +#include "rtc_base/synchronization/mutex.h" + +namespace webrtc { +class QpParser { + public: + absl::optional Parse(VideoCodecType codec_type, + size_t spatial_idx, + const uint8_t* frame_data, + size_t frame_size); + + private: + // A thread safe wrapper for H264 bitstream parser. + class H264QpParser { + public: + absl::optional Parse(const uint8_t* frame_data, + size_t frame_size); + + private: + Mutex mutex_; + H264BitstreamParser bitstream_parser_ RTC_GUARDED_BY(mutex_); + }; + + H264QpParser h264_parsers_[kMaxSimulcastStreams]; +}; + +} // namespace webrtc + +#endif // MODULES_VIDEO_CODING_UTILITY_QP_PARSER_H_ diff --git a/modules/video_coding/utility/qp_parser_unittest.cc b/modules/video_coding/utility/qp_parser_unittest.cc new file mode 100644 index 0000000000..1131288f26 --- /dev/null +++ b/modules/video_coding/utility/qp_parser_unittest.cc @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "modules/video_coding/utility/qp_parser.h" + +#include + +#include "test/gtest.h" + +namespace webrtc { + +namespace { +// ffmpeg -s 16x16 -f rawvideo -pix_fmt rgb24 -r 30 -i /dev/zero -c:v libvpx +// -qmin 20 -qmax 20 -crf 20 -frames:v 1 -y out.ivf +const uint8_t kCodedFrameVp8Qp25[] = { + 0x10, 0x02, 0x00, 0x9d, 0x01, 0x2a, 0x10, 0x00, 0x10, 0x00, + 0x02, 0x47, 0x08, 0x85, 0x85, 0x88, 0x85, 0x84, 0x88, 0x0c, + 0x82, 0x00, 0x0c, 0x0d, 0x60, 0x00, 0xfe, 0xfc, 0x5c, 0xd0}; + +// ffmpeg -s 16x16 -f rawvideo -pix_fmt rgb24 -r 30 -i /dev/zero -c:v libvpx-vp9 +// -qmin 24 -qmax 24 -crf 24 -frames:v 1 -y out.ivf +const uint8_t kCodedFrameVp9Qp96[] = { + 0xa2, 0x49, 0x83, 0x42, 0xe0, 0x00, 0xf0, 0x00, 0xf6, 0x00, + 0x38, 0x24, 0x1c, 0x18, 0xc0, 0x00, 0x00, 0x30, 0x70, 0x00, + 0x00, 0x4a, 0xa7, 0xff, 0xfc, 0xb9, 0x01, 0xbf, 0xff, 0xff, + 0x97, 0x20, 0xdb, 0xff, 0xff, 0xcb, 0x90, 0x5d, 0x40}; + +// ffmpeg -s 16x16 -f rawvideo -pix_fmt yuv420p -r 30 -i /dev/zero -c:v libx264 +// -qmin 38 -qmax 38 -crf 38 -profile:v baseline -frames:v 2 -y out.264 +const uint8_t kCodedFrameH264SpsPpsIdrQp38[] = { + 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0xc0, 0x0a, 0xd9, 0x1e, 0x84, + 0x00, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xf0, 0x3c, + 0x48, 0x99, 0x20, 0x00, 0x00, 0x00, 0x01, 0x68, 0xcb, 0x80, 0xc4, + 0xb2, 0x00, 0x00, 0x01, 0x65, 0x88, 0x84, 0xf1, 0x18, 0xa0, 0x00, + 0x20, 0x5b, 0x1c, 0x00, 0x04, 0x07, 0xe3, 0x80, 0x00, 0x80, 0xfe}; + +const uint8_t kCodedFrameH264SpsPpsIdrQp49[] = { + 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0xc0, 0x0a, 0xd9, 0x1e, 0x84, + 0x00, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xf0, 0x3c, + 0x48, 0x99, 
0x20, 0x00, 0x00, 0x00, 0x01, 0x68, 0xcb, 0x80, 0x5d, + 0x2c, 0x80, 0x00, 0x00, 0x01, 0x65, 0x88, 0x84, 0xf1, 0x18, 0xa0, + 0x00, 0x5e, 0x38, 0x00, 0x08, 0x03, 0xc7, 0x00, 0x01, 0x00, 0x7c}; + +const uint8_t kCodedFrameH264InterSliceQpDelta0[] = {0x00, 0x00, 0x00, 0x01, + 0x41, 0x9a, 0x39, 0xea}; + +} // namespace + +TEST(QpParserTest, ParseQpVp8) { + QpParser parser; + absl::optional qp = parser.Parse( + kVideoCodecVP8, 0, kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25)); + EXPECT_EQ(qp, 25u); +} + +TEST(QpParserTest, ParseQpVp9) { + QpParser parser; + absl::optional qp = parser.Parse( + kVideoCodecVP9, 0, kCodedFrameVp9Qp96, sizeof(kCodedFrameVp9Qp96)); + EXPECT_EQ(qp, 96u); +} + +TEST(QpParserTest, ParseQpH264) { + QpParser parser; + absl::optional qp = parser.Parse( + VideoCodecType::kVideoCodecH264, 0, kCodedFrameH264SpsPpsIdrQp38, + sizeof(kCodedFrameH264SpsPpsIdrQp38)); + EXPECT_EQ(qp, 38u); + + qp = parser.Parse(kVideoCodecH264, 1, kCodedFrameH264SpsPpsIdrQp49, + sizeof(kCodedFrameH264SpsPpsIdrQp49)); + EXPECT_EQ(qp, 49u); + + qp = parser.Parse(kVideoCodecH264, 0, kCodedFrameH264InterSliceQpDelta0, + sizeof(kCodedFrameH264InterSliceQpDelta0)); + EXPECT_EQ(qp, 38u); + + qp = parser.Parse(kVideoCodecH264, 1, kCodedFrameH264InterSliceQpDelta0, + sizeof(kCodedFrameH264InterSliceQpDelta0)); + EXPECT_EQ(qp, 49u); +} + +TEST(QpParserTest, ParseQpUnsupportedCodecType) { + QpParser parser; + absl::optional qp = parser.Parse( + kVideoCodecGeneric, 0, kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25)); + EXPECT_FALSE(qp.has_value()); +} + +TEST(QpParserTest, ParseQpNullData) { + QpParser parser; + absl::optional qp = parser.Parse(kVideoCodecVP8, 0, nullptr, 100); + EXPECT_FALSE(qp.has_value()); +} + +TEST(QpParserTest, ParseQpEmptyData) { + QpParser parser; + absl::optional qp = + parser.Parse(kVideoCodecVP8, 0, kCodedFrameVp8Qp25, 0); + EXPECT_FALSE(qp.has_value()); +} + +TEST(QpParserTest, ParseQpSpatialIdxExceedsMax) { + QpParser parser; + absl::optional qp = + 
parser.Parse(kVideoCodecVP8, kMaxSimulcastStreams, kCodedFrameVp8Qp25, + sizeof(kCodedFrameVp8Qp25)); + EXPECT_FALSE(qp.has_value()); +} + +} // namespace webrtc diff --git a/modules/video_coding/utility/quality_scaler.cc b/modules/video_coding/utility/quality_scaler.cc index e909b2f88e..2859ac2e22 100644 --- a/modules/video_coding/utility/quality_scaler.cc +++ b/modules/video_coding/utility/quality_scaler.cc @@ -86,7 +86,6 @@ class QualityScaler::CheckQpTask { struct Result { bool observed_enough_frames = false; bool qp_usage_reported = false; - bool clear_qp_samples = false; }; CheckQpTask(QualityScaler* quality_scaler, Result previous_task_result) @@ -110,49 +109,36 @@ class QualityScaler::CheckQpTask { case QualityScaler::CheckQpResult::kInsufficientSamples: { result_.observed_enough_frames = false; // After this line, |this| may be deleted. - DoCompleteTask(); - return; + break; } case QualityScaler::CheckQpResult::kNormalQp: { result_.observed_enough_frames = true; - // After this line, |this| may be deleted. - DoCompleteTask(); - return; + break; } case QualityScaler::CheckQpResult::kHighQp: { result_.observed_enough_frames = true; result_.qp_usage_reported = true; - state_ = State::kAwaitingQpUsageHandled; - rtc::scoped_refptr - callback = ConstructCallback(); quality_scaler_->fast_rampup_ = false; - // After this line, |this| may be deleted. - quality_scaler_->handler_->OnReportQpUsageHigh(callback); - return; + quality_scaler_->handler_->OnReportQpUsageHigh(); + quality_scaler_->ClearSamples(); + break; } case QualityScaler::CheckQpResult::kLowQp: { result_.observed_enough_frames = true; result_.qp_usage_reported = true; - state_ = State::kAwaitingQpUsageHandled; - rtc::scoped_refptr - callback = ConstructCallback(); - // After this line, |this| may be deleted. 
- quality_scaler_->handler_->OnReportQpUsageLow(callback); - return; + quality_scaler_->handler_->OnReportQpUsageLow(); + quality_scaler_->ClearSamples(); + break; } } + state_ = State::kCompleted; + // Starting the next task deletes the pending task. After this line, + // |this| has been deleted. + quality_scaler_->StartNextCheckQpTask(); }), GetCheckingQpDelayMs()); } - void OnQpUsageHandled(bool clear_qp_samples) { - RTC_DCHECK_EQ(state_, State::kAwaitingQpUsageHandled); - result_.clear_qp_samples = clear_qp_samples; - if (clear_qp_samples) - quality_scaler_->ClearSamples(); - DoCompleteTask(); - } - bool HasCompletedTask() const { return state_ == State::kCompleted; } Result result() const { @@ -164,15 +150,9 @@ class QualityScaler::CheckQpTask { enum class State { kNotStarted, kCheckingQp, - kAwaitingQpUsageHandled, kCompleted, }; - // Defined after the definition of QualityScaler::CheckQpTaskHandlerCallback. - // Gets around a forward declaration issue. - rtc::scoped_refptr - ConstructCallback(); - // Determines the sampling period of CheckQpTasks. int64_t GetCheckingQpDelayMs() const { RTC_DCHECK_RUN_ON(&quality_scaler_->task_checker_); @@ -184,10 +164,6 @@ class QualityScaler::CheckQpTask { // Use half the interval while waiting for enough frames. return quality_scaler_->sampling_period_ms_ / 2; } - if (!previous_task_result_.clear_qp_samples) { - // Check shortly again. - return quality_scaler_->sampling_period_ms_ / 8; - } if (quality_scaler_->scale_factor_ && !previous_task_result_.qp_usage_reported) { // Last CheckQp did not call AdaptDown/Up, possibly reduce interval. @@ -198,15 +174,6 @@ class QualityScaler::CheckQpTask { quality_scaler_->initial_scale_factor_; } - void DoCompleteTask() { - RTC_DCHECK(state_ == State::kCheckingQp || - state_ == State::kAwaitingQpUsageHandled); - state_ = State::kCompleted; - // Starting the next task deletes the pending task. After this line, |this| - // has been deleted. 
- quality_scaler_->StartNextCheckQpTask(); - } - QualityScaler* const quality_scaler_; State state_; const Result previous_task_result_; @@ -215,39 +182,6 @@ class QualityScaler::CheckQpTask { rtc::WeakPtrFactory weak_ptr_factory_; }; -class QualityScaler::CheckQpTaskHandlerCallback - : public QualityScalerQpUsageHandlerCallbackInterface { - public: - CheckQpTaskHandlerCallback( - rtc::WeakPtr check_qp_task) - : QualityScalerQpUsageHandlerCallbackInterface(), - check_qp_task_(std::move(check_qp_task)), - was_handled_(false) {} - - ~CheckQpTaskHandlerCallback() { RTC_DCHECK(was_handled_); } - - void OnQpUsageHandled(bool clear_qp_samples) { - RTC_DCHECK(!was_handled_); - was_handled_ = true; - if (!check_qp_task_) { - // The task has been cancelled through destruction; the result of the - // operation is ignored. - return; - } - check_qp_task_->OnQpUsageHandled(clear_qp_samples); - } - - private: - // The callback may outlive the QualityScaler and its task. - rtc::WeakPtr const check_qp_task_; - bool was_handled_; -}; - -rtc::scoped_refptr -QualityScaler::CheckQpTask::ConstructCallback() { - return new CheckQpTaskHandlerCallback(weak_ptr_factory_.GetWeakPtr()); -} - QualityScaler::QualityScaler(QualityScalerQpUsageHandlerInterface* handler, VideoEncoder::QpThresholds thresholds) : QualityScaler(handler, thresholds, kMeasureMs) {} @@ -255,13 +189,17 @@ QualityScaler::QualityScaler(QualityScalerQpUsageHandlerInterface* handler, // Protected ctor, should not be called directly. QualityScaler::QualityScaler(QualityScalerQpUsageHandlerInterface* handler, VideoEncoder::QpThresholds thresholds, - int64_t sampling_period_ms) + int64_t default_sampling_period_ms) : handler_(handler), thresholds_(thresholds), - sampling_period_ms_(sampling_period_ms), + sampling_period_ms_(QualityScalerSettings::ParseFromFieldTrials() + .SamplingPeriodMs() + .value_or(default_sampling_period_ms)), fast_rampup_(true), // Arbitrarily choose size based on 30 fps for 5 seconds. 
- average_qp_(5 * 30), + average_qp_(QualityScalerSettings::ParseFromFieldTrials() + .AverageQpWindow() + .value_or(5 * 30)), framedrop_percent_media_opt_(5 * 30), framedrop_percent_all_(5 * 30), experiment_enabled_(QualityScalingExperiment::Enabled()), @@ -401,10 +339,4 @@ void QualityScaler::ClearSamples() { QualityScalerQpUsageHandlerInterface::~QualityScalerQpUsageHandlerInterface() {} -QualityScalerQpUsageHandlerCallbackInterface:: - QualityScalerQpUsageHandlerCallbackInterface() {} - -QualityScalerQpUsageHandlerCallbackInterface:: - ~QualityScalerQpUsageHandlerCallbackInterface() {} - } // namespace webrtc diff --git a/modules/video_coding/utility/quality_scaler.h b/modules/video_coding/utility/quality_scaler.h index cfd2fced3f..20169a3cee 100644 --- a/modules/video_coding/utility/quality_scaler.h +++ b/modules/video_coding/utility/quality_scaler.h @@ -18,12 +18,13 @@ #include "absl/types/optional.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "api/video_codecs/video_encoder.h" #include "rtc_base/experiments/quality_scaling_experiment.h" #include "rtc_base/numerics/moving_average.h" #include "rtc_base/ref_count.h" #include "rtc_base/ref_counted_object.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" namespace webrtc { @@ -82,7 +83,7 @@ class QualityScaler { std::unique_ptr pending_qp_task_ RTC_GUARDED_BY(&task_checker_); QualityScalerQpUsageHandlerInterface* const handler_ RTC_GUARDED_BY(&task_checker_); - SequenceChecker task_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker task_checker_; VideoEncoder::QpThresholds thresholds_ RTC_GUARDED_BY(&task_checker_); const int64_t sampling_period_ms_; @@ -112,38 +113,8 @@ class QualityScalerQpUsageHandlerInterface { public: virtual ~QualityScalerQpUsageHandlerInterface(); - // Reacts to QP usage being too high or too low. 
The |callback| MUST be - // invoked when the handler is done, allowing the QualityScaler to resume - // checking for QP. - virtual void OnReportQpUsageHigh( - rtc::scoped_refptr - callback) = 0; - virtual void OnReportQpUsageLow( - rtc::scoped_refptr - callback) = 0; -}; - -// When QP is reported as high or low by the QualityScaler, it pauses checking -// for QP until the QP usage has been handled. When OnQpUsageHandled() is -// invoked, the QualityScaler resumes checking for QP. This ensures that if the -// stream is reconfigured in response to QP usage we do not include QP samples -// from before the reconfiguration the next time we check for QP. -// -// OnQpUsageHandled() MUST be invoked exactly once before this object is -// destroyed. -class QualityScalerQpUsageHandlerCallbackInterface - : public rtc::RefCountedObject { - public: - virtual ~QualityScalerQpUsageHandlerCallbackInterface(); - - // If |clear_qp_samples| is true, existing QP samples are cleared before the - // next time QualityScaler checks for QP. This is usually a good idea when the - // stream is reconfigured. If |clear_qp_samples| is false, samples are not - // cleared and QualityScaler increases its frequency of checking for QP. - virtual void OnQpUsageHandled(bool clear_qp_samples) = 0; - - protected: - QualityScalerQpUsageHandlerCallbackInterface(); + virtual void OnReportQpUsageHigh() = 0; + virtual void OnReportQpUsageLow() = 0; }; } // namespace webrtc diff --git a/modules/video_coding/utility/quality_scaler_unittest.cc b/modules/video_coding/utility/quality_scaler_unittest.cc index 275b327960..91911a7696 100644 --- a/modules/video_coding/utility/quality_scaler_unittest.cc +++ b/modules/video_coding/utility/quality_scaler_unittest.cc @@ -28,37 +28,24 @@ static const int kMinFramesNeededToScale = 60; // From quality_scaler.cc. 
static const size_t kDefaultTimeoutMs = 150; } // namespace -class MockQpUsageHandler : public QualityScalerQpUsageHandlerInterface { +class FakeQpUsageHandler : public QualityScalerQpUsageHandlerInterface { public: - virtual ~MockQpUsageHandler() {} + ~FakeQpUsageHandler() override = default; // QualityScalerQpUsageHandlerInterface implementation. - void OnReportQpUsageHigh( - rtc::scoped_refptr callback) - override { - callback_ = callback; + void OnReportQpUsageHigh() override { adapt_down_events_++; event.Set(); - if (synchronously_invoke_callback) - callback_->OnQpUsageHandled(true); } - void OnReportQpUsageLow( - rtc::scoped_refptr callback) - override { - callback_ = callback; + void OnReportQpUsageLow() override { adapt_up_events_++; event.Set(); - if (synchronously_invoke_callback) - callback_->OnQpUsageHandled(true); } rtc::Event event; int adapt_up_events_ = 0; int adapt_down_events_ = 0; - bool synchronously_invoke_callback = true; - rtc::scoped_refptr callback_ = - nullptr; }; // Pass a lower sampling period to speed up the tests. 
@@ -83,7 +70,7 @@ class QualityScalerTest : public ::testing::Test, QualityScalerTest() : scoped_field_trial_(GetParam()), task_queue_("QualityScalerTestQueue"), - handler_(new MockQpUsageHandler()) { + handler_(std::make_unique()) { task_queue_.SendTask( [this] { qs_ = std::unique_ptr(new QualityScalerUnderTest( @@ -92,7 +79,7 @@ class QualityScalerTest : public ::testing::Test, RTC_FROM_HERE); } - ~QualityScalerTest() { + ~QualityScalerTest() override { task_queue_.SendTask([this] { qs_ = nullptr; }, RTC_FROM_HERE); } @@ -121,7 +108,7 @@ class QualityScalerTest : public ::testing::Test, test::ScopedFieldTrials scoped_field_trial_; TaskQueueForTest task_queue_; std::unique_ptr qs_; - std::unique_ptr handler_; + std::unique_ptr handler_; }; INSTANTIATE_TEST_SUITE_P( @@ -129,7 +116,7 @@ INSTANTIATE_TEST_SUITE_P( QualityScalerTest, ::testing::Values( "WebRTC-Video-QualityScaling/Enabled-1,2,3,4,5,6,7,8,0.9,0.99,1/", - "")); + "WebRTC-Video-QualityScaling/Disabled/")); TEST_P(QualityScalerTest, DownscalesAfterContinuousFramedrop) { task_queue_.SendTask([this] { TriggerScale(kScaleDown); }, RTC_FROM_HERE); @@ -184,7 +171,8 @@ TEST_P(QualityScalerTest, DoesNotDownscaleAfterHalfFramedrop) { } TEST_P(QualityScalerTest, DownscalesAfterTwoThirdsIfFieldTrialEnabled) { - const bool kDownScaleExpected = !GetParam().empty(); + const bool kDownScaleExpected = + GetParam().find("Enabled") != std::string::npos; task_queue_.SendTask( [this] { for (int i = 0; i < kFramerate * 5; ++i) { @@ -282,34 +270,4 @@ TEST_P(QualityScalerTest, ScalesDownAndBackUpWithMinFramesNeeded) { EXPECT_EQ(1, handler_->adapt_up_events_); } -TEST_P(QualityScalerTest, CheckingQpAgainRequiresResolvingCallback) { - handler_->synchronously_invoke_callback = false; - task_queue_.SendTask([this] { TriggerScale(kScaleDown); }, RTC_FROM_HERE); - EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs)); - EXPECT_EQ(1, handler_->adapt_down_events_); - // Without invoking the callback, another downscale should not happen. 
- handler_->event.Reset(); - rtc::Event event; - task_queue_.SendTask( - [this, &event] { - TriggerScale(kScaleDown); - event.Set(); - }, - RTC_FROM_HERE); - EXPECT_TRUE(event.Wait(kDefaultTimeoutMs)); - EXPECT_FALSE(handler_->event.Wait(0)); - EXPECT_EQ(1, handler_->adapt_down_events_); - // Resume checking for QP again by invoking the callback. - task_queue_.SendTask( - [this] { - handler_->callback_->OnQpUsageHandled(true); - TriggerScale(kScaleDown); - }, - RTC_FROM_HERE); - EXPECT_TRUE(handler_->event.Wait(kDefaultTimeoutMs)); - EXPECT_EQ(2, handler_->adapt_down_events_); - task_queue_.SendTask([this] { handler_->callback_->OnQpUsageHandled(true); }, - RTC_FROM_HERE); -} - } // namespace webrtc diff --git a/modules/video_coding/utility/simulcast_rate_allocator.cc b/modules/video_coding/utility/simulcast_rate_allocator.cc index fef74cdb45..39e39abca1 100644 --- a/modules/video_coding/utility/simulcast_rate_allocator.cc +++ b/modules/video_coding/utility/simulcast_rate_allocator.cc @@ -61,7 +61,8 @@ float SimulcastRateAllocator::GetTemporalRateAllocation( SimulcastRateAllocator::SimulcastRateAllocator(const VideoCodec& codec) : codec_(codec), stable_rate_settings_(StableTargetRateExperiment::ParseFromFieldTrials()), - rate_control_settings_(RateControlSettings::ParseFromFieldTrials()) {} + rate_control_settings_(RateControlSettings::ParseFromFieldTrials()), + legacy_conference_mode_(false) {} SimulcastRateAllocator::~SimulcastRateAllocator() = default; @@ -150,7 +151,7 @@ void SimulcastRateAllocator::DistributeAllocationToSimulcastLayers( size_t top_active_layer = active_layer; // Allocate up to the target bitrate for each active simulcast layer. 
for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) { - const SimulcastStream& stream = + const SpatialLayer& stream = codec_.simulcastStream[layer_index[active_layer]]; if (!stream.active) { stream_enabled_[layer_index[active_layer]] = false; @@ -193,7 +194,7 @@ void SimulcastRateAllocator::DistributeAllocationToSimulcastLayers( // TODO(sprang): Allocate up to max bitrate for all layers once we have a // better idea of possible performance implications. if (left_in_total_allocation > DataRate::Zero()) { - const SimulcastStream& stream = codec_.simulcastStream[top_active_layer]; + const SpatialLayer& stream = codec_.simulcastStream[top_active_layer]; DataRate initial_layer_rate = DataRate::BitsPerSec( allocated_bitrates->GetSpatialLayerSum(top_active_layer)); DataRate additional_allocation = std::min( @@ -228,12 +229,8 @@ void SimulcastRateAllocator::DistributeAllocationToTemporalLayers( uint32_t max_bitrate_kbps; // Legacy temporal-layered only screenshare, or simulcast screenshare // with legacy mode for simulcast stream 0. - const bool conference_screenshare_mode = - codec_.mode == VideoCodecMode::kScreensharing && - ((num_spatial_streams == 1 && num_temporal_streams == 2) || // Legacy. - (num_spatial_streams > 1 && simulcast_id == 0 && - num_temporal_streams == 2)); // Simulcast. - if (conference_screenshare_mode) { + if (codec_.mode == VideoCodecMode::kScreensharing && + legacy_conference_mode_ && simulcast_id == 0) { // TODO(holmer): This is a "temporary" hack for screensharing, where we // interpret the startBitrate as the encoder target bitrate. 
This is // to allow for a different max bitrate, so if the codec can't meet @@ -253,7 +250,8 @@ void SimulcastRateAllocator::DistributeAllocationToTemporalLayers( if (num_temporal_streams == 1) { tl_allocation.push_back(target_bitrate_kbps); } else { - if (conference_screenshare_mode) { + if (codec_.mode == VideoCodecMode::kScreensharing && + legacy_conference_mode_ && simulcast_id == 0) { tl_allocation = ScreenshareTemporalLayerAllocation( target_bitrate_kbps, max_bitrate_kbps, simulcast_id); } else { @@ -338,4 +336,8 @@ int SimulcastRateAllocator::NumTemporalStreams(size_t simulcast_id) const { : codec_.simulcastStream[simulcast_id].numberOfTemporalLayers); } +void SimulcastRateAllocator::SetLegacyConferenceMode(bool enabled) { + legacy_conference_mode_ = enabled; +} + } // namespace webrtc diff --git a/modules/video_coding/utility/simulcast_rate_allocator.h b/modules/video_coding/utility/simulcast_rate_allocator.h index d9d9627352..9b2f9696e6 100644 --- a/modules/video_coding/utility/simulcast_rate_allocator.h +++ b/modules/video_coding/utility/simulcast_rate_allocator.h @@ -38,6 +38,8 @@ class SimulcastRateAllocator : public VideoBitrateAllocator { int temporal_id, bool base_heavy_tl3_alloc); + void SetLegacyConferenceMode(bool mode) override; + private: void DistributeAllocationToSimulcastLayers( DataRate total_bitrate, @@ -58,6 +60,7 @@ class SimulcastRateAllocator : public VideoBitrateAllocator { const StableTargetRateExperiment stable_rate_settings_; const RateControlSettings rate_control_settings_; std::vector stream_enabled_; + bool legacy_conference_mode_; RTC_DISALLOW_COPY_AND_ASSIGN(SimulcastRateAllocator); }; diff --git a/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc b/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc index db104c49d1..24d7c58bcd 100644 --- a/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc +++ b/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc @@ -35,24 +35,28 @@ 
constexpr uint32_t kLegacyScreenshareMaxBitrateKbps = 1000; // Bitrates for upper simulcast screenshare layer. constexpr uint32_t kSimulcastScreenshareMinBitrateKbps = 600; constexpr uint32_t kSimulcastScreenshareMaxBitrateKbps = 1250; +// Default video hysteresis factor: allocatable bitrate for next layer must +// exceed 20% of min setting in order to be initially turned on. +const double kDefaultHysteresis = 1.2; class MockTemporalLayers : public Vp8FrameBufferController { public: - MOCK_METHOD2(NextFrameConfig, Vp8FrameConfig(size_t, uint32_t)); - MOCK_METHOD3(OnRatesUpdated, void(size_t, const std::vector&, int)); - MOCK_METHOD1(UpdateConfiguration, Vp8EncoderConfig(size_t)); - MOCK_METHOD6(OnEncodeDone, - void(size_t, uint32_t, size_t, bool, int, CodecSpecificInfo*)); - MOCK_METHOD4(FrameEncoded, void(size_t, uint32_t, size_t, int)); - MOCK_CONST_METHOD0(Tl0PicIdx, uint8_t()); - MOCK_CONST_METHOD1(GetTemporalLayerId, int(const Vp8FrameConfig&)); + MOCK_METHOD(Vp8FrameConfig, NextFrameConfig, (size_t, uint32_t), (override)); + MOCK_METHOD(void, + OnRatesUpdated, + (size_t, const std::vector&, int), + (override)); + MOCK_METHOD(Vp8EncoderConfig, UpdateConfiguration, (size_t), (override)); + MOCK_METHOD(void, + OnEncodeDone, + (size_t, uint32_t, size_t, bool, int, CodecSpecificInfo*), + (override)); }; } // namespace class SimulcastRateAllocatorTest : public ::testing::TestWithParam { public: SimulcastRateAllocatorTest() { - memset(&codec_, 0, sizeof(VideoCodec)); codec_.codecType = kVideoCodecVP8; codec_.minBitrate = kMinBitrateKbps; codec_.maxBitrate = kLegacyScreenshareMaxBitrateKbps; @@ -86,8 +90,9 @@ class SimulcastRateAllocatorTest : public ::testing::TestWithParam { EXPECT_EQ(sum, actual.get_sum_bps()); } - void CreateAllocator() { + void CreateAllocator(bool legacy_conference_mode = false) { allocator_.reset(new SimulcastRateAllocator(codec_)); + allocator_->SetLegacyConferenceMode(legacy_conference_mode); } void SetupCodec3SL3TL(const std::vector& 
active_streams) { @@ -227,6 +232,7 @@ TEST_F(SimulcastRateAllocatorTest, SingleSimulcastBelowMin) { TEST_F(SimulcastRateAllocatorTest, SignalsBwLimited) { // Enough to enable all layers. const int kVeryBigBitrate = 100000; + // With simulcast, use the min bitrate from the ss spec instead of the global. SetupCodec3SL3TL({true, true, true}); CreateAllocator(); @@ -238,10 +244,13 @@ TEST_F(SimulcastRateAllocatorTest, SignalsBwLimited) { EXPECT_TRUE(GetAllocation(codec_.simulcastStream[0].targetBitrate + codec_.simulcastStream[1].minBitrate) .is_bw_limited()); - EXPECT_FALSE(GetAllocation(codec_.simulcastStream[0].targetBitrate + - codec_.simulcastStream[1].targetBitrate + - codec_.simulcastStream[2].minBitrate) - .is_bw_limited()); + EXPECT_FALSE( + GetAllocation( + codec_.simulcastStream[0].targetBitrate + + codec_.simulcastStream[1].targetBitrate + + static_cast( + codec_.simulcastStream[2].minBitrate * kDefaultHysteresis + 0.5)) + .is_bw_limited()); EXPECT_FALSE(GetAllocation(kVeryBigBitrate).is_bw_limited()); } @@ -337,20 +346,23 @@ TEST_F(SimulcastRateAllocatorTest, OneToThreeStreams) { ExpectEqual(expected, GetAllocation(bitrate)); } + uint32_t kMinInitialRateTwoLayers = + codec_.simulcastStream[0].targetBitrate + + static_cast(codec_.simulcastStream[1].minBitrate * + kDefaultHysteresis); { // Bitrate above target for first stream, but below min for the next one. - const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate + - codec_.simulcastStream[1].minBitrate - 1; + const uint32_t bitrate = kMinInitialRateTwoLayers - 1; uint32_t expected[] = {bitrate, 0, 0}; ExpectEqual(expected, GetAllocation(bitrate)); } { // Just enough for two streams. 
- const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate + - codec_.simulcastStream[1].minBitrate; - uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, - codec_.simulcastStream[1].minBitrate, 0}; + const uint32_t bitrate = kMinInitialRateTwoLayers; + uint32_t expected[] = { + codec_.simulcastStream[0].targetBitrate, + kMinInitialRateTwoLayers - codec_.simulcastStream[0].targetBitrate, 0}; ExpectEqual(expected, GetAllocation(bitrate)); } @@ -363,11 +375,15 @@ TEST_F(SimulcastRateAllocatorTest, OneToThreeStreams) { ExpectEqual(expected, GetAllocation(bitrate)); } + uint32_t kMinInitialRateThreeLayers = + codec_.simulcastStream[0].targetBitrate + + codec_.simulcastStream[1].targetBitrate + + static_cast(codec_.simulcastStream[2].minBitrate * + kDefaultHysteresis); { // First two streams maxed out, but not enough for third. Nowhere to put // remaining bits. - const uint32_t bitrate = codec_.simulcastStream[0].maxBitrate + - codec_.simulcastStream[1].maxBitrate + 499; + const uint32_t bitrate = kMinInitialRateThreeLayers - 1; uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, codec_.simulcastStream[1].maxBitrate, 0}; ExpectEqual(expected, GetAllocation(bitrate)); @@ -375,12 +391,12 @@ TEST_F(SimulcastRateAllocatorTest, OneToThreeStreams) { { // Just enough for all three streams. 
- const uint32_t bitrate = codec_.simulcastStream[0].targetBitrate + - codec_.simulcastStream[1].targetBitrate + - codec_.simulcastStream[2].minBitrate; - uint32_t expected[] = {codec_.simulcastStream[0].targetBitrate, - codec_.simulcastStream[1].targetBitrate, - codec_.simulcastStream[2].minBitrate}; + const uint32_t bitrate = kMinInitialRateThreeLayers; + uint32_t expected[] = { + codec_.simulcastStream[0].targetBitrate, + codec_.simulcastStream[1].targetBitrate, + static_cast(codec_.simulcastStream[2].minBitrate * + kDefaultHysteresis)}; ExpectEqual(expected, GetAllocation(bitrate)); } @@ -667,9 +683,9 @@ INSTANTIATE_TEST_SUITE_P(ScreenshareTest, ScreenshareRateAllocationTest, ::testing::Bool()); -TEST_P(ScreenshareRateAllocationTest, BitrateBelowTl0) { +TEST_P(ScreenshareRateAllocationTest, ConferenceBitrateBelowTl0) { SetupConferenceScreenshare(GetParam()); - CreateAllocator(); + CreateAllocator(true); VideoBitrateAllocation allocation = allocator_->Allocate(VideoBitrateAllocationParameters( @@ -682,9 +698,9 @@ TEST_P(ScreenshareRateAllocationTest, BitrateBelowTl0) { EXPECT_EQ(allocation.is_bw_limited(), GetParam()); } -TEST_P(ScreenshareRateAllocationTest, BitrateAboveTl0) { +TEST_P(ScreenshareRateAllocationTest, ConferenceBitrateAboveTl0) { SetupConferenceScreenshare(GetParam()); - CreateAllocator(); + CreateAllocator(true); uint32_t target_bitrate_kbps = (kLegacyScreenshareTargetBitrateKbps + kLegacyScreenshareMaxBitrateKbps) / @@ -702,10 +718,10 @@ TEST_P(ScreenshareRateAllocationTest, BitrateAboveTl0) { EXPECT_EQ(allocation.is_bw_limited(), GetParam()); } -TEST_F(ScreenshareRateAllocationTest, BitrateAboveTl1) { +TEST_F(ScreenshareRateAllocationTest, ConferenceBitrateAboveTl1) { // This test is only for the non-simulcast case. 
SetupConferenceScreenshare(false); - CreateAllocator(); + CreateAllocator(true); VideoBitrateAllocation allocation = allocator_->Allocate(VideoBitrateAllocationParameters( diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc index f157734192..6d3195c32b 100644 --- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc +++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc @@ -75,27 +75,18 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback } Result OnEncodedImage(const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override { + const CodecSpecificInfo* codec_specific_info) override { bool is_vp8 = (codec_specific_info->codecType == kVideoCodecVP8); bool is_h264 = (codec_specific_info->codecType == kVideoCodecH264); // Only store the base layer. if (encoded_image.SpatialIndex().value_or(0) == 0) { if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) { - // TODO(nisse): Why not size() ? 
- encoded_key_frame_.SetEncodedData( - EncodedImageBuffer::Create(encoded_image.capacity())); - encoded_key_frame_.set_size(encoded_image.size()); + encoded_key_frame_.SetEncodedData(EncodedImageBuffer::Create( + encoded_image.data(), encoded_image.size())); encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey; - encoded_key_frame_._completeFrame = encoded_image._completeFrame; - memcpy(encoded_key_frame_.data(), encoded_image.data(), - encoded_image.size()); } else { - encoded_frame_.SetEncodedData( - EncodedImageBuffer::Create(encoded_image.capacity())); - encoded_frame_.set_size(encoded_image.size()); - memcpy(encoded_frame_.data(), encoded_image.data(), - encoded_image.size()); + encoded_frame_.SetEncodedData(EncodedImageBuffer::Create( + encoded_image.data(), encoded_image.size())); } } if (is_vp8) { @@ -197,9 +188,9 @@ void ConfigureStream(int width, int min_bitrate, int target_bitrate, float max_framerate, - SimulcastStream* stream, + SpatialLayer* stream, int num_temporal_layers) { - assert(stream); + RTC_DCHECK(stream); stream->width = width; stream->height = height; stream->maxBitrate = max_bitrate; @@ -221,10 +212,8 @@ void SimulcastTestFixtureImpl::DefaultSettings( VideoCodecType codec_type, bool reverse_layer_order) { RTC_CHECK(settings); - memset(settings, 0, sizeof(VideoCodec)); + *settings = {}; settings->codecType = codec_type; - // 96 to 127 dynamic payload types for video codecs - settings->plType = 120; settings->startBitrate = 300; settings->minBitrate = 30; settings->maxBitrate = 0; @@ -354,7 +343,7 @@ void SimulcastTestFixtureImpl::ExpectStreams( AllOf(Field(&EncodedImage::_frameType, frame_type), Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4), Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)), - _, _)) + _)) .Times(1) .WillRepeatedly(Return( EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0))); @@ -366,7 +355,7 @@ void SimulcastTestFixtureImpl::ExpectStreams( AllOf(Field(&EncodedImage::_frameType, 
frame_type), Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2), Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)), - _, _)) + _)) .Times(1) .WillRepeatedly(Return( EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0))); @@ -377,7 +366,7 @@ void SimulcastTestFixtureImpl::ExpectStreams( AllOf(Field(&EncodedImage::_frameType, frame_type), Field(&EncodedImage::_encodedWidth, kDefaultWidth), Field(&EncodedImage::_encodedHeight, kDefaultHeight)), - _, _)) + _)) .Times(1) .WillRepeatedly(Return( EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0))); @@ -601,6 +590,7 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) { settings_.VP8()->numberOfTemporalLayers = 1; temporal_layer_profile = kDefaultTemporalLayerProfile; } else { + settings_.H264()->numberOfTemporalLayers = 1; temporal_layer_profile = kNoTemporalLayerProfile; } settings_.maxBitrate = 100; @@ -645,7 +635,7 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) { VideoFrameType::kVideoFrameKey), Field(&EncodedImage::_encodedWidth, width), Field(&EncodedImage::_encodedHeight, height)), - _, _)) + _)) .Times(1) .WillRepeatedly(Return( EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0))); @@ -868,23 +858,17 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() { encoder_->RegisterEncodeCompleteCallback(&encoder_callback); decoder_->RegisterDecodeCompleteCallback(&decoder_callback); - EXPECT_CALL(encoder_callback, OnEncodedImage(_, _, _)) + EXPECT_CALL(encoder_callback, OnEncodedImage(_, _)) .Times(3) .WillRepeatedly( ::testing::Invoke([&](const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) { + const CodecSpecificInfo* codec_specific_info) { EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey); size_t index = encoded_image.SpatialIndex().value_or(0); - // TODO(nisse): Why not size() - 
encoded_frame[index].SetEncodedData( - EncodedImageBuffer::Create(encoded_image.capacity())); - encoded_frame[index].set_size(encoded_image.size()); + encoded_frame[index].SetEncodedData(EncodedImageBuffer::Create( + encoded_image.data(), encoded_image.size())); encoded_frame[index]._frameType = encoded_image._frameType; - encoded_frame[index]._completeFrame = encoded_image._completeFrame; - memcpy(encoded_frame[index].data(), encoded_image.data(), - encoded_image.size()); return EncodedImageCallback::Result( EncodedImageCallback::Result::OK, 0); })); @@ -918,5 +902,15 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() { EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, 0)); } +void SimulcastTestFixtureImpl:: + TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() { + VideoEncoder::EncoderInfo encoder_info = encoder_->GetEncoderInfo(); + EXPECT_EQ(encoder_info.fps_allocation[0].size(), + static_cast(kDefaultTemporalLayerProfile[0])); + EXPECT_EQ(encoder_info.fps_allocation[1].size(), + static_cast(kDefaultTemporalLayerProfile[1])); + EXPECT_EQ(encoder_info.fps_allocation[2].size(), + static_cast(kDefaultTemporalLayerProfile[2])); +} } // namespace test } // namespace webrtc diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.h b/modules/video_coding/utility/simulcast_test_fixture_impl.h index 3b55898ccf..a3d3fc66a8 100644 --- a/modules/video_coding/utility/simulcast_test_fixture_impl.h +++ b/modules/video_coding/utility/simulcast_test_fixture_impl.h @@ -50,6 +50,7 @@ class SimulcastTestFixtureImpl final : public SimulcastTestFixture { void TestSpatioTemporalLayers321PatternEncoder() override; void TestStrideEncodeDecode() override; void TestDecodeWidthHeightSet() override; + void TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation() override; static void DefaultSettings(VideoCodec* settings, const int* temporal_layer_profile, diff --git a/modules/video_coding/utility/simulcast_utility.cc 
b/modules/video_coding/utility/simulcast_utility.cc index 58cb991155..a407483edd 100644 --- a/modules/video_coding/utility/simulcast_utility.cc +++ b/modules/video_coding/utility/simulcast_utility.cc @@ -84,16 +84,8 @@ bool SimulcastUtility::ValidSimulcastParameters(const VideoCodec& codec, } bool SimulcastUtility::IsConferenceModeScreenshare(const VideoCodec& codec) { - if (codec.mode != VideoCodecMode::kScreensharing || - NumberOfTemporalLayers(codec, 0) != 2) { - return false; - } - - // Fixed default bitrates for legacy screenshare layers mode. - return (codec.numberOfSimulcastStreams == 0 && codec.maxBitrate == 1000) || - (codec.numberOfSimulcastStreams >= 1 && - codec.simulcastStream[0].maxBitrate == 1000 && - codec.simulcastStream[0].targetBitrate == 200); + return codec.mode == VideoCodecMode::kScreensharing && + codec.legacy_conference_mode; } int SimulcastUtility::NumberOfTemporalLayers(const VideoCodec& codec, diff --git a/modules/video_coding/utility/vp9_uncompressed_header_parser.cc b/modules/video_coding/utility/vp9_uncompressed_header_parser.cc index 9c89235fe2..07ba3255c6 100644 --- a/modules/video_coding/utility/vp9_uncompressed_header_parser.cc +++ b/modules/video_coding/utility/vp9_uncompressed_header_parser.cc @@ -9,83 +9,209 @@ */ #include "modules/video_coding/utility/vp9_uncompressed_header_parser.h" +#include "absl/strings/string_view.h" #include "rtc_base/bit_buffer.h" #include "rtc_base/logging.h" namespace webrtc { -#define RETURN_FALSE_IF_ERROR(x) \ - if (!(x)) { \ - return false; \ +// Evaluates x and returns false if false. +#define RETURN_IF_FALSE(x) \ + if (!(x)) { \ + return false; \ } +// Evaluates x, which is intended to return an optional. If result is nullopt, +// returns false. Else, calls fun() with the dereferenced optional as parameter. 
+#define READ_OR_RETURN(x, fun) \ + do { \ + if (auto optional_val = (x)) { \ + fun(*optional_val); \ + } else { \ + return false; \ + } \ + } while (false) + + namespace vp9 { namespace { const size_t kVp9NumRefsPerFrame = 3; const size_t kVp9MaxRefLFDeltas = 4; const size_t kVp9MaxModeLFDeltas = 2; - -bool Vp9ReadProfile(rtc::BitBuffer* br, uint8_t* profile) { - uint32_t high_bit; - uint32_t low_bit; - RETURN_FALSE_IF_ERROR(br->ReadBits(&low_bit, 1)); - RETURN_FALSE_IF_ERROR(br->ReadBits(&high_bit, 1)); - *profile = (high_bit << 1) + low_bit; - if (*profile > 2) { - uint32_t reserved_bit; - RETURN_FALSE_IF_ERROR(br->ReadBits(&reserved_bit, 1)); - if (reserved_bit) { - RTC_LOG(LS_WARNING) << "Failed to get QP. Unsupported bitstream profile."; +const size_t kVp9MinTileWidthB64 = 4; +const size_t kVp9MaxTileWidthB64 = 64; + +class BitstreamReader { + public: + explicit BitstreamReader(rtc::BitBuffer* buffer) : buffer_(buffer) {} + + // Reads one bit from the input stream and: + // * returns false if bit cannot be read + // * calls f_true() if bit is true, returns return value of that function + // * calls f_false() if bit is false, returns return value of that function + bool IfNextBoolean( + std::function f_true, + std::function f_false = [] { return true; }) { + uint32_t val; + if (!buffer_->ReadBits(1, val)) { return false; } + if (val != 0) { + return f_true(); + } + return f_false(); } - return true; -} -bool Vp9ReadSyncCode(rtc::BitBuffer* br) { - uint32_t sync_code; - RETURN_FALSE_IF_ERROR(br->ReadBits(&sync_code, 24)); - if (sync_code != 0x498342) { - RTC_LOG(LS_WARNING) << "Failed to get QP. Invalid sync code."; - return false; + absl::optional ReadBoolean() { + uint32_t val; + if (!buffer_->ReadBits(1, val)) { + return {}; + } + return {val != 0}; } - return true; -} -bool Vp9ReadColorConfig(rtc::BitBuffer* br, uint8_t profile) { - if (profile == 2 || profile == 3) { - // Bitdepth.
- RETURN_FALSE_IF_ERROR(br->ConsumeBits(1)); + // Reads a bit from the input stream and returns: + // * false if bit cannot be read + // * true if bit matches expected_val + // * false if bit does not match expected_val - in which case |error_msg| is + // logged as warning, if provided. + bool VerifyNextBooleanIs(bool expected_val, absl::string_view error_msg) { + uint32_t val; + if (!buffer_->ReadBits(1, val)) { + return false; + } + if ((val != 0) != expected_val) { + if (!error_msg.empty()) { + RTC_LOG(LS_WARNING) << error_msg; + } + return false; + } + return true; } - uint32_t color_space; - RETURN_FALSE_IF_ERROR(br->ReadBits(&color_space, 3)); - - // SRGB is 7. - if (color_space != 7) { - // YUV range flag. - RETURN_FALSE_IF_ERROR(br->ConsumeBits(1)); - if (profile == 1 || profile == 3) { - // 1 bit: subsampling x. - // 1 bit: subsampling y. - RETURN_FALSE_IF_ERROR(br->ConsumeBits(2)); - uint32_t reserved_bit; - RETURN_FALSE_IF_ERROR(br->ReadBits(&reserved_bit, 1)); - if (reserved_bit) { - RTC_LOG(LS_WARNING) << "Failed to get QP. Reserved bit set."; - return false; + + // Reads |bits| bits from the bitstream and interprets them as an unsigned + // integer that gets cast to the type T before returning. + // Returns nullopt if all bits cannot be read. + // If number of bits matches size of data type, the bits parameter may be + // omitted. Ex: + // ReadUnsigned(2); // Returns uint8_t with 2 LSB populated. + // ReadUnsigned(); // Returns uint8_t with all 8 bits populated. + template + absl::optional ReadUnsigned(int bits = sizeof(T) * 8) { + RTC_DCHECK_LE(bits, 32); + RTC_DCHECK_LE(bits, sizeof(T) * 8); + uint32_t val; + if (!buffer_->ReadBits(bits, val)) { + return {}; + } + return (static_cast(val)); + } + + // Helper method that reads |num_bits| from the bitstream, returns: + // * false if bits cannot be read. 
+ // * true if |expected_val| matches the read bits + // * false if |expected_val| does not match the read bits, and logs + // |error_msg| as a warning (if provided). + bool VerifyNextUnsignedIs(int num_bits, + uint32_t expected_val, + absl::string_view error_msg) { + uint32_t val; + if (!buffer_->ReadBits(num_bits, val)) { + return false; + } + if (val != expected_val) { + if (!error_msg.empty()) { + RTC_LOG(LS_WARNING) << error_msg; } + return false; + } + return true; + } + + // Basically the same as ReadUnsigned() - but for signed integers. + // Here |bits| indicates the size of the value - number of bits read from the + // bit buffer is one higher (the sign bit). This is made to matche the spec in + // which eg s(4) = f(1) sign-bit, plus an f(4). + template + absl::optional ReadSigned(int bits = sizeof(T) * 8) { + uint32_t sign; + if (!buffer_->ReadBits(1, sign)) { + return {}; + } + uint32_t val; + if (!buffer_->ReadBits(bits, val)) { + return {}; + } + int64_t sign_val = val; + if (sign != 0) { + sign_val = -sign_val; } + return {static_cast(sign_val)}; + } + + // Reads |bits| from the bitstream, disregarding their value. + // Returns true if full number of bits were read, false otherwise. + bool ConsumeBits(int bits) { return buffer_->ConsumeBits(bits); } + + private: + rtc::BitBuffer* buffer_; +}; + +bool Vp9ReadColorConfig(BitstreamReader* br, FrameInfo* frame_info) { + if (frame_info->profile == 2 || frame_info->profile == 3) { + READ_OR_RETURN(br->ReadBoolean(), [frame_info](bool ten_or_twelve_bits) { + frame_info->bit_detph = + ten_or_twelve_bits ? BitDept::k12Bit : BitDept::k10Bit; + }); } else { - if (profile == 1 || profile == 3) { - uint32_t reserved_bit; - RETURN_FALSE_IF_ERROR(br->ReadBits(&reserved_bit, 1)); - if (reserved_bit) { - RTC_LOG(LS_WARNING) << "Failed to get QP. 
Reserved bit set."; - return false; - } + frame_info->bit_detph = BitDept::k8Bit; + } + + READ_OR_RETURN( + br->ReadUnsigned(3), [frame_info](uint8_t color_space) { + frame_info->color_space = static_cast(color_space); + }); + + if (frame_info->color_space != ColorSpace::CS_RGB) { + READ_OR_RETURN(br->ReadBoolean(), [frame_info](bool color_range) { + frame_info->color_range = + color_range ? ColorRange::kFull : ColorRange::kStudio; + }); + + if (frame_info->profile == 1 || frame_info->profile == 3) { + READ_OR_RETURN(br->ReadUnsigned(2), + [frame_info](uint8_t subsampling) { + switch (subsampling) { + case 0b00: + frame_info->sub_sampling = YuvSubsampling::k444; + break; + case 0b01: + frame_info->sub_sampling = YuvSubsampling::k440; + break; + case 0b10: + frame_info->sub_sampling = YuvSubsampling::k422; + break; + case 0b11: + frame_info->sub_sampling = YuvSubsampling::k420; + break; + } + }); + + RETURN_IF_FALSE(br->VerifyNextBooleanIs( + 0, "Failed to parse header. Reserved bit set.")); } else { - RTC_LOG(LS_WARNING) << "Failed to get QP. 4:4:4 color not supported in " - "profile 0 or 2."; + // Profile 0 or 2. + frame_info->sub_sampling = YuvSubsampling::k420; + } + } else { + // SRGB + frame_info->color_range = ColorRange::kFull; + if (frame_info->profile == 1 || frame_info->profile == 3) { + frame_info->sub_sampling = YuvSubsampling::k444; + RETURN_IF_FALSE(br->VerifyNextBooleanIs( + 0, "Failed to parse header. Reserved bit set.")); + } else { + RTC_LOG(LS_WARNING) << "Failed to parse header. 4:4:4 color not supported" + " in profile 0 or 2."; return false; } } @@ -93,180 +219,302 @@ bool Vp9ReadColorConfig(rtc::BitBuffer* br, uint8_t profile) { return true; } -bool Vp9ReadFrameSize(rtc::BitBuffer* br) { - // 2 bytes: frame width. - // 2 bytes: frame height. - return br->ConsumeBytes(4); +bool Vp9ReadFrameSize(BitstreamReader* br, FrameInfo* frame_info) { + // 16 bits: frame (width|height) - 1. 
+ READ_OR_RETURN(br->ReadUnsigned(), [frame_info](uint16_t width) { + frame_info->frame_width = width + 1; + }); + READ_OR_RETURN(br->ReadUnsigned(), [frame_info](uint16_t height) { + frame_info->frame_height = height + 1; + }); + return true; } -bool Vp9ReadRenderSize(rtc::BitBuffer* br) { - uint32_t bit; - RETURN_FALSE_IF_ERROR(br->ReadBits(&bit, 1)); - if (bit) { - // 2 bytes: render width. - // 2 bytes: render height. - RETURN_FALSE_IF_ERROR(br->ConsumeBytes(4)); - } - return true; +bool Vp9ReadRenderSize(BitstreamReader* br, FrameInfo* frame_info) { + // render_and_frame_size_different + return br->IfNextBoolean( + [&] { + // 16 bits: render (width|height) - 1. + READ_OR_RETURN(br->ReadUnsigned(), + [frame_info](uint16_t width) { + frame_info->render_width = width + 1; + }); + READ_OR_RETURN(br->ReadUnsigned(), + [frame_info](uint16_t height) { + frame_info->render_height = height + 1; + }); + return true; + }, + /*else*/ + [&] { + frame_info->render_height = frame_info->frame_height; + frame_info->render_width = frame_info->frame_width; + return true; + }); } -bool Vp9ReadFrameSizeFromRefs(rtc::BitBuffer* br) { - uint32_t found_ref = 0; - for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) { +bool Vp9ReadFrameSizeFromRefs(BitstreamReader* br, FrameInfo* frame_info) { + bool found_ref = false; + for (size_t i = 0; !found_ref && i < kVp9NumRefsPerFrame; i++) { // Size in refs. 
- RETURN_FALSE_IF_ERROR(br->ReadBits(&found_ref, 1)); - if (found_ref) - break; + READ_OR_RETURN(br->ReadBoolean(), [&](bool ref) { found_ref = ref; }); } if (!found_ref) { - if (!Vp9ReadFrameSize(br)) { + if (!Vp9ReadFrameSize(br, frame_info)) { return false; } } - return Vp9ReadRenderSize(br); -} - -bool Vp9ReadInterpolationFilter(rtc::BitBuffer* br) { - uint32_t bit; - RETURN_FALSE_IF_ERROR(br->ReadBits(&bit, 1)); - if (bit) - return true; - - return br->ConsumeBits(2); + return Vp9ReadRenderSize(br, frame_info); } -bool Vp9ReadLoopfilter(rtc::BitBuffer* br) { +bool Vp9ReadLoopfilter(BitstreamReader* br) { // 6 bits: filter level. // 3 bits: sharpness level. - RETURN_FALSE_IF_ERROR(br->ConsumeBits(9)); - - uint32_t mode_ref_delta_enabled; - RETURN_FALSE_IF_ERROR(br->ReadBits(&mode_ref_delta_enabled, 1)); - if (mode_ref_delta_enabled) { - uint32_t mode_ref_delta_update; - RETURN_FALSE_IF_ERROR(br->ReadBits(&mode_ref_delta_update, 1)); - if (mode_ref_delta_update) { - uint32_t bit; + RETURN_IF_FALSE(br->ConsumeBits(9)); + + return br->IfNextBoolean([&] { // if mode_ref_delta_enabled + return br->IfNextBoolean([&] { // if mode_ref_delta_update for (size_t i = 0; i < kVp9MaxRefLFDeltas; i++) { - RETURN_FALSE_IF_ERROR(br->ReadBits(&bit, 1)); - if (bit) { - RETURN_FALSE_IF_ERROR(br->ConsumeBits(7)); - } + RETURN_IF_FALSE(br->IfNextBoolean([&] { return br->ConsumeBits(7); })); } for (size_t i = 0; i < kVp9MaxModeLFDeltas; i++) { - RETURN_FALSE_IF_ERROR(br->ReadBits(&bit, 1)); - if (bit) { - RETURN_FALSE_IF_ERROR(br->ConsumeBits(7)); + RETURN_IF_FALSE(br->IfNextBoolean([&] { return br->ConsumeBits(7); })); + } + return true; + }); + }); +} + +bool Vp9ReadQp(BitstreamReader* br, FrameInfo* frame_info) { + READ_OR_RETURN(br->ReadUnsigned(), + [frame_info](uint8_t qp) { frame_info->base_qp = qp; }); + + // yuv offsets + for (int i = 0; i < 3; ++i) { + RETURN_IF_FALSE(br->IfNextBoolean([br] { // if delta_coded + return br->ConsumeBits(5); + })); + } + return true; +} + 
+bool Vp9ReadSegmentationParams(BitstreamReader* br) { + constexpr int kVp9MaxSegments = 8; + constexpr int kVp9SegLvlMax = 4; + constexpr int kSegmentationFeatureBits[kVp9SegLvlMax] = {8, 6, 2, 0}; + constexpr bool kSegmentationFeatureSigned[kVp9SegLvlMax] = {1, 1, 0, 0}; + + RETURN_IF_FALSE(br->IfNextBoolean([&] { // segmentation_enabled + return br->IfNextBoolean([&] { // update_map + // Consume probs. + for (int i = 0; i < 7; ++i) { + RETURN_IF_FALSE(br->IfNextBoolean([br] { return br->ConsumeBits(7); })); + } + + return br->IfNextBoolean([&] { // temporal_update + // Consume probs. + for (int i = 0; i < 3; ++i) { + RETURN_IF_FALSE( + br->IfNextBoolean([br] { return br->ConsumeBits(7); })); } + return true; + }); + }); + })); + + return br->IfNextBoolean([&] { + RETURN_IF_FALSE(br->ConsumeBits(1)); // abs_or_delta + for (int i = 0; i < kVp9MaxSegments; ++i) { + for (int j = 0; j < kVp9SegLvlMax; ++j) { + RETURN_IF_FALSE(br->IfNextBoolean([&] { // feature_enabled + return br->ConsumeBits(kSegmentationFeatureBits[j] + + kSegmentationFeatureSigned[j]); + })); } } + return true; + }); +} + +bool Vp9ReadTileInfo(BitstreamReader* br, FrameInfo* frame_info) { + size_t mi_cols = (frame_info->frame_width + 7) >> 3; + size_t sb64_cols = (mi_cols + 7) >> 3; + + size_t min_log2 = 0; + while ((kVp9MaxTileWidthB64 << min_log2) < sb64_cols) { + ++min_log2; } - return true; + + size_t max_log2 = 1; + while ((sb64_cols >> max_log2) >= kVp9MinTileWidthB64) { + ++max_log2; + } + --max_log2; + + size_t cols_log2 = min_log2; + bool done = false; + while (!done && cols_log2 < max_log2) { + RETURN_IF_FALSE(br->IfNextBoolean( + [&] { + ++cols_log2; + return true; + }, + [&] { + done = true; + return true; + })); + } + + // rows_log2; + return br->IfNextBoolean([&] { return br->ConsumeBits(1); }); } } // namespace -bool GetQp(const uint8_t* buf, size_t length, int* qp) { - rtc::BitBuffer br(buf, length); +bool Parse(const uint8_t* buf, size_t length, FrameInfo* frame_info) { + 
rtc::BitBuffer bit_buffer(buf, length); + BitstreamReader br(&bit_buffer); // Frame marker. - uint32_t frame_marker; - RETURN_FALSE_IF_ERROR(br.ReadBits(&frame_marker, 2)); - if (frame_marker != 0x2) { - RTC_LOG(LS_WARNING) << "Failed to get QP. Frame marker should be 2."; - return false; + RETURN_IF_FALSE(br.VerifyNextUnsignedIs( + 2, 0x2, "Failed to parse header. Frame marker should be 2.")); + + // Profile has low bit first. + READ_OR_RETURN(br.ReadBoolean(), + [frame_info](bool low) { frame_info->profile = int{low}; }); + READ_OR_RETURN(br.ReadBoolean(), [frame_info](bool high) { + frame_info->profile |= int{high} << 1; + }); + if (frame_info->profile > 2) { + RETURN_IF_FALSE(br.VerifyNextBooleanIs( + false, "Failed to get QP. Unsupported bitstream profile.")); } - // Profile. - uint8_t profile; - if (!Vp9ReadProfile(&br, &profile)) - return false; - // Show existing frame. - uint32_t show_existing_frame; - RETURN_FALSE_IF_ERROR(br.ReadBits(&show_existing_frame, 1)); - if (show_existing_frame) - return false; - - // Frame type: KEY_FRAME(0), INTER_FRAME(1). - uint32_t frame_type; - uint32_t show_frame; - uint32_t error_resilient; - RETURN_FALSE_IF_ERROR(br.ReadBits(&frame_type, 1)); - RETURN_FALSE_IF_ERROR(br.ReadBits(&show_frame, 1)); - RETURN_FALSE_IF_ERROR(br.ReadBits(&error_resilient, 1)); + RETURN_IF_FALSE(br.IfNextBoolean([&] { + READ_OR_RETURN(br.ReadUnsigned(3), + [frame_info](uint8_t frame_idx) { + frame_info->show_existing_frame = frame_idx; + }); + return true; + })); + if (frame_info->show_existing_frame.has_value()) { + return true; + } - if (!frame_type) { - if (!Vp9ReadSyncCode(&br)) - return false; - if (!Vp9ReadColorConfig(&br, profile)) + READ_OR_RETURN(br.ReadBoolean(), [frame_info](bool frame_type) { + // Frame type: KEY_FRAME(0), INTER_FRAME(1). 
+ frame_info->is_keyframe = frame_type == 0; + }); + READ_OR_RETURN(br.ReadBoolean(), [frame_info](bool show_frame) { + frame_info->show_frame = show_frame; + }); + READ_OR_RETURN(br.ReadBoolean(), [frame_info](bool error_resilient) { + frame_info->error_resilient = error_resilient; + }); + + if (frame_info->is_keyframe) { + RETURN_IF_FALSE(br.VerifyNextUnsignedIs( + 24, 0x498342, "Failed to get QP. Invalid sync code.")); + + if (!Vp9ReadColorConfig(&br, frame_info)) return false; - if (!Vp9ReadFrameSize(&br)) + if (!Vp9ReadFrameSize(&br, frame_info)) return false; - if (!Vp9ReadRenderSize(&br)) + if (!Vp9ReadRenderSize(&br, frame_info)) return false; - } else { - uint32_t intra_only = 0; - if (!show_frame) - RETURN_FALSE_IF_ERROR(br.ReadBits(&intra_only, 1)); - if (!error_resilient) - RETURN_FALSE_IF_ERROR(br.ConsumeBits(2)); // Reset frame context. - - if (intra_only) { - if (!Vp9ReadSyncCode(&br)) - return false; + // Non-keyframe. + bool is_intra_only = false; + if (!frame_info->show_frame) { + READ_OR_RETURN(br.ReadBoolean(), + [&](bool intra_only) { is_intra_only = intra_only; }); + } + if (!frame_info->error_resilient) { + RETURN_IF_FALSE(br.ConsumeBits(2)); // Reset frame context. + } + + if (is_intra_only) { + RETURN_IF_FALSE(br.VerifyNextUnsignedIs( + 24, 0x498342, "Failed to get QP. Invalid sync code.")); - if (profile > 0) { - if (!Vp9ReadColorConfig(&br, profile)) + if (frame_info->profile > 0) { + if (!Vp9ReadColorConfig(&br, frame_info)) return false; } // Refresh frame flags. - RETURN_FALSE_IF_ERROR(br.ConsumeBits(8)); - if (!Vp9ReadFrameSize(&br)) + RETURN_IF_FALSE(br.ConsumeBits(8)); + if (!Vp9ReadFrameSize(&br, frame_info)) return false; - if (!Vp9ReadRenderSize(&br)) + if (!Vp9ReadRenderSize(&br, frame_info)) return false; } else { // Refresh frame flags. - RETURN_FALSE_IF_ERROR(br.ConsumeBits(8)); + RETURN_IF_FALSE(br.ConsumeBits(8)); for (size_t i = 0; i < kVp9NumRefsPerFrame; i++) { // 3 bits: Ref frame index. // 1 bit: Ref frame sign biases. 
- RETURN_FALSE_IF_ERROR(br.ConsumeBits(4)); + RETURN_IF_FALSE(br.ConsumeBits(4)); } - if (!Vp9ReadFrameSizeFromRefs(&br)) + if (!Vp9ReadFrameSizeFromRefs(&br, frame_info)) return false; // Allow high precision mv. - RETURN_FALSE_IF_ERROR(br.ConsumeBits(1)); + RETURN_IF_FALSE(br.ConsumeBits(1)); // Interpolation filter. - if (!Vp9ReadInterpolationFilter(&br)) - return false; + RETURN_IF_FALSE(br.IfNextBoolean([] { return true; }, + [&br] { return br.ConsumeBits(2); })); } } - if (!error_resilient) { + if (!frame_info->error_resilient) { // 1 bit: Refresh frame context. // 1 bit: Frame parallel decoding mode. - RETURN_FALSE_IF_ERROR(br.ConsumeBits(2)); + RETURN_IF_FALSE(br.ConsumeBits(2)); } // Frame context index. - RETURN_FALSE_IF_ERROR(br.ConsumeBits(2)); + RETURN_IF_FALSE(br.ConsumeBits(2)); if (!Vp9ReadLoopfilter(&br)) return false; - // Base QP. - uint8_t base_q0; - RETURN_FALSE_IF_ERROR(br.ReadUInt8(&base_q0)); - *qp = base_q0; + // Read base QP. + RETURN_IF_FALSE(Vp9ReadQp(&br, frame_info)); + + const bool kParseFullHeader = false; + if (kParseFullHeader) { + // Currently not used, but will be needed when parsing beyond the + // uncompressed header. 
+ RETURN_IF_FALSE(Vp9ReadSegmentationParams(&br)); + + RETURN_IF_FALSE(Vp9ReadTileInfo(&br, frame_info)); + + RETURN_IF_FALSE(br.ConsumeBits(16)); // header_size_in_bytes + } + return true; } -} // namespace vp9 +bool GetQp(const uint8_t* buf, size_t length, int* qp) { + FrameInfo frame_info; + if (!Parse(buf, length, &frame_info)) { + return false; + } + *qp = frame_info.base_qp; + return true; +} + +absl::optional ParseIntraFrameInfo(const uint8_t* buf, + size_t length) { + FrameInfo frame_info; + if (Parse(buf, length, &frame_info) && frame_info.frame_width > 0) { + return frame_info; + } + return absl::nullopt; +} +} // namespace vp9 } // namespace webrtc diff --git a/modules/video_coding/utility/vp9_uncompressed_header_parser.h b/modules/video_coding/utility/vp9_uncompressed_header_parser.h index 69e8de87df..7a5e2c058b 100644 --- a/modules/video_coding/utility/vp9_uncompressed_header_parser.h +++ b/modules/video_coding/utility/vp9_uncompressed_header_parser.h @@ -13,6 +13,7 @@ #include #include +#include "absl/types/optional.h" namespace webrtc { @@ -22,6 +23,68 @@ namespace vp9 { // Returns true on success, false otherwise. bool GetQp(const uint8_t* buf, size_t length, int* qp); +// Bit depth per channel. Support varies by profile. +enum class BitDept : uint8_t { + k8Bit = 8, + k10Bit = 10, + k12Bit = 12, +}; + +enum class ColorSpace : uint8_t { + CS_UNKNOWN = 0, // Unknown (in this case the color space must be signaled + // outside the VP9 bitstream). + CS_BT_601 = 1, // CS_BT_601 Rec. ITU-R BT.601-7 + CS_BT_709 = 2, // Rec. ITU-R BT.709-6 + CS_SMPTE_170 = 3, // SMPTE-170 + CS_SMPTE_240 = 4, // SMPTE-240 + CS_BT_2020 = 5, // Rec. ITU-R BT.2020-2 + CS_RESERVED = 6, // Reserved + CS_RGB = 7, // sRGB (IEC 61966-2-1) +}; + +enum class ColorRange { + kStudio, // Studio swing: + // For BitDepth equals 8: + // Y is between 16 and 235 inclusive. + // U and V are between 16 and 240 inclusive. + // For BitDepth equals 10: + // Y is between 64 and 940 inclusive. 
+ // U and V are between 64 and 960 inclusive. + // For BitDepth equals 12: + // Y is between 256 and 3760. + // U and V are between 256 and 3840 inclusive. + kFull // Full swing; no restriction on Y, U, V values. +}; + +enum class YuvSubsampling { + k444, + k440, + k422, + k420, +}; + +struct FrameInfo { + int profile = 0; // Profile 0-3 are valid. + absl::optional show_existing_frame; + bool is_keyframe = false; + bool show_frame = false; + bool error_resilient = false; + BitDept bit_detph = BitDept::k8Bit; + ColorSpace color_space = ColorSpace::CS_UNKNOWN; + ColorRange color_range; + YuvSubsampling sub_sampling; + int frame_width = 0; + int frame_height = 0; + int render_width = 0; + int render_height = 0; + int base_qp = 0; +}; + +// Parses frame information for a VP9 key-frame or all-intra frame from a +// bitstream. Returns nullopt on failure or if not a key-frame. +absl::optional ParseIntraFrameInfo(const uint8_t* buf, + size_t length); + } // namespace vp9 } // namespace webrtc diff --git a/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc b/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc new file mode 100644 index 0000000000..b69b45d5c4 --- /dev/null +++ b/modules/video_coding/utility/vp9_uncompressed_header_parser_unittest.cc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h" + +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace vp9 { + +TEST(Vp9UncompressedHeaderParserTest, FrameWithSegmentation) { + // Uncompressed header from a frame generated with libvpx. + // Encoded QVGA frame (SL0 of a VGA frame) that includes a segmentation. + const uint8_t kHeader[] = { + 0x87, 0x01, 0x00, 0x00, 0x02, 0x7e, 0x01, 0xdf, 0x02, 0x7f, 0x01, 0xdf, + 0xc6, 0x87, 0x04, 0x83, 0x83, 0x2e, 0x46, 0x60, 0x20, 0x38, 0x0c, 0x06, + 0x03, 0xcd, 0x80, 0xc0, 0x60, 0x9f, 0xc5, 0x46, 0x00, 0x00, 0x00, 0x00, + 0x2e, 0x73, 0xb7, 0xee, 0x22, 0x06, 0x81, 0x82, 0xd4, 0xef, 0xc3, 0x58, + 0x1f, 0x12, 0xd2, 0x7b, 0x28, 0x1f, 0x80, 0xfc, 0x07, 0xe0, 0x00, 0x00}; + + absl::optional frame_info = + ParseIntraFrameInfo(kHeader, sizeof(kHeader)); + // Segmentation info is not actually populated in FrameInfo struct, but it + // needs to be parsed otherwise we end up on the wrong offset. The check for + // segmentation is thus that we have a valid return value. 
+ ASSERT_TRUE(frame_info.has_value()); + + EXPECT_EQ(frame_info->is_keyframe, false); + EXPECT_EQ(frame_info->error_resilient, true); + EXPECT_EQ(frame_info->show_frame, true); + EXPECT_EQ(frame_info->base_qp, 185); + EXPECT_EQ(frame_info->frame_width, 320); + EXPECT_EQ(frame_info->frame_height, 240); + EXPECT_EQ(frame_info->render_width, 640); + EXPECT_EQ(frame_info->render_height, 480); +} + +} // namespace vp9 +} // namespace webrtc diff --git a/modules/video_coding/video_codec_initializer.cc b/modules/video_coding/video_codec_initializer.cc index e8665b9557..17ea66acb1 100644 --- a/modules/video_coding/video_codec_initializer.cc +++ b/modules/video_coding/video_codec_initializer.cc @@ -20,6 +20,7 @@ #include "api/units/data_rate.h" #include "api/video/video_bitrate_allocation.h" #include "api/video_codecs/video_encoder.h" +#include "modules/video_coding/codecs/av1/av1_svc_config.h" #include "modules/video_coding/codecs/vp9/svc_config.h" #include "modules/video_coding/include/video_coding_defines.h" #include "rtc_base/checks.h" @@ -56,7 +57,6 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( RTC_DCHECK_GE(config.min_transmit_bitrate_bps, 0); VideoCodec video_codec; - memset(&video_codec, 0, sizeof(video_codec)); video_codec.codecType = config.codec_type; switch (config.content_type) { @@ -68,14 +68,17 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( break; } - // TODO(nisse): The plType field should be deleted. Luckily, our - // callers don't need it. - video_codec.plType = 0; + video_codec.legacy_conference_mode = + config.content_type == VideoEncoderConfig::ContentType::kScreen && + config.legacy_conference_mode; + video_codec.numberOfSimulcastStreams = static_cast(streams.size()); video_codec.minBitrate = streams[0].min_bitrate_bps / 1000; bool codec_active = false; - for (const VideoStream& stream : streams) { + // Active configuration might not be fully copied to |streams| for SVC yet. 
+ // Therefore the |config| is checked here. + for (const VideoStream& stream : config.simulcast_layers) { if (stream.active) { codec_active = true; break; @@ -91,8 +94,9 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( int max_framerate = 0; + absl::optional scalability_mode = streams[0].scalability_mode; for (size_t i = 0; i < streams.size(); ++i) { - SimulcastStream* sim_stream = &video_codec.simulcastStream[i]; + SpatialLayer* sim_stream = &video_codec.simulcastStream[i]; RTC_DCHECK_GT(streams[i].width, 0); RTC_DCHECK_GT(streams[i].height, 0); RTC_DCHECK_GT(streams[i].max_framerate, 0); @@ -123,6 +127,15 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( video_codec.qpMax = std::max(video_codec.qpMax, static_cast(streams[i].max_qp)); max_framerate = std::max(max_framerate, streams[i].max_framerate); + + if (streams[0].scalability_mode != streams[i].scalability_mode) { + RTC_LOG(LS_WARNING) << "Inconsistent scalability modes configured."; + scalability_mode.reset(); + } + } + + if (scalability_mode.has_value()) { + video_codec.SetScalabilityMode(*scalability_mode); } if (video_codec.maxBitrate == 0) { @@ -135,6 +148,12 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( video_codec.maxBitrate = kEncoderMinBitrateKbps; video_codec.maxFramerate = max_framerate; + video_codec.spatialLayers[0] = {0}; + video_codec.spatialLayers[0].width = video_codec.width; + video_codec.spatialLayers[0].height = video_codec.height; + video_codec.spatialLayers[0].maxFramerate = max_framerate; + video_codec.spatialLayers[0].numberOfTemporalLayers = + streams[0].num_temporal_layers.value_or(1); // Set codec specific options if (config.encoder_specific_settings) @@ -205,9 +224,9 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( spatial_layers.back().maxBitrate = video_codec.maxBitrate; } - for (size_t spatial_idx = 0; + for (size_t spatial_idx = first_active_layer; spatial_idx < config.simulcast_layers.size() && 
- spatial_idx < spatial_layers.size(); + spatial_idx < spatial_layers.size() + first_active_layer; ++spatial_idx) { spatial_layers[spatial_idx - first_active_layer].active = config.simulcast_layers[spatial_idx].active; @@ -219,6 +238,14 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( video_codec.spatialLayers[i] = spatial_layers[i]; } + // The top spatial layer dimensions may not be equal to the input + // resolution because of the rounding or explicit configuration. + // This difference must be propagated to the stream configuration. + video_codec.width = spatial_layers.back().width; + video_codec.height = spatial_layers.back().height; + video_codec.simulcastStream[0].width = spatial_layers.back().width; + video_codec.simulcastStream[0].height = spatial_layers.back().height; + // Update layering settings. video_codec.VP9()->numberOfSpatialLayers = static_cast(spatial_layers.size()); @@ -234,6 +261,15 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec( break; } + case kVideoCodecAV1: + if (SetAv1SvcConfig(video_codec)) { + for (size_t i = 0; i < config.spatial_layers.size(); ++i) { + video_codec.spatialLayers[i].active = config.spatial_layers[i].active; + } + } else { + RTC_LOG(LS_WARNING) << "Failed to configure svc bitrates for av1."; + } + break; case kVideoCodecH264: { if (!config.encoder_specific_settings) *video_codec.H264() = VideoEncoder::GetDefaultH264Settings(); diff --git a/modules/video_coding/video_codec_initializer_unittest.cc b/modules/video_coding/video_codec_initializer_unittest.cc index d5a18f0413..6c1c2e7a38 100644 --- a/modules/video_coding/video_codec_initializer_unittest.cc +++ b/modules/video_coding/video_codec_initializer_unittest.cc @@ -42,7 +42,8 @@ static const uint32_t kDefaultTargetBitrateBps = 2000000; static const uint32_t kDefaultMaxBitrateBps = 2000000; static const uint32_t kDefaultMinTransmitBitrateBps = 400000; static const int kDefaultMaxQp = 48; -static const uint32_t 
kScreenshareTl0BitrateBps = 200000; +static const uint32_t kScreenshareTl0BitrateBps = 120000; +static const uint32_t kScreenshareConferenceTl0BitrateBps = 200000; static const uint32_t kScreenshareCodecTargetBitrateBps = 200000; static const uint32_t kScreenshareDefaultFramerate = 5; // Bitrates for the temporal layers of the higher screenshare simulcast stream. @@ -73,13 +74,13 @@ class VideoCodecInitializerTest : public ::testing::Test { config_.number_of_streams = num_spatial_streams; VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings(); vp8_settings.numberOfTemporalLayers = num_temporal_streams; - config_.encoder_specific_settings = new rtc::RefCountedObject< + config_.encoder_specific_settings = rtc::make_ref_counted< webrtc::VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings); } else if (type == VideoCodecType::kVideoCodecVP9) { VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); vp9_settings.numberOfSpatialLayers = num_spatial_streams; vp9_settings.numberOfTemporalLayers = num_temporal_streams; - config_.encoder_specific_settings = new rtc::RefCountedObject< + config_.encoder_specific_settings = rtc::make_ref_counted< webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings); } else if (type != VideoCodecType::kVideoCodecMultiplex) { ADD_FAILURE() << "Unexpected codec type: " << type; @@ -126,7 +127,7 @@ class VideoCodecInitializerTest : public ::testing::Test { VideoStream DefaultScreenshareStream() { VideoStream stream = DefaultStream(); stream.min_bitrate_bps = 30000; - stream.target_bitrate_bps = kScreenshareTl0BitrateBps; + stream.target_bitrate_bps = kScreenshareCodecTargetBitrateBps; stream.max_bitrate_bps = 1000000; stream.max_framerate = kScreenshareDefaultFramerate; stream.num_temporal_layers = 2; @@ -174,6 +175,23 @@ TEST_F(VideoCodecInitializerTest, SingleStreamVp8ScreenshareInactive) { EXPECT_EQ(0U, bitrate_allocation.get_sum_bps()); } +TEST_F(VideoCodecInitializerTest, 
TemporalLayeredVp8ScreenshareConference) { + SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 2, true); + streams_.push_back(DefaultScreenshareStream()); + EXPECT_TRUE(InitializeCodec()); + bitrate_allocator_->SetLegacyConferenceMode(true); + + EXPECT_EQ(1u, codec_out_.numberOfSimulcastStreams); + EXPECT_EQ(2u, codec_out_.VP8()->numberOfTemporalLayers); + VideoBitrateAllocation bitrate_allocation = + bitrate_allocator_->Allocate(VideoBitrateAllocationParameters( + kScreenshareCodecTargetBitrateBps, kScreenshareDefaultFramerate)); + EXPECT_EQ(kScreenshareCodecTargetBitrateBps, + bitrate_allocation.get_sum_bps()); + EXPECT_EQ(kScreenshareConferenceTl0BitrateBps, + bitrate_allocation.GetBitrate(0, 0)); +} + TEST_F(VideoCodecInitializerTest, TemporalLayeredVp8Screenshare) { SetUpFor(VideoCodecType::kVideoCodecVP8, 1, 2, true); streams_.push_back(DefaultScreenshareStream()); @@ -346,24 +364,131 @@ TEST_F(VideoCodecInitializerTest, Vp9DeactivateLayers) { config_.simulcast_layers[1].active = true; config_.simulcast_layers[2].active = true; EXPECT_TRUE(InitializeCodec()); + EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3); EXPECT_TRUE(codec_out_.spatialLayers[0].active); EXPECT_TRUE(codec_out_.spatialLayers[1].active); EXPECT_TRUE(codec_out_.spatialLayers[2].active); // Deactivate top layer. + config_.simulcast_layers[0].active = true; + config_.simulcast_layers[1].active = true; config_.simulcast_layers[2].active = false; EXPECT_TRUE(InitializeCodec()); + EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3); EXPECT_TRUE(codec_out_.spatialLayers[0].active); EXPECT_TRUE(codec_out_.spatialLayers[1].active); EXPECT_FALSE(codec_out_.spatialLayers[2].active); // Deactivate middle layer. 
- config_.simulcast_layers[2].active = true; + config_.simulcast_layers[0].active = true; config_.simulcast_layers[1].active = false; + config_.simulcast_layers[2].active = true; EXPECT_TRUE(InitializeCodec()); + EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3); EXPECT_TRUE(codec_out_.spatialLayers[0].active); EXPECT_FALSE(codec_out_.spatialLayers[1].active); EXPECT_TRUE(codec_out_.spatialLayers[2].active); + + // Deactivate first layer. + config_.simulcast_layers[0].active = false; + config_.simulcast_layers[1].active = true; + config_.simulcast_layers[2].active = true; + EXPECT_TRUE(InitializeCodec()); + EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 2); + EXPECT_TRUE(codec_out_.spatialLayers[0].active); + EXPECT_TRUE(codec_out_.spatialLayers[1].active); + + // HD singlecast. + config_.simulcast_layers[0].active = false; + config_.simulcast_layers[1].active = false; + config_.simulcast_layers[2].active = true; + EXPECT_TRUE(InitializeCodec()); + EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 1); + EXPECT_TRUE(codec_out_.spatialLayers[0].active); + + // VGA singlecast. + config_.simulcast_layers[0].active = false; + config_.simulcast_layers[1].active = true; + config_.simulcast_layers[2].active = false; + EXPECT_TRUE(InitializeCodec()); + EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 2); + EXPECT_TRUE(codec_out_.spatialLayers[0].active); + EXPECT_FALSE(codec_out_.spatialLayers[1].active); + + // QVGA singlecast. 
+ config_.simulcast_layers[0].active = true; + config_.simulcast_layers[1].active = false; + config_.simulcast_layers[2].active = false; + EXPECT_TRUE(InitializeCodec()); + EXPECT_EQ(codec_out_.VP9()->numberOfSpatialLayers, 3); + EXPECT_TRUE(codec_out_.spatialLayers[0].active); + EXPECT_FALSE(codec_out_.spatialLayers[1].active); + EXPECT_FALSE(codec_out_.spatialLayers[2].active); +} + +TEST_F(VideoCodecInitializerTest, Av1SingleSpatialLayerBitratesAreConsistent) { + VideoEncoderConfig config; + config.codec_type = VideoCodecType::kVideoCodecAV1; + std::vector streams = {DefaultStream()}; + streams[0].scalability_mode = "L1T2"; + + VideoCodec codec; + EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec)); + + EXPECT_GE(codec.spatialLayers[0].targetBitrate, + codec.spatialLayers[0].minBitrate); + EXPECT_LE(codec.spatialLayers[0].targetBitrate, + codec.spatialLayers[0].maxBitrate); +} + +TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersBitratesAreConsistent) { + VideoEncoderConfig config; + config.codec_type = VideoCodecType::kVideoCodecAV1; + std::vector streams = {DefaultStream()}; + streams[0].scalability_mode = "L2T2"; + + VideoCodec codec; + EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec)); + + EXPECT_GE(codec.spatialLayers[0].targetBitrate, + codec.spatialLayers[0].minBitrate); + EXPECT_LE(codec.spatialLayers[0].targetBitrate, + codec.spatialLayers[0].maxBitrate); + + EXPECT_GE(codec.spatialLayers[1].targetBitrate, + codec.spatialLayers[1].minBitrate); + EXPECT_LE(codec.spatialLayers[1].targetBitrate, + codec.spatialLayers[1].maxBitrate); +} + +TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersActiveByDefault) { + VideoEncoderConfig config; + config.codec_type = VideoCodecType::kVideoCodecAV1; + std::vector streams = {DefaultStream()}; + streams[0].scalability_mode = "L2T2"; + config.spatial_layers = {}; + + VideoCodec codec; + EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec)); + + 
EXPECT_TRUE(codec.spatialLayers[0].active); + EXPECT_TRUE(codec.spatialLayers[1].active); +} + +TEST_F(VideoCodecInitializerTest, Av1TwoSpatialLayersOneDeactivated) { + VideoEncoderConfig config; + config.codec_type = VideoCodecType::kVideoCodecAV1; + std::vector streams = {DefaultStream()}; + streams[0].scalability_mode = "L2T2"; + config.spatial_layers.resize(2); + config.spatial_layers[0].active = true; + config.spatial_layers[1].active = false; + + VideoCodec codec; + EXPECT_TRUE(VideoCodecInitializer::SetupCodec(config, streams, &codec)); + + EXPECT_TRUE(codec.spatialLayers[0].active); + EXPECT_FALSE(codec.spatialLayers[1].active); } } // namespace webrtc diff --git a/modules/video_coding/video_coding_impl.cc b/modules/video_coding/video_coding_impl.cc index 1d12ac93f0..f19ea51325 100644 --- a/modules/video_coding/video_coding_impl.cc +++ b/modules/video_coding/video_coding_impl.cc @@ -13,11 +13,10 @@ #include #include +#include "api/sequence_checker.h" #include "api/video/encoded_image.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/timing.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/thread_checker.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -57,11 +56,11 @@ class VideoCodingModuleImpl : public VideoCodingModule { void Process() override { receiver_.Process(); } - int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec, - int32_t numberOfCores, - bool requireKeyFrame) override { - return receiver_.RegisterReceiveCodec(receiveCodec, numberOfCores, - requireKeyFrame); + int32_t RegisterReceiveCodec(uint8_t payload_type, + const VideoCodec* receiveCodec, + int32_t numberOfCores) override { + return receiver_.RegisterReceiveCodec(payload_type, receiveCodec, + numberOfCores); } void RegisterExternalDecoder(VideoDecoder* externalDecoder, @@ -106,7 +105,7 @@ class VideoCodingModuleImpl : public VideoCodingModule { } private: - rtc::ThreadChecker construction_thread_; + 
SequenceChecker construction_thread_; const std::unique_ptr timing_; vcm::VideoReceiver receiver_; }; diff --git a/modules/video_coding/video_coding_impl.h b/modules/video_coding/video_coding_impl.h index eaab639dbf..d74799460c 100644 --- a/modules/video_coding/video_coding_impl.h +++ b/modules/video_coding/video_coding_impl.h @@ -16,6 +16,7 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "modules/video_coding/decoder_database.h" #include "modules/video_coding/frame_buffer.h" #include "modules/video_coding/generic_decoder.h" @@ -24,9 +25,8 @@ #include "modules/video_coding/receiver.h" #include "modules/video_coding/timing.h" #include "rtc_base/one_time_event.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -59,9 +59,9 @@ class VideoReceiver : public Module { VideoReceiver(Clock* clock, VCMTiming* timing); ~VideoReceiver() override; - int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec, - int32_t numberOfCores, - bool requireKeyFrame); + int32_t RegisterReceiveCodec(uint8_t payload_type, + const VideoCodec* receiveCodec, + int32_t numberOfCores); void RegisterExternalDecoder(VideoDecoder* externalDecoder, uint8_t payloadType); @@ -96,11 +96,11 @@ class VideoReceiver : public Module { // In builds where DCHECKs aren't enabled, it will return true. 
bool IsDecoderThreadRunning(); - rtc::ThreadChecker construction_thread_checker_; - rtc::ThreadChecker decoder_thread_checker_; - rtc::ThreadChecker module_thread_checker_; + SequenceChecker construction_thread_checker_; + SequenceChecker decoder_thread_checker_; + SequenceChecker module_thread_checker_; Clock* const clock_; - rtc::CriticalSection process_crit_; + Mutex process_mutex_; VCMTiming* _timing; VCMReceiver _receiver; VCMDecodedFrameCallback _decodedFrameCallback; @@ -111,8 +111,8 @@ class VideoReceiver : public Module { VCMPacketRequestCallback* _packetRequestCallback; // Used on both the module and decoder thread. - bool _scheduleKeyRequest RTC_GUARDED_BY(process_crit_); - bool drop_frames_until_keyframe_ RTC_GUARDED_BY(process_crit_); + bool _scheduleKeyRequest RTC_GUARDED_BY(process_mutex_); + bool drop_frames_until_keyframe_ RTC_GUARDED_BY(process_mutex_); // Modified on the construction thread while not attached to the process // thread. Once attached to the process thread, its value is only read diff --git a/modules/video_coding/video_receiver.cc b/modules/video_coding/video_receiver.cc index a817293f2f..43dbc9f0b2 100644 --- a/modules/video_coding/video_receiver.cc +++ b/modules/video_coding/video_receiver.cc @@ -14,9 +14,9 @@ #include #include "api/rtp_headers.h" +#include "api/sequence_checker.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_decoder.h" -#include "modules/include/module_common_types.h" #include "modules/utility/include/process_thread.h" #include "modules/video_coding/decoder_database.h" #include "modules/video_coding/encoded_frame.h" @@ -31,11 +31,9 @@ #include "modules/video_coding/timing.h" #include "modules/video_coding/video_coding_impl.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/one_time_event.h" -#include "rtc_base/thread_checker.h" #include "rtc_base/trace_event.h" #include 
"system_wrappers/include/clock.h" @@ -71,7 +69,7 @@ void VideoReceiver::Process() { _keyRequestTimer.Processed(); bool request_key_frame = _frameTypeCallback != nullptr; if (request_key_frame) { - rtc::CritScope cs(&process_crit_); + MutexLock lock(&process_mutex_); request_key_frame = _scheduleKeyRequest; } if (request_key_frame) @@ -94,7 +92,7 @@ void VideoReceiver::Process() { ret = RequestKeyFrame(); } if (ret == VCM_OK && !nackList.empty()) { - rtc::CritScope cs(&process_crit_); + MutexLock lock(&process_mutex_); if (_packetRequestCallback != nullptr) { _packetRequestCallback->ResendPackets(&nackList[0], nackList.size()); } @@ -175,15 +173,14 @@ int32_t VideoReceiver::RegisterPacketRequestCallback( // Should be called as often as possible to get the most out of the decoder. int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) { RTC_DCHECK_RUN_ON(&decoder_thread_checker_); - VCMEncodedFrame* frame = _receiver.FrameForDecoding( - maxWaitTimeMs, _codecDataBase.PrefersLateDecoding()); + VCMEncodedFrame* frame = _receiver.FrameForDecoding(maxWaitTimeMs, true); if (!frame) return VCM_FRAME_NOT_READY; bool drop_frame = false; { - rtc::CritScope cs(&process_crit_); + MutexLock lock(&process_mutex_); if (drop_frames_until_keyframe_) { // Still getting delta frames, schedule another keyframe request as if // decode failed. @@ -210,9 +207,7 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) { clock_->TimeInMilliseconds()); if (first_frame_received_()) { - RTC_LOG(LS_INFO) << "Received first " - << (frame->Complete() ? 
"complete" : "incomplete") - << " decodable video frame"; + RTC_LOG(LS_INFO) << "Received first complete decodable video frame"; } const int32_t ret = Decode(*frame); @@ -229,7 +224,7 @@ int32_t VideoReceiver::RequestKeyFrame() { if (ret < 0) { return ret; } - rtc::CritScope cs(&process_crit_); + MutexLock lock(&process_mutex_); _scheduleKeyRequest = false; } else { return VCM_MISSING_CALLBACK; @@ -251,15 +246,15 @@ int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) { } // Register possible receive codecs, can be called multiple times -int32_t VideoReceiver::RegisterReceiveCodec(const VideoCodec* receiveCodec, - int32_t numberOfCores, - bool requireKeyFrame) { +int32_t VideoReceiver::RegisterReceiveCodec(uint8_t payload_type, + const VideoCodec* receiveCodec, + int32_t numberOfCores) { RTC_DCHECK_RUN_ON(&construction_thread_checker_); if (receiveCodec == nullptr) { return VCM_PARAMETER_ERROR; } - if (!_codecDataBase.RegisterReceiveCodec(receiveCodec, numberOfCores, - requireKeyFrame)) { + if (!_codecDataBase.RegisterReceiveCodec(payload_type, receiveCodec, + numberOfCores)) { return -1; } return 0; @@ -284,14 +279,14 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload, // Callers don't provide any ntp time. const VCMPacket packet(incomingPayload, payloadLength, rtp_header, video_header, /*ntp_time_ms=*/0, - clock_->TimeInMilliseconds()); + clock_->CurrentTime()); int32_t ret = _receiver.InsertPacket(packet); // TODO(holmer): Investigate if this somehow should use the key frame // request scheduling to throttle the requests. 
if (ret == VCM_FLUSH_INDICATOR) { { - rtc::CritScope cs(&process_crit_); + MutexLock lock(&process_mutex_); drop_frames_until_keyframe_ = true; } RequestKeyFrame(); diff --git a/modules/video_coding/video_receiver2.cc b/modules/video_coding/video_receiver2.cc index 8eaefbb8da..b893b954bc 100644 --- a/modules/video_coding/video_receiver2.cc +++ b/modules/video_coding/video_receiver2.cc @@ -33,18 +33,18 @@ VideoReceiver2::VideoReceiver2(Clock* clock, VCMTiming* timing) timing_(timing), decodedFrameCallback_(timing_, clock_), codecDataBase_() { - decoder_thread_checker_.Detach(); + decoder_sequence_checker_.Detach(); } VideoReceiver2::~VideoReceiver2() { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(&construction_sequence_checker_); } // Register a receive callback. Will be called whenever there is a new frame // ready for rendering. int32_t VideoReceiver2::RegisterReceiveCallback( VCMReceiveCallback* receiveCallback) { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(&construction_sequence_checker_); RTC_DCHECK(!IsDecoderThreadRunning()); // This value is set before the decoder thread starts and unset after // the decoder thread has been stopped. @@ -52,20 +52,35 @@ int32_t VideoReceiver2::RegisterReceiveCallback( return VCM_OK; } -// Register an externally defined decoder object. +// Register an externally defined decoder object. This may be called on either +// the construction sequence or the decoder sequence to allow for lazy creation +// of video decoders. If called on the decoder sequence |externalDecoder| cannot +// be a nullptr. It's the responsibility of the caller to make sure that the +// access from the two sequences are mutually exclusive. 
void VideoReceiver2::RegisterExternalDecoder(VideoDecoder* externalDecoder, uint8_t payloadType) { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); - RTC_DCHECK(!IsDecoderThreadRunning()); + if (IsDecoderThreadRunning()) { + RTC_DCHECK_RUN_ON(&decoder_sequence_checker_); + // Don't allow deregistering decoders on the decoder thread. + RTC_DCHECK(externalDecoder != nullptr); + } else { + RTC_DCHECK_RUN_ON(&construction_sequence_checker_); + } + if (externalDecoder == nullptr) { - RTC_CHECK(codecDataBase_.DeregisterExternalDecoder(payloadType)); + codecDataBase_.DeregisterExternalDecoder(payloadType); return; } codecDataBase_.RegisterExternalDecoder(externalDecoder, payloadType); } +bool VideoReceiver2::IsExternalDecoderRegistered(uint8_t payloadType) const { + RTC_DCHECK_RUN_ON(&decoder_sequence_checker_); + return codecDataBase_.IsExternalDecoderRegistered(payloadType); +} + void VideoReceiver2::DecoderThreadStarting() { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(&construction_sequence_checker_); RTC_DCHECK(!IsDecoderThreadRunning()); #if RTC_DCHECK_IS_ON decoder_thread_is_running_ = true; @@ -73,17 +88,17 @@ void VideoReceiver2::DecoderThreadStarting() { } void VideoReceiver2::DecoderThreadStopped() { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(&construction_sequence_checker_); RTC_DCHECK(IsDecoderThreadRunning()); #if RTC_DCHECK_IS_ON decoder_thread_is_running_ = false; - decoder_thread_checker_.Detach(); + decoder_sequence_checker_.Detach(); #endif } // Must be called from inside the receive side critical section. 
int32_t VideoReceiver2::Decode(const VCMEncodedFrame* frame) { - RTC_DCHECK_RUN_ON(&decoder_thread_checker_); + RTC_DCHECK_RUN_ON(&decoder_sequence_checker_); TRACE_EVENT0("webrtc", "VideoReceiver2::Decode"); // Change decoder if payload type has changed VCMGenericDecoder* decoder = @@ -95,16 +110,16 @@ int32_t VideoReceiver2::Decode(const VCMEncodedFrame* frame) { } // Register possible receive codecs, can be called multiple times -int32_t VideoReceiver2::RegisterReceiveCodec(const VideoCodec* receiveCodec, - int32_t numberOfCores, - bool requireKeyFrame) { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); +int32_t VideoReceiver2::RegisterReceiveCodec(uint8_t payload_type, + const VideoCodec* receiveCodec, + int32_t numberOfCores) { + RTC_DCHECK_RUN_ON(&construction_sequence_checker_); RTC_DCHECK(!IsDecoderThreadRunning()); if (receiveCodec == nullptr) { return VCM_PARAMETER_ERROR; } - if (!codecDataBase_.RegisterReceiveCodec(receiveCodec, numberOfCores, - requireKeyFrame)) { + if (!codecDataBase_.RegisterReceiveCodec(payload_type, receiveCodec, + numberOfCores)) { return -1; } return 0; diff --git a/modules/video_coding/video_receiver2.h b/modules/video_coding/video_receiver2.h index 202072a560..0c3fe1a257 100644 --- a/modules/video_coding/video_receiver2.h +++ b/modules/video_coding/video_receiver2.h @@ -11,11 +11,11 @@ #ifndef MODULES_VIDEO_CODING_VIDEO_RECEIVER2_H_ #define MODULES_VIDEO_CODING_VIDEO_RECEIVER2_H_ +#include "api/sequence_checker.h" #include "modules/video_coding/decoder_database.h" #include "modules/video_coding/encoded_frame.h" #include "modules/video_coding/generic_decoder.h" #include "modules/video_coding/timing.h" -#include "rtc_base/thread_checker.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -30,12 +30,13 @@ class VideoReceiver2 { VideoReceiver2(Clock* clock, VCMTiming* timing); ~VideoReceiver2(); - int32_t RegisterReceiveCodec(const VideoCodec* receiveCodec, - int32_t numberOfCores, - bool requireKeyFrame); + 
int32_t RegisterReceiveCodec(uint8_t payload_type, + const VideoCodec* receiveCodec, + int32_t numberOfCores); void RegisterExternalDecoder(VideoDecoder* externalDecoder, uint8_t payloadType); + bool IsExternalDecoderRegistered(uint8_t payloadType) const; int32_t RegisterReceiveCallback(VCMReceiveCallback* receiveCallback); int32_t Decode(const webrtc::VCMEncodedFrame* frame); @@ -54,8 +55,8 @@ class VideoReceiver2 { // In builds where DCHECKs aren't enabled, it will return true. bool IsDecoderThreadRunning(); - rtc::ThreadChecker construction_thread_checker_; - rtc::ThreadChecker decoder_thread_checker_; + SequenceChecker construction_sequence_checker_; + SequenceChecker decoder_sequence_checker_; Clock* const clock_; VCMTiming* timing_; VCMDecodedFrameCallback decodedFrameCallback_; diff --git a/modules/video_coding/video_receiver_unittest.cc b/modules/video_coding/video_receiver_unittest.cc index 363838b846..fcd4f449ca 100644 --- a/modules/video_coding/video_receiver_unittest.cc +++ b/modules/video_coding/video_receiver_unittest.cc @@ -26,8 +26,10 @@ namespace { class MockPacketRequestCallback : public VCMPacketRequestCallback { public: - MOCK_METHOD2(ResendPackets, - int32_t(const uint16_t* sequenceNumbers, uint16_t length)); + MOCK_METHOD(int32_t, + ResendPackets, + (const uint16_t* sequenceNumbers, uint16_t length), + (override)); }; class MockVCMReceiveCallback : public VCMReceiveCallback { @@ -35,11 +37,12 @@ class MockVCMReceiveCallback : public VCMReceiveCallback { MockVCMReceiveCallback() {} virtual ~MockVCMReceiveCallback() {} - MOCK_METHOD4( - FrameToRender, - int32_t(VideoFrame&, absl::optional, int32_t, VideoContentType)); - MOCK_METHOD1(OnIncomingPayloadType, void(int)); - MOCK_METHOD1(OnDecoderImplementationName, void(const char*)); + MOCK_METHOD(int32_t, + FrameToRender, + (VideoFrame&, absl::optional, int32_t, VideoContentType), + (override)); + MOCK_METHOD(void, OnIncomingPayloadType, (int), (override)); + MOCK_METHOD(void, 
OnDecoderImplementationName, (const char*), (override)); }; class TestVideoReceiver : public ::testing::Test { @@ -54,8 +57,8 @@ class TestVideoReceiver : public ::testing::Test { // Register decoder. receiver_.RegisterExternalDecoder(&decoder_, kUnusedPayloadType); webrtc::test::CodecSettings(kVideoCodecVP8, &settings_); - settings_.plType = kUnusedPayloadType; - EXPECT_EQ(0, receiver_.RegisterReceiveCodec(&settings_, 1, true)); + EXPECT_EQ( + 0, receiver_.RegisterReceiveCodec(kUnusedPayloadType, &settings_, 1)); // Set protection mode. const size_t kMaxNackListSize = 250; diff --git a/modules/video_processing/BUILD.gn b/modules/video_processing/BUILD.gn index 4354454111..daffd49cdf 100644 --- a/modules/video_processing/BUILD.gn +++ b/modules/video_processing/BUILD.gn @@ -26,10 +26,8 @@ rtc_library("video_processing") { deps = [ ":denoiser_filter", - "..:module_api", "../../api:scoped_refptr", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../common_audio", "../../common_video", @@ -37,7 +35,7 @@ rtc_library("video_processing") { "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", "../../rtc_base/system:arch", - "../../system_wrappers:cpu_features_api", + "../../system_wrappers", "//third_party/libyuv", ] if (build_video_processing_sse2) { @@ -52,7 +50,6 @@ rtc_source_set("denoiser_filter") { # Target that only exists to avoid cyclic depdency errors for the SSE2 and # Neon implementations below. 
sources = [ "util/denoiser_filter.h" ] - deps = [ "..:module_api" ] } if (build_video_processing_sse2) { @@ -100,7 +97,6 @@ if (rtc_include_tests) { ":video_processing", "../../api:scoped_refptr", "../../api/video:video_frame", - "../../api/video:video_frame_i420", "../../api/video:video_rtp_headers", "../../common_video", "../../test:fileutils", diff --git a/modules/video_processing/test/denoiser_test.cc b/modules/video_processing/test/denoiser_test.cc index 4707eb5950..28ba702b3a 100644 --- a/modules/video_processing/test/denoiser_test.cc +++ b/modules/video_processing/test/denoiser_test.cc @@ -27,26 +27,6 @@ namespace webrtc { -TEST(VideoDenoiserTest, CopyMem) { - std::unique_ptr df_c(DenoiserFilter::Create(false, nullptr)); - std::unique_ptr df_sse_neon( - DenoiserFilter::Create(true, nullptr)); - uint8_t src[16 * 16], dst[16 * 16]; - for (int i = 0; i < 16; ++i) { - for (int j = 0; j < 16; ++j) { - src[i * 16 + j] = i * 16 + j; - } - } - - memset(dst, 0, 16 * 16); - df_c->CopyMem16x16(src, 16, dst, 16); - EXPECT_EQ(0, memcmp(src, dst, 16 * 16)); - - memset(dst, 0, 16 * 16); - df_sse_neon->CopyMem16x16(src, 16, dst, 16); - EXPECT_EQ(0, memcmp(src, dst, 16 * 16)); -} - TEST(VideoDenoiserTest, Variance) { std::unique_ptr df_c(DenoiserFilter::Create(false, nullptr)); std::unique_ptr df_sse_neon( diff --git a/modules/video_processing/util/denoiser_filter.cc b/modules/video_processing/util/denoiser_filter.cc index d6b5094a5b..0e1570114a 100644 --- a/modules/video_processing/util/denoiser_filter.cc +++ b/modules/video_processing/util/denoiser_filter.cc @@ -41,7 +41,7 @@ std::unique_ptr DenoiserFilter::Create( filter.reset(new DenoiserFilterSSE2()); #else // x86 CPU detection required. 
- if (WebRtc_GetCPUInfo(kSSE2)) { + if (GetCPUInfo(kSSE2)) { filter.reset(new DenoiserFilterSSE2()); } else { filter.reset(new DenoiserFilterC()); diff --git a/modules/video_processing/util/denoiser_filter.h b/modules/video_processing/util/denoiser_filter.h index 0db50471e0..1d574f4a4f 100644 --- a/modules/video_processing/util/denoiser_filter.h +++ b/modules/video_processing/util/denoiser_filter.h @@ -30,11 +30,6 @@ class DenoiserFilter { CpuType* cpu_type); virtual ~DenoiserFilter() {} - - virtual void CopyMem16x16(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride) = 0; virtual uint32_t Variance16x8(const uint8_t* a, int a_stride, const uint8_t* b, diff --git a/modules/video_processing/util/denoiser_filter_c.cc b/modules/video_processing/util/denoiser_filter_c.cc index b1831a6775..5411e556e7 100644 --- a/modules/video_processing/util/denoiser_filter_c.cc +++ b/modules/video_processing/util/denoiser_filter_c.cc @@ -15,17 +15,6 @@ namespace webrtc { -void DenoiserFilterC::CopyMem16x16(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride) { - for (int i = 0; i < 16; i++) { - memcpy(dst, src, 16); - src += src_stride; - dst += dst_stride; - } -} - uint32_t DenoiserFilterC::Variance16x8(const uint8_t* a, int a_stride, const uint8_t* b, diff --git a/modules/video_processing/util/denoiser_filter_c.h b/modules/video_processing/util/denoiser_filter_c.h index f05663e1a2..5633c171f0 100644 --- a/modules/video_processing/util/denoiser_filter_c.h +++ b/modules/video_processing/util/denoiser_filter_c.h @@ -20,10 +20,6 @@ namespace webrtc { class DenoiserFilterC : public DenoiserFilter { public: DenoiserFilterC() {} - void CopyMem16x16(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride) override; uint32_t Variance16x8(const uint8_t* a, int a_stride, const uint8_t* b, diff --git a/modules/video_processing/util/denoiser_filter_neon.cc b/modules/video_processing/util/denoiser_filter_neon.cc index 4eabe02ea9..e1e6ed4f18 
100644 --- a/modules/video_processing/util/denoiser_filter_neon.cc +++ b/modules/video_processing/util/denoiser_filter_neon.cc @@ -64,19 +64,6 @@ static void VarianceNeonW8(const uint8_t* a, static_cast(HorizontalAddS32x4(vaddq_s32(v_sse_lo, v_sse_hi))); } -void DenoiserFilterNEON::CopyMem16x16(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride) { - uint8x16_t qtmp; - for (int r = 0; r < 16; r++) { - qtmp = vld1q_u8(src); - vst1q_u8(dst, qtmp); - src += src_stride; - dst += dst_stride; - } -} - uint32_t DenoiserFilterNEON::Variance16x8(const uint8_t* a, int a_stride, const uint8_t* b, diff --git a/modules/video_processing/util/denoiser_filter_neon.h b/modules/video_processing/util/denoiser_filter_neon.h index decbd41c0d..4d9f271e5a 100644 --- a/modules/video_processing/util/denoiser_filter_neon.h +++ b/modules/video_processing/util/denoiser_filter_neon.h @@ -18,10 +18,6 @@ namespace webrtc { class DenoiserFilterNEON : public DenoiserFilter { public: DenoiserFilterNEON() {} - void CopyMem16x16(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride) override; uint32_t Variance16x8(const uint8_t* a, int a_stride, const uint8_t* b, diff --git a/modules/video_processing/util/denoiser_filter_sse2.cc b/modules/video_processing/util/denoiser_filter_sse2.cc index 281169bcc2..5ca5f0cf34 100644 --- a/modules/video_processing/util/denoiser_filter_sse2.cc +++ b/modules/video_processing/util/denoiser_filter_sse2.cc @@ -100,18 +100,6 @@ static uint32_t AbsSumDiff16x1(__m128i acc_diff) { return sum_diff; } -// TODO(jackychen): Optimize this function using SSE2. 
-void DenoiserFilterSSE2::CopyMem16x16(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride) { - for (int i = 0; i < 16; i++) { - memcpy(dst, src, 16); - src += src_stride; - dst += dst_stride; - } -} - uint32_t DenoiserFilterSSE2::Variance16x8(const uint8_t* src, int src_stride, const uint8_t* ref, diff --git a/modules/video_processing/util/denoiser_filter_sse2.h b/modules/video_processing/util/denoiser_filter_sse2.h index 6fb7279a2d..8fe4b905ae 100644 --- a/modules/video_processing/util/denoiser_filter_sse2.h +++ b/modules/video_processing/util/denoiser_filter_sse2.h @@ -20,10 +20,6 @@ namespace webrtc { class DenoiserFilterSSE2 : public DenoiserFilter { public: DenoiserFilterSSE2() {} - void CopyMem16x16(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride) override; uint32_t Variance16x8(const uint8_t* a, int a_stride, const uint8_t* b, diff --git a/modules/video_processing/video_denoiser.cc b/modules/video_processing/video_denoiser.cc index 40568a5ec6..1d844e61de 100644 --- a/modules/video_processing/video_denoiser.cc +++ b/modules/video_processing/video_denoiser.cc @@ -19,17 +19,6 @@ namespace webrtc { #if DISPLAY || DISPLAYNEON -static void CopyMem8x8(const uint8_t* src, - int src_stride, - uint8_t* dst, - int dst_stride) { - for (int i = 0; i < 8; i++) { - memcpy(dst, src, 8); - src += src_stride; - dst += dst_stride; - } -} - static void ShowRect(const std::unique_ptr& filter, const std::unique_ptr& d_status, const std::unique_ptr& moving_edge_red, @@ -58,16 +47,16 @@ static void ShowRect(const std::unique_ptr& filter, memset(uv_tmp, 200, 8 * 8); if (d_status[mb_index] == 1) { // Paint to red. 
- CopyMem8x8(mb_src_u, stride_u_src, mb_dst_u, stride_u_dst); - CopyMem8x8(uv_tmp, 8, mb_dst_v, stride_v_dst); + libyuv::CopyPlane(mb_src_u, stride_u_src, mb_dst_u, stride_u_dst, 8, 8); + libyuv::CopyPlane(uv_tmp, 8, mb_dst_v, stride_v_dst, 8, 8); } else if (moving_edge_red[mb_row * mb_cols_ + mb_col] && x_density[mb_col] * y_density[mb_row]) { // Paint to blue. - CopyMem8x8(uv_tmp, 8, mb_dst_u, stride_u_dst); - CopyMem8x8(mb_src_v, stride_v_src, mb_dst_v, stride_v_dst); + libyuv::CopyPlane(uv_tmp, 8, mb_dst_u, stride_u_dst, 8, 8); + libyuv::CopyPlane(mb_src_v, stride_v_src, mb_dst_v, stride_v_dst, 8, 8); } else { - CopyMem8x8(mb_src_u, stride_u_src, mb_dst_u, stride_u_dst); - CopyMem8x8(mb_src_v, stride_v_src, mb_dst_v, stride_v_dst); + libyuv::CopyPlane(mb_src_u, stride_u_src, mb_dst_u, stride_u_dst, 8, 8); + libyuv::CopyPlane(mb_src_v, stride_v_src, mb_dst_v, stride_v_dst, 8, 8); } } } @@ -194,7 +183,7 @@ void VideoDenoiser::CopySrcOnMOB(const uint8_t* y_src, (x_density_[mb_col] * y_density_[mb_row] && moving_object_[mb_row * mb_cols_ + mb_col])) { // Copy y source. 
- filter_->CopyMem16x16(mb_src, stride_src, mb_dst, stride_dst); + libyuv::CopyPlane(mb_src, stride_src, mb_dst, stride_dst, 16, 16); } } } @@ -235,7 +224,7 @@ rtc::scoped_refptr VideoDenoiser::DenoiseFrame( const uint8_t* y_src = frame->DataY(); int stride_y_src = frame->StrideY(); rtc::scoped_refptr dst = - buffer_pool_.CreateBuffer(width_, height_); + buffer_pool_.CreateI420Buffer(width_, height_); uint8_t* y_dst = dst->MutableDataY(); int stride_y_dst = dst->StrideY(); diff --git a/modules/video_processing/video_denoiser.h b/modules/video_processing/video_denoiser.h index 37d624bb25..eb98c5bc53 100644 --- a/modules/video_processing/video_denoiser.h +++ b/modules/video_processing/video_denoiser.h @@ -15,7 +15,7 @@ #include "api/scoped_refptr.h" #include "api/video/video_frame_buffer.h" -#include "common_video/include/i420_buffer_pool.h" +#include "common_video/include/video_frame_buffer_pool.h" #include "modules/video_processing/util/denoiser_filter.h" #include "modules/video_processing/util/noise_estimation.h" #include "modules/video_processing/util/skin_detection.h" @@ -77,7 +77,7 @@ class VideoDenoiser { std::unique_ptr y_density_; // Save the return values by MbDenoise for each block. std::unique_ptr mb_filter_decision_; - I420BufferPool buffer_pool_; + VideoFrameBufferPool buffer_pool_; rtc::scoped_refptr prev_buffer_; }; diff --git a/native-api.md b/native-api.md index 2c193274ad..a9893c37cd 100644 --- a/native-api.md +++ b/native-api.md @@ -19,10 +19,8 @@ Legacy API directory | Including subdirectories? 
`modules/audio_coding/include` | No `modules/audio_device/include` | No `modules/audio_processing/include` | No -`modules/bitrate_controller/include` | No `modules/congestion_controller/include` | No `modules/include` | No -`modules/remote_bitrate_estimator/include` | No `modules/rtp_rtcp/include` | No `modules/rtp_rtcp/source` | No `modules/utility/include` | No diff --git a/net/dcsctp/BUILD.gn b/net/dcsctp/BUILD.gn new file mode 100644 index 0000000000..8b38a65ca1 --- /dev/null +++ b/net/dcsctp/BUILD.gn @@ -0,0 +1,26 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../webrtc.gni") + +if (rtc_include_tests) { + rtc_test("dcsctp_unittests") { + testonly = true + deps = [ + "../../test:test_main", + "common:dcsctp_common_unittests", + "fuzzers:dcsctp_fuzzers_unittests", + "packet:dcsctp_packet_unittests", + "public:dcsctp_public_unittests", + "rx:dcsctp_rx_unittests", + "socket:dcsctp_socket_unittests", + "timer:dcsctp_timer_unittests", + "tx:dcsctp_tx_unittests", + ] + } +} diff --git a/net/dcsctp/OWNERS b/net/dcsctp/OWNERS new file mode 100644 index 0000000000..06a0f86179 --- /dev/null +++ b/net/dcsctp/OWNERS @@ -0,0 +1,2 @@ +boivie@webrtc.org +orphis@webrtc.org diff --git a/net/dcsctp/common/BUILD.gn b/net/dcsctp/common/BUILD.gn new file mode 100644 index 0000000000..6e99cdcef4 --- /dev/null +++ b/net/dcsctp/common/BUILD.gn @@ -0,0 +1,63 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. 
An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../../webrtc.gni") + +rtc_source_set("internal_types") { + deps = [ + "../public:strong_alias", + "../public:types", + ] + sources = [ "internal_types.h" ] +} + +rtc_source_set("math") { + deps = [] + sources = [ "math.h" ] +} + +rtc_source_set("pair_hash") { + deps = [] + sources = [ "pair_hash.h" ] +} + +rtc_source_set("sequence_numbers") { + deps = [ ":internal_types" ] + sources = [ "sequence_numbers.h" ] +} + +rtc_source_set("str_join") { + deps = [ "../../../rtc_base:stringutils" ] + sources = [ "str_join.h" ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +if (rtc_include_tests) { + rtc_library("dcsctp_common_unittests") { + testonly = true + + defines = [] + deps = [ + ":math", + ":pair_hash", + ":sequence_numbers", + ":str_join", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + ] + sources = [ + "math_test.cc", + "pair_hash_test.cc", + "sequence_numbers_test.cc", + "str_join_test.cc", + ] + } +} diff --git a/net/dcsctp/common/internal_types.h b/net/dcsctp/common/internal_types.h new file mode 100644 index 0000000000..b651d45d91 --- /dev/null +++ b/net/dcsctp/common/internal_types.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_COMMON_INTERNAL_TYPES_H_ +#define NET_DCSCTP_COMMON_INTERNAL_TYPES_H_ + +#include + +#include "net/dcsctp/public/strong_alias.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// Stream Sequence Number (SSN) +using SSN = StrongAlias; + +// Message Identifier (MID) +using MID = StrongAlias; + +// Fragment Sequence Number (FSN) +using FSN = StrongAlias; + +// Transmission Sequence Number (TSN) +using TSN = StrongAlias; + +// Reconfiguration Request Sequence Number +using ReconfigRequestSN = StrongAlias; + +// Verification Tag, used for packet validation. +using VerificationTag = StrongAlias; + +// Tie Tag, used as a nonce when connecting. +using TieTag = StrongAlias; + +// Hasher for separated ordered/unordered stream identifiers. +struct UnorderedStreamHash { + size_t operator()(const std::pair& p) const { + return std::hash{}(*p.first) ^ + (std::hash{}(*p.second) << 1); + } +}; + +} // namespace dcsctp +#endif // NET_DCSCTP_COMMON_INTERNAL_TYPES_H_ diff --git a/net/dcsctp/common/math.h b/net/dcsctp/common/math.h new file mode 100644 index 0000000000..12f690ed57 --- /dev/null +++ b/net/dcsctp/common/math.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_COMMON_MATH_H_ +#define NET_DCSCTP_COMMON_MATH_H_ + +namespace dcsctp { + +// Rounds up `val` to the nearest value that is divisible by four. Frequently +// used to e.g. pad chunks or parameters to an even 32-bit offset. +template +IntType RoundUpTo4(IntType val) { + return (val + 3) & ~3; +} + +// Similarly, rounds down `val` to the nearest value that is divisible by four. 
+template +IntType RoundDownTo4(IntType val) { + return val & ~3; +} + +// Returns true if `val` is divisible by four. +template +bool IsDivisibleBy4(IntType val) { + return (val & 3) == 0; +} + +} // namespace dcsctp + +#endif // NET_DCSCTP_COMMON_MATH_H_ diff --git a/net/dcsctp/common/math_test.cc b/net/dcsctp/common/math_test.cc new file mode 100644 index 0000000000..f95dfbdb55 --- /dev/null +++ b/net/dcsctp/common/math_test.cc @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/common/math.h" + +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(MathUtilTest, CanRoundUpTo4) { + // Signed numbers + EXPECT_EQ(RoundUpTo4(static_cast(-5)), -4); + EXPECT_EQ(RoundUpTo4(static_cast(-4)), -4); + EXPECT_EQ(RoundUpTo4(static_cast(-3)), 0); + EXPECT_EQ(RoundUpTo4(static_cast(-2)), 0); + EXPECT_EQ(RoundUpTo4(static_cast(-1)), 0); + EXPECT_EQ(RoundUpTo4(static_cast(0)), 0); + EXPECT_EQ(RoundUpTo4(static_cast(1)), 4); + EXPECT_EQ(RoundUpTo4(static_cast(2)), 4); + EXPECT_EQ(RoundUpTo4(static_cast(3)), 4); + EXPECT_EQ(RoundUpTo4(static_cast(4)), 4); + EXPECT_EQ(RoundUpTo4(static_cast(5)), 8); + EXPECT_EQ(RoundUpTo4(static_cast(6)), 8); + EXPECT_EQ(RoundUpTo4(static_cast(7)), 8); + EXPECT_EQ(RoundUpTo4(static_cast(8)), 8); + EXPECT_EQ(RoundUpTo4(static_cast(10000000000)), 10000000000); + EXPECT_EQ(RoundUpTo4(static_cast(10000000001)), 10000000004); + + // Unsigned numbers + EXPECT_EQ(RoundUpTo4(static_cast(0)), 0u); + EXPECT_EQ(RoundUpTo4(static_cast(1)), 4u); + EXPECT_EQ(RoundUpTo4(static_cast(2)), 4u); + EXPECT_EQ(RoundUpTo4(static_cast(3)), 4u); + 
EXPECT_EQ(RoundUpTo4(static_cast(4)), 4u); + EXPECT_EQ(RoundUpTo4(static_cast(5)), 8u); + EXPECT_EQ(RoundUpTo4(static_cast(6)), 8u); + EXPECT_EQ(RoundUpTo4(static_cast(7)), 8u); + EXPECT_EQ(RoundUpTo4(static_cast(8)), 8u); + EXPECT_EQ(RoundUpTo4(static_cast(10000000000)), 10000000000u); + EXPECT_EQ(RoundUpTo4(static_cast(10000000001)), 10000000004u); +} + +TEST(MathUtilTest, CanRoundDownTo4) { + // Signed numbers + EXPECT_EQ(RoundDownTo4(static_cast(-5)), -8); + EXPECT_EQ(RoundDownTo4(static_cast(-4)), -4); + EXPECT_EQ(RoundDownTo4(static_cast(-3)), -4); + EXPECT_EQ(RoundDownTo4(static_cast(-2)), -4); + EXPECT_EQ(RoundDownTo4(static_cast(-1)), -4); + EXPECT_EQ(RoundDownTo4(static_cast(0)), 0); + EXPECT_EQ(RoundDownTo4(static_cast(1)), 0); + EXPECT_EQ(RoundDownTo4(static_cast(2)), 0); + EXPECT_EQ(RoundDownTo4(static_cast(3)), 0); + EXPECT_EQ(RoundDownTo4(static_cast(4)), 4); + EXPECT_EQ(RoundDownTo4(static_cast(5)), 4); + EXPECT_EQ(RoundDownTo4(static_cast(6)), 4); + EXPECT_EQ(RoundDownTo4(static_cast(7)), 4); + EXPECT_EQ(RoundDownTo4(static_cast(8)), 8); + EXPECT_EQ(RoundDownTo4(static_cast(10000000000)), 10000000000); + EXPECT_EQ(RoundDownTo4(static_cast(10000000001)), 10000000000); + + // Unsigned numbers + EXPECT_EQ(RoundDownTo4(static_cast(0)), 0u); + EXPECT_EQ(RoundDownTo4(static_cast(1)), 0u); + EXPECT_EQ(RoundDownTo4(static_cast(2)), 0u); + EXPECT_EQ(RoundDownTo4(static_cast(3)), 0u); + EXPECT_EQ(RoundDownTo4(static_cast(4)), 4u); + EXPECT_EQ(RoundDownTo4(static_cast(5)), 4u); + EXPECT_EQ(RoundDownTo4(static_cast(6)), 4u); + EXPECT_EQ(RoundDownTo4(static_cast(7)), 4u); + EXPECT_EQ(RoundDownTo4(static_cast(8)), 8u); + EXPECT_EQ(RoundDownTo4(static_cast(10000000000)), 10000000000u); + EXPECT_EQ(RoundDownTo4(static_cast(10000000001)), 10000000000u); +} + +TEST(MathUtilTest, IsDivisibleBy4) { + // Signed numbers + EXPECT_EQ(IsDivisibleBy4(static_cast(-4)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(-3)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(-2)), 
false); + EXPECT_EQ(IsDivisibleBy4(static_cast(-1)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(0)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(1)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(2)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(3)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(4)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(5)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(6)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(7)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(8)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(10000000000)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(10000000001)), false); + + // Unsigned numbers + EXPECT_EQ(IsDivisibleBy4(static_cast(0)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(1)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(2)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(3)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(4)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(5)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(6)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(7)), false); + EXPECT_EQ(IsDivisibleBy4(static_cast(8)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(10000000000)), true); + EXPECT_EQ(IsDivisibleBy4(static_cast(10000000001)), false); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/common/pair_hash.h b/net/dcsctp/common/pair_hash.h new file mode 100644 index 0000000000..62af8b4221 --- /dev/null +++ b/net/dcsctp/common/pair_hash.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_COMMON_PAIR_HASH_H_ +#define NET_DCSCTP_COMMON_PAIR_HASH_H_ + +#include + +#include +#include + +namespace dcsctp { + +// A custom hash function for std::pair, to be able to be used as key in a +// std::unordered_map. If absl::flat_hash_map would ever be used, this is +// unnecessary as it already has a hash function for std::pair. +struct PairHash { + template + size_t operator()(const std::pair& p) const { + return (3 * std::hash{}(p.first)) ^ std::hash{}(p.second); + } +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_COMMON_PAIR_HASH_H_ diff --git a/net/dcsctp/common/pair_hash_test.cc b/net/dcsctp/common/pair_hash_test.cc new file mode 100644 index 0000000000..bcc3ec86c0 --- /dev/null +++ b/net/dcsctp/common/pair_hash_test.cc @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/common/pair_hash.h" + +#include +#include + +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(PairHashTest, CanInsertIntoSet) { + using MyPair = std::pair; + + std::unordered_set pairs; + + pairs.insert({1, 2}); + pairs.insert({3, 4}); + + EXPECT_NE(pairs.find({1, 2}), pairs.end()); + EXPECT_NE(pairs.find({3, 4}), pairs.end()); + EXPECT_EQ(pairs.find({1, 3}), pairs.end()); + EXPECT_EQ(pairs.find({3, 3}), pairs.end()); +} + +TEST(PairHashTest, CanInsertIntoMap) { + using MyPair = std::pair; + + std::unordered_map pairs; + + pairs[{1, 2}] = 99; + pairs[{3, 4}] = 100; + + EXPECT_EQ((pairs[{1, 2}]), 99); + EXPECT_EQ((pairs[{3, 4}]), 100); + EXPECT_EQ(pairs.find({1, 3}), pairs.end()); + EXPECT_EQ(pairs.find({3, 3}), pairs.end()); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/common/sequence_numbers.h b/net/dcsctp/common/sequence_numbers.h new file mode 100644 index 0000000000..52b638b54a --- /dev/null +++ b/net/dcsctp/common/sequence_numbers.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_COMMON_SEQUENCE_NUMBERS_H_ +#define NET_DCSCTP_COMMON_SEQUENCE_NUMBERS_H_ + +#include +#include +#include + +#include "net/dcsctp/common/internal_types.h" + +namespace dcsctp { + +// UnwrappedSequenceNumber handles wrapping sequence numbers and unwraps them to +// an int64_t value space, to allow wrapped sequence numbers to be easily +// compared for ordering. 
+// +// Sequence numbers are expected to be monotonically increasing, but they do not +// need to be unwrapped in order, as long as the difference to the previous one +// is not larger than half the range of the wrapped sequence number. +// +// The WrappedType must be a StrongAlias type. +template +class UnwrappedSequenceNumber { + public: + static_assert( + !std::numeric_limits::is_signed, + "The wrapped type must be unsigned"); + static_assert( + std::numeric_limits::max() < + std::numeric_limits::max(), + "The wrapped type must be less than the int64_t value space"); + + // The unwrapper is a sort of factory and converts wrapped sequence numbers to + // unwrapped ones. + class Unwrapper { + public: + Unwrapper() : largest_(kValueLimit) {} + Unwrapper(const Unwrapper&) = default; + Unwrapper& operator=(const Unwrapper&) = default; + + // Given a wrapped `value`, and with knowledge of its current last seen + // largest number, will return a value that can be compared using normal + // operators, such as less-than, greater-than etc. + // + // This will also update the Unwrapper's state, to track the last seen + // largest value. + UnwrappedSequenceNumber Unwrap(WrappedType value) { + WrappedType wrapped_largest = + static_cast(largest_ % kValueLimit); + int64_t result = largest_ + Delta(value, wrapped_largest); + if (largest_ < result) { + largest_ = result; + } + return UnwrappedSequenceNumber(result); + } + + // Similar to `Unwrap`, but will not update the Unwrappers's internal state. + UnwrappedSequenceNumber PeekUnwrap(WrappedType value) const { + WrappedType uint32_largest = + static_cast(largest_ % kValueLimit); + int64_t result = largest_ + Delta(value, uint32_largest); + return UnwrappedSequenceNumber(result); + } + + // Resets the Unwrapper to its pristine state. Used when a sequence number + // is to be reset to zero. 
+    void Reset() { largest_ = kValueLimit; }
+
+   private:
+    static int64_t Delta(WrappedType value, WrappedType prev_value) {
+      static constexpr typename WrappedType::UnderlyingType kBreakpoint =
+          kValueLimit / 2;
+      typename WrappedType::UnderlyingType diff = *value - *prev_value;
+      diff %= kValueLimit;
+      if (diff < kBreakpoint) {
+        return static_cast<int64_t>(diff);
+      }
+      return static_cast<int64_t>(diff) - kValueLimit;
+    }
+
+    int64_t largest_;
+  };
+
+  // Returns the wrapped value this type represents.
+  WrappedType Wrap() const {
+    return static_cast<WrappedType>(value_ % kValueLimit);
+  }
+
+  template <typename H>
+  friend H AbslHashValue(H state,
+                         const UnwrappedSequenceNumber& hash) {
+    return H::combine(std::move(state), hash.value_);
+  }
+
+  bool operator==(const UnwrappedSequenceNumber& other) const {
+    return value_ == other.value_;
+  }
+  bool operator!=(const UnwrappedSequenceNumber& other) const {
+    return value_ != other.value_;
+  }
+  bool operator<(const UnwrappedSequenceNumber& other) const {
+    return value_ < other.value_;
+  }
+  bool operator>(const UnwrappedSequenceNumber& other) const {
+    return value_ > other.value_;
+  }
+  bool operator>=(const UnwrappedSequenceNumber& other) const {
+    return value_ >= other.value_;
+  }
+  bool operator<=(const UnwrappedSequenceNumber& other) const {
+    return value_ <= other.value_;
+  }
+
+  // Increments the value.
+  void Increment() { ++value_; }
+
+  // Returns the next value relative to this sequence number.
+  UnwrappedSequenceNumber next_value() const {
+    return UnwrappedSequenceNumber(value_ + 1);
+  }
+
+  // Returns a new sequence number based on `value`, and adding `delta` (which
+  // may be negative).
+  static UnwrappedSequenceNumber AddTo(
+      UnwrappedSequenceNumber value,
+      int delta) {
+    return UnwrappedSequenceNumber(value.value_ + delta);
+  }
+
+  // Returns the absolute difference between `lhs` and `rhs`.
+  static typename WrappedType::UnderlyingType Difference(
+      UnwrappedSequenceNumber lhs,
+      UnwrappedSequenceNumber rhs) {
+    return (lhs.value_ > rhs.value_) ? (lhs.value_ - rhs.value_)
+                                     : (rhs.value_ - lhs.value_);
+  }
+
+ private:
+  explicit UnwrappedSequenceNumber(int64_t value) : value_(value) {}
+  static constexpr int64_t kValueLimit =
+      static_cast<int64_t>(1)
+      << std::numeric_limits<typename WrappedType::UnderlyingType>::digits;
+
+  int64_t value_;
+};
+
+// Unwrapped Transmission Sequence Numbers (TSN)
+using UnwrappedTSN = UnwrappedSequenceNumber<TSN>;
+
+// Unwrapped Stream Sequence Numbers (SSN)
+using UnwrappedSSN = UnwrappedSequenceNumber<SSN>;
+
+// Unwrapped Message Identifier (MID)
+using UnwrappedMID = UnwrappedSequenceNumber<MID>;
+
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_COMMON_SEQUENCE_NUMBERS_H_
diff --git a/net/dcsctp/common/sequence_numbers_test.cc b/net/dcsctp/common/sequence_numbers_test.cc
new file mode 100644
index 0000000000..f5fa788876
--- /dev/null
+++ b/net/dcsctp/common/sequence_numbers_test.cc
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/common/sequence_numbers.h"
+
+#include "test/gmock.h"
+
+namespace dcsctp {
+namespace {
+
+using Wrapped = StrongAlias<class WrappedTag, uint16_t>;
+using TestSequence = UnwrappedSequenceNumber<Wrapped>;
+
+TEST(SequenceNumbersTest, SimpleUnwrapping) {
+  TestSequence::Unwrapper unwrapper;
+
+  TestSequence s0 = unwrapper.Unwrap(Wrapped(0));
+  TestSequence s1 = unwrapper.Unwrap(Wrapped(1));
+  TestSequence s2 = unwrapper.Unwrap(Wrapped(2));
+  TestSequence s3 = unwrapper.Unwrap(Wrapped(3));
+
+  EXPECT_LT(s0, s1);
+  EXPECT_LT(s0, s2);
+  EXPECT_LT(s0, s3);
+  EXPECT_LT(s1, s2);
+  EXPECT_LT(s1, s3);
+  EXPECT_LT(s2, s3);
+
+  EXPECT_EQ(TestSequence::Difference(s1, s0), 1);
+  EXPECT_EQ(TestSequence::Difference(s2, s0), 2);
+  EXPECT_EQ(TestSequence::Difference(s3, s0), 3);
+
+  EXPECT_GT(s1, s0);
+  EXPECT_GT(s2, s0);
+  EXPECT_GT(s3, s0);
+  EXPECT_GT(s2, s1);
+  EXPECT_GT(s3, s1);
+  EXPECT_GT(s3, s2);
+
+  s0.Increment();
+  EXPECT_EQ(s0, s1);
+  s1.Increment();
+  EXPECT_EQ(s1, s2);
+  s2.Increment();
+  EXPECT_EQ(s2, s3);
+
+  EXPECT_EQ(TestSequence::AddTo(s0, 2), s3);
+}
+
+TEST(SequenceNumbersTest, MidValueUnwrapping) {
+  TestSequence::Unwrapper unwrapper;
+
+  TestSequence s0 = unwrapper.Unwrap(Wrapped(0x7FFE));
+  TestSequence s1 = unwrapper.Unwrap(Wrapped(0x7FFF));
+  TestSequence s2 = unwrapper.Unwrap(Wrapped(0x8000));
+  TestSequence s3 = unwrapper.Unwrap(Wrapped(0x8001));
+
+  EXPECT_LT(s0, s1);
+  EXPECT_LT(s0, s2);
+  EXPECT_LT(s0, s3);
+  EXPECT_LT(s1, s2);
+  EXPECT_LT(s1, s3);
+  EXPECT_LT(s2, s3);
+
+  EXPECT_EQ(TestSequence::Difference(s1, s0), 1);
+  EXPECT_EQ(TestSequence::Difference(s2, s0), 2);
+  EXPECT_EQ(TestSequence::Difference(s3, s0), 3);
+
+  EXPECT_GT(s1, s0);
+  EXPECT_GT(s2, s0);
+  EXPECT_GT(s3, s0);
+  EXPECT_GT(s2, s1);
+  EXPECT_GT(s3, s1);
+  EXPECT_GT(s3, s2);
+
+  s0.Increment();
+  EXPECT_EQ(s0, s1);
+  s1.Increment();
+  EXPECT_EQ(s1, s2);
+  s2.Increment();
+  EXPECT_EQ(s2, s3);
+
+  EXPECT_EQ(TestSequence::AddTo(s0, 2), s3);
+}
+
+TEST(SequenceNumbersTest, WrappedUnwrapping) {
+
TestSequence::Unwrapper unwrapper; + + TestSequence s0 = unwrapper.Unwrap(Wrapped(0xFFFE)); + TestSequence s1 = unwrapper.Unwrap(Wrapped(0xFFFF)); + TestSequence s2 = unwrapper.Unwrap(Wrapped(0x0000)); + TestSequence s3 = unwrapper.Unwrap(Wrapped(0x0001)); + + EXPECT_LT(s0, s1); + EXPECT_LT(s0, s2); + EXPECT_LT(s0, s3); + EXPECT_LT(s1, s2); + EXPECT_LT(s1, s3); + EXPECT_LT(s2, s3); + + EXPECT_EQ(TestSequence::Difference(s1, s0), 1); + EXPECT_EQ(TestSequence::Difference(s2, s0), 2); + EXPECT_EQ(TestSequence::Difference(s3, s0), 3); + + EXPECT_GT(s1, s0); + EXPECT_GT(s2, s0); + EXPECT_GT(s3, s0); + EXPECT_GT(s2, s1); + EXPECT_GT(s3, s1); + EXPECT_GT(s3, s2); + + s0.Increment(); + EXPECT_EQ(s0, s1); + s1.Increment(); + EXPECT_EQ(s1, s2); + s2.Increment(); + EXPECT_EQ(s2, s3); + + EXPECT_EQ(TestSequence::AddTo(s0, 2), s3); +} + +TEST(SequenceNumbersTest, WrapAroundAFewTimes) { + TestSequence::Unwrapper unwrapper; + + TestSequence s0 = unwrapper.Unwrap(Wrapped(0)); + TestSequence prev = s0; + + for (uint32_t i = 1; i < 65536 * 3; i++) { + uint16_t wrapped = static_cast(i); + TestSequence si = unwrapper.Unwrap(Wrapped(wrapped)); + + EXPECT_LT(s0, si); + EXPECT_LT(prev, si); + prev = si; + } +} + +TEST(SequenceNumbersTest, IncrementIsSameAsWrapped) { + TestSequence::Unwrapper unwrapper; + + TestSequence s0 = unwrapper.Unwrap(Wrapped(0)); + + for (uint32_t i = 1; i < 65536 * 2; i++) { + uint16_t wrapped = static_cast(i); + TestSequence si = unwrapper.Unwrap(Wrapped(wrapped)); + + s0.Increment(); + EXPECT_EQ(s0, si); + } +} + +TEST(SequenceNumbersTest, UnwrappingLargerNumberIsAlwaysLarger) { + TestSequence::Unwrapper unwrapper; + + for (uint32_t i = 1; i < 65536 * 2; i++) { + uint16_t wrapped = static_cast(i); + TestSequence si = unwrapper.Unwrap(Wrapped(wrapped)); + + EXPECT_GT(unwrapper.Unwrap(Wrapped(wrapped + 1)), si); + EXPECT_GT(unwrapper.Unwrap(Wrapped(wrapped + 5)), si); + EXPECT_GT(unwrapper.Unwrap(Wrapped(wrapped + 10)), si); + 
EXPECT_GT(unwrapper.Unwrap(Wrapped(wrapped + 100)), si); + } +} + +TEST(SequenceNumbersTest, UnwrappingSmallerNumberIsAlwaysSmaller) { + TestSequence::Unwrapper unwrapper; + + for (uint32_t i = 1; i < 65536 * 2; i++) { + uint16_t wrapped = static_cast(i); + TestSequence si = unwrapper.Unwrap(Wrapped(wrapped)); + + EXPECT_LT(unwrapper.Unwrap(Wrapped(wrapped - 1)), si); + EXPECT_LT(unwrapper.Unwrap(Wrapped(wrapped - 5)), si); + EXPECT_LT(unwrapper.Unwrap(Wrapped(wrapped - 10)), si); + EXPECT_LT(unwrapper.Unwrap(Wrapped(wrapped - 100)), si); + } +} + +TEST(SequenceNumbersTest, DifferenceIsAbsolute) { + TestSequence::Unwrapper unwrapper; + + TestSequence this_value = unwrapper.Unwrap(Wrapped(10)); + TestSequence other_value = TestSequence::AddTo(this_value, 100); + + EXPECT_EQ(TestSequence::Difference(this_value, other_value), 100); + EXPECT_EQ(TestSequence::Difference(other_value, this_value), 100); + + TestSequence minus_value = TestSequence::AddTo(this_value, -100); + + EXPECT_EQ(TestSequence::Difference(this_value, minus_value), 100); + EXPECT_EQ(TestSequence::Difference(minus_value, this_value), 100); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/common/str_join.h b/net/dcsctp/common/str_join.h new file mode 100644 index 0000000000..04517827b7 --- /dev/null +++ b/net/dcsctp/common/str_join.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+#ifndef NET_DCSCTP_COMMON_STR_JOIN_H_
+#define NET_DCSCTP_COMMON_STR_JOIN_H_
+
+#include <string>
+
+#include "absl/strings/string_view.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace dcsctp {
+
+template <typename Range>
+std::string StrJoin(const Range& seq, absl::string_view delimiter) {
+  rtc::StringBuilder sb;
+  int idx = 0;
+
+  for (const typename Range::value_type& elem : seq) {
+    if (idx > 0) {
+      sb << delimiter;
+    }
+    sb << elem;
+
+    ++idx;
+  }
+  return sb.Release();
+}
+
+template <typename Range, typename Functor>
+std::string StrJoin(const Range& seq,
+                    absl::string_view delimiter,
+                    const Functor& fn) {
+  rtc::StringBuilder sb;
+  int idx = 0;
+
+  for (const typename Range::value_type& elem : seq) {
+    if (idx > 0) {
+      sb << delimiter;
+    }
+    fn(sb, elem);
+
+    ++idx;
+  }
+  return sb.Release();
+}
+
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_COMMON_STR_JOIN_H_
diff --git a/net/dcsctp/common/str_join_test.cc b/net/dcsctp/common/str_join_test.cc
new file mode 100644
index 0000000000..dbfd92c1cf
--- /dev/null
+++ b/net/dcsctp/common/str_join_test.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */ +#include "net/dcsctp/common/str_join.h" + +#include +#include +#include + +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(StrJoinTest, CanJoinStringsFromVector) { + std::vector strings = {"Hello", "World"}; + std::string s = StrJoin(strings, " "); + EXPECT_EQ(s, "Hello World"); +} + +TEST(StrJoinTest, CanJoinNumbersFromArray) { + std::array numbers = {1, 2, 3}; + std::string s = StrJoin(numbers, ","); + EXPECT_EQ(s, "1,2,3"); +} + +TEST(StrJoinTest, CanFormatElementsWhileJoining) { + std::vector> pairs = { + {"hello", "world"}, {"foo", "bar"}, {"fum", "gazonk"}}; + std::string s = StrJoin(pairs, ",", + [&](rtc::StringBuilder& sb, + const std::pair& p) { + sb << p.first << "=" << p.second; + }); + EXPECT_EQ(s, "hello=world,foo=bar,fum=gazonk"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/fuzzers/BUILD.gn b/net/dcsctp/fuzzers/BUILD.gn new file mode 100644 index 0000000000..9edbae44d7 --- /dev/null +++ b/net/dcsctp/fuzzers/BUILD.gn @@ -0,0 +1,50 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +import("../../../webrtc.gni") + +rtc_library("dcsctp_fuzzers") { + testonly = true + deps = [ + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:math", + "../packet:chunk", + "../packet:error_cause", + "../packet:parameter", + "../public:socket", + "../public:types", + "../socket:dcsctp_socket", + ] + sources = [ + "dcsctp_fuzzers.cc", + "dcsctp_fuzzers.h", + ] +} + +if (rtc_include_tests) { + rtc_library("dcsctp_fuzzers_unittests") { + testonly = true + + deps = [ + ":dcsctp_fuzzers", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "../packet:sctp_packet", + "../public:socket", + "../socket:dcsctp_socket", + "../testing:testing_macros", + ] + sources = [ "dcsctp_fuzzers_test.cc" ] + } +} diff --git a/net/dcsctp/fuzzers/dcsctp_fuzzers.cc b/net/dcsctp/fuzzers/dcsctp_fuzzers.cc new file mode 100644 index 0000000000..b4b6224ec4 --- /dev/null +++ b/net/dcsctp/fuzzers/dcsctp_fuzzers.cc @@ -0,0 +1,460 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/fuzzers/dcsctp_fuzzers.h" + +#include +#include +#include + +#include "net/dcsctp/common/math.h" +#include "net/dcsctp/packet/chunk/cookie_ack_chunk.h" +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/chunk/shutdown_chunk.h" +#include "net/dcsctp/packet/error_cause/protocol_violation_cause.h" +#include "net/dcsctp/packet/error_cause/user_initiated_abort_cause.h" +#include "net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/state_cookie_parameter.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/socket/dcsctp_socket.h" +#include "net/dcsctp/socket/state_cookie.h" +#include "rtc_base/logging.h" + +namespace dcsctp { +namespace dcsctp_fuzzers { +namespace { +static constexpr int kRandomValue = FuzzerCallbacks::kRandomValue; +static constexpr size_t kMinInputLength = 5; +static constexpr size_t kMaxInputLength = 1024; + +// A starting state for the socket, when fuzzing. 
+enum class StartingState : int { + kConnectNotCalled, + // When socket initiating Connect + kConnectCalled, + kReceivedInitAck, + kReceivedCookieAck, + // When socket initiating Shutdown + kShutdownCalled, + kReceivedShutdownAck, + // When peer socket initiated Connect + kReceivedInit, + kReceivedCookieEcho, + // When peer initiated Shutdown + kReceivedShutdown, + kReceivedShutdownComplete, + kNumberOfStates, +}; + +// State about the current fuzzing iteration +class FuzzState { + public: + explicit FuzzState(rtc::ArrayView data) : data_(data) {} + + uint8_t GetByte() { + uint8_t value = 0; + if (offset_ < data_.size()) { + value = data_[offset_]; + ++offset_; + } + return value; + } + + TSN GetNextTSN() { return TSN(tsn_++); } + MID GetNextMID() { return MID(mid_++); } + + bool empty() const { return offset_ >= data_.size(); } + + private: + uint32_t tsn_ = kRandomValue; + uint32_t mid_ = 0; + rtc::ArrayView data_; + size_t offset_ = 0; +}; + +void SetSocketState(DcSctpSocketInterface& socket, + FuzzerCallbacks& socket_cb, + StartingState state) { + // We'll use another temporary peer socket for the establishment. 
+ FuzzerCallbacks peer_cb; + DcSctpSocket peer("peer", peer_cb, nullptr, {}); + + switch (state) { + case StartingState::kConnectNotCalled: + return; + case StartingState::kConnectCalled: + socket.Connect(); + return; + case StartingState::kReceivedInitAck: + socket.Connect(); + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // INIT + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // INIT_ACK + return; + case StartingState::kReceivedCookieAck: + socket.Connect(); + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // INIT + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // INIT_ACK + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // COOKIE_ECHO + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // COOKIE_ACK + return; + case StartingState::kShutdownCalled: + socket.Connect(); + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // INIT + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // INIT_ACK + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // COOKIE_ECHO + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // COOKIE_ACK + socket.Shutdown(); + return; + case StartingState::kReceivedShutdownAck: + socket.Connect(); + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // INIT + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // INIT_ACK + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // COOKIE_ECHO + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // COOKIE_ACK + socket.Shutdown(); + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // SHUTDOWN + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // SHUTDOWN_ACK + return; + case StartingState::kReceivedInit: + peer.Connect(); + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // INIT + return; + case StartingState::kReceivedCookieEcho: + peer.Connect(); + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // INIT + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // INIT_ACK + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // COOKIE_ECHO + return; + case 
StartingState::kReceivedShutdown: + socket.Connect(); + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // INIT + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // INIT_ACK + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // COOKIE_ECHO + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // COOKIE_ACK + peer.Shutdown(); + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // SHUTDOWN + return; + case StartingState::kReceivedShutdownComplete: + socket.Connect(); + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // INIT + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // INIT_ACK + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // COOKIE_ECHO + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // COOKIE_ACK + peer.Shutdown(); + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // SHUTDOWN + peer.ReceivePacket(socket_cb.ConsumeSentPacket()); // SHUTDOWN_ACK + socket.ReceivePacket(peer_cb.ConsumeSentPacket()); // SHUTDOWN_COMPLETE + return; + case StartingState::kNumberOfStates: + RTC_CHECK(false); + return; + } +} + +void MakeDataChunk(FuzzState& state, SctpPacket::Builder& b) { + DataChunk::Options options; + options.is_unordered = IsUnordered(state.GetByte() != 0); + options.is_beginning = Data::IsBeginning(state.GetByte() != 0); + options.is_end = Data::IsEnd(state.GetByte() != 0); + b.Add(DataChunk(state.GetNextTSN(), StreamID(state.GetByte()), + SSN(state.GetByte()), PPID(53), std::vector(10), + options)); +} + +void MakeInitChunk(FuzzState& state, SctpPacket::Builder& b) { + Parameters::Builder builder; + builder.Add(ForwardTsnSupportedParameter()); + + b.Add(InitChunk(VerificationTag(kRandomValue), 10000, 1000, 1000, + TSN(kRandomValue), builder.Build())); +} + +void MakeInitAckChunk(FuzzState& state, SctpPacket::Builder& b) { + Parameters::Builder builder; + builder.Add(ForwardTsnSupportedParameter()); + + uint8_t state_cookie[] = {1, 2, 3, 4, 5}; + Parameters::Builder params_builder = + 
Parameters::Builder().Add(StateCookieParameter(state_cookie)); + + b.Add(InitAckChunk(VerificationTag(kRandomValue), 10000, 1000, 1000, + TSN(kRandomValue), builder.Build())); +} + +void MakeSackChunk(FuzzState& state, SctpPacket::Builder& b) { + std::vector gap_ack_blocks; + uint16_t last_end = 0; + while (gap_ack_blocks.size() < 20) { + uint8_t delta_start = state.GetByte(); + if (delta_start < 0x80) { + break; + } + uint8_t delta_end = state.GetByte(); + + uint16_t start = last_end + delta_start; + uint16_t end = start + delta_end; + last_end = end; + gap_ack_blocks.emplace_back(start, end); + } + + TSN cum_ack_tsn(kRandomValue + state.GetByte()); + b.Add(SackChunk(cum_ack_tsn, 10000, std::move(gap_ack_blocks), {})); +} + +void MakeHeartbeatRequestChunk(FuzzState& state, SctpPacket::Builder& b) { + uint8_t info[] = {1, 2, 3, 4, 5}; + b.Add(HeartbeatRequestChunk( + Parameters::Builder().Add(HeartbeatInfoParameter(info)).Build())); +} + +void MakeHeartbeatAckChunk(FuzzState& state, SctpPacket::Builder& b) { + std::vector info(8); + b.Add(HeartbeatRequestChunk( + Parameters::Builder().Add(HeartbeatInfoParameter(info)).Build())); +} + +void MakeAbortChunk(FuzzState& state, SctpPacket::Builder& b) { + b.Add(AbortChunk( + /*filled_in_verification_tag=*/true, + Parameters::Builder().Add(UserInitiatedAbortCause("Fuzzing")).Build())); +} + +void MakeErrorChunk(FuzzState& state, SctpPacket::Builder& b) { + b.Add(ErrorChunk( + Parameters::Builder().Add(ProtocolViolationCause("Fuzzing")).Build())); +} + +void MakeCookieEchoChunk(FuzzState& state, SctpPacket::Builder& b) { + std::vector cookie(StateCookie::kCookieSize); + b.Add(CookieEchoChunk(cookie)); +} + +void MakeCookieAckChunk(FuzzState& state, SctpPacket::Builder& b) { + b.Add(CookieAckChunk()); +} + +void MakeShutdownChunk(FuzzState& state, SctpPacket::Builder& b) { + b.Add(ShutdownChunk(state.GetNextTSN())); +} + +void MakeShutdownAckChunk(FuzzState& state, SctpPacket::Builder& b) { + b.Add(ShutdownAckChunk()); +} + 
+void MakeShutdownCompleteChunk(FuzzState& state, SctpPacket::Builder& b) { + b.Add(ShutdownCompleteChunk(false)); +} + +void MakeReConfigChunk(FuzzState& state, SctpPacket::Builder& b) { + std::vector streams = {StreamID(state.GetByte())}; + Parameters::Builder params_builder = + Parameters::Builder().Add(OutgoingSSNResetRequestParameter( + ReconfigRequestSN(kRandomValue), ReconfigRequestSN(kRandomValue), + state.GetNextTSN(), streams)); + b.Add(ReConfigChunk(params_builder.Build())); +} + +void MakeForwardTsnChunk(FuzzState& state, SctpPacket::Builder& b) { + std::vector skipped_streams; + for (;;) { + uint8_t stream = state.GetByte(); + if (skipped_streams.size() > 20 || stream < 0x80) { + break; + } + skipped_streams.emplace_back(StreamID(stream), SSN(state.GetByte())); + } + b.Add(ForwardTsnChunk(state.GetNextTSN(), std::move(skipped_streams))); +} + +void MakeIDataChunk(FuzzState& state, SctpPacket::Builder& b) { + DataChunk::Options options; + options.is_unordered = IsUnordered(state.GetByte() != 0); + options.is_beginning = Data::IsBeginning(state.GetByte() != 0); + options.is_end = Data::IsEnd(state.GetByte() != 0); + b.Add(IDataChunk(state.GetNextTSN(), StreamID(state.GetByte()), + state.GetNextMID(), PPID(53), FSN(0), + std::vector(10), options)); +} + +void MakeIForwardTsnChunk(FuzzState& state, SctpPacket::Builder& b) { + std::vector skipped_streams; + for (;;) { + uint8_t stream = state.GetByte(); + if (skipped_streams.size() > 20 || stream < 0x80) { + break; + } + skipped_streams.emplace_back(StreamID(stream), SSN(state.GetByte())); + } + b.Add(IForwardTsnChunk(state.GetNextTSN(), std::move(skipped_streams))); +} + +class RandomFuzzedChunk : public Chunk { + public: + explicit RandomFuzzedChunk(FuzzState& state) : state_(state) {} + + void SerializeTo(std::vector& out) const override { + size_t bytes = state_.GetByte(); + for (size_t i = 0; i < bytes; ++i) { + out.push_back(state_.GetByte()); + } + } + + std::string ToString() const override { return 
std::string("RANDOM_FUZZED"); } + + private: + FuzzState& state_; +}; + +void MakeChunkWithRandomContent(FuzzState& state, SctpPacket::Builder& b) { + b.Add(RandomFuzzedChunk(state)); +} + +std::vector GeneratePacket(FuzzState& state) { + DcSctpOptions options; + // Setting a fixed limit to not be dependent on the defaults, which may + // change. + options.mtu = 2048; + SctpPacket::Builder builder(VerificationTag(kRandomValue), options); + + // The largest expected serialized chunk, as created by fuzzers. + static constexpr size_t kMaxChunkSize = 256; + + for (int i = 0; i < 5 && builder.bytes_remaining() > kMaxChunkSize; ++i) { + switch (state.GetByte()) { + case 1: + MakeDataChunk(state, builder); + break; + case 2: + MakeInitChunk(state, builder); + break; + case 3: + MakeInitAckChunk(state, builder); + break; + case 4: + MakeSackChunk(state, builder); + break; + case 5: + MakeHeartbeatRequestChunk(state, builder); + break; + case 6: + MakeHeartbeatAckChunk(state, builder); + break; + case 7: + MakeAbortChunk(state, builder); + break; + case 8: + MakeErrorChunk(state, builder); + break; + case 9: + MakeCookieEchoChunk(state, builder); + break; + case 10: + MakeCookieAckChunk(state, builder); + break; + case 11: + MakeShutdownChunk(state, builder); + break; + case 12: + MakeShutdownAckChunk(state, builder); + break; + case 13: + MakeShutdownCompleteChunk(state, builder); + break; + case 14: + MakeReConfigChunk(state, builder); + break; + case 15: + MakeForwardTsnChunk(state, builder); + break; + case 16: + MakeIDataChunk(state, builder); + break; + case 17: + MakeIForwardTsnChunk(state, builder); + break; + case 18: + MakeChunkWithRandomContent(state, builder); + break; + default: + break; + } + } + std::vector packet = builder.Build(); + return packet; +} +} // namespace + +void FuzzSocket(DcSctpSocketInterface& socket, + FuzzerCallbacks& cb, + rtc::ArrayView data) { + if (data.size() < kMinInputLength || data.size() > kMaxInputLength) { + return; + } + if 
(data[0] >= static_cast(StartingState::kNumberOfStates)) { + return; + } + + // Set the socket in a specified valid starting state + SetSocketState(socket, cb, static_cast(data[0])); + + FuzzState state(data.subview(1)); + + while (!state.empty()) { + switch (state.GetByte()) { + case 1: + // Generate a valid SCTP packet (based on fuzz data) and "receive it". + socket.ReceivePacket(GeneratePacket(state)); + break; + case 2: + socket.Connect(); + break; + case 3: + socket.Shutdown(); + break; + case 4: + socket.Close(); + break; + case 5: { + StreamID streams[] = {StreamID(state.GetByte())}; + socket.ResetStreams(streams); + } break; + case 6: { + uint8_t flags = state.GetByte(); + SendOptions options; + options.unordered = IsUnordered(flags & 0x01); + options.max_retransmissions = + (flags & 0x02) != 0 ? absl::make_optional(0) : absl::nullopt; + size_t payload_exponent = (flags >> 2) % 16; + size_t payload_size = static_cast(1) << payload_exponent; + socket.Send(DcSctpMessage(StreamID(state.GetByte()), PPID(53), + std::vector(payload_size)), + options); + break; + } + case 7: { + // Expire an active timeout/timer. + uint8_t timeout_idx = state.GetByte(); + absl::optional timeout_id = cb.ExpireTimeout(timeout_idx); + if (timeout_id.has_value()) { + socket.HandleTimeout(*timeout_id); + } + break; + } + default: + break; + } + } +} +} // namespace dcsctp_fuzzers +} // namespace dcsctp diff --git a/net/dcsctp/fuzzers/dcsctp_fuzzers.h b/net/dcsctp/fuzzers/dcsctp_fuzzers.h new file mode 100644 index 0000000000..f3de0722f4 --- /dev/null +++ b/net/dcsctp/fuzzers/dcsctp_fuzzers.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_FUZZERS_DCSCTP_FUZZERS_H_ +#define NET_DCSCTP_FUZZERS_DCSCTP_FUZZERS_H_ + +#include +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/public/dcsctp_socket.h" + +namespace dcsctp { +namespace dcsctp_fuzzers { + +// A fake timeout used during fuzzing. +class FuzzerTimeout : public Timeout { + public: + explicit FuzzerTimeout(std::set& active_timeouts) + : active_timeouts_(active_timeouts) {} + + void Start(DurationMs duration_ms, TimeoutID timeout_id) override { + // Start is only allowed to be called on stopped or expired timeouts. + if (timeout_id_.has_value()) { + // It has been started before, but maybe it expired. Ensure that it's not + // running at least. + RTC_DCHECK(active_timeouts_.find(*timeout_id_) == active_timeouts_.end()); + } + timeout_id_ = timeout_id; + RTC_DCHECK(active_timeouts_.insert(timeout_id).second); + } + + void Stop() override { + // Stop is only allowed to be called on active timeouts. Not stopped or + // expired. + RTC_DCHECK(timeout_id_.has_value()); + RTC_DCHECK(active_timeouts_.erase(*timeout_id_) == 1); + timeout_id_ = absl::nullopt; + } + + // A set of all active timeouts, managed by `FuzzerCallbacks`. + std::set& active_timeouts_; + // If present, the timout is active and will expire reported as `timeout_id`. 
+ absl::optional timeout_id_; +}; + +class FuzzerCallbacks : public DcSctpSocketCallbacks { + public: + static constexpr int kRandomValue = 42; + void SendPacket(rtc::ArrayView data) override { + sent_packets_.emplace_back(std::vector(data.begin(), data.end())); + } + std::unique_ptr CreateTimeout() override { + return std::make_unique(active_timeouts_); + } + TimeMs TimeMillis() override { return TimeMs(42); } + uint32_t GetRandomInt(uint32_t low, uint32_t high) override { + return kRandomValue; + } + void OnMessageReceived(DcSctpMessage message) override {} + void OnError(ErrorKind error, absl::string_view message) override {} + void OnAborted(ErrorKind error, absl::string_view message) override {} + void OnConnected() override {} + void OnClosed() override {} + void OnConnectionRestarted() override {} + void OnStreamsResetFailed(rtc::ArrayView outgoing_streams, + absl::string_view reason) override {} + void OnStreamsResetPerformed( + rtc::ArrayView outgoing_streams) override {} + void OnIncomingStreamsReset( + rtc::ArrayView incoming_streams) override {} + + std::vector ConsumeSentPacket() { + if (sent_packets_.empty()) { + return {}; + } + std::vector ret = sent_packets_.front(); + sent_packets_.pop_front(); + return ret; + } + + // Given an index among the active timeouts, will expire that one. + absl::optional ExpireTimeout(size_t index) { + if (index < active_timeouts_.size()) { + auto it = active_timeouts_.begin(); + std::advance(it, index); + TimeoutID timeout_id = *it; + active_timeouts_.erase(it); + return timeout_id; + } + return absl::nullopt; + } + + private: + // Needs to be ordered, to allow fuzzers to expire timers. + std::set active_timeouts_; + std::deque> sent_packets_; +}; + +// Given some fuzzing `data` will send packets to the socket as well as calling +// API methods. 
+void FuzzSocket(DcSctpSocketInterface& socket, + FuzzerCallbacks& cb, + rtc::ArrayView data); + +} // namespace dcsctp_fuzzers +} // namespace dcsctp +#endif // NET_DCSCTP_FUZZERS_DCSCTP_FUZZERS_H_ diff --git a/net/dcsctp/fuzzers/dcsctp_fuzzers_test.cc b/net/dcsctp/fuzzers/dcsctp_fuzzers_test.cc new file mode 100644 index 0000000000..c7d2cd7c99 --- /dev/null +++ b/net/dcsctp/fuzzers/dcsctp_fuzzers_test.cc @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/fuzzers/dcsctp_fuzzers.h" + +#include "api/array_view.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/socket/dcsctp_socket.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "rtc_base/logging.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace dcsctp_fuzzers { +namespace { + +// This is a testbed where fuzzed data that cause issues can be evaluated and +// crashes reproduced. Use `xxd -i ./crash-abc` to generate `data` below. 
+TEST(DcsctpFuzzersTest, PassesTestbed) { + uint8_t data[] = {0x07, 0x09, 0x00, 0x01, 0x11, 0xff, 0xff}; + + FuzzerCallbacks cb; + DcSctpOptions options; + options.disable_checksum_verification = true; + DcSctpSocket socket("A", cb, nullptr, options); + + FuzzSocket(socket, cb, data); +} + +} // namespace +} // namespace dcsctp_fuzzers +} // namespace dcsctp diff --git a/net/dcsctp/packet/BUILD.gn b/net/dcsctp/packet/BUILD.gn new file mode 100644 index 0000000000..9c08ebc80e --- /dev/null +++ b/net/dcsctp/packet/BUILD.gn @@ -0,0 +1,338 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../../webrtc.gni") + +group("packet") { + deps = [ ":bounded_io" ] +} + +rtc_source_set("bounded_io") { + deps = [ + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + ] + sources = [ + "bounded_byte_reader.h", + "bounded_byte_writer.h", + ] +} + +rtc_library("tlv_trait") { + deps = [ + ":bounded_io", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings:strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + sources = [ + "tlv_trait.cc", + "tlv_trait.h", + ] +} + +rtc_source_set("data") { + deps = [ + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:internal_types", + "../public:types", + ] + sources = [ "data.h" ] +} + +rtc_library("crc32c") { + deps = [ + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + 
"../../../rtc_base:rtc_base_approved", + "//third_party/crc32c", + ] + sources = [ + "crc32c.cc", + "crc32c.h", + ] +} + +rtc_library("parameter") { + deps = [ + ":bounded_io", + ":data", + ":tlv_trait", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:internal_types", + "../common:math", + "../common:str_join", + "../public:types", + ] + sources = [ + "parameter/add_incoming_streams_request_parameter.cc", + "parameter/add_incoming_streams_request_parameter.h", + "parameter/add_outgoing_streams_request_parameter.cc", + "parameter/add_outgoing_streams_request_parameter.h", + "parameter/forward_tsn_supported_parameter.cc", + "parameter/forward_tsn_supported_parameter.h", + "parameter/heartbeat_info_parameter.cc", + "parameter/heartbeat_info_parameter.h", + "parameter/incoming_ssn_reset_request_parameter.cc", + "parameter/incoming_ssn_reset_request_parameter.h", + "parameter/outgoing_ssn_reset_request_parameter.cc", + "parameter/outgoing_ssn_reset_request_parameter.h", + "parameter/parameter.cc", + "parameter/parameter.h", + "parameter/reconfiguration_response_parameter.cc", + "parameter/reconfiguration_response_parameter.h", + "parameter/ssn_tsn_reset_request_parameter.cc", + "parameter/ssn_tsn_reset_request_parameter.h", + "parameter/state_cookie_parameter.cc", + "parameter/state_cookie_parameter.h", + "parameter/supported_extensions_parameter.cc", + "parameter/supported_extensions_parameter.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("error_cause") { + deps = [ + ":data", + ":parameter", + ":tlv_trait", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:internal_types", + "../common:math", + "../common:str_join", + "../packet:bounded_io", + 
"../public:types", + ] + sources = [ + "error_cause/cookie_received_while_shutting_down_cause.cc", + "error_cause/cookie_received_while_shutting_down_cause.h", + "error_cause/error_cause.cc", + "error_cause/error_cause.h", + "error_cause/invalid_mandatory_parameter_cause.cc", + "error_cause/invalid_mandatory_parameter_cause.h", + "error_cause/invalid_stream_identifier_cause.cc", + "error_cause/invalid_stream_identifier_cause.h", + "error_cause/missing_mandatory_parameter_cause.cc", + "error_cause/missing_mandatory_parameter_cause.h", + "error_cause/no_user_data_cause.cc", + "error_cause/no_user_data_cause.h", + "error_cause/out_of_resource_error_cause.cc", + "error_cause/out_of_resource_error_cause.h", + "error_cause/protocol_violation_cause.cc", + "error_cause/protocol_violation_cause.h", + "error_cause/restart_of_an_association_with_new_address_cause.cc", + "error_cause/restart_of_an_association_with_new_address_cause.h", + "error_cause/stale_cookie_error_cause.cc", + "error_cause/stale_cookie_error_cause.h", + "error_cause/unrecognized_chunk_type_cause.cc", + "error_cause/unrecognized_chunk_type_cause.h", + "error_cause/unrecognized_parameter_cause.cc", + "error_cause/unrecognized_parameter_cause.h", + "error_cause/unresolvable_address_cause.cc", + "error_cause/unresolvable_address_cause.h", + "error_cause/user_initiated_abort_cause.cc", + "error_cause/user_initiated_abort_cause.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("chunk") { + deps = [ + ":data", + ":error_cause", + ":parameter", + ":tlv_trait", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:math", + "../common:str_join", + "../packet:bounded_io", + ] + sources = [ + "chunk/abort_chunk.cc", + "chunk/abort_chunk.h", + "chunk/chunk.cc", + "chunk/chunk.h", + 
"chunk/cookie_ack_chunk.cc", + "chunk/cookie_ack_chunk.h", + "chunk/cookie_echo_chunk.cc", + "chunk/cookie_echo_chunk.h", + "chunk/data_chunk.cc", + "chunk/data_chunk.h", + "chunk/data_common.h", + "chunk/error_chunk.cc", + "chunk/error_chunk.h", + "chunk/forward_tsn_chunk.cc", + "chunk/forward_tsn_chunk.h", + "chunk/forward_tsn_common.h", + "chunk/heartbeat_ack_chunk.cc", + "chunk/heartbeat_ack_chunk.h", + "chunk/heartbeat_request_chunk.cc", + "chunk/heartbeat_request_chunk.h", + "chunk/idata_chunk.cc", + "chunk/idata_chunk.h", + "chunk/iforward_tsn_chunk.cc", + "chunk/iforward_tsn_chunk.h", + "chunk/init_ack_chunk.cc", + "chunk/init_ack_chunk.h", + "chunk/init_chunk.cc", + "chunk/init_chunk.h", + "chunk/reconfig_chunk.cc", + "chunk/reconfig_chunk.h", + "chunk/sack_chunk.cc", + "chunk/sack_chunk.h", + "chunk/shutdown_ack_chunk.cc", + "chunk/shutdown_ack_chunk.h", + "chunk/shutdown_chunk.cc", + "chunk/shutdown_chunk.h", + "chunk/shutdown_complete_chunk.cc", + "chunk/shutdown_complete_chunk.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("chunk_validators") { + deps = [ + ":chunk", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + ] + sources = [ + "chunk_validators.cc", + "chunk_validators.h", + ] +} + +rtc_library("sctp_packet") { + deps = [ + ":bounded_io", + ":chunk", + ":crc32c", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:internal_types", + "../common:math", + "../public:types", + ] + sources = [ + "sctp_packet.cc", + "sctp_packet.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory:memory", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +if (rtc_include_tests) { + rtc_library("dcsctp_packet_unittests") { + testonly = true + + deps = [ + ":bounded_io", + 
":chunk", + ":chunk_validators", + ":crc32c", + ":error_cause", + ":parameter", + ":sctp_packet", + ":tlv_trait", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "../common:internal_types", + "../common:math", + "../public:types", + "../testing:testing_macros", + ] + sources = [ + "bounded_byte_reader_test.cc", + "bounded_byte_writer_test.cc", + "chunk/abort_chunk_test.cc", + "chunk/cookie_ack_chunk_test.cc", + "chunk/cookie_echo_chunk_test.cc", + "chunk/data_chunk_test.cc", + "chunk/error_chunk_test.cc", + "chunk/forward_tsn_chunk_test.cc", + "chunk/heartbeat_ack_chunk_test.cc", + "chunk/heartbeat_request_chunk_test.cc", + "chunk/idata_chunk_test.cc", + "chunk/iforward_tsn_chunk_test.cc", + "chunk/init_ack_chunk_test.cc", + "chunk/init_chunk_test.cc", + "chunk/reconfig_chunk_test.cc", + "chunk/sack_chunk_test.cc", + "chunk/shutdown_ack_chunk_test.cc", + "chunk/shutdown_chunk_test.cc", + "chunk/shutdown_complete_chunk_test.cc", + "chunk_validators_test.cc", + "crc32c_test.cc", + "error_cause/cookie_received_while_shutting_down_cause_test.cc", + "error_cause/invalid_mandatory_parameter_cause_test.cc", + "error_cause/invalid_stream_identifier_cause_test.cc", + "error_cause/missing_mandatory_parameter_cause_test.cc", + "error_cause/no_user_data_cause_test.cc", + "error_cause/out_of_resource_error_cause_test.cc", + "error_cause/protocol_violation_cause_test.cc", + "error_cause/restart_of_an_association_with_new_address_cause_test.cc", + "error_cause/stale_cookie_error_cause_test.cc", + "error_cause/unrecognized_chunk_type_cause_test.cc", + "error_cause/unrecognized_parameter_cause_test.cc", + "error_cause/unresolvable_address_cause_test.cc", + "error_cause/user_initiated_abort_cause_test.cc", + "parameter/add_incoming_streams_request_parameter_test.cc", + "parameter/add_outgoing_streams_request_parameter_test.cc", + 
"parameter/forward_tsn_supported_parameter_test.cc", + "parameter/incoming_ssn_reset_request_parameter_test.cc", + "parameter/outgoing_ssn_reset_request_parameter_test.cc", + "parameter/parameter_test.cc", + "parameter/reconfiguration_response_parameter_test.cc", + "parameter/ssn_tsn_reset_request_parameter_test.cc", + "parameter/state_cookie_parameter_test.cc", + "parameter/supported_extensions_parameter_test.cc", + "sctp_packet_test.cc", + "tlv_trait_test.cc", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + } +} diff --git a/net/dcsctp/packet/bounded_byte_reader.h b/net/dcsctp/packet/bounded_byte_reader.h new file mode 100644 index 0000000000..603ed6ac33 --- /dev/null +++ b/net/dcsctp/packet/bounded_byte_reader.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef NET_DCSCTP_PACKET_BOUNDED_BYTE_READER_H_ +#define NET_DCSCTP_PACKET_BOUNDED_BYTE_READER_H_ + +#include + +#include "api/array_view.h" + +namespace dcsctp { + +// TODO(boivie): These generic functions - and possibly this entire class - +// could be a candidate to have added to rtc_base/. They should use compiler +// intrinsics as well. +namespace internal { +// Loads a 8-bit unsigned word at `data`. +inline uint8_t LoadBigEndian8(const uint8_t* data) { + return data[0]; +} + +// Loads a 16-bit unsigned word at `data`. +inline uint16_t LoadBigEndian16(const uint8_t* data) { + return (data[0] << 8) | data[1]; +} + +// Loads a 32-bit unsigned word at `data`. 
+inline uint32_t LoadBigEndian32(const uint8_t* data) { + return (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; +} +} // namespace internal + +// BoundedByteReader wraps an ArrayView and divides it into two parts; A fixed +// size - which is the template parameter - and a variable size, which is what +// remains in `data` after the `FixedSize`. +// +// The BoundedByteReader provides methods to load/read big endian numbers from +// the FixedSize portion of the buffer, and these are read with static bounds +// checking, to avoid out-of-bounds accesses without a run-time penalty. +// +// The variable sized portion can either be used to create sub-readers, which +// themselves would provide compile-time bounds-checking, or the entire variable +// sized portion can be retrieved as an ArrayView. +template +class BoundedByteReader { + public: + explicit BoundedByteReader(rtc::ArrayView data) : data_(data) { + RTC_CHECK(data.size() >= FixedSize); + } + + template + uint8_t Load8() const { + static_assert(offset + sizeof(uint8_t) <= FixedSize, "Out-of-bounds"); + return internal::LoadBigEndian8(&data_[offset]); + } + + template + uint16_t Load16() const { + static_assert(offset + sizeof(uint16_t) <= FixedSize, "Out-of-bounds"); + static_assert((offset % sizeof(uint16_t)) == 0, "Unaligned access"); + return internal::LoadBigEndian16(&data_[offset]); + } + + template + uint32_t Load32() const { + static_assert(offset + sizeof(uint32_t) <= FixedSize, "Out-of-bounds"); + static_assert((offset % sizeof(uint32_t)) == 0, "Unaligned access"); + return internal::LoadBigEndian32(&data_[offset]); + } + + template + BoundedByteReader sub_reader(size_t variable_offset) const { + RTC_CHECK(FixedSize + variable_offset + SubSize <= data_.size()); + + rtc::ArrayView sub_span = + data_.subview(FixedSize + variable_offset, SubSize); + return BoundedByteReader(sub_span); + } + + size_t variable_data_size() const { return data_.size() - FixedSize; } + + rtc::ArrayView 
variable_data() const { + return data_.subview(FixedSize, data_.size() - FixedSize); + } + + private: + const rtc::ArrayView data_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_BOUNDED_BYTE_READER_H_ diff --git a/net/dcsctp/packet/bounded_byte_reader_test.cc b/net/dcsctp/packet/bounded_byte_reader_test.cc new file mode 100644 index 0000000000..2fb4a86785 --- /dev/null +++ b/net/dcsctp/packet/bounded_byte_reader_test.cc @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "net/dcsctp/packet/bounded_byte_reader.h" + +#include "api/array_view.h" +#include "rtc_base/buffer.h" +#include "rtc_base/checks.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(BoundedByteReaderTest, CanLoadData) { + uint8_t data[14] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4}; + + BoundedByteReader<8> reader(data); + EXPECT_EQ(reader.variable_data_size(), 6U); + EXPECT_EQ(reader.Load32<0>(), 0x01020304U); + EXPECT_EQ(reader.Load32<4>(), 0x05060708U); + EXPECT_EQ(reader.Load16<4>(), 0x0506U); + EXPECT_EQ(reader.Load8<4>(), 0x05U); + EXPECT_EQ(reader.Load8<5>(), 0x06U); + + BoundedByteReader<6> sub = reader.sub_reader<6>(0); + EXPECT_EQ(sub.Load16<0>(), 0x0900U); + EXPECT_EQ(sub.Load32<0>(), 0x09000102U); + EXPECT_EQ(sub.Load16<4>(), 0x0304U); + + EXPECT_THAT(reader.variable_data(), ElementsAre(9, 0, 1, 2, 3, 4)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/bounded_byte_writer.h b/net/dcsctp/packet/bounded_byte_writer.h new file mode 100644 index 0000000000..467f26800b --- /dev/null +++ 
b/net/dcsctp/packet/bounded_byte_writer.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef NET_DCSCTP_PACKET_BOUNDED_BYTE_WRITER_H_ +#define NET_DCSCTP_PACKET_BOUNDED_BYTE_WRITER_H_ + +#include + +#include "api/array_view.h" + +namespace dcsctp { + +// TODO(boivie): These generic functions - and possibly this entire class - +// could be a candidate to have added to rtc_base/. They should use compiler +// intrinsics as well. +namespace internal { +// Stores a 8-bit unsigned word at `data`. +inline void StoreBigEndian8(uint8_t* data, uint8_t val) { + data[0] = val; +} + +// Stores a 16-bit unsigned word at `data`. +inline void StoreBigEndian16(uint8_t* data, uint16_t val) { + data[0] = val >> 8; + data[1] = val; +} + +// Stores a 32-bit unsigned word at `data`. +inline void StoreBigEndian32(uint8_t* data, uint32_t val) { + data[0] = val >> 24; + data[1] = val >> 16; + data[2] = val >> 8; + data[3] = val; +} +} // namespace internal + +// BoundedByteWriter wraps an ArrayView and divides it into two parts; A fixed +// size - which is the template parameter - and a variable size, which is what +// remains in `data` after the `FixedSize`. +// +// The BoundedByteWriter provides methods to write big endian numbers to the +// FixedSize portion of the buffer, and these are written with static bounds +// checking, to avoid out-of-bounds accesses without a run-time penalty. +// +// The variable sized portion can either be used to create sub-writers, which +// themselves would provide compile-time bounds-checking, or data can be copied +// to it. 
+template +class BoundedByteWriter { + public: + explicit BoundedByteWriter(rtc::ArrayView data) : data_(data) { + RTC_CHECK(data.size() >= FixedSize); + } + + template + void Store8(uint8_t value) { + static_assert(offset + sizeof(uint8_t) <= FixedSize, "Out-of-bounds"); + internal::StoreBigEndian8(&data_[offset], value); + } + + template + void Store16(uint16_t value) { + static_assert(offset + sizeof(uint16_t) <= FixedSize, "Out-of-bounds"); + static_assert((offset % sizeof(uint16_t)) == 0, "Unaligned access"); + internal::StoreBigEndian16(&data_[offset], value); + } + + template + void Store32(uint32_t value) { + static_assert(offset + sizeof(uint32_t) <= FixedSize, "Out-of-bounds"); + static_assert((offset % sizeof(uint32_t)) == 0, "Unaligned access"); + internal::StoreBigEndian32(&data_[offset], value); + } + + template + BoundedByteWriter sub_writer(size_t variable_offset) { + RTC_CHECK(FixedSize + variable_offset + SubSize <= data_.size()); + + return BoundedByteWriter( + data_.subview(FixedSize + variable_offset, SubSize)); + } + + void CopyToVariableData(rtc::ArrayView source) { + memcpy(data_.data() + FixedSize, source.data(), + std::min(source.size(), data_.size() - FixedSize)); + } + + private: + rtc::ArrayView data_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_BOUNDED_BYTE_WRITER_H_ diff --git a/net/dcsctp/packet/bounded_byte_writer_test.cc b/net/dcsctp/packet/bounded_byte_writer_test.cc new file mode 100644 index 0000000000..3cea0a2f7c --- /dev/null +++ b/net/dcsctp/packet/bounded_byte_writer_test.cc @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "net/dcsctp/packet/bounded_byte_writer.h" + +#include + +#include "api/array_view.h" +#include "rtc_base/buffer.h" +#include "rtc_base/checks.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(BoundedByteWriterTest, CanWriteData) { + std::vector data(14); + + BoundedByteWriter<8> writer(data); + writer.Store32<0>(0x01020304); + writer.Store16<4>(0x0506); + writer.Store8<6>(0x07); + writer.Store8<7>(0x08); + + uint8_t variable_data[] = {0, 0, 0, 0, 3, 0}; + writer.CopyToVariableData(variable_data); + + BoundedByteWriter<6> sub = writer.sub_writer<6>(0); + sub.Store32<0>(0x09000000); + sub.Store16<2>(0x0102); + + BoundedByteWriter<2> sub2 = writer.sub_writer<2>(4); + sub2.Store8<1>(0x04); + + EXPECT_THAT(data, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/abort_chunk.cc b/net/dcsctp/packet/chunk/abort_chunk.cc new file mode 100644 index 0000000000..8348eb96a9 --- /dev/null +++ b/net/dcsctp/packet/chunk/abort_chunk.cc @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/abort_chunk.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.7 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 6 |Reserved |T| Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / zero or more Error Causes / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int AbortChunk::kType; + +absl::optional AbortChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + absl::optional error_causes = + Parameters::Parse(reader->variable_data()); + if (!error_causes.has_value()) { + return absl::nullopt; + } + uint8_t flags = reader->Load8<1>(); + bool filled_in_verification_tag = (flags & (1 << kFlagsBitT)) == 0; + return AbortChunk(filled_in_verification_tag, *std::move(error_causes)); +} + +void AbortChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView error_causes = error_causes_.data(); + BoundedByteWriter writer = AllocateTLV(out, error_causes.size()); + writer.Store8<1>(filled_in_verification_tag_ ? 0 : (1 << kFlagsBitT)); + writer.CopyToVariableData(error_causes); +} + +std::string AbortChunk::ToString() const { + return "ABORT"; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/abort_chunk.h b/net/dcsctp/packet/chunk/abort_chunk.h new file mode 100644 index 0000000000..1408a75e80 --- /dev/null +++ b/net/dcsctp/packet/chunk/abort_chunk.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. 
All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_ABORT_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_ABORT_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.7 +struct AbortChunkConfig : ChunkConfig { + static constexpr int kType = 6; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class AbortChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = AbortChunkConfig::kType; + + AbortChunk(bool filled_in_verification_tag, Parameters error_causes) + : filled_in_verification_tag_(filled_in_verification_tag), + error_causes_(std::move(error_causes)) {} + + AbortChunk(AbortChunk&& other) = default; + AbortChunk& operator=(AbortChunk&& other) = default; + + static absl::optional Parse(rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + bool filled_in_verification_tag() const { + return filled_in_verification_tag_; + } + + const Parameters& error_causes() const { return error_causes_; } + + private: + static constexpr int kFlagsBitT = 0; + bool filled_in_verification_tag_; + Parameters error_causes_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_ABORT_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/abort_chunk_test.cc b/net/dcsctp/packet/chunk/abort_chunk_test.cc new file mode 
100644 index 0000000000..c1f3a4d5b9 --- /dev/null +++ b/net/dcsctp/packet/chunk/abort_chunk_test.cc @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/abort_chunk.h" + +#include + +#include +#include + +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/error_cause/user_initiated_abort_cause.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(AbortChunkTest, FromCapture) { + /* + ABORT chunk + Chunk type: ABORT (6) + Chunk flags: 0x00 + Chunk length: 8 + User initiated ABORT cause + */ + + uint8_t data[] = {0x06, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x04}; + + ASSERT_HAS_VALUE_AND_ASSIGN(AbortChunk chunk, AbortChunk::Parse(data)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + UserInitiatedAbortCause cause, + chunk.error_causes().get()); + + EXPECT_EQ(cause.upper_layer_abort_reason(), ""); +} + +TEST(AbortChunkTest, SerializeAndDeserialize) { + AbortChunk chunk(/*filled_in_verification_tag=*/true, + Parameters::Builder() + .Add(UserInitiatedAbortCause("Close called")) + .Build()); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(AbortChunk deserialized, + AbortChunk::Parse(serialized)); + ASSERT_HAS_VALUE_AND_ASSIGN( + UserInitiatedAbortCause cause, + deserialized.error_causes().get()); + + EXPECT_EQ(cause.upper_layer_abort_reason(), "Close called"); +} + +// Validates that AbortChunk doesn't make any alignment assumptions. 
+TEST(AbortChunkTest, SerializeAndDeserializeOneChar) { + AbortChunk chunk( + /*filled_in_verification_tag=*/true, + Parameters::Builder().Add(UserInitiatedAbortCause("!")).Build()); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(AbortChunk deserialized, + AbortChunk::Parse(serialized)); + ASSERT_HAS_VALUE_AND_ASSIGN( + UserInitiatedAbortCause cause, + deserialized.error_causes().get()); + + EXPECT_EQ(cause.upper_layer_abort_reason(), "!"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/chunk.cc b/net/dcsctp/packet/chunk/chunk.cc new file mode 100644 index 0000000000..832ab82288 --- /dev/null +++ b/net/dcsctp/packet/chunk/chunk.cc @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/chunk.h" + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/math.h" +#include "net/dcsctp/packet/chunk/abort_chunk.h" +#include "net/dcsctp/packet/chunk/cookie_ack_chunk.h" +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/error_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" +#include "net/dcsctp/packet/chunk/idata_chunk.h" +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/init_ack_chunk.h" +#include "net/dcsctp/packet/chunk/init_chunk.h" +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_ack_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_complete_chunk.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +template +bool ParseAndPrint(uint8_t chunk_type, + rtc::ArrayView data, + rtc::StringBuilder& sb) { + if (chunk_type == Chunk::kType) { + absl::optional c = Chunk::Parse(data); + if (c.has_value()) { + sb << c->ToString(); + } else { + sb << "Failed to parse chunk of type " << chunk_type; + } + return true; + } + return false; +} + +std::string DebugConvertChunkToString(rtc::ArrayView data) { + rtc::StringBuilder sb; + + if (data.empty()) { + sb << "Failed to parse chunk due to empty data"; + } else { + uint8_t chunk_type = data[0]; + if (!ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, 
sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb) && + !ParseAndPrint(chunk_type, data, sb)) { + sb << "Unhandled chunk type: " << static_cast(chunk_type); + } + } + return sb.Release(); +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/chunk.h b/net/dcsctp/packet/chunk/chunk.h new file mode 100644 index 0000000000..687aa1daa1 --- /dev/null +++ b/net/dcsctp/packet/chunk/chunk.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_CHUNK_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// Base class for all SCTP chunks +class Chunk { + public: + Chunk() {} + virtual ~Chunk() = default; + + // Chunks can contain data payloads that shouldn't be copied unnecessarily. 
+ Chunk(Chunk&& other) = default; + Chunk& operator=(Chunk&& other) = default; + Chunk(const Chunk&) = delete; + Chunk& operator=(const Chunk&) = delete; + + // Serializes the chunk to `out`, growing it as necessary. + virtual void SerializeTo(std::vector& out) const = 0; + + // Returns a human readable description of this chunk and its parameters. + virtual std::string ToString() const = 0; +}; + +// Introspects the chunk in `data` and returns a human readable textual +// representation of it, to be used in debugging. +std::string DebugConvertChunkToString(rtc::ArrayView data); + +struct ChunkConfig { + static constexpr int kTypeSizeInBytes = 1; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/cookie_ack_chunk.cc b/net/dcsctp/packet/chunk/cookie_ack_chunk.cc new file mode 100644 index 0000000000..4839969ccf --- /dev/null +++ b/net/dcsctp/packet/chunk/cookie_ack_chunk.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/cookie_ack_chunk.h" + +#include + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.12 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 11 |Chunk Flags | Length = 4 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int CookieAckChunk::kType; + +absl::optional CookieAckChunk::Parse( + rtc::ArrayView data) { + if (!ParseTLV(data).has_value()) { + return absl::nullopt; + } + return CookieAckChunk(); +} + +void CookieAckChunk::SerializeTo(std::vector& out) const { + AllocateTLV(out); +} + +std::string CookieAckChunk::ToString() const { + return "COOKIE-ACK"; +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/cookie_ack_chunk.h b/net/dcsctp/packet/chunk/cookie_ack_chunk.h new file mode 100644 index 0000000000..f7d4a33f7d --- /dev/null +++ b/net/dcsctp/packet/chunk/cookie_ack_chunk.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_COOKIE_ACK_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_COOKIE_ACK_CHUNK_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.12 +struct CookieAckChunkConfig : ChunkConfig { + static constexpr int kType = 11; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class CookieAckChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = CookieAckChunkConfig::kType; + + CookieAckChunk() {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_COOKIE_ACK_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/cookie_ack_chunk_test.cc b/net/dcsctp/packet/chunk/cookie_ack_chunk_test.cc new file mode 100644 index 0000000000..3f560c6fef --- /dev/null +++ b/net/dcsctp/packet/chunk/cookie_ack_chunk_test.cc @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/cookie_ack_chunk.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(CookieAckChunkTest, FromCapture) { + /* + COOKIE_ACK chunk + Chunk type: COOKIE_ACK (11) + Chunk flags: 0x00 + Chunk length: 4 + */ + + uint8_t data[] = {0x0b, 0x00, 0x00, 0x04}; + + EXPECT_TRUE(CookieAckChunk::Parse(data).has_value()); +} + +TEST(CookieAckChunkTest, SerializeAndDeserialize) { + CookieAckChunk chunk; + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(CookieAckChunk deserialized, + CookieAckChunk::Parse(serialized)); + EXPECT_EQ(deserialized.ToString(), "COOKIE-ACK"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/cookie_echo_chunk.cc b/net/dcsctp/packet/chunk/cookie_echo_chunk.cc new file mode 100644 index 0000000000..a01d0b13c4 --- /dev/null +++ b/net/dcsctp/packet/chunk/cookie_echo_chunk.cc @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.11 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 10 |Chunk Flags | Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / Cookie / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int CookieEchoChunk::kType; + +absl::optional CookieEchoChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + return CookieEchoChunk(reader->variable_data()); +} + +void CookieEchoChunk::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out, cookie_.size()); + writer.CopyToVariableData(cookie_); +} + +std::string CookieEchoChunk::ToString() const { + return "COOKIE-ECHO"; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/cookie_echo_chunk.h b/net/dcsctp/packet/chunk/cookie_echo_chunk.h new file mode 100644 index 0000000000..8cb80527f8 --- /dev/null +++ b/net/dcsctp/packet/chunk/cookie_echo_chunk.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_COOKIE_ECHO_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_COOKIE_ECHO_CHUNK_H_ +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.11 +struct CookieEchoChunkConfig : ChunkConfig { + static constexpr int kType = 10; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class CookieEchoChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = CookieEchoChunkConfig::kType; + + explicit CookieEchoChunk(rtc::ArrayView cookie) + : cookie_(cookie.begin(), cookie.end()) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + rtc::ArrayView cookie() const { return cookie_; } + + private: + std::vector cookie_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_COOKIE_ECHO_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/cookie_echo_chunk_test.cc b/net/dcsctp/packet/chunk/cookie_echo_chunk_test.cc new file mode 100644 index 0000000000..d06e0a6439 --- /dev/null +++ b/net/dcsctp/packet/chunk/cookie_echo_chunk_test.cc @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(CookieEchoChunkTest, FromCapture) { + /* + COOKIE_ECHO chunk (Cookie length: 256 bytes) + Chunk type: COOKIE_ECHO (10) + Chunk flags: 0x00 + Chunk length: 260 + Cookie: 12345678 + */ + + uint8_t data[] = {0x0a, 0x00, 0x00, 0x08, 0x12, 0x34, 0x56, 0x78}; + + ASSERT_HAS_VALUE_AND_ASSIGN(CookieEchoChunk chunk, + CookieEchoChunk::Parse(data)); + + EXPECT_THAT(chunk.cookie(), ElementsAre(0x12, 0x34, 0x56, 0x78)); +} + +TEST(CookieEchoChunkTest, SerializeAndDeserialize) { + uint8_t cookie[] = {1, 2, 3, 4}; + CookieEchoChunk chunk(cookie); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(CookieEchoChunk deserialized, + CookieEchoChunk::Parse(serialized)); + + EXPECT_THAT(deserialized.cookie(), ElementsAre(1, 2, 3, 4)); + EXPECT_EQ(deserialized.ToString(), "COOKIE-ECHO"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/data_chunk.cc b/net/dcsctp/packet/chunk/data_chunk.cc new file mode 100644 index 0000000000..cf65f53d29 --- /dev/null +++ b/net/dcsctp/packet/chunk/data_chunk.cc @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/data_chunk.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/chunk/data_common.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.1 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 0 | Reserved|U|B|E| Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | TSN | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Identifier S | Stream Sequence Number n | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Payload Protocol Identifier | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / User Data (seq n of Stream S) / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int DataChunk::kType; + +absl::optional DataChunk::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + uint8_t flags = reader->Load8<1>(); + TSN tsn(reader->Load32<4>()); + StreamID stream_identifier(reader->Load16<8>()); + SSN ssn(reader->Load16<10>()); + PPID ppid(reader->Load32<12>()); + + Options options; + options.is_end = Data::IsEnd((flags & (1 << kFlagsBitEnd)) != 0); + options.is_beginning = + Data::IsBeginning((flags & (1 << kFlagsBitBeginning)) != 0); + options.is_unordered = IsUnordered((flags & (1 << kFlagsBitUnordered)) != 0); + options.immediate_ack = + ImmediateAckFlag((flags & (1 << kFlagsBitImmediateAck)) != 0); + + return DataChunk(tsn, stream_identifier, ssn, ppid, + std::vector(reader->variable_data().begin(), + 
reader->variable_data().end()), + options); +} + +void DataChunk::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out, payload().size()); + + writer.Store8<1>( + (*options().is_end ? (1 << kFlagsBitEnd) : 0) | + (*options().is_beginning ? (1 << kFlagsBitBeginning) : 0) | + (*options().is_unordered ? (1 << kFlagsBitUnordered) : 0) | + (*options().immediate_ack ? (1 << kFlagsBitImmediateAck) : 0)); + writer.Store32<4>(*tsn()); + writer.Store16<8>(*stream_id()); + writer.Store16<10>(*ssn()); + writer.Store32<12>(*ppid()); + + writer.CopyToVariableData(payload()); +} + +std::string DataChunk::ToString() const { + rtc::StringBuilder sb; + sb << "DATA, type=" << (options().is_unordered ? "unordered" : "ordered") + << "::" + << (*options().is_beginning && *options().is_end + ? "complete" + : *options().is_beginning ? "first" + : *options().is_end ? "last" : "middle") + << ", tsn=" << *tsn() << ", stream_id=" << *stream_id() + << ", ppid=" << *ppid() << ", length=" << payload().size(); + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/data_chunk.h b/net/dcsctp/packet/chunk/data_chunk.h new file mode 100644 index 0000000000..12bb05f2c4 --- /dev/null +++ b/net/dcsctp/packet/chunk/data_chunk.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_DATA_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_DATA_CHUNK_H_ +#include +#include + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/chunk/data_common.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.1 +struct DataChunkConfig : ChunkConfig { + static constexpr int kType = 0; + static constexpr size_t kHeaderSize = 16; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class DataChunk : public AnyDataChunk, public TLVTrait { + public: + static constexpr int kType = DataChunkConfig::kType; + + // Exposed to allow the retransmission queue to make room for the correct + // header size. + static constexpr size_t kHeaderSize = DataChunkConfig::kHeaderSize; + + DataChunk(TSN tsn, + StreamID stream_id, + SSN ssn, + PPID ppid, + std::vector payload, + const Options& options) + : AnyDataChunk(tsn, + stream_id, + ssn, + MID(0), + FSN(0), + ppid, + std::move(payload), + options) {} + + DataChunk(TSN tsn, Data&& data, bool immediate_ack) + : AnyDataChunk(tsn, std::move(data), immediate_ack) {} + + static absl::optional Parse(rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_DATA_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/data_chunk_test.cc b/net/dcsctp/packet/chunk/data_chunk_test.cc new file mode 100644 index 0000000000..6a5ca82bae --- /dev/null +++ b/net/dcsctp/packet/chunk/data_chunk_test.cc @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/data_chunk.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(DataChunkTest, FromCapture) { + /* + DATA chunk(ordered, complete segment, TSN: 1426601532, SID: 2, SSN: 1, + PPID: 53, payload length: 4 bytes) + Chunk type: DATA (0) + Chunk flags: 0x03 + Chunk length: 20 + Transmission sequence number: 1426601532 + Stream identifier: 0x0002 + Stream sequence number: 1 + Payload protocol identifier: WebRTC Binary (53) + */ + + uint8_t data[] = {0x00, 0x03, 0x00, 0x14, 0x55, 0x08, 0x36, 0x3c, 0x00, 0x02, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x35, 0x00, 0x01, 0x02, 0x03}; + + ASSERT_HAS_VALUE_AND_ASSIGN(DataChunk chunk, DataChunk::Parse(data)); + EXPECT_EQ(*chunk.tsn(), 1426601532u); + EXPECT_EQ(*chunk.stream_id(), 2u); + EXPECT_EQ(*chunk.ssn(), 1u); + EXPECT_EQ(*chunk.ppid(), 53u); + EXPECT_TRUE(*chunk.options().is_beginning); + EXPECT_TRUE(*chunk.options().is_end); + EXPECT_FALSE(*chunk.options().is_unordered); + EXPECT_FALSE(*chunk.options().immediate_ack); + EXPECT_THAT(chunk.payload(), ElementsAre(0x0, 0x1, 0x2, 0x3)); +} + +TEST(DataChunkTest, SerializeAndDeserialize) { + DataChunk chunk(TSN(123), StreamID(456), SSN(789), PPID(9090), + /*payload=*/{1, 2, 3, 4, 5}, + /*options=*/{}); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(DataChunk deserialized, + DataChunk::Parse(serialized)); + EXPECT_EQ(*chunk.tsn(), 123u); + EXPECT_EQ(*chunk.stream_id(), 456u); + EXPECT_EQ(*chunk.ssn(), 789u); + EXPECT_EQ(*chunk.ppid(), 9090u); + EXPECT_THAT(chunk.payload(), ElementsAre(1, 2, 3, 4, 5)); + + 
EXPECT_EQ(deserialized.ToString(), + "DATA, type=ordered::middle, tsn=123, stream_id=456, ppid=9090, " + "length=5"); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/data_common.h b/net/dcsctp/packet/chunk/data_common.h new file mode 100644 index 0000000000..b15a034593 --- /dev/null +++ b/net/dcsctp/packet/chunk/data_common.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_DATA_COMMON_H_ +#define NET_DCSCTP_PACKET_CHUNK_DATA_COMMON_H_ +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/data.h" + +namespace dcsctp { + +// Base class for DataChunk and IDataChunk +class AnyDataChunk : public Chunk { + public: + // Represents the "immediate ack" flag on DATA/I-DATA, from RFC7053. + using ImmediateAckFlag = StrongAlias; + + // Data chunk options. 
+ // See https://tools.ietf.org/html/rfc4960#section-3.3.1 + struct Options { + Data::IsEnd is_end = Data::IsEnd(false); + Data::IsBeginning is_beginning = Data::IsBeginning(false); + IsUnordered is_unordered = IsUnordered(false); + ImmediateAckFlag immediate_ack = ImmediateAckFlag(false); + }; + + TSN tsn() const { return tsn_; } + + Options options() const { + Options options; + options.is_end = data_.is_end; + options.is_beginning = data_.is_beginning; + options.is_unordered = data_.is_unordered; + options.immediate_ack = immediate_ack_; + return options; + } + + StreamID stream_id() const { return data_.stream_id; } + SSN ssn() const { return data_.ssn; } + MID message_id() const { return data_.message_id; } + FSN fsn() const { return data_.fsn; } + PPID ppid() const { return data_.ppid; } + rtc::ArrayView payload() const { return data_.payload; } + + // Extracts the Data from the chunk, as a destructive action. + Data extract() && { return std::move(data_); } + + AnyDataChunk(TSN tsn, + StreamID stream_id, + SSN ssn, + MID message_id, + FSN fsn, + PPID ppid, + std::vector payload, + const Options& options) + : tsn_(tsn), + data_(stream_id, + ssn, + message_id, + fsn, + ppid, + std::move(payload), + options.is_beginning, + options.is_end, + options.is_unordered), + immediate_ack_(options.immediate_ack) {} + + AnyDataChunk(TSN tsn, Data data, bool immediate_ack) + : tsn_(tsn), data_(std::move(data)), immediate_ack_(immediate_ack) {} + + protected: + // Bits in `flags` header field. 
+ static constexpr int kFlagsBitEnd = 0; + static constexpr int kFlagsBitBeginning = 1; + static constexpr int kFlagsBitUnordered = 2; + static constexpr int kFlagsBitImmediateAck = 3; + + private: + TSN tsn_; + Data data_; + ImmediateAckFlag immediate_ack_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_DATA_COMMON_H_ diff --git a/net/dcsctp/packet/chunk/error_chunk.cc b/net/dcsctp/packet/chunk/error_chunk.cc new file mode 100644 index 0000000000..baac0c5588 --- /dev/null +++ b/net/dcsctp/packet/chunk/error_chunk.cc @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/error_chunk.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 9 | Chunk Flags | Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / one or more Error Causes / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int ErrorChunk::kType; + +absl::optional ErrorChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + absl::optional 
error_causes = + Parameters::Parse(reader->variable_data()); + if (!error_causes.has_value()) { + return absl::nullopt; + } + return ErrorChunk(*std::move(error_causes)); +} + +void ErrorChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView error_causes = error_causes_.data(); + BoundedByteWriter writer = AllocateTLV(out, error_causes.size()); + writer.CopyToVariableData(error_causes); +} + +std::string ErrorChunk::ToString() const { + return "ERROR"; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/error_chunk.h b/net/dcsctp/packet/chunk/error_chunk.h new file mode 100644 index 0000000000..96122cff6a --- /dev/null +++ b/net/dcsctp/packet/chunk/error_chunk.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_ERROR_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_ERROR_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10 +struct ErrorChunkConfig : ChunkConfig { + static constexpr int kType = 9; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 4; +}; + +class ErrorChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = ErrorChunkConfig::kType; + + explicit ErrorChunk(Parameters error_causes) + : error_causes_(std::move(error_causes)) {} + + ErrorChunk(ErrorChunk&& other) = default; + ErrorChunk& operator=(ErrorChunk&& other) = default; + + static absl::optional Parse(rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + const Parameters& error_causes() const { return error_causes_; } + + private: + Parameters error_causes_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_ERROR_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/error_chunk_test.cc b/net/dcsctp/packet/chunk/error_chunk_test.cc new file mode 100644 index 0000000000..f2b8be1edc --- /dev/null +++ b/net/dcsctp/packet/chunk/error_chunk_test.cc @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/error_chunk.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(ErrorChunkTest, FromCapture) { + /* + ERROR chunk + Chunk type: ERROR (9) + Chunk flags: 0x00 + Chunk length: 12 + Unrecognized chunk type cause (Type: 73 (unknown)) + */ + + uint8_t data[] = {0x09, 0x00, 0x00, 0x0c, 0x00, 0x06, + 0x00, 0x08, 0x49, 0x00, 0x00, 0x04}; + + ASSERT_HAS_VALUE_AND_ASSIGN(ErrorChunk chunk, ErrorChunk::Parse(data)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + UnrecognizedChunkTypeCause cause, + chunk.error_causes().get()); + + EXPECT_THAT(cause.unrecognized_chunk(), ElementsAre(0x49, 0x00, 0x00, 0x04)); +} + +TEST(ErrorChunkTest, SerializeAndDeserialize) { + ErrorChunk chunk(Parameters::Builder() + .Add(UnrecognizedChunkTypeCause({1, 2, 3, 4})) + .Build()); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(ErrorChunk deserialized, + ErrorChunk::Parse(serialized)); + ASSERT_HAS_VALUE_AND_ASSIGN( + UnrecognizedChunkTypeCause cause, + deserialized.error_causes().get()); + + EXPECT_THAT(cause.unrecognized_chunk(), ElementsAre(1, 2, 3, 4)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/forward_tsn_chunk.cc b/net/dcsctp/packet/chunk/forward_tsn_chunk.cc new file mode 100644 index 0000000000..f01505094d --- /dev/null +++ b/net/dcsctp/packet/chunk/forward_tsn_chunk.cc @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" + +#include +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc3758#section-3.2 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 192 | Flags = 0x00 | Length = Variable | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | New Cumulative TSN | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream-1 | Stream Sequence-1 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ / +// / \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream-N | Stream Sequence-N | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int ForwardTsnChunk::kType; + +absl::optional ForwardTsnChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + TSN new_cumulative_tsn(reader->Load32<4>()); + + size_t streams_skipped = + reader->variable_data_size() / kSkippedStreamBufferSize; + + std::vector skipped_streams; + skipped_streams.reserve(streams_skipped); + for (size_t i = 0; i < streams_skipped; ++i) { + BoundedByteReader sub_reader = + reader->sub_reader(i * + kSkippedStreamBufferSize); + + StreamID stream_id(sub_reader.Load16<0>()); + SSN 
ssn(sub_reader.Load16<2>()); + skipped_streams.emplace_back(stream_id, ssn); + } + return ForwardTsnChunk(new_cumulative_tsn, std::move(skipped_streams)); +} + +void ForwardTsnChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView skipped = skipped_streams(); + size_t variable_size = skipped.size() * kSkippedStreamBufferSize; + BoundedByteWriter writer = AllocateTLV(out, variable_size); + + writer.Store32<4>(*new_cumulative_tsn()); + for (size_t i = 0; i < skipped.size(); ++i) { + BoundedByteWriter sub_writer = + writer.sub_writer(i * + kSkippedStreamBufferSize); + sub_writer.Store16<0>(*skipped[i].stream_id); + sub_writer.Store16<2>(*skipped[i].ssn); + } +} + +std::string ForwardTsnChunk::ToString() const { + rtc::StringBuilder sb; + sb << "FORWARD-TSN, new_cumulative_tsn=" << *new_cumulative_tsn(); + return sb.str(); +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/forward_tsn_chunk.h b/net/dcsctp/packet/chunk/forward_tsn_chunk.h new file mode 100644 index 0000000000..b9ef666f41 --- /dev/null +++ b/net/dcsctp/packet/chunk/forward_tsn_chunk.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_FORWARD_TSN_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_FORWARD_TSN_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc3758#section-3.2 +struct ForwardTsnChunkConfig : ChunkConfig { + static constexpr int kType = 192; + static constexpr size_t kHeaderSize = 8; + static constexpr size_t kVariableLengthAlignment = 4; +}; + +class ForwardTsnChunk : public AnyForwardTsnChunk, + public TLVTrait { + public: + static constexpr int kType = ForwardTsnChunkConfig::kType; + + ForwardTsnChunk(TSN new_cumulative_tsn, + std::vector skipped_streams) + : AnyForwardTsnChunk(new_cumulative_tsn, std::move(skipped_streams)) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + private: + static constexpr size_t kSkippedStreamBufferSize = 4; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_FORWARD_TSN_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/forward_tsn_chunk_test.cc b/net/dcsctp/packet/chunk/forward_tsn_chunk_test.cc new file mode 100644 index 0000000000..9420c1f2ef --- /dev/null +++ b/net/dcsctp/packet/chunk/forward_tsn_chunk_test.cc @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(ForwardTsnChunkTest, FromCapture) { + /* + FORWARD_TSN chunk(Cumulative TSN: 1905748778) + Chunk type: FORWARD_TSN (192) + Chunk flags: 0x00 + Chunk length: 8 + New cumulative TSN: 1905748778 + */ + + uint8_t data[] = {0xc0, 0x00, 0x00, 0x08, 0x71, 0x97, 0x6b, 0x2a}; + + ASSERT_HAS_VALUE_AND_ASSIGN(ForwardTsnChunk chunk, + ForwardTsnChunk::Parse(data)); + EXPECT_EQ(*chunk.new_cumulative_tsn(), 1905748778u); +} + +TEST(ForwardTsnChunkTest, SerializeAndDeserialize) { + ForwardTsnChunk chunk( + TSN(123), {ForwardTsnChunk::SkippedStream(StreamID(1), SSN(23)), + ForwardTsnChunk::SkippedStream(StreamID(42), SSN(99))}); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(ForwardTsnChunk deserialized, + ForwardTsnChunk::Parse(serialized)); + EXPECT_EQ(*deserialized.new_cumulative_tsn(), 123u); + EXPECT_THAT( + deserialized.skipped_streams(), + ElementsAre(ForwardTsnChunk::SkippedStream(StreamID(1), SSN(23)), + ForwardTsnChunk::SkippedStream(StreamID(42), SSN(99)))); + + EXPECT_EQ(deserialized.ToString(), "FORWARD-TSN, new_cumulative_tsn=123"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/forward_tsn_common.h b/net/dcsctp/packet/chunk/forward_tsn_common.h new file mode 100644 index 0000000000..37bd2aafff --- /dev/null +++ b/net/dcsctp/packet/chunk/forward_tsn_common.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_FORWARD_TSN_COMMON_H_ +#define NET_DCSCTP_PACKET_CHUNK_FORWARD_TSN_COMMON_H_ +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" + +namespace dcsctp { + +// Base class for both ForwardTsnChunk and IForwardTsnChunk +class AnyForwardTsnChunk : public Chunk { + public: + struct SkippedStream { + SkippedStream(StreamID stream_id, SSN ssn) + : stream_id(stream_id), ssn(ssn), unordered(false), message_id(0) {} + SkippedStream(IsUnordered unordered, StreamID stream_id, MID message_id) + : stream_id(stream_id), + ssn(0), + unordered(unordered), + message_id(message_id) {} + + StreamID stream_id; + + // Set for FORWARD_TSN + SSN ssn; + + // Set for I-FORWARD_TSN + IsUnordered unordered; + MID message_id; + + bool operator==(const SkippedStream& other) const { + return stream_id == other.stream_id && ssn == other.ssn && + unordered == other.unordered && message_id == other.message_id; + } + }; + + AnyForwardTsnChunk(TSN new_cumulative_tsn, + std::vector skipped_streams) + : new_cumulative_tsn_(new_cumulative_tsn), + skipped_streams_(std::move(skipped_streams)) {} + + TSN new_cumulative_tsn() const { return new_cumulative_tsn_; } + + rtc::ArrayView skipped_streams() const { + return skipped_streams_; + } + + private: + TSN new_cumulative_tsn_; + std::vector skipped_streams_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_FORWARD_TSN_COMMON_H_ diff --git a/net/dcsctp/packet/chunk/heartbeat_ack_chunk.cc b/net/dcsctp/packet/chunk/heartbeat_ack_chunk.cc new file mode 100644 index 0000000000..3cbcd09c75 --- /dev/null +++ b/net/dcsctp/packet/chunk/heartbeat_ack_chunk.cc @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.6 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 5 | Chunk Flags | Heartbeat Ack Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / Heartbeat Information TLV (Variable-Length) / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int HeartbeatAckChunk::kType; + +absl::optional HeartbeatAckChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + absl::optional parameters = + Parameters::Parse(reader->variable_data()); + if (!parameters.has_value()) { + return absl::nullopt; + } + return HeartbeatAckChunk(*std::move(parameters)); +} + +void HeartbeatAckChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView parameters = parameters_.data(); + BoundedByteWriter writer = AllocateTLV(out, parameters.size()); + writer.CopyToVariableData(parameters); +} + +std::string HeartbeatAckChunk::ToString() const { + return "HEARTBEAT-ACK"; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/heartbeat_ack_chunk.h 
b/net/dcsctp/packet/chunk/heartbeat_ack_chunk.h new file mode 100644 index 0000000000..a6479f78b0 --- /dev/null +++ b/net/dcsctp/packet/chunk/heartbeat_ack_chunk.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_HEARTBEAT_ACK_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_HEARTBEAT_ACK_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.6 +struct HeartbeatAckChunkConfig : ChunkConfig { + static constexpr int kType = 5; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class HeartbeatAckChunk : public Chunk, + public TLVTrait { + public: + static constexpr int kType = HeartbeatAckChunkConfig::kType; + + explicit HeartbeatAckChunk(Parameters parameters) + : parameters_(std::move(parameters)) {} + + HeartbeatAckChunk(HeartbeatAckChunk&& other) = default; + HeartbeatAckChunk& operator=(HeartbeatAckChunk&& other) = default; + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + const Parameters& parameters() const { return parameters_; } + + absl::optional info() const { + return parameters_.get(); + } + + private: + Parameters parameters_; +}; +} // 
namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_HEARTBEAT_ACK_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/heartbeat_ack_chunk_test.cc b/net/dcsctp/packet/chunk/heartbeat_ack_chunk_test.cc new file mode 100644 index 0000000000..e4d0dd1489 --- /dev/null +++ b/net/dcsctp/packet/chunk/heartbeat_ack_chunk_test.cc @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(HeartbeatAckChunkTest, FromCapture) { + /* + HEARTBEAT_ACK chunk (Information: 40 bytes) + Chunk type: HEARTBEAT_ACK (5) + Chunk flags: 0x00 + Chunk length: 44 + Heartbeat info parameter (Information: 36 bytes) + Parameter type: Heartbeat info (0x0001) + Parameter length: 40 + Heartbeat information: ad2436603726070000000000000000007b1000000100… + */ + + uint8_t data[] = {0x05, 0x00, 0x00, 0x2c, 0x00, 0x01, 0x00, 0x28, 0xad, + 0x24, 0x36, 0x60, 0x37, 0x26, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x10, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatAckChunk chunk, + HeartbeatAckChunk::Parse(data)); + + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatInfoParameter info, chunk.info()); + + EXPECT_THAT( 
+ info.info(), + ElementsAre(0xad, 0x24, 0x36, 0x60, 0x37, 0x26, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x10, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)); +} + +TEST(HeartbeatAckChunkTest, SerializeAndDeserialize) { + uint8_t info_data[] = {1, 2, 3, 4}; + Parameters parameters = + Parameters::Builder().Add(HeartbeatInfoParameter(info_data)).Build(); + HeartbeatAckChunk chunk(std::move(parameters)); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatAckChunk deserialized, + HeartbeatAckChunk::Parse(serialized)); + + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatInfoParameter info, deserialized.info()); + + EXPECT_THAT(info.info(), ElementsAre(1, 2, 3, 4)); + + EXPECT_EQ(deserialized.ToString(), "HEARTBEAT-ACK"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/heartbeat_request_chunk.cc b/net/dcsctp/packet/chunk/heartbeat_request_chunk.cc new file mode 100644 index 0000000000..d759d6b16d --- /dev/null +++ b/net/dcsctp/packet/chunk/heartbeat_request_chunk.cc @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.5 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 4 | Chunk Flags | Heartbeat Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / Heartbeat Information TLV (Variable-Length) / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int HeartbeatRequestChunk::kType; + +absl::optional HeartbeatRequestChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + absl::optional parameters = + Parameters::Parse(reader->variable_data()); + if (!parameters.has_value()) { + return absl::nullopt; + } + return HeartbeatRequestChunk(*std::move(parameters)); +} + +void HeartbeatRequestChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView parameters = parameters_.data(); + BoundedByteWriter writer = AllocateTLV(out, parameters.size()); + writer.CopyToVariableData(parameters); +} + +std::string HeartbeatRequestChunk::ToString() const { + return "HEARTBEAT"; +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/heartbeat_request_chunk.h b/net/dcsctp/packet/chunk/heartbeat_request_chunk.h new file mode 100644 index 0000000000..fe2ce19504 --- /dev/null +++ b/net/dcsctp/packet/chunk/heartbeat_request_chunk.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_HEARTBEAT_REQUEST_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_HEARTBEAT_REQUEST_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { +// https://tools.ietf.org/html/rfc4960#section-3.3.5 +struct HeartbeatRequestChunkConfig : ChunkConfig { + static constexpr int kType = 4; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class HeartbeatRequestChunk : public Chunk, + public TLVTrait { + public: + static constexpr int kType = HeartbeatRequestChunkConfig::kType; + + explicit HeartbeatRequestChunk(Parameters parameters) + : parameters_(std::move(parameters)) {} + + HeartbeatRequestChunk(HeartbeatRequestChunk&& other) = default; + HeartbeatRequestChunk& operator=(HeartbeatRequestChunk&& other) = default; + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + const Parameters& parameters() const { return parameters_; } + Parameters extract_parameters() && { return std::move(parameters_); } + absl::optional info() const { + return parameters_.get(); + } + + private: + Parameters parameters_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_HEARTBEAT_REQUEST_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/heartbeat_request_chunk_test.cc 
b/net/dcsctp/packet/chunk/heartbeat_request_chunk_test.cc new file mode 100644 index 0000000000..94911fe28b --- /dev/null +++ b/net/dcsctp/packet/chunk/heartbeat_request_chunk_test.cc @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(HeartbeatRequestChunkTest, FromCapture) { + /* + HEARTBEAT chunk (Information: 40 bytes) + Chunk type: HEARTBEAT (4) + Chunk flags: 0x00 + Chunk length: 44 + Heartbeat info parameter (Information: 36 bytes) + Parameter type: Heartbeat info (0x0001) + Parameter length: 40 + Heartbeat information: ad2436603726070000000000000000007b10000001… + */ + + uint8_t data[] = {0x04, 0x00, 0x00, 0x2c, 0x00, 0x01, 0x00, 0x28, 0xad, + 0x24, 0x36, 0x60, 0x37, 0x26, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x10, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatRequestChunk chunk, + HeartbeatRequestChunk::Parse(data)); + + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatInfoParameter info, chunk.info()); + + EXPECT_THAT( + info.info(), + ElementsAre(0xad, 0x24, 0x36, 0x60, 0x37, 0x26, 0x07, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x7b, 0x10, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)); +} + +TEST(HeartbeatRequestChunkTest, SerializeAndDeserialize) { + uint8_t info_data[] = {1, 2, 3, 4}; + Parameters parameters = + Parameters::Builder().Add(HeartbeatInfoParameter(info_data)).Build(); + HeartbeatRequestChunk chunk(std::move(parameters)); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatRequestChunk deserialized, + HeartbeatRequestChunk::Parse(serialized)); + + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatInfoParameter info, deserialized.info()); + + EXPECT_THAT(info.info(), ElementsAre(1, 2, 3, 4)); + + EXPECT_EQ(deserialized.ToString(), "HEARTBEAT"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/idata_chunk.cc b/net/dcsctp/packet/chunk/idata_chunk.cc new file mode 100644 index 0000000000..378c527909 --- /dev/null +++ b/net/dcsctp/packet/chunk/idata_chunk.cc @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/idata_chunk.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/chunk/data_common.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc8260#section-2.1 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 64 | Res |I|U|B|E| Length = Variable | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | TSN | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Identifier | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Message Identifier | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Payload Protocol Identifier / Fragment Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / User Data / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int IDataChunk::kType; + +absl::optional IDataChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + uint8_t flags = reader->Load8<1>(); + TSN tsn(reader->Load32<4>()); + StreamID stream_identifier(reader->Load16<8>()); + MID message_id(reader->Load32<12>()); + uint32_t ppid_or_fsn = reader->Load32<16>(); + + Options options; + options.is_end = Data::IsEnd((flags & (1 << kFlagsBitEnd)) != 0); + options.is_beginning = + Data::IsBeginning((flags & (1 << kFlagsBitBeginning)) != 0); + options.is_unordered = IsUnordered((flags & (1 << kFlagsBitUnordered)) != 0); + options.immediate_ack = + ImmediateAckFlag((flags & (1 << kFlagsBitImmediateAck)) != 0); + + return 
IDataChunk(tsn, stream_identifier, message_id, + PPID(options.is_beginning ? ppid_or_fsn : 0), + FSN(options.is_beginning ? 0 : ppid_or_fsn), + std::vector(reader->variable_data().begin(), + reader->variable_data().end()), + options); +} + +void IDataChunk::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out, payload().size()); + + writer.Store8<1>( + (*options().is_end ? (1 << kFlagsBitEnd) : 0) | + (*options().is_beginning ? (1 << kFlagsBitBeginning) : 0) | + (*options().is_unordered ? (1 << kFlagsBitUnordered) : 0) | + (*options().immediate_ack ? (1 << kFlagsBitImmediateAck) : 0)); + writer.Store32<4>(*tsn()); + writer.Store16<8>(*stream_id()); + writer.Store32<12>(*message_id()); + writer.Store32<16>(options().is_beginning ? *ppid() : *fsn()); + writer.CopyToVariableData(payload()); +} + +std::string IDataChunk::ToString() const { + rtc::StringBuilder sb; + sb << "I-DATA, type=" << (options().is_unordered ? "unordered" : "ordered") + << "::" + << (*options().is_beginning && *options().is_end + ? "complete" + : *options().is_beginning ? "first" + : *options().is_end ? "last" : "middle") + << ", tsn=" << *tsn() << ", stream_id=" << *stream_id() + << ", message_id=" << *message_id(); + + if (*options().is_beginning) { + sb << ", ppid=" << *ppid(); + } else { + sb << ", fsn=" << *fsn(); + } + sb << ", length=" << payload().size(); + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/idata_chunk.h b/net/dcsctp/packet/chunk/idata_chunk.h new file mode 100644 index 0000000000..8cdf2a1fc4 --- /dev/null +++ b/net/dcsctp/packet/chunk/idata_chunk.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_IDATA_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_IDATA_CHUNK_H_ +#include +#include + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/chunk/data_common.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc8260#section-2.1 +struct IDataChunkConfig : ChunkConfig { + static constexpr int kType = 64; + static constexpr size_t kHeaderSize = 20; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class IDataChunk : public AnyDataChunk, public TLVTrait { + public: + static constexpr int kType = IDataChunkConfig::kType; + + // Exposed to allow the retransmission queue to make room for the correct + // header size. + static constexpr size_t kHeaderSize = IDataChunkConfig::kHeaderSize; + IDataChunk(TSN tsn, + StreamID stream_id, + MID message_id, + PPID ppid, + FSN fsn, + std::vector payload, + const Options& options) + : AnyDataChunk(tsn, + stream_id, + SSN(0), + message_id, + fsn, + ppid, + std::move(payload), + options) {} + + explicit IDataChunk(TSN tsn, Data&& data, bool immediate_ack) + : AnyDataChunk(tsn, std::move(data), immediate_ack) {} + + static absl::optional Parse(rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_IDATA_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/idata_chunk_test.cc b/net/dcsctp/packet/chunk/idata_chunk_test.cc new file mode 100644 index 0000000000..fea492d71e --- /dev/null +++ b/net/dcsctp/packet/chunk/idata_chunk_test.cc @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/idata_chunk.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(IDataChunkTest, AtBeginningFromCapture) { + /* + I_DATA chunk(ordered, first segment, TSN: 2487901653, SID: 1, MID: 0, + payload length: 1180 bytes) + Chunk type: I_DATA (64) + Chunk flags: 0x02 + Chunk length: 1200 + Transmission sequence number: 2487901653 + Stream identifier: 0x0001 + Reserved: 0 + Message identifier: 0 + Payload protocol identifier: WebRTC Binary (53) + Reassembled Message in frame: 39 + */ + + uint8_t data[] = {0x40, 0x02, 0x00, 0x15, 0x94, 0x4a, 0x5d, 0xd5, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x35, 0x01, 0x00, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN(IDataChunk chunk, IDataChunk::Parse(data)); + EXPECT_EQ(*chunk.tsn(), 2487901653); + EXPECT_EQ(*chunk.stream_id(), 1); + EXPECT_EQ(*chunk.message_id(), 0u); + EXPECT_EQ(*chunk.ppid(), 53u); + EXPECT_EQ(*chunk.fsn(), 0u); // Not provided (so set to zero) +} + +TEST(IDataChunkTest, AtBeginningSerializeAndDeserialize) { + IDataChunk::Options options; + options.is_beginning = Data::IsBeginning(true); + IDataChunk chunk(TSN(123), StreamID(456), MID(789), PPID(53), FSN(0), {1}, + options); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(IDataChunk deserialized, + IDataChunk::Parse(serialized)); + EXPECT_EQ(*deserialized.tsn(), 123u); + EXPECT_EQ(*deserialized.stream_id(), 456u); + 
EXPECT_EQ(*deserialized.message_id(), 789u); + EXPECT_EQ(*deserialized.ppid(), 53u); + EXPECT_EQ(*deserialized.fsn(), 0u); + + EXPECT_EQ(deserialized.ToString(), + "I-DATA, type=ordered::first, tsn=123, stream_id=456, " + "message_id=789, ppid=53, length=1"); +} + +TEST(IDataChunkTest, InMiddleFromCapture) { + /* + I_DATA chunk(ordered, last segment, TSN: 2487901706, SID: 3, MID: 1, + FSN: 8, payload length: 560 bytes) + Chunk type: I_DATA (64) + Chunk flags: 0x01 + Chunk length: 580 + Transmission sequence number: 2487901706 + Stream identifier: 0x0003 + Reserved: 0 + Message identifier: 1 + Fragment sequence number: 8 + Reassembled SCTP Fragments (10000 bytes, 9 fragments): + */ + + uint8_t data[] = {0x40, 0x01, 0x00, 0x15, 0x94, 0x4a, 0x5e, 0x0a, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN(IDataChunk chunk, IDataChunk::Parse(data)); + EXPECT_EQ(*chunk.tsn(), 2487901706); + EXPECT_EQ(*chunk.stream_id(), 3u); + EXPECT_EQ(*chunk.message_id(), 1u); + EXPECT_EQ(*chunk.ppid(), 0u); // Not provided (so set to zero) + EXPECT_EQ(*chunk.fsn(), 8u); +} + +TEST(IDataChunkTest, InMiddleSerializeAndDeserialize) { + IDataChunk chunk(TSN(123), StreamID(456), MID(789), PPID(0), FSN(101112), + {1, 2, 3}, /*options=*/{}); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(IDataChunk deserialized, + IDataChunk::Parse(serialized)); + EXPECT_EQ(*deserialized.tsn(), 123u); + EXPECT_EQ(*deserialized.stream_id(), 456u); + EXPECT_EQ(*deserialized.message_id(), 789u); + EXPECT_EQ(*deserialized.ppid(), 0u); + EXPECT_EQ(*deserialized.fsn(), 101112u); + EXPECT_THAT(deserialized.payload(), ElementsAre(1, 2, 3)); + + EXPECT_EQ(deserialized.ToString(), + "I-DATA, type=ordered::middle, tsn=123, stream_id=456, " + "message_id=789, fsn=101112, length=3"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/iforward_tsn_chunk.cc 
b/net/dcsctp/packet/chunk/iforward_tsn_chunk.cc new file mode 100644 index 0000000000..a647a8bf8a --- /dev/null +++ b/net/dcsctp/packet/chunk/iforward_tsn_chunk.cc @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" + +#include +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc8260#section-2.3.1 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 194 | Flags = 0x00 | Length = Variable | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | New Cumulative TSN | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Identifier | Reserved |U| +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Message Identifier | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / / +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Identifier | Reserved |U| +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Message Identifier | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int 
IForwardTsnChunk::kType; + +absl::optional IForwardTsnChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + TSN new_cumulative_tsn(reader->Load32<4>()); + + size_t streams_skipped = + reader->variable_data_size() / kSkippedStreamBufferSize; + std::vector skipped_streams; + skipped_streams.reserve(streams_skipped); + size_t offset = 0; + for (size_t i = 0; i < streams_skipped; ++i) { + BoundedByteReader sub_reader = + reader->sub_reader(offset); + + StreamID stream_id(sub_reader.Load16<0>()); + IsUnordered unordered(sub_reader.Load8<3>() & 0x01); + MID message_id(sub_reader.Load32<4>()); + skipped_streams.emplace_back(unordered, stream_id, message_id); + offset += kSkippedStreamBufferSize; + } + RTC_DCHECK(offset == reader->variable_data_size()); + return IForwardTsnChunk(new_cumulative_tsn, std::move(skipped_streams)); +} + +void IForwardTsnChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView skipped = skipped_streams(); + size_t variable_size = skipped.size() * kSkippedStreamBufferSize; + BoundedByteWriter writer = AllocateTLV(out, variable_size); + + writer.Store32<4>(*new_cumulative_tsn()); + size_t offset = 0; + for (size_t i = 0; i < skipped.size(); ++i) { + BoundedByteWriter sub_writer = + writer.sub_writer(offset); + + sub_writer.Store16<0>(*skipped[i].stream_id); + sub_writer.Store8<3>(skipped[i].unordered ? 
1 : 0); + sub_writer.Store32<4>(*skipped[i].message_id); + offset += kSkippedStreamBufferSize; + } + RTC_DCHECK(offset == variable_size); +} + +std::string IForwardTsnChunk::ToString() const { + rtc::StringBuilder sb; + sb << "I-FORWARD-TSN, new_cumulative_tsn=" << *new_cumulative_tsn(); + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/iforward_tsn_chunk.h b/net/dcsctp/packet/chunk/iforward_tsn_chunk.h new file mode 100644 index 0000000000..54d23f7a83 --- /dev/null +++ b/net/dcsctp/packet/chunk/iforward_tsn_chunk.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_IFORWARD_TSN_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_IFORWARD_TSN_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc8260#section-2.3.1 +struct IForwardTsnChunkConfig : ChunkConfig { + static constexpr int kType = 194; + static constexpr size_t kHeaderSize = 8; + static constexpr size_t kVariableLengthAlignment = 8; +}; + +class IForwardTsnChunk : public AnyForwardTsnChunk, + public TLVTrait { + public: + static constexpr int kType = IForwardTsnChunkConfig::kType; + + IForwardTsnChunk(TSN new_cumulative_tsn, + std::vector skipped_streams) + : AnyForwardTsnChunk(new_cumulative_tsn, std::move(skipped_streams)) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void 
SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + private: + static constexpr size_t kSkippedStreamBufferSize = 8; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_IFORWARD_TSN_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/iforward_tsn_chunk_test.cc b/net/dcsctp/packet/chunk/iforward_tsn_chunk_test.cc new file mode 100644 index 0000000000..6a89433be1 --- /dev/null +++ b/net/dcsctp/packet/chunk/iforward_tsn_chunk_test.cc @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(IForwardTsnChunkTest, FromCapture) { + /* + I_FORWARD_TSN chunk(Cumulative TSN: 3094631148) + Chunk type: I_FORWARD_TSN (194) + Chunk flags: 0x00 + Chunk length: 16 + New cumulative TSN: 3094631148 + Stream identifier: 1 + Flags: 0x0000 + Message identifier: 2 + */ + + uint8_t data[] = {0xc2, 0x00, 0x00, 0x10, 0xb8, 0x74, 0x52, 0xec, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}; + + ASSERT_HAS_VALUE_AND_ASSIGN(IForwardTsnChunk chunk, + IForwardTsnChunk::Parse(data)); + EXPECT_EQ(*chunk.new_cumulative_tsn(), 3094631148u); + EXPECT_THAT(chunk.skipped_streams(), + ElementsAre(IForwardTsnChunk::SkippedStream( + IsUnordered(false), StreamID(1), MID(2)))); +} + +TEST(IForwardTsnChunkTest, SerializeAndDeserialize) { + 
IForwardTsnChunk chunk( + TSN(123), {IForwardTsnChunk::SkippedStream(IsUnordered(false), + StreamID(1), MID(23)), + IForwardTsnChunk::SkippedStream(IsUnordered(true), + StreamID(42), MID(99))}); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(IForwardTsnChunk deserialized, + IForwardTsnChunk::Parse(serialized)); + EXPECT_EQ(*deserialized.new_cumulative_tsn(), 123u); + EXPECT_THAT(deserialized.skipped_streams(), + ElementsAre(IForwardTsnChunk::SkippedStream(IsUnordered(false), + StreamID(1), MID(23)), + IForwardTsnChunk::SkippedStream( + IsUnordered(true), StreamID(42), MID(99)))); + + EXPECT_EQ(deserialized.ToString(), "I-FORWARD-TSN, new_cumulative_tsn=123"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/init_ack_chunk.cc b/net/dcsctp/packet/chunk/init_ack_chunk.cc new file mode 100644 index 0000000000..c7ef9da1f1 --- /dev/null +++ b/net/dcsctp/packet/chunk/init_ack_chunk.cc @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/init_ack_chunk.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_format.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.3 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 2 | Chunk Flags | Chunk Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Initiate Tag | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Advertised Receiver Window Credit | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Number of Outbound Streams | Number of Inbound Streams | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Initial TSN | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / Optional/Variable-Length Parameters / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int InitAckChunk::kType; + +absl::optional InitAckChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + VerificationTag initiate_tag(reader->Load32<4>()); + uint32_t a_rwnd = reader->Load32<8>(); + uint16_t nbr_outbound_streams = reader->Load16<12>(); + uint16_t nbr_inbound_streams = reader->Load16<14>(); + TSN initial_tsn(reader->Load32<16>()); + absl::optional parameters = + Parameters::Parse(reader->variable_data()); + if (!parameters.has_value()) { + return absl::nullopt; + } + return InitAckChunk(initiate_tag, a_rwnd, nbr_outbound_streams, + nbr_inbound_streams, initial_tsn, 
*std::move(parameters)); +} + +void InitAckChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView parameters = parameters_.data(); + BoundedByteWriter writer = AllocateTLV(out, parameters.size()); + + writer.Store32<4>(*initiate_tag_); + writer.Store32<8>(a_rwnd_); + writer.Store16<12>(nbr_outbound_streams_); + writer.Store16<14>(nbr_inbound_streams_); + writer.Store32<16>(*initial_tsn_); + writer.CopyToVariableData(parameters); +} + +std::string InitAckChunk::ToString() const { + return rtc::StringFormat("INIT_ACK, initiate_tag=0x%0x, initial_tsn=%u", + *initiate_tag(), *initial_tsn()); +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/init_ack_chunk.h b/net/dcsctp/packet/chunk/init_ack_chunk.h new file mode 100644 index 0000000000..6fcf64b2eb --- /dev/null +++ b/net/dcsctp/packet/chunk/init_ack_chunk.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_INIT_ACK_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_INIT_ACK_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.3 +struct InitAckChunkConfig : ChunkConfig { + static constexpr int kType = 2; + static constexpr size_t kHeaderSize = 20; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class InitAckChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = InitAckChunkConfig::kType; + + InitAckChunk(VerificationTag initiate_tag, + uint32_t a_rwnd, + uint16_t nbr_outbound_streams, + uint16_t nbr_inbound_streams, + TSN initial_tsn, + Parameters parameters) + : initiate_tag_(initiate_tag), + a_rwnd_(a_rwnd), + nbr_outbound_streams_(nbr_outbound_streams), + nbr_inbound_streams_(nbr_inbound_streams), + initial_tsn_(initial_tsn), + parameters_(std::move(parameters)) {} + + InitAckChunk(InitAckChunk&& other) = default; + InitAckChunk& operator=(InitAckChunk&& other) = default; + + static absl::optional Parse(rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + VerificationTag initiate_tag() const { return initiate_tag_; } + uint32_t a_rwnd() const { return a_rwnd_; } + uint16_t nbr_outbound_streams() const { return nbr_outbound_streams_; } + uint16_t nbr_inbound_streams() const { return nbr_inbound_streams_; } + TSN initial_tsn() const { return initial_tsn_; } + const Parameters& parameters() const { return parameters_; } + + private: + VerificationTag initiate_tag_; + uint32_t a_rwnd_; + uint16_t nbr_outbound_streams_; + uint16_t nbr_inbound_streams_; + TSN initial_tsn_; + Parameters parameters_; +}; + +} // namespace dcsctp + +#endif // 
NET_DCSCTP_PACKET_CHUNK_INIT_ACK_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/init_ack_chunk_test.cc b/net/dcsctp/packet/chunk/init_ack_chunk_test.cc new file mode 100644 index 0000000000..184ade747d --- /dev/null +++ b/net/dcsctp/packet/chunk/init_ack_chunk_test.cc @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/init_ack_chunk.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/parameter/state_cookie_parameter.h" +#include "net/dcsctp/packet/parameter/supported_extensions_parameter.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(InitAckChunkTest, FromCapture) { + /* + INIT_ACK chunk (Outbound streams: 1000, inbound streams: 2048) + Chunk type: INIT_ACK (2) + Chunk flags: 0x00 + Chunk length: 292 + Initiate tag: 0x579c2f98 + Advertised receiver window credit (a_rwnd): 131072 + Number of outbound streams: 1000 + Number of inbound streams: 2048 + Initial TSN: 1670811335 + Forward TSN supported parameter + Parameter type: Forward TSN supported (0xc000) + Parameter length: 4 + Supported Extensions parameter (Supported types: FORWARD_TSN, RE_CONFIG) + Parameter type: Supported Extensions (0x8008) + Parameter length: 6 + Supported chunk type: FORWARD_TSN (192) + Supported chunk type: RE_CONFIG (130) + Parameter padding: 0000 + State cookie parameter (Cookie 
length: 256 bytes) + Parameter type: State cookie (0x0007) + Parameter length: 260 + State cookie: 4b414d452d42534420312e310000000096b8386000000000… + */ + + uint8_t data[] = { + 0x02, 0x00, 0x01, 0x24, 0x57, 0x9c, 0x2f, 0x98, 0x00, 0x02, 0x00, 0x00, + 0x03, 0xe8, 0x08, 0x00, 0x63, 0x96, 0x8e, 0xc7, 0xc0, 0x00, 0x00, 0x04, + 0x80, 0x08, 0x00, 0x06, 0xc0, 0x82, 0x00, 0x00, 0x00, 0x07, 0x01, 0x04, + 0x4b, 0x41, 0x4d, 0x45, 0x2d, 0x42, 0x53, 0x44, 0x20, 0x31, 0x2e, 0x31, + 0x00, 0x00, 0x00, 0x00, 0x96, 0xb8, 0x38, 0x60, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x5a, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xea, 0x00, 0x00, + 0xb5, 0xaa, 0x19, 0xea, 0x31, 0xef, 0xa4, 0x2b, 0x90, 0x16, 0x7a, 0xde, + 0x57, 0x9c, 0x2f, 0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x13, 0x88, 0x13, 0x88, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x5a, 0xde, 0x7a, 0x16, 0x90, + 0x00, 0x02, 0x00, 0x00, 0x03, 0xe8, 0x03, 0xe8, 0x25, 0x0d, 0x37, 0xe8, + 0x80, 0x00, 0x00, 0x04, 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, + 0xc0, 0x0f, 0xc1, 0x80, 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, + 0xab, 0x31, 0x44, 0x62, 0x12, 0x1a, 0x15, 0x13, 0xfd, 0x5a, 0x5f, 0x69, + 0xef, 0xaa, 0x06, 0xe9, 0xab, 0xd7, 0x48, 0xcc, 0x3b, 0xd1, 0x4b, 0x60, + 0xed, 0x7f, 0xa6, 0x44, 0xce, 0x4d, 0xd2, 0xad, 0x80, 0x04, 0x00, 0x06, + 0x00, 0x01, 0x00, 0x00, 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00, + 0x02, 0x00, 0x01, 0x24, 0x57, 0x9c, 0x2f, 0x98, 0x00, 0x02, 0x00, 0x00, + 0x03, 0xe8, 0x08, 0x00, 0x63, 0x96, 0x8e, 0xc7, 0xc0, 0x00, 0x00, 0x04, + 0x80, 0x08, 0x00, 0x06, 0xc0, 0x82, 0x00, 0x00, 0x51, 0x95, 0x01, 0x88, + 0x0d, 0x80, 0x7b, 0x19, 0xe7, 0xf9, 0xc6, 0x18, 0x5c, 0x4a, 0xbf, 0x39, + 0x32, 0xe5, 0x63, 0x8e}; + + 
ASSERT_HAS_VALUE_AND_ASSIGN(InitAckChunk chunk, InitAckChunk::Parse(data)); + + EXPECT_EQ(chunk.initiate_tag(), VerificationTag(0x579c2f98u)); + EXPECT_EQ(chunk.a_rwnd(), 131072u); + EXPECT_EQ(chunk.nbr_outbound_streams(), 1000u); + EXPECT_EQ(chunk.nbr_inbound_streams(), 2048u); + EXPECT_EQ(chunk.initial_tsn(), TSN(1670811335u)); + EXPECT_TRUE( + chunk.parameters().get().has_value()); + EXPECT_TRUE( + chunk.parameters().get().has_value()); + EXPECT_TRUE(chunk.parameters().get().has_value()); +} + +TEST(InitAckChunkTest, SerializeAndDeserialize) { + uint8_t state_cookie[] = {1, 2, 3, 4, 5}; + Parameters parameters = + Parameters::Builder().Add(StateCookieParameter(state_cookie)).Build(); + InitAckChunk chunk(VerificationTag(123), /*a_rwnd=*/456, + /*nbr_outbound_streams=*/65535, + /*nbr_inbound_streams=*/65534, /*initial_tsn=*/TSN(789), + /*parameters=*/std::move(parameters)); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(InitAckChunk deserialized, + InitAckChunk::Parse(serialized)); + + EXPECT_EQ(chunk.initiate_tag(), VerificationTag(123u)); + EXPECT_EQ(chunk.a_rwnd(), 456u); + EXPECT_EQ(chunk.nbr_outbound_streams(), 65535u); + EXPECT_EQ(chunk.nbr_inbound_streams(), 65534u); + EXPECT_EQ(chunk.initial_tsn(), TSN(789u)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + StateCookieParameter cookie, + deserialized.parameters().get()); + EXPECT_THAT(cookie.data(), ElementsAre(1, 2, 3, 4, 5)); + EXPECT_EQ(deserialized.ToString(), + "INIT_ACK, initiate_tag=0x7b, initial_tsn=789"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/init_chunk.cc b/net/dcsctp/packet/chunk/init_chunk.cc new file mode 100644 index 0000000000..8030107072 --- /dev/null +++ b/net/dcsctp/packet/chunk/init_chunk.cc @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/init_chunk.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_format.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.2 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 1 | Chunk Flags | Chunk Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Initiate Tag | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Advertised Receiver Window Credit (a_rwnd) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Number of Outbound Streams | Number of Inbound Streams | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Initial TSN | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / Optional/Variable-Length Parameters / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int InitChunk::kType; + +absl::optional InitChunk::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + VerificationTag initiate_tag(reader->Load32<4>()); + uint32_t a_rwnd = reader->Load32<8>(); + uint16_t nbr_outbound_streams = reader->Load16<12>(); + 
uint16_t nbr_inbound_streams = reader->Load16<14>(); + TSN initial_tsn(reader->Load32<16>()); + + absl::optional parameters = + Parameters::Parse(reader->variable_data()); + if (!parameters.has_value()) { + return absl::nullopt; + } + return InitChunk(initiate_tag, a_rwnd, nbr_outbound_streams, + nbr_inbound_streams, initial_tsn, *std::move(parameters)); +} + +void InitChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView parameters = parameters_.data(); + BoundedByteWriter writer = AllocateTLV(out, parameters.size()); + + writer.Store32<4>(*initiate_tag_); + writer.Store32<8>(a_rwnd_); + writer.Store16<12>(nbr_outbound_streams_); + writer.Store16<14>(nbr_inbound_streams_); + writer.Store32<16>(*initial_tsn_); + + writer.CopyToVariableData(parameters); +} + +std::string InitChunk::ToString() const { + return rtc::StringFormat("INIT, initiate_tag=0x%0x, initial_tsn=%u", + *initiate_tag(), *initial_tsn()); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/init_chunk.h b/net/dcsctp/packet/chunk/init_chunk.h new file mode 100644 index 0000000000..38f9994caa --- /dev/null +++ b/net/dcsctp/packet/chunk/init_chunk.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_INIT_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_INIT_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.2 +struct InitChunkConfig : ChunkConfig { + static constexpr int kType = 1; + static constexpr size_t kHeaderSize = 20; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class InitChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = InitChunkConfig::kType; + + InitChunk(VerificationTag initiate_tag, + uint32_t a_rwnd, + uint16_t nbr_outbound_streams, + uint16_t nbr_inbound_streams, + TSN initial_tsn, + Parameters parameters) + : initiate_tag_(initiate_tag), + a_rwnd_(a_rwnd), + nbr_outbound_streams_(nbr_outbound_streams), + nbr_inbound_streams_(nbr_inbound_streams), + initial_tsn_(initial_tsn), + parameters_(std::move(parameters)) {} + + InitChunk(InitChunk&& other) = default; + InitChunk& operator=(InitChunk&& other) = default; + + static absl::optional Parse(rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + VerificationTag initiate_tag() const { return initiate_tag_; } + uint32_t a_rwnd() const { return a_rwnd_; } + uint16_t nbr_outbound_streams() const { return nbr_outbound_streams_; } + uint16_t nbr_inbound_streams() const { return nbr_inbound_streams_; } + TSN initial_tsn() const { return initial_tsn_; } + const Parameters& parameters() const { return parameters_; } + + private: + VerificationTag initiate_tag_; + uint32_t a_rwnd_; + uint16_t nbr_outbound_streams_; + uint16_t nbr_inbound_streams_; + TSN initial_tsn_; + Parameters parameters_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_INIT_CHUNK_H_ 
diff --git a/net/dcsctp/packet/chunk/init_chunk_test.cc b/net/dcsctp/packet/chunk/init_chunk_test.cc new file mode 100644 index 0000000000..bd36d6fdf8 --- /dev/null +++ b/net/dcsctp/packet/chunk/init_chunk_test.cc @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/init_chunk.h" + +#include + +#include +#include + +#include "net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/parameter/supported_extensions_parameter.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(InitChunkTest, FromCapture) { + /* + INIT chunk (Outbound streams: 1000, inbound streams: 1000) + Chunk type: INIT (1) + Chunk flags: 0x00 + Chunk length: 90 + Initiate tag: 0xde7a1690 + Advertised receiver window credit (a_rwnd): 131072 + Number of outbound streams: 1000 + Number of inbound streams: 1000 + Initial TSN: 621623272 + ECN parameter + Parameter type: ECN (0x8000) + Parameter length: 4 + Forward TSN supported parameter + Parameter type: Forward TSN supported (0xc000) + Parameter length: 4 + Supported Extensions parameter (Supported types: FORWARD_TSN, AUTH, + ASCONF, ASCONF_ACK, RE_CONFIG) Parameter type: Supported Extensions (0x8008) + Parameter length: 9 + Supported chunk type: FORWARD_TSN (192) + Supported chunk type: AUTH (15) + Supported chunk type: ASCONF (193) + Supported chunk type: ASCONF_ACK (128) + Supported chunk type: RE_CONFIG (130) + Parameter padding: 000000 + Random parameter + 
Parameter type: Random (0x8002) + Parameter length: 36 + Random number: ab314462121a1513fd5a5f69efaa06e9abd748cc3bd14b60… + Requested HMAC Algorithm parameter (Supported HMACs: SHA-1) + Parameter type: Requested HMAC Algorithm (0x8004) + Parameter length: 6 + HMAC identifier: SHA-1 (1) + Parameter padding: 0000 + Authenticated Chunk list parameter (Chunk types to be authenticated: + ASCONF_ACK, ASCONF) Parameter type: Authenticated Chunk list (0x8003) + Parameter length: 6 + Chunk type: ASCONF_ACK (128) + Chunk type: ASCONF (193) + */ + + uint8_t data[] = { + 0x01, 0x00, 0x00, 0x5a, 0xde, 0x7a, 0x16, 0x90, 0x00, 0x02, 0x00, 0x00, + 0x03, 0xe8, 0x03, 0xe8, 0x25, 0x0d, 0x37, 0xe8, 0x80, 0x00, 0x00, 0x04, + 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, + 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0xab, 0x31, 0x44, 0x62, + 0x12, 0x1a, 0x15, 0x13, 0xfd, 0x5a, 0x5f, 0x69, 0xef, 0xaa, 0x06, 0xe9, + 0xab, 0xd7, 0x48, 0xcc, 0x3b, 0xd1, 0x4b, 0x60, 0xed, 0x7f, 0xa6, 0x44, + 0xce, 0x4d, 0xd2, 0xad, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, + 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN(InitChunk chunk, InitChunk::Parse(data)); + + EXPECT_EQ(chunk.initiate_tag(), VerificationTag(0xde7a1690)); + EXPECT_EQ(chunk.a_rwnd(), 131072u); + EXPECT_EQ(chunk.nbr_outbound_streams(), 1000u); + EXPECT_EQ(chunk.nbr_inbound_streams(), 1000u); + EXPECT_EQ(chunk.initial_tsn(), TSN(621623272u)); + EXPECT_TRUE( + chunk.parameters().get().has_value()); + EXPECT_TRUE( + chunk.parameters().get().has_value()); +} + +TEST(InitChunkTest, SerializeAndDeserialize) { + InitChunk chunk(VerificationTag(123), /*a_rwnd=*/456, + /*nbr_outbound_streams=*/65535, + /*nbr_inbound_streams=*/65534, /*initial_tsn=*/TSN(789), + /*parameters=*/Parameters::Builder().Build()); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(InitChunk deserialized, + InitChunk::Parse(serialized)); + + 
EXPECT_EQ(deserialized.initiate_tag(), VerificationTag(123u)); + EXPECT_EQ(deserialized.a_rwnd(), 456u); + EXPECT_EQ(deserialized.nbr_outbound_streams(), 65535u); + EXPECT_EQ(deserialized.nbr_inbound_streams(), 65534u); + EXPECT_EQ(deserialized.initial_tsn(), TSN(789u)); + EXPECT_EQ(deserialized.ToString(), + "INIT, initiate_tag=0x7b, initial_tsn=789"); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/reconfig_chunk.cc b/net/dcsctp/packet/chunk/reconfig_chunk.cc new file mode 100644 index 0000000000..f39f3b619f --- /dev/null +++ b/net/dcsctp/packet/chunk/reconfig_chunk.cc @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-3.1 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 130 | Chunk Flags | Chunk Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / Re-configuration Parameter / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / Re-configuration Parameter (optional) / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int ReConfigChunk::kType; + +absl::optional ReConfigChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + absl::optional parameters = + Parameters::Parse(reader->variable_data()); + if (!parameters.has_value()) { + return absl::nullopt; + } + + return ReConfigChunk(*std::move(parameters)); +} + +void ReConfigChunk::SerializeTo(std::vector& out) const { + rtc::ArrayView parameters = parameters_.data(); + BoundedByteWriter writer = AllocateTLV(out, parameters.size()); + writer.CopyToVariableData(parameters); +} + +std::string ReConfigChunk::ToString() const { + return "RE-CONFIG"; +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/reconfig_chunk.h b/net/dcsctp/packet/chunk/reconfig_chunk.h new file mode 100644 index 0000000000..9d2539a515 --- /dev/null +++ b/net/dcsctp/packet/chunk/reconfig_chunk.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_RECONFIG_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_RECONFIG_CHUNK_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-3.1 +struct ReConfigChunkConfig : ChunkConfig { + static constexpr int kType = 130; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class ReConfigChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = ReConfigChunkConfig::kType; + + explicit ReConfigChunk(Parameters parameters) + : parameters_(std::move(parameters)) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + const Parameters& parameters() const { return parameters_; } + Parameters extract_parameters() { return std::move(parameters_); } + + private: + Parameters parameters_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_RECONFIG_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/reconfig_chunk_test.cc b/net/dcsctp/packet/chunk/reconfig_chunk_test.cc new file mode 100644 index 0000000000..dbf40ff8c0 --- /dev/null +++ b/net/dcsctp/packet/chunk/reconfig_chunk_test.cc @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; +using ::testing::SizeIs; + +TEST(ReConfigChunkTest, FromCapture) { + /* + RE_CONFIG chunk + Chunk type: RE_CONFIG (130) + Chunk flags: 0x00 + Chunk length: 22 + Outgoing SSN reset request parameter + Parameter type: Outgoing SSN reset request (0x000d) + Parameter length: 18 + Re-configuration request sequence number: 2270550051 + Re-configuration response sequence number: 1905748638 + Senders last assigned TSN: 2270550066 + Stream Identifier: 6 + Chunk padding: 0000 + */ + + uint8_t data[] = {0x82, 0x00, 0x00, 0x16, 0x00, 0x0d, 0x00, 0x12, + 0x87, 0x55, 0xd8, 0x23, 0x71, 0x97, 0x6a, 0x9e, + 0x87, 0x55, 0xd8, 0x32, 0x00, 0x06, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN(ReConfigChunk chunk, ReConfigChunk::Parse(data)); + + const Parameters& parameters = chunk.parameters(); + EXPECT_THAT(parameters.descriptors(), SizeIs(1)); + ParameterDescriptor desc = parameters.descriptors()[0]; + ASSERT_EQ(desc.type, OutgoingSSNResetRequestParameter::kType); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req, + OutgoingSSNResetRequestParameter::Parse(desc.data)); + EXPECT_EQ(*req.request_sequence_number(), 2270550051u); + EXPECT_EQ(*req.response_sequence_number(), 1905748638u); 
+ EXPECT_EQ(*req.sender_last_assigned_tsn(), 2270550066u); + EXPECT_THAT(req.stream_ids(), ElementsAre(StreamID(6))); +} + +TEST(ReConfigChunkTest, SerializeAndDeserialize) { + Parameters::Builder params_builder = + Parameters::Builder().Add(OutgoingSSNResetRequestParameter( + ReconfigRequestSN(123), ReconfigRequestSN(456), TSN(789), + {StreamID(42), StreamID(43)})); + + ReConfigChunk chunk(params_builder.Build()); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(ReConfigChunk deserialized, + ReConfigChunk::Parse(serialized)); + + const Parameters& parameters = deserialized.parameters(); + EXPECT_THAT(parameters.descriptors(), SizeIs(1)); + ParameterDescriptor desc = parameters.descriptors()[0]; + ASSERT_EQ(desc.type, OutgoingSSNResetRequestParameter::kType); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req, + OutgoingSSNResetRequestParameter::Parse(desc.data)); + EXPECT_EQ(*req.request_sequence_number(), 123u); + EXPECT_EQ(*req.response_sequence_number(), 456u); + EXPECT_EQ(*req.sender_last_assigned_tsn(), 789u); + EXPECT_THAT(req.stream_ids(), ElementsAre(StreamID(42), StreamID(43))); + + EXPECT_EQ(deserialized.ToString(), "RE-CONFIG"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/sack_chunk.cc b/net/dcsctp/packet/chunk/sack_chunk.cc new file mode 100644 index 0000000000..d80e430082 --- /dev/null +++ b/net/dcsctp/packet/chunk/sack_chunk.cc @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/sack_chunk.h" + +#include + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/str_join.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.4 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 3 |Chunk Flags | Chunk Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cumulative TSN Ack | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Advertised Receiver Window Credit (a_rwnd) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Number of Gap Ack Blocks = N | Number of Duplicate TSNs = X | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Gap Ack Block #1 Start | Gap Ack Block #1 End | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / / +// \ ... \ +// / / +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Gap Ack Block #N Start | Gap Ack Block #N End | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Duplicate TSN 1 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / / +// \ ... 
\ +// / / +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Duplicate TSN X | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int SackChunk::kType; + +absl::optional SackChunk::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + TSN tsn_ack(reader->Load32<4>()); + uint32_t a_rwnd = reader->Load32<8>(); + uint16_t nbr_of_gap_blocks = reader->Load16<12>(); + uint16_t nbr_of_dup_tsns = reader->Load16<14>(); + + if (reader->variable_data_size() != nbr_of_gap_blocks * kGapAckBlockSize + + nbr_of_dup_tsns * kDupTsnBlockSize) { + RTC_DLOG(LS_WARNING) << "Invalid number of gap blocks or duplicate TSNs"; + return absl::nullopt; + } + + std::vector gap_ack_blocks; + gap_ack_blocks.reserve(nbr_of_gap_blocks); + size_t offset = 0; + for (int i = 0; i < nbr_of_gap_blocks; ++i) { + BoundedByteReader sub_reader = + reader->sub_reader(offset); + + uint16_t start = sub_reader.Load16<0>(); + uint16_t end = sub_reader.Load16<2>(); + gap_ack_blocks.emplace_back(start, end); + offset += kGapAckBlockSize; + } + + std::set duplicate_tsns; + for (int i = 0; i < nbr_of_dup_tsns; ++i) { + BoundedByteReader sub_reader = + reader->sub_reader(offset); + + duplicate_tsns.insert(TSN(sub_reader.Load32<0>())); + offset += kDupTsnBlockSize; + } + RTC_DCHECK(offset == reader->variable_data_size()); + + return SackChunk(tsn_ack, a_rwnd, gap_ack_blocks, duplicate_tsns); +} + +void SackChunk::SerializeTo(std::vector& out) const { + int nbr_of_gap_blocks = gap_ack_blocks_.size(); + int nbr_of_dup_tsns = duplicate_tsns_.size(); + size_t variable_size = + nbr_of_gap_blocks * kGapAckBlockSize + nbr_of_dup_tsns * kDupTsnBlockSize; + BoundedByteWriter writer = AllocateTLV(out, variable_size); + + writer.Store32<4>(*cumulative_tsn_ack_); + writer.Store32<8>(a_rwnd_); + writer.Store16<12>(nbr_of_gap_blocks); + writer.Store16<14>(nbr_of_dup_tsns); + + size_t offset = 0; + 
for (int i = 0; i < nbr_of_gap_blocks; ++i) { + BoundedByteWriter sub_writer = + writer.sub_writer(offset); + + sub_writer.Store16<0>(gap_ack_blocks_[i].start); + sub_writer.Store16<2>(gap_ack_blocks_[i].end); + offset += kGapAckBlockSize; + } + + for (TSN tsn : duplicate_tsns_) { + BoundedByteWriter sub_writer = + writer.sub_writer(offset); + + sub_writer.Store32<0>(*tsn); + offset += kDupTsnBlockSize; + } + + RTC_DCHECK(offset == variable_size); +} + +std::string SackChunk::ToString() const { + rtc::StringBuilder sb; + sb << "SACK, cum_ack_tsn=" << *cumulative_tsn_ack() + << ", a_rwnd=" << a_rwnd(); + for (const GapAckBlock& gap : gap_ack_blocks_) { + uint32_t first = *cumulative_tsn_ack_ + gap.start; + uint32_t last = *cumulative_tsn_ack_ + gap.end; + sb << ", gap=" << first << "--" << last; + } + if (!duplicate_tsns_.empty()) { + sb << ", dup_tsns=" + << StrJoin(duplicate_tsns(), ",", + [](rtc::StringBuilder& sb, TSN tsn) { sb << *tsn; }); + } + + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/sack_chunk.h b/net/dcsctp/packet/chunk/sack_chunk.h new file mode 100644 index 0000000000..e6758fa332 --- /dev/null +++ b/net/dcsctp/packet/chunk/sack_chunk.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_SACK_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_SACK_CHUNK_H_ +#include + +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.4 +struct SackChunkConfig : ChunkConfig { + static constexpr int kType = 3; + static constexpr size_t kHeaderSize = 16; + static constexpr size_t kVariableLengthAlignment = 4; +}; + +class SackChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = SackChunkConfig::kType; + + struct GapAckBlock { + GapAckBlock(uint16_t start, uint16_t end) : start(start), end(end) {} + + uint16_t start; + uint16_t end; + + bool operator==(const GapAckBlock& other) const { + return start == other.start && end == other.end; + } + }; + + SackChunk(TSN cumulative_tsn_ack, + uint32_t a_rwnd, + std::vector gap_ack_blocks, + std::set duplicate_tsns) + : cumulative_tsn_ack_(cumulative_tsn_ack), + a_rwnd_(a_rwnd), + gap_ack_blocks_(std::move(gap_ack_blocks)), + duplicate_tsns_(std::move(duplicate_tsns)) {} + static absl::optional Parse(rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + TSN cumulative_tsn_ack() const { return cumulative_tsn_ack_; } + uint32_t a_rwnd() const { return a_rwnd_; } + rtc::ArrayView gap_ack_blocks() const { + return gap_ack_blocks_; + } + const std::set& duplicate_tsns() const { return duplicate_tsns_; } + + private: + static constexpr size_t kGapAckBlockSize = 4; + static constexpr size_t kDupTsnBlockSize = 4; + + const TSN cumulative_tsn_ack_; + const uint32_t a_rwnd_; + std::vector gap_ack_blocks_; + std::set duplicate_tsns_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_SACK_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/sack_chunk_test.cc 
b/net/dcsctp/packet/chunk/sack_chunk_test.cc new file mode 100644 index 0000000000..9122945308 --- /dev/null +++ b/net/dcsctp/packet/chunk/sack_chunk_test.cc @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/sack_chunk.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(SackChunkTest, FromCapture) { + /* + SACK chunk (Cumulative TSN: 916312075, a_rwnd: 126323, + gaps: 2, duplicate TSNs: 1) + Chunk type: SACK (3) + Chunk flags: 0x00 + Chunk length: 28 + Cumulative TSN ACK: 916312075 + Advertised receiver window credit (a_rwnd): 126323 + Number of gap acknowledgement blocks: 2 + Number of duplicated TSNs: 1 + Gap Acknowledgement for TSN 916312077 to 916312081 + Gap Acknowledgement for TSN 916312083 to 916312083 + [Number of TSNs in gap acknowledgement blocks: 6] + Duplicate TSN: 916312081 + + */ + + uint8_t data[] = {0x03, 0x00, 0x00, 0x1c, 0x36, 0x9d, 0xd0, 0x0b, 0x00, 0x01, + 0xed, 0x73, 0x00, 0x02, 0x00, 0x01, 0x00, 0x02, 0x00, 0x06, + 0x00, 0x08, 0x00, 0x08, 0x36, 0x9d, 0xd0, 0x11}; + + ASSERT_HAS_VALUE_AND_ASSIGN(SackChunk chunk, SackChunk::Parse(data)); + + TSN cum_ack_tsn(916312075); + EXPECT_EQ(chunk.cumulative_tsn_ack(), cum_ack_tsn); + EXPECT_EQ(chunk.a_rwnd(), 126323u); + EXPECT_THAT( + chunk.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock( + static_cast(916312077 - *cum_ack_tsn), + static_cast(916312081 - *cum_ack_tsn)), + SackChunk::GapAckBlock( + 
static_cast(916312083 - *cum_ack_tsn), + static_cast(916312083 - *cum_ack_tsn)))); + EXPECT_THAT(chunk.duplicate_tsns(), ElementsAre(TSN(916312081))); +} + +TEST(SackChunkTest, SerializeAndDeserialize) { + SackChunk chunk(TSN(123), /*a_rwnd=*/456, {SackChunk::GapAckBlock(2, 3)}, + {TSN(1), TSN(2), TSN(3)}); + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(SackChunk deserialized, + SackChunk::Parse(serialized)); + + EXPECT_EQ(*deserialized.cumulative_tsn_ack(), 123u); + EXPECT_EQ(deserialized.a_rwnd(), 456u); + EXPECT_THAT(deserialized.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 3))); + EXPECT_THAT(deserialized.duplicate_tsns(), + ElementsAre(TSN(1), TSN(2), TSN(3))); + + EXPECT_EQ(deserialized.ToString(), + "SACK, cum_ack_tsn=123, a_rwnd=456, gap=125--126, dup_tsns=1,2,3"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/shutdown_ack_chunk.cc b/net/dcsctp/packet/chunk/shutdown_ack_chunk.cc new file mode 100644 index 0000000000..d42aceead4 --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_ack_chunk.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/shutdown_ack_chunk.h" + +#include + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.9 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 8 |Chunk Flags | Length = 4 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int ShutdownAckChunk::kType; + +absl::optional ShutdownAckChunk::Parse( + rtc::ArrayView data) { + if (!ParseTLV(data).has_value()) { + return absl::nullopt; + } + return ShutdownAckChunk(); +} + +void ShutdownAckChunk::SerializeTo(std::vector& out) const { + AllocateTLV(out); +} + +std::string ShutdownAckChunk::ToString() const { + return "SHUTDOWN-ACK"; +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/shutdown_ack_chunk.h b/net/dcsctp/packet/chunk/shutdown_ack_chunk.h new file mode 100644 index 0000000000..29c1a98be6 --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_ack_chunk.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_ACK_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_ACK_CHUNK_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.9 +struct ShutdownAckChunkConfig : ChunkConfig { + static constexpr int kType = 8; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class ShutdownAckChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = ShutdownAckChunkConfig::kType; + + ShutdownAckChunk() {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_ACK_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/shutdown_ack_chunk_test.cc b/net/dcsctp/packet/chunk/shutdown_ack_chunk_test.cc new file mode 100644 index 0000000000..ef04ea9892 --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_ack_chunk_test.cc @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/shutdown_ack_chunk.h" + +#include + +#include + +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(ShutdownAckChunkTest, FromCapture) { + /* + SHUTDOWN_ACK chunk + Chunk type: SHUTDOWN_ACK (8) + Chunk flags: 0x00 + Chunk length: 4 + */ + + uint8_t data[] = {0x08, 0x00, 0x00, 0x04}; + + EXPECT_TRUE(ShutdownAckChunk::Parse(data).has_value()); +} + +TEST(ShutdownAckChunkTest, SerializeAndDeserialize) { + ShutdownAckChunk chunk; + + std::vector serialized; + chunk.SerializeTo(serialized); + + EXPECT_TRUE(ShutdownAckChunk::Parse(serialized).has_value()); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/shutdown_chunk.cc b/net/dcsctp/packet/chunk/shutdown_chunk.cc new file mode 100644 index 0000000000..59f806f7f7 --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_chunk.cc @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/shutdown_chunk.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.8 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 7 | Chunk Flags | Length = 8 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cumulative TSN Ack | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int ShutdownChunk::kType; + +absl::optional ShutdownChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + TSN cumulative_tsn_ack(reader->Load32<4>()); + return ShutdownChunk(cumulative_tsn_ack); +} + +void ShutdownChunk::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out); + writer.Store32<4>(*cumulative_tsn_ack_); +} + +std::string ShutdownChunk::ToString() const { + return "SHUTDOWN"; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/shutdown_chunk.h b/net/dcsctp/packet/chunk/shutdown_chunk.h new file mode 100644 index 0000000000..8148cca286 --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_chunk.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_CHUNK_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.8 +struct ShutdownChunkConfig : ChunkConfig { + static constexpr int kType = 7; + static constexpr size_t kHeaderSize = 8; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class ShutdownChunk : public Chunk, public TLVTrait { + public: + static constexpr int kType = ShutdownChunkConfig::kType; + + explicit ShutdownChunk(TSN cumulative_tsn_ack) + : cumulative_tsn_ack_(cumulative_tsn_ack) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + TSN cumulative_tsn_ack() const { return cumulative_tsn_ack_; } + + private: + TSN cumulative_tsn_ack_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/shutdown_chunk_test.cc b/net/dcsctp/packet/chunk/shutdown_chunk_test.cc new file mode 100644 index 0000000000..16d147ca83 --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_chunk_test.cc @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/shutdown_chunk.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" + +namespace dcsctp { +namespace { +TEST(ShutdownChunkTest, FromCapture) { + /* + SHUTDOWN chunk (Cumulative TSN ack: 101831101) + Chunk type: SHUTDOWN (7) + Chunk flags: 0x00 + Chunk length: 8 + Cumulative TSN Ack: 101831101 + */ + + uint8_t data[] = {0x07, 0x00, 0x00, 0x08, 0x06, 0x11, 0xd1, 0xbd}; + + ASSERT_HAS_VALUE_AND_ASSIGN(ShutdownChunk chunk, ShutdownChunk::Parse(data)); + EXPECT_EQ(chunk.cumulative_tsn_ack(), TSN(101831101u)); +} + +TEST(ShutdownChunkTest, SerializeAndDeserialize) { + ShutdownChunk chunk(TSN(12345678)); + + std::vector serialized; + chunk.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(ShutdownChunk deserialized, + ShutdownChunk::Parse(serialized)); + + EXPECT_EQ(deserialized.cumulative_tsn_ack(), TSN(12345678u)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/shutdown_complete_chunk.cc b/net/dcsctp/packet/chunk/shutdown_complete_chunk.cc new file mode 100644 index 0000000000..3f54857437 --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_complete_chunk.cc @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/shutdown_complete_chunk.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.13 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 14 |Reserved |T| Length = 4 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int ShutdownCompleteChunk::kType; + +absl::optional ShutdownCompleteChunk::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + uint8_t flags = reader->Load8<1>(); + bool tag_reflected = (flags & (1 << kFlagsBitT)) != 0; + return ShutdownCompleteChunk(tag_reflected); +} + +void ShutdownCompleteChunk::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out); + writer.Store8<1>(tag_reflected_ ? (1 << kFlagsBitT) : 0); +} + +std::string ShutdownCompleteChunk::ToString() const { + return "SHUTDOWN-COMPLETE"; +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk/shutdown_complete_chunk.h b/net/dcsctp/packet/chunk/shutdown_complete_chunk.h new file mode 100644 index 0000000000..46d28e88dc --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_complete_chunk.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_COMPLETE_CHUNK_H_ +#define NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_COMPLETE_CHUNK_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.13 +struct ShutdownCompleteChunkConfig : ChunkConfig { + static constexpr int kType = 14; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class ShutdownCompleteChunk : public Chunk, + public TLVTrait { + public: + static constexpr int kType = ShutdownCompleteChunkConfig::kType; + + explicit ShutdownCompleteChunk(bool tag_reflected) + : tag_reflected_(tag_reflected) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + bool tag_reflected() const { return tag_reflected_; } + + private: + static constexpr int kFlagsBitT = 0; + bool tag_reflected_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_SHUTDOWN_COMPLETE_CHUNK_H_ diff --git a/net/dcsctp/packet/chunk/shutdown_complete_chunk_test.cc b/net/dcsctp/packet/chunk/shutdown_complete_chunk_test.cc new file mode 100644 index 0000000000..253900d5cd --- /dev/null +++ b/net/dcsctp/packet/chunk/shutdown_complete_chunk_test.cc @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk/shutdown_complete_chunk.h" + +#include + +#include + +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(ShutdownCompleteChunkTest, FromCapture) { + /* + SHUTDOWN_COMPLETE chunk + Chunk type: SHUTDOWN_COMPLETE (14) + Chunk flags: 0x00 + Chunk length: 4 + */ + + uint8_t data[] = {0x0e, 0x00, 0x00, 0x04}; + + EXPECT_TRUE(ShutdownCompleteChunk::Parse(data).has_value()); +} + +TEST(ShutdownCompleteChunkTest, SerializeAndDeserialize) { + ShutdownCompleteChunk chunk(/*tag_reflected=*/false); + + std::vector serialized; + chunk.SerializeTo(serialized); + + EXPECT_TRUE(ShutdownCompleteChunk::Parse(serialized).has_value()); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk_validators.cc b/net/dcsctp/packet/chunk_validators.cc new file mode 100644 index 0000000000..48d351827e --- /dev/null +++ b/net/dcsctp/packet/chunk_validators.cc @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/chunk_validators.h" + +#include +#include +#include + +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "rtc_base/logging.h" + +namespace dcsctp { + +SackChunk ChunkValidators::Clean(SackChunk&& sack) { + if (Validate(sack)) { + return std::move(sack); + } + + RTC_DLOG(LS_WARNING) << "Received SACK is malformed; cleaning it"; + + std::vector gap_ack_blocks; + gap_ack_blocks.reserve(sack.gap_ack_blocks().size()); + + // First: Only keep blocks that are sane + for (const SackChunk::GapAckBlock& gap_ack_block : sack.gap_ack_blocks()) { + if (gap_ack_block.end > gap_ack_block.start) { + gap_ack_blocks.emplace_back(gap_ack_block); + } + } + + // Not more than at most one remaining? Exit early. + if (gap_ack_blocks.size() <= 1) { + return SackChunk(sack.cumulative_tsn_ack(), sack.a_rwnd(), + std::move(gap_ack_blocks), sack.duplicate_tsns()); + } + + // Sort the intervals by their start value, to aid in the merging below. + absl::c_sort(gap_ack_blocks, [&](const SackChunk::GapAckBlock& a, + const SackChunk::GapAckBlock& b) { + return a.start < b.start; + }); + + // Merge overlapping ranges. + std::vector merged; + merged.reserve(gap_ack_blocks.size()); + merged.push_back(gap_ack_blocks[0]); + + for (size_t i = 1; i < gap_ack_blocks.size(); ++i) { + if (merged.back().end + 1 >= gap_ack_blocks[i].start) { + merged.back().end = std::max(merged.back().end, gap_ack_blocks[i].end); + } else { + merged.push_back(gap_ack_blocks[i]); + } + } + + return SackChunk(sack.cumulative_tsn_ack(), sack.a_rwnd(), std::move(merged), + sack.duplicate_tsns()); +} + +bool ChunkValidators::Validate(const SackChunk& sack) { + if (sack.gap_ack_blocks().empty()) { + return true; + } + + // Ensure that gap-ack-blocks are sorted, has an "end" that is not before + // "start" and are non-overlapping and non-adjacent. 
+ uint16_t prev_end = 0; + for (const SackChunk::GapAckBlock& gap_ack_block : sack.gap_ack_blocks()) { + if (gap_ack_block.end < gap_ack_block.start) { + return false; + } + if (gap_ack_block.start <= (prev_end + 1)) { + return false; + } + prev_end = gap_ack_block.end; + } + return true; +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/chunk_validators.h b/net/dcsctp/packet/chunk_validators.h new file mode 100644 index 0000000000..b11848a162 --- /dev/null +++ b/net/dcsctp/packet/chunk_validators.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CHUNK_VALIDATORS_H_ +#define NET_DCSCTP_PACKET_CHUNK_VALIDATORS_H_ + +#include "net/dcsctp/packet/chunk/sack_chunk.h" + +namespace dcsctp { +// Validates and cleans SCTP chunks. +class ChunkValidators { + public: + // Given a SackChunk, will return `true` if it's valid, and `false` if not. + static bool Validate(const SackChunk& sack); + + // Given a SackChunk, it will return a cleaned and validated variant of it. + // RFC4960 doesn't say anything about validity of SACKs or if the Gap ACK + // blocks must be sorted, and non-overlapping. While they always are in + // well-behaving implementations, this can't be relied on. + // + // This method internally calls `Validate`, which means that you can always + // pass a SackChunk to this method (valid or not), and use the results. 
+ static SackChunk Clean(SackChunk&& sack); +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CHUNK_VALIDATORS_H_ diff --git a/net/dcsctp/packet/chunk_validators_test.cc b/net/dcsctp/packet/chunk_validators_test.cc new file mode 100644 index 0000000000..d59fd4ec48 --- /dev/null +++ b/net/dcsctp/packet/chunk_validators_test.cc @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk_validators.h" + +#include + +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; +using ::testing::IsEmpty; + +TEST(ChunkValidatorsTest, NoGapAckBlocksAreValid) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + /*gap_ack_blocks=*/{}, {}); + + EXPECT_TRUE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + EXPECT_THAT(clean.gap_ack_blocks(), IsEmpty()); +} + +TEST(ChunkValidatorsTest, OneValidAckBlock) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, {SackChunk::GapAckBlock(2, 3)}, {}); + + EXPECT_TRUE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + EXPECT_THAT(clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 3))); +} + +TEST(ChunkValidatorsTest, TwoValidAckBlocks) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(2, 3), SackChunk::GapAckBlock(5, 6)}, + {}); + + EXPECT_TRUE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + EXPECT_THAT( + clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 3), SackChunk::GapAckBlock(5, 6))); +} 
+ +TEST(ChunkValidatorsTest, OneInvalidAckBlock) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, {SackChunk::GapAckBlock(1, 2)}, {}); + + EXPECT_FALSE(ChunkValidators::Validate(sack)); + + // It's not strictly valid, but due to the renegable nature of gap ack blocks, + // the cum_ack_tsn can't simply be moved. + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + EXPECT_THAT(clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(1, 2))); +} + +TEST(ChunkValidatorsTest, RemovesInvalidGapAckBlockFromSack) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(2, 3), SackChunk::GapAckBlock(6, 4)}, + {}); + + EXPECT_FALSE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + + EXPECT_THAT(clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 3))); +} + +TEST(ChunkValidatorsTest, SortsGapAckBlocksInOrder) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(6, 7), SackChunk::GapAckBlock(3, 4)}, + {}); + + EXPECT_FALSE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + + EXPECT_THAT( + clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(3, 4), SackChunk::GapAckBlock(6, 7))); +} + +TEST(ChunkValidatorsTest, MergesAdjacentBlocks) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(3, 4), SackChunk::GapAckBlock(5, 6)}, + {}); + + EXPECT_FALSE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + + EXPECT_THAT(clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(3, 6))); +} + +TEST(ChunkValidatorsTest, MergesOverlappingByOne) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(3, 4), SackChunk::GapAckBlock(4, 5)}, + {}); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + + EXPECT_FALSE(ChunkValidators::Validate(sack)); + + EXPECT_THAT(clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(3, 5))); +} + 
+TEST(ChunkValidatorsTest, MergesOverlappingByMore) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(3, 10), SackChunk::GapAckBlock(4, 5)}, + {}); + + EXPECT_FALSE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + + EXPECT_THAT(clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(3, 10))); +} + +TEST(ChunkValidatorsTest, MergesBlocksStartingWithSameStartOffset) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(3, 7), SackChunk::GapAckBlock(3, 5), + SackChunk::GapAckBlock(3, 9)}, + {}); + + EXPECT_FALSE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + + EXPECT_THAT(clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(3, 9))); +} + +TEST(ChunkValidatorsTest, MergesBlocksPartiallyOverlapping) { + SackChunk sack(TSN(123), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(3, 7), SackChunk::GapAckBlock(5, 9)}, + {}); + + EXPECT_FALSE(ChunkValidators::Validate(sack)); + + SackChunk clean = ChunkValidators::Clean(std::move(sack)); + + EXPECT_THAT(clean.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(3, 9))); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/crc32c.cc b/net/dcsctp/packet/crc32c.cc new file mode 100644 index 0000000000..e3f0dc1d19 --- /dev/null +++ b/net/dcsctp/packet/crc32c.cc @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/crc32c.h" + +#include + +#include "third_party/crc32c/src/include/crc32c/crc32c.h" + +namespace dcsctp { + +uint32_t GenerateCrc32C(rtc::ArrayView data) { + uint32_t crc32c = crc32c_value(data.data(), data.size()); + + // Byte swapping for little endian byte order: + uint8_t byte0 = crc32c; + uint8_t byte1 = crc32c >> 8; + uint8_t byte2 = crc32c >> 16; + uint8_t byte3 = crc32c >> 24; + crc32c = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3); + return crc32c; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/crc32c.h b/net/dcsctp/packet/crc32c.h new file mode 100644 index 0000000000..a969e1b26b --- /dev/null +++ b/net/dcsctp/packet/crc32c.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_CRC32C_H_ +#define NET_DCSCTP_PACKET_CRC32C_H_ + +#include + +#include "api/array_view.h" + +namespace dcsctp { + +// Generates the CRC32C checksum of `data`. +uint32_t GenerateCrc32C(rtc::ArrayView data); + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_CRC32C_H_ diff --git a/net/dcsctp/packet/crc32c_test.cc b/net/dcsctp/packet/crc32c_test.cc new file mode 100644 index 0000000000..0821c4ef75 --- /dev/null +++ b/net/dcsctp/packet/crc32c_test.cc @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/crc32c.h" + +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +constexpr std::array kEmpty = {}; +constexpr std::array kZero = {0}; +constexpr std::array kManyZeros = {0, 0, 0, 0}; +constexpr std::array kShort = {1, 2, 3, 4}; +constexpr std::array kLong = {1, 2, 3, 4, 5, 6, 7, 8}; +// https://tools.ietf.org/html/rfc3720#appendix-B.4 +constexpr std::array k32Zeros = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +constexpr std::array k32Ones = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; +constexpr std::array k32Incrementing = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; +constexpr std::array k32Decrementing = { + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; +constexpr std::array kISCSICommandPDU = { + 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +TEST(Crc32Test, TestVectors) { + EXPECT_EQ(GenerateCrc32C(kEmpty), 0U); + EXPECT_EQ(GenerateCrc32C(kZero), 0x51537d52U); + EXPECT_EQ(GenerateCrc32C(kManyZeros), 0xc74b6748U); + EXPECT_EQ(GenerateCrc32C(kShort), 0xf48c3029U); + EXPECT_EQ(GenerateCrc32C(kLong), 0x811f8946U); + // https://tools.ietf.org/html/rfc3720#appendix-B.4 + EXPECT_EQ(GenerateCrc32C(k32Zeros), 0xaa36918aU); + EXPECT_EQ(GenerateCrc32C(k32Ones), 0x43aba862U); + 
EXPECT_EQ(GenerateCrc32C(k32Incrementing), 0x4e79dd46U); + EXPECT_EQ(GenerateCrc32C(k32Decrementing), 0x5cdb3f11U); + EXPECT_EQ(GenerateCrc32C(kISCSICommandPDU), 0x563a96d9U); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/data.h b/net/dcsctp/packet/data.h new file mode 100644 index 0000000000..f2d2e74904 --- /dev/null +++ b/net/dcsctp/packet/data.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_DATA_H_ +#define NET_DCSCTP_PACKET_DATA_H_ + +#include +#include +#include + +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// Represents data that is either received and extracted from a DATA/I-DATA +// chunk, or data that is supposed to be sent, and wrapped in a DATA/I-DATA +// chunk (depending on peer capabilities). +// +// The data wrapped in this structure is actually the same as the DATA/I-DATA +// chunk (actually the union of them), but to avoid having all components be +// aware of the implementation details of the different chunks, this abstraction +// is used instead. A notable difference is also that it doesn't carry a +// Transmission Sequence Number (TSN), as that is not known when a chunk is +// created (assigned late, just when sending), and that the TSNs in DATA/I-DATA +// are wrapped numbers, and within the library, unwrapped sequence numbers are +// preferably used. +struct Data { + // Indicates if a chunk is the first in a fragmented message and maps to the + // "beginning" flag in DATA/I-DATA chunk. 
+ using IsBeginning = StrongAlias; + + // Indicates if a chunk is the last in a fragmented message and maps to the + // "end" flag in DATA/I-DATA chunk. + using IsEnd = StrongAlias; + + Data(StreamID stream_id, + SSN ssn, + MID message_id, + FSN fsn, + PPID ppid, + std::vector payload, + IsBeginning is_beginning, + IsEnd is_end, + IsUnordered is_unordered) + : stream_id(stream_id), + ssn(ssn), + message_id(message_id), + fsn(fsn), + ppid(ppid), + payload(std::move(payload)), + is_beginning(is_beginning), + is_end(is_end), + is_unordered(is_unordered) {} + + // Move-only, to avoid accidental copies. + Data(Data&& other) = default; + Data& operator=(Data&& other) = default; + + // Creates a copy of this `Data` object. + Data Clone() const { + return Data(stream_id, ssn, message_id, fsn, ppid, payload, is_beginning, + is_end, is_unordered); + } + + // The size of this data, which translates to the size of its payload. + size_t size() const { return payload.size(); } + + // Stream Identifier. + StreamID stream_id; + + // Stream Sequence Number (SSN), per stream, for ordered chunks. Defined by + // RFC4960 and used only in DATA chunks (not I-DATA). + SSN ssn; + + // Message Identifier (MID) per stream and ordered/unordered. Defined by + // RFC8260, and used together with options.is_unordered and stream_id to + // uniquely identify a message. Used only in I-DATA chunks (not DATA). + MID message_id; + // Fragment Sequence Number (FSN) per stream and ordered/unordered, as above. + FSN fsn; + + // Payload Protocol Identifier (PPID). + PPID ppid; + + // The actual data payload. + std::vector payload; + + // If this data represents the first, last or a middle chunk. + IsBeginning is_beginning; + IsEnd is_end; + // If this data is sent/received unordered. 
+ IsUnordered is_unordered; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_DATA_H_ diff --git a/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.cc b/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.cc new file mode 100644 index 0000000000..ef67c2a49f --- /dev/null +++ b/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.cc @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.h" + +#include + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.10 + +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cause Code=10 | Cause Length=4 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int CookieReceivedWhileShuttingDownCause::kType; + +absl::optional +CookieReceivedWhileShuttingDownCause::Parse( + rtc::ArrayView data) { + if (!ParseTLV(data).has_value()) { + return absl::nullopt; + } + return CookieReceivedWhileShuttingDownCause(); +} + +void CookieReceivedWhileShuttingDownCause::SerializeTo( + std::vector& out) const { + AllocateTLV(out); +} + +std::string CookieReceivedWhileShuttingDownCause::ToString() const { + return "Cookie Received While Shutting Down"; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.h b/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.h new file mode 100644 index 
0000000000..362f181fba --- /dev/null +++ b/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_COOKIE_RECEIVED_WHILE_SHUTTING_DOWN_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_COOKIE_RECEIVED_WHILE_SHUTTING_DOWN_CAUSE_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.10 +struct CookieReceivedWhileShuttingDownCauseConfig : public ParameterConfig { + static constexpr int kType = 10; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class CookieReceivedWhileShuttingDownCause + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = + CookieReceivedWhileShuttingDownCauseConfig::kType; + + CookieReceivedWhileShuttingDownCause() {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_COOKIE_RECEIVED_WHILE_SHUTTING_DOWN_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause_test.cc b/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause_test.cc new file mode 100644 index 0000000000..afb8364c32 --- /dev/null +++ 
b/net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause_test.cc @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" + +namespace dcsctp { +namespace { + +TEST(CookieReceivedWhileShuttingDownCauseTest, SerializeAndDeserialize) { + CookieReceivedWhileShuttingDownCause parameter; + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN( + CookieReceivedWhileShuttingDownCause deserialized, + CookieReceivedWhileShuttingDownCause::Parse(serialized)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/error_cause.cc b/net/dcsctp/packet/error_cause/error_cause.cc new file mode 100644 index 0000000000..dcd07472ed --- /dev/null +++ b/net/dcsctp/packet/error_cause/error_cause.cc @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/error_cause/error_cause.h" + +#include + +#include +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/math.h" +#include "net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.h" +#include "net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.h" +#include "net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.h" +#include "net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.h" +#include "net/dcsctp/packet/error_cause/no_user_data_cause.h" +#include "net/dcsctp/packet/error_cause/out_of_resource_error_cause.h" +#include "net/dcsctp/packet/error_cause/protocol_violation_cause.h" +#include "net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.h" +#include "net/dcsctp/packet/error_cause/stale_cookie_error_cause.h" +#include "net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h" +#include "net/dcsctp/packet/error_cause/unrecognized_parameter_cause.h" +#include "net/dcsctp/packet/error_cause/unresolvable_address_cause.h" +#include "net/dcsctp/packet/error_cause/user_initiated_abort_cause.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +template +bool ParseAndPrint(ParameterDescriptor descriptor, rtc::StringBuilder& sb) { + if (descriptor.type == ErrorCause::kType) { + absl::optional p = ErrorCause::Parse(descriptor.data); + if (p.has_value()) { + sb << p->ToString(); + } else { + sb << "Failed to parse error cause of type " << ErrorCause::kType; + } + return true; + } + return false; +} + +std::string ErrorCausesToString(const Parameters& parameters) { + rtc::StringBuilder sb; + + std::vector descriptors = parameters.descriptors(); + for (size_t i = 0; i < descriptors.size(); ++i) { + if (i > 0) { + sb << "\n"; + } + + const ParameterDescriptor& d = descriptors[i]; + if (!ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + 
!ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb) && + !ParseAndPrint(d, sb)) { + sb << "Unhandled parameter of type: " << d.type; + } + } + + return sb.Release(); +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/error_cause.h b/net/dcsctp/packet/error_cause/error_cause.h new file mode 100644 index 0000000000..fa2bf81478 --- /dev/null +++ b/net/dcsctp/packet/error_cause/error_cause.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_ERROR_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_ERROR_CAUSE_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// Converts the Error Causes in `parameters` to a human readable string, +// to be used in error reporting and logging. 
+std::string ErrorCausesToString(const Parameters& parameters); + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_ERROR_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.cc b/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.cc new file mode 100644 index 0000000000..0187544226 --- /dev/null +++ b/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.cc @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.h" + +#include + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.7 + +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cause Code=7 | Cause Length=4 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int InvalidMandatoryParameterCause::kType; + +absl::optional +InvalidMandatoryParameterCause::Parse(rtc::ArrayView data) { + if (!ParseTLV(data).has_value()) { + return absl::nullopt; + } + return InvalidMandatoryParameterCause(); +} + +void InvalidMandatoryParameterCause::SerializeTo( + std::vector& out) const { + AllocateTLV(out); +} + +std::string InvalidMandatoryParameterCause::ToString() const { + return "Invalid Mandatory Parameter"; +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.h b/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.h new file mode 100644 index 0000000000..e192b5a42f --- /dev/null +++ 
b/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_INVALID_MANDATORY_PARAMETER_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_INVALID_MANDATORY_PARAMETER_CAUSE_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.7 +struct InvalidMandatoryParameterCauseConfig : public ParameterConfig { + static constexpr int kType = 7; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class InvalidMandatoryParameterCause + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = InvalidMandatoryParameterCauseConfig::kType; + + InvalidMandatoryParameterCause() {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_INVALID_MANDATORY_PARAMETER_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause_test.cc b/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause_test.cc new file mode 100644 index 0000000000..3d532d09b1 --- /dev/null +++ b/net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause_test.cc @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. 
All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/invalid_mandatory_parameter_cause.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" + +namespace dcsctp { +namespace { + +TEST(InvalidMandatoryParameterCauseTest, SerializeAndDeserialize) { + InvalidMandatoryParameterCause parameter; + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN( + InvalidMandatoryParameterCause deserialized, + InvalidMandatoryParameterCause::Parse(serialized)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.cc b/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.cc new file mode 100644 index 0000000000..b2ddd6f4ef --- /dev/null +++ b/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.cc @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.1 + +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cause Code=1 | Cause Length=8 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Identifier | (Reserved) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int InvalidStreamIdentifierCause::kType; + +absl::optional +InvalidStreamIdentifierCause::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + StreamID stream_id(reader->Load16<4>()); + return InvalidStreamIdentifierCause(stream_id); +} + +void InvalidStreamIdentifierCause::SerializeTo( + std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out); + + writer.Store16<4>(*stream_id_); +} + +std::string InvalidStreamIdentifierCause::ToString() const { + rtc::StringBuilder sb; + sb << "Invalid Stream Identifier, stream_id=" << *stream_id_; + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.h b/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.h new file mode 100644 index 0000000000..b7dfe177b8 --- /dev/null +++ b/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_INVALID_STREAM_IDENTIFIER_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_INVALID_STREAM_IDENTIFIER_CAUSE_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.1 +struct InvalidStreamIdentifierCauseConfig : public ParameterConfig { + static constexpr int kType = 1; + static constexpr size_t kHeaderSize = 8; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class InvalidStreamIdentifierCause + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = InvalidStreamIdentifierCauseConfig::kType; + + explicit InvalidStreamIdentifierCause(StreamID stream_id) + : stream_id_(stream_id) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + StreamID stream_id() const { return stream_id_; } + + private: + StreamID stream_id_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_INVALID_STREAM_IDENTIFIER_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause_test.cc b/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause_test.cc new file mode 100644 index 0000000000..a282ce5ee8 --- /dev/null +++ b/net/dcsctp/packet/error_cause/invalid_stream_identifier_cause_test.cc @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/invalid_stream_identifier_cause.h"
+
+#include <stdint.h>
+
+#include <type_traits>
+#include <vector>
+
+#include "net/dcsctp/testing/testing_macros.h"
+#include "rtc_base/gunit.h"
+
+namespace dcsctp {
+namespace {
+
+// Round-trips the cause and verifies that the stream id survives.
+TEST(InvalidStreamIdentifierCauseTest, SerializeAndDeserialize) {
+  InvalidStreamIdentifierCause parameter(StreamID(1));
+
+  std::vector<uint8_t> serialized;
+  parameter.SerializeTo(serialized);
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(InvalidStreamIdentifierCause deserialized,
+                              InvalidStreamIdentifierCause::Parse(serialized));
+
+  EXPECT_EQ(*deserialized.stream_id(), 1);
+}
+
+}  // namespace
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.cc b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.cc
new file mode 100644
index 0000000000..b89f86e43e
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.h"
+
+#include <stdint.h>
+
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "net/dcsctp/common/str_join.h"
+#include "net/dcsctp/packet/bounded_byte_reader.h"
+#include "net/dcsctp/packet/bounded_byte_writer.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.2
+
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |     Cause Code=2              |    Cause Length=8+N*2         |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |                  Number of missing params=N                   |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |   Missing Param Type #1       |   Missing Param Type #2       |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |   Missing Param Type #N-1     |   Missing Param Type #N       |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr int MissingMandatoryParameterCause::kType;
+
+absl::optional<MissingMandatoryParameterCause>
+MissingMandatoryParameterCause::Parse(rtc::ArrayView<const uint8_t> data) {
+  absl::optional<BoundedByteReader<kHeaderSize>> reader = ParseTLV(data);
+  if (!reader.has_value()) {
+    return absl::nullopt;
+  }
+
+  uint32_t count = reader->Load32<4>();
+  // Validate by dividing the actual variable-data size instead of multiplying
+  // `count`, so an adversarial `count` cannot overflow the comparison.
+  if (reader->variable_data_size() / kMissingParameterSize != count) {
+    RTC_DLOG(LS_WARNING) << "Invalid number of missing parameters";
+    return absl::nullopt;
+  }
+
+  std::vector<uint16_t> missing_parameter_types;
+  missing_parameter_types.reserve(count);
+  for (uint32_t i = 0; i < count; ++i) {
+    BoundedByteReader<kMissingParameterSize> sub_reader =
+        reader->sub_reader<kMissingParameterSize>(i * kMissingParameterSize);
+
+    missing_parameter_types.push_back(sub_reader.Load16<0>());
+  }
+  return MissingMandatoryParameterCause(missing_parameter_types);
+}
+
+void MissingMandatoryParameterCause::SerializeTo(
+    std::vector<uint8_t>& out) const {
+  size_t variable_size =
+      missing_parameter_types_.size() * kMissingParameterSize;
+  BoundedByteWriter<kHeaderSize> writer = AllocateTLV(out, variable_size);
+
+  writer.Store32<4>(missing_parameter_types_.size());
+
+  for (size_t i = 0; i < missing_parameter_types_.size(); ++i) {
+    BoundedByteWriter<kMissingParameterSize> sub_writer =
+        writer.sub_writer<kMissingParameterSize>(i * kMissingParameterSize);
+
+    sub_writer.Store16<0>(missing_parameter_types_[i]);
+  }
+}
+
+std::string MissingMandatoryParameterCause::ToString() const {
+  rtc::StringBuilder sb;
+  sb << "Missing Mandatory Parameter, missing_parameter_types="
+     << StrJoin(missing_parameter_types_, ",");
+  return sb.Release();
+}
+
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.h b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.h
new file mode 100644
index 0000000000..4435424295
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_MISSING_MANDATORY_PARAMETER_CAUSE_H_
+#define NET_DCSCTP_PACKET_ERROR_CAUSE_MISSING_MANDATORY_PARAMETER_CAUSE_H_
+#include <stddef.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/error_cause/error_cause.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.2
+struct MissingMandatoryParameterCauseConfig : public ParameterConfig {
+  static constexpr int kType = 2;
+  static constexpr size_t kHeaderSize = 8;
+  // Variable data is a sequence of 16-bit parameter types.
+  static constexpr size_t kVariableLengthAlignment = 2;
+};
+
+class MissingMandatoryParameterCause
+    : public Parameter,
+      public TLVTrait<MissingMandatoryParameterCauseConfig> {
+ public:
+  static constexpr int kType = MissingMandatoryParameterCauseConfig::kType;
+
+  explicit MissingMandatoryParameterCause(
+      rtc::ArrayView<const uint16_t> missing_parameter_types)
+      : missing_parameter_types_(missing_parameter_types.begin(),
+                                 missing_parameter_types.end()) {}
+
+  static absl::optional<MissingMandatoryParameterCause> Parse(
+      rtc::ArrayView<const uint8_t> data);
+
+  void SerializeTo(std::vector<uint8_t>& out) const override;
+  std::string ToString() const override;
+
+  rtc::ArrayView<const uint16_t> missing_parameter_types() const {
+    return missing_parameter_types_;
+  }
+
+ private:
+  // Each missing parameter type is serialized as two bytes.
+  static constexpr size_t kMissingParameterSize = 2;
+  std::vector<uint16_t> missing_parameter_types_;
+};
+
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_PACKET_ERROR_CAUSE_MISSING_MANDATORY_PARAMETER_CAUSE_H_
diff --git a/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause_test.cc b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause_test.cc
new file mode 100644
index 0000000000..1c526ff0e2
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause_test.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/missing_mandatory_parameter_cause.h"
+
+#include <stdint.h>
+
+#include <type_traits>
+#include <vector>
+
+#include "api/array_view.h"
+#include "net/dcsctp/testing/testing_macros.h"
+#include "rtc_base/gunit.h"
+#include "test/gmock.h"
+
+namespace dcsctp {
+namespace {
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+
+TEST(MissingMandatoryParameterCauseTest, SerializeAndDeserialize) {
+  uint16_t parameter_types[] = {1, 2, 3};
+  MissingMandatoryParameterCause parameter(parameter_types);
+
+  std::vector<uint8_t> serialized;
+  parameter.SerializeTo(serialized);
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(
+      MissingMandatoryParameterCause deserialized,
+      MissingMandatoryParameterCause::Parse(serialized));
+
+  EXPECT_THAT(deserialized.missing_parameter_types(), ElementsAre(1, 2, 3));
+}
+
+TEST(MissingMandatoryParameterCauseTest, HandlesDeserializeZeroParameters) {
+  uint8_t serialized[] = {0, 2, 0, 8, 0, 0, 0, 0};
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(
+      MissingMandatoryParameterCause deserialized,
+      MissingMandatoryParameterCause::Parse(serialized));
+
+  EXPECT_THAT(deserialized.missing_parameter_types(), IsEmpty());
+}
+
+TEST(MissingMandatoryParameterCauseTest, HandlesOverflowParameterCount) {
+  // 0x80000004 * 2 = 2**32 + 8 -> if overflow, would validate correctly.
+  uint8_t serialized[] = {0, 2, 0, 8, 0x80, 0x00, 0x00, 0x04};
+
+  EXPECT_FALSE(MissingMandatoryParameterCause::Parse(serialized).has_value());
+}
+
+}  // namespace
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/no_user_data_cause.cc b/net/dcsctp/packet/error_cause/no_user_data_cause.cc
new file mode 100644
index 0000000000..2853915b0c
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/no_user_data_cause.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/no_user_data_cause.h"
+
+#include <stdint.h>
+
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/bounded_byte_reader.h"
+#include "net/dcsctp/packet/bounded_byte_writer.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.9
+
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |     Cause Code=9              |      Cause Length=8           |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  /                           TSN value                           /
+//  \                                                               \
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr int NoUserDataCause::kType;
+
+absl::optional<NoUserDataCause> NoUserDataCause::Parse(
+    rtc::ArrayView<const uint8_t> data) {
+  absl::optional<BoundedByteReader<kHeaderSize>> reader = ParseTLV(data);
+  if (!reader.has_value()) {
+    return absl::nullopt;
+  }
+  TSN tsn(reader->Load32<4>());
+  return NoUserDataCause(tsn);
+}
+
+void NoUserDataCause::SerializeTo(std::vector<uint8_t>& out) const {
+  BoundedByteWriter<kHeaderSize> writer = AllocateTLV(out);
+  writer.Store32<4>(*tsn_);
+}
+
+std::string NoUserDataCause::ToString() const {
+  rtc::StringBuilder sb;
+  sb << "No User Data, tsn=" << *tsn_;
+  return sb.Release();
+}
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/no_user_data_cause.h b/net/dcsctp/packet/error_cause/no_user_data_cause.h
new file mode 100644
index 0000000000..1087dcc97c
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/no_user_data_cause.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_NO_USER_DATA_CAUSE_H_
+#define NET_DCSCTP_PACKET_ERROR_CAUSE_NO_USER_DATA_CAUSE_H_
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "net/dcsctp/common/internal_types.h"
+#include "net/dcsctp/packet/error_cause/error_cause.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.9
+struct NoUserDataCauseConfig : public ParameterConfig {
+  static constexpr int kType = 9;
+  static constexpr size_t kHeaderSize = 8;
+  // Fixed-size cause; no trailing variable-length data.
+  static constexpr size_t kVariableLengthAlignment = 0;
+};
+
+class NoUserDataCause : public Parameter,
+                        public TLVTrait<NoUserDataCauseConfig> {
+ public:
+  static constexpr int kType = NoUserDataCauseConfig::kType;
+
+  explicit NoUserDataCause(TSN tsn) : tsn_(tsn) {}
+
+  static absl::optional<NoUserDataCause> Parse(
+      rtc::ArrayView<const uint8_t> data);
+
+  void SerializeTo(std::vector<uint8_t>& out) const override;
+  std::string ToString() const override;
+
+  TSN tsn() const { return tsn_; }
+
+ private:
+  TSN tsn_;
+};
+
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_PACKET_ERROR_CAUSE_NO_USER_DATA_CAUSE_H_
diff --git
a/net/dcsctp/packet/error_cause/no_user_data_cause_test.cc b/net/dcsctp/packet/error_cause/no_user_data_cause_test.cc
new file mode 100644
index 0000000000..0a535bf4fa
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/no_user_data_cause_test.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/no_user_data_cause.h"
+
+#include <stdint.h>
+
+#include <type_traits>
+#include <vector>
+
+#include "net/dcsctp/testing/testing_macros.h"
+#include "rtc_base/gunit.h"
+
+namespace dcsctp {
+namespace {
+
+TEST(NoUserDataCauseTest, SerializeAndDeserialize) {
+  NoUserDataCause parameter(TSN(123));
+
+  std::vector<uint8_t> serialized;
+  parameter.SerializeTo(serialized);
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(NoUserDataCause deserialized,
+                              NoUserDataCause::Parse(serialized));
+
+  EXPECT_EQ(*deserialized.tsn(), 123u);
+}
+
+}  // namespace
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/out_of_resource_error_cause.cc b/net/dcsctp/packet/error_cause/out_of_resource_error_cause.cc
new file mode 100644
index 0000000000..e5c7c0e787
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/out_of_resource_error_cause.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/out_of_resource_error_cause.h"
+
+#include <stdint.h>
+
+#include <string>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.4
+
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |     Cause Code=4              |      Cause Length=4           |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr int OutOfResourceErrorCause::kType;
+
+absl::optional<OutOfResourceErrorCause> OutOfResourceErrorCause::Parse(
+    rtc::ArrayView<const uint8_t> data) {
+  // This cause carries no payload; parsing only validates the TLV header.
+  if (!ParseTLV(data).has_value()) {
+    return absl::nullopt;
+  }
+  return OutOfResourceErrorCause();
+}
+
+void OutOfResourceErrorCause::SerializeTo(std::vector<uint8_t>& out) const {
+  AllocateTLV(out);
+}
+
+std::string OutOfResourceErrorCause::ToString() const {
+  return "Out Of Resource";
+}
+
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/out_of_resource_error_cause.h b/net/dcsctp/packet/error_cause/out_of_resource_error_cause.h
new file mode 100644
index 0000000000..fc798ca4ac
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/out_of_resource_error_cause.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_OUT_OF_RESOURCE_ERROR_CAUSE_H_
+#define NET_DCSCTP_PACKET_ERROR_CAUSE_OUT_OF_RESOURCE_ERROR_CAUSE_H_
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/error_cause/error_cause.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.4
+struct OutOfResourceParameterConfig : public ParameterConfig {
+  static constexpr int kType = 4;
+  // Header-only cause: just the 4-byte TLV header, no payload.
+  static constexpr size_t kHeaderSize = 4;
+  static constexpr size_t kVariableLengthAlignment = 0;
+};
+
+class OutOfResourceErrorCause
+    : public Parameter,
+      public TLVTrait<OutOfResourceParameterConfig> {
+ public:
+  static constexpr int kType = OutOfResourceParameterConfig::kType;
+
+  OutOfResourceErrorCause() {}
+
+  static absl::optional<OutOfResourceErrorCause> Parse(
+      rtc::ArrayView<const uint8_t> data);
+
+  void SerializeTo(std::vector<uint8_t>& out) const override;
+  std::string ToString() const override;
+};
+
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_PACKET_ERROR_CAUSE_OUT_OF_RESOURCE_ERROR_CAUSE_H_
diff --git a/net/dcsctp/packet/error_cause/out_of_resource_error_cause_test.cc b/net/dcsctp/packet/error_cause/out_of_resource_error_cause_test.cc
new file mode 100644
index 0000000000..501fc201cd
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/out_of_resource_error_cause_test.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/out_of_resource_error_cause.h"
+
+#include <stdint.h>
+
+#include <type_traits>
+#include <vector>
+
+#include "net/dcsctp/testing/testing_macros.h"
+#include "rtc_base/gunit.h"
+
+namespace dcsctp {
+namespace {
+
+TEST(OutOfResourceErrorCauseTest, SerializeAndDeserialize) {
+  OutOfResourceErrorCause parameter;
+
+  std::vector<uint8_t> serialized;
+  parameter.SerializeTo(serialized);
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(OutOfResourceErrorCause deserialized,
+                              OutOfResourceErrorCause::Parse(serialized));
+}
+
+}  // namespace
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/protocol_violation_cause.cc b/net/dcsctp/packet/error_cause/protocol_violation_cause.cc
new file mode 100644
index 0000000000..1b8d423afb
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/protocol_violation_cause.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/protocol_violation_cause.h"
+
+#include <stdint.h>
+
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/bounded_byte_reader.h"
+#include "net/dcsctp/packet/bounded_byte_writer.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.13
+
+//   0                   1                   2                   3
+//   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |     Cause Code=13             |      Cause Length=Variable    |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  /                    Additional Information                     /
+//  \                                                               \
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr int ProtocolViolationCause::kType;
+
+absl::optional<ProtocolViolationCause> ProtocolViolationCause::Parse(
+    rtc::ArrayView<const uint8_t> data) {
+  absl::optional<BoundedByteReader<kHeaderSize>> reader = ParseTLV(data);
+  if (!reader.has_value()) {
+    return absl::nullopt;
+  }
+  // The additional information is free-form bytes; store it as a string.
+  return ProtocolViolationCause(
+      std::string(reinterpret_cast<const char*>(reader->variable_data().data()),
+                  reader->variable_data().size()));
+}
+
+void ProtocolViolationCause::SerializeTo(std::vector<uint8_t>& out) const {
+  BoundedByteWriter<kHeaderSize> writer =
+      AllocateTLV(out, additional_information_.size());
+  writer.CopyToVariableData(rtc::MakeArrayView(
+      reinterpret_cast<const uint8_t*>(additional_information_.data()),
+      additional_information_.size()));
+}
+
+std::string ProtocolViolationCause::ToString() const {
+  rtc::StringBuilder sb;
+  sb << "Protocol Violation, additional_information="
+     << additional_information_;
+  return sb.Release();
+}
+
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/protocol_violation_cause.h b/net/dcsctp/packet/error_cause/protocol_violation_cause.h
new file mode 100644
index 0000000000..3081e1f28c
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/protocol_violation_cause.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2021
The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_PROTOCOL_VIOLATION_CAUSE_H_
+#define NET_DCSCTP_PACKET_ERROR_CAUSE_PROTOCOL_VIOLATION_CAUSE_H_
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/error_cause/error_cause.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.13
+struct ProtocolViolationCauseConfig : public ParameterConfig {
+  static constexpr int kType = 13;
+  static constexpr size_t kHeaderSize = 4;
+  // Additional information is raw bytes with no alignment requirement.
+  static constexpr size_t kVariableLengthAlignment = 1;
+};
+
+class ProtocolViolationCause
+    : public Parameter,
+      public TLVTrait<ProtocolViolationCauseConfig> {
+ public:
+  static constexpr int kType = ProtocolViolationCauseConfig::kType;
+
+  explicit ProtocolViolationCause(absl::string_view additional_information)
+      : additional_information_(additional_information) {}
+
+  static absl::optional<ProtocolViolationCause> Parse(
+      rtc::ArrayView<const uint8_t> data);
+
+  void SerializeTo(std::vector<uint8_t>& out) const override;
+  std::string ToString() const override;
+
+  absl::string_view additional_information() const {
+    return additional_information_;
+  }
+
+ private:
+  std::string additional_information_;
+};
+
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_PACKET_ERROR_CAUSE_PROTOCOL_VIOLATION_CAUSE_H_
diff --git a/net/dcsctp/packet/error_cause/protocol_violation_cause_test.cc b/net/dcsctp/packet/error_cause/protocol_violation_cause_test.cc
new file mode 100644
index 0000000000..902d867091
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/protocol_violation_cause_test.cc
@@ -0,0 +1,61 @@
+/*
+ *
Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/protocol_violation_cause.h"
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "api/array_view.h"
+#include "net/dcsctp/packet/error_cause/error_cause.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+#include "net/dcsctp/testing/testing_macros.h"
+#include "rtc_base/gunit.h"
+#include "test/gmock.h"
+
+namespace dcsctp {
+namespace {
+using ::testing::SizeIs;
+
+TEST(ProtocolViolationCauseTest, EmptyReason) {
+  Parameters causes =
+      Parameters::Builder().Add(ProtocolViolationCause("")).Build();
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(Parameters deserialized,
+                              Parameters::Parse(causes.data()));
+  ASSERT_THAT(deserialized.descriptors(), SizeIs(1));
+  EXPECT_EQ(deserialized.descriptors()[0].type, ProtocolViolationCause::kType);
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(
+      ProtocolViolationCause cause,
+      ProtocolViolationCause::Parse(deserialized.descriptors()[0].data));
+
+  EXPECT_EQ(cause.additional_information(), "");
+}
+
+TEST(ProtocolViolationCauseTest, SetReason) {
+  Parameters causes = Parameters::Builder()
+                          .Add(ProtocolViolationCause("Reason goes here"))
+                          .Build();
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(Parameters deserialized,
+                              Parameters::Parse(causes.data()));
+  ASSERT_THAT(deserialized.descriptors(), SizeIs(1));
+  EXPECT_EQ(deserialized.descriptors()[0].type, ProtocolViolationCause::kType);
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(
+      ProtocolViolationCause cause,
+      ProtocolViolationCause::Parse(deserialized.descriptors()[0].data));
+
+  EXPECT_EQ(cause.additional_information(), "Reason goes here");
+}
+}  // namespace
+}  // namespace dcsctp
diff --git
a/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.cc b/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.cc
new file mode 100644
index 0000000000..abe5de6211
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.h"
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/bounded_byte_reader.h"
+#include "net/dcsctp/packet/bounded_byte_writer.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.11
+
+//   0                   1                   2                   3
+//   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |     Cause Code=11             |      Cause Length=Variable    |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  /                       New Address TLVs                        /
+//  \                                                               \
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr int RestartOfAnAssociationWithNewAddressesCause::kType;
+
+absl::optional<RestartOfAnAssociationWithNewAddressesCause>
+RestartOfAnAssociationWithNewAddressesCause::Parse(
+    rtc::ArrayView<const uint8_t> data) {
+  absl::optional<BoundedByteReader<kHeaderSize>> reader = ParseTLV(data);
+  if (!reader.has_value()) {
+    return absl::nullopt;
+  }
+  // The address TLVs are kept as opaque bytes; they are not parsed here.
+  return RestartOfAnAssociationWithNewAddressesCause(reader->variable_data());
+}
+
+void RestartOfAnAssociationWithNewAddressesCause::SerializeTo(
+    std::vector<uint8_t>& out) const {
+  BoundedByteWriter<kHeaderSize> writer =
+      AllocateTLV(out, new_address_tlvs_.size());
+  writer.CopyToVariableData(new_address_tlvs_);
+}
+
+std::string RestartOfAnAssociationWithNewAddressesCause::ToString() const {
+  return "Restart of an Association with New Addresses";
+}
+
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.h b/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.h
new file mode 100644
index 0000000000..a1cccdc8a1
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_RESTART_OF_AN_ASSOCIATION_WITH_NEW_ADDRESS_CAUSE_H_
+#define NET_DCSCTP_PACKET_ERROR_CAUSE_RESTART_OF_AN_ASSOCIATION_WITH_NEW_ADDRESS_CAUSE_H_
+#include <stddef.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/error_cause/error_cause.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.11
+struct RestartOfAnAssociationWithNewAddressesCauseConfig
+    : public ParameterConfig {
+  static constexpr int kType = 11;
+  static constexpr size_t kHeaderSize = 4;
+  // The payload is opaque address TLVs; no alignment requirement.
+  static constexpr size_t kVariableLengthAlignment = 1;
+};
+
+class RestartOfAnAssociationWithNewAddressesCause
+    : public Parameter,
+      public TLVTrait<RestartOfAnAssociationWithNewAddressesCauseConfig> {
+ public:
+  static constexpr int kType =
+      RestartOfAnAssociationWithNewAddressesCauseConfig::kType;
+
+  explicit RestartOfAnAssociationWithNewAddressesCause(
+      rtc::ArrayView<const uint8_t> new_address_tlvs)
+      : new_address_tlvs_(new_address_tlvs.begin(), new_address_tlvs.end()) {}
+
+  static absl::optional<RestartOfAnAssociationWithNewAddressesCause> Parse(
+      rtc::ArrayView<const uint8_t> data);
+
+  void SerializeTo(std::vector<uint8_t>& out) const override;
+  std::string ToString() const override;
+
+  rtc::ArrayView<const uint8_t> new_address_tlvs() const {
+    return new_address_tlvs_;
+  }
+
+ private:
+  std::vector<uint8_t> new_address_tlvs_;
+};
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_PACKET_ERROR_CAUSE_RESTART_OF_AN_ASSOCIATION_WITH_NEW_ADDRESS_CAUSE_H_
diff --git a/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause_test.cc b/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause_test.cc
new file mode 100644
index 0000000000..b8ab8b6803
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause_test.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/restart_of_an_association_with_new_address_cause.h"
+
+#include <stdint.h>
+
+#include <type_traits>
+#include <vector>
+
+#include "api/array_view.h"
+#include "net/dcsctp/testing/testing_macros.h"
+#include "rtc_base/gunit.h"
+#include "test/gmock.h"
+
+namespace dcsctp {
+namespace {
+using ::testing::ElementsAre;
+
+TEST(RestartOfAnAssociationWithNewAddressesCauseTest, SerializeAndDeserialize) {
+  uint8_t data[] = {1, 2, 3};
+  RestartOfAnAssociationWithNewAddressesCause parameter(data);
+
+  std::vector<uint8_t> serialized;
+  parameter.SerializeTo(serialized);
+
+  ASSERT_HAS_VALUE_AND_ASSIGN(
+      RestartOfAnAssociationWithNewAddressesCause deserialized,
+      RestartOfAnAssociationWithNewAddressesCause::Parse(serialized));
+
+  EXPECT_THAT(deserialized.new_address_tlvs(), ElementsAre(1, 2, 3));
+}
+
+}  // namespace
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/stale_cookie_error_cause.cc b/net/dcsctp/packet/error_cause/stale_cookie_error_cause.cc
new file mode 100644
index 0000000000..d77d8488f1
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/stale_cookie_error_cause.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/error_cause/stale_cookie_error_cause.h"
+
+#include <stdint.h>
+
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/bounded_byte_reader.h"
+#include "net/dcsctp/packet/bounded_byte_writer.h"
+#include "net/dcsctp/packet/tlv_trait.h"
+#include "rtc_base/strings/string_builder.h"
+
+namespace dcsctp {
+
+// https://tools.ietf.org/html/rfc4960#section-3.3.10.3
+
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |     Cause Code=3              |       Cause Length=8          |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |                 Measure of Staleness (usec.)                  |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+constexpr int StaleCookieErrorCause::kType;
+
+absl::optional<StaleCookieErrorCause> StaleCookieErrorCause::Parse(
+    rtc::ArrayView<const uint8_t> data) {
+  absl::optional<BoundedByteReader<kHeaderSize>> reader = ParseTLV(data);
+  if (!reader.has_value()) {
+    return absl::nullopt;
+  }
+  uint32_t staleness_us = reader->Load32<4>();
+  return StaleCookieErrorCause(staleness_us);
+}
+
+void StaleCookieErrorCause::SerializeTo(std::vector<uint8_t>& out) const {
+  BoundedByteWriter<kHeaderSize> writer = AllocateTLV(out);
+  writer.Store32<4>(staleness_us_);
+}
+
+std::string StaleCookieErrorCause::ToString() const {
+  rtc::StringBuilder sb;
+  sb << "Stale Cookie Error, staleness_us=" << staleness_us_;
+  return sb.Release();
+}
+
+}  // namespace dcsctp
diff --git a/net/dcsctp/packet/error_cause/stale_cookie_error_cause.h b/net/dcsctp/packet/error_cause/stale_cookie_error_cause.h
new file mode 100644
index 0000000000..d8b7b5b5bd
--- /dev/null
+++ b/net/dcsctp/packet/error_cause/stale_cookie_error_cause.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS.
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_STALE_COOKIE_ERROR_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_STALE_COOKIE_ERROR_CAUSE_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.3 +struct StaleCookieParameterConfig : public ParameterConfig { + static constexpr int kType = 3; + static constexpr size_t kHeaderSize = 8; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class StaleCookieErrorCause : public Parameter, + public TLVTrait { + public: + static constexpr int kType = StaleCookieParameterConfig::kType; + + explicit StaleCookieErrorCause(uint32_t staleness_us) + : staleness_us_(staleness_us) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + uint16_t staleness_us() const { return staleness_us_; } + + private: + uint32_t staleness_us_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_STALE_COOKIE_ERROR_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/stale_cookie_error_cause_test.cc b/net/dcsctp/packet/error_cause/stale_cookie_error_cause_test.cc new file mode 100644 index 0000000000..c0d1ac1c58 --- /dev/null +++ b/net/dcsctp/packet/error_cause/stale_cookie_error_cause_test.cc @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/stale_cookie_error_cause.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" + +namespace dcsctp { +namespace { + +TEST(StaleCookieErrorCauseTest, SerializeAndDeserialize) { + StaleCookieErrorCause parameter(123); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(StaleCookieErrorCause deserialized, + StaleCookieErrorCause::Parse(serialized)); + + EXPECT_EQ(deserialized.staleness_us(), 123); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.cc b/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.cc new file mode 100644 index 0000000000..04b960d992 --- /dev/null +++ b/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.cc @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h" + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.6 + +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cause Code=6 | Cause Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / Unrecognized Chunk / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int UnrecognizedChunkTypeCause::kType; + +absl::optional UnrecognizedChunkTypeCause::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + std::vector unrecognized_chunk(reader->variable_data().begin(), + reader->variable_data().end()); + return UnrecognizedChunkTypeCause(std::move(unrecognized_chunk)); +} + +void UnrecognizedChunkTypeCause::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = + AllocateTLV(out, unrecognized_chunk_.size()); + writer.CopyToVariableData(unrecognized_chunk_); +} + +std::string UnrecognizedChunkTypeCause::ToString() const { + rtc::StringBuilder sb; + sb << "Unrecognized Chunk Type, chunk_type="; + if (!unrecognized_chunk_.empty()) { + sb << static_cast(unrecognized_chunk_[0]); + } else { + sb << ""; + } + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h b/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h new file mode 100644 index 0000000000..26d3d3b8f9 --- /dev/null +++ b/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. 
All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_UNRECOGNIZED_CHUNK_TYPE_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_UNRECOGNIZED_CHUNK_TYPE_CAUSE_H_ +#include +#include + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.6 +struct UnrecognizedChunkTypeCauseConfig : public ParameterConfig { + static constexpr int kType = 6; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class UnrecognizedChunkTypeCause + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = UnrecognizedChunkTypeCauseConfig::kType; + + explicit UnrecognizedChunkTypeCause(std::vector unrecognized_chunk) + : unrecognized_chunk_(std::move(unrecognized_chunk)) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + rtc::ArrayView unrecognized_chunk() const { + return unrecognized_chunk_; + } + + private: + std::vector unrecognized_chunk_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_UNRECOGNIZED_CHUNK_TYPE_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause_test.cc b/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause_test.cc new file mode 100644 index 0000000000..baff852f40 --- /dev/null +++ b/net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause_test.cc @@ -0,0 
+1,37 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(UnrecognizedChunkTypeCauseTest, SerializeAndDeserialize) { + UnrecognizedChunkTypeCause parameter({1, 2, 3}); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(UnrecognizedChunkTypeCause deserialized, + UnrecognizedChunkTypeCause::Parse(serialized)); + + EXPECT_THAT(deserialized.unrecognized_chunk(), ElementsAre(1, 2, 3)); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/unrecognized_parameter_cause.cc b/net/dcsctp/packet/error_cause/unrecognized_parameter_cause.cc new file mode 100644 index 0000000000..80001a9eae --- /dev/null +++ b/net/dcsctp/packet/error_cause/unrecognized_parameter_cause.cc @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/error_cause/unrecognized_parameter_cause.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.8 + +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cause Code=8 | Cause Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / Unrecognized Parameters / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int UnrecognizedParametersCause::kType; + +absl::optional UnrecognizedParametersCause::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + return UnrecognizedParametersCause(reader->variable_data()); +} + +void UnrecognizedParametersCause::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = + AllocateTLV(out, unrecognized_parameters_.size()); + writer.CopyToVariableData(unrecognized_parameters_); +} + +std::string UnrecognizedParametersCause::ToString() const { + return "Unrecognized Parameters"; +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/unrecognized_parameter_cause.h b/net/dcsctp/packet/error_cause/unrecognized_parameter_cause.h new file mode 100644 index 0000000000..ebec5ed4c3 --- /dev/null +++ b/net/dcsctp/packet/error_cause/unrecognized_parameter_cause.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_UNRECOGNIZED_PARAMETER_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_UNRECOGNIZED_PARAMETER_CAUSE_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.8 +struct UnrecognizedParametersCauseConfig : public ParameterConfig { + static constexpr int kType = 8; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class UnrecognizedParametersCause + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = UnrecognizedParametersCauseConfig::kType; + + explicit UnrecognizedParametersCause( + rtc::ArrayView unrecognized_parameters) + : unrecognized_parameters_(unrecognized_parameters.begin(), + unrecognized_parameters.end()) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + rtc::ArrayView unrecognized_parameters() const { + return unrecognized_parameters_; + } + + private: + std::vector unrecognized_parameters_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_UNRECOGNIZED_PARAMETER_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/unrecognized_parameter_cause_test.cc b/net/dcsctp/packet/error_cause/unrecognized_parameter_cause_test.cc new file mode 100644 index 0000000000..0449599ca6 --- /dev/null +++ b/net/dcsctp/packet/error_cause/unrecognized_parameter_cause_test.cc @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/unrecognized_parameter_cause.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(UnrecognizedParametersCauseTest, SerializeAndDeserialize) { + uint8_t unrecognized_parameters[] = {1, 2, 3}; + UnrecognizedParametersCause parameter(unrecognized_parameters); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(UnrecognizedParametersCause deserialized, + UnrecognizedParametersCause::Parse(serialized)); + + EXPECT_THAT(deserialized.unrecognized_parameters(), ElementsAre(1, 2, 3)); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/unresolvable_address_cause.cc b/net/dcsctp/packet/error_cause/unresolvable_address_cause.cc new file mode 100644 index 0000000000..8108d31aa7 --- /dev/null +++ b/net/dcsctp/packet/error_cause/unresolvable_address_cause.cc @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/error_cause/unresolvable_address_cause.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.5 + +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cause Code=5 | Cause Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / Unresolvable Address / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int UnresolvableAddressCause::kType; + +absl::optional UnresolvableAddressCause::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + return UnresolvableAddressCause(reader->variable_data()); +} + +void UnresolvableAddressCause::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = + AllocateTLV(out, unresolvable_address_.size()); + writer.CopyToVariableData(unresolvable_address_); +} + +std::string UnresolvableAddressCause::ToString() const { + return "Unresolvable Address"; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/unresolvable_address_cause.h b/net/dcsctp/packet/error_cause/unresolvable_address_cause.h new file mode 100644 index 0000000000..c63b3779ef --- /dev/null +++ b/net/dcsctp/packet/error_cause/unresolvable_address_cause.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_UNRESOLVABLE_ADDRESS_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_UNRESOLVABLE_ADDRESS_CAUSE_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.5 +struct UnresolvableAddressCauseConfig : public ParameterConfig { + static constexpr int kType = 5; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class UnresolvableAddressCause + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = UnresolvableAddressCauseConfig::kType; + + explicit UnresolvableAddressCause( + rtc::ArrayView unresolvable_address) + : unresolvable_address_(unresolvable_address.begin(), + unresolvable_address.end()) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + rtc::ArrayView unresolvable_address() const { + return unresolvable_address_; + } + + private: + std::vector unresolvable_address_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_UNRESOLVABLE_ADDRESS_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/unresolvable_address_cause_test.cc b/net/dcsctp/packet/error_cause/unresolvable_address_cause_test.cc new file mode 100644 index 0000000000..688730e6b3 --- /dev/null +++ b/net/dcsctp/packet/error_cause/unresolvable_address_cause_test.cc @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/unresolvable_address_cause.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(UnresolvableAddressCauseTest, SerializeAndDeserialize) { + uint8_t unresolvable_address[] = {1, 2, 3}; + UnresolvableAddressCause parameter(unresolvable_address); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(UnresolvableAddressCause deserialized, + UnresolvableAddressCause::Parse(serialized)); + + EXPECT_THAT(deserialized.unresolvable_address(), ElementsAre(1, 2, 3)); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/user_initiated_abort_cause.cc b/net/dcsctp/packet/error_cause/user_initiated_abort_cause.cc new file mode 100644 index 0000000000..da99aacbfa --- /dev/null +++ b/net/dcsctp/packet/error_cause/user_initiated_abort_cause.cc @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/error_cause/user_initiated_abort_cause.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.12 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Cause Code=12 | Cause Length=Variable | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / Upper Layer Abort Reason / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int UserInitiatedAbortCause::kType; + +absl::optional UserInitiatedAbortCause::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + if (reader->variable_data().empty()) { + return UserInitiatedAbortCause(""); + } + return UserInitiatedAbortCause( + std::string(reinterpret_cast(reader->variable_data().data()), + reader->variable_data().size())); +} + +void UserInitiatedAbortCause::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = + AllocateTLV(out, upper_layer_abort_reason_.size()); + writer.CopyToVariableData(rtc::MakeArrayView( + reinterpret_cast(upper_layer_abort_reason_.data()), + upper_layer_abort_reason_.size())); +} + +std::string UserInitiatedAbortCause::ToString() const { + rtc::StringBuilder sb; + sb << "User-Initiated Abort, reason=" << upper_layer_abort_reason_; + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/error_cause/user_initiated_abort_cause.h b/net/dcsctp/packet/error_cause/user_initiated_abort_cause.h new file mode 100644 index 0000000000..9eb16657b4 --- /dev/null +++ 
b/net/dcsctp/packet/error_cause/user_initiated_abort_cause.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_ERROR_CAUSE_USER_INITIATED_ABORT_CAUSE_H_ +#define NET_DCSCTP_PACKET_ERROR_CAUSE_USER_INITIATED_ABORT_CAUSE_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.10.12 +struct UserInitiatedAbortCauseConfig : public ParameterConfig { + static constexpr int kType = 12; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class UserInitiatedAbortCause : public Parameter, + public TLVTrait { + public: + static constexpr int kType = UserInitiatedAbortCauseConfig::kType; + + explicit UserInitiatedAbortCause(absl::string_view upper_layer_abort_reason) + : upper_layer_abort_reason_(upper_layer_abort_reason) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + absl::string_view upper_layer_abort_reason() const { + return upper_layer_abort_reason_; + } + + private: + std::string upper_layer_abort_reason_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_ERROR_CAUSE_USER_INITIATED_ABORT_CAUSE_H_ diff --git a/net/dcsctp/packet/error_cause/user_initiated_abort_cause_test.cc b/net/dcsctp/packet/error_cause/user_initiated_abort_cause_test.cc new file mode 100644 index 
0000000000..250959e3df --- /dev/null +++ b/net/dcsctp/packet/error_cause/user_initiated_abort_cause_test.cc @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/error_cause/user_initiated_abort_cause.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::SizeIs; + +TEST(UserInitiatedAbortCauseTest, EmptyReason) { + Parameters causes = + Parameters::Builder().Add(UserInitiatedAbortCause("")).Build(); + + ASSERT_HAS_VALUE_AND_ASSIGN(Parameters deserialized, + Parameters::Parse(causes.data())); + ASSERT_THAT(deserialized.descriptors(), SizeIs(1)); + EXPECT_EQ(deserialized.descriptors()[0].type, UserInitiatedAbortCause::kType); + + ASSERT_HAS_VALUE_AND_ASSIGN( + UserInitiatedAbortCause cause, + UserInitiatedAbortCause::Parse(deserialized.descriptors()[0].data)); + + EXPECT_EQ(cause.upper_layer_abort_reason(), ""); +} + +TEST(UserInitiatedAbortCauseTest, SetReason) { + Parameters causes = Parameters::Builder() + .Add(UserInitiatedAbortCause("User called Close")) + .Build(); + + ASSERT_HAS_VALUE_AND_ASSIGN(Parameters deserialized, + Parameters::Parse(causes.data())); + ASSERT_THAT(deserialized.descriptors(), SizeIs(1)); + EXPECT_EQ(deserialized.descriptors()[0].type, UserInitiatedAbortCause::kType); + + ASSERT_HAS_VALUE_AND_ASSIGN( + UserInitiatedAbortCause cause, + 
UserInitiatedAbortCause::Parse(deserialized.descriptors()[0].data)); + + EXPECT_EQ(cause.upper_layer_abort_reason(), "User called Close"); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.cc b/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.cc new file mode 100644 index 0000000000..c33e3e11f6 --- /dev/null +++ b/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.cc @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.6 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Parameter Type = 18 | Parameter Length = 12 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Re-configuration Request Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Number of new streams | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int AddIncomingStreamsRequestParameter::kType; + +absl::optional 
+AddIncomingStreamsRequestParameter::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + ReconfigRequestSN request_sequence_number(reader->Load32<4>()); + uint16_t nbr_of_new_streams = reader->Load16<8>(); + + return AddIncomingStreamsRequestParameter(request_sequence_number, + nbr_of_new_streams); +} + +void AddIncomingStreamsRequestParameter::SerializeTo( + std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out); + writer.Store32<4>(*request_sequence_number_); + writer.Store16<8>(nbr_of_new_streams_); +} + +std::string AddIncomingStreamsRequestParameter::ToString() const { + rtc::StringBuilder sb; + sb << "Add Incoming Streams Request, req_seq_nbr=" + << *request_sequence_number(); + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.h b/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.h new file mode 100644 index 0000000000..3859eb3f7e --- /dev/null +++ b/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_ADD_INCOMING_STREAMS_REQUEST_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_ADD_INCOMING_STREAMS_REQUEST_PARAMETER_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.6 +struct AddIncomingStreamsRequestParameterConfig : ParameterConfig { + static constexpr int kType = 18; + static constexpr size_t kHeaderSize = 12; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class AddIncomingStreamsRequestParameter + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = AddIncomingStreamsRequestParameterConfig::kType; + + explicit AddIncomingStreamsRequestParameter( + ReconfigRequestSN request_sequence_number, + uint16_t nbr_of_new_streams) + : request_sequence_number_(request_sequence_number), + nbr_of_new_streams_(nbr_of_new_streams) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + ReconfigRequestSN request_sequence_number() const { + return request_sequence_number_; + } + uint16_t nbr_of_new_streams() const { return nbr_of_new_streams_; } + + private: + ReconfigRequestSN request_sequence_number_; + uint16_t nbr_of_new_streams_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_ADD_INCOMING_STREAMS_REQUEST_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter_test.cc b/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter_test.cc new file mode 100644 index 0000000000..a29257a8f8 --- /dev/null +++ b/net/dcsctp/packet/parameter/add_incoming_streams_request_parameter_test.cc @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021 The WebRTC project 
authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" + +namespace dcsctp { +namespace { + +TEST(AddIncomingStreamsRequestParameterTest, SerializeAndDeserialize) { + AddIncomingStreamsRequestParameter parameter(ReconfigRequestSN(1), 2); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN( + AddIncomingStreamsRequestParameter deserialized, + AddIncomingStreamsRequestParameter::Parse(serialized)); + + EXPECT_EQ(*deserialized.request_sequence_number(), 1u); + EXPECT_EQ(deserialized.nbr_of_new_streams(), 2u); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.cc b/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.cc new file mode 100644 index 0000000000..4787ee9718 --- /dev/null +++ b/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.cc @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.5 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Parameter Type = 17 | Parameter Length = 12 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Re-configuration Request Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Number of new streams | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int AddOutgoingStreamsRequestParameter::kType; + +absl::optional +AddOutgoingStreamsRequestParameter::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + ReconfigRequestSN request_sequence_number(reader->Load32<4>()); + uint16_t nbr_of_new_streams = reader->Load16<8>(); + + return AddOutgoingStreamsRequestParameter(request_sequence_number, + nbr_of_new_streams); +} + +void AddOutgoingStreamsRequestParameter::SerializeTo( + std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out); + writer.Store32<4>(*request_sequence_number_); + writer.Store16<8>(nbr_of_new_streams_); +} + +std::string AddOutgoingStreamsRequestParameter::ToString() const { + rtc::StringBuilder sb; + sb << "Add Outgoing Streams Request, req_seq_nbr=" + << *request_sequence_number(); + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.h 
b/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.h new file mode 100644 index 0000000000..01e8f91cfa --- /dev/null +++ b/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_ADD_OUTGOING_STREAMS_REQUEST_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_ADD_OUTGOING_STREAMS_REQUEST_PARAMETER_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.5 +struct AddOutgoingStreamsRequestParameterConfig : ParameterConfig { + static constexpr int kType = 17; + static constexpr size_t kHeaderSize = 12; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class AddOutgoingStreamsRequestParameter + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = AddOutgoingStreamsRequestParameterConfig::kType; + + explicit AddOutgoingStreamsRequestParameter( + ReconfigRequestSN request_sequence_number, + uint16_t nbr_of_new_streams) + : request_sequence_number_(request_sequence_number), + nbr_of_new_streams_(nbr_of_new_streams) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + ReconfigRequestSN request_sequence_number() const { + return request_sequence_number_; + } + uint16_t 
nbr_of_new_streams() const { return nbr_of_new_streams_; } + + private: + ReconfigRequestSN request_sequence_number_; + uint16_t nbr_of_new_streams_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_ADD_OUTGOING_STREAMS_REQUEST_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter_test.cc b/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter_test.cc new file mode 100644 index 0000000000..d0303b1ba8 --- /dev/null +++ b/net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter_test.cc @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" + +namespace dcsctp { +namespace { + +TEST(AddOutgoingStreamsRequestParameterTest, SerializeAndDeserialize) { + AddOutgoingStreamsRequestParameter parameter(ReconfigRequestSN(1), 2); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN( + AddOutgoingStreamsRequestParameter deserialized, + AddOutgoingStreamsRequestParameter::Parse(serialized)); + + EXPECT_EQ(*deserialized.request_sequence_number(), 1u); + EXPECT_EQ(deserialized.nbr_of_new_streams(), 2u); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/forward_tsn_supported_parameter.cc b/net/dcsctp/packet/parameter/forward_tsn_supported_parameter.cc new file mode 100644 index 0000000000..7dd8e1923f --- /dev/null +++ 
b/net/dcsctp/packet/parameter/forward_tsn_supported_parameter.cc @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h" + +#include + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc3758#section-3.1 + +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Parameter Type = 49152 | Parameter Length = 4 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int ForwardTsnSupportedParameter::kType; + +absl::optional +ForwardTsnSupportedParameter::Parse(rtc::ArrayView data) { + if (!ParseTLV(data).has_value()) { + return absl::nullopt; + } + return ForwardTsnSupportedParameter(); +} + +void ForwardTsnSupportedParameter::SerializeTo( + std::vector& out) const { + AllocateTLV(out); +} + +std::string ForwardTsnSupportedParameter::ToString() const { + return "Forward TSN Supported"; +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h b/net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h new file mode 100644 index 0000000000..d4cff4ac21 --- /dev/null +++ b/net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_FORWARD_TSN_SUPPORTED_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_FORWARD_TSN_SUPPORTED_PARAMETER_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc3758#section-3.1 +struct ForwardTsnSupportedParameterConfig : ParameterConfig { + static constexpr int kType = 49152; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class ForwardTsnSupportedParameter + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = ForwardTsnSupportedParameterConfig::kType; + + ForwardTsnSupportedParameter() {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_FORWARD_TSN_SUPPORTED_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/forward_tsn_supported_parameter_test.cc b/net/dcsctp/packet/parameter/forward_tsn_supported_parameter_test.cc new file mode 100644 index 0000000000..fb4f983fae --- /dev/null +++ b/net/dcsctp/packet/parameter/forward_tsn_supported_parameter_test.cc @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" + +namespace dcsctp { +namespace { + +TEST(ForwardTsnSupportedParameterTest, SerializeAndDeserialize) { + ForwardTsnSupportedParameter parameter; + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(ForwardTsnSupportedParameter deserialized, + ForwardTsnSupportedParameter::Parse(serialized)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/heartbeat_info_parameter.cc b/net/dcsctp/packet/parameter/heartbeat_info_parameter.cc new file mode 100644 index 0000000000..918976d305 --- /dev/null +++ b/net/dcsctp/packet/parameter/heartbeat_info_parameter.cc @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.5 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 4 | Chunk Flags | Heartbeat Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// \ \ +// / Heartbeat Information TLV (Variable-Length) / +// \ \ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int HeartbeatInfoParameter::kType; + +absl::optional HeartbeatInfoParameter::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + return HeartbeatInfoParameter(reader->variable_data()); +} + +void HeartbeatInfoParameter::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out, info_.size()); + writer.CopyToVariableData(info_); +} + +std::string HeartbeatInfoParameter::ToString() const { + rtc::StringBuilder sb; + sb << "Heartbeat Info parameter (info_length=" << info_.size() << ")"; + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/heartbeat_info_parameter.h b/net/dcsctp/packet/parameter/heartbeat_info_parameter.h new file mode 100644 index 0000000000..ec503a94b2 --- /dev/null +++ b/net/dcsctp/packet/parameter/heartbeat_info_parameter.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_HEARTBEAT_INFO_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_HEARTBEAT_INFO_PARAMETER_H_ +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.5 +struct HeartbeatInfoParameterConfig : ParameterConfig { + static constexpr int kType = 1; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class HeartbeatInfoParameter : public Parameter, + public TLVTrait { + public: + static constexpr int kType = HeartbeatInfoParameterConfig::kType; + + explicit HeartbeatInfoParameter(rtc::ArrayView info) + : info_(info.begin(), info.end()) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + rtc::ArrayView info() const { return info_; } + + private: + std::vector info_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_HEARTBEAT_INFO_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.cc b/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.cc new file mode 100644 index 0000000000..6191adfe9d --- /dev/null +++ b/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.cc @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h" + +#include + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.2 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Parameter Type = 14 | Parameter Length = 8 + 2 * N | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Re-configuration Request Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Number 1 (optional) | Stream Number 2 (optional) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / ...... 
/ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Number N-1 (optional) | Stream Number N (optional) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int IncomingSSNResetRequestParameter::kType; + +absl::optional +IncomingSSNResetRequestParameter::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + ReconfigRequestSN request_sequence_number(reader->Load32<4>()); + + size_t stream_count = reader->variable_data_size() / kStreamIdSize; + std::vector stream_ids; + stream_ids.reserve(stream_count); + for (size_t i = 0; i < stream_count; ++i) { + BoundedByteReader sub_reader = + reader->sub_reader(i * kStreamIdSize); + + stream_ids.push_back(StreamID(sub_reader.Load16<0>())); + } + + return IncomingSSNResetRequestParameter(request_sequence_number, + std::move(stream_ids)); +} + +void IncomingSSNResetRequestParameter::SerializeTo( + std::vector& out) const { + size_t variable_size = stream_ids_.size() * kStreamIdSize; + BoundedByteWriter writer = AllocateTLV(out, variable_size); + + writer.Store32<4>(*request_sequence_number_); + + for (size_t i = 0; i < stream_ids_.size(); ++i) { + BoundedByteWriter sub_writer = + writer.sub_writer(i * kStreamIdSize); + sub_writer.Store16<0>(*stream_ids_[i]); + } +} + +std::string IncomingSSNResetRequestParameter::ToString() const { + rtc::StringBuilder sb; + sb << "Incoming SSN Reset Request, req_seq_nbr=" + << *request_sequence_number(); + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h b/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h new file mode 100644 index 0000000000..18963efafc --- /dev/null +++ b/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_INCOMING_SSN_RESET_REQUEST_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_INCOMING_SSN_RESET_REQUEST_PARAMETER_H_ +#include + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.2 +struct IncomingSSNResetRequestParameterConfig : ParameterConfig { + static constexpr int kType = 14; + static constexpr size_t kHeaderSize = 8; + static constexpr size_t kVariableLengthAlignment = 2; +}; + +class IncomingSSNResetRequestParameter + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = IncomingSSNResetRequestParameterConfig::kType; + + explicit IncomingSSNResetRequestParameter( + ReconfigRequestSN request_sequence_number, + std::vector stream_ids) + : request_sequence_number_(request_sequence_number), + stream_ids_(std::move(stream_ids)) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + ReconfigRequestSN request_sequence_number() const { + return request_sequence_number_; + } + rtc::ArrayView stream_ids() const { return stream_ids_; } + + private: + static constexpr size_t kStreamIdSize = sizeof(uint16_t); + + ReconfigRequestSN request_sequence_number_; + std::vector stream_ids_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_INCOMING_SSN_RESET_REQUEST_PARAMETER_H_ diff --git 
a/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter_test.cc b/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter_test.cc new file mode 100644 index 0000000000..17793f6638 --- /dev/null +++ b/net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter_test.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(IncomingSSNResetRequestParameterTest, SerializeAndDeserialize) { + IncomingSSNResetRequestParameter parameter( + ReconfigRequestSN(1), {StreamID(2), StreamID(3), StreamID(4)}); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN( + IncomingSSNResetRequestParameter deserialized, + IncomingSSNResetRequestParameter::Parse(serialized)); + + EXPECT_EQ(*deserialized.request_sequence_number(), 1u); + EXPECT_THAT(deserialized.stream_ids(), + ElementsAre(StreamID(2), StreamID(3), StreamID(4))); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.cc b/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.cc new file mode 100644 index 0000000000..c25a2426be --- /dev/null +++ b/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.cc @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. 
All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" + +#include + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/public/types.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.1 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Parameter Type = 13 | Parameter Length = 16 + 2 * N | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Re-configuration Request Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Re-configuration Response Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Sender's Last Assigned TSN | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Number 1 (optional) | Stream Number 2 (optional) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// / ...... 
/ +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Stream Number N-1 (optional) | Stream Number N (optional) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int OutgoingSSNResetRequestParameter::kType; + +absl::optional +OutgoingSSNResetRequestParameter::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + ReconfigRequestSN request_sequence_number(reader->Load32<4>()); + ReconfigRequestSN response_sequence_number(reader->Load32<8>()); + TSN sender_last_assigned_tsn(reader->Load32<12>()); + + size_t stream_count = reader->variable_data_size() / kStreamIdSize; + std::vector stream_ids; + stream_ids.reserve(stream_count); + for (size_t i = 0; i < stream_count; ++i) { + BoundedByteReader sub_reader = + reader->sub_reader(i * kStreamIdSize); + + stream_ids.push_back(StreamID(sub_reader.Load16<0>())); + } + + return OutgoingSSNResetRequestParameter( + request_sequence_number, response_sequence_number, + sender_last_assigned_tsn, std::move(stream_ids)); +} + +void OutgoingSSNResetRequestParameter::SerializeTo( + std::vector& out) const { + size_t variable_size = stream_ids_.size() * kStreamIdSize; + BoundedByteWriter writer = AllocateTLV(out, variable_size); + + writer.Store32<4>(*request_sequence_number_); + writer.Store32<8>(*response_sequence_number_); + writer.Store32<12>(*sender_last_assigned_tsn_); + + for (size_t i = 0; i < stream_ids_.size(); ++i) { + BoundedByteWriter sub_writer = + writer.sub_writer(i * kStreamIdSize); + sub_writer.Store16<0>(*stream_ids_[i]); + } +} + +std::string OutgoingSSNResetRequestParameter::ToString() const { + rtc::StringBuilder sb; + sb << "Outgoing SSN Reset Request, req_seq_nbr=" << *request_sequence_number() + << ", resp_seq_nbr=" << *response_sequence_number() + << ", sender_last_asg_tsn=" << *sender_last_assigned_tsn(); + return sb.Release(); +} + +} // namespace dcsctp diff --git 
a/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h b/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h new file mode 100644 index 0000000000..6eb44e079f --- /dev/null +++ b/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_OUTGOING_SSN_RESET_REQUEST_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_OUTGOING_SSN_RESET_REQUEST_PARAMETER_H_ +#include +#include + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.1 +struct OutgoingSSNResetRequestParameterConfig : ParameterConfig { + static constexpr int kType = 13; + static constexpr size_t kHeaderSize = 16; + static constexpr size_t kVariableLengthAlignment = 2; +}; + +class OutgoingSSNResetRequestParameter + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = OutgoingSSNResetRequestParameterConfig::kType; + + explicit OutgoingSSNResetRequestParameter( + ReconfigRequestSN request_sequence_number, + ReconfigRequestSN response_sequence_number, + TSN sender_last_assigned_tsn, + std::vector stream_ids) + : request_sequence_number_(request_sequence_number), + response_sequence_number_(response_sequence_number), + sender_last_assigned_tsn_(sender_last_assigned_tsn), + 
stream_ids_(std::move(stream_ids)) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + ReconfigRequestSN request_sequence_number() const { + return request_sequence_number_; + } + ReconfigRequestSN response_sequence_number() const { + return response_sequence_number_; + } + TSN sender_last_assigned_tsn() const { return sender_last_assigned_tsn_; } + rtc::ArrayView stream_ids() const { return stream_ids_; } + + private: + static constexpr size_t kStreamIdSize = sizeof(uint16_t); + + ReconfigRequestSN request_sequence_number_; + ReconfigRequestSN response_sequence_number_; + TSN sender_last_assigned_tsn_; + std::vector stream_ids_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_OUTGOING_SSN_RESET_REQUEST_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter_test.cc b/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter_test.cc new file mode 100644 index 0000000000..dae73c2fba --- /dev/null +++ b/net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter_test.cc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(OutgoingSSNResetRequestParameterTest, SerializeAndDeserialize) { + OutgoingSSNResetRequestParameter parameter( + ReconfigRequestSN(1), ReconfigRequestSN(2), TSN(3), + {StreamID(4), StreamID(5), StreamID(6)}); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter deserialized, + OutgoingSSNResetRequestParameter::Parse(serialized)); + + EXPECT_EQ(*deserialized.request_sequence_number(), 1u); + EXPECT_EQ(*deserialized.response_sequence_number(), 2u); + EXPECT_EQ(*deserialized.sender_last_assigned_tsn(), 3u); + EXPECT_THAT(deserialized.stream_ids(), + ElementsAre(StreamID(4), StreamID(5), StreamID(6))); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/parameter.cc b/net/dcsctp/packet/parameter/parameter.cc new file mode 100644 index 0000000000..b3b2bffef7 --- /dev/null +++ b/net/dcsctp/packet/parameter/parameter.cc @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/parameter.h" + +#include + +#include +#include +#include +#include +#include + +#include "absl/memory/memory.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/math.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.h" +#include "net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.h" +#include "net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h" +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" +#include "net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h" +#include "net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/state_cookie_parameter.h" +#include "net/dcsctp/packet/parameter/supported_extensions_parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +constexpr size_t kParameterHeaderSize = 4; + +Parameters::Builder& Parameters::Builder::Add(const Parameter& p) { + // https://tools.ietf.org/html/rfc4960#section-3.2.1 + // "If the length of the parameter is not a multiple of 4 bytes, the sender + // pads the parameter at the end (i.e., after the Parameter Value field) with + // all zero bytes." 
+ if (data_.size() % 4 != 0) { + data_.resize(RoundUpTo4(data_.size())); + } + + p.SerializeTo(data_); + return *this; +} + +std::vector Parameters::descriptors() const { + rtc::ArrayView span(data_); + std::vector result; + while (!span.empty()) { + BoundedByteReader header(span); + uint16_t type = header.Load16<0>(); + uint16_t length = header.Load16<2>(); + result.emplace_back(type, span.subview(0, length)); + size_t length_with_padding = RoundUpTo4(length); + if (length_with_padding > span.size()) { + break; + } + span = span.subview(length_with_padding); + } + return result; +} + +absl::optional Parameters::Parse( + rtc::ArrayView data) { + // Validate the parameter descriptors + rtc::ArrayView span(data); + while (!span.empty()) { + if (span.size() < kParameterHeaderSize) { + RTC_DLOG(LS_WARNING) << "Insufficient parameter length"; + return absl::nullopt; + } + BoundedByteReader header(span); + uint16_t length = header.Load16<2>(); + if (length < kParameterHeaderSize || length > span.size()) { + RTC_DLOG(LS_WARNING) << "Invalid parameter length field"; + return absl::nullopt; + } + size_t length_with_padding = RoundUpTo4(length); + if (length_with_padding > span.size()) { + break; + } + span = span.subview(length_with_padding); + } + return Parameters(std::vector(data.begin(), data.end())); +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/parameter.h b/net/dcsctp/packet/parameter/parameter.h new file mode 100644 index 0000000000..e8fa67c8f7 --- /dev/null +++ b/net/dcsctp/packet/parameter/parameter.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_PARAMETER_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +class Parameter { + public: + Parameter() {} + virtual ~Parameter() = default; + + Parameter(const Parameter& other) = default; + Parameter& operator=(const Parameter& other) = default; + + virtual void SerializeTo(std::vector& out) const = 0; + virtual std::string ToString() const = 0; +}; + +struct ParameterDescriptor { + ParameterDescriptor(uint16_t type, rtc::ArrayView data) + : type(type), data(data) {} + uint16_t type; + rtc::ArrayView data; +}; + +class Parameters { + public: + class Builder { + public: + Builder() {} + Builder& Add(const Parameter& p); + Parameters Build() { return Parameters(std::move(data_)); } + + private: + std::vector data_; + }; + + static absl::optional Parse(rtc::ArrayView data); + + Parameters() {} + Parameters(Parameters&& other) = default; + Parameters& operator=(Parameters&& other) = default; + + rtc::ArrayView data() const { return data_; } + std::vector descriptors() const; + + template + absl::optional

get() const { + static_assert(std::is_base_of::value, + "Template parameter not derived from Parameter"); + for (const auto& p : descriptors()) { + if (p.type == P::kType) { + return P::Parse(p.data); + } + } + return absl::nullopt; + } + + private: + explicit Parameters(std::vector data) : data_(std::move(data)) {} + std::vector data_; +}; + +struct ParameterConfig { + static constexpr int kTypeSizeInBytes = 2; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/parameter_test.cc b/net/dcsctp/packet/parameter/parameter_test.cc new file mode 100644 index 0000000000..467e324592 --- /dev/null +++ b/net/dcsctp/packet/parameter/parameter_test.cc @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/parameter.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; +using ::testing::SizeIs; + +TEST(ParameterTest, SerializeDeserializeParameter) { + Parameters parameters = + Parameters::Builder() + .Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(123), + ReconfigRequestSN(456), + TSN(789), {StreamID(42)})) + .Build(); + + rtc::ArrayView serialized = parameters.data(); + + ASSERT_HAS_VALUE_AND_ASSIGN(Parameters parsed, Parameters::Parse(serialized)); + auto descriptors = parsed.descriptors(); + ASSERT_THAT(descriptors, SizeIs(1)); + EXPECT_THAT(descriptors[0].type, OutgoingSSNResetRequestParameter::kType); + + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter parsed_param, + OutgoingSSNResetRequestParameter::Parse(descriptors[0].data)); + EXPECT_EQ(*parsed_param.request_sequence_number(), 123u); + EXPECT_EQ(*parsed_param.response_sequence_number(), 456u); + EXPECT_EQ(*parsed_param.sender_last_assigned_tsn(), 789u); + EXPECT_THAT(parsed_param.stream_ids(), ElementsAre(StreamID(42))); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/reconfiguration_response_parameter.cc b/net/dcsctp/packet/parameter/reconfiguration_response_parameter.cc new file mode 100644 index 0000000000..fafb204acc --- /dev/null +++ b/net/dcsctp/packet/parameter/reconfiguration_response_parameter.cc @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h" + +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.4 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Parameter Type = 16 | Parameter Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Re-configuration Response Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Result | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Sender's Next TSN (optional) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Receiver's Next TSN (optional) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int ReconfigurationResponseParameter::kType; + +absl::string_view ToString(ReconfigurationResponseParameter::Result result) { + switch (result) { + case ReconfigurationResponseParameter::Result::kSuccessNothingToDo: + return "Success: nothing to do"; + case ReconfigurationResponseParameter::Result::kSuccessPerformed: + return "Success: performed"; + case ReconfigurationResponseParameter::Result::kDenied: + return "Denied"; + case ReconfigurationResponseParameter::Result::kErrorWrongSSN: + return "Error: wrong ssn"; + case ReconfigurationResponseParameter::Result:: 
+ kErrorRequestAlreadyInProgress: + return "Error: request already in progress"; + case ReconfigurationResponseParameter::Result::kErrorBadSequenceNumber: + return "Error: bad sequence number"; + case ReconfigurationResponseParameter::Result::kInProgress: + return "In progress"; + } +} + +absl::optional +ReconfigurationResponseParameter::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + ReconfigRequestSN response_sequence_number(reader->Load32<4>()); + Result result; + uint32_t result_nbr = reader->Load32<8>(); + switch (result_nbr) { + case 0: + result = ReconfigurationResponseParameter::Result::kSuccessNothingToDo; + break; + case 1: + result = ReconfigurationResponseParameter::Result::kSuccessPerformed; + break; + case 2: + result = ReconfigurationResponseParameter::Result::kDenied; + break; + case 3: + result = ReconfigurationResponseParameter::Result::kErrorWrongSSN; + break; + case 4: + result = ReconfigurationResponseParameter::Result:: + kErrorRequestAlreadyInProgress; + break; + case 5: + result = + ReconfigurationResponseParameter::Result::kErrorBadSequenceNumber; + break; + case 6: + result = ReconfigurationResponseParameter::Result::kInProgress; + break; + default: + RTC_DLOG(LS_WARNING) << "Invalid reconfig response result: " + << result_nbr; + return absl::nullopt; + } + + if (reader->variable_data().empty()) { + return ReconfigurationResponseParameter(response_sequence_number, result); + } else if (reader->variable_data_size() != kNextTsnHeaderSize) { + RTC_DLOG(LS_WARNING) << "Invalid parameter size"; + return absl::nullopt; + } + + BoundedByteReader sub_reader = + reader->sub_reader(0); + + TSN sender_next_tsn(sub_reader.Load32<0>()); + TSN receiver_next_tsn(sub_reader.Load32<4>()); + + return ReconfigurationResponseParameter(response_sequence_number, result, + sender_next_tsn, receiver_next_tsn); +} + +void ReconfigurationResponseParameter::SerializeTo( + 
std::vector& out) const { + size_t variable_size = + (sender_next_tsn().has_value() ? kNextTsnHeaderSize : 0); + BoundedByteWriter writer = AllocateTLV(out, variable_size); + + writer.Store32<4>(*response_sequence_number_); + uint32_t result_nbr = + static_cast::type>(result_); + writer.Store32<8>(result_nbr); + + if (sender_next_tsn().has_value()) { + BoundedByteWriter sub_writer = + writer.sub_writer(0); + + sub_writer.Store32<0>(sender_next_tsn_.has_value() ? **sender_next_tsn_ + : 0); + sub_writer.Store32<4>(receiver_next_tsn_.has_value() ? **receiver_next_tsn_ + : 0); + } +} + +std::string ReconfigurationResponseParameter::ToString() const { + rtc::StringBuilder sb; + sb << "Re-configuration Response, resp_seq_nbr=" + << *response_sequence_number(); + return sb.Release(); +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/reconfiguration_response_parameter.h b/net/dcsctp/packet/parameter/reconfiguration_response_parameter.h new file mode 100644 index 0000000000..c5a68acb33 --- /dev/null +++ b/net/dcsctp/packet/parameter/reconfiguration_response_parameter.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_RECONFIGURATION_RESPONSE_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_RECONFIGURATION_RESPONSE_PARAMETER_H_ +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.4 +struct ReconfigurationResponseParameterConfig : ParameterConfig { + static constexpr int kType = 16; + static constexpr size_t kHeaderSize = 12; + static constexpr size_t kVariableLengthAlignment = 4; +}; + +class ReconfigurationResponseParameter + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = ReconfigurationResponseParameterConfig::kType; + + enum class Result { + kSuccessNothingToDo = 0, + kSuccessPerformed = 1, + kDenied = 2, + kErrorWrongSSN = 3, + kErrorRequestAlreadyInProgress = 4, + kErrorBadSequenceNumber = 5, + kInProgress = 6, + }; + + ReconfigurationResponseParameter(ReconfigRequestSN response_sequence_number, + Result result) + : response_sequence_number_(response_sequence_number), + result_(result), + sender_next_tsn_(absl::nullopt), + receiver_next_tsn_(absl::nullopt) {} + + explicit ReconfigurationResponseParameter( + ReconfigRequestSN response_sequence_number, + Result result, + TSN sender_next_tsn, + TSN receiver_next_tsn) + : response_sequence_number_(response_sequence_number), + result_(result), + sender_next_tsn_(sender_next_tsn), + receiver_next_tsn_(receiver_next_tsn) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + ReconfigRequestSN response_sequence_number() const { + return response_sequence_number_; + } + Result result() const { return result_; } + absl::optional sender_next_tsn() const 
{ return sender_next_tsn_; } + absl::optional receiver_next_tsn() const { return receiver_next_tsn_; } + + private: + static constexpr size_t kNextTsnHeaderSize = 8; + ReconfigRequestSN response_sequence_number_; + Result result_; + absl::optional sender_next_tsn_; + absl::optional receiver_next_tsn_; +}; + +absl::string_view ToString(ReconfigurationResponseParameter::Result result); + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_RECONFIGURATION_RESPONSE_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/reconfiguration_response_parameter_test.cc b/net/dcsctp/packet/parameter/reconfiguration_response_parameter_test.cc new file mode 100644 index 0000000000..8125d93cd0 --- /dev/null +++ b/net/dcsctp/packet/parameter/reconfiguration_response_parameter_test.cc @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h" + +#include + +#include +#include + +#include "absl/types/optional.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(ReconfigurationResponseParameterTest, SerializeAndDeserializeFirstForm) { + ReconfigurationResponseParameter parameter( + ReconfigRequestSN(1), + ReconfigurationResponseParameter::Result::kSuccessPerformed); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN( + ReconfigurationResponseParameter deserialized, + ReconfigurationResponseParameter::Parse(serialized)); + + EXPECT_EQ(*deserialized.response_sequence_number(), 1u); + EXPECT_EQ(deserialized.result(), + ReconfigurationResponseParameter::Result::kSuccessPerformed); + EXPECT_EQ(deserialized.sender_next_tsn(), absl::nullopt); + EXPECT_EQ(deserialized.receiver_next_tsn(), absl::nullopt); +} + +TEST(ReconfigurationResponseParameterTest, + SerializeAndDeserializeFirstFormSecondForm) { + ReconfigurationResponseParameter parameter( + ReconfigRequestSN(1), + ReconfigurationResponseParameter::Result::kSuccessPerformed, TSN(2), + TSN(3)); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN( + ReconfigurationResponseParameter deserialized, + ReconfigurationResponseParameter::Parse(serialized)); + + EXPECT_EQ(*deserialized.response_sequence_number(), 1u); + EXPECT_EQ(deserialized.result(), + ReconfigurationResponseParameter::Result::kSuccessPerformed); + EXPECT_TRUE(deserialized.sender_next_tsn().has_value()); + EXPECT_EQ(**deserialized.sender_next_tsn(), 2u); + EXPECT_TRUE(deserialized.receiver_next_tsn().has_value()); + EXPECT_EQ(**deserialized.receiver_next_tsn(), 3u); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.cc 
b/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.cc new file mode 100644 index 0000000000..d656e0db8f --- /dev/null +++ b/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.cc @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.3 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Parameter Type = 15 | Parameter Length = 8 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Re-configuration Request Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int SSNTSNResetRequestParameter::kType; + +absl::optional SSNTSNResetRequestParameter::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + ReconfigRequestSN request_sequence_number(reader->Load32<4>()); + + return SSNTSNResetRequestParameter(request_sequence_number); +} + +void SSNTSNResetRequestParameter::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out); + 
writer.Store32<4>(*request_sequence_number_); +} + +std::string SSNTSNResetRequestParameter::ToString() const { + rtc::StringBuilder sb; + sb << "SSN/TSN Reset Request, req_seq_nbr=" << *request_sequence_number(); + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.h b/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.h new file mode 100644 index 0000000000..e31d7ebe8f --- /dev/null +++ b/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_SSN_TSN_RESET_REQUEST_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_SSN_TSN_RESET_REQUEST_PARAMETER_H_ +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc6525#section-4.3 +struct SSNTSNResetRequestParameterConfig : ParameterConfig { + static constexpr int kType = 15; + static constexpr size_t kHeaderSize = 8; + static constexpr size_t kVariableLengthAlignment = 0; +}; + +class SSNTSNResetRequestParameter + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = SSNTSNResetRequestParameterConfig::kType; + + explicit SSNTSNResetRequestParameter( + ReconfigRequestSN request_sequence_number) + : request_sequence_number_(request_sequence_number) {} + + static absl::optional Parse( + rtc::ArrayView 
data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + ReconfigRequestSN request_sequence_number() const { + return request_sequence_number_; + } + + private: + ReconfigRequestSN request_sequence_number_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_SSN_TSN_RESET_REQUEST_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter_test.cc b/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter_test.cc new file mode 100644 index 0000000000..eeb973cbcb --- /dev/null +++ b/net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter_test.cc @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.h" + +#include + +#include +#include + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(SSNTSNResetRequestParameterTest, SerializeAndDeserialize) { + SSNTSNResetRequestParameter parameter(ReconfigRequestSN(1)); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(SSNTSNResetRequestParameter deserialized, + SSNTSNResetRequestParameter::Parse(serialized)); + + EXPECT_EQ(*deserialized.request_sequence_number(), 1u); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/state_cookie_parameter.cc b/net/dcsctp/packet/parameter/state_cookie_parameter.cc new file mode 100644 index 0000000000..9777aa6667 --- /dev/null +++ b/net/dcsctp/packet/parameter/state_cookie_parameter.cc @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/state_cookie_parameter.h" + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.3.1 + +constexpr int StateCookieParameter::kType; + +absl::optional StateCookieParameter::Parse( + rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + return StateCookieParameter(reader->variable_data()); +} + +void StateCookieParameter::SerializeTo(std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out, data_.size()); + writer.CopyToVariableData(data_); +} + +std::string StateCookieParameter::ToString() const { + rtc::StringBuilder sb; + sb << "State Cookie parameter (cookie_length=" << data_.size() << ")"; + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/state_cookie_parameter.h b/net/dcsctp/packet/parameter/state_cookie_parameter.h new file mode 100644 index 0000000000..f4355495e2 --- /dev/null +++ b/net/dcsctp/packet/parameter/state_cookie_parameter.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_STATE_COOKIE_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_STATE_COOKIE_PARAMETER_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc4960#section-3.3.3.1 +struct StateCookieParameterConfig : ParameterConfig { + static constexpr int kType = 7; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class StateCookieParameter : public Parameter, + public TLVTrait { + public: + static constexpr int kType = StateCookieParameterConfig::kType; + + explicit StateCookieParameter(rtc::ArrayView data) + : data_(data.begin(), data.end()) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + rtc::ArrayView data() const { return data_; } + + private: + std::vector data_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_PARAMETER_STATE_COOKIE_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/state_cookie_parameter_test.cc b/net/dcsctp/packet/parameter/state_cookie_parameter_test.cc new file mode 100644 index 0000000000..bcca38b586 --- /dev/null +++ b/net/dcsctp/packet/parameter/state_cookie_parameter_test.cc @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/state_cookie_parameter.h" + +#include + +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(StateCookieParameterTest, SerializeAndDeserialize) { + uint8_t cookie[] = {1, 2, 3}; + StateCookieParameter parameter(cookie); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(StateCookieParameter deserialized, + StateCookieParameter::Parse(serialized)); + + EXPECT_THAT(deserialized.data(), ElementsAre(1, 2, 3)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/supported_extensions_parameter.cc b/net/dcsctp/packet/parameter/supported_extensions_parameter.cc new file mode 100644 index 0000000000..6a8fb214de --- /dev/null +++ b/net/dcsctp/packet/parameter/supported_extensions_parameter.cc @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/parameter/supported_extensions_parameter.h" + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/str_join.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc5061#section-4.2.7 + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Parameter Type = 0x8008 | Parameter Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | CHUNK TYPE 1 | CHUNK TYPE 2 | CHUNK TYPE 3 | CHUNK TYPE 4 | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | .... | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | CHUNK TYPE N | PAD | PAD | PAD | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +constexpr int SupportedExtensionsParameter::kType; + +absl::optional +SupportedExtensionsParameter::Parse(rtc::ArrayView data) { + absl::optional> reader = ParseTLV(data); + if (!reader.has_value()) { + return absl::nullopt; + } + + std::vector chunk_types(reader->variable_data().begin(), + reader->variable_data().end()); + return SupportedExtensionsParameter(std::move(chunk_types)); +} + +void SupportedExtensionsParameter::SerializeTo( + std::vector& out) const { + BoundedByteWriter writer = AllocateTLV(out, chunk_types_.size()); + writer.CopyToVariableData(chunk_types_); +} + +std::string SupportedExtensionsParameter::ToString() const { + rtc::StringBuilder sb; + sb << "Supported Extensions (" << StrJoin(chunk_types_, ", ") << ")"; + return sb.Release(); +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/parameter/supported_extensions_parameter.h 
b/net/dcsctp/packet/parameter/supported_extensions_parameter.h new file mode 100644 index 0000000000..5689fd8035 --- /dev/null +++ b/net/dcsctp/packet/parameter/supported_extensions_parameter.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_PARAMETER_SUPPORTED_EXTENSIONS_PARAMETER_H_ +#define NET_DCSCTP_PACKET_PARAMETER_SUPPORTED_EXTENSIONS_PARAMETER_H_ +#include + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" + +namespace dcsctp { + +// https://tools.ietf.org/html/rfc5061#section-4.2.7 +struct SupportedExtensionsParameterConfig : ParameterConfig { + static constexpr int kType = 0x8008; + static constexpr size_t kHeaderSize = 4; + static constexpr size_t kVariableLengthAlignment = 1; +}; + +class SupportedExtensionsParameter + : public Parameter, + public TLVTrait { + public: + static constexpr int kType = SupportedExtensionsParameterConfig::kType; + + explicit SupportedExtensionsParameter(std::vector chunk_types) + : chunk_types_(std::move(chunk_types)) {} + + static absl::optional Parse( + rtc::ArrayView data); + + void SerializeTo(std::vector& out) const override; + std::string ToString() const override; + + bool supports(uint8_t chunk_type) const { + return std::find(chunk_types_.begin(), chunk_types_.end(), chunk_type) != + chunk_types_.end(); + } + + rtc::ArrayView chunk_types() const { return chunk_types_; } + + private: + std::vector chunk_types_; +}; +} // namespace dcsctp + 
+#endif // NET_DCSCTP_PACKET_PARAMETER_SUPPORTED_EXTENSIONS_PARAMETER_H_ diff --git a/net/dcsctp/packet/parameter/supported_extensions_parameter_test.cc b/net/dcsctp/packet/parameter/supported_extensions_parameter_test.cc new file mode 100644 index 0000000000..c870af2e70 --- /dev/null +++ b/net/dcsctp/packet/parameter/supported_extensions_parameter_test.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/parameter/supported_extensions_parameter.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +TEST(SupportedExtensionsParameterTest, SerializeAndDeserialize) { + SupportedExtensionsParameter parameter({1, 2, 3}); + + std::vector serialized; + parameter.SerializeTo(serialized); + + ASSERT_HAS_VALUE_AND_ASSIGN(SupportedExtensionsParameter deserialized, + SupportedExtensionsParameter::Parse(serialized)); + + EXPECT_THAT(deserialized.chunk_types(), ElementsAre(1, 2, 3)); + EXPECT_TRUE(deserialized.supports(1)); + EXPECT_TRUE(deserialized.supports(2)); + EXPECT_TRUE(deserialized.supports(3)); + EXPECT_FALSE(deserialized.supports(4)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/sctp_packet.cc b/net/dcsctp/packet/sctp_packet.cc new file mode 100644 index 0000000000..3e419c5978 --- /dev/null +++ b/net/dcsctp/packet/sctp_packet.cc @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/sctp_packet.h" + +#include + +#include +#include +#include +#include + +#include "absl/memory/memory.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/math.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/crc32c.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_format.h" + +namespace dcsctp { +namespace { +constexpr size_t kMaxUdpPacketSize = 65535; +constexpr size_t kChunkTlvHeaderSize = 4; +constexpr size_t kExpectedDescriptorCount = 4; +} // namespace + +/* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Source Port Number | Destination Port Number | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Verification Tag | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Checksum | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ + +SctpPacket::Builder::Builder(VerificationTag verification_tag, + const DcSctpOptions& options) + : verification_tag_(verification_tag), + source_port_(options.local_port), + dest_port_(options.remote_port), + max_packet_size_(RoundDownTo4(options.mtu)) {} + +SctpPacket::Builder& SctpPacket::Builder::Add(const Chunk& chunk) { + if (out_.empty()) { + out_.reserve(max_packet_size_); + out_.resize(SctpPacket::kHeaderSize); + BoundedByteWriter buffer(out_); + 
buffer.Store16<0>(source_port_); + buffer.Store16<2>(dest_port_); + buffer.Store32<4>(*verification_tag_); + // Checksum is at offset 8 - written when calling Build(); + } + RTC_DCHECK(IsDivisibleBy4(out_.size())); + + chunk.SerializeTo(out_); + if (out_.size() % 4 != 0) { + out_.resize(RoundUpTo4(out_.size())); + } + + RTC_DCHECK(out_.size() <= max_packet_size_) + << "Exceeded max size, data=" << out_.size() + << ", max_size=" << max_packet_size_; + return *this; +} + +size_t SctpPacket::Builder::bytes_remaining() const { + if (out_.empty()) { + // The packet header (CommonHeader) hasn't been written yet: + return max_packet_size_ - kHeaderSize; + } else if (out_.size() > max_packet_size_) { + RTC_NOTREACHED() << "Exceeded max size, data=" << out_.size() + << ", max_size=" << max_packet_size_; + return 0; + } + return max_packet_size_ - out_.size(); +} + +std::vector SctpPacket::Builder::Build() { + std::vector out; + out_.swap(out); + + if (!out.empty()) { + uint32_t crc = GenerateCrc32C(out); + BoundedByteWriter(out).Store32<8>(crc); + } + + RTC_DCHECK(out.size() <= max_packet_size_) + << "Exceeded max size, data=" << out.size() + << ", max_size=" << max_packet_size_; + + return out; +} + +absl::optional SctpPacket::Parse( + rtc::ArrayView data, + bool disable_checksum_verification) { + if (data.size() < kHeaderSize + kChunkTlvHeaderSize || + data.size() > kMaxUdpPacketSize) { + RTC_DLOG(LS_WARNING) << "Invalid packet size"; + return absl::nullopt; + } + + BoundedByteReader reader(data); + + CommonHeader common_header; + common_header.source_port = reader.Load16<0>(); + common_header.destination_port = reader.Load16<2>(); + common_header.verification_tag = VerificationTag(reader.Load32<4>()); + common_header.checksum = reader.Load32<8>(); + + // Create a copy of the packet, which will be held by this object. + std::vector data_copy = + std::vector(data.begin(), data.end()); + + // Verify the checksum. The checksum field must be zero when that's done. 
+ BoundedByteWriter(data_copy).Store32<8>(0); + uint32_t calculated_checksum = GenerateCrc32C(data_copy); + if (!disable_checksum_verification && + calculated_checksum != common_header.checksum) { + RTC_DLOG(LS_WARNING) << rtc::StringFormat( + "Invalid packet checksum, packet_checksum=0x%08x, " + "calculated_checksum=0x%08x", + common_header.checksum, calculated_checksum); + return absl::nullopt; + } + // Restore the checksum in the header. + BoundedByteWriter(data_copy).Store32<8>(common_header.checksum); + + // Validate and parse the chunk headers in the message. + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Chunk Type | Chunk Flags | Chunk Length | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + std::vector descriptors; + descriptors.reserve(kExpectedDescriptorCount); + rtc::ArrayView descriptor_data = + rtc::ArrayView(data_copy).subview(kHeaderSize); + while (!descriptor_data.empty()) { + if (descriptor_data.size() < kChunkTlvHeaderSize) { + RTC_DLOG(LS_WARNING) << "Too small chunk"; + return absl::nullopt; + } + BoundedByteReader chunk_header(descriptor_data); + uint8_t type = chunk_header.Load8<0>(); + uint8_t flags = chunk_header.Load8<1>(); + uint16_t length = chunk_header.Load16<2>(); + uint16_t padded_length = RoundUpTo4(length); + if (padded_length > descriptor_data.size()) { + RTC_DLOG(LS_WARNING) << "Too large chunk. length=" << length + << ", remaining=" << descriptor_data.size(); + return absl::nullopt; + } else if (padded_length < kChunkTlvHeaderSize) { + RTC_DLOG(LS_WARNING) << "Too small chunk. 
length=" << length; + return absl::nullopt; + } + descriptors.emplace_back(type, flags, + descriptor_data.subview(0, padded_length)); + descriptor_data = descriptor_data.subview(padded_length); + } + + // Note that iterators (and pointer) are guaranteed to be stable when moving a + // std::vector, and `descriptors` have pointers to within `data_copy`. + return SctpPacket(common_header, std::move(data_copy), + std::move(descriptors)); +} +} // namespace dcsctp diff --git a/net/dcsctp/packet/sctp_packet.h b/net/dcsctp/packet/sctp_packet.h new file mode 100644 index 0000000000..2600caf7a9 --- /dev/null +++ b/net/dcsctp/packet/sctp_packet.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PACKET_SCTP_PACKET_H_ +#define NET_DCSCTP_PACKET_SCTP_PACKET_H_ + +#include + +#include +#include +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/public/dcsctp_options.h" + +namespace dcsctp { + +// The "Common Header", which every SCTP packet starts with, and is described in +// https://tools.ietf.org/html/rfc4960#section-3.1. +struct CommonHeader { + uint16_t source_port; + uint16_t destination_port; + VerificationTag verification_tag; + uint32_t checksum; +}; + +// Represents an immutable (received or to-be-sent) SCTP packet. 
+class SctpPacket { + public: + static constexpr size_t kHeaderSize = 12; + + struct ChunkDescriptor { + ChunkDescriptor(uint8_t type, + uint8_t flags, + rtc::ArrayView data) + : type(type), flags(flags), data(data) {} + uint8_t type; + uint8_t flags; + rtc::ArrayView data; + }; + + SctpPacket(SctpPacket&& other) = default; + SctpPacket& operator=(SctpPacket&& other) = default; + SctpPacket(const SctpPacket&) = delete; + SctpPacket& operator=(const SctpPacket&) = delete; + + // Used for building SctpPacket, as those are immutable. + class Builder { + public: + Builder(VerificationTag verification_tag, const DcSctpOptions& options); + + Builder(Builder&& other) = default; + Builder& operator=(Builder&& other) = default; + + // Adds a chunk to the to-be-built SCTP packet. + Builder& Add(const Chunk& chunk); + + // The number of bytes remaining in the packet for chunk storage until the + // packet reaches its maximum size. + size_t bytes_remaining() const; + + // Indicates if any packets have been added to the builder. + bool empty() const { return out_.empty(); } + + // Returns the payload of the build SCTP packet. The Builder will be cleared + // after having called this function, and can be used to build a new packet. + std::vector Build(); + + private: + void WritePacketHeader(); + VerificationTag verification_tag_; + uint16_t source_port_; + uint16_t dest_port_; + // The maximum packet size is always even divisible by four, as chunks are + // always padded to a size even divisible by four. + size_t max_packet_size_; + std::vector out_; + }; + + // Parses `data` as an SCTP packet and returns it if it validates. + static absl::optional Parse( + rtc::ArrayView data, + bool disable_checksum_verification = false); + + // Returns the SCTP common header. + const CommonHeader& common_header() const { return common_header_; } + + // Returns the chunks (types and offsets) within the packet. 
+ rtc::ArrayView descriptors() const { + return descriptors_; + } + + private: + SctpPacket(const CommonHeader& common_header, + std::vector data, + std::vector descriptors) + : common_header_(common_header), + data_(std::move(data)), + descriptors_(std::move(descriptors)) {} + + CommonHeader common_header_; + + // As the `descriptors_` refer to offset within data, and since SctpPacket is + // movable, `data` needs to be pointer stable, which it is according to + // http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2321 + std::vector data_; + // The chunks and their offsets within `data_ `. + std::vector descriptors_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PACKET_SCTP_PACKET_H_ diff --git a/net/dcsctp/packet/sctp_packet_test.cc b/net/dcsctp/packet/sctp_packet_test.cc new file mode 100644 index 0000000000..7438315eec --- /dev/null +++ b/net/dcsctp/packet/sctp_packet_test.cc @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/sctp_packet.h" + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/common/math.h" +#include "net/dcsctp/packet/chunk/abort_chunk.h" +#include "net/dcsctp/packet/chunk/cookie_ack_chunk.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/init_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/error_cause/user_initiated_abort_cause.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::SizeIs; + +constexpr VerificationTag kVerificationTag = VerificationTag(0x12345678); + +TEST(SctpPacketTest, DeserializeSimplePacketFromCapture) { + /* + Stream Control Transmission Protocol, Src Port: 5000 (5000), Dst Port: 5000 + (5000) Source port: 5000 Destination port: 5000 Verification tag: 0x00000000 + [Association index: 1] + Checksum: 0xaa019d33 [unverified] + [Checksum Status: Unverified] + INIT chunk (Outbound streams: 1000, inbound streams: 1000) + Chunk type: INIT (1) + Chunk flags: 0x00 + Chunk length: 90 + Initiate tag: 0x0eddca08 + Advertised receiver window credit (a_rwnd): 131072 + Number of outbound streams: 1000 + Number of inbound streams: 1000 + Initial TSN: 1426601527 + ECN parameter + Parameter type: ECN (0x8000) + Parameter length: 4 + Forward TSN supported parameter + Parameter type: Forward TSN supported (0xc000) + Parameter length: 4 + Supported Extensions parameter (Supported types: FORWARD_TSN, AUTH, + ASCONF, ASCONF_ACK, RE_CONFIG) Parameter type: Supported Extensions + (0x8008) Parameter length: 9 Supported chunk type: FORWARD_TSN (192) Supported + chunk type: AUTH (15) Supported 
chunk type: ASCONF (193) Supported chunk type: + ASCONF_ACK (128) Supported chunk type: RE_CONFIG (130) Parameter padding: + 000000 Random parameter Parameter type: Random (0x8002) Parameter length: 36 + Random number: c5a86155090e6f420050634cc8d6b908dfd53e17c99cb143… + Requested HMAC Algorithm parameter (Supported HMACs: SHA-1) + Parameter type: Requested HMAC Algorithm (0x8004) + Parameter length: 6 + HMAC identifier: SHA-1 (1) + Parameter padding: 0000 + Authenticated Chunk list parameter (Chunk types to be authenticated: + ASCONF_ACK, ASCONF) Parameter type: Authenticated Chunk list + (0x8003) Parameter length: 6 Chunk type: ASCONF_ACK (128) Chunk type: ASCONF + (193) Chunk padding: 0000 + */ + + uint8_t data[] = { + 0x13, 0x88, 0x13, 0x88, 0x00, 0x00, 0x00, 0x00, 0xaa, 0x01, 0x9d, 0x33, + 0x01, 0x00, 0x00, 0x5a, 0x0e, 0xdd, 0xca, 0x08, 0x00, 0x02, 0x00, 0x00, + 0x03, 0xe8, 0x03, 0xe8, 0x55, 0x08, 0x36, 0x37, 0x80, 0x00, 0x00, 0x04, + 0xc0, 0x00, 0x00, 0x04, 0x80, 0x08, 0x00, 0x09, 0xc0, 0x0f, 0xc1, 0x80, + 0x82, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x24, 0xc5, 0xa8, 0x61, 0x55, + 0x09, 0x0e, 0x6f, 0x42, 0x00, 0x50, 0x63, 0x4c, 0xc8, 0xd6, 0xb9, 0x08, + 0xdf, 0xd5, 0x3e, 0x17, 0xc9, 0x9c, 0xb1, 0x43, 0x28, 0x4e, 0xaf, 0x64, + 0x68, 0x2a, 0xc2, 0x97, 0x80, 0x04, 0x00, 0x06, 0x00, 0x01, 0x00, 0x00, + 0x80, 0x03, 0x00, 0x06, 0x80, 0xc1, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(data)); + EXPECT_EQ(packet.common_header().source_port, 5000); + EXPECT_EQ(packet.common_header().destination_port, 5000); + EXPECT_EQ(packet.common_header().verification_tag, VerificationTag(0)); + EXPECT_EQ(packet.common_header().checksum, 0xaa019d33); + + EXPECT_THAT(packet.descriptors(), SizeIs(1)); + EXPECT_EQ(packet.descriptors()[0].type, InitChunk::kType); + ASSERT_HAS_VALUE_AND_ASSIGN(InitChunk init, + InitChunk::Parse(packet.descriptors()[0].data)); + EXPECT_EQ(init.initial_tsn(), TSN(1426601527)); +} + +TEST(SctpPacketTest, 
DeserializePacketWithTwoChunks) { + /* + Stream Control Transmission Protocol, Src Port: 1234 (1234), + Dst Port: 4321 (4321) + Source port: 1234 + Destination port: 4321 + Verification tag: 0x697e3a4e + [Association index: 3] + Checksum: 0xc06e8b36 [unverified] + [Checksum Status: Unverified] + COOKIE_ACK chunk + Chunk type: COOKIE_ACK (11) + Chunk flags: 0x00 + Chunk length: 4 + SACK chunk (Cumulative TSN: 2930332242, a_rwnd: 131072, + gaps: 0, duplicate TSNs: 0) + Chunk type: SACK (3) + Chunk flags: 0x00 + Chunk length: 16 + Cumulative TSN ACK: 2930332242 + Advertised receiver window credit (a_rwnd): 131072 + Number of gap acknowledgement blocks: 0 + Number of duplicated TSNs: 0 + */ + + uint8_t data[] = {0x04, 0xd2, 0x10, 0xe1, 0x69, 0x7e, 0x3a, 0x4e, + 0xc0, 0x6e, 0x8b, 0x36, 0x0b, 0x00, 0x00, 0x04, + 0x03, 0x00, 0x00, 0x10, 0xae, 0xa9, 0x52, 0x52, + 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(data)); + EXPECT_EQ(packet.common_header().source_port, 1234); + EXPECT_EQ(packet.common_header().destination_port, 4321); + EXPECT_EQ(packet.common_header().verification_tag, + VerificationTag(0x697e3a4eu)); + EXPECT_EQ(packet.common_header().checksum, 0xc06e8b36u); + + EXPECT_THAT(packet.descriptors(), SizeIs(2)); + EXPECT_EQ(packet.descriptors()[0].type, CookieAckChunk::kType); + EXPECT_EQ(packet.descriptors()[1].type, SackChunk::kType); + ASSERT_HAS_VALUE_AND_ASSIGN( + CookieAckChunk cookie_ack, + CookieAckChunk::Parse(packet.descriptors()[0].data)); + ASSERT_HAS_VALUE_AND_ASSIGN(SackChunk sack, + SackChunk::Parse(packet.descriptors()[1].data)); +} + +TEST(SctpPacketTest, DeserializePacketWithWrongChecksum) { + /* + Stream Control Transmission Protocol, Src Port: 5000 (5000), + Dst Port: 5000 (5000) + Source port: 5000 + Destination port: 5000 + Verification tag: 0x0eddca08 + [Association index: 1] + Checksum: 0x2a81f531 [unverified] + [Checksum Status: Unverified] + SACK chunk (Cumulative TSN: 
1426601536, a_rwnd: 131072, + gaps: 0, duplicate TSNs: 0) + Chunk type: SACK (3) + Chunk flags: 0x00 + Chunk length: 16 + Cumulative TSN ACK: 1426601536 + Advertised receiver window credit (a_rwnd): 131072 + Number of gap acknowledgement blocks: 0 + Number of duplicated TSNs: 0 + */ + + uint8_t data[] = {0x13, 0x88, 0x13, 0x88, 0x0e, 0xdd, 0xca, 0x08, 0x2a, 0x81, + 0xf5, 0x31, 0x03, 0x00, 0x00, 0x10, 0x55, 0x08, 0x36, 0x40, + 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + EXPECT_FALSE(SctpPacket::Parse(data).has_value()); +} + +TEST(SctpPacketTest, DeserializePacketDontValidateChecksum) { + /* + Stream Control Transmission Protocol, Src Port: 5000 (5000), + Dst Port: 5000 (5000) + Source port: 5000 + Destination port: 5000 + Verification tag: 0x0eddca08 + [Association index: 1] + Checksum: 0x2a81f531 [unverified] + [Checksum Status: Unverified] + SACK chunk (Cumulative TSN: 1426601536, a_rwnd: 131072, + gaps: 0, duplicate TSNs: 0) + Chunk type: SACK (3) + Chunk flags: 0x00 + Chunk length: 16 + Cumulative TSN ACK: 1426601536 + Advertised receiver window credit (a_rwnd): 131072 + Number of gap acknowledgement blocks: 0 + Number of duplicated TSNs: 0 + */ + + uint8_t data[] = {0x13, 0x88, 0x13, 0x88, 0x0e, 0xdd, 0xca, 0x08, 0x2a, 0x81, + 0xf5, 0x31, 0x03, 0x00, 0x00, 0x10, 0x55, 0x08, 0x36, 0x40, + 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + ASSERT_HAS_VALUE_AND_ASSIGN( + SctpPacket packet, + SctpPacket::Parse(data, /*disable_checksum_verification=*/true)); + EXPECT_EQ(packet.common_header().source_port, 5000); + EXPECT_EQ(packet.common_header().destination_port, 5000); + EXPECT_EQ(packet.common_header().verification_tag, + VerificationTag(0x0eddca08u)); + EXPECT_EQ(packet.common_header().checksum, 0x2a81f531u); +} + +TEST(SctpPacketTest, SerializeAndDeserializeSingleChunk) { + SctpPacket::Builder b(kVerificationTag, {}); + InitChunk init(/*initiate_tag=*/VerificationTag(123), /*a_rwnd=*/456, + /*nbr_outbound_streams=*/65535, + 
/*nbr_inbound_streams=*/65534, /*initial_tsn=*/TSN(789), + /*parameters=*/Parameters()); + + b.Add(init); + std::vector serialized = b.Build(); + + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(serialized)); + + EXPECT_EQ(packet.common_header().verification_tag, kVerificationTag); + + ASSERT_THAT(packet.descriptors(), SizeIs(1)); + EXPECT_EQ(packet.descriptors()[0].type, InitChunk::kType); + + ASSERT_HAS_VALUE_AND_ASSIGN(InitChunk deserialized, + InitChunk::Parse(packet.descriptors()[0].data)); + EXPECT_EQ(deserialized.initiate_tag(), VerificationTag(123)); + EXPECT_EQ(deserialized.a_rwnd(), 456u); + EXPECT_EQ(deserialized.nbr_outbound_streams(), 65535u); + EXPECT_EQ(deserialized.nbr_inbound_streams(), 65534u); + EXPECT_EQ(deserialized.initial_tsn(), TSN(789)); +} + +TEST(SctpPacketTest, SerializeAndDeserializeThreeChunks) { + SctpPacket::Builder b(kVerificationTag, {}); + b.Add(SackChunk(/*cumulative_tsn_ack=*/TSN(999), /*a_rwnd=*/456, + {SackChunk::GapAckBlock(2, 3)}, + /*duplicate_tsns=*/{TSN(1), TSN(2), TSN(3)})); + b.Add(DataChunk(TSN(123), StreamID(456), SSN(789), PPID(9090), + /*payload=*/{1, 2, 3, 4, 5}, + /*options=*/{})); + b.Add(DataChunk(TSN(124), StreamID(654), SSN(987), PPID(909), + /*payload=*/{5, 4, 3, 3, 1}, + /*options=*/{})); + + std::vector serialized = b.Build(); + + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(serialized)); + + EXPECT_EQ(packet.common_header().verification_tag, kVerificationTag); + + ASSERT_THAT(packet.descriptors(), SizeIs(3)); + EXPECT_EQ(packet.descriptors()[0].type, SackChunk::kType); + EXPECT_EQ(packet.descriptors()[1].type, DataChunk::kType); + EXPECT_EQ(packet.descriptors()[2].type, DataChunk::kType); + + ASSERT_HAS_VALUE_AND_ASSIGN(SackChunk sack, + SackChunk::Parse(packet.descriptors()[0].data)); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(999)); + EXPECT_EQ(sack.a_rwnd(), 456u); + + ASSERT_HAS_VALUE_AND_ASSIGN(DataChunk data1, + DataChunk::Parse(packet.descriptors()[1].data)); + 
EXPECT_EQ(data1.tsn(), TSN(123)); + + ASSERT_HAS_VALUE_AND_ASSIGN(DataChunk data2, + DataChunk::Parse(packet.descriptors()[2].data)); + EXPECT_EQ(data2.tsn(), TSN(124)); +} + +TEST(SctpPacketTest, ParseAbortWithEmptyCause) { + SctpPacket::Builder b(kVerificationTag, {}); + b.Add(AbortChunk( + /*filled_in_verification_tag=*/true, + Parameters::Builder().Add(UserInitiatedAbortCause("")).Build())); + + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(b.Build())); + + EXPECT_EQ(packet.common_header().verification_tag, kVerificationTag); + + ASSERT_THAT(packet.descriptors(), SizeIs(1)); + EXPECT_EQ(packet.descriptors()[0].type, AbortChunk::kType); + + ASSERT_HAS_VALUE_AND_ASSIGN(AbortChunk abort, + AbortChunk::Parse(packet.descriptors()[0].data)); + ASSERT_HAS_VALUE_AND_ASSIGN( + UserInitiatedAbortCause cause, + abort.error_causes().get()); + EXPECT_EQ(cause.upper_layer_abort_reason(), ""); +} + +TEST(SctpPacketTest, DetectPacketWithZeroSizeChunk) { + uint8_t data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0x0a, 0x0a, 0x0a, 0x5c, + 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00, 0x00}; + + EXPECT_FALSE(SctpPacket::Parse(data, true).has_value()); +} + +TEST(SctpPacketTest, ReturnsCorrectSpaceAvailableToStayWithinMTU) { + DcSctpOptions options; + options.mtu = 1191; + + SctpPacket::Builder builder(VerificationTag(123), options); + + // Chunks will be padded to an even 4 bytes, so the maximum packet size should + // be rounded down. + const size_t kMaxPacketSize = RoundDownTo4(options.mtu); + EXPECT_EQ(kMaxPacketSize, 1188u); + + const size_t kSctpHeaderSize = 12; + EXPECT_EQ(builder.bytes_remaining(), kMaxPacketSize - kSctpHeaderSize); + EXPECT_EQ(builder.bytes_remaining(), 1176u); + + // Add a smaller packet first. 
+ DataChunk::Options data_options; + + std::vector payload1(183); + builder.Add( + DataChunk(TSN(1), StreamID(1), SSN(0), PPID(53), payload1, data_options)); + + size_t chunk1_size = RoundUpTo4(DataChunk::kHeaderSize + payload1.size()); + EXPECT_EQ(builder.bytes_remaining(), + kMaxPacketSize - kSctpHeaderSize - chunk1_size); + EXPECT_EQ(builder.bytes_remaining(), 976u); // Hand-calculated. + + std::vector payload2(957); + builder.Add( + DataChunk(TSN(1), StreamID(1), SSN(0), PPID(53), payload2, data_options)); + + size_t chunk2_size = RoundUpTo4(DataChunk::kHeaderSize + payload2.size()); + EXPECT_EQ(builder.bytes_remaining(), + kMaxPacketSize - kSctpHeaderSize - chunk1_size - chunk2_size); + EXPECT_EQ(builder.bytes_remaining(), 0u); // Hand-calculated. +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/packet/tlv_trait.cc b/net/dcsctp/packet/tlv_trait.cc new file mode 100644 index 0000000000..493b6a4613 --- /dev/null +++ b/net/dcsctp/packet/tlv_trait.cc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/packet/tlv_trait.h" + +#include "rtc_base/logging.h" + +namespace dcsctp { +namespace tlv_trait_impl { +void ReportInvalidSize(size_t actual_size, size_t expected_size) { + RTC_DLOG(LS_WARNING) << "Invalid size (" << actual_size + << ", expected minimum " << expected_size << " bytes)"; +} + +void ReportInvalidType(int actual_type, int expected_type) { + RTC_DLOG(LS_WARNING) << "Invalid type (" << actual_type << ", expected " + << expected_type << ")"; +} + +void ReportInvalidFixedLengthField(size_t value, size_t expected) { + RTC_DLOG(LS_WARNING) << "Invalid length field (" << value << ", expected " + << expected << " bytes)"; +} + +void ReportInvalidVariableLengthField(size_t value, size_t available) { + RTC_DLOG(LS_WARNING) << "Invalid length field (" << value << ", available " + << available << " bytes)"; +} + +void ReportInvalidPadding(size_t padding_bytes) { + RTC_DLOG(LS_WARNING) << "Invalid padding (" << padding_bytes << " bytes)"; +} + +void ReportInvalidLengthMultiple(size_t length, size_t alignment) { + RTC_DLOG(LS_WARNING) << "Invalid length field (" << length + << ", expected an even multiple of " << alignment + << " bytes)"; +} +} // namespace tlv_trait_impl +} // namespace dcsctp diff --git a/net/dcsctp/packet/tlv_trait.h b/net/dcsctp/packet/tlv_trait.h new file mode 100644 index 0000000000..a3c728efd7 --- /dev/null +++ b/net/dcsctp/packet/tlv_trait.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PACKET_TLV_TRAIT_H_ +#define NET_DCSCTP_PACKET_TLV_TRAIT_H_ + +#include +#include + +#include +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" + +namespace dcsctp { +namespace tlv_trait_impl { +// Logging functions, only to be used by TLVTrait, which is a templated class. +void ReportInvalidSize(size_t actual_size, size_t expected_size); +void ReportInvalidType(int actual_type, int expected_type); +void ReportInvalidFixedLengthField(size_t value, size_t expected); +void ReportInvalidVariableLengthField(size_t value, size_t available); +void ReportInvalidPadding(size_t padding_bytes); +void ReportInvalidLengthMultiple(size_t length, size_t alignment); +} // namespace tlv_trait_impl + +// Various entities in SCTP are padded data blocks, with a type and length +// field at fixed offsets, all stored in a 4-byte header. +// +// See e.g. https://tools.ietf.org/html/rfc4960#section-3.2 and +// https://tools.ietf.org/html/rfc4960#section-3.2.1 +// +// These are helper classes for writing and parsing that data, which in SCTP is +// called Type-Length-Value, or TLV. +// +// This templated class is configurable - a struct passed in as template +// parameter with the following expected members: +// * kType - The type field's value +// * kTypeSizeInBytes - The type field's width in bytes. +// Either 1 or 2. +// * kHeaderSize - The fixed size header +// * kVariableLengthAlignment - The size alignment on the variable data. Set +// to zero (0) if no variable data is used. +// +// This class is to be used as a trait +// (https://en.wikipedia.org/wiki/Trait_(computer_programming)) that adds a few +// public and protected members and which a class inherits from when it +// represents a type-length-value object. 
+template +class TLVTrait { + private: + static constexpr size_t kTlvHeaderSize = 4; + + protected: + static constexpr size_t kHeaderSize = Config::kHeaderSize; + + static_assert(Config::kTypeSizeInBytes == 1 || Config::kTypeSizeInBytes == 2, + "kTypeSizeInBytes must be 1 or 2"); + static_assert(Config::kHeaderSize >= kTlvHeaderSize, + "HeaderSize must be >= 4 bytes"); + static_assert((Config::kHeaderSize % 4 == 0), + "kHeaderSize must be an even multiple of 4 bytes"); + static_assert((Config::kVariableLengthAlignment == 0 || + Config::kVariableLengthAlignment == 1 || + Config::kVariableLengthAlignment == 2 || + Config::kVariableLengthAlignment == 4 || + Config::kVariableLengthAlignment == 8), + "kVariableLengthAlignment must be an allowed value"); + + // Validates the data with regards to size, alignment and type. + // If valid, returns a bounded buffer. + static absl::optional> ParseTLV( + rtc::ArrayView data) { + if (data.size() < Config::kHeaderSize) { + tlv_trait_impl::ReportInvalidSize(data.size(), Config::kHeaderSize); + return absl::nullopt; + } + BoundedByteReader tlv_header(data); + + const int type = (Config::kTypeSizeInBytes == 1) + ? tlv_header.template Load8<0>() + : tlv_header.template Load16<0>(); + + if (type != Config::kType) { + tlv_trait_impl::ReportInvalidType(type, Config::kType); + return absl::nullopt; + } + const uint16_t length = tlv_header.template Load16<2>(); + if (Config::kVariableLengthAlignment == 0) { + // Don't expect any variable length data at all. + if (length != Config::kHeaderSize || data.size() != Config::kHeaderSize) { + tlv_trait_impl::ReportInvalidFixedLengthField(length, + Config::kHeaderSize); + return absl::nullopt; + } + } else { + // Expect variable length data - verify its size alignment. 
+ if (length > data.size() || length < Config::kHeaderSize) { + tlv_trait_impl::ReportInvalidVariableLengthField(length, data.size()); + return absl::nullopt; + } + const size_t padding = data.size() - length; + if (padding > 3) { + // https://tools.ietf.org/html/rfc4960#section-3.2 + // "This padding MUST NOT be more than 3 bytes in total" + tlv_trait_impl::ReportInvalidPadding(padding); + return absl::nullopt; + } + if (!ValidateLengthAlignment(length, Config::kVariableLengthAlignment)) { + tlv_trait_impl::ReportInvalidLengthMultiple( + length, Config::kVariableLengthAlignment); + return absl::nullopt; + } + } + return BoundedByteReader(data.subview(0, length)); + } + + // Allocates space for data with a static header size, as defined by + // `Config::kHeaderSize` and a variable footer, as defined by `variable_size` + // (which may be 0) and writes the type and length in the header. + static BoundedByteWriter AllocateTLV( + std::vector& out, + size_t variable_size = 0) { + const size_t offset = out.size(); + const size_t size = Config::kHeaderSize + variable_size; + out.resize(offset + size); + + BoundedByteWriter tlv_header( + rtc::ArrayView(out.data() + offset, kTlvHeaderSize)); + if (Config::kTypeSizeInBytes == 1) { + tlv_header.template Store8<0>(static_cast(Config::kType)); + } else { + tlv_header.template Store16<0>(Config::kType); + } + tlv_header.template Store16<2>(size); + + return BoundedByteWriter( + rtc::ArrayView(out.data() + offset, size)); + } + + private: + static bool ValidateLengthAlignment(uint16_t length, size_t alignment) { + // This is to avoid MSVC believing there could be a "mod by zero", when it + // certainly can't. 
+    if (alignment == 0) {
+      return true;
+    }
+    return (length % alignment) == 0;
+  }
+};
+
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_PACKET_TLV_TRAIT_H_
diff --git a/net/dcsctp/packet/tlv_trait_test.cc b/net/dcsctp/packet/tlv_trait_test.cc
new file mode 100644
index 0000000000..a0dd1a1136
--- /dev/null
+++ b/net/dcsctp/packet/tlv_trait_test.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/packet/tlv_trait.h"
+
+#include <vector>
+
+#include "api/array_view.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/gunit.h"
+#include "test/gmock.h"
+
+namespace dcsctp {
+namespace {
+using ::testing::ElementsAre;
+using ::testing::SizeIs;
+
+struct OneByteTypeConfig {
+  static constexpr int kTypeSizeInBytes = 1;
+  static constexpr int kType = 0x49;
+  static constexpr size_t kHeaderSize = 12;
+  static constexpr int kVariableLengthAlignment = 4;
+};
+
+class OneByteChunk : public TLVTrait<OneByteTypeConfig> {
+ public:
+  static constexpr size_t kVariableSize = 4;
+
+  void SerializeTo(std::vector<uint8_t>& out) {
+    BoundedByteWriter<OneByteTypeConfig::kHeaderSize> writer =
+        AllocateTLV(out, kVariableSize);
+    writer.Store32<4>(0x01020304);
+    writer.Store16<8>(0x0506);
+    writer.Store16<10>(0x0708);
+
+    uint8_t variable_data[kVariableSize] = {0xDE, 0xAD, 0xBE, 0xEF};
+    writer.CopyToVariableData(rtc::ArrayView<const uint8_t>(variable_data));
+  }
+
+  static absl::optional<BoundedByteReader<OneByteTypeConfig::kHeaderSize>>
+  Parse(rtc::ArrayView<const uint8_t> data) {
+    return ParseTLV(data);
+  }
+};
+
+TEST(TlvDataTest, CanWriteOneByteTypeTlvs) {
+  std::vector<uint8_t> out;
+  OneByteChunk().SerializeTo(out);
+
+  EXPECT_THAT(out, SizeIs(OneByteTypeConfig::kHeaderSize +
+                          OneByteChunk::kVariableSize));
+ EXPECT_THAT(out, ElementsAre(0x49, 0x00, 0x00, 0x10, 0x01, 0x02, 0x03, 0x04, + 0x05, 0x06, 0x07, 0x08, 0xDE, 0xAD, 0xBE, 0xEF)); +} + +TEST(TlvDataTest, CanReadOneByteTypeTlvs) { + uint8_t data[] = {0x49, 0x00, 0x00, 0x10, 0x01, 0x02, 0x03, 0x04, + 0x05, 0x06, 0x07, 0x08, 0xDE, 0xAD, 0xBE, 0xEF}; + + absl::optional> reader = + OneByteChunk::Parse(data); + ASSERT_TRUE(reader.has_value()); + EXPECT_EQ(reader->Load32<4>(), 0x01020304U); + EXPECT_EQ(reader->Load16<8>(), 0x0506U); + EXPECT_EQ(reader->Load16<10>(), 0x0708U); + EXPECT_THAT(reader->variable_data(), ElementsAre(0xDE, 0xAD, 0xBE, 0xEF)); +} + +struct TwoByteTypeConfig { + static constexpr int kTypeSizeInBytes = 2; + static constexpr int kType = 31337; + static constexpr size_t kHeaderSize = 8; + static constexpr int kVariableLengthAlignment = 2; +}; + +class TwoByteChunk : public TLVTrait { + public: + static constexpr size_t kVariableSize = 8; + + void SerializeTo(std::vector& out) { + BoundedByteWriter writer = + AllocateTLV(out, kVariableSize); + writer.Store32<4>(0x01020304U); + + uint8_t variable_data[] = {0x05, 0x06, 0x07, 0x08, 0xDE, 0xAD, 0xBE, 0xEF}; + writer.CopyToVariableData(rtc::ArrayView(variable_data)); + } + + static absl::optional> + Parse(rtc::ArrayView data) { + return ParseTLV(data); + } +}; + +TEST(TlvDataTest, CanWriteTwoByteTypeTlvs) { + std::vector out; + + TwoByteChunk().SerializeTo(out); + + EXPECT_THAT(out, SizeIs(TwoByteTypeConfig::kHeaderSize + + TwoByteChunk::kVariableSize)); + EXPECT_THAT(out, ElementsAre(0x7A, 0x69, 0x00, 0x10, 0x01, 0x02, 0x03, 0x04, + 0x05, 0x06, 0x07, 0x08, 0xDE, 0xAD, 0xBE, 0xEF)); +} + +TEST(TlvDataTest, CanReadTwoByteTypeTlvs) { + uint8_t data[] = {0x7A, 0x69, 0x00, 0x10, 0x01, 0x02, 0x03, 0x04, + 0x05, 0x06, 0x07, 0x08, 0xDE, 0xAD, 0xBE, 0xEF}; + + absl::optional> reader = + TwoByteChunk::Parse(data); + EXPECT_TRUE(reader.has_value()); + EXPECT_EQ(reader->Load32<4>(), 0x01020304U); + EXPECT_THAT(reader->variable_data(), + ElementsAre(0x05, 0x06, 0x07, 
0x08, 0xDE, 0xAD, 0xBE, 0xEF)); +} + +TEST(TlvDataTest, CanHandleInvalidLengthSmallerThanFixedSize) { + // Has 'length=6', which is below the kHeaderSize of 8. + uint8_t data[] = {0x7A, 0x69, 0x00, 0x06, 0x01, 0x02, 0x03, 0x04}; + + EXPECT_FALSE(TwoByteChunk::Parse(data).has_value()); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/public/BUILD.gn b/net/dcsctp/public/BUILD.gn new file mode 100644 index 0000000000..ced94de151 --- /dev/null +++ b/net/dcsctp/public/BUILD.gn @@ -0,0 +1,103 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../../webrtc.gni") + +rtc_source_set("strong_alias") { + sources = [ "strong_alias.h" ] +} + +rtc_source_set("types") { + deps = [ + ":strong_alias", + "../../../api:array_view", + ] + sources = [ + "dcsctp_message.h", + "dcsctp_options.h", + "types.h", + ] +} + +rtc_source_set("socket") { + deps = [ + ":types", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + ] + sources = [ + "dcsctp_socket.h", + "packet_observer.h", + "timeout.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_source_set("factory") { + deps = [ + ":socket", + ":types", + "../socket:dcsctp_socket", + ] + sources = [ + "dcsctp_socket_factory.cc", + "dcsctp_socket_factory.h", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +rtc_source_set("mocks") { + testonly = true + sources = [ "mock_dcsctp_socket.h" ] + deps = [ + ":socket", + "../../../test:test_support", + ] +} + +rtc_source_set("utils") { + deps = [ + 
":socket", + ":types", + "../../../api:array_view", + "../../../rtc_base:logging", + "../../../rtc_base:stringutils", + "../socket:dcsctp_socket", + ] + sources = [ + "text_pcap_packet_observer.cc", + "text_pcap_packet_observer.h", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +if (rtc_include_tests) { + rtc_library("dcsctp_public_unittests") { + testonly = true + + deps = [ + ":mocks", + ":strong_alias", + ":types", + "../../../rtc_base:checks", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + ] + sources = [ + "mock_dcsctp_socket_test.cc", + "strong_alias_test.cc", + "types_test.cc", + ] + } +} diff --git a/net/dcsctp/public/dcsctp_message.h b/net/dcsctp/public/dcsctp_message.h new file mode 100644 index 0000000000..38e6763916 --- /dev/null +++ b/net/dcsctp/public/dcsctp_message.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PUBLIC_DCSCTP_MESSAGE_H_ +#define NET_DCSCTP_PUBLIC_DCSCTP_MESSAGE_H_ + +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// An SCTP message is a group of bytes sent and received as a whole on a +// specified stream identifier (`stream_id`), and with a payload protocol +// identifier (`ppid`). 
+class DcSctpMessage {
+ public:
+  DcSctpMessage(StreamID stream_id, PPID ppid, std::vector<uint8_t> payload)
+      : stream_id_(stream_id), ppid_(ppid), payload_(std::move(payload)) {}
+
+  DcSctpMessage(DcSctpMessage&& other) = default;
+  DcSctpMessage& operator=(DcSctpMessage&& other) = default;
+  DcSctpMessage(const DcSctpMessage&) = delete;
+  DcSctpMessage& operator=(const DcSctpMessage&) = delete;
+
+  // The stream identifier to which the message is sent.
+  StreamID stream_id() const { return stream_id_; }
+
+  // The payload protocol identifier (ppid) associated with the message.
+  PPID ppid() const { return ppid_; }
+
+  // The payload of the message.
+  rtc::ArrayView<const uint8_t> payload() const { return payload_; }
+
+  // When destructing the message, extracts the payload.
+  std::vector<uint8_t> ReleasePayload() && { return std::move(payload_); }
+
+ private:
+  StreamID stream_id_;
+  PPID ppid_;
+  std::vector<uint8_t> payload_;
+};
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_PUBLIC_DCSCTP_MESSAGE_H_
diff --git a/net/dcsctp/public/dcsctp_options.h b/net/dcsctp/public/dcsctp_options.h
new file mode 100644
index 0000000000..caefcff4f5
--- /dev/null
+++ b/net/dcsctp/public/dcsctp_options.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef NET_DCSCTP_PUBLIC_DCSCTP_OPTIONS_H_
+#define NET_DCSCTP_PUBLIC_DCSCTP_OPTIONS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "net/dcsctp/public/types.h"
+
+namespace dcsctp {
+struct DcSctpOptions {
+  // The largest safe SCTP packet.
Starting from the minimum guaranteed MTU + // value of 1280 for IPv6 (which may not support fragmentation), take off 85 + // bytes for DTLS/TURN/TCP/IP and ciphertext overhead. + // + // Additionally, it's possible that TURN adds an additional 4 bytes of + // overhead after a channel has been established, so an additional 4 bytes is + // subtracted + // + // 1280 IPV6 MTU + // -40 IPV6 header + // -8 UDP + // -24 GCM Cipher + // -13 DTLS record header + // -4 TURN ChannelData + // = 1191 bytes. + static constexpr size_t kMaxSafeMTUSize = 1191; + + // The local port for which the socket is supposed to be bound to. Incoming + // packets will be verified that they are sent to this port number and all + // outgoing packets will have this port number as source port. + int local_port = 5000; + + // The remote port to send packets to. All outgoing packets will have this + // port number as destination port. + int remote_port = 5000; + + // The announced maximum number of incoming streams. Note that this value is + // constant and can't be currently increased in run-time as "Add Incoming + // Streams Request" in RFC6525 isn't supported. + // + // The socket implementation doesn't have any per-stream fixed costs, which is + // why the default value is set to be the maximum value. + uint16_t announced_maximum_incoming_streams = 65535; + + // The announced maximum number of outgoing streams. Note that this value is + // constant and can't be currently increased in run-time as "Add Outgoing + // Streams Request" in RFC6525 isn't supported. + // + // The socket implementation doesn't have any per-stream fixed costs, which is + // why the default value is set to be the maximum value. + uint16_t announced_maximum_outgoing_streams = 65535; + + // Maximum SCTP packet size. The library will limit the size of generated + // packets to be less than or equal to this number. This does not include any + // overhead of DTLS, TURN, UDP or IP headers. 
+ size_t mtu = kMaxSafeMTUSize; + + // The largest allowed message payload to be sent. Messages will be rejected + // if their payload is larger than this value. Note that this doesn't affect + // incoming messages, which may larger than this value (but smaller than + // `max_receiver_window_buffer_size`). + size_t max_message_size = 256 * 1024; + + // Maximum received window buffer size. This should be a bit larger than the + // largest sized message you want to be able to receive. This essentially + // limits the memory usage on the receive side. Note that memory is allocated + // dynamically, and this represents the maximum amount of buffered data. The + // actual memory usage of the library will be smaller in normal operation, and + // will be larger than this due to other allocations and overhead if the + // buffer is fully utilized. + size_t max_receiver_window_buffer_size = 5 * 1024 * 1024; + + // Maximum send buffer size. It will not be possible to queue more data than + // this before sending it. + size_t max_send_buffer_size = 2'000'000; + + // A threshold that, when the amount of data in the send buffer goes below + // this value, will trigger `DcSctpCallbacks::OnTotalBufferedAmountLow`. + size_t total_buffered_amount_low_threshold = 1'800'000; + + // Max allowed RTT value. When the RTT is measured and it's found to be larger + // than this value, it will be discarded and not used for e.g. any RTO + // calculation. The default value is an extreme maximum but can be adapted + // to better match the environment. + DurationMs rtt_max = DurationMs(8000); + + // Initial RTO value. + DurationMs rto_initial = DurationMs(500); + + // Maximum RTO value. + DurationMs rto_max = DurationMs(800); + + // Minimum RTO value. This must be larger than an expected peer delayed ack + // timeout. + DurationMs rto_min = DurationMs(220); + + // T1-init timeout. + DurationMs t1_init_timeout = DurationMs(1000); + + // T1-cookie timeout. 
+ DurationMs t1_cookie_timeout = DurationMs(1000); + + // T2-shutdown timeout. + DurationMs t2_shutdown_timeout = DurationMs(1000); + + // Hearbeat interval (on idle connections only). Set to zero to disable. + DurationMs heartbeat_interval = DurationMs(30000); + + // The maximum time when a SACK will be sent from the arrival of an + // unacknowledged packet. Whatever is smallest of RTO/2 and this will be used. + DurationMs delayed_ack_max_timeout = DurationMs(200); + + // Do slow start as TCP - double cwnd instead of increasing it by MTU. + bool slow_start_tcp_style = false; + + // The initial congestion window size, in number of MTUs. + // See https://tools.ietf.org/html/rfc4960#section-7.2.1 which defaults at ~3 + // and https://research.google/pubs/pub36640/ which argues for at least ten + // segments. + size_t cwnd_mtus_initial = 10; + + // The minimum congestion window size, in number of MTUs. + // See https://tools.ietf.org/html/rfc4960#section-7.2.3. + size_t cwnd_mtus_min = 4; + + // Maximum Data Retransmit Attempts (per DATA chunk). + int max_retransmissions = 10; + + // Max.Init.Retransmits (https://tools.ietf.org/html/rfc4960#section-15) + int max_init_retransmits = 8; + + // RFC3758 Partial Reliability Extension + bool enable_partial_reliability = true; + + // RFC8260 Stream Schedulers and User Message Interleaving + bool enable_message_interleaving = false; + + // If RTO should be added to heartbeat_interval + bool heartbeat_interval_include_rtt = true; + + // Disables SCTP packet crc32 verification. Useful when running with fuzzers. + bool disable_checksum_verification = false; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_DCSCTP_OPTIONS_H_ diff --git a/net/dcsctp/public/dcsctp_socket.h b/net/dcsctp/public/dcsctp_socket.h new file mode 100644 index 0000000000..f07f54e044 --- /dev/null +++ b/net/dcsctp/public/dcsctp_socket.h @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_H_ +#define NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_H_ + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/packet_observer.h" +#include "net/dcsctp/public/timeout.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// The socket/association state +enum class SocketState { + // The socket is closed. + kClosed, + // The socket has initiated a connection, which is not yet established. Note + // that for incoming connections and for reconnections when the socket is + // already connected, the socket will not transition to this state. + kConnecting, + // The socket is connected, and the connection is established. + kConnected, + // The socket is shutting down, and the connection is not yet closed. + kShuttingDown, +}; + +// Send options for sending messages +struct SendOptions { + // If the message should be sent with unordered message delivery. + IsUnordered unordered = IsUnordered(false); + + // If set, will discard messages that haven't been correctly sent and + // received before the lifetime has expired. This is only available if the + // peer supports Partial Reliability Extension (RFC3758). + absl::optional lifetime = absl::nullopt; + + // If set, limits the number of retransmissions. This is only available + // if the peer supports Partial Reliability Extension (RFC3758). 
+ absl::optional max_retransmissions = absl::nullopt; +}; + +enum class ErrorKind { + // Indicates that no error has occurred. This will never be the case when + // `OnError` or `OnAborted` is called. + kNoError, + // There have been too many retries or timeouts, and the library has given up. + kTooManyRetries, + // A command was received that is only possible to execute when the socket is + // connected, which it is not. + kNotConnected, + // Parsing of the command or its parameters failed. + kParseFailed, + // Commands are received in the wrong sequence, which indicates a + // synchronisation mismatch between the peers. + kWrongSequence, + // The peer has reported an issue using ERROR or ABORT command. + kPeerReported, + // The peer has performed a protocol violation. + kProtocolViolation, + // The receive or send buffers have been exhausted. + kResourceExhaustion, + // The client has performed an invalid operation. + kUnsupportedOperation, +}; + +inline constexpr absl::string_view ToString(ErrorKind error) { + switch (error) { + case ErrorKind::kNoError: + return "NO_ERROR"; + case ErrorKind::kTooManyRetries: + return "TOO_MANY_RETRIES"; + case ErrorKind::kNotConnected: + return "NOT_CONNECTED"; + case ErrorKind::kParseFailed: + return "PARSE_FAILED"; + case ErrorKind::kWrongSequence: + return "WRONG_SEQUENCE"; + case ErrorKind::kPeerReported: + return "PEER_REPORTED"; + case ErrorKind::kProtocolViolation: + return "PROTOCOL_VIOLATION"; + case ErrorKind::kResourceExhaustion: + return "RESOURCE_EXHAUSTION"; + case ErrorKind::kUnsupportedOperation: + return "UNSUPPORTED_OPERATION"; + } +} + +enum class SendStatus { + // The message was enqueued successfully. As sending the message is done + // asynchronously, this is no guarantee that the message has been actually + // sent. + kSuccess, + // The message was rejected as the payload was empty (which is not allowed in + // SCTP). 
+ kErrorMessageEmpty, + // The message was rejected as the payload was larger than what has been set + // as `DcSctpOptions.max_message_size`. + kErrorMessageTooLarge, + // The message could not be enqueued as the socket is out of resources. This + // mainly indicates that the send queue is full. + kErrorResourceExhaustion, + // The message could not be sent as the socket is shutting down. + kErrorShuttingDown, +}; + +inline constexpr absl::string_view ToString(SendStatus error) { + switch (error) { + case SendStatus::kSuccess: + return "SUCCESS"; + case SendStatus::kErrorMessageEmpty: + return "ERROR_MESSAGE_EMPTY"; + case SendStatus::kErrorMessageTooLarge: + return "ERROR_MESSAGE_TOO_LARGE"; + case SendStatus::kErrorResourceExhaustion: + return "ERROR_RESOURCE_EXHAUSTION"; + case SendStatus::kErrorShuttingDown: + return "ERROR_SHUTTING_DOWN"; + } +} + +// Return value of ResetStreams. +enum class ResetStreamsStatus { + // If the connection is not yet established, this will be returned. + kNotConnected, + // Indicates that ResetStreams operation has been successfully initiated. + kPerformed, + // Indicates that ResetStreams has failed as it's not supported by the peer. + kNotSupported, +}; + +inline constexpr absl::string_view ToString(ResetStreamsStatus error) { + switch (error) { + case ResetStreamsStatus::kNotConnected: + return "NOT_CONNECTED"; + case ResetStreamsStatus::kPerformed: + return "PERFORMED"; + case ResetStreamsStatus::kNotSupported: + return "NOT_SUPPORTED"; + } +} + +// Callbacks that the DcSctpSocket will be done synchronously to the owning +// client. It is allowed to call back into the library from callbacks that start +// with "On". It has been explicitly documented when it's not allowed to call +// back into this library from within a callback. +// +// Theses callbacks are only synchronously triggered as a result of the client +// calling a public method in `DcSctpSocketInterface`. 
+class DcSctpSocketCallbacks { + public: + virtual ~DcSctpSocketCallbacks() = default; + + // Called when the library wants the packet serialized as `data` to be sent. + // + // Note that it's NOT ALLOWED to call into this library from within this + // callback. + virtual void SendPacket(rtc::ArrayView data) = 0; + + // Called when the library wants to create a Timeout. The callback must return + // an object that implements that interface. + // + // Note that it's NOT ALLOWED to call into this library from within this + // callback. + virtual std::unique_ptr CreateTimeout() = 0; + + // Returns the current time in milliseconds (from any epoch). + // + // Note that it's NOT ALLOWED to call into this library from within this + // callback. + virtual TimeMs TimeMillis() = 0; + + // Called when the library needs a random number uniformly distributed between + // `low` (inclusive) and `high` (exclusive). The random numbers used by the + // library are not used for cryptographic purposes. There are no requirements + // that the random number generator must be secure. + // + // Note that it's NOT ALLOWED to call into this library from within this + // callback. + virtual uint32_t GetRandomInt(uint32_t low, uint32_t high) = 0; + + // Triggered when the outgoing message buffer is empty, meaning that there are + // no more queued messages, but there can still be packets in-flight or to be + // retransmitted. (in contrast to SCTP_SENDER_DRY_EVENT). + // + // Note that it's NOT ALLOWED to call into this library from within this + // callback. + ABSL_DEPRECATED("Use OnTotalBufferedAmountLow instead") + virtual void NotifyOutgoingMessageBufferEmpty() {} + + // Called when the library has received an SCTP message in full and delivers + // it to the upper layer. + // + // It is allowed to call into this library from within this callback. 
+ virtual void OnMessageReceived(DcSctpMessage message) = 0; + + // Triggered when an non-fatal error is reported by either this library or + // from the other peer (by sending an ERROR command). These should be logged, + // but no other action need to be taken as the association is still viable. + // + // It is allowed to call into this library from within this callback. + virtual void OnError(ErrorKind error, absl::string_view message) = 0; + + // Triggered when the socket has aborted - either as decided by this socket + // due to e.g. too many retransmission attempts, or by the peer when + // receiving an ABORT command. No other callbacks will be done after this + // callback, unless reconnecting. + // + // It is allowed to call into this library from within this callback. + virtual void OnAborted(ErrorKind error, absl::string_view message) = 0; + + // Called when calling `Connect` succeeds, but also for incoming successful + // connection attempts. + // + // It is allowed to call into this library from within this callback. + virtual void OnConnected() = 0; + + // Called when the socket is closed in a controlled way. No other + // callbacks will be done after this callback, unless reconnecting. + // + // It is allowed to call into this library from within this callback. + virtual void OnClosed() = 0; + + // On connection restarted (by peer). This is just a notification, and the + // association is expected to work fine after this call, but there could have + // been packet loss as a result of restarting the association. + // + // It is allowed to call into this library from within this callback. + virtual void OnConnectionRestarted() = 0; + + // Indicates that a stream reset request has failed. + // + // It is allowed to call into this library from within this callback. + virtual void OnStreamsResetFailed( + rtc::ArrayView outgoing_streams, + absl::string_view reason) = 0; + + // Indicates that a stream reset request has been performed. 
+ // + // It is allowed to call into this library from within this callback. + virtual void OnStreamsResetPerformed( + rtc::ArrayView outgoing_streams) = 0; + + // When a peer has reset some of its outgoing streams, this will be called. An + // empty list indicates that all streams have been reset. + // + // It is allowed to call into this library from within this callback. + virtual void OnIncomingStreamsReset( + rtc::ArrayView incoming_streams) = 0; + + // Will be called when the amount of data buffered to be sent falls to or + // below the threshold set when calling `SetBufferedAmountLowThreshold`. + // + // It is allowed to call into this library from within this callback. + virtual void OnBufferedAmountLow(StreamID stream_id) {} + + // Will be called when the total amount of data buffered (in the entire send + // buffer, for all streams) falls to or below the threshold specified in + // `DcSctpOptions::total_buffered_amount_low_threshold`. + virtual void OnTotalBufferedAmountLow() {} +}; + +// The DcSctpSocket implementation implements the following interface. +class DcSctpSocketInterface { + public: + virtual ~DcSctpSocketInterface() = default; + + // To be called when an incoming SCTP packet is to be processed. + virtual void ReceivePacket(rtc::ArrayView data) = 0; + + // To be called when a timeout has expired. The `timeout_id` is provided + // when the timeout was initiated. + virtual void HandleTimeout(TimeoutID timeout_id) = 0; + + // Connects the socket. This is an asynchronous operation, and + // `DcSctpSocketCallbacks::OnConnected` will be called on success. + virtual void Connect() = 0; + + // Gracefully shutdowns the socket and sends all outstanding data. This is an + // asynchronous operation and `DcSctpSocketCallbacks::OnClosed` will be called + // on success. + virtual void Shutdown() = 0; + + // Closes the connection non-gracefully. Will send ABORT if the connection is + // not already closed. 
No callbacks will be made after Close() has returned. + virtual void Close() = 0; + + // The socket state. + virtual SocketState state() const = 0; + + // The options it was created with. + virtual const DcSctpOptions& options() const = 0; + + // Update the options max_message_size. + virtual void SetMaxMessageSize(size_t max_message_size) = 0; + + // Sends the message `message` using the provided send options. + // Sending a message is an asynchrous operation, and the `OnError` callback + // may be invoked to indicate any errors in sending the message. + // + // The association does not have to be established before calling this method. + // If it's called before there is an established association, the message will + // be queued. + virtual SendStatus Send(DcSctpMessage message, + const SendOptions& send_options) = 0; + + // Resetting streams is an asynchronous operation and the results will + // be notified using `DcSctpSocketCallbacks::OnStreamsResetDone()` on success + // and `DcSctpSocketCallbacks::OnStreamsResetFailed()` on failure. Note that + // only outgoing streams can be reset. + // + // When it's known that the peer has reset its own outgoing streams, + // `DcSctpSocketCallbacks::OnIncomingStreamReset` is called. + // + // Note that resetting a stream will also remove all queued messages on those + // streams, but will ensure that the currently sent message (if any) is fully + // sent before closing the stream. + // + // Resetting streams can only be done on an established association that + // supports stream resetting. Calling this method on e.g. a closed association + // or streams that don't support resetting will not perform any operation. + virtual ResetStreamsStatus ResetStreams( + rtc::ArrayView outgoing_streams) = 0; + + // Returns the number of bytes of data currently queued to be sent on a given + // stream. 
+ virtual size_t buffered_amount(StreamID stream_id) const = 0; + + // Returns the number of buffered outgoing bytes that is considered "low" for + // a given stream. See `SetBufferedAmountLowThreshold`. + virtual size_t buffered_amount_low_threshold(StreamID stream_id) const = 0; + + // Used to specify the number of bytes of buffered outgoing data that is + // considered "low" for a given stream, which will trigger an + // OnBufferedAmountLow event. The default value is zero (0). + virtual void SetBufferedAmountLowThreshold(StreamID stream_id, + size_t bytes) = 0; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_H_ diff --git a/net/dcsctp/public/dcsctp_socket_factory.cc b/net/dcsctp/public/dcsctp_socket_factory.cc new file mode 100644 index 0000000000..338d143424 --- /dev/null +++ b/net/dcsctp/public/dcsctp_socket_factory.cc @@ -0,0 +1,31 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "net/dcsctp/public/dcsctp_socket_factory.h" + +#include +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/packet_observer.h" +#include "net/dcsctp/socket/dcsctp_socket.h" + +namespace dcsctp { +std::unique_ptr DcSctpSocketFactory::Create( + absl::string_view log_prefix, + DcSctpSocketCallbacks& callbacks, + std::unique_ptr packet_observer, + const DcSctpOptions& options) { + return std::make_unique(log_prefix, callbacks, + std::move(packet_observer), options); +} +} // namespace dcsctp diff --git a/net/dcsctp/public/dcsctp_socket_factory.h b/net/dcsctp/public/dcsctp_socket_factory.h new file mode 100644 index 0000000000..dcc68d9b54 --- /dev/null +++ b/net/dcsctp/public/dcsctp_socket_factory.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_FACTORY_H_ +#define NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_FACTORY_H_ + +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/packet_observer.h" + +namespace dcsctp { +class DcSctpSocketFactory { + public: + std::unique_ptr Create( + absl::string_view log_prefix, + DcSctpSocketCallbacks& callbacks, + std::unique_ptr packet_observer, + const DcSctpOptions& options); +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_DCSCTP_SOCKET_FACTORY_H_ diff --git a/net/dcsctp/public/mock_dcsctp_socket.h b/net/dcsctp/public/mock_dcsctp_socket.h new file mode 100644 index 0000000000..18140642b7 --- /dev/null +++ b/net/dcsctp/public/mock_dcsctp_socket.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PUBLIC_MOCK_DCSCTP_SOCKET_H_ +#define NET_DCSCTP_PUBLIC_MOCK_DCSCTP_SOCKET_H_ + +#include "net/dcsctp/public/dcsctp_socket.h" +#include "test/gmock.h" + +namespace dcsctp { + +class MockDcSctpSocket : public DcSctpSocketInterface { + public: + MOCK_METHOD(void, + ReceivePacket, + (rtc::ArrayView data), + (override)); + + MOCK_METHOD(void, HandleTimeout, (TimeoutID timeout_id), (override)); + + MOCK_METHOD(void, Connect, (), (override)); + + MOCK_METHOD(void, Shutdown, (), (override)); + + MOCK_METHOD(void, Close, (), (override)); + + MOCK_METHOD(SocketState, state, (), (const, override)); + + MOCK_METHOD(const DcSctpOptions&, options, (), (const, override)); + + MOCK_METHOD(void, SetMaxMessageSize, (size_t max_message_size), (override)); + + MOCK_METHOD(SendStatus, + Send, + (DcSctpMessage message, const SendOptions& send_options), + (override)); + + MOCK_METHOD(ResetStreamsStatus, + ResetStreams, + (rtc::ArrayView outgoing_streams), + (override)); + + MOCK_METHOD(size_t, buffered_amount, (StreamID stream_id), (const, override)); + + MOCK_METHOD(size_t, + buffered_amount_low_threshold, + (StreamID stream_id), + (const, override)); + + MOCK_METHOD(void, + SetBufferedAmountLowThreshold, + (StreamID stream_id, size_t bytes), + (override)); +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_MOCK_DCSCTP_SOCKET_H_ diff --git a/net/dcsctp/public/mock_dcsctp_socket_test.cc b/net/dcsctp/public/mock_dcsctp_socket_test.cc new file mode 100644 index 0000000000..57013e4ce2 --- /dev/null +++ b/net/dcsctp/public/mock_dcsctp_socket_test.cc @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/public/mock_dcsctp_socket.h" + +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { + +// This test exists to ensure that all methods are mocked correctly, and to +// generate compiler errors if they are not. +TEST(MockDcSctpSocketTest, CanInstantiateAndConnect) { + testing::StrictMock socket; + + EXPECT_CALL(socket, Connect); + + socket.Connect(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/public/packet_observer.h b/net/dcsctp/public/packet_observer.h new file mode 100644 index 0000000000..fe7567824f --- /dev/null +++ b/net/dcsctp/public/packet_observer.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PUBLIC_PACKET_OBSERVER_H_ +#define NET_DCSCTP_PUBLIC_PACKET_OBSERVER_H_ + +#include + +#include "api/array_view.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// A PacketObserver can be attached to a socket and will be called for +// all sent and received packets. +class PacketObserver { + public: + virtual ~PacketObserver() = default; + // Called when a packet is sent, with the current time (in milliseconds) as + // `now`, and the packet payload as `payload`. + virtual void OnSentPacket(TimeMs now, + rtc::ArrayView payload) = 0; + + // Called when a packet is received, with the current time (in milliseconds) + // as `now`, and the packet payload as `payload`. 
+ virtual void OnReceivedPacket(TimeMs now, + rtc::ArrayView payload) = 0; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_PACKET_OBSERVER_H_ diff --git a/net/dcsctp/public/strong_alias.h b/net/dcsctp/public/strong_alias.h new file mode 100644 index 0000000000..96678442b4 --- /dev/null +++ b/net/dcsctp/public/strong_alias.h @@ -0,0 +1,85 @@ +/* + * Copyright 2019 The Chromium Authors. All rights reserved. + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PUBLIC_STRONG_ALIAS_H_ +#define NET_DCSCTP_PUBLIC_STRONG_ALIAS_H_ + +#include +#include + +namespace dcsctp { + +// This is a copy of +// https://source.chromium.org/chromium/chromium/src/+/master:base/types/strong_alias.h +// as the API (and internals) are using type-safe integral identifiers, but this +// library can't depend on that file. The ostream operator has been removed +// per WebRTC library conventions, and the underlying type is exposed. 
+ +template +class StrongAlias { + public: + using UnderlyingType = TheUnderlyingType; + constexpr StrongAlias() = default; + constexpr explicit StrongAlias(const UnderlyingType& v) : value_(v) {} + constexpr explicit StrongAlias(UnderlyingType&& v) noexcept + : value_(std::move(v)) {} + + constexpr UnderlyingType* operator->() { return &value_; } + constexpr const UnderlyingType* operator->() const { return &value_; } + + constexpr UnderlyingType& operator*() & { return value_; } + constexpr const UnderlyingType& operator*() const& { return value_; } + constexpr UnderlyingType&& operator*() && { return std::move(value_); } + constexpr const UnderlyingType&& operator*() const&& { + return std::move(value_); + } + + constexpr UnderlyingType& value() & { return value_; } + constexpr const UnderlyingType& value() const& { return value_; } + constexpr UnderlyingType&& value() && { return std::move(value_); } + constexpr const UnderlyingType&& value() const&& { return std::move(value_); } + + constexpr explicit operator const UnderlyingType&() const& { return value_; } + + constexpr bool operator==(const StrongAlias& other) const { + return value_ == other.value_; + } + constexpr bool operator!=(const StrongAlias& other) const { + return value_ != other.value_; + } + constexpr bool operator<(const StrongAlias& other) const { + return value_ < other.value_; + } + constexpr bool operator<=(const StrongAlias& other) const { + return value_ <= other.value_; + } + constexpr bool operator>(const StrongAlias& other) const { + return value_ > other.value_; + } + constexpr bool operator>=(const StrongAlias& other) const { + return value_ >= other.value_; + } + + // Hasher to use in std::unordered_map, std::unordered_set, etc. 
+ struct Hasher { + using argument_type = StrongAlias; + using result_type = std::size_t; + result_type operator()(const argument_type& id) const { + return std::hash()(id.value()); + } + }; + + protected: + UnderlyingType value_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_STRONG_ALIAS_H_ diff --git a/net/dcsctp/public/strong_alias_test.cc b/net/dcsctp/public/strong_alias_test.cc new file mode 100644 index 0000000000..0c57c6b248 --- /dev/null +++ b/net/dcsctp/public/strong_alias_test.cc @@ -0,0 +1,362 @@ +/* + * Copyright 2019 The Chromium Authors. All rights reserved. + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/public/strong_alias.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +// This is a copy of +// https://source.chromium.org/chromium/chromium/src/+/master:base/types/strong_alias_unittest.cc +// but adapted to use WebRTC's includes, remove unit tests that test the ostream +// operator (it's removed in this port) and other adaptations to pass lint. + +namespace dcsctp { +namespace { + +// For test correctnenss, it's important that these getters return lexically +// incrementing values as |index| grows. 
+template +T GetExampleValue(int index); + +template <> +int GetExampleValue(int index) { + return 5 + index; +} +template <> +uint64_t GetExampleValue(int index) { + return 500U + index; +} + +template <> +std::string GetExampleValue(int index) { + return std::string('a', index); +} + +} // namespace + +template +class StrongAliasTest : public ::testing::Test {}; + +using TestedTypes = ::testing::Types; +TYPED_TEST_SUITE(StrongAliasTest, TestedTypes); + +TYPED_TEST(StrongAliasTest, ValueAccessesUnderlyingValue) { + using FooAlias = StrongAlias; + + // Const value getter. + const FooAlias const_alias(GetExampleValue(1)); + EXPECT_EQ(GetExampleValue(1), const_alias.value()); + static_assert(std::is_const::type>::value, + "Reference returned by const value getter should be const."); +} + +TYPED_TEST(StrongAliasTest, ExplicitConversionToUnderlyingValue) { + using FooAlias = StrongAlias; + + const FooAlias const_alias(GetExampleValue(1)); + EXPECT_EQ(GetExampleValue(1), static_cast(const_alias)); +} + +TYPED_TEST(StrongAliasTest, CanBeCopyConstructed) { + using FooAlias = StrongAlias; + FooAlias alias(GetExampleValue(0)); + FooAlias copy_constructed = alias; + EXPECT_EQ(copy_constructed, alias); + + FooAlias copy_assigned; + copy_assigned = alias; + EXPECT_EQ(copy_assigned, alias); +} + +TYPED_TEST(StrongAliasTest, CanBeMoveConstructed) { + using FooAlias = StrongAlias; + FooAlias alias(GetExampleValue(0)); + FooAlias move_constructed = std::move(alias); + EXPECT_EQ(move_constructed, FooAlias(GetExampleValue(0))); + + FooAlias alias2(GetExampleValue(2)); + FooAlias move_assigned; + move_assigned = std::move(alias2); + EXPECT_EQ(move_assigned, FooAlias(GetExampleValue(2))); + + // Check that FooAlias is nothrow move constructible. This matters for + // performance when used in std::vectors. 
+ static_assert(std::is_nothrow_move_constructible::value, + "Error: Alias is not nothow move constructible"); +} + +TYPED_TEST(StrongAliasTest, CanBeConstructedFromMoveOnlyType) { + // Note, using a move-only unique_ptr to T: + using FooAlias = StrongAlias>; + + FooAlias a(std::make_unique(GetExampleValue(0))); + EXPECT_EQ(*a.value(), GetExampleValue(0)); + + auto bare_value = std::make_unique(GetExampleValue(1)); + FooAlias b(std::move(bare_value)); + EXPECT_EQ(*b.value(), GetExampleValue(1)); +} + +TYPED_TEST(StrongAliasTest, MutableOperatorArrow) { + // Note, using a move-only unique_ptr to T: + using Ptr = std::unique_ptr; + using FooAlias = StrongAlias; + + FooAlias a(std::make_unique()); + EXPECT_TRUE(a.value()); + + // Check that `a` can be modified through the use of operator->. + a->reset(); + + EXPECT_FALSE(a.value()); +} + +TYPED_TEST(StrongAliasTest, MutableOperatorStar) { + // Note, using a move-only unique_ptr to T: + using Ptr = std::unique_ptr; + using FooAlias = StrongAlias; + + FooAlias a(std::make_unique()); + FooAlias b(std::make_unique()); + EXPECT_TRUE(*a); + EXPECT_TRUE(*b); + + // Check that both the mutable l-value and r-value overloads work and we can + // move out of the aliases. + { Ptr ignore(*std::move(a)); } + { Ptr ignore(std::move(*b)); } + + EXPECT_FALSE(a.value()); + EXPECT_FALSE(b.value()); +} + +TYPED_TEST(StrongAliasTest, MutableValue) { + // Note, using a move-only unique_ptr to T: + using Ptr = std::unique_ptr; + using FooAlias = StrongAlias; + + FooAlias a(std::make_unique()); + FooAlias b(std::make_unique()); + EXPECT_TRUE(a.value()); + EXPECT_TRUE(b.value()); + + // Check that both the mutable l-value and r-value overloads work and we can + // move out of the aliases. 
+ { Ptr ignore(std::move(a).value()); } + { Ptr ignore(std::move(b.value())); } + + EXPECT_FALSE(a.value()); + EXPECT_FALSE(b.value()); +} + +TYPED_TEST(StrongAliasTest, SizeSameAsUnderlyingType) { + using FooAlias = StrongAlias; + static_assert(sizeof(FooAlias) == sizeof(TypeParam), + "StrongAlias should be as large as the underlying type."); +} + +TYPED_TEST(StrongAliasTest, IsDefaultConstructible) { + using FooAlias = StrongAlias; + static_assert(std::is_default_constructible::value, + "Should be possible to default-construct a StrongAlias."); + static_assert( + std::is_trivially_default_constructible::value == + std::is_trivially_default_constructible::value, + "Should be possible to trivially default-construct a StrongAlias iff the " + "underlying type is trivially default constructible."); +} + +TEST(StrongAliasTest, TrivialTypeAliasIsStandardLayout) { + using FooAlias = StrongAlias; + static_assert(std::is_standard_layout::value, + "int-based alias should have standard layout. "); + static_assert(std::is_trivially_copyable::value, + "int-based alias should be trivially copyable. 
"); +} + +TYPED_TEST(StrongAliasTest, CannotBeCreatedFromDifferentAlias) { + using FooAlias = StrongAlias; + using BarAlias = StrongAlias; + static_assert(!std::is_constructible::value, + "Should be impossible to construct FooAlias from a BarAlias."); + static_assert(!std::is_convertible::value, + "Should be impossible to convert a BarAlias into FooAlias."); +} + +TYPED_TEST(StrongAliasTest, CannotBeImplicitlyConverterToUnderlyingValue) { + using FooAlias = StrongAlias; + static_assert(!std::is_convertible::value, + "Should be impossible to implicitly convert a StrongAlias into " + "an underlying type."); +} + +TYPED_TEST(StrongAliasTest, ComparesEqualToSameValue) { + using FooAlias = StrongAlias; + // Comparison to self: + const FooAlias a = FooAlias(GetExampleValue(0)); + EXPECT_EQ(a, a); + EXPECT_FALSE(a != a); + EXPECT_TRUE(a >= a); + EXPECT_TRUE(a <= a); + EXPECT_FALSE(a > a); + EXPECT_FALSE(a < a); + // Comparison to other equal object: + const FooAlias b = FooAlias(GetExampleValue(0)); + EXPECT_EQ(a, b); + EXPECT_FALSE(a != b); + EXPECT_TRUE(a >= b); + EXPECT_TRUE(a <= b); + EXPECT_FALSE(a > b); + EXPECT_FALSE(a < b); +} + +TYPED_TEST(StrongAliasTest, ComparesCorrectlyToDifferentValue) { + using FooAlias = StrongAlias; + const FooAlias a = FooAlias(GetExampleValue(0)); + const FooAlias b = FooAlias(GetExampleValue(1)); + EXPECT_NE(a, b); + EXPECT_FALSE(a == b); + EXPECT_TRUE(b >= a); + EXPECT_TRUE(a <= b); + EXPECT_TRUE(b > a); + EXPECT_TRUE(a < b); +} + +TEST(StrongAliasTest, CanBeDerivedFrom) { + // Aliases can be enriched by custom operations or validations if needed. + // Ideally, one could go from a 'using' declaration to a derived class to add + // those methods without the need to change any other code. + class CountryCode : public StrongAlias { + public: + explicit CountryCode(const std::string& value) + : StrongAlias::StrongAlias(value) { + if (value_.length() != 2) { + // Country code invalid! + value_.clear(); // is_null() will return true. 
+ } + } + + bool is_null() const { return value_.empty(); } + }; + + CountryCode valid("US"); + EXPECT_FALSE(valid.is_null()); + + CountryCode invalid("United States"); + EXPECT_TRUE(invalid.is_null()); +} + +TEST(StrongAliasTest, CanWrapComplexStructures) { + // A pair of strings implements odering and can, in principle, be used as + // a base of StrongAlias. + using PairOfStrings = std::pair; + using ComplexAlias = StrongAlias; + + ComplexAlias a1{std::make_pair("aaa", "bbb")}; + ComplexAlias a2{std::make_pair("ccc", "ddd")}; + EXPECT_TRUE(a1 < a2); + + EXPECT_TRUE(a1.value() == PairOfStrings("aaa", "bbb")); + + // Note a caveat, an std::pair doesn't have an overload of operator<<, and it + // cannot be easily added since ADL rules would require it to be in the std + // namespace. So we can't print ComplexAlias. +} + +TYPED_TEST(StrongAliasTest, CanBeKeysInStdUnorderedMap) { + using FooAlias = StrongAlias; + std::unordered_map map; + + FooAlias k1(GetExampleValue(0)); + FooAlias k2(GetExampleValue(1)); + + map[k1] = "value1"; + map[k2] = "value2"; + + EXPECT_EQ(map[k1], "value1"); + EXPECT_EQ(map[k2], "value2"); +} + +TYPED_TEST(StrongAliasTest, CanBeKeysInStdMap) { + using FooAlias = StrongAlias; + std::map map; + + FooAlias k1(GetExampleValue(0)); + FooAlias k2(GetExampleValue(1)); + + map[k1] = "value1"; + map[k2] = "value2"; + + EXPECT_EQ(map[k1], "value1"); + EXPECT_EQ(map[k2], "value2"); +} + +TYPED_TEST(StrongAliasTest, CanDifferentiateOverloads) { + using FooAlias = StrongAlias; + using BarAlias = StrongAlias; + class Scope { + public: + static std::string Overload(FooAlias) { return "FooAlias"; } + static std::string Overload(BarAlias) { return "BarAlias"; } + }; + EXPECT_EQ("FooAlias", Scope::Overload(FooAlias())); + EXPECT_EQ("BarAlias", Scope::Overload(BarAlias())); +} + +TEST(StrongAliasTest, EnsureConstexpr) { + using FooAlias = StrongAlias; + + // Check constructors. 
+ static constexpr FooAlias kZero{}; + static constexpr FooAlias kOne(1); + + // Check operator*. + static_assert(*kZero == 0, ""); + static_assert(*kOne == 1, ""); + + // Check value(). + static_assert(kZero.value() == 0, ""); + static_assert(kOne.value() == 1, ""); + + // Check explicit conversions to underlying type. + static_assert(static_cast(kZero) == 0, ""); + static_assert(static_cast(kOne) == 1, ""); + + // Check comparison operations. + static_assert(kZero == kZero, ""); + static_assert(kZero != kOne, ""); + static_assert(kZero < kOne, ""); + static_assert(kZero <= kOne, ""); + static_assert(kOne > kZero, ""); + static_assert(kOne >= kZero, ""); +} + +TEST(StrongAliasTest, BooleansAreEvaluatedAsBooleans) { + using BoolAlias = StrongAlias; + + BoolAlias happy(true); + BoolAlias sad(false); + + EXPECT_TRUE(happy); + EXPECT_FALSE(sad); + EXPECT_TRUE(*happy); + EXPECT_FALSE(*sad); +} +} // namespace dcsctp diff --git a/net/dcsctp/public/text_pcap_packet_observer.cc b/net/dcsctp/public/text_pcap_packet_observer.cc new file mode 100644 index 0000000000..2b13060190 --- /dev/null +++ b/net/dcsctp/public/text_pcap_packet_observer.cc @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/public/text_pcap_packet_observer.h" + +#include "api/array_view.h" +#include "net/dcsctp/public/types.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +void TextPcapPacketObserver::OnSentPacket( + dcsctp::TimeMs now, + rtc::ArrayView payload) { + PrintPacket("O ", name_, now, payload); +} + +void TextPcapPacketObserver::OnReceivedPacket( + dcsctp::TimeMs now, + rtc::ArrayView payload) { + PrintPacket("I ", name_, now, payload); +} + +void TextPcapPacketObserver::PrintPacket( + absl::string_view prefix, + absl::string_view socket_name, + dcsctp::TimeMs now, + rtc::ArrayView payload) { + rtc::StringBuilder s; + s << "\n" << prefix; + int64_t remaining = *now % (24 * 60 * 60 * 1000); + int hours = remaining / (60 * 60 * 1000); + remaining = remaining % (60 * 60 * 1000); + int minutes = remaining / (60 * 1000); + remaining = remaining % (60 * 1000); + int seconds = remaining / 1000; + int ms = remaining % 1000; + s.AppendFormat("%02d:%02d:%02d.%03d", hours, minutes, seconds, ms); + s << " 0000"; + for (uint8_t byte : payload) { + s.AppendFormat(" %02x", byte); + } + s << " # SCTP_PACKET " << socket_name; + RTC_LOG(LS_VERBOSE) << s.str(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/public/text_pcap_packet_observer.h b/net/dcsctp/public/text_pcap_packet_observer.h new file mode 100644 index 0000000000..0685771ccf --- /dev/null +++ b/net/dcsctp/public/text_pcap_packet_observer.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PUBLIC_TEXT_PCAP_PACKET_OBSERVER_H_ +#define NET_DCSCTP_PUBLIC_TEXT_PCAP_PACKET_OBSERVER_H_ + +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/public/packet_observer.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// Print outs all sent and received packets to the logs, at LS_VERBOSE severity. +class TextPcapPacketObserver : public dcsctp::PacketObserver { + public: + explicit TextPcapPacketObserver(absl::string_view name) : name_(name) {} + + // Implementation of `dcsctp::PacketObserver`. + void OnSentPacket(dcsctp::TimeMs now, + rtc::ArrayView payload) override; + + void OnReceivedPacket(dcsctp::TimeMs now, + rtc::ArrayView payload) override; + + // Prints a packet to the log. Exposed to allow it to be used in compatibility + // tests suites that don't use PacketObserver. + static void PrintPacket(absl::string_view prefix, + absl::string_view socket_name, + dcsctp::TimeMs now, + rtc::ArrayView payload); + + private: + const std::string name_; +}; + +} // namespace dcsctp +#endif // NET_DCSCTP_PUBLIC_TEXT_PCAP_PACKET_OBSERVER_H_ diff --git a/net/dcsctp/public/timeout.h b/net/dcsctp/public/timeout.h new file mode 100644 index 0000000000..64ba351093 --- /dev/null +++ b/net/dcsctp/public/timeout.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_PUBLIC_TIMEOUT_H_ +#define NET_DCSCTP_PUBLIC_TIMEOUT_H_ + +#include + +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// A very simple timeout that can be started and stopped. 
When started, +// it will be given a unique `timeout_id` which should be provided to +// `DcSctpSocket::HandleTimeout` when it expires. +class Timeout { + public: + virtual ~Timeout() = default; + + // Called to start time timeout, with the duration in milliseconds as + // `duration` and with the timeout identifier as `timeout_id`, which - if + // the timeout expires - shall be provided to `DcSctpSocket::HandleTimeout`. + // + // `Start` and `Stop` will always be called in pairs. In other words will + // ´Start` never be called twice, without a call to `Stop` in between. + virtual void Start(DurationMs duration, TimeoutID timeout_id) = 0; + + // Called to stop the running timeout. + // + // `Start` and `Stop` will always be called in pairs. In other words will + // ´Start` never be called twice, without a call to `Stop` in between. + // + // `Stop` will always be called prior to releasing this object. + virtual void Stop() = 0; + + // Called to restart an already running timeout, with the `duration` and + // `timeout_id` parameters as described in `Start`. This can be overridden by + // the implementation to restart it more efficiently. + virtual void Restart(DurationMs duration, TimeoutID timeout_id) { + Stop(); + Start(duration, timeout_id); + } +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_TIMEOUT_H_ diff --git a/net/dcsctp/public/types.h b/net/dcsctp/public/types.h new file mode 100644 index 0000000000..d516daffe3 --- /dev/null +++ b/net/dcsctp/public/types.h @@ -0,0 +1,110 @@ +/* + * Copyright 2019 The Chromium Authors. All rights reserved. + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_PUBLIC_TYPES_H_ +#define NET_DCSCTP_PUBLIC_TYPES_H_ + +#include +#include + +#include "net/dcsctp/public/strong_alias.h" + +namespace dcsctp { + +// Stream Identifier +using StreamID = StrongAlias; + +// Payload Protocol Identifier (PPID) +using PPID = StrongAlias; + +// Timeout Identifier +using TimeoutID = StrongAlias; + +// Indicates if a message is allowed to be received out-of-order compared to +// other messages on the same stream. +using IsUnordered = StrongAlias; + +// Duration, as milliseconds. Overflows after 24 days. +class DurationMs : public StrongAlias { + public: + constexpr explicit DurationMs(const UnderlyingType& v) + : StrongAlias(v) {} + + // Convenience methods for working with time. + constexpr DurationMs& operator+=(DurationMs d) { + value_ += d.value_; + return *this; + } + constexpr DurationMs& operator-=(DurationMs d) { + value_ -= d.value_; + return *this; + } + template + constexpr DurationMs& operator*=(T factor) { + value_ *= factor; + return *this; + } +}; + +constexpr inline DurationMs operator+(DurationMs lhs, DurationMs rhs) { + return lhs += rhs; +} +constexpr inline DurationMs operator-(DurationMs lhs, DurationMs rhs) { + return lhs -= rhs; +} +template +constexpr inline DurationMs operator*(DurationMs lhs, T rhs) { + return lhs *= rhs; +} +template +constexpr inline DurationMs operator*(T lhs, DurationMs rhs) { + return rhs *= lhs; +} +constexpr inline int32_t operator/(DurationMs lhs, DurationMs rhs) { + return lhs.value() / rhs.value(); +} + +// Represents time, in milliseconds since a client-defined epoch. +class TimeMs : public StrongAlias { + public: + constexpr explicit TimeMs(const UnderlyingType& v) + : StrongAlias(v) {} + + // Convenience methods for working with time. 
+ constexpr TimeMs& operator+=(DurationMs d) { + value_ += *d; + return *this; + } + constexpr TimeMs& operator-=(DurationMs d) { + value_ -= *d; + return *this; + } + + static constexpr TimeMs InfiniteFuture() { + return TimeMs(std::numeric_limits::max()); + } +}; + +constexpr inline TimeMs operator+(TimeMs lhs, DurationMs rhs) { + return lhs += rhs; +} +constexpr inline TimeMs operator+(DurationMs lhs, TimeMs rhs) { + return rhs += lhs; +} +constexpr inline TimeMs operator-(TimeMs lhs, DurationMs rhs) { + return lhs -= rhs; +} +constexpr inline DurationMs operator-(TimeMs lhs, TimeMs rhs) { + return DurationMs(*lhs - *rhs); +} + +} // namespace dcsctp + +#endif // NET_DCSCTP_PUBLIC_TYPES_H_ diff --git a/net/dcsctp/public/types_test.cc b/net/dcsctp/public/types_test.cc new file mode 100644 index 0000000000..d3d1240751 --- /dev/null +++ b/net/dcsctp/public/types_test.cc @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/public/types.h" + +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(TypesTest, DurationOperators) { + DurationMs d1(10); + DurationMs d2(25); + EXPECT_EQ(d1 + d2, DurationMs(35)); + EXPECT_EQ(d2 - d1, DurationMs(15)); + + d1 += d2; + EXPECT_EQ(d1, DurationMs(35)); + + d1 -= DurationMs(5); + EXPECT_EQ(d1, DurationMs(30)); + + d1 *= 1.5; + EXPECT_EQ(d1, DurationMs(45)); + + EXPECT_EQ(DurationMs(10) * 2, DurationMs(20)); +} + +TEST(TypesTest, TimeOperators) { + EXPECT_EQ(TimeMs(250) + DurationMs(100), TimeMs(350)); + EXPECT_EQ(DurationMs(250) + TimeMs(100), TimeMs(350)); + EXPECT_EQ(TimeMs(250) - DurationMs(100), TimeMs(150)); + EXPECT_EQ(TimeMs(250) - TimeMs(100), DurationMs(150)); + + TimeMs t1(150); + t1 -= DurationMs(50); + EXPECT_EQ(t1, TimeMs(100)); + t1 += DurationMs(200); + EXPECT_EQ(t1, TimeMs(300)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/rx/BUILD.gn b/net/dcsctp/rx/BUILD.gn new file mode 100644 index 0000000000..fb92513158 --- /dev/null +++ b/net/dcsctp/rx/BUILD.gn @@ -0,0 +1,122 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +import("../../../webrtc.gni") + +rtc_library("data_tracker") { + deps = [ + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:sequence_numbers", + "../packet:chunk", + "../packet:data", + "../timer", + ] + sources = [ + "data_tracker.cc", + "data_tracker.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_source_set("reassembly_streams") { + deps = [ + "../../../api:array_view", + "../common:sequence_numbers", + "../packet:chunk", + "../packet:data", + "../public:types", + ] + sources = [ "reassembly_streams.h" ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +rtc_library("traditional_reassembly_streams") { + deps = [ + ":reassembly_streams", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:sequence_numbers", + "../packet:chunk", + "../packet:data", + "../public:types", + ] + sources = [ + "traditional_reassembly_streams.cc", + "traditional_reassembly_streams.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("reassembly_queue") { + deps = [ + ":reassembly_streams", + ":traditional_reassembly_streams", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:internal_types", + "../common:sequence_numbers", + "../common:str_join", + "../packet:chunk", + "../packet:data", + "../packet:parameter", + "../public:types", + ] + sources = [ + "reassembly_queue.cc", + "reassembly_queue.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +if (rtc_include_tests) { + 
rtc_library("dcsctp_rx_unittests") { + testonly = true + + deps = [ + ":data_tracker", + ":reassembly_queue", + ":reassembly_streams", + ":traditional_reassembly_streams", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "../common:sequence_numbers", + "../packet:chunk", + "../packet:data", + "../public:types", + "../testing:data_generator", + "../timer", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + sources = [ + "data_tracker_test.cc", + "reassembly_queue_test.cc", + "traditional_reassembly_streams_test.cc", + ] + } +} diff --git a/net/dcsctp/rx/data_tracker.cc b/net/dcsctp/rx/data_tracker.cc new file mode 100644 index 0000000000..5b563a8463 --- /dev/null +++ b/net/dcsctp/rx/data_tracker.cc @@ -0,0 +1,359 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/rx/data_tracker.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/timer/timer.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +constexpr size_t DataTracker::kMaxDuplicateTsnReported; +constexpr size_t DataTracker::kMaxGapAckBlocksReported; + +bool DataTracker::AdditionalTsnBlocks::Add(UnwrappedTSN tsn) { + // Find any block to expand. 
It will look for any block that includes (also + // when expanded) the provided `tsn`. It will return the block that is greater + // than, or equal to `tsn`. + auto it = absl::c_lower_bound( + blocks_, tsn, [&](const TsnRange& elem, const UnwrappedTSN& t) { + return elem.last.next_value() < t; + }); + + if (it == blocks_.end()) { + // No matching block found. There is no greater than, or equal block - which + // means that this TSN is greater than any block. It can then be inserted at + // the end. + blocks_.emplace_back(tsn, tsn); + return true; + } + + if (tsn >= it->first && tsn <= it->last) { + // It's already in this block. + return false; + } + + if (it->last.next_value() == tsn) { + // This block can be expanded to the right, or merged with the next. + auto next_it = it + 1; + if (next_it != blocks_.end() && tsn.next_value() == next_it->first) { + // Expanding it would make it adjacent to next block - merge those. + it->last = next_it->last; + blocks_.erase(next_it); + return true; + } + + // Expand to the right + it->last = tsn; + return true; + } + + if (it->first == tsn.next_value()) { + // This block can be expanded to the left. Merging to the left would've been + // covered by the above "merge to the right". Both blocks (expand a + // right-most block to the left and expand a left-most block to the right) + // would match, but the left-most would be returned by std::lower_bound. + RTC_DCHECK(it == blocks_.begin() || (it - 1)->last.next_value() != tsn); + + // Expand to the left. + it->first = tsn; + return true; + } + + // Need to create a new block in the middle. + blocks_.emplace(it, tsn, tsn); + return true; +} + +void DataTracker::AdditionalTsnBlocks::EraseTo(UnwrappedTSN tsn) { + // Find the block that is greater than or equals `tsn`. 
+ auto it = absl::c_lower_bound( + blocks_, tsn, [&](const TsnRange& elem, const UnwrappedTSN& t) { + return elem.last < t; + }); + + // The block that is found is greater or equal (or possibly ::end, when no + // block is greater or equal). All blocks before this block can be safely + // removed. the TSN might be within this block, so possibly truncate it. + bool tsn_is_within_block = it != blocks_.end() && tsn >= it->first; + blocks_.erase(blocks_.begin(), it); + + if (tsn_is_within_block) { + blocks_.front().first = tsn.next_value(); + } +} + +void DataTracker::AdditionalTsnBlocks::PopFront() { + RTC_DCHECK(!blocks_.empty()); + blocks_.erase(blocks_.begin()); +} + +bool DataTracker::IsTSNValid(TSN tsn) const { + UnwrappedTSN unwrapped_tsn = tsn_unwrapper_.PeekUnwrap(tsn); + + // Note that this method doesn't return `false` for old DATA chunks, as those + // are actually valid, and receiving those may affect the generated SACK + // response (by setting "duplicate TSNs"). + + uint32_t difference = + UnwrappedTSN::Difference(unwrapped_tsn, last_cumulative_acked_tsn_); + if (difference > kMaxAcceptedOutstandingFragments) { + return false; + } + return true; +} + +void DataTracker::Observe(TSN tsn, + AnyDataChunk::ImmediateAckFlag immediate_ack) { + UnwrappedTSN unwrapped_tsn = tsn_unwrapper_.Unwrap(tsn); + + // IsTSNValid must be called prior to calling this method. + RTC_DCHECK( + UnwrappedTSN::Difference(unwrapped_tsn, last_cumulative_acked_tsn_) <= + kMaxAcceptedOutstandingFragments); + + // Old chunk already seen before? + if (unwrapped_tsn <= last_cumulative_acked_tsn_) { + if (duplicate_tsns_.size() < kMaxDuplicateTsnReported) { + duplicate_tsns_.insert(unwrapped_tsn.Wrap()); + } + // https://datatracker.ietf.org/doc/html/rfc4960#section-6.2 + // "When a packet arrives with duplicate DATA chunk(s) and with no new DATA + // chunk(s), the endpoint MUST immediately send a SACK with no delay. 
If a + // packet arrives with duplicate DATA chunk(s) bundled with new DATA chunks, + // the endpoint MAY immediately send a SACK." + UpdateAckState(AckState::kImmediate, "duplicate data"); + } else { + if (unwrapped_tsn == last_cumulative_acked_tsn_.next_value()) { + last_cumulative_acked_tsn_ = unwrapped_tsn; + // The cumulative acked tsn may be moved even further, if a gap was + // filled. + if (!additional_tsn_blocks_.empty() && + additional_tsn_blocks_.front().first == + last_cumulative_acked_tsn_.next_value()) { + last_cumulative_acked_tsn_ = additional_tsn_blocks_.front().last; + additional_tsn_blocks_.PopFront(); + } + } else { + bool inserted = additional_tsn_blocks_.Add(unwrapped_tsn); + if (!inserted) { + // Already seen before. + if (duplicate_tsns_.size() < kMaxDuplicateTsnReported) { + duplicate_tsns_.insert(unwrapped_tsn.Wrap()); + } + // https://datatracker.ietf.org/doc/html/rfc4960#section-6.2 + // "When a packet arrives with duplicate DATA chunk(s) and with no new + // DATA chunk(s), the endpoint MUST immediately send a SACK with no + // delay. If a packet arrives with duplicate DATA chunk(s) bundled with + // new DATA chunks, the endpoint MAY immediately send a SACK." + // No need to do this. SACKs are sent immediately on packet loss below. + } + } + } + + // https://tools.ietf.org/html/rfc4960#section-6.7 + // "Upon the reception of a new DATA chunk, an endpoint shall examine the + // continuity of the TSNs received. If the endpoint detects a gap in + // the received DATA chunk sequence, it SHOULD send a SACK with Gap Ack + // Blocks immediately. The data receiver continues sending a SACK after + // receipt of each SCTP packet that doesn't fill the gap." 
+ if (!additional_tsn_blocks_.empty()) { + UpdateAckState(AckState::kImmediate, "packet loss"); + } + + // https://tools.ietf.org/html/rfc7053#section-5.2 + // "Upon receipt of an SCTP packet containing a DATA chunk with the I + // bit set, the receiver SHOULD NOT delay the sending of the corresponding + // SACK chunk, i.e., the receiver SHOULD immediately respond with the + // corresponding SACK chunk." + if (*immediate_ack) { + UpdateAckState(AckState::kImmediate, "immediate-ack bit set"); + } + + if (!seen_packet_) { + // https://tools.ietf.org/html/rfc4960#section-5.1 + // "After the reception of the first DATA chunk in an association the + // endpoint MUST immediately respond with a SACK to acknowledge the DATA + // chunk." + seen_packet_ = true; + UpdateAckState(AckState::kImmediate, "first DATA chunk"); + } + + // https://tools.ietf.org/html/rfc4960#section-6.2 + // "Specifically, an acknowledgement SHOULD be generated for at least + // every second packet (not every second DATA chunk) received, and SHOULD be + // generated within 200 ms of the arrival of any unacknowledged DATA chunk." + if (ack_state_ == AckState::kIdle) { + UpdateAckState(AckState::kBecomingDelayed, "received DATA when idle"); + } else if (ack_state_ == AckState::kDelayed) { + UpdateAckState(AckState::kImmediate, "received DATA when already delayed"); + } +} + +void DataTracker::HandleForwardTsn(TSN new_cumulative_ack) { + // ForwardTSN is sent to make the receiver (this socket) "forget" about partly + // received (or not received at all) data, up until `new_cumulative_ack`. + + UnwrappedTSN unwrapped_tsn = tsn_unwrapper_.Unwrap(new_cumulative_ack); + UnwrappedTSN prev_last_cum_ack_tsn = last_cumulative_acked_tsn_; + + // Old chunk already seen before? 
+ if (unwrapped_tsn <= last_cumulative_acked_tsn_) { + // https://tools.ietf.org/html/rfc3758#section-3.6 + // "Note, if the "New Cumulative TSN" value carried in the arrived + // FORWARD TSN chunk is found to be behind or at the current cumulative TSN + // point, the data receiver MUST treat this FORWARD TSN as out-of-date and + // MUST NOT update its Cumulative TSN. The receiver SHOULD send a SACK to + // its peer (the sender of the FORWARD TSN) since such a duplicate may + // indicate the previous SACK was lost in the network." + UpdateAckState(AckState::kImmediate, + "FORWARD_TSN new_cumulative_tsn was behind"); + return; + } + + // https://tools.ietf.org/html/rfc3758#section-3.6 + // "When a FORWARD TSN chunk arrives, the data receiver MUST first update + // its cumulative TSN point to the value carried in the FORWARD TSN chunk, and + // then MUST further advance its cumulative TSN point locally if possible, as + // shown by the following example..." + + // The `new_cumulative_ack` will become the current + // `last_cumulative_acked_tsn_`, and if there have been prior "gaps" that are + // now overlapping with the new value, remove them. 
+ last_cumulative_acked_tsn_ = unwrapped_tsn; + additional_tsn_blocks_.EraseTo(unwrapped_tsn); + + // See if the `last_cumulative_acked_tsn_` can be moved even further: + if (!additional_tsn_blocks_.empty() && + additional_tsn_blocks_.front().first == + last_cumulative_acked_tsn_.next_value()) { + last_cumulative_acked_tsn_ = additional_tsn_blocks_.front().last; + additional_tsn_blocks_.PopFront(); + } + + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "FORWARD_TSN, cum_ack_tsn=" + << *prev_last_cum_ack_tsn.Wrap() << "->" + << *new_cumulative_ack << "->" + << *last_cumulative_acked_tsn_.Wrap(); + + // https://tools.ietf.org/html/rfc3758#section-3.6 + // "Any time a FORWARD TSN chunk arrives, for the purposes of sending a + // SACK, the receiver MUST follow the same rules as if a DATA chunk had been + // received (i.e., follow the delayed sack rules specified in ..." + if (ack_state_ == AckState::kIdle) { + UpdateAckState(AckState::kBecomingDelayed, + "received FORWARD_TSN when idle"); + } else if (ack_state_ == AckState::kDelayed) { + UpdateAckState(AckState::kImmediate, + "received FORWARD_TSN when already delayed"); + } +} + +SackChunk DataTracker::CreateSelectiveAck(size_t a_rwnd) { + // Note that in SCTP, the receiver side is allowed to discard received data + // and signal that to the sender, but only chunks that have previously been + // reported in the gap-ack-blocks. However, this implementation will never do + // that. So this SACK produced is more like a NR-SACK as explained in + // https://ieeexplore.ieee.org/document/4697037 and which there is an RFC + // draft at https://tools.ietf.org/html/draft-tuexen-tsvwg-sctp-multipath-17. 
+  std::set<TSN> duplicate_tsns;
+  duplicate_tsns_.swap(duplicate_tsns);
+
+  return SackChunk(last_cumulative_acked_tsn_.Wrap(), a_rwnd,
+                   CreateGapAckBlocks(), std::move(duplicate_tsns));
+}
+
+std::vector<SackChunk::GapAckBlock> DataTracker::CreateGapAckBlocks() const {
+  const auto& blocks = additional_tsn_blocks_.blocks();
+  std::vector<SackChunk::GapAckBlock> gap_ack_blocks;
+  gap_ack_blocks.reserve(std::min(blocks.size(), kMaxGapAckBlocksReported));
+  for (size_t i = 0; i < blocks.size() && i < kMaxGapAckBlocksReported; ++i) {
+    auto start_diff =
+        UnwrappedTSN::Difference(blocks[i].first, last_cumulative_acked_tsn_);
+    auto end_diff =
+        UnwrappedTSN::Difference(blocks[i].last, last_cumulative_acked_tsn_);
+    gap_ack_blocks.emplace_back(static_cast<uint16_t>(start_diff),
+                                static_cast<uint16_t>(end_diff));
+  }
+
+  return gap_ack_blocks;
+}
+
+bool DataTracker::ShouldSendAck(bool also_if_delayed) {
+  if (ack_state_ == AckState::kImmediate ||
+      (also_if_delayed && (ack_state_ == AckState::kBecomingDelayed ||
+                           ack_state_ == AckState::kDelayed))) {
+    UpdateAckState(AckState::kIdle, "sending SACK");
+    return true;
+  }
+
+  return false;
+}
+
+bool DataTracker::will_increase_cum_ack_tsn(TSN tsn) const {
+  UnwrappedTSN unwrapped = tsn_unwrapper_.PeekUnwrap(tsn);
+  return unwrapped == last_cumulative_acked_tsn_.next_value();
+}
+
+void DataTracker::ForceImmediateSack() {
+  ack_state_ = AckState::kImmediate;
+}
+
+void DataTracker::HandleDelayedAckTimerExpiry() {
+  UpdateAckState(AckState::kImmediate, "delayed ack timer expired");
+}
+
+void DataTracker::ObservePacketEnd() {
+  if (ack_state_ == AckState::kBecomingDelayed) {
+    UpdateAckState(AckState::kDelayed, "packet end");
+  }
+}
+
+void DataTracker::UpdateAckState(AckState new_state, absl::string_view reason) {
+  if (new_state != ack_state_) {
+    RTC_DLOG(LS_VERBOSE) << log_prefix_ << "State changed from "
+                         << ToString(ack_state_) << " to "
+                         << ToString(new_state) << " due to " << reason;
+    if (ack_state_ == AckState::kDelayed) {
+      delayed_ack_timer_.Stop();
+    } else if (new_state ==
AckState::kDelayed) {
+      delayed_ack_timer_.Start();
+    }
+    ack_state_ = new_state;
+  }
+}
+
+absl::string_view DataTracker::ToString(AckState ack_state) {
+  switch (ack_state) {
+    case AckState::kIdle:
+      return "IDLE";
+    case AckState::kBecomingDelayed:
+      return "BECOMING_DELAYED";
+    case AckState::kDelayed:
+      return "DELAYED";
+    case AckState::kImmediate:
+      return "IMMEDIATE";
+  }
+}
+
+}  // namespace dcsctp
diff --git a/net/dcsctp/rx/data_tracker.h b/net/dcsctp/rx/data_tracker.h
new file mode 100644
index 0000000000..167f5a04e7
--- /dev/null
+++ b/net/dcsctp/rx/data_tracker.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef NET_DCSCTP_RX_DATA_TRACKER_H_
+#define NET_DCSCTP_RX_DATA_TRACKER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <cstdint>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "net/dcsctp/common/sequence_numbers.h"
+#include "net/dcsctp/packet/chunk/data_common.h"
+#include "net/dcsctp/packet/chunk/sack_chunk.h"
+#include "net/dcsctp/packet/data.h"
+#include "net/dcsctp/timer/timer.h"
+
+namespace dcsctp {
+
+// Keeps track of received DATA chunks and handles all logic for _when_ to
+// create SACKs and also _how_ to generate them.
+//
+// It only uses TSNs to track delivery and doesn't need to be aware of streams.
+//
+// SACKs are optimally sent every second packet on connections with no packet
+// loss. When packet loss is detected, it's sent for every packet. When SACKs
+// are not sent directly, a timer is used to send a SACK delayed (by RTO/2, or
+// 200ms, whatever is smallest).
+class DataTracker { + public: + // The maximum number of duplicate TSNs that will be reported in a SACK. + static constexpr size_t kMaxDuplicateTsnReported = 20; + // The maximum number of gap-ack-blocks that will be reported in a SACK. + static constexpr size_t kMaxGapAckBlocksReported = 20; + + // The maximum number of accepted in-flight DATA chunks. This indicates the + // maximum difference from this buffer's last cumulative ack TSN, and any + // received data. Data received beyond this limit will be dropped, which will + // force the transmitter to send data that actually increases the last + // cumulative acked TSN. + static constexpr uint32_t kMaxAcceptedOutstandingFragments = 100000; + + explicit DataTracker(absl::string_view log_prefix, + Timer* delayed_ack_timer, + TSN peer_initial_tsn) + : log_prefix_(std::string(log_prefix) + "dtrack: "), + delayed_ack_timer_(*delayed_ack_timer), + last_cumulative_acked_tsn_( + tsn_unwrapper_.Unwrap(TSN(*peer_initial_tsn - 1))) {} + + // Indicates if the provided TSN is valid. If this return false, the data + // should be dropped and not added to any other buffers, which essentially + // means that there is intentional packet loss. + bool IsTSNValid(TSN tsn) const; + + // Call for every incoming data chunk. + void Observe(TSN tsn, + AnyDataChunk::ImmediateAckFlag immediate_ack = + AnyDataChunk::ImmediateAckFlag(false)); + // Called at the end of processing an SCTP packet. + void ObservePacketEnd(); + + // Called for incoming FORWARD-TSN/I-FORWARD-TSN chunks + void HandleForwardTsn(TSN new_cumulative_ack); + + // Indicates if a SACK should be sent. There may be other reasons to send a + // SACK, but if this function indicates so, it should be sent as soon as + // possible. Calling this function will make it clear a flag so that if it's + // called again, it will probably return false. + // + // If the delayed ack timer is running, this method will return false _unless_ + // `also_if_delayed` is set to true. 
Then it will return true as well. + bool ShouldSendAck(bool also_if_delayed = false); + + // Returns the last cumulative ack TSN - the last seen data chunk's TSN + // value before any packet loss was detected. + TSN last_cumulative_acked_tsn() const { + return TSN(last_cumulative_acked_tsn_.Wrap()); + } + + // Returns true if the received `tsn` would increase the cumulative ack TSN. + bool will_increase_cum_ack_tsn(TSN tsn) const; + + // Forces `ShouldSendSack` to return true. + void ForceImmediateSack(); + + // Note that this will clear `duplicates_`, so every SackChunk that is + // consumed must be sent. + SackChunk CreateSelectiveAck(size_t a_rwnd); + + void HandleDelayedAckTimerExpiry(); + + private: + enum class AckState { + // No need to send an ACK. + kIdle, + + // Has received data chunks (but not yet end of packet). + kBecomingDelayed, + + // Has received data chunks and the end of a packet. Delayed ack timer is + // running and a SACK will be sent on expiry, or if DATA is sent, or after + // next packet with data. + kDelayed, + + // Send a SACK immediately after handling this packet. + kImmediate, + }; + + // Represents ranges of TSNs that have been received that are not directly + // following the last cumulative acked TSN. This information is returned to + // the sender in the "gap ack blocks" in the SACK chunk. The blocks are always + // non-overlapping and non-adjacent. + class AdditionalTsnBlocks { + public: + // Represents an inclusive range of received TSNs, i.e. [first, last]. + struct TsnRange { + TsnRange(UnwrappedTSN first, UnwrappedTSN last) + : first(first), last(last) {} + UnwrappedTSN first; + UnwrappedTSN last; + }; + + // Adds a TSN to the set. This will try to expand any existing block and + // might merge blocks to ensure that all blocks are non-adjacent. If a + // current block can't be expanded, a new block is created. + // + // The return value indicates if `tsn` was added. 
If false is returned, the
+    // `tsn` was already represented in one of the blocks.
+    bool Add(UnwrappedTSN tsn);
+
+    // Erases all TSNs up to, and including `tsn`. This will remove all blocks
+    // that are completely below `tsn` and may truncate a block where `tsn` is
+    // within that block. In that case, the frontmost block's start TSN will be
+    // the next following tsn after `tsn`.
+    void EraseTo(UnwrappedTSN tsn);
+
+    // Removes the first block. Must not be called on an empty set.
+    void PopFront();
+
+    const std::vector<TsnRange>& blocks() const { return blocks_; }
+
+    bool empty() const { return blocks_.empty(); }
+
+    const TsnRange& front() const { return blocks_.front(); }
+
+   private:
+    // A sorted vector of non-overlapping and non-adjacent blocks.
+    std::vector<TsnRange> blocks_;
+  };
+
+  std::vector<SackChunk::GapAckBlock> CreateGapAckBlocks() const;
+  void UpdateAckState(AckState new_state, absl::string_view reason);
+  static absl::string_view ToString(AckState ack_state);
+
+  const std::string log_prefix_;
+  // If a packet has ever been seen.
+  bool seen_packet_ = false;
+  Timer& delayed_ack_timer_;
+  AckState ack_state_ = AckState::kIdle;
+  UnwrappedTSN::Unwrapper tsn_unwrapper_;
+
+  // All TSNs up until (and including) this value have been seen.
+  UnwrappedTSN last_cumulative_acked_tsn_;
+  // Received TSNs that are not directly following `last_cumulative_acked_tsn_`.
+  AdditionalTsnBlocks additional_tsn_blocks_;
+  std::set<TSN> duplicate_tsns_;
+};
+}  // namespace dcsctp
+
+#endif  // NET_DCSCTP_RX_DATA_TRACKER_H_
diff --git a/net/dcsctp/rx/data_tracker_test.cc b/net/dcsctp/rx/data_tracker_test.cc
new file mode 100644
index 0000000000..5c2e56fb2b
--- /dev/null
+++ b/net/dcsctp/rx/data_tracker_test.cc
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree.
An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#include "net/dcsctp/rx/data_tracker.h"
+
+#include <cstdint>
+#include <initializer_list>
+#include <memory>
+
+#include "absl/types/optional.h"
+#include "api/array_view.h"
+#include "net/dcsctp/packet/chunk/sack_chunk.h"
+#include "net/dcsctp/timer/fake_timeout.h"
+#include "net/dcsctp/timer/timer.h"
+#include "rtc_base/gunit.h"
+#include "test/gmock.h"
+
+namespace dcsctp {
+namespace {
+using ::testing::ElementsAre;
+using ::testing::IsEmpty;
+using ::testing::SizeIs;
+using ::testing::UnorderedElementsAre;
+
+constexpr size_t kArwnd = 10000;
+constexpr TSN kInitialTSN(11);
+
+class DataTrackerTest : public testing::Test {
+ protected:
+  DataTrackerTest()
+      : timeout_manager_([this]() { return now_; }),
+        timer_manager_([this]() { return timeout_manager_.CreateTimeout(); }),
+        timer_(timer_manager_.CreateTimer(
+            "test/delayed_ack",
+            []() { return absl::nullopt; },
+            TimerOptions(DurationMs(0)))),
+        buf_("log: ", timer_.get(), kInitialTSN) {}
+
+  void Observer(std::initializer_list<uint32_t> tsns) {
+    for (const uint32_t tsn : tsns) {
+      buf_.Observe(TSN(tsn), AnyDataChunk::ImmediateAckFlag(false));
+    }
+  }
+
+  TimeMs now_ = TimeMs(0);
+  FakeTimeoutManager timeout_manager_;
+  TimerManager timer_manager_;
+  std::unique_ptr<Timer> timer_;
+  DataTracker buf_;
+};
+
+TEST_F(DataTrackerTest, Empty) {
+  SackChunk sack = buf_.CreateSelectiveAck(kArwnd);
+  EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10));
+  EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty());
+  EXPECT_THAT(sack.duplicate_tsns(), IsEmpty());
+}
+
+TEST_F(DataTrackerTest, ObserverSingleInOrderPacket) {
+  Observer({11});
+  SackChunk sack = buf_.CreateSelectiveAck(kArwnd);
+  EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(11));
+  EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty());
+  EXPECT_THAT(sack.duplicate_tsns(), IsEmpty());
+}
+
+TEST_F(DataTrackerTest,
ObserverManyInOrderMovesCumulativeTsnAck) { + Observer({11, 12, 13}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(13)); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack.duplicate_tsns(), IsEmpty()); +} + +TEST_F(DataTrackerTest, ObserveOutOfOrderMovesCumulativeTsnAck) { + Observer({12, 13, 14, 11}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack.duplicate_tsns(), IsEmpty()); +} + +TEST_F(DataTrackerTest, SingleGap) { + Observer({12}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2))); + EXPECT_THAT(sack.duplicate_tsns(), IsEmpty()); +} + +TEST_F(DataTrackerTest, ExampleFromRFC4960Section334) { + Observer({11, 12, 14, 15, 17}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(12)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 3), + SackChunk::GapAckBlock(5, 5))); + EXPECT_THAT(sack.duplicate_tsns(), IsEmpty()); +} + +TEST_F(DataTrackerTest, AckAlreadyReceivedChunk) { + Observer({11}); + SackChunk sack1 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack1.cumulative_tsn_ack(), TSN(11)); + EXPECT_THAT(sack1.gap_ack_blocks(), IsEmpty()); + + // Receive old chunk + Observer({8}); + SackChunk sack2 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack2.cumulative_tsn_ack(), TSN(11)); + EXPECT_THAT(sack2.gap_ack_blocks(), IsEmpty()); +} + +TEST_F(DataTrackerTest, DoubleSendRetransmittedChunk) { + Observer({11, 13, 14, 15}); + SackChunk sack1 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack1.cumulative_tsn_ack(), TSN(11)); + EXPECT_THAT(sack1.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 4))); + + // Fill in the hole. 
+ Observer({12, 16, 17, 18}); + SackChunk sack2 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack2.cumulative_tsn_ack(), TSN(18)); + EXPECT_THAT(sack2.gap_ack_blocks(), IsEmpty()); + + // Receive chunk 12 again. + Observer({12, 19, 20, 21}); + SackChunk sack3 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack3.cumulative_tsn_ack(), TSN(21)); + EXPECT_THAT(sack3.gap_ack_blocks(), IsEmpty()); +} + +TEST_F(DataTrackerTest, ForwardTsnSimple) { + // Messages (11, 12, 13), (14, 15) - first message expires. + Observer({11, 12, 15}); + + buf_.HandleForwardTsn(TSN(13)); + + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(13)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2))); +} + +TEST_F(DataTrackerTest, ForwardTsnSkipsFromGapBlock) { + // Messages (11, 12, 13), (14, 15) - first message expires. + Observer({11, 12, 14}); + + buf_.HandleForwardTsn(TSN(13)); + + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); +} + +TEST_F(DataTrackerTest, ExampleFromRFC3758) { + buf_.HandleForwardTsn(TSN(102)); + + Observer({102, 104, 105, 107}); + + buf_.HandleForwardTsn(TSN(103)); + + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(105)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2))); +} + +TEST_F(DataTrackerTest, EmptyAllAcks) { + Observer({11, 13, 14, 15}); + + buf_.HandleForwardTsn(TSN(100)); + + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(100)); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); +} + +TEST_F(DataTrackerTest, SetsArwndCorrectly) { + SackChunk sack1 = buf_.CreateSelectiveAck(/*a_rwnd=*/100); + EXPECT_EQ(sack1.a_rwnd(), 100u); + + SackChunk sack2 = buf_.CreateSelectiveAck(/*a_rwnd=*/101); + EXPECT_EQ(sack2.a_rwnd(), 101u); +} + +TEST_F(DataTrackerTest, 
WillIncreaseCumAckTsn) {
+  EXPECT_EQ(buf_.last_cumulative_acked_tsn(), TSN(10));
+  EXPECT_FALSE(buf_.will_increase_cum_ack_tsn(TSN(10)));
+  EXPECT_TRUE(buf_.will_increase_cum_ack_tsn(TSN(11)));
+  EXPECT_FALSE(buf_.will_increase_cum_ack_tsn(TSN(12)));
+
+  Observer({11, 12, 13, 14, 15});
+  EXPECT_EQ(buf_.last_cumulative_acked_tsn(), TSN(15));
+  EXPECT_FALSE(buf_.will_increase_cum_ack_tsn(TSN(15)));
+  EXPECT_TRUE(buf_.will_increase_cum_ack_tsn(TSN(16)));
+  EXPECT_FALSE(buf_.will_increase_cum_ack_tsn(TSN(17)));
+}
+
+TEST_F(DataTrackerTest, ForceShouldSendSackImmediately) {
+  EXPECT_FALSE(buf_.ShouldSendAck());
+
+  buf_.ForceImmediateSack();
+
+  EXPECT_TRUE(buf_.ShouldSendAck());
+}
+
+TEST_F(DataTrackerTest, WillAcceptValidTSNs) {
+  // The initial TSN is always one more than the last, which is our base.
+  TSN last_tsn = TSN(*kInitialTSN - 1);
+  int limit = static_cast<int>(DataTracker::kMaxAcceptedOutstandingFragments);
+
+  for (int i = -limit; i <= limit; ++i) {
+    EXPECT_TRUE(buf_.IsTSNValid(TSN(*last_tsn + i)));
+  }
+}
+
+TEST_F(DataTrackerTest, WillNotAcceptInvalidTSNs) {
+  // The initial TSN is always one more than the last, which is our base.
+ TSN last_tsn = TSN(*kInitialTSN - 1); + + size_t limit = DataTracker::kMaxAcceptedOutstandingFragments; + EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn + limit + 1))); + EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn - (limit + 1)))); + EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn + 0x8000000))); + EXPECT_FALSE(buf_.IsTSNValid(TSN(*last_tsn - 0x8000000))); +} + +TEST_F(DataTrackerTest, ReportSingleDuplicateTsns) { + Observer({11, 12, 11}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(12)); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack.duplicate_tsns(), UnorderedElementsAre(TSN(11))); +} + +TEST_F(DataTrackerTest, ReportMultipleDuplicateTsns) { + Observer({11, 12, 13, 14, 12, 13, 12, 13, 15, 16}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(16)); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack.duplicate_tsns(), UnorderedElementsAre(TSN(12), TSN(13))); +} + +TEST_F(DataTrackerTest, ReportDuplicateTsnsInGapAckBlocks) { + Observer({11, /*12,*/ 13, 14, 13, 14, 15, 16}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(11)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 5))); + EXPECT_THAT(sack.duplicate_tsns(), UnorderedElementsAre(TSN(13), TSN(14))); +} + +TEST_F(DataTrackerTest, ClearsDuplicateTsnsAfterCreatingSack) { + Observer({11, 12, 13, 14, 12, 13, 12, 13, 15, 16}); + SackChunk sack1 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack1.cumulative_tsn_ack(), TSN(16)); + EXPECT_THAT(sack1.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack1.duplicate_tsns(), UnorderedElementsAre(TSN(12), TSN(13))); + + Observer({17}); + SackChunk sack2 = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack2.cumulative_tsn_ack(), TSN(17)); + EXPECT_THAT(sack2.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack2.duplicate_tsns(), IsEmpty()); +} + +TEST_F(DataTrackerTest, 
LimitsNumberOfDuplicatesReported) { + for (size_t i = 0; i < DataTracker::kMaxDuplicateTsnReported + 10; ++i) { + TSN tsn(11 + i); + buf_.Observe(tsn, AnyDataChunk::ImmediateAckFlag(false)); + buf_.Observe(tsn, AnyDataChunk::ImmediateAckFlag(false)); + } + + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_THAT(sack.gap_ack_blocks(), IsEmpty()); + EXPECT_THAT(sack.duplicate_tsns(), + SizeIs(DataTracker::kMaxDuplicateTsnReported)); +} + +TEST_F(DataTrackerTest, LimitsNumberOfGapAckBlocksReported) { + for (size_t i = 0; i < DataTracker::kMaxGapAckBlocksReported + 10; ++i) { + TSN tsn(11 + i * 2); + buf_.Observe(tsn, AnyDataChunk::ImmediateAckFlag(false)); + } + + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(11)); + EXPECT_THAT(sack.gap_ack_blocks(), + SizeIs(DataTracker::kMaxGapAckBlocksReported)); +} + +TEST_F(DataTrackerTest, SendsSackForFirstPacketObserved) { + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); +} + +TEST_F(DataTrackerTest, SendsSackEverySecondPacketWhenThereIsNoPacketLoss) { + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({12}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); + Observer({13}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({14}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); + Observer({15}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); +} + +TEST_F(DataTrackerTest, SendsSackEveryPacketOnPacketLoss) { + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({13}); + buf_.ObservePacketEnd(); + 
EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({14}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({15}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({16}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + // Fill the hole. + Observer({12}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); + // Goes back to every second packet + Observer({17}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({18}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); +} + +TEST_F(DataTrackerTest, SendsSackOnDuplicateDataChunks) { + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({11}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + Observer({12}); + buf_.ObservePacketEnd(); + EXPECT_FALSE(buf_.ShouldSendAck()); + EXPECT_TRUE(timer_->is_running()); + // Goes back to every second packet + Observer({13}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); + // Duplicate again + Observer({12}); + buf_.ObservePacketEnd(); + EXPECT_TRUE(buf_.ShouldSendAck()); + EXPECT_FALSE(timer_->is_running()); +} + +TEST_F(DataTrackerTest, GapAckBlockAddSingleBlock) { + Observer({12}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2))); +} + +TEST_F(DataTrackerTest, GapAckBlockAddsAnother) { + Observer({12}); + Observer({14}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + 
EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2), + SackChunk::GapAckBlock(4, 4))); +} + +TEST_F(DataTrackerTest, GapAckBlockAddsDuplicate) { + Observer({12}); + Observer({12}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 2))); + EXPECT_THAT(sack.duplicate_tsns(), ElementsAre(TSN(12))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToRight) { + Observer({12}); + Observer({13}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 3))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToRightWithOther) { + Observer({12}); + Observer({20}); + Observer({30}); + Observer({21}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 2), // + SackChunk::GapAckBlock(10, 11), // + SackChunk::GapAckBlock(20, 20))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToLeft) { + Observer({13}); + Observer({12}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), ElementsAre(SackChunk::GapAckBlock(2, 3))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToLeftWithOther) { + Observer({12}); + Observer({21}); + Observer({30}); + Observer({20}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 2), // + SackChunk::GapAckBlock(10, 11), // + SackChunk::GapAckBlock(20, 20))); +} + +TEST_F(DataTrackerTest, GapAckBlockExpandsToLRightAndMerges) { + Observer({12}); + Observer({20}); + Observer({22}); + Observer({30}); + 
Observer({21}); + SackChunk sack = buf_.CreateSelectiveAck(kArwnd); + EXPECT_EQ(sack.cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(sack.gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 2), // + SackChunk::GapAckBlock(10, 12), // + SackChunk::GapAckBlock(20, 20))); +} + +TEST_F(DataTrackerTest, GapAckBlockMergesManyBlocksIntoOne) { + Observer({22}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12))); + Observer({30}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(20, 20))); + Observer({24}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(14, 14), // + SackChunk::GapAckBlock(20, 20))); + Observer({28}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(14, 14), // + SackChunk::GapAckBlock(18, 18), // + SackChunk::GapAckBlock(20, 20))); + Observer({26}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(14, 14), // + SackChunk::GapAckBlock(16, 16), // + SackChunk::GapAckBlock(18, 18), // + SackChunk::GapAckBlock(20, 20))); + Observer({29}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 12), // + SackChunk::GapAckBlock(14, 14), // + SackChunk::GapAckBlock(16, 16), // + SackChunk::GapAckBlock(18, 20))); + Observer({23}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 14), // + SackChunk::GapAckBlock(16, 16), // + SackChunk::GapAckBlock(18, 20))); + Observer({27}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 14), // + SackChunk::GapAckBlock(16, 20))); + + Observer({25}); + 
EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(12, 20))); + Observer({20}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(10, 10), // + SackChunk::GapAckBlock(12, 20))); + Observer({32}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(10, 10), // + SackChunk::GapAckBlock(12, 20), // + SackChunk::GapAckBlock(22, 22))); + Observer({21}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(10, 20), // + SackChunk::GapAckBlock(22, 22))); + Observer({31}); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(10, 22))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveBeforeCumAckTsn) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(8)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(10)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 4), // + SackChunk::GapAckBlock(10, 12), + SackChunk::GapAckBlock(20, 21))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveBeforeFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(11)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(6, 8), // + SackChunk::GapAckBlock(16, 17))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveAtBeginningOfFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(12)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(6, 8), // + SackChunk::GapAckBlock(16, 17))); +} + +TEST_F(DataTrackerTest, 
GapAckBlockRemoveAtMiddleOfFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + buf_.HandleForwardTsn(TSN(13)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(6, 8), // + SackChunk::GapAckBlock(16, 17))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveAtEndOfFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + buf_.HandleForwardTsn(TSN(14)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(14)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(6, 8), // + SackChunk::GapAckBlock(16, 17))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightAfterFirstBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(18)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(18)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(2, 4), // + SackChunk::GapAckBlock(12, 13))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightBeforeSecondBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(19)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(22)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(8, 9))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightAtStartOfSecondBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(20)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(22)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(8, 9))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightAtMiddleOfSecondBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(21)); + 
EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(22)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(8, 9))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveRightAtEndOfSecondBlock) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(22)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(22)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), + ElementsAre(SackChunk::GapAckBlock(8, 9))); +} + +TEST_F(DataTrackerTest, GapAckBlockRemoveeFarAfterAllBlocks) { + Observer({12, 13, 14, 20, 21, 22, 30, 31}); + + buf_.HandleForwardTsn(TSN(40)); + EXPECT_EQ(buf_.CreateSelectiveAck(kArwnd).cumulative_tsn_ack(), TSN(40)); + EXPECT_THAT(buf_.CreateSelectiveAck(kArwnd).gap_ack_blocks(), IsEmpty()); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/rx/reassembly_queue.cc b/net/dcsctp/rx/reassembly_queue.cc new file mode 100644 index 0000000000..581b9fcc49 --- /dev/null +++ b/net/dcsctp/rx/reassembly_queue.cc @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/rx/reassembly_queue.h" + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/common/str_join.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/rx/reassembly_streams.h" +#include "net/dcsctp/rx/traditional_reassembly_streams.h" +#include "rtc_base/logging.h" + +namespace dcsctp { +ReassemblyQueue::ReassemblyQueue(absl::string_view log_prefix, + TSN peer_initial_tsn, + size_t max_size_bytes) + : log_prefix_(std::string(log_prefix) + "reasm: "), + max_size_bytes_(max_size_bytes), + watermark_bytes_(max_size_bytes * kHighWatermarkLimit), + last_assembled_tsn_watermark_( + tsn_unwrapper_.Unwrap(TSN(*peer_initial_tsn - 1))), + streams_(std::make_unique( + log_prefix_, + [this](rtc::ArrayView tsns, + DcSctpMessage message) { + AddReassembledMessage(tsns, std::move(message)); + })) {} + +void ReassemblyQueue::Add(TSN tsn, Data data) { + RTC_DCHECK(IsConsistent()); + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "added tsn=" << *tsn + << ", stream=" << *data.stream_id << ":" + << *data.message_id << ":" << *data.fsn << ", type=" + << (data.is_beginning && data.is_end + ? "complete" + : data.is_beginning + ? "first" + : data.is_end ? 
"last" : "middle"); + + UnwrappedTSN unwrapped_tsn = tsn_unwrapper_.Unwrap(tsn); + + if (unwrapped_tsn <= last_assembled_tsn_watermark_ || + delivered_tsns_.find(unwrapped_tsn) != delivered_tsns_.end()) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "Chunk has already been delivered - skipping"; + return; + } + + // If a stream reset has been received with a "sender's last assigned tsn" in + // the future, the socket is in "deferred reset processing" mode and must + // buffer chunks until it's exited. + if (deferred_reset_streams_.has_value() && + unwrapped_tsn > + tsn_unwrapper_.Unwrap( + deferred_reset_streams_->req.sender_last_assigned_tsn())) { + RTC_DLOG(LS_VERBOSE) + << log_prefix_ << "Deferring chunk with tsn=" << *tsn + << " until cum_ack_tsn=" + << *deferred_reset_streams_->req.sender_last_assigned_tsn(); + // https://tools.ietf.org/html/rfc6525#section-5.2.2 + // "In this mode, any data arriving with a TSN larger than the + // Sender's Last Assigned TSN for the affected stream(s) MUST be queued + // locally and held until the cumulative acknowledgment point reaches the + // Sender's Last Assigned TSN." + queued_bytes_ += data.size(); + deferred_reset_streams_->deferred_chunks.emplace_back( + std::make_pair(tsn, std::move(data))); + } else { + queued_bytes_ += streams_->Add(unwrapped_tsn, std::move(data)); + } + + // https://tools.ietf.org/html/rfc4960#section-6.9 + // "Note: If the data receiver runs out of buffer space while still + // waiting for more fragments to complete the reassembly of the message, it + // should dispatch part of its inbound message through a partial delivery + // API (see Section 10), freeing some of its receive buffer space so that + // the rest of the message may be received." + + // TODO(boivie): Support EOR flag and partial delivery? 
+ RTC_DCHECK(IsConsistent()); +} + +ReconfigurationResponseParameter::Result ReassemblyQueue::ResetStreams( + const OutgoingSSNResetRequestParameter& req, + TSN cum_tsn_ack) { + RTC_DCHECK(IsConsistent()); + if (deferred_reset_streams_.has_value()) { + // In deferred mode already. + return ReconfigurationResponseParameter::Result::kInProgress; + } else if (req.request_sequence_number() <= + last_completed_reset_req_seq_nbr_) { + // Already performed at some time previously. + return ReconfigurationResponseParameter::Result::kSuccessPerformed; + } + + UnwrappedTSN sla_tsn = tsn_unwrapper_.Unwrap(req.sender_last_assigned_tsn()); + UnwrappedTSN unwrapped_cum_tsn_ack = tsn_unwrapper_.Unwrap(cum_tsn_ack); + + // https://tools.ietf.org/html/rfc6525#section-5.2.2 + // "If the Sender's Last Assigned TSN is greater than the + // cumulative acknowledgment point, then the endpoint MUST enter "deferred + // reset processing"." + if (sla_tsn > unwrapped_cum_tsn_ack) { + RTC_DLOG(LS_VERBOSE) + << log_prefix_ + << "Entering deferred reset processing mode until cum_tsn_ack=" + << *req.sender_last_assigned_tsn(); + deferred_reset_streams_ = absl::make_optional(req); + return ReconfigurationResponseParameter::Result::kInProgress; + } + + // https://tools.ietf.org/html/rfc6525#section-5.2.2 + // "... streams MUST be reset to 0 as the next expected SSN." 
+ streams_->ResetStreams(req.stream_ids()); + last_completed_reset_req_seq_nbr_ = req.request_sequence_number(); + RTC_DCHECK(IsConsistent()); + return ReconfigurationResponseParameter::Result::kSuccessPerformed; +} + +bool ReassemblyQueue::MaybeResetStreamsDeferred(TSN cum_ack_tsn) { + RTC_DCHECK(IsConsistent()); + if (deferred_reset_streams_.has_value()) { + UnwrappedTSN unwrapped_cum_ack_tsn = tsn_unwrapper_.Unwrap(cum_ack_tsn); + UnwrappedTSN unwrapped_sla_tsn = tsn_unwrapper_.Unwrap( + deferred_reset_streams_->req.sender_last_assigned_tsn()); + if (unwrapped_cum_ack_tsn >= unwrapped_sla_tsn) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "Leaving deferred reset processing with tsn=" + << *cum_ack_tsn << ", feeding back " + << deferred_reset_streams_->deferred_chunks.size() + << " chunks"; + // https://tools.ietf.org/html/rfc6525#section-5.2.2 + // "... streams MUST be reset to 0 as the next expected SSN." + streams_->ResetStreams(deferred_reset_streams_->req.stream_ids()); + std::vector> deferred_chunks = + std::move(deferred_reset_streams_->deferred_chunks); + // The response will not be sent now, but as a reply to the retried + // request, which will come as "in progress" has been sent prior. + last_completed_reset_req_seq_nbr_ = + deferred_reset_streams_->req.request_sequence_number(); + deferred_reset_streams_ = absl::nullopt; + + // https://tools.ietf.org/html/rfc6525#section-5.2.2 + // "Any queued TSNs (queued at step E2) MUST now be released and processed + // normally." + for (auto& p : deferred_chunks) { + const TSN& tsn = p.first; + Data& data = p.second; + queued_bytes_ -= data.size(); + Add(tsn, std::move(data)); + } + + RTC_DCHECK(IsConsistent()); + return true; + } else { + RTC_DLOG(LS_VERBOSE) << "Staying in deferred reset processing. 
tsn=" + << *cum_ack_tsn; + } + } + + return false; +} + +std::vector ReassemblyQueue::FlushMessages() { + std::vector ret; + reassembled_messages_.swap(ret); + return ret; +} + +void ReassemblyQueue::AddReassembledMessage( + rtc::ArrayView tsns, + DcSctpMessage message) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Assembled message from TSN=[" + << StrJoin(tsns, ",", + [](rtc::StringBuilder& sb, UnwrappedTSN tsn) { + sb << *tsn.Wrap(); + }) + << "], message; stream_id=" << *message.stream_id() + << ", ppid=" << *message.ppid() + << ", payload=" << message.payload().size() << " bytes"; + + for (const UnwrappedTSN tsn : tsns) { + // Update watermark, or insert into delivered_tsns_ + if (tsn == last_assembled_tsn_watermark_.next_value()) { + last_assembled_tsn_watermark_.Increment(); + } else { + delivered_tsns_.insert(tsn); + } + } + + // With new TSNs in delivered_tsns, gaps might be filled. + while (!delivered_tsns_.empty() && + *delivered_tsns_.begin() == + last_assembled_tsn_watermark_.next_value()) { + last_assembled_tsn_watermark_.Increment(); + delivered_tsns_.erase(delivered_tsns_.begin()); + } + + reassembled_messages_.emplace_back(std::move(message)); +} + +void ReassemblyQueue::Handle(const AnyForwardTsnChunk& forward_tsn) { + RTC_DCHECK(IsConsistent()); + UnwrappedTSN tsn = tsn_unwrapper_.Unwrap(forward_tsn.new_cumulative_tsn()); + + last_assembled_tsn_watermark_ = std::max(last_assembled_tsn_watermark_, tsn); + delivered_tsns_.erase(delivered_tsns_.begin(), + delivered_tsns_.upper_bound(tsn)); + + queued_bytes_ -= + streams_->HandleForwardTsn(tsn, forward_tsn.skipped_streams()); + RTC_DCHECK(IsConsistent()); +} + +bool ReassemblyQueue::IsConsistent() const { + // Allow queued_bytes_ to be larger than max_size_bytes, as it's not actively + // enforced in this class. This comparison will still trigger if queued_bytes_ + // became "negative". 
+ return (queued_bytes_ >= 0 && queued_bytes_ <= 2 * max_size_bytes_); +} + +} // namespace dcsctp diff --git a/net/dcsctp/rx/reassembly_queue.h b/net/dcsctp/rx/reassembly_queue.h new file mode 100644 index 0000000000..25cda70c58 --- /dev/null +++ b/net/dcsctp/rx/reassembly_queue.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_RX_REASSEMBLY_QUEUE_H_ +#define NET_DCSCTP_RX_REASSEMBLY_QUEUE_H_ + +#include + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/rx/reassembly_streams.h" + +namespace dcsctp { + +// Contains the received DATA chunks that haven't yet been reassembled, and +// reassembles chunks when possible. +// +// The actual assembly is handled by an implementation of the +// `ReassemblyStreams` interface. +// +// Except for reassembling fragmented messages, this class will also handle two +// less common operations; To handle the receiver-side of partial reliability +// (limited number of retransmissions or limited message lifetime) as well as +// stream resetting, which is used when a sender wishes to close a data channel. 
+// +// Partial reliability is handled when a FORWARD-TSN or I-FORWARD-TSN chunk is +// received, and it will simply delete any chunks matching the parameters in +// that chunk. This is mainly implemented in ReassemblyStreams. +// +// Resetting streams is handled when a RECONFIG chunks is received, with an +// "Outgoing SSN Reset Request" parameter. That parameter will contain a list of +// streams to reset, and a `sender_last_assigned_tsn`. If this TSN is not yet +// seen, the stream cannot be directly reset, and this class will respond that +// the reset is "deferred". But if this TSN provided is known, the stream can be +// immediately be reset. +// +// The ReassemblyQueue has a maximum size, as it would otherwise be an DoS +// attack vector where a peer could consume all memory of the other peer by +// sending a lot of ordered chunks, but carefully withholding an early one. It +// also has a watermark limit, which the caller can query is the number of bytes +// is above that limit. This is used by the caller to be selective in what to +// add to the reassembly queue, so that it's not exhausted. The caller is +// expected to call `is_full` prior to adding data to the queue and to act +// accordingly if the queue is full. +class ReassemblyQueue { + public: + // When the queue is filled over this fraction (of its maximum size), the + // socket should restrict incoming data to avoid filling up the queue. + static constexpr float kHighWatermarkLimit = 0.9; + + ReassemblyQueue(absl::string_view log_prefix, + TSN peer_initial_tsn, + size_t max_size_bytes); + + // Adds a data chunk to the queue, with a `tsn` and other parameters in + // `data`. + void Add(TSN tsn, Data data); + + // Indicates if the reassembly queue has any reassembled messages that can be + // retrieved by calling `FlushMessages`. + bool HasMessages() const { return !reassembled_messages_.empty(); } + + // Returns any reassembled messages. 
+ std::vector FlushMessages(); + + // Handle a ForwardTSN chunk, when the sender has indicated that the received + // (this class) should forget about some chunks. This is used to implement + // partial reliability. + void Handle(const AnyForwardTsnChunk& forward_tsn); + + // Given the reset stream request and the current cum_tsn_ack, might either + // reset the streams directly (returns kSuccessPerformed), or at a later time, + // by entering the "deferred reset processing" mode (returns kInProgress). + ReconfigurationResponseParameter::Result ResetStreams( + const OutgoingSSNResetRequestParameter& req, + TSN cum_tsn_ack); + + // Given the current (updated) cum_tsn_ack, might leave "defererred reset + // processing" mode and reset streams. Returns true if so. + bool MaybeResetStreamsDeferred(TSN cum_ack_tsn); + + // The number of payload bytes that have been queued. Note that the actual + // memory usage is higher due to additional overhead of tracking received + // data. + size_t queued_bytes() const { return queued_bytes_; } + + // The remaining bytes until the queue has reached the watermark limit. + size_t remaining_bytes() const { return watermark_bytes_ - queued_bytes_; } + + // Indicates if the queue is full. Data should not be added to the queue when + // it's full. + bool is_full() const { return queued_bytes_ >= max_size_bytes_; } + + // Indicates if the queue is above the watermark limit, which is a certain + // percentage of its size. + bool is_above_watermark() const { return queued_bytes_ >= watermark_bytes_; } + + // Returns the watermark limit, in bytes. 
+ size_t watermark_bytes() const { return watermark_bytes_; } + + private: + bool IsConsistent() const; + void AddReassembledMessage(rtc::ArrayView tsns, + DcSctpMessage message); + + struct DeferredResetStreams { + explicit DeferredResetStreams(OutgoingSSNResetRequestParameter req) + : req(std::move(req)) {} + OutgoingSSNResetRequestParameter req; + std::vector> deferred_chunks; + }; + + const std::string log_prefix_; + const size_t max_size_bytes_; + const size_t watermark_bytes_; + UnwrappedTSN::Unwrapper tsn_unwrapper_; + + // Whenever a message has been assembled, either increase + // `last_assembled_tsn_watermark_` or - if there are gaps - add the message's + // TSNs into delivered_tsns_ so that messages are not re-delivered on + // duplicate chunks. + UnwrappedTSN last_assembled_tsn_watermark_; + std::set delivered_tsns_; + // Messages that have been reassembled, and will be returned by + // `FlushMessages`. + std::vector reassembled_messages_; + + // If present, "deferred reset processing" mode is active. + absl::optional deferred_reset_streams_; + + // Contains the last request sequence number of the + // OutgoingSSNResetRequestParameter that was performed. + ReconfigRequestSN last_completed_reset_req_seq_nbr_ = ReconfigRequestSN(0); + + // The number of "payload bytes" that are in this queue, in total. + size_t queued_bytes_ = 0; + + // The actual implementation of ReassemblyStreams. + std::unique_ptr streams_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_RX_REASSEMBLY_QUEUE_H_ diff --git a/net/dcsctp/rx/reassembly_queue_test.cc b/net/dcsctp/rx/reassembly_queue_test.cc new file mode 100644 index 0000000000..e38372c7d1 --- /dev/null +++ b/net/dcsctp/rx/reassembly_queue_test.cc @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/rx/reassembly_queue.h" + +#include + +#include +#include +#include +#include +#include + +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/testing/data_generator.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; + +// The default maximum size of the Reassembly Queue. +static constexpr size_t kBufferSize = 10000; + +static constexpr StreamID kStreamID(1); +static constexpr SSN kSSN(0); +static constexpr MID kMID(0); +static constexpr FSN kFSN(0); +static constexpr PPID kPPID(53); + +static constexpr std::array kShortPayload = {1, 2, 3, 4}; +static constexpr std::array kMessage2Payload = {5, 6, 7, 8}; +static constexpr std::array kLongPayload = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; + +MATCHER_P3(SctpMessageIs, stream_id, ppid, expected_payload, "") { + if (arg.stream_id() != stream_id) { + *result_listener << "the stream_id is " << *arg.stream_id(); + return false; + } + + if (arg.ppid() != ppid) { + *result_listener << "the ppid is " << *arg.ppid(); + return false; + } + + if (std::vector(arg.payload().begin(), arg.payload().end()) != + std::vector(expected_payload.begin(), expected_payload.end())) { + *result_listener << "the payload is wrong"; + return false; + } + return true; +} + +class ReassemblyQueueTest : public testing::Test { + protected: + ReassemblyQueueTest() {} + DataGenerator gen_; +}; + +TEST_F(ReassemblyQueueTest, EmptyQueue) { + ReassemblyQueue reasm("log: 
", TSN(10), kBufferSize); + EXPECT_FALSE(reasm.HasMessages()); + EXPECT_EQ(reasm.queued_bytes(), 0u); +} + +TEST_F(ReassemblyQueueTest, SingleUnorderedChunkMessage) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(10), gen_.Unordered({1, 2, 3, 4}, "BE")); + EXPECT_TRUE(reasm.HasMessages()); + EXPECT_THAT(reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, kShortPayload))); + EXPECT_EQ(reasm.queued_bytes(), 0u); +} + +TEST_F(ReassemblyQueueTest, LargeUnorderedChunkAllPermutations) { + std::vector tsns = {10, 11, 12, 13}; + rtc::ArrayView payload(kLongPayload); + do { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + + for (size_t i = 0; i < tsns.size(); i++) { + auto span = payload.subview((tsns[i] - 10) * 4, 4); + Data::IsBeginning is_beginning(tsns[i] == 10); + Data::IsEnd is_end(tsns[i] == 13); + + reasm.Add(TSN(tsns[i]), + Data(kStreamID, kSSN, kMID, kFSN, kPPID, + std::vector(span.begin(), span.end()), + is_beginning, is_end, IsUnordered(false))); + if (i < 3) { + EXPECT_FALSE(reasm.HasMessages()); + } else { + EXPECT_TRUE(reasm.HasMessages()); + EXPECT_THAT(reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, kLongPayload))); + EXPECT_EQ(reasm.queued_bytes(), 0u); + } + } + } while (std::next_permutation(std::begin(tsns), std::end(tsns))); +} + +TEST_F(ReassemblyQueueTest, SingleOrderedChunkMessage) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(10), gen_.Ordered({1, 2, 3, 4}, "BE")); + EXPECT_EQ(reasm.queued_bytes(), 0u); + EXPECT_TRUE(reasm.HasMessages()); + EXPECT_THAT(reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, kShortPayload))); +} + +TEST_F(ReassemblyQueueTest, ManySmallOrderedMessages) { + std::vector tsns = {10, 11, 12, 13}; + rtc::ArrayView payload(kLongPayload); + do { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + for (size_t i = 0; i < tsns.size(); i++) { + auto span = payload.subview((tsns[i] - 10) * 4, 4); + 
Data::IsBeginning is_beginning(true); + Data::IsEnd is_end(true); + + SSN ssn(static_cast(tsns[i] - 10)); + reasm.Add(TSN(tsns[i]), + Data(kStreamID, ssn, kMID, kFSN, kPPID, + std::vector(span.begin(), span.end()), + is_beginning, is_end, IsUnordered(false))); + } + EXPECT_THAT( + reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, payload.subview(0, 4)), + SctpMessageIs(kStreamID, kPPID, payload.subview(4, 4)), + SctpMessageIs(kStreamID, kPPID, payload.subview(8, 4)), + SctpMessageIs(kStreamID, kPPID, payload.subview(12, 4)))); + EXPECT_EQ(reasm.queued_bytes(), 0u); + } while (std::next_permutation(std::begin(tsns), std::end(tsns))); +} + +TEST_F(ReassemblyQueueTest, RetransmissionInLargeOrdered) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(10), gen_.Ordered({1}, "B")); + reasm.Add(TSN(12), gen_.Ordered({3})); + reasm.Add(TSN(13), gen_.Ordered({4})); + reasm.Add(TSN(14), gen_.Ordered({5})); + reasm.Add(TSN(15), gen_.Ordered({6})); + reasm.Add(TSN(16), gen_.Ordered({7})); + reasm.Add(TSN(17), gen_.Ordered({8})); + EXPECT_EQ(reasm.queued_bytes(), 7u); + + // lost and retransmitted + reasm.Add(TSN(11), gen_.Ordered({2})); + reasm.Add(TSN(18), gen_.Ordered({9})); + reasm.Add(TSN(19), gen_.Ordered({10})); + EXPECT_EQ(reasm.queued_bytes(), 10u); + EXPECT_FALSE(reasm.HasMessages()); + + reasm.Add(TSN(20), gen_.Ordered({11, 12, 13, 14, 15, 16}, "E")); + EXPECT_TRUE(reasm.HasMessages()); + EXPECT_THAT(reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, kLongPayload))); + EXPECT_EQ(reasm.queued_bytes(), 0u); +} + +TEST_F(ReassemblyQueueTest, ForwardTSNRemoveUnordered) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(10), gen_.Unordered({1}, "B")); + reasm.Add(TSN(12), gen_.Unordered({3})); + reasm.Add(TSN(13), gen_.Unordered({4}, "E")); + + reasm.Add(TSN(14), gen_.Unordered({5}, "B")); + reasm.Add(TSN(15), gen_.Unordered({6})); + reasm.Add(TSN(17), gen_.Unordered({8}, "E")); + 
EXPECT_EQ(reasm.queued_bytes(), 6u); + + EXPECT_FALSE(reasm.HasMessages()); + + reasm.Handle(ForwardTsnChunk(TSN(13), {})); + EXPECT_EQ(reasm.queued_bytes(), 3u); + + // The lost chunk comes, but too late. + reasm.Add(TSN(11), gen_.Unordered({2})); + EXPECT_FALSE(reasm.HasMessages()); + EXPECT_EQ(reasm.queued_bytes(), 3u); + + // The second lost chunk comes, message is assembled. + reasm.Add(TSN(16), gen_.Unordered({7})); + EXPECT_TRUE(reasm.HasMessages()); + EXPECT_EQ(reasm.queued_bytes(), 0u); +} + +TEST_F(ReassemblyQueueTest, ForwardTSNRemoveOrdered) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(10), gen_.Ordered({1}, "B")); + reasm.Add(TSN(12), gen_.Ordered({3})); + reasm.Add(TSN(13), gen_.Ordered({4}, "E")); + + reasm.Add(TSN(14), gen_.Ordered({5}, "B")); + reasm.Add(TSN(15), gen_.Ordered({6})); + reasm.Add(TSN(16), gen_.Ordered({7})); + reasm.Add(TSN(17), gen_.Ordered({8}, "E")); + EXPECT_EQ(reasm.queued_bytes(), 7u); + + EXPECT_FALSE(reasm.HasMessages()); + + reasm.Handle(ForwardTsnChunk( + TSN(13), {ForwardTsnChunk::SkippedStream(kStreamID, kSSN)})); + EXPECT_EQ(reasm.queued_bytes(), 0u); + + // The lost chunk comes, but too late. 
+ EXPECT_TRUE(reasm.HasMessages()); + EXPECT_THAT(reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, kMessage2Payload))); +} + +TEST_F(ReassemblyQueueTest, ForwardTSNRemoveALotOrdered) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(10), gen_.Ordered({1}, "B")); + reasm.Add(TSN(12), gen_.Ordered({3})); + reasm.Add(TSN(13), gen_.Ordered({4}, "E")); + + reasm.Add(TSN(15), gen_.Ordered({5}, "B")); + reasm.Add(TSN(16), gen_.Ordered({6})); + reasm.Add(TSN(17), gen_.Ordered({7})); + reasm.Add(TSN(18), gen_.Ordered({8}, "E")); + EXPECT_EQ(reasm.queued_bytes(), 7u); + + EXPECT_FALSE(reasm.HasMessages()); + + reasm.Handle(ForwardTsnChunk( + TSN(13), {ForwardTsnChunk::SkippedStream(kStreamID, kSSN)})); + EXPECT_EQ(reasm.queued_bytes(), 0u); + + // The lost chunk comes, but too late. + EXPECT_TRUE(reasm.HasMessages()); + EXPECT_THAT(reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, kMessage2Payload))); +} + +TEST_F(ReassemblyQueueTest, ShouldntDeliverMessagesBeforeInitialTsn) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(5), gen_.Unordered({1, 2, 3, 4}, "BE")); + EXPECT_EQ(reasm.queued_bytes(), 0u); + EXPECT_FALSE(reasm.HasMessages()); +} + +TEST_F(ReassemblyQueueTest, ShouldntRedeliverUnorderedMessages) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(10), gen_.Unordered({1, 2, 3, 4}, "BE")); + EXPECT_EQ(reasm.queued_bytes(), 0u); + EXPECT_TRUE(reasm.HasMessages()); + EXPECT_THAT(reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, kShortPayload))); + reasm.Add(TSN(10), gen_.Unordered({1, 2, 3, 4}, "BE")); + EXPECT_EQ(reasm.queued_bytes(), 0u); + EXPECT_FALSE(reasm.HasMessages()); +} + +TEST_F(ReassemblyQueueTest, ShouldntRedeliverUnorderedMessagesReallyUnordered) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Add(TSN(10), gen_.Unordered({1, 2, 3, 4}, "B")); + EXPECT_EQ(reasm.queued_bytes(), 4u); + + 
EXPECT_FALSE(reasm.HasMessages()); + + reasm.Add(TSN(12), gen_.Unordered({1, 2, 3, 4}, "BE")); + EXPECT_EQ(reasm.queued_bytes(), 4u); + EXPECT_TRUE(reasm.HasMessages()); + + EXPECT_THAT(reasm.FlushMessages(), + ElementsAre(SctpMessageIs(kStreamID, kPPID, kShortPayload))); + reasm.Add(TSN(12), gen_.Unordered({1, 2, 3, 4}, "BE")); + EXPECT_EQ(reasm.queued_bytes(), 4u); + EXPECT_FALSE(reasm.HasMessages()); +} + +TEST_F(ReassemblyQueueTest, ShouldntDeliverBeforeForwardedTsn) { + ReassemblyQueue reasm("log: ", TSN(10), kBufferSize); + reasm.Handle(ForwardTsnChunk(TSN(12), {})); + + reasm.Add(TSN(12), gen_.Unordered({1, 2, 3, 4}, "BE")); + EXPECT_EQ(reasm.queued_bytes(), 0u); + EXPECT_FALSE(reasm.HasMessages()); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/rx/reassembly_streams.h b/net/dcsctp/rx/reassembly_streams.h new file mode 100644 index 0000000000..a8b42b5a2d --- /dev/null +++ b/net/dcsctp/rx/reassembly_streams.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_RX_REASSEMBLY_STREAMS_H_ +#define NET_DCSCTP_RX_REASSEMBLY_STREAMS_H_ + +#include +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_message.h" + +namespace dcsctp { + +// Implementations of this interface will be called when data is received, when +// data should be skipped/forgotten or when sequence number should be reset. 
+// +// As a result of these operations - mainly when data is received - the +// implementations of this interface should notify when a message has been +// assembled, by calling the provided callback of type `OnAssembledMessage`. How +// it assembles messages will depend on e.g. if a message was sent on an ordered +// or unordered stream. +// +// Implementations will - for each operation - indicate how much additional +// memory that has been used as a result of performing the operation. This is +// used to limit the maximum amount of memory used, to prevent out-of-memory +// situations. +class ReassemblyStreams { + public: + // This callback will be provided as an argument to the constructor of the + // concrete class implementing this interface and should be called when a + // message has been assembled as well as indicating from which TSNs this + // message was assembled from. + using OnAssembledMessage = + std::function tsns, + DcSctpMessage message)>; + + virtual ~ReassemblyStreams() = default; + + // Adds a data chunk to a stream as identified in `data`. + // If it was the last remaining chunk in a message, reassemble one (or + // several, in case of ordered chunks) messages. + // + // Returns the additional number of bytes added to the queue as a result of + // performing this operation. If this addition resulted in messages being + // assembled and delivered, this may be negative. + virtual int Add(UnwrappedTSN tsn, Data data) = 0; + + // Called for incoming FORWARD-TSN/I-FORWARD-TSN chunks - when the sender + // wishes the received to skip/forget about data up until the provided TSN. + // This is used to implement partial reliability, such as limiting the number + // of retransmissions or the an expiration duration. As a result of skipping + // data, this may result in the implementation being able to assemble messages + // in ordered streams. + // + // Returns the number of bytes removed from the queue as a result of + // this operation. 
+ virtual size_t HandleForwardTsn( + UnwrappedTSN new_cumulative_ack_tsn, + rtc::ArrayView + skipped_streams) = 0; + + // Called for incoming (possibly deferred) RE_CONFIG chunks asking for + // either a few streams, or all streams (when the list is empty) to be + // reset - to have their next SSN or Message ID to be zero. + virtual void ResetStreams(rtc::ArrayView stream_ids) = 0; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_RX_REASSEMBLY_STREAMS_H_ diff --git a/net/dcsctp/rx/traditional_reassembly_streams.cc b/net/dcsctp/rx/traditional_reassembly_streams.cc new file mode 100644 index 0000000000..7cec1150d5 --- /dev/null +++ b/net/dcsctp/rx/traditional_reassembly_streams.cc @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/rx/traditional_reassembly_streams.h" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "rtc_base/logging.h" + +namespace dcsctp { +namespace { + +// Given a map (`chunks`) and an iterator to within that map (`iter`), this +// function will return an iterator to the first chunk in that message, which +// has the `is_beginning` flag set. If there are any gaps, or if the beginning +// can't be found, `absl::nullopt` is returned. 
+absl::optional::iterator> FindBeginning( + const std::map& chunks, + std::map::iterator iter) { + UnwrappedTSN prev_tsn = iter->first; + for (;;) { + if (iter->second.is_beginning) { + return iter; + } + if (iter == chunks.begin()) { + return absl::nullopt; + } + --iter; + if (iter->first.next_value() != prev_tsn) { + return absl::nullopt; + } + prev_tsn = iter->first; + } +} + +// Given a map (`chunks`) and an iterator to within that map (`iter`), this +// function will return an iterator to the chunk after the last chunk in that +// message, which has the `is_end` flag set. If there are any gaps, or if the +// end can't be found, `absl::nullopt` is returned. +absl::optional::iterator> FindEnd( + std::map& chunks, + std::map::iterator iter) { + UnwrappedTSN prev_tsn = iter->first; + for (;;) { + if (iter->second.is_end) { + return ++iter; + } + ++iter; + if (iter == chunks.end()) { + return absl::nullopt; + } + if (iter->first != prev_tsn.next_value()) { + return absl::nullopt; + } + prev_tsn = iter->first; + } +} +} // namespace + +int TraditionalReassemblyStreams::UnorderedStream::Add(UnwrappedTSN tsn, + Data data) { + int queued_bytes = data.size(); + auto p = chunks_.emplace(tsn, std::move(data)); + if (!p.second /* !inserted */) { + return 0; + } + + queued_bytes -= TryToAssembleMessage(p.first); + + return queued_bytes; +} + +size_t TraditionalReassemblyStreams::UnorderedStream::TryToAssembleMessage( + ChunkMap::iterator iter) { + // TODO(boivie): This method is O(N) with the number of fragments in a + // message, which can be inefficient for very large values of N. This could be + // optimized by e.g. only trying to assemble a message once _any_ beginning + // and _any_ end has been found. 
+ absl::optional start = FindBeginning(chunks_, iter); + if (!start.has_value()) { + return 0; + } + absl::optional end = FindEnd(chunks_, iter); + if (!end.has_value()) { + return 0; + } + + size_t bytes_assembled = AssembleMessage(*start, *end); + chunks_.erase(*start, *end); + return bytes_assembled; +} + +size_t TraditionalReassemblyStreams::StreamBase::AssembleMessage( + const ChunkMap::iterator start, + const ChunkMap::iterator end) { + size_t count = std::distance(start, end); + + if (count == 1) { + // Fast path - zero-copy + const Data& data = start->second; + size_t payload_size = start->second.size(); + UnwrappedTSN tsns[1] = {start->first}; + DcSctpMessage message(data.stream_id, data.ppid, std::move(data.payload)); + parent_.on_assembled_message_(tsns, std::move(message)); + return payload_size; + } + + // Slow path - will need to concatenate the payload. + std::vector tsns; + std::vector payload; + + size_t payload_size = std::accumulate( + start, end, 0, + [](size_t v, const auto& p) { return v + p.second.size(); }); + + tsns.reserve(count); + payload.reserve(payload_size); + for (auto it = start; it != end; ++it) { + const Data& data = it->second; + tsns.push_back(it->first); + payload.insert(payload.end(), data.payload.begin(), data.payload.end()); + } + + DcSctpMessage message(start->second.stream_id, start->second.ppid, + std::move(payload)); + parent_.on_assembled_message_(tsns, std::move(message)); + + return payload_size; +} + +size_t TraditionalReassemblyStreams::UnorderedStream::EraseTo( + UnwrappedTSN tsn) { + auto end_iter = chunks_.upper_bound(tsn); + size_t removed_bytes = std::accumulate( + chunks_.begin(), end_iter, 0, + [](size_t r, const auto& p) { return r + p.second.size(); }); + + chunks_.erase(chunks_.begin(), end_iter); + return removed_bytes; +} + +size_t TraditionalReassemblyStreams::OrderedStream::TryToAssembleMessage() { + if (chunks_by_ssn_.empty() || chunks_by_ssn_.begin()->first != next_ssn_) { + return 0; + } + + 
ChunkMap& chunks = chunks_by_ssn_.begin()->second; + + if (!chunks.begin()->second.is_beginning || !chunks.rbegin()->second.is_end) { + return 0; + } + + uint32_t tsn_diff = + UnwrappedTSN::Difference(chunks.rbegin()->first, chunks.begin()->first); + if (tsn_diff != chunks.size() - 1) { + return 0; + } + + size_t assembled_bytes = AssembleMessage(chunks.begin(), chunks.end()); + chunks_by_ssn_.erase(chunks_by_ssn_.begin()); + next_ssn_.Increment(); + return assembled_bytes; +} + +size_t TraditionalReassemblyStreams::OrderedStream::TryToAssembleMessages() { + size_t assembled_bytes = 0; + + for (;;) { + size_t assembled_bytes_this_iter = TryToAssembleMessage(); + if (assembled_bytes_this_iter == 0) { + break; + } + assembled_bytes += assembled_bytes_this_iter; + } + return assembled_bytes; +} + +int TraditionalReassemblyStreams::OrderedStream::Add(UnwrappedTSN tsn, + Data data) { + int queued_bytes = data.size(); + + UnwrappedSSN ssn = ssn_unwrapper_.Unwrap(data.ssn); + auto p = chunks_by_ssn_[ssn].emplace(tsn, std::move(data)); + if (!p.second /* !inserted */) { + return 0; + } + + if (ssn == next_ssn_) { + queued_bytes -= TryToAssembleMessages(); + } + + return queued_bytes; +} + +size_t TraditionalReassemblyStreams::OrderedStream::EraseTo(SSN ssn) { + UnwrappedSSN unwrapped_ssn = ssn_unwrapper_.Unwrap(ssn); + + auto end_iter = chunks_by_ssn_.upper_bound(unwrapped_ssn); + size_t removed_bytes = std::accumulate( + chunks_by_ssn_.begin(), end_iter, 0, [](size_t r1, const auto& p) { + return r1 + + absl::c_accumulate(p.second, 0, [](size_t r2, const auto& q) { + return r2 + q.second.size(); + }); + }); + chunks_by_ssn_.erase(chunks_by_ssn_.begin(), end_iter); + + if (unwrapped_ssn >= next_ssn_) { + unwrapped_ssn.Increment(); + next_ssn_ = unwrapped_ssn; + } + + removed_bytes += TryToAssembleMessages(); + return removed_bytes; +} + +int TraditionalReassemblyStreams::Add(UnwrappedTSN tsn, Data data) { + if (data.is_unordered) { + auto it = 
unordered_streams_.emplace(data.stream_id, this).first; + return it->second.Add(tsn, std::move(data)); + } + + auto it = ordered_streams_.emplace(data.stream_id, this).first; + return it->second.Add(tsn, std::move(data)); +} + +size_t TraditionalReassemblyStreams::HandleForwardTsn( + UnwrappedTSN new_cumulative_ack_tsn, + rtc::ArrayView skipped_streams) { + size_t bytes_removed = 0; + // The `skipped_streams` only over ordered messages - need to + // iterate all unordered streams manually to remove those chunks. + for (auto& entry : unordered_streams_) { + bytes_removed += entry.second.EraseTo(new_cumulative_ack_tsn); + } + + for (const auto& skipped_stream : skipped_streams) { + auto it = ordered_streams_.find(skipped_stream.stream_id); + if (it != ordered_streams_.end()) { + bytes_removed += it->second.EraseTo(skipped_stream.ssn); + } + } + + return bytes_removed; +} + +void TraditionalReassemblyStreams::ResetStreams( + rtc::ArrayView stream_ids) { + if (stream_ids.empty()) { + for (auto& entry : ordered_streams_) { + const StreamID& stream_id = entry.first; + OrderedStream& stream = entry.second; + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "Resetting implicit stream_id=" << *stream_id; + stream.Reset(); + } + } else { + for (StreamID stream_id : stream_ids) { + auto it = ordered_streams_.find(stream_id); + if (it != ordered_streams_.end()) { + RTC_DLOG(LS_VERBOSE) + << log_prefix_ << "Resetting explicit stream_id=" << *stream_id; + it->second.Reset(); + } + } + } +} +} // namespace dcsctp diff --git a/net/dcsctp/rx/traditional_reassembly_streams.h b/net/dcsctp/rx/traditional_reassembly_streams.h new file mode 100644 index 0000000000..12d1d933a4 --- /dev/null +++ b/net/dcsctp/rx/traditional_reassembly_streams.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_RX_TRADITIONAL_REASSEMBLY_STREAMS_H_ +#define NET_DCSCTP_RX_TRADITIONAL_REASSEMBLY_STREAMS_H_ +#include +#include + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/rx/reassembly_streams.h" + +namespace dcsctp { + +// Handles reassembly of incoming data when interleaved message sending +// is not enabled on the association, i.e. when RFC8260 is not in use and +// RFC4960 is to be followed. +class TraditionalReassemblyStreams : public ReassemblyStreams { + public: + TraditionalReassemblyStreams(absl::string_view log_prefix, + OnAssembledMessage on_assembled_message) + : log_prefix_(log_prefix), on_assembled_message_(on_assembled_message) {} + + int Add(UnwrappedTSN tsn, Data data) override; + + size_t HandleForwardTsn( + UnwrappedTSN new_cumulative_ack_tsn, + rtc::ArrayView skipped_streams) + override; + + void ResetStreams(rtc::ArrayView stream_ids) override; + + private: + using ChunkMap = std::map; + + // Base class for `UnorderedStream` and `OrderedStream`. + class StreamBase { + protected: + explicit StreamBase(TraditionalReassemblyStreams* parent) + : parent_(*parent) {} + + size_t AssembleMessage(const ChunkMap::iterator start, + const ChunkMap::iterator end); + TraditionalReassemblyStreams& parent_; + }; + + // Manages all received data for a specific unordered stream, and assembles + // messages when possible. 
+ class UnorderedStream : StreamBase { + public: + explicit UnorderedStream(TraditionalReassemblyStreams* parent) + : StreamBase(parent) {} + int Add(UnwrappedTSN tsn, Data data); + // Returns the number of bytes removed from the queue. + size_t EraseTo(UnwrappedTSN tsn); + + private: + // Given an iterator to any chunk within the map, try to assemble a message + // into `reassembled_messages` containing it and - if successful - erase + // those chunks from the stream chunks map. + // + // Returns the number of bytes that were assembled. + size_t TryToAssembleMessage(ChunkMap::iterator iter); + + ChunkMap chunks_; + }; + + // Manages all received data for a specific ordered stream, and assembles + // messages when possible. + class OrderedStream : StreamBase { + public: + explicit OrderedStream(TraditionalReassemblyStreams* parent) + : StreamBase(parent), next_ssn_(ssn_unwrapper_.Unwrap(SSN(0))) {} + int Add(UnwrappedTSN tsn, Data data); + size_t EraseTo(SSN ssn); + void Reset() { + ssn_unwrapper_.Reset(); + next_ssn_ = ssn_unwrapper_.Unwrap(SSN(0)); + } + + private: + // Try to assemble one or several messages in order from the stream. + // Returns the number of bytes assembled if a message was assembled. + size_t TryToAssembleMessage(); + size_t TryToAssembleMessages(); + // This must be an ordered container to be able to iterate in SSN order. + std::map chunks_by_ssn_; + UnwrappedSSN::Unwrapper ssn_unwrapper_; + UnwrappedSSN next_ssn_; + }; + + const std::string log_prefix_; + + // Callback for when a message has been assembled. + const OnAssembledMessage on_assembled_message_; + + // All unordered and ordered streams, managing not-yet-assembled data. 
+ std::unordered_map + unordered_streams_; + std::unordered_map + ordered_streams_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_RX_TRADITIONAL_REASSEMBLY_STREAMS_H_ diff --git a/net/dcsctp/rx/traditional_reassembly_streams_test.cc b/net/dcsctp/rx/traditional_reassembly_streams_test.cc new file mode 100644 index 0000000000..30d29a05dc --- /dev/null +++ b/net/dcsctp/rx/traditional_reassembly_streams_test.cc @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/rx/traditional_reassembly_streams.h" + +#include +#include +#include + +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/rx/reassembly_streams.h" +#include "net/dcsctp/testing/data_generator.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::MockFunction; +using ::testing::NiceMock; + +class TraditionalReassemblyStreamsTest : public testing::Test { + protected: + UnwrappedTSN tsn(uint32_t value) { return tsn_.Unwrap(TSN(value)); } + + TraditionalReassemblyStreamsTest() {} + DataGenerator gen_; + UnwrappedTSN::Unwrapper tsn_; +}; + +TEST_F(TraditionalReassemblyStreamsTest, + AddUnorderedMessageReturnsCorrectSize) { + NiceMock> on_assembled; + + TraditionalReassemblyStreams streams("", on_assembled.AsStdFunction()); + + EXPECT_EQ(streams.Add(tsn(1), gen_.Unordered({1}, "B")), 1); + EXPECT_EQ(streams.Add(tsn(2), gen_.Unordered({2, 3, 4})), 3); + EXPECT_EQ(streams.Add(tsn(3), 
gen_.Unordered({5, 6})), 2); + // Adding the end fragment should make it empty again. + EXPECT_EQ(streams.Add(tsn(4), gen_.Unordered({7}, "E")), -6); +} + +TEST_F(TraditionalReassemblyStreamsTest, + AddSimpleOrderedMessageReturnsCorrectSize) { + NiceMock> on_assembled; + + TraditionalReassemblyStreams streams("", on_assembled.AsStdFunction()); + + EXPECT_EQ(streams.Add(tsn(1), gen_.Ordered({1}, "B")), 1); + EXPECT_EQ(streams.Add(tsn(2), gen_.Ordered({2, 3, 4})), 3); + EXPECT_EQ(streams.Add(tsn(3), gen_.Ordered({5, 6})), 2); + EXPECT_EQ(streams.Add(tsn(4), gen_.Ordered({7}, "E")), -6); +} + +TEST_F(TraditionalReassemblyStreamsTest, + AddMoreComplexOrderedMessageReturnsCorrectSize) { + NiceMock> on_assembled; + + TraditionalReassemblyStreams streams("", on_assembled.AsStdFunction()); + + EXPECT_EQ(streams.Add(tsn(1), gen_.Ordered({1}, "B")), 1); + Data late = gen_.Ordered({2, 3, 4}); + EXPECT_EQ(streams.Add(tsn(3), gen_.Ordered({5, 6})), 2); + EXPECT_EQ(streams.Add(tsn(4), gen_.Ordered({7}, "E")), 1); + + EXPECT_EQ(streams.Add(tsn(5), gen_.Ordered({1}, "BE")), 1); + EXPECT_EQ(streams.Add(tsn(6), gen_.Ordered({5, 6}, "B")), 2); + EXPECT_EQ(streams.Add(tsn(7), gen_.Ordered({7}, "E")), 1); + EXPECT_EQ(streams.Add(tsn(2), std::move(late)), -8); +} + +TEST_F(TraditionalReassemblyStreamsTest, + DeleteUnorderedMessageReturnsCorrectSize) { + NiceMock> on_assembled; + + TraditionalReassemblyStreams streams("", on_assembled.AsStdFunction()); + + EXPECT_EQ(streams.Add(tsn(1), gen_.Unordered({1}, "B")), 1); + EXPECT_EQ(streams.Add(tsn(2), gen_.Unordered({2, 3, 4})), 3); + EXPECT_EQ(streams.Add(tsn(3), gen_.Unordered({5, 6})), 2); + + EXPECT_EQ(streams.HandleForwardTsn(tsn(3), {}), 6u); +} + +TEST_F(TraditionalReassemblyStreamsTest, + DeleteSimpleOrderedMessageReturnsCorrectSize) { + NiceMock> on_assembled; + + TraditionalReassemblyStreams streams("", on_assembled.AsStdFunction()); + + EXPECT_EQ(streams.Add(tsn(1), gen_.Ordered({1}, "B")), 1); + EXPECT_EQ(streams.Add(tsn(2), 
gen_.Ordered({2, 3, 4})), 3); + EXPECT_EQ(streams.Add(tsn(3), gen_.Ordered({5, 6})), 2); + + ForwardTsnChunk::SkippedStream skipped[] = { + ForwardTsnChunk::SkippedStream(StreamID(1), SSN(0))}; + EXPECT_EQ(streams.HandleForwardTsn(tsn(3), skipped), 6u); +} + +TEST_F(TraditionalReassemblyStreamsTest, + DeleteManyOrderedMessagesReturnsCorrectSize) { + NiceMock> on_assembled; + + TraditionalReassemblyStreams streams("", on_assembled.AsStdFunction()); + + EXPECT_EQ(streams.Add(tsn(1), gen_.Ordered({1}, "B")), 1); + gen_.Ordered({2, 3, 4}); + EXPECT_EQ(streams.Add(tsn(3), gen_.Ordered({5, 6})), 2); + EXPECT_EQ(streams.Add(tsn(4), gen_.Ordered({7}, "E")), 1); + + EXPECT_EQ(streams.Add(tsn(5), gen_.Ordered({1}, "BE")), 1); + EXPECT_EQ(streams.Add(tsn(6), gen_.Ordered({5, 6}, "B")), 2); + EXPECT_EQ(streams.Add(tsn(7), gen_.Ordered({7}, "E")), 1); + + // Expire all three messages + ForwardTsnChunk::SkippedStream skipped[] = { + ForwardTsnChunk::SkippedStream(StreamID(1), SSN(2))}; + EXPECT_EQ(streams.HandleForwardTsn(tsn(8), skipped), 8u); +} + +TEST_F(TraditionalReassemblyStreamsTest, + DeleteOrderedMessageDelivesTwoReturnsCorrectSize) { + NiceMock> on_assembled; + + TraditionalReassemblyStreams streams("", on_assembled.AsStdFunction()); + + EXPECT_EQ(streams.Add(tsn(1), gen_.Ordered({1}, "B")), 1); + gen_.Ordered({2, 3, 4}); + EXPECT_EQ(streams.Add(tsn(3), gen_.Ordered({5, 6})), 2); + EXPECT_EQ(streams.Add(tsn(4), gen_.Ordered({7}, "E")), 1); + + EXPECT_EQ(streams.Add(tsn(5), gen_.Ordered({1}, "BE")), 1); + EXPECT_EQ(streams.Add(tsn(6), gen_.Ordered({5, 6}, "B")), 2); + EXPECT_EQ(streams.Add(tsn(7), gen_.Ordered({7}, "E")), 1); + + // The first ordered message expire, and the following two are delivered. 
+ ForwardTsnChunk::SkippedStream skipped[] = { + ForwardTsnChunk::SkippedStream(StreamID(1), SSN(0))}; + EXPECT_EQ(streams.HandleForwardTsn(tsn(4), skipped), 8u); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/socket/BUILD.gn b/net/dcsctp/socket/BUILD.gn new file mode 100644 index 0000000000..72ac139acb --- /dev/null +++ b/net/dcsctp/socket/BUILD.gn @@ -0,0 +1,236 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../../webrtc.gni") + +rtc_source_set("context") { + sources = [ "context.h" ] + deps = [ + "../common:internal_types", + "../packet:sctp_packet", + "../public:socket", + "../public:types", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +rtc_library("heartbeat_handler") { + deps = [ + ":context", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../packet:bounded_io", + "../packet:chunk", + "../packet:parameter", + "../packet:sctp_packet", + "../public:socket", + "../public:types", + "../timer", + ] + sources = [ + "heartbeat_handler.cc", + "heartbeat_handler.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("stream_reset_handler") { + deps = [ + ":context", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:internal_types", + "../common:str_join", + "../packet:chunk", + "../packet:parameter", + "../packet:sctp_packet", + "../packet:tlv_trait", + "../public:socket", + "../public:types", + "../rx:data_tracker", + 
"../rx:reassembly_queue", + "../timer", + "../tx:retransmission_queue", + ] + sources = [ + "stream_reset_handler.cc", + "stream_reset_handler.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("transmission_control_block") { + deps = [ + ":context", + ":heartbeat_handler", + ":stream_reset_handler", + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:sequence_numbers", + "../packet:chunk", + "../packet:sctp_packet", + "../public:socket", + "../public:types", + "../rx:data_tracker", + "../rx:reassembly_queue", + "../timer", + "../tx:retransmission_error_counter", + "../tx:retransmission_queue", + "../tx:retransmission_timeout", + "../tx:send_queue", + ] + sources = [ + "capabilities.h", + "transmission_control_block.cc", + "transmission_control_block.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("dcsctp_socket") { + deps = [ + ":context", + ":heartbeat_handler", + ":stream_reset_handler", + ":transmission_control_block", + "../../../api:array_view", + "../../../api:refcountedbase", + "../../../api:scoped_refptr", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:internal_types", + "../packet:bounded_io", + "../packet:chunk", + "../packet:chunk_validators", + "../packet:data", + "../packet:error_cause", + "../packet:parameter", + "../packet:sctp_packet", + "../packet:tlv_trait", + "../public:socket", + "../public:types", + "../rx:data_tracker", + "../rx:reassembly_queue", + "../timer", + "../tx:retransmission_error_counter", + "../tx:retransmission_queue", + "../tx:retransmission_timeout", + "../tx:rr_send_queue", + "../tx:send_queue", + ] + sources = [ + "callback_deferrer.h", + "dcsctp_socket.cc", + "dcsctp_socket.h", + "state_cookie.cc", + 
"state_cookie.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +if (rtc_include_tests) { + rtc_source_set("mock_callbacks") { + testonly = true + sources = [ "mock_dcsctp_socket_callbacks.h" ] + deps = [ + "../../../api:array_view", + "../../../rtc_base:logging", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "../public:socket", + "../public:types", + "../timer", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + } + + rtc_source_set("mock_context") { + testonly = true + sources = [ "mock_context.h" ] + deps = [ + ":context", + ":mock_callbacks", + "../../../test:test_support", + "../common:internal_types", + "../packet:sctp_packet", + "../public:socket", + "../public:types", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + } + + rtc_library("dcsctp_socket_unittests") { + testonly = true + + deps = [ + ":dcsctp_socket", + ":heartbeat_handler", + ":mock_callbacks", + ":mock_context", + ":stream_reset_handler", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "../common:internal_types", + "../packet:chunk", + "../packet:error_cause", + "../packet:parameter", + "../packet:sctp_packet", + "../packet:tlv_trait", + "../public:socket", + "../public:types", + "../public:utils", + "../rx:data_tracker", + "../rx:reassembly_queue", + "../testing:data_generator", + "../testing:testing_macros", + "../timer", + "../tx:mock_send_queue", + "../tx:retransmission_queue", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + sources = [ + "dcsctp_socket_test.cc", + 
"heartbeat_handler_test.cc", + "state_cookie_test.cc", + "stream_reset_handler_test.cc", + ] + } +} diff --git a/net/dcsctp/socket/callback_deferrer.h b/net/dcsctp/socket/callback_deferrer.h new file mode 100644 index 0000000000..197cf434af --- /dev/null +++ b/net/dcsctp/socket/callback_deferrer.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_SOCKET_CALLBACK_DEFERRER_H_ +#define NET_DCSCTP_SOCKET_CALLBACK_DEFERRER_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "api/ref_counted_base.h" +#include "api/scoped_refptr.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "rtc_base/ref_counted_object.h" + +namespace dcsctp { + +// Defers callbacks until they can be safely triggered. +// +// There are a lot of callbacks from the dcSCTP library to the client, +// such as when messages are received or streams are closed. When the client +// receives these callbacks, the client is expected to be able to call into the +// library - from within the callback. For example, sending a reply message when +// a certain SCTP message has been received, or to reconnect when the connection +// was closed for any reason. This means that the dcSCTP library must always be +// in a consistent and stable state when these callbacks are delivered, and to +// ensure that's the case, callbacks are not immediately delivered from where +// they originate, but instead queued (deferred) by this class. 
At the end of +// any public API method that may result in callbacks, they are triggered and +// then delivered. +// +// There are a number of exceptions, which is clearly annotated in the API. +class CallbackDeferrer : public DcSctpSocketCallbacks { + public: + explicit CallbackDeferrer(DcSctpSocketCallbacks& underlying) + : underlying_(underlying) {} + + void TriggerDeferred() { + // Need to swap here. The client may call into the library from within a + // callback, and that might result in adding new callbacks to this instance, + // and the vector can't be modified while iterated on. + std::vector> deferred; + deferred.swap(deferred_); + + for (auto& cb : deferred) { + cb(underlying_); + } + } + + void SendPacket(rtc::ArrayView data) override { + // Will not be deferred - call directly. + underlying_.SendPacket(data); + } + + std::unique_ptr CreateTimeout() override { + // Will not be deferred - call directly. + return underlying_.CreateTimeout(); + } + + TimeMs TimeMillis() override { + // Will not be deferred - call directly. + return underlying_.TimeMillis(); + } + + uint32_t GetRandomInt(uint32_t low, uint32_t high) override { + // Will not be deferred - call directly. 
+ return underlying_.GetRandomInt(low, high); + } + + void OnMessageReceived(DcSctpMessage message) override { + deferred_.emplace_back( + [deliverer = MessageDeliverer(std::move(message))]( + DcSctpSocketCallbacks& cb) mutable { deliverer.Deliver(cb); }); + } + + void OnError(ErrorKind error, absl::string_view message) override { + deferred_.emplace_back( + [error, message = std::string(message)](DcSctpSocketCallbacks& cb) { + cb.OnError(error, message); + }); + } + + void OnAborted(ErrorKind error, absl::string_view message) override { + deferred_.emplace_back( + [error, message = std::string(message)](DcSctpSocketCallbacks& cb) { + cb.OnAborted(error, message); + }); + } + + void OnConnected() override { + deferred_.emplace_back([](DcSctpSocketCallbacks& cb) { cb.OnConnected(); }); + } + + void OnClosed() override { + deferred_.emplace_back([](DcSctpSocketCallbacks& cb) { cb.OnClosed(); }); + } + + void OnConnectionRestarted() override { + deferred_.emplace_back( + [](DcSctpSocketCallbacks& cb) { cb.OnConnectionRestarted(); }); + } + + void OnStreamsResetFailed(rtc::ArrayView outgoing_streams, + absl::string_view reason) override { + deferred_.emplace_back( + [streams = std::vector(outgoing_streams.begin(), + outgoing_streams.end()), + reason = std::string(reason)](DcSctpSocketCallbacks& cb) { + cb.OnStreamsResetFailed(streams, reason); + }); + } + + void OnStreamsResetPerformed( + rtc::ArrayView outgoing_streams) override { + deferred_.emplace_back( + [streams = std::vector(outgoing_streams.begin(), + outgoing_streams.end())]( + DcSctpSocketCallbacks& cb) { + cb.OnStreamsResetPerformed(streams); + }); + } + + void OnIncomingStreamsReset( + rtc::ArrayView incoming_streams) override { + deferred_.emplace_back( + [streams = std::vector(incoming_streams.begin(), + incoming_streams.end())]( + DcSctpSocketCallbacks& cb) { cb.OnIncomingStreamsReset(streams); }); + } + + void OnBufferedAmountLow(StreamID stream_id) override { + 
deferred_.emplace_back([stream_id](DcSctpSocketCallbacks& cb) { + cb.OnBufferedAmountLow(stream_id); + }); + } + + void OnTotalBufferedAmountLow() override { + deferred_.emplace_back( + [](DcSctpSocketCallbacks& cb) { cb.OnTotalBufferedAmountLow(); }); + } + + private: + // A wrapper around the move-only DcSctpMessage, to let it be captured in a + // lambda. + class MessageDeliverer { + public: + explicit MessageDeliverer(DcSctpMessage&& message) + : state_(rtc::make_ref_counted(std::move(message))) {} + + void Deliver(DcSctpSocketCallbacks& c) { + // Really ensure that it's only called once. + RTC_DCHECK(!state_->has_delivered); + state_->has_delivered = true; + c.OnMessageReceived(std::move(state_->message)); + } + + private: + struct State : public rtc::RefCountInterface { + explicit State(DcSctpMessage&& m) + : has_delivered(false), message(std::move(m)) {} + bool has_delivered; + DcSctpMessage message; + }; + rtc::scoped_refptr state_; + }; + + DcSctpSocketCallbacks& underlying_; + std::vector> deferred_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_CALLBACK_DEFERRER_H_ diff --git a/net/dcsctp/socket/capabilities.h b/net/dcsctp/socket/capabilities.h new file mode 100644 index 0000000000..c6d3692b2d --- /dev/null +++ b/net/dcsctp/socket/capabilities.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_SOCKET_CAPABILITIES_H_ +#define NET_DCSCTP_SOCKET_CAPABILITIES_H_ + +namespace dcsctp { +// Indicates what the association supports, meaning that both parties +// support it and that feature can be used. 
+struct Capabilities { + // RFC3758 Partial Reliability Extension + bool partial_reliability = false; + // RFC8260 Stream Schedulers and User Message Interleaving + bool message_interleaving = false; + // RFC6525 Stream Reconfiguration + bool reconfig = false; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_CAPABILITIES_H_ diff --git a/net/dcsctp/socket/context.h b/net/dcsctp/socket/context.h new file mode 100644 index 0000000000..eca5b9e4fb --- /dev/null +++ b/net/dcsctp/socket/context.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_SOCKET_CONTEXT_H_ +#define NET_DCSCTP_SOCKET_CONTEXT_H_ + +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +// A set of helper methods used by handlers to e.g. send packets. +// +// Implemented by the TransmissionControlBlock. +class Context { + public: + virtual ~Context() = default; + + // Indicates if a connection has been established. + virtual bool is_connection_established() const = 0; + + // Returns this side's initial TSN value. + virtual TSN my_initial_tsn() const = 0; + + // Returns the peer's initial TSN value. + virtual TSN peer_initial_tsn() const = 0; + + // Returns the socket callbacks. + virtual DcSctpSocketCallbacks& callbacks() const = 0; + + // Observes a measured RTT value, in milliseconds. 
+ virtual void ObserveRTT(DurationMs rtt_ms) = 0; + + // Returns the current Retransmission Timeout (rto) value, in milliseconds. + virtual DurationMs current_rto() const = 0; + + // Increments the transmission error counter, given a human readable reason. + virtual bool IncrementTxErrorCounter(absl::string_view reason) = 0; + + // Clears the transmission error counter. + virtual void ClearTxErrorCounter() = 0; + + // Returns true if there have been too many retransmission errors. + virtual bool HasTooManyTxErrors() const = 0; + + // Returns a PacketBuilder, filled in with the correct verification tag. + virtual SctpPacket::Builder PacketBuilder() const = 0; + + // Builds the packet from `builder` and sends it. + virtual void Send(SctpPacket::Builder& builder) = 0; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_CONTEXT_H_ diff --git a/net/dcsctp/socket/dcsctp_socket.cc b/net/dcsctp/socket/dcsctp_socket.cc new file mode 100644 index 0000000000..71bc98c70d --- /dev/null +++ b/net/dcsctp/socket/dcsctp_socket.cc @@ -0,0 +1,1550 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/socket/dcsctp_socket.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/memory/memory.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/abort_chunk.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/chunk/cookie_ack_chunk.h" +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/data_common.h" +#include "net/dcsctp/packet/chunk/error_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" +#include "net/dcsctp/packet/chunk/idata_chunk.h" +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/init_ack_chunk.h" +#include "net/dcsctp/packet/chunk/init_chunk.h" +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_ack_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_complete_chunk.h" +#include "net/dcsctp/packet/chunk_validators.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/packet/error_cause/cookie_received_while_shutting_down_cause.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/error_cause/no_user_data_cause.h" +#include "net/dcsctp/packet/error_cause/out_of_resource_error_cause.h" +#include "net/dcsctp/packet/error_cause/protocol_violation_cause.h" +#include "net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h" +#include "net/dcsctp/packet/error_cause/user_initiated_abort_cause.h" +#include "net/dcsctp/packet/parameter/forward_tsn_supported_parameter.h" +#include 
"net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/parameter/state_cookie_parameter.h" +#include "net/dcsctp/packet/parameter/supported_extensions_parameter.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/packet_observer.h" +#include "net/dcsctp/rx/data_tracker.h" +#include "net/dcsctp/rx/reassembly_queue.h" +#include "net/dcsctp/socket/callback_deferrer.h" +#include "net/dcsctp/socket/capabilities.h" +#include "net/dcsctp/socket/heartbeat_handler.h" +#include "net/dcsctp/socket/state_cookie.h" +#include "net/dcsctp/socket/stream_reset_handler.h" +#include "net/dcsctp/socket/transmission_control_block.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/retransmission_queue.h" +#include "net/dcsctp/tx/send_queue.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" +#include "rtc_base/strings/string_format.h" + +namespace dcsctp { +namespace { + +// https://tools.ietf.org/html/rfc4960#section-5.1 +constexpr uint32_t kMinVerificationTag = 1; +constexpr uint32_t kMaxVerificationTag = std::numeric_limits::max(); + +// https://tools.ietf.org/html/rfc4960#section-3.3.2 +constexpr uint32_t kMinInitialTsn = 0; +constexpr uint32_t kMaxInitialTsn = std::numeric_limits::max(); + +Capabilities GetCapabilities(const DcSctpOptions& options, + const Parameters& parameters) { + Capabilities capabilities; + absl::optional supported_extensions = + parameters.get(); + + if (options.enable_partial_reliability) { + capabilities.partial_reliability = + parameters.get().has_value(); + if (supported_extensions.has_value()) { + capabilities.partial_reliability |= + supported_extensions->supports(ForwardTsnChunk::kType); + } + } + + if (options.enable_message_interleaving && 
supported_extensions.has_value()) { + capabilities.message_interleaving = + supported_extensions->supports(IDataChunk::kType) && + supported_extensions->supports(IForwardTsnChunk::kType); + } + if (supported_extensions.has_value() && + supported_extensions->supports(ReConfigChunk::kType)) { + capabilities.reconfig = true; + } + return capabilities; +} + +void AddCapabilityParameters(const DcSctpOptions& options, + Parameters::Builder& builder) { + std::vector chunk_types = {ReConfigChunk::kType}; + + if (options.enable_partial_reliability) { + builder.Add(ForwardTsnSupportedParameter()); + chunk_types.push_back(ForwardTsnChunk::kType); + } + if (options.enable_message_interleaving) { + chunk_types.push_back(IDataChunk::kType); + chunk_types.push_back(IForwardTsnChunk::kType); + } + builder.Add(SupportedExtensionsParameter(std::move(chunk_types))); +} + +TieTag MakeTieTag(DcSctpSocketCallbacks& cb) { + uint32_t tie_tag_upper = + cb.GetRandomInt(0, std::numeric_limits::max()); + uint32_t tie_tag_lower = + cb.GetRandomInt(1, std::numeric_limits::max()); + return TieTag(static_cast(tie_tag_upper) << 32 | + static_cast(tie_tag_lower)); +} + +} // namespace + +DcSctpSocket::DcSctpSocket(absl::string_view log_prefix, + DcSctpSocketCallbacks& callbacks, + std::unique_ptr packet_observer, + const DcSctpOptions& options) + : log_prefix_(std::string(log_prefix) + ": "), + packet_observer_(std::move(packet_observer)), + options_(options), + callbacks_(callbacks), + timer_manager_([this]() { return callbacks_.CreateTimeout(); }), + t1_init_(timer_manager_.CreateTimer( + "t1-init", + [this]() { return OnInitTimerExpiry(); }, + TimerOptions(options.t1_init_timeout, + TimerBackoffAlgorithm::kExponential, + options.max_init_retransmits))), + t1_cookie_(timer_manager_.CreateTimer( + "t1-cookie", + [this]() { return OnCookieTimerExpiry(); }, + TimerOptions(options.t1_cookie_timeout, + TimerBackoffAlgorithm::kExponential, + options.max_init_retransmits))), + 
t2_shutdown_(timer_manager_.CreateTimer( + "t2-shutdown", + [this]() { return OnShutdownTimerExpiry(); }, + TimerOptions(options.t2_shutdown_timeout, + TimerBackoffAlgorithm::kExponential, + options.max_retransmissions))), + send_queue_( + log_prefix_, + options_.max_send_buffer_size, + [this](StreamID stream_id) { + callbacks_.OnBufferedAmountLow(stream_id); + }, + options_.total_buffered_amount_low_threshold, + [this]() { callbacks_.OnTotalBufferedAmountLow(); }) {} + +std::string DcSctpSocket::log_prefix() const { + return log_prefix_ + "[" + std::string(ToString(state_)) + "] "; +} + +bool DcSctpSocket::IsConsistent() const { + switch (state_) { + case State::kClosed: + return (tcb_ == nullptr && !t1_init_->is_running() && + !t1_cookie_->is_running() && !t2_shutdown_->is_running()); + case State::kCookieWait: + return (tcb_ == nullptr && t1_init_->is_running() && + !t1_cookie_->is_running() && !t2_shutdown_->is_running()); + case State::kCookieEchoed: + return (tcb_ != nullptr && !t1_init_->is_running() && + t1_cookie_->is_running() && !t2_shutdown_->is_running() && + tcb_->has_cookie_echo_chunk()); + case State::kEstablished: + return (tcb_ != nullptr && !t1_init_->is_running() && + !t1_cookie_->is_running() && !t2_shutdown_->is_running()); + case State::kShutdownPending: + return (tcb_ != nullptr && !t1_init_->is_running() && + !t1_cookie_->is_running() && !t2_shutdown_->is_running()); + case State::kShutdownSent: + return (tcb_ != nullptr && !t1_init_->is_running() && + !t1_cookie_->is_running() && t2_shutdown_->is_running()); + case State::kShutdownReceived: + return (tcb_ != nullptr && !t1_init_->is_running() && + !t1_cookie_->is_running() && !t2_shutdown_->is_running()); + case State::kShutdownAckSent: + return (tcb_ != nullptr && !t1_init_->is_running() && + !t1_cookie_->is_running() && t2_shutdown_->is_running()); + } +} + +constexpr absl::string_view DcSctpSocket::ToString(DcSctpSocket::State state) { + switch (state) { + case 
DcSctpSocket::State::kClosed: + return "CLOSED"; + case DcSctpSocket::State::kCookieWait: + return "COOKIE_WAIT"; + case DcSctpSocket::State::kCookieEchoed: + return "COOKIE_ECHOED"; + case DcSctpSocket::State::kEstablished: + return "ESTABLISHED"; + case DcSctpSocket::State::kShutdownPending: + return "SHUTDOWN_PENDING"; + case DcSctpSocket::State::kShutdownSent: + return "SHUTDOWN_SENT"; + case DcSctpSocket::State::kShutdownReceived: + return "SHUTDOWN_RECEIVED"; + case DcSctpSocket::State::kShutdownAckSent: + return "SHUTDOWN_ACK_SENT"; + } +} + +void DcSctpSocket::SetState(State state, absl::string_view reason) { + if (state_ != state) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Socket state changed from " + << ToString(state_) << " to " << ToString(state) + << " due to " << reason; + state_ = state; + } +} + +void DcSctpSocket::SendInit() { + Parameters::Builder params_builder; + AddCapabilityParameters(options_, params_builder); + InitChunk init(/*initiate_tag=*/connect_params_.verification_tag, + /*a_rwnd=*/options_.max_receiver_window_buffer_size, + options_.announced_maximum_outgoing_streams, + options_.announced_maximum_incoming_streams, + connect_params_.initial_tsn, params_builder.Build()); + SctpPacket::Builder b(VerificationTag(0), options_); + b.Add(init); + SendPacket(b); +} + +void DcSctpSocket::MakeConnectionParameters() { + VerificationTag new_verification_tag( + callbacks_.GetRandomInt(kMinVerificationTag, kMaxVerificationTag)); + TSN initial_tsn(callbacks_.GetRandomInt(kMinInitialTsn, kMaxInitialTsn)); + connect_params_.initial_tsn = initial_tsn; + connect_params_.verification_tag = new_verification_tag; +} + +void DcSctpSocket::Connect() { + if (state_ == State::kClosed) { + MakeConnectionParameters(); + RTC_DLOG(LS_INFO) + << log_prefix() + << rtc::StringFormat( + "Connecting. 
my_verification_tag=%08x, my_initial_tsn=%u", + *connect_params_.verification_tag, *connect_params_.initial_tsn); + SendInit(); + t1_init_->Start(); + SetState(State::kCookieWait, "Connect called"); + } else { + RTC_DLOG(LS_WARNING) << log_prefix() + << "Called Connect on a socket that is not closed"; + } + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); +} + +void DcSctpSocket::Shutdown() { + if (tcb_ != nullptr) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "Upon receipt of the SHUTDOWN primitive from its upper layer, the + // endpoint enters the SHUTDOWN-PENDING state and remains there until all + // outstanding data has been acknowledged by its peer." + + // TODO(webrtc:12739): Remove this check, as it just hides the problem that + // the socket can transition from ShutdownSent to ShutdownPending, or + // ShutdownAckSent to ShutdownPending which is illegal. + if (state_ != State::kShutdownSent && state_ != State::kShutdownAckSent) { + SetState(State::kShutdownPending, "Shutdown called"); + t1_init_->Stop(); + t1_cookie_->Stop(); + MaybeSendShutdownOrAck(); + } + } else { + // Connection closed before even starting to connect, or during the initial + // connection phase. There is no outstanding data, so the socket can just + // be closed (stopping any connection timers, if any), as this is the + // client's intention, by calling Shutdown. 
+ InternalClose(ErrorKind::kNoError, ""); + } + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); +} + +void DcSctpSocket::Close() { + if (state_ != State::kClosed) { + if (tcb_ != nullptr) { + SctpPacket::Builder b = tcb_->PacketBuilder(); + b.Add(AbortChunk(/*filled_in_verification_tag=*/true, + Parameters::Builder() + .Add(UserInitiatedAbortCause("Close called")) + .Build())); + SendPacket(b); + } + InternalClose(ErrorKind::kNoError, ""); + } else { + RTC_DLOG(LS_INFO) << log_prefix() << "Called Close on a closed socket"; + } + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); +} + +void DcSctpSocket::CloseConnectionBecauseOfTooManyTransmissionErrors() { + SendPacket(tcb_->PacketBuilder().Add(AbortChunk( + true, Parameters::Builder() + .Add(UserInitiatedAbortCause("Too many retransmissions")) + .Build()))); + InternalClose(ErrorKind::kTooManyRetries, "Too many retransmissions"); +} + +void DcSctpSocket::InternalClose(ErrorKind error, absl::string_view message) { + if (state_ != State::kClosed) { + t1_init_->Stop(); + t1_cookie_->Stop(); + t2_shutdown_->Stop(); + tcb_ = nullptr; + + if (error == ErrorKind::kNoError) { + callbacks_.OnClosed(); + } else { + callbacks_.OnAborted(error, message); + } + SetState(State::kClosed, message); + } + // This method's purpose is to abort/close and make it consistent by ensuring + // that e.g. all timers really are stopped. 
+ RTC_DCHECK(IsConsistent()); +} + +SendStatus DcSctpSocket::Send(DcSctpMessage message, + const SendOptions& send_options) { + if (message.payload().empty()) { + callbacks_.OnError(ErrorKind::kProtocolViolation, + "Unable to send empty message"); + return SendStatus::kErrorMessageEmpty; + } + if (message.payload().size() > options_.max_message_size) { + callbacks_.OnError(ErrorKind::kProtocolViolation, + "Unable to send too large message"); + return SendStatus::kErrorMessageTooLarge; + } + if (state_ == State::kShutdownPending || state_ == State::kShutdownSent || + state_ == State::kShutdownReceived || state_ == State::kShutdownAckSent) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "An endpoint should reject any new data request from its upper layer + // if it is in the SHUTDOWN-PENDING, SHUTDOWN-SENT, SHUTDOWN-RECEIVED, or + // SHUTDOWN-ACK-SENT state." + callbacks_.OnError(ErrorKind::kWrongSequence, + "Unable to send message as the socket is shutting down"); + return SendStatus::kErrorShuttingDown; + } + if (send_queue_.IsFull()) { + callbacks_.OnError(ErrorKind::kResourceExhaustion, + "Unable to send message as the send queue is full"); + return SendStatus::kErrorResourceExhaustion; + } + + TimeMs now = callbacks_.TimeMillis(); + send_queue_.Add(now, std::move(message), send_options); + if (tcb_ != nullptr) { + tcb_->SendBufferedPackets(now); + } + + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); + return SendStatus::kSuccess; +} + +ResetStreamsStatus DcSctpSocket::ResetStreams( + rtc::ArrayView outgoing_streams) { + if (tcb_ == nullptr) { + callbacks_.OnError(ErrorKind::kWrongSequence, + "Can't reset streams as the socket is not connected"); + return ResetStreamsStatus::kNotConnected; + } + if (!tcb_->capabilities().reconfig) { + callbacks_.OnError(ErrorKind::kUnsupportedOperation, + "Can't reset streams as the peer doesn't support it"); + return ResetStreamsStatus::kNotSupported; + } + + 
tcb_->stream_reset_handler().ResetStreams(outgoing_streams); + absl::optional reconfig = + tcb_->stream_reset_handler().MakeStreamResetRequest(); + if (reconfig.has_value()) { + SctpPacket::Builder builder = tcb_->PacketBuilder(); + builder.Add(*reconfig); + SendPacket(builder); + } + + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); + return ResetStreamsStatus::kPerformed; +} + +SocketState DcSctpSocket::state() const { + switch (state_) { + case State::kClosed: + return SocketState::kClosed; + case State::kCookieWait: + ABSL_FALLTHROUGH_INTENDED; + case State::kCookieEchoed: + return SocketState::kConnecting; + case State::kEstablished: + return SocketState::kConnected; + case State::kShutdownPending: + ABSL_FALLTHROUGH_INTENDED; + case State::kShutdownSent: + ABSL_FALLTHROUGH_INTENDED; + case State::kShutdownReceived: + ABSL_FALLTHROUGH_INTENDED; + case State::kShutdownAckSent: + return SocketState::kShuttingDown; + } +} + +void DcSctpSocket::SetMaxMessageSize(size_t max_message_size) { + options_.max_message_size = max_message_size; +} + +size_t DcSctpSocket::buffered_amount(StreamID stream_id) const { + return send_queue_.buffered_amount(stream_id); +} + +size_t DcSctpSocket::buffered_amount_low_threshold(StreamID stream_id) const { + return send_queue_.buffered_amount_low_threshold(stream_id); +} + +void DcSctpSocket::SetBufferedAmountLowThreshold(StreamID stream_id, + size_t bytes) { + send_queue_.SetBufferedAmountLowThreshold(stream_id, bytes); +} + +void DcSctpSocket::MaybeSendShutdownOnPacketReceived(const SctpPacket& packet) { + if (state_ == State::kShutdownSent) { + bool has_data_chunk = + std::find_if(packet.descriptors().begin(), packet.descriptors().end(), + [](const SctpPacket::ChunkDescriptor& descriptor) { + return descriptor.type == DataChunk::kType; + }) != packet.descriptors().end(); + if (has_data_chunk) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "While in the SHUTDOWN-SENT state, the SHUTDOWN sender MUST 
immediately + // respond to each received packet containing one or more DATA chunks with + // a SHUTDOWN chunk and restart the T2-shutdown timer."" + SendShutdown(); + t2_shutdown_->set_duration(tcb_->current_rto()); + t2_shutdown_->Start(); + } + } +} + +bool DcSctpSocket::ValidatePacket(const SctpPacket& packet) { + const CommonHeader& header = packet.common_header(); + VerificationTag my_verification_tag = + tcb_ != nullptr ? tcb_->my_verification_tag() : VerificationTag(0); + + if (header.verification_tag == VerificationTag(0)) { + if (packet.descriptors().size() == 1 && + packet.descriptors()[0].type == InitChunk::kType) { + // https://tools.ietf.org/html/rfc4960#section-8.5.1 + // "When an endpoint receives an SCTP packet with the Verification Tag + // set to 0, it should verify that the packet contains only an INIT chunk. + // Otherwise, the receiver MUST silently discard the packet."" + return true; + } + callbacks_.OnError( + ErrorKind::kParseFailed, + "Only a single INIT chunk can be present in packets sent on " + "verification_tag = 0"); + return false; + } + + if (packet.descriptors().size() == 1 && + packet.descriptors()[0].type == AbortChunk::kType) { + // https://tools.ietf.org/html/rfc4960#section-8.5.1 + // "The receiver of an ABORT MUST accept the packet if the Verification + // Tag field of the packet matches its own tag and the T bit is not set OR + // if it is set to its peer's tag and the T bit is set in the Chunk Flags. + // Otherwise, the receiver MUST silently discard the packet and take no + // further action." + bool t_bit = (packet.descriptors()[0].flags & 0x01) != 0; + if (t_bit && tcb_ == nullptr) { + // Can't verify the tag - assume it's okey. 
+ return true; + } + if ((!t_bit && header.verification_tag == my_verification_tag) || + (t_bit && header.verification_tag == tcb_->peer_verification_tag())) { + return true; + } + callbacks_.OnError(ErrorKind::kParseFailed, + "ABORT chunk verification tag was wrong"); + return false; + } + + if (packet.descriptors()[0].type == InitAckChunk::kType) { + if (header.verification_tag == connect_params_.verification_tag) { + return true; + } + callbacks_.OnError( + ErrorKind::kParseFailed, + rtc::StringFormat( + "Packet has invalid verification tag: %08x, expected %08x", + *header.verification_tag, *connect_params_.verification_tag)); + return false; + } + + if (packet.descriptors()[0].type == CookieEchoChunk::kType) { + // Handled in chunk handler (due to RFC 4960, section 5.2.4). + return true; + } + + if (packet.descriptors().size() == 1 && + packet.descriptors()[0].type == ShutdownCompleteChunk::kType) { + // https://tools.ietf.org/html/rfc4960#section-8.5.1 + // "The receiver of a SHUTDOWN COMPLETE shall accept the packet if the + // Verification Tag field of the packet matches its own tag and the T bit is + // not set OR if it is set to its peer's tag and the T bit is set in the + // Chunk Flags. Otherwise, the receiver MUST silently discard the packet + // and take no further action." + bool t_bit = (packet.descriptors()[0].flags & 0x01) != 0; + if (t_bit && tcb_ == nullptr) { + // Can't verify the tag - assume it's okey. + return true; + } + if ((!t_bit && header.verification_tag == my_verification_tag) || + (t_bit && header.verification_tag == tcb_->peer_verification_tag())) { + return true; + } + callbacks_.OnError(ErrorKind::kParseFailed, + "SHUTDOWN_COMPLETE chunk verification tag was wrong"); + return false; + } + + // https://tools.ietf.org/html/rfc4960#section-8.5 + // "When receiving an SCTP packet, the endpoint MUST ensure that the value + // in the Verification Tag field of the received SCTP packet matches its own + // tag. 
If the received Verification Tag value does not match the receiver's + // own tag value, the receiver shall silently discard the packet and shall not + // process it any further..." + if (header.verification_tag == my_verification_tag) { + return true; + } + + callbacks_.OnError( + ErrorKind::kParseFailed, + rtc::StringFormat( + "Packet has invalid verification tag: %08x, expected %08x", + *header.verification_tag, *my_verification_tag)); + return false; +} + +void DcSctpSocket::HandleTimeout(TimeoutID timeout_id) { + timer_manager_.HandleTimeout(timeout_id); + + if (tcb_ != nullptr && tcb_->HasTooManyTxErrors()) { + // Tearing down the TCB has to be done outside the handlers. + CloseConnectionBecauseOfTooManyTransmissionErrors(); + } + + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); +} + +void DcSctpSocket::ReceivePacket(rtc::ArrayView data) { + if (packet_observer_ != nullptr) { + packet_observer_->OnReceivedPacket(callbacks_.TimeMillis(), data); + } + + absl::optional packet = + SctpPacket::Parse(data, options_.disable_checksum_verification); + if (!packet.has_value()) { + // https://tools.ietf.org/html/rfc4960#section-6.8 + // "The default procedure for handling invalid SCTP packets is to + // silently discard them." 
+ callbacks_.OnError(ErrorKind::kParseFailed, + "Failed to parse received SCTP packet"); + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); + return; + } + + if (RTC_DLOG_IS_ON) { + for (const auto& descriptor : packet->descriptors()) { + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Received " + << DebugConvertChunkToString(descriptor.data); + } + } + + if (!ValidatePacket(*packet)) { + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Packet failed verification tag check - dropping"; + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); + return; + } + + MaybeSendShutdownOnPacketReceived(*packet); + + for (const auto& descriptor : packet->descriptors()) { + if (!Dispatch(packet->common_header(), descriptor)) { + break; + } + } + + if (tcb_ != nullptr) { + tcb_->data_tracker().ObservePacketEnd(); + tcb_->MaybeSendSack(); + } + + RTC_DCHECK(IsConsistent()); + callbacks_.TriggerDeferred(); +} + +void DcSctpSocket::DebugPrintOutgoing(rtc::ArrayView payload) { + auto packet = SctpPacket::Parse(payload); + RTC_DCHECK(packet.has_value()); + + for (const auto& desc : packet->descriptors()) { + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Sent " + << DebugConvertChunkToString(desc.data); + } +} + +bool DcSctpSocket::Dispatch(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + switch (descriptor.type) { + case DataChunk::kType: + HandleData(header, descriptor); + break; + case InitChunk::kType: + HandleInit(header, descriptor); + break; + case InitAckChunk::kType: + HandleInitAck(header, descriptor); + break; + case SackChunk::kType: + HandleSack(header, descriptor); + break; + case HeartbeatRequestChunk::kType: + HandleHeartbeatRequest(header, descriptor); + break; + case HeartbeatAckChunk::kType: + HandleHeartbeatAck(header, descriptor); + break; + case AbortChunk::kType: + HandleAbort(header, descriptor); + break; + case ErrorChunk::kType: + HandleError(header, descriptor); + break; + case CookieEchoChunk::kType: + 
HandleCookieEcho(header, descriptor); + break; + case CookieAckChunk::kType: + HandleCookieAck(header, descriptor); + break; + case ShutdownChunk::kType: + HandleShutdown(header, descriptor); + break; + case ShutdownAckChunk::kType: + HandleShutdownAck(header, descriptor); + break; + case ShutdownCompleteChunk::kType: + HandleShutdownComplete(header, descriptor); + break; + case ReConfigChunk::kType: + HandleReconfig(header, descriptor); + break; + case ForwardTsnChunk::kType: + HandleForwardTsn(header, descriptor); + break; + case IDataChunk::kType: + HandleIData(header, descriptor); + break; + case IForwardTsnChunk::kType: + HandleForwardTsn(header, descriptor); + break; + default: + return HandleUnrecognizedChunk(descriptor); + } + return true; +} + +bool DcSctpSocket::HandleUnrecognizedChunk( + const SctpPacket::ChunkDescriptor& descriptor) { + bool report_as_error = (descriptor.type & 0x40) != 0; + bool continue_processing = (descriptor.type & 0x80) != 0; + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Received unknown chunk: " + << static_cast(descriptor.type); + if (report_as_error) { + rtc::StringBuilder sb; + sb << "Received unknown chunk of type: " + << static_cast(descriptor.type) << " with report-error bit set"; + callbacks_.OnError(ErrorKind::kParseFailed, sb.str()); + RTC_DLOG(LS_VERBOSE) + << log_prefix() + << "Unknown chunk, with type indicating it should be reported."; + + // https://tools.ietf.org/html/rfc4960#section-3.2 + // "... report in an ERROR chunk using the 'Unrecognized Chunk Type' + // cause." + if (tcb_ != nullptr) { + // Need TCB - this chunk must be sent with a correct verification tag. 
+ SendPacket(tcb_->PacketBuilder().Add( + ErrorChunk(Parameters::Builder() + .Add(UnrecognizedChunkTypeCause(std::vector( + descriptor.data.begin(), descriptor.data.end()))) + .Build()))); + } + } + if (!continue_processing) { + // https://tools.ietf.org/html/rfc4960#section-3.2 + // "Stop processing this SCTP packet and discard it, do not process any + // further chunks within it." + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Unknown chunk, with type indicating not to " + "process any further chunks"; + } + + return continue_processing; +} + +absl::optional DcSctpSocket::OnInitTimerExpiry() { + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Timer " << t1_init_->name() + << " has expired: " << t1_init_->expiration_count() + << "/" << t1_init_->options().max_restarts; + RTC_DCHECK(state_ == State::kCookieWait); + + if (t1_init_->is_running()) { + SendInit(); + } else { + InternalClose(ErrorKind::kTooManyRetries, "No INIT_ACK received"); + } + RTC_DCHECK(IsConsistent()); + return absl::nullopt; +} + +absl::optional DcSctpSocket::OnCookieTimerExpiry() { + // https://tools.ietf.org/html/rfc4960#section-4 + // "If the T1-cookie timer expires, the endpoint MUST retransmit COOKIE + // ECHO and restart the T1-cookie timer without changing state. This MUST + // be repeated up to 'Max.Init.Retransmits' times. After that, the endpoint + // MUST abort the initialization process and report the error to the SCTP + // user." 
+ RTC_DLOG(LS_VERBOSE) << log_prefix() << "Timer " << t1_cookie_->name() + << " has expired: " << t1_cookie_->expiration_count() + << "/" << t1_cookie_->options().max_restarts; + + RTC_DCHECK(state_ == State::kCookieEchoed); + + if (t1_cookie_->is_running()) { + tcb_->SendBufferedPackets(callbacks_.TimeMillis()); + } else { + InternalClose(ErrorKind::kTooManyRetries, "No COOKIE_ACK received"); + } + + RTC_DCHECK(IsConsistent()); + return absl::nullopt; +} + +absl::optional DcSctpSocket::OnShutdownTimerExpiry() { + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Timer " << t2_shutdown_->name() + << " has expired: " << t2_shutdown_->expiration_count() + << "/" << t2_shutdown_->options().max_restarts; + + if (!t2_shutdown_->is_running()) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "An endpoint should limit the number of retransmissions of the SHUTDOWN + // chunk to the protocol parameter 'Association.Max.Retrans'. If this + // threshold is exceeded, the endpoint should destroy the TCB..." + + SendPacket(tcb_->PacketBuilder().Add( + AbortChunk(true, Parameters::Builder() + .Add(UserInitiatedAbortCause( + "Too many retransmissions of SHUTDOWN")) + .Build()))); + + InternalClose(ErrorKind::kTooManyRetries, "No SHUTDOWN_ACK received"); + RTC_DCHECK(IsConsistent()); + return absl::nullopt; + } + + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "If the timer expires, the endpoint must resend the SHUTDOWN with the + // updated last sequential TSN received from its peer." + SendShutdown(); + RTC_DCHECK(IsConsistent()); + return tcb_->current_rto(); +} + +void DcSctpSocket::SendPacket(SctpPacket::Builder& builder) { + if (builder.empty()) { + return; + } + + std::vector payload = builder.Build(); + + if (RTC_DLOG_IS_ON) { + DebugPrintOutgoing(payload); + } + + // The heartbeat interval timer is restarted for every sent packet, to + // fire when the outgoing channel is inactive. 
+ if (tcb_ != nullptr) { + tcb_->heartbeat_handler().RestartTimer(); + } + + if (packet_observer_ != nullptr) { + packet_observer_->OnSentPacket(callbacks_.TimeMillis(), payload); + } + callbacks_.SendPacket(payload); +} + +bool DcSctpSocket::ValidateHasTCB() { + if (tcb_ != nullptr) { + return true; + } + + callbacks_.OnError( + ErrorKind::kNotConnected, + "Received unexpected commands on socket that is not connected"); + return false; +} + +void DcSctpSocket::ReportFailedToParseChunk(int chunk_type) { + rtc::StringBuilder sb; + sb << "Failed to parse chunk of type: " << chunk_type; + callbacks_.OnError(ErrorKind::kParseFailed, sb.str()); +} + +void DcSctpSocket::HandleData(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = DataChunk::Parse(descriptor.data); + if (ValidateParseSuccess(chunk) && ValidateHasTCB()) { + HandleDataCommon(*chunk); + } +} + +void DcSctpSocket::HandleIData(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = IDataChunk::Parse(descriptor.data); + if (ValidateParseSuccess(chunk) && ValidateHasTCB()) { + HandleDataCommon(*chunk); + } +} + +void DcSctpSocket::HandleDataCommon(AnyDataChunk& chunk) { + TSN tsn = chunk.tsn(); + AnyDataChunk::ImmediateAckFlag immediate_ack = chunk.options().immediate_ack; + Data data = std::move(chunk).extract(); + + if (data.payload.empty()) { + // Empty DATA chunks are illegal. 
+ SendPacket(tcb_->PacketBuilder().Add( + ErrorChunk(Parameters::Builder().Add(NoUserDataCause(tsn)).Build()))); + callbacks_.OnError(ErrorKind::kProtocolViolation, + "Received DATA chunk with no user data"); + return; + } + + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Handle DATA, queue_size=" + << tcb_->reassembly_queue().queued_bytes() + << ", water_mark=" + << tcb_->reassembly_queue().watermark_bytes() + << ", full=" << tcb_->reassembly_queue().is_full() + << ", above=" + << tcb_->reassembly_queue().is_above_watermark(); + + if (tcb_->reassembly_queue().is_full()) { + // If the reassembly queue is full, there is nothing that can be done. The + // specification only allows dropping gap-ack-blocks, and that's not + // likely to help as the socket has been trying to fill gaps since the + // watermark was reached. + SendPacket(tcb_->PacketBuilder().Add(AbortChunk( + true, Parameters::Builder().Add(OutOfResourceErrorCause()).Build()))); + InternalClose(ErrorKind::kResourceExhaustion, + "Reassembly Queue is exhausted"); + return; + } + + if (tcb_->reassembly_queue().is_above_watermark()) { + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Is above high watermark"; + // If the reassembly queue is above its high watermark, only accept data + // chunks that increase its cumulative ack tsn in an attempt to fill gaps + // to deliver messages. 
+ if (!tcb_->data_tracker().will_increase_cum_ack_tsn(tsn)) { + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Rejected data because of exceeding watermark"; + tcb_->data_tracker().ForceImmediateSack(); + return; + } + } + + if (!tcb_->data_tracker().IsTSNValid(tsn)) { + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Rejected data because of failing TSN validity"; + return; + } + + tcb_->data_tracker().Observe(tsn, immediate_ack); + tcb_->reassembly_queue().MaybeResetStreamsDeferred( + tcb_->data_tracker().last_cumulative_acked_tsn()); + tcb_->reassembly_queue().Add(tsn, std::move(data)); + DeliverReassembledMessages(); +} + +void DcSctpSocket::HandleInit(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = InitChunk::Parse(descriptor.data); + if (!ValidateParseSuccess(chunk)) { + return; + } + + if (chunk->initiate_tag() == VerificationTag(0) || + chunk->nbr_outbound_streams() == 0 || chunk->nbr_inbound_streams() == 0) { + // https://tools.ietf.org/html/rfc4960#section-3.3.2 + // "If the value of the Initiate Tag in a received INIT chunk is found + // to be 0, the receiver MUST treat it as an error and close the + // association by transmitting an ABORT." + + // "A receiver of an INIT with the OS value set to 0 SHOULD abort the + // association." + + // "A receiver of an INIT with the MIS value of 0 SHOULD abort the + // association." 
+ + SendPacket(SctpPacket::Builder(VerificationTag(0), options_) + .Add(AbortChunk( + /*filled_in_verification_tag=*/false, + Parameters::Builder() + .Add(ProtocolViolationCause("INIT malformed")) + .Build()))); + InternalClose(ErrorKind::kProtocolViolation, "Received invalid INIT"); + return; + } + + if (state_ == State::kShutdownAckSent) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "If an endpoint is in the SHUTDOWN-ACK-SENT state and receives an + // INIT chunk (e.g., if the SHUTDOWN COMPLETE was lost) with source and + // destination transport addresses (either in the IP addresses or in the + // INIT chunk) that belong to this association, it should discard the INIT + // chunk and retransmit the SHUTDOWN ACK chunk." + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Received Init indicating lost ShutdownComplete"; + SendShutdownAck(); + return; + } + + TieTag tie_tag(0); + if (state_ == State::kClosed) { + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Received Init in closed state (normal)"; + + MakeConnectionParameters(); + } else if (state_ == State::kCookieWait || state_ == State::kCookieEchoed) { + // https://tools.ietf.org/html/rfc4960#section-5.2.1 + // "This usually indicates an initialization collision, i.e., each + // endpoint is attempting, at about the same time, to establish an + // association with the other endpoint. Upon receipt of an INIT in the + // COOKIE-WAIT state, an endpoint MUST respond with an INIT ACK using the + // same parameters it sent in its original INIT chunk (including its + // Initiate Tag, unchanged). When responding, the endpoint MUST send the + // INIT ACK back to the same address that the original INIT (sent by this + // endpoint) was sent." 
+ RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Received Init indicating simultaneous connections"; + } else { + RTC_DCHECK(tcb_ != nullptr); + // https://tools.ietf.org/html/rfc4960#section-5.2.2 + // "The outbound SCTP packet containing this INIT ACK MUST carry a + // Verification Tag value equal to the Initiate Tag found in the + // unexpected INIT. And the INIT ACK MUST contain a new Initiate Tag + // (randomly generated; see Section 5.3.1). Other parameters for the + // endpoint SHOULD be copied from the existing parameters of the + // association (e.g., number of outbound streams) into the INIT ACK and + // cookie." + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Received Init indicating restarted connection"; + // Create a new verification tag - different from the previous one. + for (int tries = 0; tries < 10; ++tries) { + connect_params_.verification_tag = VerificationTag( + callbacks_.GetRandomInt(kMinVerificationTag, kMaxVerificationTag)); + if (connect_params_.verification_tag != tcb_->my_verification_tag()) { + break; + } + } + + // Make the initial TSN make a large jump, so that there is no overlap + // with the old and new association. + connect_params_.initial_tsn = + TSN(*tcb_->retransmission_queue().next_tsn() + 1000000); + tie_tag = tcb_->tie_tag(); + } + + RTC_DLOG(LS_VERBOSE) + << log_prefix() + << rtc::StringFormat( + "Proceeding with connection. 
my_verification_tag=%08x, " + "my_initial_tsn=%u, peer_verification_tag=%08x, " + "peer_initial_tsn=%u", + *connect_params_.verification_tag, *connect_params_.initial_tsn, + *chunk->initiate_tag(), *chunk->initial_tsn()); + + Capabilities capabilities = GetCapabilities(options_, chunk->parameters()); + + SctpPacket::Builder b(chunk->initiate_tag(), options_); + Parameters::Builder params_builder = + Parameters::Builder().Add(StateCookieParameter( + StateCookie(chunk->initiate_tag(), chunk->initial_tsn(), + chunk->a_rwnd(), tie_tag, capabilities) + .Serialize())); + AddCapabilityParameters(options_, params_builder); + + InitAckChunk init_ack(/*initiate_tag=*/connect_params_.verification_tag, + options_.max_receiver_window_buffer_size, + options_.announced_maximum_outgoing_streams, + options_.announced_maximum_incoming_streams, + connect_params_.initial_tsn, params_builder.Build()); + b.Add(init_ack); + SendPacket(b); +} + +void DcSctpSocket::HandleInitAck( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = InitAckChunk::Parse(descriptor.data); + if (!ValidateParseSuccess(chunk)) { + return; + } + + if (state_ != State::kCookieWait) { + // https://tools.ietf.org/html/rfc4960#section-5.2.3 + // "If an INIT ACK is received by an endpoint in any state other than + // the COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk." 
+ RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Received INIT_ACK in unexpected state"; + return; + } + + auto cookie = chunk->parameters().get(); + if (!cookie.has_value()) { + SendPacket(SctpPacket::Builder(connect_params_.verification_tag, options_) + .Add(AbortChunk( + /*filled_in_verification_tag=*/false, + Parameters::Builder() + .Add(ProtocolViolationCause("INIT-ACK malformed")) + .Build()))); + InternalClose(ErrorKind::kProtocolViolation, + "InitAck chunk doesn't contain a cookie"); + return; + } + Capabilities capabilities = GetCapabilities(options_, chunk->parameters()); + t1_init_->Stop(); + + tcb_ = std::make_unique( + timer_manager_, log_prefix_, options_, capabilities, callbacks_, + send_queue_, connect_params_.verification_tag, + connect_params_.initial_tsn, chunk->initiate_tag(), chunk->initial_tsn(), + chunk->a_rwnd(), MakeTieTag(callbacks_), + [this]() { return state_ == State::kEstablished; }, + [this](SctpPacket::Builder& builder) { return SendPacket(builder); }); + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Created peer TCB: " << tcb_->ToString(); + + SetState(State::kCookieEchoed, "INIT_ACK received"); + + // The connection isn't fully established just yet. 
+ tcb_->SetCookieEchoChunk(CookieEchoChunk(cookie->data())); + tcb_->SendBufferedPackets(callbacks_.TimeMillis()); + t1_cookie_->Start(); +} + +void DcSctpSocket::HandleCookieEcho( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = + CookieEchoChunk::Parse(descriptor.data); + if (!ValidateParseSuccess(chunk)) { + return; + } + + absl::optional cookie = + StateCookie::Deserialize(chunk->cookie()); + if (!cookie.has_value()) { + callbacks_.OnError(ErrorKind::kParseFailed, "Failed to parse state cookie"); + return; + } + + if (tcb_ != nullptr) { + if (!HandleCookieEchoWithTCB(header, *cookie)) { + return; + } + } else { + if (header.verification_tag != connect_params_.verification_tag) { + callbacks_.OnError( + ErrorKind::kParseFailed, + rtc::StringFormat( + "Received CookieEcho with invalid verification tag: %08x, " + "expected %08x", + *header.verification_tag, *connect_params_.verification_tag)); + return; + } + } + + // The init timer can be running on simultaneous connections. 
+ t1_init_->Stop(); + t1_cookie_->Stop(); + if (state_ != State::kEstablished) { + if (tcb_ != nullptr) { + tcb_->ClearCookieEchoChunk(); + } + SetState(State::kEstablished, "COOKIE_ECHO received"); + callbacks_.OnConnected(); + } + + if (tcb_ == nullptr) { + tcb_ = std::make_unique( + timer_manager_, log_prefix_, options_, cookie->capabilities(), + callbacks_, send_queue_, connect_params_.verification_tag, + connect_params_.initial_tsn, cookie->initiate_tag(), + cookie->initial_tsn(), cookie->a_rwnd(), MakeTieTag(callbacks_), + [this]() { return state_ == State::kEstablished; }, + [this](SctpPacket::Builder& builder) { return SendPacket(builder); }); + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Created peer TCB: " << tcb_->ToString(); + } + + SctpPacket::Builder b = tcb_->PacketBuilder(); + b.Add(CookieAckChunk()); + + // https://tools.ietf.org/html/rfc4960#section-5.1 + // "A COOKIE ACK chunk may be bundled with any pending DATA chunks (and/or + // SACK chunks), but the COOKIE ACK chunk MUST be the first chunk in the + // packet." + tcb_->SendBufferedPackets(b, callbacks_.TimeMillis()); +} + +bool DcSctpSocket::HandleCookieEchoWithTCB(const CommonHeader& header, + const StateCookie& cookie) { + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Handling CookieEchoChunk with TCB. local_tag=" + << *tcb_->my_verification_tag() + << ", peer_tag=" << *header.verification_tag + << ", tcb_tag=" << *tcb_->peer_verification_tag() + << ", cookie_tag=" << *cookie.initiate_tag() + << ", local_tie_tag=" << *tcb_->tie_tag() + << ", peer_tie_tag=" << *cookie.tie_tag(); + // https://tools.ietf.org/html/rfc4960#section-5.2.4 + // "Handle a COOKIE ECHO when a TCB Exists" + if (header.verification_tag != tcb_->my_verification_tag() && + tcb_->peer_verification_tag() != cookie.initiate_tag() && + cookie.tie_tag() == tcb_->tie_tag()) { + // "A) In this case, the peer may have restarted." 
+ if (state_ == State::kShutdownAckSent) { + // "If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes + // that the peer has restarted ... it MUST NOT set up a new association + // but instead resend the SHUTDOWN ACK and send an ERROR chunk with a + // "Cookie Received While Shutting Down" error cause to its peer." + SctpPacket::Builder b(cookie.initiate_tag(), options_); + b.Add(ShutdownAckChunk()); + b.Add(ErrorChunk(Parameters::Builder() + .Add(CookieReceivedWhileShuttingDownCause()) + .Build())); + SendPacket(b); + callbacks_.OnError(ErrorKind::kWrongSequence, + "Received COOKIE-ECHO while shutting down"); + return false; + } + + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Received COOKIE-ECHO indicating a restarted peer"; + + // If a message was partly sent, and the peer restarted, resend it in + // full by resetting the send queue. + send_queue_.Reset(); + tcb_ = nullptr; + callbacks_.OnConnectionRestarted(); + } else if (header.verification_tag == tcb_->my_verification_tag() && + tcb_->peer_verification_tag() != cookie.initiate_tag()) { + // TODO(boivie): Handle the peer_tag == 0? + // "B) In this case, both sides may be attempting to start an + // association at about the same time, but the peer endpoint started its + // INIT after responding to the local endpoint's INIT." + RTC_DLOG(LS_VERBOSE) + << log_prefix() + << "Received COOKIE-ECHO indicating simultaneous connections"; + tcb_ = nullptr; + } else if (header.verification_tag != tcb_->my_verification_tag() && + tcb_->peer_verification_tag() == cookie.initiate_tag() && + cookie.tie_tag() == TieTag(0)) { + // "C) In this case, the local endpoint's cookie has arrived late. + // Before it arrived, the local endpoint sent an INIT and received an + // INIT ACK and finally sent a COOKIE ECHO with the peer's same tag but + // a new tag of its own. The cookie should be silently discarded. The + // endpoint SHOULD NOT change states and should leave any timers + // running." 
+ RTC_DLOG(LS_VERBOSE) + << log_prefix() + << "Received COOKIE-ECHO indicating a late COOKIE-ECHO. Discarding"; + return false; + } else if (header.verification_tag == tcb_->my_verification_tag() && + tcb_->peer_verification_tag() == cookie.initiate_tag()) { + // "D) When both local and remote tags match, the endpoint should enter + // the ESTABLISHED state, if it is in the COOKIE-ECHOED state. It + // should stop any cookie timer that may be running and send a COOKIE + // ACK." + RTC_DLOG(LS_VERBOSE) + << log_prefix() + << "Received duplicate COOKIE-ECHO, probably because of peer not " + "receiving COOKIE-ACK and retransmitting COOKIE-ECHO. Continuing."; + } + return true; +} + +void DcSctpSocket::HandleCookieAck( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = CookieAckChunk::Parse(descriptor.data); + if (!ValidateParseSuccess(chunk)) { + return; + } + + if (state_ != State::kCookieEchoed) { + // https://tools.ietf.org/html/rfc4960#section-5.2.5 + // "At any state other than COOKIE-ECHOED, an endpoint should silently + // discard a received COOKIE ACK chunk." 
+ RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Received COOKIE_ACK not in COOKIE_ECHOED state"; + return; + } + + // RFC 4960, Errata ID: 4400 + t1_cookie_->Stop(); + tcb_->ClearCookieEchoChunk(); + SetState(State::kEstablished, "COOKIE_ACK received"); + tcb_->SendBufferedPackets(callbacks_.TimeMillis()); + callbacks_.OnConnected(); +} + +void DcSctpSocket::DeliverReassembledMessages() { + if (tcb_->reassembly_queue().HasMessages()) { + for (auto& message : tcb_->reassembly_queue().FlushMessages()) { + callbacks_.OnMessageReceived(std::move(message)); + } + } +} + +void DcSctpSocket::HandleSack(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = SackChunk::Parse(descriptor.data); + + if (ValidateParseSuccess(chunk) && ValidateHasTCB()) { + TimeMs now = callbacks_.TimeMillis(); + SackChunk sack = ChunkValidators::Clean(*std::move(chunk)); + + if (tcb_->retransmission_queue().HandleSack(now, sack)) { + MaybeSendShutdownOrAck(); + // Receiving an ACK will decrease outstanding bytes (maybe now below + // cwnd?) or indicate packet loss that may result in sending FORWARD-TSN. 
+ tcb_->SendBufferedPackets(now); + } else { + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Dropping out-of-order SACK with TSN " + << *sack.cumulative_tsn_ack(); + } + } +} + +void DcSctpSocket::HandleHeartbeatRequest( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = + HeartbeatRequestChunk::Parse(descriptor.data); + + if (ValidateParseSuccess(chunk) && ValidateHasTCB()) { + tcb_->heartbeat_handler().HandleHeartbeatRequest(*std::move(chunk)); + } +} + +void DcSctpSocket::HandleHeartbeatAck( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = + HeartbeatAckChunk::Parse(descriptor.data); + + if (ValidateParseSuccess(chunk) && ValidateHasTCB()) { + tcb_->heartbeat_handler().HandleHeartbeatAck(*std::move(chunk)); + } +} + +void DcSctpSocket::HandleAbort(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = AbortChunk::Parse(descriptor.data); + if (ValidateParseSuccess(chunk)) { + std::string error_string = ErrorCausesToString(chunk->error_causes()); + if (tcb_ == nullptr) { + // https://tools.ietf.org/html/rfc4960#section-3.3.7 + // "If an endpoint receives an ABORT with a format error or no TCB is + // found, it MUST silently discard it." + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Received ABORT (" << error_string + << ") on a connection with no TCB. 
Ignoring"; + return; + } + + RTC_DLOG(LS_WARNING) << log_prefix() << "Received ABORT (" << error_string + << ") - closing connection."; + InternalClose(ErrorKind::kPeerReported, error_string); + } +} + +void DcSctpSocket::HandleError(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = ErrorChunk::Parse(descriptor.data); + if (ValidateParseSuccess(chunk)) { + std::string error_string = ErrorCausesToString(chunk->error_causes()); + if (tcb_ == nullptr) { + RTC_DLOG(LS_VERBOSE) << log_prefix() << "Received ERROR (" << error_string + << ") on a connection with no TCB. Ignoring"; + return; + } + + RTC_DLOG(LS_WARNING) << log_prefix() << "Received ERROR: " << error_string; + callbacks_.OnError(ErrorKind::kPeerReported, + "Peer reported error: " + error_string); + } +} + +void DcSctpSocket::HandleReconfig( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = ReConfigChunk::Parse(descriptor.data); + if (ValidateParseSuccess(chunk) && ValidateHasTCB()) { + tcb_->stream_reset_handler().HandleReConfig(*std::move(chunk)); + } +} + +void DcSctpSocket::HandleShutdown( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + if (!ValidateParseSuccess(ShutdownChunk::Parse(descriptor.data))) { + return; + } + + if (state_ == State::kClosed) { + return; + } else if (state_ == State::kCookieWait || state_ == State::kCookieEchoed) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "If a SHUTDOWN is received in the COOKIE-WAIT or COOKIE ECHOED state, + // the SHUTDOWN chunk SHOULD be silently discarded." 
+ } else if (state_ == State::kShutdownSent) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "If an endpoint is in the SHUTDOWN-SENT state and receives a + // SHUTDOWN chunk from its peer, the endpoint shall respond immediately + // with a SHUTDOWN ACK to its peer, and move into the SHUTDOWN-ACK-SENT + // state restarting its T2-shutdown timer." + SendShutdownAck(); + SetState(State::kShutdownAckSent, "SHUTDOWN received"); + } else if (state_ == State::kShutdownAckSent) { + // TODO(webrtc:12739): This condition should be removed and handled by the + // next (state_ != State::kShutdownReceived). + return; + } else if (state_ != State::kShutdownReceived) { + RTC_DLOG(LS_VERBOSE) << log_prefix() + << "Received SHUTDOWN - shutting down the socket"; + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "Upon reception of the SHUTDOWN, the peer endpoint shall enter the + // SHUTDOWN-RECEIVED state, stop accepting new data from its SCTP user, + // and verify, by checking the Cumulative TSN Ack field of the chunk, that + // all its outstanding DATA chunks have been received by the SHUTDOWN + // sender." + SetState(State::kShutdownReceived, "SHUTDOWN received"); + MaybeSendShutdownOrAck(); + } +} + +void DcSctpSocket::HandleShutdownAck( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + if (!ValidateParseSuccess(ShutdownAckChunk::Parse(descriptor.data))) { + return; + } + + if (state_ == State::kShutdownSent || state_ == State::kShutdownAckSent) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall stop + // the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its peer, and + // remove all record of the association." + + // "If an endpoint is in the SHUTDOWN-ACK-SENT state and receives a + // SHUTDOWN ACK, it shall stop the T2-shutdown timer, send a SHUTDOWN + // COMPLETE chunk to its peer, and remove all record of the association." 
+ + SctpPacket::Builder b = tcb_->PacketBuilder(); + b.Add(ShutdownCompleteChunk(/*tag_reflected=*/false)); + SendPacket(b); + InternalClose(ErrorKind::kNoError, ""); + } else { + // https://tools.ietf.org/html/rfc4960#section-8.5.1 + // "If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state + // the procedures in Section 8.4 SHOULD be followed; in other words, it + // should be treated as an Out Of The Blue packet." + + // https://tools.ietf.org/html/rfc4960#section-8.4 + // "If the packet contains a SHUTDOWN ACK chunk, the receiver + // should respond to the sender of the OOTB packet with a SHUTDOWN + // COMPLETE. When sending the SHUTDOWN COMPLETE, the receiver of the OOTB + // packet must fill in the Verification Tag field of the outbound packet + // with the Verification Tag received in the SHUTDOWN ACK and set the T + // bit in the Chunk Flags to indicate that the Verification Tag is + // reflected." + + SctpPacket::Builder b(header.verification_tag, options_); + b.Add(ShutdownCompleteChunk(/*tag_reflected=*/true)); + SendPacket(b); + } +} + +void DcSctpSocket::HandleShutdownComplete( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + if (!ValidateParseSuccess(ShutdownCompleteChunk::Parse(descriptor.data))) { + return; + } + + if (state_ == State::kShutdownAckSent) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "Upon reception of the SHUTDOWN COMPLETE chunk, the endpoint will + // verify that it is in the SHUTDOWN-ACK-SENT state; if it is not, the + // chunk should be discarded. If the endpoint is in the SHUTDOWN-ACK-SENT + // state, the endpoint should stop the T2-shutdown timer and remove all + // knowledge of the association (and thus the association enters the + // CLOSED state)." 
+ InternalClose(ErrorKind::kNoError, ""); + } +} + +void DcSctpSocket::HandleForwardTsn( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = + ForwardTsnChunk::Parse(descriptor.data); + if (ValidateParseSuccess(chunk) && ValidateHasTCB()) { + HandleForwardTsnCommon(*chunk); + } +} + +void DcSctpSocket::HandleIForwardTsn( + const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor) { + absl::optional chunk = + IForwardTsnChunk::Parse(descriptor.data); + if (ValidateParseSuccess(chunk) && ValidateHasTCB()) { + HandleForwardTsnCommon(*chunk); + } +} + +void DcSctpSocket::HandleForwardTsnCommon(const AnyForwardTsnChunk& chunk) { + if (!tcb_->capabilities().partial_reliability) { + SctpPacket::Builder b = tcb_->PacketBuilder(); + b.Add(AbortChunk(/*filled_in_verification_tag=*/true, + Parameters::Builder() + .Add(ProtocolViolationCause( + "I-FORWARD-TSN received, but not indicated " + "during connection establishment")) + .Build())); + SendPacket(b); + + callbacks_.OnError(ErrorKind::kProtocolViolation, + "Received a FORWARD_TSN without announced peer support"); + return; + } + tcb_->data_tracker().HandleForwardTsn(chunk.new_cumulative_tsn()); + tcb_->reassembly_queue().Handle(chunk); + // A forward TSN - for ordered streams - may allow messages to be + // delivered. + DeliverReassembledMessages(); + + // Processing a FORWARD_TSN might result in sending a SACK. + tcb_->MaybeSendSack(); +} + +void DcSctpSocket::MaybeSendShutdownOrAck() { + if (tcb_->retransmission_queue().outstanding_bytes() != 0) { + return; + } + + if (state_ == State::kShutdownPending) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "Once all its outstanding data has been acknowledged, the endpoint + // shall send a SHUTDOWN chunk to its peer including in the Cumulative TSN + // Ack field the last sequential TSN it has received from the peer. 
It + // shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT + // state."" + + SendShutdown(); + t2_shutdown_->set_duration(tcb_->current_rto()); + t2_shutdown_->Start(); + SetState(State::kShutdownSent, "No more outstanding data"); + } else if (state_ == State::kShutdownReceived) { + // https://tools.ietf.org/html/rfc4960#section-9.2 + // "If the receiver of the SHUTDOWN has no more outstanding DATA + // chunks, the SHUTDOWN receiver MUST send a SHUTDOWN ACK and start a + // T2-shutdown timer of its own, entering the SHUTDOWN-ACK-SENT state. If + // the timer expires, the endpoint must resend the SHUTDOWN ACK." + + SendShutdownAck(); + SetState(State::kShutdownAckSent, "No more outstanding data"); + } +} + +void DcSctpSocket::SendShutdown() { + SctpPacket::Builder b = tcb_->PacketBuilder(); + b.Add(ShutdownChunk(tcb_->data_tracker().last_cumulative_acked_tsn())); + SendPacket(b); +} + +void DcSctpSocket::SendShutdownAck() { + SendPacket(tcb_->PacketBuilder().Add(ShutdownAckChunk())); + t2_shutdown_->set_duration(tcb_->current_rto()); + t2_shutdown_->Start(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/socket/dcsctp_socket.h b/net/dcsctp/socket/dcsctp_socket.h new file mode 100644 index 0000000000..32e89b50d1 --- /dev/null +++ b/net/dcsctp/socket/dcsctp_socket.h @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_SOCKET_DCSCTP_SOCKET_H_ +#define NET_DCSCTP_SOCKET_DCSCTP_SOCKET_H_ + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/abort_chunk.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/chunk/cookie_ack_chunk.h" +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/data_common.h" +#include "net/dcsctp/packet/chunk/error_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" +#include "net/dcsctp/packet/chunk/idata_chunk.h" +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/init_ack_chunk.h" +#include "net/dcsctp/packet/chunk/init_chunk.h" +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_ack_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_complete_chunk.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/packet_observer.h" +#include "net/dcsctp/rx/data_tracker.h" +#include "net/dcsctp/rx/reassembly_queue.h" +#include "net/dcsctp/socket/callback_deferrer.h" +#include "net/dcsctp/socket/state_cookie.h" +#include "net/dcsctp/socket/transmission_control_block.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/retransmission_error_counter.h" +#include "net/dcsctp/tx/retransmission_queue.h" +#include "net/dcsctp/tx/retransmission_timeout.h" +#include 
"net/dcsctp/tx/rr_send_queue.h" + +namespace dcsctp { + +// DcSctpSocket represents a single SCTP socket, to be used over DTLS. +// +// Every dcSCTP is completely isolated from any other socket. +// +// This class manages all packet and chunk dispatching and mainly handles the +// connection sequences (connect, close, shutdown, etc) as well as managing +// the Transmission Control Block (tcb). +// +// This class is thread-compatible. +class DcSctpSocket : public DcSctpSocketInterface { + public: + // Instantiates a DcSctpSocket, which interacts with the world through the + // `callbacks` interface and is configured using `options`. + // + // For debugging, `log_prefix` will prefix all debug logs, and a + // `packet_observer` can be attached to e.g. dump sent and received packets. + DcSctpSocket(absl::string_view log_prefix, + DcSctpSocketCallbacks& callbacks, + std::unique_ptr packet_observer, + const DcSctpOptions& options); + + DcSctpSocket(const DcSctpSocket&) = delete; + DcSctpSocket& operator=(const DcSctpSocket&) = delete; + + // Implementation of `DcSctpSocketInterface`. + void ReceivePacket(rtc::ArrayView data) override; + void HandleTimeout(TimeoutID timeout_id) override; + void Connect() override; + void Shutdown() override; + void Close() override; + SendStatus Send(DcSctpMessage message, + const SendOptions& send_options) override; + ResetStreamsStatus ResetStreams( + rtc::ArrayView outgoing_streams) override; + SocketState state() const override; + const DcSctpOptions& options() const override { return options_; } + void SetMaxMessageSize(size_t max_message_size) override; + size_t buffered_amount(StreamID stream_id) const override; + size_t buffered_amount_low_threshold(StreamID stream_id) const override; + void SetBufferedAmountLowThreshold(StreamID stream_id, size_t bytes) override; + // Returns this socket's verification tag, or zero if not yet connected. + VerificationTag verification_tag() const { + return tcb_ != nullptr ? 
tcb_->my_verification_tag() : VerificationTag(0); + } + + private: + // Parameter proposals valid during the connect phase. + struct ConnectParameters { + TSN initial_tsn = TSN(0); + VerificationTag verification_tag = VerificationTag(0); + }; + + // Detailed state (separate from SocketState, which is the public state). + enum class State { + kClosed, + kCookieWait, + // TCB valid in these: + kCookieEchoed, + kEstablished, + kShutdownPending, + kShutdownSent, + kShutdownReceived, + kShutdownAckSent, + }; + + // Returns the log prefix used for debug logging. + std::string log_prefix() const; + + bool IsConsistent() const; + static constexpr absl::string_view ToString(DcSctpSocket::State state); + + // Changes the socket state, given a `reason` (for debugging/logging). + void SetState(State state, absl::string_view reason); + // Fills in `connect_params` with random verification tag and initial TSN. + void MakeConnectionParameters(); + // Closes the association. Note that the TCB will not be valid past this call. + void InternalClose(ErrorKind error, absl::string_view message); + // Closes the association, because of too many retransmission errors. + void CloseConnectionBecauseOfTooManyTransmissionErrors(); + // Timer expiration handlers + absl::optional OnInitTimerExpiry(); + absl::optional OnCookieTimerExpiry(); + absl::optional OnShutdownTimerExpiry(); + // Builds the packet from `builder` and sends it (through callbacks). + void SendPacket(SctpPacket::Builder& builder); + // Sends SHUTDOWN or SHUTDOWN-ACK if the socket is shutting down and if all + // outstanding data has been acknowledged. + void MaybeSendShutdownOrAck(); + // If the socket is shutting down, responds SHUTDOWN to any incoming DATA. + void MaybeSendShutdownOnPacketReceived(const SctpPacket& packet); + // Sends a INIT chunk. + void SendInit(); + // Sends a SHUTDOWN chunk. + void SendShutdown(); + // Sends a SHUTDOWN-ACK chunk. 
+ void SendShutdownAck(); + // Validates the SCTP packet, as a whole - not the validity of individual + // chunks within it, as that's done in the different chunk handlers. + bool ValidatePacket(const SctpPacket& packet); + // Parses `payload`, which is a serialized packet that is just going to be + // sent and prints all chunks. + void DebugPrintOutgoing(rtc::ArrayView payload); + // Called whenever there may be reassembled messages, and delivers those. + void DeliverReassembledMessages(); + // Returns true if there is a TCB, and false otherwise (and reports an error). + bool ValidateHasTCB(); + + // Returns true if the parsing of a chunk of type `T` succeeded. If it didn't, + // it reports an error and returns false. + template + bool ValidateParseSuccess(const absl::optional& c) { + if (c.has_value()) { + return true; + } + + ReportFailedToParseChunk(T::kType); + return false; + } + + // Reports failing to have parsed a chunk with the provided `chunk_type`. + void ReportFailedToParseChunk(int chunk_type); + // Called when unknown chunks are received. May report an error. + bool HandleUnrecognizedChunk(const SctpPacket::ChunkDescriptor& descriptor); + + // Will dispatch more specific chunk handlers. + bool Dispatch(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming DATA chunks. + void HandleData(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming I-DATA chunks. + void HandleIData(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Common handler for DATA and I-DATA chunks. + void HandleDataCommon(AnyDataChunk& chunk); + // Handles incoming INIT chunks. + void HandleInit(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming INIT-ACK chunks. + void HandleInitAck(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming SACK chunks. 
+ void HandleSack(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming HEARTBEAT chunks. + void HandleHeartbeatRequest(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming HEARTBEAT-ACK chunks. + void HandleHeartbeatAck(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming ABORT chunks. + void HandleAbort(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming ERROR chunks. + void HandleError(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming COOKIE-ECHO chunks. + void HandleCookieEcho(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles receiving COOKIE-ECHO when there already is a TCB. The return value + // indicates if the processing should continue. + bool HandleCookieEchoWithTCB(const CommonHeader& header, + const StateCookie& cookie); + // Handles incoming COOKIE-ACK chunks. + void HandleCookieAck(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming SHUTDOWN chunks. + void HandleShutdown(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming SHUTDOWN-ACK chunks. + void HandleShutdownAck(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming FORWARD-TSN chunks. + void HandleForwardTsn(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming I-FORWARD-TSN chunks. + void HandleIForwardTsn(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Handles incoming RE-CONFIG chunks. + void HandleReconfig(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + // Common handled for FORWARD-TSN/I-FORWARD-TSN. 
+ void HandleForwardTsnCommon(const AnyForwardTsnChunk& chunk); + // Handles incoming SHUTDOWN-COMPLETE chunks + void HandleShutdownComplete(const CommonHeader& header, + const SctpPacket::ChunkDescriptor& descriptor); + + const std::string log_prefix_; + const std::unique_ptr packet_observer_; + DcSctpOptions options_; + + // Enqueues callbacks and dispatches them just before returning to the caller. + CallbackDeferrer callbacks_; + + TimerManager timer_manager_; + const std::unique_ptr t1_init_; + const std::unique_ptr t1_cookie_; + const std::unique_ptr t2_shutdown_; + + // The actual SendQueue implementation. As data can be sent on a socket before + // the connection is established, this component is not in the TCB. + RRSendQueue send_queue_; + + // Contains verification tag and initial TSN between having sent the INIT + // until the connection is established (there is no TCB at this point). + ConnectParameters connect_params_; + // The socket state. + State state_ = State::kClosed; + // If the connection is established, contains a transmission control block. + std::unique_ptr tcb_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_DCSCTP_SOCKET_H_ diff --git a/net/dcsctp/socket/dcsctp_socket_test.cc b/net/dcsctp/socket/dcsctp_socket_test.cc new file mode 100644 index 0000000000..7ca3d9b399 --- /dev/null +++ b/net/dcsctp/socket/dcsctp_socket_test.cc @@ -0,0 +1,1612 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/socket/dcsctp_socket.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/flags/flag.h" +#include "absl/memory/memory.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/data_common.h" +#include "net/dcsctp/packet/chunk/error_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" +#include "net/dcsctp/packet/chunk/idata_chunk.h" +#include "net/dcsctp/packet/chunk/init_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/chunk/shutdown_chunk.h" +#include "net/dcsctp/packet/error_cause/error_cause.h" +#include "net/dcsctp/packet/error_cause/unrecognized_chunk_type_cause.h" +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/text_pcap_packet_observer.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/rx/reassembly_queue.h" +#include "net/dcsctp/socket/mock_dcsctp_socket_callbacks.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +ABSL_FLAG(bool, dcsctp_capture_packets, false, "Print packet capture."); + +namespace dcsctp { +namespace { +using ::testing::_; +using ::testing::AllOf; +using ::testing::ElementsAre; +using ::testing::HasSubstr; +using ::testing::IsEmpty; +using ::testing::SizeIs; + +constexpr SendOptions kSendOptions; +constexpr size_t kLargeMessageSize = 
DcSctpOptions::kMaxSafeMTUSize * 20; +static constexpr size_t kSmallMessageSize = 10; + +MATCHER_P(HasDataChunkWithStreamId, stream_id, "") { + absl::optional packet = SctpPacket::Parse(arg); + if (!packet.has_value()) { + *result_listener << "data didn't parse as an SctpPacket"; + return false; + } + + if (packet->descriptors()[0].type != DataChunk::kType) { + *result_listener << "the first chunk in the packet is not a data chunk"; + return false; + } + + absl::optional dc = + DataChunk::Parse(packet->descriptors()[0].data); + if (!dc.has_value()) { + *result_listener << "The first chunk didn't parse as a data chunk"; + return false; + } + + if (dc->stream_id() != stream_id) { + *result_listener << "the stream_id is " << *dc->stream_id(); + return false; + } + + return true; +} + +MATCHER_P(HasDataChunkWithPPID, ppid, "") { + absl::optional packet = SctpPacket::Parse(arg); + if (!packet.has_value()) { + *result_listener << "data didn't parse as an SctpPacket"; + return false; + } + + if (packet->descriptors()[0].type != DataChunk::kType) { + *result_listener << "the first chunk in the packet is not a data chunk"; + return false; + } + + absl::optional dc = + DataChunk::Parse(packet->descriptors()[0].data); + if (!dc.has_value()) { + *result_listener << "The first chunk didn't parse as a data chunk"; + return false; + } + + if (dc->ppid() != ppid) { + *result_listener << "the ppid is " << *dc->ppid(); + return false; + } + + return true; +} + +MATCHER_P(HasDataChunkWithSsn, ssn, "") { + absl::optional packet = SctpPacket::Parse(arg); + if (!packet.has_value()) { + *result_listener << "data didn't parse as an SctpPacket"; + return false; + } + + if (packet->descriptors()[0].type != DataChunk::kType) { + *result_listener << "the first chunk in the packet is not a data chunk"; + return false; + } + + absl::optional dc = + DataChunk::Parse(packet->descriptors()[0].data); + if (!dc.has_value()) { + *result_listener << "The first chunk didn't parse as a data chunk"; + 
return false; + } + + if (dc->ssn() != ssn) { + *result_listener << "the ssn is " << *dc->ssn(); + return false; + } + + return true; +} + +MATCHER_P(HasDataChunkWithMid, mid, "") { + absl::optional packet = SctpPacket::Parse(arg); + if (!packet.has_value()) { + *result_listener << "data didn't parse as an SctpPacket"; + return false; + } + + if (packet->descriptors()[0].type != IDataChunk::kType) { + *result_listener << "the first chunk in the packet is not an i-data chunk"; + return false; + } + + absl::optional dc = + IDataChunk::Parse(packet->descriptors()[0].data); + if (!dc.has_value()) { + *result_listener << "The first chunk didn't parse as an i-data chunk"; + return false; + } + + if (dc->message_id() != mid) { + *result_listener << "the mid is " << *dc->message_id(); + return false; + } + + return true; +} + +MATCHER_P(HasSackWithCumAckTsn, tsn, "") { + absl::optional packet = SctpPacket::Parse(arg); + if (!packet.has_value()) { + *result_listener << "data didn't parse as an SctpPacket"; + return false; + } + + if (packet->descriptors()[0].type != SackChunk::kType) { + *result_listener << "the first chunk in the packet is not a data chunk"; + return false; + } + + absl::optional sc = + SackChunk::Parse(packet->descriptors()[0].data); + if (!sc.has_value()) { + *result_listener << "The first chunk didn't parse as a data chunk"; + return false; + } + + if (sc->cumulative_tsn_ack() != tsn) { + *result_listener << "the cum_ack_tsn is " << *sc->cumulative_tsn_ack(); + return false; + } + + return true; +} + +MATCHER(HasSackWithNoGapAckBlocks, "") { + absl::optional packet = SctpPacket::Parse(arg); + if (!packet.has_value()) { + *result_listener << "data didn't parse as an SctpPacket"; + return false; + } + + if (packet->descriptors()[0].type != SackChunk::kType) { + *result_listener << "the first chunk in the packet is not a data chunk"; + return false; + } + + absl::optional sc = + SackChunk::Parse(packet->descriptors()[0].data); + if (!sc.has_value()) { + 
*result_listener << "The first chunk didn't parse as a data chunk"; + return false; + } + + if (!sc->gap_ack_blocks().empty()) { + *result_listener << "there are gap ack blocks"; + return false; + } + + return true; +} + +TSN AddTo(TSN tsn, int delta) { + return TSN(*tsn + delta); +} + +DcSctpOptions MakeOptionsForTest(bool enable_message_interleaving) { + DcSctpOptions options; + // To make the interval more predictable in tests. + options.heartbeat_interval_include_rtt = false; + options.enable_message_interleaving = enable_message_interleaving; + return options; +} + +std::unique_ptr GetPacketObserver(absl::string_view name) { + if (absl::GetFlag(FLAGS_dcsctp_capture_packets)) { + return std::make_unique(name); + } + return nullptr; +} + +class DcSctpSocketTest : public testing::Test { + protected: + explicit DcSctpSocketTest(bool enable_message_interleaving = false) + : options_(MakeOptionsForTest(enable_message_interleaving)), + cb_a_("A"), + cb_z_("Z"), + sock_a_("A", cb_a_, GetPacketObserver("A"), options_), + sock_z_("Z", cb_z_, GetPacketObserver("Z"), options_) {} + + void AdvanceTime(DurationMs duration) { + cb_a_.AdvanceTime(duration); + cb_z_.AdvanceTime(duration); + } + + static void ExchangeMessages(DcSctpSocket& sock_a, + MockDcSctpSocketCallbacks& cb_a, + DcSctpSocket& sock_z, + MockDcSctpSocketCallbacks& cb_z) { + bool delivered_packet = false; + do { + delivered_packet = false; + std::vector packet_from_a = cb_a.ConsumeSentPacket(); + if (!packet_from_a.empty()) { + delivered_packet = true; + sock_z.ReceivePacket(std::move(packet_from_a)); + } + std::vector packet_from_z = cb_z.ConsumeSentPacket(); + if (!packet_from_z.empty()) { + delivered_packet = true; + sock_a.ReceivePacket(std::move(packet_from_z)); + } + } while (delivered_packet); + } + + void RunTimers(MockDcSctpSocketCallbacks& cb, DcSctpSocket& socket) { + for (;;) { + absl::optional timeout_id = cb.GetNextExpiredTimeout(); + if (!timeout_id.has_value()) { + break; + } + 
socket.HandleTimeout(*timeout_id); + } + } + + void RunTimers() { + RunTimers(cb_a_, sock_a_); + RunTimers(cb_z_, sock_z_); + } + + // Calls Connect() on `sock_a_` and make the connection established. + void ConnectSockets() { + EXPECT_CALL(cb_a_, OnConnected).Times(1); + EXPECT_CALL(cb_z_, OnConnected).Times(1); + + sock_a_.Connect(); + // Z reads INIT, INIT_ACK, COOKIE_ECHO, COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); + } + + const DcSctpOptions options_; + testing::NiceMock cb_a_; + testing::NiceMock cb_z_; + DcSctpSocket sock_a_; + DcSctpSocket sock_z_; +}; + +TEST_F(DcSctpSocketTest, EstablishConnection) { + EXPECT_CALL(cb_a_, OnConnected).Times(1); + EXPECT_CALL(cb_z_, OnConnected).Times(1); + EXPECT_CALL(cb_a_, OnConnectionRestarted).Times(0); + EXPECT_CALL(cb_z_, OnConnectionRestarted).Times(0); + + sock_a_.Connect(); + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads COOKIE_ACK. 
+ sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); +} + +TEST_F(DcSctpSocketTest, EstablishConnectionWithSetupCollision) { + EXPECT_CALL(cb_a_, OnConnected).Times(1); + EXPECT_CALL(cb_z_, OnConnected).Times(1); + EXPECT_CALL(cb_a_, OnConnectionRestarted).Times(0); + EXPECT_CALL(cb_z_, OnConnectionRestarted).Times(0); + sock_a_.Connect(); + sock_z_.Connect(); + + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); +} + +TEST_F(DcSctpSocketTest, ShuttingDownWhileEstablishingConnection) { + EXPECT_CALL(cb_a_, OnConnected).Times(0); + EXPECT_CALL(cb_z_, OnConnected).Times(1); + sock_a_.Connect(); + + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // Drop COOKIE_ACK, just to more easily verify shutdown protocol. + cb_z_.ConsumeSentPacket(); + + // As Socket A has received INIT_ACK, it has a TCB and is connected, while + // Socket Z needs to receive COOKIE_ECHO to get there. Socket A still has + // timers running at this point. + EXPECT_EQ(sock_a_.state(), SocketState::kConnecting); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); + + // Socket A is now shut down, which should make it stop those timers. + sock_a_.Shutdown(); + + EXPECT_CALL(cb_a_, OnClosed).Times(1); + EXPECT_CALL(cb_z_, OnClosed).Times(1); + + // Z reads SHUTDOWN, produces SHUTDOWN_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads SHUTDOWN_ACK, produces SHUTDOWN_COMPLETE + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + // Z reads SHUTDOWN_COMPLETE. 
+ sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + EXPECT_TRUE(cb_a_.ConsumeSentPacket().empty()); + EXPECT_TRUE(cb_z_.ConsumeSentPacket().empty()); + + EXPECT_EQ(sock_a_.state(), SocketState::kClosed); + EXPECT_EQ(sock_z_.state(), SocketState::kClosed); +} + +TEST_F(DcSctpSocketTest, EstablishSimultaneousConnection) { + EXPECT_CALL(cb_a_, OnConnected).Times(1); + EXPECT_CALL(cb_z_, OnConnected).Times(1); + EXPECT_CALL(cb_a_, OnConnectionRestarted).Times(0); + EXPECT_CALL(cb_z_, OnConnectionRestarted).Times(0); + sock_a_.Connect(); + + // INIT isn't received by Z, as it wasn't ready yet. + cb_a_.ConsumeSentPacket(); + + sock_z_.Connect(); + + // A reads INIT, produces INIT_ACK + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // Z reads INIT_ACK, sends COOKIE_ECHO + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + // A reads COOKIE_ECHO - establishes connection. + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + + // Proceed with the remaining packets. + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); +} + +TEST_F(DcSctpSocketTest, EstablishConnectionLostCookieAck) { + EXPECT_CALL(cb_a_, OnConnected).Times(1); + EXPECT_CALL(cb_z_, OnConnected).Times(1); + EXPECT_CALL(cb_a_, OnConnectionRestarted).Times(0); + EXPECT_CALL(cb_z_, OnConnectionRestarted).Times(0); + + sock_a_.Connect(); + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // COOKIE_ACK is lost. 
+ cb_z_.ConsumeSentPacket(); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnecting); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); + + // This will make A re-send the COOKIE_ECHO + AdvanceTime(DurationMs(options_.t1_cookie_timeout)); + RunTimers(); + + // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads COOKIE_ACK. + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); +} + +TEST_F(DcSctpSocketTest, ResendInitAndEstablishConnection) { + sock_a_.Connect(); + // INIT is never received by Z. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket init_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(init_packet.descriptors()[0].type, InitChunk::kType); + + AdvanceTime(options_.t1_init_timeout); + RunTimers(); + + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads COOKIE_ACK. + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); +} + +TEST_F(DcSctpSocketTest, ResendingInitTooManyTimesAborts) { + sock_a_.Connect(); + + // INIT is never received by Z. 
+ ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket init_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(init_packet.descriptors()[0].type, InitChunk::kType); + + for (int i = 0; i < options_.max_init_retransmits; ++i) { + AdvanceTime(options_.t1_init_timeout * (1 << i)); + RunTimers(); + + // INIT is resent + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket resent_init_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(resent_init_packet.descriptors()[0].type, InitChunk::kType); + } + + // Another timeout, after the max init retransmits. + AdvanceTime(options_.t1_init_timeout * (1 << options_.max_init_retransmits)); + EXPECT_CALL(cb_a_, OnAborted).Times(1); + RunTimers(); + + EXPECT_EQ(sock_a_.state(), SocketState::kClosed); +} + +TEST_F(DcSctpSocketTest, ResendCookieEchoAndEstablishConnection) { + sock_a_.Connect(); + + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // COOKIE_ECHO is never received by Z. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket init_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(init_packet.descriptors()[0].type, CookieEchoChunk::kType); + + AdvanceTime(options_.t1_init_timeout); + RunTimers(); + + // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads COOKIE_ACK. + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); +} + +TEST_F(DcSctpSocketTest, ResendingCookieEchoTooManyTimesAborts) { + sock_a_.Connect(); + + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // COOKIE_ECHO is never received by Z. 
+ ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket init_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(init_packet.descriptors()[0].type, CookieEchoChunk::kType); + + for (int i = 0; i < options_.max_init_retransmits; ++i) { + AdvanceTime(options_.t1_cookie_timeout * (1 << i)); + RunTimers(); + + // COOKIE_ECHO is resent + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket resent_init_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(resent_init_packet.descriptors()[0].type, CookieEchoChunk::kType); + } + + // Another timeout, after the max init retransmits. + AdvanceTime(options_.t1_cookie_timeout * + (1 << options_.max_init_retransmits)); + EXPECT_CALL(cb_a_, OnAborted).Times(1); + RunTimers(); + + EXPECT_EQ(sock_a_.state(), SocketState::kClosed); +} + +TEST_F(DcSctpSocketTest, DoesntSendMorePacketsUntilCookieAckHasBeenReceived) { + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kLargeMessageSize)), + kSendOptions); + sock_a_.Connect(); + + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // COOKIE_ECHO is never received by Z. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket cookie_echo_packet1, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_THAT(cookie_echo_packet1.descriptors(), SizeIs(2)); + EXPECT_EQ(cookie_echo_packet1.descriptors()[0].type, CookieEchoChunk::kType); + EXPECT_EQ(cookie_echo_packet1.descriptors()[1].type, DataChunk::kType); + + EXPECT_THAT(cb_a_.ConsumeSentPacket(), IsEmpty()); + + // There are DATA chunks in the sent packet (that was lost), which means that + // the T3-RTX timer is running, but as the socket is in kCookieEcho state, it + // will be T1-COOKIE that drives retransmissions, so when the T3-RTX expires, + // nothing should be retransmitted. 
+ ASSERT_TRUE(options_.rto_initial < options_.t1_cookie_timeout); + AdvanceTime(options_.rto_initial); + RunTimers(); + EXPECT_THAT(cb_a_.ConsumeSentPacket(), IsEmpty()); + + // When T1-COOKIE expires, both the COOKIE-ECHO and DATA should be present. + AdvanceTime(options_.t1_cookie_timeout - options_.rto_initial); + RunTimers(); + + // And this COOKIE-ECHO and DATA is also lost - never received by Z. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket cookie_echo_packet2, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_THAT(cookie_echo_packet2.descriptors(), SizeIs(2)); + EXPECT_EQ(cookie_echo_packet2.descriptors()[0].type, CookieEchoChunk::kType); + EXPECT_EQ(cookie_echo_packet2.descriptors()[1].type, DataChunk::kType); + + EXPECT_THAT(cb_a_.ConsumeSentPacket(), IsEmpty()); + + // COOKIE_ECHO has exponential backoff. + AdvanceTime(options_.t1_cookie_timeout * 2); + RunTimers(); + + // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads COOKIE_ACK. + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); + + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + EXPECT_THAT(cb_z_.ConsumeReceivedMessage()->payload(), + SizeIs(kLargeMessageSize)); +} + +TEST_F(DcSctpSocketTest, ShutdownConnection) { + ConnectSockets(); + + RTC_LOG(LS_INFO) << "Shutting down"; + + sock_a_.Shutdown(); + // Z reads SHUTDOWN, produces SHUTDOWN_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // A reads SHUTDOWN_ACK, produces SHUTDOWN_COMPLETE + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + // Z reads SHUTDOWN_COMPLETE. 
+ sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kClosed); + EXPECT_EQ(sock_z_.state(), SocketState::kClosed); +} + +TEST_F(DcSctpSocketTest, ShutdownTimerExpiresTooManyTimeClosesConnection) { + ConnectSockets(); + + sock_a_.Shutdown(); + // Drop first SHUTDOWN packet. + cb_a_.ConsumeSentPacket(); + + EXPECT_EQ(sock_a_.state(), SocketState::kShuttingDown); + + for (int i = 0; i < options_.max_retransmissions; ++i) { + AdvanceTime(DurationMs(options_.rto_initial * (1 << i))); + RunTimers(); + + // Dropping every shutdown chunk. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(packet.descriptors()[0].type, ShutdownChunk::kType); + EXPECT_TRUE(cb_a_.ConsumeSentPacket().empty()); + } + // The last expiry, makes it abort the connection. + AdvanceTime(options_.rto_initial * (1 << options_.max_retransmissions)); + EXPECT_CALL(cb_a_, OnAborted).Times(1); + RunTimers(); + + EXPECT_EQ(sock_a_.state(), SocketState::kClosed); + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(packet.descriptors()[0].type, AbortChunk::kType); + EXPECT_TRUE(cb_a_.ConsumeSentPacket().empty()); +} + +TEST_F(DcSctpSocketTest, EstablishConnectionWhileSendingData) { + sock_a_.Connect(); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), {1, 2}), kSendOptions); + + // Z reads INIT, produces INIT_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // // A reads INIT_ACK, produces COOKIE_ECHO + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + // // Z reads COOKIE_ECHO, produces COOKIE_ACK + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // // A reads COOKIE_ACK. 
+ sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + EXPECT_EQ(sock_a_.state(), SocketState::kConnected); + EXPECT_EQ(sock_z_.state(), SocketState::kConnected); + + absl::optional msg = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg.has_value()); + EXPECT_EQ(msg->stream_id(), StreamID(1)); +} + +TEST_F(DcSctpSocketTest, SendMessageAfterEstablished) { + ConnectSockets(); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), {1, 2}), kSendOptions); + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + absl::optional msg = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg.has_value()); + EXPECT_EQ(msg->stream_id(), StreamID(1)); +} + +TEST_F(DcSctpSocketTest, TimeoutResendsPacket) { + ConnectSockets(); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), {1, 2}), kSendOptions); + cb_a_.ConsumeSentPacket(); + + RTC_LOG(LS_INFO) << "Advancing time"; + AdvanceTime(options_.rto_initial); + RunTimers(); + + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + absl::optional msg = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg.has_value()); + EXPECT_EQ(msg->stream_id(), StreamID(1)); +} + +TEST_F(DcSctpSocketTest, SendALotOfBytesMissedSecondPacket) { + ConnectSockets(); + + std::vector payload(kLargeMessageSize); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), kSendOptions); + + // First DATA + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // Second DATA (lost) + cb_a_.ConsumeSentPacket(); + + // Retransmit and handle the rest + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + absl::optional msg = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg.has_value()); + EXPECT_EQ(msg->stream_id(), StreamID(1)); + EXPECT_THAT(msg->payload(), testing::ElementsAreArray(payload)); +} + +TEST_F(DcSctpSocketTest, SendingHeartbeatAnswersWithAck) { + ConnectSockets(); + + // Inject a HEARTBEAT chunk + SctpPacket::Builder b(sock_a_.verification_tag(), DcSctpOptions()); + uint8_t info[] = {1, 2, 3, 4}; + Parameters::Builder params_builder; + 
params_builder.Add(HeartbeatInfoParameter(info)); + b.Add(HeartbeatRequestChunk(params_builder.Build())); + sock_a_.ReceivePacket(b.Build()); + + // HEARTBEAT_ACK is sent as a reply. Capture it. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket ack_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + ASSERT_THAT(ack_packet.descriptors(), SizeIs(1)); + ASSERT_HAS_VALUE_AND_ASSIGN( + HeartbeatAckChunk ack, + HeartbeatAckChunk::Parse(ack_packet.descriptors()[0].data)); + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatInfoParameter info_param, ack.info()); + EXPECT_THAT(info_param.info(), ElementsAre(1, 2, 3, 4)); +} + +TEST_F(DcSctpSocketTest, ExpectHeartbeatToBeSent) { + ConnectSockets(); + + EXPECT_THAT(cb_a_.ConsumeSentPacket(), IsEmpty()); + + AdvanceTime(options_.heartbeat_interval); + RunTimers(); + + std::vector hb_packet_raw = cb_a_.ConsumeSentPacket(); + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket hb_packet, + SctpPacket::Parse(hb_packet_raw)); + ASSERT_THAT(hb_packet.descriptors(), SizeIs(1)); + ASSERT_HAS_VALUE_AND_ASSIGN( + HeartbeatRequestChunk hb, + HeartbeatRequestChunk::Parse(hb_packet.descriptors()[0].data)); + ASSERT_HAS_VALUE_AND_ASSIGN(HeartbeatInfoParameter info_param, hb.info()); + + // The info is a single 64-bit number. + EXPECT_THAT(hb.info()->info(), SizeIs(8)); + + // Feed it to Sock-z and expect a HEARTBEAT_ACK that will be propagated back. + sock_z_.ReceivePacket(hb_packet_raw); + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); +} + +TEST_F(DcSctpSocketTest, CloseConnectionAfterTooManyLostHeartbeats) { + ConnectSockets(); + + EXPECT_THAT(cb_a_.ConsumeSentPacket(), testing::IsEmpty()); + // Force-close socket Z so that it doesn't interfere from now on. 
+ sock_z_.Close(); + + DurationMs time_to_next_hearbeat = options_.heartbeat_interval; + + for (int i = 0; i < options_.max_retransmissions; ++i) { + RTC_LOG(LS_INFO) << "Letting HEARTBEAT interval timer expire - sending..."; + AdvanceTime(time_to_next_hearbeat); + RunTimers(); + + // Dropping every heartbeat. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket hb_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(hb_packet.descriptors()[0].type, HeartbeatRequestChunk::kType); + + RTC_LOG(LS_INFO) << "Letting the heartbeat expire."; + AdvanceTime(DurationMs(1000)); + RunTimers(); + + time_to_next_hearbeat = options_.heartbeat_interval - DurationMs(1000); + } + + RTC_LOG(LS_INFO) << "Letting HEARTBEAT interval timer expire - sending..."; + AdvanceTime(time_to_next_hearbeat); + RunTimers(); + + // Last heartbeat + EXPECT_THAT(cb_a_.ConsumeSentPacket(), Not(IsEmpty())); + + EXPECT_CALL(cb_a_, OnAborted).Times(1); + // Should suffice as exceeding RTO + AdvanceTime(DurationMs(1000)); + RunTimers(); +} + +TEST_F(DcSctpSocketTest, RecoversAfterASuccessfulAck) { + ConnectSockets(); + + EXPECT_THAT(cb_a_.ConsumeSentPacket(), testing::IsEmpty()); + // Force-close socket Z so that it doesn't interfere from now on. + sock_z_.Close(); + + DurationMs time_to_next_hearbeat = options_.heartbeat_interval; + + for (int i = 0; i < options_.max_retransmissions; ++i) { + AdvanceTime(time_to_next_hearbeat); + RunTimers(); + + // Dropping every heartbeat. 
+ cb_a_.ConsumeSentPacket(); + + RTC_LOG(LS_INFO) << "Letting the heartbeat expire."; + AdvanceTime(DurationMs(1000)); + RunTimers(); + + time_to_next_hearbeat = options_.heartbeat_interval - DurationMs(1000); + } + + RTC_LOG(LS_INFO) << "Getting the last heartbeat - and acking it"; + AdvanceTime(time_to_next_hearbeat); + RunTimers(); + + std::vector hb_packet_raw = cb_a_.ConsumeSentPacket(); + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket hb_packet, + SctpPacket::Parse(hb_packet_raw)); + ASSERT_THAT(hb_packet.descriptors(), SizeIs(1)); + ASSERT_HAS_VALUE_AND_ASSIGN( + HeartbeatRequestChunk hb, + HeartbeatRequestChunk::Parse(hb_packet.descriptors()[0].data)); + + SctpPacket::Builder b(sock_a_.verification_tag(), options_); + b.Add(HeartbeatAckChunk(std::move(hb).extract_parameters())); + sock_a_.ReceivePacket(b.Build()); + + // Should suffice as exceeding RTO - which will not fire. + EXPECT_CALL(cb_a_, OnAborted).Times(0); + AdvanceTime(DurationMs(1000)); + RunTimers(); + EXPECT_THAT(cb_a_.ConsumeSentPacket(), IsEmpty()); + + // Verify that we get new heartbeats again. + RTC_LOG(LS_INFO) << "Expecting a new heartbeat"; + AdvanceTime(time_to_next_hearbeat); + RunTimers(); + + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket another_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + EXPECT_EQ(another_packet.descriptors()[0].type, HeartbeatRequestChunk::kType); +} + +TEST_F(DcSctpSocketTest, ResetStream) { + ConnectSockets(); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), {1, 2}), {}); + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + absl::optional msg = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg.has_value()); + EXPECT_EQ(msg->stream_id(), StreamID(1)); + + // Handle SACK + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // Reset the outgoing stream. This will directly send a RE-CONFIG. + sock_a_.ResetStreams(std::vector({StreamID(1)})); + + // Receiving the packet will trigger a callback, indicating that A has + // reset its stream. 
It will also send a RE-CONFIG with a response. + EXPECT_CALL(cb_z_, OnIncomingStreamsReset).Times(1); + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + // Receiving a response will trigger a callback. Streams are now reset. + EXPECT_CALL(cb_a_, OnStreamsResetPerformed).Times(1); + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); +} + +TEST_F(DcSctpSocketTest, ResetStreamWillMakeChunksStartAtZeroSsn) { + ConnectSockets(); + + std::vector payload(options_.mtu - 100); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {}); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {}); + + auto packet1 = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet1, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(packet1); + + auto packet2 = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet2, HasDataChunkWithSsn(SSN(1))); + sock_z_.ReceivePacket(packet2); + + // Handle SACK + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + absl::optional msg1 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg1.has_value()); + EXPECT_EQ(msg1->stream_id(), StreamID(1)); + + absl::optional msg2 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg2.has_value()); + EXPECT_EQ(msg2->stream_id(), StreamID(1)); + + // Reset the outgoing stream. This will directly send a RE-CONFIG. 
+  sock_a_.ResetStreams(std::vector<StreamID>({StreamID(1)}));
+  // RE-CONFIG, req
+  sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket());
+  // RE-CONFIG, resp
+  sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket());
+
+  sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {});
+
+  sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {});
+
+  auto packet3 = cb_a_.ConsumeSentPacket();
+  EXPECT_THAT(packet3, HasDataChunkWithSsn(SSN(0)));
+  sock_z_.ReceivePacket(packet3);
+
+  auto packet4 = cb_a_.ConsumeSentPacket();
+  EXPECT_THAT(packet4, HasDataChunkWithSsn(SSN(1)));
+  sock_z_.ReceivePacket(packet4);
+
+  // Handle SACK
+  sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket());
+}
+
+TEST_F(DcSctpSocketTest, ResetStreamWillOnlyResetTheRequestedStreams) {
+  ConnectSockets();
+
+  std::vector<uint8_t> payload(options_.mtu - 100);
+
+  // Send two ordered messages on SID 1
+  sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {});
+  sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {});
+
+  auto packet1 = cb_a_.ConsumeSentPacket();
+  EXPECT_THAT(packet1, HasDataChunkWithStreamId(StreamID(1)));
+  EXPECT_THAT(packet1, HasDataChunkWithSsn(SSN(0)));
+  sock_z_.ReceivePacket(packet1);
+
+  auto packet2 = cb_a_.ConsumeSentPacket();
+  EXPECT_THAT(packet2, HasDataChunkWithStreamId(StreamID(1)));
+  EXPECT_THAT(packet2, HasDataChunkWithSsn(SSN(1)));
+  sock_z_.ReceivePacket(packet2);
+
+  // Handle SACK
+  sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket());
+
+  // Do the same, for SID 3
+  sock_a_.Send(DcSctpMessage(StreamID(3), PPID(53), payload), {});
+  sock_a_.Send(DcSctpMessage(StreamID(3), PPID(53), payload), {});
+  auto packet3 = cb_a_.ConsumeSentPacket();
+  EXPECT_THAT(packet3, HasDataChunkWithStreamId(StreamID(3)));
+  EXPECT_THAT(packet3, HasDataChunkWithSsn(SSN(0)));
+  sock_z_.ReceivePacket(packet3);
+  auto packet4 = cb_a_.ConsumeSentPacket();
+  EXPECT_THAT(packet4, HasDataChunkWithStreamId(StreamID(3)));
+  EXPECT_THAT(packet4, HasDataChunkWithSsn(SSN(1)));
+
sock_z_.ReceivePacket(packet4);
+  sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket());
+
+  // Receive all messages.
+  absl::optional<DcSctpMessage> msg1 = cb_z_.ConsumeReceivedMessage();
+  ASSERT_TRUE(msg1.has_value());
+  EXPECT_EQ(msg1->stream_id(), StreamID(1));
+
+  absl::optional<DcSctpMessage> msg2 = cb_z_.ConsumeReceivedMessage();
+  ASSERT_TRUE(msg2.has_value());
+  EXPECT_EQ(msg2->stream_id(), StreamID(1));
+
+  absl::optional<DcSctpMessage> msg3 = cb_z_.ConsumeReceivedMessage();
+  ASSERT_TRUE(msg3.has_value());
+  EXPECT_EQ(msg3->stream_id(), StreamID(3));
+
+  absl::optional<DcSctpMessage> msg4 = cb_z_.ConsumeReceivedMessage();
+  ASSERT_TRUE(msg4.has_value());
+  EXPECT_EQ(msg4->stream_id(), StreamID(3));
+
+  // Reset SID 3. This will directly send a RE-CONFIG.
+  sock_a_.ResetStreams(std::vector<StreamID>({StreamID(3)}));
+  // RE-CONFIG, req
+  sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket());
+  // RE-CONFIG, resp
+  sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket());
+
+  // Send a message on SID 1 and 3 - SID 1 should not be reset, but 3 should.
+  sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), {});
+
+  sock_a_.Send(DcSctpMessage(StreamID(3), PPID(53), payload), {});
+
+  auto packet5 = cb_a_.ConsumeSentPacket();
+  EXPECT_THAT(packet5, HasDataChunkWithStreamId(StreamID(1)));
+  EXPECT_THAT(packet5, HasDataChunkWithSsn(SSN(2)));  // Unchanged.
+  sock_z_.ReceivePacket(packet5);
+
+  auto packet6 = cb_a_.ConsumeSentPacket();
+  EXPECT_THAT(packet6, HasDataChunkWithStreamId(StreamID(3)));
+  EXPECT_THAT(packet6, HasDataChunkWithSsn(SSN(0)));  // Reset.
+  sock_z_.ReceivePacket(packet6);
+
+  // Handle SACK
+  sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket());
+}
+
+TEST_F(DcSctpSocketTest, OnePeerReconnects) {
+  ConnectSockets();
+
+  EXPECT_CALL(cb_a_, OnConnectionRestarted).Times(1);
+  // Let's be evil here - reconnect while a fragmented packet was about to be
+  // sent. The receiving side should get it in full.
+ std::vector payload(kLargeMessageSize); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), kSendOptions); + + // First DATA + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + // Create a new association, z2 - and don't use z anymore. + testing::NiceMock cb_z2("Z2"); + DcSctpSocket sock_z2("Z2", cb_z2, nullptr, options_); + + sock_z2.Connect(); + + // Retransmit and handle the rest. As there will be some chunks in-flight that + // have the wrong verification tag, those will yield errors. + ExchangeMessages(sock_a_, cb_a_, sock_z2, cb_z2); + + absl::optional msg = cb_z2.ConsumeReceivedMessage(); + ASSERT_TRUE(msg.has_value()); + EXPECT_EQ(msg->stream_id(), StreamID(1)); + EXPECT_THAT(msg->payload(), testing::ElementsAreArray(payload)); +} + +TEST_F(DcSctpSocketTest, SendMessageWithLimitedRtx) { + ConnectSockets(); + + SendOptions send_options; + send_options.max_retransmissions = 0; + std::vector payload(options_.mtu - 100); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(51), payload), send_options); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(52), payload), send_options); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), send_options); + + // First DATA + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + // Second DATA (lost) + cb_a_.ConsumeSentPacket(); + // Third DATA + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + // Handle SACK for first DATA + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // Handle delayed SACK for third DATA + AdvanceTime(options_.delayed_ack_max_timeout); + RunTimers(); + + // Handle SACK for second DATA + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + // Now the missing data chunk will be marked as nacked, but it might still be + // in-flight and the reported gap could be due to out-of-order delivery. So + // the RetransmissionQueue will not mark it as "to be retransmitted" until + // after the t3-rtx timer has expired. 
+ AdvanceTime(options_.rto_initial); + RunTimers(); + + // The chunk will be marked as retransmitted, and then as abandoned, which + // will trigger a FORWARD-TSN to be sent. + + // FORWARD-TSN (third) + sock_z_.ReceivePacket(cb_a_.ConsumeSentPacket()); + + // Which will trigger a SACK + sock_a_.ReceivePacket(cb_z_.ConsumeSentPacket()); + + absl::optional msg1 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg1.has_value()); + EXPECT_EQ(msg1->ppid(), PPID(51)); + + absl::optional msg2 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg2.has_value()); + EXPECT_EQ(msg2->ppid(), PPID(53)); + + absl::optional msg3 = cb_z_.ConsumeReceivedMessage(); + EXPECT_FALSE(msg3.has_value()); +} + +TEST_F(DcSctpSocketTest, SendManyFragmentedMessagesWithLimitedRtx) { + ConnectSockets(); + + SendOptions send_options; + send_options.unordered = IsUnordered(true); + send_options.max_retransmissions = 0; + std::vector payload(options_.mtu * 2 - 100 /* margin */); + // Sending first message + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(51), payload), send_options); + // Sending second message + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(52), payload), send_options); + // Sending third message + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), send_options); + // Sending fourth message + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(54), payload), send_options); + + // First DATA, first fragment + std::vector packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(51))); + sock_z_.ReceivePacket(std::move(packet)); + + // First DATA, second fragment (lost) + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(51))); + + // Second DATA, first fragment + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(52))); + sock_z_.ReceivePacket(std::move(packet)); + + // Second DATA, second fragment (lost) + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, 
HasDataChunkWithPPID(PPID(52))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + + // Third DATA, first fragment + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(53))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(std::move(packet)); + + // Third DATA, second fragment (lost) + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(53))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + + // Fourth DATA, first fragment + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(54))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(std::move(packet)); + + // Fourth DATA, second fragment + packet = cb_a_.ConsumeSentPacket(); + EXPECT_THAT(packet, HasDataChunkWithPPID(PPID(54))); + EXPECT_THAT(packet, HasDataChunkWithSsn(SSN(0))); + sock_z_.ReceivePacket(std::move(packet)); + + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + // Let the RTX timer expire, and exchange FORWARD-TSN/SACKs + AdvanceTime(options_.rto_initial); + RunTimers(); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + absl::optional msg1 = cb_z_.ConsumeReceivedMessage(); + ASSERT_TRUE(msg1.has_value()); + EXPECT_EQ(msg1->ppid(), PPID(54)); + + ASSERT_FALSE(cb_z_.ConsumeReceivedMessage().has_value()); +} + +struct FakeChunkConfig : ChunkConfig { + static constexpr int kType = 0x49; + static constexpr size_t kHeaderSize = 4; + static constexpr int kVariableLengthAlignment = 0; +}; + +class FakeChunk : public Chunk, public TLVTrait { + public: + FakeChunk() {} + + FakeChunk(FakeChunk&& other) = default; + FakeChunk& operator=(FakeChunk&& other) = default; + + void SerializeTo(std::vector& out) const override { + AllocateTLV(out); + } + std::string ToString() const override { return "FAKE"; } +}; + +TEST_F(DcSctpSocketTest, ReceivingUnknownChunkRespondsWithError) { + ConnectSockets(); + + // Inject a FAKE chunk + SctpPacket::Builder 
b(sock_a_.verification_tag(), DcSctpOptions()); + b.Add(FakeChunk()); + sock_a_.ReceivePacket(b.Build()); + + // ERROR is sent as a reply. Capture it. + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket reply_packet, + SctpPacket::Parse(cb_a_.ConsumeSentPacket())); + ASSERT_THAT(reply_packet.descriptors(), SizeIs(1)); + ASSERT_HAS_VALUE_AND_ASSIGN( + ErrorChunk error, ErrorChunk::Parse(reply_packet.descriptors()[0].data)); + ASSERT_HAS_VALUE_AND_ASSIGN( + UnrecognizedChunkTypeCause cause, + error.error_causes().get()); + EXPECT_THAT(cause.unrecognized_chunk(), ElementsAre(0x49, 0x00, 0x00, 0x04)); +} + +TEST_F(DcSctpSocketTest, ReceivingErrorChunkReportsAsCallback) { + ConnectSockets(); + + // Inject a ERROR chunk + SctpPacket::Builder b(sock_a_.verification_tag(), DcSctpOptions()); + b.Add( + ErrorChunk(Parameters::Builder() + .Add(UnrecognizedChunkTypeCause({0x49, 0x00, 0x00, 0x04})) + .Build())); + + EXPECT_CALL(cb_a_, OnError(ErrorKind::kPeerReported, + HasSubstr("Unrecognized Chunk Type"))); + sock_a_.ReceivePacket(b.Build()); +} + +TEST_F(DcSctpSocketTest, PassingHighWatermarkWillOnlyAcceptCumAckTsn) { + // Create a new association, z2 - and don't use z anymore. + testing::NiceMock cb_z2("Z2"); + DcSctpOptions options = options_; + options.max_receiver_window_buffer_size = 100; + DcSctpSocket sock_z2("Z2", cb_z2, nullptr, options); + + EXPECT_CALL(cb_z2, OnClosed).Times(0); + EXPECT_CALL(cb_z2, OnAborted).Times(0); + + sock_a_.Connect(); + std::vector init_data = cb_a_.ConsumeSentPacket(); + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket init_packet, + SctpPacket::Parse(init_data)); + ASSERT_HAS_VALUE_AND_ASSIGN( + InitChunk init_chunk, + InitChunk::Parse(init_packet.descriptors()[0].data)); + sock_z2.ReceivePacket(init_data); + sock_a_.ReceivePacket(cb_z2.ConsumeSentPacket()); + sock_z2.ReceivePacket(cb_a_.ConsumeSentPacket()); + sock_a_.ReceivePacket(cb_z2.ConsumeSentPacket()); + + // Fill up Z2 to the high watermark limit. 
+ TSN tsn = init_chunk.initial_tsn(); + AnyDataChunk::Options opts; + opts.is_beginning = Data::IsBeginning(true); + sock_z2.ReceivePacket( + SctpPacket::Builder(sock_z2.verification_tag(), options) + .Add(DataChunk(tsn, StreamID(1), SSN(0), PPID(53), + std::vector( + 100 * ReassemblyQueue::kHighWatermarkLimit + 1), + opts)) + .Build()); + + // First DATA will always trigger a SACK. It's not interesting. + EXPECT_THAT(cb_z2.ConsumeSentPacket(), + AllOf(HasSackWithCumAckTsn(tsn), HasSackWithNoGapAckBlocks())); + + // This DATA should be accepted - it's advancing cum ack tsn. + sock_z2.ReceivePacket(SctpPacket::Builder(sock_z2.verification_tag(), options) + .Add(DataChunk(AddTo(tsn, 1), StreamID(1), SSN(0), + PPID(53), std::vector(1), + /*options=*/{})) + .Build()); + + // The receiver might have moved into delayed ack mode. + cb_z2.AdvanceTime(options.rto_initial); + RunTimers(cb_z2, sock_z2); + + EXPECT_THAT( + cb_z2.ConsumeSentPacket(), + AllOf(HasSackWithCumAckTsn(AddTo(tsn, 1)), HasSackWithNoGapAckBlocks())); + + // This DATA will not be accepted - it's not advancing cum ack tsn. + sock_z2.ReceivePacket(SctpPacket::Builder(sock_z2.verification_tag(), options) + .Add(DataChunk(AddTo(tsn, 3), StreamID(1), SSN(0), + PPID(53), std::vector(1), + /*options=*/{})) + .Build()); + + // Sack will be sent in IMMEDIATE mode when this is happening. + EXPECT_THAT( + cb_z2.ConsumeSentPacket(), + AllOf(HasSackWithCumAckTsn(AddTo(tsn, 1)), HasSackWithNoGapAckBlocks())); + + // This DATA will not be accepted either. + sock_z2.ReceivePacket(SctpPacket::Builder(sock_z2.verification_tag(), options) + .Add(DataChunk(AddTo(tsn, 4), StreamID(1), SSN(0), + PPID(53), std::vector(1), + /*options=*/{})) + .Build()); + + // Sack will be sent in IMMEDIATE mode when this is happening. + EXPECT_THAT( + cb_z2.ConsumeSentPacket(), + AllOf(HasSackWithCumAckTsn(AddTo(tsn, 1)), HasSackWithNoGapAckBlocks())); + + // This DATA should be accepted, and it fills the reassembly queue. 
+ sock_z2.ReceivePacket( + SctpPacket::Builder(sock_z2.verification_tag(), options) + .Add(DataChunk(AddTo(tsn, 2), StreamID(1), SSN(0), PPID(53), + std::vector(kSmallMessageSize), + /*options=*/{})) + .Build()); + + // The receiver might have moved into delayed ack mode. + cb_z2.AdvanceTime(options.rto_initial); + RunTimers(cb_z2, sock_z2); + + EXPECT_THAT( + cb_z2.ConsumeSentPacket(), + AllOf(HasSackWithCumAckTsn(AddTo(tsn, 2)), HasSackWithNoGapAckBlocks())); + + EXPECT_CALL(cb_z2, OnAborted(ErrorKind::kResourceExhaustion, _)); + EXPECT_CALL(cb_z2, OnClosed).Times(0); + + // This DATA will make the connection close. It's too full now. + sock_z2.ReceivePacket( + SctpPacket::Builder(sock_z2.verification_tag(), options) + .Add(DataChunk(AddTo(tsn, 3), StreamID(1), SSN(0), PPID(53), + std::vector(kSmallMessageSize), + /*options=*/{})) + .Build()); +} + +TEST_F(DcSctpSocketTest, SetMaxMessageSize) { + sock_a_.SetMaxMessageSize(42u); + EXPECT_EQ(sock_a_.options().max_message_size, 42u); +} + +TEST_F(DcSctpSocketTest, SendsMessagesWithLowLifetime) { + ConnectSockets(); + + // Mock that the time always goes forward. + TimeMs now(0); + EXPECT_CALL(cb_a_, TimeMillis).WillRepeatedly([&]() { + now += DurationMs(3); + return now; + }); + EXPECT_CALL(cb_z_, TimeMillis).WillRepeatedly([&]() { + now += DurationMs(3); + return now; + }); + + // Queue a few small messages with low lifetime, both ordered and unordered, + // and validate that all are delivered. 
+ static constexpr int kIterations = 100; + for (int i = 0; i < kIterations; ++i) { + SendOptions send_options; + send_options.unordered = IsUnordered((i % 2) == 0); + send_options.lifetime = DurationMs(i % 3); // 0, 1, 2 ms + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), {1, 2}), send_options); + } + + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + for (int i = 0; i < kIterations; ++i) { + EXPECT_TRUE(cb_z_.ConsumeReceivedMessage().has_value()); + } + + EXPECT_FALSE(cb_z_.ConsumeReceivedMessage().has_value()); + + // Validate that the sockets really make the time move forward. + EXPECT_GE(*now, kIterations * 2); +} + +TEST_F(DcSctpSocketTest, DiscardsMessagesWithLowLifetimeIfMustBuffer) { + ConnectSockets(); + + SendOptions lifetime_0; + lifetime_0.unordered = IsUnordered(true); + lifetime_0.lifetime = DurationMs(0); + + SendOptions lifetime_1; + lifetime_1.unordered = IsUnordered(true); + lifetime_1.lifetime = DurationMs(1); + + // Mock that the time always goes forward. + TimeMs now(0); + EXPECT_CALL(cb_a_, TimeMillis).WillRepeatedly([&]() { + now += DurationMs(3); + return now; + }); + EXPECT_CALL(cb_z_, TimeMillis).WillRepeatedly([&]() { + now += DurationMs(3); + return now; + }); + + // Fill up the send buffer with a large message. + std::vector payload(kLargeMessageSize); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), payload), kSendOptions); + + // And queue a few small messages with lifetime=0 or 1 ms - can't be sent. + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), {1, 2, 3}), lifetime_0); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), {4, 5, 6}), lifetime_1); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), {7, 8, 9}), lifetime_0); + + // Handle all that was sent until congestion window got full. + for (;;) { + std::vector packet_from_a = cb_a_.ConsumeSentPacket(); + if (packet_from_a.empty()) { + break; + } + sock_z_.ReceivePacket(std::move(packet_from_a)); + } + + // Shouldn't be enough to send that large message. 
+ EXPECT_FALSE(cb_z_.ConsumeReceivedMessage().has_value()); + + // Exchange the rest of the messages, with the time ever increasing. + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + // The large message should be delivered. It was sent reliably. + ASSERT_HAS_VALUE_AND_ASSIGN(DcSctpMessage m1, cb_z_.ConsumeReceivedMessage()); + EXPECT_EQ(m1.stream_id(), StreamID(1)); + EXPECT_THAT(m1.payload(), SizeIs(kLargeMessageSize)); + + // But none of the smaller messages. + EXPECT_FALSE(cb_z_.ConsumeReceivedMessage().has_value()); +} + +TEST_F(DcSctpSocketTest, HasReasonableBufferedAmountValues) { + ConnectSockets(); + + EXPECT_EQ(sock_a_.buffered_amount(StreamID(1)), 0u); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kSmallMessageSize)), + kSendOptions); + // Sending a small message will directly send it as a single packet, so + // nothing is left in the queue. + EXPECT_EQ(sock_a_.buffered_amount(StreamID(1)), 0u); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kLargeMessageSize)), + kSendOptions); + + // Sending a message will directly start sending a few packets, so the + // buffered amount is not the full message size. 
+ EXPECT_GT(sock_a_.buffered_amount(StreamID(1)), 0u); + EXPECT_LT(sock_a_.buffered_amount(StreamID(1)), kLargeMessageSize); +} + +TEST_F(DcSctpSocketTest, HasDefaultOnBufferedAmountLowValueZero) { + EXPECT_EQ(sock_a_.buffered_amount_low_threshold(StreamID(1)), 0u); +} + +TEST_F(DcSctpSocketTest, TriggersOnBufferedAmountLowWithDefaultValueZero) { + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(1))); + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kSmallMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, DoesntTriggerOnBufferedAmountLowIfBelowThreshold) { + static constexpr size_t kMessageSize = 1000; + static constexpr size_t kBufferedAmountLowThreshold = kMessageSize * 10; + + sock_a_.SetBufferedAmountLowThreshold(StreamID(1), + kBufferedAmountLowThreshold); + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(1))).Times(0); + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, TriggersOnBufferedAmountMultipleTimes) { + static constexpr size_t kMessageSize = 1000; + static constexpr size_t kBufferedAmountLowThreshold = kMessageSize / 2; + + sock_a_.SetBufferedAmountLowThreshold(StreamID(1), + kBufferedAmountLowThreshold); + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(1))).Times(3); + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(2))).Times(2); + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, 
cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(2), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(2), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); + + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, TriggersOnBufferedAmountLowOnlyWhenCrossingThreshold) { + static constexpr size_t kMessageSize = 1000; + static constexpr size_t kBufferedAmountLowThreshold = kMessageSize * 1.5; + + sock_a_.SetBufferedAmountLowThreshold(StreamID(1), + kBufferedAmountLowThreshold); + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnBufferedAmountLow).Times(0); + + // Add a few messages to fill up the congestion window. When that is full, + // messages will start to be fully buffered. 
+ while (sock_a_.buffered_amount(StreamID(1)) == 0) { + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kMessageSize)), + kSendOptions); + } + size_t initial_buffered = sock_a_.buffered_amount(StreamID(1)); + ASSERT_GE(initial_buffered, 0u); + ASSERT_LT(initial_buffered, kMessageSize); + + // Up to kMessageSize (which is below the threshold) + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), + std::vector(kMessageSize - initial_buffered)), + kSendOptions); + EXPECT_EQ(sock_a_.buffered_amount(StreamID(1)), kMessageSize); + + // Up to 2*kMessageSize (which is above the threshold) + sock_a_.Send( + DcSctpMessage(StreamID(1), PPID(53), std::vector(kMessageSize)), + kSendOptions); + EXPECT_EQ(sock_a_.buffered_amount(StreamID(1)), 2 * kMessageSize); + + // Start ACKing packets, which will empty the send queue, and trigger the + // callback. + EXPECT_CALL(cb_a_, OnBufferedAmountLow(StreamID(1))).Times(1); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, DoesntTriggerOnTotalBufferAmountLowWhenBelow) { + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnTotalBufferedAmountLow).Times(0); + + sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kLargeMessageSize)), + kSendOptions); + + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +TEST_F(DcSctpSocketTest, TriggersOnTotalBufferAmountLowWhenCrossingThreshold) { + ConnectSockets(); + + EXPECT_CALL(cb_a_, OnTotalBufferedAmountLow).Times(0); + + // Fill up the send queue completely. 
+ for (;;) { + if (sock_a_.Send(DcSctpMessage(StreamID(1), PPID(53), + std::vector(kLargeMessageSize)), + kSendOptions) == SendStatus::kErrorResourceExhaustion) { + break; + } + } + + EXPECT_CALL(cb_a_, OnTotalBufferedAmountLow).Times(1); + ExchangeMessages(sock_a_, cb_a_, sock_z_, cb_z_); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/socket/heartbeat_handler.cc b/net/dcsctp/socket/heartbeat_handler.cc new file mode 100644 index 0000000000..78616d1033 --- /dev/null +++ b/net/dcsctp/socket/heartbeat_handler.cc @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/socket/heartbeat_handler.h" + +#include + +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/socket/context.h" +#include "net/dcsctp/timer/timer.h" +#include "rtc_base/logging.h" + +namespace dcsctp { + +// This is stored (in serialized form) as HeartbeatInfoParameter sent in +// HeartbeatRequestChunk and received back in HeartbeatAckChunk. 
It should be +// well understood that this data may be modified by the peer, so it can't +// be trusted. +// +// It currently only stores a timestamp, in millisecond precision, to allow for +// RTT measurements. If that would be manipulated by the peer, it would just +// result in incorrect RTT measurements, which isn't an issue. +class HeartbeatInfo { + public: + static constexpr size_t kBufferSize = sizeof(uint64_t); + static_assert(kBufferSize == 8, "Unexpected buffer size"); + + explicit HeartbeatInfo(TimeMs created_at) : created_at_(created_at) {} + + std::vector Serialize() { + uint32_t high_bits = static_cast(*created_at_ >> 32); + uint32_t low_bits = static_cast(*created_at_); + + std::vector data(kBufferSize); + BoundedByteWriter writer(data); + writer.Store32<0>(high_bits); + writer.Store32<4>(low_bits); + return data; + } + + static absl::optional Deserialize( + rtc::ArrayView data) { + if (data.size() != kBufferSize) { + RTC_LOG(LS_WARNING) << "Invalid heartbeat info: " << data.size() + << " bytes"; + return absl::nullopt; + } + + BoundedByteReader reader(data); + uint32_t high_bits = reader.Load32<0>(); + uint32_t low_bits = reader.Load32<4>(); + + uint64_t created_at = static_cast(high_bits) << 32 | low_bits; + return HeartbeatInfo(TimeMs(created_at)); + } + + TimeMs created_at() const { return created_at_; } + + private: + const TimeMs created_at_; +}; + +HeartbeatHandler::HeartbeatHandler(absl::string_view log_prefix, + const DcSctpOptions& options, + Context* context, + TimerManager* timer_manager) + : log_prefix_(std::string(log_prefix) + "heartbeat: "), + ctx_(context), + timer_manager_(timer_manager), + interval_duration_(options.heartbeat_interval), + interval_duration_should_include_rtt_( + options.heartbeat_interval_include_rtt), + interval_timer_(timer_manager_->CreateTimer( + "heartbeat-interval", + [this]() { return OnIntervalTimerExpiry(); }, + TimerOptions(interval_duration_, TimerBackoffAlgorithm::kFixed))), + 
timeout_timer_(timer_manager_->CreateTimer( + "heartbeat-timeout", + [this]() { return OnTimeoutTimerExpiry(); }, + TimerOptions(options.rto_initial, + TimerBackoffAlgorithm::kExponential, + /*max_restarts=*/0))) { + // The interval timer must always be running as long as the association is up. + RestartTimer(); +} + +void HeartbeatHandler::RestartTimer() { + if (interval_duration_ == DurationMs(0)) { + // Heartbeating has been disabled. + return; + } + + if (interval_duration_should_include_rtt_) { + // The RTT should be used, but it's not easily accessible. The RTO will + // suffice. + interval_timer_->set_duration(interval_duration_ + ctx_->current_rto()); + } else { + interval_timer_->set_duration(interval_duration_); + } + + interval_timer_->Start(); +} + +void HeartbeatHandler::HandleHeartbeatRequest(HeartbeatRequestChunk chunk) { + // https://tools.ietf.org/html/rfc4960#section-8.3 + // "The receiver of the HEARTBEAT should immediately respond with a + // HEARTBEAT ACK that contains the Heartbeat Information TLV, together with + // any other received TLVs, copied unchanged from the received HEARTBEAT + // chunk." 
+ ctx_->Send(ctx_->PacketBuilder().Add( + HeartbeatAckChunk(std::move(chunk).extract_parameters()))); +} + +void HeartbeatHandler::HandleHeartbeatAck(HeartbeatAckChunk chunk) { + timeout_timer_->Stop(); + absl::optional info_param = chunk.info(); + if (!info_param.has_value()) { + ctx_->callbacks().OnError( + ErrorKind::kParseFailed, + "Failed to parse HEARTBEAT-ACK; No Heartbeat Info parameter"); + return; + } + absl::optional info = + HeartbeatInfo::Deserialize(info_param->info()); + if (!info.has_value()) { + ctx_->callbacks().OnError(ErrorKind::kParseFailed, + "Failed to parse HEARTBEAT-ACK; Failed to " + "deserialized Heartbeat info parameter"); + return; + } + + DurationMs duration(*ctx_->callbacks().TimeMillis() - *info->created_at()); + + ctx_->ObserveRTT(duration); + + // https://tools.ietf.org/html/rfc4960#section-8.1 + // "The counter shall be reset each time ... a HEARTBEAT ACK is received from + // the peer endpoint." + ctx_->ClearTxErrorCounter(); +} + +absl::optional HeartbeatHandler::OnIntervalTimerExpiry() { + if (ctx_->is_connection_established()) { + HeartbeatInfo info(ctx_->callbacks().TimeMillis()); + timeout_timer_->set_duration(ctx_->current_rto()); + timeout_timer_->Start(); + RTC_DLOG(LS_INFO) << log_prefix_ << "Sending HEARTBEAT with timeout " + << *timeout_timer_->duration(); + + Parameters parameters = Parameters::Builder() + .Add(HeartbeatInfoParameter(info.Serialize())) + .Build(); + + ctx_->Send(ctx_->PacketBuilder().Add( + HeartbeatRequestChunk(std::move(parameters)))); + } else { + RTC_DLOG(LS_VERBOSE) + << log_prefix_ + << "Will not send HEARTBEAT when connection not established"; + } + return absl::nullopt; +} + +absl::optional HeartbeatHandler::OnTimeoutTimerExpiry() { + // Note that the timeout timer is not restarted. It will be started again when + // the interval timer expires. 
+ RTC_DCHECK(!timeout_timer_->is_running()); + ctx_->IncrementTxErrorCounter("HEARTBEAT timeout"); + return absl::nullopt; +} +} // namespace dcsctp diff --git a/net/dcsctp/socket/heartbeat_handler.h b/net/dcsctp/socket/heartbeat_handler.h new file mode 100644 index 0000000000..14c3109534 --- /dev/null +++ b/net/dcsctp/socket/heartbeat_handler.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_SOCKET_HEARTBEAT_HANDLER_H_ +#define NET_DCSCTP_SOCKET_HEARTBEAT_HANDLER_H_ + +#include + +#include +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/socket/context.h" +#include "net/dcsctp/timer/timer.h" + +namespace dcsctp { + +// HeartbeatHandler handles all logic around sending heartbeats and receiving +// the responses, as well as receiving incoming heartbeat requests. +// +// Heartbeats are sent on idle connections to ensure that the connection is +// still healthy and to measure the RTT. If a number of heartbeats time out, +// the connection will eventually be closed. +class HeartbeatHandler { + public: + HeartbeatHandler(absl::string_view log_prefix, + const DcSctpOptions& options, + Context* context, + TimerManager* timer_manager); + + // Called when the heartbeat interval timer should be restarted. This is + // generally done every time data is sent, which makes the timer expire when + // the connection is idle. 
+ void RestartTimer(); + + // Called on received HeartbeatRequestChunk chunks. + void HandleHeartbeatRequest(HeartbeatRequestChunk chunk); + + // Called on received HeartbeatAckChunk chunks. + void HandleHeartbeatAck(HeartbeatAckChunk chunk); + + private: + absl::optional OnIntervalTimerExpiry(); + absl::optional OnTimeoutTimerExpiry(); + + const std::string log_prefix_; + Context* ctx_; + TimerManager* timer_manager_; + // The time for a connection to be idle before a heartbeat is sent. + const DurationMs interval_duration_; + // Adding RTT to the duration will add some jitter, which is good in + // production, but less good in unit tests, which is why it can be disabled. + const bool interval_duration_should_include_rtt_; + const std::unique_ptr interval_timer_; + const std::unique_ptr timeout_timer_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_HEARTBEAT_HANDLER_H_ diff --git a/net/dcsctp/socket/heartbeat_handler_test.cc b/net/dcsctp/socket/heartbeat_handler_test.cc new file mode 100644 index 0000000000..2c5df9fd92 --- /dev/null +++ b/net/dcsctp/socket/heartbeat_handler_test.cc @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/socket/heartbeat_handler.h" + +#include +#include +#include + +#include "net/dcsctp/packet/chunk/heartbeat_ack_chunk.h" +#include "net/dcsctp/packet/chunk/heartbeat_request_chunk.h" +#include "net/dcsctp/packet/parameter/heartbeat_info_parameter.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/socket/mock_context.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::ElementsAre; +using ::testing::IsEmpty; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::SizeIs; + +constexpr DurationMs kHeartbeatInterval = DurationMs(30'000); + +DcSctpOptions MakeOptions(DurationMs heartbeat_interval) { + DcSctpOptions options; + options.heartbeat_interval_include_rtt = false; + options.heartbeat_interval = heartbeat_interval; + return options; +} + +class HeartbeatHandlerTestBase : public testing::Test { + protected: + explicit HeartbeatHandlerTestBase(DurationMs heartbeat_interval) + : options_(MakeOptions(heartbeat_interval)), + context_(&callbacks_), + timer_manager_([this]() { return callbacks_.CreateTimeout(); }), + handler_("log: ", options_, &context_, &timer_manager_) {} + + void AdvanceTime(DurationMs duration) { + callbacks_.AdvanceTime(duration); + for (;;) { + absl::optional timeout_id = callbacks_.GetNextExpiredTimeout(); + if (!timeout_id.has_value()) { + break; + } + timer_manager_.HandleTimeout(*timeout_id); + } + } + + const DcSctpOptions options_; + NiceMock callbacks_; + NiceMock context_; + TimerManager timer_manager_; + HeartbeatHandler handler_; +}; + +class HeartbeatHandlerTest : public HeartbeatHandlerTestBase { + protected: + HeartbeatHandlerTest() : HeartbeatHandlerTestBase(kHeartbeatInterval) {} +}; + +class DisabledHeartbeatHandlerTest : public HeartbeatHandlerTestBase { + protected: + DisabledHeartbeatHandlerTest() : HeartbeatHandlerTestBase(DurationMs(0)) {} +}; + 
+TEST_F(HeartbeatHandlerTest, HasRunningHeartbeatIntervalTimer) { + AdvanceTime(options_.heartbeat_interval); + + // Validate that a heartbeat request was sent. + std::vector payload = callbacks_.ConsumeSentPacket(); + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(payload)); + ASSERT_THAT(packet.descriptors(), SizeIs(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + HeartbeatRequestChunk request, + HeartbeatRequestChunk::Parse(packet.descriptors()[0].data)); + + EXPECT_TRUE(request.info().has_value()); +} + +TEST_F(HeartbeatHandlerTest, RepliesToHeartbeatRequests) { + uint8_t info_data[] = {1, 2, 3, 4, 5}; + HeartbeatRequestChunk request( + Parameters::Builder().Add(HeartbeatInfoParameter(info_data)).Build()); + + handler_.HandleHeartbeatRequest(std::move(request)); + + std::vector payload = callbacks_.ConsumeSentPacket(); + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(payload)); + ASSERT_THAT(packet.descriptors(), SizeIs(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + HeartbeatAckChunk response, + HeartbeatAckChunk::Parse(packet.descriptors()[0].data)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + HeartbeatInfoParameter param, + response.parameters().get()); + + EXPECT_THAT(param.info(), ElementsAre(1, 2, 3, 4, 5)); +} + +TEST_F(HeartbeatHandlerTest, SendsHeartbeatRequestsOnIdleChannel) { + AdvanceTime(options_.heartbeat_interval); + + // Grab the request, and make a response. + std::vector payload = callbacks_.ConsumeSentPacket(); + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(payload)); + ASSERT_THAT(packet.descriptors(), SizeIs(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + HeartbeatRequestChunk req, + HeartbeatRequestChunk::Parse(packet.descriptors()[0].data)); + + HeartbeatAckChunk ack(std::move(req).extract_parameters()); + + // Respond a while later. 
This RTT will be measured by the handler + constexpr DurationMs rtt(313); + + EXPECT_CALL(context_, ObserveRTT(rtt)).Times(1); + + callbacks_.AdvanceTime(rtt); + handler_.HandleHeartbeatAck(std::move(ack)); +} + +TEST_F(HeartbeatHandlerTest, IncreasesErrorIfNotAckedInTime) { + DurationMs rto(105); + EXPECT_CALL(context_, current_rto).WillOnce(Return(rto)); + AdvanceTime(options_.heartbeat_interval); + + // Validate that a request was sent. + EXPECT_THAT(callbacks_.ConsumeSentPacket(), Not(IsEmpty())); + + EXPECT_CALL(context_, IncrementTxErrorCounter).Times(1); + AdvanceTime(rto); +} + +TEST_F(DisabledHeartbeatHandlerTest, IsReallyDisabled) { + AdvanceTime(options_.heartbeat_interval); + + // Validate that a request was NOT sent. + EXPECT_THAT(callbacks_.ConsumeSentPacket(), IsEmpty()); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/socket/mock_context.h b/net/dcsctp/socket/mock_context.h new file mode 100644 index 0000000000..d86b99a20d --- /dev/null +++ b/net/dcsctp/socket/mock_context.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_SOCKET_MOCK_CONTEXT_H_ +#define NET_DCSCTP_SOCKET_MOCK_CONTEXT_H_ + +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/socket/context.h" +#include "net/dcsctp/socket/mock_dcsctp_socket_callbacks.h" +#include "test/gmock.h" + +namespace dcsctp { + +class MockContext : public Context { + public: + static constexpr TSN MyInitialTsn() { return TSN(990); } + static constexpr TSN PeerInitialTsn() { return TSN(10); } + static constexpr VerificationTag PeerVerificationTag() { + return VerificationTag(0x01234567); + } + + explicit MockContext(MockDcSctpSocketCallbacks* callbacks) + : callbacks_(*callbacks) { + ON_CALL(*this, is_connection_established) + .WillByDefault(testing::Return(true)); + ON_CALL(*this, my_initial_tsn) + .WillByDefault(testing::Return(MyInitialTsn())); + ON_CALL(*this, peer_initial_tsn) + .WillByDefault(testing::Return(PeerInitialTsn())); + ON_CALL(*this, callbacks).WillByDefault(testing::ReturnRef(callbacks_)); + ON_CALL(*this, current_rto).WillByDefault(testing::Return(DurationMs(123))); + ON_CALL(*this, Send).WillByDefault([this](SctpPacket::Builder& builder) { + callbacks_.SendPacket(builder.Build()); + }); + } + + MOCK_METHOD(bool, is_connection_established, (), (const, override)); + MOCK_METHOD(TSN, my_initial_tsn, (), (const, override)); + MOCK_METHOD(TSN, peer_initial_tsn, (), (const, override)); + MOCK_METHOD(DcSctpSocketCallbacks&, callbacks, (), (const, override)); + + MOCK_METHOD(void, ObserveRTT, (DurationMs rtt_ms), (override)); + MOCK_METHOD(DurationMs, current_rto, (), (const, override)); + MOCK_METHOD(bool, + IncrementTxErrorCounter, + (absl::string_view reason), + (override)); + MOCK_METHOD(void, ClearTxErrorCounter, (), (override)); + MOCK_METHOD(bool, HasTooManyTxErrors, (), (const, override)); + SctpPacket::Builder 
PacketBuilder() const override { + return SctpPacket::Builder(PeerVerificationTag(), options_); + } + MOCK_METHOD(void, Send, (SctpPacket::Builder & builder), (override)); + + DcSctpOptions options_; + MockDcSctpSocketCallbacks& callbacks_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_MOCK_CONTEXT_H_ diff --git a/net/dcsctp/socket/mock_dcsctp_socket_callbacks.h b/net/dcsctp/socket/mock_dcsctp_socket_callbacks.h new file mode 100644 index 0000000000..bcf1bde5b8 --- /dev/null +++ b/net/dcsctp/socket/mock_dcsctp_socket_callbacks.h @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_SOCKET_MOCK_DCSCTP_SOCKET_CALLBACKS_H_ +#define NET_DCSCTP_SOCKET_MOCK_DCSCTP_SOCKET_CALLBACKS_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/timeout.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/timer/fake_timeout.h" +#include "rtc_base/logging.h" +#include "rtc_base/random.h" +#include "test/gmock.h" + +namespace dcsctp { + +namespace internal { +// It can be argued if a mocked random number generator should be deterministic +// or if it should be have as a "real" random number generator. In this +// implementation, each instantiation of `MockDcSctpSocketCallbacks` will have +// their `GetRandomInt` return different sequences, but each instantiation will +// always generate the same sequence of random numbers. 
This to make it easier +// to compare logs from tests, but still to let e.g. two different sockets (used +// in the same test) get different random numbers, so that they don't start e.g. +// on the same sequence number. While that isn't an issue in the protocol, it +// just makes debugging harder as the two sockets would look exactly the same. +// +// In a real implementation of `DcSctpSocketCallbacks` the random number +// generator backing `GetRandomInt` should be seeded externally and correctly. +inline int GetUniqueSeed() { + static int seed = 0; + return ++seed; +} +} // namespace internal + +class MockDcSctpSocketCallbacks : public DcSctpSocketCallbacks { + public: + explicit MockDcSctpSocketCallbacks(absl::string_view name = "") + : log_prefix_(name.empty() ? "" : std::string(name) + ": "), + random_(internal::GetUniqueSeed()), + timeout_manager_([this]() { return now_; }) { + ON_CALL(*this, SendPacket) + .WillByDefault([this](rtc::ArrayView data) { + sent_packets_.emplace_back( + std::vector(data.begin(), data.end())); + }); + ON_CALL(*this, OnMessageReceived) + .WillByDefault([this](DcSctpMessage message) { + received_messages_.emplace_back(std::move(message)); + }); + + ON_CALL(*this, OnError) + .WillByDefault([this](ErrorKind error, absl::string_view message) { + RTC_LOG(LS_WARNING) + << log_prefix_ << "Socket error: " << ToString(error) << "; " + << message; + }); + ON_CALL(*this, OnAborted) + .WillByDefault([this](ErrorKind error, absl::string_view message) { + RTC_LOG(LS_WARNING) + << log_prefix_ << "Socket abort: " << ToString(error) << "; " + << message; + }); + ON_CALL(*this, TimeMillis).WillByDefault([this]() { return now_; }); + } + MOCK_METHOD(void, + SendPacket, + (rtc::ArrayView data), + (override)); + + std::unique_ptr CreateTimeout() override { + return timeout_manager_.CreateTimeout(); + } + + MOCK_METHOD(TimeMs, TimeMillis, (), (override)); + uint32_t GetRandomInt(uint32_t low, uint32_t high) override { + return random_.Rand(low, high); + 
} + + MOCK_METHOD(void, OnMessageReceived, (DcSctpMessage message), (override)); + MOCK_METHOD(void, + OnError, + (ErrorKind error, absl::string_view message), + (override)); + MOCK_METHOD(void, + OnAborted, + (ErrorKind error, absl::string_view message), + (override)); + MOCK_METHOD(void, OnConnected, (), (override)); + MOCK_METHOD(void, OnClosed, (), (override)); + MOCK_METHOD(void, OnConnectionRestarted, (), (override)); + MOCK_METHOD(void, + OnStreamsResetFailed, + (rtc::ArrayView outgoing_streams, + absl::string_view reason), + (override)); + MOCK_METHOD(void, + OnStreamsResetPerformed, + (rtc::ArrayView outgoing_streams), + (override)); + MOCK_METHOD(void, + OnIncomingStreamsReset, + (rtc::ArrayView incoming_streams), + (override)); + MOCK_METHOD(void, OnBufferedAmountLow, (StreamID stream_id), (override)); + MOCK_METHOD(void, OnTotalBufferedAmountLow, (), (override)); + + bool HasPacket() const { return !sent_packets_.empty(); } + + std::vector ConsumeSentPacket() { + if (sent_packets_.empty()) { + return {}; + } + std::vector ret = std::move(sent_packets_.front()); + sent_packets_.pop_front(); + return ret; + } + absl::optional ConsumeReceivedMessage() { + if (received_messages_.empty()) { + return absl::nullopt; + } + DcSctpMessage ret = std::move(received_messages_.front()); + received_messages_.pop_front(); + return ret; + } + + void AdvanceTime(DurationMs duration_ms) { now_ = now_ + duration_ms; } + void SetTime(TimeMs now) { now_ = now; } + + absl::optional GetNextExpiredTimeout() { + return timeout_manager_.GetNextExpiredTimeout(); + } + + private: + const std::string log_prefix_; + TimeMs now_ = TimeMs(0); + webrtc::Random random_; + FakeTimeoutManager timeout_manager_; + std::deque> sent_packets_; + std::deque received_messages_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_MOCK_DCSCTP_SOCKET_CALLBACKS_H_ diff --git a/net/dcsctp/socket/state_cookie.cc b/net/dcsctp/socket/state_cookie.cc new file mode 100644 index 0000000000..7d04cbb0d7 
--- /dev/null +++ b/net/dcsctp/socket/state_cookie.cc @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/socket/state_cookie.h" + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/bounded_byte_reader.h" +#include "net/dcsctp/packet/bounded_byte_writer.h" +#include "net/dcsctp/socket/capabilities.h" +#include "rtc_base/logging.h" + +namespace dcsctp { + +// Magic values, which the state cookie is prefixed with. +constexpr uint32_t kMagic1 = 1684230979; +constexpr uint32_t kMagic2 = 1414541360; +constexpr size_t StateCookie::kCookieSize; + +std::vector StateCookie::Serialize() { + std::vector cookie; + cookie.resize(kCookieSize); + BoundedByteWriter buffer(cookie); + buffer.Store32<0>(kMagic1); + buffer.Store32<4>(kMagic2); + buffer.Store32<8>(*initiate_tag_); + buffer.Store32<12>(*initial_tsn_); + buffer.Store32<16>(a_rwnd_); + buffer.Store32<20>(static_cast(*tie_tag_ >> 32)); + buffer.Store32<24>(static_cast(*tie_tag_)); + buffer.Store8<28>(capabilities_.partial_reliability); + buffer.Store8<29>(capabilities_.message_interleaving); + buffer.Store8<30>(capabilities_.reconfig); + return cookie; +} + +absl::optional StateCookie::Deserialize( + rtc::ArrayView cookie) { + if (cookie.size() != kCookieSize) { + RTC_DLOG(LS_WARNING) << "Invalid state cookie: " << cookie.size() + << " bytes"; + return absl::nullopt; + } + + BoundedByteReader buffer(cookie); + uint32_t magic1 = buffer.Load32<0>(); + uint32_t magic2 = buffer.Load32<4>(); + if (magic1 != kMagic1 || magic2 != kMagic2) { + RTC_DLOG(LS_WARNING) << 
"Invalid state cookie; wrong magic"; + return absl::nullopt; + } + + VerificationTag verification_tag(buffer.Load32<8>()); + TSN initial_tsn(buffer.Load32<12>()); + uint32_t a_rwnd = buffer.Load32<16>(); + uint32_t tie_tag_upper = buffer.Load32<20>(); + uint32_t tie_tag_lower = buffer.Load32<24>(); + TieTag tie_tag(static_cast(tie_tag_upper) << 32 | + static_cast(tie_tag_lower)); + Capabilities capabilities; + capabilities.partial_reliability = buffer.Load8<28>() != 0; + capabilities.message_interleaving = buffer.Load8<29>() != 0; + capabilities.reconfig = buffer.Load8<30>() != 0; + + return StateCookie(verification_tag, initial_tsn, a_rwnd, tie_tag, + capabilities); +} + +} // namespace dcsctp diff --git a/net/dcsctp/socket/state_cookie.h b/net/dcsctp/socket/state_cookie.h new file mode 100644 index 0000000000..df4b801397 --- /dev/null +++ b/net/dcsctp/socket/state_cookie.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_SOCKET_STATE_COOKIE_H_ +#define NET_DCSCTP_SOCKET_STATE_COOKIE_H_ + +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/socket/capabilities.h" + +namespace dcsctp { + +// This is serialized as a state cookie and put in INIT_ACK. The client then +// responds with this in COOKIE_ECHO. +// +// NOTE: Expect that the client will modify it to try to exploit the library. +// Do not trust anything in it; no pointers or anything like that. 
+class StateCookie { + public: + static constexpr size_t kCookieSize = 31; + + StateCookie(VerificationTag initiate_tag, + TSN initial_tsn, + uint32_t a_rwnd, + TieTag tie_tag, + Capabilities capabilities) + : initiate_tag_(initiate_tag), + initial_tsn_(initial_tsn), + a_rwnd_(a_rwnd), + tie_tag_(tie_tag), + capabilities_(capabilities) {} + + // Returns a serialized version of this cookie. + std::vector Serialize(); + + // Deserializes the cookie, and returns absl::nullopt if that failed. + static absl::optional Deserialize( + rtc::ArrayView cookie); + + VerificationTag initiate_tag() const { return initiate_tag_; } + TSN initial_tsn() const { return initial_tsn_; } + uint32_t a_rwnd() const { return a_rwnd_; } + TieTag tie_tag() const { return tie_tag_; } + const Capabilities& capabilities() const { return capabilities_; } + + private: + const VerificationTag initiate_tag_; + const TSN initial_tsn_; + const uint32_t a_rwnd_; + const TieTag tie_tag_; + const Capabilities capabilities_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_STATE_COOKIE_H_ diff --git a/net/dcsctp/socket/state_cookie_test.cc b/net/dcsctp/socket/state_cookie_test.cc new file mode 100644 index 0000000000..eab41a7a56 --- /dev/null +++ b/net/dcsctp/socket/state_cookie_test.cc @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/socket/state_cookie.h" + +#include "net/dcsctp/testing/testing_macros.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::SizeIs; + +TEST(StateCookieTest, SerializeAndDeserialize) { + Capabilities capabilities = {/*partial_reliability=*/true, + /*message_interleaving=*/false, + /*reconfig=*/true}; + StateCookie cookie(VerificationTag(123), TSN(456), + /*a_rwnd=*/789, TieTag(101112), capabilities); + std::vector serialized = cookie.Serialize(); + EXPECT_THAT(serialized, SizeIs(StateCookie::kCookieSize)); + ASSERT_HAS_VALUE_AND_ASSIGN(StateCookie deserialized, + StateCookie::Deserialize(serialized)); + EXPECT_EQ(deserialized.initiate_tag(), VerificationTag(123)); + EXPECT_EQ(deserialized.initial_tsn(), TSN(456)); + EXPECT_EQ(deserialized.a_rwnd(), 789u); + EXPECT_EQ(deserialized.tie_tag(), TieTag(101112)); + EXPECT_TRUE(deserialized.capabilities().partial_reliability); + EXPECT_FALSE(deserialized.capabilities().message_interleaving); + EXPECT_TRUE(deserialized.capabilities().reconfig); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/socket/stream_reset_handler.cc b/net/dcsctp/socket/stream_reset_handler.cc new file mode 100644 index 0000000000..a1f57e6b2b --- /dev/null +++ b/net/dcsctp/socket/stream_reset_handler.cc @@ -0,0 +1,347 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/socket/stream_reset_handler.h" + +#include +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/common/str_join.h" +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" +#include "net/dcsctp/packet/parameter/add_incoming_streams_request_parameter.h" +#include "net/dcsctp/packet/parameter/add_outgoing_streams_request_parameter.h" +#include "net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h" +#include "net/dcsctp/packet/parameter/ssn_tsn_reset_request_parameter.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/packet/tlv_trait.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/rx/data_tracker.h" +#include "net/dcsctp/rx/reassembly_queue.h" +#include "net/dcsctp/socket/context.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/retransmission_queue.h" +#include "rtc_base/logging.h" + +namespace dcsctp { +namespace { +using ResponseResult = ReconfigurationResponseParameter::Result; + +bool DescriptorsAre(const std::vector& c, + uint16_t e1, + uint16_t e2) { + return (c[0].type == e1 && c[1].type == e2) || + (c[0].type == e2 && c[1].type == e1); +} + +} // namespace + +bool StreamResetHandler::Validate(const ReConfigChunk& chunk) { + const Parameters& parameters = chunk.parameters(); + + // https://tools.ietf.org/html/rfc6525#section-3.1 + // "Note that each RE-CONFIG chunk holds at least one parameter + // and at most two parameters. 
Only the following combinations are allowed:" + std::vector descriptors = parameters.descriptors(); + if (descriptors.size() == 1) { + if ((descriptors[0].type == OutgoingSSNResetRequestParameter::kType) || + (descriptors[0].type == IncomingSSNResetRequestParameter::kType) || + (descriptors[0].type == SSNTSNResetRequestParameter::kType) || + (descriptors[0].type == AddOutgoingStreamsRequestParameter::kType) || + (descriptors[0].type == AddIncomingStreamsRequestParameter::kType) || + (descriptors[0].type == ReconfigurationResponseParameter::kType)) { + return true; + } + } else if (descriptors.size() == 2) { + if (DescriptorsAre(descriptors, OutgoingSSNResetRequestParameter::kType, + IncomingSSNResetRequestParameter::kType) || + DescriptorsAre(descriptors, AddOutgoingStreamsRequestParameter::kType, + AddIncomingStreamsRequestParameter::kType) || + DescriptorsAre(descriptors, ReconfigurationResponseParameter::kType, + OutgoingSSNResetRequestParameter::kType) || + DescriptorsAre(descriptors, ReconfigurationResponseParameter::kType, + ReconfigurationResponseParameter::kType)) { + return true; + } + } + + RTC_LOG(LS_WARNING) << "Invalid set of RE-CONFIG parameters"; + return false; +} + +absl::optional> +StreamResetHandler::Process(const ReConfigChunk& chunk) { + if (!Validate(chunk)) { + return absl::nullopt; + } + + std::vector responses; + + for (const ParameterDescriptor& desc : chunk.parameters().descriptors()) { + switch (desc.type) { + case OutgoingSSNResetRequestParameter::kType: + HandleResetOutgoing(desc, responses); + break; + + case IncomingSSNResetRequestParameter::kType: + HandleResetIncoming(desc, responses); + break; + + case ReconfigurationResponseParameter::kType: + HandleResponse(desc); + break; + } + } + + return responses; +} + +void StreamResetHandler::HandleReConfig(ReConfigChunk chunk) { + absl::optional> responses = + Process(chunk); + + if (!responses.has_value()) { + ctx_->callbacks().OnError(ErrorKind::kParseFailed, + "Failed to parse 
RE-CONFIG command"); + return; + } + + if (!responses->empty()) { + SctpPacket::Builder b = ctx_->PacketBuilder(); + Parameters::Builder params_builder; + for (const auto& response : *responses) { + params_builder.Add(response); + } + b.Add(ReConfigChunk(params_builder.Build())); + ctx_->Send(b); + } +} + +bool StreamResetHandler::ValidateReqSeqNbr( + ReconfigRequestSN req_seq_nbr, + std::vector& responses) { + if (req_seq_nbr == last_processed_req_seq_nbr_) { + // This has already been performed previously. + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "req=" << *req_seq_nbr + << " already processed"; + responses.push_back(ReconfigurationResponseParameter( + req_seq_nbr, ResponseResult::kSuccessNothingToDo)); + return false; + } + + if (req_seq_nbr != ReconfigRequestSN(*last_processed_req_seq_nbr_ + 1)) { + // Too old, too new, from wrong association etc. + // This is expected to happen when handing over a RTCPeerConnection from one + // server to another. The client will notice this and may decide to close + // old data channels, which may be sent to the wrong (or both) servers + // during a handover. 
+ RTC_DLOG(LS_VERBOSE) << log_prefix_ << "req=" << *req_seq_nbr + << " bad seq_nbr"; + responses.push_back(ReconfigurationResponseParameter( + req_seq_nbr, ResponseResult::kErrorBadSequenceNumber)); + return false; + } + + return true; +} + +void StreamResetHandler::HandleResetOutgoing( + const ParameterDescriptor& descriptor, + std::vector& responses) { + absl::optional req = + OutgoingSSNResetRequestParameter::Parse(descriptor.data); + if (!req.has_value()) { + ctx_->callbacks().OnError(ErrorKind::kParseFailed, + "Failed to parse Outgoing Reset command"); + return; + } + + if (ValidateReqSeqNbr(req->request_sequence_number(), responses)) { + ResponseResult result; + + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "Reset outgoing streams with req_seq_nbr=" + << *req->request_sequence_number(); + + result = reassembly_queue_->ResetStreams( + *req, data_tracker_->last_cumulative_acked_tsn()); + if (result == ResponseResult::kSuccessPerformed) { + last_processed_req_seq_nbr_ = req->request_sequence_number(); + ctx_->callbacks().OnIncomingStreamsReset(req->stream_ids()); + } + responses.push_back(ReconfigurationResponseParameter( + req->request_sequence_number(), result)); + } +} + +void StreamResetHandler::HandleResetIncoming( + const ParameterDescriptor& descriptor, + std::vector& responses) { + absl::optional req = + IncomingSSNResetRequestParameter::Parse(descriptor.data); + if (!req.has_value()) { + ctx_->callbacks().OnError(ErrorKind::kParseFailed, + "Failed to parse Incoming Reset command"); + return; + } + if (ValidateReqSeqNbr(req->request_sequence_number(), responses)) { + responses.push_back(ReconfigurationResponseParameter( + req->request_sequence_number(), ResponseResult::kSuccessNothingToDo)); + last_processed_req_seq_nbr_ = req->request_sequence_number(); + } +} + +void StreamResetHandler::HandleResponse(const ParameterDescriptor& descriptor) { + absl::optional resp = + ReconfigurationResponseParameter::Parse(descriptor.data); + if (!resp.has_value()) { + 
ctx_->callbacks().OnError( + ErrorKind::kParseFailed, + "Failed to parse Reconfiguration Response command"); + return; + } + + if (current_request_.has_value() && current_request_->has_been_sent() && + resp->response_sequence_number() == current_request_->req_seq_nbr()) { + reconfig_timer_->Stop(); + + switch (resp->result()) { + case ResponseResult::kSuccessNothingToDo: + case ResponseResult::kSuccessPerformed: + RTC_DLOG(LS_VERBOSE) + << log_prefix_ << "Reset stream success, req_seq_nbr=" + << *current_request_->req_seq_nbr() << ", streams=" + << StrJoin(current_request_->streams(), ",", + [](rtc::StringBuilder& sb, StreamID stream_id) { + sb << *stream_id; + }); + ctx_->callbacks().OnStreamsResetPerformed(current_request_->streams()); + current_request_ = absl::nullopt; + retransmission_queue_->CommitResetStreams(); + break; + case ResponseResult::kInProgress: + RTC_DLOG(LS_VERBOSE) + << log_prefix_ << "Reset stream still pending, req_seq_nbr=" + << *current_request_->req_seq_nbr() << ", streams=" + << StrJoin(current_request_->streams(), ",", + [](rtc::StringBuilder& sb, StreamID stream_id) { + sb << *stream_id; + }); + // Force this request to be sent again, but with new req_seq_nbr. 
+ current_request_->PrepareRetransmission(); + reconfig_timer_->set_duration(ctx_->current_rto()); + reconfig_timer_->Start(); + break; + case ResponseResult::kErrorRequestAlreadyInProgress: + case ResponseResult::kDenied: + case ResponseResult::kErrorWrongSSN: + case ResponseResult::kErrorBadSequenceNumber: + RTC_DLOG(LS_WARNING) + << log_prefix_ << "Reset stream error=" << ToString(resp->result()) + << ", req_seq_nbr=" << *current_request_->req_seq_nbr() + << ", streams=" + << StrJoin(current_request_->streams(), ",", + [](rtc::StringBuilder& sb, StreamID stream_id) { + sb << *stream_id; + }); + ctx_->callbacks().OnStreamsResetFailed(current_request_->streams(), + ToString(resp->result())); + current_request_ = absl::nullopt; + retransmission_queue_->RollbackResetStreams(); + break; + } + } +} + +absl::optional StreamResetHandler::MakeStreamResetRequest() { + // Only send stream resets if there are streams to reset, and no current + // ongoing request (there can only be one at a time), and if the stream + // can be reset. + if (streams_to_reset_.empty() || current_request_.has_value() || + !retransmission_queue_->CanResetStreams()) { + return absl::nullopt; + } + + std::vector streams_to_reset(streams_to_reset_.begin(), + streams_to_reset_.end()); + current_request_.emplace(TSN(*retransmission_queue_->next_tsn() - 1), + std::move(streams_to_reset)); + streams_to_reset_.clear(); + reconfig_timer_->set_duration(ctx_->current_rto()); + reconfig_timer_->Start(); + return MakeReconfigChunk(); +} + +ReConfigChunk StreamResetHandler::MakeReconfigChunk() { + // The req_seq_nbr will be empty if the request has never been sent before, + // or if it was sent, but the sender responded "in progress", and then the + // req_seq_nbr will be cleared to re-send with a new number. But if the + // request is re-sent due to timeout (reconfig-timer expiring), the same + // req_seq_nbr will be used. 
+ RTC_DCHECK(current_request_.has_value()); + + if (!current_request_->has_been_sent()) { + current_request_->PrepareToSend(next_outgoing_req_seq_nbr_); + next_outgoing_req_seq_nbr_ = + ReconfigRequestSN(*next_outgoing_req_seq_nbr_ + 1); + } + + Parameters::Builder params_builder = + Parameters::Builder().Add(OutgoingSSNResetRequestParameter( + current_request_->req_seq_nbr(), current_request_->req_seq_nbr(), + current_request_->sender_last_assigned_tsn(), + current_request_->streams())); + + return ReConfigChunk(params_builder.Build()); +} + +void StreamResetHandler::ResetStreams( + rtc::ArrayView outgoing_streams) { + // Enqueue streams to be reset - as this may be called multiple times + // while a request is already in progress (and there can only be one). + for (StreamID stream_id : outgoing_streams) { + streams_to_reset_.insert(stream_id); + } + if (current_request_.has_value()) { + // Already an ongoing request - will need to wait for it to finish as + // there can only be one in-flight ReConfig chunk with requests at any + // time. + } else { + retransmission_queue_->PrepareResetStreams(std::vector( + streams_to_reset_.begin(), streams_to_reset_.end())); + } +} + +absl::optional StreamResetHandler::OnReconfigTimerExpiry() { + if (current_request_->has_been_sent()) { + // There is an outstanding request, which timed out while waiting for a + // response. + if (!ctx_->IncrementTxErrorCounter("RECONFIG timeout")) { + // Timed out. The connection will close after processing the timers. + return absl::nullopt; + } + } else { + // There is no outstanding request, but there is a prepared one. This means + // that the receiver has previously responded "in progress", which resulted + // in retrying the request (but with a new req_seq_nbr) after a while. 
+ } + + ctx_->Send(ctx_->PacketBuilder().Add(MakeReconfigChunk())); + return ctx_->current_rto(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/socket/stream_reset_handler.h b/net/dcsctp/socket/stream_reset_handler.h new file mode 100644 index 0000000000..dc0ee5e8cc --- /dev/null +++ b/net/dcsctp/socket/stream_reset_handler.h @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_SOCKET_STREAM_RESET_HANDLER_H_ +#define NET_DCSCTP_SOCKET_STREAM_RESET_HANDLER_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" +#include "net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/rx/data_tracker.h" +#include "net/dcsctp/rx/reassembly_queue.h" +#include "net/dcsctp/socket/context.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/retransmission_queue.h" + +namespace dcsctp { + +// StreamResetHandler handles sending outgoing stream reset requests (to close +// an SCTP stream, which translates to closing a data channel). +// +// It also handles incoming "outgoing stream reset requests", when the peer +// wants to close its data channel. 
+// +// Resetting streams is an asynchronous operation where the client will request +// a stream to be reset, but then it might not be performed exactly at +// this point. First, the sender might need to discard all messages that have +// been enqueued for this stream, or it may select to wait until all have been +// sent. At least, it must wait for the currently sending fragmented message to +// be fully sent, because a stream can't be reset while having received half a +// message. In the stream reset request, the "sender's last assigned TSN" is +// provided, which is simply the TSN for which the receiver should've received +// all messages before this value, before the stream can be reset. Since +// fragments can get lost or sent out-of-order, the receiver of a request may +// not have received all the data just yet, and then it will respond to the +// sender: "In progress". In other words, try again. The sender will then need +// to start a timer and try the very same request again (but with a new sequence +// number) until the receiver successfully performs the operation. +// +// All this can take some time, and may be driven by timers, so the client will +// ultimately be notified using callbacks. +// +// In this implementation, when a stream is reset, the queued but not-yet-sent +// messages will be discarded, but that may change in the future. RFC8831 allows +// both behaviors. 
+class StreamResetHandler { + public: + StreamResetHandler(absl::string_view log_prefix, + Context* context, + TimerManager* timer_manager, + DataTracker* data_tracker, + ReassemblyQueue* reassembly_queue, + RetransmissionQueue* retransmission_queue) + : log_prefix_(std::string(log_prefix) + "reset: "), + ctx_(context), + data_tracker_(data_tracker), + reassembly_queue_(reassembly_queue), + retransmission_queue_(retransmission_queue), + reconfig_timer_(timer_manager->CreateTimer( + "re-config", + [this]() { return OnReconfigTimerExpiry(); }, + TimerOptions(DurationMs(0)))), + next_outgoing_req_seq_nbr_(ReconfigRequestSN(*ctx_->my_initial_tsn())), + last_processed_req_seq_nbr_( + ReconfigRequestSN(*ctx_->peer_initial_tsn() - 1)) {} + + // Initiates reset of the provided streams. While there can only be one + // ongoing stream reset request at any time, this method can be called at any + // time and also multiple times. It will enqueue requests that can't be + // directly fulfilled, and will asynchronously process them when any ongoing + // request has completed. + void ResetStreams(rtc::ArrayView outgoing_streams); + + // Creates a Reset Streams request that must be sent if returned. Will start + // the reconfig timer. Will return absl::nullopt if there is no need to + // create a request (no streams to reset) or if there already is an ongoing + // stream reset request that hasn't completed yet. + absl::optional MakeStreamResetRequest(); + + // Called when handling and incoming RE-CONFIG chunk. + void HandleReConfig(ReConfigChunk chunk); + + private: + // Represents a stream request operation. There can only be one ongoing at + // any time, and a sent request may either succeed, fail or result in the + // receiver signaling that it can't process it right now, and then it will be + // retried. 
+ class CurrentRequest { + public: + CurrentRequest(TSN sender_last_assigned_tsn, std::vector streams) + : req_seq_nbr_(absl::nullopt), + sender_last_assigned_tsn_(sender_last_assigned_tsn), + streams_(std::move(streams)) {} + + // Returns the current request sequence number, if this request has been + // sent (check `has_been_sent` first). Will return 0 if the request is just + // prepared (or scheduled for retransmission) but not yet sent. + ReconfigRequestSN req_seq_nbr() const { + return req_seq_nbr_.value_or(ReconfigRequestSN(0)); + } + + // The sender's last assigned TSN, from the retransmission queue. The + // receiver uses this to know when all data up to this TSN has been + // received, to know when to safely reset the stream. + TSN sender_last_assigned_tsn() const { return sender_last_assigned_tsn_; } + + // The streams that are to be reset. + const std::vector& streams() const { return streams_; } + + // If this request has been sent yet. If not, then it's either because it + // has only been prepared and not yet sent, or because the receiver couldn't + // apply the request, and then the exact same request will be retried, but + // with a new sequence number. + bool has_been_sent() const { return req_seq_nbr_.has_value(); } + + // If the receiver can't apply the request yet (and answered "In Progress"), + // this will be called to prepare the request to be retransmitted at a later + // time. + void PrepareRetransmission() { req_seq_nbr_ = absl::nullopt; } + + // If the request hasn't been sent yet, this assigns it a request number. + void PrepareToSend(ReconfigRequestSN new_req_seq_nbr) { + req_seq_nbr_ = new_req_seq_nbr; + } + + private: + // If this is set, this request has been sent. If it's not set, the request + // has been prepared, but has not yet been sent. This is typically used when + // the peer responded "in progress" and the same request (but a different + // request number) must be sent again. 
+ absl::optional req_seq_nbr_; + // The sender's (that's us) last assigned TSN, from the retransmission + // queue. + TSN sender_last_assigned_tsn_; + // The streams that are to be reset in this request. + const std::vector streams_; + }; + + // Called to validate an incoming RE-CONFIG chunk. + bool Validate(const ReConfigChunk& chunk); + + // Processes a stream reconfiguration chunk and may either return + // absl::nullopt (on protocol errors), or a list of responses - either 0, 1 + // or 2. + absl::optional> Process( + const ReConfigChunk& chunk); + + // Creates the actual RE-CONFIG chunk. A request (which set `current_request`) + // must have been created prior. + ReConfigChunk MakeReconfigChunk(); + + // Called to validate the `req_seq_nbr`, that it's the next in sequence. If it + // fails to validate, and returns false, it will also add a response to + // `responses`. + bool ValidateReqSeqNbr( + ReconfigRequestSN req_seq_nbr, + std::vector& responses); + + // Called when this socket receives an outgoing stream reset request. It might + // either be performed straight away, or have to be deferred, and the result + // of that will be put in `responses`. + void HandleResetOutgoing( + const ParameterDescriptor& descriptor, + std::vector& responses); + + // Called when this socket receives an incoming stream reset request. This + // isn't really supported, but a successful response is put in `responses`. + void HandleResetIncoming( + const ParameterDescriptor& descriptor, + std::vector& responses); + + // Called when receiving a response to an outgoing stream reset request. It + // will either commit the stream resetting, if the operation was successful, + // or will schedule a retry if it was deferred. And if it failed, the + // operation will be rolled back. + void HandleResponse(const ParameterDescriptor& descriptor); + + // Expiration handler for the Reconfig timer. 
+ absl::optional OnReconfigTimerExpiry(); + + const std::string log_prefix_; + Context* ctx_; + DataTracker* data_tracker_; + ReassemblyQueue* reassembly_queue_; + RetransmissionQueue* retransmission_queue_; + const std::unique_ptr reconfig_timer_; + + // Outgoing streams that have been requested to be reset, but hasn't yet + // been included in an outgoing request. + std::unordered_set streams_to_reset_; + + // The next sequence number for outgoing stream requests. + ReconfigRequestSN next_outgoing_req_seq_nbr_; + + // The current stream request operation. + absl::optional current_request_; + + // For incoming requests - last processed request sequence number. + ReconfigRequestSN last_processed_req_seq_nbr_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_STREAM_RESET_HANDLER_H_ diff --git a/net/dcsctp/socket/stream_reset_handler_test.cc b/net/dcsctp/socket/stream_reset_handler_test.cc new file mode 100644 index 0000000000..a8e96fbf20 --- /dev/null +++ b/net/dcsctp/socket/stream_reset_handler_test.cc @@ -0,0 +1,550 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/socket/stream_reset_handler.h" + +#include +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" +#include "net/dcsctp/packet/parameter/incoming_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/outgoing_ssn_reset_request_parameter.h" +#include "net/dcsctp/packet/parameter/parameter.h" +#include "net/dcsctp/packet/parameter/reconfiguration_response_parameter.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/rx/data_tracker.h" +#include "net/dcsctp/rx/reassembly_queue.h" +#include "net/dcsctp/socket/mock_context.h" +#include "net/dcsctp/socket/mock_dcsctp_socket_callbacks.h" +#include "net/dcsctp/testing/data_generator.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/mock_send_queue.h" +#include "net/dcsctp/tx/retransmission_queue.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::_; +using ::testing::IsEmpty; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::SizeIs; +using ::testing::UnorderedElementsAre; +using ResponseResult = ReconfigurationResponseParameter::Result; + +constexpr TSN kMyInitialTsn = MockContext::MyInitialTsn(); +constexpr ReconfigRequestSN kMyInitialReqSn = ReconfigRequestSN(*kMyInitialTsn); +constexpr TSN kPeerInitialTsn = MockContext::PeerInitialTsn(); +constexpr ReconfigRequestSN kPeerInitialReqSn = + ReconfigRequestSN(*kPeerInitialTsn); +constexpr uint32_t kArwnd = 131072; +constexpr DurationMs kRto = DurationMs(250); + +constexpr std::array kShortPayload = {1, 2, 3, 4}; + +MATCHER_P3(SctpMessageIs, stream_id, ppid, expected_payload, "") { + if (arg.stream_id() != stream_id) { + *result_listener << "the stream_id is " << *arg.stream_id(); + return false; + } + + if (arg.ppid() != 
ppid) { + *result_listener << "the ppid is " << *arg.ppid(); + return false; + } + + if (std::vector(arg.payload().begin(), arg.payload().end()) != + std::vector(expected_payload.begin(), expected_payload.end())) { + *result_listener << "the payload is wrong"; + return false; + } + return true; +} + +TSN AddTo(TSN tsn, int delta) { + return TSN(*tsn + delta); +} + +ReconfigRequestSN AddTo(ReconfigRequestSN req_sn, int delta) { + return ReconfigRequestSN(*req_sn + delta); +} + +class StreamResetHandlerTest : public testing::Test { + protected: + StreamResetHandlerTest() + : ctx_(&callbacks_), + timer_manager_([this]() { return callbacks_.CreateTimeout(); }), + delayed_ack_timer_(timer_manager_.CreateTimer( + "test/delayed_ack", + []() { return absl::nullopt; }, + TimerOptions(DurationMs(0)))), + t3_rtx_timer_(timer_manager_.CreateTimer( + "test/t3_rtx", + []() { return absl::nullopt; }, + TimerOptions(DurationMs(0)))), + buf_("log: ", delayed_ack_timer_.get(), kPeerInitialTsn), + reasm_("log: ", kPeerInitialTsn, kArwnd), + retransmission_queue_( + "", + kMyInitialTsn, + kArwnd, + producer_, + [](DurationMs rtt_ms) {}, + []() {}, + *t3_rtx_timer_, + /*options=*/{}), + handler_("log: ", + &ctx_, + &timer_manager_, + &buf_, + &reasm_, + &retransmission_queue_) { + EXPECT_CALL(ctx_, current_rto).WillRepeatedly(Return(kRto)); + } + + void AdvanceTime(DurationMs duration) { + callbacks_.AdvanceTime(kRto); + for (;;) { + absl::optional timeout_id = callbacks_.GetNextExpiredTimeout(); + if (!timeout_id.has_value()) { + break; + } + timer_manager_.HandleTimeout(*timeout_id); + } + } + + // Handles the passed in RE-CONFIG `chunk` and returns the responses + // that are sent in the response RE-CONFIG. 
+ std::vector HandleAndCatchResponse( + ReConfigChunk chunk) { + handler_.HandleReConfig(std::move(chunk)); + + std::vector payload = callbacks_.ConsumeSentPacket(); + if (payload.empty()) { + EXPECT_TRUE(false); + return {}; + } + + std::vector responses; + absl::optional p = SctpPacket::Parse(payload); + if (!p.has_value()) { + EXPECT_TRUE(false); + return {}; + } + if (p->descriptors().size() != 1) { + EXPECT_TRUE(false); + return {}; + } + absl::optional response_chunk = + ReConfigChunk::Parse(p->descriptors()[0].data); + if (!response_chunk.has_value()) { + EXPECT_TRUE(false); + return {}; + } + for (const auto& desc : response_chunk->parameters().descriptors()) { + if (desc.type == ReconfigurationResponseParameter::kType) { + absl::optional response = + ReconfigurationResponseParameter::Parse(desc.data); + if (!response.has_value()) { + EXPECT_TRUE(false); + return {}; + } + responses.emplace_back(*std::move(response)); + } + } + return responses; + } + + DataGenerator gen_; + NiceMock callbacks_; + NiceMock ctx_; + NiceMock producer_; + TimerManager timer_manager_; + std::unique_ptr delayed_ack_timer_; + std::unique_ptr t3_rtx_timer_; + DataTracker buf_; + ReassemblyQueue reasm_; + RetransmissionQueue retransmission_queue_; + StreamResetHandler handler_; +}; + +TEST_F(StreamResetHandlerTest, ChunkWithNoParametersReturnsError) { + EXPECT_CALL(callbacks_, SendPacket).Times(0); + EXPECT_CALL(callbacks_, OnError).Times(1); + handler_.HandleReConfig(ReConfigChunk(Parameters())); +} + +TEST_F(StreamResetHandlerTest, ChunkWithInvalidParametersReturnsError) { + Parameters::Builder builder; + // Two OutgoingSSNResetRequestParameter in a RE-CONFIG is not valid. 
+ builder.Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(1), + ReconfigRequestSN(10), + kPeerInitialTsn, {StreamID(1)})); + builder.Add(OutgoingSSNResetRequestParameter(ReconfigRequestSN(2), + ReconfigRequestSN(10), + kPeerInitialTsn, {StreamID(2)})); + + EXPECT_CALL(callbacks_, SendPacket).Times(0); + EXPECT_CALL(callbacks_, OnError).Times(1); + handler_.HandleReConfig(ReConfigChunk(builder.Build())); +} + +TEST_F(StreamResetHandlerTest, FailToDeliverWithoutResettingStream) { + reasm_.Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE")); + reasm_.Add(AddTo(kPeerInitialTsn, 1), gen_.Ordered({1, 2, 3, 4}, "BE")); + + buf_.Observe(kPeerInitialTsn); + buf_.Observe(AddTo(kPeerInitialTsn, 1)); + EXPECT_THAT(reasm_.FlushMessages(), + UnorderedElementsAre( + SctpMessageIs(StreamID(1), PPID(53), kShortPayload), + SctpMessageIs(StreamID(1), PPID(53), kShortPayload))); + + gen_.ResetStream(); + reasm_.Add(AddTo(kPeerInitialTsn, 2), gen_.Ordered({1, 2, 3, 4}, "BE")); + EXPECT_THAT(reasm_.FlushMessages(), IsEmpty()); +} + +TEST_F(StreamResetHandlerTest, ResetStreamsNotDeferred) { + reasm_.Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE")); + reasm_.Add(AddTo(kPeerInitialTsn, 1), gen_.Ordered({1, 2, 3, 4}, "BE")); + + buf_.Observe(kPeerInitialTsn); + buf_.Observe(AddTo(kPeerInitialTsn, 1)); + EXPECT_THAT(reasm_.FlushMessages(), + UnorderedElementsAre( + SctpMessageIs(StreamID(1), PPID(53), kShortPayload), + SctpMessageIs(StreamID(1), PPID(53), kShortPayload))); + + Parameters::Builder builder; + builder.Add(OutgoingSSNResetRequestParameter( + kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 1), + {StreamID(1)})); + + std::vector responses = + HandleAndCatchResponse(ReConfigChunk(builder.Build())); + EXPECT_THAT(responses, SizeIs(1)); + EXPECT_EQ(responses[0].result(), ResponseResult::kSuccessPerformed); + + gen_.ResetStream(); + reasm_.Add(AddTo(kPeerInitialTsn, 2), gen_.Ordered({1, 2, 3, 4}, "BE")); + EXPECT_THAT(reasm_.FlushMessages(), + 
UnorderedElementsAre( + SctpMessageIs(StreamID(1), PPID(53), kShortPayload))); +} + +TEST_F(StreamResetHandlerTest, ResetStreamsDeferred) { + DataGeneratorOptions opts; + opts.message_id = MID(0); + reasm_.Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE", opts)); + + opts.message_id = MID(1); + reasm_.Add(AddTo(kPeerInitialTsn, 1), gen_.Ordered({1, 2, 3, 4}, "BE", opts)); + + buf_.Observe(kPeerInitialTsn); + buf_.Observe(AddTo(kPeerInitialTsn, 1)); + EXPECT_THAT(reasm_.FlushMessages(), + UnorderedElementsAre( + SctpMessageIs(StreamID(1), PPID(53), kShortPayload), + SctpMessageIs(StreamID(1), PPID(53), kShortPayload))); + + Parameters::Builder builder; + builder.Add(OutgoingSSNResetRequestParameter( + kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 3), + {StreamID(1)})); + + std::vector responses = + HandleAndCatchResponse(ReConfigChunk(builder.Build())); + EXPECT_THAT(responses, SizeIs(1)); + EXPECT_EQ(responses[0].result(), ResponseResult::kInProgress); + + opts.message_id = MID(1); + opts.ppid = PPID(5); + reasm_.Add(AddTo(kPeerInitialTsn, 5), gen_.Ordered({1, 2, 3, 4}, "BE", opts)); + reasm_.MaybeResetStreamsDeferred(AddTo(kPeerInitialTsn, 1)); + + opts.message_id = MID(0); + opts.ppid = PPID(4); + reasm_.Add(AddTo(kPeerInitialTsn, 4), gen_.Ordered({1, 2, 3, 4}, "BE", opts)); + reasm_.MaybeResetStreamsDeferred(AddTo(kPeerInitialTsn, 1)); + + opts.message_id = MID(3); + opts.ppid = PPID(3); + reasm_.Add(AddTo(kPeerInitialTsn, 3), gen_.Ordered({1, 2, 3, 4}, "BE", opts)); + reasm_.MaybeResetStreamsDeferred(AddTo(kPeerInitialTsn, 1)); + + opts.message_id = MID(2); + opts.ppid = PPID(2); + reasm_.Add(AddTo(kPeerInitialTsn, 2), gen_.Ordered({1, 2, 3, 4}, "BE", opts)); + reasm_.MaybeResetStreamsDeferred(AddTo(kPeerInitialTsn, 5)); + + EXPECT_THAT( + reasm_.FlushMessages(), + UnorderedElementsAre(SctpMessageIs(StreamID(1), PPID(2), kShortPayload), + SctpMessageIs(StreamID(1), PPID(3), kShortPayload), + SctpMessageIs(StreamID(1), PPID(4), 
kShortPayload), + SctpMessageIs(StreamID(1), PPID(5), kShortPayload))); +} + +TEST_F(StreamResetHandlerTest, SendOutgoingRequestDirectly) { + EXPECT_CALL(producer_, PrepareResetStreams).Times(1); + handler_.ResetStreams(std::vector({StreamID(42)})); + + EXPECT_CALL(producer_, CanResetStreams()).WillOnce(Return(true)); + absl::optional reconfig = handler_.MakeStreamResetRequest(); + ASSERT_TRUE(reconfig.has_value()); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req, + reconfig->parameters().get()); + + EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn); + EXPECT_EQ(req.sender_last_assigned_tsn(), + TSN(*retransmission_queue_.next_tsn() - 1)); + EXPECT_THAT(req.stream_ids(), UnorderedElementsAre(StreamID(42))); +} + +TEST_F(StreamResetHandlerTest, ResetMultipleStreamsInOneRequest) { + EXPECT_CALL(producer_, PrepareResetStreams).Times(3); + handler_.ResetStreams(std::vector({StreamID(42)})); + handler_.ResetStreams( + std::vector({StreamID(43), StreamID(44), StreamID(41)})); + handler_.ResetStreams(std::vector({StreamID(42), StreamID(40)})); + + EXPECT_CALL(producer_, CanResetStreams()).WillOnce(Return(true)); + absl::optional reconfig = handler_.MakeStreamResetRequest(); + ASSERT_TRUE(reconfig.has_value()); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req, + reconfig->parameters().get()); + + EXPECT_EQ(req.request_sequence_number(), kMyInitialReqSn); + EXPECT_EQ(req.sender_last_assigned_tsn(), + TSN(*retransmission_queue_.next_tsn() - 1)); + EXPECT_THAT(req.stream_ids(), + UnorderedElementsAre(StreamID(40), StreamID(41), StreamID(42), + StreamID(43), StreamID(44))); +} + +TEST_F(StreamResetHandlerTest, SendOutgoingRequestDeferred) { + EXPECT_CALL(producer_, PrepareResetStreams).Times(1); + handler_.ResetStreams(std::vector({StreamID(42)})); + + EXPECT_CALL(producer_, CanResetStreams()) + .WillOnce(Return(false)) + .WillOnce(Return(false)) + .WillOnce(Return(true)); + + 
EXPECT_FALSE(handler_.MakeStreamResetRequest().has_value()); + EXPECT_FALSE(handler_.MakeStreamResetRequest().has_value()); + EXPECT_TRUE(handler_.MakeStreamResetRequest().has_value()); +} + +TEST_F(StreamResetHandlerTest, SendOutgoingResettingOnPositiveResponse) { + EXPECT_CALL(producer_, PrepareResetStreams).Times(1); + handler_.ResetStreams(std::vector({StreamID(42)})); + + EXPECT_CALL(producer_, CanResetStreams()).WillOnce(Return(true)); + + absl::optional reconfig = handler_.MakeStreamResetRequest(); + ASSERT_TRUE(reconfig.has_value()); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req, + reconfig->parameters().get()); + + Parameters::Builder builder; + builder.Add(ReconfigurationResponseParameter( + req.request_sequence_number(), ResponseResult::kSuccessPerformed)); + ReConfigChunk response_reconfig(builder.Build()); + + EXPECT_CALL(producer_, CommitResetStreams()).Times(1); + EXPECT_CALL(producer_, RollbackResetStreams()).Times(0); + + // Processing a response shouldn't result in sending anything. 
+ EXPECT_CALL(callbacks_, OnError).Times(0); + EXPECT_CALL(callbacks_, SendPacket).Times(0); + handler_.HandleReConfig(std::move(response_reconfig)); +} + +TEST_F(StreamResetHandlerTest, SendOutgoingResetRollbackOnError) { + EXPECT_CALL(producer_, PrepareResetStreams).Times(1); + handler_.ResetStreams(std::vector({StreamID(42)})); + + EXPECT_CALL(producer_, CanResetStreams()).WillOnce(Return(true)); + + absl::optional reconfig = handler_.MakeStreamResetRequest(); + ASSERT_TRUE(reconfig.has_value()); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req, + reconfig->parameters().get()); + + Parameters::Builder builder; + builder.Add(ReconfigurationResponseParameter( + req.request_sequence_number(), ResponseResult::kErrorBadSequenceNumber)); + ReConfigChunk response_reconfig(builder.Build()); + + EXPECT_CALL(producer_, CommitResetStreams()).Times(0); + EXPECT_CALL(producer_, RollbackResetStreams()).Times(1); + + // Only requests should result in sending responses. + EXPECT_CALL(callbacks_, OnError).Times(0); + EXPECT_CALL(callbacks_, SendPacket).Times(0); + handler_.HandleReConfig(std::move(response_reconfig)); +} + +TEST_F(StreamResetHandlerTest, SendOutgoingResetRetransmitOnInProgress) { + static constexpr StreamID kStreamToReset = StreamID(42); + + EXPECT_CALL(producer_, PrepareResetStreams).Times(1); + handler_.ResetStreams(std::vector({kStreamToReset})); + + EXPECT_CALL(producer_, CanResetStreams()).WillOnce(Return(true)); + + absl::optional reconfig1 = handler_.MakeStreamResetRequest(); + ASSERT_TRUE(reconfig1.has_value()); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req1, + reconfig1->parameters().get()); + + // Simulate that the peer responded "In Progress". 
+ Parameters::Builder builder; + builder.Add(ReconfigurationResponseParameter(req1.request_sequence_number(), + ResponseResult::kInProgress)); + ReConfigChunk response_reconfig(builder.Build()); + + EXPECT_CALL(producer_, CommitResetStreams()).Times(0); + EXPECT_CALL(producer_, RollbackResetStreams()).Times(0); + + // Processing a response shouldn't result in sending anything. + EXPECT_CALL(callbacks_, OnError).Times(0); + EXPECT_CALL(callbacks_, SendPacket).Times(0); + handler_.HandleReConfig(std::move(response_reconfig)); + + // Let some time pass, so that the reconfig timer expires, and retries the + // same request. + EXPECT_CALL(callbacks_, SendPacket).Times(1); + AdvanceTime(kRto); + + std::vector payload = callbacks_.ConsumeSentPacket(); + ASSERT_FALSE(payload.empty()); + + ASSERT_HAS_VALUE_AND_ASSIGN(SctpPacket packet, SctpPacket::Parse(payload)); + ASSERT_THAT(packet.descriptors(), SizeIs(1)); + ASSERT_HAS_VALUE_AND_ASSIGN( + ReConfigChunk reconfig2, + ReConfigChunk::Parse(packet.descriptors()[0].data)); + + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req2, + reconfig2.parameters().get()); + + EXPECT_EQ(req2.request_sequence_number(), + AddTo(req1.request_sequence_number(), 1)); + EXPECT_THAT(req2.stream_ids(), UnorderedElementsAre(kStreamToReset)); +} + +TEST_F(StreamResetHandlerTest, ResetWhileRequestIsSentWillQueue) { + EXPECT_CALL(producer_, PrepareResetStreams).Times(1); + handler_.ResetStreams(std::vector({StreamID(42)})); + + EXPECT_CALL(producer_, CanResetStreams()).WillOnce(Return(true)); + absl::optional reconfig1 = handler_.MakeStreamResetRequest(); + ASSERT_TRUE(reconfig1.has_value()); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req1, + reconfig1->parameters().get()); + EXPECT_EQ(req1.request_sequence_number(), kMyInitialReqSn); + EXPECT_EQ(req1.sender_last_assigned_tsn(), + AddTo(retransmission_queue_.next_tsn(), -1)); + EXPECT_THAT(req1.stream_ids(), UnorderedElementsAre(StreamID(42))); + + // Streams 
reset while the request is in-flight will be queued. + StreamID stream_ids[] = {StreamID(41), StreamID(43)}; + handler_.ResetStreams(stream_ids); + EXPECT_EQ(handler_.MakeStreamResetRequest(), absl::nullopt); + + Parameters::Builder builder; + builder.Add(ReconfigurationResponseParameter( + req1.request_sequence_number(), ResponseResult::kSuccessPerformed)); + ReConfigChunk response_reconfig(builder.Build()); + + EXPECT_CALL(producer_, CommitResetStreams()).Times(1); + EXPECT_CALL(producer_, RollbackResetStreams()).Times(0); + + // Processing a response shouldn't result in sending anything. + EXPECT_CALL(callbacks_, OnError).Times(0); + EXPECT_CALL(callbacks_, SendPacket).Times(0); + handler_.HandleReConfig(std::move(response_reconfig)); + + // Response has been processed. A new request can be sent. + EXPECT_CALL(producer_, CanResetStreams()).WillOnce(Return(true)); + absl::optional reconfig2 = handler_.MakeStreamResetRequest(); + ASSERT_TRUE(reconfig2.has_value()); + ASSERT_HAS_VALUE_AND_ASSIGN( + OutgoingSSNResetRequestParameter req2, + reconfig2->parameters().get()); + EXPECT_EQ(req2.request_sequence_number(), AddTo(kMyInitialReqSn, 1)); + EXPECT_EQ(req2.sender_last_assigned_tsn(), + TSN(*retransmission_queue_.next_tsn() - 1)); + EXPECT_THAT(req2.stream_ids(), + UnorderedElementsAre(StreamID(41), StreamID(43))); +} + +TEST_F(StreamResetHandlerTest, SendIncomingResetJustReturnsNothingPerformed) { + Parameters::Builder builder; + builder.Add( + IncomingSSNResetRequestParameter(kPeerInitialReqSn, {StreamID(1)})); + + std::vector responses = + HandleAndCatchResponse(ReConfigChunk(builder.Build())); + ASSERT_THAT(responses, SizeIs(1)); + EXPECT_THAT(responses[0].response_sequence_number(), kPeerInitialReqSn); + EXPECT_THAT(responses[0].result(), ResponseResult::kSuccessNothingToDo); +} + +TEST_F(StreamResetHandlerTest, SendSameRequestTwiceReturnsNothingToDo) { + reasm_.Add(kPeerInitialTsn, gen_.Ordered({1, 2, 3, 4}, "BE")); + reasm_.Add(AddTo(kPeerInitialTsn, 1), 
gen_.Ordered({1, 2, 3, 4}, "BE")); + + buf_.Observe(kPeerInitialTsn); + buf_.Observe(AddTo(kPeerInitialTsn, 1)); + EXPECT_THAT(reasm_.FlushMessages(), + UnorderedElementsAre( + SctpMessageIs(StreamID(1), PPID(53), kShortPayload), + SctpMessageIs(StreamID(1), PPID(53), kShortPayload))); + + Parameters::Builder builder1; + builder1.Add(OutgoingSSNResetRequestParameter( + kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 1), + {StreamID(1)})); + + std::vector responses1 = + HandleAndCatchResponse(ReConfigChunk(builder1.Build())); + EXPECT_THAT(responses1, SizeIs(1)); + EXPECT_EQ(responses1[0].result(), ResponseResult::kSuccessPerformed); + + Parameters::Builder builder2; + builder2.Add(OutgoingSSNResetRequestParameter( + kPeerInitialReqSn, ReconfigRequestSN(3), AddTo(kPeerInitialTsn, 1), + {StreamID(1)})); + + std::vector responses2 = + HandleAndCatchResponse(ReConfigChunk(builder2.Build())); + EXPECT_THAT(responses2, SizeIs(1)); + EXPECT_EQ(responses2[0].result(), ResponseResult::kSuccessNothingToDo); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/socket/transmission_control_block.cc b/net/dcsctp/socket/transmission_control_block.cc new file mode 100644 index 0000000000..4fde40cee9 --- /dev/null +++ b/net/dcsctp/socket/transmission_control_block.cc @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/socket/transmission_control_block.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/idata_chunk.h" +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/reconfig_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/rx/data_tracker.h" +#include "net/dcsctp/rx/reassembly_queue.h" +#include "net/dcsctp/socket/capabilities.h" +#include "net/dcsctp/socket/stream_reset_handler.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/retransmission_queue.h" +#include "net/dcsctp/tx/retransmission_timeout.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { + +void TransmissionControlBlock::ObserveRTT(DurationMs rtt) { + DurationMs prev_rto = rto_.rto(); + rto_.ObserveRTT(rtt); + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "new rtt=" << *rtt + << ", srtt=" << *rto_.srtt() << ", rto=" << *rto_.rto() + << " (" << *prev_rto << ")"; + t3_rtx_->set_duration(rto_.rto()); + + DurationMs delayed_ack_tmo = + std::min(rto_.rto() * 0.5, options_.delayed_ack_max_timeout); + delayed_ack_timer_->set_duration(delayed_ack_tmo); +} + +absl::optional TransmissionControlBlock::OnRtxTimerExpiry() { + TimeMs now = callbacks_.TimeMillis(); + RTC_DLOG(LS_INFO) << log_prefix_ << "Timer " << t3_rtx_->name() + << " has expired"; + if (cookie_echo_chunk_.has_value()) { + // In the COOKIE_ECHO state, let the T1-COOKIE timer trigger + // retransmissions, to avoid having two timers doing that. 
+ RTC_DLOG(LS_VERBOSE) << "Not retransmitting as T1-cookie is active."; + } else { + if (IncrementTxErrorCounter("t3-rtx expired")) { + retransmission_queue_.HandleT3RtxTimerExpiry(); + SendBufferedPackets(now); + } + } + return absl::nullopt; +} + +absl::optional TransmissionControlBlock::OnDelayedAckTimerExpiry() { + data_tracker_.HandleDelayedAckTimerExpiry(); + MaybeSendSack(); + return absl::nullopt; +} + +void TransmissionControlBlock::MaybeSendSack() { + if (data_tracker_.ShouldSendAck(/*also_if_delayed=*/false)) { + SctpPacket::Builder builder = PacketBuilder(); + builder.Add( + data_tracker_.CreateSelectiveAck(reassembly_queue_.remaining_bytes())); + Send(builder); + } +} + +void TransmissionControlBlock::SendBufferedPackets(SctpPacket::Builder& builder, + TimeMs now) { + for (int packet_idx = 0;; ++packet_idx) { + // Only add control chunks to the first packet that is sent, if sending + // multiple packets in one go (as allowed by the congestion window). + if (packet_idx == 0) { + if (cookie_echo_chunk_.has_value()) { + // https://tools.ietf.org/html/rfc4960#section-5.1 + // "The COOKIE ECHO chunk can be bundled with any pending outbound DATA + // chunks, but it MUST be the first chunk in the packet..." + RTC_DCHECK(builder.empty()); + builder.Add(*cookie_echo_chunk_); + } + + // https://tools.ietf.org/html/rfc4960#section-6 + // "Before an endpoint transmits a DATA chunk, if any received DATA + // chunks have not been acknowledged (e.g., due to delayed ack), the + // sender should create a SACK and bundle it with the outbound DATA chunk, + // as long as the size of the final SCTP packet does not exceed the + // current MTU." 
+ if (data_tracker_.ShouldSendAck(/*also_if_delayed=*/true)) { + builder.Add(data_tracker_.CreateSelectiveAck( + reassembly_queue_.remaining_bytes())); + } + if (retransmission_queue_.ShouldSendForwardTsn(now)) { + if (capabilities_.message_interleaving) { + builder.Add(retransmission_queue_.CreateIForwardTsn()); + } else { + builder.Add(retransmission_queue_.CreateForwardTsn()); + } + } + absl::optional reconfig = + stream_reset_handler_.MakeStreamResetRequest(); + if (reconfig.has_value()) { + builder.Add(*reconfig); + } + } + + auto chunks = + retransmission_queue_.GetChunksToSend(now, builder.bytes_remaining()); + for (auto& elem : chunks) { + TSN tsn = elem.first; + Data data = std::move(elem.second); + if (capabilities_.message_interleaving) { + builder.Add(IDataChunk(tsn, std::move(data), false)); + } else { + builder.Add(DataChunk(tsn, std::move(data), false)); + } + } + if (builder.empty()) { + break; + } + Send(builder); + + if (cookie_echo_chunk_.has_value()) { + // https://tools.ietf.org/html/rfc4960#section-5.1 + // "... until the COOKIE ACK is returned the sender MUST NOT send any + // other packets to the peer." + break; + } + } +} + +std::string TransmissionControlBlock::ToString() const { + rtc::StringBuilder sb; + + sb.AppendFormat( + "verification_tag=%08x, last_cumulative_ack=%u, capabilities=", + *peer_verification_tag_, *data_tracker_.last_cumulative_acked_tsn()); + + if (capabilities_.partial_reliability) { + sb << "PR,"; + } + if (capabilities_.message_interleaving) { + sb << "IL,"; + } + if (capabilities_.reconfig) { + sb << "Reconfig,"; + } + + return sb.Release(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/socket/transmission_control_block.h b/net/dcsctp/socket/transmission_control_block.h new file mode 100644 index 0000000000..172f7c0c08 --- /dev/null +++ b/net/dcsctp/socket/transmission_control_block.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_SOCKET_TRANSMISSION_CONTROL_BLOCK_H_ +#define NET_DCSCTP_SOCKET_TRANSMISSION_CONTROL_BLOCK_H_ + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/cookie_echo_chunk.h" +#include "net/dcsctp/packet/sctp_packet.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/rx/data_tracker.h" +#include "net/dcsctp/rx/reassembly_queue.h" +#include "net/dcsctp/socket/capabilities.h" +#include "net/dcsctp/socket/context.h" +#include "net/dcsctp/socket/heartbeat_handler.h" +#include "net/dcsctp/socket/stream_reset_handler.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/retransmission_error_counter.h" +#include "net/dcsctp/tx/retransmission_queue.h" +#include "net/dcsctp/tx/retransmission_timeout.h" +#include "net/dcsctp/tx/send_queue.h" + +namespace dcsctp { + +// The TransmissionControlBlock (TCB) represents an open connection to a peer, +// and holds all the resources for that. If the connection is e.g. shutdown, +// closed or restarted, this object will be deleted and/or replaced. 
+class TransmissionControlBlock : public Context { + public: + TransmissionControlBlock(TimerManager& timer_manager, + absl::string_view log_prefix, + const DcSctpOptions& options, + const Capabilities& capabilities, + DcSctpSocketCallbacks& callbacks, + SendQueue& send_queue, + VerificationTag my_verification_tag, + TSN my_initial_tsn, + VerificationTag peer_verification_tag, + TSN peer_initial_tsn, + size_t a_rwnd, + TieTag tie_tag, + std::function is_connection_established, + std::function send_fn) + : log_prefix_(log_prefix), + options_(options), + timer_manager_(timer_manager), + capabilities_(capabilities), + callbacks_(callbacks), + t3_rtx_(timer_manager_.CreateTimer( + "t3-rtx", + [this]() { return OnRtxTimerExpiry(); }, + TimerOptions(options.rto_initial))), + delayed_ack_timer_(timer_manager_.CreateTimer( + "delayed-ack", + [this]() { return OnDelayedAckTimerExpiry(); }, + TimerOptions(options.delayed_ack_max_timeout, + TimerBackoffAlgorithm::kExponential, + /*max_restarts=*/0))), + my_verification_tag_(my_verification_tag), + my_initial_tsn_(my_initial_tsn), + peer_verification_tag_(peer_verification_tag), + peer_initial_tsn_(peer_initial_tsn), + tie_tag_(tie_tag), + is_connection_established_(std::move(is_connection_established)), + send_fn_(std::move(send_fn)), + rto_(options), + tx_error_counter_(log_prefix, options), + data_tracker_(log_prefix, delayed_ack_timer_.get(), peer_initial_tsn), + reassembly_queue_(log_prefix, + peer_initial_tsn, + options.max_receiver_window_buffer_size), + retransmission_queue_( + log_prefix, + my_initial_tsn, + a_rwnd, + send_queue, + [this](DurationMs rtt) { return ObserveRTT(rtt); }, + [this]() { tx_error_counter_.Clear(); }, + *t3_rtx_, + options, + capabilities.partial_reliability, + capabilities.message_interleaving), + stream_reset_handler_(log_prefix, + this, + &timer_manager, + &data_tracker_, + &reassembly_queue_, + &retransmission_queue_), + heartbeat_handler_(log_prefix, options, this, &timer_manager_) {} + + 
// Implementation of `Context`. + bool is_connection_established() const override { + return is_connection_established_(); + } + TSN my_initial_tsn() const override { return my_initial_tsn_; } + TSN peer_initial_tsn() const override { return peer_initial_tsn_; } + DcSctpSocketCallbacks& callbacks() const override { return callbacks_; } + void ObserveRTT(DurationMs rtt) override; + DurationMs current_rto() const override { return rto_.rto(); } + bool IncrementTxErrorCounter(absl::string_view reason) override { + return tx_error_counter_.Increment(reason); + } + void ClearTxErrorCounter() override { tx_error_counter_.Clear(); } + SctpPacket::Builder PacketBuilder() const override { + return SctpPacket::Builder(peer_verification_tag_, options_); + } + bool HasTooManyTxErrors() const override { + return tx_error_counter_.IsExhausted(); + } + void Send(SctpPacket::Builder& builder) override { send_fn_(builder); } + + // Other accessors + DataTracker& data_tracker() { return data_tracker_; } + ReassemblyQueue& reassembly_queue() { return reassembly_queue_; } + RetransmissionQueue& retransmission_queue() { return retransmission_queue_; } + StreamResetHandler& stream_reset_handler() { return stream_reset_handler_; } + HeartbeatHandler& heartbeat_handler() { return heartbeat_handler_; } + + // Returns this socket's verification tag, set in all packet headers. + VerificationTag my_verification_tag() const { return my_verification_tag_; } + // Returns the peer's verification tag, which should be in received packets. + VerificationTag peer_verification_tag() const { + return peer_verification_tag_; + } + // All negotiated supported capabilities. + const Capabilities& capabilities() const { return capabilities_; } + // A 64-bit tie-tag, used to e.g. detect reconnections. + TieTag tie_tag() const { return tie_tag_; } + + // Sends a SACK, if there is a need to. + void MaybeSendSack(); + + // Will be set while the socket is in kCookieEcho state. 
In this state, there + // can only be a single packet outstanding, and it must contain the COOKIE + // ECHO chunk as the first chunk in that packet, until the COOKIE ACK has been + // received, which will make the socket call `ClearCookieEchoChunk`. + void SetCookieEchoChunk(CookieEchoChunk chunk) { + cookie_echo_chunk_ = std::move(chunk); + } + + // Called when the COOKIE ACK chunk has been received, to allow further + // packets to be sent. + void ClearCookieEchoChunk() { cookie_echo_chunk_ = absl::nullopt; } + + bool has_cookie_echo_chunk() const { return cookie_echo_chunk_.has_value(); } + + // Fills `builder` (which may already be filled with control chunks) with + // other control and data chunks, and sends packets as much as can be + // allowed by the congestion control algorithm. + void SendBufferedPackets(SctpPacket::Builder& builder, TimeMs now); + + // As above, but without passing in a builder. If `cookie_echo_chunk_` is + // present, then only one packet will be sent, with this chunk as the first + // chunk. + void SendBufferedPackets(TimeMs now) { + SctpPacket::Builder builder(peer_verification_tag_, options_); + SendBufferedPackets(builder, now); + } + + // Returns a textual representation of this object, for logging. + std::string ToString() const; + + private: + // Will be called when the retransmission timer (t3-rtx) expires. + absl::optional OnRtxTimerExpiry(); + // Will be called when the delayed ack timer expires. + absl::optional OnDelayedAckTimerExpiry(); + + const std::string log_prefix_; + const DcSctpOptions options_; + TimerManager& timer_manager_; + // Negotiated capabilities that both peers support. + const Capabilities capabilities_; + DcSctpSocketCallbacks& callbacks_; + // The data retransmission timer, called t3-rtx in SCTP. + const std::unique_ptr t3_rtx_; + // Delayed ack timer, which triggers when acks should be sent (when delayed). 
+ const std::unique_ptr delayed_ack_timer_; + const VerificationTag my_verification_tag_; + const TSN my_initial_tsn_; + const VerificationTag peer_verification_tag_; + const TSN peer_initial_tsn_; + // Nonce, used to detect reconnections. + const TieTag tie_tag_; + const std::function is_connection_established_; + const std::function send_fn_; + + RetransmissionTimeout rto_; + RetransmissionErrorCounter tx_error_counter_; + DataTracker data_tracker_; + ReassemblyQueue reassembly_queue_; + RetransmissionQueue retransmission_queue_; + StreamResetHandler stream_reset_handler_; + HeartbeatHandler heartbeat_handler_; + + // Only valid when the socket state == State::kCookieEchoed. In this state, + // the socket must wait for COOKIE ACK to continue sending any packets (not + // including a COOKIE ECHO). So if `cookie_echo_chunk_` is present, the + // SendBufferedChunks will always only just send one packet, with this chunk + // as the first chunk in the packet. + absl::optional cookie_echo_chunk_ = absl::nullopt; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_SOCKET_TRANSMISSION_CONTROL_BLOCK_H_ diff --git a/net/dcsctp/testing/BUILD.gn b/net/dcsctp/testing/BUILD.gn new file mode 100644 index 0000000000..5367ef8c6f --- /dev/null +++ b/net/dcsctp/testing/BUILD.gn @@ -0,0 +1,35 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +import("../../../webrtc.gni") + +rtc_source_set("testing_macros") { + testonly = true + sources = [ "testing_macros.h" ] +} + +rtc_library("data_generator") { + testonly = true + deps = [ + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:internal_types", + "../packet:data", + "../public:types", + ] + sources = [ + "data_generator.cc", + "data_generator.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} diff --git a/net/dcsctp/testing/data_generator.cc b/net/dcsctp/testing/data_generator.cc new file mode 100644 index 0000000000..e4f9f91384 --- /dev/null +++ b/net/dcsctp/testing/data_generator.cc @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/testing/data_generator.h" + +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { +constexpr PPID kPpid = PPID(53); + +Data DataGenerator::Ordered(std::vector payload, + absl::string_view flags, + const DataGeneratorOptions opts) { + Data::IsBeginning is_beginning(flags.find('B') != std::string::npos); + Data::IsEnd is_end(flags.find('E') != std::string::npos); + + if (is_beginning) { + fsn_ = FSN(0); + } else { + fsn_ = FSN(*fsn_ + 1); + } + MID message_id = opts.message_id.value_or(message_id_); + Data ret = Data(opts.stream_id, SSN(static_cast(*message_id)), + message_id, fsn_, opts.ppid, std::move(payload), is_beginning, + is_end, IsUnordered(false)); + + if (is_end) { + message_id_ = MID(*message_id + 1); + } + return ret; +} + +Data DataGenerator::Unordered(std::vector payload, + absl::string_view flags, + const DataGeneratorOptions opts) { + Data::IsBeginning is_beginning(flags.find('B') != std::string::npos); + Data::IsEnd is_end(flags.find('E') != std::string::npos); + + if (is_beginning) { + fsn_ = FSN(0); + } else { + fsn_ = FSN(*fsn_ + 1); + } + MID message_id = opts.message_id.value_or(message_id_); + Data ret = Data(opts.stream_id, SSN(0), message_id, fsn_, kPpid, + std::move(payload), is_beginning, is_end, IsUnordered(true)); + if (is_end) { + message_id_ = MID(*message_id + 1); + } + return ret; +} +} // namespace dcsctp diff --git a/net/dcsctp/testing/data_generator.h b/net/dcsctp/testing/data_generator.h new file mode 100644 index 0000000000..859450b1c3 --- /dev/null +++ b/net/dcsctp/testing/data_generator.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TESTING_DATA_GENERATOR_H_ +#define NET_DCSCTP_TESTING_DATA_GENERATOR_H_ + +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/data.h" + +namespace dcsctp { + +struct DataGeneratorOptions { + StreamID stream_id = StreamID(1); + absl::optional message_id = absl::nullopt; + PPID ppid = PPID(53); +}; + +// Generates Data with correct sequence numbers, and used only in unit tests. +class DataGenerator { + public: + explicit DataGenerator(MID start_message_id = MID(0)) + : message_id_(start_message_id) {} + + // Generates ordered "data" with the provided `payload` and flags, which can + // contain "B" for setting the "is_beginning" flag, and/or "E" for setting the + // "is_end" flag. + Data Ordered(std::vector payload, + absl::string_view flags = "", + const DataGeneratorOptions opts = {}); + + // Generates unordered "data" with the provided `payload` and flags, which can + // contain "B" for setting the "is_beginning" flag, and/or "E" for setting the + // "is_end" flag. + Data Unordered(std::vector payload, + absl::string_view flags = "", + const DataGeneratorOptions opts = {}); + + // Resets the Message ID identifier - simulating a "stream reset". + void ResetStream() { message_id_ = MID(0); } + + private: + MID message_id_; + FSN fsn_ = FSN(0); +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_TESTING_DATA_GENERATOR_H_ diff --git a/net/dcsctp/testing/testing_macros.h b/net/dcsctp/testing/testing_macros.h new file mode 100644 index 0000000000..5cbdfffdce --- /dev/null +++ b/net/dcsctp/testing/testing_macros.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TESTING_TESTING_MACROS_H_ +#define NET_DCSCTP_TESTING_TESTING_MACROS_H_ + +#include + +namespace dcsctp { + +#define DCSCTP_CONCAT_INNER_(x, y) x##y +#define DCSCTP_CONCAT_(x, y) DCSCTP_CONCAT_INNER_(x, y) + +// Similar to ASSERT_OK_AND_ASSIGN, this works with an absl::optional<> instead +// of an absl::StatusOr<>. +#define ASSERT_HAS_VALUE_AND_ASSIGN(lhs, rexpr) \ + auto DCSCTP_CONCAT_(tmp_opt_val__, __LINE__) = rexpr; \ + ASSERT_TRUE(DCSCTP_CONCAT_(tmp_opt_val__, __LINE__).has_value()); \ + lhs = *std::move(DCSCTP_CONCAT_(tmp_opt_val__, __LINE__)); + +} // namespace dcsctp + +#endif // NET_DCSCTP_TESTING_TESTING_MACROS_H_ diff --git a/net/dcsctp/timer/BUILD.gn b/net/dcsctp/timer/BUILD.gn new file mode 100644 index 0000000000..a0ba5b030e --- /dev/null +++ b/net/dcsctp/timer/BUILD.gn @@ -0,0 +1,73 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +import("../../../webrtc.gni") + +rtc_library("timer") { + deps = [ + "../../../api:array_view", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../public:socket", + "../public:strong_alias", + "../public:types", + ] + sources = [ + "fake_timeout.h", + "timer.cc", + "timer.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("task_queue_timeout") { + deps = [ + "../../../api:array_view", + "../../../api/task_queue:task_queue", + "../../../rtc_base", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base/task_utils:pending_task_safety_flag", + "../../../rtc_base/task_utils:to_queued_task", + "../public:socket", + "../public:strong_alias", + "../public:types", + ] + sources = [ + "task_queue_timeout.cc", + "task_queue_timeout.h", + ] +} + +if (rtc_include_tests) { + rtc_library("dcsctp_timer_unittests") { + testonly = true + + defines = [] + deps = [ + ":task_queue_timeout", + ":timer", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "../../../test/time_controller:time_controller", + "../public:socket", + ] + sources = [ + "task_queue_timeout_test.cc", + "timer_test.cc", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + } +} diff --git a/net/dcsctp/timer/fake_timeout.h b/net/dcsctp/timer/fake_timeout.h new file mode 100644 index 0000000000..927e6b2808 --- /dev/null +++ b/net/dcsctp/timer/fake_timeout.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TIMER_FAKE_TIMEOUT_H_ +#define NET_DCSCTP_TIMER_FAKE_TIMEOUT_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "net/dcsctp/public/timeout.h" +#include "rtc_base/checks.h" + +namespace dcsctp { + +// A timeout used in tests. +class FakeTimeout : public Timeout { + public: + explicit FakeTimeout(std::function get_time, + std::function on_delete) + : get_time_(std::move(get_time)), on_delete_(std::move(on_delete)) {} + + ~FakeTimeout() override { on_delete_(this); } + + void Start(DurationMs duration_ms, TimeoutID timeout_id) override { + RTC_DCHECK(expiry_ == TimeMs::InfiniteFuture()); + timeout_id_ = timeout_id; + expiry_ = get_time_() + duration_ms; + } + void Stop() override { + RTC_DCHECK(expiry_ != TimeMs::InfiniteFuture()); + expiry_ = TimeMs::InfiniteFuture(); + } + + bool EvaluateHasExpired(TimeMs now) { + if (now >= expiry_) { + expiry_ = TimeMs::InfiniteFuture(); + return true; + } + return false; + } + + TimeoutID timeout_id() const { return timeout_id_; } + + private: + const std::function get_time_; + const std::function on_delete_; + + TimeoutID timeout_id_ = TimeoutID(0); + TimeMs expiry_ = TimeMs::InfiniteFuture(); +}; + +class FakeTimeoutManager { + public: + // The `get_time` function must return the current time, relative to any + // epoch. 
+ explicit FakeTimeoutManager(std::function get_time) + : get_time_(std::move(get_time)) {} + + std::unique_ptr CreateTimeout() { + auto timer = std::make_unique( + get_time_, [this](FakeTimeout* timer) { timers_.erase(timer); }); + timers_.insert(timer.get()); + return timer; + } + + // NOTE: This can't return a vector, as calling EvaluateHasExpired requires + // calling socket->HandleTimeout directly afterwards, as the owning Timer + // still believes it's running, and it needs to be updated to set + // Timer::is_running_ to false before you operate on the Timer or Timeout + // again. + absl::optional GetNextExpiredTimeout() { + TimeMs now = get_time_(); + std::vector expired_timers; + for (auto& timer : timers_) { + if (timer->EvaluateHasExpired(now)) { + return timer->timeout_id(); + } + } + return absl::nullopt; + } + + private: + const std::function get_time_; + std::unordered_set timers_; +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_TIMER_FAKE_TIMEOUT_H_ diff --git a/net/dcsctp/timer/task_queue_timeout.cc b/net/dcsctp/timer/task_queue_timeout.cc new file mode 100644 index 0000000000..6d3054eeb8 --- /dev/null +++ b/net/dcsctp/timer/task_queue_timeout.cc @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/timer/task_queue_timeout.h" + +#include "rtc_base/logging.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" + +namespace dcsctp { + +TaskQueueTimeoutFactory::TaskQueueTimeout::TaskQueueTimeout( + TaskQueueTimeoutFactory& parent) + : parent_(parent), + pending_task_safety_flag_(webrtc::PendingTaskSafetyFlag::Create()) {} + +TaskQueueTimeoutFactory::TaskQueueTimeout::~TaskQueueTimeout() { + RTC_DCHECK_RUN_ON(&parent_.thread_checker_); + pending_task_safety_flag_->SetNotAlive(); +} + +void TaskQueueTimeoutFactory::TaskQueueTimeout::Start(DurationMs duration_ms, + TimeoutID timeout_id) { + RTC_DCHECK_RUN_ON(&parent_.thread_checker_); + RTC_DCHECK(timeout_expiration_ == TimeMs::InfiniteFuture()); + timeout_expiration_ = parent_.get_time_() + duration_ms; + timeout_id_ = timeout_id; + + if (timeout_expiration_ >= posted_task_expiration_) { + // There is already a running task, and it's scheduled to expire sooner than + // the new expiration time. Don't do anything; The `timeout_expiration_` has + // already been updated and if the delayed task _does_ expire and the timer + // hasn't been stopped, that will be noticed in the timeout handler, and the + // task will be re-scheduled. Most timers are stopped before they expire. + return; + } + + if (posted_task_expiration_ != TimeMs::InfiniteFuture()) { + RTC_DLOG(LS_VERBOSE) << "New timeout duration is less than scheduled - " + "ghosting old delayed task."; + // There is already a scheduled delayed task, but its expiration time is + // further away than the new expiration, so it can't be used. It will be + // "killed" by replacing the safety flag. This is not expected to happen + // especially often; Mainly when a timer did exponential backoff and + // later recovered. 
+ pending_task_safety_flag_->SetNotAlive(); + pending_task_safety_flag_ = webrtc::PendingTaskSafetyFlag::Create(); + } + + posted_task_expiration_ = timeout_expiration_; + parent_.task_queue_.PostDelayedTask( + webrtc::ToQueuedTask( + pending_task_safety_flag_, + [timeout_id, this]() { + RTC_DLOG(LS_VERBOSE) << "Timout expired: " << timeout_id.value(); + RTC_DCHECK_RUN_ON(&parent_.thread_checker_); + RTC_DCHECK(posted_task_expiration_ != TimeMs::InfiniteFuture()); + posted_task_expiration_ = TimeMs::InfiniteFuture(); + + if (timeout_expiration_ == TimeMs::InfiniteFuture()) { + // The timeout was stopped before it expired. Very common. + } else { + // Note that the timeout might have been restarted, which updated + // `timeout_expiration_` but left the scheduled task running. So + // if it's not quite time to trigger the timeout yet, schedule a + // new delayed task with what's remaining and retry at that point + // in time. + DurationMs remaining = timeout_expiration_ - parent_.get_time_(); + timeout_expiration_ = TimeMs::InfiniteFuture(); + if (*remaining > 0) { + Start(remaining, timeout_id_); + } else { + // It has actually triggered. + RTC_DLOG(LS_VERBOSE) + << "Timout triggered: " << timeout_id.value(); + parent_.on_expired_(timeout_id_); + } + } + }), + duration_ms.value()); +} + +void TaskQueueTimeoutFactory::TaskQueueTimeout::Stop() { + // As the TaskQueue doesn't support deleting a posted task, just mark the + // timeout as not running. + RTC_DCHECK_RUN_ON(&parent_.thread_checker_); + timeout_expiration_ = TimeMs::InfiniteFuture(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/timer/task_queue_timeout.h b/net/dcsctp/timer/task_queue_timeout.h new file mode 100644 index 0000000000..e8d12df592 --- /dev/null +++ b/net/dcsctp/timer/task_queue_timeout.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TIMER_TASK_QUEUE_TIMEOUT_H_ +#define NET_DCSCTP_TIMER_TASK_QUEUE_TIMEOUT_H_ + +#include +#include + +#include "api/task_queue/task_queue_base.h" +#include "net/dcsctp/public/timeout.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" + +namespace dcsctp { + +// The TaskQueueTimeoutFactory creates `Timeout` instances, each of which +// schedules itself to be triggered on the provided `task_queue`, which may be a thread, +// an actual TaskQueue or something else which supports posting a delayed task. +// +// Note that each `DcSctpSocket` must have its own `TaskQueueTimeoutFactory`, +// as the `TimeoutID` values are not unique among sockets. +// +// This class must outlive any Timeout that it has created. Note that +// the `DcSctpSocket` will ensure that all Timeouts are deleted when the socket +// is destructed, so this means that this class must outlive the `DcSctpSocket`. +// +// This class, and the timeouts created by it, are not thread safe. +class TaskQueueTimeoutFactory { + public: + // The `get_time` function must return the current time, relative to any + // epoch. Whenever a timeout expires, the `on_expired` callback will be + // triggered, and then the client should provide the `timeout_id` to + // `DcSctpSocketInterface::HandleTimeout`. + TaskQueueTimeoutFactory(webrtc::TaskQueueBase& task_queue, + std::function get_time, + std::function on_expired) + : task_queue_(task_queue), + get_time_(std::move(get_time)), + on_expired_(std::move(on_expired)) {} + + // Creates an implementation of `Timeout`. 
+ std::unique_ptr CreateTimeout() { + return std::make_unique(*this); + } + + private: + class TaskQueueTimeout : public Timeout { + public: + explicit TaskQueueTimeout(TaskQueueTimeoutFactory& parent); + ~TaskQueueTimeout(); + + void Start(DurationMs duration_ms, TimeoutID timeout_id) override; + void Stop() override; + + private: + TaskQueueTimeoutFactory& parent_; + // A safety flag to ensure that posted tasks to the task queue don't + // reference these object when they go out of scope. Note that this safety + // flag will be re-created if the scheduled-but-not-yet-expired task is not + // to be run. This happens when there is a posted delayed task with an + // expiration time _further away_ than what is now the expected expiration + // time. In this scenario, a new delayed task has to be posted with a + // shorter duration and the old task has to be forgotten. + rtc::scoped_refptr pending_task_safety_flag_; + // The time when the posted delayed task is set to expire. Will be set to + // the infinite future if there is no such task running. + TimeMs posted_task_expiration_ = TimeMs::InfiniteFuture(); + // The time when the timeout expires. It will be set to the infinite future + // if the timeout is not running/not started. + TimeMs timeout_expiration_ = TimeMs::InfiniteFuture(); + // The current timeout ID that will be reported when expired. + TimeoutID timeout_id_ = TimeoutID(0); + }; + + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker thread_checker_; + webrtc::TaskQueueBase& task_queue_; + const std::function get_time_; + const std::function on_expired_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_TIMER_TASK_QUEUE_TIMEOUT_H_ diff --git a/net/dcsctp/timer/task_queue_timeout_test.cc b/net/dcsctp/timer/task_queue_timeout_test.cc new file mode 100644 index 0000000000..9d3846953b --- /dev/null +++ b/net/dcsctp/timer/task_queue_timeout_test.cc @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/timer/task_queue_timeout.h" + +#include + +#include "rtc_base/gunit.h" +#include "test/gmock.h" +#include "test/time_controller/simulated_time_controller.h" + +namespace dcsctp { +namespace { +using ::testing::MockFunction; + +class TaskQueueTimeoutTest : public testing::Test { + protected: + TaskQueueTimeoutTest() + : time_controller_(webrtc::Timestamp::Millis(1234)), + task_queue_(time_controller_.GetMainThread()), + factory_( + *task_queue_, + [this]() { + return TimeMs(time_controller_.GetClock()->CurrentTime().ms()); + }, + on_expired_.AsStdFunction()) {} + + void AdvanceTime(DurationMs duration) { + time_controller_.AdvanceTime(webrtc::TimeDelta::Millis(*duration)); + } + + MockFunction on_expired_; + webrtc::GlobalSimulatedTimeController time_controller_; + + rtc::Thread* task_queue_; + TaskQueueTimeoutFactory factory_; +}; + +TEST_F(TaskQueueTimeoutTest, StartPostsDelayedTask) { + std::unique_ptr timeout = factory_.CreateTimeout(); + timeout->Start(DurationMs(1000), TimeoutID(1)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTime(DurationMs(999)); + + EXPECT_CALL(on_expired_, Call(TimeoutID(1))); + AdvanceTime(DurationMs(1)); +} + +TEST_F(TaskQueueTimeoutTest, StopBeforeExpiringDoesntTrigger) { + std::unique_ptr timeout = factory_.CreateTimeout(); + timeout->Start(DurationMs(1000), TimeoutID(1)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTime(DurationMs(999)); + + timeout->Stop(); + + AdvanceTime(DurationMs(1)); + AdvanceTime(DurationMs(1000)); +} + +TEST_F(TaskQueueTimeoutTest, RestartProlongingTimeoutDuration) { + std::unique_ptr timeout = factory_.CreateTimeout(); + 
timeout->Start(DurationMs(1000), TimeoutID(1)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTime(DurationMs(500)); + + timeout->Restart(DurationMs(1000), TimeoutID(2)); + + AdvanceTime(DurationMs(999)); + + EXPECT_CALL(on_expired_, Call(TimeoutID(2))); + AdvanceTime(DurationMs(1)); +} + +TEST_F(TaskQueueTimeoutTest, RestartWithShorterDurationExpiresWhenExpected) { + std::unique_ptr timeout = factory_.CreateTimeout(); + timeout->Start(DurationMs(1000), TimeoutID(1)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTime(DurationMs(500)); + + timeout->Restart(DurationMs(200), TimeoutID(2)); + + AdvanceTime(DurationMs(199)); + + EXPECT_CALL(on_expired_, Call(TimeoutID(2))); + AdvanceTime(DurationMs(1)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTime(DurationMs(1000)); +} + +TEST_F(TaskQueueTimeoutTest, KilledBeforeExpired) { + std::unique_ptr timeout = factory_.CreateTimeout(); + timeout->Start(DurationMs(1000), TimeoutID(1)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTime(DurationMs(500)); + + timeout = nullptr; + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTime(DurationMs(1000)); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/timer/timer.cc b/net/dcsctp/timer/timer.cc new file mode 100644 index 0000000000..593d639fa7 --- /dev/null +++ b/net/dcsctp/timer/timer.cc @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/timer/timer.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/memory/memory.h" +#include "absl/strings/string_view.h" +#include "net/dcsctp/public/timeout.h" +#include "rtc_base/checks.h" + +namespace dcsctp { +namespace { +TimeoutID MakeTimeoutId(TimerID timer_id, TimerGeneration generation) { + return TimeoutID(static_cast(*timer_id) << 32 | *generation); +} + +DurationMs GetBackoffDuration(TimerBackoffAlgorithm algorithm, + DurationMs base_duration, + int expiration_count) { + switch (algorithm) { + case TimerBackoffAlgorithm::kFixed: + return base_duration; + case TimerBackoffAlgorithm::kExponential: { + int32_t duration_ms = *base_duration; + + while (expiration_count > 0 && duration_ms < *Timer::kMaxTimerDuration) { + duration_ms *= 2; + --expiration_count; + } + + return DurationMs(std::min(duration_ms, *Timer::kMaxTimerDuration)); + } + } +} +} // namespace + +constexpr DurationMs Timer::kMaxTimerDuration; + +Timer::Timer(TimerID id, + absl::string_view name, + OnExpired on_expired, + UnregisterHandler unregister_handler, + std::unique_ptr timeout, + const TimerOptions& options) + : id_(id), + name_(name), + options_(options), + on_expired_(std::move(on_expired)), + unregister_handler_(std::move(unregister_handler)), + timeout_(std::move(timeout)), + duration_(options.duration) {} + +Timer::~Timer() { + Stop(); + unregister_handler_(); +} + +void Timer::Start() { + expiration_count_ = 0; + if (!is_running()) { + is_running_ = true; + generation_ = TimerGeneration(*generation_ + 1); + timeout_->Start(duration_, MakeTimeoutId(id_, generation_)); + } else { + // Timer was running - stop and restart it, to make it expire in `duration_` + // from now. 
+ generation_ = TimerGeneration(*generation_ + 1); + timeout_->Restart(duration_, MakeTimeoutId(id_, generation_)); + } +} + +void Timer::Stop() { + if (is_running()) { + timeout_->Stop(); + expiration_count_ = 0; + is_running_ = false; + } +} + +void Timer::Trigger(TimerGeneration generation) { + if (is_running_ && generation == generation_) { + ++expiration_count_; + is_running_ = false; + if (options_.max_restarts < 0 || + expiration_count_ <= options_.max_restarts) { + // The timer should still be running after this triggers. Start a new + // timer. Note that it might be very quickly restarted again, if the + // `on_expired_` callback returns a new duration. + is_running_ = true; + DurationMs duration = GetBackoffDuration(options_.backoff_algorithm, + duration_, expiration_count_); + generation_ = TimerGeneration(*generation_ + 1); + timeout_->Start(duration, MakeTimeoutId(id_, generation_)); + } + + absl::optional new_duration = on_expired_(); + if (new_duration.has_value() && new_duration != duration_) { + duration_ = new_duration.value(); + if (is_running_) { + // Restart it with new duration. + timeout_->Stop(); + + DurationMs duration = GetBackoffDuration(options_.backoff_algorithm, + duration_, expiration_count_); + generation_ = TimerGeneration(*generation_ + 1); + timeout_->Start(duration, MakeTimeoutId(id_, generation_)); + } + } + } +} + +void TimerManager::HandleTimeout(TimeoutID timeout_id) { + TimerID timer_id(*timeout_id >> 32); + TimerGeneration generation(*timeout_id); + auto it = timers_.find(timer_id); + if (it != timers_.end()) { + it->second->Trigger(generation); + } +} + +std::unique_ptr TimerManager::CreateTimer(absl::string_view name, + Timer::OnExpired on_expired, + const TimerOptions& options) { + next_id_ = TimerID(*next_id_ + 1); + TimerID id = next_id_; + // This would overflow after 4 billion timers created, which in SCTP would be + // after 800 million reconnections on a single socket. Ensure this will never + // happen. 
+ RTC_CHECK_NE(*id, std::numeric_limits::max()); + auto timer = absl::WrapUnique(new Timer( + id, name, std::move(on_expired), [this, id]() { timers_.erase(id); }, + create_timeout_(), options)); + timers_[id] = timer.get(); + return timer; +} + +} // namespace dcsctp diff --git a/net/dcsctp/timer/timer.h b/net/dcsctp/timer/timer.h new file mode 100644 index 0000000000..bf923ea4ca --- /dev/null +++ b/net/dcsctp/timer/timer.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TIMER_TIMER_H_ +#define NET_DCSCTP_TIMER_TIMER_H_ + +#include + +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "net/dcsctp/public/strong_alias.h" +#include "net/dcsctp/public/timeout.h" + +namespace dcsctp { + +using TimerID = StrongAlias; +using TimerGeneration = StrongAlias; + +enum class TimerBackoffAlgorithm { + // The base duration will be used for any restart. + kFixed, + // An exponential backoff is used for restarts, with a 2x multiplier, meaning + // that every restart will use a duration that is twice as long as the + // previous. 
+ kExponential, +}; + +struct TimerOptions { + explicit TimerOptions(DurationMs duration) + : TimerOptions(duration, TimerBackoffAlgorithm::kExponential) {} + TimerOptions(DurationMs duration, TimerBackoffAlgorithm backoff_algorithm) + : TimerOptions(duration, backoff_algorithm, -1) {} + TimerOptions(DurationMs duration, + TimerBackoffAlgorithm backoff_algorithm, + int max_restarts) + : duration(duration), + backoff_algorithm(backoff_algorithm), + max_restarts(max_restarts) {} + + // The initial timer duration. Can be overridden with `set_duration`. + const DurationMs duration; + // If the duration should be increased (using exponential backoff) when it is + // restarted. If not set, the same duration will be used. + const TimerBackoffAlgorithm backoff_algorithm; + // The maximum number of times that the timer will be automatically restarted. + const int max_restarts; +}; + +// A high-level timer (in contrast to the low-level `Timeout` class). +// +// Timers are started and can be stopped or restarted. When a timer expires, +// the provided `on_expired` callback will be triggered. A timer is +// automatically restarted, as long as the number of restarts is below the +// configurable `max_restarts` parameter. The `is_running` property can be +// queried to know if it's still running after having expired. +// +// When a timer is restarted, it will use a configurable `backoff_algorithm` to +// possibly adjust the duration of the next expiry. It is also possible to +// return a new base duration (which is the duration before it's adjusted by the +// backoff algorithm). +class Timer { + public: + // The maximum timer duration - one day. + static constexpr DurationMs kMaxTimerDuration = DurationMs(24 * 3600 * 1000); + + // When expired, the timer handler can optionally return a new duration which + // will be set as `duration` and used as base duration when the timer is + // restarted and as input to the backoff algorithm. 
+ using OnExpired = std::function()>; + + // TimerManager will have pointers to these instances, so they must not move. + Timer(const Timer&) = delete; + Timer& operator=(const Timer&) = delete; + + ~Timer(); + + // Starts the timer if it's stopped or restarts the timer if it's already + // running. The `expiration_count` will be reset. + void Start(); + + // Stops the timer. This can also be called when the timer is already stopped. + // The `expiration_count` will be reset. + void Stop(); + + // Sets the base duration. The actual timer duration may be larger depending + // on the backoff algorithm. + void set_duration(DurationMs duration) { + duration_ = std::min(duration, kMaxTimerDuration); + } + + // Retrieves the base duration. The actual timer duration may be larger + // depending on the backoff algorithm. + DurationMs duration() const { return duration_; } + + // Returns the number of times the timer has expired. + int expiration_count() const { return expiration_count_; } + + // Returns the timer's options. + const TimerOptions& options() const { return options_; } + + // Returns the name of the timer. + absl::string_view name() const { return name_; } + + // Indicates if this timer is currently running. + bool is_running() const { return is_running_; } + + private: + friend class TimerManager; + using UnregisterHandler = std::function; + Timer(TimerID id, + absl::string_view name, + OnExpired on_expired, + UnregisterHandler unregister, + std::unique_ptr timeout, + const TimerOptions& options); + + // Called by TimerManager. Will trigger the callback and increment + // `expiration_count`. The timer will automatically be restarted at the + // duration as decided by the backoff algorithm, unless the + // `TimerOptions::max_restarts` has been reached and then it will be stopped + // and `is_running()` will return false. 
+ void Trigger(TimerGeneration generation); + + const TimerID id_; + const std::string name_; + const TimerOptions options_; + const OnExpired on_expired_; + const UnregisterHandler unregister_handler_; + const std::unique_ptr timeout_; + + DurationMs duration_; + + // Increased on each start, and is matched on Trigger, to avoid races. And by + // race, meaning that a timeout - which may be evaluated/expired on a + // different thread while this thread has stopped that timer already. Note + // that the entire socket is not thread-safe, so `TimerManager::HandleTimeout` + // is never executed concurrently with any timer starting/stopping. + // + // This will wrap around after 4 billion timer restarts, and if it wraps + // around, it would just trigger _this_ timer in advance (but it's hard to + // restart it 4 billion times within its duration). + TimerGeneration generation_ = TimerGeneration(0); + bool is_running_ = false; + // Incremented each time the timer has expired and reset when stopped or restarted. + int expiration_count_ = 0; +}; + +// Creates and manages timers. +class TimerManager { + public: + explicit TimerManager( + std::function()> create_timeout) + : create_timeout_(std::move(create_timeout)) {} + + // Creates a timer with name `name` that will expire (when started) after + // `options.duration` and call `on_expired`. There are more `options` that + // affect the behavior. Note that timers are created initially stopped. 
+ std::unique_ptr CreateTimer(absl::string_view name, + Timer::OnExpired on_expired, + const TimerOptions& options); + + void HandleTimeout(TimeoutID timeout_id); + + private: + const std::function()> create_timeout_; + std::unordered_map timers_; + TimerID next_id_ = TimerID(0); +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_TIMER_TIMER_H_ diff --git a/net/dcsctp/timer/timer_test.cc b/net/dcsctp/timer/timer_test.cc new file mode 100644 index 0000000000..a403bb6b4b --- /dev/null +++ b/net/dcsctp/timer/timer_test.cc @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/timer/timer.h" + +#include + +#include "absl/types/optional.h" +#include "net/dcsctp/public/timeout.h" +#include "net/dcsctp/timer/fake_timeout.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::Return; + +class TimerTest : public testing::Test { + protected: + TimerTest() + : timeout_manager_([this]() { return now_; }), + manager_([this]() { return timeout_manager_.CreateTimeout(); }) { + ON_CALL(on_expired_, Call).WillByDefault(Return(absl::nullopt)); + } + + void AdvanceTimeAndRunTimers(DurationMs duration) { + now_ = now_ + duration; + + for (;;) { + absl::optional timeout_id = + timeout_manager_.GetNextExpiredTimeout(); + if (!timeout_id.has_value()) { + break; + } + manager_.HandleTimeout(*timeout_id); + } + } + + TimeMs now_ = TimeMs(0); + FakeTimeoutManager timeout_manager_; + TimerManager manager_; + testing::MockFunction()> on_expired_; +}; + +TEST_F(TimerTest, TimerIsInitiallyStopped) { + std::unique_ptr t1 = manager_.CreateTimer( + 
"t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kFixed)); + + EXPECT_FALSE(t1->is_running()); +} + +TEST_F(TimerTest, TimerExpiresAtGivenTime) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kFixed)); + + EXPECT_CALL(on_expired_, Call).Times(0); + t1->Start(); + EXPECT_TRUE(t1->is_running()); + + AdvanceTimeAndRunTimers(DurationMs(4000)); + + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); +} + +TEST_F(TimerTest, TimerReschedulesAfterExpiredWithFixedBackoff) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kFixed)); + + EXPECT_CALL(on_expired_, Call).Times(0); + t1->Start(); + EXPECT_EQ(t1->expiration_count(), 0); + + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Fire first time + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_TRUE(t1->is_running()); + EXPECT_EQ(t1->expiration_count(), 1); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Second time + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_TRUE(t1->is_running()); + EXPECT_EQ(t1->expiration_count(), 2); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Third time + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_TRUE(t1->is_running()); + EXPECT_EQ(t1->expiration_count(), 3); +} + +TEST_F(TimerTest, TimerWithNoRestarts) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kFixed, + /*max_restart=*/0)); + + EXPECT_CALL(on_expired_, Call).Times(0); + t1->Start(); + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Fire first time 
+ EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + + EXPECT_FALSE(t1->is_running()); + + // Second time - shouldn't fire + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(5000)); + EXPECT_FALSE(t1->is_running()); +} + +TEST_F(TimerTest, TimerWithOneRestart) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kFixed, + /*max_restart=*/1)); + + EXPECT_CALL(on_expired_, Call).Times(0); + t1->Start(); + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Fire first time + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_TRUE(t1->is_running()); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Second time - max restart limit reached. + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_FALSE(t1->is_running()); + + // Third time - should not fire. 
+ EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(5000)); + EXPECT_FALSE(t1->is_running()); +} + +TEST_F(TimerTest, TimerWithTwoRestart) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kFixed, + /*max_restart=*/2)); + + EXPECT_CALL(on_expired_, Call).Times(0); + t1->Start(); + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Fire first time + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_TRUE(t1->is_running()); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Second time + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_TRUE(t1->is_running()); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Third time + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_FALSE(t1->is_running()); +} + +TEST_F(TimerTest, TimerWithExponentialBackoff) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kExponential)); + + t1->Start(); + + // Fire first time at 5 seconds + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(5000)); + + // Second time at 5*2^1 = 10 seconds later. + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(9000)); + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + + // Third time at 5*2^2 = 20 seconds later. + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(19000)); + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + + // Fourth time at 5*2^3 = 40 seconds later. 
+ EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(39000)); + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); +} + +TEST_F(TimerTest, StartTimerWillStopAndStart) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kExponential)); + + t1->Start(); + + AdvanceTimeAndRunTimers(DurationMs(3000)); + + t1->Start(); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(2000)); + + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(3000)); +} + +TEST_F(TimerTest, ExpirationCounterWillResetIfStopped) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kExponential)); + + t1->Start(); + + // Fire first time at 5 seconds + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(5000)); + EXPECT_EQ(t1->expiration_count(), 1); + + // Second time at 5*2^1 = 10 seconds later. + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(9000)); + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_EQ(t1->expiration_count(), 2); + + t1->Start(); + EXPECT_EQ(t1->expiration_count(), 0); + + // Third time at 5*2^0 = 5 seconds later. 
+ EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(4000)); + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_EQ(t1->expiration_count(), 1); +} + +TEST_F(TimerTest, StopTimerWillMakeItNotExpire) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kExponential)); + + t1->Start(); + EXPECT_TRUE(t1->is_running()); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(4000)); + t1->Stop(); + EXPECT_FALSE(t1->is_running()); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(1000)); +} + +TEST_F(TimerTest, ReturningNewDurationWhenExpired) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(5000), TimerBackoffAlgorithm::kFixed)); + + EXPECT_CALL(on_expired_, Call).Times(0); + t1->Start(); + EXPECT_EQ(t1->duration(), DurationMs(5000)); + + AdvanceTimeAndRunTimers(DurationMs(4000)); + + // Fire first time + EXPECT_CALL(on_expired_, Call).WillOnce(Return(DurationMs(2000))); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_EQ(t1->duration(), DurationMs(2000)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(1000)); + + // Second time + EXPECT_CALL(on_expired_, Call).WillOnce(Return(DurationMs(10000))); + AdvanceTimeAndRunTimers(DurationMs(1000)); + EXPECT_EQ(t1->duration(), DurationMs(10000)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(9000)); + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000)); +} + +TEST_F(TimerTest, TimersHaveMaximumDuration) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(1000), TimerBackoffAlgorithm::kExponential)); + + t1->set_duration(DurationMs(2 * *Timer::kMaxTimerDuration)); + EXPECT_EQ(t1->duration(), 
Timer::kMaxTimerDuration); +} + +TEST_F(TimerTest, TimersHaveMaximumBackoffDuration) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(1000), TimerBackoffAlgorithm::kExponential)); + + t1->Start(); + + int max_exponent = static_cast(log2(*Timer::kMaxTimerDuration / 1000)); + for (int i = 0; i < max_exponent; ++i) { + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1000 * (1 << i))); + } + + // Reached the maximum duration. + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(Timer::kMaxTimerDuration); + + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(Timer::kMaxTimerDuration); + + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(Timer::kMaxTimerDuration); + + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(Timer::kMaxTimerDuration); +} + +TEST_F(TimerTest, TimerCanBeStartedFromWithinExpirationHandler) { + std::unique_ptr t1 = manager_.CreateTimer( + "t1", on_expired_.AsStdFunction(), + TimerOptions(DurationMs(1000), TimerBackoffAlgorithm::kFixed)); + + t1->Start(); + + // Start a timer, but don't return any new duration in callback. + EXPECT_CALL(on_expired_, Call).WillOnce([&]() { + EXPECT_TRUE(t1->is_running()); + t1->set_duration(DurationMs(5000)); + t1->Start(); + return absl::nullopt; + }); + AdvanceTimeAndRunTimers(DurationMs(1000)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(4999)); + + // Start a timer, and return any new duration in callback. 
+ EXPECT_CALL(on_expired_, Call).WillOnce([&]() { + EXPECT_TRUE(t1->is_running()); + t1->set_duration(DurationMs(5000)); + t1->Start(); + return absl::make_optional(DurationMs(8000)); + }); + AdvanceTimeAndRunTimers(DurationMs(1)); + + EXPECT_CALL(on_expired_, Call).Times(0); + AdvanceTimeAndRunTimers(DurationMs(7999)); + + EXPECT_CALL(on_expired_, Call).Times(1); + AdvanceTimeAndRunTimers(DurationMs(1)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/tx/BUILD.gn b/net/dcsctp/tx/BUILD.gn new file mode 100644 index 0000000000..2f0b27afc6 --- /dev/null +++ b/net/dcsctp/tx/BUILD.gn @@ -0,0 +1,141 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +import("../../../webrtc.gni") + +rtc_source_set("send_queue") { + deps = [ + "../../../api:array_view", + "../common:internal_types", + "../packet:chunk", + "../packet:data", + "../public:types", + ] + sources = [ "send_queue.h" ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_library("rr_send_queue") { + deps = [ + ":send_queue", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:pair_hash", + "../packet:data", + "../public:socket", + "../public:types", + ] + sources = [ + "rr_send_queue.cc", + "rr_send_queue.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("retransmission_error_counter") { + deps = [ + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../public:types", + ] + sources = [ + "retransmission_error_counter.cc", + "retransmission_error_counter.h", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] +} + +rtc_library("retransmission_timeout") { + deps = [ + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../public:types", + ] + sources = [ + "retransmission_timeout.cc", + "retransmission_timeout.h", + ] +} + +rtc_library("retransmission_queue") { + deps = [ + ":retransmission_timeout", + ":send_queue", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:rtc_base_approved", + "../common:math", + "../common:pair_hash", + "../common:sequence_numbers", + "../common:str_join", + "../packet:chunk", + "../packet:data", + "../public:types", + "../timer", + ] + sources = [ + "retransmission_queue.cc", + "retransmission_queue.h", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + 
"//third_party/abseil-cpp/absl/types:optional", + ] +} + +if (rtc_include_tests) { + rtc_source_set("mock_send_queue") { + testonly = true + deps = [ + ":send_queue", + "../../../api:array_view", + "../../../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + sources = [ "mock_send_queue.h" ] + } + + rtc_library("dcsctp_tx_unittests") { + testonly = true + + deps = [ + ":mock_send_queue", + ":retransmission_error_counter", + ":retransmission_queue", + ":retransmission_timeout", + ":rr_send_queue", + ":send_queue", + "../../../api:array_view", + "../../../rtc_base:checks", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + "../packet:chunk", + "../packet:data", + "../public:socket", + "../public:types", + "../testing:data_generator", + "../testing:testing_macros", + "../timer", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + sources = [ + "retransmission_error_counter_test.cc", + "retransmission_queue_test.cc", + "retransmission_timeout_test.cc", + "rr_send_queue_test.cc", + ] + } +} diff --git a/net/dcsctp/tx/mock_send_queue.h b/net/dcsctp/tx/mock_send_queue.h new file mode 100644 index 0000000000..0cf64583ae --- /dev/null +++ b/net/dcsctp/tx/mock_send_queue.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_TX_MOCK_SEND_QUEUE_H_ +#define NET_DCSCTP_TX_MOCK_SEND_QUEUE_H_ + +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/tx/send_queue.h" +#include "test/gmock.h" + +namespace dcsctp { + +class MockSendQueue : public SendQueue { + public: + MockSendQueue() { + ON_CALL(*this, Produce).WillByDefault([](TimeMs now, size_t max_size) { + return absl::nullopt; + }); + } + + MOCK_METHOD(absl::optional, + Produce, + (TimeMs now, size_t max_size), + (override)); + MOCK_METHOD(bool, + Discard, + (IsUnordered unordered, StreamID stream_id, MID message_id), + (override)); + MOCK_METHOD(void, + PrepareResetStreams, + (rtc::ArrayView streams), + (override)); + MOCK_METHOD(bool, CanResetStreams, (), (const, override)); + MOCK_METHOD(void, CommitResetStreams, (), (override)); + MOCK_METHOD(void, RollbackResetStreams, (), (override)); + MOCK_METHOD(void, Reset, (), (override)); + MOCK_METHOD(size_t, buffered_amount, (StreamID stream_id), (const, override)); + MOCK_METHOD(size_t, total_buffered_amount, (), (const, override)); + MOCK_METHOD(size_t, + buffered_amount_low_threshold, + (StreamID stream_id), + (const, override)); + MOCK_METHOD(void, + SetBufferedAmountLowThreshold, + (StreamID stream_id, size_t bytes), + (override)); +}; + +} // namespace dcsctp + +#endif // NET_DCSCTP_TX_MOCK_SEND_QUEUE_H_ diff --git a/net/dcsctp/tx/retransmission_error_counter.cc b/net/dcsctp/tx/retransmission_error_counter.cc new file mode 100644 index 0000000000..111b6efe96 --- /dev/null +++ b/net/dcsctp/tx/retransmission_error_counter.cc @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/tx/retransmission_error_counter.h" + +#include "absl/strings/string_view.h" +#include "rtc_base/logging.h" + +namespace dcsctp { +bool RetransmissionErrorCounter::Increment(absl::string_view reason) { + ++counter_; + if (counter_ > limit_) { + RTC_DLOG(LS_INFO) << log_prefix_ << reason + << ", too many retransmissions, counter=" << counter_; + return false; + } + + RTC_DLOG(LS_VERBOSE) << log_prefix_ << reason << ", new counter=" << counter_ + << ", max=" << limit_; + return true; +} + +void RetransmissionErrorCounter::Clear() { + if (counter_ > 0) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "recovered from counter=" << counter_; + counter_ = 0; + } +} + +} // namespace dcsctp diff --git a/net/dcsctp/tx/retransmission_error_counter.h b/net/dcsctp/tx/retransmission_error_counter.h new file mode 100644 index 0000000000..bb8d1f754d --- /dev/null +++ b/net/dcsctp/tx/retransmission_error_counter.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TX_RETRANSMISSION_ERROR_COUNTER_H_ +#define NET_DCSCTP_TX_RETRANSMISSION_ERROR_COUNTER_H_ + +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "net/dcsctp/public/dcsctp_options.h" + +namespace dcsctp { + +// The RetransmissionErrorCounter is a simple counter with a limit, and when +// the limit is exceeded, the counter is exhausted and the connection will +// be closed. 
It's incremented on retransmission errors, such as the T3-RTX +// timer expiring, but also missing heartbeats and stream reset requests. +class RetransmissionErrorCounter { + public: + RetransmissionErrorCounter(absl::string_view log_prefix, + const DcSctpOptions& options) + : log_prefix_(std::string(log_prefix) + "rtx-errors: "), + limit_(options.max_retransmissions) {} + + // Increments the retransmission timer. If the maximum error count has been + // reached, `false` will be returned. + bool Increment(absl::string_view reason); + bool IsExhausted() const { return counter_ > limit_; } + + // Clears the retransmission errors. + void Clear(); + + // Returns its current value + int value() const { return counter_; } + + private: + const std::string log_prefix_; + const int limit_; + int counter_ = 0; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_TX_RETRANSMISSION_ERROR_COUNTER_H_ diff --git a/net/dcsctp/tx/retransmission_error_counter_test.cc b/net/dcsctp/tx/retransmission_error_counter_test.cc new file mode 100644 index 0000000000..61ee82926d --- /dev/null +++ b/net/dcsctp/tx/retransmission_error_counter_test.cc @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/tx/retransmission_error_counter.h" + +#include "net/dcsctp/public/dcsctp_options.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +TEST(RetransmissionErrorCounterTest, HasInitialValue) { + DcSctpOptions options; + RetransmissionErrorCounter counter("log: ", options); + EXPECT_EQ(counter.value(), 0); +} + +TEST(RetransmissionErrorCounterTest, ReturnsFalseAtMaximumValue) { + DcSctpOptions options; + options.max_retransmissions = 5; + RetransmissionErrorCounter counter("log: ", options); + EXPECT_TRUE(counter.Increment("test")); // 1 + EXPECT_TRUE(counter.Increment("test")); // 2 + EXPECT_TRUE(counter.Increment("test")); // 3 + EXPECT_TRUE(counter.Increment("test")); // 4 + EXPECT_TRUE(counter.Increment("test")); // 5 + EXPECT_FALSE(counter.Increment("test")); // Too many retransmissions +} + +TEST(RetransmissionErrorCounterTest, CanHandleZeroRetransmission) { + DcSctpOptions options; + options.max_retransmissions = 0; + RetransmissionErrorCounter counter("log: ", options); + EXPECT_FALSE(counter.Increment("test")); // One is too many. 
+} + +TEST(RetransmissionErrorCounterTest, IsExhaustedAtMaximum) { + DcSctpOptions options; + options.max_retransmissions = 3; + RetransmissionErrorCounter counter("log: ", options); + EXPECT_TRUE(counter.Increment("test")); // 1 + EXPECT_FALSE(counter.IsExhausted()); + EXPECT_TRUE(counter.Increment("test")); // 2 + EXPECT_FALSE(counter.IsExhausted()); + EXPECT_TRUE(counter.Increment("test")); // 3 + EXPECT_FALSE(counter.IsExhausted()); + EXPECT_FALSE(counter.Increment("test")); // Too many retransmissions + EXPECT_TRUE(counter.IsExhausted()); + EXPECT_FALSE(counter.Increment("test")); // One after too many + EXPECT_TRUE(counter.IsExhausted()); +} + +TEST(RetransmissionErrorCounterTest, ClearingCounter) { + DcSctpOptions options; + options.max_retransmissions = 3; + RetransmissionErrorCounter counter("log: ", options); + EXPECT_TRUE(counter.Increment("test")); // 1 + EXPECT_TRUE(counter.Increment("test")); // 2 + counter.Clear(); + EXPECT_TRUE(counter.Increment("test")); // 1 + EXPECT_TRUE(counter.Increment("test")); // 2 + EXPECT_TRUE(counter.Increment("test")); // 3 + EXPECT_FALSE(counter.IsExhausted()); + EXPECT_FALSE(counter.Increment("test")); // Too many retransmissions + EXPECT_TRUE(counter.IsExhausted()); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/tx/retransmission_queue.cc b/net/dcsctp/tx/retransmission_queue.cc new file mode 100644 index 0000000000..51bb65a30c --- /dev/null +++ b/net/dcsctp/tx/retransmission_queue.cc @@ -0,0 +1,909 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/tx/retransmission_queue.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/math.h" +#include "net/dcsctp/common/pair_hash.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/common/str_join.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/chunk/idata_chunk.h" +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/send_queue.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" + +namespace dcsctp { +namespace { + +// The number of times a packet must be NACKed before it's retransmitted. +// See https://tools.ietf.org/html/rfc4960#section-7.2.4 +constexpr size_t kNumberOfNacksForRetransmission = 3; +} // namespace + +RetransmissionQueue::RetransmissionQueue( + absl::string_view log_prefix, + TSN initial_tsn, + size_t a_rwnd, + SendQueue& send_queue, + std::function on_new_rtt, + std::function on_clear_retransmission_counter, + Timer& t3_rtx, + const DcSctpOptions& options, + bool supports_partial_reliability, + bool use_message_interleaving) + : options_(options), + partial_reliability_(supports_partial_reliability), + log_prefix_(std::string(log_prefix) + "tx: "), + data_chunk_header_size_(use_message_interleaving + ? 
IDataChunk::kHeaderSize + : DataChunk::kHeaderSize), + on_new_rtt_(std::move(on_new_rtt)), + on_clear_retransmission_counter_( + std::move(on_clear_retransmission_counter)), + t3_rtx_(t3_rtx), + cwnd_(options_.cwnd_mtus_initial * options_.mtu), + rwnd_(a_rwnd), + // https://tools.ietf.org/html/rfc4960#section-7.2.1 + // "The initial value of ssthresh MAY be arbitrarily high (for + // example, implementations MAY use the size of the receiver advertised + // window)."" + ssthresh_(rwnd_), + next_tsn_(tsn_unwrapper_.Unwrap(initial_tsn)), + last_cumulative_tsn_ack_(tsn_unwrapper_.Unwrap(TSN(*initial_tsn - 1))), + send_queue_(send_queue) {} + +bool RetransmissionQueue::IsConsistent() const { + size_t actual_outstanding_bytes = 0; + + std::set actual_to_be_retransmitted; + for (const auto& elem : outstanding_data_) { + if (elem.second.is_outstanding()) { + actual_outstanding_bytes += GetSerializedChunkSize(elem.second.data()); + } + + if (elem.second.should_be_retransmitted()) { + actual_to_be_retransmitted.insert(elem.first); + } + } + + return actual_outstanding_bytes == outstanding_bytes_ && + actual_to_be_retransmitted == to_be_retransmitted_; +} + +// Returns how large a chunk will be, serialized, carrying the data +size_t RetransmissionQueue::GetSerializedChunkSize(const Data& data) const { + return RoundUpTo4(data_chunk_header_size_ + data.size()); +} + +void RetransmissionQueue::RemoveAcked(UnwrappedTSN cumulative_tsn_ack, + AckInfo& ack_info) { + auto first_unacked = outstanding_data_.upper_bound(cumulative_tsn_ack); + + for (auto it = outstanding_data_.begin(); it != first_unacked; ++it) { + ack_info.bytes_acked_by_cumulative_tsn_ack += it->second.data().size(); + ack_info.acked_tsns.push_back(it->first.Wrap()); + if (it->second.is_outstanding()) { + outstanding_bytes_ -= GetSerializedChunkSize(it->second.data()); + } else if (it->second.should_be_retransmitted()) { + to_be_retransmitted_.erase(it->first); + } + } + + 
outstanding_data_.erase(outstanding_data_.begin(), first_unacked); +} + +void RetransmissionQueue::AckGapBlocks( + UnwrappedTSN cumulative_tsn_ack, + rtc::ArrayView gap_ack_blocks, + AckInfo& ack_info) { + // Mark all non-gaps as ACKED (but they can't be removed) as (from RFC) + // "SCTP considers the information carried in the Gap Ack Blocks in the + // SACK chunk as advisory.". Note that when NR-SACK is supported, this can be + // handled differently. + + for (auto& block : gap_ack_blocks) { + auto start = outstanding_data_.lower_bound( + UnwrappedTSN::AddTo(cumulative_tsn_ack, block.start)); + auto end = outstanding_data_.upper_bound( + UnwrappedTSN::AddTo(cumulative_tsn_ack, block.end)); + for (auto iter = start; iter != end; ++iter) { + if (!iter->second.is_acked()) { + ack_info.bytes_acked_by_new_gap_ack_blocks += + iter->second.data().size(); + if (iter->second.is_outstanding()) { + outstanding_bytes_ -= GetSerializedChunkSize(iter->second.data()); + } + if (iter->second.should_be_retransmitted()) { + to_be_retransmitted_.erase(iter->first); + } + iter->second.Ack(); + ack_info.highest_tsn_acked = + std::max(ack_info.highest_tsn_acked, iter->first); + ack_info.acked_tsns.push_back(iter->first.Wrap()); + } + } + } +} + +void RetransmissionQueue::NackBetweenAckBlocks( + UnwrappedTSN cumulative_tsn_ack, + rtc::ArrayView gap_ack_blocks, + AckInfo& ack_info) { + // Mark everything between the blocks as NACKED/TO_BE_RETRANSMITTED. + // https://tools.ietf.org/html/rfc4960#section-7.2.4 + // "Mark the DATA chunk(s) with three miss indications for retransmission." + // "For each incoming SACK, miss indications are incremented only for + // missing TSNs prior to the highest TSN newly acknowledged in the SACK." + // + // What this means is that only when there is a increasing stream of data + // received and there are new packets seen (since last time), packets that are + // in-flight and between gaps should be nacked. 
This means that SCTP relies on + // the T3-RTX-timer to re-send packets otherwise. + UnwrappedTSN max_tsn_to_nack = ack_info.highest_tsn_acked; + if (is_in_fast_recovery() && cumulative_tsn_ack > last_cumulative_tsn_ack_) { + // https://tools.ietf.org/html/rfc4960#section-7.2.4 + // "If an endpoint is in Fast Recovery and a SACK arrives that advances + // the Cumulative TSN Ack Point, the miss indications are incremented for + // all TSNs reported missing in the SACK." + max_tsn_to_nack = UnwrappedTSN::AddTo( + cumulative_tsn_ack, + gap_ack_blocks.empty() ? 0 : gap_ack_blocks.rbegin()->end); + } + + UnwrappedTSN prev_block_last_acked = cumulative_tsn_ack; + for (auto& block : gap_ack_blocks) { + UnwrappedTSN cur_block_first_acked = + UnwrappedTSN::AddTo(cumulative_tsn_ack, block.start); + for (auto iter = outstanding_data_.upper_bound(prev_block_last_acked); + iter != outstanding_data_.lower_bound(cur_block_first_acked); ++iter) { + if (iter->first <= max_tsn_to_nack) { + ack_info.has_packet_loss = + NackItem(iter->first, iter->second, /*retransmit_now=*/false); + } + } + prev_block_last_acked = UnwrappedTSN::AddTo(cumulative_tsn_ack, block.end); + } + + // Note that packets are not NACKED which are above the highest gap-ack-block + // (or above the cumulative ack TSN if no gap-ack-blocks) as only packets + // up until the highest_tsn_acked (see above) should be considered when + // NACKing. +} + +void RetransmissionQueue::MaybeExitFastRecovery( + UnwrappedTSN cumulative_tsn_ack) { + // https://tools.ietf.org/html/rfc4960#section-7.2.4 + // "When a SACK acknowledges all TSNs up to and including this [fast + // recovery] exit point, Fast Recovery is exited." 
+ if (fast_recovery_exit_tsn_.has_value() && + cumulative_tsn_ack >= *fast_recovery_exit_tsn_) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "exit_point=" << *fast_recovery_exit_tsn_->Wrap() + << " reached - exiting fast recovery"; + fast_recovery_exit_tsn_ = absl::nullopt; + } +} + +void RetransmissionQueue::HandleIncreasedCumulativeTsnAck( + size_t outstanding_bytes, + size_t total_bytes_acked) { + // Allow some margin for classifying as fully utilized, due to e.g. that too + // small packets (less than kMinimumFragmentedPayload) are not sent + + // overhead. + bool is_fully_utilized = outstanding_bytes + options_.mtu >= cwnd_; + size_t old_cwnd = cwnd_; + if (phase() == CongestionAlgorithmPhase::kSlowStart) { + if (is_fully_utilized && !is_in_fast_recovery()) { + // https://tools.ietf.org/html/rfc4960#section-7.2.1 + // "Only when these three conditions are met can the cwnd be + // increased; otherwise, the cwnd MUST not be increased. If these + // conditions are met, then cwnd MUST be increased by, at most, the + // lesser of 1) the total size of the previously outstanding DATA + // chunk(s) acknowledged, and 2) the destination's path MTU." + if (options_.slow_start_tcp_style) { + cwnd_ += std::min(total_bytes_acked, cwnd_); + } else { + cwnd_ += std::min(total_bytes_acked, options_.mtu); + } + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "SS increase cwnd=" << cwnd_ + << " (" << old_cwnd << ")"; + } + } else if (phase() == CongestionAlgorithmPhase::kCongestionAvoidance) { + // https://tools.ietf.org/html/rfc4960#section-7.2.2 + // "Whenever cwnd is greater than ssthresh, upon each SACK arrival + // that advances the Cumulative TSN Ack Point, increase + // partial_bytes_acked by the total number of bytes of all new chunks + // acknowledged in that SACK including chunks acknowledged by the new + // Cumulative TSN Ack and by Gap Ack Blocks." 
+ size_t old_pba = partial_bytes_acked_; + partial_bytes_acked_ += total_bytes_acked; + + if (partial_bytes_acked_ >= cwnd_ && is_fully_utilized) { + // https://tools.ietf.org/html/rfc4960#section-7.2.2 + // "When partial_bytes_acked is equal to or greater than cwnd and + // before the arrival of the SACK the sender had cwnd or more bytes of + // data outstanding (i.e., before arrival of the SACK, flightsize was + // greater than or equal to cwnd), increase cwnd by MTU, and reset + // partial_bytes_acked to (partial_bytes_acked - cwnd)." + cwnd_ += options_.mtu; + partial_bytes_acked_ -= cwnd_; + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "CA increase cwnd=" << cwnd_ + << " (" << old_cwnd << ") ssthresh=" << ssthresh_ + << ", pba=" << partial_bytes_acked_ << " (" + << old_pba << ")"; + } else { + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "CA unchanged cwnd=" << cwnd_ + << " (" << old_cwnd << ") ssthresh=" << ssthresh_ + << ", pba=" << partial_bytes_acked_ << " (" + << old_pba << ")"; + } + } +} + +void RetransmissionQueue::HandlePacketLoss(UnwrappedTSN highest_tsn_acked) { + if (!is_in_fast_recovery()) { + // https://tools.ietf.org/html/rfc4960#section-7.2.4 + // "If not in Fast Recovery, adjust the ssthresh and cwnd of the + // destination address(es) to which the missing DATA chunks were last + // sent, according to the formula described in Section 7.2.3." + size_t old_cwnd = cwnd_; + size_t old_pba = partial_bytes_acked_; + ssthresh_ = std::max(cwnd_ / 2, options_.cwnd_mtus_min * options_.mtu); + cwnd_ = ssthresh_; + partial_bytes_acked_ = 0; + + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "packet loss detected (not fast recovery). cwnd=" + << cwnd_ << " (" << old_cwnd + << "), ssthresh=" << ssthresh_ + << ", pba=" << partial_bytes_acked_ << " (" << old_pba + << ")"; + + // https://tools.ietf.org/html/rfc4960#section-7.2.4 + // "If not in Fast Recovery, enter Fast Recovery and mark the highest + // outstanding TSN as the Fast Recovery exit point." 
+ fast_recovery_exit_tsn_ = outstanding_data_.empty() + ? last_cumulative_tsn_ack_ + : outstanding_data_.rbegin()->first; + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "fast recovery initiated with exit_point=" + << *fast_recovery_exit_tsn_->Wrap(); + } else { + // https://tools.ietf.org/html/rfc4960#section-7.2.4 + // "While in Fast Recovery, the ssthresh and cwnd SHOULD NOT change for + // any destinations due to a subsequent Fast Recovery event (i.e., one + // SHOULD NOT reduce the cwnd further due to a subsequent Fast Retransmit)." + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "packet loss detected (fast recovery). No changes."; + } +} + +void RetransmissionQueue::UpdateReceiverWindow(uint32_t a_rwnd) { + rwnd_ = outstanding_bytes_ >= a_rwnd ? 0 : a_rwnd - outstanding_bytes_; +} + +void RetransmissionQueue::StartT3RtxTimerIfOutstandingData() { + // Note: Can't use `outstanding_bytes()` as that one doesn't count chunks to + // be retransmitted. + if (outstanding_data_.empty()) { + // https://tools.ietf.org/html/rfc4960#section-6.3.2 + // "Whenever all outstanding data sent to an address have been + // acknowledged, turn off the T3-rtx timer of that address. + // Note: Already stopped in `StopT3RtxTimerOnIncreasedCumulativeTsnAck`." + } else { + // https://tools.ietf.org/html/rfc4960#section-6.3.2 + // "Whenever a SACK is received that acknowledges the DATA chunk + // with the earliest outstanding TSN for that address, restart the T3-rtx + // timer for that address with its current RTO (if there is still + // outstanding data on that address)." + // "Whenever a SACK is received missing a TSN that was previously + // acknowledged via a Gap Ack Block, start the T3-rtx for the destination + // address to which the DATA chunk was originally transmitted if it is not + // already running." 
+ if (!t3_rtx_.is_running()) { + t3_rtx_.Start(); + } + } +} + +bool RetransmissionQueue::IsSackValid(const SackChunk& sack) const { + // https://tools.ietf.org/html/rfc4960#section-6.2.1 + // "If Cumulative TSN Ack is less than the Cumulative TSN Ack Point, + // then drop the SACK. Since Cumulative TSN Ack is monotonically increasing, + // a SACK whose Cumulative TSN Ack is less than the Cumulative TSN Ack Point + // indicates an out-of- order SACK." + // + // Note: Important not to drop SACKs with identical TSN to that previously + // received, as the gap ack blocks or dup tsn fields may have changed. + UnwrappedTSN cumulative_tsn_ack = + tsn_unwrapper_.PeekUnwrap(sack.cumulative_tsn_ack()); + if (cumulative_tsn_ack < last_cumulative_tsn_ack_) { + // https://tools.ietf.org/html/rfc4960#section-6.2.1 + // "If Cumulative TSN Ack is less than the Cumulative TSN Ack Point, + // then drop the SACK. Since Cumulative TSN Ack is monotonically + // increasing, a SACK whose Cumulative TSN Ack is less than the Cumulative + // TSN Ack Point indicates an out-of- order SACK." + return false; + } else if (outstanding_data_.empty() && + cumulative_tsn_ack > last_cumulative_tsn_ack_) { + // No in-flight data and cum-tsn-ack above what was last ACKed - not valid. + return false; + } else if (!outstanding_data_.empty() && + cumulative_tsn_ack > outstanding_data_.rbegin()->first) { + // There is in-flight data, but the cum-tsn-ack is beyond that - not valid. + return false; + } + return true; +} + +bool RetransmissionQueue::HandleSack(TimeMs now, const SackChunk& sack) { + if (!IsSackValid(sack)) { + return false; + } + + size_t old_outstanding_bytes = outstanding_bytes_; + size_t old_rwnd = rwnd_; + UnwrappedTSN cumulative_tsn_ack = + tsn_unwrapper_.Unwrap(sack.cumulative_tsn_ack()); + + if (sack.gap_ack_blocks().empty()) { + UpdateRTT(now, cumulative_tsn_ack); + } + + AckInfo ack_info(cumulative_tsn_ack); + // Erase all items up to cumulative_tsn_ack. 
+ RemoveAcked(cumulative_tsn_ack, ack_info); + + // ACK packets reported in the gap ack blocks + AckGapBlocks(cumulative_tsn_ack, sack.gap_ack_blocks(), ack_info); + + // NACK and possibly mark for retransmit chunks that weren't acked. + NackBetweenAckBlocks(cumulative_tsn_ack, sack.gap_ack_blocks(), ack_info); + + // Update of outstanding_data_ is now done. Congestion control remains. + UpdateReceiverWindow(sack.a_rwnd()); + + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Received SACK. Acked TSN: " + << StrJoin(ack_info.acked_tsns, ",", + [](rtc::StringBuilder& sb, TSN tsn) { + sb << *tsn; + }) + << ", cum_tsn_ack=" << *cumulative_tsn_ack.Wrap() << " (" + << *last_cumulative_tsn_ack_.Wrap() + << "), outstanding_bytes=" << outstanding_bytes_ << " (" + << old_outstanding_bytes << "), rwnd=" << rwnd_ << " (" + << old_rwnd << ")"; + + MaybeExitFastRecovery(cumulative_tsn_ack); + + if (cumulative_tsn_ack > last_cumulative_tsn_ack_) { + // https://tools.ietf.org/html/rfc4960#section-6.3.2 + // "Whenever a SACK is received that acknowledges the DATA chunk + // with the earliest outstanding TSN for that address, restart the T3-rtx + // timer for that address with its current RTO (if there is still + // outstanding data on that address)." + // Note: It may be started again in a bit further down. + t3_rtx_.Stop(); + + HandleIncreasedCumulativeTsnAck( + old_outstanding_bytes, ack_info.bytes_acked_by_cumulative_tsn_ack + + ack_info.bytes_acked_by_new_gap_ack_blocks); + } + + if (ack_info.has_packet_loss) { + is_in_fast_retransmit_ = true; + HandlePacketLoss(ack_info.highest_tsn_acked); + } + + // https://tools.ietf.org/html/rfc4960#section-8.2 + // "When an outstanding TSN is acknowledged [...] the endpoint shall clear + // the error counter ..." 
+ if (ack_info.bytes_acked_by_cumulative_tsn_ack > 0 || + ack_info.bytes_acked_by_new_gap_ack_blocks > 0) { + on_clear_retransmission_counter_(); + } + + last_cumulative_tsn_ack_ = cumulative_tsn_ack; + StartT3RtxTimerIfOutstandingData(); + RTC_DCHECK(IsConsistent()); + return true; +} + +void RetransmissionQueue::UpdateRTT(TimeMs now, + UnwrappedTSN cumulative_tsn_ack) { + // RTT updating is flawed in SCTP, as explained in e.g. Pedersen J, Griwodz C, + // Halvorsen P (2006) Considerations of SCTP retransmission delays for thin + // streams. + // Due to delayed acknowledgement, the SACK may be sent much later which + // increases the calculated RTT. + // TODO(boivie): Consider occasionally sending DATA chunks with I-bit set and + // use only those packets for measurement. + + auto it = outstanding_data_.find(cumulative_tsn_ack); + if (it != outstanding_data_.end()) { + if (!it->second.has_been_retransmitted()) { + // https://tools.ietf.org/html/rfc4960#section-6.3.1 + // "Karn's algorithm: RTT measurements MUST NOT be made using + // packets that were retransmitted (and thus for which it is ambiguous + // whether the reply was for the first instance of the chunk or for a + // later instance)" + DurationMs rtt = now - it->second.time_sent(); + on_new_rtt_(rtt); + } + } +} + +void RetransmissionQueue::HandleT3RtxTimerExpiry() { + size_t old_cwnd = cwnd_; + size_t old_outstanding_bytes = outstanding_bytes_; + // https://tools.ietf.org/html/rfc4960#section-6.3.3 + // "For the destination address for which the timer expires, adjust + // its ssthresh with rules defined in Section 7.2.3 and set the cwnd <- MTU." + ssthresh_ = std::max(cwnd_ / 2, 4 * options_.mtu); + cwnd_ = 1 * options_.mtu; + + // https://tools.ietf.org/html/rfc4960#section-6.3.3 + // "For the destination address for which the timer expires, set RTO + // <- RTO * 2 ("back off the timer"). 
The maximum value discussed in rule C7 + // above (RTO.max) may be used to provide an upper bound to this doubling + // operation." + + // Already done by the Timer implementation. + + // https://tools.ietf.org/html/rfc4960#section-6.3.3 + // "Determine how many of the earliest (i.e., lowest TSN) outstanding + // DATA chunks for the address for which the T3-rtx has expired will fit into + // a single packet" + + // https://tools.ietf.org/html/rfc4960#section-6.3.3 + // "Note: Any DATA chunks that were sent to the address for which the + // T3-rtx timer expired but did not fit in one MTU (rule E3 above) should be + // marked for retransmission and sent as soon as cwnd allows (normally, when a + // SACK arrives)." + for (auto& elem : outstanding_data_) { + UnwrappedTSN tsn = elem.first; + TxData& item = elem.second; + if (!item.is_acked()) { + NackItem(tsn, item, /*retransmit_now=*/true); + } + } + + // https://tools.ietf.org/html/rfc4960#section-6.3.3 + // "Start the retransmission timer T3-rtx on the destination address + // to which the retransmission is sent, if rule R1 above indicates to do so." + + // Already done by the Timer implementation. + + RTC_DLOG(LS_INFO) << log_prefix_ << "t3-rtx expired. 
new cwnd=" << cwnd_ + << " (" << old_cwnd << "), ssthresh=" << ssthresh_ + << ", outstanding_bytes " << outstanding_bytes_ << " (" + << old_outstanding_bytes << ")"; + RTC_DCHECK(IsConsistent()); +} + +bool RetransmissionQueue::NackItem(UnwrappedTSN tsn, + TxData& item, + bool retransmit_now) { + if (item.is_outstanding()) { + outstanding_bytes_ -= GetSerializedChunkSize(item.data()); + } + + switch (item.Nack(retransmit_now)) { + case TxData::NackAction::kNothing: + return false; + case TxData::NackAction::kRetransmit: + to_be_retransmitted_.insert(tsn); + RTC_DLOG(LS_VERBOSE) << log_prefix_ << *tsn.Wrap() + << " marked for retransmission"; + break; + case TxData::NackAction::kAbandon: + AbandonAllFor(item); + break; + } + return true; +} + +std::vector> +RetransmissionQueue::GetChunksToBeRetransmitted(size_t max_size) { + std::vector> result; + + for (auto it = to_be_retransmitted_.begin(); + it != to_be_retransmitted_.end();) { + UnwrappedTSN tsn = *it; + auto elem = outstanding_data_.find(tsn); + RTC_DCHECK(elem != outstanding_data_.end()); + TxData& item = elem->second; + RTC_DCHECK(item.should_be_retransmitted()); + RTC_DCHECK(!item.is_outstanding()); + RTC_DCHECK(!item.is_abandoned()); + RTC_DCHECK(!item.is_acked()); + + size_t serialized_size = GetSerializedChunkSize(item.data()); + if (serialized_size <= max_size) { + item.Retransmit(); + result.emplace_back(tsn.Wrap(), item.data().Clone()); + max_size -= serialized_size; + outstanding_bytes_ += serialized_size; + it = to_be_retransmitted_.erase(it); + } else { + ++it; + } + // No point in continuing if the packet is full. + if (max_size <= data_chunk_header_size_) { + break; + } + } + + return result; +} + +std::vector> RetransmissionQueue::GetChunksToSend( + TimeMs now, + size_t bytes_remaining_in_packet) { + // Chunks are always padded to even divisible by four. 
+ RTC_DCHECK(IsDivisibleBy4(bytes_remaining_in_packet)); + + std::vector> to_be_sent; + size_t old_outstanding_bytes = outstanding_bytes_; + size_t old_rwnd = rwnd_; + if (is_in_fast_retransmit()) { + // https://tools.ietf.org/html/rfc4960#section-7.2.4 + // "Determine how many of the earliest (i.e., lowest TSN) DATA chunks + // marked for retransmission will fit into a single packet ... Retransmit + // those K DATA chunks in a single packet. When a Fast Retransmit is being + // performed, the sender SHOULD ignore the value of cwnd and SHOULD NOT + // delay retransmission for this single packet." + is_in_fast_retransmit_ = false; + to_be_sent = GetChunksToBeRetransmitted(bytes_remaining_in_packet); + size_t to_be_sent_bytes = absl::c_accumulate( + to_be_sent, 0, [&](size_t r, const std::pair& d) { + return r + GetSerializedChunkSize(d.second); + }); + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "fast-retransmit: sending " + << to_be_sent.size() << " chunks, " << to_be_sent_bytes + << " bytes"; + } else { + // Normal sending. Calculate the bandwidth budget (how many bytes that is + // allowed to be sent), and fill that up first with chunks that are + // scheduled to be retransmitted. If there is still budget, send new chunks + // (which will have their TSN assigned here.) + size_t remaining_cwnd_bytes = + outstanding_bytes_ >= cwnd_ ? 
0 : cwnd_ - outstanding_bytes_; + size_t max_bytes = RoundDownTo4(std::min( + std::min(bytes_remaining_in_packet, rwnd()), remaining_cwnd_bytes)); + + to_be_sent = GetChunksToBeRetransmitted(max_bytes); + max_bytes -= absl::c_accumulate( + to_be_sent, 0, [&](size_t r, const std::pair& d) { + return r + GetSerializedChunkSize(d.second); + }); + + while (max_bytes > data_chunk_header_size_) { + RTC_DCHECK(IsDivisibleBy4(max_bytes)); + absl::optional chunk_opt = + send_queue_.Produce(now, max_bytes - data_chunk_header_size_); + if (!chunk_opt.has_value()) { + break; + } + + UnwrappedTSN tsn = next_tsn_; + next_tsn_.Increment(); + + // All chunks are always padded to be even divisible by 4. + size_t chunk_size = GetSerializedChunkSize(chunk_opt->data); + max_bytes -= chunk_size; + outstanding_bytes_ += chunk_size; + rwnd_ -= chunk_size; + auto item_it = + outstanding_data_ + .emplace(tsn, + RetransmissionQueue::TxData( + chunk_opt->data.Clone(), + partial_reliability_ ? chunk_opt->max_retransmissions + : absl::nullopt, + now, + partial_reliability_ ? chunk_opt->expires_at + : absl::nullopt)) + .first; + + if (item_it->second.has_expired(now)) { + // No need to send it - it was expired when it was in the send + // queue. + RTC_DLOG(LS_VERBOSE) + << log_prefix_ << "Marking freshly produced chunk " + << *item_it->first.Wrap() << " and message " + << *item_it->second.data().message_id << " as expired"; + AbandonAllFor(item_it->second); + } else { + to_be_sent.emplace_back(tsn.Wrap(), std::move(chunk_opt->data)); + } + } + } + + if (!to_be_sent.empty()) { + // https://tools.ietf.org/html/rfc4960#section-6.3.2 + // "Every time a DATA chunk is sent to any address (including a + // retransmission), if the T3-rtx timer of that address is not running, + // start it running so that it will expire after the RTO of that address." 
+ if (!t3_rtx_.is_running()) { + t3_rtx_.Start(); + } + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Sending TSN " + << StrJoin(to_be_sent, ",", + [&](rtc::StringBuilder& sb, + const std::pair& c) { + sb << *c.first; + }) + << " - " + << absl::c_accumulate( + to_be_sent, 0, + [&](size_t r, const std::pair& d) { + return r + GetSerializedChunkSize(d.second); + }) + << " bytes. outstanding_bytes=" << outstanding_bytes_ + << " (" << old_outstanding_bytes << "), cwnd=" << cwnd_ + << ", rwnd=" << rwnd_ << " (" << old_rwnd << ")"; + } + RTC_DCHECK(IsConsistent()); + return to_be_sent; +} + +std::vector> +RetransmissionQueue::GetChunkStatesForTesting() const { + std::vector> states; + states.emplace_back(last_cumulative_tsn_ack_.Wrap(), State::kAcked); + for (const auto& elem : outstanding_data_) { + State state; + if (elem.second.is_abandoned()) { + state = State::kAbandoned; + } else if (elem.second.should_be_retransmitted()) { + state = State::kToBeRetransmitted; + } else if (elem.second.is_acked()) { + state = State::kAcked; + } else if (elem.second.is_outstanding()) { + state = State::kInFlight; + } else { + state = State::kNacked; + } + + states.emplace_back(elem.first.Wrap(), state); + } + return states; +} + +bool RetransmissionQueue::ShouldSendForwardTsn(TimeMs now) { + if (!partial_reliability_) { + return false; + } + ExpireOutstandingChunks(now); + if (!outstanding_data_.empty()) { + auto it = outstanding_data_.begin(); + return it->first == last_cumulative_tsn_ack_.next_value() && + it->second.is_abandoned(); + } + RTC_DCHECK(IsConsistent()); + return false; +} + +void RetransmissionQueue::TxData::Ack() { + ack_state_ = AckState::kAcked; + should_be_retransmitted_ = false; +} + +RetransmissionQueue::TxData::NackAction RetransmissionQueue::TxData::Nack( + bool retransmit_now) { + ack_state_ = AckState::kNacked; + ++nack_count_; + if ((retransmit_now || nack_count_ >= kNumberOfNacksForRetransmission) && + !is_abandoned_) { + // Nacked enough times - it's considered 
lost. + if (!max_retransmissions_.has_value() || + num_retransmissions_ < max_retransmissions_) { + should_be_retransmitted_ = true; + return NackAction::kRetransmit; + } + Abandon(); + return NackAction::kAbandon; + } + return NackAction::kNothing; +} + +void RetransmissionQueue::TxData::Retransmit() { + ack_state_ = AckState::kUnacked; + should_be_retransmitted_ = false; + + nack_count_ = 0; + ++num_retransmissions_; +} + +void RetransmissionQueue::TxData::Abandon() { + is_abandoned_ = true; + should_be_retransmitted_ = false; +} + +bool RetransmissionQueue::TxData::has_expired(TimeMs now) const { + return expires_at_.has_value() && *expires_at_ <= now; +} + +void RetransmissionQueue::ExpireOutstandingChunks(TimeMs now) { + for (const auto& elem : outstanding_data_) { + UnwrappedTSN tsn = elem.first; + const TxData& item = elem.second; + + // Chunks that are nacked can be expired. Care should be taken not to expire + // unacked (in-flight) chunks as they might have been received, but the SACK + // is either delayed or in-flight and may be received later. + if (item.is_abandoned()) { + // Already abandoned. + } else if (item.is_nacked() && item.has_expired(now)) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Marking nacked chunk " + << *tsn.Wrap() << " and message " + << *item.data().message_id << " as expired"; + AbandonAllFor(item); + } else { + // A non-expired chunk. No need to iterate any further. + break; + } + } +} + +void RetransmissionQueue::AbandonAllFor( + const RetransmissionQueue::TxData& item) { + // Erase all remaining chunks from the producer, if any. + if (send_queue_.Discard(item.data().is_unordered, item.data().stream_id, + item.data().message_id)) { + // There were remaining chunks to be produced for this message. 
Since the + // receiver may have already received all chunks (up till now) for this + // message, we can't just FORWARD-TSN to the last fragment in this + // (abandoned) message and start sending a new message, as the receiver will + // then see a new message before the end of the previous one was seen (or + // skipped over). So create a new fragment, representing the end, that the + // received will never see as it is abandoned immediately and used as cum + // TSN in the sent FORWARD-TSN. + UnwrappedTSN tsn = next_tsn_; + next_tsn_.Increment(); + Data message_end(item.data().stream_id, item.data().ssn, + item.data().message_id, item.data().fsn, item.data().ppid, + std::vector(), Data::IsBeginning(false), + Data::IsEnd(true), item.data().is_unordered); + TxData& added_item = + outstanding_data_ + .emplace(tsn, RetransmissionQueue::TxData(std::move(message_end), + absl::nullopt, TimeMs(0), + absl::nullopt)) + .first->second; + // The added chunk shouldn't be included in `outstanding_bytes`, so set it + // as acked. 
+ added_item.Ack(); + RTC_DLOG(LS_VERBOSE) << log_prefix_ + << "Adding unsent end placeholder for message at tsn=" + << *tsn.Wrap(); + } + for (auto& elem : outstanding_data_) { + UnwrappedTSN tsn = elem.first; + TxData& other = elem.second; + + if (!other.is_abandoned() && + other.data().stream_id == item.data().stream_id && + other.data().is_unordered == item.data().is_unordered && + other.data().message_id == item.data().message_id) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Marking chunk " << *tsn.Wrap() + << " as abandoned"; + if (other.should_be_retransmitted()) { + to_be_retransmitted_.erase(tsn); + } + other.Abandon(); + } + } +} + +ForwardTsnChunk RetransmissionQueue::CreateForwardTsn() const { + std::unordered_map + skipped_per_ordered_stream; + UnwrappedTSN new_cumulative_ack = last_cumulative_tsn_ack_; + + for (const auto& elem : outstanding_data_) { + UnwrappedTSN tsn = elem.first; + const TxData& item = elem.second; + + if ((tsn != new_cumulative_ack.next_value()) || !item.is_abandoned()) { + break; + } + new_cumulative_ack = tsn; + if (!item.data().is_unordered && + item.data().ssn > skipped_per_ordered_stream[item.data().stream_id]) { + skipped_per_ordered_stream[item.data().stream_id] = item.data().ssn; + } + } + + std::vector skipped_streams; + skipped_streams.reserve(skipped_per_ordered_stream.size()); + for (const auto& elem : skipped_per_ordered_stream) { + skipped_streams.emplace_back(elem.first, elem.second); + } + return ForwardTsnChunk(new_cumulative_ack.Wrap(), std::move(skipped_streams)); +} + +IForwardTsnChunk RetransmissionQueue::CreateIForwardTsn() const { + std::unordered_map, MID, UnorderedStreamHash> + skipped_per_stream; + UnwrappedTSN new_cumulative_ack = last_cumulative_tsn_ack_; + + for (const auto& elem : outstanding_data_) { + UnwrappedTSN tsn = elem.first; + const TxData& item = elem.second; + + if ((tsn != new_cumulative_ack.next_value()) || !item.is_abandoned()) { + break; + } + new_cumulative_ack = tsn; + std::pair 
stream_id = + std::make_pair(item.data().is_unordered, item.data().stream_id); + + if (item.data().message_id > skipped_per_stream[stream_id]) { + skipped_per_stream[stream_id] = item.data().message_id; + } + } + + std::vector skipped_streams; + skipped_streams.reserve(skipped_per_stream.size()); + for (const auto& elem : skipped_per_stream) { + const std::pair& stream = elem.first; + MID message_id = elem.second; + skipped_streams.emplace_back(stream.first, stream.second, message_id); + } + + return IForwardTsnChunk(new_cumulative_ack.Wrap(), + std::move(skipped_streams)); +} + +void RetransmissionQueue::PrepareResetStreams( + rtc::ArrayView streams) { + // TODO(boivie): These calls are now only affecting the send queue. The + // packet buffer can also change behavior - for example draining the chunk + // producer and eagerly assign TSNs so that an "Outgoing SSN Reset Request" + // can be sent quickly, with a known `sender_last_assigned_tsn`. + send_queue_.PrepareResetStreams(streams); +} +bool RetransmissionQueue::CanResetStreams() const { + return send_queue_.CanResetStreams(); +} +void RetransmissionQueue::CommitResetStreams() { + send_queue_.CommitResetStreams(); +} +void RetransmissionQueue::RollbackResetStreams() { + send_queue_.RollbackResetStreams(); +} + +} // namespace dcsctp diff --git a/net/dcsctp/tx/retransmission_queue.h b/net/dcsctp/tx/retransmission_queue.h new file mode 100644 index 0000000000..c5a6a04db8 --- /dev/null +++ b/net/dcsctp/tx/retransmission_queue.h @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_TX_RETRANSMISSION_QUEUE_H_ +#define NET_DCSCTP_TX_RETRANSMISSION_QUEUE_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/sequence_numbers.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/retransmission_timeout.h" +#include "net/dcsctp/tx/send_queue.h" + +namespace dcsctp { + +// The RetransmissionQueue manages all DATA/I-DATA chunks that are in-flight and +// schedules them to be retransmitted if necessary. Chunks are retransmitted +// when they have been lost for a number of consecutive SACKs, or when the +// retransmission timer, `t3_rtx` expires. +// +// As congestion control is tightly connected with the state of transmitted +// packets, that's also managed here to limit the amount of data that is +// in-flight (sent, but not yet acknowledged). +class RetransmissionQueue { + public: + static constexpr size_t kMinimumFragmentedPayload = 10; + // State for DATA chunks (message fragments) in the queue - used in tests. + enum class State { + // The chunk has been sent but not received yet (from the sender's point of + // view, as no SACK has been received yet that reference this chunk). + kInFlight, + // A SACK has been received which explicitly marked this chunk as missing - + // it's now NACKED and may be retransmitted if NACKED enough times. + kNacked, + // A chunk that will be retransmitted when possible. + kToBeRetransmitted, + // A SACK has been received which explicitly marked this chunk as received. + kAcked, + // A chunk whose message has expired or has been retransmitted too many + // times (RFC3758). 
It will not be retransmitted anymore. + kAbandoned, + }; + + // Creates a RetransmissionQueue which will send data using `initial_tsn` as + // the first TSN to use for sent fragments. It will poll data from + // `send_queue` and call `on_send_queue_empty` when it is empty. When + // SACKs are received, it will estimate the RTT, and call `on_new_rtt`. When + // an outstanding chunk has been ACKed, it will call + // `on_clear_retransmission_counter` and will also use `t3_rtx`, which is the + // SCTP retransmission timer to manage retransmissions. + RetransmissionQueue(absl::string_view log_prefix, + TSN initial_tsn, + size_t a_rwnd, + SendQueue& send_queue, + std::function on_new_rtt, + std::function on_clear_retransmission_counter, + Timer& t3_rtx, + const DcSctpOptions& options, + bool supports_partial_reliability = true, + bool use_message_interleaving = false); + + // Handles a received SACK. Returns true if the `sack` was processed and + // false if it was discarded due to received out-of-order and not relevant. + bool HandleSack(TimeMs now, const SackChunk& sack); + + // Handles an expired retransmission timer. + void HandleT3RtxTimerExpiry(); + + // Returns a list of chunks to send that would fit in one SCTP packet with + // `bytes_remaining_in_packet` bytes available. This may be further limited by + // the congestion control windows. Note that `ShouldSendForwardTSN` must be + // called prior to this method, to abandon expired chunks, as this method will + // not expire any chunks. + std::vector> GetChunksToSend( + TimeMs now, + size_t bytes_remaining_in_packet); + + // Returns the internal state of all queued chunks. This is only used in + // unit-tests. + std::vector> GetChunkStatesForTesting() const; + + // Returns the next TSN that will be allocated for sent DATA chunks. + TSN next_tsn() const { return next_tsn_.Wrap(); } + + // Returns the size of the congestion window, in bytes. This is the number of + // bytes that may be in-flight. 
+ size_t cwnd() const { return cwnd_; } + + // Overrides the current congestion window size. + void set_cwnd(size_t cwnd) { cwnd_ = cwnd; } + + // Returns the current receiver window size. + size_t rwnd() const { return rwnd_; } + + // Returns the number of bytes of packets that are in-flight. + size_t outstanding_bytes() const { return outstanding_bytes_; } + + // Given the current time `now`, it will evaluate if there are chunks that + // have expired and that need to be discarded. It returns true if a + // FORWARD-TSN should be sent. + bool ShouldSendForwardTsn(TimeMs now); + + // Creates a FORWARD-TSN chunk. + ForwardTsnChunk CreateForwardTsn() const; + + // Creates an I-FORWARD-TSN chunk. + IForwardTsnChunk CreateIForwardTsn() const; + + // See the SendQueue for a longer description of these methods related + // to stream resetting. + void PrepareResetStreams(rtc::ArrayView streams); + bool CanResetStreams() const; + void CommitResetStreams(); + void RollbackResetStreams(); + + private: + enum class CongestionAlgorithmPhase { + kSlowStart, + kCongestionAvoidance, + }; + + // A fragmented message's DATA chunk while in the retransmission queue, and + // its associated metadata. + class TxData { + public: + enum class NackAction { + kNothing, + kRetransmit, + kAbandon, + }; + + explicit TxData(Data data, + absl::optional max_retransmissions, + TimeMs time_sent, + absl::optional expires_at) + : max_retransmissions_(max_retransmissions), + time_sent_(time_sent), + expires_at_(expires_at), + data_(std::move(data)) {} + + TimeMs time_sent() const { return time_sent_; } + + const Data& data() const { return data_; } + + // Acks an item. + void Ack(); + + // Nacks an item. If it has been nacked enough times, or if `retransmit_now` + // is set, it might be marked for retransmission. If the item has reached + // its max retransmission value, it will instead be abandoned. The action + // performed is indicated as return value. 
+ NackAction Nack(bool retransmit_now = false); + + // Prepares the item to be retransmitted. Sets it as outstanding and + // clears all nack counters. + void Retransmit(); + + // Marks this item as abandoned. + void Abandon(); + + bool is_outstanding() const { return ack_state_ == AckState::kUnacked; } + bool is_acked() const { return ack_state_ == AckState::kAcked; } + bool is_nacked() const { return ack_state_ == AckState::kNacked; } + bool is_abandoned() const { return is_abandoned_; } + + // Indicates if this chunk should be retransmitted. + bool should_be_retransmitted() const { return should_be_retransmitted_; } + // Indicates if this chunk has ever been retransmitted. + bool has_been_retransmitted() const { return num_retransmissions_ > 0; } + + // Given the current time, and the current state of this DATA chunk, it will + // indicate if it has expired (SCTP Partial Reliability Extension). + bool has_expired(TimeMs now) const; + + private: + enum class AckState { + kUnacked, + kAcked, + kNacked, + }; + // Indicates the presence of this chunk, if it's in flight (Unacked), has + // been received (Acked) or is lost (Nacked). + AckState ack_state_ = AckState::kUnacked; + // Indicates if this chunk has been abandoned, which is a terminal state. + bool is_abandoned_ = false; + // Indicates if this chunk should be retransmitted. + bool should_be_retransmitted_ = false; + + // The number of times the DATA chunk has been nacked (by having received a + // SACK which doesn't include it). Will be cleared on retransmissions. + size_t nack_count_ = 0; + // The number of times the DATA chunk has been retransmitted. + size_t num_retransmissions_ = 0; + // If the message was sent with a maximum number of retransmissions, this is + // set to that number. The value zero (0) means that it will never be + // retransmitted. + const absl::optional max_retransmissions_; + // When the packet was sent, and placed in this queue. 
+ const TimeMs time_sent_; + // If the message was sent with an expiration time, this is set. + const absl::optional expires_at_; + // The actual data to send/retransmit. + Data data_; + }; + + // Contains variables scoped to a processing of an incoming SACK. + struct AckInfo { + explicit AckInfo(UnwrappedTSN cumulative_tsn_ack) + : highest_tsn_acked(cumulative_tsn_ack) {} + + // All TSNs that have been acked (for the first time) in this SACK. + std::vector acked_tsns; + + // Bytes acked by increasing cumulative_tsn_ack in this SACK + size_t bytes_acked_by_cumulative_tsn_ack = 0; + + // Bytes acked by gap blocks in this SACK. + size_t bytes_acked_by_new_gap_ack_blocks = 0; + + // Indicates if this SACK indicates that packet loss has occurred. Just + // because a packet is missing in the SACK doesn't necessarily mean that + // there is packet loss as that packet might be in-flight and received + // out-of-order. But when it has been reported missing consecutive times, it + // will eventually be considered "lost" and this will be set. + bool has_packet_loss = false; + + // Highest TSN Newly Acknowledged, an SCTP variable. + UnwrappedTSN highest_tsn_acked; + }; + + bool IsConsistent() const; + + // Returns how large a chunk will be, serialized, carrying the data + size_t GetSerializedChunkSize(const Data& data) const; + + // Indicates if the congestion control algorithm is in "fast recovery". + bool is_in_fast_recovery() const { + return fast_recovery_exit_tsn_.has_value(); + } + + // Indicates if the congestion control algorithm is in "fast retransmit". + bool is_in_fast_retransmit() const { return is_in_fast_retransmit_; } + + // Indicates if the provided SACK is valid given what has previously been + // received. If it returns false, the SACK is most likely a duplicate of + // something already seen, so this returning false doesn't necessarily mean + // that the SACK is illegal. 
+ bool IsSackValid(const SackChunk& sack) const; + + // Given a `cumulative_tsn_ack` from an incoming SACK, will remove those items + // in the retransmission queue up until this value and will update `ack_info` + // by setting `bytes_acked_by_cumulative_tsn_ack` and `acked_tsns`. + void RemoveAcked(UnwrappedTSN cumulative_tsn_ack, AckInfo& ack_info); + + // Helper method to nack an item and perform the correct operations given the + // action indicated when nacking an item (e.g. retransmitting or abandoning). + // The return value indicate if an action was performed, meaning that packet + // loss was detected and acted upon. + bool NackItem(UnwrappedTSN cumulative_tsn_ack, + TxData& item, + bool retransmit_now); + + // Will mark the chunks covered by the `gap_ack_blocks` from an incoming SACK + // as "acked" and update `ack_info` by adding new TSNs to `added_tsns`. + void AckGapBlocks(UnwrappedTSN cumulative_tsn_ack, + rtc::ArrayView gap_ack_blocks, + AckInfo& ack_info); + + // Mark chunks reported as "missing", as "nacked" or "to be retransmitted" + // depending how many times this has happened. Only packets up until + // `ack_info.highest_tsn_acked` (highest TSN newly acknowledged) are + // nacked/retransmitted. The method will set `ack_info.has_packet_loss`. + void NackBetweenAckBlocks( + UnwrappedTSN cumulative_tsn_ack, + rtc::ArrayView gap_ack_blocks, + AckInfo& ack_info); + + // When a SACK chunk is received, this method will be called which _may_ call + // into the `RetransmissionTimeout` to update the RTO. + void UpdateRTT(TimeMs now, UnwrappedTSN cumulative_tsn_ack); + + // If the congestion control is in "fast recovery mode", this may be exited + // now. + void MaybeExitFastRecovery(UnwrappedTSN cumulative_tsn_ack); + + // If chunks have been ACKed, stop the retransmission timer. 
+ void StopT3RtxTimerOnIncreasedCumulativeTsnAck( + UnwrappedTSN cumulative_tsn_ack); + + // Update the congestion control algorithm given as the cumulative ack TSN + // value has increased, as reported in an incoming SACK chunk. + void HandleIncreasedCumulativeTsnAck(size_t outstanding_bytes, + size_t total_bytes_acked); + // Update the congestion control algorithm, given as packet loss has been + // detected, as reported in an incoming SACK chunk. + void HandlePacketLoss(UnwrappedTSN highest_tsn_acked); + // Update the view of the receiver window size. + void UpdateReceiverWindow(uint32_t a_rwnd); + // Given `max_size` of space left in a packet, which chunks can be added to + // it? + std::vector> GetChunksToBeRetransmitted(size_t max_size); + // If there is data sent and not ACKED, ensure that the retransmission timer + // is running. + void StartT3RtxTimerIfOutstandingData(); + + // Given the current time `now_ms`, expire and abandon outstanding (sent at + // least once) chunks that have a limited lifetime. + void ExpireOutstandingChunks(TimeMs now); + // Given that a message fragment, `item` has been abandoned, abandon all other + // fragments that share the same message - both never-before-sent fragments + // that are still in the SendQueue and outstanding chunks. + void AbandonAllFor(const RetransmissionQueue::TxData& item); + + // Returns the current congestion control algorithm phase. + CongestionAlgorithmPhase phase() const { + return (cwnd_ <= ssthresh_) + ? CongestionAlgorithmPhase::kSlowStart + : CongestionAlgorithmPhase::kCongestionAvoidance; + } + + const DcSctpOptions options_; + // If the peer supports RFC3758 - SCTP Partial Reliability Extension. + const bool partial_reliability_; + const std::string log_prefix_; + // The size of the data chunk (DATA/I-DATA) header that is used. 
+ const size_t data_chunk_header_size_; + // Called when a new RTT measurement has been done + const std::function on_new_rtt_; + // Called when a SACK has been seen that cleared the retransmission counter. + const std::function on_clear_retransmission_counter_; + // The retransmission counter. + Timer& t3_rtx_; + // Unwraps TSNs + UnwrappedTSN::Unwrapper tsn_unwrapper_; + + // Congestion Window. Number of bytes that may be in-flight (sent, not acked). + size_t cwnd_; + // Receive Window. Number of bytes available in the receiver's RX buffer. + size_t rwnd_; + // Slow Start Threshold. See RFC4960. + size_t ssthresh_; + // Partial Bytes Acked. See RFC4960. + size_t partial_bytes_acked_ = 0; + // If set, fast recovery is enabled until this TSN has been cumulative + // acked. + absl::optional fast_recovery_exit_tsn_ = absl::nullopt; + // Indicates if the congestion algorithm is in fast retransmit. + bool is_in_fast_retransmit_ = false; + + // Next TSN to used. + UnwrappedTSN next_tsn_; + // The last cumulative TSN ack number + UnwrappedTSN last_cumulative_tsn_ack_; + // The send queue. + SendQueue& send_queue_; + // All the outstanding data chunks that are in-flight and that have not been + // cumulative acked. Note that it also contains chunks that have been acked in + // gap ack blocks. + std::map outstanding_data_; + // Data chunks that are to be retransmitted. + std::set to_be_retransmitted_; + // The number of bytes that are in-flight (sent but not yet acked or nacked). + size_t outstanding_bytes_ = 0; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_TX_RETRANSMISSION_QUEUE_H_ diff --git a/net/dcsctp/tx/retransmission_queue_test.cc b/net/dcsctp/tx/retransmission_queue_test.cc new file mode 100644 index 0000000000..4aa76d66e5 --- /dev/null +++ b/net/dcsctp/tx/retransmission_queue_test.cc @@ -0,0 +1,1182 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/tx/retransmission_queue.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/chunk/data_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/forward_tsn_common.h" +#include "net/dcsctp/packet/chunk/iforward_tsn_chunk.h" +#include "net/dcsctp/packet/chunk/sack_chunk.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/testing/data_generator.h" +#include "net/dcsctp/timer/fake_timeout.h" +#include "net/dcsctp/timer/timer.h" +#include "net/dcsctp/tx/mock_send_queue.h" +#include "net/dcsctp/tx/send_queue.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::MockFunction; +using State = ::dcsctp::RetransmissionQueue::State; +using ::testing::_; +using ::testing::ElementsAre; +using ::testing::IsEmpty; +using ::testing::NiceMock; +using ::testing::Pair; +using ::testing::Return; +using ::testing::SizeIs; +using ::testing::UnorderedElementsAre; + +constexpr uint32_t kArwnd = 100000; +constexpr uint32_t kMaxMtu = 1191; + +class RetransmissionQueueTest : public testing::Test { + protected: + RetransmissionQueueTest() + : gen_(MID(42)), + timeout_manager_([this]() { return now_; }), + timer_manager_([this]() { return timeout_manager_.CreateTimeout(); }), + timer_(timer_manager_.CreateTimer( + "test/t3_rtx", + []() { return absl::nullopt; }, + TimerOptions(DurationMs(0)))) {} + + std::function CreateChunk() { + return [this](TimeMs now, size_t max_size) { + 
return SendQueue::DataToSend(gen_.Ordered({1, 2, 3, 4}, "BE")); + }; + } + + std::vector GetSentPacketTSNs(RetransmissionQueue& queue) { + std::vector tsns; + for (const auto& elem : queue.GetChunksToSend(now_, 10000)) { + tsns.push_back(elem.first); + } + return tsns; + } + + RetransmissionQueue CreateQueue(bool supports_partial_reliability = true, + bool use_message_interleaving = false) { + DcSctpOptions options; + options.mtu = kMaxMtu; + return RetransmissionQueue( + "", TSN(10), kArwnd, producer_, on_rtt_.AsStdFunction(), + on_clear_retransmission_counter_.AsStdFunction(), *timer_, options, + supports_partial_reliability, use_message_interleaving); + } + + DataGenerator gen_; + TimeMs now_ = TimeMs(0); + FakeTimeoutManager timeout_manager_; + TimerManager timer_manager_; + NiceMock> on_rtt_; + NiceMock> on_clear_retransmission_counter_; + NiceMock producer_; + std::unique_ptr timer_; +}; + +TEST_F(RetransmissionQueueTest, InitialAckedPrevTsn) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked))); +} + +TEST_F(RetransmissionQueueTest, SendOneChunk) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_THAT(GetSentPacketTSNs(queue), testing::ElementsAre(TSN(10))); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight))); +} + +TEST_F(RetransmissionQueueTest, SendOneChunkAndAck) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_THAT(GetSentPacketTSNs(queue), testing::ElementsAre(TSN(10))); + + queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(10), State::kAcked))); 
+} + +TEST_F(RetransmissionQueueTest, SendThreeChunksAndAckTwo) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_THAT(GetSentPacketTSNs(queue), + testing::ElementsAre(TSN(10), TSN(11), TSN(12))); + + queue.HandleSack(now_, SackChunk(TSN(11), kArwnd, {}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kInFlight))); +} + +TEST_F(RetransmissionQueueTest, AckWithGapBlocksFromRFC4960Section334) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_THAT(GetSentPacketTSNs(queue), + testing::ElementsAre(TSN(10), TSN(11), TSN(12), TSN(13), TSN(14), + TSN(15), TSN(16), TSN(17))); + + queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, + {SackChunk::GapAckBlock(2, 3), + SackChunk::GapAckBlock(5, 5)}, + {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kNacked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kNacked), // + Pair(TSN(17), State::kAcked))); +} + +TEST_F(RetransmissionQueueTest, ResendPacketsWhenNackedThreeTimes) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); 
+ + EXPECT_THAT(GetSentPacketTSNs(queue), + testing::ElementsAre(TSN(10), TSN(11), TSN(12), TSN(13), TSN(14), + TSN(15), TSN(16), TSN(17))); + + // Send more chunks, but leave some as gaps to force retransmission after + // three NACKs. + + // Send 18 + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + EXPECT_THAT(GetSentPacketTSNs(queue), testing::ElementsAre(TSN(18))); + + // Ack 12, 14-15, 17-18 + queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, + {SackChunk::GapAckBlock(2, 3), + SackChunk::GapAckBlock(5, 6)}, + {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kNacked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kNacked), // + Pair(TSN(17), State::kAcked), // + Pair(TSN(18), State::kAcked))); + + // Send 19 + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + EXPECT_THAT(GetSentPacketTSNs(queue), testing::ElementsAre(TSN(19))); + + // Ack 12, 14-15, 17-19 + queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, + {SackChunk::GapAckBlock(2, 3), + SackChunk::GapAckBlock(5, 7)}, + {})); + + // Send 20 + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + EXPECT_THAT(GetSentPacketTSNs(queue), testing::ElementsAre(TSN(20))); + + // Ack 12, 14-15, 17-20 + queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, + {SackChunk::GapAckBlock(2, 3), + SackChunk::GapAckBlock(5, 8)}, + {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kToBeRetransmitted), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kToBeRetransmitted), // + Pair(TSN(17), State::kAcked), // + Pair(TSN(18), State::kAcked), // + Pair(TSN(19), 
State::kAcked), // + Pair(TSN(20), State::kAcked))); + + // This will trigger "fast retransmit" mode and only chunks 13 and 16 will be + // resent right now. The send queue will not even be queried. + EXPECT_CALL(producer_, Produce).Times(0); + + EXPECT_THAT(GetSentPacketTSNs(queue), testing::ElementsAre(TSN(13), TSN(16))); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kInFlight), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kInFlight), // + Pair(TSN(17), State::kAcked), // + Pair(TSN(18), State::kAcked), // + Pair(TSN(19), State::kAcked), // + Pair(TSN(20), State::kAcked))); +} + +TEST_F(RetransmissionQueueTest, CanOnlyProduceTwoPacketsButWantsToSendThree) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + return SendQueue::DataToSend(gen_.Ordered({1, 2, 3, 4}, "BE")); + }) + .WillOnce([this](TimeMs, size_t) { + return SendQueue::DataToSend(gen_.Ordered({1, 2, 3, 4}, "BE")); + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _))); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight))); +} + +TEST_F(RetransmissionQueueTest, RetransmitsOnT3Expiry) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + return SendQueue::DataToSend(gen_.Ordered({1, 2, 3, 4}, "BE")); + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _))); + 
EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight))); + + // Will force chunks to be retransmitted + queue.HandleT3RtxTimerExpiry(); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kToBeRetransmitted))); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kToBeRetransmitted))); + + std::vector> chunks_to_rtx = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_rtx, ElementsAre(Pair(TSN(10), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight))); +} + +TEST_F(RetransmissionQueueTest, LimitedRetransmissionOnlyWithRfc3758Support) { + RetransmissionQueue queue = + CreateQueue(/*supports_partial_reliability=*/false); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "BE")); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight))); + + // Will force chunks to be retransmitted + queue.HandleT3RtxTimerExpiry(); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kToBeRetransmitted))); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(0); + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); +} + +TEST_F(RetransmissionQueueTest, LimitsRetransmissionsAsUdp) { + 
RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "BE")); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight))); + + // Will force chunks to be retransmitted + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(1); + + queue.HandleT3RtxTimerExpiry(); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned))); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned))); + + std::vector> chunks_to_rtx = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_rtx, testing::IsEmpty()); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned))); +} + +TEST_F(RetransmissionQueueTest, LimitsRetransmissionsToThreeSends) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "BE")); + dts.max_retransmissions = 3; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + 
Pair(TSN(10), State::kInFlight))); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(0); + + // Retransmission 1 + queue.HandleT3RtxTimerExpiry(); + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), SizeIs(1)); + + // Retransmission 2 + queue.HandleT3RtxTimerExpiry(); + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), SizeIs(1)); + + // Retransmission 3 + queue.HandleT3RtxTimerExpiry(); + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), SizeIs(1)); + + // Retransmission 4 - not allowed. + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(1); + queue.HandleT3RtxTimerExpiry(); + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), IsEmpty()); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned))); +} + +TEST_F(RetransmissionQueueTest, RetransmitsWhenSendBufferIsFullT3Expiry) { + RetransmissionQueue queue = CreateQueue(); + static constexpr size_t kCwnd = 1200; + queue.set_cwnd(kCwnd); + EXPECT_EQ(queue.cwnd(), kCwnd); + EXPECT_EQ(queue.outstanding_bytes(), 0u); + + std::vector payload(1000); + EXPECT_CALL(producer_, Produce) + .WillOnce([this, payload](TimeMs, size_t) { + return SendQueue::DataToSend(gen_.Ordered(payload, "BE")); + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1500); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight))); + EXPECT_EQ(queue.outstanding_bytes(), payload.size() + DataChunk::kHeaderSize); + + // Will force chunks to be retransmitted + queue.HandleT3RtxTimerExpiry(); + + 
EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kToBeRetransmitted))); + EXPECT_EQ(queue.outstanding_bytes(), 0u); + + std::vector> chunks_to_rtx = + queue.GetChunksToSend(now_, 1500); + EXPECT_THAT(chunks_to_rtx, ElementsAre(Pair(TSN(10), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight))); + EXPECT_EQ(queue.outstanding_bytes(), payload.size() + DataChunk::kHeaderSize); +} + +TEST_F(RetransmissionQueueTest, ProducesValidForwardTsn) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({5, 6, 7, 8}, "")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({9, 10, 11, 12}, "")); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + // Send and ack first chunk (TSN 10) + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), + Pair(TSN(12), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight))); + + // Chunk 10 is acked, but the remaining are lost + queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {})); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(true)); + + queue.HandleT3RtxTimerExpiry(); + + // NOTE: The TSN=13 represents the end fragment. 
+ EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(10), State::kAcked), // + Pair(TSN(11), State::kAbandoned), // + Pair(TSN(12), State::kAbandoned), // + Pair(TSN(13), State::kAbandoned))); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + ForwardTsnChunk forward_tsn = queue.CreateForwardTsn(); + EXPECT_EQ(forward_tsn.new_cumulative_tsn(), TSN(13)); + EXPECT_THAT(forward_tsn.skipped_streams(), + UnorderedElementsAre( + ForwardTsnChunk::SkippedStream(StreamID(1), SSN(42)))); +} + +TEST_F(RetransmissionQueueTest, ProducesValidForwardTsnWhenFullySent) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({5, 6, 7, 8}, "")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({9, 10, 11, 12}, "E")); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + // Send and ack first chunk (TSN 10) + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), + Pair(TSN(12), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight))); + + // Chunk 10 is acked, but the remaining are lost + queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {})); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(false)); + + queue.HandleT3RtxTimerExpiry(); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(10), State::kAcked), // + Pair(TSN(11), State::kAbandoned), // + 
Pair(TSN(12), State::kAbandoned))); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + ForwardTsnChunk forward_tsn = queue.CreateForwardTsn(); + EXPECT_EQ(forward_tsn.new_cumulative_tsn(), TSN(12)); + EXPECT_THAT(forward_tsn.skipped_streams(), + UnorderedElementsAre( + ForwardTsnChunk::SkippedStream(StreamID(1), SSN(42)))); +} + +TEST_F(RetransmissionQueueTest, ProducesValidIForwardTsn) { + RetransmissionQueue queue = CreateQueue(/*use_message_interleaving=*/true); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + DataGeneratorOptions opts; + opts.stream_id = StreamID(1); + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B", opts)); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + DataGeneratorOptions opts; + opts.stream_id = StreamID(2); + SendQueue::DataToSend dts(gen_.Unordered({1, 2, 3, 4}, "B", opts)); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + DataGeneratorOptions opts; + opts.stream_id = StreamID(3); + SendQueue::DataToSend dts(gen_.Ordered({9, 10, 11, 12}, "B", opts)); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + DataGeneratorOptions opts; + opts.stream_id = StreamID(4); + SendQueue::DataToSend dts(gen_.Ordered({13, 14, 15, 16}, "B", opts)); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), + Pair(TSN(12), _), Pair(TSN(13), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight), // + Pair(TSN(13), State::kInFlight))); + + // Chunk 13 is acked, but the remaining are lost + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, 
{SackChunk::GapAckBlock(4, 4)}, {})); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kNacked), // + Pair(TSN(11), State::kNacked), // + Pair(TSN(12), State::kNacked), // + Pair(TSN(13), State::kAcked))); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(true)); + EXPECT_CALL(producer_, Discard(IsUnordered(true), StreamID(2), MID(42))) + .WillOnce(Return(true)); + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(3), MID(42))) + .WillOnce(Return(true)); + + queue.HandleT3RtxTimerExpiry(); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned), // + Pair(TSN(11), State::kAbandoned), // + Pair(TSN(12), State::kAbandoned), // + Pair(TSN(13), State::kAcked), + // Representing end fragments of stream 1-3 + Pair(TSN(14), State::kAbandoned), // + Pair(TSN(15), State::kAbandoned), // + Pair(TSN(16), State::kAbandoned))); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + IForwardTsnChunk forward_tsn1 = queue.CreateIForwardTsn(); + EXPECT_EQ(forward_tsn1.new_cumulative_tsn(), TSN(12)); + EXPECT_THAT( + forward_tsn1.skipped_streams(), + UnorderedElementsAre(IForwardTsnChunk::SkippedStream( + IsUnordered(false), StreamID(1), MID(42)), + IForwardTsnChunk::SkippedStream( + IsUnordered(true), StreamID(2), MID(42)), + IForwardTsnChunk::SkippedStream( + IsUnordered(false), StreamID(3), MID(42)))); + + // When TSN 13 is acked, the placeholder "end fragments" must be skipped as + // well. + + // A receiver is more likely to ack TSN 13, but do it incrementally. 
+ queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, {}, {})); + + EXPECT_CALL(producer_, Discard).Times(0); + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + queue.HandleSack(now_, SackChunk(TSN(13), kArwnd, {}, {})); + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAbandoned), // + Pair(TSN(15), State::kAbandoned), // + Pair(TSN(16), State::kAbandoned))); + + IForwardTsnChunk forward_tsn2 = queue.CreateIForwardTsn(); + EXPECT_EQ(forward_tsn2.new_cumulative_tsn(), TSN(16)); + EXPECT_THAT( + forward_tsn2.skipped_streams(), + UnorderedElementsAre(IForwardTsnChunk::SkippedStream( + IsUnordered(false), StreamID(1), MID(42)), + IForwardTsnChunk::SkippedStream( + IsUnordered(true), StreamID(2), MID(42)), + IForwardTsnChunk::SkippedStream( + IsUnordered(false), StreamID(3), MID(42)))); +} + +TEST_F(RetransmissionQueueTest, MeasureRTT) { + RetransmissionQueue queue = CreateQueue(/*use_message_interleaving=*/true); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B")); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _))); + + now_ = now_ + DurationMs(123); + + EXPECT_CALL(on_rtt_, Call(DurationMs(123))).Times(1); + queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {})); +} + +TEST_F(RetransmissionQueueTest, ValidateCumTsnAtRest) { + RetransmissionQueue queue = CreateQueue(/*use_message_interleaving=*/true); + + EXPECT_FALSE(queue.HandleSack(now_, SackChunk(TSN(8), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(9), kArwnd, {}, {}))); + EXPECT_FALSE(queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {}))); +} + +TEST_F(RetransmissionQueueTest, 
ValidateCumTsnAckOnInflightData) { + RetransmissionQueue queue = CreateQueue(); + + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_THAT(GetSentPacketTSNs(queue), + testing::ElementsAre(TSN(10), TSN(11), TSN(12), TSN(13), TSN(14), + TSN(15), TSN(16), TSN(17))); + + EXPECT_FALSE(queue.HandleSack(now_, SackChunk(TSN(8), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(9), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(11), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(13), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(14), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(15), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(16), kArwnd, {}, {}))); + EXPECT_TRUE(queue.HandleSack(now_, SackChunk(TSN(17), kArwnd, {}, {}))); + EXPECT_FALSE(queue.HandleSack(now_, SackChunk(TSN(18), kArwnd, {}, {}))); +} + +TEST_F(RetransmissionQueueTest, HandleGapAckBlocksMatchingNoInflightData) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_THAT(GetSentPacketTSNs(queue), + testing::ElementsAre(TSN(10), TSN(11), TSN(12), TSN(13), TSN(14), + TSN(15), TSN(16), TSN(17))); + + // Ack 9, 20-25. 
This is an invalid SACK, but should still be handled. + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(11, 16)}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight), // + Pair(TSN(13), State::kInFlight), // + Pair(TSN(14), State::kInFlight), // + Pair(TSN(15), State::kInFlight), // + Pair(TSN(16), State::kInFlight), // + Pair(TSN(17), State::kInFlight))); +} + +TEST_F(RetransmissionQueueTest, HandleInvalidGapAckBlocks) { + RetransmissionQueue queue = CreateQueue(); + + // Nothing produced - nothing in retransmission queue + + // Ack 9, 12-13 + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(3, 4)}, {})); + + // Gap ack blocks are just ignore. + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked))); +} + +TEST_F(RetransmissionQueueTest, GapAckBlocksDoNotMoveCumTsnAck) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_THAT(GetSentPacketTSNs(queue), + testing::ElementsAre(TSN(10), TSN(11), TSN(12), TSN(13), TSN(14), + TSN(15), TSN(16), TSN(17))); + + // Ack 9, 10-14. This is actually an invalid ACK as the first gap can't be + // adjacent to the cum-tsn-ack, but it's not strictly forbidden. However, the + // cum-tsn-ack should not move, as the gap-ack-blocks are just advisory. 
+ queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(1, 5)}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAcked), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kInFlight), // + Pair(TSN(16), State::kInFlight), // + Pair(TSN(17), State::kInFlight))); +} + +TEST_F(RetransmissionQueueTest, StaysWithinAvailableSize) { + RetransmissionQueue queue = CreateQueue(); + + // See SctpPacketTest::ReturnsCorrectSpaceAvailableToStayWithinMTU for the + // magic numbers in this test. + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t size) { + EXPECT_EQ(size, 1176 - DataChunk::kHeaderSize); + + std::vector payload(183); + return SendQueue::DataToSend(gen_.Ordered(payload, "BE")); + }) + .WillOnce([this](TimeMs, size_t size) { + EXPECT_EQ(size, 976 - DataChunk::kHeaderSize); + + std::vector payload(957); + return SendQueue::DataToSend(gen_.Ordered(payload, "BE")); + }); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1188 - 12); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _))); +} + +TEST_F(RetransmissionQueueTest, AccountsNackedAbandonedChunksAsNotOutstanding) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({5, 6, 7, 8}, "")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({9, 10, 11, 12}, "")); + dts.max_retransmissions = 0; + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + // Send and ack first chunk (TSN 10) + std::vector> 
chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), + Pair(TSN(12), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight))); + EXPECT_EQ(queue.outstanding_bytes(), (16 + 4) * 3u); + + // Mark the message as lost. + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(1); + queue.HandleT3RtxTimerExpiry(); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned), // + Pair(TSN(11), State::kAbandoned), // + Pair(TSN(12), State::kAbandoned))); + EXPECT_EQ(queue.outstanding_bytes(), 0u); + + // Now ACK those, one at a time. + queue.HandleSack(now_, SackChunk(TSN(10), kArwnd, {}, {})); + EXPECT_EQ(queue.outstanding_bytes(), 0u); + + queue.HandleSack(now_, SackChunk(TSN(11), kArwnd, {}, {})); + EXPECT_EQ(queue.outstanding_bytes(), 0u); + + queue.HandleSack(now_, SackChunk(TSN(12), kArwnd, {}, {})); + EXPECT_EQ(queue.outstanding_bytes(), 0u); +} + +TEST_F(RetransmissionQueueTest, ExpireFromSendQueueWhenPartiallySent) { + RetransmissionQueue queue = CreateQueue(); + DataGeneratorOptions options; + options.stream_id = StreamID(17); + options.message_id = MID(42); + TimeMs test_start = now_; + EXPECT_CALL(producer_, Produce) + .WillOnce([&](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "B", options)); + dts.expires_at = TimeMs(test_start + DurationMs(10)); + return dts; + }) + .WillOnce([&](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({5, 6, 7, 8}, "", options)); + dts.expires_at = TimeMs(test_start + DurationMs(10)); + return dts; + }) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + std::vector> chunks_to_send = + 
queue.GetChunksToSend(now_, 24); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _))); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(17), MID(42))) + .WillOnce(Return(true)); + now_ += DurationMs(100); + + EXPECT_THAT(queue.GetChunksToSend(now_, 24), IsEmpty()); + + EXPECT_THAT( + queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // Initial TSN + Pair(TSN(10), State::kAbandoned), // Produced + Pair(TSN(11), State::kAbandoned), // Produced and expired + Pair(TSN(12), State::kAbandoned))); // Placeholder end +} + +TEST_F(RetransmissionQueueTest, LimitsRetransmissionsOnlyWhenNackedThreeTimes) { + RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "BE")); + dts.max_retransmissions = 0; + return dts; + }) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), + Pair(TSN(12), _), Pair(TSN(13), _))); + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight), // + Pair(TSN(13), State::kInFlight))); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(0); + + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, 2)}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kNacked), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kInFlight), // + Pair(TSN(13), State::kInFlight))); + + 
EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, 3)}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kNacked), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kInFlight))); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(false)); + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, 4)}, {})); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked))); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); +} + +TEST_F(RetransmissionQueueTest, AbandonsRtxLimit2WhenNackedNineTimes) { + // This is a fairly long test. 
+ RetransmissionQueue queue = CreateQueue(); + EXPECT_CALL(producer_, Produce) + .WillOnce([this](TimeMs, size_t) { + SendQueue::DataToSend dts(gen_.Ordered({1, 2, 3, 4}, "BE")); + dts.max_retransmissions = 2; + return dts; + }) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillOnce(CreateChunk()) + .WillRepeatedly([](TimeMs, size_t) { return absl::nullopt; }); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + std::vector> chunks_to_send = + queue.GetChunksToSend(now_, 1000); + EXPECT_THAT(chunks_to_send, + ElementsAre(Pair(TSN(10), _), Pair(TSN(11), _), Pair(TSN(12), _), + Pair(TSN(13), _), Pair(TSN(14), _), Pair(TSN(15), _), + Pair(TSN(16), _), Pair(TSN(17), _), Pair(TSN(18), _), + Pair(TSN(19), _))); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kInFlight), // + Pair(TSN(11), State::kInFlight), // + Pair(TSN(12), State::kInFlight), // + Pair(TSN(13), State::kInFlight), // + Pair(TSN(14), State::kInFlight), // + Pair(TSN(15), State::kInFlight), // + Pair(TSN(16), State::kInFlight), // + Pair(TSN(17), State::kInFlight), // + Pair(TSN(18), State::kInFlight), // + Pair(TSN(19), State::kInFlight))); + + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .Times(0); + + // Ack TSN [11 to 13] - three nacks for TSN(10), which will retransmit it. 
+ for (int tsn = 11; tsn <= 13; ++tsn) { + queue.HandleSack( + now_, + SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, (tsn - 9))}, {})); + } + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kToBeRetransmitted), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kInFlight), // + Pair(TSN(15), State::kInFlight), // + Pair(TSN(16), State::kInFlight), // + Pair(TSN(17), State::kInFlight), // + Pair(TSN(18), State::kInFlight), // + Pair(TSN(19), State::kInFlight))); + + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), ElementsAre(Pair(TSN(10), _))); + + // Ack TSN [14 to 16] - three more nacks - second and last retransmission. + for (int tsn = 14; tsn <= 16; ++tsn) { + queue.HandleSack( + now_, + SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, (tsn - 9))}, {})); + } + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kToBeRetransmitted), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kAcked), // + Pair(TSN(17), State::kInFlight), // + Pair(TSN(18), State::kInFlight), // + Pair(TSN(19), State::kInFlight))); + + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), ElementsAre(Pair(TSN(10), _))); + + // Ack TSN [17 to 18] + for (int tsn = 17; tsn <= 18; ++tsn) { + queue.HandleSack( + now_, + SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, (tsn - 9))}, {})); + } + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kNacked), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kAcked), // + 
Pair(TSN(17), State::kAcked), // + Pair(TSN(18), State::kAcked), // + Pair(TSN(19), State::kInFlight))); + + EXPECT_FALSE(queue.ShouldSendForwardTsn(now_)); + + // Ack TSN 19 - three more nacks for TSN 10, no more retransmissions. + EXPECT_CALL(producer_, Discard(IsUnordered(false), StreamID(1), MID(42))) + .WillOnce(Return(false)); + queue.HandleSack( + now_, SackChunk(TSN(9), kArwnd, {SackChunk::GapAckBlock(2, 10)}, {})); + + EXPECT_THAT(queue.GetChunksToSend(now_, 1000), IsEmpty()); + + EXPECT_THAT(queue.GetChunkStatesForTesting(), + ElementsAre(Pair(TSN(9), State::kAcked), // + Pair(TSN(10), State::kAbandoned), // + Pair(TSN(11), State::kAcked), // + Pair(TSN(12), State::kAcked), // + Pair(TSN(13), State::kAcked), // + Pair(TSN(14), State::kAcked), // + Pair(TSN(15), State::kAcked), // + Pair(TSN(16), State::kAcked), // + Pair(TSN(17), State::kAcked), // + Pair(TSN(18), State::kAcked), // + Pair(TSN(19), State::kAcked))); + + EXPECT_TRUE(queue.ShouldSendForwardTsn(now_)); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/tx/retransmission_timeout.cc b/net/dcsctp/tx/retransmission_timeout.cc new file mode 100644 index 0000000000..7d545a07d0 --- /dev/null +++ b/net/dcsctp/tx/retransmission_timeout.cc @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/tx/retransmission_timeout.h" + +#include +#include + +#include "net/dcsctp/public/dcsctp_options.h" + +namespace dcsctp { +namespace { +// https://tools.ietf.org/html/rfc4960#section-15 +constexpr double kRtoAlpha = 0.125; +constexpr double kRtoBeta = 0.25; +} // namespace + +RetransmissionTimeout::RetransmissionTimeout(const DcSctpOptions& options) + : min_rto_(*options.rto_min), + max_rto_(*options.rto_max), + max_rtt_(*options.rtt_max), + rto_(*options.rto_initial) {} + +void RetransmissionTimeout::ObserveRTT(DurationMs measured_rtt) { + double rtt = *measured_rtt; + + // Unrealistic values will be skipped. If a wrongly measured (or otherwise + // corrupt) value was processed, it could change the state in a way that would + // take a very long time to recover. + if (rtt < 0.0 || rtt > max_rtt_) { + return; + } + + if (first_measurement_) { + // https://tools.ietf.org/html/rfc4960#section-6.3.1 + // "When the first RTT measurement R is made, set + // SRTT <- R, + // RTTVAR <- R/2, and + // RTO <- SRTT + 4 * RTTVAR." + srtt_ = rtt; + rttvar_ = rtt * 0.5; + rto_ = srtt_ + 4 * rttvar_; + first_measurement_ = false; + } else { + // https://tools.ietf.org/html/rfc4960#section-6.3.1 + // "When a new RTT measurement R' is made, set + // RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| + // SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' + // RTO <- SRTT + 4 * RTTVAR." + rttvar_ = (1 - kRtoBeta) * rttvar_ + kRtoBeta * std::abs(srtt_ - rtt); + srtt_ = (1 - kRtoAlpha) * srtt_ + kRtoAlpha * rtt; + rto_ = srtt_ + 4 * rttvar_; + } + + // If the RTO becomes smaller or equal to RTT, expiration timers will be + // scheduled at the same time as packets are expected. Only happens in + // extremely stable RTTs, i.e. in simulations. + rto_ = std::fmax(rto_, rtt + 1); + + // Clamp RTO between min and max. 
+ rto_ = std::fmin(std::fmax(rto_, min_rto_), max_rto_); +} +} // namespace dcsctp diff --git a/net/dcsctp/tx/retransmission_timeout.h b/net/dcsctp/tx/retransmission_timeout.h new file mode 100644 index 0000000000..0fac33e59c --- /dev/null +++ b/net/dcsctp/tx/retransmission_timeout.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TX_RETRANSMISSION_TIMEOUT_H_ +#define NET_DCSCTP_TX_RETRANSMISSION_TIMEOUT_H_ + +#include +#include + +#include "net/dcsctp/public/dcsctp_options.h" + +namespace dcsctp { + +// Manages updating of the Retransmission Timeout (RTO) SCTP variable, which is +// used directly as the base timeout for T3-RTX and for other timers, such as +// delayed ack. +// +// When a round-trip-time (RTT) is calculated (outside this class), `Observe` +// is called, which calculates the retransmission timeout (RTO) value. The RTO +// value will become larger if the RTT is high and/or the RTT values are varying +// a lot, which is an indicator of a bad connection. +class RetransmissionTimeout { + public: + explicit RetransmissionTimeout(const DcSctpOptions& options); + + // To be called when a RTT has been measured, to update the RTO value. + void ObserveRTT(DurationMs measured_rtt); + + // Returns the Retransmission Timeout (RTO) value, in milliseconds. + DurationMs rto() const { return DurationMs(rto_); } + + // Returns the smoothed RTT value, in milliseconds. + DurationMs srtt() const { return DurationMs(srtt_); } + + private: + // Note that all intermediate state calculation is done in the floating point + // domain, to maintain precision. 
+ const double min_rto_; + const double max_rto_; + const double max_rtt_; + // If this is the first measurement + bool first_measurement_ = true; + // Smoothed Round-Trip Time + double srtt_ = 0.0; + // Round-Trip Time Variation + double rttvar_ = 0.0; + // Retransmission Timeout + double rto_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_TX_RETRANSMISSION_TIMEOUT_H_ diff --git a/net/dcsctp/tx/retransmission_timeout_test.cc b/net/dcsctp/tx/retransmission_timeout_test.cc new file mode 100644 index 0000000000..3b2e3399fe --- /dev/null +++ b/net/dcsctp/tx/retransmission_timeout_test.cc @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/tx/retransmission_timeout.h" + +#include "net/dcsctp/public/dcsctp_options.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { + +constexpr DurationMs kMaxRtt = DurationMs(8'000); +constexpr DurationMs kInitialRto = DurationMs(200); +constexpr DurationMs kMaxRto = DurationMs(800); +constexpr DurationMs kMinRto = DurationMs(120); + +DcSctpOptions MakeOptions() { + DcSctpOptions options; + options.rtt_max = kMaxRtt; + options.rto_initial = kInitialRto; + options.rto_max = kMaxRto; + options.rto_min = kMinRto; + return options; +} + +TEST(RetransmissionTimeoutTest, HasValidInitialRto) { + RetransmissionTimeout rto_(MakeOptions()); + EXPECT_EQ(rto_.rto(), kInitialRto); +} + +TEST(RetransmissionTimeoutTest, NegativeValuesDoNotAffectRTO) { + RetransmissionTimeout rto_(MakeOptions()); + // Initial negative value + rto_.ObserveRTT(DurationMs(-10)); + EXPECT_EQ(rto_.rto(), kInitialRto); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 372); + // Subsequent negative value + rto_.ObserveRTT(DurationMs(-10)); + EXPECT_EQ(*rto_.rto(), 372); +} + +TEST(RetransmissionTimeoutTest, TooLargeValuesDoNotAffectRTO) { + RetransmissionTimeout rto_(MakeOptions()); + // Initial too large value + rto_.ObserveRTT(kMaxRtt + DurationMs(100)); + EXPECT_EQ(rto_.rto(), kInitialRto); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 372); + // Subsequent too large value + rto_.ObserveRTT(kMaxRtt + DurationMs(100)); + EXPECT_EQ(*rto_.rto(), 372); +} + +TEST(RetransmissionTimeoutTest, WillNeverGoBelowMinimumRto) { + RetransmissionTimeout rto_(MakeOptions()); + for (int i = 0; i < 1000; ++i) { + rto_.ObserveRTT(DurationMs(1)); + } + EXPECT_GE(rto_.rto(), kMinRto); +} + +TEST(RetransmissionTimeoutTest, WillNeverGoAboveMaximumRto) { + RetransmissionTimeout rto_(MakeOptions()); + for (int i = 0; i < 1000; ++i) { + rto_.ObserveRTT(kMaxRtt - DurationMs(1)); + // Adding jitter, which would make it RTO be well above 
RTT. + rto_.ObserveRTT(kMaxRtt - DurationMs(100)); + } + EXPECT_LE(rto_.rto(), kMaxRto); +} + +TEST(RetransmissionTimeoutTest, CalculatesRtoForStableRtt) { + RetransmissionTimeout rto_(MakeOptions()); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 372); + rto_.ObserveRTT(DurationMs(128)); + EXPECT_EQ(*rto_.rto(), 314); + rto_.ObserveRTT(DurationMs(123)); + EXPECT_EQ(*rto_.rto(), 268); + rto_.ObserveRTT(DurationMs(125)); + EXPECT_EQ(*rto_.rto(), 233); + rto_.ObserveRTT(DurationMs(127)); + EXPECT_EQ(*rto_.rto(), 208); +} + +TEST(RetransmissionTimeoutTest, CalculatesRtoForUnstableRtt) { + RetransmissionTimeout rto_(MakeOptions()); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 372); + rto_.ObserveRTT(DurationMs(402)); + EXPECT_EQ(*rto_.rto(), 622); + rto_.ObserveRTT(DurationMs(728)); + EXPECT_EQ(*rto_.rto(), 800); + rto_.ObserveRTT(DurationMs(89)); + EXPECT_EQ(*rto_.rto(), 800); + rto_.ObserveRTT(DurationMs(126)); + EXPECT_EQ(*rto_.rto(), 800); +} + +TEST(RetransmissionTimeoutTest, WillStabilizeAfterAWhile) { + RetransmissionTimeout rto_(MakeOptions()); + rto_.ObserveRTT(DurationMs(124)); + rto_.ObserveRTT(DurationMs(402)); + rto_.ObserveRTT(DurationMs(728)); + rto_.ObserveRTT(DurationMs(89)); + rto_.ObserveRTT(DurationMs(126)); + EXPECT_EQ(*rto_.rto(), 800); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 800); + rto_.ObserveRTT(DurationMs(122)); + EXPECT_EQ(*rto_.rto(), 709); + rto_.ObserveRTT(DurationMs(123)); + EXPECT_EQ(*rto_.rto(), 630); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 561); + rto_.ObserveRTT(DurationMs(122)); + EXPECT_EQ(*rto_.rto(), 504); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 453); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 409); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 372); + rto_.ObserveRTT(DurationMs(124)); + EXPECT_EQ(*rto_.rto(), 339); +} + +TEST(RetransmissionTimeoutTest, WillAlwaysStayAboveRTT) { + // In simulations, it's 
quite common to have a very stable RTT, and having an + // RTO at the same value will cause issues as expiry timers will be scheduled + // to be expire exactly when a packet is supposed to arrive. The RTO must be + // larger than the RTT. In non-simulated environments, this is a non-issue as + // any jitter will increase the RTO. + RetransmissionTimeout rto_(MakeOptions()); + + for (int i = 0; i < 100; ++i) { + rto_.ObserveRTT(DurationMs(124)); + } + EXPECT_GT(*rto_.rto(), 124); +} + +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/tx/rr_send_queue.cc b/net/dcsctp/tx/rr_send_queue.cc new file mode 100644 index 0000000000..254214e554 --- /dev/null +++ b/net/dcsctp/tx/rr_send_queue.cc @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/tx/rr_send_queue.h" + +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/tx/send_queue.h" +#include "rtc_base/logging.h" + +namespace dcsctp { + +bool RRSendQueue::OutgoingStream::HasDataToSend(TimeMs now) { + while (!items_.empty()) { + RRSendQueue::OutgoingStream::Item& item = items_.front(); + if (item.message_id.has_value()) { + // Already partially sent messages can always continue to be sent. + return true; + } + + // Message has expired. Remove it and inspect the next one. 
+ if (item.expires_at.has_value() && *item.expires_at <= now) { + buffered_amount_.Decrease(item.remaining_size); + total_buffered_amount_.Decrease(item.remaining_size); + items_.pop_front(); + RTC_DCHECK(IsConsistent()); + continue; + } + + if (is_paused_) { + // The stream has paused (and there is no partially sent message). + return false; + } + return true; + } + return false; +} + +bool RRSendQueue::IsConsistent() const { + size_t total_buffered_amount = 0; + for (const auto& stream_entry : streams_) { + total_buffered_amount += stream_entry.second.buffered_amount().value(); + } + + if (previous_message_has_ended_) { + auto it = streams_.find(current_stream_id_); + if (it != streams_.end() && it->second.has_partially_sent_message()) { + RTC_DLOG(LS_ERROR) + << "Previous message has ended, but still partial message in stream"; + return false; + } + } else { + auto it = streams_.find(current_stream_id_); + if (it == streams_.end() || !it->second.has_partially_sent_message()) { + RTC_DLOG(LS_ERROR) + << "Previous message has NOT ended, but there is no partial message"; + return false; + } + } + + return total_buffered_amount == total_buffered_amount_.value(); +} + +bool RRSendQueue::OutgoingStream::IsConsistent() const { + size_t bytes = 0; + for (const auto& item : items_) { + bytes += item.remaining_size; + } + return bytes == buffered_amount_.value(); +} + +void RRSendQueue::ThresholdWatcher::Decrease(size_t bytes) { + RTC_DCHECK(bytes <= value_); + size_t old_value = value_; + value_ -= bytes; + + if (old_value > low_threshold_ && value_ <= low_threshold_) { + on_threshold_reached_(); + } +} + +void RRSendQueue::ThresholdWatcher::SetLowThreshold(size_t low_threshold) { + // Betting on https://github.com/w3c/webrtc-pc/issues/2654 being accepted. 
+ if (low_threshold_ < value_ && low_threshold >= value_) { + on_threshold_reached_(); + } + low_threshold_ = low_threshold; +} + +void RRSendQueue::OutgoingStream::Add(DcSctpMessage message, + absl::optional expires_at, + const SendOptions& send_options) { + buffered_amount_.Increase(message.payload().size()); + total_buffered_amount_.Increase(message.payload().size()); + items_.emplace_back(std::move(message), expires_at, send_options); + + RTC_DCHECK(IsConsistent()); +} + +absl::optional RRSendQueue::OutgoingStream::Produce( + TimeMs now, + size_t max_size) { + RTC_DCHECK(!items_.empty()); + + Item* item = &items_.front(); + DcSctpMessage& message = item->message; + + if (item->remaining_size > max_size && max_size < kMinimumFragmentedPayload) { + RTC_DCHECK(IsConsistent()); + return absl::nullopt; + } + + // Allocate Message ID and SSN when the first fragment is sent. + if (!item->message_id.has_value()) { + MID& mid = + item->send_options.unordered ? next_unordered_mid_ : next_ordered_mid_; + item->message_id = mid; + mid = MID(*mid + 1); + } + if (!item->send_options.unordered && !item->ssn.has_value()) { + item->ssn = next_ssn_; + next_ssn_ = SSN(*next_ssn_ + 1); + } + + // Grab the next `max_size` fragment from this message and calculate flags. + rtc::ArrayView chunk_payload = + item->message.payload().subview(item->remaining_offset, max_size); + rtc::ArrayView message_payload = message.payload(); + Data::IsBeginning is_beginning(chunk_payload.data() == + message_payload.data()); + Data::IsEnd is_end((chunk_payload.data() + chunk_payload.size()) == + (message_payload.data() + message_payload.size())); + + StreamID stream_id = message.stream_id(); + PPID ppid = message.ppid(); + + // Zero-copy the payload if the message fits in a single chunk. + std::vector payload = + is_beginning && is_end + ? 
std::move(message).ReleasePayload() + : std::vector(chunk_payload.begin(), chunk_payload.end()); + + FSN fsn(item->current_fsn); + item->current_fsn = FSN(*item->current_fsn + 1); + buffered_amount_.Decrease(payload.size()); + total_buffered_amount_.Decrease(payload.size()); + + SendQueue::DataToSend chunk(Data(stream_id, item->ssn.value_or(SSN(0)), + item->message_id.value(), fsn, ppid, + std::move(payload), is_beginning, is_end, + item->send_options.unordered)); + chunk.max_retransmissions = item->send_options.max_retransmissions; + chunk.expires_at = item->expires_at; + + if (is_end) { + // The entire message has been sent, and its last data copied to `chunk`, so + // it can safely be discarded. + items_.pop_front(); + } else { + item->remaining_offset += chunk_payload.size(); + item->remaining_size -= chunk_payload.size(); + RTC_DCHECK(item->remaining_offset + item->remaining_size == + item->message.payload().size()); + RTC_DCHECK(item->remaining_size > 0); + } + RTC_DCHECK(IsConsistent()); + return chunk; +} + +bool RRSendQueue::OutgoingStream::Discard(IsUnordered unordered, + MID message_id) { + bool result = false; + if (!items_.empty()) { + Item& item = items_.front(); + if (item.send_options.unordered == unordered && + item.message_id.has_value() && *item.message_id == message_id) { + buffered_amount_.Decrease(item.remaining_size); + total_buffered_amount_.Decrease(item.remaining_size); + items_.pop_front(); + // As the item still existed, it had unsent data. + result = true; + } + } + RTC_DCHECK(IsConsistent()); + return result; +} + +void RRSendQueue::OutgoingStream::Pause() { + is_paused_ = true; + + // A stream is paused when it's about to be reset. In this implementation, + // it will throw away all non-partially send messages. This is subject to + // change. It will however not discard any partially sent messages - only + // whole messages. 
Partially delivered messages (at the time of receiving a + // Stream Reset command) will always deliver all the fragments before + // actually resetting the stream. + for (auto it = items_.begin(); it != items_.end();) { + if (it->remaining_offset == 0) { + buffered_amount_.Decrease(it->remaining_size); + total_buffered_amount_.Decrease(it->remaining_size); + it = items_.erase(it); + } else { + ++it; + } + } + RTC_DCHECK(IsConsistent()); +} + +void RRSendQueue::OutgoingStream::Reset() { + if (!items_.empty()) { + // If this message has been partially sent, reset it so that it will be + // re-sent. + auto& item = items_.front(); + buffered_amount_.Increase(item.message.payload().size() - + item.remaining_size); + total_buffered_amount_.Increase(item.message.payload().size() - + item.remaining_size); + item.remaining_offset = 0; + item.remaining_size = item.message.payload().size(); + item.message_id = absl::nullopt; + item.ssn = absl::nullopt; + item.current_fsn = FSN(0); + } + is_paused_ = false; + next_ordered_mid_ = MID(0); + next_unordered_mid_ = MID(0); + next_ssn_ = SSN(0); + RTC_DCHECK(IsConsistent()); +} + +bool RRSendQueue::OutgoingStream::has_partially_sent_message() const { + if (items_.empty()) { + return false; + } + return items_.front().message_id.has_value(); +} + +void RRSendQueue::Add(TimeMs now, + DcSctpMessage message, + const SendOptions& send_options) { + RTC_DCHECK(!message.payload().empty()); + // Any limited lifetime should start counting from now - when the message + // has been added to the queue. + absl::optional expires_at = absl::nullopt; + if (send_options.lifetime.has_value()) { + // `expires_at` is the time when it expires. Which is slightly larger than + // the message's lifetime, as the message is alive during its entire + // lifetime (which may be zero). 
+ expires_at = now + *send_options.lifetime + DurationMs(1); + } + GetOrCreateStreamInfo(message.stream_id()) + .Add(std::move(message), expires_at, send_options); + RTC_DCHECK(IsConsistent()); +} + +bool RRSendQueue::IsFull() const { + return total_buffered_amount() >= buffer_size_; +} + +bool RRSendQueue::IsEmpty() const { + return total_buffered_amount() == 0; +} + +std::map::iterator +RRSendQueue::GetNextStream(TimeMs now) { + auto start_it = streams_.lower_bound(StreamID(*current_stream_id_ + 1)); + + for (auto it = start_it; it != streams_.end(); ++it) { + if (it->second.HasDataToSend(now)) { + current_stream_id_ = it->first; + return it; + } + } + + for (auto it = streams_.begin(); it != start_it; ++it) { + if (it->second.HasDataToSend(now)) { + current_stream_id_ = it->first; + return it; + } + } + return streams_.end(); +} + +absl::optional RRSendQueue::Produce(TimeMs now, + size_t max_size) { + std::map::iterator stream_it; + + if (previous_message_has_ended_) { + // Previous message has ended. Round-robin to a different stream, if there + // even is one with data to send. + stream_it = GetNextStream(now); + if (stream_it == streams_.end()) { + RTC_DLOG(LS_VERBOSE) + << log_prefix_ + << "There is no stream with data; Can't produce any data."; + return absl::nullopt; + } + } else { + // The previous message has not ended; Continue from the current stream. + stream_it = streams_.find(current_stream_id_); + RTC_DCHECK(stream_it != streams_.end()); + } + + absl::optional data = stream_it->second.Produce(now, max_size); + if (data.has_value()) { + RTC_DLOG(LS_VERBOSE) << log_prefix_ << "Producing DATA, type=" + << (data->data.is_unordered ? "unordered" : "ordered") + << "::" + << (*data->data.is_beginning && *data->data.is_end + ? "complete" + : *data->data.is_beginning + ? "first" + : *data->data.is_end ? 
"last" : "middle") + << ", stream_id=" << *stream_it->first + << ", ppid=" << *data->data.ppid + << ", length=" << data->data.payload.size(); + + previous_message_has_ended_ = *data->data.is_end; + } + + RTC_DCHECK(IsConsistent()); + return data; +} + +bool RRSendQueue::Discard(IsUnordered unordered, + StreamID stream_id, + MID message_id) { + bool has_discarded = + GetOrCreateStreamInfo(stream_id).Discard(unordered, message_id); + if (has_discarded) { + // Only partially sent messages are discarded, so if a message was + // discarded, then it was the currently sent message. + previous_message_has_ended_ = true; + } + + return has_discarded; +} + +void RRSendQueue::PrepareResetStreams(rtc::ArrayView streams) { + for (StreamID stream_id : streams) { + GetOrCreateStreamInfo(stream_id).Pause(); + } + RTC_DCHECK(IsConsistent()); +} + +bool RRSendQueue::CanResetStreams() const { + // Streams can be reset if those streams that are paused don't have any + // messages that are partially sent. + for (auto& stream : streams_) { + if (stream.second.is_paused() && + stream.second.has_partially_sent_message()) { + return false; + } + } + return true; +} + +void RRSendQueue::CommitResetStreams() { + for (auto& stream_entry : streams_) { + if (stream_entry.second.is_paused()) { + stream_entry.second.Reset(); + } + } + RTC_DCHECK(IsConsistent()); +} + +void RRSendQueue::RollbackResetStreams() { + for (auto& stream_entry : streams_) { + stream_entry.second.Resume(); + } + RTC_DCHECK(IsConsistent()); +} + +void RRSendQueue::Reset() { + // Recalculate buffered amount, as partially sent messages may have been put + // fully back in the queue. 
+ for (auto& stream_entry : streams_) { + OutgoingStream& stream = stream_entry.second; + stream.Reset(); + } + previous_message_has_ended_ = true; +} + +size_t RRSendQueue::buffered_amount(StreamID stream_id) const { + auto it = streams_.find(stream_id); + if (it == streams_.end()) { + return 0; + } + return it->second.buffered_amount().value(); +} + +size_t RRSendQueue::buffered_amount_low_threshold(StreamID stream_id) const { + auto it = streams_.find(stream_id); + if (it == streams_.end()) { + return 0; + } + return it->second.buffered_amount().low_threshold(); +} + +void RRSendQueue::SetBufferedAmountLowThreshold(StreamID stream_id, + size_t bytes) { + GetOrCreateStreamInfo(stream_id).buffered_amount().SetLowThreshold(bytes); +} + +RRSendQueue::OutgoingStream& RRSendQueue::GetOrCreateStreamInfo( + StreamID stream_id) { + auto it = streams_.find(stream_id); + if (it != streams_.end()) { + return it->second; + } + + return streams_ + .emplace(stream_id, + OutgoingStream( + [this, stream_id]() { on_buffered_amount_low_(stream_id); }, + total_buffered_amount_)) + .first->second; +} +} // namespace dcsctp diff --git a/net/dcsctp/tx/rr_send_queue.h b/net/dcsctp/tx/rr_send_queue.h new file mode 100644 index 0000000000..3ec45af17d --- /dev/null +++ b/net/dcsctp/tx/rr_send_queue.h @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef NET_DCSCTP_TX_RR_SEND_QUEUE_H_ +#define NET_DCSCTP_TX_RR_SEND_QUEUE_H_ + +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/pair_hash.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/tx/send_queue.h" + +namespace dcsctp { + +// The Round Robin SendQueue holds all messages that the client wants to send, +// but that haven't yet been split into chunks and fully sent on the wire. +// +// As defined in https://datatracker.ietf.org/doc/html/rfc8260#section-3.2, +// it will cycle to send messages from different streams. It will send all +// fragments from one message before continuing with a different message on +// possibly a different stream, until support for message interleaving has been +// implemented. +// +// As messages can be (requested to be) sent before the connection is properly +// established, this send queue is always present - even for closed connections. +class RRSendQueue : public SendQueue { + public: + // How small a data chunk's payload may be, if having to fragment a message. + static constexpr size_t kMinimumFragmentedPayload = 10; + + RRSendQueue(absl::string_view log_prefix, + size_t buffer_size, + std::function on_buffered_amount_low, + size_t total_buffered_amount_low_threshold, + std::function on_total_buffered_amount_low) + : log_prefix_(std::string(log_prefix) + "fcfs: "), + buffer_size_(buffer_size), + on_buffered_amount_low_(std::move(on_buffered_amount_low)), + total_buffered_amount_(std::move(on_total_buffered_amount_low)) { + total_buffered_amount_.SetLowThreshold(total_buffered_amount_low_threshold); + } + + // Indicates if the buffer is full. Note that it's up to the caller to ensure + // that the buffer is not full prior to adding new items to it. 
+ bool IsFull() const; + // Indicates if the buffer is empty. + bool IsEmpty() const; + + // Adds the message to be sent using the `send_options` provided. The current + // time should be in `now`. Note that it's the responsibility of the caller to + // ensure that the buffer is not full (by calling `IsFull`) before adding + // messages to it. + void Add(TimeMs now, + DcSctpMessage message, + const SendOptions& send_options = {}); + + // Implementation of `SendQueue`. + absl::optional Produce(TimeMs now, size_t max_size) override; + bool Discard(IsUnordered unordered, + StreamID stream_id, + MID message_id) override; + void PrepareResetStreams(rtc::ArrayView streams) override; + bool CanResetStreams() const override; + void CommitResetStreams() override; + void RollbackResetStreams() override; + void Reset() override; + size_t buffered_amount(StreamID stream_id) const override; + size_t total_buffered_amount() const override { + return total_buffered_amount_.value(); + } + size_t buffered_amount_low_threshold(StreamID stream_id) const override; + void SetBufferedAmountLowThreshold(StreamID stream_id, size_t bytes) override; + + private: + // Represents a value and a "low threshold" that when the value reaches or + // goes under the "low threshold", will trigger `on_threshold_reached` + // callback. + class ThresholdWatcher { + public: + explicit ThresholdWatcher(std::function on_threshold_reached) + : on_threshold_reached_(std::move(on_threshold_reached)) {} + // Increases the value. + void Increase(size_t bytes) { value_ += bytes; } + // Decreases the value and triggers `on_threshold_reached` if it's at or + // below `low_threshold()`. + void Decrease(size_t bytes); + + size_t value() const { return value_; } + size_t low_threshold() const { return low_threshold_; } + void SetLowThreshold(size_t low_threshold); + + private: + const std::function on_threshold_reached_; + size_t value_ = 0; + size_t low_threshold_ = 0; + }; + + // Per-stream information. 
+ class OutgoingStream { + public: + explicit OutgoingStream(std::function on_buffered_amount_low, + ThresholdWatcher& total_buffered_amount) + : buffered_amount_(std::move(on_buffered_amount_low)), + total_buffered_amount_(total_buffered_amount) {} + + // Enqueues a message to this stream. + void Add(DcSctpMessage message, + absl::optional expires_at, + const SendOptions& send_options); + + // Possibly produces a data chunk to send. + absl::optional Produce(TimeMs now, size_t max_size); + + const ThresholdWatcher& buffered_amount() const { return buffered_amount_; } + ThresholdWatcher& buffered_amount() { return buffered_amount_; } + + // Discards a partially sent message, see `SendQueue::Discard`. + bool Discard(IsUnordered unordered, MID message_id); + + // Pauses this stream, which is used before resetting it. + void Pause(); + + // Resumes a paused stream. + void Resume() { is_paused_ = false; } + + bool is_paused() const { return is_paused_; } + + // Resets this stream, meaning MIDs and SSNs are set to zero. + void Reset(); + + // Indicates if this stream has a partially sent message in it. + bool has_partially_sent_message() const; + + // Indicates if the stream has data to send. It will also try to remove any + // expired non-partially sent message. + bool HasDataToSend(TimeMs now); + + private: + // An enqueued message and metadata. + struct Item { + explicit Item(DcSctpMessage msg, + absl::optional expires_at, + const SendOptions& send_options) + : message(std::move(msg)), + expires_at(expires_at), + send_options(send_options), + remaining_offset(0), + remaining_size(message.payload().size()) {} + DcSctpMessage message; + absl::optional expires_at; + SendOptions send_options; + // The remaining payload (offset and size) to be sent, when it has been + // fragmented. + size_t remaining_offset; + size_t remaining_size; + // If set, an allocated Message ID and SSN. Will be allocated when the + // first fragment is sent. 
+ absl::optional message_id = absl::nullopt; + absl::optional ssn = absl::nullopt; + // The current Fragment Sequence Number, incremented for each fragment. + FSN current_fsn = FSN(0); + }; + + bool IsConsistent() const; + + // Streams are pause when they are about to be reset. + bool is_paused_ = false; + // MIDs are different for unordered and ordered messages sent on a stream. + MID next_unordered_mid_ = MID(0); + MID next_ordered_mid_ = MID(0); + + SSN next_ssn_ = SSN(0); + // Enqueued messages, and metadata. + std::deque items_; + + // The current amount of buffered data. + ThresholdWatcher buffered_amount_; + + // Reference to the total buffered amount, which is updated directly by each + // stream. + ThresholdWatcher& total_buffered_amount_; + }; + + bool IsConsistent() const; + OutgoingStream& GetOrCreateStreamInfo(StreamID stream_id); + absl::optional Produce( + std::map::iterator it, + TimeMs now, + size_t max_size); + + // Return the next stream, in round-robin fashion. + std::map::iterator GetNextStream(TimeMs now); + + const std::string log_prefix_; + const size_t buffer_size_; + + // Called when the buffered amount is below what has been set using + // `SetBufferedAmountLowThreshold`. + const std::function on_buffered_amount_low_; + + // Called when the total buffered amount is below what has been set using + // `SetTotalBufferedAmountLowThreshold`. + const std::function on_total_buffered_amount_low_; + + // The total amount of buffer data, for all streams. + ThresholdWatcher total_buffered_amount_; + + // Indicates if the previous fragment sent was the end of a message. For + // non-interleaved sending, this means that the next message may come from a + // different stream. If not true, the next fragment must be produced from the + // same stream as last time. + bool previous_message_has_ended_ = true; + + // The current stream to send chunks from. Modified by `GetNextStream`. 
+ StreamID current_stream_id_ = StreamID(0); + + // All streams, and messages added to those. + std::map streams_; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_TX_RR_SEND_QUEUE_H_ diff --git a/net/dcsctp/tx/rr_send_queue_test.cc b/net/dcsctp/tx/rr_send_queue_test.cc new file mode 100644 index 0000000000..425027762d --- /dev/null +++ b/net/dcsctp/tx/rr_send_queue_test.cc @@ -0,0 +1,783 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/tx/rr_send_queue.h" + +#include +#include +#include + +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/public/types.h" +#include "net/dcsctp/testing/testing_macros.h" +#include "net/dcsctp/tx/send_queue.h" +#include "rtc_base/gunit.h" +#include "test/gmock.h" + +namespace dcsctp { +namespace { +using ::testing::SizeIs; + +constexpr TimeMs kNow = TimeMs(0); +constexpr StreamID kStreamID(1); +constexpr PPID kPPID(53); +constexpr size_t kMaxQueueSize = 1000; +constexpr size_t kBufferedAmountLowThreshold = 500; +constexpr size_t kOneFragmentPacketSize = 100; +constexpr size_t kTwoFragmentPacketSize = 101; + +class RRSendQueueTest : public testing::Test { + protected: + RRSendQueueTest() + : buf_("log: ", + kMaxQueueSize, + on_buffered_amount_low_.AsStdFunction(), + kBufferedAmountLowThreshold, + on_total_buffered_amount_low_.AsStdFunction()) {} + + const DcSctpOptions options_; + testing::NiceMock> + on_buffered_amount_low_; + testing::NiceMock> + on_total_buffered_amount_low_; + RRSendQueue buf_; +}; + 
+TEST_F(RRSendQueueTest, EmptyBuffer) { + EXPECT_TRUE(buf_.IsEmpty()); + EXPECT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize).has_value()); + EXPECT_FALSE(buf_.IsFull()); +} + +TEST_F(RRSendQueueTest, AddAndGetSingleChunk) { + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, {1, 2, 4, 5, 6})); + + EXPECT_FALSE(buf_.IsEmpty()); + EXPECT_FALSE(buf_.IsFull()); + absl::optional chunk_opt = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_opt.has_value()); + EXPECT_TRUE(chunk_opt->data.is_beginning); + EXPECT_TRUE(chunk_opt->data.is_end); +} + +TEST_F(RRSendQueueTest, CarveOutBeginningMiddleAndEnd) { + std::vector payload(60); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + absl::optional chunk_beg = + buf_.Produce(kNow, /*max_size=*/20); + ASSERT_TRUE(chunk_beg.has_value()); + EXPECT_TRUE(chunk_beg->data.is_beginning); + EXPECT_FALSE(chunk_beg->data.is_end); + + absl::optional chunk_mid = + buf_.Produce(kNow, /*max_size=*/20); + ASSERT_TRUE(chunk_mid.has_value()); + EXPECT_FALSE(chunk_mid->data.is_beginning); + EXPECT_FALSE(chunk_mid->data.is_end); + + absl::optional chunk_end = + buf_.Produce(kNow, /*max_size=*/20); + ASSERT_TRUE(chunk_end.has_value()); + EXPECT_FALSE(chunk_end->data.is_beginning); + EXPECT_TRUE(chunk_end->data.is_end); + + EXPECT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize).has_value()); +} + +TEST_F(RRSendQueueTest, GetChunksFromTwoMessages) { + std::vector payload(60); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(3), PPID(54), payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(chunk_one->data.ppid, kPPID); + EXPECT_TRUE(chunk_one->data.is_beginning); + EXPECT_TRUE(chunk_one->data.is_end); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + 
EXPECT_EQ(chunk_two->data.stream_id, StreamID(3)); + EXPECT_EQ(chunk_two->data.ppid, PPID(54)); + EXPECT_TRUE(chunk_two->data.is_beginning); + EXPECT_TRUE(chunk_two->data.is_end); +} + +TEST_F(RRSendQueueTest, BufferBecomesFullAndEmptied) { + std::vector payload(600); + EXPECT_FALSE(buf_.IsFull()); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + EXPECT_FALSE(buf_.IsFull()); + buf_.Add(kNow, DcSctpMessage(StreamID(3), PPID(54), payload)); + EXPECT_TRUE(buf_.IsFull()); + // However, it's still possible to add messages. It's a soft limit, and it + // might be necessary to forcefully add messages due to e.g. external + // fragmentation. + buf_.Add(kNow, DcSctpMessage(StreamID(5), PPID(55), payload)); + EXPECT_TRUE(buf_.IsFull()); + + absl::optional chunk_one = buf_.Produce(kNow, 1000); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(chunk_one->data.ppid, kPPID); + + EXPECT_TRUE(buf_.IsFull()); + + absl::optional chunk_two = buf_.Produce(kNow, 1000); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.stream_id, StreamID(3)); + EXPECT_EQ(chunk_two->data.ppid, PPID(54)); + + EXPECT_FALSE(buf_.IsFull()); + EXPECT_FALSE(buf_.IsEmpty()); + + absl::optional chunk_three = buf_.Produce(kNow, 1000); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_EQ(chunk_three->data.stream_id, StreamID(5)); + EXPECT_EQ(chunk_three->data.ppid, PPID(55)); + + EXPECT_FALSE(buf_.IsFull()); + EXPECT_TRUE(buf_.IsEmpty()); +} + +TEST_F(RRSendQueueTest, WillNotSendTooSmallPacket) { + std::vector payload(RRSendQueue::kMinimumFragmentedPayload + 1); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + // Wouldn't fit enough payload (wouldn't want to fragment) + EXPECT_FALSE( + buf_.Produce(kNow, + /*max_size=*/RRSendQueue::kMinimumFragmentedPayload - 1) + .has_value()); + + // Minimum fragment + absl::optional chunk_one = + buf_.Produce(kNow, + /*max_size=*/RRSendQueue::kMinimumFragmentedPayload); + 
ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(chunk_one->data.ppid, kPPID); + + // There is only one byte remaining - it can be fetched as it doesn't require + // additional fragmentation. + absl::optional chunk_two = + buf_.Produce(kNow, /*max_size=*/1); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.stream_id, kStreamID); + EXPECT_EQ(chunk_two->data.ppid, kPPID); + + EXPECT_TRUE(buf_.IsEmpty()); +} + +TEST_F(RRSendQueueTest, DefaultsToOrderedSend) { + std::vector payload(20); + + // Default is ordered + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_FALSE(chunk_one->data.is_unordered); + + // Explicitly unordered. + SendOptions opts; + opts.unordered = IsUnordered(true); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload), opts); + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_TRUE(chunk_two->data.is_unordered); +} + +TEST_F(RRSendQueueTest, ProduceWithLifetimeExpiry) { + std::vector payload(20); + + // Default is no expiry + TimeMs now = kNow; + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload)); + now += DurationMs(1000000); + ASSERT_TRUE(buf_.Produce(now, kOneFragmentPacketSize)); + + SendOptions expires_2_seconds; + expires_2_seconds.lifetime = DurationMs(2000); + + // Add and consume within lifetime + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); + now += DurationMs(2000); + ASSERT_TRUE(buf_.Produce(now, kOneFragmentPacketSize)); + + // Add and consume just outside lifetime + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); + now += DurationMs(2001); + ASSERT_FALSE(buf_.Produce(now, kOneFragmentPacketSize)); + + // A long time after expiry + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); + 
now += DurationMs(1000000); + ASSERT_FALSE(buf_.Produce(now, kOneFragmentPacketSize)); + + // Expire one message, but produce the second that is not expired. + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_2_seconds); + + SendOptions expires_4_seconds; + expires_4_seconds.lifetime = DurationMs(4000); + + buf_.Add(now, DcSctpMessage(kStreamID, kPPID, payload), expires_4_seconds); + now += DurationMs(2001); + + ASSERT_TRUE(buf_.Produce(now, kOneFragmentPacketSize)); + ASSERT_FALSE(buf_.Produce(now, kOneFragmentPacketSize)); +} + +TEST_F(RRSendQueueTest, DiscardPartialPackets) { + std::vector payload(120); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(2), PPID(54), payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_FALSE(chunk_one->data.is_end); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + buf_.Discard(IsUnordered(false), chunk_one->data.stream_id, + chunk_one->data.message_id); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_FALSE(chunk_two->data.is_end); + EXPECT_EQ(chunk_two->data.stream_id, StreamID(2)); + + absl::optional chunk_three = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_TRUE(chunk_three->data.is_end); + EXPECT_EQ(chunk_three->data.stream_id, StreamID(2)); + ASSERT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize)); + + // Calling it again shouldn't cause issues. 
+ buf_.Discard(IsUnordered(false), chunk_one->data.stream_id, + chunk_one->data.message_id); + ASSERT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize)); +} + +TEST_F(RRSendQueueTest, PrepareResetStreamsDiscardsStream) { + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, {1, 2, 3})); + buf_.Add(kNow, DcSctpMessage(StreamID(2), PPID(54), {1, 2, 3, 4, 5})); + EXPECT_EQ(buf_.total_buffered_amount(), 8u); + + buf_.PrepareResetStreams(std::vector({StreamID(1)})); + EXPECT_EQ(buf_.total_buffered_amount(), 5u); + buf_.CommitResetStreams(); + buf_.PrepareResetStreams(std::vector({StreamID(2)})); + EXPECT_EQ(buf_.total_buffered_amount(), 0u); +} + +TEST_F(RRSendQueueTest, PrepareResetStreamsNotPartialPackets) { + std::vector payload(120); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + absl::optional chunk_one = buf_.Produce(kNow, 50); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(buf_.total_buffered_amount(), 2 * payload.size() - 50); + + StreamID stream_ids[] = {StreamID(1)}; + buf_.PrepareResetStreams(stream_ids); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size() - 50); +} + +TEST_F(RRSendQueueTest, EnqueuedItemsArePausedDuringStreamReset) { + std::vector payload(50); + + buf_.PrepareResetStreams(std::vector({StreamID(1)})); + EXPECT_EQ(buf_.total_buffered_amount(), 0u); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size()); + + EXPECT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize).has_value()); + buf_.CommitResetStreams(); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size()); + + absl::optional chunk_one = buf_.Produce(kNow, 50); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, kStreamID); + EXPECT_EQ(buf_.total_buffered_amount(), 0u); +} + +TEST_F(RRSendQueueTest, CommittingResetsSSN) { + std::vector payload(50); + + buf_.Add(kNow, 
DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.ssn, SSN(0)); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.ssn, SSN(1)); + + StreamID stream_ids[] = {StreamID(1)}; + buf_.PrepareResetStreams(stream_ids); + + // Buffered + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + EXPECT_TRUE(buf_.CanResetStreams()); + buf_.CommitResetStreams(); + + absl::optional chunk_three = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_EQ(chunk_three->data.ssn, SSN(0)); +} + +TEST_F(RRSendQueueTest, CommittingResetsSSNForPausedStreamsOnly) { + std::vector payload(50); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(3), kPPID, payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.stream_id, StreamID(1)); + EXPECT_EQ(chunk_one->data.ssn, SSN(0)); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.stream_id, StreamID(3)); + EXPECT_EQ(chunk_two->data.ssn, SSN(0)); + + StreamID stream_ids[] = {StreamID(3)}; + buf_.PrepareResetStreams(stream_ids); + + // Send two more messages - SID 3 will buffer, SID 1 will send. 
+ buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(3), kPPID, payload)); + + EXPECT_TRUE(buf_.CanResetStreams()); + buf_.CommitResetStreams(); + + absl::optional chunk_three = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_EQ(chunk_three->data.stream_id, StreamID(1)); + EXPECT_EQ(chunk_three->data.ssn, SSN(1)); + + absl::optional chunk_four = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_four.has_value()); + EXPECT_EQ(chunk_four->data.stream_id, StreamID(3)); + EXPECT_EQ(chunk_four->data.ssn, SSN(0)); +} + +TEST_F(RRSendQueueTest, RollBackResumesSSN) { + std::vector payload(50); + + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + absl::optional chunk_one = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_one.has_value()); + EXPECT_EQ(chunk_one->data.ssn, SSN(0)); + + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_two.has_value()); + EXPECT_EQ(chunk_two->data.ssn, SSN(1)); + + buf_.PrepareResetStreams(std::vector({StreamID(1)})); + + // Buffered + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + + EXPECT_TRUE(buf_.CanResetStreams()); + buf_.RollbackResetStreams(); + + absl::optional chunk_three = + buf_.Produce(kNow, kOneFragmentPacketSize); + ASSERT_TRUE(chunk_three.has_value()); + EXPECT_EQ(chunk_three->data.ssn, SSN(2)); +} + +TEST_F(RRSendQueueTest, ReturnsFragmentsForOneMessageBeforeMovingToNext) { + std::vector payload(200); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(2), kPPID, payload)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, 
kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(2)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk4.data.stream_id, StreamID(2)); +} + +TEST_F(RRSendQueueTest, ReturnsAlsoSmallFragmentsBeforeMovingToNext) { + std::vector payload(kTwoFragmentPacketSize); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + buf_.Add(kNow, DcSctpMessage(StreamID(2), kPPID, payload)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(kOneFragmentPacketSize)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, + SizeIs(kTwoFragmentPacketSize - kOneFragmentPacketSize)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(2)); + EXPECT_THAT(chunk3.data.payload, SizeIs(kOneFragmentPacketSize)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk4.data.stream_id, StreamID(2)); + EXPECT_THAT(chunk4.data.payload, + SizeIs(kTwoFragmentPacketSize - kOneFragmentPacketSize)); +} + +TEST_F(RRSendQueueTest, WillCycleInRoundRobinFashionBetweenStreams) { + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1))); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(2))); + buf_.Add(kNow, DcSctpMessage(StreamID(2), kPPID, std::vector(3))); + buf_.Add(kNow, DcSctpMessage(StreamID(2), kPPID, std::vector(4))); + buf_.Add(kNow, DcSctpMessage(StreamID(3), kPPID, 
std::vector(5))); + buf_.Add(kNow, DcSctpMessage(StreamID(3), kPPID, std::vector(6))); + buf_.Add(kNow, DcSctpMessage(StreamID(4), kPPID, std::vector(7))); + buf_.Add(kNow, DcSctpMessage(StreamID(4), kPPID, std::vector(8))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(1)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(2)); + EXPECT_THAT(chunk2.data.payload, SizeIs(3)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(3)); + EXPECT_THAT(chunk3.data.payload, SizeIs(5)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk4.data.stream_id, StreamID(4)); + EXPECT_THAT(chunk4.data.payload, SizeIs(7)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk5, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk5.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk5.data.payload, SizeIs(2)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk6, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk6.data.stream_id, StreamID(2)); + EXPECT_THAT(chunk6.data.payload, SizeIs(4)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk7, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk7.data.stream_id, StreamID(3)); + EXPECT_THAT(chunk7.data.payload, SizeIs(6)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk8, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk8.data.stream_id, StreamID(4)); + EXPECT_THAT(chunk8.data.payload, SizeIs(8)); +} + +TEST_F(RRSendQueueTest, DoesntTriggerOnBufferedAmountLowWhenSetToZero) { + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); 
+ buf_.SetBufferedAmountLowThreshold(StreamID(1), 0u); +} + +TEST_F(RRSendQueueTest, TriggersOnBufferedAmountAtZeroLowWhenSent) { + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 1u); + + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(1)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 0u); +} + +TEST_F(RRSendQueueTest, WillRetriggerOnBufferedAmountLowIfAddingMore) { + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1))); + + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(1)); + + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 1u); + + // Should now trigger again, as buffer_amount went above the threshold. 
+ EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(1)); +} + +TEST_F(RRSendQueueTest, OnlyTriggersWhenTransitioningFromAboveToBelowOrEqual) { + buf_.SetBufferedAmountLowThreshold(StreamID(1), 1000); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(10))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 10u); + + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(10)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 0u); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(20))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 20u); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(20)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 0u); +} + +TEST_F(RRSendQueueTest, WillTriggerOnBufferedAmountLowSetAboveZero) { + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + buf_.SetBufferedAmountLowThreshold(StreamID(1), 700); + + std::vector payload(1000); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, payload)); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(kOneFragmentPacketSize)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 900u); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, 
SizeIs(kOneFragmentPacketSize)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 800u); + + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk3.data.payload, SizeIs(kOneFragmentPacketSize)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 700u); + + // Doesn't trigger when reducing even further. + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk3.data.payload, SizeIs(kOneFragmentPacketSize)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 600u); +} + +TEST_F(RRSendQueueTest, WillRetriggerOnBufferedAmountLowSetAboveZero) { + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + buf_.SetBufferedAmountLowThreshold(StreamID(1), 700); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(1000))); + + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, 400)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk1.data.payload, SizeIs(400)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 600u); + + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(200))); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 800u); + + // Will trigger again, as it went above the limit. 
+ EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, 200)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(200)); + EXPECT_EQ(buf_.buffered_amount(StreamID(1)), 600u); +} + +TEST_F(RRSendQueueTest, TriggersOnBufferedAmountLowOnThresholdChanged) { + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + + buf_.Add(kNow, DcSctpMessage(StreamID(1), kPPID, std::vector(100))); + + // Modifying the threshold, still under buffered_amount, should not trigger. + buf_.SetBufferedAmountLowThreshold(StreamID(1), 50); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 99); + + // When the threshold reaches buffered_amount, it will trigger. + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 100); + + // But not when it's set low again. + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 50); + + // But it will trigger when it overshoots. + EXPECT_CALL(on_buffered_amount_low_, Call(StreamID(1))); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 150); + + // But not when it's set low again. + EXPECT_CALL(on_buffered_amount_low_, Call).Times(0); + buf_.SetBufferedAmountLowThreshold(StreamID(1), 0); +} + +TEST_F(RRSendQueueTest, + OnTotalBufferedAmountLowDoesNotTriggerOnBufferFillingUp) { + EXPECT_CALL(on_total_buffered_amount_low_, Call).Times(0); + std::vector payload(kBufferedAmountLowThreshold - 1); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size()); + + // Will not trigger if going above but never below. 
+ buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, + std::vector(kOneFragmentPacketSize))); +} + +TEST_F(RRSendQueueTest, TriggersOnTotalBufferedAmountLowWhenCrossing) { + EXPECT_CALL(on_total_buffered_amount_low_, Call).Times(0); + std::vector payload(kBufferedAmountLowThreshold); + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, payload)); + EXPECT_EQ(buf_.total_buffered_amount(), payload.size()); + + // Reaches it. + buf_.Add(kNow, DcSctpMessage(kStreamID, kPPID, std::vector(1))); + + // Drain it a bit - will trigger. + EXPECT_CALL(on_total_buffered_amount_low_, Call).Times(1); + absl::optional chunk_two = + buf_.Produce(kNow, kOneFragmentPacketSize); +} + +TEST_F(RRSendQueueTest, WillStayInAStreamAsLongAsThatMessageIsSending) { + buf_.Add(kNow, DcSctpMessage(StreamID(5), kPPID, std::vector(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(5)); + EXPECT_THAT(chunk1.data.payload, SizeIs(1)); + + // Next, it should pick a different stream. + + buf_.Add(kNow, + DcSctpMessage(StreamID(1), kPPID, + std::vector(kOneFragmentPacketSize * 2))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk2.data.payload, SizeIs(kOneFragmentPacketSize)); + + // It should still stay on the Stream1 now, even if might be tempted to switch + // to this stream, as it's the stream following 5. + buf_.Add(kNow, DcSctpMessage(StreamID(6), kPPID, std::vector(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(1)); + EXPECT_THAT(chunk3.data.payload, SizeIs(kOneFragmentPacketSize)); + + // After stream id 1 is complete, it's time to do stream 6. 
+ ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk4, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk4.data.stream_id, StreamID(6)); + EXPECT_THAT(chunk4.data.payload, SizeIs(1)); + + EXPECT_FALSE(buf_.Produce(kNow, kOneFragmentPacketSize).has_value()); +} + +TEST_F(RRSendQueueTest, WillStayInStreamWhenOnlySmallFragmentRemaining) { + buf_.Add(kNow, + DcSctpMessage(StreamID(5), kPPID, + std::vector(kOneFragmentPacketSize * 2))); + buf_.Add(kNow, DcSctpMessage(StreamID(6), kPPID, std::vector(1))); + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk1, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk1.data.stream_id, StreamID(5)); + EXPECT_THAT(chunk1.data.payload, SizeIs(kOneFragmentPacketSize)); + + // Now assume that there will be a lot of previous chunks that need to be + // retransmitted, which fills up the next packet and there is little space + // left in the packet for new chunks. What it should NOT do right now is to + // try to send a message from StreamID 6. And it should not try to send a very + // small fragment from StreamID 5 either. So just skip this one. + EXPECT_FALSE(buf_.Produce(kNow, 8).has_value()); + + // When the next produce request comes with a large buffer to fill, continue + // sending from StreamID 5. + + ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk2, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk2.data.stream_id, StreamID(5)); + EXPECT_THAT(chunk2.data.payload, SizeIs(kOneFragmentPacketSize)); + + // Lastly, produce a message on StreamID 6. 
+ ASSERT_HAS_VALUE_AND_ASSIGN(SendQueue::DataToSend chunk3, + buf_.Produce(kNow, kOneFragmentPacketSize)); + EXPECT_EQ(chunk3.data.stream_id, StreamID(6)); + EXPECT_THAT(chunk3.data.payload, SizeIs(1)); + + EXPECT_FALSE(buf_.Produce(kNow, 8).has_value()); +} +} // namespace +} // namespace dcsctp diff --git a/net/dcsctp/tx/send_queue.h b/net/dcsctp/tx/send_queue.h new file mode 100644 index 0000000000..877dbdda59 --- /dev/null +++ b/net/dcsctp/tx/send_queue.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef NET_DCSCTP_TX_SEND_QUEUE_H_ +#define NET_DCSCTP_TX_SEND_QUEUE_H_ + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "net/dcsctp/common/internal_types.h" +#include "net/dcsctp/packet/data.h" +#include "net/dcsctp/public/types.h" + +namespace dcsctp { + +class SendQueue { + public: + // Container for a data chunk that is produced by the SendQueue + struct DataToSend { + explicit DataToSend(Data data) : data(std::move(data)) {} + // The data to send, including all parameters. + Data data; + + // Partial reliability - RFC3758 + absl::optional max_retransmissions; + absl::optional expires_at; + }; + + virtual ~SendQueue() = default; + + // TODO(boivie): This interface is obviously missing an "Add" function, but + // that is postponed a bit until the story around how to model message + // prioritization, which is important for any advanced stream scheduler, is + // further clarified. + + // Produce a chunk to be sent. 
+ // + // `max_size` refers to how many payload bytes that may be produced, not + // including any headers. + virtual absl::optional Produce(TimeMs now, size_t max_size) = 0; + + // Discards a partially sent message identified by the parameters `unordered`, + // `stream_id` and `message_id`. The `message_id` comes from the returned + // information when having called `Produce`. A partially sent message means + // that it has had at least one fragment of it returned when `Produce` was + // called prior to calling this method). + // + // This is used when a message has been found to be expired (by the partial + // reliability extension), and the retransmission queue will signal the + // receiver that any partially received message fragments should be skipped. + // This means that any remaining fragments in the Send Queue must be removed + // as well so that they are not sent. + // + // This function returns true if this message had unsent fragments still in + // the queue that were discarded, and false if there were no such fragments. + virtual bool Discard(IsUnordered unordered, + StreamID stream_id, + MID message_id) = 0; + + // Prepares the streams to be reset. This is used to close a WebRTC data + // channel and will be signaled to the other side. + // + // Concretely, it discards all whole (not partly sent) messages in the given + // streams and pauses those streams so that future added messages aren't + // produced until `ResumeStreams` is called. + // + // TODO(boivie): Investigate if it really should discard any message at all. + // RFC8831 only mentions that "[RFC6525] also guarantees that all the messages + // are delivered (or abandoned) before the stream is reset." + // + // This method can be called multiple times to add more streams to be + // reset, and paused while they are resetting. 
This is the first part of the + // two-phase commit protocol to reset streams, where the caller completes the + // procedure by either calling `CommitResetStreams` or `RollbackResetStreams`. + virtual void PrepareResetStreams(rtc::ArrayView streams) = 0; + + // Returns true if all non-discarded messages during `PrepareResetStreams` + // (which are those that was partially sent before that method was called) + // have been sent. + virtual bool CanResetStreams() const = 0; + + // Called to commit to reset the streams provided to `PrepareResetStreams`. + // It will reset the stream sequence numbers (SSNs) and message identifiers + // (MIDs) and resume the paused streams. + virtual void CommitResetStreams() = 0; + + // Called to abort the resetting of streams provided to `PrepareResetStreams`. + // Will resume the paused streams without resetting the stream sequence + // numbers (SSNs) or message identifiers (MIDs). Note that the non-partial + // messages that were discarded when calling `PrepareResetStreams` will not be + // recovered, to better match the intention from the sender to "close the + // channel". + virtual void RollbackResetStreams() = 0; + + // Resets all message identifier counters (MID, SSN) and makes all partially + // messages be ready to be re-sent in full. This is used when the peer has + // been detected to have restarted and is used to try to minimize the amount + // of data loss. However, data loss cannot be completely guaranteed when a + // peer restarts. + virtual void Reset() = 0; + + // Returns the amount of buffered data. This doesn't include packets that are + // e.g. inflight. + virtual size_t buffered_amount(StreamID stream_id) const = 0; + + // Returns the total amount of buffer data, for all streams. + virtual size_t total_buffered_amount() const = 0; + + // Returns the limit for the `OnBufferedAmountLow` event. Default value is 0. 
+ virtual size_t buffered_amount_low_threshold(StreamID stream_id) const = 0; + + // Sets a limit for the `OnBufferedAmountLow` event. + virtual void SetBufferedAmountLowThreshold(StreamID stream_id, + size_t bytes) = 0; +}; +} // namespace dcsctp + +#endif // NET_DCSCTP_TX_SEND_QUEUE_H_ diff --git a/p2p/BUILD.gn b/p2p/BUILD.gn index ae49deb264..244bc39092 100644 --- a/p2p/BUILD.gn +++ b/p2p/BUILD.gn @@ -45,8 +45,6 @@ rtc_library("rtc_p2p") { "base/ice_credentials_iterator.h", "base/ice_transport_internal.cc", "base/ice_transport_internal.h", - "base/mdns_message.cc", - "base/mdns_message.h", "base/p2p_constants.cc", "base/p2p_constants.h", "base/p2p_transport_channel.cc", @@ -86,32 +84,51 @@ rtc_library("rtc_p2p") { ] deps = [ + "../api:array_view", + "../api:async_dns_resolver", "../api:libjingle_peerconnection_api", "../api:packet_socket_factory", "../api:rtc_error", "../api:scoped_refptr", + "../api:sequence_checker", "../api/crypto:options", "../api/rtc_event_log", + "../api/task_queue", "../api/transport:enums", "../api/transport:stun_types", "../logging:ice_log", "../rtc_base", + "../rtc_base:async_resolver_interface", + "../rtc_base:async_socket", + "../rtc_base:callback_list", "../rtc_base:checks", + "../rtc_base:ip_address", + "../rtc_base:net_helpers", + "../rtc_base:network_constants", "../rtc_base:rtc_numerics", + "../rtc_base:socket", + "../rtc_base:socket_address", + "../rtc_base:socket_server", + "../rtc_base:threading", "../rtc_base/experiments:field_trial_parser", - "//third_party/abseil-cpp/absl/memory", + "../rtc_base/system:no_unique_address", # Needed by pseudo_tcp, which should move to a separate target. 
"../rtc_base:safe_minmax", "../rtc_base:weak_ptr", - "../rtc_base/memory:fifo_buffer", "../rtc_base/network:sent_packet", + "../rtc_base/synchronization:mutex", "../rtc_base/system:rtc_export", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", "../rtc_base/third_party/base64", "../rtc_base/third_party/sigslot", "../system_wrappers:field_trial", "../system_wrappers:metrics", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -127,6 +144,10 @@ if (rtc_include_tests) { "../api:libjingle_peerconnection_api", "../rtc_base", "../rtc_base:rtc_base_approved", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/types:optional", ] @@ -139,6 +160,8 @@ if (rtc_include_tests) { deps = [ ":rtc_p2p", "../rtc_base", + "../rtc_base:net_helpers", + "../rtc_base:threading", ] } @@ -161,14 +184,22 @@ if (rtc_include_tests) { ":rtc_p2p", "../api:libjingle_peerconnection_api", "../api:packet_socket_factory", + "../api:sequence_checker", "../api/crypto:options", "../api/transport:stun_types", "../rtc_base", + "../rtc_base:async_resolver_interface", + "../rtc_base:async_socket", "../rtc_base:gunit_helpers", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_base_tests_utils", + "../rtc_base:socket_address", + "../rtc_base:socket_server", + "../rtc_base:threading", "../rtc_base/third_party/sigslot", "../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/types:optional", ] @@ -182,7 +213,6 @@ if (rtc_include_tests) { "base/basic_async_resolver_factory_unittest.cc", "base/dtls_transport_unittest.cc", "base/ice_credentials_iterator_unittest.cc", - 
"base/mdns_message_unittest.cc", "base/p2p_transport_channel_unittest.cc", "base/port_allocator_unittest.cc", "base/port_unittest.cc", @@ -205,24 +235,36 @@ if (rtc_include_tests) { ":p2p_test_utils", ":rtc_p2p", "../api:libjingle_peerconnection_api", + "../api:mock_async_dns_resolver", "../api:packet_socket_factory", "../api:scoped_refptr", "../api/transport:stun_types", "../api/units:time_delta", "../rtc_base", + "../rtc_base:async_socket", "../rtc_base:checks", "../rtc_base:gunit_helpers", + "../rtc_base:ip_address", + "../rtc_base:net_helpers", + "../rtc_base:network_constants", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_base_tests_utils", + "../rtc_base:socket", + "../rtc_base:socket_address", "../rtc_base:testclient", + "../rtc_base:threading", "../rtc_base/network:sent_packet", "../rtc_base/third_party/sigslot", "../system_wrappers:metrics", "../test:field_trial", + "../test:rtc_expect_death", "../test:test_support", "//testing/gtest", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/types:optional", ] } } @@ -238,12 +280,19 @@ rtc_library("p2p_server_utils") { deps = [ ":rtc_p2p", "../api:packet_socket_factory", + "../api:sequence_checker", "../api/transport:stun_types", "../rtc_base", "../rtc_base:checks", "../rtc_base:rtc_base_tests_utils", + "../rtc_base:socket_address", + "../rtc_base:threading", + "../rtc_base/task_utils:to_queued_task", "../rtc_base/third_party/sigslot", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/memory", ] } @@ -257,10 +306,17 @@ rtc_library("libstunprober") { deps = [ ":rtc_p2p", "../api:packet_socket_factory", + "../api:sequence_checker", "../api/transport:stun_types", "../rtc_base", + "../rtc_base:async_resolver_interface", "../rtc_base:checks", + "../rtc_base:ip_address", + "../rtc_base:socket_address", + "../rtc_base:threading", "../rtc_base/system:rtc_export", + 
"../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", ] } @@ -276,6 +332,7 @@ if (rtc_include_tests) { "../rtc_base", "../rtc_base:checks", "../rtc_base:gunit_helpers", + "../rtc_base:ip_address", "../rtc_base:rtc_base_tests_utils", "../test:test_support", "//testing/gtest", diff --git a/p2p/base/basic_async_resolver_factory.cc b/p2p/base/basic_async_resolver_factory.cc index 9d8266eaf9..7f26a981ee 100644 --- a/p2p/base/basic_async_resolver_factory.cc +++ b/p2p/base/basic_async_resolver_factory.cc @@ -10,7 +10,13 @@ #include "p2p/base/basic_async_resolver_factory.h" -#include "rtc_base/net_helpers.h" +#include +#include + +#include "absl/memory/memory.h" +#include "api/async_dns_resolver.h" +#include "rtc_base/async_resolver.h" +#include "rtc_base/logging.h" namespace webrtc { @@ -18,4 +24,113 @@ rtc::AsyncResolverInterface* BasicAsyncResolverFactory::Create() { return new rtc::AsyncResolver(); } +class WrappingAsyncDnsResolver; + +class WrappingAsyncDnsResolverResult : public AsyncDnsResolverResult { + public: + explicit WrappingAsyncDnsResolverResult(WrappingAsyncDnsResolver* owner) + : owner_(owner) {} + ~WrappingAsyncDnsResolverResult() {} + + // Note: Inline declaration not possible, since it refers to + // WrappingAsyncDnsResolver. + bool GetResolvedAddress(int family, rtc::SocketAddress* addr) const override; + int GetError() const override; + + private: + WrappingAsyncDnsResolver* const owner_; +}; + +class WrappingAsyncDnsResolver : public AsyncDnsResolverInterface, + public sigslot::has_slots<> { + public: + explicit WrappingAsyncDnsResolver(rtc::AsyncResolverInterface* wrapped) + : wrapped_(absl::WrapUnique(wrapped)), result_(this) {} + + ~WrappingAsyncDnsResolver() override { + // Workaround to get around the fact that sigslot-using objects can't be + // destroyed from within their callback: Alert class users early. + // TODO(bugs.webrtc.org/12651): Delete this class once the sigslot users are + // gone. 
+ RTC_CHECK(!within_resolve_result_); + wrapped_.release()->Destroy(false); + } + + void Start(const rtc::SocketAddress& addr, + std::function callback) override { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK_EQ(State::kNotStarted, state_); + state_ = State::kStarted; + callback_ = callback; + wrapped_->SignalDone.connect(this, + &WrappingAsyncDnsResolver::OnResolveResult); + wrapped_->Start(addr); + } + + const AsyncDnsResolverResult& result() const override { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK_EQ(State::kResolved, state_); + return result_; + } + + private: + enum class State { kNotStarted, kStarted, kResolved }; + + friend class WrappingAsyncDnsResolverResult; + // For use by WrappingAsyncDnsResolverResult + rtc::AsyncResolverInterface* wrapped() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return wrapped_.get(); + } + + void OnResolveResult(rtc::AsyncResolverInterface* ref) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(state_ == State::kStarted); + RTC_DCHECK_EQ(ref, wrapped_.get()); + state_ = State::kResolved; + within_resolve_result_ = true; + callback_(); + within_resolve_result_ = false; + } + + // The class variables need to be accessed on a single thread. + SequenceChecker sequence_checker_; + std::function callback_ RTC_GUARDED_BY(sequence_checker_); + std::unique_ptr wrapped_ + RTC_GUARDED_BY(sequence_checker_); + State state_ RTC_GUARDED_BY(sequence_checker_) = State::kNotStarted; + WrappingAsyncDnsResolverResult result_ RTC_GUARDED_BY(sequence_checker_); + bool within_resolve_result_ RTC_GUARDED_BY(sequence_checker_) = false; +}; + +bool WrappingAsyncDnsResolverResult::GetResolvedAddress( + int family, + rtc::SocketAddress* addr) const { + if (!owner_->wrapped()) { + return false; + } + return owner_->wrapped()->GetResolvedAddress(family, addr); +} + +int WrappingAsyncDnsResolverResult::GetError() const { + if (!owner_->wrapped()) { + return -1; // FIXME: Find a code that makes sense. 
+ } + return owner_->wrapped()->GetError(); +} + +std::unique_ptr +WrappingAsyncDnsResolverFactory::Create() { + return std::make_unique(wrapped_factory_->Create()); +} + +std::unique_ptr +WrappingAsyncDnsResolverFactory::CreateAndResolve( + const rtc::SocketAddress& addr, + std::function callback) { + std::unique_ptr resolver = Create(); + resolver->Start(addr, callback); + return resolver; +} + } // namespace webrtc diff --git a/p2p/base/basic_async_resolver_factory.h b/p2p/base/basic_async_resolver_factory.h index c4661b448b..c988913068 100644 --- a/p2p/base/basic_async_resolver_factory.h +++ b/p2p/base/basic_async_resolver_factory.h @@ -11,16 +11,47 @@ #ifndef P2P_BASE_BASIC_ASYNC_RESOLVER_FACTORY_H_ #define P2P_BASE_BASIC_ASYNC_RESOLVER_FACTORY_H_ +#include +#include +#include + +#include "api/async_dns_resolver.h" #include "api/async_resolver_factory.h" #include "rtc_base/async_resolver_interface.h" namespace webrtc { -class BasicAsyncResolverFactory : public AsyncResolverFactory { +class BasicAsyncResolverFactory final : public AsyncResolverFactory { public: rtc::AsyncResolverInterface* Create() override; }; +// This class wraps a factory using the older webrtc::AsyncResolverFactory API, +// and produces webrtc::AsyncDnsResolver objects that contain an +// rtc::AsyncResolver object. 
+class WrappingAsyncDnsResolverFactory final + : public AsyncDnsResolverFactoryInterface { + public: + explicit WrappingAsyncDnsResolverFactory( + std::unique_ptr wrapped_factory) + : owned_factory_(std::move(wrapped_factory)), + wrapped_factory_(owned_factory_.get()) {} + + explicit WrappingAsyncDnsResolverFactory( + AsyncResolverFactory* non_owned_factory) + : wrapped_factory_(non_owned_factory) {} + + std::unique_ptr CreateAndResolve( + const rtc::SocketAddress& addr, + std::function callback) override; + + std::unique_ptr Create() override; + + private: + const std::unique_ptr owned_factory_; + AsyncResolverFactory* const wrapped_factory_; +}; + } // namespace webrtc #endif // P2P_BASE_BASIC_ASYNC_RESOLVER_FACTORY_H_ diff --git a/p2p/base/basic_async_resolver_factory_unittest.cc b/p2p/base/basic_async_resolver_factory_unittest.cc index 0c21c682fb..6706f50d61 100644 --- a/p2p/base/basic_async_resolver_factory_unittest.cc +++ b/p2p/base/basic_async_resolver_factory_unittest.cc @@ -10,10 +10,15 @@ #include "p2p/base/basic_async_resolver_factory.h" +#include "api/test/mock_async_dns_resolver.h" +#include "p2p/base/mock_async_resolver.h" +#include "rtc_base/async_resolver.h" #include "rtc_base/gunit.h" #include "rtc_base/socket_address.h" #include "rtc_base/third_party/sigslot/sigslot.h" +#include "test/gmock.h" #include "test/gtest.h" +#include "test/testsupport/rtc_expect_death.h" namespace webrtc { @@ -30,6 +35,7 @@ class BasicAsyncResolverFactoryTest : public ::testing::Test, rtc::SocketAddress address("", 0); resolver->Start(address); ASSERT_TRUE_WAIT(address_resolved_, 10000 /*ms*/); + resolver->Destroy(false); } void SetAddressResolved(rtc::AsyncResolverInterface* resolver) { @@ -46,4 +52,66 @@ TEST_F(BasicAsyncResolverFactoryTest, TestCreate) { TestCreate(); } +TEST(WrappingAsyncDnsResolverFactoryTest, TestCreateAndResolve) { + WrappingAsyncDnsResolverFactory factory( + std::make_unique()); + + std::unique_ptr resolver(factory.Create()); + 
ASSERT_TRUE(resolver); + + bool address_resolved = false; + rtc::SocketAddress address("", 0); + resolver->Start(address, [&address_resolved]() { address_resolved = true; }); + ASSERT_TRUE_WAIT(address_resolved, 10000 /*ms*/); + resolver.reset(); +} + +TEST(WrappingAsyncDnsResolverFactoryTest, WrapOtherResolver) { + BasicAsyncResolverFactory non_owned_factory; + WrappingAsyncDnsResolverFactory factory(&non_owned_factory); + std::unique_ptr resolver(factory.Create()); + ASSERT_TRUE(resolver); + + bool address_resolved = false; + rtc::SocketAddress address("", 0); + resolver->Start(address, [&address_resolved]() { address_resolved = true; }); + ASSERT_TRUE_WAIT(address_resolved, 10000 /*ms*/); + resolver.reset(); +} + +#if GTEST_HAS_DEATH_TEST && defined(WEBRTC_LINUX) +// Tests that the prohibition against deleting the resolver from the callback +// is enforced. This is required by the use of sigslot in the wrapped resolver. +// Checking the error message fails on a number of platforms, so run this +// test only on the platforms where it works. +void CallResolver(WrappingAsyncDnsResolverFactory& factory) { + rtc::SocketAddress address("", 0); + std::unique_ptr resolver(factory.Create()); + resolver->Start(address, [&resolver]() { resolver.reset(); }); + WAIT(!resolver.get(), 10000 /*ms*/); +} + +TEST(WrappingAsyncDnsResolverFactoryDeathTest, DestroyResolverInCallback) { + // This test requires the main thread to be wrapped. So we defeat the + // workaround in test/test_main_lib.cc by explicitly wrapping the main + // thread here. + auto thread = rtc::Thread::CreateWithSocketServer(); + thread->WrapCurrent(); + // TODO(bugs.webrtc.org/12652): Rewrite as death test in loop style when it + // works. + WrappingAsyncDnsResolverFactory factory( + std::make_unique()); + + // Since EXPECT_DEATH is thread sensitive, and the resolver creates a thread, + // we wrap the whole creation section in EXPECT_DEATH. 
+ RTC_EXPECT_DEATH(CallResolver(factory), + "Check failed: !within_resolve_result_"); + // If we get here, we have to unwrap the thread. + thread->Quit(); + thread->Run(); + thread->UnwrapCurrent(); + thread = nullptr; +} +#endif + } // namespace webrtc diff --git a/p2p/base/basic_packet_socket_factory.cc b/p2p/base/basic_packet_socket_factory.cc index 8be9079338..232e58b546 100644 --- a/p2p/base/basic_packet_socket_factory.cc +++ b/p2p/base/basic_packet_socket_factory.cc @@ -15,6 +15,7 @@ #include #include "p2p/base/async_stun_tcp_socket.h" +#include "rtc_base/async_resolver.h" #include "rtc_base/async_tcp_socket.h" #include "rtc_base/async_udp_socket.h" #include "rtc_base/checks.h" @@ -81,16 +82,20 @@ AsyncPacketSocket* BasicPacketSocketFactory::CreateServerTcpSocket( return NULL; } + // Set TCP_NODELAY (via OPT_NODELAY) for improved performance; this causes + // small media packets to be sent immediately rather than being buffered up, + // reducing latency. + if (socket->SetOption(Socket::OPT_NODELAY, 1) != 0) { + RTC_LOG(LS_ERROR) << "Setting TCP_NODELAY option failed with error " + << socket->GetError(); + } + // If using fake TLS, wrap the TCP socket in a pseudo-SSL socket. if (opts & PacketSocketFactory::OPT_TLS_FAKE) { RTC_DCHECK(!(opts & PacketSocketFactory::OPT_TLS)); socket = new AsyncSSLSocket(socket); } - // Set TCP_NODELAY (via OPT_NODELAY) for improved performance. - // See http://go/gtalktcpnodelayexperiment - socket->SetOption(Socket::OPT_NODELAY, 1); - if (opts & PacketSocketFactory::OPT_STUN) return new cricket::AsyncStunTCPSocket(socket, true); @@ -123,6 +128,16 @@ AsyncPacketSocket* BasicPacketSocketFactory::CreateClientTcpSocket( } } + // Set TCP_NODELAY (via OPT_NODELAY) for improved performance; this causes + // small media packets to be sent immediately rather than being buffered up, + // reducing latency. + // + // Must be done before calling Connect, otherwise it may fail. 
+ if (socket->SetOption(Socket::OPT_NODELAY, 1) != 0) { + RTC_LOG(LS_ERROR) << "Setting TCP_NODELAY option failed with error " + << socket->GetError(); + } + // If using a proxy, wrap the socket in a proxy socket. if (proxy_info.type == PROXY_SOCKS5) { socket = new AsyncSocksProxySocket( @@ -181,10 +196,6 @@ AsyncPacketSocket* BasicPacketSocketFactory::CreateClientTcpSocket( tcp_socket = new AsyncTCPSocket(socket, false); } - // Set TCP_NODELAY (via OPT_NODELAY) for improved performance. - // See http://go/gtalktcpnodelayexperiment - tcp_socket->SetOption(Socket::OPT_NODELAY, 1); - return tcp_socket; } diff --git a/p2p/base/connection.cc b/p2p/base/connection.cc index afb1457567..0aa2bcbeff 100644 --- a/p2p/base/connection.cc +++ b/p2p/base/connection.cc @@ -187,13 +187,13 @@ void ConnectionRequest::Prepare(StunMessage* request) { uint32_t network_info = connection_->port()->Network()->id(); network_info = (network_info << 16) | connection_->port()->network_cost(); request->AddAttribute(std::make_unique( - STUN_ATTR_NETWORK_INFO, network_info)); + STUN_ATTR_GOOG_NETWORK_INFO, network_info)); if (webrtc::field_trial::IsEnabled( "WebRTC-PiggybackIceCheckAcknowledgement") && connection_->last_ping_id_received()) { request->AddAttribute(std::make_unique( - STUN_ATTR_LAST_ICE_CHECK_RECEIVED, + STUN_ATTR_GOOG_LAST_ICE_CHECK_RECEIVED, connection_->last_ping_id_received().value())); } @@ -461,6 +461,7 @@ void Connection::OnReadPacket(const char* data, last_data_received_ = rtc::TimeMillis(); UpdateReceiving(last_data_received_); recv_rate_tracker_.AddSamples(size); + stats_.packets_received++; SignalReadPacket(this, data, size, packet_time_us); // If timed out sending writability checks, start up again @@ -479,6 +480,7 @@ void Connection::OnReadPacket(const char* data, // If this is a STUN response, then update the writable bit. // Log at LS_INFO if we receive a ping on an unwritable connection. rtc::LoggingSeverity sev = (!writable() ? 
rtc::LS_INFO : rtc::LS_VERBOSE); + msg->ValidateMessageIntegrity(remote_candidate().password()); switch (msg->type()) { case STUN_BINDING_REQUEST: RTC_LOG_V(sev) << ToString() << ": Received " @@ -504,8 +506,7 @@ void Connection::OnReadPacket(const char* data, // id's match. case STUN_BINDING_RESPONSE: case STUN_BINDING_ERROR_RESPONSE: - if (msg->ValidateMessageIntegrity(data, size, - remote_candidate().password())) { + if (msg->IntegrityOk()) { requests_.CheckResponse(msg.get()); } // Otherwise silently discard the response message. @@ -522,8 +523,7 @@ void Connection::OnReadPacket(const char* data, break; case GOOG_PING_RESPONSE: case GOOG_PING_ERROR_RESPONSE: - if (msg->ValidateMessageIntegrity32(data, size, - remote_candidate().password())) { + if (msg->IntegrityOk()) { requests_.CheckResponse(msg.get()); } break; @@ -615,7 +615,7 @@ void Connection::HandleStunBindingOrGoogPingRequest(IceMessage* msg) { // Note: If packets are re-ordered, we may get incorrect network cost // temporarily, but it should get the correct value shortly after that. 
const StunUInt32Attribute* network_attr = - msg->GetUInt32(STUN_ATTR_NETWORK_INFO); + msg->GetUInt32(STUN_ATTR_GOOG_NETWORK_INFO); if (network_attr) { uint32_t network_info = network_attr->value(); uint16_t network_cost = static_cast(network_info); @@ -867,7 +867,7 @@ void Connection::HandlePiggybackCheckAcknowledgementIfAny(StunMessage* msg) { RTC_DCHECK(msg->type() == STUN_BINDING_REQUEST || msg->type() == GOOG_PING_REQUEST); const StunByteStringAttribute* last_ice_check_received_attr = - msg->GetByteString(STUN_ATTR_LAST_ICE_CHECK_RECEIVED); + msg->GetByteString(STUN_ATTR_GOOG_LAST_ICE_CHECK_RECEIVED); if (last_ice_check_received_attr) { const std::string request_id = last_ice_check_received_attr->GetString(); auto iter = absl::c_find_if( @@ -1371,13 +1371,15 @@ int ProxyConnection::Send(const void* data, stats_.sent_total_packets++; int sent = port_->SendTo(data, size, remote_candidate_.address(), options, true); + int64_t now = rtc::TimeMillis(); if (sent <= 0) { RTC_DCHECK(sent < 0); error_ = port_->GetError(); stats_.sent_discarded_packets++; } else { - send_rate_tracker_.AddSamples(sent); + send_rate_tracker_.AddSamplesAtTime(now, sent); } + last_send_data_ = now; return sent; } diff --git a/p2p/base/connection.h b/p2p/base/connection.h index 4b71a7da55..d48137d01e 100644 --- a/p2p/base/connection.h +++ b/p2p/base/connection.h @@ -65,13 +65,13 @@ class ConnectionRequest : public StunRequest { int resend_delay() override; private: - Connection* connection_; + Connection* const connection_; }; // Represents a communication link between a port on the local client and a // port on the remote client. class Connection : public CandidatePairInterface, - public rtc::MessageHandler, + public rtc::MessageHandlerAutoCleanup, public sigslot::has_slots<> { public: struct SentPing { @@ -237,6 +237,8 @@ class Connection : public CandidatePairInterface, // that the remote peer has received, if it is indicated in the incoming // connectivity check from the peer. 
void HandlePiggybackCheckAcknowledgementIfAny(StunMessage* msg); + // Timestamp when data was last sent (or attempted to be sent). + int64_t last_send_data() const { return last_send_data_; } int64_t last_data_received() const { return last_data_received_; } // Debugging description of this connection @@ -378,6 +380,7 @@ class Connection : public CandidatePairInterface, ConnectionInfo stats_; rtc::RateTracker recv_rate_tracker_; rtc::RateTracker send_rate_tracker_; + int64_t last_send_data_ = 0; private: // Update the local candidate based on the mapped address attribute. diff --git a/p2p/base/connection_info.cc b/p2p/base/connection_info.cc index a4f8036769..ebea2ab5b0 100644 --- a/p2p/base/connection_info.cc +++ b/p2p/base/connection_info.cc @@ -28,6 +28,7 @@ ConnectionInfo::ConnectionInfo() sent_ping_responses(0), recv_total_bytes(0), recv_bytes_second(0), + packets_received(0), recv_ping_requests(0), recv_ping_responses(0), key(nullptr), diff --git a/p2p/base/connection_info.h b/p2p/base/connection_info.h index a62e8aec00..b5e1c14433 100644 --- a/p2p/base/connection_info.h +++ b/p2p/base/connection_info.h @@ -54,6 +54,7 @@ struct ConnectionInfo { size_t recv_total_bytes; // Total bytes received on this connection. size_t recv_bytes_second; // Bps over the last measurement interval. + size_t packets_received; // Number of packets that were received. size_t recv_ping_requests; // Number of STUN ping request received. size_t recv_ping_responses; // Number of STUN ping response received. Candidate local_candidate; // The local candidate for this connection. 
diff --git a/p2p/base/default_ice_transport_factory.cc b/p2p/base/default_ice_transport_factory.cc index f4b182efdf..0a7175cfd8 100644 --- a/p2p/base/default_ice_transport_factory.cc +++ b/p2p/base/default_ice_transport_factory.cc @@ -44,10 +44,10 @@ DefaultIceTransportFactory::CreateIceTransport( int component, IceTransportInit init) { BasicIceControllerFactory factory; - return new rtc::RefCountedObject( - std::make_unique( + return rtc::make_ref_counted( + cricket::P2PTransportChannel::Create( transport_name, component, init.port_allocator(), - init.async_resolver_factory(), init.event_log(), &factory)); + init.async_dns_resolver_factory(), init.event_log(), &factory)); } } // namespace webrtc diff --git a/p2p/base/default_ice_transport_factory.h b/p2p/base/default_ice_transport_factory.h index 4834c9ada7..e46680d480 100644 --- a/p2p/base/default_ice_transport_factory.h +++ b/p2p/base/default_ice_transport_factory.h @@ -36,7 +36,7 @@ class DefaultIceTransport : public IceTransportInterface { } private: - const rtc::ThreadChecker thread_checker_{}; + const SequenceChecker thread_checker_{}; std::unique_ptr internal_ RTC_GUARDED_BY(thread_checker_); }; diff --git a/p2p/base/dtls_transport.cc b/p2p/base/dtls_transport.cc index 1b7a66000d..76b94a8d79 100644 --- a/p2p/base/dtls_transport.cc +++ b/p2p/base/dtls_transport.cc @@ -15,6 +15,7 @@ #include #include "absl/memory/memory.h" +#include "api/dtls_transport_interface.h" #include "api/rtc_event_log/rtc_event_log.h" #include "logging/rtc_event_log/events/rtc_event_dtls_transport_state.h" #include "logging/rtc_event_log/events/rtc_event_dtls_writable_state.h" @@ -73,6 +74,8 @@ rtc::StreamResult StreamInterfaceChannel::Read(void* buffer, size_t buffer_len, size_t* read, int* error) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (state_ == rtc::SS_CLOSED) return rtc::SR_EOS; if (state_ == rtc::SS_OPENING) @@ -89,6 +92,7 @@ rtc::StreamResult StreamInterfaceChannel::Write(const void* data, size_t data_len, size_t* 
written, int* error) { + RTC_DCHECK_RUN_ON(&sequence_checker_); // Always succeeds, since this is an unreliable transport anyway. // TODO(zhihuang): Should this block if ice_transport_'s temporarily // unwritable? @@ -102,6 +106,7 @@ rtc::StreamResult StreamInterfaceChannel::Write(const void* data, } bool StreamInterfaceChannel::OnPacketReceived(const char* data, size_t size) { + RTC_DCHECK_RUN_ON(&sequence_checker_); if (packets_.size() > 0) { RTC_LOG(LS_WARNING) << "Packet already in queue."; } @@ -118,24 +123,25 @@ bool StreamInterfaceChannel::OnPacketReceived(const char* data, size_t size) { } rtc::StreamState StreamInterfaceChannel::GetState() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); return state_; } void StreamInterfaceChannel::Close() { + RTC_DCHECK_RUN_ON(&sequence_checker_); packets_.Clear(); state_ = rtc::SS_CLOSED; } DtlsTransport::DtlsTransport(IceTransportInternal* ice_transport, const webrtc::CryptoOptions& crypto_options, - webrtc::RtcEventLog* event_log) - : transport_name_(ice_transport->transport_name()), - component_(ice_transport->component()), + webrtc::RtcEventLog* event_log, + rtc::SSLProtocolVersion max_version) + : component_(ice_transport->component()), ice_transport_(ice_transport), downward_(NULL), srtp_ciphers_(crypto_options.GetSupportedDtlsSrtpCryptoSuites()), - ssl_max_version_(rtc::SSL_PROTOCOL_DTLS_12), - crypto_options_(crypto_options), + ssl_max_version_(max_version), event_log_(event_log) { RTC_DCHECK(ice_transport_); ConnectToIceTransport(); @@ -143,16 +149,12 @@ DtlsTransport::DtlsTransport(IceTransportInternal* ice_transport, DtlsTransport::~DtlsTransport() = default; -const webrtc::CryptoOptions& DtlsTransport::crypto_options() const { - return crypto_options_; -} - -DtlsTransportState DtlsTransport::dtls_state() const { +webrtc::DtlsTransportState DtlsTransport::dtls_state() const { return dtls_state_; } const std::string& DtlsTransport::transport_name() const { - return transport_name_; + return 
ice_transport_->transport_name(); } int DtlsTransport::component() const { @@ -193,17 +195,6 @@ rtc::scoped_refptr DtlsTransport::GetLocalCertificate() return local_certificate_; } -bool DtlsTransport::SetSslMaxProtocolVersion(rtc::SSLProtocolVersion version) { - if (dtls_active_) { - RTC_LOG(LS_ERROR) << "Not changing max. protocol version " - "while DTLS is negotiating"; - return false; - } - - ssl_max_version_ = version; - return true; -} - bool DtlsTransport::SetDtlsRole(rtc::SSLRole role) { if (dtls_) { RTC_DCHECK(dtls_role_); @@ -228,7 +219,7 @@ bool DtlsTransport::GetDtlsRole(rtc::SSLRole* role) const { } bool DtlsTransport::GetSslCipherSuite(int* cipher) { - if (dtls_state() != DTLS_TRANSPORT_CONNECTED) { + if (dtls_state() != webrtc::DtlsTransportState::kConnected) { return false; } @@ -286,7 +277,7 @@ bool DtlsTransport::SetRemoteFingerprint(const std::string& digest_alg, remote_fingerprint_value_.size(), &err)) { RTC_LOG(LS_ERROR) << ToString() << ": Couldn't set DTLS certificate digest."; - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); // If the error is "verification failed", don't return false, because // this means the fingerprint was formatted correctly but didn't match // the certificate from the DTLS handshake. Thus the DTLS state should go @@ -300,12 +291,12 @@ bool DtlsTransport::SetRemoteFingerprint(const std::string& digest_alg, // create a new one, resetting our state. 
if (dtls_ && fingerprint_changing) { dtls_.reset(nullptr); - set_dtls_state(DTLS_TRANSPORT_NEW); + set_dtls_state(webrtc::DtlsTransportState::kNew); set_writable(false); } if (!SetupDtls()) { - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); return false; } @@ -383,7 +374,7 @@ bool DtlsTransport::SetupDtls() { } bool DtlsTransport::GetSrtpCryptoSuite(int* cipher) { - if (dtls_state() != DTLS_TRANSPORT_CONNECTED) { + if (dtls_state() != webrtc::DtlsTransportState::kConnected) { return false; } @@ -391,7 +382,7 @@ bool DtlsTransport::GetSrtpCryptoSuite(int* cipher) { } bool DtlsTransport::GetSslVersionBytes(int* version) const { - if (dtls_state() != DTLS_TRANSPORT_CONNECTED) { + if (dtls_state() != webrtc::DtlsTransportState::kConnected) { return false; } @@ -409,14 +400,14 @@ int DtlsTransport::SendPacket(const char* data, } switch (dtls_state()) { - case DTLS_TRANSPORT_NEW: + case webrtc::DtlsTransportState::kNew: // Can't send data until the connection is active. // TODO(ekr@rtfm.com): assert here if dtls_ is NULL? return -1; - case DTLS_TRANSPORT_CONNECTING: + case webrtc::DtlsTransportState::kConnecting: // Can't send data until the connection is active. return -1; - case DTLS_TRANSPORT_CONNECTED: + case webrtc::DtlsTransportState::kConnected: if (flags & PF_SRTP_BYPASS) { RTC_DCHECK(!srtp_ciphers_.empty()); if (!IsRtpPacket(data, size)) { @@ -429,17 +420,17 @@ int DtlsTransport::SendPacket(const char* data, ? static_cast(size) : -1; } - case DTLS_TRANSPORT_FAILED: + case webrtc::DtlsTransportState::kFailed: // Can't send anything when we're failed. - RTC_LOG(LS_ERROR) - << ToString() - << ": Couldn't send packet due to DTLS_TRANSPORT_FAILED."; + RTC_LOG(LS_ERROR) << ToString() + << ": Couldn't send packet due to " + "webrtc::DtlsTransportState::kFailed."; return -1; - case DTLS_TRANSPORT_CLOSED: + case webrtc::DtlsTransportState::kClosed: // Can't send anything when we're closed. 
- RTC_LOG(LS_ERROR) - << ToString() - << ": Couldn't send packet due to DTLS_TRANSPORT_CLOSED."; + RTC_LOG(LS_ERROR) << ToString() + << ": Couldn't send packet due to " + "webrtc::DtlsTransportState::kClosed."; return -1; default: RTC_NOTREACHED(); @@ -518,27 +509,30 @@ void DtlsTransport::OnWritableState(rtc::PacketTransportInternal* transport) { } switch (dtls_state()) { - case DTLS_TRANSPORT_NEW: + case webrtc::DtlsTransportState::kNew: MaybeStartDtls(); break; - case DTLS_TRANSPORT_CONNECTED: + case webrtc::DtlsTransportState::kConnected: // Note: SignalWritableState fired by set_writable. set_writable(ice_transport_->writable()); break; - case DTLS_TRANSPORT_CONNECTING: + case webrtc::DtlsTransportState::kConnecting: // Do nothing. break; - case DTLS_TRANSPORT_FAILED: + case webrtc::DtlsTransportState::kFailed: // Should not happen. Do nothing. - RTC_LOG(LS_ERROR) - << ToString() - << ": OnWritableState() called in state DTLS_TRANSPORT_FAILED."; + RTC_LOG(LS_ERROR) << ToString() + << ": OnWritableState() called in state " + "webrtc::DtlsTransportState::kFailed."; break; - case DTLS_TRANSPORT_CLOSED: + case webrtc::DtlsTransportState::kClosed: // Should not happen. Do nothing. - RTC_LOG(LS_ERROR) - << ToString() - << ": OnWritableState() called in state DTLS_TRANSPORT_CLOSED."; + RTC_LOG(LS_ERROR) << ToString() + << ": OnWritableState() called in state " + "webrtc::DtlsTransportState::kClosed."; + break; + case webrtc::DtlsTransportState::kNumValues: + RTC_NOTREACHED(); break; } } @@ -550,7 +544,7 @@ void DtlsTransport::OnReceivingState(rtc::PacketTransportInternal* transport) { << ": ice_transport " "receiving state changed to " << ice_transport_->receiving(); - if (!dtls_active_ || dtls_state() == DTLS_TRANSPORT_CONNECTED) { + if (!dtls_active_ || dtls_state() == webrtc::DtlsTransportState::kConnected) { // Note: SignalReceivingState fired by set_receiving. 
set_receiving(ice_transport_->receiving()); } @@ -572,7 +566,7 @@ void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport, } switch (dtls_state()) { - case DTLS_TRANSPORT_NEW: + case webrtc::DtlsTransportState::kNew: if (dtls_) { RTC_LOG(LS_INFO) << ToString() << ": Packet received before DTLS started."; @@ -601,8 +595,8 @@ void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport, } break; - case DTLS_TRANSPORT_CONNECTING: - case DTLS_TRANSPORT_CONNECTED: + case webrtc::DtlsTransportState::kConnecting: + case webrtc::DtlsTransportState::kConnected: // We should only get DTLS or SRTP packets; STUN's already been demuxed. // Is this potentially a DTLS packet? if (IsDtlsPacket(data, size)) { @@ -612,7 +606,7 @@ void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport, } } else { // Not a DTLS packet; our handshake should be complete by now. - if (dtls_state() != DTLS_TRANSPORT_CONNECTED) { + if (dtls_state() != webrtc::DtlsTransportState::kConnected) { RTC_LOG(LS_ERROR) << ToString() << ": Received non-DTLS packet before DTLS " "complete."; @@ -633,8 +627,9 @@ void DtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport, SignalReadPacket(this, data, size, packet_time_us, PF_SRTP_BYPASS); } break; - case DTLS_TRANSPORT_FAILED: - case DTLS_TRANSPORT_CLOSED: + case webrtc::DtlsTransportState::kFailed: + case webrtc::DtlsTransportState::kClosed: + case webrtc::DtlsTransportState::kNumValues: // This shouldn't be happening. Drop the packet. break; } @@ -662,7 +657,7 @@ void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) { if (dtls_->GetState() == rtc::SS_OPEN) { // The check for OPEN shouldn't be necessary but let's make // sure we don't accidentally frob the state if it's closed. 
- set_dtls_state(DTLS_TRANSPORT_CONNECTED); + set_dtls_state(webrtc::DtlsTransportState::kConnected); set_writable(true); } } @@ -681,7 +676,7 @@ void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) { // Remote peer shut down the association with no error. RTC_LOG(LS_INFO) << ToString() << ": DTLS transport closed by remote"; set_writable(false); - set_dtls_state(DTLS_TRANSPORT_CLOSED); + set_dtls_state(webrtc::DtlsTransportState::kClosed); SignalClosed(this); } else if (ret == rtc::SR_ERROR) { // Remote peer shut down the association with an error. @@ -690,7 +685,7 @@ void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) { << ": Closed by remote with DTLS transport error, code=" << read_error; set_writable(false); - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); SignalClosed(this); } } while (ret == rtc::SR_SUCCESS); @@ -700,10 +695,10 @@ void DtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) { set_writable(false); if (!err) { RTC_LOG(LS_INFO) << ToString() << ": DTLS transport closed"; - set_dtls_state(DTLS_TRANSPORT_CLOSED); + set_dtls_state(webrtc::DtlsTransportState::kClosed); } else { RTC_LOG(LS_INFO) << ToString() << ": DTLS transport error, code=" << err; - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); } } } @@ -727,11 +722,11 @@ void DtlsTransport::MaybeStartDtls() { // configuration and therefore are our fault. 
RTC_NOTREACHED() << "StartSSL failed."; RTC_LOG(LS_ERROR) << ToString() << ": Couldn't start DTLS handshake"; - set_dtls_state(DTLS_TRANSPORT_FAILED); + set_dtls_state(webrtc::DtlsTransportState::kFailed); return; } RTC_LOG(LS_INFO) << ToString() << ": DtlsTransport: Started DTLS handshake"; - set_dtls_state(DTLS_TRANSPORT_CONNECTING); + set_dtls_state(webrtc::DtlsTransportState::kConnecting); // Now that the handshake has started, we can process a cached ClientHello // (if one exists). if (cached_client_hello_.size()) { @@ -799,22 +794,23 @@ void DtlsTransport::set_writable(bool writable) { SignalWritableState(this); } -void DtlsTransport::set_dtls_state(DtlsTransportState state) { +void DtlsTransport::set_dtls_state(webrtc::DtlsTransportState state) { if (dtls_state_ == state) { return; } if (event_log_) { - event_log_->Log(std::make_unique( - ConvertDtlsTransportState(state))); + event_log_->Log( + std::make_unique(state)); } - RTC_LOG(LS_VERBOSE) << ToString() << ": set_dtls_state from:" << dtls_state_ - << " to " << state; + RTC_LOG(LS_VERBOSE) << ToString() << ": set_dtls_state from:" + << static_cast(dtls_state_) << " to " + << static_cast(state); dtls_state_ = state; - SignalDtlsState(this, state); + SendDtlsState(this, state); } void DtlsTransport::OnDtlsHandshakeError(rtc::SSLHandshakeError error) { - SignalDtlsHandshakeError(error); + SendDtlsHandshakeError(error); } void DtlsTransport::ConfigureHandshakeTimeout() { diff --git a/p2p/base/dtls_transport.h b/p2p/base/dtls_transport.h index 89156a15d1..0296a742c0 100644 --- a/p2p/base/dtls_transport.h +++ b/p2p/base/dtls_transport.h @@ -16,6 +16,8 @@ #include #include "api/crypto/crypto_options.h" +#include "api/dtls_transport_interface.h" +#include "api/sequence_checker.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/ice_transport_internal.h" #include "rtc_base/buffer.h" @@ -24,7 +26,7 @@ #include "rtc_base/ssl_stream_adapter.h" #include "rtc_base/stream.h" #include 
"rtc_base/strings/string_builder.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/system/no_unique_address.h" namespace rtc { class PacketTransportInternal; @@ -54,9 +56,10 @@ class StreamInterfaceChannel : public rtc::StreamInterface { int* error) override; private: - IceTransportInternal* ice_transport_; // owned by DtlsTransport - rtc::StreamState state_; - rtc::BufferQueue packets_; + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker sequence_checker_; + IceTransportInternal* const ice_transport_; // owned by DtlsTransport + rtc::StreamState state_ RTC_GUARDED_BY(sequence_checker_); + rtc::BufferQueue packets_ RTC_GUARDED_BY(sequence_checker_); RTC_DISALLOW_COPY_AND_ASSIGN(StreamInterfaceChannel); }; @@ -99,14 +102,15 @@ class DtlsTransport : public DtlsTransportInternal { // // |event_log| is an optional RtcEventLog for logging state changes. It should // outlive the DtlsTransport. - explicit DtlsTransport(IceTransportInternal* ice_transport, - const webrtc::CryptoOptions& crypto_options, - webrtc::RtcEventLog* event_log); + DtlsTransport( + IceTransportInternal* ice_transport, + const webrtc::CryptoOptions& crypto_options, + webrtc::RtcEventLog* event_log, + rtc::SSLProtocolVersion max_version = rtc::SSL_PROTOCOL_DTLS_12); ~DtlsTransport() override; - const webrtc::CryptoOptions& crypto_options() const override; - DtlsTransportState dtls_state() const override; + webrtc::DtlsTransportState dtls_state() const override; const std::string& transport_name() const override; int component() const override; @@ -140,8 +144,6 @@ class DtlsTransport : public DtlsTransportInternal { bool GetOption(rtc::Socket::Option opt, int* value) override; - bool SetSslMaxProtocolVersion(rtc::SSLProtocolVersion version) override; - // Find out which TLS version was negotiated bool GetSslVersionBytes(int* version) const override; // Find out which DTLS-SRTP cipher was negotiated @@ -189,7 +191,7 @@ class DtlsTransport : public DtlsTransportInternal { const absl::string_view 
RECEIVING_ABBREV[2] = {"_", "R"}; const absl::string_view WRITABLE_ABBREV[2] = {"_", "W"}; rtc::StringBuilder sb; - sb << "DtlsTransport[" << transport_name_ << "|" << component_ << "|" + sb << "DtlsTransport[" << transport_name() << "|" << component_ << "|" << RECEIVING_ABBREV[receiving()] << WRITABLE_ABBREV[writable()] << "]"; return sb.Release(); } @@ -218,24 +220,22 @@ class DtlsTransport : public DtlsTransportInternal { void set_receiving(bool receiving); void set_writable(bool writable); // Sets the DTLS state, signaling if necessary. - void set_dtls_state(DtlsTransportState state); + void set_dtls_state(webrtc::DtlsTransportState state); - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; - std::string transport_name_; - int component_; - DtlsTransportState dtls_state_ = DTLS_TRANSPORT_NEW; + const int component_; + webrtc::DtlsTransportState dtls_state_ = webrtc::DtlsTransportState::kNew; // Underlying ice_transport, not owned by this class. - IceTransportInternal* ice_transport_; + IceTransportInternal* const ice_transport_; std::unique_ptr dtls_; // The DTLS stream StreamInterfaceChannel* downward_; // Wrapper for ice_transport_, owned by dtls_. - std::vector srtp_ciphers_; // SRTP ciphers to use with DTLS. + const std::vector srtp_ciphers_; // SRTP ciphers to use with DTLS. 
bool dtls_active_ = false; rtc::scoped_refptr local_certificate_; absl::optional dtls_role_; - rtc::SSLProtocolVersion ssl_max_version_; - webrtc::CryptoOptions crypto_options_; + const rtc::SSLProtocolVersion ssl_max_version_; rtc::Buffer remote_fingerprint_value_; std::string remote_fingerprint_algorithm_; diff --git a/p2p/base/dtls_transport_factory.h b/p2p/base/dtls_transport_factory.h index 9ad78a7cc2..7c4a24adc8 100644 --- a/p2p/base/dtls_transport_factory.h +++ b/p2p/base/dtls_transport_factory.h @@ -31,7 +31,8 @@ class DtlsTransportFactory { virtual std::unique_ptr CreateDtlsTransport( IceTransportInternal* ice, - const webrtc::CryptoOptions& crypto_options) = 0; + const webrtc::CryptoOptions& crypto_options, + rtc::SSLProtocolVersion max_version) = 0; }; } // namespace cricket diff --git a/p2p/base/dtls_transport_internal.cc b/p2p/base/dtls_transport_internal.cc index dd23b1baa7..6997dbc702 100644 --- a/p2p/base/dtls_transport_internal.cc +++ b/p2p/base/dtls_transport_internal.cc @@ -16,22 +16,4 @@ DtlsTransportInternal::DtlsTransportInternal() = default; DtlsTransportInternal::~DtlsTransportInternal() = default; -webrtc::DtlsTransportState ConvertDtlsTransportState( - cricket::DtlsTransportState cricket_state) { - switch (cricket_state) { - case DtlsTransportState::DTLS_TRANSPORT_NEW: - return webrtc::DtlsTransportState::kNew; - case DtlsTransportState::DTLS_TRANSPORT_CONNECTING: - return webrtc::DtlsTransportState::kConnecting; - case DtlsTransportState::DTLS_TRANSPORT_CONNECTED: - return webrtc::DtlsTransportState::kConnected; - case DtlsTransportState::DTLS_TRANSPORT_CLOSED: - return webrtc::DtlsTransportState::kClosed; - case DtlsTransportState::DTLS_TRANSPORT_FAILED: - return webrtc::DtlsTransportState::kFailed; - } - RTC_NOTREACHED(); - return webrtc::DtlsTransportState::kNew; -} - } // namespace cricket diff --git a/p2p/base/dtls_transport_internal.h b/p2p/base/dtls_transport_internal.h index 4c35d7371f..0b26a7fd7a 100644 --- 
a/p2p/base/dtls_transport_internal.h +++ b/p2p/base/dtls_transport_internal.h @@ -16,36 +16,22 @@ #include #include +#include +#include "absl/base/attributes.h" #include "api/crypto/crypto_options.h" #include "api/dtls_transport_interface.h" #include "api/scoped_refptr.h" #include "p2p/base/ice_transport_internal.h" #include "p2p/base/packet_transport_internal.h" +#include "rtc_base/callback_list.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/ssl_certificate.h" #include "rtc_base/ssl_fingerprint.h" #include "rtc_base/ssl_stream_adapter.h" -#include "rtc_base/third_party/sigslot/sigslot.h" namespace cricket { -enum DtlsTransportState { - // Haven't started negotiating. - DTLS_TRANSPORT_NEW = 0, - // Have started negotiating. - DTLS_TRANSPORT_CONNECTING, - // Negotiated, and has a secure connection. - DTLS_TRANSPORT_CONNECTED, - // Transport is closed. - DTLS_TRANSPORT_CLOSED, - // Failed due to some error in the handshake process. - DTLS_TRANSPORT_FAILED, -}; - -webrtc::DtlsTransportState ConvertDtlsTransportState( - cricket::DtlsTransportState cricket_state); - enum PacketFlags { PF_NORMAL = 0x00, // A normal packet. PF_SRTP_BYPASS = 0x01, // An encrypted SRTP packet; bypass any additional @@ -62,9 +48,7 @@ class DtlsTransportInternal : public rtc::PacketTransportInternal { public: ~DtlsTransportInternal() override; - virtual const webrtc::CryptoOptions& crypto_options() const = 0; - - virtual DtlsTransportState dtls_state() const = 0; + virtual webrtc::DtlsTransportState dtls_state() const = 0; virtual int component() const = 0; @@ -107,21 +91,55 @@ class DtlsTransportInternal : public rtc::PacketTransportInternal { const uint8_t* digest, size_t digest_len) = 0; - virtual bool SetSslMaxProtocolVersion(rtc::SSLProtocolVersion version) = 0; + ABSL_DEPRECATED("Set the max version via construction.") + bool SetSslMaxProtocolVersion(rtc::SSLProtocolVersion version) { + return true; + } // Expose the underneath IceTransport. 
virtual IceTransportInternal* ice_transport() = 0; - sigslot::signal2 SignalDtlsState; + // F: void(DtlsTransportInternal*, const webrtc::DtlsTransportState) + template + void SubscribeDtlsTransportState(F&& callback) { + dtls_transport_state_callback_list_.AddReceiver(std::forward(callback)); + } + + template + void SubscribeDtlsTransportState(const void* id, F&& callback) { + dtls_transport_state_callback_list_.AddReceiver(id, + std::forward(callback)); + } + // Unsubscribe the subscription with given id. + void UnsubscribeDtlsTransportState(const void* id) { + dtls_transport_state_callback_list_.RemoveReceivers(id); + } + + void SendDtlsState(DtlsTransportInternal* transport, + webrtc::DtlsTransportState state) { + dtls_transport_state_callback_list_.Send(transport, state); + } // Emitted whenever the Dtls handshake failed on some transport channel. - sigslot::signal1 SignalDtlsHandshakeError; + // F: void(rtc::SSLHandshakeError) + template + void SubscribeDtlsHandshakeError(F&& callback) { + dtls_handshake_error_callback_list_.AddReceiver(std::forward(callback)); + } + + void SendDtlsHandshakeError(rtc::SSLHandshakeError error) { + dtls_handshake_error_callback_list_.Send(error); + } protected: DtlsTransportInternal(); private: RTC_DISALLOW_COPY_AND_ASSIGN(DtlsTransportInternal); + webrtc::CallbackList + dtls_handshake_error_callback_list_; + webrtc::CallbackList + dtls_transport_state_callback_list_; }; } // namespace cricket diff --git a/p2p/base/dtls_transport_unittest.cc b/p2p/base/dtls_transport_unittest.cc index c31062dd94..f01566d263 100644 --- a/p2p/base/dtls_transport_unittest.cc +++ b/p2p/base/dtls_transport_unittest.cc @@ -15,6 +15,7 @@ #include #include +#include "api/dtls_transport_interface.h" #include "p2p/base/fake_ice_transport.h" #include "p2p/base/packet_transport_internal.h" #include "rtc_base/checks.h" @@ -52,7 +53,7 @@ void SetRemoteFingerprintFromCert( std::unique_ptr fingerprint = rtc::SSLFingerprint::CreateFromCertificate(*cert); if 
(modify_digest) { - ++fingerprint->digest[0]; + ++fingerprint->digest.MutableData()[0]; } // Even if digest is verified to be incorrect, should fail asynchrnously. EXPECT_TRUE(transport->SetRemoteFingerprint( @@ -86,10 +87,9 @@ class DtlsTestClient : public sigslot::has_slots<> { fake_ice_transport_->SignalReadPacket.connect( this, &DtlsTestClient::OnFakeIceTransportReadPacket); - dtls_transport_ = std::make_unique(fake_ice_transport_.get(), - webrtc::CryptoOptions(), - /*event_log=*/nullptr); - dtls_transport_->SetSslMaxProtocolVersion(ssl_max_version_); + dtls_transport_ = std::make_unique( + fake_ice_transport_.get(), webrtc::CryptoOptions(), + /*event_log=*/nullptr, ssl_max_version_); // Note: Certificate may be null here if testing passthrough. dtls_transport_->SetLocalCertificate(certificate_); dtls_transport_->SignalWritableState.connect( @@ -669,18 +669,19 @@ class DtlsEventOrderingTest // Sanity check that the handshake hasn't already finished. EXPECT_FALSE(client1_.dtls_transport()->IsDtlsConnected() || client1_.dtls_transport()->dtls_state() == - DTLS_TRANSPORT_FAILED); + webrtc::DtlsTransportState::kFailed); EXPECT_TRUE_SIMULATED_WAIT( client1_.dtls_transport()->IsDtlsConnected() || client1_.dtls_transport()->dtls_state() == - DTLS_TRANSPORT_FAILED, + webrtc::DtlsTransportState::kFailed, kTimeout, fake_clock_); break; } } - DtlsTransportState expected_final_state = - valid_fingerprint ? DTLS_TRANSPORT_CONNECTED : DTLS_TRANSPORT_FAILED; + webrtc::DtlsTransportState expected_final_state = + valid_fingerprint ? 
webrtc::DtlsTransportState::kConnected + : webrtc::DtlsTransportState::kFailed; EXPECT_EQ_SIMULATED_WAIT(expected_final_state, client1_.dtls_transport()->dtls_state(), kTimeout, fake_clock_); diff --git a/p2p/base/fake_dtls_transport.h b/p2p/base/fake_dtls_transport.h index 7061ea4b3e..e02755c68f 100644 --- a/p2p/base/fake_dtls_transport.h +++ b/p2p/base/fake_dtls_transport.h @@ -17,6 +17,7 @@ #include #include "api/crypto/crypto_options.h" +#include "api/dtls_transport_interface.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/fake_ice_transport.h" #include "rtc_base/fake_ssl_identity.h" @@ -55,9 +56,15 @@ class FakeDtlsTransport : public DtlsTransportInternal { // If this constructor is called, a new fake ICE transport will be created, // and this FakeDtlsTransport will take the ownership. - explicit FakeDtlsTransport(const std::string& name, int component) + FakeDtlsTransport(const std::string& name, int component) : FakeDtlsTransport(std::make_unique(name, component)) { } + FakeDtlsTransport(const std::string& name, + int component, + rtc::Thread* network_thread) + : FakeDtlsTransport(std::make_unique(name, + component, + network_thread)) {} ~FakeDtlsTransport() override { if (dest_ && dest_->dest_ == this) { @@ -83,9 +90,9 @@ class FakeDtlsTransport : public DtlsTransportInternal { ice_transport_->SetReceiving(receiving); set_receiving(receiving); } - void SetDtlsState(DtlsTransportState state) { + void SetDtlsState(webrtc::DtlsTransportState state) { dtls_state_ = state; - SignalDtlsState(this, dtls_state_); + SendDtlsState(this, dtls_state_); } // Simulates the two DTLS transports connecting to each other. 
@@ -115,7 +122,7 @@ class FakeDtlsTransport : public DtlsTransportInternal { if (!dtls_role_) { dtls_role_ = std::move(rtc::SSL_CLIENT); } - SetDtlsState(DTLS_TRANSPORT_CONNECTED); + SetDtlsState(webrtc::DtlsTransportState::kConnected); ice_transport_->SetDestination( static_cast(dest->ice_transport()), asymmetric); } else { @@ -127,7 +134,7 @@ class FakeDtlsTransport : public DtlsTransportInternal { } // Fake DtlsTransportInternal implementation. - DtlsTransportState dtls_state() const override { return dtls_state_; } + webrtc::DtlsTransportState dtls_state() const override { return dtls_state_; } const std::string& transport_name() const override { return transport_name_; } int component() const override { return component_; } const rtc::SSLFingerprint& dtls_fingerprint() const { @@ -140,9 +147,6 @@ class FakeDtlsTransport : public DtlsTransportInternal { rtc::SSLFingerprint(alg, rtc::MakeArrayView(digest, digest_len)); return true; } - bool SetSslMaxProtocolVersion(rtc::SSLProtocolVersion version) override { - return true; - } bool SetDtlsRole(rtc::SSLRole role) override { dtls_role_ = std::move(role); return true; @@ -154,12 +158,6 @@ class FakeDtlsTransport : public DtlsTransportInternal { *role = *dtls_role_; return true; } - const webrtc::CryptoOptions& crypto_options() const override { - return crypto_options_; - } - void SetCryptoOptions(const webrtc::CryptoOptions& crypto_options) { - crypto_options_ = crypto_options; - } bool SetLocalCertificate( const rtc::scoped_refptr& certificate) override { do_dtls_ = true; @@ -297,9 +295,8 @@ class FakeDtlsTransport : public DtlsTransportInternal { absl::optional dtls_role_; int crypto_suite_ = rtc::SRTP_AES128_CM_SHA1_80; absl::optional ssl_cipher_suite_; - webrtc::CryptoOptions crypto_options_; - DtlsTransportState dtls_state_ = DTLS_TRANSPORT_NEW; + webrtc::DtlsTransportState dtls_state_ = webrtc::DtlsTransportState::kNew; bool receiving_ = false; bool writable_ = false; diff --git 
a/p2p/base/fake_ice_transport.h b/p2p/base/fake_ice_transport.h index edc5730440..f8be8a9835 100644 --- a/p2p/base/fake_ice_transport.h +++ b/p2p/base/fake_ice_transport.h @@ -20,11 +20,15 @@ #include "absl/types/optional.h" #include "api/ice_transport_interface.h" #include "p2p/base/ice_transport_internal.h" -#include "rtc_base/async_invoker.h" #include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" namespace cricket { +// All methods must be called on the network thread (which is either the thread +// calling the constructor, or the separate thread explicitly passed to the +// constructor). class FakeIceTransport : public IceTransportInternal { public: explicit FakeIceTransport(const std::string& name, @@ -34,6 +38,8 @@ class FakeIceTransport : public IceTransportInternal { component_(component), network_thread_(network_thread ? network_thread : rtc::Thread::Current()) {} + // Must be called either on the network thread, or after the network thread + // has been shut down. ~FakeIceTransport() override { if (dest_ && dest_->dest_ == this) { dest_->dest_ = nullptr; @@ -42,18 +48,31 @@ class FakeIceTransport : public IceTransportInternal { // If async, will send packets by "Post"-ing to message queue instead of // synchronously "Send"-ing. - void SetAsync(bool async) { async_ = async; } - void SetAsyncDelay(int delay_ms) { async_delay_ms_ = delay_ms; } + void SetAsync(bool async) { + RTC_DCHECK_RUN_ON(network_thread_); + async_ = async; + } + void SetAsyncDelay(int delay_ms) { + RTC_DCHECK_RUN_ON(network_thread_); + async_delay_ms_ = delay_ms; + } // SetWritable, SetReceiving and SetDestination are the main methods that can // be used for testing, to simulate connectivity or lack thereof. 
- void SetWritable(bool writable) { set_writable(writable); } - void SetReceiving(bool receiving) { set_receiving(receiving); } + void SetWritable(bool writable) { + RTC_DCHECK_RUN_ON(network_thread_); + set_writable(writable); + } + void SetReceiving(bool receiving) { + RTC_DCHECK_RUN_ON(network_thread_); + set_receiving(receiving); + } // Simulates the two transports connecting to each other. // If |asymmetric| is true this method only affects this FakeIceTransport. // If false, it affects |dest| as well. void SetDestination(FakeIceTransport* dest, bool asymmetric = false) { + RTC_DCHECK_RUN_ON(network_thread_); if (dest == dest_) { return; } @@ -75,12 +94,14 @@ class FakeIceTransport : public IceTransportInternal { void SetTransportState(webrtc::IceTransportState state, IceTransportState legacy_state) { + RTC_DCHECK_RUN_ON(network_thread_); transport_state_ = state; legacy_transport_state_ = legacy_state; SignalIceTransportStateChanged(this); } void SetConnectionCount(size_t connection_count) { + RTC_DCHECK_RUN_ON(network_thread_); size_t old_connection_count = connection_count_; connection_count_ = connection_count; if (connection_count) { @@ -94,6 +115,7 @@ class FakeIceTransport : public IceTransportInternal { } void SetCandidatesGatheringComplete() { + RTC_DCHECK_RUN_ON(network_thread_); if (gathering_state_ != kIceGatheringComplete) { gathering_state_ = kIceGatheringComplete; SignalGatheringState(this); @@ -102,16 +124,29 @@ class FakeIceTransport : public IceTransportInternal { // Convenience functions for accessing ICE config and other things. 
int receiving_timeout() const { + RTC_DCHECK_RUN_ON(network_thread_); return ice_config_.receiving_timeout_or_default(); } - bool gather_continually() const { return ice_config_.gather_continually(); } - const Candidates& remote_candidates() const { return remote_candidates_; } + bool gather_continually() const { + RTC_DCHECK_RUN_ON(network_thread_); + return ice_config_.gather_continually(); + } + const Candidates& remote_candidates() const { + RTC_DCHECK_RUN_ON(network_thread_); + return remote_candidates_; + } // Fake IceTransportInternal implementation. const std::string& transport_name() const override { return name_; } int component() const override { return component_; } - uint64_t IceTiebreaker() const { return tiebreaker_; } - IceMode remote_ice_mode() const { return remote_ice_mode_; } + uint64_t IceTiebreaker() const { + RTC_DCHECK_RUN_ON(network_thread_); + return tiebreaker_; + } + IceMode remote_ice_mode() const { + RTC_DCHECK_RUN_ON(network_thread_); + return remote_ice_mode_; + } const std::string& ice_ufrag() const { return ice_parameters_.ufrag; } const std::string& ice_pwd() const { return ice_parameters_.pwd; } const std::string& remote_ice_ufrag() const { @@ -126,6 +161,7 @@ class FakeIceTransport : public IceTransportInternal { } IceTransportState GetState() const override { + RTC_DCHECK_RUN_ON(network_thread_); if (legacy_transport_state_) { return *legacy_transport_state_; } @@ -143,6 +179,7 @@ class FakeIceTransport : public IceTransportInternal { } webrtc::IceTransportState GetIceTransportState() const override { + RTC_DCHECK_RUN_ON(network_thread_); if (transport_state_) { return *transport_state_; } @@ -159,21 +196,34 @@ class FakeIceTransport : public IceTransportInternal { return webrtc::IceTransportState::kConnected; } - void SetIceRole(IceRole role) override { role_ = role; } - IceRole GetIceRole() const override { return role_; } + void SetIceRole(IceRole role) override { + RTC_DCHECK_RUN_ON(network_thread_); + role_ = role; + } + 
IceRole GetIceRole() const override { + RTC_DCHECK_RUN_ON(network_thread_); + return role_; + } void SetIceTiebreaker(uint64_t tiebreaker) override { + RTC_DCHECK_RUN_ON(network_thread_); tiebreaker_ = tiebreaker; } void SetIceParameters(const IceParameters& ice_params) override { + RTC_DCHECK_RUN_ON(network_thread_); ice_parameters_ = ice_params; } void SetRemoteIceParameters(const IceParameters& params) override { + RTC_DCHECK_RUN_ON(network_thread_); remote_ice_parameters_ = params; } - void SetRemoteIceMode(IceMode mode) override { remote_ice_mode_ = mode; } + void SetRemoteIceMode(IceMode mode) override { + RTC_DCHECK_RUN_ON(network_thread_); + remote_ice_mode_ = mode; + } void MaybeStartGathering() override { + RTC_DCHECK_RUN_ON(network_thread_); if (gathering_state_ == kIceGatheringNew) { gathering_state_ = kIceGatheringGathering; SignalGatheringState(this); @@ -181,15 +231,21 @@ class FakeIceTransport : public IceTransportInternal { } IceGatheringState gathering_state() const override { + RTC_DCHECK_RUN_ON(network_thread_); return gathering_state_; } - void SetIceConfig(const IceConfig& config) override { ice_config_ = config; } + void SetIceConfig(const IceConfig& config) override { + RTC_DCHECK_RUN_ON(network_thread_); + ice_config_ = config; + } void AddRemoteCandidate(const Candidate& candidate) override { + RTC_DCHECK_RUN_ON(network_thread_); remote_candidates_.push_back(candidate); } void RemoveRemoteCandidate(const Candidate& candidate) override { + RTC_DCHECK_RUN_ON(network_thread_); auto it = absl::c_find(remote_candidates_, candidate); if (it == remote_candidates_.end()) { RTC_LOG(LS_INFO) << "Trying to remove a candidate which doesn't exist."; @@ -199,7 +255,10 @@ class FakeIceTransport : public IceTransportInternal { remote_candidates_.erase(it); } - void RemoveAllRemoteCandidates() override { remote_candidates_.clear(); } + void RemoveAllRemoteCandidates() override { + RTC_DCHECK_RUN_ON(network_thread_); + remote_candidates_.clear(); + } bool 
GetStats(IceTransportStats* ice_transport_stats) override { CandidateStats candidate_stats; @@ -220,17 +279,25 @@ class FakeIceTransport : public IceTransportInternal { } // Fake PacketTransportInternal implementation. - bool writable() const override { return writable_; } - bool receiving() const override { return receiving_; } + bool writable() const override { + RTC_DCHECK_RUN_ON(network_thread_); + return writable_; + } + bool receiving() const override { + RTC_DCHECK_RUN_ON(network_thread_); + return receiving_; + } // If combine is enabled, every two consecutive packets to be sent with // "SendPacket" will be combined into one outgoing packet. void combine_outgoing_packets(bool combine) { + RTC_DCHECK_RUN_ON(network_thread_); combine_outgoing_packets_ = combine; } int SendPacket(const char* data, size_t len, const rtc::PacketOptions& options, int flags) override { + RTC_DCHECK_RUN_ON(network_thread_); if (!dest_) { return -1; } @@ -239,9 +306,12 @@ class FakeIceTransport : public IceTransportInternal { if (!combine_outgoing_packets_ || send_packet_.size() > len) { rtc::CopyOnWriteBuffer packet(std::move(send_packet_)); if (async_) { - invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, rtc::Thread::Current(), - rtc::Bind(&FakeIceTransport::SendPacketInternal, this, packet), + network_thread_->PostDelayedTask( + ToQueuedTask(task_safety_.flag(), + [this, packet] { + RTC_DCHECK_RUN_ON(network_thread_); + FakeIceTransport::SendPacketInternal(packet); + }), async_delay_ms_); } else { SendPacketInternal(packet); @@ -253,10 +323,12 @@ class FakeIceTransport : public IceTransportInternal { } int SetOption(rtc::Socket::Option opt, int value) override { + RTC_DCHECK_RUN_ON(network_thread_); socket_options_[opt] = value; return true; } bool GetOption(rtc::Socket::Option opt, int* value) override { + RTC_DCHECK_RUN_ON(network_thread_); auto it = socket_options_.find(opt); if (it != socket_options_.end()) { *value = it->second; @@ -268,19 +340,27 @@ class FakeIceTransport : 
public IceTransportInternal { int GetError() override { return 0; } - rtc::CopyOnWriteBuffer last_sent_packet() { return last_sent_packet_; } + rtc::CopyOnWriteBuffer last_sent_packet() { + RTC_DCHECK_RUN_ON(network_thread_); + return last_sent_packet_; + } absl::optional network_route() const override { + RTC_DCHECK_RUN_ON(network_thread_); return network_route_; } void SetNetworkRoute(absl::optional network_route) { + RTC_DCHECK_RUN_ON(network_thread_); network_route_ = network_route; - network_thread_->Invoke( - RTC_FROM_HERE, [this] { SignalNetworkRouteChanged(network_route_); }); + network_thread_->Invoke(RTC_FROM_HERE, [this] { + RTC_DCHECK_RUN_ON(network_thread_); + SignalNetworkRouteChanged(network_route_); + }); } private: - void set_writable(bool writable) { + void set_writable(bool writable) + RTC_EXCLUSIVE_LOCKS_REQUIRED(network_thread_) { if (writable_ == writable) { return; } @@ -292,7 +372,8 @@ class FakeIceTransport : public IceTransportInternal { SignalWritableState(this); } - void set_receiving(bool receiving) { + void set_receiving(bool receiving) + RTC_EXCLUSIVE_LOCKS_REQUIRED(network_thread_) { if (receiving_ == receiving) { return; } @@ -300,7 +381,8 @@ class FakeIceTransport : public IceTransportInternal { SignalReceivingState(this); } - void SendPacketInternal(const rtc::CopyOnWriteBuffer& packet) { + void SendPacketInternal(const rtc::CopyOnWriteBuffer& packet) + RTC_EXCLUSIVE_LOCKS_REQUIRED(network_thread_) { if (dest_) { last_sent_packet_ = packet; dest_->SignalReadPacket(dest_, packet.data(), packet.size(), @@ -308,32 +390,37 @@ class FakeIceTransport : public IceTransportInternal { } } - rtc::AsyncInvoker invoker_; - std::string name_; - int component_; - FakeIceTransport* dest_ = nullptr; - bool async_ = false; - int async_delay_ms_ = 0; - Candidates remote_candidates_; - IceConfig ice_config_; - IceRole role_ = ICEROLE_UNKNOWN; - uint64_t tiebreaker_ = 0; - IceParameters ice_parameters_; - IceParameters remote_ice_parameters_; - 
IceMode remote_ice_mode_ = ICEMODE_FULL; - size_t connection_count_ = 0; - absl::optional transport_state_; - absl::optional legacy_transport_state_; - IceGatheringState gathering_state_ = kIceGatheringNew; - bool had_connection_ = false; - bool writable_ = false; - bool receiving_ = false; - bool combine_outgoing_packets_ = false; - rtc::CopyOnWriteBuffer send_packet_; - absl::optional network_route_; - std::map socket_options_; - rtc::CopyOnWriteBuffer last_sent_packet_; + const std::string name_; + const int component_; + FakeIceTransport* dest_ RTC_GUARDED_BY(network_thread_) = nullptr; + bool async_ RTC_GUARDED_BY(network_thread_) = false; + int async_delay_ms_ RTC_GUARDED_BY(network_thread_) = 0; + Candidates remote_candidates_ RTC_GUARDED_BY(network_thread_); + IceConfig ice_config_ RTC_GUARDED_BY(network_thread_); + IceRole role_ RTC_GUARDED_BY(network_thread_) = ICEROLE_UNKNOWN; + uint64_t tiebreaker_ RTC_GUARDED_BY(network_thread_) = 0; + IceParameters ice_parameters_ RTC_GUARDED_BY(network_thread_); + IceParameters remote_ice_parameters_ RTC_GUARDED_BY(network_thread_); + IceMode remote_ice_mode_ RTC_GUARDED_BY(network_thread_) = ICEMODE_FULL; + size_t connection_count_ RTC_GUARDED_BY(network_thread_) = 0; + absl::optional transport_state_ + RTC_GUARDED_BY(network_thread_); + absl::optional legacy_transport_state_ + RTC_GUARDED_BY(network_thread_); + IceGatheringState gathering_state_ RTC_GUARDED_BY(network_thread_) = + kIceGatheringNew; + bool had_connection_ RTC_GUARDED_BY(network_thread_) = false; + bool writable_ RTC_GUARDED_BY(network_thread_) = false; + bool receiving_ RTC_GUARDED_BY(network_thread_) = false; + bool combine_outgoing_packets_ RTC_GUARDED_BY(network_thread_) = false; + rtc::CopyOnWriteBuffer send_packet_ RTC_GUARDED_BY(network_thread_); + absl::optional network_route_ + RTC_GUARDED_BY(network_thread_); + std::map socket_options_ + RTC_GUARDED_BY(network_thread_); + rtc::CopyOnWriteBuffer last_sent_packet_ 
RTC_GUARDED_BY(network_thread_); rtc::Thread* const network_thread_; + webrtc::ScopedTaskSafetyDetached task_safety_; }; class FakeIceTransportWrapper : public webrtc::IceTransportInterface { diff --git a/p2p/base/fake_packet_transport.h b/p2p/base/fake_packet_transport.h index a5e2abb7d6..b69c9b5208 100644 --- a/p2p/base/fake_packet_transport.h +++ b/p2p/base/fake_packet_transport.h @@ -15,7 +15,6 @@ #include #include "p2p/base/packet_transport_internal.h" -#include "rtc_base/async_invoker.h" #include "rtc_base/copy_on_write_buffer.h" namespace rtc { @@ -31,11 +30,6 @@ class FakePacketTransport : public PacketTransportInternal { } } - // If async, will send packets by "Post"-ing to message queue instead of - // synchronously "Send"-ing. - void SetAsync(bool async) { async_ = async; } - void SetAsyncDelay(int delay_ms) { async_delay_ms_ = delay_ms; } - // SetWritable, SetReceiving and SetDestination are the main methods that can // be used for testing, to simulate connectivity or lack thereof. 
void SetWritable(bool writable) { set_writable(writable); } @@ -70,14 +64,8 @@ class FakePacketTransport : public PacketTransportInternal { return -1; } CopyOnWriteBuffer packet(data, len); - if (async_) { - invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, Thread::Current(), - Bind(&FakePacketTransport::SendPacketInternal, this, packet), - async_delay_ms_); - } else { - SendPacketInternal(packet); - } + SendPacketInternal(packet); + SentPacket sent_packet(options.packet_id, TimeMillis()); SignalSentPacket(this, sent_packet); return static_cast(len); @@ -139,11 +127,8 @@ class FakePacketTransport : public PacketTransportInternal { } CopyOnWriteBuffer last_sent_packet_; - AsyncInvoker invoker_; std::string transport_name_; FakePacketTransport* dest_ = nullptr; - bool async_ = false; - int async_delay_ms_ = 0; bool writable_ = false; bool receiving_ = false; diff --git a/p2p/base/fake_port_allocator.h b/p2p/base/fake_port_allocator.h index 266bb7956b..efe9a53a16 100644 --- a/p2p/base/fake_port_allocator.h +++ b/p2p/base/fake_port_allocator.h @@ -18,7 +18,6 @@ #include "p2p/base/basic_packet_socket_factory.h" #include "p2p/base/port_allocator.h" #include "p2p/base/udp_port.h" -#include "rtc_base/bind.h" #include "rtc_base/net_helpers.h" #include "rtc_base/thread.h" @@ -119,8 +118,8 @@ class FakePortAllocatorSession : public PortAllocatorSession { username(), password(), std::string(), false)); RTC_DCHECK(port_); - port_->SignalDestroyed.connect( - this, &FakePortAllocatorSession::OnPortDestroyed); + port_->SubscribePortDestroyed( + [this](PortInterface* port) { OnPortDestroyed(port); }); AddPort(port_.get()); } ++port_config_count_; @@ -222,9 +221,7 @@ class FakePortAllocator : public cricket::PortAllocator { Initialize(); return; } - network_thread_->Invoke(RTC_FROM_HERE, - rtc::Bind(&PortAllocator::Initialize, - static_cast(this))); + network_thread_->Invoke(RTC_FROM_HERE, [this] { Initialize(); }); } void SetNetworkIgnoreMask(int network_ignore_mask) override {} @@ 
-241,10 +238,19 @@ class FakePortAllocator : public cricket::PortAllocator { bool initialized() const { return initialized_; } + // For testing: Manipulate MdnsObfuscationEnabled() + bool MdnsObfuscationEnabled() const override { + return mdns_obfuscation_enabled_; + } + void SetMdnsObfuscationEnabledForTesting(bool enabled) { + mdns_obfuscation_enabled_ = enabled; + } + private: rtc::Thread* network_thread_; rtc::PacketSocketFactory* factory_; std::unique_ptr owned_factory_; + bool mdns_obfuscation_enabled_ = false; }; } // namespace cricket diff --git a/p2p/base/ice_controller_interface.h b/p2p/base/ice_controller_interface.h index cc4cf4d0d7..0e77d1dd00 100644 --- a/p2p/base/ice_controller_interface.h +++ b/p2p/base/ice_controller_interface.h @@ -51,12 +51,20 @@ struct IceControllerEvent { // - which connection to ping // - which connection to use // - which connection to prune +// - which connection to forget learned state on // -// P2PTransportChannel creates a |Connection| and adds a const pointer -// to the IceController using |AddConnection|, i.e the IceController -// should not call any non-const methods on a Connection. +// The P2PTransportChannel owns (creates and destroys) Connections, +// but P2PTransportChannel gives const pointers to the the IceController using +// |AddConnection|, i.e the IceController should not call any non-const methods +// on a Connection but signal back in the interface if any mutable function +// shall be called. 
// -// The IceController shall keeps track of all connections added +// Current these are limited to: +// Connection::Ping - returned in PingResult +// Connection::Prune - retuned in PruneConnections +// Connection::ForgetLearnedState - return in SwitchResult +// +// The IceController shall keep track of all connections added // (and not destroyed) and give them back using the connections()-function- // // When a Connection gets destroyed @@ -71,12 +79,17 @@ class IceControllerInterface { // An optional recheck event for when a Switch() should be attempted again. absl::optional recheck_event; + + // A vector with connection to run ForgetLearnedState on. + std::vector connections_to_forget_state_on; }; // This represents the result of a call to SelectConnectionToPing. struct PingResult { PingResult(const Connection* conn, int _recheck_delay_ms) - : connection(conn), recheck_delay_ms(_recheck_delay_ms) {} + : connection(conn ? absl::optional(conn) + : absl::nullopt), + recheck_delay_ms(_recheck_delay_ms) {} // Connection that we should (optionally) ping. const absl::optional connection; diff --git a/p2p/base/ice_transport_internal.cc b/p2p/base/ice_transport_internal.cc index 1d5b6e7403..104a95b5af 100644 --- a/p2p/base/ice_transport_internal.cc +++ b/p2p/base/ice_transport_internal.cc @@ -14,6 +14,50 @@ namespace cricket { +using webrtc::RTCError; +using webrtc::RTCErrorType; + +RTCError VerifyCandidate(const Candidate& cand) { + // No address zero. + if (cand.address().IsNil() || cand.address().IsAnyIP()) { + return RTCError(RTCErrorType::INVALID_PARAMETER, + "candidate has address of zero"); + } + + // Disallow all ports below 1024, except for 80 and 443 on public addresses. + int port = cand.address().port(); + if (cand.protocol() == cricket::TCP_PROTOCOL_NAME && + (cand.tcptype() == cricket::TCPTYPE_ACTIVE_STR || port == 0)) { + // Expected for active-only candidates per + // http://tools.ietf.org/html/rfc6544#section-4.5 so no error. 
+ // Libjingle clients emit port 0, in "active" mode. + return RTCError::OK(); + } + if (port < 1024) { + if ((port != 80) && (port != 443)) { + return RTCError(RTCErrorType::INVALID_PARAMETER, + "candidate has port below 1024, but not 80 or 443"); + } + + if (cand.address().IsPrivateIP()) { + return RTCError( + RTCErrorType::INVALID_PARAMETER, + "candidate has port of 80 or 443 with private IP address"); + } + } + + return RTCError::OK(); +} + +RTCError VerifyCandidates(const Candidates& candidates) { + for (const Candidate& candidate : candidates) { + RTCError error = VerifyCandidate(candidate); + if (!error.ok()) + return error; + } + return RTCError::OK(); +} + IceConfig::IceConfig() = default; IceConfig::IceConfig(int receiving_timeout_ms, diff --git a/p2p/base/ice_transport_internal.h b/p2p/base/ice_transport_internal.h index b735a1a742..b3eb2dc9e2 100644 --- a/p2p/base/ice_transport_internal.h +++ b/p2p/base/ice_transport_internal.h @@ -18,6 +18,7 @@ #include "absl/types/optional.h" #include "api/candidate.h" +#include "api/rtc_error.h" #include "api/transport/enums.h" #include "p2p/base/connection.h" #include "p2p/base/packet_transport_internal.h" @@ -74,6 +75,17 @@ enum class NominationMode { // The details are described in P2PTransportChannel. }; +// Utility method that checks if various required Candidate fields are filled in +// and contain valid values. If conditions are not met, an RTCError with the +// appropriated error number and description is returned. If the configuration +// is valid RTCError::OK() is returned. +webrtc::RTCError VerifyCandidate(const Candidate& cand); + +// Runs through a list of cricket::Candidate instances and calls VerifyCandidate +// for each one, stopping on the first error encounted and returning that error +// value if so. On success returns RTCError::OK(). +webrtc::RTCError VerifyCandidates(const Candidates& candidates); + // Information about ICE configuration. 
// TODO(deadbeef): Use absl::optional to represent unset values, instead of // -1. diff --git a/p2p/base/mdns_message.cc b/p2p/base/mdns_message.cc deleted file mode 100644 index 1aa996c4a8..0000000000 --- a/p2p/base/mdns_message.cc +++ /dev/null @@ -1,396 +0,0 @@ -/* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "p2p/base/mdns_message.h" - -#include "rtc_base/logging.h" -#include "rtc_base/net_helpers.h" -#include "rtc_base/string_encode.h" - -namespace webrtc { - -namespace { -// RFC 1035, Section 4.1.1. -// -// QR bit. -constexpr uint16_t kMdnsFlagMaskQueryOrResponse = 0x8000; -// AA bit. -constexpr uint16_t kMdnsFlagMaskAuthoritative = 0x0400; -// RFC 1035, Section 4.1.2, QCLASS and RFC 6762, Section 18.12, repurposing of -// top bit of QCLASS as the unicast response bit. -constexpr uint16_t kMdnsQClassMaskUnicastResponse = 0x8000; -constexpr size_t kMdnsHeaderSizeBytes = 12; - -bool ReadDomainName(MessageBufferReader* buf, std::string* name) { - size_t name_start_pos = buf->CurrentOffset(); - uint8_t label_length; - if (!buf->ReadUInt8(&label_length)) { - return false; - } - // RFC 1035, Section 4.1.4. - // - // If the first two bits of the length octet are ones, the name is compressed - // and the rest six bits with the next octet denotes its position in the - // message by the offset from the start of the message. - auto is_pointer = [](uint8_t octet) { - return (octet & 0x80) && (octet & 0x40); - }; - while (label_length && !is_pointer(label_length)) { - // RFC 1035, Section 2.3.1, labels are restricted to 63 octets or less. 
- if (label_length > 63) { - return false; - } - std::string label; - if (!buf->ReadString(&label, label_length)) { - return false; - } - (*name) += label + "."; - if (!buf->ReadUInt8(&label_length)) { - return false; - } - } - if (is_pointer(label_length)) { - uint8_t next_octet; - if (!buf->ReadUInt8(&next_octet)) { - return false; - } - size_t pos_jump_to = ((label_length & 0x3f) << 8) | next_octet; - // A legitimate pointer only refers to a prior occurrence of the same name, - // and we should only move strictly backward to a prior name field after the - // header. - if (pos_jump_to >= name_start_pos || pos_jump_to < kMdnsHeaderSizeBytes) { - return false; - } - MessageBufferReader new_buf(buf->MessageData(), buf->MessageLength()); - if (!new_buf.Consume(pos_jump_to)) { - return false; - } - return ReadDomainName(&new_buf, name); - } - return true; -} - -void WriteDomainName(rtc::ByteBufferWriter* buf, const std::string& name) { - std::vector labels; - rtc::tokenize(name, '.', &labels); - for (const auto& label : labels) { - buf->WriteUInt8(label.length()); - buf->WriteString(label); - } - buf->WriteUInt8(0); -} - -} // namespace - -void MdnsHeader::SetQueryOrResponse(bool is_query) { - if (is_query) { - flags &= ~kMdnsFlagMaskQueryOrResponse; - } else { - flags |= kMdnsFlagMaskQueryOrResponse; - } -} - -void MdnsHeader::SetAuthoritative(bool is_authoritative) { - if (is_authoritative) { - flags |= kMdnsFlagMaskAuthoritative; - } else { - flags &= ~kMdnsFlagMaskAuthoritative; - } -} - -bool MdnsHeader::IsAuthoritative() const { - return flags & kMdnsFlagMaskAuthoritative; -} - -bool MdnsHeader::Read(MessageBufferReader* buf) { - if (!buf->ReadUInt16(&id) || !buf->ReadUInt16(&flags) || - !buf->ReadUInt16(&qdcount) || !buf->ReadUInt16(&ancount) || - !buf->ReadUInt16(&nscount) || !buf->ReadUInt16(&arcount)) { - RTC_LOG(LS_ERROR) << "Invalid mDNS header."; - return false; - } - return true; -} - -void MdnsHeader::Write(rtc::ByteBufferWriter* buf) const { - 
buf->WriteUInt16(id); - buf->WriteUInt16(flags); - buf->WriteUInt16(qdcount); - buf->WriteUInt16(ancount); - buf->WriteUInt16(nscount); - buf->WriteUInt16(arcount); -} - -bool MdnsHeader::IsQuery() const { - return !(flags & kMdnsFlagMaskQueryOrResponse); -} - -MdnsSectionEntry::MdnsSectionEntry() = default; -MdnsSectionEntry::~MdnsSectionEntry() = default; -MdnsSectionEntry::MdnsSectionEntry(const MdnsSectionEntry& other) = default; - -void MdnsSectionEntry::SetType(SectionEntryType type) { - switch (type) { - case SectionEntryType::kA: - type_ = 1; - return; - case SectionEntryType::kAAAA: - type_ = 28; - return; - default: - RTC_NOTREACHED(); - } -} - -SectionEntryType MdnsSectionEntry::GetType() const { - switch (type_) { - case 1: - return SectionEntryType::kA; - case 28: - return SectionEntryType::kAAAA; - default: - return SectionEntryType::kUnsupported; - } -} - -void MdnsSectionEntry::SetClass(SectionEntryClass cls) { - switch (cls) { - case SectionEntryClass::kIN: - class_ = 1; - return; - default: - RTC_NOTREACHED(); - } -} - -SectionEntryClass MdnsSectionEntry::GetClass() const { - switch (class_) { - case 1: - return SectionEntryClass::kIN; - default: - return SectionEntryClass::kUnsupported; - } -} - -MdnsQuestion::MdnsQuestion() = default; -MdnsQuestion::MdnsQuestion(const MdnsQuestion& other) = default; -MdnsQuestion::~MdnsQuestion() = default; - -bool MdnsQuestion::Read(MessageBufferReader* buf) { - if (!ReadDomainName(buf, &name_)) { - RTC_LOG(LS_ERROR) << "Invalid name."; - return false; - } - if (!buf->ReadUInt16(&type_) || !buf->ReadUInt16(&class_)) { - RTC_LOG(LS_ERROR) << "Invalid type and class."; - return false; - } - return true; -} - -bool MdnsQuestion::Write(rtc::ByteBufferWriter* buf) const { - WriteDomainName(buf, name_); - buf->WriteUInt16(type_); - buf->WriteUInt16(class_); - return true; -} - -void MdnsQuestion::SetUnicastResponse(bool should_unicast) { - if (should_unicast) { - class_ |= kMdnsQClassMaskUnicastResponse; - } else { - 
class_ &= ~kMdnsQClassMaskUnicastResponse; - } -} - -bool MdnsQuestion::ShouldUnicastResponse() const { - return class_ & kMdnsQClassMaskUnicastResponse; -} - -MdnsResourceRecord::MdnsResourceRecord() = default; -MdnsResourceRecord::MdnsResourceRecord(const MdnsResourceRecord& other) = - default; -MdnsResourceRecord::~MdnsResourceRecord() = default; - -bool MdnsResourceRecord::Read(MessageBufferReader* buf) { - if (!ReadDomainName(buf, &name_)) { - return false; - } - if (!buf->ReadUInt16(&type_) || !buf->ReadUInt16(&class_) || - !buf->ReadUInt32(&ttl_seconds_) || !buf->ReadUInt16(&rdlength_)) { - return false; - } - - switch (GetType()) { - case SectionEntryType::kA: - return ReadARData(buf); - case SectionEntryType::kAAAA: - return ReadQuadARData(buf); - case SectionEntryType::kUnsupported: - return false; - default: - RTC_NOTREACHED(); - } - return false; -} -bool MdnsResourceRecord::ReadARData(MessageBufferReader* buf) { - // A RDATA contains a 32-bit IPv4 address. - return buf->ReadString(&rdata_, 4); -} - -bool MdnsResourceRecord::ReadQuadARData(MessageBufferReader* buf) { - // AAAA RDATA contains a 128-bit IPv6 address. 
- return buf->ReadString(&rdata_, 16); -} - -bool MdnsResourceRecord::Write(rtc::ByteBufferWriter* buf) const { - WriteDomainName(buf, name_); - buf->WriteUInt16(type_); - buf->WriteUInt16(class_); - buf->WriteUInt32(ttl_seconds_); - buf->WriteUInt16(rdlength_); - switch (GetType()) { - case SectionEntryType::kA: - WriteARData(buf); - return true; - case SectionEntryType::kAAAA: - WriteQuadARData(buf); - return true; - case SectionEntryType::kUnsupported: - return false; - default: - RTC_NOTREACHED(); - } - return true; -} - -void MdnsResourceRecord::WriteARData(rtc::ByteBufferWriter* buf) const { - buf->WriteString(rdata_); -} - -void MdnsResourceRecord::WriteQuadARData(rtc::ByteBufferWriter* buf) const { - buf->WriteString(rdata_); -} - -bool MdnsResourceRecord::SetIPAddressInRecordData( - const rtc::IPAddress& address) { - int af = address.family(); - if (af != AF_INET && af != AF_INET6) { - return false; - } - char out[16] = {0}; - if (!rtc::inet_pton(af, address.ToString().c_str(), out)) { - return false; - } - rdlength_ = (af == AF_INET) ? 4 : 16; - rdata_ = std::string(out, rdlength_); - return true; -} - -bool MdnsResourceRecord::GetIPAddressFromRecordData( - rtc::IPAddress* address) const { - if (GetType() != SectionEntryType::kA && - GetType() != SectionEntryType::kAAAA) { - return false; - } - if (rdata_.size() != 4 && rdata_.size() != 16) { - return false; - } - char out[INET6_ADDRSTRLEN] = {0}; - int af = (GetType() == SectionEntryType::kA) ? 
AF_INET : AF_INET6; - if (!rtc::inet_ntop(af, rdata_.data(), out, sizeof(out))) { - return false; - } - return rtc::IPFromString(std::string(out), address); -} - -MdnsMessage::MdnsMessage() = default; -MdnsMessage::~MdnsMessage() = default; - -bool MdnsMessage::Read(MessageBufferReader* buf) { - RTC_DCHECK_EQ(0u, buf->CurrentOffset()); - if (!header_.Read(buf)) { - return false; - } - - auto read_question = [&buf](std::vector* section, - uint16_t count) { - section->resize(count); - for (auto& question : (*section)) { - if (!question.Read(buf)) { - return false; - } - } - return true; - }; - auto read_rr = [&buf](std::vector* section, - uint16_t count) { - section->resize(count); - for (auto& rr : (*section)) { - if (!rr.Read(buf)) { - return false; - } - } - return true; - }; - - if (!read_question(&question_section_, header_.qdcount) || - !read_rr(&answer_section_, header_.ancount) || - !read_rr(&authority_section_, header_.nscount) || - !read_rr(&additional_section_, header_.arcount)) { - return false; - } - return true; -} - -bool MdnsMessage::Write(rtc::ByteBufferWriter* buf) const { - header_.Write(buf); - - auto write_rr = [&buf](const std::vector& section) { - for (const auto& rr : section) { - if (!rr.Write(buf)) { - return false; - } - } - return true; - }; - - for (const auto& question : question_section_) { - if (!question.Write(buf)) { - return false; - } - } - if (!write_rr(answer_section_) || !write_rr(authority_section_) || - !write_rr(additional_section_)) { - return false; - } - - return true; -} - -bool MdnsMessage::ShouldUnicastResponse() const { - bool should_unicast = false; - for (const auto& question : question_section_) { - should_unicast |= question.ShouldUnicastResponse(); - } - return should_unicast; -} - -void MdnsMessage::AddQuestion(const MdnsQuestion& question) { - question_section_.push_back(question); - header_.qdcount = question_section_.size(); -} - -void MdnsMessage::AddAnswerRecord(const MdnsResourceRecord& answer) { - 
answer_section_.push_back(answer); - header_.ancount = answer_section_.size(); -} - -} // namespace webrtc diff --git a/p2p/base/mdns_message.h b/p2p/base/mdns_message.h deleted file mode 100644 index 79be5219e4..0000000000 --- a/p2p/base/mdns_message.h +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef P2P_BASE_MDNS_MESSAGE_H_ -#define P2P_BASE_MDNS_MESSAGE_H_ - -// This file contains classes to read and write mDNSs message defined in RFC -// 6762 and RFC 1025 (DNS messages). Note that it is recommended by RFC 6762 to -// use the name compression scheme defined in RFC 1035 whenever possible. We -// currently only implement the capability of reading compressed names in mDNS -// messages in MdnsMessage::Read(); however, the MdnsMessage::Write() does not -// support name compression yet. -// -// Fuzzer tests (test/fuzzers/mdns_parser_fuzzer.cc) MUST always be performed -// after changes made to this file. - -#include - -#include -#include - -#include "rtc_base/byte_buffer.h" -#include "rtc_base/ip_address.h" -#include "rtc_base/message_buffer_reader.h" - -namespace webrtc { - -// We use "section entry" to denote either a question or a resource record. -// -// RFC 1035 Section 3.2.2. -enum class SectionEntryType { - kA, - kAAAA, - // Only the above types are processed in the current implementation. - kUnsupported, -}; - -// RFC 1035 Section 3.2.4. -enum class SectionEntryClass { - kIN, - kUnsupported, -}; - -// RFC 1035, Section 4.1.1. 
-class MdnsHeader final { - public: - bool Read(MessageBufferReader* buf); - void Write(rtc::ByteBufferWriter* buf) const; - - void SetQueryOrResponse(bool is_query); - bool IsQuery() const; - void SetAuthoritative(bool is_authoritative); - bool IsAuthoritative() const; - - uint16_t id = 0; - uint16_t flags = 0; - // Number of entries in the question section. - uint16_t qdcount = 0; - // Number of resource records in the answer section. - uint16_t ancount = 0; - // Number of name server resource records in the authority records section. - uint16_t nscount = 0; - // Number of resource records in the additional records section. - uint16_t arcount = 0; -}; - -// Entries in each section after the header share a common structure. Note that -// this is not a concept defined in RFC 1035. -class MdnsSectionEntry { - public: - MdnsSectionEntry(); - MdnsSectionEntry(const MdnsSectionEntry& other); - virtual ~MdnsSectionEntry(); - virtual bool Read(MessageBufferReader* buf) = 0; - virtual bool Write(rtc::ByteBufferWriter* buf) const = 0; - - void SetName(const std::string& name) { name_ = name; } - // Returns the fully qualified domain name in the section entry, i.e., QNAME - // in a question or NAME in a resource record. - std::string GetName() const { return name_; } - - void SetType(SectionEntryType type); - SectionEntryType GetType() const; - void SetClass(SectionEntryClass cls); - SectionEntryClass GetClass() const; - - protected: - std::string name_; // Fully qualified domain name. - uint16_t type_ = 0; - uint16_t class_ = 0; -}; - -// RFC 1035, Section 4.1.2. -class MdnsQuestion final : public MdnsSectionEntry { - public: - MdnsQuestion(); - MdnsQuestion(const MdnsQuestion& other); - ~MdnsQuestion() override; - - bool Read(MessageBufferReader* buf) override; - bool Write(rtc::ByteBufferWriter* buf) const override; - - void SetUnicastResponse(bool should_unicast); - bool ShouldUnicastResponse() const; -}; - -// RFC 1035, Section 4.1.3. 
-class MdnsResourceRecord final : public MdnsSectionEntry { - public: - MdnsResourceRecord(); - MdnsResourceRecord(const MdnsResourceRecord& other); - ~MdnsResourceRecord() override; - - bool Read(MessageBufferReader* buf) override; - bool Write(rtc::ByteBufferWriter* buf) const override; - - void SetTtlSeconds(uint32_t ttl_seconds) { ttl_seconds_ = ttl_seconds; } - uint32_t GetTtlSeconds() const { return ttl_seconds_; } - // Returns true if |address| is in the address family AF_INET or AF_INET6 and - // |address| has a valid IPv4 or IPv6 address; false otherwise. - bool SetIPAddressInRecordData(const rtc::IPAddress& address); - // Returns true if the record is of type A or AAAA and the record has a valid - // IPv4 or IPv6 address; false otherwise. Stores the valid IP in |address|. - bool GetIPAddressFromRecordData(rtc::IPAddress* address) const; - - private: - // The list of methods reading and writing rdata can grow as we support more - // types of rdata. - bool ReadARData(MessageBufferReader* buf); - void WriteARData(rtc::ByteBufferWriter* buf) const; - - bool ReadQuadARData(MessageBufferReader* buf); - void WriteQuadARData(rtc::ByteBufferWriter* buf) const; - - uint32_t ttl_seconds_ = 0; - uint16_t rdlength_ = 0; - std::string rdata_; -}; - -class MdnsMessage final { - public: - // RFC 1035, Section 4.1. - enum class Section { kQuestion, kAnswer, kAuthority, kAdditional }; - - MdnsMessage(); - ~MdnsMessage(); - // Reads the mDNS message in |buf| and populates the corresponding fields in - // MdnsMessage. - bool Read(MessageBufferReader* buf); - // Write an mDNS message to |buf| based on the fields in MdnsMessage. - // - // TODO(qingsi): Implement name compression when writing mDNS messages. 
- bool Write(rtc::ByteBufferWriter* buf) const; - - void SetId(uint16_t id) { header_.id = id; } - uint16_t GetId() const { return header_.id; } - - void SetQueryOrResponse(bool is_query) { - header_.SetQueryOrResponse(is_query); - } - bool IsQuery() const { return header_.IsQuery(); } - - void SetAuthoritative(bool is_authoritative) { - header_.SetAuthoritative(is_authoritative); - } - bool IsAuthoritative() const { return header_.IsAuthoritative(); } - - // Returns true if the message is a query and the unicast response is - // preferred. False otherwise. - bool ShouldUnicastResponse() const; - - void AddQuestion(const MdnsQuestion& question); - // TODO(qingsi): Implement AddXRecord for name server and additional records. - void AddAnswerRecord(const MdnsResourceRecord& answer); - - const std::vector& question_section() const { - return question_section_; - } - const std::vector& answer_section() const { - return answer_section_; - } - const std::vector& authority_section() const { - return authority_section_; - } - const std::vector& additional_section() const { - return additional_section_; - } - - private: - MdnsHeader header_; - std::vector question_section_; - std::vector answer_section_; - std::vector authority_section_; - std::vector additional_section_; -}; - -} // namespace webrtc - -#endif // P2P_BASE_MDNS_MESSAGE_H_ diff --git a/p2p/base/mdns_message_unittest.cc b/p2p/base/mdns_message_unittest.cc deleted file mode 100644 index 2f1f74d8e3..0000000000 --- a/p2p/base/mdns_message_unittest.cc +++ /dev/null @@ -1,571 +0,0 @@ -/* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "p2p/base/mdns_message.h" - -#include -#include -#include - -#include "rtc_base/byte_buffer.h" -#include "rtc_base/gunit.h" -#include "rtc_base/ip_address.h" -#include "rtc_base/socket_address.h" -#include "test/gmock.h" - -#define ReadMdnsMessage(X, Y) ReadMdnsMessageTestCase(X, Y, sizeof(Y)) -#define WriteMdnsMessageAndCompare(X, Y) \ - WriteMdnsMessageAndCompareWithTestCast(X, Y, sizeof(Y)) - -using ::testing::ElementsAre; -using ::testing::Pair; -using ::testing::UnorderedElementsAre; - -namespace webrtc { - -namespace { - -const uint8_t kSingleQuestionForIPv4AddrWithUnicastResponse[] = { - 0x12, 0x34, // ID - 0x00, 0x00, // flags - 0x00, 0x01, // number of questions - 0x00, 0x00, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x06, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, // webrtc - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x01, // type A Record - 0x80, 0x01, // class IN, unicast response -}; - -const uint8_t kTwoQuestionsForIPv4AndIPv6AddrWithMulticastResponse[] = { - 0x12, 0x34, // ID - 0x00, 0x00, // flags - 0x00, 0x02, // number of questions - 0x00, 0x00, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x07, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, 0x34, // webrtc4 - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN, multicast response - 0x07, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, 0x36, // webrtc6 - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x1C, // type AAAA Record - 0x00, 0x01, // class IN, multicast response -}; - -const uint8_t - kTwoQuestionsForIPv4AndIPv6AddrWithMulticastResponseAndNameCompression[] = { - 0x12, 0x34, // ID - 0x00, 0x00, // flags - 0x00, 0x02, // number of questions - 0x00, 0x00, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x03, 0x77, 0x77, 0x77, // 
www - 0x06, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, // webrtc - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN, multicast response - 0x04, 0x6d, 0x64, 0x6e, 0x73, // mdns - 0xc0, 0x10, // offset 16, webrtc.org. - 0x00, 0x1C, // type AAAA Record - 0x00, 0x01, // class IN, multicast response -}; - -const uint8_t kThreeQuestionsWithTwoPointersToTheSameNameSuffix[] = { - 0x12, 0x34, // ID - 0x00, 0x00, // flags - 0x00, 0x03, // number of questions - 0x00, 0x00, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x03, 0x77, 0x77, 0x77, // www - 0x06, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, // webrtc - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN, multicast response - 0x04, 0x6d, 0x64, 0x6e, 0x73, // mdns - 0xc0, 0x10, // offset 16, webrtc.org. - 0x00, 0x1C, // type AAAA Record - 0x00, 0x01, // class IN, multicast response - 0xc0, 0x10, // offset 16, webrtc.org. - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN, multicast response -}; - -const uint8_t kThreeQuestionsWithPointerToNameSuffixContainingAnotherPointer[] = - { - 0x12, 0x34, // ID - 0x00, 0x00, // flags - 0x00, 0x03, // number of questions - 0x00, 0x00, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x03, 0x77, 0x77, 0x77, // www - 0x06, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, // webrtc - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN, multicast response - 0x04, 0x6d, 0x64, 0x6e, 0x73, // mdns - 0xc0, 0x10, // offset 16, webrtc.org. - 0x00, 0x1C, // type AAAA Record - 0x00, 0x01, // class IN, multicast response - 0x03, 0x77, 0x77, 0x77, // www - 0xc0, 0x20, // offset 32, mdns.webrtc.org. 
- 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN, multicast response -}; - -const uint8_t kCorruptedQuestionWithNameCompression1[] = { - 0x12, 0x34, // ID - 0x84, 0x00, // flags - 0x00, 0x01, // number of questions - 0x00, 0x00, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0xc0, 0x0c, // offset 12, - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN -}; - -const uint8_t kCorruptedQuestionWithNameCompression2[] = { - 0x12, 0x34, // ID - 0x84, 0x00, // flags - 0x00, 0x01, // number of questions - 0x00, 0x00, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x01, 0x77, // w - 0xc0, 0x0c, // offset 12, - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN -}; - -const uint8_t kSingleAuthoritativeAnswerWithIPv4Addr[] = { - 0x12, 0x34, // ID - 0x84, 0x00, // flags - 0x00, 0x00, // number of questions - 0x00, 0x01, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x06, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, // webrtc - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN - 0x00, 0x00, 0x00, 0x78, // TTL, 120 seconds - 0x00, 0x04, // rdlength, 32 bits - 0xC0, 0xA8, 0x00, 0x01, // 192.168.0.1 -}; - -const uint8_t kTwoAuthoritativeAnswersWithIPv4AndIPv6Addr[] = { - 0x12, 0x34, // ID - 0x84, 0x00, // flags - 0x00, 0x00, // number of questions - 0x00, 0x02, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x07, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, 0x34, // webrtc4 - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN - 0x00, 0x00, 0x00, 0x3c, // TTL, 60 seconds - 0x00, 0x04, // rdlength, 32 bits - 0xC0, 0xA8, 0x00, 0x01, // 192.168.0.1 - 0x07, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, 0x36, // webrtc6 - 0x03, 0x6f, 0x72, 
0x67, // org - 0x00, // null label - 0x00, 0x1C, // type AAAA Record - 0x00, 0x01, // class IN - 0x00, 0x00, 0x00, 0x78, // TTL, 120 seconds - 0x00, 0x10, // rdlength, 128 bits - 0xfd, 0x12, 0x34, 0x56, 0x78, 0x9a, 0x00, 0x01, // fd12:3456:789a:1::1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, -}; - -const uint8_t kTwoAuthoritativeAnswersWithIPv4AndIPv6AddrWithNameCompression[] = - { - 0x12, 0x34, // ID - 0x84, 0x00, // flags - 0x00, 0x00, // number of questions - 0x00, 0x02, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x03, 0x77, 0x77, 0x77, // www - 0x06, 0x77, 0x65, 0x62, 0x72, 0x74, 0x63, // webrtc - 0x03, 0x6f, 0x72, 0x67, // org - 0x00, // null label - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN - 0x00, 0x00, 0x00, 0x3c, // TTL, 60 seconds - 0x00, 0x04, // rdlength, 32 bits - 0xc0, 0xA8, 0x00, 0x01, // 192.168.0.1 - 0xc0, 0x10, // offset 16, webrtc.org. - 0x00, 0x1C, // type AAAA Record - 0x00, 0x01, // class IN - 0x00, 0x00, 0x00, 0x78, // TTL, 120 seconds - 0x00, 0x10, // rdlength, 128 bits - 0xfd, 0x12, 0x34, 0x56, 0x78, 0x9a, 0x00, 0x01, // fd12:3456:789a:1::1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, -}; - -const uint8_t kCorruptedAnswerWithNameCompression1[] = { - 0x12, 0x34, // ID - 0x84, 0x00, // flags - 0x00, 0x00, // number of questions - 0x00, 0x01, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0xc0, 0x0c, // offset 12, - 0x00, 0x01, // type A Record - 0x00, 0x01, // class IN - 0x00, 0x00, 0x00, 0x3c, // TTL, 60 seconds - 0x00, 0x04, // rdlength, 32 bits - 0xc0, 0xA8, 0x00, 0x01, // 192.168.0.1 -}; - -const uint8_t kCorruptedAnswerWithNameCompression2[] = { - 0x12, 0x34, // ID - 0x84, 0x00, // flags - 0x00, 0x00, // number of questions - 0x00, 0x01, // number of answer rr - 0x00, 0x00, // number of name server rr - 0x00, 0x00, // number of additional rr - 0x01, 0x77, // w - 0xc0, 0x0c, // offset 12, - 
0x00, 0x01, // type A Record - 0x00, 0x01, // class IN - 0x00, 0x00, 0x00, 0x3c, // TTL, 60 seconds - 0x00, 0x04, // rdlength, 32 bits - 0xc0, 0xA8, 0x00, 0x01, // 192.168.0.1 -}; - -bool ReadMdnsMessageTestCase(MdnsMessage* msg, - const uint8_t* testcase, - size_t size) { - MessageBufferReader buf(reinterpret_cast(testcase), size); - return msg->Read(&buf); -} - -void WriteMdnsMessageAndCompareWithTestCast(MdnsMessage* msg, - const uint8_t* testcase, - size_t size) { - rtc::ByteBufferWriter out; - EXPECT_TRUE(msg->Write(&out)); - EXPECT_EQ(size, out.Length()); - int len = static_cast(out.Length()); - rtc::ByteBufferReader read_buf(out); - std::string bytes; - read_buf.ReadString(&bytes, len); - std::string testcase_bytes(reinterpret_cast(testcase), size); - EXPECT_EQ(testcase_bytes, bytes); -} - -bool GetQueriedNames(MdnsMessage* msg, std::set* names) { - if (!msg->IsQuery() || msg->question_section().empty()) { - return false; - } - for (const auto& question : msg->question_section()) { - names->insert(question.GetName()); - } - return true; -} - -bool GetResolution(MdnsMessage* msg, - std::map* names) { - if (msg->IsQuery() || msg->answer_section().empty()) { - return false; - } - for (const auto& answer : msg->answer_section()) { - rtc::IPAddress resolved_addr; - if (!answer.GetIPAddressFromRecordData(&resolved_addr)) { - return false; - } - (*names)[answer.GetName()] = resolved_addr; - } - return true; -} - -} // namespace - -TEST(MdnsMessageTest, ReadSingleQuestionForIPv4Address) { - MdnsMessage msg; - ASSERT_TRUE( - ReadMdnsMessage(&msg, kSingleQuestionForIPv4AddrWithUnicastResponse)); - EXPECT_TRUE(msg.IsQuery()); - EXPECT_EQ(0x1234, msg.GetId()); - ASSERT_EQ(1u, msg.question_section().size()); - EXPECT_EQ(0u, msg.answer_section().size()); - EXPECT_EQ(0u, msg.authority_section().size()); - EXPECT_EQ(0u, msg.additional_section().size()); - EXPECT_TRUE(msg.ShouldUnicastResponse()); - - const auto& question = msg.question_section()[0]; - 
EXPECT_EQ(SectionEntryType::kA, question.GetType()); - - std::set queried_names; - EXPECT_TRUE(GetQueriedNames(&msg, &queried_names)); - EXPECT_THAT(queried_names, ElementsAre("webrtc.org.")); -} - -TEST(MdnsMessageTest, ReadTwoQuestionsForIPv4AndIPv6Addr) { - MdnsMessage msg; - ASSERT_TRUE(ReadMdnsMessage( - &msg, kTwoQuestionsForIPv4AndIPv6AddrWithMulticastResponse)); - EXPECT_TRUE(msg.IsQuery()); - EXPECT_EQ(0x1234, msg.GetId()); - ASSERT_EQ(2u, msg.question_section().size()); - EXPECT_EQ(0u, msg.answer_section().size()); - EXPECT_EQ(0u, msg.authority_section().size()); - EXPECT_EQ(0u, msg.additional_section().size()); - - const auto& question1 = msg.question_section()[0]; - const auto& question2 = msg.question_section()[1]; - EXPECT_EQ(SectionEntryType::kA, question1.GetType()); - EXPECT_EQ(SectionEntryType::kAAAA, question2.GetType()); - - std::set queried_names; - EXPECT_TRUE(GetQueriedNames(&msg, &queried_names)); - EXPECT_THAT(queried_names, - UnorderedElementsAre("webrtc4.org.", "webrtc6.org.")); -} - -TEST(MdnsMessageTest, ReadTwoQuestionsForIPv4AndIPv6AddrWithNameCompression) { - MdnsMessage msg; - ASSERT_TRUE(ReadMdnsMessage( - &msg, - kTwoQuestionsForIPv4AndIPv6AddrWithMulticastResponseAndNameCompression)); - - ASSERT_EQ(2u, msg.question_section().size()); - const auto& question1 = msg.question_section()[0]; - const auto& question2 = msg.question_section()[1]; - EXPECT_EQ(SectionEntryType::kA, question1.GetType()); - EXPECT_EQ(SectionEntryType::kAAAA, question2.GetType()); - - std::set queried_names; - EXPECT_TRUE(GetQueriedNames(&msg, &queried_names)); - EXPECT_THAT(queried_names, - UnorderedElementsAre("www.webrtc.org.", "mdns.webrtc.org.")); -} - -TEST(MdnsMessageTest, ReadThreeQuestionsWithTwoPointersToTheSameNameSuffix) { - MdnsMessage msg; - ASSERT_TRUE( - ReadMdnsMessage(&msg, kThreeQuestionsWithTwoPointersToTheSameNameSuffix)); - - ASSERT_EQ(3u, msg.question_section().size()); - const auto& question1 = msg.question_section()[0]; - const auto& 
question2 = msg.question_section()[1]; - const auto& question3 = msg.question_section()[2]; - EXPECT_EQ(SectionEntryType::kA, question1.GetType()); - EXPECT_EQ(SectionEntryType::kAAAA, question2.GetType()); - EXPECT_EQ(SectionEntryType::kA, question3.GetType()); - - std::set queried_names; - EXPECT_TRUE(GetQueriedNames(&msg, &queried_names)); - EXPECT_THAT(queried_names, - UnorderedElementsAre("www.webrtc.org.", "mdns.webrtc.org.", - "webrtc.org.")); -} - -TEST(MdnsMessageTest, - ReadThreeQuestionsWithPointerToNameSuffixContainingAnotherPointer) { - MdnsMessage msg; - ASSERT_TRUE(ReadMdnsMessage( - &msg, kThreeQuestionsWithPointerToNameSuffixContainingAnotherPointer)); - - ASSERT_EQ(3u, msg.question_section().size()); - const auto& question1 = msg.question_section()[0]; - const auto& question2 = msg.question_section()[1]; - const auto& question3 = msg.question_section()[2]; - EXPECT_EQ(SectionEntryType::kA, question1.GetType()); - EXPECT_EQ(SectionEntryType::kAAAA, question2.GetType()); - EXPECT_EQ(SectionEntryType::kA, question3.GetType()); - - std::set queried_names; - EXPECT_TRUE(GetQueriedNames(&msg, &queried_names)); - EXPECT_THAT(queried_names, - UnorderedElementsAre("www.webrtc.org.", "mdns.webrtc.org.", - "www.mdns.webrtc.org.")); -} - -TEST(MdnsMessageTest, - ReadQuestionWithCorruptedPointerInNameCompressionShouldFail) { - MdnsMessage msg; - EXPECT_FALSE(ReadMdnsMessage(&msg, kCorruptedQuestionWithNameCompression1)); - EXPECT_FALSE(ReadMdnsMessage(&msg, kCorruptedQuestionWithNameCompression2)); -} - -TEST(MdnsMessageTest, ReadSingleAnswerForIPv4Addr) { - MdnsMessage msg; - ASSERT_TRUE(ReadMdnsMessage(&msg, kSingleAuthoritativeAnswerWithIPv4Addr)); - EXPECT_FALSE(msg.IsQuery()); - EXPECT_TRUE(msg.IsAuthoritative()); - EXPECT_EQ(0x1234, msg.GetId()); - EXPECT_EQ(0u, msg.question_section().size()); - ASSERT_EQ(1u, msg.answer_section().size()); - EXPECT_EQ(0u, msg.authority_section().size()); - EXPECT_EQ(0u, msg.additional_section().size()); - - const auto& 
answer = msg.answer_section()[0]; - EXPECT_EQ(SectionEntryType::kA, answer.GetType()); - EXPECT_EQ(120u, answer.GetTtlSeconds()); - - std::map resolution; - EXPECT_TRUE(GetResolution(&msg, &resolution)); - rtc::IPAddress expected_addr(rtc::SocketAddress("192.168.0.1", 0).ipaddr()); - EXPECT_THAT(resolution, ElementsAre(Pair("webrtc.org.", expected_addr))); -} - -TEST(MdnsMessageTest, ReadTwoAnswersForIPv4AndIPv6Addr) { - MdnsMessage msg; - ASSERT_TRUE( - ReadMdnsMessage(&msg, kTwoAuthoritativeAnswersWithIPv4AndIPv6Addr)); - EXPECT_FALSE(msg.IsQuery()); - EXPECT_TRUE(msg.IsAuthoritative()); - EXPECT_EQ(0x1234, msg.GetId()); - EXPECT_EQ(0u, msg.question_section().size()); - ASSERT_EQ(2u, msg.answer_section().size()); - EXPECT_EQ(0u, msg.authority_section().size()); - EXPECT_EQ(0u, msg.additional_section().size()); - - const auto& answer1 = msg.answer_section()[0]; - const auto& answer2 = msg.answer_section()[1]; - EXPECT_EQ(SectionEntryType::kA, answer1.GetType()); - EXPECT_EQ(SectionEntryType::kAAAA, answer2.GetType()); - EXPECT_EQ(60u, answer1.GetTtlSeconds()); - EXPECT_EQ(120u, answer2.GetTtlSeconds()); - - std::map resolution; - EXPECT_TRUE(GetResolution(&msg, &resolution)); - rtc::IPAddress expected_addr_ipv4( - rtc::SocketAddress("192.168.0.1", 0).ipaddr()); - rtc::IPAddress expected_addr_ipv6( - rtc::SocketAddress("fd12:3456:789a:1::1", 0).ipaddr()); - EXPECT_THAT(resolution, - UnorderedElementsAre(Pair("webrtc4.org.", expected_addr_ipv4), - Pair("webrtc6.org.", expected_addr_ipv6))); -} - -TEST(MdnsMessageTest, ReadTwoAnswersForIPv4AndIPv6AddrWithNameCompression) { - MdnsMessage msg; - ASSERT_TRUE(ReadMdnsMessage( - &msg, kTwoAuthoritativeAnswersWithIPv4AndIPv6AddrWithNameCompression)); - - std::map resolution; - EXPECT_TRUE(GetResolution(&msg, &resolution)); - rtc::IPAddress expected_addr_ipv4( - rtc::SocketAddress("192.168.0.1", 0).ipaddr()); - rtc::IPAddress expected_addr_ipv6( - rtc::SocketAddress("fd12:3456:789a:1::1", 0).ipaddr()); - 
EXPECT_THAT(resolution, - UnorderedElementsAre(Pair("www.webrtc.org.", expected_addr_ipv4), - Pair("webrtc.org.", expected_addr_ipv6))); -} - -TEST(MdnsMessageTest, - ReadAnswerWithCorruptedPointerInNameCompressionShouldFail) { - MdnsMessage msg; - EXPECT_FALSE(ReadMdnsMessage(&msg, kCorruptedAnswerWithNameCompression1)); - EXPECT_FALSE(ReadMdnsMessage(&msg, kCorruptedAnswerWithNameCompression2)); -} - -TEST(MdnsMessageTest, WriteSingleQuestionForIPv4Addr) { - MdnsMessage msg; - msg.SetId(0x1234); - msg.SetQueryOrResponse(true); - - MdnsQuestion question; - question.SetName("webrtc.org."); - question.SetType(SectionEntryType::kA); - question.SetClass(SectionEntryClass::kIN); - question.SetUnicastResponse(true); - msg.AddQuestion(question); - - WriteMdnsMessageAndCompare(&msg, - kSingleQuestionForIPv4AddrWithUnicastResponse); -} - -TEST(MdnsMessageTest, WriteTwoQuestionsForIPv4AndIPv6Addr) { - MdnsMessage msg; - msg.SetId(0x1234); - msg.SetQueryOrResponse(true); - - MdnsQuestion question1; - question1.SetName("webrtc4.org."); - question1.SetType(SectionEntryType::kA); - question1.SetClass(SectionEntryClass::kIN); - msg.AddQuestion(question1); - - MdnsQuestion question2; - question2.SetName("webrtc6.org."); - question2.SetType(SectionEntryType::kAAAA); - question2.SetClass(SectionEntryClass::kIN); - msg.AddQuestion(question2); - - WriteMdnsMessageAndCompare( - &msg, kTwoQuestionsForIPv4AndIPv6AddrWithMulticastResponse); -} - -TEST(MdnsMessageTest, WriteSingleAnswerToIPv4Addr) { - MdnsMessage msg; - msg.SetId(0x1234); - msg.SetQueryOrResponse(false); - msg.SetAuthoritative(true); - - MdnsResourceRecord answer; - answer.SetName("webrtc.org."); - answer.SetType(SectionEntryType::kA); - answer.SetClass(SectionEntryClass::kIN); - EXPECT_TRUE(answer.SetIPAddressInRecordData( - rtc::SocketAddress("192.168.0.1", 0).ipaddr())); - answer.SetTtlSeconds(120); - msg.AddAnswerRecord(answer); - - WriteMdnsMessageAndCompare(&msg, kSingleAuthoritativeAnswerWithIPv4Addr); -} - 
-TEST(MdnsMessageTest, WriteTwoAnswersToIPv4AndIPv6Addr) { - MdnsMessage msg; - msg.SetId(0x1234); - msg.SetQueryOrResponse(false); - msg.SetAuthoritative(true); - - MdnsResourceRecord answer1; - answer1.SetName("webrtc4.org."); - answer1.SetType(SectionEntryType::kA); - answer1.SetClass(SectionEntryClass::kIN); - answer1.SetIPAddressInRecordData( - rtc::SocketAddress("192.168.0.1", 0).ipaddr()); - answer1.SetTtlSeconds(60); - msg.AddAnswerRecord(answer1); - - MdnsResourceRecord answer2; - answer2.SetName("webrtc6.org."); - answer2.SetType(SectionEntryType::kAAAA); - answer2.SetClass(SectionEntryClass::kIN); - answer2.SetIPAddressInRecordData( - rtc::SocketAddress("fd12:3456:789a:1::1", 0).ipaddr()); - answer2.SetTtlSeconds(120); - msg.AddAnswerRecord(answer2); - - WriteMdnsMessageAndCompare(&msg, kTwoAuthoritativeAnswersWithIPv4AndIPv6Addr); -} - -} // namespace webrtc diff --git a/p2p/base/mock_async_resolver.h b/p2p/base/mock_async_resolver.h index 7d3be5b0b0..8bc0eb9cff 100644 --- a/p2p/base/mock_async_resolver.h +++ b/p2p/base/mock_async_resolver.h @@ -29,14 +29,17 @@ class MockAsyncResolver : public AsyncResolverInterface { } ~MockAsyncResolver() = default; - MOCK_METHOD1(Start, void(const rtc::SocketAddress&)); - MOCK_CONST_METHOD2(GetResolvedAddress, bool(int family, SocketAddress* addr)); - MOCK_CONST_METHOD0(GetError, int()); + MOCK_METHOD(void, Start, (const rtc::SocketAddress&), (override)); + MOCK_METHOD(bool, + GetResolvedAddress, + (int family, SocketAddress* addr), + (const, override)); + MOCK_METHOD(int, GetError, (), (const, override)); // Note that this won't delete the object like AsyncResolverInterface says in // order to avoid sanitizer failures caused by this being a synchronous // implementation. The test code should delete the object instead. 
- MOCK_METHOD1(Destroy, void(bool)); + MOCK_METHOD(void, Destroy, (bool), (override)); }; } // namespace rtc @@ -45,7 +48,7 @@ namespace webrtc { class MockAsyncResolverFactory : public AsyncResolverFactory { public: - MOCK_METHOD0(Create, rtc::AsyncResolverInterface*()); + MOCK_METHOD(rtc::AsyncResolverInterface*, Create, (), (override)); }; } // namespace webrtc diff --git a/p2p/base/mock_ice_transport.h b/p2p/base/mock_ice_transport.h index 1436cacb50..ef9f1b18ea 100644 --- a/p2p/base/mock_ice_transport.h +++ b/p2p/base/mock_ice_transport.h @@ -32,15 +32,20 @@ class MockIceTransport : public IceTransportInternal { SignalWritableState(this); } - MOCK_METHOD4(SendPacket, - int(const char* data, - size_t len, - const rtc::PacketOptions& options, - int flags)); - MOCK_METHOD2(SetOption, int(rtc::Socket::Option opt, int value)); - MOCK_METHOD0(GetError, int()); - MOCK_CONST_METHOD0(GetIceRole, cricket::IceRole()); - MOCK_METHOD1(GetStats, bool(cricket::IceTransportStats* ice_transport_stats)); + MOCK_METHOD(int, + SendPacket, + (const char* data, + size_t len, + const rtc::PacketOptions& options, + int flags), + (override)); + MOCK_METHOD(int, SetOption, (rtc::Socket::Option opt, int value), (override)); + MOCK_METHOD(int, GetError, (), (override)); + MOCK_METHOD(cricket::IceRole, GetIceRole, (), (const, override)); + MOCK_METHOD(bool, + GetStats, + (cricket::IceTransportStats * ice_transport_stats), + (override)); IceTransportState GetState() const override { return IceTransportState::STATE_INIT; diff --git a/p2p/base/p2p_transport_channel.cc b/p2p/base/p2p_transport_channel.cc index 73d12c7741..836721c151 100644 --- a/p2p/base/p2p_transport_channel.cc +++ b/p2p/base/p2p_transport_channel.cc @@ -10,27 +10,40 @@ #include "p2p/base/p2p_transport_channel.h" -#include +#include +#include + +#include +#include #include #include #include #include "absl/algorithm/container.h" +#include "absl/memory/memory.h" #include "absl/strings/match.h" +#include 
"api/async_dns_resolver.h" #include "api/candidate.h" +#include "api/task_queue/queued_task.h" #include "logging/rtc_event_log/ice_logger.h" +#include "p2p/base/basic_async_resolver_factory.h" #include "p2p/base/basic_ice_controller.h" -#include "p2p/base/candidate_pair_interface.h" #include "p2p/base/connection.h" +#include "p2p/base/connection_info.h" #include "p2p/base/port.h" #include "rtc_base/checks.h" #include "rtc_base/crc32.h" #include "rtc_base/experiments/struct_parameters_parser.h" +#include "rtc_base/ip_address.h" #include "rtc_base/logging.h" #include "rtc_base/net_helper.h" -#include "rtc_base/net_helpers.h" +#include "rtc_base/network.h" +#include "rtc_base/network_constants.h" #include "rtc_base/string_encode.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" @@ -108,6 +121,7 @@ namespace cricket { using webrtc::RTCError; using webrtc::RTCErrorType; +using webrtc::ToQueuedTask; bool IceCredentialsChanged(const std::string& old_ufrag, const std::string& old_pwd, @@ -120,26 +134,50 @@ bool IceCredentialsChanged(const std::string& old_ufrag, return (old_ufrag != new_ufrag) || (old_pwd != new_pwd); } +// static +std::unique_ptr P2PTransportChannel::Create( + const std::string& transport_name, + int component, + PortAllocator* allocator, + webrtc::AsyncDnsResolverFactoryInterface* async_dns_resolver_factory, + webrtc::RtcEventLog* event_log, + IceControllerFactoryInterface* ice_controller_factory) { + return absl::WrapUnique(new P2PTransportChannel( + transport_name, component, allocator, async_dns_resolver_factory, + /* owned_dns_resolver_factory= */ nullptr, event_log, + ice_controller_factory)); +} + P2PTransportChannel::P2PTransportChannel(const std::string& transport_name, int component, PortAllocator* allocator) : 
P2PTransportChannel(transport_name, component, allocator, - nullptr, - nullptr) {} + /* async_dns_resolver_factory= */ nullptr, + /* owned_dns_resolver_factory= */ nullptr, + /* event_log= */ nullptr, + /* ice_controller_factory= */ nullptr) {} +// Private constructor, called from Create() P2PTransportChannel::P2PTransportChannel( const std::string& transport_name, int component, PortAllocator* allocator, - webrtc::AsyncResolverFactory* async_resolver_factory, + webrtc::AsyncDnsResolverFactoryInterface* async_dns_resolver_factory, + std::unique_ptr + owned_dns_resolver_factory, webrtc::RtcEventLog* event_log, IceControllerFactoryInterface* ice_controller_factory) : transport_name_(transport_name), component_(component), allocator_(allocator), - async_resolver_factory_(async_resolver_factory), + // If owned_dns_resolver_factory is given, async_dns_resolver_factory is + // ignored. + async_dns_resolver_factory_(owned_dns_resolver_factory + ? owned_dns_resolver_factory.get() + : async_dns_resolver_factory), + owned_dns_resolver_factory_(std::move(owned_dns_resolver_factory)), network_thread_(rtc::Thread::Current()), incoming_only_(false), error_(0), @@ -156,6 +194,7 @@ P2PTransportChannel::P2PTransportChannel( true /* presume_writable_when_fully_relayed */, REGATHER_ON_FAILED_NETWORKS_INTERVAL, RECEIVING_SWITCHING_DELAY) { + TRACE_EVENT0("webrtc", "P2PTransportChannel::P2PTransportChannel"); RTC_DCHECK(allocator_ != nullptr); weak_ping_interval_ = GetWeakPingIntervalInFieldTrial(); // Validate IceConfig even for mostly built-in constant default values in case @@ -190,16 +229,33 @@ P2PTransportChannel::P2PTransportChannel( } } +// Public constructor, exposed for backwards compatibility. +// Deprecated. 
+P2PTransportChannel::P2PTransportChannel( + const std::string& transport_name, + int component, + PortAllocator* allocator, + webrtc::AsyncResolverFactory* async_resolver_factory, + webrtc::RtcEventLog* event_log, + IceControllerFactoryInterface* ice_controller_factory) + : P2PTransportChannel( + transport_name, + component, + allocator, + nullptr, + std::make_unique( + async_resolver_factory), + event_log, + ice_controller_factory) {} + P2PTransportChannel::~P2PTransportChannel() { + TRACE_EVENT0("webrtc", "P2PTransportChannel::~P2PTransportChannel"); + RTC_DCHECK_RUN_ON(network_thread_); std::vector copy(connections().begin(), connections().end()); for (Connection* con : copy) { con->Destroy(); } - for (auto& p : resolvers_) { - p.resolver_->Destroy(false); - } resolvers_.clear(); - RTC_DCHECK_RUN_ON(network_thread_); } // Add the allocator session to our list so that we know which sessions @@ -274,8 +330,7 @@ bool P2PTransportChannel::MaybeSwitchSelectedConnection( if (result.connection.has_value()) { RTC_LOG(LS_INFO) << "Switching selected connection due to: " << reason.ToString(); - SwitchSelectedConnection(const_cast(*result.connection), - reason); + SwitchSelectedConnection(FromIceController(*result.connection), reason); } if (result.recheck_event.has_value()) { @@ -283,13 +338,18 @@ bool P2PTransportChannel::MaybeSwitchSelectedConnection( // threshold, the new connection is in a better receiving state than the // currently selected connection. So we need to re-check whether it needs // to be switched at a later time. 
- invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, thread(), - rtc::Bind(&P2PTransportChannel::SortConnectionsAndUpdateState, this, - *result.recheck_event), + network_thread_->PostDelayedTask( + ToQueuedTask(task_safety_, + [this, recheck = *result.recheck_event]() { + SortConnectionsAndUpdateState(recheck); + }), result.recheck_event->recheck_delay_ms); } + for (const auto* con : result.connections_to_forget_state_on) { + FromIceController(con)->ForgetLearnedState(); + } + return result.connection.has_value(); } @@ -692,11 +752,17 @@ void P2PTransportChannel::SetIceConfig(const IceConfig& config) { // Make sure that nomination reaching ICE controlled asap. "send_ping_on_switch_ice_controlling", &field_trials_.send_ping_on_switch_ice_controlling, + // Make sure that nomination reaching ICE controlled asap. + "send_ping_on_selected_ice_controlling", + &field_trials_.send_ping_on_selected_ice_controlling, // Reply to nomination ASAP. "send_ping_on_nomination_ice_controlled", &field_trials_.send_ping_on_nomination_ice_controlled, // Allow connections to live untouched longer that 30s. - "dead_connection_timeout_ms", &field_trials_.dead_connection_timeout_ms) + "dead_connection_timeout_ms", &field_trials_.dead_connection_timeout_ms, + // Stop gathering on strongly connected. + "stop_gather_on_strongly_connected", + &field_trials_.stop_gather_on_strongly_connected) ->Parse(webrtc::field_trial::FindFullName("WebRTC-IceFieldTrials")); if (field_trials_.dead_connection_timeout_ms < 30000) { @@ -831,6 +897,13 @@ void P2PTransportChannel::MaybeStartGathering() { static_cast(IceRestartState::MAX_VALUE)); } + for (const auto& session : allocator_sessions_) { + if (session->IsStopped()) { + continue; + } + session->StopGettingPorts(); + } + // Time for a new allocator. 
std::unique_ptr pooled_session = allocator_->TakePooledSession(transport_name(), component(), @@ -884,7 +957,8 @@ void P2PTransportChannel::OnPortReady(PortAllocatorSession* session, ports_.push_back(port); port->SignalUnknownAddress.connect(this, &P2PTransportChannel::OnUnknownAddress); - port->SignalDestroyed.connect(this, &P2PTransportChannel::OnPortDestroyed); + port->SubscribePortDestroyed( + [this](PortInterface* port) { OnPortDestroyed(port); }); port->SignalRoleConflict.connect(this, &P2PTransportChannel::OnRoleConflict); port->SignalSentPacket.connect(this, &P2PTransportChannel::OnSentPacket); @@ -1009,7 +1083,7 @@ void P2PTransportChannel::OnUnknownAddress(PortInterface* port, uint16_t network_id = 0; uint16_t network_cost = 0; const StunUInt32Attribute* network_attr = - stun_msg->GetUInt32(STUN_ATTR_NETWORK_INFO); + stun_msg->GetUInt32(STUN_ATTR_GOOG_NETWORK_INFO); if (network_attr) { uint32_t network_info = network_attr->value(); network_id = static_cast(network_info >> 16); @@ -1144,16 +1218,17 @@ void P2PTransportChannel::OnNominated(Connection* conn) { void P2PTransportChannel::ResolveHostnameCandidate(const Candidate& candidate) { RTC_DCHECK_RUN_ON(network_thread_); - if (!async_resolver_factory_) { + if (!async_dns_resolver_factory_) { RTC_LOG(LS_WARNING) << "Dropping ICE candidate with hostname address " "(no AsyncResolverFactory)"; return; } - rtc::AsyncResolverInterface* resolver = async_resolver_factory_->Create(); - resolvers_.emplace_back(candidate, resolver); - resolver->SignalDone.connect(this, &P2PTransportChannel::OnCandidateResolved); - resolver->Start(candidate.address()); + auto resolver = async_dns_resolver_factory_->Create(); + auto resptr = resolver.get(); + resolvers_.emplace_back(candidate, std::move(resolver)); + resptr->Start(candidate.address(), + [this, resptr]() { OnCandidateResolved(resptr); }); RTC_LOG(LS_INFO) << "Asynchronously resolving ICE candidate hostname " << candidate.address().HostAsSensitiveURIString(); } @@ 
-1194,7 +1269,12 @@ void P2PTransportChannel::AddRemoteCandidate(const Candidate& candidate) { } if (new_remote_candidate.address().IsUnresolvedIP()) { - ResolveHostnameCandidate(new_remote_candidate); + // Don't do DNS lookups if the IceTransportPolicy is "none" or "relay". + bool sharing_host = ((allocator_->candidate_filter() & CF_HOST) != 0); + bool sharing_stun = ((allocator_->candidate_filter() & CF_REFLEXIVE) != 0); + if (sharing_host || sharing_stun) { + ResolveHostnameCandidate(new_remote_candidate); + } return; } @@ -1203,39 +1283,44 @@ void P2PTransportChannel::AddRemoteCandidate(const Candidate& candidate) { P2PTransportChannel::CandidateAndResolver::CandidateAndResolver( const Candidate& candidate, - rtc::AsyncResolverInterface* resolver) - : candidate_(candidate), resolver_(resolver) {} + std::unique_ptr&& resolver) + : candidate_(candidate), resolver_(std::move(resolver)) {} P2PTransportChannel::CandidateAndResolver::~CandidateAndResolver() {} void P2PTransportChannel::OnCandidateResolved( - rtc::AsyncResolverInterface* resolver) { + webrtc::AsyncDnsResolverInterface* resolver) { RTC_DCHECK_RUN_ON(network_thread_); auto p = absl::c_find_if(resolvers_, [resolver](const CandidateAndResolver& cr) { - return cr.resolver_ == resolver; + return cr.resolver_.get() == resolver; }); if (p == resolvers_.end()) { - RTC_LOG(LS_ERROR) << "Unexpected AsyncResolver signal"; + RTC_LOG(LS_ERROR) << "Unexpected AsyncDnsResolver return"; RTC_NOTREACHED(); return; } Candidate candidate = p->candidate_; + AddRemoteCandidateWithResult(candidate, resolver->result()); + // Now we can delete the resolver. + // TODO(bugs.webrtc.org/12651): Replace the stuff below with + // resolvers_.erase(p); + std::unique_ptr to_delete = + std::move(p->resolver_); + // Delay the actual deletion of the resolver until the lambda executes. 
+ network_thread_->PostTask( + ToQueuedTask([delete_this = std::move(to_delete)] {})); resolvers_.erase(p); - AddRemoteCandidateWithResolver(candidate, resolver); - invoker_.AsyncInvoke( - RTC_FROM_HERE, thread(), - rtc::Bind(&rtc::AsyncResolverInterface::Destroy, resolver, false)); } -void P2PTransportChannel::AddRemoteCandidateWithResolver( +void P2PTransportChannel::AddRemoteCandidateWithResult( Candidate candidate, - rtc::AsyncResolverInterface* resolver) { + const webrtc::AsyncDnsResolverResult& result) { RTC_DCHECK_RUN_ON(network_thread_); - if (resolver->GetError()) { + if (result.GetError()) { RTC_LOG(LS_WARNING) << "Failed to resolve ICE candidate hostname " << candidate.address().HostAsSensitiveURIString() - << " with error " << resolver->GetError(); + << " with error " << result.GetError(); return; } @@ -1243,9 +1328,8 @@ void P2PTransportChannel::AddRemoteCandidateWithResolver( // Prefer IPv6 to IPv4 if we have it (see RFC 5245 Section 15.1). // TODO(zstein): This won't work if we only have IPv4 locally but receive an // AAAA DNS record. 
- bool have_address = - resolver->GetResolvedAddress(AF_INET6, &resolved_address) || - resolver->GetResolvedAddress(AF_INET, &resolved_address); + bool have_address = result.GetResolvedAddress(AF_INET6, &resolved_address) || + result.GetResolvedAddress(AF_INET, &resolved_address); if (!have_address) { RTC_LOG(LS_INFO) << "ICE candidate hostname " << candidate.address().HostAsSensitiveURIString() @@ -1398,7 +1482,7 @@ bool P2PTransportChannel::CreateConnection(PortInterface* port, return false; } -bool P2PTransportChannel::FindConnection(Connection* connection) const { +bool P2PTransportChannel::FindConnection(const Connection* connection) const { RTC_DCHECK_RUN_ON(network_thread_); return absl::c_linear_search(connections(), connection); } @@ -1598,10 +1682,10 @@ void P2PTransportChannel::RequestSortAndStateUpdate( IceControllerEvent reason_to_sort) { RTC_DCHECK_RUN_ON(network_thread_); if (!sort_dirty_) { - invoker_.AsyncInvoke( - RTC_FROM_HERE, thread(), - rtc::Bind(&P2PTransportChannel::SortConnectionsAndUpdateState, this, - reason_to_sort)); + network_thread_->PostTask( + ToQueuedTask(task_safety_, [this, reason_to_sort]() { + SortConnectionsAndUpdateState(reason_to_sort); + })); sort_dirty_ = true; } } @@ -1616,9 +1700,8 @@ void P2PTransportChannel::MaybeStartPinging() { RTC_LOG(LS_INFO) << ToString() << ": Have a pingable connection for the first time; " "starting to ping."; - invoker_.AsyncInvoke( - RTC_FROM_HERE, thread(), - rtc::Bind(&P2PTransportChannel::CheckAndPing, this)); + network_thread_->PostTask( + ToQueuedTask(task_safety_, [this]() { CheckAndPing(); })); regathering_controller_->Start(); started_pinging_ = true; } @@ -1704,7 +1787,7 @@ void P2PTransportChannel::PruneConnections() { std::vector connections_to_prune = ice_controller_->PruneConnections(); for (const Connection* conn : connections_to_prune) { - const_cast(conn)->Prune(); + FromIceController(conn)->Prune(); } } @@ -1760,9 +1843,10 @@ void 
P2PTransportChannel::SwitchSelectedConnection(Connection* conn, RTC_LOG(LS_INFO) << ToString() << ": No selected connection"; } - if (field_trials_.send_ping_on_switch_ice_controlling && - ice_role_ == ICEROLE_CONTROLLING && old_selected_connection != nullptr && - conn != nullptr) { + if (conn != nullptr && ice_role_ == ICEROLE_CONTROLLING && + ((field_trials_.send_ping_on_switch_ice_controlling && + old_selected_connection != nullptr) || + field_trials_.send_ping_on_selected_ice_controlling)) { PingConnection(conn); MarkConnectionPinged(conn); } @@ -1776,6 +1860,15 @@ void P2PTransportChannel::SwitchSelectedConnection(Connection* conn, pair_change.selected_candidate_pair = *GetSelectedCandidatePair(); pair_change.last_data_received_ms = selected_connection_->last_data_received(); + + if (old_selected_connection) { + pair_change.estimated_disconnected_time_ms = + ComputeEstimatedDisconnectedTimeMs(rtc::TimeMillis(), + old_selected_connection); + } else { + pair_change.estimated_disconnected_time_ms = 0; + } + SignalCandidatePairChanged(pair_change); } @@ -1784,6 +1877,16 @@ void P2PTransportChannel::SwitchSelectedConnection(Connection* conn, ice_controller_->SetSelectedConnection(selected_connection_); } +int64_t P2PTransportChannel::ComputeEstimatedDisconnectedTimeMs( + int64_t now_ms, + Connection* old_connection) { + // TODO(jonaso): nicer keeps estimate of how frequently data _should_ be + // received, this could be used to give better estimate (if needed). + int64_t last_data_or_old_ping = + std::max(old_connection->last_received(), last_data_received_ms_); + return (now_ms - last_data_or_old_ping); +} + // Warning: UpdateState should eventually be called whenever a connection // is added, deleted, or the write state of any connection changes so that the // transport controller will get the up-to-date channel state. 
However it @@ -1907,24 +2010,27 @@ void P2PTransportChannel::CheckAndPing() { UpdateConnectionStates(); auto result = ice_controller_->SelectConnectionToPing(last_ping_sent_ms_); - Connection* conn = - const_cast(result.connection.value_or(nullptr)); int delay = result.recheck_delay_ms; - if (conn) { + if (result.connection.value_or(nullptr)) { + Connection* conn = FromIceController(*result.connection); PingConnection(conn); MarkConnectionPinged(conn); } - invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, thread(), - rtc::Bind(&P2PTransportChannel::CheckAndPing, this), delay); + network_thread_->PostDelayedTask( + ToQueuedTask(task_safety_, [this]() { CheckAndPing(); }), delay); } // This method is only for unit testing. Connection* P2PTransportChannel::FindNextPingableConnection() { RTC_DCHECK_RUN_ON(network_thread_); - return const_cast(ice_controller_->FindNextPingableConnection()); + auto* conn = ice_controller_->FindNextPingableConnection(); + if (conn) { + return FromIceController(conn); + } else { + return nullptr; + } } // A connection is considered a backup connection if the channel state @@ -1980,11 +2086,13 @@ void P2PTransportChannel::OnConnectionStateChange(Connection* connection) { // the connection is at the latest generation. It is not enough to check // that the connection becomes weakly connected because the connection may be // changing from (writable, receiving) to (writable, not receiving). 
- bool strongly_connected = !connection->weak(); - bool latest_generation = connection->local_candidate().generation() >= - allocator_session()->generation(); - if (strongly_connected && latest_generation) { - MaybeStopPortAllocatorSessions(); + if (field_trials_.stop_gather_on_strongly_connected) { + bool strongly_connected = !connection->weak(); + bool latest_generation = connection->local_candidate().generation() >= + allocator_session()->generation(); + if (strongly_connected && latest_generation) { + MaybeStopPortAllocatorSessions(); + } } // We have to unroll the stack before doing this because we may be changing // the state of connections while sorting. @@ -2098,6 +2206,9 @@ void P2PTransportChannel::OnReadPacket(Connection* connection, if (connection == selected_connection_) { // Let the client know of an incoming packet + RTC_DCHECK(connection->last_data_received() >= last_data_received_ms_); + last_data_received_ms_ = + std::max(last_data_received_ms_, connection->last_data_received()); SignalReadPacket(this, data, len, packet_time_us, 0); return; } @@ -2106,6 +2217,10 @@ void P2PTransportChannel::OnReadPacket(Connection* connection, if (!FindConnection(connection)) return; + RTC_DCHECK(connection->last_data_received() >= last_data_received_ms_); + last_data_received_ms_ = + std::max(last_data_received_ms_, connection->last_data_received()); + // Let the client know of an incoming packet SignalReadPacket(this, data, len, packet_time_us, 0); diff --git a/p2p/base/p2p_transport_channel.h b/p2p/base/p2p_transport_channel.h index 3d6c86f031..462aa105b1 100644 --- a/p2p/base/p2p_transport_channel.h +++ b/p2p/base/p2p_transport_channel.h @@ -20,6 +20,9 @@ #ifndef P2P_BASE_P2P_TRANSPORT_CHANNEL_H_ #define P2P_BASE_P2P_TRANSPORT_CHANNEL_H_ +#include +#include + #include #include #include @@ -27,26 +30,43 @@ #include #include +#include "absl/base/attributes.h" +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "api/async_dns_resolver.h" 
#include "api/async_resolver_factory.h" #include "api/candidate.h" #include "api/rtc_error.h" +#include "api/sequence_checker.h" +#include "api/transport/enums.h" +#include "api/transport/stun.h" #include "logging/rtc_event_log/events/rtc_event_ice_candidate_pair_config.h" #include "logging/rtc_event_log/ice_logger.h" #include "p2p/base/candidate_pair_interface.h" +#include "p2p/base/connection.h" #include "p2p/base/ice_controller_factory_interface.h" #include "p2p/base/ice_controller_interface.h" #include "p2p/base/ice_transport_internal.h" #include "p2p/base/p2p_constants.h" #include "p2p/base/p2p_transport_channel_ice_field_trials.h" +#include "p2p/base/port.h" #include "p2p/base/port_allocator.h" #include "p2p/base/port_interface.h" #include "p2p/base/regathering_controller.h" -#include "rtc_base/async_invoker.h" +#include "p2p/base/transport_description.h" #include "rtc_base/async_packet_socket.h" +#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" +#include "rtc_base/dscp.h" +#include "rtc_base/network/sent_packet.h" +#include "rtc_base/network_route.h" +#include "rtc_base/socket.h" +#include "rtc_base/socket_address.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/system/rtc_export.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -82,11 +102,19 @@ class RemoteCandidate : public Candidate { // two P2P clients connected to each other. class RTC_EXPORT P2PTransportChannel : public IceTransportInternal { public: + static std::unique_ptr Create( + const std::string& transport_name, + int component, + PortAllocator* allocator, + webrtc::AsyncDnsResolverFactoryInterface* async_dns_resolver_factory, + webrtc::RtcEventLog* event_log = nullptr, + IceControllerFactoryInterface* ice_controller_factory = nullptr); // For testing only. 
- // TODO(zstein): Remove once AsyncResolverFactory is required. + // TODO(zstein): Remove once AsyncDnsResolverFactory is required. P2PTransportChannel(const std::string& transport_name, int component, PortAllocator* allocator); + ABSL_DEPRECATED("bugs.webrtc.org/12598") P2PTransportChannel( const std::string& transport_name, int component, @@ -209,8 +237,18 @@ class RTC_EXPORT P2PTransportChannel : public IceTransportInternal { } private: - rtc::Thread* thread() const { return network_thread_; } - + P2PTransportChannel( + const std::string& transport_name, + int component, + PortAllocator* allocator, + // DNS resolver factory + webrtc::AsyncDnsResolverFactoryInterface* async_dns_resolver_factory, + // If the P2PTransportChannel has to delete the DNS resolver factory + // on release, this pointer is set. + std::unique_ptr + owned_dns_resolver_factory, + webrtc::RtcEventLog* event_log = nullptr, + IceControllerFactoryInterface* ice_controller_factory = nullptr); bool IsGettingPorts() { RTC_DCHECK_RUN_ON(network_thread_); return allocator_session()->IsGettingPorts(); @@ -245,7 +283,7 @@ class RTC_EXPORT P2PTransportChannel : public IceTransportInternal { bool CreateConnection(PortInterface* port, const Candidate& remote_candidate, PortInterface* origin_port); - bool FindConnection(Connection* connection) const; + bool FindConnection(const Connection* connection) const; uint32_t GetRemoteCandidateGeneration(const Candidate& candidate); bool IsDuplicateRemoteCandidate(const Candidate& candidate); @@ -348,12 +386,28 @@ class RTC_EXPORT P2PTransportChannel : public IceTransportInternal { // 2. Peer-reflexive remote candidates. Candidate SanitizeRemoteCandidate(const Candidate& c) const; + // Cast a Connection returned from IceController and verify that it exists. + // (P2P owns all Connections, and only gives const pointers to IceController, + // see IceControllerInterface). 
+ Connection* FromIceController(const Connection* conn) { + // Verify that IceController does not return a connection + // that we have destroyed. + RTC_DCHECK(FindConnection(conn)); + return const_cast(conn); + } + + int64_t ComputeEstimatedDisconnectedTimeMs(int64_t now, + Connection* old_connection); + + webrtc::ScopedTaskSafety task_safety_; std::string transport_name_ RTC_GUARDED_BY(network_thread_); int component_ RTC_GUARDED_BY(network_thread_); PortAllocator* allocator_ RTC_GUARDED_BY(network_thread_); - webrtc::AsyncResolverFactory* async_resolver_factory_ + webrtc::AsyncDnsResolverFactoryInterface* const async_dns_resolver_factory_ RTC_GUARDED_BY(network_thread_); - rtc::Thread* network_thread_; + const std::unique_ptr + owned_dns_resolver_factory_; + rtc::Thread* const network_thread_; bool incoming_only_ RTC_GUARDED_BY(network_thread_); int error_ RTC_GUARDED_BY(network_thread_); std::vector> allocator_sessions_ @@ -406,7 +460,6 @@ class RTC_EXPORT P2PTransportChannel : public IceTransportInternal { bool has_been_writable_ RTC_GUARDED_BY(network_thread_) = false; // if writable_ has ever been true - rtc::AsyncInvoker invoker_ RTC_GUARDED_BY(network_thread_); absl::optional network_route_ RTC_GUARDED_BY(network_thread_); webrtc::IceEventLog ice_event_log_ RTC_GUARDED_BY(network_thread_); @@ -415,21 +468,31 @@ class RTC_EXPORT P2PTransportChannel : public IceTransportInternal { RTC_GUARDED_BY(network_thread_); struct CandidateAndResolver final { - CandidateAndResolver(const Candidate& candidate, - rtc::AsyncResolverInterface* resolver); + CandidateAndResolver( + const Candidate& candidate, + std::unique_ptr&& resolver); ~CandidateAndResolver(); + // Moveable, but not copyable. 
+ CandidateAndResolver(CandidateAndResolver&&) = default; + CandidateAndResolver& operator=(CandidateAndResolver&&) = default; + Candidate candidate_; - rtc::AsyncResolverInterface* resolver_; + std::unique_ptr resolver_; }; std::vector resolvers_ RTC_GUARDED_BY(network_thread_); void FinishAddingRemoteCandidate(const Candidate& new_remote_candidate); - void OnCandidateResolved(rtc::AsyncResolverInterface* resolver); - void AddRemoteCandidateWithResolver(Candidate candidate, - rtc::AsyncResolverInterface* resolver); + void OnCandidateResolved(webrtc::AsyncDnsResolverInterface* resolver); + void AddRemoteCandidateWithResult( + Candidate candidate, + const webrtc::AsyncDnsResolverResult& result); // Number of times the selected_connection_ has been modified. uint32_t selected_candidate_pair_changes_ = 0; + // When was last data received on an existing connection, + // from connection->last_data_received() that uses rtc::TimeMillis(). + int64_t last_data_received_ms_ = 0; + IceFieldTrials field_trials_; RTC_DISALLOW_COPY_AND_ASSIGN(P2PTransportChannel); diff --git a/p2p/base/p2p_transport_channel_ice_field_trials.h b/p2p/base/p2p_transport_channel_ice_field_trials.h index f30366fd1f..82dc580c1e 100644 --- a/p2p/base/p2p_transport_channel_ice_field_trials.h +++ b/p2p/base/p2p_transport_channel_ice_field_trials.h @@ -44,14 +44,23 @@ struct IceFieldTrials { int rtt_estimate_halftime_ms = 500; // Sending a PING directly after a switch on ICE_CONTROLLING-side. + // TODO(jonaso) : Deprecate this in favor of + // |send_ping_on_selected_ice_controlling|. bool send_ping_on_switch_ice_controlling = false; + // Sending a PING directly after selecting a connection + // (i.e. either a switch or the initial selection). + bool send_ping_on_selected_ice_controlling = false; + // Sending a PING directly after a nomination on ICE_CONTROLLED-side.
bool send_ping_on_nomination_ice_controlled = false; // The timeout after which the connection will be considered dead if no // traffic is received. int dead_connection_timeout_ms = 30000; + + // Stop gathering when having a strong connection. + bool stop_gather_on_strongly_connected = true; }; } // namespace cricket diff --git a/p2p/base/p2p_transport_channel_unittest.cc b/p2p/base/p2p_transport_channel_unittest.cc index ce78335fd9..b217a74859 100644 --- a/p2p/base/p2p_transport_channel_unittest.cc +++ b/p2p/base/p2p_transport_channel_unittest.cc @@ -14,6 +14,7 @@ #include #include +#include "api/test/mock_async_dns_resolver.h" #include "p2p/base/basic_ice_controller.h" #include "p2p/base/connection.h" #include "p2p/base/fake_port_allocator.h" @@ -51,9 +52,12 @@ using ::testing::Assign; using ::testing::Contains; using ::testing::DoAll; using ::testing::InSequence; +using ::testing::InvokeArgument; using ::testing::InvokeWithoutArgs; using ::testing::NiceMock; using ::testing::Return; +using ::testing::ReturnRef; +using ::testing::SaveArg; using ::testing::SetArgPointee; using ::testing::SizeIs; @@ -177,14 +181,59 @@ cricket::BasicPortAllocator* CreateBasicPortAllocator( class MockIceControllerFactory : public cricket::IceControllerFactoryInterface { public: - ~MockIceControllerFactory() = default; + ~MockIceControllerFactory() override = default; std::unique_ptr Create( - const cricket::IceControllerFactoryArgs& args) { + const cricket::IceControllerFactoryArgs& args) override { RecordIceControllerCreated(); return std::make_unique(args); } - MOCK_METHOD0(RecordIceControllerCreated, void()); + MOCK_METHOD(void, RecordIceControllerCreated, ()); +}; + +// An one-shot resolver factory with default return arguments. +// Resolution is immediate, always succeeds, and returns nonsense. 
+class ResolverFactoryFixture : public webrtc::MockAsyncDnsResolverFactory { + public: + ResolverFactoryFixture() { + mock_async_dns_resolver_ = std::make_unique(); + ON_CALL(*mock_async_dns_resolver_, Start(_, _)) + .WillByDefault(InvokeArgument<1>()); + EXPECT_CALL(*mock_async_dns_resolver_, result()) + .WillOnce(ReturnRef(mock_async_dns_resolver_result_)); + + // A default action for GetResolvedAddress. Will be overruled + // by SetAddressToReturn. + ON_CALL(mock_async_dns_resolver_result_, GetResolvedAddress(_, _)) + .WillByDefault(Return(true)); + + EXPECT_CALL(mock_async_dns_resolver_result_, GetError()) + .WillOnce(Return(0)); + EXPECT_CALL(*this, Create()).WillOnce([this]() { + return std::move(mock_async_dns_resolver_); + }); + } + + void SetAddressToReturn(rtc::SocketAddress address_to_return) { + EXPECT_CALL(mock_async_dns_resolver_result_, GetResolvedAddress(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(address_to_return), Return(true))); + } + void DelayResolution() { + // This function must be called before Create(). + ASSERT_TRUE(!!mock_async_dns_resolver_); + EXPECT_CALL(*mock_async_dns_resolver_, Start(_, _)) + .WillOnce(SaveArg<1>(&saved_callback_)); + } + void FireDelayedResolution() { + // This function must be called after Create(). + ASSERT_TRUE(saved_callback_); + saved_callback_(); + } + + private: + std::unique_ptr mock_async_dns_resolver_; + webrtc::MockAsyncDnsResolverResult mock_async_dns_resolver_result_; + std::function saved_callback_; }; } // namespace @@ -207,7 +256,7 @@ namespace cricket { // Note that this class is a base class for use by other tests, who will provide // specialized test behavior. 
class P2PTransportChannelTestBase : public ::testing::Test, - public rtc::MessageHandler, + public rtc::MessageHandlerAutoCleanup, public sigslot::has_slots<> { public: P2PTransportChannelTestBase() @@ -215,7 +264,7 @@ class P2PTransportChannelTestBase : public ::testing::Test, nss_(new rtc::NATSocketServer(vss_.get())), ss_(new rtc::FirewallSocketServer(nss_.get())), main_(ss_.get()), - stun_server_(TestStunServer::Create(&main_, kStunAddr)), + stun_server_(TestStunServer::Create(ss_.get(), kStunAddr)), turn_server_(&main_, kTurnUdpIntAddr, kTurnUdpExtAddr), socks_server1_(ss_.get(), kSocksProxyAddrs[0], @@ -345,7 +394,7 @@ class P2PTransportChannelTestBase : public ::testing::Test, rtc::FakeNetworkManager network_manager_; std::unique_ptr allocator_; - webrtc::AsyncResolverFactory* async_resolver_factory_; + webrtc::AsyncDnsResolverFactoryInterface* async_dns_resolver_factory_; ChannelData cd1_; ChannelData cd2_; IceRole role_; @@ -378,10 +427,10 @@ class P2PTransportChannelTestBase : public ::testing::Test, IceParamsWithRenomination(kIceParams[0], renomination); IceParameters ice_ep2_cd1_ch = IceParamsWithRenomination(kIceParams[1], renomination); - ep1_.cd1_.ch_.reset(CreateChannel(0, ICE_CANDIDATE_COMPONENT_DEFAULT, - ice_ep1_cd1_ch, ice_ep2_cd1_ch)); - ep2_.cd1_.ch_.reset(CreateChannel(1, ICE_CANDIDATE_COMPONENT_DEFAULT, - ice_ep2_cd1_ch, ice_ep1_cd1_ch)); + ep1_.cd1_.ch_ = CreateChannel(0, ICE_CANDIDATE_COMPONENT_DEFAULT, + ice_ep1_cd1_ch, ice_ep2_cd1_ch); + ep2_.cd1_.ch_ = CreateChannel(1, ICE_CANDIDATE_COMPONENT_DEFAULT, + ice_ep2_cd1_ch, ice_ep1_cd1_ch); ep1_.cd1_.ch_->SetIceConfig(ep1_config); ep2_.cd1_.ch_->SetIceConfig(ep2_config); ep1_.cd1_.ch_->MaybeStartGathering(); @@ -397,13 +446,14 @@ class P2PTransportChannelTestBase : public ::testing::Test, CreateChannels(default_config, default_config, false); } - P2PTransportChannel* CreateChannel(int endpoint, - int component, - const IceParameters& local_ice, - const IceParameters& remote_ice) { - 
P2PTransportChannel* channel = new P2PTransportChannel( + std::unique_ptr CreateChannel( + int endpoint, + int component, + const IceParameters& local_ice, + const IceParameters& remote_ice) { + auto channel = P2PTransportChannel::Create( "test content name", component, GetAllocator(endpoint), - GetEndpoint(endpoint)->async_resolver_factory_); + GetEndpoint(endpoint)->async_dns_resolver_factory_); channel->SignalReadyToSend.connect( this, &P2PTransportChannelTestBase::OnReadyToSend); channel->SignalCandidateGathered.connect( @@ -1284,6 +1334,7 @@ TEST_F(P2PTransportChannelTest, GetStats) { ep2_ch1()->receiving() && ep2_ch1()->writable(), kMediumTimeout, clock); + // Sends and receives 10 packets. TestSendRecv(&clock); IceTransportStats ice_transport_stats; ASSERT_TRUE(ep1_ch1()->GetStats(&ice_transport_stats)); @@ -1306,6 +1357,7 @@ TEST_F(P2PTransportChannelTest, GetStats) { EXPECT_EQ(0U, best_conn_info->sent_discarded_packets); EXPECT_EQ(10 * 36U, best_conn_info->sent_total_bytes); EXPECT_EQ(10 * 36U, best_conn_info->recv_total_bytes); + EXPECT_EQ(10U, best_conn_info->packets_received); DestroyChannels(); } @@ -1480,7 +1532,7 @@ TEST_F(P2PTransportChannelTest, PeerReflexiveCandidateBeforeSignaling) { PauseCandidates(1); // Wait until the callee becomes writable to make sure that a ping request is - // received by the caller before his remote ICE credentials are set. + // received by the caller before their remote ICE credentials are set. ASSERT_TRUE_WAIT(ep2_ch1()->selected_connection() != nullptr, kMediumTimeout); // Add two sets of remote ICE credentials, so that the ones used by the // candidate will be generation 1 instead of 0. @@ -1588,7 +1640,7 @@ TEST_F(P2PTransportChannelTest, PeerReflexiveCandidateBeforeSignalingWithNAT) { PauseCandidates(1); // Wait until the callee becomes writable to make sure that a ping request is - // received by the caller before his remote ICE credentials are set. 
+ // received by the caller before their remote ICE credentials are set. ASSERT_TRUE_WAIT(ep2_ch1()->selected_connection() != nullptr, kMediumTimeout); // Add two sets of remote ICE credentials, so that the ones used by the // candidate will be generation 1 instead of 0. @@ -2077,8 +2129,8 @@ TEST_F(P2PTransportChannelTest, TurnToTurnPresumedWritable) { kDefaultPortAllocatorFlags); // Only configure one channel so we can control when the remote candidate // is added. - GetEndpoint(0)->cd1_.ch_.reset(CreateChannel( - 0, ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[0], kIceParams[1])); + GetEndpoint(0)->cd1_.ch_ = CreateChannel(0, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[0], kIceParams[1]); IceConfig config; config.presume_writable_when_fully_relayed = true; ep1_ch1()->SetIceConfig(config); @@ -2126,10 +2178,10 @@ TEST_F(P2PTransportChannelTest, TurnToPrflxPresumedWritable) { test_turn_server()->set_enable_permission_checks(false); IceConfig config; config.presume_writable_when_fully_relayed = true; - GetEndpoint(0)->cd1_.ch_.reset(CreateChannel( - 0, ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[0], kIceParams[1])); - GetEndpoint(1)->cd1_.ch_.reset(CreateChannel( - 1, ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[1], kIceParams[0])); + GetEndpoint(0)->cd1_.ch_ = CreateChannel(0, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[0], kIceParams[1]); + GetEndpoint(1)->cd1_.ch_ = CreateChannel(1, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[1], kIceParams[0]); ep1_ch1()->SetIceConfig(config); ep2_ch1()->SetIceConfig(config); // Don't signal candidates from channel 2, so that channel 1 sees the TURN @@ -2165,10 +2217,10 @@ TEST_F(P2PTransportChannelTest, PresumedWritablePreferredOverUnreliable) { kDefaultPortAllocatorFlags); IceConfig config; config.presume_writable_when_fully_relayed = true; - GetEndpoint(0)->cd1_.ch_.reset(CreateChannel( - 0, ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[0], kIceParams[1])); - GetEndpoint(1)->cd1_.ch_.reset(CreateChannel( - 1, 
ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[1], kIceParams[0])); + GetEndpoint(0)->cd1_.ch_ = CreateChannel(0, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[0], kIceParams[1]); + GetEndpoint(1)->cd1_.ch_ = CreateChannel(1, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[1], kIceParams[0]); ep1_ch1()->SetIceConfig(config); ep2_ch1()->SetIceConfig(config); ep1_ch1()->MaybeStartGathering(); @@ -2203,8 +2255,8 @@ TEST_F(P2PTransportChannelTest, SignalReadyToSendWithPresumedWritable) { kDefaultPortAllocatorFlags); // Only test one endpoint, so we can ensure the connection doesn't receive a // binding response and advance beyond being "presumed" writable. - GetEndpoint(0)->cd1_.ch_.reset(CreateChannel( - 0, ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[0], kIceParams[1])); + GetEndpoint(0)->cd1_.ch_ = CreateChannel(0, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[0], kIceParams[1]); IceConfig config; config.presume_writable_when_fully_relayed = true; ep1_ch1()->SetIceConfig(config); @@ -2256,10 +2308,10 @@ TEST_F(P2PTransportChannelTest, // to configure the server to accept packets from an address we haven't // explicitly installed permission for. test_turn_server()->set_enable_permission_checks(false); - GetEndpoint(0)->cd1_.ch_.reset(CreateChannel( - 0, ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[0], kIceParams[1])); - GetEndpoint(1)->cd1_.ch_.reset(CreateChannel( - 1, ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[1], kIceParams[0])); + GetEndpoint(0)->cd1_.ch_ = CreateChannel(0, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[0], kIceParams[1]); + GetEndpoint(1)->cd1_.ch_ = CreateChannel(1, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[1], kIceParams[0]); // Don't signal candidates from channel 2, so that channel 1 sees the TURN // candidate as peer reflexive. 
PauseCandidates(1); @@ -2885,6 +2937,53 @@ TEST_F(P2PTransportChannelMultihomedTest, TestPingBackupConnectionRate) { DestroyChannels(); } +// Test that the connection is pinged at a rate no faster than +// what was configured when stable and writable. +TEST_F(P2PTransportChannelMultihomedTest, TestStableWritableRate) { + AddAddress(0, kPublicAddrs[0]); + // Adding alternate address will make sure |kPublicAddrs| has the higher + // priority than others. This is due to FakeNetwork::AddInterface method. + AddAddress(1, kAlternateAddrs[1]); + AddAddress(1, kPublicAddrs[1]); + + // Use only local ports for simplicity. + SetAllocatorFlags(0, kOnlyLocalPorts); + SetAllocatorFlags(1, kOnlyLocalPorts); + + // Create channels and let them go writable, as usual. + CreateChannels(); + EXPECT_TRUE_WAIT_MARGIN(CheckConnected(ep1_ch1(), ep2_ch1()), 1000, 1000); + // Set a value larger than the default value of 2500 ms + int ping_interval_ms = 3456; + IceConfig config = CreateIceConfig(2 * ping_interval_ms, GATHER_ONCE); + config.stable_writable_connection_ping_interval = ping_interval_ms; + ep2_ch1()->SetIceConfig(config); + // After the state becomes COMPLETED and is stable and writable, the + // connection will be pinged once every |ping_interval_ms| milliseconds. + ASSERT_TRUE_WAIT(ep2_ch1()->GetState() == IceTransportState::STATE_COMPLETED, + 1000); + auto connections = ep2_ch1()->connections(); + ASSERT_EQ(2U, connections.size()); + Connection* conn = connections[0]; + EXPECT_TRUE_WAIT(conn->writable(), kMediumTimeout); + + int64_t last_ping_response_ms; + // Burn through some pings so the connection is stable. 
+ for (int i = 0; i < 5; i++) { + last_ping_response_ms = conn->last_ping_response_received(); + EXPECT_TRUE_WAIT( + last_ping_response_ms < conn->last_ping_response_received(), + kDefaultTimeout); + } + EXPECT_TRUE(conn->stable(last_ping_response_ms)) << "Connection not stable"; + int time_elapsed = + conn->last_ping_response_received() - last_ping_response_ms; + RTC_LOG(LS_INFO) << "Time elapsed: " << time_elapsed; + EXPECT_GE(time_elapsed, ping_interval_ms); + + DestroyChannels(); +} + TEST_F(P2PTransportChannelMultihomedTest, TestGetState) { rtc::ScopedFakeClock clock; AddAddress(0, kAlternateAddrs[0]); @@ -3200,7 +3299,7 @@ class P2PTransportChannelPingTest : public ::testing::Test, } if (piggyback_ping_id) { msg.AddAttribute(std::make_unique( - STUN_ATTR_LAST_ICE_CHECK_RECEIVED, piggyback_ping_id.value())); + STUN_ATTR_GOOG_LAST_ICE_CHECK_RECEIVED, piggyback_ping_id.value())); } msg.SetTransactionID(rtc::CreateRandomString(kStunTransactionIdLength)); msg.AddMessageIntegrity(conn->local_candidate().password()); @@ -3266,6 +3365,14 @@ class P2PTransportChannelPingTest : public ::testing::Test, } } + int64_t LastEstimatedDisconnectedTimeMs() const { + if (!last_candidate_change_event_.has_value()) { + return 0; + } else { + return last_candidate_change_event_->estimated_disconnected_time_ms; + } + } + private: std::unique_ptr vss_; rtc::AutoSocketServerThread thread_; @@ -3816,6 +3923,33 @@ TEST_F(P2PTransportChannelPingTest, TestPingOnSwitch) { EXPECT_EQ(conn2->num_pings_sent(), before + 1); } +// Test the field trial send_ping_on_selected_ice_controlling +// that sends a ping directly when selecting a new connection +// on the ICE_CONTROLLING-side (i.e. also initial selection).
+TEST_F(P2PTransportChannelPingTest, TestPingOnSelected) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-IceFieldTrials/send_ping_on_selected_ice_controlling:true/"); + FakePortAllocator pa(rtc::Thread::Current(), nullptr); + P2PTransportChannel ch("receiving state change", 1, &pa); + PrepareChannel(&ch); + ch.SetIceConfig(ch.config()); + ch.SetIceRole(ICEROLE_CONTROLLING); + ch.MaybeStartGathering(); + ch.AddRemoteCandidate(CreateUdpCandidate(LOCAL_PORT_TYPE, "1.1.1.1", 1, 1)); + Connection* conn1 = WaitForConnectionTo(&ch, "1.1.1.1", 1); + ASSERT_TRUE(conn1 != nullptr); + + const int before = conn1->num_pings_sent(); + + // A connection needs to be writable before it is selected for transmission. + conn1->ReceivedPingResponse(LOW_RTT, "id"); + EXPECT_EQ_WAIT(conn1, ch.selected_connection(), kDefaultTimeout); + EXPECT_TRUE(CandidatePairMatchesNetworkRoute(conn1)); + + // And the additional ping should have been sent directly. + EXPECT_EQ(conn1->num_pings_sent(), before + 1); +} + // The controlled side will select a connection as the "selected connection" // based on requests from an unknown address before the controlling side // nominates a connection, and will nominate a connection from an unknown @@ -4086,6 +4220,64 @@ TEST_F(P2PTransportChannelPingTest, EXPECT_EQ(0, reset_selected_candidate_pair_switches()); } +TEST_F(P2PTransportChannelPingTest, TestEstimatedDisconnectedTime) { + rtc::ScopedFakeClock clock; + clock.AdvanceTime(webrtc::TimeDelta::Seconds(1)); + + FakePortAllocator pa(rtc::Thread::Current(), nullptr); + P2PTransportChannel ch("test", 1, &pa); + PrepareChannel(&ch); + ch.SetIceRole(ICEROLE_CONTROLLED); + ch.MaybeStartGathering(); + // The connections have decreasing priority. 
+ Connection* conn1 = + CreateConnectionWithCandidate(&ch, &clock, "1.1.1.1", /* port= */ 1, + /* priority= */ 10, /* writable= */ true); + ASSERT_TRUE(conn1 != nullptr); + Connection* conn2 = + CreateConnectionWithCandidate(&ch, &clock, "2.2.2.2", /* port= */ 2, + /* priority= */ 9, /* writable= */ true); + ASSERT_TRUE(conn2 != nullptr); + + // conn1 is the selected connection because it has a higher priority. + EXPECT_EQ_SIMULATED_WAIT(conn1, ch.selected_connection(), kDefaultTimeout, + clock); + EXPECT_TRUE(CandidatePairMatchesNetworkRoute(conn1)); + // No estimated disconnect time at first connect <=> value is 0. + EXPECT_EQ(LastEstimatedDisconnectedTimeMs(), 0); + + // Use nomination to force switching of selected connection. + int nomination = 1; + + { + clock.AdvanceTime(webrtc::TimeDelta::Seconds(1)); + // This will not parse as STUN, and is considered data + conn1->OnReadPacket("XYZ", 3, rtc::TimeMicros()); + clock.AdvanceTime(webrtc::TimeDelta::Seconds(2)); + + // conn2 is nominated; it becomes selected. + NominateConnection(conn2, nomination++); + EXPECT_EQ(conn2, ch.selected_connection()); + // We got data 2s ago...guess that we lost 2s of connectivity. + EXPECT_EQ(LastEstimatedDisconnectedTimeMs(), 2000); + } + + { + clock.AdvanceTime(webrtc::TimeDelta::Seconds(1)); + conn2->OnReadPacket("XYZ", 3, rtc::TimeMicros()); + + clock.AdvanceTime(webrtc::TimeDelta::Seconds(2)); + ReceivePingOnConnection(conn2, kIceUfrag[1], 1, nomination++); + + clock.AdvanceTime(webrtc::TimeDelta::Millis(500)); + + ReceivePingOnConnection(conn1, kIceUfrag[1], 1, nomination++); + EXPECT_EQ(conn1, ch.selected_connection()); + // We got ping 500ms ago...guess that we lost 500ms of connectivity.
+ EXPECT_EQ(LastEstimatedDisconnectedTimeMs(), 500); + } +} + TEST_F(P2PTransportChannelPingTest, TestControlledAgentIgnoresSmallerNomination) { rtc::ScopedFakeClock clock; @@ -4739,31 +4931,18 @@ TEST_F(P2PTransportChannelMostLikelyToWorkFirstTest, TestTcpTurn) { // when the address is a hostname. The destruction should happen even // if the channel is not destroyed. TEST(P2PTransportChannelResolverTest, HostnameCandidateIsResolved) { - rtc::MockAsyncResolver mock_async_resolver; - EXPECT_CALL(mock_async_resolver, GetError()).WillOnce(Return(0)); - EXPECT_CALL(mock_async_resolver, GetResolvedAddress(_, _)) - .WillOnce(Return(true)); - // Destroy is called asynchronously after the address is resolved, - // so we need a variable to wait on. - bool destroy_called = false; - EXPECT_CALL(mock_async_resolver, Destroy(_)) - .WillOnce(Assign(&destroy_called, true)); - webrtc::MockAsyncResolverFactory mock_async_resolver_factory; - EXPECT_CALL(mock_async_resolver_factory, Create()) - .WillOnce(Return(&mock_async_resolver)); - + ResolverFactoryFixture resolver_fixture; FakePortAllocator allocator(rtc::Thread::Current(), nullptr); - P2PTransportChannel channel("tn", 0, &allocator, - &mock_async_resolver_factory); + auto channel = + P2PTransportChannel::Create("tn", 0, &allocator, &resolver_fixture); Candidate hostname_candidate; SocketAddress hostname_address("fake.test", 1000); hostname_candidate.set_address(hostname_address); - channel.AddRemoteCandidate(hostname_candidate); + channel->AddRemoteCandidate(hostname_candidate); - ASSERT_EQ_WAIT(1u, channel.remote_candidates().size(), kDefaultTimeout); - const RemoteCandidate& candidate = channel.remote_candidates()[0]; + ASSERT_EQ_WAIT(1u, channel->remote_candidates().size(), kDefaultTimeout); + const RemoteCandidate& candidate = channel->remote_candidates()[0]; EXPECT_FALSE(candidate.address().IsUnresolvedIP()); - WAIT(destroy_called, kShortTimeout); } // Test that if we signal a hostname candidate after the remote endpoint 
@@ -4772,11 +4951,6 @@ TEST(P2PTransportChannelResolverTest, HostnameCandidateIsResolved) { // done. TEST_F(P2PTransportChannelTest, PeerReflexiveCandidateBeforeSignalingWithMdnsName) { - rtc::MockAsyncResolver mock_async_resolver; - webrtc::MockAsyncResolverFactory mock_async_resolver_factory; - EXPECT_CALL(mock_async_resolver_factory, Create()) - .WillOnce(Return(&mock_async_resolver)); - // ep1 and ep2 will only gather host candidates with addresses // kPublicAddrs[0] and kPublicAddrs[1], respectively. ConfigureEndpoints(OPEN, OPEN, kOnlyLocalPorts, kOnlyLocalPorts); @@ -4784,7 +4958,9 @@ TEST_F(P2PTransportChannelTest, set_remote_ice_parameter_source(FROM_SETICEPARAMETERS); GetEndpoint(0)->network_manager_.set_mdns_responder( std::make_unique(rtc::Thread::Current())); - GetEndpoint(1)->async_resolver_factory_ = &mock_async_resolver_factory; + + ResolverFactoryFixture resolver_fixture; + GetEndpoint(1)->async_dns_resolver_factory_ = &resolver_fixture; CreateChannels(); // Pause sending candidates from both endpoints until we find out what port // number is assgined to ep1's host candidate. @@ -4799,6 +4975,7 @@ TEST_F(P2PTransportChannelTest, // This is the underlying private IP address of the same candidate at ep1. const auto local_address = rtc::SocketAddress( kPublicAddrs[0].ipaddr(), local_candidate.address().port()); + // Let ep2 signal its candidate to ep1. ep1 should form a candidate // pair and start to ping. After receiving the ping, ep2 discovers a prflx // remote candidate and form a candidate pair as well. @@ -4814,19 +4991,7 @@ TEST_F(P2PTransportChannelTest, EXPECT_EQ(kIceUfrag[0], selected_connection->remote_candidate().username()); EXPECT_EQ(kIcePwd[0], selected_connection->remote_candidate().password()); // Set expectation before ep1 signals a hostname candidate. 
- { - InSequence sequencer; - EXPECT_CALL(mock_async_resolver, Start(_)); - EXPECT_CALL(mock_async_resolver, GetError()).WillOnce(Return(0)); - // Let the mock resolver of ep2 receives the correct resolution. - EXPECT_CALL(mock_async_resolver, GetResolvedAddress(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(local_address), Return(true))); - } - // Destroy is called asynchronously after the address is resolved, - // so we need a variable to wait on. - bool destroy_called = false; - EXPECT_CALL(mock_async_resolver, Destroy(_)) - .WillOnce(Assign(&destroy_called, true)); + resolver_fixture.SetAddressToReturn(local_address); ResumeCandidates(0); // Verify ep2's selected connection is updated to use the 'local' candidate. EXPECT_EQ_WAIT(LOCAL_PORT_TYPE, @@ -4834,7 +4999,6 @@ TEST_F(P2PTransportChannelTest, kMediumTimeout); EXPECT_EQ(selected_connection, ep2_ch1()->selected_connection()); - WAIT(destroy_called, kShortTimeout); DestroyChannels(); } @@ -4844,10 +5008,9 @@ TEST_F(P2PTransportChannelTest, // address after the resolution completes. TEST_F(P2PTransportChannelTest, PeerReflexiveCandidateDuringResolvingHostCandidateWithMdnsName) { - NiceMock mock_async_resolver; - webrtc::MockAsyncResolverFactory mock_async_resolver_factory; - EXPECT_CALL(mock_async_resolver_factory, Create()) - .WillOnce(Return(&mock_async_resolver)); + ResolverFactoryFixture resolver_fixture; + // Prevent resolution until triggered by FireDelayedResolution. + resolver_fixture.DelayResolution(); // ep1 and ep2 will only gather host candidates with addresses // kPublicAddrs[0] and kPublicAddrs[1], respectively. 
@@ -4856,12 +5019,13 @@ TEST_F(P2PTransportChannelTest, set_remote_ice_parameter_source(FROM_SETICEPARAMETERS); GetEndpoint(0)->network_manager_.set_mdns_responder( std::make_unique(rtc::Thread::Current())); - GetEndpoint(1)->async_resolver_factory_ = &mock_async_resolver_factory; + GetEndpoint(1)->async_dns_resolver_factory_ = &resolver_fixture; CreateChannels(); // Pause sending candidates from both endpoints until we find out what port // number is assgined to ep1's host candidate. PauseCandidates(0); PauseCandidates(1); + ASSERT_EQ_WAIT(1u, GetEndpoint(0)->saved_candidates_.size(), kMediumTimeout); ASSERT_EQ(1u, GetEndpoint(0)->saved_candidates_[0]->candidates.size()); const auto& local_candidate = @@ -4871,24 +5035,16 @@ TEST_F(P2PTransportChannelTest, // This is the underlying private IP address of the same candidate at ep1. const auto local_address = rtc::SocketAddress( kPublicAddrs[0].ipaddr(), local_candidate.address().port()); - bool mock_async_resolver_started = false; - // Not signaling done yet, and only make sure we are in the process of - // resolution. - EXPECT_CALL(mock_async_resolver, Start(_)) - .WillOnce(InvokeWithoutArgs([&mock_async_resolver_started]() { - mock_async_resolver_started = true; - })); // Let ep1 signal its hostname candidate to ep2. ResumeCandidates(0); - ASSERT_TRUE_WAIT(mock_async_resolver_started, kMediumTimeout); // Now that ep2 is in the process of resolving the hostname candidate signaled // by ep1. Let ep2 signal its host candidate with an IP address to ep1, so // that ep1 can form a candidate pair, select it and start to ping ep2. ResumeCandidates(1); ASSERT_TRUE_WAIT(ep1_ch1()->selected_connection() != nullptr, kMediumTimeout); // Let the mock resolver of ep2 receives the correct resolution. 
- EXPECT_CALL(mock_async_resolver, GetResolvedAddress(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(local_address), Return(true))); + resolver_fixture.SetAddressToReturn(local_address); + // Upon receiving a ping from ep1, ep2 adds a prflx candidate from the // unknown address and establishes a connection. // @@ -4899,7 +5055,9 @@ TEST_F(P2PTransportChannelTest, ep2_ch1()->selected_connection()->remote_candidate().type()); // ep2 should also be able resolve the hostname candidate. The resolved remote // host candidate should be merged with the prflx remote candidate. - mock_async_resolver.SignalDone(&mock_async_resolver); + + resolver_fixture.FireDelayedResolution(); + EXPECT_EQ_WAIT(LOCAL_PORT_TYPE, ep2_ch1()->selected_connection()->remote_candidate().type(), kMediumTimeout); @@ -4912,10 +5070,7 @@ TEST_F(P2PTransportChannelTest, // which is obfuscated by an mDNS name, and if the peer can complete the name // resolution with the correct IP address, we can have a p2p connection. TEST_F(P2PTransportChannelTest, CanConnectWithHostCandidateWithMdnsName) { - NiceMock mock_async_resolver; - webrtc::MockAsyncResolverFactory mock_async_resolver_factory; - EXPECT_CALL(mock_async_resolver_factory, Create()) - .WillOnce(Return(&mock_async_resolver)); + ResolverFactoryFixture resolver_fixture; // ep1 and ep2 will only gather host candidates with addresses // kPublicAddrs[0] and kPublicAddrs[1], respectively. @@ -4924,7 +5079,7 @@ TEST_F(P2PTransportChannelTest, CanConnectWithHostCandidateWithMdnsName) { set_remote_ice_parameter_source(FROM_SETICEPARAMETERS); GetEndpoint(0)->network_manager_.set_mdns_responder( std::make_unique(rtc::Thread::Current())); - GetEndpoint(1)->async_resolver_factory_ = &mock_async_resolver_factory; + GetEndpoint(1)->async_dns_resolver_factory_ = &resolver_fixture; CreateChannels(); // Pause sending candidates from both endpoints until we find out what port // number is assgined to ep1's host candidate. 
@@ -4941,8 +5096,7 @@ TEST_F(P2PTransportChannelTest, CanConnectWithHostCandidateWithMdnsName) { rtc::SocketAddress resolved_address_ep1(local_candidate_ep1.address()); resolved_address_ep1.SetResolvedIP(kPublicAddrs[0].ipaddr()); - EXPECT_CALL(mock_async_resolver, GetResolvedAddress(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(resolved_address_ep1), Return(true))); + resolver_fixture.SetAddressToReturn(resolved_address_ep1); // Let ep1 signal its hostname candidate to ep2. ResumeCandidates(0); @@ -4966,10 +5120,7 @@ TEST_F(P2PTransportChannelTest, CanConnectWithHostCandidateWithMdnsName) { // this remote host candidate in stats. TEST_F(P2PTransportChannelTest, CandidatesSanitizedInStatsWhenMdnsObfuscationEnabled) { - NiceMock mock_async_resolver; - webrtc::MockAsyncResolverFactory mock_async_resolver_factory; - EXPECT_CALL(mock_async_resolver_factory, Create()) - .WillOnce(Return(&mock_async_resolver)); + ResolverFactoryFixture resolver_fixture; // ep1 and ep2 will gather host candidates with addresses // kPublicAddrs[0] and kPublicAddrs[1], respectively. ep1 also gathers a srflx @@ -4981,7 +5132,7 @@ TEST_F(P2PTransportChannelTest, set_remote_ice_parameter_source(FROM_SETICEPARAMETERS); GetEndpoint(0)->network_manager_.set_mdns_responder( std::make_unique(rtc::Thread::Current())); - GetEndpoint(1)->async_resolver_factory_ = &mock_async_resolver_factory; + GetEndpoint(1)->async_dns_resolver_factory_ = &resolver_fixture; CreateChannels(); // Pause sending candidates from both endpoints until we find out what port // number is assigned to ep1's host candidate. @@ -4999,9 +5150,7 @@ TEST_F(P2PTransportChannelTest, // and let the mock resolver of ep2 receive the correct resolution. 
rtc::SocketAddress resolved_address_ep1(local_candidate_ep1.address()); resolved_address_ep1.SetResolvedIP(kPublicAddrs[0].ipaddr()); - EXPECT_CALL(mock_async_resolver, GetResolvedAddress(_, _)) - .WillOnce( - DoAll(SetArgPointee<1>(resolved_address_ep1), Return(true))); + resolver_fixture.SetAddressToReturn(resolved_address_ep1); break; } } @@ -5150,10 +5299,7 @@ TEST_F(P2PTransportChannelTest, // when it is queried via GetSelectedCandidatePair. TEST_F(P2PTransportChannelTest, SelectedCandidatePairSanitizedWhenMdnsObfuscationEnabled) { - NiceMock mock_async_resolver; - webrtc::MockAsyncResolverFactory mock_async_resolver_factory; - EXPECT_CALL(mock_async_resolver_factory, Create()) - .WillOnce(Return(&mock_async_resolver)); + ResolverFactoryFixture resolver_fixture; // ep1 and ep2 will gather host candidates with addresses // kPublicAddrs[0] and kPublicAddrs[1], respectively. @@ -5162,7 +5308,7 @@ TEST_F(P2PTransportChannelTest, set_remote_ice_parameter_source(FROM_SETICEPARAMETERS); GetEndpoint(0)->network_manager_.set_mdns_responder( std::make_unique(rtc::Thread::Current())); - GetEndpoint(1)->async_resolver_factory_ = &mock_async_resolver_factory; + GetEndpoint(1)->async_dns_resolver_factory_ = &resolver_fixture; CreateChannels(); // Pause sending candidates from both endpoints until we find out what port // number is assigned to ep1's host candidate. @@ -5177,8 +5323,8 @@ TEST_F(P2PTransportChannelTest, // and let the mock resolver of ep2 receive the correct resolution. 
rtc::SocketAddress resolved_address_ep1(local_candidate_ep1.address()); resolved_address_ep1.SetResolvedIP(kPublicAddrs[0].ipaddr()); - EXPECT_CALL(mock_async_resolver, GetResolvedAddress(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(resolved_address_ep1), Return(true))); + resolver_fixture.SetAddressToReturn(resolved_address_ep1); + ResumeCandidates(0); ResumeCandidates(1); @@ -5207,8 +5353,8 @@ TEST_F(P2PTransportChannelTest, // We use one endpoint to test the behavior of adding remote candidates, and // this endpoint only gathers relay candidates. ConfigureEndpoints(OPEN, OPEN, kOnlyRelayPorts, kDefaultPortAllocatorFlags); - GetEndpoint(0)->cd1_.ch_.reset(CreateChannel( - 0, ICE_CANDIDATE_COMPONENT_DEFAULT, kIceParams[0], kIceParams[1])); + GetEndpoint(0)->cd1_.ch_ = CreateChannel(0, ICE_CANDIDATE_COMPONENT_DEFAULT, + kIceParams[0], kIceParams[1]); IceConfig config; // Start gathering and we should have only a single relay port. ep1_ch1()->SetIceConfig(config); @@ -5255,10 +5401,14 @@ TEST_F(P2PTransportChannelTest, class MockMdnsResponder : public webrtc::MdnsResponderInterface { public: - MOCK_METHOD2(CreateNameForAddress, - void(const rtc::IPAddress&, NameCreatedCallback)); - MOCK_METHOD2(RemoveNameForAddress, - void(const rtc::IPAddress&, NameRemovedCallback)); + MOCK_METHOD(void, + CreateNameForAddress, + (const rtc::IPAddress&, NameCreatedCallback), + (override)); + MOCK_METHOD(void, + RemoveNameForAddress, + (const rtc::IPAddress&, NameRemovedCallback), + (override)); }; TEST_F(P2PTransportChannelTest, @@ -5539,6 +5689,76 @@ TEST_F(P2PTransportChannelTest, DestroyChannels(); } +// Verify that things break unless +// - both parties use the surface_ice_candidates_on_ice_transport_type_changed +// - both parties loosen candidate filter at the same time (approx.). +// +// i.e surface_ice_candidates_on_ice_transport_type_changed requires +// coordination outside of webrtc to function properly. 
+TEST_F(P2PTransportChannelTest, SurfaceRequiresCoordination) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-IceFieldTrials/skip_relay_to_non_relay_connections:true/"); + rtc::ScopedFakeClock clock; + + ConfigureEndpoints( + OPEN, OPEN, + kDefaultPortAllocatorFlags | PORTALLOCATOR_ENABLE_SHARED_SOCKET, + kDefaultPortAllocatorFlags | PORTALLOCATOR_ENABLE_SHARED_SOCKET); + auto* ep1 = GetEndpoint(0); + auto* ep2 = GetEndpoint(1); + ep1->allocator_->SetCandidateFilter(CF_RELAY); + ep2->allocator_->SetCandidateFilter(CF_ALL); + // Enable continual gathering and also resurfacing gathered candidates upon + // the candidate filter changed in the ICE configuration. + IceConfig ice_config = CreateIceConfig(1000, GATHER_CONTINUALLY); + ice_config.surface_ice_candidates_on_ice_transport_type_changed = true; + // Pause candidates gathering so we can gather all types of candidates. See + // P2PTransportChannel::OnConnectionStateChange, where we would stop the + // gathering when we have a strongly connected candidate pair. + PauseCandidates(0); + PauseCandidates(1); + CreateChannels(ice_config, ice_config); + + // On the caller we only have relay, + // on the callee we have host, srflx and relay. + EXPECT_TRUE_SIMULATED_WAIT(ep1->saved_candidates_.size() == 1u, + kDefaultTimeout, clock); + EXPECT_TRUE_SIMULATED_WAIT(ep2->saved_candidates_.size() == 3u, + kDefaultTimeout, clock); + + ResumeCandidates(0); + ResumeCandidates(1); + ASSERT_TRUE_SIMULATED_WAIT( + ep1_ch1()->selected_connection() != nullptr && + RELAY_PORT_TYPE == + ep1_ch1()->selected_connection()->local_candidate().type() && + ep2_ch1()->selected_connection() != nullptr && + RELAY_PORT_TYPE == + ep1_ch1()->selected_connection()->remote_candidate().type(), + kDefaultTimeout, clock); + ASSERT_TRUE_SIMULATED_WAIT(ep2_ch1()->selected_connection() != nullptr, + kDefaultTimeout, clock); + + // Wait until the callee discards it's candidates + // since they don't manage to connect. 
+ SIMULATED_WAIT(false, 300000, clock); + + // And then loosen caller candidate filter. + ep1->allocator_->SetCandidateFilter(CF_ALL); + + SIMULATED_WAIT(false, kDefaultTimeout, clock); + + // No p2p connection will be made, it will remain on relay. + EXPECT_TRUE(ep1_ch1()->selected_connection() != nullptr && + RELAY_PORT_TYPE == + ep1_ch1()->selected_connection()->local_candidate().type() && + ep2_ch1()->selected_connection() != nullptr && + RELAY_PORT_TYPE == + ep1_ch1()->selected_connection()->remote_candidate().type()); + + DestroyChannels(); +} + TEST_F(P2PTransportChannelPingTest, TestInitialSelectDampening0) { webrtc::test::ScopedFieldTrials field_trials( "WebRTC-IceFieldTrials/initial_select_dampening:0/"); @@ -5651,4 +5871,322 @@ TEST(P2PTransportChannel, InjectIceController) { /* event_log = */ nullptr, &factory); } +class ForgetLearnedStateController : public cricket::BasicIceController { + public: + explicit ForgetLearnedStateController( + const cricket::IceControllerFactoryArgs& args) + : cricket::BasicIceController(args) {} + + SwitchResult SortAndSwitchConnection(IceControllerEvent reason) override { + auto result = cricket::BasicIceController::SortAndSwitchConnection(reason); + if (forget_connnection_) { + result.connections_to_forget_state_on.push_back(forget_connnection_); + forget_connnection_ = nullptr; + } + result.recheck_event = + IceControllerEvent(IceControllerEvent::ICE_CONTROLLER_RECHECK); + result.recheck_event->recheck_delay_ms = 100; + return result; + } + + void ForgetThisConnectionNextTimeSortAndSwitchConnectionIsCalled( + Connection* con) { + forget_connnection_ = con; + } + + private: + Connection* forget_connnection_ = nullptr; +}; + +class ForgetLearnedStateControllerFactory + : public cricket::IceControllerFactoryInterface { + public: + std::unique_ptr Create( + const cricket::IceControllerFactoryArgs& args) override { + auto controller = std::make_unique(args); + // Keep a pointer to allow modifying calls. 
+ // Must not be used after the p2ptransportchannel has been destructed. + controller_ = controller.get(); + return controller; + } + virtual ~ForgetLearnedStateControllerFactory() = default; + + ForgetLearnedStateController* controller_; +}; + +TEST_F(P2PTransportChannelPingTest, TestForgetLearnedState) { + ForgetLearnedStateControllerFactory factory; + FakePortAllocator pa(rtc::Thread::Current(), nullptr); + auto ch = P2PTransportChannel::Create("ping sufficiently", 1, &pa, nullptr, + nullptr, &factory); + PrepareChannel(ch.get()); + ch->MaybeStartGathering(); + ch->AddRemoteCandidate(CreateUdpCandidate(LOCAL_PORT_TYPE, "1.1.1.1", 1, 1)); + ch->AddRemoteCandidate(CreateUdpCandidate(LOCAL_PORT_TYPE, "2.2.2.2", 2, 2)); + + Connection* conn1 = WaitForConnectionTo(ch.get(), "1.1.1.1", 1); + Connection* conn2 = WaitForConnectionTo(ch.get(), "2.2.2.2", 2); + ASSERT_TRUE(conn1 != nullptr); + ASSERT_TRUE(conn2 != nullptr); + + // Wait for conn1 to be selected. + conn1->ReceivedPingResponse(LOW_RTT, "id"); + EXPECT_EQ_WAIT(conn1, ch->selected_connection(), kMediumTimeout); + + conn2->ReceivedPingResponse(LOW_RTT, "id"); + EXPECT_TRUE(conn2->writable()); + + // Now let the ice controller signal to P2PTransportChannel that it + // should Forget conn2. + factory.controller_ + ->ForgetThisConnectionNextTimeSortAndSwitchConnectionIsCalled(conn2); + + // We don't have a mock Connection, so verify this by checking that it + // is no longer writable. + EXPECT_EQ_WAIT(false, conn2->writable(), kMediumTimeout); +} + +TEST_F(P2PTransportChannelTest, DisableDnsLookupsWithTransportPolicyRelay) { + ConfigureEndpoints(OPEN, OPEN, kDefaultPortAllocatorFlags, + kDefaultPortAllocatorFlags); + auto* ep1 = GetEndpoint(0); + ep1->allocator_->SetCandidateFilter(CF_RELAY); + + std::unique_ptr mock_async_resolver = + std::make_unique(); + // This test expects resolution to not be started. 
+ EXPECT_CALL(*mock_async_resolver, Start(_, _)).Times(0); + + webrtc::MockAsyncDnsResolverFactory mock_async_resolver_factory; + ON_CALL(mock_async_resolver_factory, Create()) + .WillByDefault( + [&mock_async_resolver]() { return std::move(mock_async_resolver); }); + + ep1->async_dns_resolver_factory_ = &mock_async_resolver_factory; + + CreateChannels(); + + ep1_ch1()->AddRemoteCandidate( + CreateUdpCandidate(LOCAL_PORT_TYPE, "hostname.test", 1, 100)); + + DestroyChannels(); +} + +TEST_F(P2PTransportChannelTest, DisableDnsLookupsWithTransportPolicyNone) { + ConfigureEndpoints(OPEN, OPEN, kDefaultPortAllocatorFlags, + kDefaultPortAllocatorFlags); + auto* ep1 = GetEndpoint(0); + ep1->allocator_->SetCandidateFilter(CF_NONE); + + std::unique_ptr mock_async_resolver = + std::make_unique(); + // This test expects resolution to not be started. + EXPECT_CALL(*mock_async_resolver, Start(_, _)).Times(0); + + webrtc::MockAsyncDnsResolverFactory mock_async_resolver_factory; + ON_CALL(mock_async_resolver_factory, Create()) + .WillByDefault( + [&mock_async_resolver]() { return std::move(mock_async_resolver); }); + + ep1->async_dns_resolver_factory_ = &mock_async_resolver_factory; + + CreateChannels(); + + ep1_ch1()->AddRemoteCandidate( + CreateUdpCandidate(LOCAL_PORT_TYPE, "hostname.test", 1, 100)); + + DestroyChannels(); +} + +TEST_F(P2PTransportChannelTest, EnableDnsLookupsWithTransportPolicyNoHost) { + ConfigureEndpoints(OPEN, OPEN, kDefaultPortAllocatorFlags, + kDefaultPortAllocatorFlags); + auto* ep1 = GetEndpoint(0); + ep1->allocator_->SetCandidateFilter(CF_ALL & ~CF_HOST); + + std::unique_ptr mock_async_resolver = + std::make_unique(); + bool lookup_started = false; + EXPECT_CALL(*mock_async_resolver, Start(_, _)) + .WillOnce(Assign(&lookup_started, true)); + + webrtc::MockAsyncDnsResolverFactory mock_async_resolver_factory; + EXPECT_CALL(mock_async_resolver_factory, Create()) + .WillOnce( + [&mock_async_resolver]() { return std::move(mock_async_resolver); }); + + 
ep1->async_dns_resolver_factory_ = &mock_async_resolver_factory; + + CreateChannels(); + + ep1_ch1()->AddRemoteCandidate( + CreateUdpCandidate(LOCAL_PORT_TYPE, "hostname.test", 1, 100)); + + EXPECT_TRUE(lookup_started); + + DestroyChannels(); +} + +class GatherAfterConnectedTest : public P2PTransportChannelTest, + public ::testing::WithParamInterface {}; + +TEST_P(GatherAfterConnectedTest, GatherAfterConnected) { + const bool stop_gather_on_strongly_connected = GetParam(); + const std::string field_trial = + std::string("WebRTC-IceFieldTrials/stop_gather_on_strongly_connected:") + + (stop_gather_on_strongly_connected ? "true/" : "false/"); + webrtc::test::ScopedFieldTrials field_trials(field_trial); + + rtc::ScopedFakeClock clock; + // Use local + relay + constexpr uint32_t flags = + kDefaultPortAllocatorFlags | PORTALLOCATOR_ENABLE_SHARED_SOCKET | + PORTALLOCATOR_DISABLE_STUN | PORTALLOCATOR_DISABLE_TCP; + ConfigureEndpoints(OPEN, OPEN, flags, flags); + auto* ep1 = GetEndpoint(0); + auto* ep2 = GetEndpoint(1); + ep1->allocator_->SetCandidateFilter(CF_ALL); + ep2->allocator_->SetCandidateFilter(CF_ALL); + + // Use step delay 3s which is long enough for + // connection to be established before managing to gather relay candidates. + int delay = 3000; + SetAllocationStepDelay(0, delay); + SetAllocationStepDelay(1, delay); + IceConfig ice_config = CreateIceConfig(1000, GATHER_CONTINUALLY); + CreateChannels(ice_config, ice_config); + + PauseCandidates(0); + PauseCandidates(1); + + // We have gathered host candidates but not relay. 
+ ASSERT_TRUE_SIMULATED_WAIT(ep1->saved_candidates_.size() == 1u && + ep2->saved_candidates_.size() == 1u, + kDefaultTimeout, clock); + + ResumeCandidates(0); + ResumeCandidates(1); + + PauseCandidates(0); + PauseCandidates(1); + + ASSERT_TRUE_SIMULATED_WAIT(ep1_ch1()->remote_candidates().size() == 1 && + ep2_ch1()->remote_candidates().size() == 1, + kDefaultTimeout, clock); + + ASSERT_TRUE_SIMULATED_WAIT( + ep1_ch1()->selected_connection() && ep2_ch1()->selected_connection(), + kDefaultTimeout, clock); + + clock.AdvanceTime(webrtc::TimeDelta::Millis(10 * delay)); + + if (stop_gather_on_strongly_connected) { + // The relay candiates gathered has not been propagated to channel. + EXPECT_EQ(ep1->saved_candidates_.size(), 0u); + EXPECT_EQ(ep2->saved_candidates_.size(), 0u); + } else { + // The relay candiates gathered has been propagated to channel. + EXPECT_EQ(ep1->saved_candidates_.size(), 1u); + EXPECT_EQ(ep2->saved_candidates_.size(), 1u); + } +} + +TEST_P(GatherAfterConnectedTest, GatherAfterConnectedMultiHomed) { + const bool stop_gather_on_strongly_connected = GetParam(); + const std::string field_trial = + std::string("WebRTC-IceFieldTrials/stop_gather_on_strongly_connected:") + + (stop_gather_on_strongly_connected ? "true/" : "false/"); + webrtc::test::ScopedFieldTrials field_trials(field_trial); + + rtc::ScopedFakeClock clock; + // Use local + relay + constexpr uint32_t flags = + kDefaultPortAllocatorFlags | PORTALLOCATOR_ENABLE_SHARED_SOCKET | + PORTALLOCATOR_DISABLE_STUN | PORTALLOCATOR_DISABLE_TCP; + AddAddress(0, kAlternateAddrs[0]); + ConfigureEndpoints(OPEN, OPEN, flags, flags); + auto* ep1 = GetEndpoint(0); + auto* ep2 = GetEndpoint(1); + ep1->allocator_->SetCandidateFilter(CF_ALL); + ep2->allocator_->SetCandidateFilter(CF_ALL); + + // Use step delay 3s which is long enough for + // connection to be established before managing to gather relay candidates. 
+ int delay = 3000; + SetAllocationStepDelay(0, delay); + SetAllocationStepDelay(1, delay); + IceConfig ice_config = CreateIceConfig(1000, GATHER_CONTINUALLY); + CreateChannels(ice_config, ice_config); + + PauseCandidates(0); + PauseCandidates(1); + + // We have gathered host candidates but not relay. + ASSERT_TRUE_SIMULATED_WAIT(ep1->saved_candidates_.size() == 2u && + ep2->saved_candidates_.size() == 1u, + kDefaultTimeout, clock); + + ResumeCandidates(0); + ResumeCandidates(1); + + PauseCandidates(0); + PauseCandidates(1); + + ASSERT_TRUE_SIMULATED_WAIT(ep1_ch1()->remote_candidates().size() == 1 && + ep2_ch1()->remote_candidates().size() == 2, + kDefaultTimeout, clock); + + ASSERT_TRUE_SIMULATED_WAIT( + ep1_ch1()->selected_connection() && ep2_ch1()->selected_connection(), + kDefaultTimeout, clock); + + clock.AdvanceTime(webrtc::TimeDelta::Millis(10 * delay)); + + if (stop_gather_on_strongly_connected) { + // The relay candiates gathered has not been propagated to channel. + EXPECT_EQ(ep1->saved_candidates_.size(), 0u); + EXPECT_EQ(ep2->saved_candidates_.size(), 0u); + } else { + // The relay candiates gathered has been propagated. + EXPECT_EQ(ep1->saved_candidates_.size(), 2u); + EXPECT_EQ(ep2->saved_candidates_.size(), 1u); + } +} + +INSTANTIATE_TEST_SUITE_P(GatherAfterConnectedTest, + GatherAfterConnectedTest, + ::testing::Values(true, false)); + +// Tests no candidates are generated with old ice ufrag/passwd after an ice +// restart even if continual gathering is enabled. +TEST_F(P2PTransportChannelTest, TestIceNoOldCandidatesAfterIceRestart) { + rtc::ScopedFakeClock clock; + AddAddress(0, kAlternateAddrs[0]); + ConfigureEndpoints(OPEN, OPEN, kDefaultPortAllocatorFlags, + kDefaultPortAllocatorFlags); + + // gathers continually. 
+ IceConfig config = CreateIceConfig(1000, GATHER_CONTINUALLY); + CreateChannels(config, config); + + EXPECT_TRUE_SIMULATED_WAIT(CheckConnected(ep1_ch1(), ep2_ch1()), + kDefaultTimeout, clock); + + PauseCandidates(0); + + ep1_ch1()->SetIceParameters(kIceParams[3]); + ep1_ch1()->MaybeStartGathering(); + + EXPECT_TRUE_SIMULATED_WAIT(GetEndpoint(0)->saved_candidates_.size() > 0, + kDefaultTimeout, clock); + + for (const auto& cd : GetEndpoint(0)->saved_candidates_) { + for (const auto& c : cd->candidates) { + EXPECT_EQ(c.username(), kIceUfrag[3]); + } + } + + DestroyChannels(); +} + } // namespace cricket diff --git a/p2p/base/port.cc b/p2p/base/port.cc index 035d3d4bb3..a03a0d6a66 100644 --- a/p2p/base/port.cc +++ b/p2p/base/port.cc @@ -33,6 +33,7 @@ #include "rtc_base/string_utils.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/third_party/base64/base64.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" namespace { @@ -104,16 +105,6 @@ std::string Port::ComputeFoundation(const std::string& type, return rtc::ToString(rtc::ComputeCrc32(sb.Release())); } -CandidateStats::CandidateStats() = default; - -CandidateStats::CandidateStats(const CandidateStats&) = default; - -CandidateStats::CandidateStats(Candidate candidate) { - this->candidate = candidate; -} - -CandidateStats::~CandidateStats() = default; - Port::Port(rtc::Thread* thread, const std::string& type, rtc::PacketSocketFactory* factory, @@ -137,6 +128,7 @@ Port::Port(rtc::Thread* thread, tiebreaker_(0), shared_socket_(true), weak_factory_(this) { + RTC_DCHECK(factory_ != NULL); Construct(); } @@ -188,6 +180,9 @@ void Port::Construct() { } Port::~Port() { + RTC_DCHECK_RUN_ON(thread_); + CancelPendingTasks(); + // Delete all of the remaining connections. We copy the list up front // because each deletion will cause it to be modified. 
@@ -490,7 +485,8 @@ bool Port::GetStunMessage(const char* data, } // If ICE, and the MESSAGE-INTEGRITY is bad, fail with a 401 Unauthorized - if (!stun_msg->ValidateMessageIntegrity(data, size, password_)) { + if (stun_msg->ValidateMessageIntegrity(password_) != + StunMessage::IntegrityStatus::kIntegrityOk) { RTC_LOG(LS_ERROR) << ToString() << ": Received " << StunMethodToString(stun_msg->type()) << " with bad M-I from " << addr.ToSensitiveString() @@ -556,7 +552,8 @@ bool Port::GetStunMessage(const char* data, // No stun attributes will be verified, if it's stun indication message. // Returning from end of the this method. } else if (stun_msg->type() == GOOG_PING_REQUEST) { - if (!stun_msg->ValidateMessageIntegrity32(data, size, password_)) { + if (stun_msg->ValidateMessageIntegrity(password_) != + StunMessage::IntegrityStatus::kIntegrityOk) { RTC_LOG(LS_ERROR) << ToString() << ": Received " << StunMethodToString(stun_msg->type()) << " with bad M-I from " << addr.ToSensitiveString() @@ -609,6 +606,16 @@ rtc::DiffServCodePoint Port::StunDscpValue() const { return rtc::DSCP_NO_CHANGE; } +void Port::set_timeout_delay(int delay) { + RTC_DCHECK_RUN_ON(thread_); + // Although this method is meant to only be used by tests, some downstream + // projects have started using it. Ideally we should update our tests to not + // require to modify this state and instead use a testing harness that allows + // adjusting the clock and then just use the kPortTimeoutDelay constant + // directly. + timeout_delay_ = delay; +} + bool Port::ParseStunUsername(const StunMessage* stun_msg, std::string* local_ufrag, std::string* remote_ufrag) const { @@ -818,7 +825,15 @@ void Port::Prune() { thread_->Post(RTC_FROM_HERE, this, MSG_DESTROY_IF_DEAD); } +// Call to stop any currently pending operations from running. 
+void Port::CancelPendingTasks() { + TRACE_EVENT0("webrtc", "Port::CancelPendingTasks"); + RTC_DCHECK_RUN_ON(thread_); + thread_->Clear(this); +} + void Port::OnMessage(rtc::Message* pmsg) { + RTC_DCHECK_RUN_ON(thread_); RTC_DCHECK(pmsg->message_id == MSG_DESTROY_IF_DEAD); bool dead = (state_ == State::INIT || state_ == State::PRUNED) && @@ -829,6 +844,14 @@ void Port::OnMessage(rtc::Message* pmsg) { } } +void Port::SubscribePortDestroyed( + std::function callback) { + port_destroyed_callback_list_.AddReceiver(callback); +} + +void Port::SendPortDestroyed(Port* port) { + port_destroyed_callback_list_.Send(port); +} void Port::OnNetworkTypeChanged(const rtc::Network* network) { RTC_DCHECK(network == network_); @@ -893,7 +916,7 @@ void Port::OnConnectionDestroyed(Connection* conn) { void Port::Destroy() { RTC_DCHECK(connections_.empty()); RTC_LOG(LS_INFO) << ToString() << ": Port deleted"; - SignalDestroyed(this); + SendPortDestroyed(this); delete this; } diff --git a/p2p/base/port.h b/p2p/base/port.h index 893e80b20f..2c18f1adeb 100644 --- a/p2p/base/port.h +++ b/p2p/base/port.h @@ -33,6 +33,7 @@ #include "p2p/base/port_interface.h" #include "p2p/base/stun_request.h" #include "rtc_base/async_packet_socket.h" +#include "rtc_base/callback_list.h" #include "rtc_base/checks.h" #include "rtc_base/net_helper.h" #include "rtc_base/network.h" @@ -98,14 +99,24 @@ class StunStats { // Stats that we can return about a candidate. 
class CandidateStats { public: - CandidateStats(); - explicit CandidateStats(Candidate candidate); - CandidateStats(const CandidateStats&); - ~CandidateStats(); + CandidateStats() = default; + CandidateStats(const CandidateStats&) = default; + CandidateStats(CandidateStats&&) = default; + CandidateStats(Candidate candidate, + absl::optional stats = absl::nullopt) + : candidate_(std::move(candidate)), stun_stats_(std::move(stats)) {} + ~CandidateStats() = default; - Candidate candidate; + CandidateStats& operator=(const CandidateStats& other) = default; + + const Candidate& candidate() const { return candidate_; } + + const absl::optional& stun_stats() const { return stun_stats_; } + + private: + Candidate candidate_; // STUN port stats if this candidate is a STUN candidate. - absl::optional stun_stats; + absl::optional stun_stats_; }; typedef std::vector CandidateStatsList; @@ -150,6 +161,8 @@ struct CandidatePairChangeEvent { CandidatePair selected_candidate_pair; int64_t last_data_received_ms; std::string reason; + // How long do we estimate that we've been disconnected. + int64_t estimated_disconnected_time_ms; }; typedef std::set ServerAddresses; @@ -207,14 +220,14 @@ class Port : public PortInterface, // Allows a port to be destroyed if no connection is using it. void Prune(); + // Call to stop any currently pending operations from running. + void CancelPendingTasks(); + // The thread on which this port performs its I/O. rtc::Thread* thread() { return thread_; } // The factory used to create the sockets of this port. rtc::PacketSocketFactory* socket_factory() const { return factory_; } - void set_socket_factory(rtc::PacketSocketFactory* factory) { - factory_ = factory; - } // For debugging purposes. const std::string& content_name() const { return content_name_; } @@ -264,6 +277,9 @@ class Port : public PortInterface, // connection. 
sigslot::signal1 SignalPortError; + void SubscribePortDestroyed( + std::function callback) override; + void SendPortDestroyed(Port* port); // Returns a map containing all of the connections of this port, keyed by the // remote address. typedef std::map AddressMap; @@ -320,7 +336,7 @@ class Port : public PortInterface, uint16_t max_port() { return max_port_; } // Timeout shortening function to speed up unit tests. - void set_timeout_delay(int delay) { timeout_delay_ = delay; } + void set_timeout_delay(int delay); // This method will return local and remote username fragements from the // stun username attribute if present. @@ -435,8 +451,8 @@ class Port : public PortInterface, void OnNetworkTypeChanged(const rtc::Network* network); - rtc::Thread* thread_; - rtc::PacketSocketFactory* factory_; + rtc::Thread* const thread_; + rtc::PacketSocketFactory* const factory_; std::string type_; bool send_retransmit_count_attribute_; rtc::Network* network_; @@ -482,6 +498,7 @@ class Port : public PortInterface, bool is_final); friend class Connection; + webrtc::CallbackList port_destroyed_callback_list_; }; } // namespace cricket diff --git a/p2p/base/port_allocator.cc b/p2p/base/port_allocator.cc index b13896c4bc..d8ff637e2c 100644 --- a/p2p/base/port_allocator.cc +++ b/p2p/base/port_allocator.cc @@ -317,7 +317,8 @@ Candidate PortAllocator::SanitizeCandidate(const Candidate& c) const { // For a local host candidate, we need to conceal its IP address candidate if // the mDNS obfuscation is enabled. bool use_hostname_address = - c.type() == LOCAL_PORT_TYPE && MdnsObfuscationEnabled(); + (c.type() == LOCAL_PORT_TYPE || c.type() == PRFLX_PORT_TYPE) && + MdnsObfuscationEnabled(); // If adapter enumeration is disabled or host candidates are disabled, // clear the raddr of STUN candidates to avoid local address leakage. 
bool filter_stun_related_address = diff --git a/p2p/base/port_allocator.h b/p2p/base/port_allocator.h index 4bbe56c0b5..33a23484f2 100644 --- a/p2p/base/port_allocator.h +++ b/p2p/base/port_allocator.h @@ -16,6 +16,7 @@ #include #include +#include "api/sequence_checker.h" #include "api/transport/enums.h" #include "p2p/base/port.h" #include "p2p/base/port_interface.h" @@ -25,7 +26,6 @@ #include "rtc_base/system/rtc_export.h" #include "rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" namespace webrtc { class TurnCustomizer; @@ -638,7 +638,7 @@ class RTC_EXPORT PortAllocator : public sigslot::has_slots<> { bool allow_tcp_listen_; uint32_t candidate_filter_; std::string origin_; - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; private: ServerAddresses stun_servers_; diff --git a/p2p/base/port_allocator_unittest.cc b/p2p/base/port_allocator_unittest.cc index 70946a3d81..cbac5cccaf 100644 --- a/p2p/base/port_allocator_unittest.cc +++ b/p2p/base/port_allocator_unittest.cc @@ -305,3 +305,56 @@ TEST_F(PortAllocatorTest, RestrictIceCredentialsChange) { credentials[0].pwd)); allocator_->DiscardCandidatePool(); } + +// Constants for testing candidates +const char kIpv4Address[] = "12.34.56.78"; +const char kIpv4AddressWithPort[] = "12.34.56.78:443"; + +TEST_F(PortAllocatorTest, SanitizeEmptyCandidateDefaultConfig) { + cricket::Candidate input; + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_EQ("", output.address().ipaddr().ToString()); +} + +TEST_F(PortAllocatorTest, SanitizeIpv4CandidateDefaultConfig) { + cricket::Candidate input(1, "udp", rtc::SocketAddress(kIpv4Address, 443), 1, + "username", "password", cricket::LOCAL_PORT_TYPE, 1, + "foundation", 1, 1); + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_EQ(kIpv4AddressWithPort, output.address().ToString()); + EXPECT_EQ(kIpv4Address, output.address().ipaddr().ToString()); +} + 
+TEST_F(PortAllocatorTest, SanitizeIpv4CandidateMdnsObfuscationEnabled) { + allocator_->SetMdnsObfuscationEnabledForTesting(true); + cricket::Candidate input(1, "udp", rtc::SocketAddress(kIpv4Address, 443), 1, + "username", "password", cricket::LOCAL_PORT_TYPE, 1, + "foundation", 1, 1); + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_NE(kIpv4AddressWithPort, output.address().ToString()); + EXPECT_EQ("", output.address().ipaddr().ToString()); +} + +TEST_F(PortAllocatorTest, SanitizePrflxCandidateMdnsObfuscationEnabled) { + allocator_->SetMdnsObfuscationEnabledForTesting(true); + // Create the candidate from an IP literal. This populates the hostname. + cricket::Candidate input(1, "udp", rtc::SocketAddress(kIpv4Address, 443), 1, + "username", "password", cricket::PRFLX_PORT_TYPE, 1, + "foundation", 1, 1); + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_NE(kIpv4AddressWithPort, output.address().ToString()); + EXPECT_EQ("", output.address().ipaddr().ToString()); +} + +TEST_F(PortAllocatorTest, SanitizeIpv4NonLiteralMdnsObfuscationEnabled) { + // Create the candidate with an empty hostname. 
+ allocator_->SetMdnsObfuscationEnabledForTesting(true); + rtc::IPAddress ip; + EXPECT_TRUE(IPFromString(kIpv4Address, &ip)); + cricket::Candidate input(1, "udp", rtc::SocketAddress(ip, 443), 1, "username", + "password", cricket::LOCAL_PORT_TYPE, 1, + "foundation", 1, 1); + cricket::Candidate output = allocator_->SanitizeCandidate(input); + EXPECT_NE(kIpv4AddressWithPort, output.address().ToString()); + EXPECT_EQ("", output.address().ipaddr().ToString()); +} diff --git a/p2p/base/port_interface.h b/p2p/base/port_interface.h index 39eae18a0d..73c8e36c78 100644 --- a/p2p/base/port_interface.h +++ b/p2p/base/port_interface.h @@ -12,12 +12,14 @@ #define P2P_BASE_PORT_INTERFACE_H_ #include +#include #include #include "absl/types/optional.h" #include "api/candidate.h" #include "p2p/base/transport_description.h" #include "rtc_base/async_packet_socket.h" +#include "rtc_base/callback_list.h" #include "rtc_base/socket_address.h" namespace rtc { @@ -112,7 +114,8 @@ class PortInterface { // Signaled when this port decides to delete itself because it no longer has // any usefulness. - sigslot::signal1 SignalDestroyed; + virtual void SubscribePortDestroyed( + std::function callback) = 0; // Signaled when Port discovers ice role conflict with the peer. 
sigslot::signal1 SignalRoleConflict; diff --git a/p2p/base/port_unittest.cc b/p2p/base/port_unittest.cc index 7703a9c281..293a8d1f8b 100644 --- a/p2p/base/port_unittest.cc +++ b/p2p/base/port_unittest.cc @@ -270,7 +270,8 @@ class TestChannel : public sigslot::has_slots<> { explicit TestChannel(std::unique_ptr p1) : port_(std::move(p1)) { port_->SignalPortComplete.connect(this, &TestChannel::OnPortComplete); port_->SignalUnknownAddress.connect(this, &TestChannel::OnUnknownAddress); - port_->SignalDestroyed.connect(this, &TestChannel::OnSrcPortDestroyed); + port_->SubscribePortDestroyed( + [this](PortInterface* port) { OnSrcPortDestroyed(port); }); } int complete_count() { return complete_count_; } @@ -400,7 +401,7 @@ class PortTest : public ::testing::Test, public sigslot::has_slots<> { nat_factory2_(ss_.get(), kNatAddr2, SocketAddress()), nat_socket_factory1_(&nat_factory1_), nat_socket_factory2_(&nat_factory2_), - stun_server_(TestStunServer::Create(&main_, kStunAddr)), + stun_server_(TestStunServer::Create(ss_.get(), kStunAddr)), turn_server_(&main_, kTurnUdpIntAddr, kTurnUdpExtAddr), username_(rtc::CreateRandomString(ICE_UFRAG_LENGTH)), password_(rtc::CreateRandomString(ICE_PWD_LENGTH)), @@ -777,7 +778,8 @@ class PortTest : public ::testing::Test, public sigslot::has_slots<> { bool role_conflict() const { return role_conflict_; } void ConnectToSignalDestroyed(PortInterface* port) { - port->SignalDestroyed.connect(this, &PortTest::OnDestroyed); + port->SubscribePortDestroyed( + [this](PortInterface* port) { OnDestroyed(port); }); } void OnDestroyed(PortInterface* port) { ++ports_destroyed_; } @@ -1724,9 +1726,8 @@ TEST_F(PortTest, TestSendStunMessage) { EXPECT_EQ(kDefaultPrflxPriority, priority_attr->value()); EXPECT_EQ("rfrag:lfrag", username_attr->GetString()); EXPECT_TRUE(msg->GetByteString(STUN_ATTR_MESSAGE_INTEGRITY) != NULL); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( - lport->last_stun_buf()->data(), lport->last_stun_buf()->size(), - "rpass")); + 
EXPECT_EQ(StunMessage::IntegrityStatus::kIntegrityOk, + msg->ValidateMessageIntegrity("rpass")); const StunUInt64Attribute* ice_controlling_attr = msg->GetUInt64(STUN_ATTR_ICE_CONTROLLING); ASSERT_TRUE(ice_controlling_attr != NULL); @@ -1765,9 +1766,8 @@ TEST_F(PortTest, TestSendStunMessage) { ASSERT_TRUE(addr_attr != NULL); EXPECT_EQ(lport->Candidates()[0].address(), addr_attr->GetAddress()); EXPECT_TRUE(msg->GetByteString(STUN_ATTR_MESSAGE_INTEGRITY) != NULL); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( - rport->last_stun_buf()->data(), rport->last_stun_buf()->size(), - "rpass")); + EXPECT_EQ(StunMessage::IntegrityStatus::kIntegrityOk, + msg->ValidateMessageIntegrity("rpass")); EXPECT_TRUE(msg->GetUInt32(STUN_ATTR_FINGERPRINT) != NULL); EXPECT_TRUE(StunMessage::ValidateFingerprint( lport->last_stun_buf()->data(), lport->last_stun_buf()->size())); @@ -1796,9 +1796,8 @@ TEST_F(PortTest, TestSendStunMessage) { EXPECT_EQ(STUN_ERROR_SERVER_ERROR, error_attr->code()); EXPECT_EQ(std::string(STUN_ERROR_REASON_SERVER_ERROR), error_attr->reason()); EXPECT_TRUE(msg->GetByteString(STUN_ATTR_MESSAGE_INTEGRITY) != NULL); - EXPECT_TRUE(StunMessage::ValidateMessageIntegrity( - rport->last_stun_buf()->data(), rport->last_stun_buf()->size(), - "rpass")); + EXPECT_EQ(StunMessage::IntegrityStatus::kIntegrityOk, + msg->ValidateMessageIntegrity("rpass")); EXPECT_TRUE(msg->GetUInt32(STUN_ATTR_FINGERPRINT) != NULL); EXPECT_TRUE(StunMessage::ValidateFingerprint( lport->last_stun_buf()->data(), lport->last_stun_buf()->size())); @@ -2052,7 +2051,7 @@ TEST_F(PortTest, TestNetworkInfoAttribute) { ASSERT_TRUE_WAIT(lport->last_stun_msg() != NULL, kDefaultTimeout); IceMessage* msg = lport->last_stun_msg(); const StunUInt32Attribute* network_info_attr = - msg->GetUInt32(STUN_ATTR_NETWORK_INFO); + msg->GetUInt32(STUN_ATTR_GOOG_NETWORK_INFO); ASSERT_TRUE(network_info_attr != NULL); uint32_t network_info = network_info_attr->value(); EXPECT_EQ(lnetwork_id, network_info >> 16); @@ -2069,7 
+2068,7 @@ TEST_F(PortTest, TestNetworkInfoAttribute) { rconn->Ping(0); ASSERT_TRUE_WAIT(rport->last_stun_msg() != NULL, kDefaultTimeout); msg = rport->last_stun_msg(); - network_info_attr = msg->GetUInt32(STUN_ATTR_NETWORK_INFO); + network_info_attr = msg->GetUInt32(STUN_ATTR_GOOG_NETWORK_INFO); ASSERT_TRUE(network_info_attr != NULL); network_info = network_info_attr->value(); EXPECT_EQ(rnetwork_id, network_info >> 16); diff --git a/p2p/base/pseudo_tcp.cc b/p2p/base/pseudo_tcp.cc index 56e6b9b6ad..13e7a2214f 100644 --- a/p2p/base/pseudo_tcp.cc +++ b/p2p/base/pseudo_tcp.cc @@ -12,6 +12,7 @@ #include #include +#include #include #include @@ -402,9 +403,7 @@ uint32_t PseudoTcp::GetBytesInFlight() const { } uint32_t PseudoTcp::GetBytesBufferedNotSent() const { - size_t buffered_bytes = 0; - m_sbuf.GetBuffered(&buffered_bytes); - return static_cast(m_snd_una + buffered_bytes - m_snd_nxt); + return static_cast(m_snd_una + m_sbuf.GetBuffered() - m_snd_nxt); } uint32_t PseudoTcp::GetRoundTripTimeEstimateMs() const { @@ -422,15 +421,11 @@ int PseudoTcp::Recv(char* buffer, size_t len) { } size_t read = 0; - rtc::StreamResult result = m_rbuf.Read(buffer, len, &read, NULL); - - // If there's no data in |m_rbuf|. 
- if (result == rtc::SR_BLOCK) { + if (!m_rbuf.Read(buffer, len, &read)) { m_bReadEnable = true; m_error = EWOULDBLOCK; return SOCKET_ERROR; } - RTC_DCHECK(result == rtc::SR_SUCCESS); size_t available_space = 0; m_rbuf.GetWriteRemaining(&available_space); @@ -497,14 +492,13 @@ uint32_t PseudoTcp::queue(const char* data, uint32_t len, bool bCtrl) { (m_slist.back().xmit == 0)) { m_slist.back().len += len; } else { - size_t snd_buffered = 0; - m_sbuf.GetBuffered(&snd_buffered); - SSegment sseg(static_cast(m_snd_una + snd_buffered), len, bCtrl); + SSegment sseg(static_cast(m_snd_una + m_sbuf.GetBuffered()), len, + bCtrl); m_slist.push_back(sseg); } size_t written = 0; - m_sbuf.Write(data, len, &written, NULL); + m_sbuf.Write(data, len, &written); return static_cast(written); } @@ -532,9 +526,9 @@ IPseudoTcpNotify::WriteResult PseudoTcp::packet(uint32_t seq, if (len) { size_t bytes_read = 0; - rtc::StreamResult result = + bool result = m_sbuf.ReadOffset(buffer.get() + HEADER_SIZE, len, offset, &bytes_read); - RTC_DCHECK(result == rtc::SR_SUCCESS); + RTC_DCHECK(result); RTC_DCHECK(static_cast(bytes_read) == len); } @@ -601,11 +595,9 @@ bool PseudoTcp::clock_check(uint32_t now, long& nTimeout) { if (m_shutdown == SD_FORCEFUL) return false; - size_t snd_buffered = 0; - m_sbuf.GetBuffered(&snd_buffered); if ((m_shutdown == SD_GRACEFUL) && ((m_state != TCP_ESTABLISHED) || - ((snd_buffered == 0) && (m_t_ack == 0)))) { + ((m_sbuf.GetBuffered() == 0) && (m_t_ack == 0)))) { return false; } @@ -830,10 +822,8 @@ bool PseudoTcp::process(Segment& seg) { // The goal it to make sure we always have at least enough data to fill the // window. We'd like to notify the app when we are halfway to that point. 
const uint32_t kIdealRefillSize = (m_sbuf_len + m_rbuf_len) / 2; - size_t snd_buffered = 0; - m_sbuf.GetBuffered(&snd_buffered); if (m_bWriteEnable && - static_cast(snd_buffered) < kIdealRefillSize) { + static_cast(m_sbuf.GetBuffered()) < kIdealRefillSize) { m_bWriteEnable = false; if (m_notify) { m_notify->OnTcpWriteable(this); @@ -912,8 +902,7 @@ bool PseudoTcp::process(Segment& seg) { // there's not already data ready to read, but this should always be // true in the problematic scenario, since control frames are always // sent first in the stream. - size_t rcv_buffered; - if (m_rbuf.GetBuffered(&rcv_buffered) && rcv_buffered == 0) { + if (m_rbuf.GetBuffered() == 0) { m_rbuf.ConsumeWriteBuffer(seg.len); m_rbuf.ConsumeReadData(seg.len); // After shifting the position in the buffer, we may have @@ -924,15 +913,11 @@ bool PseudoTcp::process(Segment& seg) { } else { uint32_t nOffset = seg.seq - m_rcv_nxt; - rtc::StreamResult result = - m_rbuf.WriteOffset(seg.data, seg.len, nOffset, NULL); - if (result == rtc::SR_BLOCK) { + if (!m_rbuf.WriteOffset(seg.data, seg.len, nOffset, NULL)) { // Ignore incoming packets outside of the receive window. return false; } - RTC_DCHECK(result == rtc::SR_SUCCESS); - if (seg.seq == m_rcv_nxt) { m_rbuf.ConsumeWriteBuffer(seg.len); m_rcv_nxt += seg.len; @@ -1078,8 +1063,7 @@ void PseudoTcp::attemptSend(SendFlags sflags) { uint32_t nInFlight = m_snd_nxt - m_snd_una; uint32_t nUseable = (nInFlight < nWindow) ? 
(nWindow - nInFlight) : 0; - size_t snd_buffered = 0; - m_sbuf.GetBuffered(&snd_buffered); + size_t snd_buffered = m_sbuf.GetBuffered(); uint32_t nAvailable = std::min(static_cast(snd_buffered) - nInFlight, m_mss); @@ -1300,4 +1284,149 @@ void PseudoTcp::resizeReceiveBuffer(uint32_t new_size) { m_rcv_wnd = static_cast(available_space); } +PseudoTcp::LockedFifoBuffer::LockedFifoBuffer(size_t size) + : buffer_(new char[size]), + buffer_length_(size), + data_length_(0), + read_position_(0) {} + +PseudoTcp::LockedFifoBuffer::~LockedFifoBuffer() {} + +size_t PseudoTcp::LockedFifoBuffer::GetBuffered() const { + webrtc::MutexLock lock(&mutex_); + return data_length_; +} + +bool PseudoTcp::LockedFifoBuffer::SetCapacity(size_t size) { + webrtc::MutexLock lock(&mutex_); + if (data_length_ > size) + return false; + + if (size != buffer_length_) { + char* buffer = new char[size]; + const size_t copy = data_length_; + const size_t tail_copy = std::min(copy, buffer_length_ - read_position_); + memcpy(buffer, &buffer_[read_position_], tail_copy); + memcpy(buffer + tail_copy, &buffer_[0], copy - tail_copy); + buffer_.reset(buffer); + read_position_ = 0; + buffer_length_ = size; + } + + return true; +} + +bool PseudoTcp::LockedFifoBuffer::ReadOffset(void* buffer, + size_t bytes, + size_t offset, + size_t* bytes_read) { + webrtc::MutexLock lock(&mutex_); + return ReadOffsetLocked(buffer, bytes, offset, bytes_read); +} + +bool PseudoTcp::LockedFifoBuffer::WriteOffset(const void* buffer, + size_t bytes, + size_t offset, + size_t* bytes_written) { + webrtc::MutexLock lock(&mutex_); + return WriteOffsetLocked(buffer, bytes, offset, bytes_written); +} + +bool PseudoTcp::LockedFifoBuffer::Read(void* buffer, + size_t bytes, + size_t* bytes_read) { + webrtc::MutexLock lock(&mutex_); + size_t copy = 0; + if (!ReadOffsetLocked(buffer, bytes, 0, ©)) + return false; + + // If read was successful then adjust the read position and number of + // bytes buffered. 
+ read_position_ = (read_position_ + copy) % buffer_length_; + data_length_ -= copy; + if (bytes_read) + *bytes_read = copy; + + return true; +} + +bool PseudoTcp::LockedFifoBuffer::Write(const void* buffer, + size_t bytes, + size_t* bytes_written) { + webrtc::MutexLock lock(&mutex_); + size_t copy = 0; + if (!WriteOffsetLocked(buffer, bytes, 0, ©)) + return false; + + // If write was successful then adjust the number of readable bytes. + data_length_ += copy; + if (bytes_written) { + *bytes_written = copy; + } + + return true; +} + +void PseudoTcp::LockedFifoBuffer::ConsumeReadData(size_t size) { + webrtc::MutexLock lock(&mutex_); + RTC_DCHECK(size <= data_length_); + read_position_ = (read_position_ + size) % buffer_length_; + data_length_ -= size; +} + +void PseudoTcp::LockedFifoBuffer::ConsumeWriteBuffer(size_t size) { + webrtc::MutexLock lock(&mutex_); + RTC_DCHECK(size <= buffer_length_ - data_length_); + data_length_ += size; +} + +bool PseudoTcp::LockedFifoBuffer::GetWriteRemaining(size_t* size) const { + webrtc::MutexLock lock(&mutex_); + *size = buffer_length_ - data_length_; + return true; +} + +bool PseudoTcp::LockedFifoBuffer::ReadOffsetLocked(void* buffer, + size_t bytes, + size_t offset, + size_t* bytes_read) { + if (offset >= data_length_) + return false; + + const size_t available = data_length_ - offset; + const size_t read_position = (read_position_ + offset) % buffer_length_; + const size_t copy = std::min(bytes, available); + const size_t tail_copy = std::min(copy, buffer_length_ - read_position); + char* const p = static_cast(buffer); + memcpy(p, &buffer_[read_position], tail_copy); + memcpy(p + tail_copy, &buffer_[0], copy - tail_copy); + + if (bytes_read) + *bytes_read = copy; + + return true; +} + +bool PseudoTcp::LockedFifoBuffer::WriteOffsetLocked(const void* buffer, + size_t bytes, + size_t offset, + size_t* bytes_written) { + if (data_length_ + offset >= buffer_length_) + return false; + + const size_t available = buffer_length_ - 
data_length_ - offset; + const size_t write_position = + (read_position_ + data_length_ + offset) % buffer_length_; + const size_t copy = std::min(bytes, available); + const size_t tail_copy = std::min(copy, buffer_length_ - write_position); + const char* const p = static_cast(buffer); + memcpy(&buffer_[write_position], p, tail_copy); + memcpy(&buffer_[0], p + tail_copy, copy - tail_copy); + + if (bytes_written) + *bytes_written = copy; + + return true; +} + } // namespace cricket diff --git a/p2p/base/pseudo_tcp.h b/p2p/base/pseudo_tcp.h index cb6d974496..74ffee631c 100644 --- a/p2p/base/pseudo_tcp.h +++ b/p2p/base/pseudo_tcp.h @@ -15,8 +15,9 @@ #include #include +#include -#include "rtc_base/memory/fifo_buffer.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/system/rtc_export.h" namespace cricket { @@ -196,6 +197,50 @@ class RTC_EXPORT PseudoTcp { // window scale factor |m_swnd_scale| accordingly. void resizeReceiveBuffer(uint32_t new_size); + class LockedFifoBuffer final { + public: + explicit LockedFifoBuffer(size_t size); + ~LockedFifoBuffer(); + + size_t GetBuffered() const; + bool SetCapacity(size_t size); + bool ReadOffset(void* buffer, + size_t bytes, + size_t offset, + size_t* bytes_read); + bool WriteOffset(const void* buffer, + size_t bytes, + size_t offset, + size_t* bytes_written); + bool Read(void* buffer, size_t bytes, size_t* bytes_read); + bool Write(const void* buffer, size_t bytes, size_t* bytes_written); + void ConsumeReadData(size_t size); + void ConsumeWriteBuffer(size_t size); + bool GetWriteRemaining(size_t* size) const; + + private: + bool ReadOffsetLocked(void* buffer, + size_t bytes, + size_t offset, + size_t* bytes_read) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + bool WriteOffsetLocked(const void* buffer, + size_t bytes, + size_t offset, + size_t* bytes_written) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + + // the allocated buffer + std::unique_ptr buffer_ RTC_GUARDED_BY(mutex_); + // size of the allocated buffer + size_t 
buffer_length_ RTC_GUARDED_BY(mutex_); + // amount of readable data in the buffer + size_t data_length_ RTC_GUARDED_BY(mutex_); + // offset to the readable data + size_t read_position_ RTC_GUARDED_BY(mutex_); + mutable webrtc::Mutex mutex_; + }; + IPseudoTcpNotify* m_notify; enum Shutdown { SD_NONE, SD_GRACEFUL, SD_FORCEFUL } m_shutdown; int m_error; @@ -211,13 +256,13 @@ class RTC_EXPORT PseudoTcp { RList m_rlist; uint32_t m_rbuf_len, m_rcv_nxt, m_rcv_wnd, m_lastrecv; uint8_t m_rwnd_scale; // Window scale factor. - rtc::FifoBuffer m_rbuf; + LockedFifoBuffer m_rbuf; // Outgoing data SList m_slist; uint32_t m_sbuf_len, m_snd_nxt, m_snd_wnd, m_lastsend, m_snd_una; uint8_t m_swnd_scale; // Window scale factor. - rtc::FifoBuffer m_sbuf; + LockedFifoBuffer m_sbuf; // Maximum segment size, estimated protocol level, largest segment sent uint32_t m_mss, m_msslevel, m_largest, m_mtu_advise; diff --git a/p2p/base/pseudo_tcp_unittest.cc b/p2p/base/pseudo_tcp_unittest.cc index a7fc9b3e69..ecafec9fb6 100644 --- a/p2p/base/pseudo_tcp_unittest.cc +++ b/p2p/base/pseudo_tcp_unittest.cc @@ -44,7 +44,7 @@ class PseudoTcpForTest : public cricket::PseudoTcp { }; class PseudoTcpTestBase : public ::testing::Test, - public rtc::MessageHandler, + public rtc::MessageHandlerAutoCleanup, public cricket::IPseudoTcpNotify { public: PseudoTcpTestBase() diff --git a/p2p/base/regathering_controller.cc b/p2p/base/regathering_controller.cc index fe38a3e4d4..293e9dbcfd 100644 --- a/p2p/base/regathering_controller.cc +++ b/p2p/base/regathering_controller.cc @@ -9,6 +9,7 @@ */ #include "p2p/base/regathering_controller.h" +#include "rtc_base/task_utils/to_queued_task.h" namespace webrtc { @@ -17,8 +18,8 @@ BasicRegatheringController::BasicRegatheringController( cricket::IceTransportInternal* ice_transport, rtc::Thread* thread) : config_(config), ice_transport_(ice_transport), thread_(thread) { + RTC_DCHECK_RUN_ON(thread_); RTC_DCHECK(ice_transport_); - RTC_DCHECK(thread_); 
ice_transport_->SignalStateChanged.connect( this, &BasicRegatheringController::OnIceTransportStateChanged); ice_transport->SignalWritableState.connect( @@ -29,51 +30,49 @@ BasicRegatheringController::BasicRegatheringController( this, &BasicRegatheringController::OnIceTransportNetworkRouteChanged); } -BasicRegatheringController::~BasicRegatheringController() = default; +BasicRegatheringController::~BasicRegatheringController() { + RTC_DCHECK_RUN_ON(thread_); +} void BasicRegatheringController::Start() { + RTC_DCHECK_RUN_ON(thread_); ScheduleRecurringRegatheringOnFailedNetworks(); } void BasicRegatheringController::SetConfig(const Config& config) { - bool need_cancel_and_reschedule_on_failed_networks = - has_recurring_schedule_on_failed_networks_ && - (config_.regather_on_failed_networks_interval != - config.regather_on_failed_networks_interval); + RTC_DCHECK_RUN_ON(thread_); + bool need_reschedule_on_failed_networks = + pending_regathering_ && (config_.regather_on_failed_networks_interval != + config.regather_on_failed_networks_interval); config_ = config; - if (need_cancel_and_reschedule_on_failed_networks) { - CancelScheduledRecurringRegatheringOnFailedNetworks(); + if (need_reschedule_on_failed_networks) { ScheduleRecurringRegatheringOnFailedNetworks(); } } void BasicRegatheringController:: ScheduleRecurringRegatheringOnFailedNetworks() { + RTC_DCHECK_RUN_ON(thread_); RTC_DCHECK(config_.regather_on_failed_networks_interval >= 0); - CancelScheduledRecurringRegatheringOnFailedNetworks(); - has_recurring_schedule_on_failed_networks_ = true; - invoker_for_failed_networks_.AsyncInvokeDelayed( - RTC_FROM_HERE, thread_, - rtc::Bind( - &BasicRegatheringController::RegatherOnFailedNetworksIfDoneGathering, - this), - config_.regather_on_failed_networks_interval); -} + // Reset pending_regathering_ to cancel any potentially pending tasks. 
+ pending_regathering_.reset(new ScopedTaskSafety()); -void BasicRegatheringController::RegatherOnFailedNetworksIfDoneGathering() { - // Only regather when the current session is in the CLEARED state (i.e., not - // running or stopped). It is only possible to enter this state when we gather - // continually, so there is an implicit check on continual gathering here. - if (allocator_session_ && allocator_session_->IsCleared()) { - allocator_session_->RegatherOnFailedNetworks(); - } - ScheduleRecurringRegatheringOnFailedNetworks(); -} - -void BasicRegatheringController:: - CancelScheduledRecurringRegatheringOnFailedNetworks() { - invoker_for_failed_networks_.Clear(); - has_recurring_schedule_on_failed_networks_ = false; + thread_->PostDelayedTask( + ToQueuedTask(*pending_regathering_.get(), + [this]() { + RTC_DCHECK_RUN_ON(thread_); + // Only regather when the current session is in the CLEARED + // state (i.e., not running or stopped). It is only + // possible to enter this state when we gather continually, + // so there is an implicit check on continual gathering + // here. 
+ if (allocator_session_ && + allocator_session_->IsCleared()) { + allocator_session_->RegatherOnFailedNetworks(); + } + ScheduleRecurringRegatheringOnFailedNetworks(); + }), + config_.regather_on_failed_networks_interval); } } // namespace webrtc diff --git a/p2p/base/regathering_controller.h b/p2p/base/regathering_controller.h index 54a76dc3e5..116d820a82 100644 --- a/p2p/base/regathering_controller.h +++ b/p2p/base/regathering_controller.h @@ -11,9 +11,11 @@ #ifndef P2P_BASE_REGATHERING_CONTROLLER_H_ #define P2P_BASE_REGATHERING_CONTROLLER_H_ +#include + #include "p2p/base/ice_transport_internal.h" #include "p2p/base/port_allocator.h" -#include "rtc_base/async_invoker.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/thread.h" namespace webrtc { @@ -80,20 +82,14 @@ class BasicRegatheringController : public sigslot::has_slots<> { void ScheduleRecurringRegatheringOnFailedNetworks(); // Cancels regathering scheduled by ScheduleRecurringRegatheringOnAllNetworks. void CancelScheduledRecurringRegatheringOnAllNetworks(); - // Cancels regathering scheduled by - // ScheduleRecurringRegatheringOnFailedNetworks. - void CancelScheduledRecurringRegatheringOnFailedNetworks(); - - // The following method perform the actual regathering, if the recent port - // allocator session has done the initial gathering. - void RegatherOnFailedNetworksIfDoneGathering(); + // We use a flag to be able to cancel pending regathering operations when + // the object goes out of scope or the config changes. 
+ std::unique_ptr pending_regathering_; Config config_; cricket::IceTransportInternal* ice_transport_; cricket::PortAllocatorSession* allocator_session_ = nullptr; - bool has_recurring_schedule_on_failed_networks_ = false; - rtc::Thread* thread_; - rtc::AsyncInvoker invoker_for_failed_networks_; + rtc::Thread* const thread_; }; } // namespace webrtc diff --git a/p2p/base/stun_port.cc b/p2p/base/stun_port.cc index 4e1a1f6a97..7b1a2a83a2 100644 --- a/p2p/base/stun_port.cc +++ b/p2p/base/stun_port.cc @@ -17,11 +17,11 @@ #include "p2p/base/connection.h" #include "p2p/base/p2p_constants.h" #include "p2p/base/port_allocator.h" +#include "rtc_base/async_resolver_interface.h" #include "rtc_base/checks.h" #include "rtc_base/helpers.h" #include "rtc_base/ip_address.h" #include "rtc_base/logging.h" -#include "rtc_base/net_helpers.h" #include "rtc_base/strings/string_builder.h" namespace cricket { @@ -306,7 +306,9 @@ int UDPPort::SendTo(const void* data, if (send_error_count_ < kSendErrorLogLimit) { ++send_error_count_; RTC_LOG(LS_ERROR) << ToString() << ": UDP send of " << size - << " bytes failed with error " << error_; + << " bytes to host " << addr.ToSensitiveString() << " (" + << addr.ToResolvedSensitiveString() + << ") failed with error " << error_; } } else { send_error_count_ = 0; @@ -593,7 +595,11 @@ void UDPPort::OnSendPacket(const void* data, size_t size, StunRequest* req) { options.info_signaled_after_sent.packet_type = rtc::PacketType::kStunMessage; CopyPortInformationToPacketInfo(&options.info_signaled_after_sent); if (socket_->SendTo(data, size, sreq->server_addr(), options) < 0) { - RTC_LOG_ERR_EX(LERROR, socket_->GetError()) << "sendto"; + RTC_LOG_ERR_EX(LERROR, socket_->GetError()) + << "UDP send of " << size << " bytes to host " + << sreq->server_addr().ToSensitiveString() << " (" + << sreq->server_addr().ToResolvedSensitiveString() + << ") failed with error " << error_; } stats_.stun_binding_requests_sent++; } diff --git a/p2p/base/stun_port_unittest.cc 
b/p2p/base/stun_port_unittest.cc index dfc72362ce..31542daccc 100644 --- a/p2p/base/stun_port_unittest.cc +++ b/p2p/base/stun_port_unittest.cc @@ -49,10 +49,8 @@ class StunPortTestBase : public ::testing::Test, public sigslot::has_slots<> { thread_(ss_.get()), network_("unittest", "unittest", kLocalAddr.ipaddr(), 32), socket_factory_(rtc::Thread::Current()), - stun_server_1_(cricket::TestStunServer::Create(rtc::Thread::Current(), - kStunAddr1)), - stun_server_2_(cricket::TestStunServer::Create(rtc::Thread::Current(), - kStunAddr2)), + stun_server_1_(cricket::TestStunServer::Create(ss_.get(), kStunAddr1)), + stun_server_2_(cricket::TestStunServer::Create(ss_.get(), kStunAddr2)), done_(false), error_(false), stun_keepalive_delay_(1), @@ -225,7 +223,7 @@ TEST_F(StunPortTest, TestPrepareAddressFail) { EXPECT_EQ_SIMULATED_WAIT(error_event_.error_code, cricket::SERVER_NOT_REACHABLE_ERROR, kTimeoutMs, fake_clock); - ASSERT_NE(error_event_.error_text.find("."), std::string::npos); + ASSERT_NE(error_event_.error_text.find('.'), std::string::npos); ASSERT_NE(error_event_.address.find(kLocalAddr.HostAsSensitiveURIString()), std::string::npos); std::string server_url = "stun:" + kBadAddr.ToString(); @@ -412,24 +410,29 @@ class MockAsyncPacketSocket : public rtc::AsyncPacketSocket { public: ~MockAsyncPacketSocket() = default; - MOCK_CONST_METHOD0(GetLocalAddress, SocketAddress()); - MOCK_CONST_METHOD0(GetRemoteAddress, SocketAddress()); - MOCK_METHOD3(Send, - int(const void* pv, - size_t cb, - const rtc::PacketOptions& options)); - - MOCK_METHOD4(SendTo, - int(const void* pv, - size_t cb, - const SocketAddress& addr, - const rtc::PacketOptions& options)); - MOCK_METHOD0(Close, int()); - MOCK_CONST_METHOD0(GetState, State()); - MOCK_METHOD2(GetOption, int(rtc::Socket::Option opt, int* value)); - MOCK_METHOD2(SetOption, int(rtc::Socket::Option opt, int value)); - MOCK_CONST_METHOD0(GetError, int()); - MOCK_METHOD1(SetError, void(int error)); + MOCK_METHOD(SocketAddress, 
GetLocalAddress, (), (const, override)); + MOCK_METHOD(SocketAddress, GetRemoteAddress, (), (const, override)); + MOCK_METHOD(int, + Send, + (const void* pv, size_t cb, const rtc::PacketOptions& options), + (override)); + + MOCK_METHOD(int, + SendTo, + (const void* pv, + size_t cb, + const SocketAddress& addr, + const rtc::PacketOptions& options), + (override)); + MOCK_METHOD(int, Close, (), (override)); + MOCK_METHOD(State, GetState, (), (const, override)); + MOCK_METHOD(int, + GetOption, + (rtc::Socket::Option opt, int* value), + (override)); + MOCK_METHOD(int, SetOption, (rtc::Socket::Option opt, int value), (override)); + MOCK_METHOD(int, GetError, (), (const, override)); + MOCK_METHOD(void, SetError, (int error), (override)); }; // Test that outbound packets inherit the dscp value assigned to the socket. diff --git a/p2p/base/stun_request.cc b/p2p/base/stun_request.cc index 44376ced95..2870dcdfc5 100644 --- a/p2p/base/stun_request.cc +++ b/p2p/base/stun_request.cc @@ -120,6 +120,18 @@ bool StunRequestManager::CheckResponse(StunMessage* msg) { } StunRequest* request = iter->second; + + // Now that we know the request, we can see if the response is + // integrity-protected or not. + // For some tests, the message integrity is not set in the request. + // Complain, and then don't check. + bool skip_integrity_checking = false; + if (request->msg()->integrity() == StunMessage::IntegrityStatus::kNotSet) { + skip_integrity_checking = true; + } else { + msg->ValidateMessageIntegrity(request->msg()->password()); + } + if (!msg->GetNonComprehendedAttributes().empty()) { // If a response contains unknown comprehension-required attributes, it's // simply discarded and the transaction is considered failed. 
See RFC5389 @@ -129,6 +141,9 @@ bool StunRequestManager::CheckResponse(StunMessage* msg) { delete request; return false; } else if (msg->type() == GetStunSuccessResponseType(request->type())) { + if (!msg->IntegrityOk() && !skip_integrity_checking) { + return false; + } request->OnResponse(msg); } else if (msg->type() == GetStunErrorResponseType(request->type())) { request->OnErrorResponse(msg); diff --git a/p2p/base/stun_request.h b/p2p/base/stun_request.h index d45376ea55..39f928eaf4 100644 --- a/p2p/base/stun_request.h +++ b/p2p/base/stun_request.h @@ -76,7 +76,7 @@ class StunRequestManager { private: typedef std::map RequestMap; - rtc::Thread* thread_; + rtc::Thread* const thread_; RequestMap requests_; std::string origin_; diff --git a/p2p/base/tcp_port.cc b/p2p/base/tcp_port.cc index efbf62e496..d4266bf0b6 100644 --- a/p2p/base/tcp_port.cc +++ b/p2p/base/tcp_port.cc @@ -403,12 +403,14 @@ int TCPConnection::Send(const void* data, static_cast(port_)->CopyPortInformationToPacketInfo( &modified_options.info_signaled_after_sent); int sent = socket_->Send(data, size, modified_options); + int64_t now = rtc::TimeMillis(); if (sent < 0) { stats_.sent_discarded_packets++; error_ = socket_->GetError(); } else { - send_rate_tracker_.AddSamples(sent); + send_rate_tracker_.AddSamplesAtTime(now, sent); } + last_send_data_ = now; return sent; } diff --git a/p2p/base/test_stun_server.cc b/p2p/base/test_stun_server.cc index 9330a00075..54bdfb3793 100644 --- a/p2p/base/test_stun_server.cc +++ b/p2p/base/test_stun_server.cc @@ -15,10 +15,9 @@ namespace cricket { -TestStunServer* TestStunServer::Create(rtc::Thread* thread, +TestStunServer* TestStunServer::Create(rtc::SocketServer* ss, const rtc::SocketAddress& addr) { - rtc::AsyncSocket* socket = - thread->socketserver()->CreateAsyncSocket(addr.family(), SOCK_DGRAM); + rtc::AsyncSocket* socket = ss->CreateAsyncSocket(addr.family(), SOCK_DGRAM); rtc::AsyncUDPSocket* udp_socket = rtc::AsyncUDPSocket::Create(socket, addr); return 
new TestStunServer(udp_socket); diff --git a/p2p/base/test_stun_server.h b/p2p/base/test_stun_server.h index e44e7dbcdd..11ac620bb8 100644 --- a/p2p/base/test_stun_server.h +++ b/p2p/base/test_stun_server.h @@ -15,14 +15,14 @@ #include "p2p/base/stun_server.h" #include "rtc_base/async_udp_socket.h" #include "rtc_base/socket_address.h" -#include "rtc_base/thread.h" +#include "rtc_base/socket_server.h" namespace cricket { // A test STUN server. Useful for unit tests. class TestStunServer : StunServer { public: - static TestStunServer* Create(rtc::Thread* thread, + static TestStunServer* Create(rtc::SocketServer* ss, const rtc::SocketAddress& addr); // Set a fake STUN address to return to the client. diff --git a/p2p/base/test_turn_server.h b/p2p/base/test_turn_server.h index d438a83301..ecd934861b 100644 --- a/p2p/base/test_turn_server.h +++ b/p2p/base/test_turn_server.h @@ -14,6 +14,7 @@ #include #include +#include "api/sequence_checker.h" #include "api/transport/stun.h" #include "p2p/base/basic_packet_socket_factory.h" #include "p2p/base/turn_server.h" @@ -21,7 +22,6 @@ #include "rtc_base/ssl_adapter.h" #include "rtc_base/ssl_identity.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" namespace cricket { @@ -147,7 +147,7 @@ class TestTurnServer : public TurnAuthInterface { TurnServer server_; rtc::Thread* thread_; - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; }; } // namespace cricket diff --git a/p2p/base/transport_description.cc b/p2p/base/transport_description.cc index 729b4ae8c3..96fb9597e0 100644 --- a/p2p/base/transport_description.cc +++ b/p2p/base/transport_description.cc @@ -172,8 +172,7 @@ TransportDescription::TransportDescription(const TransportDescription& from) ice_pwd(from.ice_pwd), ice_mode(from.ice_mode), connection_role(from.connection_role), - identity_fingerprint(CopyFingerprint(from.identity_fingerprint.get())), - opaque_parameters(from.opaque_parameters) {} + 
identity_fingerprint(CopyFingerprint(from.identity_fingerprint.get())) {} TransportDescription::~TransportDescription() = default; @@ -190,7 +189,6 @@ TransportDescription& TransportDescription::operator=( connection_role = from.connection_role; identity_fingerprint.reset(CopyFingerprint(from.identity_fingerprint.get())); - opaque_parameters = from.opaque_parameters; return *this; } diff --git a/p2p/base/transport_description.h b/p2p/base/transport_description.h index 1a458c9571..32fdb5c9b3 100644 --- a/p2p/base/transport_description.h +++ b/p2p/base/transport_description.h @@ -100,28 +100,6 @@ constexpr auto* ICE_OPTION_RENOMINATION = "renomination"; bool StringToConnectionRole(const std::string& role_str, ConnectionRole* role); bool ConnectionRoleToString(const ConnectionRole& role, std::string* role_str); -// Parameters for an opaque transport protocol which may be plugged into WebRTC. -struct OpaqueTransportParameters { - // Protocol used by this opaque transport. Two endpoints that support the - // same protocol are expected to be able to understand the contents of each - // others' |parameters| fields. If those parameters are compatible, the - // endpoints are expected to use this transport protocol. - std::string protocol; - - // Opaque parameters for this transport. These parameters are serialized in a - // manner determined by the |protocol|. They can be parsed and understood by - // the plugin that supports |protocol|. 
- std::string parameters; - - bool operator==(const OpaqueTransportParameters& other) const { - return protocol == other.protocol && parameters == other.parameters; - } - - bool operator!=(const OpaqueTransportParameters& other) const { - return !(*this == other); - } -}; - struct TransportDescription { TransportDescription(); TransportDescription(const std::vector& transport_options, @@ -168,7 +146,6 @@ struct TransportDescription { ConnectionRole connection_role; std::unique_ptr identity_fingerprint; - absl::optional opaque_parameters; }; } // namespace cricket diff --git a/p2p/base/transport_description_factory.cc b/p2p/base/transport_description_factory.cc index 17152d1a04..5cce2ac09d 100644 --- a/p2p/base/transport_description_factory.cc +++ b/p2p/base/transport_description_factory.cc @@ -55,8 +55,6 @@ std::unique_ptr TransportDescriptionFactory::CreateOffer( } } - desc->opaque_parameters = options.opaque_parameters; - return desc; } @@ -110,13 +108,6 @@ std::unique_ptr TransportDescriptionFactory::CreateAnswer( return NULL; } - // Answers may only attach opaque parameters if the offer contained them as - // well. The answer's parameters may differ, and it's up to the opaque - // transport implementation to decide if the difference is acceptable. - if (offer->opaque_parameters && options.opaque_parameters) { - desc->opaque_parameters = options.opaque_parameters; - } - return desc; } diff --git a/p2p/base/transport_description_factory.h b/p2p/base/transport_description_factory.h index d0813dc541..c1656a0fac 100644 --- a/p2p/base/transport_description_factory.h +++ b/p2p/base/transport_description_factory.h @@ -29,9 +29,6 @@ struct TransportOptions { // If true, ICE renomination is supported and will be used if it is also // supported by the remote side. bool enable_ice_renomination = false; - - // Opaque parameters for plug-in transports. - absl::optional opaque_parameters; }; // Creates transport descriptions according to the supplied configuration. 
diff --git a/p2p/base/transport_description_factory_unittest.cc b/p2p/base/transport_description_factory_unittest.cc index 8359ffc1c9..f7675ae643 100644 --- a/p2p/base/transport_description_factory_unittest.cc +++ b/p2p/base/transport_description_factory_unittest.cc @@ -26,7 +26,6 @@ #include "test/gmock.h" #include "test/gtest.h" -using cricket::OpaqueTransportParameters; using cricket::TransportDescription; using cricket::TransportDescriptionFactory; using cricket::TransportOptions; @@ -210,73 +209,6 @@ TEST_F(TransportDescriptionFactoryTest, TestOfferDtlsReofferDtls) { CheckDesc(desc.get(), "", old_desc->ice_ufrag, old_desc->ice_pwd, digest_alg); } -TEST_F(TransportDescriptionFactoryTest, TestOfferOpaqueTransportParameters) { - OpaqueTransportParameters params; - params.protocol = "fake"; - params.parameters = "foobar"; - - TransportOptions options; - options.opaque_parameters = params; - - std::unique_ptr desc = - f1_.CreateOffer(options, NULL, &ice_credentials_); - - CheckDesc(desc.get(), "", "", "", ""); - EXPECT_EQ(desc->opaque_parameters, params); -} - -TEST_F(TransportDescriptionFactoryTest, TestAnswerOpaqueTransportParameters) { - OpaqueTransportParameters params; - params.protocol = "fake"; - params.parameters = "foobar"; - - TransportOptions options; - options.opaque_parameters = params; - - std::unique_ptr offer = - f1_.CreateOffer(options, NULL, &ice_credentials_); - std::unique_ptr answer = - f2_.CreateAnswer(offer.get(), options, true, NULL, &ice_credentials_); - - CheckDesc(answer.get(), "", "", "", ""); - EXPECT_EQ(answer->opaque_parameters, params); -} - -TEST_F(TransportDescriptionFactoryTest, TestAnswerNoOpaqueTransportParameters) { - OpaqueTransportParameters params; - params.protocol = "fake"; - params.parameters = "foobar"; - - TransportOptions options; - options.opaque_parameters = params; - - std::unique_ptr offer = - f1_.CreateOffer(options, NULL, &ice_credentials_); - std::unique_ptr answer = f2_.CreateAnswer( - offer.get(), 
TransportOptions(), true, NULL, &ice_credentials_); - - CheckDesc(answer.get(), "", "", "", ""); - EXPECT_EQ(answer->opaque_parameters, absl::nullopt); -} - -TEST_F(TransportDescriptionFactoryTest, - TestAnswerNoOpaqueTransportParametersInOffer) { - std::unique_ptr offer = - f1_.CreateOffer(TransportOptions(), NULL, &ice_credentials_); - - OpaqueTransportParameters params; - params.protocol = "fake"; - params.parameters = "foobar"; - - TransportOptions options; - options.opaque_parameters = params; - std::unique_ptr answer = - f2_.CreateAnswer(offer.get(), options, true, NULL, &ice_credentials_); - - CheckDesc(answer.get(), "", "", "", ""); - EXPECT_EQ(answer->opaque_parameters, absl::nullopt); -} - TEST_F(TransportDescriptionFactoryTest, TestAnswerDefault) { std::unique_ptr offer = f1_.CreateOffer(TransportOptions(), NULL, &ice_credentials_); diff --git a/p2p/base/turn_port.cc b/p2p/base/turn_port.cc index 4d39f207b4..33925d43e7 100644 --- a/p2p/base/turn_port.cc +++ b/p2p/base/turn_port.cc @@ -28,6 +28,7 @@ #include "rtc_base/net_helpers.h" #include "rtc_base/socket_address.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "system_wrappers/include/field_trial.h" namespace cricket { @@ -346,6 +347,15 @@ void TurnPort::PrepareAddress() { server_address_.address.SetPort(TURN_DEFAULT_PORT); } + if (!AllowedTurnPort(server_address_.address.port())) { + // This can only happen after a 300 ALTERNATE SERVER, since the port can't + // be created with a disallowed port number. 
+ RTC_LOG(LS_ERROR) << "Attempt to start allocation with disallowed port# " + << server_address_.address.port(); + OnAllocateError(STUN_ERROR_SERVER_ERROR, + "Attempt to start allocation to a disallowed port"); + return; + } if (server_address_.address.IsUnresolvedIP()) { ResolveTurnAddress(server_address_.address); } else { @@ -715,16 +725,6 @@ bool TurnPort::HandleIncomingPacket(rtc::AsyncPacketSocket* socket, return false; } - // This must be a response for one of our requests. - // Check success responses, but not errors, for MESSAGE-INTEGRITY. - if (IsStunSuccessResponseType(msg_type) && - !StunMessage::ValidateMessageIntegrity(data, size, hash())) { - RTC_LOG(LS_WARNING) << ToString() - << ": Received TURN message with invalid " - "message integrity, msg_type: " - << msg_type; - return true; - } request_manager_.CheckResponse(data, size); return true; @@ -943,6 +943,21 @@ rtc::DiffServCodePoint TurnPort::StunDscpValue() const { return stun_dscp_value_; } +// static +bool TurnPort::AllowedTurnPort(int port) { + // Port 53, 80 and 443 are used for existing deployments. + // Ports above 1024 are assumed to be OK to use. + if (port == 53 || port == 80 || port == 443 || port >= 1024) { + return true; + } + // Allow any port if relevant field trial is set. This allows disabling the + // check. 
+ if (webrtc::field_trial::IsEnabled("WebRTC-Turn-AllowSystemPorts")) { + return true; + } + return false; +} + void TurnPort::OnMessage(rtc::Message* message) { switch (message->message_id) { case MSG_ALLOCATE_ERROR: @@ -1274,10 +1289,12 @@ void TurnPort::ScheduleEntryDestruction(TurnEntry* entry) { RTC_DCHECK(!entry->destruction_timestamp().has_value()); int64_t timestamp = rtc::TimeMillis(); entry->set_destruction_timestamp(timestamp); - invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, thread(), - rtc::Bind(&TurnPort::DestroyEntryIfNotCancelled, this, entry, timestamp), - TURN_PERMISSION_TIMEOUT); + thread()->PostDelayedTask(ToQueuedTask(task_safety_.flag(), + [this, entry, timestamp] { + DestroyEntryIfNotCancelled( + entry, timestamp); + }), + TURN_PERMISSION_TIMEOUT); } bool TurnPort::SetEntryChannelId(const rtc::SocketAddress& address, diff --git a/p2p/base/turn_port.h b/p2p/base/turn_port.h index 8247dbc777..55dbda5ece 100644 --- a/p2p/base/turn_port.h +++ b/p2p/base/turn_port.h @@ -23,9 +23,10 @@ #include "absl/memory/memory.h" #include "p2p/base/port.h" #include "p2p/client/basic_port_allocator.h" -#include "rtc_base/async_invoker.h" #include "rtc_base/async_packet_socket.h" +#include "rtc_base/async_resolver_interface.h" #include "rtc_base/ssl_certificate.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" namespace webrtc { class TurnCustomizer; @@ -33,6 +34,8 @@ class TurnCustomizer; namespace cricket { +const int kMaxTurnUsernameLength = 509; // RFC 8489 section 14.3 + extern const int STUN_ATTR_TURN_LOGGING_ID; extern const char TURN_PORT_TYPE[]; class TurnAllocateRequest; @@ -61,6 +64,18 @@ class TurnPort : public Port { int server_priority, const std::string& origin, webrtc::TurnCustomizer* customizer) { + // Do basic parameter validation. 
+ if (credentials.username.size() > kMaxTurnUsernameLength) { + RTC_LOG(LS_ERROR) << "Attempt to use TURN with a too long username " + << "of length " << credentials.username.size(); + return nullptr; + } + // Do not connect to low-numbered ports. The default STUN port is 3478. + if (!AllowedTurnPort(server_address.address.port())) { + RTC_LOG(LS_ERROR) << "Attempt to use TURN to connect to port " + << server_address.address.port(); + return nullptr; + } // Using `new` to access a non-public constructor. return absl::WrapUnique(new TurnPort( thread, factory, network, socket, username, password, server_address, @@ -102,6 +117,18 @@ class TurnPort : public Port { const std::vector& tls_elliptic_curves, webrtc::TurnCustomizer* customizer, rtc::SSLCertificateVerifier* tls_cert_verifier = nullptr) { + // Do basic parameter validation. + if (credentials.username.size() > kMaxTurnUsernameLength) { + RTC_LOG(LS_ERROR) << "Attempt to use TURN with a too long username " + << "of length " << credentials.username.size(); + return nullptr; + } + // Do not connect to low-numbered ports. The default STUN port is 3478. + if (!AllowedTurnPort(server_address.address.port())) { + RTC_LOG(LS_ERROR) << "Attempt to use TURN to connect to port " + << server_address.address.port(); + return nullptr; + } // Using `new` to access a non-public constructor. return absl::WrapUnique( new TurnPort(thread, factory, network, min_port, max_port, username, @@ -200,9 +227,6 @@ class TurnPort : public Port { rtc::AsyncPacketSocket* socket() const { return socket_; } - // For testing only. - rtc::AsyncInvoker* invoker() { return &invoker_; } - // Signal with resolved server address. // Parameters are port, server address and resolved server address. // This signal will be sent only if server address is resolved successfully. 
@@ -285,6 +309,7 @@ class TurnPort : public Port { typedef std::map SocketOptionsMap; typedef std::set AttemptedServerSet; + static bool AllowedTurnPort(int port); void OnMessage(rtc::Message* pmsg) override; bool CreateTurnClientSocket(); @@ -387,8 +412,6 @@ class TurnPort : public Port { // The number of retries made due to allocate mismatch error. size_t allocate_mismatch_retries_; - rtc::AsyncInvoker invoker_; - // Optional TurnCustomizer that can modify outgoing messages. Once set, this // must outlive the TurnPort's lifetime. webrtc::TurnCustomizer* turn_customizer_ = nullptr; @@ -401,6 +424,8 @@ class TurnPort : public Port { // to be more easy to work with. std::string turn_logging_id_; + webrtc::ScopedTaskSafety task_safety_; + friend class TurnEntry; friend class TurnAllocateRequest; friend class TurnRefreshRequest; diff --git a/p2p/base/turn_port_unittest.cc b/p2p/base/turn_port_unittest.cc index 1f5a7bf49e..6d396ad520 100644 --- a/p2p/base/turn_port_unittest.cc +++ b/p2p/base/turn_port_unittest.cc @@ -41,6 +41,7 @@ #include "rtc_base/thread.h" #include "rtc_base/time_utils.h" #include "rtc_base/virtual_socket_server.h" +#include "test/field_trial.h" #include "test/gtest.h" using rtc::SocketAddress; @@ -58,6 +59,15 @@ static const SocketAddress kTurnTcpIntAddr("99.99.99.4", static const SocketAddress kTurnUdpExtAddr("99.99.99.5", 0); static const SocketAddress kTurnAlternateIntAddr("99.99.99.6", cricket::TURN_SERVER_PORT); +// Port for redirecting to a TCP Web server. Should not work. +static const SocketAddress kTurnDangerousAddr("99.99.99.7", 81); +// Port 53 (the DNS port); should work. +static const SocketAddress kTurnPort53Addr("99.99.99.7", 53); +// Port 80 (the HTTP port); should work. +static const SocketAddress kTurnPort80Addr("99.99.99.7", 80); +// Port 443 (the HTTPS port); should work. +static const SocketAddress kTurnPort443Addr("99.99.99.7", 443); +// The default TURN server port. 
static const SocketAddress kTurnIntAddr("99.99.99.7", cricket::TURN_SERVER_PORT); static const SocketAddress kTurnIPv6IntAddr( @@ -94,6 +104,15 @@ static const cricket::ProtocolAddress kTurnTlsProtoAddr(kTurnTcpIntAddr, cricket::PROTO_TLS); static const cricket::ProtocolAddress kTurnUdpIPv6ProtoAddr(kTurnUdpIPv6IntAddr, cricket::PROTO_UDP); +static const cricket::ProtocolAddress kTurnDangerousProtoAddr( + kTurnDangerousAddr, + cricket::PROTO_TCP); +static const cricket::ProtocolAddress kTurnPort53ProtoAddr(kTurnPort53Addr, + cricket::PROTO_TCP); +static const cricket::ProtocolAddress kTurnPort80ProtoAddr(kTurnPort80Addr, + cricket::PROTO_TCP); +static const cricket::ProtocolAddress kTurnPort443ProtoAddr(kTurnPort443Addr, + cricket::PROTO_TCP); static const unsigned int MSG_TESTFINISH = 0; @@ -148,7 +167,7 @@ class TestConnectionWrapper : public sigslot::has_slots<> { // (between local port and TURN server) of kSimulatedRtt. class TurnPortTest : public ::testing::Test, public sigslot::has_slots<>, - public rtc::MessageHandler { + public rtc::MessageHandlerAutoCleanup { public: TurnPortTest() : ss_(new TurnPortTestVirtualSocketServer()), @@ -236,43 +255,43 @@ class TurnPortTest : public ::testing::Test, return &networks_.back(); } - void CreateTurnPort(const std::string& username, + bool CreateTurnPort(const std::string& username, const std::string& password, const ProtocolAddress& server_address) { - CreateTurnPortWithAllParams(MakeNetwork(kLocalAddr1), username, password, - server_address, std::string()); + return CreateTurnPortWithAllParams(MakeNetwork(kLocalAddr1), username, + password, server_address, std::string()); } - void CreateTurnPort(const rtc::SocketAddress& local_address, + bool CreateTurnPort(const rtc::SocketAddress& local_address, const std::string& username, const std::string& password, const ProtocolAddress& server_address) { - CreateTurnPortWithAllParams(MakeNetwork(local_address), username, password, - server_address, std::string()); + return 
CreateTurnPortWithAllParams(MakeNetwork(local_address), username, + password, server_address, std::string()); } // Should be identical to CreateTurnPort but specifies an origin value // when creating the instance of TurnPort. - void CreateTurnPortWithOrigin(const rtc::SocketAddress& local_address, + bool CreateTurnPortWithOrigin(const rtc::SocketAddress& local_address, const std::string& username, const std::string& password, const ProtocolAddress& server_address, const std::string& origin) { - CreateTurnPortWithAllParams(MakeNetwork(local_address), username, password, - server_address, origin); + return CreateTurnPortWithAllParams(MakeNetwork(local_address), username, + password, server_address, origin); } - void CreateTurnPortWithNetwork(rtc::Network* network, + bool CreateTurnPortWithNetwork(rtc::Network* network, const std::string& username, const std::string& password, const ProtocolAddress& server_address) { - CreateTurnPortWithAllParams(network, username, password, server_address, - std::string()); + return CreateTurnPortWithAllParams(network, username, password, + server_address, std::string()); } // Version of CreateTurnPort that takes all possible parameters; all other // helper methods call this, such that "SetIceRole" and "ConnectSignals" (and // possibly other things in the future) only happen in one place. - void CreateTurnPortWithAllParams(rtc::Network* network, + bool CreateTurnPortWithAllParams(rtc::Network* network, const std::string& username, const std::string& password, const ProtocolAddress& server_address, @@ -281,6 +300,9 @@ class TurnPortTest : public ::testing::Test, turn_port_ = TurnPort::Create( &main_, &socket_factory_, network, 0, 0, kIceUfrag1, kIcePwd1, server_address, credentials, 0, origin, {}, {}, turn_customizer_.get()); + if (!turn_port_) { + return false; + } // This TURN port will be the controlling. 
turn_port_->SetIceRole(ICEROLE_CONTROLLING); ConnectSignals(); @@ -292,6 +314,7 @@ class TurnPortTest : public ::testing::Test, turn_port_->SetTlsCertPolicy( TlsCertPolicy::TLS_CERT_POLICY_INSECURE_NO_CHECK); } + return true; } void CreateSharedTurnPort(const std::string& username, @@ -331,8 +354,8 @@ class TurnPortTest : public ::testing::Test, this, &TurnPortTest::OnTurnRefreshResult); turn_port_->SignalTurnPortClosed.connect(this, &TurnPortTest::OnTurnPortClosed); - turn_port_->SignalDestroyed.connect(this, - &TurnPortTest::OnTurnPortDestroyed); + turn_port_->SubscribePortDestroyed( + [this](PortInterface* port) { OnTurnPortDestroyed(port); }); } void CreateUdpPort() { CreateUdpPort(kLocalAddr2); } @@ -611,6 +634,11 @@ class TurnPortTest : public ::testing::Test, Port::ORIGIN_MESSAGE); Connection* conn2 = turn_port_->CreateConnection(udp_port_->Candidates()[0], Port::ORIGIN_MESSAGE); + + // Increased to 10 minutes, to ensure that the TurnEntry times out before + // the TurnPort. + turn_port_->set_timeout_delay(10 * 60 * 1000); + ASSERT_TRUE(conn2 != NULL); ASSERT_TRUE_SIMULATED_WAIT(turn_create_permission_success_, kSimulatedRtt, fake_clock_); @@ -627,11 +655,11 @@ class TurnPortTest : public ::testing::Test, EXPECT_TRUE_SIMULATED_WAIT(turn_unknown_address_, kSimulatedRtt, fake_clock_); - // Flush all requests in the invoker to destroy the TurnEntry. + // Wait for TurnEntry to expire. Timeout is 5 minutes. // Expect that it still processes an incoming ping and signals the // unknown address. 
turn_unknown_address_ = false; - turn_port_->invoker()->Flush(rtc::Thread::Current()); + fake_clock_.AdvanceTime(webrtc::TimeDelta::Seconds(5 * 60)); conn1->Ping(0); EXPECT_TRUE_SIMULATED_WAIT(turn_unknown_address_, kSimulatedRtt, fake_clock_); @@ -930,7 +958,7 @@ TEST_F(TurnPortTest, EXPECT_TRUE_SIMULATED_WAIT(turn_error_, kSimulatedRtt, fake_clock_); EXPECT_EQ_SIMULATED_WAIT(error_event_.error_code, STUN_ERROR_GLOBAL_FAILURE, kSimulatedRtt, fake_clock_); - ASSERT_NE(error_event_.error_text.find("."), std::string::npos); + ASSERT_NE(error_event_.error_text.find('.'), std::string::npos); ASSERT_NE(error_event_.address.find(kLocalAddr2.HostAsSensitiveURIString()), std::string::npos); ASSERT_NE(error_event_.port, 0); @@ -1774,4 +1802,65 @@ TEST_F(TurnPortTest, TestTurnCustomizerAddAttribute) { turn_port_.reset(nullptr); } +TEST_F(TurnPortTest, TestOverlongUsername) { + std::string overlong_username(513, 'x'); + RelayCredentials credentials(overlong_username, kTurnPassword); + EXPECT_FALSE( + CreateTurnPort(overlong_username, kTurnPassword, kTurnTlsProtoAddr)); +} + +TEST_F(TurnPortTest, TestTurnDangerousServer) { + CreateTurnPort(kTurnUsername, kTurnPassword, kTurnDangerousProtoAddr); + ASSERT_FALSE(turn_port_); +} + +TEST_F(TurnPortTest, TestTurnDangerousServerPermits53) { + CreateTurnPort(kTurnUsername, kTurnPassword, kTurnPort53ProtoAddr); + ASSERT_TRUE(turn_port_); +} + +TEST_F(TurnPortTest, TestTurnDangerousServerPermits80) { + CreateTurnPort(kTurnUsername, kTurnPassword, kTurnPort80ProtoAddr); + ASSERT_TRUE(turn_port_); +} + +TEST_F(TurnPortTest, TestTurnDangerousServerPermits443) { + CreateTurnPort(kTurnUsername, kTurnPassword, kTurnPort443ProtoAddr); + ASSERT_TRUE(turn_port_); +} + +TEST_F(TurnPortTest, TestTurnDangerousAlternateServer) { + const ProtocolType protocol_type = PROTO_TCP; + std::vector redirect_addresses; + redirect_addresses.push_back(kTurnDangerousAddr); + + TestTurnRedirector redirector(redirect_addresses); + + 
turn_server_.AddInternalSocket(kTurnIntAddr, protocol_type); + turn_server_.AddInternalSocket(kTurnDangerousAddr, protocol_type); + turn_server_.set_redirect_hook(&redirector); + CreateTurnPort(kTurnUsername, kTurnPassword, + ProtocolAddress(kTurnIntAddr, protocol_type)); + + // Retrieve the address before we run the state machine. + const SocketAddress old_addr = turn_port_->server_address().address; + + turn_port_->PrepareAddress(); + // This should result in an error event. + EXPECT_TRUE_SIMULATED_WAIT(error_event_.error_code != 0, + TimeToGetAlternateTurnCandidate(protocol_type), + fake_clock_); + // but should NOT result in the port turning ready, and no candidates + // should be gathered. + EXPECT_FALSE(turn_ready_); + ASSERT_EQ(0U, turn_port_->Candidates().size()); +} + +TEST_F(TurnPortTest, TestTurnDangerousServerAllowedWithFieldTrial) { + webrtc::test::ScopedFieldTrials override_field_trials( + "WebRTC-Turn-AllowSystemPorts/Enabled/"); + CreateTurnPort(kTurnUsername, kTurnPassword, kTurnDangerousProtoAddr); + ASSERT_TRUE(turn_port_); +} + } // namespace cricket diff --git a/p2p/base/turn_server.cc b/p2p/base/turn_server.cc index 3a4784ac52..53f283bc96 100644 --- a/p2p/base/turn_server.cc +++ b/p2p/base/turn_server.cc @@ -15,10 +15,10 @@ #include #include "absl/algorithm/container.h" +#include "absl/memory/memory.h" #include "api/packet_socket_factory.h" #include "api/transport/stun.h" #include "p2p/base/async_stun_tcp_socket.h" -#include "rtc_base/bind.h" #include "rtc_base/byte_buffer.h" #include "rtc_base/checks.h" #include "rtc_base/helpers.h" @@ -26,6 +26,7 @@ #include "rtc_base/message_digest.h" #include "rtc_base/socket_adapters.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/thread.h" namespace cricket { @@ -59,7 +60,7 @@ enum { // Encapsulates a TURN permission. 
// The object is created when a create permission request is received by an // allocation, and self-deletes when its lifetime timer expires. -class TurnServerAllocation::Permission : public rtc::MessageHandler { +class TurnServerAllocation::Permission : public rtc::MessageHandlerAutoCleanup { public: Permission(rtc::Thread* thread, const rtc::IPAddress& peer); ~Permission() override; @@ -79,7 +80,7 @@ class TurnServerAllocation::Permission : public rtc::MessageHandler { // Encapsulates a TURN channel binding. // The object is created when a channel bind request is received by an // allocation, and self-deletes when its lifetime timer expires. -class TurnServerAllocation::Channel : public rtc::MessageHandler { +class TurnServerAllocation::Channel : public rtc::MessageHandlerAutoCleanup { public: Channel(rtc::Thread* thread, int id, const rtc::SocketAddress& peer); ~Channel() override; @@ -129,7 +130,7 @@ TurnServer::TurnServer(rtc::Thread* thread) enable_otu_nonce_(false) {} TurnServer::~TurnServer() { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); for (InternalSocketMap::iterator it = server_sockets_.begin(); it != server_sockets_.end(); ++it) { rtc::AsyncPacketSocket* socket = it->first; @@ -145,7 +146,7 @@ TurnServer::~TurnServer() { void TurnServer::AddInternalSocket(rtc::AsyncPacketSocket* socket, ProtocolType proto) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); RTC_DCHECK(server_sockets_.end() == server_sockets_.find(socket)); server_sockets_[socket] = proto; socket->SignalReadPacket.connect(this, &TurnServer::OnInternalPacket); @@ -153,7 +154,7 @@ void TurnServer::AddInternalSocket(rtc::AsyncPacketSocket* socket, void TurnServer::AddInternalServerSocket(rtc::AsyncSocket* socket, ProtocolType proto) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); RTC_DCHECK(server_listen_sockets_.end() == server_listen_sockets_.find(socket)); server_listen_sockets_[socket] = proto; @@ -163,20 
+164,19 @@ void TurnServer::AddInternalServerSocket(rtc::AsyncSocket* socket, void TurnServer::SetExternalSocketFactory( rtc::PacketSocketFactory* factory, const rtc::SocketAddress& external_addr) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); external_socket_factory_.reset(factory); external_addr_ = external_addr; } void TurnServer::OnNewInternalConnection(rtc::AsyncSocket* socket) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); RTC_DCHECK(server_listen_sockets_.find(socket) != server_listen_sockets_.end()); AcceptConnection(socket); } void TurnServer::AcceptConnection(rtc::AsyncSocket* server_socket) { - RTC_DCHECK(thread_checker_.IsCurrent()); // Check if someone is trying to connect to us. rtc::SocketAddress accept_addr; rtc::AsyncSocket* accepted_socket = server_socket->Accept(&accept_addr); @@ -193,7 +193,7 @@ void TurnServer::AcceptConnection(rtc::AsyncSocket* server_socket) { void TurnServer::OnInternalSocketClose(rtc::AsyncPacketSocket* socket, int err) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); DestroyInternalSocket(socket); } @@ -202,7 +202,7 @@ void TurnServer::OnInternalPacket(rtc::AsyncPacketSocket* socket, size_t size, const rtc::SocketAddress& addr, const int64_t& /* packet_time_us */) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); // Fail if the packet is too small to even contain a channel header. 
if (size < TURN_CHANNEL_HEADER_SIZE) { return; @@ -229,7 +229,6 @@ void TurnServer::OnInternalPacket(rtc::AsyncPacketSocket* socket, void TurnServer::HandleStunMessage(TurnServerConnection* conn, const char* data, size_t size) { - RTC_DCHECK(thread_checker_.IsCurrent()); TurnMessage msg; rtc::ByteBufferReader buf(data, size); if (!msg.Read(&buf) || (buf.Length() > 0)) { @@ -295,7 +294,6 @@ void TurnServer::HandleStunMessage(TurnServerConnection* conn, } bool TurnServer::GetKey(const StunMessage* msg, std::string* key) { - RTC_DCHECK(thread_checker_.IsCurrent()); const StunByteStringAttribute* username_attr = msg->GetByteString(STUN_ATTR_USERNAME); if (!username_attr) { @@ -307,11 +305,10 @@ bool TurnServer::GetKey(const StunMessage* msg, std::string* key) { } bool TurnServer::CheckAuthorization(TurnServerConnection* conn, - const StunMessage* msg, + StunMessage* msg, const char* data, size_t size, const std::string& key) { - RTC_DCHECK(thread_checker_.IsCurrent()); // RFC 5389, 10.2.2. RTC_DCHECK(IsStunRequestType(msg->type())); const StunByteStringAttribute* mi_attr = @@ -323,14 +320,14 @@ bool TurnServer::CheckAuthorization(TurnServerConnection* conn, const StunByteStringAttribute* nonce_attr = msg->GetByteString(STUN_ATTR_NONCE); - // Fail if no M-I. + // Fail if no MESSAGE_INTEGRITY. if (!mi_attr) { SendErrorResponseWithRealmAndNonce(conn, msg, STUN_ERROR_UNAUTHORIZED, STUN_ERROR_REASON_UNAUTHORIZED); return false; } - // Fail if there is M-I but no username, nonce, or realm. + // Fail if there is MESSAGE_INTEGRITY but no username, nonce, or realm. if (!username_attr || !realm_attr || !nonce_attr) { SendErrorResponse(conn, msg, STUN_ERROR_BAD_REQUEST, STUN_ERROR_REASON_BAD_REQUEST); @@ -344,9 +341,9 @@ bool TurnServer::CheckAuthorization(TurnServerConnection* conn, return false; } - // Fail if bad username or M-I. - // We need |data| and |size| for the call to ValidateMessageIntegrity. 
- if (key.empty() || !StunMessage::ValidateMessageIntegrity(data, size, key)) { + // Fail if bad MESSAGE_INTEGRITY. + if (key.empty() || msg->ValidateMessageIntegrity(key) != + StunMessage::IntegrityStatus::kIntegrityOk) { SendErrorResponseWithRealmAndNonce(conn, msg, STUN_ERROR_UNAUTHORIZED, STUN_ERROR_REASON_UNAUTHORIZED); return false; @@ -370,7 +367,6 @@ bool TurnServer::CheckAuthorization(TurnServerConnection* conn, void TurnServer::HandleBindingRequest(TurnServerConnection* conn, const StunMessage* req) { - RTC_DCHECK(thread_checker_.IsCurrent()); StunMessage response; InitResponse(req, &response); @@ -385,7 +381,6 @@ void TurnServer::HandleBindingRequest(TurnServerConnection* conn, void TurnServer::HandleAllocateRequest(TurnServerConnection* conn, const TurnMessage* msg, const std::string& key) { - RTC_DCHECK(thread_checker_.IsCurrent()); // Check the parameters in the request. const StunUInt32Attribute* transport_attr = msg->GetUInt32(STUN_ATTR_REQUESTED_TRANSPORT); @@ -415,7 +410,6 @@ void TurnServer::HandleAllocateRequest(TurnServerConnection* conn, } std::string TurnServer::GenerateNonce(int64_t now) const { - RTC_DCHECK(thread_checker_.IsCurrent()); // Generate a nonce of the form hex(now + HMAC-MD5(nonce_key_, now)) std::string input(reinterpret_cast(&now), sizeof(now)); std::string nonce = rtc::hex_encode(input.c_str(), input.size()); @@ -426,7 +420,6 @@ std::string TurnServer::GenerateNonce(int64_t now) const { } bool TurnServer::ValidateNonce(const std::string& nonce) const { - RTC_DCHECK(thread_checker_.IsCurrent()); // Check the size. if (nonce.size() != kNonceSize) { return false; @@ -453,7 +446,6 @@ bool TurnServer::ValidateNonce(const std::string& nonce) const { } TurnServerAllocation* TurnServer::FindAllocation(TurnServerConnection* conn) { - RTC_DCHECK(thread_checker_.IsCurrent()); AllocationMap::const_iterator it = allocations_.find(*conn); return (it != allocations_.end()) ? 
it->second.get() : nullptr; } @@ -461,7 +453,6 @@ TurnServerAllocation* TurnServer::FindAllocation(TurnServerConnection* conn) { TurnServerAllocation* TurnServer::CreateAllocation(TurnServerConnection* conn, int proto, const std::string& key) { - RTC_DCHECK(thread_checker_.IsCurrent()); rtc::AsyncPacketSocket* external_socket = (external_socket_factory_) ? external_socket_factory_->CreateUdpSocket(external_addr_, 0, 0) @@ -482,7 +473,7 @@ void TurnServer::SendErrorResponse(TurnServerConnection* conn, const StunMessage* req, int code, const std::string& reason) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); TurnMessage resp; InitErrorResponse(req, code, reason, &resp); RTC_LOG(LS_INFO) << "Sending error response, type=" << resp.type() @@ -494,7 +485,6 @@ void TurnServer::SendErrorResponseWithRealmAndNonce(TurnServerConnection* conn, const StunMessage* msg, int code, const std::string& reason) { - RTC_DCHECK(thread_checker_.IsCurrent()); TurnMessage resp; InitErrorResponse(msg, code, reason, &resp); @@ -514,7 +504,6 @@ void TurnServer::SendErrorResponseWithAlternateServer( TurnServerConnection* conn, const StunMessage* msg, const rtc::SocketAddress& addr) { - RTC_DCHECK(thread_checker_.IsCurrent()); TurnMessage resp; InitErrorResponse(msg, STUN_ERROR_TRY_ALTERNATE, STUN_ERROR_REASON_TRY_ALTERNATE_SERVER, &resp); @@ -524,7 +513,7 @@ void TurnServer::SendErrorResponseWithAlternateServer( } void TurnServer::SendStun(TurnServerConnection* conn, StunMessage* msg) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); rtc::ByteBufferWriter buf; // Add a SOFTWARE attribute if one is set. 
if (!software_.empty()) { @@ -537,13 +526,12 @@ void TurnServer::SendStun(TurnServerConnection* conn, StunMessage* msg) { void TurnServer::Send(TurnServerConnection* conn, const rtc::ByteBufferWriter& buf) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); rtc::PacketOptions options; conn->socket()->SendTo(buf.Data(), buf.Length(), conn->src(), options); } void TurnServer::OnAllocationDestroyed(TurnServerAllocation* allocation) { - RTC_DCHECK(thread_checker_.IsCurrent()); // Removing the internal socket if the connection is not udp. rtc::AsyncPacketSocket* socket = allocation->conn()->socket(); InternalSocketMap::iterator iter = server_sockets_.find(socket); @@ -563,27 +551,21 @@ void TurnServer::OnAllocationDestroyed(TurnServerAllocation* allocation) { } void TurnServer::DestroyInternalSocket(rtc::AsyncPacketSocket* socket) { - RTC_DCHECK(thread_checker_.IsCurrent()); InternalSocketMap::iterator iter = server_sockets_.find(socket); if (iter != server_sockets_.end()) { rtc::AsyncPacketSocket* socket = iter->first; socket->SignalReadPacket.disconnect(this); server_sockets_.erase(iter); + std::unique_ptr socket_to_delete = + absl::WrapUnique(socket); // We must destroy the socket async to avoid invalidating the sigslot // callback list iterator inside a sigslot callback. (In other words, // deleting an object from within a callback from that object). 
- sockets_to_delete_.push_back( - std::unique_ptr(socket)); - invoker_.AsyncInvoke(RTC_FROM_HERE, rtc::Thread::Current(), - rtc::Bind(&TurnServer::FreeSockets, this)); + thread_->PostTask(webrtc::ToQueuedTask( + [socket_to_delete = std::move(socket_to_delete)] {})); } } -void TurnServer::FreeSockets() { - RTC_DCHECK(thread_checker_.IsCurrent()); - sockets_to_delete_.clear(); -} - TurnServerConnection::TurnServerConnection(const rtc::SocketAddress& src, ProtocolType proto, rtc::AsyncPacketSocket* socket) diff --git a/p2p/base/turn_server.h b/p2p/base/turn_server.h index 0f4fefea84..f90c3dac0d 100644 --- a/p2p/base/turn_server.h +++ b/p2p/base/turn_server.h @@ -19,13 +19,12 @@ #include #include +#include "api/sequence_checker.h" #include "p2p/base/port_interface.h" -#include "rtc_base/async_invoker.h" #include "rtc_base/async_packet_socket.h" #include "rtc_base/socket_address.h" #include "rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" namespace rtc { class ByteBufferWriter; @@ -66,7 +65,7 @@ class TurnServerConnection { // handles TURN messages (via HandleTurnMessage) and channel data messages // (via HandleChannelData) for this allocation when received by the server. // The object self-deletes and informs the server if its lifetime timer expires. 
-class TurnServerAllocation : public rtc::MessageHandler, +class TurnServerAllocation : public rtc::MessageHandlerAutoCleanup, public sigslot::has_slots<> { public: TurnServerAllocation(TurnServer* server_, @@ -129,8 +128,8 @@ class TurnServerAllocation : public rtc::MessageHandler, void OnChannelDestroyed(Channel* channel); void OnMessage(rtc::Message* msg) override; - TurnServer* server_; - rtc::Thread* thread_; + TurnServer* const server_; + rtc::Thread* const thread_; TurnServerConnection conn_; std::unique_ptr external_socket_; std::string key_; @@ -183,53 +182,53 @@ class TurnServer : public sigslot::has_slots<> { // Gets/sets the realm value to use for the server. const std::string& realm() const { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); return realm_; } void set_realm(const std::string& realm) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); realm_ = realm; } // Gets/sets the value for the SOFTWARE attribute for TURN messages. const std::string& software() const { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); return software_; } void set_software(const std::string& software) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); software_ = software; } const AllocationMap& allocations() const { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); return allocations_; } // Sets the authentication callback; does not take ownership. 
void set_auth_hook(TurnAuthInterface* auth_hook) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); auth_hook_ = auth_hook; } void set_redirect_hook(TurnRedirectInterface* redirect_hook) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); redirect_hook_ = redirect_hook; } void set_enable_otu_nonce(bool enable) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); enable_otu_nonce_ = enable; } // If set to true, reject CreatePermission requests to RFC1918 addresses. void set_reject_private_addresses(bool filter) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); reject_private_addresses_ = filter; } void set_enable_permission_checks(bool enable) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); enable_permission_checks_ = enable; } @@ -244,18 +243,22 @@ class TurnServer : public sigslot::has_slots<> { const rtc::SocketAddress& address); // For testing only. std::string SetTimestampForNextNonce(int64_t timestamp) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); ts_for_next_nonce_ = timestamp; return GenerateNonce(timestamp); } void SetStunMessageObserver(std::unique_ptr observer) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); stun_message_observer_ = std::move(observer); } private: - std::string GenerateNonce(int64_t now) const; + // All private member functions and variables should have access restricted to + // thread_. But compile-time annotations are missing for members access from + // TurnServerAllocation (via friend declaration), and the On* methods, which + // are called via sigslot. 
+ std::string GenerateNonce(int64_t now) const RTC_RUN_ON(thread_); void OnInternalPacket(rtc::AsyncPacketSocket* socket, const char* data, size_t size, @@ -265,29 +268,32 @@ class TurnServer : public sigslot::has_slots<> { void OnNewInternalConnection(rtc::AsyncSocket* socket); // Accept connections on this server socket. - void AcceptConnection(rtc::AsyncSocket* server_socket); + void AcceptConnection(rtc::AsyncSocket* server_socket) RTC_RUN_ON(thread_); void OnInternalSocketClose(rtc::AsyncPacketSocket* socket, int err); void HandleStunMessage(TurnServerConnection* conn, const char* data, - size_t size); - void HandleBindingRequest(TurnServerConnection* conn, const StunMessage* msg); + size_t size) RTC_RUN_ON(thread_); + void HandleBindingRequest(TurnServerConnection* conn, const StunMessage* msg) + RTC_RUN_ON(thread_); void HandleAllocateRequest(TurnServerConnection* conn, const TurnMessage* msg, - const std::string& key); + const std::string& key) RTC_RUN_ON(thread_); - bool GetKey(const StunMessage* msg, std::string* key); + bool GetKey(const StunMessage* msg, std::string* key) RTC_RUN_ON(thread_); bool CheckAuthorization(TurnServerConnection* conn, - const StunMessage* msg, + StunMessage* msg, const char* data, size_t size, - const std::string& key); - bool ValidateNonce(const std::string& nonce) const; + const std::string& key) RTC_RUN_ON(thread_); + bool ValidateNonce(const std::string& nonce) const RTC_RUN_ON(thread_); - TurnServerAllocation* FindAllocation(TurnServerConnection* conn); + TurnServerAllocation* FindAllocation(TurnServerConnection* conn) + RTC_RUN_ON(thread_); TurnServerAllocation* CreateAllocation(TurnServerConnection* conn, int proto, - const std::string& key); + const std::string& key) + RTC_RUN_ON(thread_); void SendErrorResponse(TurnServerConnection* conn, const StunMessage* req, @@ -297,55 +303,53 @@ class TurnServer : public sigslot::has_slots<> { void SendErrorResponseWithRealmAndNonce(TurnServerConnection* conn, const StunMessage* 
req, int code, - const std::string& reason); + const std::string& reason) + RTC_RUN_ON(thread_); void SendErrorResponseWithAlternateServer(TurnServerConnection* conn, const StunMessage* req, - const rtc::SocketAddress& addr); + const rtc::SocketAddress& addr) + RTC_RUN_ON(thread_); void SendStun(TurnServerConnection* conn, StunMessage* msg); void Send(TurnServerConnection* conn, const rtc::ByteBufferWriter& buf); - void OnAllocationDestroyed(TurnServerAllocation* allocation); - void DestroyInternalSocket(rtc::AsyncPacketSocket* socket); - - // Just clears |sockets_to_delete_|; called asynchronously. - void FreeSockets(); + void OnAllocationDestroyed(TurnServerAllocation* allocation) + RTC_RUN_ON(thread_); + void DestroyInternalSocket(rtc::AsyncPacketSocket* socket) + RTC_RUN_ON(thread_); typedef std::map InternalSocketMap; typedef std::map ServerSocketMap; - rtc::Thread* thread_; - rtc::ThreadChecker thread_checker_; - std::string nonce_key_; - std::string realm_; - std::string software_; - TurnAuthInterface* auth_hook_; - TurnRedirectInterface* redirect_hook_; + rtc::Thread* const thread_; + const std::string nonce_key_; + std::string realm_ RTC_GUARDED_BY(thread_); + std::string software_ RTC_GUARDED_BY(thread_); + TurnAuthInterface* auth_hook_ RTC_GUARDED_BY(thread_); + TurnRedirectInterface* redirect_hook_ RTC_GUARDED_BY(thread_); // otu - one-time-use. Server will respond with 438 if it's // sees the same nonce in next transaction. - bool enable_otu_nonce_; + bool enable_otu_nonce_ RTC_GUARDED_BY(thread_); bool reject_private_addresses_ = false; // Check for permission when receiving an external packet. bool enable_permission_checks_ = true; - InternalSocketMap server_sockets_; - ServerSocketMap server_listen_sockets_; - // Used when we need to delete a socket asynchronously. 
- std::vector> sockets_to_delete_; - std::unique_ptr external_socket_factory_; - rtc::SocketAddress external_addr_; - - AllocationMap allocations_; + InternalSocketMap server_sockets_ RTC_GUARDED_BY(thread_); + ServerSocketMap server_listen_sockets_ RTC_GUARDED_BY(thread_); + std::unique_ptr external_socket_factory_ + RTC_GUARDED_BY(thread_); + rtc::SocketAddress external_addr_ RTC_GUARDED_BY(thread_); - rtc::AsyncInvoker invoker_; + AllocationMap allocations_ RTC_GUARDED_BY(thread_); // For testing only. If this is non-zero, the next NONCE will be generated // from this value, and it will be reset to 0 after generating the NONCE. - int64_t ts_for_next_nonce_ = 0; + int64_t ts_for_next_nonce_ RTC_GUARDED_BY(thread_) = 0; // For testing only. Used to observe STUN messages received. - std::unique_ptr stun_message_observer_; + std::unique_ptr stun_message_observer_ + RTC_GUARDED_BY(thread_); friend class TurnServerAllocation; }; diff --git a/p2p/client/basic_port_allocator.cc b/p2p/client/basic_port_allocator.cc index bb640d9498..1d38a4c19f 100644 --- a/p2p/client/basic_port_allocator.cc +++ b/p2p/client/basic_port_allocator.cc @@ -12,12 +12,14 @@ #include #include +#include #include #include #include #include #include "absl/algorithm/container.h" +#include "absl/memory/memory.h" #include "p2p/base/basic_packet_socket_factory.h" #include "p2p/base/port.h" #include "p2p/base/stun_port.h" @@ -27,6 +29,8 @@ #include "rtc_base/checks.h" #include "rtc_base/helpers.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" @@ -35,15 +39,6 @@ using rtc::CreateRandomId; namespace cricket { namespace { -enum { - MSG_CONFIG_START, - MSG_CONFIG_READY, - MSG_ALLOCATE, - MSG_ALLOCATION_PHASE, - MSG_SEQUENCEOBJECTS_CREATED, - MSG_CONFIG_STOP, -}; - const int PHASE_UDP = 0; const int PHASE_RELAY = 1; const int PHASE_TCP = 2; @@ 
-268,16 +263,18 @@ BasicPortAllocatorSession::BasicPortAllocatorSession( network_manager_started_(false), allocation_sequences_created_(false), turn_port_prune_policy_(allocator->turn_port_prune_policy()) { + TRACE_EVENT0("webrtc", + "BasicPortAllocatorSession::BasicPortAllocatorSession"); allocator_->network_manager()->SignalNetworksChanged.connect( this, &BasicPortAllocatorSession::OnNetworksChanged); allocator_->network_manager()->StartUpdating(); } BasicPortAllocatorSession::~BasicPortAllocatorSession() { + TRACE_EVENT0("webrtc", + "BasicPortAllocatorSession::~BasicPortAllocatorSession"); RTC_DCHECK_RUN_ON(network_thread_); allocator_->network_manager()->StopUpdating(); - if (network_thread_ != NULL) - network_thread_->Clear(this); for (uint32_t i = 0; i < sequences_.size(); ++i) { // AllocationSequence should clear it's map entry for turn ports before @@ -289,8 +286,7 @@ BasicPortAllocatorSession::~BasicPortAllocatorSession() { for (it = ports_.begin(); it != ports_.end(); it++) delete it->port(); - for (uint32_t i = 0; i < configs_.size(); ++i) - delete configs_[i]; + configs_.clear(); for (uint32_t i = 0; i < sequences_.size(); ++i) delete sequences_[i]; @@ -370,7 +366,8 @@ void BasicPortAllocatorSession::StartGettingPorts() { socket_factory_ = owned_socket_factory_.get(); } - network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_START); + network_thread_->PostTask(webrtc::ToQueuedTask( + network_safety_, [this] { GetPortConfigurations(); })); RTC_LOG(LS_INFO) << "Start getting ports with turn_port_prune_policy " << turn_port_prune_policy_; @@ -386,11 +383,12 @@ void BasicPortAllocatorSession::StopGettingPorts() { void BasicPortAllocatorSession::ClearGettingPorts() { RTC_DCHECK_RUN_ON(network_thread_); - network_thread_->Clear(this, MSG_ALLOCATE); + ++allocation_epoch_; for (uint32_t i = 0; i < sequences_.size(); ++i) { sequences_[i]->Stop(); } - network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_STOP); + network_thread_->PostTask( + 
webrtc::ToQueuedTask(network_safety_, [this] { OnConfigStop(); })); state_ = SessionState::CLEARED; } @@ -489,8 +487,10 @@ void BasicPortAllocatorSession::GetCandidateStatsFromReadyPorts( for (auto* port : ports) { auto candidates = port->Candidates(); for (const auto& candidate : candidates) { - CandidateStats candidate_stats(allocator_->SanitizeCandidate(candidate)); - port->GetStunStats(&candidate_stats.stun_stats); + absl::optional stun_stats; + port->GetStunStats(&stun_stats); + CandidateStats candidate_stats(allocator_->SanitizeCandidate(candidate), + std::move(stun_stats)); candidate_stats_list->push_back(std::move(candidate_stats)); } } @@ -574,28 +574,6 @@ bool BasicPortAllocatorSession::CandidatesAllocationDone() const { ports_, [](const PortData& port) { return port.inprogress(); }); } -void BasicPortAllocatorSession::OnMessage(rtc::Message* message) { - switch (message->message_id) { - case MSG_CONFIG_START: - GetPortConfigurations(); - break; - case MSG_CONFIG_READY: - OnConfigReady(static_cast(message->pdata)); - break; - case MSG_ALLOCATE: - OnAllocate(); - break; - case MSG_SEQUENCEOBJECTS_CREATED: - OnAllocationSequenceObjectsCreated(); - break; - case MSG_CONFIG_STOP: - OnConfigStop(); - break; - default: - RTC_NOTREACHED(); - } -} - void BasicPortAllocatorSession::UpdateIceParametersInternal() { RTC_DCHECK_RUN_ON(network_thread_); for (PortData& port : ports_) { @@ -607,26 +585,35 @@ void BasicPortAllocatorSession::UpdateIceParametersInternal() { void BasicPortAllocatorSession::GetPortConfigurations() { RTC_DCHECK_RUN_ON(network_thread_); - PortConfiguration* config = - new PortConfiguration(allocator_->stun_servers(), username(), password()); + auto config = std::make_unique(allocator_->stun_servers(), + username(), password()); for (const RelayServerConfig& turn_server : allocator_->turn_servers()) { config->AddRelay(turn_server); } - ConfigReady(config); + ConfigReady(std::move(config)); } void 
BasicPortAllocatorSession::ConfigReady(PortConfiguration* config) { RTC_DCHECK_RUN_ON(network_thread_); - network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_READY, config); + ConfigReady(absl::WrapUnique(config)); +} + +void BasicPortAllocatorSession::ConfigReady( + std::unique_ptr config) { + RTC_DCHECK_RUN_ON(network_thread_); + network_thread_->PostTask(webrtc::ToQueuedTask( + network_safety_, [this, config = std::move(config)]() mutable { + OnConfigReady(std::move(config)); + })); } // Adds a configuration to the list. -void BasicPortAllocatorSession::OnConfigReady(PortConfiguration* config) { +void BasicPortAllocatorSession::OnConfigReady( + std::unique_ptr config) { RTC_DCHECK_RUN_ON(network_thread_); - if (config) { - configs_.push_back(config); - } + if (config) + configs_.push_back(std::move(config)); AllocatePorts(); } @@ -664,11 +651,16 @@ void BasicPortAllocatorSession::OnConfigStop() { void BasicPortAllocatorSession::AllocatePorts() { RTC_DCHECK_RUN_ON(network_thread_); - network_thread_->Post(RTC_FROM_HERE, this, MSG_ALLOCATE); + network_thread_->PostTask(webrtc::ToQueuedTask( + network_safety_, [this, allocation_epoch = allocation_epoch_] { + OnAllocate(allocation_epoch); + })); } -void BasicPortAllocatorSession::OnAllocate() { +void BasicPortAllocatorSession::OnAllocate(int allocation_epoch) { RTC_DCHECK_RUN_ON(network_thread_); + if (allocation_epoch != allocation_epoch_) + return; if (network_manager_started_ && !IsStopped()) { bool disable_equivalent_phases = true; @@ -774,7 +766,8 @@ void BasicPortAllocatorSession::DoAllocate(bool disable_equivalent) { done_signal_needed = true; } else { RTC_LOG(LS_INFO) << "Allocate ports on " << networks.size() << " networks"; - PortConfiguration* config = configs_.empty() ? nullptr : configs_.back(); + PortConfiguration* config = + configs_.empty() ? 
nullptr : configs_.back().get(); for (uint32_t i = 0; i < networks.size(); ++i) { uint32_t sequence_flags = flags(); if ((sequence_flags & DISABLE_ALL_PHASES) == DISABLE_ALL_PHASES) { @@ -814,9 +807,11 @@ void BasicPortAllocatorSession::DoAllocate(bool disable_equivalent) { } AllocationSequence* sequence = - new AllocationSequence(this, networks[i], config, sequence_flags); - sequence->SignalPortAllocationComplete.connect( - this, &BasicPortAllocatorSession::OnPortAllocationComplete); + new AllocationSequence(this, networks[i], config, sequence_flags, + [this, safety_flag = network_safety_.flag()] { + if (safety_flag->alive()) + OnPortAllocationComplete(); + }); sequence->Init(); sequence->Start(); sequences_.push_back(sequence); @@ -824,7 +819,8 @@ void BasicPortAllocatorSession::DoAllocate(bool disable_equivalent) { } } if (done_signal_needed) { - network_thread_->Post(RTC_FROM_HERE, this, MSG_SEQUENCEOBJECTS_CREATED); + network_thread_->PostTask(webrtc::ToQueuedTask( + network_safety_, [this] { OnAllocationSequenceObjectsCreated(); })); } } @@ -900,8 +896,9 @@ void BasicPortAllocatorSession::AddAllocatedPort(Port* port, this, &BasicPortAllocatorSession::OnCandidateError); port->SignalPortComplete.connect(this, &BasicPortAllocatorSession::OnPortComplete); - port->SignalDestroyed.connect(this, - &BasicPortAllocatorSession::OnPortDestroyed); + port->SubscribePortDestroyed( + [this](PortInterface* port) { OnPortDestroyed(port); }); + port->SignalPortError.connect(this, &BasicPortAllocatorSession::OnPortError); RTC_LOG(LS_INFO) << port->ToString() << ": Added port to allocator"; @@ -1127,8 +1124,7 @@ bool BasicPortAllocatorSession::CandidatePairable(const Candidate& c, !host_candidates_disabled); } -void BasicPortAllocatorSession::OnPortAllocationComplete( - AllocationSequence* seq) { +void BasicPortAllocatorSession::OnPortAllocationComplete() { RTC_DCHECK_RUN_ON(network_thread_); // Send candidate allocation complete signal if all ports are done. 
MaybeSignalCandidatesAllocationDone(); @@ -1219,10 +1215,12 @@ void BasicPortAllocatorSession::PrunePortsAndRemoveCandidates( // AllocationSequence -AllocationSequence::AllocationSequence(BasicPortAllocatorSession* session, - rtc::Network* network, - PortConfiguration* config, - uint32_t flags) +AllocationSequence::AllocationSequence( + BasicPortAllocatorSession* session, + rtc::Network* network, + PortConfiguration* config, + uint32_t flags, + std::function port_allocation_complete_callback) : session_(session), network_(network), config_(config), @@ -1230,7 +1228,9 @@ AllocationSequence::AllocationSequence(BasicPortAllocatorSession* session, flags_(flags), udp_socket_(), udp_port_(NULL), - phase_(0) {} + phase_(0), + port_allocation_complete_callback_( + std::move(port_allocation_complete_callback)) {} void AllocationSequence::Init() { if (IsFlagSet(PORTALLOCATOR_ENABLE_SHARED_SOCKET)) { @@ -1247,6 +1247,7 @@ void AllocationSequence::Init() { } void AllocationSequence::Clear() { + TRACE_EVENT0("webrtc", "AllocationSequence::Clear"); udp_port_ = NULL; relay_ports_.clear(); } @@ -1258,10 +1259,6 @@ void AllocationSequence::OnNetworkFailed() { Stop(); } -AllocationSequence::~AllocationSequence() { - session_->network_thread()->Clear(this); -} - void AllocationSequence::DisableEquivalentPhases(rtc::Network* network, PortConfiguration* config, uint32_t* flags) { @@ -1336,7 +1333,9 @@ void AllocationSequence::DisableEquivalentPhases(rtc::Network* network, void AllocationSequence::Start() { state_ = kRunning; - session_->network_thread()->Post(RTC_FROM_HERE, this, MSG_ALLOCATION_PHASE); + + session_->network_thread()->PostTask(webrtc::ToQueuedTask( + safety_, [this, epoch = epoch_] { Process(epoch); })); // Take a snapshot of the best IP, so that when DisableEquivalentPhases is // called next time, we enable all phases if the best IP has since changed. 
previous_best_ip_ = network_->GetBestIP(); @@ -1346,16 +1345,18 @@ void AllocationSequence::Stop() { // If the port is completed, don't set it to stopped. if (state_ == kRunning) { state_ = kStopped; - session_->network_thread()->Clear(this, MSG_ALLOCATION_PHASE); + // Cause further Process calls in the previous epoch to be ignored. + ++epoch_; } } -void AllocationSequence::OnMessage(rtc::Message* msg) { +void AllocationSequence::Process(int epoch) { RTC_DCHECK(rtc::Thread::Current() == session_->network_thread()); - RTC_DCHECK(msg->message_id == MSG_ALLOCATION_PHASE); - const char* const PHASE_NAMES[kNumPhases] = {"Udp", "Relay", "Tcp"}; + if (epoch != epoch_) + return; + // Perform all of the phases in the current step. RTC_LOG(LS_INFO) << network_->ToString() << ": Allocation Phase=" << PHASE_NAMES[phase_]; @@ -1381,14 +1382,16 @@ void AllocationSequence::OnMessage(rtc::Message* msg) { if (state() == kRunning) { ++phase_; - session_->network_thread()->PostDelayed(RTC_FROM_HERE, - session_->allocator()->step_delay(), - this, MSG_ALLOCATION_PHASE); + session_->network_thread()->PostDelayedTask( + webrtc::ToQueuedTask(safety_, + [this, epoch = epoch_] { Process(epoch); }), + session_->allocator()->step_delay()); } else { - // If all phases in AllocationSequence are completed, no allocation - // steps needed further. Canceling pending signal. - session_->network_thread()->Clear(this, MSG_ALLOCATION_PHASE); - SignalPortAllocationComplete(this); + // No allocation steps needed further if all phases in AllocationSequence + // are completed. Cause further Process calls in the previous epoch to be + // ignored. + ++epoch_; + port_allocation_complete_callback_(); } } @@ -1423,7 +1426,8 @@ void AllocationSequence::CreateUDPPorts() { // UDPPort. 
 if (IsFlagSet(PORTALLOCATOR_ENABLE_SHARED_SOCKET)) { udp_port_ = port.get(); - port->SignalDestroyed.connect(this, &AllocationSequence::OnPortDestroyed); + port->SubscribePortDestroyed( + [this](PortInterface* port) { OnPortDestroyed(port); }); // If STUN is not disabled, setting stun server address to port. if (!IsFlagSet(PORTALLOCATOR_DISABLE_STUN)) { @@ -1561,8 +1565,10 @@ void AllocationSequence::CreateTurnPort(const RelayServerConfig& config) { relay_ports_.push_back(port.get()); // Listen to the port destroyed signal, to allow AllocationSequence to - // remove entrt from it's map. - port->SignalDestroyed.connect(this, &AllocationSequence::OnPortDestroyed); + // remove the entry from its map. + port->SubscribePortDestroyed( + [this](PortInterface* port) { OnPortDestroyed(port); }); + } else { port = session_->allocator()->relay_port_factory()->Create( args, session_->allocator()->min_port(), @@ -1653,8 +1659,6 @@ PortConfiguration::PortConfiguration(const ServerAddresses& stun_servers, webrtc::field_trial::IsDisabled("WebRTC-UseTurnServerAsStunServer"); } -PortConfiguration::~PortConfiguration() = default; - ServerAddresses PortConfiguration::StunServers() { if (!stun_address.IsNil() && stun_servers.find(stun_address) == stun_servers.end()) { diff --git a/p2p/client/basic_port_allocator.h b/p2p/client/basic_port_allocator.h index b27016a1dc..77aceb1e9c 100644 --- a/p2p/client/basic_port_allocator.h +++ b/p2p/client/basic_port_allocator.h @@ -22,7 +22,9 @@ #include "rtc_base/checks.h" #include "rtc_base/network.h" #include "rtc_base/system/rtc_export.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace cricket { @@ -106,8 +108,9 @@ enum class SessionState { // process will be started. 
}; -class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, - public rtc::MessageHandler { +// This class is thread-compatible and assumes it's created, operated upon and +// destroyed on the network thread. +class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession { public: BasicPortAllocatorSession(BasicPortAllocator* allocator, const std::string& content_name, @@ -155,10 +158,11 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, // Adds a port configuration that is now ready. Once we have one for each // network (or a timeout occurs), we will start allocating ports. - virtual void ConfigReady(PortConfiguration* config); - - // MessageHandler. Can be overriden if message IDs do not conflict. - void OnMessage(rtc::Message* message) override; + void ConfigReady(std::unique_ptr config); + // TODO(bugs.webrtc.org/12840) Remove once unused in downstream projects. + ABSL_DEPRECATED( + "Use ConfigReady(std::unique_ptr) instead!") + void ConfigReady(PortConfiguration* config); private: class PortData { @@ -213,10 +217,10 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, State state_ = STATE_INPROGRESS; }; - void OnConfigReady(PortConfiguration* config); + void OnConfigReady(std::unique_ptr config); void OnConfigStop(); void AllocatePorts(); - void OnAllocate(); + void OnAllocate(int allocation_epoch); void DoAllocate(bool disable_equivalent_phases); void OnNetworksChanged(); void OnAllocationSequenceObjectsCreated(); @@ -233,7 +237,7 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, void OnProtocolEnabled(AllocationSequence* seq, ProtocolType proto); void OnPortDestroyed(PortInterface* port); void MaybeSignalCandidatesAllocationDone(); - void OnPortAllocationComplete(AllocationSequence* seq); + void OnPortAllocationComplete(); PortData* FindPort(Port* port); std::vector GetNetworks(); std::vector GetFailedNetworks(); @@ -266,7 +270,7 @@ class RTC_EXPORT 
BasicPortAllocatorSession : public PortAllocatorSession, bool allocation_started_; bool network_manager_started_; bool allocation_sequences_created_; - std::vector configs_; + std::vector> configs_; std::vector sequences_; std::vector ports_; std::vector candidate_error_events_; @@ -274,13 +278,15 @@ class RTC_EXPORT BasicPortAllocatorSession : public PortAllocatorSession, // Policy on how to prune turn ports, taken from the port allocator. webrtc::PortPrunePolicy turn_port_prune_policy_; SessionState state_ = SessionState::CLEARED; + int allocation_epoch_ RTC_GUARDED_BY(network_thread_) = 0; + webrtc::ScopedTaskSafety network_safety_; friend class AllocationSequence; }; // Records configuration information useful in creating ports. // TODO(deadbeef): Rename "relay" to "turn_server" in this struct. -struct RTC_EXPORT PortConfiguration : public rtc::MessageData { +struct RTC_EXPORT PortConfiguration { // TODO(jiayl): remove |stun_address| when Chrome is updated. rtc::SocketAddress stun_address; ServerAddresses stun_servers; @@ -300,8 +306,6 @@ struct RTC_EXPORT PortConfiguration : public rtc::MessageData { const std::string& username, const std::string& password); - ~PortConfiguration() override; - // Returns addresses of both the explicitly configured STUN servers, // and TURN servers that should be used as STUN servers. ServerAddresses StunServers(); @@ -323,8 +327,8 @@ class TurnPort; // Performs the allocation of ports, in a sequenced (timed) manner, for a given // network and IP address. -class AllocationSequence : public rtc::MessageHandler, - public sigslot::has_slots<> { +// This class is thread-compatible. +class AllocationSequence : public sigslot::has_slots<> { public: enum State { kInit, // Initial state. @@ -334,11 +338,18 @@ class AllocationSequence : public rtc::MessageHandler, // kInit --> kRunning --> {kCompleted|kStopped} }; + // |port_allocation_complete_callback| is called when AllocationSequence is + // done with allocating ports. 
This signal is useful when port allocation + // fails which doesn't result in any candidates. Using this signal + // BasicPortAllocatorSession can send its candidate discovery conclusion + // signal. Without this signal, BasicPortAllocatorSession doesn't have any + // event to trigger signal. This can also be achieved by starting a timer in + // BPAS, but this is less deterministic. AllocationSequence(BasicPortAllocatorSession* session, rtc::Network* network, PortConfiguration* config, - uint32_t flags); - ~AllocationSequence() override; + uint32_t flags, + std::function port_allocation_complete_callback); void Init(); void Clear(); void OnNetworkFailed(); @@ -360,17 +371,6 @@ class AllocationSequence : public rtc::MessageHandler, void Start(); void Stop(); - // MessageHandler - void OnMessage(rtc::Message* msg) override; - - // Signal from AllocationSequence, when it's done with allocating ports. - // This signal is useful, when port allocation fails which doesn't result - // in any candidates. Using this signal BasicPortAllocatorSession can send - // its candidate discovery conclusion signal. Without this signal, - // BasicPortAllocatorSession doesn't have any event to trigger signal. This - // can also be achieved by starting timer in BPAS. - sigslot::signal1 SignalPortAllocationComplete; - protected: // For testing. void CreateTurnPort(const RelayServerConfig& config); @@ -378,6 +378,7 @@ class AllocationSequence : public rtc::MessageHandler, private: typedef std::vector ProtocolList; + void Process(int epoch); bool IsFlagSet(uint32_t flag) { return ((flags_ & flag) != 0); } void CreateUDPPorts(); void CreateTCPPorts(); @@ -406,6 +407,12 @@ class AllocationSequence : public rtc::MessageHandler, UDPPort* udp_port_; std::vector relay_ports_; int phase_; + std::function port_allocation_complete_callback_; + // This counter is sampled and passed together with tasks when tasks are + // posted. 
If the sampled counter doesn't match |epoch_| on reception, the + // posted task is ignored. + int epoch_ = 0; + webrtc::ScopedTaskSafety safety_; }; } // namespace cricket diff --git a/p2p/client/basic_port_allocator_unittest.cc b/p2p/client/basic_port_allocator_unittest.cc index 5393321f79..fa1f49c9b1 100644 --- a/p2p/client/basic_port_allocator_unittest.cc +++ b/p2p/client/basic_port_allocator_unittest.cc @@ -152,7 +152,7 @@ class BasicPortAllocatorTestBase : public ::testing::Test, // must be called. nat_factory_(vss_.get(), kNatUdpAddr, kNatTcpAddr), nat_socket_factory_(new rtc::BasicPacketSocketFactory(&nat_factory_)), - stun_server_(TestStunServer::Create(rtc::Thread::Current(), kStunAddr)), + stun_server_(TestStunServer::Create(fss_.get(), kStunAddr)), turn_server_(rtc::Thread::Current(), kTurnUdpIntAddr, kTurnUdpExtAddr), candidate_allocation_done_(false) { ServerAddresses stun_servers; @@ -1058,7 +1058,6 @@ TEST_F(BasicPortAllocatorTest, TestSameNetworkDownAndUpWhenSessionNotStopped) { AddInterface(kClientAddr, if_name); ASSERT_TRUE_SIMULATED_WAIT(candidate_allocation_done_, kDefaultAllocationTimeout, fake_clock); - // TODO(nisse): Needs fixing, test fails with sizes == 0. 
EXPECT_EQ(3U, candidates_.size()); EXPECT_EQ(3U, ports_.size()); } diff --git a/p2p/client/turn_port_factory.cc b/p2p/client/turn_port_factory.cc index de4b9e6a09..fd3420c016 100644 --- a/p2p/client/turn_port_factory.cc +++ b/p2p/client/turn_port_factory.cc @@ -28,6 +28,8 @@ std::unique_ptr TurnPortFactory::Create( args.username, args.password, *args.server_address, args.config->credentials, args.config->priority, args.origin, args.turn_customizer); + if (!port) + return nullptr; port->SetTlsCertPolicy(args.config->tls_cert_policy); port->SetTurnLoggingId(args.config->turn_logging_id); return std::move(port); @@ -42,6 +44,8 @@ std::unique_ptr TurnPortFactory::Create(const CreateRelayPortArgs& args, args.config->credentials, args.config->priority, args.origin, args.config->tls_alpn_protocols, args.config->tls_elliptic_curves, args.turn_customizer, args.config->tls_cert_verifier); + if (!port) + return nullptr; port->SetTlsCertPolicy(args.config->tls_cert_policy); port->SetTurnLoggingId(args.config->turn_logging_id); return std::move(port); diff --git a/p2p/g3doc/ice.md b/p2p/g3doc/ice.md new file mode 100644 index 0000000000..be81ff9e22 --- /dev/null +++ b/p2p/g3doc/ice.md @@ -0,0 +1,102 @@ +# ICE + + + + +## Overview + +ICE ([link](https://developer.mozilla.org/en-US/docs/Glossary/ICE)) provides +unreliable packet transport between two clients (p2p) or between a client and a +server. + +This documentation provides an overview of how ICE is implemented, i.e how the +following classes interact. + +* [`cricket::IceTransportInternal`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/ice_transport_internal.h;l=225;drc=8cb97062880b0e0a78f9d578370a01aced81a13f) - + is the interface that does ICE (manage ports, candidates, connections to + send/receive packets). 
The interface is implemented by + [`cricket::P2PTransportChannel`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/p2p_transport_channel.h;l=103;drc=0ccfbd2de7bc3b237a0f8c30f48666c97b9e5523). + +* [`cricket::PortInterface`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/port_interface.h;l=47;drc=c3a486c41e682cce943f2b20fe987c9421d4b631) + Represents a local communication mechanism that can be used to create + connections to similar mechanisms of the other client. There are 4 + implementations of `cricket::PortInterface` + [`cricket::UDPPort`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/stun_port.h;l=33;drc=a4d873786f10eedd72de25ad0d94ad7c53c1f68a), + [`cricket::StunPort`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/stun_port.h;l=265;drc=a4d873786f10eedd72de25ad0d94ad7c53c1f68a), + [`cricket::TcpPort`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/tcp_port.h;l=33;drc=7a284e1614a38286477ed2334ecbdde78e87b79c) + and + [`cricket::TurnPort`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/turn_port.h;l=44;drc=ffb7603b6025fbd6e79f360d293ab49092bded54). + The ports share lots of functionality in a base class, + [`cricket::Port`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/port.h;l=187;drc=3ba7beba29c4e542c4a9bffcc5a47d5e911865be). + +* [`cricket::Candidate`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/candidate.h;l=30;drc=10542f21c8e4e2d60b136fab45338f2b1e132dde) + represents an address discovered by a `cricket::Port`. A candidate can be + local (i.e discovered by a local port) or remote. Remote candidates are + transported using signaling, i.e outside of webrtc. 
There are 4 types of + candidates: `local`, `stun`, `prflx` or `relay` + ([standard](https://developer.mozilla.org/en-US/docs/Web/API/RTCIceCandidateType)) + +* [`cricket::Connection`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/connection.h) + provides the management of a `cricket::CandidatePair`, i.e for sending data + between two candidates. It sends STUN Binding requests (aka STUN pings) to + verify that packets can traverse back and forth and keep connections alive + (both that NAT binding is kept, and that the remote peer still wants the + connection to remain open). + +* `cricket::P2PTransportChannel` uses an + [`cricket::PortAllocator`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/port_allocator.h;l=335;drc=9438fb3fff97c803d1ead34c0e4f223db168526f) + to create ports and discover local candidates. The `cricket::PortAllocator` + is implemented by + [`cricket::BasicPortAllocator`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/client/basic_port_allocator.h;l=29;drc=e27f3dea8293884701283a54f90f8a429ea99505). + +* `cricket::P2PTransportChannel` uses an + [`cricket::IceControllerInterface`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/ice_controller_interface.h;l=73;drc=9438fb3fff97c803d1ead34c0e4f223db168526f) + to manage a set of connections. The `cricket::IceControllerInterface` + decides which `cricket::Connection` to send data on. + +## Connection establishment + +This section describes a normal sequence of interactions to establish ice state +completed +[ link ](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/ice_transport_internal.h;l=208;drc=9438fb3fff97c803d1ead34c0e4f223db168526f) +([ standard ](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/iceConnectionState)) + +All of these steps are invoked by interactions with `PeerConnection`. 

1. [`P2PTransportChannel::MaybeStartGathering`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/p2p_transport_channel.cc;l=864;drc=0ccfbd2de7bc3b237a0f8c30f48666c97b9e5523) + This function is invoked as part of `PeerConnection::SetLocalDescription`. + `P2PTransportChannel` will use the `cricket::PortAllocator` to create a + `cricket::PortAllocatorSession`. The `cricket::PortAllocatorSession` will + create local ports as configured, and the ports will start gathering + candidates. + +2. [`IceTransportInternal::SignalCandidateGathered`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/ice_transport_internal.h;l=293;drc=8cb97062880b0e0a78f9d578370a01aced81a13f) + When a port finds a local candidate, it will be added to a list on + `cricket::P2PTransportChannel` and signaled to the application using + `IceTransportInternal::SignalCandidateGathered`. A p2p application can then + send them to the peer using a favorite transport mechanism whereas a client-server + application will do nothing. + +3. [`P2PTransportChannel::AddRemoteCandidate`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/p2p_transport_channel.cc;l=1233;drc=0ccfbd2de7bc3b237a0f8c30f48666c97b9e5523) + When the application gets a remote candidate, it can add it using + `PeerConnection::AddRemoteCandidate` (after + `PeerConnection::SetRemoteDescription` has been called!), this will trickle + down to `P2PTransportChannel::AddRemoteCandidate`. `P2PTransportChannel` + will combine the remote candidate with all compatible local candidates to + form new `cricket::Connection`(s). Candidates are compatible if it is + possible to send/receive data (e.g. ipv4 can only send to ipv4, tcp can only + connect to tcp etc...) The newly formed `cricket::Connection`(s) will be + added to the `cricket::IceController` that will decide which + `cricket::Connection` to send STUN ping on. + +4. 
[`P2PTransportChannel::SignalCandidatePairChanged`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/ice_transport_internal.h;l=310;drc=8cb97062880b0e0a78f9d578370a01aced81a13f) + When a remote connection replies to a STUN ping, `cricket::IceController` + will instruct `P2PTransportChannel` to use the connection. This is signalled + up the stack using `P2PTransportChannel::SignalCandidatePairChanged`. Note + that `cricket::IceController` will continue to send STUN pings on the + selected connection, as well as other connections. + +5. [`P2PTransportChannel::SignalIceTransportStateChanged`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/ice_transport_internal.h;l=323;drc=8cb97062880b0e0a78f9d578370a01aced81a13f) + The initial selection of a connection makes `P2PTransportChannel` signal up + the stack that state has changed, which may make [`cricket::DtlsTransportInternal`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/dtls_transport_internal.h;l=63;drc=653bab6790ac92c513b7cf4cd3ad59039c589a95) + initiate a DTLS handshake (depending on the DTLS role). 
diff --git a/p2p/stunprober/stun_prober.cc b/p2p/stunprober/stun_prober.cc index f37f24994a..d85d5f27ea 100644 --- a/p2p/stunprober/stun_prober.cc +++ b/p2p/stunprober/stun_prober.cc @@ -20,11 +20,11 @@ #include "api/transport/stun.h" #include "rtc_base/async_packet_socket.h" #include "rtc_base/async_resolver_interface.h" -#include "rtc_base/bind.h" #include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/helpers.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/thread.h" #include "rtc_base/time_utils.h" @@ -104,7 +104,7 @@ class StunProber::Requester : public sigslot::has_slots<> { int16_t num_request_sent_ = 0; int16_t num_response_received_ = 0; - rtc::ThreadChecker& thread_checker_; + webrtc::SequenceChecker& thread_checker_; RTC_DISALLOW_COPY_AND_ASSIGN(Requester); }; @@ -262,6 +262,7 @@ StunProber::StunProber(rtc::PacketSocketFactory* socket_factory, networks_(networks) {} StunProber::~StunProber() { + RTC_DCHECK(thread_checker_.IsCurrent()); for (auto* req : requesters_) { if (req) { delete req; @@ -358,9 +359,8 @@ void StunProber::OnServerResolved(rtc::AsyncResolverInterface* resolver) { // Deletion of AsyncResolverInterface can't be done in OnResolveResult which // handles SignalDone. 
- invoker_.AsyncInvoke( - RTC_FROM_HERE, thread_, - rtc::Bind(&rtc::AsyncResolverInterface::Destroy, resolver, false)); + thread_->PostTask( + webrtc::ToQueuedTask([resolver] { resolver->Destroy(false); })); servers_.pop_back(); if (servers_.size()) { @@ -453,13 +453,14 @@ int StunProber::get_wake_up_interval_ms() { } void StunProber::MaybeScheduleStunRequests() { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(thread_); int64_t now = rtc::TimeMillis(); if (Done()) { - invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, thread_, - rtc::Bind(&StunProber::ReportOnFinished, this, SUCCESS), timeout_ms_); + thread_->PostDelayedTask( + webrtc::ToQueuedTask(task_safety_.flag(), + [this] { ReportOnFinished(SUCCESS); }), + timeout_ms_); return; } if (should_send_next_request(now)) { @@ -469,9 +470,9 @@ void StunProber::MaybeScheduleStunRequests() { } next_request_time_ms_ = now + interval_ms_; } - invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, thread_, - rtc::Bind(&StunProber::MaybeScheduleStunRequests, this), + thread_->PostDelayedTask( + webrtc::ToQueuedTask(task_safety_.flag(), + [this] { MaybeScheduleStunRequests(); }), get_wake_up_interval_ms()); } diff --git a/p2p/stunprober/stun_prober.h b/p2p/stunprober/stun_prober.h index a739a6c98b..43d84ff806 100644 --- a/p2p/stunprober/stun_prober.h +++ b/p2p/stunprober/stun_prober.h @@ -15,16 +15,15 @@ #include #include -#include "rtc_base/async_invoker.h" +#include "api/sequence_checker.h" #include "rtc_base/byte_buffer.h" -#include "rtc_base/callback.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/ip_address.h" #include "rtc_base/network.h" #include "rtc_base/socket_address.h" #include "rtc_base/system/rtc_export.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" namespace rtc { class AsyncPacketSocket; @@ -40,7 +39,7 @@ class StunProber; static const int kMaxUdpBufferSize = 1200; -typedef rtc::Callback2 AsyncCallback; +typedef 
std::function AsyncCallback; enum NatType { NATTYPE_INVALID, @@ -227,15 +226,13 @@ class RTC_EXPORT StunProber : public sigslot::has_slots<> { // The set of STUN probe sockets and their state. std::vector requesters_; - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; // Temporary storage for created sockets. std::vector sockets_; // This tracks how many of the sockets are ready. size_t total_ready_sockets_ = 0; - rtc::AsyncInvoker invoker_; - Observer* observer_ = nullptr; // TODO(guoweis): Remove this once all dependencies move away from // AsyncCallback. @@ -243,6 +240,8 @@ class RTC_EXPORT StunProber : public sigslot::has_slots<> { rtc::NetworkManager::NetworkList networks_; + webrtc::ScopedTaskSafety task_safety_; + RTC_DISALLOW_COPY_AND_ASSIGN(StunProber); }; diff --git a/p2p/stunprober/stun_prober_unittest.cc b/p2p/stunprober/stun_prober_unittest.cc index 91e2aac2a0..e098ec6f48 100644 --- a/p2p/stunprober/stun_prober_unittest.cc +++ b/p2p/stunprober/stun_prober_unittest.cc @@ -43,10 +43,8 @@ class StunProberTest : public ::testing::Test { : ss_(new rtc::VirtualSocketServer()), main_(ss_.get()), result_(StunProber::SUCCESS), - stun_server_1_(cricket::TestStunServer::Create(rtc::Thread::Current(), - kStunAddr1)), - stun_server_2_(cricket::TestStunServer::Create(rtc::Thread::Current(), - kStunAddr2)) { + stun_server_1_(cricket::TestStunServer::Create(ss_.get(), kStunAddr1)), + stun_server_2_(cricket::TestStunServer::Create(ss_.get(), kStunAddr2)) { stun_server_1_->set_fake_stun_addr(kStunMappedAddr); stun_server_2_->set_fake_stun_addr(kStunMappedAddr); rtc::InitializeSSL(); diff --git a/pc/BUILD.gn b/pc/BUILD.gn index c5223b10c4..460462e54a 100644 --- a/pc/BUILD.gn +++ b/pc/BUILD.gn @@ -19,10 +19,24 @@ group("pc") { config("rtc_pc_config") { defines = [] if (rtc_enable_sctp) { - defines += [ "HAVE_SCTP" ] + defines += [ "WEBRTC_HAVE_SCTP" ] } } +rtc_library("proxy") { + sources = [ + "proxy.cc", + "proxy.h", + ] + deps = [ + 
"../api:scoped_refptr", + "../api/task_queue", + "../rtc_base:rtc_base_approved", + "../rtc_base:threading", + "../rtc_base/system:rtc_export", + ] +} + rtc_library("rtc_pc_base") { visibility = [ "*" ] defines = [] @@ -32,12 +46,6 @@ rtc_library("rtc_pc_base") { "channel_interface.h", "channel_manager.cc", "channel_manager.h", - "composite_data_channel_transport.cc", - "composite_data_channel_transport.h", - "composite_rtp_transport.cc", - "composite_rtp_transport.h", - "datagram_rtp_transport.cc", - "datagram_rtp_transport.h", "dtls_srtp_transport.cc", "dtls_srtp_transport.h", "dtls_transport.cc", @@ -48,14 +56,22 @@ rtc_library("rtc_pc_base") { "ice_transport.h", "jsep_transport.cc", "jsep_transport.h", + "jsep_transport_collection.cc", + "jsep_transport_collection.h", "jsep_transport_controller.cc", "jsep_transport_controller.h", "media_session.cc", "media_session.h", + "media_stream_proxy.h", + "media_stream_track_proxy.h", + "peer_connection_factory_proxy.h", + "peer_connection_proxy.h", "rtcp_mux_filter.cc", "rtcp_mux_filter.h", "rtp_media_utils.cc", "rtp_media_utils.h", + "rtp_receiver_proxy.h", + "rtp_sender_proxy.h", "rtp_transport.cc", "rtp_transport.h", "rtp_transport_internal.h", @@ -65,10 +81,6 @@ rtc_library("rtc_pc_base") { "sctp_transport.h", "sctp_utils.cc", "sctp_utils.h", - "session_description.cc", - "session_description.h", - "simulcast_description.cc", - "simulcast_description.h", "srtp_filter.cc", "srtp_filter.h", "srtp_session.cc", @@ -78,53 +90,77 @@ rtc_library("rtc_pc_base") { "transport_stats.cc", "transport_stats.h", "used_ids.h", + "video_track_source_proxy.cc", + "video_track_source_proxy.h", ] deps = [ ":media_protocol_names", + ":proxy", + ":session_description", + ":simulcast_description", "../api:array_view", + "../api:async_dns_resolver", "../api:audio_options_api", "../api:call_api", "../api:function_view", "../api:ice_transport_factory", "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + 
"../api:packet_socket_factory", "../api:priority", "../api:rtc_error", "../api:rtp_headers", "../api:rtp_parameters", "../api:rtp_parameters", + "../api:rtp_transceiver_direction", "../api:scoped_refptr", + "../api:sequence_checker", "../api/crypto:options", "../api/rtc_event_log", + "../api/task_queue", "../api/transport:datagram_transport_interface", - "../api/transport/media:media_transport_interface", + "../api/transport:enums", + "../api/transport:sctp_transport_factory_interface", "../api/video:builtin_video_bitrate_allocator_factory", + "../api/video:video_bitrate_allocator_factory", "../api/video:video_frame", "../api/video:video_rtp_headers", + "../api/video_codecs:video_codecs_api", "../call:call_interfaces", "../call:rtp_interfaces", "../call:rtp_receiver", "../common_video", "../common_video:common_video", "../logging:ice_log", - "../media:rtc_data", - "../media:rtc_h264_profile_id", + "../media:rtc_data_sctp_transport_internal", "../media:rtc_media_base", "../media:rtc_media_config", + "../media:rtc_sdp_video_format_utils", "../modules/rtp_rtcp:rtp_rtcp", "../modules/rtp_rtcp:rtp_rtcp_format", "../p2p:rtc_p2p", "../rtc_base", + "../rtc_base:callback_list", "../rtc_base:checks", - "../rtc_base:deprecation", "../rtc_base:rtc_task_queue", + "../rtc_base:socket", + "../rtc_base:socket_address", "../rtc_base:stringutils", + "../rtc_base:threading", + "../rtc_base/network:sent_packet", + "../rtc_base/synchronization:mutex", "../rtc_base/system:file_wrapper", + "../rtc_base/system:no_unique_address", "../rtc_base/system:rtc_export", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", "../rtc_base/third_party/base64", "../rtc_base/third_party/sigslot", "../system_wrappers:field_trial", "../system_wrappers:metrics", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/memory", @@ -139,6 +175,43 @@ 
rtc_library("rtc_pc_base") { public_configs = [ ":rtc_pc_config" ] } +rtc_source_set("session_description") { + visibility = [ "*" ] + sources = [ + "session_description.cc", + "session_description.h", + ] + deps = [ + ":media_protocol_names", + ":simulcast_description", + "../api:libjingle_peerconnection_api", + "../api:rtp_parameters", + "../api:rtp_transceiver_direction", + "../media:rtc_media_base", + "../p2p:rtc_p2p", + "../rtc_base:checks", + "../rtc_base:socket_address", + "../rtc_base/system:rtc_export", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/memory:memory", + ] +} + +rtc_source_set("simulcast_description") { + visibility = [ "*" ] + sources = [ + "simulcast_description.cc", + "simulcast_description.h", + ] + deps = [ + "../rtc_base:checks", + "../rtc_base:socket_address", + "../rtc_base/system:rtc_export", + ] +} + rtc_source_set("rtc_pc") { visibility = [ "*" ] allow_poison = [ "audio_codecs" ] # TODO(bugs.webrtc.org/8396): Remove. 
@@ -159,50 +232,31 @@ rtc_library("peerconnection") { visibility = [ "*" ] cflags = [] sources = [ - "audio_rtp_receiver.cc", - "audio_rtp_receiver.h", - "audio_track.cc", - "audio_track.h", - "data_channel.cc", - "data_channel.h", "data_channel_controller.cc", "data_channel_controller.h", - "dtmf_sender.cc", - "dtmf_sender.h", + "data_channel_utils.cc", + "data_channel_utils.h", "ice_server_parsing.cc", "ice_server_parsing.h", - "jitter_buffer_delay.cc", - "jitter_buffer_delay.h", - "jitter_buffer_delay_interface.h", - "jitter_buffer_delay_proxy.h", "jsep_ice_candidate.cc", "jsep_session_description.cc", "local_audio_source.cc", "local_audio_source.h", - "media_stream.cc", - "media_stream.h", "media_stream_observer.cc", "media_stream_observer.h", - "media_stream_track.h", "peer_connection.cc", "peer_connection.h", "peer_connection_factory.cc", "peer_connection_factory.h", "peer_connection_internal.h", - "remote_audio_source.cc", - "remote_audio_source.h", "rtc_stats_collector.cc", "rtc_stats_collector.h", "rtc_stats_traversal.cc", "rtc_stats_traversal.h", - "rtp_parameters_conversion.cc", - "rtp_parameters_conversion.h", - "rtp_receiver.cc", - "rtp_receiver.h", - "rtp_sender.cc", - "rtp_sender.h", - "rtp_transceiver.cc", - "rtp_transceiver.h", + "sctp_data_channel.cc", + "sctp_data_channel.h", + "sdp_offer_answer.cc", # TODO: Make separate target when not circular + "sdp_offer_answer.h", # dependent on peerconnection.h "sdp_serializer.cc", "sdp_serializer.h", "sdp_utils.cc", @@ -212,14 +266,6 @@ rtc_library("peerconnection") { "stream_collection.h", "track_media_info_map.cc", "track_media_info_map.h", - "video_rtp_receiver.cc", - "video_rtp_receiver.h", - "video_rtp_track_source.cc", - "video_rtp_track_source.h", - "video_track.cc", - "video_track.h", - "video_track_source.cc", - "video_track_source.h", "webrtc_sdp.cc", "webrtc_sdp.h", "webrtc_session_description_factory.cc", @@ -227,58 +273,590 @@ rtc_library("peerconnection") { ] deps = [ + 
":audio_rtp_receiver", + ":audio_track", + ":connection_context", + ":dtmf_sender", + ":jitter_buffer_delay", + ":media_protocol_names", + ":media_stream", + ":peer_connection_message_handler", + ":proxy", + ":remote_audio_source", ":rtc_pc_base", + ":rtp_parameters_conversion", + ":rtp_receiver", + ":rtp_sender", + ":rtp_transceiver", + ":rtp_transmission_manager", + ":sdp_state_provider", + ":session_description", + ":simulcast_description", + ":stats_collector_interface", + ":transceiver_list", + ":usage_pattern", + ":video_rtp_receiver", + ":video_track", + ":video_track_source", "../api:array_view", + "../api:async_dns_resolver", "../api:audio_options_api", "../api:call_api", + "../api:callfactory_api", "../api:fec_controller_api", "../api:frame_transformer_interface", "../api:ice_transport_factory", + "../api:libjingle_logging_api", "../api:libjingle_peerconnection_api", "../api:media_stream_interface", "../api:network_state_predictor_api", + "../api:packet_socket_factory", "../api:priority", "../api:rtc_error", "../api:rtc_event_log_output_file", "../api:rtc_stats_api", "../api:rtp_parameters", + "../api:rtp_transceiver_direction", "../api:scoped_refptr", + "../api:sequence_checker", + "../api/adaptation:resource_adaptation_api", + "../api/audio_codecs:audio_codecs_api", "../api/crypto:frame_decryptor_interface", + "../api/crypto:options", + "../api/neteq:neteq_api", "../api/rtc_event_log", "../api/task_queue", + "../api/transport:bitrate_settings", "../api/transport:datagram_transport_interface", + "../api/transport:enums", "../api/transport:field_trial_based_config", - "../api/transport/media:media_transport_interface", + "../api/transport:network_control", + "../api/transport:sctp_transport_factory_interface", + "../api/transport:webrtc_key_value_config", "../api/units:data_rate", "../api/video:builtin_video_bitrate_allocator_factory", + "../api/video:video_bitrate_allocator_factory", + "../api/video:video_codec_constants", + "../api/video:video_frame", + 
"../api/video:video_rtp_headers", + "../api/video_codecs:video_codecs_api", + "../call:call_interfaces", + "../call:rtp_interfaces", + "../call:rtp_sender", + "../common_video", + "../logging:ice_log", + "../media:rtc_data_sctp_transport_internal", + "../media:rtc_media_base", + "../media:rtc_media_config", + "../modules/audio_processing:audio_processing_statistics", + "../modules/rtp_rtcp:rtp_rtcp_format", + "../p2p:rtc_p2p", + "../rtc_base", + "../rtc_base:callback_list", + "../rtc_base:checks", + "../rtc_base:ip_address", + "../rtc_base:network_constants", + "../rtc_base:rtc_base_approved", + "../rtc_base:rtc_operations_chain", + "../rtc_base:safe_minmax", + "../rtc_base:socket_address", + "../rtc_base:threading", + "../rtc_base:weak_ptr", + "../rtc_base/experiments:field_trial_parser", + "../rtc_base/network:sent_packet", + "../rtc_base/synchronization:mutex", + "../rtc_base/system:file_wrapper", + "../rtc_base/system:no_unique_address", + "../rtc_base/system:rtc_export", + "../rtc_base/system:unused", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", + "../rtc_base/third_party/base64", + "../rtc_base/third_party/sigslot", + "../stats", + "../system_wrappers", + "../system_wrappers:field_trial", + "../system_wrappers:metrics", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("connection_context") { + sources = [ + "connection_context.cc", + "connection_context.h", + ] + deps = [ + ":rtc_pc_base", + "../api:callfactory_api", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:refcountedbase", + "../api:scoped_refptr", + "../api:sequence_checker", + "../api/neteq:neteq_api", + "../api/transport:field_trial_based_config", + "../api/transport:sctp_transport_factory_interface", + "../api/transport:webrtc_key_value_config", + 
"../media:rtc_data_sctp_transport_factory", + "../media:rtc_media_base", + "../p2p:rtc_p2p", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:threading", + "../rtc_base/task_utils:to_queued_task", + ] +} + +rtc_library("peer_connection_message_handler") { + sources = [ + "peer_connection_message_handler.cc", + "peer_connection_message_handler.h", + ] + deps = [ + ":stats_collector_interface", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:rtc_error", + "../api:scoped_refptr", + "../api:sequence_checker", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:threading", + ] +} + +rtc_library("usage_pattern") { + sources = [ + "usage_pattern.cc", + "usage_pattern.h", + ] + deps = [ + "../api:libjingle_peerconnection_api", + "../rtc_base:logging", + "../system_wrappers:metrics", + ] +} + +rtc_library("rtp_transceiver") { + sources = [ + "rtp_transceiver.cc", + "rtp_transceiver.h", + ] + deps = [ + ":proxy", + ":rtc_pc_base", + ":rtp_parameters_conversion", + ":rtp_receiver", + ":rtp_sender", + ":session_description", + "../api:array_view", + "../api:libjingle_peerconnection_api", + "../api:rtc_error", + "../api:rtp_parameters", + "../api:rtp_transceiver_direction", + "../api:scoped_refptr", + "../api:sequence_checker", + "../api/task_queue", + "../media:rtc_media_base", + "../rtc_base:checks", + "../rtc_base:logging", + "../rtc_base:macromagic", + "../rtc_base:refcount", + "../rtc_base:threading", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", + "../rtc_base/third_party/sigslot", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("rtp_transmission_manager") { + sources = [ + "rtp_transmission_manager.cc", + "rtp_transmission_manager.h", + ] + deps = [ + ":audio_rtp_receiver", + ":rtc_pc_base", + ":rtp_receiver", + ":rtp_sender", + 
":rtp_transceiver", + ":stats_collector_interface", + ":transceiver_list", + ":usage_pattern", + ":video_rtp_receiver", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:rtc_error", + "../api:rtp_parameters", + "../api:rtp_transceiver_direction", + "../api:scoped_refptr", + "../api:sequence_checker", + "../media:rtc_media_base", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:threading", + "../rtc_base:weak_ptr", + "../rtc_base/third_party/sigslot", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("transceiver_list") { + sources = [ + "transceiver_list.cc", + "transceiver_list.h", + ] + deps = [ + ":rtp_transceiver", + "../api:libjingle_peerconnection_api", + "../api:rtc_error", + "../api:rtp_parameters", + "../api:scoped_refptr", + "../api:sequence_checker", + "../rtc_base:checks", + "../rtc_base:macromagic", + "../rtc_base/system:no_unique_address", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_library("rtp_receiver") { + sources = [ + "rtp_receiver.cc", + "rtp_receiver.h", + ] + deps = [ + ":media_stream", + ":rtc_pc_base", + ":video_track_source", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:rtp_parameters", + "../api:scoped_refptr", + "../api/crypto:frame_decryptor_interface", + "../api/video:video_frame", + "../media:rtc_media_base", + "../rtc_base:checks", + "../rtc_base:logging", + "../rtc_base:rtc_base", + "../rtc_base:rtc_base_approved", + "../rtc_base:threading", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("audio_rtp_receiver") { + sources = [ + "audio_rtp_receiver.cc", + "audio_rtp_receiver.h", + ] + deps = [ + ":audio_track", + ":jitter_buffer_delay", + 
":media_stream", + ":remote_audio_source", + ":rtc_pc_base", + ":rtp_receiver", + "../api:frame_transformer_interface", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:rtp_parameters", + "../api:scoped_refptr", + "../api:sequence_checker", + "../api/crypto:frame_decryptor_interface", + "../api/transport/rtp:rtp_source", + "../media:rtc_media_base", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:refcount", + "../rtc_base:threading", + "../rtc_base/system:no_unique_address", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("video_rtp_receiver") { + sources = [ + "video_rtp_receiver.cc", + "video_rtp_receiver.h", + ] + deps = [ + ":jitter_buffer_delay", + ":media_stream", + ":rtc_pc_base", + ":rtp_receiver", + ":video_rtp_track_source", + ":video_track", + "../api:frame_transformer_interface", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:rtp_parameters", + "../api:scoped_refptr", + "../api:sequence_checker", + "../api/crypto:frame_decryptor_interface", + "../api/transport/rtp:rtp_source", + "../api/video:recordable_encoded_frame", + "../api/video:video_frame", + "../media:rtc_media_base", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:rtc_base_approved", + "../rtc_base:threading", + "../rtc_base/system:no_unique_address", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("video_rtp_track_source") { + sources = [ + "video_rtp_track_source.cc", + "video_rtp_track_source.h", + ] + deps = [ + ":video_track_source", + "../api:sequence_checker", + "../api/video:recordable_encoded_frame", + 
"../api/video:video_frame", + "../media:rtc_media_base", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base/synchronization:mutex", + "../rtc_base/system:no_unique_address", + ] +} + +rtc_library("audio_track") { + sources = [ + "audio_track.cc", + "audio_track.h", + ] + deps = [ + "../api:media_stream_interface", + "../api:scoped_refptr", + "../api:sequence_checker", + "../rtc_base:checks", + "../rtc_base:refcount", + ] +} + +rtc_library("video_track") { + sources = [ + "video_track.cc", + "video_track.h", + ] + deps = [ + "../api:media_stream_interface", + "../api:scoped_refptr", + "../api:sequence_checker", + "../api/video:video_frame", + "../media:rtc_media_base", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:refcount", + "../rtc_base:rtc_base_approved", + "../rtc_base:threading", + ] +} + +rtc_source_set("sdp_state_provider") { + sources = [ "sdp_state_provider.h" ] + deps = [ + ":rtc_pc_base", + "../api:libjingle_peerconnection_api", + ] +} + +rtc_library("jitter_buffer_delay") { + sources = [ + "jitter_buffer_delay.cc", + "jitter_buffer_delay.h", + ] + deps = [ + "../api:sequence_checker", + "../rtc_base:checks", + "../rtc_base:safe_conversions", + "../rtc_base:safe_minmax", + "../rtc_base/system:no_unique_address", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_library("remote_audio_source") { + sources = [ + "remote_audio_source.cc", + "remote_audio_source.h", + ] + deps = [ + ":rtc_pc_base", + "../api:call_api", + "../api:media_stream_interface", + "../api:scoped_refptr", + "../api:sequence_checker", + "../media:rtc_media_base", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:logging", + "../rtc_base:rtc_base_approved", + "../rtc_base:safe_conversions", + "../rtc_base:stringutils", + "../rtc_base:threading", + "../rtc_base/synchronization:mutex", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + 
"//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("rtp_sender") { + sources = [ + "rtp_sender.cc", + "rtp_sender.h", + ] + deps = [ + ":dtmf_sender", + ":stats_collector_interface", + "../api:audio_options_api", + "../api:frame_transformer_interface", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:priority", + "../api:rtc_error", + "../api:rtp_parameters", + "../api:scoped_refptr", + "../api/crypto:frame_encryptor_interface", + "../media:rtc_media_base", + "../rtc_base:checks", + "../rtc_base:rtc_base", + "../rtc_base:threading", + "../rtc_base/synchronization:mutex", + "../rtc_base/third_party/sigslot", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("rtp_parameters_conversion") { + sources = [ + "rtp_parameters_conversion.cc", + "rtp_parameters_conversion.h", + ] + deps = [ + ":rtc_pc_base", + ":session_description", + "../api:array_view", + "../api:libjingle_peerconnection_api", + "../api:rtc_error", + "../api:rtp_parameters", + "../media:rtc_media_base", + "../rtc_base:checks", + "../rtc_base:rtc_base", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("dtmf_sender") { + sources = [ + "dtmf_sender.cc", + "dtmf_sender.h", + ] + deps = [ + ":proxy", + "../api:libjingle_peerconnection_api", + "../api:scoped_refptr", + "../rtc_base:checks", + "../rtc_base:rtc_base", + "../rtc_base:threading", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", + "../rtc_base/third_party/sigslot", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("media_stream") { + 
sources = [ + "media_stream.cc", + "media_stream.h", + ] + deps = [ + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:scoped_refptr", + "../rtc_base:checks", + "../rtc_base:refcount", + "../rtc_base:rtc_base", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + +rtc_library("video_track_source") { + sources = [ + "video_track_source.cc", + "video_track_source.h", + ] + deps = [ + "../api:media_stream_interface", + "../api:sequence_checker", + "../api/video:recordable_encoded_frame", "../api/video:video_frame", - "../api/video:video_rtp_headers", - "../api/video_codecs:video_codecs_api", - "../call:call_interfaces", - "../common_video", - "../logging:ice_log", - "../media:rtc_data", "../media:rtc_media_base", - "../modules/rtp_rtcp:rtp_rtcp_format", - "../p2p:rtc_p2p", - "../rtc_base", "../rtc_base:checks", "../rtc_base:rtc_base_approved", - "../rtc_base:rtc_operations_chain", - "../rtc_base:safe_minmax", - "../rtc_base:weak_ptr", - "../rtc_base/experiments:field_trial_parser", - "../rtc_base/system:file_wrapper", "../rtc_base/system:rtc_export", - "../rtc_base/third_party/base64", - "../rtc_base/third_party/sigslot", - "../stats", - "../system_wrappers", - "../system_wrappers:field_trial", - "../system_wrappers:metrics", - "//third_party/abseil-cpp/absl/algorithm:container", - "//third_party/abseil-cpp/absl/strings", - "//third_party/abseil-cpp/absl/types:optional", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_source_set("stats_collector_interface") { + sources = [ "stats_collector_interface.h" ] + deps = [ + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", ] } @@ -290,14 +868,13 @@ rtc_source_set("libjingle_peerconnection") { ] } -if (rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { rtc_test("rtc_pc_unittests") { testonly = 
true sources = [ "channel_manager_unittest.cc", "channel_unittest.cc", - "composite_rtp_transport_test.cc", "dtls_srtp_transport_unittest.cc", "dtls_transport_unittest.cc", "ice_transport_unittest.cc", @@ -329,21 +906,20 @@ if (rtc_include_tests) { ":peerconnection", ":rtc_pc", ":rtc_pc_base", + ":session_description", + ":video_rtp_receiver", "../api:array_view", "../api:audio_options_api", - "../api:fake_media_transport", "../api:ice_transport_factory", "../api:libjingle_peerconnection_api", - "../api:loopback_media_transport", "../api:rtc_error", "../api:rtp_headers", "../api:rtp_parameters", - "../api/transport/media:media_transport_interface", "../api/video:builtin_video_bitrate_allocator_factory", "../api/video/test:mock_recordable_encoded_frame", "../call:rtp_interfaces", "../call:rtp_receiver", - "../media:rtc_data", + "../media:rtc_data_sctp_transport_internal", "../media:rtc_media_base", "../media:rtc_media_tests_utils", "../modules/rtp_rtcp:rtp_rtcp_format", @@ -356,8 +932,12 @@ if (rtc_include_tests) { "../rtc_base:gunit_helpers", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_base_tests_utils", + "../rtc_base:threading", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", "../rtc_base/third_party/sigslot", "../system_wrappers:metrics", + "../test:field_trial", "../test:test_main", "../test:test_support", "//third_party/abseil-cpp/absl/algorithm:container", @@ -403,11 +983,13 @@ if (rtc_include_tests) { "../rtc_base:checks", "../rtc_base:gunit_helpers", "../rtc_base:rtc_base_tests_utils", + "../rtc_base:socket_address", + "../rtc_base:threading", "../system_wrappers", "../test:perf_test", "../test:test_support", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("peerconnection_wrapper") { @@ -433,84 +1015,10 @@ if (rtc_include_tests) { ] } - rtc_library("pc_test_utils") { - testonly = true - sources = [ - 
"test/fake_audio_capture_module.cc", - "test/fake_audio_capture_module.h", - "test/fake_data_channel_provider.h", - "test/fake_peer_connection_base.h", - "test/fake_peer_connection_for_stats.h", - "test/fake_periodic_video_source.h", - "test/fake_periodic_video_track_source.h", - "test/fake_rtc_certificate_generator.h", - "test/fake_sctp_transport.h", - "test/fake_video_track_renderer.h", - "test/fake_video_track_source.h", - "test/frame_generator_capturer_video_track_source.h", - "test/mock_channel_interface.h", - "test/mock_data_channel.h", - "test/mock_delayable.h", - "test/mock_peer_connection_observers.h", - "test/mock_rtp_receiver_internal.h", - "test/mock_rtp_sender_internal.h", - "test/peer_connection_test_wrapper.cc", - "test/peer_connection_test_wrapper.h", - "test/rtc_stats_obtainer.h", - "test/test_sdp_strings.h", - ] - - deps = [ - ":libjingle_peerconnection", - ":peerconnection", - ":rtc_pc_base", - "../api:audio_options_api", - "../api:create_frame_generator", - "../api:create_peerconnection_factory", - "../api:libjingle_peerconnection_api", - "../api:media_stream_interface", - "../api:rtc_error", - "../api:rtc_stats_api", - "../api:scoped_refptr", - "../api/audio:audio_mixer_api", - "../api/audio_codecs:audio_codecs_api", - "../api/task_queue", - "../api/task_queue:default_task_queue_factory", - "../api/video:builtin_video_bitrate_allocator_factory", - "../api/video:video_frame", - "../api/video:video_rtp_headers", - "../api/video_codecs:builtin_video_decoder_factory", - "../api/video_codecs:builtin_video_encoder_factory", - "../api/video_codecs:video_codecs_api", - "../call:call_interfaces", - "../media:rtc_data", - "../media:rtc_media", - "../media:rtc_media_base", - "../media:rtc_media_tests_utils", - "../modules/audio_device", - "../modules/audio_processing", - "../modules/audio_processing:api", - "../p2p:fake_port_allocator", - "../p2p:p2p_test_utils", - "../p2p:rtc_p2p", - "../rtc_base", - "../rtc_base:checks", - "../rtc_base:gunit_helpers", - 
"../rtc_base:rtc_base_approved", - "../rtc_base:rtc_task_queue", - "../rtc_base:task_queue_for_test", - "../rtc_base/synchronization:sequence_checker", - "../rtc_base/task_utils:repeating_task", - "../rtc_base/third_party/sigslot", - "../test:test_support", - "../test:video_test_common", - "//third_party/abseil-cpp/absl/types:optional", - ] - } - rtc_test("peerconnection_unittests") { testonly = true sources = [ + "data_channel_integrationtest.cc", "data_channel_unittest.cc", "dtmf_sender_unittest.cc", "ice_server_parsing_unittest.cc", @@ -518,6 +1026,7 @@ if (rtc_include_tests) { "jsep_session_description_unittest.cc", "local_audio_source_unittest.cc", "media_stream_unittest.cc", + "peer_connection_adaptation_integrationtest.cc", "peer_connection_bundle_unittest.cc", "peer_connection_crypto_unittest.cc", "peer_connection_data_channel_unittest.cc", @@ -554,13 +1063,27 @@ if (rtc_include_tests) { "webrtc_sdp_unittest.cc", ] - if (rtc_enable_sctp) { - defines = [ "HAVE_SCTP" ] - } - deps = [ + ":audio_rtp_receiver", + ":audio_track", + ":dtmf_sender", + ":integration_test_helpers", + ":jitter_buffer_delay", + ":media_stream", ":peerconnection", + ":proxy", + ":remote_audio_source", ":rtc_pc_base", + ":rtp_parameters_conversion", + ":rtp_receiver", + ":rtp_sender", + ":rtp_transceiver", + ":session_description", + ":usage_pattern", + ":video_rtp_receiver", + ":video_rtp_track_source", + ":video_track", + ":video_track_source", "../api:array_view", "../api:audio_options_api", "../api:create_peerconnection_factory", @@ -569,10 +1092,11 @@ if (rtc_include_tests) { "../api:function_view", "../api:libjingle_logging_api", "../api:libjingle_peerconnection_api", - "../api:loopback_media_transport", "../api:media_stream_interface", "../api:mock_rtp", + "../api:packet_socket_factory", "../api:rtc_error", + "../api:rtp_transceiver_direction", "../api:scoped_refptr", "../api/audio:audio_mixer_api", "../api/crypto:frame_decryptor_interface", @@ -580,12 +1104,17 @@ if 
(rtc_include_tests) { "../api/crypto:options", "../api/rtc_event_log", "../api/rtc_event_log:rtc_event_log_factory", + "../api/task_queue", "../api/task_queue:default_task_queue_factory", - "../api/transport/media:media_transport_interface", + "../api/transport:field_trial_based_config", + "../api/transport:webrtc_key_value_config", "../api/transport/rtp:rtp_source", "../api/units:time_delta", "../api/video:builtin_video_bitrate_allocator_factory", + "../api/video:video_rtp_headers", + "../call/adaptation:resource_adaptation_test_utilities", "../logging:fake_rtc_event_log", + "../media:rtc_data_sctp_transport_internal", "../media:rtc_media_config", "../media:rtc_media_engine_defaults", "../modules/audio_device:audio_device_api", @@ -594,20 +1123,29 @@ if (rtc_include_tests) { "../modules/rtp_rtcp:rtp_rtcp_format", "../p2p:fake_ice_transport", "../p2p:fake_port_allocator", + "../p2p:p2p_server_utils", "../rtc_base:checks", "../rtc_base:gunit_helpers", + "../rtc_base:ip_address", "../rtc_base:rtc_base_tests_utils", "../rtc_base:rtc_json", + "../rtc_base:socket_address", + "../rtc_base:threading", + "../rtc_base/synchronization:mutex", "../rtc_base/third_party/base64", "../rtc_base/third_party/sigslot", + "../system_wrappers:field_trial", "../system_wrappers:metrics", "../test:field_trial", "../test:fileutils", "../test:rtp_test_utils", + "../test:test_common", + "../test/pc/sctp:fake_sctp_transport", "./scenario_tests:pc_scenario_tests", "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", ] if (is_android) { deps += [ ":android_black_magic" ] @@ -632,8 +1170,6 @@ if (rtc_include_tests) { "../api/video_codecs:video_codecs_api", "../call:call_interfaces", "../media:rtc_audio_video", - "../media:rtc_data", # TODO(phoglund): AFAIK only used for one sctp - # constant. 
"../media:rtc_media_base", "../media:rtc_media_tests_utils", "../modules/audio_processing", @@ -649,7 +1185,6 @@ if (rtc_include_tests) { "../test:audio_codec_mocks", "../test:test_main", "../test:test_support", - "//third_party/abseil-cpp/absl/types:optional", ] if (is_android) { @@ -682,4 +1217,187 @@ if (rtc_include_tests) { ] } } + + rtc_library("integration_test_helpers") { + testonly = true + sources = [ + "test/integration_test_helpers.cc", + "test/integration_test_helpers.h", + ] + deps = [ + ":audio_rtp_receiver", + ":audio_track", + ":dtmf_sender", + ":jitter_buffer_delay", + ":media_stream", + ":pc_test_utils", + ":peerconnection", + ":remote_audio_source", + ":rtc_pc_base", + ":rtp_parameters_conversion", + ":rtp_receiver", + ":rtp_sender", + ":rtp_transceiver", + ":session_description", + ":usage_pattern", + ":video_rtp_receiver", + ":video_rtp_track_source", + ":video_track", + ":video_track_source", + "../api:array_view", + "../api:audio_options_api", + "../api:callfactory_api", + "../api:create_peerconnection_factory", + "../api:fake_frame_decryptor", + "../api:fake_frame_encryptor", + "../api:function_view", + "../api:libjingle_logging_api", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:mock_rtp", + "../api:packet_socket_factory", + "../api:rtc_error", + "../api:rtc_stats_api", + "../api:rtp_parameters", + "../api:rtp_transceiver_direction", + "../api:scoped_refptr", + "../api/audio:audio_mixer_api", + "../api/crypto:frame_decryptor_interface", + "../api/crypto:frame_encryptor_interface", + "../api/crypto:options", + "../api/rtc_event_log", + "../api/rtc_event_log:rtc_event_log_factory", + "../api/task_queue", + "../api/task_queue:default_task_queue_factory", + "../api/transport:field_trial_based_config", + "../api/transport:webrtc_key_value_config", + "../api/transport/rtp:rtp_source", + "../api/units:time_delta", + "../api/video:builtin_video_bitrate_allocator_factory", + "../api/video:video_rtp_headers", + 
"../api/video_codecs:video_codecs_api", + "../call:call_interfaces", + "../call/adaptation:resource_adaptation_test_utilities", + "../logging:fake_rtc_event_log", + "../media:rtc_audio_video", + "../media:rtc_media_base", + "../media:rtc_media_config", + "../media:rtc_media_engine_defaults", + "../media:rtc_media_tests_utils", + "../modules/audio_device:audio_device_api", + "../modules/audio_processing:api", + "../modules/audio_processing:audio_processing_statistics", + "../modules/audio_processing:audioproc_test_utils", + "../modules/rtp_rtcp:rtp_rtcp_format", + "../p2p:fake_ice_transport", + "../p2p:fake_port_allocator", + "../p2p:p2p_server_utils", + "../p2p:p2p_test_utils", + "../p2p:rtc_p2p", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:gunit_helpers", + "../rtc_base:ip_address", + "../rtc_base:rtc_base_tests_utils", + "../rtc_base:rtc_json", + "../rtc_base:socket_address", + "../rtc_base:threading", + "../rtc_base:timeutils", + "../rtc_base/synchronization:mutex", + "../rtc_base/task_utils:pending_task_safety_flag", + "../rtc_base/task_utils:to_queued_task", + "../rtc_base/third_party/base64", + "../rtc_base/third_party/sigslot", + "../system_wrappers:metrics", + "../test:field_trial", + "../test:fileutils", + "../test:rtp_test_utils", + "../test:test_support", + "../test/pc/sctp:fake_sctp_transport", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + } + + rtc_library("pc_test_utils") { + testonly = true + sources = [ + "test/fake_audio_capture_module.cc", + "test/fake_audio_capture_module.h", + "test/fake_data_channel_provider.h", + "test/fake_peer_connection_base.h", + "test/fake_peer_connection_for_stats.h", + "test/fake_periodic_video_source.h", + "test/fake_periodic_video_track_source.h", + "test/fake_rtc_certificate_generator.h", + "test/fake_video_track_renderer.h", + 
"test/fake_video_track_source.h", + "test/frame_generator_capturer_video_track_source.h", + "test/mock_channel_interface.h", + "test/mock_data_channel.h", + "test/mock_peer_connection_observers.h", + "test/mock_rtp_receiver_internal.h", + "test/mock_rtp_sender_internal.h", + "test/peer_connection_test_wrapper.cc", + "test/peer_connection_test_wrapper.h", + "test/rtc_stats_obtainer.h", + "test/test_sdp_strings.h", + ] + + deps = [ + ":jitter_buffer_delay", + ":libjingle_peerconnection", + ":peerconnection", + ":rtc_pc_base", + ":rtp_receiver", + ":rtp_sender", + ":video_track_source", + "../api:audio_options_api", + "../api:create_frame_generator", + "../api:create_peerconnection_factory", + "../api:libjingle_peerconnection_api", + "../api:media_stream_interface", + "../api:rtc_error", + "../api:rtc_stats_api", + "../api:scoped_refptr", + "../api:sequence_checker", + "../api/audio:audio_mixer_api", + "../api/audio_codecs:audio_codecs_api", + "../api/task_queue", + "../api/task_queue:default_task_queue_factory", + "../api/video:builtin_video_bitrate_allocator_factory", + "../api/video:video_frame", + "../api/video:video_rtp_headers", + "../api/video_codecs:builtin_video_decoder_factory", + "../api/video_codecs:builtin_video_encoder_factory", + "../api/video_codecs:video_codecs_api", + "../call:call_interfaces", + "../media:rtc_media", + "../media:rtc_media_base", + "../media:rtc_media_tests_utils", + "../modules/audio_device", + "../modules/audio_processing", + "../modules/audio_processing:api", + "../p2p:fake_port_allocator", + "../p2p:p2p_test_utils", + "../p2p:rtc_p2p", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:gunit_helpers", + "../rtc_base:rtc_base_approved", + "../rtc_base:rtc_task_queue", + "../rtc_base:task_queue_for_test", + "../rtc_base:threading", + "../rtc_base/synchronization:mutex", + "../rtc_base/task_utils:repeating_task", + "../rtc_base/third_party/sigslot", + "../test:test_support", + "../test:video_test_common", + ] + absl_deps = [ 
"//third_party/abseil-cpp/absl/types:optional" ] + } } diff --git a/pc/audio_rtp_receiver.cc b/pc/audio_rtp_receiver.cc index 54912a5d71..4efab24d15 100644 --- a/pc/audio_rtp_receiver.cc +++ b/pc/audio_rtp_receiver.cc @@ -15,41 +15,43 @@ #include #include -#include "api/media_stream_proxy.h" -#include "api/media_stream_track_proxy.h" +#include "api/sequence_checker.h" #include "pc/audio_track.h" -#include "pc/jitter_buffer_delay.h" -#include "pc/jitter_buffer_delay_proxy.h" -#include "pc/media_stream.h" +#include "pc/media_stream_track_proxy.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" -#include "rtc_base/trace_event.h" +#include "rtc_base/task_utils/to_queued_task.h" namespace webrtc { AudioRtpReceiver::AudioRtpReceiver(rtc::Thread* worker_thread, std::string receiver_id, - std::vector stream_ids) + std::vector stream_ids, + bool is_unified_plan) : AudioRtpReceiver(worker_thread, receiver_id, - CreateStreamsFromIds(std::move(stream_ids))) {} + CreateStreamsFromIds(std::move(stream_ids)), + is_unified_plan) {} AudioRtpReceiver::AudioRtpReceiver( rtc::Thread* worker_thread, const std::string& receiver_id, - const std::vector>& streams) + const std::vector>& streams, + bool is_unified_plan) : worker_thread_(worker_thread), id_(receiver_id), - source_(new rtc::RefCountedObject(worker_thread)), - track_(AudioTrackProxy::Create(rtc::Thread::Current(), - AudioTrack::Create(receiver_id, source_))), + source_(rtc::make_ref_counted( + worker_thread, + is_unified_plan + ? 
RemoteAudioSource::OnAudioChannelGoneAction::kSurvive + : RemoteAudioSource::OnAudioChannelGoneAction::kEnd)), + track_(AudioTrackProxyWithInternal::Create( + rtc::Thread::Current(), + AudioTrack::Create(receiver_id, source_))), cached_track_enabled_(track_->enabled()), attachment_id_(GenerateUniqueId()), - delay_(JitterBufferDelayProxy::Create( - rtc::Thread::Current(), - worker_thread_, - new rtc::RefCountedObject(worker_thread))) { + worker_thread_safety_(PendingTaskSafetyFlag::CreateDetachedInactive()) { RTC_DCHECK(worker_thread_); RTC_DCHECK(track_->GetSource()->remote()); track_->RegisterObserver(this); @@ -58,134 +60,188 @@ AudioRtpReceiver::AudioRtpReceiver( } AudioRtpReceiver::~AudioRtpReceiver() { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + RTC_DCHECK(stopped_); + RTC_DCHECK(!media_channel_); + track_->GetSource()->UnregisterAudioObserver(this); track_->UnregisterObserver(this); - Stop(); } void AudioRtpReceiver::OnChanged() { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); if (cached_track_enabled_ != track_->enabled()) { cached_track_enabled_ = track_->enabled(); - Reconfigure(); + worker_thread_->PostTask(ToQueuedTask( + worker_thread_safety_, + [this, enabled = cached_track_enabled_, volume = cached_volume_]() { + RTC_DCHECK_RUN_ON(worker_thread_); + Reconfigure(enabled, volume); + })); } } -bool AudioRtpReceiver::SetOutputVolume(double volume) { +// RTC_RUN_ON(worker_thread_) +void AudioRtpReceiver::SetOutputVolume_w(double volume) { RTC_DCHECK_GE(volume, 0.0); RTC_DCHECK_LE(volume, 10.0); - RTC_DCHECK(media_channel_); - RTC_DCHECK(!stopped_); - return worker_thread_->Invoke(RTC_FROM_HERE, [&] { - return ssrc_ ? media_channel_->SetOutputVolume(*ssrc_, volume) - : media_channel_->SetDefaultOutputVolume(volume); - }); + ssrc_ ? 
media_channel_->SetOutputVolume(*ssrc_, volume) + : media_channel_->SetDefaultOutputVolume(volume); } void AudioRtpReceiver::OnSetVolume(double volume) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); RTC_DCHECK_GE(volume, 0); RTC_DCHECK_LE(volume, 10); - cached_volume_ = volume; - if (!media_channel_ || stopped_) { - RTC_LOG(LS_ERROR) - << "AudioRtpReceiver::OnSetVolume: No audio channel exists."; + if (stopped_) return; - } + + cached_volume_ = volume; + // When the track is disabled, the volume of the source, which is the // corresponding WebRtc Voice Engine channel will be 0. So we do not allow // setting the volume to the source when the track is disabled. - if (!stopped_ && track_->enabled()) { - if (!SetOutputVolume(cached_volume_)) { - RTC_NOTREACHED(); - } + if (track_->enabled()) { + worker_thread_->PostTask( + ToQueuedTask(worker_thread_safety_, [this, volume = cached_volume_]() { + RTC_DCHECK_RUN_ON(worker_thread_); + SetOutputVolume_w(volume); + })); } } +rtc::scoped_refptr AudioRtpReceiver::dtls_transport() + const { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + return dtls_transport_; +} + std::vector AudioRtpReceiver::stream_ids() const { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); std::vector stream_ids(streams_.size()); for (size_t i = 0; i < streams_.size(); ++i) stream_ids[i] = streams_[i]->id(); return stream_ids; } +std::vector> +AudioRtpReceiver::streams() const { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + return streams_; +} + RtpParameters AudioRtpReceiver::GetParameters() const { - if (!media_channel_ || stopped_) { + RTC_DCHECK_RUN_ON(worker_thread_); + if (!media_channel_) return RtpParameters(); - } - return worker_thread_->Invoke(RTC_FROM_HERE, [&] { - return ssrc_ ? media_channel_->GetRtpReceiveParameters(*ssrc_) - : media_channel_->GetDefaultRtpReceiveParameters(); - }); + return ssrc_ ? 
media_channel_->GetRtpReceiveParameters(*ssrc_) + : media_channel_->GetDefaultRtpReceiveParameters(); } void AudioRtpReceiver::SetFrameDecryptor( rtc::scoped_refptr frame_decryptor) { + RTC_DCHECK_RUN_ON(worker_thread_); frame_decryptor_ = std::move(frame_decryptor); // Special Case: Set the frame decryptor to any value on any existing channel. - if (media_channel_ && ssrc_.has_value() && !stopped_) { - worker_thread_->Invoke(RTC_FROM_HERE, [&] { - media_channel_->SetFrameDecryptor(*ssrc_, frame_decryptor_); - }); + if (media_channel_ && ssrc_) { + media_channel_->SetFrameDecryptor(*ssrc_, frame_decryptor_); } } rtc::scoped_refptr AudioRtpReceiver::GetFrameDecryptor() const { + RTC_DCHECK_RUN_ON(worker_thread_); return frame_decryptor_; } void AudioRtpReceiver::Stop() { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); // TODO(deadbeef): Need to do more here to fully stop receiving packets. - if (stopped_) { - return; - } - if (media_channel_) { - // Allow that SetOutputVolume fail. This is the normal case when the - // underlying media channel has already been deleted. - SetOutputVolume(0.0); + if (!stopped_) { + source_->SetState(MediaSourceInterface::kEnded); + stopped_ = true; } - stopped_ = true; + + worker_thread_->Invoke(RTC_FROM_HERE, [&]() { + RTC_DCHECK_RUN_ON(worker_thread_); + if (media_channel_) + SetOutputVolume_w(0.0); + SetMediaChannel_w(nullptr); + }); +} + +void AudioRtpReceiver::StopAndEndTrack() { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + Stop(); + track_->internal()->set_ended(); } void AudioRtpReceiver::RestartMediaChannel(absl::optional ssrc) { - RTC_DCHECK(media_channel_); - if (!stopped_ && ssrc_ == ssrc) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + bool ok = worker_thread_->Invoke( + RTC_FROM_HERE, [&, enabled = cached_track_enabled_, + volume = cached_volume_, was_stopped = stopped_]() { + RTC_DCHECK_RUN_ON(worker_thread_); + if (!media_channel_) { + RTC_DCHECK(was_stopped); + return false; // Can't restart. 
+ } + + if (!was_stopped && ssrc_ == ssrc) { + // Already running with that ssrc. + RTC_DCHECK(worker_thread_safety_->alive()); + return true; + } + + if (!was_stopped) { + source_->Stop(media_channel_, ssrc_); + } + + ssrc_ = std::move(ssrc); + source_->Start(media_channel_, ssrc_); + if (ssrc_) { + media_channel_->SetBaseMinimumPlayoutDelayMs(*ssrc_, delay_.GetMs()); + } + + Reconfigure(enabled, volume); + return true; + }); + + if (!ok) return; - } - if (!stopped_) { - source_->Stop(media_channel_, ssrc_); - delay_->OnStop(); - } - ssrc_ = ssrc; stopped_ = false; - source_->Start(media_channel_, ssrc); - delay_->OnStart(media_channel_, ssrc.value_or(0)); - Reconfigure(); } void AudioRtpReceiver::SetupMediaChannel(uint32_t ssrc) { - if (!media_channel_) { - RTC_LOG(LS_ERROR) - << "AudioRtpReceiver::SetupMediaChannel: No audio channel exists."; - return; - } + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); RestartMediaChannel(ssrc); } void AudioRtpReceiver::SetupUnsignaledMediaChannel() { - if (!media_channel_) { - RTC_LOG(LS_ERROR) << "AudioRtpReceiver::SetupUnsignaledMediaChannel: No " - "audio channel exists."; - } + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); RestartMediaChannel(absl::nullopt); } +uint32_t AudioRtpReceiver::ssrc() const { + RTC_DCHECK_RUN_ON(worker_thread_); + return ssrc_.value_or(0); +} + void AudioRtpReceiver::set_stream_ids(std::vector stream_ids) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); SetStreams(CreateStreamsFromIds(std::move(stream_ids))); } +void AudioRtpReceiver::set_transport( + rtc::scoped_refptr dtls_transport) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + dtls_transport_ = std::move(dtls_transport); +} + void AudioRtpReceiver::SetStreams( const std::vector>& streams) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); // Remove remote track from any streams that are going away. 
for (const auto& existing_stream : streams_) { bool removed = true; @@ -218,51 +274,42 @@ void AudioRtpReceiver::SetStreams( } std::vector AudioRtpReceiver::GetSources() const { - if (!media_channel_ || !ssrc_ || stopped_) { + RTC_DCHECK_RUN_ON(worker_thread_); + if (!media_channel_ || !ssrc_) { return {}; } - return worker_thread_->Invoke>( - RTC_FROM_HERE, [&] { return media_channel_->GetSources(*ssrc_); }); + return media_channel_->GetSources(*ssrc_); } void AudioRtpReceiver::SetDepacketizerToDecoderFrameTransformer( rtc::scoped_refptr frame_transformer) { - worker_thread_->Invoke( - RTC_FROM_HERE, [this, frame_transformer = std::move(frame_transformer)] { - RTC_DCHECK_RUN_ON(worker_thread_); - frame_transformer_ = frame_transformer; - if (media_channel_ && ssrc_.has_value() && !stopped_) { - media_channel_->SetDepacketizerToDecoderFrameTransformer( - *ssrc_, frame_transformer); - } - }); + RTC_DCHECK_RUN_ON(worker_thread_); + if (media_channel_) { + media_channel_->SetDepacketizerToDecoderFrameTransformer(ssrc_.value_or(0), + frame_transformer); + } + frame_transformer_ = std::move(frame_transformer); } -void AudioRtpReceiver::Reconfigure() { - if (!media_channel_ || stopped_) { - RTC_LOG(LS_ERROR) - << "AudioRtpReceiver::Reconfigure: No audio channel exists."; - return; - } - if (!SetOutputVolume(track_->enabled() ? cached_volume_ : 0)) { - RTC_NOTREACHED(); +// RTC_RUN_ON(worker_thread_) +void AudioRtpReceiver::Reconfigure(bool track_enabled, double volume) { + RTC_DCHECK(media_channel_); + + SetOutputVolume_w(track_enabled ? volume : 0); + + if (ssrc_ && frame_decryptor_) { + // Reattach the frame decryptor if we were reconfigured. + media_channel_->SetFrameDecryptor(*ssrc_, frame_decryptor_); } - // Reattach the frame decryptor if we were reconfigured. 
- MaybeAttachFrameDecryptorToMediaChannel( - ssrc_, worker_thread_, frame_decryptor_, media_channel_, stopped_); - - if (media_channel_ && ssrc_.has_value() && !stopped_) { - worker_thread_->Invoke(RTC_FROM_HERE, [this] { - RTC_DCHECK_RUN_ON(worker_thread_); - if (!frame_transformer_) - return; - media_channel_->SetDepacketizerToDecoderFrameTransformer( - *ssrc_, frame_transformer_); - }); + + if (frame_transformer_) { + media_channel_->SetDepacketizerToDecoderFrameTransformer( + ssrc_.value_or(0), frame_transformer_); } } void AudioRtpReceiver::SetObserver(RtpReceiverObserverInterface* observer) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); observer_ = observer; // Deliver any notifications the observer may have missed by being set late. if (received_first_packet_ && observer_) { @@ -272,16 +319,35 @@ void AudioRtpReceiver::SetObserver(RtpReceiverObserverInterface* observer) { void AudioRtpReceiver::SetJitterBufferMinimumDelay( absl::optional delay_seconds) { - delay_->Set(delay_seconds); + RTC_DCHECK_RUN_ON(worker_thread_); + delay_.Set(delay_seconds); + if (media_channel_ && ssrc_) + media_channel_->SetBaseMinimumPlayoutDelayMs(*ssrc_, delay_.GetMs()); } void AudioRtpReceiver::SetMediaChannel(cricket::MediaChannel* media_channel) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); RTC_DCHECK(media_channel == nullptr || media_channel->media_type() == media_type()); + + if (stopped_ && !media_channel) + return; + + worker_thread_->Invoke(RTC_FROM_HERE, [&] { + RTC_DCHECK_RUN_ON(worker_thread_); + SetMediaChannel_w(media_channel); + }); +} + +// RTC_RUN_ON(worker_thread_) +void AudioRtpReceiver::SetMediaChannel_w(cricket::MediaChannel* media_channel) { + media_channel ? 
worker_thread_safety_->SetAlive() + : worker_thread_safety_->SetNotAlive(); media_channel_ = static_cast(media_channel); } void AudioRtpReceiver::NotifyFirstPacketReceived() { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); if (observer_) { observer_->OnFirstPacketReceived(media_type()); } diff --git a/pc/audio_rtp_receiver.h b/pc/audio_rtp_receiver.h index 88b16ee682..c3468721d8 100644 --- a/pc/audio_rtp_receiver.h +++ b/pc/audio_rtp_receiver.h @@ -18,31 +18,43 @@ #include "absl/types/optional.h" #include "api/crypto/frame_decryptor_interface.h" +#include "api/dtls_transport_interface.h" +#include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" #include "api/media_types.h" #include "api/rtp_parameters.h" +#include "api/rtp_receiver_interface.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/transport/rtp/rtp_source.h" #include "media/base/media_channel.h" -#include "pc/jitter_buffer_delay_interface.h" +#include "pc/audio_track.h" +#include "pc/jitter_buffer_delay.h" +#include "pc/media_stream_track_proxy.h" #include "pc/remote_audio_source.h" #include "pc/rtp_receiver.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { class AudioRtpReceiver : public ObserverInterface, public AudioSourceInterface::AudioObserver, - public rtc::RefCountedObject { + public RtpReceiverInternal { public: AudioRtpReceiver(rtc::Thread* worker_thread, std::string receiver_id, - std::vector stream_ids); + std::vector stream_ids, + bool is_unified_plan); // TODO(https://crbug.com/webrtc/9480): Remove this when streams() is removed. 
AudioRtpReceiver( rtc::Thread* worker_thread, const std::string& receiver_id, - const std::vector>& streams); + const std::vector>& streams, + bool is_unified_plan); virtual ~AudioRtpReceiver(); // ObserverInterface implementation @@ -51,22 +63,16 @@ class AudioRtpReceiver : public ObserverInterface, // AudioSourceInterface::AudioObserver implementation void OnSetVolume(double volume) override; - rtc::scoped_refptr audio_track() const { - return track_.get(); - } + rtc::scoped_refptr audio_track() const { return track_; } // RtpReceiverInterface implementation rtc::scoped_refptr track() const override { - return track_.get(); - } - rtc::scoped_refptr dtls_transport() const override { - return dtls_transport_; + return track_; } + rtc::scoped_refptr dtls_transport() const override; std::vector stream_ids() const override; std::vector> streams() - const override { - return streams_; - } + const override; cricket::MediaType media_type() const override { return cricket::MEDIA_TYPE_AUDIO; @@ -84,15 +90,14 @@ class AudioRtpReceiver : public ObserverInterface, // RtpReceiverInternal implementation. 
void Stop() override; + void StopAndEndTrack() override; void SetupMediaChannel(uint32_t ssrc) override; void SetupUnsignaledMediaChannel() override; - uint32_t ssrc() const override { return ssrc_.value_or(0); } + uint32_t ssrc() const override; void NotifyFirstPacketReceived() override; void set_stream_ids(std::vector stream_ids) override; void set_transport( - rtc::scoped_refptr dtls_transport) override { - dtls_transport_ = dtls_transport; - } + rtc::scoped_refptr dtls_transport) override; void SetStreams(const std::vector>& streams) override; void SetObserver(RtpReceiverObserverInterface* observer) override; @@ -110,29 +115,40 @@ class AudioRtpReceiver : public ObserverInterface, private: void RestartMediaChannel(absl::optional ssrc); - void Reconfigure(); - bool SetOutputVolume(double volume); + void Reconfigure(bool track_enabled, double volume) + RTC_RUN_ON(worker_thread_); + void SetOutputVolume_w(double volume) RTC_RUN_ON(worker_thread_); + void SetMediaChannel_w(cricket::MediaChannel* media_channel) + RTC_RUN_ON(worker_thread_); + RTC_NO_UNIQUE_ADDRESS SequenceChecker signaling_thread_checker_; rtc::Thread* const worker_thread_; const std::string id_; const rtc::scoped_refptr source_; - const rtc::scoped_refptr track_; - cricket::VoiceMediaChannel* media_channel_ = nullptr; - absl::optional ssrc_; - std::vector> streams_; - bool cached_track_enabled_; - double cached_volume_ = 1; - bool stopped_ = true; - RtpReceiverObserverInterface* observer_ = nullptr; - bool received_first_packet_ = false; - int attachment_id_ = 0; - rtc::scoped_refptr frame_decryptor_; - rtc::scoped_refptr dtls_transport_; - // Allows to thread safely change playout delay. 
Handles caching cases if + const rtc::scoped_refptr> track_; + cricket::VoiceMediaChannel* media_channel_ RTC_GUARDED_BY(worker_thread_) = + nullptr; + absl::optional ssrc_ RTC_GUARDED_BY(worker_thread_); + std::vector> streams_ + RTC_GUARDED_BY(&signaling_thread_checker_); + bool cached_track_enabled_ RTC_GUARDED_BY(&signaling_thread_checker_); + double cached_volume_ RTC_GUARDED_BY(&signaling_thread_checker_) = 1.0; + bool stopped_ RTC_GUARDED_BY(&signaling_thread_checker_) = true; + RtpReceiverObserverInterface* observer_ + RTC_GUARDED_BY(&signaling_thread_checker_) = nullptr; + bool received_first_packet_ RTC_GUARDED_BY(&signaling_thread_checker_) = + false; + const int attachment_id_; + rtc::scoped_refptr frame_decryptor_ + RTC_GUARDED_BY(worker_thread_); + rtc::scoped_refptr dtls_transport_ + RTC_GUARDED_BY(&signaling_thread_checker_); + // Stores and updates the playout delay. Handles caching cases if // |SetJitterBufferMinimumDelay| is called before start. - rtc::scoped_refptr delay_; + JitterBufferDelay delay_ RTC_GUARDED_BY(worker_thread_); rtc::scoped_refptr frame_transformer_ RTC_GUARDED_BY(worker_thread_); + const rtc::scoped_refptr worker_thread_safety_; }; } // namespace webrtc diff --git a/pc/audio_track.cc b/pc/audio_track.cc index ff680652c9..be087f693b 100644 --- a/pc/audio_track.cc +++ b/pc/audio_track.cc @@ -19,7 +19,7 @@ namespace webrtc { rtc::scoped_refptr AudioTrack::Create( const std::string& id, const rtc::scoped_refptr& source) { - return new rtc::RefCountedObject(id, source); + return rtc::make_ref_counted(id, source); } AudioTrack::AudioTrack(const std::string& label, @@ -32,36 +32,35 @@ AudioTrack::AudioTrack(const std::string& label, } AudioTrack::~AudioTrack() { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); set_state(MediaStreamTrackInterface::kEnded); if (audio_source_) audio_source_->UnregisterObserver(this); } std::string AudioTrack::kind() const { - RTC_DCHECK(thread_checker_.IsCurrent()); 
return kAudioKind; } AudioSourceInterface* AudioTrack::GetSource() const { - RTC_DCHECK(thread_checker_.IsCurrent()); + // Callable from any thread. return audio_source_.get(); } void AudioTrack::AddSink(AudioTrackSinkInterface* sink) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); if (audio_source_) audio_source_->AddSink(sink); } void AudioTrack::RemoveSink(AudioTrackSinkInterface* sink) { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); if (audio_source_) audio_source_->RemoveSink(sink); } void AudioTrack::OnChanged() { - RTC_DCHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); if (audio_source_->state() == MediaSourceInterface::kEnded) { set_state(kEnded); } else { diff --git a/pc/audio_track.h b/pc/audio_track.h index f89bbcdd1d..8a705cf8fb 100644 --- a/pc/audio_track.h +++ b/pc/audio_track.h @@ -14,10 +14,9 @@ #include #include "api/media_stream_interface.h" +#include "api/media_stream_track.h" #include "api/scoped_refptr.h" -#include "pc/media_stream_track.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/thread_checker.h" +#include "api/sequence_checker.h" namespace webrtc { @@ -27,6 +26,11 @@ class AudioTrack : public MediaStreamTrack, // Protected ctor to force use of factory method. AudioTrack(const std::string& label, const rtc::scoped_refptr& source); + + AudioTrack() = delete; + AudioTrack(const AudioTrack&) = delete; + AudioTrack& operator=(const AudioTrack&) = delete; + ~AudioTrack() override; public: @@ -34,7 +38,6 @@ class AudioTrack : public MediaStreamTrack, const std::string& id, const rtc::scoped_refptr& source); - private: // MediaStreamTrack implementation. std::string kind() const override; @@ -44,13 +47,13 @@ class AudioTrack : public MediaStreamTrack, void AddSink(AudioTrackSinkInterface* sink) override; void RemoveSink(AudioTrackSinkInterface* sink) override; + private: // ObserverInterface implementation. 
void OnChanged() override; private: const rtc::scoped_refptr audio_source_; - rtc::ThreadChecker thread_checker_; - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioTrack); + SequenceChecker thread_checker_; }; } // namespace webrtc diff --git a/pc/channel.cc b/pc/channel.cc index 8a3a800210..8630703be1 100644 --- a/pc/channel.cc +++ b/pc/channel.cc @@ -10,40 +10,39 @@ #include "pc/channel.h" +#include +#include #include +#include #include #include "absl/algorithm/container.h" -#include "absl/memory/memory.h" -#include "api/call/audio_sink.h" -#include "api/transport/media/media_transport_config.h" -#include "media/base/media_constants.h" +#include "absl/strings/string_view.h" +#include "api/rtp_parameters.h" +#include "api/sequence_checker.h" +#include "api/task_queue/queued_task.h" +#include "media/base/codec.h" +#include "media/base/rid_description.h" #include "media/base/rtp_utils.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "p2p/base/packet_transport_internal.h" -#include "pc/channel_manager.h" #include "pc/rtp_media_utils.h" -#include "rtc_base/bind.h" -#include "rtc_base/byte_order.h" #include "rtc_base/checks.h" #include "rtc_base/copy_on_write_buffer.h" -#include "rtc_base/dscp.h" #include "rtc_base/logging.h" #include "rtc_base/network_route.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/trace_event.h" namespace cricket { -using rtc::Bind; -using rtc::UniqueRandomIdGenerator; -using webrtc::SdpType; - namespace { -struct SendPacketMessageData : public rtc::MessageData { - rtc::CopyOnWriteBuffer packet; - rtc::PacketOptions options; -}; +using ::rtc::UniqueRandomIdGenerator; +using ::webrtc::PendingTaskSafetyFlag; +using ::webrtc::SdpType; +using ::webrtc::ToQueuedTask; // Finds a stream based on target's Primary SSRC or RIDs. 
// This struct is used in BaseChannel::UpdateLocalStreams_w. @@ -80,14 +79,6 @@ struct StreamFinder { } // namespace -enum { - MSG_SEND_RTP_PACKET = 1, - MSG_SEND_RTCP_PACKET, - MSG_READYTOSENDDATA, - MSG_DATARECEIVED, - MSG_FIRSTPACKETRECEIVED, -}; - static void SafeSetError(const std::string& message, std::string* error_desc) { if (error_desc) { *error_desc = message; @@ -134,6 +125,7 @@ BaseChannel::BaseChannel(rtc::Thread* worker_thread, : worker_thread_(worker_thread), network_thread_(network_thread), signaling_thread_(signaling_thread), + alive_(PendingTaskSafetyFlag::Create()), content_name_(content_name), srtp_required_(srtp_required), crypto_options_(crypto_options), @@ -150,13 +142,10 @@ BaseChannel::~BaseChannel() { RTC_DCHECK_RUN_ON(worker_thread_); // Eats any outstanding messages or packets. - worker_thread_->Clear(&invoker_); - worker_thread_->Clear(this); - // We must destroy the media channel before the transport channel, otherwise - // the media channel may try to send on the dead transport channel. NULLing - // is not an effective strategy since the sends will come on another thread. - media_channel_.reset(); - RTC_LOG(LS_INFO) << "Destroyed channel: " << ToString(); + alive_->SetNotAlive(); + // The media channel is destroyed at the end of the destructor, since it + // is a std::unique_ptr. The transport channel (rtp_transport) must outlive + // the media channel. } std::string BaseChannel::ToString() const { @@ -171,7 +160,15 @@ std::string BaseChannel::ToString() const { bool BaseChannel::ConnectToRtpTransport() { RTC_DCHECK(rtp_transport_); - if (!RegisterRtpDemuxerSink()) { + RTC_DCHECK(media_channel()); + + // We don't need to call OnDemuxerCriteriaUpdatePending/Complete because + // there's no previous criteria to worry about. 
+ bool result = rtp_transport_->RegisterRtpDemuxerSink(demuxer_criteria_, this); + if (result) { + previous_demuxer_criteria_ = demuxer_criteria_; + } else { + previous_demuxer_criteria_ = {}; RTC_LOG(LS_ERROR) << "Failed to set up demuxing for " << ToString(); return false; } @@ -188,6 +185,7 @@ bool BaseChannel::ConnectToRtpTransport() { void BaseChannel::DisconnectFromRtpTransport() { RTC_DCHECK(rtp_transport_); + RTC_DCHECK(media_channel()); rtp_transport_->UnregisterRtpDemuxerSink(this); rtp_transport_->SignalReadyToSend.disconnect(this); rtp_transport_->SignalNetworkRouteChanged.disconnect(this); @@ -195,50 +193,39 @@ void BaseChannel::DisconnectFromRtpTransport() { rtp_transport_->SignalSentPacket.disconnect(this); } -void BaseChannel::Init_w( - webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config) { - RTC_DCHECK_RUN_ON(worker_thread_); - media_transport_config_ = media_transport_config; - - network_thread_->Invoke( - RTC_FROM_HERE, [this, rtp_transport] { SetRtpTransport(rtp_transport); }); +void BaseChannel::Init_w(webrtc::RtpTransportInternal* rtp_transport) { + RTC_DCHECK_RUN_ON(worker_thread()); - // Both RTP and RTCP channels should be set, we can call SetInterface on - // the media channel and it can set network options. - media_channel_->SetInterface(this, media_transport_config); + network_thread_->Invoke(RTC_FROM_HERE, [this, rtp_transport] { + SetRtpTransport(rtp_transport); + // Both RTP and RTCP channels should be set, we can call SetInterface on + // the media channel and it can set network options. 
+ media_channel_->SetInterface(this); + }); } void BaseChannel::Deinit() { - RTC_DCHECK(worker_thread_->IsCurrent()); - media_channel_->SetInterface(/*iface=*/nullptr, - webrtc::MediaTransportConfig()); + RTC_DCHECK_RUN_ON(worker_thread()); // Packets arrive on the network thread, processing packets calls virtual // functions, so need to stop this process in Deinit that is called in // derived classes destructor. network_thread_->Invoke(RTC_FROM_HERE, [&] { - FlushRtcpMessages_n(); + RTC_DCHECK_RUN_ON(network_thread()); + media_channel_->SetInterface(/*iface=*/nullptr); if (rtp_transport_) { DisconnectFromRtpTransport(); } - // Clear pending read packets/messages. - network_thread_->Clear(&invoker_); - network_thread_->Clear(this); }); } bool BaseChannel::SetRtpTransport(webrtc::RtpTransportInternal* rtp_transport) { + TRACE_EVENT0("webrtc", "BaseChannel::SetRtpTransport"); + RTC_DCHECK_RUN_ON(network_thread()); if (rtp_transport == rtp_transport_) { return true; } - if (!network_thread_->IsCurrent()) { - return network_thread_->Invoke(RTC_FROM_HERE, [this, rtp_transport] { - return SetRtpTransport(rtp_transport); - }); - } - if (rtp_transport_) { DisconnectFromRtpTransport(); } @@ -268,48 +255,59 @@ bool BaseChannel::SetRtpTransport(webrtc::RtpTransportInternal* rtp_transport) { return true; } -bool BaseChannel::Enable(bool enable) { - worker_thread_->Invoke( - RTC_FROM_HERE, - Bind(enable ? &BaseChannel::EnableMedia_w : &BaseChannel::DisableMedia_w, - this)); - return true; +void BaseChannel::Enable(bool enable) { + RTC_DCHECK_RUN_ON(signaling_thread()); + + if (enable == enabled_s_) + return; + + enabled_s_ = enable; + + worker_thread_->PostTask(ToQueuedTask(alive_, [this, enable] { + RTC_DCHECK_RUN_ON(worker_thread()); + // Sanity check to make sure that enabled_ and enabled_s_ + // stay in sync. 
+ RTC_DCHECK_NE(enabled_, enable); + if (enable) { + EnableMedia_w(); + } else { + DisableMedia_w(); + } + })); } bool BaseChannel::SetLocalContent(const MediaContentDescription* content, SdpType type, std::string* error_desc) { + RTC_DCHECK_RUN_ON(worker_thread()); TRACE_EVENT0("webrtc", "BaseChannel::SetLocalContent"); - return InvokeOnWorker( - RTC_FROM_HERE, - Bind(&BaseChannel::SetLocalContent_w, this, content, type, error_desc)); + return SetLocalContent_w(content, type, error_desc); } bool BaseChannel::SetRemoteContent(const MediaContentDescription* content, SdpType type, std::string* error_desc) { + RTC_DCHECK_RUN_ON(worker_thread()); TRACE_EVENT0("webrtc", "BaseChannel::SetRemoteContent"); - return InvokeOnWorker( - RTC_FROM_HERE, - Bind(&BaseChannel::SetRemoteContent_w, this, content, type, error_desc)); + return SetRemoteContent_w(content, type, error_desc); +} + +bool BaseChannel::SetPayloadTypeDemuxingEnabled(bool enabled) { + RTC_DCHECK_RUN_ON(worker_thread()); + TRACE_EVENT0("webrtc", "BaseChannel::SetPayloadTypeDemuxingEnabled"); + return SetPayloadTypeDemuxingEnabled_w(enabled); } bool BaseChannel::IsReadyToReceiveMedia_w() const { // Receive data if we are enabled and have local content, - return enabled() && + return enabled_ && webrtc::RtpTransceiverDirectionHasRecv(local_content_direction_); } bool BaseChannel::IsReadyToSendMedia_w() const { - // Need to access some state updated on the network thread. - return network_thread_->Invoke( - RTC_FROM_HERE, Bind(&BaseChannel::IsReadyToSendMedia_n, this)); -} - -bool BaseChannel::IsReadyToSendMedia_n() const { // Send outgoing data if we are enabled, have local and remote content, // and we have had some form of connectivity. 
- return enabled() && + return enabled_ && webrtc::RtpTransceiverDirectionHasRecv(remote_content_direction_) && webrtc::RtpTransceiverDirectionHasSend(local_content_direction_) && was_ever_writable(); @@ -328,14 +326,7 @@ bool BaseChannel::SendRtcp(rtc::CopyOnWriteBuffer* packet, int BaseChannel::SetOption(SocketType type, rtc::Socket::Option opt, int value) { - return network_thread_->Invoke( - RTC_FROM_HERE, Bind(&BaseChannel::SetOption_n, this, type, opt, value)); -} - -int BaseChannel::SetOption_n(SocketType type, - rtc::Socket::Option opt, - int value) { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread()); RTC_DCHECK(rtp_transport_); switch (type) { case ST_RTP: @@ -351,7 +342,7 @@ int BaseChannel::SetOption_n(SocketType type, } void BaseChannel::OnWritableState(bool writable) { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread()); if (writable) { ChannelWritable_n(); } else { @@ -361,9 +352,9 @@ void BaseChannel::OnWritableState(bool writable) { void BaseChannel::OnNetworkRouteChanged( absl::optional network_route) { - RTC_LOG(LS_INFO) << "Network route for " << ToString() << " was changed."; + RTC_LOG(LS_INFO) << "Network route changed for " << ToString(); - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread()); rtc::NetworkRoute new_route; if (network_route) { new_route = *(network_route); @@ -372,19 +363,25 @@ void BaseChannel::OnNetworkRouteChanged( // use the same transport name and MediaChannel::OnNetworkRouteChanged cannot // work correctly. Intentionally leave it broken to simplify the code and // encourage the users to stop using non-muxing RTCP. 
- invoker_.AsyncInvoke(RTC_FROM_HERE, worker_thread_, [=] { - media_channel_->OnNetworkRouteChanged(transport_name_, new_route); - }); + media_channel_->OnNetworkRouteChanged(transport_name_, new_route); +} + +void BaseChannel::SetFirstPacketReceivedCallback( + std::function callback) { + RTC_DCHECK_RUN_ON(network_thread()); + RTC_DCHECK(!on_first_packet_received_ || !callback); + on_first_packet_received_ = std::move(callback); } void BaseChannel::OnTransportReadyToSend(bool ready) { - invoker_.AsyncInvoke(RTC_FROM_HERE, worker_thread_, - [=] { media_channel_->OnReadyToSend(ready); }); + RTC_DCHECK_RUN_ON(network_thread()); + media_channel_->OnReadyToSend(ready); } bool BaseChannel::SendPacket(bool rtcp, rtc::CopyOnWriteBuffer* packet, const rtc::PacketOptions& options) { + RTC_DCHECK_RUN_ON(network_thread()); // Until all the code is migrated to use RtpPacketType instead of bool. RtpPacketType packet_type = rtcp ? RtpPacketType::kRtcp : RtpPacketType::kRtp; // SendPacket gets called from MediaEngine, on a pacer or an encoder thread. @@ -394,15 +391,6 @@ bool BaseChannel::SendPacket(bool rtcp, // SRTP and the inner workings of the transport channels. // The only downside is that we can't return a proper failure code if // needed. Since UDP is unreliable anyway, this should be a non-issue. - if (!network_thread_->IsCurrent()) { - // Avoid a copy by transferring the ownership of the packet data. - int message_id = rtcp ? MSG_SEND_RTCP_PACKET : MSG_SEND_RTP_PACKET; - SendPacketMessageData* data = new SendPacketMessageData; - data->packet = std::move(*packet); - data->options = options; - network_thread_->Post(RTC_FROM_HERE, this, message_id, data); - return true; - } TRACE_EVENT0("webrtc", "BaseChannel::SendPacket"); @@ -439,9 +427,9 @@ bool BaseChannel::SendPacket(bool rtcp, } std::string packet_type = rtcp ? 
"RTCP" : "RTP"; - RTC_LOG(LS_WARNING) << "Sending an " << packet_type - << " packet without encryption for " << ToString() - << "."; + RTC_DLOG(LS_WARNING) << "Sending an " << packet_type + << " packet without encryption for " << ToString() + << "."; } // Bon voyage. @@ -450,16 +438,11 @@ bool BaseChannel::SendPacket(bool rtcp, } void BaseChannel::OnRtpPacket(const webrtc::RtpPacketReceived& parsed_packet) { - // Take packet time from the |parsed_packet|. - // RtpPacketReceived.arrival_time_ms = (timestamp_us + 500) / 1000; - int64_t packet_time_us = -1; - if (parsed_packet.arrival_time_ms() > 0) { - packet_time_us = parsed_packet.arrival_time_ms() * 1000; - } + RTC_DCHECK_RUN_ON(network_thread()); - if (!has_received_packet_) { - has_received_packet_ = true; - signaling_thread()->Post(RTC_FROM_HERE, this, MSG_FIRSTPACKETRECEIVED); + if (on_first_packet_received_) { + on_first_packet_received_(); + on_first_packet_received_ = nullptr; } if (!srtp_active() && srtp_required_) { @@ -480,40 +463,50 @@ void BaseChannel::OnRtpPacket(const webrtc::RtpPacketReceived& parsed_packet) { return; } - auto packet_buffer = parsed_packet.Buffer(); - - invoker_.AsyncInvoke( - RTC_FROM_HERE, worker_thread_, [this, packet_buffer, packet_time_us] { - RTC_DCHECK(worker_thread_->IsCurrent()); - media_channel_->OnPacketReceived(packet_buffer, packet_time_us); - }); + webrtc::Timestamp packet_time = parsed_packet.arrival_time(); + media_channel_->OnPacketReceived( + parsed_packet.Buffer(), + packet_time.IsMinusInfinity() ? -1 : packet_time.us()); } void BaseChannel::UpdateRtpHeaderExtensionMap( const RtpHeaderExtensions& header_extensions) { - RTC_DCHECK(rtp_transport_); // Update the header extension map on network thread in case there is data // race. - // TODO(zhihuang): Add an rtc::ThreadChecker make sure to RtpTransport won't - // be accessed from different threads. 
// // NOTE: This doesn't take the BUNDLE case in account meaning the RTP header // extension maps are not merged when BUNDLE is enabled. This is fine because // the ID for MID should be consistent among all the RTP transports. network_thread_->Invoke(RTC_FROM_HERE, [this, &header_extensions] { + RTC_DCHECK_RUN_ON(network_thread()); rtp_transport_->UpdateRtpHeaderExtensionMap(header_extensions); }); } -bool BaseChannel::RegisterRtpDemuxerSink() { - RTC_DCHECK(rtp_transport_); - return network_thread_->Invoke(RTC_FROM_HERE, [this] { - return rtp_transport_->RegisterRtpDemuxerSink(demuxer_criteria_, this); - }); +bool BaseChannel::RegisterRtpDemuxerSink_w() { + if (demuxer_criteria_ == previous_demuxer_criteria_) { + return true; + } + media_channel_->OnDemuxerCriteriaUpdatePending(); + // Copy demuxer criteria, since they're a worker-thread variable + // and we want to pass them to the network thread + return network_thread_->Invoke( + RTC_FROM_HERE, [this, demuxer_criteria = demuxer_criteria_] { + RTC_DCHECK_RUN_ON(network_thread()); + RTC_DCHECK(rtp_transport_); + bool result = + rtp_transport_->RegisterRtpDemuxerSink(demuxer_criteria, this); + if (result) { + previous_demuxer_criteria_ = demuxer_criteria; + } else { + previous_demuxer_criteria_ = {}; + } + media_channel_->OnDemuxerCriteriaUpdateComplete(); + return result; + }); } void BaseChannel::EnableMedia_w() { - RTC_DCHECK(worker_thread_ == rtc::Thread::Current()); if (enabled_) return; @@ -523,7 +516,6 @@ void BaseChannel::EnableMedia_w() { } void BaseChannel::DisableMedia_w() { - RTC_DCHECK(worker_thread_ == rtc::Thread::Current()); if (!enabled_) return; @@ -533,6 +525,7 @@ void BaseChannel::DisableMedia_w() { } void BaseChannel::UpdateWritableState_n() { + TRACE_EVENT0("webrtc", "BaseChannel::UpdateWritableState_n"); if (rtp_transport_->IsWritable(/*rtcp=*/true) && rtp_transport_->IsWritable(/*rtcp=*/false)) { ChannelWritable_n(); @@ -542,44 +535,76 @@ void BaseChannel::UpdateWritableState_n() { } void 
BaseChannel::ChannelWritable_n() { - RTC_DCHECK(network_thread_->IsCurrent()); + TRACE_EVENT0("webrtc", "BaseChannel::ChannelWritable_n"); if (writable_) { return; } - - RTC_LOG(LS_INFO) << "Channel writable (" << ToString() << ")" - << (was_ever_writable_ ? "" : " for the first time"); - - was_ever_writable_ = true; writable_ = true; - UpdateMediaSendRecvState(); + RTC_LOG(LS_INFO) << "Channel writable (" << ToString() << ")" + << (was_ever_writable_n_ ? "" : " for the first time"); + // We only have to do this PostTask once, when first transitioning to + // writable. + if (!was_ever_writable_n_) { + worker_thread_->PostTask(ToQueuedTask(alive_, [this] { + RTC_DCHECK_RUN_ON(worker_thread()); + was_ever_writable_ = true; + UpdateMediaSendRecvState_w(); + })); + } + was_ever_writable_n_ = true; } void BaseChannel::ChannelNotWritable_n() { - RTC_DCHECK(network_thread_->IsCurrent()); - if (!writable_) + TRACE_EVENT0("webrtc", "BaseChannel::ChannelNotWritable_n"); + if (!writable_) { return; - - RTC_LOG(LS_INFO) << "Channel not writable (" << ToString() << ")"; + } writable_ = false; - UpdateMediaSendRecvState(); + RTC_LOG(LS_INFO) << "Channel not writable (" << ToString() << ")"; } bool BaseChannel::AddRecvStream_w(const StreamParams& sp) { - RTC_DCHECK(worker_thread() == rtc::Thread::Current()); return media_channel()->AddRecvStream(sp); } bool BaseChannel::RemoveRecvStream_w(uint32_t ssrc) { - RTC_DCHECK(worker_thread() == rtc::Thread::Current()); return media_channel()->RemoveRecvStream(ssrc); } void BaseChannel::ResetUnsignaledRecvStream_w() { - RTC_DCHECK(worker_thread() == rtc::Thread::Current()); media_channel()->ResetUnsignaledRecvStream(); } +bool BaseChannel::SetPayloadTypeDemuxingEnabled_w(bool enabled) { + if (enabled == payload_type_demuxing_enabled_) { + return true; + } + payload_type_demuxing_enabled_ = enabled; + if (!enabled) { + // TODO(crbug.com/11477): This will remove *all* unsignaled streams (those + // without an explicitly signaled SSRC), 
which may include streams that + // were matched to this channel by MID or RID. Ideally we'd remove only the + // streams that were matched based on payload type alone, but currently + // there is no straightforward way to identify those streams. + media_channel()->ResetUnsignaledRecvStream(); + demuxer_criteria_.payload_types.clear(); + if (!RegisterRtpDemuxerSink_w()) { + RTC_LOG(LS_ERROR) << "Failed to disable payload type demuxing for " + << ToString(); + return false; + } + } else if (!payload_types_.empty()) { + demuxer_criteria_.payload_types.insert(payload_types_.begin(), + payload_types_.end()); + if (!RegisterRtpDemuxerSink_w()) { + RTC_LOG(LS_ERROR) << "Failed to enable payload type demuxing for " + << ToString(); + return false; + } + } + return true; +} + bool BaseChannel::UpdateLocalStreams_w(const std::vector& streams, SdpType type, std::string* error_desc) { @@ -604,7 +629,8 @@ bool BaseChannel::UpdateLocalStreams_w(const std::vector& streams, if (!media_channel()->RemoveSendStream(old_stream.first_ssrc())) { rtc::StringBuilder desc; desc << "Failed to remove send stream with ssrc " - << old_stream.first_ssrc() << "."; + << old_stream.first_ssrc() << " from m-section with mid='" + << content_name() << "'."; SafeSetError(desc.str(), error_desc); ret = false; } @@ -630,7 +656,8 @@ bool BaseChannel::UpdateLocalStreams_w(const std::vector& streams, if (new_stream.has_ssrcs() && new_stream.has_rids()) { rtc::StringBuilder desc; desc << "Failed to add send stream: " << new_stream.first_ssrc() - << ". Stream has both SSRCs and RIDs."; + << " into m-section with mid='" << content_name() + << "'. 
Stream has both SSRCs and RIDs."; SafeSetError(desc.str(), error_desc); ret = false; continue; @@ -649,7 +676,8 @@ bool BaseChannel::UpdateLocalStreams_w(const std::vector& streams, << " into " << ToString(); } else { rtc::StringBuilder desc; - desc << "Failed to add send stream ssrc: " << new_stream.first_ssrc(); + desc << "Failed to add send stream ssrc: " << new_stream.first_ssrc() + << " into m-section with mid='" << content_name() << "'"; SafeSetError(desc.str(), error_desc); ret = false; } @@ -679,7 +707,8 @@ bool BaseChannel::UpdateRemoteStreams_w( } else { rtc::StringBuilder desc; desc << "Failed to remove remote stream with ssrc " - << old_stream.first_ssrc() << "."; + << old_stream.first_ssrc() << " from m-section with mid='" + << content_name() << "'."; SafeSetError(desc.str(), error_desc); ret = false; } @@ -715,75 +744,39 @@ bool BaseChannel::UpdateRemoteStreams_w( new_stream.ssrcs.end()); } // Re-register the sink to update the receiving ssrcs. - if (!RegisterRtpDemuxerSink()) { + if (!RegisterRtpDemuxerSink_w()) { RTC_LOG(LS_ERROR) << "Failed to set up demuxing for " << ToString(); + ret = false; } remote_streams_ = streams; return ret; } -RtpHeaderExtensions BaseChannel::GetFilteredRtpHeaderExtensions( +RtpHeaderExtensions BaseChannel::GetDeduplicatedRtpHeaderExtensions( const RtpHeaderExtensions& extensions) { - RTC_DCHECK(rtp_transport_); - if (crypto_options_.srtp.enable_encrypted_rtp_header_extensions) { - RtpHeaderExtensions filtered; - absl::c_copy_if(extensions, std::back_inserter(filtered), - [](const webrtc::RtpExtension& extension) { - return !extension.encrypt; - }); - return filtered; - } - - return webrtc::RtpExtension::FilterDuplicateNonEncrypted(extensions); + return webrtc::RtpExtension::DeduplicateHeaderExtensions( + extensions, crypto_options_.srtp.enable_encrypted_rtp_header_extensions + ? 
webrtc::RtpExtension::kPreferEncryptedExtension + : webrtc::RtpExtension::kDiscardEncryptedExtension); } -void BaseChannel::OnMessage(rtc::Message* pmsg) { - TRACE_EVENT0("webrtc", "BaseChannel::OnMessage"); - switch (pmsg->message_id) { - case MSG_SEND_RTP_PACKET: - case MSG_SEND_RTCP_PACKET: { - RTC_DCHECK(network_thread_->IsCurrent()); - SendPacketMessageData* data = - static_cast(pmsg->pdata); - bool rtcp = pmsg->message_id == MSG_SEND_RTCP_PACKET; - SendPacket(rtcp, &data->packet, data->options); - delete data; - break; - } - case MSG_FIRSTPACKETRECEIVED: { - SignalFirstPacketReceived_(this); - break; - } +void BaseChannel::MaybeAddHandledPayloadType(int payload_type) { + if (payload_type_demuxing_enabled_) { + demuxer_criteria_.payload_types.insert(static_cast(payload_type)); } -} - -void BaseChannel::AddHandledPayloadType(int payload_type) { - demuxer_criteria_.payload_types.insert(static_cast(payload_type)); + // Even if payload type demuxing is currently disabled, we need to remember + // the payload types in case it's re-enabled later. + payload_types_.insert(static_cast(payload_type)); } void BaseChannel::ClearHandledPayloadTypes() { demuxer_criteria_.payload_types.clear(); -} - -void BaseChannel::FlushRtcpMessages_n() { - // Flush all remaining RTCP messages. This should only be called in - // destructor. 
- RTC_DCHECK(network_thread_->IsCurrent()); - rtc::MessageList rtcp_messages; - network_thread_->Clear(this, MSG_SEND_RTCP_PACKET, &rtcp_messages); - for (const auto& message : rtcp_messages) { - network_thread_->Send(RTC_FROM_HERE, this, MSG_SEND_RTCP_PACKET, - message.pdata); - } + payload_types_.clear(); } void BaseChannel::SignalSentPacket_n(const rtc::SentPacket& sent_packet) { - RTC_DCHECK(network_thread_->IsCurrent()); - invoker_.AsyncInvoke(RTC_FROM_HERE, worker_thread_, - [this, sent_packet] { - RTC_DCHECK(worker_thread_->IsCurrent()); - SignalSentPacket(sent_packet); - }); + RTC_DCHECK_RUN_ON(network_thread()); + media_channel()->OnPacketSent(sent_packet); } VoiceChannel::VoiceChannel(rtc::Thread* worker_thread, @@ -810,21 +803,10 @@ VoiceChannel::~VoiceChannel() { Deinit(); } -void BaseChannel::UpdateMediaSendRecvState() { - RTC_DCHECK(network_thread_->IsCurrent()); - invoker_.AsyncInvoke(RTC_FROM_HERE, worker_thread_, - [this] { UpdateMediaSendRecvState_w(); }); -} - -void VoiceChannel::Init_w( - webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config) { - BaseChannel::Init_w(rtp_transport, media_transport_config); -} - void VoiceChannel::UpdateMediaSendRecvState_w() { // Render incoming data if we're the active call, and we have the local // content. We receive data on the default channel and multiplexed streams. 
+ RTC_DCHECK_RUN_ON(worker_thread()); bool recv = IsReadyToReceiveMedia_w(); media_channel()->SetPlayout(recv); @@ -844,35 +826,34 @@ bool VoiceChannel::SetLocalContent_w(const MediaContentDescription* content, RTC_DCHECK_RUN_ON(worker_thread()); RTC_LOG(LS_INFO) << "Setting local voice description for " << ToString(); - RTC_DCHECK(content); - if (!content) { - SafeSetError("Can't find audio content in local description.", error_desc); - return false; - } - - const AudioContentDescription* audio = content->as_audio(); - RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(audio->rtp_header_extensions()); + GetDeduplicatedRtpHeaderExtensions(content->rtp_header_extensions()); + // TODO(tommi): There's a hop to the network thread here. + // some of the below is also network thread related. UpdateRtpHeaderExtensionMap(rtp_header_extensions); - media_channel()->SetExtmapAllowMixed(audio->extmap_allow_mixed()); + media_channel()->SetExtmapAllowMixed(content->extmap_allow_mixed()); AudioRecvParameters recv_params = last_recv_params_; RtpParametersFromMediaDescription( - audio, rtp_header_extensions, - webrtc::RtpTransceiverDirectionHasRecv(audio->direction()), &recv_params); + content->as_audio(), rtp_header_extensions, + webrtc::RtpTransceiverDirectionHasRecv(content->direction()), + &recv_params); + if (!media_channel()->SetRecvParameters(recv_params)) { - SafeSetError("Failed to set local audio description recv parameters.", - error_desc); + SafeSetError( + "Failed to set local audio description recv parameters for m-section " + "with mid='" + + content_name() + "'.", + error_desc); return false; } - if (webrtc::RtpTransceiverDirectionHasRecv(audio->direction())) { - for (const AudioCodec& codec : audio->codecs()) { - AddHandledPayloadType(codec.id); + if (webrtc::RtpTransceiverDirectionHasRecv(content->direction())) { + for (const AudioCodec& codec : content->as_audio()->codecs()) { + MaybeAddHandledPayloadType(codec.id); } // Need to 
re-register the sink to update the handled payload. - if (!RegisterRtpDemuxerSink()) { + if (!RegisterRtpDemuxerSink_w()) { RTC_LOG(LS_ERROR) << "Failed to set up audio demuxing for " << ToString(); return false; } @@ -884,8 +865,12 @@ bool VoiceChannel::SetLocalContent_w(const MediaContentDescription* content, // only give it to the media channel once we have a remote // description too (without a remote description, we won't be able // to send them anyway). - if (!UpdateLocalStreams_w(audio->streams(), type, error_desc)) { - SafeSetError("Failed to set local audio description streams.", error_desc); + if (!UpdateLocalStreams_w(content->as_audio()->streams(), type, error_desc)) { + SafeSetError( + "Failed to set local audio description streams for m-section with " + "mid='" + + content_name() + "'.", + error_desc); return false; } @@ -901,16 +886,10 @@ bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content, RTC_DCHECK_RUN_ON(worker_thread()); RTC_LOG(LS_INFO) << "Setting remote voice description for " << ToString(); - RTC_DCHECK(content); - if (!content) { - SafeSetError("Can't find audio content in remote description.", error_desc); - return false; - } - const AudioContentDescription* audio = content->as_audio(); RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(audio->rtp_header_extensions()); + GetDeduplicatedRtpHeaderExtensions(audio->rtp_header_extensions()); AudioSendParameters send_params = last_send_params_; RtpSendParametersFromMediaDescription( @@ -920,8 +899,11 @@ bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content, bool parameters_applied = media_channel()->SetSendParameters(send_params); if (!parameters_applied) { - SafeSetError("Failed to set remote audio description send parameters.", - error_desc); + SafeSetError( + "Failed to set remote audio description send parameters for m-section " + "with mid='" + + content_name() + "'.", + error_desc); return false; } last_send_params_ 
= send_params; @@ -931,7 +913,7 @@ bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content, "disable payload type demuxing for " << ToString(); ClearHandledPayloadTypes(); - if (!RegisterRtpDemuxerSink()) { + if (!RegisterRtpDemuxerSink_w()) { RTC_LOG(LS_ERROR) << "Failed to update audio demuxing for " << ToString(); return false; } @@ -942,7 +924,11 @@ bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content, // description too (without a local description, we won't be able to // recv them anyway). if (!UpdateRemoteStreams_w(audio->streams(), type, error_desc)) { - SafeSetError("Failed to set remote audio description streams.", error_desc); + SafeSetError( + "Failed to set remote audio description streams for m-section with " + "mid='" + + content_name() + "'.", + error_desc); return false; } @@ -978,6 +964,7 @@ VideoChannel::~VideoChannel() { void VideoChannel::UpdateMediaSendRecvState_w() { // Send outgoing data if we're the active call, we have the remote content, // and we have had some form of connectivity. 
+ RTC_DCHECK_RUN_ON(worker_thread()); bool send = IsReadyToSendMedia_w(); if (!media_channel()->SetSend(send)) { RTC_LOG(LS_ERROR) << "Failed to SetSend on video channel: " + ToString(); @@ -989,8 +976,9 @@ void VideoChannel::UpdateMediaSendRecvState_w() { } void VideoChannel::FillBitrateInfo(BandwidthEstimationInfo* bwe_info) { - InvokeOnWorker(RTC_FROM_HERE, Bind(&VideoMediaChannel::FillBitrateInfo, - media_channel(), bwe_info)); + RTC_DCHECK_RUN_ON(worker_thread()); + VideoMediaChannel* mc = media_channel(); + mc->FillBitrateInfo(bwe_info); } bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content, @@ -1000,23 +988,17 @@ bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content, RTC_DCHECK_RUN_ON(worker_thread()); RTC_LOG(LS_INFO) << "Setting local video description for " << ToString(); - RTC_DCHECK(content); - if (!content) { - SafeSetError("Can't find video content in local description.", error_desc); - return false; - } - - const VideoContentDescription* video = content->as_video(); - RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(video->rtp_header_extensions()); + GetDeduplicatedRtpHeaderExtensions(content->rtp_header_extensions()); UpdateRtpHeaderExtensionMap(rtp_header_extensions); - media_channel()->SetExtmapAllowMixed(video->extmap_allow_mixed()); + media_channel()->SetExtmapAllowMixed(content->extmap_allow_mixed()); VideoRecvParameters recv_params = last_recv_params_; + RtpParametersFromMediaDescription( - video, rtp_header_extensions, - webrtc::RtpTransceiverDirectionHasRecv(video->direction()), &recv_params); + content->as_video(), rtp_header_extensions, + webrtc::RtpTransceiverDirectionHasRecv(content->direction()), + &recv_params); VideoSendParameters send_params = last_send_params_; @@ -1030,7 +1012,9 @@ bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content, needs_send_params_update = true; } else if (recv_codec->packetization != send_codec.packetization) { 
SafeSetError( - "Failed to set local answer due to invalid codec packetization.", + "Failed to set local answer due to invalid codec packetization " + "specified in m-section with mid='" + + content_name() + "'.", error_desc); return false; } @@ -1039,17 +1023,20 @@ bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content, } if (!media_channel()->SetRecvParameters(recv_params)) { - SafeSetError("Failed to set local video description recv parameters.", - error_desc); + SafeSetError( + "Failed to set local video description recv parameters for m-section " + "with mid='" + + content_name() + "'.", + error_desc); return false; } - if (webrtc::RtpTransceiverDirectionHasRecv(video->direction())) { - for (const VideoCodec& codec : video->codecs()) { - AddHandledPayloadType(codec.id); + if (webrtc::RtpTransceiverDirectionHasRecv(content->direction())) { + for (const VideoCodec& codec : content->as_video()->codecs()) { + MaybeAddHandledPayloadType(codec.id); } // Need to re-register the sink to update the handled payload. - if (!RegisterRtpDemuxerSink()) { + if (!RegisterRtpDemuxerSink_w()) { RTC_LOG(LS_ERROR) << "Failed to set up video demuxing for " << ToString(); return false; } @@ -1059,7 +1046,9 @@ bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content, if (needs_send_params_update) { if (!media_channel()->SetSendParameters(send_params)) { - SafeSetError("Failed to set send parameters.", error_desc); + SafeSetError("Failed to set send parameters for m-section with mid='" + + content_name() + "'.", + error_desc); return false; } last_send_params_ = send_params; @@ -1069,8 +1058,12 @@ bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content, // only give it to the media channel once we have a remote // description too (without a remote description, we won't be able // to send them anyway). 
- if (!UpdateLocalStreams_w(video->streams(), type, error_desc)) { - SafeSetError("Failed to set local video description streams.", error_desc); + if (!UpdateLocalStreams_w(content->as_video()->streams(), type, error_desc)) { + SafeSetError( + "Failed to set local video description streams for m-section with " + "mid='" + + content_name() + "'.", + error_desc); return false; } @@ -1086,16 +1079,10 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content, RTC_DCHECK_RUN_ON(worker_thread()); RTC_LOG(LS_INFO) << "Setting remote video description for " << ToString(); - RTC_DCHECK(content); - if (!content) { - SafeSetError("Can't find video content in remote description.", error_desc); - return false; - } - const VideoContentDescription* video = content->as_video(); RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(video->rtp_header_extensions()); + GetDeduplicatedRtpHeaderExtensions(video->rtp_header_extensions()); VideoSendParameters send_params = last_send_params_; RtpSendParametersFromMediaDescription( @@ -1118,7 +1105,9 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content, needs_recv_params_update = true; } else if (send_codec->packetization != recv_codec.packetization) { SafeSetError( - "Failed to set remote answer due to invalid codec packetization.", + "Failed to set remote answer due to invalid codec packetization " + "specifid in m-section with mid='" + + content_name() + "'.", error_desc); return false; } @@ -1127,15 +1116,20 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content, } if (!media_channel()->SetSendParameters(send_params)) { - SafeSetError("Failed to set remote video description send parameters.", - error_desc); + SafeSetError( + "Failed to set remote video description send parameters for m-section " + "with mid='" + + content_name() + "'.", + error_desc); return false; } last_send_params_ = send_params; if (needs_recv_params_update) { if 
(!media_channel()->SetRecvParameters(recv_params)) { - SafeSetError("Failed to set recv parameters.", error_desc); + SafeSetError("Failed to set recv parameters for m-section with mid='" + + content_name() + "'.", + error_desc); return false; } last_recv_params_ = recv_params; @@ -1146,7 +1140,7 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content, "disable payload type demuxing for " << ToString(); ClearHandledPayloadTypes(); - if (!RegisterRtpDemuxerSink()) { + if (!RegisterRtpDemuxerSink_w()) { RTC_LOG(LS_ERROR) << "Failed to update video demuxing for " << ToString(); return false; } @@ -1157,237 +1151,16 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content, // description too (without a local description, we won't be able to // recv them anyway). if (!UpdateRemoteStreams_w(video->streams(), type, error_desc)) { - SafeSetError("Failed to set remote video description streams.", error_desc); - return false; - } - set_remote_content_direction(content->direction()); - UpdateMediaSendRecvState_w(); - return true; -} - -RtpDataChannel::RtpDataChannel(rtc::Thread* worker_thread, - rtc::Thread* network_thread, - rtc::Thread* signaling_thread, - std::unique_ptr media_channel, - const std::string& content_name, - bool srtp_required, - webrtc::CryptoOptions crypto_options, - UniqueRandomIdGenerator* ssrc_generator) - : BaseChannel(worker_thread, - network_thread, - signaling_thread, - std::move(media_channel), - content_name, - srtp_required, - crypto_options, - ssrc_generator) {} - -RtpDataChannel::~RtpDataChannel() { - TRACE_EVENT0("webrtc", "RtpDataChannel::~RtpDataChannel"); - // this can't be done in the base class, since it calls a virtual - DisableMedia_w(); - Deinit(); -} - -void RtpDataChannel::Init_w( - webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config) { - BaseChannel::Init_w(rtp_transport, media_transport_config); - 
media_channel()->SignalDataReceived.connect(this, - &RtpDataChannel::OnDataReceived); - media_channel()->SignalReadyToSend.connect( - this, &RtpDataChannel::OnDataChannelReadyToSend); -} - -bool RtpDataChannel::SendData(const SendDataParams& params, - const rtc::CopyOnWriteBuffer& payload, - SendDataResult* result) { - return InvokeOnWorker( - RTC_FROM_HERE, Bind(&DataMediaChannel::SendData, media_channel(), params, - payload, result)); -} - -bool RtpDataChannel::CheckDataChannelTypeFromContent( - const MediaContentDescription* content, - std::string* error_desc) { - if (!content->as_rtp_data()) { - if (content->as_sctp()) { - SafeSetError("Data channel type mismatch. Expected RTP, got SCTP.", - error_desc); - } else { - SafeSetError("Data channel is not RTP or SCTP.", error_desc); - } - return false; - } - return true; -} - -bool RtpDataChannel::SetLocalContent_w(const MediaContentDescription* content, - SdpType type, - std::string* error_desc) { - TRACE_EVENT0("webrtc", "RtpDataChannel::SetLocalContent_w"); - RTC_DCHECK_RUN_ON(worker_thread()); - RTC_LOG(LS_INFO) << "Setting local data description for " << ToString(); - - RTC_DCHECK(content); - if (!content) { - SafeSetError("Can't find data content in local description.", error_desc); - return false; - } - - if (!CheckDataChannelTypeFromContent(content, error_desc)) { - return false; - } - const RtpDataContentDescription* data = content->as_rtp_data(); - - RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(data->rtp_header_extensions()); - - DataRecvParameters recv_params = last_recv_params_; - RtpParametersFromMediaDescription( - data, rtp_header_extensions, - webrtc::RtpTransceiverDirectionHasRecv(data->direction()), &recv_params); - if (!media_channel()->SetRecvParameters(recv_params)) { - SafeSetError("Failed to set remote data description recv parameters.", - error_desc); - return false; - } - for (const DataCodec& codec : data->codecs()) { - AddHandledPayloadType(codec.id); - } - 
// Need to re-register the sink to update the handled payload. - if (!RegisterRtpDemuxerSink()) { - RTC_LOG(LS_ERROR) << "Failed to set up data demuxing for " << ToString(); - return false; - } - - last_recv_params_ = recv_params; - - // TODO(pthatcher): Move local streams into DataSendParameters, and - // only give it to the media channel once we have a remote - // description too (without a remote description, we won't be able - // to send them anyway). - if (!UpdateLocalStreams_w(data->streams(), type, error_desc)) { - SafeSetError("Failed to set local data description streams.", error_desc); + SafeSetError( + "Failed to set remote video description streams for m-section with " + "mid='" + + content_name() + "'.", + error_desc); return false; } - - set_local_content_direction(content->direction()); - UpdateMediaSendRecvState_w(); - return true; -} - -bool RtpDataChannel::SetRemoteContent_w(const MediaContentDescription* content, - SdpType type, - std::string* error_desc) { - TRACE_EVENT0("webrtc", "RtpDataChannel::SetRemoteContent_w"); - RTC_DCHECK_RUN_ON(worker_thread()); - RTC_LOG(LS_INFO) << "Setting remote data description for " << ToString(); - - RTC_DCHECK(content); - if (!content) { - SafeSetError("Can't find data content in remote description.", error_desc); - return false; - } - - if (!CheckDataChannelTypeFromContent(content, error_desc)) { - return false; - } - - const RtpDataContentDescription* data = content->as_rtp_data(); - - // If the remote data doesn't have codecs, it must be empty, so ignore it. 
- if (!data->has_codecs()) { - return true; - } - - RtpHeaderExtensions rtp_header_extensions = - GetFilteredRtpHeaderExtensions(data->rtp_header_extensions()); - - RTC_LOG(LS_INFO) << "Setting remote data description for " << ToString(); - DataSendParameters send_params = last_send_params_; - RtpSendParametersFromMediaDescription( - data, rtp_header_extensions, - webrtc::RtpTransceiverDirectionHasRecv(data->direction()), &send_params); - if (!media_channel()->SetSendParameters(send_params)) { - SafeSetError("Failed to set remote data description send parameters.", - error_desc); - return false; - } - last_send_params_ = send_params; - - // TODO(pthatcher): Move remote streams into DataRecvParameters, - // and only give it to the media channel once we have a local - // description too (without a local description, we won't be able to - // recv them anyway). - if (!UpdateRemoteStreams_w(data->streams(), type, error_desc)) { - SafeSetError("Failed to set remote data description streams.", error_desc); - return false; - } - set_remote_content_direction(content->direction()); UpdateMediaSendRecvState_w(); return true; } -void RtpDataChannel::UpdateMediaSendRecvState_w() { - // Render incoming data if we're the active call, and we have the local - // content. We receive data on the default channel and multiplexed streams. - bool recv = IsReadyToReceiveMedia_w(); - if (!media_channel()->SetReceive(recv)) { - RTC_LOG(LS_ERROR) << "Failed to SetReceive on data channel: " << ToString(); - } - - // Send outgoing data if we're the active call, we have the remote content, - // and we have had some form of connectivity. - bool send = IsReadyToSendMedia_w(); - if (!media_channel()->SetSend(send)) { - RTC_LOG(LS_ERROR) << "Failed to SetSend on data channel: " << ToString(); - } - - // Trigger SignalReadyToSendData asynchronously. 
- OnDataChannelReadyToSend(send); - - RTC_LOG(LS_INFO) << "Changing data state, recv=" << recv << " send=" << send - << " for " << ToString(); -} - -void RtpDataChannel::OnMessage(rtc::Message* pmsg) { - switch (pmsg->message_id) { - case MSG_READYTOSENDDATA: { - DataChannelReadyToSendMessageData* data = - static_cast(pmsg->pdata); - ready_to_send_data_ = data->data(); - SignalReadyToSendData(ready_to_send_data_); - delete data; - break; - } - case MSG_DATARECEIVED: { - DataReceivedMessageData* data = - static_cast(pmsg->pdata); - SignalDataReceived(data->params, data->payload); - delete data; - break; - } - default: - BaseChannel::OnMessage(pmsg); - break; - } -} - -void RtpDataChannel::OnDataReceived(const ReceiveDataParams& params, - const char* data, - size_t len) { - DataReceivedMessageData* msg = new DataReceivedMessageData(params, data, len); - signaling_thread()->Post(RTC_FROM_HERE, this, MSG_DATARECEIVED, msg); -} - -void RtpDataChannel::OnDataChannelReadyToSend(bool writable) { - // This is usded for congestion control to indicate that the stream is ready - // to send by the MediaChannel, as opposed to OnReadyToSend, which indicates - // that the transport channel is ready. 
- signaling_thread()->Post(RTC_FROM_HERE, this, MSG_READYTOSENDDATA, - new DataChannelReadyToSendMessageData(writable)); -} - } // namespace cricket diff --git a/pc/channel.h b/pc/channel.h index 9dc652236a..d1dbe2cd6c 100644 --- a/pc/channel.h +++ b/pc/channel.h @@ -11,6 +11,9 @@ #ifndef PC_CHANNEL_H_ #define PC_CHANNEL_H_ +#include +#include + #include #include #include @@ -18,35 +21,52 @@ #include #include +#include "absl/types/optional.h" #include "api/call/audio_sink.h" +#include "api/crypto/crypto_options.h" #include "api/function_view.h" #include "api/jsep.h" +#include "api/media_types.h" #include "api/rtp_receiver_interface.h" -#include "api/transport/media/media_transport_config.h" +#include "api/rtp_transceiver_direction.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "api/video/video_sink_interface.h" #include "api/video/video_source_interface.h" +#include "call/rtp_demuxer.h" #include "call/rtp_packet_sink_interface.h" #include "media/base/media_channel.h" #include "media/base/media_engine.h" #include "media/base/stream_params.h" +#include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/packet_transport_internal.h" #include "pc/channel_interface.h" #include "pc/dtls_srtp_transport.h" #include "pc/media_session.h" #include "pc/rtp_transport.h" +#include "pc/rtp_transport_internal.h" +#include "pc/session_description.h" #include "pc/srtp_filter.h" #include "pc/srtp_transport.h" -#include "rtc_base/async_invoker.h" +#include "rtc_base/async_packet_socket.h" #include "rtc_base/async_udp_socket.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/checks.h" +#include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/location.h" #include "rtc_base/network.h" +#include "rtc_base/network/sent_packet.h" +#include "rtc_base/network_route.h" +#include "rtc_base/socket.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include 
"rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" +#include "rtc_base/thread_message.h" #include "rtc_base/unique_id_generator.h" namespace webrtc { class AudioSinkInterface; -class MediaTransportInterface; } // namespace webrtc namespace cricket { @@ -72,8 +92,10 @@ struct CryptoParams; // NetworkInterface. class BaseChannel : public ChannelInterface, - public rtc::MessageHandler, + // TODO(tommi): Remove has_slots inheritance. public sigslot::has_slots<>, + // TODO(tommi): Consider implementing these interfaces + // via composition. public MediaChannel::NetworkInterface, public webrtc::RtpPacketSinkInterface { public: @@ -92,9 +114,7 @@ class BaseChannel : public ChannelInterface, webrtc::CryptoOptions crypto_options, rtc::UniqueRandomIdGenerator* ssrc_generator); virtual ~BaseChannel(); - virtual void Init_w( - webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config); + virtual void Init_w(webrtc::RtpTransportInternal* rtp_transport); // Deinit may be called multiple times and is simply ignored if it's already // done. @@ -104,23 +124,30 @@ class BaseChannel : public ChannelInterface, rtc::Thread* network_thread() const { return network_thread_; } const std::string& content_name() const override { return content_name_; } // TODO(deadbeef): This is redundant; remove this. - const std::string& transport_name() const override { return transport_name_; } - bool enabled() const override { return enabled_; } + const std::string& transport_name() const override { + RTC_DCHECK_RUN_ON(network_thread()); + if (rtp_transport_) + return rtp_transport_->transport_name(); + // TODO(tommi): Delete this variable. + return transport_name_; + } // This function returns true if using SRTP (DTLS-based keying or SDES). 
bool srtp_active() const { + RTC_DCHECK_RUN_ON(network_thread()); return rtp_transport_ && rtp_transport_->IsSrtpActive(); } - bool writable() const { return writable_; } - // Set an RTP level transport which could be an RtpTransport without // encryption, an SrtpTransport for SDES or a DtlsSrtpTransport for DTLS-SRTP. // This can be called from any thread and it hops to the network thread // internally. It would replace the |SetTransports| and its variants. bool SetRtpTransport(webrtc::RtpTransportInternal* rtp_transport) override; - webrtc::RtpTransportInternal* rtp_transport() const { return rtp_transport_; } + webrtc::RtpTransportInternal* rtp_transport() const { + RTC_DCHECK_RUN_ON(network_thread()); + return rtp_transport_; + } // Channel control bool SetLocalContent(const MediaContentDescription* content, @@ -129,8 +156,17 @@ class BaseChannel : public ChannelInterface, bool SetRemoteContent(const MediaContentDescription* content, webrtc::SdpType type, std::string* error_desc) override; + // Controls whether this channel will receive packets on the basis of + // matching payload type alone. This is needed for legacy endpoints that + // don't signal SSRCs or use MID/RID, but doesn't make sense if there is + // more than channel of specific media type, As that creates an ambiguity. + // + // This method will also remove any existing streams that were bound to this + // channel on the basis of payload type, since one of these streams might + // actually belong to a new channel. 
See: crbug.com/webrtc/11477 + bool SetPayloadTypeDemuxingEnabled(bool enabled) override; - bool Enable(bool enable) override; + void Enable(bool enable) override; const std::vector& local_streams() const override { return local_streams_; @@ -139,47 +175,33 @@ class BaseChannel : public ChannelInterface, return remote_streams_; } - sigslot::signal2 SignalDtlsSrtpSetupFailure; - void SignalDtlsSrtpSetupFailure_n(bool rtcp); - void SignalDtlsSrtpSetupFailure_s(bool rtcp); - // Used for latency measurements. - sigslot::signal1& SignalFirstPacketReceived() override { - return SignalFirstPacketReceived_; - } - - // Forward SignalSentPacket to worker thread. - sigslot::signal1 SignalSentPacket; - - // Emitted whenever rtcp-mux is fully negotiated and the rtcp-transport can - // be destroyed. - // Fired on the network thread. - sigslot::signal1 SignalRtcpMuxFullyActive; + void SetFirstPacketReceivedCallback(std::function callback) override; // From RtpTransport - public for testing only void OnTransportReadyToSend(bool ready); // Only public for unit tests. Otherwise, consider protected. int SetOption(SocketType type, rtc::Socket::Option o, int val) override; - int SetOption_n(SocketType type, rtc::Socket::Option o, int val); // RtpPacketSinkInterface overrides. void OnRtpPacket(const webrtc::RtpPacketReceived& packet) override; - // Used by the RTCStatsCollector tests to set the transport name without - // creating RtpTransports. 
- void set_transport_name_for_testing(const std::string& transport_name) { - transport_name_ = transport_name; + MediaChannel* media_channel() const override { + return media_channel_.get(); } - MediaChannel* media_channel() const override { return media_channel_.get(); } - protected: - bool was_ever_writable() const { return was_ever_writable_; } + bool was_ever_writable() const { + RTC_DCHECK_RUN_ON(worker_thread()); + return was_ever_writable_; + } void set_local_content_direction(webrtc::RtpTransceiverDirection direction) { + RTC_DCHECK_RUN_ON(worker_thread()); local_content_direction_ = direction; } void set_remote_content_direction(webrtc::RtpTransceiverDirection direction) { + RTC_DCHECK_RUN_ON(worker_thread()); remote_content_direction_ = direction; } // These methods verify that: @@ -192,11 +214,9 @@ class BaseChannel : public ChannelInterface, // // When any of these properties change, UpdateMediaSendRecvState_w should be // called. - bool IsReadyToReceiveMedia_w() const; - bool IsReadyToSendMedia_w() const; - rtc::Thread* signaling_thread() { return signaling_thread_; } - - void FlushRtcpMessages_n(); + bool IsReadyToReceiveMedia_w() const RTC_RUN_ON(worker_thread()); + bool IsReadyToSendMedia_w() const RTC_RUN_ON(worker_thread()); + rtc::Thread* signaling_thread() const { return signaling_thread_; } // NetworkInterface implementation, called by MediaEngine bool SendPacket(rtc::CopyOnWriteBuffer* packet, @@ -209,121 +229,151 @@ class BaseChannel : public ChannelInterface, void OnNetworkRouteChanged(absl::optional network_route); - bool PacketIsRtcp(const rtc::PacketTransportInternal* transport, - const char* data, - size_t len); bool SendPacket(bool rtcp, rtc::CopyOnWriteBuffer* packet, const rtc::PacketOptions& options); - void EnableMedia_w(); - void DisableMedia_w(); + void EnableMedia_w() RTC_RUN_ON(worker_thread()); + void DisableMedia_w() RTC_RUN_ON(worker_thread()); // Performs actions if the RTP/RTCP writable state changed. 
This should // be called whenever a channel's writable state changes or when RTCP muxing // becomes active/inactive. - void UpdateWritableState_n(); - void ChannelWritable_n(); - void ChannelNotWritable_n(); - - bool AddRecvStream_w(const StreamParams& sp); - bool RemoveRecvStream_w(uint32_t ssrc); - void ResetUnsignaledRecvStream_w(); - bool AddSendStream_w(const StreamParams& sp); - bool RemoveSendStream_w(uint32_t ssrc); + void UpdateWritableState_n() RTC_RUN_ON(network_thread()); + void ChannelWritable_n() RTC_RUN_ON(network_thread()); + void ChannelNotWritable_n() RTC_RUN_ON(network_thread()); + + bool AddRecvStream_w(const StreamParams& sp) RTC_RUN_ON(worker_thread()); + bool RemoveRecvStream_w(uint32_t ssrc) RTC_RUN_ON(worker_thread()); + void ResetUnsignaledRecvStream_w() RTC_RUN_ON(worker_thread()); + bool SetPayloadTypeDemuxingEnabled_w(bool enabled) + RTC_RUN_ON(worker_thread()); + bool AddSendStream_w(const StreamParams& sp) RTC_RUN_ON(worker_thread()); + bool RemoveSendStream_w(uint32_t ssrc) RTC_RUN_ON(worker_thread()); // Should be called whenever the conditions for // IsReadyToReceiveMedia/IsReadyToSendMedia are satisfied (or unsatisfied). // Updates the send/recv state of the media channel. 
- void UpdateMediaSendRecvState(); - virtual void UpdateMediaSendRecvState_w() = 0; + virtual void UpdateMediaSendRecvState_w() RTC_RUN_ON(worker_thread()) = 0; bool UpdateLocalStreams_w(const std::vector& streams, webrtc::SdpType type, - std::string* error_desc); + std::string* error_desc) + RTC_RUN_ON(worker_thread()); bool UpdateRemoteStreams_w(const std::vector& streams, webrtc::SdpType type, - std::string* error_desc); + std::string* error_desc) + RTC_RUN_ON(worker_thread()); virtual bool SetLocalContent_w(const MediaContentDescription* content, webrtc::SdpType type, - std::string* error_desc) = 0; + std::string* error_desc) + RTC_RUN_ON(worker_thread()) = 0; virtual bool SetRemoteContent_w(const MediaContentDescription* content, webrtc::SdpType type, - std::string* error_desc) = 0; - // Return a list of RTP header extensions with the non-encrypted extensions - // removed depending on the current crypto_options_ and only if both the - // non-encrypted and encrypted extension is present for the same URI. - RtpHeaderExtensions GetFilteredRtpHeaderExtensions( - const RtpHeaderExtensions& extensions); - - // From MessageHandler - void OnMessage(rtc::Message* pmsg) override; + std::string* error_desc) + RTC_RUN_ON(worker_thread()) = 0; - // Helper function template for invoking methods on the worker thread. - template - T InvokeOnWorker(const rtc::Location& posted_from, - rtc::FunctionView functor) { - return worker_thread_->Invoke(posted_from, functor); - } + // Returns a list of RTP header extensions where any extension URI is unique. + // Encrypted extensions will be either preferred or discarded, depending on + // the current crypto_options_. + RtpHeaderExtensions GetDeduplicatedRtpHeaderExtensions( + const RtpHeaderExtensions& extensions); - void AddHandledPayloadType(int payload_type); + // Add |payload_type| to |demuxer_criteria_| if payload type demuxing is + // enabled. 
+ void MaybeAddHandledPayloadType(int payload_type) RTC_RUN_ON(worker_thread()); - void ClearHandledPayloadTypes(); + void ClearHandledPayloadTypes() RTC_RUN_ON(worker_thread()); void UpdateRtpHeaderExtensionMap( const RtpHeaderExtensions& header_extensions); - bool RegisterRtpDemuxerSink(); + bool RegisterRtpDemuxerSink_w() RTC_RUN_ON(worker_thread()); // Return description of media channel to facilitate logging std::string ToString() const; - bool has_received_packet_ = false; - private: - bool ConnectToRtpTransport(); - void DisconnectFromRtpTransport(); + bool ConnectToRtpTransport() RTC_RUN_ON(network_thread()); + void DisconnectFromRtpTransport() RTC_RUN_ON(network_thread()); void SignalSentPacket_n(const rtc::SentPacket& sent_packet); - bool IsReadyToSendMedia_n() const; rtc::Thread* const worker_thread_; rtc::Thread* const network_thread_; rtc::Thread* const signaling_thread_; - rtc::AsyncInvoker invoker_; - sigslot::signal1 SignalFirstPacketReceived_; + rtc::scoped_refptr alive_; const std::string content_name_; + std::function on_first_packet_received_ + RTC_GUARDED_BY(network_thread()); + // Won't be set when using raw packet transports. SDP-specific thing. + // TODO(bugs.webrtc.org/12230): Written on network thread, read on + // worker thread (at least). + // TODO(tommi): Remove this variable and instead use rtp_transport_ to + // return the transport name. This variable is currently required for + // "for_test" methods. std::string transport_name_; - webrtc::RtpTransportInternal* rtp_transport_ = nullptr; - - // Optional media transport configuration (experimental). 
- webrtc::MediaTransportConfig media_transport_config_; + webrtc::RtpTransportInternal* rtp_transport_ + RTC_GUARDED_BY(network_thread()) = nullptr; - std::vector > socket_options_; - std::vector > rtcp_socket_options_; - bool writable_ = false; - bool was_ever_writable_ = false; + std::vector > socket_options_ + RTC_GUARDED_BY(network_thread()); + std::vector > rtcp_socket_options_ + RTC_GUARDED_BY(network_thread()); + bool writable_ RTC_GUARDED_BY(network_thread()) = false; + bool was_ever_writable_n_ RTC_GUARDED_BY(network_thread()) = false; + bool was_ever_writable_ RTC_GUARDED_BY(worker_thread()) = false; const bool srtp_required_ = true; - webrtc::CryptoOptions crypto_options_; + + // TODO(tommi): This field shouldn't be necessary. It's a copy of + // PeerConnection::GetCryptoOptions(), which is const state. It's also only + // used to filter header extensions when calling + // `rtp_transport_->UpdateRtpHeaderExtensionMap()` when the local/remote + // content description is updated. Since the transport is actually owned + // by the transport controller that also gets updated whenever the content + // description changes, it seems we have two paths into the transports, along + // with several thread hops via various classes (such as the Channel classes) + // that only serve as additional layers and store duplicate state. The Jsep* + // family of classes already apply session description updates on the network + // thread every time it changes. + // For the Channel classes, we should be able to get rid of: + // * crypto_options (and fewer construction parameters)_ + // * UpdateRtpHeaderExtensionMap + // * GetFilteredRtpHeaderExtensions + // * Blocking thread hop to the network thread for every call to set + // local/remote content is updated. + const webrtc::CryptoOptions crypto_options_; // MediaChannel related members that should be accessed from the worker // thread. 
- std::unique_ptr media_channel_; + const std::unique_ptr media_channel_; // Currently the |enabled_| flag is accessed from the signaling thread as // well, but it can be changed only when signaling thread does a synchronous // call to the worker thread, so it should be safe. - bool enabled_ = false; - std::vector local_streams_; - std::vector remote_streams_; + bool enabled_ RTC_GUARDED_BY(worker_thread()) = false; + bool enabled_s_ RTC_GUARDED_BY(signaling_thread()) = false; + bool payload_type_demuxing_enabled_ RTC_GUARDED_BY(worker_thread()) = true; + std::vector local_streams_ RTC_GUARDED_BY(worker_thread()); + std::vector remote_streams_ RTC_GUARDED_BY(worker_thread()); + // TODO(bugs.webrtc.org/12230): local_content_direction and + // remote_content_direction are set on the worker thread, but accessed on the + // network thread. webrtc::RtpTransceiverDirection local_content_direction_ = webrtc::RtpTransceiverDirection::kInactive; webrtc::RtpTransceiverDirection remote_content_direction_ = webrtc::RtpTransceiverDirection::kInactive; + // Cached list of payload types, used if payload type demuxing is re-enabled. + std::set payload_types_ RTC_GUARDED_BY(worker_thread()); + // TODO(bugs.webrtc.org/12239): Modified on worker thread, accessed + // on network thread in RegisterRtpDemuxerSink_n (called from Init_w) webrtc::RtpDemuxerCriteria demuxer_criteria_; + // Accessed on the worker thread, modified on the network thread from + // RegisterRtpDemuxerSink_w's Invoke. + webrtc::RtpDemuxerCriteria previous_demuxer_criteria_; // This generator is used to generate SSRCs for local streams. // This is needed in cases where SSRCs are not negotiated or set explicitly // like in Simulcast. 
@@ -353,9 +403,6 @@ class VoiceChannel : public BaseChannel { cricket::MediaType media_type() const override { return cricket::MEDIA_TYPE_AUDIO; } - void Init_w( - webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config) override; private: // overrides from BaseChannel @@ -417,106 +464,6 @@ class VideoChannel : public BaseChannel { VideoRecvParameters last_recv_params_; }; -// RtpDataChannel is a specialization for data. -class RtpDataChannel : public BaseChannel { - public: - RtpDataChannel(rtc::Thread* worker_thread, - rtc::Thread* network_thread, - rtc::Thread* signaling_thread, - std::unique_ptr channel, - const std::string& content_name, - bool srtp_required, - webrtc::CryptoOptions crypto_options, - rtc::UniqueRandomIdGenerator* ssrc_generator); - ~RtpDataChannel(); - // TODO(zhihuang): Remove this once the RtpTransport can be shared between - // BaseChannels. - void Init_w(DtlsTransportInternal* rtp_dtls_transport, - DtlsTransportInternal* rtcp_dtls_transport, - rtc::PacketTransportInternal* rtp_packet_transport, - rtc::PacketTransportInternal* rtcp_packet_transport); - void Init_w( - webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config) override; - - virtual bool SendData(const SendDataParams& params, - const rtc::CopyOnWriteBuffer& payload, - SendDataResult* result); - - // Should be called on the signaling thread only. - bool ready_to_send_data() const { return ready_to_send_data_; } - - sigslot::signal2 - SignalDataReceived; - // Signal for notifying when the channel becomes ready to send data. - // That occurs when the channel is enabled, the transport is writable, - // both local and remote descriptions are set, and the channel is unblocked. - sigslot::signal1 SignalReadyToSendData; - cricket::MediaType media_type() const override { - return cricket::MEDIA_TYPE_DATA; - } - - protected: - // downcasts a MediaChannel. 
- DataMediaChannel* media_channel() const override { - return static_cast(BaseChannel::media_channel()); - } - - private: - struct SendDataMessageData : public rtc::MessageData { - SendDataMessageData(const SendDataParams& params, - const rtc::CopyOnWriteBuffer* payload, - SendDataResult* result) - : params(params), payload(payload), result(result), succeeded(false) {} - - const SendDataParams& params; - const rtc::CopyOnWriteBuffer* payload; - SendDataResult* result; - bool succeeded; - }; - - struct DataReceivedMessageData : public rtc::MessageData { - // We copy the data because the data will become invalid after we - // handle DataMediaChannel::SignalDataReceived but before we fire - // SignalDataReceived. - DataReceivedMessageData(const ReceiveDataParams& params, - const char* data, - size_t len) - : params(params), payload(data, len) {} - const ReceiveDataParams params; - const rtc::CopyOnWriteBuffer payload; - }; - - typedef rtc::TypedMessageData DataChannelReadyToSendMessageData; - - // overrides from BaseChannel - // Checks that data channel type is RTP. - bool CheckDataChannelTypeFromContent(const MediaContentDescription* content, - std::string* error_desc); - bool SetLocalContent_w(const MediaContentDescription* content, - webrtc::SdpType type, - std::string* error_desc) override; - bool SetRemoteContent_w(const MediaContentDescription* content, - webrtc::SdpType type, - std::string* error_desc) override; - void UpdateMediaSendRecvState_w() override; - - void OnMessage(rtc::Message* pmsg) override; - void OnDataReceived(const ReceiveDataParams& params, - const char* data, - size_t len); - void OnDataChannelReadyToSend(bool writable); - - bool ready_to_send_data_ = false; - - // Last DataSendParameters sent down to the media_channel() via - // SetSendParameters. - DataSendParameters last_send_params_; - // Last DataRecvParameters sent down to the media_channel() via - // SetRecvParameters. 
- DataRecvParameters last_recv_params_; -}; - } // namespace cricket #endif // PC_CHANNEL_H_ diff --git a/pc/channel_interface.h b/pc/channel_interface.h index cd29ed4f84..3b71f0f8b5 100644 --- a/pc/channel_interface.h +++ b/pc/channel_interface.h @@ -37,13 +37,12 @@ class ChannelInterface { virtual const std::string& content_name() const = 0; - virtual bool enabled() const = 0; - // Enables or disables this channel - virtual bool Enable(bool enable) = 0; + virtual void Enable(bool enable) = 0; // Used for latency measurements. - virtual sigslot::signal1& SignalFirstPacketReceived() = 0; + virtual void SetFirstPacketReceivedCallback( + std::function callback) = 0; // Channel control virtual bool SetLocalContent(const MediaContentDescription* content, @@ -52,6 +51,7 @@ class ChannelInterface { virtual bool SetRemoteContent(const MediaContentDescription* content, webrtc::SdpType type, std::string* error_desc) = 0; + virtual bool SetPayloadTypeDemuxingEnabled(bool enabled) = 0; // Access to the local and remote streams that were set on the channel. 
virtual const std::vector& local_streams() const = 0; diff --git a/pc/channel_manager.cc b/pc/channel_manager.cc index f5f3dd4a7b..b58830b215 100644 --- a/pc/channel_manager.cc +++ b/pc/channel_manager.cc @@ -10,57 +10,54 @@ #include "pc/channel_manager.h" +#include #include #include "absl/algorithm/container.h" #include "absl/memory/memory.h" #include "absl/strings/match.h" +#include "api/sequence_checker.h" #include "media/base/media_constants.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" -#include "rtc_base/thread_checker.h" #include "rtc_base/trace_event.h" namespace cricket { +// static +std::unique_ptr ChannelManager::Create( + std::unique_ptr media_engine, + bool enable_rtx, + rtc::Thread* worker_thread, + rtc::Thread* network_thread) { + RTC_DCHECK_RUN_ON(worker_thread); + RTC_DCHECK(network_thread); + RTC_DCHECK(worker_thread); + + if (media_engine) + media_engine->Init(); + + return absl::WrapUnique(new ChannelManager( + std::move(media_engine), enable_rtx, worker_thread, network_thread)); +} + ChannelManager::ChannelManager( std::unique_ptr media_engine, - std::unique_ptr data_engine, + bool enable_rtx, rtc::Thread* worker_thread, rtc::Thread* network_thread) : media_engine_(std::move(media_engine)), - data_engine_(std::move(data_engine)), - main_thread_(rtc::Thread::Current()), worker_thread_(worker_thread), - network_thread_(network_thread) { - RTC_DCHECK(data_engine_); + network_thread_(network_thread), + enable_rtx_(enable_rtx) { RTC_DCHECK(worker_thread_); RTC_DCHECK(network_thread_); + RTC_DCHECK_RUN_ON(worker_thread_); } ChannelManager::~ChannelManager() { - if (initialized_) { - Terminate(); - } - // The media engine needs to be deleted on the worker thread for thread safe - // destruction, - worker_thread_->Invoke(RTC_FROM_HERE, [&] { media_engine_.reset(); }); -} - -bool ChannelManager::SetVideoRtxEnabled(bool enable) { - // To be safe, this call is only allowed before initialization. 
Apps like - // Flute only have a singleton ChannelManager and we don't want this flag to - // be toggled between calls or when there's concurrent calls. We expect apps - // to enable this at startup and retain that setting for the lifetime of the - // app. - if (!initialized_) { - enable_rtx_ = enable; - return true; - } else { - RTC_LOG(LS_WARNING) << "Cannot toggle rtx after initialization!"; - return false; - } + RTC_DCHECK_RUN_ON(worker_thread_); } void ChannelManager::GetSupportedAudioSendCodecs( @@ -113,34 +110,6 @@ void ChannelManager::GetSupportedVideoReceiveCodecs( } } -void ChannelManager::GetSupportedDataCodecs( - std::vector* codecs) const { - *codecs = data_engine_->data_codecs(); -} - -bool ChannelManager::Init() { - RTC_DCHECK(!initialized_); - if (initialized_) { - return false; - } - RTC_DCHECK(network_thread_); - RTC_DCHECK(worker_thread_); - if (!network_thread_->IsCurrent()) { - // Do not allow invoking calls to other threads on the network thread. - network_thread_->Invoke( - RTC_FROM_HERE, [&] { network_thread_->DisallowBlockingCalls(); }); - } - - if (media_engine_) { - initialized_ = worker_thread_->Invoke( - RTC_FROM_HERE, [&] { return media_engine_->Init(); }); - RTC_DCHECK(initialized_); - } else { - initialized_ = true; - } - return initialized_; -} - RtpHeaderExtensions ChannelManager::GetDefaultEnabledAudioRtpHeaderExtensions() const { if (!media_engine_) @@ -169,46 +138,30 @@ ChannelManager::GetSupportedVideoRtpHeaderExtensions() const { return media_engine_->video().GetRtpHeaderExtensions(); } -void ChannelManager::Terminate() { - RTC_DCHECK(initialized_); - if (!initialized_) { - return; - } - // Need to destroy the channels on the worker thread. 
- worker_thread_->Invoke(RTC_FROM_HERE, [&] { - video_channels_.clear(); - voice_channels_.clear(); - data_channels_.clear(); - }); - initialized_ = false; -} - VoiceChannel* ChannelManager::CreateVoiceChannel( webrtc::Call* call, - const cricket::MediaConfig& media_config, + const MediaConfig& media_config, webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config, rtc::Thread* signaling_thread, const std::string& content_name, bool srtp_required, const webrtc::CryptoOptions& crypto_options, rtc::UniqueRandomIdGenerator* ssrc_generator, const AudioOptions& options) { + RTC_DCHECK(call); + RTC_DCHECK(media_engine_); + // TODO(bugs.webrtc.org/11992): Remove this workaround after updates in + // PeerConnection and add the expectation that we're already on the right + // thread. if (!worker_thread_->IsCurrent()) { return worker_thread_->Invoke(RTC_FROM_HERE, [&] { return CreateVoiceChannel(call, media_config, rtp_transport, - media_transport_config, signaling_thread, - content_name, srtp_required, crypto_options, - ssrc_generator, options); + signaling_thread, content_name, srtp_required, + crypto_options, ssrc_generator, options); }); } RTC_DCHECK_RUN_ON(worker_thread_); - RTC_DCHECK(initialized_); - RTC_DCHECK(call); - if (!media_engine_) { - return nullptr; - } VoiceMediaChannel* media_channel = media_engine_->voice().CreateMediaChannel( call, media_config, options, crypto_options); @@ -221,7 +174,7 @@ VoiceChannel* ChannelManager::CreateVoiceChannel( absl::WrapUnique(media_channel), content_name, srtp_required, crypto_options, ssrc_generator); - voice_channel->Init_w(rtp_transport, media_transport_config); + voice_channel->Init_w(rtp_transport); VoiceChannel* voice_channel_ptr = voice_channel.get(); voice_channels_.push_back(std::move(voice_channel)); @@ -230,34 +183,26 @@ VoiceChannel* ChannelManager::CreateVoiceChannel( void ChannelManager::DestroyVoiceChannel(VoiceChannel* voice_channel) { TRACE_EVENT0("webrtc", 
"ChannelManager::DestroyVoiceChannel"); - if (!voice_channel) { - return; - } + RTC_DCHECK(voice_channel); + if (!worker_thread_->IsCurrent()) { worker_thread_->Invoke(RTC_FROM_HERE, [&] { DestroyVoiceChannel(voice_channel); }); return; } - RTC_DCHECK(initialized_); - - auto it = absl::c_find_if(voice_channels_, - [&](const std::unique_ptr& p) { - return p.get() == voice_channel; - }); - RTC_DCHECK(it != voice_channels_.end()); - if (it == voice_channels_.end()) { - return; - } + RTC_DCHECK_RUN_ON(worker_thread_); - voice_channels_.erase(it); + voice_channels_.erase(absl::c_find_if( + voice_channels_, [&](const std::unique_ptr& p) { + return p.get() == voice_channel; + })); } VideoChannel* ChannelManager::CreateVideoChannel( webrtc::Call* call, - const cricket::MediaConfig& media_config, + const MediaConfig& media_config, webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config, rtc::Thread* signaling_thread, const std::string& content_name, bool srtp_required, @@ -265,21 +210,21 @@ VideoChannel* ChannelManager::CreateVideoChannel( rtc::UniqueRandomIdGenerator* ssrc_generator, const VideoOptions& options, webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) { + RTC_DCHECK(call); + RTC_DCHECK(media_engine_); + // TODO(bugs.webrtc.org/11992): Remove this workaround after updates in + // PeerConnection and add the expectation that we're already on the right + // thread. 
if (!worker_thread_->IsCurrent()) { return worker_thread_->Invoke(RTC_FROM_HERE, [&] { - return CreateVideoChannel( - call, media_config, rtp_transport, media_transport_config, - signaling_thread, content_name, srtp_required, crypto_options, - ssrc_generator, options, video_bitrate_allocator_factory); + return CreateVideoChannel(call, media_config, rtp_transport, + signaling_thread, content_name, srtp_required, + crypto_options, ssrc_generator, options, + video_bitrate_allocator_factory); }); } RTC_DCHECK_RUN_ON(worker_thread_); - RTC_DCHECK(initialized_); - RTC_DCHECK(call); - if (!media_engine_) { - return nullptr; - } VideoMediaChannel* media_channel = media_engine_->video().CreateMediaChannel( call, media_config, options, crypto_options, @@ -293,7 +238,7 @@ VideoChannel* ChannelManager::CreateVideoChannel( absl::WrapUnique(media_channel), content_name, srtp_required, crypto_options, ssrc_generator); - video_channel->Init_w(rtp_transport, media_transport_config); + video_channel->Init_w(rtp_transport); VideoChannel* video_channel_ptr = video_channel.get(); video_channels_.push_back(std::move(video_channel)); @@ -302,101 +247,30 @@ VideoChannel* ChannelManager::CreateVideoChannel( void ChannelManager::DestroyVideoChannel(VideoChannel* video_channel) { TRACE_EVENT0("webrtc", "ChannelManager::DestroyVideoChannel"); - if (!video_channel) { - return; - } + RTC_DCHECK(video_channel); + if (!worker_thread_->IsCurrent()) { worker_thread_->Invoke(RTC_FROM_HERE, [&] { DestroyVideoChannel(video_channel); }); return; } + RTC_DCHECK_RUN_ON(worker_thread_); - RTC_DCHECK(initialized_); - - auto it = absl::c_find_if(video_channels_, - [&](const std::unique_ptr& p) { - return p.get() == video_channel; - }); - RTC_DCHECK(it != video_channels_.end()); - if (it == video_channels_.end()) { - return; - } - - video_channels_.erase(it); -} - -RtpDataChannel* ChannelManager::CreateRtpDataChannel( - const cricket::MediaConfig& media_config, - webrtc::RtpTransportInternal* rtp_transport, 
- rtc::Thread* signaling_thread, - const std::string& content_name, - bool srtp_required, - const webrtc::CryptoOptions& crypto_options, - rtc::UniqueRandomIdGenerator* ssrc_generator) { - if (!worker_thread_->IsCurrent()) { - return worker_thread_->Invoke(RTC_FROM_HERE, [&] { - return CreateRtpDataChannel(media_config, rtp_transport, signaling_thread, - content_name, srtp_required, crypto_options, - ssrc_generator); - }); - } - - // This is ok to alloc from a thread other than the worker thread. - RTC_DCHECK(initialized_); - DataMediaChannel* media_channel = data_engine_->CreateChannel(media_config); - if (!media_channel) { - RTC_LOG(LS_WARNING) << "Failed to create RTP data channel."; - return nullptr; - } - - auto data_channel = std::make_unique( - worker_thread_, network_thread_, signaling_thread, - absl::WrapUnique(media_channel), content_name, srtp_required, - crypto_options, ssrc_generator); - - // Media Transports are not supported with Rtp Data Channel. - data_channel->Init_w(rtp_transport, webrtc::MediaTransportConfig()); - - RtpDataChannel* data_channel_ptr = data_channel.get(); - data_channels_.push_back(std::move(data_channel)); - return data_channel_ptr; -} - -void ChannelManager::DestroyRtpDataChannel(RtpDataChannel* data_channel) { - TRACE_EVENT0("webrtc", "ChannelManager::DestroyRtpDataChannel"); - if (!data_channel) { - return; - } - if (!worker_thread_->IsCurrent()) { - worker_thread_->Invoke( - RTC_FROM_HERE, [&] { return DestroyRtpDataChannel(data_channel); }); - return; - } - - RTC_DCHECK(initialized_); - - auto it = absl::c_find_if(data_channels_, - [&](const std::unique_ptr& p) { - return p.get() == data_channel; - }); - RTC_DCHECK(it != data_channels_.end()); - if (it == data_channels_.end()) { - return; - } - - data_channels_.erase(it); + video_channels_.erase(absl::c_find_if( + video_channels_, [&](const std::unique_ptr& p) { + return p.get() == video_channel; + })); } bool ChannelManager::StartAecDump(webrtc::FileWrapper file, int64_t 
max_size_bytes) { - return worker_thread_->Invoke(RTC_FROM_HERE, [&] { - return media_engine_->voice().StartAecDump(std::move(file), max_size_bytes); - }); + RTC_DCHECK_RUN_ON(worker_thread_); + return media_engine_->voice().StartAecDump(std::move(file), max_size_bytes); } void ChannelManager::StopAecDump() { - worker_thread_->Invoke(RTC_FROM_HERE, - [&] { media_engine_->voice().StopAecDump(); }); + RTC_DCHECK_RUN_ON(worker_thread_); + media_engine_->voice().StopAecDump(); } } // namespace cricket diff --git a/pc/channel_manager.h b/pc/channel_manager.h index 415e476a90..43fa27935f 100644 --- a/pc/channel_manager.h +++ b/pc/channel_manager.h @@ -19,7 +19,8 @@ #include "api/audio_options.h" #include "api/crypto/crypto_options.h" -#include "api/transport/media/media_transport_config.h" +#include "api/rtp_parameters.h" +#include "api/video/video_bitrate_allocator_factory.h" #include "call/call.h" #include "media/base/codec.h" #include "media/base/media_channel.h" @@ -30,6 +31,7 @@ #include "pc/session_description.h" #include "rtc_base/system/file_wrapper.h" #include "rtc_base/thread.h" +#include "rtc_base/unique_id_generator.h" namespace cricket { @@ -43,32 +45,20 @@ namespace cricket { // using device manager. class ChannelManager final { public: - // Construct a ChannelManager with the specified media engine and data engine. - ChannelManager(std::unique_ptr media_engine, - std::unique_ptr data_engine, - rtc::Thread* worker_thread, - rtc::Thread* network_thread); + // Returns an initialized instance of ChannelManager. + // If media_engine is non-nullptr, then the returned ChannelManager instance + // will own that reference and media engine initialization + static std::unique_ptr Create( + std::unique_ptr media_engine, + bool enable_rtx, + rtc::Thread* worker_thread, + rtc::Thread* network_thread); + + ChannelManager() = delete; ~ChannelManager(); - // Accessors for the worker thread, allowing it to be set after construction, - // but before Init. 
set_worker_thread will return false if called after Init. rtc::Thread* worker_thread() const { return worker_thread_; } - bool set_worker_thread(rtc::Thread* thread) { - if (initialized_) { - return false; - } - worker_thread_ = thread; - return true; - } rtc::Thread* network_thread() const { return network_thread_; } - bool set_network_thread(rtc::Thread* thread) { - if (initialized_) { - return false; - } - network_thread_ = thread; - return true; - } - MediaEngineInterface* media_engine() { return media_engine_.get(); } // Retrieves the list of supported audio & video codec types. @@ -77,7 +67,6 @@ class ChannelManager final { void GetSupportedAudioReceiveCodecs(std::vector* codecs) const; void GetSupportedVideoSendCodecs(std::vector* codecs) const; void GetSupportedVideoReceiveCodecs(std::vector* codecs) const; - void GetSupportedDataCodecs(std::vector* codecs) const; RtpHeaderExtensions GetDefaultEnabledAudioRtpHeaderExtensions() const; std::vector GetSupportedAudioRtpHeaderExtensions() const; @@ -85,29 +74,20 @@ class ChannelManager final { std::vector GetSupportedVideoRtpHeaderExtensions() const; - // Indicates whether the media engine is started. - bool initialized() const { return initialized_; } - // Starts up the media engine. - bool Init(); - // Shuts down the media engine. - void Terminate(); - // The operations below all occur on the worker thread. // ChannelManager retains ownership of the created channels, so clients should // call the appropriate Destroy*Channel method when done. // Creates a voice channel, to be associated with the specified session. 
- VoiceChannel* CreateVoiceChannel( - webrtc::Call* call, - const cricket::MediaConfig& media_config, - webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config, - rtc::Thread* signaling_thread, - const std::string& content_name, - bool srtp_required, - const webrtc::CryptoOptions& crypto_options, - rtc::UniqueRandomIdGenerator* ssrc_generator, - const AudioOptions& options); + VoiceChannel* CreateVoiceChannel(webrtc::Call* call, + const MediaConfig& media_config, + webrtc::RtpTransportInternal* rtp_transport, + rtc::Thread* signaling_thread, + const std::string& content_name, + bool srtp_required, + const webrtc::CryptoOptions& crypto_options, + rtc::UniqueRandomIdGenerator* ssrc_generator, + const AudioOptions& options); // Destroys a voice channel created by CreateVoiceChannel. void DestroyVoiceChannel(VoiceChannel* voice_channel); @@ -116,9 +96,8 @@ class ChannelManager final { // Version of the above that takes PacketTransportInternal. VideoChannel* CreateVideoChannel( webrtc::Call* call, - const cricket::MediaConfig& media_config, + const MediaConfig& media_config, webrtc::RtpTransportInternal* rtp_transport, - const webrtc::MediaTransportConfig& media_transport_config, rtc::Thread* signaling_thread, const std::string& content_name, bool srtp_required, @@ -129,32 +108,6 @@ class ChannelManager final { // Destroys a video channel created by CreateVideoChannel. void DestroyVideoChannel(VideoChannel* video_channel); - RtpDataChannel* CreateRtpDataChannel( - const cricket::MediaConfig& media_config, - webrtc::RtpTransportInternal* rtp_transport, - rtc::Thread* signaling_thread, - const std::string& content_name, - bool srtp_required, - const webrtc::CryptoOptions& crypto_options, - rtc::UniqueRandomIdGenerator* ssrc_generator); - // Destroys a data channel created by CreateRtpDataChannel. - void DestroyRtpDataChannel(RtpDataChannel* data_channel); - - // Indicates whether any channels exist. 
- bool has_channels() const { - return (!voice_channels_.empty() || !video_channels_.empty() || - !data_channels_.empty()); - } - - // RTX will be enabled/disabled in engines that support it. The supporting - // engines will start offering an RTX codec. Must be called before Init(). - bool SetVideoRtxEnabled(bool enable); - - // Starts/stops the local microphone and enables polling of the input level. - bool capturing() const { return capturing_; } - - // The operations below occur on the main thread. - // Starts AEC dump using existing file, with a specified maximum file size in // bytes. When the limit is reached, logging will stop and the file will be // closed. If max_size_bytes is set to <= 0, no limit will be used. @@ -164,20 +117,22 @@ class ChannelManager final { void StopAecDump(); private: - std::unique_ptr media_engine_; // Nullable. - std::unique_ptr data_engine_; // Non-null. - bool initialized_ = false; - rtc::Thread* main_thread_; - rtc::Thread* worker_thread_; - rtc::Thread* network_thread_; + ChannelManager(std::unique_ptr media_engine, + bool enable_rtx, + rtc::Thread* worker_thread, + rtc::Thread* network_thread); + + const std::unique_ptr media_engine_; // Nullable. + rtc::Thread* const worker_thread_; + rtc::Thread* const network_thread_; // Vector contents are non-null. 
- std::vector> voice_channels_; - std::vector> video_channels_; - std::vector> data_channels_; + std::vector> voice_channels_ + RTC_GUARDED_BY(worker_thread_); + std::vector> video_channels_ + RTC_GUARDED_BY(worker_thread_); - bool enable_rtx_ = false; - bool capturing_ = false; + const bool enable_rtx_; }; } // namespace cricket diff --git a/pc/channel_manager_unittest.cc b/pc/channel_manager_unittest.cc index 6f3128ebde..88de1f6a48 100644 --- a/pc/channel_manager_unittest.cc +++ b/pc/channel_manager_unittest.cc @@ -13,7 +13,6 @@ #include #include "api/rtc_error.h" -#include "api/transport/media/media_transport_config.h" #include "api/video/builtin_video_bitrate_allocator_factory.h" #include "media/base/fake_media_engine.h" #include "media/base/test_utils.h" @@ -27,11 +26,9 @@ #include "rtc_base/thread.h" #include "test/gtest.h" +namespace cricket { namespace { const bool kDefaultSrtpRequired = true; -} - -namespace cricket { static const AudioCodec kAudioCodecs[] = { AudioCodec(97, "voice", 1, 2, 3), @@ -44,103 +41,57 @@ static const VideoCodec kVideoCodecs[] = { VideoCodec(96, "rtx"), }; +std::unique_ptr CreateFakeMediaEngine() { + auto fme = std::make_unique(); + fme->SetAudioCodecs(MAKE_VECTOR(kAudioCodecs)); + fme->SetVideoCodecs(MAKE_VECTOR(kVideoCodecs)); + return fme; +} + +} // namespace + class ChannelManagerTest : public ::testing::Test { protected: ChannelManagerTest() : network_(rtc::Thread::CreateWithSocketServer()), - worker_(rtc::Thread::Create()), + worker_(rtc::Thread::Current()), video_bitrate_allocator_factory_( webrtc::CreateBuiltinVideoBitrateAllocatorFactory()), - fme_(new cricket::FakeMediaEngine()), - fdme_(new cricket::FakeDataEngine()), - cm_(new cricket::ChannelManager( - std::unique_ptr(fme_), - std::unique_ptr(fdme_), - rtc::Thread::Current(), - rtc::Thread::Current())), - fake_call_() { - fme_->SetAudioCodecs(MAKE_VECTOR(kAudioCodecs)); - fme_->SetVideoCodecs(MAKE_VECTOR(kVideoCodecs)); - } - - std::unique_ptr 
CreateDtlsSrtpTransport() { - rtp_dtls_transport_ = std::make_unique( - "fake_dtls_transport", cricket::ICE_CANDIDATE_COMPONENT_RTP); - auto dtls_srtp_transport = std::make_unique( - /*rtcp_mux_required=*/true); - dtls_srtp_transport->SetDtlsTransports(rtp_dtls_transport_.get(), - /*rtcp_dtls_transport=*/nullptr); - return dtls_srtp_transport; + cm_(cricket::ChannelManager::Create(CreateFakeMediaEngine(), + false, + worker_, + network_.get())), + fake_call_(worker_, network_.get()) { + network_->SetName("Network", this); + network_->Start(); } - void TestCreateDestroyChannels( - webrtc::RtpTransportInternal* rtp_transport, - webrtc::MediaTransportConfig media_transport_config) { + void TestCreateDestroyChannels(webrtc::RtpTransportInternal* rtp_transport) { + RTC_DCHECK_RUN_ON(worker_); cricket::VoiceChannel* voice_channel = cm_->CreateVoiceChannel( &fake_call_, cricket::MediaConfig(), rtp_transport, - media_transport_config, rtc::Thread::Current(), cricket::CN_AUDIO, - kDefaultSrtpRequired, webrtc::CryptoOptions(), &ssrc_generator_, - AudioOptions()); + rtc::Thread::Current(), cricket::CN_AUDIO, kDefaultSrtpRequired, + webrtc::CryptoOptions(), &ssrc_generator_, AudioOptions()); EXPECT_TRUE(voice_channel != nullptr); cricket::VideoChannel* video_channel = cm_->CreateVideoChannel( &fake_call_, cricket::MediaConfig(), rtp_transport, - media_transport_config, rtc::Thread::Current(), cricket::CN_VIDEO, - kDefaultSrtpRequired, webrtc::CryptoOptions(), &ssrc_generator_, - VideoOptions(), video_bitrate_allocator_factory_.get()); + rtc::Thread::Current(), cricket::CN_VIDEO, kDefaultSrtpRequired, + webrtc::CryptoOptions(), &ssrc_generator_, VideoOptions(), + video_bitrate_allocator_factory_.get()); EXPECT_TRUE(video_channel != nullptr); - cricket::RtpDataChannel* rtp_data_channel = cm_->CreateRtpDataChannel( - cricket::MediaConfig(), rtp_transport, rtc::Thread::Current(), - cricket::CN_DATA, kDefaultSrtpRequired, webrtc::CryptoOptions(), - &ssrc_generator_); - 
EXPECT_TRUE(rtp_data_channel != nullptr); cm_->DestroyVideoChannel(video_channel); cm_->DestroyVoiceChannel(voice_channel); - cm_->DestroyRtpDataChannel(rtp_data_channel); - cm_->Terminate(); } - std::unique_ptr rtp_dtls_transport_; std::unique_ptr network_; - std::unique_ptr worker_; + rtc::Thread* const worker_; std::unique_ptr video_bitrate_allocator_factory_; - // |fme_| and |fdme_| are actually owned by |cm_|. - cricket::FakeMediaEngine* fme_; - cricket::FakeDataEngine* fdme_; std::unique_ptr cm_; cricket::FakeCall fake_call_; rtc::UniqueRandomIdGenerator ssrc_generator_; }; -// Test that we startup/shutdown properly. -TEST_F(ChannelManagerTest, StartupShutdown) { - EXPECT_FALSE(cm_->initialized()); - EXPECT_EQ(rtc::Thread::Current(), cm_->worker_thread()); - EXPECT_TRUE(cm_->Init()); - EXPECT_TRUE(cm_->initialized()); - cm_->Terminate(); - EXPECT_FALSE(cm_->initialized()); -} - -// Test that we startup/shutdown properly with a worker thread. -TEST_F(ChannelManagerTest, StartupShutdownOnThread) { - network_->Start(); - worker_->Start(); - EXPECT_FALSE(cm_->initialized()); - EXPECT_EQ(rtc::Thread::Current(), cm_->worker_thread()); - EXPECT_TRUE(cm_->set_network_thread(network_.get())); - EXPECT_EQ(network_.get(), cm_->network_thread()); - EXPECT_TRUE(cm_->set_worker_thread(worker_.get())); - EXPECT_EQ(worker_.get(), cm_->worker_thread()); - EXPECT_TRUE(cm_->Init()); - EXPECT_TRUE(cm_->initialized()); - // Setting the network or worker thread while initialized should fail. - EXPECT_FALSE(cm_->set_network_thread(rtc::Thread::Current())); - EXPECT_FALSE(cm_->set_worker_thread(rtc::Thread::Current())); - cm_->Terminate(); - EXPECT_FALSE(cm_->initialized()); -} - TEST_F(ChannelManagerTest, SetVideoRtxEnabled) { std::vector send_codecs; std::vector recv_codecs; @@ -153,49 +104,34 @@ TEST_F(ChannelManagerTest, SetVideoRtxEnabled) { EXPECT_FALSE(ContainsMatchingCodec(recv_codecs, rtx_codec)); // Enable and check. 
- EXPECT_TRUE(cm_->SetVideoRtxEnabled(true)); + cm_ = cricket::ChannelManager::Create(CreateFakeMediaEngine(), + true, worker_, network_.get()); cm_->GetSupportedVideoSendCodecs(&send_codecs); EXPECT_TRUE(ContainsMatchingCodec(send_codecs, rtx_codec)); cm_->GetSupportedVideoSendCodecs(&recv_codecs); EXPECT_TRUE(ContainsMatchingCodec(recv_codecs, rtx_codec)); // Disable and check. - EXPECT_TRUE(cm_->SetVideoRtxEnabled(false)); + cm_ = cricket::ChannelManager::Create(CreateFakeMediaEngine(), + false, worker_, network_.get()); cm_->GetSupportedVideoSendCodecs(&send_codecs); EXPECT_FALSE(ContainsMatchingCodec(send_codecs, rtx_codec)); cm_->GetSupportedVideoSendCodecs(&recv_codecs); EXPECT_FALSE(ContainsMatchingCodec(recv_codecs, rtx_codec)); - - // Cannot toggle rtx after initialization. - EXPECT_TRUE(cm_->Init()); - EXPECT_FALSE(cm_->SetVideoRtxEnabled(true)); - EXPECT_FALSE(cm_->SetVideoRtxEnabled(false)); - - // Can set again after terminate. - cm_->Terminate(); - EXPECT_TRUE(cm_->SetVideoRtxEnabled(true)); - cm_->GetSupportedVideoSendCodecs(&send_codecs); - EXPECT_TRUE(ContainsMatchingCodec(send_codecs, rtx_codec)); - cm_->GetSupportedVideoSendCodecs(&recv_codecs); - EXPECT_TRUE(ContainsMatchingCodec(recv_codecs, rtx_codec)); } TEST_F(ChannelManagerTest, CreateDestroyChannels) { - EXPECT_TRUE(cm_->Init()); - auto rtp_transport = CreateDtlsSrtpTransport(); - TestCreateDestroyChannels(rtp_transport.get(), - webrtc::MediaTransportConfig()); -} - -TEST_F(ChannelManagerTest, CreateDestroyChannelsOnThread) { - network_->Start(); - worker_->Start(); - EXPECT_TRUE(cm_->set_worker_thread(worker_.get())); - EXPECT_TRUE(cm_->set_network_thread(network_.get())); - EXPECT_TRUE(cm_->Init()); - auto rtp_transport = CreateDtlsSrtpTransport(); - TestCreateDestroyChannels(rtp_transport.get(), - webrtc::MediaTransportConfig()); + auto rtp_dtls_transport = std::make_unique( + "fake_dtls_transport", cricket::ICE_CANDIDATE_COMPONENT_RTP, + network_.get()); + auto dtls_srtp_transport = 
std::make_unique( + /*rtcp_mux_required=*/true); + network_->Invoke( + RTC_FROM_HERE, [&rtp_dtls_transport, &dtls_srtp_transport] { + dtls_srtp_transport->SetDtlsTransports(rtp_dtls_transport.get(), + /*rtcp_dtls_transport=*/nullptr); + }); + TestCreateDestroyChannels(dtls_srtp_transport.get()); } } // namespace cricket diff --git a/pc/channel_unittest.cc b/pc/channel_unittest.cc index a3fe3f68de..581f6de7ac 100644 --- a/pc/channel_unittest.cc +++ b/pc/channel_unittest.cc @@ -17,7 +17,6 @@ #include "api/array_view.h" #include "api/audio_options.h" #include "api/rtp_parameters.h" -#include "api/transport/media/media_transport_config.h" #include "media/base/codec.h" #include "media/base/fake_media_engine.h" #include "media/base/fake_rtp.h" @@ -36,6 +35,8 @@ #include "rtc_base/checks.h" #include "rtc_base/rtc_certificate.h" #include "rtc_base/ssl_identity.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "test/gmock.h" #include "test/gtest.h" @@ -53,7 +54,6 @@ const cricket::AudioCodec kPcmaCodec(8, "PCMA", 64000, 8000, 1); const cricket::AudioCodec kIsacCodec(103, "ISAC", 40000, 16000, 1); const cricket::VideoCodec kH264Codec(97, "H264"); const cricket::VideoCodec kH264SvcCodec(99, "H264-SVC"); -const cricket::DataCodec kGoogleDataCodec(101, "google-data"); const uint32_t kSsrc1 = 0x1111; const uint32_t kSsrc2 = 0x2222; const uint32_t kSsrc3 = 0x3333; @@ -94,14 +94,7 @@ class VideoTraits : public Traits {}; -class DataTraits : public Traits {}; - -// Base class for Voice/Video/RtpDataChannel tests +// Base class for Voice/Video tests template class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { public: @@ -128,19 +121,30 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { network_thread_keeper_->SetName("Network", nullptr); network_thread_ = network_thread_keeper_.get(); } + RTC_DCHECK(network_thread_); + } + + ~ChannelTest() { + if (network_thread_) { 
+ network_thread_->Invoke( + RTC_FROM_HERE, [this]() { network_thread_safety_->SetNotAlive(); }); + } } void CreateChannels(int flags1, int flags2) { CreateChannels(std::make_unique( - nullptr, typename T::Options()), + nullptr, typename T::Options(), network_thread_), std::make_unique( - nullptr, typename T::Options()), + nullptr, typename T::Options(), network_thread_), flags1, flags2); } void CreateChannels(std::unique_ptr ch1, std::unique_ptr ch2, int flags1, int flags2) { + RTC_DCHECK(!channel1_); + RTC_DCHECK(!channel2_); + // Network thread is started in CreateChannels, to allow the test to // configure a fake clock before any threads are spawned and attempt to // access the time. @@ -152,8 +156,6 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { // channels. RTC_DCHECK_EQ(flags1 & RAW_PACKET_TRANSPORT, flags2 & RAW_PACKET_TRANSPORT); rtc::Thread* worker_thread = rtc::Thread::Current(); - media_channel1_ = ch1.get(); - media_channel2_ = ch2.get(); rtc::PacketTransportInternal* rtp1 = nullptr; rtc::PacketTransportInternal* rtcp1 = nullptr; rtc::PacketTransportInternal* rtp2 = nullptr; @@ -171,11 +173,12 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { } else { // Confirmed to work with KT_RSA and KT_ECDSA. fake_rtp_dtls_transport1_.reset(new cricket::FakeDtlsTransport( - "channel1", cricket::ICE_CANDIDATE_COMPONENT_RTP)); + "channel1", cricket::ICE_CANDIDATE_COMPONENT_RTP, network_thread_)); rtp1 = fake_rtp_dtls_transport1_.get(); if (!(flags1 & RTCP_MUX)) { fake_rtcp_dtls_transport1_.reset(new cricket::FakeDtlsTransport( - "channel1", cricket::ICE_CANDIDATE_COMPONENT_RTCP)); + "channel1", cricket::ICE_CANDIDATE_COMPONENT_RTCP, + network_thread_)); rtcp1 = fake_rtcp_dtls_transport1_.get(); } if (flags1 & DTLS) { @@ -200,11 +203,12 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { } else { // Confirmed to work with KT_RSA and KT_ECDSA. 
fake_rtp_dtls_transport2_.reset(new cricket::FakeDtlsTransport( - "channel2", cricket::ICE_CANDIDATE_COMPONENT_RTP)); + "channel2", cricket::ICE_CANDIDATE_COMPONENT_RTP, network_thread_)); rtp2 = fake_rtp_dtls_transport2_.get(); if (!(flags2 & RTCP_MUX)) { fake_rtcp_dtls_transport2_.reset(new cricket::FakeDtlsTransport( - "channel2", cricket::ICE_CANDIDATE_COMPONENT_RTCP)); + "channel2", cricket::ICE_CANDIDATE_COMPONENT_RTCP, + network_thread_)); rtcp2 = fake_rtcp_dtls_transport2_.get(); } if (flags2 & DTLS) { @@ -229,10 +233,6 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { rtp_transport1_.get(), flags1); channel2_ = CreateChannel(worker_thread, network_thread_, std::move(ch2), rtp_transport2_.get(), flags2); - channel1_->SignalRtcpMuxFullyActive.connect( - this, &ChannelTest::OnRtcpMuxFullyActive1); - channel2_->SignalRtcpMuxFullyActive.connect( - this, &ChannelTest::OnRtcpMuxFullyActive2); CreateContent(flags1, kPcmuCodec, kH264Codec, &local_media_content1_); CreateContent(flags2, kPcmuCodec, kH264Codec, &local_media_content2_); CopyContent(local_media_content1_, &remote_media_content1_); @@ -289,10 +289,14 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { auto rtp_transport = std::make_unique( rtcp_packet_transport == nullptr); - rtp_transport->SetRtpPacketTransport(rtp_packet_transport); - if (rtcp_packet_transport) { - rtp_transport->SetRtcpPacketTransport(rtcp_packet_transport); - } + network_thread_->Invoke( + RTC_FROM_HERE, + [&rtp_transport, rtp_packet_transport, rtcp_packet_transport] { + rtp_transport->SetRtpPacketTransport(rtp_packet_transport); + if (rtcp_packet_transport) { + rtp_transport->SetRtcpPacketTransport(rtcp_packet_transport); + } + }); return rtp_transport; } @@ -302,8 +306,12 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { auto dtls_srtp_transport = std::make_unique( rtcp_dtls_transport == nullptr); - 
dtls_srtp_transport->SetDtlsTransports(rtp_dtls_transport, - rtcp_dtls_transport); + network_thread_->Invoke( + RTC_FROM_HERE, + [&dtls_srtp_transport, rtp_dtls_transport, rtcp_dtls_transport] { + dtls_srtp_transport->SetDtlsTransports(rtp_dtls_transport, + rtcp_dtls_transport); + }); return dtls_srtp_transport; } @@ -328,6 +336,10 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { fake_rtcp_packet_transport2_.get(), asymmetric); } }); + // The transport becoming writable will asynchronously update the send state + // on the worker thread; since this test uses the main thread as the worker + // thread, we must process the message queue for this to occur. + WaitForThreads(); } bool SendInitiate() { @@ -335,6 +347,7 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { SdpType::kOffer, NULL); if (result) { channel1_->Enable(true); + FlushCurrentThread(); result = channel2_->SetRemoteContent(&remote_media_content1_, SdpType::kOffer, NULL); if (result) { @@ -348,6 +361,7 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { bool SendAccept() { channel2_->Enable(true); + FlushCurrentThread(); return channel1_->SetRemoteContent(&remote_media_content2_, SdpType::kAnswer, NULL); } @@ -384,55 +398,52 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { return result; } - bool Terminate() { - channel1_.reset(); - channel2_.reset(); - fake_rtp_dtls_transport1_.reset(); - fake_rtcp_dtls_transport1_.reset(); - fake_rtp_dtls_transport2_.reset(); - fake_rtcp_dtls_transport2_.reset(); - fake_rtp_packet_transport1_.reset(); - fake_rtcp_packet_transport1_.reset(); - fake_rtp_packet_transport2_.reset(); - fake_rtcp_packet_transport2_.reset(); - if (network_thread_keeper_) { - network_thread_keeper_.reset(); - } - return true; + void SendRtp(typename T::MediaChannel* media_channel, rtc::Buffer data) { + network_thread_->PostTask(webrtc::ToQueuedTask( + network_thread_safety_, 
[media_channel, data = std::move(data)]() { + media_channel->SendRtp(data.data(), data.size(), + rtc::PacketOptions()); + })); } void SendRtp1() { - media_channel1_->SendRtp(rtp_packet_.data(), rtp_packet_.size(), - rtc::PacketOptions()); + SendRtp1(rtc::Buffer(rtp_packet_.data(), rtp_packet_.size())); + } + + void SendRtp1(rtc::Buffer data) { + SendRtp(media_channel1(), std::move(data)); } + void SendRtp2() { - media_channel2_->SendRtp(rtp_packet_.data(), rtp_packet_.size(), - rtc::PacketOptions()); + SendRtp2(rtc::Buffer(rtp_packet_.data(), rtp_packet_.size())); + } + + void SendRtp2(rtc::Buffer data) { + SendRtp(media_channel2(), std::move(data)); } + // Methods to send custom data. void SendCustomRtp1(uint32_t ssrc, int sequence_number, int pl_type = -1) { - rtc::Buffer data = CreateRtpData(ssrc, sequence_number, pl_type); - media_channel1_->SendRtp(data.data(), data.size(), rtc::PacketOptions()); + SendRtp1(CreateRtpData(ssrc, sequence_number, pl_type)); } void SendCustomRtp2(uint32_t ssrc, int sequence_number, int pl_type = -1) { - rtc::Buffer data = CreateRtpData(ssrc, sequence_number, pl_type); - media_channel2_->SendRtp(data.data(), data.size(), rtc::PacketOptions()); + SendRtp2(CreateRtpData(ssrc, sequence_number, pl_type)); } bool CheckRtp1() { - return media_channel1_->CheckRtp(rtp_packet_.data(), rtp_packet_.size()); + return media_channel1()->CheckRtp(rtp_packet_.data(), rtp_packet_.size()); } bool CheckRtp2() { - return media_channel2_->CheckRtp(rtp_packet_.data(), rtp_packet_.size()); + return media_channel2()->CheckRtp(rtp_packet_.data(), rtp_packet_.size()); } // Methods to check custom data. 
bool CheckCustomRtp1(uint32_t ssrc, int sequence_number, int pl_type = -1) { rtc::Buffer data = CreateRtpData(ssrc, sequence_number, pl_type); - return media_channel1_->CheckRtp(data.data(), data.size()); + return media_channel1()->CheckRtp(data.data(), data.size()); } bool CheckCustomRtp2(uint32_t ssrc, int sequence_number, int pl_type = -1) { rtc::Buffer data = CreateRtpData(ssrc, sequence_number, pl_type); - return media_channel2_->CheckRtp(data.data(), data.size()); + return media_channel2()->CheckRtp(data.data(), data.size()); } rtc::Buffer CreateRtpData(uint32_t ssrc, int sequence_number, int pl_type) { rtc::Buffer data(rtp_packet_.data(), rtp_packet_.size()); @@ -445,8 +456,8 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { return data; } - bool CheckNoRtp1() { return media_channel1_->CheckNoRtp(); } - bool CheckNoRtp2() { return media_channel2_->CheckNoRtp(); } + bool CheckNoRtp1() { return media_channel1()->CheckNoRtp(); } + bool CheckNoRtp2() { return media_channel2()->CheckNoRtp(); } void CreateContent(int flags, const cricket::AudioCodec& audio_codec, @@ -491,13 +502,6 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { return false; // overridden in specialized classes } - void OnRtcpMuxFullyActive1(const std::string&) { - rtcp_mux_activated_callbacks1_++; - } - void OnRtcpMuxFullyActive2(const std::string&) { - rtcp_mux_activated_callbacks2_++; - } - cricket::CandidatePairInterface* last_selected_candidate_pair() { return last_selected_candidate_pair_; } @@ -508,19 +512,38 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { // Base implementation. } + // Utility method that calls BaseChannel::srtp_active() on the network thread + // and returns the result. The |srtp_active()| state is maintained on the + // network thread, which callers need to factor in. 
+ bool IsSrtpActive(std::unique_ptr& channel) { + RTC_DCHECK(channel.get()); + return network_thread_->Invoke( + RTC_FROM_HERE, [&] { return channel->srtp_active(); }); + } + + // Returns true iff the transport is set for a channel and rtcp_mux_enabled() + // returns true. + bool IsRtcpMuxEnabled(std::unique_ptr& channel) { + RTC_DCHECK(channel.get()); + return network_thread_->Invoke(RTC_FROM_HERE, [&] { + return channel->rtp_transport() && + channel->rtp_transport()->rtcp_mux_enabled(); + }); + } + // Tests that can be used by derived classes. // Basic sanity check. void TestInit() { CreateChannels(0, 0); - EXPECT_FALSE(channel1_->srtp_active()); - EXPECT_FALSE(media_channel1_->sending()); + EXPECT_FALSE(IsSrtpActive(channel1_)); + EXPECT_FALSE(media_channel1()->sending()); if (verify_playout_) { - EXPECT_FALSE(media_channel1_->playout()); + EXPECT_FALSE(media_channel1()->playout()); } - EXPECT_TRUE(media_channel1_->codecs().empty()); - EXPECT_TRUE(media_channel1_->recv_streams().empty()); - EXPECT_TRUE(media_channel1_->rtp_packets().empty()); + EXPECT_TRUE(media_channel1()->codecs().empty()); + EXPECT_TRUE(media_channel1()->recv_streams().empty()); + EXPECT_TRUE(media_channel1()->rtp_packets().empty()); } // Test that SetLocalContent and SetRemoteContent properly configure @@ -530,11 +553,11 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { typename T::Content content; CreateContent(0, kPcmuCodec, kH264Codec, &content); EXPECT_TRUE(channel1_->SetLocalContent(&content, SdpType::kOffer, NULL)); - EXPECT_EQ(0U, media_channel1_->codecs().size()); + EXPECT_EQ(0U, media_channel1()->codecs().size()); EXPECT_TRUE(channel1_->SetRemoteContent(&content, SdpType::kAnswer, NULL)); - ASSERT_EQ(1U, media_channel1_->codecs().size()); + ASSERT_EQ(1U, media_channel1()->codecs().size()); EXPECT_TRUE( - CodecMatches(content.codecs()[0], media_channel1_->codecs()[0])); + CodecMatches(content.codecs()[0], media_channel1()->codecs()[0])); } // Test that 
SetLocalContent and SetRemoteContent properly configure @@ -551,7 +574,7 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { EXPECT_TRUE(channel1_->SetLocalContent(&content, SdpType::kOffer, NULL)); content.set_extmap_allow_mixed_enum(answer_enum); EXPECT_TRUE(channel1_->SetRemoteContent(&content, SdpType::kAnswer, NULL)); - EXPECT_EQ(answer, media_channel1_->ExtmapAllowMixed()); + EXPECT_EQ(answer, media_channel1()->ExtmapAllowMixed()); } void TestSetContentsExtmapAllowMixedCallee(bool offer, bool answer) { // For a callee, SetRemoteContent() is called first with an offer and next @@ -565,7 +588,7 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { EXPECT_TRUE(channel1_->SetRemoteContent(&content, SdpType::kOffer, NULL)); content.set_extmap_allow_mixed_enum(answer_enum); EXPECT_TRUE(channel1_->SetLocalContent(&content, SdpType::kAnswer, NULL)); - EXPECT_EQ(answer, media_channel1_->ExtmapAllowMixed()); + EXPECT_EQ(answer, media_channel1()->ExtmapAllowMixed()); } // Test that SetLocalContent and SetRemoteContent properly deals @@ -575,11 +598,11 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { typename T::Content content; EXPECT_TRUE(channel1_->SetLocalContent(&content, SdpType::kOffer, NULL)); CreateContent(0, kPcmuCodec, kH264Codec, &content); - EXPECT_EQ(0U, media_channel1_->codecs().size()); + EXPECT_EQ(0U, media_channel1()->codecs().size()); EXPECT_TRUE(channel1_->SetRemoteContent(&content, SdpType::kAnswer, NULL)); - ASSERT_EQ(1U, media_channel1_->codecs().size()); + ASSERT_EQ(1U, media_channel1()->codecs().size()); EXPECT_TRUE( - CodecMatches(content.codecs()[0], media_channel1_->codecs()[0])); + CodecMatches(content.codecs()[0], media_channel1()->codecs()[0])); } // Test that SetLocalContent and SetRemoteContent properly set RTCP @@ -598,29 +621,6 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { EXPECT_TRUE(channel2_->SetRemoteContent(&content, 
SdpType::kAnswer, NULL)); } - // Test that SetLocalContent and SetRemoteContent properly set RTCP - // mux when a provisional answer is received. - void TestSetContentsRtcpMuxWithPrAnswer() { - CreateChannels(0, 0); - typename T::Content content; - CreateContent(0, kPcmuCodec, kH264Codec, &content); - content.set_rtcp_mux(true); - EXPECT_TRUE(channel1_->SetLocalContent(&content, SdpType::kOffer, NULL)); - EXPECT_TRUE( - channel1_->SetRemoteContent(&content, SdpType::kPrAnswer, NULL)); - // Both sides agree on mux. Should signal RTCP mux as fully activated. - EXPECT_EQ(0, rtcp_mux_activated_callbacks1_); - EXPECT_TRUE(channel1_->SetRemoteContent(&content, SdpType::kAnswer, NULL)); - EXPECT_EQ(1, rtcp_mux_activated_callbacks1_); - // Only initiator supports mux. Should still have a separate RTCP channel. - EXPECT_TRUE(channel2_->SetLocalContent(&content, SdpType::kOffer, NULL)); - content.set_rtcp_mux(false); - EXPECT_TRUE( - channel2_->SetRemoteContent(&content, SdpType::kPrAnswer, NULL)); - EXPECT_TRUE(channel2_->SetRemoteContent(&content, SdpType::kAnswer, NULL)); - EXPECT_EQ(0, rtcp_mux_activated_callbacks2_); - } - // Test that SetLocalContent and SetRemoteContent properly // handles adding and removing StreamParams when the action is a full // SdpType::kOffer / SdpType::kAnswer. @@ -643,21 +643,21 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateContent(0, kPcmuCodec, kH264Codec, &content1); content1.AddStream(stream1); EXPECT_TRUE(channel1_->SetLocalContent(&content1, SdpType::kOffer, NULL)); - EXPECT_TRUE(channel1_->Enable(true)); - EXPECT_EQ(1u, media_channel1_->send_streams().size()); + channel1_->Enable(true); + EXPECT_EQ(1u, media_channel1()->send_streams().size()); EXPECT_TRUE(channel2_->SetRemoteContent(&content1, SdpType::kOffer, NULL)); - EXPECT_EQ(1u, media_channel2_->recv_streams().size()); + EXPECT_EQ(1u, media_channel2()->recv_streams().size()); ConnectFakeTransports(); // Channel 2 do not send anything. 
typename T::Content content2; CreateContent(0, kPcmuCodec, kH264Codec, &content2); EXPECT_TRUE(channel1_->SetRemoteContent(&content2, SdpType::kAnswer, NULL)); - EXPECT_EQ(0u, media_channel1_->recv_streams().size()); + EXPECT_EQ(0u, media_channel1()->recv_streams().size()); EXPECT_TRUE(channel2_->SetLocalContent(&content2, SdpType::kAnswer, NULL)); - EXPECT_TRUE(channel2_->Enable(true)); - EXPECT_EQ(0u, media_channel2_->send_streams().size()); + channel2_->Enable(true); + EXPECT_EQ(0u, media_channel2()->send_streams().size()); SendCustomRtp1(kSsrc1, 0); WaitForThreads(); @@ -668,21 +668,21 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateContent(0, kPcmuCodec, kH264Codec, &content3); content3.AddStream(stream2); EXPECT_TRUE(channel2_->SetLocalContent(&content3, SdpType::kOffer, NULL)); - ASSERT_EQ(1u, media_channel2_->send_streams().size()); - EXPECT_EQ(stream2, media_channel2_->send_streams()[0]); + ASSERT_EQ(1u, media_channel2()->send_streams().size()); + EXPECT_EQ(stream2, media_channel2()->send_streams()[0]); EXPECT_TRUE(channel1_->SetRemoteContent(&content3, SdpType::kOffer, NULL)); - ASSERT_EQ(1u, media_channel1_->recv_streams().size()); - EXPECT_EQ(stream2, media_channel1_->recv_streams()[0]); + ASSERT_EQ(1u, media_channel1()->recv_streams().size()); + EXPECT_EQ(stream2, media_channel1()->recv_streams()[0]); // Channel 1 replies but stop sending stream1. 
typename T::Content content4; CreateContent(0, kPcmuCodec, kH264Codec, &content4); EXPECT_TRUE(channel1_->SetLocalContent(&content4, SdpType::kAnswer, NULL)); - EXPECT_EQ(0u, media_channel1_->send_streams().size()); + EXPECT_EQ(0u, media_channel1()->send_streams().size()); EXPECT_TRUE(channel2_->SetRemoteContent(&content4, SdpType::kAnswer, NULL)); - EXPECT_EQ(0u, media_channel2_->recv_streams().size()); + EXPECT_EQ(0u, media_channel2()->recv_streams().size()); SendCustomRtp2(kSsrc2, 0); WaitForThreads(); @@ -693,56 +693,58 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { void TestPlayoutAndSendingStates() { CreateChannels(0, 0); if (verify_playout_) { - EXPECT_FALSE(media_channel1_->playout()); + EXPECT_FALSE(media_channel1()->playout()); } - EXPECT_FALSE(media_channel1_->sending()); + EXPECT_FALSE(media_channel1()->sending()); if (verify_playout_) { - EXPECT_FALSE(media_channel2_->playout()); + EXPECT_FALSE(media_channel2()->playout()); } - EXPECT_FALSE(media_channel2_->sending()); - EXPECT_TRUE(channel1_->Enable(true)); + EXPECT_FALSE(media_channel2()->sending()); + channel1_->Enable(true); + FlushCurrentThread(); if (verify_playout_) { - EXPECT_FALSE(media_channel1_->playout()); + EXPECT_FALSE(media_channel1()->playout()); } - EXPECT_FALSE(media_channel1_->sending()); + EXPECT_FALSE(media_channel1()->sending()); EXPECT_TRUE(channel1_->SetLocalContent(&local_media_content1_, SdpType::kOffer, NULL)); if (verify_playout_) { - EXPECT_TRUE(media_channel1_->playout()); + EXPECT_TRUE(media_channel1()->playout()); } - EXPECT_FALSE(media_channel1_->sending()); + EXPECT_FALSE(media_channel1()->sending()); EXPECT_TRUE(channel2_->SetRemoteContent(&local_media_content1_, SdpType::kOffer, NULL)); if (verify_playout_) { - EXPECT_FALSE(media_channel2_->playout()); + EXPECT_FALSE(media_channel2()->playout()); } - EXPECT_FALSE(media_channel2_->sending()); + EXPECT_FALSE(media_channel2()->sending()); 
EXPECT_TRUE(channel2_->SetLocalContent(&local_media_content2_, SdpType::kAnswer, NULL)); if (verify_playout_) { - EXPECT_FALSE(media_channel2_->playout()); + EXPECT_FALSE(media_channel2()->playout()); } - EXPECT_FALSE(media_channel2_->sending()); + EXPECT_FALSE(media_channel2()->sending()); ConnectFakeTransports(); if (verify_playout_) { - EXPECT_TRUE(media_channel1_->playout()); + EXPECT_TRUE(media_channel1()->playout()); } - EXPECT_FALSE(media_channel1_->sending()); + EXPECT_FALSE(media_channel1()->sending()); if (verify_playout_) { - EXPECT_FALSE(media_channel2_->playout()); + EXPECT_FALSE(media_channel2()->playout()); } - EXPECT_FALSE(media_channel2_->sending()); - EXPECT_TRUE(channel2_->Enable(true)); + EXPECT_FALSE(media_channel2()->sending()); + channel2_->Enable(true); + FlushCurrentThread(); if (verify_playout_) { - EXPECT_TRUE(media_channel2_->playout()); + EXPECT_TRUE(media_channel2()->playout()); } - EXPECT_TRUE(media_channel2_->sending()); + EXPECT_TRUE(media_channel2()->sending()); EXPECT_TRUE(channel1_->SetRemoteContent(&local_media_content2_, SdpType::kAnswer, NULL)); if (verify_playout_) { - EXPECT_TRUE(media_channel1_->playout()); + EXPECT_TRUE(media_channel1()->playout()); } - EXPECT_TRUE(media_channel1_->sending()); + EXPECT_TRUE(media_channel1()->sending()); } // Test that changing the MediaContentDirection in the local and remote @@ -756,16 +758,17 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { // Set |content2| to be InActive. 
content2.set_direction(RtpTransceiverDirection::kInactive); - EXPECT_TRUE(channel1_->Enable(true)); - EXPECT_TRUE(channel2_->Enable(true)); + channel1_->Enable(true); + channel2_->Enable(true); + FlushCurrentThread(); if (verify_playout_) { - EXPECT_FALSE(media_channel1_->playout()); + EXPECT_FALSE(media_channel1()->playout()); } - EXPECT_FALSE(media_channel1_->sending()); + EXPECT_FALSE(media_channel1()->sending()); if (verify_playout_) { - EXPECT_FALSE(media_channel2_->playout()); + EXPECT_FALSE(media_channel2()->playout()); } - EXPECT_FALSE(media_channel2_->sending()); + EXPECT_FALSE(media_channel2()->sending()); EXPECT_TRUE(channel1_->SetLocalContent(&content1, SdpType::kOffer, NULL)); EXPECT_TRUE(channel2_->SetRemoteContent(&content1, SdpType::kOffer, NULL)); @@ -776,13 +779,13 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { ConnectFakeTransports(); if (verify_playout_) { - EXPECT_TRUE(media_channel1_->playout()); + EXPECT_TRUE(media_channel1()->playout()); } - EXPECT_FALSE(media_channel1_->sending()); // remote InActive + EXPECT_FALSE(media_channel1()->sending()); // remote InActive if (verify_playout_) { - EXPECT_FALSE(media_channel2_->playout()); // local InActive + EXPECT_FALSE(media_channel2()->playout()); // local InActive } - EXPECT_FALSE(media_channel2_->sending()); // local InActive + EXPECT_FALSE(media_channel2()->sending()); // local InActive // Update |content2| to be RecvOnly. 
content2.set_direction(RtpTransceiverDirection::kRecvOnly); @@ -792,13 +795,13 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { channel1_->SetRemoteContent(&content2, SdpType::kPrAnswer, NULL)); if (verify_playout_) { - EXPECT_TRUE(media_channel1_->playout()); + EXPECT_TRUE(media_channel1()->playout()); } - EXPECT_TRUE(media_channel1_->sending()); + EXPECT_TRUE(media_channel1()->sending()); if (verify_playout_) { - EXPECT_TRUE(media_channel2_->playout()); // local RecvOnly + EXPECT_TRUE(media_channel2()->playout()); // local RecvOnly } - EXPECT_FALSE(media_channel2_->sending()); // local RecvOnly + EXPECT_FALSE(media_channel2()->sending()); // local RecvOnly // Update |content2| to be SendRecv. content2.set_direction(RtpTransceiverDirection::kSendRecv); @@ -806,13 +809,13 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { EXPECT_TRUE(channel1_->SetRemoteContent(&content2, SdpType::kAnswer, NULL)); if (verify_playout_) { - EXPECT_TRUE(media_channel1_->playout()); + EXPECT_TRUE(media_channel1()->playout()); } - EXPECT_TRUE(media_channel1_->sending()); + EXPECT_TRUE(media_channel1()->sending()); if (verify_playout_) { - EXPECT_TRUE(media_channel2_->playout()); + EXPECT_TRUE(media_channel2()->playout()); } - EXPECT_TRUE(media_channel2_->sending()); + EXPECT_TRUE(media_channel2()->sending()); } // Tests that when the transport channel signals a candidate pair change @@ -878,43 +881,21 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { // Test setting up a call. 
void TestCallSetup() { CreateChannels(0, 0); - EXPECT_FALSE(channel1_->srtp_active()); + EXPECT_FALSE(IsSrtpActive(channel1_)); EXPECT_TRUE(SendInitiate()); if (verify_playout_) { - EXPECT_TRUE(media_channel1_->playout()); + EXPECT_TRUE(media_channel1()->playout()); } - EXPECT_FALSE(media_channel1_->sending()); + EXPECT_FALSE(media_channel1()->sending()); EXPECT_TRUE(SendAccept()); - EXPECT_FALSE(channel1_->srtp_active()); - EXPECT_TRUE(media_channel1_->sending()); - EXPECT_EQ(1U, media_channel1_->codecs().size()); + EXPECT_FALSE(IsSrtpActive(channel1_)); + EXPECT_TRUE(media_channel1()->sending()); + EXPECT_EQ(1U, media_channel1()->codecs().size()); if (verify_playout_) { - EXPECT_TRUE(media_channel2_->playout()); + EXPECT_TRUE(media_channel2()->playout()); } - EXPECT_TRUE(media_channel2_->sending()); - EXPECT_EQ(1U, media_channel2_->codecs().size()); - } - - // Test that we don't crash if packets are sent during call teardown - // when RTCP mux is enabled. This is a regression test against a specific - // race condition that would only occur when a RTCP packet was sent during - // teardown of a channel on which RTCP mux was enabled. - void TestCallTeardownRtcpMux() { - class LastWordMediaChannel : public T::MediaChannel { - public: - LastWordMediaChannel() : T::MediaChannel(NULL, typename T::Options()) {} - ~LastWordMediaChannel() { - T::MediaChannel::SendRtp(kPcmuFrame, sizeof(kPcmuFrame), - rtc::PacketOptions()); - T::MediaChannel::SendRtcp(kRtcpReport, sizeof(kRtcpReport)); - } - }; - CreateChannels(std::make_unique(), - std::make_unique(), RTCP_MUX, - RTCP_MUX); - EXPECT_TRUE(SendInitiate()); - EXPECT_TRUE(SendAccept()); - EXPECT_TRUE(Terminate()); + EXPECT_TRUE(media_channel2()->sending()); + EXPECT_EQ(1U, media_channel2()->codecs().size()); } // Send voice RTP data to the other side and ensure it gets there. 
@@ -922,8 +903,8 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateChannels(RTCP_MUX, RTCP_MUX); EXPECT_TRUE(SendInitiate()); EXPECT_TRUE(SendAccept()); - EXPECT_TRUE(channel1_->rtp_transport()->rtcp_mux_enabled()); - EXPECT_TRUE(channel2_->rtp_transport()->rtcp_mux_enabled()); + EXPECT_TRUE(IsRtcpMuxEnabled(channel1_)); + EXPECT_TRUE(IsRtcpMuxEnabled(channel2_)); SendRtp1(); SendRtp2(); WaitForThreads(); @@ -946,15 +927,13 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { void SendDtlsSrtpToDtlsSrtp(int flags1, int flags2) { CreateChannels(flags1 | DTLS, flags2 | DTLS); - EXPECT_FALSE(channel1_->srtp_active()); - EXPECT_FALSE(channel2_->srtp_active()); + EXPECT_FALSE(IsSrtpActive(channel1_)); + EXPECT_FALSE(IsSrtpActive(channel2_)); EXPECT_TRUE(SendInitiate()); WaitForThreads(); - EXPECT_TRUE(channel1_->writable()); - EXPECT_TRUE(channel2_->writable()); EXPECT_TRUE(SendAccept()); - EXPECT_TRUE(channel1_->srtp_active()); - EXPECT_TRUE(channel2_->srtp_active()); + EXPECT_TRUE(IsSrtpActive(channel1_)); + EXPECT_TRUE(IsSrtpActive(channel2_)); SendRtp1(); SendRtp2(); WaitForThreads(); @@ -972,10 +951,10 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateChannels(SSRC_MUX | RTCP_MUX | DTLS, SSRC_MUX | RTCP_MUX | DTLS); EXPECT_TRUE(SendOffer()); EXPECT_TRUE(SendProvisionalAnswer()); - EXPECT_TRUE(channel1_->srtp_active()); - EXPECT_TRUE(channel2_->srtp_active()); - EXPECT_TRUE(channel1_->rtp_transport()->rtcp_mux_enabled()); - EXPECT_TRUE(channel2_->rtp_transport()->rtcp_mux_enabled()); + EXPECT_TRUE(IsSrtpActive(channel1_)); + EXPECT_TRUE(IsSrtpActive(channel2_)); + EXPECT_TRUE(IsRtcpMuxEnabled(channel1_)); + EXPECT_TRUE(IsRtcpMuxEnabled(channel2_)); WaitForThreads(); // Wait for 'sending' flag go through network thread. 
SendCustomRtp1(kSsrc1, ++sequence_number1_1); WaitForThreads(); @@ -988,8 +967,8 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { // Complete call setup and ensure everything is still OK. EXPECT_TRUE(SendFinalAnswer()); - EXPECT_TRUE(channel1_->srtp_active()); - EXPECT_TRUE(channel2_->srtp_active()); + EXPECT_TRUE(IsSrtpActive(channel1_)); + EXPECT_TRUE(IsSrtpActive(channel2_)); SendCustomRtp1(kSsrc1, ++sequence_number1_1); SendCustomRtp2(kSsrc2, ++sequence_number2_2); WaitForThreads(); @@ -1018,8 +997,8 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateChannels(RTCP_MUX, RTCP_MUX); EXPECT_TRUE(SendInitiate()); EXPECT_TRUE(SendAccept()); - EXPECT_TRUE(channel1_->rtp_transport()->rtcp_mux_enabled()); - EXPECT_TRUE(channel2_->rtp_transport()->rtcp_mux_enabled()); + EXPECT_TRUE(IsRtcpMuxEnabled(channel1_)); + EXPECT_TRUE(IsRtcpMuxEnabled(channel2_)); SendRtp1(); SendRtp2(); WaitForThreads(); @@ -1042,7 +1021,7 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { network_thread_->Invoke(RTC_FROM_HERE, [this] { fake_rtp_dtls_transport1_->SetWritable(true); }); - EXPECT_TRUE(media_channel1_->sending()); + EXPECT_TRUE(media_channel1()->sending()); SendRtp1(); SendRtp2(); WaitForThreads(); @@ -1056,7 +1035,7 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { bool asymmetric = true; fake_rtp_dtls_transport1_->SetDestination(nullptr, asymmetric); }); - EXPECT_TRUE(media_channel1_->sending()); + EXPECT_TRUE(media_channel1()->sending()); // Should fail also. 
SendRtp1(); @@ -1072,7 +1051,7 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { fake_rtp_dtls_transport1_->SetDestination(fake_rtp_dtls_transport2_.get(), asymmetric); }); - EXPECT_TRUE(media_channel1_->sending()); + EXPECT_TRUE(media_channel1()->sending()); SendRtp1(); SendRtp2(); WaitForThreads(); @@ -1125,17 +1104,17 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { std::unique_ptr content( CreateMediaContentWithStream(1)); - media_channel1_->set_fail_set_recv_codecs(true); + media_channel1()->set_fail_set_recv_codecs(true); EXPECT_FALSE( channel1_->SetLocalContent(content.get(), SdpType::kOffer, &err)); EXPECT_FALSE( channel1_->SetLocalContent(content.get(), SdpType::kAnswer, &err)); - media_channel1_->set_fail_set_send_codecs(true); + media_channel1()->set_fail_set_send_codecs(true); EXPECT_FALSE( channel1_->SetRemoteContent(content.get(), SdpType::kOffer, &err)); - media_channel1_->set_fail_set_send_codecs(true); + media_channel1()->set_fail_set_send_codecs(true); EXPECT_FALSE( channel1_->SetRemoteContent(content.get(), SdpType::kAnswer, &err)); } @@ -1148,14 +1127,14 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateMediaContentWithStream(1)); EXPECT_TRUE( channel1_->SetLocalContent(content1.get(), SdpType::kOffer, &err)); - EXPECT_TRUE(media_channel1_->HasSendStream(1)); + EXPECT_TRUE(media_channel1()->HasSendStream(1)); std::unique_ptr content2( CreateMediaContentWithStream(2)); EXPECT_TRUE( channel1_->SetLocalContent(content2.get(), SdpType::kOffer, &err)); - EXPECT_FALSE(media_channel1_->HasSendStream(1)); - EXPECT_TRUE(media_channel1_->HasSendStream(2)); + EXPECT_FALSE(media_channel1()->HasSendStream(1)); + EXPECT_TRUE(media_channel1()->HasSendStream(2)); } void TestReceiveTwoOffers() { @@ -1166,14 +1145,14 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateMediaContentWithStream(1)); EXPECT_TRUE( 
channel1_->SetRemoteContent(content1.get(), SdpType::kOffer, &err)); - EXPECT_TRUE(media_channel1_->HasRecvStream(1)); + EXPECT_TRUE(media_channel1()->HasRecvStream(1)); std::unique_ptr content2( CreateMediaContentWithStream(2)); EXPECT_TRUE( channel1_->SetRemoteContent(content2.get(), SdpType::kOffer, &err)); - EXPECT_FALSE(media_channel1_->HasRecvStream(1)); - EXPECT_TRUE(media_channel1_->HasRecvStream(2)); + EXPECT_FALSE(media_channel1()->HasRecvStream(1)); + EXPECT_TRUE(media_channel1()->HasRecvStream(2)); } void TestSendPrAnswer() { @@ -1185,24 +1164,24 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateMediaContentWithStream(1)); EXPECT_TRUE( channel1_->SetRemoteContent(content1.get(), SdpType::kOffer, &err)); - EXPECT_TRUE(media_channel1_->HasRecvStream(1)); + EXPECT_TRUE(media_channel1()->HasRecvStream(1)); // Send PR answer std::unique_ptr content2( CreateMediaContentWithStream(2)); EXPECT_TRUE( channel1_->SetLocalContent(content2.get(), SdpType::kPrAnswer, &err)); - EXPECT_TRUE(media_channel1_->HasRecvStream(1)); - EXPECT_TRUE(media_channel1_->HasSendStream(2)); + EXPECT_TRUE(media_channel1()->HasRecvStream(1)); + EXPECT_TRUE(media_channel1()->HasSendStream(2)); // Send answer std::unique_ptr content3( CreateMediaContentWithStream(3)); EXPECT_TRUE( channel1_->SetLocalContent(content3.get(), SdpType::kAnswer, &err)); - EXPECT_TRUE(media_channel1_->HasRecvStream(1)); - EXPECT_FALSE(media_channel1_->HasSendStream(2)); - EXPECT_TRUE(media_channel1_->HasSendStream(3)); + EXPECT_TRUE(media_channel1()->HasRecvStream(1)); + EXPECT_FALSE(media_channel1()->HasSendStream(2)); + EXPECT_TRUE(media_channel1()->HasSendStream(3)); } void TestReceivePrAnswer() { @@ -1214,37 +1193,39 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateMediaContentWithStream(1)); EXPECT_TRUE( channel1_->SetLocalContent(content1.get(), SdpType::kOffer, &err)); - EXPECT_TRUE(media_channel1_->HasSendStream(1)); + 
EXPECT_TRUE(media_channel1()->HasSendStream(1)); // Receive PR answer std::unique_ptr content2( CreateMediaContentWithStream(2)); EXPECT_TRUE( channel1_->SetRemoteContent(content2.get(), SdpType::kPrAnswer, &err)); - EXPECT_TRUE(media_channel1_->HasSendStream(1)); - EXPECT_TRUE(media_channel1_->HasRecvStream(2)); + EXPECT_TRUE(media_channel1()->HasSendStream(1)); + EXPECT_TRUE(media_channel1()->HasRecvStream(2)); // Receive answer std::unique_ptr content3( CreateMediaContentWithStream(3)); EXPECT_TRUE( channel1_->SetRemoteContent(content3.get(), SdpType::kAnswer, &err)); - EXPECT_TRUE(media_channel1_->HasSendStream(1)); - EXPECT_FALSE(media_channel1_->HasRecvStream(2)); - EXPECT_TRUE(media_channel1_->HasRecvStream(3)); + EXPECT_TRUE(media_channel1()->HasSendStream(1)); + EXPECT_FALSE(media_channel1()->HasRecvStream(2)); + EXPECT_TRUE(media_channel1()->HasRecvStream(3)); } void TestOnTransportReadyToSend() { CreateChannels(0, 0); - EXPECT_FALSE(media_channel1_->ready_to_send()); + EXPECT_FALSE(media_channel1()->ready_to_send()); - channel1_->OnTransportReadyToSend(true); + network_thread_->PostTask( + RTC_FROM_HERE, [this] { channel1_->OnTransportReadyToSend(true); }); WaitForThreads(); - EXPECT_TRUE(media_channel1_->ready_to_send()); + EXPECT_TRUE(media_channel1()->ready_to_send()); - channel1_->OnTransportReadyToSend(false); + network_thread_->PostTask( + RTC_FROM_HERE, [this] { channel1_->OnTransportReadyToSend(false); }); WaitForThreads(); - EXPECT_FALSE(media_channel1_->ready_to_send()); + EXPECT_FALSE(media_channel1()->ready_to_send()); } bool SetRemoteContentWithBitrateLimit(int remote_limit) { @@ -1272,8 +1253,8 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateChannels(0, 0); EXPECT_TRUE(channel1_->SetLocalContent(&local_media_content1_, SdpType::kOffer, NULL)); - EXPECT_EQ(media_channel1_->max_bps(), -1); - VerifyMaxBitrate(media_channel1_->GetRtpSendParameters(kSsrc1), + EXPECT_EQ(media_channel1()->max_bps(), -1); + 
VerifyMaxBitrate(media_channel1()->GetRtpSendParameters(kSsrc1), absl::nullopt); } @@ -1290,22 +1271,27 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { CreateChannels(DTLS, DTLS); - channel1_->SetOption(cricket::BaseChannel::ST_RTP, - rtc::Socket::Option::OPT_SNDBUF, kSndBufSize); - channel2_->SetOption(cricket::BaseChannel::ST_RTP, - rtc::Socket::Option::OPT_RCVBUF, kRcvBufSize); - new_rtp_transport_ = CreateDtlsSrtpTransport( fake_rtp_dtls_transport2_.get(), fake_rtcp_dtls_transport2_.get()); - channel1_->SetRtpTransport(new_rtp_transport_.get()); - int option_val; - ASSERT_TRUE(fake_rtp_dtls_transport2_->GetOption( - rtc::Socket::Option::OPT_SNDBUF, &option_val)); - EXPECT_EQ(kSndBufSize, option_val); - ASSERT_TRUE(fake_rtp_dtls_transport2_->GetOption( - rtc::Socket::Option::OPT_RCVBUF, &option_val)); - EXPECT_EQ(kRcvBufSize, option_val); + bool rcv_success, send_success; + int rcv_buf, send_buf; + network_thread_->Invoke(RTC_FROM_HERE, [&] { + channel1_->SetOption(cricket::BaseChannel::ST_RTP, + rtc::Socket::Option::OPT_SNDBUF, kSndBufSize); + channel2_->SetOption(cricket::BaseChannel::ST_RTP, + rtc::Socket::Option::OPT_RCVBUF, kRcvBufSize); + channel1_->SetRtpTransport(new_rtp_transport_.get()); + send_success = fake_rtp_dtls_transport2_->GetOption( + rtc::Socket::Option::OPT_SNDBUF, &send_buf); + rcv_success = fake_rtp_dtls_transport2_->GetOption( + rtc::Socket::Option::OPT_RCVBUF, &rcv_buf); + }); + + ASSERT_TRUE(send_success); + EXPECT_EQ(kSndBufSize, send_buf); + ASSERT_TRUE(rcv_success); + EXPECT_EQ(kRcvBufSize, rcv_buf); } void CreateSimulcastContent(const std::vector& rids, @@ -1369,6 +1355,9 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { thread->ProcessMessages(0); } } + static void FlushCurrentThread() { + rtc::Thread::Current()->ProcessMessages(0); + } void WaitForThreads(rtc::ArrayView threads) { // |threads| and current thread post packets to network thread. 
for (rtc::Thread* thread : threads) { @@ -1384,9 +1373,24 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { // Worker thread = current Thread process received messages. ProcessThreadQueue(rtc::Thread::Current()); } + + typename T::MediaChannel* media_channel1() { + RTC_DCHECK(channel1_); + RTC_DCHECK(channel1_->media_channel()); + return static_cast(channel1_->media_channel()); + } + + typename T::MediaChannel* media_channel2() { + RTC_DCHECK(channel2_); + RTC_DCHECK(channel2_->media_channel()); + return static_cast(channel2_->media_channel()); + } + // TODO(pbos): Remove playout from all media channels and let renderers mute // themselves. const bool verify_playout_; + rtc::scoped_refptr network_thread_safety_ = + webrtc::PendingTaskSafetyFlag::CreateDetached(); std::unique_ptr network_thread_keeper_; rtc::Thread* network_thread_; std::unique_ptr fake_rtp_dtls_transport1_; @@ -1401,9 +1405,6 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { std::unique_ptr rtp_transport2_; std::unique_ptr new_rtp_transport_; cricket::FakeMediaEngine media_engine_; - // The media channels are owned by the voice channel objects below. - typename T::MediaChannel* media_channel1_ = nullptr; - typename T::MediaChannel* media_channel2_ = nullptr; std::unique_ptr channel1_; std::unique_ptr channel2_; typename T::Content local_media_content1_; @@ -1413,8 +1414,6 @@ class ChannelTest : public ::testing::Test, public sigslot::has_slots<> { // The RTP and RTCP packets to send in the tests. 
rtc::Buffer rtp_packet_; rtc::Buffer rtcp_packet_; - int rtcp_mux_activated_callbacks1_ = 0; - int rtcp_mux_activated_callbacks2_ = 0; cricket::CandidatePairInterface* last_selected_candidate_pair_; rtc::UniqueRandomIdGenerator ssrc_generator_; }; @@ -1431,7 +1430,7 @@ std::unique_ptr ChannelTest::CreateChannel( worker_thread, network_thread, signaling_thread, std::move(ch), cricket::CN_AUDIO, (flags & DTLS) != 0, webrtc::CryptoOptions(), &ssrc_generator_); - channel->Init_w(rtp_transport, webrtc::MediaTransportConfig()); + channel->Init_w(rtp_transport); return channel; } @@ -1514,7 +1513,7 @@ std::unique_ptr ChannelTest::CreateChannel( worker_thread, network_thread, signaling_thread, std::move(ch), cricket::CN_VIDEO, (flags & DTLS) != 0, webrtc::CryptoOptions(), &ssrc_generator_); - channel->Init_w(rtp_transport, webrtc::MediaTransportConfig()); + channel->Init_w(rtp_transport); return channel; } @@ -1565,8 +1564,8 @@ class VideoChannelDoubleThreadTest : public ChannelTest { TEST_F(VoiceChannelSingleThreadTest, TestInit) { Base::TestInit(); - EXPECT_FALSE(media_channel1_->IsStreamMuted(0)); - EXPECT_TRUE(media_channel1_->dtmf_info_queue().empty()); + EXPECT_FALSE(media_channel1()->IsStreamMuted(0)); + EXPECT_TRUE(media_channel1()->dtmf_info_queue().empty()); } TEST_F(VoiceChannelSingleThreadTest, TestDeinit) { @@ -1627,10 +1626,6 @@ TEST_F(VoiceChannelSingleThreadTest, TestCallSetup) { Base::TestCallSetup(); } -TEST_F(VoiceChannelSingleThreadTest, TestCallTeardownRtcpMux) { - Base::TestCallTeardownRtcpMux(); -} - TEST_F(VoiceChannelSingleThreadTest, SendRtpToRtp) { Base::SendRtpToRtp(); } @@ -1706,8 +1701,8 @@ TEST_F(VoiceChannelSingleThreadTest, SocketOptionsMergedOnSetTransport) { // VoiceChannelDoubleThreadTest TEST_F(VoiceChannelDoubleThreadTest, TestInit) { Base::TestInit(); - EXPECT_FALSE(media_channel1_->IsStreamMuted(0)); - EXPECT_TRUE(media_channel1_->dtmf_info_queue().empty()); + EXPECT_FALSE(media_channel1()->IsStreamMuted(0)); + 
EXPECT_TRUE(media_channel1()->dtmf_info_queue().empty()); } TEST_F(VoiceChannelDoubleThreadTest, TestDeinit) { @@ -1768,10 +1763,6 @@ TEST_F(VoiceChannelDoubleThreadTest, TestCallSetup) { Base::TestCallSetup(); } -TEST_F(VoiceChannelDoubleThreadTest, TestCallTeardownRtcpMux) { - Base::TestCallTeardownRtcpMux(); -} - TEST_F(VoiceChannelDoubleThreadTest, SendRtpToRtp) { Base::SendRtpToRtp(); } @@ -1907,10 +1898,6 @@ TEST_F(VideoChannelSingleThreadTest, TestCallSetup) { Base::TestCallSetup(); } -TEST_F(VideoChannelSingleThreadTest, TestCallTeardownRtcpMux) { - Base::TestCallTeardownRtcpMux(); -} - TEST_F(VideoChannelSingleThreadTest, SendRtpToRtp) { Base::SendRtpToRtp(); } @@ -1997,12 +1984,12 @@ TEST_F(VideoChannelSingleThreadTest, TestSetLocalOfferWithPacketization) { CreateChannels(0, 0); EXPECT_TRUE(channel1_->SetLocalContent(&video, SdpType::kOffer, NULL)); - EXPECT_THAT(media_channel1_->send_codecs(), testing::IsEmpty()); - ASSERT_THAT(media_channel1_->recv_codecs(), testing::SizeIs(2)); - EXPECT_TRUE(media_channel1_->recv_codecs()[0].Matches(kVp8Codec)); - EXPECT_EQ(media_channel1_->recv_codecs()[0].packetization, absl::nullopt); - EXPECT_TRUE(media_channel1_->recv_codecs()[1].Matches(vp9_codec)); - EXPECT_EQ(media_channel1_->recv_codecs()[1].packetization, + EXPECT_THAT(media_channel1()->send_codecs(), testing::IsEmpty()); + ASSERT_THAT(media_channel1()->recv_codecs(), testing::SizeIs(2)); + EXPECT_TRUE(media_channel1()->recv_codecs()[0].Matches(kVp8Codec)); + EXPECT_EQ(media_channel1()->recv_codecs()[0].packetization, absl::nullopt); + EXPECT_TRUE(media_channel1()->recv_codecs()[1].Matches(vp9_codec)); + EXPECT_EQ(media_channel1()->recv_codecs()[1].packetization, cricket::kPacketizationParamRaw); } @@ -2016,12 +2003,12 @@ TEST_F(VideoChannelSingleThreadTest, TestSetRemoteOfferWithPacketization) { CreateChannels(0, 0); EXPECT_TRUE(channel1_->SetRemoteContent(&video, SdpType::kOffer, NULL)); - EXPECT_THAT(media_channel1_->recv_codecs(), testing::IsEmpty()); - 
ASSERT_THAT(media_channel1_->send_codecs(), testing::SizeIs(2)); - EXPECT_TRUE(media_channel1_->send_codecs()[0].Matches(kVp8Codec)); - EXPECT_EQ(media_channel1_->send_codecs()[0].packetization, absl::nullopt); - EXPECT_TRUE(media_channel1_->send_codecs()[1].Matches(vp9_codec)); - EXPECT_EQ(media_channel1_->send_codecs()[1].packetization, + EXPECT_THAT(media_channel1()->recv_codecs(), testing::IsEmpty()); + ASSERT_THAT(media_channel1()->send_codecs(), testing::SizeIs(2)); + EXPECT_TRUE(media_channel1()->send_codecs()[0].Matches(kVp8Codec)); + EXPECT_EQ(media_channel1()->send_codecs()[0].packetization, absl::nullopt); + EXPECT_TRUE(media_channel1()->send_codecs()[1].Matches(vp9_codec)); + EXPECT_EQ(media_channel1()->send_codecs()[1].packetization, cricket::kPacketizationParamRaw); } @@ -2036,17 +2023,17 @@ TEST_F(VideoChannelSingleThreadTest, TestSetAnswerWithPacketization) { EXPECT_TRUE(channel1_->SetLocalContent(&video, SdpType::kOffer, NULL)); EXPECT_TRUE(channel1_->SetRemoteContent(&video, SdpType::kAnswer, NULL)); - ASSERT_THAT(media_channel1_->recv_codecs(), testing::SizeIs(2)); - EXPECT_TRUE(media_channel1_->recv_codecs()[0].Matches(kVp8Codec)); - EXPECT_EQ(media_channel1_->recv_codecs()[0].packetization, absl::nullopt); - EXPECT_TRUE(media_channel1_->recv_codecs()[1].Matches(vp9_codec)); - EXPECT_EQ(media_channel1_->recv_codecs()[1].packetization, + ASSERT_THAT(media_channel1()->recv_codecs(), testing::SizeIs(2)); + EXPECT_TRUE(media_channel1()->recv_codecs()[0].Matches(kVp8Codec)); + EXPECT_EQ(media_channel1()->recv_codecs()[0].packetization, absl::nullopt); + EXPECT_TRUE(media_channel1()->recv_codecs()[1].Matches(vp9_codec)); + EXPECT_EQ(media_channel1()->recv_codecs()[1].packetization, cricket::kPacketizationParamRaw); - EXPECT_THAT(media_channel1_->send_codecs(), testing::SizeIs(2)); - EXPECT_TRUE(media_channel1_->send_codecs()[0].Matches(kVp8Codec)); - EXPECT_EQ(media_channel1_->send_codecs()[0].packetization, absl::nullopt); - 
EXPECT_TRUE(media_channel1_->send_codecs()[1].Matches(vp9_codec)); - EXPECT_EQ(media_channel1_->send_codecs()[1].packetization, + EXPECT_THAT(media_channel1()->send_codecs(), testing::SizeIs(2)); + EXPECT_TRUE(media_channel1()->send_codecs()[0].Matches(kVp8Codec)); + EXPECT_EQ(media_channel1()->send_codecs()[0].packetization, absl::nullopt); + EXPECT_TRUE(media_channel1()->send_codecs()[1].Matches(vp9_codec)); + EXPECT_EQ(media_channel1()->send_codecs()[1].packetization, cricket::kPacketizationParamRaw); } @@ -2064,10 +2051,10 @@ TEST_F(VideoChannelSingleThreadTest, TestSetLocalAnswerWithoutPacketization) { EXPECT_TRUE( channel1_->SetRemoteContent(&remote_video, SdpType::kOffer, NULL)); EXPECT_TRUE(channel1_->SetLocalContent(&local_video, SdpType::kAnswer, NULL)); - ASSERT_THAT(media_channel1_->recv_codecs(), testing::SizeIs(1)); - EXPECT_EQ(media_channel1_->recv_codecs()[0].packetization, absl::nullopt); - ASSERT_THAT(media_channel1_->send_codecs(), testing::SizeIs(1)); - EXPECT_EQ(media_channel1_->send_codecs()[0].packetization, absl::nullopt); + ASSERT_THAT(media_channel1()->recv_codecs(), testing::SizeIs(1)); + EXPECT_EQ(media_channel1()->recv_codecs()[0].packetization, absl::nullopt); + ASSERT_THAT(media_channel1()->send_codecs(), testing::SizeIs(1)); + EXPECT_EQ(media_channel1()->send_codecs()[0].packetization, absl::nullopt); } TEST_F(VideoChannelSingleThreadTest, TestSetRemoteAnswerWithoutPacketization) { @@ -2084,10 +2071,10 @@ TEST_F(VideoChannelSingleThreadTest, TestSetRemoteAnswerWithoutPacketization) { EXPECT_TRUE(channel1_->SetLocalContent(&local_video, SdpType::kOffer, NULL)); EXPECT_TRUE( channel1_->SetRemoteContent(&remote_video, SdpType::kAnswer, NULL)); - ASSERT_THAT(media_channel1_->recv_codecs(), testing::SizeIs(1)); - EXPECT_EQ(media_channel1_->recv_codecs()[0].packetization, absl::nullopt); - ASSERT_THAT(media_channel1_->send_codecs(), testing::SizeIs(1)); - EXPECT_EQ(media_channel1_->send_codecs()[0].packetization, absl::nullopt); + 
ASSERT_THAT(media_channel1()->recv_codecs(), testing::SizeIs(1)); + EXPECT_EQ(media_channel1()->recv_codecs()[0].packetization, absl::nullopt); + ASSERT_THAT(media_channel1()->send_codecs(), testing::SizeIs(1)); + EXPECT_EQ(media_channel1()->send_codecs()[0].packetization, absl::nullopt); } TEST_F(VideoChannelSingleThreadTest, @@ -2106,10 +2093,10 @@ TEST_F(VideoChannelSingleThreadTest, EXPECT_TRUE(channel1_->SetLocalContent(&local_video, SdpType::kOffer, NULL)); EXPECT_FALSE( channel1_->SetRemoteContent(&remote_video, SdpType::kAnswer, NULL)); - ASSERT_THAT(media_channel1_->recv_codecs(), testing::SizeIs(1)); - EXPECT_EQ(media_channel1_->recv_codecs()[0].packetization, + ASSERT_THAT(media_channel1()->recv_codecs(), testing::SizeIs(1)); + EXPECT_EQ(media_channel1()->recv_codecs()[0].packetization, cricket::kPacketizationParamRaw); - EXPECT_THAT(media_channel1_->send_codecs(), testing::IsEmpty()); + EXPECT_THAT(media_channel1()->send_codecs(), testing::IsEmpty()); } TEST_F(VideoChannelSingleThreadTest, @@ -2128,9 +2115,9 @@ TEST_F(VideoChannelSingleThreadTest, channel1_->SetRemoteContent(&remote_video, SdpType::kOffer, NULL)); EXPECT_FALSE( channel1_->SetLocalContent(&local_video, SdpType::kAnswer, NULL)); - EXPECT_THAT(media_channel1_->recv_codecs(), testing::IsEmpty()); - ASSERT_THAT(media_channel1_->send_codecs(), testing::SizeIs(1)); - EXPECT_EQ(media_channel1_->send_codecs()[0].packetization, absl::nullopt); + EXPECT_THAT(media_channel1()->recv_codecs(), testing::IsEmpty()); + ASSERT_THAT(media_channel1()->send_codecs(), testing::SizeIs(1)); + EXPECT_EQ(media_channel1()->send_codecs()[0].packetization, absl::nullopt); } // VideoChannelDoubleThreadTest @@ -2196,10 +2183,6 @@ TEST_F(VideoChannelDoubleThreadTest, TestCallSetup) { Base::TestCallSetup(); } -TEST_F(VideoChannelDoubleThreadTest, TestCallTeardownRtcpMux) { - Base::TestCallTeardownRtcpMux(); -} - TEST_F(VideoChannelDoubleThreadTest, SendRtpToRtp) { Base::SendRtpToRtp(); } @@ -2272,220 +2255,5 @@ 
TEST_F(VideoChannelDoubleThreadTest, SocketOptionsMergedOnSetTransport) { Base::SocketOptionsMergedOnSetTransport(); } -// RtpDataChannelSingleThreadTest -class RtpDataChannelSingleThreadTest : public ChannelTest { - public: - typedef ChannelTest Base; - RtpDataChannelSingleThreadTest() - : Base(true, kDataPacket, kRtcpReport, NetworkIsWorker::Yes) {} -}; - -// RtpDataChannelDoubleThreadTest -class RtpDataChannelDoubleThreadTest : public ChannelTest { - public: - typedef ChannelTest Base; - RtpDataChannelDoubleThreadTest() - : Base(true, kDataPacket, kRtcpReport, NetworkIsWorker::No) {} -}; - -// Override to avoid engine channel parameter. -template <> -std::unique_ptr ChannelTest::CreateChannel( - rtc::Thread* worker_thread, - rtc::Thread* network_thread, - std::unique_ptr ch, - webrtc::RtpTransportInternal* rtp_transport, - int flags) { - rtc::Thread* signaling_thread = rtc::Thread::Current(); - auto channel = std::make_unique( - worker_thread, network_thread, signaling_thread, std::move(ch), - cricket::CN_DATA, (flags & DTLS) != 0, webrtc::CryptoOptions(), - &ssrc_generator_); - channel->Init_w(rtp_transport, webrtc::MediaTransportConfig()); - return channel; -} - -template <> -void ChannelTest::CreateContent( - int flags, - const cricket::AudioCodec& audio_codec, - const cricket::VideoCodec& video_codec, - cricket::RtpDataContentDescription* data) { - data->AddCodec(kGoogleDataCodec); - data->set_rtcp_mux((flags & RTCP_MUX) != 0); -} - -template <> -void ChannelTest::CopyContent( - const cricket::RtpDataContentDescription& source, - cricket::RtpDataContentDescription* data) { - *data = source; -} - -template <> -bool ChannelTest::CodecMatches(const cricket::DataCodec& c1, - const cricket::DataCodec& c2) { - return c1.name == c2.name; -} - -template <> -void ChannelTest::AddLegacyStreamInContent( - uint32_t ssrc, - int flags, - cricket::RtpDataContentDescription* data) { - data->AddLegacyStream(ssrc); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestInit) { - 
Base::TestInit(); - EXPECT_FALSE(media_channel1_->IsStreamMuted(0)); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestDeinit) { - Base::TestDeinit(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestSetContents) { - Base::TestSetContents(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestSetContentsNullOffer) { - Base::TestSetContentsNullOffer(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestSetContentsRtcpMux) { - Base::TestSetContentsRtcpMux(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestChangeStreamParamsInContent) { - Base::TestChangeStreamParamsInContent(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestPlayoutAndSendingStates) { - Base::TestPlayoutAndSendingStates(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestMediaContentDirection) { - Base::TestMediaContentDirection(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestCallSetup) { - Base::TestCallSetup(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestCallTeardownRtcpMux) { - Base::TestCallTeardownRtcpMux(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestOnTransportReadyToSend) { - Base::TestOnTransportReadyToSend(); -} - -TEST_F(RtpDataChannelSingleThreadTest, SendRtpToRtp) { - Base::SendRtpToRtp(); -} - -TEST_F(RtpDataChannelSingleThreadTest, SendRtpToRtpOnThread) { - Base::SendRtpToRtpOnThread(); -} - -TEST_F(RtpDataChannelSingleThreadTest, SendWithWritabilityLoss) { - Base::SendWithWritabilityLoss(); -} - -TEST_F(RtpDataChannelSingleThreadTest, SocketOptionsMergedOnSetTransport) { - Base::SocketOptionsMergedOnSetTransport(); -} - -TEST_F(RtpDataChannelSingleThreadTest, TestSendData) { - CreateChannels(0, 0); - EXPECT_TRUE(SendInitiate()); - EXPECT_TRUE(SendAccept()); - - cricket::SendDataParams params; - params.ssrc = 42; - unsigned char data[] = {'f', 'o', 'o'}; - rtc::CopyOnWriteBuffer payload(data, 3); - cricket::SendDataResult result; - ASSERT_TRUE(media_channel1_->SendData(params, payload, &result)); - EXPECT_EQ(params.ssrc, media_channel1_->last_sent_data_params().ssrc); - 
EXPECT_EQ("foo", media_channel1_->last_sent_data()); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestInit) { - Base::TestInit(); - EXPECT_FALSE(media_channel1_->IsStreamMuted(0)); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestDeinit) { - Base::TestDeinit(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestSetContents) { - Base::TestSetContents(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestSetContentsNullOffer) { - Base::TestSetContentsNullOffer(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestSetContentsRtcpMux) { - Base::TestSetContentsRtcpMux(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestChangeStreamParamsInContent) { - Base::TestChangeStreamParamsInContent(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestPlayoutAndSendingStates) { - Base::TestPlayoutAndSendingStates(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestMediaContentDirection) { - Base::TestMediaContentDirection(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestCallSetup) { - Base::TestCallSetup(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestCallTeardownRtcpMux) { - Base::TestCallTeardownRtcpMux(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestOnTransportReadyToSend) { - Base::TestOnTransportReadyToSend(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, SendRtpToRtp) { - Base::SendRtpToRtp(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, SendRtpToRtpOnThread) { - Base::SendRtpToRtpOnThread(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, SendWithWritabilityLoss) { - Base::SendWithWritabilityLoss(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, SocketOptionsMergedOnSetTransport) { - Base::SocketOptionsMergedOnSetTransport(); -} - -TEST_F(RtpDataChannelDoubleThreadTest, TestSendData) { - CreateChannels(0, 0); - EXPECT_TRUE(SendInitiate()); - EXPECT_TRUE(SendAccept()); - - cricket::SendDataParams params; - params.ssrc = 42; - unsigned char data[] = {'f', 'o', 'o'}; - rtc::CopyOnWriteBuffer payload(data, 3); - cricket::SendDataResult result; - 
ASSERT_TRUE(media_channel1_->SendData(params, payload, &result)); - EXPECT_EQ(params.ssrc, media_channel1_->last_sent_data_params().ssrc); - EXPECT_EQ("foo", media_channel1_->last_sent_data()); -} // TODO(pthatcher): TestSetReceiver? diff --git a/pc/composite_data_channel_transport.cc b/pc/composite_data_channel_transport.cc deleted file mode 100644 index e66febc12b..0000000000 --- a/pc/composite_data_channel_transport.cc +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2019 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "pc/composite_data_channel_transport.h" - -#include - -#include "absl/algorithm/container.h" - -namespace webrtc { - -CompositeDataChannelTransport::CompositeDataChannelTransport( - std::vector transports) - : transports_(std::move(transports)) { - for (auto transport : transports_) { - transport->SetDataSink(this); - } -} - -CompositeDataChannelTransport::~CompositeDataChannelTransport() { - for (auto transport : transports_) { - transport->SetDataSink(nullptr); - } -} - -void CompositeDataChannelTransport::SetSendTransport( - DataChannelTransportInterface* send_transport) { - if (!absl::c_linear_search(transports_, send_transport)) { - return; - } - send_transport_ = send_transport; - // NB: OnReadyToSend() checks if we're actually ready to send, and signals - // |sink_| if appropriate. This signal is required upon setting the sink. 
- OnReadyToSend(); -} - -void CompositeDataChannelTransport::RemoveTransport( - DataChannelTransportInterface* transport) { - RTC_DCHECK(transport != send_transport_) << "Cannot remove send transport"; - - auto it = absl::c_find(transports_, transport); - if (it == transports_.end()) { - return; - } - - transport->SetDataSink(nullptr); - transports_.erase(it); -} - -RTCError CompositeDataChannelTransport::OpenChannel(int channel_id) { - RTCError error = RTCError::OK(); - for (auto transport : transports_) { - RTCError e = transport->OpenChannel(channel_id); - if (!e.ok()) { - error = std::move(e); - } - } - return error; -} - -RTCError CompositeDataChannelTransport::SendData( - int channel_id, - const SendDataParams& params, - const rtc::CopyOnWriteBuffer& buffer) { - if (send_transport_) { - return send_transport_->SendData(channel_id, params, buffer); - } - return RTCError(RTCErrorType::NETWORK_ERROR, "Send transport is not ready"); -} - -RTCError CompositeDataChannelTransport::CloseChannel(int channel_id) { - if (send_transport_) { - return send_transport_->CloseChannel(channel_id); - } - return RTCError(RTCErrorType::NETWORK_ERROR, "Send transport is not ready"); -} - -void CompositeDataChannelTransport::SetDataSink(DataChannelSink* sink) { - sink_ = sink; - // NB: OnReadyToSend() checks if we're actually ready to send, and signals - // |sink_| if appropriate. This signal is required upon setting the sink. 
- OnReadyToSend(); -} - -bool CompositeDataChannelTransport::IsReadyToSend() const { - return send_transport_ && send_transport_->IsReadyToSend(); -} - -void CompositeDataChannelTransport::OnDataReceived( - int channel_id, - DataMessageType type, - const rtc::CopyOnWriteBuffer& buffer) { - if (sink_) { - sink_->OnDataReceived(channel_id, type, buffer); - } -} - -void CompositeDataChannelTransport::OnChannelClosing(int channel_id) { - if (sink_) { - sink_->OnChannelClosing(channel_id); - } -} - -void CompositeDataChannelTransport::OnChannelClosed(int channel_id) { - if (sink_) { - sink_->OnChannelClosed(channel_id); - } -} - -void CompositeDataChannelTransport::OnReadyToSend() { - if (sink_ && send_transport_ && send_transport_->IsReadyToSend()) { - sink_->OnReadyToSend(); - } -} - -} // namespace webrtc diff --git a/pc/composite_data_channel_transport.h b/pc/composite_data_channel_transport.h deleted file mode 100644 index 97633cb6ed..0000000000 --- a/pc/composite_data_channel_transport.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2019 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef PC_COMPOSITE_DATA_CHANNEL_TRANSPORT_H_ -#define PC_COMPOSITE_DATA_CHANNEL_TRANSPORT_H_ - -#include - -#include "api/transport/data_channel_transport_interface.h" -#include "rtc_base/critical_section.h" - -namespace webrtc { - -// Composite implementation of DataChannelTransportInterface. Allows users to -// receive data channel messages over multiple transports and send over one of -// those transports. 
-class CompositeDataChannelTransport : public DataChannelTransportInterface, - public DataChannelSink { - public: - explicit CompositeDataChannelTransport( - std::vector transports); - ~CompositeDataChannelTransport() override; - - // Specifies which transport to be used for sending. Must be called before - // sending data. - void SetSendTransport(DataChannelTransportInterface* send_transport); - - // Removes a given transport from the composite, if present. - void RemoveTransport(DataChannelTransportInterface* transport); - - // DataChannelTransportInterface overrides. - RTCError OpenChannel(int channel_id) override; - RTCError SendData(int channel_id, - const SendDataParams& params, - const rtc::CopyOnWriteBuffer& buffer) override; - RTCError CloseChannel(int channel_id) override; - void SetDataSink(DataChannelSink* sink) override; - bool IsReadyToSend() const override; - - // DataChannelSink overrides. - void OnDataReceived(int channel_id, - DataMessageType type, - const rtc::CopyOnWriteBuffer& buffer) override; - void OnChannelClosing(int channel_id) override; - void OnChannelClosed(int channel_id) override; - void OnReadyToSend() override; - - private: - std::vector transports_; - DataChannelTransportInterface* send_transport_ = nullptr; - DataChannelSink* sink_ = nullptr; -}; - -} // namespace webrtc - -#endif // PC_COMPOSITE_DATA_CHANNEL_TRANSPORT_H_ diff --git a/pc/composite_rtp_transport.cc b/pc/composite_rtp_transport.cc deleted file mode 100644 index 641d1d0fab..0000000000 --- a/pc/composite_rtp_transport.cc +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "pc/composite_rtp_transport.h" - -#include -#include - -#include "absl/memory/memory.h" -#include "p2p/base/packet_transport_internal.h" - -namespace webrtc { - -CompositeRtpTransport::CompositeRtpTransport( - std::vector transports) - : transports_(std::move(transports)) { - RTC_DCHECK(!transports_.empty()) << "Cannot have an empty composite"; - std::vector rtp_transports; - std::vector rtcp_transports; - for (RtpTransportInternal* transport : transports_) { - RTC_DCHECK_EQ(transport->rtcp_mux_enabled(), rtcp_mux_enabled()) - << "Either all or none of the transports in a composite must enable " - "rtcp mux"; - RTC_DCHECK_EQ(transport->transport_name(), transport_name()) - << "All transports in a composite must have the same transport name"; - - transport->SignalNetworkRouteChanged.connect( - this, &CompositeRtpTransport::OnNetworkRouteChanged); - transport->SignalRtcpPacketReceived.connect( - this, &CompositeRtpTransport::OnRtcpPacketReceived); - } -} - -void CompositeRtpTransport::SetSendTransport( - RtpTransportInternal* send_transport) { - if (send_transport_ == send_transport) { - return; - } - - RTC_DCHECK(absl::c_linear_search(transports_, send_transport)) - << "Cannot set a send transport that isn't part of the composite"; - - if (send_transport_) { - send_transport_->SignalReadyToSend.disconnect(this); - send_transport_->SignalWritableState.disconnect(this); - send_transport_->SignalSentPacket.disconnect(this); - } - - send_transport_ = send_transport; - send_transport_->SignalReadyToSend.connect( - this, &CompositeRtpTransport::OnReadyToSend); - send_transport_->SignalWritableState.connect( - this, &CompositeRtpTransport::OnWritableState); - send_transport_->SignalSentPacket.connect( - this, &CompositeRtpTransport::OnSentPacket); - - SignalWritableState(send_transport_->IsWritable(/*rtcp=*/true) && - send_transport_->IsWritable(/*rtcp=*/false)); - if (send_transport_->IsReadyToSend()) { - SignalReadyToSend(true); - } -} - -void 
CompositeRtpTransport::RemoveTransport(RtpTransportInternal* transport) { - RTC_DCHECK(transport != send_transport_) << "Cannot remove send transport"; - - auto it = absl::c_find(transports_, transport); - if (it == transports_.end()) { - return; - } - - transport->SignalNetworkRouteChanged.disconnect(this); - transport->SignalRtcpPacketReceived.disconnect(this); - for (auto sink : rtp_demuxer_sinks_) { - transport->UnregisterRtpDemuxerSink(sink); - } - - transports_.erase(it); -} - -const std::string& CompositeRtpTransport::transport_name() const { - return transports_.front()->transport_name(); -} - -int CompositeRtpTransport::SetRtpOption(rtc::Socket::Option opt, int value) { - int result = 0; - for (auto transport : transports_) { - result |= transport->SetRtpOption(opt, value); - } - return result; -} - -int CompositeRtpTransport::SetRtcpOption(rtc::Socket::Option opt, int value) { - int result = 0; - for (auto transport : transports_) { - result |= transport->SetRtcpOption(opt, value); - } - return result; -} - -bool CompositeRtpTransport::rtcp_mux_enabled() const { - return transports_.front()->rtcp_mux_enabled(); -} - -void CompositeRtpTransport::SetRtcpMuxEnabled(bool enabled) { - for (auto transport : transports_) { - transport->SetRtcpMuxEnabled(enabled); - } -} - -bool CompositeRtpTransport::IsReadyToSend() const { - return send_transport_ && send_transport_->IsReadyToSend(); -} - -bool CompositeRtpTransport::IsWritable(bool rtcp) const { - return send_transport_ && send_transport_->IsWritable(rtcp); -} - -bool CompositeRtpTransport::SendRtpPacket(rtc::CopyOnWriteBuffer* packet, - const rtc::PacketOptions& options, - int flags) { - if (!send_transport_) { - return false; - } - return send_transport_->SendRtpPacket(packet, options, flags); -} - -bool CompositeRtpTransport::SendRtcpPacket(rtc::CopyOnWriteBuffer* packet, - const rtc::PacketOptions& options, - int flags) { - if (!send_transport_) { - return false; - } - return 
send_transport_->SendRtcpPacket(packet, options, flags); -} - -void CompositeRtpTransport::UpdateRtpHeaderExtensionMap( - const cricket::RtpHeaderExtensions& header_extensions) { - for (RtpTransportInternal* transport : transports_) { - transport->UpdateRtpHeaderExtensionMap(header_extensions); - } -} - -bool CompositeRtpTransport::IsSrtpActive() const { - bool active = true; - for (RtpTransportInternal* transport : transports_) { - active &= transport->IsSrtpActive(); - } - return active; -} - -bool CompositeRtpTransport::RegisterRtpDemuxerSink( - const RtpDemuxerCriteria& criteria, - RtpPacketSinkInterface* sink) { - for (RtpTransportInternal* transport : transports_) { - transport->RegisterRtpDemuxerSink(criteria, sink); - } - rtp_demuxer_sinks_.insert(sink); - return true; -} - -bool CompositeRtpTransport::UnregisterRtpDemuxerSink( - RtpPacketSinkInterface* sink) { - for (RtpTransportInternal* transport : transports_) { - transport->UnregisterRtpDemuxerSink(sink); - } - rtp_demuxer_sinks_.erase(sink); - return true; -} - -void CompositeRtpTransport::OnNetworkRouteChanged( - absl::optional route) { - SignalNetworkRouteChanged(route); -} - -void CompositeRtpTransport::OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet, - int64_t packet_time_us) { - SignalRtcpPacketReceived(packet, packet_time_us); -} - -void CompositeRtpTransport::OnWritableState(bool writable) { - SignalWritableState(writable); -} - -void CompositeRtpTransport::OnReadyToSend(bool ready_to_send) { - SignalReadyToSend(ready_to_send); -} - -void CompositeRtpTransport::OnSentPacket(const rtc::SentPacket& packet) { - SignalSentPacket(packet); -} - -} // namespace webrtc diff --git a/pc/composite_rtp_transport.h b/pc/composite_rtp_transport.h deleted file mode 100644 index 35f9382571..0000000000 --- a/pc/composite_rtp_transport.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef PC_COMPOSITE_RTP_TRANSPORT_H_ -#define PC_COMPOSITE_RTP_TRANSPORT_H_ - -#include -#include -#include -#include - -#include "call/rtp_demuxer.h" -#include "call/rtp_packet_sink_interface.h" -#include "pc/rtp_transport_internal.h" -#include "pc/session_description.h" -#include "rtc_base/async_packet_socket.h" -#include "rtc_base/copy_on_write_buffer.h" - -namespace webrtc { - -// Composite RTP transport capable of receiving from multiple sub-transports. -// -// CompositeRtpTransport is receive-only until the caller explicitly chooses -// which transport will be used to send and calls |SetSendTransport|. This -// choice must be made as part of the SDP negotiation process, based on receipt -// of a provisional answer. |CompositeRtpTransport| does not become writable or -// ready to send until |SetSendTransport| is called. -// -// When a full answer is received, the user should replace the composite -// transport with the single, chosen RTP transport, then delete the composite -// and all non-chosen transports. -class CompositeRtpTransport : public RtpTransportInternal { - public: - // Constructs a composite out of the given |transports|. |transports| must - // not be empty. All |transports| must outlive the composite. - explicit CompositeRtpTransport(std::vector transports); - - // Sets which transport will be used for sending packets. Once called, - // |IsReadyToSend|, |IsWritable|, and the associated signals will reflect the - // state of |send_tranpsort|. - void SetSendTransport(RtpTransportInternal* send_transport); - - // Removes |transport| from the composite. 
No-op if |transport| is null or - // not found in the composite. Removing a transport disconnects all signals - // and RTP demux sinks from that transport. The send transport may not be - // removed. - void RemoveTransport(RtpTransportInternal* transport); - - // All transports within a composite must have the same name. - const std::string& transport_name() const override; - - int SetRtpOption(rtc::Socket::Option opt, int value) override; - int SetRtcpOption(rtc::Socket::Option opt, int value) override; - - // All transports within a composite must either enable or disable RTCP mux. - bool rtcp_mux_enabled() const override; - - // Enables or disables RTCP mux for all component transports. - void SetRtcpMuxEnabled(bool enabled) override; - - // The composite is ready to send if |send_transport_| is set and ready to - // send. - bool IsReadyToSend() const override; - - // The composite is writable if |send_transport_| is set and writable. - bool IsWritable(bool rtcp) const override; - - // Sends an RTP packet. May only be called after |send_transport_| is set. - bool SendRtpPacket(rtc::CopyOnWriteBuffer* packet, - const rtc::PacketOptions& options, - int flags) override; - - // Sends an RTCP packet. May only be called after |send_transport_| is set. - bool SendRtcpPacket(rtc::CopyOnWriteBuffer* packet, - const rtc::PacketOptions& options, - int flags) override; - - // Updates the mapping of RTP header extensions for all component transports. - void UpdateRtpHeaderExtensionMap( - const cricket::RtpHeaderExtensions& header_extensions) override; - - // SRTP is only active for a composite if it is active for all component - // transports. - bool IsSrtpActive() const override; - - // Registers an RTP demux sink with all component transports. - bool RegisterRtpDemuxerSink(const RtpDemuxerCriteria& criteria, - RtpPacketSinkInterface* sink) override; - bool UnregisterRtpDemuxerSink(RtpPacketSinkInterface* sink) override; - - private: - // Receive-side signals. 
- void OnNetworkRouteChanged(absl::optional route); - void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* packet, - int64_t packet_time_us); - - // Send-side signals. - void OnWritableState(bool writable); - void OnReadyToSend(bool ready_to_send); - void OnSentPacket(const rtc::SentPacket& packet); - - std::vector transports_; - RtpTransportInternal* send_transport_ = nullptr; - - // Record of registered RTP demuxer sinks. Used to unregister sinks when a - // transport is removed. - std::set rtp_demuxer_sinks_; -}; - -} // namespace webrtc - -#endif // PC_COMPOSITE_RTP_TRANSPORT_H_ diff --git a/pc/composite_rtp_transport_test.cc b/pc/composite_rtp_transport_test.cc deleted file mode 100644 index fee8c215b2..0000000000 --- a/pc/composite_rtp_transport_test.cc +++ /dev/null @@ -1,389 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "pc/composite_rtp_transport.h" - -#include - -#include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "p2p/base/fake_packet_transport.h" -#include "pc/rtp_transport.h" -#include "test/gtest.h" - -namespace webrtc { -namespace { - -constexpr char kTransportName[] = "test-transport"; -constexpr char kRtcpTransportName[] = "test-transport-rtcp"; -constexpr uint8_t kRtpPayloadType = 100; - -constexpr uint8_t kRtcpPacket[] = {0x80, 73, 0, 0}; -constexpr uint8_t kRtpPacket[] = {0x80, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - -class CompositeRtpTransportTest : public ::testing::Test, - public sigslot::has_slots<>, - public RtpPacketSinkInterface { - public: - CompositeRtpTransportTest() - : packet_transport_1_( - std::make_unique(kTransportName)), - packet_transport_2_( - std::make_unique(kTransportName)), - rtcp_transport_1_( - std::make_unique(kRtcpTransportName)), - rtcp_transport_2_( - std::make_unique(kRtcpTransportName)) {} - - void SetupRtpTransports(bool rtcp_mux) { - transport_1_ = std::make_unique(rtcp_mux); - transport_2_ = std::make_unique(rtcp_mux); - - transport_1_->SetRtpPacketTransport(packet_transport_1_.get()); - transport_2_->SetRtpPacketTransport(packet_transport_2_.get()); - if (!rtcp_mux) { - transport_1_->SetRtcpPacketTransport(rtcp_transport_1_.get()); - transport_2_->SetRtcpPacketTransport(rtcp_transport_2_.get()); - } - - composite_ = std::make_unique( - std::vector{transport_1_.get(), - transport_2_.get()}); - - composite_->SignalReadyToSend.connect( - this, &CompositeRtpTransportTest::OnReadyToSend); - composite_->SignalWritableState.connect( - this, &CompositeRtpTransportTest::OnWritableState); - composite_->SignalSentPacket.connect( - this, &CompositeRtpTransportTest::OnSentPacket); - composite_->SignalNetworkRouteChanged.connect( - this, &CompositeRtpTransportTest::OnNetworkRouteChanged); - composite_->SignalRtcpPacketReceived.connect( - this, &CompositeRtpTransportTest::OnRtcpPacketReceived); - - RtpDemuxerCriteria 
criteria; - criteria.payload_types.insert(kRtpPayloadType); - composite_->RegisterRtpDemuxerSink(criteria, this); - } - - void TearDown() override { composite_->UnregisterRtpDemuxerSink(this); } - - void OnReadyToSend(bool ready) { ++ready_to_send_count_; } - void OnWritableState(bool writable) { ++writable_state_count_; } - void OnSentPacket(const rtc::SentPacket& packet) { ++sent_packet_count_; } - void OnNetworkRouteChanged(absl::optional route) { - ++network_route_count_; - last_network_route_ = route; - } - void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer* buffer, - int64_t packet_time_us) { - ++rtcp_packet_count_; - last_packet_ = *buffer; - } - void OnRtpPacket(const RtpPacketReceived& packet) { - ++rtp_packet_count_; - last_packet_ = packet.Buffer(); - } - - protected: - std::unique_ptr packet_transport_1_; - std::unique_ptr packet_transport_2_; - std::unique_ptr rtcp_transport_1_; - std::unique_ptr rtcp_transport_2_; - std::unique_ptr transport_1_; - std::unique_ptr transport_2_; - std::unique_ptr composite_; - - int ready_to_send_count_ = 0; - int writable_state_count_ = 0; - int sent_packet_count_ = 0; - int network_route_count_ = 0; - int rtcp_packet_count_ = 0; - int rtp_packet_count_ = 0; - - absl::optional last_network_route_; - rtc::CopyOnWriteBuffer last_packet_; -}; - -TEST_F(CompositeRtpTransportTest, EnableRtcpMux) { - SetupRtpTransports(/*rtcp_mux=*/false); - EXPECT_FALSE(composite_->rtcp_mux_enabled()); - EXPECT_FALSE(transport_1_->rtcp_mux_enabled()); - EXPECT_FALSE(transport_2_->rtcp_mux_enabled()); - - composite_->SetRtcpMuxEnabled(true); - EXPECT_TRUE(composite_->rtcp_mux_enabled()); - EXPECT_TRUE(transport_1_->rtcp_mux_enabled()); - EXPECT_TRUE(transport_2_->rtcp_mux_enabled()); -} - -TEST_F(CompositeRtpTransportTest, DisableRtcpMux) { - SetupRtpTransports(/*rtcp_mux=*/true); - EXPECT_TRUE(composite_->rtcp_mux_enabled()); - EXPECT_TRUE(transport_1_->rtcp_mux_enabled()); - EXPECT_TRUE(transport_2_->rtcp_mux_enabled()); - - // If the 
component transports didn't have an RTCP transport before, they need - // to be set independently before disabling RTCP mux. There's no other sane - // way to do this, as the interface only allows sending a single RTCP - // transport, and we need one for each component. - transport_1_->SetRtcpPacketTransport(rtcp_transport_1_.get()); - transport_2_->SetRtcpPacketTransport(rtcp_transport_2_.get()); - - composite_->SetRtcpMuxEnabled(false); - EXPECT_FALSE(composite_->rtcp_mux_enabled()); - EXPECT_FALSE(transport_1_->rtcp_mux_enabled()); - EXPECT_FALSE(transport_2_->rtcp_mux_enabled()); -} - -TEST_F(CompositeRtpTransportTest, SetRtpOption) { - SetupRtpTransports(/*rtcp_mux=*/true); - EXPECT_EQ(0, composite_->SetRtpOption(rtc::Socket::OPT_DSCP, 2)); - - int value = 0; - EXPECT_TRUE(packet_transport_1_->GetOption(rtc::Socket::OPT_DSCP, &value)); - EXPECT_EQ(value, 2); - - EXPECT_TRUE(packet_transport_2_->GetOption(rtc::Socket::OPT_DSCP, &value)); - EXPECT_EQ(value, 2); -} - -TEST_F(CompositeRtpTransportTest, SetRtcpOption) { - SetupRtpTransports(/*rtcp_mux=*/false); - EXPECT_EQ(0, composite_->SetRtcpOption(rtc::Socket::OPT_DSCP, 2)); - - int value = 0; - EXPECT_TRUE(rtcp_transport_1_->GetOption(rtc::Socket::OPT_DSCP, &value)); - EXPECT_EQ(value, 2); - - EXPECT_TRUE(rtcp_transport_2_->GetOption(rtc::Socket::OPT_DSCP, &value)); - EXPECT_EQ(value, 2); -} - -TEST_F(CompositeRtpTransportTest, NeverWritableWithoutSendTransport) { - SetupRtpTransports(/*rtcp_mux=*/true); - - packet_transport_1_->SetWritable(true); - packet_transport_2_->SetWritable(true); - - EXPECT_FALSE(composite_->IsWritable(false)); - EXPECT_FALSE(composite_->IsWritable(true)); - EXPECT_FALSE(composite_->IsReadyToSend()); - EXPECT_EQ(0, ready_to_send_count_); - EXPECT_EQ(0, writable_state_count_); -} - -TEST_F(CompositeRtpTransportTest, WritableWhenSendTransportBecomesWritable) { - SetupRtpTransports(/*rtcp_mux=*/true); - - composite_->SetSendTransport(transport_1_.get()); - - 
EXPECT_FALSE(composite_->IsWritable(false)); - EXPECT_FALSE(composite_->IsWritable(true)); - EXPECT_FALSE(composite_->IsReadyToSend()); - EXPECT_EQ(0, ready_to_send_count_); - EXPECT_EQ(1, writable_state_count_); - - packet_transport_2_->SetWritable(true); - - EXPECT_FALSE(composite_->IsWritable(false)); - EXPECT_FALSE(composite_->IsWritable(true)); - EXPECT_FALSE(composite_->IsReadyToSend()); - EXPECT_EQ(0, ready_to_send_count_); - EXPECT_EQ(1, writable_state_count_); - - packet_transport_1_->SetWritable(true); - - EXPECT_TRUE(composite_->IsWritable(false)); - EXPECT_TRUE(composite_->IsWritable(true)); - EXPECT_TRUE(composite_->IsReadyToSend()); - EXPECT_EQ(1, ready_to_send_count_); - EXPECT_EQ(2, writable_state_count_); -} - -TEST_F(CompositeRtpTransportTest, SendTransportAlreadyWritable) { - SetupRtpTransports(/*rtcp_mux=*/true); - packet_transport_1_->SetWritable(true); - - composite_->SetSendTransport(transport_1_.get()); - - EXPECT_TRUE(composite_->IsWritable(false)); - EXPECT_TRUE(composite_->IsWritable(true)); - EXPECT_TRUE(composite_->IsReadyToSend()); - EXPECT_EQ(1, ready_to_send_count_); - EXPECT_EQ(1, writable_state_count_); -} - -TEST_F(CompositeRtpTransportTest, IsSrtpActive) { - SetupRtpTransports(/*rtcp_mux=*/true); - EXPECT_FALSE(composite_->IsSrtpActive()); -} - -TEST_F(CompositeRtpTransportTest, NetworkRouteChange) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::NetworkRoute route; - route.local = rtc::RouteEndpoint::CreateWithNetworkId(7); - packet_transport_1_->SetNetworkRoute(route); - - EXPECT_EQ(1, network_route_count_); - EXPECT_EQ(7, last_network_route_->local.network_id()); - - route.local = rtc::RouteEndpoint::CreateWithNetworkId(8); - packet_transport_2_->SetNetworkRoute(route); - - EXPECT_EQ(2, network_route_count_); - EXPECT_EQ(8, last_network_route_->local.network_id()); -} - -TEST_F(CompositeRtpTransportTest, RemoveTransport) { - SetupRtpTransports(/*rtcp_mux=*/true); - - composite_->RemoveTransport(transport_1_.get()); - - // 
Check that signals are disconnected. - rtc::NetworkRoute route; - route.local = rtc::RouteEndpoint::CreateWithNetworkId(7); - packet_transport_1_->SetNetworkRoute(route); - - EXPECT_EQ(0, network_route_count_); -} - -TEST_F(CompositeRtpTransportTest, SendRtcpBeforeSendTransportSet) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_1_.get(), false); - - rtc::CopyOnWriteBuffer packet(kRtcpPacket); - EXPECT_FALSE(composite_->SendRtcpPacket(&packet, rtc::PacketOptions(), 0)); - EXPECT_EQ(0, sent_packet_count_); -} - -TEST_F(CompositeRtpTransportTest, SendRtcpOn1) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_1_.get(), false); - composite_->SetSendTransport(transport_1_.get()); - - rtc::CopyOnWriteBuffer packet(kRtcpPacket); - EXPECT_TRUE(composite_->SendRtcpPacket(&packet, rtc::PacketOptions(), 0)); - EXPECT_EQ(1, sent_packet_count_); - EXPECT_EQ(packet, *packet_transport_1_->last_sent_packet()); -} - -TEST_F(CompositeRtpTransportTest, SendRtcpOn2) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_2_.get(), false); - composite_->SetSendTransport(transport_2_.get()); - - rtc::CopyOnWriteBuffer packet(kRtcpPacket); - EXPECT_TRUE(composite_->SendRtcpPacket(&packet, rtc::PacketOptions(), 0)); - EXPECT_EQ(1, sent_packet_count_); - EXPECT_EQ(packet, *packet_transport_2_->last_sent_packet()); -} - -TEST_F(CompositeRtpTransportTest, SendRtpBeforeSendTransportSet) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_1_.get(), false); - - rtc::CopyOnWriteBuffer packet(kRtpPacket); - EXPECT_FALSE(composite_->SendRtpPacket(&packet, rtc::PacketOptions(), 0)); - EXPECT_EQ(0, sent_packet_count_); -} - -TEST_F(CompositeRtpTransportTest, SendRtpOn1) { - 
SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_1_.get(), false); - composite_->SetSendTransport(transport_1_.get()); - - rtc::CopyOnWriteBuffer packet(kRtpPacket); - EXPECT_TRUE(composite_->SendRtpPacket(&packet, rtc::PacketOptions(), 0)); - EXPECT_EQ(1, sent_packet_count_); - EXPECT_EQ(packet, *packet_transport_1_->last_sent_packet()); -} - -TEST_F(CompositeRtpTransportTest, SendRtpOn2) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_2_.get(), false); - composite_->SetSendTransport(transport_2_.get()); - - rtc::CopyOnWriteBuffer packet(kRtpPacket); - EXPECT_TRUE(composite_->SendRtpPacket(&packet, rtc::PacketOptions(), 0)); - EXPECT_EQ(1, sent_packet_count_); - EXPECT_EQ(packet, *packet_transport_2_->last_sent_packet()); -} - -TEST_F(CompositeRtpTransportTest, ReceiveRtcpFrom1) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_1_.get(), false); - - rtc::CopyOnWriteBuffer packet(kRtcpPacket); - remote.SendPacket(packet.cdata(), packet.size(), rtc::PacketOptions(), - 0); - - EXPECT_EQ(1, rtcp_packet_count_); - EXPECT_EQ(packet, last_packet_); -} - -TEST_F(CompositeRtpTransportTest, ReceiveRtcpFrom2) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_2_.get(), false); - - rtc::CopyOnWriteBuffer packet(kRtcpPacket); - remote.SendPacket(packet.cdata(), packet.size(), rtc::PacketOptions(), - 0); - - EXPECT_EQ(1, rtcp_packet_count_); - EXPECT_EQ(packet, last_packet_); -} - -TEST_F(CompositeRtpTransportTest, ReceiveRtpFrom1) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_1_.get(), false); - - rtc::CopyOnWriteBuffer packet(kRtpPacket); - remote.SendPacket(packet.cdata(), 
packet.size(), rtc::PacketOptions(), - 0); - - EXPECT_EQ(1, rtp_packet_count_); - EXPECT_EQ(packet, last_packet_); -} - -TEST_F(CompositeRtpTransportTest, ReceiveRtpFrom2) { - SetupRtpTransports(/*rtcp_mux=*/true); - - rtc::FakePacketTransport remote("remote"); - remote.SetDestination(packet_transport_2_.get(), false); - - rtc::CopyOnWriteBuffer packet(kRtpPacket); - remote.SendPacket(packet.cdata(), packet.size(), rtc::PacketOptions(), - 0); - - EXPECT_EQ(1, rtp_packet_count_); - EXPECT_EQ(packet, last_packet_); -} - -} // namespace -} // namespace webrtc diff --git a/pc/connection_context.cc b/pc/connection_context.cc new file mode 100644 index 0000000000..1bb7908f5c --- /dev/null +++ b/pc/connection_context.cc @@ -0,0 +1,161 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/connection_context.h" + +#include +#include +#include + +#include "api/transport/field_trial_based_config.h" +#include "media/sctp/sctp_transport_factory.h" +#include "rtc_base/helpers.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/time_utils.h" + +namespace webrtc { + +namespace { + +rtc::Thread* MaybeStartThread(rtc::Thread* old_thread, + const std::string& thread_name, + bool with_socket_server, + std::unique_ptr& thread_holder) { + if (old_thread) { + return old_thread; + } + if (with_socket_server) { + thread_holder = rtc::Thread::CreateWithSocketServer(); + } else { + thread_holder = rtc::Thread::Create(); + } + thread_holder->SetName(thread_name, nullptr); + thread_holder->Start(); + return thread_holder.get(); +} + +rtc::Thread* MaybeWrapThread(rtc::Thread* signaling_thread, + bool& wraps_current_thread) { + wraps_current_thread = false; + if (signaling_thread) { + return signaling_thread; + } + auto this_thread = rtc::Thread::Current(); + if (!this_thread) { + // If this thread isn't already wrapped by an rtc::Thread, create a + // wrapper and own it in this class. 
+ this_thread = rtc::ThreadManager::Instance()->WrapCurrentThread(); + wraps_current_thread = true; + } + return this_thread; +} + +std::unique_ptr MaybeCreateSctpFactory( + std::unique_ptr factory, + rtc::Thread* network_thread) { + if (factory) { + return factory; + } +#ifdef WEBRTC_HAVE_SCTP + return std::make_unique(network_thread); +#else + return nullptr; +#endif +} + +} // namespace + +// Static +rtc::scoped_refptr ConnectionContext::Create( + PeerConnectionFactoryDependencies* dependencies) { + return new ConnectionContext(dependencies); +} + +ConnectionContext::ConnectionContext( + PeerConnectionFactoryDependencies* dependencies) + : network_thread_(MaybeStartThread(dependencies->network_thread, + "pc_network_thread", + true, + owned_network_thread_)), + worker_thread_(MaybeStartThread(dependencies->worker_thread, + "pc_worker_thread", + false, + owned_worker_thread_)), + signaling_thread_(MaybeWrapThread(dependencies->signaling_thread, + wraps_current_thread_)), + network_monitor_factory_( + std::move(dependencies->network_monitor_factory)), + call_factory_(std::move(dependencies->call_factory)), + sctp_factory_( + MaybeCreateSctpFactory(std::move(dependencies->sctp_factory), + network_thread())), + trials_(dependencies->trials + ? 
std::move(dependencies->trials) + : std::make_unique()) { + signaling_thread_->AllowInvokesToThread(worker_thread_); + signaling_thread_->AllowInvokesToThread(network_thread_); + worker_thread_->AllowInvokesToThread(network_thread_); + if (network_thread_->IsCurrent()) { + // TODO(https://crbug.com/webrtc/12802) switch to DisallowAllInvokes + network_thread_->AllowInvokesToThread(network_thread_); + } else { + network_thread_->PostTask(ToQueuedTask([thread = network_thread_] { + thread->DisallowBlockingCalls(); + // TODO(https://crbug.com/webrtc/12802) switch to DisallowAllInvokes + thread->AllowInvokesToThread(thread); + })); + } + + RTC_DCHECK_RUN_ON(signaling_thread_); + rtc::InitRandom(rtc::Time32()); + + // If network_monitor_factory_ is non-null, it will be used to create a + // network monitor while on the network thread. + default_network_manager_ = std::make_unique( + network_monitor_factory_.get()); + + default_socket_factory_ = + std::make_unique(network_thread()); + + worker_thread_->Invoke(RTC_FROM_HERE, [&]() { + channel_manager_ = cricket::ChannelManager::Create( + std::move(dependencies->media_engine), + /*enable_rtx=*/true, worker_thread(), network_thread()); + }); + + // Set warning levels on the threads, to give warnings when response + // may be slower than is expected of the thread. + // Since some of the threads may be the same, start with the least + // restrictive limits and end with the least permissive ones. + // This will give warnings for all cases. + signaling_thread_->SetDispatchWarningMs(100); + worker_thread_->SetDispatchWarningMs(30); + network_thread_->SetDispatchWarningMs(10); +} + +ConnectionContext::~ConnectionContext() { + RTC_DCHECK_RUN_ON(signaling_thread_); + worker_thread_->Invoke(RTC_FROM_HERE, + [&]() { channel_manager_.reset(nullptr); }); + + // Make sure |worker_thread()| and |signaling_thread()| outlive + // |default_socket_factory_| and |default_network_manager_|. 
+ default_socket_factory_ = nullptr; + default_network_manager_ = nullptr; + + if (wraps_current_thread_) + rtc::ThreadManager::Instance()->UnwrapCurrentThread(); +} + +cricket::ChannelManager* ConnectionContext::channel_manager() const { + return channel_manager_.get(); +} + +} // namespace webrtc diff --git a/pc/connection_context.h b/pc/connection_context.h new file mode 100644 index 0000000000..8fad13c10c --- /dev/null +++ b/pc/connection_context.h @@ -0,0 +1,131 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_CONNECTION_CONTEXT_H_ +#define PC_CONNECTION_CONTEXT_H_ + +#include +#include + +#include "api/call/call_factory_interface.h" +#include "api/media_stream_interface.h" +#include "api/peer_connection_interface.h" +#include "api/ref_counted_base.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/transport/sctp_transport_factory_interface.h" +#include "api/transport/webrtc_key_value_config.h" +#include "media/base/media_engine.h" +#include "p2p/base/basic_packet_socket_factory.h" +#include "pc/channel_manager.h" +#include "rtc_base/checks.h" +#include "rtc_base/network.h" +#include "rtc_base/network_monitor_factory.h" +#include "rtc_base/rtc_certificate_generator.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" + +namespace rtc { +class BasicNetworkManager; +class BasicPacketSocketFactory; +} // namespace rtc + +namespace webrtc { + +class RtcEventLog; + +// This class contains resources needed by PeerConnection and associated +// objects. A reference to this object is passed to each PeerConnection. 
The +// methods on this object are assumed not to change the state in any way that +// interferes with the operation of other PeerConnections. +// +// This class must be created and destroyed on the signaling thread. +class ConnectionContext final + : public rtc::RefCountedNonVirtual { + public: + // Creates a ConnectionContext. May return null if initialization fails. + // The Dependencies class allows simple management of all new dependencies + // being added to the ConnectionContext. + static rtc::scoped_refptr Create( + PeerConnectionFactoryDependencies* dependencies); + + // This class is not copyable or movable. + ConnectionContext(const ConnectionContext&) = delete; + ConnectionContext& operator=(const ConnectionContext&) = delete; + + // Functions called from PeerConnection and friends + SctpTransportFactoryInterface* sctp_transport_factory() const { + return sctp_factory_.get(); + } + + cricket::ChannelManager* channel_manager() const; + + rtc::Thread* signaling_thread() { return signaling_thread_; } + const rtc::Thread* signaling_thread() const { return signaling_thread_; } + rtc::Thread* worker_thread() { return worker_thread_; } + const rtc::Thread* worker_thread() const { return worker_thread_; } + rtc::Thread* network_thread() { return network_thread_; } + const rtc::Thread* network_thread() const { return network_thread_; } + + const WebRtcKeyValueConfig& trials() const { return *trials_.get(); } + + // Accessors only used from the PeerConnectionFactory class + rtc::BasicNetworkManager* default_network_manager() { + RTC_DCHECK_RUN_ON(signaling_thread_); + return default_network_manager_.get(); + } + rtc::BasicPacketSocketFactory* default_socket_factory() { + RTC_DCHECK_RUN_ON(signaling_thread_); + return default_socket_factory_.get(); + } + CallFactoryInterface* call_factory() { + RTC_DCHECK_RUN_ON(worker_thread_); + return call_factory_.get(); + } + + protected: + explicit ConnectionContext(PeerConnectionFactoryDependencies* dependencies); + + 
friend class rtc::RefCountedNonVirtual; + ~ConnectionContext(); + + private: + // The following three variables are used to communicate between the + // constructor and the destructor, and are never exposed externally. + bool wraps_current_thread_; + // Note: Since owned_network_thread_ and owned_worker_thread_ are used + // in the initialization of network_thread_ and worker_thread_, they + // must be declared before them, so that they are initialized first. + std::unique_ptr owned_network_thread_ + RTC_GUARDED_BY(signaling_thread_); + std::unique_ptr owned_worker_thread_ + RTC_GUARDED_BY(signaling_thread_); + rtc::Thread* const network_thread_; + rtc::Thread* const worker_thread_; + rtc::Thread* const signaling_thread_; + // channel_manager is accessed both on signaling thread and worker thread. + std::unique_ptr channel_manager_; + std::unique_ptr const network_monitor_factory_ + RTC_GUARDED_BY(signaling_thread_); + std::unique_ptr default_network_manager_ + RTC_GUARDED_BY(signaling_thread_); + std::unique_ptr const call_factory_ + RTC_GUARDED_BY(worker_thread_); + + std::unique_ptr default_socket_factory_ + RTC_GUARDED_BY(signaling_thread_); + std::unique_ptr const sctp_factory_; + // Accessed both on signaling thread and worker thread. + std::unique_ptr const trials_; +}; + +} // namespace webrtc + +#endif // PC_CONNECTION_CONTEXT_H_ diff --git a/pc/data_channel.cc b/pc/data_channel.cc deleted file mode 100644 index 0b9af37544..0000000000 --- a/pc/data_channel.cc +++ /dev/null @@ -1,749 +0,0 @@ -/* - * Copyright 2012 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "pc/data_channel.h" - -#include -#include -#include - -#include "media/sctp/sctp_transport_internal.h" -#include "pc/sctp_utils.h" -#include "rtc_base/checks.h" -#include "rtc_base/location.h" -#include "rtc_base/logging.h" -#include "rtc_base/ref_counted_object.h" -#include "rtc_base/thread.h" - -namespace webrtc { - -static size_t kMaxQueuedReceivedDataBytes = 16 * 1024 * 1024; -static size_t kMaxQueuedSendDataBytes = 16 * 1024 * 1024; - -namespace { - -static std::atomic g_unique_id{0}; - -int GenerateUniqueId() { - return ++g_unique_id; -} - -} // namespace - -InternalDataChannelInit::InternalDataChannelInit(const DataChannelInit& base) - : DataChannelInit(base), open_handshake_role(kOpener) { - // If the channel is externally negotiated, do not send the OPEN message. - if (base.negotiated) { - open_handshake_role = kNone; - } else { - // Datachannel is externally negotiated. Ignore the id value. - // Specified in createDataChannel, WebRTC spec section 6.1 bullet 13. - id = -1; - } - // Backwards compatibility: If base.maxRetransmits or base.maxRetransmitTime - // have been set to -1, unset them. - if (maxRetransmits && *maxRetransmits == -1) { - RTC_LOG(LS_ERROR) - << "Accepting maxRetransmits = -1 for backwards compatibility"; - maxRetransmits = absl::nullopt; - } - if (maxRetransmitTime && *maxRetransmitTime == -1) { - RTC_LOG(LS_ERROR) - << "Accepting maxRetransmitTime = -1 for backwards compatibility"; - maxRetransmitTime = absl::nullopt; - } -} - -bool SctpSidAllocator::AllocateSid(rtc::SSLRole role, int* sid) { - int potential_sid = (role == rtc::SSL_CLIENT) ? 
0 : 1; - while (!IsSidAvailable(potential_sid)) { - potential_sid += 2; - if (potential_sid > static_cast(cricket::kMaxSctpSid)) { - return false; - } - } - - *sid = potential_sid; - used_sids_.insert(potential_sid); - return true; -} - -bool SctpSidAllocator::ReserveSid(int sid) { - if (!IsSidAvailable(sid)) { - return false; - } - used_sids_.insert(sid); - return true; -} - -void SctpSidAllocator::ReleaseSid(int sid) { - auto it = used_sids_.find(sid); - if (it != used_sids_.end()) { - used_sids_.erase(it); - } -} - -bool SctpSidAllocator::IsSidAvailable(int sid) const { - if (sid < static_cast(cricket::kMinSctpSid) || - sid > static_cast(cricket::kMaxSctpSid)) { - return false; - } - return used_sids_.find(sid) == used_sids_.end(); -} - -bool DataChannel::PacketQueue::Empty() const { - return packets_.empty(); -} - -std::unique_ptr DataChannel::PacketQueue::PopFront() { - RTC_DCHECK(!packets_.empty()); - byte_count_ -= packets_.front()->size(); - std::unique_ptr packet = std::move(packets_.front()); - packets_.pop_front(); - return packet; -} - -void DataChannel::PacketQueue::PushFront(std::unique_ptr packet) { - byte_count_ += packet->size(); - packets_.push_front(std::move(packet)); -} - -void DataChannel::PacketQueue::PushBack(std::unique_ptr packet) { - byte_count_ += packet->size(); - packets_.push_back(std::move(packet)); -} - -void DataChannel::PacketQueue::Clear() { - packets_.clear(); - byte_count_ = 0; -} - -void DataChannel::PacketQueue::Swap(PacketQueue* other) { - size_t other_byte_count = other->byte_count_; - other->byte_count_ = byte_count_; - byte_count_ = other_byte_count; - - other->packets_.swap(packets_); -} - -rtc::scoped_refptr DataChannel::Create( - DataChannelProviderInterface* provider, - cricket::DataChannelType dct, - const std::string& label, - const InternalDataChannelInit& config) { - rtc::scoped_refptr channel( - new rtc::RefCountedObject(provider, dct, label)); - if (!channel->Init(config)) { - return NULL; - } - return channel; 
-} - -bool DataChannel::IsSctpLike(cricket::DataChannelType type) { - return type == cricket::DCT_SCTP || type == cricket::DCT_MEDIA_TRANSPORT || - type == cricket::DCT_DATA_CHANNEL_TRANSPORT || - type == cricket::DCT_DATA_CHANNEL_TRANSPORT_SCTP; -} - -DataChannel::DataChannel(DataChannelProviderInterface* provider, - cricket::DataChannelType dct, - const std::string& label) - : internal_id_(GenerateUniqueId()), - label_(label), - observer_(nullptr), - state_(kConnecting), - messages_sent_(0), - bytes_sent_(0), - messages_received_(0), - bytes_received_(0), - buffered_amount_(0), - data_channel_type_(dct), - provider_(provider), - handshake_state_(kHandshakeInit), - connected_to_provider_(false), - send_ssrc_set_(false), - receive_ssrc_set_(false), - writable_(false), - send_ssrc_(0), - receive_ssrc_(0) {} - -bool DataChannel::Init(const InternalDataChannelInit& config) { - if (data_channel_type_ == cricket::DCT_RTP) { - if (config.reliable || config.id != -1 || config.maxRetransmits || - config.maxRetransmitTime) { - RTC_LOG(LS_ERROR) << "Failed to initialize the RTP data channel due to " - "invalid DataChannelInit."; - return false; - } - handshake_state_ = kHandshakeReady; - } else if (IsSctpLike(data_channel_type_)) { - if (config.id < -1 || - (config.maxRetransmits && *config.maxRetransmits < 0) || - (config.maxRetransmitTime && *config.maxRetransmitTime < 0)) { - RTC_LOG(LS_ERROR) << "Failed to initialize the SCTP data channel due to " - "invalid DataChannelInit."; - return false; - } - if (config.maxRetransmits && config.maxRetransmitTime) { - RTC_LOG(LS_ERROR) - << "maxRetransmits and maxRetransmitTime should not be both set."; - return false; - } - config_ = config; - - switch (config_.open_handshake_role) { - case webrtc::InternalDataChannelInit::kNone: // pre-negotiated - handshake_state_ = kHandshakeReady; - break; - case webrtc::InternalDataChannelInit::kOpener: - handshake_state_ = kHandshakeShouldSendOpen; - break; - case 
webrtc::InternalDataChannelInit::kAcker: - handshake_state_ = kHandshakeShouldSendAck; - break; - } - - // Try to connect to the transport in case the transport channel already - // exists. - OnTransportChannelCreated(); - - // Checks if the transport is ready to send because the initial channel - // ready signal may have been sent before the DataChannel creation. - // This has to be done async because the upper layer objects (e.g. - // Chrome glue and WebKit) are not wired up properly until after this - // function returns. - if (provider_->ReadyToSendData()) { - invoker_.AsyncInvoke(RTC_FROM_HERE, rtc::Thread::Current(), - [this] { OnChannelReady(true); }); - } - } - - return true; -} - -DataChannel::~DataChannel() {} - -void DataChannel::RegisterObserver(DataChannelObserver* observer) { - observer_ = observer; - DeliverQueuedReceivedData(); -} - -void DataChannel::UnregisterObserver() { - observer_ = NULL; -} - -bool DataChannel::reliable() const { - if (data_channel_type_ == cricket::DCT_RTP) { - return false; - } else { - return !config_.maxRetransmits && !config_.maxRetransmitTime; - } -} - -uint64_t DataChannel::buffered_amount() const { - return buffered_amount_; -} - -void DataChannel::Close() { - if (state_ == kClosed) - return; - send_ssrc_ = 0; - send_ssrc_set_ = false; - SetState(kClosing); - // Will send queued data before beginning the underlying closing procedure. - UpdateState(); -} - -RTCError DataChannel::error() const { - return error_; -} - -bool DataChannel::Send(const DataBuffer& buffer) { - buffered_amount_ += buffer.size(); - if (state_ != kOpen) { - return false; - } - - // TODO(jiayl): the spec is unclear about if the remote side should get the - // onmessage event. We need to figure out the expected behavior and change the - // code accordingly. - if (buffer.size() == 0) { - return true; - } - - // If the queue is non-empty, we're waiting for SignalReadyToSend, - // so just add to the end of the queue and keep waiting. 
- if (!queued_send_data_.Empty()) { - // Only SCTP DataChannel queues the outgoing data when the transport is - // blocked. - RTC_DCHECK(IsSctpLike(data_channel_type_)); - if (!QueueSendDataMessage(buffer)) { - RTC_LOG(LS_ERROR) << "Closing the DataChannel due to a failure to queue " - "additional data."; - // https://w3c.github.io/webrtc-pc/#dom-rtcdatachannel-send step 5 - // Note that the spec doesn't explicitly say to close in this situation. - CloseAbruptlyWithError(RTCError(RTCErrorType::RESOURCE_EXHAUSTED, - "Unable to queue data for sending")); - } - return true; - } - - bool success = SendDataMessage(buffer, true); - if (data_channel_type_ == cricket::DCT_RTP) { - return success; - } - - // Always return true for SCTP DataChannel per the spec. - return true; -} - -void DataChannel::SetReceiveSsrc(uint32_t receive_ssrc) { - RTC_DCHECK(data_channel_type_ == cricket::DCT_RTP); - - if (receive_ssrc_set_) { - return; - } - receive_ssrc_ = receive_ssrc; - receive_ssrc_set_ = true; - UpdateState(); -} - -void DataChannel::SetSctpSid(int sid) { - RTC_DCHECK_LT(config_.id, 0); - RTC_DCHECK_GE(sid, 0); - RTC_DCHECK(IsSctpLike(data_channel_type_)); - if (config_.id == sid) { - return; - } - - config_.id = sid; - provider_->AddSctpDataStream(sid); -} - -void DataChannel::OnClosingProcedureStartedRemotely(int sid) { - if (IsSctpLike(data_channel_type_) && sid == config_.id && - state_ != kClosing && state_ != kClosed) { - // Don't bother sending queued data since the side that initiated the - // closure wouldn't receive it anyway. See crbug.com/559394 for a lengthy - // discussion about this. - queued_send_data_.Clear(); - queued_control_data_.Clear(); - // Just need to change state to kClosing, SctpTransport will handle the - // rest of the closing procedure and OnClosingProcedureComplete will be - // called later. 
- started_closing_procedure_ = true; - SetState(kClosing); - } -} - -void DataChannel::OnClosingProcedureComplete(int sid) { - if (IsSctpLike(data_channel_type_) && sid == config_.id) { - // If the closing procedure is complete, we should have finished sending - // all pending data and transitioned to kClosing already. - RTC_DCHECK_EQ(state_, kClosing); - RTC_DCHECK(queued_send_data_.Empty()); - DisconnectFromProvider(); - SetState(kClosed); - } -} - -void DataChannel::OnTransportChannelCreated() { - RTC_DCHECK(IsSctpLike(data_channel_type_)); - if (!connected_to_provider_) { - connected_to_provider_ = provider_->ConnectDataChannel(this); - } - // The sid may have been unassigned when provider_->ConnectDataChannel was - // done. So always add the streams even if connected_to_provider_ is true. - if (config_.id >= 0) { - provider_->AddSctpDataStream(config_.id); - } -} - -void DataChannel::OnTransportChannelClosed() { - // The SctpTransport is unusable (for example, because the SCTP m= section - // was rejected, or because the DTLS transport closed), so we need to close - // abruptly. - RTCError error = RTCError(RTCErrorType::OPERATION_ERROR_WITH_DATA, - "Transport channel closed"); - error.set_error_detail(RTCErrorDetailType::SCTP_FAILURE); - CloseAbruptlyWithError(std::move(error)); -} - -// The remote peer request that this channel shall be closed. -void DataChannel::RemotePeerRequestClose() { - RTC_DCHECK(data_channel_type_ == cricket::DCT_RTP); - // Close with error code explicitly set to OK. 
- CloseAbruptlyWithError(RTCError()); -} - -void DataChannel::SetSendSsrc(uint32_t send_ssrc) { - RTC_DCHECK(data_channel_type_ == cricket::DCT_RTP); - if (send_ssrc_set_) { - return; - } - send_ssrc_ = send_ssrc; - send_ssrc_set_ = true; - UpdateState(); -} - -void DataChannel::OnDataReceived(const cricket::ReceiveDataParams& params, - const rtc::CopyOnWriteBuffer& payload) { - if (data_channel_type_ == cricket::DCT_RTP && params.ssrc != receive_ssrc_) { - return; - } - if (IsSctpLike(data_channel_type_) && params.sid != config_.id) { - return; - } - - if (params.type == cricket::DMT_CONTROL) { - RTC_DCHECK(IsSctpLike(data_channel_type_)); - if (handshake_state_ != kHandshakeWaitingForAck) { - // Ignore it if we are not expecting an ACK message. - RTC_LOG(LS_WARNING) - << "DataChannel received unexpected CONTROL message, sid = " - << params.sid; - return; - } - if (ParseDataChannelOpenAckMessage(payload)) { - // We can send unordered as soon as we receive the ACK message. - handshake_state_ = kHandshakeReady; - RTC_LOG(LS_INFO) << "DataChannel received OPEN_ACK message, sid = " - << params.sid; - } else { - RTC_LOG(LS_WARNING) - << "DataChannel failed to parse OPEN_ACK message, sid = " - << params.sid; - } - return; - } - - RTC_DCHECK(params.type == cricket::DMT_BINARY || - params.type == cricket::DMT_TEXT); - - RTC_LOG(LS_VERBOSE) << "DataChannel received DATA message, sid = " - << params.sid; - // We can send unordered as soon as we receive any DATA message since the - // remote side must have received the OPEN (and old clients do not send - // OPEN_ACK). 
- if (handshake_state_ == kHandshakeWaitingForAck) { - handshake_state_ = kHandshakeReady; - } - - bool binary = (params.type == cricket::DMT_BINARY); - auto buffer = std::make_unique(payload, binary); - if (state_ == kOpen && observer_) { - ++messages_received_; - bytes_received_ += buffer->size(); - observer_->OnMessage(*buffer.get()); - } else { - if (queued_received_data_.byte_count() + payload.size() > - kMaxQueuedReceivedDataBytes) { - RTC_LOG(LS_ERROR) << "Queued received data exceeds the max buffer size."; - - queued_received_data_.Clear(); - if (data_channel_type_ != cricket::DCT_RTP) { - CloseAbruptlyWithError( - RTCError(RTCErrorType::RESOURCE_EXHAUSTED, - "Queued received data exceeds the max buffer size.")); - } - - return; - } - queued_received_data_.PushBack(std::move(buffer)); - } -} - -void DataChannel::OnChannelReady(bool writable) { - writable_ = writable; - if (!writable) { - return; - } - - SendQueuedControlMessages(); - SendQueuedDataMessages(); - UpdateState(); -} - -void DataChannel::CloseAbruptlyWithError(RTCError error) { - if (state_ == kClosed) { - return; - } - - if (connected_to_provider_) { - DisconnectFromProvider(); - } - - // Closing abruptly means any queued data gets thrown away. - queued_send_data_.Clear(); - buffered_amount_ = 0; - queued_control_data_.Clear(); - - // Still go to "kClosing" before "kClosed", since observers may be expecting - // that. - SetState(kClosing); - error_ = std::move(error); - SetState(kClosed); -} - -void DataChannel::CloseAbruptlyWithDataChannelFailure( - const std::string& message) { - RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, message); - error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); - CloseAbruptlyWithError(std::move(error)); -} - -void DataChannel::UpdateState() { - // UpdateState determines what to do from a few state variables. Include - // all conditions required for each state transition here for - // clarity. 
OnChannelReady(true) will send any queued data and then invoke - // UpdateState(). - switch (state_) { - case kConnecting: { - if (send_ssrc_set_ == receive_ssrc_set_) { - if (data_channel_type_ == cricket::DCT_RTP && !connected_to_provider_) { - connected_to_provider_ = provider_->ConnectDataChannel(this); - } - if (connected_to_provider_) { - if (handshake_state_ == kHandshakeShouldSendOpen) { - rtc::CopyOnWriteBuffer payload; - WriteDataChannelOpenMessage(label_, config_, &payload); - SendControlMessage(payload); - } else if (handshake_state_ == kHandshakeShouldSendAck) { - rtc::CopyOnWriteBuffer payload; - WriteDataChannelOpenAckMessage(&payload); - SendControlMessage(payload); - } - if (writable_ && (handshake_state_ == kHandshakeReady || - handshake_state_ == kHandshakeWaitingForAck)) { - SetState(kOpen); - // If we have received buffers before the channel got writable. - // Deliver them now. - DeliverQueuedReceivedData(); - } - } - } - break; - } - case kOpen: { - break; - } - case kClosing: { - // Wait for all queued data to be sent before beginning the closing - // procedure. - if (queued_send_data_.Empty() && queued_control_data_.Empty()) { - if (data_channel_type_ == cricket::DCT_RTP) { - // For RTP data channels, we can go to "closed" after we finish - // sending data and the send/recv SSRCs are unset. - if (connected_to_provider_) { - DisconnectFromProvider(); - } - if (!send_ssrc_set_ && !receive_ssrc_set_) { - SetState(kClosed); - } - } else { - // For SCTP data channels, we need to wait for the closing procedure - // to complete; after calling RemoveSctpDataStream, - // OnClosingProcedureComplete will end up called asynchronously - // afterwards. 
- if (connected_to_provider_ && !started_closing_procedure_ && - config_.id >= 0) { - started_closing_procedure_ = true; - provider_->RemoveSctpDataStream(config_.id); - } - } - } - break; - } - case kClosed: - break; - } -} - -void DataChannel::SetState(DataState state) { - if (state_ == state) { - return; - } - - state_ = state; - if (observer_) { - observer_->OnStateChange(); - } - if (state_ == kOpen) { - SignalOpened(this); - } else if (state_ == kClosed) { - SignalClosed(this); - } -} - -void DataChannel::DisconnectFromProvider() { - if (!connected_to_provider_) - return; - - provider_->DisconnectDataChannel(this); - connected_to_provider_ = false; -} - -void DataChannel::DeliverQueuedReceivedData() { - if (!observer_) { - return; - } - - while (!queued_received_data_.Empty()) { - std::unique_ptr buffer = queued_received_data_.PopFront(); - ++messages_received_; - bytes_received_ += buffer->size(); - observer_->OnMessage(*buffer); - } -} - -void DataChannel::SendQueuedDataMessages() { - if (queued_send_data_.Empty()) { - return; - } - - RTC_DCHECK(state_ == kOpen || state_ == kClosing); - - while (!queued_send_data_.Empty()) { - std::unique_ptr buffer = queued_send_data_.PopFront(); - if (!SendDataMessage(*buffer, false)) { - // Return the message to the front of the queue if sending is aborted. - queued_send_data_.PushFront(std::move(buffer)); - break; - } - } -} - -bool DataChannel::SendDataMessage(const DataBuffer& buffer, - bool queue_if_blocked) { - cricket::SendDataParams send_params; - - if (IsSctpLike(data_channel_type_)) { - send_params.ordered = config_.ordered; - // Send as ordered if it is still going through OPEN/ACK signaling. - if (handshake_state_ != kHandshakeReady && !config_.ordered) { - send_params.ordered = true; - RTC_LOG(LS_VERBOSE) - << "Sending data as ordered for unordered DataChannel " - "because the OPEN_ACK message has not been received."; - } - - send_params.max_rtx_count = - config_.maxRetransmits ? 
*config_.maxRetransmits : -1; - send_params.max_rtx_ms = - config_.maxRetransmitTime ? *config_.maxRetransmitTime : -1; - send_params.sid = config_.id; - } else { - send_params.ssrc = send_ssrc_; - } - send_params.type = buffer.binary ? cricket::DMT_BINARY : cricket::DMT_TEXT; - - cricket::SendDataResult send_result = cricket::SDR_SUCCESS; - bool success = provider_->SendData(send_params, buffer.data, &send_result); - - if (success) { - ++messages_sent_; - bytes_sent_ += buffer.size(); - - RTC_DCHECK(buffered_amount_ >= buffer.size()); - buffered_amount_ -= buffer.size(); - if (observer_ && buffer.size() > 0) { - observer_->OnBufferedAmountChange(buffer.size()); - } - return true; - } - - if (!IsSctpLike(data_channel_type_)) { - return false; - } - - if (send_result == cricket::SDR_BLOCK) { - if (!queue_if_blocked || QueueSendDataMessage(buffer)) { - return false; - } - } - // Close the channel if the error is not SDR_BLOCK, or if queuing the - // message failed. - RTC_LOG(LS_ERROR) << "Closing the DataChannel due to a failure to send data, " - "send_result = " - << send_result; - CloseAbruptlyWithError( - RTCError(RTCErrorType::NETWORK_ERROR, "Failure to send data")); - - return false; -} - -bool DataChannel::QueueSendDataMessage(const DataBuffer& buffer) { - size_t start_buffered_amount = queued_send_data_.byte_count(); - if (start_buffered_amount + buffer.size() > kMaxQueuedSendDataBytes) { - RTC_LOG(LS_ERROR) << "Can't buffer any more data for the data channel."; - return false; - } - queued_send_data_.PushBack(std::make_unique(buffer)); - return true; -} - -void DataChannel::SendQueuedControlMessages() { - PacketQueue control_packets; - control_packets.Swap(&queued_control_data_); - - while (!control_packets.Empty()) { - std::unique_ptr buf = control_packets.PopFront(); - SendControlMessage(buf->data); - } -} - -void DataChannel::QueueControlMessage(const rtc::CopyOnWriteBuffer& buffer) { - queued_control_data_.PushBack(std::make_unique(buffer, true)); -} - 
-bool DataChannel::SendControlMessage(const rtc::CopyOnWriteBuffer& buffer) { - bool is_open_message = handshake_state_ == kHandshakeShouldSendOpen; - - RTC_DCHECK(IsSctpLike(data_channel_type_)); - RTC_DCHECK(writable_); - RTC_DCHECK_GE(config_.id, 0); - RTC_DCHECK(!is_open_message || !config_.negotiated); - - cricket::SendDataParams send_params; - send_params.sid = config_.id; - // Send data as ordered before we receive any message from the remote peer to - // make sure the remote peer will not receive any data before it receives the - // OPEN message. - send_params.ordered = config_.ordered || is_open_message; - send_params.type = cricket::DMT_CONTROL; - - cricket::SendDataResult send_result = cricket::SDR_SUCCESS; - bool retval = provider_->SendData(send_params, buffer, &send_result); - if (retval) { - RTC_LOG(LS_VERBOSE) << "Sent CONTROL message on channel " << config_.id; - - if (handshake_state_ == kHandshakeShouldSendAck) { - handshake_state_ = kHandshakeReady; - } else if (handshake_state_ == kHandshakeShouldSendOpen) { - handshake_state_ = kHandshakeWaitingForAck; - } - } else if (send_result == cricket::SDR_BLOCK) { - QueueControlMessage(buffer); - } else { - RTC_LOG(LS_ERROR) << "Closing the DataChannel due to a failure to send" - " the CONTROL message, send_result = " - << send_result; - CloseAbruptlyWithError(RTCError(RTCErrorType::NETWORK_ERROR, - "Failed to send a CONTROL message")); - } - return retval; -} - -// static -void DataChannel::ResetInternalIdAllocatorForTesting(int new_value) { - g_unique_id = new_value; -} - -} // namespace webrtc diff --git a/pc/data_channel.h b/pc/data_channel.h deleted file mode 100644 index 7c7d220640..0000000000 --- a/pc/data_channel.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright 2012 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef PC_DATA_CHANNEL_H_ -#define PC_DATA_CHANNEL_H_ - -#include -#include -#include -#include - -#include "api/data_channel_interface.h" -#include "api/priority.h" -#include "api/proxy.h" -#include "api/scoped_refptr.h" -#include "media/base/media_channel.h" -#include "pc/channel.h" -#include "rtc_base/async_invoker.h" -#include "rtc_base/third_party/sigslot/sigslot.h" - -namespace webrtc { - -class DataChannel; - -// TODO(deadbeef): Once RTP data channels go away, get rid of this and have -// DataChannel depend on SctpTransportInternal (pure virtual SctpTransport -// interface) instead. -class DataChannelProviderInterface { - public: - // Sends the data to the transport. - virtual bool SendData(const cricket::SendDataParams& params, - const rtc::CopyOnWriteBuffer& payload, - cricket::SendDataResult* result) = 0; - // Connects to the transport signals. - virtual bool ConnectDataChannel(DataChannel* data_channel) = 0; - // Disconnects from the transport signals. - virtual void DisconnectDataChannel(DataChannel* data_channel) = 0; - // Adds the data channel SID to the transport for SCTP. - virtual void AddSctpDataStream(int sid) = 0; - // Begins the closing procedure by sending an outgoing stream reset. Still - // need to wait for callbacks to tell when this completes. - virtual void RemoveSctpDataStream(int sid) = 0; - // Returns true if the transport channel is ready to send data. - virtual bool ReadyToSendData() const = 0; - - protected: - virtual ~DataChannelProviderInterface() {} -}; - -struct InternalDataChannelInit : public DataChannelInit { - enum OpenHandshakeRole { kOpener, kAcker, kNone }; - // The default role is kOpener because the default |negotiated| is false. 
- InternalDataChannelInit() : open_handshake_role(kOpener) {} - explicit InternalDataChannelInit(const DataChannelInit& base); - OpenHandshakeRole open_handshake_role; -}; - -// Helper class to allocate unique IDs for SCTP DataChannels -class SctpSidAllocator { - public: - // Gets the first unused odd/even id based on the DTLS role. If |role| is - // SSL_CLIENT, the allocated id starts from 0 and takes even numbers; - // otherwise, the id starts from 1 and takes odd numbers. - // Returns false if no ID can be allocated. - bool AllocateSid(rtc::SSLRole role, int* sid); - - // Attempts to reserve a specific sid. Returns false if it's unavailable. - bool ReserveSid(int sid); - - // Indicates that |sid| isn't in use any more, and is thus available again. - void ReleaseSid(int sid); - - private: - // Checks if |sid| is available to be assigned to a new SCTP data channel. - bool IsSidAvailable(int sid) const; - - std::set used_sids_; -}; - -// DataChannel is a an implementation of the DataChannelInterface based on -// libjingle's data engine. It provides an implementation of unreliable or -// reliabledata channels. Currently this class is specifically designed to use -// both RtpDataChannel and SctpTransport. - -// DataChannel states: -// kConnecting: The channel has been created the transport might not yet be -// ready. -// kOpen: The channel have a local SSRC set by a call to UpdateSendSsrc -// and a remote SSRC set by call to UpdateReceiveSsrc and the transport -// has been writable once. -// kClosing: DataChannelInterface::Close has been called or UpdateReceiveSsrc -// has been called with SSRC==0 -// kClosed: Both UpdateReceiveSsrc and UpdateSendSsrc has been called with -// SSRC==0. -// -// How the closing procedure works for SCTP: -// 1. Alice calls Close(), state changes to kClosing. -// 2. Alice finishes sending any queued data. -// 3. Alice calls RemoveSctpDataStream, sends outgoing stream reset. -// 4. 
Bob receives incoming stream reset; OnClosingProcedureStartedRemotely -// called. -// 5. Bob sends outgoing stream reset. 6. Alice receives incoming reset, -// Bob receives acknowledgement. Both receive OnClosingProcedureComplete -// callback and transition to kClosed. -class DataChannel : public DataChannelInterface, public sigslot::has_slots<> { - public: - static rtc::scoped_refptr Create( - DataChannelProviderInterface* provider, - cricket::DataChannelType dct, - const std::string& label, - const InternalDataChannelInit& config); - - static bool IsSctpLike(cricket::DataChannelType type); - - virtual void RegisterObserver(DataChannelObserver* observer); - virtual void UnregisterObserver(); - - virtual std::string label() const { return label_; } - virtual bool reliable() const; - virtual bool ordered() const { return config_.ordered; } - // Backwards compatible accessors - virtual uint16_t maxRetransmitTime() const { - return config_.maxRetransmitTime ? *config_.maxRetransmitTime - : static_cast(-1); - } - virtual uint16_t maxRetransmits() const { - return config_.maxRetransmits ? *config_.maxRetransmits - : static_cast(-1); - } - virtual absl::optional maxPacketLifeTime() const { - return config_.maxRetransmitTime; - } - virtual absl::optional maxRetransmitsOpt() const { - return config_.maxRetransmits; - } - virtual std::string protocol() const { return config_.protocol; } - virtual bool negotiated() const { return config_.negotiated; } - virtual int id() const { return config_.id; } - virtual Priority priority() const { - return config_.priority ? 
*config_.priority : Priority::kLow; - } - virtual int internal_id() const { return internal_id_; } - virtual uint64_t buffered_amount() const; - virtual void Close(); - virtual DataState state() const { return state_; } - virtual RTCError error() const; - virtual uint32_t messages_sent() const { return messages_sent_; } - virtual uint64_t bytes_sent() const { return bytes_sent_; } - virtual uint32_t messages_received() const { return messages_received_; } - virtual uint64_t bytes_received() const { return bytes_received_; } - virtual bool Send(const DataBuffer& buffer); - - // Close immediately, ignoring any queued data or closing procedure. - // This is called for RTP data channels when SDP indicates a channel should - // be removed, or SCTP data channels when the underlying SctpTransport is - // being destroyed. - // It is also called by the PeerConnection if SCTP ID assignment fails. - void CloseAbruptlyWithError(RTCError error); - // Specializations of CloseAbruptlyWithError - void CloseAbruptlyWithDataChannelFailure(const std::string& message); - void CloseAbruptlyWithSctpCauseCode(const std::string& message, - uint16_t cause_code); - - // Called when the channel's ready to use. That can happen when the - // underlying DataMediaChannel becomes ready, or when this channel is a new - // stream on an existing DataMediaChannel, and we've finished negotiation. - void OnChannelReady(bool writable); - - // Slots for provider to connect signals to. - void OnDataReceived(const cricket::ReceiveDataParams& params, - const rtc::CopyOnWriteBuffer& payload); - - /******************************************** - * The following methods are for SCTP only. * - ********************************************/ - - // Sets the SCTP sid and adds to transport layer if not set yet. Should only - // be called once. - void SetSctpSid(int sid); - // The remote side started the closing procedure by resetting its outgoing - // stream (our incoming stream). Sets state to kClosing. 
- void OnClosingProcedureStartedRemotely(int sid); - // The closing procedure is complete; both incoming and outgoing stream - // resets are done and the channel can transition to kClosed. Called - // asynchronously after RemoveSctpDataStream. - void OnClosingProcedureComplete(int sid); - // Called when the transport channel is created. - // Only needs to be called for SCTP data channels. - void OnTransportChannelCreated(); - // Called when the transport channel is unusable. - // This method makes sure the DataChannel is disconnected and changes state - // to kClosed. - void OnTransportChannelClosed(); - - /******************************************* - * The following methods are for RTP only. * - *******************************************/ - - // The remote peer requested that this channel should be closed. - void RemotePeerRequestClose(); - // Set the SSRC this channel should use to send data on the - // underlying data engine. |send_ssrc| == 0 means that the channel is no - // longer part of the session negotiation. - void SetSendSsrc(uint32_t send_ssrc); - // Set the SSRC this channel should use to receive data from the - // underlying data engine. - void SetReceiveSsrc(uint32_t receive_ssrc); - - cricket::DataChannelType data_channel_type() const { - return data_channel_type_; - } - - // Emitted when state transitions to kOpen. - sigslot::signal1 SignalOpened; - // Emitted when state transitions to kClosed. - // In the case of SCTP channels, this signal can be used to tell when the - // channel's sid is free. - sigslot::signal1 SignalClosed; - - // Reset the allocator for internal ID values for testing, so that - // the internal IDs generated are predictable. Test only. - static void ResetInternalIdAllocatorForTesting(int new_value); - - protected: - DataChannel(DataChannelProviderInterface* client, - cricket::DataChannelType dct, - const std::string& label); - virtual ~DataChannel(); - - private: - // A packet queue which tracks the total queued bytes. 
Queued packets are - // owned by this class. - class PacketQueue final { - public: - size_t byte_count() const { return byte_count_; } - - bool Empty() const; - - std::unique_ptr PopFront(); - - void PushFront(std::unique_ptr packet); - void PushBack(std::unique_ptr packet); - - void Clear(); - - void Swap(PacketQueue* other); - - private: - std::deque> packets_; - size_t byte_count_ = 0; - }; - - // The OPEN(_ACK) signaling state. - enum HandshakeState { - kHandshakeInit, - kHandshakeShouldSendOpen, - kHandshakeShouldSendAck, - kHandshakeWaitingForAck, - kHandshakeReady - }; - - bool Init(const InternalDataChannelInit& config); - void UpdateState(); - void SetState(DataState state); - void DisconnectFromProvider(); - - void DeliverQueuedReceivedData(); - - void SendQueuedDataMessages(); - bool SendDataMessage(const DataBuffer& buffer, bool queue_if_blocked); - bool QueueSendDataMessage(const DataBuffer& buffer); - - void SendQueuedControlMessages(); - void QueueControlMessage(const rtc::CopyOnWriteBuffer& buffer); - bool SendControlMessage(const rtc::CopyOnWriteBuffer& buffer); - - const int internal_id_; - std::string label_; - InternalDataChannelInit config_; - DataChannelObserver* observer_; - DataState state_; - RTCError error_; - uint32_t messages_sent_; - uint64_t bytes_sent_; - uint32_t messages_received_; - uint64_t bytes_received_; - // Number of bytes of data that have been queued using Send(). Increased - // before each transport send and decreased after each successful send. - uint64_t buffered_amount_; - cricket::DataChannelType data_channel_type_; - DataChannelProviderInterface* provider_; - HandshakeState handshake_state_; - bool connected_to_provider_; - bool send_ssrc_set_; - bool receive_ssrc_set_; - bool writable_; - // Did we already start the graceful SCTP closing procedure? 
- bool started_closing_procedure_ = false; - uint32_t send_ssrc_; - uint32_t receive_ssrc_; - // Control messages that always have to get sent out before any queued - // data. - PacketQueue queued_control_data_; - PacketQueue queued_received_data_; - PacketQueue queued_send_data_; - rtc::AsyncInvoker invoker_; -}; - -// Define proxy for DataChannelInterface. -BEGIN_SIGNALING_PROXY_MAP(DataChannel) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_METHOD1(void, RegisterObserver, DataChannelObserver*) -PROXY_METHOD0(void, UnregisterObserver) -PROXY_CONSTMETHOD0(std::string, label) -PROXY_CONSTMETHOD0(bool, reliable) -PROXY_CONSTMETHOD0(bool, ordered) -PROXY_CONSTMETHOD0(uint16_t, maxRetransmitTime) -PROXY_CONSTMETHOD0(uint16_t, maxRetransmits) -PROXY_CONSTMETHOD0(absl::optional, maxRetransmitsOpt) -PROXY_CONSTMETHOD0(absl::optional, maxPacketLifeTime) -PROXY_CONSTMETHOD0(std::string, protocol) -PROXY_CONSTMETHOD0(bool, negotiated) -PROXY_CONSTMETHOD0(int, id) -PROXY_CONSTMETHOD0(Priority, priority) -PROXY_CONSTMETHOD0(DataState, state) -PROXY_CONSTMETHOD0(RTCError, error) -PROXY_CONSTMETHOD0(uint32_t, messages_sent) -PROXY_CONSTMETHOD0(uint64_t, bytes_sent) -PROXY_CONSTMETHOD0(uint32_t, messages_received) -PROXY_CONSTMETHOD0(uint64_t, bytes_received) -PROXY_CONSTMETHOD0(uint64_t, buffered_amount) -PROXY_METHOD0(void, Close) -PROXY_METHOD1(bool, Send, const DataBuffer&) -END_PROXY_MAP() - -} // namespace webrtc - -#endif // PC_DATA_CHANNEL_H_ diff --git a/pc/data_channel_controller.cc b/pc/data_channel_controller.cc index e9ea742c44..7a6fd3c168 100644 --- a/pc/data_channel_controller.cc +++ b/pc/data_channel_controller.cc @@ -10,103 +10,68 @@ #include "pc/data_channel_controller.h" +#include #include +#include "absl/algorithm/container.h" +#include "absl/types/optional.h" +#include "api/peer_connection_interface.h" +#include "api/rtc_error.h" #include "pc/peer_connection.h" #include "pc/sctp_utils.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" +#include 
"rtc_base/task_utils/to_queued_task.h" namespace webrtc { bool DataChannelController::HasDataChannels() const { RTC_DCHECK_RUN_ON(signaling_thread()); - return !rtp_data_channels_.empty() || !sctp_data_channels_.empty(); + return !sctp_data_channels_.empty(); } -bool DataChannelController::SendData(const cricket::SendDataParams& params, +bool DataChannelController::SendData(int sid, + const SendDataParams& params, const rtc::CopyOnWriteBuffer& payload, cricket::SendDataResult* result) { - // RTC_DCHECK_RUN_ON(signaling_thread()); - if (data_channel_transport()) { - SendDataParams send_params; - send_params.type = ToWebrtcDataMessageType(params.type); - send_params.ordered = params.ordered; - if (params.max_rtx_count >= 0) { - send_params.max_rtx_count = params.max_rtx_count; - } else if (params.max_rtx_ms >= 0) { - send_params.max_rtx_ms = params.max_rtx_ms; - } - - RTCError error = network_thread()->Invoke( - RTC_FROM_HERE, [this, params, send_params, payload] { - return data_channel_transport()->SendData(params.sid, send_params, - payload); - }); - - if (error.ok()) { - *result = cricket::SendDataResult::SDR_SUCCESS; - return true; - } else if (error.type() == RTCErrorType::RESOURCE_EXHAUSTED) { - // SCTP transport uses RESOURCE_EXHAUSTED when it's blocked. - // TODO(mellem): Stop using RTCError here and get rid of the mapping. 
- *result = cricket::SendDataResult::SDR_BLOCK; - return false; - } - *result = cricket::SendDataResult::SDR_ERROR; - return false; - } else if (rtp_data_channel()) { - return rtp_data_channel()->SendData(params, payload, result); - } + if (data_channel_transport()) + return DataChannelSendData(sid, params, payload, result); RTC_LOG(LS_ERROR) << "SendData called before transport is ready"; return false; } bool DataChannelController::ConnectDataChannel( - DataChannel* webrtc_data_channel) { + SctpDataChannel* webrtc_data_channel) { RTC_DCHECK_RUN_ON(signaling_thread()); - if (!rtp_data_channel() && !data_channel_transport()) { + if (!data_channel_transport()) { // Don't log an error here, because DataChannels are expected to call // ConnectDataChannel in this state. It's the only way to initially tell // whether or not the underlying transport is ready. return false; } - if (data_channel_transport()) { - SignalDataChannelTransportWritable_s.connect(webrtc_data_channel, - &DataChannel::OnChannelReady); - SignalDataChannelTransportReceivedData_s.connect( - webrtc_data_channel, &DataChannel::OnDataReceived); - SignalDataChannelTransportChannelClosing_s.connect( - webrtc_data_channel, &DataChannel::OnClosingProcedureStartedRemotely); - SignalDataChannelTransportChannelClosed_s.connect( - webrtc_data_channel, &DataChannel::OnClosingProcedureComplete); - } - if (rtp_data_channel()) { - rtp_data_channel()->SignalReadyToSendData.connect( - webrtc_data_channel, &DataChannel::OnChannelReady); - rtp_data_channel()->SignalDataReceived.connect( - webrtc_data_channel, &DataChannel::OnDataReceived); - } + SignalDataChannelTransportWritable_s.connect( + webrtc_data_channel, &SctpDataChannel::OnTransportReady); + SignalDataChannelTransportReceivedData_s.connect( + webrtc_data_channel, &SctpDataChannel::OnDataReceived); + SignalDataChannelTransportChannelClosing_s.connect( + webrtc_data_channel, &SctpDataChannel::OnClosingProcedureStartedRemotely); + 
SignalDataChannelTransportChannelClosed_s.connect( + webrtc_data_channel, &SctpDataChannel::OnClosingProcedureComplete); return true; } void DataChannelController::DisconnectDataChannel( - DataChannel* webrtc_data_channel) { + SctpDataChannel* webrtc_data_channel) { RTC_DCHECK_RUN_ON(signaling_thread()); - if (!rtp_data_channel() && !data_channel_transport()) { + if (!data_channel_transport()) { RTC_LOG(LS_ERROR) - << "DisconnectDataChannel called when rtp_data_channel_ and " - "sctp_transport_ are NULL."; + << "DisconnectDataChannel called when sctp_transport_ is NULL."; return; } - if (data_channel_transport()) { - SignalDataChannelTransportWritable_s.disconnect(webrtc_data_channel); - SignalDataChannelTransportReceivedData_s.disconnect(webrtc_data_channel); - SignalDataChannelTransportChannelClosing_s.disconnect(webrtc_data_channel); - SignalDataChannelTransportChannelClosed_s.disconnect(webrtc_data_channel); - } - if (rtp_data_channel()) { - rtp_data_channel()->SignalReadyToSendData.disconnect(webrtc_data_channel); - rtp_data_channel()->SignalDataReceived.disconnect(webrtc_data_channel); - } + SignalDataChannelTransportWritable_s.disconnect(webrtc_data_channel); + SignalDataChannelTransportReceivedData_s.disconnect(webrtc_data_channel); + SignalDataChannelTransportChannelClosing_s.disconnect(webrtc_data_channel); + SignalDataChannelTransportChannelClosed_s.disconnect(webrtc_data_channel); } void DataChannelController::AddSctpDataStream(int sid) { @@ -131,8 +96,7 @@ void DataChannelController::RemoveSctpDataStream(int sid) { bool DataChannelController::ReadyToSendData() const { RTC_DCHECK_RUN_ON(signaling_thread()); - return (rtp_data_channel() && rtp_data_channel()->ready_to_send_data()) || - (data_channel_transport() && data_channel_transport_ready_to_send_); + return (data_channel_transport() && data_channel_transport_ready_to_send_); } void DataChannelController::OnDataReceived( @@ -142,62 +106,83 @@ void DataChannelController::OnDataReceived( 
RTC_DCHECK_RUN_ON(network_thread()); cricket::ReceiveDataParams params; params.sid = channel_id; - params.type = ToCricketDataMessageType(type); - data_channel_transport_invoker_->AsyncInvoke( - RTC_FROM_HERE, signaling_thread(), [this, params, buffer] { - RTC_DCHECK_RUN_ON(signaling_thread()); - if (!HandleOpenMessage_s(params, buffer)) { - SignalDataChannelTransportReceivedData_s(params, buffer); + params.type = type; + signaling_thread()->PostTask( + ToQueuedTask([self = weak_factory_.GetWeakPtr(), params, buffer] { + if (self) { + RTC_DCHECK_RUN_ON(self->signaling_thread()); + // TODO(bugs.webrtc.org/11547): The data being received should be + // delivered on the network thread. The way HandleOpenMessage_s works + // right now is that it's called for all types of buffers and operates + // as a selector function. Change this so that it's only called for + // buffers that it should be able to handle. Once we do that, we can + // deliver all other buffers on the network thread (change + // SignalDataChannelTransportReceivedData_s to + // SignalDataChannelTransportReceivedData_n). 
+ if (!self->HandleOpenMessage_s(params, buffer)) { + self->SignalDataChannelTransportReceivedData_s(params, buffer); + } } - }); + })); } void DataChannelController::OnChannelClosing(int channel_id) { RTC_DCHECK_RUN_ON(network_thread()); - data_channel_transport_invoker_->AsyncInvoke( - RTC_FROM_HERE, signaling_thread(), [this, channel_id] { - RTC_DCHECK_RUN_ON(signaling_thread()); - SignalDataChannelTransportChannelClosing_s(channel_id); - }); + signaling_thread()->PostTask( + ToQueuedTask([self = weak_factory_.GetWeakPtr(), channel_id] { + if (self) { + RTC_DCHECK_RUN_ON(self->signaling_thread()); + self->SignalDataChannelTransportChannelClosing_s(channel_id); + } + })); } void DataChannelController::OnChannelClosed(int channel_id) { RTC_DCHECK_RUN_ON(network_thread()); - data_channel_transport_invoker_->AsyncInvoke( - RTC_FROM_HERE, signaling_thread(), [this, channel_id] { - RTC_DCHECK_RUN_ON(signaling_thread()); - SignalDataChannelTransportChannelClosed_s(channel_id); - }); + signaling_thread()->PostTask( + ToQueuedTask([self = weak_factory_.GetWeakPtr(), channel_id] { + if (self) { + RTC_DCHECK_RUN_ON(self->signaling_thread()); + self->SignalDataChannelTransportChannelClosed_s(channel_id); + } + })); } void DataChannelController::OnReadyToSend() { RTC_DCHECK_RUN_ON(network_thread()); - data_channel_transport_invoker_->AsyncInvoke( - RTC_FROM_HERE, signaling_thread(), [this] { - RTC_DCHECK_RUN_ON(signaling_thread()); - data_channel_transport_ready_to_send_ = true; - SignalDataChannelTransportWritable_s( - data_channel_transport_ready_to_send_); - }); + signaling_thread()->PostTask( + ToQueuedTask([self = weak_factory_.GetWeakPtr()] { + if (self) { + RTC_DCHECK_RUN_ON(self->signaling_thread()); + self->data_channel_transport_ready_to_send_ = true; + self->SignalDataChannelTransportWritable_s( + self->data_channel_transport_ready_to_send_); + } + })); } -void DataChannelController::OnTransportClosed() { +void DataChannelController::OnTransportClosed(RTCError 
error) { RTC_DCHECK_RUN_ON(network_thread()); - data_channel_transport_invoker_->AsyncInvoke( - RTC_FROM_HERE, signaling_thread(), [this] { - RTC_DCHECK_RUN_ON(signaling_thread()); - OnTransportChannelClosed(); - }); + signaling_thread()->PostTask( + ToQueuedTask([self = weak_factory_.GetWeakPtr(), error] { + if (self) { + RTC_DCHECK_RUN_ON(self->signaling_thread()); + self->OnTransportChannelClosed(error); + } + })); } void DataChannelController::SetupDataChannelTransport_n() { RTC_DCHECK_RUN_ON(network_thread()); - data_channel_transport_invoker_ = std::make_unique(); + + // There's a new data channel transport. This needs to be signaled to the + // |sctp_data_channels_| so that they can reopen and reconnect. This is + // necessary when bundling is applied. + NotifyDataChannelsOfTransportCreated(); } void DataChannelController::TeardownDataChannelTransport_n() { RTC_DCHECK_RUN_ON(network_thread()); - data_channel_transport_invoker_ = nullptr; if (data_channel_transport()) { data_channel_transport()->SetDataSink(nullptr); } @@ -219,29 +204,33 @@ void DataChannelController::OnTransportChanged( // There's a new data channel transport. This needs to be signaled to the // |sctp_data_channels_| so that they can reopen and reconnect. This is // necessary when bundling is applied. 
- data_channel_transport_invoker_->AsyncInvoke( - RTC_FROM_HERE, signaling_thread(), [this] { - RTC_DCHECK_RUN_ON(signaling_thread()); - for (const auto& channel : sctp_data_channels_) { - channel->OnTransportChannelCreated(); - } - }); + NotifyDataChannelsOfTransportCreated(); } } } +std::vector DataChannelController::GetDataChannelStats() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + std::vector stats; + stats.reserve(sctp_data_channels_.size()); + for (const auto& channel : sctp_data_channels_) + stats.push_back(channel->GetStats()); + return stats; +} + bool DataChannelController::HandleOpenMessage_s( const cricket::ReceiveDataParams& params, const rtc::CopyOnWriteBuffer& buffer) { - if (params.type == cricket::DMT_CONTROL && IsOpenMessage(buffer)) { + if (params.type == DataMessageType::kControl && IsOpenMessage(buffer)) { // Received OPEN message; parse and signal that a new data channel should // be created. std::string label; InternalDataChannelInit config; - config.id = params.ssrc; + config.id = params.sid; if (!ParseDataChannelOpenMessage(buffer, &label, &config)) { - RTC_LOG(LS_WARNING) << "Failed to parse the OPEN message for ssrc " - << params.ssrc; + RTC_LOG(LS_WARNING) << "Failed to parse the OPEN message for sid " + << params.sid; return true; } config.open_handshake_role = InternalDataChannelInit::kAcker; @@ -254,77 +243,69 @@ bool DataChannelController::HandleOpenMessage_s( void DataChannelController::OnDataChannelOpenMessage( const std::string& label, const InternalDataChannelInit& config) { - rtc::scoped_refptr channel( - InternalCreateDataChannel(label, &config)); + rtc::scoped_refptr channel( + InternalCreateDataChannelWithProxy(label, &config)); if (!channel.get()) { RTC_LOG(LS_ERROR) << "Failed to create DataChannel from the OPEN message."; return; } - rtc::scoped_refptr proxy_channel = - DataChannelProxy::Create(signaling_thread(), channel); - pc_->Observer()->OnDataChannel(std::move(proxy_channel)); + 
pc_->Observer()->OnDataChannel(std::move(channel)); pc_->NoteDataAddedEvent(); } -rtc::scoped_refptr -DataChannelController::InternalCreateDataChannel( +rtc::scoped_refptr +DataChannelController::InternalCreateDataChannelWithProxy( const std::string& label, const InternalDataChannelInit* config) { RTC_DCHECK_RUN_ON(signaling_thread()); if (pc_->IsClosed()) { return nullptr; } - if (data_channel_type_ == cricket::DCT_NONE) { - RTC_LOG(LS_ERROR) - << "InternalCreateDataChannel: Data is not supported in this call."; - return nullptr; + + rtc::scoped_refptr channel = + InternalCreateSctpDataChannel(label, config); + if (channel) { + return SctpDataChannel::CreateProxy(channel); } + + return nullptr; +} + +rtc::scoped_refptr +DataChannelController::InternalCreateSctpDataChannel( + const std::string& label, + const InternalDataChannelInit* config) { + RTC_DCHECK_RUN_ON(signaling_thread()); InternalDataChannelInit new_config = config ? (*config) : InternalDataChannelInit(); - if (DataChannel::IsSctpLike(data_channel_type_)) { - if (new_config.id < 0) { - rtc::SSLRole role; - if ((pc_->GetSctpSslRole(&role)) && - !sid_allocator_.AllocateSid(role, &new_config.id)) { - RTC_LOG(LS_ERROR) - << "No id can be allocated for the SCTP data channel."; - return nullptr; - } - } else if (!sid_allocator_.ReserveSid(new_config.id)) { - RTC_LOG(LS_ERROR) << "Failed to create a SCTP data channel " - "because the id is already in use or out of range."; + if (new_config.id < 0) { + rtc::SSLRole role; + if ((pc_->GetSctpSslRole(&role)) && + !sid_allocator_.AllocateSid(role, &new_config.id)) { + RTC_LOG(LS_ERROR) << "No id can be allocated for the SCTP data channel."; return nullptr; } + } else if (!sid_allocator_.ReserveSid(new_config.id)) { + RTC_LOG(LS_ERROR) << "Failed to create a SCTP data channel " + "because the id is already in use or out of range."; + return nullptr; } - - rtc::scoped_refptr channel( - DataChannel::Create(this, data_channel_type(), label, new_config)); + 
rtc::scoped_refptr channel(SctpDataChannel::Create( + this, label, new_config, signaling_thread(), network_thread())); if (!channel) { sid_allocator_.ReleaseSid(new_config.id); return nullptr; } - - if (channel->data_channel_type() == cricket::DCT_RTP) { - if (rtp_data_channels_.find(channel->label()) != rtp_data_channels_.end()) { - RTC_LOG(LS_ERROR) << "DataChannel with label " << channel->label() - << " already exists."; - return nullptr; - } - rtp_data_channels_[channel->label()] = channel; - } else { - RTC_DCHECK(DataChannel::IsSctpLike(data_channel_type_)); - sctp_data_channels_.push_back(channel); - channel->SignalClosed.connect(pc_, - &PeerConnection::OnSctpDataChannelClosed); - } - SignalDataChannelCreated_(channel.get()); + sctp_data_channels_.push_back(channel); + channel->SignalClosed.connect(pc_, &PeerConnection::OnSctpDataChannelClosed); + SignalSctpDataChannelCreated_(channel.get()); return channel; } void DataChannelController::AllocateSctpSids(rtc::SSLRole role) { RTC_DCHECK_RUN_ON(signaling_thread()); - std::vector> channels_to_close; + std::vector> channels_to_close; for (const auto& channel : sctp_data_channels_) { if (channel->id() < 0) { int sid; @@ -343,7 +324,7 @@ void DataChannelController::AllocateSctpSids(rtc::SSLRole role) { } } -void DataChannelController::OnSctpDataChannelClosed(DataChannel* channel) { +void DataChannelController::OnSctpDataChannelClosed(SctpDataChannel* channel) { RTC_DCHECK_RUN_ON(signaling_thread()); for (auto it = sctp_data_channels_.begin(); it != sctp_data_channels_.end(); ++it) { @@ -358,35 +339,29 @@ void DataChannelController::OnSctpDataChannelClosed(DataChannel* channel) { sctp_data_channels_to_free_.push_back(*it); sctp_data_channels_.erase(it); signaling_thread()->PostTask( - RTC_FROM_HERE, [self = weak_factory_.GetWeakPtr()] { + ToQueuedTask([self = weak_factory_.GetWeakPtr()] { if (self) { RTC_DCHECK_RUN_ON(self->signaling_thread()); self->sctp_data_channels_to_free_.clear(); } - }); + })); return; } } } 
-void DataChannelController::OnTransportChannelClosed() { +void DataChannelController::OnTransportChannelClosed(RTCError error) { RTC_DCHECK_RUN_ON(signaling_thread()); - // Use a temporary copy of the RTP/SCTP DataChannel list because the + // Use a temporary copy of the SCTP DataChannel list because the // DataChannel may callback to us and try to modify the list. - std::map> temp_rtp_dcs; - temp_rtp_dcs.swap(rtp_data_channels_); - for (const auto& kv : temp_rtp_dcs) { - kv.second->OnTransportChannelClosed(); - } - - std::vector> temp_sctp_dcs; + std::vector> temp_sctp_dcs; temp_sctp_dcs.swap(sctp_data_channels_); for (const auto& channel : temp_sctp_dcs) { - channel->OnTransportChannelClosed(); + channel->OnTransportChannelClosed(error); } } -DataChannel* DataChannelController::FindDataChannelBySid(int sid) const { +SctpDataChannel* DataChannelController::FindDataChannelBySid(int sid) const { RTC_DCHECK_RUN_ON(signaling_thread()); for (const auto& channel : sctp_data_channels_) { if (channel->id() == sid) { @@ -396,96 +371,60 @@ DataChannel* DataChannelController::FindDataChannelBySid(int sid) const { return nullptr; } -void DataChannelController::UpdateLocalRtpDataChannels( - const cricket::StreamParamsVec& streams) { - std::vector existing_channels; - - RTC_DCHECK_RUN_ON(signaling_thread()); - // Find new and active data channels. - for (const cricket::StreamParams& params : streams) { - // |it->sync_label| is actually the data channel label. The reason is that - // we use the same naming of data channels as we do for - // MediaStreams and Tracks. - // For MediaStreams, the sync_label is the MediaStream label and the - // track label is the same as |streamid|. 
- const std::string& channel_label = params.first_stream_id(); - auto data_channel_it = rtp_data_channels()->find(channel_label); - if (data_channel_it == rtp_data_channels()->end()) { - RTC_LOG(LS_ERROR) << "channel label not found"; - continue; - } - // Set the SSRC the data channel should use for sending. - data_channel_it->second->SetSendSsrc(params.first_ssrc()); - existing_channels.push_back(data_channel_it->first); - } - - UpdateClosingRtpDataChannels(existing_channels, true); +DataChannelTransportInterface* DataChannelController::data_channel_transport() + const { + // TODO(bugs.webrtc.org/11547): Only allow this accessor to be called on the + // network thread. + // RTC_DCHECK_RUN_ON(network_thread()); + return data_channel_transport_; } -void DataChannelController::UpdateRemoteRtpDataChannels( - const cricket::StreamParamsVec& streams) { - std::vector existing_channels; - - RTC_DCHECK_RUN_ON(signaling_thread()); - // Find new and active data channels. - for (const cricket::StreamParams& params : streams) { - // The data channel label is either the mslabel or the SSRC if the mslabel - // does not exist. Ex a=ssrc:444330170 mslabel:test1. - std::string label = params.first_stream_id().empty() - ? rtc::ToString(params.first_ssrc()) - : params.first_stream_id(); - auto data_channel_it = rtp_data_channels()->find(label); - if (data_channel_it == rtp_data_channels()->end()) { - // This is a new data channel. 
- CreateRemoteRtpDataChannel(label, params.first_ssrc()); - } else { - data_channel_it->second->SetReceiveSsrc(params.first_ssrc()); - } - existing_channels.push_back(label); - } - - UpdateClosingRtpDataChannels(existing_channels, false); +void DataChannelController::set_data_channel_transport( + DataChannelTransportInterface* transport) { + RTC_DCHECK_RUN_ON(network_thread()); + data_channel_transport_ = transport; } -void DataChannelController::UpdateClosingRtpDataChannels( - const std::vector& active_channels, - bool is_local_update) { - auto it = rtp_data_channels_.begin(); - while (it != rtp_data_channels_.end()) { - DataChannel* data_channel = it->second; - if (absl::c_linear_search(active_channels, data_channel->label())) { - ++it; - continue; - } +bool DataChannelController::DataChannelSendData( + int sid, + const SendDataParams& params, + const rtc::CopyOnWriteBuffer& payload, + cricket::SendDataResult* result) { + // TODO(bugs.webrtc.org/11547): Expect method to be called on the network + // thread instead. Remove the Invoke() below and move assocated state to + // the network thread. + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(data_channel_transport()); - if (is_local_update) { - data_channel->SetSendSsrc(0); - } else { - data_channel->RemotePeerRequestClose(); - } + RTCError error = network_thread()->Invoke( + RTC_FROM_HERE, [this, sid, params, payload] { + return data_channel_transport()->SendData(sid, params, payload); + }); - if (data_channel->state() == DataChannel::kClosed) { - rtp_data_channels_.erase(it); - it = rtp_data_channels_.begin(); - } else { - ++it; - } + if (error.ok()) { + *result = cricket::SendDataResult::SDR_SUCCESS; + return true; + } else if (error.type() == RTCErrorType::RESOURCE_EXHAUSTED) { + // SCTP transport uses RESOURCE_EXHAUSTED when it's blocked. + // TODO(mellem): Stop using RTCError here and get rid of the mapping. 
+ *result = cricket::SendDataResult::SDR_BLOCK; + return false; } + *result = cricket::SendDataResult::SDR_ERROR; + return false; } -void DataChannelController::CreateRemoteRtpDataChannel(const std::string& label, - uint32_t remote_ssrc) { - rtc::scoped_refptr channel( - InternalCreateDataChannel(label, nullptr)); - if (!channel.get()) { - RTC_LOG(LS_WARNING) << "Remote peer requested a DataChannel but" - "CreateDataChannel failed."; - return; - } - channel->SetReceiveSsrc(remote_ssrc); - rtc::scoped_refptr proxy_channel = - DataChannelProxy::Create(signaling_thread(), channel); - pc_->Observer()->OnDataChannel(std::move(proxy_channel)); +void DataChannelController::NotifyDataChannelsOfTransportCreated() { + RTC_DCHECK_RUN_ON(network_thread()); + signaling_thread()->PostTask( + ToQueuedTask([self = weak_factory_.GetWeakPtr()] { + if (self) { + RTC_DCHECK_RUN_ON(self->signaling_thread()); + for (const auto& channel : self->sctp_data_channels_) { + channel->OnTransportChannelCreated(); + } + } + })); } rtc::Thread* DataChannelController::network_thread() const { diff --git a/pc/data_channel_controller.h b/pc/data_channel_controller.h index 60bcbb32a8..7b1ff26690 100644 --- a/pc/data_channel_controller.h +++ b/pc/data_channel_controller.h @@ -11,20 +11,36 @@ #ifndef PC_DATA_CHANNEL_CONTROLLER_H_ #define PC_DATA_CHANNEL_CONTROLLER_H_ +#include + #include #include #include #include +#include "api/data_channel_interface.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/transport/data_channel_transport_interface.h" +#include "media/base/media_channel.h" +#include "media/base/media_engine.h" +#include "media/base/stream_params.h" #include "pc/channel.h" -#include "pc/data_channel.h" +#include "pc/data_channel_utils.h" +#include "pc/sctp_data_channel.h" +#include "rtc_base/checks.h" +#include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include 
"rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" #include "rtc_base/weak_ptr.h" namespace webrtc { class PeerConnection; -class DataChannelController : public DataChannelProviderInterface, +class DataChannelController : public SctpDataChannelProviderInterface, public DataChannelSink { public: explicit DataChannelController(PeerConnection* pc) : pc_(pc) {} @@ -35,12 +51,14 @@ class DataChannelController : public DataChannelProviderInterface, DataChannelController(DataChannelController&&) = delete; DataChannelController& operator=(DataChannelController&& other) = delete; - // Implements DataChannelProviderInterface. - bool SendData(const cricket::SendDataParams& params, + // Implements + // SctpDataChannelProviderInterface. + bool SendData(int sid, + const SendDataParams& params, const rtc::CopyOnWriteBuffer& payload, cricket::SendDataResult* result) override; - bool ConnectDataChannel(DataChannel* webrtc_data_channel) override; - void DisconnectDataChannel(DataChannel* webrtc_data_channel) override; + bool ConnectDataChannel(SctpDataChannel* webrtc_data_channel) override; + void DisconnectDataChannel(SctpDataChannel* webrtc_data_channel) override; void AddSctpDataStream(int sid) override; void RemoveSctpDataStream(int sid) override; bool ReadyToSendData() const override; @@ -52,7 +70,7 @@ class DataChannelController : public DataChannelProviderInterface, void OnChannelClosing(int channel_id) override; void OnChannelClosed(int channel_id) override; void OnReadyToSend() override; - void OnTransportClosed() override; + void OnTransportClosed(RTCError error) override; // Called from PeerConnection::SetupDataChannelTransport_n void SetupDataChannelTransport_n(); @@ -64,15 +82,18 @@ class DataChannelController : public DataChannelProviderInterface, void OnTransportChanged( DataChannelTransportInterface* data_channel_transport); + // Called from PeerConnection::GetDataChannelStats on the signaling thread. 
+ std::vector GetDataChannelStats() const; + // Creates channel and adds it to the collection of DataChannels that will - // be offered in a SessionDescription. - rtc::scoped_refptr InternalCreateDataChannel( + // be offered in a SessionDescription, and wraps it in a proxy object. + rtc::scoped_refptr InternalCreateDataChannelWithProxy( const std::string& label, const InternalDataChannelInit* config) /* RTC_RUN_ON(signaling_thread()) */; void AllocateSctpSids(rtc::SSLRole role); - DataChannel* FindDataChannelBySid(int sid) const; + SctpDataChannel* FindDataChannelBySid(int sid) const; // Checks if any data channel has been added. bool HasDataChannels() const; @@ -80,54 +101,26 @@ class DataChannelController : public DataChannelProviderInterface, RTC_DCHECK_RUN_ON(signaling_thread()); return !sctp_data_channels_.empty(); } - bool HasRtpDataChannels() const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return !rtp_data_channels_.empty(); - } - - void UpdateLocalRtpDataChannels(const cricket::StreamParamsVec& streams); - void UpdateRemoteRtpDataChannels(const cricket::StreamParamsVec& streams); // Accessors - cricket::DataChannelType data_channel_type() const { - return data_channel_type_; - } - void set_data_channel_type(cricket::DataChannelType type) { - data_channel_type_ = type; - } - cricket::RtpDataChannel* rtp_data_channel() const { - return rtp_data_channel_; - } - void set_rtp_data_channel(cricket::RtpDataChannel* channel) { - rtp_data_channel_ = channel; - } - DataChannelTransportInterface* data_channel_transport() const { - return data_channel_transport_; - } - void set_data_channel_transport(DataChannelTransportInterface* transport) { - data_channel_transport_ = transport; - } - const std::map>* - rtp_data_channels() const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return &rtp_data_channels_; - } - const std::vector>* sctp_data_channels() - const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return &sctp_data_channels_; - } + DataChannelTransportInterface* 
data_channel_transport() const; + void set_data_channel_transport(DataChannelTransportInterface* transport); - sigslot::signal1& SignalDataChannelCreated() { + sigslot::signal1& SignalSctpDataChannelCreated() { RTC_DCHECK_RUN_ON(signaling_thread()); - return SignalDataChannelCreated_; + return SignalSctpDataChannelCreated_; } // Called when the transport for the data channels is closed or destroyed. - void OnTransportChannelClosed(); + void OnTransportChannelClosed(RTCError error); - void OnSctpDataChannelClosed(DataChannel* channel); + void OnSctpDataChannelClosed(SctpDataChannel* channel); private: + rtc::scoped_refptr InternalCreateSctpDataChannel( + const std::string& label, + const InternalDataChannelInit* + config) /* RTC_RUN_ON(signaling_thread()) */; + // Parses and handles open messages. Returns true if the message is an open // message, false otherwise. bool HandleOpenMessage_s(const cricket::ReceiveDataParams& params, @@ -138,28 +131,19 @@ class DataChannelController : public DataChannelProviderInterface, const InternalDataChannelInit& config) RTC_RUN_ON(signaling_thread()); - void CreateRemoteRtpDataChannel(const std::string& label, - uint32_t remote_ssrc) - RTC_RUN_ON(signaling_thread()); + // Called from SendData when data_channel_transport() is true. + bool DataChannelSendData(int sid, + const SendDataParams& params, + const rtc::CopyOnWriteBuffer& payload, + cricket::SendDataResult* result); - void UpdateClosingRtpDataChannels( - const std::vector& active_channels, - bool is_local_update) RTC_RUN_ON(signaling_thread()); + // Called when all data channels need to be notified of a transport channel + // (calls OnTransportChannelCreated on the signaling thread). + void NotifyDataChannelsOfTransportCreated(); rtc::Thread* network_thread() const; rtc::Thread* signaling_thread() const; - // Specifies which kind of data channel is allowed. This is controlled - // by the chrome command-line flag and constraints: - // 1. 
If chrome command-line switch 'enable-sctp-data-channels' is enabled, - // constraint kEnableDtlsSrtp is true, and constaint kEnableRtpDataChannels is - // not set or false, SCTP is allowed (DCT_SCTP); - // 2. If constraint kEnableRtpDataChannels is true, RTP is allowed (DCT_RTP); - // 3. If both 1&2 are false, data channel is not allowed (DCT_NONE). - cricket::DataChannelType data_channel_type_ = - cricket::DCT_NONE; // TODO(bugs.webrtc.org/9987): Accessed on both - // signaling and network thread. - // Plugin transport used for data channels. Pointer may be accessed and // checked from any thread, but the object may only be touched on the // network thread. @@ -171,24 +155,16 @@ class DataChannelController : public DataChannelProviderInterface, bool data_channel_transport_ready_to_send_ RTC_GUARDED_BY(signaling_thread()) = false; - // |rtp_data_channel_| is used if in RTP data channel mode, - // |data_channel_transport_| when using SCTP. - cricket::RtpDataChannel* rtp_data_channel_ = nullptr; - // TODO(bugs.webrtc.org/9987): Accessed on both - // signaling and some other thread. - SctpSidAllocator sid_allocator_ /* RTC_GUARDED_BY(signaling_thread()) */; - std::vector> sctp_data_channels_ - RTC_GUARDED_BY(signaling_thread()); - std::vector> sctp_data_channels_to_free_ + std::vector> sctp_data_channels_ RTC_GUARDED_BY(signaling_thread()); - - // Map of label -> DataChannel - std::map> rtp_data_channels_ + std::vector> sctp_data_channels_to_free_ RTC_GUARDED_BY(signaling_thread()); // Signals from |data_channel_transport_|. These are invoked on the // signaling thread. + // TODO(bugs.webrtc.org/11547): These '_s' signals likely all belong on the + // network thread. 
sigslot::signal1 SignalDataChannelTransportWritable_s RTC_GUARDED_BY(signaling_thread()); sigslot::signal2 SignalDataChannelTransportChannelClosed_s RTC_GUARDED_BY(signaling_thread()); - sigslot::signal1 SignalDataChannelCreated_ + sigslot::signal1 SignalSctpDataChannelCreated_ RTC_GUARDED_BY(signaling_thread()); - // Used to invoke data channel transport signals on the signaling thread. - std::unique_ptr data_channel_transport_invoker_ - RTC_GUARDED_BY(network_thread()); - // Owning PeerConnection. PeerConnection* const pc_; + // The weak pointers must be dereferenced and invalidated on the signalling + // thread only. rtc::WeakPtrFactory weak_factory_{this}; }; diff --git a/pc/data_channel_integrationtest.cc b/pc/data_channel_integrationtest.cc new file mode 100644 index 0000000000..47ea74a4b2 --- /dev/null +++ b/pc/data_channel_integrationtest.cc @@ -0,0 +1,845 @@ +/* + * Copyright 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/data_channel_interface.h" +#include "api/dtmf_sender_interface.h" +#include "api/peer_connection_interface.h" +#include "api/scoped_refptr.h" +#include "api/units/time_delta.h" +#include "pc/test/integration_test_helpers.h" +#include "pc/test/mock_peer_connection_observers.h" +#include "rtc_base/fake_clock.h" +#include "rtc_base/gunit.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/virtual_socket_server.h" +#include "system_wrappers/include/field_trial.h" +#include "test/gtest.h" + +namespace webrtc { + +namespace { + +// All tests in this file require SCTP support. 
+#ifdef WEBRTC_HAVE_SCTP + +class DataChannelIntegrationTest : public PeerConnectionIntegrationBaseTest, + public ::testing::WithParamInterface< + std::tuple> { + protected: + DataChannelIntegrationTest() + : PeerConnectionIntegrationBaseTest(std::get<0>(GetParam()), + std::get<1>(GetParam())) {} +}; + +// Fake clock must be set before threads are started to prevent race on +// Set/GetClockForTesting(). +// To achieve that, multiple inheritance is used as a mixin pattern +// where order of construction is finely controlled. +// This also ensures peerconnection is closed before switching back to non-fake +// clock, avoiding other races and DCHECK failures such as in rtp_sender.cc. +class FakeClockForTest : public rtc::ScopedFakeClock { + protected: + FakeClockForTest() { + // Some things use a time of "0" as a special value, so we need to start out + // the fake clock at a nonzero time. + // TODO(deadbeef): Fix this. + AdvanceTime(webrtc::TimeDelta::Seconds(1)); + } + + // Explicit handle. + ScopedFakeClock& FakeClock() { return *this; } +}; + +class DataChannelIntegrationTestPlanB + : public PeerConnectionIntegrationBaseTest { + protected: + DataChannelIntegrationTestPlanB() + : PeerConnectionIntegrationBaseTest(SdpSemantics::kPlanB) {} +}; + +class DataChannelIntegrationTestUnifiedPlan + : public PeerConnectionIntegrationBaseTest { + protected: + DataChannelIntegrationTestUnifiedPlan() + : PeerConnectionIntegrationBaseTest(SdpSemantics::kUnifiedPlan) {} +}; + +// This test causes a PeerConnection to enter Disconnected state, and +// sends data on a DataChannel while disconnected. +// The data should be surfaced when the connection reestablishes. 
+TEST_P(DataChannelIntegrationTest, DataChannelWhileDisconnected) { + CreatePeerConnectionWrappers(); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer(), kDefaultTimeout); + std::string data1 = "hello first"; + caller()->data_channel()->Send(DataBuffer(data1)); + EXPECT_EQ_WAIT(data1, callee()->data_observer()->last_message(), + kDefaultTimeout); + // Cause a network outage + virtual_socket_server()->set_drop_probability(1.0); + EXPECT_EQ_WAIT(PeerConnectionInterface::kIceConnectionDisconnected, + caller()->standardized_ice_connection_state(), + kDefaultTimeout); + std::string data2 = "hello second"; + caller()->data_channel()->Send(DataBuffer(data2)); + // Remove the network outage. The connection should reestablish. + virtual_socket_server()->set_drop_probability(0.0); + EXPECT_EQ_WAIT(data2, callee()->data_observer()->last_message(), + kDefaultTimeout); +} + +// This test causes a PeerConnection to enter Disconnected state, +// sends data on a DataChannel while disconnected, and then triggers +// an ICE restart. +// The data should be surfaced when the connection reestablishes. 
+TEST_P(DataChannelIntegrationTest, DataChannelWhileDisconnectedIceRestart) { + CreatePeerConnectionWrappers(); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer(), kDefaultTimeout); + std::string data1 = "hello first"; + caller()->data_channel()->Send(DataBuffer(data1)); + EXPECT_EQ_WAIT(data1, callee()->data_observer()->last_message(), + kDefaultTimeout); + // Cause a network outage + virtual_socket_server()->set_drop_probability(1.0); + ASSERT_EQ_WAIT(PeerConnectionInterface::kIceConnectionDisconnected, + caller()->standardized_ice_connection_state(), + kDefaultTimeout); + std::string data2 = "hello second"; + caller()->data_channel()->Send(DataBuffer(data2)); + + // Trigger an ICE restart. The signaling channel is not affected by + // the network outage. + caller()->SetOfferAnswerOptions(IceRestartOfferAnswerOptions()); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Remove the network outage. The connection should reestablish. + virtual_socket_server()->set_drop_probability(0.0); + EXPECT_EQ_WAIT(data2, callee()->data_observer()->last_message(), + kDefaultTimeout); +} + +// This test sets up a call between two parties with audio, video and an SCTP +// data channel. +TEST_P(DataChannelIntegrationTest, EndToEndCallWithSctpDataChannel) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + // Expect that data channel created on caller side will show up for callee as + // well. + caller()->CreateDataChannel(); + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Ensure the existence of the SCTP data channel didn't impede audio/video. 
+ MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + // Caller data channel should already exist (it created one). Callee data + // channel may not exist yet, since negotiation happens in-band, not in SDP. + ASSERT_NE(nullptr, caller()->data_channel()); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + // Ensure data can be sent in both directions. + std::string data = "hello world"; + caller()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), + kDefaultTimeout); + callee()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), + kDefaultTimeout); +} + +// This test sets up a call between two parties with an SCTP +// data channel only, and sends messages of various sizes. +TEST_P(DataChannelIntegrationTest, + EndToEndCallWithSctpDataChannelVariousSizes) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + // Expect that data channel created on caller side will show up for callee as + // well. + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Caller data channel should already exist (it created one). Callee data + // channel may not exist yet, since negotiation happens in-band, not in SDP. 
+ ASSERT_NE(nullptr, caller()->data_channel()); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + for (int message_size = 1; message_size < 100000; message_size *= 2) { + std::string data(message_size, 'a'); + caller()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), + kDefaultTimeout); + callee()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), + kDefaultTimeout); + } + // Specifically probe the area around the MTU size. + for (int message_size = 1100; message_size < 1300; message_size += 1) { + std::string data(message_size, 'a'); + caller()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), + kDefaultTimeout); + callee()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), + kDefaultTimeout); + } +} + +// This test sets up a call between two parties with an SCTP +// data channel only, and sends empty messages +TEST_P(DataChannelIntegrationTest, + EndToEndCallWithSctpDataChannelEmptyMessages) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + // Expect that data channel created on caller side will show up for callee as + // well. + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Caller data channel should already exist (it created one). Callee data + // channel may not exist yet, since negotiation happens in-band, not in SDP. 
+ ASSERT_NE(nullptr, caller()->data_channel()); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + // Ensure data can be sent in both directions. + // Sending empty string data + std::string data = ""; + caller()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(1u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); + EXPECT_TRUE(callee()->data_observer()->last_message().empty()); + EXPECT_FALSE(callee()->data_observer()->messages().back().binary); + callee()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(1u, caller()->data_observer()->received_message_count(), + kDefaultTimeout); + EXPECT_TRUE(caller()->data_observer()->last_message().empty()); + EXPECT_FALSE(caller()->data_observer()->messages().back().binary); + + // Sending empty binary data + rtc::CopyOnWriteBuffer empty_buffer; + caller()->data_channel()->Send(DataBuffer(empty_buffer, true)); + EXPECT_EQ_WAIT(2u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); + EXPECT_TRUE(callee()->data_observer()->last_message().empty()); + EXPECT_TRUE(callee()->data_observer()->messages().back().binary); + callee()->data_channel()->Send(DataBuffer(empty_buffer, true)); + EXPECT_EQ_WAIT(2u, caller()->data_observer()->received_message_count(), + kDefaultTimeout); + EXPECT_TRUE(caller()->data_observer()->last_message().empty()); + EXPECT_TRUE(caller()->data_observer()->messages().back().binary); +} + +TEST_P(DataChannelIntegrationTest, + EndToEndCallWithSctpDataChannelLowestSafeMtu) { + // The lowest payload size limit that's tested and found safe for this + // application. Note that this is not the safe limit under all conditions; + // in particular, the default is not the largest DTLS signature, and + // this test does not use TURN. 
+ const size_t kLowestSafePayloadSizeLimit = 1225; + + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + // Expect that data channel created on caller side will show up for callee as + // well. + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Caller data channel should already exist (it created one). Callee data + // channel may not exist yet, since negotiation happens in-band, not in SDP. + ASSERT_NE(nullptr, caller()->data_channel()); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + virtual_socket_server()->set_max_udp_payload(kLowestSafePayloadSizeLimit); + for (int message_size = 1140; message_size < 1240; message_size += 1) { + std::string data(message_size, 'a'); + caller()->data_channel()->Send(DataBuffer(data)); + ASSERT_EQ_WAIT(data, callee()->data_observer()->last_message(), + kDefaultTimeout); + callee()->data_channel()->Send(DataBuffer(data)); + ASSERT_EQ_WAIT(data, caller()->data_observer()->last_message(), + kDefaultTimeout); + } +} + +// This test verifies that lowering the MTU of the connection will cause +// the datachannel to not transmit reliably. +// The purpose of this test is to ensure that we know how a too-small MTU +// error manifests itself. +TEST_P(DataChannelIntegrationTest, EndToEndCallWithSctpDataChannelHarmfulMtu) { + // The lowest payload size limit that's tested and found safe for this + // application in this configuration (see test above). + const size_t kLowestSafePayloadSizeLimit = 1225; + // The size of the smallest message that fails to be delivered. 
+ const size_t kMessageSizeThatIsNotDelivered = 1157; + + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_NE(nullptr, caller()->data_channel()); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + virtual_socket_server()->set_max_udp_payload(kLowestSafePayloadSizeLimit - 1); + // Probe for an undelivered or slowly delivered message. The exact + // size limit seems to be dependent on the message history, so make the + // code easily able to find the current value. + bool failure_seen = false; + for (size_t message_size = 1110; message_size < 1400; message_size++) { + const size_t message_count = + callee()->data_observer()->received_message_count(); + const std::string data(message_size, 'a'); + caller()->data_channel()->Send(DataBuffer(data)); + // Wait a very short time for the message to be delivered. + // Note: Waiting only 10 ms is too short for Windows bots; they will + // flakily fail at a random frame. + WAIT(callee()->data_observer()->received_message_count() > message_count, + 100); + if (callee()->data_observer()->received_message_count() == message_count) { + ASSERT_EQ(kMessageSizeThatIsNotDelivered, message_size); + failure_seen = true; + break; + } + } + ASSERT_TRUE(failure_seen); +} + +// Ensure that when the callee closes an SCTP data channel, the closing +// procedure results in the data channel being closed for the caller as well. +TEST_P(DataChannelIntegrationTest, CalleeClosesSctpDataChannel) { + // Same procedure as above test. 
+ ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_NE(nullptr, caller()->data_channel()); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + ASSERT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + // Close the data channel on the callee side, and wait for it to reach the + // "closed" state on both sides. + callee()->data_channel()->Close(); + + DataChannelInterface::DataState expected_states[] = { + DataChannelInterface::DataState::kConnecting, + DataChannelInterface::DataState::kOpen, + DataChannelInterface::DataState::kClosing, + DataChannelInterface::DataState::kClosed}; + + EXPECT_EQ_WAIT(DataChannelInterface::DataState::kClosed, + caller()->data_observer()->state(), kDefaultTimeout); + EXPECT_THAT(caller()->data_observer()->states(), + ::testing::ElementsAreArray(expected_states)); + + EXPECT_EQ_WAIT(DataChannelInterface::DataState::kClosed, + callee()->data_observer()->state(), kDefaultTimeout); + EXPECT_THAT(callee()->data_observer()->states(), + ::testing::ElementsAreArray(expected_states)); +} + +TEST_P(DataChannelIntegrationTest, SctpDataChannelConfigSentToOtherSide) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + webrtc::DataChannelInit init; + init.id = 53; + init.maxRetransmits = 52; + caller()->CreateDataChannel("data-channel", &init); + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + // Since 
"negotiated" is false, the "id" parameter should be ignored. + EXPECT_NE(init.id, callee()->data_channel()->id()); + EXPECT_EQ("data-channel", callee()->data_channel()->label()); + EXPECT_EQ(init.maxRetransmits, callee()->data_channel()->maxRetransmits()); + EXPECT_FALSE(callee()->data_channel()->negotiated()); +} + +// Test usrsctp's ability to process unordered data stream, where data actually +// arrives out of order using simulated delays. Previously there have been some +// bugs in this area. +TEST_P(DataChannelIntegrationTest, StressTestUnorderedSctpDataChannel) { + // Introduce random network delays. + // Otherwise it's not a true "unordered" test. + virtual_socket_server()->set_delay_mean(20); + virtual_socket_server()->set_delay_stddev(5); + virtual_socket_server()->UpdateDelayDistribution(); + // Normal procedure, but with unordered data channel config. + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + webrtc::DataChannelInit init; + init.ordered = false; + caller()->CreateDataChannel(&init); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_NE(nullptr, caller()->data_channel()); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + ASSERT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + static constexpr int kNumMessages = 100; + // Deliberately chosen to be larger than the MTU so messages get fragmented. + static constexpr size_t kMaxMessageSize = 4096; + // Create and send random messages. 
+ std::vector sent_messages; + for (int i = 0; i < kNumMessages; ++i) { + size_t length = + (rand() % kMaxMessageSize) + 1; // NOLINT (rand_r instead of rand) + std::string message; + ASSERT_TRUE(rtc::CreateRandomString(length, &message)); + caller()->data_channel()->Send(DataBuffer(message)); + callee()->data_channel()->Send(DataBuffer(message)); + sent_messages.push_back(message); + } + + // Wait for all messages to be received. + EXPECT_EQ_WAIT(rtc::checked_cast(kNumMessages), + caller()->data_observer()->received_message_count(), + kDefaultTimeout); + EXPECT_EQ_WAIT(rtc::checked_cast(kNumMessages), + callee()->data_observer()->received_message_count(), + kDefaultTimeout); + + // Sort and compare to make sure none of the messages were corrupted. + std::vector caller_received_messages; + absl::c_transform(caller()->data_observer()->messages(), + std::back_inserter(caller_received_messages), + [](const auto& a) { return a.data; }); + + std::vector callee_received_messages; + absl::c_transform(callee()->data_observer()->messages(), + std::back_inserter(callee_received_messages), + [](const auto& a) { return a.data; }); + + absl::c_sort(sent_messages); + absl::c_sort(caller_received_messages); + absl::c_sort(callee_received_messages); + EXPECT_EQ(sent_messages, caller_received_messages); + EXPECT_EQ(sent_messages, callee_received_messages); +} + +// This test sets up a call between two parties with audio, and video. When +// audio and video are setup and flowing, an SCTP data channel is negotiated. +TEST_P(DataChannelIntegrationTest, AddSctpDataChannelInSubsequentOffer) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + // Do initial offer/answer with audio/video. + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Create data channel and do new offer and answer. 
+ caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Caller data channel should already exist (it created one). Callee data + // channel may not exist yet, since negotiation happens in-band, not in SDP. + ASSERT_NE(nullptr, caller()->data_channel()); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + // Ensure data can be sent in both directions. + std::string data = "hello world"; + caller()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), + kDefaultTimeout); + callee()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), + kDefaultTimeout); +} + +// Set up a connection initially just using SCTP data channels, later upgrading +// to audio/video, ensuring frames are received end-to-end. Effectively the +// inverse of the test above. +// This was broken in M57; see https://crbug.com/711243 +TEST_P(DataChannelIntegrationTest, SctpDataChannelToAudioVideoUpgrade) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + // Do initial offer/answer with just data channel. + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Wait until data can be sent over the data channel. + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + ASSERT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + // Do subsequent offer/answer with two-way audio and video. Audio and video + // should end up bundled on the DTLS/ICE transport already used for data. 
+ caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); +} + +static void MakeSpecCompliantSctpOffer(cricket::SessionDescription* desc) { + cricket::SctpDataContentDescription* dcd_offer = + GetFirstSctpDataContentDescription(desc); + // See https://crbug.com/webrtc/11211 - this function is a no-op + ASSERT_TRUE(dcd_offer); + dcd_offer->set_use_sctpmap(false); + dcd_offer->set_protocol("UDP/DTLS/SCTP"); +} + +// Test that the data channel works when a spec-compliant SCTP m= section is +// offered (using "a=sctp-port" instead of "a=sctpmap", and using +// "UDP/DTLS/SCTP" as the protocol). +TEST_P(DataChannelIntegrationTest, + DataChannelWorksWhenSpecCompliantSctpOfferReceived) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->SetGeneratedSdpMunger(MakeSpecCompliantSctpOffer); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); + EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + + // Ensure data can be sent in both directions. + std::string data = "hello world"; + caller()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), + kDefaultTimeout); + callee()->data_channel()->Send(DataBuffer(data)); + EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), + kDefaultTimeout); +} + +// Test that after closing PeerConnections, they stop sending any packets (ICE, +// DTLS, RTP...). 
+TEST_P(DataChannelIntegrationTest, ClosingConnectionStopsPacketFlow) { + // Set up audio/video/data, wait for some frames to be received. + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->AddAudioVideoTracks(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + MediaExpectations media_expectations; + media_expectations.CalleeExpectsSomeAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + // Close PeerConnections. + ClosePeerConnections(); + // Pump messages for a second, and ensure no new packets end up sent. + uint32_t sent_packets_a = virtual_socket_server()->sent_packets(); + WAIT(false, 1000); + uint32_t sent_packets_b = virtual_socket_server()->sent_packets(); + EXPECT_EQ(sent_packets_a, sent_packets_b); +} + +// Test that transport stats are generated by the RTCStatsCollector for a +// connection that only involves data channels. This is a regression test for +// crbug.com/826972. 
+TEST_P(DataChannelIntegrationTest, + TransportStatsReportedForDataChannelOnlyConnection) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + + auto caller_report = caller()->NewGetStats(); + EXPECT_EQ(1u, caller_report->GetStatsOfType().size()); + auto callee_report = callee()->NewGetStats(); + EXPECT_EQ(1u, callee_report->GetStatsOfType().size()); +} + +TEST_P(DataChannelIntegrationTest, QueuedPacketsGetDeliveredInReliableMode) { + CreatePeerConnectionWrappers(); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + + caller()->data_channel()->Send(DataBuffer("hello first")); + ASSERT_EQ_WAIT(1u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); + // Cause a temporary network outage + virtual_socket_server()->set_drop_probability(1.0); + for (int i = 1; i <= 10; i++) { + caller()->data_channel()->Send(DataBuffer("Sent while blocked")); + } + // Nothing should be delivered during outage. Short wait. + EXPECT_EQ_WAIT(1u, callee()->data_observer()->received_message_count(), 10); + // Reverse outage + virtual_socket_server()->set_drop_probability(0.0); + // All packets should be delivered. 
+ EXPECT_EQ_WAIT(11u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); +} + +TEST_P(DataChannelIntegrationTest, QueuedPacketsGetDroppedInUnreliableMode) { + CreatePeerConnectionWrappers(); + ConnectFakeSignaling(); + DataChannelInit init; + init.maxRetransmits = 0; + init.ordered = false; + caller()->CreateDataChannel(&init); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + caller()->data_channel()->Send(DataBuffer("hello first")); + ASSERT_EQ_WAIT(1u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); + // Cause a temporary network outage + virtual_socket_server()->set_drop_probability(1.0); + // Send a few packets. Note that all get dropped only when all packets + // fit into the receiver receive window/congestion window, so that they + // actually get sent. + for (int i = 1; i <= 10; i++) { + caller()->data_channel()->Send(DataBuffer("Sent while blocked")); + } + // Nothing should be delivered during outage. + // We do a short wait to verify that delivery count is still 1. + WAIT(false, 10); + EXPECT_EQ(1u, callee()->data_observer()->received_message_count()); + // Reverse the network outage. + virtual_socket_server()->set_drop_probability(0.0); + // Send a new packet, and wait for it to be delivered. + caller()->data_channel()->Send(DataBuffer("After block")); + EXPECT_EQ_WAIT("After block", callee()->data_observer()->last_message(), + kDefaultTimeout); + // Some messages should be lost, but first and last message should have + // been delivered. + // First, check that the protocol guarantee is preserved. 
+ EXPECT_GT(11u, callee()->data_observer()->received_message_count()); + EXPECT_LE(2u, callee()->data_observer()->received_message_count()); + // Then, check that observed behavior (lose all messages) has not changed + EXPECT_EQ(2u, callee()->data_observer()->received_message_count()); +} + +TEST_P(DataChannelIntegrationTest, + QueuedPacketsGetDroppedInLifetimeLimitedMode) { + CreatePeerConnectionWrappers(); + ConnectFakeSignaling(); + DataChannelInit init; + init.maxRetransmitTime = 1; + init.ordered = false; + caller()->CreateDataChannel(&init); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + caller()->data_channel()->Send(DataBuffer("hello first")); + ASSERT_EQ_WAIT(1u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); + // Cause a temporary network outage + virtual_socket_server()->set_drop_probability(1.0); + for (int i = 1; i <= 200; i++) { + caller()->data_channel()->Send(DataBuffer("Sent while blocked")); + } + // Nothing should be delivered during outage. + // We do a short wait to verify that delivery count is still 1, + // and to make sure max packet lifetime (which is in ms) is exceeded. + WAIT(false, 10); + EXPECT_EQ(1u, callee()->data_observer()->received_message_count()); + // Reverse the network outage. + virtual_socket_server()->set_drop_probability(0.0); + // Send a new packet, and wait for it to be delivered. + caller()->data_channel()->Send(DataBuffer("After block")); + EXPECT_EQ_WAIT("After block", callee()->data_observer()->last_message(), + kDefaultTimeout); + // Some messages should be lost, but first and last message should have + // been delivered. + // First, check that the protocol guarantee is preserved. 
+ EXPECT_GT(202u, callee()->data_observer()->received_message_count()); + EXPECT_LE(2u, callee()->data_observer()->received_message_count()); + // Then, check that observed behavior (lose some messages) has not changed + if (webrtc::field_trial::IsEnabled("WebRTC-DataChannel-Dcsctp")) { + // DcSctp loses all messages. This is correct. + EXPECT_EQ(2u, callee()->data_observer()->received_message_count()); + } else { + // Usrsctp loses some messages, but keeps messages not attempted. + // THIS IS THE WRONG BEHAVIOR. According to discussion in + // https://github.com/sctplab/usrsctp/issues/584, all these packets + // should be discarded. + // TODO(bugs.webrtc.org/12731): Fix this. + EXPECT_EQ(90u, callee()->data_observer()->received_message_count()); + } +} + +TEST_P(DataChannelIntegrationTest, + SomeQueuedPacketsGetDroppedInMaxRetransmitsMode) { + CreatePeerConnectionWrappers(); + ConnectFakeSignaling(); + DataChannelInit init; + init.maxRetransmits = 0; + init.ordered = false; + caller()->CreateDataChannel(&init); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + caller()->data_channel()->Send(DataBuffer("hello first")); + ASSERT_EQ_WAIT(1u, callee()->data_observer()->received_message_count(), + kDefaultTimeout); + // Cause a temporary network outage + virtual_socket_server()->set_drop_probability(1.0); + // Fill the buffer until queued data starts to build + size_t packet_counter = 0; + while (caller()->data_channel()->buffered_amount() < 1 && + packet_counter < 10000) { + packet_counter++; + caller()->data_channel()->Send(DataBuffer("Sent while blocked")); + } + if (caller()->data_channel()->buffered_amount()) { + RTC_LOG(LS_INFO) << "Buffered data after " << packet_counter << " packets"; + } else { + RTC_LOG(LS_INFO) << "No buffered data after " << packet_counter + << " packets"; + } + // Nothing should be delivered during outage. 
+ // We do a short wait to verify that delivery count is still 1. + WAIT(false, 10); + EXPECT_EQ(1u, callee()->data_observer()->received_message_count()); + // Reverse the network outage. + virtual_socket_server()->set_drop_probability(0.0); + // Send a new packet, and wait for it to be delivered. + caller()->data_channel()->Send(DataBuffer("After block")); + EXPECT_EQ_WAIT("After block", callee()->data_observer()->last_message(), + kDefaultTimeout); + // Some messages should be lost, but first and last message should have + // been delivered. + // Due to the fact that retransmissions are only counted when the packet + // goes on the wire, NOT when they are stalled in queue due to + // congestion, we expect some of the packets to be delivered, because + // congestion prevented them from being sent. + // Citation: https://tools.ietf.org/html/rfc7496#section-3.1 + + // First, check that the protocol guarantee is preserved. + EXPECT_GT(packet_counter, + callee()->data_observer()->received_message_count()); + EXPECT_LE(2u, callee()->data_observer()->received_message_count()); + // Then, check that observed behavior (lose between 100 and 200 messages) + // has not changed. + // Usrsctp behavior is different on Android (177) and other platforms (122). + // Dcsctp loses 432 packets. 
+ EXPECT_GT(2 + packet_counter - 100, + callee()->data_observer()->received_message_count()); + EXPECT_LT(2 + packet_counter - 500, + callee()->data_observer()->received_message_count()); +} + +INSTANTIATE_TEST_SUITE_P( + DataChannelIntegrationTest, + DataChannelIntegrationTest, + Combine(Values(SdpSemantics::kPlanB, SdpSemantics::kUnifiedPlan), + Values("WebRTC-DataChannel-Dcsctp/Enabled/", + "WebRTC-DataChannel-Dcsctp/Disabled/"))); + +TEST_F(DataChannelIntegrationTestUnifiedPlan, + EndToEndCallWithBundledSctpDataChannel) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(caller()->pc()->GetSctpTransport(), kDefaultTimeout); + ASSERT_EQ_WAIT(SctpTransportState::kConnected, + caller()->pc()->GetSctpTransport()->Information().state(), + kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); +} + +TEST_F(DataChannelIntegrationTestUnifiedPlan, + EndToEndCallWithDataChannelOnlyConnects) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + ASSERT_TRUE(caller()->data_observer()->IsOpen()); +} + +TEST_F(DataChannelIntegrationTestUnifiedPlan, DataChannelClosesWhenClosed) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer(), 
kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + caller()->data_channel()->Close(); + ASSERT_TRUE_WAIT(!callee()->data_observer()->IsOpen(), kDefaultTimeout); +} + +TEST_F(DataChannelIntegrationTestUnifiedPlan, + DataChannelClosesWhenClosedReverse) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + callee()->data_channel()->Close(); + ASSERT_TRUE_WAIT(!caller()->data_observer()->IsOpen(), kDefaultTimeout); +} + +TEST_F(DataChannelIntegrationTestUnifiedPlan, + DataChannelClosesWhenPeerConnectionClosed) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->CreateDataChannel(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer(), kDefaultTimeout); + ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + caller()->pc()->Close(); + ASSERT_TRUE_WAIT(!callee()->data_observer()->IsOpen(), kDefaultTimeout); +} + +#endif // WEBRTC_HAVE_SCTP + +} // namespace + +} // namespace webrtc diff --git a/pc/data_channel_unittest.cc b/pc/data_channel_unittest.cc index b29be338cb..770892cbe1 100644 --- a/pc/data_channel_unittest.cc +++ b/pc/data_channel_unittest.cc @@ -8,20 +8,21 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "pc/data_channel.h" - #include #include #include +#include "media/sctp/sctp_transport_internal.h" +#include "pc/sctp_data_channel.h" #include "pc/sctp_utils.h" #include "pc/test/fake_data_channel_provider.h" #include "rtc_base/gunit.h" #include "rtc_base/numerics/safe_conversions.h" #include "test/gtest.h" -using webrtc::DataChannel; +using webrtc::DataChannelInterface; +using webrtc::SctpDataChannel; using webrtc::SctpSidAllocator; static constexpr int kDefaultTimeout = 10000; @@ -64,14 +65,16 @@ class FakeDataChannelObserver : public webrtc::DataChannelObserver { // TODO(deadbeef): The fact that these tests use a fake provider makes them not // too valuable. Should rewrite using the // peerconnection_datachannel_unittest.cc infrastructure. +// TODO(bugs.webrtc.org/11547): Incorporate a dedicated network thread. class SctpDataChannelTest : public ::testing::Test { protected: SctpDataChannelTest() : provider_(new FakeDataChannelProvider()), - webrtc_data_channel_(DataChannel::Create(provider_.get(), - cricket::DCT_SCTP, - "test", - init_)) {} + webrtc_data_channel_(SctpDataChannel::Create(provider_.get(), + "test", + init_, + rtc::Thread::Current(), + rtc::Thread::Current())) {} void SetChannelReady() { provider_->set_transport_available(true); @@ -90,7 +93,7 @@ class SctpDataChannelTest : public ::testing::Test { webrtc::InternalDataChannelInit init_; std::unique_ptr provider_; std::unique_ptr observer_; - rtc::scoped_refptr webrtc_data_channel_; + rtc::scoped_refptr webrtc_data_channel_; }; class StateSignalsListener : public sigslot::has_slots<> { @@ -98,9 +101,9 @@ class StateSignalsListener : public sigslot::has_slots<> { int opened_count() const { return opened_count_; } int closed_count() const { return closed_count_; } - void OnSignalOpened(DataChannel* data_channel) { ++opened_count_; } + void OnSignalOpened(DataChannelInterface* data_channel) { ++opened_count_; } - void OnSignalClosed(DataChannel* data_channel) { ++closed_count_; } + void 
OnSignalClosed(DataChannelInterface* data_channel) { ++closed_count_; } private: int opened_count_ = 0; @@ -110,8 +113,9 @@ class StateSignalsListener : public sigslot::has_slots<> { // Verifies that the data channel is connected to the transport after creation. TEST_F(SctpDataChannelTest, ConnectedToTransportOnCreated) { provider_->set_transport_available(true); - rtc::scoped_refptr dc = - DataChannel::Create(provider_.get(), cricket::DCT_SCTP, "test1", init_); + rtc::scoped_refptr dc = + SctpDataChannel::Create(provider_.get(), "test1", init_, + rtc::Thread::Current(), rtc::Thread::Current()); EXPECT_TRUE(provider_->IsConnected(dc.get())); // The sid is not set yet, so it should not have added the streams. @@ -283,9 +287,9 @@ TEST_F(SctpDataChannelTest, OpenMessageSent) { SetChannelReady(); EXPECT_GE(webrtc_data_channel_->id(), 0); - EXPECT_EQ(cricket::DMT_CONTROL, provider_->last_send_data_params().type); - EXPECT_EQ(provider_->last_send_data_params().ssrc, - static_cast(webrtc_data_channel_->id())); + EXPECT_EQ(webrtc::DataMessageType::kControl, + provider_->last_send_data_params().type); + EXPECT_EQ(provider_->last_sid(), webrtc_data_channel_->id()); } TEST_F(SctpDataChannelTest, QueuedOpenMessageSent) { @@ -293,9 +297,9 @@ TEST_F(SctpDataChannelTest, QueuedOpenMessageSent) { SetChannelReady(); provider_->set_send_blocked(false); - EXPECT_EQ(cricket::DMT_CONTROL, provider_->last_send_data_params().type); - EXPECT_EQ(provider_->last_send_data_params().ssrc, - static_cast(webrtc_data_channel_->id())); + EXPECT_EQ(webrtc::DataMessageType::kControl, + provider_->last_send_data_params().type); + EXPECT_EQ(provider_->last_sid(), webrtc_data_channel_->id()); } // Tests that the DataChannel created after transport gets ready can enter OPEN @@ -304,8 +308,9 @@ TEST_F(SctpDataChannelTest, LateCreatedChannelTransitionToOpen) { SetChannelReady(); webrtc::InternalDataChannelInit init; init.id = 1; - rtc::scoped_refptr dc = - DataChannel::Create(provider_.get(), 
cricket::DCT_SCTP, "test1", init); + rtc::scoped_refptr dc = + SctpDataChannel::Create(provider_.get(), "test1", init, + rtc::Thread::Current(), rtc::Thread::Current()); EXPECT_EQ(webrtc::DataChannelInterface::kConnecting, dc->state()); EXPECT_TRUE_WAIT(webrtc::DataChannelInterface::kOpen == dc->state(), 1000); } @@ -317,8 +322,9 @@ TEST_F(SctpDataChannelTest, SendUnorderedAfterReceivesOpenAck) { webrtc::InternalDataChannelInit init; init.id = 1; init.ordered = false; - rtc::scoped_refptr dc = - DataChannel::Create(provider_.get(), cricket::DCT_SCTP, "test1", init); + rtc::scoped_refptr dc = + SctpDataChannel::Create(provider_.get(), "test1", init, + rtc::Thread::Current(), rtc::Thread::Current()); EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kOpen, dc->state(), 1000); @@ -329,8 +335,8 @@ TEST_F(SctpDataChannelTest, SendUnorderedAfterReceivesOpenAck) { // Emulates receiving an OPEN_ACK message. cricket::ReceiveDataParams params; - params.ssrc = init.id; - params.type = cricket::DMT_CONTROL; + params.sid = init.id; + params.type = webrtc::DataMessageType::kControl; rtc::CopyOnWriteBuffer payload; webrtc::WriteDataChannelOpenAckMessage(&payload); dc->OnDataReceived(params, payload); @@ -347,15 +353,16 @@ TEST_F(SctpDataChannelTest, SendUnorderedAfterReceiveData) { webrtc::InternalDataChannelInit init; init.id = 1; init.ordered = false; - rtc::scoped_refptr dc = - DataChannel::Create(provider_.get(), cricket::DCT_SCTP, "test1", init); + rtc::scoped_refptr dc = + SctpDataChannel::Create(provider_.get(), "test1", init, + rtc::Thread::Current(), rtc::Thread::Current()); EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kOpen, dc->state(), 1000); // Emulates receiving a DATA message. 
cricket::ReceiveDataParams params; - params.ssrc = init.id; - params.type = cricket::DMT_TEXT; + params.sid = init.id; + params.type = webrtc::DataMessageType::kText; webrtc::DataBuffer buffer("data"); dc->OnDataReceived(params, buffer.data); @@ -376,7 +383,8 @@ TEST_F(SctpDataChannelTest, OpenWaitsForOpenMesssage) { provider_->set_send_blocked(false); EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kOpen, webrtc_data_channel_->state(), 1000); - EXPECT_EQ(cricket::DMT_CONTROL, provider_->last_send_data_params().type); + EXPECT_EQ(webrtc::DataMessageType::kControl, + provider_->last_send_data_params().type); } // Tests that close first makes sure all queued data gets sent. @@ -397,42 +405,43 @@ TEST_F(SctpDataChannelTest, QueuedCloseFlushes) { EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kClosed, webrtc_data_channel_->state(), 1000); EXPECT_TRUE(webrtc_data_channel_->error().ok()); - EXPECT_EQ(cricket::DMT_TEXT, provider_->last_send_data_params().type); + EXPECT_EQ(webrtc::DataMessageType::kText, + provider_->last_send_data_params().type); } -// Tests that messages are sent with the right ssrc. -TEST_F(SctpDataChannelTest, SendDataSsrc) { +// Tests that messages are sent with the right id. +TEST_F(SctpDataChannelTest, SendDataId) { webrtc_data_channel_->SetSctpSid(1); SetChannelReady(); webrtc::DataBuffer buffer("data"); EXPECT_TRUE(webrtc_data_channel_->Send(buffer)); - EXPECT_EQ(1U, provider_->last_send_data_params().ssrc); + EXPECT_EQ(1, provider_->last_sid()); } -// Tests that the incoming messages with wrong ssrcs are rejected. -TEST_F(SctpDataChannelTest, ReceiveDataWithInvalidSsrc) { +// Tests that the incoming messages with wrong ids are rejected. 
+TEST_F(SctpDataChannelTest, ReceiveDataWithInvalidId) { webrtc_data_channel_->SetSctpSid(1); SetChannelReady(); AddObserver(); cricket::ReceiveDataParams params; - params.ssrc = 0; + params.sid = 0; webrtc::DataBuffer buffer("abcd"); webrtc_data_channel_->OnDataReceived(params, buffer.data); EXPECT_EQ(0U, observer_->messages_received()); } -// Tests that the incoming messages with right ssrcs are acceted. -TEST_F(SctpDataChannelTest, ReceiveDataWithValidSsrc) { +// Tests that the incoming messages with right ids are accepted. +TEST_F(SctpDataChannelTest, ReceiveDataWithValidId) { webrtc_data_channel_->SetSctpSid(1); SetChannelReady(); AddObserver(); cricket::ReceiveDataParams params; - params.ssrc = 1; + params.sid = 1; webrtc::DataBuffer buffer("abcd"); webrtc_data_channel_->OnDataReceived(params, buffer.data); @@ -448,11 +457,12 @@ TEST_F(SctpDataChannelTest, NoMsgSentIfNegotiatedAndNotFromOpenMsg) { config.open_handshake_role = webrtc::InternalDataChannelInit::kNone; SetChannelReady(); - rtc::scoped_refptr dc = - DataChannel::Create(provider_.get(), cricket::DCT_SCTP, "test1", config); + rtc::scoped_refptr dc = + SctpDataChannel::Create(provider_.get(), "test1", config, + rtc::Thread::Current(), rtc::Thread::Current()); EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kOpen, dc->state(), 1000); - EXPECT_EQ(0U, provider_->last_send_data_params().ssrc); + EXPECT_EQ(0, provider_->last_sid()); } // Tests that DataChannel::messages_received() and DataChannel::bytes_received() @@ -470,7 +480,7 @@ TEST_F(SctpDataChannelTest, VerifyMessagesAndBytesReceived) { webrtc_data_channel_->SetSctpSid(1); cricket::ReceiveDataParams params; - params.ssrc = 1; + params.sid = 1; // Default values. 
EXPECT_EQ(0U, webrtc_data_channel_->messages_received()); @@ -511,14 +521,15 @@ TEST_F(SctpDataChannelTest, OpenAckSentIfCreatedFromOpenMessage) { config.open_handshake_role = webrtc::InternalDataChannelInit::kAcker; SetChannelReady(); - rtc::scoped_refptr dc = - DataChannel::Create(provider_.get(), cricket::DCT_SCTP, "test1", config); + rtc::scoped_refptr dc = + SctpDataChannel::Create(provider_.get(), "test1", config, + rtc::Thread::Current(), rtc::Thread::Current()); EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kOpen, dc->state(), 1000); - EXPECT_EQ(static_cast(config.id), - provider_->last_send_data_params().ssrc); - EXPECT_EQ(cricket::DMT_CONTROL, provider_->last_send_data_params().type); + EXPECT_EQ(config.id, provider_->last_sid()); + EXPECT_EQ(webrtc::DataMessageType::kControl, + provider_->last_send_data_params().type); } // Tests the OPEN_ACK role assigned by InternalDataChannelInit. @@ -538,7 +549,7 @@ TEST_F(SctpDataChannelTest, ClosedWhenSendBufferFull) { SetChannelReady(); rtc::CopyOnWriteBuffer buffer(1024); - memset(buffer.data(), 0, buffer.size()); + memset(buffer.MutableData(), 0, buffer.size()); webrtc::DataBuffer packet(buffer, true); provider_->set_send_blocked(true); @@ -573,10 +584,10 @@ TEST_F(SctpDataChannelTest, ClosedOnTransportError) { TEST_F(SctpDataChannelTest, ClosedWhenReceivedBufferFull) { SetChannelReady(); rtc::CopyOnWriteBuffer buffer(1024); - memset(buffer.data(), 0, buffer.size()); + memset(buffer.MutableData(), 0, buffer.size()); cricket::ReceiveDataParams params; - params.ssrc = 0; + params.sid = 0; // Receiving data without having an observer will overflow the buffer. 
for (size_t i = 0; i < 16 * 1024 + 1; ++i) { @@ -615,7 +626,7 @@ TEST_F(SctpDataChannelTest, TransportDestroyedWhileDataBuffered) { SetChannelReady(); rtc::CopyOnWriteBuffer buffer(1024); - memset(buffer.data(), 0, buffer.size()); + memset(buffer.MutableData(), 0, buffer.size()); webrtc::DataBuffer packet(buffer, true); // Send a packet while sending is blocked so it ends up buffered. @@ -625,7 +636,31 @@ TEST_F(SctpDataChannelTest, TransportDestroyedWhileDataBuffered) { // Tell the data channel that its transport is being destroyed. // It should then stop using the transport (allowing us to delete it) and // transition to the "closed" state. - webrtc_data_channel_->OnTransportChannelClosed(); + webrtc::RTCError error(webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA, ""); + error.set_error_detail(webrtc::RTCErrorDetailType::SCTP_FAILURE); + webrtc_data_channel_->OnTransportChannelClosed(error); + provider_.reset(nullptr); + EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kClosed, + webrtc_data_channel_->state(), kDefaultTimeout); + EXPECT_FALSE(webrtc_data_channel_->error().ok()); + EXPECT_EQ(webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA, + webrtc_data_channel_->error().type()); + EXPECT_EQ(webrtc::RTCErrorDetailType::SCTP_FAILURE, + webrtc_data_channel_->error().error_detail()); +} + +TEST_F(SctpDataChannelTest, TransportGotErrorCode) { + SetChannelReady(); + + // Tell the data channel that its transport is being destroyed with an + // error code. + // It should then report that error code. 
+ webrtc::RTCError error(webrtc::RTCErrorType::OPERATION_ERROR_WITH_DATA, + "Transport channel closed"); + error.set_error_detail(webrtc::RTCErrorDetailType::SCTP_FAILURE); + error.set_sctp_cause_code( + static_cast(cricket::SctpErrorCauseCode::kProtocolViolation)); + webrtc_data_channel_->OnTransportChannelClosed(error); provider_.reset(nullptr); EXPECT_EQ_WAIT(webrtc::DataChannelInterface::kClosed, webrtc_data_channel_->state(), kDefaultTimeout); @@ -634,6 +669,9 @@ TEST_F(SctpDataChannelTest, TransportDestroyedWhileDataBuffered) { webrtc_data_channel_->error().type()); EXPECT_EQ(webrtc::RTCErrorDetailType::SCTP_FAILURE, webrtc_data_channel_->error().error_detail()); + EXPECT_EQ( + static_cast(cricket::SctpErrorCauseCode::kProtocolViolation), + webrtc_data_channel_->error().sctp_cause_code()); } class SctpSidAllocatorTest : public ::testing::Test { diff --git a/pc/data_channel_utils.cc b/pc/data_channel_utils.cc new file mode 100644 index 0000000000..a772241c3e --- /dev/null +++ b/pc/data_channel_utils.cc @@ -0,0 +1,54 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/data_channel_utils.h" + +#include + +#include "rtc_base/checks.h" + +namespace webrtc { + +bool PacketQueue::Empty() const { + return packets_.empty(); +} + +std::unique_ptr PacketQueue::PopFront() { + RTC_DCHECK(!packets_.empty()); + byte_count_ -= packets_.front()->size(); + std::unique_ptr packet = std::move(packets_.front()); + packets_.pop_front(); + return packet; +} + +void PacketQueue::PushFront(std::unique_ptr packet) { + byte_count_ += packet->size(); + packets_.push_front(std::move(packet)); +} + +void PacketQueue::PushBack(std::unique_ptr packet) { + byte_count_ += packet->size(); + packets_.push_back(std::move(packet)); +} + +void PacketQueue::Clear() { + packets_.clear(); + byte_count_ = 0; +} + +void PacketQueue::Swap(PacketQueue* other) { + size_t other_byte_count = other->byte_count_; + other->byte_count_ = byte_count_; + byte_count_ = other_byte_count; + + other->packets_.swap(packets_); +} + +} // namespace webrtc diff --git a/pc/data_channel_utils.h b/pc/data_channel_utils.h new file mode 100644 index 0000000000..85cacdb563 --- /dev/null +++ b/pc/data_channel_utils.h @@ -0,0 +1,62 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_DATA_CHANNEL_UTILS_H_ +#define PC_DATA_CHANNEL_UTILS_H_ + +#include +#include +#include +#include +#include +#include + +#include "api/data_channel_interface.h" +#include "media/base/media_engine.h" + +namespace webrtc { + +// A packet queue which tracks the total queued bytes. Queued packets are +// owned by this class. 
+class PacketQueue final { + public: + size_t byte_count() const { return byte_count_; } + + bool Empty() const; + + std::unique_ptr PopFront(); + + void PushFront(std::unique_ptr packet); + void PushBack(std::unique_ptr packet); + + void Clear(); + + void Swap(PacketQueue* other); + + private: + std::deque> packets_; + size_t byte_count_ = 0; +}; + +struct DataChannelStats { + int internal_id; + int id; + std::string label; + std::string protocol; + DataChannelInterface::DataState state; + uint32_t messages_sent; + uint32_t messages_received; + uint64_t bytes_sent; + uint64_t bytes_received; +}; + +} // namespace webrtc + +#endif // PC_DATA_CHANNEL_UTILS_H_ diff --git a/pc/datagram_rtp_transport.cc b/pc/datagram_rtp_transport.cc deleted file mode 100644 index ad1e6dc995..0000000000 --- a/pc/datagram_rtp_transport.cc +++ /dev/null @@ -1,380 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "pc/datagram_rtp_transport.h" - -#include -#include -#include - -#include "absl/memory/memory.h" -#include "absl/strings/string_view.h" -#include "absl/types/optional.h" -#include "api/array_view.h" -#include "api/rtc_error.h" -#include "media/base/rtp_utils.h" -#include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" -#include "modules/rtp_rtcp/source/rtp_packet.h" -#include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "p2p/base/dtls_transport_internal.h" -#include "p2p/base/packet_transport_internal.h" -#include "rtc_base/buffer.h" -#include "rtc_base/checks.h" -#include "rtc_base/dscp.h" -#include "rtc_base/logging.h" -#include "rtc_base/rtc_certificate.h" -#include "rtc_base/ssl_stream_adapter.h" -#include "rtc_base/stream.h" -#include "rtc_base/thread.h" -#include "system_wrappers/include/field_trial.h" - -namespace webrtc { - -namespace { - -// Field trials. -// Disable datagram to RTCP feedback translation and enable RTCP feedback loop -// on top of datagram feedback loop. Note that two -// feedback loops add unneccesary overhead, so it's preferable to use feedback -// loop provided by datagram transport and convert datagram ACKs to RTCP ACKs, -// but enabling RTCP feedback loop may be useful in tests and experiments. -const char kDisableDatagramToRtcpFeebackTranslationFieldTrial[] = - "WebRTC-kDisableDatagramToRtcpFeebackTranslation"; - -} // namespace - -// Maximum packet size of RTCP feedback packet for allocation. We re-create RTCP -// feedback packets when we get ACK notifications from datagram transport. Our -// rtcp feedback packets contain only 1 ACK, so they are much smaller than 1250. 
-constexpr size_t kMaxRtcpFeedbackPacketSize = 1250; - -DatagramRtpTransport::DatagramRtpTransport( - const std::vector& rtp_header_extensions, - cricket::IceTransportInternal* ice_transport, - DatagramTransportInterface* datagram_transport) - : ice_transport_(ice_transport), - datagram_transport_(datagram_transport), - disable_datagram_to_rtcp_feeback_translation_(field_trial::IsEnabled( - kDisableDatagramToRtcpFeebackTranslationFieldTrial)) { - // Save extension map for parsing RTP packets (we only need transport - // sequence numbers). - const RtpExtension* transport_sequence_number_extension = - RtpExtension::FindHeaderExtensionByUri(rtp_header_extensions, - TransportSequenceNumber::kUri); - - if (transport_sequence_number_extension != nullptr) { - rtp_header_extension_map_.Register( - transport_sequence_number_extension->id); - } else { - RTC_LOG(LS_ERROR) << "Transport sequence numbers are not supported in " - "datagram transport connection"; - } - - RTC_DCHECK(ice_transport_); - RTC_DCHECK(datagram_transport_); - - ice_transport_->SignalNetworkRouteChanged.connect( - this, &DatagramRtpTransport::OnNetworkRouteChanged); - // Subscribe to DatagramTransport to read incoming packets. - datagram_transport_->SetDatagramSink(this); - datagram_transport_->SetTransportStateCallback(this); -} - -DatagramRtpTransport::~DatagramRtpTransport() { - // Unsubscribe from DatagramTransport sinks. - datagram_transport_->SetDatagramSink(nullptr); - datagram_transport_->SetTransportStateCallback(nullptr); -} - -bool DatagramRtpTransport::SendRtpPacket(rtc::CopyOnWriteBuffer* packet, - const rtc::PacketOptions& options, - int flags) { - RTC_DCHECK_RUN_ON(&thread_checker_); - - // Assign and increment datagram_id. - const DatagramId datagram_id = current_datagram_id_++; - - // Send as is (without extracting transport sequence number) for - // RTP packets if we are not doing datagram => RTCP feedback translation. 
- if (disable_datagram_to_rtcp_feeback_translation_) { - // Even if we are not extracting transport sequence number we need to - // propagate "Sent" notification for both RTP and RTCP packets. For this - // reason we need save options.packet_id in packet map. - sent_rtp_packet_map_[datagram_id] = SentPacketInfo(options.packet_id); - - return SendDatagram(*packet, datagram_id); - } - - // Parse RTP packet. - RtpPacket rtp_packet(&rtp_header_extension_map_); - // TODO(mellem): Verify that this doesn't mangle something (it shouldn't). - if (!rtp_packet.Parse(*packet)) { - RTC_NOTREACHED() << "Failed to parse outgoing RtpPacket, len=" - << packet->size() - << ", options.packet_id=" << options.packet_id; - return -1; - } - - // Try to get transport sequence number. - uint16_t transport_senquence_number; - if (!rtp_packet.GetExtension( - &transport_senquence_number)) { - // Save packet info without transport sequence number. - sent_rtp_packet_map_[datagram_id] = SentPacketInfo(options.packet_id); - - RTC_LOG(LS_VERBOSE) - << "Sending rtp packet without transport sequence number, packet=" - << rtp_packet.ToString(); - - return SendDatagram(*packet, datagram_id); - } - - // Save packet info with sequence number and ssrc so we could reconstruct - // RTCP feedback packet when we receive datagram ACK. - sent_rtp_packet_map_[datagram_id] = SentPacketInfo( - options.packet_id, rtp_packet.Ssrc(), transport_senquence_number); - - // Since datagram transport provides feedback and timestamps, we do not need - // to send transport sequence number, so we remove it from RTP packet. Later - // when we get Ack for sent datagram, we will re-create RTCP feedback packet. 
- if (!rtp_packet.RemoveExtension(TransportSequenceNumber::kId)) { - RTC_NOTREACHED() << "Failed to remove transport sequence number, packet=" - << rtp_packet.ToString(); - return -1; - } - - RTC_LOG(LS_VERBOSE) << "Removed transport_senquence_number=" - << transport_senquence_number - << " from packet=" << rtp_packet.ToString() - << ", saved bytes=" << packet->size() - rtp_packet.size(); - - return SendDatagram( - rtc::ArrayView(rtp_packet.data(), rtp_packet.size()), - datagram_id); -} - -bool DatagramRtpTransport::SendRtcpPacket(rtc::CopyOnWriteBuffer* packet, - const rtc::PacketOptions& options, - int flags) { - RTC_DCHECK_RUN_ON(&thread_checker_); - - // Assign and increment datagram_id. - const DatagramId datagram_id = current_datagram_id_++; - - // Even if we are not extracting transport sequence number we need to - // propagate "Sent" notification for both RTP and RTCP packets. For this - // reason we need save options.packet_id in packet map. - sent_rtp_packet_map_[datagram_id] = SentPacketInfo(options.packet_id); - return SendDatagram(*packet, datagram_id); -} - -bool DatagramRtpTransport::SendDatagram(rtc::ArrayView data, - DatagramId datagram_id) { - return datagram_transport_->SendDatagram(data, datagram_id).ok(); -} - -void DatagramRtpTransport::OnDatagramReceived( - rtc::ArrayView data) { - RTC_DCHECK_RUN_ON(&thread_checker_); - - rtc::ArrayView cdata(reinterpret_cast(data.data()), - data.size()); - if (cricket::InferRtpPacketType(cdata) == cricket::RtpPacketType::kRtcp) { - rtc::CopyOnWriteBuffer buffer(data.data(), data.size()); - SignalRtcpPacketReceived(&buffer, /*packet_time_us=*/-1); - return; - } - - // TODO(sukhanov): I am not filling out time, but on my video quality - // test in WebRTC the time was not set either and higher layers of the stack - // overwrite -1 with current current rtc time. Leaveing comment for now to - // make sure it works as expected. 
- RtpPacketReceived parsed_packet(&rtp_header_extension_map_); - if (!parsed_packet.Parse(data)) { - RTC_LOG(LS_ERROR) << "Failed to parse incoming RTP packet"; - return; - } - if (!rtp_demuxer_.OnRtpPacket(parsed_packet)) { - RTC_LOG(LS_WARNING) << "Failed to demux RTP packet: " - << RtpDemuxer::DescribePacket(parsed_packet); - } -} - -void DatagramRtpTransport::OnDatagramSent(DatagramId datagram_id) { - RTC_DCHECK_RUN_ON(&thread_checker_); - - // Find packet_id and propagate OnPacketSent notification. - const auto& it = sent_rtp_packet_map_.find(datagram_id); - if (it == sent_rtp_packet_map_.end()) { - RTC_NOTREACHED() << "Did not find sent packet info for sent datagram_id=" - << datagram_id; - return; - } - - // Also see how DatagramRtpTransport::OnSentPacket handles OnSentPacket - // notification from ICE in bypass mode. - rtc::SentPacket sent_packet(/*packet_id=*/it->second.packet_id, - rtc::TimeMillis()); - - SignalSentPacket(sent_packet); -} - -bool DatagramRtpTransport::GetAndRemoveSentPacketInfo( - DatagramId datagram_id, - SentPacketInfo* sent_packet_info) { - RTC_CHECK(sent_packet_info != nullptr); - - const auto& it = sent_rtp_packet_map_.find(datagram_id); - if (it == sent_rtp_packet_map_.end()) { - return false; - } - - *sent_packet_info = it->second; - sent_rtp_packet_map_.erase(it); - return true; -} - -void DatagramRtpTransport::OnDatagramAcked(const DatagramAck& ack) { - RTC_DCHECK_RUN_ON(&thread_checker_); - - SentPacketInfo sent_packet_info; - if (!GetAndRemoveSentPacketInfo(ack.datagram_id, &sent_packet_info)) { - // TODO(sukhanov): If OnDatagramAck() can come after OnDatagramLost(), - // datagram_id is already deleted and we may need to relax the CHECK below. - // It's probably OK to ignore such datagrams, because it's been a few RTTs - // anyway since they were sent. 
- RTC_NOTREACHED() << "Did not find sent packet info for datagram_id=" - << ack.datagram_id; - return; - } - - RTC_LOG(LS_VERBOSE) << "Datagram acked, ack.datagram_id=" << ack.datagram_id - << ", sent_packet_info.packet_id=" - << sent_packet_info.packet_id - << ", sent_packet_info.transport_sequence_number=" - << sent_packet_info.transport_sequence_number.value_or(-1) - << ", sent_packet_info.ssrc=" - << sent_packet_info.ssrc.value_or(-1) - << ", receive_timestamp_ms=" - << ack.receive_timestamp.ms(); - - // If transport sequence number was not present in RTP packet, we do not need - // to propagate RTCP feedback. - if (!sent_packet_info.transport_sequence_number) { - return; - } - - // TODO(sukhanov): We noticed that datagram transport implementations can - // return zero timestamps in the middle of the call. This is workaround to - // avoid propagating zero timestamps, but we need to understand why we have - // them in the first place. - int64_t receive_timestamp_us = ack.receive_timestamp.us(); - - if (receive_timestamp_us == 0) { - receive_timestamp_us = previous_nonzero_timestamp_us_; - } else { - previous_nonzero_timestamp_us_ = receive_timestamp_us; - } - - // Ssrc must be provided in packet info if transport sequence number is set, - // which is guaranteed by SentPacketInfo constructor. - RTC_CHECK(sent_packet_info.ssrc); - - // Recreate RTCP feedback packet. 
- rtcp::TransportFeedback feedback_packet; - feedback_packet.SetMediaSsrc(*sent_packet_info.ssrc); - - const uint16_t transport_sequence_number = - sent_packet_info.transport_sequence_number.value(); - - feedback_packet.SetBase(transport_sequence_number, receive_timestamp_us); - feedback_packet.AddReceivedPacket(transport_sequence_number, - receive_timestamp_us); - - rtc::CopyOnWriteBuffer buffer(kMaxRtcpFeedbackPacketSize); - size_t index = 0; - if (!feedback_packet.Create(buffer.data(), &index, buffer.capacity(), - nullptr)) { - RTC_NOTREACHED() << "Failed to create RTCP feedback packet"; - return; - } - - RTC_CHECK_GT(index, 0); - RTC_CHECK_LE(index, kMaxRtcpFeedbackPacketSize); - - // Propagage created RTCP packet as normal incoming packet. - buffer.SetSize(index); - SignalRtcpPacketReceived(&buffer, /*packet_time_us=*/-1); -} - -void DatagramRtpTransport::OnDatagramLost(DatagramId datagram_id) { - RTC_DCHECK_RUN_ON(&thread_checker_); - - RTC_LOG(LS_INFO) << "Datagram lost, datagram_id=" << datagram_id; - - SentPacketInfo sent_packet_info; - if (!GetAndRemoveSentPacketInfo(datagram_id, &sent_packet_info)) { - RTC_NOTREACHED() << "Did not find sent packet info for lost datagram_id=" - << datagram_id; - } -} - -void DatagramRtpTransport::OnStateChanged(MediaTransportState state) { - state_ = state; - SignalWritableState(state_ == MediaTransportState::kWritable); - if (state_ == MediaTransportState::kWritable) { - SignalReadyToSend(true); - } -} - -const std::string& DatagramRtpTransport::transport_name() const { - return ice_transport_->transport_name(); -} - -int DatagramRtpTransport::SetRtpOption(rtc::Socket::Option opt, int value) { - return ice_transport_->SetOption(opt, value); -} - -int DatagramRtpTransport::SetRtcpOption(rtc::Socket::Option opt, int value) { - return -1; -} - -bool DatagramRtpTransport::IsReadyToSend() const { - return state_ == MediaTransportState::kWritable; -} - -bool DatagramRtpTransport::IsWritable(bool /*rtcp*/) const { - return 
state_ == MediaTransportState::kWritable; -} - -void DatagramRtpTransport::UpdateRtpHeaderExtensionMap( - const cricket::RtpHeaderExtensions& header_extensions) { - rtp_header_extension_map_ = RtpHeaderExtensionMap(header_extensions); -} - -bool DatagramRtpTransport::RegisterRtpDemuxerSink( - const RtpDemuxerCriteria& criteria, - RtpPacketSinkInterface* sink) { - rtp_demuxer_.RemoveSink(sink); - return rtp_demuxer_.AddSink(criteria, sink); -} - -bool DatagramRtpTransport::UnregisterRtpDemuxerSink( - RtpPacketSinkInterface* sink) { - return rtp_demuxer_.RemoveSink(sink); -} - -void DatagramRtpTransport::OnNetworkRouteChanged( - absl::optional network_route) { - RTC_DCHECK_RUN_ON(&thread_checker_); - SignalNetworkRouteChanged(network_route); -} - -} // namespace webrtc diff --git a/pc/datagram_rtp_transport.h b/pc/datagram_rtp_transport.h deleted file mode 100644 index f9684c69c0..0000000000 --- a/pc/datagram_rtp_transport.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef PC_DATAGRAM_RTP_TRANSPORT_H_ -#define PC_DATAGRAM_RTP_TRANSPORT_H_ - -#include -#include -#include -#include - -#include "api/crypto/crypto_options.h" -#include "api/transport/datagram_transport_interface.h" -#include "api/transport/media/media_transport_interface.h" -#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" -#include "modules/rtp_rtcp/source/rtp_header_extensions.h" -#include "p2p/base/ice_transport_internal.h" -#include "p2p/base/packet_transport_internal.h" -#include "pc/rtp_transport_internal.h" -#include "rtc_base/buffer.h" -#include "rtc_base/buffer_queue.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/ssl_stream_adapter.h" -#include "rtc_base/stream.h" -#include "rtc_base/strings/string_builder.h" -#include "rtc_base/thread_checker.h" - -namespace webrtc { - -constexpr int kDatagramDtlsAdaptorComponent = -1; - -// RTP transport which uses the DatagramTransportInterface to send and receive -// packets. -class DatagramRtpTransport : public RtpTransportInternal, - public webrtc::DatagramSinkInterface, - public webrtc::MediaTransportStateCallback { - public: - DatagramRtpTransport( - const std::vector& rtp_header_extensions, - cricket::IceTransportInternal* ice_transport, - DatagramTransportInterface* datagram_transport); - - ~DatagramRtpTransport() override; - - // ===================================================== - // Overrides for webrtc::DatagramTransportSinkInterface - // and MediaTransportStateCallback - // ===================================================== - void OnDatagramReceived(rtc::ArrayView data) override; - - void OnDatagramSent(webrtc::DatagramId datagram_id) override; - - void OnDatagramAcked(const webrtc::DatagramAck& ack) override; - - void OnDatagramLost(webrtc::DatagramId datagram_id) override; - - void OnStateChanged(webrtc::MediaTransportState state) override; - - // ===================================================== - // RtpTransportInternal overrides - // 
===================================================== - bool SendRtpPacket(rtc::CopyOnWriteBuffer* packet, - const rtc::PacketOptions& options, - int flags) override; - - bool SendRtcpPacket(rtc::CopyOnWriteBuffer* packet, - const rtc::PacketOptions& options, - int flags) override; - - const std::string& transport_name() const override; - - // Datagram transport always muxes RTCP. - bool rtcp_mux_enabled() const override { return true; } - void SetRtcpMuxEnabled(bool enable) override {} - - int SetRtpOption(rtc::Socket::Option opt, int value) override; - int SetRtcpOption(rtc::Socket::Option opt, int value) override; - - bool IsReadyToSend() const override; - - bool IsWritable(bool rtcp) const override; - - bool IsSrtpActive() const override { return false; } - - void UpdateRtpHeaderExtensionMap( - const cricket::RtpHeaderExtensions& header_extensions) override; - - bool RegisterRtpDemuxerSink(const RtpDemuxerCriteria& criteria, - RtpPacketSinkInterface* sink) override; - - bool UnregisterRtpDemuxerSink(RtpPacketSinkInterface* sink) override; - - private: - // RTP/RTCP packet info stored for each sent packet. - struct SentPacketInfo { - // RTP packet info with ssrc and transport sequence number. - SentPacketInfo(int64_t packet_id, - uint32_t ssrc, - uint16_t transport_sequence_number) - : ssrc(ssrc), - transport_sequence_number(transport_sequence_number), - packet_id(packet_id) {} - - // Packet info without SSRC and transport sequence number used for RTCP - // packets, RTP packets when transport sequence number is not provided or - // when feedback translation is disabled. - explicit SentPacketInfo(int64_t packet_id) : packet_id(packet_id) {} - - SentPacketInfo() = default; - - absl::optional ssrc; - - // Transport sequence number (if it was provided in outgoing RTP packet). - // It is used to re-create RTCP feedback packets from datagram ACKs. - absl::optional transport_sequence_number; - - // Packet id from rtc::PacketOptions. 
It is required to propagage sent - // notification up the stack (SignalSentPacket). - int64_t packet_id = 0; - }; - - // Finds SentPacketInfo for given |datagram_id| and removes map entry. - // Returns false if entry was not found. - bool GetAndRemoveSentPacketInfo(webrtc::DatagramId datagram_id, - SentPacketInfo* sent_packet_info); - - // Sends datagram to datagram_transport. - bool SendDatagram(rtc::ArrayView data, - webrtc::DatagramId datagram_id); - - // Propagates network route changes from ICE. - void OnNetworkRouteChanged(absl::optional network_route); - - rtc::ThreadChecker thread_checker_; - cricket::IceTransportInternal* ice_transport_; - webrtc::DatagramTransportInterface* datagram_transport_; - - RtpDemuxer rtp_demuxer_; - - MediaTransportState state_ = MediaTransportState::kPending; - - // Extension map for parsing transport sequence numbers. - webrtc::RtpHeaderExtensionMap rtp_header_extension_map_; - - // Keeps information about sent RTP packet until they are Acked or Lost. - std::map sent_rtp_packet_map_; - - // Current datagram_id, incremented after each sent RTP packets. - // Datagram id is passed to datagram transport when we send datagram and we - // get it back in notifications about Sent, Acked and Lost datagrams. - int64_t current_datagram_id_ = 0; - - // TODO(sukhanov): Previous nonzero timestamp is required for workaround for - // zero timestamps received, which sometimes are received from datagram - // transport. Investigate if we can eliminate zero timestamps. - int64_t previous_nonzero_timestamp_us_ = 0; - - // Disable datagram to RTCP feedback translation and enable RTCP feedback - // loop (note that having both RTCP and datagram feedback loops is - // inefficient, but can be useful in tests and experiments). 
- const bool disable_datagram_to_rtcp_feeback_translation_; -}; - -} // namespace webrtc - -#endif // PC_DATAGRAM_RTP_TRANSPORT_H_ diff --git a/pc/dtls_srtp_transport.cc b/pc/dtls_srtp_transport.cc index dacbcb411d..ac091c6131 100644 --- a/pc/dtls_srtp_transport.cc +++ b/pc/dtls_srtp_transport.cc @@ -15,6 +15,7 @@ #include #include +#include "api/dtls_transport_interface.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/ssl_stream_adapter.h" @@ -114,10 +115,9 @@ bool DtlsSrtpTransport::IsDtlsConnected() { auto rtcp_dtls_transport = rtcp_mux_enabled() ? nullptr : rtcp_dtls_transport_; return (rtp_dtls_transport_ && - rtp_dtls_transport_->dtls_state() == - cricket::DTLS_TRANSPORT_CONNECTED && + rtp_dtls_transport_->dtls_state() == DtlsTransportState::kConnected && (!rtcp_dtls_transport || rtcp_dtls_transport->dtls_state() == - cricket::DTLS_TRANSPORT_CONNECTED)); + DtlsTransportState::kConnected)); } bool DtlsSrtpTransport::IsDtlsWritable() { @@ -166,7 +166,6 @@ void DtlsSrtpTransport::SetupRtpDtlsSrtp() { static_cast(send_key.size()), send_extension_ids, selected_crypto_suite, &recv_key[0], static_cast(recv_key.size()), recv_extension_ids)) { - SignalDtlsSrtpSetupFailure(this, /*rtcp=*/false); RTC_LOG(LS_WARNING) << "DTLS-SRTP key installation for RTP failed"; } } @@ -198,7 +197,6 @@ void DtlsSrtpTransport::SetupRtcpDtlsSrtp() { selected_crypto_suite, &rtcp_recv_key[0], static_cast(rtcp_recv_key.size()), recv_extension_ids)) { - SignalDtlsSrtpSetupFailure(this, /*rtcp=*/true); RTC_LOG(LS_WARNING) << "DTLS-SRTP key installation for RTCP failed"; } } @@ -277,14 +275,16 @@ void DtlsSrtpTransport::SetDtlsTransport( } if (*old_dtls_transport) { - (*old_dtls_transport)->SignalDtlsState.disconnect(this); + (*old_dtls_transport)->UnsubscribeDtlsTransportState(this); } *old_dtls_transport = new_dtls_transport; if (new_dtls_transport) { - new_dtls_transport->SignalDtlsState.connect( - this, &DtlsSrtpTransport::OnDtlsState); + 
new_dtls_transport->SubscribeDtlsTransportState( + this, + [this](cricket::DtlsTransportInternal* transport, + DtlsTransportState state) { OnDtlsState(transport, state); }); } } @@ -299,13 +299,15 @@ void DtlsSrtpTransport::SetRtcpDtlsTransport( } void DtlsSrtpTransport::OnDtlsState(cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { + DtlsTransportState state) { RTC_DCHECK(transport == rtp_dtls_transport_ || transport == rtcp_dtls_transport_); - SignalDtlsStateChange(); + if (on_dtls_state_change_) { + on_dtls_state_change_(); + } - if (state != cricket::DTLS_TRANSPORT_CONNECTED) { + if (state != DtlsTransportState::kConnected) { ResetParams(); return; } @@ -318,4 +320,8 @@ void DtlsSrtpTransport::OnWritableState( MaybeSetupDtlsSrtp(); } +void DtlsSrtpTransport::SetOnDtlsStateChange( + std::function callback) { + on_dtls_state_change_ = std::move(callback); +} } // namespace webrtc diff --git a/pc/dtls_srtp_transport.h b/pc/dtls_srtp_transport.h index c63a3ca5dd..9c52dcf809 100644 --- a/pc/dtls_srtp_transport.h +++ b/pc/dtls_srtp_transport.h @@ -11,10 +11,12 @@ #ifndef PC_DTLS_SRTP_TRANSPORT_H_ #define PC_DTLS_SRTP_TRANSPORT_H_ +#include #include #include "absl/types/optional.h" #include "api/crypto_params.h" +#include "api/dtls_transport_interface.h" #include "api/rtc_error.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/packet_transport_internal.h" @@ -45,8 +47,7 @@ class DtlsSrtpTransport : public SrtpTransport { void UpdateRecvEncryptedHeaderExtensionIds( const std::vector& recv_extension_ids); - sigslot::signal SignalDtlsSrtpSetupFailure; - sigslot::signal<> SignalDtlsStateChange; + void SetOnDtlsStateChange(std::function callback); RTCError SetSrtpSendKey(const cricket::CryptoParams& params) override { return RTCError(RTCErrorType::UNSUPPORTED_OPERATION, @@ -82,7 +83,7 @@ class DtlsSrtpTransport : public SrtpTransport { cricket::DtlsTransportInternal* rtcp_dtls_transport); void 
OnDtlsState(cricket::DtlsTransportInternal* dtls_transport, - cricket::DtlsTransportState state); + DtlsTransportState state); // Override the SrtpTransport::OnWritableState. void OnWritableState(rtc::PacketTransportInternal* packet_transport) override; @@ -96,6 +97,7 @@ class DtlsSrtpTransport : public SrtpTransport { absl::optional> recv_extension_ids_; bool active_reset_srtp_params_ = false; + std::function on_dtls_state_change_; }; } // namespace webrtc diff --git a/pc/dtls_transport.cc b/pc/dtls_transport.cc index 1362f94ac1..074f44e22b 100644 --- a/pc/dtls_transport.cc +++ b/pc/dtls_transport.cc @@ -12,40 +12,31 @@ #include +#include "absl/types/optional.h" +#include "api/dtls_transport_interface.h" +#include "api/sequence_checker.h" #include "pc/ice_transport.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/ssl_certificate.h" namespace webrtc { -namespace { - -DtlsTransportState TranslateState(cricket::DtlsTransportState internal_state) { - switch (internal_state) { - case cricket::DTLS_TRANSPORT_NEW: - return DtlsTransportState::kNew; - case cricket::DTLS_TRANSPORT_CONNECTING: - return DtlsTransportState::kConnecting; - case cricket::DTLS_TRANSPORT_CONNECTED: - return DtlsTransportState::kConnected; - case cricket::DTLS_TRANSPORT_CLOSED: - return DtlsTransportState::kClosed; - case cricket::DTLS_TRANSPORT_FAILED: - return DtlsTransportState::kFailed; - } -} - -} // namespace - // Implementation of DtlsTransportInterface DtlsTransport::DtlsTransport( std::unique_ptr internal) : owner_thread_(rtc::Thread::Current()), info_(DtlsTransportState::kNew), internal_dtls_transport_(std::move(internal)), - ice_transport_(new rtc::RefCountedObject( + ice_transport_(rtc::make_ref_counted( internal_dtls_transport_->ice_transport())) { RTC_DCHECK(internal_dtls_transport_.get()); - internal_dtls_transport_->SignalDtlsState.connect( - this, &DtlsTransport::OnInternalDtlsState); + 
internal_dtls_transport_->SubscribeDtlsTransportState( + [this](cricket::DtlsTransportInternal* transport, + DtlsTransportState state) { + OnInternalDtlsState(transport, state); + }); UpdateInformation(); } @@ -56,7 +47,7 @@ DtlsTransport::~DtlsTransport() { } DtlsTransportInformation DtlsTransport::Information() { - rtc::CritScope scope(&lock_); + MutexLock lock(&lock_); return info_; } @@ -80,12 +71,12 @@ void DtlsTransport::Clear() { RTC_DCHECK_RUN_ON(owner_thread_); RTC_DCHECK(internal()); bool must_send_event = - (internal()->dtls_state() != cricket::DTLS_TRANSPORT_CLOSED); + (internal()->dtls_state() != DtlsTransportState::kClosed); // The destructor of cricket::DtlsTransportInternal calls back // into DtlsTransport, so we can't hold the lock while releasing. std::unique_ptr transport_to_release; { - rtc::CritScope scope(&lock_); + MutexLock lock(&lock_); transport_to_release = std::move(internal_dtls_transport_); ice_transport_->Clear(); } @@ -97,7 +88,7 @@ void DtlsTransport::Clear() { void DtlsTransport::OnInternalDtlsState( cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { + DtlsTransportState state) { RTC_DCHECK_RUN_ON(owner_thread_); RTC_DCHECK(transport == internal()); RTC_DCHECK(state == internal()->dtls_state()); @@ -109,10 +100,10 @@ void DtlsTransport::OnInternalDtlsState( void DtlsTransport::UpdateInformation() { RTC_DCHECK_RUN_ON(owner_thread_); - rtc::CritScope scope(&lock_); + MutexLock lock(&lock_); if (internal_dtls_transport_) { if (internal_dtls_transport_->dtls_state() == - cricket::DTLS_TRANSPORT_CONNECTED) { + DtlsTransportState::kConnected) { bool success = true; int ssl_cipher_suite; int tls_version; @@ -122,20 +113,19 @@ void DtlsTransport::UpdateInformation() { success &= internal_dtls_transport_->GetSrtpCryptoSuite(&srtp_cipher); if (success) { info_ = DtlsTransportInformation( - TranslateState(internal_dtls_transport_->dtls_state()), tls_version, + internal_dtls_transport_->dtls_state(), tls_version, 
ssl_cipher_suite, srtp_cipher, internal_dtls_transport_->GetRemoteSSLCertChain()); } else { RTC_LOG(LS_ERROR) << "DtlsTransport in connected state has incomplete " "TLS information"; info_ = DtlsTransportInformation( - TranslateState(internal_dtls_transport_->dtls_state()), - absl::nullopt, absl::nullopt, absl::nullopt, + internal_dtls_transport_->dtls_state(), absl::nullopt, + absl::nullopt, absl::nullopt, internal_dtls_transport_->GetRemoteSSLCertChain()); } } else { - info_ = DtlsTransportInformation( - TranslateState(internal_dtls_transport_->dtls_state())); + info_ = DtlsTransportInformation(internal_dtls_transport_->dtls_state()); } } else { info_ = DtlsTransportInformation(DtlsTransportState::kClosed); diff --git a/pc/dtls_transport.h b/pc/dtls_transport.h index b5caae5212..cca4cc980a 100644 --- a/pc/dtls_transport.h +++ b/pc/dtls_transport.h @@ -17,6 +17,11 @@ #include "api/ice_transport_interface.h" #include "api/scoped_refptr.h" #include "p2p/base/dtls_transport.h" +#include "p2p/base/dtls_transport_internal.h" +#include "pc/ice_transport.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { @@ -24,8 +29,7 @@ class IceTransportWithPointer; // This implementation wraps a cricket::DtlsTransport, and takes // ownership of it. 
-class DtlsTransport : public DtlsTransportInterface, - public sigslot::has_slots<> { +class DtlsTransport : public DtlsTransportInterface { public: // This object must be constructed and updated on a consistent thread, // the same thread as the one the cricket::DtlsTransportInternal object @@ -42,12 +46,12 @@ class DtlsTransport : public DtlsTransportInterface, void Clear(); cricket::DtlsTransportInternal* internal() { - rtc::CritScope scope(&lock_); + MutexLock lock(&lock_); return internal_dtls_transport_.get(); } const cricket::DtlsTransportInternal* internal() const { - rtc::CritScope scope(&lock_); + MutexLock lock(&lock_); return internal_dtls_transport_.get(); } @@ -56,12 +60,12 @@ class DtlsTransport : public DtlsTransportInterface, private: void OnInternalDtlsState(cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state); + DtlsTransportState state); void UpdateInformation(); DtlsTransportObserverInterface* observer_ = nullptr; rtc::Thread* owner_thread_; - rtc::CriticalSection lock_; + mutable Mutex lock_; DtlsTransportInformation info_ RTC_GUARDED_BY(lock_); std::unique_ptr internal_dtls_transport_ RTC_GUARDED_BY(lock_); diff --git a/pc/dtls_transport_unittest.cc b/pc/dtls_transport_unittest.cc index a3f0a7ce8b..f80d99b05e 100644 --- a/pc/dtls_transport_unittest.cc +++ b/pc/dtls_transport_unittest.cc @@ -63,7 +63,7 @@ class DtlsTransportTest : public ::testing::Test { } cricket_transport->SetSslCipherSuite(kNonsenseCipherSuite); transport_ = - new rtc::RefCountedObject(std::move(cricket_transport)); + rtc::make_ref_counted(std::move(cricket_transport)); } void CompleteDtlsHandshake() { @@ -86,8 +86,8 @@ class DtlsTransportTest : public ::testing::Test { TEST_F(DtlsTransportTest, CreateClearDelete) { auto cricket_transport = std::make_unique( "audio", cricket::ICE_CANDIDATE_COMPONENT_RTP); - rtc::scoped_refptr webrtc_transport = - new rtc::RefCountedObject(std::move(cricket_transport)); + auto webrtc_transport = + 
rtc::make_ref_counted(std::move(cricket_transport)); ASSERT_TRUE(webrtc_transport->internal()); ASSERT_EQ(DtlsTransportState::kNew, webrtc_transport->Information().state()); webrtc_transport->Clear(); diff --git a/pc/dtmf_sender.cc b/pc/dtmf_sender.cc index 10378028c8..67c3fac134 100644 --- a/pc/dtmf_sender.cc +++ b/pc/dtmf_sender.cc @@ -18,6 +18,7 @@ #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/thread.h" namespace webrtc { @@ -64,9 +65,7 @@ rtc::scoped_refptr DtmfSender::Create( if (!signaling_thread) { return nullptr; } - rtc::scoped_refptr dtmf_sender( - new rtc::RefCountedObject(signaling_thread, provider)); - return dtmf_sender; + return rtc::make_ref_counted(signaling_thread, provider); } DtmfSender::DtmfSender(rtc::Thread* signaling_thread, @@ -86,19 +85,22 @@ DtmfSender::DtmfSender(rtc::Thread* signaling_thread, } DtmfSender::~DtmfSender() { + RTC_DCHECK_RUN_ON(signaling_thread_); StopSending(); } void DtmfSender::RegisterObserver(DtmfSenderObserverInterface* observer) { + RTC_DCHECK_RUN_ON(signaling_thread_); observer_ = observer; } void DtmfSender::UnregisterObserver() { + RTC_DCHECK_RUN_ON(signaling_thread_); observer_ = nullptr; } bool DtmfSender::CanInsertDtmf() { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); if (!provider_) { return false; } @@ -109,7 +111,7 @@ bool DtmfSender::InsertDtmf(const std::string& tones, int duration, int inter_tone_gap, int comma_delay) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); if (duration > kDtmfMaxDurationMs || duration < kDtmfMinDurationMs || inter_tone_gap < kDtmfMinGapMs || comma_delay < kDtmfMinGapMs) { @@ -132,38 +134,49 @@ bool DtmfSender::InsertDtmf(const std::string& tones, duration_ = duration; inter_tone_gap_ = inter_tone_gap; comma_delay_ = comma_delay; - // Clear the previous queue. 
- dtmf_driver_.Clear(); - // Kick off a new DTMF task queue. + + // Cancel any remaining tasks for previous tones. + if (safety_flag_) { + safety_flag_->SetNotAlive(); + } + safety_flag_ = PendingTaskSafetyFlag::Create(); + // Kick off a new DTMF task. QueueInsertDtmf(RTC_FROM_HERE, 1 /*ms*/); return true; } std::string DtmfSender::tones() const { + RTC_DCHECK_RUN_ON(signaling_thread_); return tones_; } int DtmfSender::duration() const { + RTC_DCHECK_RUN_ON(signaling_thread_); return duration_; } int DtmfSender::inter_tone_gap() const { + RTC_DCHECK_RUN_ON(signaling_thread_); return inter_tone_gap_; } int DtmfSender::comma_delay() const { + RTC_DCHECK_RUN_ON(signaling_thread_); return comma_delay_; } void DtmfSender::QueueInsertDtmf(const rtc::Location& posted_from, uint32_t delay_ms) { - dtmf_driver_.AsyncInvokeDelayed( - posted_from, signaling_thread_, [this] { DoInsertDtmf(); }, delay_ms); + signaling_thread_->PostDelayedTask( + ToQueuedTask(safety_flag_, + [this] { + RTC_DCHECK_RUN_ON(signaling_thread_); + DoInsertDtmf(); + }), + delay_ms); } void DtmfSender::DoInsertDtmf() { - RTC_DCHECK(signaling_thread_->IsCurrent()); - // Get the first DTMF tone from the tone buffer. Unrecognized characters will // be ignored and skipped. size_t first_tone_pos = tones_.find_first_of(kDtmfValidTones); @@ -222,13 +235,17 @@ void DtmfSender::DoInsertDtmf() { } void DtmfSender::OnProviderDestroyed() { + RTC_DCHECK_RUN_ON(signaling_thread_); + RTC_LOG(LS_INFO) << "The Dtmf provider is deleted. 
Clear the sending queue."; StopSending(); provider_ = nullptr; } void DtmfSender::StopSending() { - dtmf_driver_.Clear(); + if (safety_flag_) { + safety_flag_->SetNotAlive(); + } } } // namespace webrtc diff --git a/pc/dtmf_sender.h b/pc/dtmf_sender.h index e332a7ef58..b64b50e09c 100644 --- a/pc/dtmf_sender.h +++ b/pc/dtmf_sender.h @@ -11,13 +11,18 @@ #ifndef PC_DTMF_SENDER_H_ #define PC_DTMF_SENDER_H_ +#include + #include #include "api/dtmf_sender_interface.h" -#include "api/proxy.h" -#include "rtc_base/async_invoker.h" +#include "api/scoped_refptr.h" +#include "pc/proxy.h" #include "rtc_base/constructor_magic.h" +#include "rtc_base/location.h" #include "rtc_base/ref_count.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/thread.h" // DtmfSender is the native implementation of the RTCDTMFSender defined by @@ -70,32 +75,34 @@ class DtmfSender : public DtmfSenderInterface, public sigslot::has_slots<> { private: DtmfSender(); - void QueueInsertDtmf(const rtc::Location& posted_from, uint32_t delay_ms); + void QueueInsertDtmf(const rtc::Location& posted_from, uint32_t delay_ms) + RTC_RUN_ON(signaling_thread_); // The DTMF sending task. - void DoInsertDtmf(); + void DoInsertDtmf() RTC_RUN_ON(signaling_thread_); void OnProviderDestroyed(); - void StopSending(); + void StopSending() RTC_RUN_ON(signaling_thread_); - DtmfSenderObserverInterface* observer_; + DtmfSenderObserverInterface* observer_ RTC_GUARDED_BY(signaling_thread_); rtc::Thread* signaling_thread_; - DtmfProviderInterface* provider_; - std::string tones_; - int duration_; - int inter_tone_gap_; - int comma_delay_; - // Invoker for running delayed tasks which feed the DTMF provider one tone at - // a time. 
- rtc::AsyncInvoker dtmf_driver_; + DtmfProviderInterface* provider_ RTC_GUARDED_BY(signaling_thread_); + std::string tones_ RTC_GUARDED_BY(signaling_thread_); + int duration_ RTC_GUARDED_BY(signaling_thread_); + int inter_tone_gap_ RTC_GUARDED_BY(signaling_thread_); + int comma_delay_ RTC_GUARDED_BY(signaling_thread_); + + // For cancelling the tasks which feed the DTMF provider one tone at a time. + rtc::scoped_refptr safety_flag_ RTC_GUARDED_BY( + signaling_thread_) RTC_PT_GUARDED_BY(signaling_thread_) = nullptr; RTC_DISALLOW_COPY_AND_ASSIGN(DtmfSender); }; // Define proxy for DtmfSenderInterface. -BEGIN_SIGNALING_PROXY_MAP(DtmfSender) -PROXY_SIGNALING_THREAD_DESTRUCTOR() +BEGIN_PRIMARY_PROXY_MAP(DtmfSender) +PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD1(void, RegisterObserver, DtmfSenderObserverInterface*) PROXY_METHOD0(void, UnregisterObserver) PROXY_METHOD0(bool, CanInsertDtmf) @@ -104,7 +111,7 @@ PROXY_CONSTMETHOD0(std::string, tones) PROXY_CONSTMETHOD0(int, duration) PROXY_CONSTMETHOD0(int, inter_tone_gap) PROXY_CONSTMETHOD0(int, comma_delay) -END_PROXY_MAP() +END_PROXY_MAP(DtmfSender) // Get DTMF code from the DTMF event character. 
bool GetDtmfCode(char tone, int* code); diff --git a/pc/dtmf_sender_unittest.cc b/pc/dtmf_sender_unittest.cc index f7f229a887..261cbd0303 100644 --- a/pc/dtmf_sender_unittest.cc +++ b/pc/dtmf_sender_unittest.cc @@ -18,7 +18,6 @@ #include "rtc_base/fake_clock.h" #include "rtc_base/gunit.h" -#include "rtc_base/ref_counted_object.h" #include "rtc_base/time_utils.h" #include "test/gtest.h" @@ -118,8 +117,7 @@ class FakeDtmfProvider : public DtmfProviderInterface { class DtmfSenderTest : public ::testing::Test { protected: DtmfSenderTest() - : observer_(new rtc::RefCountedObject()), - provider_(new FakeDtmfProvider()) { + : observer_(new FakeDtmfObserver()), provider_(new FakeDtmfProvider()) { provider_->SetCanInsertDtmf(true); dtmf_ = DtmfSender::Create(rtc::Thread::Current(), provider_.get()); dtmf_->RegisterObserver(observer_.get()); diff --git a/pc/g3doc/dtls_transport.md b/pc/g3doc/dtls_transport.md new file mode 100644 index 0000000000..65206dff5d --- /dev/null +++ b/pc/g3doc/dtls_transport.md @@ -0,0 +1,53 @@ + + + +## Overview + +WebRTC uses DTLS in two ways: + +* to negotiate keys for SRTP encryption using + [DTLS-SRTP](https://www.rfc-editor.org/info/rfc5763) +* as a transport for SCTP which is used by the Datachannel API + +The W3C WebRTC API represents this as the +[DtlsTransport](https://w3c.github.io/webrtc-pc/#rtcdtlstransport-interface). + +The DTLS handshake happens after the ICE transport becomes writable and has +found a valid pair. It results in a set of keys being derived for DTLS-SRTP as +well as a fingerprint of the remote certificate which is compared to the one +given in the SDP `a=fingerprint:` line. + +This documentation provides an overview of how DTLS is implemented, i.e how the +following classes interact. + +## webrtc::DtlsTransport + +The [`webrtc::DtlsTransport`][1] class is a wrapper around the +`cricket::DtlsTransportInternal` and allows registering observers implementing +the `webrtc::DtlsTransportObserverInterface`. 
The +[`webrtc::DtlsTransportObserverInterface`][2] will provide updates to the +observers, passing around a snapshot of the transports state such as the +connection state, the remote certificate(s) and the SRTP ciphers as +[`DtlsTransportInformation`][3]. + +## cricket::DtlsTransportInternal + +The [`cricket::DtlsTransportInternal`][4] class is an interface. Its +implementation is [`cricket::DtlsTransport`][5]. The `cricket::DtlsTransport` +sends and receives network packets via an ICE transport. It also demultiplexes +DTLS packets and SRTP packets according to the scheme described in +[RFC 5764](https://tools.ietf.org/html/rfc5764#section-5.1.2). + +## webrtc::DtlsSrtpTranport + +The [`webrtc::DtlsSrtpTransport`][6] class is responsіble for extracting the +SRTP keys after the DTLS handshake as well as protection and unprotection of +SRTP packets via its [`cricket::SrtpSession`][7]. + +[1]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/dtls_transport.h;l=32;drc=6a55e7307b78edb50f94a1ff1ef8393d58218369 +[2]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/dtls_transport_interface.h;l=76;drc=34437d5660a80393d631657329ef74c6538be25a +[3]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/dtls_transport_interface.h;l=41;drc=34437d5660a80393d631657329ef74c6538be25a +[4]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/dtls_transport_internal.h;l=63;drc=34437d5660a80393d631657329ef74c6538be25a +[5]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/p2p/base/dtls_transport.h;l=94;drc=653bab6790ac92c513b7cf4cd3ad59039c589a95 +[6]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/dtls_srtp_transport.h;l=31;drc=c32f00ea9ddf3267257fe6b45d4d79c6f6bcb829 +[7]: 
https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=33;drc=be66d95ab7f9428028806bbf66cb83800bda9241 diff --git a/pc/g3doc/peer_connection.md b/pc/g3doc/peer_connection.md new file mode 100644 index 0000000000..1eae135991 --- /dev/null +++ b/pc/g3doc/peer_connection.md @@ -0,0 +1,59 @@ + + + +# PeerConnection and friends + +The PeerConnection is the C++-level implementation of the Javascript +object "RTCPeerConnection" from the +[WEBRTC specification](https://w3c.github.io/webrtc-pc/). + +Like many objects in WebRTC, the PeerConnection is used via a factory and an +observer: + + * PeerConnectionFactory, which is created via a static Create method and takes + a PeerConnectionFactoryDependencies structure listing such things as + non-default threads and factories for use by all PeerConnections using + the same factory. (Using more than one factory should be avoided, since + it takes more resources.) + * PeerConnection itself, which is created by the method called + PeerConnectionFactory::CreatePeerConnectionOrError, and takes a + PeerConnectionInterface::RTCConfiguration argument, as well as + a PeerConnectionDependencies (even more factories, plus other stuff). + * PeerConnectionObserver (a member of PeerConnectionDependencies), which + contains the functions that will be called on events in the PeerConnection + +These types are visible in the API. + +## Internal structure of PeerConnection and friends + +The PeerConnection is, to a large extent, a "God object" - most things +that are done in WebRTC require a PeerConnection. + +Internally, it is divided into several objects, each with its own +responsibilities, all of which are owned by the PeerConnection and live +as long as the PeerConnection: + + * SdpOfferAnswerHandler takes care of negotiating configurations with + a remote peer, using SDP-formatted descriptions. 
+ * RtpTransmissionManager takes care of the lists of RtpSenders, + RtpReceivers and RtpTransceivers that form the heart of the transmission + service. + * DataChannelController takes care of managing the PeerConnection's + DataChannels and its SctpTransport. + * JsepTransportController takes care of configuring the details of senders + and receivers. + * Call does management of overall call state. + * RtcStatsCollector (and its obsolete sibling, StatsCollector) collects + statistics from all the objects comprising the PeerConnection when + requested. + +There are a number of other smaller objects that are also owned by +the PeerConnection, but it would take too much space to describe them +all here; please consult the .h files. + +PeerConnectionFactory owns an object called ConnectionContext, and a +reference to this is passed to each PeerConnection. It is referenced +via an rtc::scoped_refptr, which means that it is guaranteed to be +alive as long as either the factory or one of the PeerConnections +is using it. + diff --git a/pc/g3doc/rtp.md b/pc/g3doc/rtp.md new file mode 100644 index 0000000000..38c1702ad3 --- /dev/null +++ b/pc/g3doc/rtp.md @@ -0,0 +1,56 @@ + + + +# RTP in WebRTC + +WebRTC uses the RTP protocol described in +[RFC3550](https://datatracker.ietf.org/doc/html/rfc3550) for transporting audio +and video. Media is encrypted using [SRTP](./srtp.md). + +## Allocation of payload types + +RTP packets have a payload type field that describes which media codec can be +used to handle a packet. For some (older) codecs like PCMU the payload type is +assigned statically as described in +[RFC3551](https://datatracker.ietf.org/doc/html/rfc3551). For others, it is +assigned dynamically through the SDP. **Note:** there are no guarantees on the +stability of a payload type assignment. + +For this allocation, the range from 96 to 127 is used. 
When this range is +exhausted, the allocation falls back to the range from 35 to 63 as permitted by +[section 5.1 of RFC3550][1]. Note that older versions of WebRTC failed to +recognize payload types in the lower range. Newer codecs (such as flexfec-03 and +AV1) will by default be allocated in that range. + +Payload types in the range 64 to 95 are not used to avoid confusion with RTCP as +described in [RFC5761](https://datatracker.ietf.org/doc/html/rfc5761). + +## Allocation of audio payload types + +Audio payload types are assigned from a table by the [PayloadTypeMapper][2] +class. New audio codecs should be allocated in the lower dynamic range [35,63], +starting at 63, to reduce collisions with payload types + +## Allocation of video payload types + +Video payload types are allocated by the +[GetPayloadTypesAndDefaultCodecs method][3]. The set of codecs depends on the +platform, in particular for H264 codecs and their different profiles. Payload +numbers are assigned ascending from 96 for video codecs and their +[associated retransmission format](https://datatracker.ietf.org/doc/html/rfc4588). +Some codecs like flexfec-03 and AV1 are assigned to the lower range [35,63] for +reasons explained above. When the upper range [96,127] is exhausted, payload +types are assigned to the lower range [35,63], starting at 35. + +## Handling of payload type collisions + +Due to the requirement that payload types must be uniquely identifiable when +using [BUNDLE](https://datatracker.ietf.org/doc/html/rfc8829) collisions between +the assignments of the audio and video payload types may arise. These are +resolved by the [UsedPayloadTypes][4] class which will reassign payload type +numbers descending from 127. 
+ +[1]: https://datatracker.ietf.org/doc/html/rfc3550#section-5.1 +[2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/payload_type_mapper.cc;l=25;drc=4f26a3c7e8e20e0e0ca4ca67a6ebdf3f5543dc3f +[3]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/media/engine/webrtc_video_engine.cc;l=119;drc=b412efdb780c86e6530493afa403783d14985347 +[4]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/used_ids.h;l=94;drc=b412efdb780c86e6530493afa403783d14985347 diff --git a/pc/g3doc/sctp_transport.md b/pc/g3doc/sctp_transport.md new file mode 100644 index 0000000000..254e264b0b --- /dev/null +++ b/pc/g3doc/sctp_transport.md @@ -0,0 +1,44 @@ + + + + +# SctpTransport + +## webrtc::SctpTransport + +The [`webrtc::SctpTransport`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/sctp_transport.h;l=33?q=class%20webrtc::SctpTransport) class encapsulates an SCTP association, and exposes a +few properties of this association to the WebRTC user (such as Chrome). + +The SctpTransport is used to support Datachannels, as described in the [WebRTC +specification for the Peer-to-peer Data +API](https://w3c.github.io/webrtc-pc/#peer-to-peer-data-api). + +The public interface ([`webrtc::SctpTransportInterface`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/sctp_transport_interface.h?q=webrtc::SctpTransportInterface)) exposes an observer +interface where the user can define a callback to be called whenever the state +of an SctpTransport changes; this callback is called on the network thread (as +set during PeerConnectionFactory initialization). + +The implementation of this object lives in pc/sctp_transport.{h,cc}, and is +basically a wrapper around a `cricket::SctpTransportInternal`, hiding its +implementation details and APIs that shouldn't be accessed from the user. 
+ +The `webrtc::SctpTransport` is a ref counted object; it should be regarded +as owned by the PeerConnection, and will be closed when the PeerConnection +closes, but the object itself may survive longer than the PeerConnection. + +## cricket::SctpTransportInternal + +[`cricket::SctpTransportInternal`](https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/media/sctp/sctp_transport_internal.h?q=cricket::SctpTransportInternal) owns two objects: The SCTP association object (currently +implemented by wrapping the usrsctp library) and the DTLS transport, which is +the object used to send and receive messages as emitted from or consumed by the +usrsctp library. + +It communicates state changes and events using sigslot. + +See header files for details. + + + + + + diff --git a/pc/g3doc/srtp.md b/pc/g3doc/srtp.md new file mode 100644 index 0000000000..47446157c9 --- /dev/null +++ b/pc/g3doc/srtp.md @@ -0,0 +1,72 @@ + + + +# SRTP in WebRTC + +WebRTC mandates encryption of media by means of the Secure Realtime Protocol, or +SRTP, which is described in +[RFC 3711](https://datatracker.ietf.org/doc/html/rfc3711). + +The key negotiation in WebRTC happens using DTLS-SRTP which is described in +[RFC 5764](https://datatracker.ietf.org/doc/html/rfc5764). The older +[SDES protocol](https://datatracker.ietf.org/doc/html/rfc4568) is implemented +but not enabled by default. + +Unencrypted RTP can be enabled for debugging purposes by setting the +PeerConnections [`disable_encryption`][1] option to true. + +## Supported cipher suites + +The implementation supports the following cipher suites: + +* SRTP_AES128_CM_HMAC_SHA1_80 +* SRTP_AEAD_AES_128_GCM +* SRTP_AEAD_AES_256_GCM + +The SRTP_AES128_CM_HMAC_SHA1_32 cipher suite is accepted for audio-only +connections if offered by the other side. It is not actively supported, see +[SelectCrypto][2] for details. 
+ +The cipher suite ordering allows a non-WebRTC peer to prefer GCM cipher suites, +however they are not selected as default by two instances of the WebRTC library. + +## cricket::SrtpSession + +The [`cricket::SrtpSession`][3] is providing encryption and decryption of SRTP +packets using [`libsrtp`](https://github.com/cisco/libsrtp). Keys will be +provided by `SrtpTransport` or `DtlsSrtpTransport` in the [`SetSend`][4] and +[`SetRecv`][5] methods. + +Encryption and decryption happens in-place in the [`ProtectRtp`][6], +[`ProtectRtcp`][7], [`UnprotectRtp`][8] and [`UnprotectRtcp`][9] methods. The +`SrtpSession` class also takes care of initializing and deinitializing `libsrtp` +by keeping track of how many instances are being used. + +## webrtc::SrtpTransport and webrtc::DtlsSrtpTransport + +The [`webrtc::SrtpTransport`][10] class is controlling the `SrtpSession` +instances for RTP and RTCP. When +[rtcp-mux](https://datatracker.ietf.org/doc/html/rfc5761) is used, the +`SrtpSession` for RTCP is not needed. + +[`webrtc::DtlsSrtpTransport`][11] is a subclass of the `SrtpTransport` that +extracts the keying material when the DTLS handshake is done and configures it +in its base class. It will also become writable only once the DTLS handshake is +done. + +## cricket::SrtpFilter + +The [`cricket::SrtpFilter`][12] class is used to negotiate SDES. 
+ +[1]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/api/peer_connection_interface.h;l=1413;drc=f467b445631189557d44de86a77ca6a0c3e2108d +[2]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/media_session.cc;l=297;drc=3ac73bd0aa5322abee98f1ff8705af64a184bf61 +[3]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=33;drc=be66d95ab7f9428028806bbf66cb83800bda9241 +[4]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=40;drc=be66d95ab7f9428028806bbf66cb83800bda9241 +[5]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=51;drc=be66d95ab7f9428028806bbf66cb83800bda9241 +[6]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=62;drc=be66d95ab7f9428028806bbf66cb83800bda9241 +[7]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=69;drc=be66d95ab7f9428028806bbf66cb83800bda9241 +[8]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=72;drc=be66d95ab7f9428028806bbf66cb83800bda9241 +[9]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_session.h;l=73;drc=be66d95ab7f9428028806bbf66cb83800bda9241 +[10]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_transport.h;l=37;drc=a4d873786f10eedd72de25ad0d94ad7c53c1f68a +[11]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/dtls_srtp_transport.h;l=31;drc=2f8e0536eb97ce2131e7a74e3ca06077aa0b64b3 +[12]: https://source.chromium.org/chromium/chromium/src/+/main:third_party/webrtc/pc/srtp_filter.h;drc=d15a575ec3528c252419149d35977e55269d8a41 diff --git a/pc/ice_server_parsing.cc b/pc/ice_server_parsing.cc index 2400fd516f..0daf8e445d 100644 --- a/pc/ice_server_parsing.cc +++ b/pc/ice_server_parsing.cc @@ -12,7 
+12,9 @@ #include +#include #include // For std::isdigit. +#include #include #include "p2p/base/port_interface.h" @@ -21,6 +23,7 @@ #include "rtc_base/ip_address.h" #include "rtc_base/logging.h" #include "rtc_base/socket_address.h" +#include "rtc_base/string_encode.h" namespace webrtc { @@ -31,6 +34,15 @@ static const int kDefaultStunPort = 3478; static const int kDefaultStunTlsPort = 5349; static const char kTransport[] = "transport"; +// Allowed characters in hostname per RFC 3986 Appendix A "reg-name" +static const char kRegNameCharacters[] = + "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789" + "-._~" // unreserved + "%" // pct-encoded + "!$&'()*+,;="; // sub-delims + // NOTE: Must be in the same order as the ServiceType enum. static const char* kValidIceServiceTypes[] = {"stun", "stuns", "turn", "turns"}; @@ -99,6 +111,7 @@ static bool ParseHostnameAndPortFromString(const std::string& in_str, int* port) { RTC_DCHECK(host->empty()); if (in_str.at(0) == '[') { + // IP_literal syntax std::string::size_type closebracket = in_str.rfind(']'); if (closebracket != std::string::npos) { std::string::size_type colonpos = in_str.find(':', closebracket); @@ -113,6 +126,7 @@ static bool ParseHostnameAndPortFromString(const std::string& in_str, return false; } } else { + // IPv4address or reg-name syntax std::string::size_type colonpos = in_str.find(':'); if (std::string::npos != colonpos) { if (!ParsePort(in_str.substr(colonpos + 1, std::string::npos), port)) { @@ -122,6 +136,10 @@ static bool ParseHostnameAndPortFromString(const std::string& in_str, } else { *host = in_str; } + // RFC 3986 section 3.2.2 and Appendix A - "reg-name" syntax + if (host->find_first_not_of(kRegNameCharacters) != std::string::npos) { + return false; + } } return !host->empty(); } diff --git a/pc/ice_server_parsing_unittest.cc b/pc/ice_server_parsing_unittest.cc index 2625b24590..e4dbd3a0f5 100644 --- a/pc/ice_server_parsing_unittest.cc +++ 
b/pc/ice_server_parsing_unittest.cc @@ -182,6 +182,11 @@ TEST_F(IceServerParsingTest, ParseHostnameAndPort) { EXPECT_FALSE(ParseUrl("stun:[1:2:3:4:5:6:7:8]junk:1000")); EXPECT_FALSE(ParseUrl("stun::5555")); EXPECT_FALSE(ParseUrl("stun:")); + // Test illegal URLs according to RFC 3986 (URI generic syntax) + // and RFC 7064 (URI schemes for STUN and TURN) + EXPECT_FALSE(ParseUrl("stun:/hostname")); // / is not allowed + EXPECT_FALSE(ParseUrl("stun:?hostname")); // ? is not allowed + EXPECT_FALSE(ParseUrl("stun:#hostname")); // # is not allowed } // Test parsing the "?transport=xxx" part of the URL. diff --git a/pc/ice_transport.cc b/pc/ice_transport.cc index ccc5ecd7f2..205846755d 100644 --- a/pc/ice_transport.cc +++ b/pc/ice_transport.cc @@ -10,8 +10,7 @@ #include "pc/ice_transport.h" -#include -#include +#include "api/sequence_checker.h" namespace webrtc { diff --git a/pc/ice_transport.h b/pc/ice_transport.h index 69b69e41d8..11f3de5d27 100644 --- a/pc/ice_transport.h +++ b/pc/ice_transport.h @@ -12,9 +12,10 @@ #define PC_ICE_TRANSPORT_H_ #include "api/ice_transport_interface.h" -#include "rtc_base/constructor_magic.h" +#include "api/sequence_checker.h" +#include "rtc_base/checks.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { @@ -29,6 +30,10 @@ class IceTransportWithPointer : public IceTransportInterface { RTC_DCHECK(internal_); } + IceTransportWithPointer() = delete; + IceTransportWithPointer(const IceTransportWithPointer&) = delete; + IceTransportWithPointer& operator=(const IceTransportWithPointer&) = delete; + cricket::IceTransportInternal* internal() override; // This call will ensure that the pointer passed at construction is // no longer in use by this object. 
Later calls to internal() will return @@ -39,7 +44,6 @@ class IceTransportWithPointer : public IceTransportInterface { ~IceTransportWithPointer() override; private: - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(IceTransportWithPointer); const rtc::Thread* creator_thread_; cricket::IceTransportInternal* internal_ RTC_GUARDED_BY(creator_thread_); }; diff --git a/pc/ice_transport_unittest.cc b/pc/ice_transport_unittest.cc index 3711a86d5d..ebb46cb5d5 100644 --- a/pc/ice_transport_unittest.cc +++ b/pc/ice_transport_unittest.cc @@ -28,9 +28,8 @@ class IceTransportTest : public ::testing::Test {}; TEST_F(IceTransportTest, CreateNonSelfDeletingTransport) { auto cricket_transport = std::make_unique("name", 0, nullptr); - rtc::scoped_refptr ice_transport = - new rtc::RefCountedObject( - cricket_transport.get()); + auto ice_transport = + rtc::make_ref_counted(cricket_transport.get()); EXPECT_EQ(ice_transport->internal(), cricket_transport.get()); ice_transport->Clear(); EXPECT_NE(ice_transport->internal(), cricket_transport.get()); diff --git a/pc/jitter_buffer_delay.cc b/pc/jitter_buffer_delay.cc index c9506b3c59..801cef7215 100644 --- a/pc/jitter_buffer_delay.cc +++ b/pc/jitter_buffer_delay.cc @@ -10,13 +10,10 @@ #include "pc/jitter_buffer_delay.h" +#include "api/sequence_checker.h" #include "rtc_base/checks.h" -#include "rtc_base/location.h" -#include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/numerics/safe_minmax.h" -#include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" namespace { constexpr int kDefaultDelay = 0; @@ -25,43 +22,21 @@ constexpr int kMaximumDelayMs = 10000; namespace webrtc { -JitterBufferDelay::JitterBufferDelay(rtc::Thread* worker_thread) - : signaling_thread_(rtc::Thread::Current()), worker_thread_(worker_thread) { - RTC_DCHECK(worker_thread_); -} - -void JitterBufferDelay::OnStart(cricket::Delayable* media_channel, - uint32_t ssrc) { - RTC_DCHECK_RUN_ON(signaling_thread_); - - media_channel_ = 
media_channel; - ssrc_ = ssrc; - - // Trying to apply cached delay for the audio stream. - if (cached_delay_seconds_) { - Set(cached_delay_seconds_.value()); - } -} - -void JitterBufferDelay::OnStop() { - RTC_DCHECK_RUN_ON(signaling_thread_); - // Assume that audio stream is no longer present. - media_channel_ = nullptr; - ssrc_ = absl::nullopt; +JitterBufferDelay::JitterBufferDelay() { + worker_thread_checker_.Detach(); } void JitterBufferDelay::Set(absl::optional delay_seconds) { - RTC_DCHECK_RUN_ON(worker_thread_); - - // TODO(kuddai) propagate absl::optional deeper down as default preference. - int delay_ms = - rtc::saturated_cast(delay_seconds.value_or(kDefaultDelay) * 1000); - delay_ms = rtc::SafeClamp(delay_ms, 0, kMaximumDelayMs); - + RTC_DCHECK_RUN_ON(&worker_thread_checker_); cached_delay_seconds_ = delay_seconds; - if (media_channel_ && ssrc_) { - media_channel_->SetBaseMinimumPlayoutDelayMs(ssrc_.value(), delay_ms); - } +} + +int JitterBufferDelay::GetMs() const { + RTC_DCHECK_RUN_ON(&worker_thread_checker_); + return rtc::SafeClamp( + rtc::saturated_cast(cached_delay_seconds_.value_or(kDefaultDelay) * + 1000), + 0, kMaximumDelayMs); } } // namespace webrtc diff --git a/pc/jitter_buffer_delay.h b/pc/jitter_buffer_delay.h index 8edfc6ce20..dc10e3d2ba 100644 --- a/pc/jitter_buffer_delay.h +++ b/pc/jitter_buffer_delay.h @@ -14,36 +14,25 @@ #include #include "absl/types/optional.h" -#include "media/base/delayable.h" -#include "pc/jitter_buffer_delay_interface.h" -#include "rtc_base/thread.h" +#include "api/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" namespace webrtc { // JitterBufferDelay converts delay from seconds to milliseconds for the // underlying media channel. It also handles cases when user sets delay before -// the start of media_channel by caching its request. Note, this class is not -// thread safe. 
Its thread safe version is defined in -// pc/jitter_buffer_delay_proxy.h -class JitterBufferDelay : public JitterBufferDelayInterface { +// the start of media_channel by caching its request. +class JitterBufferDelay { public: - // Must be called on signaling thread. - explicit JitterBufferDelay(rtc::Thread* worker_thread); + JitterBufferDelay(); - void OnStart(cricket::Delayable* media_channel, uint32_t ssrc) override; - - void OnStop() override; - - void Set(absl::optional delay_seconds) override; + void Set(absl::optional delay_seconds); + int GetMs() const; private: - // Throughout webrtc source, sometimes it is also called as |main_thread_|. - rtc::Thread* const signaling_thread_; - rtc::Thread* const worker_thread_; - // Media channel and ssrc together uniqely identify audio stream. - cricket::Delayable* media_channel_ = nullptr; - absl::optional ssrc_; - absl::optional cached_delay_seconds_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_thread_checker_; + absl::optional cached_delay_seconds_ + RTC_GUARDED_BY(&worker_thread_checker_); }; } // namespace webrtc diff --git a/pc/jitter_buffer_delay_interface.h b/pc/jitter_buffer_delay_interface.h deleted file mode 100644 index f2132d318d..0000000000 --- a/pc/jitter_buffer_delay_interface.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef PC_JITTER_BUFFER_DELAY_INTERFACE_H_ -#define PC_JITTER_BUFFER_DELAY_INTERFACE_H_ - -#include - -#include "absl/types/optional.h" -#include "media/base/delayable.h" -#include "rtc_base/ref_count.h" - -namespace webrtc { - -// JitterBufferDelay delivers user's queries to the underlying media channel. It -// can describe either video or audio delay for receiving stream. "Interface" -// suffix in the interface name is required to be compatible with api/proxy.cc -class JitterBufferDelayInterface : public rtc::RefCountInterface { - public: - // OnStart allows to uniqely identify to which receiving stream playout - // delay must correpond through |media_channel| and |ssrc| pair. - virtual void OnStart(cricket::Delayable* media_channel, uint32_t ssrc) = 0; - - // Indicates that underlying receiving stream is stopped. - virtual void OnStop() = 0; - - virtual void Set(absl::optional delay_seconds) = 0; -}; - -} // namespace webrtc - -#endif // PC_JITTER_BUFFER_DELAY_INTERFACE_H_ diff --git a/pc/jitter_buffer_delay_proxy.h b/pc/jitter_buffer_delay_proxy.h deleted file mode 100644 index b3380fd258..0000000000 --- a/pc/jitter_buffer_delay_proxy.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef PC_JITTER_BUFFER_DELAY_PROXY_H_ -#define PC_JITTER_BUFFER_DELAY_PROXY_H_ - -#include - -#include "api/proxy.h" -#include "media/base/delayable.h" -#include "pc/jitter_buffer_delay_interface.h" - -namespace webrtc { - -BEGIN_PROXY_MAP(JitterBufferDelay) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_METHOD2(void, OnStart, cricket::Delayable*, uint32_t) -PROXY_METHOD0(void, OnStop) -PROXY_WORKER_METHOD1(void, Set, absl::optional) -END_PROXY_MAP() - -} // namespace webrtc - -#endif // PC_JITTER_BUFFER_DELAY_PROXY_H_ diff --git a/pc/jitter_buffer_delay_unittest.cc b/pc/jitter_buffer_delay_unittest.cc index 7edd09acd2..b00075ceb5 100644 --- a/pc/jitter_buffer_delay_unittest.cc +++ b/pc/jitter_buffer_delay_unittest.cc @@ -13,79 +13,47 @@ #include #include "absl/types/optional.h" -#include "api/scoped_refptr.h" -#include "pc/test/mock_delayable.h" -#include "rtc_base/ref_counted_object.h" -#include "rtc_base/thread.h" -#include "test/gmock.h" #include "test/gtest.h" -using ::testing::Return; - -namespace { -constexpr int kSsrc = 1234; -} // namespace - namespace webrtc { class JitterBufferDelayTest : public ::testing::Test { public: - JitterBufferDelayTest() - : delay_(new rtc::RefCountedObject( - rtc::Thread::Current())) {} + JitterBufferDelayTest() {} protected: - rtc::scoped_refptr delay_; - MockDelayable delayable_; + JitterBufferDelay delay_; }; TEST_F(JitterBufferDelayTest, Set) { - delay_->OnStart(&delayable_, kSsrc); - - EXPECT_CALL(delayable_, SetBaseMinimumPlayoutDelayMs(kSsrc, 3000)) - .WillOnce(Return(true)); - // Delay in seconds. - delay_->Set(3.0); + delay_.Set(3.0); + EXPECT_EQ(delay_.GetMs(), 3000); } -TEST_F(JitterBufferDelayTest, Caching) { - // Check that value is cached before start. - delay_->Set(4.0); - - // Check that cached value applied on the start. 
- EXPECT_CALL(delayable_, SetBaseMinimumPlayoutDelayMs(kSsrc, 4000)) - .WillOnce(Return(true)); - delay_->OnStart(&delayable_, kSsrc); +TEST_F(JitterBufferDelayTest, DefaultValue) { + EXPECT_EQ(delay_.GetMs(), 0); // Default value is 0ms. } TEST_F(JitterBufferDelayTest, Clamping) { - delay_->OnStart(&delayable_, kSsrc); - // In current Jitter Buffer implementation (Audio or Video) maximum supported // value is 10000 milliseconds. - EXPECT_CALL(delayable_, SetBaseMinimumPlayoutDelayMs(kSsrc, 10000)) - .WillOnce(Return(true)); - delay_->Set(10.5); + delay_.Set(10.5); + EXPECT_EQ(delay_.GetMs(), 10000); // Test int overflow. - EXPECT_CALL(delayable_, SetBaseMinimumPlayoutDelayMs(kSsrc, 10000)) - .WillOnce(Return(true)); - delay_->Set(21474836470.0); + delay_.Set(21474836470.0); + EXPECT_EQ(delay_.GetMs(), 10000); - EXPECT_CALL(delayable_, SetBaseMinimumPlayoutDelayMs(kSsrc, 0)) - .WillOnce(Return(true)); - delay_->Set(-21474836470.0); + delay_.Set(-21474836470.0); + EXPECT_EQ(delay_.GetMs(), 0); // Boundary value in seconds to milliseconds conversion. - EXPECT_CALL(delayable_, SetBaseMinimumPlayoutDelayMs(kSsrc, 0)) - .WillOnce(Return(true)); - delay_->Set(0.0009); - - EXPECT_CALL(delayable_, SetBaseMinimumPlayoutDelayMs(kSsrc, 0)) - .WillOnce(Return(true)); + delay_.Set(0.0009); + EXPECT_EQ(delay_.GetMs(), 0); - delay_->Set(-2.0); + delay_.Set(-2.0); + EXPECT_EQ(delay_.GetMs(), 0); } } // namespace webrtc diff --git a/pc/jsep_ice_candidate.cc b/pc/jsep_ice_candidate.cc index 4e4542182a..6dacde629c 100644 --- a/pc/jsep_ice_candidate.cc +++ b/pc/jsep_ice_candidate.cc @@ -14,6 +14,11 @@ #include "pc/webrtc_sdp.h" +// This file contains JsepIceCandidate-related functions that are not +// included in api/jsep_ice_candidate.cc. Some of these link to SDP +// parsing/serializing functions, which some users may not want. +// TODO(bugs.webrtc.org/12330): Merge the two .cc files somehow. 
+ namespace webrtc { IceCandidateInterface* CreateIceCandidate(const std::string& sdp_mid, @@ -49,6 +54,16 @@ JsepIceCandidate::JsepIceCandidate(const std::string& sdp_mid, JsepIceCandidate::~JsepIceCandidate() {} +JsepCandidateCollection JsepCandidateCollection::Clone() const { + JsepCandidateCollection new_collection; + for (const auto& candidate : candidates_) { + new_collection.candidates_.push_back(std::make_unique( + candidate->sdp_mid(), candidate->sdp_mline_index(), + candidate->candidate())); + } + return new_collection; +} + bool JsepIceCandidate::Initialize(const std::string& sdp, SdpParseError* err) { return SdpDeserializeCandidate(sdp, this, err); } diff --git a/pc/jsep_session_description.cc b/pc/jsep_session_description.cc index 7f30b50d97..9de81947de 100644 --- a/pc/jsep_session_description.cc +++ b/pc/jsep_session_description.cc @@ -215,6 +215,18 @@ bool JsepSessionDescription::Initialize( return true; } +std::unique_ptr JsepSessionDescription::Clone() + const { + auto new_description = std::make_unique(type_); + new_description->session_id_ = session_id_; + new_description->session_version_ = session_version_; + new_description->description_ = description_->Clone(); + for (const auto& collection : candidate_collection_) { + new_description->candidate_collection_.push_back(collection.Clone()); + } + return new_description; +} + bool JsepSessionDescription::AddCandidate( const IceCandidateInterface* candidate) { if (!candidate) diff --git a/pc/jsep_session_description_unittest.cc b/pc/jsep_session_description_unittest.cc index 8caac94613..d922a586c5 100644 --- a/pc/jsep_session_description_unittest.cc +++ b/pc/jsep_session_description_unittest.cc @@ -117,6 +117,45 @@ class JsepSessionDescriptionTest : public ::testing::Test { std::unique_ptr jsep_desc_; }; +TEST_F(JsepSessionDescriptionTest, CloneDefault) { + auto new_desc = jsep_desc_->Clone(); + EXPECT_EQ(jsep_desc_->type(), new_desc->type()); + std::string old_desc_string; + std::string 
new_desc_string; + EXPECT_TRUE(jsep_desc_->ToString(&old_desc_string)); + EXPECT_TRUE(new_desc->ToString(&new_desc_string)); + EXPECT_EQ(old_desc_string, new_desc_string); + EXPECT_EQ(jsep_desc_->session_id(), new_desc->session_id()); + EXPECT_EQ(jsep_desc_->session_version(), new_desc->session_version()); +} + +TEST_F(JsepSessionDescriptionTest, CloneWithCandidates) { + cricket::Candidate candidate_v4( + cricket::ICE_CANDIDATE_COMPONENT_RTP, "udp", + rtc::SocketAddress("192.168.1.5", 1234), kCandidatePriority, "", "", + cricket::STUN_PORT_TYPE, kCandidateGeneration, kCandidateFoundation); + cricket::Candidate candidate_v6( + cricket::ICE_CANDIDATE_COMPONENT_RTP, "udp", + rtc::SocketAddress("::1", 1234), kCandidatePriority, "", "", + cricket::LOCAL_PORT_TYPE, kCandidateGeneration, kCandidateFoundation); + + JsepIceCandidate jice_v4("audio", 0, candidate_v4); + JsepIceCandidate jice_v6("audio", 0, candidate_v6); + JsepIceCandidate jice_v4_video("video", 0, candidate_v4); + JsepIceCandidate jice_v6_video("video", 0, candidate_v6); + ASSERT_TRUE(jsep_desc_->AddCandidate(&jice_v4)); + ASSERT_TRUE(jsep_desc_->AddCandidate(&jice_v6)); + ASSERT_TRUE(jsep_desc_->AddCandidate(&jice_v4_video)); + ASSERT_TRUE(jsep_desc_->AddCandidate(&jice_v6_video)); + auto new_desc = jsep_desc_->Clone(); + EXPECT_EQ(jsep_desc_->type(), new_desc->type()); + std::string old_desc_string; + std::string new_desc_string; + EXPECT_TRUE(jsep_desc_->ToString(&old_desc_string)); + EXPECT_TRUE(new_desc->ToString(&new_desc_string)); + EXPECT_EQ(old_desc_string, new_desc_string); +} + // Test that number_of_mediasections() returns the number of media contents in // a session description. 
TEST_F(JsepSessionDescriptionTest, CheckSessionDescription) { diff --git a/pc/jsep_transport.cc b/pc/jsep_transport.cc index 5788825230..e72088885f 100644 --- a/pc/jsep_transport.cc +++ b/pc/jsep_transport.cc @@ -14,7 +14,6 @@ #include #include -#include #include // for std::pair #include "api/array_view.h" @@ -25,7 +24,9 @@ #include "rtc_base/checks.h" #include "rtc_base/copy_on_write_buffer.h" #include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/trace_event.h" using webrtc::SdpType; @@ -38,16 +39,12 @@ JsepTransportDescription::JsepTransportDescription( const std::vector& cryptos, const std::vector& encrypted_header_extension_ids, int rtp_abs_sendtime_extn_id, - const TransportDescription& transport_desc, - absl::optional media_alt_protocol, - absl::optional data_alt_protocol) + const TransportDescription& transport_desc) : rtcp_mux_enabled(rtcp_mux_enabled), cryptos(cryptos), encrypted_header_extension_ids(encrypted_header_extension_ids), rtp_abs_sendtime_extn_id(rtp_abs_sendtime_extn_id), - transport_desc(transport_desc), - media_alt_protocol(media_alt_protocol), - data_alt_protocol(data_alt_protocol) {} + transport_desc(transport_desc) {} JsepTransportDescription::JsepTransportDescription( const JsepTransportDescription& from) @@ -55,9 +52,7 @@ JsepTransportDescription::JsepTransportDescription( cryptos(from.cryptos), encrypted_header_extension_ids(from.encrypted_header_extension_ids), rtp_abs_sendtime_extn_id(from.rtp_abs_sendtime_extn_id), - transport_desc(from.transport_desc), - media_alt_protocol(from.media_alt_protocol), - data_alt_protocol(from.data_alt_protocol) {} + transport_desc(from.transport_desc) {} JsepTransportDescription::~JsepTransportDescription() = default; @@ -71,8 +66,6 @@ JsepTransportDescription& JsepTransportDescription::operator=( encrypted_header_extension_ids = from.encrypted_header_extension_ids; rtp_abs_sendtime_extn_id = 
from.rtp_abs_sendtime_extn_id; transport_desc = from.transport_desc; - media_alt_protocol = from.media_alt_protocol; - data_alt_protocol = from.data_alt_protocol; return *this; } @@ -85,12 +78,9 @@ JsepTransport::JsepTransport( std::unique_ptr unencrypted_rtp_transport, std::unique_ptr sdes_transport, std::unique_ptr dtls_srtp_transport, - std::unique_ptr datagram_rtp_transport, std::unique_ptr rtp_dtls_transport, std::unique_ptr rtcp_dtls_transport, - std::unique_ptr sctp_transport, - std::unique_ptr datagram_transport, - webrtc::DataChannelTransportInterface* data_channel_transport) + std::unique_ptr sctp_transport) : network_thread_(rtc::Thread::Current()), mid_(mid), local_certificate_(local_certificate), @@ -99,26 +89,23 @@ JsepTransport::JsepTransport( unencrypted_rtp_transport_(std::move(unencrypted_rtp_transport)), sdes_transport_(std::move(sdes_transport)), dtls_srtp_transport_(std::move(dtls_srtp_transport)), - rtp_dtls_transport_( - rtp_dtls_transport ? new rtc::RefCountedObject( - std::move(rtp_dtls_transport)) - : nullptr), - rtcp_dtls_transport_( - rtcp_dtls_transport - ? new rtc::RefCountedObject( - std::move(rtcp_dtls_transport)) - : nullptr), + rtp_dtls_transport_(rtp_dtls_transport + ? rtc::make_ref_counted( + std::move(rtp_dtls_transport)) + : nullptr), + rtcp_dtls_transport_(rtcp_dtls_transport + ? rtc::make_ref_counted( + std::move(rtcp_dtls_transport)) + : nullptr), sctp_data_channel_transport_( sctp_transport ? std::make_unique( sctp_transport.get()) : nullptr), sctp_transport_(sctp_transport - ? new rtc::RefCountedObject( + ? 
rtc::make_ref_counted( std::move(sctp_transport)) - : nullptr), - datagram_transport_(std::move(datagram_transport)), - datagram_rtp_transport_(std::move(datagram_rtp_transport)), - data_channel_transport_(data_channel_transport) { + : nullptr) { + TRACE_EVENT0("webrtc", "JsepTransport::JsepTransport"); RTC_DCHECK(ice_transport_); RTC_DCHECK(rtp_dtls_transport_); // |rtcp_ice_transport_| must be present iff |rtcp_dtls_transport_| is @@ -141,22 +128,10 @@ JsepTransport::JsepTransport( if (sctp_transport_) { sctp_transport_->SetDtlsTransport(rtp_dtls_transport_); } - - if (datagram_rtp_transport_ && default_rtp_transport()) { - composite_rtp_transport_ = std::make_unique( - std::vector{ - datagram_rtp_transport_.get(), default_rtp_transport()}); - } - - if (data_channel_transport_ && sctp_data_channel_transport_) { - composite_data_channel_transport_ = - std::make_unique( - std::vector{ - data_channel_transport_, sctp_data_channel_transport_.get()}); - } } JsepTransport::~JsepTransport() { + TRACE_EVENT0("webrtc", "JsepTransport::~JsepTransport"); if (sctp_transport_) { sctp_transport_->Clear(); } @@ -175,7 +150,7 @@ webrtc::RTCError JsepTransport::SetLocalJsepTransportDescription( const JsepTransportDescription& jsep_description, SdpType type) { webrtc::RTCError error; - + TRACE_EVENT0("webrtc", "JsepTransport::SetLocalJsepTransportDescription"); RTC_DCHECK_RUN_ON(network_thread_); IceParameters ice_parameters = @@ -195,23 +170,20 @@ webrtc::RTCError JsepTransport::SetLocalJsepTransportDescription( } // If doing SDES, setup the SDES crypto parameters. 
- { - rtc::CritScope scope(&accessor_lock_); - if (sdes_transport_) { - RTC_DCHECK(!unencrypted_rtp_transport_); - RTC_DCHECK(!dtls_srtp_transport_); - if (!SetSdes(jsep_description.cryptos, - jsep_description.encrypted_header_extension_ids, type, - ContentSource::CS_LOCAL)) { - return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER, - "Failed to setup SDES crypto parameters."); - } - } else if (dtls_srtp_transport_) { - RTC_DCHECK(!unencrypted_rtp_transport_); - RTC_DCHECK(!sdes_transport_); - dtls_srtp_transport_->UpdateRecvEncryptedHeaderExtensionIds( - jsep_description.encrypted_header_extension_ids); + if (sdes_transport_) { + RTC_DCHECK(!unencrypted_rtp_transport_); + RTC_DCHECK(!dtls_srtp_transport_); + if (!SetSdes(jsep_description.cryptos, + jsep_description.encrypted_header_extension_ids, type, + ContentSource::CS_LOCAL)) { + return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER, + "Failed to setup SDES crypto parameters."); } + } else if (dtls_srtp_transport_) { + RTC_DCHECK(!unencrypted_rtp_transport_); + RTC_DCHECK(!sdes_transport_); + dtls_srtp_transport_->UpdateRecvEncryptedHeaderExtensionIds( + jsep_description.encrypted_header_extension_ids); } bool ice_restarting = local_description_ != nullptr && @@ -232,34 +204,30 @@ webrtc::RTCError JsepTransport::SetLocalJsepTransportDescription( return error; } } - { - rtc::CritScope scope(&accessor_lock_); RTC_DCHECK(rtp_dtls_transport_->internal()); rtp_dtls_transport_->internal()->ice_transport()->SetIceParameters( ice_parameters); - if (rtcp_dtls_transport_) { - RTC_DCHECK(rtcp_dtls_transport_->internal()); - rtcp_dtls_transport_->internal()->ice_transport()->SetIceParameters( - ice_parameters); + { + if (rtcp_dtls_transport_) { + RTC_DCHECK(rtcp_dtls_transport_->internal()); + rtcp_dtls_transport_->internal()->ice_transport()->SetIceParameters( + ice_parameters); + } } - } // If PRANSWER/ANSWER is set, we should decide transport protocol type. 
if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { error = NegotiateAndSetDtlsParameters(type); - NegotiateDatagramTransport(type); } if (!error.ok()) { local_description_.reset(); return error; } - { - rtc::CritScope scope(&accessor_lock_); - if (needs_ice_restart_ && ice_restarting) { - needs_ice_restart_ = false; - RTC_LOG(LS_VERBOSE) << "needs-ice-restart flag cleared for transport " - << mid(); - } + + if (needs_ice_restart_ && ice_restarting) { + needs_ice_restart_ = false; + RTC_LOG(LS_VERBOSE) << "needs-ice-restart flag cleared for transport " + << mid(); } return webrtc::RTCError::OK(); @@ -268,6 +236,7 @@ webrtc::RTCError JsepTransport::SetLocalJsepTransportDescription( webrtc::RTCError JsepTransport::SetRemoteJsepTransportDescription( const JsepTransportDescription& jsep_description, webrtc::SdpType type) { + TRACE_EVENT0("webrtc", "JsepTransport::SetLocalJsepTransportDescription"); webrtc::RTCError error; RTC_DCHECK_RUN_ON(network_thread_); @@ -290,27 +259,24 @@ webrtc::RTCError JsepTransport::SetRemoteJsepTransportDescription( } // If doing SDES, setup the SDES crypto parameters. 
- { - rtc::CritScope lock(&accessor_lock_); - if (sdes_transport_) { - RTC_DCHECK(!unencrypted_rtp_transport_); - RTC_DCHECK(!dtls_srtp_transport_); - if (!SetSdes(jsep_description.cryptos, - jsep_description.encrypted_header_extension_ids, type, - ContentSource::CS_REMOTE)) { - return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER, - "Failed to setup SDES crypto parameters."); - } - sdes_transport_->CacheRtpAbsSendTimeHeaderExtension( - jsep_description.rtp_abs_sendtime_extn_id); - } else if (dtls_srtp_transport_) { - RTC_DCHECK(!unencrypted_rtp_transport_); - RTC_DCHECK(!sdes_transport_); - dtls_srtp_transport_->UpdateSendEncryptedHeaderExtensionIds( - jsep_description.encrypted_header_extension_ids); - dtls_srtp_transport_->CacheRtpAbsSendTimeHeaderExtension( - jsep_description.rtp_abs_sendtime_extn_id); + if (sdes_transport_) { + RTC_DCHECK(!unencrypted_rtp_transport_); + RTC_DCHECK(!dtls_srtp_transport_); + if (!SetSdes(jsep_description.cryptos, + jsep_description.encrypted_header_extension_ids, type, + ContentSource::CS_REMOTE)) { + return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER, + "Failed to setup SDES crypto parameters."); } + sdes_transport_->CacheRtpAbsSendTimeHeaderExtension( + jsep_description.rtp_abs_sendtime_extn_id); + } else if (dtls_srtp_transport_) { + RTC_DCHECK(!unencrypted_rtp_transport_); + RTC_DCHECK(!sdes_transport_); + dtls_srtp_transport_->UpdateSendEncryptedHeaderExtensionIds( + jsep_description.encrypted_header_extension_ids); + dtls_srtp_transport_->CacheRtpAbsSendTimeHeaderExtension( + jsep_description.rtp_abs_sendtime_extn_id); } remote_description_.reset(new JsepTransportDescription(jsep_description)); @@ -325,7 +291,6 @@ webrtc::RTCError JsepTransport::SetRemoteJsepTransportDescription( // If PRANSWER/ANSWER is set, we should decide transport protocol type. 
if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { error = NegotiateAndSetDtlsParameters(SdpType::kOffer); - NegotiateDatagramTransport(type); } if (!error.ok()) { remote_description_.reset(); @@ -363,7 +328,7 @@ webrtc::RTCError JsepTransport::AddRemoteCandidates( } void JsepTransport::SetNeedsIceRestartFlag() { - rtc::CritScope scope(&accessor_lock_); + RTC_DCHECK_RUN_ON(network_thread_); if (!needs_ice_restart_) { needs_ice_restart_ = true; RTC_LOG(LS_VERBOSE) << "needs-ice-restart flag set for transport " << mid(); @@ -372,7 +337,6 @@ void JsepTransport::SetNeedsIceRestartFlag() { absl::optional JsepTransport::GetDtlsRole() const { RTC_DCHECK_RUN_ON(network_thread_); - rtc::CritScope scope(&accessor_lock_); RTC_DCHECK(rtp_dtls_transport_); RTC_DCHECK(rtp_dtls_transport_->internal()); rtc::SSLRole dtls_role; @@ -383,28 +347,19 @@ absl::optional JsepTransport::GetDtlsRole() const { return absl::optional(dtls_role); } -absl::optional -JsepTransport::GetTransportParameters() const { - rtc::CritScope scope(&accessor_lock_); - if (!datagram_transport_) { - return absl::nullopt; - } - - OpaqueTransportParameters params; - params.parameters = datagram_transport_->GetTransportParameters(); - return params; -} - bool JsepTransport::GetStats(TransportStats* stats) { + TRACE_EVENT0("webrtc", "JsepTransport::GetStats"); RTC_DCHECK_RUN_ON(network_thread_); - rtc::CritScope scope(&accessor_lock_); stats->transport_name = mid(); stats->channel_stats.clear(); RTC_DCHECK(rtp_dtls_transport_->internal()); - bool ret = GetTransportStats(rtp_dtls_transport_->internal(), stats); + bool ret = GetTransportStats(rtp_dtls_transport_->internal(), + ICE_CANDIDATE_COMPONENT_RTP, stats); + if (rtcp_dtls_transport_) { RTC_DCHECK(rtcp_dtls_transport_->internal()); - ret &= GetTransportStats(rtcp_dtls_transport_->internal(), stats); + ret &= GetTransportStats(rtcp_dtls_transport_->internal(), + ICE_CANDIDATE_COMPONENT_RTCP, stats); } return ret; } @@ -412,6 +367,7 @@ bool 
JsepTransport::GetStats(TransportStats* stats) { webrtc::RTCError JsepTransport::VerifyCertificateFingerprint( const rtc::RTCCertificate* certificate, const rtc::SSLFingerprint* fingerprint) const { + TRACE_EVENT0("webrtc", "JsepTransport::VerifyCertificateFingerprint"); RTC_DCHECK_RUN_ON(network_thread_); if (!fingerprint) { return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER, @@ -439,7 +395,6 @@ webrtc::RTCError JsepTransport::VerifyCertificateFingerprint( void JsepTransport::SetActiveResetSrtpParams(bool active_reset_srtp_params) { RTC_DCHECK_RUN_ON(network_thread_); - rtc::CritScope scope(&accessor_lock_); if (dtls_srtp_transport_) { RTC_LOG(INFO) << "Setting active_reset_srtp_params of DtlsSrtpTransport to: " @@ -451,6 +406,7 @@ void JsepTransport::SetActiveResetSrtpParams(bool active_reset_srtp_params) { void JsepTransport::SetRemoteIceParameters( const IceParameters& ice_parameters, IceTransportInternal* ice_transport) { + TRACE_EVENT0("webrtc", "JsepTransport::SetRemoteIceParameters"); RTC_DCHECK_RUN_ON(network_thread_); RTC_DCHECK(ice_transport); RTC_DCHECK(remote_description_); @@ -514,31 +470,22 @@ bool JsepTransport::SetRtcpMux(bool enable, } void JsepTransport::ActivateRtcpMux() { - { - // Don't hold the network_thread_ lock while calling other functions, - // since they might call other functions that call RTC_DCHECK_RUN_ON. - // TODO(https://crbug.com/webrtc/10318): Simplify when possible. 
- RTC_DCHECK_RUN_ON(network_thread_); - } - { - rtc::CritScope scope(&accessor_lock_); - if (unencrypted_rtp_transport_) { - RTC_DCHECK(!sdes_transport_); - RTC_DCHECK(!dtls_srtp_transport_); - unencrypted_rtp_transport_->SetRtcpPacketTransport(nullptr); - } else if (sdes_transport_) { - RTC_DCHECK(!unencrypted_rtp_transport_); - RTC_DCHECK(!dtls_srtp_transport_); - sdes_transport_->SetRtcpPacketTransport(nullptr); - } else if (dtls_srtp_transport_) { - RTC_DCHECK(dtls_srtp_transport_); - RTC_DCHECK(!unencrypted_rtp_transport_); - RTC_DCHECK(!sdes_transport_); - dtls_srtp_transport_->SetDtlsTransports(rtp_dtls_transport_locked(), - /*rtcp_dtls_transport=*/nullptr); - } - rtcp_dtls_transport_ = nullptr; // Destroy this reference. + if (unencrypted_rtp_transport_) { + RTC_DCHECK(!sdes_transport_); + RTC_DCHECK(!dtls_srtp_transport_); + unencrypted_rtp_transport_->SetRtcpPacketTransport(nullptr); + } else if (sdes_transport_) { + RTC_DCHECK(!unencrypted_rtp_transport_); + RTC_DCHECK(!dtls_srtp_transport_); + sdes_transport_->SetRtcpPacketTransport(nullptr); + } else if (dtls_srtp_transport_) { + RTC_DCHECK(dtls_srtp_transport_); + RTC_DCHECK(!unencrypted_rtp_transport_); + RTC_DCHECK(!sdes_transport_); + dtls_srtp_transport_->SetDtlsTransports(rtp_dtls_transport(), + /*rtcp_dtls_transport=*/nullptr); } + rtcp_dtls_transport_ = nullptr; // Destroy this reference. // Notify the JsepTransportController to update the aggregate states. SignalRtcpMuxActive(); } @@ -730,17 +677,12 @@ webrtc::RTCError JsepTransport::NegotiateDtlsRole( } bool JsepTransport::GetTransportStats(DtlsTransportInternal* dtls_transport, + int component, TransportStats* stats) { RTC_DCHECK_RUN_ON(network_thread_); RTC_DCHECK(dtls_transport); TransportChannelStats substats; - if (rtcp_dtls_transport_) { - substats.component = dtls_transport == rtcp_dtls_transport_->internal() - ? 
ICE_CANDIDATE_COMPONENT_RTCP - : ICE_CANDIDATE_COMPONENT_RTP; - } else { - substats.component = ICE_CANDIDATE_COMPONENT_RTP; - } + substats.component = component; dtls_transport->GetSslVersionBytes(&substats.ssl_version_bytes); dtls_transport->GetSrtpCryptoSuite(&substats.srtp_crypto_suite); dtls_transport->GetSslCipherSuite(&substats.ssl_cipher_suite); @@ -753,106 +695,4 @@ bool JsepTransport::GetTransportStats(DtlsTransportInternal* dtls_transport, return true; } -void JsepTransport::NegotiateDatagramTransport(SdpType type) { - RTC_DCHECK(type == SdpType::kAnswer || type == SdpType::kPrAnswer); - rtc::CritScope lock(&accessor_lock_); - if (!datagram_transport_) { - return; // No need to negotiate the use of datagram transport. - } - - bool compatible_datagram_transport = false; - if (datagram_transport_ && - local_description_->transport_desc.opaque_parameters && - remote_description_->transport_desc.opaque_parameters) { - // If both descriptions have datagram transport parameters, and the remote - // parameters are accepted by the datagram transport, then use the datagram - // transport. Otherwise, fall back to RTP. 
- compatible_datagram_transport = - datagram_transport_ - ->SetRemoteTransportParameters(remote_description_->transport_desc - .opaque_parameters->parameters) - .ok(); - } - - bool use_datagram_transport_for_media = - compatible_datagram_transport && - remote_description_->media_alt_protocol == - remote_description_->transport_desc.opaque_parameters->protocol && - remote_description_->media_alt_protocol == - local_description_->media_alt_protocol; - - bool use_datagram_transport_for_data = - compatible_datagram_transport && - remote_description_->data_alt_protocol == - remote_description_->transport_desc.opaque_parameters->protocol && - remote_description_->data_alt_protocol == - local_description_->data_alt_protocol; - - RTC_LOG(LS_INFO) - << "Negotiating datagram transport, use_datagram_transport_for_media=" - << use_datagram_transport_for_media - << ", use_datagram_transport_for_data=" << use_datagram_transport_for_data - << " answer type=" << (type == SdpType::kAnswer ? "answer" : "pr_answer"); - - // A provisional or full or answer lets the peer start sending on one of the - // transports. - if (composite_rtp_transport_) { - composite_rtp_transport_->SetSendTransport( - use_datagram_transport_for_media ? datagram_rtp_transport_.get() - : default_rtp_transport()); - } - if (composite_data_channel_transport_) { - composite_data_channel_transport_->SetSendTransport( - use_datagram_transport_for_data ? data_channel_transport_ - : sctp_data_channel_transport_.get()); - } - - if (type != SdpType::kAnswer) { - return; - } - - if (composite_rtp_transport_) { - if (use_datagram_transport_for_media) { - // Negotiated use of datagram transport for RTP, so remove the - // non-datagram RTP transport. 
- composite_rtp_transport_->RemoveTransport(default_rtp_transport()); - if (unencrypted_rtp_transport_) { - unencrypted_rtp_transport_ = nullptr; - } else if (sdes_transport_) { - sdes_transport_ = nullptr; - } else { - dtls_srtp_transport_ = nullptr; - } - } else { - composite_rtp_transport_->RemoveTransport(datagram_rtp_transport_.get()); - datagram_rtp_transport_ = nullptr; - } - } - - if (composite_data_channel_transport_) { - if (use_datagram_transport_for_data) { - // Negotiated use of datagram transport for data channels, so remove the - // non-datagram data channel transport. - composite_data_channel_transport_->RemoveTransport( - sctp_data_channel_transport_.get()); - sctp_data_channel_transport_ = nullptr; - sctp_transport_ = nullptr; - } else { - composite_data_channel_transport_->RemoveTransport( - data_channel_transport_); - data_channel_transport_ = nullptr; - } - } else if (data_channel_transport_ && !use_datagram_transport_for_data) { - // The datagram transport has been rejected without a fallback. We still - // need to inform the application and delete it. - SignalDataChannelTransportNegotiated(this, nullptr); - data_channel_transport_ = nullptr; - } - - if (!use_datagram_transport_for_media && !use_datagram_transport_for_data) { - // Datagram transport is not being used for anything, so clean it up. 
- datagram_transport_ = nullptr; - } -} - } // namespace cricket diff --git a/pc/jsep_transport.h b/pc/jsep_transport.h index 2d20d29479..5e8cae0ecf 100644 --- a/pc/jsep_transport.h +++ b/pc/jsep_transport.h @@ -18,29 +18,38 @@ #include "absl/types/optional.h" #include "api/candidate.h" +#include "api/crypto_params.h" #include "api/ice_transport_interface.h" #include "api/jsep.h" -#include "api/transport/datagram_transport_interface.h" +#include "api/rtc_error.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/transport/data_channel_transport_interface.h" #include "media/sctp/sctp_transport_internal.h" #include "p2p/base/dtls_transport.h" +#include "p2p/base/dtls_transport_internal.h" +#include "p2p/base/ice_transport_internal.h" #include "p2p/base/p2p_constants.h" +#include "p2p/base/transport_description.h" #include "p2p/base/transport_info.h" -#include "pc/composite_data_channel_transport.h" -#include "pc/composite_rtp_transport.h" #include "pc/dtls_srtp_transport.h" #include "pc/dtls_transport.h" #include "pc/rtcp_mux_filter.h" #include "pc/rtp_transport.h" +#include "pc/rtp_transport_internal.h" #include "pc/sctp_transport.h" #include "pc/session_description.h" #include "pc/srtp_filter.h" #include "pc/srtp_transport.h" #include "pc/transport_stats.h" +#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/rtc_certificate.h" +#include "rtc_base/ssl_fingerprint.h" #include "rtc_base/ssl_stream_adapter.h" #include "rtc_base/third_party/sigslot/sigslot.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace cricket { @@ -54,9 +63,7 @@ struct JsepTransportDescription { const std::vector& cryptos, const std::vector& encrypted_header_extension_ids, int rtp_abs_sendtime_extn_id, - const TransportDescription& transport_description, - absl::optional media_alt_protocol, - absl::optional data_alt_protocol); + const TransportDescription& 
transport_description); JsepTransportDescription(const JsepTransportDescription& from); ~JsepTransportDescription(); @@ -69,14 +76,6 @@ struct JsepTransportDescription { // TODO(zhihuang): Add the ICE and DTLS related variables and methods from // TransportDescription and remove this extra layer of abstraction. TransportDescription transport_desc; - - // Alt-protocols that apply to this JsepTransport. Presence indicates a - // request to use an alternative protocol for media and/or data. The - // alt-protocol is handled by a datagram transport. If one or both of these - // values are present, JsepTransport will attempt to negotiate use of the - // datagram transport for media and/or data. - absl::optional media_alt_protocol; - absl::optional data_alt_protocol; }; // Helper class used by JsepTransportController that processes @@ -100,12 +99,9 @@ class JsepTransport : public sigslot::has_slots<> { std::unique_ptr unencrypted_rtp_transport, std::unique_ptr sdes_transport, std::unique_ptr dtls_srtp_transport, - std::unique_ptr datagram_rtp_transport, std::unique_ptr rtp_dtls_transport, std::unique_ptr rtcp_dtls_transport, - std::unique_ptr sctp_transport, - std::unique_ptr datagram_transport, - webrtc::DataChannelTransportInterface* data_channel_transport); + std::unique_ptr sctp_transport); ~JsepTransport() override; @@ -128,41 +124,36 @@ class JsepTransport : public sigslot::has_slots<> { webrtc::RTCError SetLocalJsepTransportDescription( const JsepTransportDescription& jsep_description, - webrtc::SdpType type) RTC_LOCKS_EXCLUDED(accessor_lock_); + webrtc::SdpType type); // Set the remote TransportDescription to be used by DTLS and ICE channels // that are part of this Transport. 
webrtc::RTCError SetRemoteJsepTransportDescription( const JsepTransportDescription& jsep_description, - webrtc::SdpType type) RTC_LOCKS_EXCLUDED(accessor_lock_); - webrtc::RTCError AddRemoteCandidates(const Candidates& candidates) - RTC_LOCKS_EXCLUDED(accessor_lock_); + webrtc::SdpType type); + webrtc::RTCError AddRemoteCandidates(const Candidates& candidates); // Set the "needs-ice-restart" flag as described in JSEP. After the flag is // set, offers should generate new ufrags/passwords until an ICE restart // occurs. // - // This and the below method can be called safely from any thread as long as - // SetXTransportDescription is not in progress. - void SetNeedsIceRestartFlag() RTC_LOCKS_EXCLUDED(accessor_lock_); + // This and |needs_ice_restart()| must be called on the network thread. + void SetNeedsIceRestartFlag(); + // Returns true if the ICE restart flag above was set, and no ICE restart has // occurred yet for this transport (by applying a local description with // changed ufrag/password). - bool needs_ice_restart() const RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); + bool needs_ice_restart() const { + RTC_DCHECK_RUN_ON(network_thread_); return needs_ice_restart_; } // Returns role if negotiated, or empty absl::optional if it hasn't been // negotiated yet. - absl::optional GetDtlsRole() const - RTC_LOCKS_EXCLUDED(accessor_lock_); - - absl::optional GetTransportParameters() const - RTC_LOCKS_EXCLUDED(accessor_lock_); + absl::optional GetDtlsRole() const; // TODO(deadbeef): Make this const. See comment in transportcontroller.h. 
- bool GetStats(TransportStats* stats) RTC_LOCKS_EXCLUDED(accessor_lock_); + bool GetStats(TransportStats* stats); const JsepTransportDescription* local_description() const { RTC_DCHECK_RUN_ON(network_thread_); @@ -174,82 +165,65 @@ class JsepTransport : public sigslot::has_slots<> { return remote_description_.get(); } - webrtc::RtpTransportInternal* rtp_transport() const - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); - if (composite_rtp_transport_) { - return composite_rtp_transport_.get(); - } else if (datagram_rtp_transport_) { - return datagram_rtp_transport_.get(); - } else { - return default_rtp_transport(); + // Returns the rtp transport, if any. + webrtc::RtpTransportInternal* rtp_transport() const { + if (dtls_srtp_transport_) { + return dtls_srtp_transport_.get(); + } + if (sdes_transport_) { + return sdes_transport_.get(); + } + if (unencrypted_rtp_transport_) { + return unencrypted_rtp_transport_.get(); } + return nullptr; } - const DtlsTransportInternal* rtp_dtls_transport() const - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); + const DtlsTransportInternal* rtp_dtls_transport() const { if (rtp_dtls_transport_) { return rtp_dtls_transport_->internal(); - } else { - return nullptr; } + return nullptr; } - DtlsTransportInternal* rtp_dtls_transport() - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); - return rtp_dtls_transport_locked(); + DtlsTransportInternal* rtp_dtls_transport() { + if (rtp_dtls_transport_) { + return rtp_dtls_transport_->internal(); + } + return nullptr; } - const DtlsTransportInternal* rtcp_dtls_transport() const - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); + const DtlsTransportInternal* rtcp_dtls_transport() const { + RTC_DCHECK_RUN_ON(network_thread_); if (rtcp_dtls_transport_) { return rtcp_dtls_transport_->internal(); - } else { - return nullptr; } + return nullptr; } - DtlsTransportInternal* 
rtcp_dtls_transport() - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); + DtlsTransportInternal* rtcp_dtls_transport() { + RTC_DCHECK_RUN_ON(network_thread_); if (rtcp_dtls_transport_) { return rtcp_dtls_transport_->internal(); - } else { - return nullptr; } + return nullptr; } - rtc::scoped_refptr RtpDtlsTransport() - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); + rtc::scoped_refptr RtpDtlsTransport() { return rtp_dtls_transport_; } - rtc::scoped_refptr SctpTransport() const - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); + rtc::scoped_refptr SctpTransport() const { return sctp_transport_; } - webrtc::DataChannelTransportInterface* data_channel_transport() const - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); - if (composite_data_channel_transport_) { - return composite_data_channel_transport_.get(); - } else if (sctp_data_channel_transport_) { + // TODO(bugs.webrtc.org/9719): Delete method, update callers to use + // SctpTransport() instead. + webrtc::DataChannelTransportInterface* data_channel_transport() const { + if (sctp_data_channel_transport_) { return sctp_data_channel_transport_.get(); } - return data_channel_transport_; - } - - // Returns datagram transport, if available. - webrtc::DatagramTransportInterface* datagram_transport() const - RTC_LOCKS_EXCLUDED(accessor_lock_) { - rtc::CritScope scope(&accessor_lock_); - return datagram_transport_.get(); + return nullptr; } // This is signaled when RTCP-mux becomes active and @@ -257,15 +231,6 @@ class JsepTransport : public sigslot::has_slots<> { // handle the signal and update the aggregate transport states. sigslot::signal<> SignalRtcpMuxActive; - // Signals that a data channel transport was negotiated and may be used to - // send data. The first parameter is |this|. 
The second parameter is the - // transport that was negotiated, or null if negotiation rejected the data - // channel transport. The third parameter (bool) indicates whether the - // negotiation was provisional or final. If true, it is provisional, if - // false, it is final. - sigslot::signal2 - SignalDataChannelTransportNegotiated; - // TODO(deadbeef): The methods below are only public for testing. Should make // them utility functions or objects so they can be tested independently from // this class. @@ -279,24 +244,14 @@ class JsepTransport : public sigslot::has_slots<> { void SetActiveResetSrtpParams(bool active_reset_srtp_params); private: - DtlsTransportInternal* rtp_dtls_transport_locked() - RTC_EXCLUSIVE_LOCKS_REQUIRED(accessor_lock_) { - if (rtp_dtls_transport_) { - return rtp_dtls_transport_->internal(); - } else { - return nullptr; - } - } - bool SetRtcpMux(bool enable, webrtc::SdpType type, ContentSource source); - void ActivateRtcpMux(); + void ActivateRtcpMux() RTC_RUN_ON(network_thread_); bool SetSdes(const std::vector& cryptos, const std::vector& encrypted_extension_ids, webrtc::SdpType type, - ContentSource source) - RTC_EXCLUSIVE_LOCKS_REQUIRED(accessor_lock_); + ContentSource source); // Negotiates and sets the DTLS parameters based on the current local and // remote transport description, such as the DTLS role to use, and whether @@ -313,8 +268,7 @@ class JsepTransport : public sigslot::has_slots<> { webrtc::SdpType local_description_type, ConnectionRole local_connection_role, ConnectionRole remote_connection_role, - absl::optional* negotiated_dtls_role) - RTC_LOCKS_EXCLUDED(accessor_lock_); + absl::optional* negotiated_dtls_role); // Pushes down the ICE parameters from the remote description. 
void SetRemoteIceParameters(const IceParameters& ice_parameters, @@ -327,37 +281,14 @@ class JsepTransport : public sigslot::has_slots<> { rtc::SSLFingerprint* remote_fingerprint); bool GetTransportStats(DtlsTransportInternal* dtls_transport, - TransportStats* stats) - RTC_EXCLUSIVE_LOCKS_REQUIRED(accessor_lock_); - - // Deactivates, signals removal, and deletes |composite_rtp_transport_| if the - // current state of negotiation is sufficient to determine which rtp_transport - // and data channel transport to use. - void NegotiateDatagramTransport(webrtc::SdpType type) - RTC_RUN_ON(network_thread_) RTC_LOCKS_EXCLUDED(accessor_lock_); - - // Returns the default (non-datagram) rtp transport, if any. - webrtc::RtpTransportInternal* default_rtp_transport() const - RTC_EXCLUSIVE_LOCKS_REQUIRED(accessor_lock_) { - if (dtls_srtp_transport_) { - return dtls_srtp_transport_.get(); - } else if (sdes_transport_) { - return sdes_transport_.get(); - } else if (unencrypted_rtp_transport_) { - return unencrypted_rtp_transport_.get(); - } else { - return nullptr; - } - } + int component, + TransportStats* stats); // Owning thread, for safety checks const rtc::Thread* const network_thread_; - // Critical scope for fields accessed off-thread - // TODO(https://bugs.webrtc.org/10300): Stop doing this. - rtc::CriticalSection accessor_lock_; const std::string mid_; // needs-ice-restart bit as described in JSEP. - bool needs_ice_restart_ RTC_GUARDED_BY(accessor_lock_) = false; + bool needs_ice_restart_ RTC_GUARDED_BY(network_thread_) = false; rtc::scoped_refptr local_certificate_ RTC_GUARDED_BY(network_thread_); std::unique_ptr local_description_ @@ -372,31 +303,19 @@ class JsepTransport : public sigslot::has_slots<> { // To avoid downcasting and make it type safe, keep three unique pointers for // different SRTP mode and only one of these is non-nullptr. 
- std::unique_ptr unencrypted_rtp_transport_ - RTC_GUARDED_BY(accessor_lock_); - std::unique_ptr sdes_transport_ - RTC_GUARDED_BY(accessor_lock_); - std::unique_ptr dtls_srtp_transport_ - RTC_GUARDED_BY(accessor_lock_); - - // If multiple RTP transports are in use, |composite_rtp_transport_| will be - // passed to callers. This is only valid for offer-only, receive-only - // scenarios, as it is not possible for the composite to correctly choose - // which transport to use for sending. - std::unique_ptr composite_rtp_transport_ - RTC_GUARDED_BY(accessor_lock_); - - rtc::scoped_refptr rtp_dtls_transport_ - RTC_GUARDED_BY(accessor_lock_); + const std::unique_ptr unencrypted_rtp_transport_; + const std::unique_ptr sdes_transport_; + const std::unique_ptr dtls_srtp_transport_; + + const rtc::scoped_refptr rtp_dtls_transport_; + // The RTCP transport is const for all usages, except that it is cleared + // when RTCP multiplexing is turned on; this happens on the network thread. rtc::scoped_refptr rtcp_dtls_transport_ - RTC_GUARDED_BY(accessor_lock_); - rtc::scoped_refptr datagram_dtls_transport_ - RTC_GUARDED_BY(accessor_lock_); + RTC_GUARDED_BY(network_thread_); - std::unique_ptr - sctp_data_channel_transport_ RTC_GUARDED_BY(accessor_lock_); - rtc::scoped_refptr sctp_transport_ - RTC_GUARDED_BY(accessor_lock_); + const std::unique_ptr + sctp_data_channel_transport_; + const rtc::scoped_refptr sctp_transport_; SrtpFilter sdes_negotiator_ RTC_GUARDED_BY(network_thread_); RtcpMuxFilter rtcp_mux_negotiator_ RTC_GUARDED_BY(network_thread_); @@ -407,22 +326,6 @@ class JsepTransport : public sigslot::has_slots<> { absl::optional> recv_extension_ids_ RTC_GUARDED_BY(network_thread_); - // Optional datagram transport (experimental). - std::unique_ptr datagram_transport_ - RTC_GUARDED_BY(accessor_lock_); - - std::unique_ptr datagram_rtp_transport_ - RTC_GUARDED_BY(accessor_lock_); - - // Non-SCTP data channel transport. 
Set to |datagram_transport_| if that - // transport should be used for data chanels. Unset otherwise. - webrtc::DataChannelTransportInterface* data_channel_transport_ - RTC_GUARDED_BY(accessor_lock_) = nullptr; - - // Composite data channel transport, used during negotiation. - std::unique_ptr - composite_data_channel_transport_ RTC_GUARDED_BY(accessor_lock_); - RTC_DISALLOW_COPY_AND_ASSIGN(JsepTransport); }; diff --git a/pc/jsep_transport_collection.cc b/pc/jsep_transport_collection.cc new file mode 100644 index 0000000000..ce068d99fc --- /dev/null +++ b/pc/jsep_transport_collection.cc @@ -0,0 +1,255 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/jsep_transport_collection.h" + +#include +#include +#include +#include + +#include "p2p/base/p2p_constants.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +void BundleManager::Update(const cricket::SessionDescription* description) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + bundle_groups_.clear(); + for (const cricket::ContentGroup* new_bundle_group : + description->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE)) { + bundle_groups_.push_back( + std::make_unique(*new_bundle_group)); + RTC_DLOG(LS_VERBOSE) << "Establishing bundle group " + << new_bundle_group->ToString(); + } + established_bundle_groups_by_mid_.clear(); + for (const auto& bundle_group : bundle_groups_) { + for (const std::string& content_name : bundle_group->content_names()) { + established_bundle_groups_by_mid_[content_name] = bundle_group.get(); + } + } +} + +const cricket::ContentGroup* BundleManager::LookupGroupByMid( + const std::string& mid) const { + auto it = established_bundle_groups_by_mid_.find(mid); + return it != established_bundle_groups_by_mid_.end() ? it->second : nullptr; +} +bool BundleManager::IsFirstMidInGroup(const std::string& mid) const { + auto group = LookupGroupByMid(mid); + if (!group) { + return true; // Unbundled MIDs are considered group leaders + } + return mid == *(group->FirstContentName()); +} + +cricket::ContentGroup* BundleManager::LookupGroupByMid(const std::string& mid) { + auto it = established_bundle_groups_by_mid_.find(mid); + return it != established_bundle_groups_by_mid_.end() ? it->second : nullptr; +} + +void BundleManager::DeleteMid(const cricket::ContentGroup* bundle_group, + const std::string& mid) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_LOG(LS_VERBOSE) << "Deleting mid " << mid << " from bundle group " + << bundle_group->ToString(); + // Remove the rejected content from the |bundle_group|. + // The const pointer arg is used to identify the group, we verify + // it before we use it to make a modification. 
+ auto bundle_group_it = std::find_if( + bundle_groups_.begin(), bundle_groups_.end(), + [bundle_group](std::unique_ptr& group) { + return bundle_group == group.get(); + }); + RTC_DCHECK(bundle_group_it != bundle_groups_.end()); + (*bundle_group_it)->RemoveContentName(mid); + established_bundle_groups_by_mid_.erase( + established_bundle_groups_by_mid_.find(mid)); +} + +void BundleManager::DeleteGroup(const cricket::ContentGroup* bundle_group) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DLOG(LS_VERBOSE) << "Deleting bundle group " << bundle_group->ToString(); + + auto bundle_group_it = std::find_if( + bundle_groups_.begin(), bundle_groups_.end(), + [bundle_group](std::unique_ptr& group) { + return bundle_group == group.get(); + }); + RTC_DCHECK(bundle_group_it != bundle_groups_.end()); + auto mid_list = (*bundle_group_it)->content_names(); + for (const auto& content_name : mid_list) { + DeleteMid(bundle_group, content_name); + } + bundle_groups_.erase(bundle_group_it); +} + +void JsepTransportCollection::RegisterTransport( + const std::string& mid, + std::unique_ptr transport) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + SetTransportForMid(mid, transport.get()); + jsep_transports_by_name_[mid] = std::move(transport); + RTC_DCHECK(IsConsistent()); +} + +std::vector JsepTransportCollection::Transports() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + std::vector result; + for (auto& kv : jsep_transports_by_name_) { + result.push_back(kv.second.get()); + } + return result; +} + +void JsepTransportCollection::DestroyAllTransports() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (const auto& jsep_transport : jsep_transports_by_name_) { + map_change_callback_(jsep_transport.first, nullptr); + } + jsep_transports_by_name_.clear(); + RTC_DCHECK(IsConsistent()); +} + +const cricket::JsepTransport* JsepTransportCollection::GetTransportByName( + const std::string& transport_name) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = 
jsep_transports_by_name_.find(transport_name); + return (it == jsep_transports_by_name_.end()) ? nullptr : it->second.get(); +} + +cricket::JsepTransport* JsepTransportCollection::GetTransportByName( + const std::string& transport_name) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = jsep_transports_by_name_.find(transport_name); + return (it == jsep_transports_by_name_.end()) ? nullptr : it->second.get(); +} + +cricket::JsepTransport* JsepTransportCollection::GetTransportForMid( + const std::string& mid) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = mid_to_transport_.find(mid); + return it == mid_to_transport_.end() ? nullptr : it->second; +} + +const cricket::JsepTransport* JsepTransportCollection::GetTransportForMid( + const std::string& mid) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + auto it = mid_to_transport_.find(mid); + return it == mid_to_transport_.end() ? nullptr : it->second; +} + +bool JsepTransportCollection::SetTransportForMid( + const std::string& mid, + cricket::JsepTransport* jsep_transport) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(jsep_transport); + + auto it = mid_to_transport_.find(mid); + if (it != mid_to_transport_.end() && it->second == jsep_transport) + return true; + + pending_mids_.push_back(mid); + + // The map_change_callback must be called before destroying the + // transport, because it removes references to the transport + // in the RTP demuxer. 
+ bool result = map_change_callback_(mid, jsep_transport); + + if (it == mid_to_transport_.end()) { + mid_to_transport_.insert(std::make_pair(mid, jsep_transport)); + } else { + auto old_transport = it->second; + it->second = jsep_transport; + MaybeDestroyJsepTransport(old_transport); + } + RTC_DCHECK(IsConsistent()); + return result; +} + +void JsepTransportCollection::RemoveTransportForMid(const std::string& mid) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(IsConsistent()); + bool ret = map_change_callback_(mid, nullptr); + // Calling OnTransportChanged with nullptr should always succeed, since it is + // only expected to fail when adding media to a transport (not removing). + RTC_DCHECK(ret); + + auto old_transport = GetTransportForMid(mid); + if (old_transport) { + mid_to_transport_.erase(mid); + MaybeDestroyJsepTransport(old_transport); + } + RTC_DCHECK(IsConsistent()); +} + +void JsepTransportCollection::RollbackTransports() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (auto&& mid : pending_mids_) { + RemoveTransportForMid(mid); + } + pending_mids_.clear(); +} + +void JsepTransportCollection::CommitTransports() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + pending_mids_.clear(); +} + +bool JsepTransportCollection::TransportInUse( + cricket::JsepTransport* jsep_transport) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (const auto& kv : mid_to_transport_) { + if (kv.second == jsep_transport) { + return true; + } + } + return false; +} + +void JsepTransportCollection::MaybeDestroyJsepTransport( + cricket::JsepTransport* transport) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + // Don't destroy the JsepTransport if there are still media sections referring + // to it. 
+ if (TransportInUse(transport)) { + return; + } + for (const auto& it : jsep_transports_by_name_) { + if (it.second.get() == transport) { + jsep_transports_by_name_.erase(it.first); + state_change_callback_(); + break; + } + } + RTC_DCHECK(IsConsistent()); +} + +bool JsepTransportCollection::IsConsistent() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (const auto& it : jsep_transports_by_name_) { + if (!TransportInUse(it.second.get())) { + RTC_LOG(LS_ERROR) << "Transport registered with mid " << it.first + << " is not in use, transport " << it.second.get(); + return false; + } + const auto& lookup = mid_to_transport_.find(it.first); + if (lookup->second != it.second.get()) { + // Not an error, but unusual. + RTC_DLOG(LS_INFO) << "Note: Mid " << it.first << " was registered to " + << it.second.get() << " but currently maps to " + << lookup->second; + } + } + return true; +} + +} // namespace webrtc diff --git a/pc/jsep_transport_collection.h b/pc/jsep_transport_collection.h new file mode 100644 index 0000000000..0dd528d348 --- /dev/null +++ b/pc/jsep_transport_collection.h @@ -0,0 +1,145 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef PC_JSEP_TRANSPORT_COLLECTION_H_ +#define PC_JSEP_TRANSPORT_COLLECTION_H_ + +#include +#include +#include +#include +#include +#include + +#include "api/sequence_checker.h" +#include "pc/jsep_transport.h" +#include "pc/session_description.h" +#include "rtc_base/checks.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +// This class manages information about RFC 8843 BUNDLE bundles +// in SDP descriptions. + +// This is a work-in-progress. Planned steps: +// 1) Move all Bundle-related data structures from JsepTransport +// into this class. +// 2) Move all Bundle-related functions into this class. +// 3) Move remaining Bundle-related logic into this class. +// Make data members private. +// 4) Refine interface to have comprehensible semantics. +// 5) Add unit tests. +// 6) Change the logic to do what's right. +class BundleManager { + public: + BundleManager() { + // Allow constructor to be called on a different thread. + sequence_checker_.Detach(); + } + const std::vector>& bundle_groups() + const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return bundle_groups_; + } + // Lookup a bundle group by a member mid name. + const cricket::ContentGroup* LookupGroupByMid(const std::string& mid) const; + cricket::ContentGroup* LookupGroupByMid(const std::string& mid); + // Returns true if the MID is the first item of a group, or if + // the MID is not a member of a group. + bool IsFirstMidInGroup(const std::string& mid) const; + // Update the groups description. This completely replaces the group + // description with the one from the SessionDescription. + void Update(const cricket::SessionDescription* description); + // Delete a MID from the group that contains it. + void DeleteMid(const cricket::ContentGroup* bundle_group, + const std::string& mid); + // Delete a group. 
+ void DeleteGroup(const cricket::ContentGroup* bundle_group); + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + std::vector> bundle_groups_ + RTC_GUARDED_BY(sequence_checker_); + std::map + established_bundle_groups_by_mid_; +}; + +// This class keeps the mapping of MIDs to transports. +// It is pulled out here because a lot of the code that deals with +// bundles end up modifying this map, and the two need to be consistent; +// the managers may merge. +class JsepTransportCollection { + public: + JsepTransportCollection(std::function + map_change_callback, + std::function state_change_callback) + : map_change_callback_(map_change_callback), + state_change_callback_(state_change_callback) { + // Allow constructor to be called on a different thread. + sequence_checker_.Detach(); + } + + void RegisterTransport(const std::string& mid, + std::unique_ptr transport); + std::vector Transports(); + void DestroyAllTransports(); + // Lookup a JsepTransport by the MID that was used to register it. + cricket::JsepTransport* GetTransportByName(const std::string& mid); + const cricket::JsepTransport* GetTransportByName( + const std::string& mid) const; + // Lookup a JsepTransport by any MID that refers to it. + cricket::JsepTransport* GetTransportForMid(const std::string& mid); + const cricket::JsepTransport* GetTransportForMid( + const std::string& mid) const; + // Set transport for a MID. This may destroy a transport if it is no + // longer in use. + bool SetTransportForMid(const std::string& mid, + cricket::JsepTransport* jsep_transport); + // Remove a transport for a MID. This may destroy a transport if it is + // no longer in use. + void RemoveTransportForMid(const std::string& mid); + // Roll back pending mid-to-transport mappings. + void RollbackTransports(); + // Commit pending mid-transport mappings (rollback is no longer possible). + void CommitTransports(); + // Returns true if any mid currently maps to this transport. 
+ bool TransportInUse(cricket::JsepTransport* jsep_transport) const; + + private: + // Destroy a transport if it's no longer in use. + void MaybeDestroyJsepTransport(cricket::JsepTransport* transport); + + bool IsConsistent(); // For testing only: Verify internal structure. + + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + // This member owns the JSEP transports. + std::map> + jsep_transports_by_name_ RTC_GUARDED_BY(sequence_checker_); + + // This keeps track of the mapping between media section + // (BaseChannel/SctpTransport) and the JsepTransport underneath. + std::map mid_to_transport_ + RTC_GUARDED_BY(sequence_checker_); + // Keep track of mids that have been mapped to transports. Used for rollback. + std::vector pending_mids_ RTC_GUARDED_BY(sequence_checker_); + // Callback used to inform subscribers of altered transports. + const std::function + map_change_callback_; + // Callback used to inform subscribers of possibly altered state. + const std::function state_change_callback_; +}; + +} // namespace webrtc + +#endif // PC_JSEP_TRANSPORT_COLLECTION_H_ diff --git a/pc/jsep_transport_controller.cc b/pc/jsep_transport_controller.cc index a7e1b876fe..f0e377e048 100644 --- a/pc/jsep_transport_controller.cc +++ b/pc/jsep_transport_controller.cc @@ -10,105 +10,76 @@ #include "pc/jsep_transport_controller.h" +#include + +#include +#include #include +#include #include #include "absl/algorithm/container.h" -#include "api/ice_transport_factory.h" -#include "api/transport/datagram_transport_interface.h" -#include "api/transport/media/media_transport_interface.h" +#include "api/dtls_transport_interface.h" +#include "api/rtp_parameters.h" +#include "api/sequence_checker.h" +#include "api/transport/enums.h" +#include "media/sctp/sctp_transport_internal.h" +#include "p2p/base/dtls_transport.h" #include "p2p/base/ice_transport_internal.h" +#include "p2p/base/p2p_constants.h" #include "p2p/base/port.h" -#include "pc/datagram_rtp_transport.h" -#include 
"pc/srtp_filter.h" -#include "rtc_base/bind.h" #include "rtc_base/checks.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" #include "rtc_base/thread.h" +#include "rtc_base/trace_event.h" using webrtc::SdpType; -namespace { - -webrtc::RTCError VerifyCandidate(const cricket::Candidate& cand) { - // No address zero. - if (cand.address().IsNil() || cand.address().IsAnyIP()) { - return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER, - "candidate has address of zero"); - } - - // Disallow all ports below 1024, except for 80 and 443 on public addresses. - int port = cand.address().port(); - if (cand.protocol() == cricket::TCP_PROTOCOL_NAME && - (cand.tcptype() == cricket::TCPTYPE_ACTIVE_STR || port == 0)) { - // Expected for active-only candidates per - // http://tools.ietf.org/html/rfc6544#section-4.5 so no error. - // Libjingle clients emit port 0, in "active" mode. - return webrtc::RTCError::OK(); - } - if (port < 1024) { - if ((port != 80) && (port != 443)) { - return webrtc::RTCError( - webrtc::RTCErrorType::INVALID_PARAMETER, - "candidate has port below 1024, but not 80 or 443"); - } - - if (cand.address().IsPrivateIP()) { - return webrtc::RTCError( - webrtc::RTCErrorType::INVALID_PARAMETER, - "candidate has port of 80 or 443 with private IP address"); - } - } - - return webrtc::RTCError::OK(); -} - -webrtc::RTCError VerifyCandidates(const cricket::Candidates& candidates) { - for (const cricket::Candidate& candidate : candidates) { - webrtc::RTCError error = VerifyCandidate(candidate); - if (!error.ok()) { - return error; - } - } - return webrtc::RTCError::OK(); -} - -} // namespace - namespace webrtc { JsepTransportController::JsepTransportController( - rtc::Thread* signaling_thread, rtc::Thread* network_thread, cricket::PortAllocator* port_allocator, - AsyncResolverFactory* async_resolver_factory, + AsyncDnsResolverFactoryInterface* async_dns_resolver_factory, Config config) - : signaling_thread_(signaling_thread), - 
network_thread_(network_thread), + : network_thread_(network_thread), port_allocator_(port_allocator), - async_resolver_factory_(async_resolver_factory), - config_(config) { + async_dns_resolver_factory_(async_dns_resolver_factory), + transports_( + [this](const std::string& mid, cricket::JsepTransport* transport) { + return OnTransportChanged(mid, transport); + }, + [this]() { + RTC_DCHECK_RUN_ON(network_thread_); + UpdateAggregateStates_n(); + }), + config_(config), + active_reset_srtp_params_(config.active_reset_srtp_params) { // The |transport_observer| is assumed to be non-null. RTC_DCHECK(config_.transport_observer); RTC_DCHECK(config_.rtcp_handler); RTC_DCHECK(config_.ice_transport_factory); + RTC_DCHECK(config_.on_dtls_handshake_error_); } JsepTransportController::~JsepTransportController() { // Channel destructors may try to send packets, so this needs to happen on // the network thread. - network_thread_->Invoke( - RTC_FROM_HERE, - rtc::Bind(&JsepTransportController::DestroyAllJsepTransports_n, this)); + RTC_DCHECK_RUN_ON(network_thread_); + DestroyAllJsepTransports_n(); } RTCError JsepTransportController::SetLocalDescription( SdpType type, const cricket::SessionDescription* description) { + TRACE_EVENT0("webrtc", "JsepTransportController::SetLocalDescription"); if (!network_thread_->IsCurrent()) { return network_thread_->Invoke( RTC_FROM_HERE, [=] { return SetLocalDescription(type, description); }); } + RTC_DCHECK_RUN_ON(network_thread_); if (!initial_offerer_.has_value()) { initial_offerer_.emplace(type == SdpType::kOffer); if (*initial_offerer_) { @@ -123,16 +94,19 @@ RTCError JsepTransportController::SetLocalDescription( RTCError JsepTransportController::SetRemoteDescription( SdpType type, const cricket::SessionDescription* description) { + TRACE_EVENT0("webrtc", "JsepTransportController::SetRemoteDescription"); if (!network_thread_->IsCurrent()) { return network_thread_->Invoke( RTC_FROM_HERE, [=] { return SetRemoteDescription(type, description); }); 
} + RTC_DCHECK_RUN_ON(network_thread_); return ApplyDescription_n(/*local=*/false, type, description); } RtpTransportInternal* JsepTransportController::GetRtpTransport( const std::string& mid) const { + RTC_DCHECK_RUN_ON(network_thread_); auto jsep_transport = GetJsepTransportForMid(mid); if (!jsep_transport) { return nullptr; @@ -140,28 +114,9 @@ RtpTransportInternal* JsepTransportController::GetRtpTransport( return jsep_transport->rtp_transport(); } -MediaTransportConfig JsepTransportController::GetMediaTransportConfig( - const std::string& mid) const { - auto jsep_transport = GetJsepTransportForMid(mid); - if (!jsep_transport) { - return MediaTransportConfig(); - } - - DatagramTransportInterface* datagram_transport = nullptr; - if (config_.use_datagram_transport) { - datagram_transport = jsep_transport->datagram_transport(); - } - - if (datagram_transport) { - return MediaTransportConfig( - /*rtp_max_packet_size=*/datagram_transport->GetLargestDatagramSize()); - } else { - return MediaTransportConfig(); - } -} - DataChannelTransportInterface* JsepTransportController::GetDataChannelTransport( const std::string& mid) const { + RTC_DCHECK_RUN_ON(network_thread_); auto jsep_transport = GetJsepTransportForMid(mid); if (!jsep_transport) { return nullptr; @@ -171,6 +126,7 @@ DataChannelTransportInterface* JsepTransportController::GetDataChannelTransport( cricket::DtlsTransportInternal* JsepTransportController::GetDtlsTransport( const std::string& mid) { + RTC_DCHECK_RUN_ON(network_thread_); auto jsep_transport = GetJsepTransportForMid(mid); if (!jsep_transport) { return nullptr; @@ -180,6 +136,7 @@ cricket::DtlsTransportInternal* JsepTransportController::GetDtlsTransport( const cricket::DtlsTransportInternal* JsepTransportController::GetRtcpDtlsTransport(const std::string& mid) const { + RTC_DCHECK_RUN_ON(network_thread_); auto jsep_transport = GetJsepTransportForMid(mid); if (!jsep_transport) { return nullptr; @@ -189,6 +146,7 @@ 
JsepTransportController::GetRtcpDtlsTransport(const std::string& mid) const { rtc::scoped_refptr JsepTransportController::LookupDtlsTransportByMid(const std::string& mid) { + RTC_DCHECK_RUN_ON(network_thread_); auto jsep_transport = GetJsepTransportForMid(mid); if (!jsep_transport) { return nullptr; @@ -198,6 +156,7 @@ JsepTransportController::LookupDtlsTransportByMid(const std::string& mid) { rtc::scoped_refptr JsepTransportController::GetSctpTransport( const std::string& mid) const { + RTC_DCHECK_RUN_ON(network_thread_); auto jsep_transport = GetJsepTransportForMid(mid); if (!jsep_transport) { return nullptr; @@ -206,11 +165,7 @@ rtc::scoped_refptr JsepTransportController::GetSctpTransport( } void JsepTransportController::SetIceConfig(const cricket::IceConfig& config) { - if (!network_thread_->IsCurrent()) { - network_thread_->Invoke(RTC_FROM_HERE, [&] { SetIceConfig(config); }); - return; - } - + RTC_DCHECK_RUN_ON(network_thread_); ice_config_ = config; for (auto& dtls : GetDtlsTransports()) { dtls->ice_transport()->SetIceConfig(ice_config_); @@ -218,13 +173,16 @@ void JsepTransportController::SetIceConfig(const cricket::IceConfig& config) { } void JsepTransportController::SetNeedsIceRestartFlag() { - for (auto& kv : jsep_transports_by_name_) { - kv.second->SetNeedsIceRestartFlag(); + RTC_DCHECK_RUN_ON(network_thread_); + for (auto& transport : transports_.Transports()) { + transport->SetNeedsIceRestartFlag(); } } bool JsepTransportController::NeedsIceRestart( const std::string& transport_name) const { + RTC_DCHECK_RUN_ON(network_thread_); + const cricket::JsepTransport* transport = GetJsepTransportByName(transport_name); if (!transport) { @@ -235,11 +193,16 @@ bool JsepTransportController::NeedsIceRestart( absl::optional JsepTransportController::GetDtlsRole( const std::string& mid) const { + // TODO(tommi): Remove this hop. Currently it's called from the signaling + // thread during negotiations, potentially multiple times. 
+ // WebRtcSessionDescriptionFactory::InternalCreateAnswer is one example. if (!network_thread_->IsCurrent()) { return network_thread_->Invoke>( RTC_FROM_HERE, [&] { return GetDtlsRole(mid); }); } + RTC_DCHECK_RUN_ON(network_thread_); + const cricket::JsepTransport* t = GetJsepTransportForMid(mid); if (!t) { return absl::optional(); @@ -254,6 +217,8 @@ bool JsepTransportController::SetLocalCertificate( RTC_FROM_HERE, [&] { return SetLocalCertificate(certificate); }); } + RTC_DCHECK_RUN_ON(network_thread_); + // Can't change a certificate, or set a null certificate. if (certificate_ || !certificate) { return false; @@ -263,8 +228,8 @@ bool JsepTransportController::SetLocalCertificate( // Set certificate for JsepTransport, which verifies it matches the // fingerprint in SDP, and DTLS transport. // Fallback from DTLS to SDES is not supported. - for (auto& kv : jsep_transports_by_name_) { - kv.second->SetLocalCertificate(certificate_); + for (auto& transport : transports_.Transports()) { + transport->SetLocalCertificate(certificate_); } for (auto& dtls : GetDtlsTransports()) { bool set_cert_success = dtls->SetLocalCertificate(certificate_); @@ -276,10 +241,7 @@ bool JsepTransportController::SetLocalCertificate( rtc::scoped_refptr JsepTransportController::GetLocalCertificate( const std::string& transport_name) const { - if (!network_thread_->IsCurrent()) { - return network_thread_->Invoke>( - RTC_FROM_HERE, [&] { return GetLocalCertificate(transport_name); }); - } + RTC_DCHECK_RUN_ON(network_thread_); const cricket::JsepTransport* t = GetJsepTransportByName(transport_name); if (!t) { @@ -291,10 +253,7 @@ JsepTransportController::GetLocalCertificate( std::unique_ptr JsepTransportController::GetRemoteSSLCertChain( const std::string& transport_name) const { - if (!network_thread_->IsCurrent()) { - return network_thread_->Invoke>( - RTC_FROM_HERE, [&] { return GetRemoteSSLCertChain(transport_name); }); - } + RTC_DCHECK_RUN_ON(network_thread_); // Get the certificate from 
the RTP transport's DTLS handshake. Should be // identical to the RTCP transport's, since they were given the same remote @@ -326,17 +285,8 @@ void JsepTransportController::MaybeStartGathering() { RTCError JsepTransportController::AddRemoteCandidates( const std::string& transport_name, const cricket::Candidates& candidates) { - if (!network_thread_->IsCurrent()) { - return network_thread_->Invoke(RTC_FROM_HERE, [&] { - return AddRemoteCandidates(transport_name, candidates); - }); - } - - // Verify each candidate before passing down to the transport layer. - RTCError error = VerifyCandidates(candidates); - if (!error.ok()) { - return error; - } + RTC_DCHECK_RUN_ON(network_thread_); + RTC_DCHECK(VerifyCandidates(candidates).ok()); auto jsep_transport = GetJsepTransportByName(transport_name); if (!jsep_transport) { RTC_LOG(LS_WARNING) << "Not adding candidate because the JsepTransport " @@ -353,6 +303,8 @@ RTCError JsepTransportController::RemoveRemoteCandidates( RTC_FROM_HERE, [&] { return RemoveRemoteCandidates(candidates); }); } + RTC_DCHECK_RUN_ON(network_thread_); + // Verify each candidate before passing down to the transport layer. 
RTCError error = VerifyCandidates(candidates); if (!error.ok()) { @@ -395,10 +347,7 @@ RTCError JsepTransportController::RemoveRemoteCandidates( bool JsepTransportController::GetStats(const std::string& transport_name, cricket::TransportStats* stats) { - if (!network_thread_->IsCurrent()) { - return network_thread_->Invoke( - RTC_FROM_HERE, [=] { return GetStats(transport_name, stats); }); - } + RTC_DCHECK_RUN_ON(network_thread_); cricket::JsepTransport* transport = GetJsepTransportByName(transport_name); if (!transport) { @@ -415,40 +364,23 @@ void JsepTransportController::SetActiveResetSrtpParams( }); return; } - + RTC_DCHECK_RUN_ON(network_thread_); RTC_LOG(INFO) << "Updating the active_reset_srtp_params for JsepTransportController: " << active_reset_srtp_params; - config_.active_reset_srtp_params = active_reset_srtp_params; - for (auto& kv : jsep_transports_by_name_) { - kv.second->SetActiveResetSrtpParams(active_reset_srtp_params); + active_reset_srtp_params_ = active_reset_srtp_params; + for (auto& transport : transports_.Transports()) { + transport->SetActiveResetSrtpParams(active_reset_srtp_params); } } -void JsepTransportController::SetMediaTransportSettings( - bool use_datagram_transport, - bool use_datagram_transport_for_data_channels, - bool use_datagram_transport_for_data_channels_receive_only) { - config_.use_datagram_transport = use_datagram_transport; - config_.use_datagram_transport_for_data_channels = - use_datagram_transport_for_data_channels; - config_.use_datagram_transport_for_data_channels_receive_only = - use_datagram_transport_for_data_channels_receive_only; -} - void JsepTransportController::RollbackTransports() { if (!network_thread_->IsCurrent()) { network_thread_->Invoke(RTC_FROM_HERE, [=] { RollbackTransports(); }); return; } RTC_DCHECK_RUN_ON(network_thread_); - for (auto&& mid : pending_mids_) { - RemoveTransportForMid(mid); - } - for (auto&& mid : pending_mids_) { - MaybeDestroyJsepTransport(mid); - } - pending_mids_.clear(); + 
transports_.RollbackTransports(); } rtc::scoped_refptr @@ -459,7 +391,7 @@ JsepTransportController::CreateIceTransport(const std::string& transport_name, IceTransportInit init; init.set_port_allocator(port_allocator_); - init.set_async_resolver_factory(async_resolver_factory_); + init.set_async_dns_resolver_factory(async_dns_resolver_factory_); init.set_event_log(config_.event_log); return config_.ice_transport_factory->CreateIceTransport( transport_name, component, std::move(init)); @@ -468,25 +400,21 @@ JsepTransportController::CreateIceTransport(const std::string& transport_name, std::unique_ptr JsepTransportController::CreateDtlsTransport( const cricket::ContentInfo& content_info, - cricket::IceTransportInternal* ice, - DatagramTransportInterface* datagram_transport) { - RTC_DCHECK(network_thread_->IsCurrent()); + cricket::IceTransportInternal* ice) { + RTC_DCHECK_RUN_ON(network_thread_); std::unique_ptr dtls; - if (datagram_transport) { - RTC_DCHECK(config_.use_datagram_transport || - config_.use_datagram_transport_for_data_channels); - } else if (config_.dtls_transport_factory) { + if (config_.dtls_transport_factory) { dtls = config_.dtls_transport_factory->CreateDtlsTransport( - ice, config_.crypto_options); + ice, config_.crypto_options, config_.ssl_max_version); } else { dtls = std::make_unique(ice, config_.crypto_options, - config_.event_log); + config_.event_log, + config_.ssl_max_version); } RTC_DCHECK(dtls); - dtls->SetSslMaxProtocolVersion(config_.ssl_max_version); dtls->ice_transport()->SetIceRole(ice_role_); dtls->ice_transport()->SetIceTiebreaker(ice_tiebreaker_); dtls->ice_transport()->SetIceConfig(ice_config_); @@ -500,8 +428,6 @@ JsepTransportController::CreateDtlsTransport( this, &JsepTransportController::OnTransportWritableState_n); dtls->SignalReceivingState.connect( this, &JsepTransportController::OnTransportReceivingState_n); - dtls->SignalDtlsHandshakeError.connect( - this, &JsepTransportController::OnDtlsHandshakeError); 
dtls->ice_transport()->SignalGatheringState.connect( this, &JsepTransportController::OnTransportGatheringState_n); dtls->ice_transport()->SignalCandidateGathered.connect( @@ -518,6 +444,9 @@ JsepTransportController::CreateDtlsTransport( this, &JsepTransportController::OnTransportStateChanged_n); dtls->ice_transport()->SignalCandidatePairChanged.connect( this, &JsepTransportController::OnTransportCandidatePairChanged_n); + + dtls->SubscribeDtlsHandshakeError( + [this](rtc::SSLHandshakeError error) { OnDtlsHandshakeError(error); }); return dtls; } @@ -526,7 +455,7 @@ JsepTransportController::CreateUnencryptedRtpTransport( const std::string& transport_name, rtc::PacketTransportInternal* rtp_packet_transport, rtc::PacketTransportInternal* rtcp_packet_transport) { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); auto unencrypted_rtp_transport = std::make_unique(rtcp_packet_transport == nullptr); unencrypted_rtp_transport->SetRtpPacketTransport(rtp_packet_transport); @@ -541,7 +470,7 @@ JsepTransportController::CreateSdesTransport( const std::string& transport_name, cricket::DtlsTransportInternal* rtp_dtls_transport, cricket::DtlsTransportInternal* rtcp_dtls_transport) { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); auto srtp_transport = std::make_unique(rtcp_dtls_transport == nullptr); RTC_DCHECK(rtp_dtls_transport); @@ -560,7 +489,7 @@ JsepTransportController::CreateDtlsSrtpTransport( const std::string& transport_name, cricket::DtlsTransportInternal* rtp_dtls_transport, cricket::DtlsTransportInternal* rtcp_dtls_transport) { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); auto dtls_srtp_transport = std::make_unique( rtcp_dtls_transport == nullptr); if (config_.enable_external_auth) { @@ -569,19 +498,21 @@ JsepTransportController::CreateDtlsSrtpTransport( dtls_srtp_transport->SetDtlsTransports(rtp_dtls_transport, rtcp_dtls_transport); - 
dtls_srtp_transport->SetActiveResetSrtpParams( - config_.active_reset_srtp_params); - dtls_srtp_transport->SignalDtlsStateChange.connect( - this, &JsepTransportController::UpdateAggregateStates_n); + dtls_srtp_transport->SetActiveResetSrtpParams(active_reset_srtp_params_); + // Capturing this in the callback because JsepTransportController will always + // outlive the DtlsSrtpTransport. + dtls_srtp_transport->SetOnDtlsStateChange([this]() { + RTC_DCHECK_RUN_ON(this->network_thread_); + this->UpdateAggregateStates_n(); + }); return dtls_srtp_transport; } std::vector JsepTransportController::GetDtlsTransports() { + RTC_DCHECK_RUN_ON(network_thread_); std::vector dtls_transports; - for (auto it = jsep_transports_by_name_.begin(); - it != jsep_transports_by_name_.end(); ++it) { - auto jsep_transport = it->second.get(); + for (auto jsep_transport : transports_.Transports()) { RTC_DCHECK(jsep_transport); if (jsep_transport->rtp_dtls_transport()) { dtls_transports.push_back(jsep_transport->rtp_dtls_transport()); @@ -598,7 +529,7 @@ RTCError JsepTransportController::ApplyDescription_n( bool local, SdpType type, const cricket::SessionDescription* description) { - RTC_DCHECK_RUN_ON(network_thread_); + TRACE_EVENT0("webrtc", "JsepTransportController::ApplyDescription_n"); RTC_DCHECK(description); if (local) { @@ -608,28 +539,22 @@ RTCError JsepTransportController::ApplyDescription_n( } RTCError error; - error = ValidateAndMaybeUpdateBundleGroup(local, type, description); + error = ValidateAndMaybeUpdateBundleGroups(local, type, description); if (!error.ok()) { return error; } - std::vector merged_encrypted_extension_ids; - absl::optional bundle_media_alt_protocol; - absl::optional bundle_data_alt_protocol; - if (bundle_group_) { - merged_encrypted_extension_ids = - MergeEncryptedHeaderExtensionIdsForBundle(description); - error = GetAltProtocolsForBundle(description, &bundle_media_alt_protocol, - &bundle_data_alt_protocol); - if (!error.ok()) { - return error; - } + std::map> 
+ merged_encrypted_extension_ids_by_bundle; + if (!bundles_.bundle_groups().empty()) { + merged_encrypted_extension_ids_by_bundle = + MergeEncryptedHeaderExtensionIdsForBundles(description); } for (const cricket::ContentInfo& content_info : description->contents()) { - // Don't create transports for rejected m-lines and bundled m-lines." + // Don't create transports for rejected m-lines and bundled m-lines. if (content_info.rejected || - (IsBundled(content_info.name) && content_info.name != *bundled_mid())) { + !bundles_.IsFirstMidInGroup(content_info.name)) { continue; } error = MaybeCreateJsepTransport(local, content_info, *description); @@ -642,19 +567,26 @@ RTCError JsepTransportController::ApplyDescription_n( description->transport_infos().size()); for (size_t i = 0; i < description->contents().size(); ++i) { const cricket::ContentInfo& content_info = description->contents()[i]; - const cricket::MediaContentDescription* media_description = - content_info.media_description(); const cricket::TransportInfo& transport_info = description->transport_infos()[i]; if (content_info.rejected) { - HandleRejectedContent(content_info, description); + // This may cause groups to be removed from |bundles_.bundle_groups()|. + HandleRejectedContent(content_info); continue; } - if (IsBundled(content_info.name) && content_info.name != *bundled_mid()) { - if (!HandleBundledContent(content_info)) { + const cricket::ContentGroup* established_bundle_group = + bundles_.LookupGroupByMid(content_info.name); + + // For bundle members that are not BUNDLE-tagged (not first in the group), + // configure their transport to be the same as the BUNDLE-tagged transport. 
+ if (established_bundle_group && + content_info.name != *established_bundle_group->FirstContentName()) { + if (!HandleBundledContent(content_info, *established_bundle_group)) { return RTCError(RTCErrorType::INVALID_PARAMETER, - "Failed to process the bundled m= section."); + "Failed to process the bundled m= section with " + "mid='" + + content_info.name + "'."); } continue; } @@ -665,23 +597,15 @@ RTCError JsepTransportController::ApplyDescription_n( } std::vector extension_ids; - absl::optional media_alt_protocol; - absl::optional data_alt_protocol; - if (bundled_mid() && content_info.name == *bundled_mid()) { - extension_ids = merged_encrypted_extension_ids; - media_alt_protocol = bundle_media_alt_protocol; - data_alt_protocol = bundle_data_alt_protocol; + // Is BUNDLE-tagged (first in the group)? + if (established_bundle_group && + content_info.name == *established_bundle_group->FirstContentName()) { + auto it = merged_encrypted_extension_ids_by_bundle.find( + established_bundle_group); + RTC_DCHECK(it != merged_encrypted_extension_ids_by_bundle.end()); + extension_ids = it->second; } else { extension_ids = GetEncryptedHeaderExtensionIds(content_info); - switch (media_description->type()) { - case cricket::MEDIA_TYPE_AUDIO: - case cricket::MEDIA_TYPE_VIDEO: - media_alt_protocol = media_description->alt_protocol(); - break; - case cricket::MEDIA_TYPE_DATA: - data_alt_protocol = media_description->alt_protocol(); - break; - } } int rtp_abs_sendtime_extn_id = @@ -695,8 +619,7 @@ RTCError JsepTransportController::ApplyDescription_n( cricket::JsepTransportDescription jsep_description = CreateJsepTransportDescription(content_info, transport_info, - extension_ids, rtp_abs_sendtime_extn_id, - media_alt_protocol, data_alt_protocol); + extension_ids, rtp_abs_sendtime_extn_id); if (local) { error = transport->SetLocalJsepTransportDescription(jsep_description, type); @@ -706,64 +629,116 @@ RTCError JsepTransportController::ApplyDescription_n( } if (!error.ok()) { - 
LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "Failed to apply the description for " + - content_info.name + ": " + error.message()); + LOG_AND_RETURN_ERROR( + RTCErrorType::INVALID_PARAMETER, + "Failed to apply the description for m= section with mid='" + + content_info.name + "': " + error.message()); } } if (type == SdpType::kAnswer) { - pending_mids_.clear(); + transports_.CommitTransports(); } return RTCError::OK(); } -RTCError JsepTransportController::ValidateAndMaybeUpdateBundleGroup( +RTCError JsepTransportController::ValidateAndMaybeUpdateBundleGroups( bool local, SdpType type, const cricket::SessionDescription* description) { RTC_DCHECK(description); - const cricket::ContentGroup* new_bundle_group = - description->GetGroupByName(cricket::GROUP_TYPE_BUNDLE); - // The BUNDLE group containing a MID that no m= section has is invalid. - if (new_bundle_group) { - for (const auto& content_name : new_bundle_group->content_names()) { + std::vector new_bundle_groups = + description->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE); + // Verify |new_bundle_groups|. + std::map new_bundle_groups_by_mid; + for (const cricket::ContentGroup* new_bundle_group : new_bundle_groups) { + for (const std::string& content_name : new_bundle_group->content_names()) { + // The BUNDLE group must not contain a MID that is a member of a different + // BUNDLE group, or that contains the same MID multiple times. + if (new_bundle_groups_by_mid.find(content_name) != + new_bundle_groups_by_mid.end()) { + return RTCError(RTCErrorType::INVALID_PARAMETER, + "A BUNDLE group contains a MID='" + content_name + + "' that is already in a BUNDLE group."); + } + new_bundle_groups_by_mid.insert( + std::make_pair(content_name, new_bundle_group)); + // The BUNDLE group must not contain a MID that no m= section has. 
if (!description->GetContentByName(content_name)) { return RTCError(RTCErrorType::INVALID_PARAMETER, - "The BUNDLE group contains MID:" + content_name + - " matching no m= section."); + "A BUNDLE group contains a MID='" + content_name + + "' matching no m= section."); } } } if (type == SdpType::kAnswer) { - const cricket::ContentGroup* offered_bundle_group = - local ? remote_desc_->GetGroupByName(cricket::GROUP_TYPE_BUNDLE) - : local_desc_->GetGroupByName(cricket::GROUP_TYPE_BUNDLE); - - if (new_bundle_group) { - // The BUNDLE group in answer should be a subset of offered group. - for (const auto& content_name : new_bundle_group->content_names()) { - if (!offered_bundle_group || - !offered_bundle_group->HasContentName(content_name)) { + std::vector offered_bundle_groups = + local ? remote_desc_->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE) + : local_desc_->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE); + + std::map + offered_bundle_groups_by_mid; + for (const cricket::ContentGroup* offered_bundle_group : + offered_bundle_groups) { + for (const std::string& content_name : + offered_bundle_group->content_names()) { + offered_bundle_groups_by_mid[content_name] = offered_bundle_group; + } + } + + std::map + new_bundle_groups_by_offered_bundle_groups; + for (const cricket::ContentGroup* new_bundle_group : new_bundle_groups) { + if (!new_bundle_group->FirstContentName()) { + // Empty groups could be a subset of any group. + continue; + } + // The group in the answer (new_bundle_group) must have a corresponding + // group in the offer (original_group), because the answer groups may only + // be subsets of the offer groups. 
+ auto it = offered_bundle_groups_by_mid.find( + *new_bundle_group->FirstContentName()); + if (it == offered_bundle_groups_by_mid.end()) { + return RTCError(RTCErrorType::INVALID_PARAMETER, + "A BUNDLE group was added in the answer that did not " + "exist in the offer."); + } + const cricket::ContentGroup* offered_bundle_group = it->second; + if (new_bundle_groups_by_offered_bundle_groups.find( + offered_bundle_group) != + new_bundle_groups_by_offered_bundle_groups.end()) { + return RTCError(RTCErrorType::INVALID_PARAMETER, + "A MID in the answer has changed group."); + } + new_bundle_groups_by_offered_bundle_groups.insert( + std::make_pair(offered_bundle_group, new_bundle_group)); + for (const std::string& content_name : + new_bundle_group->content_names()) { + it = offered_bundle_groups_by_mid.find(content_name); + // The BUNDLE group in answer should be a subset of offered group. + if (it == offered_bundle_groups_by_mid.end() || + it->second != offered_bundle_group) { return RTCError(RTCErrorType::INVALID_PARAMETER, - "The BUNDLE group in answer contains a MID that was " - "not in the offered group."); + "A BUNDLE group in answer contains a MID='" + + content_name + + "' that was not in the offered group."); } } } - if (bundle_group_) { - for (const auto& content_name : bundle_group_->content_names()) { + for (const auto& bundle_group : bundles_.bundle_groups()) { + for (const std::string& content_name : bundle_group->content_names()) { // An answer that removes m= sections from pre-negotiated BUNDLE group // without rejecting it, is invalid. 
- if (!new_bundle_group || - !new_bundle_group->HasContentName(content_name)) { + auto it = new_bundle_groups_by_mid.find(content_name); + if (it == new_bundle_groups_by_mid.end()) { auto* content_info = description->GetContentByName(content_name); if (!content_info || !content_info->rejected) { return RTCError(RTCErrorType::INVALID_PARAMETER, - "Answer cannot remove m= section " + content_name + - " from already-established BUNDLE group."); + "Answer cannot remove m= section with mid='" + + content_name + + "' from already-established BUNDLE group."); } } } @@ -778,33 +753,35 @@ RTCError JsepTransportController::ValidateAndMaybeUpdateBundleGroup( } if (ShouldUpdateBundleGroup(type, description)) { - bundle_group_ = *new_bundle_group; + bundles_.Update(description); } - if (!bundled_mid()) { - return RTCError::OK(); - } + for (const auto& bundle_group : bundles_.bundle_groups()) { + if (!bundle_group->FirstContentName()) + continue; - auto bundled_content = description->GetContentByName(*bundled_mid()); - if (!bundled_content) { - return RTCError( - RTCErrorType::INVALID_PARAMETER, - "An m= section associated with the BUNDLE-tag doesn't exist."); - } + // The first MID in a BUNDLE group is BUNDLE-tagged. + auto bundled_content = + description->GetContentByName(*bundle_group->FirstContentName()); + if (!bundled_content) { + return RTCError( + RTCErrorType::INVALID_PARAMETER, + "An m= section associated with the BUNDLE-tag doesn't exist."); + } - // If the |bundled_content| is rejected, other contents in the bundle group - // should be rejected. - if (bundled_content->rejected) { - for (const auto& content_name : bundle_group_->content_names()) { - auto other_content = description->GetContentByName(content_name); - if (!other_content->rejected) { - return RTCError( - RTCErrorType::INVALID_PARAMETER, - "The m= section:" + content_name + " should be rejected."); + // If the |bundled_content| is rejected, other contents in the bundle group + // must also be rejected. 
+ if (bundled_content->rejected) { + for (const auto& content_name : bundle_group->content_names()) { + auto other_content = description->GetContentByName(content_name); + if (!other_content->rejected) { + return RTCError(RTCErrorType::INVALID_PARAMETER, + "The m= section with mid='" + content_name + + "' should be rejected."); + } } } } - return RTCError::OK(); } @@ -815,76 +792,54 @@ RTCError JsepTransportController::ValidateContent( content_info.type == cricket::MediaProtocolType::kRtp && !content_info.media_description()->rtcp_mux()) { return RTCError(RTCErrorType::INVALID_PARAMETER, - "The m= section:" + content_info.name + - " is invalid. RTCP-MUX is not " + "The m= section with mid='" + content_info.name + + "' is invalid. RTCP-MUX is not " "enabled when it is required."); } return RTCError::OK(); } void JsepTransportController::HandleRejectedContent( - const cricket::ContentInfo& content_info, - const cricket::SessionDescription* description) { + const cricket::ContentInfo& content_info) { // If the content is rejected, let the // BaseChannel/SctpTransport change the RtpTransport/DtlsTransport first, // then destroy the cricket::JsepTransport. - RemoveTransportForMid(content_info.name); - if (content_info.name == bundled_mid()) { - for (const auto& content_name : bundle_group_->content_names()) { - RemoveTransportForMid(content_name); + cricket::ContentGroup* bundle_group = + bundles_.LookupGroupByMid(content_info.name); + if (bundle_group && !bundle_group->content_names().empty() && + content_info.name == *bundle_group->FirstContentName()) { + // Rejecting a BUNDLE group's first mid means we are rejecting the entire + // group. + for (const auto& content_name : bundle_group->content_names()) { + transports_.RemoveTransportForMid(content_name); } - bundle_group_.reset(); - } else if (IsBundled(content_info.name)) { - // Remove the rejected content from the |bundle_group_|. 
- bundle_group_->RemoveContentName(content_info.name); - // Reset the bundle group if nothing left. - if (!bundle_group_->FirstContentName()) { - bundle_group_.reset(); + // Delete the BUNDLE group. + bundles_.DeleteGroup(bundle_group); + } else { + transports_.RemoveTransportForMid(content_info.name); + if (bundle_group) { + // Remove the rejected content from the |bundle_group|. + bundles_.DeleteMid(bundle_group, content_info.name); } } - MaybeDestroyJsepTransport(content_info.name); } bool JsepTransportController::HandleBundledContent( - const cricket::ContentInfo& content_info) { - auto jsep_transport = GetJsepTransportByName(*bundled_mid()); + const cricket::ContentInfo& content_info, + const cricket::ContentGroup& bundle_group) { + TRACE_EVENT0("webrtc", "JsepTransportController::HandleBundledContent"); + RTC_DCHECK(bundle_group.FirstContentName()); + auto jsep_transport = + GetJsepTransportByName(*bundle_group.FirstContentName()); RTC_DCHECK(jsep_transport); // If the content is bundled, let the // BaseChannel/SctpTransport change the RtpTransport/DtlsTransport first, // then destroy the cricket::JsepTransport. - if (SetTransportForMid(content_info.name, jsep_transport)) { - // TODO(bugs.webrtc.org/9719) For media transport this is far from ideal, - // because it means that we first create media transport and start - // connecting it, and then we destroy it. We will need to address it before - // video path is enabled. 
- MaybeDestroyJsepTransport(content_info.name); - return true; - } - return false; -} - -bool JsepTransportController::SetTransportForMid( - const std::string& mid, - cricket::JsepTransport* jsep_transport) { - RTC_DCHECK(jsep_transport); - if (mid_to_transport_[mid] == jsep_transport) { - return true; - } - RTC_DCHECK_RUN_ON(network_thread_); - pending_mids_.push_back(mid); - mid_to_transport_[mid] = jsep_transport; - return config_.transport_observer->OnTransportChanged( - mid, jsep_transport->rtp_transport(), jsep_transport->RtpDtlsTransport(), - jsep_transport->data_channel_transport()); -} - -void JsepTransportController::RemoveTransportForMid(const std::string& mid) { - bool ret = config_.transport_observer->OnTransportChanged(mid, nullptr, - nullptr, nullptr); - // Calling OnTransportChanged with nullptr should always succeed, since it is - // only expected to fail when adding media to a transport (not removing). - RTC_DCHECK(ret); - mid_to_transport_.erase(mid); + // TODO(bugs.webrtc.org/9719) For media transport this is far from ideal, + // because it means that we first create media transport and start + // connecting it, and then we destroy it. We will need to address it before + // video path is enabled. 
+ return transports_.SetTransportForMid(content_info.name, jsep_transport); } cricket::JsepTransportDescription @@ -892,9 +847,9 @@ JsepTransportController::CreateJsepTransportDescription( const cricket::ContentInfo& content_info, const cricket::TransportInfo& transport_info, const std::vector& encrypted_extension_ids, - int rtp_abs_sendtime_extn_id, - absl::optional media_alt_protocol, - absl::optional data_alt_protocol) { + int rtp_abs_sendtime_extn_id) { + TRACE_EVENT0("webrtc", + "JsepTransportController::CreateJsepTransportDescription"); const cricket::MediaContentDescription* content_desc = content_info.media_description(); RTC_DCHECK(content_desc); @@ -904,8 +859,7 @@ JsepTransportController::CreateJsepTransportDescription( return cricket::JsepTransportDescription( rtcp_mux_enabled, content_desc->cryptos(), encrypted_extension_ids, - rtp_abs_sendtime_extn_id, transport_info.description, media_alt_protocol, - data_alt_protocol); + rtp_abs_sendtime_extn_id, transport_info.description); } bool JsepTransportController::ShouldUpdateBundleGroup( @@ -921,11 +875,11 @@ bool JsepTransportController::ShouldUpdateBundleGroup( } RTC_DCHECK(local_desc_ && remote_desc_); - const cricket::ContentGroup* local_bundle = - local_desc_->GetGroupByName(cricket::GROUP_TYPE_BUNDLE); - const cricket::ContentGroup* remote_bundle = - remote_desc_->GetGroupByName(cricket::GROUP_TYPE_BUNDLE); - return local_bundle && remote_bundle; + std::vector local_bundles = + local_desc_->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE); + std::vector remote_bundles = + remote_desc_->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE); + return !local_bundles.empty() && !remote_bundles.empty(); } std::vector JsepTransportController::GetEncryptedHeaderExtensionIds( @@ -949,75 +903,31 @@ std::vector JsepTransportController::GetEncryptedHeaderExtensionIds( return encrypted_header_extension_ids; } -std::vector -JsepTransportController::MergeEncryptedHeaderExtensionIdsForBundle( +std::map> 
+JsepTransportController::MergeEncryptedHeaderExtensionIdsForBundles( const cricket::SessionDescription* description) { RTC_DCHECK(description); - RTC_DCHECK(bundle_group_); - - std::vector merged_ids; + RTC_DCHECK(!bundles_.bundle_groups().empty()); + std::map> + merged_encrypted_extension_ids_by_bundle; // Union the encrypted header IDs in the group when bundle is enabled. for (const cricket::ContentInfo& content_info : description->contents()) { - if (bundle_group_->HasContentName(content_info.name)) { - std::vector extension_ids = - GetEncryptedHeaderExtensionIds(content_info); - for (int id : extension_ids) { - if (!absl::c_linear_search(merged_ids, id)) { - merged_ids.push_back(id); - } - } - } - } - return merged_ids; -} - -RTCError JsepTransportController::GetAltProtocolsForBundle( - const cricket::SessionDescription* description, - absl::optional* media_alt_protocol, - absl::optional* data_alt_protocol) { - RTC_DCHECK(description); - RTC_DCHECK(bundle_group_); - RTC_DCHECK(media_alt_protocol); - RTC_DCHECK(data_alt_protocol); - - bool found_media = false; - bool found_data = false; - for (const cricket::ContentInfo& content : description->contents()) { - if (bundle_group_->HasContentName(content.name)) { - const cricket::MediaContentDescription* media_description = - content.media_description(); - switch (media_description->type()) { - case cricket::MEDIA_TYPE_AUDIO: - case cricket::MEDIA_TYPE_VIDEO: - if (found_media && - *media_alt_protocol != media_description->alt_protocol()) { - return RTCError(RTCErrorType::INVALID_PARAMETER, - "The BUNDLE group contains conflicting " - "alt-protocols for media ('" + - media_alt_protocol->value_or("") + "' and '" + - media_description->alt_protocol().value_or("") + - "')"); - } - found_media = true; - *media_alt_protocol = media_description->alt_protocol(); - break; - case cricket::MEDIA_TYPE_DATA: - if (found_data && - *data_alt_protocol != media_description->alt_protocol()) { - return 
RTCError(RTCErrorType::INVALID_PARAMETER, - "The BUNDLE group contains conflicting " - "alt-protocols for data ('" + - data_alt_protocol->value_or("") + "' and '" + - media_description->alt_protocol().value_or("") + - "')"); - } - found_data = true; - *data_alt_protocol = media_description->alt_protocol(); - break; + auto group = bundles_.LookupGroupByMid(content_info.name); + if (!group) + continue; + // Get or create list of IDs for the BUNDLE group. + std::vector& merged_ids = + merged_encrypted_extension_ids_by_bundle[group]; + // Add IDs not already in the list. + std::vector extension_ids = + GetEncryptedHeaderExtensionIds(content_info); + for (int id : extension_ids) { + if (!absl::c_linear_search(merged_ids, id)) { + merged_ids.push_back(id); } } } - return RTCError::OK(); + return merged_encrypted_extension_ids_by_bundle; } int JsepTransportController::GetRtpAbsSendTimeHeaderExtensionId( @@ -1032,116 +942,37 @@ int JsepTransportController::GetRtpAbsSendTimeHeaderExtensionId( const webrtc::RtpExtension* send_time_extension = webrtc::RtpExtension::FindHeaderExtensionByUri( content_desc->rtp_header_extensions(), - webrtc::RtpExtension::kAbsSendTimeUri); + webrtc::RtpExtension::kAbsSendTimeUri, + config_.crypto_options.srtp.enable_encrypted_rtp_header_extensions + ? webrtc::RtpExtension::kPreferEncryptedExtension + : webrtc::RtpExtension::kDiscardEncryptedExtension); return send_time_extension ? send_time_extension->id : -1; } const cricket::JsepTransport* JsepTransportController::GetJsepTransportForMid( const std::string& mid) const { - auto it = mid_to_transport_.find(mid); - return it == mid_to_transport_.end() ? nullptr : it->second; + return transports_.GetTransportForMid(mid); } cricket::JsepTransport* JsepTransportController::GetJsepTransportForMid( const std::string& mid) { - auto it = mid_to_transport_.find(mid); - return it == mid_to_transport_.end() ? 
nullptr : it->second; + return transports_.GetTransportForMid(mid); } const cricket::JsepTransport* JsepTransportController::GetJsepTransportByName( const std::string& transport_name) const { - auto it = jsep_transports_by_name_.find(transport_name); - return (it == jsep_transports_by_name_.end()) ? nullptr : it->second.get(); + return transports_.GetTransportByName(transport_name); } cricket::JsepTransport* JsepTransportController::GetJsepTransportByName( const std::string& transport_name) { - auto it = jsep_transports_by_name_.find(transport_name); - return (it == jsep_transports_by_name_.end()) ? nullptr : it->second.get(); -} - -// TODO(sukhanov): Refactor to avoid code duplication for Media and Datagram -// transports setup. -std::unique_ptr -JsepTransportController::MaybeCreateDatagramTransport( - const cricket::ContentInfo& content_info, - const cricket::SessionDescription& description, - bool local) { - if (config_.media_transport_factory == nullptr) { - return nullptr; - } - - if (!(config_.use_datagram_transport || - config_.use_datagram_transport_for_data_channels)) { - return nullptr; - } - - // Caller (offerer) datagram transport. 
- if (offer_datagram_transport_) { - RTC_DCHECK(local); - RTC_LOG(LS_INFO) << "Offered datagram transport has now been activated."; - return std::move(offer_datagram_transport_); - } - - const cricket::TransportDescription* transport_description = - description.GetTransportDescriptionByName(content_info.mid()); - RTC_DCHECK(transport_description) - << "Missing transport description for mid=" << content_info.mid(); - - if (!transport_description->opaque_parameters) { - RTC_LOG(LS_INFO) - << "No opaque transport parameters, not creating datagram transport"; - return nullptr; - } - - if (transport_description->opaque_parameters->protocol != - config_.media_transport_factory->GetTransportName()) { - RTC_LOG(LS_INFO) << "Opaque transport parameters for protocol=" - << transport_description->opaque_parameters->protocol - << ", which does not match supported protocol=" - << config_.media_transport_factory->GetTransportName(); - return nullptr; - } - - RTC_DCHECK(!local); - // When bundle is enabled, two JsepTransports are created, and then - // the second transport is destroyed (right away). - // For datagram transport, we don't want to create the second - // datagram transport in the first place. - RTC_LOG(LS_INFO) << "Returning new, client datagram transport."; - - MediaTransportSettings settings; - settings.is_caller = local; - settings.remote_transport_parameters = - transport_description->opaque_parameters->parameters; - settings.event_log = config_.event_log; - - auto datagram_transport_result = - config_.media_transport_factory->CreateDatagramTransport(network_thread_, - settings); - - if (!datagram_transport_result.ok()) { - // Datagram transport negotiation will fail and we'll fall back to RTP. - return nullptr; - } - - if (!datagram_transport_result.value() - ->SetRemoteTransportParameters( - transport_description->opaque_parameters->parameters) - .ok()) { - // Datagram transport negotiation failed (parameters are incompatible). - // Fall back to RTP. 
- return nullptr; - } - - return datagram_transport_result.MoveValue(); + return transports_.GetTransportByName(transport_name); } RTCError JsepTransportController::MaybeCreateJsepTransport( bool local, const cricket::ContentInfo& content_info, const cricket::SessionDescription& description) { - RTC_DCHECK(network_thread_->IsCurrent()); cricket::JsepTransport* transport = GetJsepTransportByName(content_info.name); if (transport) { return RTCError::OK(); @@ -1158,48 +989,21 @@ RTCError JsepTransportController::MaybeCreateJsepTransport( CreateIceTransport(content_info.name, /*rtcp=*/false); RTC_DCHECK(ice); - std::unique_ptr datagram_transport = - MaybeCreateDatagramTransport(content_info, description, local); - if (datagram_transport) { - datagram_transport->Connect(ice->internal()); - } - std::unique_ptr rtp_dtls_transport = - CreateDtlsTransport(content_info, ice->internal(), nullptr); + CreateDtlsTransport(content_info, ice->internal()); std::unique_ptr rtcp_dtls_transport; std::unique_ptr unencrypted_rtp_transport; std::unique_ptr sdes_transport; std::unique_ptr dtls_srtp_transport; - std::unique_ptr datagram_rtp_transport; rtc::scoped_refptr rtcp_ice; if (config_.rtcp_mux_policy != PeerConnectionInterface::kRtcpMuxPolicyRequire && content_info.type == cricket::MediaProtocolType::kRtp) { - RTC_DCHECK(datagram_transport == nullptr); rtcp_ice = CreateIceTransport(content_info.name, /*rtcp=*/true); rtcp_dtls_transport = - CreateDtlsTransport(content_info, rtcp_ice->internal(), - /*datagram_transport=*/nullptr); - } - - // Only create a datagram RTP transport if the datagram transport should be - // used for RTP. - if (datagram_transport && config_.use_datagram_transport) { - // TODO(sukhanov): We use unencrypted RTP transport over DatagramTransport, - // because MediaTransport encrypts. 
In the future we may want to - // implement our own version of RtpTransport over MediaTransport, because - // it will give us more control over things like: - // - Fusing - // - Rtp header compression - // - Handling Rtcp feedback. - RTC_LOG(LS_INFO) << "Creating UnencryptedRtpTransport, because datagram " - "transport is used."; - RTC_DCHECK(!rtcp_dtls_transport); - datagram_rtp_transport = std::make_unique( - content_info.media_description()->rtp_header_extensions(), - ice->internal(), datagram_transport.get()); + CreateDtlsTransport(content_info, rtcp_ice->internal()); } if (config_.disable_encryption) { @@ -1223,69 +1027,31 @@ RTCError JsepTransportController::MaybeCreateJsepTransport( config_.sctp_factory->CreateSctpTransport(rtp_dtls_transport.get()); } - DataChannelTransportInterface* data_channel_transport = nullptr; - if (config_.use_datagram_transport_for_data_channels) { - data_channel_transport = datagram_transport.get(); - } - std::unique_ptr jsep_transport = std::make_unique( content_info.name, certificate_, std::move(ice), std::move(rtcp_ice), std::move(unencrypted_rtp_transport), std::move(sdes_transport), - std::move(dtls_srtp_transport), std::move(datagram_rtp_transport), - std::move(rtp_dtls_transport), std::move(rtcp_dtls_transport), - std::move(sctp_transport), std::move(datagram_transport), - data_channel_transport); + std::move(dtls_srtp_transport), std::move(rtp_dtls_transport), + std::move(rtcp_dtls_transport), std::move(sctp_transport)); jsep_transport->rtp_transport()->SignalRtcpPacketReceived.connect( this, &JsepTransportController::OnRtcpPacketReceived_n); jsep_transport->SignalRtcpMuxActive.connect( this, &JsepTransportController::UpdateAggregateStates_n); - jsep_transport->SignalDataChannelTransportNegotiated.connect( - this, &JsepTransportController::OnDataChannelTransportNegotiated_n); - SetTransportForMid(content_info.name, jsep_transport.get()); - - jsep_transports_by_name_[content_info.name] = std::move(jsep_transport); + 
transports_.RegisterTransport(content_info.name, std::move(jsep_transport)); UpdateAggregateStates_n(); return RTCError::OK(); } -void JsepTransportController::MaybeDestroyJsepTransport( - const std::string& mid) { - auto jsep_transport = GetJsepTransportByName(mid); - if (!jsep_transport) { - return; - } - - // Don't destroy the JsepTransport if there are still media sections referring - // to it. - for (const auto& kv : mid_to_transport_) { - if (kv.second == jsep_transport) { - return; - } - } - - jsep_transports_by_name_.erase(mid); - UpdateAggregateStates_n(); -} - void JsepTransportController::DestroyAllJsepTransports_n() { - RTC_DCHECK(network_thread_->IsCurrent()); - - for (const auto& jsep_transport : jsep_transports_by_name_) { - config_.transport_observer->OnTransportChanged(jsep_transport.first, - nullptr, nullptr, nullptr); - } - - jsep_transports_by_name_.clear(); + transports_.DestroyAllTransports(); } void JsepTransportController::SetIceRole_n(cricket::IceRole ice_role) { - RTC_DCHECK(network_thread_->IsCurrent()); - ice_role_ = ice_role; - for (auto& dtls : GetDtlsTransports()) { + auto dtls_transports = GetDtlsTransports(); + for (auto& dtls : dtls_transports) { dtls->ice_transport()->SetIceRole(ice_role_); } } @@ -1340,7 +1106,6 @@ cricket::IceRole JsepTransportController::DetermineIceRole( void JsepTransportController::OnTransportWritableState_n( rtc::PacketTransportInternal* transport) { - RTC_DCHECK(network_thread_->IsCurrent()); RTC_LOG(LS_INFO) << " Transport " << transport->transport_name() << " writability changed to " << transport->writable() << "."; @@ -1349,58 +1114,44 @@ void JsepTransportController::OnTransportWritableState_n( void JsepTransportController::OnTransportReceivingState_n( rtc::PacketTransportInternal* transport) { - RTC_DCHECK(network_thread_->IsCurrent()); UpdateAggregateStates_n(); } void JsepTransportController::OnTransportGatheringState_n( cricket::IceTransportInternal* transport) { - 
RTC_DCHECK(network_thread_->IsCurrent()); UpdateAggregateStates_n(); } void JsepTransportController::OnTransportCandidateGathered_n( cricket::IceTransportInternal* transport, const cricket::Candidate& candidate) { - RTC_DCHECK(network_thread_->IsCurrent()); - // We should never signal peer-reflexive candidates. if (candidate.type() == cricket::PRFLX_PORT_TYPE) { RTC_NOTREACHED(); return; } - std::string transport_name = transport->transport_name(); - invoker_.AsyncInvoke( - RTC_FROM_HERE, signaling_thread_, [this, transport_name, candidate] { - SignalIceCandidatesGathered(transport_name, {candidate}); - }); + + signal_ice_candidates_gathered_.Send( + transport->transport_name(), std::vector{candidate}); } void JsepTransportController::OnTransportCandidateError_n( cricket::IceTransportInternal* transport, const cricket::IceCandidateErrorEvent& event) { - RTC_DCHECK(network_thread_->IsCurrent()); - - invoker_.AsyncInvoke(RTC_FROM_HERE, signaling_thread_, - [this, event] { SignalIceCandidateError(event); }); + signal_ice_candidate_error_.Send(event); } void JsepTransportController::OnTransportCandidatesRemoved_n( cricket::IceTransportInternal* transport, const cricket::Candidates& candidates) { - invoker_.AsyncInvoke( - RTC_FROM_HERE, signaling_thread_, - [this, candidates] { SignalIceCandidatesRemoved(candidates); }); + signal_ice_candidates_removed_.Send(candidates); } void JsepTransportController::OnTransportCandidatePairChanged_n( const cricket::CandidatePairChangeEvent& event) { - invoker_.AsyncInvoke(RTC_FROM_HERE, signaling_thread_, [this, event] { - SignalIceCandidatePairChanged(event); - }); + signal_ice_candidate_pair_changed_.Send(event); } void JsepTransportController::OnTransportRoleConflict_n( cricket::IceTransportInternal* transport) { - RTC_DCHECK(network_thread_->IsCurrent()); // Note: since the role conflict is handled entirely on the network thread, // we don't need to worry about role conflicts occurring on two ports at // once. 
The first one encountered should immediately reverse the role. @@ -1417,28 +1168,14 @@ void JsepTransportController::OnTransportRoleConflict_n( void JsepTransportController::OnTransportStateChanged_n( cricket::IceTransportInternal* transport) { - RTC_DCHECK(network_thread_->IsCurrent()); RTC_LOG(LS_INFO) << transport->transport_name() << " Transport " << transport->component() << " state changed. Check if state is complete."; UpdateAggregateStates_n(); } -void JsepTransportController::OnDataChannelTransportNegotiated_n( - cricket::JsepTransport* transport, - DataChannelTransportInterface* data_channel_transport) { - for (const auto& it : mid_to_transport_) { - if (it.second == transport) { - config_.transport_observer->OnTransportChanged( - it.first, transport->rtp_transport(), transport->RtpDtlsTransport(), - data_channel_transport); - } - } -} - void JsepTransportController::UpdateAggregateStates_n() { - RTC_DCHECK(network_thread_->IsCurrent()); - + TRACE_EVENT0("webrtc", "JsepTransportController::UpdateAggregateStates_n"); auto dtls_transports = GetDtlsTransports(); cricket::IceConnectionState new_connection_state = cricket::kIceConnectionConnecting; @@ -1454,7 +1191,7 @@ void JsepTransportController::UpdateAggregateStates_n() { bool all_done_gathering = !dtls_transports.empty(); std::map ice_state_counts; - std::map dtls_state_counts; + std::map dtls_state_counts; for (const auto& dtls : dtls_transports) { any_failed = any_failed || dtls->ice_transport()->GetState() == @@ -1486,10 +1223,8 @@ void JsepTransportController::UpdateAggregateStates_n() { } if (ice_connection_state_ != new_connection_state) { ice_connection_state_ = new_connection_state; - invoker_.AsyncInvoke(RTC_FROM_HERE, signaling_thread_, - [this, new_connection_state] { - SignalIceConnectionState(new_connection_state); - }); + + signal_ice_connection_state_.Send(new_connection_state); } // Compute the current RTCIceConnectionState as described in @@ -1545,16 +1280,11 @@ void 
JsepTransportController::UpdateAggregateStates_n() { new_ice_connection_state == PeerConnectionInterface::kIceConnectionCompleted) { // Ensure that we never skip over the "connected" state. - invoker_.AsyncInvoke(RTC_FROM_HERE, signaling_thread_, [this] { - SignalStandardizedIceConnectionState( - PeerConnectionInterface::kIceConnectionConnected); - }); + signal_standardized_ice_connection_state_.Send( + PeerConnectionInterface::kIceConnectionConnected); } standardized_ice_connection_state_ = new_ice_connection_state; - invoker_.AsyncInvoke( - RTC_FROM_HERE, signaling_thread_, [this, new_ice_connection_state] { - SignalStandardizedIceConnectionState(new_ice_connection_state); - }); + signal_standardized_ice_connection_state_.Send(new_ice_connection_state); } // Compute the current RTCPeerConnectionState as described in @@ -1563,16 +1293,15 @@ void JsepTransportController::UpdateAggregateStates_n() { // Note that "connecting" is only a valid state for DTLS transports while // "checking", "completed" and "disconnected" are only valid for ICE // transports. 
- int total_connected = total_ice_connected + - dtls_state_counts[cricket::DTLS_TRANSPORT_CONNECTED]; + int total_connected = + total_ice_connected + dtls_state_counts[DtlsTransportState::kConnected]; int total_dtls_connecting = - dtls_state_counts[cricket::DTLS_TRANSPORT_CONNECTING]; + dtls_state_counts[DtlsTransportState::kConnecting]; int total_failed = - total_ice_failed + dtls_state_counts[cricket::DTLS_TRANSPORT_FAILED]; + total_ice_failed + dtls_state_counts[DtlsTransportState::kFailed]; int total_closed = - total_ice_closed + dtls_state_counts[cricket::DTLS_TRANSPORT_CLOSED]; - int total_new = - total_ice_new + dtls_state_counts[cricket::DTLS_TRANSPORT_NEW]; + total_ice_closed + dtls_state_counts[DtlsTransportState::kClosed]; + int total_new = total_ice_new + dtls_state_counts[DtlsTransportState::kNew]; int total_transports = total_ice * 2; if (total_failed > 0) { @@ -1605,23 +1334,20 @@ void JsepTransportController::UpdateAggregateStates_n() { if (combined_connection_state_ != new_combined_state) { combined_connection_state_ = new_combined_state; - invoker_.AsyncInvoke(RTC_FROM_HERE, signaling_thread_, - [this, new_combined_state] { - SignalConnectionState(new_combined_state); - }); + signal_connection_state_.Send(new_combined_state); } - if (all_done_gathering) { + // Compute the gathering state. 
+ if (dtls_transports.empty()) { + new_gathering_state = cricket::kIceGatheringNew; + } else if (all_done_gathering) { new_gathering_state = cricket::kIceGatheringComplete; } else if (any_gathering) { new_gathering_state = cricket::kIceGatheringGathering; } if (ice_gathering_state_ != new_gathering_state) { ice_gathering_state_ = new_gathering_state; - invoker_.AsyncInvoke(RTC_FROM_HERE, signaling_thread_, - [this, new_gathering_state] { - SignalIceGatheringState(new_gathering_state); - }); + signal_ice_gathering_state_.Send(new_gathering_state); } } @@ -1634,57 +1360,24 @@ void JsepTransportController::OnRtcpPacketReceived_n( void JsepTransportController::OnDtlsHandshakeError( rtc::SSLHandshakeError error) { - SignalDtlsHandshakeError(error); + config_.on_dtls_handshake_error_(error); } -absl::optional -JsepTransportController::GetTransportParameters(const std::string& mid) { - if (!(config_.use_datagram_transport || - config_.use_datagram_transport_for_data_channels)) { - return absl::nullopt; - } - - cricket::JsepTransport* transport = GetJsepTransportForMid(mid); - if (transport) { - absl::optional params = - transport->GetTransportParameters(); - if (params) { - params->protocol = config_.media_transport_factory->GetTransportName(); - } - return params; - } - - RTC_DCHECK(!local_desc_ && !remote_desc_) - << "JsepTransport should exist for every mid once any description is set"; - - if (config_.use_datagram_transport_for_data_channels_receive_only) { - return absl::nullopt; - } - - // Need to generate a transport for the offer. 
- if (!offer_datagram_transport_) { - webrtc::MediaTransportSettings settings; - settings.is_caller = true; - settings.pre_shared_key = rtc::CreateRandomString(32); - settings.event_log = config_.event_log; - auto datagram_transport_or_error = - config_.media_transport_factory->CreateDatagramTransport( - network_thread_, settings); - - if (datagram_transport_or_error.ok()) { - offer_datagram_transport_ = - std::move(datagram_transport_or_error.value()); +bool JsepTransportController::OnTransportChanged( + const std::string& mid, + cricket::JsepTransport* jsep_transport) { + if (config_.transport_observer) { + if (jsep_transport) { + return config_.transport_observer->OnTransportChanged( + mid, jsep_transport->rtp_transport(), + jsep_transport->RtpDtlsTransport(), + jsep_transport->data_channel_transport()); } else { - RTC_LOG(LS_INFO) << "Unable to create datagram transport, error=" - << datagram_transport_or_error.error().message(); + return config_.transport_observer->OnTransportChanged(mid, nullptr, + nullptr, nullptr); } } - - // We have prepared a transport for the offer, and can now use its parameters. 
- cricket::OpaqueTransportParameters params; - params.parameters = offer_datagram_transport_->GetTransportParameters(); - params.protocol = config_.media_transport_factory->GetTransportName(); - return params; + return false; } } // namespace webrtc diff --git a/pc/jsep_transport_controller.h b/pc/jsep_transport_controller.h index c966e744c6..71b01bffb2 100644 --- a/pc/jsep_transport_controller.h +++ b/pc/jsep_transport_controller.h @@ -11,32 +11,63 @@ #ifndef PC_JSEP_TRANSPORT_CONTROLLER_H_ #define PC_JSEP_TRANSPORT_CONTROLLER_H_ +#include + +#include #include #include #include #include #include +#include "absl/types/optional.h" +#include "api/async_dns_resolver.h" #include "api/candidate.h" #include "api/crypto/crypto_options.h" #include "api/ice_transport_factory.h" +#include "api/ice_transport_interface.h" +#include "api/jsep.h" #include "api/peer_connection_interface.h" +#include "api/rtc_error.h" #include "api/rtc_event_log/rtc_event_log.h" -#include "api/transport/media/media_transport_config.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/transport/data_channel_transport_interface.h" +#include "api/transport/sctp_transport_factory_interface.h" #include "media/sctp/sctp_transport_internal.h" #include "p2p/base/dtls_transport.h" #include "p2p/base/dtls_transport_factory.h" +#include "p2p/base/dtls_transport_internal.h" +#include "p2p/base/ice_transport_internal.h" #include "p2p/base/p2p_transport_channel.h" +#include "p2p/base/packet_transport_internal.h" +#include "p2p/base/port.h" +#include "p2p/base/port_allocator.h" +#include "p2p/base/transport_description.h" +#include "p2p/base/transport_info.h" #include "pc/channel.h" #include "pc/dtls_srtp_transport.h" #include "pc/dtls_transport.h" #include "pc/jsep_transport.h" +#include "pc/jsep_transport_collection.h" #include "pc/rtp_transport.h" +#include "pc/rtp_transport_internal.h" +#include "pc/sctp_transport.h" +#include "pc/session_description.h" #include 
"pc/srtp_transport.h" -#include "rtc_base/async_invoker.h" +#include "pc/transport_stats.h" +#include "rtc_base/callback_list.h" +#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" +#include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/helpers.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/rtc_certificate.h" +#include "rtc_base/ssl_certificate.h" +#include "rtc_base/ssl_stream_adapter.h" #include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace rtc { class Thread; @@ -98,45 +129,25 @@ class JsepTransportController : public sigslot::has_slots<> { std::function rtcp_handler; + // Initial value for whether DtlsTransport reset causes a reset + // of SRTP parameters. bool active_reset_srtp_params = false; RtcEventLog* event_log = nullptr; // Factory for SCTP transports. - cricket::SctpTransportInternalFactory* sctp_factory = nullptr; - - // Whether an RtpMediaTransport should be created as default, when no - // MediaTransportFactory is provided. - bool use_rtp_media_transport = false; - - // Use encrypted datagram transport to send packets. - bool use_datagram_transport = false; - - // Use datagram transport's implementation of data channels instead of SCTP. - bool use_datagram_transport_for_data_channels = false; - - // Whether |use_datagram_transport_for_data_channels| applies to outgoing - // calls. If true, |use_datagram_transport_for_data_channels| applies only - // to incoming calls. - bool use_datagram_transport_for_data_channels_receive_only = false; - - // Optional media transport factory (experimental). If provided it will be - // used to create datagram_transport (as long as either - // |use_datagram_transport| or - // |use_datagram_transport_for_data_channels| is set to true). However, - // whether it will be used to send / receive audio and video frames instead - // of RTP is determined by |use_datagram_transport|. 
Note that currently - // datagram_transport co-exists with RTP / RTCP transports and may use the - // same underlying ICE transport. - MediaTransportFactory* media_transport_factory = nullptr; + SctpTransportFactoryInterface* sctp_factory = nullptr; + std::function on_dtls_handshake_error_; }; - // The ICE related events are signaled on the |signaling_thread|. - // All the transport related methods are called on the |network_thread|. - JsepTransportController(rtc::Thread* signaling_thread, - rtc::Thread* network_thread, - cricket::PortAllocator* port_allocator, - AsyncResolverFactory* async_resolver_factory, - Config config); + // The ICE related events are fired on the |network_thread|. + // All the transport related methods are called on the |network_thread| + // and destruction of the JsepTransportController must occur on the + // |network_thread|. + JsepTransportController( + rtc::Thread* network_thread, + cricket::PortAllocator* port_allocator, + AsyncDnsResolverFactoryInterface* async_dns_resolver_factory, + Config config); virtual ~JsepTransportController(); // The main method to be called; applies a description at the transport @@ -161,8 +172,6 @@ class JsepTransportController : public sigslot::has_slots<> { rtc::scoped_refptr GetSctpTransport( const std::string& mid) const; - MediaTransportConfig GetMediaTransportConfig(const std::string& mid) const; - DataChannelTransportInterface* GetDataChannelTransport( const std::string& mid) const; @@ -215,111 +224,133 @@ class JsepTransportController : public sigslot::has_slots<> { void SetActiveResetSrtpParams(bool active_reset_srtp_params); - // Allows to overwrite the settings from config. You may set or reset the - // media transport configuration on the jsep transport controller, as long as - // you did not call 'GetMediaTransport' or 'MaybeCreateJsepTransport'. Once - // Jsep transport is created, you can't change this setting. 
- void SetMediaTransportSettings( - bool use_datagram_transport, - bool use_datagram_transport_for_data_channels, - bool use_datagram_transport_for_data_channels_receive_only); - // For now the rollback only removes mid to transport mappings // and deletes unused transports, but doesn't consider anything more complex. void RollbackTransports(); - // Gets the transport parameters for the transport identified by |mid|. - // If |mid| is bundled, returns the parameters for the bundled transport. - // If the transport for |mid| has not been created yet, it may be allocated in - // order to generate transport parameters. - absl::optional GetTransportParameters( - const std::string& mid); + // F: void(const std::string&, const std::vector&) + template + void SubscribeIceCandidateGathered(F&& callback) { + RTC_DCHECK_RUN_ON(network_thread_); + signal_ice_candidates_gathered_.AddReceiver(std::forward(callback)); + } + + // F: void(cricket::IceConnectionState) + template + void SubscribeIceConnectionState(F&& callback) { + RTC_DCHECK_RUN_ON(network_thread_); + signal_ice_connection_state_.AddReceiver(std::forward(callback)); + } + + // F: void(PeerConnectionInterface::PeerConnectionState) + template + void SubscribeConnectionState(F&& callback) { + RTC_DCHECK_RUN_ON(network_thread_); + signal_connection_state_.AddReceiver(std::forward(callback)); + } + + // F: void(PeerConnectionInterface::IceConnectionState) + template + void SubscribeStandardizedIceConnectionState(F&& callback) { + RTC_DCHECK_RUN_ON(network_thread_); + signal_standardized_ice_connection_state_.AddReceiver( + std::forward(callback)); + } + + // F: void(cricket::IceGatheringState) + template + void SubscribeIceGatheringState(F&& callback) { + RTC_DCHECK_RUN_ON(network_thread_); + signal_ice_gathering_state_.AddReceiver(std::forward(callback)); + } + + // F: void(const cricket::IceCandidateErrorEvent&) + template + void SubscribeIceCandidateError(F&& callback) { + RTC_DCHECK_RUN_ON(network_thread_); + 
signal_ice_candidate_error_.AddReceiver(std::forward(callback)); + } + + // F: void(const std::vector&) + template + void SubscribeIceCandidatesRemoved(F&& callback) { + RTC_DCHECK_RUN_ON(network_thread_); + signal_ice_candidates_removed_.AddReceiver(std::forward(callback)); + } + + // F: void(const cricket::CandidatePairChangeEvent&) + template + void SubscribeIceCandidatePairChanged(F&& callback) { + RTC_DCHECK_RUN_ON(network_thread_); + signal_ice_candidate_pair_changed_.AddReceiver(std::forward(callback)); + } - // All of these signals are fired on the signaling thread. + private: + // All of these callbacks are fired on the network thread. // If any transport failed => failed, // Else if all completed => completed, // Else if all connected => connected, // Else => connecting - sigslot::signal1 SignalIceConnectionState; + CallbackList signal_ice_connection_state_ + RTC_GUARDED_BY(network_thread_); + + CallbackList + signal_connection_state_ RTC_GUARDED_BY(network_thread_); - sigslot::signal1 - SignalConnectionState; - sigslot::signal1 - SignalStandardizedIceConnectionState; + CallbackList + signal_standardized_ice_connection_state_ RTC_GUARDED_BY(network_thread_); // If all transports done gathering => complete, // Else if any are gathering => gathering, // Else => new - sigslot::signal1 SignalIceGatheringState; + CallbackList signal_ice_gathering_state_ + RTC_GUARDED_BY(network_thread_); - // (mid, candidates) - sigslot::signal2&> - SignalIceCandidatesGathered; + // [mid, candidates] + CallbackList&> + signal_ice_candidates_gathered_ RTC_GUARDED_BY(network_thread_); - sigslot::signal1 - SignalIceCandidateError; + CallbackList + signal_ice_candidate_error_ RTC_GUARDED_BY(network_thread_); - sigslot::signal1&> - SignalIceCandidatesRemoved; + CallbackList&> + signal_ice_candidates_removed_ RTC_GUARDED_BY(network_thread_); - sigslot::signal1 - SignalIceCandidatePairChanged; + CallbackList + signal_ice_candidate_pair_changed_ RTC_GUARDED_BY(network_thread_); - 
sigslot::signal1 SignalDtlsHandshakeError; - - private: RTCError ApplyDescription_n(bool local, SdpType type, - const cricket::SessionDescription* description); - RTCError ValidateAndMaybeUpdateBundleGroup( + const cricket::SessionDescription* description) + RTC_RUN_ON(network_thread_); + RTCError ValidateAndMaybeUpdateBundleGroups( bool local, SdpType type, const cricket::SessionDescription* description); RTCError ValidateContent(const cricket::ContentInfo& content_info); - void HandleRejectedContent(const cricket::ContentInfo& content_info, - const cricket::SessionDescription* description); - bool HandleBundledContent(const cricket::ContentInfo& content_info); - - bool SetTransportForMid(const std::string& mid, - cricket::JsepTransport* jsep_transport); - void RemoveTransportForMid(const std::string& mid); + void HandleRejectedContent(const cricket::ContentInfo& content_info) + RTC_RUN_ON(network_thread_); + bool HandleBundledContent(const cricket::ContentInfo& content_info, + const cricket::ContentGroup& bundle_group) + RTC_RUN_ON(network_thread_); cricket::JsepTransportDescription CreateJsepTransportDescription( const cricket::ContentInfo& content_info, const cricket::TransportInfo& transport_info, const std::vector& encrypted_extension_ids, - int rtp_abs_sendtime_extn_id, - absl::optional media_alt_protocol, - absl::optional data_alt_protocol); - - absl::optional bundled_mid() const { - absl::optional bundled_mid; - if (bundle_group_ && bundle_group_->FirstContentName()) { - bundled_mid = *(bundle_group_->FirstContentName()); - } - return bundled_mid; - } - - bool IsBundled(const std::string& mid) const { - return bundle_group_ && bundle_group_->HasContentName(mid); - } + int rtp_abs_sendtime_extn_id); bool ShouldUpdateBundleGroup(SdpType type, const cricket::SessionDescription* description); - std::vector MergeEncryptedHeaderExtensionIdsForBundle( + std::map> + MergeEncryptedHeaderExtensionIdsForBundles( const cricket::SessionDescription* description); 
std::vector GetEncryptedHeaderExtensionIds( const cricket::ContentInfo& content_info); - // Extracts the alt-protocol settings that apply to the bundle group. - RTCError GetAltProtocolsForBundle( - const cricket::SessionDescription* description, - absl::optional* media_alt_protocol, - absl::optional* data_alt_protocol); - int GetRtpAbsSendTimeHeaderExtensionId( const cricket::ContentInfo& content_info); @@ -328,15 +359,16 @@ class JsepTransportController : public sigslot::has_slots<> { // transports are bundled on (In current implementation, it is the first // content in the BUNDLE group). const cricket::JsepTransport* GetJsepTransportForMid( - const std::string& mid) const; - cricket::JsepTransport* GetJsepTransportForMid(const std::string& mid); + const std::string& mid) const RTC_RUN_ON(network_thread_); + cricket::JsepTransport* GetJsepTransportForMid(const std::string& mid) + RTC_RUN_ON(network_thread_); // Get the JsepTransport without considering the BUNDLE group. Return nullptr // if the JsepTransport is destroyed. const cricket::JsepTransport* GetJsepTransportByName( - const std::string& transport_name) const; + const std::string& transport_name) const RTC_RUN_ON(network_thread_); cricket::JsepTransport* GetJsepTransportByName( - const std::string& transport_name); + const std::string& transport_name) RTC_RUN_ON(network_thread_); // Creates jsep transport. Noop if transport is already created. // Transport is created either during SetLocalDescription (|local| == true) or @@ -345,22 +377,12 @@ class JsepTransportController : public sigslot::has_slots<> { RTCError MaybeCreateJsepTransport( bool local, const cricket::ContentInfo& content_info, - const cricket::SessionDescription& description); - - // Creates datagram transport if config wants to use it, and a=x-mt line is - // present for the current media transport. Returned - // DatagramTransportInterface is not connected, and must be connected to ICE. 
- // You must call |GenerateOrGetLastMediaTransportOffer| on the caller before - // calling MaybeCreateDatagramTransport. - std::unique_ptr - MaybeCreateDatagramTransport(const cricket::ContentInfo& content_info, - const cricket::SessionDescription& description, - bool local); + const cricket::SessionDescription& description) + RTC_RUN_ON(network_thread_); - void MaybeDestroyJsepTransport(const std::string& mid); - void DestroyAllJsepTransports_n(); + void DestroyAllJsepTransports_n() RTC_RUN_ON(network_thread_); - void SetIceRole_n(cricket::IceRole ice_role); + void SetIceRole_n(cricket::IceRole ice_role) RTC_RUN_ON(network_thread_); cricket::IceRole DetermineIceRole( cricket::JsepTransport* jsep_transport, @@ -370,8 +392,7 @@ class JsepTransportController : public sigslot::has_slots<> { std::unique_ptr CreateDtlsTransport( const cricket::ContentInfo& content_info, - cricket::IceTransportInternal* ice, - DatagramTransportInterface* datagram_transport); + cricket::IceTransportInternal* ice); rtc::scoped_refptr CreateIceTransport( const std::string& transport_name, bool rtcp); @@ -395,43 +416,44 @@ class JsepTransportController : public sigslot::has_slots<> { std::vector GetDtlsTransports(); // Handlers for signals from Transport. 
- void OnTransportWritableState_n(rtc::PacketTransportInternal* transport); - void OnTransportReceivingState_n(rtc::PacketTransportInternal* transport); - void OnTransportGatheringState_n(cricket::IceTransportInternal* transport); + void OnTransportWritableState_n(rtc::PacketTransportInternal* transport) + RTC_RUN_ON(network_thread_); + void OnTransportReceivingState_n(rtc::PacketTransportInternal* transport) + RTC_RUN_ON(network_thread_); + void OnTransportGatheringState_n(cricket::IceTransportInternal* transport) + RTC_RUN_ON(network_thread_); void OnTransportCandidateGathered_n(cricket::IceTransportInternal* transport, - const cricket::Candidate& candidate); - void OnTransportCandidateError_n( - cricket::IceTransportInternal* transport, - const cricket::IceCandidateErrorEvent& event); + const cricket::Candidate& candidate) + RTC_RUN_ON(network_thread_); + void OnTransportCandidateError_n(cricket::IceTransportInternal* transport, + const cricket::IceCandidateErrorEvent& event) + RTC_RUN_ON(network_thread_); void OnTransportCandidatesRemoved_n(cricket::IceTransportInternal* transport, - const cricket::Candidates& candidates); - void OnTransportRoleConflict_n(cricket::IceTransportInternal* transport); - void OnTransportStateChanged_n(cricket::IceTransportInternal* transport); + const cricket::Candidates& candidates) + RTC_RUN_ON(network_thread_); + void OnTransportRoleConflict_n(cricket::IceTransportInternal* transport) + RTC_RUN_ON(network_thread_); + void OnTransportStateChanged_n(cricket::IceTransportInternal* transport) + RTC_RUN_ON(network_thread_); void OnTransportCandidatePairChanged_n( - const cricket::CandidatePairChangeEvent& event); - void OnDataChannelTransportNegotiated_n( - cricket::JsepTransport* transport, - DataChannelTransportInterface* data_channel_transport); - - void UpdateAggregateStates_n(); + const cricket::CandidatePairChangeEvent& event) + RTC_RUN_ON(network_thread_); + void UpdateAggregateStates_n() RTC_RUN_ON(network_thread_); void 
OnRtcpPacketReceived_n(rtc::CopyOnWriteBuffer* packet, - int64_t packet_time_us); + int64_t packet_time_us) + RTC_RUN_ON(network_thread_); void OnDtlsHandshakeError(rtc::SSLHandshakeError error); - rtc::Thread* const signaling_thread_ = nullptr; + bool OnTransportChanged(const std::string& mid, + cricket::JsepTransport* transport); + rtc::Thread* const network_thread_ = nullptr; cricket::PortAllocator* const port_allocator_ = nullptr; - AsyncResolverFactory* const async_resolver_factory_ = nullptr; - - std::map> - jsep_transports_by_name_; - // This keeps track of the mapping between media section - // (BaseChannel/SctpTransport) and the JsepTransport underneath. - std::map mid_to_transport_; - // Keep track of mids that have been mapped to transports. Used for rollback. - std::vector pending_mids_ RTC_GUARDED_BY(network_thread_); + AsyncDnsResolverFactoryInterface* const async_dns_resolver_factory_ = nullptr; + + JsepTransportCollection transports_ RTC_GUARDED_BY(network_thread_); // Aggregate states for Transports. // standardized_ice_connection_state_ is intended to replace // ice_connection_state, see bugs.webrtc.org/9308 @@ -444,30 +466,19 @@ class JsepTransportController : public sigslot::has_slots<> { PeerConnectionInterface::PeerConnectionState::kNew; cricket::IceGatheringState ice_gathering_state_ = cricket::kIceGatheringNew; - Config config_; - - // Early on in the call we don't know if datagram transport is going to be - // used, but we need to get the server-supported parameters to add to an SDP. - // This server datagram transport will be promoted to the used datagram - // transport after the local description is set, and the ownership will be - // transferred to the actual JsepTransport. This "offer" datagram transport is - // not created if it's done on the party that provides answer. This offer - // datagram transport is only created once at the beginning of the connection, - // and never again. 
- std::unique_ptr offer_datagram_transport_ = - nullptr; + const Config config_; + bool active_reset_srtp_params_ RTC_GUARDED_BY(network_thread_); const cricket::SessionDescription* local_desc_ = nullptr; const cricket::SessionDescription* remote_desc_ = nullptr; absl::optional initial_offerer_; - absl::optional bundle_group_; - cricket::IceConfig ice_config_; cricket::IceRole ice_role_ = cricket::ICEROLE_CONTROLLING; uint64_t ice_tiebreaker_ = rtc::CreateRandomId64(); rtc::scoped_refptr certificate_; - rtc::AsyncInvoker invoker_; + + BundleManager bundles_; RTC_DISALLOW_COPY_AND_ASSIGN(JsepTransportController); }; diff --git a/pc/jsep_transport_controller_unittest.cc b/pc/jsep_transport_controller_unittest.cc index 3fc6f8b7e5..2b261c83c8 100644 --- a/pc/jsep_transport_controller_unittest.cc +++ b/pc/jsep_transport_controller_unittest.cc @@ -13,9 +13,7 @@ #include #include -#include "api/test/fake_media_transport.h" -#include "api/test/loopback_media_transport.h" -#include "api/transport/media/media_transport_interface.h" +#include "api/dtls_transport_interface.h" #include "p2p/base/dtls_transport_factory.h" #include "p2p/base/fake_dtls_transport.h" #include "p2p/base/fake_ice_transport.h" @@ -36,6 +34,8 @@ static const char kIceUfrag2[] = "u0002"; static const char kIcePwd2[] = "TESTICEPWD00000000000002"; static const char kIceUfrag3[] = "u0003"; static const char kIcePwd3[] = "TESTICEPWD00000000000003"; +static const char kIceUfrag4[] = "u0004"; +static const char kIcePwd4[] = "TESTICEPWD00000000000004"; static const char kAudioMid1[] = "audio1"; static const char kAudioMid2[] = "audio2"; static const char kVideoMid1[] = "video1"; @@ -44,20 +44,6 @@ static const char kDataMid1[] = "data1"; namespace webrtc { -namespace { - -// Media transport factory requires crypto settings to be present in order to -// create media transport. 
-void AddCryptoSettings(cricket::SessionDescription* description) { - for (auto& content : description->contents()) { - content.media_description()->AddCrypto(cricket::CryptoParams( - /*t=*/0, std::string(rtc::CS_AES_CM_128_HMAC_SHA1_80), - "inline:YUJDZGVmZ2hpSktMbW9QUXJzVHVWd3l6MTIzNDU2", "")); - } -} - -} // namespace - class FakeIceTransportFactory : public webrtc::IceTransportFactory { public: ~FakeIceTransportFactory() override = default; @@ -65,7 +51,7 @@ class FakeIceTransportFactory : public webrtc::IceTransportFactory { const std::string& transport_name, int component, IceTransportInit init) override { - return new rtc::RefCountedObject( + return rtc::make_ref_counted( std::make_unique(transport_name, component)); } }; @@ -74,7 +60,8 @@ class FakeDtlsTransportFactory : public cricket::DtlsTransportFactory { public: std::unique_ptr CreateDtlsTransport( cricket::IceTransportInternal* ice, - const webrtc::CryptoOptions& crypto_options) override { + const webrtc::CryptoOptions& crypto_options, + rtc::SSLProtocolVersion max_version) override { return std::make_unique( static_cast(ice)); } @@ -91,7 +78,6 @@ class JsepTransportControllerTest : public JsepTransportController::Observer, void CreateJsepTransportController( JsepTransportController::Config config, - rtc::Thread* signaling_thread = rtc::Thread::Current(), rtc::Thread* network_thread = rtc::Thread::Current(), cricket::PortAllocator* port_allocator = nullptr) { config.transport_observer = this; @@ -99,23 +85,37 @@ class JsepTransportControllerTest : public JsepTransportController::Observer, int64_t packet_time_us) { RTC_NOTREACHED(); }; config.ice_transport_factory = fake_ice_transport_factory_.get(); config.dtls_transport_factory = fake_dtls_transport_factory_.get(); + config.on_dtls_handshake_error_ = [](rtc::SSLHandshakeError s) {}; transport_controller_ = std::make_unique( - signaling_thread, network_thread, port_allocator, - nullptr /* async_resolver_factory */, config); - 
ConnectTransportControllerSignals(); + network_thread, port_allocator, nullptr /* async_resolver_factory */, + config); + network_thread->Invoke(RTC_FROM_HERE, + [&] { ConnectTransportControllerSignals(); }); } void ConnectTransportControllerSignals() { - transport_controller_->SignalIceConnectionState.connect( - this, &JsepTransportControllerTest::OnConnectionState); - transport_controller_->SignalStandardizedIceConnectionState.connect( - this, &JsepTransportControllerTest::OnStandardizedIceConnectionState); - transport_controller_->SignalConnectionState.connect( - this, &JsepTransportControllerTest::OnCombinedConnectionState); - transport_controller_->SignalIceGatheringState.connect( - this, &JsepTransportControllerTest::OnGatheringState); - transport_controller_->SignalIceCandidatesGathered.connect( - this, &JsepTransportControllerTest::OnCandidatesGathered); + transport_controller_->SubscribeIceConnectionState( + [this](cricket::IceConnectionState s) { + JsepTransportControllerTest::OnConnectionState(s); + }); + transport_controller_->SubscribeConnectionState( + [this](PeerConnectionInterface::PeerConnectionState s) { + JsepTransportControllerTest::OnCombinedConnectionState(s); + }); + transport_controller_->SubscribeStandardizedIceConnectionState( + [this](PeerConnectionInterface::IceConnectionState s) { + JsepTransportControllerTest::OnStandardizedIceConnectionState(s); + }); + transport_controller_->SubscribeIceGatheringState( + [this](cricket::IceGatheringState s) { + JsepTransportControllerTest::OnGatheringState(s); + }); + transport_controller_->SubscribeIceCandidateGathered( + [this](const std::string& transport, + const std::vector& candidates) { + JsepTransportControllerTest::OnCandidatesGathered(transport, + candidates); + }); } std::unique_ptr @@ -280,18 +280,14 @@ class JsepTransportControllerTest : public JsepTransportController::Observer, protected: void OnConnectionState(cricket::IceConnectionState state) { - if (!signaling_thread_->IsCurrent()) 
{ - signaled_on_non_signaling_thread_ = true; - } + ice_signaled_on_thread_ = rtc::Thread::Current(); connection_state_ = state; ++connection_state_signal_count_; } void OnStandardizedIceConnectionState( PeerConnectionInterface::IceConnectionState state) { - if (!signaling_thread_->IsCurrent()) { - signaled_on_non_signaling_thread_ = true; - } + ice_signaled_on_thread_ = rtc::Thread::Current(); ice_connection_state_ = state; ++ice_connection_state_signal_count_; } @@ -300,26 +296,20 @@ class JsepTransportControllerTest : public JsepTransportController::Observer, PeerConnectionInterface::PeerConnectionState state) { RTC_LOG(LS_INFO) << "OnCombinedConnectionState: " << static_cast(state); - if (!signaling_thread_->IsCurrent()) { - signaled_on_non_signaling_thread_ = true; - } + ice_signaled_on_thread_ = rtc::Thread::Current(); combined_connection_state_ = state; ++combined_connection_state_signal_count_; } void OnGatheringState(cricket::IceGatheringState state) { - if (!signaling_thread_->IsCurrent()) { - signaled_on_non_signaling_thread_ = true; - } + ice_signaled_on_thread_ = rtc::Thread::Current(); gathering_state_ = state; ++gathering_state_signal_count_; } void OnCandidatesGathered(const std::string& transport_name, const Candidates& candidates) { - if (!signaling_thread_->IsCurrent()) { - signaled_on_non_signaling_thread_ = true; - } + ice_signaled_on_thread_ = rtc::Thread::Current(); candidates_[transport_name].insert(candidates_[transport_name].end(), candidates.begin(), candidates.end()); ++candidates_signal_count_; @@ -364,7 +354,7 @@ class JsepTransportControllerTest : public JsepTransportController::Observer, std::unique_ptr fake_ice_transport_factory_; std::unique_ptr fake_dtls_transport_factory_; rtc::Thread* const signaling_thread_ = nullptr; - bool signaled_on_non_signaling_thread_ = false; + rtc::Thread* ice_signaled_on_thread_ = nullptr; // Used to verify the SignalRtpTransportChanged/SignalDtlsTransportChanged are // signaled correctly. 
std::map changed_rtp_transport_by_mid_; @@ -440,96 +430,6 @@ TEST_F(JsepTransportControllerTest, GetDtlsTransportWithRtcpMux) { EXPECT_EQ(nullptr, transport_controller_->GetRtcpDtlsTransport(kVideoMid1)); } -TEST_F(JsepTransportControllerTest, - DtlsIsStillCreatedIfDatagramTransportIsOnlyUsedForDataChannels) { - FakeMediaTransportFactory fake_media_transport_factory("transport_params"); - JsepTransportController::Config config; - - config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - config.media_transport_factory = &fake_media_transport_factory; - config.use_datagram_transport_for_data_channels = true; - CreateJsepTransportController(config); - - auto description = CreateSessionDescriptionWithBundledData(); - AddCryptoSettings(description.get()); - - absl::optional params = - transport_controller_->GetTransportParameters(kAudioMid1); - for (auto& info : description->transport_infos()) { - info.description.opaque_parameters = params; - } - for (cricket::ContentInfo& content_info : description->contents()) { - if (content_info.media_description()->type() == cricket::MEDIA_TYPE_DATA) { - content_info.media_description()->set_alt_protocol(params->protocol); - } - } - - EXPECT_TRUE(transport_controller_ - ->SetLocalDescription(SdpType::kOffer, description.get()) - .ok()); - EXPECT_TRUE(transport_controller_ - ->SetRemoteDescription(SdpType::kAnswer, description.get()) - .ok()); - - FakeDatagramTransport* datagram_transport = - static_cast( - transport_controller_->GetDataChannelTransport(kAudioMid1)); - - ASSERT_NE(nullptr, datagram_transport); - - EXPECT_EQ(cricket::ICE_CANDIDATE_COMPONENT_RTP, - transport_controller_->GetDtlsTransport(kAudioMid1)->component()) - << "Datagram transport for media was not enabled, and so DTLS transport " - "should be created."; - - // Datagram transport is not used for media, so no max packet size is - // specified. 
- EXPECT_EQ(transport_controller_->GetMediaTransportConfig(kAudioMid1) - .rtp_max_packet_size, - absl::nullopt); - - // Since datagram transport is not used for RTP, setting it to writable should - // not make the RTP transport writable. - datagram_transport->set_state(MediaTransportState::kWritable); - EXPECT_FALSE(transport_controller_->GetRtpTransport(kAudioMid1) - ->IsWritable(/*rtcp=*/false)); -} - -// An offer that bundles different alt-protocols should be rejected. -TEST_F(JsepTransportControllerTest, CannotBundleDifferentAltProtocols) { - FakeMediaTransportFactory fake_media_transport_factory("transport_params"); - JsepTransportController::Config config; - config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - config.media_transport_factory = &fake_media_transport_factory; - config.use_datagram_transport = true; - config.use_datagram_transport_for_data_channels = true; - CreateJsepTransportController(config); - - auto description = CreateSessionDescriptionWithBundledData(); - AddCryptoSettings(description.get()); - - absl::optional params = - transport_controller_->GetTransportParameters(kAudioMid1); - for (auto& info : description->transport_infos()) { - info.description.opaque_parameters = params; - } - - // Append a different alt-protocol to each of the sections. 
- for (cricket::ContentInfo& content_info : description->contents()) { - content_info.media_description()->set_alt_protocol(params->protocol + "-" + - content_info.name); - } - - EXPECT_FALSE(transport_controller_ - ->SetLocalDescription(SdpType::kOffer, description.get()) - .ok()); - EXPECT_FALSE(transport_controller_ - ->SetRemoteDescription(SdpType::kAnswer, description.get()) - .ok()); -} - TEST_F(JsepTransportControllerTest, SetIceConfig) { CreateJsepTransportController(JsepTransportController::Config()); auto description = CreateSessionDescriptionWithoutBundle(); @@ -794,8 +694,8 @@ TEST_F(JsepTransportControllerTest, combined_connection_state_, kTimeout); EXPECT_EQ(2, combined_connection_state_signal_count_); - fake_audio_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); - fake_video_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); + fake_audio_dtls->SetDtlsState(DtlsTransportState::kConnected); + fake_video_dtls->SetDtlsState(DtlsTransportState::kConnected); // Set the connection count to be 2 and the cricket::FakeIceTransport will set // the transport state to be STATE_CONNECTING. fake_video_dtls->fake_ice_transport()->SetConnectionCount(2); @@ -851,8 +751,8 @@ TEST_F(JsepTransportControllerTest, SignalConnectionStateComplete) { combined_connection_state_, kTimeout); EXPECT_EQ(2, combined_connection_state_signal_count_); - fake_audio_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); - fake_video_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); + fake_audio_dtls->SetDtlsState(DtlsTransportState::kConnected); + fake_video_dtls->SetDtlsState(DtlsTransportState::kConnected); // Set the connection count to be 1 and the cricket::FakeIceTransport will set // the transport state to be STATE_COMPLETED. 
fake_video_dtls->fake_ice_transport()->SetTransportState( @@ -940,7 +840,7 @@ TEST_F(JsepTransportControllerTest, fake_audio_dtls->SetWritable(true); fake_audio_dtls->fake_ice_transport()->SetCandidatesGatheringComplete(); fake_audio_dtls->fake_ice_transport()->SetConnectionCount(1); - fake_audio_dtls->SetDtlsState(cricket::DTLS_TRANSPORT_CONNECTED); + fake_audio_dtls->SetDtlsState(DtlsTransportState::kConnected); EXPECT_EQ(1, gathering_state_signal_count_); // Set the remote description and enable the bundle. @@ -977,11 +877,12 @@ TEST_F(JsepTransportControllerTest, SignalCandidatesGathered) { EXPECT_EQ(1u, candidates_[kAudioMid1].size()); } -TEST_F(JsepTransportControllerTest, IceSignalingOccursOnSignalingThread) { +TEST_F(JsepTransportControllerTest, IceSignalingOccursOnNetworkThread) { network_thread_ = rtc::Thread::CreateWithSocketServer(); network_thread_->Start(); + EXPECT_EQ(ice_signaled_on_thread_, nullptr); CreateJsepTransportController(JsepTransportController::Config(), - signaling_thread_, network_thread_.get(), + network_thread_.get(), /*port_allocator=*/nullptr); CreateLocalDescriptionAndCompleteConnectionOnNetworkThread(); @@ -997,7 +898,10 @@ TEST_F(JsepTransportControllerTest, IceSignalingOccursOnSignalingThread) { EXPECT_EQ_WAIT(1u, candidates_[kVideoMid1].size(), kTimeout); EXPECT_EQ(2, candidates_signal_count_); - EXPECT_TRUE(!signaled_on_non_signaling_thread_); + EXPECT_EQ(ice_signaled_on_thread_, network_thread_.get()); + + network_thread_->Invoke(RTC_FROM_HERE, + [&] { transport_controller_.reset(); }); } // Test that if the TransportController was created with the @@ -1198,6 +1102,512 @@ TEST_F(JsepTransportControllerTest, MultipleMediaSectionsOfSameTypeWithBundle) { ASSERT_TRUE(it2 != changed_dtls_transport_by_mid_.end()); } +TEST_F(JsepTransportControllerTest, MultipleBundleGroups) { + static const char kMid1Audio[] = "1_audio"; + static const char kMid2Video[] = "2_video"; + static const char kMid3Audio[] = "3_audio"; + static const char 
kMid4Video[] = "4_video"; + + CreateJsepTransportController(JsepTransportController::Config()); + cricket::ContentGroup bundle_group1(cricket::GROUP_TYPE_BUNDLE); + bundle_group1.AddContentName(kMid1Audio); + bundle_group1.AddContentName(kMid2Video); + cricket::ContentGroup bundle_group2(cricket::GROUP_TYPE_BUNDLE); + bundle_group2.AddContentName(kMid3Audio); + bundle_group2.AddContentName(kMid4Video); + + auto local_offer = std::make_unique(); + AddAudioSection(local_offer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(local_offer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid4Video, kIceUfrag4, kIcePwd4, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + local_offer->AddGroup(bundle_group1); + local_offer->AddGroup(bundle_group2); + + auto remote_answer = std::make_unique(); + AddAudioSection(remote_answer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(remote_answer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid4Video, kIceUfrag4, kIcePwd4, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + remote_answer->AddGroup(bundle_group1); + remote_answer->AddGroup(bundle_group2); + + EXPECT_TRUE(transport_controller_ + ->SetLocalDescription(SdpType::kOffer, local_offer.get()) + .ok()); + EXPECT_TRUE(transport_controller_ + ->SetRemoteDescription(SdpType::kAnswer, 
remote_answer.get()) + .ok()); + + // Verify that (kMid1Audio,kMid2Video) and (kMid3Audio,kMid4Video) form two + // distinct bundled groups. + auto mid1_transport = transport_controller_->GetRtpTransport(kMid1Audio); + auto mid2_transport = transport_controller_->GetRtpTransport(kMid2Video); + auto mid3_transport = transport_controller_->GetRtpTransport(kMid3Audio); + auto mid4_transport = transport_controller_->GetRtpTransport(kMid4Video); + EXPECT_EQ(mid1_transport, mid2_transport); + EXPECT_EQ(mid3_transport, mid4_transport); + EXPECT_NE(mid1_transport, mid3_transport); + + auto it = changed_rtp_transport_by_mid_.find(kMid1Audio); + ASSERT_TRUE(it != changed_rtp_transport_by_mid_.end()); + EXPECT_EQ(it->second, mid1_transport); + + it = changed_rtp_transport_by_mid_.find(kMid2Video); + ASSERT_TRUE(it != changed_rtp_transport_by_mid_.end()); + EXPECT_EQ(it->second, mid2_transport); + + it = changed_rtp_transport_by_mid_.find(kMid3Audio); + ASSERT_TRUE(it != changed_rtp_transport_by_mid_.end()); + EXPECT_EQ(it->second, mid3_transport); + + it = changed_rtp_transport_by_mid_.find(kMid4Video); + ASSERT_TRUE(it != changed_rtp_transport_by_mid_.end()); + EXPECT_EQ(it->second, mid4_transport); +} + +TEST_F(JsepTransportControllerTest, + MultipleBundleGroupsInOfferButOnlyASingleGroupInAnswer) { + static const char kMid1Audio[] = "1_audio"; + static const char kMid2Video[] = "2_video"; + static const char kMid3Audio[] = "3_audio"; + static const char kMid4Video[] = "4_video"; + + CreateJsepTransportController(JsepTransportController::Config()); + cricket::ContentGroup bundle_group1(cricket::GROUP_TYPE_BUNDLE); + bundle_group1.AddContentName(kMid1Audio); + bundle_group1.AddContentName(kMid2Video); + cricket::ContentGroup bundle_group2(cricket::GROUP_TYPE_BUNDLE); + bundle_group2.AddContentName(kMid3Audio); + bundle_group2.AddContentName(kMid4Video); + + auto local_offer = std::make_unique(); + AddAudioSection(local_offer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + 
cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(local_offer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid4Video, kIceUfrag4, kIcePwd4, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + // The offer has both groups. + local_offer->AddGroup(bundle_group1); + local_offer->AddGroup(bundle_group2); + + auto remote_answer = std::make_unique(); + AddAudioSection(remote_answer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(remote_answer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid4Video, kIceUfrag4, kIcePwd4, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + // The answer only has a single group! This is what happens when talking to an + // endpoint that does not have support for multiple BUNDLE groups. + remote_answer->AddGroup(bundle_group1); + + EXPECT_TRUE(transport_controller_ + ->SetLocalDescription(SdpType::kOffer, local_offer.get()) + .ok()); + EXPECT_TRUE(transport_controller_ + ->SetRemoteDescription(SdpType::kAnswer, remote_answer.get()) + .ok()); + + // Verify that (kMid1Audio,kMid2Video) form a bundle group, but that + // kMid3Audio and kMid4Video are unbundled. 
+ auto mid1_transport = transport_controller_->GetRtpTransport(kMid1Audio); + auto mid2_transport = transport_controller_->GetRtpTransport(kMid2Video); + auto mid3_transport = transport_controller_->GetRtpTransport(kMid3Audio); + auto mid4_transport = transport_controller_->GetRtpTransport(kMid4Video); + EXPECT_EQ(mid1_transport, mid2_transport); + EXPECT_NE(mid3_transport, mid4_transport); + EXPECT_NE(mid1_transport, mid3_transport); + EXPECT_NE(mid1_transport, mid4_transport); +} + +TEST_F(JsepTransportControllerTest, MultipleBundleGroupsIllegallyChangeGroup) { + static const char kMid1Audio[] = "1_audio"; + static const char kMid2Video[] = "2_video"; + static const char kMid3Audio[] = "3_audio"; + static const char kMid4Video[] = "4_video"; + + CreateJsepTransportController(JsepTransportController::Config()); + // Offer groups (kMid1Audio,kMid2Video) and (kMid3Audio,kMid4Video). + cricket::ContentGroup offer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group1.AddContentName(kMid1Audio); + offer_bundle_group1.AddContentName(kMid2Video); + cricket::ContentGroup offer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group2.AddContentName(kMid3Audio); + offer_bundle_group2.AddContentName(kMid4Video); + // Answer groups (kMid1Audio,kMid4Video) and (kMid3Audio,kMid2Video), i.e. the + // second group members have switched places. This should get rejected. 
+ cricket::ContentGroup answer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + answer_bundle_group1.AddContentName(kMid1Audio); + answer_bundle_group1.AddContentName(kMid4Video); + cricket::ContentGroup answer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + answer_bundle_group2.AddContentName(kMid3Audio); + answer_bundle_group2.AddContentName(kMid2Video); + + auto local_offer = std::make_unique(); + AddAudioSection(local_offer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(local_offer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid4Video, kIceUfrag4, kIcePwd4, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + local_offer->AddGroup(offer_bundle_group1); + local_offer->AddGroup(offer_bundle_group2); + + auto remote_answer = std::make_unique(); + AddAudioSection(remote_answer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(remote_answer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid4Video, kIceUfrag4, kIcePwd4, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + remote_answer->AddGroup(answer_bundle_group1); + remote_answer->AddGroup(answer_bundle_group2); + + // Accept offer. + EXPECT_TRUE(transport_controller_ + ->SetLocalDescription(SdpType::kOffer, local_offer.get()) + .ok()); + // Reject answer! 
+ EXPECT_FALSE(transport_controller_ + ->SetRemoteDescription(SdpType::kAnswer, remote_answer.get()) + .ok()); +} + +TEST_F(JsepTransportControllerTest, MultipleBundleGroupsInvalidSubsets) { + static const char kMid1Audio[] = "1_audio"; + static const char kMid2Video[] = "2_video"; + static const char kMid3Audio[] = "3_audio"; + static const char kMid4Video[] = "4_video"; + + CreateJsepTransportController(JsepTransportController::Config()); + // Offer groups (kMid1Audio,kMid2Video) and (kMid3Audio,kMid4Video). + cricket::ContentGroup offer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group1.AddContentName(kMid1Audio); + offer_bundle_group1.AddContentName(kMid2Video); + cricket::ContentGroup offer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group2.AddContentName(kMid3Audio); + offer_bundle_group2.AddContentName(kMid4Video); + // Answer groups (kMid1Audio) and (kMid2Video), i.e. the second group was + // moved from the first group. This should get rejected. + cricket::ContentGroup answer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + answer_bundle_group1.AddContentName(kMid1Audio); + cricket::ContentGroup answer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + answer_bundle_group2.AddContentName(kMid2Video); + + auto local_offer = std::make_unique(); + AddAudioSection(local_offer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(local_offer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid4Video, kIceUfrag4, kIcePwd4, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + local_offer->AddGroup(offer_bundle_group1); + local_offer->AddGroup(offer_bundle_group2); + + auto remote_answer = std::make_unique(); + 
AddAudioSection(remote_answer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(remote_answer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid4Video, kIceUfrag4, kIcePwd4, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + remote_answer->AddGroup(answer_bundle_group1); + remote_answer->AddGroup(answer_bundle_group2); + + // Accept offer. + EXPECT_TRUE(transport_controller_ + ->SetLocalDescription(SdpType::kOffer, local_offer.get()) + .ok()); + // Reject answer! + EXPECT_FALSE(transport_controller_ + ->SetRemoteDescription(SdpType::kAnswer, remote_answer.get()) + .ok()); +} + +TEST_F(JsepTransportControllerTest, MultipleBundleGroupsInvalidOverlap) { + static const char kMid1Audio[] = "1_audio"; + static const char kMid2Video[] = "2_video"; + static const char kMid3Audio[] = "3_audio"; + + CreateJsepTransportController(JsepTransportController::Config()); + // Offer groups (kMid1Audio,kMid3Audio) and (kMid2Video,kMid3Audio), i.e. + // kMid3Audio is in both groups - this is illegal. 
+ cricket::ContentGroup offer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group1.AddContentName(kMid1Audio); + offer_bundle_group1.AddContentName(kMid3Audio); + cricket::ContentGroup offer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group2.AddContentName(kMid2Video); + offer_bundle_group2.AddContentName(kMid3Audio); + + auto offer = std::make_unique(); + AddAudioSection(offer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(offer.get(), kMid2Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(offer.get(), kMid3Audio, kIceUfrag3, kIcePwd3, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + offer->AddGroup(offer_bundle_group1); + offer->AddGroup(offer_bundle_group2); + + // Reject offer, both if set as local or remote. + EXPECT_FALSE( + transport_controller_->SetLocalDescription(SdpType::kOffer, offer.get()) + .ok()); + EXPECT_FALSE( + transport_controller_->SetRemoteDescription(SdpType::kOffer, offer.get()) + .ok()); +} + +TEST_F(JsepTransportControllerTest, MultipleBundleGroupsUnbundleFirstMid) { + static const char kMid1Audio[] = "1_audio"; + static const char kMid2Audio[] = "2_audio"; + static const char kMid3Audio[] = "3_audio"; + static const char kMid4Video[] = "4_video"; + static const char kMid5Video[] = "5_video"; + static const char kMid6Video[] = "6_video"; + + CreateJsepTransportController(JsepTransportController::Config()); + // Offer groups (kMid1Audio,kMid2Audio,kMid3Audio) and + // (kMid4Video,kMid5Video,kMid6Video). 
+ cricket::ContentGroup offer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group1.AddContentName(kMid1Audio); + offer_bundle_group1.AddContentName(kMid2Audio); + offer_bundle_group1.AddContentName(kMid3Audio); + cricket::ContentGroup offer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group2.AddContentName(kMid4Video); + offer_bundle_group2.AddContentName(kMid5Video); + offer_bundle_group2.AddContentName(kMid6Video); + // Answer groups (kMid2Audio,kMid3Audio) and (kMid5Video,kMid6Video), i.e. + // we've moved the first MIDs out of the groups. + cricket::ContentGroup answer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + answer_bundle_group1.AddContentName(kMid2Audio); + answer_bundle_group1.AddContentName(kMid3Audio); + cricket::ContentGroup answer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + answer_bundle_group2.AddContentName(kMid5Video); + answer_bundle_group2.AddContentName(kMid6Video); + + auto local_offer = std::make_unique(); + AddAudioSection(local_offer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(local_offer.get(), kMid2Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(local_offer.get(), kMid3Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid4Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid5Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid6Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + local_offer->AddGroup(offer_bundle_group1); + local_offer->AddGroup(offer_bundle_group2); + + auto remote_answer = std::make_unique(); + AddAudioSection(remote_answer.get(), 
kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(remote_answer.get(), kMid2Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(remote_answer.get(), kMid3Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid4Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid5Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid6Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + remote_answer->AddGroup(answer_bundle_group1); + remote_answer->AddGroup(answer_bundle_group2); + + EXPECT_TRUE(transport_controller_ + ->SetLocalDescription(SdpType::kOffer, local_offer.get()) + .ok()); + EXPECT_TRUE(transport_controller_ + ->SetRemoteDescription(SdpType::kAnswer, remote_answer.get()) + .ok()); + + auto mid1_transport = transport_controller_->GetRtpTransport(kMid1Audio); + auto mid2_transport = transport_controller_->GetRtpTransport(kMid2Audio); + auto mid3_transport = transport_controller_->GetRtpTransport(kMid3Audio); + auto mid4_transport = transport_controller_->GetRtpTransport(kMid4Video); + auto mid5_transport = transport_controller_->GetRtpTransport(kMid5Video); + auto mid6_transport = transport_controller_->GetRtpTransport(kMid6Video); + EXPECT_NE(mid1_transport, mid2_transport); + EXPECT_EQ(mid2_transport, mid3_transport); + EXPECT_NE(mid4_transport, mid5_transport); + EXPECT_EQ(mid5_transport, mid6_transport); + EXPECT_NE(mid1_transport, mid4_transport); + EXPECT_NE(mid2_transport, mid5_transport); +} + +TEST_F(JsepTransportControllerTest, MultipleBundleGroupsChangeFirstMid) { + static const char kMid1Audio[] = "1_audio"; + static 
const char kMid2Audio[] = "2_audio"; + static const char kMid3Audio[] = "3_audio"; + static const char kMid4Video[] = "4_video"; + static const char kMid5Video[] = "5_video"; + static const char kMid6Video[] = "6_video"; + + CreateJsepTransportController(JsepTransportController::Config()); + // Offer groups (kMid1Audio,kMid2Audio,kMid3Audio) and + // (kMid4Video,kMid5Video,kMid6Video). + cricket::ContentGroup offer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group1.AddContentName(kMid1Audio); + offer_bundle_group1.AddContentName(kMid2Audio); + offer_bundle_group1.AddContentName(kMid3Audio); + cricket::ContentGroup offer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + offer_bundle_group2.AddContentName(kMid4Video); + offer_bundle_group2.AddContentName(kMid5Video); + offer_bundle_group2.AddContentName(kMid6Video); + // Answer groups (kMid2Audio,kMid1Audio,kMid3Audio) and + // (kMid5Video,kMid6Video,kMid4Video), i.e. we've changed which MID is first + // but accept the whole group. 
+ cricket::ContentGroup answer_bundle_group1(cricket::GROUP_TYPE_BUNDLE); + answer_bundle_group1.AddContentName(kMid2Audio); + answer_bundle_group1.AddContentName(kMid1Audio); + answer_bundle_group1.AddContentName(kMid3Audio); + cricket::ContentGroup answer_bundle_group2(cricket::GROUP_TYPE_BUNDLE); + answer_bundle_group2.AddContentName(kMid5Video); + answer_bundle_group2.AddContentName(kMid6Video); + answer_bundle_group2.AddContentName(kMid4Video); + + auto local_offer = std::make_unique(); + AddAudioSection(local_offer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(local_offer.get(), kMid2Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(local_offer.get(), kMid3Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid4Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid5Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(local_offer.get(), kMid6Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + local_offer->AddGroup(offer_bundle_group1); + local_offer->AddGroup(offer_bundle_group2); + + auto remote_answer = std::make_unique(); + AddAudioSection(remote_answer.get(), kMid1Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(remote_answer.get(), kMid2Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddAudioSection(remote_answer.get(), kMid3Audio, kIceUfrag1, kIcePwd1, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid4Video, kIceUfrag2, kIcePwd2, + 
cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid5Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + AddVideoSection(remote_answer.get(), kMid6Video, kIceUfrag2, kIcePwd2, + cricket::ICEMODE_FULL, cricket::CONNECTIONROLE_ACTPASS, + nullptr); + remote_answer->AddGroup(answer_bundle_group1); + remote_answer->AddGroup(answer_bundle_group2); + + EXPECT_TRUE(transport_controller_ + ->SetLocalDescription(SdpType::kOffer, local_offer.get()) + .ok()); + + // The fact that we accept this answer is actually a bug. If we accept the + // first MID to be in the group, we should also accept that it is the tagged + // one. + // TODO(https://crbug.com/webrtc/12699): When this issue is fixed, change this + // to EXPECT_FALSE and remove the below expectations about transports. + EXPECT_TRUE(transport_controller_ + ->SetRemoteDescription(SdpType::kAnswer, remote_answer.get()) + .ok()); + auto mid1_transport = transport_controller_->GetRtpTransport(kMid1Audio); + auto mid2_transport = transport_controller_->GetRtpTransport(kMid2Audio); + auto mid3_transport = transport_controller_->GetRtpTransport(kMid3Audio); + auto mid4_transport = transport_controller_->GetRtpTransport(kMid4Video); + auto mid5_transport = transport_controller_->GetRtpTransport(kMid5Video); + auto mid6_transport = transport_controller_->GetRtpTransport(kMid6Video); + EXPECT_NE(mid1_transport, mid4_transport); + EXPECT_EQ(mid1_transport, mid2_transport); + EXPECT_EQ(mid2_transport, mid3_transport); + EXPECT_EQ(mid4_transport, mid5_transport); + EXPECT_EQ(mid5_transport, mid6_transport); +} + // Tests that only a subset of all the m= sections are bundled. 
TEST_F(JsepTransportControllerTest, BundleSubsetOfMediaSections) { CreateJsepTransportController(JsepTransportController::Config()); @@ -1650,423 +2060,4 @@ TEST_F(JsepTransportControllerTest, ChangeTaggedMediaSectionMaxBundle) { .ok()); } -constexpr char kFakeTransportParameters[] = "fake-params"; - -// Test fixture that provides common setup and helpers for tests related to the -// datagram transport. -class JsepTransportControllerDatagramTest - : public JsepTransportControllerTest, - public testing::WithParamInterface { - public: - JsepTransportControllerDatagramTest() - : JsepTransportControllerTest(), - fake_media_transport_factory_(kFakeTransportParameters) { - JsepTransportController::Config config; - config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - config.media_transport_factory = &fake_media_transport_factory_; - config.use_datagram_transport = true; - CreateJsepTransportController(config); - } - - // Whether the JsepTransportController under test acts as the offerer or - // answerer in this test. - bool IsOfferer() { return GetParam(); } - - // Sets a description as local or remote based on type and current - // perspective. - RTCError SetDescription(SdpType type, - const cricket::SessionDescription* description) { - if (IsOfferer() == (type == SdpType::kOffer)) { - return transport_controller_->SetLocalDescription(type, description); - } else { - return transport_controller_->SetRemoteDescription(type, description); - } - } - - // Creates a session description with the settings necessary for datagram - // transport (bundle + crypto) and the given |transport_params|. 
- std::unique_ptr - CreateSessionDescriptionForDatagramTransport( - absl::optional transport_params) { - auto description = CreateSessionDescriptionWithBundleGroup(); - AddCryptoSettings(description.get()); - - for (auto& info : description->transport_infos()) { - info.description.opaque_parameters = transport_params; - } - if (transport_params) { - for (auto& content_info : description->contents()) { - content_info.media_description()->set_alt_protocol( - transport_params->protocol); - } - } - return description; - } - - // Creates transport parameters with |protocol| and |parameters| - // matching what |fake_media_transport_factory_| provides. - cricket::OpaqueTransportParameters CreateTransportParameters() { - cricket::OpaqueTransportParameters params; - params.protocol = fake_media_transport_factory_.GetTransportName(); - params.parameters = "fake-params"; - return params; - } - - protected: - FakeMediaTransportFactory fake_media_transport_factory_; -}; - -TEST_P(JsepTransportControllerDatagramTest, InitDatagramTransport) { - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - // Getting transport parameters is allowed before setting a description. - // This is necessary so that the offerer can include these params. - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - // Setting a description activates the datagram transport without changing - // transport parameters. - auto description = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, description.get()).ok()); - - // After setting an offer with transport parameters, those parameters are - // reflected by the controller. 
- EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); -} - -TEST_P(JsepTransportControllerDatagramTest, - OfferMissingDatagramTransportParams) { - if (IsOfferer()) { - // This test doesn't make sense from the offerer's perspective, as the offer - // must contain datagram transport params if the offerer supports it. - return; - } - - auto description = - CreateSessionDescriptionForDatagramTransport(absl::nullopt); - EXPECT_TRUE(SetDescription(SdpType::kOffer, description.get()).ok()); - - // The offer didn't contain any datagram transport parameters, so the answer - // won't either. - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - absl::nullopt); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - absl::nullopt); -} - -TEST_P(JsepTransportControllerDatagramTest, OfferHasWrongTransportName) { - if (IsOfferer()) { - // This test doesn't make sense from the offerer's perspective, as the - // offerer cannot offer itself the wrong transport. - return; - } - - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - fake_params.protocol = "wrong-name"; - - auto description = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, description.get()).ok()); - - // The offerer and answerer support different datagram transports, so the - // answerer rejects the offered parameters. - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - absl::nullopt); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - absl::nullopt); -} - -TEST_P(JsepTransportControllerDatagramTest, IncompatibleAnswer) { - // Transport will claim that no parameters are compatible, even if they match - // exactly. 
- fake_media_transport_factory_.set_transport_parameters_comparison( - [](absl::string_view, absl::string_view) { return false; }); - - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - auto offer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - auto answer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kAnswer, answer.get()).ok()); - - // The offerer and answerer have incompatible parameters, so the answerer - // rejects the offered parameters. - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - absl::nullopt); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - absl::nullopt); -} - -TEST_P(JsepTransportControllerDatagramTest, CompatibleAnswer) { - // Transport will claim that no parameters are compatible, even if they are - // completely different. 
- fake_media_transport_factory_.set_transport_parameters_comparison( - [](absl::string_view, absl::string_view) { return true; }); - - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - auto offer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - cricket::OpaqueTransportParameters answer_params; - answer_params.protocol = fake_params.protocol; - answer_params.parameters = "something different from offer"; - auto answer = CreateSessionDescriptionForDatagramTransport(answer_params); - EXPECT_TRUE(SetDescription(SdpType::kAnswer, answer.get()).ok()); - - // The offerer and answerer have compatible parameters, so the answerer - // accepts the offered parameters. - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); -} - -TEST_P(JsepTransportControllerDatagramTest, AnswerRejectsDatagram) { - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - auto offer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - auto answer = CreateSessionDescriptionForDatagramTransport(absl::nullopt); - EXPECT_TRUE(SetDescription(SdpType::kAnswer, answer.get()).ok()); - - // The answer rejected 
datagram transport, so its parameters are empty. - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - absl::nullopt); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - absl::nullopt); -} - -TEST_P(JsepTransportControllerDatagramTest, AnswerAcceptsDatagram) { - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - auto offer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - auto answer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kAnswer, answer.get()).ok()); - - // The answer accepted datagram transport, so it is present. 
- EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); -} - -TEST_P(JsepTransportControllerDatagramTest, PrAnswerRejectsDatagram) { - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - auto offer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - auto answer = CreateSessionDescriptionForDatagramTransport(absl::nullopt); - EXPECT_TRUE(SetDescription(SdpType::kPrAnswer, answer.get()).ok()); - - // The answer rejected datagram transport, but it's provisional, so the - // transport is kept around for now. 
- EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); -} - -TEST_P(JsepTransportControllerDatagramTest, PrAnswerAcceptsDatagram) { - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - auto offer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - auto answer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kPrAnswer, answer.get()).ok()); - - // The answer provisionally accepted datagram transport, so it's kept. 
- EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); -} - -TEST_P(JsepTransportControllerDatagramTest, RenegotiationCannotAddDatagram) { - auto offer = CreateSessionDescriptionForDatagramTransport(absl::nullopt); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - absl::nullopt); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - absl::nullopt); - - auto answer = CreateSessionDescriptionForDatagramTransport(absl::nullopt); - EXPECT_TRUE(SetDescription(SdpType::kAnswer, answer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - absl::nullopt); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - absl::nullopt); - - // Attempting to add a datagram transport on a re-offer does not cause an - // error, but also does not add a datagram transport. 
- auto reoffer = - CreateSessionDescriptionForDatagramTransport(CreateTransportParameters()); - EXPECT_TRUE(SetDescription(SdpType::kOffer, reoffer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - absl::nullopt); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - absl::nullopt); -} - -TEST_P(JsepTransportControllerDatagramTest, RenegotiationCannotRemoveDatagram) { - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - auto offer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - auto answer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kAnswer, answer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - // Attempting to remove a datagram transport on a re-offer does not cause an - // error, but also does not remove the datagram transport. 
- auto reoffer = CreateSessionDescriptionForDatagramTransport(absl::nullopt); - EXPECT_TRUE(SetDescription(SdpType::kOffer, reoffer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); -} - -TEST_P(JsepTransportControllerDatagramTest, - RenegotiationKeepsDatagramTransport) { - cricket::OpaqueTransportParameters fake_params = CreateTransportParameters(); - if (IsOfferer()) { - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - } - - auto offer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, offer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - auto answer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kAnswer, answer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - // Attempting to remove a datagram transport on a re-offer does not cause an - // error, but also does not remove the datagram transport. 
- auto reoffer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kOffer, reoffer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); - - auto reanswer = CreateSessionDescriptionForDatagramTransport(fake_params); - EXPECT_TRUE(SetDescription(SdpType::kAnswer, reanswer.get()).ok()); - - EXPECT_EQ(transport_controller_->GetTransportParameters(kAudioMid1), - fake_params); - EXPECT_EQ(transport_controller_->GetTransportParameters(kVideoMid1), - fake_params); -} - -INSTANTIATE_TEST_SUITE_P( - JsepTransportControllerDatagramTests, - JsepTransportControllerDatagramTest, - testing::Values(true, false), - // The parameter value is the local perspective (offerer or answerer). - [](const testing::TestParamInfo& info) { - return info.param ? "Offerer" : "Answerer"; - }); - } // namespace webrtc diff --git a/pc/jsep_transport_unittest.cc b/pc/jsep_transport_unittest.cc index a4b1d5593e..5f4334068a 100644 --- a/pc/jsep_transport_unittest.cc +++ b/pc/jsep_transport_unittest.cc @@ -48,8 +48,7 @@ rtc::scoped_refptr CreateIceTransport( return nullptr; } - return new rtc::RefCountedObject( - std::move(internal)); + return rtc::make_ref_counted(std::move(internal)); } class JsepTransport2Test : public ::testing::Test, public sigslot::has_slots<> { @@ -118,11 +117,8 @@ class JsepTransport2Test : public ::testing::Test, public sigslot::has_slots<> { kTransportName, /*local_certificate=*/nullptr, std::move(ice), std::move(rtcp_ice), std::move(unencrypted_rtp_transport), std::move(sdes_transport), std::move(dtls_srtp_transport), - /*datagram_rtp_transport=*/nullptr, std::move(rtp_dtls_transport), - std::move(rtcp_dtls_transport), - /*sctp_transport=*/nullptr, - /*datagram_transport=*/nullptr, - /*data_channel_transport=*/nullptr); + std::move(rtp_dtls_transport), std::move(rtcp_dtls_transport), + 
/*sctp_transport=*/nullptr); signal_rtcp_mux_active_received_ = false; jsep_transport->SignalRtcpMuxActive.connect( diff --git a/pc/local_audio_source.cc b/pc/local_audio_source.cc index 22ab1c39c3..3fcad50a1d 100644 --- a/pc/local_audio_source.cc +++ b/pc/local_audio_source.cc @@ -18,8 +18,7 @@ namespace webrtc { rtc::scoped_refptr LocalAudioSource::Create( const cricket::AudioOptions* audio_options) { - rtc::scoped_refptr source( - new rtc::RefCountedObject()); + auto source = rtc::make_ref_counted(); source->Initialize(audio_options); return source; } diff --git a/pc/media_protocol_names.cc b/pc/media_protocol_names.cc index 3def3f0f20..ae4fcf3391 100644 --- a/pc/media_protocol_names.cc +++ b/pc/media_protocol_names.cc @@ -10,6 +10,9 @@ #include "pc/media_protocol_names.h" +#include +#include + namespace cricket { // There are multiple variants of the RTP protocol stack, including diff --git a/pc/media_session.cc b/pc/media_session.cc index 51885b4fc4..3c73ddf535 100644 --- a/pc/media_session.cc +++ b/pc/media_session.cc @@ -10,8 +10,9 @@ #include "pc/media_session.h" +#include + #include -#include #include #include #include @@ -20,22 +21,27 @@ #include "absl/algorithm/container.h" #include "absl/strings/match.h" +#include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "api/crypto_params.h" -#include "media/base/h264_profile_level_id.h" +#include "api/video_codecs/h264_profile_level_id.h" +#include "media/base/codec.h" #include "media/base/media_constants.h" +#include "media/base/sdp_video_format_utils.h" #include "media/sctp/sctp_transport_internal.h" #include "p2p/base/p2p_constants.h" #include "pc/channel_manager.h" #include "pc/media_protocol_names.h" #include "pc/rtp_media_utils.h" -#include "pc/srtp_filter.h" #include "pc/used_ids.h" #include "rtc_base/checks.h" #include "rtc_base/helpers.h" #include "rtc_base/logging.h" +#include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/string_encode.h" #include 
"rtc_base/third_party/base64/base64.h" #include "rtc_base/unique_id_generator.h" +#include "system_wrappers/include/field_trial.h" namespace { @@ -55,6 +61,57 @@ void GetSupportedSdesCryptoSuiteNames( } } +webrtc::RtpExtension RtpExtensionFromCapability( + const webrtc::RtpHeaderExtensionCapability& capability) { + return webrtc::RtpExtension(capability.uri, + capability.preferred_id.value_or(1)); +} + +cricket::RtpHeaderExtensions RtpHeaderExtensionsFromCapabilities( + const std::vector& capabilities) { + cricket::RtpHeaderExtensions exts; + for (const auto& capability : capabilities) { + exts.push_back(RtpExtensionFromCapability(capability)); + } + return exts; +} + +std::vector +UnstoppedRtpHeaderExtensionCapabilities( + std::vector capabilities) { + capabilities.erase( + std::remove_if( + capabilities.begin(), capabilities.end(), + [](const webrtc::RtpHeaderExtensionCapability& capability) { + return capability.direction == RtpTransceiverDirection::kStopped; + }), + capabilities.end()); + return capabilities; +} + +bool IsCapabilityPresent(const webrtc::RtpHeaderExtensionCapability& capability, + const cricket::RtpHeaderExtensions& extensions) { + return std::find_if(extensions.begin(), extensions.end(), + [&capability](const webrtc::RtpExtension& extension) { + return capability.uri == extension.uri; + }) != extensions.end(); +} + +cricket::RtpHeaderExtensions UnstoppedOrPresentRtpHeaderExtensions( + const std::vector& capabilities, + const cricket::RtpHeaderExtensions& unencrypted, + const cricket::RtpHeaderExtensions& encrypted) { + cricket::RtpHeaderExtensions extensions; + for (const auto& capability : capabilities) { + if (capability.direction != RtpTransceiverDirection::kStopped || + IsCapabilityPresent(capability, unencrypted) || + IsCapabilityPresent(capability, encrypted)) { + extensions.push_back(RtpExtensionFromCapability(capability)); + } + } + return extensions; +} + } // namespace namespace cricket { @@ -268,19 +325,6 @@ static StreamParamsVec 
GetCurrentStreamParams( return stream_params; } -// Filters the data codecs for the data channel type. -void FilterDataCodecs(std::vector* codecs, bool sctp) { - // Filter RTP codec for SCTP and vice versa. - const char* codec_name = - sctp ? kGoogleRtpDataCodecName : kGoogleSctpDataCodecName; - codecs->erase(std::remove_if(codecs->begin(), codecs->end(), - [&codec_name](const DataCodec& codec) { - return absl::EqualsIgnoreCase(codec.name, - codec_name); - }), - codecs->end()); -} - static StreamParams CreateStreamParamsForNewSenderWithSsrcs( const SenderOptions& sender, const std::string& rtcp_cname, @@ -298,6 +342,12 @@ static StreamParams CreateStreamParamsForNewSenderWithSsrcs( "a single media streams. This session has multiple " "media streams however, so no FlexFEC SSRC will be generated."; } + if (include_flexfec_stream && + !webrtc::field_trial::IsEnabled("WebRTC-FlexFEC-03")) { + include_flexfec_stream = false; + RTC_LOG(LS_WARNING) + << "WebRTC-FlexFEC trial is not enabled, not sending FlexFEC"; + } result.GenerateSsrcs(sender.num_sim_layers, include_rtx_streams, include_flexfec_stream, ssrc_generator); @@ -452,15 +502,12 @@ static bool UpdateTransportInfoForBundle(const ContentGroup& bundle_group, selected_transport_info->description.ice_pwd; ConnectionRole selected_connection_role = selected_transport_info->description.connection_role; - const absl::optional& selected_opaque_parameters = - selected_transport_info->description.opaque_parameters; for (TransportInfo& transport_info : sdesc->transport_infos()) { if (bundle_group.HasContentName(transport_info.content_name) && transport_info.content_name != selected_content_name) { transport_info.description.ice_ufrag = selected_ufrag; transport_info.description.ice_pwd = selected_pwd; transport_info.description.connection_role = selected_connection_role; - transport_info.description.opaque_parameters = selected_opaque_parameters; } } return true; @@ -646,7 +693,21 @@ static bool CreateContentOffer( if 
(offer->type() == cricket::MEDIA_TYPE_VIDEO) { offer->set_rtcp_reduced_size(true); } - offer->set_rtp_header_extensions(rtp_extensions); + + // Build the vector of header extensions with directions for this + // media_description's options. + RtpHeaderExtensions extensions; + for (auto extension_with_id : rtp_extensions) { + for (const auto& extension : media_description_options.header_extensions) { + if (extension_with_id.uri == extension.uri) { + // TODO(crbug.com/1051821): Configure the extension direction from + // the information in the media_description_options extension + // capability. + extensions.push_back(extension_with_id); + } + } + } + offer->set_rtp_header_extensions(extensions); AddSimulcastToMediaDescription(media_description_options, offer); @@ -661,8 +722,6 @@ static bool CreateContentOffer( } } - offer->set_alt_protocol(media_description_options.alt_protocol); - if (secure_policy == SEC_REQUIRED && offer->cryptos().empty()) { return false; } @@ -735,10 +794,16 @@ static void NegotiateCodecs(const std::vector& local_codecs, // FindMatchingCodec shouldn't return something with no apt value. RTC_DCHECK(apt_it != theirs.params.end()); negotiated.SetParam(kCodecParamAssociatedPayloadType, apt_it->second); + + // We support parsing the declarative rtx-time parameter. 
+ const auto rtx_time_it = theirs.params.find(kCodecParamRtxTime); + if (rtx_time_it != theirs.params.end()) { + negotiated.SetParam(kCodecParamRtxTime, rtx_time_it->second); + } } if (absl::EqualsIgnoreCase(ours.name, kH264CodecName)) { - webrtc::H264::GenerateProfileLevelIdForAnswer( - ours.params, theirs.params, &negotiated.params); + webrtc::H264GenerateProfileLevelIdForAnswer(ours.params, theirs.params, + &negotiated.params); } negotiated.id = theirs.id; negotiated.name = theirs.name; @@ -924,68 +989,6 @@ static Codecs MatchCodecPreference( return filtered_codecs; } -static bool FindByUriAndEncryption(const RtpHeaderExtensions& extensions, - const webrtc::RtpExtension& ext_to_match, - webrtc::RtpExtension* found_extension) { - auto it = absl::c_find_if( - extensions, [&ext_to_match](const webrtc::RtpExtension& extension) { - // We assume that all URIs are given in a canonical - // format. - return extension.uri == ext_to_match.uri && - extension.encrypt == ext_to_match.encrypt; - }); - if (it == extensions.end()) { - return false; - } - if (found_extension) { - *found_extension = *it; - } - return true; -} - -static bool FindByUri(const RtpHeaderExtensions& extensions, - const webrtc::RtpExtension& ext_to_match, - webrtc::RtpExtension* found_extension) { - // We assume that all URIs are given in a canonical format. - const webrtc::RtpExtension* found = - webrtc::RtpExtension::FindHeaderExtensionByUri(extensions, - ext_to_match.uri); - if (!found) { - return false; - } - if (found_extension) { - *found_extension = *found; - } - return true; -} - -static bool FindByUriWithEncryptionPreference( - const RtpHeaderExtensions& extensions, - absl::string_view uri_to_match, - bool encryption_preference, - webrtc::RtpExtension* found_extension) { - const webrtc::RtpExtension* unencrypted_extension = nullptr; - for (const webrtc::RtpExtension& extension : extensions) { - // We assume that all URIs are given in a canonical format. 
- if (extension.uri == uri_to_match) { - if (!encryption_preference || extension.encrypt) { - if (found_extension) { - *found_extension = extension; - } - return true; - } - unencrypted_extension = &extension; - } - } - if (unencrypted_extension) { - if (found_extension) { - *found_extension = *unencrypted_extension; - } - return true; - } - return false; -} - // Adds all extensions from |reference_extensions| to |offered_extensions| that // don't already exist in |offered_extensions| and ensure the IDs don't // collide. If an extension is added, it's also added to |regular_extensions| or @@ -1000,22 +1003,28 @@ static void MergeRtpHdrExts(const RtpHeaderExtensions& reference_extensions, RtpHeaderExtensions* encrypted_extensions, UsedRtpHeaderExtensionIds* used_ids) { for (auto reference_extension : reference_extensions) { - if (!FindByUriAndEncryption(*offered_extensions, reference_extension, - nullptr)) { - webrtc::RtpExtension existing; + if (!webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *offered_extensions, reference_extension.uri, + reference_extension.encrypt)) { if (reference_extension.encrypt) { - if (FindByUriAndEncryption(*encrypted_extensions, reference_extension, - &existing)) { - offered_extensions->push_back(existing); + const webrtc::RtpExtension* existing = + webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *encrypted_extensions, reference_extension.uri, + reference_extension.encrypt); + if (existing) { + offered_extensions->push_back(*existing); } else { used_ids->FindAndSetIdUsed(&reference_extension); encrypted_extensions->push_back(reference_extension); offered_extensions->push_back(reference_extension); } } else { - if (FindByUriAndEncryption(*regular_extensions, reference_extension, - &existing)) { - offered_extensions->push_back(existing); + const webrtc::RtpExtension* existing = + webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *regular_extensions, reference_extension.uri, + 
reference_extension.encrypt); + if (existing) { + offered_extensions->push_back(*existing); } else { used_ids->FindAndSetIdUsed(&reference_extension); regular_extensions->push_back(reference_extension); @@ -1026,41 +1035,86 @@ static void MergeRtpHdrExts(const RtpHeaderExtensions& reference_extensions, } } -static void AddEncryptedVersionsOfHdrExts(RtpHeaderExtensions* extensions, - RtpHeaderExtensions* all_extensions, - UsedRtpHeaderExtensionIds* used_ids) { - RtpHeaderExtensions encrypted_extensions; - for (const webrtc::RtpExtension& extension : *extensions) { - webrtc::RtpExtension existing; - // Don't add encrypted extensions again that were already included in a - // previous offer or regular extensions that are also included as encrypted - // extensions. - if (extension.encrypt || - !webrtc::RtpExtension::IsEncryptionSupported(extension.uri) || - (FindByUriWithEncryptionPreference(*extensions, extension.uri, true, - &existing) && - existing.encrypt)) { +static void AddEncryptedVersionsOfHdrExts( + RtpHeaderExtensions* offered_extensions, + RtpHeaderExtensions* encrypted_extensions, + UsedRtpHeaderExtensionIds* used_ids) { + RtpHeaderExtensions encrypted_extensions_to_add; + for (const auto& extension : *offered_extensions) { + // Skip existing encrypted offered extension + if (extension.encrypt) { continue; } - if (FindByUri(*all_extensions, extension, &existing)) { - encrypted_extensions.push_back(existing); - } else { - webrtc::RtpExtension encrypted(extension); - encrypted.encrypt = true; - used_ids->FindAndSetIdUsed(&encrypted); - all_extensions->push_back(encrypted); - encrypted_extensions.push_back(encrypted); + // Skip if we cannot encrypt the extension + if (!webrtc::RtpExtension::IsEncryptionSupported(extension.uri)) { + continue; } + + // Skip if an encrypted extension with that URI already exists in the + // offered extensions. 
+ const bool have_encrypted_extension = + webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *offered_extensions, extension.uri, true); + if (have_encrypted_extension) { + continue; + } + + // Determine if a shared encrypted extension with that URI already exists. + const webrtc::RtpExtension* shared_encrypted_extension = + webrtc::RtpExtension::FindHeaderExtensionByUriAndEncryption( + *encrypted_extensions, extension.uri, true); + if (shared_encrypted_extension) { + // Re-use the shared encrypted extension + encrypted_extensions_to_add.push_back(*shared_encrypted_extension); + continue; + } + + // None exists. Create a new shared encrypted extension from the + // non-encrypted one. + webrtc::RtpExtension new_encrypted_extension(extension); + new_encrypted_extension.encrypt = true; + used_ids->FindAndSetIdUsed(&new_encrypted_extension); + encrypted_extensions->push_back(new_encrypted_extension); + encrypted_extensions_to_add.push_back(new_encrypted_extension); + } + + // Append the additional encrypted extensions to be offered + offered_extensions->insert(offered_extensions->end(), + encrypted_extensions_to_add.begin(), + encrypted_extensions_to_add.end()); +} + +// Mostly identical to RtpExtension::FindHeaderExtensionByUri but discards any +// encrypted extensions that this implementation cannot encrypt. +static const webrtc::RtpExtension* FindHeaderExtensionByUriDiscardUnsupported( + const std::vector& extensions, + absl::string_view uri, + webrtc::RtpExtension::Filter filter) { + // Note: While it's technically possible to decrypt extensions that we don't + // encrypt, the symmetric API of libsrtp does not allow us to supply + // different IDs for encryption/decryption of header extensions depending on + // whether the packet is inbound or outbound. Thereby, we are limited to + // what we can send in encrypted form. 
+ if (!webrtc::RtpExtension::IsEncryptionSupported(uri)) { + // If there's no encryption support and we only want encrypted extensions, + // there's no point in continuing the search here. + if (filter == webrtc::RtpExtension::kRequireEncryptedExtension) { + return nullptr; + } + + // Instruct to only return non-encrypted extensions + filter = webrtc::RtpExtension::Filter::kDiscardEncryptedExtension; } - extensions->insert(extensions->end(), encrypted_extensions.begin(), - encrypted_extensions.end()); + + return webrtc::RtpExtension::FindHeaderExtensionByUri(extensions, uri, + filter); } static void NegotiateRtpHeaderExtensions( const RtpHeaderExtensions& local_extensions, const RtpHeaderExtensions& offered_extensions, - bool enable_encrypted_rtp_header_extensions, + webrtc::RtpExtension::Filter filter, RtpHeaderExtensions* negotiated_extensions) { // TransportSequenceNumberV2 is not offered by default. The special logic for // the TransportSequenceNumber extensions works as follows: @@ -1069,9 +1123,9 @@ static void NegotiateRtpHeaderExtensions( // V1 and V2 V2 regardless of local_extensions. // V2 V2 regardless of local_extensions. 
const webrtc::RtpExtension* transport_sequence_number_v2_offer = - webrtc::RtpExtension::FindHeaderExtensionByUri( + FindHeaderExtensionByUriDiscardUnsupported( offered_extensions, - webrtc::RtpExtension::kTransportSequenceNumberV2Uri); + webrtc::RtpExtension::kTransportSequenceNumberV2Uri, filter); bool frame_descriptor_in_local = false; bool dependency_descriptor_in_local = false; @@ -1084,10 +1138,10 @@ static void NegotiateRtpHeaderExtensions( dependency_descriptor_in_local = true; else if (ours.uri == webrtc::RtpExtension::kAbsoluteCaptureTimeUri) abs_capture_time_in_local = true; - webrtc::RtpExtension theirs; - if (FindByUriWithEncryptionPreference( - offered_extensions, ours.uri, - enable_encrypted_rtp_header_extensions, &theirs)) { + const webrtc::RtpExtension* theirs = + FindHeaderExtensionByUriDiscardUnsupported(offered_extensions, ours.uri, + filter); + if (theirs) { if (transport_sequence_number_v2_offer && ours.uri == webrtc::RtpExtension::kTransportSequenceNumberUri) { // Don't respond to @@ -1097,7 +1151,7 @@ static void NegotiateRtpHeaderExtensions( continue; } else { // We respond with their RTP header extension id. - negotiated_extensions->push_back(theirs); + negotiated_extensions->push_back(*theirs); } } } @@ -1109,28 +1163,35 @@ static void NegotiateRtpHeaderExtensions( // Frame descriptors support. If the extension is not present locally, but is // in the offer, we add it to the list. 
- webrtc::RtpExtension theirs; - if (!dependency_descriptor_in_local && - FindByUriWithEncryptionPreference( - offered_extensions, webrtc::RtpExtension::kDependencyDescriptorUri, - enable_encrypted_rtp_header_extensions, &theirs)) { - negotiated_extensions->push_back(theirs); - } - if (!frame_descriptor_in_local && - FindByUriWithEncryptionPreference( - offered_extensions, - webrtc::RtpExtension::kGenericFrameDescriptorUri00, - enable_encrypted_rtp_header_extensions, &theirs)) { - negotiated_extensions->push_back(theirs); + if (!dependency_descriptor_in_local) { + const webrtc::RtpExtension* theirs = + FindHeaderExtensionByUriDiscardUnsupported( + offered_extensions, webrtc::RtpExtension::kDependencyDescriptorUri, + filter); + if (theirs) { + negotiated_extensions->push_back(*theirs); + } + } + if (!frame_descriptor_in_local) { + const webrtc::RtpExtension* theirs = + FindHeaderExtensionByUriDiscardUnsupported( + offered_extensions, + webrtc::RtpExtension::kGenericFrameDescriptorUri00, filter); + if (theirs) { + negotiated_extensions->push_back(*theirs); + } } // Absolute capture time support. If the extension is not present locally, but // is in the offer, we add it to the list. 
- if (!abs_capture_time_in_local && - FindByUriWithEncryptionPreference( - offered_extensions, webrtc::RtpExtension::kAbsoluteCaptureTimeUri, - enable_encrypted_rtp_header_extensions, &theirs)) { - negotiated_extensions->push_back(theirs); + if (!abs_capture_time_in_local) { + const webrtc::RtpExtension* theirs = + FindHeaderExtensionByUriDiscardUnsupported( + offered_extensions, webrtc::RtpExtension::kAbsoluteCaptureTimeUri, + filter); + if (theirs) { + negotiated_extensions->push_back(*theirs); + } } } @@ -1178,17 +1239,21 @@ static bool CreateMediaContentAnswer( const MediaSessionOptions& session_options, const SecurePolicy& sdes_policy, const CryptoParamsVec* current_cryptos, - const RtpHeaderExtensions& local_rtp_extenstions, + const RtpHeaderExtensions& local_rtp_extensions, UniqueRandomIdGenerator* ssrc_generator, bool enable_encrypted_rtp_header_extensions, StreamParamsVec* current_streams, bool bundle_enabled, MediaContentDescription* answer) { answer->set_extmap_allow_mixed_enum(offer->extmap_allow_mixed_enum()); + const webrtc::RtpExtension::Filter extensions_filter = + enable_encrypted_rtp_header_extensions + ? 
webrtc::RtpExtension::Filter::kPreferEncryptedExtension + : webrtc::RtpExtension::Filter::kDiscardEncryptedExtension; RtpHeaderExtensions negotiated_rtp_extensions; - NegotiateRtpHeaderExtensions( - local_rtp_extenstions, offer->rtp_header_extensions(), - enable_encrypted_rtp_header_extensions, &negotiated_rtp_extensions); + NegotiateRtpHeaderExtensions(local_rtp_extensions, + offer->rtp_header_extensions(), + extensions_filter, &negotiated_rtp_extensions); answer->set_rtp_header_extensions(negotiated_rtp_extensions); answer->set_rtcp_mux(session_options.rtcp_mux_enabled && offer->rtcp_mux()); @@ -1218,9 +1283,6 @@ static bool CreateMediaContentAnswer( answer->set_direction(NegotiateRtpTransceiverDirection( offer->direction(), media_description_options.direction)); - if (offer->alt_protocol() == media_description_options.alt_protocol) { - answer->set_alt_protocol(media_description_options.alt_protocol); - } return true; } @@ -1319,14 +1381,6 @@ void MediaDescriptionOptions::AddVideoSender( num_sim_layers); } -void MediaDescriptionOptions::AddRtpDataChannel(const std::string& track_id, - const std::string& stream_id) { - RTC_DCHECK(type == MEDIA_TYPE_DATA); - // TODO(steveanton): Is it the case that RtpDataChannel will never have more - // than one stream? 
- AddSenderInternal(track_id, {stream_id}, {}, SimulcastLayerList(), 1); -} - void MediaDescriptionOptions::AddSenderInternal( const std::string& track_id, const std::vector& stream_ids, @@ -1365,13 +1419,8 @@ MediaSessionDescriptionFactory::MediaSessionDescriptionFactory( : MediaSessionDescriptionFactory(transport_desc_factory, ssrc_generator) { channel_manager->GetSupportedAudioSendCodecs(&audio_send_codecs_); channel_manager->GetSupportedAudioReceiveCodecs(&audio_recv_codecs_); - audio_rtp_extensions_ = - channel_manager->GetDefaultEnabledAudioRtpHeaderExtensions(); channel_manager->GetSupportedVideoSendCodecs(&video_send_codecs_); channel_manager->GetSupportedVideoReceiveCodecs(&video_recv_codecs_); - video_rtp_extensions_ = - channel_manager->GetDefaultEnabledVideoRtpHeaderExtensions(); - channel_manager->GetSupportedDataCodecs(&rtp_data_codecs_); ComputeAudioCodecsIntersectionAndUnion(); ComputeVideoCodecsIntersectionAndUnion(); } @@ -1433,22 +1482,11 @@ static void RemoveUnifiedPlanExtensions(RtpHeaderExtensions* extensions) { } RtpHeaderExtensions -MediaSessionDescriptionFactory::audio_rtp_header_extensions() const { - RtpHeaderExtensions extensions = audio_rtp_extensions_; - if (!is_unified_plan_) { - RemoveUnifiedPlanExtensions(&extensions); - } - - return extensions; -} - -RtpHeaderExtensions -MediaSessionDescriptionFactory::video_rtp_header_extensions() const { - RtpHeaderExtensions extensions = video_rtp_extensions_; +MediaSessionDescriptionFactory::filtered_rtp_header_extensions( + RtpHeaderExtensions extensions) const { if (!is_unified_plan_) { RemoveUnifiedPlanExtensions(&extensions); } - return extensions; } @@ -1475,22 +1513,12 @@ std::unique_ptr MediaSessionDescriptionFactory::CreateOffer( AudioCodecs offer_audio_codecs; VideoCodecs offer_video_codecs; - RtpDataCodecs offer_rtp_data_codecs; GetCodecsForOffer(current_active_contents, &offer_audio_codecs, - &offer_video_codecs, &offer_rtp_data_codecs); - - if (!session_options.vad_enabled) { - // 
If application doesn't want CN codecs in offer. - StripCNCodecs(&offer_audio_codecs); - } - FilterDataCodecs(&offer_rtp_data_codecs, - session_options.data_channel_type == DCT_SCTP); - - RtpHeaderExtensions audio_rtp_extensions; - RtpHeaderExtensions video_rtp_extensions; - GetRtpHdrExtsToOffer(current_active_contents, - session_options.offer_extmap_allow_mixed, - &audio_rtp_extensions, &video_rtp_extensions); + &offer_video_codecs); + AudioVideoRtpHeaderExtensions extensions_with_ids = + GetOfferedRtpHeaderExtensionsWithIds( + current_active_contents, session_options.offer_extmap_allow_mixed, + session_options.media_description_options); auto offer = std::make_unique(); @@ -1510,26 +1538,35 @@ std::unique_ptr MediaSessionDescriptionFactory::CreateOffer( } switch (media_description_options.type) { case MEDIA_TYPE_AUDIO: - if (!AddAudioContentForOffer( - media_description_options, session_options, current_content, - current_description, audio_rtp_extensions, offer_audio_codecs, - ¤t_streams, offer.get(), &ice_credentials)) { + if (!AddAudioContentForOffer(media_description_options, session_options, + current_content, current_description, + extensions_with_ids.audio, + offer_audio_codecs, ¤t_streams, + offer.get(), &ice_credentials)) { return nullptr; } break; case MEDIA_TYPE_VIDEO: - if (!AddVideoContentForOffer( - media_description_options, session_options, current_content, - current_description, video_rtp_extensions, offer_video_codecs, - ¤t_streams, offer.get(), &ice_credentials)) { + if (!AddVideoContentForOffer(media_description_options, session_options, + current_content, current_description, + extensions_with_ids.video, + offer_video_codecs, ¤t_streams, + offer.get(), &ice_credentials)) { return nullptr; } break; case MEDIA_TYPE_DATA: if (!AddDataContentForOffer(media_description_options, session_options, current_content, current_description, - offer_rtp_data_codecs, ¤t_streams, - offer.get(), &ice_credentials)) { + ¤t_streams, offer.get(), + 
&ice_credentials)) { + return nullptr; + } + break; + case MEDIA_TYPE_UNSUPPORTED: + if (!AddUnsupportedContentForOffer( + media_description_options, session_options, current_content, + current_description, offer.get(), &ice_credentials)) { return nullptr; } break; @@ -1620,25 +1657,26 @@ MediaSessionDescriptionFactory::CreateAnswer( // sections. AudioCodecs answer_audio_codecs; VideoCodecs answer_video_codecs; - RtpDataCodecs answer_rtp_data_codecs; GetCodecsForAnswer(current_active_contents, *offer, &answer_audio_codecs, - &answer_video_codecs, &answer_rtp_data_codecs); - - if (!session_options.vad_enabled) { - // If application doesn't want CN codecs in answer. - StripCNCodecs(&answer_audio_codecs); - } - FilterDataCodecs(&answer_rtp_data_codecs, - session_options.data_channel_type == DCT_SCTP); + &answer_video_codecs); auto answer = std::make_unique(); // If the offer supports BUNDLE, and we want to use it too, create a BUNDLE // group in the answer with the appropriate content names. - const ContentGroup* offer_bundle = offer->GetGroupByName(GROUP_TYPE_BUNDLE); - ContentGroup answer_bundle(GROUP_TYPE_BUNDLE); - // Transport info shared by the bundle group. - std::unique_ptr bundle_transport; + std::vector offer_bundles = + offer->GetGroupsByName(GROUP_TYPE_BUNDLE); + // There are as many answer BUNDLE groups as offer BUNDLE groups (even if + // rejected, we respond with an empty group). |offer_bundles|, + // |answer_bundles| and |bundle_transports| share the same size and indices. 
+ std::vector answer_bundles; + std::vector> bundle_transports; + answer_bundles.reserve(offer_bundles.size()); + bundle_transports.reserve(offer_bundles.size()); + for (size_t i = 0; i < offer_bundles.size(); ++i) { + answer_bundles.emplace_back(GROUP_TYPE_BUNDLE); + bundle_transports.emplace_back(nullptr); + } answer->set_extmap_allow_mixed(offer->extmap_allow_mixed()); @@ -1653,17 +1691,32 @@ MediaSessionDescriptionFactory::CreateAnswer( RTC_DCHECK( IsMediaContentOfType(offer_content, media_description_options.type)); RTC_DCHECK(media_description_options.mid == offer_content->name); + // Get the index of the BUNDLE group that this MID belongs to, if any. + absl::optional bundle_index; + for (size_t i = 0; i < offer_bundles.size(); ++i) { + if (offer_bundles[i]->HasContentName(media_description_options.mid)) { + bundle_index = i; + break; + } + } + TransportInfo* bundle_transport = + bundle_index.has_value() ? bundle_transports[bundle_index.value()].get() + : nullptr; + const ContentInfo* current_content = nullptr; if (current_description && msection_index < current_description->contents().size()) { current_content = ¤t_description->contents()[msection_index]; } + RtpHeaderExtensions header_extensions = RtpHeaderExtensionsFromCapabilities( + UnstoppedRtpHeaderExtensionCapabilities( + media_description_options.header_extensions)); switch (media_description_options.type) { case MEDIA_TYPE_AUDIO: if (!AddAudioContentForAnswer( media_description_options, session_options, offer_content, - offer, current_content, current_description, - bundle_transport.get(), answer_audio_codecs, ¤t_streams, + offer, current_content, current_description, bundle_transport, + answer_audio_codecs, header_extensions, ¤t_streams, answer.get(), &ice_credentials)) { return nullptr; } @@ -1671,8 +1724,8 @@ MediaSessionDescriptionFactory::CreateAnswer( case MEDIA_TYPE_VIDEO: if (!AddVideoContentForAnswer( media_description_options, session_options, offer_content, - offer, current_content, 
current_description, - bundle_transport.get(), answer_video_codecs, ¤t_streams, + offer, current_content, current_description, bundle_transport, + answer_video_codecs, header_extensions, ¤t_streams, answer.get(), &ice_credentials)) { return nullptr; } @@ -1680,12 +1733,19 @@ MediaSessionDescriptionFactory::CreateAnswer( case MEDIA_TYPE_DATA: if (!AddDataContentForAnswer( media_description_options, session_options, offer_content, - offer, current_content, current_description, - bundle_transport.get(), answer_rtp_data_codecs, + offer, current_content, current_description, bundle_transport, ¤t_streams, answer.get(), &ice_credentials)) { return nullptr; } break; + case MEDIA_TYPE_UNSUPPORTED: + if (!AddUnsupportedContentForAnswer( + media_description_options, session_options, offer_content, + offer, current_content, current_description, bundle_transport, + answer.get(), &ice_credentials)) { + return nullptr; + } + break; default: RTC_NOTREACHED(); } @@ -1693,37 +1753,41 @@ MediaSessionDescriptionFactory::CreateAnswer( // See if we can add the newly generated m= section to the BUNDLE group in // the answer. ContentInfo& added = answer->contents().back(); - if (!added.rejected && session_options.bundle_enabled && offer_bundle && - offer_bundle->HasContentName(added.name)) { - answer_bundle.AddContentName(added.name); - bundle_transport.reset( + if (!added.rejected && session_options.bundle_enabled && + bundle_index.has_value()) { + // The |bundle_index| is for |media_description_options.mid|. + RTC_DCHECK_EQ(media_description_options.mid, added.name); + answer_bundles[bundle_index.value()].AddContentName(added.name); + bundle_transports[bundle_index.value()].reset( new TransportInfo(*answer->GetTransportInfoByName(added.name))); } } - // If a BUNDLE group was offered, put a BUNDLE group in the answer even if - // it's empty. RFC5888 says: + // If BUNDLE group(s) were offered, put the same number of BUNDLE groups in + // the answer even if they're empty. 
RFC5888 says: // // A SIP entity that receives an offer that contains an "a=group" line // with semantics that are understood MUST return an answer that // contains an "a=group" line with the same semantics. - if (offer_bundle) { - answer->AddGroup(answer_bundle); - } - - if (answer_bundle.FirstContentName()) { - // Share the same ICE credentials and crypto params across all contents, - // as BUNDLE requires. - if (!UpdateTransportInfoForBundle(answer_bundle, answer.get())) { - RTC_LOG(LS_ERROR) - << "CreateAnswer failed to UpdateTransportInfoForBundle."; - return NULL; - } + if (!offer_bundles.empty()) { + for (const ContentGroup& answer_bundle : answer_bundles) { + answer->AddGroup(answer_bundle); + + if (answer_bundle.FirstContentName()) { + // Share the same ICE credentials and crypto params across all contents, + // as BUNDLE requires. + if (!UpdateTransportInfoForBundle(answer_bundle, answer.get())) { + RTC_LOG(LS_ERROR) + << "CreateAnswer failed to UpdateTransportInfoForBundle."; + return NULL; + } - if (!UpdateCryptoParamsForBundle(answer_bundle, answer.get())) { - RTC_LOG(LS_ERROR) - << "CreateAnswer failed to UpdateCryptoParamsForBundle."; - return NULL; + if (!UpdateCryptoParamsForBundle(answer_bundle, answer.get())) { + RTC_LOG(LS_ERROR) + << "CreateAnswer failed to UpdateCryptoParamsForBundle."; + return NULL; + } + } } } @@ -1768,16 +1832,15 @@ const AudioCodecs& MediaSessionDescriptionFactory::GetAudioCodecsForOffer( switch (direction) { // If stream is inactive - generate list as if sendrecv. 
case RtpTransceiverDirection::kSendRecv: + case RtpTransceiverDirection::kStopped: case RtpTransceiverDirection::kInactive: return audio_sendrecv_codecs_; case RtpTransceiverDirection::kSendOnly: return audio_send_codecs_; case RtpTransceiverDirection::kRecvOnly: return audio_recv_codecs_; - case RtpTransceiverDirection::kStopped: - RTC_NOTREACHED(); - return audio_sendrecv_codecs_; } + RTC_CHECK_NOTREACHED(); } const AudioCodecs& MediaSessionDescriptionFactory::GetAudioCodecsForAnswer( @@ -1787,6 +1850,7 @@ const AudioCodecs& MediaSessionDescriptionFactory::GetAudioCodecsForAnswer( // For inactive and sendrecv answers, generate lists as if we were to accept // the offer's direction. See RFC 3264 Section 6.1. case RtpTransceiverDirection::kSendRecv: + case RtpTransceiverDirection::kStopped: case RtpTransceiverDirection::kInactive: return GetAudioCodecsForOffer( webrtc::RtpTransceiverDirectionReversed(offer)); @@ -1794,10 +1858,8 @@ const AudioCodecs& MediaSessionDescriptionFactory::GetAudioCodecsForAnswer( return audio_send_codecs_; case RtpTransceiverDirection::kRecvOnly: return audio_recv_codecs_; - case RtpTransceiverDirection::kStopped: - RTC_NOTREACHED(); - return audio_sendrecv_codecs_; } + RTC_CHECK_NOTREACHED(); } const VideoCodecs& MediaSessionDescriptionFactory::GetVideoCodecsForOffer( @@ -1805,16 +1867,15 @@ const VideoCodecs& MediaSessionDescriptionFactory::GetVideoCodecsForOffer( switch (direction) { // If stream is inactive - generate list as if sendrecv. 
case RtpTransceiverDirection::kSendRecv: + case RtpTransceiverDirection::kStopped: case RtpTransceiverDirection::kInactive: return video_sendrecv_codecs_; case RtpTransceiverDirection::kSendOnly: return video_send_codecs_; case RtpTransceiverDirection::kRecvOnly: return video_recv_codecs_; - case RtpTransceiverDirection::kStopped: - RTC_NOTREACHED(); - return video_sendrecv_codecs_; } + RTC_CHECK_NOTREACHED(); } const VideoCodecs& MediaSessionDescriptionFactory::GetVideoCodecsForAnswer( @@ -1824,6 +1885,7 @@ const VideoCodecs& MediaSessionDescriptionFactory::GetVideoCodecsForAnswer( // For inactive and sendrecv answers, generate lists as if we were to accept // the offer's direction. See RFC 3264 Section 6.1. case RtpTransceiverDirection::kSendRecv: + case RtpTransceiverDirection::kStopped: case RtpTransceiverDirection::kInactive: return GetVideoCodecsForOffer( webrtc::RtpTransceiverDirectionReversed(offer)); @@ -1831,17 +1893,14 @@ const VideoCodecs& MediaSessionDescriptionFactory::GetVideoCodecsForAnswer( return video_send_codecs_; case RtpTransceiverDirection::kRecvOnly: return video_recv_codecs_; - case RtpTransceiverDirection::kStopped: - RTC_NOTREACHED(); - return video_sendrecv_codecs_; } + RTC_CHECK_NOTREACHED(); } void MergeCodecsFromDescription( const std::vector& current_active_contents, AudioCodecs* audio_codecs, VideoCodecs* video_codecs, - RtpDataCodecs* rtp_data_codecs, UsedPayloadTypes* used_pltypes) { for (const ContentInfo* content : current_active_contents) { if (IsMediaContentOfType(content, MEDIA_TYPE_AUDIO)) { @@ -1852,14 +1911,6 @@ void MergeCodecsFromDescription( const VideoContentDescription* video = content->media_description()->as_video(); MergeCodecs(video->codecs(), video_codecs, used_pltypes); - } else if (IsMediaContentOfType(content, MEDIA_TYPE_DATA)) { - const RtpDataContentDescription* data = - content->media_description()->as_rtp_data(); - if (data) { - // Only relevant for RTP datachannels - MergeCodecs(data->codecs(), 
rtp_data_codecs, - used_pltypes); - } } } } @@ -1873,19 +1924,17 @@ void MergeCodecsFromDescription( void MediaSessionDescriptionFactory::GetCodecsForOffer( const std::vector& current_active_contents, AudioCodecs* audio_codecs, - VideoCodecs* video_codecs, - RtpDataCodecs* rtp_data_codecs) const { + VideoCodecs* video_codecs) const { // First - get all codecs from the current description if the media type // is used. Add them to |used_pltypes| so the payload type is not reused if a // new media type is added. UsedPayloadTypes used_pltypes; MergeCodecsFromDescription(current_active_contents, audio_codecs, - video_codecs, rtp_data_codecs, &used_pltypes); + video_codecs, &used_pltypes); // Add our codecs that are not in the current description. MergeCodecs(all_audio_codecs_, audio_codecs, &used_pltypes); MergeCodecs(all_video_codecs_, video_codecs, &used_pltypes); - MergeCodecs(rtp_data_codecs_, rtp_data_codecs, &used_pltypes); } // Getting codecs for an answer involves these steps: @@ -1899,19 +1948,17 @@ void MediaSessionDescriptionFactory::GetCodecsForAnswer( const std::vector& current_active_contents, const SessionDescription& remote_offer, AudioCodecs* audio_codecs, - VideoCodecs* video_codecs, - RtpDataCodecs* rtp_data_codecs) const { + VideoCodecs* video_codecs) const { // First - get all codecs from the current description if the media type // is used. Add them to |used_pltypes| so the payload type is not reused if a // new media type is added. UsedPayloadTypes used_pltypes; MergeCodecsFromDescription(current_active_contents, audio_codecs, - video_codecs, rtp_data_codecs, &used_pltypes); + video_codecs, &used_pltypes); // Second - filter out codecs that we don't support at all and should ignore. 
AudioCodecs filtered_offered_audio_codecs; VideoCodecs filtered_offered_video_codecs; - RtpDataCodecs filtered_offered_rtp_data_codecs; for (const ContentInfo& content : remote_offer.contents()) { if (IsMediaContentOfType(&content, MEDIA_TYPE_AUDIO)) { const AudioContentDescription* audio = @@ -1937,22 +1984,6 @@ void MediaSessionDescriptionFactory::GetCodecsForAnswer( filtered_offered_video_codecs.push_back(offered_video_codec); } } - } else if (IsMediaContentOfType(&content, MEDIA_TYPE_DATA)) { - const RtpDataContentDescription* data = - content.media_description()->as_rtp_data(); - if (data) { - // RTP data. This part is inactive for SCTP data. - for (const RtpDataCodec& offered_rtp_data_codec : data->codecs()) { - if (!FindMatchingCodec( - data->codecs(), filtered_offered_rtp_data_codecs, - offered_rtp_data_codec, nullptr) && - FindMatchingCodec(data->codecs(), rtp_data_codecs_, - offered_rtp_data_codec, - nullptr)) { - filtered_offered_rtp_data_codecs.push_back(offered_rtp_data_codec); - } - } - } } } @@ -1962,15 +1993,14 @@ void MediaSessionDescriptionFactory::GetCodecsForAnswer( &used_pltypes); MergeCodecs(filtered_offered_video_codecs, video_codecs, &used_pltypes); - MergeCodecs(filtered_offered_rtp_data_codecs, rtp_data_codecs, - &used_pltypes); } -void MediaSessionDescriptionFactory::GetRtpHdrExtsToOffer( +MediaSessionDescriptionFactory::AudioVideoRtpHeaderExtensions +MediaSessionDescriptionFactory::GetOfferedRtpHeaderExtensionsWithIds( const std::vector& current_active_contents, bool extmap_allow_mixed, - RtpHeaderExtensions* offer_audio_extensions, - RtpHeaderExtensions* offer_video_extensions) const { + const std::vector& media_description_options) + const { // All header extensions allocated from the same range to avoid potential // issues when using BUNDLE. 
@@ -1984,6 +2014,7 @@ void MediaSessionDescriptionFactory::GetRtpHdrExtsToOffer( RtpHeaderExtensions all_regular_extensions; RtpHeaderExtensions all_encrypted_extensions; + AudioVideoRtpHeaderExtensions offered_extensions; // First - get all extensions from the current description if the media type // is used. // Add them to |used_ids| so the local ids are not reused if a new media @@ -1992,36 +2023,45 @@ void MediaSessionDescriptionFactory::GetRtpHdrExtsToOffer( if (IsMediaContentOfType(content, MEDIA_TYPE_AUDIO)) { const AudioContentDescription* audio = content->media_description()->as_audio(); - MergeRtpHdrExts(audio->rtp_header_extensions(), offer_audio_extensions, + MergeRtpHdrExts(audio->rtp_header_extensions(), &offered_extensions.audio, &all_regular_extensions, &all_encrypted_extensions, &used_ids); } else if (IsMediaContentOfType(content, MEDIA_TYPE_VIDEO)) { const VideoContentDescription* video = content->media_description()->as_video(); - MergeRtpHdrExts(video->rtp_header_extensions(), offer_video_extensions, + MergeRtpHdrExts(video->rtp_header_extensions(), &offered_extensions.video, &all_regular_extensions, &all_encrypted_extensions, &used_ids); } } - // Add our default RTP header extensions that are not in the current - // description. - MergeRtpHdrExts(audio_rtp_header_extensions(), offer_audio_extensions, - &all_regular_extensions, &all_encrypted_extensions, - &used_ids); - MergeRtpHdrExts(video_rtp_header_extensions(), offer_video_extensions, - &all_regular_extensions, &all_encrypted_extensions, - &used_ids); + // Add all encountered header extensions in the media description options that + // are not in the current description. 
+ for (const auto& entry : media_description_options) { + RtpHeaderExtensions filtered_extensions = + filtered_rtp_header_extensions(UnstoppedOrPresentRtpHeaderExtensions( + entry.header_extensions, all_regular_extensions, + all_encrypted_extensions)); + if (entry.type == MEDIA_TYPE_AUDIO) + MergeRtpHdrExts(filtered_extensions, &offered_extensions.audio, + &all_regular_extensions, &all_encrypted_extensions, + &used_ids); + else if (entry.type == MEDIA_TYPE_VIDEO) + MergeRtpHdrExts(filtered_extensions, &offered_extensions.video, + &all_regular_extensions, &all_encrypted_extensions, + &used_ids); + } // TODO(jbauch): Support adding encrypted header extensions to existing // sessions. if (enable_encrypted_rtp_header_extensions_ && current_active_contents.empty()) { - AddEncryptedVersionsOfHdrExts(offer_audio_extensions, + AddEncryptedVersionsOfHdrExts(&offered_extensions.audio, &all_encrypted_extensions, &used_ids); - AddEncryptedVersionsOfHdrExts(offer_video_extensions, + AddEncryptedVersionsOfHdrExts(&offered_extensions.video, &all_encrypted_extensions, &used_ids); } + return offered_extensions; } bool MediaSessionDescriptionFactory::AddTransportOffer( @@ -2134,6 +2174,10 @@ bool MediaSessionDescriptionFactory::AddAudioContentForOffer( } } } + if (!session_options.vad_enabled) { + // If application doesn't want CN codecs in offer. + StripCNCodecs(&filtered_codecs); + } cricket::SecurePolicy sdes_policy = IsDtlsActive(current_content, current_description) ? 
cricket::SEC_DISABLED @@ -2261,7 +2305,7 @@ bool MediaSessionDescriptionFactory::AddVideoContentForOffer( return true; } -bool MediaSessionDescriptionFactory::AddSctpDataContentForOffer( +bool MediaSessionDescriptionFactory::AddDataContentForOffer( const MediaDescriptionOptions& media_description_options, const MediaSessionOptions& session_options, const ContentInfo* current_content, @@ -2306,36 +2350,23 @@ bool MediaSessionDescriptionFactory::AddSctpDataContentForOffer( return true; } -bool MediaSessionDescriptionFactory::AddRtpDataContentForOffer( +bool MediaSessionDescriptionFactory::AddUnsupportedContentForOffer( const MediaDescriptionOptions& media_description_options, const MediaSessionOptions& session_options, const ContentInfo* current_content, const SessionDescription* current_description, - const RtpDataCodecs& rtp_data_codecs, - StreamParamsVec* current_streams, SessionDescription* desc, IceCredentialsIterator* ice_credentials) const { - auto data = std::make_unique(); - bool secure_transport = (transport_desc_factory_->secure() != SEC_DISABLED); + RTC_CHECK(IsMediaContentOfType(current_content, MEDIA_TYPE_UNSUPPORTED)); - cricket::SecurePolicy sdes_policy = - IsDtlsActive(current_content, current_description) ? 
cricket::SEC_DISABLED - : secure(); - std::vector crypto_suites; - GetSupportedDataSdesCryptoSuiteNames(session_options.crypto_options, - &crypto_suites); - if (!CreateMediaContentOffer(media_description_options, session_options, - rtp_data_codecs, sdes_policy, - GetCryptos(current_content), crypto_suites, - RtpHeaderExtensions(), ssrc_generator_, - current_streams, data.get())) { - return false; - } + const UnsupportedContentDescription* current_unsupported_description = + current_content->media_description()->as_unsupported(); + auto unsupported = std::make_unique( + current_unsupported_description->media_type()); + unsupported->set_protocol(current_content->media_description()->protocol()); + desc->AddContent(media_description_options.mid, MediaProtocolType::kOther, + /*rejected=*/true, std::move(unsupported)); - data->set_bandwidth(kDataMaxBandwidth); - SetMediaProtocol(secure_transport, data.get()); - desc->AddContent(media_description_options.mid, MediaProtocolType::kRtp, - media_description_options.stopped, std::move(data)); if (!AddTransportOffer(media_description_options.mid, media_description_options.transport_options, current_description, desc, ice_credentials)) { @@ -2344,37 +2375,6 @@ bool MediaSessionDescriptionFactory::AddRtpDataContentForOffer( return true; } -bool MediaSessionDescriptionFactory::AddDataContentForOffer( - const MediaDescriptionOptions& media_description_options, - const MediaSessionOptions& session_options, - const ContentInfo* current_content, - const SessionDescription* current_description, - const RtpDataCodecs& rtp_data_codecs, - StreamParamsVec* current_streams, - SessionDescription* desc, - IceCredentialsIterator* ice_credentials) const { - bool is_sctp = - (session_options.data_channel_type == DCT_SCTP || - session_options.data_channel_type == DCT_DATA_CHANNEL_TRANSPORT_SCTP); - // If the DataChannel type is not specified, use the DataChannel type in - // the current description. 
- if (session_options.data_channel_type == DCT_NONE && current_content) { - RTC_CHECK(IsMediaContentOfType(current_content, MEDIA_TYPE_DATA)); - is_sctp = (current_content->media_description()->protocol() == - kMediaProtocolSctp); - } - if (is_sctp) { - return AddSctpDataContentForOffer( - media_description_options, session_options, current_content, - current_description, current_streams, desc, ice_credentials); - } else { - return AddRtpDataContentForOffer(media_description_options, session_options, - current_content, current_description, - rtp_data_codecs, current_streams, desc, - ice_credentials); - } -} - // |audio_codecs| = set of all possible codecs that can be used, with correct // payload type mappings // @@ -2396,6 +2396,7 @@ bool MediaSessionDescriptionFactory::AddAudioContentForAnswer( const SessionDescription* current_description, const TransportInfo* bundle_transport, const AudioCodecs& audio_codecs, + const RtpHeaderExtensions& default_audio_rtp_header_extensions, StreamParamsVec* current_streams, SessionDescription* answer, IceCredentialsIterator* ice_credentials) const { @@ -2452,6 +2453,10 @@ bool MediaSessionDescriptionFactory::AddAudioContentForAnswer( } } } + if (!session_options.vad_enabled) { + // If application doesn't want CN codecs in answer. 
+ StripCNCodecs(&filtered_codecs); + } bool bundle_enabled = offer_description->HasGroup(GROUP_TYPE_BUNDLE) && session_options.bundle_enabled; @@ -2468,9 +2473,9 @@ bool MediaSessionDescriptionFactory::AddAudioContentForAnswer( if (!CreateMediaContentAnswer( offer_audio_description, media_description_options, session_options, sdes_policy, GetCryptos(current_content), - audio_rtp_header_extensions(), ssrc_generator_, - enable_encrypted_rtp_header_extensions_, current_streams, - bundle_enabled, audio_answer.get())) { + filtered_rtp_header_extensions(default_audio_rtp_header_extensions), + ssrc_generator_, enable_encrypted_rtp_header_extensions_, + current_streams, bundle_enabled, audio_answer.get())) { return false; // Fails the session setup. } @@ -2506,6 +2511,7 @@ bool MediaSessionDescriptionFactory::AddVideoContentForAnswer( const SessionDescription* current_description, const TransportInfo* bundle_transport, const VideoCodecs& video_codecs, + const RtpHeaderExtensions& default_video_rtp_header_extensions, StreamParamsVec* current_streams, SessionDescription* answer, IceCredentialsIterator* ice_credentials) const { @@ -2586,9 +2592,9 @@ bool MediaSessionDescriptionFactory::AddVideoContentForAnswer( if (!CreateMediaContentAnswer( offer_video_description, media_description_options, session_options, sdes_policy, GetCryptos(current_content), - video_rtp_header_extensions(), ssrc_generator_, - enable_encrypted_rtp_header_extensions_, current_streams, - bundle_enabled, video_answer.get())) { + filtered_rtp_header_extensions(default_video_rtp_header_extensions), + ssrc_generator_, enable_encrypted_rtp_header_extensions_, + current_streams, bundle_enabled, video_answer.get())) { return false; // Failed the sessin setup. } bool secure = bundle_transport ? 
bundle_transport->description.secure() @@ -2621,7 +2627,6 @@ bool MediaSessionDescriptionFactory::AddDataContentForAnswer( const ContentInfo* current_content, const SessionDescription* current_description, const TransportInfo* bundle_transport, - const RtpDataCodecs& rtp_data_codecs, StreamParamsVec* current_streams, SessionDescription* answer, IceCredentialsIterator* ice_credentials) const { @@ -2669,32 +2674,13 @@ bool MediaSessionDescriptionFactory::AddDataContentForAnswer( bool offer_uses_sctpmap = offer_data_description->use_sctpmap(); data_answer->as_sctp()->set_use_sctpmap(offer_uses_sctpmap); } else { - // RTP offer - data_answer = std::make_unique(); - - const RtpDataContentDescription* offer_data_description = - offer_content->media_description()->as_rtp_data(); - RTC_CHECK(offer_data_description); - if (!SetCodecsInAnswer(offer_data_description, rtp_data_codecs, - media_description_options, session_options, - ssrc_generator_, current_streams, - data_answer->as_rtp_data())) { - return false; - } - if (!CreateMediaContentAnswer( - offer_data_description, media_description_options, session_options, - sdes_policy, GetCryptos(current_content), RtpHeaderExtensions(), - ssrc_generator_, enable_encrypted_rtp_header_extensions_, - current_streams, bundle_enabled, data_answer.get())) { - return false; // Fails the session setup. - } + RTC_NOTREACHED() << "Non-SCTP data content found"; } bool secure = bundle_transport ? 
bundle_transport->description.secure() : data_transport->secure(); - bool rejected = session_options.data_channel_type == DCT_NONE || - media_description_options.stopped || + bool rejected = media_description_options.stopped || offer_content->rejected || !IsMediaProtocolSupported(MEDIA_TYPE_DATA, data_answer->protocol(), secure); @@ -2703,18 +2689,47 @@ bool MediaSessionDescriptionFactory::AddDataContentForAnswer( return false; } - if (!rejected) { - data_answer->set_bandwidth(kDataMaxBandwidth); - } else { - // RFC 3264 - // The answer MUST contain the same number of m-lines as the offer. - RTC_LOG(LS_INFO) << "Data is not supported in the answer."; - } answer->AddContent(media_description_options.mid, offer_content->type, rejected, std::move(data_answer)); return true; } +bool MediaSessionDescriptionFactory::AddUnsupportedContentForAnswer( + const MediaDescriptionOptions& media_description_options, + const MediaSessionOptions& session_options, + const ContentInfo* offer_content, + const SessionDescription* offer_description, + const ContentInfo* current_content, + const SessionDescription* current_description, + const TransportInfo* bundle_transport, + SessionDescription* answer, + IceCredentialsIterator* ice_credentials) const { + std::unique_ptr unsupported_transport = + CreateTransportAnswer(media_description_options.mid, offer_description, + media_description_options.transport_options, + current_description, bundle_transport != nullptr, + ice_credentials); + if (!unsupported_transport) { + return false; + } + RTC_CHECK(IsMediaContentOfType(offer_content, MEDIA_TYPE_UNSUPPORTED)); + + const UnsupportedContentDescription* offer_unsupported_description = + offer_content->media_description()->as_unsupported(); + std::unique_ptr unsupported_answer = + std::make_unique( + offer_unsupported_description->media_type()); + unsupported_answer->set_protocol(offer_unsupported_description->protocol()); + + if (!AddTransportAnswer(media_description_options.mid, + 
*(unsupported_transport.get()), answer)) { + return false; + } + answer->AddContent(media_description_options.mid, offer_content->type, + /*rejected=*/true, std::move(unsupported_answer)); + return true; +} + void MediaSessionDescriptionFactory::ComputeAudioCodecsIntersectionAndUnion() { audio_sendrecv_codecs_.clear(); all_audio_codecs_.clear(); @@ -2792,6 +2807,10 @@ bool IsDataContent(const ContentInfo* content) { return IsMediaContentOfType(content, MEDIA_TYPE_DATA); } +bool IsUnsupportedContent(const ContentInfo* content) { + return IsMediaContentOfType(content, MEDIA_TYPE_UNSUPPORTED); +} + const ContentInfo* GetFirstMediaContent(const ContentInfos& contents, MediaType media_type) { for (const ContentInfo& content : contents) { @@ -2854,12 +2873,6 @@ const VideoContentDescription* GetFirstVideoContentDescription( return desc ? desc->as_video() : nullptr; } -const RtpDataContentDescription* GetFirstRtpDataContentDescription( - const SessionDescription* sdesc) { - auto desc = GetFirstMediaContentDescription(sdesc, MEDIA_TYPE_DATA); - return desc ? desc->as_rtp_data() : nullptr; -} - const SctpDataContentDescription* GetFirstSctpDataContentDescription( const SessionDescription* sdesc) { auto desc = GetFirstMediaContentDescription(sdesc, MEDIA_TYPE_DATA); @@ -2932,12 +2945,6 @@ VideoContentDescription* GetFirstVideoContentDescription( return desc ? desc->as_video() : nullptr; } -RtpDataContentDescription* GetFirstRtpDataContentDescription( - SessionDescription* sdesc) { - auto desc = GetFirstMediaContentDescription(sdesc, MEDIA_TYPE_DATA); - return desc ? 
desc->as_rtp_data() : nullptr; -} - SctpDataContentDescription* GetFirstSctpDataContentDescription( SessionDescription* sdesc) { auto desc = GetFirstMediaContentDescription(sdesc, MEDIA_TYPE_DATA); diff --git a/pc/media_session.h b/pc/media_session.h index ef83834318..d4c8025bc0 100644 --- a/pc/media_session.h +++ b/pc/media_session.h @@ -18,14 +18,21 @@ #include #include +#include "api/crypto/crypto_options.h" #include "api/media_types.h" +#include "api/rtp_parameters.h" +#include "api/rtp_transceiver_direction.h" #include "media/base/media_constants.h" -#include "media/base/media_engine.h" // For DataChannelType +#include "media/base/rid_description.h" +#include "media/base/stream_params.h" #include "p2p/base/ice_credentials_iterator.h" +#include "p2p/base/transport_description.h" #include "p2p/base/transport_description_factory.h" +#include "p2p/base/transport_info.h" #include "pc/jsep_transport.h" #include "pc/media_protocol_names.h" #include "pc/session_description.h" +#include "pc/simulcast_description.h" #include "rtc_base/unique_id_generator.h" namespace cricket { @@ -65,10 +72,6 @@ struct MediaDescriptionOptions { const SimulcastLayerList& simulcast_layers, int num_sim_layers); - // Internally just uses sender_options. - void AddRtpDataChannel(const std::string& track_id, - const std::string& stream_id); - MediaType type; std::string mid; webrtc::RtpTransceiverDirection direction; @@ -78,7 +81,7 @@ struct MediaDescriptionOptions { // stream information goes in the local descriptions. std::vector sender_options; std::vector codec_preferences; - absl::optional alt_protocol; + std::vector header_extensions; private: // Doesn't DCHECK on |type|. @@ -102,7 +105,6 @@ struct MediaSessionOptions { bool HasMediaDescription(MediaType type) const; - DataChannelType data_channel_type = DCT_NONE; bool vad_enabled = true; // When disabled, removes all CN codecs from SDP. 
bool rtcp_mux_enabled = true; bool bundle_enabled = false; @@ -147,23 +149,13 @@ class MediaSessionDescriptionFactory { const AudioCodecs& audio_recv_codecs() const; void set_audio_codecs(const AudioCodecs& send_codecs, const AudioCodecs& recv_codecs); - void set_audio_rtp_header_extensions(const RtpHeaderExtensions& extensions) { - audio_rtp_extensions_ = extensions; - } - RtpHeaderExtensions audio_rtp_header_extensions() const; const VideoCodecs& video_sendrecv_codecs() const; const VideoCodecs& video_send_codecs() const; const VideoCodecs& video_recv_codecs() const; void set_video_codecs(const VideoCodecs& send_codecs, const VideoCodecs& recv_codecs); - void set_video_rtp_header_extensions(const RtpHeaderExtensions& extensions) { - video_rtp_extensions_ = extensions; - } - RtpHeaderExtensions video_rtp_header_extensions() const; - const RtpDataCodecs& rtp_data_codecs() const { return rtp_data_codecs_; } - void set_rtp_data_codecs(const RtpDataCodecs& codecs) { - rtp_data_codecs_ = codecs; - } + RtpHeaderExtensions filtered_rtp_header_extensions( + RtpHeaderExtensions extensions) const; SecurePolicy secure() const { return secure_; } void set_secure(SecurePolicy s) { secure_ = s; } @@ -184,6 +176,11 @@ class MediaSessionDescriptionFactory { const SessionDescription* current_description) const; private: + struct AudioVideoRtpHeaderExtensions { + RtpHeaderExtensions audio; + RtpHeaderExtensions video; + }; + const AudioCodecs& GetAudioCodecsForOffer( const webrtc::RtpTransceiverDirection& direction) const; const AudioCodecs& GetAudioCodecsForAnswer( @@ -197,19 +194,17 @@ class MediaSessionDescriptionFactory { void GetCodecsForOffer( const std::vector& current_active_contents, AudioCodecs* audio_codecs, - VideoCodecs* video_codecs, - RtpDataCodecs* rtp_data_codecs) const; + VideoCodecs* video_codecs) const; void GetCodecsForAnswer( const std::vector& current_active_contents, const SessionDescription& remote_offer, AudioCodecs* audio_codecs, - VideoCodecs* 
video_codecs, - RtpDataCodecs* rtp_data_codecs) const; - void GetRtpHdrExtsToOffer( + VideoCodecs* video_codecs) const; + AudioVideoRtpHeaderExtensions GetOfferedRtpHeaderExtensionsWithIds( const std::vector& current_active_contents, bool extmap_allow_mixed, - RtpHeaderExtensions* audio_extensions, - RtpHeaderExtensions* video_extensions) const; + const std::vector& media_description_options) + const; bool AddTransportOffer(const std::string& content_name, const TransportOptions& transport_options, const SessionDescription* current_desc, @@ -254,33 +249,20 @@ class MediaSessionDescriptionFactory { SessionDescription* desc, IceCredentialsIterator* ice_credentials) const; - bool AddSctpDataContentForOffer( - const MediaDescriptionOptions& media_description_options, - const MediaSessionOptions& session_options, - const ContentInfo* current_content, - const SessionDescription* current_description, - StreamParamsVec* current_streams, - SessionDescription* desc, - IceCredentialsIterator* ice_credentials) const; - bool AddRtpDataContentForOffer( + bool AddDataContentForOffer( const MediaDescriptionOptions& media_description_options, const MediaSessionOptions& session_options, const ContentInfo* current_content, const SessionDescription* current_description, - const RtpDataCodecs& rtp_data_codecs, StreamParamsVec* current_streams, SessionDescription* desc, IceCredentialsIterator* ice_credentials) const; - // This function calls either AddRtpDataContentForOffer or - // AddSctpDataContentForOffer depending on protocol. - // The codecs argument is ignored for SCTP. 
- bool AddDataContentForOffer( + + bool AddUnsupportedContentForOffer( const MediaDescriptionOptions& media_description_options, const MediaSessionOptions& session_options, const ContentInfo* current_content, const SessionDescription* current_description, - const RtpDataCodecs& rtp_data_codecs, - StreamParamsVec* current_streams, SessionDescription* desc, IceCredentialsIterator* ice_credentials) const; @@ -293,6 +275,7 @@ class MediaSessionDescriptionFactory { const SessionDescription* current_description, const TransportInfo* bundle_transport, const AudioCodecs& audio_codecs, + const RtpHeaderExtensions& default_audio_rtp_header_extensions, StreamParamsVec* current_streams, SessionDescription* answer, IceCredentialsIterator* ice_credentials) const; @@ -306,6 +289,7 @@ class MediaSessionDescriptionFactory { const SessionDescription* current_description, const TransportInfo* bundle_transport, const VideoCodecs& video_codecs, + const RtpHeaderExtensions& default_video_rtp_header_extensions, StreamParamsVec* current_streams, SessionDescription* answer, IceCredentialsIterator* ice_credentials) const; @@ -318,11 +302,21 @@ class MediaSessionDescriptionFactory { const ContentInfo* current_content, const SessionDescription* current_description, const TransportInfo* bundle_transport, - const RtpDataCodecs& rtp_data_codecs, StreamParamsVec* current_streams, SessionDescription* answer, IceCredentialsIterator* ice_credentials) const; + bool AddUnsupportedContentForAnswer( + const MediaDescriptionOptions& media_description_options, + const MediaSessionOptions& session_options, + const ContentInfo* offer_content, + const SessionDescription* offer_description, + const ContentInfo* current_content, + const SessionDescription* current_description, + const TransportInfo* bundle_transport, + SessionDescription* answer, + IceCredentialsIterator* ice_credentials) const; + void ComputeAudioCodecsIntersectionAndUnion(); void ComputeVideoCodecsIntersectionAndUnion(); @@ -334,15 +328,12 
@@ class MediaSessionDescriptionFactory { AudioCodecs audio_sendrecv_codecs_; // Union of send and recv. AudioCodecs all_audio_codecs_; - RtpHeaderExtensions audio_rtp_extensions_; VideoCodecs video_send_codecs_; VideoCodecs video_recv_codecs_; // Intersection of send and recv. VideoCodecs video_sendrecv_codecs_; // Union of send and recv. VideoCodecs all_video_codecs_; - RtpHeaderExtensions video_rtp_extensions_; - RtpDataCodecs rtp_data_codecs_; // This object is not owned by the channel so it must outlive it. rtc::UniqueRandomIdGenerator* const ssrc_generator_; bool enable_encrypted_rtp_header_extensions_ = false; @@ -357,6 +348,7 @@ bool IsMediaContent(const ContentInfo* content); bool IsAudioContent(const ContentInfo* content); bool IsVideoContent(const ContentInfo* content); bool IsDataContent(const ContentInfo* content); +bool IsUnsupportedContent(const ContentInfo* content); const ContentInfo* GetFirstMediaContent(const ContentInfos& contents, MediaType media_type); const ContentInfo* GetFirstAudioContent(const ContentInfos& contents); @@ -371,8 +363,6 @@ const AudioContentDescription* GetFirstAudioContentDescription( const SessionDescription* sdesc); const VideoContentDescription* GetFirstVideoContentDescription( const SessionDescription* sdesc); -const RtpDataContentDescription* GetFirstRtpDataContentDescription( - const SessionDescription* sdesc); const SctpDataContentDescription* GetFirstSctpDataContentDescription( const SessionDescription* sdesc); // Non-const versions of the above functions. 
@@ -390,8 +380,6 @@ AudioContentDescription* GetFirstAudioContentDescription( SessionDescription* sdesc); VideoContentDescription* GetFirstVideoContentDescription( SessionDescription* sdesc); -RtpDataContentDescription* GetFirstRtpDataContentDescription( - SessionDescription* sdesc); SctpDataContentDescription* GetFirstSctpDataContentDescription( SessionDescription* sdesc); diff --git a/pc/media_session_unittest.cc b/pc/media_session_unittest.cc index ba4db0a674..c7c07fc527 100644 --- a/pc/media_session_unittest.cc +++ b/pc/media_session_unittest.cc @@ -34,6 +34,7 @@ #include "rtc_base/ssl_adapter.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/unique_id_generator.h" +#include "test/field_trial.h" #include "test/gmock.h" #define ASSERT_CRYPTO(cd, s, cs) \ @@ -49,7 +50,6 @@ using cricket::CryptoParamsVec; using cricket::GetFirstAudioContent; using cricket::GetFirstAudioContentDescription; using cricket::GetFirstDataContent; -using cricket::GetFirstRtpDataContentDescription; using cricket::GetFirstVideoContent; using cricket::GetFirstVideoContentDescription; using cricket::kAutoBandwidth; @@ -64,8 +64,6 @@ using cricket::MediaSessionOptions; using cricket::MediaType; using cricket::RidDescription; using cricket::RidDirection; -using cricket::RtpDataCodec; -using cricket::RtpDataContentDescription; using cricket::SctpDataContentDescription; using cricket::SEC_DISABLED; using cricket::SEC_ENABLED; @@ -132,15 +130,6 @@ static const VideoCodec kVideoCodecs2[] = {VideoCodec(126, "H264"), static const VideoCodec kVideoCodecsAnswer[] = {VideoCodec(97, "H264")}; -static const RtpDataCodec kDataCodecs1[] = {RtpDataCodec(98, "binary-data"), - RtpDataCodec(99, "utf8-text")}; - -static const RtpDataCodec kDataCodecs2[] = {RtpDataCodec(126, "binary-data"), - RtpDataCodec(127, "utf8-text")}; - -static const RtpDataCodec kDataCodecsAnswer[] = { - RtpDataCodec(98, "binary-data"), RtpDataCodec(99, "utf8-text")}; - static const RtpExtension kAudioRtpExtension1[] = { 
RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8), RtpExtension("http://google.com/testing/audio_something", 10), @@ -150,6 +139,7 @@ static const RtpExtension kAudioRtpExtensionEncrypted1[] = { RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8), RtpExtension("http://google.com/testing/audio_something", 10), RtpExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 12, true), + RtpExtension("http://google.com/testing/audio_something", 11, true), }; static const RtpExtension kAudioRtpExtension2[] = { @@ -172,7 +162,15 @@ static const RtpExtension kAudioRtpExtension3ForEncryption[] = { static const RtpExtension kAudioRtpExtension3ForEncryptionOffer[] = { RtpExtension("http://google.com/testing/audio_something", 2), RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 3), - RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 14, true), + RtpExtension("http://google.com/testing/audio_something", 14, true), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 13, true), +}; + +static const RtpExtension kVideoRtpExtension3ForEncryptionOffer[] = { + RtpExtension("http://google.com/testing/video_something", 4), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 3), + RtpExtension("http://google.com/testing/video_something", 12, true), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 13, true), }; static const RtpExtension kAudioRtpExtensionAnswer[] = { @@ -191,7 +189,8 @@ static const RtpExtension kVideoRtpExtension1[] = { static const RtpExtension kVideoRtpExtensionEncrypted1[] = { RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 14), RtpExtension("http://google.com/testing/video_something", 13), - RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 11, true), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 9, true), + RtpExtension("http://google.com/testing/video_something", 7, true), }; static const RtpExtension kVideoRtpExtension2[] = { @@ -216,7 +215,7 @@ static const RtpExtension kVideoRtpExtensionAnswer[] = { }; static const 
RtpExtension kVideoRtpExtensionEncryptedAnswer[] = { - RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 11, true), + RtpExtension("urn:ietf:params:rtp-hdrext:toffset", 9, true), }; static const RtpExtension kRtpExtensionTransportSequenceNumber01[] = { @@ -259,9 +258,6 @@ static const char kVideoTrack2[] = "video_2"; static const char kAudioTrack1[] = "audio_1"; static const char kAudioTrack2[] = "audio_2"; static const char kAudioTrack3[] = "audio_3"; -static const char kDataTrack1[] = "data_1"; -static const char kDataTrack2[] = "data_2"; -static const char kDataTrack3[] = "data_3"; static const char* kMediaProtocols[] = {"RTP/AVP", "RTP/SAVP", "RTP/AVPF", "RTP/SAVPF"}; @@ -343,10 +339,8 @@ static void AddAudioVideoSections(RtpTransceiverDirection direction, opts); } -static void AddDataSection(cricket::DataChannelType dct, - RtpTransceiverDirection direction, +static void AddDataSection(RtpTransceiverDirection direction, MediaSessionOptions* opts) { - opts->data_channel_type = dct; AddMediaDescriptionOptions(MEDIA_TYPE_DATA, "data", direction, kActive, opts); } @@ -368,10 +362,6 @@ static void AttachSenderToMediaDescriptionOptions( it->AddVideoSender(track_id, stream_ids, rids, simulcast_layers, num_sim_layer); break; - case MEDIA_TYPE_DATA: - RTC_CHECK(stream_ids.size() == 1U); - it->AddRtpDataChannel(track_id, stream_ids[0]); - break; default: RTC_NOTREACHED(); } @@ -436,12 +426,10 @@ class MediaSessionDescriptionFactoryTest : public ::testing::Test { MAKE_VECTOR(kAudioCodecs1)); f1_.set_video_codecs(MAKE_VECTOR(kVideoCodecs1), MAKE_VECTOR(kVideoCodecs1)); - f1_.set_rtp_data_codecs(MAKE_VECTOR(kDataCodecs1)); f2_.set_audio_codecs(MAKE_VECTOR(kAudioCodecs2), MAKE_VECTOR(kAudioCodecs2)); f2_.set_video_codecs(MAKE_VECTOR(kVideoCodecs2), MAKE_VECTOR(kVideoCodecs2)); - f2_.set_rtp_data_codecs(MAKE_VECTOR(kDataCodecs2)); tdf1_.set_certificate(rtc::RTCCertificate::Create( std::unique_ptr(new rtc::FakeSSLIdentity("id1")))); 
tdf2_.set_certificate(rtc::RTCCertificate::Create( @@ -541,9 +529,6 @@ class MediaSessionDescriptionFactoryTest : public ::testing::Test { EXPECT_EQ( media_desc_options_it->transport_options.enable_ice_renomination, GetIceRenomination(ti_audio)); - EXPECT_EQ(media_desc_options_it->transport_options.opaque_parameters, - ti_audio->description.opaque_parameters); - } else { EXPECT_TRUE(ti_audio == NULL); } @@ -556,8 +541,6 @@ class MediaSessionDescriptionFactoryTest : public ::testing::Test { EXPECT_EQ(ti_audio->description.ice_ufrag, ti_video->description.ice_ufrag); EXPECT_EQ(ti_audio->description.ice_pwd, ti_video->description.ice_pwd); - EXPECT_EQ(ti_audio->description.opaque_parameters, - ti_video->description.opaque_parameters); } else { if (has_current_desc) { EXPECT_EQ(current_video_ufrag, ti_video->description.ice_ufrag); @@ -568,8 +551,6 @@ class MediaSessionDescriptionFactoryTest : public ::testing::Test { EXPECT_EQ(static_cast(cricket::ICE_PWD_LENGTH), ti_video->description.ice_pwd.size()); } - EXPECT_EQ(media_desc_options_it->transport_options.opaque_parameters, - ti_video->description.opaque_parameters); } EXPECT_EQ( media_desc_options_it->transport_options.enable_ice_renomination, @@ -610,8 +591,6 @@ class MediaSessionDescriptionFactoryTest : public ::testing::Test { f1_.set_secure(SEC_ENABLED); MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); std::unique_ptr ref_desc; std::unique_ptr desc; if (offer) { @@ -754,13 +733,10 @@ class MediaSessionDescriptionFactoryTest : public ::testing::Test { const cricket::RtpHeaderExtensions& expectedAnswer) { MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - f1_.set_audio_rtp_header_extensions(offered); - f1_.set_video_rtp_header_extensions(offered); - f2_.set_audio_rtp_header_extensions(local); - f2_.set_video_rtp_header_extensions(local); - + 
SetAudioVideoRtpHeaderExtensions(offered, offered, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); + SetAudioVideoRtpHeaderExtensions(local, local, &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, NULL); @@ -772,6 +748,38 @@ class MediaSessionDescriptionFactoryTest : public ::testing::Test { GetFirstVideoContentDescription(answer.get())->rtp_header_extensions()); } + std::vector + HeaderExtensionCapabilitiesFromRtpExtensions( + cricket::RtpHeaderExtensions extensions) { + std::vector capabilities; + for (const auto& extension : extensions) { + webrtc::RtpHeaderExtensionCapability capability( + extension.uri, extension.id, + webrtc::RtpTransceiverDirection::kSendRecv); + capabilities.push_back(capability); + } + return capabilities; + } + + void SetAudioVideoRtpHeaderExtensions(cricket::RtpHeaderExtensions audio_exts, + cricket::RtpHeaderExtensions video_exts, + MediaSessionOptions* opts) { + auto audio_caps = HeaderExtensionCapabilitiesFromRtpExtensions(audio_exts); + auto video_caps = HeaderExtensionCapabilitiesFromRtpExtensions(video_exts); + for (auto& entry : opts->media_description_options) { + switch (entry.type) { + case MEDIA_TYPE_AUDIO: + entry.header_extensions = audio_caps; + break; + case MEDIA_TYPE_VIDEO: + entry.header_extensions = video_caps; + break; + default: + break; + } + } + } + protected: UniqueRandomIdGenerator ssrc_generator1; UniqueRandomIdGenerator ssrc_generator2; @@ -839,30 +847,21 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoOffer) { TEST_F(MediaSessionDescriptionFactoryTest, TestBundleOfferWithSameCodecPlType) { const VideoCodec& offered_video_codec = f2_.video_sendrecv_codecs()[0]; const AudioCodec& offered_audio_codec = f2_.audio_sendrecv_codecs()[0]; - const RtpDataCodec& offered_data_codec = f2_.rtp_data_codecs()[0]; ASSERT_EQ(offered_video_codec.id, offered_audio_codec.id); - ASSERT_EQ(offered_video_codec.id, offered_data_codec.id); 
MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, &opts); opts.bundle_enabled = true; std::unique_ptr offer = f2_.CreateOffer(opts, NULL); const VideoContentDescription* vcd = GetFirstVideoContentDescription(offer.get()); const AudioContentDescription* acd = GetFirstAudioContentDescription(offer.get()); - const RtpDataContentDescription* dcd = - GetFirstRtpDataContentDescription(offer.get()); ASSERT_TRUE(NULL != vcd); ASSERT_TRUE(NULL != acd); - ASSERT_TRUE(NULL != dcd); EXPECT_NE(vcd->codecs()[0].id, acd->codecs()[0].id); - EXPECT_NE(vcd->codecs()[0].id, dcd->codecs()[0].id); - EXPECT_NE(acd->codecs()[0].id, dcd->codecs()[0].id); EXPECT_EQ(vcd->codecs()[0].name, offered_video_codec.name); EXPECT_EQ(acd->codecs()[0].name, offered_audio_codec.name); - EXPECT_EQ(dcd->codecs()[0].name, offered_data_codec.name); } // Test creating an updated offer with bundle, audio, video and data @@ -878,7 +877,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video", RtpTransceiverDirection::kInactive, kStopped, &opts); - opts.data_channel_type = cricket::DCT_NONE; opts.bundle_enabled = true; std::unique_ptr offer = f1_.CreateOffer(opts, NULL); std::unique_ptr answer = @@ -886,8 +884,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, MediaSessionOptions updated_opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &updated_opts); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &updated_opts); updated_opts.bundle_enabled = true; std::unique_ptr updated_offer( f1_.CreateOffer(updated_opts, answer.get())); @@ -896,58 +892,20 @@ TEST_F(MediaSessionDescriptionFactoryTest, GetFirstAudioContentDescription(updated_offer.get()); const VideoContentDescription* vcd = GetFirstVideoContentDescription(updated_offer.get()); - const RtpDataContentDescription* dcd = - 
GetFirstRtpDataContentDescription(updated_offer.get()); EXPECT_TRUE(NULL != vcd); EXPECT_TRUE(NULL != acd); - EXPECT_TRUE(NULL != dcd); ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite); EXPECT_EQ(cricket::kMediaProtocolSavpf, acd->protocol()); ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuite); EXPECT_EQ(cricket::kMediaProtocolSavpf, vcd->protocol()); - ASSERT_CRYPTO(dcd, 1U, kDefaultSrtpCryptoSuite); - EXPECT_EQ(cricket::kMediaProtocolSavpf, dcd->protocol()); -} - -// Create a RTP data offer, and ensure it matches what we expect. -TEST_F(MediaSessionDescriptionFactoryTest, TestCreateRtpDataOffer) { - MediaSessionOptions opts; - AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, &opts); - f1_.set_secure(SEC_ENABLED); - std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - ASSERT_TRUE(offer.get() != NULL); - const ContentInfo* ac = offer->GetContentByName("audio"); - const ContentInfo* dc = offer->GetContentByName("data"); - ASSERT_TRUE(ac != NULL); - ASSERT_TRUE(dc != NULL); - EXPECT_EQ(MediaProtocolType::kRtp, ac->type); - EXPECT_EQ(MediaProtocolType::kRtp, dc->type); - const AudioContentDescription* acd = ac->media_description()->as_audio(); - const RtpDataContentDescription* dcd = dc->media_description()->as_rtp_data(); - EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type()); - EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs()); - EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attched. - EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // default bandwidth (auto) - EXPECT_TRUE(acd->rtcp_mux()); // rtcp-mux defaults on - ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite); - EXPECT_EQ(cricket::kMediaProtocolSavpf, acd->protocol()); - EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type()); - EXPECT_EQ(f1_.rtp_data_codecs(), dcd->codecs()); - EXPECT_EQ(0U, dcd->first_ssrc()); // no sender is attached. 
- EXPECT_EQ(cricket::kDataMaxBandwidth, - dcd->bandwidth()); // default bandwidth (auto) - EXPECT_TRUE(dcd->rtcp_mux()); // rtcp-mux defaults on - ASSERT_CRYPTO(dcd, 1U, kDefaultSrtpCryptoSuite); - EXPECT_EQ(cricket::kMediaProtocolSavpf, dcd->protocol()); } // Create an SCTP data offer with bundle without error. TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSctpDataOffer) { MediaSessionOptions opts; opts.bundle_enabled = true; - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); f1_.set_secure(SEC_ENABLED); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); EXPECT_TRUE(offer.get() != NULL); @@ -962,7 +920,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSctpDataOffer) { TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSecureSctpDataOffer) { MediaSessionOptions opts; opts.bundle_enabled = true; - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); f1_.set_secure(SEC_ENABLED); tdf1_.set_secure(SEC_ENABLED); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); @@ -978,7 +936,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSecureSctpDataOffer) { TEST_F(MediaSessionDescriptionFactoryTest, TestCreateImplicitSctpDataOffer) { MediaSessionOptions opts; opts.bundle_enabled = true; - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); f1_.set_secure(SEC_ENABLED); std::unique_ptr offer1(f1_.CreateOffer(opts, NULL)); ASSERT_TRUE(offer1.get() != NULL); @@ -986,10 +944,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateImplicitSctpDataOffer) { ASSERT_TRUE(data != NULL); ASSERT_EQ(cricket::kMediaProtocolSctp, data->media_description()->protocol()); - // Now set data_channel_type to 'none' (default) and make sure that the - // datachannel type that gets generated from the previous 
offer, is of the - // same type. - opts.data_channel_type = cricket::DCT_NONE; std::unique_ptr offer2( f1_.CreateOffer(opts, offer1.get())); data = offer2->GetContentByName("data"); @@ -1092,6 +1046,66 @@ TEST_F(MediaSessionDescriptionFactoryTest, ReAnswerChangedBundleOffererTagged) { EXPECT_TRUE(bundle_group->HasContentName("video")); } +TEST_F(MediaSessionDescriptionFactoryTest, + CreateAnswerForOfferWithMultipleBundleGroups) { + // Create an offer with 4 m= sections, initially without BUNDLE groups. + MediaSessionOptions opts; + opts.bundle_enabled = false; + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "1", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "2", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "3", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "4", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); + ASSERT_TRUE(offer->groups().empty()); + + // Munge the offer to have two groups. Offers like these cannot be generated + // without munging, but it is valid to receive such offers from remote + // endpoints. + cricket::ContentGroup bundle_group1(cricket::GROUP_TYPE_BUNDLE); + bundle_group1.AddContentName("1"); + bundle_group1.AddContentName("2"); + cricket::ContentGroup bundle_group2(cricket::GROUP_TYPE_BUNDLE); + bundle_group2.AddContentName("3"); + bundle_group2.AddContentName("4"); + offer->AddGroup(bundle_group1); + offer->AddGroup(bundle_group2); + + // If BUNDLE is enabled, the answer to this offer should accept both BUNDLE + // groups. 
+ opts.bundle_enabled = true; + std::unique_ptr answer = + f2_.CreateAnswer(offer.get(), opts, nullptr); + + std::vector answer_groups = + answer->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE); + ASSERT_EQ(answer_groups.size(), 2u); + EXPECT_EQ(answer_groups[0]->content_names().size(), 2u); + EXPECT_TRUE(answer_groups[0]->HasContentName("1")); + EXPECT_TRUE(answer_groups[0]->HasContentName("2")); + EXPECT_EQ(answer_groups[1]->content_names().size(), 2u); + EXPECT_TRUE(answer_groups[1]->HasContentName("3")); + EXPECT_TRUE(answer_groups[1]->HasContentName("4")); + + // If BUNDLE is disabled, the answer to this offer should reject both BUNDLE + // groups. + opts.bundle_enabled = false; + answer = f2_.CreateAnswer(offer.get(), opts, nullptr); + + answer_groups = answer->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE); + // Rejected groups are still listed, but they are empty. + ASSERT_EQ(answer_groups.size(), 2u); + EXPECT_TRUE(answer_groups[0]->content_names().empty()); + EXPECT_TRUE(answer_groups[1]->content_names().empty()); +} + // Test that if the BUNDLE offerer-tagged media section is changed in a reoffer // and there is still a non-rejected media section that was in the initial // offer, then the ICE credentials do not change in the reoffer offerer-tagged @@ -1193,7 +1207,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateSendOnlyOffer) { // SessionDescription is preserved in the new SessionDescription. 
TEST_F(MediaSessionDescriptionFactoryTest, TestCreateOfferContentOrder) { MediaSessionOptions opts; - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); std::unique_ptr offer1(f1_.CreateOffer(opts, NULL)); ASSERT_TRUE(offer1.get() != NULL); @@ -1327,79 +1341,11 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerGcmAnswer) { TestVideoGcmCipher(false, true); } -TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswer) { - MediaSessionOptions opts = CreatePlanBMediaSessionOptions(); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, &opts); - f1_.set_secure(SEC_ENABLED); - f2_.set_secure(SEC_ENABLED); - std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - ASSERT_TRUE(offer.get() != NULL); - std::unique_ptr answer = - f2_.CreateAnswer(offer.get(), opts, NULL); - const ContentInfo* ac = answer->GetContentByName("audio"); - const ContentInfo* dc = answer->GetContentByName("data"); - ASSERT_TRUE(ac != NULL); - ASSERT_TRUE(dc != NULL); - EXPECT_EQ(MediaProtocolType::kRtp, ac->type); - EXPECT_EQ(MediaProtocolType::kRtp, dc->type); - const AudioContentDescription* acd = ac->media_description()->as_audio(); - const RtpDataContentDescription* dcd = dc->media_description()->as_rtp_data(); - EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type()); - EXPECT_THAT(acd->codecs(), ElementsAreArray(kAudioCodecsAnswer)); - EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw - EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached - EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux - ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite); - EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type()); - EXPECT_THAT(dcd->codecs(), ElementsAreArray(kDataCodecsAnswer)); - EXPECT_EQ(0U, dcd->first_ssrc()); // no sender is attached - EXPECT_TRUE(dcd->rtcp_mux()); // negotiated rtcp-mux - ASSERT_CRYPTO(dcd, 1U, kDefaultSrtpCryptoSuite); - EXPECT_EQ(cricket::kMediaProtocolSavpf, 
dcd->protocol()); -} - -TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerGcm) { - MediaSessionOptions opts = CreatePlanBMediaSessionOptions(); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, &opts); - opts.crypto_options.srtp.enable_gcm_crypto_suites = true; - f1_.set_secure(SEC_ENABLED); - f2_.set_secure(SEC_ENABLED); - std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - ASSERT_TRUE(offer.get() != NULL); - for (cricket::ContentInfo& content : offer->contents()) { - auto cryptos = content.media_description()->cryptos(); - PreferGcmCryptoParameters(&cryptos); - content.media_description()->set_cryptos(cryptos); - } - std::unique_ptr answer = - f2_.CreateAnswer(offer.get(), opts, NULL); - const ContentInfo* ac = answer->GetContentByName("audio"); - const ContentInfo* dc = answer->GetContentByName("data"); - ASSERT_TRUE(ac != NULL); - ASSERT_TRUE(dc != NULL); - EXPECT_EQ(MediaProtocolType::kRtp, ac->type); - EXPECT_EQ(MediaProtocolType::kRtp, dc->type); - const AudioContentDescription* acd = ac->media_description()->as_audio(); - const RtpDataContentDescription* dcd = dc->media_description()->as_rtp_data(); - EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type()); - EXPECT_THAT(acd->codecs(), ElementsAreArray(kAudioCodecsAnswer)); - EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw - EXPECT_EQ(0U, acd->first_ssrc()); // no sender is attached - EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux - ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuiteGcm); - EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type()); - EXPECT_THAT(dcd->codecs(), ElementsAreArray(kDataCodecsAnswer)); - EXPECT_EQ(0U, dcd->first_ssrc()); // no sender is attached - EXPECT_TRUE(dcd->rtcp_mux()); // negotiated rtcp-mux - ASSERT_CRYPTO(dcd, 1U, kDefaultSrtpCryptoSuiteGcm); - EXPECT_EQ(cricket::kMediaProtocolSavpf, dcd->protocol()); -} - // The use_sctpmap flag should be set in an Sctp DataContentDescription by // default. 
The answer's use_sctpmap flag should match the offer's. TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerUsesSctpmap) { MediaSessionOptions opts; - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); ContentInfo* dc_offer = offer->GetContentByName("data"); @@ -1420,7 +1366,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerUsesSctpmap) { // The answer's use_sctpmap flag should match the offer's. TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswerWithoutSctpmap) { MediaSessionOptions opts; - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); ContentInfo* dc_offer = offer->GetContentByName("data"); @@ -1450,7 +1396,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, tdf2_.set_secure(SEC_ENABLED); MediaSessionOptions opts; - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); ASSERT_TRUE(offer.get() != nullptr); ContentInfo* dc_offer = offer->GetContentByName("data"); @@ -1484,7 +1430,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, tdf2_.set_secure(SEC_ENABLED); MediaSessionOptions opts; - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); ASSERT_TRUE(offer.get() != nullptr); ContentInfo* dc_offer = offer->GetContentByName("data"); @@ -1513,7 +1459,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, tdf2_.set_secure(SEC_ENABLED); MediaSessionOptions opts; - AddDataSection(cricket::DCT_SCTP, 
RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); ASSERT_TRUE(offer.get() != nullptr); ContentInfo* dc_offer = offer->GetContentByName("data"); @@ -1538,7 +1484,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAnswerContentOrder) { MediaSessionOptions opts; // Creates a data only offer. - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kSendRecv, &opts); + AddDataSection(RtpTransceiverDirection::kSendRecv, &opts); std::unique_ptr offer1(f1_.CreateOffer(opts, NULL)); ASSERT_TRUE(offer1.get() != NULL); @@ -1598,35 +1544,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToInactiveOffer) { RtpTransceiverDirection::kInactive); } -// Test that a data content with an unknown protocol is rejected in an answer. -TEST_F(MediaSessionDescriptionFactoryTest, - CreateDataAnswerToOfferWithUnknownProtocol) { - MediaSessionOptions opts; - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, &opts); - f1_.set_secure(SEC_ENABLED); - f2_.set_secure(SEC_ENABLED); - std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - ContentInfo* dc_offer = offer->GetContentByName("data"); - ASSERT_TRUE(dc_offer != NULL); - RtpDataContentDescription* dcd_offer = - dc_offer->media_description()->as_rtp_data(); - ASSERT_TRUE(dcd_offer != NULL); - // Offer must be acceptable as an RTP protocol in order to be set. 
- std::string protocol = "RTP/a weird unknown protocol"; - dcd_offer->set_protocol(protocol); - - std::unique_ptr answer = - f2_.CreateAnswer(offer.get(), opts, NULL); - - const ContentInfo* dc_answer = answer->GetContentByName("data"); - ASSERT_TRUE(dc_answer != NULL); - EXPECT_TRUE(dc_answer->rejected); - const RtpDataContentDescription* dcd_answer = - dc_answer->media_description()->as_rtp_data(); - ASSERT_TRUE(dcd_answer != NULL); - EXPECT_EQ(protocol, dcd_answer->protocol()); -} - // Test that the media protocol is RTP/AVPF if DTLS and SDES are disabled. TEST_F(MediaSessionDescriptionFactoryTest, AudioOfferAnswerWithCryptoDisabled) { MediaSessionOptions opts = CreatePlanBMediaSessionOptions(); @@ -1659,13 +1576,13 @@ TEST_F(MediaSessionDescriptionFactoryTest, AudioOfferAnswerWithCryptoDisabled) { TEST_F(MediaSessionDescriptionFactoryTest, TestOfferAnswerWithRtpExtensions) { MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - f1_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension1)); - f1_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension1)); - f2_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension2)); - f2_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension2)); + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1), + MAKE_VECTOR(kVideoRtpExtension1), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2), + MAKE_VECTOR(kVideoRtpExtension2), &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, NULL); @@ -1714,21 +1631,21 @@ TEST_F(MediaSessionDescriptionFactoryTest, MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - const auto offered = MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00); - f1_.set_audio_rtp_header_extensions(offered); - f1_.set_video_rtp_header_extensions(offered); - const 
auto local = MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01); - f2_.set_audio_rtp_header_extensions(local); - f2_.set_video_rtp_header_extensions(local); + SetAudioVideoRtpHeaderExtensions( + MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00), + MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); + SetAudioVideoRtpHeaderExtensions( + MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01), + MAKE_VECTOR(kRtpExtensionTransportSequenceNumber01), &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, nullptr); EXPECT_THAT( GetFirstAudioContentDescription(answer.get())->rtp_header_extensions(), - ElementsAreArray(offered)); + ElementsAreArray(kRtpExtensionGenericFrameDescriptorUri00)); EXPECT_THAT( GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(), - ElementsAreArray(offered)); + ElementsAreArray(kRtpExtensionGenericFrameDescriptorUri00)); } TEST_F(MediaSessionDescriptionFactoryTest, @@ -1736,21 +1653,18 @@ TEST_F(MediaSessionDescriptionFactoryTest, MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - const auto offered = MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00); - f1_.set_audio_rtp_header_extensions(offered); - f1_.set_video_rtp_header_extensions(offered); - const auto local = MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00); - f2_.set_audio_rtp_header_extensions(local); - f2_.set_video_rtp_header_extensions(local); + SetAudioVideoRtpHeaderExtensions( + MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00), + MAKE_VECTOR(kRtpExtensionGenericFrameDescriptorUri00), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, nullptr); EXPECT_THAT( GetFirstAudioContentDescription(answer.get())->rtp_header_extensions(), - ElementsAreArray(offered)); + ElementsAreArray(kRtpExtensionGenericFrameDescriptorUri00)); EXPECT_THAT( 
GetFirstVideoContentDescription(answer.get())->rtp_header_extensions(), - ElementsAreArray(offered)); + ElementsAreArray(kRtpExtensionGenericFrameDescriptorUri00)); } TEST_F(MediaSessionDescriptionFactoryTest, @@ -1759,10 +1673,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); RtpExtension offer_dd(RtpExtension::kDependencyDescriptorUri, 7); - RtpExtension local_tsn(RtpExtension::kTransportSequenceNumberUri, 5); - f1_.set_video_rtp_header_extensions({offer_dd}); - f2_.set_video_rtp_header_extensions({local_tsn}); + SetAudioVideoRtpHeaderExtensions({}, {offer_dd}, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); + RtpExtension local_tsn(RtpExtension::kTransportSequenceNumberUri, 5); + SetAudioVideoRtpHeaderExtensions({}, {local_tsn}, &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, nullptr); EXPECT_THAT( @@ -1777,9 +1691,9 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtension offer_dd(RtpExtension::kDependencyDescriptorUri, 7); RtpExtension local_dd(RtpExtension::kDependencyDescriptorUri, 5); - f1_.set_video_rtp_header_extensions({offer_dd}); - f2_.set_video_rtp_header_extensions({local_dd}); + SetAudioVideoRtpHeaderExtensions({}, {offer_dd}, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); + SetAudioVideoRtpHeaderExtensions({}, {local_dd}, &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, nullptr); EXPECT_THAT( @@ -1796,12 +1710,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtension(RtpExtension::kAbsoluteCaptureTimeUri, 7)}; const cricket::RtpHeaderExtensions local_extensions = { RtpExtension(RtpExtension::kTransportSequenceNumberUri, 5)}; - f1_.set_video_rtp_header_extensions(offered_extensions); - f1_.set_audio_rtp_header_extensions(offered_extensions); - f2_.set_video_rtp_header_extensions(local_extensions); - f2_.set_audio_rtp_header_extensions(local_extensions); - + 
SetAudioVideoRtpHeaderExtensions(offered_extensions, offered_extensions, + &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); + SetAudioVideoRtpHeaderExtensions(local_extensions, local_extensions, &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, nullptr); EXPECT_THAT( @@ -1821,12 +1733,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtension(RtpExtension::kAbsoluteCaptureTimeUri, 7)}; const cricket::RtpHeaderExtensions local_extensions = { RtpExtension(RtpExtension::kAbsoluteCaptureTimeUri, 5)}; - f1_.set_video_rtp_header_extensions(offered_extensions); - f1_.set_audio_rtp_header_extensions(offered_extensions); - f2_.set_video_rtp_header_extensions(local_extensions); - f2_.set_audio_rtp_header_extensions(local_extensions); - + SetAudioVideoRtpHeaderExtensions(offered_extensions, offered_extensions, + &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); + SetAudioVideoRtpHeaderExtensions(local_extensions, local_extensions, &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, nullptr); EXPECT_THAT( @@ -1846,12 +1756,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtension(RtpExtension::kTransportSequenceNumberUri, 7)}; const cricket::RtpHeaderExtensions local_extensions = { RtpExtension(RtpExtension::kAbsoluteCaptureTimeUri, 5)}; - f1_.set_video_rtp_header_extensions(offered_extensions); - f1_.set_audio_rtp_header_extensions(offered_extensions); - f2_.set_video_rtp_header_extensions(local_extensions); - f2_.set_audio_rtp_header_extensions(local_extensions); - + SetAudioVideoRtpHeaderExtensions(offered_extensions, offered_extensions, + &opts); std::unique_ptr offer = f1_.CreateOffer(opts, nullptr); + SetAudioVideoRtpHeaderExtensions(local_extensions, local_extensions, &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, nullptr); EXPECT_THAT( @@ -1862,6 +1770,203 @@ TEST_F(MediaSessionDescriptionFactoryTest, IsEmpty()); } +TEST_F(MediaSessionDescriptionFactoryTest, + 
OffersUnstoppedExtensionsWithAudioVideoExtensionStopped) { + MediaSessionOptions opts; + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 1, + RtpTransceiverDirection::kStopped), + webrtc::RtpHeaderExtensionCapability("uri2", 3, + RtpTransceiverDirection::kSendOnly)}; + AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video1", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 1, + RtpTransceiverDirection::kStopped), + webrtc::RtpHeaderExtensionCapability("uri3", 7, + RtpTransceiverDirection::kSendOnly)}; + auto offer = f1_.CreateOffer(opts, nullptr); + EXPECT_THAT( + offer->contents(), + ElementsAre( + Property(&ContentInfo::media_description, + Pointee(Property( + &MediaContentDescription::rtp_header_extensions, + ElementsAre(Field(&RtpExtension::uri, "uri2"))))), + Property(&ContentInfo::media_description, + Pointee(Property( + &MediaContentDescription::rtp_header_extensions, + ElementsAre(Field(&RtpExtension::uri, "uri3"))))))); +} + +TEST_F(MediaSessionDescriptionFactoryTest, + OffersUnstoppedExtensionsWithAudioExtensionStopped) { + MediaSessionOptions opts; + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 1, + RtpTransceiverDirection::kSendOnly), + webrtc::RtpHeaderExtensionCapability("uri2", 3, + RtpTransceiverDirection::kStopped)}; + AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video1", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri42", 42, + RtpTransceiverDirection::kSendRecv), + 
webrtc::RtpHeaderExtensionCapability("uri3", 7, + RtpTransceiverDirection::kSendOnly)}; + auto offer = f1_.CreateOffer(opts, nullptr); + EXPECT_THAT( + offer->contents(), + ElementsAre( + Property(&ContentInfo::media_description, + Pointee(Property( + &MediaContentDescription::rtp_header_extensions, + ElementsAre(Field(&RtpExtension::uri, "uri1"))))), + Property( + &ContentInfo::media_description, + Pointee(Property( + &MediaContentDescription::rtp_header_extensions, + UnorderedElementsAre(Field(&RtpExtension::uri, "uri3"), + Field(&RtpExtension::uri, "uri42"))))))); +} + +TEST_F(MediaSessionDescriptionFactoryTest, + OffersUnstoppedExtensionsWithVideoExtensionStopped) { + MediaSessionOptions opts; + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 5, + RtpTransceiverDirection::kSendOnly), + webrtc::RtpHeaderExtensionCapability("uri2", 7, + RtpTransceiverDirection::kSendRecv)}; + AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video1", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri42", 42, + RtpTransceiverDirection::kSendRecv), + webrtc::RtpHeaderExtensionCapability("uri3", 7, + RtpTransceiverDirection::kStopped)}; + auto offer = f1_.CreateOffer(opts, nullptr); + EXPECT_THAT( + offer->contents(), + ElementsAre( + Property( + &ContentInfo::media_description, + Pointee(Property( + &MediaContentDescription::rtp_header_extensions, + UnorderedElementsAre(Field(&RtpExtension::uri, "uri1"), + Field(&RtpExtension::uri, "uri2"))))), + Property(&ContentInfo::media_description, + Pointee(Property( + &MediaContentDescription::rtp_header_extensions, + ElementsAre(Field(&RtpExtension::uri, "uri42"))))))); +} + +TEST_F(MediaSessionDescriptionFactoryTest, AnswersUnstoppedExtensions) { + 
MediaSessionOptions opts; + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 4, + RtpTransceiverDirection::kStopped), + webrtc::RtpHeaderExtensionCapability("uri2", 3, + RtpTransceiverDirection::kSendOnly), + webrtc::RtpHeaderExtensionCapability("uri3", 2, + RtpTransceiverDirection::kRecvOnly), + webrtc::RtpHeaderExtensionCapability("uri4", 1, + RtpTransceiverDirection::kSendRecv)}; + auto offer = f1_.CreateOffer(opts, nullptr); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 4, + RtpTransceiverDirection::kSendOnly), + webrtc::RtpHeaderExtensionCapability("uri2", 3, + RtpTransceiverDirection::kRecvOnly), + webrtc::RtpHeaderExtensionCapability("uri3", 2, + RtpTransceiverDirection::kStopped), + webrtc::RtpHeaderExtensionCapability("uri4", 1, + RtpTransceiverDirection::kSendRecv)}; + auto answer = f2_.CreateAnswer(offer.get(), opts, nullptr); + EXPECT_THAT( + answer->contents(), + ElementsAre(Property( + &ContentInfo::media_description, + Pointee(Property(&MediaContentDescription::rtp_header_extensions, + ElementsAre(Field(&RtpExtension::uri, "uri2"), + Field(&RtpExtension::uri, "uri4"))))))); +} + +TEST_F(MediaSessionDescriptionFactoryTest, + AppendsUnstoppedExtensionsToCurrentDescription) { + MediaSessionOptions opts; + AddMediaDescriptionOptions(MEDIA_TYPE_AUDIO, "audio", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 1, + RtpTransceiverDirection::kSendRecv)}; + auto offer = f1_.CreateOffer(opts, nullptr); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 2, + RtpTransceiverDirection::kSendRecv), + webrtc::RtpHeaderExtensionCapability("uri2", 3, + 
RtpTransceiverDirection::kRecvOnly), + webrtc::RtpHeaderExtensionCapability("uri3", 5, + RtpTransceiverDirection::kStopped), + webrtc::RtpHeaderExtensionCapability("uri4", 6, + RtpTransceiverDirection::kSendRecv)}; + auto offer2 = f1_.CreateOffer(opts, offer.get()); + EXPECT_THAT( + offer2->contents(), + ElementsAre(Property( + &ContentInfo::media_description, + Pointee(Property(&MediaContentDescription::rtp_header_extensions, + ElementsAre(Field(&RtpExtension::uri, "uri1"), + Field(&RtpExtension::uri, "uri2"), + Field(&RtpExtension::uri, "uri4"))))))); +} + +TEST_F(MediaSessionDescriptionFactoryTest, + AppendsStoppedExtensionIfKnownAndPresentInTheOffer) { + MediaSessionOptions opts; + AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video", + RtpTransceiverDirection::kSendRecv, kActive, + &opts); + opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 1, + RtpTransceiverDirection::kSendRecv), + webrtc::RtpHeaderExtensionCapability("uri2", 1, + RtpTransceiverDirection::kSendRecv)}; + auto offer = f1_.CreateOffer(opts, nullptr); + + // Now add "uri2" as stopped to the options verify that the offer contains + // uri2 since it's already present since before. 
+ opts.media_description_options.back().header_extensions = { + webrtc::RtpHeaderExtensionCapability("uri1", 1, + RtpTransceiverDirection::kSendRecv), + webrtc::RtpHeaderExtensionCapability("uri2", 2, + RtpTransceiverDirection::kStopped)}; + auto offer2 = f1_.CreateOffer(opts, offer.get()); + EXPECT_THAT( + offer2->contents(), + ElementsAre(Property( + &ContentInfo::media_description, + Pointee(Property(&MediaContentDescription::rtp_header_extensions, + ElementsAre(Field(&RtpExtension::uri, "uri1"), + Field(&RtpExtension::uri, "uri2"))))))); +} + TEST_F(MediaSessionDescriptionFactoryTest, TestOfferAnswerWithEncryptedRtpExtensionsBoth) { MediaSessionOptions opts; @@ -1870,13 +1975,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, f1_.set_enable_encrypted_rtp_header_extensions(true); f2_.set_enable_encrypted_rtp_header_extensions(true); - f1_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension1)); - f1_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension1)); - f2_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension2)); - f2_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension2)); - + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1), + MAKE_VECTOR(kVideoRtpExtension1), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2), + MAKE_VECTOR(kVideoRtpExtension2), &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, NULL); @@ -1901,13 +2005,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, f1_.set_enable_encrypted_rtp_header_extensions(true); - f1_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension1)); - f1_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension1)); - f2_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension2)); - f2_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension2)); - + 
SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1), + MAKE_VECTOR(kVideoRtpExtension1), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2), + MAKE_VECTOR(kVideoRtpExtension2), &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, NULL); @@ -1932,13 +2035,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, f2_.set_enable_encrypted_rtp_header_extensions(true); - f1_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension1)); - f1_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension1)); - f2_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension2)); - f2_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension2)); - + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1), + MAKE_VECTOR(kVideoRtpExtension1), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2), + MAKE_VECTOR(kVideoRtpExtension2), &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, NULL); @@ -1961,36 +2063,28 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAnswerWithoutLegacyStreams) { MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, NULL); const ContentInfo* ac = answer->GetContentByName("audio"); const ContentInfo* vc = answer->GetContentByName("video"); - const ContentInfo* dc = answer->GetContentByName("data"); ASSERT_TRUE(ac != NULL); ASSERT_TRUE(vc != NULL); const AudioContentDescription* acd = ac->media_description()->as_audio(); const VideoContentDescription* vcd = vc->media_description()->as_video(); - const 
RtpDataContentDescription* dcd = dc->media_description()->as_rtp_data(); EXPECT_FALSE(acd->has_ssrcs()); // No StreamParams. EXPECT_FALSE(vcd->has_ssrcs()); // No StreamParams. - EXPECT_FALSE(dcd->has_ssrcs()); // No StreamParams. } // Create a typical video answer, and ensure it matches what we expect. TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerRtcpMux) { MediaSessionOptions offer_opts; AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &offer_opts); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kSendRecv, - &offer_opts); MediaSessionOptions answer_opts; AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &answer_opts); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kSendRecv, - &answer_opts); std::unique_ptr offer; std::unique_ptr answer; @@ -2001,16 +2095,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerRtcpMux) { answer = f2_.CreateAnswer(offer.get(), answer_opts, NULL); ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get())); ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get())); - ASSERT_TRUE(NULL != GetFirstRtpDataContentDescription(offer.get())); ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get())); ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get())); - ASSERT_TRUE(NULL != GetFirstRtpDataContentDescription(answer.get())); EXPECT_TRUE(GetFirstAudioContentDescription(offer.get())->rtcp_mux()); EXPECT_TRUE(GetFirstVideoContentDescription(offer.get())->rtcp_mux()); - EXPECT_TRUE(GetFirstRtpDataContentDescription(offer.get())->rtcp_mux()); EXPECT_TRUE(GetFirstAudioContentDescription(answer.get())->rtcp_mux()); EXPECT_TRUE(GetFirstVideoContentDescription(answer.get())->rtcp_mux()); - EXPECT_TRUE(GetFirstRtpDataContentDescription(answer.get())->rtcp_mux()); offer_opts.rtcp_mux_enabled = true; answer_opts.rtcp_mux_enabled = false; @@ -2018,16 +2108,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerRtcpMux) { answer 
= f2_.CreateAnswer(offer.get(), answer_opts, NULL); ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get())); ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get())); - ASSERT_TRUE(NULL != GetFirstRtpDataContentDescription(offer.get())); ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get())); ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get())); - ASSERT_TRUE(NULL != GetFirstRtpDataContentDescription(answer.get())); EXPECT_TRUE(GetFirstAudioContentDescription(offer.get())->rtcp_mux()); EXPECT_TRUE(GetFirstVideoContentDescription(offer.get())->rtcp_mux()); - EXPECT_TRUE(GetFirstRtpDataContentDescription(offer.get())->rtcp_mux()); EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux()); EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux()); - EXPECT_FALSE(GetFirstRtpDataContentDescription(answer.get())->rtcp_mux()); offer_opts.rtcp_mux_enabled = false; answer_opts.rtcp_mux_enabled = true; @@ -2035,16 +2121,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerRtcpMux) { answer = f2_.CreateAnswer(offer.get(), answer_opts, NULL); ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get())); ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get())); - ASSERT_TRUE(NULL != GetFirstRtpDataContentDescription(offer.get())); ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get())); ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get())); - ASSERT_TRUE(NULL != GetFirstRtpDataContentDescription(answer.get())); EXPECT_FALSE(GetFirstAudioContentDescription(offer.get())->rtcp_mux()); EXPECT_FALSE(GetFirstVideoContentDescription(offer.get())->rtcp_mux()); - EXPECT_FALSE(GetFirstRtpDataContentDescription(offer.get())->rtcp_mux()); EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux()); EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux()); - EXPECT_FALSE(GetFirstRtpDataContentDescription(answer.get())->rtcp_mux()); 
offer_opts.rtcp_mux_enabled = false; answer_opts.rtcp_mux_enabled = false; @@ -2052,16 +2134,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerRtcpMux) { answer = f2_.CreateAnswer(offer.get(), answer_opts, NULL); ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get())); ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get())); - ASSERT_TRUE(NULL != GetFirstRtpDataContentDescription(offer.get())); ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get())); ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get())); - ASSERT_TRUE(NULL != GetFirstRtpDataContentDescription(answer.get())); EXPECT_FALSE(GetFirstAudioContentDescription(offer.get())->rtcp_mux()); EXPECT_FALSE(GetFirstVideoContentDescription(offer.get())->rtcp_mux()); - EXPECT_FALSE(GetFirstRtpDataContentDescription(offer.get())->rtcp_mux()); EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux()); EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux()); - EXPECT_FALSE(GetFirstRtpDataContentDescription(answer.get())->rtcp_mux()); } // Create an audio-only answer to a video offer. @@ -2087,122 +2165,141 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswerToVideo) { EXPECT_TRUE(vc->rejected); } -// Create an audio-only answer to an offer with data. 
-TEST_F(MediaSessionDescriptionFactoryTest, TestCreateNoDataAnswerToDataOffer) { - MediaSessionOptions opts = CreatePlanBMediaSessionOptions(); - opts.data_channel_type = cricket::DCT_RTP; - AddMediaDescriptionOptions(MEDIA_TYPE_DATA, "data", - RtpTransceiverDirection::kRecvOnly, kActive, - &opts); - std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - ASSERT_TRUE(offer.get() != NULL); - - opts.media_description_options[1].stopped = true; - std::unique_ptr answer = - f2_.CreateAnswer(offer.get(), opts, NULL); - const ContentInfo* ac = answer->GetContentByName("audio"); - const ContentInfo* dc = answer->GetContentByName("data"); - ASSERT_TRUE(ac != NULL); - ASSERT_TRUE(dc != NULL); - ASSERT_TRUE(dc->media_description() != NULL); - EXPECT_TRUE(dc->rejected); -} - // Create an answer that rejects the contents which are rejected in the offer. TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToOfferWithRejectedMedia) { MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); ContentInfo* ac = offer->GetContentByName("audio"); ContentInfo* vc = offer->GetContentByName("video"); - ContentInfo* dc = offer->GetContentByName("data"); ASSERT_TRUE(ac != NULL); ASSERT_TRUE(vc != NULL); - ASSERT_TRUE(dc != NULL); ac->rejected = true; vc->rejected = true; - dc->rejected = true; std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, NULL); ac = answer->GetContentByName("audio"); vc = answer->GetContentByName("video"); - dc = answer->GetContentByName("data"); ASSERT_TRUE(ac != NULL); ASSERT_TRUE(vc != NULL); - ASSERT_TRUE(dc != NULL); EXPECT_TRUE(ac->rejected); EXPECT_TRUE(vc->rejected); - EXPECT_TRUE(dc->rejected); } TEST_F(MediaSessionDescriptionFactoryTest, - CreateAnswerSupportsMixedOneAndTwoByteHeaderExtensions) { + 
OfferAndAnswerDoesNotHaveMixedByteSessionAttribute) { MediaSessionOptions opts; - std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - // Offer without request of mixed one- and two-byte header extensions. + std::unique_ptr offer = + f1_.CreateOffer(opts, /*current_description=*/nullptr); offer->set_extmap_allow_mixed(false); - ASSERT_TRUE(offer.get() != NULL); - std::unique_ptr answer_no_support( - f2_.CreateAnswer(offer.get(), opts, NULL)); - EXPECT_FALSE(answer_no_support->extmap_allow_mixed()); - // Offer with request of mixed one- and two-byte header extensions. + std::unique_ptr answer( + f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr)); + + EXPECT_FALSE(answer->extmap_allow_mixed()); +} + +TEST_F(MediaSessionDescriptionFactoryTest, + OfferAndAnswerHaveMixedByteSessionAttribute) { + MediaSessionOptions opts; + std::unique_ptr offer = + f1_.CreateOffer(opts, /*current_description=*/nullptr); offer->set_extmap_allow_mixed(true); - ASSERT_TRUE(offer.get() != NULL); + std::unique_ptr answer_support( - f2_.CreateAnswer(offer.get(), opts, NULL)); + f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr)); + EXPECT_TRUE(answer_support->extmap_allow_mixed()); } TEST_F(MediaSessionDescriptionFactoryTest, - CreateAnswerSupportsMixedOneAndTwoByteHeaderExtensionsOnMediaLevel) { + OfferAndAnswerDoesNotHaveMixedByteMediaAttributes) { MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts); - std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - MediaContentDescription* video_offer = - offer->GetContentDescriptionByName("video"); - ASSERT_TRUE(video_offer); + std::unique_ptr offer = + f1_.CreateOffer(opts, /*current_description=*/nullptr); + offer->set_extmap_allow_mixed(false); MediaContentDescription* audio_offer = offer->GetContentDescriptionByName("audio"); - ASSERT_TRUE(audio_offer); + MediaContentDescription* video_offer = + offer->GetContentDescriptionByName("video"); + 
ASSERT_EQ(MediaContentDescription::kNo, + audio_offer->extmap_allow_mixed_enum()); + ASSERT_EQ(MediaContentDescription::kNo, + video_offer->extmap_allow_mixed_enum()); - // Explicit disable of mixed one-two byte header support in offer. - video_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kNo); - audio_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kNo); + std::unique_ptr answer( + f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr)); - ASSERT_TRUE(offer.get() != NULL); - std::unique_ptr answer_no_support( - f2_.CreateAnswer(offer.get(), opts, NULL)); - MediaContentDescription* video_answer = - answer_no_support->GetContentDescriptionByName("video"); MediaContentDescription* audio_answer = - answer_no_support->GetContentDescriptionByName("audio"); - EXPECT_EQ(MediaContentDescription::kNo, - video_answer->extmap_allow_mixed_enum()); + answer->GetContentDescriptionByName("audio"); + MediaContentDescription* video_answer = + answer->GetContentDescriptionByName("video"); EXPECT_EQ(MediaContentDescription::kNo, audio_answer->extmap_allow_mixed_enum()); + EXPECT_EQ(MediaContentDescription::kNo, + video_answer->extmap_allow_mixed_enum()); +} - // Enable mixed one-two byte header support in offer. 
- video_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kMedia); +TEST_F(MediaSessionDescriptionFactoryTest, + OfferAndAnswerHaveSameMixedByteMediaAttributes) { + MediaSessionOptions opts; + AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts); + std::unique_ptr offer = + f1_.CreateOffer(opts, /*current_description=*/nullptr); + offer->set_extmap_allow_mixed(false); + MediaContentDescription* audio_offer = + offer->GetContentDescriptionByName("audio"); audio_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kMedia); - ASSERT_TRUE(offer.get() != NULL); - std::unique_ptr answer_support( - f2_.CreateAnswer(offer.get(), opts, NULL)); - video_answer = answer_support->GetContentDescriptionByName("video"); - audio_answer = answer_support->GetContentDescriptionByName("audio"); + MediaContentDescription* video_offer = + offer->GetContentDescriptionByName("video"); + video_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kMedia); + + std::unique_ptr answer( + f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr)); + + MediaContentDescription* audio_answer = + answer->GetContentDescriptionByName("audio"); + MediaContentDescription* video_answer = + answer->GetContentDescriptionByName("video"); EXPECT_EQ(MediaContentDescription::kMedia, - video_answer->extmap_allow_mixed_enum()); + audio_answer->extmap_allow_mixed_enum()); EXPECT_EQ(MediaContentDescription::kMedia, + video_answer->extmap_allow_mixed_enum()); +} + +TEST_F(MediaSessionDescriptionFactoryTest, + OfferAndAnswerHaveDifferentMixedByteMediaAttributes) { + MediaSessionOptions opts; + AddAudioVideoSections(RtpTransceiverDirection::kSendRecv, &opts); + std::unique_ptr offer = + f1_.CreateOffer(opts, /*current_description=*/nullptr); + offer->set_extmap_allow_mixed(false); + MediaContentDescription* audio_offer = + offer->GetContentDescriptionByName("audio"); + audio_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kNo); + MediaContentDescription* 
video_offer = + offer->GetContentDescriptionByName("video"); + video_offer->set_extmap_allow_mixed_enum(MediaContentDescription::kMedia); + + std::unique_ptr answer( + f2_.CreateAnswer(offer.get(), opts, /*current_description=*/nullptr)); + + MediaContentDescription* audio_answer = + answer->GetContentDescriptionByName("audio"); + MediaContentDescription* video_answer = + answer->GetContentDescriptionByName("video"); + EXPECT_EQ(MediaContentDescription::kNo, audio_answer->extmap_allow_mixed_enum()); + EXPECT_EQ(MediaContentDescription::kMedia, + video_answer->extmap_allow_mixed_enum()); } // Create an audio and video offer with: // - one video track // - two audio tracks -// - two data tracks // and ensure it matches what we expect. Also updates the initial offer by // adding a new video track and replaces one of the audio tracks. TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) { @@ -2215,25 +2312,16 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) { AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack2, {kMediaStream1}, 1, &opts); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kSendRecv, &opts); - AttachSenderToMediaDescriptionOptions("data", MEDIA_TYPE_DATA, kDataTrack1, - {kMediaStream1}, 1, &opts); - AttachSenderToMediaDescriptionOptions("data", MEDIA_TYPE_DATA, kDataTrack2, - {kMediaStream1}, 1, &opts); - f1_.set_secure(SEC_ENABLED); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); ASSERT_TRUE(offer.get() != NULL); const ContentInfo* ac = offer->GetContentByName("audio"); const ContentInfo* vc = offer->GetContentByName("video"); - const ContentInfo* dc = offer->GetContentByName("data"); ASSERT_TRUE(ac != NULL); ASSERT_TRUE(vc != NULL); - ASSERT_TRUE(dc != NULL); const AudioContentDescription* acd = ac->media_description()->as_audio(); const VideoContentDescription* vcd = vc->media_description()->as_video(); - const RtpDataContentDescription* dcd = 
dc->media_description()->as_rtp_data(); EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type()); EXPECT_EQ(f1_.audio_sendrecv_codecs(), acd->codecs()); @@ -2262,25 +2350,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) { EXPECT_EQ(kAutoBandwidth, vcd->bandwidth()); // default bandwidth (auto) EXPECT_TRUE(vcd->rtcp_mux()); // rtcp-mux defaults on - EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type()); - EXPECT_EQ(f1_.rtp_data_codecs(), dcd->codecs()); - ASSERT_CRYPTO(dcd, 1U, kDefaultSrtpCryptoSuite); - - const StreamParamsVec& data_streams = dcd->streams(); - ASSERT_EQ(2U, data_streams.size()); - EXPECT_EQ(data_streams[0].cname, data_streams[1].cname); - EXPECT_EQ(kDataTrack1, data_streams[0].id); - ASSERT_EQ(1U, data_streams[0].ssrcs.size()); - EXPECT_NE(0U, data_streams[0].ssrcs[0]); - EXPECT_EQ(kDataTrack2, data_streams[1].id); - ASSERT_EQ(1U, data_streams[1].ssrcs.size()); - EXPECT_NE(0U, data_streams[1].ssrcs[0]); - - EXPECT_EQ(cricket::kDataMaxBandwidth, - dcd->bandwidth()); // default bandwidth (auto) - EXPECT_TRUE(dcd->rtcp_mux()); // rtcp-mux defaults on - ASSERT_CRYPTO(dcd, 1U, kDefaultSrtpCryptoSuite); - // Update the offer. Add a new video track that is not synched to the // other tracks and replace audio track 2 with audio track 3. 
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack2, @@ -2288,38 +2357,27 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) { DetachSenderFromMediaSection("audio", kAudioTrack2, &opts); AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack3, {kMediaStream1}, 1, &opts); - DetachSenderFromMediaSection("data", kDataTrack2, &opts); - AttachSenderToMediaDescriptionOptions("data", MEDIA_TYPE_DATA, kDataTrack3, - {kMediaStream1}, 1, &opts); std::unique_ptr updated_offer( f1_.CreateOffer(opts, offer.get())); ASSERT_TRUE(updated_offer.get() != NULL); ac = updated_offer->GetContentByName("audio"); vc = updated_offer->GetContentByName("video"); - dc = updated_offer->GetContentByName("data"); ASSERT_TRUE(ac != NULL); ASSERT_TRUE(vc != NULL); - ASSERT_TRUE(dc != NULL); const AudioContentDescription* updated_acd = ac->media_description()->as_audio(); const VideoContentDescription* updated_vcd = vc->media_description()->as_video(); - const RtpDataContentDescription* updated_dcd = - dc->media_description()->as_rtp_data(); EXPECT_EQ(acd->type(), updated_acd->type()); EXPECT_EQ(acd->codecs(), updated_acd->codecs()); EXPECT_EQ(vcd->type(), updated_vcd->type()); EXPECT_EQ(vcd->codecs(), updated_vcd->codecs()); - EXPECT_EQ(dcd->type(), updated_dcd->type()); - EXPECT_EQ(dcd->codecs(), updated_dcd->codecs()); ASSERT_CRYPTO(updated_acd, 1U, kDefaultSrtpCryptoSuite); EXPECT_TRUE(CompareCryptoParams(acd->cryptos(), updated_acd->cryptos())); ASSERT_CRYPTO(updated_vcd, 1U, kDefaultSrtpCryptoSuite); EXPECT_TRUE(CompareCryptoParams(vcd->cryptos(), updated_vcd->cryptos())); - ASSERT_CRYPTO(updated_dcd, 1U, kDefaultSrtpCryptoSuite); - EXPECT_TRUE(CompareCryptoParams(dcd->cryptos(), updated_dcd->cryptos())); const StreamParamsVec& updated_audio_streams = updated_acd->streams(); ASSERT_EQ(2U, updated_audio_streams.size()); @@ -2335,18 +2393,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) { 
EXPECT_EQ(kVideoTrack2, updated_video_streams[1].id); // All the media streams in one PeerConnection share one RTCP CNAME. EXPECT_EQ(updated_video_streams[1].cname, updated_video_streams[0].cname); - - const StreamParamsVec& updated_data_streams = updated_dcd->streams(); - ASSERT_EQ(2U, updated_data_streams.size()); - EXPECT_EQ(data_streams[0], updated_data_streams[0]); - EXPECT_EQ(kDataTrack3, updated_data_streams[1].id); // New data track. - ASSERT_EQ(1U, updated_data_streams[1].ssrcs.size()); - EXPECT_NE(0U, updated_data_streams[1].ssrcs[0]); - EXPECT_EQ(updated_data_streams[0].cname, updated_data_streams[1].cname); - // The stream correctly got the CNAME from the MediaSessionOptions. - // The Expected RTCP CNAME is the default one as we are using the default - // MediaSessionOptions. - EXPECT_EQ(updated_data_streams[0].cname, cricket::kDefaultRtcpCname); } // Create an offer with simulcast video stream. @@ -2549,10 +2595,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) { AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video", RtpTransceiverDirection::kRecvOnly, kActive, &offer_opts); - offer_opts.data_channel_type = cricket::DCT_RTP; - AddMediaDescriptionOptions(MEDIA_TYPE_DATA, "data", - RtpTransceiverDirection::kRecvOnly, kActive, - &offer_opts); f1_.set_secure(SEC_ENABLED); f2_.set_secure(SEC_ENABLED); std::unique_ptr offer = f1_.CreateOffer(offer_opts, NULL); @@ -2571,31 +2613,18 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) { AttachSenderToMediaDescriptionOptions("audio", MEDIA_TYPE_AUDIO, kAudioTrack2, {kMediaStream1}, 1, &answer_opts); - AddMediaDescriptionOptions(MEDIA_TYPE_DATA, "data", - RtpTransceiverDirection::kSendRecv, kActive, - &answer_opts); - AttachSenderToMediaDescriptionOptions("data", MEDIA_TYPE_DATA, kDataTrack1, - {kMediaStream1}, 1, &answer_opts); - AttachSenderToMediaDescriptionOptions("data", MEDIA_TYPE_DATA, kDataTrack2, - {kMediaStream1}, 1, &answer_opts); - 
answer_opts.data_channel_type = cricket::DCT_RTP; - std::unique_ptr answer = f2_.CreateAnswer(offer.get(), answer_opts, NULL); ASSERT_TRUE(answer.get() != NULL); const ContentInfo* ac = answer->GetContentByName("audio"); const ContentInfo* vc = answer->GetContentByName("video"); - const ContentInfo* dc = answer->GetContentByName("data"); ASSERT_TRUE(ac != NULL); ASSERT_TRUE(vc != NULL); - ASSERT_TRUE(dc != NULL); const AudioContentDescription* acd = ac->media_description()->as_audio(); const VideoContentDescription* vcd = vc->media_description()->as_video(); - const RtpDataContentDescription* dcd = dc->media_description()->as_rtp_data(); ASSERT_CRYPTO(acd, 1U, kDefaultSrtpCryptoSuite); ASSERT_CRYPTO(vcd, 1U, kDefaultSrtpCryptoSuite); - ASSERT_CRYPTO(dcd, 1U, kDefaultSrtpCryptoSuite); EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type()); EXPECT_THAT(acd->codecs(), ElementsAreArray(kAudioCodecsAnswer)); @@ -2623,59 +2652,33 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) { EXPECT_EQ(kAutoBandwidth, vcd->bandwidth()); // default bandwidth (auto) EXPECT_TRUE(vcd->rtcp_mux()); // rtcp-mux defaults on - EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type()); - EXPECT_THAT(dcd->codecs(), ElementsAreArray(kDataCodecsAnswer)); - - const StreamParamsVec& data_streams = dcd->streams(); - ASSERT_EQ(2U, data_streams.size()); - EXPECT_TRUE(data_streams[0].cname == data_streams[1].cname); - EXPECT_EQ(kDataTrack1, data_streams[0].id); - ASSERT_EQ(1U, data_streams[0].ssrcs.size()); - EXPECT_NE(0U, data_streams[0].ssrcs[0]); - EXPECT_EQ(kDataTrack2, data_streams[1].id); - ASSERT_EQ(1U, data_streams[1].ssrcs.size()); - EXPECT_NE(0U, data_streams[1].ssrcs[0]); - - EXPECT_EQ(cricket::kDataMaxBandwidth, - dcd->bandwidth()); // default bandwidth (auto) - EXPECT_TRUE(dcd->rtcp_mux()); // rtcp-mux defaults on - // Update the answer. Add a new video track that is not synched to the // other tracks and remove 1 audio track. 
AttachSenderToMediaDescriptionOptions("video", MEDIA_TYPE_VIDEO, kVideoTrack2, {kMediaStream2}, 1, &answer_opts); DetachSenderFromMediaSection("audio", kAudioTrack2, &answer_opts); - DetachSenderFromMediaSection("data", kDataTrack2, &answer_opts); std::unique_ptr updated_answer( f2_.CreateAnswer(offer.get(), answer_opts, answer.get())); ASSERT_TRUE(updated_answer.get() != NULL); ac = updated_answer->GetContentByName("audio"); vc = updated_answer->GetContentByName("video"); - dc = updated_answer->GetContentByName("data"); ASSERT_TRUE(ac != NULL); ASSERT_TRUE(vc != NULL); - ASSERT_TRUE(dc != NULL); const AudioContentDescription* updated_acd = ac->media_description()->as_audio(); const VideoContentDescription* updated_vcd = vc->media_description()->as_video(); - const RtpDataContentDescription* updated_dcd = - dc->media_description()->as_rtp_data(); ASSERT_CRYPTO(updated_acd, 1U, kDefaultSrtpCryptoSuite); EXPECT_TRUE(CompareCryptoParams(acd->cryptos(), updated_acd->cryptos())); ASSERT_CRYPTO(updated_vcd, 1U, kDefaultSrtpCryptoSuite); EXPECT_TRUE(CompareCryptoParams(vcd->cryptos(), updated_vcd->cryptos())); - ASSERT_CRYPTO(updated_dcd, 1U, kDefaultSrtpCryptoSuite); - EXPECT_TRUE(CompareCryptoParams(dcd->cryptos(), updated_dcd->cryptos())); EXPECT_EQ(acd->type(), updated_acd->type()); EXPECT_EQ(acd->codecs(), updated_acd->codecs()); EXPECT_EQ(vcd->type(), updated_vcd->type()); EXPECT_EQ(vcd->codecs(), updated_vcd->codecs()); - EXPECT_EQ(dcd->type(), updated_dcd->type()); - EXPECT_EQ(dcd->codecs(), updated_dcd->codecs()); const StreamParamsVec& updated_audio_streams = updated_acd->streams(); ASSERT_EQ(1U, updated_audio_streams.size()); @@ -2687,10 +2690,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) { EXPECT_EQ(kVideoTrack2, updated_video_streams[1].id); // All media streams in one PeerConnection share one CNAME. 
EXPECT_EQ(updated_video_streams[1].cname, updated_video_streams[0].cname); - - const StreamParamsVec& updated_data_streams = updated_dcd->streams(); - ASSERT_EQ(1U, updated_data_streams.size()); - EXPECT_TRUE(data_streams[0] == updated_data_streams[0]); } // Create an updated offer after creating an answer to the original offer and @@ -3249,8 +3248,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, SimSsrcsGenerateMultipleRtxSsrcs) { } // Test that, when the FlexFEC codec is added, a FlexFEC ssrc is created -// together with a FEC-FR grouping. +// together with a FEC-FR grouping. Guarded by WebRTC-FlexFEC-03 trial. TEST_F(MediaSessionDescriptionFactoryTest, GenerateFlexfecSsrc) { + webrtc::test::ScopedFieldTrials override_field_trials( + "WebRTC-FlexFEC-03/Enabled/"); MediaSessionOptions opts; AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video", RtpTransceiverDirection::kSendRecv, kActive, @@ -3292,6 +3293,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, GenerateFlexfecSsrc) { // TODO(brandtr): Remove this test when we support simulcast, either through // multiple FlexfecSenders, or through multistream protection. 
TEST_F(MediaSessionDescriptionFactoryTest, SimSsrcsGenerateNoFlexfecSsrcs) { + webrtc::test::ScopedFieldTrials override_field_trials( + "WebRTC-FlexFEC-03/Enabled/"); MediaSessionOptions opts; AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video", RtpTransceiverDirection::kSendRecv, kActive, @@ -3340,12 +3343,11 @@ TEST_F(MediaSessionDescriptionFactoryTest, MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - f1_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension1)); - f1_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension1)); - f2_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension2)); - f2_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension2)); - + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension1), + MAKE_VECTOR(kVideoRtpExtension1), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension2), + MAKE_VECTOR(kVideoRtpExtension2), &opts); std::unique_ptr answer = f2_.CreateAnswer(offer.get(), opts, NULL); @@ -3396,9 +3398,8 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtensionIdReused) { MediaSessionOptions opts; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &opts); - f1_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension3)); - f1_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension3)); - + SetAudioVideoRtpHeaderExtensions(MAKE_VECTOR(kAudioRtpExtension3), + MAKE_VECTOR(kVideoRtpExtension3), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); // Since the audio extensions used ID 3 for "both_audio_and_video", so should @@ -3435,26 +3436,16 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtensionIdReusedEncrypted) { f1_.set_enable_encrypted_rtp_header_extensions(true); f2_.set_enable_encrypted_rtp_header_extensions(true); - f1_.set_audio_rtp_header_extensions( - MAKE_VECTOR(kAudioRtpExtension3ForEncryption)); - 
f1_.set_video_rtp_header_extensions( - MAKE_VECTOR(kVideoRtpExtension3ForEncryption)); - + SetAudioVideoRtpHeaderExtensions( + MAKE_VECTOR(kAudioRtpExtension3ForEncryption), + MAKE_VECTOR(kVideoRtpExtension3ForEncryption), &opts); std::unique_ptr offer = f1_.CreateOffer(opts, NULL); - // The extensions that are shared between audio and video should use the same - // id. - const RtpExtension kExpectedVideoRtpExtension[] = { - kVideoRtpExtension3ForEncryption[0], - kAudioRtpExtension3ForEncryptionOffer[1], - kAudioRtpExtension3ForEncryptionOffer[2], - }; - EXPECT_EQ( MAKE_VECTOR(kAudioRtpExtension3ForEncryptionOffer), GetFirstAudioContentDescription(offer.get())->rtp_header_extensions()); EXPECT_EQ( - MAKE_VECTOR(kExpectedVideoRtpExtension), + MAKE_VECTOR(kVideoRtpExtension3ForEncryptionOffer), GetFirstVideoContentDescription(offer.get())->rtp_header_extensions()); // Nothing should change when creating a new offer @@ -3464,7 +3455,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, RtpExtensionIdReusedEncrypted) { EXPECT_EQ(MAKE_VECTOR(kAudioRtpExtension3ForEncryptionOffer), GetFirstAudioContentDescription(updated_offer.get()) ->rtp_header_extensions()); - EXPECT_EQ(MAKE_VECTOR(kExpectedVideoRtpExtension), + EXPECT_EQ(MAKE_VECTOR(kVideoRtpExtension3ForEncryptionOffer), GetFirstVideoContentDescription(updated_offer.get()) ->rtp_header_extensions()); } @@ -3534,8 +3525,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferAudioCurrent) { TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferMultimedia) { MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); TestTransportInfo(true, options, false); } @@ -3543,16 +3532,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferMultimediaCurrent) { MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - 
AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); TestTransportInfo(true, options, true); } TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferBundle) { MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); options.bundle_enabled = true; TestTransportInfo(true, options, false); } @@ -3561,8 +3546,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferBundleCurrent) { MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); options.bundle_enabled = true; TestTransportInfo(true, options, true); } @@ -3598,8 +3581,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoAnswerMultimedia) { MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); TestTransportInfo(false, options, false); } @@ -3607,16 +3588,12 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoAnswerMultimediaCurrent) { MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); TestTransportInfo(false, options, true); } TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoAnswerBundle) { MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); options.bundle_enabled = true; TestTransportInfo(false, options, false); } @@ -3625,170 +3602,10 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoAnswerBundleCurrent) { MediaSessionOptions options; 
AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); options.bundle_enabled = true; TestTransportInfo(false, options, true); } -TEST_F(MediaSessionDescriptionFactoryTest, - TestTransportInfoOfferBundlesTransportOptions) { - MediaSessionOptions options; - AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - - cricket::OpaqueTransportParameters audio_params; - audio_params.protocol = "audio-transport"; - audio_params.parameters = "audio-params"; - FindFirstMediaDescriptionByMid("audio", &options) - ->transport_options.opaque_parameters = audio_params; - - cricket::OpaqueTransportParameters video_params; - video_params.protocol = "video-transport"; - video_params.parameters = "video-params"; - FindFirstMediaDescriptionByMid("video", &options) - ->transport_options.opaque_parameters = video_params; - - TestTransportInfo(/*offer=*/true, options, /*has_current_desc=*/false); -} - -TEST_F(MediaSessionDescriptionFactoryTest, - TestTransportInfoAnswerBundlesTransportOptions) { - MediaSessionOptions options; - AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - - cricket::OpaqueTransportParameters audio_params; - audio_params.protocol = "audio-transport"; - audio_params.parameters = "audio-params"; - FindFirstMediaDescriptionByMid("audio", &options) - ->transport_options.opaque_parameters = audio_params; - - cricket::OpaqueTransportParameters video_params; - video_params.protocol = "video-transport"; - video_params.parameters = "video-params"; - FindFirstMediaDescriptionByMid("video", &options) - ->transport_options.opaque_parameters = video_params; - - TestTransportInfo(/*offer=*/false, options, /*has_current_desc=*/false); -} - -TEST_F(MediaSessionDescriptionFactoryTest, AltProtocolAddedToOffer) { - MediaSessionOptions options; - AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, 
RtpTransceiverDirection::kRecvOnly, - &options); - - FindFirstMediaDescriptionByMid("audio", &options)->alt_protocol = "foo"; - FindFirstMediaDescriptionByMid("video", &options)->alt_protocol = "bar"; - FindFirstMediaDescriptionByMid("data", &options)->alt_protocol = "baz"; - - std::unique_ptr offer = f1_.CreateOffer(options, nullptr); - - EXPECT_EQ(offer->GetContentDescriptionByName("audio")->alt_protocol(), "foo"); - EXPECT_EQ(offer->GetContentDescriptionByName("video")->alt_protocol(), "bar"); - EXPECT_EQ(offer->GetContentDescriptionByName("data")->alt_protocol(), "baz"); -} - -TEST_F(MediaSessionDescriptionFactoryTest, AltProtocolAddedToAnswer) { - MediaSessionOptions options; - AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kRecvOnly, - &options); - - FindFirstMediaDescriptionByMid("audio", &options)->alt_protocol = "foo"; - FindFirstMediaDescriptionByMid("video", &options)->alt_protocol = "bar"; - FindFirstMediaDescriptionByMid("data", &options)->alt_protocol = "baz"; - - std::unique_ptr offer = f1_.CreateOffer(options, nullptr); - std::unique_ptr answer = - f1_.CreateAnswer(offer.get(), options, nullptr); - - EXPECT_EQ(answer->GetContentDescriptionByName("audio")->alt_protocol(), - "foo"); - EXPECT_EQ(answer->GetContentDescriptionByName("video")->alt_protocol(), - "bar"); - EXPECT_EQ(answer->GetContentDescriptionByName("data")->alt_protocol(), "baz"); -} - -TEST_F(MediaSessionDescriptionFactoryTest, AltProtocolNotInOffer) { - MediaSessionOptions options; - AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kRecvOnly, - &options); - - std::unique_ptr offer = f1_.CreateOffer(options, nullptr); - - FindFirstMediaDescriptionByMid("audio", &options)->alt_protocol = "foo"; - FindFirstMediaDescriptionByMid("video", &options)->alt_protocol = "bar"; - FindFirstMediaDescriptionByMid("data", 
&options)->alt_protocol = "baz"; - - std::unique_ptr answer = - f1_.CreateAnswer(offer.get(), options, nullptr); - - EXPECT_EQ(answer->GetContentDescriptionByName("audio")->alt_protocol(), - absl::nullopt); - EXPECT_EQ(answer->GetContentDescriptionByName("video")->alt_protocol(), - absl::nullopt); - EXPECT_EQ(answer->GetContentDescriptionByName("data")->alt_protocol(), - absl::nullopt); -} - -TEST_F(MediaSessionDescriptionFactoryTest, AltProtocolDifferentInOffer) { - MediaSessionOptions options; - AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kRecvOnly, - &options); - - FindFirstMediaDescriptionByMid("audio", &options)->alt_protocol = "not-foo"; - FindFirstMediaDescriptionByMid("video", &options)->alt_protocol = "not-bar"; - FindFirstMediaDescriptionByMid("data", &options)->alt_protocol = "not-baz"; - - std::unique_ptr offer = f1_.CreateOffer(options, nullptr); - - FindFirstMediaDescriptionByMid("audio", &options)->alt_protocol = "foo"; - FindFirstMediaDescriptionByMid("video", &options)->alt_protocol = "bar"; - FindFirstMediaDescriptionByMid("data", &options)->alt_protocol = "baz"; - - std::unique_ptr answer = - f1_.CreateAnswer(offer.get(), options, nullptr); - - EXPECT_EQ(answer->GetContentDescriptionByName("audio")->alt_protocol(), - absl::nullopt); - EXPECT_EQ(answer->GetContentDescriptionByName("video")->alt_protocol(), - absl::nullopt); - EXPECT_EQ(answer->GetContentDescriptionByName("data")->alt_protocol(), - absl::nullopt); -} - -TEST_F(MediaSessionDescriptionFactoryTest, AltProtocolNotInAnswer) { - MediaSessionOptions options; - AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_SCTP, RtpTransceiverDirection::kRecvOnly, - &options); - - FindFirstMediaDescriptionByMid("audio", &options)->alt_protocol = "foo"; - FindFirstMediaDescriptionByMid("video", &options)->alt_protocol = "bar"; - FindFirstMediaDescriptionByMid("data", 
&options)->alt_protocol = "baz"; - - std::unique_ptr offer = f1_.CreateOffer(options, nullptr); - - FindFirstMediaDescriptionByMid("audio", &options)->alt_protocol = - absl::nullopt; - FindFirstMediaDescriptionByMid("video", &options)->alt_protocol = - absl::nullopt; - FindFirstMediaDescriptionByMid("data", &options)->alt_protocol = - absl::nullopt; - - std::unique_ptr answer = - f1_.CreateAnswer(offer.get(), options, nullptr); - - EXPECT_EQ(answer->GetContentDescriptionByName("audio")->alt_protocol(), - absl::nullopt); - EXPECT_EQ(answer->GetContentDescriptionByName("video")->alt_protocol(), - absl::nullopt); - EXPECT_EQ(answer->GetContentDescriptionByName("data")->alt_protocol(), - absl::nullopt); -} - // Create an offer with bundle enabled and verify the crypto parameters are // the common set of the available cryptos. TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoWithOfferBundle) { @@ -3974,8 +3791,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoOfferDtlsButNotSdes) { tdf2_.set_secure(SEC_ENABLED); MediaSessionOptions options; AddAudioVideoSections(RtpTransceiverDirection::kRecvOnly, &options); - AddDataSection(cricket::DCT_RTP, RtpTransceiverDirection::kRecvOnly, - &options); // Generate an offer with DTLS but without SDES. 
std::unique_ptr offer = f1_.CreateOffer(options, NULL); @@ -3987,9 +3802,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoOfferDtlsButNotSdes) { const VideoContentDescription* video_offer = GetFirstVideoContentDescription(offer.get()); ASSERT_TRUE(video_offer->cryptos().empty()); - const RtpDataContentDescription* data_offer = - GetFirstRtpDataContentDescription(offer.get()); - ASSERT_TRUE(data_offer->cryptos().empty()); const cricket::TransportDescription* audio_offer_trans_desc = offer->GetTransportDescriptionByName("audio"); @@ -3997,9 +3809,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoOfferDtlsButNotSdes) { const cricket::TransportDescription* video_offer_trans_desc = offer->GetTransportDescriptionByName("video"); ASSERT_TRUE(video_offer_trans_desc->identity_fingerprint.get() != NULL); - const cricket::TransportDescription* data_offer_trans_desc = - offer->GetTransportDescriptionByName("data"); - ASSERT_TRUE(data_offer_trans_desc->identity_fingerprint.get() != NULL); // Generate an answer with DTLS. 
std::unique_ptr answer = @@ -4012,9 +3821,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoOfferDtlsButNotSdes) { const cricket::TransportDescription* video_answer_trans_desc = answer->GetTransportDescriptionByName("video"); EXPECT_TRUE(video_answer_trans_desc->identity_fingerprint.get() != NULL); - const cricket::TransportDescription* data_answer_trans_desc = - answer->GetTransportDescriptionByName("data"); - EXPECT_TRUE(data_answer_trans_desc->identity_fingerprint.get() != NULL); } // Verifies if vad_enabled option is set to false, CN codecs are not present in @@ -4048,7 +3854,6 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestMIDsMatchesExistingOffer) { AddMediaDescriptionOptions(MEDIA_TYPE_VIDEO, "video_modified", RtpTransceiverDirection::kRecvOnly, kActive, &opts); - opts.data_channel_type = cricket::DCT_SCTP; AddMediaDescriptionOptions(MEDIA_TYPE_DATA, "data_modified", RtpTransceiverDirection::kSendRecv, kActive, &opts); @@ -4520,12 +4325,10 @@ class MediaProtocolTest : public ::testing::TestWithParam { MAKE_VECTOR(kAudioCodecs1)); f1_.set_video_codecs(MAKE_VECTOR(kVideoCodecs1), MAKE_VECTOR(kVideoCodecs1)); - f1_.set_rtp_data_codecs(MAKE_VECTOR(kDataCodecs1)); f2_.set_audio_codecs(MAKE_VECTOR(kAudioCodecs2), MAKE_VECTOR(kAudioCodecs2)); f2_.set_video_codecs(MAKE_VECTOR(kVideoCodecs2), MAKE_VECTOR(kVideoCodecs2)); - f2_.set_rtp_data_codecs(MAKE_VECTOR(kDataCodecs2)); f1_.set_secure(SEC_ENABLED); f2_.set_secure(SEC_ENABLED); tdf1_.set_certificate(rtc::RTCCertificate::Create( @@ -4599,7 +4402,7 @@ TEST_F(MediaSessionDescriptionFactoryTest, TestSetAudioCodecs) { // properly. send_codecs[1].channels = 0; - // Alther iLBC receive codec to be lowercase, to test that case conversions + // Alter iLBC receive codec to be lowercase, to test that case conversions // are handled properly. 
recv_codecs[2].name = "ilbc"; @@ -4818,7 +4621,8 @@ void TestAudioCodecsAnswer(RtpTransceiverDirection offer_direction, kResultSendrecv_SendrecvCodecs); } break; - default: + case RtpTransceiverDirection::kStopped: + // This does not happen in any current test. RTC_NOTREACHED(); } diff --git a/pc/media_stream.cc b/pc/media_stream.cc index 00f491b3cb..08a2a723d0 100644 --- a/pc/media_stream.cc +++ b/pc/media_stream.cc @@ -31,9 +31,7 @@ static typename V::iterator FindTrack(V* vector, const std::string& track_id) { } rtc::scoped_refptr MediaStream::Create(const std::string& id) { - rtc::RefCountedObject* stream = - new rtc::RefCountedObject(id); - return stream; + return rtc::make_ref_counted(id); } MediaStream::MediaStream(const std::string& id) : id_(id) {} diff --git a/pc/media_stream.h b/pc/media_stream.h index 34299f46e3..6f16bea1d9 100644 --- a/pc/media_stream.h +++ b/pc/media_stream.h @@ -48,7 +48,7 @@ class MediaStream : public Notifier { template bool RemoveTrack(TrackVector* Tracks, MediaStreamTrackInterface* track); - std::string id_; + const std::string id_; AudioTrackVector audio_tracks_; VideoTrackVector video_tracks_; }; diff --git a/api/media_stream_proxy.h b/pc/media_stream_proxy.h similarity index 75% rename from api/media_stream_proxy.h rename to pc/media_stream_proxy.h index 516967998f..36069a4369 100644 --- a/api/media_stream_proxy.h +++ b/pc/media_stream_proxy.h @@ -8,21 +8,21 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef API_MEDIA_STREAM_PROXY_H_ -#define API_MEDIA_STREAM_PROXY_H_ +#ifndef PC_MEDIA_STREAM_PROXY_H_ +#define PC_MEDIA_STREAM_PROXY_H_ #include #include "api/media_stream_interface.h" -#include "api/proxy.h" +#include "pc/proxy.h" namespace webrtc { -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. 
-BEGIN_SIGNALING_PROXY_MAP(MediaStream) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_CONSTMETHOD0(std::string, id) +// TODO(deadbeef): Move this to a .cc file. What threads methods are called on +// is an implementation detail. +BEGIN_PRIMARY_PROXY_MAP(MediaStream) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +BYPASS_PROXY_CONSTMETHOD0(std::string, id) PROXY_METHOD0(AudioTrackVector, GetAudioTracks) PROXY_METHOD0(VideoTrackVector, GetVideoTracks) PROXY_METHOD1(rtc::scoped_refptr, @@ -37,8 +37,8 @@ PROXY_METHOD1(bool, RemoveTrack, AudioTrackInterface*) PROXY_METHOD1(bool, RemoveTrack, VideoTrackInterface*) PROXY_METHOD1(void, RegisterObserver, ObserverInterface*) PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*) -END_PROXY_MAP() +END_PROXY_MAP(MediaStream) } // namespace webrtc -#endif // API_MEDIA_STREAM_PROXY_H_ +#endif // PC_MEDIA_STREAM_PROXY_H_ diff --git a/api/media_stream_track_proxy.h b/pc/media_stream_track_proxy.h similarity index 50% rename from api/media_stream_track_proxy.h rename to pc/media_stream_track_proxy.h index d3dc25504b..f563137c77 100644 --- a/api/media_stream_track_proxy.h +++ b/pc/media_stream_track_proxy.h @@ -11,26 +11,25 @@ // This file includes proxy classes for tracks. The purpose is // to make sure tracks are only accessed from the signaling thread. -#ifndef API_MEDIA_STREAM_TRACK_PROXY_H_ -#define API_MEDIA_STREAM_TRACK_PROXY_H_ +#ifndef PC_MEDIA_STREAM_TRACK_PROXY_H_ +#define PC_MEDIA_STREAM_TRACK_PROXY_H_ #include #include "api/media_stream_interface.h" -#include "api/proxy.h" +#include "pc/proxy.h" namespace webrtc { -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. - -BEGIN_SIGNALING_PROXY_MAP(AudioTrack) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_CONSTMETHOD0(std::string, kind) -PROXY_CONSTMETHOD0(std::string, id) +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
+BEGIN_PRIMARY_PROXY_MAP(AudioTrack) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +BYPASS_PROXY_CONSTMETHOD0(std::string, kind) +BYPASS_PROXY_CONSTMETHOD0(std::string, id) PROXY_CONSTMETHOD0(TrackState, state) PROXY_CONSTMETHOD0(bool, enabled) -PROXY_CONSTMETHOD0(AudioSourceInterface*, GetSource) +BYPASS_PROXY_CONSTMETHOD0(AudioSourceInterface*, GetSource) PROXY_METHOD1(void, AddSink, AudioTrackSinkInterface*) PROXY_METHOD1(void, RemoveSink, AudioTrackSinkInterface*) PROXY_METHOD1(bool, GetSignalLevel, int*) @@ -38,28 +37,28 @@ PROXY_METHOD0(rtc::scoped_refptr, GetAudioProcessor) PROXY_METHOD1(bool, set_enabled, bool) PROXY_METHOD1(void, RegisterObserver, ObserverInterface*) PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*) -END_PROXY_MAP() +END_PROXY_MAP(AudioTrack) BEGIN_PROXY_MAP(VideoTrack) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_CONSTMETHOD0(std::string, kind) -PROXY_CONSTMETHOD0(std::string, id) -PROXY_CONSTMETHOD0(TrackState, state) -PROXY_CONSTMETHOD0(bool, enabled) -PROXY_METHOD1(bool, set_enabled, bool) -PROXY_CONSTMETHOD0(ContentHint, content_hint) -PROXY_METHOD1(void, set_content_hint, ContentHint) -PROXY_WORKER_METHOD2(void, - AddOrUpdateSink, - rtc::VideoSinkInterface*, - const rtc::VideoSinkWants&) -PROXY_WORKER_METHOD1(void, RemoveSink, rtc::VideoSinkInterface*) -PROXY_CONSTMETHOD0(VideoTrackSourceInterface*, GetSource) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +BYPASS_PROXY_CONSTMETHOD0(std::string, kind) +BYPASS_PROXY_CONSTMETHOD0(std::string, id) +PROXY_SECONDARY_CONSTMETHOD0(TrackState, state) +PROXY_SECONDARY_CONSTMETHOD0(bool, enabled) +PROXY_SECONDARY_METHOD1(bool, set_enabled, bool) +PROXY_SECONDARY_CONSTMETHOD0(ContentHint, content_hint) +PROXY_SECONDARY_METHOD1(void, set_content_hint, ContentHint) +PROXY_SECONDARY_METHOD2(void, + AddOrUpdateSink, + rtc::VideoSinkInterface*, + const rtc::VideoSinkWants&) +PROXY_SECONDARY_METHOD1(void, RemoveSink, rtc::VideoSinkInterface*) +BYPASS_PROXY_CONSTMETHOD0(VideoTrackSourceInterface*, GetSource) 
PROXY_METHOD1(void, RegisterObserver, ObserverInterface*) PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*) -END_PROXY_MAP() +END_PROXY_MAP(VideoTrack) } // namespace webrtc -#endif // API_MEDIA_STREAM_TRACK_PROXY_H_ +#endif // PC_MEDIA_STREAM_TRACK_PROXY_H_ diff --git a/pc/peer_connection.cc b/pc/peer_connection.cc index 05e7b95591..276af1787d 100644 --- a/pc/peer_connection.cc +++ b/pc/peer_connection.cc @@ -10,55 +10,56 @@ #include "pc/peer_connection.h" +#include +#include + #include -#include #include -#include #include #include -#include #include "absl/algorithm/container.h" #include "absl/strings/match.h" #include "api/jsep_ice_candidate.h" -#include "api/jsep_session_description.h" -#include "api/media_stream_proxy.h" -#include "api/media_stream_track_proxy.h" -#include "api/rtc_error.h" -#include "api/rtc_event_log/rtc_event_log.h" -#include "api/rtc_event_log_output_file.h" #include "api/rtp_parameters.h" +#include "api/rtp_transceiver_direction.h" +#include "api/task_queue/queued_task.h" +#include "api/transport/webrtc_key_value_config.h" #include "api/uma_metrics.h" -#include "api/video/builtin_video_bitrate_allocator_factory.h" -#include "call/call.h" -#include "logging/rtc_event_log/ice_logger.h" +#include "api/video/video_codec_constants.h" +#include "call/audio_state.h" +#include "call/packet_receiver.h" +#include "media/base/media_channel.h" +#include "media/base/media_config.h" #include "media/base/rid_description.h" -#include "media/sctp/sctp_transport.h" -#include "pc/audio_rtp_receiver.h" -#include "pc/audio_track.h" +#include "media/base/stream_params.h" +#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "p2p/base/basic_async_resolver_factory.h" +#include "p2p/base/connection.h" +#include "p2p/base/connection_info.h" +#include "p2p/base/dtls_transport_internal.h" +#include "p2p/base/p2p_constants.h" +#include "p2p/base/p2p_transport_channel.h" +#include "p2p/base/transport_info.h" #include "pc/channel.h" -#include 
"pc/channel_manager.h" -#include "pc/dtmf_sender.h" -#include "pc/media_stream.h" -#include "pc/media_stream_observer.h" -#include "pc/remote_audio_source.h" -#include "pc/rtp_media_utils.h" +#include "pc/ice_server_parsing.h" #include "pc/rtp_receiver.h" #include "pc/rtp_sender.h" #include "pc/sctp_transport.h" -#include "pc/sctp_utils.h" -#include "pc/sdp_utils.h" -#include "pc/stream_collection.h" -#include "pc/video_rtp_receiver.h" -#include "pc/video_track.h" -#include "rtc_base/bind.h" -#include "rtc_base/checks.h" +#include "pc/simulcast_description.h" +#include "pc/webrtc_session_description_factory.h" +#include "rtc_base/helpers.h" +#include "rtc_base/ip_address.h" +#include "rtc_base/location.h" #include "rtc_base/logging.h" +#include "rtc_base/net_helper.h" +#include "rtc_base/network_constants.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/socket_address.h" #include "rtc_base/string_encode.h" -#include "rtc_base/strings/string_builder.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/trace_event.h" -#include "system_wrappers/include/clock.h" -#include "system_wrappers/include/field_trial.h" +#include "rtc_base/unique_id_generator.h" #include "system_wrappers/include/metrics.h" using cricket::ContentInfo; @@ -81,166 +82,14 @@ using cricket::STUN_PORT_TYPE; namespace webrtc { -// Error messages -const char kBundleWithoutRtcpMux[] = - "rtcp-mux must be enabled when BUNDLE " - "is enabled."; -const char kInvalidCandidates[] = "Description contains invalid candidates."; -const char kInvalidSdp[] = "Invalid session description."; -const char kMlineMismatchInAnswer[] = - "The order of m-lines in answer doesn't match order in offer. 
Rejecting " - "answer."; -const char kMlineMismatchInSubsequentOffer[] = - "The order of m-lines in subsequent offer doesn't match order from " - "previous offer/answer."; -const char kSdpWithoutDtlsFingerprint[] = - "Called with SDP without DTLS fingerprint."; -const char kSdpWithoutSdesCrypto[] = "Called with SDP without SDES crypto."; -const char kSdpWithoutIceUfragPwd[] = - "Called with SDP without ice-ufrag and ice-pwd."; -const char kSessionError[] = "Session error code: "; -const char kSessionErrorDesc[] = "Session error description: "; -const char kDtlsSrtpSetupFailureRtp[] = - "Couldn't set up DTLS-SRTP on RTP channel."; -const char kDtlsSrtpSetupFailureRtcp[] = - "Couldn't set up DTLS-SRTP on RTCP channel."; - namespace { -// Field trials. -// Controls datagram transport support. -const char kDatagramTransportFieldTrial[] = "WebRTC-DatagramTransport"; -// Controls datagram transport data channel support. -const char kDatagramTransportDataChannelFieldTrial[] = - "WebRTC-DatagramTransportDataChannels"; - // UMA metric names. -const char kSimulcastVersionApplyLocalDescription[] = - "WebRTC.PeerConnection.Simulcast.ApplyLocalDescription"; -const char kSimulcastVersionApplyRemoteDescription[] = - "WebRTC.PeerConnection.Simulcast.ApplyRemoteDescription"; const char kSimulcastNumberOfEncodings[] = "WebRTC.PeerConnection.Simulcast.NumberOfSendEncodings"; -const char kSimulcastDisabled[] = "WebRTC.PeerConnection.Simulcast.Disabled"; - -static const char kDefaultStreamId[] = "default"; -static const char kDefaultAudioSenderId[] = "defaulta0"; -static const char kDefaultVideoSenderId[] = "defaultv0"; - -// The length of RTCP CNAMEs. 
-static const int kRtcpCnameLength = 16; - -enum { - MSG_SET_SESSIONDESCRIPTION_SUCCESS = 0, - MSG_SET_SESSIONDESCRIPTION_FAILED, - MSG_CREATE_SESSIONDESCRIPTION_FAILED, - MSG_GETSTATS, - MSG_REPORT_USAGE_PATTERN, -}; static const int REPORT_USAGE_PATTERN_DELAY_MS = 60000; -struct SetSessionDescriptionMsg : public rtc::MessageData { - explicit SetSessionDescriptionMsg( - webrtc::SetSessionDescriptionObserver* observer) - : observer(observer) {} - - rtc::scoped_refptr observer; - RTCError error; -}; - -struct CreateSessionDescriptionMsg : public rtc::MessageData { - explicit CreateSessionDescriptionMsg( - webrtc::CreateSessionDescriptionObserver* observer) - : observer(observer) {} - - rtc::scoped_refptr observer; - RTCError error; -}; - -struct GetStatsMsg : public rtc::MessageData { - GetStatsMsg(webrtc::StatsObserver* observer, - webrtc::MediaStreamTrackInterface* track) - : observer(observer), track(track) {} - rtc::scoped_refptr observer; - rtc::scoped_refptr track; -}; - -// Check if we can send |new_stream| on a PeerConnection. -bool CanAddLocalMediaStream(webrtc::StreamCollectionInterface* current_streams, - webrtc::MediaStreamInterface* new_stream) { - if (!new_stream || !current_streams) { - return false; - } - if (current_streams->find(new_stream->id()) != nullptr) { - RTC_LOG(LS_ERROR) << "MediaStream with ID " << new_stream->id() - << " is already added."; - return false; - } - return true; -} - -// If the direction is "recvonly" or "inactive", treat the description -// as containing no streams. -// See: https://code.google.com/p/webrtc/issues/detail?id=5054 -std::vector GetActiveStreams( - const cricket::MediaContentDescription* desc) { - return RtpTransceiverDirectionHasSend(desc->direction()) - ? 
desc->streams() - : std::vector(); -} - -bool IsValidOfferToReceiveMedia(int value) { - typedef PeerConnectionInterface::RTCOfferAnswerOptions Options; - return (value >= Options::kUndefined) && - (value <= Options::kMaxOfferToReceiveMedia); -} - -// Add options to |[audio/video]_media_description_options| from |senders|. -void AddPlanBRtpSenderOptions( - const std::vector>>& senders, - cricket::MediaDescriptionOptions* audio_media_description_options, - cricket::MediaDescriptionOptions* video_media_description_options, - int num_sim_layers) { - for (const auto& sender : senders) { - if (sender->media_type() == cricket::MEDIA_TYPE_AUDIO) { - if (audio_media_description_options) { - audio_media_description_options->AddAudioSender( - sender->id(), sender->internal()->stream_ids()); - } - } else { - RTC_DCHECK(sender->media_type() == cricket::MEDIA_TYPE_VIDEO); - if (video_media_description_options) { - video_media_description_options->AddVideoSender( - sender->id(), sender->internal()->stream_ids(), {}, - SimulcastLayerList(), num_sim_layers); - } - } - } -} - -// Add options to |session_options| from |rtp_data_channels|. -void AddRtpDataChannelOptions( - const std::map>& - rtp_data_channels, - cricket::MediaDescriptionOptions* data_media_description_options) { - if (!data_media_description_options) { - return; - } - // Check for data channels. - for (const auto& kv : rtp_data_channels) { - const DataChannel* channel = kv.second; - if (channel->state() == DataChannel::kConnecting || - channel->state() == DataChannel::kOpen) { - // Legacy RTP data channels are signaled with the track/stream ID set to - // the data channel's label. 
- data_media_description_options->AddRtpDataChannel(channel->label(), - channel->label()); - } - } -} - uint32_t ConvertIceTransportTypeToCandidateFilter( PeerConnectionInterface::IceTransportsType type) { switch (type) { @@ -258,26 +107,6 @@ uint32_t ConvertIceTransportTypeToCandidateFilter( return cricket::CF_NONE; } -std::string GetSignalingStateString( - PeerConnectionInterface::SignalingState state) { - switch (state) { - case PeerConnectionInterface::kStable: - return "kStable"; - case PeerConnectionInterface::kHaveLocalOffer: - return "kHaveLocalOffer"; - case PeerConnectionInterface::kHaveLocalPrAnswer: - return "kHavePrAnswer"; - case PeerConnectionInterface::kHaveRemoteOffer: - return "kHaveRemoteOffer"; - case PeerConnectionInterface::kHaveRemotePrAnswer: - return "kHaveRemotePrAnswer"; - case PeerConnectionInterface::kClosed: - return "kClosed"; - } - RTC_NOTREACHED(); - return ""; -} - IceCandidatePairType GetIceCandidatePairCounter( const cricket::Candidate& local, const cricket::Candidate& remote) { @@ -351,254 +180,6 @@ IceCandidatePairType GetIceCandidatePairCounter( return kIceCandidatePairMax; } -// Logic to decide if an m= section can be recycled. This means that the new -// m= section is not rejected, but the old local or remote m= section is -// rejected. |old_content_one| and |old_content_two| refer to the m= section -// of the old remote and old local descriptions in no particular order. -// We need to check both the old local and remote because either -// could be the most current from the latest negotation. -bool IsMediaSectionBeingRecycled(SdpType type, - const ContentInfo& content, - const ContentInfo* old_content_one, - const ContentInfo* old_content_two) { - return type == SdpType::kOffer && !content.rejected && - ((old_content_one && old_content_one->rejected) || - (old_content_two && old_content_two->rejected)); -} - -// Verify that the order of media sections in |new_desc| matches -// |current_desc|. 
The number of m= sections in |new_desc| should be no -// less than |current_desc|. In the case of checking an answer's -// |new_desc|, the |current_desc| is the last offer that was set as the -// local or remote. In the case of checking an offer's |new_desc| we -// check against the local and remote descriptions stored from the last -// negotiation, because either of these could be the most up to date for -// possible rejected m sections. These are the |current_desc| and -// |secondary_current_desc|. -bool MediaSectionsInSameOrder(const SessionDescription& current_desc, - const SessionDescription* secondary_current_desc, - const SessionDescription& new_desc, - const SdpType type) { - if (current_desc.contents().size() > new_desc.contents().size()) { - return false; - } - - for (size_t i = 0; i < current_desc.contents().size(); ++i) { - const cricket::ContentInfo* secondary_content_info = nullptr; - if (secondary_current_desc && - i < secondary_current_desc->contents().size()) { - secondary_content_info = &secondary_current_desc->contents()[i]; - } - if (IsMediaSectionBeingRecycled(type, new_desc.contents()[i], - ¤t_desc.contents()[i], - secondary_content_info)) { - // For new offer descriptions, if the media section can be recycled, it's - // valid for the MID and media type to change. 
- continue; - } - if (new_desc.contents()[i].name != current_desc.contents()[i].name) { - return false; - } - const MediaContentDescription* new_desc_mdesc = - new_desc.contents()[i].media_description(); - const MediaContentDescription* current_desc_mdesc = - current_desc.contents()[i].media_description(); - if (new_desc_mdesc->type() != current_desc_mdesc->type()) { - return false; - } - } - return true; -} - -bool MediaSectionsHaveSameCount(const SessionDescription& desc1, - const SessionDescription& desc2) { - return desc1.contents().size() == desc2.contents().size(); -} - -void NoteKeyProtocolAndMedia(KeyExchangeProtocolType protocol_type, - cricket::MediaType media_type) { - // Array of structs needed to map {KeyExchangeProtocolType, - // cricket::MediaType} to KeyExchangeProtocolMedia without using std::map in - // order to avoid -Wglobal-constructors and -Wexit-time-destructors. - static constexpr struct { - KeyExchangeProtocolType protocol_type; - cricket::MediaType media_type; - KeyExchangeProtocolMedia protocol_media; - } kEnumCounterKeyProtocolMediaMap[] = { - {kEnumCounterKeyProtocolDtls, cricket::MEDIA_TYPE_AUDIO, - kEnumCounterKeyProtocolMediaTypeDtlsAudio}, - {kEnumCounterKeyProtocolDtls, cricket::MEDIA_TYPE_VIDEO, - kEnumCounterKeyProtocolMediaTypeDtlsVideo}, - {kEnumCounterKeyProtocolDtls, cricket::MEDIA_TYPE_DATA, - kEnumCounterKeyProtocolMediaTypeDtlsData}, - {kEnumCounterKeyProtocolSdes, cricket::MEDIA_TYPE_AUDIO, - kEnumCounterKeyProtocolMediaTypeSdesAudio}, - {kEnumCounterKeyProtocolSdes, cricket::MEDIA_TYPE_VIDEO, - kEnumCounterKeyProtocolMediaTypeSdesVideo}, - {kEnumCounterKeyProtocolSdes, cricket::MEDIA_TYPE_DATA, - kEnumCounterKeyProtocolMediaTypeSdesData}, - }; - - RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.KeyProtocol", protocol_type, - kEnumCounterKeyProtocolMax); - - for (const auto& i : kEnumCounterKeyProtocolMediaMap) { - if (i.protocol_type == protocol_type && i.media_type == media_type) { - 
RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.KeyProtocolByMedia", - i.protocol_media, - kEnumCounterKeyProtocolMediaTypeMax); - } - } -} - -void NoteAddIceCandidateResult(int result) { - RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.AddIceCandidate", result, - kAddIceCandidateMax); -} - -// Checks that each non-rejected content has SDES crypto keys or a DTLS -// fingerprint, unless it's in a BUNDLE group, in which case only the -// BUNDLE-tag section (first media section/description in the BUNDLE group) -// needs a ufrag and pwd. Mismatches, such as replying with a DTLS fingerprint -// to SDES keys, will be caught in JsepTransport negotiation, and backstopped -// by Channel's |srtp_required| check. -RTCError VerifyCrypto(const SessionDescription* desc, bool dtls_enabled) { - const cricket::ContentGroup* bundle = - desc->GetGroupByName(cricket::GROUP_TYPE_BUNDLE); - for (const cricket::ContentInfo& content_info : desc->contents()) { - if (content_info.rejected) { - continue; - } - // Note what media is used with each crypto protocol, for all sections. - NoteKeyProtocolAndMedia(dtls_enabled ? webrtc::kEnumCounterKeyProtocolDtls - : webrtc::kEnumCounterKeyProtocolSdes, - content_info.media_description()->type()); - const std::string& mid = content_info.name; - if (bundle && bundle->HasContentName(mid) && - mid != *(bundle->FirstContentName())) { - // This isn't the first media section in the BUNDLE group, so it's not - // required to have crypto attributes, since only the crypto attributes - // from the first section actually get used. - continue; - } - - // If the content isn't rejected or bundled into another m= section, crypto - // must be present. - const MediaContentDescription* media = content_info.media_description(); - const TransportInfo* tinfo = desc->GetTransportInfoByName(mid); - if (!media || !tinfo) { - // Something is not right. 
- LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, kInvalidSdp); - } - if (dtls_enabled) { - if (!tinfo->description.identity_fingerprint) { - RTC_LOG(LS_WARNING) - << "Session description must have DTLS fingerprint if " - "DTLS enabled."; - return RTCError(RTCErrorType::INVALID_PARAMETER, - kSdpWithoutDtlsFingerprint); - } - } else { - if (media->cryptos().empty()) { - RTC_LOG(LS_WARNING) - << "Session description must have SDES when DTLS disabled."; - return RTCError(RTCErrorType::INVALID_PARAMETER, kSdpWithoutSdesCrypto); - } - } - } - return RTCError::OK(); -} - -// Checks that each non-rejected content has ice-ufrag and ice-pwd set, unless -// it's in a BUNDLE group, in which case only the BUNDLE-tag section (first -// media section/description in the BUNDLE group) needs a ufrag and pwd. -bool VerifyIceUfragPwdPresent(const SessionDescription* desc) { - const cricket::ContentGroup* bundle = - desc->GetGroupByName(cricket::GROUP_TYPE_BUNDLE); - for (const cricket::ContentInfo& content_info : desc->contents()) { - if (content_info.rejected) { - continue; - } - const std::string& mid = content_info.name; - if (bundle && bundle->HasContentName(mid) && - mid != *(bundle->FirstContentName())) { - // This isn't the first media section in the BUNDLE group, so it's not - // required to have ufrag/password, since only the ufrag/password from - // the first section actually get used. - continue; - } - - // If the content isn't rejected or bundled into another m= section, - // ice-ufrag and ice-pwd must be present. - const TransportInfo* tinfo = desc->GetTransportInfoByName(mid); - if (!tinfo) { - // Something is not right. - RTC_LOG(LS_ERROR) << kInvalidSdp; - return false; - } - if (tinfo->description.ice_ufrag.empty() || - tinfo->description.ice_pwd.empty()) { - RTC_LOG(LS_ERROR) << "Session description must have ice ufrag and pwd."; - return false; - } - } - return true; -} - -// Returns true if |new_desc| requests an ICE restart (i.e., new ufrag/pwd). 
-bool CheckForRemoteIceRestart(const SessionDescriptionInterface* old_desc, - const SessionDescriptionInterface* new_desc, - const std::string& content_name) { - if (!old_desc) { - return false; - } - const SessionDescription* new_sd = new_desc->description(); - const SessionDescription* old_sd = old_desc->description(); - const ContentInfo* cinfo = new_sd->GetContentByName(content_name); - if (!cinfo || cinfo->rejected) { - return false; - } - // If the content isn't rejected, check if ufrag and password has changed. - const cricket::TransportDescription* new_transport_desc = - new_sd->GetTransportDescriptionByName(content_name); - const cricket::TransportDescription* old_transport_desc = - old_sd->GetTransportDescriptionByName(content_name); - if (!new_transport_desc || !old_transport_desc) { - // No transport description exists. This is not an ICE restart. - return false; - } - if (cricket::IceCredentialsChanged( - old_transport_desc->ice_ufrag, old_transport_desc->ice_pwd, - new_transport_desc->ice_ufrag, new_transport_desc->ice_pwd)) { - RTC_LOG(LS_INFO) << "Remote peer requests ICE restart for " << content_name - << "."; - return true; - } - return false; -} - -// Generates a string error message for SetLocalDescription/SetRemoteDescription -// from an RTCError. -std::string GetSetDescriptionErrorMessage(cricket::ContentSource source, - SdpType type, - const RTCError& error) { - rtc::StringBuilder oss; - oss << "Failed to set " << (source == cricket::CS_LOCAL ? 
"local" : "remote") - << " " << SdpTypeToString(type) << " sdp: " << error.message(); - return oss.Release(); -} - -std::string GetStreamIdsString(rtc::ArrayView stream_ids) { - std::string output = "streams=["; - const char* separator = ""; - for (const auto& stream_id : stream_ids) { - output.append(separator).append(stream_id); - separator = ", "; - } - output.append("]"); - return output; -} - absl::optional RTCConfigurationToIceConfigOptionalInt( int rtc_configuration_parameter) { if (rtc_configuration_parameter == @@ -608,246 +189,98 @@ absl::optional RTCConfigurationToIceConfigOptionalInt( return rtc_configuration_parameter; } -void ReportSimulcastApiVersion(const char* name, - const SessionDescription& session) { - bool has_legacy = false; - bool has_spec_compliant = false; - for (const ContentInfo& content : session.contents()) { - if (!content.media_description()) { - continue; - } - has_spec_compliant |= content.media_description()->HasSimulcast(); - for (const StreamParams& sp : content.media_description()->streams()) { - has_legacy |= sp.has_ssrc_group(cricket::kSimSsrcGroupSemantics); - } +// Check if the changes of IceTransportsType motivates an ice restart. 
+bool NeedIceRestart(bool surface_ice_candidates_on_ice_transport_type_changed, + PeerConnectionInterface::IceTransportsType current, + PeerConnectionInterface::IceTransportsType modified) { + if (current == modified) { + return false; } - if (has_legacy) { - RTC_HISTOGRAM_ENUMERATION(name, kSimulcastApiVersionLegacy, - kSimulcastApiVersionMax); - } - if (has_spec_compliant) { - RTC_HISTOGRAM_ENUMERATION(name, kSimulcastApiVersionSpecCompliant, - kSimulcastApiVersionMax); - } - if (!has_legacy && !has_spec_compliant) { - RTC_HISTOGRAM_ENUMERATION(name, kSimulcastApiVersionNone, - kSimulcastApiVersionMax); + if (!surface_ice_candidates_on_ice_transport_type_changed) { + return true; } -} - -const ContentInfo* FindTransceiverMSection( - RtpTransceiverProxyWithInternal* transceiver, - const SessionDescriptionInterface* session_description) { - return transceiver->mid() - ? session_description->description()->GetContentByName( - *transceiver->mid()) - : nullptr; -} -// Wraps a CreateSessionDescriptionObserver and an OperationsChain operation -// complete callback. When the observer is invoked, the wrapped observer is -// invoked followed by invoking the completion callback. 
-class CreateSessionDescriptionObserverOperationWrapper - : public CreateSessionDescriptionObserver { - public: - CreateSessionDescriptionObserverOperationWrapper( - rtc::scoped_refptr observer, - std::function operation_complete_callback) - : observer_(std::move(observer)), - operation_complete_callback_(std::move(operation_complete_callback)) { - RTC_DCHECK(observer_); - } - ~CreateSessionDescriptionObserverOperationWrapper() override { - RTC_DCHECK(was_called_); - } + auto current_filter = ConvertIceTransportTypeToCandidateFilter(current); + auto modified_filter = ConvertIceTransportTypeToCandidateFilter(modified); - void OnSuccess(SessionDescriptionInterface* desc) override { - RTC_DCHECK(!was_called_); -#ifdef RTC_DCHECK_IS_ON - was_called_ = true; -#endif // RTC_DCHECK_IS_ON - // Completing the operation before invoking the observer allows the observer - // to execute SetLocalDescription() without delay. - operation_complete_callback_(); - observer_->OnSuccess(desc); - } + // If surface_ice_candidates_on_ice_transport_type_changed is true and we + // extend the filter, then no ice restart is needed. 
+ return (current_filter & modified_filter) != current_filter; +} - void OnFailure(RTCError error) override { - RTC_DCHECK(!was_called_); -#ifdef RTC_DCHECK_IS_ON - was_called_ = true; -#endif // RTC_DCHECK_IS_ON - operation_complete_callback_(); - observer_->OnFailure(std::move(error)); +cricket::IceConfig ParseIceConfig( + const PeerConnectionInterface::RTCConfiguration& config) { + cricket::ContinualGatheringPolicy gathering_policy; + switch (config.continual_gathering_policy) { + case PeerConnectionInterface::GATHER_ONCE: + gathering_policy = cricket::GATHER_ONCE; + break; + case PeerConnectionInterface::GATHER_CONTINUALLY: + gathering_policy = cricket::GATHER_CONTINUALLY; + break; + default: + RTC_NOTREACHED(); + gathering_policy = cricket::GATHER_ONCE; } - private: -#ifdef RTC_DCHECK_IS_ON - bool was_called_ = false; -#endif // RTC_DCHECK_IS_ON - rtc::scoped_refptr observer_; - std::function operation_complete_callback_; -}; - -} // namespace + cricket::IceConfig ice_config; + ice_config.receiving_timeout = RTCConfigurationToIceConfigOptionalInt( + config.ice_connection_receiving_timeout); + ice_config.prioritize_most_likely_candidate_pairs = + config.prioritize_most_likely_ice_candidate_pairs; + ice_config.backup_connection_ping_interval = + RTCConfigurationToIceConfigOptionalInt( + config.ice_backup_candidate_pair_ping_interval); + ice_config.continual_gathering_policy = gathering_policy; + ice_config.presume_writable_when_fully_relayed = + config.presume_writable_when_fully_relayed; + ice_config.surface_ice_candidates_on_ice_transport_type_changed = + config.surface_ice_candidates_on_ice_transport_type_changed; + ice_config.ice_check_interval_strong_connectivity = + config.ice_check_interval_strong_connectivity; + ice_config.ice_check_interval_weak_connectivity = + config.ice_check_interval_weak_connectivity; + ice_config.ice_check_min_interval = config.ice_check_min_interval; + ice_config.ice_unwritable_timeout = config.ice_unwritable_timeout; + 
ice_config.ice_unwritable_min_checks = config.ice_unwritable_min_checks; + ice_config.ice_inactive_timeout = config.ice_inactive_timeout; + ice_config.stun_keepalive_interval = config.stun_candidate_keepalive_interval; + ice_config.network_preference = config.network_preference; + ice_config.stable_writable_connection_ping_interval = + config.stable_writable_connection_ping_interval_ms; + return ice_config; +} -// Used by parameterless SetLocalDescription() to create an offer or answer. -// Upon completion of creating the session description, SetLocalDescription() is -// invoked with the result. -// For consistency with DoSetLocalDescription(), if the PeerConnection is -// destroyed midst operation, we DO NOT inform the -// |set_local_description_observer| that the operation failed. -// TODO(hbos): If/when we process SLD messages in ~PeerConnection, the -// consistent thing would be to inform the observer here. -class PeerConnection::ImplicitCreateSessionDescriptionObserver - : public CreateSessionDescriptionObserver { - public: - ImplicitCreateSessionDescriptionObserver( - rtc::WeakPtr pc, - rtc::scoped_refptr - set_local_description_observer) - : pc_(std::move(pc)), - set_local_description_observer_( - std::move(set_local_description_observer)) {} - ~ImplicitCreateSessionDescriptionObserver() override { - RTC_DCHECK(was_called_); - } - - void SetOperationCompleteCallback( - std::function operation_complete_callback) { - operation_complete_callback_ = std::move(operation_complete_callback); - } - - bool was_called() const { return was_called_; } - - void OnSuccess(SessionDescriptionInterface* desc_ptr) override { - RTC_DCHECK(!was_called_); - std::unique_ptr desc(desc_ptr); - was_called_ = true; - - // Abort early if |pc_| is no longer valid. 
- if (!pc_) { - operation_complete_callback_(); - return; - } - // DoSetLocalDescription() is currently implemented as a synchronous - // operation but where the |set_local_description_observer_|'s callbacks are - // invoked asynchronously in a post to PeerConnection::OnMessage(). - pc_->DoSetLocalDescription(std::move(desc), - std::move(set_local_description_observer_)); - // For backwards-compatability reasons, we declare the operation as - // completed here (rather than in PeerConnection::OnMessage()). This ensures - // that subsequent offer/answer operations can start immediately (without - // waiting for OnMessage()). - operation_complete_callback_(); - } - - void OnFailure(RTCError error) override { - RTC_DCHECK(!was_called_); - was_called_ = true; - - // Abort early if |pc_| is no longer valid. - if (!pc_) { - operation_complete_callback_(); - return; - } - // DoSetLocalDescription() reports its failures in a post. We do the - // same thing here for consistency. - pc_->PostSetSessionDescriptionFailure( - set_local_description_observer_, - RTCError(error.type(), - std::string("SetLocalDescription failed to create " - "session description - ") + - error.message())); - operation_complete_callback_(); - } - - private: - bool was_called_ = false; - rtc::WeakPtr pc_; - rtc::scoped_refptr - set_local_description_observer_; - std::function operation_complete_callback_; -}; - -class PeerConnection::LocalIceCredentialsToReplace { - public: - // Sets the ICE credentials that need restarting to the ICE credentials of - // the current and pending descriptions. 
- void SetIceCredentialsFromLocalDescriptions( - const SessionDescriptionInterface* current_local_description, - const SessionDescriptionInterface* pending_local_description) { - ice_credentials_.clear(); - if (current_local_description) { - AppendIceCredentialsFromSessionDescription(*current_local_description); - } - if (pending_local_description) { - AppendIceCredentialsFromSessionDescription(*pending_local_description); - } - } +// Ensures the configuration doesn't have any parameters with invalid values, +// or values that conflict with other parameters. +// +// Returns RTCError::OK() if there are no issues. +RTCError ValidateConfiguration( + const PeerConnectionInterface::RTCConfiguration& config) { + return cricket::P2PTransportChannel::ValidateIceConfig( + ParseIceConfig(config)); +} - void ClearIceCredentials() { ice_credentials_.clear(); } +bool HasRtcpMuxEnabled(const cricket::ContentInfo* content) { + return content->media_description()->rtcp_mux(); +} - // Returns true if we have ICE credentials that need restarting. - bool HasIceCredentials() const { return !ice_credentials_.empty(); } +bool DtlsEnabled(const PeerConnectionInterface::RTCConfiguration& configuration, + const PeerConnectionFactoryInterface::Options& options, + const PeerConnectionDependencies& dependencies) { + if (options.disable_encryption) + return false; - // Returns true if |local_description| shares no ICE credentials with the - // ICE credentials that need restarting. - bool SatisfiesIceRestart( - const SessionDescriptionInterface& local_description) const { - for (const auto& transport_info : - local_description.description()->transport_infos()) { - if (ice_credentials_.find(std::make_pair( - transport_info.description.ice_ufrag, - transport_info.description.ice_pwd)) != ice_credentials_.end()) { - return false; - } - } - return true; - } + // Enable DTLS by default if we have an identity store or a certificate. 
+ bool default_enabled = + (dependencies.cert_generator || !configuration.certificates.empty()); - private: - void AppendIceCredentialsFromSessionDescription( - const SessionDescriptionInterface& desc) { - for (const auto& transport_info : desc.description()->transport_infos()) { - ice_credentials_.insert( - std::make_pair(transport_info.description.ice_ufrag, - transport_info.description.ice_pwd)); - } - } + // The |configuration| can override the default value. + return configuration.enable_dtls_srtp.value_or(default_enabled); +} - std::set> ice_credentials_; -}; - -// Upon completion, posts a task to execute the callback of the -// SetSessionDescriptionObserver asynchronously on the same thread. At this -// point, the state of the peer connection might no longer reflect the effects -// of the SetRemoteDescription operation, as the peer connection could have been -// modified during the post. -// TODO(hbos): Remove this class once we remove the version of -// PeerConnectionInterface::SetRemoteDescription() that takes a -// SetSessionDescriptionObserver as an argument. -class PeerConnection::SetRemoteDescriptionObserverAdapter - : public rtc::RefCountedObject { - public: - SetRemoteDescriptionObserverAdapter( - rtc::scoped_refptr pc, - rtc::scoped_refptr wrapper) - : pc_(std::move(pc)), wrapper_(std::move(wrapper)) {} - - // SetRemoteDescriptionObserverInterface implementation. 
- void OnSetRemoteDescriptionComplete(RTCError error) override { - if (error.ok()) - pc_->PostSetSessionDescriptionSuccess(wrapper_); - else - pc_->PostSetSessionDescriptionFailure(wrapper_, std::move(error)); - } - - private: - rtc::scoped_refptr pc_; - rtc::scoped_refptr wrapper_; -}; +} // namespace bool PeerConnectionInterface::RTCConfiguration::operator==( const PeerConnectionInterface::RTCConfiguration& o) const { @@ -896,16 +329,13 @@ bool PeerConnectionInterface::RTCConfiguration::operator==( SdpSemantics sdp_semantics; absl::optional network_preference; bool active_reset_srtp_params; - bool use_media_transport; - bool use_media_transport_for_data_channels; - absl::optional use_datagram_transport; - absl::optional use_datagram_transport_for_data_channels; - absl::optional use_datagram_transport_for_data_channels_receive_only; absl::optional crypto_options; bool offer_extmap_allow_mixed; std::string turn_logging_id; bool enable_implicit_rollback; absl::optional allow_codec_switching; + absl::optional report_usage_pattern_delay_ms; + absl::optional stable_writable_connection_ping_interval_ms; }; static_assert(sizeof(stuff_being_tested_for_equality) == sizeof(*this), "Did you add something to RTCConfiguration and forget to " @@ -934,7 +364,6 @@ bool PeerConnectionInterface::RTCConfiguration::operator==( disable_ipv6_on_wifi == o.disable_ipv6_on_wifi && max_ipv6_networks == o.max_ipv6_networks && disable_link_local_networks == o.disable_link_local_networks && - enable_rtp_data_channel == o.enable_rtp_data_channel && screencast_min_bitrate == o.screencast_min_bitrate && combined_audio_video_bwe == o.combined_audio_video_bwe && enable_dtls_srtp == o.enable_dtls_srtp && @@ -961,20 +390,14 @@ bool PeerConnectionInterface::RTCConfiguration::operator==( sdp_semantics == o.sdp_semantics && network_preference == o.network_preference && active_reset_srtp_params == o.active_reset_srtp_params && - use_media_transport == o.use_media_transport && - 
use_media_transport_for_data_channels == - o.use_media_transport_for_data_channels && - use_datagram_transport == o.use_datagram_transport && - use_datagram_transport_for_data_channels == - o.use_datagram_transport_for_data_channels && - use_datagram_transport_for_data_channels_receive_only == - o.use_datagram_transport_for_data_channels_receive_only && crypto_options == o.crypto_options && offer_extmap_allow_mixed == o.offer_extmap_allow_mixed && turn_logging_id == o.turn_logging_id && enable_implicit_rollback == o.enable_implicit_rollback && allow_codec_switching == o.allow_codec_switching && - enable_simulcast_stats == o.enable_simulcast_stats; + report_usage_pattern_delay_ms == o.report_usage_pattern_delay_ms && + stable_writable_connection_ping_interval_ms == + o.stable_writable_connection_ping_interval_ms; } bool PeerConnectionInterface::RTCConfiguration::operator!=( @@ -982,86 +405,130 @@ bool PeerConnectionInterface::RTCConfiguration::operator!=( return !(*this == o); } -void PeerConnection::TransceiverStableState::set_newly_created() { - RTC_DCHECK(!has_m_section_); - newly_created_ = true; -} - -void PeerConnection::TransceiverStableState::SetMSectionIfUnset( - absl::optional mid, - absl::optional mline_index) { - if (!has_m_section_) { - mid_ = mid; - mline_index_ = mline_index; - has_m_section_ = true; - } -} - -void PeerConnection::TransceiverStableState::SetRemoteStreamIdsIfUnset( - const std::vector& ids) { - if (!remote_stream_ids_.has_value()) { - remote_stream_ids_ = ids; +RTCErrorOr> PeerConnection::Create( + rtc::scoped_refptr context, + const PeerConnectionFactoryInterface::Options& options, + std::unique_ptr event_log, + std::unique_ptr call, + const PeerConnectionInterface::RTCConfiguration& configuration, + PeerConnectionDependencies dependencies) { + RTCError config_error = cricket::P2PTransportChannel::ValidateIceConfig( + ParseIceConfig(configuration)); + if (!config_error.ok()) { + RTC_LOG(LS_ERROR) << "Invalid ICE configuration: " + << 
config_error.message(); + return config_error; } -} -// Generate a RTCP CNAME when a PeerConnection is created. -std::string GenerateRtcpCname() { - std::string cname; - if (!rtc::CreateRandomString(kRtcpCnameLength, &cname)) { - RTC_LOG(LS_ERROR) << "Failed to generate CNAME."; - RTC_NOTREACHED(); + if (!dependencies.allocator) { + RTC_LOG(LS_ERROR) + << "PeerConnection initialized without a PortAllocator? " + "This shouldn't happen if using PeerConnectionFactory."; + return RTCError( + RTCErrorType::INVALID_PARAMETER, + "Attempt to create a PeerConnection without a PortAllocatorFactory"); } - return cname; -} - -bool ValidateOfferAnswerOptions( - const PeerConnectionInterface::RTCOfferAnswerOptions& rtc_options) { - return IsValidOfferToReceiveMedia(rtc_options.offer_to_receive_audio) && - IsValidOfferToReceiveMedia(rtc_options.offer_to_receive_video); -} - -// From |rtc_options|, fill parts of |session_options| shared by all generated -// m= sections (in other words, nothing that involves a map/array). -void ExtractSharedMediaSessionOptions( - const PeerConnectionInterface::RTCOfferAnswerOptions& rtc_options, - cricket::MediaSessionOptions* session_options) { - session_options->vad_enabled = rtc_options.voice_activity_detection; - session_options->bundle_enabled = rtc_options.use_rtp_mux; - session_options->raw_packetization_for_video = - rtc_options.raw_packetization_for_video; -} -PeerConnection::PeerConnection(PeerConnectionFactory* factory, - std::unique_ptr event_log, - std::unique_ptr call) - : factory_(factory), + if (!dependencies.observer) { + // TODO(deadbeef): Why do we do this? 
+ RTC_LOG(LS_ERROR) << "PeerConnection initialized without a " + "PeerConnectionObserver"; + return RTCError(RTCErrorType::INVALID_PARAMETER, + "Attempt to create a PeerConnection without an observer"); + } + + bool is_unified_plan = + configuration.sdp_semantics == SdpSemantics::kUnifiedPlan; + bool dtls_enabled = DtlsEnabled(configuration, options, dependencies); + + // Interim code: If an AsyncResolverFactory is given, but not an + // AsyncDnsResolverFactory, wrap it in a WrappingAsyncDnsResolverFactory + // If neither is given, create a WrappingAsyncDnsResolverFactory wrapping + // a BasicAsyncResolver. + // TODO(bugs.webrtc.org/12598): Remove code once all callers pass a + // AsyncDnsResolverFactory. + if (dependencies.async_dns_resolver_factory && + dependencies.async_resolver_factory) { + RTC_LOG(LS_ERROR) + << "Attempt to set both old and new type of DNS resolver factory"; + return RTCError(RTCErrorType::INVALID_PARAMETER, + "Both old and new type of DNS resolver given"); + } + if (dependencies.async_resolver_factory) { + dependencies.async_dns_resolver_factory = + std::make_unique( + std::move(dependencies.async_resolver_factory)); + } else { + dependencies.async_dns_resolver_factory = + std::make_unique( + std::make_unique()); + } + + // The PeerConnection constructor consumes some, but not all, dependencies. 
+ auto pc = rtc::make_ref_counted( + context, options, is_unified_plan, std::move(event_log), std::move(call), + dependencies, dtls_enabled); + RTCError init_error = pc->Initialize(configuration, std::move(dependencies)); + if (!init_error.ok()) { + RTC_LOG(LS_ERROR) << "PeerConnection initialization failed"; + return init_error; + } + return pc; +} + +PeerConnection::PeerConnection( + rtc::scoped_refptr context, + const PeerConnectionFactoryInterface::Options& options, + bool is_unified_plan, + std::unique_ptr event_log, + std::unique_ptr call, + PeerConnectionDependencies& dependencies, + bool dtls_enabled) + : context_(context), + options_(options), + observer_(dependencies.observer), + is_unified_plan_(is_unified_plan), event_log_(std::move(event_log)), event_log_ptr_(event_log_.get()), - operations_chain_(rtc::OperationsChain::Create()), - datagram_transport_config_( - field_trial::FindFullName(kDatagramTransportFieldTrial)), - datagram_transport_data_channel_config_( - field_trial::FindFullName(kDatagramTransportDataChannelFieldTrial)), - rtcp_cname_(GenerateRtcpCname()), - local_streams_(StreamCollection::Create()), - remote_streams_(StreamCollection::Create()), + async_dns_resolver_factory_( + std::move(dependencies.async_dns_resolver_factory)), + port_allocator_(std::move(dependencies.allocator)), + ice_transport_factory_(std::move(dependencies.ice_transport_factory)), + tls_cert_verifier_(std::move(dependencies.tls_cert_verifier)), call_(std::move(call)), call_ptr_(call_.get()), - local_ice_credentials_to_replace_(new LocalIceCredentialsToReplace()), + // RFC 3264: The numeric value of the session id and version in the + // o line MUST be representable with a "64 bit signed integer". + // Due to this constraint session id |session_id_| is max limited to + // LLONG_MAX. 
+ session_id_(rtc::ToString(rtc::CreateRandomId64() & LLONG_MAX)), + dtls_enabled_(dtls_enabled), data_channel_controller_(this), - weak_ptr_factory_(this) {} + message_handler_(signaling_thread()), + weak_factory_(this) { + worker_thread()->Invoke(RTC_FROM_HERE, [this] { + RTC_DCHECK_RUN_ON(worker_thread()); + worker_thread_safety_ = PendingTaskSafetyFlag::Create(); + if (!call_) + worker_thread_safety_->SetNotAlive(); + }); +} PeerConnection::~PeerConnection() { TRACE_EVENT0("webrtc", "PeerConnection::~PeerConnection"); RTC_DCHECK_RUN_ON(signaling_thread()); - weak_ptr_factory_.InvalidateWeakPtrs(); + if (sdp_handler_) { + sdp_handler_->PrepareForShutdown(); + } // Need to stop transceivers before destroying the stats collector because // AudioRtpSender has a reference to the StatsCollector it will update when // stopping. - for (const auto& transceiver : transceivers_) { - transceiver->Stop(); + if (rtp_manager()) { + for (const auto& transceiver : rtp_manager()->transceivers()->List()) { + transceiver->StopInternal(); + } } stats_.reset(nullptr); @@ -1070,103 +537,50 @@ PeerConnection::~PeerConnection() { stats_collector_ = nullptr; } - // Don't destroy BaseChannels until after stats has been cleaned up so that - // the last stats request can still read from the channels. - DestroyAllChannels(); + if (sdp_handler_) { + // Don't destroy BaseChannels until after stats has been cleaned up so that + // the last stats request can still read from the channels. + sdp_handler_->DestroyAllChannels(); - RTC_LOG(LS_INFO) << "Session: " << session_id() << " is destroyed."; + RTC_LOG(LS_INFO) << "Session: " << session_id() << " is destroyed."; - webrtc_session_desc_factory_.reset(); - sctp_factory_.reset(); - transport_controller_.reset(); + sdp_handler_->ResetSessionDescFactory(); + } - // port_allocator_ lives on the network thread and should be destroyed there. + // port_allocator_ and transport_controller_ live on the network thread and + // should be destroyed there. 
network_thread()->Invoke(RTC_FROM_HERE, [this] { RTC_DCHECK_RUN_ON(network_thread()); + TeardownDataChannelTransport_n(); + transport_controller_.reset(); port_allocator_.reset(); + if (network_thread_safety_) + network_thread_safety_->SetNotAlive(); }); + // call_ and event_log_ must be destroyed on the worker thread. worker_thread()->Invoke(RTC_FROM_HERE, [this] { RTC_DCHECK_RUN_ON(worker_thread()); + worker_thread_safety_->SetNotAlive(); call_.reset(); // The event log must outlive call (and any other object that uses it). event_log_.reset(); }); - - // Process all pending notifications in the message queue. If we don't do - // this, requests will linger and not know they succeeded or failed. - rtc::MessageList list; - signaling_thread()->Clear(this, rtc::MQID_ANY, &list); - for (auto& msg : list) { - if (msg.message_id == MSG_CREATE_SESSIONDESCRIPTION_FAILED) { - // Processing CreateOffer() and CreateAnswer() messages ensures their - // observers are invoked even if the PeerConnection is destroyed early. - OnMessage(&msg); - } else { - // TODO(hbos): Consider processing all pending messages. This would mean - // that SetLocalDescription() and SetRemoteDescription() observers are - // informed of successes and failures; this is currently NOT the case. - delete msg.pdata; - } - } -} - -void PeerConnection::DestroyAllChannels() { - // Destroy video channels first since they may have a pointer to a voice - // channel. 
- for (const auto& transceiver : transceivers_) { - if (transceiver->media_type() == cricket::MEDIA_TYPE_VIDEO) { - DestroyTransceiverChannel(transceiver); - } - } - for (const auto& transceiver : transceivers_) { - if (transceiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { - DestroyTransceiverChannel(transceiver); - } - } - DestroyDataChannelTransport(); } -bool PeerConnection::Initialize( +RTCError PeerConnection::Initialize( const PeerConnectionInterface::RTCConfiguration& configuration, PeerConnectionDependencies dependencies) { RTC_DCHECK_RUN_ON(signaling_thread()); TRACE_EVENT0("webrtc", "PeerConnection::Initialize"); - RTCError config_error = ValidateConfiguration(configuration); - if (!config_error.ok()) { - RTC_LOG(LS_ERROR) << "Invalid configuration: " << config_error.message(); - return false; - } - - if (!dependencies.allocator) { - RTC_LOG(LS_ERROR) - << "PeerConnection initialized without a PortAllocator? " - "This shouldn't happen if using PeerConnectionFactory."; - return false; - } - - if (!dependencies.observer) { - // TODO(deadbeef): Why do we do this? 
- RTC_LOG(LS_ERROR) << "PeerConnection initialized without a " - "PeerConnectionObserver"; - return false; - } - - observer_ = dependencies.observer; - async_resolver_factory_ = std::move(dependencies.async_resolver_factory); - port_allocator_ = std::move(dependencies.allocator); - packet_socket_factory_ = std::move(dependencies.packet_socket_factory); - ice_transport_factory_ = std::move(dependencies.ice_transport_factory); - tls_cert_verifier_ = std::move(dependencies.tls_cert_verifier); - cricket::ServerAddresses stun_servers; std::vector turn_servers; RTCErrorType parse_error = ParseIceServers(configuration.servers, &stun_servers, &turn_servers); if (parse_error != RTCErrorType::NONE) { - return false; + return RTCError(parse_error, "ICE server parse failed"); } // Add the turn logging id to all turn servers @@ -1174,16 +588,7 @@ bool PeerConnection::Initialize( turn_server.turn_logging_id = configuration.turn_logging_id; } - // The port allocator lives on the network thread and should be initialized - // there. - const auto pa_result = - network_thread()->Invoke( - RTC_FROM_HERE, - rtc::Bind(&PeerConnection::InitializePortAllocator_n, this, - stun_servers, turn_servers, configuration)); - - // If initialization was successful, note if STUN or TURN servers - // were supplied. + // Note if STUN or TURN servers were supplied. if (!stun_servers.empty()) { NoteUsageEvent(UsageEvent::STUN_SERVER_ADDED); } @@ -1191,241 +596,181 @@ bool PeerConnection::Initialize( NoteUsageEvent(UsageEvent::TURN_SERVER_ADDED); } - // Send information about IPv4/IPv6 status. - PeerConnectionAddressFamilyCounter address_family; - if (pa_result.enable_ipv6) { - address_family = kPeerConnection_IPv6; - } else { - address_family = kPeerConnection_IPv4; + // Network thread initialization. 
+ network_thread()->Invoke(RTC_FROM_HERE, [this, &stun_servers, + &turn_servers, &configuration, + &dependencies] { + RTC_DCHECK_RUN_ON(network_thread()); + network_thread_safety_ = PendingTaskSafetyFlag::Create(); + InitializePortAllocatorResult pa_result = + InitializePortAllocator_n(stun_servers, turn_servers, configuration); + // Send information about IPv4/IPv6 status. + PeerConnectionAddressFamilyCounter address_family = + pa_result.enable_ipv6 ? kPeerConnection_IPv6 : kPeerConnection_IPv4; + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.IPMetrics", address_family, + kPeerConnectionAddressFamilyCounter_Max); + InitializeTransportController_n(configuration, dependencies); + }); + + configuration_ = configuration; + + stats_ = std::make_unique(this); + stats_collector_ = RTCStatsCollector::Create(this); + + sdp_handler_ = + SdpOfferAnswerHandler::Create(this, configuration, dependencies); + + rtp_manager_ = std::make_unique( + IsUnifiedPlan(), signaling_thread(), worker_thread(), channel_manager(), + &usage_pattern_, observer_, stats_.get(), [this]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + sdp_handler_->UpdateNegotiationNeeded(); + }); + + // Add default audio/video transceivers for Plan B SDP. + if (!IsUnifiedPlan()) { + rtp_manager()->transceivers()->Add( + RtpTransceiverProxyWithInternal::Create( + signaling_thread(), + new RtpTransceiver(cricket::MEDIA_TYPE_AUDIO, channel_manager()))); + rtp_manager()->transceivers()->Add( + RtpTransceiverProxyWithInternal::Create( + signaling_thread(), + new RtpTransceiver(cricket::MEDIA_TYPE_VIDEO, channel_manager()))); } - RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.IPMetrics", address_family, - kPeerConnectionAddressFamilyCounter_Max); - const PeerConnectionFactoryInterface::Options& options = factory_->options(); + int delay_ms = configuration.report_usage_pattern_delay_ms + ? 
*configuration.report_usage_pattern_delay_ms + : REPORT_USAGE_PATTERN_DELAY_MS; + message_handler_.RequestUsagePatternReport( + [this]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + ReportUsagePattern(); + }, + delay_ms); - // RFC 3264: The numeric value of the session id and version in the - // o line MUST be representable with a "64 bit signed integer". - // Due to this constraint session id |session_id_| is max limited to - // LLONG_MAX. - session_id_ = rtc::ToString(rtc::CreateRandomId64() & LLONG_MAX); + return RTCError::OK(); +} + +void PeerConnection::InitializeTransportController_n( + const RTCConfiguration& configuration, + const PeerConnectionDependencies& dependencies) { JsepTransportController::Config config; config.redetermine_role_on_ice_restart = configuration.redetermine_role_on_ice_restart; - config.ssl_max_version = factory_->options().ssl_max_version; - config.disable_encryption = options.disable_encryption; + config.ssl_max_version = options_.ssl_max_version; + config.disable_encryption = options_.disable_encryption; config.bundle_policy = configuration.bundle_policy; config.rtcp_mux_policy = configuration.rtcp_mux_policy; - // TODO(bugs.webrtc.org/9891) - Remove options.crypto_options then remove this - // stub. + // TODO(bugs.webrtc.org/9891) - Remove options_.crypto_options then remove + // this stub. config.crypto_options = configuration.crypto_options.has_value() ? *configuration.crypto_options - : options.crypto_options; + : options_.crypto_options; config.transport_observer = this; - // It's safe to pass |this| and using |rtcp_invoker_| and the |call_| pointer - // since the JsepTransportController instance is owned by this PeerConnection - // instance and is destroyed before both |rtcp_invoker_| and the |call_| - // pointer. 
- config.rtcp_handler = [this](const rtc::CopyOnWriteBuffer& packet, - int64_t packet_time_us) { - RTC_DCHECK_RUN_ON(network_thread()); - rtcp_invoker_.AsyncInvoke( - RTC_FROM_HERE, worker_thread(), [this, packet, packet_time_us] { - RTC_DCHECK_RUN_ON(worker_thread()); - // |call_| is reset on the worker thread in the PeerConnection - // destructor, so we check that it's still valid before propagating - // the packet. - if (call_) { - call_->Receiver()->DeliverPacket(MediaType::ANY, packet, - packet_time_us); - } - }); - }; + config.rtcp_handler = InitializeRtcpCallback(); config.event_log = event_log_ptr_; #if defined(ENABLE_EXTERNAL_AUTH) config.enable_external_auth = true; #endif config.active_reset_srtp_params = configuration.active_reset_srtp_params; - use_datagram_transport_ = datagram_transport_config_.enabled && - configuration.use_datagram_transport.value_or( - datagram_transport_config_.default_value); - use_datagram_transport_for_data_channels_ = - datagram_transport_data_channel_config_.enabled && - configuration.use_datagram_transport_for_data_channels.value_or( - datagram_transport_data_channel_config_.default_value); - use_datagram_transport_for_data_channels_receive_only_ = - configuration.use_datagram_transport_for_data_channels_receive_only - .value_or(datagram_transport_data_channel_config_.receive_only); - if (use_datagram_transport_ || use_datagram_transport_for_data_channels_) { - if (!factory_->media_transport_factory()) { - RTC_DCHECK(false) - << "PeerConnecton is initialized with use_datagram_transport = true " - "or use_datagram_transport_for_data_channels = true " - "but media transport factory is not set in PeerConnectionFactory"; - return false; - } - - config.use_datagram_transport = use_datagram_transport_; - config.use_datagram_transport_for_data_channels = - use_datagram_transport_for_data_channels_; - config.use_datagram_transport_for_data_channels_receive_only = - use_datagram_transport_for_data_channels_receive_only_; - 
config.media_transport_factory = factory_->media_transport_factory(); - } - - // Obtain a certificate from RTCConfiguration if any were provided (optional). - rtc::scoped_refptr certificate; - if (!configuration.certificates.empty()) { - // TODO(hbos,torbjorng): Decide on certificate-selection strategy instead of - // just picking the first one. The decision should be made based on the DTLS - // handshake. The DTLS negotiations need to know about all certificates. - certificate = configuration.certificates[0]; - } - - if (options.disable_encryption) { - dtls_enabled_ = false; - } else { - // Enable DTLS by default if we have an identity store or a certificate. - dtls_enabled_ = (dependencies.cert_generator || certificate); - // |configuration| can override the default |dtls_enabled_| value. - if (configuration.enable_dtls_srtp) { - dtls_enabled_ = *(configuration.enable_dtls_srtp); - } - } - - sctp_factory_ = factory_->CreateSctpTransportInternalFactory(); - - if (use_datagram_transport_for_data_channels_) { - if (configuration.enable_rtp_data_channel) { - RTC_LOG(LS_ERROR) << "enable_rtp_data_channel and " - "use_datagram_transport_for_data_channels are " - "incompatible and cannot both be set to true"; - return false; - } - if (configuration.enable_dtls_srtp && !*configuration.enable_dtls_srtp) { - RTC_LOG(LS_INFO) << "Using data channel transport with no fallback"; - data_channel_controller_.set_data_channel_type( - cricket::DCT_DATA_CHANNEL_TRANSPORT); - } else { - RTC_LOG(LS_INFO) << "Using data channel transport with fallback to SCTP"; - data_channel_controller_.set_data_channel_type( - cricket::DCT_DATA_CHANNEL_TRANSPORT_SCTP); - config.sctp_factory = sctp_factory_.get(); - } - } else if (configuration.enable_rtp_data_channel) { - // Enable creation of RTP data channels if the kEnableRtpDataChannels is - // set. It takes precendence over the disable_sctp_data_channels - // PeerConnectionFactoryInterface::Options. 
- data_channel_controller_.set_data_channel_type(cricket::DCT_RTP); - } else { - // DTLS has to be enabled to use SCTP. - if (!options.disable_sctp_data_channels && dtls_enabled_) { - data_channel_controller_.set_data_channel_type(cricket::DCT_SCTP); - config.sctp_factory = sctp_factory_.get(); - } + // DTLS has to be enabled to use SCTP. + if (dtls_enabled_) { + config.sctp_factory = context_->sctp_transport_factory(); } config.ice_transport_factory = ice_transport_factory_.get(); + config.on_dtls_handshake_error_ = + [weak_ptr = weak_factory_.GetWeakPtr()](rtc::SSLHandshakeError s) { + if (weak_ptr) { + weak_ptr->OnTransportControllerDtlsHandshakeError(s); + } + }; - transport_controller_.reset(new JsepTransportController( - signaling_thread(), network_thread(), port_allocator_.get(), - async_resolver_factory_.get(), config)); - transport_controller_->SignalIceConnectionState.connect( - this, &PeerConnection::OnTransportControllerConnectionState); - transport_controller_->SignalStandardizedIceConnectionState.connect( - this, &PeerConnection::SetStandardizedIceConnectionState); - transport_controller_->SignalConnectionState.connect( - this, &PeerConnection::SetConnectionState); - transport_controller_->SignalIceGatheringState.connect( - this, &PeerConnection::OnTransportControllerGatheringState); - transport_controller_->SignalIceCandidatesGathered.connect( - this, &PeerConnection::OnTransportControllerCandidatesGathered); - transport_controller_->SignalIceCandidateError.connect( - this, &PeerConnection::OnTransportControllerCandidateError); - transport_controller_->SignalIceCandidatesRemoved.connect( - this, &PeerConnection::OnTransportControllerCandidatesRemoved); - transport_controller_->SignalDtlsHandshakeError.connect( - this, &PeerConnection::OnTransportControllerDtlsHandshakeError); - transport_controller_->SignalIceCandidatePairChanged.connect( - this, &PeerConnection::OnTransportControllerCandidateChanged); - - stats_.reset(new StatsCollector(this)); - 
stats_collector_ = RTCStatsCollector::Create(this); + transport_controller_.reset( + new JsepTransportController(network_thread(), port_allocator_.get(), + async_dns_resolver_factory_.get(), config)); - configuration_ = configuration; + transport_controller_->SubscribeIceConnectionState( + [this](cricket::IceConnectionState s) { + RTC_DCHECK_RUN_ON(network_thread()); + if (s == cricket::kIceConnectionConnected) { + ReportTransportStats(); + } + signaling_thread()->PostTask( + ToQueuedTask(signaling_thread_safety_.flag(), [this, s]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + OnTransportControllerConnectionState(s); + })); + }); + transport_controller_->SubscribeConnectionState( + [this](PeerConnectionInterface::PeerConnectionState s) { + RTC_DCHECK_RUN_ON(network_thread()); + signaling_thread()->PostTask( + ToQueuedTask(signaling_thread_safety_.flag(), [this, s]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + SetConnectionState(s); + })); + }); + transport_controller_->SubscribeStandardizedIceConnectionState( + [this](PeerConnectionInterface::IceConnectionState s) { + RTC_DCHECK_RUN_ON(network_thread()); + signaling_thread()->PostTask( + ToQueuedTask(signaling_thread_safety_.flag(), [this, s]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + SetStandardizedIceConnectionState(s); + })); + }); + transport_controller_->SubscribeIceGatheringState( + [this](cricket::IceGatheringState s) { + RTC_DCHECK_RUN_ON(network_thread()); + signaling_thread()->PostTask( + ToQueuedTask(signaling_thread_safety_.flag(), [this, s]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + OnTransportControllerGatheringState(s); + })); + }); + transport_controller_->SubscribeIceCandidateGathered( + [this](const std::string& transport, + const std::vector& candidates) { + RTC_DCHECK_RUN_ON(network_thread()); + signaling_thread()->PostTask( + ToQueuedTask(signaling_thread_safety_.flag(), + [this, t = transport, c = candidates]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + 
OnTransportControllerCandidatesGathered(t, c); + })); + }); + transport_controller_->SubscribeIceCandidateError( + [this](const cricket::IceCandidateErrorEvent& event) { + RTC_DCHECK_RUN_ON(network_thread()); + signaling_thread()->PostTask(ToQueuedTask( + signaling_thread_safety_.flag(), [this, event = event]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + OnTransportControllerCandidateError(event); + })); + }); + transport_controller_->SubscribeIceCandidatesRemoved( + [this](const std::vector& c) { + RTC_DCHECK_RUN_ON(network_thread()); + signaling_thread()->PostTask( + ToQueuedTask(signaling_thread_safety_.flag(), [this, c = c]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + OnTransportControllerCandidatesRemoved(c); + })); + }); + transport_controller_->SubscribeIceCandidatePairChanged( + [this](const cricket::CandidatePairChangeEvent& event) { + RTC_DCHECK_RUN_ON(network_thread()); + signaling_thread()->PostTask(ToQueuedTask( + signaling_thread_safety_.flag(), [this, event = event]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + OnTransportControllerCandidateChanged(event); + })); + }); transport_controller_->SetIceConfig(ParseIceConfig(configuration)); - - video_options_.screencast_min_bitrate_kbps = - configuration.screencast_min_bitrate; - audio_options_.combined_audio_video_bwe = - configuration.combined_audio_video_bwe; - - audio_options_.audio_jitter_buffer_max_packets = - configuration.audio_jitter_buffer_max_packets; - - audio_options_.audio_jitter_buffer_fast_accelerate = - configuration.audio_jitter_buffer_fast_accelerate; - - audio_options_.audio_jitter_buffer_min_delay_ms = - configuration.audio_jitter_buffer_min_delay_ms; - - audio_options_.audio_jitter_buffer_enable_rtx_handling = - configuration.audio_jitter_buffer_enable_rtx_handling; - - // Whether the certificate generator/certificate is null or not determines - // what PeerConnectionDescriptionFactory will do, so make sure that we give it - // the right instructions by clearing the variables if 
needed. - if (!dtls_enabled_) { - dependencies.cert_generator.reset(); - certificate = nullptr; - } else if (certificate) { - // Favor generated certificate over the certificate generator. - dependencies.cert_generator.reset(); - } - - webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory( - signaling_thread(), channel_manager(), this, session_id(), - std::move(dependencies.cert_generator), certificate, &ssrc_generator_)); - webrtc_session_desc_factory_->SignalCertificateReady.connect( - this, &PeerConnection::OnCertificateReady); - - if (options.disable_encryption) { - webrtc_session_desc_factory_->SetSdesPolicy(cricket::SEC_DISABLED); - } - - webrtc_session_desc_factory_->set_enable_encrypted_rtp_header_extensions( - GetCryptoOptions().srtp.enable_encrypted_rtp_header_extensions); - webrtc_session_desc_factory_->set_is_unified_plan(IsUnifiedPlan()); - - // Add default audio/video transceivers for Plan B SDP. - if (!IsUnifiedPlan()) { - transceivers_.push_back( - RtpTransceiverProxyWithInternal::Create( - signaling_thread(), new RtpTransceiver(cricket::MEDIA_TYPE_AUDIO))); - transceivers_.push_back( - RtpTransceiverProxyWithInternal::Create( - signaling_thread(), new RtpTransceiver(cricket::MEDIA_TYPE_VIDEO))); - } - int delay_ms = - return_histogram_very_quickly_ ? 
0 : REPORT_USAGE_PATTERN_DELAY_MS; - signaling_thread()->PostDelayed(RTC_FROM_HERE, delay_ms, this, - MSG_REPORT_USAGE_PATTERN, nullptr); - - if (dependencies.video_bitrate_allocator_factory) { - video_bitrate_allocator_factory_ = - std::move(dependencies.video_bitrate_allocator_factory); - } else { - video_bitrate_allocator_factory_ = - CreateBuiltinVideoBitrateAllocatorFactory(); - } - return true; -} - -RTCError PeerConnection::ValidateConfiguration( - const RTCConfiguration& config) const { - return cricket::P2PTransportChannel::ValidateIceConfig( - ParseIceConfig(config)); } rtc::scoped_refptr PeerConnection::local_streams() { @@ -1433,7 +778,7 @@ rtc::scoped_refptr PeerConnection::local_streams() { RTC_CHECK(!IsUnifiedPlan()) << "local_streams is not available with Unified " "Plan SdpSemantics. Please use GetSenders " "instead."; - return local_streams_; + return sdp_handler_->local_streams(); } rtc::scoped_refptr PeerConnection::remote_streams() { @@ -1441,7 +786,7 @@ rtc::scoped_refptr PeerConnection::remote_streams() { RTC_CHECK(!IsUnifiedPlan()) << "remote_streams is not available with Unified " "Plan SdpSemantics. Please use GetReceivers " "instead."; - return remote_streams_; + return sdp_handler_->remote_streams(); } bool PeerConnection::AddStream(MediaStreamInterface* local_stream) { @@ -1449,35 +794,7 @@ bool PeerConnection::AddStream(MediaStreamInterface* local_stream) { RTC_CHECK(!IsUnifiedPlan()) << "AddStream is not available with Unified Plan " "SdpSemantics. 
Please use AddTrack instead."; TRACE_EVENT0("webrtc", "PeerConnection::AddStream"); - if (IsClosed()) { - return false; - } - if (!CanAddLocalMediaStream(local_streams_, local_stream)) { - return false; - } - - local_streams_->AddStream(local_stream); - MediaStreamObserver* observer = new MediaStreamObserver(local_stream); - observer->SignalAudioTrackAdded.connect(this, - &PeerConnection::OnAudioTrackAdded); - observer->SignalAudioTrackRemoved.connect( - this, &PeerConnection::OnAudioTrackRemoved); - observer->SignalVideoTrackAdded.connect(this, - &PeerConnection::OnVideoTrackAdded); - observer->SignalVideoTrackRemoved.connect( - this, &PeerConnection::OnVideoTrackRemoved); - stream_observers_.push_back(std::unique_ptr(observer)); - - for (const auto& track : local_stream->GetAudioTracks()) { - AddAudioTrack(track.get(), local_stream); - } - for (const auto& track : local_stream->GetVideoTracks()) { - AddVideoTrack(track.get(), local_stream); - } - - stats_->AddStream(local_stream); - UpdateNegotiationNeeded(); - return true; + return sdp_handler_->AddStream(local_stream); } void PeerConnection::RemoveStream(MediaStreamInterface* local_stream) { @@ -1486,27 +803,7 @@ void PeerConnection::RemoveStream(MediaStreamInterface* local_stream) { "Plan SdpSemantics. 
Please use RemoveTrack " "instead."; TRACE_EVENT0("webrtc", "PeerConnection::RemoveStream"); - if (!IsClosed()) { - for (const auto& track : local_stream->GetAudioTracks()) { - RemoveAudioTrack(track.get(), local_stream); - } - for (const auto& track : local_stream->GetVideoTracks()) { - RemoveVideoTrack(track.get(), local_stream); - } - } - local_streams_->RemoveStream(local_stream); - stream_observers_.erase( - std::remove_if( - stream_observers_.begin(), stream_observers_.end(), - [local_stream](const std::unique_ptr& observer) { - return observer->stream()->id().compare(local_stream->id()) == 0; - }), - stream_observers_.end()); - - if (IsClosed()) { - return; - } - UpdateNegotiationNeeded(); + sdp_handler_->RemoveStream(local_stream); } RTCErrorOr> PeerConnection::AddTrack( @@ -1526,121 +823,19 @@ RTCErrorOr> PeerConnection::AddTrack( LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_STATE, "PeerConnection is closed."); } - if (FindSenderForTrack(track)) { + if (rtp_manager()->FindSenderForTrack(track)) { LOG_AND_RETURN_ERROR( RTCErrorType::INVALID_PARAMETER, "Sender already exists for track " + track->id() + "."); } - auto sender_or_error = - (IsUnifiedPlan() ? 
AddTrackUnifiedPlan(track, stream_ids) - : AddTrackPlanB(track, stream_ids)); + auto sender_or_error = rtp_manager()->AddTrack(track, stream_ids); if (sender_or_error.ok()) { - UpdateNegotiationNeeded(); + sdp_handler_->UpdateNegotiationNeeded(); stats_->AddTrack(track); } return sender_or_error; } -RTCErrorOr> -PeerConnection::AddTrackPlanB( - rtc::scoped_refptr track, - const std::vector& stream_ids) { - if (stream_ids.size() > 1u) { - LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_OPERATION, - "AddTrack with more than one stream is not " - "supported with Plan B semantics."); - } - std::vector adjusted_stream_ids = stream_ids; - if (adjusted_stream_ids.empty()) { - adjusted_stream_ids.push_back(rtc::CreateRandomUuid()); - } - cricket::MediaType media_type = - (track->kind() == MediaStreamTrackInterface::kAudioKind - ? cricket::MEDIA_TYPE_AUDIO - : cricket::MEDIA_TYPE_VIDEO); - auto new_sender = - CreateSender(media_type, track->id(), track, adjusted_stream_ids, {}); - if (track->kind() == MediaStreamTrackInterface::kAudioKind) { - new_sender->internal()->SetMediaChannel(voice_media_channel()); - GetAudioTransceiver()->internal()->AddSender(new_sender); - const RtpSenderInfo* sender_info = - FindSenderInfo(local_audio_sender_infos_, - new_sender->internal()->stream_ids()[0], track->id()); - if (sender_info) { - new_sender->internal()->SetSsrc(sender_info->first_ssrc); - } - } else { - RTC_DCHECK_EQ(MediaStreamTrackInterface::kVideoKind, track->kind()); - new_sender->internal()->SetMediaChannel(video_media_channel()); - GetVideoTransceiver()->internal()->AddSender(new_sender); - const RtpSenderInfo* sender_info = - FindSenderInfo(local_video_sender_infos_, - new_sender->internal()->stream_ids()[0], track->id()); - if (sender_info) { - new_sender->internal()->SetSsrc(sender_info->first_ssrc); - } - } - return rtc::scoped_refptr(new_sender); -} - -RTCErrorOr> -PeerConnection::AddTrackUnifiedPlan( - rtc::scoped_refptr track, - const std::vector& stream_ids) { - auto 
transceiver = FindFirstTransceiverForAddedTrack(track); - if (transceiver) { - RTC_LOG(LS_INFO) << "Reusing an existing " - << cricket::MediaTypeToString(transceiver->media_type()) - << " transceiver for AddTrack."; - if (transceiver->direction() == RtpTransceiverDirection::kRecvOnly) { - transceiver->internal()->set_direction( - RtpTransceiverDirection::kSendRecv); - } else if (transceiver->direction() == RtpTransceiverDirection::kInactive) { - transceiver->internal()->set_direction( - RtpTransceiverDirection::kSendOnly); - } - transceiver->sender()->SetTrack(track); - transceiver->internal()->sender_internal()->set_stream_ids(stream_ids); - transceiver->internal()->set_reused_for_addtrack(true); - } else { - cricket::MediaType media_type = - (track->kind() == MediaStreamTrackInterface::kAudioKind - ? cricket::MEDIA_TYPE_AUDIO - : cricket::MEDIA_TYPE_VIDEO); - RTC_LOG(LS_INFO) << "Adding " << cricket::MediaTypeToString(media_type) - << " transceiver in response to a call to AddTrack."; - std::string sender_id = track->id(); - // Avoid creating a sender with an existing ID by generating a random ID. - // This can happen if this is the second time AddTrack has created a sender - // for this track. 
- if (FindSenderById(sender_id)) { - sender_id = rtc::CreateRandomUuid(); - } - auto sender = CreateSender(media_type, sender_id, track, stream_ids, {}); - auto receiver = CreateReceiver(media_type, rtc::CreateRandomUuid()); - transceiver = CreateAndAddTransceiver(sender, receiver); - transceiver->internal()->set_created_by_addtrack(true); - transceiver->internal()->set_direction(RtpTransceiverDirection::kSendRecv); - } - return transceiver->sender(); -} - -rtc::scoped_refptr> -PeerConnection::FindFirstTransceiverForAddedTrack( - rtc::scoped_refptr track) { - RTC_DCHECK(track); - for (auto transceiver : transceivers_) { - if (!transceiver->sender()->track() && - cricket::MediaTypeToString(transceiver->media_type()) == - track->kind() && - !transceiver->internal()->has_ever_been_used_to_send() && - !transceiver->stopped()) { - return transceiver; - } - } - return nullptr; -} - bool PeerConnection::RemoveTrack(RtpSenderInterface* sender) { TRACE_EVENT0("webrtc", "PeerConnection::RemoveTrack"); return RemoveTrackNew(sender).ok(); @@ -1672,10 +867,12 @@ RTCError PeerConnection::RemoveTrackNew( } else { bool removed; if (sender->media_type() == cricket::MEDIA_TYPE_AUDIO) { - removed = GetAudioTransceiver()->internal()->RemoveSender(sender); + removed = rtp_manager()->GetAudioTransceiver()->internal()->RemoveSender( + sender); } else { RTC_DCHECK_EQ(cricket::MEDIA_TYPE_VIDEO, sender->media_type()); - removed = GetVideoTransceiver()->internal()->RemoveSender(sender); + removed = rtp_manager()->GetVideoTransceiver()->internal()->RemoveSender( + sender); } if (!removed) { LOG_AND_RETURN_ERROR( @@ -1683,19 +880,14 @@ RTCError PeerConnection::RemoveTrackNew( "Couldn't find sender " + sender->id() + " to remove."); } } - UpdateNegotiationNeeded(); + sdp_handler_->UpdateNegotiationNeeded(); return RTCError::OK(); } rtc::scoped_refptr> PeerConnection::FindTransceiverBySender( rtc::scoped_refptr sender) { - for (auto transceiver : transceivers_) { - if (transceiver->sender() == 
sender) { - return transceiver; - } - } - return nullptr; + return rtp_manager()->transceivers()->FindBySender(sender); } RTCErrorOr> @@ -1704,6 +896,16 @@ PeerConnection::AddTransceiver( return AddTransceiver(track, RtpTransceiverInit()); } +RtpTransportInternal* PeerConnection::GetRtpTransport(const std::string& mid) { + RTC_DCHECK_RUN_ON(signaling_thread()); + return network_thread()->Invoke( + RTC_FROM_HERE, [this, &mid] { + auto rtp_transport = transport_controller_->GetRtpTransport(mid); + RTC_DCHECK(rtp_transport); + return rtp_transport; + }); +} + RTCErrorOr> PeerConnection::AddTransceiver( rtc::scoped_refptr track, @@ -1751,6 +953,7 @@ PeerConnection::AddTransceiver( rtc::scoped_refptr track, const RtpTransceiverInit& init, bool update_negotiation_needed) { + RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK((media_type == cricket::MEDIA_TYPE_AUDIO || media_type == cricket::MEDIA_TYPE_VIDEO)); if (track) { @@ -1794,9 +997,11 @@ PeerConnection::AddTransceiver( parameters.encodings = init.send_encodings; // Encodings are dropped from the tail if too many are provided. - if (parameters.encodings.size() > kMaxSimulcastStreams) { + size_t max_simulcast_streams = + media_type == cricket::MEDIA_TYPE_VIDEO ? kMaxSimulcastStreams : 1u; + if (parameters.encodings.size() > max_simulcast_streams) { parameters.encodings.erase( - parameters.encodings.begin() + kMaxSimulcastStreams, + parameters.encodings.begin() + max_simulcast_streams, parameters.encodings.end()); } @@ -1830,100 +1035,27 @@ PeerConnection::AddTransceiver( << " transceiver in response to a call to AddTransceiver."; // Set the sender ID equal to the track ID if the track is specified unless // that sender ID is already in use. - std::string sender_id = - (track && !FindSenderById(track->id()) ? 
track->id() - : rtc::CreateRandomUuid()); - auto sender = CreateSender(media_type, sender_id, track, init.stream_ids, - parameters.encodings); - auto receiver = CreateReceiver(media_type, rtc::CreateRandomUuid()); - auto transceiver = CreateAndAddTransceiver(sender, receiver); + std::string sender_id = (track && !rtp_manager()->FindSenderById(track->id()) + ? track->id() + : rtc::CreateRandomUuid()); + auto sender = rtp_manager()->CreateSender( + media_type, sender_id, track, init.stream_ids, parameters.encodings); + auto receiver = + rtp_manager()->CreateReceiver(media_type, rtc::CreateRandomUuid()); + auto transceiver = rtp_manager()->CreateAndAddTransceiver(sender, receiver); transceiver->internal()->set_direction(init.direction); if (update_negotiation_needed) { - UpdateNegotiationNeeded(); + sdp_handler_->UpdateNegotiationNeeded(); } return rtc::scoped_refptr(transceiver); } -rtc::scoped_refptr> -PeerConnection::CreateSender( - cricket::MediaType media_type, - const std::string& id, - rtc::scoped_refptr track, - const std::vector& stream_ids, - const std::vector& send_encodings) { - RTC_DCHECK_RUN_ON(signaling_thread()); - rtc::scoped_refptr> sender; - if (media_type == cricket::MEDIA_TYPE_AUDIO) { - RTC_DCHECK(!track || - (track->kind() == MediaStreamTrackInterface::kAudioKind)); - sender = RtpSenderProxyWithInternal::Create( - signaling_thread(), - AudioRtpSender::Create(worker_thread(), id, stats_.get(), this)); - NoteUsageEvent(UsageEvent::AUDIO_ADDED); - } else { - RTC_DCHECK_EQ(media_type, cricket::MEDIA_TYPE_VIDEO); - RTC_DCHECK(!track || - (track->kind() == MediaStreamTrackInterface::kVideoKind)); - sender = RtpSenderProxyWithInternal::Create( - signaling_thread(), VideoRtpSender::Create(worker_thread(), id, this)); - NoteUsageEvent(UsageEvent::VIDEO_ADDED); - } - bool set_track_succeeded = sender->SetTrack(track); - RTC_DCHECK(set_track_succeeded); - sender->internal()->set_stream_ids(stream_ids); - 
sender->internal()->set_init_send_encodings(send_encodings); - return sender; -} - -rtc::scoped_refptr> -PeerConnection::CreateReceiver(cricket::MediaType media_type, - const std::string& receiver_id) { - rtc::scoped_refptr> - receiver; - if (media_type == cricket::MEDIA_TYPE_AUDIO) { - receiver = RtpReceiverProxyWithInternal::Create( - signaling_thread(), new AudioRtpReceiver(worker_thread(), receiver_id, - std::vector({}))); - NoteUsageEvent(UsageEvent::AUDIO_ADDED); - } else { - RTC_DCHECK_EQ(media_type, cricket::MEDIA_TYPE_VIDEO); - receiver = RtpReceiverProxyWithInternal::Create( - signaling_thread(), new VideoRtpReceiver(worker_thread(), receiver_id, - std::vector({}))); - NoteUsageEvent(UsageEvent::VIDEO_ADDED); - } - return receiver; -} - -rtc::scoped_refptr> -PeerConnection::CreateAndAddTransceiver( - rtc::scoped_refptr> sender, - rtc::scoped_refptr> - receiver) { - // Ensure that the new sender does not have an ID that is already in use by - // another sender. - // Allow receiver IDs to conflict since those come from remote SDP (which - // could be invalid, but should not cause a crash). - RTC_DCHECK(!FindSenderById(sender->id())); - auto transceiver = RtpTransceiverProxyWithInternal::Create( - signaling_thread(), - new RtpTransceiver( - sender, receiver, channel_manager(), - sender->media_type() == cricket::MEDIA_TYPE_AUDIO - ? 
channel_manager()->GetSupportedAudioRtpHeaderExtensions() - : channel_manager()->GetSupportedVideoRtpHeaderExtensions())); - transceivers_.push_back(transceiver); - transceiver->internal()->SignalNegotiationNeeded.connect( - this, &PeerConnection::OnNegotiationNeeded); - return transceiver; -} - void PeerConnection::OnNegotiationNeeded() { RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK(!IsClosed()); - UpdateNegotiationNeeded(); + sdp_handler_->UpdateNegotiationNeeded(); } rtc::scoped_refptr PeerConnection::CreateSender( @@ -1954,18 +1086,18 @@ rtc::scoped_refptr PeerConnection::CreateSender( rtc::scoped_refptr> new_sender; if (kind == MediaStreamTrackInterface::kAudioKind) { auto audio_sender = AudioRtpSender::Create( - worker_thread(), rtc::CreateRandomUuid(), stats_.get(), this); - audio_sender->SetMediaChannel(voice_media_channel()); + worker_thread(), rtc::CreateRandomUuid(), stats_.get(), rtp_manager()); + audio_sender->SetMediaChannel(rtp_manager()->voice_media_channel()); new_sender = RtpSenderProxyWithInternal::Create( signaling_thread(), audio_sender); - GetAudioTransceiver()->internal()->AddSender(new_sender); + rtp_manager()->GetAudioTransceiver()->internal()->AddSender(new_sender); } else if (kind == MediaStreamTrackInterface::kVideoKind) { - auto video_sender = - VideoRtpSender::Create(worker_thread(), rtc::CreateRandomUuid(), this); - video_sender->SetMediaChannel(video_media_channel()); + auto video_sender = VideoRtpSender::Create( + worker_thread(), rtc::CreateRandomUuid(), rtp_manager()); + video_sender->SetMediaChannel(rtp_manager()->video_media_channel()); new_sender = RtpSenderProxyWithInternal::Create( signaling_thread(), video_sender); - GetVideoTransceiver()->internal()->AddSender(new_sender); + rtp_manager()->GetVideoTransceiver()->internal()->AddSender(new_sender); } else { RTC_LOG(LS_ERROR) << "CreateSender called with invalid kind: " << kind; return nullptr; @@ -1979,54 +1111,29 @@ std::vector> PeerConnection::GetSenders() const { 
RTC_DCHECK_RUN_ON(signaling_thread()); std::vector> ret; - for (const auto& sender : GetSendersInternal()) { + for (const auto& sender : rtp_manager()->GetSendersInternal()) { ret.push_back(sender); } return ret; } -std::vector>> -PeerConnection::GetSendersInternal() const { - std::vector>> - all_senders; - for (const auto& transceiver : transceivers_) { - auto senders = transceiver->internal()->senders(); - all_senders.insert(all_senders.end(), senders.begin(), senders.end()); - } - return all_senders; -} - std::vector> PeerConnection::GetReceivers() const { RTC_DCHECK_RUN_ON(signaling_thread()); std::vector> ret; - for (const auto& receiver : GetReceiversInternal()) { + for (const auto& receiver : rtp_manager()->GetReceiversInternal()) { ret.push_back(receiver); } return ret; } -std::vector< - rtc::scoped_refptr>> -PeerConnection::GetReceiversInternal() const { - std::vector< - rtc::scoped_refptr>> - all_receivers; - for (const auto& transceiver : transceivers_) { - auto receivers = transceiver->internal()->receivers(); - all_receivers.insert(all_receivers.end(), receivers.begin(), - receivers.end()); - } - return all_receivers; -} - std::vector> PeerConnection::GetTransceivers() const { RTC_DCHECK_RUN_ON(signaling_thread()); RTC_CHECK(IsUnifiedPlan()) << "GetTransceivers is only supported with Unified Plan SdpSemantics."; std::vector> all_transceivers; - for (const auto& transceiver : transceivers_) { + for (const auto& transceiver : rtp_manager()->transceivers()->List()) { all_transceivers.push_back(transceiver); } return all_transceivers; @@ -2042,6 +1149,8 @@ bool PeerConnection::GetStats(StatsObserver* observer, return false; } + RTC_LOG_THREAD_BLOCK_COUNT(); + stats_->UpdateStats(level); // The StatsCollector is used to tell if a track is valid because it may // remember tracks that the PeerConnection previously removed. 
@@ -2050,8 +1159,8 @@ bool PeerConnection::GetStats(StatsObserver* observer, << track->id(); return false; } - signaling_thread()->Post(RTC_FROM_HERE, this, MSG_GETSTATS, - new GetStatsMsg(observer, track)); + message_handler_.PostGetStats(observer, stats_.get(), track); + return true; } @@ -2060,6 +1169,7 @@ void PeerConnection::GetStats(RTCStatsCollectorCallback* callback) { RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK(stats_collector_); RTC_DCHECK(callback); + RTC_LOG_THREAD_BLOCK_COUNT(); stats_collector_->GetStatsReport(callback); } @@ -2072,7 +1182,8 @@ void PeerConnection::GetStats( RTC_DCHECK(stats_collector_); rtc::scoped_refptr internal_sender; if (selector) { - for (const auto& proxy_transceiver : transceivers_) { + for (const auto& proxy_transceiver : + rtp_manager()->transceivers()->List()) { for (const auto& proxy_sender : proxy_transceiver->internal()->senders()) { if (proxy_sender == selector) { @@ -2101,7 +1212,8 @@ void PeerConnection::GetStats( RTC_DCHECK(stats_collector_); rtc::scoped_refptr internal_receiver; if (selector) { - for (const auto& proxy_transceiver : transceivers_) { + for (const auto& proxy_transceiver : + rtp_manager()->transceivers()->List()) { for (const auto& proxy_receiver : proxy_transceiver->internal()->receivers()) { if (proxy_receiver == selector) { @@ -2123,7 +1235,7 @@ void PeerConnection::GetStats( PeerConnectionInterface::SignalingState PeerConnection::signaling_state() { RTC_DCHECK_RUN_ON(signaling_thread()); - return signaling_state_; + return sdp_handler_->signaling_state(); } PeerConnectionInterface::IceConnectionState @@ -2152,9 +1264,9 @@ PeerConnection::ice_gathering_state() { absl::optional PeerConnection::can_trickle_ice_candidates() { RTC_DCHECK_RUN_ON(signaling_thread()); - SessionDescriptionInterface* description = current_remote_description_.get(); + const SessionDescriptionInterface* description = current_remote_description(); if (!description) { - description = pending_remote_description_.get(); + 
description = pending_remote_description(); } if (!description) { return absl::nullopt; @@ -2167,9 +1279,9 @@ absl::optional PeerConnection::can_trickle_ice_candidates() { "trickle"); } -rtc::scoped_refptr PeerConnection::CreateDataChannel( - const std::string& label, - const DataChannelInit* config) { +RTCErrorOr> +PeerConnection::CreateDataChannelOrError(const std::string& label, + const DataChannelInit* config) { RTC_DCHECK_RUN_ON(signaling_thread()); TRACE_EVENT0("webrtc", "PeerConnection::CreateDataChannel"); @@ -2179,3627 +1291,690 @@ rtc::scoped_refptr PeerConnection::CreateDataChannel( if (config) { internal_config.reset(new InternalDataChannelInit(*config)); } + // TODO(bugs.webrtc.org/12796): Return a more specific error. rtc::scoped_refptr channel( - data_channel_controller_.InternalCreateDataChannel( + data_channel_controller_.InternalCreateDataChannelWithProxy( label, internal_config.get())); if (!channel.get()) { - return nullptr; + return RTCError(RTCErrorType::INTERNAL_ERROR, + "Data channel creation failed"); } - // Trigger the onRenegotiationNeeded event for every new RTP DataChannel, or + // Trigger the onRenegotiationNeeded event for // the first SCTP DataChannel. - if (data_channel_type() == cricket::DCT_RTP || first_datachannel) { - UpdateNegotiationNeeded(); + if (first_datachannel) { + sdp_handler_->UpdateNegotiationNeeded(); } NoteUsageEvent(UsageEvent::DATA_ADDED); - return DataChannelProxy::Create(signaling_thread(), channel.get()); + return channel; } void PeerConnection::RestartIce() { RTC_DCHECK_RUN_ON(signaling_thread()); - local_ice_credentials_to_replace_->SetIceCredentialsFromLocalDescriptions( - current_local_description_.get(), pending_local_description_.get()); - UpdateNegotiationNeeded(); + sdp_handler_->RestartIce(); } void PeerConnection::CreateOffer(CreateSessionDescriptionObserver* observer, const RTCOfferAnswerOptions& options) { RTC_DCHECK_RUN_ON(signaling_thread()); - // Chain this operation. 
If asynchronous operations are pending on the chain, - // this operation will be queued to be invoked, otherwise the contents of the - // lambda will execute immediately. - operations_chain_->ChainOperation( - [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), - observer_refptr = - rtc::scoped_refptr(observer), - options](std::function operations_chain_callback) { - // Abort early if |this_weak_ptr| is no longer valid. - if (!this_weak_ptr) { - observer_refptr->OnFailure( - RTCError(RTCErrorType::INTERNAL_ERROR, - "CreateOffer failed because the session was shut down")); - operations_chain_callback(); - return; - } - // The operation completes asynchronously when the wrapper is invoked. - rtc::scoped_refptr - observer_wrapper(new rtc::RefCountedObject< - CreateSessionDescriptionObserverOperationWrapper>( - std::move(observer_refptr), - std::move(operations_chain_callback))); - this_weak_ptr->DoCreateOffer(options, observer_wrapper); - }); + sdp_handler_->CreateOffer(observer, options); } -void PeerConnection::DoCreateOffer( - const RTCOfferAnswerOptions& options, - rtc::scoped_refptr observer) { +void PeerConnection::CreateAnswer(CreateSessionDescriptionObserver* observer, + const RTCOfferAnswerOptions& options) { RTC_DCHECK_RUN_ON(signaling_thread()); - TRACE_EVENT0("webrtc", "PeerConnection::DoCreateOffer"); - - if (!observer) { - RTC_LOG(LS_ERROR) << "CreateOffer - observer is NULL."; - return; - } - - if (IsClosed()) { - std::string error = "CreateOffer called when PeerConnection is closed."; - RTC_LOG(LS_ERROR) << error; - PostCreateSessionDescriptionFailure( - observer, RTCError(RTCErrorType::INVALID_STATE, std::move(error))); - return; - } - - // If a session error has occurred the PeerConnection is in a possibly - // inconsistent state so fail right away. 
- if (session_error() != SessionError::kNone) { - std::string error_message = GetSessionErrorMsg(); - RTC_LOG(LS_ERROR) << "CreateOffer: " << error_message; - PostCreateSessionDescriptionFailure( - observer, - RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); - return; - } - - if (!ValidateOfferAnswerOptions(options)) { - std::string error = "CreateOffer called with invalid options."; - RTC_LOG(LS_ERROR) << error; - PostCreateSessionDescriptionFailure( - observer, RTCError(RTCErrorType::INVALID_PARAMETER, std::move(error))); - return; - } - - // Legacy handling for offer_to_receive_audio and offer_to_receive_video. - // Specified in WebRTC section 4.4.3.2 "Legacy configuration extensions". - if (IsUnifiedPlan()) { - RTCError error = HandleLegacyOfferOptions(options); - if (!error.ok()) { - PostCreateSessionDescriptionFailure(observer, std::move(error)); - return; - } - } - - cricket::MediaSessionOptions session_options; - GetOptionsForOffer(options, &session_options); - webrtc_session_desc_factory_->CreateOffer(observer, options, session_options); -} - -RTCError PeerConnection::HandleLegacyOfferOptions( - const RTCOfferAnswerOptions& options) { - RTC_DCHECK(IsUnifiedPlan()); - - if (options.offer_to_receive_audio == 0) { - RemoveRecvDirectionFromReceivingTransceiversOfType( - cricket::MEDIA_TYPE_AUDIO); - } else if (options.offer_to_receive_audio == 1) { - AddUpToOneReceivingTransceiverOfType(cricket::MEDIA_TYPE_AUDIO); - } else if (options.offer_to_receive_audio > 1) { - LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_PARAMETER, - "offer_to_receive_audio > 1 is not supported."); - } - - if (options.offer_to_receive_video == 0) { - RemoveRecvDirectionFromReceivingTransceiversOfType( - cricket::MEDIA_TYPE_VIDEO); - } else if (options.offer_to_receive_video == 1) { - AddUpToOneReceivingTransceiverOfType(cricket::MEDIA_TYPE_VIDEO); - } else if (options.offer_to_receive_video > 1) { - LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_PARAMETER, - 
"offer_to_receive_video > 1 is not supported."); - } - - return RTCError::OK(); + sdp_handler_->CreateAnswer(observer, options); } -void PeerConnection::RemoveRecvDirectionFromReceivingTransceiversOfType( - cricket::MediaType media_type) { - for (const auto& transceiver : GetReceivingTransceiversOfType(media_type)) { - RtpTransceiverDirection new_direction = - RtpTransceiverDirectionWithRecvSet(transceiver->direction(), false); - if (new_direction != transceiver->direction()) { - RTC_LOG(LS_INFO) << "Changing " << cricket::MediaTypeToString(media_type) - << " transceiver (MID=" - << transceiver->mid().value_or("") << ") from " - << RtpTransceiverDirectionToString( - transceiver->direction()) - << " to " - << RtpTransceiverDirectionToString(new_direction) - << " since CreateOffer specified offer_to_receive=0"; - transceiver->internal()->set_direction(new_direction); - } - } +void PeerConnection::SetLocalDescription( + SetSessionDescriptionObserver* observer, + SessionDescriptionInterface* desc_ptr) { + RTC_DCHECK_RUN_ON(signaling_thread()); + sdp_handler_->SetLocalDescription(observer, desc_ptr); } -void PeerConnection::AddUpToOneReceivingTransceiverOfType( - cricket::MediaType media_type) { +void PeerConnection::SetLocalDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer) { RTC_DCHECK_RUN_ON(signaling_thread()); - if (GetReceivingTransceiversOfType(media_type).empty()) { - RTC_LOG(LS_INFO) - << "Adding one recvonly " << cricket::MediaTypeToString(media_type) - << " transceiver since CreateOffer specified offer_to_receive=1"; - RtpTransceiverInit init; - init.direction = RtpTransceiverDirection::kRecvOnly; - AddTransceiver(media_type, nullptr, init, - /*update_negotiation_needed=*/false); - } -} - -std::vector>> -PeerConnection::GetReceivingTransceiversOfType(cricket::MediaType media_type) { - std::vector< - rtc::scoped_refptr>> - receiving_transceivers; - for (const auto& transceiver : transceivers_) { - if (!transceiver->stopped() && 
transceiver->media_type() == media_type && - RtpTransceiverDirectionHasRecv(transceiver->direction())) { - receiving_transceivers.push_back(transceiver); - } - } - return receiving_transceivers; + sdp_handler_->SetLocalDescription(std::move(desc), observer); } -void PeerConnection::CreateAnswer(CreateSessionDescriptionObserver* observer, - const RTCOfferAnswerOptions& options) { +void PeerConnection::SetLocalDescription( + SetSessionDescriptionObserver* observer) { RTC_DCHECK_RUN_ON(signaling_thread()); - // Chain this operation. If asynchronous operations are pending on the chain, - // this operation will be queued to be invoked, otherwise the contents of the - // lambda will execute immediately. - operations_chain_->ChainOperation( - [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), - observer_refptr = - rtc::scoped_refptr(observer), - options](std::function operations_chain_callback) { - // Abort early if |this_weak_ptr| is no longer valid. - if (!this_weak_ptr) { - observer_refptr->OnFailure(RTCError( - RTCErrorType::INTERNAL_ERROR, - "CreateAnswer failed because the session was shut down")); - operations_chain_callback(); - return; - } - // The operation completes asynchronously when the wrapper is invoked. 
- rtc::scoped_refptr - observer_wrapper(new rtc::RefCountedObject< - CreateSessionDescriptionObserverOperationWrapper>( - std::move(observer_refptr), - std::move(operations_chain_callback))); - this_weak_ptr->DoCreateAnswer(options, observer_wrapper); - }); + sdp_handler_->SetLocalDescription(observer); } -void PeerConnection::DoCreateAnswer( - const RTCOfferAnswerOptions& options, - rtc::scoped_refptr observer) { +void PeerConnection::SetLocalDescription( + rtc::scoped_refptr observer) { RTC_DCHECK_RUN_ON(signaling_thread()); - TRACE_EVENT0("webrtc", "PeerConnection::DoCreateAnswer"); - if (!observer) { - RTC_LOG(LS_ERROR) << "CreateAnswer - observer is NULL."; - return; - } - - // If a session error has occurred the PeerConnection is in a possibly - // inconsistent state so fail right away. - if (session_error() != SessionError::kNone) { - std::string error_message = GetSessionErrorMsg(); - RTC_LOG(LS_ERROR) << "CreateAnswer: " << error_message; - PostCreateSessionDescriptionFailure( - observer, - RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); - return; - } - - if (!(signaling_state_ == kHaveRemoteOffer || - signaling_state_ == kHaveLocalPrAnswer)) { - std::string error = - "PeerConnection cannot create an answer in a state other than " - "have-remote-offer or have-local-pranswer."; - RTC_LOG(LS_ERROR) << error; - PostCreateSessionDescriptionFailure( - observer, RTCError(RTCErrorType::INVALID_STATE, std::move(error))); - return; - } - - // The remote description should be set if we're in the right state. - RTC_DCHECK(remote_description()); - - if (IsUnifiedPlan()) { - if (options.offer_to_receive_audio != RTCOfferAnswerOptions::kUndefined) { - RTC_LOG(LS_WARNING) << "CreateAnswer: offer_to_receive_audio is not " - "supported with Unified Plan semantics. 
Use the " - "RtpTransceiver API instead."; - } - if (options.offer_to_receive_video != RTCOfferAnswerOptions::kUndefined) { - RTC_LOG(LS_WARNING) << "CreateAnswer: offer_to_receive_video is not " - "supported with Unified Plan semantics. Use the " - "RtpTransceiver API instead."; - } - } - - cricket::MediaSessionOptions session_options; - GetOptionsForAnswer(options, &session_options); - - webrtc_session_desc_factory_->CreateAnswer(observer, session_options); + sdp_handler_->SetLocalDescription(observer); } -void PeerConnection::SetLocalDescription( +void PeerConnection::SetRemoteDescription( SetSessionDescriptionObserver* observer, SessionDescriptionInterface* desc_ptr) { RTC_DCHECK_RUN_ON(signaling_thread()); - // Chain this operation. If asynchronous operations are pending on the chain, - // this operation will be queued to be invoked, otherwise the contents of the - // lambda will execute immediately. - operations_chain_->ChainOperation( - [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), - observer_refptr = - rtc::scoped_refptr(observer), - desc = std::unique_ptr(desc_ptr)]( - std::function operations_chain_callback) mutable { - // Abort early if |this_weak_ptr| is no longer valid. - if (!this_weak_ptr) { - // For consistency with DoSetLocalDescription(), we DO NOT inform the - // |observer_refptr| that the operation failed in this case. - // TODO(hbos): If/when we process SLD messages in ~PeerConnection, - // the consistent thing would be to inform the observer here. - operations_chain_callback(); - return; - } - this_weak_ptr->DoSetLocalDescription(std::move(desc), - std::move(observer_refptr)); - // DoSetLocalDescription() is currently implemented as a synchronous - // operation but where the |observer|'s callbacks are invoked - // asynchronously in a post to OnMessage(). - // For backwards-compatability reasons, we declare the operation as - // completed here (rather than in OnMessage()). 
This ensures that - // subsequent offer/answer operations can start immediately (without - // waiting for OnMessage()). - operations_chain_callback(); - }); + sdp_handler_->SetRemoteDescription(observer, desc_ptr); } -void PeerConnection::SetLocalDescription( - SetSessionDescriptionObserver* observer) { +void PeerConnection::SetRemoteDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer) { RTC_DCHECK_RUN_ON(signaling_thread()); - // The |create_sdp_observer| handles performing DoSetLocalDescription() with - // the resulting description as well as completing the operation. - rtc::scoped_refptr - create_sdp_observer( - new rtc::RefCountedObject( - weak_ptr_factory_.GetWeakPtr(), - rtc::scoped_refptr(observer))); - // Chain this operation. If asynchronous operations are pending on the chain, - // this operation will be queued to be invoked, otherwise the contents of the - // lambda will execute immediately. - operations_chain_->ChainOperation( - [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), - create_sdp_observer](std::function operations_chain_callback) { - // The |create_sdp_observer| is responsible for completing the - // operation. - create_sdp_observer->SetOperationCompleteCallback( - std::move(operations_chain_callback)); - // Abort early if |this_weak_ptr| is no longer valid. This triggers the - // same code path as if DoCreateOffer() or DoCreateAnswer() failed. - if (!this_weak_ptr) { - create_sdp_observer->OnFailure(RTCError( - RTCErrorType::INTERNAL_ERROR, - "SetLocalDescription failed because the session was shut down")); - return; - } - switch (this_weak_ptr->signaling_state()) { - case PeerConnectionInterface::kStable: - case PeerConnectionInterface::kHaveLocalOffer: - case PeerConnectionInterface::kHaveRemotePrAnswer: - // TODO(hbos): If [LastCreatedOffer] exists and still represents the - // current state of the system, use that instead of creating another - // offer. 
- this_weak_ptr->DoCreateOffer(RTCOfferAnswerOptions(), - create_sdp_observer); - break; - case PeerConnectionInterface::kHaveLocalPrAnswer: - case PeerConnectionInterface::kHaveRemoteOffer: - // TODO(hbos): If [LastCreatedAnswer] exists and still represents - // the current state of the system, use that instead of creating - // another answer. - this_weak_ptr->DoCreateAnswer(RTCOfferAnswerOptions(), - create_sdp_observer); - break; - case PeerConnectionInterface::kClosed: - create_sdp_observer->OnFailure(RTCError( - RTCErrorType::INVALID_STATE, - "SetLocalDescription called when PeerConnection is closed.")); - break; - } - }); + sdp_handler_->SetRemoteDescription(std::move(desc), observer); } -void PeerConnection::DoSetLocalDescription( - std::unique_ptr desc, - rtc::scoped_refptr observer) { +PeerConnectionInterface::RTCConfiguration PeerConnection::GetConfiguration() { RTC_DCHECK_RUN_ON(signaling_thread()); - TRACE_EVENT0("webrtc", "PeerConnection::DoSetLocalDescription"); - - if (!observer) { - RTC_LOG(LS_ERROR) << "SetLocalDescription - observer is NULL."; - return; - } - - if (!desc) { - PostSetSessionDescriptionFailure( - observer, - RTCError(RTCErrorType::INTERNAL_ERROR, "SessionDescription is NULL.")); - return; - } - - // If a session error has occurred the PeerConnection is in a possibly - // inconsistent state so fail right away. - if (session_error() != SessionError::kNone) { - std::string error_message = GetSessionErrorMsg(); - RTC_LOG(LS_ERROR) << "SetLocalDescription: " << error_message; - PostSetSessionDescriptionFailure( - observer, - RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); - return; - } - - // For SLD we support only explicit rollback. 
- if (desc->GetType() == SdpType::kRollback) { - if (IsUnifiedPlan()) { - RTCError error = Rollback(desc->GetType()); - if (error.ok()) { - PostSetSessionDescriptionSuccess(observer); - } else { - PostSetSessionDescriptionFailure(observer, std::move(error)); - } - } else { - PostSetSessionDescriptionFailure( - observer, RTCError(RTCErrorType::UNSUPPORTED_OPERATION, - "Rollback not supported in Plan B")); - } - return; - } - - RTCError error = ValidateSessionDescription(desc.get(), cricket::CS_LOCAL); - if (!error.ok()) { - std::string error_message = GetSetDescriptionErrorMessage( - cricket::CS_LOCAL, desc->GetType(), error); - RTC_LOG(LS_ERROR) << error_message; - PostSetSessionDescriptionFailure( - observer, - RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); - return; - } + return configuration_; +} - // Grab the description type before moving ownership to ApplyLocalDescription, - // which may destroy it before returning. - const SdpType type = desc->GetType(); - - error = ApplyLocalDescription(std::move(desc)); - // |desc| may be destroyed at this point. - - if (!error.ok()) { - // If ApplyLocalDescription fails, the PeerConnection could be in an - // inconsistent state, so act conservatively here and set the session error - // so that future calls to SetLocalDescription/SetRemoteDescription fail. 
- SetSessionError(SessionError::kContent, error.message()); - std::string error_message = - GetSetDescriptionErrorMessage(cricket::CS_LOCAL, type, error); - RTC_LOG(LS_ERROR) << error_message; - PostSetSessionDescriptionFailure( - observer, - RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); - return; +RTCError PeerConnection::SetConfiguration( + const RTCConfiguration& configuration) { + RTC_DCHECK_RUN_ON(signaling_thread()); + TRACE_EVENT0("webrtc", "PeerConnection::SetConfiguration"); + if (IsClosed()) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_STATE, + "SetConfiguration: PeerConnection is closed."); } - RTC_DCHECK(local_description()); - - PostSetSessionDescriptionSuccess(observer); - - // MaybeStartGathering needs to be called after posting - // MSG_SET_SESSIONDESCRIPTION_SUCCESS, so that we don't signal any candidates - // before signaling that SetLocalDescription completed. - transport_controller_->MaybeStartGathering(); - if (local_description()->GetType() == SdpType::kAnswer) { - // TODO(deadbeef): We already had to hop to the network thread for - // MaybeStartGathering... - network_thread()->Invoke( - RTC_FROM_HERE, rtc::Bind(&cricket::PortAllocator::DiscardCandidatePool, - port_allocator_.get())); - // Make UMA notes about what was agreed to. - ReportNegotiatedSdpSemantics(*local_description()); + // According to JSEP, after setLocalDescription, changing the candidate pool + // size is not allowed, and changing the set of ICE servers will not result + // in new candidates being gathered. 
+ if (local_description() && configuration.ice_candidate_pool_size != + configuration_.ice_candidate_pool_size) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION, + "Can't change candidate pool size after calling " + "SetLocalDescription."); } - if (IsUnifiedPlan()) { - bool was_negotiation_needed = is_negotiation_needed_; - UpdateNegotiationNeeded(); - if (signaling_state() == kStable && was_negotiation_needed && - is_negotiation_needed_) { - Observer()->OnRenegotiationNeeded(); - } + if (local_description() && + configuration.crypto_options != configuration_.crypto_options) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION, + "Can't change crypto_options after calling " + "SetLocalDescription."); } - NoteUsageEvent(UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED); -} - -RTCError PeerConnection::ApplyLocalDescription( - std::unique_ptr desc) { - RTC_DCHECK_RUN_ON(signaling_thread()); - RTC_DCHECK(desc); - - // Update stats here so that we have the most recent stats for tracks and - // streams that might be removed by updating the session description. - stats_->UpdateStats(kStatsOutputLevelStandard); - - // Take a reference to the old local description since it's used below to - // compare against the new local description. When setting the new local - // description, grab ownership of the replaced session description in case it - // is the same as |old_local_description|, to keep it alive for the duration - // of the method. - const SessionDescriptionInterface* old_local_description = - local_description(); - std::unique_ptr replaced_local_description; - SdpType type = desc->GetType(); - if (type == SdpType::kAnswer) { - replaced_local_description = pending_local_description_ - ? 
std::move(pending_local_description_) - : std::move(current_local_description_); - current_local_description_ = std::move(desc); - pending_local_description_ = nullptr; - current_remote_description_ = std::move(pending_remote_description_); - } else { - replaced_local_description = std::move(pending_local_description_); - pending_local_description_ = std::move(desc); + // The simplest (and most future-compatible) way to tell if the config was + // modified in an invalid way is to copy each property we do support + // modifying, then use operator==. There are far more properties we don't + // support modifying than those we do, and more could be added. + RTCConfiguration modified_config = configuration_; + modified_config.servers = configuration.servers; + modified_config.type = configuration.type; + modified_config.ice_candidate_pool_size = + configuration.ice_candidate_pool_size; + modified_config.prune_turn_ports = configuration.prune_turn_ports; + modified_config.turn_port_prune_policy = configuration.turn_port_prune_policy; + modified_config.surface_ice_candidates_on_ice_transport_type_changed = + configuration.surface_ice_candidates_on_ice_transport_type_changed; + modified_config.ice_check_min_interval = configuration.ice_check_min_interval; + modified_config.ice_check_interval_strong_connectivity = + configuration.ice_check_interval_strong_connectivity; + modified_config.ice_check_interval_weak_connectivity = + configuration.ice_check_interval_weak_connectivity; + modified_config.ice_unwritable_timeout = configuration.ice_unwritable_timeout; + modified_config.ice_unwritable_min_checks = + configuration.ice_unwritable_min_checks; + modified_config.ice_inactive_timeout = configuration.ice_inactive_timeout; + modified_config.stun_candidate_keepalive_interval = + configuration.stun_candidate_keepalive_interval; + modified_config.turn_customizer = configuration.turn_customizer; + modified_config.network_preference = configuration.network_preference; + 
modified_config.active_reset_srtp_params = + configuration.active_reset_srtp_params; + modified_config.turn_logging_id = configuration.turn_logging_id; + modified_config.allow_codec_switching = configuration.allow_codec_switching; + modified_config.stable_writable_connection_ping_interval_ms = + configuration.stable_writable_connection_ping_interval_ms; + if (configuration != modified_config) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION, + "Modifying the configuration in an unsupported way."); } - // The session description to apply now must be accessed by - // |local_description()|. - RTC_DCHECK(local_description()); - // Report statistics about any use of simulcast. - ReportSimulcastApiVersion(kSimulcastVersionApplyLocalDescription, - *local_description()->description()); - - if (!is_caller_) { - if (remote_description()) { - // Remote description was applied first, so this PC is the callee. - is_caller_ = false; - } else { - // Local description is applied first, so this PC is the caller. - is_caller_ = true; - } + // Validate the modified configuration. + RTCError validate_error = ValidateConfiguration(modified_config); + if (!validate_error.ok()) { + return validate_error; } - RTCError error = PushdownTransportDescription(cricket::CS_LOCAL, type); - if (!error.ok()) { - return error; + // Note that this isn't possible through chromium, since it's an unsigned + // short in WebIDL. + if (configuration.ice_candidate_pool_size < 0 || + configuration.ice_candidate_pool_size > static_cast(UINT16_MAX)) { + return RTCError(RTCErrorType::INVALID_RANGE); } - if (IsUnifiedPlan()) { - RTCError error = UpdateTransceiversAndDataChannels( - cricket::CS_LOCAL, *local_description(), old_local_description, - remote_description()); - if (!error.ok()) { - return error; - } - std::vector> remove_list; - std::vector> removed_streams; - for (const auto& transceiver : transceivers_) { - // 2.2.7.1.1.(6-9): Set sender and receiver's transport slots. 
- // Note that code paths that don't set MID won't be able to use - // information about DTLS transports. - if (transceiver->mid()) { - auto dtls_transport = - LookupDtlsTransportByMidInternal(*transceiver->mid()); - transceiver->internal()->sender_internal()->set_transport( - dtls_transport); - transceiver->internal()->receiver_internal()->set_transport( - dtls_transport); - } - - const ContentInfo* content = - FindMediaSectionForTransceiver(transceiver, local_description()); - if (!content) { - continue; - } - const MediaContentDescription* media_desc = content->media_description(); - // 2.2.7.1.6: If description is of type "answer" or "pranswer", then run - // the following steps: - if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { - // 2.2.7.1.6.1: If direction is "sendonly" or "inactive", and - // transceiver's [[FiredDirection]] slot is either "sendrecv" or - // "recvonly", process the removal of a remote track for the media - // description, given transceiver, removeList, and muteTracks. - if (!RtpTransceiverDirectionHasRecv(media_desc->direction()) && - (transceiver->internal()->fired_direction() && - RtpTransceiverDirectionHasRecv( - *transceiver->internal()->fired_direction()))) { - ProcessRemovalOfRemoteTrack(transceiver, &remove_list, - &removed_streams); - } - // 2.2.7.1.6.2: Set transceiver's [[CurrentDirection]] and - // [[FiredDirection]] slots to direction. - transceiver->internal()->set_current_direction(media_desc->direction()); - transceiver->internal()->set_fired_direction(media_desc->direction()); - } - } - auto observer = Observer(); - for (const auto& transceiver : remove_list) { - observer->OnRemoveTrack(transceiver->receiver()); - } - for (const auto& stream : removed_streams) { - observer->OnRemoveStream(stream); - } - } else { - // Media channels will be created only when offer is set. These may use new - // transports just created by PushdownTransportDescription. 
- if (type == SdpType::kOffer) { - // TODO(bugs.webrtc.org/4676) - Handle CreateChannel failure, as new local - // description is applied. Restore back to old description. - RTCError error = CreateChannels(*local_description()->description()); - if (!error.ok()) { - return error; - } - } - // Remove unused channels if MediaContentDescription is rejected. - RemoveUnusedChannels(local_description()->description()); + // Parse ICE servers before hopping to network thread. + cricket::ServerAddresses stun_servers; + std::vector turn_servers; + RTCErrorType parse_error = + ParseIceServers(configuration.servers, &stun_servers, &turn_servers); + if (parse_error != RTCErrorType::NONE) { + return RTCError(parse_error); } - - error = UpdateSessionState(type, cricket::CS_LOCAL, - local_description()->description()); - if (!error.ok()) { - return error; + // Add the turn logging id to all turn servers + for (cricket::RelayServerConfig& turn_server : turn_servers) { + turn_server.turn_logging_id = configuration.turn_logging_id; } - if (remote_description()) { - // Now that we have a local description, we can push down remote candidates. - UseCandidatesInSessionDescription(remote_description()); + // Note if STUN or TURN servers were supplied. 
+ if (!stun_servers.empty()) { + NoteUsageEvent(UsageEvent::STUN_SERVER_ADDED); } + if (!turn_servers.empty()) { + NoteUsageEvent(UsageEvent::TURN_SERVER_ADDED); + } + + const bool has_local_description = local_description() != nullptr; - pending_ice_restarts_.clear(); - if (session_error() != SessionError::kNone) { - LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, GetSessionErrorMsg()); + const bool needs_ice_restart = + modified_config.servers != configuration_.servers || + NeedIceRestart( + configuration_.surface_ice_candidates_on_ice_transport_type_changed, + configuration_.type, modified_config.type) || + modified_config.GetTurnPortPrunePolicy() != + configuration_.GetTurnPortPrunePolicy(); + cricket::IceConfig ice_config = ParseIceConfig(modified_config); + + // Apply part of the configuration on the network thread. In theory this + // shouldn't fail. + if (!network_thread()->Invoke( + RTC_FROM_HERE, + [this, needs_ice_restart, &ice_config, &stun_servers, &turn_servers, + &modified_config, has_local_description] { + // As described in JSEP, calling setConfiguration with new ICE + // servers or candidate policy must set a "needs-ice-restart" bit so + // that the next offer triggers an ICE restart which will pick up + // the changes. + if (needs_ice_restart) + transport_controller_->SetNeedsIceRestartFlag(); + + transport_controller_->SetIceConfig(ice_config); + return ReconfigurePortAllocator_n( + stun_servers, turn_servers, modified_config.type, + modified_config.ice_candidate_pool_size, + modified_config.GetTurnPortPrunePolicy(), + modified_config.turn_customizer, + modified_config.stun_candidate_keepalive_interval, + has_local_description); + })) { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, + "Failed to apply configuration to PortAllocator."); } - // If setting the description decided our SSL role, allocate any necessary - // SCTP sids. 
- rtc::SSLRole role; - if (DataChannel::IsSctpLike(data_channel_type()) && GetSctpSslRole(&role)) { - data_channel_controller_.AllocateSctpSids(role); + if (configuration_.active_reset_srtp_params != + modified_config.active_reset_srtp_params) { + // TODO(tommi): move to the network thread - this hides an invoke. + transport_controller_->SetActiveResetSrtpParams( + modified_config.active_reset_srtp_params); } - if (IsUnifiedPlan()) { - for (const auto& transceiver : transceivers_) { - const ContentInfo* content = - FindMediaSectionForTransceiver(transceiver, local_description()); - if (!content) { + if (modified_config.allow_codec_switching.has_value()) { + std::vector channels; + for (const auto& transceiver : rtp_manager()->transceivers()->List()) { + if (transceiver->media_type() != cricket::MEDIA_TYPE_VIDEO) continue; - } - cricket::ChannelInterface* channel = transceiver->internal()->channel(); - if (content->rejected || !channel || channel->local_streams().empty()) { - // 0 is a special value meaning "this sender has no associated send - // stream". Need to call this so the sender won't attempt to configure - // a no longer existing stream and run into DCHECKs in the lower - // layers. - transceiver->internal()->sender_internal()->SetSsrc(0); - } else { - // Get the StreamParams from the channel which could generate SSRCs. - const std::vector& streams = channel->local_streams(); - transceiver->internal()->sender_internal()->set_stream_ids( - streams[0].stream_ids()); - transceiver->internal()->sender_internal()->SetSsrc( - streams[0].first_ssrc()); - } - } - } else { - // Plan B semantics. - - // Update state and SSRC of local MediaStreams and DataChannels based on the - // local session description. 
- const cricket::ContentInfo* audio_content = - GetFirstAudioContent(local_description()->description()); - if (audio_content) { - if (audio_content->rejected) { - RemoveSenders(cricket::MEDIA_TYPE_AUDIO); - } else { - const cricket::AudioContentDescription* audio_desc = - audio_content->media_description()->as_audio(); - UpdateLocalSenders(audio_desc->streams(), audio_desc->type()); - } - } - - const cricket::ContentInfo* video_content = - GetFirstVideoContent(local_description()->description()); - if (video_content) { - if (video_content->rejected) { - RemoveSenders(cricket::MEDIA_TYPE_VIDEO); - } else { - const cricket::VideoContentDescription* video_desc = - video_content->media_description()->as_video(); - UpdateLocalSenders(video_desc->streams(), video_desc->type()); - } - } - } - const cricket::ContentInfo* data_content = - GetFirstDataContent(local_description()->description()); - if (data_content) { - const cricket::RtpDataContentDescription* rtp_data_desc = - data_content->media_description()->as_rtp_data(); - // rtp_data_desc will be null if this is an SCTP description. - if (rtp_data_desc) { - data_channel_controller_.UpdateLocalRtpDataChannels( - rtp_data_desc->streams()); + auto* video_channel = static_cast( + transceiver->internal()->channel()); + if (video_channel) + channels.push_back(video_channel->media_channel()); } - } - if (type == SdpType::kAnswer && - local_ice_credentials_to_replace_->SatisfiesIceRestart( - *current_local_description_)) { - local_ice_credentials_to_replace_->ClearIceCredentials(); + worker_thread()->Invoke( + RTC_FROM_HERE, + [channels = std::move(channels), + allow_codec_switching = *modified_config.allow_codec_switching]() { + for (auto* ch : channels) + ch->SetVideoCodecSwitchingEnabled(allow_codec_switching); + }); } + configuration_ = modified_config; return RTCError::OK(); } -// The SDP parser used to populate these values by default for the 'content -// name' if an a=mid line was absent. 
-static absl::string_view GetDefaultMidForPlanB(cricket::MediaType media_type) { - switch (media_type) { - case cricket::MEDIA_TYPE_AUDIO: - return cricket::CN_AUDIO; - case cricket::MEDIA_TYPE_VIDEO: - return cricket::CN_VIDEO; - case cricket::MEDIA_TYPE_DATA: - return cricket::CN_DATA; - } - RTC_NOTREACHED(); - return ""; -} - -void PeerConnection::FillInMissingRemoteMids( - cricket::SessionDescription* new_remote_description) { - RTC_DCHECK(new_remote_description); - const cricket::ContentInfos no_infos; - const cricket::ContentInfos& local_contents = - (local_description() ? local_description()->description()->contents() - : no_infos); - const cricket::ContentInfos& remote_contents = - (remote_description() ? remote_description()->description()->contents() - : no_infos); - for (size_t i = 0; i < new_remote_description->contents().size(); ++i) { - cricket::ContentInfo& content = new_remote_description->contents()[i]; - if (!content.name.empty()) { - continue; - } - std::string new_mid; - absl::string_view source_explanation; - if (IsUnifiedPlan()) { - if (i < local_contents.size()) { - new_mid = local_contents[i].name; - source_explanation = "from the matching local media section"; - } else if (i < remote_contents.size()) { - new_mid = remote_contents[i].name; - source_explanation = "from the matching previous remote media section"; - } else { - new_mid = mid_generator_(); - source_explanation = "generated just now"; - } - } else { - new_mid = std::string( - GetDefaultMidForPlanB(content.media_description()->type())); - source_explanation = "to match pre-existing behavior"; - } - RTC_DCHECK(!new_mid.empty()); - content.name = new_mid; - new_remote_description->transport_infos()[i].content_name = new_mid; - RTC_LOG(LS_INFO) << "SetRemoteDescription: Remote media section at i=" << i - << " is missing an a=mid line. 
Filling in the value '" - << new_mid << "' " << source_explanation << "."; - } -} - -void PeerConnection::SetRemoteDescription( - SetSessionDescriptionObserver* observer, - SessionDescriptionInterface* desc_ptr) { +bool PeerConnection::AddIceCandidate( + const IceCandidateInterface* ice_candidate) { RTC_DCHECK_RUN_ON(signaling_thread()); - // Chain this operation. If asynchronous operations are pending on the chain, - // this operation will be queued to be invoked, otherwise the contents of the - // lambda will execute immediately. - operations_chain_->ChainOperation( - [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), - observer_refptr = - rtc::scoped_refptr(observer), - desc = std::unique_ptr(desc_ptr)]( - std::function operations_chain_callback) mutable { - // Abort early if |this_weak_ptr| is no longer valid. - if (!this_weak_ptr) { - // For consistency with SetRemoteDescriptionObserverAdapter, we DO NOT - // inform the |observer_refptr| that the operation failed in this - // case. - // TODO(hbos): If/when we process SRD messages in ~PeerConnection, - // the consistent thing would be to inform the observer here. - operations_chain_callback(); - return; - } - this_weak_ptr->DoSetRemoteDescription( - std::move(desc), - rtc::scoped_refptr( - new SetRemoteDescriptionObserverAdapter( - this_weak_ptr.get(), std::move(observer_refptr)))); - // DoSetRemoteDescription() is currently implemented as a synchronous - // operation but where SetRemoteDescriptionObserverAdapter ensures that - // the |observer|'s callbacks are invoked asynchronously in a post to - // OnMessage(). - // For backwards-compatability reasons, we declare the operation as - // completed here (rather than in OnMessage()). This ensures that - // subsequent offer/answer operations can start immediately (without - // waiting for OnMessage()). 
- operations_chain_callback(); - }); + return sdp_handler_->AddIceCandidate(ice_candidate); } -void PeerConnection::SetRemoteDescription( - std::unique_ptr desc, - rtc::scoped_refptr observer) { +void PeerConnection::AddIceCandidate( + std::unique_ptr candidate, + std::function callback) { RTC_DCHECK_RUN_ON(signaling_thread()); - // Chain this operation. If asynchronous operations are pending on the chain, - // this operation will be queued to be invoked, otherwise the contents of the - // lambda will execute immediately. - operations_chain_->ChainOperation( - [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), observer, - desc = std::move(desc)]( - std::function operations_chain_callback) mutable { - // Abort early if |this_weak_ptr| is no longer valid. - if (!this_weak_ptr) { - // For consistency with DoSetRemoteDescription(), we DO inform the - // |observer| that the operation failed in this case. - observer->OnSetRemoteDescriptionComplete(RTCError( - RTCErrorType::INVALID_STATE, - "Failed to set remote offer sdp: failed because the session was " - "shut down")); - operations_chain_callback(); - return; - } - this_weak_ptr->DoSetRemoteDescription(std::move(desc), - std::move(observer)); - // DoSetRemoteDescription() is currently implemented as a synchronous - // operation. The |observer| will already have been informed that it - // completed, and we can mark this operation as complete without any - // loose ends. 
- operations_chain_callback(); - }); + sdp_handler_->AddIceCandidate(std::move(candidate), callback); } -void PeerConnection::DoSetRemoteDescription( - std::unique_ptr desc, - rtc::scoped_refptr observer) { +bool PeerConnection::RemoveIceCandidates( + const std::vector& candidates) { + TRACE_EVENT0("webrtc", "PeerConnection::RemoveIceCandidates"); RTC_DCHECK_RUN_ON(signaling_thread()); - TRACE_EVENT0("webrtc", "PeerConnection::DoSetRemoteDescription"); - - if (!observer) { - RTC_LOG(LS_ERROR) << "SetRemoteDescription - observer is NULL."; - return; - } - - if (!desc) { - observer->OnSetRemoteDescriptionComplete(RTCError( - RTCErrorType::INVALID_PARAMETER, "SessionDescription is NULL.")); - return; - } - - // If a session error has occurred the PeerConnection is in a possibly - // inconsistent state so fail right away. - if (session_error() != SessionError::kNone) { - std::string error_message = GetSessionErrorMsg(); - RTC_LOG(LS_ERROR) << "SetRemoteDescription: " << error_message; - observer->OnSetRemoteDescriptionComplete( - RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); - return; - } - if (IsUnifiedPlan()) { - if (configuration_.enable_implicit_rollback) { - if (desc->GetType() == SdpType::kOffer && - signaling_state() == kHaveLocalOffer) { - Rollback(desc->GetType()); - } - } - // Explicit rollback. - if (desc->GetType() == SdpType::kRollback) { - observer->OnSetRemoteDescriptionComplete(Rollback(desc->GetType())); - return; - } - } else if (desc->GetType() == SdpType::kRollback) { - observer->OnSetRemoteDescriptionComplete( - RTCError(RTCErrorType::UNSUPPORTED_OPERATION, - "Rollback not supported in Plan B")); - return; - } - if (desc->GetType() == SdpType::kOffer) { - // Report to UMA the format of the received offer. - ReportSdpFormatReceived(*desc); - } - - // Handle remote descriptions missing a=mid lines for interop with legacy end - // points. 
- FillInMissingRemoteMids(desc->description()); - - RTCError error = ValidateSessionDescription(desc.get(), cricket::CS_REMOTE); - if (!error.ok()) { - std::string error_message = GetSetDescriptionErrorMessage( - cricket::CS_REMOTE, desc->GetType(), error); - RTC_LOG(LS_ERROR) << error_message; - observer->OnSetRemoteDescriptionComplete( - RTCError(error.type(), std::move(error_message))); - return; - } - - // Grab the description type before moving ownership to - // ApplyRemoteDescription, which may destroy it before returning. - const SdpType type = desc->GetType(); - - error = ApplyRemoteDescription(std::move(desc)); - // |desc| may be destroyed at this point. - - if (!error.ok()) { - // If ApplyRemoteDescription fails, the PeerConnection could be in an - // inconsistent state, so act conservatively here and set the session error - // so that future calls to SetLocalDescription/SetRemoteDescription fail. - SetSessionError(SessionError::kContent, error.message()); - std::string error_message = - GetSetDescriptionErrorMessage(cricket::CS_REMOTE, type, error); - RTC_LOG(LS_ERROR) << error_message; - observer->OnSetRemoteDescriptionComplete( - RTCError(error.type(), std::move(error_message))); - return; - } - RTC_DCHECK(remote_description()); - - if (type == SdpType::kAnswer) { - // TODO(deadbeef): We already had to hop to the network thread for - // MaybeStartGathering... - network_thread()->Invoke( - RTC_FROM_HERE, rtc::Bind(&cricket::PortAllocator::DiscardCandidatePool, - port_allocator_.get())); - // Make UMA notes about what was agreed to. 
- ReportNegotiatedSdpSemantics(*remote_description()); - } - - if (IsUnifiedPlan()) { - bool was_negotiation_needed = is_negotiation_needed_; - UpdateNegotiationNeeded(); - if (signaling_state() == kStable && was_negotiation_needed && - is_negotiation_needed_) { - Observer()->OnRenegotiationNeeded(); - } - } - - observer->OnSetRemoteDescriptionComplete(RTCError::OK()); - NoteUsageEvent(UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED); + return sdp_handler_->RemoveIceCandidates(candidates); } -RTCError PeerConnection::ApplyRemoteDescription( - std::unique_ptr desc) { - RTC_DCHECK_RUN_ON(signaling_thread()); - RTC_DCHECK(desc); - - // Update stats here so that we have the most recent stats for tracks and - // streams that might be removed by updating the session description. - stats_->UpdateStats(kStatsOutputLevelStandard); - - // Take a reference to the old remote description since it's used below to - // compare against the new remote description. When setting the new remote - // description, grab ownership of the replaced session description in case it - // is the same as |old_remote_description|, to keep it alive for the duration - // of the method. - const SessionDescriptionInterface* old_remote_description = - remote_description(); - std::unique_ptr replaced_remote_description; - SdpType type = desc->GetType(); - if (type == SdpType::kAnswer) { - replaced_remote_description = pending_remote_description_ - ? std::move(pending_remote_description_) - : std::move(current_remote_description_); - current_remote_description_ = std::move(desc); - pending_remote_description_ = nullptr; - current_local_description_ = std::move(pending_local_description_); - } else { - replaced_remote_description = std::move(pending_remote_description_); - pending_remote_description_ = std::move(desc); - } - // The session description to apply now must be accessed by - // |remote_description()|. - RTC_DCHECK(remote_description()); - - // Report statistics about any use of simulcast. 
- ReportSimulcastApiVersion(kSimulcastVersionApplyRemoteDescription, - *remote_description()->description()); - - RTCError error = PushdownTransportDescription(cricket::CS_REMOTE, type); - if (!error.ok()) { - return error; - } - // Transport and Media channels will be created only when offer is set. - if (IsUnifiedPlan()) { - RTCError error = UpdateTransceiversAndDataChannels( - cricket::CS_REMOTE, *remote_description(), local_description(), - old_remote_description); - if (!error.ok()) { - return error; - } - } else { - // Media channels will be created only when offer is set. These may use new - // transports just created by PushdownTransportDescription. - if (type == SdpType::kOffer) { - // TODO(mallinath) - Handle CreateChannel failure, as new local - // description is applied. Restore back to old description. - RTCError error = CreateChannels(*remote_description()->description()); - if (!error.ok()) { - return error; - } - } - // Remove unused channels if MediaContentDescription is rejected. - RemoveUnusedChannels(remote_description()->description()); +RTCError PeerConnection::SetBitrate(const BitrateSettings& bitrate) { + if (!worker_thread()->IsCurrent()) { + return worker_thread()->Invoke( + RTC_FROM_HERE, [&]() { return SetBitrate(bitrate); }); } + RTC_DCHECK_RUN_ON(worker_thread()); - // NOTE: Candidates allocation will be initiated only when - // SetLocalDescription is called. 
-  error = UpdateSessionState(type, cricket::CS_REMOTE,
-                             remote_description()->description());
-  if (!error.ok()) {
-    return error;
+  const bool has_min = bitrate.min_bitrate_bps.has_value();
+  const bool has_start = bitrate.start_bitrate_bps.has_value();
+  const bool has_max = bitrate.max_bitrate_bps.has_value();
+  if (has_min && *bitrate.min_bitrate_bps < 0) {
+    LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER,
+                         "min_bitrate_bps < 0");
   }
-
-  if (local_description() &&
-      !UseCandidatesInSessionDescription(remote_description())) {
-    LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, kInvalidCandidates);
-  }
-
-  if (old_remote_description) {
-    for (const cricket::ContentInfo& content :
-         old_remote_description->description()->contents()) {
-      // Check if this new SessionDescription contains new ICE ufrag and
-      // password that indicates the remote peer requests an ICE restart.
-      // TODO(deadbeef): When we start storing both the current and pending
-      // remote description, this should reset pending_ice_restarts and compare
-      // against the current description.
-      if (CheckForRemoteIceRestart(old_remote_description, remote_description(),
-                                   content.name)) {
-        if (type == SdpType::kOffer) {
-          pending_ice_restarts_.insert(content.name);
-        }
-      } else {
-        // We retain all received candidates only if ICE is not restarted.
-        // When ICE is restarted, all previous candidates belong to an old
-        // generation and should not be kept.
-        // TODO(deadbeef): This goes against the W3C spec which says the remote
-        // description should only contain candidates from the last set remote
-        // description plus any candidates added since then. We should remove
-        // this once we're sure it won't break anything.
-        WebRtcSessionDescriptionFactory::CopyCandidatesFromSessionDescription(
-            old_remote_description, content.name, mutable_remote_description());
-      }
+  if (has_start) {
+    if (has_min && *bitrate.start_bitrate_bps < *bitrate.min_bitrate_bps) {
+      LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER,
+                           "start_bitrate_bps < min_bitrate_bps");
+    } else if (*bitrate.start_bitrate_bps < 0) {
+      LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER,
+                           "start_bitrate_bps < 0");
     }
   }
-
-  if (session_error() != SessionError::kNone) {
-    LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, GetSessionErrorMsg());
-  }
-
-  // Set the the ICE connection state to connecting since the connection may
-  // become writable with peer reflexive candidates before any remote candidate
-  // is signaled.
-  // TODO(pthatcher): This is a short-term solution for crbug/446908. A real fix
-  // is to have a new signal the indicates a change in checking state from the
-  // transport and expose a new checking() member from transport that can be
-  // read to determine the current checking state. The existing SignalConnecting
-  // actually means "gathering candidates", so cannot be be used here.
-  if (remote_description()->GetType() != SdpType::kOffer &&
-      remote_description()->number_of_mediasections() > 0u &&
-      ice_connection_state() == PeerConnectionInterface::kIceConnectionNew) {
-    SetIceConnectionState(PeerConnectionInterface::kIceConnectionChecking);
-  }
-
-  // If setting the description decided our SSL role, allocate any necessary
-  // SCTP sids.
- rtc::SSLRole role; - if (DataChannel::IsSctpLike(data_channel_type()) && GetSctpSslRole(&role)) { - data_channel_controller_.AllocateSctpSids(role); - } - - if (IsUnifiedPlan()) { - std::vector> - now_receiving_transceivers; - std::vector> remove_list; - std::vector> added_streams; - std::vector> removed_streams; - for (const auto& transceiver : transceivers_) { - const ContentInfo* content = - FindMediaSectionForTransceiver(transceiver, remote_description()); - if (!content) { - continue; - } - const MediaContentDescription* media_desc = content->media_description(); - RtpTransceiverDirection local_direction = - RtpTransceiverDirectionReversed(media_desc->direction()); - // Roughly the same as steps 2.2.8.6 of section 4.4.1.6 "Set the - // RTCSessionDescription: Set the associated remote streams given - // transceiver.[[Receiver]], msids, addList, and removeList". - // https://w3c.github.io/webrtc-pc/#set-the-rtcsessiondescription - if (RtpTransceiverDirectionHasRecv(local_direction)) { - std::vector stream_ids; - if (!media_desc->streams().empty()) { - // The remote description has signaled the stream IDs. - stream_ids = media_desc->streams()[0].stream_ids(); - } - transceiver_stable_states_by_transceivers_[transceiver] - .SetRemoteStreamIdsIfUnset(transceiver->receiver()->stream_ids()); - - RTC_LOG(LS_INFO) << "Processing the MSIDs for MID=" << content->name - << " (" << GetStreamIdsString(stream_ids) << ")."; - SetAssociatedRemoteStreams(transceiver->internal()->receiver_internal(), - stream_ids, &added_streams, - &removed_streams); - // From the WebRTC specification, steps 2.2.8.5/6 of section 4.4.1.6 - // "Set the RTCSessionDescription: If direction is sendrecv or recvonly, - // and transceiver's current direction is neither sendrecv nor recvonly, - // process the addition of a remote track for the media description. 
- if (!transceiver->fired_direction() || - !RtpTransceiverDirectionHasRecv(*transceiver->fired_direction())) { - RTC_LOG(LS_INFO) - << "Processing the addition of a remote track for MID=" - << content->name << "."; - now_receiving_transceivers.push_back(transceiver); - } - } - // 2.2.8.1.9: If direction is "sendonly" or "inactive", and transceiver's - // [[FiredDirection]] slot is either "sendrecv" or "recvonly", process the - // removal of a remote track for the media description, given transceiver, - // removeList, and muteTracks. - if (!RtpTransceiverDirectionHasRecv(local_direction) && - (transceiver->fired_direction() && - RtpTransceiverDirectionHasRecv(*transceiver->fired_direction()))) { - ProcessRemovalOfRemoteTrack(transceiver, &remove_list, - &removed_streams); - } - // 2.2.8.1.10: Set transceiver's [[FiredDirection]] slot to direction. - transceiver->internal()->set_fired_direction(local_direction); - // 2.2.8.1.11: If description is of type "answer" or "pranswer", then run - // the following steps: - if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { - // 2.2.8.1.11.1: Set transceiver's [[CurrentDirection]] slot to - // direction. - transceiver->internal()->set_current_direction(local_direction); - // 2.2.8.1.11.[3-6]: Set the transport internal slots. - if (transceiver->mid()) { - auto dtls_transport = - LookupDtlsTransportByMidInternal(*transceiver->mid()); - transceiver->internal()->sender_internal()->set_transport( - dtls_transport); - transceiver->internal()->receiver_internal()->set_transport( - dtls_transport); - } - } - // 2.2.8.1.12: If the media description is rejected, and transceiver is - // not already stopped, stop the RTCRtpTransceiver transceiver. 
- if (content->rejected && !transceiver->stopped()) { - RTC_LOG(LS_INFO) << "Stopping transceiver for MID=" << content->name - << " since the media section was rejected."; - transceiver->Stop(); - } - if (!content->rejected && - RtpTransceiverDirectionHasRecv(local_direction)) { - if (!media_desc->streams().empty() && - media_desc->streams()[0].has_ssrcs()) { - uint32_t ssrc = media_desc->streams()[0].first_ssrc(); - transceiver->internal()->receiver_internal()->SetupMediaChannel(ssrc); - } else { - transceiver->internal() - ->receiver_internal() - ->SetupUnsignaledMediaChannel(); - } - } - } - // Once all processing has finished, fire off callbacks. - auto observer = Observer(); - for (const auto& transceiver : now_receiving_transceivers) { - stats_->AddTrack(transceiver->receiver()->track()); - observer->OnTrack(transceiver); - observer->OnAddTrack(transceiver->receiver(), - transceiver->receiver()->streams()); - } - for (const auto& stream : added_streams) { - observer->OnAddStream(stream); - } - for (const auto& transceiver : remove_list) { - observer->OnRemoveTrack(transceiver->receiver()); - } - for (const auto& stream : removed_streams) { - observer->OnRemoveStream(stream); + if (has_max) { + if (has_start && *bitrate.max_bitrate_bps < *bitrate.start_bitrate_bps) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "max_bitrate_bps < start_bitrate_bps"); + } else if (has_min && *bitrate.max_bitrate_bps < *bitrate.min_bitrate_bps) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "max_bitrate_bps < min_bitrate_bps"); + } else if (*bitrate.max_bitrate_bps < 0) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "max_bitrate_bps < 0"); } } - const cricket::ContentInfo* audio_content = - GetFirstAudioContent(remote_description()->description()); - const cricket::ContentInfo* video_content = - GetFirstVideoContent(remote_description()->description()); - const cricket::AudioContentDescription* audio_desc = - 
GetFirstAudioContentDescription(remote_description()->description()); - const cricket::VideoContentDescription* video_desc = - GetFirstVideoContentDescription(remote_description()->description()); - const cricket::RtpDataContentDescription* rtp_data_desc = - GetFirstRtpDataContentDescription(remote_description()->description()); - - // Check if the descriptions include streams, just in case the peer supports - // MSID, but doesn't indicate so with "a=msid-semantic". - if (remote_description()->description()->msid_supported() || - (audio_desc && !audio_desc->streams().empty()) || - (video_desc && !video_desc->streams().empty())) { - remote_peer_supports_msid_ = true; - } - - // We wait to signal new streams until we finish processing the description, - // since only at that point will new streams have all their tracks. - rtc::scoped_refptr new_streams(StreamCollection::Create()); + RTC_DCHECK(call_.get()); + call_->SetClientBitratePreferences(bitrate); - if (!IsUnifiedPlan()) { - // TODO(steveanton): When removing RTP senders/receivers in response to a - // rejected media section, there is some cleanup logic that expects the - // voice/ video channel to still be set. But in this method the voice/video - // channel would have been destroyed by the SetRemoteDescription caller - // above so the cleanup that relies on them fails to run. The RemoveSenders - // calls should be moved to right before the DestroyChannel calls to fix - // this. - - // Find all audio rtp streams and create corresponding remote AudioTracks - // and MediaStreams. 
- if (audio_content) { - if (audio_content->rejected) { - RemoveSenders(cricket::MEDIA_TYPE_AUDIO); - } else { - bool default_audio_track_needed = - !remote_peer_supports_msid_ && - RtpTransceiverDirectionHasSend(audio_desc->direction()); - UpdateRemoteSendersList(GetActiveStreams(audio_desc), - default_audio_track_needed, audio_desc->type(), - new_streams); - } - } + return RTCError::OK(); +} - // Find all video rtp streams and create corresponding remote VideoTracks - // and MediaStreams. - if (video_content) { - if (video_content->rejected) { - RemoveSenders(cricket::MEDIA_TYPE_VIDEO); - } else { - bool default_video_track_needed = - !remote_peer_supports_msid_ && - RtpTransceiverDirectionHasSend(video_desc->direction()); - UpdateRemoteSendersList(GetActiveStreams(video_desc), - default_video_track_needed, video_desc->type(), - new_streams); - } - } - - // If this is an RTP data transport, update the DataChannels with the - // information from the remote peer. - if (rtp_data_desc) { - data_channel_controller_.UpdateRemoteRtpDataChannels( - GetActiveStreams(rtp_data_desc)); - } - - // Iterate new_streams and notify the observer about new MediaStreams. 
- auto observer = Observer(); - for (size_t i = 0; i < new_streams->count(); ++i) { - MediaStreamInterface* new_stream = new_streams->at(i); - stats_->AddStream(new_stream); - observer->OnAddStream( - rtc::scoped_refptr(new_stream)); - } - - UpdateEndedRemoteMediaStreams(); - } - - if (type == SdpType::kAnswer && - local_ice_credentials_to_replace_->SatisfiesIceRestart( - *current_local_description_)) { - local_ice_credentials_to_replace_->ClearIceCredentials(); - } - - return RTCError::OK(); -} - -void PeerConnection::SetAssociatedRemoteStreams( - rtc::scoped_refptr receiver, - const std::vector& stream_ids, - std::vector>* added_streams, - std::vector>* removed_streams) { - std::vector> media_streams; - for (const std::string& stream_id : stream_ids) { - rtc::scoped_refptr stream = - remote_streams_->find(stream_id); - if (!stream) { - stream = MediaStreamProxy::Create(rtc::Thread::Current(), - MediaStream::Create(stream_id)); - remote_streams_->AddStream(stream); - added_streams->push_back(stream); - } - media_streams.push_back(stream); - } - // Special case: "a=msid" missing, use random stream ID. - if (media_streams.empty() && - !(remote_description()->description()->msid_signaling() & - cricket::kMsidSignalingMediaSection)) { - if (!missing_msid_default_stream_) { - missing_msid_default_stream_ = MediaStreamProxy::Create( - rtc::Thread::Current(), MediaStream::Create(rtc::CreateRandomUuid())); - added_streams->push_back(missing_msid_default_stream_); - } - media_streams.push_back(missing_msid_default_stream_); - } - std::vector> previous_streams = - receiver->streams(); - // SetStreams() will add/remove the receiver's track to/from the streams. This - // differs from the spec - the spec uses an "addList" and "removeList" to - // update the stream-track relationships in a later step. We do this earlier, - // changing the order of things, but the end-result is the same. - // TODO(hbos): When we remove remote_streams(), use set_stream_ids() - // instead. 
https://crbug.com/webrtc/9480 - receiver->SetStreams(media_streams); - RemoveRemoteStreamsIfEmpty(previous_streams, removed_streams); -} - -void PeerConnection::ProcessRemovalOfRemoteTrack( - rtc::scoped_refptr> - transceiver, - std::vector>* remove_list, - std::vector>* removed_streams) { - RTC_DCHECK(transceiver->mid()); - RTC_LOG(LS_INFO) << "Processing the removal of a track for MID=" - << *transceiver->mid(); - std::vector> previous_streams = - transceiver->internal()->receiver_internal()->streams(); - // This will remove the remote track from the streams. - transceiver->internal()->receiver_internal()->set_stream_ids({}); - remove_list->push_back(transceiver); - RemoveRemoteStreamsIfEmpty(previous_streams, removed_streams); -} - -void PeerConnection::RemoveRemoteStreamsIfEmpty( - const std::vector>& remote_streams, - std::vector>* removed_streams) { - // TODO(https://crbug.com/webrtc/9480): When we use stream IDs instead of - // streams, see if the stream was removed by checking if this was the last - // receiver with that stream ID. 
- for (const auto& remote_stream : remote_streams) { - if (remote_stream->GetAudioTracks().empty() && - remote_stream->GetVideoTracks().empty()) { - remote_streams_->RemoveStream(remote_stream); - removed_streams->push_back(remote_stream); - } - } -} - -RTCError PeerConnection::UpdateTransceiversAndDataChannels( - cricket::ContentSource source, - const SessionDescriptionInterface& new_session, - const SessionDescriptionInterface* old_local_description, - const SessionDescriptionInterface* old_remote_description) { - RTC_DCHECK(IsUnifiedPlan()); - - const cricket::ContentGroup* bundle_group = nullptr; - if (new_session.GetType() == SdpType::kOffer) { - auto bundle_group_or_error = - GetEarlyBundleGroup(*new_session.description()); - if (!bundle_group_or_error.ok()) { - return bundle_group_or_error.MoveError(); - } - bundle_group = bundle_group_or_error.MoveValue(); - } - - const ContentInfos& new_contents = new_session.description()->contents(); - for (size_t i = 0; i < new_contents.size(); ++i) { - const cricket::ContentInfo& new_content = new_contents[i]; - cricket::MediaType media_type = new_content.media_description()->type(); - mid_generator_.AddKnownId(new_content.name); - if (media_type == cricket::MEDIA_TYPE_AUDIO || - media_type == cricket::MEDIA_TYPE_VIDEO) { - const cricket::ContentInfo* old_local_content = nullptr; - if (old_local_description && - i < old_local_description->description()->contents().size()) { - old_local_content = - &old_local_description->description()->contents()[i]; - } - const cricket::ContentInfo* old_remote_content = nullptr; - if (old_remote_description && - i < old_remote_description->description()->contents().size()) { - old_remote_content = - &old_remote_description->description()->contents()[i]; - } - auto transceiver_or_error = - AssociateTransceiver(source, new_session.GetType(), i, new_content, - old_local_content, old_remote_content); - if (!transceiver_or_error.ok()) { - return transceiver_or_error.MoveError(); - } - auto 
transceiver = transceiver_or_error.MoveValue(); - RTCError error = - UpdateTransceiverChannel(transceiver, new_content, bundle_group); - if (!error.ok()) { - return error; - } - } else if (media_type == cricket::MEDIA_TYPE_DATA) { - if (GetDataMid() && new_content.name != *GetDataMid()) { - // Ignore all but the first data section. - RTC_LOG(LS_INFO) << "Ignoring data media section with MID=" - << new_content.name; - continue; - } - RTCError error = UpdateDataChannel(source, new_content, bundle_group); - if (!error.ok()) { - return error; - } - } else { - LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, - "Unknown section type."); - } - } - - return RTCError::OK(); -} - -RTCError PeerConnection::UpdateTransceiverChannel( - rtc::scoped_refptr> - transceiver, - const cricket::ContentInfo& content, - const cricket::ContentGroup* bundle_group) { - RTC_DCHECK(IsUnifiedPlan()); - RTC_DCHECK(transceiver); - cricket::ChannelInterface* channel = transceiver->internal()->channel(); - if (content.rejected) { - if (channel) { - transceiver->internal()->SetChannel(nullptr); - DestroyChannelInterface(channel); - } - } else { - if (!channel) { - if (transceiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { - channel = CreateVoiceChannel(content.name); - } else { - RTC_DCHECK_EQ(cricket::MEDIA_TYPE_VIDEO, transceiver->media_type()); - channel = CreateVideoChannel(content.name); - } - if (!channel) { - LOG_AND_RETURN_ERROR( - RTCErrorType::INTERNAL_ERROR, - "Failed to create channel for mid=" + content.name); - } - transceiver->internal()->SetChannel(channel); - } - } - return RTCError::OK(); -} - -RTCError PeerConnection::UpdateDataChannel( - cricket::ContentSource source, - const cricket::ContentInfo& content, - const cricket::ContentGroup* bundle_group) { - if (data_channel_type() == cricket::DCT_NONE) { - // If data channels are disabled, ignore this media section. CreateAnswer - // will take care of rejecting it. 
- return RTCError::OK(); - } - if (content.rejected) { - RTC_LOG(LS_INFO) << "Rejected data channel, mid=" << content.mid(); - DestroyDataChannelTransport(); - } else { - if (!data_channel_controller_.rtp_data_channel() && - !data_channel_controller_.data_channel_transport()) { - RTC_LOG(LS_INFO) << "Creating data channel, mid=" << content.mid(); - if (!CreateDataChannel(content.name)) { - LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, - "Failed to create data channel."); - } - } - if (source == cricket::CS_REMOTE) { - const MediaContentDescription* data_desc = content.media_description(); - if (data_desc && cricket::IsRtpProtocol(data_desc->protocol())) { - data_channel_controller_.UpdateRemoteRtpDataChannels( - GetActiveStreams(data_desc)); - } - } - } - return RTCError::OK(); -} - -// This method will extract any send encodings that were sent by the remote -// connection. This is currently only relevant for Simulcast scenario (where -// the number of layers may be communicated by the server). -static std::vector GetSendEncodingsFromRemoteDescription( - const MediaContentDescription& desc) { - if (!desc.HasSimulcast()) { - return {}; - } - std::vector result; - const SimulcastDescription& simulcast = desc.simulcast_description(); - - // This is a remote description, the parameters we are after should appear - // as receive streams. - for (const auto& alternatives : simulcast.receive_layers()) { - RTC_DCHECK(!alternatives.empty()); - // There is currently no way to specify or choose from alternatives. - // We will always use the first alternative, which is the most preferred. 
- const SimulcastLayer& layer = alternatives[0]; - RtpEncodingParameters parameters; - parameters.rid = layer.rid; - parameters.active = !layer.is_paused; - result.push_back(parameters); - } - - return result; -} - -static RTCError UpdateSimulcastLayerStatusInSender( - const std::vector& layers, - rtc::scoped_refptr sender) { - RTC_DCHECK(sender); - RtpParameters parameters = sender->GetParametersInternal(); - std::vector disabled_layers; - - // The simulcast envelope cannot be changed, only the status of the streams. - // So we will iterate over the send encodings rather than the layers. - for (RtpEncodingParameters& encoding : parameters.encodings) { - auto iter = std::find_if(layers.begin(), layers.end(), - [&encoding](const SimulcastLayer& layer) { - return layer.rid == encoding.rid; - }); - // A layer that cannot be found may have been removed by the remote party. - if (iter == layers.end()) { - disabled_layers.push_back(encoding.rid); - continue; - } - - encoding.active = !iter->is_paused; - } - - RTCError result = sender->SetParametersInternal(parameters); - if (result.ok()) { - result = sender->DisableEncodingLayers(disabled_layers); - } - - return result; -} - -static bool SimulcastIsRejected( - const ContentInfo* local_content, - const MediaContentDescription& answer_media_desc) { - bool simulcast_offered = local_content && - local_content->media_description() && - local_content->media_description()->HasSimulcast(); - bool simulcast_answered = answer_media_desc.HasSimulcast(); - bool rids_supported = RtpExtension::FindHeaderExtensionByUri( - answer_media_desc.rtp_header_extensions(), RtpExtension::kRidUri); - return simulcast_offered && (!simulcast_answered || !rids_supported); -} - -static RTCError DisableSimulcastInSender( - rtc::scoped_refptr sender) { - RTC_DCHECK(sender); - RtpParameters parameters = sender->GetParametersInternal(); - if (parameters.encodings.size() <= 1) { - return RTCError::OK(); - } - - std::vector disabled_layers; - 
std::transform( - parameters.encodings.begin() + 1, parameters.encodings.end(), - std::back_inserter(disabled_layers), - [](const RtpEncodingParameters& encoding) { return encoding.rid; }); - return sender->DisableEncodingLayers(disabled_layers); -} - -RTCErrorOr>> -PeerConnection::AssociateTransceiver(cricket::ContentSource source, - SdpType type, - size_t mline_index, - const ContentInfo& content, - const ContentInfo* old_local_content, - const ContentInfo* old_remote_content) { - RTC_DCHECK(IsUnifiedPlan()); - // If this is an offer then the m= section might be recycled. If the m= - // section is being recycled (defined as: rejected in the current local or - // remote description and not rejected in new description), dissociate the - // currently associated RtpTransceiver by setting its mid property to null, - // and discard the mapping between the transceiver and its m= section index. - if (IsMediaSectionBeingRecycled(type, content, old_local_content, - old_remote_content)) { - // We want to dissociate the transceiver that has the rejected mid. - const std::string& old_mid = - (old_local_content && old_local_content->rejected) - ? old_local_content->name - : old_remote_content->name; - auto old_transceiver = GetAssociatedTransceiver(old_mid); - if (old_transceiver) { - RTC_LOG(LS_INFO) << "Dissociating transceiver for MID=" << old_mid - << " since the media section is being recycled."; - old_transceiver->internal()->set_mid(absl::nullopt); - old_transceiver->internal()->set_mline_index(absl::nullopt); - } - } - const MediaContentDescription* media_desc = content.media_description(); - auto transceiver = GetAssociatedTransceiver(content.name); - if (source == cricket::CS_LOCAL) { - // Find the RtpTransceiver that corresponds to this m= section, using the - // mapping between transceivers and m= section indices established when - // creating the offer. 
- if (!transceiver) { - transceiver = GetTransceiverByMLineIndex(mline_index); - } - if (!transceiver) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "Unknown transceiver"); - } - } else { - RTC_DCHECK_EQ(source, cricket::CS_REMOTE); - // If the m= section is sendrecv or recvonly, and there are RtpTransceivers - // of the same type... - // When simulcast is requested, a transceiver cannot be associated because - // AddTrack cannot be called to initialize it. - if (!transceiver && - RtpTransceiverDirectionHasRecv(media_desc->direction()) && - !media_desc->HasSimulcast()) { - transceiver = FindAvailableTransceiverToReceive(media_desc->type()); - } - // If no RtpTransceiver was found in the previous step, create one with a - // recvonly direction. - if (!transceiver) { - RTC_LOG(LS_INFO) << "Adding " - << cricket::MediaTypeToString(media_desc->type()) - << " transceiver for MID=" << content.name - << " at i=" << mline_index - << " in response to the remote description."; - std::string sender_id = rtc::CreateRandomUuid(); - std::vector send_encodings = - GetSendEncodingsFromRemoteDescription(*media_desc); - auto sender = CreateSender(media_desc->type(), sender_id, nullptr, {}, - send_encodings); - std::string receiver_id; - if (!media_desc->streams().empty()) { - receiver_id = media_desc->streams()[0].id; - } else { - receiver_id = rtc::CreateRandomUuid(); - } - auto receiver = CreateReceiver(media_desc->type(), receiver_id); - transceiver = CreateAndAddTransceiver(sender, receiver); - transceiver->internal()->set_direction( - RtpTransceiverDirection::kRecvOnly); - if (type == SdpType::kOffer) { - transceiver_stable_states_by_transceivers_[transceiver] - .set_newly_created(); - } - } - // Check if the offer indicated simulcast but the answer rejected it. - // This can happen when simulcast is not supported on the remote party. 
- if (SimulcastIsRejected(old_local_content, *media_desc)) { - RTC_HISTOGRAM_BOOLEAN(kSimulcastDisabled, true); - RTCError error = - DisableSimulcastInSender(transceiver->internal()->sender_internal()); - if (!error.ok()) { - RTC_LOG(LS_ERROR) << "Failed to remove rejected simulcast."; - return std::move(error); - } - } - } - RTC_DCHECK(transceiver); - if (transceiver->media_type() != media_desc->type()) { - LOG_AND_RETURN_ERROR( - RTCErrorType::INVALID_PARAMETER, - "Transceiver type does not match media description type."); - } - if (media_desc->HasSimulcast()) { - std::vector layers = - source == cricket::CS_LOCAL - ? media_desc->simulcast_description().send_layers().GetAllLayers() - : media_desc->simulcast_description() - .receive_layers() - .GetAllLayers(); - RTCError error = UpdateSimulcastLayerStatusInSender( - layers, transceiver->internal()->sender_internal()); - if (!error.ok()) { - RTC_LOG(LS_ERROR) << "Failed updating status for simulcast layers."; - return std::move(error); - } - } - if (type == SdpType::kOffer) { - bool state_changes = transceiver->internal()->mid() != content.name || - transceiver->internal()->mline_index() != mline_index; - if (state_changes) { - transceiver_stable_states_by_transceivers_[transceiver] - .SetMSectionIfUnset(transceiver->internal()->mid(), - transceiver->internal()->mline_index()); - } - } - // Associate the found or created RtpTransceiver with the m= section by - // setting the value of the RtpTransceiver's mid property to the MID of the m= - // section, and establish a mapping between the transceiver and the index of - // the m= section. 
- transceiver->internal()->set_mid(content.name); - transceiver->internal()->set_mline_index(mline_index); - return std::move(transceiver); -} - -rtc::scoped_refptr> -PeerConnection::GetAssociatedTransceiver(const std::string& mid) const { - RTC_DCHECK(IsUnifiedPlan()); - for (auto transceiver : transceivers_) { - if (transceiver->mid() == mid) { - return transceiver; - } - } - return nullptr; -} - -rtc::scoped_refptr> -PeerConnection::GetTransceiverByMLineIndex(size_t mline_index) const { - RTC_DCHECK(IsUnifiedPlan()); - for (auto transceiver : transceivers_) { - if (transceiver->internal()->mline_index() == mline_index) { - return transceiver; - } - } - return nullptr; -} - -rtc::scoped_refptr> -PeerConnection::FindAvailableTransceiverToReceive( - cricket::MediaType media_type) const { - RTC_DCHECK(IsUnifiedPlan()); - // From JSEP section 5.10 (Applying a Remote Description): - // If the m= section is sendrecv or recvonly, and there are RtpTransceivers of - // the same type that were added to the PeerConnection by addTrack and are not - // associated with any m= section and are not stopped, find the first such - // RtpTransceiver. - for (auto transceiver : transceivers_) { - if (transceiver->media_type() == media_type && - transceiver->internal()->created_by_addtrack() && !transceiver->mid() && - !transceiver->stopped()) { - return transceiver; - } - } - return nullptr; -} - -const cricket::ContentInfo* PeerConnection::FindMediaSectionForTransceiver( - rtc::scoped_refptr> - transceiver, - const SessionDescriptionInterface* sdesc) const { - RTC_DCHECK(transceiver); - RTC_DCHECK(sdesc); - if (IsUnifiedPlan()) { - if (!transceiver->internal()->mid()) { - // This transceiver is not associated with a media section yet. - return nullptr; - } - return sdesc->description()->GetContentByName( - *transceiver->internal()->mid()); - } else { - // Plan B only allows at most one audio and one video section, so use the - // first media section of that type. 
- return cricket::GetFirstMediaContent(sdesc->description()->contents(), - transceiver->media_type()); - } -} - -PeerConnectionInterface::RTCConfiguration PeerConnection::GetConfiguration() { - RTC_DCHECK_RUN_ON(signaling_thread()); - return configuration_; -} - -RTCError PeerConnection::SetConfiguration( - const RTCConfiguration& configuration) { - RTC_DCHECK_RUN_ON(signaling_thread()); - TRACE_EVENT0("webrtc", "PeerConnection::SetConfiguration"); - if (IsClosed()) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_STATE, - "SetConfiguration: PeerConnection is closed."); - } - - // According to JSEP, after setLocalDescription, changing the candidate pool - // size is not allowed, and changing the set of ICE servers will not result - // in new candidates being gathered. - if (local_description() && configuration.ice_candidate_pool_size != - configuration_.ice_candidate_pool_size) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION, - "Can't change candidate pool size after calling " - "SetLocalDescription."); - } - - if (local_description() && - configuration.crypto_options != configuration_.crypto_options) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION, - "Can't change crypto_options after calling " - "SetLocalDescription."); - } - - if (local_description() && configuration.use_datagram_transport != - configuration_.use_datagram_transport) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION, - "Can't change use_datagram_transport " - "after calling SetLocalDescription."); - } - - if (remote_description() && configuration.use_datagram_transport != - configuration_.use_datagram_transport) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION, - "Can't change use_datagram_transport " - "after calling SetRemoteDescription."); - } - - if (local_description() && - configuration.use_datagram_transport_for_data_channels != - configuration_.use_datagram_transport_for_data_channels) { - LOG_AND_RETURN_ERROR( - 
RTCErrorType::INVALID_MODIFICATION, - "Can't change use_datagram_transport_for_data_channels " - "after calling SetLocalDescription."); - } - - if (remote_description() && - configuration.use_datagram_transport_for_data_channels != - configuration_.use_datagram_transport_for_data_channels) { - LOG_AND_RETURN_ERROR( - RTCErrorType::INVALID_MODIFICATION, - "Can't change use_datagram_transport_for_data_channels " - "after calling SetRemoteDescription."); - } - - if (local_description() && - configuration.use_datagram_transport_for_data_channels_receive_only != - configuration_ - .use_datagram_transport_for_data_channels_receive_only) { - LOG_AND_RETURN_ERROR( - RTCErrorType::INVALID_MODIFICATION, - "Can't change use_datagram_transport_for_data_channels_receive_only " - "after calling SetLocalDescription."); - } - - if (remote_description() && - configuration.use_datagram_transport_for_data_channels_receive_only != - configuration_ - .use_datagram_transport_for_data_channels_receive_only) { - LOG_AND_RETURN_ERROR( - RTCErrorType::INVALID_MODIFICATION, - "Can't change use_datagram_transport_for_data_channels_receive_only " - "after calling SetRemoteDescription."); - } - - if ((configuration.use_datagram_transport && - *configuration.use_datagram_transport) || - (configuration.use_datagram_transport_for_data_channels && - *configuration.use_datagram_transport_for_data_channels)) { - RTC_CHECK(configuration.bundle_policy == kBundlePolicyMaxBundle) - << "Media transport requires MaxBundle policy."; - } - - // The simplest (and most future-compatible) way to tell if the config was - // modified in an invalid way is to copy each property we do support - // modifying, then use operator==. There are far more properties we don't - // support modifying than those we do, and more could be added. 
- RTCConfiguration modified_config = configuration_; - modified_config.servers = configuration.servers; - modified_config.type = configuration.type; - modified_config.ice_candidate_pool_size = - configuration.ice_candidate_pool_size; - modified_config.prune_turn_ports = configuration.prune_turn_ports; - modified_config.turn_port_prune_policy = configuration.turn_port_prune_policy; - modified_config.surface_ice_candidates_on_ice_transport_type_changed = - configuration.surface_ice_candidates_on_ice_transport_type_changed; - modified_config.ice_check_min_interval = configuration.ice_check_min_interval; - modified_config.ice_check_interval_strong_connectivity = - configuration.ice_check_interval_strong_connectivity; - modified_config.ice_check_interval_weak_connectivity = - configuration.ice_check_interval_weak_connectivity; - modified_config.ice_unwritable_timeout = configuration.ice_unwritable_timeout; - modified_config.ice_unwritable_min_checks = - configuration.ice_unwritable_min_checks; - modified_config.ice_inactive_timeout = configuration.ice_inactive_timeout; - modified_config.stun_candidate_keepalive_interval = - configuration.stun_candidate_keepalive_interval; - modified_config.turn_customizer = configuration.turn_customizer; - modified_config.network_preference = configuration.network_preference; - modified_config.active_reset_srtp_params = - configuration.active_reset_srtp_params; - modified_config.use_datagram_transport = configuration.use_datagram_transport; - modified_config.use_datagram_transport_for_data_channels = - configuration.use_datagram_transport_for_data_channels; - modified_config.use_datagram_transport_for_data_channels_receive_only = - configuration.use_datagram_transport_for_data_channels_receive_only; - modified_config.turn_logging_id = configuration.turn_logging_id; - modified_config.allow_codec_switching = configuration.allow_codec_switching; - if (configuration != modified_config) { - 
LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_MODIFICATION, - "Modifying the configuration in an unsupported way."); - } - - // Validate the modified configuration. - RTCError validate_error = ValidateConfiguration(modified_config); - if (!validate_error.ok()) { - return validate_error; - } - - // Note that this isn't possible through chromium, since it's an unsigned - // short in WebIDL. - if (configuration.ice_candidate_pool_size < 0 || - configuration.ice_candidate_pool_size > static_cast(UINT16_MAX)) { - return RTCError(RTCErrorType::INVALID_RANGE); - } - - // Parse ICE servers before hopping to network thread. - cricket::ServerAddresses stun_servers; - std::vector turn_servers; - RTCErrorType parse_error = - ParseIceServers(configuration.servers, &stun_servers, &turn_servers); - if (parse_error != RTCErrorType::NONE) { - return RTCError(parse_error); - } - // Add the turn logging id to all turn servers - for (cricket::RelayServerConfig& turn_server : turn_servers) { - turn_server.turn_logging_id = configuration.turn_logging_id; - } - - // Note if STUN or TURN servers were supplied. - if (!stun_servers.empty()) { - NoteUsageEvent(UsageEvent::STUN_SERVER_ADDED); - } - if (!turn_servers.empty()) { - NoteUsageEvent(UsageEvent::TURN_SERVER_ADDED); - } - - // In theory this shouldn't fail. 
- if (!network_thread()->Invoke( - RTC_FROM_HERE, - rtc::Bind(&PeerConnection::ReconfigurePortAllocator_n, this, - stun_servers, turn_servers, modified_config.type, - modified_config.ice_candidate_pool_size, - modified_config.GetTurnPortPrunePolicy(), - modified_config.turn_customizer, - modified_config.stun_candidate_keepalive_interval, - static_cast(local_description())))) { - LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, - "Failed to apply configuration to PortAllocator."); - } - - // As described in JSEP, calling setConfiguration with new ICE servers or - // candidate policy must set a "needs-ice-restart" bit so that the next offer - // triggers an ICE restart which will pick up the changes. - if (modified_config.servers != configuration_.servers || - modified_config.type != configuration_.type || - modified_config.GetTurnPortPrunePolicy() != - configuration_.GetTurnPortPrunePolicy()) { - transport_controller_->SetNeedsIceRestartFlag(); - } - - transport_controller_->SetIceConfig(ParseIceConfig(modified_config)); - - use_datagram_transport_ = datagram_transport_config_.enabled && - modified_config.use_datagram_transport.value_or( - datagram_transport_config_.default_value); - use_datagram_transport_for_data_channels_ = - datagram_transport_data_channel_config_.enabled && - modified_config.use_datagram_transport_for_data_channels.value_or( - datagram_transport_data_channel_config_.default_value); - use_datagram_transport_for_data_channels_receive_only_ = - modified_config.use_datagram_transport_for_data_channels_receive_only - .value_or(datagram_transport_data_channel_config_.receive_only); - transport_controller_->SetMediaTransportSettings( - use_datagram_transport_, use_datagram_transport_for_data_channels_, - use_datagram_transport_for_data_channels_receive_only_); - - if (configuration_.active_reset_srtp_params != - modified_config.active_reset_srtp_params) { - transport_controller_->SetActiveResetSrtpParams( - modified_config.active_reset_srtp_params); 
- } - - if (modified_config.allow_codec_switching.has_value()) { - cricket::VideoMediaChannel* video_channel = video_media_channel(); - if (video_channel) { - video_channel->SetVideoCodecSwitchingEnabled( - *modified_config.allow_codec_switching); - } - } - - configuration_ = modified_config; - return RTCError::OK(); -} - -bool PeerConnection::AddIceCandidate( - const IceCandidateInterface* ice_candidate) { - RTC_DCHECK_RUN_ON(signaling_thread()); - TRACE_EVENT0("webrtc", "PeerConnection::AddIceCandidate"); - if (IsClosed()) { - RTC_LOG(LS_ERROR) << "AddIceCandidate: PeerConnection is closed."; - NoteAddIceCandidateResult(kAddIceCandidateFailClosed); - return false; - } - - if (!remote_description()) { - RTC_LOG(LS_ERROR) << "AddIceCandidate: ICE candidates can't be added " - "without any remote session description."; - NoteAddIceCandidateResult(kAddIceCandidateFailNoRemoteDescription); - return false; - } - - if (!ice_candidate) { - RTC_LOG(LS_ERROR) << "AddIceCandidate: Candidate is null."; - NoteAddIceCandidateResult(kAddIceCandidateFailNullCandidate); - return false; - } - - bool valid = false; - bool ready = ReadyToUseRemoteCandidate(ice_candidate, nullptr, &valid); - if (!valid) { - NoteAddIceCandidateResult(kAddIceCandidateFailNotValid); - return false; - } - - // Add this candidate to the remote session description. 
- if (!mutable_remote_description()->AddCandidate(ice_candidate)) { - RTC_LOG(LS_ERROR) << "AddIceCandidate: Candidate cannot be used."; - NoteAddIceCandidateResult(kAddIceCandidateFailInAddition); - return false; - } - - if (ready) { - bool result = UseCandidate(ice_candidate); - if (result) { - NoteUsageEvent(UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED); - NoteAddIceCandidateResult(kAddIceCandidateSuccess); - } else { - NoteAddIceCandidateResult(kAddIceCandidateFailNotUsable); - } - return result; - } else { - RTC_LOG(LS_INFO) << "AddIceCandidate: Not ready to use candidate."; - NoteAddIceCandidateResult(kAddIceCandidateFailNotReady); - return true; - } -} - -void PeerConnection::AddIceCandidate( - std::unique_ptr candidate, - std::function callback) { - RTC_DCHECK_RUN_ON(signaling_thread()); - // Chain this operation. If asynchronous operations are pending on the chain, - // this operation will be queued to be invoked, otherwise the contents of the - // lambda will execute immediately. - operations_chain_->ChainOperation( - [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), - candidate = std::move(candidate), callback = std::move(callback)]( - std::function operations_chain_callback) { - if (!this_weak_ptr) { - operations_chain_callback(); - callback(RTCError( - RTCErrorType::INVALID_STATE, - "AddIceCandidate failed because the session was shut down")); - return; - } - if (!this_weak_ptr->AddIceCandidate(candidate.get())) { - operations_chain_callback(); - // Fail with an error type and message consistent with Chromium. - // TODO(hbos): Fail with error types according to spec. 
- callback(RTCError(RTCErrorType::UNSUPPORTED_OPERATION, - "Error processing ICE candidate")); - return; - } - operations_chain_callback(); - callback(RTCError::OK()); - }); -} - -bool PeerConnection::RemoveIceCandidates( - const std::vector& candidates) { - TRACE_EVENT0("webrtc", "PeerConnection::RemoveIceCandidates"); - RTC_DCHECK_RUN_ON(signaling_thread()); - if (IsClosed()) { - RTC_LOG(LS_ERROR) << "RemoveIceCandidates: PeerConnection is closed."; - return false; - } - - if (!remote_description()) { - RTC_LOG(LS_ERROR) << "RemoveIceCandidates: ICE candidates can't be removed " - "without any remote session description."; - return false; - } - - if (candidates.empty()) { - RTC_LOG(LS_ERROR) << "RemoveIceCandidates: candidates are empty."; - return false; - } - - size_t number_removed = - mutable_remote_description()->RemoveCandidates(candidates); - if (number_removed != candidates.size()) { - RTC_LOG(LS_ERROR) - << "RemoveIceCandidates: Failed to remove candidates. Requested " - << candidates.size() << " but only " << number_removed - << " are removed."; - } - - // Remove the candidates from the transport controller. 
- RTCError error = transport_controller_->RemoveRemoteCandidates(candidates); - if (!error.ok()) { - RTC_LOG(LS_ERROR) - << "RemoveIceCandidates: Error when removing remote candidates: " - << error.message(); - } - return true; -} - -RTCError PeerConnection::SetBitrate(const BitrateSettings& bitrate) { - if (!worker_thread()->IsCurrent()) { - return worker_thread()->Invoke( - RTC_FROM_HERE, [&]() { return SetBitrate(bitrate); }); - } - RTC_DCHECK_RUN_ON(worker_thread()); - - const bool has_min = bitrate.min_bitrate_bps.has_value(); - const bool has_start = bitrate.start_bitrate_bps.has_value(); - const bool has_max = bitrate.max_bitrate_bps.has_value(); - if (has_min && *bitrate.min_bitrate_bps < 0) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "min_bitrate_bps <= 0"); - } - if (has_start) { - if (has_min && *bitrate.start_bitrate_bps < *bitrate.min_bitrate_bps) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "start_bitrate_bps < min_bitrate_bps"); - } else if (*bitrate.start_bitrate_bps < 0) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "curent_bitrate_bps < 0"); - } - } - if (has_max) { - if (has_start && *bitrate.max_bitrate_bps < *bitrate.start_bitrate_bps) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "max_bitrate_bps < start_bitrate_bps"); - } else if (has_min && *bitrate.max_bitrate_bps < *bitrate.min_bitrate_bps) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "max_bitrate_bps < min_bitrate_bps"); - } else if (*bitrate.max_bitrate_bps < 0) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "max_bitrate_bps < 0"); - } - } - - RTC_DCHECK(call_.get()); - call_->SetClientBitratePreferences(bitrate); - - return RTCError::OK(); -} - -void PeerConnection::SetAudioPlayout(bool playout) { - if (!worker_thread()->IsCurrent()) { - worker_thread()->Invoke( - RTC_FROM_HERE, - rtc::Bind(&PeerConnection::SetAudioPlayout, this, playout)); - return; - } - auto audio_state = - 
factory_->channel_manager()->media_engine()->voice().GetAudioState(); - audio_state->SetPlayout(playout); -} - -void PeerConnection::SetAudioRecording(bool recording) { - if (!worker_thread()->IsCurrent()) { - worker_thread()->Invoke( - RTC_FROM_HERE, - rtc::Bind(&PeerConnection::SetAudioRecording, this, recording)); - return; - } - auto audio_state = - factory_->channel_manager()->media_engine()->voice().GetAudioState(); - audio_state->SetRecording(recording); -} - -std::unique_ptr -PeerConnection::GetRemoteAudioSSLCertificate() { - std::unique_ptr chain = GetRemoteAudioSSLCertChain(); - if (!chain || !chain->GetSize()) { - return nullptr; - } - return chain->Get(0).Clone(); -} - -std::unique_ptr -PeerConnection::GetRemoteAudioSSLCertChain() { - RTC_DCHECK_RUN_ON(signaling_thread()); - auto audio_transceiver = GetFirstAudioTransceiver(); - if (!audio_transceiver || !audio_transceiver->internal()->channel()) { - return nullptr; - } - return transport_controller_->GetRemoteSSLCertChain( - audio_transceiver->internal()->channel()->transport_name()); -} - -rtc::scoped_refptr> -PeerConnection::GetFirstAudioTransceiver() const { - for (auto transceiver : transceivers_) { - if (transceiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { - return transceiver; - } - } - return nullptr; -} - -bool PeerConnection::StartRtcEventLog(std::unique_ptr output, - int64_t output_period_ms) { - return worker_thread()->Invoke( - RTC_FROM_HERE, - [this, output = std::move(output), output_period_ms]() mutable { - return StartRtcEventLog_w(std::move(output), output_period_ms); - }); -} - -bool PeerConnection::StartRtcEventLog( - std::unique_ptr output) { - int64_t output_period_ms = webrtc::RtcEventLog::kImmediateOutput; - if (field_trial::IsEnabled("WebRTC-RtcEventLogNewFormat")) { - output_period_ms = 5000; - } - return StartRtcEventLog(std::move(output), output_period_ms); -} - -void PeerConnection::StopRtcEventLog() { - worker_thread()->Invoke( - RTC_FROM_HERE, 
rtc::Bind(&PeerConnection::StopRtcEventLog_w, this)); -} - -rtc::scoped_refptr -PeerConnection::LookupDtlsTransportByMid(const std::string& mid) { - RTC_DCHECK_RUN_ON(signaling_thread()); - return transport_controller_->LookupDtlsTransportByMid(mid); -} - -rtc::scoped_refptr -PeerConnection::LookupDtlsTransportByMidInternal(const std::string& mid) { - RTC_DCHECK_RUN_ON(signaling_thread()); - return transport_controller_->LookupDtlsTransportByMid(mid); -} - -rtc::scoped_refptr PeerConnection::GetSctpTransport() - const { - RTC_DCHECK_RUN_ON(signaling_thread()); - if (!sctp_mid_s_) { - return nullptr; - } - return transport_controller_->GetSctpTransport(*sctp_mid_s_); -} - -const SessionDescriptionInterface* PeerConnection::local_description() const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return pending_local_description_ ? pending_local_description_.get() - : current_local_description_.get(); -} - -const SessionDescriptionInterface* PeerConnection::remote_description() const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return pending_remote_description_ ? 
pending_remote_description_.get() - : current_remote_description_.get(); -} - -const SessionDescriptionInterface* PeerConnection::current_local_description() - const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return current_local_description_.get(); -} - -const SessionDescriptionInterface* PeerConnection::current_remote_description() - const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return current_remote_description_.get(); -} - -const SessionDescriptionInterface* PeerConnection::pending_local_description() - const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return pending_local_description_.get(); -} - -const SessionDescriptionInterface* PeerConnection::pending_remote_description() - const { - RTC_DCHECK_RUN_ON(signaling_thread()); - return pending_remote_description_.get(); -} - -void PeerConnection::Close() { - RTC_DCHECK_RUN_ON(signaling_thread()); - TRACE_EVENT0("webrtc", "PeerConnection::Close"); - // Update stats here so that we have the most recent stats for tracks and - // streams before the channels are closed. - stats_->UpdateStats(kStatsOutputLevelStandard); - - ChangeSignalingState(PeerConnectionInterface::kClosed); - NoteUsageEvent(UsageEvent::CLOSE_CALLED); - - for (const auto& transceiver : transceivers_) { - transceiver->Stop(); - } - - // Ensure that all asynchronous stats requests are completed before destroying - // the transport controller below. - if (stats_collector_) { - stats_collector_->WaitForPendingRequest(); - } - - // Don't destroy BaseChannels until after stats has been cleaned up so that - // the last stats request can still read from the channels. - DestroyAllChannels(); - - // The event log is used in the transport controller, which must be outlived - // by the former. CreateOffer by the peer connection is implemented - // asynchronously and if the peer connection is closed without resetting the - // WebRTC session description factory, the session description factory would - // call the transport controller. 
- webrtc_session_desc_factory_.reset(); - transport_controller_.reset(); - - network_thread()->Invoke( - RTC_FROM_HERE, rtc::Bind(&cricket::PortAllocator::DiscardCandidatePool, - port_allocator_.get())); - - worker_thread()->Invoke(RTC_FROM_HERE, [this] { - RTC_DCHECK_RUN_ON(worker_thread()); - call_.reset(); - // The event log must outlive call (and any other object that uses it). - event_log_.reset(); - }); - ReportUsagePattern(); - // The .h file says that observer can be discarded after close() returns. - // Make sure this is true. - observer_ = nullptr; -} - -void PeerConnection::OnMessage(rtc::Message* msg) { - RTC_DCHECK_RUN_ON(signaling_thread()); - switch (msg->message_id) { - case MSG_SET_SESSIONDESCRIPTION_SUCCESS: { - SetSessionDescriptionMsg* param = - static_cast(msg->pdata); - param->observer->OnSuccess(); - delete param; - break; - } - case MSG_SET_SESSIONDESCRIPTION_FAILED: { - SetSessionDescriptionMsg* param = - static_cast(msg->pdata); - param->observer->OnFailure(std::move(param->error)); - delete param; - break; - } - case MSG_CREATE_SESSIONDESCRIPTION_FAILED: { - CreateSessionDescriptionMsg* param = - static_cast(msg->pdata); - param->observer->OnFailure(std::move(param->error)); - delete param; - break; - } - case MSG_GETSTATS: { - GetStatsMsg* param = static_cast(msg->pdata); - StatsReports reports; - stats_->GetStats(param->track, &reports); - param->observer->OnComplete(reports); - delete param; - break; - } - case MSG_REPORT_USAGE_PATTERN: { - ReportUsagePattern(); - break; - } - default: - RTC_NOTREACHED() << "Not implemented"; - break; - } -} - -cricket::VoiceMediaChannel* PeerConnection::voice_media_channel() const { - RTC_DCHECK(!IsUnifiedPlan()); - auto* voice_channel = static_cast( - GetAudioTransceiver()->internal()->channel()); - if (voice_channel) { - return voice_channel->media_channel(); - } else { - return nullptr; - } -} - -cricket::VideoMediaChannel* PeerConnection::video_media_channel() const { - 
RTC_DCHECK(!IsUnifiedPlan()); - auto* video_channel = static_cast( - GetVideoTransceiver()->internal()->channel()); - if (video_channel) { - return video_channel->media_channel(); - } else { - return nullptr; - } -} - -void PeerConnection::CreateAudioReceiver( - MediaStreamInterface* stream, - const RtpSenderInfo& remote_sender_info) { - std::vector> streams; - streams.push_back(rtc::scoped_refptr(stream)); - // TODO(https://crbug.com/webrtc/9480): When we remove remote_streams(), use - // the constructor taking stream IDs instead. - auto* audio_receiver = new AudioRtpReceiver( - worker_thread(), remote_sender_info.sender_id, streams); - audio_receiver->SetMediaChannel(voice_media_channel()); - if (remote_sender_info.sender_id == kDefaultAudioSenderId) { - audio_receiver->SetupUnsignaledMediaChannel(); - } else { - audio_receiver->SetupMediaChannel(remote_sender_info.first_ssrc); - } - auto receiver = RtpReceiverProxyWithInternal::Create( - signaling_thread(), audio_receiver); - GetAudioTransceiver()->internal()->AddReceiver(receiver); - Observer()->OnAddTrack(receiver, streams); - NoteUsageEvent(UsageEvent::AUDIO_ADDED); -} - -void PeerConnection::CreateVideoReceiver( - MediaStreamInterface* stream, - const RtpSenderInfo& remote_sender_info) { - std::vector> streams; - streams.push_back(rtc::scoped_refptr(stream)); - // TODO(https://crbug.com/webrtc/9480): When we remove remote_streams(), use - // the constructor taking stream IDs instead. 
- auto* video_receiver = new VideoRtpReceiver( - worker_thread(), remote_sender_info.sender_id, streams); - video_receiver->SetMediaChannel(video_media_channel()); - if (remote_sender_info.sender_id == kDefaultVideoSenderId) { - video_receiver->SetupUnsignaledMediaChannel(); - } else { - video_receiver->SetupMediaChannel(remote_sender_info.first_ssrc); - } - auto receiver = RtpReceiverProxyWithInternal::Create( - signaling_thread(), video_receiver); - GetVideoTransceiver()->internal()->AddReceiver(receiver); - Observer()->OnAddTrack(receiver, streams); - NoteUsageEvent(UsageEvent::VIDEO_ADDED); -} - -// TODO(deadbeef): Keep RtpReceivers around even if track goes away in remote -// description. -rtc::scoped_refptr PeerConnection::RemoveAndStopReceiver( - const RtpSenderInfo& remote_sender_info) { - auto receiver = FindReceiverById(remote_sender_info.sender_id); - if (!receiver) { - RTC_LOG(LS_WARNING) << "RtpReceiver for track with id " - << remote_sender_info.sender_id << " doesn't exist."; - return nullptr; - } - if (receiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { - GetAudioTransceiver()->internal()->RemoveReceiver(receiver); - } else { - GetVideoTransceiver()->internal()->RemoveReceiver(receiver); - } - return receiver; -} - -void PeerConnection::AddAudioTrack(AudioTrackInterface* track, - MediaStreamInterface* stream) { - RTC_DCHECK(!IsClosed()); - RTC_DCHECK(track); - RTC_DCHECK(stream); - auto sender = FindSenderForTrack(track); - if (sender) { - // We already have a sender for this track, so just change the stream_id - // so that it's correct in the next call to CreateOffer. - sender->internal()->set_stream_ids({stream->id()}); - return; - } - - // Normal case; we've never seen this track before. 
- auto new_sender = CreateSender(cricket::MEDIA_TYPE_AUDIO, track->id(), track, - {stream->id()}, {}); - new_sender->internal()->SetMediaChannel(voice_media_channel()); - GetAudioTransceiver()->internal()->AddSender(new_sender); - // If the sender has already been configured in SDP, we call SetSsrc, - // which will connect the sender to the underlying transport. This can - // occur if a local session description that contains the ID of the sender - // is set before AddStream is called. It can also occur if the local - // session description is not changed and RemoveStream is called, and - // later AddStream is called again with the same stream. - const RtpSenderInfo* sender_info = - FindSenderInfo(local_audio_sender_infos_, stream->id(), track->id()); - if (sender_info) { - new_sender->internal()->SetSsrc(sender_info->first_ssrc); - } -} - -// TODO(deadbeef): Don't destroy RtpSenders here; they should be kept around -// indefinitely, when we have unified plan SDP. -void PeerConnection::RemoveAudioTrack(AudioTrackInterface* track, - MediaStreamInterface* stream) { - RTC_DCHECK(!IsClosed()); - auto sender = FindSenderForTrack(track); - if (!sender) { - RTC_LOG(LS_WARNING) << "RtpSender for track with id " << track->id() - << " doesn't exist."; - return; - } - GetAudioTransceiver()->internal()->RemoveSender(sender); -} - -void PeerConnection::AddVideoTrack(VideoTrackInterface* track, - MediaStreamInterface* stream) { - RTC_DCHECK(!IsClosed()); - RTC_DCHECK(track); - RTC_DCHECK(stream); - auto sender = FindSenderForTrack(track); - if (sender) { - // We already have a sender for this track, so just change the stream_id - // so that it's correct in the next call to CreateOffer. - sender->internal()->set_stream_ids({stream->id()}); - return; - } - - // Normal case; we've never seen this track before. 
- auto new_sender = CreateSender(cricket::MEDIA_TYPE_VIDEO, track->id(), track, - {stream->id()}, {}); - new_sender->internal()->SetMediaChannel(video_media_channel()); - GetVideoTransceiver()->internal()->AddSender(new_sender); - const RtpSenderInfo* sender_info = - FindSenderInfo(local_video_sender_infos_, stream->id(), track->id()); - if (sender_info) { - new_sender->internal()->SetSsrc(sender_info->first_ssrc); - } -} - -void PeerConnection::RemoveVideoTrack(VideoTrackInterface* track, - MediaStreamInterface* stream) { - RTC_DCHECK(!IsClosed()); - auto sender = FindSenderForTrack(track); - if (!sender) { - RTC_LOG(LS_WARNING) << "RtpSender for track with id " << track->id() - << " doesn't exist."; - return; - } - GetVideoTransceiver()->internal()->RemoveSender(sender); -} - -void PeerConnection::SetIceConnectionState(IceConnectionState new_state) { - if (ice_connection_state_ == new_state) { - return; - } - - // After transitioning to "closed", ignore any additional states from - // TransportController (such as "disconnected"). 
- if (IsClosed()) { - return; - } - - RTC_LOG(LS_INFO) << "Changing IceConnectionState " << ice_connection_state_ - << " => " << new_state; - RTC_DCHECK(ice_connection_state_ != - PeerConnectionInterface::kIceConnectionClosed); - - ice_connection_state_ = new_state; - Observer()->OnIceConnectionChange(ice_connection_state_); -} - -void PeerConnection::SetStandardizedIceConnectionState( - PeerConnectionInterface::IceConnectionState new_state) { - if (standardized_ice_connection_state_ == new_state) { - return; - } - - if (IsClosed()) { - return; - } - - RTC_LOG(LS_INFO) << "Changing standardized IceConnectionState " - << standardized_ice_connection_state_ << " => " << new_state; - - standardized_ice_connection_state_ = new_state; - Observer()->OnStandardizedIceConnectionChange(new_state); -} - -void PeerConnection::SetConnectionState( - PeerConnectionInterface::PeerConnectionState new_state) { - if (connection_state_ == new_state) - return; - if (IsClosed()) - return; - connection_state_ = new_state; - Observer()->OnConnectionChange(new_state); -} - -void PeerConnection::OnIceGatheringChange( - PeerConnectionInterface::IceGatheringState new_state) { - if (IsClosed()) { - return; - } - ice_gathering_state_ = new_state; - Observer()->OnIceGatheringChange(ice_gathering_state_); -} - -void PeerConnection::OnIceCandidate( - std::unique_ptr candidate) { - if (IsClosed()) { - return; - } - ReportIceCandidateCollected(candidate->candidate()); - Observer()->OnIceCandidate(candidate.get()); -} - -void PeerConnection::OnIceCandidateError(const std::string& address, - int port, - const std::string& url, - int error_code, - const std::string& error_text) { - if (IsClosed()) { - return; - } - Observer()->OnIceCandidateError(address, port, url, error_code, error_text); - // Leftover not to break wpt test during migration to the new API. 
- Observer()->OnIceCandidateError(address + ":", url, error_code, error_text); -} - -void PeerConnection::OnIceCandidatesRemoved( - const std::vector& candidates) { - if (IsClosed()) { - return; - } - Observer()->OnIceCandidatesRemoved(candidates); -} - -void PeerConnection::OnSelectedCandidatePairChanged( - const cricket::CandidatePairChangeEvent& event) { - if (IsClosed()) { - return; - } - - if (event.selected_candidate_pair.local_candidate().type() == - LOCAL_PORT_TYPE && - event.selected_candidate_pair.remote_candidate().type() == - LOCAL_PORT_TYPE) { - NoteUsageEvent(UsageEvent::DIRECT_CONNECTION_SELECTED); - } - - Observer()->OnIceSelectedCandidatePairChanged(event); -} - -void PeerConnection::ChangeSignalingState( - PeerConnectionInterface::SignalingState signaling_state) { - if (signaling_state_ == signaling_state) { - return; - } - RTC_LOG(LS_INFO) << "Session: " << session_id() << " Old state: " - << GetSignalingStateString(signaling_state_) - << " New state: " - << GetSignalingStateString(signaling_state); - signaling_state_ = signaling_state; - if (signaling_state == kClosed) { - ice_connection_state_ = kIceConnectionClosed; - Observer()->OnIceConnectionChange(ice_connection_state_); - standardized_ice_connection_state_ = - PeerConnectionInterface::IceConnectionState::kIceConnectionClosed; - connection_state_ = PeerConnectionInterface::PeerConnectionState::kClosed; - Observer()->OnConnectionChange(connection_state_); - } - Observer()->OnSignalingChange(signaling_state_); -} - -void PeerConnection::OnAudioTrackAdded(AudioTrackInterface* track, - MediaStreamInterface* stream) { - if (IsClosed()) { +void PeerConnection::SetAudioPlayout(bool playout) { + if (!worker_thread()->IsCurrent()) { + worker_thread()->Invoke( + RTC_FROM_HERE, [this, playout] { SetAudioPlayout(playout); }); return; } - AddAudioTrack(track, stream); - UpdateNegotiationNeeded(); + auto audio_state = + context_->channel_manager()->media_engine()->voice().GetAudioState(); + 
audio_state->SetPlayout(playout); } -void PeerConnection::OnAudioTrackRemoved(AudioTrackInterface* track, - MediaStreamInterface* stream) { - if (IsClosed()) { +void PeerConnection::SetAudioRecording(bool recording) { + if (!worker_thread()->IsCurrent()) { + worker_thread()->Invoke( + RTC_FROM_HERE, [this, recording] { SetAudioRecording(recording); }); return; } - RemoveAudioTrack(track, stream); - UpdateNegotiationNeeded(); + auto audio_state = + context_->channel_manager()->media_engine()->voice().GetAudioState(); + audio_state->SetRecording(recording); } -void PeerConnection::OnVideoTrackAdded(VideoTrackInterface* track, - MediaStreamInterface* stream) { - if (IsClosed()) { - return; +void PeerConnection::AddAdaptationResource( + rtc::scoped_refptr resource) { + if (!worker_thread()->IsCurrent()) { + return worker_thread()->Invoke(RTC_FROM_HERE, [this, resource]() { + return AddAdaptationResource(resource); + }); } - AddVideoTrack(track, stream); - UpdateNegotiationNeeded(); -} - -void PeerConnection::OnVideoTrackRemoved(VideoTrackInterface* track, - MediaStreamInterface* stream) { - if (IsClosed()) { + RTC_DCHECK_RUN_ON(worker_thread()); + if (!call_) { + // The PeerConnection has been closed. 
return; } - RemoveVideoTrack(track, stream); - UpdateNegotiationNeeded(); + call_->AddAdaptationResource(resource); } -void PeerConnection::PostSetSessionDescriptionSuccess( - SetSessionDescriptionObserver* observer) { - SetSessionDescriptionMsg* msg = new SetSessionDescriptionMsg(observer); - signaling_thread()->Post(RTC_FROM_HERE, this, - MSG_SET_SESSIONDESCRIPTION_SUCCESS, msg); +bool PeerConnection::StartRtcEventLog(std::unique_ptr output, + int64_t output_period_ms) { + return worker_thread()->Invoke( + RTC_FROM_HERE, + [this, output = std::move(output), output_period_ms]() mutable { + return StartRtcEventLog_w(std::move(output), output_period_ms); + }); } -void PeerConnection::PostSetSessionDescriptionFailure( - SetSessionDescriptionObserver* observer, - RTCError&& error) { - RTC_DCHECK(!error.ok()); - SetSessionDescriptionMsg* msg = new SetSessionDescriptionMsg(observer); - msg->error = std::move(error); - signaling_thread()->Post(RTC_FROM_HERE, this, - MSG_SET_SESSIONDESCRIPTION_FAILED, msg); +bool PeerConnection::StartRtcEventLog( + std::unique_ptr output) { + int64_t output_period_ms = webrtc::RtcEventLog::kImmediateOutput; + if (absl::StartsWith(context_->trials().Lookup("WebRTC-RtcEventLogNewFormat"), + "Enabled")) { + output_period_ms = 5000; + } + return StartRtcEventLog(std::move(output), output_period_ms); } -void PeerConnection::PostCreateSessionDescriptionFailure( - CreateSessionDescriptionObserver* observer, - RTCError error) { - RTC_DCHECK(!error.ok()); - CreateSessionDescriptionMsg* msg = new CreateSessionDescriptionMsg(observer); - msg->error = std::move(error); - signaling_thread()->Post(RTC_FROM_HERE, this, - MSG_CREATE_SESSIONDESCRIPTION_FAILED, msg); +void PeerConnection::StopRtcEventLog() { + worker_thread()->Invoke(RTC_FROM_HERE, [this] { StopRtcEventLog_w(); }); } -void PeerConnection::GetOptionsForOffer( - const PeerConnectionInterface::RTCOfferAnswerOptions& offer_answer_options, - cricket::MediaSessionOptions* session_options) { - 
ExtractSharedMediaSessionOptions(offer_answer_options, session_options); - - if (IsUnifiedPlan()) { - GetOptionsForUnifiedPlanOffer(offer_answer_options, session_options); - } else { - GetOptionsForPlanBOffer(offer_answer_options, session_options); - } - - // Intentionally unset the data channel type for RTP data channel with the - // second condition. Otherwise the RTP data channels would be successfully - // negotiated by default and the unit tests in WebRtcDataBrowserTest will fail - // when building with chromium. We want to leave RTP data channels broken, so - // people won't try to use them. - if (data_channel_controller_.HasRtpDataChannels() || - data_channel_type() != cricket::DCT_RTP) { - session_options->data_channel_type = data_channel_type(); - } - - // Apply ICE restart flag and renomination flag. - bool ice_restart = offer_answer_options.ice_restart || - local_ice_credentials_to_replace_->HasIceCredentials(); - for (auto& options : session_options->media_description_options) { - options.transport_options.ice_restart = ice_restart; - options.transport_options.enable_ice_renomination = - configuration_.enable_ice_renomination; - } - - session_options->rtcp_cname = rtcp_cname_; - session_options->crypto_options = GetCryptoOptions(); - session_options->pooled_ice_credentials = - network_thread()->Invoke>( - RTC_FROM_HERE, - rtc::Bind(&cricket::PortAllocator::GetPooledIceCredentials, - port_allocator_.get())); - session_options->offer_extmap_allow_mixed = - configuration_.offer_extmap_allow_mixed; - - // If datagram transport is in use, add opaque transport parameters. 
- if (use_datagram_transport_ || use_datagram_transport_for_data_channels_) { - for (auto& options : session_options->media_description_options) { - absl::optional params = - transport_controller_->GetTransportParameters(options.mid); - if (!params) { - continue; - } - options.transport_options.opaque_parameters = params; - if ((use_datagram_transport_ && - (options.type == cricket::MEDIA_TYPE_AUDIO || - options.type == cricket::MEDIA_TYPE_VIDEO)) || - (use_datagram_transport_for_data_channels_ && - options.type == cricket::MEDIA_TYPE_DATA)) { - options.alt_protocol = params->protocol; - } - } - } - - // Allow fallback for using obsolete SCTP syntax. - // Note that the default in |session_options| is true, while - // the default in |options| is false. - session_options->use_obsolete_sctp_sdp = - offer_answer_options.use_obsolete_sctp_sdp; -} - -void PeerConnection::GetOptionsForPlanBOffer( - const PeerConnectionInterface::RTCOfferAnswerOptions& offer_answer_options, - cricket::MediaSessionOptions* session_options) { - // Figure out transceiver directional preferences. - bool send_audio = HasRtpSender(cricket::MEDIA_TYPE_AUDIO); - bool send_video = HasRtpSender(cricket::MEDIA_TYPE_VIDEO); - - // By default, generate sendrecv/recvonly m= sections. - bool recv_audio = true; - bool recv_video = true; - - // By default, only offer a new m= section if we have media to send with it. - bool offer_new_audio_description = send_audio; - bool offer_new_video_description = send_video; - bool offer_new_data_description = data_channel_controller_.HasDataChannels(); - - // The "offer_to_receive_X" options allow those defaults to be overridden. 
- if (offer_answer_options.offer_to_receive_audio != - RTCOfferAnswerOptions::kUndefined) { - recv_audio = (offer_answer_options.offer_to_receive_audio > 0); - offer_new_audio_description = - offer_new_audio_description || - (offer_answer_options.offer_to_receive_audio > 0); - } - if (offer_answer_options.offer_to_receive_video != - RTCOfferAnswerOptions::kUndefined) { - recv_video = (offer_answer_options.offer_to_receive_video > 0); - offer_new_video_description = - offer_new_video_description || - (offer_answer_options.offer_to_receive_video > 0); - } - - absl::optional audio_index; - absl::optional video_index; - absl::optional data_index; - // If a current description exists, generate m= sections in the same order, - // using the first audio/video/data section that appears and rejecting - // extraneous ones. - if (local_description()) { - GenerateMediaDescriptionOptions( - local_description(), - RtpTransceiverDirectionFromSendRecv(send_audio, recv_audio), - RtpTransceiverDirectionFromSendRecv(send_video, recv_video), - &audio_index, &video_index, &data_index, session_options); - } - - // Add audio/video/data m= sections to the end if needed. 
- if (!audio_index && offer_new_audio_description) { - session_options->media_description_options.push_back( - cricket::MediaDescriptionOptions( - cricket::MEDIA_TYPE_AUDIO, cricket::CN_AUDIO, - RtpTransceiverDirectionFromSendRecv(send_audio, recv_audio), - false)); - - audio_index = session_options->media_description_options.size() - 1; - } - if (!video_index && offer_new_video_description) { - session_options->media_description_options.push_back( - cricket::MediaDescriptionOptions( - cricket::MEDIA_TYPE_VIDEO, cricket::CN_VIDEO, - RtpTransceiverDirectionFromSendRecv(send_video, recv_video), - false)); - - video_index = session_options->media_description_options.size() - 1; - } - if (!data_index && offer_new_data_description) { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForActiveData(cricket::CN_DATA)); - data_index = session_options->media_description_options.size() - 1; - } - - cricket::MediaDescriptionOptions* audio_media_description_options = - !audio_index ? nullptr - : &session_options->media_description_options[*audio_index]; - cricket::MediaDescriptionOptions* video_media_description_options = - !video_index ? nullptr - : &session_options->media_description_options[*video_index]; - - AddPlanBRtpSenderOptions(GetSendersInternal(), - audio_media_description_options, - video_media_description_options, - offer_answer_options.num_simulcast_layers); -} - -static cricket::MediaDescriptionOptions -GetMediaDescriptionOptionsForTransceiver( - rtc::scoped_refptr> - transceiver, - const std::string& mid) { - cricket::MediaDescriptionOptions media_description_options( - transceiver->media_type(), mid, transceiver->direction(), - transceiver->stopped()); - media_description_options.codec_preferences = - transceiver->codec_preferences(); - // This behavior is specified in JSEP. The gist is that: - // 1. The MSID is included if the RtpTransceiver's direction is sendonly or - // sendrecv. - // 2. 
If the MSID is included, then it must be included in any subsequent - // offer/answer exactly the same until the RtpTransceiver is stopped. - if (transceiver->stopped() || - (!RtpTransceiverDirectionHasSend(transceiver->direction()) && - !transceiver->internal()->has_ever_been_used_to_send())) { - return media_description_options; - } - - cricket::SenderOptions sender_options; - sender_options.track_id = transceiver->sender()->id(); - sender_options.stream_ids = transceiver->sender()->stream_ids(); - - // The following sets up RIDs and Simulcast. - // RIDs are included if Simulcast is requested or if any RID was specified. - RtpParameters send_parameters = - transceiver->internal()->sender_internal()->GetParametersInternal(); - bool has_rids = std::any_of(send_parameters.encodings.begin(), - send_parameters.encodings.end(), - [](const RtpEncodingParameters& encoding) { - return !encoding.rid.empty(); - }); - - std::vector send_rids; - SimulcastLayerList send_layers; - for (const RtpEncodingParameters& encoding : send_parameters.encodings) { - if (encoding.rid.empty()) { - continue; - } - send_rids.push_back(RidDescription(encoding.rid, RidDirection::kSend)); - send_layers.AddLayer(SimulcastLayer(encoding.rid, !encoding.active)); - } - - if (has_rids) { - sender_options.rids = send_rids; - } - - sender_options.simulcast_layers = send_layers; - // When RIDs are configured, we must set num_sim_layers to 0 to. - // Otherwise, num_sim_layers must be 1 because either there is no - // simulcast, or simulcast is acheived by munging the SDP. - sender_options.num_sim_layers = has_rids ? 
0 : 1; - media_description_options.sender_options.push_back(sender_options); +rtc::scoped_refptr +PeerConnection::LookupDtlsTransportByMid(const std::string& mid) { + RTC_DCHECK_RUN_ON(network_thread()); + return transport_controller_->LookupDtlsTransportByMid(mid); +} - return media_description_options; +rtc::scoped_refptr +PeerConnection::LookupDtlsTransportByMidInternal(const std::string& mid) { + RTC_DCHECK_RUN_ON(signaling_thread()); + return transport_controller_->LookupDtlsTransportByMid(mid); } -// Returns the ContentInfo at mline index |i|, or null if none exists. -static const ContentInfo* GetContentByIndex( - const SessionDescriptionInterface* sdesc, - size_t i) { - if (!sdesc) { +rtc::scoped_refptr PeerConnection::GetSctpTransport() + const { + RTC_DCHECK_RUN_ON(network_thread()); + if (!sctp_mid_n_) return nullptr; - } - const ContentInfos& contents = sdesc->description()->contents(); - return (i < contents.size() ? &contents[i] : nullptr); -} - -void PeerConnection::GetOptionsForUnifiedPlanOffer( - const RTCOfferAnswerOptions& offer_answer_options, - cricket::MediaSessionOptions* session_options) { - // Rules for generating an offer are dictated by JSEP sections 5.2.1 (Initial - // Offers) and 5.2.2 (Subsequent Offers). - RTC_DCHECK_EQ(session_options->media_description_options.size(), 0); - const ContentInfos no_infos; - const ContentInfos& local_contents = - (local_description() ? local_description()->description()->contents() - : no_infos); - const ContentInfos& remote_contents = - (remote_description() ? remote_description()->description()->contents() - : no_infos); - // The mline indices that can be recycled. New transceivers should reuse these - // slots first. - std::queue recycleable_mline_indices; - // First, go through each media section that exists in either the local or - // remote description and generate a media section in this offer for the - // associated transceiver. 
If a media section can be recycled, generate a - // default, rejected media section here that can be later overwritten. - for (size_t i = 0; - i < std::max(local_contents.size(), remote_contents.size()); ++i) { - // Either |local_content| or |remote_content| is non-null. - const ContentInfo* local_content = - (i < local_contents.size() ? &local_contents[i] : nullptr); - const ContentInfo* current_local_content = - GetContentByIndex(current_local_description(), i); - const ContentInfo* remote_content = - (i < remote_contents.size() ? &remote_contents[i] : nullptr); - const ContentInfo* current_remote_content = - GetContentByIndex(current_remote_description(), i); - bool had_been_rejected = - (current_local_content && current_local_content->rejected) || - (current_remote_content && current_remote_content->rejected); - const std::string& mid = - (local_content ? local_content->name : remote_content->name); - cricket::MediaType media_type = - (local_content ? local_content->media_description()->type() - : remote_content->media_description()->type()); - if (media_type == cricket::MEDIA_TYPE_AUDIO || - media_type == cricket::MEDIA_TYPE_VIDEO) { - auto transceiver = GetAssociatedTransceiver(mid); - RTC_CHECK(transceiver); - // A media section is considered eligible for recycling if it is marked as - // rejected in either the current local or current remote description. 
- if (had_been_rejected && transceiver->stopped()) { - session_options->media_description_options.push_back( - cricket::MediaDescriptionOptions(transceiver->media_type(), mid, - RtpTransceiverDirection::kInactive, - /*stopped=*/true)); - recycleable_mline_indices.push(i); - } else { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForTransceiver(transceiver, mid)); - // CreateOffer shouldn't really cause any state changes in - // PeerConnection, but we need a way to match new transceivers to new - // media sections in SetLocalDescription and JSEP specifies this is done - // by recording the index of the media section generated for the - // transceiver in the offer. - transceiver->internal()->set_mline_index(i); - } - } else { - RTC_CHECK_EQ(cricket::MEDIA_TYPE_DATA, media_type); - if (had_been_rejected) { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForRejectedData(mid)); - } else { - RTC_CHECK(GetDataMid()); - if (mid == *GetDataMid()) { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForActiveData(mid)); - } else { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForRejectedData(mid)); - } - } - } - } - // Next, look for transceivers that are newly added (that is, are not stopped - // and not associated). Reuse media sections marked as recyclable first, - // otherwise append to the end of the offer. New media sections should be - // added in the order they were added to the PeerConnection. 
- for (const auto& transceiver : transceivers_) { - if (transceiver->mid() || transceiver->stopped()) { - continue; - } - size_t mline_index; - if (!recycleable_mline_indices.empty()) { - mline_index = recycleable_mline_indices.front(); - recycleable_mline_indices.pop(); - session_options->media_description_options[mline_index] = - GetMediaDescriptionOptionsForTransceiver(transceiver, - mid_generator_()); - } else { - mline_index = session_options->media_description_options.size(); - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForTransceiver(transceiver, - mid_generator_())); - } - // See comment above for why CreateOffer changes the transceiver's state. - transceiver->internal()->set_mline_index(mline_index); - } - // Lastly, add a m-section if we have local data channels and an m section - // does not already exist. - if (!GetDataMid() && data_channel_controller_.HasDataChannels()) { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForActiveData(mid_generator_())); - } + return transport_controller_->GetSctpTransport(*sctp_mid_n_); } -void PeerConnection::GetOptionsForAnswer( - const RTCOfferAnswerOptions& offer_answer_options, - cricket::MediaSessionOptions* session_options) { - ExtractSharedMediaSessionOptions(offer_answer_options, session_options); - - if (IsUnifiedPlan()) { - GetOptionsForUnifiedPlanAnswer(offer_answer_options, session_options); - } else { - GetOptionsForPlanBAnswer(offer_answer_options, session_options); - } - - // Intentionally unset the data channel type for RTP data channel. Otherwise - // the RTP data channels would be successfully negotiated by default and the - // unit tests in WebRtcDataBrowserTest will fail when building with chromium. - // We want to leave RTP data channels broken, so people won't try to use them. 
- if (data_channel_controller_.HasRtpDataChannels() || - data_channel_type() != cricket::DCT_RTP) { - session_options->data_channel_type = data_channel_type(); - } - - // Apply ICE renomination flag. - for (auto& options : session_options->media_description_options) { - options.transport_options.enable_ice_renomination = - configuration_.enable_ice_renomination; - } - - session_options->rtcp_cname = rtcp_cname_; - session_options->crypto_options = GetCryptoOptions(); - session_options->pooled_ice_credentials = - network_thread()->Invoke>( - RTC_FROM_HERE, - rtc::Bind(&cricket::PortAllocator::GetPooledIceCredentials, - port_allocator_.get())); - - // If datagram transport is in use, add opaque transport parameters. - if (use_datagram_transport_ || use_datagram_transport_for_data_channels_) { - for (auto& options : session_options->media_description_options) { - absl::optional params = - transport_controller_->GetTransportParameters(options.mid); - if (!params) { - continue; - } - options.transport_options.opaque_parameters = params; - if ((use_datagram_transport_ && - (options.type == cricket::MEDIA_TYPE_AUDIO || - options.type == cricket::MEDIA_TYPE_VIDEO)) || - (use_datagram_transport_for_data_channels_ && - options.type == cricket::MEDIA_TYPE_DATA)) { - options.alt_protocol = params->protocol; - } - } - } +const SessionDescriptionInterface* PeerConnection::local_description() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return sdp_handler_->local_description(); } -void PeerConnection::GetOptionsForPlanBAnswer( - const PeerConnectionInterface::RTCOfferAnswerOptions& offer_answer_options, - cricket::MediaSessionOptions* session_options) { - // Figure out transceiver directional preferences. - bool send_audio = HasRtpSender(cricket::MEDIA_TYPE_AUDIO); - bool send_video = HasRtpSender(cricket::MEDIA_TYPE_VIDEO); - - // By default, generate sendrecv/recvonly m= sections. The direction is also - // restricted by the direction in the offer. 
- bool recv_audio = true; - bool recv_video = true; - - // The "offer_to_receive_X" options allow those defaults to be overridden. - if (offer_answer_options.offer_to_receive_audio != - RTCOfferAnswerOptions::kUndefined) { - recv_audio = (offer_answer_options.offer_to_receive_audio > 0); - } - if (offer_answer_options.offer_to_receive_video != - RTCOfferAnswerOptions::kUndefined) { - recv_video = (offer_answer_options.offer_to_receive_video > 0); - } - - absl::optional audio_index; - absl::optional video_index; - absl::optional data_index; - - // Generate m= sections that match those in the offer. - // Note that mediasession.cc will handle intersection our preferred - // direction with the offered direction. - GenerateMediaDescriptionOptions( - remote_description(), - RtpTransceiverDirectionFromSendRecv(send_audio, recv_audio), - RtpTransceiverDirectionFromSendRecv(send_video, recv_video), &audio_index, - &video_index, &data_index, session_options); - - cricket::MediaDescriptionOptions* audio_media_description_options = - !audio_index ? nullptr - : &session_options->media_description_options[*audio_index]; - cricket::MediaDescriptionOptions* video_media_description_options = - !video_index ? nullptr - : &session_options->media_description_options[*video_index]; - - AddPlanBRtpSenderOptions(GetSendersInternal(), - audio_media_description_options, - video_media_description_options, - offer_answer_options.num_simulcast_layers); -} - -void PeerConnection::GetOptionsForUnifiedPlanAnswer( - const PeerConnectionInterface::RTCOfferAnswerOptions& offer_answer_options, - cricket::MediaSessionOptions* session_options) { - // Rules for generating an answer are dictated by JSEP sections 5.3.1 (Initial - // Answers) and 5.3.2 (Subsequent Answers). 
- RTC_DCHECK(remote_description()); - RTC_DCHECK(remote_description()->GetType() == SdpType::kOffer); - for (const ContentInfo& content : - remote_description()->description()->contents()) { - cricket::MediaType media_type = content.media_description()->type(); - if (media_type == cricket::MEDIA_TYPE_AUDIO || - media_type == cricket::MEDIA_TYPE_VIDEO) { - auto transceiver = GetAssociatedTransceiver(content.name); - RTC_CHECK(transceiver); - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForTransceiver(transceiver, content.name)); - } else { - RTC_CHECK_EQ(cricket::MEDIA_TYPE_DATA, media_type); - // Reject all data sections if data channels are disabled. - // Reject a data section if it has already been rejected. - // Reject all data sections except for the first one. - if (data_channel_type() == cricket::DCT_NONE || content.rejected || - content.name != *GetDataMid()) { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForRejectedData(content.name)); - } else { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForActiveData(content.name)); - } - } - } +const SessionDescriptionInterface* PeerConnection::remote_description() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return sdp_handler_->remote_description(); } -void PeerConnection::GenerateMediaDescriptionOptions( - const SessionDescriptionInterface* session_desc, - RtpTransceiverDirection audio_direction, - RtpTransceiverDirection video_direction, - absl::optional* audio_index, - absl::optional* video_index, - absl::optional* data_index, - cricket::MediaSessionOptions* session_options) { - for (const cricket::ContentInfo& content : - session_desc->description()->contents()) { - if (IsAudioContent(&content)) { - // If we already have an audio m= section, reject this extra one. 
- if (*audio_index) { - session_options->media_description_options.push_back( - cricket::MediaDescriptionOptions( - cricket::MEDIA_TYPE_AUDIO, content.name, - RtpTransceiverDirection::kInactive, /*stopped=*/true)); - } else { - bool stopped = (audio_direction == RtpTransceiverDirection::kInactive); - session_options->media_description_options.push_back( - cricket::MediaDescriptionOptions(cricket::MEDIA_TYPE_AUDIO, - content.name, audio_direction, - stopped)); - *audio_index = session_options->media_description_options.size() - 1; - } - } else if (IsVideoContent(&content)) { - // If we already have an video m= section, reject this extra one. - if (*video_index) { - session_options->media_description_options.push_back( - cricket::MediaDescriptionOptions( - cricket::MEDIA_TYPE_VIDEO, content.name, - RtpTransceiverDirection::kInactive, /*stopped=*/true)); - } else { - bool stopped = (video_direction == RtpTransceiverDirection::kInactive); - session_options->media_description_options.push_back( - cricket::MediaDescriptionOptions(cricket::MEDIA_TYPE_VIDEO, - content.name, video_direction, - stopped)); - *video_index = session_options->media_description_options.size() - 1; - } - } else { - RTC_DCHECK(IsDataContent(&content)); - // If we already have an data m= section, reject this extra one. 
- if (*data_index) { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForRejectedData(content.name)); - } else { - session_options->media_description_options.push_back( - GetMediaDescriptionOptionsForActiveData(content.name)); - *data_index = session_options->media_description_options.size() - 1; - } - } - } +const SessionDescriptionInterface* PeerConnection::current_local_description() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return sdp_handler_->current_local_description(); } -cricket::MediaDescriptionOptions -PeerConnection::GetMediaDescriptionOptionsForActiveData( - const std::string& mid) const { - // Direction for data sections is meaningless, but legacy endpoints might - // expect sendrecv. - cricket::MediaDescriptionOptions options(cricket::MEDIA_TYPE_DATA, mid, - RtpTransceiverDirection::kSendRecv, - /*stopped=*/false); - AddRtpDataChannelOptions(*data_channel_controller_.rtp_data_channels(), - &options); - return options; +const SessionDescriptionInterface* PeerConnection::current_remote_description() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return sdp_handler_->current_remote_description(); } -cricket::MediaDescriptionOptions -PeerConnection::GetMediaDescriptionOptionsForRejectedData( - const std::string& mid) const { - cricket::MediaDescriptionOptions options(cricket::MEDIA_TYPE_DATA, mid, - RtpTransceiverDirection::kInactive, - /*stopped=*/true); - AddRtpDataChannelOptions(*data_channel_controller_.rtp_data_channels(), - &options); - return options; +const SessionDescriptionInterface* PeerConnection::pending_local_description() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return sdp_handler_->pending_local_description(); } -absl::optional PeerConnection::GetDataMid() const { - switch (data_channel_type()) { - case cricket::DCT_RTP: - if (!data_channel_controller_.rtp_data_channel()) { - return absl::nullopt; - } - return data_channel_controller_.rtp_data_channel()->content_name(); - 
case cricket::DCT_SCTP: - case cricket::DCT_DATA_CHANNEL_TRANSPORT: - case cricket::DCT_DATA_CHANNEL_TRANSPORT_SCTP: - return sctp_mid_s_; - default: - return absl::nullopt; - } +const SessionDescriptionInterface* PeerConnection::pending_remote_description() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return sdp_handler_->pending_remote_description(); } -void PeerConnection::RemoveSenders(cricket::MediaType media_type) { - UpdateLocalSenders(std::vector(), media_type); - UpdateRemoteSendersList(std::vector(), false, - media_type, nullptr); -} +void PeerConnection::Close() { + RTC_DCHECK_RUN_ON(signaling_thread()); + TRACE_EVENT0("webrtc", "PeerConnection::Close"); -void PeerConnection::UpdateRemoteSendersList( - const cricket::StreamParamsVec& streams, - bool default_sender_needed, - cricket::MediaType media_type, - StreamCollection* new_streams) { - RTC_DCHECK(!IsUnifiedPlan()); - - std::vector* current_senders = - GetRemoteSenderInfos(media_type); - - // Find removed senders. I.e., senders where the sender id or ssrc don't match - // the new StreamParam. - for (auto sender_it = current_senders->begin(); - sender_it != current_senders->end(); - /* incremented manually */) { - const RtpSenderInfo& info = *sender_it; - const cricket::StreamParams* params = - cricket::GetStreamBySsrc(streams, info.first_ssrc); - std::string params_stream_id; - if (params) { - params_stream_id = - (!params->first_stream_id().empty() ? params->first_stream_id() - : kDefaultStreamId); - } - bool sender_exists = params && params->id == info.sender_id && - params_stream_id == info.stream_id; - // If this is a default track, and we still need it, don't remove it. 
- if ((info.stream_id == kDefaultStreamId && default_sender_needed) || - sender_exists) { - ++sender_it; - } else { - OnRemoteSenderRemoved(info, media_type); - sender_it = current_senders->erase(sender_it); - } + RTC_LOG_THREAD_BLOCK_COUNT(); + + if (IsClosed()) { + return; } + // Update stats here so that we have the most recent stats for tracks and + // streams before the channels are closed. + stats_->UpdateStats(kStatsOutputLevelStandard); - // Find new and active senders. - for (const cricket::StreamParams& params : streams) { - if (!params.has_ssrcs()) { - // The remote endpoint has streams, but didn't signal ssrcs. For an active - // sender, this means it is coming from a Unified Plan endpoint,so we just - // create a default. - default_sender_needed = true; - break; - } + ice_connection_state_ = PeerConnectionInterface::kIceConnectionClosed; + Observer()->OnIceConnectionChange(ice_connection_state_); + standardized_ice_connection_state_ = + PeerConnectionInterface::IceConnectionState::kIceConnectionClosed; + connection_state_ = PeerConnectionInterface::PeerConnectionState::kClosed; + Observer()->OnConnectionChange(connection_state_); - // |params.id| is the sender id and the stream id uses the first of - // |params.stream_ids|. The remote description could come from a Unified - // Plan endpoint, with multiple or no stream_ids() signaled. Since this is - // not supported in Plan B, we just take the first here and create the - // default stream ID if none is specified. - const std::string& stream_id = - (!params.first_stream_id().empty() ? params.first_stream_id() - : kDefaultStreamId); - const std::string& sender_id = params.id; - uint32_t ssrc = params.first_ssrc(); - - rtc::scoped_refptr stream = - remote_streams_->find(stream_id); - if (!stream) { - // This is a new MediaStream. Create a new remote MediaStream. 
- stream = MediaStreamProxy::Create(rtc::Thread::Current(), - MediaStream::Create(stream_id)); - remote_streams_->AddStream(stream); - new_streams->AddStream(stream); - } + sdp_handler_->Close(); - const RtpSenderInfo* sender_info = - FindSenderInfo(*current_senders, stream_id, sender_id); - if (!sender_info) { - current_senders->push_back(RtpSenderInfo(stream_id, sender_id, ssrc)); - OnRemoteSenderAdded(current_senders->back(), media_type); - } - } + NoteUsageEvent(UsageEvent::CLOSE_CALLED); - // Add default sender if necessary. - if (default_sender_needed) { - rtc::scoped_refptr default_stream = - remote_streams_->find(kDefaultStreamId); - if (!default_stream) { - // Create the new default MediaStream. - default_stream = MediaStreamProxy::Create( - rtc::Thread::Current(), MediaStream::Create(kDefaultStreamId)); - remote_streams_->AddStream(default_stream); - new_streams->AddStream(default_stream); - } - std::string default_sender_id = (media_type == cricket::MEDIA_TYPE_AUDIO) - ? kDefaultAudioSenderId - : kDefaultVideoSenderId; - const RtpSenderInfo* default_sender_info = - FindSenderInfo(*current_senders, kDefaultStreamId, default_sender_id); - if (!default_sender_info) { - current_senders->push_back( - RtpSenderInfo(kDefaultStreamId, default_sender_id, /*ssrc=*/0)); - OnRemoteSenderAdded(current_senders->back(), media_type); - } + for (const auto& transceiver : rtp_manager()->transceivers()->List()) { + transceiver->internal()->SetPeerConnectionClosed(); + if (!transceiver->stopped()) + transceiver->StopInternal(); } -} - -void PeerConnection::OnRemoteSenderAdded(const RtpSenderInfo& sender_info, - cricket::MediaType media_type) { - RTC_LOG(LS_INFO) << "Creating " << cricket::MediaTypeToString(media_type) - << " receiver for track_id=" << sender_info.sender_id - << " and stream_id=" << sender_info.stream_id; - MediaStreamInterface* stream = remote_streams_->find(sender_info.stream_id); - if (media_type == cricket::MEDIA_TYPE_AUDIO) { - 
CreateAudioReceiver(stream, sender_info); - } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { - CreateVideoReceiver(stream, sender_info); - } else { - RTC_NOTREACHED() << "Invalid media type"; + // Ensure that all asynchronous stats requests are completed before destroying + // the transport controller below. + if (stats_collector_) { + stats_collector_->WaitForPendingRequest(); } -} -void PeerConnection::OnRemoteSenderRemoved(const RtpSenderInfo& sender_info, - cricket::MediaType media_type) { - RTC_LOG(LS_INFO) << "Removing " << cricket::MediaTypeToString(media_type) - << " receiver for track_id=" << sender_info.sender_id - << " and stream_id=" << sender_info.stream_id; + // Don't destroy BaseChannels until after stats has been cleaned up so that + // the last stats request can still read from the channels. + sdp_handler_->DestroyAllChannels(); - MediaStreamInterface* stream = remote_streams_->find(sender_info.stream_id); + // The event log is used in the transport controller, which must be outlived + // by the former. CreateOffer by the peer connection is implemented + // asynchronously and if the peer connection is closed without resetting the + // WebRTC session description factory, the session description factory would + // call the transport controller. + sdp_handler_->ResetSessionDescFactory(); + rtp_manager_->Close(); - rtc::scoped_refptr receiver; - if (media_type == cricket::MEDIA_TYPE_AUDIO) { - // When the MediaEngine audio channel is destroyed, the RemoteAudioSource - // will be notified which will end the AudioRtpReceiver::track(). - receiver = RemoveAndStopReceiver(sender_info); - rtc::scoped_refptr audio_track = - stream->FindAudioTrack(sender_info.sender_id); - if (audio_track) { - stream->RemoveTrack(audio_track); - } - } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { - // Stopping or destroying a VideoRtpReceiver will end the - // VideoRtpReceiver::track(). 
- receiver = RemoveAndStopReceiver(sender_info); - rtc::scoped_refptr video_track = - stream->FindVideoTrack(sender_info.sender_id); - if (video_track) { - // There's no guarantee the track is still available, e.g. the track may - // have been removed from the stream by an application. - stream->RemoveTrack(video_track); + network_thread()->Invoke(RTC_FROM_HERE, [this] { + // Data channels will already have been unset via the DestroyAllChannels() + // call above, which triggers a call to TeardownDataChannelTransport_n(). + // TODO(tommi): ^^ That's not exactly optimal since this is yet another + // blocking hop to the network thread during Close(). Further still, the + // voice/video/data channels will be cleared on the worker thread. + transport_controller_.reset(); + port_allocator_->DiscardCandidatePool(); + if (network_thread_safety_) { + network_thread_safety_->SetNotAlive(); } - } else { - RTC_NOTREACHED() << "Invalid media type"; - } - if (receiver) { - Observer()->OnRemoveTrack(receiver); - } -} + }); -void PeerConnection::UpdateEndedRemoteMediaStreams() { - std::vector> streams_to_remove; - for (size_t i = 0; i < remote_streams_->count(); ++i) { - MediaStreamInterface* stream = remote_streams_->at(i); - if (stream->GetAudioTracks().empty() && stream->GetVideoTracks().empty()) { - streams_to_remove.push_back(stream); - } - } + worker_thread()->Invoke(RTC_FROM_HERE, [this] { + RTC_DCHECK_RUN_ON(worker_thread()); + worker_thread_safety_->SetNotAlive(); + call_.reset(); + // The event log must outlive call (and any other object that uses it). + event_log_.reset(); + }); + ReportUsagePattern(); + // The .h file says that observer can be discarded after close() returns. + // Make sure this is true. + observer_ = nullptr; - for (auto& stream : streams_to_remove) { - remote_streams_->RemoveStream(stream); - Observer()->OnRemoveStream(std::move(stream)); - } + // Signal shutdown to the sdp handler. This invalidates weak pointers for + // internal pending callbacks. 
+ sdp_handler_->PrepareForShutdown(); } -void PeerConnection::UpdateLocalSenders( - const std::vector& streams, - cricket::MediaType media_type) { - std::vector* current_senders = GetLocalSenderInfos(media_type); - - // Find removed tracks. I.e., tracks where the track id, stream id or ssrc - // don't match the new StreamParam. - for (auto sender_it = current_senders->begin(); - sender_it != current_senders->end(); - /* incremented manually */) { - const RtpSenderInfo& info = *sender_it; - const cricket::StreamParams* params = - cricket::GetStreamBySsrc(streams, info.first_ssrc); - if (!params || params->id != info.sender_id || - params->first_stream_id() != info.stream_id) { - OnLocalSenderRemoved(info, media_type); - sender_it = current_senders->erase(sender_it); - } else { - ++sender_it; - } +void PeerConnection::SetIceConnectionState(IceConnectionState new_state) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (ice_connection_state_ == new_state) { + return; } - // Find new and active senders. - for (const cricket::StreamParams& params : streams) { - // The sync_label is the MediaStream label and the |stream.id| is the - // sender id. - const std::string& stream_id = params.first_stream_id(); - const std::string& sender_id = params.id; - uint32_t ssrc = params.first_ssrc(); - const RtpSenderInfo* sender_info = - FindSenderInfo(*current_senders, stream_id, sender_id); - if (!sender_info) { - current_senders->push_back(RtpSenderInfo(stream_id, sender_id, ssrc)); - OnLocalSenderAdded(current_senders->back(), media_type); - } + // After transitioning to "closed", ignore any additional states from + // TransportController (such as "disconnected"). 
+ if (IsClosed()) { + return; } + + RTC_LOG(LS_INFO) << "Changing IceConnectionState " << ice_connection_state_ + << " => " << new_state; + RTC_DCHECK(ice_connection_state_ != + PeerConnectionInterface::kIceConnectionClosed); + + ice_connection_state_ = new_state; + Observer()->OnIceConnectionChange(ice_connection_state_); } -void PeerConnection::OnLocalSenderAdded(const RtpSenderInfo& sender_info, - cricket::MediaType media_type) { - RTC_DCHECK(!IsUnifiedPlan()); - auto sender = FindSenderById(sender_info.sender_id); - if (!sender) { - RTC_LOG(LS_WARNING) << "An unknown RtpSender with id " - << sender_info.sender_id - << " has been configured in the local description."; +void PeerConnection::SetStandardizedIceConnectionState( + PeerConnectionInterface::IceConnectionState new_state) { + if (standardized_ice_connection_state_ == new_state) { return; } - if (sender->media_type() != media_type) { - RTC_LOG(LS_WARNING) << "An RtpSender has been configured in the local" - " description with an unexpected media type."; + if (IsClosed()) { return; } - sender->internal()->set_stream_ids({sender_info.stream_id}); - sender->internal()->SetSsrc(sender_info.first_ssrc); + RTC_LOG(LS_INFO) << "Changing standardized IceConnectionState " + << standardized_ice_connection_state_ << " => " << new_state; + + standardized_ice_connection_state_ = new_state; + Observer()->OnStandardizedIceConnectionChange(new_state); } -void PeerConnection::OnLocalSenderRemoved(const RtpSenderInfo& sender_info, - cricket::MediaType media_type) { - auto sender = FindSenderById(sender_info.sender_id); - if (!sender) { - // This is the normal case. I.e., RemoveStream has been called and the - // SessionDescriptions has been renegotiated. +void PeerConnection::SetConnectionState( + PeerConnectionInterface::PeerConnectionState new_state) { + if (connection_state_ == new_state) return; - } - - // A sender has been removed from the SessionDescription but it's still - // associated with the PeerConnection. 
This only occurs if the SDP doesn't - // match with the calls to CreateSender, AddStream and RemoveStream. - if (sender->media_type() != media_type) { - RTC_LOG(LS_WARNING) << "An RtpSender has been configured in the local" - " description with an unexpected media type."; + if (IsClosed()) return; - } + connection_state_ = new_state; + Observer()->OnConnectionChange(new_state); + + if (new_state == PeerConnectionState::kConnected && !was_ever_connected_) { + was_ever_connected_ = true; + + // The first connection state change to connected happens once per + // connection which makes it a good point to report metrics. + // Record bundle-policy from configuration. Done here from + // connectionStateChange to limit to actually established connections. + BundlePolicyUsage policy = kBundlePolicyUsageMax; + switch (configuration_.bundle_policy) { + case kBundlePolicyBalanced: + policy = kBundlePolicyUsageBalanced; + break; + case kBundlePolicyMaxBundle: + policy = kBundlePolicyUsageMaxBundle; + break; + case kBundlePolicyMaxCompat: + policy = kBundlePolicyUsageMaxCompat; + break; + } + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.BundlePolicy", policy, + kBundlePolicyUsageMax); + + // Record configured ice candidate pool size depending on the + // BUNDLE policy. See + // https://w3c.github.io/webrtc-pc/#dom-rtcconfiguration-icecandidatepoolsize + // The ICE candidate pool size is an optimization and it may be desirable + // to restrict the maximum size of the pre-gathered candidates. 
+ switch (configuration_.bundle_policy) { + case kBundlePolicyBalanced: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.PeerConnection.CandidatePoolUsage.Balanced", + configuration_.ice_candidate_pool_size, 0, 255, 256); + break; + case kBundlePolicyMaxBundle: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.PeerConnection.CandidatePoolUsage.MaxBundle", + configuration_.ice_candidate_pool_size, 0, 255, 256); + break; + case kBundlePolicyMaxCompat: + RTC_HISTOGRAM_COUNTS_LINEAR( + "WebRTC.PeerConnection.CandidatePoolUsage.MaxCompat", + configuration_.ice_candidate_pool_size, 0, 255, 256); + break; + } - sender->internal()->SetSsrc(0); + // Record whether there was a local or remote provisional answer. + ProvisionalAnswerUsage pranswer = kProvisionalAnswerNotUsed; + if (local_description()->GetType() == SdpType::kPrAnswer) { + pranswer = kProvisionalAnswerLocal; + } else if (remote_description()->GetType() == SdpType::kPrAnswer) { + pranswer = kProvisionalAnswerRemote; + } + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.ProvisionalAnswer", + pranswer, kProvisionalAnswerMax); + } } -void PeerConnection::OnSctpDataChannelClosed(DataChannel* channel) { - // Since data_channel_controller doesn't do signals, this - // signal is relayed here. - data_channel_controller_.OnSctpDataChannelClosed(channel); +void PeerConnection::OnIceGatheringChange( + PeerConnectionInterface::IceGatheringState new_state) { + if (IsClosed()) { + return; + } + ice_gathering_state_ = new_state; + Observer()->OnIceGatheringChange(ice_gathering_state_); } -rtc::scoped_refptr> -PeerConnection::GetAudioTransceiver() const { - // This method only works with Plan B SDP, where there is a single - // audio/video transceiver. 
- RTC_DCHECK(!IsUnifiedPlan()); - for (auto transceiver : transceivers_) { - if (transceiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { - return transceiver; - } +void PeerConnection::OnIceCandidate( + std::unique_ptr candidate) { + if (IsClosed()) { + return; } - RTC_NOTREACHED(); - return nullptr; + ReportIceCandidateCollected(candidate->candidate()); + Observer()->OnIceCandidate(candidate.get()); } -rtc::scoped_refptr> -PeerConnection::GetVideoTransceiver() const { - // This method only works with Plan B SDP, where there is a single - // audio/video transceiver. - RTC_DCHECK(!IsUnifiedPlan()); - for (auto transceiver : transceivers_) { - if (transceiver->media_type() == cricket::MEDIA_TYPE_VIDEO) { - return transceiver; - } +void PeerConnection::OnIceCandidateError(const std::string& address, + int port, + const std::string& url, + int error_code, + const std::string& error_text) { + if (IsClosed()) { + return; } - RTC_NOTREACHED(); - return nullptr; + Observer()->OnIceCandidateError(address, port, url, error_code, error_text); + // Leftover not to break wpt test during migration to the new API. + Observer()->OnIceCandidateError(address + ":", url, error_code, error_text); } -// TODO(bugs.webrtc.org/7600): Remove this when multiple transceivers with -// individual transceiver directions are supported. 
-bool PeerConnection::HasRtpSender(cricket::MediaType type) const { - switch (type) { - case cricket::MEDIA_TYPE_AUDIO: - return !GetAudioTransceiver()->internal()->senders().empty(); - case cricket::MEDIA_TYPE_VIDEO: - return !GetVideoTransceiver()->internal()->senders().empty(); - case cricket::MEDIA_TYPE_DATA: - return false; - } - RTC_NOTREACHED(); - return false; +void PeerConnection::OnIceCandidatesRemoved( + const std::vector& candidates) { + if (IsClosed()) { + return; + } + Observer()->OnIceCandidatesRemoved(candidates); } -rtc::scoped_refptr> -PeerConnection::FindSenderForTrack(MediaStreamTrackInterface* track) const { - for (const auto& transceiver : transceivers_) { - for (auto sender : transceiver->internal()->senders()) { - if (sender->track() == track) { - return sender; - } - } +void PeerConnection::OnSelectedCandidatePairChanged( + const cricket::CandidatePairChangeEvent& event) { + if (IsClosed()) { + return; } - return nullptr; -} -rtc::scoped_refptr> -PeerConnection::FindSenderById(const std::string& sender_id) const { - for (const auto& transceiver : transceivers_) { - for (auto sender : transceiver->internal()->senders()) { - if (sender->id() == sender_id) { - return sender; - } - } + if (event.selected_candidate_pair.local_candidate().type() == + LOCAL_PORT_TYPE && + event.selected_candidate_pair.remote_candidate().type() == + LOCAL_PORT_TYPE) { + NoteUsageEvent(UsageEvent::DIRECT_CONNECTION_SELECTED); } - return nullptr; + + Observer()->OnIceSelectedCandidatePairChanged(event); } -rtc::scoped_refptr> -PeerConnection::FindReceiverById(const std::string& receiver_id) const { - for (const auto& transceiver : transceivers_) { - for (auto receiver : transceiver->internal()->receivers()) { - if (receiver->id() == receiver_id) { - return receiver; - } - } - } - return nullptr; +absl::optional PeerConnection::GetDataMid() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return sctp_mid_s_; } -std::vector* 
-PeerConnection::GetRemoteSenderInfos(cricket::MediaType media_type) { - RTC_DCHECK(media_type == cricket::MEDIA_TYPE_AUDIO || - media_type == cricket::MEDIA_TYPE_VIDEO); - return (media_type == cricket::MEDIA_TYPE_AUDIO) - ? &remote_audio_sender_infos_ - : &remote_video_sender_infos_; +void PeerConnection::SetSctpDataMid(const std::string& mid) { + RTC_DCHECK_RUN_ON(signaling_thread()); + sctp_mid_s_ = mid; } -std::vector* PeerConnection::GetLocalSenderInfos( - cricket::MediaType media_type) { - RTC_DCHECK(media_type == cricket::MEDIA_TYPE_AUDIO || - media_type == cricket::MEDIA_TYPE_VIDEO); - return (media_type == cricket::MEDIA_TYPE_AUDIO) ? &local_audio_sender_infos_ - : &local_video_sender_infos_; +void PeerConnection::ResetSctpDataMid() { + RTC_DCHECK_RUN_ON(signaling_thread()); + sctp_mid_s_.reset(); + sctp_transport_name_s_.clear(); } -const PeerConnection::RtpSenderInfo* PeerConnection::FindSenderInfo( - const std::vector& infos, - const std::string& stream_id, - const std::string sender_id) const { - for (const RtpSenderInfo& sender_info : infos) { - if (sender_info.stream_id == stream_id && - sender_info.sender_id == sender_id) { - return &sender_info; - } - } - return nullptr; +void PeerConnection::OnSctpDataChannelClosed(DataChannelInterface* channel) { + // Since data_channel_controller doesn't do signals, this + // signal is relayed here. + data_channel_controller_.OnSctpDataChannelClosed( + static_cast(channel)); } -DataChannel* PeerConnection::FindDataChannelBySid(int sid) const { +SctpDataChannel* PeerConnection::FindDataChannelBySid(int sid) const { return data_channel_controller_.FindDataChannelBySid(sid); } @@ -5821,12 +1996,10 @@ PeerConnection::InitializePortAllocator_n( // by experiment. 
if (configuration.disable_ipv6) { port_allocator_flags &= ~(cricket::PORTALLOCATOR_ENABLE_IPV6); - } else if (absl::StartsWith( - webrtc::field_trial::FindFullName("WebRTC-IPv6Default"), - "Disabled")) { + } else if (absl::StartsWith(context_->trials().Lookup("WebRTC-IPv6Default"), + "Disabled")) { port_allocator_flags &= ~(cricket::PORTALLOCATOR_ENABLE_IPV6); } - if (configuration.disable_ipv6_on_wifi) { port_allocator_flags &= ~(cricket::PORTALLOCATOR_ENABLE_IPV6_ON_WIFI); RTC_LOG(LS_INFO) << "IPv6 candidates on Wi-Fi are disabled."; @@ -5881,6 +2054,7 @@ bool PeerConnection::ReconfigurePortAllocator_n( webrtc::TurnCustomizer* turn_customizer, absl::optional stun_candidate_keepalive_interval, bool have_local_description) { + RTC_DCHECK_RUN_ON(network_thread()); port_allocator_->SetCandidateFilter( ConvertIceTransportTypeToCandidateFilter(type)); // According to JSEP, after setLocalDescription, changing the candidate pool @@ -5903,7 +2077,7 @@ bool PeerConnection::ReconfigurePortAllocator_n( } cricket::ChannelManager* PeerConnection::channel_manager() const { - return factory_->channel_manager(); + return context_->channel_manager(); } bool PeerConnection::StartRtcEventLog_w( @@ -5925,16 +2099,12 @@ void PeerConnection::StopRtcEventLog_w() { cricket::ChannelInterface* PeerConnection::GetChannel( const std::string& content_name) { - for (const auto& transceiver : transceivers_) { + for (const auto& transceiver : rtp_manager()->transceivers()->UnsafeList()) { cricket::ChannelInterface* channel = transceiver->internal()->channel(); if (channel && channel->content_name() == content_name) { return channel; } } - if (rtp_data_channel() && - rtp_data_channel()->content_name() == content_name) { - return rtp_data_channel(); - } return nullptr; } @@ -5955,8 +2125,9 @@ bool PeerConnection::GetSctpSslRole(rtc::SSLRole* role) { absl::optional dtls_role; if (sctp_mid_s_) { dtls_role = transport_controller_->GetDtlsRole(*sctp_mid_s_); - if (!dtls_role && is_caller_.has_value()) 
{ - dtls_role = *is_caller_ ? rtc::SSL_SERVER : rtc::SSL_CLIENT; + if (!dtls_role && sdp_handler_->is_caller().has_value()) { + dtls_role = + *sdp_handler_->is_caller() ? rtc::SSL_SERVER : rtc::SSL_CLIENT; } *role = *dtls_role; return true; @@ -5982,155 +2153,6 @@ bool PeerConnection::GetSslRole(const std::string& content_name, return false; } -void PeerConnection::SetSessionError(SessionError error, - const std::string& error_desc) { - RTC_DCHECK_RUN_ON(signaling_thread()); - if (error != session_error_) { - session_error_ = error; - session_error_desc_ = error_desc; - } -} - -RTCError PeerConnection::UpdateSessionState( - SdpType type, - cricket::ContentSource source, - const cricket::SessionDescription* description) { - RTC_DCHECK_RUN_ON(signaling_thread()); - - // If there's already a pending error then no state transition should happen. - // But all call-sites should be verifying this before calling us! - RTC_DCHECK(session_error() == SessionError::kNone); - - // If this is answer-ish we're ready to let media flow. - if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { - EnableSending(); - } - - // Update the signaling state according to the specified state machine (see - // https://w3c.github.io/webrtc-pc/#rtcsignalingstate-enum). - if (type == SdpType::kOffer) { - ChangeSignalingState(source == cricket::CS_LOCAL - ? PeerConnectionInterface::kHaveLocalOffer - : PeerConnectionInterface::kHaveRemoteOffer); - } else if (type == SdpType::kPrAnswer) { - ChangeSignalingState(source == cricket::CS_LOCAL - ? PeerConnectionInterface::kHaveLocalPrAnswer - : PeerConnectionInterface::kHaveRemotePrAnswer); - } else { - RTC_DCHECK(type == SdpType::kAnswer); - ChangeSignalingState(PeerConnectionInterface::kStable); - transceiver_stable_states_by_transceivers_.clear(); - have_pending_rtp_data_channel_ = false; - } - - // Update internal objects according to the session description's media - // descriptions. 
- RTCError error = PushdownMediaDescription(type, source); - if (!error.ok()) { - return error; - } - - return RTCError::OK(); -} - -RTCError PeerConnection::PushdownMediaDescription( - SdpType type, - cricket::ContentSource source) { - const SessionDescriptionInterface* sdesc = - (source == cricket::CS_LOCAL ? local_description() - : remote_description()); - RTC_DCHECK(sdesc); - - // Push down the new SDP media section for each audio/video transceiver. - for (const auto& transceiver : transceivers_) { - const ContentInfo* content_info = - FindMediaSectionForTransceiver(transceiver, sdesc); - cricket::ChannelInterface* channel = transceiver->internal()->channel(); - if (!channel || !content_info || content_info->rejected) { - continue; - } - const MediaContentDescription* content_desc = - content_info->media_description(); - if (!content_desc) { - continue; - } - std::string error; - bool success = (source == cricket::CS_LOCAL) - ? channel->SetLocalContent(content_desc, type, &error) - : channel->SetRemoteContent(content_desc, type, &error); - if (!success) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, error); - } - } - - // If using the RtpDataChannel, push down the new SDP section for it too. - if (data_channel_controller_.rtp_data_channel()) { - const ContentInfo* data_content = - cricket::GetFirstDataContent(sdesc->description()); - if (data_content && !data_content->rejected) { - const MediaContentDescription* data_desc = - data_content->media_description(); - if (data_desc) { - std::string error; - bool success = - (source == cricket::CS_LOCAL) - ? 
data_channel_controller_.rtp_data_channel()->SetLocalContent( - data_desc, type, &error) - : data_channel_controller_.rtp_data_channel()->SetRemoteContent( - data_desc, type, &error); - if (!success) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, error); - } - } - } - } - - // Need complete offer/answer with an SCTP m= section before starting SCTP, - // according to https://tools.ietf.org/html/draft-ietf-mmusic-sctp-sdp-19 - if (sctp_mid_s_ && local_description() && remote_description()) { - rtc::scoped_refptr sctp_transport = - transport_controller_->GetSctpTransport(*sctp_mid_s_); - auto local_sctp_description = cricket::GetFirstSctpDataContentDescription( - local_description()->description()); - auto remote_sctp_description = cricket::GetFirstSctpDataContentDescription( - remote_description()->description()); - if (sctp_transport && local_sctp_description && remote_sctp_description) { - int max_message_size; - // A remote max message size of zero means "any size supported". - // We configure the connection with our own max message size. 
- if (remote_sctp_description->max_message_size() == 0) { - max_message_size = local_sctp_description->max_message_size(); - } else { - max_message_size = - std::min(local_sctp_description->max_message_size(), - remote_sctp_description->max_message_size()); - } - sctp_transport->Start(local_sctp_description->port(), - remote_sctp_description->port(), max_message_size); - } - } - - return RTCError::OK(); -} - -RTCError PeerConnection::PushdownTransportDescription( - cricket::ContentSource source, - SdpType type) { - RTC_DCHECK_RUN_ON(signaling_thread()); - - if (source == cricket::CS_LOCAL) { - const SessionDescriptionInterface* sdesc = local_description(); - RTC_DCHECK(sdesc); - return transport_controller_->SetLocalDescription(type, - sdesc->description()); - } else { - const SessionDescriptionInterface* sdesc = remote_description(); - RTC_DCHECK(sdesc); - return transport_controller_->SetRemoteDescription(type, - sdesc->description()); - } -} - bool PeerConnection::GetTransportDescription( const SessionDescription* description, const std::string& content_name, @@ -6147,102 +2169,41 @@ bool PeerConnection::GetTransportDescription( return true; } -cricket::IceConfig PeerConnection::ParseIceConfig( - const PeerConnectionInterface::RTCConfiguration& config) const { - cricket::ContinualGatheringPolicy gathering_policy; - switch (config.continual_gathering_policy) { - case PeerConnectionInterface::GATHER_ONCE: - gathering_policy = cricket::GATHER_ONCE; - break; - case PeerConnectionInterface::GATHER_CONTINUALLY: - gathering_policy = cricket::GATHER_CONTINUALLY; - break; - default: - RTC_NOTREACHED(); - gathering_policy = cricket::GATHER_ONCE; - } - - cricket::IceConfig ice_config; - ice_config.receiving_timeout = RTCConfigurationToIceConfigOptionalInt( - config.ice_connection_receiving_timeout); - ice_config.prioritize_most_likely_candidate_pairs = - config.prioritize_most_likely_ice_candidate_pairs; - ice_config.backup_connection_ping_interval = - 
RTCConfigurationToIceConfigOptionalInt( - config.ice_backup_candidate_pair_ping_interval); - ice_config.continual_gathering_policy = gathering_policy; - ice_config.presume_writable_when_fully_relayed = - config.presume_writable_when_fully_relayed; - ice_config.surface_ice_candidates_on_ice_transport_type_changed = - config.surface_ice_candidates_on_ice_transport_type_changed; - ice_config.ice_check_interval_strong_connectivity = - config.ice_check_interval_strong_connectivity; - ice_config.ice_check_interval_weak_connectivity = - config.ice_check_interval_weak_connectivity; - ice_config.ice_check_min_interval = config.ice_check_min_interval; - ice_config.ice_unwritable_timeout = config.ice_unwritable_timeout; - ice_config.ice_unwritable_min_checks = config.ice_unwritable_min_checks; - ice_config.ice_inactive_timeout = config.ice_inactive_timeout; - ice_config.stun_keepalive_interval = config.stun_candidate_keepalive_interval; - ice_config.network_preference = config.network_preference; - return ice_config; +std::vector PeerConnection::GetDataChannelStats() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return data_channel_controller_.GetDataChannelStats(); } absl::optional PeerConnection::sctp_transport_name() const { RTC_DCHECK_RUN_ON(signaling_thread()); - if (sctp_mid_s_ && transport_controller_) { - auto dtls_transport = transport_controller_->GetDtlsTransport(*sctp_mid_s_); - if (dtls_transport) { - return dtls_transport->transport_name(); - } - return absl::optional(); - } + if (sctp_mid_s_ && transport_controller_) + return sctp_transport_name_s_; return absl::optional(); } +absl::optional PeerConnection::sctp_mid() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return sctp_mid_s_; +} + cricket::CandidateStatsList PeerConnection::GetPooledCandidateStats() const { + RTC_DCHECK_RUN_ON(network_thread()); + if (!network_thread_safety_->alive()) + return {}; cricket::CandidateStatsList candidate_states_list; - network_thread()->Invoke( - RTC_FROM_HERE, - 
rtc::Bind(&cricket::PortAllocator::GetCandidateStatsFromPooledSessions, - port_allocator_.get(), &candidate_states_list)); + port_allocator_->GetCandidateStatsFromPooledSessions(&candidate_states_list); return candidate_states_list; } -std::map PeerConnection::GetTransportNamesByMid() - const { - RTC_DCHECK_RUN_ON(signaling_thread()); - std::map transport_names_by_mid; - for (const auto& transceiver : transceivers_) { - cricket::ChannelInterface* channel = transceiver->internal()->channel(); - if (channel) { - transport_names_by_mid[channel->content_name()] = - channel->transport_name(); - } - } - if (data_channel_controller_.rtp_data_channel()) { - transport_names_by_mid[data_channel_controller_.rtp_data_channel() - ->content_name()] = - data_channel_controller_.rtp_data_channel()->transport_name(); - } - if (data_channel_controller_.data_channel_transport()) { - absl::optional transport_name = sctp_transport_name(); - RTC_DCHECK(transport_name); - transport_names_by_mid[*sctp_mid_s_] = *transport_name; - } - return transport_names_by_mid; -} - std::map PeerConnection::GetTransportStatsByNames( const std::set& transport_names) { - if (!network_thread()->IsCurrent()) { - return network_thread() - ->Invoke>( - RTC_FROM_HERE, - [&] { return GetTransportStatsByNames(transport_names); }); - } + TRACE_EVENT0("webrtc", "PeerConnection::GetTransportStatsByNames"); RTC_DCHECK_RUN_ON(network_thread()); + if (!network_thread_safety_->alive()) + return {}; + + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; std::map transport_stats_by_name; for (const std::string& transport_name : transport_names) { cricket::TransportStats transport_stats; @@ -6261,7 +2222,8 @@ PeerConnection::GetTransportStatsByNames( bool PeerConnection::GetLocalCertificate( const std::string& transport_name, rtc::scoped_refptr* certificate) { - if (!certificate) { + RTC_DCHECK_RUN_ON(network_thread()); + if (!network_thread_safety_->alive() || !certificate) { return false; } *certificate = 
transport_controller_->GetLocalCertificate(transport_name); @@ -6270,31 +2232,20 @@ bool PeerConnection::GetLocalCertificate( std::unique_ptr PeerConnection::GetRemoteSSLCertChain( const std::string& transport_name) { + RTC_DCHECK_RUN_ON(network_thread()); return transport_controller_->GetRemoteSSLCertChain(transport_name); } -cricket::DataChannelType PeerConnection::data_channel_type() const { - return data_channel_controller_.data_channel_type(); -} - bool PeerConnection::IceRestartPending(const std::string& content_name) const { RTC_DCHECK_RUN_ON(signaling_thread()); - return pending_ice_restarts_.find(content_name) != - pending_ice_restarts_.end(); + return sdp_handler_->IceRestartPending(content_name); } bool PeerConnection::NeedsIceRestart(const std::string& content_name) const { - return transport_controller_->NeedsIceRestart(content_name); -} - -void PeerConnection::OnCertificateReady( - const rtc::scoped_refptr& certificate) { - transport_controller_->SetLocalCertificate(certificate); -} - -void PeerConnection::OnDtlsSrtpSetupFailure(cricket::BaseChannel*, bool rtcp) { - SetSessionError(SessionError::kTransport, - rtcp ? 
kDtlsSrtpSetupFailureRtcp : kDtlsSrtpSetupFailureRtp); + return network_thread()->Invoke(RTC_FROM_HERE, [this, &content_name] { + RTC_DCHECK_RUN_ON(network_thread()); + return transport_controller_->NeedsIceRestart(content_name); + }); } void PeerConnection::OnTransportControllerConnectionState( @@ -6334,8 +2285,8 @@ void PeerConnection::OnTransportControllerConnectionState( SetIceConnectionState(PeerConnectionInterface::kIceConnectionConnected); } SetIceConnectionState(PeerConnectionInterface::kIceConnectionCompleted); + NoteUsageEvent(UsageEvent::ICE_STATE_CONNECTED); - ReportTransportStats(); break; default: RTC_NOTREACHED(); @@ -6345,6 +2296,8 @@ void PeerConnection::OnTransportControllerConnectionState( void PeerConnection::OnTransportControllerCandidatesGathered( const std::string& transport_name, const cricket::Candidates& candidates) { + // TODO(bugs.webrtc.org/12427): Expect this to come in on the network thread + // (not signaling as it currently does), handle appropriately. int sdp_mline_index; if (!GetLocalCandidateMediaIndex(transport_name, &sdp_mline_index)) { RTC_LOG(LS_ERROR) @@ -6358,9 +2311,7 @@ void PeerConnection::OnTransportControllerCandidatesGathered( // Use transport_name as the candidate media id. 
std::unique_ptr candidate( new JsepIceCandidate(transport_name, sdp_mline_index, *citer)); - if (local_description()) { - mutable_local_description()->AddCandidate(candidate.get()); - } + sdp_handler_->AddLocalIceCandidate(candidate.get()); OnIceCandidate(std::move(candidate)); } } @@ -6382,10 +2333,7 @@ void PeerConnection::OnTransportControllerCandidatesRemoved( return; } } - - if (local_description()) { - mutable_local_description()->RemoveCandidates(candidates); - } + sdp_handler_->RemoveLocalIceCandidates(candidates); OnIceCandidatesRemoved(candidates); } @@ -6401,20 +2349,6 @@ void PeerConnection::OnTransportControllerDtlsHandshakeError( static_cast(rtc::SSLHandshakeError::MAX_VALUE)); } -void PeerConnection::EnableSending() { - for (const auto& transceiver : transceivers_) { - cricket::ChannelInterface* channel = transceiver->internal()->channel(); - if (channel && !channel->enabled()) { - channel->Enable(true); - } - } - - if (data_channel_controller_.rtp_data_channel() && - !data_channel_controller_.rtp_data_channel()->enabled()) { - data_channel_controller_.rtp_data_channel()->Enable(true); - } -} - // Returns the media index for a local ice candidate given the content name. 
bool PeerConnection::GetLocalCandidateMediaIndex( const std::string& content_name, @@ -6423,287 +2357,25 @@ bool PeerConnection::GetLocalCandidateMediaIndex( return false; } - bool content_found = false; - const ContentInfos& contents = local_description()->description()->contents(); - for (size_t index = 0; index < contents.size(); ++index) { - if (contents[index].name == content_name) { - *sdp_mline_index = static_cast(index); - content_found = true; - break; - } - } - return content_found; -} - -bool PeerConnection::UseCandidatesInSessionDescription( - const SessionDescriptionInterface* remote_desc) { - if (!remote_desc) { - return true; - } - bool ret = true; - - for (size_t m = 0; m < remote_desc->number_of_mediasections(); ++m) { - const IceCandidateCollection* candidates = remote_desc->candidates(m); - for (size_t n = 0; n < candidates->count(); ++n) { - const IceCandidateInterface* candidate = candidates->at(n); - bool valid = false; - if (!ReadyToUseRemoteCandidate(candidate, remote_desc, &valid)) { - if (valid) { - RTC_LOG(LS_INFO) - << "UseCandidatesInSessionDescription: Not ready to use " - "candidate."; - } - continue; - } - ret = UseCandidate(candidate); - if (!ret) { - break; - } - } - } - return ret; -} - -bool PeerConnection::UseCandidate(const IceCandidateInterface* candidate) { - RTCErrorOr result = - FindContentInfo(remote_description(), candidate); - if (!result.ok()) { - RTC_LOG(LS_ERROR) << "UseCandidate: Invalid candidate. " - << result.error().message(); - return false; - } - std::vector candidates; - candidates.push_back(candidate->candidate()); - // Invoking BaseSession method to handle remote candidates. - RTCError error = transport_controller_->AddRemoteCandidates( - result.value()->name, candidates); - if (error.ok()) { - ReportRemoteIceCandidateAdded(candidate->candidate()); - // Candidates successfully submitted for checking. 
- if (ice_connection_state_ == PeerConnectionInterface::kIceConnectionNew || - ice_connection_state_ == - PeerConnectionInterface::kIceConnectionDisconnected) { - // If state is New, then the session has just gotten its first remote ICE - // candidates, so go to Checking. - // If state is Disconnected, the session is re-using old candidates or - // receiving additional ones, so go to Checking. - // If state is Connected, stay Connected. - // TODO(bemasc): If state is Connected, and the new candidates are for a - // newly added transport, then the state actually _should_ move to - // checking. Add a way to distinguish that case. - SetIceConnectionState(PeerConnectionInterface::kIceConnectionChecking); - } - // TODO(bemasc): If state is Completed, go back to Connected. - } else { - RTC_LOG(LS_WARNING) << error.message(); - } - return true; -} - -RTCErrorOr PeerConnection::FindContentInfo( - const SessionDescriptionInterface* description, - const IceCandidateInterface* candidate) { - if (candidate->sdp_mline_index() >= 0) { - size_t mediacontent_index = - static_cast(candidate->sdp_mline_index()); - size_t content_size = description->description()->contents().size(); - if (mediacontent_index < content_size) { - return &description->description()->contents()[mediacontent_index]; - } else { - return RTCError(RTCErrorType::INVALID_RANGE, - "Media line index (" + - rtc::ToString(candidate->sdp_mline_index()) + - ") out of range (number of mlines: " + - rtc::ToString(content_size) + ")."); - } - } else if (!candidate->sdp_mid().empty()) { - auto& contents = description->description()->contents(); - auto it = absl::c_find_if( - contents, [candidate](const cricket::ContentInfo& content_info) { - return content_info.mid() == candidate->sdp_mid(); - }); - if (it == contents.end()) { - return RTCError( - RTCErrorType::INVALID_PARAMETER, - "Mid " + candidate->sdp_mid() + - " specified but no media section with that mid found."); - } else { - return &*it; - } - } - - return 
RTCError(RTCErrorType::INVALID_PARAMETER, - "Neither sdp_mline_index nor sdp_mid specified."); -} - -void PeerConnection::RemoveUnusedChannels(const SessionDescription* desc) { - // Destroy video channel first since it may have a pointer to the - // voice channel. - const cricket::ContentInfo* video_info = cricket::GetFirstVideoContent(desc); - if (!video_info || video_info->rejected) { - DestroyTransceiverChannel(GetVideoTransceiver()); - } - - const cricket::ContentInfo* audio_info = cricket::GetFirstAudioContent(desc); - if (!audio_info || audio_info->rejected) { - DestroyTransceiverChannel(GetAudioTransceiver()); - } - - const cricket::ContentInfo* data_info = cricket::GetFirstDataContent(desc); - if (!data_info || data_info->rejected) { - DestroyDataChannelTransport(); - } -} - -RTCErrorOr PeerConnection::GetEarlyBundleGroup( - const SessionDescription& desc) const { - const cricket::ContentGroup* bundle_group = nullptr; - if (configuration_.bundle_policy == - PeerConnectionInterface::kBundlePolicyMaxBundle) { - bundle_group = desc.GetGroupByName(cricket::GROUP_TYPE_BUNDLE); - if (!bundle_group) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "max-bundle configured but session description " - "has no BUNDLE group"); - } - } - return bundle_group; -} - -RTCError PeerConnection::CreateChannels(const SessionDescription& desc) { - // Creating the media channels. Transports should already have been created - // at this point. 
- const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(&desc); - if (voice && !voice->rejected && - !GetAudioTransceiver()->internal()->channel()) { - cricket::VoiceChannel* voice_channel = CreateVoiceChannel(voice->name); - if (!voice_channel) { - LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, - "Failed to create voice channel."); - } - GetAudioTransceiver()->internal()->SetChannel(voice_channel); - } - - const cricket::ContentInfo* video = cricket::GetFirstVideoContent(&desc); - if (video && !video->rejected && - !GetVideoTransceiver()->internal()->channel()) { - cricket::VideoChannel* video_channel = CreateVideoChannel(video->name); - if (!video_channel) { - LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, - "Failed to create video channel."); - } - GetVideoTransceiver()->internal()->SetChannel(video_channel); - } - - const cricket::ContentInfo* data = cricket::GetFirstDataContent(&desc); - if (data_channel_type() != cricket::DCT_NONE && data && !data->rejected && - !data_channel_controller_.rtp_data_channel() && - !data_channel_controller_.data_channel_transport()) { - if (!CreateDataChannel(data->name)) { - LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, - "Failed to create data channel."); - } - } - - return RTCError::OK(); -} - -// TODO(steveanton): Perhaps this should be managed by the RtpTransceiver. 
-cricket::VoiceChannel* PeerConnection::CreateVoiceChannel( - const std::string& mid) { - RtpTransportInternal* rtp_transport = GetRtpTransport(mid); - MediaTransportConfig media_transport_config = - transport_controller_->GetMediaTransportConfig(mid); - - cricket::VoiceChannel* voice_channel = channel_manager()->CreateVoiceChannel( - call_ptr_, configuration_.media_config, rtp_transport, - media_transport_config, signaling_thread(), mid, SrtpRequired(), - GetCryptoOptions(), &ssrc_generator_, audio_options_); - if (!voice_channel) { - return nullptr; - } - voice_channel->SignalDtlsSrtpSetupFailure.connect( - this, &PeerConnection::OnDtlsSrtpSetupFailure); - voice_channel->SignalSentPacket.connect(this, - &PeerConnection::OnSentPacket_w); - voice_channel->SetRtpTransport(rtp_transport); - - return voice_channel; -} - -// TODO(steveanton): Perhaps this should be managed by the RtpTransceiver. -cricket::VideoChannel* PeerConnection::CreateVideoChannel( - const std::string& mid) { - RtpTransportInternal* rtp_transport = GetRtpTransport(mid); - MediaTransportConfig media_transport_config = - transport_controller_->GetMediaTransportConfig(mid); - - cricket::VideoChannel* video_channel = channel_manager()->CreateVideoChannel( - call_ptr_, configuration_.media_config, rtp_transport, - media_transport_config, signaling_thread(), mid, SrtpRequired(), - GetCryptoOptions(), &ssrc_generator_, video_options_, - video_bitrate_allocator_factory_.get()); - if (!video_channel) { - return nullptr; - } - video_channel->SignalDtlsSrtpSetupFailure.connect( - this, &PeerConnection::OnDtlsSrtpSetupFailure); - video_channel->SignalSentPacket.connect(this, - &PeerConnection::OnSentPacket_w); - video_channel->SetRtpTransport(rtp_transport); - - return video_channel; -} - -bool PeerConnection::CreateDataChannel(const std::string& mid) { - switch (data_channel_type()) { - case cricket::DCT_SCTP: - case cricket::DCT_DATA_CHANNEL_TRANSPORT_SCTP: - case cricket::DCT_DATA_CHANNEL_TRANSPORT: - if 
(network_thread()->Invoke( - RTC_FROM_HERE, - rtc::Bind(&PeerConnection::SetupDataChannelTransport_n, this, - mid))) { - sctp_mid_s_ = mid; - } else { - return false; - } - - // All non-RTP data channels must initialize |sctp_data_channels_|. - for (const auto& channel : - *data_channel_controller_.sctp_data_channels()) { - channel->OnTransportChannelCreated(); - } - return true; - case cricket::DCT_RTP: - default: - RtpTransportInternal* rtp_transport = GetRtpTransport(mid); - data_channel_controller_.set_rtp_data_channel( - channel_manager()->CreateRtpDataChannel( - configuration_.media_config, rtp_transport, signaling_thread(), - mid, SrtpRequired(), GetCryptoOptions(), &ssrc_generator_)); - if (!data_channel_controller_.rtp_data_channel()) { - return false; - } - data_channel_controller_.rtp_data_channel() - ->SignalDtlsSrtpSetupFailure.connect( - this, &PeerConnection::OnDtlsSrtpSetupFailure); - data_channel_controller_.rtp_data_channel()->SignalSentPacket.connect( - this, &PeerConnection::OnSentPacket_w); - data_channel_controller_.rtp_data_channel()->SetRtpTransport( - rtp_transport); - have_pending_rtp_data_channel_ = true; - return true; + bool content_found = false; + const ContentInfos& contents = local_description()->description()->contents(); + for (size_t index = 0; index < contents.size(); ++index) { + if (contents[index].name == content_name) { + *sdp_mline_index = static_cast(index); + content_found = true; + break; + } } - return false; + return content_found; } Call::Stats PeerConnection::GetCallStats() { if (!worker_thread()->IsCurrent()) { return worker_thread()->Invoke( - RTC_FROM_HERE, rtc::Bind(&PeerConnection::GetCallStats, this)); + RTC_FROM_HERE, [this] { return GetCallStats(); }); } RTC_DCHECK_RUN_ON(worker_thread()); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; if (call_) { return call_->GetStats(); } else { @@ -6725,6 +2397,16 @@ bool PeerConnection::SetupDataChannelTransport_n(const std::string& mid) { 
data_channel_controller_.set_data_channel_transport(transport); data_channel_controller_.SetupDataChannelTransport_n(); sctp_mid_n_ = mid; + cricket::DtlsTransportInternal* dtls_transport = + transport_controller_->GetDtlsTransport(mid); + if (dtls_transport) { + signaling_thread()->PostTask( + ToQueuedTask(signaling_thread_safety_.flag(), + [this, name = dtls_transport->transport_name()] { + RTC_DCHECK_RUN_ON(signaling_thread()); + sctp_transport_name_s_ = std::move(name); + })); + } // Note: setting the data sink and checking initial state must be done last, // after setting up the data channel. Setting the data sink may trigger @@ -6735,34 +2417,32 @@ bool PeerConnection::SetupDataChannelTransport_n(const std::string& mid) { } void PeerConnection::TeardownDataChannelTransport_n() { - if (!sctp_mid_n_ && !data_channel_controller_.data_channel_transport()) { - return; + if (sctp_mid_n_) { + // |sctp_mid_| may still be active through an SCTP transport. If not, unset + // it. + RTC_LOG(LS_INFO) << "Tearing down data channel transport for mid=" + << *sctp_mid_n_; + sctp_mid_n_.reset(); } - RTC_LOG(LS_INFO) << "Tearing down data channel transport for mid=" - << *sctp_mid_n_; - // |sctp_mid_| may still be active through an SCTP transport. If not, unset - // it. - sctp_mid_n_.reset(); data_channel_controller_.TeardownDataChannelTransport_n(); } // Returns false if bundle is enabled and rtcp_mux is disabled. 
-bool PeerConnection::ValidateBundleSettings(const SessionDescription* desc) { - bool bundle_enabled = desc->HasGroup(cricket::GROUP_TYPE_BUNDLE); - if (!bundle_enabled) +bool PeerConnection::ValidateBundleSettings( + const SessionDescription* desc, + const std::map& + bundle_groups_by_mid) { + if (bundle_groups_by_mid.empty()) return true; - const cricket::ContentGroup* bundle_group = - desc->GetGroupByName(cricket::GROUP_TYPE_BUNDLE); - RTC_DCHECK(bundle_group != NULL); - const cricket::ContentInfos& contents = desc->contents(); for (cricket::ContentInfos::const_iterator citer = contents.begin(); citer != contents.end(); ++citer) { const cricket::ContentInfo* content = (&*citer); RTC_DCHECK(content != NULL); - if (bundle_group->HasContentName(content->name) && !content->rejected && + auto it = bundle_groups_by_mid.find(content->name); + if (it != bundle_groups_by_mid.end() && !content->rejected && content->type == MediaProtocolType::kRtp) { if (!HasRtcpMuxEnabled(content)) return false; @@ -6772,185 +2452,14 @@ bool PeerConnection::ValidateBundleSettings(const SessionDescription* desc) { return true; } -bool PeerConnection::HasRtcpMuxEnabled(const cricket::ContentInfo* content) { - return content->media_description()->rtcp_mux(); -} - -static RTCError ValidateMids(const cricket::SessionDescription& description) { - std::set mids; - for (const cricket::ContentInfo& content : description.contents()) { - if (content.name.empty()) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "A media section is missing a MID attribute."); - } - if (!mids.insert(content.name).second) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "Duplicate a=mid value '" + content.name + "'."); - } - } - return RTCError::OK(); -} - -RTCError PeerConnection::ValidateSessionDescription( - const SessionDescriptionInterface* sdesc, - cricket::ContentSource source) { - if (session_error() != SessionError::kNone) { - LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, 
GetSessionErrorMsg()); - } - - if (!sdesc || !sdesc->description()) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, kInvalidSdp); - } - - SdpType type = sdesc->GetType(); - if ((source == cricket::CS_LOCAL && !ExpectSetLocalDescription(type)) || - (source == cricket::CS_REMOTE && !ExpectSetRemoteDescription(type))) { - LOG_AND_RETURN_ERROR( - RTCErrorType::INVALID_STATE, - "Called in wrong state: " + GetSignalingStateString(signaling_state())); - } - - RTCError error = ValidateMids(*sdesc->description()); - if (!error.ok()) { - return error; - } - - // Verify crypto settings. - std::string crypto_error; - if (webrtc_session_desc_factory_->SdesPolicy() == cricket::SEC_REQUIRED || - dtls_enabled_) { - RTCError crypto_error = VerifyCrypto(sdesc->description(), dtls_enabled_); - if (!crypto_error.ok()) { - return crypto_error; - } - } - - // Verify ice-ufrag and ice-pwd. - if (!VerifyIceUfragPwdPresent(sdesc->description())) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - kSdpWithoutIceUfragPwd); - } - - if (!ValidateBundleSettings(sdesc->description())) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - kBundleWithoutRtcpMux); - } - - // TODO(skvlad): When the local rtcp-mux policy is Require, reject any - // m-lines that do not rtcp-mux enabled. - - // Verify m-lines in Answer when compared against Offer. - if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { - // With an answer we want to compare the new answer session description with - // the offer's session description from the current negotiation. - const cricket::SessionDescription* offer_desc = - (source == cricket::CS_LOCAL) ? 
remote_description()->description() - : local_description()->description(); - if (!MediaSectionsHaveSameCount(*offer_desc, *sdesc->description()) || - !MediaSectionsInSameOrder(*offer_desc, nullptr, *sdesc->description(), - type)) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - kMlineMismatchInAnswer); - } - } else { - // The re-offers should respect the order of m= sections in current - // description. See RFC3264 Section 8 paragraph 4 for more details. - // With a re-offer, either the current local or current remote descriptions - // could be the most up to date, so we would like to check against both of - // them if they exist. It could be the case that one of them has a 0 port - // for a media section, but the other does not. This is important to check - // against in the case that we are recycling an m= section. - const cricket::SessionDescription* current_desc = nullptr; - const cricket::SessionDescription* secondary_current_desc = nullptr; - if (local_description()) { - current_desc = local_description()->description(); - if (remote_description()) { - secondary_current_desc = remote_description()->description(); - } - } else if (remote_description()) { - current_desc = remote_description()->description(); - } - if (current_desc && - !MediaSectionsInSameOrder(*current_desc, secondary_current_desc, - *sdesc->description(), type)) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - kMlineMismatchInSubsequentOffer); - } - } - - if (IsUnifiedPlan()) { - // Ensure that each audio and video media section has at most one - // "StreamParams". This will return an error if receiving a session - // description from a "Plan B" endpoint which adds multiple tracks of the - // same type. With Unified Plan, there can only be at most one track per - // media section. 
- for (const ContentInfo& content : sdesc->description()->contents()) { - const MediaContentDescription& desc = *content.media_description(); - if ((desc.type() == cricket::MEDIA_TYPE_AUDIO || - desc.type() == cricket::MEDIA_TYPE_VIDEO) && - desc.streams().size() > 1u) { - LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, - "Media section has more than one track specified " - "with a=ssrc lines which is not supported with " - "Unified Plan."); - } - } - } - - return RTCError::OK(); -} - -bool PeerConnection::ExpectSetLocalDescription(SdpType type) { - PeerConnectionInterface::SignalingState state = signaling_state(); - if (type == SdpType::kOffer) { - return (state == PeerConnectionInterface::kStable) || - (state == PeerConnectionInterface::kHaveLocalOffer); - } else { - RTC_DCHECK(type == SdpType::kPrAnswer || type == SdpType::kAnswer); - return (state == PeerConnectionInterface::kHaveRemoteOffer) || - (state == PeerConnectionInterface::kHaveLocalPrAnswer); - } -} - -bool PeerConnection::ExpectSetRemoteDescription(SdpType type) { - PeerConnectionInterface::SignalingState state = signaling_state(); - if (type == SdpType::kOffer) { - return (state == PeerConnectionInterface::kStable) || - (state == PeerConnectionInterface::kHaveRemoteOffer); - } else { - RTC_DCHECK(type == SdpType::kPrAnswer || type == SdpType::kAnswer); - return (state == PeerConnectionInterface::kHaveLocalOffer) || - (state == PeerConnectionInterface::kHaveRemotePrAnswer); - } -} - -const char* PeerConnection::SessionErrorToString(SessionError error) const { - switch (error) { - case SessionError::kNone: - return "ERROR_NONE"; - case SessionError::kContent: - return "ERROR_CONTENT"; - case SessionError::kTransport: - return "ERROR_TRANSPORT"; - } - RTC_NOTREACHED(); - return ""; -} - -std::string PeerConnection::GetSessionErrorMsg() { - rtc::StringBuilder desc; - desc << kSessionError << SessionErrorToString(session_error()) << ". 
"; - desc << kSessionErrorDesc << session_error_desc() << "."; - return desc.Release(); -} - void PeerConnection::ReportSdpFormatReceived( - const SessionDescriptionInterface& remote_offer) { + const SessionDescriptionInterface& remote_description) { int num_audio_mlines = 0; int num_video_mlines = 0; int num_audio_tracks = 0; int num_video_tracks = 0; - for (const ContentInfo& content : remote_offer.description()->contents()) { + for (const ContentInfo& content : + remote_description.description()->contents()) { cricket::MediaType media_type = content.media_description()->type(); int num_tracks = std::max( 1, static_cast(content.media_description()->streams().size())); @@ -6970,8 +2479,67 @@ void PeerConnection::ReportSdpFormatReceived( } else if (num_audio_tracks > 0 || num_video_tracks > 0) { format = kSdpFormatReceivedSimple; } - RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.SdpFormatReceived", format, - kSdpFormatReceivedMax); + switch (remote_description.GetType()) { + case SdpType::kOffer: + // Historically only offers were counted. 
+ RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.SdpFormatReceived", + format, kSdpFormatReceivedMax); + break; + case SdpType::kAnswer: + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.SdpFormatReceivedAnswer", + format, kSdpFormatReceivedMax); + break; + default: + RTC_LOG(LS_ERROR) << "Can not report SdpFormatReceived for " + << SdpTypeToString(remote_description.GetType()); + break; + } +} + +void PeerConnection::ReportSdpBundleUsage( + const SessionDescriptionInterface& remote_description) { + RTC_DCHECK_RUN_ON(signaling_thread()); + + bool using_bundle = + remote_description.description()->HasGroup(cricket::GROUP_TYPE_BUNDLE); + int num_audio_mlines = 0; + int num_video_mlines = 0; + int num_data_mlines = 0; + for (const ContentInfo& content : + remote_description.description()->contents()) { + cricket::MediaType media_type = content.media_description()->type(); + if (media_type == cricket::MEDIA_TYPE_AUDIO) { + num_audio_mlines += 1; + } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { + num_video_mlines += 1; + } else if (media_type == cricket::MEDIA_TYPE_DATA) { + num_data_mlines += 1; + } + } + bool simple = num_audio_mlines <= 1 && num_video_mlines <= 1; + BundleUsage usage = kBundleUsageMax; + if (num_audio_mlines == 0 && num_video_mlines == 0) { + if (num_data_mlines > 0) { + usage = using_bundle ? kBundleUsageBundleDatachannelOnly + : kBundleUsageNoBundleDatachannelOnly; + } else { + usage = kBundleUsageEmpty; + } + } else if (configuration_.sdp_semantics == SdpSemantics::kPlanB) { + // In plan-b, simple/complex usage will not show up in the number of + // m-lines or BUNDLE. + usage = using_bundle ? kBundleUsageBundlePlanB : kBundleUsageNoBundlePlanB; + } else { + if (simple) { + usage = + using_bundle ? kBundleUsageBundleSimple : kBundleUsageNoBundleSimple; + } else { + usage = using_bundle ? 
kBundleUsageBundleComplex + : kBundleUsageNoBundleComplex; + } + } + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.BundleUsage", usage, + kBundleUsageMax); } void PeerConnection::ReportIceCandidateCollected( @@ -6988,9 +2556,62 @@ void PeerConnection::ReportIceCandidateCollected( } } +void PeerConnection::NoteUsageEvent(UsageEvent event) { + RTC_DCHECK_RUN_ON(signaling_thread()); + usage_pattern_.NoteUsageEvent(event); +} + +// Asynchronously adds remote candidates on the network thread. +void PeerConnection::AddRemoteCandidate(const std::string& mid, + const cricket::Candidate& candidate) { + RTC_DCHECK_RUN_ON(signaling_thread()); + + network_thread()->PostTask(ToQueuedTask( + network_thread_safety_, [this, mid = mid, candidate = candidate] { + RTC_DCHECK_RUN_ON(network_thread()); + std::vector candidates = {candidate}; + RTCError error = + transport_controller_->AddRemoteCandidates(mid, candidates); + if (error.ok()) { + signaling_thread()->PostTask(ToQueuedTask( + signaling_thread_safety_.flag(), + [this, candidate = std::move(candidate)] { + ReportRemoteIceCandidateAdded(candidate); + // Candidates successfully submitted for checking. + if (ice_connection_state() == + PeerConnectionInterface::kIceConnectionNew || + ice_connection_state() == + PeerConnectionInterface::kIceConnectionDisconnected) { + // If state is New, then the session has just gotten its first + // remote ICE candidates, so go to Checking. If state is + // Disconnected, the session is re-using old candidates or + // receiving additional ones, so go to Checking. If state is + // Connected, stay Connected. + // TODO(bemasc): If state is Connected, and the new candidates + // are for a newly added transport, then the state actually + // _should_ move to checking. Add a way to distinguish that + // case. + SetIceConnectionState( + PeerConnectionInterface::kIceConnectionChecking); + } + // TODO(bemasc): If state is Completed, go back to Connected. 
+ })); + } else { + RTC_LOG(LS_WARNING) << error.message(); + } + })); +} + +void PeerConnection::ReportUsagePattern() const { + usage_pattern_.ReportUsagePattern(observer_); +} + void PeerConnection::ReportRemoteIceCandidateAdded( const cricket::Candidate& candidate) { + RTC_DCHECK_RUN_ON(signaling_thread()); + NoteUsageEvent(UsageEvent::REMOTE_CANDIDATE_ADDED); + if (candidate.address().IsPrivateIP()) { NoteUsageEvent(UsageEvent::REMOTE_PRIVATE_CANDIDATE_ADDED); } @@ -7002,97 +2623,11 @@ void PeerConnection::ReportRemoteIceCandidateAdded( } } -void PeerConnection::NoteUsageEvent(UsageEvent event) { - RTC_DCHECK_RUN_ON(signaling_thread()); - usage_event_accumulator_ |= static_cast(event); -} - -void PeerConnection::ReportUsagePattern() const { - RTC_DLOG(LS_INFO) << "Usage signature is " << usage_event_accumulator_; - RTC_HISTOGRAM_ENUMERATION_SPARSE("WebRTC.PeerConnection.UsagePattern", - usage_event_accumulator_, - static_cast(UsageEvent::MAX_VALUE)); - const int bad_bits = - static_cast(UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED) | - static_cast(UsageEvent::CANDIDATE_COLLECTED); - const int good_bits = - static_cast(UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED) | - static_cast(UsageEvent::REMOTE_CANDIDATE_ADDED) | - static_cast(UsageEvent::ICE_STATE_CONNECTED); - if ((usage_event_accumulator_ & bad_bits) == bad_bits && - (usage_event_accumulator_ & good_bits) == 0) { - // If called after close(), we can't report, because observer may have - // been deallocated, and therefore pointer is null. Write to log instead. 
- if (observer_) { - Observer()->OnInterestingUsage(usage_event_accumulator_); - } else { - RTC_LOG(LS_INFO) << "Interesting usage signature " - << usage_event_accumulator_ - << " observed after observer shutdown"; - } - } -} - -void PeerConnection::ReportNegotiatedSdpSemantics( - const SessionDescriptionInterface& answer) { - SdpSemanticNegotiated semantics_negotiated; - switch (answer.description()->msid_signaling()) { - case 0: - semantics_negotiated = kSdpSemanticNegotiatedNone; - break; - case cricket::kMsidSignalingMediaSection: - semantics_negotiated = kSdpSemanticNegotiatedUnifiedPlan; - break; - case cricket::kMsidSignalingSsrcAttribute: - semantics_negotiated = kSdpSemanticNegotiatedPlanB; - break; - case cricket::kMsidSignalingMediaSection | - cricket::kMsidSignalingSsrcAttribute: - semantics_negotiated = kSdpSemanticNegotiatedMixed; - break; - default: - RTC_NOTREACHED(); - } - RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.SdpSemanticNegotiated", - semantics_negotiated, kSdpSemanticNegotiatedMax); -} - -// We need to check the local/remote description for the Transport instead of -// the session, because a new Transport added during renegotiation may have -// them unset while the session has them set from the previous negotiation. -// Not doing so may trigger the auto generation of transport description and -// mess up DTLS identity information, ICE credential, etc. -bool PeerConnection::ReadyToUseRemoteCandidate( - const IceCandidateInterface* candidate, - const SessionDescriptionInterface* remote_desc, - bool* valid) { - *valid = true; - - const SessionDescriptionInterface* current_remote_desc = - remote_desc ? remote_desc : remote_description(); - - if (!current_remote_desc) { - return false; - } - - RTCErrorOr result = - FindContentInfo(current_remote_desc, candidate); - if (!result.ok()) { - RTC_LOG(LS_ERROR) << "ReadyToUseRemoteCandidate: Invalid candidate. 
" - << result.error().message(); - - *valid = false; - return false; - } - - std::string transport_name = GetTransportName(result.value()->name); - return !transport_name.empty(); -} - bool PeerConnection::SrtpRequired() const { - return !use_datagram_transport_ && - (dtls_enabled_ || - webrtc_session_desc_factory_->SdesPolicy() == cricket::SEC_REQUIRED); + RTC_DCHECK_RUN_ON(signaling_thread()); + return (dtls_enabled_ || + sdp_handler_->webrtc_session_desc_factory()->SdesPolicy() == + cricket::SEC_REQUIRED); } void PeerConnection::OnTransportControllerGatheringState( @@ -7102,13 +2637,21 @@ void PeerConnection::OnTransportControllerGatheringState( OnIceGatheringChange(PeerConnectionInterface::kIceGatheringGathering); } else if (state == cricket::kIceGatheringComplete) { OnIceGatheringChange(PeerConnectionInterface::kIceGatheringComplete); + } else if (state == cricket::kIceGatheringNew) { + OnIceGatheringChange(PeerConnectionInterface::kIceGatheringNew); + } else { + RTC_LOG(LS_ERROR) << "Unknown state received: " << state; + RTC_NOTREACHED(); } } +// Runs on network_thread(). 
void PeerConnection::ReportTransportStats() { + TRACE_EVENT0("webrtc", "PeerConnection::ReportTransportStats"); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; std::map> media_types_by_transport_name; - for (const auto& transceiver : transceivers_) { + for (const auto& transceiver : rtp_manager()->transceivers()->UnsafeList()) { if (transceiver->internal()->channel()) { const std::string& transport_name = transceiver->internal()->channel()->transport_name(); @@ -7116,15 +2659,14 @@ void PeerConnection::ReportTransportStats() { transceiver->media_type()); } } - if (rtp_data_channel()) { - media_types_by_transport_name[rtp_data_channel()->transport_name()].insert( - cricket::MEDIA_TYPE_DATA); - } - absl::optional transport_name = sctp_transport_name(); - if (transport_name) { - media_types_by_transport_name[*transport_name].insert( - cricket::MEDIA_TYPE_DATA); + if (sctp_mid_n_) { + cricket::DtlsTransportInternal* dtls_transport = + transport_controller_->GetDtlsTransport(*sctp_mid_n_); + if (dtls_transport) { + media_types_by_transport_name[dtls_transport->transport_name()].insert( + cricket::MEDIA_TYPE_DATA); + } } for (const auto& entry : media_types_by_transport_name) { @@ -7133,12 +2675,14 @@ void PeerConnection::ReportTransportStats() { cricket::TransportStats stats; if (transport_controller_->GetStats(transport_name, &stats)) { ReportBestConnectionState(stats); - ReportNegotiatedCiphers(stats, media_types); + ReportNegotiatedCiphers(dtls_enabled_, stats, media_types); } } } + // Walk through the ConnectionInfos to gather best connection usage // for IPv4 and IPv6. 
+// static (no member state required) void PeerConnection::ReportBestConnectionState( const cricket::TransportStats& stats) { for (const cricket::TransportChannelStats& channel_stats : @@ -7164,7 +2708,7 @@ void PeerConnection::ReportBestConnectionState( GetIceCandidatePairCounter(local, remote), kIceCandidatePairMax); } else { - RTC_CHECK(0); + RTC_CHECK_NOTREACHED(); } // Increment the counter for IP type. @@ -7186,10 +2730,12 @@ void PeerConnection::ReportBestConnectionState( } } +// static void PeerConnection::ReportNegotiatedCiphers( + bool dtls_enabled, const cricket::TransportStats& stats, const std::set& media_types) { - if (!dtls_enabled_ || stats.channel_stats.empty()) { + if (!dtls_enabled || stats.channel_stats.empty()) { return; } @@ -7251,85 +2797,6 @@ void PeerConnection::ReportNegotiatedCiphers( } } -void PeerConnection::OnSentPacket_w(const rtc::SentPacket& sent_packet) { - RTC_DCHECK_RUN_ON(worker_thread()); - RTC_DCHECK(call_); - call_->OnSentPacket(sent_packet); -} - -const std::string PeerConnection::GetTransportName( - const std::string& content_name) { - cricket::ChannelInterface* channel = GetChannel(content_name); - if (channel) { - return channel->transport_name(); - } - if (data_channel_controller_.data_channel_transport()) { - RTC_DCHECK(sctp_mid_s_); - if (content_name == *sctp_mid_s_) { - return *sctp_transport_name(); - } - } - // Return an empty string if failed to retrieve the transport name. 
- return ""; -} - -void PeerConnection::DestroyTransceiverChannel( - rtc::scoped_refptr> - transceiver) { - RTC_DCHECK(transceiver); - - cricket::ChannelInterface* channel = transceiver->internal()->channel(); - if (channel) { - transceiver->internal()->SetChannel(nullptr); - DestroyChannelInterface(channel); - } -} - -void PeerConnection::DestroyDataChannelTransport() { - if (data_channel_controller_.rtp_data_channel()) { - data_channel_controller_.OnTransportChannelClosed(); - DestroyChannelInterface(data_channel_controller_.rtp_data_channel()); - data_channel_controller_.set_rtp_data_channel(nullptr); - } - - // Note: Cannot use rtc::Bind to create a functor to invoke because it will - // grab a reference to this PeerConnection. If this is called from the - // PeerConnection destructor, the RefCountedObject vtable will have already - // been destroyed (since it is a subclass of PeerConnection) and using - // rtc::Bind will cause "Pure virtual function called" error to appear. - - if (sctp_mid_s_) { - data_channel_controller_.OnTransportChannelClosed(); - network_thread()->Invoke(RTC_FROM_HERE, [this] { - RTC_DCHECK_RUN_ON(network_thread()); - TeardownDataChannelTransport_n(); - }); - sctp_mid_s_.reset(); - } -} - -void PeerConnection::DestroyChannelInterface( - cricket::ChannelInterface* channel) { - RTC_DCHECK(channel); - switch (channel->media_type()) { - case cricket::MEDIA_TYPE_AUDIO: - channel_manager()->DestroyVoiceChannel( - static_cast(channel)); - break; - case cricket::MEDIA_TYPE_VIDEO: - channel_manager()->DestroyVideoChannel( - static_cast(channel)); - break; - case cricket::MEDIA_TYPE_DATA: - channel_manager()->DestroyRtpDataChannel( - static_cast(channel)); - break; - default: - RTC_NOTREACHED() << "Unknown media type: " << channel->media_type(); - break; - } -} - bool PeerConnection::OnTransportChanged( const std::string& mid, RtpTransportInternal* rtp_transport, @@ -7341,16 +2808,20 @@ bool PeerConnection::OnTransportChanged( if (base_channel) { 
ret = base_channel->SetRtpTransport(rtp_transport); } + if (mid == sctp_mid_n_) { data_channel_controller_.OnTransportChanged(data_channel_transport); + if (dtls_transport) { + signaling_thread()->PostTask(ToQueuedTask( + signaling_thread_safety_.flag(), + [this, name = dtls_transport->internal()->transport_name()] { + RTC_DCHECK_RUN_ON(signaling_thread()); + sctp_transport_name_s_ = std::move(name); + })); + } } - return ret; -} -void PeerConnection::OnSetStreams() { - RTC_DCHECK_RUN_ON(signaling_thread()); - if (IsUnifiedPlan()) - UpdateNegotiationNeeded(); + return ret; } PeerConnectionObserver* PeerConnection::Observer() const { @@ -7359,12 +2830,30 @@ PeerConnectionObserver* PeerConnection::Observer() const { return observer_; } +void PeerConnection::StartSctpTransport(int local_port, + int remote_port, + int max_message_size) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (!sctp_mid_s_) + return; + + network_thread()->PostTask(ToQueuedTask( + network_thread_safety_, + [this, mid = *sctp_mid_s_, local_port, remote_port, max_message_size] { + rtc::scoped_refptr sctp_transport = + transport_controller()->GetSctpTransport(mid); + if (sctp_transport) + sctp_transport->Start(local_port, remote_port, max_message_size); + })); +} + CryptoOptions PeerConnection::GetCryptoOptions() { + RTC_DCHECK_RUN_ON(signaling_thread()); // TODO(bugs.webrtc.org/9891) - Remove PeerConnectionFactory::CryptoOptions // after it has been removed. return configuration_.crypto_options.has_value() ? 
*configuration_.crypto_options - : factory_->options().crypto_options; + : options_.crypto_options; } void PeerConnection::ClearStatsCache() { @@ -7374,276 +2863,29 @@ void PeerConnection::ClearStatsCache() { } } -void PeerConnection::RequestUsagePatternReportForTesting() { - signaling_thread()->Post(RTC_FROM_HERE, this, MSG_REPORT_USAGE_PATTERN, - nullptr); -} - -void PeerConnection::UpdateNegotiationNeeded() { +bool PeerConnection::ShouldFireNegotiationNeededEvent(uint32_t event_id) { RTC_DCHECK_RUN_ON(signaling_thread()); - if (!IsUnifiedPlan()) { - Observer()->OnRenegotiationNeeded(); - return; - } - - // If connection's [[IsClosed]] slot is true, abort these steps. - if (IsClosed()) - return; - - // If connection's signaling state is not "stable", abort these steps. - if (signaling_state() != kStable) - return; - - // NOTE - // The negotiation-needed flag will be updated once the state transitions to - // "stable", as part of the steps for setting an RTCSessionDescription. - - // If the result of checking if negotiation is needed is false, clear the - // negotiation-needed flag by setting connection's [[NegotiationNeeded]] slot - // to false, and abort these steps. - bool is_negotiation_needed = CheckIfNegotiationIsNeeded(); - if (!is_negotiation_needed) { - is_negotiation_needed_ = false; - return; - } - - // If connection's [[NegotiationNeeded]] slot is already true, abort these - // steps. - if (is_negotiation_needed_) - return; - - // Set connection's [[NegotiationNeeded]] slot to true. - is_negotiation_needed_ = true; - - // Queue a task that runs the following steps: - // If connection's [[IsClosed]] slot is true, abort these steps. - // If connection's [[NegotiationNeeded]] slot is false, abort these steps. - // Fire an event named negotiationneeded at connection. 
- Observer()->OnRenegotiationNeeded(); + return sdp_handler_->ShouldFireNegotiationNeededEvent(event_id); } -bool PeerConnection::CheckIfNegotiationIsNeeded() { - RTC_DCHECK_RUN_ON(signaling_thread()); - // 1. If any implementation-specific negotiation is required, as described at - // the start of this section, return true. - - // 2. If connection's [[RestartIce]] internal slot is true, return true. - if (local_ice_credentials_to_replace_->HasIceCredentials()) { - return true; - } - - // 3. Let description be connection.[[CurrentLocalDescription]]. - const SessionDescriptionInterface* description = current_local_description(); - if (!description) - return true; - - // 4. If connection has created any RTCDataChannels, and no m= section in - // description has been negotiated yet for data, return true. - if (data_channel_controller_.HasSctpDataChannels()) { - if (!cricket::GetFirstDataContent(description->description()->contents())) - return true; - } - - // 5. For each transceiver in connection's set of transceivers, perform the - // following checks: - for (const auto& transceiver : transceivers_) { - const ContentInfo* current_local_msection = - FindTransceiverMSection(transceiver.get(), description); - - const ContentInfo* current_remote_msection = FindTransceiverMSection( - transceiver.get(), current_remote_description()); - - // 5.3 If transceiver is stopped and is associated with an m= section, - // but the associated m= section is not yet rejected in - // connection.[[CurrentLocalDescription]] or - // connection.[[CurrentRemoteDescription]], return true. - if (transceiver->stopped()) { - if (current_local_msection && !current_local_msection->rejected && - ((current_remote_msection && !current_remote_msection->rejected) || - !current_remote_msection)) { - return true; - } - continue; - } - - // 5.1 If transceiver isn't stopped and isn't yet associated with an m= - // section in description, return true. 
- if (!current_local_msection) - return true; - - const MediaContentDescription* current_local_media_description = - current_local_msection->media_description(); - // 5.2 If transceiver isn't stopped and is associated with an m= section - // in description then perform the following checks: - - // 5.2.1 If transceiver.[[Direction]] is "sendrecv" or "sendonly", and the - // associated m= section in description either doesn't contain a single - // "a=msid" line, or the number of MSIDs from the "a=msid" lines in this - // m= section, or the MSID values themselves, differ from what is in - // transceiver.sender.[[AssociatedMediaStreamIds]], return true. - if (RtpTransceiverDirectionHasSend(transceiver->direction())) { - if (current_local_media_description->streams().size() == 0) - return true; - - std::vector msection_msids; - for (const auto& stream : current_local_media_description->streams()) { - for (const std::string& msid : stream.stream_ids()) - msection_msids.push_back(msid); - } - - std::vector transceiver_msids = - transceiver->sender()->stream_ids(); - if (msection_msids.size() != transceiver_msids.size()) - return true; - - absl::c_sort(transceiver_msids); - absl::c_sort(msection_msids); - if (transceiver_msids != msection_msids) - return true; - } - - // 5.2.2 If description is of type "offer", and the direction of the - // associated m= section in neither connection.[[CurrentLocalDescription]] - // nor connection.[[CurrentRemoteDescription]] matches - // transceiver.[[Direction]], return true. 
- if (description->GetType() == SdpType::kOffer) { - if (!current_remote_description()) - return true; - - if (!current_remote_msection) - return true; - - RtpTransceiverDirection current_local_direction = - current_local_media_description->direction(); - RtpTransceiverDirection current_remote_direction = - current_remote_msection->media_description()->direction(); - if (transceiver->direction() != current_local_direction && - transceiver->direction() != - RtpTransceiverDirectionReversed(current_remote_direction)) { - return true; - } - } - - // 5.2.3 If description is of type "answer", and the direction of the - // associated m= section in the description does not match - // transceiver.[[Direction]] intersected with the offered direction (as - // described in [JSEP] (section 5.3.1.)), return true. - if (description->GetType() == SdpType::kAnswer) { - if (!remote_description()) - return true; - - const ContentInfo* offered_remote_msection = - FindTransceiverMSection(transceiver.get(), remote_description()); - - RtpTransceiverDirection offered_direction = - offered_remote_msection - ? offered_remote_msection->media_description()->direction() - : RtpTransceiverDirection::kInactive; - - if (current_local_media_description->direction() != - (RtpTransceiverDirectionIntersection( - transceiver->direction(), - RtpTransceiverDirectionReversed(offered_direction)))) { - return true; - } - } - } - - // If all the preceding checks were performed and true was not returned, - // nothing remains to be negotiated; return false. 
- return false; +void PeerConnection::RequestUsagePatternReportForTesting() { + message_handler_.RequestUsagePatternReport( + [this]() { + RTC_DCHECK_RUN_ON(signaling_thread()); + ReportUsagePattern(); + }, + /* delay_ms= */ 0); } -RTCError PeerConnection::Rollback(SdpType sdp_type) { - auto state = signaling_state(); - if (state != PeerConnectionInterface::kHaveLocalOffer && - state != PeerConnectionInterface::kHaveRemoteOffer) { - return RTCError(RTCErrorType::INVALID_STATE, - "Called in wrong signalingState: " + - GetSignalingStateString(signaling_state())); - } - RTC_DCHECK_RUN_ON(signaling_thread()); - RTC_DCHECK(IsUnifiedPlan()); - std::vector> all_added_streams; - std::vector> all_removed_streams; - std::vector> removed_receivers; - - for (auto&& transceivers_stable_state_pair : - transceiver_stable_states_by_transceivers_) { - auto transceiver = transceivers_stable_state_pair.first; - auto state = transceivers_stable_state_pair.second; - - if (state.remote_stream_ids()) { - std::vector> added_streams; - std::vector> removed_streams; - SetAssociatedRemoteStreams(transceiver->internal()->receiver_internal(), - state.remote_stream_ids().value(), - &added_streams, &removed_streams); - all_added_streams.insert(all_added_streams.end(), added_streams.begin(), - added_streams.end()); - all_removed_streams.insert(all_removed_streams.end(), - removed_streams.begin(), - removed_streams.end()); - if (!state.has_m_section() && !state.newly_created()) { - continue; - } - } - - RTC_DCHECK(transceiver->internal()->mid().has_value()); - DestroyTransceiverChannel(transceiver); - - if (signaling_state() == PeerConnectionInterface::kHaveRemoteOffer && - transceiver->receiver()) { - removed_receivers.push_back(transceiver->receiver()); - } - if (state.newly_created()) { - if (transceiver->internal()->reused_for_addtrack()) { - transceiver->internal()->set_created_by_addtrack(true); - } else { - int remaining_transceiver_count = 0; - for (auto&& t : transceivers_) { - if (t != 
transceiver) { - transceivers_[remaining_transceiver_count++] = t; - } - } - transceivers_.resize(remaining_transceiver_count); - } - } - transceiver->internal()->sender_internal()->set_transport(nullptr); - transceiver->internal()->receiver_internal()->set_transport(nullptr); - transceiver->internal()->set_mid(state.mid()); - transceiver->internal()->set_mline_index(state.mline_index()); - } - transport_controller_->RollbackTransports(); - if (have_pending_rtp_data_channel_) { - DestroyDataChannelTransport(); - have_pending_rtp_data_channel_ = false; - } - transceiver_stable_states_by_transceivers_.clear(); - pending_local_description_.reset(); - pending_remote_description_.reset(); - ChangeSignalingState(PeerConnectionInterface::kStable); - - // Once all processing has finished, fire off callbacks. - for (const auto& receiver : removed_receivers) { - Observer()->OnRemoveTrack(receiver); - } - for (const auto& stream : all_added_streams) { - Observer()->OnAddStream(stream); - } - for (const auto& stream : all_removed_streams) { - Observer()->OnRemoveStream(stream); - } - - // The assumption is that in case of implicit rollback UpdateNegotiationNeeded - // gets called in SetRemoteDescription. 
- if (sdp_type == SdpType::kRollback) { - UpdateNegotiationNeeded(); - if (is_negotiation_needed_) { - Observer()->OnRenegotiationNeeded(); - } - } - return RTCError::OK(); +std::function +PeerConnection::InitializeRtcpCallback() { + RTC_DCHECK_RUN_ON(network_thread()); + return [this](const rtc::CopyOnWriteBuffer& packet, int64_t packet_time_us) { + RTC_DCHECK_RUN_ON(network_thread()); + call_ptr_->Receiver()->DeliverPacket(MediaType::ANY, packet, + packet_time_us); + }; } } // namespace webrtc diff --git a/pc/peer_connection.h b/pc/peer_connection.h index f3102572fb..4476c5d8e1 100644 --- a/pc/peer_connection.h +++ b/pc/peer_connection.h @@ -11,6 +11,9 @@ #ifndef PC_PEER_CONNECTION_H_ #define PC_PEER_CONNECTION_H_ +#include + +#include #include #include #include @@ -18,33 +21,86 @@ #include #include +#include "absl/types/optional.h" +#include "api/adaptation/resource.h" +#include "api/async_dns_resolver.h" +#include "api/async_resolver_factory.h" +#include "api/audio_options.h" +#include "api/candidate.h" +#include "api/crypto/crypto_options.h" +#include "api/data_channel_interface.h" +#include "api/dtls_transport_interface.h" +#include "api/ice_transport_interface.h" +#include "api/jsep.h" +#include "api/media_stream_interface.h" +#include "api/media_types.h" +#include "api/packet_socket_factory.h" #include "api/peer_connection_interface.h" +#include "api/rtc_error.h" +#include "api/rtc_event_log/rtc_event_log.h" +#include "api/rtc_event_log_output.h" +#include "api/rtp_parameters.h" +#include "api/rtp_receiver_interface.h" +#include "api/rtp_sender_interface.h" +#include "api/rtp_transceiver_interface.h" +#include "api/scoped_refptr.h" +#include "api/sctp_transport_interface.h" +#include "api/sequence_checker.h" +#include "api/set_local_description_observer_interface.h" +#include "api/set_remote_description_observer_interface.h" +#include "api/stats/rtc_stats_collector_callback.h" +#include "api/transport/bitrate_settings.h" #include 
"api/transport/data_channel_transport_interface.h" +#include "api/transport/enums.h" #include "api/turn_customizer.h" +#include "api/video/video_bitrate_allocator_factory.h" +#include "call/call.h" +#include "media/base/media_channel.h" +#include "media/base/media_engine.h" +#include "p2p/base/ice_transport_internal.h" +#include "p2p/base/port.h" +#include "p2p/base/port_allocator.h" +#include "p2p/base/transport_description.h" +#include "pc/channel.h" +#include "pc/channel_interface.h" +#include "pc/channel_manager.h" +#include "pc/connection_context.h" #include "pc/data_channel_controller.h" -#include "pc/ice_server_parsing.h" +#include "pc/data_channel_utils.h" +#include "pc/dtls_transport.h" #include "pc/jsep_transport_controller.h" -#include "pc/peer_connection_factory.h" #include "pc/peer_connection_internal.h" +#include "pc/peer_connection_message_handler.h" #include "pc/rtc_stats_collector.h" +#include "pc/rtp_receiver.h" #include "pc/rtp_sender.h" #include "pc/rtp_transceiver.h" +#include "pc/rtp_transmission_manager.h" +#include "pc/rtp_transport_internal.h" +#include "pc/sctp_data_channel.h" #include "pc/sctp_transport.h" +#include "pc/sdp_offer_answer.h" +#include "pc/session_description.h" #include "pc/stats_collector.h" #include "pc/stream_collection.h" -#include "pc/webrtc_session_description_factory.h" -#include "rtc_base/experiments/field_trial_parser.h" -#include "rtc_base/operations_chain.h" -#include "rtc_base/race_checker.h" +#include "pc/transceiver_list.h" +#include "pc/transport_stats.h" +#include "pc/usage_pattern.h" +#include "rtc_base/checks.h" +#include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/network/sent_packet.h" +#include "rtc_base/rtc_certificate.h" +#include "rtc_base/ssl_certificate.h" +#include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" #include 
"rtc_base/unique_id_generator.h" #include "rtc_base/weak_ptr.h" namespace webrtc { -class MediaStreamObserver; -class VideoRtpReceiver; -class RtcEventLog; - // PeerConnection is the implementation of the PeerConnection object as defined // by the PeerConnectionInterface API surface. // The class currently is solely responsible for the following: @@ -61,62 +117,19 @@ class RtcEventLog; // - Generating stats. class PeerConnection : public PeerConnectionInternal, public JsepTransportController::Observer, - public RtpSenderBase::SetStreamsObserver, - public rtc::MessageHandler, public sigslot::has_slots<> { public: - // A bit in the usage pattern is registered when its defining event occurs at - // least once. - enum class UsageEvent : int { - TURN_SERVER_ADDED = 0x01, - STUN_SERVER_ADDED = 0x02, - DATA_ADDED = 0x04, - AUDIO_ADDED = 0x08, - VIDEO_ADDED = 0x10, - // |SetLocalDescription| returns successfully. - SET_LOCAL_DESCRIPTION_SUCCEEDED = 0x20, - // |SetRemoteDescription| returns successfully. - SET_REMOTE_DESCRIPTION_SUCCEEDED = 0x40, - // A local candidate (with type host, server-reflexive, or relay) is - // collected. - CANDIDATE_COLLECTED = 0x80, - // A remote candidate is successfully added via |AddIceCandidate|. - ADD_ICE_CANDIDATE_SUCCEEDED = 0x100, - ICE_STATE_CONNECTED = 0x200, - CLOSE_CALLED = 0x400, - // A local candidate with private IP is collected. - PRIVATE_CANDIDATE_COLLECTED = 0x800, - // A remote candidate with private IP is added, either via AddiceCandidate - // or from the remote description. - REMOTE_PRIVATE_CANDIDATE_ADDED = 0x1000, - // A local mDNS candidate is collected. - MDNS_CANDIDATE_COLLECTED = 0x2000, - // A remote mDNS candidate is added, either via AddIceCandidate or from the - // remote description. - REMOTE_MDNS_CANDIDATE_ADDED = 0x4000, - // A local candidate with IPv6 address is collected. 
- IPV6_CANDIDATE_COLLECTED = 0x8000, - // A remote candidate with IPv6 address is added, either via AddIceCandidate - // or from the remote description. - REMOTE_IPV6_CANDIDATE_ADDED = 0x10000, - // A remote candidate (with type host, server-reflexive, or relay) is - // successfully added, either via AddIceCandidate or from the remote - // description. - REMOTE_CANDIDATE_ADDED = 0x20000, - // An explicit host-host candidate pair is selected, i.e. both the local and - // the remote candidates have the host type. This does not include candidate - // pairs formed with equivalent prflx remote candidates, e.g. a host-prflx - // pair where the prflx candidate has the same base as a host candidate of - // the remote peer. - DIRECT_CONNECTION_SELECTED = 0x40000, - MAX_VALUE = 0x80000, - }; - - explicit PeerConnection(PeerConnectionFactory* factory, - std::unique_ptr event_log, - std::unique_ptr call); - - bool Initialize( + // Creates a PeerConnection and initializes it with the given values. + // If the initialization fails, the function releases the PeerConnection + // and returns nullptr. + // + // Note that the function takes ownership of dependencies, and will + // either use them or release them, whether it succeeds or fails. + static RTCErrorOr> Create( + rtc::scoped_refptr context, + const PeerConnectionFactoryInterface::Options& options, + std::unique_ptr event_log, + std::unique_ptr call, const PeerConnectionInterface::RTCConfiguration& configuration, PeerConnectionDependencies dependencies); @@ -143,18 +156,6 @@ class PeerConnection : public PeerConnectionInternal, cricket::MediaType media_type, const RtpTransceiverInit& init) override; - // Gets the DTLS SSL certificate associated with the audio transport on the - // remote side. This will become populated once the DTLS connection with the - // peer has been completed, as indicated by the ICE connection state - // transitioning to kIceConnectionCompleted. 
- // Note that this will be removed once we implement RTCDtlsTransport which - // has standardized method for getting this information. - // See https://www.w3.org/TR/webrtc/#rtcdtlstransport-interface - std::unique_ptr GetRemoteAudioSSLCertificate(); - - // Version of the above method that returns the full certificate chain. - std::unique_ptr GetRemoteAudioSSLCertChain(); - rtc::scoped_refptr CreateSender( const std::string& kind, const std::string& stream_id) override; @@ -166,7 +167,7 @@ class PeerConnection : public PeerConnectionInternal, std::vector> GetTransceivers() const override; - rtc::scoped_refptr CreateDataChannel( + RTCErrorOr> CreateDataChannelOrError( const std::string& label, const DataChannelInit* config) override; // WARNING: LEGACY. See peerconnectioninterface.h @@ -207,15 +208,29 @@ class PeerConnection : public PeerConnectionInternal, const RTCOfferAnswerOptions& options) override; void CreateAnswer(CreateSessionDescriptionObserver* observer, const RTCOfferAnswerOptions& options) override; + + void SetLocalDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer) + override; + void SetLocalDescription( + rtc::scoped_refptr observer) + override; + // TODO(https://crbug.com/webrtc/11798): Delete these methods in favor of the + // ones taking SetLocalDescriptionObserverInterface as argument. void SetLocalDescription(SetSessionDescriptionObserver* observer, SessionDescriptionInterface* desc) override; void SetLocalDescription(SetSessionDescriptionObserver* observer) override; - void SetRemoteDescription(SetSessionDescriptionObserver* observer, - SessionDescriptionInterface* desc) override; + void SetRemoteDescription( std::unique_ptr desc, rtc::scoped_refptr observer) override; + // TODO(https://crbug.com/webrtc/11798): Delete this methods in favor of the + // ones taking SetRemoteDescriptionObserverInterface as argument. 
+ void SetRemoteDescription(SetSessionDescriptionObserver* observer, + SessionDescriptionInterface* desc) override; + PeerConnectionInterface::RTCConfiguration GetConfiguration() override; RTCError SetConfiguration( const PeerConnectionInterface::RTCConfiguration& configuration) override; @@ -237,6 +252,8 @@ class PeerConnection : public PeerConnectionInternal, rtc::scoped_refptr GetSctpTransport() const override; + void AddAdaptationResource(rtc::scoped_refptr resource) override; + bool StartRtcEventLog(std::unique_ptr output, int64_t output_period_ms) override; bool StartRtcEventLog(std::unique_ptr output) override; @@ -244,17 +261,17 @@ class PeerConnection : public PeerConnectionInternal, void Close() override; + rtc::Thread* signaling_thread() const final { + return context_->signaling_thread(); + } + // PeerConnectionInternal implementation. rtc::Thread* network_thread() const final { - return factory_->network_thread(); - } - rtc::Thread* worker_thread() const final { return factory_->worker_thread(); } - rtc::Thread* signaling_thread() const final { - return factory_->signaling_thread(); + return context_->network_thread(); } + rtc::Thread* worker_thread() const final { return context_->worker_thread(); } std::string session_id() const override { - RTC_DCHECK_RUN_ON(signaling_thread()); return session_id_; } @@ -267,27 +284,19 @@ class PeerConnection : public PeerConnectionInternal, rtc::scoped_refptr>> GetTransceiversInternal() const override { RTC_DCHECK_RUN_ON(signaling_thread()); - return transceivers_; + return rtp_manager()->transceivers()->List(); } - sigslot::signal1& SignalDataChannelCreated() override { - return data_channel_controller_.SignalDataChannelCreated(); + sigslot::signal1& SignalSctpDataChannelCreated() override { + return data_channel_controller_.SignalSctpDataChannelCreated(); } - cricket::RtpDataChannel* rtp_data_channel() const override { - return data_channel_controller_.rtp_data_channel(); - } - - std::vector> sctp_data_channels() 
- const override { - RTC_DCHECK_RUN_ON(signaling_thread()); - return *data_channel_controller_.sctp_data_channels(); - } + std::vector GetDataChannelStats() const override; absl::optional sctp_transport_name() const override; + absl::optional sctp_mid() const override; cricket::CandidateStatsList GetPooledCandidateStats() const override; - std::map GetTransportNamesByMid() const override; std::map GetTransportStatsByNames( const std::set& transport_names) override; Call::Stats GetCallStats() override; @@ -307,225 +316,103 @@ class PeerConnection : public PeerConnectionInternal, PeerConnectionObserver* Observer() const; bool IsClosed() const { RTC_DCHECK_RUN_ON(signaling_thread()); - return signaling_state_ == PeerConnectionInterface::kClosed; + return !sdp_handler_ || + sdp_handler_->signaling_state() == PeerConnectionInterface::kClosed; } // Get current SSL role used by SCTP's underlying transport. bool GetSctpSslRole(rtc::SSLRole* role); // Handler for the "channel closed" signal - void OnSctpDataChannelClosed(DataChannel* channel); + void OnSctpDataChannelClosed(DataChannelInterface* channel); - // Functions made public for testing. 
- void ReturnHistogramVeryQuicklyForTesting() { + bool ShouldFireNegotiationNeededEvent(uint32_t event_id) override; + + // Functions needed by SdpOfferAnswerHandler + StatsCollector* stats() { RTC_DCHECK_RUN_ON(signaling_thread()); - return_histogram_very_quickly_ = true; + return stats_.get(); } - void RequestUsagePatternReportForTesting(); - absl::optional sctp_mid() { + DataChannelController* data_channel_controller() { RTC_DCHECK_RUN_ON(signaling_thread()); - return sctp_mid_s_; + return &data_channel_controller_; + } + bool dtls_enabled() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return dtls_enabled_; + } + const PeerConnectionInterface::RTCConfiguration* configuration() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return &configuration_; + } + PeerConnectionMessageHandler* message_handler() { + RTC_DCHECK_RUN_ON(signaling_thread()); + return &message_handler_; } - protected: - ~PeerConnection() override; - - private: - class ImplicitCreateSessionDescriptionObserver; - friend class ImplicitCreateSessionDescriptionObserver; - class SetRemoteDescriptionObserverAdapter; - friend class SetRemoteDescriptionObserverAdapter; - - // Represents the [[LocalIceCredentialsToReplace]] internal slot in the spec. - // It makes the next CreateOffer() produce new ICE credentials even if - // RTCOfferAnswerOptions::ice_restart is false. - // https://w3c.github.io/webrtc-pc/#dfn-localufragstoreplace - // TODO(hbos): When JsepTransportController/JsepTransport supports rollback, - // move this type of logic to JsepTransportController/JsepTransport. 
- class LocalIceCredentialsToReplace; - - struct RtpSenderInfo { - RtpSenderInfo() : first_ssrc(0) {} - RtpSenderInfo(const std::string& stream_id, - const std::string sender_id, - uint32_t ssrc) - : stream_id(stream_id), sender_id(sender_id), first_ssrc(ssrc) {} - bool operator==(const RtpSenderInfo& other) { - return this->stream_id == other.stream_id && - this->sender_id == other.sender_id && - this->first_ssrc == other.first_ssrc; - } - std::string stream_id; - std::string sender_id; - // An RtpSender can have many SSRCs. The first one is used as a sort of ID - // for communicating with the lower layers. - uint32_t first_ssrc; - }; - - // Field-trial based configuration for datagram transport. - struct DatagramTransportConfig { - explicit DatagramTransportConfig(const std::string& field_trial) - : enabled("enabled", true), default_value("default_value", false) { - ParseFieldTrial({&enabled, &default_value}, field_trial); - } - - // Whether datagram transport support is enabled at all. Defaults to true, - // allowing datagram transport to be used if (a) the application provides a - // factory for it and (b) the configuration specifies its use. This flag - // provides a kill-switch to force-disable datagram transport across all - // applications, without code changes. - FieldTrialFlag enabled; - - // Whether the datagram transport is enabled or disabled by default. - // Defaults to false, meaning that applications must configure use of - // datagram transport through RTCConfiguration. If set to true, - // applications will use the datagram transport by default (but may still - // explicitly configure themselves not to use it through RTCConfiguration). 
- FieldTrialFlag default_value; - }; + RtpTransmissionManager* rtp_manager() { return rtp_manager_.get(); } + const RtpTransmissionManager* rtp_manager() const { + return rtp_manager_.get(); + } + cricket::ChannelManager* channel_manager() const; - // Field-trial based configuration for datagram transport data channels. - struct DatagramTransportDataChannelConfig { - explicit DatagramTransportDataChannelConfig(const std::string& field_trial) - : enabled("enabled", true), - default_value("default_value", false), - receive_only("receive_only", false) { - ParseFieldTrial({&enabled, &default_value, &receive_only}, field_trial); - } - - // Whether datagram transport data channel support is enabled at all. - // Defaults to true, allowing datagram transport to be used if (a) the - // application provides a factory for it and (b) the configuration specifies - // its use. This flag provides a kill-switch to force-disable datagram - // transport across all applications, without code changes. - FieldTrialFlag enabled; - - // Whether the datagram transport data channels are enabled or disabled by - // default. Defaults to false, meaning that applications must configure use - // of datagram transport through RTCConfiguration. If set to true, - // applications will use the datagram transport by default (but may still - // explicitly configure themselves not to use it through RTCConfiguration). - FieldTrialFlag default_value; - - // Whether the datagram transport is enabled in receive-only mode. If true, - // and if the datagram transport is enabled, it will only be used when - // receiving incoming calls, not when placing outgoing calls. - FieldTrialFlag receive_only; - }; + JsepTransportController* transport_controller() { + return transport_controller_.get(); + } + cricket::PortAllocator* port_allocator() { return port_allocator_.get(); } + Call* call_ptr() { return call_ptr_; } - // Captures partial state to be used for rollback. Applicable only in - // Unified Plan. 
- class TransceiverStableState { - public: - TransceiverStableState() {} - void set_newly_created(); - void SetMSectionIfUnset(absl::optional mid, - absl::optional mline_index); - void SetRemoteStreamIdsIfUnset(const std::vector& ids); - absl::optional mid() const { return mid_; } - absl::optional mline_index() const { return mline_index_; } - absl::optional> remote_stream_ids() const { - return remote_stream_ids_; - } - bool has_m_section() const { return has_m_section_; } - bool newly_created() const { return newly_created_; } - - private: - absl::optional mid_; - absl::optional mline_index_; - absl::optional> remote_stream_ids_; - // Indicates that mid value from stable state has been captured and - // that rollback has to restore the transceiver. Also protects against - // subsequent overwrites. - bool has_m_section_ = false; - // Indicates that the transceiver was created as part of applying a - // description to track potential need for removing transceiver during - // rollback. - bool newly_created_ = false; - }; + ConnectionContext* context() { return context_.get(); } + const PeerConnectionFactoryInterface::Options* options() const { + return &options_; + } + void SetIceConnectionState(IceConnectionState new_state); + void NoteUsageEvent(UsageEvent event); - // Implements MessageHandler. - void OnMessage(rtc::Message* msg) override; + // Asynchronously adds a remote candidate on the network thread. + void AddRemoteCandidate(const std::string& mid, + const cricket::Candidate& candidate); - // Plan B helpers for getting the voice/video media channels for the single - // audio/video transceiver, if it exists. - cricket::VoiceMediaChannel* voice_media_channel() const - RTC_RUN_ON(signaling_thread()); - cricket::VideoMediaChannel* video_media_channel() const - RTC_RUN_ON(signaling_thread()); + // Report the UMA metric SdpFormatReceived for the given remote description. 
+ void ReportSdpFormatReceived( + const SessionDescriptionInterface& remote_description); - std::vector>> - GetSendersInternal() const RTC_RUN_ON(signaling_thread()); - std::vector< - rtc::scoped_refptr>> - GetReceiversInternal() const RTC_RUN_ON(signaling_thread()); + // Report the UMA metric BundleUsage for the given remote description. + void ReportSdpBundleUsage( + const SessionDescriptionInterface& remote_description); - rtc::scoped_refptr> - GetAudioTransceiver() const RTC_RUN_ON(signaling_thread()); - rtc::scoped_refptr> - GetVideoTransceiver() const RTC_RUN_ON(signaling_thread()); + // Returns true if the PeerConnection is configured to use Unified Plan + // semantics for creating offers/answers and setting local/remote + // descriptions. If this is true the RtpTransceiver API will also be available + // to the user. If this is false, Plan B semantics are assumed. + // TODO(bugs.webrtc.org/8530): Flip the default to be Unified Plan once + // sufficient time has passed. + bool IsUnifiedPlan() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return is_unified_plan_; + } + bool ValidateBundleSettings( + const cricket::SessionDescription* desc, + const std::map& + bundle_groups_by_mid); - rtc::scoped_refptr> - GetFirstAudioTransceiver() const RTC_RUN_ON(signaling_thread()); - - // Implementation of the offer/answer exchange operations. These are chained - // onto the |operations_chain_| when the public CreateOffer(), CreateAnswer(), - // SetLocalDescription() and SetRemoteDescription() methods are invoked. 
- void DoCreateOffer( - const RTCOfferAnswerOptions& options, - rtc::scoped_refptr observer); - void DoCreateAnswer( - const RTCOfferAnswerOptions& options, - rtc::scoped_refptr observer); - void DoSetLocalDescription( - std::unique_ptr desc, - rtc::scoped_refptr observer); - void DoSetRemoteDescription( - std::unique_ptr desc, - rtc::scoped_refptr observer); + // Returns the MID for the data section associated with the + // SCTP data channel, if it has been set. If no data + // channels are configured this will return nullopt. + absl::optional GetDataMid() const; - void CreateAudioReceiver(MediaStreamInterface* stream, - const RtpSenderInfo& remote_sender_info) - RTC_RUN_ON(signaling_thread()); + void SetSctpDataMid(const std::string& mid); - void CreateVideoReceiver(MediaStreamInterface* stream, - const RtpSenderInfo& remote_sender_info) - RTC_RUN_ON(signaling_thread()); - rtc::scoped_refptr RemoveAndStopReceiver( - const RtpSenderInfo& remote_sender_info) RTC_RUN_ON(signaling_thread()); + void ResetSctpDataMid(); - // May be called either by AddStream/RemoveStream, or when a track is - // added/removed from a stream previously added via AddStream. - void AddAudioTrack(AudioTrackInterface* track, MediaStreamInterface* stream) - RTC_RUN_ON(signaling_thread()); - void RemoveAudioTrack(AudioTrackInterface* track, - MediaStreamInterface* stream) - RTC_RUN_ON(signaling_thread()); - void AddVideoTrack(VideoTrackInterface* track, MediaStreamInterface* stream) - RTC_RUN_ON(signaling_thread()); - void RemoveVideoTrack(VideoTrackInterface* track, - MediaStreamInterface* stream) - RTC_RUN_ON(signaling_thread()); + // Asynchronously calls SctpTransport::Start() on the network thread for + // |sctp_mid()| if set. Called as part of setting the local description. + void StartSctpTransport(int local_port, + int remote_port, + int max_message_size); - // AddTrack implementation when Unified Plan is specified. 
- RTCErrorOr> AddTrackUnifiedPlan( - rtc::scoped_refptr track, - const std::vector& stream_ids) - RTC_RUN_ON(signaling_thread()); - // AddTrack implementation when Plan B is specified. - RTCErrorOr> AddTrackPlanB( - rtc::scoped_refptr track, - const std::vector& stream_ids) - RTC_RUN_ON(signaling_thread()); - - // Returns the first RtpTransceiver suitable for a newly added track, if such - // transceiver is available. - rtc::scoped_refptr> - FindFirstTransceiverForAddedTrack( - rtc::scoped_refptr track) - RTC_RUN_ON(signaling_thread()); - - rtc::scoped_refptr> - FindTransceiverBySender(rtc::scoped_refptr sender) - RTC_RUN_ON(signaling_thread()); + // Returns the CryptoOptions for this PeerConnection. This will always + // return the RTCConfiguration.crypto_options if set and will only default + // back to the PeerConnectionFactory settings if nothing was set. + CryptoOptions GetCryptoOptions(); // Internal implementation for AddTransceiver family of methods. If // |fire_callback| is set, fires OnRenegotiationNeeded callback if successful. @@ -533,28 +420,53 @@ class PeerConnection : public PeerConnectionInternal, cricket::MediaType media_type, rtc::scoped_refptr track, const RtpTransceiverInit& init, - bool fire_callback = true) RTC_RUN_ON(signaling_thread()); + bool fire_callback = true); - rtc::scoped_refptr> - CreateSender(cricket::MediaType media_type, - const std::string& id, - rtc::scoped_refptr track, - const std::vector& stream_ids, - const std::vector& send_encodings); + // Returns rtp transport, result can not be nullptr. + RtpTransportInternal* GetRtpTransport(const std::string& mid); - rtc::scoped_refptr> - CreateReceiver(cricket::MediaType media_type, const std::string& receiver_id); + // Returns true if SRTP (either using DTLS-SRTP or SDES) is required by + // this session. + bool SrtpRequired() const; - // Create a new RtpTransceiver of the given type and add it to the list of - // transceivers. 
- rtc::scoped_refptr> - CreateAndAddTransceiver( - rtc::scoped_refptr> sender, - rtc::scoped_refptr> - receiver) RTC_RUN_ON(signaling_thread()); + bool SetupDataChannelTransport_n(const std::string& mid) + RTC_RUN_ON(network_thread()); + void TeardownDataChannelTransport_n() RTC_RUN_ON(network_thread()); + cricket::ChannelInterface* GetChannel(const std::string& content_name) + RTC_RUN_ON(network_thread()); + + // Functions made public for testing. + void ReturnHistogramVeryQuicklyForTesting() { + RTC_DCHECK_RUN_ON(signaling_thread()); + return_histogram_very_quickly_ = true; + } + void RequestUsagePatternReportForTesting(); + + protected: + // Available for rtc::scoped_refptr creation + PeerConnection(rtc::scoped_refptr context, + const PeerConnectionFactoryInterface::Options& options, + bool is_unified_plan, + std::unique_ptr event_log, + std::unique_ptr call, + PeerConnectionDependencies& dependencies, + bool dtls_enabled); + + ~PeerConnection() override; + + private: + RTCError Initialize( + const PeerConnectionInterface::RTCConfiguration& configuration, + PeerConnectionDependencies dependencies); + void InitializeTransportController_n( + const RTCConfiguration& configuration, + const PeerConnectionDependencies& dependencies) + RTC_RUN_ON(network_thread()); - void SetIceConnectionState(IceConnectionState new_state) + rtc::scoped_refptr> + FindTransceiverBySender(rtc::scoped_refptr sender) RTC_RUN_ON(signaling_thread()); + void SetStandardizedIceConnectionState( PeerConnectionInterface::IceConnectionState new_state) RTC_RUN_ON(signaling_thread()); @@ -583,312 +495,11 @@ class PeerConnection : public PeerConnectionInternal, const cricket::CandidatePairChangeEvent& event) RTC_RUN_ON(signaling_thread()); - // Update the state, signaling if necessary. - void ChangeSignalingState(SignalingState signaling_state) - RTC_RUN_ON(signaling_thread()); - - // Signals from MediaStreamObserver. 
- void OnAudioTrackAdded(AudioTrackInterface* track, - MediaStreamInterface* stream) - RTC_RUN_ON(signaling_thread()); - void OnAudioTrackRemoved(AudioTrackInterface* track, - MediaStreamInterface* stream) - RTC_RUN_ON(signaling_thread()); - void OnVideoTrackAdded(VideoTrackInterface* track, - MediaStreamInterface* stream) - RTC_RUN_ON(signaling_thread()); - void OnVideoTrackRemoved(VideoTrackInterface* track, - MediaStreamInterface* stream) - RTC_RUN_ON(signaling_thread()); - - void PostSetSessionDescriptionSuccess( - SetSessionDescriptionObserver* observer); - void PostSetSessionDescriptionFailure(SetSessionDescriptionObserver* observer, - RTCError&& error); - void PostCreateSessionDescriptionFailure( - CreateSessionDescriptionObserver* observer, - RTCError error); - - // Synchronous implementations of SetLocalDescription/SetRemoteDescription - // that return an RTCError instead of invoking a callback. - RTCError ApplyLocalDescription( - std::unique_ptr desc); - RTCError ApplyRemoteDescription( - std::unique_ptr desc); - - // Updates the local RtpTransceivers according to the JSEP rules. Called as - // part of setting the local/remote description. - RTCError UpdateTransceiversAndDataChannels( - cricket::ContentSource source, - const SessionDescriptionInterface& new_session, - const SessionDescriptionInterface* old_local_description, - const SessionDescriptionInterface* old_remote_description) - RTC_RUN_ON(signaling_thread()); - - // Either creates or destroys the transceiver's BaseChannel according to the - // given media section. - RTCError UpdateTransceiverChannel( - rtc::scoped_refptr> - transceiver, - const cricket::ContentInfo& content, - const cricket::ContentGroup* bundle_group) RTC_RUN_ON(signaling_thread()); - - // Either creates or destroys the local data channel according to the given - // media section. 
- RTCError UpdateDataChannel(cricket::ContentSource source, - const cricket::ContentInfo& content, - const cricket::ContentGroup* bundle_group) - RTC_RUN_ON(signaling_thread()); - - // Associate the given transceiver according to the JSEP rules. - RTCErrorOr< - rtc::scoped_refptr>> - AssociateTransceiver(cricket::ContentSource source, - SdpType type, - size_t mline_index, - const cricket::ContentInfo& content, - const cricket::ContentInfo* old_local_content, - const cricket::ContentInfo* old_remote_content) - RTC_RUN_ON(signaling_thread()); - - // Returns the RtpTransceiver, if found, that is associated to the given MID. - rtc::scoped_refptr> - GetAssociatedTransceiver(const std::string& mid) const - RTC_RUN_ON(signaling_thread()); - - // Returns the RtpTransceiver, if found, that was assigned to the given mline - // index in CreateOffer. - rtc::scoped_refptr> - GetTransceiverByMLineIndex(size_t mline_index) const - RTC_RUN_ON(signaling_thread()); - - // Returns an RtpTransciever, if available, that can be used to receive the - // given media type according to JSEP rules. - rtc::scoped_refptr> - FindAvailableTransceiverToReceive(cricket::MediaType media_type) const - RTC_RUN_ON(signaling_thread()); - - // Returns the media section in the given session description that is - // associated with the RtpTransceiver. Returns null if none found or this - // RtpTransceiver is not associated. Logic varies depending on the - // SdpSemantics specified in the configuration. - const cricket::ContentInfo* FindMediaSectionForTransceiver( - rtc::scoped_refptr> - transceiver, - const SessionDescriptionInterface* sdesc) const - RTC_RUN_ON(signaling_thread()); - - // Runs the algorithm **set the associated remote streams** specified in - // https://w3c.github.io/webrtc-pc/#set-associated-remote-streams. 
- void SetAssociatedRemoteStreams( - rtc::scoped_refptr receiver, - const std::vector& stream_ids, - std::vector>* added_streams, - std::vector>* removed_streams) - RTC_RUN_ON(signaling_thread()); - - // Runs the algorithm **process the removal of a remote track** specified in - // the WebRTC specification. - // This method will update the following lists: - // |remove_list| is the list of transceivers for which the receiving track is - // being removed. - // |removed_streams| is the list of streams which no longer have a receiving - // track so should be removed. - // https://w3c.github.io/webrtc-pc/#process-remote-track-removal - void ProcessRemovalOfRemoteTrack( - rtc::scoped_refptr> - transceiver, - std::vector>* remove_list, - std::vector>* removed_streams) - RTC_RUN_ON(signaling_thread()); - - void RemoveRemoteStreamsIfEmpty( - const std::vector>& - remote_streams, - std::vector>* removed_streams) - RTC_RUN_ON(signaling_thread()); - void OnNegotiationNeeded(); - // Returns a MediaSessionOptions struct with options decided by |options|, - // the local MediaStreams and DataChannels. 
- void GetOptionsForOffer(const PeerConnectionInterface::RTCOfferAnswerOptions& - offer_answer_options, - cricket::MediaSessionOptions* session_options) - RTC_RUN_ON(signaling_thread()); - void GetOptionsForPlanBOffer( - const PeerConnectionInterface::RTCOfferAnswerOptions& - offer_answer_options, - cricket::MediaSessionOptions* session_options) - RTC_RUN_ON(signaling_thread()); - void GetOptionsForUnifiedPlanOffer( - const PeerConnectionInterface::RTCOfferAnswerOptions& - offer_answer_options, - cricket::MediaSessionOptions* session_options) - RTC_RUN_ON(signaling_thread()); - - RTCError HandleLegacyOfferOptions(const RTCOfferAnswerOptions& options) - RTC_RUN_ON(signaling_thread()); - void RemoveRecvDirectionFromReceivingTransceiversOfType( - cricket::MediaType media_type) RTC_RUN_ON(signaling_thread()); - void AddUpToOneReceivingTransceiverOfType(cricket::MediaType media_type); - std::vector< - rtc::scoped_refptr>> - GetReceivingTransceiversOfType(cricket::MediaType media_type) - RTC_RUN_ON(signaling_thread()); - - // Returns a MediaSessionOptions struct with options decided by - // |constraints|, the local MediaStreams and DataChannels. - void GetOptionsForAnswer(const RTCOfferAnswerOptions& offer_answer_options, - cricket::MediaSessionOptions* session_options) - RTC_RUN_ON(signaling_thread()); - void GetOptionsForPlanBAnswer( - const PeerConnectionInterface::RTCOfferAnswerOptions& - offer_answer_options, - cricket::MediaSessionOptions* session_options) - RTC_RUN_ON(signaling_thread()); - void GetOptionsForUnifiedPlanAnswer( - const PeerConnectionInterface::RTCOfferAnswerOptions& - offer_answer_options, - cricket::MediaSessionOptions* session_options) - RTC_RUN_ON(signaling_thread()); - - // Generates MediaDescriptionOptions for the |session_opts| based on existing - // local description or remote description. 
- void GenerateMediaDescriptionOptions( - const SessionDescriptionInterface* session_desc, - RtpTransceiverDirection audio_direction, - RtpTransceiverDirection video_direction, - absl::optional* audio_index, - absl::optional* video_index, - absl::optional* data_index, - cricket::MediaSessionOptions* session_options) - RTC_RUN_ON(signaling_thread()); - - // Generates the active MediaDescriptionOptions for the local data channel - // given the specified MID. - cricket::MediaDescriptionOptions GetMediaDescriptionOptionsForActiveData( - const std::string& mid) const RTC_RUN_ON(signaling_thread()); - - // Generates the rejected MediaDescriptionOptions for the local data channel - // given the specified MID. - cricket::MediaDescriptionOptions GetMediaDescriptionOptionsForRejectedData( - const std::string& mid) const RTC_RUN_ON(signaling_thread()); - - // Returns the MID for the data section associated with either the - // RtpDataChannel or SCTP data channel, if it has been set. If no data - // channels are configured this will return nullopt. - absl::optional GetDataMid() const RTC_RUN_ON(signaling_thread()); - - // Remove all local and remote senders of type |media_type|. - // Called when a media type is rejected (m-line set to port 0). - void RemoveSenders(cricket::MediaType media_type) - RTC_RUN_ON(signaling_thread()); - - // Makes sure a MediaStreamTrack is created for each StreamParam in |streams|, - // and existing MediaStreamTracks are removed if there is no corresponding - // StreamParam. If |default_track_needed| is true, a default MediaStreamTrack - // is created if it doesn't exist; if false, it's removed if it exists. - // |media_type| is the type of the |streams| and can be either audio or video. - // If a new MediaStream is created it is added to |new_streams|. 
- void UpdateRemoteSendersList( - const std::vector& streams, - bool default_track_needed, - cricket::MediaType media_type, - StreamCollection* new_streams) RTC_RUN_ON(signaling_thread()); - - // Triggered when a remote sender has been seen for the first time in a remote - // session description. It creates a remote MediaStreamTrackInterface - // implementation and triggers CreateAudioReceiver or CreateVideoReceiver. - void OnRemoteSenderAdded(const RtpSenderInfo& sender_info, - cricket::MediaType media_type) - RTC_RUN_ON(signaling_thread()); - - // Triggered when a remote sender has been removed from a remote session - // description. It removes the remote sender with id |sender_id| from a remote - // MediaStream and triggers DestroyAudioReceiver or DestroyVideoReceiver. - void OnRemoteSenderRemoved(const RtpSenderInfo& sender_info, - cricket::MediaType media_type) - RTC_RUN_ON(signaling_thread()); - - // Finds remote MediaStreams without any tracks and removes them from - // |remote_streams_| and notifies the observer that the MediaStreams no longer - // exist. - void UpdateEndedRemoteMediaStreams() RTC_RUN_ON(signaling_thread()); - - // Loops through the vector of |streams| and finds added and removed - // StreamParams since last time this method was called. - // For each new or removed StreamParam, OnLocalSenderSeen or - // OnLocalSenderRemoved is invoked. - void UpdateLocalSenders(const std::vector& streams, - cricket::MediaType media_type) - RTC_RUN_ON(signaling_thread()); - - // Triggered when a local sender has been seen for the first time in a local - // session description. 
- // This method triggers CreateAudioSender or CreateVideoSender if the rtp - // streams in the local SessionDescription can be mapped to a MediaStreamTrack - // in a MediaStream in |local_streams_| - void OnLocalSenderAdded(const RtpSenderInfo& sender_info, - cricket::MediaType media_type) - RTC_RUN_ON(signaling_thread()); - - // Triggered when a local sender has been removed from a local session - // description. - // This method triggers DestroyAudioSender or DestroyVideoSender if a stream - // has been removed from the local SessionDescription and the stream can be - // mapped to a MediaStreamTrack in a MediaStream in |local_streams_|. - void OnLocalSenderRemoved(const RtpSenderInfo& sender_info, - cricket::MediaType media_type) - RTC_RUN_ON(signaling_thread()); - - // Returns true if the PeerConnection is configured to use Unified Plan - // semantics for creating offers/answers and setting local/remote - // descriptions. If this is true the RtpTransceiver API will also be available - // to the user. If this is false, Plan B semantics are assumed. - // TODO(bugs.webrtc.org/8530): Flip the default to be Unified Plan once - // sufficient time has passed. - bool IsUnifiedPlan() const RTC_RUN_ON(signaling_thread()) { - return configuration_.sdp_semantics == SdpSemantics::kUnifiedPlan; - } - - // The offer/answer machinery assumes the media section MID is present and - // unique. To support legacy end points that do not supply a=mid lines, this - // method will modify the session description to add MIDs generated according - // to the SDP semantics. - void FillInMissingRemoteMids(cricket::SessionDescription* remote_description) - RTC_RUN_ON(signaling_thread()); - - // Is there an RtpSender of the given type? - bool HasRtpSender(cricket::MediaType type) const - RTC_RUN_ON(signaling_thread()); - - // Return the RtpSender with the given track attached. 
- rtc::scoped_refptr> - FindSenderForTrack(MediaStreamTrackInterface* track) const - RTC_RUN_ON(signaling_thread()); - - // Return the RtpSender with the given id, or null if none exists. - rtc::scoped_refptr> - FindSenderById(const std::string& sender_id) const - RTC_RUN_ON(signaling_thread()); - - // Return the RtpReceiver with the given id, or null if none exists. - rtc::scoped_refptr> - FindReceiverById(const std::string& receiver_id) const - RTC_RUN_ON(signaling_thread()); - - std::vector* GetRemoteSenderInfos( - cricket::MediaType media_type); - std::vector* GetLocalSenderInfos( - cricket::MediaType media_type); - const RtpSenderInfo* FindSenderInfo(const std::vector& infos, - const std::string& stream_id, - const std::string sender_id) const; - // Returns the specified SCTP DataChannel in sctp_data_channels_, // or nullptr if not found. - DataChannel* FindDataChannelBySid(int sid) const + SctpDataChannel* FindDataChannelBySid(int sid) const RTC_RUN_ON(signaling_thread()); // Called when first configuring the port allocator. @@ -920,66 +531,6 @@ class PeerConnection : public PeerConnectionInternal, // This function should only be called from the worker thread. void StopRtcEventLog_w(); - // Ensures the configuration doesn't have any parameters with invalid values, - // or values that conflict with other parameters. - // - // Returns RTCError::OK() if there are no issues. - RTCError ValidateConfiguration(const RTCConfiguration& config) const; - - cricket::ChannelManager* channel_manager() const; - - enum class SessionError { - kNone, // No error. - kContent, // Error in BaseChannel SetLocalContent/SetRemoteContent. - kTransport, // Error from the underlying transport. - }; - - // Returns the last error in the session. See the enum above for details. 
- SessionError session_error() const RTC_RUN_ON(signaling_thread()) { - return session_error_; - } - const std::string& session_error_desc() const { return session_error_desc_; } - - cricket::ChannelInterface* GetChannel(const std::string& content_name); - - cricket::IceConfig ParseIceConfig( - const PeerConnectionInterface::RTCConfiguration& config) const; - - cricket::DataChannelType data_channel_type() const; - - // Called when an RTCCertificate is generated or retrieved by - // WebRTCSessionDescriptionFactory. Should happen before setLocalDescription. - void OnCertificateReady( - const rtc::scoped_refptr& certificate); - void OnDtlsSrtpSetupFailure(cricket::BaseChannel*, bool rtcp); - - // Non-const versions of local_description()/remote_description(), for use - // internally. - SessionDescriptionInterface* mutable_local_description() - RTC_RUN_ON(signaling_thread()) { - return pending_local_description_ ? pending_local_description_.get() - : current_local_description_.get(); - } - SessionDescriptionInterface* mutable_remote_description() - RTC_RUN_ON(signaling_thread()) { - return pending_remote_description_ ? pending_remote_description_.get() - : current_remote_description_.get(); - } - - // Updates the error state, signaling if necessary. - void SetSessionError(SessionError error, const std::string& error_desc); - - RTCError UpdateSessionState(SdpType type, - cricket::ContentSource source, - const cricket::SessionDescription* description); - // Push the media parts of the local or remote session description - // down to all of the channels. - RTCError PushdownMediaDescription(SdpType type, cricket::ContentSource source) - RTC_RUN_ON(signaling_thread()); - - RTCError PushdownTransportDescription(cricket::ContentSource source, - SdpType type); - // Returns true and the TransportInfo of the given |content_name| // from |description|. Returns false if it's not available. 
static bool GetTransportDescription( @@ -987,89 +538,12 @@ class PeerConnection : public PeerConnectionInternal, const std::string& content_name, cricket::TransportDescription* info); - // Enables media channels to allow sending of media. - // This enables media to flow on all configured audio/video channels and the - // RtpDataChannel. - void EnableSending() RTC_RUN_ON(signaling_thread()); - - // Destroys all BaseChannels and destroys the SCTP data channel, if present. - void DestroyAllChannels() RTC_RUN_ON(signaling_thread()); - // Returns the media index for a local ice candidate given the content name. // Returns false if the local session description does not have a media // content called |content_name|. bool GetLocalCandidateMediaIndex(const std::string& content_name, int* sdp_mline_index) RTC_RUN_ON(signaling_thread()); - // Uses all remote candidates in |remote_desc| in this session. - bool UseCandidatesInSessionDescription( - const SessionDescriptionInterface* remote_desc) - RTC_RUN_ON(signaling_thread()); - // Uses |candidate| in this session. - bool UseCandidate(const IceCandidateInterface* candidate) - RTC_RUN_ON(signaling_thread()); - RTCErrorOr FindContentInfo( - const SessionDescriptionInterface* description, - const IceCandidateInterface* candidate) RTC_RUN_ON(signaling_thread()); - // Deletes the corresponding channel of contents that don't exist in |desc|. - // |desc| can be null. This means that all channels are deleted. - void RemoveUnusedChannels(const cricket::SessionDescription* desc) - RTC_RUN_ON(signaling_thread()); - - // Allocates media channels based on the |desc|. If |desc| doesn't have - // the BUNDLE option, this method will disable BUNDLE in PortAllocator. - // This method will also delete any existing media channels before creating. 
- RTCError CreateChannels(const cricket::SessionDescription& desc) - RTC_RUN_ON(signaling_thread()); - - // If the BUNDLE policy is max-bundle, then we know for sure that all - // transports will be bundled from the start. This method returns the BUNDLE - // group if that's the case, or null if BUNDLE will be negotiated later. An - // error is returned if max-bundle is specified but the session description - // does not have a BUNDLE group. - RTCErrorOr GetEarlyBundleGroup( - const cricket::SessionDescription& desc) const - RTC_RUN_ON(signaling_thread()); - - // Helper methods to create media channels. - cricket::VoiceChannel* CreateVoiceChannel(const std::string& mid) - RTC_RUN_ON(signaling_thread()); - cricket::VideoChannel* CreateVideoChannel(const std::string& mid) - RTC_RUN_ON(signaling_thread()); - bool CreateDataChannel(const std::string& mid) RTC_RUN_ON(signaling_thread()); - - bool SetupDataChannelTransport_n(const std::string& mid) - RTC_RUN_ON(network_thread()); - void TeardownDataChannelTransport_n() RTC_RUN_ON(network_thread()); - - bool ValidateBundleSettings(const cricket::SessionDescription* desc); - bool HasRtcpMuxEnabled(const cricket::ContentInfo* content); - // Below methods are helper methods which verifies SDP. - RTCError ValidateSessionDescription(const SessionDescriptionInterface* sdesc, - cricket::ContentSource source) - RTC_RUN_ON(signaling_thread()); - - // Check if a call to SetLocalDescription is acceptable with a session - // description of the given type. - bool ExpectSetLocalDescription(SdpType type); - // Check if a call to SetRemoteDescription is acceptable with a session - // description of the given type. - bool ExpectSetRemoteDescription(SdpType type); - // Verifies a=setup attribute as per RFC 5763. - bool ValidateDtlsSetupAttribute(const cricket::SessionDescription* desc, - SdpType type); - - // Returns true if we are ready to push down the remote candidate. 
- // |remote_desc| is the new remote description, or NULL if the current remote - // description should be used. Output |valid| is true if the candidate media - // index is valid. - bool ReadyToUseRemoteCandidate(const IceCandidateInterface* candidate, - const SessionDescriptionInterface* remote_desc, - bool* valid) RTC_RUN_ON(signaling_thread()); - - // Returns true if SRTP (either using DTLS-SRTP or SDES) is required by - // this session. - bool SrtpRequired() const RTC_RUN_ON(signaling_thread()); // JsepTransportController signal handlers. void OnTransportControllerConnectionState(cricket::IceConnectionState state) @@ -1091,56 +565,23 @@ class PeerConnection : public PeerConnectionInternal, RTC_RUN_ON(signaling_thread()); void OnTransportControllerDtlsHandshakeError(rtc::SSLHandshakeError error); - const char* SessionErrorToString(SessionError error) const; - std::string GetSessionErrorMsg() RTC_RUN_ON(signaling_thread()); - - // Report the UMA metric SdpFormatReceived for the given remote offer. - void ReportSdpFormatReceived(const SessionDescriptionInterface& remote_offer); - - // Report inferred negotiated SDP semantics from a local/remote answer to the - // UMA observer. - void ReportNegotiatedSdpSemantics(const SessionDescriptionInterface& answer); - // Invoked when TransportController connection completion is signaled. // Reports stats for all transports in use. - void ReportTransportStats() RTC_RUN_ON(signaling_thread()); + void ReportTransportStats() RTC_RUN_ON(network_thread()); // Gather the usage of IPv4/IPv6 as best connection. 
- void ReportBestConnectionState(const cricket::TransportStats& stats); + static void ReportBestConnectionState(const cricket::TransportStats& stats); - void ReportNegotiatedCiphers(const cricket::TransportStats& stats, - const std::set& media_types) - RTC_RUN_ON(signaling_thread()); + static void ReportNegotiatedCiphers( + bool dtls_enabled, + const cricket::TransportStats& stats, + const std::set& media_types); void ReportIceCandidateCollected(const cricket::Candidate& candidate) RTC_RUN_ON(signaling_thread()); - void ReportRemoteIceCandidateAdded(const cricket::Candidate& candidate) - RTC_RUN_ON(signaling_thread()); - void NoteUsageEvent(UsageEvent event); void ReportUsagePattern() const RTC_RUN_ON(signaling_thread()); - void OnSentPacket_w(const rtc::SentPacket& sent_packet); - - const std::string GetTransportName(const std::string& content_name) - RTC_RUN_ON(signaling_thread()); - - // Functions for dealing with transports. - // Note that cricket code uses the term "channel" for what other code - // refers to as "transport". - - // Destroys and clears the BaseChannel associated with the given transceiver, - // if such channel is set. - void DestroyTransceiverChannel( - rtc::scoped_refptr> - transceiver); - - // Destroys the RTP data channel transport and/or the SCTP data channel - // transport and clears it. - void DestroyDataChannelTransport() RTC_RUN_ON(signaling_thread()); - - // Destroys the given ChannelInterface. - // The channel cannot be accessed after this method is called. - void DestroyChannelInterface(cricket::ChannelInterface* channel); + void ReportRemoteIceCandidateAdded(const cricket::Candidate& candidate); // JsepTransportController::Observer override. // @@ -1154,38 +595,17 @@ class PeerConnection : public PeerConnectionInternal, rtc::scoped_refptr dtls_transport, DataChannelTransportInterface* data_channel_transport) override; - // RtpSenderBase::SetStreamsObserver override. 
- void OnSetStreams() override; - - // Returns the CryptoOptions for this PeerConnection. This will always - // return the RTCConfiguration.crypto_options if set and will only default - // back to the PeerConnectionFactory settings if nothing was set. - CryptoOptions GetCryptoOptions() RTC_RUN_ON(signaling_thread()); + std::function + InitializeRtcpCallback(); - // Returns rtp transport, result can not be nullptr. - RtpTransportInternal* GetRtpTransport(const std::string& mid) - RTC_RUN_ON(signaling_thread()) { - auto rtp_transport = transport_controller_->GetRtpTransport(mid); - RTC_DCHECK(rtp_transport); - return rtp_transport; - } - - void UpdateNegotiationNeeded(); - bool CheckIfNegotiationIsNeeded(); - - // | sdp_type | is the type of the SDP that caused the rollback. - RTCError Rollback(SdpType sdp_type); - - // Storing the factory as a scoped reference pointer ensures that the memory - // in the PeerConnectionFactoryImpl remains available as long as the - // PeerConnection is running. It is passed to PeerConnection as a raw pointer. - // However, since the reference counting is done in the - // PeerConnectionFactoryInterface all instances created using the raw pointer - // will refer to the same reference count. - const rtc::scoped_refptr factory_; + const rtc::scoped_refptr context_; + const PeerConnectionFactoryInterface::Options options_; PeerConnectionObserver* observer_ RTC_GUARDED_BY(signaling_thread()) = nullptr; + const bool is_unified_plan_; + // The EventLog needs to outlive |call_| (and any other object that uses it). std::unique_ptr event_log_ RTC_GUARDED_BY(worker_thread()); @@ -1193,15 +613,6 @@ class PeerConnection : public PeerConnectionInternal, // pointer (but not touch the object) from any thread. RtcEventLog* const event_log_ptr_ RTC_PT_GUARDED_BY(worker_thread()); - // The operations chain is used by the offer/answer exchange methods to ensure - // they are executed in the right order. 
For example, if - // SetRemoteDescription() is invoked while CreateOffer() is still pending, the - // SRD operation will not start until CreateOffer() has completed. See - // https://w3c.github.io/webrtc-pc/#dfn-operations-chain. - rtc::scoped_refptr operations_chain_ - RTC_GUARDED_BY(signaling_thread()); - - SignalingState signaling_state_ RTC_GUARDED_BY(signaling_thread()) = kStable; IceConnectionState ice_connection_state_ RTC_GUARDED_BY(signaling_thread()) = kIceConnectionNew; PeerConnectionInterface::IceConnectionState standardized_ice_connection_state_ @@ -1214,122 +625,43 @@ class PeerConnection : public PeerConnectionInternal, PeerConnectionInterface::RTCConfiguration configuration_ RTC_GUARDED_BY(signaling_thread()); - // Field-trial based configuration for datagram transport. - const DatagramTransportConfig datagram_transport_config_; - - // Field-trial based configuration for datagram transport data channels. - const DatagramTransportDataChannelConfig - datagram_transport_data_channel_config_; - - // Final, resolved value for whether datagram transport is in use. - bool use_datagram_transport_ RTC_GUARDED_BY(signaling_thread()) = false; - - // Equivalent of |use_datagram_transport_|, but for its use with data - // channels. - bool use_datagram_transport_for_data_channels_ - RTC_GUARDED_BY(signaling_thread()) = false; - - // Resolved value of whether to use data channels only for incoming calls. - bool use_datagram_transport_for_data_channels_receive_only_ - RTC_GUARDED_BY(signaling_thread()) = false; - - // TODO(zstein): |async_resolver_factory_| can currently be nullptr if it - // is not injected. It should be required once chromium supplies it. - std::unique_ptr async_resolver_factory_ - RTC_GUARDED_BY(signaling_thread()); + const std::unique_ptr + async_dns_resolver_factory_; std::unique_ptr port_allocator_; // TODO(bugs.webrtc.org/9987): Accessed on both // signaling and network thread. 
- std::unique_ptr packet_socket_factory_; - std::unique_ptr + const std::unique_ptr ice_transport_factory_; // TODO(bugs.webrtc.org/9987): Accessed on the // signaling thread but the underlying raw // pointer is given to // |jsep_transport_controller_| and used on the // network thread. - std::unique_ptr - tls_cert_verifier_; // TODO(bugs.webrtc.org/9987): Accessed on both - // signaling and network thread. - - // One PeerConnection has only one RTCP CNAME. - // https://tools.ietf.org/html/draft-ietf-rtcweb-rtp-usage-26#section-4.9 - const std::string rtcp_cname_; - - // Streams added via AddStream. - const rtc::scoped_refptr local_streams_ - RTC_GUARDED_BY(signaling_thread()); - // Streams created as a result of SetRemoteDescription. - const rtc::scoped_refptr remote_streams_ - RTC_GUARDED_BY(signaling_thread()); - - std::vector> stream_observers_ - RTC_GUARDED_BY(signaling_thread()); - - // These lists store sender info seen in local/remote descriptions. - std::vector remote_audio_sender_infos_ - RTC_GUARDED_BY(signaling_thread()); - std::vector remote_video_sender_infos_ - RTC_GUARDED_BY(signaling_thread()); - std::vector local_audio_sender_infos_ - RTC_GUARDED_BY(signaling_thread()); - std::vector local_video_sender_infos_ - RTC_GUARDED_BY(signaling_thread()); - - bool remote_peer_supports_msid_ RTC_GUARDED_BY(signaling_thread()) = false; + const std::unique_ptr tls_cert_verifier_ + RTC_GUARDED_BY(network_thread()); // The unique_ptr belongs to the worker thread, but the Call object manages // its own thread safety. std::unique_ptr call_ RTC_GUARDED_BY(worker_thread()); - - rtc::AsyncInvoker rtcp_invoker_ RTC_GUARDED_BY(network_thread()); + ScopedTaskSafety signaling_thread_safety_; + rtc::scoped_refptr network_thread_safety_; + rtc::scoped_refptr worker_thread_safety_; // Points to the same thing as `call_`. Since it's const, we may read the // pointer from any thread. 
+ // TODO(bugs.webrtc.org/11992): Remove this workaround (and potential dangling + // pointer). Call* const call_ptr_; std::unique_ptr stats_ RTC_GUARDED_BY(signaling_thread()); // A pointer is passed to senders_ rtc::scoped_refptr stats_collector_ RTC_GUARDED_BY(signaling_thread()); - // Holds changes made to transceivers during applying descriptors for - // potential rollback. Gets cleared once signaling state goes to stable. - std::map>, - TransceiverStableState> - transceiver_stable_states_by_transceivers_; - // Used when rolling back RTP data channels. - bool have_pending_rtp_data_channel_ RTC_GUARDED_BY(signaling_thread()) = - false; - // Holds remote stream ids for transceivers from stable state. - std::map>, - std::vector> - remote_stream_ids_by_transceivers_; - std::vector< - rtc::scoped_refptr>> - transceivers_; // TODO(bugs.webrtc.org/9987): Accessed on both signaling - // and network thread. - - // In Unified Plan, if we encounter remote SDP that does not contain an a=msid - // line we create and use a stream with a random ID for our receivers. This is - // to support legacy endpoints that do not support the a=msid attribute (as - // opposed to streamless tracks with "a=msid:-"). - rtc::scoped_refptr missing_msid_default_stream_ - RTC_GUARDED_BY(signaling_thread()); - // MIDs will be generated using this generator which will keep track of - // all the MIDs that have been seen over the life of the PeerConnection. - rtc::UniqueStringGenerator mid_generator_ RTC_GUARDED_BY(signaling_thread()); - SessionError session_error_ RTC_GUARDED_BY(signaling_thread()) = - SessionError::kNone; - std::string session_error_desc_ RTC_GUARDED_BY(signaling_thread()); - - std::string session_id_ RTC_GUARDED_BY(signaling_thread()); + const std::string session_id_; std::unique_ptr transport_controller_; // TODO(bugs.webrtc.org/9987): Accessed on both // signaling and network thread. 
- std::unique_ptr - sctp_factory_; // TODO(bugs.webrtc.org/9987): Accessed on both - // signaling and network thread. // |sctp_mid_| is the content name (MID) in SDP. // Note: this is used as the data channel MID by both SCTP and data channel @@ -1340,59 +672,32 @@ class PeerConnection : public PeerConnectionInternal, // thread, but applied first on the networking thread via an invoke(). absl::optional sctp_mid_s_ RTC_GUARDED_BY(signaling_thread()); absl::optional sctp_mid_n_ RTC_GUARDED_BY(network_thread()); + std::string sctp_transport_name_s_ RTC_GUARDED_BY(signaling_thread()); - // Whether this peer is the caller. Set when the local description is applied. - absl::optional is_caller_ RTC_GUARDED_BY(signaling_thread()); - - - - std::unique_ptr current_local_description_ - RTC_GUARDED_BY(signaling_thread()); - std::unique_ptr pending_local_description_ - RTC_GUARDED_BY(signaling_thread()); - std::unique_ptr current_remote_description_ - RTC_GUARDED_BY(signaling_thread()); - std::unique_ptr pending_remote_description_ + // The machinery for handling offers and answers. Const after initialization. + std::unique_ptr sdp_handler_ RTC_GUARDED_BY(signaling_thread()); - bool dtls_enabled_ RTC_GUARDED_BY(signaling_thread()) = false; - // List of content names for which the remote side triggered an ICE restart. - std::set pending_ice_restarts_ - RTC_GUARDED_BY(signaling_thread()); - - std::unique_ptr webrtc_session_desc_factory_ - RTC_GUARDED_BY(signaling_thread()); + const bool dtls_enabled_; - // Member variables for caching global options. 
- cricket::AudioOptions audio_options_ RTC_GUARDED_BY(signaling_thread()); - cricket::VideoOptions video_options_ RTC_GUARDED_BY(signaling_thread()); - - int usage_event_accumulator_ RTC_GUARDED_BY(signaling_thread()) = 0; + UsagePattern usage_pattern_ RTC_GUARDED_BY(signaling_thread()); bool return_histogram_very_quickly_ RTC_GUARDED_BY(signaling_thread()) = false; - // This object should be used to generate any SSRC that is not explicitly - // specified by the user (or by the remote party). - // The generator is not used directly, instead it is passed on to the - // channel manager and the session description factory. - rtc::UniqueRandomIdGenerator ssrc_generator_ - RTC_GUARDED_BY(signaling_thread()); + DataChannelController data_channel_controller_; - // A video bitrate allocator factory. - // This can injected using the PeerConnectionDependencies, - // or else the CreateBuiltinVideoBitrateAllocatorFactory() will be called. - // Note that one can still choose to override this in a MediaEngine - // if one wants too. - std::unique_ptr - video_bitrate_allocator_factory_; + // Machinery for handling messages posted to oneself + PeerConnectionMessageHandler message_handler_; - std::unique_ptr - local_ice_credentials_to_replace_ RTC_GUARDED_BY(signaling_thread()); - bool is_negotiation_needed_ RTC_GUARDED_BY(signaling_thread()) = false; + // Administration of senders, receivers and transceivers + // Accessed on both signaling and network thread. Const after Initialize(). + std::unique_ptr rtp_manager_; - DataChannelController data_channel_controller_; - rtc::WeakPtrFactory weak_ptr_factory_ - RTC_GUARDED_BY(signaling_thread()); + rtc::WeakPtrFactory weak_factory_; + + // Did the connectionState ever change to `connected`? + // Used to gather metrics only the first such state change. 
+ bool was_ever_connected_ RTC_GUARDED_BY(signaling_thread()) = false; }; } // namespace webrtc diff --git a/pc/peer_connection_adaptation_integrationtest.cc b/pc/peer_connection_adaptation_integrationtest.cc new file mode 100644 index 0000000000..dfb12971b4 --- /dev/null +++ b/pc/peer_connection_adaptation_integrationtest.cc @@ -0,0 +1,161 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "api/audio_codecs/builtin_audio_decoder_factory.h" +#include "api/audio_codecs/builtin_audio_encoder_factory.h" +#include "api/rtp_parameters.h" +#include "api/scoped_refptr.h" +#include "call/adaptation/test/fake_resource.h" +#include "pc/test/fake_periodic_video_source.h" +#include "pc/test/fake_periodic_video_track_source.h" +#include "pc/test/peer_connection_test_wrapper.h" +#include "rtc_base/checks.h" +#include "rtc_base/gunit.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/thread.h" +#include "rtc_base/virtual_socket_server.h" +#include "test/gtest.h" + +namespace webrtc { + +const int64_t kDefaultTimeoutMs = 5000; + +struct TrackWithPeriodicSource { + rtc::scoped_refptr track; + rtc::scoped_refptr periodic_track_source; +}; + +// Performs an O/A exchange and waits until the signaling state is stable again. +void Negotiate(rtc::scoped_refptr caller, + rtc::scoped_refptr callee) { + // Wire up callbacks and listeners such that a full O/A is performed in + // response to CreateOffer(). 
+  PeerConnectionTestWrapper::Connect(caller.get(), callee.get());
+  caller->CreateOffer(PeerConnectionInterface::RTCOfferAnswerOptions());
+  caller->WaitForNegotiation();
+}
+
+TrackWithPeriodicSource CreateTrackWithPeriodicSource(
+    rtc::scoped_refptr factory) {
+  FakePeriodicVideoSource::Config periodic_track_source_config;
+  periodic_track_source_config.frame_interval_ms = 100;
+  periodic_track_source_config.timestamp_offset_ms = rtc::TimeMillis();
+  rtc::scoped_refptr periodic_track_source =
+      rtc::make_ref_counted(
+          periodic_track_source_config, /* remote */ false);
+  TrackWithPeriodicSource track_with_source;
+  track_with_source.track =
+      factory->CreateVideoTrack("PeriodicTrack", periodic_track_source);
+  track_with_source.periodic_track_source = periodic_track_source;
+  return track_with_source;
+}
+
+// Triggers overuse and obtains VideoSinkWants. Adaptation processing happens in
+// parallel and this function makes no guarantee that the returned VideoSinkWants
+// reflect the overuse signal yet. Used together with EXPECT_TRUE_WAIT
+// to "spam overuse until a change is observed".
+rtc::VideoSinkWants TriggerOveruseAndGetSinkWants( + rtc::scoped_refptr fake_resource, + const FakePeriodicVideoSource& source) { + fake_resource->SetUsageState(ResourceUsageState::kOveruse); + return source.wants(); +} + +class PeerConnectionAdaptationIntegrationTest : public ::testing::Test { + public: + PeerConnectionAdaptationIntegrationTest() + : virtual_socket_server_(), + network_thread_(new rtc::Thread(&virtual_socket_server_)), + worker_thread_(rtc::Thread::Create()) { + RTC_CHECK(network_thread_->Start()); + RTC_CHECK(worker_thread_->Start()); + } + + rtc::scoped_refptr CreatePcWrapper( + const char* name) { + rtc::scoped_refptr pc_wrapper = + rtc::make_ref_counted( + name, network_thread_.get(), worker_thread_.get()); + PeerConnectionInterface::RTCConfiguration config; + config.sdp_semantics = SdpSemantics::kUnifiedPlan; + EXPECT_TRUE(pc_wrapper->CreatePc(config, CreateBuiltinAudioEncoderFactory(), + CreateBuiltinAudioDecoderFactory())); + return pc_wrapper; + } + + protected: + rtc::VirtualSocketServer virtual_socket_server_; + std::unique_ptr network_thread_; + std::unique_ptr worker_thread_; +}; + +TEST_F(PeerConnectionAdaptationIntegrationTest, + ResouceInjectedAfterNegotiationCausesReductionInResolution) { + auto caller_wrapper = CreatePcWrapper("caller"); + auto caller = caller_wrapper->pc(); + auto callee_wrapper = CreatePcWrapper("callee"); + + // Adding a track and negotiating ensures that a VideoSendStream exists. + TrackWithPeriodicSource track_with_source = + CreateTrackWithPeriodicSource(caller_wrapper->pc_factory()); + auto sender = caller->AddTrack(track_with_source.track, {}).value(); + Negotiate(caller_wrapper, callee_wrapper); + // Prefer degrading resolution. 
+ auto parameters = sender->GetParameters(); + parameters.degradation_preference = DegradationPreference::MAINTAIN_FRAMERATE; + sender->SetParameters(parameters); + + const auto& source = + track_with_source.periodic_track_source->fake_periodic_source(); + int pixel_count_before_overuse = source.wants().max_pixel_count; + + // Inject a fake resource and spam kOveruse until resolution becomes limited. + auto fake_resource = FakeResource::Create("FakeResource"); + caller->AddAdaptationResource(fake_resource); + EXPECT_TRUE_WAIT( + TriggerOveruseAndGetSinkWants(fake_resource, source).max_pixel_count < + pixel_count_before_overuse, + kDefaultTimeoutMs); +} + +TEST_F(PeerConnectionAdaptationIntegrationTest, + ResouceInjectedBeforeNegotiationCausesReductionInResolution) { + auto caller_wrapper = CreatePcWrapper("caller"); + auto caller = caller_wrapper->pc(); + auto callee_wrapper = CreatePcWrapper("callee"); + + // Inject a fake resource before adding any tracks or negotiating. + auto fake_resource = FakeResource::Create("FakeResource"); + caller->AddAdaptationResource(fake_resource); + + // Adding a track and negotiating ensures that a VideoSendStream exists. + TrackWithPeriodicSource track_with_source = + CreateTrackWithPeriodicSource(caller_wrapper->pc_factory()); + auto sender = caller->AddTrack(track_with_source.track, {}).value(); + Negotiate(caller_wrapper, callee_wrapper); + // Prefer degrading resolution. + auto parameters = sender->GetParameters(); + parameters.degradation_preference = DegradationPreference::MAINTAIN_FRAMERATE; + sender->SetParameters(parameters); + + const auto& source = + track_with_source.periodic_track_source->fake_periodic_source(); + int pixel_count_before_overuse = source.wants().max_pixel_count; + + // Spam kOveruse until resolution becomes limited. 
+ EXPECT_TRUE_WAIT( + TriggerOveruseAndGetSinkWants(fake_resource, source).max_pixel_count < + pixel_count_before_overuse, + kDefaultTimeoutMs); +} + +} // namespace webrtc diff --git a/pc/peer_connection_bundle_unittest.cc b/pc/peer_connection_bundle_unittest.cc index 543c9be81a..08754c6820 100644 --- a/pc/peer_connection_bundle_unittest.cc +++ b/pc/peer_connection_bundle_unittest.cc @@ -13,7 +13,6 @@ #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "api/create_peerconnection_factory.h" -#include "api/peer_connection_proxy.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" #include "p2p/base/fake_port_allocator.h" @@ -21,6 +20,7 @@ #include "p2p/client/basic_port_allocator.h" #include "pc/media_session.h" #include "pc/peer_connection.h" +#include "pc/peer_connection_proxy.h" #include "pc/peer_connection_wrapper.h" #include "pc/sdp_utils.h" #ifdef WEBRTC_ANDROID @@ -753,11 +753,9 @@ TEST_P(PeerConnectionBundleTest, RejectDescriptionChangingBundleTag) { // This tests that removing contents from BUNDLE group and reject the whole // BUNDLE group could work. This is a regression test for // (https://bugs.chromium.org/p/chromium/issues/detail?id=827917) +#ifdef HAVE_SCTP TEST_P(PeerConnectionBundleTest, RemovingContentAndRejectBundleGroup) { RTCConfiguration config; -#ifndef HAVE_SCTP - config.enable_rtp_data_channel = true; -#endif config.bundle_policy = BundlePolicy::kBundlePolicyMaxBundle; auto caller = CreatePeerConnectionWithAudioVideo(config); caller->CreateDataChannel("dc"); @@ -782,6 +780,7 @@ TEST_P(PeerConnectionBundleTest, RemovingContentAndRejectBundleGroup) { EXPECT_TRUE(caller->SetLocalDescription(std::move(re_offer))); } +#endif // This tests that the BUNDLE group in answer should be a subset of the offered // group. 
@@ -873,7 +872,7 @@ TEST_F(PeerConnectionBundleTestUnifiedPlan,
   // Stop all transceivers, causing all m= sections to be rejected.
   for (const auto& transceiver : callee->pc()->GetTransceivers()) {
-    transceiver->Stop();
+    transceiver->StopInternal();
   }
   EXPECT_TRUE(
       caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal()));
@@ -887,4 +886,56 @@ TEST_F(PeerConnectionBundleTestUnifiedPlan,
   EXPECT_TRUE(bundle_group->content_names().empty());
 }
 
+TEST_F(PeerConnectionBundleTestUnifiedPlan, MultipleBundleGroups) {
+  auto caller = CreatePeerConnection();
+  caller->AddAudioTrack("0_audio");
+  caller->AddAudioTrack("1_audio");
+  caller->AddVideoTrack("2_audio");
+  caller->AddVideoTrack("3_audio");
+  auto callee = CreatePeerConnection();
+
+  auto offer = caller->CreateOffer(RTCOfferAnswerOptions());
+  // Modify the GROUP to have two BUNDLEs. We know that the MIDs will be 0,1,2,3
+  // because our implementation has predictable MIDs.
+  offer->description()->RemoveGroupByName(cricket::GROUP_TYPE_BUNDLE);
+  cricket::ContentGroup bundle_group1(cricket::GROUP_TYPE_BUNDLE);
+  bundle_group1.AddContentName("0");
+  bundle_group1.AddContentName("1");
+  cricket::ContentGroup bundle_group2(cricket::GROUP_TYPE_BUNDLE);
+  bundle_group2.AddContentName("2");
+  bundle_group2.AddContentName("3");
+  offer->description()->AddGroup(bundle_group1);
+  offer->description()->AddGroup(bundle_group2);
+
+  EXPECT_TRUE(
+      caller->SetLocalDescription(CloneSessionDescription(offer.get())));
+  callee->SetRemoteDescription(std::move(offer));
+  auto answer = callee->CreateAnswer();
+  EXPECT_TRUE(
+      callee->SetLocalDescription(CloneSessionDescription(answer.get())));
+  caller->SetRemoteDescription(std::move(answer));
+
+  // Verify bundling on sender side.
+ auto senders = caller->pc()->GetSenders(); + ASSERT_EQ(senders.size(), 4u); + auto sender0_transport = senders[0]->dtls_transport(); + auto sender1_transport = senders[1]->dtls_transport(); + auto sender2_transport = senders[2]->dtls_transport(); + auto sender3_transport = senders[3]->dtls_transport(); + EXPECT_EQ(sender0_transport, sender1_transport); + EXPECT_EQ(sender2_transport, sender3_transport); + EXPECT_NE(sender0_transport, sender2_transport); + + // Verify bundling on receiver side. + auto receivers = callee->pc()->GetReceivers(); + ASSERT_EQ(receivers.size(), 4u); + auto receiver0_transport = receivers[0]->dtls_transport(); + auto receiver1_transport = receivers[1]->dtls_transport(); + auto receiver2_transport = receivers[2]->dtls_transport(); + auto receiver3_transport = receivers[3]->dtls_transport(); + EXPECT_EQ(receiver0_transport, receiver1_transport); + EXPECT_EQ(receiver2_transport, receiver3_transport); + EXPECT_NE(receiver0_transport, receiver2_transport); +} + } // namespace webrtc diff --git a/pc/peer_connection_crypto_unittest.cc b/pc/peer_connection_crypto_unittest.cc index 32e8cbd74c..394203cb02 100644 --- a/pc/peer_connection_crypto_unittest.cc +++ b/pc/peer_connection_crypto_unittest.cc @@ -631,7 +631,7 @@ TEST_P(PeerConnectionCryptoDtlsCertGenTest, TestCertificateGeneration) { observers; for (size_t i = 0; i < concurrent_calls_; i++) { rtc::scoped_refptr observer = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); observers.push_back(observer); if (sdp_type_ == SdpType::kOffer) { pc->pc()->CreateOffer(observer, diff --git a/pc/peer_connection_data_channel_unittest.cc b/pc/peer_connection_data_channel_unittest.cc index 0a674f462b..2544473536 100644 --- a/pc/peer_connection_data_channel_unittest.cc +++ b/pc/peer_connection_data_channel_unittest.cc @@ -19,7 +19,6 @@ #include "api/jsep.h" #include "api/media_types.h" #include "api/peer_connection_interface.h" -#include "api/peer_connection_proxy.h" #include "api/scoped_refptr.h" 
#include "api/task_queue/default_task_queue_factory.h" #include "media/base/codec.h" @@ -32,6 +31,7 @@ #include "pc/media_session.h" #include "pc/peer_connection.h" #include "pc/peer_connection_factory.h" +#include "pc/peer_connection_proxy.h" #include "pc/peer_connection_wrapper.h" #include "pc/sdp_utils.h" #include "pc/session_description.h" @@ -45,8 +45,8 @@ #ifdef WEBRTC_ANDROID #include "pc/test/android_test_initializer.h" #endif -#include "pc/test/fake_sctp_transport.h" #include "rtc_base/virtual_socket_server.h" +#include "test/pc/sctp/fake_sctp_transport.h" namespace webrtc { @@ -58,46 +58,20 @@ using ::testing::Values; namespace { -PeerConnectionFactoryDependencies CreatePeerConnectionFactoryDependencies( - rtc::Thread* network_thread, - rtc::Thread* worker_thread, - rtc::Thread* signaling_thread, - std::unique_ptr media_engine, - std::unique_ptr call_factory) { +PeerConnectionFactoryDependencies CreatePeerConnectionFactoryDependencies() { PeerConnectionFactoryDependencies deps; - deps.network_thread = network_thread; - deps.worker_thread = worker_thread; - deps.signaling_thread = signaling_thread; + deps.network_thread = rtc::Thread::Current(); + deps.worker_thread = rtc::Thread::Current(); + deps.signaling_thread = rtc::Thread::Current(); deps.task_queue_factory = CreateDefaultTaskQueueFactory(); - deps.media_engine = std::move(media_engine); - deps.call_factory = std::move(call_factory); + deps.media_engine = std::make_unique(); + deps.call_factory = CreateCallFactory(); + deps.sctp_factory = std::make_unique(); return deps; } } // namespace -class PeerConnectionFactoryForDataChannelTest - : public rtc::RefCountedObject { - public: - PeerConnectionFactoryForDataChannelTest() - : rtc::RefCountedObject( - CreatePeerConnectionFactoryDependencies( - rtc::Thread::Current(), - rtc::Thread::Current(), - rtc::Thread::Current(), - std::make_unique(), - CreateCallFactory())) {} - - std::unique_ptr - CreateSctpTransportInternalFactory() { - auto factory = 
std::make_unique(); - last_fake_sctp_transport_factory_ = factory.get(); - return factory; - } - - FakeSctpTransportFactory* last_fake_sctp_transport_factory_ = nullptr; -}; - class PeerConnectionWrapperForDataChannelTest : public PeerConnectionWrapper { public: using PeerConnectionWrapper::PeerConnectionWrapper; @@ -155,10 +129,12 @@ class PeerConnectionDataChannelBaseTest : public ::testing::Test { WrapperPtr CreatePeerConnection( const RTCConfiguration& config, const PeerConnectionFactoryInterface::Options factory_options) { - rtc::scoped_refptr pc_factory( - new PeerConnectionFactoryForDataChannelTest()); + auto factory_deps = CreatePeerConnectionFactoryDependencies(); + FakeSctpTransportFactory* fake_sctp_transport_factory = + static_cast(factory_deps.sctp_factory.get()); + rtc::scoped_refptr pc_factory = + CreateModularPeerConnectionFactory(std::move(factory_deps)); pc_factory->SetOptions(factory_options); - RTC_CHECK(pc_factory->Initialize()); auto observer = std::make_unique(); RTCConfiguration modified_config = config; modified_config.sdp_semantics = sdp_semantics_; @@ -171,9 +147,7 @@ class PeerConnectionDataChannelBaseTest : public ::testing::Test { observer->SetPeerConnectionInterface(pc.get()); auto wrapper = std::make_unique( pc_factory, pc, std::move(observer)); - RTC_DCHECK(pc_factory->last_fake_sctp_transport_factory_); - wrapper->set_sctp_transport_factory( - pc_factory->last_fake_sctp_transport_factory_); + wrapper->set_sctp_transport_factory(fake_sctp_transport_factory); return wrapper; } @@ -219,28 +193,6 @@ class PeerConnectionDataChannelUnifiedPlanTest : PeerConnectionDataChannelBaseTest(SdpSemantics::kUnifiedPlan) {} }; -TEST_P(PeerConnectionDataChannelTest, - NoSctpTransportCreatedIfRtpDataChannelEnabled) { - RTCConfiguration config; - config.enable_rtp_data_channel = true; - auto caller = CreatePeerConnectionWithDataChannel(config); - - ASSERT_TRUE(caller->SetLocalDescription(caller->CreateOffer())); - 
EXPECT_FALSE(caller->sctp_transport_factory()->last_fake_sctp_transport()); -} - -TEST_P(PeerConnectionDataChannelTest, - RtpDataChannelCreatedEvenIfSctpAvailable) { - RTCConfiguration config; - config.enable_rtp_data_channel = true; - PeerConnectionFactoryInterface::Options options; - options.disable_sctp_data_channels = false; - auto caller = CreatePeerConnectionWithDataChannel(config, options); - - ASSERT_TRUE(caller->SetLocalDescription(caller->CreateOffer())); - EXPECT_FALSE(caller->sctp_transport_factory()->last_fake_sctp_transport()); -} - TEST_P(PeerConnectionDataChannelTest, InternalSctpTransportDeletedOnTeardown) { auto caller = CreatePeerConnectionWithDataChannel(); @@ -337,34 +289,6 @@ TEST_P(PeerConnectionDataChannelTest, EXPECT_TRUE(caller->pc()->CreateDataChannel("dc", nullptr)); } -TEST_P(PeerConnectionDataChannelTest, CreateDataChannelWithSctpDisabledFails) { - PeerConnectionFactoryInterface::Options options; - options.disable_sctp_data_channels = true; - auto caller = CreatePeerConnection(RTCConfiguration(), options); - - EXPECT_FALSE(caller->pc()->CreateDataChannel("dc", nullptr)); -} - -// Test that if a callee has SCTP disabled and receives an offer with an SCTP -// data channel, the data section is rejected and no SCTP transport is created -// on the callee. 
-TEST_P(PeerConnectionDataChannelTest, - DataSectionRejectedIfCalleeHasSctpDisabled) { - auto caller = CreatePeerConnectionWithDataChannel(); - PeerConnectionFactoryInterface::Options options; - options.disable_sctp_data_channels = true; - auto callee = CreatePeerConnection(RTCConfiguration(), options); - - ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); - - EXPECT_FALSE(callee->sctp_transport_factory()->last_fake_sctp_transport()); - - auto answer = callee->CreateAnswer(); - auto* data_content = cricket::GetFirstDataContent(answer->description()); - ASSERT_TRUE(data_content); - EXPECT_TRUE(data_content->rejected); -} - TEST_P(PeerConnectionDataChannelTest, SctpPortPropagatedFromSdpToTransport) { constexpr int kNewSendPort = 9998; constexpr int kNewRecvPort = 7775; @@ -378,8 +302,9 @@ TEST_P(PeerConnectionDataChannelTest, SctpPortPropagatedFromSdpToTransport) { auto answer = callee->CreateAnswer(); ChangeSctpPortOnDescription(answer->description(), kNewRecvPort); + std::string sdp; + answer->ToString(&sdp); ASSERT_TRUE(callee->SetLocalDescription(std::move(answer))); - auto* callee_transport = callee->sctp_transport_factory()->last_fake_sctp_transport(); ASSERT_TRUE(callee_transport); @@ -418,28 +343,4 @@ INSTANTIATE_TEST_SUITE_P(PeerConnectionDataChannelTest, Values(SdpSemantics::kPlanB, SdpSemantics::kUnifiedPlan)); -TEST_F(PeerConnectionDataChannelUnifiedPlanTest, - ReOfferAfterPeerRejectsDataChannel) { - auto caller = CreatePeerConnectionWithDataChannel(); - PeerConnectionFactoryInterface::Options options; - options.disable_sctp_data_channels = true; - auto callee = CreatePeerConnection(RTCConfiguration(), options); - - ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - - auto offer = caller->CreateOffer(); - ASSERT_TRUE(offer); - const auto& contents = offer->description()->contents(); - ASSERT_EQ(1u, contents.size()); - EXPECT_TRUE(contents[0].rejected); - - ASSERT_TRUE( - 
caller->SetLocalDescription(CloneSessionDescription(offer.get()))); - ASSERT_TRUE(callee->SetRemoteDescription(std::move(offer))); - - auto answer = callee->CreateAnswerAndSetAsLocal(); - ASSERT_TRUE(answer); - EXPECT_TRUE(caller->SetRemoteDescription(std::move(answer))); -} - } // namespace webrtc diff --git a/pc/peer_connection_end_to_end_unittest.cc b/pc/peer_connection_end_to_end_unittest.cc index 24ef69c111..b29371c59b 100644 --- a/pc/peer_connection_end_to_end_unittest.cc +++ b/pc/peer_connection_end_to_end_unittest.cc @@ -465,7 +465,7 @@ TEST_P(PeerConnectionEndToEndTest, CallWithCustomCodec) { EXPECT_NE(encoder_id1, encoder_id2); } -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP // Verifies that a DataChannel created before the negotiation can transition to // "OPEN" and transfer data. TEST_P(PeerConnectionEndToEndTest, CreateDataChannelBeforeNegotiate) { @@ -735,7 +735,7 @@ TEST_P(PeerConnectionEndToEndTest, TooManyDataChannelsOpenedBeforeConnecting) { channels[cricket::kMaxSctpStreams / 2]->state()); } -#endif // HAVE_SCTP +#endif // WEBRTC_HAVE_SCTP TEST_P(PeerConnectionEndToEndTest, CanRestartIce) { rtc::scoped_refptr real_decoder_factory = diff --git a/pc/peer_connection_factory.cc b/pc/peer_connection_factory.cc index 9a758bea2d..50755a38c7 100644 --- a/pc/peer_connection_factory.cc +++ b/pc/peer_connection_factory.cc @@ -12,38 +12,45 @@ #include #include -#include #include "absl/strings/match.h" +#include "api/async_resolver_factory.h" +#include "api/call/call_factory_interface.h" #include "api/fec_controller.h" -#include "api/media_stream_proxy.h" -#include "api/media_stream_track_proxy.h" +#include "api/ice_transport_interface.h" #include "api/network_state_predictor.h" -#include "api/peer_connection_factory_proxy.h" -#include "api/peer_connection_proxy.h" +#include "api/packet_socket_factory.h" #include "api/rtc_event_log/rtc_event_log.h" -#include "api/transport/field_trial_based_config.h" -#include "api/transport/media/media_transport_interface.h" 
-#include "api/turn_customizer.h" +#include "api/sequence_checker.h" +#include "api/transport/bitrate_settings.h" #include "api/units/data_rate.h" -#include "api/video_track_source_proxy.h" -#include "media/base/rtp_data_engine.h" -#include "media/sctp/sctp_transport.h" +#include "call/audio_state.h" +#include "call/rtp_transport_controller_send_factory.h" +#include "media/base/media_engine.h" #include "p2p/base/basic_async_resolver_factory.h" #include "p2p/base/basic_packet_socket_factory.h" #include "p2p/base/default_ice_transport_factory.h" +#include "p2p/base/port_allocator.h" #include "p2p/client/basic_port_allocator.h" #include "pc/audio_track.h" #include "pc/local_audio_source.h" #include "pc/media_stream.h" +#include "pc/media_stream_proxy.h" +#include "pc/media_stream_track_proxy.h" #include "pc/peer_connection.h" +#include "pc/peer_connection_factory_proxy.h" +#include "pc/peer_connection_proxy.h" #include "pc/rtp_parameters_conversion.h" +#include "pc/session_description.h" #include "pc/video_track.h" -#include "rtc_base/bind.h" #include "rtc_base/checks.h" #include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/experiments/field_trial_units.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/rtc_certificate_generator.h" #include "rtc_base/system/file_wrapper.h" namespace webrtc { @@ -51,197 +58,146 @@ namespace webrtc { rtc::scoped_refptr CreateModularPeerConnectionFactory( PeerConnectionFactoryDependencies dependencies) { - rtc::scoped_refptr pc_factory( - new rtc::RefCountedObject( - std::move(dependencies))); - // Call Initialize synchronously but make sure it is executed on - // |signaling_thread|. 
- MethodCall call( - pc_factory.get(), &PeerConnectionFactory::Initialize); - bool result = call.Marshal(RTC_FROM_HERE, pc_factory->signaling_thread()); - - if (!result) { + // The PeerConnectionFactory must be created on the signaling thread. + if (dependencies.signaling_thread && + !dependencies.signaling_thread->IsCurrent()) { + return dependencies.signaling_thread + ->Invoke>( + RTC_FROM_HERE, [&dependencies] { + return CreateModularPeerConnectionFactory( + std::move(dependencies)); + }); + } + + auto pc_factory = PeerConnectionFactory::Create(std::move(dependencies)); + if (!pc_factory) { + return nullptr; + } + // Verify that the invocation and the initialization ended up agreeing on the + // thread. + RTC_DCHECK_RUN_ON(pc_factory->signaling_thread()); + return PeerConnectionFactoryProxy::Create( + pc_factory->signaling_thread(), pc_factory->worker_thread(), pc_factory); +} + +// Static +rtc::scoped_refptr PeerConnectionFactory::Create( + PeerConnectionFactoryDependencies dependencies) { + auto context = ConnectionContext::Create(&dependencies); + if (!context) { return nullptr; } - return PeerConnectionFactoryProxy::Create(pc_factory->signaling_thread(), - pc_factory); + return rtc::make_ref_counted(context, &dependencies); } PeerConnectionFactory::PeerConnectionFactory( - PeerConnectionFactoryDependencies dependencies) - : wraps_current_thread_(false), - network_thread_(dependencies.network_thread), - worker_thread_(dependencies.worker_thread), - signaling_thread_(dependencies.signaling_thread), - task_queue_factory_(std::move(dependencies.task_queue_factory)), - media_engine_(std::move(dependencies.media_engine)), - call_factory_(std::move(dependencies.call_factory)), - event_log_factory_(std::move(dependencies.event_log_factory)), - fec_controller_factory_(std::move(dependencies.fec_controller_factory)), + rtc::scoped_refptr context, + PeerConnectionFactoryDependencies* dependencies) + : context_(context), + 
task_queue_factory_(std::move(dependencies->task_queue_factory)), + event_log_factory_(std::move(dependencies->event_log_factory)), + fec_controller_factory_(std::move(dependencies->fec_controller_factory)), network_state_predictor_factory_( - std::move(dependencies.network_state_predictor_factory)), + std::move(dependencies->network_state_predictor_factory)), injected_network_controller_factory_( - std::move(dependencies.network_controller_factory)), - media_transport_factory_(std::move(dependencies.media_transport_factory)), - neteq_factory_(std::move(dependencies.neteq_factory)), - trials_(dependencies.trials ? std::move(dependencies.trials) - : std::make_unique()) { - if (!network_thread_) { - owned_network_thread_ = rtc::Thread::CreateWithSocketServer(); - owned_network_thread_->SetName("pc_network_thread", nullptr); - owned_network_thread_->Start(); - network_thread_ = owned_network_thread_.get(); - } - - if (!worker_thread_) { - owned_worker_thread_ = rtc::Thread::Create(); - owned_worker_thread_->SetName("pc_worker_thread", nullptr); - owned_worker_thread_->Start(); - worker_thread_ = owned_worker_thread_.get(); - } + std::move(dependencies->network_controller_factory)), + neteq_factory_(std::move(dependencies->neteq_factory)), + transport_controller_send_factory_( + (dependencies->transport_controller_send_factory) + ? std::move(dependencies->transport_controller_send_factory) + : std::make_unique()) {} - if (!signaling_thread_) { - signaling_thread_ = rtc::Thread::Current(); - if (!signaling_thread_) { - // If this thread isn't already wrapped by an rtc::Thread, create a - // wrapper and own it in this class. 
- signaling_thread_ = rtc::ThreadManager::Instance()->WrapCurrentThread(); - wraps_current_thread_ = true; - } - } -} +PeerConnectionFactory::PeerConnectionFactory( + PeerConnectionFactoryDependencies dependencies) + : PeerConnectionFactory(ConnectionContext::Create(&dependencies), + &dependencies) {} PeerConnectionFactory::~PeerConnectionFactory() { - RTC_DCHECK(signaling_thread_->IsCurrent()); - channel_manager_.reset(nullptr); - - // Make sure |worker_thread_| and |signaling_thread_| outlive - // |default_socket_factory_| and |default_network_manager_|. - default_socket_factory_ = nullptr; - default_network_manager_ = nullptr; - - if (wraps_current_thread_) - rtc::ThreadManager::Instance()->UnwrapCurrentThread(); -} - -bool PeerConnectionFactory::Initialize() { - RTC_DCHECK(signaling_thread_->IsCurrent()); - rtc::InitRandom(rtc::Time32()); - - default_network_manager_.reset(new rtc::BasicNetworkManager()); - if (!default_network_manager_) { - return false; - } - - default_socket_factory_.reset( - new rtc::BasicPacketSocketFactory(network_thread_)); - if (!default_socket_factory_) { - return false; - } - - channel_manager_ = std::make_unique( - std::move(media_engine_), std::make_unique(), - worker_thread_, network_thread_); - - channel_manager_->SetVideoRtxEnabled(true); - if (!channel_manager_->Init()) { - return false; - } - - return true; + RTC_DCHECK_RUN_ON(signaling_thread()); } void PeerConnectionFactory::SetOptions(const Options& options) { + RTC_DCHECK_RUN_ON(signaling_thread()); options_ = options; } RtpCapabilities PeerConnectionFactory::GetRtpSenderCapabilities( cricket::MediaType kind) const { - RTC_DCHECK_RUN_ON(signaling_thread_); + RTC_DCHECK_RUN_ON(signaling_thread()); switch (kind) { case cricket::MEDIA_TYPE_AUDIO: { cricket::AudioCodecs cricket_codecs; - channel_manager_->GetSupportedAudioSendCodecs(&cricket_codecs); + channel_manager()->GetSupportedAudioSendCodecs(&cricket_codecs); return ToRtpCapabilities( cricket_codecs, - 
channel_manager_->GetDefaultEnabledAudioRtpHeaderExtensions()); + channel_manager()->GetDefaultEnabledAudioRtpHeaderExtensions()); } case cricket::MEDIA_TYPE_VIDEO: { cricket::VideoCodecs cricket_codecs; - channel_manager_->GetSupportedVideoSendCodecs(&cricket_codecs); + channel_manager()->GetSupportedVideoSendCodecs(&cricket_codecs); return ToRtpCapabilities( cricket_codecs, - channel_manager_->GetDefaultEnabledVideoRtpHeaderExtensions()); + channel_manager()->GetDefaultEnabledVideoRtpHeaderExtensions()); } case cricket::MEDIA_TYPE_DATA: return RtpCapabilities(); + case cricket::MEDIA_TYPE_UNSUPPORTED: + return RtpCapabilities(); } - // Not reached; avoids compile warning. - FATAL(); + RTC_DLOG(LS_ERROR) << "Got unexpected MediaType " << kind; + RTC_CHECK_NOTREACHED(); } RtpCapabilities PeerConnectionFactory::GetRtpReceiverCapabilities( cricket::MediaType kind) const { - RTC_DCHECK_RUN_ON(signaling_thread_); + RTC_DCHECK_RUN_ON(signaling_thread()); switch (kind) { case cricket::MEDIA_TYPE_AUDIO: { cricket::AudioCodecs cricket_codecs; - channel_manager_->GetSupportedAudioReceiveCodecs(&cricket_codecs); + channel_manager()->GetSupportedAudioReceiveCodecs(&cricket_codecs); return ToRtpCapabilities( cricket_codecs, - channel_manager_->GetDefaultEnabledAudioRtpHeaderExtensions()); + channel_manager()->GetDefaultEnabledAudioRtpHeaderExtensions()); } case cricket::MEDIA_TYPE_VIDEO: { cricket::VideoCodecs cricket_codecs; - channel_manager_->GetSupportedVideoReceiveCodecs(&cricket_codecs); + channel_manager()->GetSupportedVideoReceiveCodecs(&cricket_codecs); return ToRtpCapabilities( cricket_codecs, - channel_manager_->GetDefaultEnabledVideoRtpHeaderExtensions()); + channel_manager()->GetDefaultEnabledVideoRtpHeaderExtensions()); } case cricket::MEDIA_TYPE_DATA: return RtpCapabilities(); + case cricket::MEDIA_TYPE_UNSUPPORTED: + return RtpCapabilities(); } - // Not reached; avoids compile warning. 
- FATAL(); + RTC_DLOG(LS_ERROR) << "Got unexpected MediaType " << kind; + RTC_CHECK_NOTREACHED(); } rtc::scoped_refptr PeerConnectionFactory::CreateAudioSource(const cricket::AudioOptions& options) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK(signaling_thread()->IsCurrent()); rtc::scoped_refptr source( LocalAudioSource::Create(&options)); return source; } bool PeerConnectionFactory::StartAecDump(FILE* file, int64_t max_size_bytes) { - RTC_DCHECK(signaling_thread_->IsCurrent()); - return channel_manager_->StartAecDump(FileWrapper(file), max_size_bytes); + RTC_DCHECK_RUN_ON(worker_thread()); + return channel_manager()->StartAecDump(FileWrapper(file), max_size_bytes); } void PeerConnectionFactory::StopAecDump() { - RTC_DCHECK(signaling_thread_->IsCurrent()); - channel_manager_->StopAecDump(); + RTC_DCHECK_RUN_ON(worker_thread()); + channel_manager()->StopAecDump(); } -rtc::scoped_refptr -PeerConnectionFactory::CreatePeerConnection( - const PeerConnectionInterface::RTCConfiguration& configuration, - std::unique_ptr allocator, - std::unique_ptr cert_generator, - PeerConnectionObserver* observer) { - // Convert the legacy API into the new dependency structure. - PeerConnectionDependencies dependencies(observer); - dependencies.allocator = std::move(allocator); - dependencies.cert_generator = std::move(cert_generator); - // Pass that into the new API. 
- return CreatePeerConnection(configuration, std::move(dependencies)); -} - -rtc::scoped_refptr -PeerConnectionFactory::CreatePeerConnection( +RTCErrorOr> +PeerConnectionFactory::CreatePeerConnectionOrError( const PeerConnectionInterface::RTCConfiguration& configuration, PeerConnectionDependencies dependencies) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread()); RTC_DCHECK(!(dependencies.allocator && dependencies.packet_socket_factory)) << "You can't set both allocator and packet_socket_factory; " "the former is going away (see bugs.webrtc.org/7447"; @@ -249,23 +205,19 @@ PeerConnectionFactory::CreatePeerConnection( // Set internal defaults if optional dependencies are not set. if (!dependencies.cert_generator) { dependencies.cert_generator = - std::make_unique(signaling_thread_, - network_thread_); + std::make_unique(signaling_thread(), + network_thread()); } if (!dependencies.allocator) { rtc::PacketSocketFactory* packet_socket_factory; if (dependencies.packet_socket_factory) packet_socket_factory = dependencies.packet_socket_factory.get(); else - packet_socket_factory = default_socket_factory_.get(); - - network_thread_->Invoke(RTC_FROM_HERE, [this, &configuration, - &dependencies, - &packet_socket_factory]() { - dependencies.allocator = std::make_unique( - default_network_manager_.get(), packet_socket_factory, - configuration.turn_customizer); - }); + packet_socket_factory = context_->default_socket_factory(); + + dependencies.allocator = std::make_unique( + context_->default_network_manager(), packet_socket_factory, + configuration.turn_customizer); } if (!dependencies.async_resolver_factory) { @@ -278,69 +230,64 @@ PeerConnectionFactory::CreatePeerConnection( std::make_unique(); } - network_thread_->Invoke( - RTC_FROM_HERE, - rtc::Bind(&cricket::PortAllocator::SetNetworkIgnoreMask, - dependencies.allocator.get(), options_.network_ignore_mask)); + dependencies.allocator->SetNetworkIgnoreMask(options().network_ignore_mask); 
std::unique_ptr event_log = - worker_thread_->Invoke>( - RTC_FROM_HERE, - rtc::Bind(&PeerConnectionFactory::CreateRtcEventLog_w, this)); + worker_thread()->Invoke>( + RTC_FROM_HERE, [this] { return CreateRtcEventLog_w(); }); - std::unique_ptr call = worker_thread_->Invoke>( + std::unique_ptr call = worker_thread()->Invoke>( RTC_FROM_HERE, - rtc::Bind(&PeerConnectionFactory::CreateCall_w, this, event_log.get())); + [this, &event_log] { return CreateCall_w(event_log.get()); }); - rtc::scoped_refptr pc( - new rtc::RefCountedObject(this, std::move(event_log), - std::move(call))); - ActionsBeforeInitializeForTesting(pc); - if (!pc->Initialize(configuration, std::move(dependencies))) { - return nullptr; + auto result = PeerConnection::Create(context_, options_, std::move(event_log), + std::move(call), configuration, + std::move(dependencies)); + if (!result.ok()) { + return result.MoveError(); } - return PeerConnectionProxy::Create(signaling_thread(), pc); + // We configure the proxy with a pointer to the network thread for methods + // that need to be invoked there rather than on the signaling thread. + // Internally, the proxy object has a member variable named |worker_thread_| + // which will point to the network thread (and not the factory's + // worker_thread()). All such methods have thread checks though, so the code + // should still be clear (outside of macro expansion). 
+ rtc::scoped_refptr result_proxy = + PeerConnectionProxy::Create(signaling_thread(), network_thread(), + result.MoveValue()); + return result_proxy; } rtc::scoped_refptr PeerConnectionFactory::CreateLocalMediaStream(const std::string& stream_id) { - RTC_DCHECK(signaling_thread_->IsCurrent()); - return MediaStreamProxy::Create(signaling_thread_, + RTC_DCHECK(signaling_thread()->IsCurrent()); + return MediaStreamProxy::Create(signaling_thread(), MediaStream::Create(stream_id)); } rtc::scoped_refptr PeerConnectionFactory::CreateVideoTrack( const std::string& id, VideoTrackSourceInterface* source) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK(signaling_thread()->IsCurrent()); rtc::scoped_refptr track( - VideoTrack::Create(id, source, worker_thread_)); - return VideoTrackProxy::Create(signaling_thread_, worker_thread_, track); + VideoTrack::Create(id, source, worker_thread())); + return VideoTrackProxy::Create(signaling_thread(), worker_thread(), track); } rtc::scoped_refptr PeerConnectionFactory::CreateAudioTrack( const std::string& id, AudioSourceInterface* source) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK(signaling_thread()->IsCurrent()); rtc::scoped_refptr track(AudioTrack::Create(id, source)); - return AudioTrackProxy::Create(signaling_thread_, track); -} - -std::unique_ptr -PeerConnectionFactory::CreateSctpTransportInternalFactory() { -#ifdef HAVE_SCTP - return std::make_unique(network_thread()); -#else - return nullptr; -#endif + return AudioTrackProxy::Create(signaling_thread(), track); } cricket::ChannelManager* PeerConnectionFactory::channel_manager() { - return channel_manager_.get(); + return context_->channel_manager(); } std::unique_ptr PeerConnectionFactory::CreateRtcEventLog_w() { - RTC_DCHECK_RUN_ON(worker_thread_); + RTC_DCHECK_RUN_ON(worker_thread()); auto encoding_type = RtcEventLog::EncodingType::Legacy; if (IsTrialEnabled("WebRTC-RtcEventLogNewFormat")) @@ -352,14 +299,14 @@ std::unique_ptr 
PeerConnectionFactory::CreateRtcEventLog_w() { std::unique_ptr PeerConnectionFactory::CreateCall_w( RtcEventLog* event_log) { - RTC_DCHECK_RUN_ON(worker_thread_); + RTC_DCHECK_RUN_ON(worker_thread()); - webrtc::Call::Config call_config(event_log); - if (!channel_manager_->media_engine() || !call_factory_) { + webrtc::Call::Config call_config(event_log, network_thread()); + if (!channel_manager()->media_engine() || !context_->call_factory()) { return nullptr; } call_config.audio_state = - channel_manager_->media_engine()->voice().GetAudioState(); + channel_manager()->media_engine()->voice().GetAudioState(); FieldTrialParameter min_bandwidth("min", DataRate::KilobitsPerSec(30)); @@ -368,7 +315,7 @@ std::unique_ptr PeerConnectionFactory::CreateCall_w( FieldTrialParameter max_bandwidth("max", DataRate::KilobitsPerSec(2000)); ParseFieldTrial({&min_bandwidth, &start_bandwidth, &max_bandwidth}, - trials_->Lookup("WebRTC-PcFactoryDefaultBitrates")); + trials().Lookup("WebRTC-PcFactoryDefaultBitrates")); call_config.bitrate_config.min_bitrate_bps = rtc::saturated_cast(min_bandwidth->bps()); @@ -391,14 +338,15 @@ std::unique_ptr PeerConnectionFactory::CreateCall_w( RTC_LOG(LS_INFO) << "Using default network controller factory"; } - call_config.trials = trials_.get(); - - return std::unique_ptr(call_factory_->CreateCall(call_config)); + call_config.trials = &trials(); + call_config.rtp_transport_controller_send_factory = + transport_controller_send_factory_.get(); + return std::unique_ptr( + context_->call_factory()->CreateCall(call_config)); } bool PeerConnectionFactory::IsTrialEnabled(absl::string_view key) const { - RTC_DCHECK(trials_); - return absl::StartsWith(trials_->Lookup(key), "Enabled"); + return absl::StartsWith(trials().Lookup(key), "Enabled"); } } // namespace webrtc diff --git a/pc/peer_connection_factory.h b/pc/peer_connection_factory.h index 962b08c7c9..4946ec6ea2 100644 --- a/pc/peer_connection_factory.h +++ b/pc/peer_connection_factory.h @@ -12,17 +12,39 @@ 
#ifndef PC_PEER_CONNECTION_FACTORY_H_ #define PC_PEER_CONNECTION_FACTORY_H_ +#include +#include + #include #include +#include "absl/strings/string_view.h" +#include "api/audio_options.h" +#include "api/fec_controller.h" #include "api/media_stream_interface.h" +#include "api/media_types.h" +#include "api/neteq/neteq_factory.h" +#include "api/network_state_predictor.h" #include "api/peer_connection_interface.h" +#include "api/rtc_error.h" +#include "api/rtc_event_log/rtc_event_log.h" +#include "api/rtc_event_log/rtc_event_log_factory_interface.h" +#include "api/rtp_parameters.h" #include "api/scoped_refptr.h" -#include "api/transport/media/media_transport_interface.h" -#include "media/sctp/sctp_transport_internal.h" +#include "api/sequence_checker.h" +#include "api/task_queue/task_queue_factory.h" +#include "api/transport/network_control.h" +#include "api/transport/sctp_transport_factory_interface.h" +#include "api/transport/webrtc_key_value_config.h" +#include "call/call.h" +#include "call/rtp_transport_controller_send_factory_interface.h" +#include "p2p/base/port_allocator.h" #include "pc/channel_manager.h" +#include "pc/connection_context.h" +#include "rtc_base/checks.h" #include "rtc_base/rtc_certificate_generator.h" #include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace rtc { class BasicNetworkManager; @@ -35,20 +57,21 @@ class RtcEventLog; class PeerConnectionFactory : public PeerConnectionFactoryInterface { public: - void SetOptions(const Options& options) override; + // Creates a PeerConnectionFactory. It returns nullptr on initialization + // error. + // + // The Dependencies structure allows simple management of all new + // dependencies being added to the PeerConnectionFactory. 
+ static rtc::scoped_refptr Create( + PeerConnectionFactoryDependencies dependencies); - rtc::scoped_refptr CreatePeerConnection( - const PeerConnectionInterface::RTCConfiguration& configuration, - std::unique_ptr allocator, - std::unique_ptr cert_generator, - PeerConnectionObserver* observer) override; + void SetOptions(const Options& options) override; - rtc::scoped_refptr CreatePeerConnection( + RTCErrorOr> + CreatePeerConnectionOrError( const PeerConnectionInterface::RTCConfiguration& configuration, PeerConnectionDependencies dependencies) override; - bool Initialize(); - RtpCapabilities GetRtpSenderCapabilities( cricket::MediaType kind) const override; @@ -72,65 +95,63 @@ class PeerConnectionFactory : public PeerConnectionFactoryInterface { bool StartAecDump(FILE* file, int64_t max_size_bytes) override; void StopAecDump() override; - virtual std::unique_ptr - CreateSctpTransportInternalFactory(); + SctpTransportFactoryInterface* sctp_transport_factory() { + return context_->sctp_transport_factory(); + } virtual cricket::ChannelManager* channel_manager(); - rtc::Thread* signaling_thread() { + rtc::Thread* signaling_thread() const { // This method can be called on a different thread when the factory is // created in CreatePeerConnectionFactory(). - return signaling_thread_; + return context_->signaling_thread(); } - rtc::Thread* worker_thread() { return worker_thread_; } - rtc::Thread* network_thread() { return network_thread_; } - const Options& options() const { return options_; } + rtc::Thread* worker_thread() const { return context_->worker_thread(); } - MediaTransportFactory* media_transport_factory() { - return media_transport_factory_.get(); + const Options& options() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return options_; } + const WebRtcKeyValueConfig& trials() const { return context_->trials(); } + protected: - // This structure allows simple management of all new dependencies being added - // to the PeerConnectionFactory. 
+ // Constructor used by the static Create() method. Modifies the dependencies. + PeerConnectionFactory(rtc::scoped_refptr context, + PeerConnectionFactoryDependencies* dependencies); + + // Constructor for use in testing. Ignores the possibility of initialization + // failure. The dependencies are passed in by std::move(). explicit PeerConnectionFactory( PeerConnectionFactoryDependencies dependencies); - // Hook to let testing framework insert actions between - // "new RTCPeerConnection" and "pc.Initialize" - virtual void ActionsBeforeInitializeForTesting(PeerConnectionInterface*) {} - virtual ~PeerConnectionFactory(); private: + rtc::Thread* network_thread() const { return context_->network_thread(); } + bool IsTrialEnabled(absl::string_view key) const; + const cricket::ChannelManager* channel_manager() const { + return context_->channel_manager(); + } std::unique_ptr CreateRtcEventLog_w(); std::unique_ptr CreateCall_w(RtcEventLog* event_log); - bool wraps_current_thread_; - rtc::Thread* network_thread_; - rtc::Thread* worker_thread_; - rtc::Thread* signaling_thread_; - std::unique_ptr owned_network_thread_; - std::unique_ptr owned_worker_thread_; - const std::unique_ptr task_queue_factory_; - Options options_; - std::unique_ptr channel_manager_; - std::unique_ptr default_network_manager_; - std::unique_ptr default_socket_factory_; - std::unique_ptr media_engine_; - std::unique_ptr call_factory_; + rtc::scoped_refptr context_; + PeerConnectionFactoryInterface::Options options_ + RTC_GUARDED_BY(signaling_thread()); + std::unique_ptr task_queue_factory_; std::unique_ptr event_log_factory_; std::unique_ptr fec_controller_factory_; std::unique_ptr network_state_predictor_factory_; std::unique_ptr injected_network_controller_factory_; - std::unique_ptr media_transport_factory_; std::unique_ptr neteq_factory_; - const std::unique_ptr trials_; + const std::unique_ptr + transport_controller_send_factory_; }; } // namespace webrtc diff --git 
a/api/peer_connection_factory_proxy.h b/pc/peer_connection_factory_proxy.h similarity index 62% rename from api/peer_connection_factory_proxy.h rename to pc/peer_connection_factory_proxy.h index e33fb457ae..59e373db7b 100644 --- a/api/peer_connection_factory_proxy.h +++ b/pc/peer_connection_factory_proxy.h @@ -8,32 +8,25 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef API_PEER_CONNECTION_FACTORY_PROXY_H_ -#define API_PEER_CONNECTION_FACTORY_PROXY_H_ +#ifndef PC_PEER_CONNECTION_FACTORY_PROXY_H_ +#define PC_PEER_CONNECTION_FACTORY_PROXY_H_ #include #include #include #include "api/peer_connection_interface.h" -#include "api/proxy.h" -#include "rtc_base/bind.h" +#include "pc/proxy.h" namespace webrtc { -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. -BEGIN_SIGNALING_PROXY_MAP(PeerConnectionFactory) -PROXY_SIGNALING_THREAD_DESTRUCTOR() +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
+BEGIN_PROXY_MAP(PeerConnectionFactory) +PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD1(void, SetOptions, const Options&) -PROXY_METHOD4(rtc::scoped_refptr, - CreatePeerConnection, - const PeerConnectionInterface::RTCConfiguration&, - std::unique_ptr, - std::unique_ptr, - PeerConnectionObserver*) -PROXY_METHOD2(rtc::scoped_refptr, - CreatePeerConnection, +PROXY_METHOD2(RTCErrorOr>, + CreatePeerConnectionOrError, const PeerConnectionInterface::RTCConfiguration&, PeerConnectionDependencies) PROXY_CONSTMETHOD1(webrtc::RtpCapabilities, @@ -56,10 +49,10 @@ PROXY_METHOD2(rtc::scoped_refptr, CreateAudioTrack, const std::string&, AudioSourceInterface*) -PROXY_METHOD2(bool, StartAecDump, FILE*, int64_t) -PROXY_METHOD0(void, StopAecDump) -END_PROXY_MAP() +PROXY_SECONDARY_METHOD2(bool, StartAecDump, FILE*, int64_t) +PROXY_SECONDARY_METHOD0(void, StopAecDump) +END_PROXY_MAP(PeerConnectionFactory) } // namespace webrtc -#endif // API_PEER_CONNECTION_FACTORY_PROXY_H_ +#endif // PC_PEER_CONNECTION_FACTORY_PROXY_H_ diff --git a/pc/peer_connection_header_extension_unittest.cc b/pc/peer_connection_header_extension_unittest.cc index 3f44d4f877..8bf6c7ab44 100644 --- a/pc/peer_connection_header_extension_unittest.cc +++ b/pc/peer_connection_header_extension_unittest.cc @@ -33,16 +33,31 @@ class PeerConnectionHeaderExtensionTest : public ::testing::TestWithParam< std::tuple> { protected: + PeerConnectionHeaderExtensionTest() + : extensions_( + {RtpHeaderExtensionCapability("uri1", + 1, + RtpTransceiverDirection::kStopped), + RtpHeaderExtensionCapability("uri2", + 2, + RtpTransceiverDirection::kSendOnly), + RtpHeaderExtensionCapability("uri3", + 3, + RtpTransceiverDirection::kRecvOnly), + RtpHeaderExtensionCapability( + "uri4", + 4, + RtpTransceiverDirection::kSendRecv)}) {} + std::unique_ptr CreatePeerConnection( cricket::MediaType media_type, - absl::optional semantics, - std::vector extensions) { + absl::optional semantics) { auto voice = std::make_unique(); auto video = 
std::make_unique(); if (media_type == cricket::MediaType::MEDIA_TYPE_AUDIO) - voice->SetRtpHeaderExtensions(extensions); + voice->SetRtpHeaderExtensions(extensions_); else - video->SetRtpHeaderExtensions(extensions); + video->SetRtpHeaderExtensions(extensions_); auto media_engine = std::make_unique( std::move(voice), std::move(video)); PeerConnectionFactoryDependencies factory_dependencies; @@ -71,6 +86,8 @@ class PeerConnectionHeaderExtensionTest return std::make_unique(pc_factory, pc, std::move(observer)); } + + std::vector extensions_; }; TEST_P(PeerConnectionHeaderExtensionTest, TransceiverOffersHeaderExtensions) { @@ -79,19 +96,10 @@ TEST_P(PeerConnectionHeaderExtensionTest, TransceiverOffersHeaderExtensions) { std::tie(media_type, semantics) = GetParam(); if (semantics != SdpSemantics::kUnifiedPlan) return; - std::vector extensions( - {RtpHeaderExtensionCapability("uri1", 1, - RtpTransceiverDirection::kStopped), - RtpHeaderExtensionCapability("uri2", 2, - RtpTransceiverDirection::kSendOnly), - RtpHeaderExtensionCapability("uri3", 3, - RtpTransceiverDirection::kRecvOnly), - RtpHeaderExtensionCapability("uri4", 4, - RtpTransceiverDirection::kSendRecv)}); std::unique_ptr wrapper = - CreatePeerConnection(media_type, semantics, extensions); + CreatePeerConnection(media_type, semantics); auto transceiver = wrapper->AddTransceiver(media_type); - EXPECT_EQ(transceiver->HeaderExtensionsToOffer(), extensions); + EXPECT_EQ(transceiver->HeaderExtensionsToOffer(), extensions_); } TEST_P(PeerConnectionHeaderExtensionTest, @@ -99,20 +107,14 @@ TEST_P(PeerConnectionHeaderExtensionTest, cricket::MediaType media_type; SdpSemantics semantics; std::tie(media_type, semantics) = GetParam(); - std::unique_ptr wrapper = CreatePeerConnection( - media_type, semantics, - std::vector( - {RtpHeaderExtensionCapability("uri1", 1, - RtpTransceiverDirection::kSendRecv), - RtpHeaderExtensionCapability("uri2", 2, - RtpTransceiverDirection::kStopped), - RtpHeaderExtensionCapability("uri3", 3, - 
RtpTransceiverDirection::kRecvOnly)})); + std::unique_ptr wrapper = + CreatePeerConnection(media_type, semantics); EXPECT_THAT(wrapper->pc_factory() ->GetRtpSenderCapabilities(media_type) .header_extensions, - ElementsAre(Field(&RtpHeaderExtensionCapability::uri, "uri1"), - Field(&RtpHeaderExtensionCapability::uri, "uri3"))); + ElementsAre(Field(&RtpHeaderExtensionCapability::uri, "uri2"), + Field(&RtpHeaderExtensionCapability::uri, "uri3"), + Field(&RtpHeaderExtensionCapability::uri, "uri4"))); EXPECT_EQ(wrapper->pc_factory() ->GetRtpReceiverCapabilities(media_type) .header_extensions, @@ -121,6 +123,79 @@ TEST_P(PeerConnectionHeaderExtensionTest, .header_extensions); } +TEST_P(PeerConnectionHeaderExtensionTest, OffersUnstoppedDefaultExtensions) { + cricket::MediaType media_type; + SdpSemantics semantics; + std::tie(media_type, semantics) = GetParam(); + if (semantics != SdpSemantics::kUnifiedPlan) + return; + std::unique_ptr wrapper = + CreatePeerConnection(media_type, semantics); + auto transceiver = wrapper->AddTransceiver(media_type); + auto session_description = wrapper->CreateOffer(); + EXPECT_THAT(session_description->description() + ->contents()[0] + .media_description() + ->rtp_header_extensions(), + ElementsAre(Field(&RtpExtension::uri, "uri2"), + Field(&RtpExtension::uri, "uri3"), + Field(&RtpExtension::uri, "uri4"))); +} + +TEST_P(PeerConnectionHeaderExtensionTest, OffersUnstoppedModifiedExtensions) { + cricket::MediaType media_type; + SdpSemantics semantics; + std::tie(media_type, semantics) = GetParam(); + if (semantics != SdpSemantics::kUnifiedPlan) + return; + std::unique_ptr wrapper = + CreatePeerConnection(media_type, semantics); + auto transceiver = wrapper->AddTransceiver(media_type); + auto modified_extensions = transceiver->HeaderExtensionsToOffer(); + modified_extensions[0].direction = RtpTransceiverDirection::kSendRecv; + modified_extensions[3].direction = RtpTransceiverDirection::kStopped; + EXPECT_TRUE( + 
transceiver->SetOfferedRtpHeaderExtensions(modified_extensions).ok()); + auto session_description = wrapper->CreateOffer(); + EXPECT_THAT(session_description->description() + ->contents()[0] + .media_description() + ->rtp_header_extensions(), + ElementsAre(Field(&RtpExtension::uri, "uri1"), + Field(&RtpExtension::uri, "uri2"), + Field(&RtpExtension::uri, "uri3"))); +} + +TEST_P(PeerConnectionHeaderExtensionTest, NegotiatedExtensionsAreAccessible) { + cricket::MediaType media_type; + SdpSemantics semantics; + std::tie(media_type, semantics) = GetParam(); + if (semantics != SdpSemantics::kUnifiedPlan) + return; + std::unique_ptr pc1 = + CreatePeerConnection(media_type, semantics); + auto transceiver1 = pc1->AddTransceiver(media_type); + auto modified_extensions = transceiver1->HeaderExtensionsToOffer(); + modified_extensions[3].direction = RtpTransceiverDirection::kStopped; + transceiver1->SetOfferedRtpHeaderExtensions(modified_extensions); + auto offer = pc1->CreateOfferAndSetAsLocal( + PeerConnectionInterface::RTCOfferAnswerOptions()); + + std::unique_ptr pc2 = + CreatePeerConnection(media_type, semantics); + auto transceiver2 = pc2->AddTransceiver(media_type); + pc2->SetRemoteDescription(std::move(offer)); + auto answer = pc2->CreateAnswerAndSetAsLocal( + PeerConnectionInterface::RTCOfferAnswerOptions()); + pc1->SetRemoteDescription(std::move(answer)); + + // PC1 has exts 2-4 unstopped and PC2 has exts 1-3 unstopped -> ext 2, 3 + // survives. 
+ EXPECT_THAT(transceiver1->HeaderExtensionsNegotiated(), + ElementsAre(Field(&RtpHeaderExtensionCapability::uri, "uri2"), + Field(&RtpHeaderExtensionCapability::uri, "uri3"))); +} + INSTANTIATE_TEST_SUITE_P( , PeerConnectionHeaderExtensionTest, diff --git a/pc/peer_connection_histogram_unittest.cc b/pc/peer_connection_histogram_unittest.cc index e36b29a23a..fa46ce9802 100644 --- a/pc/peer_connection_histogram_unittest.cc +++ b/pc/peer_connection_histogram_unittest.cc @@ -19,7 +19,6 @@ #include "api/jsep.h" #include "api/jsep_session_description.h" #include "api/peer_connection_interface.h" -#include "api/peer_connection_proxy.h" #include "api/rtc_error.h" #include "api/scoped_refptr.h" #include "api/task_queue/default_task_queue_factory.h" @@ -29,9 +28,11 @@ #include "p2p/client/basic_port_allocator.h" #include "pc/peer_connection.h" #include "pc/peer_connection_factory.h" +#include "pc/peer_connection_proxy.h" #include "pc/peer_connection_wrapper.h" #include "pc/sdp_utils.h" #include "pc/test/mock_peer_connection_observers.h" +#include "pc/usage_pattern.h" #include "pc/webrtc_sdp.h" #include "rtc_base/arraysize.h" #include "rtc_base/checks.h" @@ -61,7 +62,7 @@ static const rtc::SocketAddress kPrivateLocalAddress("10.1.1.1", 0); static const rtc::SocketAddress kPrivateIpv6LocalAddress("fd12:3456:789a:1::1", 0); -int MakeUsageFingerprint(std::set events) { +int MakeUsageFingerprint(std::set events) { int signature = 0; for (const auto it : events) { signature |= static_cast(it); @@ -84,18 +85,6 @@ class PeerConnectionFactoryForUsageHistogramTest dependencies.call_factory = CreateCallFactory(); return dependencies; }()) {} - - void ActionsBeforeInitializeForTesting(PeerConnectionInterface* pc) override { - PeerConnection* internal_pc = static_cast(pc); - if (return_histogram_very_quickly_) { - internal_pc->ReturnHistogramVeryQuicklyForTesting(); - } - } - - void ReturnHistogramVeryQuickly() { return_histogram_very_quickly_ = true; } - - private: - bool 
return_histogram_very_quickly_ = false; }; class PeerConnectionWrapperForUsageHistogramTest; @@ -254,14 +243,13 @@ class PeerConnectionUsageHistogramTest : public ::testing::Test { } WrapperPtr CreatePeerConnection() { - return CreatePeerConnection(RTCConfiguration(), - PeerConnectionFactoryInterface::Options(), - nullptr, false); + return CreatePeerConnection( + RTCConfiguration(), PeerConnectionFactoryInterface::Options(), nullptr); } WrapperPtr CreatePeerConnection(const RTCConfiguration& config) { return CreatePeerConnection( - config, PeerConnectionFactoryInterface::Options(), nullptr, false); + config, PeerConnectionFactoryInterface::Options(), nullptr); } WrapperPtr CreatePeerConnectionWithMdns(const RTCConfiguration& config) { @@ -281,15 +269,15 @@ class PeerConnectionUsageHistogramTest : public ::testing::Test { deps.async_resolver_factory = std::move(resolver_factory); deps.allocator = std::move(port_allocator); - return CreatePeerConnection(config, - PeerConnectionFactoryInterface::Options(), - std::move(deps), false); + return CreatePeerConnection( + config, PeerConnectionFactoryInterface::Options(), std::move(deps)); } WrapperPtr CreatePeerConnectionWithImmediateReport() { - return CreatePeerConnection(RTCConfiguration(), - PeerConnectionFactoryInterface::Options(), - nullptr, true); + RTCConfiguration configuration; + configuration.report_usage_pattern_delay_ms = 0; + return CreatePeerConnection( + configuration, PeerConnectionFactoryInterface::Options(), nullptr); } WrapperPtr CreatePeerConnectionWithPrivateLocalAddresses() { @@ -299,10 +287,9 @@ class PeerConnectionUsageHistogramTest : public ::testing::Test { auto port_allocator = std::make_unique(fake_network); - return CreatePeerConnection(RTCConfiguration(), PeerConnectionFactoryInterface::Options(), - std::move(port_allocator), false); + std::move(port_allocator)); } WrapperPtr CreatePeerConnectionWithPrivateIpv6LocalAddresses() { @@ -315,33 +302,26 @@ class PeerConnectionUsageHistogramTest : 
public ::testing::Test { return CreatePeerConnection(RTCConfiguration(), PeerConnectionFactoryInterface::Options(), - std::move(port_allocator), false); + std::move(port_allocator)); } WrapperPtr CreatePeerConnection( const RTCConfiguration& config, const PeerConnectionFactoryInterface::Options factory_options, - std::unique_ptr allocator, - bool immediate_report) { + std::unique_ptr allocator) { PeerConnectionDependencies deps(nullptr); deps.allocator = std::move(allocator); - return CreatePeerConnection(config, factory_options, std::move(deps), - immediate_report); + return CreatePeerConnection(config, factory_options, std::move(deps)); } WrapperPtr CreatePeerConnection( const RTCConfiguration& config, const PeerConnectionFactoryInterface::Options factory_options, - PeerConnectionDependencies deps, - bool immediate_report) { + PeerConnectionDependencies deps) { rtc::scoped_refptr pc_factory( new PeerConnectionFactoryForUsageHistogramTest()); pc_factory->SetOptions(factory_options); - RTC_CHECK(pc_factory->Initialize()); - if (immediate_report) { - pc_factory->ReturnHistogramVeryQuickly(); - } // If no allocator is provided, one will be created using a network manager // that uses the host network. This doesn't work on all trybots. 
@@ -418,16 +398,12 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintAudioVideo) { caller->pc()->Close(); callee->pc()->Close(); int expected_fingerprint = MakeUsageFingerprint( - {PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::VIDEO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::VIDEO_ADDED, + UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, + UsageEvent::ICE_STATE_CONNECTED, UsageEvent::REMOTE_CANDIDATE_ADDED, + UsageEvent::DIRECT_CONNECTION_SELECTED, UsageEvent::CLOSE_CALLED}); // In this case, we may or may not have PRIVATE_CANDIDATE_COLLECTED, // depending on the machine configuration. 
EXPECT_METRIC_EQ(2, webrtc::metrics::NumSamples(kUsagePatternMetric)); @@ -437,9 +413,7 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintAudioVideo) { webrtc::metrics::NumEvents( kUsagePatternMetric, expected_fingerprint | - static_cast( - PeerConnection::UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == - 2); + static_cast(UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == 2); } // Test getting the usage fingerprint when the caller collects an mDNS @@ -458,32 +432,24 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintWithMdnsCaller) { callee->pc()->Close(); int expected_fingerprint_caller = MakeUsageFingerprint( - {PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::VIDEO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::MDNS_CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::VIDEO_ADDED, + UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::MDNS_CANDIDATE_COLLECTED, + UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, UsageEvent::ICE_STATE_CONNECTED, + UsageEvent::REMOTE_CANDIDATE_ADDED, + UsageEvent::DIRECT_CONNECTION_SELECTED, UsageEvent::CLOSE_CALLED}); // Without a resolver, the callee cannot resolve the received mDNS candidate // but can still connect with the caller via a prflx candidate. As a result, // the bit for the direct connection should not be logged. 
int expected_fingerprint_callee = MakeUsageFingerprint( - {PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::VIDEO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::REMOTE_MDNS_CANDIDATE_ADDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::VIDEO_ADDED, + UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, + UsageEvent::REMOTE_MDNS_CANDIDATE_ADDED, UsageEvent::ICE_STATE_CONNECTED, + UsageEvent::REMOTE_CANDIDATE_ADDED, UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(2, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents(kUsagePatternMetric, expected_fingerprint_caller)); @@ -509,29 +475,21 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintWithMdnsCallee) { // Similar to the test above, the caller connects with the callee via a prflx // candidate. 
int expected_fingerprint_caller = MakeUsageFingerprint( - {PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::VIDEO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::REMOTE_MDNS_CANDIDATE_ADDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::VIDEO_ADDED, + UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, + UsageEvent::REMOTE_MDNS_CANDIDATE_ADDED, UsageEvent::ICE_STATE_CONNECTED, + UsageEvent::REMOTE_CANDIDATE_ADDED, UsageEvent::CLOSE_CALLED}); int expected_fingerprint_callee = MakeUsageFingerprint( - {PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::VIDEO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::MDNS_CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::VIDEO_ADDED, + UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::MDNS_CANDIDATE_COLLECTED, + UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, UsageEvent::ICE_STATE_CONNECTED, + UsageEvent::REMOTE_CANDIDATE_ADDED, + UsageEvent::DIRECT_CONNECTION_SELECTED, 
UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(2, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents(kUsagePatternMetric, expected_fingerprint_caller)); @@ -539,7 +497,7 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintWithMdnsCallee) { expected_fingerprint_callee)); } -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP TEST_F(PeerConnectionUsageHistogramTest, FingerprintDataOnly) { auto caller = CreatePeerConnection(); auto callee = CreatePeerConnection(); @@ -549,15 +507,11 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintDataOnly) { caller->pc()->Close(); callee->pc()->Close(); int expected_fingerprint = MakeUsageFingerprint( - {PeerConnection::UsageEvent::DATA_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::DATA_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, + UsageEvent::ICE_STATE_CONNECTED, UsageEvent::REMOTE_CANDIDATE_ADDED, + UsageEvent::DIRECT_CONNECTION_SELECTED, UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(2, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_TRUE( webrtc::metrics::NumEvents(kUsagePatternMetric, expected_fingerprint) == @@ -565,29 +519,26 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintDataOnly) { webrtc::metrics::NumEvents( kUsagePatternMetric, expected_fingerprint | - static_cast( - PeerConnection::UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == - 2); + static_cast(UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == 2); } -#endif // 
HAVE_SCTP +#endif // WEBRTC_HAVE_SCTP #endif // WEBRTC_ANDROID TEST_F(PeerConnectionUsageHistogramTest, FingerprintStunTurn) { RTCConfiguration configuration; PeerConnection::IceServer server; - server.urls = {"stun:dummy.stun.server/"}; + server.urls = {"stun:dummy.stun.server"}; configuration.servers.push_back(server); - server.urls = {"turn:dummy.turn.server/"}; + server.urls = {"turn:dummy.turn.server"}; server.username = "username"; server.password = "password"; configuration.servers.push_back(server); auto caller = CreatePeerConnection(configuration); ASSERT_TRUE(caller); caller->pc()->Close(); - int expected_fingerprint = - MakeUsageFingerprint({PeerConnection::UsageEvent::STUN_SERVER_ADDED, - PeerConnection::UsageEvent::TURN_SERVER_ADDED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + int expected_fingerprint = MakeUsageFingerprint( + {UsageEvent::STUN_SERVER_ADDED, UsageEvent::TURN_SERVER_ADDED, + UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(1, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_EQ( 1, webrtc::metrics::NumEvents(kUsagePatternMetric, expected_fingerprint)); @@ -596,9 +547,9 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintStunTurn) { TEST_F(PeerConnectionUsageHistogramTest, FingerprintStunTurnInReconfiguration) { RTCConfiguration configuration; PeerConnection::IceServer server; - server.urls = {"stun:dummy.stun.server/"}; + server.urls = {"stun:dummy.stun.server"}; configuration.servers.push_back(server); - server.urls = {"turn:dummy.turn.server/"}; + server.urls = {"turn:dummy.turn.server"}; server.username = "username"; server.password = "password"; configuration.servers.push_back(server); @@ -606,10 +557,9 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintStunTurnInReconfiguration) { ASSERT_TRUE(caller); ASSERT_TRUE(caller->pc()->SetConfiguration(configuration).ok()); caller->pc()->Close(); - int expected_fingerprint = - MakeUsageFingerprint({PeerConnection::UsageEvent::STUN_SERVER_ADDED, - 
PeerConnection::UsageEvent::TURN_SERVER_ADDED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + int expected_fingerprint = MakeUsageFingerprint( + {UsageEvent::STUN_SERVER_ADDED, UsageEvent::TURN_SERVER_ADDED, + UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(1, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_EQ( 1, webrtc::metrics::NumEvents(kUsagePatternMetric, expected_fingerprint)); @@ -624,28 +574,20 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintWithPrivateIPCaller) { callee->pc()->Close(); int expected_fingerprint_caller = MakeUsageFingerprint( - {PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::PRIVATE_CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::PRIVATE_CANDIDATE_COLLECTED, + UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, UsageEvent::ICE_STATE_CONNECTED, + UsageEvent::REMOTE_CANDIDATE_ADDED, + UsageEvent::DIRECT_CONNECTION_SELECTED, UsageEvent::CLOSE_CALLED}); int expected_fingerprint_callee = MakeUsageFingerprint( - {PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::REMOTE_PRIVATE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - 
PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, + UsageEvent::REMOTE_PRIVATE_CANDIDATE_ADDED, + UsageEvent::ICE_STATE_CONNECTED, UsageEvent::REMOTE_CANDIDATE_ADDED, + UsageEvent::DIRECT_CONNECTION_SELECTED, UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(2, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents(kUsagePatternMetric, expected_fingerprint_caller)); @@ -662,30 +604,22 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintWithPrivateIpv6Callee) { callee->pc()->Close(); int expected_fingerprint_caller = MakeUsageFingerprint( - {PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::REMOTE_PRIVATE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::REMOTE_IPV6_CANDIDATE_ADDED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, + UsageEvent::REMOTE_PRIVATE_CANDIDATE_ADDED, + UsageEvent::ICE_STATE_CONNECTED, UsageEvent::REMOTE_CANDIDATE_ADDED, + UsageEvent::REMOTE_IPV6_CANDIDATE_ADDED, + UsageEvent::DIRECT_CONNECTION_SELECTED, UsageEvent::CLOSE_CALLED}); int expected_fingerprint_callee = MakeUsageFingerprint( - 
{PeerConnection::UsageEvent::AUDIO_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::PRIVATE_CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::IPV6_CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::AUDIO_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::PRIVATE_CANDIDATE_COLLECTED, + UsageEvent::IPV6_CANDIDATE_COLLECTED, + UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED, + UsageEvent::REMOTE_CANDIDATE_ADDED, UsageEvent::ICE_STATE_CONNECTED, + UsageEvent::DIRECT_CONNECTION_SELECTED, UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(2, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents(kUsagePatternMetric, expected_fingerprint_caller)); @@ -694,7 +628,7 @@ TEST_F(PeerConnectionUsageHistogramTest, FingerprintWithPrivateIpv6Callee) { } #ifndef WEBRTC_ANDROID -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP // Test that the usage pattern bits for adding remote (private IPv6) candidates // are set when the remote candidates are retrieved from the Offer SDP instead // of trickled ICE messages. @@ -747,27 +681,20 @@ TEST_F(PeerConnectionUsageHistogramTest, // with the callee via a prflx candidate and hence no direct connection bit // should be set. 
int expected_fingerprint_caller = MakeUsageFingerprint( - {PeerConnection::UsageEvent::DATA_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::PRIVATE_CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::IPV6_CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::DATA_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::PRIVATE_CANDIDATE_COLLECTED, + UsageEvent::IPV6_CANDIDATE_COLLECTED, UsageEvent::ICE_STATE_CONNECTED, + UsageEvent::CLOSE_CALLED}); int expected_fingerprint_callee = MakeUsageFingerprint( - {PeerConnection::UsageEvent::DATA_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::MDNS_CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::REMOTE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::REMOTE_PRIVATE_CANDIDATE_ADDED, - PeerConnection::UsageEvent::REMOTE_IPV6_CANDIDATE_ADDED, - PeerConnection::UsageEvent::ICE_STATE_CONNECTED, - PeerConnection::UsageEvent::DIRECT_CONNECTION_SELECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::DATA_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::MDNS_CANDIDATE_COLLECTED, + UsageEvent::REMOTE_CANDIDATE_ADDED, + UsageEvent::REMOTE_PRIVATE_CANDIDATE_ADDED, + UsageEvent::REMOTE_IPV6_CANDIDATE_ADDED, UsageEvent::ICE_STATE_CONNECTED, + UsageEvent::DIRECT_CONNECTION_SELECTED, UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(2, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_EQ(1, 
webrtc::metrics::NumEvents(kUsagePatternMetric, expected_fingerprint_caller)); @@ -781,16 +708,13 @@ TEST_F(PeerConnectionUsageHistogramTest, NotableUsageNoted) { caller->GenerateOfferAndCollectCandidates(); caller->pc()->Close(); int expected_fingerprint = MakeUsageFingerprint( - {PeerConnection::UsageEvent::DATA_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::DATA_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(1, webrtc::metrics::NumSamples(kUsagePatternMetric)); EXPECT_METRIC_TRUE( expected_fingerprint == ObservedFingerprint() || (expected_fingerprint | - static_cast( - PeerConnection::UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == + static_cast(UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == ObservedFingerprint()); EXPECT_METRIC_EQ(absl::make_optional(ObservedFingerprint()), caller->observer()->interesting_usage_detected()); @@ -801,9 +725,8 @@ TEST_F(PeerConnectionUsageHistogramTest, NotableUsageOnEventFiring) { caller->CreateDataChannel("foo"); caller->GenerateOfferAndCollectCandidates(); int expected_fingerprint = MakeUsageFingerprint( - {PeerConnection::UsageEvent::DATA_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED}); + {UsageEvent::DATA_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED}); EXPECT_METRIC_EQ(0, webrtc::metrics::NumSamples(kUsagePatternMetric)); caller->GetInternalPeerConnection()->RequestUsagePatternReportForTesting(); EXPECT_METRIC_EQ_WAIT(1, webrtc::metrics::NumSamples(kUsagePatternMetric), @@ -811,8 +734,7 @@ TEST_F(PeerConnectionUsageHistogramTest, NotableUsageOnEventFiring) { EXPECT_METRIC_TRUE( expected_fingerprint == ObservedFingerprint() || (expected_fingerprint | - static_cast( - 
PeerConnection::UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == + static_cast(UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == ObservedFingerprint()); EXPECT_METRIC_EQ(absl::make_optional(ObservedFingerprint()), caller->observer()->interesting_usage_detected()); @@ -824,10 +746,8 @@ TEST_F(PeerConnectionUsageHistogramTest, caller->CreateDataChannel("foo"); caller->GenerateOfferAndCollectCandidates(); int expected_fingerprint = MakeUsageFingerprint( - {PeerConnection::UsageEvent::DATA_ADDED, - PeerConnection::UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, - PeerConnection::UsageEvent::CANDIDATE_COLLECTED, - PeerConnection::UsageEvent::CLOSE_CALLED}); + {UsageEvent::DATA_ADDED, UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED, + UsageEvent::CANDIDATE_COLLECTED, UsageEvent::CLOSE_CALLED}); EXPECT_METRIC_EQ(0, webrtc::metrics::NumSamples(kUsagePatternMetric)); caller->pc()->Close(); EXPECT_METRIC_EQ(1, webrtc::metrics::NumSamples(kUsagePatternMetric)); @@ -838,8 +758,7 @@ TEST_F(PeerConnectionUsageHistogramTest, EXPECT_METRIC_TRUE( expected_fingerprint == ObservedFingerprint() || (expected_fingerprint | - static_cast( - PeerConnection::UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == + static_cast(UsageEvent::PRIVATE_CANDIDATE_COLLECTED)) == ObservedFingerprint()); // After close, the usage-detection callback should NOT have been called. 
EXPECT_METRIC_FALSE(caller->observer()->interesting_usage_detected()); diff --git a/pc/peer_connection_ice_unittest.cc b/pc/peer_connection_ice_unittest.cc index 8502dd427a..7971547ffa 100644 --- a/pc/peer_connection_ice_unittest.cc +++ b/pc/peer_connection_ice_unittest.cc @@ -23,10 +23,10 @@ #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "api/create_peerconnection_factory.h" -#include "api/peer_connection_proxy.h" #include "api/uma_metrics.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" +#include "pc/peer_connection_proxy.h" #include "pc/test/fake_audio_capture_module.h" #include "pc/test/mock_peer_connection_observers.h" #include "rtc_base/fake_network.h" @@ -497,6 +497,24 @@ TEST_P(PeerConnectionIceTest, DuplicateIceCandidateIgnoredWhenAdded) { EXPECT_EQ(1u, caller->GetIceCandidatesFromRemoteDescription().size()); } +// TODO(tommi): Re-enable after updating RTCPeerConnection-blockedPorts.html in +// Chromium (the test needs setRemoteDescription to succeed for an invalid +// candidate). +TEST_P(PeerConnectionIceTest, DISABLED_ErrorOnInvalidRemoteIceCandidateAdded) { + auto caller = CreatePeerConnectionWithAudioVideo(); + auto callee = CreatePeerConnectionWithAudioVideo(); + ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); + // Add a candidate to the remote description with a candidate that has an + // invalid address (port number == 2). + auto answer = callee->CreateAnswerAndSetAsLocal(); + cricket::Candidate bad_candidate = + CreateLocalUdpCandidate(SocketAddress("2.2.2.2", 2)); + RTC_LOG(LS_INFO) << "Bad candidate: " << bad_candidate.ToString(); + AddCandidateToFirstTransport(&bad_candidate, answer.get()); + // Now the call to SetRemoteDescription should fail. 
+ EXPECT_FALSE(caller->SetRemoteDescription(std::move(answer))); +} + TEST_P(PeerConnectionIceTest, CannotRemoveIceCandidatesWhenPeerConnectionClosed) { const SocketAddress kCalleeAddress("1.1.1.1", 1111); @@ -750,8 +768,8 @@ TEST_P(PeerConnectionIceTest, ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); // Chain an operation that will block AddIceCandidate() from executing. - rtc::scoped_refptr answer_observer( - new rtc::RefCountedObject()); + auto answer_observer = + rtc::make_ref_counted(); callee->pc()->CreateAnswer(answer_observer, RTCOfferAnswerOptions()); auto jsep_candidate = @@ -798,8 +816,8 @@ TEST_P(PeerConnectionIceTest, ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); // Chain an operation that will block AddIceCandidate() from executing. - rtc::scoped_refptr answer_observer( - new rtc::RefCountedObject()); + auto answer_observer = + rtc::make_ref_counted(); callee->pc()->CreateAnswer(answer_observer, RTCOfferAnswerOptions()); auto jsep_candidate = @@ -1041,9 +1059,11 @@ TEST_P(PeerConnectionIceTest, RestartIceCausesNegotiationNeeded) { auto callee = CreatePeerConnectionWithAudioVideo(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); caller->pc()->RestartIce(); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); } // In Unified Plan, "onnegotiationneeded" is spec-compliant, including not @@ -1064,14 +1084,17 @@ TEST_F(PeerConnectionIceTestUnifiedPlan, ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); // ICE restart becomes needed while an O/A is pending and |caller| is the // offerer. 
- caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); caller->pc()->RestartIce(); // In Unified Plan, the event should not fire until we are back in the stable // signaling state. - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); ASSERT_TRUE( caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); } TEST_F(PeerConnectionIceTestUnifiedPlan, @@ -1084,14 +1107,17 @@ TEST_F(PeerConnectionIceTestUnifiedPlan, ASSERT_TRUE(caller->SetRemoteDescription(callee->CreateOfferAndSetAsLocal())); // ICE restart becomes needed while an O/A is pending and |caller| is the // answerer. - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); caller->pc()->RestartIce(); // In Unified Plan, the event should not fire until we are back in the stable // signaling state. - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); ASSERT_TRUE( callee->SetRemoteDescription(caller->CreateAnswerAndSetAsLocal())); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); } TEST_F(PeerConnectionIceTestUnifiedPlan, @@ -1102,14 +1128,16 @@ TEST_F(PeerConnectionIceTestUnifiedPlan, ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); // Local restart. 
caller->pc()->RestartIce(); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); // Remote restart and O/A exchange with |caller| as the answerer should // restart ICE locally as well. callee->pc()->RestartIce(); ASSERT_TRUE(callee->ExchangeOfferAnswerWith(caller.get())); // Having restarted ICE by the remote offer, we do not need to renegotiate ICE // credentials when back in the stable signaling state. - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); } TEST_F(PeerConnectionIceTestUnifiedPlan, @@ -1119,10 +1147,13 @@ TEST_F(PeerConnectionIceTestUnifiedPlan, ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); caller->pc()->RestartIce(); - EXPECT_TRUE(caller->observer()->negotiation_needed()); - caller->observer()->clear_negotiation_needed(); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); caller->pc()->RestartIce(); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); } // In Plan B, "onnegotiationneeded" is not spec-compliant, firing based on if @@ -1140,15 +1171,19 @@ TEST_F(PeerConnectionIceTestPlanB, auto callee = CreatePeerConnectionWithAudioVideo(); ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); caller->pc()->RestartIce(); - 
EXPECT_TRUE(caller->observer()->negotiation_needed()); - caller->observer()->clear_negotiation_needed(); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); ASSERT_TRUE( caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); // In Plan B, the event fired early so we don't expect it to fire now. This is // not spec-compliant but follows the pattern of existing Plan B behavior. - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); } TEST_F(PeerConnectionIceTestPlanB, @@ -1157,15 +1192,19 @@ TEST_F(PeerConnectionIceTestPlanB, auto callee = CreatePeerConnectionWithAudioVideo(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); caller->pc()->RestartIce(); - EXPECT_TRUE(caller->observer()->negotiation_needed()); - caller->observer()->clear_negotiation_needed(); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); caller->pc()->RestartIce(); // In Plan B, the event fires every time something changed, even if we have // already fired the event. This is not spec-compliant but follows the same // pattern of existing Plan B behavior. 
- EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); } // The following parameterized test verifies that if an offer is sent with a @@ -1367,6 +1406,36 @@ TEST_F(PeerConnectionIceConfigTest, SetStunCandidateKeepaliveInterval) { EXPECT_EQ(actual_stun_keepalive_interval.value_or(-1), 321); } +TEST_F(PeerConnectionIceConfigTest, SetStableWritableConnectionInterval) { + RTCConfiguration config; + config.stable_writable_connection_ping_interval_ms = 3500; + CreatePeerConnection(config); + EXPECT_TRUE(pc_->SetConfiguration(config).ok()); + EXPECT_EQ(pc_->GetConfiguration().stable_writable_connection_ping_interval_ms, + config.stable_writable_connection_ping_interval_ms); +} + +TEST_F(PeerConnectionIceConfigTest, + SetStableWritableConnectionInterval_FailsValidation) { + RTCConfiguration config; + CreatePeerConnection(config); + ASSERT_TRUE(pc_->SetConfiguration(config).ok()); + config.stable_writable_connection_ping_interval_ms = 5000; + config.ice_check_interval_strong_connectivity = 7500; + EXPECT_FALSE(pc_->SetConfiguration(config).ok()); +} + +TEST_F(PeerConnectionIceConfigTest, + SetStableWritableConnectionInterval_DefaultValue_FailsValidation) { + RTCConfiguration config; + CreatePeerConnection(config); + ASSERT_TRUE(pc_->SetConfiguration(config).ok()); + config.ice_check_interval_strong_connectivity = 2500; + EXPECT_TRUE(pc_->SetConfiguration(config).ok()); + config.ice_check_interval_strong_connectivity = 2501; + EXPECT_FALSE(pc_->SetConfiguration(config).ok()); +} + TEST_P(PeerConnectionIceTest, IceCredentialsCreateOffer) { RTCConfiguration config; config.ice_candidate_pool_size = 1; @@ -1413,4 +1482,24 @@ TEST_P(PeerConnectionIceTest, CloseDoesNotTransitionGatheringStateToComplete) { pc->pc()->ice_gathering_state()); } +TEST_P(PeerConnectionIceTest, PrefersMidOverMLineIndex) { + const SocketAddress kCalleeAddress("1.1.1.1", 
1111); + + auto caller = CreatePeerConnectionWithAudioVideo(); + auto callee = CreatePeerConnectionWithAudioVideo(); + + ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); + ASSERT_TRUE( + caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); + + // |candidate.transport_name()| is empty. + cricket::Candidate candidate = CreateLocalUdpCandidate(kCalleeAddress); + auto* audio_content = cricket::GetFirstAudioContent( + caller->pc()->local_description()->description()); + std::unique_ptr ice_candidate = + CreateIceCandidate(audio_content->name, 65535, candidate); + EXPECT_TRUE(caller->pc()->AddIceCandidate(ice_candidate.get())); + EXPECT_TRUE(caller->pc()->RemoveIceCandidates({candidate})); +} + } // namespace webrtc diff --git a/pc/peer_connection_integrationtest.cc b/pc/peer_connection_integrationtest.cc index 2fa4fb6ade..dfceacd777 100644 --- a/pc/peer_connection_integrationtest.cc +++ b/pc/peer_connection_integrationtest.cc @@ -8,3285 +8,994 @@ * be found in the AUTHORS file in the root of the source tree. */ -// Disable for TSan v2, see -// https://code.google.com/p/webrtc/issues/detail?id=1205 for details. 
-#if !defined(THREAD_SANITIZER) - -#include +#include -#include -#include -#include +#include #include +#include +#include #include #include #include "absl/algorithm/container.h" +#include "absl/types/optional.h" +#include "api/async_resolver_factory.h" +#include "api/candidate.h" +#include "api/crypto/crypto_options.h" +#include "api/dtmf_sender_interface.h" +#include "api/ice_transport_interface.h" +#include "api/jsep.h" #include "api/media_stream_interface.h" +#include "api/media_types.h" #include "api/peer_connection_interface.h" -#include "api/peer_connection_proxy.h" -#include "api/rtc_event_log/rtc_event_log_factory.h" +#include "api/rtc_error.h" +#include "api/rtc_event_log/rtc_event.h" +#include "api/rtc_event_log/rtc_event_log.h" +#include "api/rtc_event_log_output.h" +#include "api/rtp_parameters.h" #include "api/rtp_receiver_interface.h" -#include "api/task_queue/default_task_queue_factory.h" -#include "api/test/loopback_media_transport.h" +#include "api/rtp_sender_interface.h" +#include "api/rtp_transceiver_direction.h" +#include "api/rtp_transceiver_interface.h" +#include "api/scoped_refptr.h" +#include "api/stats/rtc_stats.h" +#include "api/stats/rtc_stats_report.h" +#include "api/stats/rtcstats_objects.h" +#include "api/transport/rtp/rtp_source.h" #include "api/uma_metrics.h" -#include "api/video_codecs/sdp_video_format.h" -#include "call/call.h" +#include "api/units/time_delta.h" +#include "api/video/video_rotation.h" +#include "logging/rtc_event_log/fake_rtc_event_log.h" #include "logging/rtc_event_log/fake_rtc_event_log_factory.h" -#include "media/engine/fake_webrtc_video_engine.h" -#include "media/engine/webrtc_media_engine.h" -#include "media/engine/webrtc_media_engine_defaults.h" -#include "modules/audio_processing/test/audio_processing_builder_for_testing.h" -#include "p2p/base/fake_ice_transport.h" +#include "media/base/codec.h" +#include "media/base/media_constants.h" +#include "media/base/stream_params.h" #include 
"p2p/base/mock_async_resolver.h" -#include "p2p/base/p2p_constants.h" +#include "p2p/base/port.h" +#include "p2p/base/port_allocator.h" #include "p2p/base/port_interface.h" +#include "p2p/base/stun_server.h" #include "p2p/base/test_stun_server.h" #include "p2p/base/test_turn_customizer.h" #include "p2p/base/test_turn_server.h" -#include "p2p/client/basic_port_allocator.h" -#include "pc/dtmf_sender.h" -#include "pc/local_audio_source.h" +#include "p2p/base/transport_description.h" +#include "p2p/base/transport_info.h" #include "pc/media_session.h" #include "pc/peer_connection.h" #include "pc/peer_connection_factory.h" -#include "pc/rtp_media_utils.h" #include "pc/session_description.h" -#include "pc/test/fake_audio_capture_module.h" -#include "pc/test/fake_periodic_video_track_source.h" -#include "pc/test/fake_rtc_certificate_generator.h" -#include "pc/test/fake_video_track_renderer.h" +#include "pc/test/fake_periodic_video_source.h" +#include "pc/test/integration_test_helpers.h" #include "pc/test/mock_peer_connection_observers.h" #include "rtc_base/fake_clock.h" #include "rtc_base/fake_mdns_responder.h" #include "rtc_base/fake_network.h" #include "rtc_base/firewall_socket_server.h" #include "rtc_base/gunit.h" -#include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/helpers.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/socket_address.h" +#include "rtc_base/ssl_certificate.h" +#include "rtc_base/ssl_fingerprint.h" +#include "rtc_base/ssl_identity.h" +#include "rtc_base/ssl_stream_adapter.h" #include "rtc_base/test_certificate_verifier.h" +#include "rtc_base/thread.h" #include "rtc_base/time_utils.h" #include "rtc_base/virtual_socket_server.h" #include "system_wrappers/include/metrics.h" -#include "test/field_trial.h" -#include "test/gmock.h" namespace webrtc { + namespace { -using ::cricket::ContentInfo; -using ::cricket::StreamParams; -using ::rtc::SocketAddress; -using 
::testing::_; -using ::testing::Combine; -using ::testing::Contains; -using ::testing::DoAll; -using ::testing::ElementsAre; -using ::testing::NiceMock; -using ::testing::Return; -using ::testing::SetArgPointee; -using ::testing::UnorderedElementsAreArray; -using ::testing::Values; -using RTCConfiguration = PeerConnectionInterface::RTCConfiguration; - -static const int kDefaultTimeout = 10000; -static const int kMaxWaitForStatsMs = 3000; -static const int kMaxWaitForActivationMs = 5000; -static const int kMaxWaitForFramesMs = 10000; -// Default number of audio/video frames to wait for before considering a test -// successful. -static const int kDefaultExpectedAudioFrameCount = 3; -static const int kDefaultExpectedVideoFrameCount = 3; - -static const char kDataChannelLabel[] = "data_channel"; - -// SRTP cipher name negotiated by the tests. This must be updated if the -// default changes. -static const int kDefaultSrtpCryptoSuite = rtc::SRTP_AES128_CM_SHA1_80; -static const int kDefaultSrtpCryptoSuiteGcm = rtc::SRTP_AEAD_AES_256_GCM; - -static const SocketAddress kDefaultLocalAddress("192.168.1.1", 0); - -// Helper function for constructing offer/answer options to initiate an ICE -// restart. -PeerConnectionInterface::RTCOfferAnswerOptions IceRestartOfferAnswerOptions() { - PeerConnectionInterface::RTCOfferAnswerOptions options; - options.ice_restart = true; - return options; -} +class PeerConnectionIntegrationTest + : public PeerConnectionIntegrationBaseTest, + public ::testing::WithParamInterface { + protected: + PeerConnectionIntegrationTest() + : PeerConnectionIntegrationBaseTest(GetParam()) {} +}; -// Remove all stream information (SSRCs, track IDs, etc.) and "msid-semantic" -// attribute from received SDP, simulating a legacy endpoint. 
-void RemoveSsrcsAndMsids(cricket::SessionDescription* desc) { - for (ContentInfo& content : desc->contents()) { - content.media_description()->mutable_streams().clear(); +// Fake clock must be set before threads are started to prevent race on +// Set/GetClockForTesting(). +// To achieve that, multiple inheritance is used as a mixin pattern +// where order of construction is finely controlled. +// This also ensures peerconnection is closed before switching back to non-fake +// clock, avoiding other races and DCHECK failures such as in rtp_sender.cc. +class FakeClockForTest : public rtc::ScopedFakeClock { + protected: + FakeClockForTest() { + // Some things use a time of "0" as a special value, so we need to start out + // the fake clock at a nonzero time. + // TODO(deadbeef): Fix this. + AdvanceTime(webrtc::TimeDelta::Seconds(1)); } - desc->set_msid_supported(false); - desc->set_msid_signaling(0); -} -// Removes all stream information besides the stream ids, simulating an -// endpoint that only signals a=msid lines to convey stream_ids. -void RemoveSsrcsAndKeepMsids(cricket::SessionDescription* desc) { - for (ContentInfo& content : desc->contents()) { - std::string track_id; - std::vector stream_ids; - if (!content.media_description()->streams().empty()) { - const StreamParams& first_stream = - content.media_description()->streams()[0]; - track_id = first_stream.id; - stream_ids = first_stream.stream_ids(); - } - content.media_description()->mutable_streams().clear(); - StreamParams new_stream; - new_stream.id = track_id; - new_stream.set_stream_ids(stream_ids); - content.media_description()->AddStream(new_stream); - } -} + // Explicit handle. 
+ ScopedFakeClock& FakeClock() { return *this; } +}; -int FindFirstMediaStatsIndexByKind( - const std::string& kind, - const std::vector& - media_stats_vec) { - for (size_t i = 0; i < media_stats_vec.size(); i++) { - if (media_stats_vec[i]->kind.ValueToString() == kind) { - return i; - } - } - return -1; -} +// Ensure FakeClockForTest is constructed first (see class for rationale). +class PeerConnectionIntegrationTestWithFakeClock + : public FakeClockForTest, + public PeerConnectionIntegrationTest {}; -class SignalingMessageReceiver { - public: - virtual void ReceiveSdpMessage(SdpType type, const std::string& msg) = 0; - virtual void ReceiveIceMessage(const std::string& sdp_mid, - int sdp_mline_index, - const std::string& msg) = 0; +class PeerConnectionIntegrationTestPlanB + : public PeerConnectionIntegrationBaseTest { + protected: + PeerConnectionIntegrationTestPlanB() + : PeerConnectionIntegrationBaseTest(SdpSemantics::kPlanB) {} +}; +class PeerConnectionIntegrationTestUnifiedPlan + : public PeerConnectionIntegrationBaseTest { protected: - SignalingMessageReceiver() {} - virtual ~SignalingMessageReceiver() {} + PeerConnectionIntegrationTestUnifiedPlan() + : PeerConnectionIntegrationBaseTest(SdpSemantics::kUnifiedPlan) {} }; -class MockRtpReceiverObserver : public webrtc::RtpReceiverObserverInterface { +// Test the OnFirstPacketReceived callback from audio/video RtpReceivers. This +// includes testing that the callback is invoked if an observer is connected +// after the first packet has already been received. +TEST_P(PeerConnectionIntegrationTest, + RtpReceiverObserverOnFirstPacketReceived) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + // Start offer/answer exchange and wait for it to complete. + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Should be one receiver each for audio/video. 
+ EXPECT_EQ(2U, caller()->rtp_receiver_observers().size()); + EXPECT_EQ(2U, callee()->rtp_receiver_observers().size()); + // Wait for all "first packet received" callbacks to be fired. + EXPECT_TRUE_WAIT( + absl::c_all_of(caller()->rtp_receiver_observers(), + [](const std::unique_ptr& o) { + return o->first_packet_received(); + }), + kMaxWaitForFramesMs); + EXPECT_TRUE_WAIT( + absl::c_all_of(callee()->rtp_receiver_observers(), + [](const std::unique_ptr& o) { + return o->first_packet_received(); + }), + kMaxWaitForFramesMs); + // If new observers are set after the first packet was already received, the + // callback should still be invoked. + caller()->ResetRtpReceiverObservers(); + callee()->ResetRtpReceiverObservers(); + EXPECT_EQ(2U, caller()->rtp_receiver_observers().size()); + EXPECT_EQ(2U, callee()->rtp_receiver_observers().size()); + EXPECT_TRUE( + absl::c_all_of(caller()->rtp_receiver_observers(), + [](const std::unique_ptr& o) { + return o->first_packet_received(); + })); + EXPECT_TRUE( + absl::c_all_of(callee()->rtp_receiver_observers(), + [](const std::unique_ptr& o) { + return o->first_packet_received(); + })); +} + +class DummyDtmfObserver : public DtmfSenderObserverInterface { public: - explicit MockRtpReceiverObserver(cricket::MediaType media_type) - : expected_media_type_(media_type) {} + DummyDtmfObserver() : completed_(false) {} - void OnFirstPacketReceived(cricket::MediaType media_type) override { - ASSERT_EQ(expected_media_type_, media_type); - first_packet_received_ = true; + // Implements DtmfSenderObserverInterface. 
+ void OnToneChange(const std::string& tone) override { + tones_.push_back(tone); + if (tone.empty()) { + completed_ = true; + } } - bool first_packet_received() const { return first_packet_received_; } - - virtual ~MockRtpReceiverObserver() {} + const std::vector& tones() const { return tones_; } + bool completed() const { return completed_; } private: - bool first_packet_received_ = false; - cricket::MediaType expected_media_type_; + bool completed_; + std::vector tones_; }; -// Helper class that wraps a peer connection, observes it, and can accept -// signaling messages from another wrapper. -// -// Uses a fake network, fake A/V capture, and optionally fake -// encoders/decoders, though they aren't used by default since they don't -// advertise support of any codecs. -// TODO(steveanton): See how this could become a subclass of -// PeerConnectionWrapper defined in peerconnectionwrapper.h. -class PeerConnectionWrapper : public webrtc::PeerConnectionObserver, - public SignalingMessageReceiver { - public: - // Different factory methods for convenience. - // TODO(deadbeef): Could use the pattern of: - // - // PeerConnectionWrapper = - // WrapperBuilder.WithConfig(...).WithOptions(...).build(); - // - // To reduce some code duplication. 
- static PeerConnectionWrapper* CreateWithDtlsIdentityStore( - const std::string& debug_name, - std::unique_ptr cert_generator, - rtc::Thread* network_thread, - rtc::Thread* worker_thread) { - PeerConnectionWrapper* client(new PeerConnectionWrapper(debug_name)); - webrtc::PeerConnectionDependencies dependencies(nullptr); - dependencies.cert_generator = std::move(cert_generator); - if (!client->Init(nullptr, nullptr, std::move(dependencies), network_thread, - worker_thread, nullptr, - /*media_transport_factory=*/nullptr, - /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false)) { - delete client; - return nullptr; - } - return client; - } - - webrtc::PeerConnectionFactoryInterface* pc_factory() const { - return peer_connection_factory_.get(); - } +// Assumes |sender| already has an audio track added and the offer/answer +// exchange is done. +void TestDtmfFromSenderToReceiver(PeerConnectionIntegrationWrapper* sender, + PeerConnectionIntegrationWrapper* receiver) { + // We should be able to get a DTMF sender from the local sender. + rtc::scoped_refptr dtmf_sender = + sender->pc()->GetSenders().at(0)->GetDtmfSender(); + ASSERT_TRUE(dtmf_sender); + DummyDtmfObserver observer; + dtmf_sender->RegisterObserver(&observer); - webrtc::PeerConnectionInterface* pc() const { return peer_connection_.get(); } + // Test the DtmfSender object just created. + EXPECT_TRUE(dtmf_sender->CanInsertDtmf()); + EXPECT_TRUE(dtmf_sender->InsertDtmf("1a", 100, 50)); - // If a signaling message receiver is set (via ConnectFakeSignaling), this - // will set the whole offer/answer exchange in motion. Just need to wait for - // the signaling state to reach "stable". 
- void CreateAndSetAndSignalOffer() { - auto offer = CreateOfferAndWait(); - ASSERT_NE(nullptr, offer); - EXPECT_TRUE(SetLocalDescriptionAndSendSdpMessage(std::move(offer))); - } + EXPECT_TRUE_WAIT(observer.completed(), kDefaultTimeout); + std::vector tones = {"1", "a", ""}; + EXPECT_EQ(tones, observer.tones()); + dtmf_sender->UnregisterObserver(); + // TODO(deadbeef): Verify the tones were actually received end-to-end. +} - // Sets the options to be used when CreateAndSetAndSignalOffer is called, or - // when a remote offer is received (via fake signaling) and an answer is - // generated. By default, uses default options. - void SetOfferAnswerOptions( - const PeerConnectionInterface::RTCOfferAnswerOptions& options) { - offer_answer_options_ = options; - } +// Verifies the DtmfSenderObserver callbacks for a DtmfSender (one in each +// direction). +TEST_P(PeerConnectionIntegrationTest, DtmfSenderObserver) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + // Only need audio for DTMF. + caller()->AddAudioTrack(); + callee()->AddAudioTrack(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // DTLS must finish before the DTMF sender can be used reliably. + ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); + TestDtmfFromSenderToReceiver(caller(), callee()); + TestDtmfFromSenderToReceiver(callee(), caller()); +} - // Set a callback to be invoked when SDP is received via the fake signaling - // channel, which provides an opportunity to munge (modify) the SDP. This is - // used to test SDP being applied that a PeerConnection would normally not - // generate, but a non-JSEP endpoint might. - void SetReceivedSdpMunger( - std::function munger) { - received_sdp_munger_ = std::move(munger); - } +// Basic end-to-end test, verifying media can be encoded/transmitted/decoded +// between two connections, using DTLS-SRTP. 
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithDtls) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); - // Similar to the above, but this is run on SDP immediately after it's - // generated. - void SetGeneratedSdpMunger( - std::function munger) { - generated_sdp_munger_ = std::move(munger); - } + // Do normal offer/answer and wait for some frames to be received in each + // direction. + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + EXPECT_METRIC_LE( + 2, webrtc::metrics::NumEvents("WebRTC.PeerConnection.KeyProtocol", + webrtc::kEnumCounterKeyProtocolDtls)); + EXPECT_METRIC_EQ( + 0, webrtc::metrics::NumEvents("WebRTC.PeerConnection.KeyProtocol", + webrtc::kEnumCounterKeyProtocolSdes)); +} - // Set a callback to be invoked when a remote offer is received via the fake - // signaling channel. This provides an opportunity to change the - // PeerConnection state before an answer is created and sent to the caller. - void SetRemoteOfferHandler(std::function handler) { - remote_offer_handler_ = std::move(handler); - } +// Uses SDES instead of DTLS for key agreement. +TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithSdes) { + PeerConnectionInterface::RTCConfiguration sdes_config; + sdes_config.enable_dtls_srtp.emplace(false); + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(sdes_config, sdes_config)); + ConnectFakeSignaling(); - void SetRemoteAsyncResolver(rtc::MockAsyncResolver* resolver) { - remote_async_resolver_ = resolver; - } + // Do normal offer/answer and wait for some frames to be received in each + // direction. 
+ caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + EXPECT_METRIC_LE( + 2, webrtc::metrics::NumEvents("WebRTC.PeerConnection.KeyProtocol", + webrtc::kEnumCounterKeyProtocolSdes)); + EXPECT_METRIC_EQ( + 0, webrtc::metrics::NumEvents("WebRTC.PeerConnection.KeyProtocol", + webrtc::kEnumCounterKeyProtocolDtls)); +} - // Every ICE connection state in order that has been seen by the observer. - std::vector - ice_connection_state_history() const { - return ice_connection_state_history_; - } - void clear_ice_connection_state_history() { - ice_connection_state_history_.clear(); - } +// Basic end-to-end test specifying the |enable_encrypted_rtp_header_extensions| +// option to offer encrypted versions of all header extensions alongside the +// unencrypted versions. +TEST_P(PeerConnectionIntegrationTest, + EndToEndCallWithEncryptedRtpHeaderExtensions) { + CryptoOptions crypto_options; + crypto_options.srtp.enable_encrypted_rtp_header_extensions = true; + PeerConnectionInterface::RTCConfiguration config; + config.crypto_options = crypto_options; + // Note: This allows offering >14 RTP header extensions. + config.offer_extmap_allow_mixed = true; + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); + ConnectFakeSignaling(); - // Every standardized ICE connection state in order that has been seen by the - // observer. - std::vector - standardized_ice_connection_state_history() const { - return standardized_ice_connection_state_history_; - } + // Do normal offer/answer and wait for some frames to be received in each + // direction. 
+ caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); +} - // Every PeerConnection state in order that has been seen by the observer. - std::vector - peer_connection_state_history() const { - return peer_connection_state_history_; - } +// This test sets up a call between two parties with a source resolution of +// 1280x720 and verifies that a 16:9 aspect ratio is received. +TEST_P(PeerConnectionIntegrationTest, + Send1280By720ResolutionAndReceive16To9AspectRatio) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); - // Every ICE gathering state in order that has been seen by the observer. - std::vector - ice_gathering_state_history() const { - return ice_gathering_state_history_; - } - std::vector - ice_candidate_pair_change_history() const { - return ice_candidate_pair_change_history_; - } + // Add video tracks with 16:9 aspect ratio, size 1280 x 720. + webrtc::FakePeriodicVideoSource::Config config; + config.width = 1280; + config.height = 720; + config.timestamp_offset_ms = rtc::TimeMillis(); + caller()->AddTrack(caller()->CreateLocalVideoTrackWithConfig(config)); + callee()->AddTrack(callee()->CreateLocalVideoTrackWithConfig(config)); - // Every PeerConnection signaling state in order that has been seen by the - // observer. - std::vector - peer_connection_signaling_state_history() const { - return peer_connection_signaling_state_history_; - } + // Do normal offer/answer and wait for at least one frame to be received in + // each direction. 
+ caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(caller()->min_video_frames_received_per_track() > 0 && + callee()->min_video_frames_received_per_track() > 0, + kMaxWaitForFramesMs); - void AddAudioVideoTracks() { - AddAudioTrack(); - AddVideoTrack(); - } + // Check rendered aspect ratio. + EXPECT_EQ(16.0 / 9, caller()->local_rendered_aspect_ratio()); + EXPECT_EQ(16.0 / 9, caller()->rendered_aspect_ratio()); + EXPECT_EQ(16.0 / 9, callee()->local_rendered_aspect_ratio()); + EXPECT_EQ(16.0 / 9, callee()->rendered_aspect_ratio()); +} - rtc::scoped_refptr AddAudioTrack() { - return AddTrack(CreateLocalAudioTrack()); - } - - rtc::scoped_refptr AddVideoTrack() { - return AddTrack(CreateLocalVideoTrack()); - } - - rtc::scoped_refptr CreateLocalAudioTrack() { - cricket::AudioOptions options; - // Disable highpass filter so that we can get all the test audio frames. - options.highpass_filter = false; - rtc::scoped_refptr source = - peer_connection_factory_->CreateAudioSource(options); - // TODO(perkj): Test audio source when it is implemented. Currently audio - // always use the default input. 
- return peer_connection_factory_->CreateAudioTrack(rtc::CreateRandomUuid(), - source); - } - - rtc::scoped_refptr CreateLocalVideoTrack() { - webrtc::FakePeriodicVideoSource::Config config; - config.timestamp_offset_ms = rtc::TimeMillis(); - return CreateLocalVideoTrackInternal(config); - } - - rtc::scoped_refptr - CreateLocalVideoTrackWithConfig( - webrtc::FakePeriodicVideoSource::Config config) { - return CreateLocalVideoTrackInternal(config); - } - - rtc::scoped_refptr - CreateLocalVideoTrackWithRotation(webrtc::VideoRotation rotation) { - webrtc::FakePeriodicVideoSource::Config config; - config.rotation = rotation; - config.timestamp_offset_ms = rtc::TimeMillis(); - return CreateLocalVideoTrackInternal(config); - } - - rtc::scoped_refptr AddTrack( - rtc::scoped_refptr track, - const std::vector& stream_ids = {}) { - auto result = pc()->AddTrack(track, stream_ids); - EXPECT_EQ(RTCErrorType::NONE, result.error().type()); - return result.MoveValue(); - } - - std::vector> GetReceiversOfType( - cricket::MediaType media_type) { - std::vector> receivers; - for (const auto& receiver : pc()->GetReceivers()) { - if (receiver->media_type() == media_type) { - receivers.push_back(receiver); - } - } - return receivers; - } - - rtc::scoped_refptr GetFirstTransceiverOfType( - cricket::MediaType media_type) { - for (auto transceiver : pc()->GetTransceivers()) { - if (transceiver->receiver()->media_type() == media_type) { - return transceiver; - } - } - return nullptr; - } - - bool SignalingStateStable() { - return pc()->signaling_state() == webrtc::PeerConnectionInterface::kStable; - } - - void CreateDataChannel() { CreateDataChannel(nullptr); } - - void CreateDataChannel(const webrtc::DataChannelInit* init) { - CreateDataChannel(kDataChannelLabel, init); - } - - void CreateDataChannel(const std::string& label, - const webrtc::DataChannelInit* init) { - data_channel_ = pc()->CreateDataChannel(label, init); - ASSERT_TRUE(data_channel_.get() != nullptr); - 
data_observer_.reset(new MockDataChannelObserver(data_channel_)); - } - - DataChannelInterface* data_channel() { return data_channel_; } - const MockDataChannelObserver* data_observer() const { - return data_observer_.get(); - } - - int audio_frames_received() const { - return fake_audio_capture_module_->frames_received(); - } - - // Takes minimum of video frames received for each track. - // - // Can be used like: - // EXPECT_GE(expected_frames, min_video_frames_received_per_track()); - // - // To ensure that all video tracks received at least a certain number of - // frames. - int min_video_frames_received_per_track() const { - int min_frames = INT_MAX; - if (fake_video_renderers_.empty()) { - return 0; - } - - for (const auto& pair : fake_video_renderers_) { - min_frames = std::min(min_frames, pair.second->num_rendered_frames()); - } - return min_frames; - } - - // Returns a MockStatsObserver in a state after stats gathering finished, - // which can be used to access the gathered stats. - rtc::scoped_refptr OldGetStatsForTrack( - webrtc::MediaStreamTrackInterface* track) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); - EXPECT_TRUE(peer_connection_->GetStats( - observer, nullptr, PeerConnectionInterface::kStatsOutputLevelStandard)); - EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout); - return observer; - } - - // Version that doesn't take a track "filter", and gathers all stats. - rtc::scoped_refptr OldGetStats() { - return OldGetStatsForTrack(nullptr); - } - - // Synchronously gets stats and returns them. If it times out, fails the test - // and returns null. - rtc::scoped_refptr NewGetStats() { - rtc::scoped_refptr callback( - new rtc::RefCountedObject()); - peer_connection_->GetStats(callback); - EXPECT_TRUE_WAIT(callback->called(), kDefaultTimeout); - return callback->report(); - } - - int rendered_width() { - EXPECT_FALSE(fake_video_renderers_.empty()); - return fake_video_renderers_.empty() - ? 
0 - : fake_video_renderers_.begin()->second->width(); - } - - int rendered_height() { - EXPECT_FALSE(fake_video_renderers_.empty()); - return fake_video_renderers_.empty() - ? 0 - : fake_video_renderers_.begin()->second->height(); - } - - double rendered_aspect_ratio() { - if (rendered_height() == 0) { - return 0.0; - } - return static_cast(rendered_width()) / rendered_height(); - } - - webrtc::VideoRotation rendered_rotation() { - EXPECT_FALSE(fake_video_renderers_.empty()); - return fake_video_renderers_.empty() - ? webrtc::kVideoRotation_0 - : fake_video_renderers_.begin()->second->rotation(); - } - - int local_rendered_width() { - return local_video_renderer_ ? local_video_renderer_->width() : 0; - } - - int local_rendered_height() { - return local_video_renderer_ ? local_video_renderer_->height() : 0; - } - - double local_rendered_aspect_ratio() { - if (local_rendered_height() == 0) { - return 0.0; - } - return static_cast(local_rendered_width()) / - local_rendered_height(); - } - - size_t number_of_remote_streams() { - if (!pc()) { - return 0; - } - return pc()->remote_streams()->count(); - } - - StreamCollectionInterface* remote_streams() const { - if (!pc()) { - ADD_FAILURE(); - return nullptr; - } - return pc()->remote_streams(); - } - - StreamCollectionInterface* local_streams() { - if (!pc()) { - ADD_FAILURE(); - return nullptr; - } - return pc()->local_streams(); - } - - webrtc::PeerConnectionInterface::SignalingState signaling_state() { - return pc()->signaling_state(); - } - - webrtc::PeerConnectionInterface::IceConnectionState ice_connection_state() { - return pc()->ice_connection_state(); - } - - webrtc::PeerConnectionInterface::IceConnectionState - standardized_ice_connection_state() { - return pc()->standardized_ice_connection_state(); - } - - webrtc::PeerConnectionInterface::IceGatheringState ice_gathering_state() { - return pc()->ice_gathering_state(); - } - - // Returns a MockRtpReceiverObserver for each RtpReceiver returned by - // 
GetReceivers. They're updated automatically when a remote offer/answer - // from the fake signaling channel is applied, or when - // ResetRtpReceiverObservers below is called. - const std::vector>& - rtp_receiver_observers() { - return rtp_receiver_observers_; - } - - void ResetRtpReceiverObservers() { - rtp_receiver_observers_.clear(); - for (const rtc::scoped_refptr& receiver : - pc()->GetReceivers()) { - std::unique_ptr observer( - new MockRtpReceiverObserver(receiver->media_type())); - receiver->SetObserver(observer.get()); - rtp_receiver_observers_.push_back(std::move(observer)); - } - } - - rtc::FakeNetworkManager* network_manager() const { - return fake_network_manager_.get(); - } - cricket::PortAllocator* port_allocator() const { return port_allocator_; } - - webrtc::FakeRtcEventLogFactory* event_log_factory() const { - return event_log_factory_; - } - - const cricket::Candidate& last_candidate_gathered() const { - return last_candidate_gathered_; - } - const cricket::IceCandidateErrorEvent& error_event() const { - return error_event_; - } - - // Sets the mDNS responder for the owned fake network manager and keeps a - // reference to the responder. - void SetMdnsResponder( - std::unique_ptr mdns_responder) { - RTC_DCHECK(mdns_responder != nullptr); - mdns_responder_ = mdns_responder.get(); - network_manager()->set_mdns_responder(std::move(mdns_responder)); - } - - // Returns null on failure. 
- std::unique_ptr CreateOfferAndWait() { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); - pc()->CreateOffer(observer, offer_answer_options_); - return WaitForDescriptionFromObserver(observer); - } - bool Rollback() { - return SetRemoteDescription( - webrtc::CreateSessionDescription(SdpType::kRollback, "")); - } - - private: - explicit PeerConnectionWrapper(const std::string& debug_name) - : debug_name_(debug_name) {} - - bool Init( - const PeerConnectionFactory::Options* options, - const PeerConnectionInterface::RTCConfiguration* config, - webrtc::PeerConnectionDependencies dependencies, - rtc::Thread* network_thread, - rtc::Thread* worker_thread, - std::unique_ptr event_log_factory, - std::unique_ptr media_transport_factory, - bool reset_encoder_factory, - bool reset_decoder_factory) { - // There's an error in this test code if Init ends up being called twice. - RTC_DCHECK(!peer_connection_); - RTC_DCHECK(!peer_connection_factory_); - - fake_network_manager_.reset(new rtc::FakeNetworkManager()); - fake_network_manager_->AddInterface(kDefaultLocalAddress); - - std::unique_ptr port_allocator( - new cricket::BasicPortAllocator(fake_network_manager_.get())); - port_allocator_ = port_allocator.get(); - fake_audio_capture_module_ = FakeAudioCaptureModule::Create(); - if (!fake_audio_capture_module_) { - return false; - } - rtc::Thread* const signaling_thread = rtc::Thread::Current(); - - webrtc::PeerConnectionFactoryDependencies pc_factory_dependencies; - pc_factory_dependencies.network_thread = network_thread; - pc_factory_dependencies.worker_thread = worker_thread; - pc_factory_dependencies.signaling_thread = signaling_thread; - pc_factory_dependencies.task_queue_factory = - webrtc::CreateDefaultTaskQueueFactory(); - cricket::MediaEngineDependencies media_deps; - media_deps.task_queue_factory = - pc_factory_dependencies.task_queue_factory.get(); - media_deps.adm = fake_audio_capture_module_; - webrtc::SetMediaEngineDefaults(&media_deps); - - if 
(reset_encoder_factory) { - media_deps.video_encoder_factory.reset(); - } - if (reset_decoder_factory) { - media_deps.video_decoder_factory.reset(); - } - - if (!media_deps.audio_processing) { - // If the standard Creation method for APM returns a null pointer, instead - // use the builder for testing to create an APM object. - media_deps.audio_processing = AudioProcessingBuilderForTesting().Create(); - } - - pc_factory_dependencies.media_engine = - cricket::CreateMediaEngine(std::move(media_deps)); - pc_factory_dependencies.call_factory = webrtc::CreateCallFactory(); - if (event_log_factory) { - event_log_factory_ = event_log_factory.get(); - pc_factory_dependencies.event_log_factory = std::move(event_log_factory); - } else { - pc_factory_dependencies.event_log_factory = - std::make_unique( - pc_factory_dependencies.task_queue_factory.get()); - } - if (media_transport_factory) { - pc_factory_dependencies.media_transport_factory = - std::move(media_transport_factory); - } - peer_connection_factory_ = webrtc::CreateModularPeerConnectionFactory( - std::move(pc_factory_dependencies)); - - if (!peer_connection_factory_) { - return false; - } - if (options) { - peer_connection_factory_->SetOptions(*options); - } - if (config) { - sdp_semantics_ = config->sdp_semantics; - } - - dependencies.allocator = std::move(port_allocator); - peer_connection_ = CreatePeerConnection(config, std::move(dependencies)); - return peer_connection_.get() != nullptr; - } - - rtc::scoped_refptr CreatePeerConnection( - const PeerConnectionInterface::RTCConfiguration* config, - webrtc::PeerConnectionDependencies dependencies) { - PeerConnectionInterface::RTCConfiguration modified_config; - // If |config| is null, this will result in a default configuration being - // used. - if (config) { - modified_config = *config; - } - // Disable resolution adaptation; we don't want it interfering with the - // test results. - // TODO(deadbeef): Do something more robust. 
Since we're testing for aspect - // ratios and not specific resolutions, is this even necessary? - modified_config.set_cpu_adaptation(false); - - dependencies.observer = this; - return peer_connection_factory_->CreatePeerConnection( - modified_config, std::move(dependencies)); - } - - void set_signaling_message_receiver( - SignalingMessageReceiver* signaling_message_receiver) { - signaling_message_receiver_ = signaling_message_receiver; - } - - void set_signaling_delay_ms(int delay_ms) { signaling_delay_ms_ = delay_ms; } - - void set_signal_ice_candidates(bool signal) { - signal_ice_candidates_ = signal; - } - - rtc::scoped_refptr CreateLocalVideoTrackInternal( - webrtc::FakePeriodicVideoSource::Config config) { - // Set max frame rate to 10fps to reduce the risk of test flakiness. - // TODO(deadbeef): Do something more robust. - config.frame_interval_ms = 100; - - video_track_sources_.emplace_back( - new rtc::RefCountedObject( - config, false /* remote */)); - rtc::scoped_refptr track( - peer_connection_factory_->CreateVideoTrack( - rtc::CreateRandomUuid(), video_track_sources_.back())); - if (!local_video_renderer_) { - local_video_renderer_.reset(new webrtc::FakeVideoTrackRenderer(track)); - } - return track; - } - - void HandleIncomingOffer(const std::string& msg) { - RTC_LOG(LS_INFO) << debug_name_ << ": HandleIncomingOffer"; - std::unique_ptr desc = - webrtc::CreateSessionDescription(SdpType::kOffer, msg); - if (received_sdp_munger_) { - received_sdp_munger_(desc->description()); - } - - EXPECT_TRUE(SetRemoteDescription(std::move(desc))); - // Setting a remote description may have changed the number of receivers, - // so reset the receiver observers. 
- ResetRtpReceiverObservers(); - if (remote_offer_handler_) { - remote_offer_handler_(); - } - auto answer = CreateAnswer(); - ASSERT_NE(nullptr, answer); - EXPECT_TRUE(SetLocalDescriptionAndSendSdpMessage(std::move(answer))); - } - - void HandleIncomingAnswer(const std::string& msg) { - RTC_LOG(LS_INFO) << debug_name_ << ": HandleIncomingAnswer"; - std::unique_ptr desc = - webrtc::CreateSessionDescription(SdpType::kAnswer, msg); - if (received_sdp_munger_) { - received_sdp_munger_(desc->description()); - } - - EXPECT_TRUE(SetRemoteDescription(std::move(desc))); - // Set the RtpReceiverObserver after receivers are created. - ResetRtpReceiverObservers(); - } - - // Returns null on failure. - std::unique_ptr CreateAnswer() { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); - pc()->CreateAnswer(observer, offer_answer_options_); - return WaitForDescriptionFromObserver(observer); - } - - std::unique_ptr WaitForDescriptionFromObserver( - MockCreateSessionDescriptionObserver* observer) { - EXPECT_EQ_WAIT(true, observer->called(), kDefaultTimeout); - if (!observer->result()) { - return nullptr; - } - auto description = observer->MoveDescription(); - if (generated_sdp_munger_) { - generated_sdp_munger_(description->description()); - } - return description; - } - - // Setting the local description and sending the SDP message over the fake - // signaling channel are combined into the same method because the SDP - // message needs to be sent as soon as SetLocalDescription finishes, without - // waiting for the observer to be called. This ensures that ICE candidates - // don't outrace the description. 
- bool SetLocalDescriptionAndSendSdpMessage( - std::unique_ptr desc) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); - RTC_LOG(LS_INFO) << debug_name_ << ": SetLocalDescriptionAndSendSdpMessage"; - SdpType type = desc->GetType(); - std::string sdp; - EXPECT_TRUE(desc->ToString(&sdp)); - RTC_LOG(LS_INFO) << debug_name_ << ": local SDP contents=\n" << sdp; - pc()->SetLocalDescription(observer, desc.release()); - if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) { - RemoveUnusedVideoRenderers(); - } - // As mentioned above, we need to send the message immediately after - // SetLocalDescription. - SendSdpMessage(type, sdp); - EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout); - return true; - } - - bool SetRemoteDescription(std::unique_ptr desc) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); - RTC_LOG(LS_INFO) << debug_name_ << ": SetRemoteDescription"; - pc()->SetRemoteDescription(observer, desc.release()); - if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) { - RemoveUnusedVideoRenderers(); - } - EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout); - return observer->result(); - } - - // This is a work around to remove unused fake_video_renderers from - // transceivers that have either stopped or are no longer receiving. - void RemoveUnusedVideoRenderers() { - auto transceivers = pc()->GetTransceivers(); - for (auto& transceiver : transceivers) { - if (transceiver->receiver()->media_type() != cricket::MEDIA_TYPE_VIDEO) { - continue; - } - // Remove fake video renderers from any stopped transceivers. - if (transceiver->stopped()) { - auto it = - fake_video_renderers_.find(transceiver->receiver()->track()->id()); - if (it != fake_video_renderers_.end()) { - fake_video_renderers_.erase(it); - } - } - // Remove fake video renderers from any transceivers that are no longer - // receiving. 
- if ((transceiver->current_direction() && - !webrtc::RtpTransceiverDirectionHasRecv( - *transceiver->current_direction()))) { - auto it = - fake_video_renderers_.find(transceiver->receiver()->track()->id()); - if (it != fake_video_renderers_.end()) { - fake_video_renderers_.erase(it); - } - } - } - } - - // Simulate sending a blob of SDP with delay |signaling_delay_ms_| (0 by - // default). - void SendSdpMessage(SdpType type, const std::string& msg) { - if (signaling_delay_ms_ == 0) { - RelaySdpMessageIfReceiverExists(type, msg); - } else { - invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, rtc::Thread::Current(), - rtc::Bind(&PeerConnectionWrapper::RelaySdpMessageIfReceiverExists, - this, type, msg), - signaling_delay_ms_); - } - } - - void RelaySdpMessageIfReceiverExists(SdpType type, const std::string& msg) { - if (signaling_message_receiver_) { - signaling_message_receiver_->ReceiveSdpMessage(type, msg); - } - } - - // Simulate trickling an ICE candidate with delay |signaling_delay_ms_| (0 by - // default). - void SendIceMessage(const std::string& sdp_mid, - int sdp_mline_index, - const std::string& msg) { - if (signaling_delay_ms_ == 0) { - RelayIceMessageIfReceiverExists(sdp_mid, sdp_mline_index, msg); - } else { - invoker_.AsyncInvokeDelayed( - RTC_FROM_HERE, rtc::Thread::Current(), - rtc::Bind(&PeerConnectionWrapper::RelayIceMessageIfReceiverExists, - this, sdp_mid, sdp_mline_index, msg), - signaling_delay_ms_); - } - } - - void RelayIceMessageIfReceiverExists(const std::string& sdp_mid, - int sdp_mline_index, - const std::string& msg) { - if (signaling_message_receiver_) { - signaling_message_receiver_->ReceiveIceMessage(sdp_mid, sdp_mline_index, - msg); - } - } - - // SignalingMessageReceiver callbacks. 
- void ReceiveSdpMessage(SdpType type, const std::string& msg) override { - if (type == SdpType::kOffer) { - HandleIncomingOffer(msg); - } else { - HandleIncomingAnswer(msg); - } - } - - void ReceiveIceMessage(const std::string& sdp_mid, - int sdp_mline_index, - const std::string& msg) override { - RTC_LOG(LS_INFO) << debug_name_ << ": ReceiveIceMessage"; - std::unique_ptr candidate( - webrtc::CreateIceCandidate(sdp_mid, sdp_mline_index, msg, nullptr)); - EXPECT_TRUE(pc()->AddIceCandidate(candidate.get())); - } - - // PeerConnectionObserver callbacks. - void OnSignalingChange( - webrtc::PeerConnectionInterface::SignalingState new_state) override { - EXPECT_EQ(pc()->signaling_state(), new_state); - peer_connection_signaling_state_history_.push_back(new_state); - } - void OnAddTrack(rtc::scoped_refptr receiver, - const std::vector>& - streams) override { - if (receiver->media_type() == cricket::MEDIA_TYPE_VIDEO) { - rtc::scoped_refptr video_track( - static_cast(receiver->track().get())); - ASSERT_TRUE(fake_video_renderers_.find(video_track->id()) == - fake_video_renderers_.end()); - fake_video_renderers_[video_track->id()] = - std::make_unique(video_track); - } - } - void OnRemoveTrack( - rtc::scoped_refptr receiver) override { - if (receiver->media_type() == cricket::MEDIA_TYPE_VIDEO) { - auto it = fake_video_renderers_.find(receiver->track()->id()); - RTC_DCHECK(it != fake_video_renderers_.end()); - fake_video_renderers_.erase(it); - } - } - void OnRenegotiationNeeded() override {} - void OnIceConnectionChange( - webrtc::PeerConnectionInterface::IceConnectionState new_state) override { - EXPECT_EQ(pc()->ice_connection_state(), new_state); - ice_connection_state_history_.push_back(new_state); - } - void OnStandardizedIceConnectionChange( - webrtc::PeerConnectionInterface::IceConnectionState new_state) override { - standardized_ice_connection_state_history_.push_back(new_state); - } - void OnConnectionChange( - webrtc::PeerConnectionInterface::PeerConnectionState 
new_state) override { - peer_connection_state_history_.push_back(new_state); - } - - void OnIceGatheringChange( - webrtc::PeerConnectionInterface::IceGatheringState new_state) override { - EXPECT_EQ(pc()->ice_gathering_state(), new_state); - ice_gathering_state_history_.push_back(new_state); - } - - void OnIceSelectedCandidatePairChanged( - const cricket::CandidatePairChangeEvent& event) { - ice_candidate_pair_change_history_.push_back(event); - } - - void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) override { - RTC_LOG(LS_INFO) << debug_name_ << ": OnIceCandidate"; - - if (remote_async_resolver_) { - const auto& local_candidate = candidate->candidate(); - if (local_candidate.address().IsUnresolvedIP()) { - RTC_DCHECK(local_candidate.type() == cricket::LOCAL_PORT_TYPE); - rtc::SocketAddress resolved_addr(local_candidate.address()); - const auto resolved_ip = mdns_responder_->GetMappedAddressForName( - local_candidate.address().hostname()); - RTC_DCHECK(!resolved_ip.IsNil()); - resolved_addr.SetResolvedIP(resolved_ip); - EXPECT_CALL(*remote_async_resolver_, GetResolvedAddress(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(resolved_addr), Return(true))); - EXPECT_CALL(*remote_async_resolver_, Destroy(_)); - } - } - - std::string ice_sdp; - EXPECT_TRUE(candidate->ToString(&ice_sdp)); - if (signaling_message_receiver_ == nullptr || !signal_ice_candidates_) { - // Remote party may be deleted. 
- return; - } - SendIceMessage(candidate->sdp_mid(), candidate->sdp_mline_index(), ice_sdp); - last_candidate_gathered_ = candidate->candidate(); - } - void OnIceCandidateError(const std::string& address, - int port, - const std::string& url, - int error_code, - const std::string& error_text) override { - error_event_ = cricket::IceCandidateErrorEvent(address, port, url, - error_code, error_text); - } - void OnDataChannel( - rtc::scoped_refptr data_channel) override { - RTC_LOG(LS_INFO) << debug_name_ << ": OnDataChannel"; - data_channel_ = data_channel; - data_observer_.reset(new MockDataChannelObserver(data_channel)); - } - - std::string debug_name_; - - std::unique_ptr fake_network_manager_; - // Reference to the mDNS responder owned by |fake_network_manager_| after set. - webrtc::FakeMdnsResponder* mdns_responder_ = nullptr; - - rtc::scoped_refptr peer_connection_; - rtc::scoped_refptr - peer_connection_factory_; - - cricket::PortAllocator* port_allocator_; - // Needed to keep track of number of frames sent. - rtc::scoped_refptr fake_audio_capture_module_; - // Needed to keep track of number of frames received. - std::map> - fake_video_renderers_; - // Needed to ensure frames aren't received for removed tracks. - std::vector> - removed_fake_video_renderers_; - - // For remote peer communication. - SignalingMessageReceiver* signaling_message_receiver_ = nullptr; - int signaling_delay_ms_ = 0; - bool signal_ice_candidates_ = true; - cricket::Candidate last_candidate_gathered_; - cricket::IceCandidateErrorEvent error_event_; - - // Store references to the video sources we've created, so that we can stop - // them, if required. - std::vector> - video_track_sources_; - // |local_video_renderer_| attached to the first created local video track. 
- std::unique_ptr local_video_renderer_; - - SdpSemantics sdp_semantics_; - PeerConnectionInterface::RTCOfferAnswerOptions offer_answer_options_; - std::function received_sdp_munger_; - std::function generated_sdp_munger_; - std::function remote_offer_handler_; - rtc::MockAsyncResolver* remote_async_resolver_ = nullptr; - rtc::scoped_refptr data_channel_; - std::unique_ptr data_observer_; - - std::vector> rtp_receiver_observers_; - - std::vector - ice_connection_state_history_; - std::vector - standardized_ice_connection_state_history_; - std::vector - peer_connection_state_history_; - std::vector - ice_gathering_state_history_; - std::vector - ice_candidate_pair_change_history_; - std::vector - peer_connection_signaling_state_history_; - webrtc::FakeRtcEventLogFactory* event_log_factory_; - - rtc::AsyncInvoker invoker_; - - friend class PeerConnectionIntegrationBaseTest; -}; - -class MockRtcEventLogOutput : public webrtc::RtcEventLogOutput { - public: - virtual ~MockRtcEventLogOutput() = default; - MOCK_METHOD(bool, IsActive, (), (const, override)); - MOCK_METHOD(bool, Write, (const std::string&), (override)); -}; - -// This helper object is used for both specifying how many audio/video frames -// are expected to be received for a caller/callee. It provides helper functions -// to specify these expectations. The object initially starts in a state of no -// expectations. 
-class MediaExpectations { - public: - enum ExpectFrames { - kExpectSomeFrames, - kExpectNoFrames, - kNoExpectation, - }; - - void ExpectBidirectionalAudioAndVideo() { - ExpectBidirectionalAudio(); - ExpectBidirectionalVideo(); - } - - void ExpectBidirectionalAudio() { - CallerExpectsSomeAudio(); - CalleeExpectsSomeAudio(); - } - - void ExpectNoAudio() { - CallerExpectsNoAudio(); - CalleeExpectsNoAudio(); - } - - void ExpectBidirectionalVideo() { - CallerExpectsSomeVideo(); - CalleeExpectsSomeVideo(); - } - - void ExpectNoVideo() { - CallerExpectsNoVideo(); - CalleeExpectsNoVideo(); - } - - void CallerExpectsSomeAudioAndVideo() { - CallerExpectsSomeAudio(); - CallerExpectsSomeVideo(); - } - - void CalleeExpectsSomeAudioAndVideo() { - CalleeExpectsSomeAudio(); - CalleeExpectsSomeVideo(); - } - - // Caller's audio functions. - void CallerExpectsSomeAudio( - int expected_audio_frames = kDefaultExpectedAudioFrameCount) { - caller_audio_expectation_ = kExpectSomeFrames; - caller_audio_frames_expected_ = expected_audio_frames; - } - - void CallerExpectsNoAudio() { - caller_audio_expectation_ = kExpectNoFrames; - caller_audio_frames_expected_ = 0; - } - - // Caller's video functions. - void CallerExpectsSomeVideo( - int expected_video_frames = kDefaultExpectedVideoFrameCount) { - caller_video_expectation_ = kExpectSomeFrames; - caller_video_frames_expected_ = expected_video_frames; - } - - void CallerExpectsNoVideo() { - caller_video_expectation_ = kExpectNoFrames; - caller_video_frames_expected_ = 0; - } - - // Callee's audio functions. - void CalleeExpectsSomeAudio( - int expected_audio_frames = kDefaultExpectedAudioFrameCount) { - callee_audio_expectation_ = kExpectSomeFrames; - callee_audio_frames_expected_ = expected_audio_frames; - } - - void CalleeExpectsNoAudio() { - callee_audio_expectation_ = kExpectNoFrames; - callee_audio_frames_expected_ = 0; - } - - // Callee's video functions. 
- void CalleeExpectsSomeVideo( - int expected_video_frames = kDefaultExpectedVideoFrameCount) { - callee_video_expectation_ = kExpectSomeFrames; - callee_video_frames_expected_ = expected_video_frames; - } - - void CalleeExpectsNoVideo() { - callee_video_expectation_ = kExpectNoFrames; - callee_video_frames_expected_ = 0; - } - - ExpectFrames caller_audio_expectation_ = kNoExpectation; - ExpectFrames caller_video_expectation_ = kNoExpectation; - ExpectFrames callee_audio_expectation_ = kNoExpectation; - ExpectFrames callee_video_expectation_ = kNoExpectation; - int caller_audio_frames_expected_ = 0; - int caller_video_frames_expected_ = 0; - int callee_audio_frames_expected_ = 0; - int callee_video_frames_expected_ = 0; -}; - -class MockIceTransport : public webrtc::IceTransportInterface { - public: - MockIceTransport(const std::string& name, int component) - : internal_(std::make_unique( - name, - component, - nullptr /* network_thread */)) {} - ~MockIceTransport() = default; - cricket::IceTransportInternal* internal() { return internal_.get(); } - - private: - std::unique_ptr internal_; -}; - -class MockIceTransportFactory : public IceTransportFactory { - public: - ~MockIceTransportFactory() override = default; - rtc::scoped_refptr CreateIceTransport( - const std::string& transport_name, - int component, - IceTransportInit init) { - RecordIceTransportCreated(); - return new rtc::RefCountedObject(transport_name, - component); - } - MOCK_METHOD(void, RecordIceTransportCreated, ()); -}; - -// Tests two PeerConnections connecting to each other end-to-end, using a -// virtual network, fake A/V capture and fake encoder/decoders. The -// PeerConnections share the threads/socket servers, but use separate versions -// of everything else (including "PeerConnectionFactory"s). 
-class PeerConnectionIntegrationBaseTest : public ::testing::Test { - public: - explicit PeerConnectionIntegrationBaseTest(SdpSemantics sdp_semantics) - : sdp_semantics_(sdp_semantics), - ss_(new rtc::VirtualSocketServer()), - fss_(new rtc::FirewallSocketServer(ss_.get())), - network_thread_(new rtc::Thread(fss_.get())), - worker_thread_(rtc::Thread::Create()), - loopback_media_transports_(network_thread_.get()) { - network_thread_->SetName("PCNetworkThread", this); - worker_thread_->SetName("PCWorkerThread", this); - RTC_CHECK(network_thread_->Start()); - RTC_CHECK(worker_thread_->Start()); - webrtc::metrics::Reset(); - } - - ~PeerConnectionIntegrationBaseTest() { - // The PeerConnections should deleted before the TurnCustomizers. - // A TurnPort is created with a raw pointer to a TurnCustomizer. The - // TurnPort has the same lifetime as the PeerConnection, so it's expected - // that the TurnCustomizer outlives the life of the PeerConnection or else - // when Send() is called it will hit a seg fault. - if (caller_) { - caller_->set_signaling_message_receiver(nullptr); - delete SetCallerPcWrapperAndReturnCurrent(nullptr); - } - if (callee_) { - callee_->set_signaling_message_receiver(nullptr); - delete SetCalleePcWrapperAndReturnCurrent(nullptr); - } - - // If turn servers were created for the test they need to be destroyed on - // the network thread. - network_thread()->Invoke(RTC_FROM_HERE, [this] { - turn_servers_.clear(); - turn_customizers_.clear(); - }); - } - - bool SignalingStateStable() { - return caller_->SignalingStateStable() && callee_->SignalingStateStable(); - } - - bool DtlsConnected() { - // TODO(deadbeef): kIceConnectionConnected currently means both ICE and DTLS - // are connected. This is an important distinction. Once we have separate - // ICE and DTLS state, this check needs to use the DTLS state. 
- return (callee()->ice_connection_state() == - webrtc::PeerConnectionInterface::kIceConnectionConnected || - callee()->ice_connection_state() == - webrtc::PeerConnectionInterface::kIceConnectionCompleted) && - (caller()->ice_connection_state() == - webrtc::PeerConnectionInterface::kIceConnectionConnected || - caller()->ice_connection_state() == - webrtc::PeerConnectionInterface::kIceConnectionCompleted); - } - - // When |event_log_factory| is null, the default implementation of the event - // log factory will be used. - std::unique_ptr CreatePeerConnectionWrapper( - const std::string& debug_name, - const PeerConnectionFactory::Options* options, - const RTCConfiguration* config, - webrtc::PeerConnectionDependencies dependencies, - std::unique_ptr event_log_factory, - std::unique_ptr media_transport_factory, - bool reset_encoder_factory, - bool reset_decoder_factory) { - RTCConfiguration modified_config; - if (config) { - modified_config = *config; - } - modified_config.sdp_semantics = sdp_semantics_; - if (!dependencies.cert_generator) { - dependencies.cert_generator = - std::make_unique(); - } - std::unique_ptr client( - new PeerConnectionWrapper(debug_name)); - - if (!client->Init(options, &modified_config, std::move(dependencies), - network_thread_.get(), worker_thread_.get(), - std::move(event_log_factory), - std::move(media_transport_factory), reset_encoder_factory, - reset_decoder_factory)) { - return nullptr; - } - return client; - } - - std::unique_ptr - CreatePeerConnectionWrapperWithFakeRtcEventLog( - const std::string& debug_name, - const PeerConnectionFactory::Options* options, - const RTCConfiguration* config, - webrtc::PeerConnectionDependencies dependencies) { - std::unique_ptr event_log_factory( - new webrtc::FakeRtcEventLogFactory(rtc::Thread::Current())); - return CreatePeerConnectionWrapper( - debug_name, options, config, std::move(dependencies), - std::move(event_log_factory), - /*media_transport_factory=*/nullptr, 
/*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - } - - bool CreatePeerConnectionWrappers() { - return CreatePeerConnectionWrappersWithConfig( - PeerConnectionInterface::RTCConfiguration(), - PeerConnectionInterface::RTCConfiguration()); - } - - bool CreatePeerConnectionWrappersWithSdpSemantics( - SdpSemantics caller_semantics, - SdpSemantics callee_semantics) { - // Can't specify the sdp_semantics in the passed-in configuration since it - // will be overwritten by CreatePeerConnectionWrapper with whatever is - // stored in sdp_semantics_. So get around this by modifying the instance - // variable before calling CreatePeerConnectionWrapper for the caller and - // callee PeerConnections. - SdpSemantics original_semantics = sdp_semantics_; - sdp_semantics_ = caller_semantics; - caller_ = CreatePeerConnectionWrapper( - "Caller", nullptr, nullptr, webrtc::PeerConnectionDependencies(nullptr), - nullptr, /*media_transport_factory=*/nullptr, - /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - sdp_semantics_ = callee_semantics; - callee_ = CreatePeerConnectionWrapper( - "Callee", nullptr, nullptr, webrtc::PeerConnectionDependencies(nullptr), - nullptr, /*media_transport_factory=*/nullptr, - /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - sdp_semantics_ = original_semantics; - return caller_ && callee_; - } - - bool CreatePeerConnectionWrappersWithConfig( - const PeerConnectionInterface::RTCConfiguration& caller_config, - const PeerConnectionInterface::RTCConfiguration& callee_config) { - caller_ = CreatePeerConnectionWrapper( - "Caller", nullptr, &caller_config, - webrtc::PeerConnectionDependencies(nullptr), nullptr, - /*media_transport_factory=*/nullptr, /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - callee_ = CreatePeerConnectionWrapper( - "Callee", nullptr, &callee_config, - webrtc::PeerConnectionDependencies(nullptr), nullptr, - /*media_transport_factory=*/nullptr, 
/*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - return caller_ && callee_; - } - - bool CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - const PeerConnectionInterface::RTCConfiguration& caller_config, - const PeerConnectionInterface::RTCConfiguration& callee_config, - std::unique_ptr caller_factory, - std::unique_ptr callee_factory) { - caller_ = CreatePeerConnectionWrapper( - "Caller", nullptr, &caller_config, - webrtc::PeerConnectionDependencies(nullptr), nullptr, - std::move(caller_factory), /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - callee_ = CreatePeerConnectionWrapper( - "Callee", nullptr, &callee_config, - webrtc::PeerConnectionDependencies(nullptr), nullptr, - std::move(callee_factory), /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - return caller_ && callee_; - } - - bool CreatePeerConnectionWrappersWithConfigAndDeps( - const PeerConnectionInterface::RTCConfiguration& caller_config, - webrtc::PeerConnectionDependencies caller_dependencies, - const PeerConnectionInterface::RTCConfiguration& callee_config, - webrtc::PeerConnectionDependencies callee_dependencies) { - caller_ = CreatePeerConnectionWrapper( - "Caller", nullptr, &caller_config, std::move(caller_dependencies), - nullptr, - /*media_transport_factory=*/nullptr, /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - callee_ = CreatePeerConnectionWrapper( - "Callee", nullptr, &callee_config, std::move(callee_dependencies), - nullptr, - /*media_transport_factory=*/nullptr, /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - return caller_ && callee_; - } - - bool CreatePeerConnectionWrappersWithOptions( - const PeerConnectionFactory::Options& caller_options, - const PeerConnectionFactory::Options& callee_options) { - caller_ = CreatePeerConnectionWrapper( - "Caller", &caller_options, nullptr, - webrtc::PeerConnectionDependencies(nullptr), nullptr, - 
/*media_transport_factory=*/nullptr, /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - callee_ = CreatePeerConnectionWrapper( - "Callee", &callee_options, nullptr, - webrtc::PeerConnectionDependencies(nullptr), nullptr, - /*media_transport_factory=*/nullptr, /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - return caller_ && callee_; - } - - bool CreatePeerConnectionWrappersWithFakeRtcEventLog() { - PeerConnectionInterface::RTCConfiguration default_config; - caller_ = CreatePeerConnectionWrapperWithFakeRtcEventLog( - "Caller", nullptr, &default_config, - webrtc::PeerConnectionDependencies(nullptr)); - callee_ = CreatePeerConnectionWrapperWithFakeRtcEventLog( - "Callee", nullptr, &default_config, - webrtc::PeerConnectionDependencies(nullptr)); - return caller_ && callee_; - } - - std::unique_ptr - CreatePeerConnectionWrapperWithAlternateKey() { - std::unique_ptr cert_generator( - new FakeRTCCertificateGenerator()); - cert_generator->use_alternate_key(); - - webrtc::PeerConnectionDependencies dependencies(nullptr); - dependencies.cert_generator = std::move(cert_generator); - return CreatePeerConnectionWrapper( - "New Peer", nullptr, nullptr, std::move(dependencies), nullptr, - /*media_transport_factory=*/nullptr, /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); - } - - bool CreateOneDirectionalPeerConnectionWrappers(bool caller_to_callee) { - caller_ = CreatePeerConnectionWrapper( - "Caller", nullptr, nullptr, webrtc::PeerConnectionDependencies(nullptr), - nullptr, /*media_transport_factory=*/nullptr, - /*reset_encoder_factory=*/!caller_to_callee, - /*reset_decoder_factory=*/caller_to_callee); - callee_ = CreatePeerConnectionWrapper( - "Callee", nullptr, nullptr, webrtc::PeerConnectionDependencies(nullptr), - nullptr, /*media_transport_factory=*/nullptr, - /*reset_encoder_factory=*/caller_to_callee, - /*reset_decoder_factory=*/!caller_to_callee); - return caller_ && callee_; - } - - 
cricket::TestTurnServer* CreateTurnServer( - rtc::SocketAddress internal_address, - rtc::SocketAddress external_address, - cricket::ProtocolType type = cricket::ProtocolType::PROTO_UDP, - const std::string& common_name = "test turn server") { - rtc::Thread* thread = network_thread(); - std::unique_ptr turn_server = - network_thread()->Invoke>( - RTC_FROM_HERE, - [thread, internal_address, external_address, type, common_name] { - return std::make_unique( - thread, internal_address, external_address, type, - /*ignore_bad_certs=*/true, common_name); - }); - turn_servers_.push_back(std::move(turn_server)); - // Interactions with the turn server should be done on the network thread. - return turn_servers_.back().get(); - } - - cricket::TestTurnCustomizer* CreateTurnCustomizer() { - std::unique_ptr turn_customizer = - network_thread()->Invoke>( - RTC_FROM_HERE, - [] { return std::make_unique(); }); - turn_customizers_.push_back(std::move(turn_customizer)); - // Interactions with the turn customizer should be done on the network - // thread. - return turn_customizers_.back().get(); - } - - // Checks that the function counters for a TestTurnCustomizer are greater than - // 0. - void ExpectTurnCustomizerCountersIncremented( - cricket::TestTurnCustomizer* turn_customizer) { - unsigned int allow_channel_data_counter = - network_thread()->Invoke( - RTC_FROM_HERE, [turn_customizer] { - return turn_customizer->allow_channel_data_cnt_; - }); - EXPECT_GT(allow_channel_data_counter, 0u); - unsigned int modify_counter = network_thread()->Invoke( - RTC_FROM_HERE, - [turn_customizer] { return turn_customizer->modify_cnt_; }); - EXPECT_GT(modify_counter, 0u); - } - - // Once called, SDP blobs and ICE candidates will be automatically signaled - // between PeerConnections. 
- void ConnectFakeSignaling() { - caller_->set_signaling_message_receiver(callee_.get()); - callee_->set_signaling_message_receiver(caller_.get()); - } - - // Once called, SDP blobs will be automatically signaled between - // PeerConnections. Note that ICE candidates will not be signaled unless they - // are in the exchanged SDP blobs. - void ConnectFakeSignalingForSdpOnly() { - ConnectFakeSignaling(); - SetSignalIceCandidates(false); - } - - void SetSignalingDelayMs(int delay_ms) { - caller_->set_signaling_delay_ms(delay_ms); - callee_->set_signaling_delay_ms(delay_ms); - } - - void SetSignalIceCandidates(bool signal) { - caller_->set_signal_ice_candidates(signal); - callee_->set_signal_ice_candidates(signal); - } - - // Messages may get lost on the unreliable DataChannel, so we send multiple - // times to avoid test flakiness. - void SendRtpDataWithRetries(webrtc::DataChannelInterface* dc, - const std::string& data, - int retries) { - for (int i = 0; i < retries; ++i) { - dc->Send(DataBuffer(data)); - } - } - - rtc::Thread* network_thread() { return network_thread_.get(); } - - rtc::VirtualSocketServer* virtual_socket_server() { return ss_.get(); } - - webrtc::MediaTransportPair* loopback_media_transports() { - return &loopback_media_transports_; - } - - PeerConnectionWrapper* caller() { return caller_.get(); } - - // Set the |caller_| to the |wrapper| passed in and return the - // original |caller_|. - PeerConnectionWrapper* SetCallerPcWrapperAndReturnCurrent( - PeerConnectionWrapper* wrapper) { - PeerConnectionWrapper* old = caller_.release(); - caller_.reset(wrapper); - return old; - } - - PeerConnectionWrapper* callee() { return callee_.get(); } - - // Set the |callee_| to the |wrapper| passed in and return the - // original |callee_|. 
- PeerConnectionWrapper* SetCalleePcWrapperAndReturnCurrent( - PeerConnectionWrapper* wrapper) { - PeerConnectionWrapper* old = callee_.release(); - callee_.reset(wrapper); - return old; - } - - void SetPortAllocatorFlags(uint32_t caller_flags, uint32_t callee_flags) { - network_thread()->Invoke( - RTC_FROM_HERE, rtc::Bind(&cricket::PortAllocator::set_flags, - caller()->port_allocator(), caller_flags)); - network_thread()->Invoke( - RTC_FROM_HERE, rtc::Bind(&cricket::PortAllocator::set_flags, - callee()->port_allocator(), callee_flags)); - } - - rtc::FirewallSocketServer* firewall() const { return fss_.get(); } - - // Expects the provided number of new frames to be received within - // kMaxWaitForFramesMs. The new expected frames are specified in - // |media_expectations|. Returns false if any of the expectations were - // not met. - bool ExpectNewFrames(const MediaExpectations& media_expectations) { - // First initialize the expected frame counts based upon the current - // frame count. 
- int total_caller_audio_frames_expected = caller()->audio_frames_received(); - if (media_expectations.caller_audio_expectation_ == - MediaExpectations::kExpectSomeFrames) { - total_caller_audio_frames_expected += - media_expectations.caller_audio_frames_expected_; - } - int total_caller_video_frames_expected = - caller()->min_video_frames_received_per_track(); - if (media_expectations.caller_video_expectation_ == - MediaExpectations::kExpectSomeFrames) { - total_caller_video_frames_expected += - media_expectations.caller_video_frames_expected_; - } - int total_callee_audio_frames_expected = callee()->audio_frames_received(); - if (media_expectations.callee_audio_expectation_ == - MediaExpectations::kExpectSomeFrames) { - total_callee_audio_frames_expected += - media_expectations.callee_audio_frames_expected_; - } - int total_callee_video_frames_expected = - callee()->min_video_frames_received_per_track(); - if (media_expectations.callee_video_expectation_ == - MediaExpectations::kExpectSomeFrames) { - total_callee_video_frames_expected += - media_expectations.callee_video_frames_expected_; - } - - // Wait for the expected frames. - EXPECT_TRUE_WAIT(caller()->audio_frames_received() >= - total_caller_audio_frames_expected && - caller()->min_video_frames_received_per_track() >= - total_caller_video_frames_expected && - callee()->audio_frames_received() >= - total_callee_audio_frames_expected && - callee()->min_video_frames_received_per_track() >= - total_callee_video_frames_expected, - kMaxWaitForFramesMs); - bool expectations_correct = - caller()->audio_frames_received() >= - total_caller_audio_frames_expected && - caller()->min_video_frames_received_per_track() >= - total_caller_video_frames_expected && - callee()->audio_frames_received() >= - total_callee_audio_frames_expected && - callee()->min_video_frames_received_per_track() >= - total_callee_video_frames_expected; - - // After the combined wait, print out a more detailed message upon - // failure. 
- EXPECT_GE(caller()->audio_frames_received(), - total_caller_audio_frames_expected); - EXPECT_GE(caller()->min_video_frames_received_per_track(), - total_caller_video_frames_expected); - EXPECT_GE(callee()->audio_frames_received(), - total_callee_audio_frames_expected); - EXPECT_GE(callee()->min_video_frames_received_per_track(), - total_callee_video_frames_expected); - - // We want to make sure nothing unexpected was received. - if (media_expectations.caller_audio_expectation_ == - MediaExpectations::kExpectNoFrames) { - EXPECT_EQ(caller()->audio_frames_received(), - total_caller_audio_frames_expected); - if (caller()->audio_frames_received() != - total_caller_audio_frames_expected) { - expectations_correct = false; - } - } - if (media_expectations.caller_video_expectation_ == - MediaExpectations::kExpectNoFrames) { - EXPECT_EQ(caller()->min_video_frames_received_per_track(), - total_caller_video_frames_expected); - if (caller()->min_video_frames_received_per_track() != - total_caller_video_frames_expected) { - expectations_correct = false; - } - } - if (media_expectations.callee_audio_expectation_ == - MediaExpectations::kExpectNoFrames) { - EXPECT_EQ(callee()->audio_frames_received(), - total_callee_audio_frames_expected); - if (callee()->audio_frames_received() != - total_callee_audio_frames_expected) { - expectations_correct = false; - } - } - if (media_expectations.callee_video_expectation_ == - MediaExpectations::kExpectNoFrames) { - EXPECT_EQ(callee()->min_video_frames_received_per_track(), - total_callee_video_frames_expected); - if (callee()->min_video_frames_received_per_track() != - total_callee_video_frames_expected) { - expectations_correct = false; - } - } - return expectations_correct; - } - - void ClosePeerConnections() { - caller()->pc()->Close(); - callee()->pc()->Close(); - } - - void TestNegotiatedCipherSuite( - const PeerConnectionFactory::Options& caller_options, - const PeerConnectionFactory::Options& callee_options, - int 
expected_cipher_suite) { - ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(caller_options, - callee_options)); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); - EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(expected_cipher_suite), - caller()->OldGetStats()->SrtpCipher(), kDefaultTimeout); - // TODO(bugs.webrtc.org/9456): Fix it. - EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents( - "WebRTC.PeerConnection.SrtpCryptoSuite.Audio", - expected_cipher_suite)); - } - - void TestGcmNegotiationUsesCipherSuite(bool local_gcm_enabled, - bool remote_gcm_enabled, - bool aes_ctr_enabled, - int expected_cipher_suite) { - PeerConnectionFactory::Options caller_options; - caller_options.crypto_options.srtp.enable_gcm_crypto_suites = - local_gcm_enabled; - caller_options.crypto_options.srtp.enable_aes128_sha1_80_crypto_cipher = - aes_ctr_enabled; - PeerConnectionFactory::Options callee_options; - callee_options.crypto_options.srtp.enable_gcm_crypto_suites = - remote_gcm_enabled; - callee_options.crypto_options.srtp.enable_aes128_sha1_80_crypto_cipher = - aes_ctr_enabled; - TestNegotiatedCipherSuite(caller_options, callee_options, - expected_cipher_suite); - } - - protected: - SdpSemantics sdp_semantics_; - - private: - // |ss_| is used by |network_thread_| so it must be destroyed later. - std::unique_ptr ss_; - std::unique_ptr fss_; - // |network_thread_| and |worker_thread_| are used by both - // |caller_| and |callee_| so they must be destroyed - // later. - std::unique_ptr network_thread_; - std::unique_ptr worker_thread_; - // The turn servers and turn customizers should be accessed & deleted on the - // network thread to avoid a race with the socket read/write that occurs - // on the network thread. 
- std::vector> turn_servers_; - std::vector> turn_customizers_; - webrtc::MediaTransportPair loopback_media_transports_; - std::unique_ptr caller_; - std::unique_ptr callee_; -}; - -class PeerConnectionIntegrationTest - : public PeerConnectionIntegrationBaseTest, - public ::testing::WithParamInterface { - protected: - PeerConnectionIntegrationTest() - : PeerConnectionIntegrationBaseTest(GetParam()) {} -}; - -// Fake clock must be set before threads are started to prevent race on -// Set/GetClockForTesting(). -// To achieve that, multiple inheritance is used as a mixin pattern -// where order of construction is finely controlled. -// This also ensures peerconnection is closed before switching back to non-fake -// clock, avoiding other races and DCHECK failures such as in rtp_sender.cc. -class FakeClockForTest : public rtc::ScopedFakeClock { - protected: - FakeClockForTest() { - // Some things use a time of "0" as a special value, so we need to start out - // the fake clock at a nonzero time. - // TODO(deadbeef): Fix this. - AdvanceTime(webrtc::TimeDelta::Seconds(1)); - } - - // Explicit handle. - ScopedFakeClock& FakeClock() { return *this; } -}; - -// Ensure FakeClockForTest is constructed first (see class for rationale). -class PeerConnectionIntegrationTestWithFakeClock - : public FakeClockForTest, - public PeerConnectionIntegrationTest {}; - -class PeerConnectionIntegrationTestPlanB - : public PeerConnectionIntegrationBaseTest { - protected: - PeerConnectionIntegrationTestPlanB() - : PeerConnectionIntegrationBaseTest(SdpSemantics::kPlanB) {} -}; - -class PeerConnectionIntegrationTestUnifiedPlan - : public PeerConnectionIntegrationBaseTest { - protected: - PeerConnectionIntegrationTestUnifiedPlan() - : PeerConnectionIntegrationBaseTest(SdpSemantics::kUnifiedPlan) {} -}; - -// Test the OnFirstPacketReceived callback from audio/video RtpReceivers. 
This -// includes testing that the callback is invoked if an observer is connected -// after the first packet has already been received. -TEST_P(PeerConnectionIntegrationTest, - RtpReceiverObserverOnFirstPacketReceived) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - // Start offer/answer exchange and wait for it to complete. - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Should be one receiver each for audio/video. - EXPECT_EQ(2U, caller()->rtp_receiver_observers().size()); - EXPECT_EQ(2U, callee()->rtp_receiver_observers().size()); - // Wait for all "first packet received" callbacks to be fired. - EXPECT_TRUE_WAIT( - absl::c_all_of(caller()->rtp_receiver_observers(), - [](const std::unique_ptr& o) { - return o->first_packet_received(); - }), - kMaxWaitForFramesMs); - EXPECT_TRUE_WAIT( - absl::c_all_of(callee()->rtp_receiver_observers(), - [](const std::unique_ptr& o) { - return o->first_packet_received(); - }), - kMaxWaitForFramesMs); - // If new observers are set after the first packet was already received, the - // callback should still be invoked. - caller()->ResetRtpReceiverObservers(); - callee()->ResetRtpReceiverObservers(); - EXPECT_EQ(2U, caller()->rtp_receiver_observers().size()); - EXPECT_EQ(2U, callee()->rtp_receiver_observers().size()); - EXPECT_TRUE( - absl::c_all_of(caller()->rtp_receiver_observers(), - [](const std::unique_ptr& o) { - return o->first_packet_received(); - })); - EXPECT_TRUE( - absl::c_all_of(callee()->rtp_receiver_observers(), - [](const std::unique_ptr& o) { - return o->first_packet_received(); - })); -} - -class DummyDtmfObserver : public DtmfSenderObserverInterface { - public: - DummyDtmfObserver() : completed_(false) {} - - // Implements DtmfSenderObserverInterface. 
- void OnToneChange(const std::string& tone) override { - tones_.push_back(tone); - if (tone.empty()) { - completed_ = true; - } - } - - const std::vector& tones() const { return tones_; } - bool completed() const { return completed_; } - - private: - bool completed_; - std::vector tones_; -}; - -// Assumes |sender| already has an audio track added and the offer/answer -// exchange is done. -void TestDtmfFromSenderToReceiver(PeerConnectionWrapper* sender, - PeerConnectionWrapper* receiver) { - // We should be able to get a DTMF sender from the local sender. - rtc::scoped_refptr dtmf_sender = - sender->pc()->GetSenders().at(0)->GetDtmfSender(); - ASSERT_TRUE(dtmf_sender); - DummyDtmfObserver observer; - dtmf_sender->RegisterObserver(&observer); - - // Test the DtmfSender object just created. - EXPECT_TRUE(dtmf_sender->CanInsertDtmf()); - EXPECT_TRUE(dtmf_sender->InsertDtmf("1a", 100, 50)); - - EXPECT_TRUE_WAIT(observer.completed(), kDefaultTimeout); - std::vector tones = {"1", "a", ""}; - EXPECT_EQ(tones, observer.tones()); - dtmf_sender->UnregisterObserver(); - // TODO(deadbeef): Verify the tones were actually received end-to-end. -} - -// Verifies the DtmfSenderObserver callbacks for a DtmfSender (one in each -// direction). -TEST_P(PeerConnectionIntegrationTest, DtmfSenderObserver) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Only need audio for DTMF. - caller()->AddAudioTrack(); - callee()->AddAudioTrack(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // DTLS must finish before the DTMF sender can be used reliably. - ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); - TestDtmfFromSenderToReceiver(caller(), callee()); - TestDtmfFromSenderToReceiver(callee(), caller()); -} - -// Basic end-to-end test, verifying media can be encoded/transmitted/decoded -// between two connections, using DTLS-SRTP. 
-TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithDtls) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - - // Do normal offer/answer and wait for some frames to be received in each - // direction. - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - EXPECT_METRIC_LE( - 2, webrtc::metrics::NumEvents("WebRTC.PeerConnection.KeyProtocol", - webrtc::kEnumCounterKeyProtocolDtls)); - EXPECT_METRIC_EQ( - 0, webrtc::metrics::NumEvents("WebRTC.PeerConnection.KeyProtocol", - webrtc::kEnumCounterKeyProtocolSdes)); -} - -// Uses SDES instead of DTLS for key agreement. -TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithSdes) { - PeerConnectionInterface::RTCConfiguration sdes_config; - sdes_config.enable_dtls_srtp.emplace(false); - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(sdes_config, sdes_config)); - ConnectFakeSignaling(); - - // Do normal offer/answer and wait for some frames to be received in each - // direction. 
- caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - EXPECT_METRIC_LE( - 2, webrtc::metrics::NumEvents("WebRTC.PeerConnection.KeyProtocol", - webrtc::kEnumCounterKeyProtocolSdes)); - EXPECT_METRIC_EQ( - 0, webrtc::metrics::NumEvents("WebRTC.PeerConnection.KeyProtocol", - webrtc::kEnumCounterKeyProtocolDtls)); -} - -// Basic end-to-end test specifying the |enable_encrypted_rtp_header_extensions| -// option to offer encrypted versions of all header extensions alongside the -// unencrypted versions. -TEST_P(PeerConnectionIntegrationTest, - EndToEndCallWithEncryptedRtpHeaderExtensions) { - CryptoOptions crypto_options; - crypto_options.srtp.enable_encrypted_rtp_header_extensions = true; - PeerConnectionInterface::RTCConfiguration config; - config.crypto_options = crypto_options; - // Note: This allows offering >14 RTP header extensions. - config.offer_extmap_allow_mixed = true; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); - ConnectFakeSignaling(); - - // Do normal offer/answer and wait for some frames to be received in each - // direction. - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// Tests that the GetRemoteAudioSSLCertificate method returns the remote DTLS -// certificate once the DTLS handshake has finished. 
-TEST_P(PeerConnectionIntegrationTest, - GetRemoteAudioSSLCertificateReturnsExchangedCertificate) { - auto GetRemoteAudioSSLCertificate = [](PeerConnectionWrapper* wrapper) { - auto pci = reinterpret_cast(wrapper->pc()); - auto pc = reinterpret_cast(pci->internal()); - return pc->GetRemoteAudioSSLCertificate(); - }; - auto GetRemoteAudioSSLCertChain = [](PeerConnectionWrapper* wrapper) { - auto pci = reinterpret_cast(wrapper->pc()); - auto pc = reinterpret_cast(pci->internal()); - return pc->GetRemoteAudioSSLCertChain(); - }; - - auto caller_cert = rtc::RTCCertificate::FromPEM(kRsaPems[0]); - auto callee_cert = rtc::RTCCertificate::FromPEM(kRsaPems[1]); - - // Configure each side with a known certificate so they can be compared later. - PeerConnectionInterface::RTCConfiguration caller_config; - caller_config.enable_dtls_srtp.emplace(true); - caller_config.certificates.push_back(caller_cert); - PeerConnectionInterface::RTCConfiguration callee_config; - callee_config.enable_dtls_srtp.emplace(true); - callee_config.certificates.push_back(callee_cert); - ASSERT_TRUE( - CreatePeerConnectionWrappersWithConfig(caller_config, callee_config)); - ConnectFakeSignaling(); - - // When first initialized, there should not be a remote SSL certificate (and - // calling this method should not crash). - EXPECT_EQ(nullptr, GetRemoteAudioSSLCertificate(caller())); - EXPECT_EQ(nullptr, GetRemoteAudioSSLCertificate(callee())); - EXPECT_EQ(nullptr, GetRemoteAudioSSLCertChain(caller())); - EXPECT_EQ(nullptr, GetRemoteAudioSSLCertChain(callee())); - - caller()->AddAudioTrack(); - callee()->AddAudioTrack(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); - - // Once DTLS has been connected, each side should return the other's SSL - // certificate when calling GetRemoteAudioSSLCertificate. 
- - auto caller_remote_cert = GetRemoteAudioSSLCertificate(caller()); - ASSERT_TRUE(caller_remote_cert); - EXPECT_EQ(callee_cert->GetSSLCertificate().ToPEMString(), - caller_remote_cert->ToPEMString()); - - auto callee_remote_cert = GetRemoteAudioSSLCertificate(callee()); - ASSERT_TRUE(callee_remote_cert); - EXPECT_EQ(caller_cert->GetSSLCertificate().ToPEMString(), - callee_remote_cert->ToPEMString()); - - auto caller_remote_cert_chain = GetRemoteAudioSSLCertChain(caller()); - ASSERT_TRUE(caller_remote_cert_chain); - ASSERT_EQ(1U, caller_remote_cert_chain->GetSize()); - auto remote_cert = &caller_remote_cert_chain->Get(0); - EXPECT_EQ(callee_cert->GetSSLCertificate().ToPEMString(), - remote_cert->ToPEMString()); - - auto callee_remote_cert_chain = GetRemoteAudioSSLCertChain(callee()); - ASSERT_TRUE(callee_remote_cert_chain); - ASSERT_EQ(1U, callee_remote_cert_chain->GetSize()); - remote_cert = &callee_remote_cert_chain->Get(0); - EXPECT_EQ(caller_cert->GetSSLCertificate().ToPEMString(), - remote_cert->ToPEMString()); -} - -// This test sets up a call between two parties with a source resolution of -// 1280x720 and verifies that a 16:9 aspect ratio is received. -TEST_P(PeerConnectionIntegrationTest, - Send1280By720ResolutionAndReceive16To9AspectRatio) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - - // Add video tracks with 16:9 aspect ratio, size 1280 x 720. - webrtc::FakePeriodicVideoSource::Config config; - config.width = 1280; - config.height = 720; - config.timestamp_offset_ms = rtc::TimeMillis(); - caller()->AddTrack(caller()->CreateLocalVideoTrackWithConfig(config)); - callee()->AddTrack(callee()->CreateLocalVideoTrackWithConfig(config)); - - // Do normal offer/answer and wait for at least one frame to be received in - // each direction. 
- caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(caller()->min_video_frames_received_per_track() > 0 && - callee()->min_video_frames_received_per_track() > 0, - kMaxWaitForFramesMs); - - // Check rendered aspect ratio. - EXPECT_EQ(16.0 / 9, caller()->local_rendered_aspect_ratio()); - EXPECT_EQ(16.0 / 9, caller()->rendered_aspect_ratio()); - EXPECT_EQ(16.0 / 9, callee()->local_rendered_aspect_ratio()); - EXPECT_EQ(16.0 / 9, callee()->rendered_aspect_ratio()); -} - -// This test sets up an one-way call, with media only from caller to -// callee. -TEST_P(PeerConnectionIntegrationTest, OneWayMediaCall) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudioAndVideo(); - media_expectations.CallerExpectsNoAudio(); - media_expectations.CallerExpectsNoVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// Tests that send only works without the caller having a decoder factory and -// the callee having an encoder factory. -TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithSendOnlyVideo) { - ASSERT_TRUE( - CreateOneDirectionalPeerConnectionWrappers(/*caller_to_callee=*/true)); - ConnectFakeSignaling(); - // Add one-directional video, from caller to callee. - rtc::scoped_refptr caller_track = - caller()->CreateLocalVideoTrack(); - caller()->AddTrack(caller_track); - PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_video = 0; - caller()->SetOfferAnswerOptions(options); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_EQ(callee()->pc()->GetReceivers().size(), 1u); - - // Expect video to be received in one direction. 
- MediaExpectations media_expectations; - media_expectations.CallerExpectsNoVideo(); - media_expectations.CalleeExpectsSomeVideo(); - - EXPECT_TRUE(ExpectNewFrames(media_expectations)); -} - -// Tests that receive only works without the caller having an encoder factory -// and the callee having a decoder factory. -TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithReceiveOnlyVideo) { - ASSERT_TRUE( - CreateOneDirectionalPeerConnectionWrappers(/*caller_to_callee=*/false)); - ConnectFakeSignaling(); - // Add one-directional video, from callee to caller. - rtc::scoped_refptr callee_track = - callee()->CreateLocalVideoTrack(); - callee()->AddTrack(callee_track); - PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_video = 1; - caller()->SetOfferAnswerOptions(options); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_EQ(caller()->pc()->GetReceivers().size(), 1u); - - // Expect video to be received in one direction. - MediaExpectations media_expectations; - media_expectations.CallerExpectsSomeVideo(); - media_expectations.CalleeExpectsNoVideo(); - - EXPECT_TRUE(ExpectNewFrames(media_expectations)); -} - -TEST_P(PeerConnectionIntegrationTest, - EndToEndCallAddReceiveVideoToSendOnlyCall) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Add one-directional video, from caller to callee. - rtc::scoped_refptr caller_track = - caller()->CreateLocalVideoTrack(); - caller()->AddTrack(caller_track); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Add receive video. - rtc::scoped_refptr callee_track = - callee()->CreateLocalVideoTrack(); - callee()->AddTrack(callee_track); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that video frames are received end-to-end. 
- MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -TEST_P(PeerConnectionIntegrationTest, - EndToEndCallAddSendVideoToReceiveOnlyCall) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Add one-directional video, from callee to caller. - rtc::scoped_refptr callee_track = - callee()->CreateLocalVideoTrack(); - callee()->AddTrack(callee_track); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Add send video. - rtc::scoped_refptr caller_track = - caller()->CreateLocalVideoTrack(); - caller()->AddTrack(caller_track); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Expect video to be received in one direction. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -TEST_P(PeerConnectionIntegrationTest, - EndToEndCallRemoveReceiveVideoFromSendReceiveCall) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Add send video, from caller to callee. - rtc::scoped_refptr caller_track = - caller()->CreateLocalVideoTrack(); - rtc::scoped_refptr caller_sender = - caller()->AddTrack(caller_track); - // Add receive video, from callee to caller. - rtc::scoped_refptr callee_track = - callee()->CreateLocalVideoTrack(); - - rtc::scoped_refptr callee_sender = - callee()->AddTrack(callee_track); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Remove receive video (i.e., callee sender track). - callee()->pc()->RemoveTrack(callee_sender); - - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Expect one-directional video. 
- MediaExpectations media_expectations; - media_expectations.CallerExpectsNoVideo(); - media_expectations.CalleeExpectsSomeVideo(); - - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -TEST_P(PeerConnectionIntegrationTest, - EndToEndCallRemoveSendVideoFromSendReceiveCall) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Add send video, from caller to callee. - rtc::scoped_refptr caller_track = - caller()->CreateLocalVideoTrack(); - rtc::scoped_refptr caller_sender = - caller()->AddTrack(caller_track); - // Add receive video, from callee to caller. - rtc::scoped_refptr callee_track = - callee()->CreateLocalVideoTrack(); - - rtc::scoped_refptr callee_sender = - callee()->AddTrack(callee_track); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Remove send video (i.e., caller sender track). - caller()->pc()->RemoveTrack(caller_sender); - - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Expect one-directional video. - MediaExpectations media_expectations; - media_expectations.CalleeExpectsNoVideo(); - media_expectations.CallerExpectsSomeVideo(); - - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// This test sets up a audio call initially, with the callee rejecting video -// initially. Then later the callee decides to upgrade to audio/video, and -// initiates a new offer/answer exchange. -TEST_P(PeerConnectionIntegrationTest, AudioToVideoUpgrade) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Initially, offer an audio/video stream from the caller, but refuse to - // send/receive video on the callee side. 
- caller()->AddAudioVideoTracks(); - callee()->AddAudioTrack(); - if (sdp_semantics_ == SdpSemantics::kPlanB) { - PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_video = 0; - callee()->SetOfferAnswerOptions(options); - } else { - callee()->SetRemoteOfferHandler([this] { - callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop(); - }); - } - // Do offer/answer and make sure audio is still received end-to-end. - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - { - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudio(); - media_expectations.ExpectNoVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - } - // Sanity check that the callee's description has a rejected video section. - ASSERT_NE(nullptr, callee()->pc()->local_description()); - const ContentInfo* callee_video_content = - GetFirstVideoContent(callee()->pc()->local_description()->description()); - ASSERT_NE(nullptr, callee_video_content); - EXPECT_TRUE(callee_video_content->rejected); - - // Now negotiate with video and ensure negotiation succeeds, with video - // frames and additional audio frames being received. - callee()->AddVideoTrack(); - if (sdp_semantics_ == SdpSemantics::kPlanB) { - PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_video = 1; - callee()->SetOfferAnswerOptions(options); - } else { - callee()->SetRemoteOfferHandler(nullptr); - caller()->SetRemoteOfferHandler([this] { - // The caller creates a new transceiver to receive video on when receiving - // the offer, but by default it is send only. 
- auto transceivers = caller()->pc()->GetTransceivers(); - ASSERT_EQ(3U, transceivers.size()); - ASSERT_EQ(cricket::MEDIA_TYPE_VIDEO, - transceivers[2]->receiver()->media_type()); - transceivers[2]->sender()->SetTrack(caller()->CreateLocalVideoTrack()); - transceivers[2]->SetDirection(RtpTransceiverDirection::kSendRecv); - }); - } - callee()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - { - // Expect additional audio frames to be received after the upgrade. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - } -} - -// Simpler than the above test; just add an audio track to an established -// video-only connection. -TEST_P(PeerConnectionIntegrationTest, AddAudioToVideoOnlyCall) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Do initial offer/answer with just a video track. - caller()->AddVideoTrack(); - callee()->AddVideoTrack(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Now add an audio track and do another offer/answer. - caller()->AddAudioTrack(); - callee()->AddAudioTrack(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Ensure both audio and video frames are received end-to-end. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// This test sets up a call that's transferred to a new caller with a different -// DTLS fingerprint. 
-TEST_P(PeerConnectionIntegrationTest, CallTransferredForCallee) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Keep the original peer around which will still send packets to the - // receiving client. These SRTP packets will be dropped. - std::unique_ptr original_peer( - SetCallerPcWrapperAndReturnCurrent( - CreatePeerConnectionWrapperWithAlternateKey().release())); - // TODO(deadbeef): Why do we call Close here? That goes against the comment - // directly above. - original_peer->pc()->Close(); - - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Wait for some additional frames to be transmitted end-to-end. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// This test sets up a call that's transferred to a new callee with a different -// DTLS fingerprint. -TEST_P(PeerConnectionIntegrationTest, CallTransferredForCaller) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Keep the original peer around which will still send packets to the - // receiving client. These SRTP packets will be dropped. - std::unique_ptr original_peer( - SetCalleePcWrapperAndReturnCurrent( - CreatePeerConnectionWrapperWithAlternateKey().release())); - // TODO(deadbeef): Why do we call Close here? That goes against the comment - // directly above. 
- original_peer->pc()->Close(); - - ConnectFakeSignaling(); - callee()->AddAudioVideoTracks(); - caller()->SetOfferAnswerOptions(IceRestartOfferAnswerOptions()); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Wait for some additional frames to be transmitted end-to-end. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// This test sets up a non-bundled call and negotiates bundling at the same -// time as starting an ICE restart. When bundling is in effect in the restart, -// the DTLS-SRTP context should be successfully reset. -TEST_P(PeerConnectionIntegrationTest, BundlingEnabledWhileIceRestartOccurs) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - // Remove the bundle group from the SDP received by the callee. - callee()->SetReceivedSdpMunger([](cricket::SessionDescription* desc) { - desc->RemoveGroupByName("BUNDLE"); - }); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - { - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - } - // Now stop removing the BUNDLE group, and trigger an ICE restart. - callee()->SetReceivedSdpMunger(nullptr); - caller()->SetOfferAnswerOptions(IceRestartOfferAnswerOptions()); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Expect additional frames to be received after the ICE restart. - { - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - } -} - -// Test CVO (Coordination of Video Orientation). 
If a video source is rotated -// and both peers support the CVO RTP header extension, the actual video frames -// don't need to be encoded in different resolutions, since the rotation is -// communicated through the RTP header extension. -TEST_P(PeerConnectionIntegrationTest, RotatedVideoWithCVOExtension) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Add rotated video tracks. - caller()->AddTrack( - caller()->CreateLocalVideoTrackWithRotation(webrtc::kVideoRotation_90)); - callee()->AddTrack( - callee()->CreateLocalVideoTrackWithRotation(webrtc::kVideoRotation_270)); - - // Wait for video frames to be received by both sides. - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(caller()->min_video_frames_received_per_track() > 0 && - callee()->min_video_frames_received_per_track() > 0, - kMaxWaitForFramesMs); - - // Ensure that the aspect ratio is unmodified. - // TODO(deadbeef): Where does 4:3 come from? Should be explicit in the test, - // not just assumed. - EXPECT_EQ(4.0 / 3, caller()->local_rendered_aspect_ratio()); - EXPECT_EQ(4.0 / 3, caller()->rendered_aspect_ratio()); - EXPECT_EQ(4.0 / 3, callee()->local_rendered_aspect_ratio()); - EXPECT_EQ(4.0 / 3, callee()->rendered_aspect_ratio()); - // Ensure that the CVO bits were surfaced to the renderer. - EXPECT_EQ(webrtc::kVideoRotation_270, caller()->rendered_rotation()); - EXPECT_EQ(webrtc::kVideoRotation_90, callee()->rendered_rotation()); -} - -// Test that when the CVO extension isn't supported, video is rotated the -// old-fashioned way, by encoding rotated frames. -TEST_P(PeerConnectionIntegrationTest, RotatedVideoWithoutCVOExtension) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Add rotated video tracks. 
- caller()->AddTrack( - caller()->CreateLocalVideoTrackWithRotation(webrtc::kVideoRotation_90)); - callee()->AddTrack( - callee()->CreateLocalVideoTrackWithRotation(webrtc::kVideoRotation_270)); - - // Remove the CVO extension from the offered SDP. - callee()->SetReceivedSdpMunger([](cricket::SessionDescription* desc) { - cricket::VideoContentDescription* video = - GetFirstVideoContentDescription(desc); - video->ClearRtpHeaderExtensions(); - }); - // Wait for video frames to be received by both sides. - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(caller()->min_video_frames_received_per_track() > 0 && - callee()->min_video_frames_received_per_track() > 0, - kMaxWaitForFramesMs); - - // Expect that the aspect ratio is inversed to account for the 90/270 degree - // rotation. - // TODO(deadbeef): Where does 4:3 come from? Should be explicit in the test, - // not just assumed. - EXPECT_EQ(3.0 / 4, caller()->local_rendered_aspect_ratio()); - EXPECT_EQ(3.0 / 4, caller()->rendered_aspect_ratio()); - EXPECT_EQ(3.0 / 4, callee()->local_rendered_aspect_ratio()); - EXPECT_EQ(3.0 / 4, callee()->rendered_aspect_ratio()); - // Expect that each endpoint is unaware of the rotation of the other endpoint. - EXPECT_EQ(webrtc::kVideoRotation_0, caller()->rendered_rotation()); - EXPECT_EQ(webrtc::kVideoRotation_0, callee()->rendered_rotation()); -} - -// Test that if the answerer rejects the audio m= section, no audio is sent or -// received, but video still can be. -TEST_P(PeerConnectionIntegrationTest, AnswererRejectsAudioSection) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - if (sdp_semantics_ == SdpSemantics::kPlanB) { - // Only add video track for callee, and set offer_to_receive_audio to 0, so - // it will reject the audio m= section completely. 
- PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_audio = 0; - callee()->SetOfferAnswerOptions(options); - } else { - // Stopping the audio RtpTransceiver will cause the media section to be - // rejected in the answer. - callee()->SetRemoteOfferHandler([this] { - callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_AUDIO)->Stop(); - }); - } - callee()->AddTrack(callee()->CreateLocalVideoTrack()); - // Do offer/answer and wait for successful end-to-end video frames. - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalVideo(); - media_expectations.ExpectNoAudio(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - - // Sanity check that the callee's description has a rejected audio section. - ASSERT_NE(nullptr, callee()->pc()->local_description()); - const ContentInfo* callee_audio_content = - GetFirstAudioContent(callee()->pc()->local_description()->description()); - ASSERT_NE(nullptr, callee_audio_content); - EXPECT_TRUE(callee_audio_content->rejected); - if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) { - // The caller's transceiver should have stopped after receiving the answer. - EXPECT_TRUE(caller() - ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_AUDIO) - ->stopped()); - } -} - -// Test that if the answerer rejects the video m= section, no video is sent or -// received, but audio still can be. -TEST_P(PeerConnectionIntegrationTest, AnswererRejectsVideoSection) { +// This test sets up an one-way call, with media only from caller to +// callee. +TEST_P(PeerConnectionIntegrationTest, OneWayMediaCall) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); caller()->AddAudioVideoTracks(); - if (sdp_semantics_ == SdpSemantics::kPlanB) { - // Only add audio track for callee, and set offer_to_receive_video to 0, so - // it will reject the video m= section completely. 
- PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_video = 0; - callee()->SetOfferAnswerOptions(options); - } else { - // Stopping the video RtpTransceiver will cause the media section to be - // rejected in the answer. - callee()->SetRemoteOfferHandler([this] { - callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop(); - }); - } - callee()->AddTrack(callee()->CreateLocalAudioTrack()); - // Do offer/answer and wait for successful end-to-end audio frames. caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudio(); - media_expectations.ExpectNoVideo(); + media_expectations.CalleeExpectsSomeAudioAndVideo(); + media_expectations.CallerExpectsNoAudio(); + media_expectations.CallerExpectsNoVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); - - // Sanity check that the callee's description has a rejected video section. - ASSERT_NE(nullptr, callee()->pc()->local_description()); - const ContentInfo* callee_video_content = - GetFirstVideoContent(callee()->pc()->local_description()->description()); - ASSERT_NE(nullptr, callee_video_content); - EXPECT_TRUE(callee_video_content->rejected); - if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) { - // The caller's transceiver should have stopped after receiving the answer. - EXPECT_TRUE(caller() - ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO) - ->stopped()); - } -} - -// Test that if the answerer rejects both audio and video m= sections, nothing -// bad happens. -// TODO(deadbeef): Test that a data channel still works. Currently this doesn't -// test anything but the fact that negotiation succeeds, which doesn't mean -// much. 
-TEST_P(PeerConnectionIntegrationTest, AnswererRejectsAudioAndVideoSections) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - if (sdp_semantics_ == SdpSemantics::kPlanB) { - // Don't give the callee any tracks, and set offer_to_receive_X to 0, so it - // will reject both audio and video m= sections. - PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_audio = 0; - options.offer_to_receive_video = 0; - callee()->SetOfferAnswerOptions(options); - } else { - callee()->SetRemoteOfferHandler([this] { - // Stopping all transceivers will cause all media sections to be rejected. - for (const auto& transceiver : callee()->pc()->GetTransceivers()) { - transceiver->Stop(); - } - }); - } - // Do offer/answer and wait for stable signaling state. - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Sanity check that the callee's description has rejected m= sections. - ASSERT_NE(nullptr, callee()->pc()->local_description()); - const ContentInfo* callee_audio_content = - GetFirstAudioContent(callee()->pc()->local_description()->description()); - ASSERT_NE(nullptr, callee_audio_content); - EXPECT_TRUE(callee_audio_content->rejected); - const ContentInfo* callee_video_content = - GetFirstVideoContent(callee()->pc()->local_description()->description()); - ASSERT_NE(nullptr, callee_video_content); - EXPECT_TRUE(callee_video_content->rejected); -} - -// This test sets up an audio and video call between two parties. After the -// call runs for a while, the caller sends an updated offer with video being -// rejected. Once the re-negotiation is done, the video flow should stop and -// the audio flow should continue. 
-TEST_P(PeerConnectionIntegrationTest, VideoRejectedInSubsequentOffer) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - { - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - } - // Renegotiate, rejecting the video m= section. - if (sdp_semantics_ == SdpSemantics::kPlanB) { - caller()->SetGeneratedSdpMunger( - [](cricket::SessionDescription* description) { - for (cricket::ContentInfo& content : description->contents()) { - if (cricket::IsVideoContent(&content)) { - content.rejected = true; - } - } - }); - } else { - caller()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop(); - } - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kMaxWaitForActivationMs); - - // Sanity check that the caller's description has a rejected video section. - ASSERT_NE(nullptr, caller()->pc()->local_description()); - const ContentInfo* caller_video_content = - GetFirstVideoContent(caller()->pc()->local_description()->description()); - ASSERT_NE(nullptr, caller_video_content); - EXPECT_TRUE(caller_video_content->rejected); - // Wait for some additional audio frames to be received. - { - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudio(); - media_expectations.ExpectNoVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - } } -// Do one offer/answer with audio, another that disables it (rejecting the m= -// section), and another that re-enables it. 
Regression test for: -// bugs.webrtc.org/6023 -TEST_F(PeerConnectionIntegrationTestPlanB, EnableAudioAfterRejecting) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); +// Tests that send only works without the caller having a decoder factory and +// the callee having an encoder factory. +TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithSendOnlyVideo) { + ASSERT_TRUE( + CreateOneDirectionalPeerConnectionWrappers(/*caller_to_callee=*/true)); ConnectFakeSignaling(); - - // Add audio track, do normal offer/answer. - rtc::scoped_refptr track = - caller()->CreateLocalAudioTrack(); - rtc::scoped_refptr sender = - caller()->pc()->AddTrack(track, {"stream"}).MoveValue(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Remove audio track, and set offer_to_receive_audio to false to cause the - // m= section to be completely disabled, not just "recvonly". - caller()->pc()->RemoveTrack(sender); + // Add one-directional video, from caller to callee. + rtc::scoped_refptr caller_track = + caller()->CreateLocalVideoTrack(); + caller()->AddTrack(caller_track); PeerConnectionInterface::RTCOfferAnswerOptions options; - options.offer_to_receive_audio = 0; - caller()->SetOfferAnswerOptions(options); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Add the audio track again, expecting negotiation to succeed and frames to - // flow. - sender = caller()->pc()->AddTrack(track, {"stream"}).MoveValue(); - options.offer_to_receive_audio = 1; + options.offer_to_receive_video = 0; caller()->SetOfferAnswerOptions(options); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_EQ(callee()->pc()->GetReceivers().size(), 1u); + // Expect video to be received in one direction. 
MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudio(); + media_expectations.CallerExpectsNoVideo(); + media_expectations.CalleeExpectsSomeVideo(); + EXPECT_TRUE(ExpectNewFrames(media_expectations)); } -// Basic end-to-end test, but without SSRC/MSID signaling. This functionality -// is needed to support legacy endpoints. -// TODO(deadbeef): When we support the MID extension and demuxing on MID, also -// add a test for an end-to-end test without MID signaling either (basically, -// the minimum acceptable SDP). -TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithoutSsrcOrMsidSignaling) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); +// Tests that receive only works without the caller having an encoder factory +// and the callee having a decoder factory. +TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithReceiveOnlyVideo) { + ASSERT_TRUE( + CreateOneDirectionalPeerConnectionWrappers(/*caller_to_callee=*/false)); ConnectFakeSignaling(); - // Add audio and video, testing that packets can be demuxed on payload type. - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - // Remove SSRCs and MSIDs from the received offer SDP. - callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); + // Add one-directional video, from callee to caller. + rtc::scoped_refptr callee_track = + callee()->CreateLocalVideoTrack(); + callee()->AddTrack(callee_track); + PeerConnectionInterface::RTCOfferAnswerOptions options; + options.offer_to_receive_video = 1; + caller()->SetOfferAnswerOptions(options); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// Basic end-to-end test, without SSRC signaling. 
This means that the track -// was created properly and frames are delivered when the MSIDs are communicated -// with a=msid lines and no a=ssrc lines. -TEST_F(PeerConnectionIntegrationTestUnifiedPlan, - EndToEndCallWithoutSsrcSignaling) { - const char kStreamId[] = "streamId"; - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - // Add just audio tracks. - caller()->AddTrack(caller()->CreateLocalAudioTrack(), {kStreamId}); - callee()->AddAudioTrack(); + ASSERT_EQ(caller()->pc()->GetReceivers().size(), 1u); - // Remove SSRCs from the received offer SDP. - callee()->SetReceivedSdpMunger(RemoveSsrcsAndKeepMsids); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Expect video to be received in one direction. MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudio(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); + media_expectations.CallerExpectsSomeVideo(); + media_expectations.CalleeExpectsNoVideo(); + + EXPECT_TRUE(ExpectNewFrames(media_expectations)); } -TEST_F(PeerConnectionIntegrationTestUnifiedPlan, +TEST_P(PeerConnectionIntegrationTest, EndToEndCallAddReceiveVideoToSendOnlyCall) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); // Add one-directional video, from caller to callee. - rtc::scoped_refptr track = + rtc::scoped_refptr caller_track = caller()->CreateLocalVideoTrack(); + caller()->AddTrack(caller_track); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - RtpTransceiverInit video_transceiver_init; - video_transceiver_init.stream_ids = {"video1"}; - video_transceiver_init.direction = RtpTransceiverDirection::kSendOnly; - auto video_sender = - caller()->pc()->AddTransceiver(track, video_transceiver_init).MoveValue(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Add receive direction. 
- video_sender->SetDirection(RtpTransceiverDirection::kSendRecv); - - rtc::scoped_refptr callee_track = - callee()->CreateLocalVideoTrack(); - - callee()->AddTrack(callee_track); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Ensure that video frames are received end-to-end. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// Tests that video flows between multiple video tracks when SSRCs are not -// signaled. This exercises the MID RTP header extension which is needed to -// demux the incoming video tracks. -TEST_F(PeerConnectionIntegrationTestUnifiedPlan, - EndToEndCallWithTwoVideoTracksAndNoSignaledSsrc) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddVideoTrack(); - caller()->AddVideoTrack(); - callee()->AddVideoTrack(); - callee()->AddVideoTrack(); - - caller()->SetReceivedSdpMunger(&RemoveSsrcsAndKeepMsids); - callee()->SetReceivedSdpMunger(&RemoveSsrcsAndKeepMsids); + // Add receive video. + rtc::scoped_refptr callee_track = + callee()->CreateLocalVideoTrack(); + callee()->AddTrack(callee_track); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_EQ(2u, caller()->pc()->GetReceivers().size()); - ASSERT_EQ(2u, callee()->pc()->GetReceivers().size()); - // Expect video to be received in both directions on both tracks. + // Ensure that video frames are received end-to-end. 
MediaExpectations media_expectations; media_expectations.ExpectBidirectionalVideo(); - EXPECT_TRUE(ExpectNewFrames(media_expectations)); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -TEST_F(PeerConnectionIntegrationTestUnifiedPlan, NoStreamsMsidLinePresent) { +TEST_P(PeerConnectionIntegrationTest, + EndToEndCallAddSendVideoToReceiveOnlyCall) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->AddAudioTrack(); - caller()->AddVideoTrack(); + // Add one-directional video, from callee to caller. + rtc::scoped_refptr callee_track = + callee()->CreateLocalVideoTrack(); + callee()->AddTrack(callee_track); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - auto callee_receivers = callee()->pc()->GetReceivers(); - ASSERT_EQ(2u, callee_receivers.size()); - EXPECT_TRUE(callee_receivers[0]->stream_ids().empty()); - EXPECT_TRUE(callee_receivers[1]->stream_ids().empty()); -} -TEST_F(PeerConnectionIntegrationTestUnifiedPlan, NoStreamsMsidLineMissing) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioTrack(); - caller()->AddVideoTrack(); - callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); + // Add send video. + rtc::scoped_refptr caller_track = + caller()->CreateLocalVideoTrack(); + caller()->AddTrack(caller_track); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - auto callee_receivers = callee()->pc()->GetReceivers(); - ASSERT_EQ(2u, callee_receivers.size()); - ASSERT_EQ(1u, callee_receivers[0]->stream_ids().size()); - ASSERT_EQ(1u, callee_receivers[1]->stream_ids().size()); - EXPECT_EQ(callee_receivers[0]->stream_ids()[0], - callee_receivers[1]->stream_ids()[0]); - EXPECT_EQ(callee_receivers[0]->streams()[0], - callee_receivers[1]->streams()[0]); + + // Expect video to be received in one direction. 
+ MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test that if two video tracks are sent (from caller to callee, in this test), -// they're transmitted correctly end-to-end. -TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithTwoVideoTracks) { +TEST_P(PeerConnectionIntegrationTest, + EndToEndCallRemoveReceiveVideoFromSendReceiveCall) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Add one audio/video stream, and one video-only stream. - caller()->AddAudioVideoTracks(); - caller()->AddVideoTrack(); + // Add send video, from caller to callee. + rtc::scoped_refptr caller_track = + caller()->CreateLocalVideoTrack(); + rtc::scoped_refptr caller_sender = + caller()->AddTrack(caller_track); + // Add receive video, from callee to caller. + rtc::scoped_refptr callee_track = + callee()->CreateLocalVideoTrack(); + + rtc::scoped_refptr callee_sender = + callee()->AddTrack(callee_track); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + + // Remove receive video (i.e., callee sender track). + callee()->pc()->RemoveTrack(callee_sender); + caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_EQ(3u, callee()->pc()->GetReceivers().size()); + // Expect one-directional video. 
MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} + media_expectations.CallerExpectsNoVideo(); + media_expectations.CalleeExpectsSomeVideo(); -static void MakeSpecCompliantMaxBundleOffer(cricket::SessionDescription* desc) { - bool first = true; - for (cricket::ContentInfo& content : desc->contents()) { - if (first) { - first = false; - continue; - } - content.bundle_only = true; - } - first = true; - for (cricket::TransportInfo& transport : desc->transport_infos()) { - if (first) { - first = false; - continue; - } - transport.description.ice_ufrag.clear(); - transport.description.ice_pwd.clear(); - transport.description.connection_role = cricket::CONNECTIONROLE_NONE; - transport.description.identity_fingerprint.reset(nullptr); - } + ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test that if applying a true "max bundle" offer, which uses ports of 0, -// "a=bundle-only", omitting "a=fingerprint", "a=setup", "a=ice-ufrag" and -// "a=ice-pwd" for all but the audio "m=" section, negotiation still completes -// successfully and media flows. -// TODO(deadbeef): Update this test to also omit "a=rtcp-mux", once that works. -// TODO(deadbeef): Won't need this test once we start generating actual -// standards-compliant SDP. TEST_P(PeerConnectionIntegrationTest, - EndToEndCallWithSpecCompliantMaxBundleOffer) { + EndToEndCallRemoveSendVideoFromSendReceiveCall) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - // Do the equivalent of setting the port to 0, adding a=bundle-only, and - // removing a=ice-ufrag, a=ice-pwd, a=fingerprint and a=setup from all - // but the first m= section. - callee()->SetReceivedSdpMunger(MakeSpecCompliantMaxBundleOffer); + // Add send video, from caller to callee. 
+ rtc::scoped_refptr caller_track = + caller()->CreateLocalVideoTrack(); + rtc::scoped_refptr caller_sender = + caller()->AddTrack(caller_track); + // Add receive video, from callee to caller. + rtc::scoped_refptr callee_track = + callee()->CreateLocalVideoTrack(); + + rtc::scoped_refptr callee_sender = + callee()->AddTrack(callee_track); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + + // Remove send video (i.e., caller sender track). + caller()->pc()->RemoveTrack(caller_sender); + caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + + // Expect one-directional video. MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); + media_expectations.CalleeExpectsNoVideo(); + media_expectations.CallerExpectsSomeVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test that we can receive the audio output level from a remote audio track. -// TODO(deadbeef): Use a fake audio source and verify that the output level is -// exactly what the source on the other side was configured with. -TEST_P(PeerConnectionIntegrationTest, GetAudioOutputLevelStatsWithOldStatsApi) { +// This test sets up a audio call initially, with the callee rejecting video +// initially. Then later the callee decides to upgrade to audio/video, and +// initiates a new offer/answer exchange. +TEST_P(PeerConnectionIntegrationTest, AudioToVideoUpgrade) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Just add an audio track. - caller()->AddAudioTrack(); + // Initially, offer an audio/video stream from the caller, but refuse to + // send/receive video on the callee side. 
+ caller()->AddAudioVideoTracks(); + callee()->AddAudioTrack(); + if (sdp_semantics_ == SdpSemantics::kPlanB) { + PeerConnectionInterface::RTCOfferAnswerOptions options; + options.offer_to_receive_video = 0; + callee()->SetOfferAnswerOptions(options); + } else { + callee()->SetRemoteOfferHandler([this] { + callee() + ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO) + ->StopInternal(); + }); + } + // Do offer/answer and make sure audio is still received end-to-end. caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + { + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudio(); + media_expectations.ExpectNoVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + } + // Sanity check that the callee's description has a rejected video section. + ASSERT_NE(nullptr, callee()->pc()->local_description()); + const ContentInfo* callee_video_content = + GetFirstVideoContent(callee()->pc()->local_description()->description()); + ASSERT_NE(nullptr, callee_video_content); + EXPECT_TRUE(callee_video_content->rejected); - // Get the audio output level stats. Note that the level is not available - // until an RTCP packet has been received. - EXPECT_TRUE_WAIT(callee()->OldGetStats()->AudioOutputLevel() > 0, - kMaxWaitForFramesMs); + // Now negotiate with video and ensure negotiation succeeds, with video + // frames and additional audio frames being received. + callee()->AddVideoTrack(); + if (sdp_semantics_ == SdpSemantics::kPlanB) { + PeerConnectionInterface::RTCOfferAnswerOptions options; + options.offer_to_receive_video = 1; + callee()->SetOfferAnswerOptions(options); + } else { + callee()->SetRemoteOfferHandler(nullptr); + caller()->SetRemoteOfferHandler([this] { + // The caller creates a new transceiver to receive video on when receiving + // the offer, but by default it is send only. 
+ auto transceivers = caller()->pc()->GetTransceivers(); + ASSERT_EQ(2U, transceivers.size()); + ASSERT_EQ(cricket::MEDIA_TYPE_VIDEO, + transceivers[1]->receiver()->media_type()); + transceivers[1]->sender()->SetTrack(caller()->CreateLocalVideoTrack()); + transceivers[1]->SetDirectionWithError( + RtpTransceiverDirection::kSendRecv); + }); + } + callee()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + { + // Expect additional audio frames to be received after the upgrade. + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + } } -// Test that an audio input level is reported. -// TODO(deadbeef): Use a fake audio source and verify that the input level is -// exactly what the source was configured with. -TEST_P(PeerConnectionIntegrationTest, GetAudioInputLevelStatsWithOldStatsApi) { +// Simpler than the above test; just add an audio track to an established +// video-only connection. +TEST_P(PeerConnectionIntegrationTest, AddAudioToVideoOnlyCall) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Just add an audio track. + // Do initial offer/answer with just a video track. + caller()->AddVideoTrack(); + callee()->AddVideoTrack(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Now add an audio track and do another offer/answer. caller()->AddAudioTrack(); + callee()->AddAudioTrack(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Get the audio input level stats. The level should be available very - // soon after the test starts. - EXPECT_TRUE_WAIT(caller()->OldGetStats()->AudioInputLevel() > 0, - kMaxWaitForStatsMs); + // Ensure both audio and video frames are received end-to-end. 
+ MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test that we can get incoming byte counts from both audio and video tracks. -TEST_P(PeerConnectionIntegrationTest, GetBytesReceivedStatsWithOldStatsApi) { +// This test sets up a call that's transferred to a new caller with a different +// DTLS fingerprint. +TEST_P(PeerConnectionIntegrationTest, CallTransferredForCallee) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); caller()->AddAudioVideoTracks(); - // Do offer/answer, wait for the callee to receive some frames. + callee()->AddAudioVideoTracks(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - - // Get a handle to the remote tracks created, so they can be used as GetStats - // filters. - for (const auto& receiver : callee()->pc()->GetReceivers()) { - // We received frames, so we definitely should have nonzero "received bytes" - // stats at this point. - EXPECT_GT(callee()->OldGetStatsForTrack(receiver->track())->BytesReceived(), - 0); - } -} + // Keep the original peer around which will still send packets to the + // receiving client. These SRTP packets will be dropped. + std::unique_ptr original_peer( + SetCallerPcWrapperAndReturnCurrent( + CreatePeerConnectionWrapperWithAlternateKey().release())); + // TODO(deadbeef): Why do we call Close here? That goes against the comment + // directly above. + original_peer->pc()->Close(); -// Test that we can get outgoing byte counts from both audio and video tracks. 
-TEST_P(PeerConnectionIntegrationTest, GetBytesSentStatsWithOldStatsApi) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - auto audio_track = caller()->CreateLocalAudioTrack(); - auto video_track = caller()->CreateLocalVideoTrack(); - caller()->AddTrack(audio_track); - caller()->AddTrack(video_track); - // Do offer/answer, wait for the callee to receive some frames. + caller()->AddAudioVideoTracks(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Wait for some additional frames to be transmitted end-to-end. MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudioAndVideo(); + media_expectations.ExpectBidirectionalAudioAndVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); - - // The callee received frames, so we definitely should have nonzero "sent - // bytes" stats at this point. - EXPECT_GT(caller()->OldGetStatsForTrack(audio_track)->BytesSent(), 0); - EXPECT_GT(caller()->OldGetStatsForTrack(video_track)->BytesSent(), 0); } -// Test that we can get capture start ntp time. -TEST_P(PeerConnectionIntegrationTest, GetCaptureStartNtpTimeWithOldStatsApi) { +// This test sets up a call that's transferred to a new callee with a different +// DTLS fingerprint. +TEST_P(PeerConnectionIntegrationTest, CallTransferredForCaller) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->AddAudioTrack(); - - callee()->AddAudioTrack(); - - // Do offer/answer, wait for the callee to receive some frames. + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Get the remote audio track created on the receiver, so they can be used as - // GetStats filters. - auto receivers = callee()->pc()->GetReceivers(); - ASSERT_EQ(1u, receivers.size()); - auto remote_audio_track = receivers[0]->track(); - - // Get the audio output level stats. 
Note that the level is not available - // until an RTCP packet has been received. - EXPECT_TRUE_WAIT( - callee()->OldGetStatsForTrack(remote_audio_track)->CaptureStartNtpTime() > - 0, - 2 * kMaxWaitForFramesMs); -} + // Keep the original peer around which will still send packets to the + // receiving client. These SRTP packets will be dropped. + std::unique_ptr original_peer( + SetCalleePcWrapperAndReturnCurrent( + CreatePeerConnectionWrapperWithAlternateKey().release())); + // TODO(deadbeef): Why do we call Close here? That goes against the comment + // directly above. + original_peer->pc()->Close(); -// Test that the track ID is associated with all local and remote SSRC stats -// using the old GetStats() and more than 1 audio and more than 1 video track. -// This is a regression test for crbug.com/906988 -TEST_F(PeerConnectionIntegrationTestUnifiedPlan, - OldGetStatsAssociatesTrackIdForManyMediaSections) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - auto audio_sender_1 = caller()->AddAudioTrack(); - auto video_sender_1 = caller()->AddVideoTrack(); - auto audio_sender_2 = caller()->AddAudioTrack(); - auto video_sender_2 = caller()->AddVideoTrack(); + callee()->AddAudioVideoTracks(); + caller()->SetOfferAnswerOptions(IceRestartOfferAnswerOptions()); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - + // Wait for some additional frames to be transmitted end-to-end. 
MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudioAndVideo(); - ASSERT_TRUE_WAIT(ExpectNewFrames(media_expectations), kDefaultTimeout); - - std::vector track_ids = { - audio_sender_1->track()->id(), video_sender_1->track()->id(), - audio_sender_2->track()->id(), video_sender_2->track()->id()}; - - auto caller_stats = caller()->OldGetStats(); - EXPECT_THAT(caller_stats->TrackIds(), UnorderedElementsAreArray(track_ids)); - auto callee_stats = callee()->OldGetStats(); - EXPECT_THAT(callee_stats->TrackIds(), UnorderedElementsAreArray(track_ids)); + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test that the new GetStats() returns stats for all outgoing/incoming streams -// with the correct track IDs if there are more than one audio and more than one -// video senders/receivers. -TEST_P(PeerConnectionIntegrationTest, NewGetStatsManyAudioAndManyVideoStreams) { +// This test sets up a non-bundled call and negotiates bundling at the same +// time as starting an ICE restart. When bundling is in effect in the restart, +// the DTLS-SRTP context should be successfully reset. +TEST_P(PeerConnectionIntegrationTest, BundlingEnabledWhileIceRestartOccurs) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - auto audio_sender_1 = caller()->AddAudioTrack(); - auto video_sender_1 = caller()->AddVideoTrack(); - auto audio_sender_2 = caller()->AddAudioTrack(); - auto video_sender_2 = caller()->AddVideoTrack(); + + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + // Remove the bundle group from the SDP received by the callee. 
+ callee()->SetReceivedSdpMunger([](cricket::SessionDescription* desc) { + desc->RemoveGroupByName("BUNDLE"); + }); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + { + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + } + // Now stop removing the BUNDLE group, and trigger an ICE restart. + callee()->SetReceivedSdpMunger(nullptr); + caller()->SetOfferAnswerOptions(IceRestartOfferAnswerOptions()); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudioAndVideo(); - ASSERT_TRUE_WAIT(ExpectNewFrames(media_expectations), kDefaultTimeout); + // Expect additional frames to be received after the ICE restart. + { + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + } +} - std::vector track_ids = { - audio_sender_1->track()->id(), video_sender_1->track()->id(), - audio_sender_2->track()->id(), video_sender_2->track()->id()}; +// Test CVO (Coordination of Video Orientation). If a video source is rotated +// and both peers support the CVO RTP header extension, the actual video frames +// don't need to be encoded in different resolutions, since the rotation is +// communicated through the RTP header extension. +TEST_P(PeerConnectionIntegrationTest, RotatedVideoWithCVOExtension) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + // Add rotated video tracks. 
+ caller()->AddTrack( + caller()->CreateLocalVideoTrackWithRotation(webrtc::kVideoRotation_90)); + callee()->AddTrack( + callee()->CreateLocalVideoTrackWithRotation(webrtc::kVideoRotation_270)); - rtc::scoped_refptr caller_report = - caller()->NewGetStats(); - ASSERT_TRUE(caller_report); - auto outbound_stream_stats = - caller_report->GetStatsOfType(); - ASSERT_EQ(outbound_stream_stats.size(), 4u); - std::vector outbound_track_ids; - for (const auto& stat : outbound_stream_stats) { - ASSERT_TRUE(stat->bytes_sent.is_defined()); - EXPECT_LT(0u, *stat->bytes_sent); - if (*stat->kind == "video") { - ASSERT_TRUE(stat->key_frames_encoded.is_defined()); - EXPECT_GT(*stat->key_frames_encoded, 0u); - ASSERT_TRUE(stat->frames_encoded.is_defined()); - EXPECT_GE(*stat->frames_encoded, *stat->key_frames_encoded); - } - ASSERT_TRUE(stat->track_id.is_defined()); - const auto* track_stat = - caller_report->GetAs(*stat->track_id); - ASSERT_TRUE(track_stat); - outbound_track_ids.push_back(*track_stat->track_identifier); - } - EXPECT_THAT(outbound_track_ids, UnorderedElementsAreArray(track_ids)); + // Wait for video frames to be received by both sides. 
+ caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_TRUE_WAIT(caller()->min_video_frames_received_per_track() > 0 && + callee()->min_video_frames_received_per_track() > 0, + kMaxWaitForFramesMs); - rtc::scoped_refptr callee_report = - callee()->NewGetStats(); - ASSERT_TRUE(callee_report); - auto inbound_stream_stats = - callee_report->GetStatsOfType(); - ASSERT_EQ(4u, inbound_stream_stats.size()); - std::vector inbound_track_ids; - for (const auto& stat : inbound_stream_stats) { - ASSERT_TRUE(stat->bytes_received.is_defined()); - EXPECT_LT(0u, *stat->bytes_received); - if (*stat->kind == "video") { - ASSERT_TRUE(stat->key_frames_decoded.is_defined()); - EXPECT_GT(*stat->key_frames_decoded, 0u); - ASSERT_TRUE(stat->frames_decoded.is_defined()); - EXPECT_GE(*stat->frames_decoded, *stat->key_frames_decoded); - } - ASSERT_TRUE(stat->track_id.is_defined()); - const auto* track_stat = - callee_report->GetAs(*stat->track_id); - ASSERT_TRUE(track_stat); - inbound_track_ids.push_back(*track_stat->track_identifier); - } - EXPECT_THAT(inbound_track_ids, UnorderedElementsAreArray(track_ids)); + // Ensure that the aspect ratio is unmodified. + // TODO(deadbeef): Where does 4:3 come from? Should be explicit in the test, + // not just assumed. + EXPECT_EQ(4.0 / 3, caller()->local_rendered_aspect_ratio()); + EXPECT_EQ(4.0 / 3, caller()->rendered_aspect_ratio()); + EXPECT_EQ(4.0 / 3, callee()->local_rendered_aspect_ratio()); + EXPECT_EQ(4.0 / 3, callee()->rendered_aspect_ratio()); + // Ensure that the CVO bits were surfaced to the renderer. + EXPECT_EQ(webrtc::kVideoRotation_270, caller()->rendered_rotation()); + EXPECT_EQ(webrtc::kVideoRotation_90, callee()->rendered_rotation()); } -// Test that we can get stats (using the new stats implementation) for -// unsignaled streams. Meaning when SSRCs/MSIDs aren't signaled explicitly in -// SDP. 
-TEST_P(PeerConnectionIntegrationTest, - GetStatsForUnsignaledStreamWithNewStatsApi) { +// Test that when the CVO extension isn't supported, video is rotated the +// old-fashioned way, by encoding rotated frames. +TEST_P(PeerConnectionIntegrationTest, RotatedVideoWithoutCVOExtension) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->AddAudioTrack(); - // Remove SSRCs and MSIDs from the received offer SDP. - callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); + // Add rotated video tracks. + caller()->AddTrack( + caller()->CreateLocalVideoTrackWithRotation(webrtc::kVideoRotation_90)); + callee()->AddTrack( + callee()->CreateLocalVideoTrackWithRotation(webrtc::kVideoRotation_270)); + + // Remove the CVO extension from the offered SDP. + callee()->SetReceivedSdpMunger([](cricket::SessionDescription* desc) { + cricket::VideoContentDescription* video = + GetFirstVideoContentDescription(desc); + video->ClearRtpHeaderExtensions(); + }); + // Wait for video frames to be received by both sides. caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudio(1); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); + ASSERT_TRUE_WAIT(caller()->min_video_frames_received_per_track() > 0 && + callee()->min_video_frames_received_per_track() > 0, + kMaxWaitForFramesMs); - // We received a frame, so we should have nonzero "bytes received" stats for - // the unsignaled stream, if stats are working for it. 
- rtc::scoped_refptr report = - callee()->NewGetStats(); - ASSERT_NE(nullptr, report); - auto inbound_stream_stats = - report->GetStatsOfType(); - ASSERT_EQ(1U, inbound_stream_stats.size()); - ASSERT_TRUE(inbound_stream_stats[0]->bytes_received.is_defined()); - ASSERT_GT(*inbound_stream_stats[0]->bytes_received, 0U); - ASSERT_TRUE(inbound_stream_stats[0]->track_id.is_defined()); + // Expect that the aspect ratio is inversed to account for the 90/270 degree + // rotation. + // TODO(deadbeef): Where does 4:3 come from? Should be explicit in the test, + // not just assumed. + EXPECT_EQ(3.0 / 4, caller()->local_rendered_aspect_ratio()); + EXPECT_EQ(3.0 / 4, caller()->rendered_aspect_ratio()); + EXPECT_EQ(3.0 / 4, callee()->local_rendered_aspect_ratio()); + EXPECT_EQ(3.0 / 4, callee()->rendered_aspect_ratio()); + // Expect that each endpoint is unaware of the rotation of the other endpoint. + EXPECT_EQ(webrtc::kVideoRotation_0, caller()->rendered_rotation()); + EXPECT_EQ(webrtc::kVideoRotation_0, callee()->rendered_rotation()); } -// Same as above but for the legacy stats implementation. -TEST_P(PeerConnectionIntegrationTest, - GetStatsForUnsignaledStreamWithOldStatsApi) { +// Test that if the answerer rejects the audio m= section, no audio is sent or +// received, but video still can be. +TEST_P(PeerConnectionIntegrationTest, AnswererRejectsAudioSection) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->AddAudioTrack(); - // Remove SSRCs and MSIDs from the received offer SDP. - callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); + caller()->AddAudioVideoTracks(); + if (sdp_semantics_ == SdpSemantics::kPlanB) { + // Only add video track for callee, and set offer_to_receive_audio to 0, so + // it will reject the audio m= section completely. 
+ PeerConnectionInterface::RTCOfferAnswerOptions options; + options.offer_to_receive_audio = 0; + callee()->SetOfferAnswerOptions(options); + } else { + // Stopping the audio RtpTransceiver will cause the media section to be + // rejected in the answer. + callee()->SetRemoteOfferHandler([this] { + callee() + ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_AUDIO) + ->StopInternal(); + }); + } + callee()->AddTrack(callee()->CreateLocalVideoTrack()); + // Do offer/answer and wait for successful end-to-end video frames. caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalVideo(); + media_expectations.ExpectNoAudio(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); - // Note that, since the old stats implementation associates SSRCs with tracks - // using SDP, when SSRCs aren't signaled in SDP these stats won't have an - // associated track ID. So we can't use the track "selector" argument. - // - // Also, we use "EXPECT_TRUE_WAIT" because the stats collector may decide to - // return cached stats if not enough time has passed since the last update. - EXPECT_TRUE_WAIT(callee()->OldGetStats()->BytesReceived() > 0, - kDefaultTimeout); + // Sanity check that the callee's description has a rejected audio section. + ASSERT_NE(nullptr, callee()->pc()->local_description()); + const ContentInfo* callee_audio_content = + GetFirstAudioContent(callee()->pc()->local_description()->description()); + ASSERT_NE(nullptr, callee_audio_content); + EXPECT_TRUE(callee_audio_content->rejected); + if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) { + // The caller's transceiver should have stopped after receiving the answer, + // and thus no longer listed in transceivers. + EXPECT_EQ(nullptr, + caller()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_AUDIO)); + } } -// Test that we can successfully get the media related stats (audio level -// etc.) 
for the unsignaled stream. -TEST_P(PeerConnectionIntegrationTest, - GetMediaStatsForUnsignaledStreamWithNewStatsApi) { +// Test that if the answerer rejects the video m= section, no video is sent or +// received, but audio still can be. +TEST_P(PeerConnectionIntegrationTest, AnswererRejectsVideoSection) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); caller()->AddAudioVideoTracks(); - // Remove SSRCs and MSIDs from the received offer SDP. - callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); + if (sdp_semantics_ == SdpSemantics::kPlanB) { + // Only add audio track for callee, and set offer_to_receive_video to 0, so + // it will reject the video m= section completely. + PeerConnectionInterface::RTCOfferAnswerOptions options; + options.offer_to_receive_video = 0; + callee()->SetOfferAnswerOptions(options); + } else { + // Stopping the video RtpTransceiver will cause the media section to be + // rejected in the answer. + callee()->SetRemoteOfferHandler([this] { + callee() + ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO) + ->StopInternal(); + }); + } + callee()->AddTrack(callee()->CreateLocalAudioTrack()); + // Do offer/answer and wait for successful end-to-end audio frames. caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudio(1); - media_expectations.CalleeExpectsSomeVideo(1); + media_expectations.ExpectBidirectionalAudio(); + media_expectations.ExpectNoVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); - rtc::scoped_refptr report = - callee()->NewGetStats(); - ASSERT_NE(nullptr, report); - - auto media_stats = report->GetStatsOfType(); - auto audio_index = FindFirstMediaStatsIndexByKind("audio", media_stats); - ASSERT_GE(audio_index, 0); - EXPECT_TRUE(media_stats[audio_index]->audio_level.is_defined()); + // Sanity check that the callee's description has a rejected video section. 
+ ASSERT_NE(nullptr, callee()->pc()->local_description()); + const ContentInfo* callee_video_content = + GetFirstVideoContent(callee()->pc()->local_description()->description()); + ASSERT_NE(nullptr, callee_video_content); + EXPECT_TRUE(callee_video_content->rejected); + if (sdp_semantics_ == SdpSemantics::kUnifiedPlan) { + // The caller's transceiver should have stopped after receiving the answer, + // and thus is no longer present. + EXPECT_EQ(nullptr, + caller()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)); + } } -// Helper for test below. -void ModifySsrcs(cricket::SessionDescription* desc) { - for (ContentInfo& content : desc->contents()) { - for (StreamParams& stream : - content.media_description()->mutable_streams()) { - for (uint32_t& ssrc : stream.ssrcs) { - ssrc = rtc::CreateRandomId(); +// Test that if the answerer rejects both audio and video m= sections, nothing +// bad happens. +// TODO(deadbeef): Test that a data channel still works. Currently this doesn't +// test anything but the fact that negotiation succeeds, which doesn't mean +// much. +TEST_P(PeerConnectionIntegrationTest, AnswererRejectsAudioAndVideoSections) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->AddAudioVideoTracks(); + if (sdp_semantics_ == SdpSemantics::kPlanB) { + // Don't give the callee any tracks, and set offer_to_receive_X to 0, so it + // will reject both audio and video m= sections. + PeerConnectionInterface::RTCOfferAnswerOptions options; + options.offer_to_receive_audio = 0; + options.offer_to_receive_video = 0; + callee()->SetOfferAnswerOptions(options); + } else { + callee()->SetRemoteOfferHandler([this] { + // Stopping all transceivers will cause all media sections to be rejected. + for (const auto& transceiver : callee()->pc()->GetTransceivers()) { + transceiver->StopInternal(); } - } + }); } + // Do offer/answer and wait for stable signaling state. 
+ caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + + // Sanity check that the callee's description has rejected m= sections. + ASSERT_NE(nullptr, callee()->pc()->local_description()); + const ContentInfo* callee_audio_content = + GetFirstAudioContent(callee()->pc()->local_description()->description()); + ASSERT_NE(nullptr, callee_audio_content); + EXPECT_TRUE(callee_audio_content->rejected); + const ContentInfo* callee_video_content = + GetFirstVideoContent(callee()->pc()->local_description()->description()); + ASSERT_NE(nullptr, callee_video_content); + EXPECT_TRUE(callee_video_content->rejected); } -// Test that the "RTCMediaSteamTrackStats" object is updated correctly when -// SSRCs are unsignaled, and the SSRC of the received (audio) stream changes. -// This should result in two "RTCInboundRTPStreamStats", but only one -// "RTCMediaStreamTrackStats", whose counters go up continuously rather than -// being reset to 0 once the SSRC change occurs. -// -// Regression test for this bug: -// https://bugs.chromium.org/p/webrtc/issues/detail?id=8158 -// -// The bug causes the track stats to only represent one of the two streams: -// whichever one has the higher SSRC. So with this bug, there was a 50% chance -// that the track stat counters would reset to 0 when the new stream is -// received, and a 50% chance that they'll stop updating (while -// "concealed_samples" continues increasing, due to silence being generated for -// the inactive stream). -TEST_P(PeerConnectionIntegrationTest, - TrackStatsUpdatedCorrectlyWhenUnsignaledSsrcChanges) { +// This test sets up an audio and video call between two parties. After the +// call runs for a while, the caller sends an updated offer with video being +// rejected. Once the re-negotiation is done, the video flow should stop and +// the audio flow should continue. 
+TEST_P(PeerConnectionIntegrationTest, VideoRejectedInSubsequentOffer) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->AddAudioTrack(); - // Remove SSRCs and MSIDs from the received offer SDP, simulating an endpoint - // that doesn't signal SSRCs (from the callee's perspective). - callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Wait for 50 audio frames (500ms of audio) to be received by the callee. { MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudio(50); + media_expectations.ExpectBidirectionalAudioAndVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); } - // Some audio frames were received, so we should have nonzero "samples - // received" for the track. - rtc::scoped_refptr report = - callee()->NewGetStats(); - ASSERT_NE(nullptr, report); - auto track_stats = report->GetStatsOfType(); - ASSERT_EQ(1U, track_stats.size()); - ASSERT_TRUE(track_stats[0]->total_samples_received.is_defined()); - ASSERT_GT(*track_stats[0]->total_samples_received, 0U); - // uint64_t prev_samples_received = *track_stats[0]->total_samples_received; - - // Create a new offer and munge it to cause the caller to use a new SSRC. - caller()->SetGeneratedSdpMunger(ModifySsrcs); + // Renegotiate, rejecting the video m= section. 
+ if (sdp_semantics_ == SdpSemantics::kPlanB) { + caller()->SetGeneratedSdpMunger( + [](cricket::SessionDescription* description) { + for (cricket::ContentInfo& content : description->contents()) { + if (cricket::IsVideoContent(&content)) { + content.rejected = true; + } + } + }); + } else { + caller() + ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO) + ->StopInternal(); + } caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Wait for 25 more audio frames (250ms of audio) to be received, from the new - // SSRC. + ASSERT_TRUE_WAIT(SignalingStateStable(), kMaxWaitForActivationMs); + + // Sanity check that the caller's description has a rejected video section. + ASSERT_NE(nullptr, caller()->pc()->local_description()); + const ContentInfo* caller_video_content = + GetFirstVideoContent(caller()->pc()->local_description()->description()); + ASSERT_NE(nullptr, caller_video_content); + EXPECT_TRUE(caller_video_content->rejected); + // Wait for some additional audio frames to be received. { MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudio(25); + media_expectations.ExpectBidirectionalAudio(); + media_expectations.ExpectNoVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); } +} - report = callee()->NewGetStats(); - ASSERT_NE(nullptr, report); - track_stats = report->GetStatsOfType(); - ASSERT_EQ(1U, track_stats.size()); - ASSERT_TRUE(track_stats[0]->total_samples_received.is_defined()); - // The "total samples received" stat should only be greater than it was - // before. - // TODO(deadbeef): Uncomment this assertion once the bug is completely fixed. - // Right now, the new SSRC will cause the counters to reset to 0. - // EXPECT_GT(*track_stats[0]->total_samples_received, prev_samples_received); +// Do one offer/answer with audio, another that disables it (rejecting the m= +// section), and another that re-enables it. 
Regression test for: +// bugs.webrtc.org/6023 +TEST_F(PeerConnectionIntegrationTestPlanB, EnableAudioAfterRejecting) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); - // Additionally, the percentage of concealed samples (samples generated to - // conceal packet loss) should be less than 50%. If it's greater, that's a - // good sign that we're seeing stats from the old stream that's no longer - // receiving packets, and is generating concealed samples of silence. - constexpr double kAcceptableConcealedSamplesPercentage = 0.50; - ASSERT_TRUE(track_stats[0]->concealed_samples.is_defined()); - EXPECT_LT(*track_stats[0]->concealed_samples, - *track_stats[0]->total_samples_received * - kAcceptableConcealedSamplesPercentage); + // Add audio track, do normal offer/answer. + rtc::scoped_refptr track = + caller()->CreateLocalAudioTrack(); + rtc::scoped_refptr sender = + caller()->pc()->AddTrack(track, {"stream"}).MoveValue(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Also ensure that we have two "RTCInboundRTPStreamStats" as expected, as a - // sanity check that the SSRC really changed. - // TODO(deadbeef): This isn't working right now, because we're not returning - // *any* stats for the inactive stream. Uncomment when the bug is completely - // fixed. - // auto inbound_stream_stats = - // report->GetStatsOfType(); - // ASSERT_EQ(2U, inbound_stream_stats.size()); -} + // Remove audio track, and set offer_to_receive_audio to false to cause the + // m= section to be completely disabled, not just "recvonly". + caller()->pc()->RemoveTrack(sender); + PeerConnectionInterface::RTCOfferAnswerOptions options; + options.offer_to_receive_audio = 0; + caller()->SetOfferAnswerOptions(options); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); -// Test that DTLS 1.0 is used if both sides only support DTLS 1.0. 
-TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithDtls10) { - PeerConnectionFactory::Options dtls_10_options; - dtls_10_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10; - ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_10_options, - dtls_10_options)); + // Add the audio track again, expecting negotiation to succeed and frames to + // flow. + sender = caller()->pc()->AddTrack(track, {"stream"}).MoveValue(); + options.offer_to_receive_audio = 1; + caller()->SetOfferAnswerOptions(options); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + + MediaExpectations media_expectations; + media_expectations.CalleeExpectsSomeAudio(); + EXPECT_TRUE(ExpectNewFrames(media_expectations)); +} + +// Basic end-to-end test, but without SSRC/MSID signaling. This functionality +// is needed to support legacy endpoints. +// TODO(deadbeef): When we support the MID extension and demuxing on MID, also +// add a test for an end-to-end test without MID signaling either (basically, +// the minimum acceptable SDP). +TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithoutSsrcOrMsidSignaling) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Do normal offer/answer and wait for some frames to be received in each - // direction. + // Add audio and video, testing that packets can be demuxed on payload type. caller()->AddAudioVideoTracks(); callee()->AddAudioVideoTracks(); + // Remove SSRCs and MSIDs from the received offer SDP. + callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); MediaExpectations media_expectations; @@ -3294,1453 +1003,830 @@ TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithDtls10) { ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test getting cipher stats and UMA metrics when DTLS 1.0 is negotiated. 
-TEST_P(PeerConnectionIntegrationTest, Dtls10CipherStatsAndUmaMetrics) { - PeerConnectionFactory::Options dtls_10_options; - dtls_10_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10; - ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_10_options, - dtls_10_options)); +// Basic end-to-end test, without SSRC signaling. This means that the track +// was created properly and frames are delivered when the MSIDs are communicated +// with a=msid lines and no a=ssrc lines. +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + EndToEndCallWithoutSsrcSignaling) { + const char kStreamId[] = "streamId"; + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + // Add just audio tracks. + caller()->AddTrack(caller()->CreateLocalAudioTrack(), {kStreamId}); + callee()->AddAudioTrack(); + + // Remove SSRCs from the received offer SDP. + callee()->SetReceivedSdpMunger(RemoveSsrcsAndKeepMsids); caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); - EXPECT_TRUE_WAIT(rtc::SSLStreamAdapter::IsAcceptableCipher( - caller()->OldGetStats()->DtlsCipher(), rtc::KT_DEFAULT), - kDefaultTimeout); - EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite), - caller()->OldGetStats()->SrtpCipher(), kDefaultTimeout); - // TODO(bugs.webrtc.org/9456): Fix it. - EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents( - "WebRTC.PeerConnection.SrtpCryptoSuite.Audio", - kDefaultSrtpCryptoSuite)); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudio(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test getting cipher stats and UMA metrics when DTLS 1.2 is negotiated. 
-TEST_P(PeerConnectionIntegrationTest, Dtls12CipherStatsAndUmaMetrics) { - PeerConnectionFactory::Options dtls_12_options; - dtls_12_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12; - ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_12_options, - dtls_12_options)); +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + EndToEndCallAddReceiveVideoToSendOnlyCall) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + // Add one-directional video, from caller to callee. + rtc::scoped_refptr track = + caller()->CreateLocalVideoTrack(); + + RtpTransceiverInit video_transceiver_init; + video_transceiver_init.stream_ids = {"video1"}; + video_transceiver_init.direction = RtpTransceiverDirection::kSendOnly; + auto video_sender = + caller()->pc()->AddTransceiver(track, video_transceiver_init).MoveValue(); caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); - EXPECT_TRUE_WAIT(rtc::SSLStreamAdapter::IsAcceptableCipher( - caller()->OldGetStats()->DtlsCipher(), rtc::KT_DEFAULT), - kDefaultTimeout); - EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite), - caller()->OldGetStats()->SrtpCipher(), kDefaultTimeout); - // TODO(bugs.webrtc.org/9456): Fix it. - EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents( - "WebRTC.PeerConnection.SrtpCryptoSuite.Audio", - kDefaultSrtpCryptoSuite)); -} + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); -// Test that DTLS 1.0 can be used if the caller supports DTLS 1.2 and the -// callee only supports 1.0. 
-TEST_P(PeerConnectionIntegrationTest, CallerDtls12ToCalleeDtls10) { - PeerConnectionFactory::Options caller_options; - caller_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12; - PeerConnectionFactory::Options callee_options; - callee_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10; - ASSERT_TRUE( - CreatePeerConnectionWrappersWithOptions(caller_options, callee_options)); - ConnectFakeSignaling(); - // Do normal offer/answer and wait for some frames to be received in each - // direction. - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + // Add receive direction. + video_sender->SetDirectionWithError(RtpTransceiverDirection::kSendRecv); + + rtc::scoped_refptr callee_track = + callee()->CreateLocalVideoTrack(); + + callee()->AddTrack(callee_track); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Ensure that video frames are received end-to-end. MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); + media_expectations.ExpectBidirectionalVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test that DTLS 1.0 can be used if the caller only supports DTLS 1.0 and the -// callee supports 1.2. -TEST_P(PeerConnectionIntegrationTest, CallerDtls10ToCalleeDtls12) { - PeerConnectionFactory::Options caller_options; - caller_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10; - PeerConnectionFactory::Options callee_options; - callee_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12; - ASSERT_TRUE( - CreatePeerConnectionWrappersWithOptions(caller_options, callee_options)); +// Tests that video flows between multiple video tracks when SSRCs are not +// signaled. This exercises the MID RTP header extension which is needed to +// demux the incoming video tracks. 
+TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + EndToEndCallWithTwoVideoTracksAndNoSignaledSsrc) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Do normal offer/answer and wait for some frames to be received in each - // direction. - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + caller()->AddVideoTrack(); + caller()->AddVideoTrack(); + callee()->AddVideoTrack(); + callee()->AddVideoTrack(); + + caller()->SetReceivedSdpMunger(&RemoveSsrcsAndKeepMsids); + callee()->SetReceivedSdpMunger(&RemoveSsrcsAndKeepMsids); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} + ASSERT_EQ(2u, caller()->pc()->GetReceivers().size()); + ASSERT_EQ(2u, callee()->pc()->GetReceivers().size()); -// The three tests below verify that "enable_aes128_sha1_32_crypto_cipher" -// works as expected; the cipher should only be used if enabled by both sides. -TEST_P(PeerConnectionIntegrationTest, - Aes128Sha1_32_CipherNotUsedWhenOnlyCallerSupported) { - PeerConnectionFactory::Options caller_options; - caller_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = true; - PeerConnectionFactory::Options callee_options; - callee_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = - false; - int expected_cipher_suite = rtc::SRTP_AES128_CM_SHA1_80; - TestNegotiatedCipherSuite(caller_options, callee_options, - expected_cipher_suite); + // Expect video to be received in both directions on both tracks. 
+ MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalVideo(); + EXPECT_TRUE(ExpectNewFrames(media_expectations)); } -TEST_P(PeerConnectionIntegrationTest, - Aes128Sha1_32_CipherNotUsedWhenOnlyCalleeSupported) { - PeerConnectionFactory::Options caller_options; - caller_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = - false; - PeerConnectionFactory::Options callee_options; - callee_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = true; - int expected_cipher_suite = rtc::SRTP_AES128_CM_SHA1_80; - TestNegotiatedCipherSuite(caller_options, callee_options, - expected_cipher_suite); -} +// Used for the test below. +void RemoveBundleGroupSsrcsAndMidExtension(cricket::SessionDescription* desc) { + RemoveSsrcsAndKeepMsids(desc); + desc->RemoveGroupByName("BUNDLE"); + for (ContentInfo& content : desc->contents()) { + cricket::MediaContentDescription* media = content.media_description(); + cricket::RtpHeaderExtensions extensions = media->rtp_header_extensions(); + extensions.erase(std::remove_if(extensions.begin(), extensions.end(), + [](const RtpExtension& extension) { + return extension.uri == + RtpExtension::kMidUri; + }), + extensions.end()); + media->set_rtp_header_extensions(extensions); + } +} + +// Tests that video flows between multiple video tracks when BUNDLE is not used, +// SSRCs are not signaled and the MID RTP header extension is not used. This +// relies on demuxing by payload type, which normally doesn't work if you have +// multiple media sections using the same payload type, but which should work as +// long as the media sections aren't bundled. 
+// Regression test for: http://crbug.com/webrtc/12023 +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + EndToEndCallWithTwoVideoTracksNoBundleNoSignaledSsrcAndNoMid) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->AddVideoTrack(); + caller()->AddVideoTrack(); + callee()->AddVideoTrack(); + callee()->AddVideoTrack(); + caller()->SetReceivedSdpMunger(&RemoveBundleGroupSsrcsAndMidExtension); + callee()->SetReceivedSdpMunger(&RemoveBundleGroupSsrcsAndMidExtension); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_EQ(2u, caller()->pc()->GetReceivers().size()); + ASSERT_EQ(2u, callee()->pc()->GetReceivers().size()); + // Make sure we are not bundled. + ASSERT_NE(caller()->pc()->GetSenders()[0]->dtls_transport(), + caller()->pc()->GetSenders()[1]->dtls_transport()); -TEST_P(PeerConnectionIntegrationTest, Aes128Sha1_32_CipherUsedWhenSupported) { - PeerConnectionFactory::Options caller_options; - caller_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = true; - PeerConnectionFactory::Options callee_options; - callee_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = true; - int expected_cipher_suite = rtc::SRTP_AES128_CM_SHA1_32; - TestNegotiatedCipherSuite(caller_options, callee_options, - expected_cipher_suite); + // Expect video to be received in both directions on both tracks. + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalVideo(); + EXPECT_TRUE(ExpectNewFrames(media_expectations)); } -// Test that a non-GCM cipher is used if both sides only support non-GCM. 
-TEST_P(PeerConnectionIntegrationTest, NonGcmCipherUsedWhenGcmNotSupported) { - bool local_gcm_enabled = false; - bool remote_gcm_enabled = false; - bool aes_ctr_enabled = true; - int expected_cipher_suite = kDefaultSrtpCryptoSuite; - TestGcmNegotiationUsesCipherSuite(local_gcm_enabled, remote_gcm_enabled, - aes_ctr_enabled, expected_cipher_suite); -} +// Used for the test below. +void ModifyPayloadTypesAndRemoveMidExtension( + cricket::SessionDescription* desc) { + int pt = 96; + for (ContentInfo& content : desc->contents()) { + cricket::MediaContentDescription* media = content.media_description(); + cricket::RtpHeaderExtensions extensions = media->rtp_header_extensions(); + extensions.erase(std::remove_if(extensions.begin(), extensions.end(), + [](const RtpExtension& extension) { + return extension.uri == + RtpExtension::kMidUri; + }), + extensions.end()); + media->set_rtp_header_extensions(extensions); + cricket::VideoContentDescription* video = media->as_video(); + ASSERT_TRUE(video != nullptr); + std::vector codecs = {{pt++, "VP8"}}; + video->set_codecs(codecs); + } +} + +// Tests that two video tracks can be demultiplexed by payload type alone, by +// using different payload types for the same codec in different m= sections. +// This practice is discouraged but historically has been supported. +// Regression test for: http://crbug.com/webrtc/12029 +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + EndToEndCallWithTwoVideoTracksDemultiplexedByPayloadType) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->AddVideoTrack(); + caller()->AddVideoTrack(); + callee()->AddVideoTrack(); + callee()->AddVideoTrack(); + caller()->SetGeneratedSdpMunger(&ModifyPayloadTypesAndRemoveMidExtension); + callee()->SetGeneratedSdpMunger(&ModifyPayloadTypesAndRemoveMidExtension); + // We can't remove SSRCs from the generated SDP because then no send streams + // would be created. 
+ caller()->SetReceivedSdpMunger(&RemoveSsrcsAndKeepMsids); + callee()->SetReceivedSdpMunger(&RemoveSsrcsAndKeepMsids); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + ASSERT_EQ(2u, caller()->pc()->GetReceivers().size()); + ASSERT_EQ(2u, callee()->pc()->GetReceivers().size()); + // Make sure we are bundled. + ASSERT_EQ(caller()->pc()->GetSenders()[0]->dtls_transport(), + caller()->pc()->GetSenders()[1]->dtls_transport()); -// Test that a GCM cipher is used if both ends support it and non-GCM is -// disabled. -TEST_P(PeerConnectionIntegrationTest, GcmCipherUsedWhenOnlyGcmSupported) { - bool local_gcm_enabled = true; - bool remote_gcm_enabled = true; - bool aes_ctr_enabled = false; - int expected_cipher_suite = kDefaultSrtpCryptoSuiteGcm; - TestGcmNegotiationUsesCipherSuite(local_gcm_enabled, remote_gcm_enabled, - aes_ctr_enabled, expected_cipher_suite); + // Expect video to be received in both directions on both tracks. + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalVideo(); + EXPECT_TRUE(ExpectNewFrames(media_expectations)); } -// Verify that media can be transmitted end-to-end when GCM crypto suites are -// enabled. Note that the above tests, such as GcmCipherUsedWhenGcmSupported, -// only verify that a GCM cipher is negotiated, and not necessarily that SRTP -// works with it. -TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithGcmCipher) { - PeerConnectionFactory::Options gcm_options; - gcm_options.crypto_options.srtp.enable_gcm_crypto_suites = true; - gcm_options.crypto_options.srtp.enable_aes128_sha1_80_crypto_cipher = false; - ASSERT_TRUE( - CreatePeerConnectionWrappersWithOptions(gcm_options, gcm_options)); +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, NoStreamsMsidLinePresent) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Do normal offer/answer and wait for some frames to be received in each - // direction. 
- caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + caller()->AddAudioTrack(); + caller()->AddVideoTrack(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); + auto callee_receivers = callee()->pc()->GetReceivers(); + ASSERT_EQ(2u, callee_receivers.size()); + EXPECT_TRUE(callee_receivers[0]->stream_ids().empty()); + EXPECT_TRUE(callee_receivers[1]->stream_ids().empty()); +} + +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, NoStreamsMsidLineMissing) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->AddAudioTrack(); + caller()->AddVideoTrack(); + callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + auto callee_receivers = callee()->pc()->GetReceivers(); + ASSERT_EQ(2u, callee_receivers.size()); + ASSERT_EQ(1u, callee_receivers[0]->stream_ids().size()); + ASSERT_EQ(1u, callee_receivers[1]->stream_ids().size()); + EXPECT_EQ(callee_receivers[0]->stream_ids()[0], + callee_receivers[1]->stream_ids()[0]); + EXPECT_EQ(callee_receivers[0]->streams()[0], + callee_receivers[1]->streams()[0]); } -// This test sets up a call between two parties with audio, video and an RTP -// data channel. -TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithRtpDataChannel) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - rtc_config.enable_dtls_srtp = false; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(rtc_config, rtc_config)); +// Test that if two video tracks are sent (from caller to callee, in this test), +// they're transmitted correctly end-to-end. 
+TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithTwoVideoTracks) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Expect that data channel created on caller side will show up for callee as - // well. - caller()->CreateDataChannel(); + // Add one audio/video stream, and one video-only stream. caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + caller()->AddVideoTrack(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Ensure the existence of the RTP data channel didn't impede audio/video. + ASSERT_EQ(3u, callee()->pc()->GetReceivers().size()); + MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); + media_expectations.CalleeExpectsSomeAudioAndVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_NE(nullptr, callee()->data_channel()); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Ensure data can be sent in both directions. 
- std::string data = "hello world"; - SendRtpDataWithRetries(caller()->data_channel(), data, 5); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - SendRtpDataWithRetries(callee()->data_channel(), data, 5); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); } -TEST_P(PeerConnectionIntegrationTest, RtpDataChannelWorksAfterRollback) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - rtc_config.enable_dtls_srtp = false; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(rtc_config, rtc_config)); - ConnectFakeSignaling(); - auto data_channel = caller()->pc()->CreateDataChannel("label_1", nullptr); - ASSERT_TRUE(data_channel.get() != nullptr); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - caller()->CreateDataChannel("label_2", nullptr); - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); - caller()->pc()->SetLocalDescription(observer, - caller()->CreateOfferAndWait().release()); - EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout); - caller()->Rollback(); - - std::string data = "hello world"; - SendRtpDataWithRetries(data_channel, data, 5); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); +static void MakeSpecCompliantMaxBundleOffer(cricket::SessionDescription* desc) { + bool first = true; + for (cricket::ContentInfo& content : desc->contents()) { + if (first) { + first = false; + continue; + } + content.bundle_only = true; + } + first = true; + for (cricket::TransportInfo& transport : desc->transport_infos()) { + if (first) { + first = false; + continue; + } + transport.description.ice_ufrag.clear(); + transport.description.ice_pwd.clear(); + transport.description.connection_role = cricket::CONNECTIONROLE_NONE; + transport.description.identity_fingerprint.reset(nullptr); + } } -// Ensure that an RTP data channel is signaled as closed 
for the caller when -// the callee rejects it in a subsequent offer. +// Test that if applying a true "max bundle" offer, which uses ports of 0, +// "a=bundle-only", omitting "a=fingerprint", "a=setup", "a=ice-ufrag" and +// "a=ice-pwd" for all but the audio "m=" section, negotiation still completes +// successfully and media flows. +// TODO(deadbeef): Update this test to also omit "a=rtcp-mux", once that works. +// TODO(deadbeef): Won't need this test once we start generating actual +// standards-compliant SDP. TEST_P(PeerConnectionIntegrationTest, - RtpDataChannelSignaledClosedInCalleeOffer) { - // Same procedure as above test. - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - rtc_config.enable_dtls_srtp = false; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(rtc_config, rtc_config)); + EndToEndCallWithSpecCompliantMaxBundleOffer) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->CreateDataChannel(); caller()->AddAudioVideoTracks(); callee()->AddAudioVideoTracks(); + // Do the equivalent of setting the port to 0, adding a=bundle-only, and + // removing a=ice-ufrag, a=ice-pwd, a=fingerprint and a=setup from all + // but the first m= section. + callee()->SetReceivedSdpMunger(MakeSpecCompliantMaxBundleOffer); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_NE(nullptr, callee()->data_channel()); - ASSERT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Close the data channel on the callee, and do an updated offer/answer. 
- callee()->data_channel()->Close(); - callee()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - EXPECT_FALSE(caller()->data_observer()->IsOpen()); - EXPECT_FALSE(callee()->data_observer()->IsOpen()); + MediaExpectations media_expectations; + media_expectations.ExpectBidirectionalAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Tests that data is buffered in an RTP data channel until an observer is -// registered for it. -// -// NOTE: RTP data channels can receive data before the underlying -// transport has detected that a channel is writable and thus data can be -// received before the data channel state changes to open. That is hard to test -// but the same buffering is expected to be used in that case. -// -// Use fake clock and simulated network delay so that we predictably can wait -// until an SCTP message has been delivered without "sleep()"ing. -TEST_P(PeerConnectionIntegrationTestWithFakeClock, - DataBufferedUntilRtpDataChannelObserverRegistered) { - virtual_socket_server()->set_delay_mean(5); // 5 ms per hop. - virtual_socket_server()->UpdateDelayDistribution(); - - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - rtc_config.enable_dtls_srtp = false; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(rtc_config, rtc_config)); +// Test that we can receive the audio output level from a remote audio track. +// TODO(deadbeef): Use a fake audio source and verify that the output level is +// exactly what the source on the other side was configured with. +TEST_P(PeerConnectionIntegrationTest, GetAudioOutputLevelStatsWithOldStatsApi) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->CreateDataChannel(); + // Just add an audio track. 
+ caller()->AddAudioTrack(); caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE(caller()->data_channel() != nullptr); - ASSERT_TRUE_SIMULATED_WAIT(callee()->data_channel() != nullptr, - kDefaultTimeout, FakeClock()); - ASSERT_TRUE_SIMULATED_WAIT(caller()->data_observer()->IsOpen(), - kDefaultTimeout, FakeClock()); - ASSERT_EQ_SIMULATED_WAIT(DataChannelInterface::kOpen, - callee()->data_channel()->state(), kDefaultTimeout, - FakeClock()); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Unregister the observer which is normally automatically registered. - callee()->data_channel()->UnregisterObserver(); - // Send data and advance fake clock until it should have been received. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - SIMULATED_WAIT(false, 50, FakeClock()); - - // Attach data channel and expect data to be received immediately. Note that - // EXPECT_EQ_WAIT is used, such that the simulated clock is not advanced any - // further, but data can be received even if the callback is asynchronous. - MockDataChannelObserver new_observer(callee()->data_channel()); - EXPECT_EQ_SIMULATED_WAIT(data, new_observer.last_message(), kDefaultTimeout, - FakeClock()); + // Get the audio output level stats. Note that the level is not available + // until an RTCP packet has been received. + EXPECT_TRUE_WAIT(callee()->OldGetStats()->AudioOutputLevel() > 0, + kMaxWaitForFramesMs); } -// This test sets up a call between two parties with audio, video and but only -// the caller client supports RTP data channels. -TEST_P(PeerConnectionIntegrationTest, RtpDataChannelsRejectedByCallee) { - PeerConnectionInterface::RTCConfiguration rtc_config_1; - rtc_config_1.enable_rtp_data_channel = true; - // Must disable DTLS to make negotiation succeed. 
- rtc_config_1.enable_dtls_srtp = false; - PeerConnectionInterface::RTCConfiguration rtc_config_2; - rtc_config_2.enable_dtls_srtp = false; - rtc_config_2.enable_dtls_srtp = false; - ASSERT_TRUE( - CreatePeerConnectionWrappersWithConfig(rtc_config_1, rtc_config_2)); +// Test that an audio input level is reported. +// TODO(deadbeef): Use a fake audio source and verify that the input level is +// exactly what the source was configured with. +TEST_P(PeerConnectionIntegrationTest, GetAudioInputLevelStatsWithOldStatsApi) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->CreateDataChannel(); - ASSERT_TRUE(caller()->data_channel() != nullptr); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + // Just add an audio track. + caller()->AddAudioTrack(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // The caller should still have a data channel, but it should be closed, and - // one should ever have been created for the callee. - EXPECT_TRUE(caller()->data_channel() != nullptr); - EXPECT_FALSE(caller()->data_observer()->IsOpen()); - EXPECT_EQ(nullptr, callee()->data_channel()); -} -// This test sets up a call between two parties with audio, and video. When -// audio and video is setup and flowing, an RTP data channel is negotiated. -TEST_P(PeerConnectionIntegrationTest, AddRtpDataChannelInSubsequentOffer) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - rtc_config.enable_dtls_srtp = false; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(rtc_config, rtc_config)); - ConnectFakeSignaling(); - // Do initial offer/answer with audio/video. - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Create data channel and do new offer and answer. 
- caller()->CreateDataChannel(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_NE(nullptr, callee()->data_channel()); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - // Ensure data can be sent in both directions. - std::string data = "hello world"; - SendRtpDataWithRetries(caller()->data_channel(), data, 5); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - SendRtpDataWithRetries(callee()->data_channel(), data, 5); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); + // Get the audio input level stats. The level should be available very + // soon after the test starts. + EXPECT_TRUE_WAIT(caller()->OldGetStats()->AudioInputLevel() > 0, + kMaxWaitForStatsMs); } -#ifdef HAVE_SCTP - -// This test sets up a call between two parties with audio, video and an SCTP -// data channel. -TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithSctpDataChannel) { +// Test that we can get incoming byte counts from both audio and video tracks. +TEST_P(PeerConnectionIntegrationTest, GetBytesReceivedStatsWithOldStatsApi) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Expect that data channel created on caller side will show up for callee as - // well. - caller()->CreateDataChannel(); caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + // Do offer/answer, wait for the callee to receive some frames. caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Ensure the existence of the SCTP data channel didn't impede audio/video. 
+ MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); + media_expectations.CalleeExpectsSomeAudioAndVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); - // Caller data channel should already exist (it created one). Callee data - // channel may not exist yet, since negotiation happens in-band, not in SDP. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); + + // Get a handle to the remote tracks created, so they can be used as GetStats + // filters. + for (const auto& receiver : callee()->pc()->GetReceivers()) { + // We received frames, so we definitely should have nonzero "received bytes" + // stats at this point. + EXPECT_GT(callee()->OldGetStatsForTrack(receiver->track())->BytesReceived(), + 0); + } } -// Ensure that when the callee closes an SCTP data channel, the closing -// procedure results in the data channel being closed for the caller as well. -TEST_P(PeerConnectionIntegrationTest, CalleeClosesSctpDataChannel) { - // Same procedure as above test. +// Test that we can get outgoing byte counts from both audio and video tracks. 
+TEST_P(PeerConnectionIntegrationTest, GetBytesSentStatsWithOldStatsApi) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->CreateDataChannel(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + auto audio_track = caller()->CreateLocalAudioTrack(); + auto video_track = caller()->CreateLocalVideoTrack(); + caller()->AddTrack(audio_track); + caller()->AddTrack(video_track); + // Do offer/answer, wait for the callee to receive some frames. caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - ASSERT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Close the data channel on the callee side, and wait for it to reach the - // "closed" state on both sides. - callee()->data_channel()->Close(); - EXPECT_TRUE_WAIT(!caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(!callee()->data_observer()->IsOpen(), kDefaultTimeout); + MediaExpectations media_expectations; + media_expectations.CalleeExpectsSomeAudioAndVideo(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + + // The callee received frames, so we definitely should have nonzero "sent + // bytes" stats at this point. + EXPECT_GT(caller()->OldGetStatsForTrack(audio_track)->BytesSent(), 0); + EXPECT_GT(caller()->OldGetStatsForTrack(video_track)->BytesSent(), 0); } -TEST_P(PeerConnectionIntegrationTest, SctpDataChannelConfigSentToOtherSide) { +// Test that we can get capture start ntp time. 
+TEST_P(PeerConnectionIntegrationTest, GetCaptureStartNtpTimeWithOldStatsApi) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - webrtc::DataChannelInit init; - init.id = 53; - init.maxRetransmits = 52; - caller()->CreateDataChannel("data-channel", &init); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + caller()->AddAudioTrack(); + + callee()->AddAudioTrack(); + + // Do offer/answer, wait for the callee to receive some frames. caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - // Since "negotiated" is false, the "id" parameter should be ignored. - EXPECT_NE(init.id, callee()->data_channel()->id()); - EXPECT_EQ("data-channel", callee()->data_channel()->label()); - EXPECT_EQ(init.maxRetransmits, callee()->data_channel()->maxRetransmits()); - EXPECT_FALSE(callee()->data_channel()->negotiated()); + + // Get the remote audio track created on the receiver, so they can be used as + // GetStats filters. + auto receivers = callee()->pc()->GetReceivers(); + ASSERT_EQ(1u, receivers.size()); + auto remote_audio_track = receivers[0]->track(); + + // Get the audio output level stats. Note that the level is not available + // until an RTCP packet has been received. + EXPECT_TRUE_WAIT( + callee()->OldGetStatsForTrack(remote_audio_track)->CaptureStartNtpTime() > + 0, + 2 * kMaxWaitForFramesMs); } -// Test usrsctp's ability to process unordered data stream, where data actually -// arrives out of order using simulated delays. Previously there have been some -// bugs in this area. -TEST_P(PeerConnectionIntegrationTest, StressTestUnorderedSctpDataChannel) { - // Introduce random network delays. - // Otherwise it's not a true "unordered" test. 
- virtual_socket_server()->set_delay_mean(20); - virtual_socket_server()->set_delay_stddev(5); - virtual_socket_server()->UpdateDelayDistribution(); - // Normal procedure, but with unordered data channel config. +// Test that the track ID is associated with all local and remote SSRC stats +// using the old GetStats() and more than 1 audio and more than 1 video track. +// This is a regression test for crbug.com/906988 +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + OldGetStatsAssociatesTrackIdForManyMediaSections) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - webrtc::DataChannelInit init; - init.ordered = false; - caller()->CreateDataChannel(&init); + auto audio_sender_1 = caller()->AddAudioTrack(); + auto video_sender_1 = caller()->AddVideoTrack(); + auto audio_sender_2 = caller()->AddAudioTrack(); + auto video_sender_2 = caller()->AddVideoTrack(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - ASSERT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - static constexpr int kNumMessages = 100; - // Deliberately chosen to be larger than the MTU so messages get fragmented. - static constexpr size_t kMaxMessageSize = 4096; - // Create and send random messages. - std::vector sent_messages; - for (int i = 0; i < kNumMessages; ++i) { - size_t length = - (rand() % kMaxMessageSize) + 1; // NOLINT (rand_r instead of rand) - std::string message; - ASSERT_TRUE(rtc::CreateRandomString(length, &message)); - caller()->data_channel()->Send(DataBuffer(message)); - callee()->data_channel()->Send(DataBuffer(message)); - sent_messages.push_back(message); - } - // Wait for all messages to be received. 
- EXPECT_EQ_WAIT(rtc::checked_cast(kNumMessages), - caller()->data_observer()->received_message_count(), - kDefaultTimeout); - EXPECT_EQ_WAIT(rtc::checked_cast(kNumMessages), - callee()->data_observer()->received_message_count(), - kDefaultTimeout); - - // Sort and compare to make sure none of the messages were corrupted. - std::vector caller_received_messages = - caller()->data_observer()->messages(); - std::vector callee_received_messages = - callee()->data_observer()->messages(); - absl::c_sort(sent_messages); - absl::c_sort(caller_received_messages); - absl::c_sort(callee_received_messages); - EXPECT_EQ(sent_messages, caller_received_messages); - EXPECT_EQ(sent_messages, callee_received_messages); + MediaExpectations media_expectations; + media_expectations.CalleeExpectsSomeAudioAndVideo(); + ASSERT_TRUE_WAIT(ExpectNewFrames(media_expectations), kDefaultTimeout); + + std::vector track_ids = { + audio_sender_1->track()->id(), video_sender_1->track()->id(), + audio_sender_2->track()->id(), video_sender_2->track()->id()}; + + auto caller_stats = caller()->OldGetStats(); + EXPECT_THAT(caller_stats->TrackIds(), UnorderedElementsAreArray(track_ids)); + auto callee_stats = callee()->OldGetStats(); + EXPECT_THAT(callee_stats->TrackIds(), UnorderedElementsAreArray(track_ids)); } -// This test sets up a call between two parties with audio, and video. When -// audio and video are setup and flowing, an SCTP data channel is negotiated. -TEST_P(PeerConnectionIntegrationTest, AddSctpDataChannelInSubsequentOffer) { +// Test that the new GetStats() returns stats for all outgoing/incoming streams +// with the correct track IDs if there are more than one audio and more than one +// video senders/receivers. +TEST_P(PeerConnectionIntegrationTest, NewGetStatsManyAudioAndManyVideoStreams) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Do initial offer/answer with audio/video. 
- caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Create data channel and do new offer and answer. - caller()->CreateDataChannel(); + auto audio_sender_1 = caller()->AddAudioTrack(); + auto video_sender_1 = caller()->AddVideoTrack(); + auto audio_sender_2 = caller()->AddAudioTrack(); + auto video_sender_2 = caller()->AddVideoTrack(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Caller data channel should already exist (it created one). Callee data - // channel may not exist yet, since negotiation happens in-band, not in SDP. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - // Ensure data can be sent in both directions. 
- std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); + + MediaExpectations media_expectations; + media_expectations.CalleeExpectsSomeAudioAndVideo(); + ASSERT_TRUE_WAIT(ExpectNewFrames(media_expectations), kDefaultTimeout); + + std::vector track_ids = { + audio_sender_1->track()->id(), video_sender_1->track()->id(), + audio_sender_2->track()->id(), video_sender_2->track()->id()}; + + rtc::scoped_refptr caller_report = + caller()->NewGetStats(); + ASSERT_TRUE(caller_report); + auto outbound_stream_stats = + caller_report->GetStatsOfType(); + ASSERT_EQ(outbound_stream_stats.size(), 4u); + std::vector outbound_track_ids; + for (const auto& stat : outbound_stream_stats) { + ASSERT_TRUE(stat->bytes_sent.is_defined()); + EXPECT_LT(0u, *stat->bytes_sent); + if (*stat->kind == "video") { + ASSERT_TRUE(stat->key_frames_encoded.is_defined()); + EXPECT_GT(*stat->key_frames_encoded, 0u); + ASSERT_TRUE(stat->frames_encoded.is_defined()); + EXPECT_GE(*stat->frames_encoded, *stat->key_frames_encoded); + } + ASSERT_TRUE(stat->track_id.is_defined()); + const auto* track_stat = + caller_report->GetAs(*stat->track_id); + ASSERT_TRUE(track_stat); + outbound_track_ids.push_back(*track_stat->track_identifier); + } + EXPECT_THAT(outbound_track_ids, UnorderedElementsAreArray(track_ids)); + + rtc::scoped_refptr callee_report = + callee()->NewGetStats(); + ASSERT_TRUE(callee_report); + auto inbound_stream_stats = + callee_report->GetStatsOfType(); + ASSERT_EQ(4u, inbound_stream_stats.size()); + std::vector inbound_track_ids; + for (const auto& stat : inbound_stream_stats) { + ASSERT_TRUE(stat->bytes_received.is_defined()); + EXPECT_LT(0u, *stat->bytes_received); + if (*stat->kind == "video") { + 
ASSERT_TRUE(stat->key_frames_decoded.is_defined()); + EXPECT_GT(*stat->key_frames_decoded, 0u); + ASSERT_TRUE(stat->frames_decoded.is_defined()); + EXPECT_GE(*stat->frames_decoded, *stat->key_frames_decoded); + } + ASSERT_TRUE(stat->track_id.is_defined()); + const auto* track_stat = + callee_report->GetAs(*stat->track_id); + ASSERT_TRUE(track_stat); + inbound_track_ids.push_back(*track_stat->track_identifier); + } + EXPECT_THAT(inbound_track_ids, UnorderedElementsAreArray(track_ids)); } -// Set up a connection initially just using SCTP data channels, later upgrading -// to audio/video, ensuring frames are received end-to-end. Effectively the -// inverse of the test above. -// This was broken in M57; see https://crbug.com/711243 -TEST_P(PeerConnectionIntegrationTest, SctpDataChannelToAudioVideoUpgrade) { +// Test that we can get stats (using the new stats implementation) for +// unsignaled streams. Meaning when SSRCs/MSIDs aren't signaled explicitly in +// SDP. +TEST_P(PeerConnectionIntegrationTest, + GetStatsForUnsignaledStreamWithNewStatsApi) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - // Do initial offer/answer with just data channel. - caller()->CreateDataChannel(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - // Wait until data can be sent over the data channel. - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - ASSERT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Do subsequent offer/answer with two-way audio and video. Audio and video - // should end up bundled on the DTLS/ICE transport already used for data. - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + caller()->AddAudioTrack(); + // Remove SSRCs and MSIDs from the received offer SDP. 
+ callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); + media_expectations.CalleeExpectsSomeAudio(1); ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} -static void MakeSpecCompliantSctpOffer(cricket::SessionDescription* desc) { - cricket::SctpDataContentDescription* dcd_offer = - GetFirstSctpDataContentDescription(desc); - // See https://crbug.com/webrtc/11211 - this function is a no-op - ASSERT_TRUE(dcd_offer); - dcd_offer->set_use_sctpmap(false); - dcd_offer->set_protocol("UDP/DTLS/SCTP"); + // We received a frame, so we should have nonzero "bytes received" stats for + // the unsignaled stream, if stats are working for it. + rtc::scoped_refptr report = + callee()->NewGetStats(); + ASSERT_NE(nullptr, report); + auto inbound_stream_stats = + report->GetStatsOfType(); + ASSERT_EQ(1U, inbound_stream_stats.size()); + ASSERT_TRUE(inbound_stream_stats[0]->bytes_received.is_defined()); + ASSERT_GT(*inbound_stream_stats[0]->bytes_received, 0U); + ASSERT_TRUE(inbound_stream_stats[0]->track_id.is_defined()); } -// Test that the data channel works when a spec-compliant SCTP m= section is -// offered (using "a=sctp-port" instead of "a=sctpmap", and using -// "UDP/DTLS/SCTP" as the protocol). +// Same as above but for the legacy stats implementation. TEST_P(PeerConnectionIntegrationTest, - DataChannelWorksWhenSpecCompliantSctpOfferReceived) { + GetStatsForUnsignaledStreamWithOldStatsApi) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - caller()->CreateDataChannel(); - caller()->SetGeneratedSdpMunger(MakeSpecCompliantSctpOffer); + caller()->AddAudioTrack(); + // Remove SSRCs and MSIDs from the received offer SDP. 
+ callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); + + // Note that, since the old stats implementation associates SSRCs with tracks + // using SDP, when SSRCs aren't signaled in SDP these stats won't have an + // associated track ID. So we can't use the track "selector" argument. + // + // Also, we use "EXPECT_TRUE_WAIT" because the stats collector may decide to + // return cached stats if not enough time has passed since the last update. + EXPECT_TRUE_WAIT(callee()->OldGetStats()->BytesReceived() > 0, + kDefaultTimeout); } -// Tests that the datagram transport to SCTP fallback works correctly when -// datagram transport negotiation fails. +// Test that we can successfully get the media related stats (audio level +// etc.) for the unsignaled stream. TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelFallbackToSctp) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - rtc_config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - rtc_config.use_datagram_transport_for_data_channels = true; - - // Configure one endpoint to use datagram transport for data channels while - // the other does not. 
- ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, RTCConfiguration(), - loopback_media_transports()->first_factory(), nullptr)); + GetMediaStatsForUnsignaledStreamWithNewStatsApi) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - - // The caller offers a data channel using either datagram transport or SCTP. - caller()->CreateDataChannel(); caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + // Remove SSRCs and MSIDs from the received offer SDP. + callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Negotiation should fallback to SCTP, allowing the data channel to be - // established. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Ensure that failure of the datagram negotiation doesn't impede media flow. MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); + media_expectations.CalleeExpectsSomeAudio(1); + media_expectations.CalleeExpectsSomeVideo(1); ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} -// Tests that the data channel transport works correctly when datagram transport -// negotiation succeeds and does not fall back to SCTP. 
-TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelDoesNotFallbackToSctp) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - rtc_config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - rtc_config.use_datagram_transport_for_data_channels = true; - - // Configure one endpoint to use datagram transport for data channels while - // the other does not. - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); + rtc::scoped_refptr report = + callee()->NewGetStats(); + ASSERT_NE(nullptr, report); - // The caller offers a data channel using either datagram transport or SCTP. - caller()->CreateDataChannel(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + auto media_stats = report->GetStatsOfType(); + auto audio_index = FindFirstMediaStatsIndexByKind("audio", media_stats); + ASSERT_GE(audio_index, 0); + EXPECT_TRUE(media_stats[audio_index]->audio_level.is_defined()); +} - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - // Negotiation should succeed, allowing the data channel to be established. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Ensure data can be sent in both directions. 
- std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Ensure that failure of the datagram negotiation doesn't impede media flow. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); +// Helper for test below. +void ModifySsrcs(cricket::SessionDescription* desc) { + for (ContentInfo& content : desc->contents()) { + for (StreamParams& stream : + content.media_description()->mutable_streams()) { + for (uint32_t& ssrc : stream.ssrcs) { + ssrc = rtc::CreateRandomId(); + } + } + } } -// Tests that the datagram transport to SCTP fallback works correctly when -// datagram transports do not advertise compatible transport parameters. +// Test that the "RTCMediaSteamTrackStats" object is updated correctly when +// SSRCs are unsignaled, and the SSRC of the received (audio) stream changes. +// This should result in two "RTCInboundRTPStreamStats", but only one +// "RTCMediaStreamTrackStats", whose counters go up continuously rather than +// being reset to 0 once the SSRC change occurs. +// +// Regression test for this bug: +// https://bugs.chromium.org/p/webrtc/issues/detail?id=8158 +// +// The bug causes the track stats to only represent one of the two streams: +// whichever one has the higher SSRC. So with this bug, there was a 50% chance +// that the track stat counters would reset to 0 when the new stream is +// received, and a 50% chance that they'll stop updating (while +// "concealed_samples" continues increasing, due to silence being generated for +// the inactive stream). 
TEST_P(PeerConnectionIntegrationTest, - DatagramTransportIncompatibleParametersFallsBackToSctp) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - rtc_config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - rtc_config.use_datagram_transport_for_data_channels = true; - - // By default, only equal parameters are compatible. - loopback_media_transports()->SetFirstDatagramTransportParameters("foo"); - loopback_media_transports()->SetSecondDatagramTransportParameters("bar"); - - // Configure one endpoint to use datagram transport for data channels while - // the other does not. - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); + TrackStatsUpdatedCorrectlyWhenUnsignaledSsrcChanges) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); - - // The caller offers a data channel using either datagram transport or SCTP. - caller()->CreateDataChannel(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + caller()->AddAudioTrack(); + // Remove SSRCs and MSIDs from the received offer SDP, simulating an endpoint + // that doesn't signal SSRCs (from the callee's perspective). + callee()->SetReceivedSdpMunger(RemoveSsrcsAndMsids); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Wait for 50 audio frames (500ms of audio) to be received by the callee. + { + MediaExpectations media_expectations; + media_expectations.CalleeExpectsSomeAudio(50); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + } + // Some audio frames were received, so we should have nonzero "samples + // received" for the track. 
+ rtc::scoped_refptr report = + callee()->NewGetStats(); + ASSERT_NE(nullptr, report); + auto track_stats = report->GetStatsOfType(); + ASSERT_EQ(1U, track_stats.size()); + ASSERT_TRUE(track_stats[0]->total_samples_received.is_defined()); + ASSERT_GT(*track_stats[0]->total_samples_received, 0U); + // uint64_t prev_samples_received = *track_stats[0]->total_samples_received; - // Negotiation should fallback to SCTP, allowing the data channel to be - // established. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Both endpoints should agree to use SCTP for data channels. - EXPECT_NE(nullptr, caller()->pc()->GetSctpTransport()); - EXPECT_NE(nullptr, callee()->pc()->GetSctpTransport()); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Ensure that failure of the datagram negotiation doesn't impede media flow. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// Tests that the datagram transport to SCTP fallback works correctly when -// only the answerer believes datagram transport parameters are incompatible. 
-TEST_P(PeerConnectionIntegrationTest, - DatagramTransportIncompatibleParametersOnAnswererFallsBackToSctp) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - rtc_config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - rtc_config.use_datagram_transport_for_data_channels = true; - - // By default, only equal parameters are compatible. - loopback_media_transports()->SetFirstDatagramTransportParameters("foo"); - loopback_media_transports()->SetSecondDatagramTransportParameters("bar"); - - // Set the offerer to accept different parameters, while the answerer rejects - // them. - loopback_media_transports()->SetFirstDatagramTransportParametersComparison( - [](absl::string_view a, absl::string_view b) { return true; }); - loopback_media_transports()->SetSecondDatagramTransportParametersComparison( - [](absl::string_view a, absl::string_view b) { return false; }); - - // Configure one endpoint to use datagram transport for data channels while - // the other does not. - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); - - // The caller offers a data channel using either datagram transport or SCTP. - caller()->CreateDataChannel(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + // Create a new offer and munge it to cause the caller to use a new SSRC. + caller()->SetGeneratedSdpMunger(ModifySsrcs); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Wait for 25 more audio frames (250ms of audio) to be received, from the new + // SSRC. 
+ { + MediaExpectations media_expectations; + media_expectations.CalleeExpectsSomeAudio(25); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + } - // Negotiation should fallback to SCTP, allowing the data channel to be - // established. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Both endpoints should agree to use SCTP for data channels. - EXPECT_NE(nullptr, caller()->pc()->GetSctpTransport()); - EXPECT_NE(nullptr, callee()->pc()->GetSctpTransport()); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Ensure that failure of the datagram negotiation doesn't impede media flow. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); -} - -// Tests that the data channel transport works correctly when datagram -// transports provide different, but compatible, transport parameters. -TEST_P(PeerConnectionIntegrationTest, - DatagramTransportCompatibleParametersDoNotFallbackToSctp) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - rtc_config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - rtc_config.use_datagram_transport_for_data_channels = true; - - // By default, only equal parameters are compatible. 
- loopback_media_transports()->SetFirstDatagramTransportParameters("foo"); - loopback_media_transports()->SetSecondDatagramTransportParameters("bar"); - - // Change the comparison used to treat these transport parameters are - // compatible (on both sides). - loopback_media_transports()->SetFirstDatagramTransportParametersComparison( - [](absl::string_view a, absl::string_view b) { return true; }); - loopback_media_transports()->SetSecondDatagramTransportParametersComparison( - [](absl::string_view a, absl::string_view b) { return true; }); - - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); + report = callee()->NewGetStats(); + ASSERT_NE(nullptr, report); + track_stats = report->GetStatsOfType(); + ASSERT_EQ(1U, track_stats.size()); + ASSERT_TRUE(track_stats[0]->total_samples_received.is_defined()); + // The "total samples received" stat should only be greater than it was + // before. + // TODO(deadbeef): Uncomment this assertion once the bug is completely fixed. + // Right now, the new SSRC will cause the counters to reset to 0. + // EXPECT_GT(*track_stats[0]->total_samples_received, prev_samples_received); - // The caller offers a data channel using either datagram transport or SCTP. - caller()->CreateDataChannel(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Additionally, the percentage of concealed samples (samples generated to + // conceal packet loss) should be less than 50%. If it's greater, that's a + // good sign that we're seeing stats from the old stream that's no longer + // receiving packets, and is generating concealed samples of silence. 
+ constexpr double kAcceptableConcealedSamplesPercentage = 0.50; + ASSERT_TRUE(track_stats[0]->concealed_samples.is_defined()); + EXPECT_LT(*track_stats[0]->concealed_samples, + *track_stats[0]->total_samples_received * + kAcceptableConcealedSamplesPercentage); - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - // Negotiation should succeed, allowing the data channel to be established. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Both endpoints should agree to use datagram transport for data channels. - EXPECT_EQ(nullptr, caller()->pc()->GetSctpTransport()); - EXPECT_EQ(nullptr, callee()->pc()->GetSctpTransport()); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Ensure that failure of the datagram negotiation doesn't impede media flow. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); + // Also ensure that we have two "RTCInboundRTPStreamStats" as expected, as a + // sanity check that the SSRC really changed. + // TODO(deadbeef): This isn't working right now, because we're not returning + // *any* stats for the inactive stream. Uncomment when the bug is completely + // fixed. 
+ // auto inbound_stream_stats = + // report->GetStatsOfType(); + // ASSERT_EQ(2U, inbound_stream_stats.size()); } -TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelWithMediaOnCaller) { - // Configure the caller to attempt use of datagram transport for media and - // data channels. - PeerConnectionInterface::RTCConfiguration offerer_config; - offerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - offerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - offerer_config.use_datagram_transport_for_data_channels = true; - offerer_config.use_datagram_transport = true; - - // Configure the callee to only use datagram transport for data channels. - PeerConnectionInterface::RTCConfiguration answerer_config; - answerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - answerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - answerer_config.use_datagram_transport_for_data_channels = true; - - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - offerer_config, answerer_config, - loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); +// Test that DTLS 1.0 is used if both sides only support DTLS 1.0. +TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithDtls10) { + PeerConnectionFactory::Options dtls_10_options; + dtls_10_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10; + ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_10_options, + dtls_10_options)); ConnectFakeSignaling(); - - // Offer both media and data. + // Do normal offer/answer and wait for some frames to be received in each + // direction. caller()->AddAudioVideoTracks(); callee()->AddAudioVideoTracks(); - caller()->CreateDataChannel(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. 
- loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Both endpoints should agree to use datagram transport for data channels. - EXPECT_EQ(nullptr, caller()->pc()->GetSctpTransport()); - EXPECT_EQ(nullptr, callee()->pc()->GetSctpTransport()); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Media flow should not be impacted. MediaExpectations media_expectations; media_expectations.ExpectBidirectionalAudioAndVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -TEST_P(PeerConnectionIntegrationTest, - DatagramTransportMediaWithDataChannelOnCaller) { - // Configure the caller to attempt use of datagram transport for media and - // data channels. - PeerConnectionInterface::RTCConfiguration offerer_config; - offerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - offerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - offerer_config.use_datagram_transport_for_data_channels = true; - offerer_config.use_datagram_transport = true; - - // Configure the callee to only use datagram transport for media. 
- PeerConnectionInterface::RTCConfiguration answerer_config; - answerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - answerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - answerer_config.use_datagram_transport = true; - - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - offerer_config, answerer_config, - loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); +// Test getting cipher stats and UMA metrics when DTLS 1.0 is negotiated. +TEST_P(PeerConnectionIntegrationTest, Dtls10CipherStatsAndUmaMetrics) { + PeerConnectionFactory::Options dtls_10_options; + dtls_10_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10; + ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_10_options, + dtls_10_options)); ConnectFakeSignaling(); - - // Offer both media and data. caller()->AddAudioVideoTracks(); callee()->AddAudioVideoTracks(); - caller()->CreateDataChannel(); caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Both endpoints should agree to use SCTP for data channels. - EXPECT_NE(nullptr, caller()->pc()->GetSctpTransport()); - EXPECT_NE(nullptr, callee()->pc()->GetSctpTransport()); - - // Ensure data can be sent in both directions. 
- std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Media flow should not be impacted. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); + ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); + EXPECT_TRUE_WAIT(rtc::SSLStreamAdapter::IsAcceptableCipher( + caller()->OldGetStats()->DtlsCipher(), rtc::KT_DEFAULT), + kDefaultTimeout); + EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite), + caller()->OldGetStats()->SrtpCipher(), kDefaultTimeout); + // TODO(bugs.webrtc.org/9456): Fix it. + EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents( + "WebRTC.PeerConnection.SrtpCryptoSuite.Audio", + kDefaultSrtpCryptoSuite)); } -TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelWithMediaOnCallee) { - // Configure the caller to attempt use of datagram transport for data - // channels. - PeerConnectionInterface::RTCConfiguration offerer_config; - offerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - offerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - offerer_config.use_datagram_transport_for_data_channels = true; - - // Configure the callee to use datagram transport for data channels and media. 
- PeerConnectionInterface::RTCConfiguration answerer_config; - answerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - answerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - answerer_config.use_datagram_transport_for_data_channels = true; - answerer_config.use_datagram_transport = true; - - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - offerer_config, answerer_config, - loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); +// Test getting cipher stats and UMA metrics when DTLS 1.2 is negotiated. +TEST_P(PeerConnectionIntegrationTest, Dtls12CipherStatsAndUmaMetrics) { + PeerConnectionFactory::Options dtls_12_options; + dtls_12_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12; + ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(dtls_12_options, + dtls_12_options)); ConnectFakeSignaling(); - - // Offer both media and data. caller()->AddAudioVideoTracks(); callee()->AddAudioVideoTracks(); - caller()->CreateDataChannel(); caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Both endpoints should agree to use datagram transport for data channels. - EXPECT_EQ(nullptr, caller()->pc()->GetSctpTransport()); - EXPECT_EQ(nullptr, callee()->pc()->GetSctpTransport()); - - // Ensure data can be sent in both directions. 
- std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Media flow should not be impacted. - MediaExpectations media_expectations; - media_expectations.ExpectBidirectionalAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); + ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); + EXPECT_TRUE_WAIT(rtc::SSLStreamAdapter::IsAcceptableCipher( + caller()->OldGetStats()->DtlsCipher(), rtc::KT_DEFAULT), + kDefaultTimeout); + EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(kDefaultSrtpCryptoSuite), + caller()->OldGetStats()->SrtpCipher(), kDefaultTimeout); + // TODO(bugs.webrtc.org/9456): Fix it. + EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents( + "WebRTC.PeerConnection.SrtpCryptoSuite.Audio", + kDefaultSrtpCryptoSuite)); } -TEST_P(PeerConnectionIntegrationTest, - DatagramTransportMediaWithDataChannelOnCallee) { - // Configure the caller to attempt use of datagram transport for media. - PeerConnectionInterface::RTCConfiguration offerer_config; - offerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - offerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - offerer_config.use_datagram_transport = true; - - // Configure the callee to only use datagram transport for media and data - // channels. 
- PeerConnectionInterface::RTCConfiguration answerer_config; - answerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - answerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - answerer_config.use_datagram_transport = true; - answerer_config.use_datagram_transport_for_data_channels = true; - - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - offerer_config, answerer_config, - loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); +// Test that DTLS 1.0 can be used if the caller supports DTLS 1.2 and the +// callee only supports 1.0. +TEST_P(PeerConnectionIntegrationTest, CallerDtls12ToCalleeDtls10) { + PeerConnectionFactory::Options caller_options; + caller_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12; + PeerConnectionFactory::Options callee_options; + callee_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10; + ASSERT_TRUE( + CreatePeerConnectionWrappersWithOptions(caller_options, callee_options)); ConnectFakeSignaling(); - - // Offer both media and data. + // Do normal offer/answer and wait for some frames to be received in each + // direction. caller()->AddAudioVideoTracks(); callee()->AddAudioVideoTracks(); - caller()->CreateDataChannel(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Both endpoints should agree to use SCTP for data channels. 
- EXPECT_NE(nullptr, caller()->pc()->GetSctpTransport()); - EXPECT_NE(nullptr, callee()->pc()->GetSctpTransport()); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Media flow should not be impacted. MediaExpectations media_expectations; media_expectations.ExpectBidirectionalAudioAndVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -TEST_P(PeerConnectionIntegrationTest, DatagramTransportDataChannelAndMedia) { - // Configure the caller to use datagram transport for data channels and media. - PeerConnectionInterface::RTCConfiguration offerer_config; - offerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - offerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - offerer_config.use_datagram_transport_for_data_channels = true; - offerer_config.use_datagram_transport = true; - - // Configure the callee to use datagram transport for data channels and media. - PeerConnectionInterface::RTCConfiguration answerer_config; - answerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - answerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - answerer_config.use_datagram_transport_for_data_channels = true; - answerer_config.use_datagram_transport = true; - - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - offerer_config, answerer_config, - loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); +// Test that DTLS 1.0 can be used if the caller only supports DTLS 1.0 and the +// callee supports 1.2. 
+TEST_P(PeerConnectionIntegrationTest, CallerDtls10ToCalleeDtls12) { + PeerConnectionFactory::Options caller_options; + caller_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_10; + PeerConnectionFactory::Options callee_options; + callee_options.ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12; + ASSERT_TRUE( + CreatePeerConnectionWrappersWithOptions(caller_options, callee_options)); ConnectFakeSignaling(); - - // Offer both media and data. + // Do normal offer/answer and wait for some frames to be received in each + // direction. caller()->AddAudioVideoTracks(); callee()->AddAudioVideoTracks(); - caller()->CreateDataChannel(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Both endpoints should agree to use datagram transport for data channels. - EXPECT_EQ(nullptr, caller()->pc()->GetSctpTransport()); - EXPECT_EQ(nullptr, callee()->pc()->GetSctpTransport()); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); - - // Media flow should not be impacted. 
MediaExpectations media_expectations; media_expectations.ExpectBidirectionalAudioAndVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Tests that data channels use SCTP instead of datagram transport if datagram -// transport is configured in receive-only mode on the caller. +// The three tests below verify that "enable_aes128_sha1_32_crypto_cipher" +// works as expected; the cipher should only be used if enabled by both sides. TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelReceiveOnlyOnCallerUsesSctp) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - rtc_config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - rtc_config.use_datagram_transport_for_data_channels = true; - rtc_config.use_datagram_transport_for_data_channels_receive_only = true; - - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); - - // The caller should offer a data channel using SCTP. - caller()->CreateDataChannel(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // SCTP transports should be present, since they are in use. - EXPECT_NE(caller()->pc()->GetSctpTransport(), nullptr); - EXPECT_NE(callee()->pc()->GetSctpTransport(), nullptr); - - // Ensure data can be sent in both directions. 
- std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); + Aes128Sha1_32_CipherNotUsedWhenOnlyCallerSupported) { + PeerConnectionFactory::Options caller_options; + caller_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = true; + PeerConnectionFactory::Options callee_options; + callee_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = + false; + int expected_cipher_suite = rtc::SRTP_AES128_CM_SHA1_80; + TestNegotiatedCipherSuite(caller_options, callee_options, + expected_cipher_suite); } -#endif // HAVE_SCTP - -// Tests that a callee configured for receive-only use of datagram transport -// data channels accepts them on incoming calls. TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelReceiveOnlyOnCallee) { - PeerConnectionInterface::RTCConfiguration offerer_config; - offerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - offerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - offerer_config.use_datagram_transport_for_data_channels = true; - - PeerConnectionInterface::RTCConfiguration answerer_config; - answerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - answerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - answerer_config.use_datagram_transport_for_data_channels = true; - answerer_config.use_datagram_transport_for_data_channels_receive_only = true; - - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - offerer_config, answerer_config, - loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); - - caller()->CreateDataChannel(); - 
caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // SCTP transports should not be present, since datagram transport is used. - EXPECT_EQ(caller()->pc()->GetSctpTransport(), nullptr); - EXPECT_EQ(callee()->pc()->GetSctpTransport(), nullptr); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); -} - -// This test sets up a call between two parties with a datagram transport data -// channel. -TEST_P(PeerConnectionIntegrationTest, DatagramTransportDataChannelEndToEnd) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - rtc_config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - rtc_config.use_datagram_transport_for_data_channels = true; - rtc_config.enable_dtls_srtp = false; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); - - // Expect that data channel created on caller side will show up for callee as - // well. 
- caller()->CreateDataChannel(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - // Caller data channel should already exist (it created one). Callee data - // channel may not exist yet, since negotiation happens in-band, not in SDP. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Ensure data can be sent in both directions. - std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); + Aes128Sha1_32_CipherNotUsedWhenOnlyCalleeSupported) { + PeerConnectionFactory::Options caller_options; + caller_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = + false; + PeerConnectionFactory::Options callee_options; + callee_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = true; + int expected_cipher_suite = rtc::SRTP_AES128_CM_SHA1_80; + TestNegotiatedCipherSuite(caller_options, callee_options, + expected_cipher_suite); } -// Tests that 'zero-rtt' data channel transports (which are ready-to-send as -// soon as they're created) work correctly. 
-TEST_P(PeerConnectionIntegrationTest, DatagramTransportDataChannelZeroRtt) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; - rtc_config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; - rtc_config.use_datagram_transport_for_data_channels = true; - rtc_config.enable_dtls_srtp = false; // SDES is required for media transport. - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); - - // Ensure that the callee's media transport is ready-to-send immediately. - // Note that only the callee can become writable in zero RTTs. The caller - // must wait for the callee's answer. - loopback_media_transports()->SetSecondStateAfterConnect( - webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - // Expect that data channel created on caller side will show up for callee as - // well. - caller()->CreateDataChannel(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - loopback_media_transports()->SetFirstState( - webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - // Caller data channel should already exist (it created one). Callee data - // channel may not exist yet, since negotiation happens in-band, not in SDP. - ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - EXPECT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Ensure data can be sent in both directions. 
- std::string data = "hello world"; - caller()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, callee()->data_observer()->last_message(), - kDefaultTimeout); - callee()->data_channel()->Send(DataBuffer(data)); - EXPECT_EQ_WAIT(data, caller()->data_observer()->last_message(), - kDefaultTimeout); +TEST_P(PeerConnectionIntegrationTest, Aes128Sha1_32_CipherUsedWhenSupported) { + PeerConnectionFactory::Options caller_options; + caller_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = true; + PeerConnectionFactory::Options callee_options; + callee_options.crypto_options.srtp.enable_aes128_sha1_32_crypto_cipher = true; + int expected_cipher_suite = rtc::SRTP_AES128_CM_SHA1_32; + TestNegotiatedCipherSuite(caller_options, callee_options, + expected_cipher_suite); } -// Ensures that when the callee closes a datagram transport data channel, the -// closing procedure results in the data channel being closed for the caller -// as well. -TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelCalleeCloses) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.use_datagram_transport_for_data_channels = true; - rtc_config.enable_dtls_srtp = false; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); - - // Create a data channel on the caller and signal it to the callee. - caller()->CreateDataChannel(); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. - loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - // Data channels exist and open on both ends of the connection. 
- ASSERT_NE(nullptr, caller()->data_channel()); - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - ASSERT_TRUE_WAIT(caller()->data_observer()->IsOpen(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - - // Close the data channel on the callee side, and wait for it to reach the - // "closed" state on both sides. - callee()->data_channel()->Close(); - EXPECT_TRUE_WAIT(!caller()->data_observer()->IsOpen(), kDefaultTimeout); - EXPECT_TRUE_WAIT(!callee()->data_observer()->IsOpen(), kDefaultTimeout); +// Test that a non-GCM cipher is used if both sides only support non-GCM. +TEST_P(PeerConnectionIntegrationTest, NonGcmCipherUsedWhenGcmNotSupported) { + bool local_gcm_enabled = false; + bool remote_gcm_enabled = false; + bool aes_ctr_enabled = true; + int expected_cipher_suite = kDefaultSrtpCryptoSuite; + TestGcmNegotiationUsesCipherSuite(local_gcm_enabled, remote_gcm_enabled, + aes_ctr_enabled, expected_cipher_suite); } -// Tests that datagram transport data channels can do in-band negotiation. -TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelConfigSentToOtherSide) { - PeerConnectionInterface::RTCConfiguration rtc_config; - rtc_config.use_datagram_transport_for_data_channels = true; - rtc_config.enable_dtls_srtp = false; - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - rtc_config, rtc_config, loopback_media_transports()->first_factory(), - loopback_media_transports()->second_factory())); - ConnectFakeSignaling(); - - // Create a data channel with a non-default configuration and signal it to the - // callee. - webrtc::DataChannelInit init; - init.id = 53; - init.maxRetransmits = 52; - caller()->CreateDataChannel("data-channel", &init); - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Ensure that the data channel transport is ready. 
- loopback_media_transports()->SetState(webrtc::MediaTransportState::kWritable); - loopback_media_transports()->FlushAsyncInvokes(); - - // Ensure that the data channel exists on the callee with the correct - // configuration. - ASSERT_TRUE_WAIT(callee()->data_channel() != nullptr, kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - // Since "negotiate" is false, the "id" parameter is ignored. - EXPECT_NE(init.id, callee()->data_channel()->id()); - EXPECT_EQ("data-channel", callee()->data_channel()->label()); - EXPECT_EQ(init.maxRetransmits, callee()->data_channel()->maxRetransmits()); - EXPECT_FALSE(callee()->data_channel()->negotiated()); +// Test that a GCM cipher is used if both ends support it and non-GCM is +// disabled. +TEST_P(PeerConnectionIntegrationTest, GcmCipherUsedWhenOnlyGcmSupported) { + bool local_gcm_enabled = true; + bool remote_gcm_enabled = true; + bool aes_ctr_enabled = false; + int expected_cipher_suite = kDefaultSrtpCryptoSuiteGcm; + TestGcmNegotiationUsesCipherSuite(local_gcm_enabled, remote_gcm_enabled, + aes_ctr_enabled, expected_cipher_suite); } -TEST_P(PeerConnectionIntegrationTest, - DatagramTransportDataChannelRejectedWithNoFallback) { - PeerConnectionInterface::RTCConfiguration offerer_config; - offerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - offerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - offerer_config.use_datagram_transport_for_data_channels = true; - // Disabling DTLS precludes a fallback to SCTP. - offerer_config.enable_dtls_srtp = false; - - PeerConnectionInterface::RTCConfiguration answerer_config; - answerer_config.rtcp_mux_policy = - PeerConnectionInterface::kRtcpMuxPolicyRequire; - answerer_config.bundle_policy = - PeerConnectionInterface::kBundlePolicyMaxBundle; - // Both endpoints must disable DTLS or SetRemoteDescription will fail. 
- answerer_config.enable_dtls_srtp = false; - - // Configure one endpoint to use datagram transport for data channels while - // the other does not. - ASSERT_TRUE(CreatePeerConnectionWrappersWithConfigAndMediaTransportFactory( - offerer_config, answerer_config, - loopback_media_transports()->first_factory(), nullptr)); +// Verify that media can be transmitted end-to-end when GCM crypto suites are +// enabled. Note that the above tests, such as GcmCipherUsedWhenGcmSupported, +// only verify that a GCM cipher is negotiated, and not necessarily that SRTP +// works with it. +TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithGcmCipher) { + PeerConnectionFactory::Options gcm_options; + gcm_options.crypto_options.srtp.enable_gcm_crypto_suites = true; + gcm_options.crypto_options.srtp.enable_aes128_sha1_80_crypto_cipher = false; + ASSERT_TRUE( + CreatePeerConnectionWrappersWithOptions(gcm_options, gcm_options)); ConnectFakeSignaling(); - - // The caller offers a data channel using either datagram transport or SCTP. - caller()->CreateDataChannel(); + // Do normal offer/answer and wait for some frames to be received in each + // direction. caller()->AddAudioVideoTracks(); callee()->AddAudioVideoTracks(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - - // Caller data channel should already exist (it created one). Callee data - // channel should not exist, since negotiation happens in-band, not in SDP. - EXPECT_NE(nullptr, caller()->data_channel()); - EXPECT_EQ(nullptr, callee()->data_channel()); - - // The caller's data channel should close when the datagram transport is - // rejected. - EXPECT_FALSE(caller()->data_observer()->IsOpen()); - - // Media flow should not be impacted by the failed data channel. 
MediaExpectations media_expectations; media_expectations.ExpectBidirectionalAudioAndVideo(); ASSERT_TRUE(ExpectNewFrames(media_expectations)); @@ -4771,14 +1857,25 @@ TEST_P(PeerConnectionIntegrationTest, IceStatesReachCompletion) { callee()->ice_connection_state(), kDefaultTimeout); } +#if !defined(THREAD_SANITIZER) +// This test provokes TSAN errors. See bugs.webrtc.org/3608 + constexpr int kOnlyLocalPorts = cricket::PORTALLOCATOR_DISABLE_STUN | cricket::PORTALLOCATOR_DISABLE_RELAY | cricket::PORTALLOCATOR_DISABLE_TCP; // Use a mock resolver to resolve the hostname back to the original IP on both // sides and check that the ICE connection connects. +// TODO(bugs.webrtc.org/12590): Flaky on Windows and on Linux MSAN. +#if defined(WEBRTC_WIN) || defined(WEBRTC_LINUX) +#define MAYBE_IceStatesReachCompletionWithRemoteHostname \ + DISABLED_IceStatesReachCompletionWithRemoteHostname +#else +#define MAYBE_IceStatesReachCompletionWithRemoteHostname \ + IceStatesReachCompletionWithRemoteHostname +#endif TEST_P(PeerConnectionIntegrationTest, - IceStatesReachCompletionWithRemoteHostname) { + MAYBE_IceStatesReachCompletionWithRemoteHostname) { auto caller_resolver_factory = std::make_unique>(); auto callee_resolver_factory = @@ -4831,6 +1928,8 @@ TEST_P(PeerConnectionIntegrationTest, webrtc::kIceCandidatePairHostNameHostName)); } +#endif // !defined(THREAD_SANITIZER) + // Test that firewalling the ICE connection causes the clients to identify the // disconnected state and then removing the firewall causes them to reconnect. 
class PeerConnectionIntegrationIceStatesTest @@ -4845,7 +1944,7 @@ class PeerConnectionIntegrationIceStatesTest void StartStunServer(const SocketAddress& server_address) { stun_server_.reset( - cricket::TestStunServer::Create(network_thread(), server_address)); + cricket::TestStunServer::Create(firewall(), server_address)); } bool TestIPv6() { @@ -4899,6 +1998,9 @@ class PeerConnectionIntegrationIceStatesTestWithFakeClock : public FakeClockForTest, public PeerConnectionIntegrationIceStatesTest {}; +#if !defined(THREAD_SANITIZER) +// This test provokes TSAN errors. bugs.webrtc.org/11282 + // Tests that the PeerConnection goes through all the ICE gathering/connection // states over the duration of the call. This includes Disconnected and Failed // states, induced by putting a firewall between the peers and waiting for them @@ -5025,9 +2127,17 @@ TEST_P(PeerConnectionIntegrationIceStatesTestWithFakeClock, kConsentTimeout, FakeClock()); } +#endif // !defined(THREAD_SANITIZER) + // Tests that the best connection is set to the appropriate IPv4/IPv6 connection // and that the statistics in the metric observers are updated correctly. -TEST_P(PeerConnectionIntegrationIceStatesTest, VerifyBestConnection) { +// TODO(bugs.webrtc.org/12591): Flaky on Windows. +#if defined(WEBRTC_WIN) +#define MAYBE_VerifyBestConnection DISABLED_VerifyBestConnection +#else +#define MAYBE_VerifyBestConnection VerifyBestConnection +#endif +TEST_P(PeerConnectionIntegrationIceStatesTest, MAYBE_VerifyBestConnection) { ASSERT_TRUE(CreatePeerConnectionWrappers()); ConnectFakeSignaling(); SetPortAllocatorFlags(); @@ -5201,8 +2311,16 @@ TEST_P(PeerConnectionIntegrationTest, EndToEndCallWithIceRenomination) { // With a max bundle policy and RTCP muxing, adding a new media description to // the connection should not affect ICE at all because the new media will use // the existing connection. +// TODO(bugs.webrtc.org/12538): Fails on tsan. 
+#if defined(THREAD_SANITIZER) +#define MAYBE_AddMediaToConnectedBundleDoesNotRestartIce \ + DISABLED_AddMediaToConnectedBundleDoesNotRestartIce +#else +#define MAYBE_AddMediaToConnectedBundleDoesNotRestartIce \ + AddMediaToConnectedBundleDoesNotRestartIce +#endif TEST_P(PeerConnectionIntegrationTest, - AddMediaToConnectedBundleDoesNotRestartIce) { + MAYBE_AddMediaToConnectedBundleDoesNotRestartIce) { PeerConnectionInterface::RTCConfiguration config; config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire; @@ -5247,7 +2365,9 @@ TEST_P(PeerConnectionIntegrationTest, callee()->SetOfferAnswerOptions(options); } else { callee()->SetRemoteOfferHandler([this] { - callee()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO)->Stop(); + callee() + ->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO) + ->StopInternal(); }); } caller()->CreateAndSetAndSignalOffer(); @@ -5268,7 +2388,7 @@ TEST_P(PeerConnectionIntegrationTest, // The caller's transceiver is stopped, so we need to add another track. 
auto caller_transceiver = caller()->GetFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO); - EXPECT_TRUE(caller_transceiver->stopped()); + EXPECT_EQ(nullptr, caller_transceiver.get()); caller()->AddVideoTrack(); } callee()->AddVideoTrack(); @@ -5331,9 +2451,9 @@ TEST_F(PeerConnectionIntegrationTestUnifiedPlan, auto caller_video_sender = video_result.MoveValue()->sender(); callee()->SetRemoteOfferHandler([this] { ASSERT_EQ(2u, callee()->pc()->GetTransceivers().size()); - callee()->pc()->GetTransceivers()[0]->SetDirection( + callee()->pc()->GetTransceivers()[0]->SetDirectionWithError( RtpTransceiverDirection::kSendRecv); - callee()->pc()->GetTransceivers()[1]->SetDirection( + callee()->pc()->GetTransceivers()[1]->SetDirectionWithError( RtpTransceiverDirection::kSendRecv); }); caller()->CreateAndSetAndSignalOffer(); @@ -5379,6 +2499,9 @@ TEST_F(PeerConnectionIntegrationTestPlanB, CanSendRemoteVideoTrack) { ASSERT_TRUE(ExpectNewFrames(media_expectations)); } +#if !defined(THREAD_SANITIZER) +// This test provokes TSAN errors. bugs.webrtc.org/11282 + // Test that we achieve the expected end-to-end connection time, using a // fake clock and simulated latency on the media and signaling paths. // We use a TURN<->TURN connection because this is usually the quickest to @@ -5469,6 +2592,8 @@ TEST_P(PeerConnectionIntegrationTestWithFakeClock, ClosePeerConnections(); } +#endif // !defined(THREAD_SANITIZER) + // Verify that a TurnCustomizer passed in through RTCConfiguration // is actually used by the underlying TURN candidate pair. // Note that turnport_unittest.cc contains more detailed, lower-level tests. 
@@ -5702,14 +2827,13 @@ TEST_P(PeerConnectionIntegrationTest, IceTransportFactoryUsedForConnections) { auto ice_transport_factory = std::make_unique(); EXPECT_CALL(*ice_transport_factory, RecordIceTransportCreated()).Times(1); dependencies.ice_transport_factory = std::move(ice_transport_factory); - auto wrapper = CreatePeerConnectionWrapper( - "Caller", nullptr, &default_config, std::move(dependencies), nullptr, - nullptr, /*reset_encoder_factory=*/false, - /*reset_decoder_factory=*/false); + auto wrapper = CreatePeerConnectionWrapper("Caller", nullptr, &default_config, + std::move(dependencies), nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); ASSERT_TRUE(wrapper); wrapper->CreateDataChannel(); - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = rtc::make_ref_counted(); wrapper->pc()->SetLocalDescription(observer, wrapper->CreateOfferAndWait().release()); } @@ -5891,6 +3015,9 @@ TEST_P(PeerConnectionIntegrationTest, MediaFlowsWhenCandidatesSetOnlyInSdp) { ASSERT_TRUE(ExpectNewFrames(media_expectations)); } +#if !defined(THREAD_SANITIZER) +// These tests provokes TSAN errors. See bugs.webrtc.org/11305. + // Test that SetAudioPlayout can be used to disable audio playout from the // start, then later enable it. 
This may be useful, for example, if the caller // needs to play a local ringtone until some event occurs, after which it @@ -5922,7 +3049,7 @@ TEST_P(PeerConnectionIntegrationTest, DisableAndEnableAudioPlayout) { ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -double GetAudioEnergyStat(PeerConnectionWrapper* pc) { +double GetAudioEnergyStat(PeerConnectionIntegrationWrapper* pc) { auto report = pc->NewGetStats(); auto track_stats_list = report->GetStatsOfType(); @@ -5961,6 +3088,8 @@ TEST_P(PeerConnectionIntegrationTest, EXPECT_TRUE_WAIT(GetAudioEnergyStat(caller()) > 0, kMaxWaitForFramesMs); } +#endif // !defined(THREAD_SANITIZER) + // Test that SetAudioRecording can be used to disable audio recording from the // start, then later enable it. This may be useful, for example, if the caller // wants to ensure that no audio resources are active before a certain state @@ -5992,51 +3121,6 @@ TEST_P(PeerConnectionIntegrationTest, DisableAndEnableAudioRecording) { ASSERT_TRUE(ExpectNewFrames(media_expectations)); } -// Test that after closing PeerConnections, they stop sending any packets (ICE, -// DTLS, RTP...). -TEST_P(PeerConnectionIntegrationTest, ClosingConnectionStopsPacketFlow) { - // Set up audio/video/data, wait for some frames to be received. - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->AddAudioVideoTracks(); -#ifdef HAVE_SCTP - caller()->CreateDataChannel(); -#endif - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - MediaExpectations media_expectations; - media_expectations.CalleeExpectsSomeAudioAndVideo(); - ASSERT_TRUE(ExpectNewFrames(media_expectations)); - // Close PeerConnections. - ClosePeerConnections(); - // Pump messages for a second, and ensure no new packets end up sent. 
- uint32_t sent_packets_a = virtual_socket_server()->sent_packets(); - WAIT(false, 1000); - uint32_t sent_packets_b = virtual_socket_server()->sent_packets(); - EXPECT_EQ(sent_packets_a, sent_packets_b); -} - -// Test that transport stats are generated by the RTCStatsCollector for a -// connection that only involves data channels. This is a regression test for -// crbug.com/826972. -#ifdef HAVE_SCTP -TEST_P(PeerConnectionIntegrationTest, - TransportStatsReportedForDataChannelOnlyConnection) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->CreateDataChannel(); - - caller()->CreateAndSetAndSignalOffer(); - ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); - - auto caller_report = caller()->NewGetStats(); - EXPECT_EQ(1u, caller_report->GetStatsOfType().size()); - auto callee_report = callee()->NewGetStats(); - EXPECT_EQ(1u, callee_report->GetStatsOfType().size()); -} -#endif // HAVE_SCTP - TEST_P(PeerConnectionIntegrationTest, IceEventsGeneratedAndLoggedInRtcEventLog) { ASSERT_TRUE(CreatePeerConnectionWrappersWithFakeRtcEventLog()); @@ -6049,11 +3133,9 @@ TEST_P(PeerConnectionIntegrationTest, ASSERT_NE(nullptr, caller()->event_log_factory()); ASSERT_NE(nullptr, callee()->event_log_factory()); webrtc::FakeRtcEventLog* caller_event_log = - static_cast( - caller()->event_log_factory()->last_log_created()); + caller()->event_log_factory()->last_log_created(); webrtc::FakeRtcEventLog* callee_event_log = - static_cast( - callee()->event_log_factory()->last_log_created()); + callee()->event_log_factory()->last_log_created(); ASSERT_NE(nullptr, caller_event_log); ASSERT_NE(nullptr, callee_event_log); int caller_ice_config_count = caller_event_log->GetEventCount( @@ -6134,6 +3216,23 @@ TEST_P(PeerConnectionIntegrationTest, RegatherAfterChangingIceTransportType) { callee()->pc()->SetConfiguration(callee_config); EXPECT_EQ_WAIT(cricket::LOCAL_PORT_TYPE, 
callee()->last_candidate_gathered().type(), kDefaultTimeout); + + // Create an offer and verify that it does not contain an ICE restart (i.e new + // ice credentials). + std::string caller_ufrag_pre_offer = caller() + ->pc() + ->local_description() + ->description() + ->transport_infos()[0] + .description.ice_ufrag; + caller()->CreateAndSetAndSignalOffer(); + std::string caller_ufrag_post_offer = caller() + ->pc() + ->local_description() + ->description() + ->transport_infos()[0] + .description.ice_ufrag; + EXPECT_EQ(caller_ufrag_pre_offer, caller_ufrag_post_offer); } TEST_P(PeerConnectionIntegrationTest, OnIceCandidateError) { @@ -6219,8 +3318,7 @@ TEST_F(PeerConnectionIntegrationTestUnifiedPlan, SetSignalIceCandidates(false); // Workaround candidate outrace sdp. caller()->AddVideoTrack(); callee()->AddVideoTrack(); - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = rtc::make_ref_counted(); callee()->pc()->SetLocalDescription(observer, callee()->CreateOfferAndWait().release()); EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout); @@ -6237,15 +3335,15 @@ TEST_F(PeerConnectionIntegrationTestUnifiedPlan, ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); - rtc::scoped_refptr sld_observer( - new rtc::RefCountedObject()); + auto sld_observer = + rtc::make_ref_counted(); callee()->pc()->SetLocalDescription(sld_observer, callee()->CreateOfferAndWait().release()); EXPECT_TRUE_WAIT(sld_observer->called(), kDefaultTimeout); EXPECT_EQ(sld_observer->error(), ""); - rtc::scoped_refptr srd_observer( - new rtc::RefCountedObject()); + auto srd_observer = + rtc::make_ref_counted(); callee()->pc()->SetRemoteDescription( srd_observer, caller()->CreateOfferAndWait().release()); EXPECT_TRUE_WAIT(srd_observer->called(), kDefaultTimeout); @@ -6257,6 +3355,164 @@ TEST_F(PeerConnectionIntegrationTestUnifiedPlan, PeerConnectionInterface::kHaveRemoteOffer)); } +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + 
H264FmtpSpsPpsIdrInKeyframeParameterUsage) { + ASSERT_TRUE(CreatePeerConnectionWrappers()); + ConnectFakeSignaling(); + caller()->AddVideoTrack(); + callee()->AddVideoTrack(); + auto munger = [](cricket::SessionDescription* desc) { + cricket::VideoContentDescription* video = + GetFirstVideoContentDescription(desc); + auto codecs = video->codecs(); + for (auto&& codec : codecs) { + if (codec.name == "H264") { + std::string value; + // The parameter is not supposed to be present in SDP by default. + EXPECT_FALSE( + codec.GetParam(cricket::kH264FmtpSpsPpsIdrInKeyframe, &value)); + codec.SetParam(std::string(cricket::kH264FmtpSpsPpsIdrInKeyframe), + std::string("")); + } + } + video->set_codecs(codecs); + }; + // Munge local offer for SLD. + caller()->SetGeneratedSdpMunger(munger); + // Munge remote answer for SRD. + caller()->SetReceivedSdpMunger(munger); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Observe that after munging the parameter is present in generated SDP. 
+ caller()->SetGeneratedSdpMunger([](cricket::SessionDescription* desc) { + cricket::VideoContentDescription* video = + GetFirstVideoContentDescription(desc); + for (auto&& codec : video->codecs()) { + if (codec.name == "H264") { + std::string value; + EXPECT_TRUE( + codec.GetParam(cricket::kH264FmtpSpsPpsIdrInKeyframe, &value)); + } + } + }); + caller()->CreateOfferAndWait(); +} + +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + RenegotiateManyAudioTransceivers) { + PeerConnectionInterface::RTCConfiguration config; + config.sdp_semantics = SdpSemantics::kUnifiedPlan; + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); + ConnectFakeSignaling(); + caller()->pc()->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); + + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + int current_size = caller()->pc()->GetTransceivers().size(); + // Add more tracks until we get close to having issues. + // Issues have been seen at: + // - 32 tracks on android_arm64_rel and android_arm_dbg bots + // - 16 tracks on android_arm_dbg (flaky) + while (current_size < 8) { + // Double the number of tracks + for (int i = 0; i < current_size; i++) { + caller()->pc()->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); + } + current_size = caller()->pc()->GetTransceivers().size(); + RTC_LOG(LS_INFO) << "Renegotiating with " << current_size << " tracks"; + auto start_time_ms = rtc::TimeMillis(); + caller()->CreateAndSetAndSignalOffer(); + // We want to stop when the time exceeds one second. 
+ ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + auto elapsed_time_ms = rtc::TimeMillis() - start_time_ms; + RTC_LOG(LS_INFO) << "Renegotiating took " << elapsed_time_ms << " ms"; + ASSERT_GT(1000, elapsed_time_ms) + << "Audio transceivers: Negotiation took too long after " + << current_size << " tracks added"; + } +} + +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + RenegotiateManyVideoTransceivers) { + PeerConnectionInterface::RTCConfiguration config; + config.sdp_semantics = SdpSemantics::kUnifiedPlan; + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); + ConnectFakeSignaling(); + caller()->pc()->AddTransceiver(cricket::MEDIA_TYPE_VIDEO); + + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + int current_size = caller()->pc()->GetTransceivers().size(); + // Add more tracks until we get close to having issues. + // Issues have been seen at: + // - 96 on a Linux workstation + // - 64 at win_x86_more_configs and win_x64_msvc_dbg + // - 32 on android_arm64_rel and linux_dbg bots + // - 16 on Android 64 (Nexus 5x) + while (current_size < 8) { + // Double the number of tracks + for (int i = 0; i < current_size; i++) { + caller()->pc()->AddTransceiver(cricket::MEDIA_TYPE_VIDEO); + } + current_size = caller()->pc()->GetTransceivers().size(); + RTC_LOG(LS_INFO) << "Renegotiating with " << current_size << " tracks"; + auto start_time_ms = rtc::TimeMillis(); + caller()->CreateAndSetAndSignalOffer(); + // We want to stop when the time exceeds one second. 
+ ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + auto elapsed_time_ms = rtc::TimeMillis() - start_time_ms; + RTC_LOG(LS_INFO) << "Renegotiating took " << elapsed_time_ms << " ms"; + ASSERT_GT(1000, elapsed_time_ms) + << "Video transceivers: Negotiation took too long after " + << current_size << " tracks added"; + } +} + +TEST_F(PeerConnectionIntegrationTestUnifiedPlan, + RenegotiateManyVideoTransceiversAndWatchAudioDelay) { + PeerConnectionInterface::RTCConfiguration config; + config.sdp_semantics = SdpSemantics::kUnifiedPlan; + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); + ConnectFakeSignaling(); + caller()->AddAudioTrack(); + callee()->AddAudioTrack(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + // Wait until we can see the audio flowing. + MediaExpectations media_expectations; + media_expectations.CalleeExpectsSomeAudio(); + ASSERT_TRUE(ExpectNewFrames(media_expectations)); + + // Get the baseline numbers for audio_packets and audio_delay + // in both directions. + caller()->StartWatchingDelayStats(); + callee()->StartWatchingDelayStats(); + + int current_size = caller()->pc()->GetTransceivers().size(); + // Add more tracks until we get close to having issues. + // Making this number very large makes the test very slow. + while (current_size < 16) { + // Double the number of tracks + for (int i = 0; i < current_size; i++) { + caller()->pc()->AddTransceiver(cricket::MEDIA_TYPE_VIDEO); + } + current_size = caller()->pc()->GetTransceivers().size(); + RTC_LOG(LS_INFO) << "Renegotiating with " << current_size << " tracks"; + auto start_time_ms = rtc::TimeMillis(); + caller()->CreateAndSetAndSignalOffer(); + // We want to stop when the time exceeds one second. 
+ ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + auto elapsed_time_ms = rtc::TimeMillis() - start_time_ms; + RTC_LOG(LS_INFO) << "Renegotiating took " << elapsed_time_ms << " ms"; + // This is a guard against the test using excessive amounts of time. + ASSERT_GT(5000, elapsed_time_ms) + << "Video transceivers: Negotiation took too long after " + << current_size << " tracks added"; + caller()->UpdateDelayStats("caller reception", current_size); + callee()->UpdateDelayStats("callee reception", current_size); + } +} + INSTANTIATE_TEST_SUITE_P(PeerConnectionIntegrationTest, PeerConnectionIntegrationTest, Values(SdpSemantics::kPlanB, @@ -6276,7 +3532,7 @@ class PeerConnectionIntegrationInteropTest protected: // Setting the SdpSemantics for the base test to kDefault does not matter // because we specify not to use the test semantics when creating - // PeerConnectionWrappers. + // PeerConnectionIntegrationWrappers. PeerConnectionIntegrationInteropTest() : PeerConnectionIntegrationBaseTest(SdpSemantics::kPlanB), caller_semantics_(std::get<0>(GetParam())), @@ -6437,7 +3693,7 @@ TEST_F(PeerConnectionIntegrationTestUnifiedPlan, ASSERT_TRUE(ExpectNewFrames(media_expectations)); } - audio_transceiver->Stop(); + audio_transceiver->StopInternal(); caller()->pc()->AddTransceiver(caller()->CreateLocalVideoTrack()); caller()->CreateAndSetAndSignalOffer(); @@ -6449,77 +3705,104 @@ TEST_F(PeerConnectionIntegrationTestUnifiedPlan, } } -#ifdef HAVE_SCTP - TEST_F(PeerConnectionIntegrationTestUnifiedPlan, - EndToEndCallWithBundledSctpDataChannel) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); + StopTransceiverRemovesDtlsTransports) { + RTCConfiguration config; + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); ConnectFakeSignaling(); - caller()->CreateDataChannel(); - caller()->AddAudioVideoTracks(); - callee()->AddAudioVideoTracks(); + auto audio_transceiver_or_error = + caller()->pc()->AddTransceiver(caller()->CreateLocalAudioTrack()); + 
ASSERT_TRUE(audio_transceiver_or_error.ok()); + auto audio_transceiver = audio_transceiver_or_error.MoveValue(); + + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + + audio_transceiver->StopStandard(); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_EQ_WAIT(SctpTransportState::kConnected, - caller()->pc()->GetSctpTransport()->Information().state(), - kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); + ASSERT_EQ(0U, caller()->pc()->GetTransceivers().size()); + EXPECT_EQ(PeerConnectionInterface::kIceGatheringNew, + caller()->pc()->ice_gathering_state()); + EXPECT_THAT(caller()->ice_gathering_state_history(), + ElementsAre(PeerConnectionInterface::kIceGatheringGathering, + PeerConnectionInterface::kIceGatheringComplete, + PeerConnectionInterface::kIceGatheringNew)); } TEST_F(PeerConnectionIntegrationTestUnifiedPlan, - EndToEndCallWithDataChannelOnlyConnects) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); + StopTransceiverStopsAndRemovesTransceivers) { + RTCConfiguration config; + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); ConnectFakeSignaling(); - caller()->CreateDataChannel(); + auto audio_transceiver_or_error = + caller()->pc()->AddTransceiver(caller()->CreateLocalAudioTrack()); + ASSERT_TRUE(audio_transceiver_or_error.ok()); + auto caller_transceiver = audio_transceiver_or_error.MoveValue(); + caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_channel(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - ASSERT_TRUE(caller()->data_observer()->IsOpen()); -} + caller_transceiver->StopStandard(); -TEST_F(PeerConnectionIntegrationTestUnifiedPlan, DataChannelClosesWhenClosed) { - 
ASSERT_TRUE(CreatePeerConnectionWrappers()); - ConnectFakeSignaling(); - caller()->CreateDataChannel(); + auto callee_transceiver = callee()->pc()->GetTransceivers()[0]; caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - caller()->data_channel()->Close(); - ASSERT_TRUE_WAIT(!callee()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_EQ(0U, caller()->pc()->GetTransceivers().size()); + EXPECT_EQ(0U, callee()->pc()->GetTransceivers().size()); + EXPECT_EQ(0U, caller()->pc()->GetSenders().size()); + EXPECT_EQ(0U, callee()->pc()->GetSenders().size()); + EXPECT_EQ(0U, caller()->pc()->GetReceivers().size()); + EXPECT_EQ(0U, callee()->pc()->GetReceivers().size()); + EXPECT_TRUE(caller_transceiver->stopped()); + EXPECT_TRUE(callee_transceiver->stopped()); } TEST_F(PeerConnectionIntegrationTestUnifiedPlan, - DataChannelClosesWhenClosedReverse) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); + StopTransceiverEndsIncomingAudioTrack) { + RTCConfiguration config; + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); ConnectFakeSignaling(); - caller()->CreateDataChannel(); + auto audio_transceiver_or_error = + caller()->pc()->AddTransceiver(caller()->CreateLocalAudioTrack()); + ASSERT_TRUE(audio_transceiver_or_error.ok()); + auto audio_transceiver = audio_transceiver_or_error.MoveValue(); + + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + auto caller_track = audio_transceiver->receiver()->track(); + auto callee_track = callee()->pc()->GetReceivers()[0]->track(); + audio_transceiver->StopStandard(); + EXPECT_EQ(MediaStreamTrackInterface::TrackState::kEnded, + caller_track->state()); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - 
ASSERT_TRUE_WAIT(callee()->data_observer(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - callee()->data_channel()->Close(); - ASSERT_TRUE_WAIT(!caller()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_EQ(MediaStreamTrackInterface::TrackState::kEnded, + callee_track->state()); } TEST_F(PeerConnectionIntegrationTestUnifiedPlan, - DataChannelClosesWhenPeerConnectionClosed) { - ASSERT_TRUE(CreatePeerConnectionWrappers()); + StopTransceiverEndsIncomingVideoTrack) { + RTCConfiguration config; + ASSERT_TRUE(CreatePeerConnectionWrappersWithConfig(config, config)); ConnectFakeSignaling(); - caller()->CreateDataChannel(); + auto audio_transceiver_or_error = + caller()->pc()->AddTransceiver(caller()->CreateLocalVideoTrack()); + ASSERT_TRUE(audio_transceiver_or_error.ok()); + auto audio_transceiver = audio_transceiver_or_error.MoveValue(); + + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); + auto caller_track = audio_transceiver->receiver()->track(); + auto callee_track = callee()->pc()->GetReceivers()[0]->track(); + audio_transceiver->StopStandard(); + EXPECT_EQ(MediaStreamTrackInterface::TrackState::kEnded, + caller_track->state()); caller()->CreateAndSetAndSignalOffer(); ASSERT_TRUE_WAIT(SignalingStateStable(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer(), kDefaultTimeout); - ASSERT_TRUE_WAIT(callee()->data_observer()->IsOpen(), kDefaultTimeout); - caller()->pc()->Close(); - ASSERT_TRUE_WAIT(!callee()->data_observer()->IsOpen(), kDefaultTimeout); + EXPECT_EQ(MediaStreamTrackInterface::TrackState::kEnded, + callee_track->state()); } -#endif // HAVE_SCTP - } // namespace -} // namespace webrtc -#endif // if !defined(THREAD_SANITIZER) +} // namespace webrtc diff --git a/pc/peer_connection_interface_unittest.cc b/pc/peer_connection_interface_unittest.cc index 2f4fdf734a..fcea842b22 100644 --- a/pc/peer_connection_interface_unittest.cc +++ 
b/pc/peer_connection_interface_unittest.cc @@ -43,6 +43,7 @@ #include "api/rtp_transceiver_interface.h" #include "api/scoped_refptr.h" #include "api/task_queue/default_task_queue_factory.h" +#include "api/transport/field_trial_based_config.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" #include "api/video_codecs/video_decoder_factory.h" @@ -646,19 +647,21 @@ class PeerConnectionFactoryForTest : public webrtc::PeerConnectionFactory { dependencies.network_thread = rtc::Thread::Current(); dependencies.signaling_thread = rtc::Thread::Current(); dependencies.task_queue_factory = CreateDefaultTaskQueueFactory(); + dependencies.trials = std::make_unique(); cricket::MediaEngineDependencies media_deps; media_deps.task_queue_factory = dependencies.task_queue_factory.get(); // Use fake audio device module since we're only testing the interface // level, and using a real one could make tests flaky when run in parallel. media_deps.adm = FakeAudioCaptureModule::Create(); SetMediaEngineDefaults(&media_deps); + media_deps.trials = dependencies.trials.get(); dependencies.media_engine = cricket::CreateMediaEngine(std::move(media_deps)); dependencies.call_factory = webrtc::CreateCallFactory(); dependencies.event_log_factory = std::make_unique( dependencies.task_queue_factory.get()); - return new rtc::RefCountedObject( + return rtc::make_ref_counted( std::move(dependencies)); } @@ -680,7 +683,7 @@ class PeerConnectionInterfaceBaseTest : public ::testing::Test { #endif } - virtual void SetUp() { + void SetUp() override { // Use fake audio capture module since we're only testing the interface // level, and using a real one could make tests flaky when run in parallel. 
fake_audio_capture_module_ = FakeAudioCaptureModule::Create(); @@ -696,7 +699,11 @@ class PeerConnectionInterfaceBaseTest : public ::testing::Test { ASSERT_TRUE(pc_factory_); pc_factory_for_test_ = PeerConnectionFactoryForTest::CreatePeerConnectionFactoryForTest(); - pc_factory_for_test_->Initialize(); + } + + void TearDown() override { + if (pc_) + pc_->Close(); } void CreatePeerConnection() { @@ -732,6 +739,10 @@ class PeerConnectionInterfaceBaseTest : public ::testing::Test { } void CreatePeerConnection(const RTCConfiguration& config) { + if (pc_) { + pc_->Close(); + pc_ = nullptr; + } std::unique_ptr port_allocator( new cricket::FakePortAllocator(rtc::Thread::Current(), nullptr)); port_allocator_ = port_allocator.get(); @@ -868,8 +879,8 @@ class PeerConnectionInterfaceBaseTest : public ::testing::Test { bool DoCreateOfferAnswer(std::unique_ptr* desc, const RTCOfferAnswerOptions* options, bool offer) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = + rtc::make_ref_counted(); if (offer) { pc_->CreateOffer(observer, options ? *options : RTCOfferAnswerOptions()); } else { @@ -893,8 +904,7 @@ class PeerConnectionInterfaceBaseTest : public ::testing::Test { bool DoSetSessionDescription( std::unique_ptr desc, bool local) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = rtc::make_ref_counted(); if (local) { pc_->SetLocalDescription(observer, desc.release()); } else { @@ -920,8 +930,7 @@ class PeerConnectionInterfaceBaseTest : public ::testing::Test { // It does not verify the values in the StatReports since a RTCP packet might // be required. 
bool DoGetStats(MediaStreamTrackInterface* track) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = rtc::make_ref_counted(); if (!pc_->GetStats(observer, track, PeerConnectionInterface::kStatsOutputLevelStandard)) return false; @@ -931,8 +940,8 @@ class PeerConnectionInterfaceBaseTest : public ::testing::Test { // Call the standards-compliant GetStats function. bool DoGetRTCStats() { - rtc::scoped_refptr callback( - new rtc::RefCountedObject()); + auto callback = + rtc::make_ref_counted(); pc_->GetStats(callback); EXPECT_TRUE_WAIT(callback->called(), kTimeout); return callback->called(); @@ -1187,8 +1196,8 @@ class PeerConnectionInterfaceBaseTest : public ::testing::Test { std::unique_ptr CreateOfferWithOptions( const RTCOfferAnswerOptions& offer_answer_options) { RTC_DCHECK(pc_); - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = + rtc::make_ref_counted(); pc_->CreateOffer(observer, offer_answer_options); EXPECT_EQ_WAIT(true, observer->called(), kTimeout); return observer->MoveDescription(); @@ -1421,15 +1430,11 @@ TEST_P(PeerConnectionInterfaceTest, GetConfigurationAfterSetConfiguration) { PeerConnectionInterface::RTCConfiguration config = pc_->GetConfiguration(); config.type = PeerConnectionInterface::kRelay; - config.use_datagram_transport = true; - config.use_datagram_transport_for_data_channels = true; EXPECT_TRUE(pc_->SetConfiguration(config).ok()); PeerConnectionInterface::RTCConfiguration returned_config = pc_->GetConfiguration(); EXPECT_EQ(PeerConnectionInterface::kRelay, returned_config.type); - EXPECT_TRUE(returned_config.use_datagram_transport); - EXPECT_TRUE(returned_config.use_datagram_transport_for_data_channels); } TEST_P(PeerConnectionInterfaceTest, SetConfigurationFailsAfterClose) { @@ -1894,179 +1899,6 @@ TEST_P(PeerConnectionInterfaceTest, GetRTCStatsBeforeAndAfterCalling) { EXPECT_TRUE(DoGetRTCStats()); } -// This test setup two RTP data channels in loop back. 
-TEST_P(PeerConnectionInterfaceTest, TestDataChannel) { - RTCConfiguration config; - config.enable_rtp_data_channel = true; - config.enable_dtls_srtp = false; - CreatePeerConnection(config); - rtc::scoped_refptr data1 = - pc_->CreateDataChannel("test1", NULL); - rtc::scoped_refptr data2 = - pc_->CreateDataChannel("test2", NULL); - ASSERT_TRUE(data1 != NULL); - std::unique_ptr observer1( - new MockDataChannelObserver(data1)); - std::unique_ptr observer2( - new MockDataChannelObserver(data2)); - - EXPECT_EQ(DataChannelInterface::kConnecting, data1->state()); - EXPECT_EQ(DataChannelInterface::kConnecting, data2->state()); - std::string data_to_send1 = "testing testing"; - std::string data_to_send2 = "testing something else"; - EXPECT_FALSE(data1->Send(DataBuffer(data_to_send1))); - - CreateOfferReceiveAnswer(); - EXPECT_TRUE_WAIT(observer1->IsOpen(), kTimeout); - EXPECT_TRUE_WAIT(observer2->IsOpen(), kTimeout); - - EXPECT_EQ(DataChannelInterface::kOpen, data1->state()); - EXPECT_EQ(DataChannelInterface::kOpen, data2->state()); - EXPECT_TRUE(data1->Send(DataBuffer(data_to_send1))); - EXPECT_TRUE(data2->Send(DataBuffer(data_to_send2))); - - EXPECT_EQ_WAIT(data_to_send1, observer1->last_message(), kTimeout); - EXPECT_EQ_WAIT(data_to_send2, observer2->last_message(), kTimeout); - - data1->Close(); - EXPECT_EQ(DataChannelInterface::kClosing, data1->state()); - CreateOfferReceiveAnswer(); - EXPECT_FALSE(observer1->IsOpen()); - EXPECT_EQ(DataChannelInterface::kClosed, data1->state()); - EXPECT_TRUE(observer2->IsOpen()); - - data_to_send2 = "testing something else again"; - EXPECT_TRUE(data2->Send(DataBuffer(data_to_send2))); - - EXPECT_EQ_WAIT(data_to_send2, observer2->last_message(), kTimeout); -} - -// This test verifies that sendnig binary data over RTP data channels should -// fail. 
-TEST_P(PeerConnectionInterfaceTest, TestSendBinaryOnRtpDataChannel) { - RTCConfiguration config; - config.enable_rtp_data_channel = true; - config.enable_dtls_srtp = false; - CreatePeerConnection(config); - rtc::scoped_refptr data1 = - pc_->CreateDataChannel("test1", NULL); - rtc::scoped_refptr data2 = - pc_->CreateDataChannel("test2", NULL); - ASSERT_TRUE(data1 != NULL); - std::unique_ptr observer1( - new MockDataChannelObserver(data1)); - std::unique_ptr observer2( - new MockDataChannelObserver(data2)); - - EXPECT_EQ(DataChannelInterface::kConnecting, data1->state()); - EXPECT_EQ(DataChannelInterface::kConnecting, data2->state()); - - CreateOfferReceiveAnswer(); - EXPECT_TRUE_WAIT(observer1->IsOpen(), kTimeout); - EXPECT_TRUE_WAIT(observer2->IsOpen(), kTimeout); - - EXPECT_EQ(DataChannelInterface::kOpen, data1->state()); - EXPECT_EQ(DataChannelInterface::kOpen, data2->state()); - - rtc::CopyOnWriteBuffer buffer("test", 4); - EXPECT_FALSE(data1->Send(DataBuffer(buffer, true))); -} - -// This test setup a RTP data channels in loop back and test that a channel is -// opened even if the remote end answer with a zero SSRC. -TEST_P(PeerConnectionInterfaceTest, TestSendOnlyDataChannel) { - RTCConfiguration config; - config.enable_rtp_data_channel = true; - config.enable_dtls_srtp = false; - CreatePeerConnection(config); - rtc::scoped_refptr data1 = - pc_->CreateDataChannel("test1", NULL); - std::unique_ptr observer1( - new MockDataChannelObserver(data1)); - - CreateOfferReceiveAnswerWithoutSsrc(); - - EXPECT_TRUE_WAIT(observer1->IsOpen(), kTimeout); - - data1->Close(); - EXPECT_EQ(DataChannelInterface::kClosing, data1->state()); - CreateOfferReceiveAnswerWithoutSsrc(); - EXPECT_EQ(DataChannelInterface::kClosed, data1->state()); - EXPECT_FALSE(observer1->IsOpen()); -} - -// This test that if a data channel is added in an answer a receive only channel -// channel is created. 
-TEST_P(PeerConnectionInterfaceTest, TestReceiveOnlyDataChannel) { - RTCConfiguration config; - config.enable_rtp_data_channel = true; - config.enable_dtls_srtp = false; - - CreatePeerConnection(config); - - std::string offer_label = "offer_channel"; - rtc::scoped_refptr offer_channel = - pc_->CreateDataChannel(offer_label, NULL); - - CreateOfferAsLocalDescription(); - - // Replace the data channel label in the offer and apply it as an answer. - std::string receive_label = "answer_channel"; - std::string sdp; - EXPECT_TRUE(pc_->local_description()->ToString(&sdp)); - absl::StrReplaceAll({{offer_label, receive_label}}, &sdp); - CreateAnswerAsRemoteDescription(sdp); - - // Verify that a new incoming data channel has been created and that - // it is open but can't we written to. - ASSERT_TRUE(observer_.last_datachannel_ != NULL); - DataChannelInterface* received_channel = observer_.last_datachannel_; - EXPECT_EQ(DataChannelInterface::kConnecting, received_channel->state()); - EXPECT_EQ(receive_label, received_channel->label()); - EXPECT_FALSE(received_channel->Send(DataBuffer("something"))); - - // Verify that the channel we initially offered has been rejected. - EXPECT_EQ(DataChannelInterface::kClosed, offer_channel->state()); - - // Do another offer / answer exchange and verify that the data channel is - // opened. - CreateOfferReceiveAnswer(); - EXPECT_EQ_WAIT(DataChannelInterface::kOpen, received_channel->state(), - kTimeout); -} - -// This test that no data channel is returned if a reliable channel is -// requested. -// TODO(perkj): Remove this test once reliable channels are implemented. 
-TEST_P(PeerConnectionInterfaceTest, CreateReliableRtpDataChannelShouldFail) { - RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - CreatePeerConnection(rtc_config); - - std::string label = "test"; - webrtc::DataChannelInit config; - config.reliable = true; - rtc::scoped_refptr channel = - pc_->CreateDataChannel(label, &config); - EXPECT_TRUE(channel == NULL); -} - -// Verifies that duplicated label is not allowed for RTP data channel. -TEST_P(PeerConnectionInterfaceTest, RtpDuplicatedLabelNotAllowed) { - RTCConfiguration config; - config.enable_rtp_data_channel = true; - CreatePeerConnection(config); - - std::string label = "test"; - rtc::scoped_refptr channel = - pc_->CreateDataChannel(label, nullptr); - EXPECT_NE(channel, nullptr); - - rtc::scoped_refptr dup_channel = - pc_->CreateDataChannel(label, nullptr); - EXPECT_EQ(dup_channel, nullptr); -} - // This tests that a SCTP data channel is returned using different // DataChannelInit configurations. TEST_P(PeerConnectionInterfaceTest, CreateSctpDataChannel) { @@ -2184,80 +2016,8 @@ TEST_P(PeerConnectionInterfaceTest, SctpDuplicatedLabelAllowed) { EXPECT_NE(dup_channel, nullptr); } -// This test verifies that OnRenegotiationNeeded is fired for every new RTP -// DataChannel. -TEST_P(PeerConnectionInterfaceTest, RenegotiationNeededForNewRtpDataChannel) { - RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - rtc_config.enable_dtls_srtp = false; - CreatePeerConnection(rtc_config); - - rtc::scoped_refptr dc1 = - pc_->CreateDataChannel("test1", NULL); - EXPECT_TRUE(observer_.renegotiation_needed_); - observer_.renegotiation_needed_ = false; - - CreateOfferReceiveAnswer(); - - rtc::scoped_refptr dc2 = - pc_->CreateDataChannel("test2", NULL); - EXPECT_EQ(observer_.renegotiation_needed_, - GetParam() == SdpSemantics::kPlanB); -} - -// This test that a data channel closes when a PeerConnection is deleted/closed. 
-TEST_P(PeerConnectionInterfaceTest, DataChannelCloseWhenPeerConnectionClose) { - RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - rtc_config.enable_dtls_srtp = false; - CreatePeerConnection(rtc_config); - - rtc::scoped_refptr data1 = - pc_->CreateDataChannel("test1", NULL); - rtc::scoped_refptr data2 = - pc_->CreateDataChannel("test2", NULL); - ASSERT_TRUE(data1 != NULL); - std::unique_ptr observer1( - new MockDataChannelObserver(data1)); - std::unique_ptr observer2( - new MockDataChannelObserver(data2)); - - CreateOfferReceiveAnswer(); - EXPECT_TRUE_WAIT(observer1->IsOpen(), kTimeout); - EXPECT_TRUE_WAIT(observer2->IsOpen(), kTimeout); - ReleasePeerConnection(); - EXPECT_EQ(DataChannelInterface::kClosed, data1->state()); - EXPECT_EQ(DataChannelInterface::kClosed, data2->state()); -} - -// This tests that RTP data channels can be rejected in an answer. -TEST_P(PeerConnectionInterfaceTest, TestRejectRtpDataChannelInAnswer) { - RTCConfiguration rtc_config; - rtc_config.enable_rtp_data_channel = true; - rtc_config.enable_dtls_srtp = false; - CreatePeerConnection(rtc_config); - - rtc::scoped_refptr offer_channel( - pc_->CreateDataChannel("offer_channel", NULL)); - - CreateOfferAsLocalDescription(); - - // Create an answer where the m-line for data channels are rejected. - std::string sdp; - EXPECT_TRUE(pc_->local_description()->ToString(&sdp)); - std::unique_ptr answer( - webrtc::CreateSessionDescription(SdpType::kAnswer, sdp)); - ASSERT_TRUE(answer); - cricket::ContentInfo* data_info = - cricket::GetFirstDataContent(answer->description()); - data_info->rejected = true; - - DoSetRemoteDescription(std::move(answer)); - EXPECT_EQ(DataChannelInterface::kClosed, offer_channel->state()); -} - -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP // This tests that SCTP data channels can be rejected in an answer. 
TEST_P(PeerConnectionInterfaceTest, TestRejectSctpDataChannelInAnswer) #else @@ -2312,7 +2072,7 @@ TEST_P(PeerConnectionInterfaceTest, ReceiveFireFoxOffer) { cricket::GetFirstVideoContent(pc_->local_description()->description()); ASSERT_TRUE(content != NULL); EXPECT_FALSE(content->rejected); -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP content = cricket::GetFirstDataContent(pc_->local_description()->description()); ASSERT_TRUE(content != NULL); @@ -2672,23 +2432,24 @@ TEST_P(PeerConnectionInterfaceTest, CloseAndTestStreamsAndStates) { EXPECT_EQ(1u, pc_->local_streams()->count()); EXPECT_EQ(1u, pc_->remote_streams()->count()); } else { - // Verify that the RtpTransceivers are still present but all stopped. + // Verify that the RtpTransceivers are still returned. EXPECT_EQ(2u, pc_->GetTransceivers().size()); - for (const auto& transceiver : pc_->GetTransceivers()) { - EXPECT_TRUE(transceiver->stopped()); - } } auto audio_receiver = GetFirstReceiverOfType(cricket::MEDIA_TYPE_AUDIO); - ASSERT_TRUE(audio_receiver); auto video_receiver = GetFirstReceiverOfType(cricket::MEDIA_TYPE_VIDEO); - ASSERT_TRUE(video_receiver); - - // Track state may be updated asynchronously. - EXPECT_EQ_WAIT(MediaStreamTrackInterface::kEnded, - audio_receiver->track()->state(), kTimeout); - EXPECT_EQ_WAIT(MediaStreamTrackInterface::kEnded, - video_receiver->track()->state(), kTimeout); + if (sdp_semantics_ == SdpSemantics::kPlanB) { + ASSERT_TRUE(audio_receiver); + ASSERT_TRUE(video_receiver); + // Track state may be updated asynchronously. 
+ EXPECT_EQ_WAIT(MediaStreamTrackInterface::kEnded, + audio_receiver->track()->state(), kTimeout); + EXPECT_EQ_WAIT(MediaStreamTrackInterface::kEnded, + video_receiver->track()->state(), kTimeout); + } else { + ASSERT_FALSE(audio_receiver); + ASSERT_FALSE(video_receiver); + } } // Test that PeerConnection methods fails gracefully after @@ -3594,12 +3355,12 @@ TEST_F(PeerConnectionInterfaceTestPlanB, // Test that negotiation can succeed with a data channel only, and with the max // bundle policy. Previously there was a bug that prevented this. -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP TEST_P(PeerConnectionInterfaceTest, DataChannelOnlyOfferWithMaxBundlePolicy) { #else TEST_P(PeerConnectionInterfaceTest, DISABLED_DataChannelOnlyOfferWithMaxBundlePolicy) { -#endif // HAVE_SCTP +#endif // WEBRTC_HAVE_SCTP PeerConnectionInterface::RTCConfiguration config; config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle; CreatePeerConnection(config); @@ -3619,44 +3380,44 @@ TEST_P(PeerConnectionInterfaceTest, TEST_P(PeerConnectionInterfaceTest, SetBitrateWithoutMinSucceeds) { CreatePeerConnection(); - PeerConnectionInterface::BitrateParameters bitrate; - bitrate.current_bitrate_bps = 100000; + BitrateSettings bitrate; + bitrate.start_bitrate_bps = 100000; EXPECT_TRUE(pc_->SetBitrate(bitrate).ok()); } TEST_P(PeerConnectionInterfaceTest, SetBitrateNegativeMinFails) { CreatePeerConnection(); - PeerConnectionInterface::BitrateParameters bitrate; + BitrateSettings bitrate; bitrate.min_bitrate_bps = -1; EXPECT_FALSE(pc_->SetBitrate(bitrate).ok()); } TEST_P(PeerConnectionInterfaceTest, SetBitrateCurrentLessThanMinFails) { CreatePeerConnection(); - PeerConnectionInterface::BitrateParameters bitrate; + BitrateSettings bitrate; bitrate.min_bitrate_bps = 5; - bitrate.current_bitrate_bps = 3; + bitrate.start_bitrate_bps = 3; EXPECT_FALSE(pc_->SetBitrate(bitrate).ok()); } TEST_P(PeerConnectionInterfaceTest, SetBitrateCurrentNegativeFails) { CreatePeerConnection(); - 
PeerConnectionInterface::BitrateParameters bitrate; - bitrate.current_bitrate_bps = -1; + BitrateSettings bitrate; + bitrate.start_bitrate_bps = -1; EXPECT_FALSE(pc_->SetBitrate(bitrate).ok()); } TEST_P(PeerConnectionInterfaceTest, SetBitrateMaxLessThanCurrentFails) { CreatePeerConnection(); - PeerConnectionInterface::BitrateParameters bitrate; - bitrate.current_bitrate_bps = 10; + BitrateSettings bitrate; + bitrate.start_bitrate_bps = 10; bitrate.max_bitrate_bps = 8; EXPECT_FALSE(pc_->SetBitrate(bitrate).ok()); } TEST_P(PeerConnectionInterfaceTest, SetBitrateMaxLessThanMinFails) { CreatePeerConnection(); - PeerConnectionInterface::BitrateParameters bitrate; + BitrateSettings bitrate; bitrate.min_bitrate_bps = 10; bitrate.max_bitrate_bps = 8; EXPECT_FALSE(pc_->SetBitrate(bitrate).ok()); @@ -3664,7 +3425,7 @@ TEST_P(PeerConnectionInterfaceTest, SetBitrateMaxLessThanMinFails) { TEST_P(PeerConnectionInterfaceTest, SetBitrateMaxNegativeFails) { CreatePeerConnection(); - PeerConnectionInterface::BitrateParameters bitrate; + BitrateSettings bitrate; bitrate.max_bitrate_bps = -1; EXPECT_FALSE(pc_->SetBitrate(bitrate).ok()); } @@ -3675,8 +3436,8 @@ TEST_P(PeerConnectionInterfaceTest, SetBitrateMaxNegativeFails) { // be clamped succeeds. TEST_P(PeerConnectionInterfaceTest, SetBitrateCurrentLessThanImplicitMin) { CreatePeerConnection(); - PeerConnectionInterface::BitrateParameters bitrate; - bitrate.current_bitrate_bps = 1; + BitrateSettings bitrate; + bitrate.start_bitrate_bps = 1; EXPECT_TRUE(pc_->SetBitrate(bitrate).ok()); } @@ -3901,17 +3662,17 @@ TEST_P(PeerConnectionInterfaceTest, TEST_P(PeerConnectionInterfaceTest, ExtmapAllowMixedIsConfigurable) { RTCConfiguration config; - // Default behavior is false. + // Default behavior is true. CreatePeerConnection(config); std::unique_ptr offer; ASSERT_TRUE(DoCreateOffer(&offer, nullptr)); - EXPECT_FALSE(offer->description()->extmap_allow_mixed()); - // Possible to set to true. 
- config.offer_extmap_allow_mixed = true; + EXPECT_TRUE(offer->description()->extmap_allow_mixed()); + // Possible to set to false. + config.offer_extmap_allow_mixed = false; CreatePeerConnection(config); - offer.release(); + offer = nullptr; ASSERT_TRUE(DoCreateOffer(&offer, nullptr)); - EXPECT_TRUE(offer->description()->extmap_allow_mixed()); + EXPECT_FALSE(offer->description()->extmap_allow_mixed()); } INSTANTIATE_TEST_SUITE_P(PeerConnectionInterfaceTest, @@ -3923,7 +3684,6 @@ class PeerConnectionMediaConfigTest : public ::testing::Test { protected: void SetUp() override { pcf_ = PeerConnectionFactoryForTest::CreatePeerConnectionFactoryForTest(); - pcf_->Initialize(); } const cricket::MediaConfig TestCreatePeerConnection( const RTCConfiguration& config) { diff --git a/pc/peer_connection_internal.h b/pc/peer_connection_internal.h index 52ffe85c2c..6f97612914 100644 --- a/pc/peer_connection_internal.h +++ b/pc/peer_connection_internal.h @@ -19,8 +19,8 @@ #include "api/peer_connection_interface.h" #include "call/call.h" -#include "pc/data_channel.h" #include "pc/rtp_transceiver.h" +#include "pc/sctp_data_channel.h" namespace webrtc { @@ -29,7 +29,6 @@ class PeerConnectionInternal : public PeerConnectionInterface { public: virtual rtc::Thread* network_thread() const = 0; virtual rtc::Thread* worker_thread() const = 0; - virtual rtc::Thread* signaling_thread() const = 0; // The SDP session ID as defined by RFC 3264. virtual std::string session_id() const = 0; @@ -41,23 +40,23 @@ class PeerConnectionInternal : public PeerConnectionInterface { rtc::scoped_refptr>> GetTransceiversInternal() const = 0; - virtual sigslot::signal1& SignalDataChannelCreated() = 0; + virtual sigslot::signal1& + SignalSctpDataChannelCreated() = 0; - // Only valid when using deprecated RTP data channels. 
- virtual cricket::RtpDataChannel* rtp_data_channel() const = 0; - - virtual std::vector> sctp_data_channels() - const = 0; + // Call on the network thread to fetch stats for all the data channels. + // TODO(tommi): Make pure virtual after downstream updates. + virtual std::vector GetDataChannelStats() const { + return {}; + } virtual absl::optional sctp_transport_name() const = 0; + virtual absl::optional sctp_mid() const = 0; virtual cricket::CandidateStatsList GetPooledCandidateStats() const = 0; - // Returns a map from MID to transport name for all active media sections. - virtual std::map GetTransportNamesByMid() const = 0; - // Returns a map from transport name to transport stats for all given // transport names. + // Must be called on the network thread. virtual std::map GetTransportStatsByNames(const std::set& transport_names) = 0; diff --git a/pc/peer_connection_jsep_unittest.cc b/pc/peer_connection_jsep_unittest.cc index 0b2f375dde..4713068a15 100644 --- a/pc/peer_connection_jsep_unittest.cc +++ b/pc/peer_connection_jsep_unittest.cc @@ -11,6 +11,7 @@ #include #include "api/task_queue/default_task_queue_factory.h" +#include "api/transport/field_trial_based_config.h" #include "media/engine/webrtc_media_engine.h" #include "media/engine/webrtc_media_engine_defaults.h" #include "pc/media_session.h" @@ -21,10 +22,10 @@ #include "pc/test/android_test_initializer.h" #endif #include "pc/test/fake_audio_capture_module.h" -#include "pc/test/fake_sctp_transport.h" #include "rtc_base/gunit.h" #include "rtc_base/virtual_socket_server.h" #include "test/gmock.h" +#include "test/pc/sctp/fake_sctp_transport.h" // This file contains tests that ensure the PeerConnection's implementation of // CreateOffer/CreateAnswer/SetLocalDescription/SetRemoteDescription conform @@ -41,30 +42,23 @@ using ::testing::ElementsAre; using ::testing::UnorderedElementsAre; using ::testing::Values; -class PeerConnectionFactoryForJsepTest : public PeerConnectionFactory { - public: - 
PeerConnectionFactoryForJsepTest() - : PeerConnectionFactory([] { - PeerConnectionFactoryDependencies dependencies; - dependencies.worker_thread = rtc::Thread::Current(); - dependencies.network_thread = rtc::Thread::Current(); - dependencies.signaling_thread = rtc::Thread::Current(); - dependencies.task_queue_factory = CreateDefaultTaskQueueFactory(); - cricket::MediaEngineDependencies media_deps; - media_deps.task_queue_factory = dependencies.task_queue_factory.get(); - media_deps.adm = FakeAudioCaptureModule::Create(); - SetMediaEngineDefaults(&media_deps); - dependencies.media_engine = - cricket::CreateMediaEngine(std::move(media_deps)); - dependencies.call_factory = CreateCallFactory(); - return dependencies; - }()) {} - - std::unique_ptr - CreateSctpTransportInternalFactory() { - return std::make_unique(); - } -}; +PeerConnectionFactoryDependencies CreatePeerConnectionFactoryDependencies() { + PeerConnectionFactoryDependencies dependencies; + dependencies.worker_thread = rtc::Thread::Current(); + dependencies.network_thread = rtc::Thread::Current(); + dependencies.signaling_thread = rtc::Thread::Current(); + dependencies.task_queue_factory = CreateDefaultTaskQueueFactory(); + dependencies.trials = std::make_unique(); + cricket::MediaEngineDependencies media_deps; + media_deps.task_queue_factory = dependencies.task_queue_factory.get(); + media_deps.adm = FakeAudioCaptureModule::Create(); + media_deps.trials = dependencies.trials.get(); + SetMediaEngineDefaults(&media_deps); + dependencies.media_engine = cricket::CreateMediaEngine(std::move(media_deps)); + dependencies.call_factory = CreateCallFactory(); + dependencies.sctp_factory = std::make_unique(); + return dependencies; +} class PeerConnectionJsepTest : public ::testing::Test { protected: @@ -84,9 +78,9 @@ class PeerConnectionJsepTest : public ::testing::Test { } WrapperPtr CreatePeerConnection(const RTCConfiguration& config) { - rtc::scoped_refptr pc_factory( - new rtc::RefCountedObject()); - 
RTC_CHECK(pc_factory->Initialize()); + rtc::scoped_refptr pc_factory = + CreateModularPeerConnectionFactory( + CreatePeerConnectionFactoryDependencies()); auto observer = std::make_unique(); auto pc = pc_factory->CreatePeerConnection(config, nullptr, nullptr, observer.get()); @@ -212,7 +206,7 @@ TEST_F(PeerConnectionJsepTest, StoppedTransceiverHasNoMediaSectionInInitialOffer) { auto caller = CreatePeerConnection(); auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); - transceiver->Stop(); + transceiver->StopInternal(); auto offer = caller->CreateOffer(); EXPECT_EQ(0u, offer->description()->contents().size()); @@ -300,7 +294,7 @@ TEST_F(PeerConnectionJsepTest, auto caller = CreatePeerConnection(); caller->AddAudioTrack("a"); auto caller_audio = caller->pc()->GetTransceivers()[0]; - caller_audio->SetDirection(RtpTransceiverDirection::kSendOnly); + caller_audio->SetDirectionWithError(RtpTransceiverDirection::kSendOnly); auto callee = CreatePeerConnection(); callee->AddAudioTrack("a"); @@ -358,16 +352,18 @@ TEST_F(PeerConnectionJsepTest, SetRemoteOfferDoesNotReuseStoppedTransceiver) { caller->AddAudioTrack("a"); auto callee = CreatePeerConnection(); callee->AddAudioTrack("a"); - callee->pc()->GetTransceivers()[0]->Stop(); + callee->pc()->GetTransceivers()[0]->StopInternal(); ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); auto transceivers = callee->pc()->GetTransceivers(); ASSERT_EQ(2u, transceivers.size()); - EXPECT_EQ(absl::nullopt, transceivers[0]->mid()); - EXPECT_TRUE(transceivers[0]->stopped()); - EXPECT_EQ(caller->pc()->GetTransceivers()[0]->mid(), transceivers[1]->mid()); - EXPECT_FALSE(transceivers[1]->stopped()); + // The stopped transceiver is removed in SetLocalDescription(answer) + ASSERT_TRUE(callee->SetLocalDescription(callee->CreateAnswer())); + transceivers = callee->pc()->GetTransceivers(); + ASSERT_EQ(1u, transceivers.size()); + EXPECT_EQ(caller->pc()->GetTransceivers()[0]->mid(), 
transceivers[0]->mid()); + EXPECT_FALSE(transceivers[0]->stopped()); } // Test that audio and video transceivers created on the remote side with @@ -432,7 +428,7 @@ TEST_F(PeerConnectionJsepTest, CreateAnswerRejectsStoppedTransceiver) { ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); - callee->pc()->GetTransceivers()[0]->Stop(); + callee->pc()->GetTransceivers()[0]->StopInternal(); auto answer = callee->CreateAnswer(); auto contents = answer->description()->contents(); @@ -469,7 +465,7 @@ TEST_F(PeerConnectionJsepTest, CreateAnswerNegotiatesDirection) { TEST_F(PeerConnectionJsepTest, SetLocalAnswerUpdatesCurrentDirection) { auto caller = CreatePeerConnection(); auto caller_audio = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); - caller_audio->SetDirection(RtpTransceiverDirection::kRecvOnly); + caller_audio->SetDirectionWithError(RtpTransceiverDirection::kRecvOnly); auto callee = CreatePeerConnection(); callee->AddAudioTrack("a"); @@ -494,7 +490,7 @@ TEST_F(PeerConnectionJsepTest, SetRemoteAnswerUpdatesCurrentDirection) { auto callee = CreatePeerConnection(); callee->AddAudioTrack("a"); auto callee_audio = callee->pc()->GetTransceivers()[0]; - callee_audio->SetDirection(RtpTransceiverDirection::kSendOnly); + callee_audio->SetDirectionWithError(RtpTransceiverDirection::kSendOnly); ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); ASSERT_TRUE( @@ -518,7 +514,7 @@ TEST_F(PeerConnectionJsepTest, SettingTransceiverInactiveDoesNotStopIt) { caller->AddAudioTrack("a"); auto callee = CreatePeerConnection(); callee->AddAudioTrack("a"); - callee->pc()->GetTransceivers()[0]->SetDirection( + callee->pc()->GetTransceivers()[0]->SetDirectionWithError( RtpTransceiverDirection::kInactive); ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); @@ -543,7 +539,7 @@ TEST_F(PeerConnectionJsepTest, caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); ASSERT_TRUE(transceiver->mid()); - 
transceiver->Stop(); + transceiver->StopInternal(); auto reoffer = caller->CreateOffer(); auto contents = reoffer->description()->contents(); @@ -564,13 +560,15 @@ TEST_F(PeerConnectionJsepTest, ASSERT_TRUE( caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); - transceiver->Stop(); + transceiver->StopInternal(); ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); auto transceivers = callee->pc()->GetTransceivers(); - EXPECT_TRUE(transceivers[0]->stopped()); - EXPECT_TRUE(transceivers[0]->mid()); + EXPECT_EQ(1u, transceivers.size()); + ASSERT_TRUE(callee->SetLocalDescription(callee->CreateAnswer())); + transceivers = callee->pc()->GetTransceivers(); + EXPECT_EQ(0u, transceivers.size()); } // Test that CreateOffer will only generate a recycled media section if the @@ -586,7 +584,7 @@ TEST_F(PeerConnectionJsepTest, caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); auto second_transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); - first_transceiver->Stop(); + first_transceiver->StopInternal(); auto reoffer = caller->CreateOffer(); auto contents = reoffer->description()->contents(); @@ -605,14 +603,17 @@ TEST_F(PeerConnectionJsepTest, auto callee = CreatePeerConnection(); ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); - callee->pc()->GetTransceivers()[0]->Stop(); + std::string first_mid = *first_transceiver->mid(); + ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); + callee->pc()->GetTransceivers()[0]->StopInternal(); + ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); ASSERT_TRUE( caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); EXPECT_TRUE(first_transceiver->stopped()); - // First transceivers aren't dissociated yet. 
- ASSERT_NE(absl::nullopt, first_transceiver->mid()); - std::string first_mid = *first_transceiver->mid(); - EXPECT_EQ(first_mid, callee->pc()->GetTransceivers()[0]->mid()); + // First transceivers are dissociated on caller side. + ASSERT_EQ(absl::nullopt, first_transceiver->mid()); + // They are disassociated on callee side. + ASSERT_EQ(0u, callee->pc()->GetTransceivers().size()); // New offer exchange with new transceivers that recycles the m section // correctly. @@ -630,10 +631,11 @@ TEST_F(PeerConnectionJsepTest, ASSERT_TRUE( caller->SetLocalDescription(CloneSessionDescription(offer.get()))); EXPECT_EQ(absl::nullopt, first_transceiver->mid()); - EXPECT_EQ(second_mid, caller->pc()->GetTransceivers()[1]->mid()); + ASSERT_EQ(1u, caller->pc()->GetTransceivers().size()); + EXPECT_EQ(second_mid, caller->pc()->GetTransceivers()[0]->mid()); ASSERT_TRUE(callee->SetRemoteDescription(std::move(offer))); - EXPECT_EQ(absl::nullopt, callee->pc()->GetTransceivers()[0]->mid()); - EXPECT_EQ(second_mid, callee->pc()->GetTransceivers()[1]->mid()); + ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); + EXPECT_EQ(second_mid, callee->pc()->GetTransceivers()[0]->mid()); // The new answer should also recycle the m section correctly. 
auto answer = callee->CreateAnswer(); @@ -647,13 +649,11 @@ TEST_F(PeerConnectionJsepTest, callee->SetLocalDescription(CloneSessionDescription(answer.get()))); ASSERT_TRUE(caller->SetRemoteDescription(std::move(answer))); auto caller_transceivers = caller->pc()->GetTransceivers(); - ASSERT_EQ(2u, caller_transceivers.size()); - EXPECT_EQ(absl::nullopt, caller_transceivers[0]->mid()); - EXPECT_EQ(second_mid, caller_transceivers[1]->mid()); + ASSERT_EQ(1u, caller_transceivers.size()); + EXPECT_EQ(second_mid, caller_transceivers[0]->mid()); auto callee_transceivers = callee->pc()->GetTransceivers(); - ASSERT_EQ(2u, callee_transceivers.size()); - EXPECT_EQ(absl::nullopt, callee_transceivers[0]->mid()); - EXPECT_EQ(second_mid, callee_transceivers[1]->mid()); + ASSERT_EQ(1u, callee_transceivers.size()); + EXPECT_EQ(second_mid, callee_transceivers[0]->mid()); } // Test that creating/setting a local offer that recycles an m= section is @@ -664,7 +664,7 @@ TEST_F(PeerConnectionJsepTest, CreateOfferRecyclesWhenOfferingTwice) { auto first_transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); auto callee = CreatePeerConnection(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - first_transceiver->Stop(); + first_transceiver->StopInternal(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); caller->AddAudioTrack("audio2"); @@ -675,7 +675,8 @@ TEST_F(PeerConnectionJsepTest, CreateOfferRecyclesWhenOfferingTwice) { ASSERT_EQ(1u, offer_contents.size()); EXPECT_FALSE(offer_contents[0].rejected); ASSERT_TRUE(caller->SetLocalDescription(std::move(offer))); - EXPECT_FALSE(caller->pc()->GetTransceivers()[1]->stopped()); + ASSERT_EQ(1u, caller->pc()->GetTransceivers().size()); + EXPECT_FALSE(caller->pc()->GetTransceivers()[0]->stopped()); std::string second_mid = offer_contents[0].name; // Create another new offer and set the local description again without the @@ -690,10 +691,9 @@ TEST_F(PeerConnectionJsepTest, CreateOfferRecyclesWhenOfferingTwice) { 
ASSERT_TRUE(caller->SetLocalDescription(std::move(second_offer))); // Make sure that the caller's transceivers are associated correctly. auto caller_transceivers = caller->pc()->GetTransceivers(); - ASSERT_EQ(2u, caller_transceivers.size()); - EXPECT_EQ(absl::nullopt, caller_transceivers[0]->mid()); - EXPECT_EQ(second_mid, caller_transceivers[1]->mid()); - EXPECT_FALSE(caller_transceivers[1]->stopped()); + ASSERT_EQ(1u, caller_transceivers.size()); + EXPECT_EQ(second_mid, caller_transceivers[0]->mid()); + EXPECT_FALSE(caller_transceivers[0]->stopped()); } // Test that the offer/answer and transceivers for both the caller and callee @@ -729,7 +729,7 @@ TEST_P(RecycleMediaSectionTest, CurrentLocalAndCurrentRemoteRejected) { ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); std::string first_mid = *first_transceiver->mid(); - first_transceiver->Stop(); + first_transceiver->StopInternal(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); @@ -756,11 +756,9 @@ TEST_P(RecycleMediaSectionTest, CurrentLocalAndCurrentRemoteRejected) { // create a new transceiver for the media section. ASSERT_TRUE(callee->SetRemoteDescription(std::move(offer))); auto callee_transceivers = callee->pc()->GetTransceivers(); - ASSERT_EQ(2u, callee_transceivers.size()); - EXPECT_EQ(absl::nullopt, callee_transceivers[0]->mid()); - EXPECT_EQ(first_type_, callee_transceivers[0]->media_type()); - EXPECT_EQ(second_mid, callee_transceivers[1]->mid()); - EXPECT_EQ(second_type_, callee_transceivers[1]->media_type()); + ASSERT_EQ(1u, callee_transceivers.size()); + EXPECT_EQ(second_mid, callee_transceivers[0]->mid()); + EXPECT_EQ(second_type_, callee_transceivers[0]->media_type()); // The answer should have only one media section for the new transceiver. auto answer = callee->CreateAnswer(); @@ -777,8 +775,8 @@ TEST_P(RecycleMediaSectionTest, CurrentLocalAndCurrentRemoteRejected) { // Setting the remote answer should succeed and not create any new // transceivers. 
ASSERT_TRUE(caller->SetRemoteDescription(std::move(answer))); - ASSERT_EQ(2u, caller->pc()->GetTransceivers().size()); - ASSERT_EQ(2u, callee->pc()->GetTransceivers().size()); + ASSERT_EQ(1u, caller->pc()->GetTransceivers().size()); + ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); } // Test that recycling works properly when a new transceiver recycles an m= @@ -793,7 +791,7 @@ TEST_P(RecycleMediaSectionTest, CurrentRemoteOnlyRejected) { std::string first_mid = *caller_first_transceiver->mid(); ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); auto callee_first_transceiver = callee->pc()->GetTransceivers()[0]; - callee_first_transceiver->Stop(); + callee_first_transceiver->StopInternal(); // The answer will have a rejected m= section. ASSERT_TRUE( @@ -821,11 +819,9 @@ TEST_P(RecycleMediaSectionTest, CurrentRemoteOnlyRejected) { // create a new transceiver for the media section. ASSERT_TRUE(callee->SetRemoteDescription(std::move(offer))); auto callee_transceivers = callee->pc()->GetTransceivers(); - ASSERT_EQ(2u, callee_transceivers.size()); - EXPECT_EQ(absl::nullopt, callee_transceivers[0]->mid()); - EXPECT_EQ(first_type_, callee_transceivers[0]->media_type()); - EXPECT_EQ(second_mid, callee_transceivers[1]->mid()); - EXPECT_EQ(second_type_, callee_transceivers[1]->media_type()); + ASSERT_EQ(1u, callee_transceivers.size()); + EXPECT_EQ(second_mid, callee_transceivers[0]->mid()); + EXPECT_EQ(second_type_, callee_transceivers[0]->media_type()); // The answer should have only one media section for the new transceiver. auto answer = callee->CreateAnswer(); @@ -842,8 +838,8 @@ TEST_P(RecycleMediaSectionTest, CurrentRemoteOnlyRejected) { // Setting the remote answer should succeed and not create any new // transceivers. 
ASSERT_TRUE(caller->SetRemoteDescription(std::move(answer))); - ASSERT_EQ(2u, caller->pc()->GetTransceivers().size()); - ASSERT_EQ(2u, callee->pc()->GetTransceivers().size()); + ASSERT_EQ(1u, caller->pc()->GetTransceivers().size()); + ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); } // Test that recycling works properly when a new transceiver recycles an m= @@ -858,7 +854,7 @@ TEST_P(RecycleMediaSectionTest, CurrentLocalOnlyRejected) { std::string first_mid = *caller_first_transceiver->mid(); ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); auto callee_first_transceiver = callee->pc()->GetTransceivers()[0]; - callee_first_transceiver->Stop(); + callee_first_transceiver->StopInternal(); // The answer will have a rejected m= section. ASSERT_TRUE( @@ -886,11 +882,9 @@ TEST_P(RecycleMediaSectionTest, CurrentLocalOnlyRejected) { // create a new transceiver for the media section. ASSERT_TRUE(caller->SetRemoteDescription(std::move(offer))); auto caller_transceivers = caller->pc()->GetTransceivers(); - ASSERT_EQ(2u, caller_transceivers.size()); - EXPECT_EQ(absl::nullopt, caller_transceivers[0]->mid()); - EXPECT_EQ(first_type_, caller_transceivers[0]->media_type()); - EXPECT_EQ(second_mid, caller_transceivers[1]->mid()); - EXPECT_EQ(second_type_, caller_transceivers[1]->media_type()); + ASSERT_EQ(1u, caller_transceivers.size()); + EXPECT_EQ(second_mid, caller_transceivers[0]->mid()); + EXPECT_EQ(second_type_, caller_transceivers[0]->media_type()); // The answer should have only one media section for the new transceiver. auto answer = caller->CreateAnswer(); @@ -907,8 +901,8 @@ TEST_P(RecycleMediaSectionTest, CurrentLocalOnlyRejected) { // Setting the remote answer should succeed and not create any new // transceivers. 
ASSERT_TRUE(callee->SetRemoteDescription(std::move(answer))); - ASSERT_EQ(2u, callee->pc()->GetTransceivers().size()); - ASSERT_EQ(2u, caller->pc()->GetTransceivers().size()); + ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); + ASSERT_EQ(1u, caller->pc()->GetTransceivers().size()); } // Test that a m= section is *not* recycled if the media section is only @@ -921,7 +915,7 @@ TEST_P(RecycleMediaSectionTest, PendingLocalRejectedAndNoRemote) { ASSERT_TRUE(caller->SetLocalDescription(caller->CreateOffer())); std::string first_mid = *caller_first_transceiver->mid(); - caller_first_transceiver->Stop(); + caller_first_transceiver->StopInternal(); // The reoffer will have a rejected m= section. ASSERT_TRUE(caller->SetLocalDescription(caller->CreateOffer())); @@ -959,7 +953,7 @@ TEST_P(RecycleMediaSectionTest, PendingLocalRejectedAndNotRejectedRemote) { ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); std::string first_mid = *caller_first_transceiver->mid(); - caller_first_transceiver->Stop(); + caller_first_transceiver->StopInternal(); // The reoffer will have a rejected m= section. ASSERT_TRUE(caller->SetLocalDescription(caller->CreateOffer())); @@ -999,7 +993,7 @@ TEST_P(RecycleMediaSectionTest, PendingRemoteRejectedAndNoLocal) { ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); auto callee_first_transceiver = callee->pc()->GetTransceivers()[0]; std::string first_mid = *callee_first_transceiver->mid(); - caller_first_transceiver->Stop(); + caller_first_transceiver->StopInternal(); // The reoffer will have a rejected m= section. 
ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); @@ -1036,7 +1030,7 @@ TEST_P(RecycleMediaSectionTest, PendingRemoteRejectedAndNotRejectedLocal) { ASSERT_EQ(1u, callee->pc()->GetTransceivers().size()); auto callee_first_transceiver = callee->pc()->GetTransceivers()[0]; std::string first_mid = *callee_first_transceiver->mid(); - caller_first_transceiver->Stop(); + caller_first_transceiver->StopInternal(); // The reoffer will have a rejected m= section. ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); @@ -1080,7 +1074,7 @@ TEST_F(PeerConnectionJsepTest, DataChannelDoesNotRecycleMediaSection) { ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - transceiver->Stop(); + transceiver->StopInternal(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); @@ -1367,7 +1361,7 @@ TEST_F(PeerConnectionJsepTest, IncludeMsidEvenIfDirectionHasChanged) { ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->pc()->GetTransceivers()[0]->SetDirection( + caller->pc()->GetTransceivers()[0]->SetDirectionWithError( RtpTransceiverDirection::kInactive); // The transceiver direction on both sides will turn to inactive. 
@@ -1395,7 +1389,7 @@ TEST_F(PeerConnectionJsepTest, RemoveMsidIfTransceiverStopped) { ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - transceiver->Stop(); + transceiver->StopInternal(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); @@ -1552,8 +1546,9 @@ TEST_F(PeerConnectionJsepTest, CurrentDirectionResetWhenRtpTransceiverStopped) { ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); ASSERT_TRUE(transceiver->current_direction()); - transceiver->Stop(); - EXPECT_FALSE(transceiver->current_direction()); + transceiver->StopInternal(); + EXPECT_EQ(transceiver->current_direction(), + RtpTransceiverDirection::kStopped); } // Test that you can't set an answer on a PeerConnection before setting the @@ -1797,7 +1792,8 @@ TEST_F(PeerConnectionJsepTest, RollbackImplicitly) { EXPECT_EQ(callee->signaling_state(), PeerConnectionInterface::kHaveRemoteOffer); EXPECT_TRUE(callee->CreateAnswerAndSetAsLocal()); - EXPECT_FALSE(callee->observer()->negotiation_needed()); + EXPECT_FALSE(callee->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(callee->observer()->has_negotiation_needed_event()); } TEST_F(PeerConnectionJsepTest, RollbackImplicitlyNegotatiationNotNeeded) { @@ -1809,13 +1805,15 @@ TEST_F(PeerConnectionJsepTest, RollbackImplicitlyNegotatiationNotNeeded) { caller->AddAudioTrack("a"); callee->AddAudioTrack("b"); EXPECT_TRUE(callee->CreateOfferAndSetAsLocal()); - callee->observer()->clear_negotiation_needed(); + callee->observer()->clear_legacy_renegotiation_needed(); + callee->observer()->clear_latest_negotiation_needed_event(); EXPECT_TRUE(callee->SetRemoteDescription(caller->CreateOffer())); EXPECT_EQ(callee->signaling_state(), PeerConnectionInterface::kHaveRemoteOffer); EXPECT_TRUE(callee->CreateAnswerAndSetAsLocal()); // No negotiation needed as track got attached in the answer. 
- EXPECT_FALSE(callee->observer()->negotiation_needed()); + EXPECT_FALSE(callee->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(callee->observer()->has_negotiation_needed_event()); EXPECT_EQ(callee->observer()->remove_track_events_.size(), 0u); } @@ -1827,13 +1825,16 @@ TEST_F(PeerConnectionJsepTest, RollbackImplicitlyAndNegotiationNeeded) { auto callee = CreatePeerConnection(config); callee->AddAudioTrack("a"); EXPECT_TRUE(callee->CreateOfferAndSetAsLocal()); - callee->observer()->clear_negotiation_needed(); + callee->observer()->clear_legacy_renegotiation_needed(); + callee->observer()->clear_latest_negotiation_needed_event(); EXPECT_TRUE(callee->SetRemoteDescription(caller->CreateOffer())); EXPECT_EQ(callee->signaling_state(), PeerConnectionInterface::kHaveRemoteOffer); - EXPECT_FALSE(callee->observer()->negotiation_needed()); + EXPECT_FALSE(callee->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(callee->observer()->has_negotiation_needed_event()); EXPECT_TRUE(callee->CreateAnswerAndSetAsLocal()); - EXPECT_TRUE(callee->observer()->negotiation_needed()); + EXPECT_TRUE(callee->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(callee->observer()->has_negotiation_needed_event()); EXPECT_EQ(callee->observer()->remove_track_events_.size(), 0u); } @@ -1914,6 +1915,68 @@ TEST_F(PeerConnectionJsepTest, RollbackRestoresMid) { EXPECT_TRUE(callee->SetLocalDescription(std::move(offer))); } +TEST_F(PeerConnectionJsepTest, RollbackRestoresInitSendEncodings) { + auto caller = CreatePeerConnection(); + RtpTransceiverInit init; + init.direction = RtpTransceiverDirection::kSendRecv; + RtpEncodingParameters encoding; + encoding.rid = "hi"; + init.send_encodings.push_back(encoding); + encoding.rid = "mid"; + init.send_encodings.push_back(encoding); + encoding.rid = "lo"; + init.send_encodings.push_back(encoding); + caller->AddTransceiver(cricket::MEDIA_TYPE_VIDEO, init); + auto encodings = + 
caller->pc()->GetTransceivers()[0]->sender()->init_send_encodings(); + EXPECT_TRUE(caller->SetLocalDescription(caller->CreateOffer())); + EXPECT_NE(caller->pc()->GetTransceivers()[0]->sender()->init_send_encodings(), + encodings); + EXPECT_TRUE(caller->SetLocalDescription(caller->CreateRollback())); + EXPECT_EQ(caller->pc()->GetTransceivers()[0]->sender()->init_send_encodings(), + encodings); +} + +TEST_F(PeerConnectionJsepTest, RollbackDoesNotAffectSendEncodings) { + auto caller = CreatePeerConnection(); + auto callee = CreatePeerConnection(); + RtpTransceiverInit init; + init.direction = RtpTransceiverDirection::kSendOnly; + RtpEncodingParameters encoding; + encoding.rid = "hi"; + init.send_encodings.push_back(encoding); + encoding.rid = "mid"; + init.send_encodings.push_back(encoding); + encoding.rid = "lo"; + init.send_encodings.push_back(encoding); + caller->AddTransceiver(cricket::MEDIA_TYPE_VIDEO, init); + callee->AddTransceiver(cricket::MEDIA_TYPE_VIDEO); + callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal()); + caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal()); + auto params = caller->pc()->GetTransceivers()[0]->sender()->GetParameters(); + EXPECT_TRUE(params.encodings[0].active); + params.encodings[0].active = false; + caller->pc()->GetTransceivers()[0]->sender()->SetParameters(params); + auto offer = caller->CreateOffer(); + std::string offer_string; + EXPECT_TRUE(offer.get()->ToString(&offer_string)); + std::string simulcast_line = + offer_string.substr(offer_string.find("a=simulcast")); + EXPECT_FALSE(simulcast_line.empty()); + EXPECT_TRUE(caller->SetLocalDescription(std::move(offer))); + EXPECT_TRUE(caller->SetLocalDescription(caller->CreateRollback())); + EXPECT_FALSE(caller->pc() + ->GetTransceivers()[0] + ->sender() + ->GetParameters() + .encodings[0] + .active); + offer = caller->CreateOffer(); + EXPECT_TRUE(offer.get()->ToString(&offer_string)); + EXPECT_EQ(offer_string.substr(offer_string.find("a=simulcast")), + 
simulcast_line); +} + TEST_F(PeerConnectionJsepTest, RollbackRestoresMidAndRemovesTransceiver) { auto callee = CreatePeerConnection(); callee->AddVideoTrack("a"); @@ -1944,7 +2007,8 @@ TEST_F(PeerConnectionJsepTest, RollbackHasNoEffectOnStableTransceivers) { EXPECT_TRUE( caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); // In stable don't add or remove anything. - callee->observer()->clear_negotiation_needed(); + callee->observer()->clear_legacy_renegotiation_needed(); + callee->observer()->clear_latest_negotiation_needed_event(); size_t transceiver_count = callee->pc()->GetTransceivers().size(); auto mid_0 = callee->pc()->GetTransceivers()[0]->mid(); auto mid_1 = callee->pc()->GetTransceivers()[1]->mid(); @@ -1954,7 +2018,8 @@ TEST_F(PeerConnectionJsepTest, RollbackHasNoEffectOnStableTransceivers) { EXPECT_EQ(callee->pc()->GetTransceivers()[0]->mid(), mid_0); EXPECT_EQ(callee->pc()->GetTransceivers()[1]->mid(), mid_1); EXPECT_EQ(callee->observer()->remove_track_events_.size(), 0u); - EXPECT_FALSE(callee->observer()->negotiation_needed()); + EXPECT_FALSE(callee->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(callee->observer()->has_negotiation_needed_event()); } TEST_F(PeerConnectionJsepTest, ImplicitlyRollbackTransceiversWithSameMids) { @@ -2039,7 +2104,7 @@ TEST_F(PeerConnectionJsepTest, RollbackLocalDirectionChange) { EXPECT_TRUE( caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); callee->AddAudioTrack("a"); - callee->pc()->GetTransceivers()[0]->SetDirection( + callee->pc()->GetTransceivers()[0]->SetDirectionWithError( RtpTransceiverDirection::kSendOnly); EXPECT_TRUE(callee->CreateOfferAndSetAsLocal()); EXPECT_EQ(callee->pc()->GetTransceivers().size(), 1u); @@ -2063,7 +2128,7 @@ TEST_F(PeerConnectionJsepTest, RollbackRemoteDirectionChange) { EXPECT_TRUE( caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); // In stable make remote audio receive only. 
- caller_transceiver->SetDirection(RtpTransceiverDirection::kRecvOnly); + caller_transceiver->SetDirectionWithError(RtpTransceiverDirection::kRecvOnly); EXPECT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); EXPECT_EQ(callee->pc()->GetTransceivers().size(), 1u); // The direction attribute is not modified by the offer. @@ -2089,9 +2154,11 @@ TEST_F(PeerConnectionJsepTest, RollbackAfterMultipleSLD) { EXPECT_TRUE(callee->CreateOfferAndSetAsLocal()); callee->AddTransceiver(cricket::MEDIA_TYPE_VIDEO); EXPECT_TRUE(callee->CreateOfferAndSetAsLocal()); - callee->observer()->clear_negotiation_needed(); + callee->observer()->clear_legacy_renegotiation_needed(); + callee->observer()->clear_latest_negotiation_needed_event(); EXPECT_TRUE(callee->SetRemoteDescription(callee->CreateRollback())); - EXPECT_TRUE(callee->observer()->negotiation_needed()); + EXPECT_TRUE(callee->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(callee->observer()->has_negotiation_needed_event()); EXPECT_EQ(callee->pc()->GetTransceivers().size(), 2u); EXPECT_EQ(callee->pc()->GetTransceivers()[0]->mid(), absl::nullopt); EXPECT_EQ(callee->pc()->GetTransceivers()[1]->mid(), absl::nullopt); @@ -2140,7 +2207,8 @@ TEST_F(PeerConnectionJsepTest, DataChannelImplicitRollback) { EXPECT_TRUE(callee->CreateOfferAndSetAsLocal()); EXPECT_TRUE(callee->SetRemoteDescription(caller->CreateOffer())); EXPECT_TRUE(callee->CreateAnswerAndSetAsLocal()); - EXPECT_TRUE(callee->observer()->negotiation_needed()); + EXPECT_TRUE(callee->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(callee->observer()->has_negotiation_needed_event()); EXPECT_TRUE(callee->CreateOfferAndSetAsLocal()); } @@ -2198,16 +2266,4 @@ TEST_F(PeerConnectionJsepTest, EXPECT_TRUE(callee->CreateOfferAndSetAsLocal()); } -TEST_F(PeerConnectionJsepTest, RollbackRtpDataChannel) { - RTCConfiguration config; - config.sdp_semantics = SdpSemantics::kUnifiedPlan; - config.enable_rtp_data_channel = true; - auto pc = 
CreatePeerConnection(config); - pc->CreateDataChannel("dummy"); - auto offer = pc->CreateOffer(); - EXPECT_TRUE(pc->CreateOfferAndSetAsLocal()); - EXPECT_TRUE(pc->SetRemoteDescription(pc->CreateRollback())); - EXPECT_TRUE(pc->SetLocalDescription(std::move(offer))); -} - } // namespace webrtc diff --git a/pc/peer_connection_media_unittest.cc b/pc/peer_connection_media_unittest.cc index c9ffd776d9..d5d0b926b7 100644 --- a/pc/peer_connection_media_unittest.cc +++ b/pc/peer_connection_media_unittest.cc @@ -290,8 +290,8 @@ TEST_F(PeerConnectionMediaTestUnifiedPlan, // Stop both audio and video transceivers on the caller. auto transceivers = caller->pc()->GetTransceivers(); ASSERT_EQ(2u, transceivers.size()); - transceivers[0]->Stop(); - transceivers[1]->Stop(); + transceivers[0]->StopInternal(); + transceivers[1]->StopInternal(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); @@ -388,8 +388,8 @@ TEST_F(PeerConnectionMediaTestUnifiedPlan, // Stop both audio and video transceivers on the callee. 
auto transceivers = callee->pc()->GetTransceivers(); ASSERT_EQ(2u, transceivers.size()); - transceivers[0]->Stop(); - transceivers[1]->Stop(); + transceivers[0]->StopInternal(); + transceivers[1]->StopInternal(); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); @@ -825,8 +825,10 @@ TEST_P(PeerConnectionMediaTest, AnswerHasDifferentDirectionsForAudioVideo) { } void AddComfortNoiseCodecsToSend(cricket::FakeMediaEngine* media_engine) { - const cricket::AudioCodec kComfortNoiseCodec8k(102, "CN", 8000, 0, 1); - const cricket::AudioCodec kComfortNoiseCodec16k(103, "CN", 16000, 0, 1); + const cricket::AudioCodec kComfortNoiseCodec8k(102, cricket::kCnCodecName, + 8000, 0, 1); + const cricket::AudioCodec kComfortNoiseCodec16k(103, cricket::kCnCodecName, + 16000, 0, 1); auto codecs = media_engine->voice().send_codecs(); codecs.push_back(kComfortNoiseCodec8k); @@ -837,7 +839,7 @@ void AddComfortNoiseCodecsToSend(cricket::FakeMediaEngine* media_engine) { bool HasAnyComfortNoiseCodecs(const cricket::SessionDescription* desc) { const auto* audio_desc = cricket::GetFirstAudioContentDescription(desc); for (const auto& codec : audio_desc->codecs()) { - if (codec.name == "CN") { + if (codec.name == cricket::kCnCodecName) { return true; } } @@ -846,8 +848,9 @@ bool HasAnyComfortNoiseCodecs(const cricket::SessionDescription* desc) { TEST_P(PeerConnectionMediaTest, CreateOfferWithNoVoiceActivityDetectionIncludesNoComfortNoiseCodecs) { - auto caller = CreatePeerConnectionWithAudioVideo(); - AddComfortNoiseCodecsToSend(caller->media_engine()); + auto fake_engine = std::make_unique(); + AddComfortNoiseCodecsToSend(fake_engine.get()); + auto caller = CreatePeerConnectionWithAudioVideo(std::move(fake_engine)); RTCOfferAnswerOptions options; options.voice_activity_detection = false; @@ -857,11 +860,47 @@ TEST_P(PeerConnectionMediaTest, } TEST_P(PeerConnectionMediaTest, - CreateAnswerWithNoVoiceActivityDetectionIncludesNoComfortNoiseCodecs) { + 
CreateOfferWithVoiceActivityDetectionIncludesComfortNoiseCodecs) { + auto fake_engine = std::make_unique(); + AddComfortNoiseCodecsToSend(fake_engine.get()); + auto caller = CreatePeerConnectionWithAudioVideo(std::move(fake_engine)); + + RTCOfferAnswerOptions options; + options.voice_activity_detection = true; + auto offer = caller->CreateOffer(options); + + EXPECT_TRUE(HasAnyComfortNoiseCodecs(offer->description())); +} + +TEST_P(PeerConnectionMediaTest, + CreateAnswerWithVoiceActivityDetectionIncludesNoComfortNoiseCodecs) { auto caller = CreatePeerConnectionWithAudioVideo(); - AddComfortNoiseCodecsToSend(caller->media_engine()); - auto callee = CreatePeerConnectionWithAudioVideo(); - AddComfortNoiseCodecsToSend(callee->media_engine()); + + auto callee_fake_engine = std::make_unique(); + AddComfortNoiseCodecsToSend(callee_fake_engine.get()); + auto callee = + CreatePeerConnectionWithAudioVideo(std::move(callee_fake_engine)); + + ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); + + RTCOfferAnswerOptions options; + options.voice_activity_detection = true; + auto answer = callee->CreateAnswer(options); + + EXPECT_FALSE(HasAnyComfortNoiseCodecs(answer->description())); +} + +TEST_P(PeerConnectionMediaTest, + CreateAnswerWithNoVoiceActivityDetectionIncludesNoComfortNoiseCodecs) { + auto caller_fake_engine = std::make_unique(); + AddComfortNoiseCodecsToSend(caller_fake_engine.get()); + auto caller = + CreatePeerConnectionWithAudioVideo(std::move(caller_fake_engine)); + + auto callee_fake_engine = std::make_unique(); + AddComfortNoiseCodecsToSend(callee_fake_engine.get()); + auto callee = + CreatePeerConnectionWithAudioVideo(std::move(callee_fake_engine)); ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); @@ -1118,10 +1157,11 @@ TEST_P(PeerConnectionMediaTest, MediaEngineErrorPropagatedToClients) { std::string error; ASSERT_FALSE(caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal(), &error)); - 
EXPECT_EQ( - "Failed to set remote answer sdp: Failed to set remote video description " - "send parameters.", - error); + EXPECT_EQ(std::string("Failed to set remote answer sdp: Failed to set remote " + "video description " + "send parameters for m-section with mid='") + + (IsUnifiedPlan() ? "1" : "video") + "'.", + error); } // Tests that if the underlying video encoder fails once then subsequent @@ -1733,6 +1773,26 @@ TEST_F(PeerConnectionMediaTestUnifiedPlan, EXPECT_TRUE(CompareCodecs(video_codecs_vpx_reverse, recv_codecs)); } +TEST_F(PeerConnectionMediaTestUnifiedPlan, + SetCodecPreferencesVoiceActivityDetection) { + auto fake_engine = std::make_unique(); + AddComfortNoiseCodecsToSend(fake_engine.get()); + auto caller = CreatePeerConnectionWithAudio(std::move(fake_engine)); + + RTCOfferAnswerOptions options; + auto offer = caller->CreateOffer(options); + EXPECT_TRUE(HasAnyComfortNoiseCodecs(offer->description())); + + auto transceiver = caller->pc()->GetTransceivers().front(); + auto capabilities = caller->pc_factory()->GetRtpSenderCapabilities( + cricket::MediaType::MEDIA_TYPE_AUDIO); + EXPECT_TRUE(transceiver->SetCodecPreferences(capabilities.codecs).ok()); + + options.voice_activity_detection = false; + offer = caller->CreateOffer(options); + EXPECT_FALSE(HasAnyComfortNoiseCodecs(offer->description())); +} + INSTANTIATE_TEST_SUITE_P(PeerConnectionMediaTest, PeerConnectionMediaTest, Values(SdpSemantics::kPlanB, diff --git a/pc/peer_connection_message_handler.cc b/pc/peer_connection_message_handler.cc new file mode 100644 index 0000000000..4b7913d678 --- /dev/null +++ b/pc/peer_connection_message_handler.cc @@ -0,0 +1,180 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "pc/peer_connection_message_handler.h" + +#include + +#include "api/jsep.h" +#include "api/media_stream_interface.h" +#include "api/peer_connection_interface.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/stats_types.h" +#include "pc/stats_collector_interface.h" +#include "rtc_base/checks.h" +#include "rtc_base/location.h" + +namespace webrtc { + +namespace { + +enum { + MSG_SET_SESSIONDESCRIPTION_SUCCESS = 0, + MSG_SET_SESSIONDESCRIPTION_FAILED, + MSG_CREATE_SESSIONDESCRIPTION_FAILED, + MSG_GETSTATS, + MSG_REPORT_USAGE_PATTERN, +}; + +struct SetSessionDescriptionMsg : public rtc::MessageData { + explicit SetSessionDescriptionMsg( + webrtc::SetSessionDescriptionObserver* observer) + : observer(observer) {} + + rtc::scoped_refptr observer; + RTCError error; +}; + +struct CreateSessionDescriptionMsg : public rtc::MessageData { + explicit CreateSessionDescriptionMsg( + webrtc::CreateSessionDescriptionObserver* observer) + : observer(observer) {} + + rtc::scoped_refptr observer; + RTCError error; +}; + +struct GetStatsMsg : public rtc::MessageData { + GetStatsMsg(webrtc::StatsObserver* observer, + StatsCollectorInterface* stats, + webrtc::MediaStreamTrackInterface* track) + : observer(observer), stats(stats), track(track) {} + rtc::scoped_refptr observer; + StatsCollectorInterface* stats; + rtc::scoped_refptr track; +}; + +struct RequestUsagePatternMsg : public rtc::MessageData { + explicit RequestUsagePatternMsg(std::function func) + : function(func) {} + std::function function; +}; + +} // namespace + +PeerConnectionMessageHandler::~PeerConnectionMessageHandler() { + // Process all pending notifications in the message queue. If we don't do + // this, requests will linger and not know they succeeded or failed. 
+ rtc::MessageList list; + signaling_thread()->Clear(this, rtc::MQID_ANY, &list); + for (auto& msg : list) { + if (msg.message_id == MSG_CREATE_SESSIONDESCRIPTION_FAILED) { + // Processing CreateOffer() and CreateAnswer() messages ensures their + // observers are invoked even if the PeerConnection is destroyed early. + OnMessage(&msg); + } else { + // TODO(hbos): Consider processing all pending messages. This would mean + // that SetLocalDescription() and SetRemoteDescription() observers are + // informed of successes and failures; this is currently NOT the case. + delete msg.pdata; + } + } +} + +void PeerConnectionMessageHandler::OnMessage(rtc::Message* msg) { + RTC_DCHECK_RUN_ON(signaling_thread()); + switch (msg->message_id) { + case MSG_SET_SESSIONDESCRIPTION_SUCCESS: { + SetSessionDescriptionMsg* param = + static_cast(msg->pdata); + param->observer->OnSuccess(); + delete param; + break; + } + case MSG_SET_SESSIONDESCRIPTION_FAILED: { + SetSessionDescriptionMsg* param = + static_cast(msg->pdata); + param->observer->OnFailure(std::move(param->error)); + delete param; + break; + } + case MSG_CREATE_SESSIONDESCRIPTION_FAILED: { + CreateSessionDescriptionMsg* param = + static_cast(msg->pdata); + param->observer->OnFailure(std::move(param->error)); + delete param; + break; + } + case MSG_GETSTATS: { + GetStatsMsg* param = static_cast(msg->pdata); + StatsReports reports; + param->stats->GetStats(param->track, &reports); + param->observer->OnComplete(reports); + delete param; + break; + } + case MSG_REPORT_USAGE_PATTERN: { + RequestUsagePatternMsg* param = + static_cast(msg->pdata); + param->function(); + delete param; + break; + } + default: + RTC_NOTREACHED() << "Not implemented"; + break; + } +} + +void PeerConnectionMessageHandler::PostSetSessionDescriptionSuccess( + SetSessionDescriptionObserver* observer) { + SetSessionDescriptionMsg* msg = new SetSessionDescriptionMsg(observer); + signaling_thread()->Post(RTC_FROM_HERE, this, + 
MSG_SET_SESSIONDESCRIPTION_SUCCESS, msg); +} + +void PeerConnectionMessageHandler::PostSetSessionDescriptionFailure( + SetSessionDescriptionObserver* observer, + RTCError&& error) { + RTC_DCHECK(!error.ok()); + SetSessionDescriptionMsg* msg = new SetSessionDescriptionMsg(observer); + msg->error = std::move(error); + signaling_thread()->Post(RTC_FROM_HERE, this, + MSG_SET_SESSIONDESCRIPTION_FAILED, msg); +} + +void PeerConnectionMessageHandler::PostCreateSessionDescriptionFailure( + CreateSessionDescriptionObserver* observer, + RTCError error) { + RTC_DCHECK(!error.ok()); + CreateSessionDescriptionMsg* msg = new CreateSessionDescriptionMsg(observer); + msg->error = std::move(error); + signaling_thread()->Post(RTC_FROM_HERE, this, + MSG_CREATE_SESSIONDESCRIPTION_FAILED, msg); +} + +void PeerConnectionMessageHandler::PostGetStats( + StatsObserver* observer, + StatsCollectorInterface* stats, + MediaStreamTrackInterface* track) { + signaling_thread()->Post(RTC_FROM_HERE, this, MSG_GETSTATS, + new GetStatsMsg(observer, stats, track)); +} + +void PeerConnectionMessageHandler::RequestUsagePatternReport( + std::function func, + int delay_ms) { + signaling_thread()->PostDelayed(RTC_FROM_HERE, delay_ms, this, + MSG_REPORT_USAGE_PATTERN, + new RequestUsagePatternMsg(func)); +} + +} // namespace webrtc diff --git a/pc/peer_connection_message_handler.h b/pc/peer_connection_message_handler.h new file mode 100644 index 0000000000..c19f5a4e50 --- /dev/null +++ b/pc/peer_connection_message_handler.h @@ -0,0 +1,62 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef PC_PEER_CONNECTION_MESSAGE_HANDLER_H_ +#define PC_PEER_CONNECTION_MESSAGE_HANDLER_H_ + +#include + +#include "api/jsep.h" +#include "api/media_stream_interface.h" +#include "api/peer_connection_interface.h" +#include "api/rtc_error.h" +#include "api/stats_types.h" +#include "pc/stats_collector_interface.h" +#include "rtc_base/message_handler.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_message.h" + +namespace webrtc { + +class CreateSessionDescriptionObserver; +class SetSessionDescriptionObserver; +class StatsCollectorInterface; +class StatsObserver; +class MediaStreamTrackInterface; + +class PeerConnectionMessageHandler : public rtc::MessageHandler { + public: + explicit PeerConnectionMessageHandler(rtc::Thread* signaling_thread) + : signaling_thread_(signaling_thread) {} + ~PeerConnectionMessageHandler(); + + // Implements MessageHandler. + void OnMessage(rtc::Message* msg) override; + void PostSetSessionDescriptionSuccess( + SetSessionDescriptionObserver* observer); + void PostSetSessionDescriptionFailure(SetSessionDescriptionObserver* observer, + RTCError&& error); + void PostCreateSessionDescriptionFailure( + CreateSessionDescriptionObserver* observer, + RTCError error); + void PostGetStats(StatsObserver* observer, + StatsCollectorInterface* stats, + MediaStreamTrackInterface* track); + void RequestUsagePatternReport(std::function, int delay_ms); + + private: + rtc::Thread* signaling_thread() const { return signaling_thread_; } + + rtc::Thread* const signaling_thread_; +}; + +} // namespace webrtc + +#endif // PC_PEER_CONNECTION_MESSAGE_HANDLER_H_ diff --git a/api/peer_connection_proxy.h b/pc/peer_connection_proxy.h similarity index 76% rename from api/peer_connection_proxy.h rename to pc/peer_connection_proxy.h index c278308ccb..7601c9d053 100644 --- a/api/peer_connection_proxy.h +++ b/pc/peer_connection_proxy.h @@ -8,22 +8,25 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef API_PEER_CONNECTION_PROXY_H_ -#define API_PEER_CONNECTION_PROXY_H_ +#ifndef PC_PEER_CONNECTION_PROXY_H_ +#define PC_PEER_CONNECTION_PROXY_H_ #include #include #include #include "api/peer_connection_interface.h" -#include "api/proxy.h" +#include "pc/proxy.h" namespace webrtc { -// TODO(deadbeef): Move this to .cc file and out of api/. What threads methods -// are called on is an implementation detail. -BEGIN_SIGNALING_PROXY_MAP(PeerConnection) -PROXY_SIGNALING_THREAD_DESTRUCTOR() +// PeerConnection proxy objects will be constructed with two thread pointers, +// signaling and network. The proxy macros don't have 'network' specific macros +// and support for a secondary thread is provided via 'SECONDARY' macros. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. +BEGIN_PROXY_MAP(PeerConnection) +PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD0(rtc::scoped_refptr, local_streams) PROXY_METHOD0(rtc::scoped_refptr, remote_streams) PROXY_METHOD1(bool, AddStream, MediaStreamInterface*) @@ -73,8 +76,8 @@ PROXY_METHOD2(void, rtc::scoped_refptr, rtc::scoped_refptr) PROXY_METHOD0(void, ClearStatsCache) -PROXY_METHOD2(rtc::scoped_refptr, - CreateDataChannel, +PROXY_METHOD2(RTCErrorOr>, + CreateDataChannelOrError, const std::string&, const DataChannelInit*) PROXY_CONSTMETHOD0(const SessionDescriptionInterface*, local_description) @@ -98,17 +101,25 @@ PROXY_METHOD2(void, const RTCOfferAnswerOptions&) PROXY_METHOD2(void, SetLocalDescription, - SetSessionDescriptionObserver*, - SessionDescriptionInterface*) -PROXY_METHOD1(void, SetLocalDescription, SetSessionDescriptionObserver*) + std::unique_ptr, + rtc::scoped_refptr) +PROXY_METHOD1(void, + SetLocalDescription, + rtc::scoped_refptr) PROXY_METHOD2(void, - SetRemoteDescription, + SetLocalDescription, SetSessionDescriptionObserver*, SessionDescriptionInterface*) +PROXY_METHOD1(void, SetLocalDescription, SetSessionDescriptionObserver*) PROXY_METHOD2(void, 
SetRemoteDescription, std::unique_ptr, rtc::scoped_refptr) +PROXY_METHOD2(void, + SetRemoteDescription, + SetSessionDescriptionObserver*, + SessionDescriptionInterface*) +PROXY_METHOD1(bool, ShouldFireNegotiationNeededEvent, uint32_t) PROXY_METHOD0(PeerConnectionInterface::RTCConfiguration, GetConfiguration) PROXY_METHOD1(RTCError, SetConfiguration, @@ -122,16 +133,22 @@ PROXY_METHOD1(bool, RemoveIceCandidates, const std::vector&) PROXY_METHOD1(RTCError, SetBitrate, const BitrateSettings&) PROXY_METHOD1(void, SetAudioPlayout, bool) PROXY_METHOD1(void, SetAudioRecording, bool) -PROXY_METHOD1(rtc::scoped_refptr, - LookupDtlsTransportByMid, - const std::string&) -PROXY_CONSTMETHOD0(rtc::scoped_refptr, GetSctpTransport) +// This method will be invoked on the network thread. See +// PeerConnectionFactory::CreatePeerConnectionOrError for more details. +PROXY_SECONDARY_METHOD1(rtc::scoped_refptr, + LookupDtlsTransportByMid, + const std::string&) +// This method will be invoked on the network thread. See +// PeerConnectionFactory::CreatePeerConnectionOrError for more details. 
+PROXY_SECONDARY_CONSTMETHOD0(rtc::scoped_refptr, + GetSctpTransport) PROXY_METHOD0(SignalingState, signaling_state) PROXY_METHOD0(IceConnectionState, ice_connection_state) PROXY_METHOD0(IceConnectionState, standardized_ice_connection_state) PROXY_METHOD0(PeerConnectionState, peer_connection_state) PROXY_METHOD0(IceGatheringState, ice_gathering_state) PROXY_METHOD0(absl::optional, can_trickle_ice_candidates) +PROXY_METHOD1(void, AddAdaptationResource, rtc::scoped_refptr) PROXY_METHOD2(bool, StartRtcEventLog, std::unique_ptr, @@ -139,8 +156,9 @@ PROXY_METHOD2(bool, PROXY_METHOD1(bool, StartRtcEventLog, std::unique_ptr) PROXY_METHOD0(void, StopRtcEventLog) PROXY_METHOD0(void, Close) -END_PROXY_MAP() +BYPASS_PROXY_CONSTMETHOD0(rtc::Thread*, signaling_thread) +END_PROXY_MAP(PeerConnection) } // namespace webrtc -#endif // API_PEER_CONNECTION_PROXY_H_ +#endif // PC_PEER_CONNECTION_PROXY_H_ diff --git a/pc/peer_connection_rampup_tests.cc b/pc/peer_connection_rampup_tests.cc index b50489d534..d50d488125 100644 --- a/pc/peer_connection_rampup_tests.cc +++ b/pc/peer_connection_rampup_tests.cc @@ -120,7 +120,7 @@ class PeerConnectionWrapperForRampUpTest : public PeerConnectionWrapper { FrameGeneratorCapturerVideoTrackSource::Config config, Clock* clock) { video_track_sources_.emplace_back( - new rtc::RefCountedObject( + rtc::make_ref_counted( config, clock, /*is_screencast=*/false)); video_track_sources_.back()->Start(); return rtc::scoped_refptr( @@ -192,14 +192,14 @@ class PeerConnectionRampUpTest : public ::testing::Test { dependencies.tls_cert_verifier = std::make_unique(); - auto pc = - pc_factory_->CreatePeerConnection(config, std::move(dependencies)); - if (!pc) { + auto result = pc_factory_->CreatePeerConnectionOrError( + config, std::move(dependencies)); + if (!result.ok()) { return nullptr; } return std::make_unique( - pc_factory_, pc, std::move(observer)); + pc_factory_, result.MoveValue(), std::move(observer)); } void SetupOneWayCall() { @@ -333,7 +333,7 @@ class 
PeerConnectionRampUpTest : public ::testing::Test { std::unique_ptr callee_; }; -TEST_F(PeerConnectionRampUpTest, TurnOverTCP) { +TEST_F(PeerConnectionRampUpTest, Bwe_After_TurnOverTCP) { CreateTurnServer(cricket::ProtocolType::PROTO_TCP); PeerConnectionInterface::IceServer ice_server; std::string ice_server_url = "turn:" + std::string(kTurnInternalAddress) + @@ -354,7 +354,7 @@ TEST_F(PeerConnectionRampUpTest, TurnOverTCP) { RunTest("turn_over_tcp"); } -TEST_F(PeerConnectionRampUpTest, TurnOverUDP) { +TEST_F(PeerConnectionRampUpTest, Bwe_After_TurnOverUDP) { CreateTurnServer(cricket::ProtocolType::PROTO_UDP); PeerConnectionInterface::IceServer ice_server; std::string ice_server_url = "turn:" + std::string(kTurnInternalAddress) + @@ -375,7 +375,7 @@ TEST_F(PeerConnectionRampUpTest, TurnOverUDP) { RunTest("turn_over_udp"); } -TEST_F(PeerConnectionRampUpTest, TurnOverTLS) { +TEST_F(PeerConnectionRampUpTest, Bwe_After_TurnOverTLS) { CreateTurnServer(cricket::ProtocolType::PROTO_TLS, kTurnInternalAddress); PeerConnectionInterface::IceServer ice_server; std::string ice_server_url = "turns:" + std::string(kTurnInternalAddress) + @@ -397,7 +397,7 @@ TEST_F(PeerConnectionRampUpTest, TurnOverTLS) { RunTest("turn_over_tls"); } -TEST_F(PeerConnectionRampUpTest, UDPPeerToPeer) { +TEST_F(PeerConnectionRampUpTest, Bwe_After_UDPPeerToPeer) { PeerConnectionInterface::RTCConfiguration client_1_config; client_1_config.tcp_candidate_policy = PeerConnection::kTcpCandidatePolicyDisabled; @@ -410,7 +410,7 @@ TEST_F(PeerConnectionRampUpTest, UDPPeerToPeer) { RunTest("udp_peer_to_peer"); } -TEST_F(PeerConnectionRampUpTest, TCPPeerToPeer) { +TEST_F(PeerConnectionRampUpTest, Bwe_After_TCPPeerToPeer) { firewall_socket_server()->set_udp_sockets_enabled(false); ASSERT_TRUE(CreatePeerConnectionWrappers( PeerConnectionInterface::RTCConfiguration(), diff --git a/pc/peer_connection_rtp_unittest.cc b/pc/peer_connection_rtp_unittest.cc index 9e4a816a45..2822854a2d 100644 --- 
a/pc/peer_connection_rtp_unittest.cc +++ b/pc/peer_connection_rtp_unittest.cc @@ -164,6 +164,28 @@ class PeerConnectionRtpTestUnifiedPlan : public PeerConnectionRtpBaseTest { protected: PeerConnectionRtpTestUnifiedPlan() : PeerConnectionRtpBaseTest(SdpSemantics::kUnifiedPlan) {} + + // Helper to emulate an SFU that rejects an offered media section + // in answer. + bool ExchangeOfferAnswerWhereRemoteStopsTransceiver( + PeerConnectionWrapper* caller, + PeerConnectionWrapper* callee, + size_t mid_to_stop) { + auto offer = caller->CreateOffer(); + caller->SetLocalDescription(CloneSessionDescription(offer.get())); + callee->SetRemoteDescription(std::move(offer)); + EXPECT_LT(mid_to_stop, callee->pc()->GetTransceivers().size()); + // Must use StopInternal in order to do instant reject. + callee->pc()->GetTransceivers()[mid_to_stop]->StopInternal(); + auto answer = callee->CreateAnswer(); + EXPECT_TRUE(answer); + bool set_local_answer = + callee->SetLocalDescription(CloneSessionDescription(answer.get())); + EXPECT_TRUE(set_local_answer); + bool set_remote_answer = caller->SetRemoteDescription(std::move(answer)); + EXPECT_TRUE(set_remote_answer); + return set_remote_answer; + } }; // These tests cover |webrtc::PeerConnectionObserver| callbacks firing upon @@ -370,19 +392,25 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, SetDirectionCallsOnTrack) { auto callee = CreatePeerConnection(); auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); - transceiver->SetDirection(RtpTransceiverDirection::kInactive); + EXPECT_TRUE( + transceiver->SetDirectionWithError(RtpTransceiverDirection::kInactive) + .ok()); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); EXPECT_EQ(0u, caller->observer()->on_track_transceivers_.size()); EXPECT_EQ(0u, callee->observer()->on_track_transceivers_.size()); - transceiver->SetDirection(RtpTransceiverDirection::kSendOnly); + EXPECT_TRUE( + transceiver->SetDirectionWithError(RtpTransceiverDirection::kSendOnly) + .ok()); 
ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); EXPECT_EQ(0u, caller->observer()->on_track_transceivers_.size()); EXPECT_EQ(1u, callee->observer()->on_track_transceivers_.size()); // If the direction changes but it is still receiving on the remote side, then // OnTrack should not be fired again. - transceiver->SetDirection(RtpTransceiverDirection::kSendRecv); + EXPECT_TRUE( + transceiver->SetDirectionWithError(RtpTransceiverDirection::kSendRecv) + .ok()); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); EXPECT_EQ(0u, caller->observer()->on_track_transceivers_.size()); EXPECT_EQ(1u, callee->observer()->on_track_transceivers_.size()); @@ -401,8 +429,10 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, SetDirectionHoldCallsOnTrackTwice) { EXPECT_EQ(1u, callee->observer()->on_track_transceivers_.size()); // Put the call on hold by no longer receiving the track. - callee->pc()->GetTransceivers()[0]->SetDirection( - RtpTransceiverDirection::kInactive); + EXPECT_TRUE(callee->pc() + ->GetTransceivers()[0] + ->SetDirectionWithError(RtpTransceiverDirection::kInactive) + .ok()); ASSERT_TRUE(callee->ExchangeOfferAnswerWith(caller.get())); EXPECT_EQ(0u, caller->observer()->on_track_transceivers_.size()); @@ -410,8 +440,10 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, SetDirectionHoldCallsOnTrackTwice) { // Resume the call by changing the direction to recvonly. This should call // OnTrack again on the callee side. 
- callee->pc()->GetTransceivers()[0]->SetDirection( - RtpTransceiverDirection::kRecvOnly); + EXPECT_TRUE(callee->pc() + ->GetTransceivers()[0] + ->SetDirectionWithError(RtpTransceiverDirection::kRecvOnly) + .ok()); ASSERT_TRUE(callee->ExchangeOfferAnswerWith(caller.get())); EXPECT_EQ(0u, caller->observer()->on_track_transceivers_.size()); @@ -470,7 +502,9 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, EXPECT_EQ(0u, callee->observer()->remove_track_events_.size()); auto callee_transceiver = callee->pc()->GetTransceivers()[0]; - callee_transceiver->SetDirection(RtpTransceiverDirection::kSendOnly); + EXPECT_TRUE(callee_transceiver + ->SetDirectionWithError(RtpTransceiverDirection::kSendOnly) + .ok()); ASSERT_TRUE(callee->SetLocalDescription(callee->CreateAnswer())); EXPECT_EQ(1u, callee->observer()->add_track_events_.size()); @@ -745,6 +779,56 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, UnsignaledSsrcCreatesReceiverStreams) { EXPECT_EQ(receivers[0]->streams()[0]->id(), kStreamId1); EXPECT_EQ(receivers[0]->streams()[1]->id(), kStreamId2); } +TEST_F(PeerConnectionRtpTestUnifiedPlan, TracksDoNotEndWhenSsrcChanges) { + constexpr uint32_t kFirstMungedSsrc = 1337u; + + auto caller = CreatePeerConnection(); + auto callee = CreatePeerConnection(); + + // Caller offers to receive audio and video. + RtpTransceiverInit init; + init.direction = RtpTransceiverDirection::kRecvOnly; + caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO, init); + caller->AddTransceiver(cricket::MEDIA_TYPE_VIDEO, init); + + // Callee wants to send audio and video tracks. + callee->AddTrack(callee->CreateAudioTrack("audio_track"), {}); + callee->AddTrack(callee->CreateVideoTrack("video_track"), {}); + + // Do initial offer/answer exchange. 
+ ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); + ASSERT_TRUE( + caller->SetRemoteDescription(callee->CreateAnswerAndSetAsLocal())); + ASSERT_EQ(caller->observer()->add_track_events_.size(), 2u); + ASSERT_EQ(caller->pc()->GetReceivers().size(), 2u); + + // Do a follow-up offer/answer exchange where the SSRCs are modified. + ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); + auto answer = callee->CreateAnswer(); + auto& contents = answer->description()->contents(); + ASSERT_TRUE(!contents.empty()); + for (size_t i = 0; i < contents.size(); ++i) { + auto& mutable_streams = contents[i].media_description()->mutable_streams(); + ASSERT_EQ(mutable_streams.size(), 1u); + mutable_streams[0].ssrcs = {kFirstMungedSsrc + static_cast(i)}; + } + ASSERT_TRUE( + callee->SetLocalDescription(CloneSessionDescription(answer.get()))); + ASSERT_TRUE( + caller->SetRemoteDescription(CloneSessionDescription(answer.get()))); + + // No further track events should fire because we never changed direction, only + // SSRCs. + ASSERT_EQ(caller->observer()->add_track_events_.size(), 2u); + // We should have the same number of receivers as before. + auto receivers = caller->pc()->GetReceivers(); + ASSERT_EQ(receivers.size(), 2u); + // The tracks are still alive. 
+ EXPECT_EQ(receivers[0]->track()->state(), + MediaStreamTrackInterface::TrackState::kLive); + EXPECT_EQ(receivers[1]->track()->state(), + MediaStreamTrackInterface::TrackState::kLive); +} // Tests that with Unified Plan if the the stream id changes for a track when // when setting a new remote description, that the media stream is updated @@ -835,7 +919,7 @@ TEST_P(PeerConnectionRtpTest, auto callee = CreatePeerConnection(); rtc::scoped_refptr observer = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); auto offer = caller->CreateOfferAndSetAsLocal(); callee->pc()->SetRemoteDescription(observer, offer.release()); @@ -1133,12 +1217,15 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, RtpTransceiverInit init; init.direction = RtpTransceiverDirection::kInactive; auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO, init); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); ASSERT_TRUE(caller->AddAudioTrack("a")); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); EXPECT_EQ(RtpTransceiverDirection::kSendOnly, transceiver->direction()); } @@ -1153,12 +1240,15 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, RtpTransceiverInit init; init.direction = RtpTransceiverDirection::kRecvOnly; auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO, init); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); 
ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); ASSERT_TRUE(caller->AddAudioTrack("a")); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); EXPECT_EQ(RtpTransceiverDirection::kSendRecv, transceiver->direction()); } @@ -1182,10 +1272,12 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, AddTrackErrorIfClosed) { auto audio_track = caller->CreateAudioTrack("a"); caller->pc()->Close(); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); auto result = caller->pc()->AddTrack(audio_track, std::vector()); EXPECT_EQ(RTCErrorType::INVALID_STATE, result.error().type()); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); } TEST_F(PeerConnectionRtpTestUnifiedPlan, AddTrackErrorIfTrackAlreadyHasSender) { @@ -1194,10 +1286,12 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, AddTrackErrorIfTrackAlreadyHasSender) { auto audio_track = caller->CreateAudioTrack("a"); ASSERT_TRUE(caller->AddTrack(audio_track)); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); auto result = caller->pc()->AddTrack(audio_track, std::vector()); EXPECT_EQ(RTCErrorType::INVALID_PARAMETER, result.error().type()); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); } // Unified Plan 
RemoveTrack tests. @@ -1224,13 +1318,16 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, init.direction = RtpTransceiverDirection::kSendRecv; auto transceiver = caller->AddTransceiver(caller->CreateAudioTrack("a"), init); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); ASSERT_TRUE(caller->pc()->RemoveTrack(transceiver->sender())); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); EXPECT_EQ(RtpTransceiverDirection::kRecvOnly, transceiver->direction()); } @@ -1246,13 +1343,16 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, init.direction = RtpTransceiverDirection::kSendOnly; auto transceiver = caller->AddTransceiver(caller->CreateAudioTrack("a"), init); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); ASSERT_TRUE(caller->pc()->RemoveTrack(transceiver->sender())); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); EXPECT_EQ(RtpTransceiverDirection::kInactive, transceiver->direction()); } @@ -1266,9 +1366,11 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, 
RemoveTrackWithNullSenderTrackIsNoOp) { auto transceiver = caller->pc()->GetTransceivers()[0]; ASSERT_TRUE(sender->SetTrack(nullptr)); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); ASSERT_TRUE(caller->pc()->RemoveTrack(sender)); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); EXPECT_EQ(RtpTransceiverDirection::kSendRecv, transceiver->direction()); } @@ -1281,9 +1383,11 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, RemoveTrackErrorIfClosed) { auto sender = caller->AddAudioTrack("a"); caller->pc()->Close(); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); EXPECT_FALSE(caller->pc()->RemoveTrack(sender)); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); } TEST_F(PeerConnectionRtpTestUnifiedPlan, @@ -1293,9 +1397,11 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, auto sender = caller->AddAudioTrack("a"); ASSERT_TRUE(caller->pc()->RemoveTrack(sender)); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); EXPECT_TRUE(caller->pc()->RemoveTrack(sender)); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); } // Test that setting offers that add/remove/add a track repeatedly without @@ -1401,16 +1507,20 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, RenegotiationNeededAfterTransceiverSetDirection) { 
auto caller = CreatePeerConnection(); auto callee = CreatePeerConnection(); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); - transceiver->SetDirection(RtpTransceiverDirection::kInactive); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + transceiver->SetDirectionWithError(RtpTransceiverDirection::kInactive); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); } // Test that OnRenegotiationNeeded is not fired if SetDirection is called on an @@ -1421,9 +1531,11 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); - caller->observer()->clear_negotiation_needed(); - transceiver->SetDirection(transceiver->direction()); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); + transceiver->SetDirectionWithError(transceiver->direction()); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); } // Test that OnRenegotiationNeeded is not fired if SetDirection is called on a @@ -1433,11 +1545,140 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, auto caller = CreatePeerConnection(); auto transceiver = 
caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); - transceiver->Stop(); + transceiver->StopInternal(); + + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); + transceiver->SetDirectionWithError(RtpTransceiverDirection::kInactive); + EXPECT_FALSE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); +} + +// Test that currentDirection returns "stopped" if the transceiver was stopped. +TEST_F(PeerConnectionRtpTestUnifiedPlan, + CheckStoppedCurrentDirectionOnStoppedTransceiver) { + auto caller = CreatePeerConnection(); + + auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); + transceiver->StopInternal(); - caller->observer()->clear_negotiation_needed(); - transceiver->SetDirection(RtpTransceiverDirection::kInactive); - EXPECT_FALSE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(transceiver->stopping()); + EXPECT_TRUE(transceiver->stopped()); + EXPECT_EQ(RtpTransceiverDirection::kStopped, + transceiver->current_direction()); +} + +// Test that InvalidState is thrown on a stopping transceiver. +TEST_F(PeerConnectionRtpTestUnifiedPlan, + CheckForInvalidStateOnStoppingTransceiver) { + auto caller = CreatePeerConnection(); + + auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); + transceiver->StopStandard(); + + EXPECT_TRUE(transceiver->stopping()); + EXPECT_FALSE(transceiver->stopped()); + EXPECT_EQ( + RTCErrorType::INVALID_STATE, + transceiver->SetDirectionWithError(RtpTransceiverDirection::kInactive) + .type()); +} + +// Test that InvalidState is thrown on a stopped transceiver. 
+TEST_F(PeerConnectionRtpTestUnifiedPlan, + CheckForInvalidStateOnStoppedTransceiver) { + auto caller = CreatePeerConnection(); + + auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); + transceiver->StopInternal(); + + EXPECT_TRUE(transceiver->stopping()); + EXPECT_TRUE(transceiver->stopped()); + EXPECT_EQ( + RTCErrorType::INVALID_STATE, + transceiver->SetDirectionWithError(RtpTransceiverDirection::kInactive) + .type()); +} + +// Test that TypeError is thrown if the direction is set to "stopped". +TEST_F(PeerConnectionRtpTestUnifiedPlan, + CheckForTypeErrorForStoppedOnTransceiver) { + auto caller = CreatePeerConnection(); + + auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); + EXPECT_EQ( + RTCErrorType::INVALID_PARAMETER, + transceiver->SetDirectionWithError(RtpTransceiverDirection::kStopped) + .type()); +} + +// Test that you can do createOffer/setLocalDescription with a stopped +// media section. +TEST_F(PeerConnectionRtpTestUnifiedPlan, + SetLocalDescriptionWithStoppedMediaSection) { + auto caller = CreatePeerConnection(); + auto callee = CreatePeerConnection(); + auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); + callee->pc()->GetTransceivers()[0]->StopStandard(); + ASSERT_TRUE(callee->ExchangeOfferAnswerWith(caller.get())); + EXPECT_EQ(RtpTransceiverDirection::kStopped, + transceiver->current_direction()); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); +} + +TEST_F(PeerConnectionRtpTestUnifiedPlan, + StopAndNegotiateCausesTransceiverToDisappear) { + auto caller = CreatePeerConnection(); + auto callee = CreatePeerConnection(); + auto transceiver = caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); + callee->pc()->GetTransceivers()[0]->StopStandard(); + ASSERT_TRUE(callee->ExchangeOfferAnswerWith(caller.get())); + EXPECT_EQ(RtpTransceiverDirection::kStopped, + 
transceiver->current_direction()); + EXPECT_EQ(0U, caller->pc()->GetTransceivers().size()); + EXPECT_EQ(0U, callee->pc()->GetTransceivers().size()); + EXPECT_EQ(0U, caller->pc()->GetSenders().size()); + EXPECT_EQ(0U, callee->pc()->GetSenders().size()); + EXPECT_EQ(0U, caller->pc()->GetReceivers().size()); + EXPECT_EQ(0U, callee->pc()->GetReceivers().size()); +} + +TEST_F(PeerConnectionRtpTestUnifiedPlan, + SetLocalDescriptionWorksAfterRepeatedAddRemove) { + auto caller = CreatePeerConnection(); + auto callee = CreatePeerConnection(); + auto video_track = caller->CreateVideoTrack("v"); + auto track = caller->CreateAudioTrack("a"); + caller->AddTransceiver(video_track); + auto transceiver = caller->AddTransceiver(track); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); + caller->pc()->RemoveTrack(transceiver->sender()); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); + caller->AddTrack(track); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); + caller->pc()->RemoveTrack(transceiver->sender()); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); +} + +// This is a repro of Chromium bug https://crbug.com/1134686 +TEST_F(PeerConnectionRtpTestUnifiedPlan, + SetLocalDescriptionWorksAfterRepeatedAddRemoveWithRemoteReject) { + auto caller = CreatePeerConnection(); + auto callee = CreatePeerConnection(); + auto video_track = caller->CreateVideoTrack("v"); + auto track = caller->CreateAudioTrack("a"); + caller->AddTransceiver(video_track); + auto transceiver = caller->AddTransceiver(track); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); + caller->pc()->RemoveTrack(transceiver->sender()); + ExchangeOfferAnswerWhereRemoteStopsTransceiver(caller.get(), callee.get(), 1); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); + caller->AddTrack(track); + ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); + caller->pc()->RemoveTrack(transceiver->sender()); + 
ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); } // Test that AddTransceiver fails if trying to use unimplemented RTP encoding @@ -1653,7 +1894,7 @@ TEST_F(PeerConnectionMsidSignalingTest, PureUnifiedPlanToUs) { class SdpFormatReceivedTest : public PeerConnectionRtpTestUnifiedPlan {}; -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP TEST_F(SdpFormatReceivedTest, DataChannelOnlyIsReportedAsNoTracks) { auto caller = CreatePeerConnectionWithUnifiedPlan(); caller->CreateDataChannel("dc"); @@ -1665,7 +1906,7 @@ TEST_F(SdpFormatReceivedTest, DataChannelOnlyIsReportedAsNoTracks) { metrics::Samples("WebRTC.PeerConnection.SdpFormatReceived"), ElementsAre(Pair(kSdpFormatReceivedNoTracks, 1))); } -#endif // HAVE_SCTP +#endif // WEBRTC_HAVE_SCTP TEST_F(SdpFormatReceivedTest, SimpleUnifiedPlanIsReportedAsSimple) { auto caller = CreatePeerConnectionWithUnifiedPlan(); @@ -1722,6 +1963,19 @@ TEST_F(SdpFormatReceivedTest, ComplexPlanBIsReportedAsComplexPlanB) { ElementsAre(Pair(kSdpFormatReceivedComplexPlanB, 1))); } +TEST_F(SdpFormatReceivedTest, AnswerIsReported) { + auto caller = CreatePeerConnectionWithPlanB(); + caller->AddAudioTrack("audio"); + caller->AddVideoTrack("video"); + auto callee = CreatePeerConnectionWithUnifiedPlan(); + + ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); + ASSERT_TRUE(caller->SetRemoteDescription(callee->CreateAnswer())); + EXPECT_METRIC_THAT( + metrics::Samples("WebRTC.PeerConnection.SdpFormatReceivedAnswer"), + ElementsAre(Pair(kSdpFormatReceivedSimple, 1))); +} + // Sender setups in a call. 
TEST_P(PeerConnectionRtpTest, CreateTwoSendersWithSameTrack) { @@ -1759,13 +2013,16 @@ TEST_F(PeerConnectionRtpTestUnifiedPlan, init.direction = RtpTransceiverDirection::kSendRecv; auto transceiver = caller->AddTransceiver(caller->CreateAudioTrack("a"), init); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); ASSERT_TRUE(caller->ExchangeOfferAnswerWith(callee.get())); - caller->observer()->clear_negotiation_needed(); + caller->observer()->clear_legacy_renegotiation_needed(); + caller->observer()->clear_latest_negotiation_needed_event(); transceiver->sender()->SetStreams({"stream3", "stream4", "stream5"}); - EXPECT_TRUE(caller->observer()->negotiation_needed()); + EXPECT_TRUE(caller->observer()->legacy_renegotiation_needed()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); ASSERT_TRUE(callee->SetRemoteDescription(caller->CreateOfferAndSetAsLocal())); auto callee_streams = callee->pc()->GetReceivers()[0]->streams(); diff --git a/pc/peer_connection_signaling_unittest.cc b/pc/peer_connection_signaling_unittest.cc index 30b11ceaa7..1c94570ec7 100644 --- a/pc/peer_connection_signaling_unittest.cc +++ b/pc/peer_connection_signaling_unittest.cc @@ -11,18 +11,21 @@ // This file contains tests that check the PeerConnection's signaling state // machine, as well as tests that check basic, media-agnostic aspects of SDP. 
+#include #include #include #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "api/create_peerconnection_factory.h" -#include "api/peer_connection_proxy.h" +#include "api/jsep_session_description.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" #include "pc/peer_connection.h" +#include "pc/peer_connection_proxy.h" #include "pc/peer_connection_wrapper.h" #include "pc/sdp_utils.h" +#include "pc/webrtc_sdp.h" #ifdef WEBRTC_ANDROID #include "pc/test/android_test_initializer.h" #endif @@ -535,8 +538,7 @@ TEST_P(PeerConnectionSignalingTest, CreateOffersAndShutdown) { rtc::scoped_refptr observers[100]; for (auto& observer : observers) { - observer = - new rtc::RefCountedObject(); + observer = rtc::make_ref_counted(); caller->pc()->CreateOffer(observer, options); } @@ -557,38 +559,109 @@ TEST_P(PeerConnectionSignalingTest, CreateOffersAndShutdown) { // the WebRtcSessionDescriptionFactory is responsible for it. TEST_P(PeerConnectionSignalingTest, CloseCreateOfferAndShutdown) { auto caller = CreatePeerConnection(); - rtc::scoped_refptr observer = - new rtc::RefCountedObject(); + auto observer = rtc::make_ref_counted(); caller->pc()->Close(); caller->pc()->CreateOffer(observer, RTCOfferAnswerOptions()); caller.reset(nullptr); EXPECT_TRUE(observer->called()); } -TEST_P(PeerConnectionSignalingTest, ImplicitCreateOfferAndShutdown) { +TEST_P(PeerConnectionSignalingTest, + ImplicitCreateOfferAndShutdownWithOldObserver) { auto caller = CreatePeerConnection(); auto observer = MockSetSessionDescriptionObserver::Create(); + caller->pc()->SetLocalDescription(observer.get()); + caller.reset(nullptr); + // The old observer does not get invoked because posted messages are lost. 
+ EXPECT_FALSE(observer->called()); +} + +TEST_P(PeerConnectionSignalingTest, ImplicitCreateOfferAndShutdown) { + auto caller = CreatePeerConnection(); + rtc::scoped_refptr observer( + new FakeSetLocalDescriptionObserver()); caller->pc()->SetLocalDescription(observer); caller.reset(nullptr); + // The new observer gets invoked because it is called immediately. + EXPECT_TRUE(observer->called()); + EXPECT_FALSE(observer->error().ok()); +} + +TEST_P(PeerConnectionSignalingTest, + CloseBeforeImplicitCreateOfferAndShutdownWithOldObserver) { + auto caller = CreatePeerConnection(); + auto observer = MockSetSessionDescriptionObserver::Create(); + caller->pc()->Close(); + caller->pc()->SetLocalDescription(observer.get()); + caller.reset(nullptr); + // The old observer does not get invoked because posted messages are lost. EXPECT_FALSE(observer->called()); } TEST_P(PeerConnectionSignalingTest, CloseBeforeImplicitCreateOfferAndShutdown) { auto caller = CreatePeerConnection(); - auto observer = MockSetSessionDescriptionObserver::Create(); + rtc::scoped_refptr observer( + new FakeSetLocalDescriptionObserver()); caller->pc()->Close(); caller->pc()->SetLocalDescription(observer); caller.reset(nullptr); + // The new observer gets invoked because it is called immediately. + EXPECT_TRUE(observer->called()); + EXPECT_FALSE(observer->error().ok()); +} + +TEST_P(PeerConnectionSignalingTest, + CloseAfterImplicitCreateOfferAndShutdownWithOldObserver) { + auto caller = CreatePeerConnection(); + auto observer = MockSetSessionDescriptionObserver::Create(); + caller->pc()->SetLocalDescription(observer.get()); + caller->pc()->Close(); + caller.reset(nullptr); + // The old observer does not get invoked because posted messages are lost. 
EXPECT_FALSE(observer->called()); } TEST_P(PeerConnectionSignalingTest, CloseAfterImplicitCreateOfferAndShutdown) { auto caller = CreatePeerConnection(); - auto observer = MockSetSessionDescriptionObserver::Create(); + rtc::scoped_refptr observer( + new FakeSetLocalDescriptionObserver()); caller->pc()->SetLocalDescription(observer); caller->pc()->Close(); caller.reset(nullptr); + // The new observer gets invoked because it is called immediately. + EXPECT_TRUE(observer->called()); + EXPECT_FALSE(observer->error().ok()); +} + +TEST_P(PeerConnectionSignalingTest, + SetLocalDescriptionNewObserverIsInvokedImmediately) { + auto caller = CreatePeerConnection(); + auto offer = caller->CreateOffer(RTCOfferAnswerOptions()); + + rtc::scoped_refptr observer( + new FakeSetLocalDescriptionObserver()); + caller->pc()->SetLocalDescription(std::move(offer), observer); + // The new observer is invoked immediately. + EXPECT_TRUE(observer->called()); + EXPECT_TRUE(observer->error().ok()); +} + +TEST_P(PeerConnectionSignalingTest, + SetLocalDescriptionOldObserverIsInvokedInAPostedMessage) { + auto caller = CreatePeerConnection(); + auto offer = caller->CreateOffer(RTCOfferAnswerOptions()); + + auto observer = MockSetSessionDescriptionObserver::Create(); + caller->pc()->SetLocalDescription(observer, offer.release()); + // The old observer is not invoked immediately. EXPECT_FALSE(observer->called()); + // Process all currently pending messages by waiting for a posted task to run. + bool checkpoint_reached = false; + rtc::Thread::Current()->PostTask( + RTC_FROM_HERE, [&checkpoint_reached] { checkpoint_reached = true; }); + EXPECT_TRUE_WAIT(checkpoint_reached, kWaitTimeout); + // If resolving the observer was pending, it must now have been called. 
+ EXPECT_TRUE(observer->called()); } TEST_P(PeerConnectionSignalingTest, SetRemoteDescriptionExecutesImmediately) { @@ -601,7 +674,7 @@ TEST_P(PeerConnectionSignalingTest, SetRemoteDescriptionExecutesImmediately) { // By not waiting for the observer's callback we can verify that the operation // executed immediately. callee->pc()->SetRemoteDescription(std::move(offer), - new MockSetRemoteDescriptionObserver()); + new FakeSetRemoteDescriptionObserver()); EXPECT_EQ(2u, callee->pc()->GetReceivers().size()); } @@ -613,14 +686,14 @@ TEST_P(PeerConnectionSignalingTest, CreateOfferBlocksSetRemoteDescription) { auto offer = caller->CreateOffer(RTCOfferAnswerOptions()); EXPECT_EQ(0u, callee->pc()->GetReceivers().size()); - rtc::scoped_refptr offer_observer( - new rtc::RefCountedObject()); + auto offer_observer = + rtc::make_ref_counted(); // Synchronously invoke CreateOffer() and SetRemoteDescription(). The // SetRemoteDescription() operation should be chained to be executed // asynchronously, when CreateOffer() completes. callee->pc()->CreateOffer(offer_observer, RTCOfferAnswerOptions()); callee->pc()->SetRemoteDescription(std::move(offer), - new MockSetRemoteDescriptionObserver()); + new FakeSetRemoteDescriptionObserver()); // CreateOffer() is asynchronous; without message processing this operation // should not have completed. EXPECT_FALSE(offer_observer->called()); @@ -639,7 +712,7 @@ TEST_P(PeerConnectionSignalingTest, auto caller = CreatePeerConnectionWithAudioVideo(); auto observer = MockSetSessionDescriptionObserver::Create(); - caller->pc()->SetLocalDescription(observer); + caller->pc()->SetLocalDescription(observer.get()); // The offer is created asynchronously; message processing is needed for it to // complete. 
@@ -665,7 +738,7 @@ TEST_P(PeerConnectionSignalingTest, EXPECT_EQ(PeerConnection::kHaveRemoteOffer, callee->signaling_state()); auto observer = MockSetSessionDescriptionObserver::Create(); - callee->pc()->SetLocalDescription(observer); + callee->pc()->SetLocalDescription(observer.get()); // The answer is created asynchronously; message processing is needed for it // to complete. @@ -687,28 +760,27 @@ TEST_P(PeerConnectionSignalingTest, auto callee = CreatePeerConnectionWithAudioVideo(); // SetLocalDescription(), implicitly creating an offer. - rtc::scoped_refptr - caller_set_local_description_observer( - new rtc::RefCountedObject()); - caller->pc()->SetLocalDescription(caller_set_local_description_observer); + auto caller_set_local_description_observer = + MockSetSessionDescriptionObserver::Create(); + caller->pc()->SetLocalDescription( + caller_set_local_description_observer.get()); EXPECT_TRUE_WAIT(caller_set_local_description_observer->called(), kWaitTimeout); ASSERT_TRUE(caller->pc()->pending_local_description()); // SetRemoteDescription(offer) - rtc::scoped_refptr - callee_set_remote_description_observer( - new rtc::RefCountedObject()); + auto callee_set_remote_description_observer = + MockSetSessionDescriptionObserver::Create(); callee->pc()->SetRemoteDescription( - callee_set_remote_description_observer.get(), + callee_set_remote_description_observer, CloneSessionDescription(caller->pc()->pending_local_description()) .release()); // SetLocalDescription(), implicitly creating an answer. 
- rtc::scoped_refptr - callee_set_local_description_observer( - new rtc::RefCountedObject()); - callee->pc()->SetLocalDescription(callee_set_local_description_observer); + auto callee_set_local_description_observer = + MockSetSessionDescriptionObserver::Create(); + callee->pc()->SetLocalDescription( + callee_set_local_description_observer.get()); EXPECT_TRUE_WAIT(callee_set_local_description_observer->called(), kWaitTimeout); // Chaining guarantees SetRemoteDescription() happened before @@ -717,9 +789,8 @@ TEST_P(PeerConnectionSignalingTest, EXPECT_TRUE(callee->pc()->current_local_description()); // SetRemoteDescription(answer) - rtc::scoped_refptr - caller_set_remote_description_observer( - new rtc::RefCountedObject()); + auto caller_set_remote_description_observer = + MockSetSessionDescriptionObserver::Create(); caller->pc()->SetRemoteDescription( caller_set_remote_description_observer, CloneSessionDescription(callee->pc()->current_local_description()) @@ -737,7 +808,7 @@ TEST_P(PeerConnectionSignalingTest, auto observer = MockSetSessionDescriptionObserver::Create(); caller->pc()->Close(); - caller->pc()->SetLocalDescription(observer); + caller->pc()->SetLocalDescription(observer.get()); // The operation should fail asynchronously. EXPECT_FALSE(observer->called()); @@ -756,7 +827,7 @@ TEST_P(PeerConnectionSignalingTest, auto caller = CreatePeerConnectionWithAudioVideo(); auto observer = MockSetSessionDescriptionObserver::Create(); - caller->pc()->SetLocalDescription(observer); + caller->pc()->SetLocalDescription(observer.get()); caller->pc()->Close(); // The operation should fail asynchronously. @@ -771,6 +842,195 @@ TEST_P(PeerConnectionSignalingTest, observer->error()); } +TEST_P(PeerConnectionSignalingTest, UnsupportedContentType) { + auto caller = CreatePeerConnection(); + + // Call setRemoteDescription with a m= line we don't understand. 
+ std::string sdp = + "v=0\r\n" + "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" + "s=-\r\n" + "t=0 0\r\n" + "m=bogus 9 FOO 0 8\r\n" + "c=IN IP4 0.0.0.0\r\n" + "a=mid:bogusmid\r\n"; + std::unique_ptr remote_description = + webrtc::CreateSessionDescription(SdpType::kOffer, sdp, nullptr); + + EXPECT_TRUE(caller->SetRemoteDescription(std::move(remote_description))); + + // Assert we respond back with something meaningful. + auto answer = caller->CreateAnswer(); + ASSERT_EQ(answer->description()->contents().size(), 1u); + EXPECT_NE(answer->description() + ->contents()[0] + .media_description() + ->as_unsupported(), + nullptr); + EXPECT_EQ(answer->description() + ->contents()[0] + .media_description() + ->as_unsupported() + ->media_type(), + "bogus"); + EXPECT_TRUE(answer->description()->contents()[0].rejected); + EXPECT_EQ(answer->description()->contents()[0].mid(), "bogusmid"); + EXPECT_EQ( + answer->description()->contents()[0].media_description()->protocol(), + "FOO"); + EXPECT_FALSE( + answer->description()->contents()[0].media_description()->has_codecs()); + + EXPECT_TRUE(caller->SetLocalDescription(std::move(answer))); + + // Assert we keep this in susequent offers. 
+ auto offer = caller->CreateOffer(); + EXPECT_EQ(offer->description() + ->contents()[0] + .media_description() + ->as_unsupported() + ->media_type(), + "bogus"); + EXPECT_TRUE(offer->description()->contents()[0].rejected); + EXPECT_EQ(offer->description()->contents()[0].media_description()->protocol(), + "FOO"); + EXPECT_EQ(offer->description()->contents()[0].mid(), "bogusmid"); + EXPECT_FALSE( + offer->description()->contents()[0].media_description()->has_codecs()); + EXPECT_TRUE(caller->SetLocalDescription(std::move(offer))); +} + +TEST_P(PeerConnectionSignalingTest, ReceiveFlexFec) { + auto caller = CreatePeerConnection(); + + std::string sdp = + "v=0\r\n" + "o=- 8403615332048243445 2 IN IP4 127.0.0.1\r\n" + "s=-\r\n" + "t=0 0\r\n" + "a=group:BUNDLE 0\r\n" + "m=video 9 UDP/TLS/RTP/SAVPF 102 122\r\n" + "c=IN IP4 0.0.0.0\r\n" + "a=rtcp:9 IN IP4 0.0.0.0\r\n" + "a=ice-ufrag:IZeV\r\n" + "a=ice-pwd:uaZhQD4rYM/Tta2qWBT1Bbt4\r\n" + "a=ice-options:trickle\r\n" + "a=fingerprint:sha-256 " + "D8:6C:3D:FA:23:E2:2C:63:11:2D:D0:86:BE:C4:D0:65:F9:42:F7:1C:06:04:27:E6:" + "1C:2C:74:01:8D:50:67:23\r\n" + "a=setup:actpass\r\n" + "a=mid:0\r\n" + "a=sendrecv\r\n" + "a=msid:stream track\r\n" + "a=rtcp-mux\r\n" + "a=rtcp-rsize\r\n" + "a=rtpmap:102 VP8/90000\r\n" + "a=rtcp-fb:102 goog-remb\r\n" + "a=rtcp-fb:102 transport-cc\r\n" + "a=rtcp-fb:102 ccm fir\r\n" + "a=rtcp-fb:102 nack\r\n" + "a=rtcp-fb:102 nack pli\r\n" + "a=rtpmap:122 flexfec-03/90000\r\n" + "a=fmtp:122 repair-window=10000000\r\n" + "a=ssrc-group:FEC-FR 1224551896 1953032773\r\n" + "a=ssrc:1224551896 cname:/exJcmhSLpyu9FgV\r\n" + "a=ssrc:1953032773 cname:/exJcmhSLpyu9FgV\r\n"; + std::unique_ptr remote_description = + webrtc::CreateSessionDescription(SdpType::kOffer, sdp, nullptr); + + EXPECT_TRUE(caller->SetRemoteDescription(std::move(remote_description))); + + auto answer = caller->CreateAnswer(); + ASSERT_EQ(answer->description()->contents().size(), 1u); + ASSERT_NE( + 
answer->description()->contents()[0].media_description()->as_video(), + nullptr); + auto codecs = answer->description() + ->contents()[0] + .media_description() + ->as_video() + ->codecs(); + ASSERT_EQ(codecs.size(), 2u); + EXPECT_EQ(codecs[1].name, "flexfec-03"); + + EXPECT_TRUE(caller->SetLocalDescription(std::move(answer))); +} + +TEST_P(PeerConnectionSignalingTest, ReceiveFlexFecReoffer) { + auto caller = CreatePeerConnection(); + + std::string sdp = + "v=0\r\n" + "o=- 8403615332048243445 2 IN IP4 127.0.0.1\r\n" + "s=-\r\n" + "t=0 0\r\n" + "a=group:BUNDLE 0\r\n" + "m=video 9 UDP/TLS/RTP/SAVPF 102 35\r\n" + "c=IN IP4 0.0.0.0\r\n" + "a=rtcp:9 IN IP4 0.0.0.0\r\n" + "a=ice-ufrag:IZeV\r\n" + "a=ice-pwd:uaZhQD4rYM/Tta2qWBT1Bbt4\r\n" + "a=ice-options:trickle\r\n" + "a=fingerprint:sha-256 " + "D8:6C:3D:FA:23:E2:2C:63:11:2D:D0:86:BE:C4:D0:65:F9:42:F7:1C:06:04:27:E6:" + "1C:2C:74:01:8D:50:67:23\r\n" + "a=setup:actpass\r\n" + "a=mid:0\r\n" + "a=sendrecv\r\n" + "a=msid:stream track\r\n" + "a=rtcp-mux\r\n" + "a=rtcp-rsize\r\n" + "a=rtpmap:102 VP8/90000\r\n" + "a=rtcp-fb:102 goog-remb\r\n" + "a=rtcp-fb:102 transport-cc\r\n" + "a=rtcp-fb:102 ccm fir\r\n" + "a=rtcp-fb:102 nack\r\n" + "a=rtcp-fb:102 nack pli\r\n" + "a=rtpmap:35 flexfec-03/90000\r\n" + "a=fmtp:35 repair-window=10000000\r\n" + "a=ssrc-group:FEC-FR 1224551896 1953032773\r\n" + "a=ssrc:1224551896 cname:/exJcmhSLpyu9FgV\r\n" + "a=ssrc:1953032773 cname:/exJcmhSLpyu9FgV\r\n"; + std::unique_ptr remote_description = + webrtc::CreateSessionDescription(SdpType::kOffer, sdp, nullptr); + + EXPECT_TRUE(caller->SetRemoteDescription(std::move(remote_description))); + + auto answer = caller->CreateAnswer(); + ASSERT_EQ(answer->description()->contents().size(), 1u); + ASSERT_NE( + answer->description()->contents()[0].media_description()->as_video(), + nullptr); + auto codecs = answer->description() + ->contents()[0] + .media_description() + ->as_video() + ->codecs(); + ASSERT_EQ(codecs.size(), 2u); + EXPECT_EQ(codecs[1].name, 
"flexfec-03"); + EXPECT_EQ(codecs[1].id, 35); + + EXPECT_TRUE(caller->SetLocalDescription(std::move(answer))); + + // This generates a collision for AV1 which needs to be remapped. + auto offer = caller->CreateOffer(RTCOfferAnswerOptions()); + auto offer_codecs = offer->description() + ->contents()[0] + .media_description() + ->as_video() + ->codecs(); + auto flexfec_it = std::find_if( + offer_codecs.begin(), offer_codecs.end(), + [](const cricket::Codec& codec) { return codec.name == "flexfec-03"; }); + ASSERT_EQ(flexfec_it->id, 35); + auto av1_it = std::find_if( + offer_codecs.begin(), offer_codecs.end(), + [](const cricket::Codec& codec) { return codec.name == "AV1X"; }); + if (av1_it != offer_codecs.end()) { + ASSERT_NE(av1_it->id, 35); + } +} + INSTANTIATE_TEST_SUITE_P(PeerConnectionSignalingTest, PeerConnectionSignalingTest, Values(SdpSemantics::kPlanB, @@ -788,21 +1048,38 @@ class PeerConnectionSignalingUnifiedPlanTest // unique to Unified Plan, but the transceivers used to verify this are only // available in Unified Plan. TEST_F(PeerConnectionSignalingUnifiedPlanTest, - SetLocalDescriptionExecutesImmediately) { + SetLocalDescriptionExecutesImmediatelyUsingOldObserver) { auto caller = CreatePeerConnectionWithAudioVideo(); // This offer will cause transceiver mids to get assigned. auto offer = caller->CreateOffer(RTCOfferAnswerOptions()); // By not waiting for the observer's callback we can verify that the operation - // executed immediately. + // executed immediately. The old observer is invoked in a posted message, so + // waiting for it would not ensure synchronicity. 
RTC_DCHECK(!caller->pc()->GetTransceivers()[0]->mid().has_value()); caller->pc()->SetLocalDescription( - new rtc::RefCountedObject(), + rtc::make_ref_counted(), offer.release()); EXPECT_TRUE(caller->pc()->GetTransceivers()[0]->mid().has_value()); } +TEST_F(PeerConnectionSignalingUnifiedPlanTest, + SetLocalDescriptionExecutesImmediatelyUsingNewObserver) { + auto caller = CreatePeerConnectionWithAudioVideo(); + + // This offer will cause transceiver mids to get assigned. + auto offer = caller->CreateOffer(RTCOfferAnswerOptions()); + + // Verify that mids were assigned without waiting for the observer. (However, + // the new observer should also be invoked synchronously - as is ensured by + // other tests.) + RTC_DCHECK(!caller->pc()->GetTransceivers()[0]->mid().has_value()); + caller->pc()->SetLocalDescription(std::move(offer), + new FakeSetLocalDescriptionObserver()); + EXPECT_TRUE(caller->pc()->GetTransceivers()[0]->mid().has_value()); +} + TEST_F(PeerConnectionSignalingUnifiedPlanTest, SetLocalDescriptionExecutesImmediatelyInsideCreateOfferCallback) { auto caller = CreatePeerConnectionWithAudioVideo(); @@ -810,9 +1087,8 @@ TEST_F(PeerConnectionSignalingUnifiedPlanTest, // This offer will cause transceiver mids to get assigned. auto offer = caller->CreateOffer(RTCOfferAnswerOptions()); - rtc::scoped_refptr - offer_observer(new rtc::RefCountedObject< - ExecuteFunctionOnCreateSessionDescriptionObserver>( + auto offer_observer = + rtc::make_ref_counted( [pc = caller->pc()](SessionDescriptionInterface* desc) { // By not waiting for the observer's callback we can verify that the // operation executed immediately. 
@@ -821,7 +1097,7 @@ TEST_F(PeerConnectionSignalingUnifiedPlanTest, new rtc::RefCountedObject(), desc); EXPECT_TRUE(pc->GetTransceivers()[0]->mid().has_value()); - })); + }); caller->pc()->CreateOffer(offer_observer, RTCOfferAnswerOptions()); EXPECT_TRUE_WAIT(offer_observer->was_called(), kWaitTimeout); } @@ -889,4 +1165,67 @@ TEST_F(PeerConnectionSignalingUnifiedPlanTest, ASSERT_EQ(SignalingState::kStable, caller->signaling_state()); } +TEST_F(PeerConnectionSignalingUnifiedPlanTest, + ShouldFireNegotiationNeededWhenNoChangesArePending) { + auto caller = CreatePeerConnection(); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); + auto transceiver = + caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO, RtpTransceiverInit()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); + EXPECT_TRUE(caller->pc()->ShouldFireNegotiationNeededEvent( + caller->observer()->latest_negotiation_needed_event())); +} + +TEST_F(PeerConnectionSignalingUnifiedPlanTest, + SuppressNegotiationNeededWhenOperationChainIsNotEmpty) { + auto caller = CreatePeerConnection(); + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); + auto transceiver = + caller->AddTransceiver(cricket::MEDIA_TYPE_AUDIO, RtpTransceiverInit()); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); + + auto observer = rtc::make_ref_counted(); + caller->pc()->CreateOffer(observer, RTCOfferAnswerOptions()); + // For this test to work, the operation has to be pending, i.e. the observer + // has not yet been invoked. + EXPECT_FALSE(observer->called()); + // Because the Operations Chain is not empty, the event is now suppressed. + EXPECT_FALSE(caller->pc()->ShouldFireNegotiationNeededEvent( + caller->observer()->latest_negotiation_needed_event())); + caller->observer()->clear_latest_negotiation_needed_event(); + + // When the Operations Chain becomes empty again, a new negotiation needed + // event will be generated that is not suppressed. 
+ EXPECT_TRUE_WAIT(observer->called(), kWaitTimeout); + EXPECT_TRUE(caller->observer()->has_negotiation_needed_event()); + EXPECT_TRUE(caller->pc()->ShouldFireNegotiationNeededEvent( + caller->observer()->latest_negotiation_needed_event())); +} + +TEST_F(PeerConnectionSignalingUnifiedPlanTest, + SuppressNegotiationNeededWhenSignalingStateIsNotStable) { + auto caller = CreatePeerConnection(); + auto callee = CreatePeerConnection(); + auto offer = caller->CreateOffer(RTCOfferAnswerOptions()); + + EXPECT_FALSE(caller->observer()->has_negotiation_needed_event()); + auto transceiver = + callee->AddTransceiver(cricket::MEDIA_TYPE_AUDIO, RtpTransceiverInit()); + EXPECT_TRUE(callee->observer()->has_negotiation_needed_event()); + + // Change signaling state (to "have-remote-offer") by setting a remote offer. + callee->SetRemoteDescription(std::move(offer)); + // Because the signaling state is not "stable", the event is now suppressed. + EXPECT_FALSE(callee->pc()->ShouldFireNegotiationNeededEvent( + callee->observer()->latest_negotiation_needed_event())); + callee->observer()->clear_latest_negotiation_needed_event(); + + // Upon rolling back to "stable", a new negotiation needed event will be + // generated that is not suppressed. 
+ callee->SetLocalDescription(CreateSessionDescription(SdpType::kRollback, "")); + EXPECT_TRUE(callee->observer()->has_negotiation_needed_event()); + EXPECT_TRUE(callee->pc()->ShouldFireNegotiationNeededEvent( + callee->observer()->latest_negotiation_needed_event())); +} + } // namespace webrtc diff --git a/pc/peer_connection_simulcast_unittest.cc b/pc/peer_connection_simulcast_unittest.cc index 42bdae17b9..31385754b7 100644 --- a/pc/peer_connection_simulcast_unittest.cc +++ b/pc/peer_connection_simulcast_unittest.cc @@ -157,9 +157,10 @@ class PeerConnectionSimulcastTests : public ::testing::Test { rtc::scoped_refptr AddTransceiver( PeerConnectionWrapper* pc, - const std::vector& layers) { + const std::vector& layers, + cricket::MediaType media_type = cricket::MEDIA_TYPE_VIDEO) { auto init = CreateTransceiverInit(layers); - return pc->AddTransceiver(cricket::MEDIA_TYPE_VIDEO, init); + return pc->AddTransceiver(media_type, init); } SimulcastDescription RemoveSimulcast(SessionDescriptionInterface* sd) { @@ -455,7 +456,7 @@ TEST_F(PeerConnectionSimulcastTests, ServerSendsOfferToReceiveSimulcast) { std::string error; EXPECT_TRUE(remote->SetRemoteDescription(std::move(offer), &error)) << error; auto transceiver = remote->pc()->GetTransceivers()[0]; - transceiver->SetDirection(RtpTransceiverDirection::kSendRecv); + transceiver->SetDirectionWithError(RtpTransceiverDirection::kSendRecv); EXPECT_TRUE(remote->CreateAnswerAndSetAsLocal()); ValidateTransceiverParameters(transceiver, layers); } @@ -478,7 +479,7 @@ TEST_F(PeerConnectionSimulcastTests, TransceiverIsNotRecycledWithSimulcast) { auto transceivers = remote->pc()->GetTransceivers(); ASSERT_EQ(2u, transceivers.size()); auto transceiver = transceivers[1]; - transceiver->SetDirection(RtpTransceiverDirection::kSendRecv); + transceiver->SetDirectionWithError(RtpTransceiverDirection::kSendRecv); EXPECT_TRUE(remote->CreateAnswerAndSetAsLocal()); ValidateTransceiverParameters(transceiver, layers); } @@ -556,6 +557,25 @@ 
TEST_F(PeerConnectionSimulcastTests, NegotiationDoesNotHaveRidExtension) { ValidateTransceiverParameters(transceiver, expected_layers); } +TEST_F(PeerConnectionSimulcastTests, SimulcastAudioRejected) { + auto local = CreatePeerConnectionWrapper(); + auto remote = CreatePeerConnectionWrapper(); + auto layers = CreateLayers({"1", "2", "3", "4"}, true); + auto transceiver = + AddTransceiver(local.get(), layers, cricket::MEDIA_TYPE_AUDIO); + // Should only have the first layer. + auto parameters = transceiver->sender()->GetParameters(); + EXPECT_EQ(1u, parameters.encodings.size()); + EXPECT_THAT(parameters.encodings, + ElementsAre(Field("rid", &RtpEncodingParameters::rid, Eq("")))); + ExchangeOfferAnswer(local.get(), remote.get(), {}); + // Still have a single layer after negotiation + parameters = transceiver->sender()->GetParameters(); + EXPECT_EQ(1u, parameters.encodings.size()); + EXPECT_THAT(parameters.encodings, + ElementsAre(Field("rid", &RtpEncodingParameters::rid, Eq("")))); +} + #if RTC_METRICS_ENABLED // // Checks the logged metrics when simulcast is not used. 
@@ -611,7 +631,7 @@ TEST_F(PeerConnectionSimulcastMetricsTests, IncomingSimulcastIsLogged) { ElementsAre(Pair(kSimulcastApiVersionSpecCompliant, 1))); auto transceiver = remote->pc()->GetTransceivers()[0]; - transceiver->SetDirection(RtpTransceiverDirection::kSendRecv); + transceiver->SetDirectionWithError(RtpTransceiverDirection::kSendRecv); EXPECT_TRUE(remote->CreateAnswerAndSetAsLocal()); EXPECT_THAT(LocalDescriptionSamples(), ElementsAre(Pair(kSimulcastApiVersionSpecCompliant, 2))); diff --git a/pc/peer_connection_wrapper.cc b/pc/peer_connection_wrapper.cc index 7c0b3391d0..3b4d28f0d9 100644 --- a/pc/peer_connection_wrapper.cc +++ b/pc/peer_connection_wrapper.cc @@ -48,7 +48,10 @@ PeerConnectionWrapper::PeerConnectionWrapper( observer_->SetPeerConnectionInterface(pc_.get()); } -PeerConnectionWrapper::~PeerConnectionWrapper() = default; +PeerConnectionWrapper::~PeerConnectionWrapper() { + if (pc_) + pc_->Close(); +} PeerConnectionFactoryInterface* PeerConnectionWrapper::pc_factory() { return pc_factory_.get(); @@ -133,8 +136,7 @@ PeerConnectionWrapper::CreateRollback() { std::unique_ptr PeerConnectionWrapper::CreateSdp( rtc::FunctionView fn, std::string* error_out) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = rtc::make_ref_counted(); fn(observer); EXPECT_EQ_WAIT(true, observer->called(), kDefaultTimeout); if (error_out && !observer->result()) { @@ -166,8 +168,8 @@ bool PeerConnectionWrapper::SetRemoteDescription( bool PeerConnectionWrapper::SetRemoteDescription( std::unique_ptr desc, RTCError* error_out) { - rtc::scoped_refptr observer = - new MockSetRemoteDescriptionObserver(); + rtc::scoped_refptr observer = + new FakeSetRemoteDescriptionObserver(); pc()->SetRemoteDescription(std::move(desc), observer); EXPECT_EQ_WAIT(true, observer->called(), kDefaultTimeout); bool ok = observer->error().ok(); @@ -179,8 +181,7 @@ bool PeerConnectionWrapper::SetRemoteDescription( bool PeerConnectionWrapper::SetSdp( rtc::FunctionView fn, 
std::string* error_out) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = rtc::make_ref_counted(); fn(observer); EXPECT_EQ_WAIT(true, observer->called(), kDefaultTimeout); if (error_out && !observer->result()) { @@ -305,7 +306,14 @@ rtc::scoped_refptr PeerConnectionWrapper::AddVideoTrack( rtc::scoped_refptr PeerConnectionWrapper::CreateDataChannel(const std::string& label) { - return pc()->CreateDataChannel(label, nullptr); + auto result = pc()->CreateDataChannelOrError(label, nullptr); + if (!result.ok()) { + RTC_LOG(LS_ERROR) << "CreateDataChannel failed: " + << ToString(result.error().type()) << " " + << result.error().message(); + return nullptr; + } + return result.MoveValue(); } PeerConnectionInterface::SignalingState @@ -323,8 +331,7 @@ bool PeerConnectionWrapper::IsIceConnected() { rtc::scoped_refptr PeerConnectionWrapper::GetStats() { - rtc::scoped_refptr callback( - new rtc::RefCountedObject()); + auto callback = rtc::make_ref_counted(); pc()->GetStats(callback); EXPECT_TRUE_WAIT(callback->called(), kDefaultTimeout); return callback->report(); diff --git a/pc/proxy.cc b/pc/proxy.cc new file mode 100644 index 0000000000..5f4e0b8832 --- /dev/null +++ b/pc/proxy.cc @@ -0,0 +1,25 @@ +/* + * Copyright 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/proxy.h" + +#include "rtc_base/trace_event.h" + +namespace webrtc { +namespace proxy_internal { +ScopedTrace::ScopedTrace(const char* class_and_method_name) + : class_and_method_name_(class_and_method_name) { + TRACE_EVENT_BEGIN0("webrtc", class_and_method_name_); +} +ScopedTrace::~ScopedTrace() { + TRACE_EVENT_END0("webrtc", class_and_method_name_); +} +} // namespace proxy_internal +} // namespace webrtc diff --git a/api/proxy.h b/pc/proxy.h similarity index 58% rename from api/proxy.h rename to pc/proxy.h index 385992e659..565ae80175 100644 --- a/api/proxy.h +++ b/pc/proxy.h @@ -12,6 +12,13 @@ // PeerConnection classes. // TODO(deadbeef): Move this to pc/; this is part of the implementation. +// The proxied objects are initialized with either one or two thread +// objects that operations can be proxied to: The primary and secondary +// threads. +// In common usage, the primary thread will be the PeerConnection's +// signaling thread, and the secondary thread will be either the +// PeerConnection's worker thread or the PeerConnection's network thread. + // // Example usage: // @@ -29,46 +36,66 @@ // }; // // BEGIN_PROXY_MAP(Test) -// PROXY_SIGNALING_THREAD_DESTRUCTOR() +// PROXY_PRIMARY_THREAD_DESTRUCTOR() // PROXY_METHOD0(std::string, FooA) // PROXY_CONSTMETHOD1(std::string, FooB, arg1) -// PROXY_WORKER_METHOD1(std::string, FooC, arg1) +// PROXY_SECONDARY_METHOD1(std::string, FooC, arg1) // END_PROXY_MAP() // -// Where the destructor and first two methods are invoked on the signaling -// thread, and the third is invoked on the worker thread. +// Where the destructor and first two methods are invoked on the primary +// thread, and the third is invoked on the secondary thread. // // The proxy can be created using // // TestProxy::Create(Thread* signaling_thread, Thread* worker_thread, // TestInterface*). 
// -// The variant defined with BEGIN_SIGNALING_PROXY_MAP is unaware of -// the worker thread, and invokes all methods on the signaling thread. +// The variant defined with BEGIN_PRIMARY_PROXY_MAP is unaware of +// the secondary thread, and invokes all methods on the primary thread. // // The variant defined with BEGIN_OWNED_PROXY_MAP does not use // refcounting, and instead just takes ownership of the object being proxied. -#ifndef API_PROXY_H_ -#define API_PROXY_H_ +#ifndef PC_PROXY_H_ +#define PC_PROXY_H_ #include #include #include +#include #include #include "api/scoped_refptr.h" +#include "api/task_queue/queued_task.h" +#include "api/task_queue/task_queue_base.h" #include "rtc_base/event.h" #include "rtc_base/message_handler.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/string_utils.h" #include "rtc_base/system/rtc_export.h" #include "rtc_base/thread.h" +#if !defined(RTC_DISABLE_PROXY_TRACE_EVENTS) && !defined(WEBRTC_CHROMIUM_BUILD) +#define RTC_DISABLE_PROXY_TRACE_EVENTS +#endif + namespace rtc { class Location; } namespace webrtc { +namespace proxy_internal { + +// Class for tracing the lifetime of MethodCall::Marshal. 
+class ScopedTrace { + public: + explicit ScopedTrace(const char* class_and_method_name); + ~ScopedTrace(); + + private: + const char* const class_and_method_name_; +}; +} // namespace proxy_internal template class ReturnType { @@ -95,27 +122,8 @@ class ReturnType { void moved_result() {} }; -namespace internal { - -class RTC_EXPORT SynchronousMethodCall : public rtc::MessageData, - public rtc::MessageHandler { - public: - explicit SynchronousMethodCall(rtc::MessageHandler* proxy); - ~SynchronousMethodCall() override; - - void Invoke(const rtc::Location& posted_from, rtc::Thread* t); - - private: - void OnMessage(rtc::Message*) override; - - rtc::Event e_; - rtc::MessageHandler* proxy_; -}; - -} // namespace internal - template -class MethodCall : public rtc::Message, public rtc::MessageHandler { +class MethodCall : public QueuedTask { public: typedef R (C::*Method)(Args...); MethodCall(C* c, Method m, Args&&... args) @@ -124,12 +132,21 @@ class MethodCall : public rtc::Message, public rtc::MessageHandler { args_(std::forward_as_tuple(std::forward(args)...)) {} R Marshal(const rtc::Location& posted_from, rtc::Thread* t) { - internal::SynchronousMethodCall(this).Invoke(posted_from, t); + if (t->IsCurrent()) { + Invoke(std::index_sequence_for()); + } else { + t->PostTask(std::unique_ptr(this)); + event_.Wait(rtc::Event::kForever); + } return r_.moved_result(); } private: - void OnMessage(rtc::Message*) { Invoke(std::index_sequence_for()); } + bool Run() override { + Invoke(std::index_sequence_for()); + event_.Set(); + return false; + } template void Invoke(std::index_sequence) { @@ -140,10 +157,11 @@ class MethodCall : public rtc::Message, public rtc::MessageHandler { Method m_; ReturnType r_; std::tuple args_; + rtc::Event event_; }; template -class ConstMethodCall : public rtc::Message, public rtc::MessageHandler { +class ConstMethodCall : public QueuedTask { public: typedef R (C::*Method)(Args...) const; ConstMethodCall(const C* c, Method m, Args&&... 
args) @@ -152,12 +170,21 @@ class ConstMethodCall : public rtc::Message, public rtc::MessageHandler { args_(std::forward_as_tuple(std::forward(args)...)) {} R Marshal(const rtc::Location& posted_from, rtc::Thread* t) { - internal::SynchronousMethodCall(this).Invoke(posted_from, t); + if (t->IsCurrent()) { + Invoke(std::index_sequence_for()); + } else { + t->PostTask(std::unique_ptr(this)); + event_.Wait(rtc::Event::kForever); + } return r_.moved_result(); } private: - void OnMessage(rtc::Message*) { Invoke(std::index_sequence_for()); } + bool Run() override { + Invoke(std::index_sequence_for()); + event_.Set(); + return false; + } template void Invoke(std::index_sequence) { @@ -168,8 +195,12 @@ class ConstMethodCall : public rtc::Message, public rtc::MessageHandler { Method m_; ReturnType r_; std::tuple args_; + rtc::Event event_; }; +#define PROXY_STRINGIZE_IMPL(x) #x +#define PROXY_STRINGIZE(x) PROXY_STRINGIZE_IMPL(x) + // Helper macros to reduce code duplication. #define PROXY_MAP_BOILERPLATE(c) \ template \ @@ -178,6 +209,7 @@ class ConstMethodCall : public rtc::Message, public rtc::MessageHandler { template \ class c##ProxyWithInternal : public c##Interface { \ protected: \ + static constexpr char proxy_name_[] = #c "Proxy"; \ typedef c##Interface C; \ \ public: \ @@ -187,29 +219,31 @@ class ConstMethodCall : public rtc::Message, public rtc::MessageHandler { // clang-format off // clang-format would put the semicolon alone, // leading to a presubmit error (cpplint.py) -#define END_PROXY_MAP() \ - }; +#define END_PROXY_MAP(c) \ + }; \ + template \ + constexpr char c##ProxyWithInternal::proxy_name_[]; // clang-format on -#define SIGNALING_PROXY_MAP_BOILERPLATE(c) \ +#define PRIMARY_PROXY_MAP_BOILERPLATE(c) \ + protected: \ + c##ProxyWithInternal(rtc::Thread* primary_thread, INTERNAL_CLASS* c) \ + : primary_thread_(primary_thread), c_(c) {} \ + \ + private: \ + mutable rtc::Thread* primary_thread_; + +#define SECONDARY_PROXY_MAP_BOILERPLATE(c) \ protected: \ - 
c##ProxyWithInternal(rtc::Thread* signaling_thread, INTERNAL_CLASS* c) \ - : signaling_thread_(signaling_thread), c_(c) {} \ + c##ProxyWithInternal(rtc::Thread* primary_thread, \ + rtc::Thread* secondary_thread, INTERNAL_CLASS* c) \ + : primary_thread_(primary_thread), \ + secondary_thread_(secondary_thread), \ + c_(c) {} \ \ private: \ - mutable rtc::Thread* signaling_thread_; - -#define WORKER_PROXY_MAP_BOILERPLATE(c) \ - protected: \ - c##ProxyWithInternal(rtc::Thread* signaling_thread, \ - rtc::Thread* worker_thread, INTERNAL_CLASS* c) \ - : signaling_thread_(signaling_thread), \ - worker_thread_(worker_thread), \ - c_(c) {} \ - \ - private: \ - mutable rtc::Thread* signaling_thread_; \ - mutable rtc::Thread* worker_thread_; + mutable rtc::Thread* primary_thread_; \ + mutable rtc::Thread* secondary_thread_; // Note that the destructor is protected so that the proxy can only be // destroyed via RefCountInterface. @@ -242,160 +276,198 @@ class ConstMethodCall : public rtc::Message, public rtc::MessageHandler { void DestroyInternal() { delete c_; } \ INTERNAL_CLASS* c_; -#define BEGIN_SIGNALING_PROXY_MAP(c) \ +#define BEGIN_PRIMARY_PROXY_MAP(c) \ + PROXY_MAP_BOILERPLATE(c) \ + PRIMARY_PROXY_MAP_BOILERPLATE(c) \ + REFCOUNTED_PROXY_MAP_BOILERPLATE(c) \ + public: \ + static rtc::scoped_refptr Create( \ + rtc::Thread* primary_thread, INTERNAL_CLASS* c) { \ + return rtc::make_ref_counted(primary_thread, c); \ + } + +#define BEGIN_PROXY_MAP(c) \ PROXY_MAP_BOILERPLATE(c) \ - SIGNALING_PROXY_MAP_BOILERPLATE(c) \ + SECONDARY_PROXY_MAP_BOILERPLATE(c) \ REFCOUNTED_PROXY_MAP_BOILERPLATE(c) \ public: \ static rtc::scoped_refptr Create( \ - rtc::Thread* signaling_thread, INTERNAL_CLASS* c) { \ - return new rtc::RefCountedObject(signaling_thread, \ - c); \ - } - -#define BEGIN_PROXY_MAP(c) \ - PROXY_MAP_BOILERPLATE(c) \ - WORKER_PROXY_MAP_BOILERPLATE(c) \ - REFCOUNTED_PROXY_MAP_BOILERPLATE(c) \ - public: \ - static rtc::scoped_refptr Create( \ - rtc::Thread* signaling_thread, 
rtc::Thread* worker_thread, \ - INTERNAL_CLASS* c) { \ - return new rtc::RefCountedObject(signaling_thread, \ - worker_thread, c); \ + rtc::Thread* primary_thread, rtc::Thread* secondary_thread, \ + INTERNAL_CLASS* c) { \ + return rtc::make_ref_counted(primary_thread, \ + secondary_thread, c); \ } #define BEGIN_OWNED_PROXY_MAP(c) \ PROXY_MAP_BOILERPLATE(c) \ - WORKER_PROXY_MAP_BOILERPLATE(c) \ + SECONDARY_PROXY_MAP_BOILERPLATE(c) \ OWNED_PROXY_MAP_BOILERPLATE(c) \ public: \ static std::unique_ptr Create( \ - rtc::Thread* signaling_thread, rtc::Thread* worker_thread, \ + rtc::Thread* primary_thread, rtc::Thread* secondary_thread, \ std::unique_ptr c) { \ return std::unique_ptr(new c##ProxyWithInternal( \ - signaling_thread, worker_thread, c.release())); \ + primary_thread, secondary_thread, c.release())); \ } -#define PROXY_SIGNALING_THREAD_DESTRUCTOR() \ - private: \ - rtc::Thread* destructor_thread() const { return signaling_thread_; } \ - \ +#define PROXY_PRIMARY_THREAD_DESTRUCTOR() \ + private: \ + rtc::Thread* destructor_thread() const { return primary_thread_; } \ + \ public: // NOLINTNEXTLINE -#define PROXY_WORKER_THREAD_DESTRUCTOR() \ - private: \ - rtc::Thread* destructor_thread() const { return worker_thread_; } \ - \ +#define PROXY_SECONDARY_THREAD_DESTRUCTOR() \ + private: \ + rtc::Thread* destructor_thread() const { return secondary_thread_; } \ + \ public: // NOLINTNEXTLINE -#define PROXY_METHOD0(r, method) \ - r method() override { \ - MethodCall call(c_, &C::method); \ - return call.Marshal(RTC_FROM_HERE, signaling_thread_); \ +#if defined(RTC_DISABLE_PROXY_TRACE_EVENTS) +#define TRACE_BOILERPLATE(method) \ + do { \ + } while (0) +#else // if defined(RTC_DISABLE_PROXY_TRACE_EVENTS) +#define TRACE_BOILERPLATE(method) \ + static constexpr auto class_and_method_name = \ + rtc::MakeCompileTimeString(proxy_name_) \ + .Concat(rtc::MakeCompileTimeString("::")) \ + .Concat(rtc::MakeCompileTimeString(#method)); \ + proxy_internal::ScopedTrace 
scoped_trace(class_and_method_name.string) + +#endif // if defined(RTC_DISABLE_PROXY_TRACE_EVENTS) + +#define PROXY_METHOD0(r, method) \ + r method() override { \ + TRACE_BOILERPLATE(method); \ + MethodCall call(c_, &C::method); \ + return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } -#define PROXY_CONSTMETHOD0(r, method) \ - r method() const override { \ - ConstMethodCall call(c_, &C::method); \ - return call.Marshal(RTC_FROM_HERE, signaling_thread_); \ +#define PROXY_CONSTMETHOD0(r, method) \ + r method() const override { \ + TRACE_BOILERPLATE(method); \ + ConstMethodCall call(c_, &C::method); \ + return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_METHOD1(r, method, t1) \ r method(t1 a1) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1)); \ - return call.Marshal(RTC_FROM_HERE, signaling_thread_); \ + return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_CONSTMETHOD1(r, method, t1) \ r method(t1 a1) const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method, std::move(a1)); \ - return call.Marshal(RTC_FROM_HERE, signaling_thread_); \ + return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_METHOD2(r, method, t1, t2) \ r method(t1 a1, t2 a2) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2)); \ - return call.Marshal(RTC_FROM_HERE, signaling_thread_); \ + return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_METHOD3(r, method, t1, t2, t3) \ r method(t1 a1, t2 a2, t3 a3) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3)); \ - return call.Marshal(RTC_FROM_HERE, signaling_thread_); \ + return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_METHOD4(r, method, t1, t2, t3, t4) \ r method(t1 a1, t2 a2, t3 a3, t4 a4) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, 
std::move(a1), \ std::move(a2), std::move(a3), \ std::move(a4)); \ - return call.Marshal(RTC_FROM_HERE, signaling_thread_); \ + return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } #define PROXY_METHOD5(r, method, t1, t2, t3, t4, t5) \ r method(t1 a1, t2 a2, t3 a3, t4 a4, t5 a5) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3), \ std::move(a4), std::move(a5)); \ - return call.Marshal(RTC_FROM_HERE, signaling_thread_); \ + return call.Marshal(RTC_FROM_HERE, primary_thread_); \ } -// Define methods which should be invoked on the worker thread. -#define PROXY_WORKER_METHOD0(r, method) \ - r method() override { \ - MethodCall call(c_, &C::method); \ - return call.Marshal(RTC_FROM_HERE, worker_thread_); \ +// Define methods which should be invoked on the secondary thread. +#define PROXY_SECONDARY_METHOD0(r, method) \ + r method() override { \ + TRACE_BOILERPLATE(method); \ + MethodCall call(c_, &C::method); \ + return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } -#define PROXY_WORKER_CONSTMETHOD0(r, method) \ - r method() const override { \ - ConstMethodCall call(c_, &C::method); \ - return call.Marshal(RTC_FROM_HERE, worker_thread_); \ +#define PROXY_SECONDARY_CONSTMETHOD0(r, method) \ + r method() const override { \ + TRACE_BOILERPLATE(method); \ + ConstMethodCall call(c_, &C::method); \ + return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } -#define PROXY_WORKER_METHOD1(r, method, t1) \ +#define PROXY_SECONDARY_METHOD1(r, method, t1) \ r method(t1 a1) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1)); \ - return call.Marshal(RTC_FROM_HERE, worker_thread_); \ + return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } -#define PROXY_WORKER_CONSTMETHOD1(r, method, t1) \ +#define PROXY_SECONDARY_CONSTMETHOD1(r, method, t1) \ r method(t1 a1) const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method, 
std::move(a1)); \ - return call.Marshal(RTC_FROM_HERE, worker_thread_); \ + return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } -#define PROXY_WORKER_METHOD2(r, method, t1, t2) \ +#define PROXY_SECONDARY_METHOD2(r, method, t1, t2) \ r method(t1 a1, t2 a2) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2)); \ - return call.Marshal(RTC_FROM_HERE, worker_thread_); \ + return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } -#define PROXY_WORKER_CONSTMETHOD2(r, method, t1, t2) \ +#define PROXY_SECONDARY_CONSTMETHOD2(r, method, t1, t2) \ r method(t1 a1, t2 a2) const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method, std::move(a1), \ std::move(a2)); \ - return call.Marshal(RTC_FROM_HERE, worker_thread_); \ + return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } -#define PROXY_WORKER_METHOD3(r, method, t1, t2, t3) \ +#define PROXY_SECONDARY_METHOD3(r, method, t1, t2, t3) \ r method(t1 a1, t2 a2, t3 a3) override { \ + TRACE_BOILERPLATE(method); \ MethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3)); \ - return call.Marshal(RTC_FROM_HERE, worker_thread_); \ + return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ } -#define PROXY_WORKER_CONSTMETHOD3(r, method, t1, t2) \ +#define PROXY_SECONDARY_CONSTMETHOD3(r, method, t1, t2) \ r method(t1 a1, t2 a2, t3 a3) const override { \ + TRACE_BOILERPLATE(method); \ ConstMethodCall call(c_, &C::method, std::move(a1), \ std::move(a2), std::move(a3)); \ - return call.Marshal(RTC_FROM_HERE, worker_thread_); \ + return call.Marshal(RTC_FROM_HERE, secondary_thread_); \ + } + +// For use when returning purely const state (set during construction). +// Use with caution. This method should only be used when the return value will +// always be the same. 
+#define BYPASS_PROXY_CONSTMETHOD0(r, method) \ + r method() const override { \ + TRACE_BOILERPLATE(method); \ + return c_->method(); \ } } // namespace webrtc -#endif // API_PROXY_H_ +#endif // PC_PROXY_H_ diff --git a/pc/proxy_unittest.cc b/pc/proxy_unittest.cc index 500828a03e..ef3d97eddc 100644 --- a/pc/proxy_unittest.cc +++ b/pc/proxy_unittest.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "api/proxy.h" +#include "pc/proxy.h" #include #include @@ -43,7 +43,7 @@ class FakeInterface : public rtc::RefCountInterface { class Fake : public FakeInterface { public: static rtc::scoped_refptr Create() { - return new rtc::RefCountedObject(); + return rtc::make_ref_counted(); } // Used to verify destructor is called on the correct thread. MOCK_METHOD(void, Destroy, ()); @@ -64,27 +64,27 @@ class Fake : public FakeInterface { // Proxies for the test interface. BEGIN_PROXY_MAP(Fake) -PROXY_WORKER_THREAD_DESTRUCTOR() +PROXY_SECONDARY_THREAD_DESTRUCTOR() PROXY_METHOD0(void, VoidMethod0) PROXY_METHOD0(std::string, Method0) PROXY_CONSTMETHOD0(std::string, ConstMethod0) -PROXY_WORKER_METHOD1(std::string, Method1, std::string) +PROXY_SECONDARY_METHOD1(std::string, Method1, std::string) PROXY_CONSTMETHOD1(std::string, ConstMethod1, std::string) -PROXY_WORKER_METHOD2(std::string, Method2, std::string, std::string) -END_PROXY_MAP() +PROXY_SECONDARY_METHOD2(std::string, Method2, std::string, std::string) +END_PROXY_MAP(Fake) // Preprocessor hack to get a proxy class a name different than FakeProxy. 
#define FakeProxy FakeSignalingProxy #define FakeProxyWithInternal FakeSignalingProxyWithInternal -BEGIN_SIGNALING_PROXY_MAP(Fake) -PROXY_SIGNALING_THREAD_DESTRUCTOR() +BEGIN_PRIMARY_PROXY_MAP(Fake) +PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD0(void, VoidMethod0) PROXY_METHOD0(std::string, Method0) PROXY_CONSTMETHOD0(std::string, ConstMethod0) PROXY_METHOD1(std::string, Method1, std::string) PROXY_CONSTMETHOD1(std::string, ConstMethod1, std::string) PROXY_METHOD2(std::string, Method2, std::string, std::string) -END_PROXY_MAP() +END_PROXY_MAP(Fake) #undef FakeProxy class SignalingProxyTest : public ::testing::Test { @@ -270,9 +270,9 @@ class Foo : public FooInterface { }; BEGIN_OWNED_PROXY_MAP(Foo) -PROXY_SIGNALING_THREAD_DESTRUCTOR() +PROXY_PRIMARY_THREAD_DESTRUCTOR() PROXY_METHOD0(void, Bar) -END_PROXY_MAP() +END_PROXY_MAP(Foo) class OwnedProxyTest : public ::testing::Test { public: diff --git a/pc/remote_audio_source.cc b/pc/remote_audio_source.cc index da00402e41..dc890e737c 100644 --- a/pc/remote_audio_source.cc +++ b/pc/remote_audio_source.cc @@ -13,17 +13,15 @@ #include #include -#include #include "absl/algorithm/container.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" -#include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/strings/string_format.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -35,6 +33,11 @@ class RemoteAudioSource::AudioDataProxy : public AudioSinkInterface { explicit AudioDataProxy(RemoteAudioSource* source) : source_(source) { RTC_DCHECK(source); } + + AudioDataProxy() = delete; + AudioDataProxy(const AudioDataProxy&) = delete; + AudioDataProxy& operator=(const AudioDataProxy&) = delete; + ~AudioDataProxy() override { source_->OnAudioChannelGone(); } // AudioSinkInterface implementation. 
@@ -44,64 +47,73 @@ class RemoteAudioSource::AudioDataProxy : public AudioSinkInterface { private: const rtc::scoped_refptr source_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(AudioDataProxy); }; -RemoteAudioSource::RemoteAudioSource(rtc::Thread* worker_thread) +RemoteAudioSource::RemoteAudioSource( + rtc::Thread* worker_thread, + OnAudioChannelGoneAction on_audio_channel_gone_action) : main_thread_(rtc::Thread::Current()), worker_thread_(worker_thread), + on_audio_channel_gone_action_(on_audio_channel_gone_action), state_(MediaSourceInterface::kLive) { RTC_DCHECK(main_thread_); RTC_DCHECK(worker_thread_); } RemoteAudioSource::~RemoteAudioSource() { - RTC_DCHECK(main_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(main_thread_); RTC_DCHECK(audio_observers_.empty()); - RTC_DCHECK(sinks_.empty()); + if (!sinks_.empty()) { + RTC_LOG(LS_WARNING) + << "RemoteAudioSource destroyed while sinks_ is non-empty."; + } } void RemoteAudioSource::Start(cricket::VoiceMediaChannel* media_channel, absl::optional ssrc) { - RTC_DCHECK_RUN_ON(main_thread_); - RTC_DCHECK(media_channel); + RTC_DCHECK_RUN_ON(worker_thread_); // Register for callbacks immediately before AddSink so that we always get // notified when a channel goes out of scope (signaled when "AudioDataProxy" // is destroyed). - worker_thread_->Invoke(RTC_FROM_HERE, [&] { - ssrc ? media_channel->SetRawAudioSink( - *ssrc, std::make_unique(this)) - : media_channel->SetDefaultRawAudioSink( - std::make_unique(this)); - }); + RTC_DCHECK(media_channel); + ssrc ? media_channel->SetRawAudioSink(*ssrc, + std::make_unique(this)) + : media_channel->SetDefaultRawAudioSink( + std::make_unique(this)); } void RemoteAudioSource::Stop(cricket::VoiceMediaChannel* media_channel, absl::optional ssrc) { - RTC_DCHECK_RUN_ON(main_thread_); + RTC_DCHECK_RUN_ON(worker_thread_); RTC_DCHECK(media_channel); + ssrc ? 
media_channel->SetRawAudioSink(*ssrc, nullptr) + : media_channel->SetDefaultRawAudioSink(nullptr); +} - worker_thread_->Invoke(RTC_FROM_HERE, [&] { - ssrc ? media_channel->SetRawAudioSink(*ssrc, nullptr) - : media_channel->SetDefaultRawAudioSink(nullptr); - }); +void RemoteAudioSource::SetState(SourceState new_state) { + RTC_DCHECK_RUN_ON(main_thread_); + if (state_ != new_state) { + state_ = new_state; + FireOnChanged(); + } } MediaSourceInterface::SourceState RemoteAudioSource::state() const { - RTC_DCHECK(main_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(main_thread_); return state_; } bool RemoteAudioSource::remote() const { - RTC_DCHECK(main_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(main_thread_); return true; } void RemoteAudioSource::SetVolume(double volume) { RTC_DCHECK_GE(volume, 0); RTC_DCHECK_LE(volume, 10); + RTC_LOG(LS_INFO) << rtc::StringFormat("RAS::%s({volume=%.2f})", __func__, + volume); for (auto* observer : audio_observers_) { observer->OnSetVolume(volume); } @@ -119,7 +131,7 @@ void RemoteAudioSource::UnregisterAudioObserver(AudioObserver* observer) { } void RemoteAudioSource::AddSink(AudioTrackSinkInterface* sink) { - RTC_DCHECK(main_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(main_thread_); RTC_DCHECK(sink); if (state_ != MediaSourceInterface::kLive) { @@ -127,22 +139,22 @@ void RemoteAudioSource::AddSink(AudioTrackSinkInterface* sink) { return; } - rtc::CritScope lock(&sink_lock_); + MutexLock lock(&sink_lock_); RTC_DCHECK(!absl::c_linear_search(sinks_, sink)); sinks_.push_back(sink); } void RemoteAudioSource::RemoveSink(AudioTrackSinkInterface* sink) { - RTC_DCHECK(main_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(main_thread_); RTC_DCHECK(sink); - rtc::CritScope lock(&sink_lock_); + MutexLock lock(&sink_lock_); sinks_.remove(sink); } void RemoteAudioSource::OnData(const AudioSinkInterface::Data& audio) { // Called on the externally-owned audio callback thread, via/from webrtc. 
- rtc::CritScope lock(&sink_lock_); + MutexLock lock(&sink_lock_); for (auto* sink : sinks_) { // When peerconnection acts as an audio source, it should not provide // absolute capture timestamp. @@ -153,6 +165,9 @@ void RemoteAudioSource::OnData(const AudioSinkInterface::Data& audio) { } void RemoteAudioSource::OnAudioChannelGone() { + if (on_audio_channel_gone_action_ != OnAudioChannelGoneAction::kEnd) { + return; + } // Called when the audio channel is deleted. It may be the worker thread // in libjingle or may be a different worker thread. // This object needs to live long enough for the cleanup logic in OnMessage to @@ -165,10 +180,9 @@ void RemoteAudioSource::OnAudioChannelGone() { } void RemoteAudioSource::OnMessage(rtc::Message* msg) { - RTC_DCHECK(main_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(main_thread_); sinks_.clear(); - state_ = MediaSourceInterface::kEnded; - FireOnChanged(); + SetState(MediaSourceInterface::kEnded); // Will possibly delete this RemoteAudioSource since it is reference counted // in the message. 
delete msg->pdata; diff --git a/pc/remote_audio_source.h b/pc/remote_audio_source.h index 15dc75b511..2eae073272 100644 --- a/pc/remote_audio_source.h +++ b/pc/remote_audio_source.h @@ -11,15 +11,21 @@ #ifndef PC_REMOTE_AUDIO_SOURCE_H_ #define PC_REMOTE_AUDIO_SOURCE_H_ +#include + #include #include #include "absl/types/optional.h" #include "api/call/audio_sink.h" +#include "api/media_stream_interface.h" #include "api/notifier.h" +#include "media/base/media_channel.h" #include "pc/channel.h" -#include "rtc_base/critical_section.h" #include "rtc_base/message_handler.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_message.h" namespace rtc { struct Message; @@ -34,7 +40,21 @@ namespace webrtc { class RemoteAudioSource : public Notifier, rtc::MessageHandler { public: - explicit RemoteAudioSource(rtc::Thread* worker_thread); + // In Unified Plan, receivers map to m= sections and their tracks and sources + // survive SSRCs being reconfigured. The life cycle of the remote audio source + // is associated with the life cycle of the m= section, and thus even if an + // audio channel is destroyed the RemoteAudioSource should kSurvive. + // + // In Plan B however, remote audio sources map 1:1 with an SSRCs and if an + // audio channel is destroyed, the RemoteAudioSource should kEnd. + enum class OnAudioChannelGoneAction { + kSurvive, + kEnd, + }; + + explicit RemoteAudioSource( + rtc::Thread* worker_thread, + OnAudioChannelGoneAction on_audio_channel_gone_action); // Register and unregister remote audio source with the underlying media // engine. @@ -42,6 +62,7 @@ class RemoteAudioSource : public Notifier, absl::optional ssrc); void Stop(cricket::VoiceMediaChannel* media_channel, absl::optional ssrc); + void SetState(SourceState new_state); // MediaSourceInterface implementation. 
MediaSourceInterface::SourceState state() const override; @@ -61,6 +82,7 @@ class RemoteAudioSource : public Notifier, private: // These are callbacks from the media engine. class AudioDataProxy; + void OnData(const AudioSinkInterface::Data& audio); void OnAudioChannelGone(); @@ -68,8 +90,9 @@ class RemoteAudioSource : public Notifier, rtc::Thread* const main_thread_; rtc::Thread* const worker_thread_; + const OnAudioChannelGoneAction on_audio_channel_gone_action_; std::list audio_observers_; - rtc::CriticalSection sink_lock_; + Mutex sink_lock_; std::list sinks_; SourceState state_; }; diff --git a/pc/rtc_stats_collector.cc b/pc/rtc_stats_collector.cc index 0e2f170ff0..6599d0ef49 100644 --- a/pc/rtc_stats_collector.cc +++ b/pc/rtc_stats_collector.cc @@ -10,23 +10,52 @@ #include "pc/rtc_stats_collector.h" +#include + +#include +#include #include #include #include #include #include +#include "api/array_view.h" #include "api/candidate.h" #include "api/media_stream_interface.h" -#include "api/peer_connection_interface.h" +#include "api/rtp_parameters.h" +#include "api/rtp_receiver_interface.h" +#include "api/rtp_sender_interface.h" +#include "api/sequence_checker.h" +#include "api/stats/rtc_stats.h" +#include "api/stats/rtcstats_objects.h" +#include "api/task_queue/queued_task.h" #include "api/video/video_content_type.h" +#include "common_video/include/quality_limitation_reason.h" #include "media/base/media_channel.h" +#include "modules/audio_processing/include/audio_processing_statistics.h" +#include "modules/rtp_rtcp/include/report_block_data.h" +#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "p2p/base/connection_info.h" +#include "p2p/base/dtls_transport_internal.h" +#include "p2p/base/ice_transport_internal.h" #include "p2p/base/p2p_constants.h" #include "p2p/base/port.h" -#include "pc/peer_connection.h" +#include "pc/channel.h" +#include "pc/channel_interface.h" +#include "pc/data_channel_utils.h" #include "pc/rtc_stats_traversal.h" #include 
"pc/webrtc_sdp.h" #include "rtc_base/checks.h" +#include "rtc_base/ip_address.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" +#include "rtc_base/network_constants.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/rtc_certificate.h" +#include "rtc_base/socket_address.h" +#include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/string_encode.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/time_utils.h" #include "rtc_base/trace_event.h" @@ -80,17 +109,23 @@ std::string RTCTransportStatsIDFromTransportChannel( return sb.str(); } -std::string RTCInboundRTPStreamStatsIDFromSSRC(bool audio, uint32_t ssrc) { +std::string RTCInboundRTPStreamStatsIDFromSSRC(cricket::MediaType media_type, + uint32_t ssrc) { char buf[1024]; rtc::SimpleStringBuilder sb(buf); - sb << "RTCInboundRTP" << (audio ? "Audio" : "Video") << "Stream_" << ssrc; + sb << "RTCInboundRTP" + << (media_type == cricket::MEDIA_TYPE_AUDIO ? "Audio" : "Video") + << "Stream_" << ssrc; return sb.str(); } -std::string RTCOutboundRTPStreamStatsIDFromSSRC(bool audio, uint32_t ssrc) { +std::string RTCOutboundRTPStreamStatsIDFromSSRC(cricket::MediaType media_type, + uint32_t ssrc) { char buf[1024]; rtc::SimpleStringBuilder sb(buf); - sb << "RTCOutboundRTP" << (audio ? "Audio" : "Video") << "Stream_" << ssrc; + sb << "RTCOutboundRTP" + << (media_type == cricket::MEDIA_TYPE_AUDIO ? "Audio" : "Video") + << "Stream_" << ssrc; return sb.str(); } @@ -105,6 +140,17 @@ std::string RTCRemoteInboundRtpStreamStatsIdFromSourceSsrc( return sb.str(); } +std::string RTCRemoteOutboundRTPStreamStatsIDFromSSRC( + cricket::MediaType media_type, + uint32_t source_ssrc) { + char buf[1024]; + rtc::SimpleStringBuilder sb(buf); + sb << "RTCRemoteOutboundRTP" + << (media_type == cricket::MEDIA_TYPE_AUDIO ? 
"Audio" : "Video") + << "Stream_" << source_ssrc; + return sb.str(); +} + std::string RTCMediaSourceStatsIDFromKindAndAttachment( cricket::MediaType media_type, int attachment_id) { @@ -163,20 +209,20 @@ const char* IceCandidatePairStateToRTCStatsIceCandidatePairState( } const char* DtlsTransportStateToRTCDtlsTransportState( - cricket::DtlsTransportState state) { + DtlsTransportState state) { switch (state) { - case cricket::DTLS_TRANSPORT_NEW: + case DtlsTransportState::kNew: return RTCDtlsTransportState::kNew; - case cricket::DTLS_TRANSPORT_CONNECTING: + case DtlsTransportState::kConnecting: return RTCDtlsTransportState::kConnecting; - case cricket::DTLS_TRANSPORT_CONNECTED: + case DtlsTransportState::kConnected: return RTCDtlsTransportState::kConnected; - case cricket::DTLS_TRANSPORT_CLOSED: + case DtlsTransportState::kClosed: return RTCDtlsTransportState::kClosed; - case cricket::DTLS_TRANSPORT_FAILED: + case DtlsTransportState::kFailed: return RTCDtlsTransportState::kFailed; default: - RTC_NOTREACHED(); + RTC_CHECK_NOTREACHED(); return nullptr; } } @@ -216,6 +262,18 @@ const char* QualityLimitationReasonToRTCQualityLimitationReason( case QualityLimitationReason::kOther: return RTCQualityLimitationReason::kOther; } + RTC_CHECK_NOTREACHED(); +} + +std::map +QualityLimitationDurationToRTCQualityLimitationDuration( + std::map durations_ms) { + std::map result; + for (const auto& elem : durations_ms) { + result[QualityLimitationReasonToRTCQualityLimitationReason(elem.first)] = + elem.second; + } + return result; } double DoubleAudioLevelFromIntAudioLevel(int audio_level) { @@ -227,6 +285,7 @@ double DoubleAudioLevelFromIntAudioLevel(int audio_level) { std::unique_ptr CodecStatsFromRtpCodecParameters( uint64_t timestamp_us, const std::string& mid, + const std::string& transport_id, bool inbound, const RtpCodecParameters& codec_params) { RTC_DCHECK_GE(codec_params.payload_type, 0); @@ -249,6 +308,7 @@ std::unique_ptr CodecStatsFromRtpCodecParameters( if 
(WriteFmtpParameters(codec_params.parameters, &fmtp)) { codec_stats->sdp_fmtp_line = fmtp.Release(); } + codec_stats->transport_id = transport_id; return codec_stats; } @@ -265,8 +325,6 @@ void SetInboundRTPStreamStatsFromMediaReceiverInfo( RTCInboundRTPStreamStats* inbound_stats) { RTC_DCHECK(inbound_stats); inbound_stats->ssrc = media_receiver_info.ssrc(); - // TODO(hbos): Support the remote case. https://crbug.com/657855 - inbound_stats->is_remote = false; inbound_stats->packets_received = static_cast(media_receiver_info.packets_rcvd); inbound_stats->bytes_received = @@ -275,31 +333,58 @@ void SetInboundRTPStreamStatsFromMediaReceiverInfo( static_cast(media_receiver_info.header_and_padding_bytes_rcvd); inbound_stats->packets_lost = static_cast(media_receiver_info.packets_lost); + inbound_stats->jitter_buffer_delay = + media_receiver_info.jitter_buffer_delay_seconds; + inbound_stats->jitter_buffer_emitted_count = + media_receiver_info.jitter_buffer_emitted_count; + if (media_receiver_info.nacks_sent) { + inbound_stats->nack_count = *media_receiver_info.nacks_sent; + } } -void SetInboundRTPStreamStatsFromVoiceReceiverInfo( - const std::string& mid, +std::unique_ptr CreateInboundAudioStreamStats( const cricket::VoiceReceiverInfo& voice_receiver_info, - RTCInboundRTPStreamStats* inbound_audio) { + const std::string& mid, + int64_t timestamp_us) { + auto inbound_audio = std::make_unique( + /*id=*/RTCInboundRTPStreamStatsIDFromSSRC(cricket::MEDIA_TYPE_AUDIO, + voice_receiver_info.ssrc()), + timestamp_us); SetInboundRTPStreamStatsFromMediaReceiverInfo(voice_receiver_info, - inbound_audio); + inbound_audio.get()); inbound_audio->media_type = "audio"; inbound_audio->kind = "audio"; if (voice_receiver_info.codec_payload_type) { inbound_audio->codec_id = RTCCodecStatsIDFromMidDirectionAndPayload( - mid, true, *voice_receiver_info.codec_payload_type); + mid, /*inbound=*/true, *voice_receiver_info.codec_payload_type); } inbound_audio->jitter = 
static_cast(voice_receiver_info.jitter_ms) / rtc::kNumMillisecsPerSec; + inbound_audio->total_samples_received = + voice_receiver_info.total_samples_received; + inbound_audio->concealed_samples = voice_receiver_info.concealed_samples; + inbound_audio->silent_concealed_samples = + voice_receiver_info.silent_concealed_samples; + inbound_audio->concealment_events = voice_receiver_info.concealment_events; + inbound_audio->inserted_samples_for_deceleration = + voice_receiver_info.inserted_samples_for_deceleration; + inbound_audio->removed_samples_for_acceleration = + voice_receiver_info.removed_samples_for_acceleration; + if (voice_receiver_info.audio_level >= 0) { + inbound_audio->audio_level = + DoubleAudioLevelFromIntAudioLevel(voice_receiver_info.audio_level); + } + inbound_audio->total_audio_energy = voice_receiver_info.total_output_energy; + inbound_audio->total_samples_duration = + voice_receiver_info.total_output_duration; // |fir_count|, |pli_count| and |sli_count| are only valid for video and are // purposefully left undefined for audio. if (voice_receiver_info.last_packet_received_timestamp_ms) { - inbound_audio->last_packet_received_timestamp = - static_cast( - *voice_receiver_info.last_packet_received_timestamp_ms) / - rtc::kNumMillisecsPerSec; + inbound_audio->last_packet_received_timestamp = static_cast( + *voice_receiver_info.last_packet_received_timestamp_ms); } if (voice_receiver_info.estimated_playout_ntp_timestamp_ms) { + // TODO(bugs.webrtc.org/10529): Fix time origin. 
inbound_audio->estimated_playout_timestamp = static_cast( *voice_receiver_info.estimated_playout_ntp_timestamp_ms); } @@ -307,6 +392,51 @@ void SetInboundRTPStreamStatsFromVoiceReceiverInfo( voice_receiver_info.fec_packets_received; inbound_audio->fec_packets_discarded = voice_receiver_info.fec_packets_discarded; + return inbound_audio; +} + +std::unique_ptr +CreateRemoteOutboundAudioStreamStats( + const cricket::VoiceReceiverInfo& voice_receiver_info, + const std::string& mid, + const std::string& inbound_audio_id, + const std::string& transport_id) { + if (!voice_receiver_info.last_sender_report_timestamp_ms.has_value()) { + // Cannot create `RTCRemoteOutboundRtpStreamStats` when the RTCP SR arrival + // timestamp is not available - i.e., until the first sender report is + // received. + return nullptr; + } + RTC_DCHECK_GT(voice_receiver_info.sender_reports_reports_count, 0); + + // Create. + auto stats = std::make_unique( + /*id=*/RTCRemoteOutboundRTPStreamStatsIDFromSSRC( + cricket::MEDIA_TYPE_AUDIO, voice_receiver_info.ssrc()), + /*timestamp_us=*/rtc::kNumMicrosecsPerMillisec * + voice_receiver_info.last_sender_report_timestamp_ms.value()); + + // Populate. + // - RTCRtpStreamStats. + stats->ssrc = voice_receiver_info.ssrc(); + stats->kind = "audio"; + stats->transport_id = transport_id; + stats->codec_id = RTCCodecStatsIDFromMidDirectionAndPayload( + mid, + /*inbound=*/true, // Remote-outbound same as local-inbound. + *voice_receiver_info.codec_payload_type); + // - RTCSentRtpStreamStats. + stats->packets_sent = voice_receiver_info.sender_reports_packets_sent; + stats->bytes_sent = voice_receiver_info.sender_reports_bytes_sent; + // - RTCRemoteOutboundRtpStreamStats. 
+ stats->local_id = inbound_audio_id; + RTC_DCHECK( + voice_receiver_info.last_sender_report_remote_timestamp_ms.has_value()); + stats->remote_timestamp = static_cast( + voice_receiver_info.last_sender_report_remote_timestamp_ms.value()); + stats->reports_sent = voice_receiver_info.sender_reports_reports_count; + + return stats; } void SetInboundRTPStreamStatsFromVideoReceiverInfo( @@ -319,16 +449,29 @@ void SetInboundRTPStreamStatsFromVideoReceiverInfo( inbound_video->kind = "video"; if (video_receiver_info.codec_payload_type) { inbound_video->codec_id = RTCCodecStatsIDFromMidDirectionAndPayload( - mid, true, *video_receiver_info.codec_payload_type); + mid, /*inbound=*/true, *video_receiver_info.codec_payload_type); } + inbound_video->jitter = static_cast(video_receiver_info.jitter_ms) / + rtc::kNumMillisecsPerSec; inbound_video->fir_count = static_cast(video_receiver_info.firs_sent); inbound_video->pli_count = static_cast(video_receiver_info.plis_sent); - inbound_video->nack_count = - static_cast(video_receiver_info.nacks_sent); + inbound_video->frames_received = video_receiver_info.frames_received; inbound_video->frames_decoded = video_receiver_info.frames_decoded; + inbound_video->frames_dropped = video_receiver_info.frames_dropped; inbound_video->key_frames_decoded = video_receiver_info.key_frames_decoded; + if (video_receiver_info.frame_width > 0) { + inbound_video->frame_width = + static_cast(video_receiver_info.frame_width); + } + if (video_receiver_info.frame_height > 0) { + inbound_video->frame_height = + static_cast(video_receiver_info.frame_height); + } + if (video_receiver_info.framerate_rcvd > 0) { + inbound_video->frames_per_second = video_receiver_info.framerate_rcvd; + } if (video_receiver_info.qp_sum) inbound_video->qp_sum = *video_receiver_info.qp_sum; inbound_video->total_decode_time = @@ -339,17 +482,16 @@ void SetInboundRTPStreamStatsFromVideoReceiverInfo( inbound_video->total_squared_inter_frame_delay = 
video_receiver_info.total_squared_inter_frame_delay; if (video_receiver_info.last_packet_received_timestamp_ms) { - inbound_video->last_packet_received_timestamp = - static_cast( - *video_receiver_info.last_packet_received_timestamp_ms) / - rtc::kNumMillisecsPerSec; + inbound_video->last_packet_received_timestamp = static_cast( + *video_receiver_info.last_packet_received_timestamp_ms); } if (video_receiver_info.estimated_playout_ntp_timestamp_ms) { + // TODO(bugs.webrtc.org/10529): Fix time origin if needed. inbound_video->estimated_playout_timestamp = static_cast( *video_receiver_info.estimated_playout_ntp_timestamp_ms); } - // TODO(https://crbug.com/webrtc/10529): When info's |content_info| is - // optional, support the "unspecified" value. + // TODO(bugs.webrtc.org/10529): When info's |content_info| is optional + // support the "unspecified" value. if (video_receiver_info.content_type == VideoContentType::SCREENSHARE) inbound_video->content_type = RTCContentType::kScreenshare; if (!video_receiver_info.decoder_implementation_name.empty()) { @@ -364,8 +506,6 @@ void SetOutboundRTPStreamStatsFromMediaSenderInfo( RTCOutboundRTPStreamStats* outbound_stats) { RTC_DCHECK(outbound_stats); outbound_stats->ssrc = media_sender_info.ssrc(); - // TODO(hbos): Support the remote case. 
https://crbug.com/657856 - outbound_stats->is_remote = false; outbound_stats->packets_sent = static_cast(media_sender_info.packets_sent); outbound_stats->retransmitted_packets_sent = @@ -376,6 +516,7 @@ void SetOutboundRTPStreamStatsFromMediaSenderInfo( static_cast(media_sender_info.header_and_padding_bytes_sent); outbound_stats->retransmitted_bytes_sent = media_sender_info.retransmitted_bytes_sent; + outbound_stats->nack_count = media_sender_info.nacks_rcvd; } void SetOutboundRTPStreamStatsFromVoiceSenderInfo( @@ -388,7 +529,7 @@ void SetOutboundRTPStreamStatsFromVoiceSenderInfo( outbound_audio->kind = "audio"; if (voice_sender_info.codec_payload_type) { outbound_audio->codec_id = RTCCodecStatsIDFromMidDirectionAndPayload( - mid, false, *voice_sender_info.codec_payload_type); + mid, /*inbound=*/false, *voice_sender_info.codec_payload_type); } // |fir_count|, |pli_count| and |sli_count| are only valid for video and are // purposefully left undefined for audio. @@ -397,7 +538,6 @@ void SetOutboundRTPStreamStatsFromVoiceSenderInfo( void SetOutboundRTPStreamStatsFromVideoSenderInfo( const std::string& mid, const cricket::VideoSenderInfo& video_sender_info, - bool enable_simulcast_stats, RTCOutboundRTPStreamStats* outbound_video) { SetOutboundRTPStreamStatsFromMediaSenderInfo(video_sender_info, outbound_video); @@ -405,14 +545,12 @@ void SetOutboundRTPStreamStatsFromVideoSenderInfo( outbound_video->kind = "video"; if (video_sender_info.codec_payload_type) { outbound_video->codec_id = RTCCodecStatsIDFromMidDirectionAndPayload( - mid, false, *video_sender_info.codec_payload_type); + mid, /*inbound=*/false, *video_sender_info.codec_payload_type); } outbound_video->fir_count = static_cast(video_sender_info.firs_rcvd); outbound_video->pli_count = static_cast(video_sender_info.plis_rcvd); - outbound_video->nack_count = - static_cast(video_sender_info.nacks_rcvd); if (video_sender_info.qp_sum) outbound_video->qp_sum = *video_sender_info.qp_sum; outbound_video->frames_encoded 
= video_sender_info.frames_encoded; @@ -422,27 +560,28 @@ void SetOutboundRTPStreamStatsFromVideoSenderInfo( rtc::kNumMillisecsPerSec; outbound_video->total_encoded_bytes_target = video_sender_info.total_encoded_bytes_target; - if (enable_simulcast_stats) { - if (video_sender_info.send_frame_width > 0) { - outbound_video->frame_width = - static_cast(video_sender_info.send_frame_width); - } - if (video_sender_info.send_frame_height > 0) { - outbound_video->frame_height = - static_cast(video_sender_info.send_frame_height); - } - if (video_sender_info.framerate_sent > 0) { - outbound_video->frames_per_second = video_sender_info.framerate_sent; - } - outbound_video->frames_sent = video_sender_info.frames_sent; - outbound_video->huge_frames_sent = video_sender_info.huge_frames_sent; + if (video_sender_info.send_frame_width > 0) { + outbound_video->frame_width = + static_cast(video_sender_info.send_frame_width); + } + if (video_sender_info.send_frame_height > 0) { + outbound_video->frame_height = + static_cast(video_sender_info.send_frame_height); } + if (video_sender_info.framerate_sent > 0) { + outbound_video->frames_per_second = video_sender_info.framerate_sent; + } + outbound_video->frames_sent = video_sender_info.frames_sent; + outbound_video->huge_frames_sent = video_sender_info.huge_frames_sent; outbound_video->total_packet_send_delay = static_cast(video_sender_info.total_packet_send_delay_ms) / rtc::kNumMillisecsPerSec; outbound_video->quality_limitation_reason = QualityLimitationReasonToRTCQualityLimitationReason( video_sender_info.quality_limitation_reason); + outbound_video->quality_limitation_durations = + QualityLimitationDurationToRTCQualityLimitationDuration( + video_sender_info.quality_limitation_durations_ms); outbound_video->quality_limitation_resolution_changes = video_sender_info.quality_limitation_resolution_changes; // TODO(https://crbug.com/webrtc/10529): When info's |content_info| is @@ -462,7 +601,7 @@ std::unique_ptr 
ProduceRemoteInboundRtpStreamStatsFromReportBlockData( const ReportBlockData& report_block_data, cricket::MediaType media_type, - std::map outbound_rtps, + const std::map& outbound_rtps, const RTCStatsReport& report) { const auto& report_block = report_block_data.report_block(); // RTCStats' timestamp generally refers to when the metric was sampled, but @@ -476,12 +615,19 @@ ProduceRemoteInboundRtpStreamStatsFromReportBlockData( remote_inbound->kind = media_type == cricket::MEDIA_TYPE_AUDIO ? "audio" : "video"; remote_inbound->packets_lost = report_block.packets_lost; + remote_inbound->fraction_lost = + static_cast(report_block.fraction_lost) / (1 << 8); remote_inbound->round_trip_time = static_cast(report_block_data.last_rtt_ms()) / rtc::kNumMillisecsPerSec; + remote_inbound->total_round_trip_time = + static_cast(report_block_data.sum_rtt_ms()) / + rtc::kNumMillisecsPerSec; + remote_inbound->round_trip_time_measurements = + report_block_data.num_rtts(); - std::string local_id = RTCOutboundRTPStreamStatsIDFromSSRC( - media_type == cricket::MEDIA_TYPE_AUDIO, report_block.source_ssrc); + std::string local_id = + RTCOutboundRTPStreamStatsIDFromSSRC(media_type, report_block.source_ssrc); // Look up local stat from |outbound_rtps| where the pointers are non-const. 
auto local_id_it = outbound_rtps.find(local_id); if (local_id_it != outbound_rtps.end()) { @@ -582,6 +728,7 @@ const std::string& ProduceIceCandidateStats(int64_t timestamp_us, RTC_DCHECK_EQ(rtc::ADAPTER_TYPE_UNKNOWN, candidate.network_type()); } candidate_stats->ip = candidate.address().ipaddr().ToString(); + candidate_stats->address = candidate.address().ipaddr().ToString(); candidate_stats->port = static_cast(candidate.address().port()); candidate_stats->protocol = candidate.protocol(); candidate_stats->candidate_type = @@ -596,10 +743,22 @@ const std::string& ProduceIceCandidateStats(int64_t timestamp_us, return stats->id(); } +template +void SetAudioProcessingStats(StatsType* stats, + const AudioProcessingStats& apm_stats) { + if (apm_stats.echo_return_loss) { + stats->echo_return_loss = *apm_stats.echo_return_loss; + } + if (apm_stats.echo_return_loss_enhancement) { + stats->echo_return_loss_enhancement = + *apm_stats.echo_return_loss_enhancement; + } +} + std::unique_ptr ProduceMediaStreamTrackStatsFromVoiceSenderInfo( int64_t timestamp_us, - const AudioTrackInterface& audio_track, + AudioTrackInterface& audio_track, const cricket::VoiceSenderInfo& voice_sender_info, int attachment_id) { std::unique_ptr audio_track_stats( @@ -614,13 +773,17 @@ ProduceMediaStreamTrackStatsFromVoiceSenderInfo( attachment_id); audio_track_stats->remote_source = false; audio_track_stats->detached = false; - if (voice_sender_info.apm_statistics.echo_return_loss) { - audio_track_stats->echo_return_loss = - *voice_sender_info.apm_statistics.echo_return_loss; - } - if (voice_sender_info.apm_statistics.echo_return_loss_enhancement) { - audio_track_stats->echo_return_loss_enhancement = - *voice_sender_info.apm_statistics.echo_return_loss_enhancement; + // Audio processor may be attached to either the track or the send + // stream, so look in both places. 
+ SetAudioProcessingStats(audio_track_stats.get(), + voice_sender_info.apm_statistics); + auto audio_processor(audio_track.GetAudioProcessor()); + if (audio_processor.get()) { + // The |has_remote_tracks| argument is obsolete; makes no difference if it's + // set to true or false. + AudioProcessorInterface::AudioProcessorStatistics ap_stats = + audio_processor->GetStats(/*has_remote_tracks=*/false); + SetAudioProcessingStats(audio_track_stats.get(), ap_stats.apm_statistics); } return audio_track_stats; } @@ -964,8 +1127,7 @@ RTCStatsCollector::RequestInfo::RequestInfo( rtc::scoped_refptr RTCStatsCollector::Create( PeerConnectionInternal* pc, int64_t cache_lifetime_us) { - return rtc::scoped_refptr( - new rtc::RefCountedObject(pc, cache_lifetime_us)); + return rtc::make_ref_counted(pc, cache_lifetime_us); } RTCStatsCollector::RTCStatsCollector(PeerConnectionInternal* pc, @@ -985,9 +1147,8 @@ RTCStatsCollector::RTCStatsCollector(PeerConnectionInternal* pc, RTC_DCHECK(worker_thread_); RTC_DCHECK(network_thread_); RTC_DCHECK_GE(cache_lifetime_us_, 0); - pc_->SignalDataChannelCreated().connect( - this, &RTCStatsCollector::OnDataChannelCreated); - enable_simulcast_stats_ = pc_->GetConfiguration().enable_simulcast_stats; + pc_->SignalSctpDataChannelCreated().connect( + this, &RTCStatsCollector::OnSctpDataChannelCreated); } RTCStatsCollector::~RTCStatsCollector() { @@ -1013,7 +1174,7 @@ void RTCStatsCollector::GetStatsReport( void RTCStatsCollector::GetStatsReportInternal( RTCStatsCollector::RequestInfo request) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); requests_.push_back(std::move(request)); // "Now" using a monotonically increasing timer. @@ -1025,9 +1186,30 @@ void RTCStatsCollector::GetStatsReportInternal( // reentrancy problems. 
std::vector requests; requests.swap(requests_); - signaling_thread_->PostTask( - RTC_FROM_HERE, rtc::Bind(&RTCStatsCollector::DeliverCachedReport, this, - cached_report_, std::move(requests))); + + // Task subclass to take ownership of the requests. + // TODO(nisse): Delete when we can use C++14, and do lambda capture with + // std::move. + class DeliveryTask : public QueuedTask { + public: + DeliveryTask(rtc::scoped_refptr collector, + rtc::scoped_refptr cached_report, + std::vector requests) + : collector_(collector), + cached_report_(cached_report), + requests_(std::move(requests)) {} + bool Run() override { + collector_->DeliverCachedReport(cached_report_, std::move(requests_)); + return true; + } + + private: + rtc::scoped_refptr collector_; + rtc::scoped_refptr cached_report_; + std::vector requests_; + }; + signaling_thread_->PostTask(std::make_unique( + this, cached_report_, std::move(requests))); } else if (!num_pending_partial_reports_) { // Only start gathering stats if we're not already gathering stats. In the // case of already gathering stats, |callback_| will be invoked when there @@ -1041,39 +1223,33 @@ void RTCStatsCollector::GetStatsReportInternal( num_pending_partial_reports_ = 2; partial_report_timestamp_us_ = cache_now_us; - // Prepare |transceiver_stats_infos_| for use in + // Prepare |transceiver_stats_infos_| and |call_stats_| for use in // |ProducePartialResultsOnNetworkThread| and // |ProducePartialResultsOnSignalingThread|. - transceiver_stats_infos_ = PrepareTransceiverStatsInfos_s(); - // Prepare |transport_names_| for use in - // |ProducePartialResultsOnNetworkThread|. - transport_names_ = PrepareTransportNames_s(); - - // Prepare |call_stats_| here since GetCallStats() will hop to the worker - // thread. - // TODO(holmer): To avoid the hop we could move BWE and BWE stats to the - // network thread, where it more naturally belongs. 
- call_stats_ = pc_->GetCallStats(); - + PrepareTransceiverStatsInfosAndCallStats_s_w_n(); // Don't touch |network_report_| on the signaling thread until // ProducePartialResultsOnNetworkThread() has signaled the // |network_report_event_|. network_report_event_.Reset(); + rtc::scoped_refptr collector(this); network_thread_->PostTask( RTC_FROM_HERE, - rtc::Bind(&RTCStatsCollector::ProducePartialResultsOnNetworkThread, - this, timestamp_us)); + [collector, sctp_transport_name = pc_->sctp_transport_name(), + timestamp_us]() mutable { + collector->ProducePartialResultsOnNetworkThread( + timestamp_us, std::move(sctp_transport_name)); + }); ProducePartialResultsOnSignalingThread(timestamp_us); } } void RTCStatsCollector::ClearCachedStatsReport() { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); cached_report_ = nullptr; } void RTCStatsCollector::WaitForPendingRequest() { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); // If a request is pending, blocks until the |network_report_event_| is // signaled and then delivers the result. Otherwise this is a NO-OP. 
MergeNetworkReport_s(); @@ -1081,7 +1257,9 @@ void RTCStatsCollector::WaitForPendingRequest() { void RTCStatsCollector::ProducePartialResultsOnSignalingThread( int64_t timestamp_us) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + partial_report_ = RTCStatsReport::Create(timestamp_us); ProducePartialResultsOnSignalingThreadImpl(timestamp_us, @@ -1098,7 +1276,9 @@ void RTCStatsCollector::ProducePartialResultsOnSignalingThread( void RTCStatsCollector::ProducePartialResultsOnSignalingThreadImpl( int64_t timestamp_us, RTCStatsReport* partial_report) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + ProduceDataChannelStats_s(timestamp_us, partial_report); ProduceMediaStreamStats_s(timestamp_us, partial_report); ProduceMediaStreamTrackStats_s(timestamp_us, partial_report); @@ -1107,14 +1287,29 @@ void RTCStatsCollector::ProducePartialResultsOnSignalingThreadImpl( } void RTCStatsCollector::ProducePartialResultsOnNetworkThread( - int64_t timestamp_us) { - RTC_DCHECK(network_thread_->IsCurrent()); + int64_t timestamp_us, + absl::optional sctp_transport_name) { + TRACE_EVENT0("webrtc", + "RTCStatsCollector::ProducePartialResultsOnNetworkThread"); + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + // Touching |network_report_| on this thread is safe by this method because // |network_report_event_| is reset before this method is invoked. 
network_report_ = RTCStatsReport::Create(timestamp_us); + std::set transport_names; + if (sctp_transport_name) { + transport_names.emplace(std::move(*sctp_transport_name)); + } + + for (const auto& info : transceiver_stats_infos_) { + if (info.transport_name) + transport_names.insert(*info.transport_name); + } + std::map transport_stats_by_name = - pc_->GetTransportStatsByNames(transport_names_); + pc_->GetTransportStatsByNames(transport_names); std::map transport_cert_stats = PrepareTransportCertificateStats_n(transport_stats_by_name); @@ -1125,8 +1320,9 @@ void RTCStatsCollector::ProducePartialResultsOnNetworkThread( // Signal that it is now safe to touch |network_report_| on the signaling // thread, and post a task to merge it into the final results. network_report_event_.Set(); + rtc::scoped_refptr collector(this); signaling_thread_->PostTask( - RTC_FROM_HERE, rtc::Bind(&RTCStatsCollector::MergeNetworkReport_s, this)); + RTC_FROM_HERE, [collector] { collector->MergeNetworkReport_s(); }); } void RTCStatsCollector::ProducePartialResultsOnNetworkThreadImpl( @@ -1135,7 +1331,9 @@ void RTCStatsCollector::ProducePartialResultsOnNetworkThreadImpl( transport_stats_by_name, const std::map& transport_cert_stats, RTCStatsReport* partial_report) { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + ProduceCertificateStats_n(timestamp_us, transport_cert_stats, partial_report); ProduceCodecStats_n(timestamp_us, transceiver_stats_infos_, partial_report); ProduceIceCandidateAndPairStats_n(timestamp_us, transport_stats_by_name, @@ -1147,7 +1345,7 @@ void RTCStatsCollector::ProducePartialResultsOnNetworkThreadImpl( } void RTCStatsCollector::MergeNetworkReport_s() { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); // The |network_report_event_| must be signaled for it to be safe to touch // |network_report_|. 
This is normally not blocking, but if // WaitForPendingRequest() is called while a request is pending, we might have @@ -1190,7 +1388,7 @@ void RTCStatsCollector::MergeNetworkReport_s() { void RTCStatsCollector::DeliverCachedReport( rtc::scoped_refptr cached_report, std::vector requests) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); RTC_DCHECK(!requests.empty()); RTC_DCHECK(cached_report); @@ -1221,7 +1419,9 @@ void RTCStatsCollector::ProduceCertificateStats_n( int64_t timestamp_us, const std::map& transport_cert_stats, RTCStatsReport* report) const { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + for (const auto& transport_cert_stats_pair : transport_cert_stats) { if (transport_cert_stats_pair.second.local) { ProduceCertificateStatsFromSSLCertificateStats( @@ -1238,11 +1438,16 @@ void RTCStatsCollector::ProduceCodecStats_n( int64_t timestamp_us, const std::vector& transceiver_stats_infos, RTCStatsReport* report) const { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + for (const auto& stats : transceiver_stats_infos) { if (!stats.mid) { continue; } + std::string transport_id = RTCTransportStatsIDFromTransportChannel( + *stats.transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTP); + const cricket::VoiceMediaInfo* voice_media_info = stats.track_media_info_map->voice_media_info(); const cricket::VideoMediaInfo* video_media_info = @@ -1252,12 +1457,12 @@ void RTCStatsCollector::ProduceCodecStats_n( // Inbound for (const auto& pair : voice_media_info->receive_codecs) { report->AddStats(CodecStatsFromRtpCodecParameters( - timestamp_us, *stats.mid, true, pair.second)); + timestamp_us, *stats.mid, transport_id, true, pair.second)); } // Outbound for (const auto& pair : voice_media_info->send_codecs) { 
report->AddStats(CodecStatsFromRtpCodecParameters( - timestamp_us, *stats.mid, false, pair.second)); + timestamp_us, *stats.mid, transport_id, false, pair.second)); } } // Video @@ -1265,12 +1470,12 @@ void RTCStatsCollector::ProduceCodecStats_n( // Inbound for (const auto& pair : video_media_info->receive_codecs) { report->AddStats(CodecStatsFromRtpCodecParameters( - timestamp_us, *stats.mid, true, pair.second)); + timestamp_us, *stats.mid, transport_id, true, pair.second)); } // Outbound for (const auto& pair : video_media_info->send_codecs) { report->AddStats(CodecStatsFromRtpCodecParameters( - timestamp_us, *stats.mid, false, pair.second)); + timestamp_us, *stats.mid, transport_id, false, pair.second)); } } } @@ -1279,22 +1484,22 @@ void RTCStatsCollector::ProduceCodecStats_n( void RTCStatsCollector::ProduceDataChannelStats_s( int64_t timestamp_us, RTCStatsReport* report) const { - RTC_DCHECK(signaling_thread_->IsCurrent()); - for (const rtc::scoped_refptr& data_channel : - pc_->sctp_data_channels()) { + RTC_DCHECK_RUN_ON(signaling_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + std::vector data_stats = pc_->GetDataChannelStats(); + for (const auto& stats : data_stats) { std::unique_ptr data_channel_stats( new RTCDataChannelStats( - "RTCDataChannel_" + rtc::ToString(data_channel->internal_id()), + "RTCDataChannel_" + rtc::ToString(stats.internal_id), timestamp_us)); - data_channel_stats->label = data_channel->label(); - data_channel_stats->protocol = data_channel->protocol(); - data_channel_stats->datachannelid = data_channel->id(); - data_channel_stats->state = - DataStateToRTCDataChannelState(data_channel->state()); - data_channel_stats->messages_sent = data_channel->messages_sent(); - data_channel_stats->bytes_sent = data_channel->bytes_sent(); - data_channel_stats->messages_received = data_channel->messages_received(); - data_channel_stats->bytes_received = data_channel->bytes_received(); + data_channel_stats->label = 
std::move(stats.label); + data_channel_stats->protocol = std::move(stats.protocol); + data_channel_stats->data_channel_identifier = stats.id; + data_channel_stats->state = DataStateToRTCDataChannelState(stats.state); + data_channel_stats->messages_sent = stats.messages_sent; + data_channel_stats->bytes_sent = stats.bytes_sent; + data_channel_stats->messages_received = stats.messages_received; + data_channel_stats->bytes_received = stats.bytes_received; report->AddStats(std::move(data_channel_stats)); } } @@ -1305,7 +1510,9 @@ void RTCStatsCollector::ProduceIceCandidateAndPairStats_n( transport_stats_by_name, const Call::Stats& call_stats, RTCStatsReport* report) const { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + for (const auto& entry : transport_stats_by_name) { const std::string& transport_name = entry.first; const cricket::TransportStats& transport_stats = entry.second; @@ -1385,7 +1592,8 @@ void RTCStatsCollector::ProduceIceCandidateAndPairStats_n( void RTCStatsCollector::ProduceMediaStreamStats_s( int64_t timestamp_us, RTCStatsReport* report) const { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; std::map> track_ids; @@ -1421,7 +1629,9 @@ void RTCStatsCollector::ProduceMediaStreamStats_s( void RTCStatsCollector::ProduceMediaStreamTrackStats_s( int64_t timestamp_us, RTCStatsReport* report) const { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + for (const RtpTransceiverStatsInfo& stats : transceiver_stats_infos_) { std::vector> senders; for (const auto& sender : stats.transceiver->senders()) { @@ -1442,7 +1652,9 @@ void RTCStatsCollector::ProduceMediaStreamTrackStats_s( void RTCStatsCollector::ProduceMediaSourceStats_s( int64_t timestamp_us, RTCStatsReport* 
report) const { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + for (const RtpTransceiverStatsInfo& transceiver_stats_info : transceiver_stats_infos_) { const auto& track_media_info_map = @@ -1461,6 +1673,8 @@ void RTCStatsCollector::ProduceMediaSourceStats_s( // create separate media source stats objects on a per-attachment basis. std::unique_ptr media_source_stats; if (track->kind() == MediaStreamTrackInterface::kAudioKind) { + AudioTrackInterface* audio_track = + static_cast(track.get()); auto audio_source_stats = std::make_unique( RTCMediaSourceStatsIDFromKindAndAttachment( cricket::MEDIA_TYPE_AUDIO, sender_internal->AttachmentId()), @@ -1481,8 +1695,21 @@ void RTCStatsCollector::ProduceMediaSourceStats_s( voice_sender_info->total_input_energy; audio_source_stats->total_samples_duration = voice_sender_info->total_input_duration; + SetAudioProcessingStats(audio_source_stats.get(), + voice_sender_info->apm_statistics); } } + // Audio processor may be attached to either the track or the send + // stream, so look in both places. + auto audio_processor(audio_track->GetAudioProcessor()); + if (audio_processor.get()) { + // The |has_remote_tracks| argument is obsolete; makes no difference + // if it's set to true or false. 
+ AudioProcessorInterface::AudioProcessorStatistics ap_stats = + audio_processor->GetStats(/*has_remote_tracks=*/false); + SetAudioProcessingStats(audio_source_stats.get(), + ap_stats.apm_statistics); + } media_source_stats = std::move(audio_source_stats); } else { RTC_DCHECK_EQ(MediaStreamTrackInterface::kVideoKind, track->kind()); @@ -1509,6 +1736,7 @@ void RTCStatsCollector::ProduceMediaSourceStats_s( if (video_sender_info) { video_source_stats->frames_per_second = video_sender_info->framerate_input; + video_source_stats->frames = video_sender_info->frames; } } media_source_stats = std::move(video_source_stats); @@ -1523,7 +1751,9 @@ void RTCStatsCollector::ProduceMediaSourceStats_s( void RTCStatsCollector::ProducePeerConnectionStats_s( int64_t timestamp_us, RTCStatsReport* report) const { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + std::unique_ptr stats( new RTCPeerConnectionStats("RTCPeerConnection", timestamp_us)); stats->data_channels_opened = internal_record_.data_channels_opened; @@ -1535,7 +1765,8 @@ void RTCStatsCollector::ProduceRTPStreamStats_n( int64_t timestamp_us, const std::vector& transceiver_stats_infos, RTCStatsReport* report) const { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; for (const RtpTransceiverStatsInfo& stats : transceiver_stats_infos) { if (stats.media_type == cricket::MEDIA_TYPE_AUDIO) { @@ -1552,6 +1783,9 @@ void RTCStatsCollector::ProduceAudioRTPStreamStats_n( int64_t timestamp_us, const RtpTransceiverStatsInfo& stats, RTCStatsReport* report) const { + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + if (!stats.mid || !stats.transport_name) { return; } @@ -1561,16 +1795,16 @@ void RTCStatsCollector::ProduceAudioRTPStreamStats_n( std::string mid = *stats.mid; std::string 
transport_id = RTCTransportStatsIDFromTransportChannel( *stats.transport_name, cricket::ICE_CANDIDATE_COMPONENT_RTP); - // Inbound + // Inbound and remote-outbound. + // The remote-outbound stats are based on RTCP sender reports sent from the + // remote endpoint providing metrics about the remote outbound streams. for (const cricket::VoiceReceiverInfo& voice_receiver_info : track_media_info_map.voice_media_info()->receivers) { if (!voice_receiver_info.connected()) continue; - auto inbound_audio = std::make_unique( - RTCInboundRTPStreamStatsIDFromSSRC(true, voice_receiver_info.ssrc()), - timestamp_us); - SetInboundRTPStreamStatsFromVoiceReceiverInfo(mid, voice_receiver_info, - inbound_audio.get()); + // Inbound. + auto inbound_audio = + CreateInboundAudioStreamStats(voice_receiver_info, mid, timestamp_us); // TODO(hta): This lookup should look for the sender, not the track. rtc::scoped_refptr audio_track = track_media_info_map.GetAudioTrack(voice_receiver_info); @@ -1581,16 +1815,27 @@ void RTCStatsCollector::ProduceAudioRTPStreamStats_n( track_media_info_map.GetAttachmentIdByTrack(audio_track).value()); } inbound_audio->transport_id = transport_id; + // Remote-outbound. + auto remote_outbound_audio = CreateRemoteOutboundAudioStreamStats( + voice_receiver_info, mid, inbound_audio->id(), transport_id); + // Add stats. + if (remote_outbound_audio) { + // When the remote outbound stats are available, the remote ID for the + // local inbound stats is set. + inbound_audio->remote_id = remote_outbound_audio->id(); + report->AddStats(std::move(remote_outbound_audio)); + } report->AddStats(std::move(inbound_audio)); } - // Outbound + // Outbound. 
std::map audio_outbound_rtps; for (const cricket::VoiceSenderInfo& voice_sender_info : track_media_info_map.voice_media_info()->senders) { if (!voice_sender_info.connected()) continue; auto outbound_audio = std::make_unique( - RTCOutboundRTPStreamStatsIDFromSSRC(true, voice_sender_info.ssrc()), + RTCOutboundRTPStreamStatsIDFromSSRC(cricket::MEDIA_TYPE_AUDIO, + voice_sender_info.ssrc()), timestamp_us); SetOutboundRTPStreamStatsFromVoiceSenderInfo(mid, voice_sender_info, outbound_audio.get()); @@ -1611,7 +1856,7 @@ void RTCStatsCollector::ProduceAudioRTPStreamStats_n( std::make_pair(outbound_audio->id(), outbound_audio.get())); report->AddStats(std::move(outbound_audio)); } - // Remote-inbound + // Remote-inbound. // These are Report Block-based, information sent from the remote endpoint, // providing metrics about our Outbound streams. We take advantage of the fact // that RTCOutboundRtpStreamStats, RTCCodecStats and RTCTransport have already @@ -1620,8 +1865,8 @@ void RTCStatsCollector::ProduceAudioRTPStreamStats_n( track_media_info_map.voice_media_info()->senders) { for (const auto& report_block_data : voice_sender_info.report_block_datas) { report->AddStats(ProduceRemoteInboundRtpStreamStatsFromReportBlockData( - report_block_data, cricket::MEDIA_TYPE_AUDIO, - std::move(audio_outbound_rtps), *report)); + report_block_data, cricket::MEDIA_TYPE_AUDIO, audio_outbound_rtps, + *report)); } } } @@ -1630,6 +1875,9 @@ void RTCStatsCollector::ProduceVideoRTPStreamStats_n( int64_t timestamp_us, const RtpTransceiverStatsInfo& stats, RTCStatsReport* report) const { + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + if (!stats.mid || !stats.transport_name) { return; } @@ -1645,7 +1893,8 @@ void RTCStatsCollector::ProduceVideoRTPStreamStats_n( if (!video_receiver_info.connected()) continue; auto inbound_video = std::make_unique( - RTCInboundRTPStreamStatsIDFromSSRC(false, video_receiver_info.ssrc()), + 
RTCInboundRTPStreamStatsIDFromSSRC(cricket::MEDIA_TYPE_VIDEO, + video_receiver_info.ssrc()), timestamp_us); SetInboundRTPStreamStatsFromVideoReceiverInfo(mid, video_receiver_info, inbound_video.get()); @@ -1659,20 +1908,20 @@ void RTCStatsCollector::ProduceVideoRTPStreamStats_n( } inbound_video->transport_id = transport_id; report->AddStats(std::move(inbound_video)); + // TODO(crbug.com/webrtc/12529): Add remote-outbound stats. } // Outbound std::map video_outbound_rtps; for (const cricket::VideoSenderInfo& video_sender_info : - enable_simulcast_stats_ - ? track_media_info_map.video_media_info()->senders - : track_media_info_map.video_media_info()->aggregated_senders) { + track_media_info_map.video_media_info()->senders) { if (!video_sender_info.connected()) continue; auto outbound_video = std::make_unique( - RTCOutboundRTPStreamStatsIDFromSSRC(false, video_sender_info.ssrc()), + RTCOutboundRTPStreamStatsIDFromSSRC(cricket::MEDIA_TYPE_VIDEO, + video_sender_info.ssrc()), timestamp_us); - SetOutboundRTPStreamStatsFromVideoSenderInfo( - mid, video_sender_info, enable_simulcast_stats_, outbound_video.get()); + SetOutboundRTPStreamStatsFromVideoSenderInfo(mid, video_sender_info, + outbound_video.get()); rtc::scoped_refptr video_track = track_media_info_map.GetVideoTrack(video_sender_info); if (video_track) { @@ -1699,8 +1948,8 @@ void RTCStatsCollector::ProduceVideoRTPStreamStats_n( track_media_info_map.video_media_info()->senders) { for (const auto& report_block_data : video_sender_info.report_block_datas) { report->AddStats(ProduceRemoteInboundRtpStreamStatsFromReportBlockData( - report_block_data, cricket::MEDIA_TYPE_VIDEO, - std::move(video_outbound_rtps), *report)); + report_block_data, cricket::MEDIA_TYPE_VIDEO, video_outbound_rtps, + *report)); } } } @@ -1711,7 +1960,9 @@ void RTCStatsCollector::ProduceTransportStats_n( transport_stats_by_name, const std::map& transport_cert_stats, RTCStatsReport* report) const { - RTC_DCHECK(network_thread_->IsCurrent()); + 
RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + for (const auto& entry : transport_stats_by_name) { const std::string& transport_name = entry.first; const cricket::TransportStats& transport_stats = entry.second; @@ -1751,7 +2002,9 @@ void RTCStatsCollector::ProduceTransportStats_n( transport_name, channel_stats.component), timestamp_us)); transport_stats->bytes_sent = 0; + transport_stats->packets_sent = 0; transport_stats->bytes_received = 0; + transport_stats->packets_received = 0; transport_stats->dtls_state = DtlsTransportStateToRTCDtlsTransportState(channel_stats.dtls_state); transport_stats->selected_candidate_pair_changes = @@ -1759,7 +2012,10 @@ void RTCStatsCollector::ProduceTransportStats_n( for (const cricket::ConnectionInfo& info : channel_stats.ice_transport_stats.connection_infos) { *transport_stats->bytes_sent += info.sent_total_bytes; + *transport_stats->packets_sent += + info.sent_total_packets - info.sent_discarded_packets; *transport_stats->bytes_received += info.recv_total_bytes; + *transport_stats->packets_received += info.packets_received; if (info.best_connection) { transport_stats->selected_candidate_pair_id = RTCIceCandidatePairStatsIDFromConnectionInfo(info); @@ -1802,7 +2058,9 @@ std::map RTCStatsCollector::PrepareTransportCertificateStats_n( const std::map& transport_stats_by_name) const { - RTC_DCHECK(network_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + std::map transport_cert_stats; for (const auto& entry : transport_stats_by_name) { const std::string& transport_name = entry.first; @@ -1826,10 +2084,10 @@ RTCStatsCollector::PrepareTransportCertificateStats_n( return transport_cert_stats; } -std::vector -RTCStatsCollector::PrepareTransceiverStatsInfos_s() const { - std::vector transceiver_stats_infos; +void RTCStatsCollector::PrepareTransceiverStatsInfosAndCallStats_s_w_n() { + 
RTC_DCHECK_RUN_ON(signaling_thread_); + transceiver_stats_infos_.clear(); // These are used to invoke GetStats for all the media channels together in // one worker thread hop. std::map> video_stats; - for (const auto& transceiver : pc_->GetTransceiversInternal()) { - cricket::MediaType media_type = transceiver->media_type(); + auto transceivers = pc_->GetTransceiversInternal(); - // Prepare stats entry. The TrackMediaInfoMap will be filled in after the - // stats have been fetched on the worker thread. - transceiver_stats_infos.emplace_back(); - RtpTransceiverStatsInfo& stats = transceiver_stats_infos.back(); - stats.transceiver = transceiver->internal(); - stats.media_type = media_type; + // TODO(tommi): See if we can avoid synchronously blocking the signaling + // thread while we do this (or avoid the Invoke at all). + network_thread_->Invoke(RTC_FROM_HERE, [this, &transceivers, + &voice_stats, &video_stats] { + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; - cricket::ChannelInterface* channel = transceiver->internal()->channel(); - if (!channel) { - // The remaining fields require a BaseChannel. - continue; - } + for (const auto& transceiver_proxy : transceivers) { + RtpTransceiver* transceiver = transceiver_proxy->internal(); + cricket::MediaType media_type = transceiver->media_type(); - stats.mid = channel->content_name(); - stats.transport_name = channel->transport_name(); - - if (media_type == cricket::MEDIA_TYPE_AUDIO) { - auto* voice_channel = static_cast(channel); - RTC_DCHECK(voice_stats.find(voice_channel->media_channel()) == - voice_stats.end()); - voice_stats[voice_channel->media_channel()] = - std::make_unique(); - } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { - auto* video_channel = static_cast(channel); - RTC_DCHECK(video_stats.find(video_channel->media_channel()) == - video_stats.end()); - video_stats[video_channel->media_channel()] = - std::make_unique(); - } else { - RTC_NOTREACHED(); + // Prepare stats entry. 
The TrackMediaInfoMap will be filled in after the + // stats have been fetched on the worker thread. + transceiver_stats_infos_.emplace_back(); + RtpTransceiverStatsInfo& stats = transceiver_stats_infos_.back(); + stats.transceiver = transceiver; + stats.media_type = media_type; + + cricket::ChannelInterface* channel = transceiver->channel(); + if (!channel) { + // The remaining fields require a BaseChannel. + continue; + } + + stats.mid = channel->content_name(); + stats.transport_name = channel->transport_name(); + + if (media_type == cricket::MEDIA_TYPE_AUDIO) { + auto* voice_channel = static_cast(channel); + RTC_DCHECK(voice_stats.find(voice_channel->media_channel()) == + voice_stats.end()); + voice_stats[voice_channel->media_channel()] = + std::make_unique(); + } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { + auto* video_channel = static_cast(channel); + RTC_DCHECK(video_stats.find(video_channel->media_channel()) == + video_stats.end()); + video_stats[video_channel->media_channel()] = + std::make_unique(); + } else { + RTC_NOTREACHED(); + } } - } + }); - // Call GetStats for all media channels together on the worker thread in one - // hop. + // We jump to the worker thread and call GetStats() on each media channel as + // well as GetCallStats(). At the same time we construct the + // TrackMediaInfoMaps, which also needs info from the worker thread. This + // minimizes the number of thread jumps. worker_thread_->Invoke(RTC_FROM_HERE, [&] { + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + for (const auto& entry : voice_stats) { - if (!entry.first->GetStats(entry.second.get())) { + if (!entry.first->GetStats(entry.second.get(), + /*get_and_clear_legacy_stats=*/false)) { RTC_LOG(LS_WARNING) << "Failed to get voice stats."; } } @@ -1888,69 +2161,52 @@ RTCStatsCollector::PrepareTransceiverStatsInfos_s() const { RTC_LOG(LS_WARNING) << "Failed to get video stats."; } } - }); - // Create the TrackMediaInfoMap for each transceiver stats object. 
- for (auto& stats : transceiver_stats_infos) { - auto transceiver = stats.transceiver; - std::unique_ptr voice_media_info; - std::unique_ptr video_media_info; - if (transceiver->channel()) { - cricket::MediaType media_type = transceiver->media_type(); - if (media_type == cricket::MEDIA_TYPE_AUDIO) { - auto* voice_channel = - static_cast(transceiver->channel()); - RTC_DCHECK(voice_stats[voice_channel->media_channel()]); - voice_media_info = - std::move(voice_stats[voice_channel->media_channel()]); - } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { - auto* video_channel = - static_cast(transceiver->channel()); - RTC_DCHECK(video_stats[video_channel->media_channel()]); - video_media_info = - std::move(video_stats[video_channel->media_channel()]); + // Create the TrackMediaInfoMap for each transceiver stats object. + for (auto& stats : transceiver_stats_infos_) { + auto transceiver = stats.transceiver; + std::unique_ptr voice_media_info; + std::unique_ptr video_media_info; + if (transceiver->channel()) { + cricket::MediaType media_type = transceiver->media_type(); + if (media_type == cricket::MEDIA_TYPE_AUDIO) { + auto* voice_channel = + static_cast(transceiver->channel()); + RTC_DCHECK(voice_stats[voice_channel->media_channel()]); + voice_media_info = + std::move(voice_stats[voice_channel->media_channel()]); + } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { + auto* video_channel = + static_cast(transceiver->channel()); + RTC_DCHECK(video_stats[video_channel->media_channel()]); + video_media_info = + std::move(video_stats[video_channel->media_channel()]); + } } + std::vector> senders; + for (const auto& sender : transceiver->senders()) { + senders.push_back(sender->internal()); + } + std::vector> receivers; + for (const auto& receiver : transceiver->receivers()) { + receivers.push_back(receiver->internal()); + } + stats.track_media_info_map = std::make_unique( + std::move(voice_media_info), std::move(video_media_info), senders, + receivers); } - 
std::vector> senders; - for (const auto& sender : transceiver->senders()) { - senders.push_back(sender->internal()); - } - std::vector> receivers; - for (const auto& receiver : transceiver->receivers()) { - receivers.push_back(receiver->internal()); - } - stats.track_media_info_map = std::make_unique( - std::move(voice_media_info), std::move(video_media_info), senders, - receivers); - } - - return transceiver_stats_infos; -} -std::set RTCStatsCollector::PrepareTransportNames_s() const { - std::set transport_names; - for (const auto& transceiver : pc_->GetTransceiversInternal()) { - if (transceiver->internal()->channel()) { - transport_names.insert( - transceiver->internal()->channel()->transport_name()); - } - } - if (pc_->rtp_data_channel()) { - transport_names.insert(pc_->rtp_data_channel()->transport_name()); - } - if (pc_->sctp_transport_name()) { - transport_names.insert(*pc_->sctp_transport_name()); - } - return transport_names; + call_stats_ = pc_->GetCallStats(); + }); } -void RTCStatsCollector::OnDataChannelCreated(DataChannel* channel) { +void RTCStatsCollector::OnSctpDataChannelCreated(SctpDataChannel* channel) { channel->SignalOpened.connect(this, &RTCStatsCollector::OnDataChannelOpened); channel->SignalClosed.connect(this, &RTCStatsCollector::OnDataChannelClosed); } -void RTCStatsCollector::OnDataChannelOpened(DataChannel* channel) { - RTC_DCHECK(signaling_thread_->IsCurrent()); +void RTCStatsCollector::OnDataChannelOpened(DataChannelInterface* channel) { + RTC_DCHECK_RUN_ON(signaling_thread_); bool result = internal_record_.opened_data_channels .insert(reinterpret_cast(channel)) .second; @@ -1958,8 +2214,8 @@ void RTCStatsCollector::OnDataChannelOpened(DataChannel* channel) { RTC_DCHECK(result); } -void RTCStatsCollector::OnDataChannelClosed(DataChannel* channel) { - RTC_DCHECK(signaling_thread_->IsCurrent()); +void RTCStatsCollector::OnDataChannelClosed(DataChannelInterface* channel) { + RTC_DCHECK_RUN_ON(signaling_thread_); // Only channels that 
have been fully opened (and have increased the // |data_channels_opened_| counter) increase the closed counter. if (internal_record_.opened_data_channels.erase( diff --git a/pc/rtc_stats_collector.h b/pc/rtc_stats_collector.h index 7c85a35fe0..5f13f54d26 100644 --- a/pc/rtc_stats_collector.h +++ b/pc/rtc_stats_collector.h @@ -11,6 +11,7 @@ #ifndef PC_RTC_STATS_COLLECTOR_H_ #define PC_RTC_STATS_COLLECTOR_H_ +#include #include #include #include @@ -18,19 +19,29 @@ #include #include "absl/types/optional.h" +#include "api/data_channel_interface.h" +#include "api/media_types.h" #include "api/scoped_refptr.h" #include "api/stats/rtc_stats_collector_callback.h" #include "api/stats/rtc_stats_report.h" #include "api/stats/rtcstats_objects.h" #include "call/call.h" #include "media/base/media_channel.h" -#include "pc/data_channel.h" +#include "pc/data_channel_utils.h" #include "pc/peer_connection_internal.h" +#include "pc/rtp_receiver.h" +#include "pc/rtp_sender.h" +#include "pc/rtp_transceiver.h" +#include "pc/sctp_data_channel.h" #include "pc/track_media_info_map.h" +#include "pc/transport_stats.h" +#include "rtc_base/checks.h" #include "rtc_base/event.h" #include "rtc_base/ref_count.h" +#include "rtc_base/ssl_certificate.h" #include "rtc_base/ssl_identity.h" #include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" #include "rtc_base/time_utils.h" namespace webrtc { @@ -42,7 +53,7 @@ class RtpReceiverInternal; // Stats are gathered on the signaling, worker and network threads // asynchronously. The callback is invoked on the signaling thread. Resulting // reports are cached for |cache_lifetime_| ms. 
-class RTCStatsCollector : public virtual rtc::RefCountInterface, +class RTCStatsCollector : public rtc::RefCountInterface, public sigslot::has_slots<> { public: static rtc::scoped_refptr Create( @@ -215,21 +226,23 @@ class RTCStatsCollector : public virtual rtc::RefCountInterface, PrepareTransportCertificateStats_n( const std::map& transport_stats_by_name) const; - std::vector PrepareTransceiverStatsInfos_s() const; - std::set PrepareTransportNames_s() const; + // The results are stored in |transceiver_stats_infos_| and |call_stats_|. + void PrepareTransceiverStatsInfosAndCallStats_s_w_n(); // Stats gathering on a particular thread. void ProducePartialResultsOnSignalingThread(int64_t timestamp_us); - void ProducePartialResultsOnNetworkThread(int64_t timestamp_us); + void ProducePartialResultsOnNetworkThread( + int64_t timestamp_us, + absl::optional sctp_transport_name); // Merges |network_report_| into |partial_report_| and completes the request. // This is a NO-OP if |network_report_| is null. void MergeNetworkReport_s(); // Slots for signals (sigslot) that are wired up to |pc_|. - void OnDataChannelCreated(DataChannel* channel); + void OnSctpDataChannelCreated(SctpDataChannel* channel); // Slots for signals (sigslot) that are wired up to |channel|. - void OnDataChannelOpened(DataChannel* channel); - void OnDataChannelClosed(DataChannel* channel); + void OnDataChannelOpened(DataChannelInterface* channel); + void OnDataChannelClosed(DataChannelInterface* channel); PeerConnectionInternal* const pc_; rtc::Thread* const signaling_thread_; @@ -254,12 +267,16 @@ class RTCStatsCollector : public virtual rtc::RefCountInterface, // has updated the value of |network_report_|. rtc::Event network_report_event_; - // Set in |GetStatsReport|, read in |ProducePartialResultsOnNetworkThread| and - // |ProducePartialResultsOnSignalingThread|, reset after work is complete. Not - // passed as arguments to avoid copies. 
This is thread safe - when we - // set/reset we know there are no pending stats requests in progress. + // Cleared and set in `PrepareTransceiverStatsInfosAndCallStats_s_w_n`, + // starting out on the signaling thread, then network. Later read on the + // network and signaling threads as part of collecting stats and finally + // reset when the work is done. Initially this variable was added and not + // passed around as an arguments to avoid copies. This is thread safe due to + // how operations are sequenced and we don't start the stats collection + // sequence if one is in progress. As a future improvement though, we could + // now get rid of the variable and keep the data scoped within a stats + // collection sequence. std::vector transceiver_stats_infos_; - std::set transport_names_; Call::Stats call_stats_; @@ -288,7 +305,6 @@ class RTCStatsCollector : public virtual rtc::RefCountInterface, std::set opened_data_channels; }; InternalRecord internal_record_; - bool enable_simulcast_stats_ = false; }; const char* CandidateTypeToRTCIceCandidateTypeForTesting( diff --git a/pc/rtc_stats_collector_unittest.cc b/pc/rtc_stats_collector_unittest.cc index db00dd7d91..2ac0737715 100644 --- a/pc/rtc_stats_collector_unittest.cc +++ b/pc/rtc_stats_collector_unittest.cc @@ -22,6 +22,8 @@ #include "absl/memory/memory.h" #include "absl/strings/str_replace.h" +#include "api/dtls_transport_interface.h" +#include "api/media_stream_track.h" #include "api/rtp_parameters.h" #include "api/stats/rtc_stats_report.h" #include "api/stats/rtcstats_objects.h" @@ -31,7 +33,7 @@ #include "p2p/base/p2p_constants.h" #include "p2p/base/port.h" #include "pc/media_stream.h" -#include "pc/media_stream_track.h" +#include "pc/test/fake_data_channel_provider.h" #include "pc/test/fake_peer_connection_for_stats.h" #include "pc/test/mock_data_channel.h" #include "pc/test/mock_rtp_receiver_internal.h" @@ -43,8 +45,10 @@ #include "rtc_base/gunit.h" #include "rtc_base/logging.h" #include 
"rtc_base/strings/json.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" +using ::testing::_; using ::testing::AtLeast; using ::testing::Invoke; using ::testing::Return; @@ -116,6 +120,14 @@ namespace { const int64_t kGetStatsReportTimeoutMs = 1000; +// Fake data used by `SetupExampleStatsVoiceGraph()` to fill in remote outbound +// stats. +constexpr int64_t kRemoteOutboundStatsTimestampMs = 123; +constexpr int64_t kRemoteOutboundStatsRemoteTimestampMs = 456; +constexpr uint32_t kRemoteOutboundStatsPacketsSent = 7u; +constexpr uint64_t kRemoteOutboundStatsBytesSent = 8u; +constexpr uint64_t kRemoteOutboundStatsReportsCount = 9u; + struct CertificateInfo { rtc::scoped_refptr certificate; std::vector ders; @@ -188,14 +200,34 @@ std::unique_ptr CreateFakeCandidate( return candidate; } +class FakeAudioProcessor : public AudioProcessorInterface { + public: + FakeAudioProcessor() {} + ~FakeAudioProcessor() {} + + private: + AudioProcessorInterface::AudioProcessorStatistics GetStats( + bool has_recv_streams) override { + AudioProcessorStatistics stats; + stats.apm_statistics.echo_return_loss = 2.0; + stats.apm_statistics.echo_return_loss_enhancement = 3.0; + return stats; + } +}; + class FakeAudioTrackForStats : public MediaStreamTrack { public: static rtc::scoped_refptr Create( const std::string& id, - MediaStreamTrackInterface::TrackState state) { + MediaStreamTrackInterface::TrackState state, + bool create_fake_audio_processor) { rtc::scoped_refptr audio_track_stats( new rtc::RefCountedObject(id)); audio_track_stats->set_state(state); + if (create_fake_audio_processor) { + audio_track_stats->processor_ = + rtc::make_ref_counted(); + } return audio_track_stats; } @@ -210,8 +242,11 @@ class FakeAudioTrackForStats : public MediaStreamTrack { void RemoveSink(webrtc::AudioTrackSinkInterface* sink) override {} bool GetSignalLevel(int* level) override { return false; } rtc::scoped_refptr GetAudioProcessor() override { - return nullptr; + return 
processor_; } + + private: + rtc::scoped_refptr processor_; }; class FakeVideoTrackSourceForStats : public VideoTrackSourceInterface { @@ -296,9 +331,11 @@ class FakeVideoTrackForStats : public MediaStreamTrack { rtc::scoped_refptr CreateFakeTrack( cricket::MediaType media_type, const std::string& track_id, - MediaStreamTrackInterface::TrackState track_state) { + MediaStreamTrackInterface::TrackState track_state, + bool create_fake_audio_processor = false) { if (media_type == cricket::MEDIA_TYPE_AUDIO) { - return FakeAudioTrackForStats::Create(track_id, track_state); + return FakeAudioTrackForStats::Create(track_id, track_state, + create_fake_audio_processor); } else { RTC_DCHECK_EQ(media_type, cricket::MEDIA_TYPE_VIDEO); return FakeVideoTrackForStats::Create(track_id, track_state, nullptr); @@ -329,6 +366,8 @@ rtc::scoped_refptr CreateMockSender( })); EXPECT_CALL(*sender, AttachmentId()).WillRepeatedly(Return(attachment_id)); EXPECT_CALL(*sender, stream_ids()).WillRepeatedly(Return(local_stream_ids)); + EXPECT_CALL(*sender, SetTransceiverAsStopped()); + EXPECT_CALL(*sender, Stop()); return sender; } @@ -355,6 +394,7 @@ rtc::scoped_refptr CreateMockReceiver( return params; })); EXPECT_CALL(*receiver, AttachmentId()).WillRepeatedly(Return(attachment_id)); + EXPECT_CALL(*receiver, StopAndEndTrack()); return receiver; } @@ -496,6 +536,7 @@ class RTCStatsCollectorWrapper { rtc::scoped_refptr(local_audio_track), voice_sender_info.local_stats[0].ssrc, voice_sender_info.local_stats[0].ssrc + 10, local_stream_ids); + EXPECT_CALL(*rtp_sender, SetMediaChannel(_)); pc_->AddSender(rtp_sender); } @@ -514,6 +555,7 @@ class RTCStatsCollectorWrapper { voice_receiver_info.local_stats[0].ssrc + 10); EXPECT_CALL(*rtp_receiver, streams()) .WillRepeatedly(Return(remote_streams)); + EXPECT_CALL(*rtp_receiver, SetMediaChannel(_)); pc_->AddReceiver(rtp_receiver); } @@ -531,6 +573,7 @@ class RTCStatsCollectorWrapper { rtc::scoped_refptr(local_video_track), 
video_sender_info.local_stats[0].ssrc, video_sender_info.local_stats[0].ssrc + 10, local_stream_ids); + EXPECT_CALL(*rtp_sender, SetMediaChannel(_)); pc_->AddSender(rtp_sender); } @@ -549,6 +592,7 @@ class RTCStatsCollectorWrapper { video_receiver_info.local_stats[0].ssrc + 10); EXPECT_CALL(*rtp_receiver, streams()) .WillRepeatedly(Return(remote_streams)); + EXPECT_CALL(*rtp_receiver, SetMediaChannel(_)); pc_->AddReceiver(rtp_receiver); } @@ -565,6 +609,11 @@ class RTCStatsCollectorWrapper { EXPECT_TRUE_WAIT(callback->report(), kGetStatsReportTimeoutMs); int64_t after = rtc::TimeUTCMicros(); for (const RTCStats& stats : *callback->report()) { + if (stats.type() == RTCRemoteInboundRtpStreamStats::kType || + stats.type() == RTCRemoteOutboundRtpStreamStats::kType) { + // Ignore remote timestamps. + continue; + } EXPECT_LE(stats.timestamp_us(), after); } return callback->report(); @@ -609,6 +658,7 @@ class RTCStatsCollectorTest : public ::testing::Test { std::string recv_codec_id; std::string outbound_rtp_id; std::string inbound_rtp_id; + std::string remote_outbound_rtp_id; std::string transport_id; std::string sender_track_id; std::string receiver_track_id; @@ -617,9 +667,9 @@ class RTCStatsCollectorTest : public ::testing::Test { std::string media_source_id; }; - // Sets up the example stats graph (see ASCII art below) used for testing the - // stats selection algorithm, - // https://w3c.github.io/webrtc-pc/#dfn-stats-selection-algorithm. + // Sets up the example stats graph (see ASCII art below) for a video only + // call. The graph is used for testing the stats selection algorithm (see + // https://w3c.github.io/webrtc-pc/#dfn-stats-selection-algorithm). // These tests test the integration of the stats traversal algorithm inside of // RTCStatsCollector. See rtcstatstraveral_unittest.cc for more stats // traversal tests. 
@@ -721,6 +771,125 @@ class RTCStatsCollectorTest : public ::testing::Test { return graph; } + // Sets up an example stats graph (see ASCII art below) for an audio only call + // and checks that the expected stats are generated. + ExampleStatsGraph SetupExampleStatsVoiceGraph( + bool add_remote_outbound_stats) { + constexpr uint32_t kLocalSsrc = 3; + constexpr uint32_t kRemoteSsrc = 4; + ExampleStatsGraph graph; + + // codec (send) + graph.send_codec_id = "RTCCodec_VoiceMid_Outbound_1"; + cricket::VoiceMediaInfo media_info; + RtpCodecParameters send_codec; + send_codec.payload_type = 1; + send_codec.clock_rate = 0; + media_info.send_codecs.insert( + std::make_pair(send_codec.payload_type, send_codec)); + // codec (recv) + graph.recv_codec_id = "RTCCodec_VoiceMid_Inbound_2"; + RtpCodecParameters recv_codec; + recv_codec.payload_type = 2; + recv_codec.clock_rate = 0; + media_info.receive_codecs.insert( + std::make_pair(recv_codec.payload_type, recv_codec)); + // outbound-rtp + graph.outbound_rtp_id = "RTCOutboundRTPAudioStream_3"; + media_info.senders.push_back(cricket::VoiceSenderInfo()); + media_info.senders[0].local_stats.push_back(cricket::SsrcSenderInfo()); + media_info.senders[0].local_stats[0].ssrc = kLocalSsrc; + media_info.senders[0].codec_payload_type = send_codec.payload_type; + // inbound-rtp + graph.inbound_rtp_id = "RTCInboundRTPAudioStream_4"; + media_info.receivers.push_back(cricket::VoiceReceiverInfo()); + media_info.receivers[0].local_stats.push_back(cricket::SsrcReceiverInfo()); + media_info.receivers[0].local_stats[0].ssrc = kRemoteSsrc; + media_info.receivers[0].codec_payload_type = recv_codec.payload_type; + // remote-outbound-rtp + if (add_remote_outbound_stats) { + graph.remote_outbound_rtp_id = "RTCRemoteOutboundRTPAudioStream_4"; + media_info.receivers[0].last_sender_report_timestamp_ms = + kRemoteOutboundStatsTimestampMs; + media_info.receivers[0].last_sender_report_remote_timestamp_ms = + kRemoteOutboundStatsRemoteTimestampMs; + 
media_info.receivers[0].sender_reports_packets_sent = + kRemoteOutboundStatsPacketsSent; + media_info.receivers[0].sender_reports_bytes_sent = + kRemoteOutboundStatsBytesSent; + media_info.receivers[0].sender_reports_reports_count = + kRemoteOutboundStatsReportsCount; + } + + // transport + graph.transport_id = "RTCTransport_TransportName_1"; + auto* video_media_channel = + pc_->AddVoiceChannel("VoiceMid", "TransportName"); + video_media_channel->SetStats(media_info); + // track (sender) + graph.sender = stats_->SetupLocalTrackAndSender( + cricket::MEDIA_TYPE_AUDIO, "LocalAudioTrackID", kLocalSsrc, false, 50); + graph.sender_track_id = "RTCMediaStreamTrack_sender_" + + rtc::ToString(graph.sender->AttachmentId()); + // track (receiver) and stream (remote stream) + graph.receiver = stats_->SetupRemoteTrackAndReceiver( + cricket::MEDIA_TYPE_AUDIO, "RemoteAudioTrackID", "RemoteStreamId", + kRemoteSsrc); + graph.receiver_track_id = "RTCMediaStreamTrack_receiver_" + + rtc::ToString(graph.receiver->AttachmentId()); + graph.remote_stream_id = "RTCMediaStream_RemoteStreamId"; + // peer-connection + graph.peer_connection_id = "RTCPeerConnection"; + // media-source (kind: video) + graph.media_source_id = + "RTCAudioSource_" + rtc::ToString(graph.sender->AttachmentId()); + + // Expected stats graph: + // + // +--- track (sender) stream (remote stream) ---> track (receiver) + // | ^ ^ + // | | | + // | +--------- outbound-rtp inbound-rtp ---------------+ + // | | | | | | + // | | v v v v + // | | codec (send) transport codec (recv) peer-connection + // v v + // media-source + + // Verify the stats graph is set up correctly. + graph.full_report = stats_->GetStatsReport(); + EXPECT_EQ(graph.full_report->size(), add_remote_outbound_stats ? 
11u : 10u); + EXPECT_TRUE(graph.full_report->Get(graph.send_codec_id)); + EXPECT_TRUE(graph.full_report->Get(graph.recv_codec_id)); + EXPECT_TRUE(graph.full_report->Get(graph.outbound_rtp_id)); + EXPECT_TRUE(graph.full_report->Get(graph.inbound_rtp_id)); + EXPECT_TRUE(graph.full_report->Get(graph.transport_id)); + EXPECT_TRUE(graph.full_report->Get(graph.sender_track_id)); + EXPECT_TRUE(graph.full_report->Get(graph.receiver_track_id)); + EXPECT_TRUE(graph.full_report->Get(graph.remote_stream_id)); + EXPECT_TRUE(graph.full_report->Get(graph.peer_connection_id)); + EXPECT_TRUE(graph.full_report->Get(graph.media_source_id)); + // `graph.remote_outbound_rtp_id` is omitted on purpose so that expectations + // can be added by the caller depending on what value it sets for the + // `add_remote_outbound_stats` argument. + const auto& sender_track = graph.full_report->Get(graph.sender_track_id) + ->cast_to(); + EXPECT_EQ(*sender_track.media_source_id, graph.media_source_id); + const auto& outbound_rtp = graph.full_report->Get(graph.outbound_rtp_id) + ->cast_to(); + EXPECT_EQ(*outbound_rtp.media_source_id, graph.media_source_id); + EXPECT_EQ(*outbound_rtp.codec_id, graph.send_codec_id); + EXPECT_EQ(*outbound_rtp.track_id, graph.sender_track_id); + EXPECT_EQ(*outbound_rtp.transport_id, graph.transport_id); + const auto& inbound_rtp = graph.full_report->Get(graph.inbound_rtp_id) + ->cast_to(); + EXPECT_EQ(*inbound_rtp.codec_id, graph.recv_codec_id); + EXPECT_EQ(*inbound_rtp.track_id, graph.receiver_track_id); + EXPECT_EQ(*inbound_rtp.transport_id, graph.transport_id); + + return graph; + } + protected: rtc::ScopedFakeClock fake_clock_; rtc::scoped_refptr pc_; @@ -782,9 +951,14 @@ TEST_F(RTCStatsCollectorTest, ToJsonProducesParseableJson) { ExampleStatsGraph graph = SetupExampleStatsGraphForSelectorTests(); rtc::scoped_refptr report = stats_->GetStatsReport(); std::string json_format = report->ToJson(); - Json::Reader reader; + + Json::CharReaderBuilder builder; Json::Value 
json_value; - ASSERT_TRUE(reader.parse(json_format, json_value)); + std::unique_ptr reader(builder.newCharReader()); + ASSERT_TRUE(reader->parse(json_format.c_str(), + json_format.c_str() + json_format.size(), + &json_value, nullptr)); + // A very brief sanity check on the result. EXPECT_EQ(report->size(), json_value.size()); } @@ -872,6 +1046,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCCodecStats) { expected_inbound_audio_codec.clock_rate = 1337; expected_inbound_audio_codec.channels = 1; expected_inbound_audio_codec.sdp_fmtp_line = "minptime=10;useinbandfec=1"; + expected_inbound_audio_codec.transport_id = "RTCTransport_TransportName_1"; RTCCodecStats expected_outbound_audio_codec("RTCCodec_AudioMid_Outbound_2", report->timestamp_us()); @@ -879,6 +1054,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCCodecStats) { expected_outbound_audio_codec.mime_type = "audio/isac"; expected_outbound_audio_codec.clock_rate = 1338; expected_outbound_audio_codec.channels = 2; + expected_outbound_audio_codec.transport_id = "RTCTransport_TransportName_1"; RTCCodecStats expected_inbound_video_codec("RTCCodec_VideoMid_Inbound_3", report->timestamp_us()); @@ -887,12 +1063,14 @@ TEST_F(RTCStatsCollectorTest, CollectRTCCodecStats) { expected_inbound_video_codec.clock_rate = 1339; expected_inbound_video_codec.sdp_fmtp_line = "level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42001f"; + expected_inbound_video_codec.transport_id = "RTCTransport_TransportName_1"; RTCCodecStats expected_outbound_video_codec("RTCCodec_VideoMid_Outbound_4", report->timestamp_us()); expected_outbound_video_codec.payload_type = 4; expected_outbound_video_codec.mime_type = "video/VP8"; expected_outbound_video_codec.clock_rate = 1340; + expected_outbound_video_codec.transport_id = "RTCTransport_TransportName_1"; ASSERT_TRUE(report->Get(expected_inbound_audio_codec.id())); EXPECT_EQ( @@ -975,9 +1153,9 @@ TEST_F(RTCStatsCollectorTest, CollectRTCCertificateStatsChain) { TEST_F(RTCStatsCollectorTest, 
CollectTwoRTCDataChannelStatsWithPendingId) { pc_->AddSctpDataChannel( - new MockDataChannel(/*id=*/-1, DataChannelInterface::kConnecting)); + new MockSctpDataChannel(/*id=*/-1, DataChannelInterface::kConnecting)); pc_->AddSctpDataChannel( - new MockDataChannel(/*id=*/-1, DataChannelInterface::kConnecting)); + new MockSctpDataChannel(/*id=*/-1, DataChannelInterface::kConnecting)); rtc::scoped_refptr report = stats_->GetStatsReport(); } @@ -986,52 +1164,53 @@ TEST_F(RTCStatsCollectorTest, CollectRTCDataChannelStats) { // Note: The test assumes data channel IDs are predictable. // This is not a safe assumption, but in order to make it work for // the test, we reset the ID allocator at test start. - DataChannel::ResetInternalIdAllocatorForTesting(-1); - pc_->AddSctpDataChannel(new MockDataChannel(0, "MockDataChannel0", - DataChannelInterface::kConnecting, - "udp", 1, 2, 3, 4)); + SctpDataChannel::ResetInternalIdAllocatorForTesting(-1); + pc_->AddSctpDataChannel(new MockSctpDataChannel( + 0, "MockSctpDataChannel0", DataChannelInterface::kConnecting, "udp", 1, 2, + 3, 4)); RTCDataChannelStats expected_data_channel0("RTCDataChannel_0", 0); - expected_data_channel0.label = "MockDataChannel0"; + expected_data_channel0.label = "MockSctpDataChannel0"; expected_data_channel0.protocol = "udp"; - expected_data_channel0.datachannelid = 0; + expected_data_channel0.data_channel_identifier = 0; expected_data_channel0.state = "connecting"; expected_data_channel0.messages_sent = 1; expected_data_channel0.bytes_sent = 2; expected_data_channel0.messages_received = 3; expected_data_channel0.bytes_received = 4; - pc_->AddSctpDataChannel(new MockDataChannel( - 1, "MockDataChannel1", DataChannelInterface::kOpen, "tcp", 5, 6, 7, 8)); + pc_->AddSctpDataChannel(new MockSctpDataChannel(1, "MockSctpDataChannel1", + DataChannelInterface::kOpen, + "tcp", 5, 6, 7, 8)); RTCDataChannelStats expected_data_channel1("RTCDataChannel_1", 0); - expected_data_channel1.label = "MockDataChannel1"; + 
expected_data_channel1.label = "MockSctpDataChannel1"; expected_data_channel1.protocol = "tcp"; - expected_data_channel1.datachannelid = 1; + expected_data_channel1.data_channel_identifier = 1; expected_data_channel1.state = "open"; expected_data_channel1.messages_sent = 5; expected_data_channel1.bytes_sent = 6; expected_data_channel1.messages_received = 7; expected_data_channel1.bytes_received = 8; - pc_->AddSctpDataChannel(new MockDataChannel(2, "MockDataChannel2", - DataChannelInterface::kClosing, - "udp", 9, 10, 11, 12)); + pc_->AddSctpDataChannel(new MockSctpDataChannel( + 2, "MockSctpDataChannel2", DataChannelInterface::kClosing, "udp", 9, 10, + 11, 12)); RTCDataChannelStats expected_data_channel2("RTCDataChannel_2", 0); - expected_data_channel2.label = "MockDataChannel2"; + expected_data_channel2.label = "MockSctpDataChannel2"; expected_data_channel2.protocol = "udp"; - expected_data_channel2.datachannelid = 2; + expected_data_channel2.data_channel_identifier = 2; expected_data_channel2.state = "closing"; expected_data_channel2.messages_sent = 9; expected_data_channel2.bytes_sent = 10; expected_data_channel2.messages_received = 11; expected_data_channel2.bytes_received = 12; - pc_->AddSctpDataChannel(new MockDataChannel(3, "MockDataChannel3", - DataChannelInterface::kClosed, - "tcp", 13, 14, 15, 16)); + pc_->AddSctpDataChannel(new MockSctpDataChannel(3, "MockSctpDataChannel3", + DataChannelInterface::kClosed, + "tcp", 13, 14, 15, 16)); RTCDataChannelStats expected_data_channel3("RTCDataChannel_3", 0); - expected_data_channel3.label = "MockDataChannel3"; + expected_data_channel3.label = "MockSctpDataChannel3"; expected_data_channel3.protocol = "tcp"; - expected_data_channel3.datachannelid = 3; + expected_data_channel3.data_channel_identifier = 3; expected_data_channel3.state = "closed"; expected_data_channel3.messages_sent = 13; expected_data_channel3.bytes_sent = 14; @@ -1068,6 +1247,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidateStats) { 
expected_a_local_host.transport_id = "RTCTransport_a_0"; expected_a_local_host.network_type = "vpn"; expected_a_local_host.ip = "1.2.3.4"; + expected_a_local_host.address = "1.2.3.4"; expected_a_local_host.port = 5; expected_a_local_host.protocol = "a_local_host's protocol"; expected_a_local_host.candidate_type = "host"; @@ -1081,11 +1261,11 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidateStats) { "RTCIceCandidate_" + a_remote_srflx->id(), 0); expected_a_remote_srflx.transport_id = "RTCTransport_a_0"; expected_a_remote_srflx.ip = "6.7.8.9"; + expected_a_remote_srflx.address = "6.7.8.9"; expected_a_remote_srflx.port = 10; expected_a_remote_srflx.protocol = "remote_srflx's protocol"; expected_a_remote_srflx.candidate_type = "srflx"; expected_a_remote_srflx.priority = 1; - expected_a_remote_srflx.deleted = false; EXPECT_TRUE(*expected_a_remote_srflx.is_remote); std::unique_ptr a_local_prflx = CreateFakeCandidate( @@ -1096,11 +1276,11 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidateStats) { expected_a_local_prflx.transport_id = "RTCTransport_a_0"; expected_a_local_prflx.network_type = "cellular"; expected_a_local_prflx.ip = "11.12.13.14"; + expected_a_local_prflx.address = "11.12.13.14"; expected_a_local_prflx.port = 15; expected_a_local_prflx.protocol = "a_local_prflx's protocol"; expected_a_local_prflx.candidate_type = "prflx"; expected_a_local_prflx.priority = 2; - expected_a_local_prflx.deleted = false; EXPECT_FALSE(*expected_a_local_prflx.is_remote); std::unique_ptr a_remote_relay = CreateFakeCandidate( @@ -1110,11 +1290,11 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidateStats) { "RTCIceCandidate_" + a_remote_relay->id(), 0); expected_a_remote_relay.transport_id = "RTCTransport_a_0"; expected_a_remote_relay.ip = "16.17.18.19"; + expected_a_remote_relay.address = "16.17.18.19"; expected_a_remote_relay.port = 20; expected_a_remote_relay.protocol = "a_remote_relay's protocol"; expected_a_remote_relay.candidate_type = "relay"; 
expected_a_remote_relay.priority = 3; - expected_a_remote_relay.deleted = false; EXPECT_TRUE(*expected_a_remote_relay.is_remote); std::unique_ptr a_local_relay = CreateFakeCandidate( @@ -1126,12 +1306,12 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidateStats) { "RTCIceCandidate_" + a_local_relay->id(), 0); expected_a_local_relay.transport_id = "RTCTransport_a_0"; expected_a_local_relay.ip = "16.17.18.19"; + expected_a_local_relay.address = "16.17.18.19"; expected_a_local_relay.port = 21; expected_a_local_relay.protocol = "a_local_relay's protocol"; expected_a_local_relay.relay_protocol = "tcp"; expected_a_local_relay.candidate_type = "relay"; expected_a_local_relay.priority = 1; - expected_a_local_relay.deleted = false; EXPECT_TRUE(*expected_a_local_relay.is_remote); // Candidates in the second transport stats. @@ -1143,11 +1323,11 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidateStats) { expected_b_local.transport_id = "RTCTransport_b_0"; expected_b_local.network_type = "wifi"; expected_b_local.ip = "42.42.42.42"; + expected_b_local.address = "42.42.42.42"; expected_b_local.port = 42; expected_b_local.protocol = "b_local's protocol"; expected_b_local.candidate_type = "host"; expected_b_local.priority = 42; - expected_b_local.deleted = false; EXPECT_FALSE(*expected_b_local.is_remote); std::unique_ptr b_remote = CreateFakeCandidate( @@ -1157,11 +1337,11 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidateStats) { "RTCIceCandidate_" + b_remote->id(), 0); expected_b_remote.transport_id = "RTCTransport_b_0"; expected_b_remote.ip = "42.42.42.42"; + expected_b_remote.address = "42.42.42.42"; expected_b_remote.port = 42; expected_b_remote.protocol = "b_remote's protocol"; expected_b_remote.candidate_type = "host"; expected_b_remote.priority = 42; - expected_b_remote.deleted = false; EXPECT_TRUE(*expected_b_remote.is_remote); // Add candidate pairs to connection. 
@@ -1358,11 +1538,11 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidatePairStats) { expected_local_candidate.transport_id = *expected_pair.transport_id; expected_local_candidate.network_type = "wifi"; expected_local_candidate.ip = "42.42.42.42"; + expected_local_candidate.address = "42.42.42.42"; expected_local_candidate.port = 42; expected_local_candidate.protocol = "protocol"; expected_local_candidate.candidate_type = "host"; expected_local_candidate.priority = 42; - expected_local_candidate.deleted = false; EXPECT_FALSE(*expected_local_candidate.is_remote); ASSERT_TRUE(report->Get(expected_local_candidate.id())); EXPECT_EQ(expected_local_candidate, @@ -1373,11 +1553,11 @@ TEST_F(RTCStatsCollectorTest, CollectRTCIceCandidatePairStats) { *expected_pair.remote_candidate_id, report->timestamp_us()); expected_remote_candidate.transport_id = *expected_pair.transport_id; expected_remote_candidate.ip = "42.42.42.42"; + expected_remote_candidate.address = "42.42.42.42"; expected_remote_candidate.port = 42; expected_remote_candidate.protocol = "protocol"; expected_remote_candidate.candidate_type = "host"; expected_remote_candidate.priority = 42; - expected_remote_candidate.deleted = false; EXPECT_TRUE(*expected_remote_candidate.is_remote); ASSERT_TRUE(report->Get(expected_remote_candidate.id())); EXPECT_EQ(expected_remote_candidate, @@ -1398,12 +1578,16 @@ TEST_F(RTCStatsCollectorTest, CollectRTCPeerConnectionStats) { report->Get("RTCPeerConnection")->cast_to()); } - rtc::scoped_refptr dummy_channel_a = DataChannel::Create( - nullptr, cricket::DCT_NONE, "DummyChannelA", InternalDataChannelInit()); - pc_->SignalDataChannelCreated()(dummy_channel_a.get()); - rtc::scoped_refptr dummy_channel_b = DataChannel::Create( - nullptr, cricket::DCT_NONE, "DummyChannelB", InternalDataChannelInit()); - pc_->SignalDataChannelCreated()(dummy_channel_b.get()); + // TODO(bugs.webrtc.org/11547): Supply a separate network thread. 
+ FakeDataChannelProvider provider; + rtc::scoped_refptr dummy_channel_a = SctpDataChannel::Create( + &provider, "DummyChannelA", InternalDataChannelInit(), + rtc::Thread::Current(), rtc::Thread::Current()); + pc_->SignalSctpDataChannelCreated()(dummy_channel_a.get()); + rtc::scoped_refptr dummy_channel_b = SctpDataChannel::Create( + &provider, "DummyChannelB", InternalDataChannelInit(), + rtc::Thread::Current(), rtc::Thread::Current()); + pc_->SignalSctpDataChannelCreated()(dummy_channel_b.get()); dummy_channel_a->SignalOpened(dummy_channel_a.get()); // Closing a channel that is not opened should not affect the counts. @@ -1544,7 +1728,7 @@ TEST_F(RTCStatsCollectorTest, cricket::VoiceReceiverInfo voice_receiver_info; voice_receiver_info.local_stats.push_back(cricket::SsrcReceiverInfo()); voice_receiver_info.local_stats[0].ssrc = 3; - voice_receiver_info.audio_level = 16383; + voice_receiver_info.audio_level = 16383; // [0,32767] voice_receiver_info.total_output_energy = 0.125; voice_receiver_info.total_samples_received = 4567; voice_receiver_info.total_output_duration = 0.25; @@ -1553,7 +1737,7 @@ TEST_F(RTCStatsCollectorTest, voice_receiver_info.inserted_samples_for_deceleration = 987; voice_receiver_info.removed_samples_for_acceleration = 876; voice_receiver_info.silent_concealed_samples = 765; - voice_receiver_info.jitter_buffer_delay_seconds = 3456; + voice_receiver_info.jitter_buffer_delay_seconds = 3.456; voice_receiver_info.jitter_buffer_emitted_count = 13; voice_receiver_info.jitter_buffer_target_delay_seconds = 7.894; voice_receiver_info.jitter_buffer_flushes = 7; @@ -1589,7 +1773,7 @@ TEST_F(RTCStatsCollectorTest, expected_remote_audio_track.remote_source = true; expected_remote_audio_track.ended = false; expected_remote_audio_track.detached = false; - expected_remote_audio_track.audio_level = 16383.0 / 32767.0; + expected_remote_audio_track.audio_level = 16383.0 / 32767.0; // [0,1] expected_remote_audio_track.total_audio_energy = 0.125; 
expected_remote_audio_track.total_samples_received = 4567; expected_remote_audio_track.total_samples_duration = 0.25; @@ -1598,7 +1782,7 @@ TEST_F(RTCStatsCollectorTest, expected_remote_audio_track.inserted_samples_for_deceleration = 987; expected_remote_audio_track.removed_samples_for_acceleration = 876; expected_remote_audio_track.silent_concealed_samples = 765; - expected_remote_audio_track.jitter_buffer_delay = 3456; + expected_remote_audio_track.jitter_buffer_delay = 3.456; expected_remote_audio_track.jitter_buffer_emitted_count = 13; expected_remote_audio_track.jitter_buffer_target_delay = 7.894; expected_remote_audio_track.jitter_buffer_flushes = 7; @@ -1770,12 +1954,25 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Audio) { voice_media_info.receivers[0].local_stats[0].ssrc = 1; voice_media_info.receivers[0].packets_lost = -1; // Signed per RFC3550 voice_media_info.receivers[0].packets_rcvd = 2; + voice_media_info.receivers[0].nacks_sent = 5; voice_media_info.receivers[0].fec_packets_discarded = 5566; voice_media_info.receivers[0].fec_packets_received = 6677; voice_media_info.receivers[0].payload_bytes_rcvd = 3; voice_media_info.receivers[0].header_and_padding_bytes_rcvd = 4; voice_media_info.receivers[0].codec_payload_type = 42; voice_media_info.receivers[0].jitter_ms = 4500; + voice_media_info.receivers[0].jitter_buffer_delay_seconds = 1.0; + voice_media_info.receivers[0].jitter_buffer_emitted_count = 2; + voice_media_info.receivers[0].total_samples_received = 3; + voice_media_info.receivers[0].concealed_samples = 4; + voice_media_info.receivers[0].silent_concealed_samples = 5; + voice_media_info.receivers[0].concealment_events = 6; + voice_media_info.receivers[0].inserted_samples_for_deceleration = 7; + voice_media_info.receivers[0].removed_samples_for_acceleration = 8; + voice_media_info.receivers[0].audio_level = 14442; // [0,32767] + voice_media_info.receivers[0].total_output_energy = 10.0; + 
voice_media_info.receivers[0].total_output_duration = 11.0; + voice_media_info.receivers[0].last_packet_received_timestamp_ms = absl::nullopt; @@ -1800,13 +1997,13 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Audio) { RTCInboundRTPStreamStats expected_audio("RTCInboundRTPAudioStream_1", report->timestamp_us()); expected_audio.ssrc = 1; - expected_audio.is_remote = false; expected_audio.media_type = "audio"; expected_audio.kind = "audio"; expected_audio.track_id = stats_of_track_type[0]->id(); expected_audio.transport_id = "RTCTransport_TransportName_1"; expected_audio.codec_id = "RTCCodec_AudioMid_Inbound_42"; expected_audio.packets_received = 2; + expected_audio.nack_count = 5; expected_audio.fec_packets_discarded = 5566; expected_audio.fec_packets_received = 6677; expected_audio.bytes_received = 3; @@ -1814,6 +2011,18 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Audio) { expected_audio.packets_lost = -1; // |expected_audio.last_packet_received_timestamp| should be undefined. expected_audio.jitter = 4.5; + expected_audio.jitter_buffer_delay = 1.0; + expected_audio.jitter_buffer_emitted_count = 2; + expected_audio.total_samples_received = 3; + expected_audio.concealed_samples = 4; + expected_audio.silent_concealed_samples = 5; + expected_audio.concealment_events = 6; + expected_audio.inserted_samples_for_deceleration = 7; + expected_audio.removed_samples_for_acceleration = 8; + expected_audio.audio_level = 14442.0 / 32767.0; // [0,1] + expected_audio.total_audio_energy = 10.0; + expected_audio.total_samples_duration = 11.0; + ASSERT_TRUE(report->Get(expected_audio.id())); EXPECT_EQ( report->Get(expected_audio.id())->cast_to(), @@ -1821,7 +2030,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Audio) { // Set previously undefined values and "GetStats" again. 
voice_media_info.receivers[0].last_packet_received_timestamp_ms = 3000; - expected_audio.last_packet_received_timestamp = 3.0; + expected_audio.last_packet_received_timestamp = 3000.0; voice_media_info.receivers[0].estimated_playout_ntp_timestamp_ms = 4567; expected_audio.estimated_playout_timestamp = 4567; voice_media_channel->SetStats(voice_media_info); @@ -1852,12 +2061,17 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Video) { video_media_info.receivers[0].firs_sent = 5; video_media_info.receivers[0].plis_sent = 6; video_media_info.receivers[0].nacks_sent = 7; - video_media_info.receivers[0].frames_decoded = 8; + video_media_info.receivers[0].frames_received = 8; + video_media_info.receivers[0].frames_decoded = 9; video_media_info.receivers[0].key_frames_decoded = 3; + video_media_info.receivers[0].frames_dropped = 13; video_media_info.receivers[0].qp_sum = absl::nullopt; video_media_info.receivers[0].total_decode_time_ms = 9000; video_media_info.receivers[0].total_inter_frame_delay = 0.123; video_media_info.receivers[0].total_squared_inter_frame_delay = 0.00456; + video_media_info.receivers[0].jitter_ms = 1199; + video_media_info.receivers[0].jitter_buffer_delay_seconds = 3.456; + video_media_info.receivers[0].jitter_buffer_emitted_count = 13; video_media_info.receivers[0].last_packet_received_timestamp_ms = absl::nullopt; @@ -1884,7 +2098,6 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Video) { RTCInboundRTPStreamStats expected_video("RTCInboundRTPVideoStream_1", report->timestamp_us()); expected_video.ssrc = 1; - expected_video.is_remote = false; expected_video.media_type = "video"; expected_video.kind = "video"; expected_video.track_id = IdForType(report); @@ -1897,12 +2110,17 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Video) { expected_video.bytes_received = 3; expected_video.header_bytes_received = 12; expected_video.packets_lost = 42; - expected_video.frames_decoded = 8; + 
expected_video.frames_received = 8; + expected_video.frames_decoded = 9; expected_video.key_frames_decoded = 3; + expected_video.frames_dropped = 13; // |expected_video.qp_sum| should be undefined. expected_video.total_decode_time = 9.0; expected_video.total_inter_frame_delay = 0.123; expected_video.total_squared_inter_frame_delay = 0.00456; + expected_video.jitter = 1.199; + expected_video.jitter_buffer_delay = 3.456; + expected_video.jitter_buffer_emitted_count = 13; // |expected_video.last_packet_received_timestamp| should be undefined. // |expected_video.content_type| should be undefined. // |expected_video.decoder_implementation| should be undefined. @@ -1916,7 +2134,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCInboundRTPStreamStats_Video) { video_media_info.receivers[0].qp_sum = 9; expected_video.qp_sum = 9; video_media_info.receivers[0].last_packet_received_timestamp_ms = 1000; - expected_video.last_packet_received_timestamp = 1.0; + expected_video.last_packet_received_timestamp = 1000.0; video_media_info.receivers[0].content_type = VideoContentType::SCREENSHARE; expected_video.content_type = "screenshare"; video_media_info.receivers[0].estimated_playout_ntp_timestamp_ms = 1234; @@ -1947,6 +2165,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Audio) { voice_media_info.senders[0].payload_bytes_sent = 3; voice_media_info.senders[0].header_and_padding_bytes_sent = 12; voice_media_info.senders[0].retransmitted_bytes_sent = 30; + voice_media_info.senders[0].nacks_rcvd = 31; voice_media_info.senders[0].codec_payload_type = 42; RtpCodecParameters codec_parameters; @@ -1970,7 +2189,6 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Audio) { expected_audio.media_source_id = "RTCAudioSource_50"; // |expected_audio.remote_id| should be undefined. 
expected_audio.ssrc = 1; - expected_audio.is_remote = false; expected_audio.media_type = "audio"; expected_audio.kind = "audio"; expected_audio.track_id = IdForType(report); @@ -1981,6 +2199,7 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Audio) { expected_audio.bytes_sent = 3; expected_audio.header_bytes_sent = 12; expected_audio.retransmitted_bytes_sent = 30; + expected_audio.nack_count = 31; ASSERT_TRUE(report->Get(expected_audio.id())); EXPECT_EQ( @@ -2018,6 +2237,8 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Video) { video_media_info.senders[0].total_packet_send_delay_ms = 10000; video_media_info.senders[0].quality_limitation_reason = QualityLimitationReason::kBandwidth; + video_media_info.senders[0].quality_limitation_durations_ms + [webrtc::QualityLimitationReason::kBandwidth] = 300; video_media_info.senders[0].quality_limitation_resolution_changes = 56u; video_media_info.senders[0].qp_sum = absl::nullopt; video_media_info.senders[0].content_type = VideoContentType::UNSPECIFIED; @@ -2054,7 +2275,6 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Video) { expected_video.media_source_id = "RTCVideoSource_50"; // |expected_video.remote_id| should be undefined. 
expected_video.ssrc = 1; - expected_video.is_remote = false; expected_video.media_type = "video"; expected_video.kind = "video"; expected_video.track_id = stats_of_track_type[0]->id(); @@ -2074,14 +2294,15 @@ TEST_F(RTCStatsCollectorTest, CollectRTCOutboundRTPStreamStats_Video) { expected_video.total_encoded_bytes_target = 1234; expected_video.total_packet_send_delay = 10.0; expected_video.quality_limitation_reason = "bandwidth"; + expected_video.quality_limitation_durations = std::map{ + std::pair{"bandwidth", 300.0}, + }; expected_video.quality_limitation_resolution_changes = 56u; - if (pc_->GetConfiguration().enable_simulcast_stats) { - expected_video.frame_width = 200u; - expected_video.frame_height = 100u; - expected_video.frames_per_second = 10.0; - expected_video.frames_sent = 5; - expected_video.huge_frames_sent = 2; - } + expected_video.frame_width = 200u; + expected_video.frame_height = 100u; + expected_video.frames_per_second = 10.0; + expected_video.frames_sent = 5; + expected_video.huge_frames_sent = 2; // |expected_video.content_type| should be undefined. // |expected_video.qp_sum| should be undefined. // |expected_video.encoder_implementation| should be undefined. 
@@ -2138,11 +2359,14 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStats) { rtp_connection_info.remote_candidate = *rtp_remote_candidate.get(); rtp_connection_info.sent_total_bytes = 42; rtp_connection_info.recv_total_bytes = 1337; + rtp_connection_info.sent_total_packets = 3; + rtp_connection_info.sent_discarded_packets = 2; + rtp_connection_info.packets_received = 4; cricket::TransportChannelStats rtp_transport_channel_stats; rtp_transport_channel_stats.component = cricket::ICE_CANDIDATE_COMPONENT_RTP; rtp_transport_channel_stats.ice_transport_stats.connection_infos.push_back( rtp_connection_info); - rtp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_NEW; + rtp_transport_channel_stats.dtls_state = DtlsTransportState::kNew; rtp_transport_channel_stats.ice_transport_stats .selected_candidate_pair_changes = 1; pc_->SetTransportStats(kTransportName, {rtp_transport_channel_stats}); @@ -2155,7 +2379,9 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStats) { rtc::ToString(cricket::ICE_CANDIDATE_COMPONENT_RTP), report->timestamp_us()); expected_rtp_transport.bytes_sent = 42; + expected_rtp_transport.packets_sent = 1; expected_rtp_transport.bytes_received = 1337; + expected_rtp_transport.packets_received = 4; expected_rtp_transport.dtls_state = RTCDtlsTransportState::kNew; expected_rtp_transport.selected_candidate_pair_changes = 1; @@ -2170,12 +2396,15 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStats) { rtcp_connection_info.remote_candidate = *rtcp_remote_candidate.get(); rtcp_connection_info.sent_total_bytes = 1337; rtcp_connection_info.recv_total_bytes = 42; + rtcp_connection_info.sent_total_packets = 3; + rtcp_connection_info.sent_discarded_packets = 2; + rtcp_connection_info.packets_received = 4; cricket::TransportChannelStats rtcp_transport_channel_stats; rtcp_transport_channel_stats.component = cricket::ICE_CANDIDATE_COMPONENT_RTCP; rtcp_transport_channel_stats.ice_transport_stats.connection_infos.push_back( rtcp_connection_info); - 
rtcp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_CONNECTING; + rtcp_transport_channel_stats.dtls_state = DtlsTransportState::kConnecting; pc_->SetTransportStats(kTransportName, {rtp_transport_channel_stats, rtcp_transport_channel_stats}); @@ -2187,7 +2416,9 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStats) { rtc::ToString(cricket::ICE_CANDIDATE_COMPONENT_RTCP), report->timestamp_us()); expected_rtcp_transport.bytes_sent = 1337; + expected_rtcp_transport.packets_sent = 1; expected_rtcp_transport.bytes_received = 42; + expected_rtcp_transport.packets_received = 4; expected_rtcp_transport.dtls_state = RTCDtlsTransportState::kConnecting; expected_rtcp_transport.selected_candidate_pair_changes = 0; @@ -2281,12 +2512,15 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStatsWithCrypto) { rtp_connection_info.remote_candidate = *rtp_remote_candidate.get(); rtp_connection_info.sent_total_bytes = 42; rtp_connection_info.recv_total_bytes = 1337; + rtp_connection_info.sent_total_packets = 3; + rtp_connection_info.sent_discarded_packets = 2; + rtp_connection_info.packets_received = 4; cricket::TransportChannelStats rtp_transport_channel_stats; rtp_transport_channel_stats.component = cricket::ICE_CANDIDATE_COMPONENT_RTP; rtp_transport_channel_stats.ice_transport_stats.connection_infos.push_back( rtp_connection_info); // The state must be connected in order for crypto parameters to show up. 
- rtp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_CONNECTED; + rtp_transport_channel_stats.dtls_state = DtlsTransportState::kConnected; rtp_transport_channel_stats.ice_transport_stats .selected_candidate_pair_changes = 1; rtp_transport_channel_stats.ssl_version_bytes = 0x0203; @@ -2303,7 +2537,9 @@ TEST_F(RTCStatsCollectorTest, CollectRTCTransportStatsWithCrypto) { rtc::ToString(cricket::ICE_CANDIDATE_COMPONENT_RTP), report->timestamp_us()); expected_rtp_transport.bytes_sent = 42; + expected_rtp_transport.packets_sent = 1; expected_rtp_transport.bytes_received = 1337; + expected_rtp_transport.packets_received = 4; expected_rtp_transport.dtls_state = RTCDtlsTransportState::kConnected; expected_rtp_transport.selected_candidate_pair_changes = 1; // Crypto parameters @@ -2328,6 +2564,7 @@ TEST_F(RTCStatsCollectorTest, CollectNoStreamRTCOutboundRTPStreamStats_Audio) { voice_media_info.senders[0].payload_bytes_sent = 3; voice_media_info.senders[0].header_and_padding_bytes_sent = 4; voice_media_info.senders[0].retransmitted_bytes_sent = 30; + voice_media_info.senders[0].nacks_rcvd = 31; voice_media_info.senders[0].codec_payload_type = 42; RtpCodecParameters codec_parameters; @@ -2351,7 +2588,6 @@ TEST_F(RTCStatsCollectorTest, CollectNoStreamRTCOutboundRTPStreamStats_Audio) { report->timestamp_us()); expected_audio.media_source_id = "RTCAudioSource_50"; expected_audio.ssrc = 1; - expected_audio.is_remote = false; expected_audio.media_type = "audio"; expected_audio.kind = "audio"; expected_audio.track_id = IdForType(report); @@ -2362,6 +2598,7 @@ TEST_F(RTCStatsCollectorTest, CollectNoStreamRTCOutboundRTPStreamStats_Audio) { expected_audio.bytes_sent = 3; expected_audio.header_bytes_sent = 4; expected_audio.retransmitted_bytes_sent = 30; + expected_audio.nack_count = 31; ASSERT_TRUE(report->Get(expected_audio.id())); EXPECT_EQ( @@ -2383,6 +2620,9 @@ TEST_F(RTCStatsCollectorTest, RTCAudioSourceStatsCollectedForSenderWithTrack) { 
voice_media_info.senders[0].audio_level = 32767; // [0,32767] voice_media_info.senders[0].total_input_energy = 2.0; voice_media_info.senders[0].total_input_duration = 3.0; + voice_media_info.senders[0].apm_statistics.echo_return_loss = 42.0; + voice_media_info.senders[0].apm_statistics.echo_return_loss_enhancement = + 52.0; auto* voice_media_channel = pc_->AddVoiceChannel("AudioMid", "TransportName"); voice_media_channel->SetStats(voice_media_info); stats_->SetupLocalTrackAndSender(cricket::MEDIA_TYPE_AUDIO, @@ -2398,6 +2638,8 @@ TEST_F(RTCStatsCollectorTest, RTCAudioSourceStatsCollectedForSenderWithTrack) { expected_audio.audio_level = 1.0; // [0,1] expected_audio.total_audio_energy = 2.0; expected_audio.total_samples_duration = 3.0; + expected_audio.echo_return_loss = 42.0; + expected_audio.echo_return_loss_enhancement = 52.0; ASSERT_TRUE(report->Get(expected_audio.id())); EXPECT_EQ(report->Get(expected_audio.id())->cast_to(), @@ -2420,6 +2662,7 @@ TEST_F(RTCStatsCollectorTest, RTCVideoSourceStatsCollectedForSenderWithTrack) { cricket::SsrcSenderInfo()); video_media_info.aggregated_senders[0].local_stats[0].ssrc = kSsrc; video_media_info.aggregated_senders[0].framerate_input = 29; + video_media_info.aggregated_senders[0].frames = 10001; auto* video_media_channel = pc_->AddVideoChannel("VideoMid", "TransportName"); video_media_channel->SetStats(video_media_info); @@ -2439,9 +2682,8 @@ TEST_F(RTCStatsCollectorTest, RTCVideoSourceStatsCollectedForSenderWithTrack) { expected_video.kind = "video"; expected_video.width = kVideoSourceWidth; expected_video.height = kVideoSourceHeight; - // |expected_video.frames| is expected to be undefined because it is not set. - // TODO(hbos): When implemented, set its expected value here. 
expected_video.frames_per_second = 29; + expected_video.frames = 10001; ASSERT_TRUE(report->Get(expected_video.id())); EXPECT_EQ(report->Get(expected_video.id())->cast_to(), @@ -2481,6 +2723,7 @@ TEST_F(RTCStatsCollectorTest, auto video_stats = report->Get("RTCVideoSource_42")->cast_to(); EXPECT_FALSE(video_stats.frames_per_second.is_defined()); + EXPECT_FALSE(video_stats.frames.is_defined()); } // The track not having a source is not expected to be true in practise, but @@ -2549,6 +2792,7 @@ class RTCStatsCollectorTestWithParamKind case cricket::MEDIA_TYPE_VIDEO: return "Video"; case cricket::MEDIA_TYPE_DATA: + case cricket::MEDIA_TYPE_UNSUPPORTED: RTC_NOTREACHED(); return ""; } @@ -2562,49 +2806,52 @@ class RTCStatsCollectorTestWithParamKind // Adds a sender and channel of the appropriate kind, creating a sender info // with the report block's |source_ssrc| and report block data. - void AddSenderInfoAndMediaChannel(std::string transport_name, - ReportBlockData report_block_data, - absl::optional codec) { + void AddSenderInfoAndMediaChannel( + std::string transport_name, + const std::vector& report_block_datas, + absl::optional codec) { switch (media_type_) { case cricket::MEDIA_TYPE_AUDIO: { cricket::VoiceMediaInfo voice_media_info; - voice_media_info.senders.push_back(cricket::VoiceSenderInfo()); - voice_media_info.senders[0].local_stats.push_back( - cricket::SsrcSenderInfo()); - voice_media_info.senders[0].local_stats[0].ssrc = - report_block_data.report_block().source_ssrc; - if (codec.has_value()) { - voice_media_info.senders[0].codec_payload_type = codec->payload_type; - voice_media_info.send_codecs.insert( - std::make_pair(codec->payload_type, *codec)); + for (const auto& report_block_data : report_block_datas) { + cricket::VoiceSenderInfo sender; + sender.local_stats.push_back(cricket::SsrcSenderInfo()); + sender.local_stats[0].ssrc = + report_block_data.report_block().source_ssrc; + if (codec.has_value()) { + sender.codec_payload_type = 
codec->payload_type; + voice_media_info.send_codecs.insert( + std::make_pair(codec->payload_type, *codec)); + } + sender.report_block_datas.push_back(report_block_data); + voice_media_info.senders.push_back(sender); } - voice_media_info.senders[0].report_block_datas.push_back( - report_block_data); auto* voice_media_channel = pc_->AddVoiceChannel("mid", transport_name); voice_media_channel->SetStats(voice_media_info); return; } case cricket::MEDIA_TYPE_VIDEO: { cricket::VideoMediaInfo video_media_info; - video_media_info.senders.push_back(cricket::VideoSenderInfo()); - video_media_info.senders[0].local_stats.push_back( - cricket::SsrcSenderInfo()); - video_media_info.senders[0].local_stats[0].ssrc = - report_block_data.report_block().source_ssrc; - if (codec.has_value()) { - video_media_info.senders[0].codec_payload_type = codec->payload_type; - video_media_info.send_codecs.insert( - std::make_pair(codec->payload_type, *codec)); + for (const auto& report_block_data : report_block_datas) { + cricket::VideoSenderInfo sender; + sender.local_stats.push_back(cricket::SsrcSenderInfo()); + sender.local_stats[0].ssrc = + report_block_data.report_block().source_ssrc; + if (codec.has_value()) { + sender.codec_payload_type = codec->payload_type; + video_media_info.send_codecs.insert( + std::make_pair(codec->payload_type, *codec)); + } + sender.report_block_datas.push_back(report_block_data); + video_media_info.aggregated_senders.push_back(sender); + video_media_info.senders.push_back(sender); } - video_media_info.senders[0].report_block_datas.push_back( - report_block_data); - video_media_info.aggregated_senders.push_back( - video_media_info.senders[0]); auto* video_media_channel = pc_->AddVideoChannel("mid", transport_name); video_media_channel->SetStats(video_media_info); return; } case cricket::MEDIA_TYPE_DATA: + case cricket::MEDIA_TYPE_UNSUPPORTED: RTC_NOTREACHED(); } } @@ -2618,55 +2865,70 @@ class RTCStatsCollectorTestWithParamKind 
TEST_P(RTCStatsCollectorTestWithParamKind, RTCRemoteInboundRtpStreamStatsCollectedFromReportBlock) { const int64_t kReportBlockTimestampUtcUs = 123456789; - const int64_t kRoundTripTimeMs = 13000; - const double kRoundTripTimeSeconds = 13.0; + const uint8_t kFractionLost = 12; + const int64_t kRoundTripTimeSample1Ms = 1234; + const double kRoundTripTimeSample1Seconds = 1.234; + const int64_t kRoundTripTimeSample2Ms = 13000; + const double kRoundTripTimeSample2Seconds = 13; // The report block's timestamp cannot be from the future, set the fake clock // to match. fake_clock_.SetTime(Timestamp::Micros(kReportBlockTimestampUtcUs)); - - RTCPReportBlock report_block; - // The remote-inbound-rtp SSRC and the outbound-rtp SSRC is the same as the - // |source_ssrc|, "SSRC of the RTP packet sender". - report_block.source_ssrc = 12; - report_block.packets_lost = 7; - ReportBlockData report_block_data; - report_block_data.SetReportBlock(report_block, kReportBlockTimestampUtcUs); - report_block_data.AddRoundTripTimeSample(1234); - // Only the last sample should be exposed as the - // |RTCRemoteInboundRtpStreamStats::round_trip_time|. - report_block_data.AddRoundTripTimeSample(kRoundTripTimeMs); - - AddSenderInfoAndMediaChannel("TransportName", report_block_data, + auto ssrcs = {12, 13}; + std::vector report_block_datas; + for (auto ssrc : ssrcs) { + RTCPReportBlock report_block; + // The remote-inbound-rtp SSRC and the outbound-rtp SSRC is the same as the + // |source_ssrc|, "SSRC of the RTP packet sender". + report_block.source_ssrc = ssrc; + report_block.packets_lost = 7; + report_block.fraction_lost = kFractionLost; + ReportBlockData report_block_data; + report_block_data.SetReportBlock(report_block, kReportBlockTimestampUtcUs); + report_block_data.AddRoundTripTimeSample(kRoundTripTimeSample1Ms); + // Only the last sample should be exposed as the + // |RTCRemoteInboundRtpStreamStats::round_trip_time|. 
+ report_block_data.AddRoundTripTimeSample(kRoundTripTimeSample2Ms); + report_block_datas.push_back(report_block_data); + } + AddSenderInfoAndMediaChannel("TransportName", report_block_datas, absl::nullopt); rtc::scoped_refptr report = stats_->GetStatsReport(); - - RTCRemoteInboundRtpStreamStats expected_remote_inbound_rtp( - "RTCRemoteInboundRtp" + MediaTypeUpperCase() + "Stream_12", - kReportBlockTimestampUtcUs); - expected_remote_inbound_rtp.ssrc = 12; - expected_remote_inbound_rtp.kind = MediaTypeLowerCase(); - expected_remote_inbound_rtp.transport_id = - "RTCTransport_TransportName_1"; // 1 for RTP (we have no RTCP transport) - expected_remote_inbound_rtp.packets_lost = 7; - expected_remote_inbound_rtp.local_id = - "RTCOutboundRTP" + MediaTypeUpperCase() + "Stream_12"; - expected_remote_inbound_rtp.round_trip_time = kRoundTripTimeSeconds; - // This test does not set up RTCCodecStats, so |codec_id| and |jitter| are - // expected to be missing. These are tested separately. - - ASSERT_TRUE(report->Get(expected_remote_inbound_rtp.id())); - EXPECT_EQ(report->Get(expected_remote_inbound_rtp.id()) - ->cast_to(), - expected_remote_inbound_rtp); - EXPECT_TRUE(report->Get(*expected_remote_inbound_rtp.transport_id)); - ASSERT_TRUE(report->Get(*expected_remote_inbound_rtp.local_id)); - // Lookup works in both directions. 
- EXPECT_EQ(*report->Get(*expected_remote_inbound_rtp.local_id) - ->cast_to() - .remote_id, - expected_remote_inbound_rtp.id()); + for (auto ssrc : ssrcs) { + std::string stream_id = "Stream_" + std::to_string(ssrc); + RTCRemoteInboundRtpStreamStats expected_remote_inbound_rtp( + "RTCRemoteInboundRtp" + MediaTypeUpperCase() + stream_id, + kReportBlockTimestampUtcUs); + expected_remote_inbound_rtp.ssrc = ssrc; + expected_remote_inbound_rtp.fraction_lost = + static_cast(kFractionLost) / (1 << 8); + expected_remote_inbound_rtp.kind = MediaTypeLowerCase(); + expected_remote_inbound_rtp.transport_id = + "RTCTransport_TransportName_1"; // 1 for RTP (we have no RTCP + // transport) + expected_remote_inbound_rtp.packets_lost = 7; + expected_remote_inbound_rtp.local_id = + "RTCOutboundRTP" + MediaTypeUpperCase() + stream_id; + expected_remote_inbound_rtp.round_trip_time = kRoundTripTimeSample2Seconds; + expected_remote_inbound_rtp.total_round_trip_time = + kRoundTripTimeSample1Seconds + kRoundTripTimeSample2Seconds; + expected_remote_inbound_rtp.round_trip_time_measurements = 2; + // This test does not set up RTCCodecStats, so |codec_id| and |jitter| are + // expected to be missing. These are tested separately. + + ASSERT_TRUE(report->Get(expected_remote_inbound_rtp.id())); + EXPECT_EQ(report->Get(expected_remote_inbound_rtp.id()) + ->cast_to(), + expected_remote_inbound_rtp); + EXPECT_TRUE(report->Get(*expected_remote_inbound_rtp.transport_id)); + ASSERT_TRUE(report->Get(*expected_remote_inbound_rtp.local_id)); + // Lookup works in both directions. 
+ EXPECT_EQ(*report->Get(*expected_remote_inbound_rtp.local_id) + ->cast_to() + .remote_id, + expected_remote_inbound_rtp.id()); + } } TEST_P(RTCStatsCollectorTestWithParamKind, @@ -2681,7 +2943,7 @@ TEST_P(RTCStatsCollectorTestWithParamKind, ReportBlockData report_block_data; report_block_data.SetReportBlock(report_block, kReportBlockTimestampUtcUs); - AddSenderInfoAndMediaChannel("TransportName", report_block_data, + AddSenderInfoAndMediaChannel("TransportName", {report_block_data}, absl::nullopt); // Advance time, it should be OK to have fresher reports than report blocks. @@ -2719,7 +2981,7 @@ TEST_P(RTCStatsCollectorTestWithParamKind, codec.kind = media_type_; codec.clock_rate = 1000; - AddSenderInfoAndMediaChannel("TransportName", report_block_data, codec); + AddSenderInfoAndMediaChannel("TransportName", {report_block_data}, codec); rtc::scoped_refptr report = stats_->GetStatsReport(); @@ -2752,14 +3014,14 @@ TEST_P(RTCStatsCollectorTestWithParamKind, cricket::TransportChannelStats rtp_transport_channel_stats; rtp_transport_channel_stats.component = cricket::ICE_CANDIDATE_COMPONENT_RTP; - rtp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_NEW; + rtp_transport_channel_stats.dtls_state = DtlsTransportState::kNew; cricket::TransportChannelStats rtcp_transport_channel_stats; rtcp_transport_channel_stats.component = cricket::ICE_CANDIDATE_COMPONENT_RTCP; - rtcp_transport_channel_stats.dtls_state = cricket::DTLS_TRANSPORT_NEW; + rtcp_transport_channel_stats.dtls_state = DtlsTransportState::kNew; pc_->SetTransportStats("TransportName", {rtp_transport_channel_stats, rtcp_transport_channel_stats}); - AddSenderInfoAndMediaChannel("TransportName", report_block_data, + AddSenderInfoAndMediaChannel("TransportName", {report_block_data}, absl::nullopt); rtc::scoped_refptr report = stats_->GetStatsReport(); @@ -2781,6 +3043,43 @@ INSTANTIATE_TEST_SUITE_P(All, ::testing::Values(cricket::MEDIA_TYPE_AUDIO, // "/0" cricket::MEDIA_TYPE_VIDEO)); // "/1" +// Checks 
that no remote outbound stats are collected if not available in +// `VoiceMediaInfo`. +TEST_F(RTCStatsCollectorTest, + RTCRemoteOutboundRtpAudioStreamStatsNotCollected) { + ExampleStatsGraph graph = + SetupExampleStatsVoiceGraph(/*add_remote_outbound_stats=*/false); + EXPECT_FALSE(graph.full_report->Get(graph.remote_outbound_rtp_id)); + // Also check that no other remote outbound report is created (in case the + // expected ID is incorrect). + rtc::scoped_refptr report = stats_->GetStatsReport(); + ASSERT_NE(report->begin(), report->end()) + << "No reports have been generated."; + for (const auto& stats : *report) { + SCOPED_TRACE(stats.id()); + EXPECT_NE(stats.type(), RTCRemoteOutboundRtpStreamStats::kType); + } +} + +// Checks that the remote outbound stats are collected when available in +// `VoiceMediaInfo`. +TEST_F(RTCStatsCollectorTest, RTCRemoteOutboundRtpAudioStreamStatsCollected) { + ExampleStatsGraph graph = + SetupExampleStatsVoiceGraph(/*add_remote_outbound_stats=*/true); + ASSERT_TRUE(graph.full_report->Get(graph.remote_outbound_rtp_id)); + const auto& remote_outbound_rtp = + graph.full_report->Get(graph.remote_outbound_rtp_id) + ->cast_to(); + EXPECT_EQ(remote_outbound_rtp.timestamp_us(), + kRemoteOutboundStatsTimestampMs * rtc::kNumMicrosecsPerMillisec); + EXPECT_FLOAT_EQ(*remote_outbound_rtp.remote_timestamp, + static_cast(kRemoteOutboundStatsRemoteTimestampMs)); + EXPECT_EQ(*remote_outbound_rtp.packets_sent, kRemoteOutboundStatsPacketsSent); + EXPECT_EQ(*remote_outbound_rtp.bytes_sent, kRemoteOutboundStatsBytesSent); + EXPECT_EQ(*remote_outbound_rtp.reports_sent, + kRemoteOutboundStatsReportsCount); +} + TEST_F(RTCStatsCollectorTest, RTCVideoSourceStatsNotCollectedForSenderWithoutTrack) { const uint32_t kSsrc = 4; @@ -2802,6 +3101,64 @@ TEST_F(RTCStatsCollectorTest, EXPECT_FALSE(report->Get("RTCVideoSource_42")); } +// Test collecting echo return loss stats from the audio processor attached to +// the track, rather than the voice sender info. 
+TEST_F(RTCStatsCollectorTest, CollectEchoReturnLossFromTrackAudioProcessor) { + rtc::scoped_refptr local_stream = + MediaStream::Create("LocalStreamId"); + pc_->mutable_local_streams()->AddStream(local_stream); + + // Local audio track + rtc::scoped_refptr local_audio_track = + CreateFakeTrack(cricket::MEDIA_TYPE_AUDIO, "LocalAudioTrackID", + MediaStreamTrackInterface::kEnded, + /*create_fake_audio_processor=*/true); + local_stream->AddTrack( + static_cast(local_audio_track.get())); + + cricket::VoiceSenderInfo voice_sender_info_ssrc1; + voice_sender_info_ssrc1.local_stats.push_back(cricket::SsrcSenderInfo()); + voice_sender_info_ssrc1.local_stats[0].ssrc = 1; + + stats_->CreateMockRtpSendersReceiversAndChannels( + {std::make_pair(local_audio_track.get(), voice_sender_info_ssrc1)}, {}, + {}, {}, {local_stream->id()}, {}); + + rtc::scoped_refptr report = stats_->GetStatsReport(); + + RTCMediaStreamTrackStats expected_local_audio_track_ssrc1( + IdForType(report), report->timestamp_us(), + RTCMediaStreamTrackKind::kAudio); + expected_local_audio_track_ssrc1.track_identifier = local_audio_track->id(); + expected_local_audio_track_ssrc1.media_source_id = + "RTCAudioSource_11"; // Attachment ID = SSRC + 10 + expected_local_audio_track_ssrc1.remote_source = false; + expected_local_audio_track_ssrc1.ended = true; + expected_local_audio_track_ssrc1.detached = false; + expected_local_audio_track_ssrc1.echo_return_loss = 2.0; + expected_local_audio_track_ssrc1.echo_return_loss_enhancement = 3.0; + ASSERT_TRUE(report->Get(expected_local_audio_track_ssrc1.id())) + << "Did not find " << expected_local_audio_track_ssrc1.id() << " in " + << report->ToJson(); + EXPECT_EQ(expected_local_audio_track_ssrc1, + report->Get(expected_local_audio_track_ssrc1.id()) + ->cast_to()); + + RTCAudioSourceStats expected_audio("RTCAudioSource_11", + report->timestamp_us()); + expected_audio.track_identifier = "LocalAudioTrackID"; + expected_audio.kind = "audio"; + expected_audio.audio_level = 0; + 
expected_audio.total_audio_energy = 0; + expected_audio.total_samples_duration = 0; + expected_audio.echo_return_loss = 2.0; + expected_audio.echo_return_loss_enhancement = 3.0; + + ASSERT_TRUE(report->Get(expected_audio.id())); + EXPECT_EQ(report->Get(expected_audio.id())->cast_to(), + expected_audio); +} + TEST_F(RTCStatsCollectorTest, GetStatsWithSenderSelector) { ExampleStatsGraph graph = SetupExampleStatsGraphForSelectorTests(); // Expected stats graph when filtered by sender: @@ -2967,16 +3324,25 @@ class FakeRTCStatsCollector : public RTCStatsCollector, static rtc::scoped_refptr Create( PeerConnectionInternal* pc, int64_t cache_lifetime_us) { - return rtc::scoped_refptr( - new rtc::RefCountedObject(pc, - cache_lifetime_us)); + return new rtc::RefCountedObject(pc, + cache_lifetime_us); } + // Since FakeRTCStatsCollector inherits twice from RefCountInterface, once via + // RTCStatsCollector and once via RTCStatsCollectorCallback, scoped_refptr + // will get confused about which AddRef()/Release() methods to call. + // So to remove all doubt, we declare them here again in the class that we + // give to scoped_refptr. + // Satisfying the implementation of these methods and associating them with a + // reference counter, will be done by RefCountedObject. + virtual void AddRef() const = 0; + virtual rtc::RefCountReleaseStatus Release() const = 0; + // RTCStatsCollectorCallback implementation. 
void OnStatsDelivered( const rtc::scoped_refptr& report) override { EXPECT_TRUE(signaling_thread_->IsCurrent()); - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); delivered_report_ = report; } @@ -2987,7 +3353,7 @@ class FakeRTCStatsCollector : public RTCStatsCollector, bool HasVerifiedResults() { EXPECT_TRUE(signaling_thread_->IsCurrent()); - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (!delivered_report_) return false; EXPECT_EQ(produced_on_signaling_thread_, 1); @@ -3014,7 +3380,7 @@ class FakeRTCStatsCollector : public RTCStatsCollector, RTCStatsReport* partial_report) override { EXPECT_TRUE(signaling_thread_->IsCurrent()); { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); EXPECT_FALSE(delivered_report_); ++produced_on_signaling_thread_; } @@ -3030,7 +3396,7 @@ class FakeRTCStatsCollector : public RTCStatsCollector, RTCStatsReport* partial_report) override { EXPECT_TRUE(network_thread_->IsCurrent()); { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); EXPECT_FALSE(delivered_report_); ++produced_on_network_thread_; } @@ -3044,7 +3410,7 @@ class FakeRTCStatsCollector : public RTCStatsCollector, rtc::Thread* const worker_thread_; rtc::Thread* const network_thread_; - rtc::CriticalSection lock_; + Mutex lock_; rtc::scoped_refptr delivered_report_; int produced_on_signaling_thread_ = 0; int produced_on_network_thread_ = 0; diff --git a/pc/rtc_stats_integrationtest.cc b/pc/rtc_stats_integrationtest.cc index d6d5c6f819..2dfe1b5cd5 100644 --- a/pc/rtc_stats_integrationtest.cc +++ b/pc/rtc_stats_integrationtest.cc @@ -114,9 +114,9 @@ class RTCStatsIntegrationTest : public ::testing::Test { RTC_CHECK(network_thread_->Start()); RTC_CHECK(worker_thread_->Start()); - caller_ = new rtc::RefCountedObject( + caller_ = rtc::make_ref_counted( "caller", network_thread_.get(), worker_thread_.get()); - callee_ = new rtc::RefCountedObject( + callee_ = rtc::make_ref_counted( "callee", network_thread_.get(), worker_thread_.get()); } @@ -352,8 +352,7 @@ 
class RTCStatsReportVerifier { explicit RTCStatsReportVerifier(const RTCStatsReport* report) : report_(report) {} - void VerifyReport(std::vector allowed_missing_stats, - bool enable_simulcast_stats) { + void VerifyReport(std::vector allowed_missing_stats) { std::set missing_stats = StatsTypes(); bool verify_successful = true; std::vector transport_stats = @@ -396,10 +395,13 @@ class RTCStatsReportVerifier { stats.cast_to()); } else if (stats.type() == RTCOutboundRTPStreamStats::kType) { verify_successful &= VerifyRTCOutboundRTPStreamStats( - stats.cast_to(), enable_simulcast_stats); + stats.cast_to()); } else if (stats.type() == RTCRemoteInboundRtpStreamStats::kType) { verify_successful &= VerifyRTCRemoteInboundRtpStreamStats( stats.cast_to()); + } else if (stats.type() == RTCRemoteOutboundRtpStreamStats::kType) { + verify_successful &= VerifyRTCRemoteOutboundRTPStreamStats( + stats.cast_to()); } else if (stats.type() == RTCAudioSourceStats::kType) { // RTCAudioSourceStats::kType and RTCVideoSourceStats::kType both have // the value "media-source", but they are distinguishable with pointer @@ -443,6 +445,8 @@ class RTCStatsReportVerifier { bool VerifyRTCCodecStats(const RTCCodecStats& codec) { RTCStatsVerifier verifier(report_, &codec); + verifier.TestMemberIsIDReference(codec.transport_id, + RTCTransportStats::kType); verifier.TestMemberIsDefined(codec.payload_type); verifier.TestMemberIsDefined(codec.mime_type); verifier.TestMemberIsPositive(codec.clock_rate); @@ -461,7 +465,7 @@ class RTCStatsReportVerifier { RTCStatsVerifier verifier(report_, &data_channel); verifier.TestMemberIsDefined(data_channel.label); verifier.TestMemberIsDefined(data_channel.protocol); - verifier.TestMemberIsDefined(data_channel.datachannelid); + verifier.TestMemberIsDefined(data_channel.data_channel_identifier); verifier.TestMemberIsDefined(data_channel.state); verifier.TestMemberIsNonNegative(data_channel.messages_sent); verifier.TestMemberIsNonNegative(data_channel.bytes_sent); @@ 
-527,12 +531,12 @@ class RTCStatsReportVerifier { verifier.TestMemberIsDefined(candidate.network_type); } verifier.TestMemberIsDefined(candidate.ip); + verifier.TestMemberIsDefined(candidate.address); verifier.TestMemberIsNonNegative(candidate.port); verifier.TestMemberIsDefined(candidate.protocol); verifier.TestMemberIsDefined(candidate.candidate_type); verifier.TestMemberIsNonNegative(candidate.priority); verifier.TestMemberIsUndefined(candidate.url); - verifier.TestMemberIsDefined(candidate.deleted); verifier.TestMemberIsUndefined(candidate.relay_protocol); return verifier.ExpectAllMembersSuccessfullyTested(); } @@ -767,32 +771,38 @@ class RTCStatsReportVerifier { void VerifyRTCRTPStreamStats(const RTCRTPStreamStats& stream, - RTCStatsVerifier* verifier) { - verifier->TestMemberIsDefined(stream.ssrc); - verifier->TestMemberIsDefined(stream.is_remote); - verifier->TestMemberIsDefined(stream.media_type); - verifier->TestMemberIsDefined(stream.kind); - verifier->TestMemberIsIDReference(stream.track_id, - RTCMediaStreamTrackStats::kType); - verifier->TestMemberIsIDReference(stream.transport_id, - RTCTransportStats::kType); - verifier->TestMemberIsIDReference(stream.codec_id, RTCCodecStats::kType); - if (stream.media_type.is_defined() && *stream.media_type == "video") { - verifier->TestMemberIsNonNegative(stream.fir_count); - verifier->TestMemberIsNonNegative(stream.pli_count); - verifier->TestMemberIsNonNegative(stream.nack_count); + RTCStatsVerifier& verifier) { + verifier.TestMemberIsDefined(stream.ssrc); + verifier.TestMemberIsDefined(stream.kind); + // Some legacy metrics are only defined for some of the RTP types in the + // hierarchy. 
+ if (stream.type() == RTCInboundRTPStreamStats::kType || + stream.type() == RTCOutboundRTPStreamStats::kType) { + verifier.TestMemberIsDefined(stream.media_type); + verifier.TestMemberIsIDReference(stream.track_id, + RTCMediaStreamTrackStats::kType); } else { - verifier->TestMemberIsUndefined(stream.fir_count); - verifier->TestMemberIsUndefined(stream.pli_count); - verifier->TestMemberIsUndefined(stream.nack_count); + verifier.TestMemberIsUndefined(stream.media_type); + verifier.TestMemberIsUndefined(stream.track_id); } - verifier->TestMemberIsUndefined(stream.sli_count); + verifier.TestMemberIsIDReference(stream.transport_id, + RTCTransportStats::kType); + verifier.TestMemberIsIDReference(stream.codec_id, RTCCodecStats::kType); + } + + void VerifyRTCSentRTPStreamStats(const RTCSentRtpStreamStats& sent_stream, + RTCStatsVerifier& verifier) { + VerifyRTCRTPStreamStats(sent_stream, verifier); + verifier.TestMemberIsDefined(sent_stream.packets_sent); + verifier.TestMemberIsDefined(sent_stream.bytes_sent); } bool VerifyRTCInboundRTPStreamStats( const RTCInboundRTPStreamStats& inbound_stream) { RTCStatsVerifier verifier(report_, &inbound_stream); - VerifyRTCRTPStreamStats(inbound_stream, &verifier); + VerifyRTCReceivedRtpStreamStats(inbound_stream, verifier); + verifier.TestMemberIsOptionalIDReference( + inbound_stream.remote_id, RTCRemoteOutboundRtpStreamStats::kType); if (inbound_stream.media_type.is_defined() && *inbound_stream.media_type == "video") { verifier.TestMemberIsNonNegative(inbound_stream.qp_sum); @@ -815,17 +825,64 @@ class RTCStatsReportVerifier { verifier.TestMemberIsNonNegative(inbound_stream.bytes_received); verifier.TestMemberIsNonNegative( inbound_stream.header_bytes_received); - // packets_lost is defined as signed, but this should never happen in - // this test. See RFC 3550. 
- verifier.TestMemberIsNonNegative(inbound_stream.packets_lost); verifier.TestMemberIsDefined(inbound_stream.last_packet_received_timestamp); + if (inbound_stream.frames_received.ValueOrDefault(0) > 0) { + verifier.TestMemberIsNonNegative(inbound_stream.frame_width); + verifier.TestMemberIsNonNegative(inbound_stream.frame_height); + } else { + verifier.TestMemberIsUndefined(inbound_stream.frame_width); + verifier.TestMemberIsUndefined(inbound_stream.frame_height); + } + if (inbound_stream.frames_per_second.is_defined()) { + verifier.TestMemberIsNonNegative( + inbound_stream.frames_per_second); + } else { + verifier.TestMemberIsUndefined(inbound_stream.frames_per_second); + } + verifier.TestMemberIsUndefined(inbound_stream.frame_bit_depth); + verifier.TestMemberIsNonNegative( + inbound_stream.jitter_buffer_delay); + verifier.TestMemberIsNonNegative( + inbound_stream.jitter_buffer_emitted_count); if (inbound_stream.media_type.is_defined() && *inbound_stream.media_type == "video") { - verifier.TestMemberIsUndefined(inbound_stream.jitter); + verifier.TestMemberIsUndefined(inbound_stream.total_samples_received); + verifier.TestMemberIsUndefined(inbound_stream.concealed_samples); + verifier.TestMemberIsUndefined(inbound_stream.silent_concealed_samples); + verifier.TestMemberIsUndefined(inbound_stream.concealment_events); + verifier.TestMemberIsUndefined( + inbound_stream.inserted_samples_for_deceleration); + verifier.TestMemberIsUndefined( + inbound_stream.removed_samples_for_acceleration); + verifier.TestMemberIsUndefined(inbound_stream.audio_level); + verifier.TestMemberIsUndefined(inbound_stream.total_audio_energy); + verifier.TestMemberIsUndefined(inbound_stream.total_samples_duration); + verifier.TestMemberIsNonNegative(inbound_stream.frames_received); + verifier.TestMemberIsNonNegative(inbound_stream.fir_count); + verifier.TestMemberIsNonNegative(inbound_stream.pli_count); + verifier.TestMemberIsNonNegative(inbound_stream.nack_count); } else { - 
verifier.TestMemberIsNonNegative(inbound_stream.jitter); + verifier.TestMemberIsUndefined(inbound_stream.fir_count); + verifier.TestMemberIsUndefined(inbound_stream.pli_count); + verifier.TestMemberIsUndefined(inbound_stream.nack_count); + verifier.TestMemberIsPositive( + inbound_stream.total_samples_received); + verifier.TestMemberIsNonNegative( + inbound_stream.concealed_samples); + verifier.TestMemberIsNonNegative( + inbound_stream.silent_concealed_samples); + verifier.TestMemberIsNonNegative( + inbound_stream.concealment_events); + verifier.TestMemberIsNonNegative( + inbound_stream.inserted_samples_for_deceleration); + verifier.TestMemberIsNonNegative( + inbound_stream.removed_samples_for_acceleration); + verifier.TestMemberIsPositive(inbound_stream.audio_level); + verifier.TestMemberIsPositive(inbound_stream.total_audio_energy); + verifier.TestMemberIsPositive( + inbound_stream.total_samples_duration); + verifier.TestMemberIsUndefined(inbound_stream.frames_received); } - verifier.TestMemberIsUndefined(inbound_stream.round_trip_time); verifier.TestMemberIsUndefined(inbound_stream.packets_discarded); verifier.TestMemberIsUndefined(inbound_stream.packets_repaired); @@ -844,6 +901,7 @@ class RTCStatsReportVerifier { *inbound_stream.media_type == "video") { verifier.TestMemberIsDefined(inbound_stream.frames_decoded); verifier.TestMemberIsDefined(inbound_stream.key_frames_decoded); + verifier.TestMemberIsNonNegative(inbound_stream.frames_dropped); verifier.TestMemberIsNonNegative( inbound_stream.total_decode_time); verifier.TestMemberIsNonNegative( @@ -856,6 +914,7 @@ class RTCStatsReportVerifier { } else { verifier.TestMemberIsUndefined(inbound_stream.frames_decoded); verifier.TestMemberIsUndefined(inbound_stream.key_frames_decoded); + verifier.TestMemberIsUndefined(inbound_stream.frames_dropped); verifier.TestMemberIsUndefined(inbound_stream.total_decode_time); verifier.TestMemberIsUndefined(inbound_stream.total_inter_frame_delay); verifier.TestMemberIsUndefined( 
@@ -866,24 +925,28 @@ class RTCStatsReportVerifier { } bool VerifyRTCOutboundRTPStreamStats( - const RTCOutboundRTPStreamStats& outbound_stream, - bool enable_simulcast_stats) { + const RTCOutboundRTPStreamStats& outbound_stream) { RTCStatsVerifier verifier(report_, &outbound_stream); - VerifyRTCRTPStreamStats(outbound_stream, &verifier); + VerifyRTCRTPStreamStats(outbound_stream, verifier); if (outbound_stream.media_type.is_defined() && *outbound_stream.media_type == "video") { verifier.TestMemberIsIDReference(outbound_stream.media_source_id, RTCVideoSourceStats::kType); + verifier.TestMemberIsNonNegative(outbound_stream.fir_count); + verifier.TestMemberIsNonNegative(outbound_stream.pli_count); if (*outbound_stream.frames_encoded > 0) { verifier.TestMemberIsNonNegative(outbound_stream.qp_sum); } else { verifier.TestMemberIsUndefined(outbound_stream.qp_sum); } } else { + verifier.TestMemberIsUndefined(outbound_stream.fir_count); + verifier.TestMemberIsUndefined(outbound_stream.pli_count); verifier.TestMemberIsIDReference(outbound_stream.media_source_id, RTCAudioSourceStats::kType); verifier.TestMemberIsUndefined(outbound_stream.qp_sum); } + verifier.TestMemberIsNonNegative(outbound_stream.nack_count); verifier.TestMemberIsOptionalIDReference( outbound_stream.remote_id, RTCRemoteInboundRtpStreamStats::kType); verifier.TestMemberIsNonNegative(outbound_stream.packets_sent); @@ -906,30 +969,31 @@ class RTCStatsReportVerifier { verifier.TestMemberIsNonNegative( outbound_stream.total_packet_send_delay); verifier.TestMemberIsDefined(outbound_stream.quality_limitation_reason); + verifier.TestMemberIsDefined( + outbound_stream.quality_limitation_durations); verifier.TestMemberIsNonNegative( outbound_stream.quality_limitation_resolution_changes); // The integration test is not set up to test screen share; don't require // this to be present. 
verifier.MarkMemberTested(outbound_stream.content_type, true); verifier.TestMemberIsDefined(outbound_stream.encoder_implementation); - if (enable_simulcast_stats) { + // Unless an implementation-specific amount of time has passed and at + // least one frame has been encoded, undefined is reported. Because it + // is hard to tell what is the case here, we treat FPS as optional. + // TODO(hbos): Update the tests to run until all implemented metrics + // should be populated. + if (outbound_stream.frames_per_second.is_defined()) { verifier.TestMemberIsNonNegative( outbound_stream.frames_per_second); - verifier.TestMemberIsNonNegative( - outbound_stream.frame_height); - verifier.TestMemberIsNonNegative(outbound_stream.frame_width); - verifier.TestMemberIsNonNegative(outbound_stream.frames_sent); - verifier.TestMemberIsNonNegative( - outbound_stream.huge_frames_sent); - verifier.MarkMemberTested(outbound_stream.rid, true); } else { verifier.TestMemberIsUndefined(outbound_stream.frames_per_second); - verifier.TestMemberIsUndefined(outbound_stream.frame_height); - verifier.TestMemberIsUndefined(outbound_stream.frame_width); - verifier.TestMemberIsUndefined(outbound_stream.frames_sent); - verifier.TestMemberIsUndefined(outbound_stream.huge_frames_sent); - verifier.TestMemberIsUndefined(outbound_stream.rid); } + verifier.TestMemberIsNonNegative(outbound_stream.frame_height); + verifier.TestMemberIsNonNegative(outbound_stream.frame_width); + verifier.TestMemberIsNonNegative(outbound_stream.frames_sent); + verifier.TestMemberIsNonNegative( + outbound_stream.huge_frames_sent); + verifier.MarkMemberTested(outbound_stream.rid, true); } else { verifier.TestMemberIsUndefined(outbound_stream.frames_encoded); verifier.TestMemberIsUndefined(outbound_stream.key_frames_encoded); @@ -939,6 +1003,8 @@ class RTCStatsReportVerifier { // TODO(https://crbug.com/webrtc/10635): Implement for audio as well. 
verifier.TestMemberIsUndefined(outbound_stream.total_packet_send_delay); verifier.TestMemberIsUndefined(outbound_stream.quality_limitation_reason); + verifier.TestMemberIsUndefined( + outbound_stream.quality_limitation_durations); verifier.TestMemberIsUndefined( outbound_stream.quality_limitation_resolution_changes); verifier.TestMemberIsUndefined(outbound_stream.content_type); @@ -954,23 +1020,40 @@ class RTCStatsReportVerifier { return verifier.ExpectAllMembersSuccessfullyTested(); } + void VerifyRTCReceivedRtpStreamStats( + const RTCReceivedRtpStreamStats& received_rtp, + RTCStatsVerifier& verifier) { + VerifyRTCRTPStreamStats(received_rtp, verifier); + verifier.TestMemberIsNonNegative(received_rtp.jitter); + verifier.TestMemberIsDefined(received_rtp.packets_lost); + } + bool VerifyRTCRemoteInboundRtpStreamStats( const RTCRemoteInboundRtpStreamStats& remote_inbound_stream) { RTCStatsVerifier verifier(report_, &remote_inbound_stream); - verifier.TestMemberIsDefined(remote_inbound_stream.ssrc); - verifier.TestMemberIsDefined(remote_inbound_stream.kind); - verifier.TestMemberIsIDReference(remote_inbound_stream.transport_id, - RTCTransportStats::kType); - verifier.TestMemberIsIDReference(remote_inbound_stream.codec_id, - RTCCodecStats::kType); - verifier.TestMemberIsDefined(remote_inbound_stream.packets_lost); - // Note that the existance of RTCCodecStats is needed for |codec_id| and - // |jitter| to be present. 
- verifier.TestMemberIsNonNegative(remote_inbound_stream.jitter); + VerifyRTCReceivedRtpStreamStats(remote_inbound_stream, verifier); + verifier.TestMemberIsDefined(remote_inbound_stream.fraction_lost); verifier.TestMemberIsIDReference(remote_inbound_stream.local_id, RTCOutboundRTPStreamStats::kType); verifier.TestMemberIsNonNegative( remote_inbound_stream.round_trip_time); + verifier.TestMemberIsNonNegative( + remote_inbound_stream.total_round_trip_time); + verifier.TestMemberIsNonNegative( + remote_inbound_stream.round_trip_time_measurements); + return verifier.ExpectAllMembersSuccessfullyTested(); + } + + bool VerifyRTCRemoteOutboundRTPStreamStats( + const RTCRemoteOutboundRtpStreamStats& remote_outbound_stream) { + RTCStatsVerifier verifier(report_, &remote_outbound_stream); + VerifyRTCRTPStreamStats(remote_outbound_stream, verifier); + VerifyRTCSentRTPStreamStats(remote_outbound_stream, verifier); + verifier.TestMemberIsIDReference(remote_outbound_stream.local_id, + RTCOutboundRTPStreamStats::kType); + verifier.TestMemberIsNonNegative( + remote_outbound_stream.remote_timestamp); + verifier.TestMemberIsDefined(remote_outbound_stream.reports_sent); return verifier.ExpectAllMembersSuccessfullyTested(); } @@ -995,6 +1078,12 @@ class RTCStatsReportVerifier { verifier.TestMemberIsNonNegative(audio_source.audio_level); verifier.TestMemberIsPositive(audio_source.total_audio_energy); verifier.TestMemberIsPositive(audio_source.total_samples_duration); + // TODO(hbos): |echo_return_loss| and |echo_return_loss_enhancement| are + // flaky on msan bot (sometimes defined, sometimes undefined). Should the + // test run until available or is there a way to have it always be + // defined? crbug.com/627816 + verifier.MarkMemberTested(audio_source.echo_return_loss, true); + verifier.MarkMemberTested(audio_source.echo_return_loss_enhancement, true); return verifier.ExpectAllMembersSuccessfullyTested(); } @@ -1007,9 +1096,7 @@ class RTCStatsReportVerifier { // reflect real code. 
verifier.TestMemberIsUndefined(video_source.width); verifier.TestMemberIsUndefined(video_source.height); - // TODO(hbos): When |frames| is implemented test that this member should be - // expected to be non-negative. - verifier.TestMemberIsUndefined(video_source.frames); + verifier.TestMemberIsNonNegative(video_source.frames); verifier.TestMemberIsNonNegative(video_source.frames_per_second); return verifier.ExpectAllMembersSuccessfullyTested(); } @@ -1017,7 +1104,9 @@ class RTCStatsReportVerifier { bool VerifyRTCTransportStats(const RTCTransportStats& transport) { RTCStatsVerifier verifier(report_, &transport); verifier.TestMemberIsNonNegative(transport.bytes_sent); + verifier.TestMemberIsNonNegative(transport.packets_sent); verifier.TestMemberIsNonNegative(transport.bytes_received); + verifier.TestMemberIsNonNegative(transport.packets_received); verifier.TestMemberIsOptionalIDReference(transport.rtcp_transport_stats_id, RTCTransportStats::kType); verifier.TestMemberIsDefined(transport.dtls_state); @@ -1039,14 +1128,12 @@ class RTCStatsReportVerifier { rtc::scoped_refptr report_; }; -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP TEST_F(RTCStatsIntegrationTest, GetStatsFromCaller) { StartCall(); rtc::scoped_refptr report = GetStatsFromCaller(); - RTCStatsReportVerifier(report.get()) - .VerifyReport({}, - caller_->pc()->GetConfiguration().enable_simulcast_stats); + RTCStatsReportVerifier(report.get()).VerifyReport({}); #if RTC_TRACE_EVENTS_ENABLED EXPECT_EQ(report->ToJson(), RTCStatsReportTraceListener::last_trace()); @@ -1057,9 +1144,7 @@ TEST_F(RTCStatsIntegrationTest, GetStatsFromCallee) { StartCall(); rtc::scoped_refptr report = GetStatsFromCallee(); - RTCStatsReportVerifier(report.get()) - .VerifyReport({}, - caller_->pc()->GetConfiguration().enable_simulcast_stats); + RTCStatsReportVerifier(report.get()).VerifyReport({}); #if RTC_TRACE_EVENTS_ENABLED EXPECT_EQ(report->ToJson(), RTCStatsReportTraceListener::last_trace()); @@ -1083,9 +1168,7 @@ 
TEST_F(RTCStatsIntegrationTest, GetStatsWithSenderSelector) { RTCMediaStreamStats::kType, RTCDataChannelStats::kType, }; - RTCStatsReportVerifier(report.get()) - .VerifyReport(allowed_missing_stats, - caller_->pc()->GetConfiguration().enable_simulcast_stats); + RTCStatsReportVerifier(report.get()).VerifyReport(allowed_missing_stats); EXPECT_TRUE(report->size()); } @@ -1104,9 +1187,7 @@ TEST_F(RTCStatsIntegrationTest, GetStatsWithReceiverSelector) { RTCMediaStreamStats::kType, RTCDataChannelStats::kType, }; - RTCStatsReportVerifier(report.get()) - .VerifyReport(allowed_missing_stats, - caller_->pc()->GetConfiguration().enable_simulcast_stats); + RTCStatsReportVerifier(report.get()).VerifyReport(allowed_missing_stats); EXPECT_TRUE(report->size()); } @@ -1210,7 +1291,21 @@ TEST_F(RTCStatsIntegrationTest, GetStatsReferencedIds) { } } } -#endif // HAVE_SCTP + +TEST_F(RTCStatsIntegrationTest, GetStatsContainsNoDuplicateMembers) { + StartCall(); + + rtc::scoped_refptr report = GetStatsFromCallee(); + for (const RTCStats& stats : *report) { + std::set member_names; + for (const auto* member : stats.Members()) { + EXPECT_TRUE(member_names.find(member->name()) == member_names.end()) + << member->name() << " is a duplicate!"; + member_names.insert(member->name()); + } + } +} +#endif // WEBRTC_HAVE_SCTP } // namespace diff --git a/pc/rtc_stats_traversal.cc b/pc/rtc_stats_traversal.cc index c08643eba8..e579072ea5 100644 --- a/pc/rtc_stats_traversal.cc +++ b/pc/rtc_stats_traversal.cc @@ -76,7 +76,8 @@ std::vector GetStatsReferencedIds(const RTCStats& stats) { const auto& certificate = static_cast(stats); AddIdIfDefined(certificate.issuer_certificate_id, &neighbor_ids); } else if (type == RTCCodecStats::kType) { - // RTCCodecStats does not have any neighbor references. + const auto& codec = static_cast(stats); + AddIdIfDefined(codec.transport_id, &neighbor_ids); } else if (type == RTCDataChannelStats::kType) { // RTCDataChannelStats does not have any neighbor references. 
} else if (type == RTCIceCandidatePairStats::kType) { @@ -98,24 +99,36 @@ std::vector GetStatsReferencedIds(const RTCStats& stats) { AddIdIfDefined(track.media_source_id, &neighbor_ids); } else if (type == RTCPeerConnectionStats::kType) { // RTCPeerConnectionStats does not have any neighbor references. - } else if (type == RTCInboundRTPStreamStats::kType || - type == RTCOutboundRTPStreamStats::kType) { - const auto& rtp = static_cast(stats); - AddIdIfDefined(rtp.track_id, &neighbor_ids); - AddIdIfDefined(rtp.transport_id, &neighbor_ids); - AddIdIfDefined(rtp.codec_id, &neighbor_ids); - if (type == RTCOutboundRTPStreamStats::kType) { - const auto& outbound_rtp = - static_cast(stats); - AddIdIfDefined(outbound_rtp.media_source_id, &neighbor_ids); - AddIdIfDefined(outbound_rtp.remote_id, &neighbor_ids); - } + } else if (type == RTCInboundRTPStreamStats::kType) { + const auto& inbound_rtp = + static_cast(stats); + AddIdIfDefined(inbound_rtp.remote_id, &neighbor_ids); + AddIdIfDefined(inbound_rtp.track_id, &neighbor_ids); + AddIdIfDefined(inbound_rtp.transport_id, &neighbor_ids); + AddIdIfDefined(inbound_rtp.codec_id, &neighbor_ids); + } else if (type == RTCOutboundRTPStreamStats::kType) { + const auto& outbound_rtp = + static_cast(stats); + AddIdIfDefined(outbound_rtp.remote_id, &neighbor_ids); + AddIdIfDefined(outbound_rtp.track_id, &neighbor_ids); + AddIdIfDefined(outbound_rtp.transport_id, &neighbor_ids); + AddIdIfDefined(outbound_rtp.codec_id, &neighbor_ids); + AddIdIfDefined(outbound_rtp.media_source_id, &neighbor_ids); } else if (type == RTCRemoteInboundRtpStreamStats::kType) { const auto& remote_inbound_rtp = static_cast(stats); AddIdIfDefined(remote_inbound_rtp.transport_id, &neighbor_ids); AddIdIfDefined(remote_inbound_rtp.codec_id, &neighbor_ids); AddIdIfDefined(remote_inbound_rtp.local_id, &neighbor_ids); + } else if (type == RTCRemoteOutboundRtpStreamStats::kType) { + const auto& remote_outbound_rtp = + static_cast(stats); + // Inherited from 
`RTCRTPStreamStats`. + AddIdIfDefined(remote_outbound_rtp.track_id, &neighbor_ids); + AddIdIfDefined(remote_outbound_rtp.transport_id, &neighbor_ids); + AddIdIfDefined(remote_outbound_rtp.codec_id, &neighbor_ids); + // Direct members of `RTCRemoteOutboundRtpStreamStats`. + AddIdIfDefined(remote_outbound_rtp.local_id, &neighbor_ids); } else if (type == RTCAudioSourceStats::kType || type == RTCVideoSourceStats::kType) { // RTC[Audio/Video]SourceStats does not have any neighbor references. diff --git a/pc/rtp_media_utils.cc b/pc/rtp_media_utils.cc index 8fbfca1f98..c5d642b685 100644 --- a/pc/rtp_media_utils.cc +++ b/pc/rtp_media_utils.cc @@ -42,6 +42,7 @@ RtpTransceiverDirection RtpTransceiverDirectionReversed( switch (direction) { case RtpTransceiverDirection::kSendRecv: case RtpTransceiverDirection::kInactive: + case RtpTransceiverDirection::kStopped: return direction; case RtpTransceiverDirection::kSendOnly: return RtpTransceiverDirection::kRecvOnly; diff --git a/pc/rtp_media_utils.h b/pc/rtp_media_utils.h index f556fe3977..d45cc744a1 100644 --- a/pc/rtp_media_utils.h +++ b/pc/rtp_media_utils.h @@ -11,6 +11,7 @@ #ifndef PC_RTP_MEDIA_UTILS_H_ #define PC_RTP_MEDIA_UTILS_H_ +#include "api/rtp_transceiver_direction.h" #include "api/rtp_transceiver_interface.h" namespace webrtc { @@ -49,13 +50,13 @@ RtpTransceiverDirection RtpTransceiverDirectionIntersection( RtpTransceiverDirection lhs, RtpTransceiverDirection rhs); -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& os, // no-presubmit-check TODO(webrtc:8982) RtpTransceiverDirection direction) { return os << RtpTransceiverDirectionToString(direction); } -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST } // namespace webrtc diff --git a/pc/rtp_parameters_conversion.cc b/pc/rtp_parameters_conversion.cc index 9c7a337ab4..8d3064ed93 100644 --- a/pc/rtp_parameters_conversion.cc +++ b/pc/rtp_parameters_conversion.cc @@ -10,10 +10,10 @@ #include 
"pc/rtp_parameters_conversion.h" +#include #include #include #include -#include #include #include "api/array_view.h" @@ -76,8 +76,7 @@ RTCErrorOr ToCricketFeedbackParam( } return cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc); } - // Not reached; avoids compile warning. - FATAL(); + RTC_CHECK_NOTREACHED(); } template diff --git a/pc/rtp_receiver.cc b/pc/rtp_receiver.cc index f65afd7dc4..2444c9b60d 100644 --- a/pc/rtp_receiver.cc +++ b/pc/rtp_receiver.cc @@ -15,13 +15,9 @@ #include #include -#include "api/media_stream_proxy.h" -#include "api/media_stream_track_proxy.h" #include "pc/media_stream.h" -#include "rtc_base/checks.h" +#include "pc/media_stream_proxy.h" #include "rtc_base/location.h" -#include "rtc_base/logging.h" -#include "rtc_base/trace_event.h" namespace webrtc { @@ -43,20 +39,4 @@ RtpReceiverInternal::CreateStreamsFromIds(std::vector stream_ids) { return streams; } -// Attempt to attach the frame decryptor to the current media channel on the -// correct worker thread only if both the media channel exists and a ssrc has -// been allocated to the stream. -void RtpReceiverInternal::MaybeAttachFrameDecryptorToMediaChannel( - const absl::optional& ssrc, - rtc::Thread* worker_thread, - rtc::scoped_refptr frame_decryptor, - cricket::MediaChannel* media_channel, - bool stopped) { - if (media_channel && frame_decryptor && ssrc.has_value() && !stopped) { - worker_thread->Invoke(RTC_FROM_HERE, [&] { - media_channel->SetFrameDecryptor(*ssrc, frame_decryptor); - }); - } -} - } // namespace webrtc diff --git a/pc/rtp_receiver.h b/pc/rtp_receiver.h index 84c2ff723b..73fc5b9858 100644 --- a/pc/rtp_receiver.h +++ b/pc/rtp_receiver.h @@ -22,6 +22,7 @@ #include "absl/types/optional.h" #include "api/crypto/frame_decryptor_interface.h" +#include "api/dtls_transport_interface.h" #include "api/media_stream_interface.h" #include "api/media_types.h" #include "api/rtp_parameters.h" @@ -41,7 +42,11 @@ namespace webrtc { // Internal class used by PeerConnection. 
class RtpReceiverInternal : public RtpReceiverInterface { public: + // Stops receiving. The track may be reactivated. virtual void Stop() = 0; + // Stops the receiver permanently. + // Causes the associated track to enter kEnded state. Cannot be reversed. + virtual void StopAndEndTrack() = 0; // Sets the underlying MediaEngine channel associated with this RtpSender. // A VoiceMediaChannel should be used for audio RtpSenders and @@ -87,13 +92,6 @@ class RtpReceiverInternal : public RtpReceiverInterface { static std::vector> CreateStreamsFromIds(std::vector stream_ids); - - static void MaybeAttachFrameDecryptorToMediaChannel( - const absl::optional& ssrc, - rtc::Thread* worker_thread, - rtc::scoped_refptr frame_decryptor, - cricket::MediaChannel* media_channel, - bool stopped); }; } // namespace webrtc diff --git a/pc/rtp_receiver_proxy.h b/pc/rtp_receiver_proxy.h new file mode 100644 index 0000000000..d4114e0f0b --- /dev/null +++ b/pc/rtp_receiver_proxy.h @@ -0,0 +1,54 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_RTP_RECEIVER_PROXY_H_ +#define PC_RTP_RECEIVER_PROXY_H_ + +#include +#include + +#include "api/rtp_receiver_interface.h" +#include "pc/proxy.h" + +namespace webrtc { + +// Define proxy for RtpReceiverInterface. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
+BEGIN_PROXY_MAP(RtpReceiver) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +BYPASS_PROXY_CONSTMETHOD0(rtc::scoped_refptr, track) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, dtls_transport) +PROXY_CONSTMETHOD0(std::vector, stream_ids) +PROXY_CONSTMETHOD0(std::vector>, + streams) +BYPASS_PROXY_CONSTMETHOD0(cricket::MediaType, media_type) +BYPASS_PROXY_CONSTMETHOD0(std::string, id) +PROXY_SECONDARY_CONSTMETHOD0(RtpParameters, GetParameters) +PROXY_METHOD1(void, SetObserver, RtpReceiverObserverInterface*) +PROXY_SECONDARY_METHOD1(void, + SetJitterBufferMinimumDelay, + absl::optional) +PROXY_SECONDARY_CONSTMETHOD0(std::vector, GetSources) +// TODO(bugs.webrtc.org/12772): Remove. +PROXY_SECONDARY_METHOD1(void, + SetFrameDecryptor, + rtc::scoped_refptr) +// TODO(bugs.webrtc.org/12772): Remove. +PROXY_SECONDARY_CONSTMETHOD0(rtc::scoped_refptr, + GetFrameDecryptor) +PROXY_SECONDARY_METHOD1(void, + SetDepacketizerToDecoderFrameTransformer, + rtc::scoped_refptr) +END_PROXY_MAP(RtpReceiver) + +} // namespace webrtc + +#endif // PC_RTP_RECEIVER_PROXY_H_ diff --git a/pc/rtp_sender.cc b/pc/rtp_sender.cc index c56f4a94d9..aa268cef45 100644 --- a/pc/rtp_sender.cc +++ b/pc/rtp_sender.cc @@ -10,19 +10,22 @@ #include "pc/rtp_sender.h" +#include #include #include #include +#include "absl/algorithm/container.h" #include "api/audio_options.h" #include "api/media_stream_interface.h" +#include "api/priority.h" #include "media/base/media_engine.h" -#include "pc/peer_connection.h" -#include "pc/stats_collector.h" +#include "pc/stats_collector_interface.h" #include "rtc_base/checks.h" #include "rtc_base/helpers.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" #include "rtc_base/trace_event.h" namespace webrtc { @@ -184,6 +187,15 @@ RTCError RtpSenderBase::SetParametersInternal(const RtpParameters& parameters) { RTCError RtpSenderBase::SetParameters(const RtpParameters& parameters) { TRACE_EVENT0("webrtc", "RtpSenderBase::SetParameters"); + if 
(is_transceiver_stopped_) { + LOG_AND_RETURN_ERROR( + RTCErrorType::INVALID_STATE, + "Cannot set parameters on sender of a stopped transceiver."); + } + if (stopped_) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_STATE, + "Cannot set parameters on a stopped sender."); + } if (stopped_) { LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_STATE, "Cannot set parameters on a stopped sender."); @@ -381,7 +393,7 @@ void RtpSenderBase::SetEncoderToPacketizerFrameTransformer( LocalAudioSinkAdapter::LocalAudioSinkAdapter() : sink_(nullptr) {} LocalAudioSinkAdapter::~LocalAudioSinkAdapter() { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); if (sink_) sink_->OnClose(); } @@ -393,15 +405,16 @@ void LocalAudioSinkAdapter::OnData( size_t number_of_channels, size_t number_of_frames, absl::optional absolute_capture_timestamp_ms) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); if (sink_) { sink_->OnData(audio_data, bits_per_sample, sample_rate, number_of_channels, number_of_frames, absolute_capture_timestamp_ms); + num_preferred_channels_ = sink_->NumPreferredChannels(); } } void LocalAudioSinkAdapter::SetSink(cricket::AudioSource::Sink* sink) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); RTC_DCHECK(!sink || !sink_); sink_ = sink; } @@ -409,16 +422,15 @@ void LocalAudioSinkAdapter::SetSink(cricket::AudioSource::Sink* sink) { rtc::scoped_refptr AudioRtpSender::Create( rtc::Thread* worker_thread, const std::string& id, - StatsCollector* stats, + StatsCollectorInterface* stats, SetStreamsObserver* set_streams_observer) { - return rtc::scoped_refptr( - new rtc::RefCountedObject(worker_thread, id, stats, - set_streams_observer)); + return rtc::make_ref_counted(worker_thread, id, stats, + set_streams_observer); } AudioRtpSender::AudioRtpSender(rtc::Thread* worker_thread, const std::string& id, - StatsCollector* stats, + StatsCollectorInterface* stats, SetStreamsObserver* set_streams_observer) : RtpSenderBase(worker_thread, id, set_streams_observer), 
stats_(stats), @@ -558,9 +570,8 @@ rtc::scoped_refptr VideoRtpSender::Create( rtc::Thread* worker_thread, const std::string& id, SetStreamsObserver* set_streams_observer) { - return rtc::scoped_refptr( - new rtc::RefCountedObject(worker_thread, id, - set_streams_observer)); + return rtc::make_ref_counted(worker_thread, id, + set_streams_observer); } VideoRtpSender::VideoRtpSender(rtc::Thread* worker_thread, diff --git a/pc/rtp_sender.h b/pc/rtp_sender.h index 1e0de22c5c..0b4c204902 100644 --- a/pc/rtp_sender.h +++ b/pc/rtp_sender.h @@ -15,20 +15,34 @@ #ifndef PC_RTP_SENDER_H_ #define PC_RTP_SENDER_H_ +#include +#include #include #include #include +#include "absl/types/optional.h" +#include "api/crypto/frame_encryptor_interface.h" +#include "api/dtls_transport_interface.h" +#include "api/dtmf_sender_interface.h" +#include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" +#include "api/media_types.h" +#include "api/rtc_error.h" +#include "api/rtp_parameters.h" #include "api/rtp_sender_interface.h" +#include "api/scoped_refptr.h" #include "media/base/audio_source.h" #include "media/base/media_channel.h" #include "pc/dtmf_sender.h" -#include "rtc_base/critical_section.h" +#include "pc/stats_collector_interface.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" namespace webrtc { -class StatsCollector; +class StatsCollectorInterface; bool UnimplementedRtpParameterHasValue(const RtpParameters& parameters); @@ -69,6 +83,8 @@ class RtpSenderInternal : public RtpSenderInterface { // If the specified list is empty, this is a no-op. virtual RTCError DisableEncodingLayers( const std::vector& rid) = 0; + + virtual void SetTransceiverAsStopped() = 0; }; // Shared implementation for RtpSenderInternal interface. 
@@ -152,6 +168,8 @@ class RtpSenderBase : public RtpSenderInternal, public ObserverInterface { void SetEncoderToPacketizerFrameTransformer( rtc::scoped_refptr frame_transformer) override; + void SetTransceiverAsStopped() override { is_transceiver_stopped_ = true; } + protected: // If |set_streams_observer| is not null, it is invoked when SetStreams() // is called. |set_streams_observer| is not owned by this object. If not @@ -180,6 +198,7 @@ class RtpSenderBase : public RtpSenderInternal, public ObserverInterface { rtc::Thread* worker_thread_; uint32_t ssrc_ = 0; bool stopped_ = false; + bool is_transceiver_stopped_ = false; int attachment_id_ = 0; const std::string id_; @@ -232,12 +251,16 @@ class LocalAudioSinkAdapter : public AudioTrackSinkInterface, /*absolute_capture_timestamp_ms=*/absl::nullopt); } + // AudioSinkInterface implementation. + int NumPreferredChannels() const override { return num_preferred_channels_; } + // cricket::AudioSource implementation. void SetSink(cricket::AudioSource::Sink* sink) override; cricket::AudioSource::Sink* sink_; // Critical section protecting |sink_|. 
- rtc::CriticalSection lock_; + Mutex lock_; + int num_preferred_channels_ = -1; }; class AudioRtpSender : public DtmfProviderInterface, public RtpSenderBase { @@ -252,7 +275,7 @@ class AudioRtpSender : public DtmfProviderInterface, public RtpSenderBase { static rtc::scoped_refptr Create( rtc::Thread* worker_thread, const std::string& id, - StatsCollector* stats, + StatsCollectorInterface* stats, SetStreamsObserver* set_streams_observer); virtual ~AudioRtpSender(); @@ -276,7 +299,7 @@ class AudioRtpSender : public DtmfProviderInterface, public RtpSenderBase { protected: AudioRtpSender(rtc::Thread* worker_thread, const std::string& id, - StatsCollector* stats, + StatsCollectorInterface* stats, SetStreamsObserver* set_streams_observer); void SetSend() override; @@ -298,7 +321,7 @@ class AudioRtpSender : public DtmfProviderInterface, public RtpSenderBase { } sigslot::signal0<> SignalDestroyed; - StatsCollector* stats_ = nullptr; + StatsCollectorInterface* stats_ = nullptr; rtc::scoped_refptr dtmf_sender_proxy_; bool cached_track_enabled_ = false; diff --git a/pc/rtp_sender_proxy.h b/pc/rtp_sender_proxy.h new file mode 100644 index 0000000000..2f8fe2c0bf --- /dev/null +++ b/pc/rtp_sender_proxy.h @@ -0,0 +1,51 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_RTP_SENDER_PROXY_H_ +#define PC_RTP_SENDER_PROXY_H_ + +#include +#include + +#include "api/rtp_sender_interface.h" +#include "pc/proxy.h" + +namespace webrtc { + +// Define proxy for RtpSenderInterface. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
+BEGIN_PRIMARY_PROXY_MAP(RtpSender) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +PROXY_METHOD1(bool, SetTrack, MediaStreamTrackInterface*) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, track) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, dtls_transport) +PROXY_CONSTMETHOD0(uint32_t, ssrc) +BYPASS_PROXY_CONSTMETHOD0(cricket::MediaType, media_type) +BYPASS_PROXY_CONSTMETHOD0(std::string, id) +PROXY_CONSTMETHOD0(std::vector, stream_ids) +PROXY_CONSTMETHOD0(std::vector, init_send_encodings) +PROXY_CONSTMETHOD0(RtpParameters, GetParameters) +PROXY_METHOD1(RTCError, SetParameters, const RtpParameters&) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, GetDtmfSender) +PROXY_METHOD1(void, + SetFrameEncryptor, + rtc::scoped_refptr) +PROXY_CONSTMETHOD0(rtc::scoped_refptr, + GetFrameEncryptor) +PROXY_METHOD1(void, SetStreams, const std::vector&) +PROXY_METHOD1(void, + SetEncoderToPacketizerFrameTransformer, + rtc::scoped_refptr) +END_PROXY_MAP(RtpSender) + +} // namespace webrtc + +#endif // PC_RTP_SENDER_PROXY_H_ diff --git a/pc/rtp_sender_receiver_unittest.cc b/pc/rtp_sender_receiver_unittest.cc index 98560f858b..10dc894518 100644 --- a/pc/rtp_sender_receiver_unittest.cc +++ b/pc/rtp_sender_receiver_unittest.cc @@ -37,7 +37,6 @@ #include "media/base/media_channel.h" #include "media/base/media_config.h" #include "media/base/media_engine.h" -#include "media/base/rtp_data_engine.h" #include "media/base/stream_params.h" #include "media/base/test_utils.h" #include "media/engine/fake_webrtc_call.h" @@ -64,6 +63,7 @@ #include "rtc_base/thread.h" #include "test/gmock.h" #include "test/gtest.h" +#include "test/run_loop.h" using ::testing::_; using ::testing::ContainerEq; @@ -108,29 +108,28 @@ class RtpSenderReceiverTest // Create fake media engine/etc. so we can create channels to use to // test RtpSenders/RtpReceivers. 
media_engine_(new cricket::FakeMediaEngine()), - channel_manager_(absl::WrapUnique(media_engine_), - std::make_unique(), - worker_thread_, - network_thread_), - fake_call_(), + fake_call_(worker_thread_, network_thread_), local_stream_(MediaStream::Create(kStreamId1)) { - // Create channels to be used by the RtpSenders and RtpReceivers. - channel_manager_.Init(); + worker_thread_->Invoke(RTC_FROM_HERE, [&]() { + channel_manager_ = cricket::ChannelManager::Create( + absl::WrapUnique(media_engine_), false, worker_thread_, + network_thread_); + }); + bool srtp_required = true; rtp_dtls_transport_ = std::make_unique( "fake_dtls_transport", cricket::ICE_CANDIDATE_COMPONENT_RTP); rtp_transport_ = CreateDtlsSrtpTransport(); - voice_channel_ = channel_manager_.CreateVoiceChannel( + voice_channel_ = channel_manager_->CreateVoiceChannel( &fake_call_, cricket::MediaConfig(), rtp_transport_.get(), - MediaTransportConfig(), rtc::Thread::Current(), cricket::CN_AUDIO, - srtp_required, webrtc::CryptoOptions(), &ssrc_generator_, - cricket::AudioOptions()); - video_channel_ = channel_manager_.CreateVideoChannel( + rtc::Thread::Current(), cricket::CN_AUDIO, srtp_required, + webrtc::CryptoOptions(), &ssrc_generator_, cricket::AudioOptions()); + video_channel_ = channel_manager_->CreateVideoChannel( &fake_call_, cricket::MediaConfig(), rtp_transport_.get(), - MediaTransportConfig(), rtc::Thread::Current(), cricket::CN_VIDEO, - srtp_required, webrtc::CryptoOptions(), &ssrc_generator_, - cricket::VideoOptions(), video_bitrate_allocator_factory_.get()); + rtc::Thread::Current(), cricket::CN_VIDEO, srtp_required, + webrtc::CryptoOptions(), &ssrc_generator_, cricket::VideoOptions(), + video_bitrate_allocator_factory_.get()); voice_channel_->Enable(true); video_channel_->Enable(true); voice_media_channel_ = media_engine_->GetVoiceChannel(0); @@ -162,6 +161,18 @@ class RtpSenderReceiverTest cricket::StreamParams::CreateLegacy(kVideoSsrc2)); } + ~RtpSenderReceiverTest() { + audio_rtp_sender_ = 
nullptr; + video_rtp_sender_ = nullptr; + audio_rtp_receiver_ = nullptr; + video_rtp_receiver_ = nullptr; + local_stream_ = nullptr; + video_track_ = nullptr; + audio_track_ = nullptr; + worker_thread_->Invoke(RTC_FROM_HERE, + [&]() { channel_manager_.reset(); }); + } + std::unique_ptr CreateDtlsSrtpTransport() { auto dtls_srtp_transport = std::make_unique( /*rtcp_mux_required=*/true); @@ -289,8 +300,9 @@ class RtpSenderReceiverTest void CreateAudioRtpReceiver( std::vector> streams = {}) { - audio_rtp_receiver_ = - new AudioRtpReceiver(rtc::Thread::Current(), kAudioTrackId, streams); + audio_rtp_receiver_ = rtc::make_ref_counted( + rtc::Thread::Current(), kAudioTrackId, streams, + /*is_unified_plan=*/true); audio_rtp_receiver_->SetMediaChannel(voice_media_channel_); audio_rtp_receiver_->SetupMediaChannel(kAudioSsrc); audio_track_ = audio_rtp_receiver_->audio_track(); @@ -299,8 +311,8 @@ class RtpSenderReceiverTest void CreateVideoRtpReceiver( std::vector> streams = {}) { - video_rtp_receiver_ = - new VideoRtpReceiver(rtc::Thread::Current(), kVideoTrackId, streams); + video_rtp_receiver_ = rtc::make_ref_counted( + rtc::Thread::Current(), kVideoTrackId, streams); video_rtp_receiver_->SetMediaChannel(video_media_channel_); video_rtp_receiver_->SetupMediaChannel(kVideoSsrc); video_track_ = video_rtp_receiver_->video_track(); @@ -319,19 +331,25 @@ class RtpSenderReceiverTest video_media_channel_->AddRecvStream(stream_params); uint32_t primary_ssrc = stream_params.first_ssrc(); - video_rtp_receiver_ = - new VideoRtpReceiver(rtc::Thread::Current(), kVideoTrackId, streams); + video_rtp_receiver_ = rtc::make_ref_counted( + rtc::Thread::Current(), kVideoTrackId, streams); video_rtp_receiver_->SetMediaChannel(video_media_channel_); video_rtp_receiver_->SetupMediaChannel(primary_ssrc); video_track_ = video_rtp_receiver_->video_track(); } void DestroyAudioRtpReceiver() { + if (!audio_rtp_receiver_) + return; + audio_rtp_receiver_->Stop(); audio_rtp_receiver_ = nullptr; 
VerifyVoiceChannelNoOutput(); } void DestroyVideoRtpReceiver() { + if (!video_rtp_receiver_) + return; + video_rtp_receiver_->Stop(); video_rtp_receiver_ = nullptr; VerifyVideoChannelNoOutput(); } @@ -487,6 +505,7 @@ class RtpSenderReceiverTest } protected: + test::RunLoop run_loop_; rtc::Thread* const network_thread_; rtc::Thread* const worker_thread_; webrtc::RtcEventLogNull event_log_; @@ -498,7 +517,7 @@ class RtpSenderReceiverTest video_bitrate_allocator_factory_; // |media_engine_| is actually owned by |channel_manager_|. cricket::FakeMediaEngine* media_engine_; - cricket::ChannelManager channel_manager_; + std::unique_ptr channel_manager_; cricket::FakeCall fake_call_; cricket::VoiceChannel* voice_channel_; cricket::VideoChannel* video_channel_; @@ -588,11 +607,15 @@ TEST_F(RtpSenderReceiverTest, RemoteAudioTrackDisable) { EXPECT_TRUE(voice_media_channel_->GetOutputVolume(kAudioSsrc, &volume)); EXPECT_EQ(1, volume); + // Handling of enable/disable is applied asynchronously. audio_track_->set_enabled(false); + run_loop_.Flush(); + EXPECT_TRUE(voice_media_channel_->GetOutputVolume(kAudioSsrc, &volume)); EXPECT_EQ(0, volume); audio_track_->set_enabled(true); + run_loop_.Flush(); EXPECT_TRUE(voice_media_channel_->GetOutputVolume(kAudioSsrc, &volume)); EXPECT_EQ(1, volume); @@ -625,6 +648,7 @@ TEST_F(RtpSenderReceiverTest, RemoteVideoTrackState) { EXPECT_EQ(webrtc::MediaStreamTrackInterface::kEnded, video_track_->state()); EXPECT_EQ(webrtc::MediaSourceInterface::kEnded, video_track_->GetSource()->state()); + DestroyVideoRtpReceiver(); } // Currently no action is taken when a remote video track is disabled or @@ -646,22 +670,27 @@ TEST_F(RtpSenderReceiverTest, RemoteAudioTrackSetVolume) { double volume; audio_track_->GetSource()->SetVolume(0.5); + run_loop_.Flush(); EXPECT_TRUE(voice_media_channel_->GetOutputVolume(kAudioSsrc, &volume)); EXPECT_EQ(0.5, volume); // Disable the audio track, this should prevent setting the volume. 
audio_track_->set_enabled(false); + RTC_DCHECK_EQ(worker_thread_, run_loop_.task_queue()); + run_loop_.Flush(); audio_track_->GetSource()->SetVolume(0.8); EXPECT_TRUE(voice_media_channel_->GetOutputVolume(kAudioSsrc, &volume)); EXPECT_EQ(0, volume); // When the track is enabled, the previously set volume should take effect. audio_track_->set_enabled(true); + run_loop_.Flush(); EXPECT_TRUE(voice_media_channel_->GetOutputVolume(kAudioSsrc, &volume)); EXPECT_EQ(0.8, volume); // Try changing volume one more time. audio_track_->GetSource()->SetVolume(0.9); + run_loop_.Flush(); EXPECT_TRUE(voice_media_channel_->GetOutputVolume(kAudioSsrc, &volume)); EXPECT_EQ(0.9, volume); @@ -672,12 +701,14 @@ TEST_F(RtpSenderReceiverTest, AudioRtpReceiverDelay) { CreateAudioRtpReceiver(); VerifyRtpReceiverDelayBehaviour(voice_media_channel_, audio_rtp_receiver_.get(), kAudioSsrc); + DestroyAudioRtpReceiver(); } TEST_F(RtpSenderReceiverTest, VideoRtpReceiverDelay) { CreateVideoRtpReceiver(); VerifyRtpReceiverDelayBehaviour(video_media_channel_, video_rtp_receiver_.get(), kVideoSsrc); + DestroyVideoRtpReceiver(); } // Test that the media channel isn't enabled for sending if the audio sender @@ -1571,6 +1602,7 @@ TEST_F(RtpSenderReceiverTest, AudioReceiverCanSetFrameDecryptor) { audio_rtp_receiver_->SetFrameDecryptor(fake_frame_decryptor); EXPECT_EQ(fake_frame_decryptor.get(), audio_rtp_receiver_->GetFrameDecryptor().get()); + DestroyAudioRtpReceiver(); } // Validate that the default FrameEncryptor setting is nullptr. @@ -1582,6 +1614,7 @@ TEST_F(RtpSenderReceiverTest, AudioReceiverCannotSetFrameDecryptorAfterStop) { audio_rtp_receiver_->Stop(); audio_rtp_receiver_->SetFrameDecryptor(fake_frame_decryptor); // TODO(webrtc:9926) - Validate media channel not set once fakes updated. + DestroyAudioRtpReceiver(); } // Validate that the default FrameEncryptor setting is nullptr. 
@@ -1616,6 +1649,7 @@ TEST_F(RtpSenderReceiverTest, VideoReceiverCanSetFrameDecryptor) { video_rtp_receiver_->SetFrameDecryptor(fake_frame_decryptor); EXPECT_EQ(fake_frame_decryptor.get(), video_rtp_receiver_->GetFrameDecryptor().get()); + DestroyVideoRtpReceiver(); } // Validate that the default FrameEncryptor setting is nullptr. @@ -1627,6 +1661,7 @@ TEST_F(RtpSenderReceiverTest, VideoReceiverCannotSetFrameDecryptorAfterStop) { video_rtp_receiver_->Stop(); video_rtp_receiver_->SetFrameDecryptor(fake_frame_decryptor); // TODO(webrtc:9926) - Validate media channel not set once fakes updated. + DestroyVideoRtpReceiver(); } // Checks that calling the internal methods for get/set parameters do not diff --git a/pc/rtp_transceiver.cc b/pc/rtp_transceiver.cc index d6e5ff46a1..a78b9d6be6 100644 --- a/pc/rtp_transceiver.cc +++ b/pc/rtp_transceiver.cc @@ -10,16 +10,23 @@ #include "pc/rtp_transceiver.h" +#include #include #include +#include #include "absl/algorithm/container.h" #include "api/rtp_parameters.h" +#include "api/sequence_checker.h" +#include "media/base/codec.h" +#include "media/base/media_constants.h" #include "pc/channel_manager.h" #include "pc/rtp_media_utils.h" -#include "pc/rtp_parameters_conversion.h" +#include "pc/session_description.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/thread.h" namespace webrtc { namespace { @@ -97,12 +104,25 @@ RTCError VerifyCodecPreferences(const std::vector& codecs, return RTCError::OK(); } +TaskQueueBase* GetCurrentTaskQueueOrThread() { + TaskQueueBase* current = TaskQueueBase::Current(); + if (!current) + current = rtc::ThreadManager::Instance()->CurrentThread(); + return current; +} + } // namespace -RtpTransceiver::RtpTransceiver(cricket::MediaType media_type) - : unified_plan_(false), media_type_(media_type) { +RtpTransceiver::RtpTransceiver( + cricket::MediaType media_type, + cricket::ChannelManager* channel_manager /* = nullptr*/) + : 
thread_(GetCurrentTaskQueueOrThread()), + unified_plan_(false), + media_type_(media_type), + channel_manager_(channel_manager) { RTC_DCHECK(media_type == cricket::MEDIA_TYPE_AUDIO || media_type == cricket::MEDIA_TYPE_VIDEO); + RTC_DCHECK(channel_manager_); } RtpTransceiver::RtpTransceiver( @@ -110,60 +130,97 @@ RtpTransceiver::RtpTransceiver( rtc::scoped_refptr> receiver, cricket::ChannelManager* channel_manager, - std::vector header_extensions_offered) - : unified_plan_(true), + std::vector header_extensions_offered, + std::function on_negotiation_needed) + : thread_(GetCurrentTaskQueueOrThread()), + unified_plan_(true), media_type_(sender->media_type()), channel_manager_(channel_manager), - HeaderExtensionsToOffer_(std::move(header_extensions_offered)) { + header_extensions_to_offer_(std::move(header_extensions_offered)), + on_negotiation_needed_(std::move(on_negotiation_needed)) { RTC_DCHECK(media_type_ == cricket::MEDIA_TYPE_AUDIO || media_type_ == cricket::MEDIA_TYPE_VIDEO); RTC_DCHECK_EQ(sender->media_type(), receiver->media_type()); + RTC_DCHECK(channel_manager_); senders_.push_back(sender); receivers_.push_back(receiver); } RtpTransceiver::~RtpTransceiver() { - Stop(); + // TODO(tommi): On Android, when running PeerConnectionClientTest (e.g. + // PeerConnectionClientTest#testCameraSwitch), the instance doesn't get + // deleted on `thread_`. See if we can fix that. + if (!stopped_) { + RTC_DCHECK_RUN_ON(thread_); + StopInternal(); + } } void RtpTransceiver::SetChannel(cricket::ChannelInterface* channel) { + RTC_DCHECK_RUN_ON(thread_); // Cannot set a non-null channel on a stopped transceiver. 
if (stopped_ && channel) { return; } + RTC_DCHECK(channel || channel_); + + RTC_LOG_THREAD_BLOCK_COUNT(); + + if (channel_) { + signaling_thread_safety_->SetNotAlive(); + signaling_thread_safety_ = nullptr; + } + if (channel) { RTC_DCHECK_EQ(media_type(), channel->media_type()); + signaling_thread_safety_ = PendingTaskSafetyFlag::Create(); } - if (channel_) { - channel_->SignalFirstPacketReceived().disconnect(this); - } + // An alternative to this, could be to require SetChannel to be called + // on the network thread. The channel object operates for the most part + // on the network thread, as part of its initialization being on the network + // thread is required, so setting a channel object as part of the construction + // (without thread hopping) might be the more efficient thing to do than + // how SetChannel works today. + // Similarly, if the channel() accessor is limited to the network thread, that + // helps with keeping the channel implementation requirements being met and + // avoids synchronization for accessing the pointer or network related state. + channel_manager_->network_thread()->Invoke(RTC_FROM_HERE, [&]() { + if (channel_) { + channel_->SetFirstPacketReceivedCallback(nullptr); + } - channel_ = channel; + channel_ = channel; - if (channel_) { - channel_->SignalFirstPacketReceived().connect( - this, &RtpTransceiver::OnFirstPacketReceived); - } + if (channel_) { + channel_->SetFirstPacketReceivedCallback( + [thread = thread_, flag = signaling_thread_safety_, this]() mutable { + thread->PostTask(ToQueuedTask( + std::move(flag), [this]() { OnFirstPacketReceived(); })); + }); + } + }); for (const auto& sender : senders_) { sender->internal()->SetMediaChannel(channel_ ? 
channel_->media_channel() : nullptr); } + RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN(1); + for (const auto& receiver : receivers_) { if (!channel_) { receiver->internal()->Stop(); + } else { + receiver->internal()->SetMediaChannel(channel_->media_channel()); } - - receiver->internal()->SetMediaChannel(channel_ ? channel_->media_channel() - : nullptr); } } void RtpTransceiver::AddSender( rtc::scoped_refptr> sender) { + RTC_DCHECK_RUN_ON(thread_); RTC_DCHECK(!stopped_); RTC_DCHECK(!unified_plan_); RTC_DCHECK(sender); @@ -189,6 +246,7 @@ bool RtpTransceiver::RemoveSender(RtpSenderInterface* sender) { void RtpTransceiver::AddReceiver( rtc::scoped_refptr> receiver) { + RTC_DCHECK_RUN_ON(thread_); RTC_DCHECK(!stopped_); RTC_DCHECK(!unified_plan_); RTC_DCHECK(receiver); @@ -206,12 +264,8 @@ bool RtpTransceiver::RemoveReceiver(RtpReceiverInterface* receiver) { if (it == receivers_.end()) { return false; } + // `Stop()` will clear the internally cached pointer to the media channel. (*it)->internal()->Stop(); - // After the receiver has been removed, there's no guarantee that the - // contained media channel isn't deleted shortly after this. To make sure that - // the receiver doesn't spontaneously try to use it's (potentially stale) - // media channel reference, we clear it out. 
- (*it)->internal()->SetMediaChannel(nullptr); receivers_.erase(it); return true; } @@ -237,7 +291,7 @@ absl::optional RtpTransceiver::mid() const { return mid_; } -void RtpTransceiver::OnFirstPacketReceived(cricket::ChannelInterface*) { +void RtpTransceiver::OnFirstPacketReceived() { for (const auto& receiver : receivers_) { receiver->internal()->NotifyFirstPacketReceived(); } @@ -274,26 +328,47 @@ void RtpTransceiver::set_fired_direction(RtpTransceiverDirection direction) { } bool RtpTransceiver::stopped() const { + RTC_DCHECK_RUN_ON(thread_); return stopped_; } +bool RtpTransceiver::stopping() const { + RTC_DCHECK_RUN_ON(thread_); + return stopping_; +} + RtpTransceiverDirection RtpTransceiver::direction() const { + if (unified_plan_ && stopping()) + return webrtc::RtpTransceiverDirection::kStopped; + return direction_; } -void RtpTransceiver::SetDirection(RtpTransceiverDirection new_direction) { - if (stopped()) { - return; +RTCError RtpTransceiver::SetDirectionWithError( + RtpTransceiverDirection new_direction) { + if (unified_plan_ && stopping()) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_STATE, + "Cannot set direction on a stopping transceiver."); } - if (new_direction == direction_) { - return; + if (new_direction == direction_) + return RTCError::OK(); + + if (new_direction == RtpTransceiverDirection::kStopped) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "The set direction 'stopped' is invalid."); } + direction_ = new_direction; - SignalNegotiationNeeded(); + on_negotiation_needed_(); + + return RTCError::OK(); } absl::optional RtpTransceiver::current_direction() const { + if (unified_plan_ && stopped()) + return webrtc::RtpTransceiverDirection::kStopped; + return current_direction_; } @@ -302,14 +377,79 @@ absl::optional RtpTransceiver::fired_direction() return fired_direction_; } -void RtpTransceiver::Stop() { - for (const auto& sender : senders_) { +void RtpTransceiver::StopSendingAndReceiving() { + // 1. 
Let sender be transceiver.[[Sender]]. + // 2. Let receiver be transceiver.[[Receiver]]. + // + // 3. Stop sending media with sender. + // + // 4. Send an RTCP BYE for each RTP stream that was being sent by sender, as + // specified in [RFC3550]. + RTC_DCHECK_RUN_ON(thread_); + for (const auto& sender : senders_) sender->internal()->Stop(); + + // 5. Stop receiving media with receiver. + for (const auto& receiver : receivers_) + receiver->internal()->StopAndEndTrack(); + + stopping_ = true; + direction_ = webrtc::RtpTransceiverDirection::kInactive; +} + +RTCError RtpTransceiver::StopStandard() { + RTC_DCHECK_RUN_ON(thread_); + // If we're on Plan B, do what Stop() used to do there. + if (!unified_plan_) { + StopInternal(); + return RTCError::OK(); } - for (const auto& receiver : receivers_) { - receiver->internal()->Stop(); + // 1. Let transceiver be the RTCRtpTransceiver object on which the method is + // invoked. + // + // 2. Let connection be the RTCPeerConnection object associated with + // transceiver. + // + // 3. If connection.[[IsClosed]] is true, throw an InvalidStateError. + if (is_pc_closed_) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_STATE, + "PeerConnection is closed."); } + + // 4. If transceiver.[[Stopping]] is true, abort these steps. + if (stopping_) + return RTCError::OK(); + + // 5. Stop sending and receiving given transceiver, and update the + // negotiation-needed flag for connection. + StopSendingAndReceiving(); + on_negotiation_needed_(); + + return RTCError::OK(); +} + +void RtpTransceiver::StopInternal() { + RTC_DCHECK_RUN_ON(thread_); + StopTransceiverProcedure(); +} + +void RtpTransceiver::StopTransceiverProcedure() { + RTC_DCHECK_RUN_ON(thread_); + // As specified in the "Stop the RTCRtpTransceiver" procedure + // 1. If transceiver.[[Stopping]] is false, stop sending and receiving given + // transceiver. + if (!stopping_) + StopSendingAndReceiving(); + + // 2. Set transceiver.[[Stopped]] to true. 
stopped_ = true; + + // Signal the updated change to the senders. + for (const auto& sender : senders_) + sender->internal()->SetTransceiverAsStopped(); + + // 3. Set transceiver.[[Receptive]] to false. + // 4. Set transceiver.[[CurrentDirection]] to null. current_direction_ = absl::nullopt; } @@ -356,7 +496,74 @@ RTCError RtpTransceiver::SetCodecPreferences( std::vector RtpTransceiver::HeaderExtensionsToOffer() const { - return HeaderExtensionsToOffer_; + return header_extensions_to_offer_; +} + +std::vector +RtpTransceiver::HeaderExtensionsNegotiated() const { + RTC_DCHECK_RUN_ON(thread_); + std::vector result; + for (const auto& ext : negotiated_header_extensions_) { + result.emplace_back(ext.uri, ext.id, RtpTransceiverDirection::kSendRecv); + } + return result; +} + +RTCError RtpTransceiver::SetOfferedRtpHeaderExtensions( + rtc::ArrayView + header_extensions_to_offer) { + for (const auto& entry : header_extensions_to_offer) { + // Handle unsupported requests for mandatory extensions as per + // https://w3c.github.io/webrtc-extensions/#rtcrtptransceiver-interface. + // Note: + // - We do not handle setOfferedRtpHeaderExtensions algorithm step 2.1, + // this has to be checked on a higher level. We naturally error out + // in the handling of Step 2.2 if an unset URI is encountered. + + // Step 2.2. + // Handle unknown extensions. + auto it = std::find_if( + header_extensions_to_offer_.begin(), header_extensions_to_offer_.end(), + [&entry](const auto& offered) { return entry.uri == offered.uri; }); + if (it == header_extensions_to_offer_.end()) { + return RTCError(RTCErrorType::UNSUPPORTED_PARAMETER, + "Attempted to modify an unoffered extension."); + } + + // Step 2.4-2.5. + // - Use of the transceiver interface indicates unified plan is in effect, + // hence the MID extension needs to be enabled. + // - Also handle the mandatory video orientation extensions. 
+ if ((entry.uri == RtpExtension::kMidUri || + entry.uri == RtpExtension::kVideoRotationUri) && + entry.direction != RtpTransceiverDirection::kSendRecv) { + return RTCError(RTCErrorType::INVALID_MODIFICATION, + "Attempted to stop a mandatory extension."); + } + } + + // Apply mutation after error checking. + for (const auto& entry : header_extensions_to_offer) { + auto it = std::find_if( + header_extensions_to_offer_.begin(), header_extensions_to_offer_.end(), + [&entry](const auto& offered) { return entry.uri == offered.uri; }); + it->direction = entry.direction; + } + + return RTCError::OK(); +} + +void RtpTransceiver::OnNegotiationUpdate( + SdpType sdp_type, + const cricket::MediaContentDescription* content) { + RTC_DCHECK_RUN_ON(thread_); + RTC_DCHECK(content); + if (sdp_type == SdpType::kAnswer) + negotiated_header_extensions_ = content->rtp_header_extensions(); +} + +void RtpTransceiver::SetPeerConnectionClosed() { + is_pc_closed_ = true; } } // namespace webrtc diff --git a/pc/rtp_transceiver.h b/pc/rtp_transceiver.h index 0668447b9f..6b1307b1db 100644 --- a/pc/rtp_transceiver.h +++ b/pc/rtp_transceiver.h @@ -11,14 +11,33 @@ #ifndef PC_RTP_TRANSCEIVER_H_ #define PC_RTP_TRANSCEIVER_H_ +#include + +#include +#include #include #include +#include "absl/types/optional.h" +#include "api/array_view.h" +#include "api/media_types.h" +#include "api/rtc_error.h" +#include "api/rtp_parameters.h" +#include "api/rtp_transceiver_direction.h" #include "api/rtp_transceiver_interface.h" +#include "api/scoped_refptr.h" +#include "api/task_queue/task_queue_base.h" #include "pc/channel_interface.h" #include "pc/channel_manager.h" +#include "pc/proxy.h" #include "pc/rtp_receiver.h" +#include "pc/rtp_receiver_proxy.h" #include "pc/rtp_sender.h" +#include "pc/rtp_sender_proxy.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread_annotations.h" namespace 
webrtc { @@ -60,7 +79,8 @@ class RtpTransceiver final // channel set. // |media_type| specifies the type of RtpTransceiver (and, by transitivity, // the type of senders, receivers, and channel). Can either by audio or video. - explicit RtpTransceiver(cricket::MediaType media_type); + RtpTransceiver(cricket::MediaType media_type, + cricket::ChannelManager* channel_manager); // Construct a Unified Plan-style RtpTransceiver with the given sender and // receiver. The media type will be derived from the media types of the sender // and receiver. The sender and receiver should have the same media type. @@ -71,7 +91,8 @@ class RtpTransceiver final rtc::scoped_refptr> receiver, cricket::ChannelManager* channel_manager, - std::vector HeaderExtensionsToOffer); + std::vector HeaderExtensionsToOffer, + std::function on_negotiation_needed); ~RtpTransceiver() override; // Returns the Voice/VideoChannel set for this transceiver. May be null if @@ -173,9 +194,17 @@ class RtpTransceiver final return has_ever_been_used_to_send_; } + // Informs the transceiver that its owning + // PeerConnection is closed. + void SetPeerConnectionClosed(); + + // Executes the "stop the RTCRtpTransceiver" procedure from + // the webrtc-pc specification, described under the stop() method. + void StopTransceiverProcedure(); + // Fired when the RtpTransceiver state changes such that negotiation is now // needed (e.g., in response to a direction change). - sigslot::signal0<> SignalNegotiationNeeded; + // sigslot::signal0<> SignalNegotiationNeeded; // RtpTransceiverInterface implementation. 
cricket::MediaType media_type() const override; @@ -183,11 +212,14 @@ class RtpTransceiver final rtc::scoped_refptr sender() const override; rtc::scoped_refptr receiver() const override; bool stopped() const override; + bool stopping() const override; RtpTransceiverDirection direction() const override; - void SetDirection(RtpTransceiverDirection new_direction) override; + RTCError SetDirectionWithError( + RtpTransceiverDirection new_direction) override; absl::optional current_direction() const override; absl::optional fired_direction() const override; - void Stop() override; + RTCError StopStandard() override; + void StopInternal() override; RTCError SetCodecPreferences( rtc::ArrayView codecs) override; std::vector codec_preferences() const override { @@ -195,19 +227,40 @@ class RtpTransceiver final } std::vector HeaderExtensionsToOffer() const override; + std::vector HeaderExtensionsNegotiated() + const override; + RTCError SetOfferedRtpHeaderExtensions( + rtc::ArrayView + header_extensions_to_offer) override; + + // Called on the signaling thread when the local or remote content description + // is updated. Used to update the negotiated header extensions. + // TODO(tommi): The implementation of this method is currently very simple and + // only used for updating the negotiated headers. However, we're planning to + // move all the updates done on the channel from the transceiver into this + // method. This will happen with the ownership of the channel object being + // moved into the transceiver. + void OnNegotiationUpdate(SdpType sdp_type, + const cricket::MediaContentDescription* content); private: - void OnFirstPacketReceived(cricket::ChannelInterface* channel); + void OnFirstPacketReceived(); + void StopSendingAndReceiving(); + // Enforce that this object is created, used and destroyed on one thread. 
+ TaskQueueBase* const thread_; const bool unified_plan_; const cricket::MediaType media_type_; + rtc::scoped_refptr signaling_thread_safety_; std::vector>> senders_; std::vector< rtc::scoped_refptr>> receivers_; - bool stopped_ = false; + bool stopped_ RTC_GUARDED_BY(thread_) = false; + bool stopping_ RTC_GUARDED_BY(thread_) = false; + bool is_pc_closed_ = false; RtpTransceiverDirection direction_ = RtpTransceiverDirection::kInactive; absl::optional current_direction_; absl::optional fired_direction_; @@ -220,28 +273,44 @@ class RtpTransceiver final cricket::ChannelInterface* channel_ = nullptr; cricket::ChannelManager* channel_manager_ = nullptr; std::vector codec_preferences_; - std::vector HeaderExtensionsToOffer_; + std::vector header_extensions_to_offer_; + + // |negotiated_header_extensions_| is read and written to on the signaling + // thread from the SdpOfferAnswerHandler class (e.g. + // PushdownMediaDescription(). + cricket::RtpHeaderExtensions negotiated_header_extensions_ + RTC_GUARDED_BY(thread_); + + const std::function on_negotiation_needed_; }; -BEGIN_SIGNALING_PROXY_MAP(RtpTransceiver) -PROXY_SIGNALING_THREAD_DESTRUCTOR() -PROXY_CONSTMETHOD0(cricket::MediaType, media_type) +BEGIN_PRIMARY_PROXY_MAP(RtpTransceiver) + +PROXY_PRIMARY_THREAD_DESTRUCTOR() +BYPASS_PROXY_CONSTMETHOD0(cricket::MediaType, media_type) PROXY_CONSTMETHOD0(absl::optional, mid) PROXY_CONSTMETHOD0(rtc::scoped_refptr, sender) PROXY_CONSTMETHOD0(rtc::scoped_refptr, receiver) PROXY_CONSTMETHOD0(bool, stopped) +PROXY_CONSTMETHOD0(bool, stopping) PROXY_CONSTMETHOD0(RtpTransceiverDirection, direction) -PROXY_METHOD1(void, SetDirection, RtpTransceiverDirection) +PROXY_METHOD1(webrtc::RTCError, SetDirectionWithError, RtpTransceiverDirection) PROXY_CONSTMETHOD0(absl::optional, current_direction) PROXY_CONSTMETHOD0(absl::optional, fired_direction) -PROXY_METHOD0(void, Stop) +PROXY_METHOD0(webrtc::RTCError, StopStandard) +PROXY_METHOD0(void, StopInternal) PROXY_METHOD1(webrtc::RTCError, 
SetCodecPreferences, rtc::ArrayView) PROXY_CONSTMETHOD0(std::vector, codec_preferences) PROXY_CONSTMETHOD0(std::vector, HeaderExtensionsToOffer) -END_PROXY_MAP() +PROXY_CONSTMETHOD0(std::vector, + HeaderExtensionsNegotiated) +PROXY_METHOD1(webrtc::RTCError, + SetOfferedRtpHeaderExtensions, + rtc::ArrayView) +END_PROXY_MAP(RtpTransceiver) } // namespace webrtc diff --git a/pc/rtp_transceiver_unittest.cc b/pc/rtp_transceiver_unittest.cc index 5e345739f1..0128e912e3 100644 --- a/pc/rtp_transceiver_unittest.cc +++ b/pc/rtp_transceiver_unittest.cc @@ -14,6 +14,8 @@ #include +#include "absl/types/optional.h" +#include "api/rtp_parameters.h" #include "media/base/fake_media_engine.h" #include "pc/test/mock_channel_interface.h" #include "pc/test/mock_rtp_receiver_internal.h" @@ -21,10 +23,10 @@ #include "test/gmock.h" #include "test/gtest.h" +using ::testing::_; using ::testing::ElementsAre; -using ::testing::Eq; -using ::testing::Field; -using ::testing::Not; +using ::testing::Optional; +using ::testing::Property; using ::testing::Return; using ::testing::ReturnRef; @@ -32,19 +34,19 @@ namespace webrtc { // Checks that a channel cannot be set on a stopped |RtpTransceiver|. TEST(RtpTransceiverTest, CannotSetChannelOnStoppedTransceiver) { - RtpTransceiver transceiver(cricket::MediaType::MEDIA_TYPE_AUDIO); + auto cm = cricket::ChannelManager::Create( + nullptr, true, rtc::Thread::Current(), rtc::Thread::Current()); + RtpTransceiver transceiver(cricket::MediaType::MEDIA_TYPE_AUDIO, cm.get()); cricket::MockChannelInterface channel1; - sigslot::signal1 signal; EXPECT_CALL(channel1, media_type()) .WillRepeatedly(Return(cricket::MediaType::MEDIA_TYPE_AUDIO)); - EXPECT_CALL(channel1, SignalFirstPacketReceived()) - .WillRepeatedly(ReturnRef(signal)); + EXPECT_CALL(channel1, SetFirstPacketReceivedCallback(_)); transceiver.SetChannel(&channel1); EXPECT_EQ(&channel1, transceiver.channel()); // Stop the transceiver. 
- transceiver.Stop(); + transceiver.StopInternal(); EXPECT_EQ(&channel1, transceiver.channel()); cricket::MockChannelInterface channel2; @@ -58,19 +60,20 @@ TEST(RtpTransceiverTest, CannotSetChannelOnStoppedTransceiver) { // Checks that a channel can be unset on a stopped |RtpTransceiver| TEST(RtpTransceiverTest, CanUnsetChannelOnStoppedTransceiver) { - RtpTransceiver transceiver(cricket::MediaType::MEDIA_TYPE_VIDEO); + auto cm = cricket::ChannelManager::Create( + nullptr, true, rtc::Thread::Current(), rtc::Thread::Current()); + RtpTransceiver transceiver(cricket::MediaType::MEDIA_TYPE_VIDEO, cm.get()); cricket::MockChannelInterface channel; - sigslot::signal1 signal; EXPECT_CALL(channel, media_type()) .WillRepeatedly(Return(cricket::MediaType::MEDIA_TYPE_VIDEO)); - EXPECT_CALL(channel, SignalFirstPacketReceived()) - .WillRepeatedly(ReturnRef(signal)); + EXPECT_CALL(channel, SetFirstPacketReceivedCallback(_)) + .WillRepeatedly(testing::Return()); transceiver.SetChannel(&channel); EXPECT_EQ(&channel, transceiver.channel()); // Stop the transceiver. - transceiver.Stop(); + transceiver.StopInternal(); EXPECT_EQ(&channel, transceiver.channel()); // Set the channel to |nullptr|. 
@@ -78,27 +81,273 @@ TEST(RtpTransceiverTest, CanUnsetChannelOnStoppedTransceiver) { EXPECT_EQ(nullptr, transceiver.channel()); } -TEST(RtpTransceiverTest, - InitsWithChannelManagerRtpHeaderExtensionCapabilities) { - cricket::ChannelManager channel_manager( - std::make_unique(), - std::make_unique(), rtc::Thread::Current(), - rtc::Thread::Current()); - std::vector extensions({ - RtpHeaderExtensionCapability("uri1", 1, - RtpTransceiverDirection::kSendRecv), - RtpHeaderExtensionCapability("uri2", 2, - RtpTransceiverDirection::kRecvOnly), - }); - RtpTransceiver transceiver( - RtpSenderProxyWithInternal::Create( - rtc::Thread::Current(), - new rtc::RefCountedObject()), - RtpReceiverProxyWithInternal::Create( - rtc::Thread::Current(), - new rtc::RefCountedObject()), - &channel_manager, extensions); - EXPECT_EQ(transceiver.HeaderExtensionsToOffer(), extensions); +class RtpTransceiverUnifiedPlanTest : public ::testing::Test { + public: + RtpTransceiverUnifiedPlanTest() + : channel_manager_(cricket::ChannelManager::Create( + std::make_unique(), + false, + rtc::Thread::Current(), + rtc::Thread::Current())), + transceiver_(RtpSenderProxyWithInternal::Create( + rtc::Thread::Current(), + sender_), + RtpReceiverProxyWithInternal::Create( + rtc::Thread::Current(), + rtc::Thread::Current(), + receiver_), + channel_manager_.get(), + channel_manager_->GetSupportedAudioRtpHeaderExtensions(), + /* on_negotiation_needed= */ [] {}) {} + + static rtc::scoped_refptr MockReceiver() { + auto receiver = rtc::make_ref_counted(); + EXPECT_CALL(*receiver.get(), media_type()) + .WillRepeatedly(Return(cricket::MediaType::MEDIA_TYPE_AUDIO)); + return receiver; + } + + static rtc::scoped_refptr MockSender() { + auto sender = rtc::make_ref_counted(); + EXPECT_CALL(*sender.get(), media_type()) + .WillRepeatedly(Return(cricket::MediaType::MEDIA_TYPE_AUDIO)); + return sender; + } + + rtc::scoped_refptr receiver_ = MockReceiver(); + rtc::scoped_refptr sender_ = MockSender(); + std::unique_ptr 
channel_manager_; + RtpTransceiver transceiver_; +}; + +// Basic tests for Stop() +TEST_F(RtpTransceiverUnifiedPlanTest, StopSetsDirection) { + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + + EXPECT_EQ(RtpTransceiverDirection::kInactive, transceiver_.direction()); + EXPECT_FALSE(transceiver_.current_direction()); + transceiver_.StopStandard(); + EXPECT_EQ(RtpTransceiverDirection::kStopped, transceiver_.direction()); + EXPECT_FALSE(transceiver_.current_direction()); + transceiver_.StopTransceiverProcedure(); + EXPECT_TRUE(transceiver_.current_direction()); + EXPECT_EQ(RtpTransceiverDirection::kStopped, transceiver_.direction()); + EXPECT_EQ(RtpTransceiverDirection::kStopped, + *transceiver_.current_direction()); +} + +class RtpTransceiverTestForHeaderExtensions : public ::testing::Test { + public: + RtpTransceiverTestForHeaderExtensions() + : channel_manager_(cricket::ChannelManager::Create( + std::make_unique(), + false, + rtc::Thread::Current(), + rtc::Thread::Current())), + extensions_( + {RtpHeaderExtensionCapability("uri1", + 1, + RtpTransceiverDirection::kSendOnly), + RtpHeaderExtensionCapability("uri2", + 2, + RtpTransceiverDirection::kRecvOnly), + RtpHeaderExtensionCapability(RtpExtension::kMidUri, + 3, + RtpTransceiverDirection::kSendRecv), + RtpHeaderExtensionCapability(RtpExtension::kVideoRotationUri, + 4, + RtpTransceiverDirection::kSendRecv)}), + transceiver_(RtpSenderProxyWithInternal::Create( + rtc::Thread::Current(), + sender_), + RtpReceiverProxyWithInternal::Create( + rtc::Thread::Current(), + rtc::Thread::Current(), + receiver_), + channel_manager_.get(), + extensions_, + /* on_negotiation_needed= */ [] {}) {} + + static rtc::scoped_refptr MockReceiver() { + auto receiver = rtc::make_ref_counted(); + EXPECT_CALL(*receiver.get(), media_type()) + .WillRepeatedly(Return(cricket::MediaType::MEDIA_TYPE_AUDIO)); + return receiver; + } + + static 
rtc::scoped_refptr MockSender() { + auto sender = rtc::make_ref_counted(); + EXPECT_CALL(*sender.get(), media_type()) + .WillRepeatedly(Return(cricket::MediaType::MEDIA_TYPE_AUDIO)); + return sender; + } + + rtc::scoped_refptr receiver_ = MockReceiver(); + rtc::scoped_refptr sender_ = MockSender(); + + std::unique_ptr channel_manager_; + std::vector extensions_; + RtpTransceiver transceiver_; +}; + +TEST_F(RtpTransceiverTestForHeaderExtensions, OffersChannelManagerList) { + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), extensions_); +} + +TEST_F(RtpTransceiverTestForHeaderExtensions, ModifiesDirection) { + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + + auto modified_extensions = extensions_; + modified_extensions[0].direction = RtpTransceiverDirection::kSendOnly; + EXPECT_TRUE( + transceiver_.SetOfferedRtpHeaderExtensions(modified_extensions).ok()); + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), modified_extensions); + modified_extensions[0].direction = RtpTransceiverDirection::kRecvOnly; + EXPECT_TRUE( + transceiver_.SetOfferedRtpHeaderExtensions(modified_extensions).ok()); + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), modified_extensions); + modified_extensions[0].direction = RtpTransceiverDirection::kSendRecv; + EXPECT_TRUE( + transceiver_.SetOfferedRtpHeaderExtensions(modified_extensions).ok()); + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), modified_extensions); + modified_extensions[0].direction = RtpTransceiverDirection::kInactive; + EXPECT_TRUE( + transceiver_.SetOfferedRtpHeaderExtensions(modified_extensions).ok()); + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), modified_extensions); +} + +TEST_F(RtpTransceiverTestForHeaderExtensions, AcceptsStoppedExtension) { + 
EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + + auto modified_extensions = extensions_; + modified_extensions[0].direction = RtpTransceiverDirection::kStopped; + EXPECT_TRUE( + transceiver_.SetOfferedRtpHeaderExtensions(modified_extensions).ok()); + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), modified_extensions); +} + +TEST_F(RtpTransceiverTestForHeaderExtensions, RejectsUnsupportedExtension) { + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + + std::vector modified_extensions( + {RtpHeaderExtensionCapability("uri3", 1, + RtpTransceiverDirection::kSendRecv)}); + EXPECT_THAT(transceiver_.SetOfferedRtpHeaderExtensions(modified_extensions), + Property(&RTCError::type, RTCErrorType::UNSUPPORTED_PARAMETER)); + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), extensions_); +} + +TEST_F(RtpTransceiverTestForHeaderExtensions, + RejectsStoppedMandatoryExtensions) { + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + + std::vector modified_extensions = extensions_; + // Attempting to stop the mandatory MID extension. + modified_extensions[2].direction = RtpTransceiverDirection::kStopped; + EXPECT_THAT(transceiver_.SetOfferedRtpHeaderExtensions(modified_extensions), + Property(&RTCError::type, RTCErrorType::INVALID_MODIFICATION)); + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), extensions_); + modified_extensions = extensions_; + // Attempting to stop the mandatory video orientation extension. 
+ modified_extensions[3].direction = RtpTransceiverDirection::kStopped; + EXPECT_THAT(transceiver_.SetOfferedRtpHeaderExtensions(modified_extensions), + Property(&RTCError::type, RTCErrorType::INVALID_MODIFICATION)); + EXPECT_EQ(transceiver_.HeaderExtensionsToOffer(), extensions_); +} + +TEST_F(RtpTransceiverTestForHeaderExtensions, + NoNegotiatedHdrExtsWithoutChannel) { + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + EXPECT_THAT(transceiver_.HeaderExtensionsNegotiated(), ElementsAre()); +} + +TEST_F(RtpTransceiverTestForHeaderExtensions, + NoNegotiatedHdrExtsWithChannelWithoutNegotiation) { + EXPECT_CALL(*receiver_.get(), SetMediaChannel(_)); + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetMediaChannel(_)); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + cricket::MockChannelInterface mock_channel; + EXPECT_CALL(mock_channel, SetFirstPacketReceivedCallback(_)); + EXPECT_CALL(mock_channel, media_type()) + .WillRepeatedly(Return(cricket::MediaType::MEDIA_TYPE_AUDIO)); + EXPECT_CALL(mock_channel, media_channel()).WillRepeatedly(Return(nullptr)); + transceiver_.SetChannel(&mock_channel); + EXPECT_THAT(transceiver_.HeaderExtensionsNegotiated(), ElementsAre()); +} + +TEST_F(RtpTransceiverTestForHeaderExtensions, ReturnsNegotiatedHdrExts) { + EXPECT_CALL(*receiver_.get(), SetMediaChannel(_)); + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetMediaChannel(_)); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + + cricket::MockChannelInterface mock_channel; + EXPECT_CALL(mock_channel, SetFirstPacketReceivedCallback(_)); + EXPECT_CALL(mock_channel, media_type()) + .WillRepeatedly(Return(cricket::MediaType::MEDIA_TYPE_AUDIO)); + EXPECT_CALL(mock_channel, media_channel()).WillRepeatedly(Return(nullptr)); + + 
cricket::RtpHeaderExtensions extensions = {webrtc::RtpExtension("uri1", 1), + webrtc::RtpExtension("uri2", 2)}; + cricket::AudioContentDescription description; + description.set_rtp_header_extensions(extensions); + transceiver_.OnNegotiationUpdate(SdpType::kAnswer, &description); + + transceiver_.SetChannel(&mock_channel); + EXPECT_THAT(transceiver_.HeaderExtensionsNegotiated(), + ElementsAre(RtpHeaderExtensionCapability( + "uri1", 1, RtpTransceiverDirection::kSendRecv), + RtpHeaderExtensionCapability( + "uri2", 2, RtpTransceiverDirection::kSendRecv))); +} + +TEST_F(RtpTransceiverTestForHeaderExtensions, + ReturnsNegotiatedHdrExtsSecondTime) { + EXPECT_CALL(*receiver_.get(), StopAndEndTrack()); + EXPECT_CALL(*sender_.get(), SetTransceiverAsStopped()); + EXPECT_CALL(*sender_.get(), Stop()); + + cricket::RtpHeaderExtensions extensions = {webrtc::RtpExtension("uri1", 1), + webrtc::RtpExtension("uri2", 2)}; + cricket::AudioContentDescription description; + description.set_rtp_header_extensions(extensions); + transceiver_.OnNegotiationUpdate(SdpType::kAnswer, &description); + + EXPECT_THAT(transceiver_.HeaderExtensionsNegotiated(), + ElementsAre(RtpHeaderExtensionCapability( + "uri1", 1, RtpTransceiverDirection::kSendRecv), + RtpHeaderExtensionCapability( + "uri2", 2, RtpTransceiverDirection::kSendRecv))); + + extensions = {webrtc::RtpExtension("uri3", 4), + webrtc::RtpExtension("uri5", 6)}; + description.set_rtp_header_extensions(extensions); + transceiver_.OnNegotiationUpdate(SdpType::kAnswer, &description); + + EXPECT_THAT(transceiver_.HeaderExtensionsNegotiated(), + ElementsAre(RtpHeaderExtensionCapability( + "uri3", 4, RtpTransceiverDirection::kSendRecv), + RtpHeaderExtensionCapability( + "uri5", 6, RtpTransceiverDirection::kSendRecv))); } } // namespace webrtc diff --git a/pc/rtp_transmission_manager.cc b/pc/rtp_transmission_manager.cc new file mode 100644 index 0000000000..9040a69699 --- /dev/null +++ b/pc/rtp_transmission_manager.cc @@ -0,0 +1,689 @@ +/* + * 
Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "pc/rtp_transmission_manager.h" + +#include +#include + +#include "absl/types/optional.h" +#include "api/peer_connection_interface.h" +#include "api/rtp_transceiver_direction.h" +#include "pc/audio_rtp_receiver.h" +#include "pc/channel.h" +#include "pc/stats_collector_interface.h" +#include "pc/video_rtp_receiver.h" +#include "rtc_base/checks.h" +#include "rtc_base/helpers.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +namespace { + +static const char kDefaultAudioSenderId[] = "defaulta0"; +static const char kDefaultVideoSenderId[] = "defaultv0"; + +} // namespace + +RtpTransmissionManager::RtpTransmissionManager( + bool is_unified_plan, + rtc::Thread* signaling_thread, + rtc::Thread* worker_thread, + cricket::ChannelManager* channel_manager, + UsagePattern* usage_pattern, + PeerConnectionObserver* observer, + StatsCollectorInterface* stats, + std::function on_negotiation_needed) + : is_unified_plan_(is_unified_plan), + signaling_thread_(signaling_thread), + worker_thread_(worker_thread), + channel_manager_(channel_manager), + usage_pattern_(usage_pattern), + observer_(observer), + stats_(stats), + on_negotiation_needed_(on_negotiation_needed), + weak_ptr_factory_(this) {} + +void RtpTransmissionManager::Close() { + closed_ = true; + observer_ = nullptr; +} + +// Implementation of SetStreamsObserver +void RtpTransmissionManager::OnSetStreams() { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (IsUnifiedPlan()) + OnNegotiationNeeded(); +} + +// Function to call back to the PeerConnection when negotiation is needed +void 
RtpTransmissionManager::OnNegotiationNeeded() { + on_negotiation_needed_(); +} + +// Function that returns the currently valid observer +PeerConnectionObserver* RtpTransmissionManager::Observer() const { + RTC_DCHECK(!closed_); + RTC_DCHECK(observer_); + return observer_; +} + +cricket::VoiceMediaChannel* RtpTransmissionManager::voice_media_channel() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(!IsUnifiedPlan()); + auto* voice_channel = static_cast( + GetAudioTransceiver()->internal()->channel()); + if (voice_channel) { + return voice_channel->media_channel(); + } else { + return nullptr; + } +} + +cricket::VideoMediaChannel* RtpTransmissionManager::video_media_channel() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(!IsUnifiedPlan()); + auto* video_channel = static_cast( + GetVideoTransceiver()->internal()->channel()); + if (video_channel) { + return video_channel->media_channel(); + } else { + return nullptr; + } +} + +RTCErrorOr> +RtpTransmissionManager::AddTrack( + rtc::scoped_refptr track, + const std::vector& stream_ids) { + RTC_DCHECK_RUN_ON(signaling_thread()); + + return (IsUnifiedPlan() ? AddTrackUnifiedPlan(track, stream_ids) + : AddTrackPlanB(track, stream_ids)); +} + +RTCErrorOr> +RtpTransmissionManager::AddTrackPlanB( + rtc::scoped_refptr track, + const std::vector& stream_ids) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (stream_ids.size() > 1u) { + LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_OPERATION, + "AddTrack with more than one stream is not " + "supported with Plan B semantics."); + } + std::vector adjusted_stream_ids = stream_ids; + if (adjusted_stream_ids.empty()) { + adjusted_stream_ids.push_back(rtc::CreateRandomUuid()); + } + cricket::MediaType media_type = + (track->kind() == MediaStreamTrackInterface::kAudioKind + ? 
cricket::MEDIA_TYPE_AUDIO + : cricket::MEDIA_TYPE_VIDEO); + auto new_sender = + CreateSender(media_type, track->id(), track, adjusted_stream_ids, {}); + if (track->kind() == MediaStreamTrackInterface::kAudioKind) { + new_sender->internal()->SetMediaChannel(voice_media_channel()); + GetAudioTransceiver()->internal()->AddSender(new_sender); + const RtpSenderInfo* sender_info = + FindSenderInfo(local_audio_sender_infos_, + new_sender->internal()->stream_ids()[0], track->id()); + if (sender_info) { + new_sender->internal()->SetSsrc(sender_info->first_ssrc); + } + } else { + RTC_DCHECK_EQ(MediaStreamTrackInterface::kVideoKind, track->kind()); + new_sender->internal()->SetMediaChannel(video_media_channel()); + GetVideoTransceiver()->internal()->AddSender(new_sender); + const RtpSenderInfo* sender_info = + FindSenderInfo(local_video_sender_infos_, + new_sender->internal()->stream_ids()[0], track->id()); + if (sender_info) { + new_sender->internal()->SetSsrc(sender_info->first_ssrc); + } + } + return rtc::scoped_refptr(new_sender); +} + +RTCErrorOr> +RtpTransmissionManager::AddTrackUnifiedPlan( + rtc::scoped_refptr track, + const std::vector& stream_ids) { + auto transceiver = FindFirstTransceiverForAddedTrack(track); + if (transceiver) { + RTC_LOG(LS_INFO) << "Reusing an existing " + << cricket::MediaTypeToString(transceiver->media_type()) + << " transceiver for AddTrack."; + if (transceiver->stopping()) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "The existing transceiver is stopping."); + } + + if (transceiver->direction() == RtpTransceiverDirection::kRecvOnly) { + transceiver->internal()->set_direction( + RtpTransceiverDirection::kSendRecv); + } else if (transceiver->direction() == RtpTransceiverDirection::kInactive) { + transceiver->internal()->set_direction( + RtpTransceiverDirection::kSendOnly); + } + transceiver->sender()->SetTrack(track); + transceiver->internal()->sender_internal()->set_stream_ids(stream_ids); + 
transceiver->internal()->set_reused_for_addtrack(true); + } else { + cricket::MediaType media_type = + (track->kind() == MediaStreamTrackInterface::kAudioKind + ? cricket::MEDIA_TYPE_AUDIO + : cricket::MEDIA_TYPE_VIDEO); + RTC_LOG(LS_INFO) << "Adding " << cricket::MediaTypeToString(media_type) + << " transceiver in response to a call to AddTrack."; + std::string sender_id = track->id(); + // Avoid creating a sender with an existing ID by generating a random ID. + // This can happen if this is the second time AddTrack has created a sender + // for this track. + if (FindSenderById(sender_id)) { + sender_id = rtc::CreateRandomUuid(); + } + auto sender = CreateSender(media_type, sender_id, track, stream_ids, {}); + auto receiver = CreateReceiver(media_type, rtc::CreateRandomUuid()); + transceiver = CreateAndAddTransceiver(sender, receiver); + transceiver->internal()->set_created_by_addtrack(true); + transceiver->internal()->set_direction(RtpTransceiverDirection::kSendRecv); + } + return transceiver->sender(); +} + +rtc::scoped_refptr> +RtpTransmissionManager::CreateSender( + cricket::MediaType media_type, + const std::string& id, + rtc::scoped_refptr track, + const std::vector& stream_ids, + const std::vector& send_encodings) { + RTC_DCHECK_RUN_ON(signaling_thread()); + rtc::scoped_refptr> sender; + if (media_type == cricket::MEDIA_TYPE_AUDIO) { + RTC_DCHECK(!track || + (track->kind() == MediaStreamTrackInterface::kAudioKind)); + sender = RtpSenderProxyWithInternal::Create( + signaling_thread(), + AudioRtpSender::Create(worker_thread(), id, stats_, this)); + NoteUsageEvent(UsageEvent::AUDIO_ADDED); + } else { + RTC_DCHECK_EQ(media_type, cricket::MEDIA_TYPE_VIDEO); + RTC_DCHECK(!track || + (track->kind() == MediaStreamTrackInterface::kVideoKind)); + sender = RtpSenderProxyWithInternal::Create( + signaling_thread(), VideoRtpSender::Create(worker_thread(), id, this)); + NoteUsageEvent(UsageEvent::VIDEO_ADDED); + } + bool set_track_succeeded = sender->SetTrack(track); + 
RTC_DCHECK(set_track_succeeded); + sender->internal()->set_stream_ids(stream_ids); + sender->internal()->set_init_send_encodings(send_encodings); + return sender; +} + +rtc::scoped_refptr> +RtpTransmissionManager::CreateReceiver(cricket::MediaType media_type, + const std::string& receiver_id) { + RTC_DCHECK_RUN_ON(signaling_thread()); + rtc::scoped_refptr> + receiver; + if (media_type == cricket::MEDIA_TYPE_AUDIO) { + receiver = RtpReceiverProxyWithInternal::Create( + signaling_thread(), worker_thread(), + rtc::make_ref_counted(worker_thread(), receiver_id, + std::vector({}), + IsUnifiedPlan())); + NoteUsageEvent(UsageEvent::AUDIO_ADDED); + } else { + RTC_DCHECK_EQ(media_type, cricket::MEDIA_TYPE_VIDEO); + receiver = RtpReceiverProxyWithInternal::Create( + signaling_thread(), worker_thread(), + rtc::make_ref_counted(worker_thread(), receiver_id, + std::vector({}))); + NoteUsageEvent(UsageEvent::VIDEO_ADDED); + } + return receiver; +} + +rtc::scoped_refptr> +RtpTransmissionManager::CreateAndAddTransceiver( + rtc::scoped_refptr> sender, + rtc::scoped_refptr> + receiver) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Ensure that the new sender does not have an ID that is already in use by + // another sender. + // Allow receiver IDs to conflict since those come from remote SDP (which + // could be invalid, but should not cause a crash). + RTC_DCHECK(!FindSenderById(sender->id())); + auto transceiver = RtpTransceiverProxyWithInternal::Create( + signaling_thread(), + new RtpTransceiver( + sender, receiver, channel_manager(), + sender->media_type() == cricket::MEDIA_TYPE_AUDIO + ? 
channel_manager()->GetSupportedAudioRtpHeaderExtensions() + : channel_manager()->GetSupportedVideoRtpHeaderExtensions(), + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr()]() { + if (this_weak_ptr) { + this_weak_ptr->OnNegotiationNeeded(); + } + })); + transceivers()->Add(transceiver); + return transceiver; +} + +rtc::scoped_refptr> +RtpTransmissionManager::FindFirstTransceiverForAddedTrack( + rtc::scoped_refptr track) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(track); + for (auto transceiver : transceivers()->List()) { + if (!transceiver->sender()->track() && + cricket::MediaTypeToString(transceiver->media_type()) == + track->kind() && + !transceiver->internal()->has_ever_been_used_to_send() && + !transceiver->stopped()) { + return transceiver; + } + } + return nullptr; +} + +std::vector>> +RtpTransmissionManager::GetSendersInternal() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + std::vector>> + all_senders; + for (const auto& transceiver : transceivers_.List()) { + if (IsUnifiedPlan() && transceiver->internal()->stopped()) + continue; + + auto senders = transceiver->internal()->senders(); + all_senders.insert(all_senders.end(), senders.begin(), senders.end()); + } + return all_senders; +} + +std::vector< + rtc::scoped_refptr>> +RtpTransmissionManager::GetReceiversInternal() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + std::vector< + rtc::scoped_refptr>> + all_receivers; + for (const auto& transceiver : transceivers_.List()) { + if (IsUnifiedPlan() && transceiver->internal()->stopped()) + continue; + + auto receivers = transceiver->internal()->receivers(); + all_receivers.insert(all_receivers.end(), receivers.begin(), + receivers.end()); + } + return all_receivers; +} + +rtc::scoped_refptr> +RtpTransmissionManager::GetAudioTransceiver() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + // This method only works with Plan B SDP, where there is a single + // audio/video transceiver. 
+ RTC_DCHECK(!IsUnifiedPlan()); + for (auto transceiver : transceivers_.List()) { + if (transceiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { + return transceiver; + } + } + RTC_NOTREACHED(); + return nullptr; +} + +rtc::scoped_refptr> +RtpTransmissionManager::GetVideoTransceiver() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + // This method only works with Plan B SDP, where there is a single + // audio/video transceiver. + RTC_DCHECK(!IsUnifiedPlan()); + for (auto transceiver : transceivers_.List()) { + if (transceiver->media_type() == cricket::MEDIA_TYPE_VIDEO) { + return transceiver; + } + } + RTC_NOTREACHED(); + return nullptr; +} + +void RtpTransmissionManager::AddAudioTrack(AudioTrackInterface* track, + MediaStreamInterface* stream) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(track); + RTC_DCHECK(stream); + auto sender = FindSenderForTrack(track); + if (sender) { + // We already have a sender for this track, so just change the stream_id + // so that it's correct in the next call to CreateOffer. + sender->internal()->set_stream_ids({stream->id()}); + return; + } + + // Normal case; we've never seen this track before. + auto new_sender = CreateSender(cricket::MEDIA_TYPE_AUDIO, track->id(), track, + {stream->id()}, {}); + new_sender->internal()->SetMediaChannel(voice_media_channel()); + GetAudioTransceiver()->internal()->AddSender(new_sender); + // If the sender has already been configured in SDP, we call SetSsrc, + // which will connect the sender to the underlying transport. This can + // occur if a local session description that contains the ID of the sender + // is set before AddStream is called. It can also occur if the local + // session description is not changed and RemoveStream is called, and + // later AddStream is called again with the same stream. 
+ const RtpSenderInfo* sender_info = + FindSenderInfo(local_audio_sender_infos_, stream->id(), track->id()); + if (sender_info) { + new_sender->internal()->SetSsrc(sender_info->first_ssrc); + } +} + +// TODO(deadbeef): Don't destroy RtpSenders here; they should be kept around +// indefinitely, when we have unified plan SDP. +void RtpTransmissionManager::RemoveAudioTrack(AudioTrackInterface* track, + MediaStreamInterface* stream) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(!IsUnifiedPlan()); + auto sender = FindSenderForTrack(track); + if (!sender) { + RTC_LOG(LS_WARNING) << "RtpSender for track with id " << track->id() + << " doesn't exist."; + return; + } + GetAudioTransceiver()->internal()->RemoveSender(sender); +} + +void RtpTransmissionManager::AddVideoTrack(VideoTrackInterface* track, + MediaStreamInterface* stream) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(track); + RTC_DCHECK(stream); + auto sender = FindSenderForTrack(track); + if (sender) { + // We already have a sender for this track, so just change the stream_id + // so that it's correct in the next call to CreateOffer. + sender->internal()->set_stream_ids({stream->id()}); + return; + } + + // Normal case; we've never seen this track before. 
+ auto new_sender = CreateSender(cricket::MEDIA_TYPE_VIDEO, track->id(), track, + {stream->id()}, {}); + new_sender->internal()->SetMediaChannel(video_media_channel()); + GetVideoTransceiver()->internal()->AddSender(new_sender); + const RtpSenderInfo* sender_info = + FindSenderInfo(local_video_sender_infos_, stream->id(), track->id()); + if (sender_info) { + new_sender->internal()->SetSsrc(sender_info->first_ssrc); + } +} + +void RtpTransmissionManager::RemoveVideoTrack(VideoTrackInterface* track, + MediaStreamInterface* stream) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(!IsUnifiedPlan()); + auto sender = FindSenderForTrack(track); + if (!sender) { + RTC_LOG(LS_WARNING) << "RtpSender for track with id " << track->id() + << " doesn't exist."; + return; + } + GetVideoTransceiver()->internal()->RemoveSender(sender); +} + +void RtpTransmissionManager::CreateAudioReceiver( + MediaStreamInterface* stream, + const RtpSenderInfo& remote_sender_info) { + RTC_DCHECK(!closed_); + std::vector> streams; + streams.push_back(rtc::scoped_refptr(stream)); + // TODO(https://crbug.com/webrtc/9480): When we remove remote_streams(), use + // the constructor taking stream IDs instead. 
+ auto audio_receiver = rtc::make_ref_counted( + worker_thread(), remote_sender_info.sender_id, streams, IsUnifiedPlan()); + audio_receiver->SetMediaChannel(voice_media_channel()); + if (remote_sender_info.sender_id == kDefaultAudioSenderId) { + audio_receiver->SetupUnsignaledMediaChannel(); + } else { + audio_receiver->SetupMediaChannel(remote_sender_info.first_ssrc); + } + auto receiver = RtpReceiverProxyWithInternal::Create( + signaling_thread(), worker_thread(), std::move(audio_receiver)); + GetAudioTransceiver()->internal()->AddReceiver(receiver); + Observer()->OnAddTrack(receiver, streams); + NoteUsageEvent(UsageEvent::AUDIO_ADDED); +} + +void RtpTransmissionManager::CreateVideoReceiver( + MediaStreamInterface* stream, + const RtpSenderInfo& remote_sender_info) { + RTC_DCHECK(!closed_); + std::vector> streams; + streams.push_back(rtc::scoped_refptr(stream)); + // TODO(https://crbug.com/webrtc/9480): When we remove remote_streams(), use + // the constructor taking stream IDs instead. + auto video_receiver = rtc::make_ref_counted( + worker_thread(), remote_sender_info.sender_id, streams); + video_receiver->SetMediaChannel(video_media_channel()); + if (remote_sender_info.sender_id == kDefaultVideoSenderId) { + video_receiver->SetupUnsignaledMediaChannel(); + } else { + video_receiver->SetupMediaChannel(remote_sender_info.first_ssrc); + } + auto receiver = RtpReceiverProxyWithInternal::Create( + signaling_thread(), worker_thread(), std::move(video_receiver)); + GetVideoTransceiver()->internal()->AddReceiver(receiver); + Observer()->OnAddTrack(receiver, streams); + NoteUsageEvent(UsageEvent::VIDEO_ADDED); +} + +// TODO(deadbeef): Keep RtpReceivers around even if track goes away in remote +// description. 
+rtc::scoped_refptr +RtpTransmissionManager::RemoveAndStopReceiver( + const RtpSenderInfo& remote_sender_info) { + auto receiver = FindReceiverById(remote_sender_info.sender_id); + if (!receiver) { + RTC_LOG(LS_WARNING) << "RtpReceiver for track with id " + << remote_sender_info.sender_id << " doesn't exist."; + return nullptr; + } + if (receiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { + GetAudioTransceiver()->internal()->RemoveReceiver(receiver); + } else { + GetVideoTransceiver()->internal()->RemoveReceiver(receiver); + } + return receiver; +} + +void RtpTransmissionManager::OnRemoteSenderAdded( + const RtpSenderInfo& sender_info, + MediaStreamInterface* stream, + cricket::MediaType media_type) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_LOG(LS_INFO) << "Creating " << cricket::MediaTypeToString(media_type) + << " receiver for track_id=" << sender_info.sender_id + << " and stream_id=" << sender_info.stream_id; + + if (media_type == cricket::MEDIA_TYPE_AUDIO) { + CreateAudioReceiver(stream, sender_info); + } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { + CreateVideoReceiver(stream, sender_info); + } else { + RTC_NOTREACHED() << "Invalid media type"; + } +} + +void RtpTransmissionManager::OnRemoteSenderRemoved( + const RtpSenderInfo& sender_info, + MediaStreamInterface* stream, + cricket::MediaType media_type) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_LOG(LS_INFO) << "Removing " << cricket::MediaTypeToString(media_type) + << " receiver for track_id=" << sender_info.sender_id + << " and stream_id=" << sender_info.stream_id; + + rtc::scoped_refptr receiver; + if (media_type == cricket::MEDIA_TYPE_AUDIO) { + // When the MediaEngine audio channel is destroyed, the RemoteAudioSource + // will be notified which will end the AudioRtpReceiver::track(). 
+ receiver = RemoveAndStopReceiver(sender_info); + rtc::scoped_refptr audio_track = + stream->FindAudioTrack(sender_info.sender_id); + if (audio_track) { + stream->RemoveTrack(audio_track); + } + } else if (media_type == cricket::MEDIA_TYPE_VIDEO) { + // Stopping or destroying a VideoRtpReceiver will end the + // VideoRtpReceiver::track(). + receiver = RemoveAndStopReceiver(sender_info); + rtc::scoped_refptr video_track = + stream->FindVideoTrack(sender_info.sender_id); + if (video_track) { + // There's no guarantee the track is still available, e.g. the track may + // have been removed from the stream by an application. + stream->RemoveTrack(video_track); + } + } else { + RTC_NOTREACHED() << "Invalid media type"; + } + if (receiver) { + RTC_DCHECK(!closed_); + Observer()->OnRemoveTrack(receiver); + } +} + +void RtpTransmissionManager::OnLocalSenderAdded( + const RtpSenderInfo& sender_info, + cricket::MediaType media_type) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(!IsUnifiedPlan()); + auto sender = FindSenderById(sender_info.sender_id); + if (!sender) { + RTC_LOG(LS_WARNING) << "An unknown RtpSender with id " + << sender_info.sender_id + << " has been configured in the local description."; + return; + } + + if (sender->media_type() != media_type) { + RTC_LOG(LS_WARNING) << "An RtpSender has been configured in the local" + " description with an unexpected media type."; + return; + } + + sender->internal()->set_stream_ids({sender_info.stream_id}); + sender->internal()->SetSsrc(sender_info.first_ssrc); +} + +void RtpTransmissionManager::OnLocalSenderRemoved( + const RtpSenderInfo& sender_info, + cricket::MediaType media_type) { + RTC_DCHECK_RUN_ON(signaling_thread()); + auto sender = FindSenderById(sender_info.sender_id); + if (!sender) { + // This is the normal case. I.e., RemoveStream has been called and the + // SessionDescriptions has been renegotiated. 
+ return; + } + + // A sender has been removed from the SessionDescription but it's still + // associated with the PeerConnection. This only occurs if the SDP doesn't + // match with the calls to CreateSender, AddStream and RemoveStream. + if (sender->media_type() != media_type) { + RTC_LOG(LS_WARNING) << "An RtpSender has been configured in the local" + " description with an unexpected media type."; + return; + } + + sender->internal()->SetSsrc(0); +} + +std::vector* RtpTransmissionManager::GetRemoteSenderInfos( + cricket::MediaType media_type) { + RTC_DCHECK(media_type == cricket::MEDIA_TYPE_AUDIO || + media_type == cricket::MEDIA_TYPE_VIDEO); + return (media_type == cricket::MEDIA_TYPE_AUDIO) + ? &remote_audio_sender_infos_ + : &remote_video_sender_infos_; +} + +std::vector* RtpTransmissionManager::GetLocalSenderInfos( + cricket::MediaType media_type) { + RTC_DCHECK(media_type == cricket::MEDIA_TYPE_AUDIO || + media_type == cricket::MEDIA_TYPE_VIDEO); + return (media_type == cricket::MEDIA_TYPE_AUDIO) ? 
&local_audio_sender_infos_ + : &local_video_sender_infos_; +} + +const RtpSenderInfo* RtpTransmissionManager::FindSenderInfo( + const std::vector& infos, + const std::string& stream_id, + const std::string sender_id) const { + for (const RtpSenderInfo& sender_info : infos) { + if (sender_info.stream_id == stream_id && + sender_info.sender_id == sender_id) { + return &sender_info; + } + } + return nullptr; +} + +rtc::scoped_refptr> +RtpTransmissionManager::FindSenderForTrack( + MediaStreamTrackInterface* track) const { + RTC_DCHECK_RUN_ON(signaling_thread()); + for (const auto& transceiver : transceivers_.List()) { + for (auto sender : transceiver->internal()->senders()) { + if (sender->track() == track) { + return sender; + } + } + } + return nullptr; +} + +rtc::scoped_refptr> +RtpTransmissionManager::FindSenderById(const std::string& sender_id) const { + RTC_DCHECK_RUN_ON(signaling_thread()); + for (const auto& transceiver : transceivers_.List()) { + for (auto sender : transceiver->internal()->senders()) { + if (sender->id() == sender_id) { + return sender; + } + } + } + return nullptr; +} + +rtc::scoped_refptr> +RtpTransmissionManager::FindReceiverById(const std::string& receiver_id) const { + RTC_DCHECK_RUN_ON(signaling_thread()); + for (const auto& transceiver : transceivers_.List()) { + for (auto receiver : transceiver->internal()->receivers()) { + if (receiver->id() == receiver_id) { + return receiver; + } + } + } + return nullptr; +} + +} // namespace webrtc diff --git a/pc/rtp_transmission_manager.h b/pc/rtp_transmission_manager.h new file mode 100644 index 0000000000..fe0e3abdd3 --- /dev/null +++ b/pc/rtp_transmission_manager.h @@ -0,0 +1,269 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_RTP_TRANSMISSION_MANAGER_H_ +#define PC_RTP_TRANSMISSION_MANAGER_H_ + +#include + +#include +#include +#include + +#include "api/media_stream_interface.h" +#include "api/media_types.h" +#include "api/peer_connection_interface.h" +#include "api/rtc_error.h" +#include "api/rtp_parameters.h" +#include "api/rtp_receiver_interface.h" +#include "api/rtp_sender_interface.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "media/base/media_channel.h" +#include "pc/channel_manager.h" +#include "pc/rtp_receiver.h" +#include "pc/rtp_sender.h" +#include "pc/rtp_transceiver.h" +#include "pc/stats_collector_interface.h" +#include "pc/transceiver_list.h" +#include "pc/usage_pattern.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" +#include "rtc_base/weak_ptr.h" + +namespace rtc { +class Thread; +} + +namespace webrtc { + +// This class contains information about +// an RTPSender, used for things like looking it up by SSRC. +struct RtpSenderInfo { + RtpSenderInfo() : first_ssrc(0) {} + RtpSenderInfo(const std::string& stream_id, + const std::string sender_id, + uint32_t ssrc) + : stream_id(stream_id), sender_id(sender_id), first_ssrc(ssrc) {} + bool operator==(const RtpSenderInfo& other) { + return this->stream_id == other.stream_id && + this->sender_id == other.sender_id && + this->first_ssrc == other.first_ssrc; + } + std::string stream_id; + std::string sender_id; + // An RtpSender can have many SSRCs. The first one is used as a sort of ID + // for communicating with the lower layers. + uint32_t first_ssrc; +}; + +// The RtpTransmissionManager class is responsible for managing the lifetime +// and relationships between objects of type RtpSender, RtpReceiver and +// RtpTransceiver. 
+class RtpTransmissionManager : public RtpSenderBase::SetStreamsObserver { + public: + RtpTransmissionManager(bool is_unified_plan, + rtc::Thread* signaling_thread, + rtc::Thread* worker_thread, + cricket::ChannelManager* channel_manager, + UsagePattern* usage_pattern, + PeerConnectionObserver* observer, + StatsCollectorInterface* stats_, + std::function on_negotiation_needed); + + // No move or copy permitted. + RtpTransmissionManager(const RtpTransmissionManager&) = delete; + RtpTransmissionManager& operator=(const RtpTransmissionManager&) = delete; + + // Stop activity. In particular, don't call observer_ any more. + void Close(); + + // RtpSenderBase::SetStreamsObserver override. + void OnSetStreams() override; + + // Add a new track, creating transceiver if required. + RTCErrorOr> AddTrack( + rtc::scoped_refptr track, + const std::vector& stream_ids); + + // Create a new RTP sender. Does not associate with a transceiver. + rtc::scoped_refptr> + CreateSender(cricket::MediaType media_type, + const std::string& id, + rtc::scoped_refptr track, + const std::vector& stream_ids, + const std::vector& send_encodings); + + // Create a new RTP receiver. Does not associate with a transceiver. + rtc::scoped_refptr> + CreateReceiver(cricket::MediaType media_type, const std::string& receiver_id); + + // Create a new RtpTransceiver of the given type and add it to the list of + // registered transceivers. + rtc::scoped_refptr> + CreateAndAddTransceiver( + rtc::scoped_refptr> sender, + rtc::scoped_refptr> + receiver); + + // Returns the first RtpTransceiver suitable for a newly added track, if such + // transceiver is available. 
+ rtc::scoped_refptr> + FindFirstTransceiverForAddedTrack( + rtc::scoped_refptr track); + + // Returns the list of senders currently associated with some + // registered transceiver + std::vector>> + GetSendersInternal() const; + + // Returns the list of receivers currently associated with a transceiver + std::vector< + rtc::scoped_refptr>> + GetReceiversInternal() const; + + // Plan B: Get the transceiver containing all audio senders and receivers + rtc::scoped_refptr> + GetAudioTransceiver() const; + // Plan B: Get the transceiver containing all video senders and receivers + rtc::scoped_refptr> + GetVideoTransceiver() const; + + // Add an audio track, reusing or creating the sender. + void AddAudioTrack(AudioTrackInterface* track, MediaStreamInterface* stream); + // Plan B: Remove an audio track, removing the sender. + void RemoveAudioTrack(AudioTrackInterface* track, + MediaStreamInterface* stream); + // Add a video track, reusing or creating the sender. + void AddVideoTrack(VideoTrackInterface* track, MediaStreamInterface* stream); + // Plan B: Remove a video track, removing the sender. + void RemoveVideoTrack(VideoTrackInterface* track, + MediaStreamInterface* stream); + + // Triggered when a remote sender has been seen for the first time in a remote + // session description. It creates a remote MediaStreamTrackInterface + // implementation and triggers CreateAudioReceiver or CreateVideoReceiver. + void OnRemoteSenderAdded(const RtpSenderInfo& sender_info, + MediaStreamInterface* stream, + cricket::MediaType media_type); + + // Triggered when a remote sender has been removed from a remote session + // description. It removes the remote sender with id |sender_id| from a remote + // MediaStream and triggers DestroyAudioReceiver or DestroyVideoReceiver. 
+ void OnRemoteSenderRemoved(const RtpSenderInfo& sender_info, + MediaStreamInterface* stream, + cricket::MediaType media_type); + + // Triggered when a local sender has been seen for the first time in a local + // session description. + // This method triggers CreateAudioSender or CreateVideoSender if the rtp + // streams in the local SessionDescription can be mapped to a MediaStreamTrack + // in a MediaStream in |local_streams_| + void OnLocalSenderAdded(const RtpSenderInfo& sender_info, + cricket::MediaType media_type); + + // Triggered when a local sender has been removed from a local session + // description. + // This method triggers DestroyAudioSender or DestroyVideoSender if a stream + // has been removed from the local SessionDescription and the stream can be + // mapped to a MediaStreamTrack in a MediaStream in |local_streams_|. + void OnLocalSenderRemoved(const RtpSenderInfo& sender_info, + cricket::MediaType media_type); + + std::vector* GetRemoteSenderInfos( + cricket::MediaType media_type); + std::vector* GetLocalSenderInfos( + cricket::MediaType media_type); + const RtpSenderInfo* FindSenderInfo(const std::vector& infos, + const std::string& stream_id, + const std::string sender_id) const; + + // Return the RtpSender with the given track attached. + rtc::scoped_refptr> + FindSenderForTrack(MediaStreamTrackInterface* track) const; + + // Return the RtpSender with the given id, or null if none exists. + rtc::scoped_refptr> + FindSenderById(const std::string& sender_id) const; + + // Return the RtpReceiver with the given id, or null if none exists. + rtc::scoped_refptr> + FindReceiverById(const std::string& receiver_id) const; + + TransceiverList* transceivers() { return &transceivers_; } + const TransceiverList* transceivers() const { return &transceivers_; } + + // Plan B helpers for getting the voice/video media channels for the single + // audio/video transceiver, if it exists. 
+ cricket::VoiceMediaChannel* voice_media_channel() const; + cricket::VideoMediaChannel* video_media_channel() const; + + private: + rtc::Thread* signaling_thread() const { return signaling_thread_; } + rtc::Thread* worker_thread() const { return worker_thread_; } + cricket::ChannelManager* channel_manager() const { return channel_manager_; } + bool IsUnifiedPlan() const { return is_unified_plan_; } + void NoteUsageEvent(UsageEvent event) { + usage_pattern_->NoteUsageEvent(event); + } + + // AddTrack implementation when Unified Plan is specified. + RTCErrorOr> AddTrackUnifiedPlan( + rtc::scoped_refptr track, + const std::vector& stream_ids); + // AddTrack implementation when Plan B is specified. + RTCErrorOr> AddTrackPlanB( + rtc::scoped_refptr track, + const std::vector& stream_ids); + + // Create an RtpReceiver that sources an audio track. + void CreateAudioReceiver(MediaStreamInterface* stream, + const RtpSenderInfo& remote_sender_info) + RTC_RUN_ON(signaling_thread()); + + // Create an RtpReceiver that sources a video track. + void CreateVideoReceiver(MediaStreamInterface* stream, + const RtpSenderInfo& remote_sender_info) + RTC_RUN_ON(signaling_thread()); + rtc::scoped_refptr RemoveAndStopReceiver( + const RtpSenderInfo& remote_sender_info) RTC_RUN_ON(signaling_thread()); + + PeerConnectionObserver* Observer() const; + void OnNegotiationNeeded(); + + TransceiverList transceivers_; + + // These lists store sender info seen in local/remote descriptions. 
+ std::vector remote_audio_sender_infos_ + RTC_GUARDED_BY(signaling_thread()); + std::vector remote_video_sender_infos_ + RTC_GUARDED_BY(signaling_thread()); + std::vector local_audio_sender_infos_ + RTC_GUARDED_BY(signaling_thread()); + std::vector local_video_sender_infos_ + RTC_GUARDED_BY(signaling_thread()); + + bool closed_ = false; + bool const is_unified_plan_; + rtc::Thread* signaling_thread_; + rtc::Thread* worker_thread_; + cricket::ChannelManager* channel_manager_; + UsagePattern* usage_pattern_; + PeerConnectionObserver* observer_; + StatsCollectorInterface* const stats_; + std::function on_negotiation_needed_; + rtc::WeakPtrFactory weak_ptr_factory_ + RTC_GUARDED_BY(signaling_thread()); +}; + +} // namespace webrtc + +#endif // PC_RTP_TRANSMISSION_MANAGER_H_ diff --git a/pc/rtp_transport.cc b/pc/rtp_transport.cc index fe7357fc94..d4edb9501c 100644 --- a/pc/rtp_transport.cc +++ b/pc/rtp_transport.cc @@ -11,12 +11,11 @@ #include "pc/rtp_transport.h" #include - #include #include -#include "api/rtp_headers.h" -#include "api/rtp_parameters.h" +#include "absl/strings/string_view.h" +#include "api/array_view.h" #include "media/base/rtp_utils.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "rtc_base/checks.h" @@ -182,16 +181,16 @@ bool RtpTransport::UnregisterRtpDemuxerSink(RtpPacketSinkInterface* sink) { void RtpTransport::DemuxPacket(rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) { - webrtc::RtpPacketReceived parsed_packet(&header_extension_map_); + webrtc::RtpPacketReceived parsed_packet( + &header_extension_map_, packet_time_us == -1 + ? Timestamp::MinusInfinity() + : Timestamp::Micros(packet_time_us)); if (!parsed_packet.Parse(std::move(packet))) { RTC_LOG(LS_ERROR) << "Failed to parse the incoming RTP packet before demuxing. 
Drop it."; return; } - if (packet_time_us != -1) { - parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000); - } if (!rtp_demuxer_.OnRtpPacket(parsed_packet)) { RTC_LOG(LS_WARNING) << "Failed to demux RTP packet: " << RtpDemuxer::DescribePacket(parsed_packet); diff --git a/pc/rtp_transport.h b/pc/rtp_transport.h index 57ad9e5fd0..893d91e734 100644 --- a/pc/rtp_transport.h +++ b/pc/rtp_transport.h @@ -11,11 +11,22 @@ #ifndef PC_RTP_TRANSPORT_H_ #define PC_RTP_TRANSPORT_H_ +#include +#include + #include +#include "absl/types/optional.h" #include "call/rtp_demuxer.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" +#include "p2p/base/packet_transport_internal.h" #include "pc/rtp_transport_internal.h" +#include "pc/session_description.h" +#include "rtc_base/async_packet_socket.h" +#include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/network/sent_packet.h" +#include "rtc_base/network_route.h" +#include "rtc_base/socket.h" #include "rtc_base/third_party/sigslot/sigslot.h" namespace rtc { diff --git a/pc/scenario_tests/goog_cc_test.cc b/pc/scenario_tests/goog_cc_test.cc index fba617dd5c..d9e27e2edf 100644 --- a/pc/scenario_tests/goog_cc_test.cc +++ b/pc/scenario_tests/goog_cc_test.cc @@ -32,10 +32,7 @@ TEST(GoogCcPeerScenarioTest, MAYBE_NoBweChangeFromVideoUnmute) { // packets sizes. This will create a change in propagation time which might be // detected as an overuse. Using separate overuse detectors for audio and // video avoids the issue. - std::string audio_twcc_trials( - "WebRTC-Audio-SendSideBwe/Enabled/" // - "WebRTC-SendSideBwe-WithOverhead/Enabled/" // - "WebRTC-Audio-AlrProbing/Disabled/"); + std::string audio_twcc_trials("WebRTC-Audio-AlrProbing/Disabled/"); std::string separate_audio_video( "WebRTC-Bwe-SeparateAudioPackets/" "enabled:true,packet_threshold:15,time_threshold:1000ms/"); @@ -76,8 +73,8 @@ TEST(GoogCcPeerScenarioTest, MAYBE_NoBweChangeFromVideoUnmute) { ASSERT_EQ(num_video_streams, 1); // Exactly 1 video stream. 
auto get_bwe = [&] { - rtc::scoped_refptr callback( - new rtc::RefCountedObject()); + auto callback = + rtc::make_ref_counted(); caller->pc()->GetStats(callback); s.net()->time_controller()->Wait([&] { return callback->called(); }); auto stats = diff --git a/pc/sctp_data_channel.cc b/pc/sctp_data_channel.cc new file mode 100644 index 0000000000..0e4ef7de88 --- /dev/null +++ b/pc/sctp_data_channel.cc @@ -0,0 +1,742 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "pc/sctp_data_channel.h" + +#include +#include +#include +#include + +#include "media/sctp/sctp_transport_internal.h" +#include "pc/proxy.h" +#include "pc/sctp_utils.h" +#include "rtc_base/checks.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/system/unused.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/thread.h" + +namespace webrtc { + +namespace { + +static size_t kMaxQueuedReceivedDataBytes = 16 * 1024 * 1024; +static size_t kMaxQueuedSendDataBytes = 16 * 1024 * 1024; + +static std::atomic g_unique_id{0}; + +int GenerateUniqueId() { + return ++g_unique_id; +} + +// Define proxy for DataChannelInterface. 
+BEGIN_PRIMARY_PROXY_MAP(DataChannel) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +PROXY_METHOD1(void, RegisterObserver, DataChannelObserver*) +PROXY_METHOD0(void, UnregisterObserver) +BYPASS_PROXY_CONSTMETHOD0(std::string, label) +BYPASS_PROXY_CONSTMETHOD0(bool, reliable) +BYPASS_PROXY_CONSTMETHOD0(bool, ordered) +BYPASS_PROXY_CONSTMETHOD0(uint16_t, maxRetransmitTime) +BYPASS_PROXY_CONSTMETHOD0(uint16_t, maxRetransmits) +BYPASS_PROXY_CONSTMETHOD0(absl::optional, maxRetransmitsOpt) +BYPASS_PROXY_CONSTMETHOD0(absl::optional, maxPacketLifeTime) +BYPASS_PROXY_CONSTMETHOD0(std::string, protocol) +BYPASS_PROXY_CONSTMETHOD0(bool, negotiated) +// Can't bypass the proxy since the id may change. +PROXY_CONSTMETHOD0(int, id) +BYPASS_PROXY_CONSTMETHOD0(Priority, priority) +PROXY_CONSTMETHOD0(DataState, state) +PROXY_CONSTMETHOD0(RTCError, error) +PROXY_CONSTMETHOD0(uint32_t, messages_sent) +PROXY_CONSTMETHOD0(uint64_t, bytes_sent) +PROXY_CONSTMETHOD0(uint32_t, messages_received) +PROXY_CONSTMETHOD0(uint64_t, bytes_received) +PROXY_CONSTMETHOD0(uint64_t, buffered_amount) +PROXY_METHOD0(void, Close) +// TODO(bugs.webrtc.org/11547): Change to run on the network thread. +PROXY_METHOD1(bool, Send, const DataBuffer&) +END_PROXY_MAP(DataChannel) + +} // namespace + +InternalDataChannelInit::InternalDataChannelInit(const DataChannelInit& base) + : DataChannelInit(base), open_handshake_role(kOpener) { + // If the channel is externally negotiated, do not send the OPEN message. + if (base.negotiated) { + open_handshake_role = kNone; + } else { + // Datachannel is externally negotiated. Ignore the id value. + // Specified in createDataChannel, WebRTC spec section 6.1 bullet 13. + id = -1; + } + // Backwards compatibility: If maxRetransmits or maxRetransmitTime + // are negative, the feature is not enabled. + // Values are clamped to a 16bit range. 
+ if (maxRetransmits) { + if (*maxRetransmits < 0) { + RTC_LOG(LS_ERROR) + << "Accepting maxRetransmits < 0 for backwards compatibility"; + maxRetransmits = absl::nullopt; + } else if (*maxRetransmits > std::numeric_limits::max()) { + maxRetransmits = std::numeric_limits::max(); + } + } + + if (maxRetransmitTime) { + if (*maxRetransmitTime < 0) { + RTC_LOG(LS_ERROR) + << "Accepting maxRetransmitTime < 0 for backwards compatibility"; + maxRetransmitTime = absl::nullopt; + } else if (*maxRetransmitTime > std::numeric_limits::max()) { + maxRetransmitTime = std::numeric_limits::max(); + } + } +} + +bool SctpSidAllocator::AllocateSid(rtc::SSLRole role, int* sid) { + int potential_sid = (role == rtc::SSL_CLIENT) ? 0 : 1; + while (!IsSidAvailable(potential_sid)) { + potential_sid += 2; + if (potential_sid > static_cast(cricket::kMaxSctpSid)) { + return false; + } + } + + *sid = potential_sid; + used_sids_.insert(potential_sid); + return true; +} + +bool SctpSidAllocator::ReserveSid(int sid) { + if (!IsSidAvailable(sid)) { + return false; + } + used_sids_.insert(sid); + return true; +} + +void SctpSidAllocator::ReleaseSid(int sid) { + auto it = used_sids_.find(sid); + if (it != used_sids_.end()) { + used_sids_.erase(it); + } +} + +bool SctpSidAllocator::IsSidAvailable(int sid) const { + if (sid < static_cast(cricket::kMinSctpSid) || + sid > static_cast(cricket::kMaxSctpSid)) { + return false; + } + return used_sids_.find(sid) == used_sids_.end(); +} + +rtc::scoped_refptr SctpDataChannel::Create( + SctpDataChannelProviderInterface* provider, + const std::string& label, + const InternalDataChannelInit& config, + rtc::Thread* signaling_thread, + rtc::Thread* network_thread) { + auto channel = rtc::make_ref_counted( + config, provider, label, signaling_thread, network_thread); + if (!channel->Init()) { + return nullptr; + } + return channel; +} + +// static +rtc::scoped_refptr SctpDataChannel::CreateProxy( + rtc::scoped_refptr channel) { + // TODO(bugs.webrtc.org/11547): 
incorporate the network thread in the proxy. + // Also, consider allowing the proxy object to own the reference (std::move). + // As is, the proxy has a raw pointer and no reference to the channel object + // and trusting that the lifetime management aligns with the + // sctp_data_channels_ array in SctpDataChannelController. + return DataChannelProxy::Create(channel->signaling_thread_, channel.get()); +} + +SctpDataChannel::SctpDataChannel(const InternalDataChannelInit& config, + SctpDataChannelProviderInterface* provider, + const std::string& label, + rtc::Thread* signaling_thread, + rtc::Thread* network_thread) + : signaling_thread_(signaling_thread), + network_thread_(network_thread), + internal_id_(GenerateUniqueId()), + label_(label), + config_(config), + observer_(nullptr), + provider_(provider) { + RTC_DCHECK_RUN_ON(signaling_thread_); + RTC_UNUSED(network_thread_); +} + +bool SctpDataChannel::Init() { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (config_.id < -1 || + (config_.maxRetransmits && *config_.maxRetransmits < 0) || + (config_.maxRetransmitTime && *config_.maxRetransmitTime < 0)) { + RTC_LOG(LS_ERROR) << "Failed to initialize the SCTP data channel due to " + "invalid DataChannelInit."; + return false; + } + if (config_.maxRetransmits && config_.maxRetransmitTime) { + RTC_LOG(LS_ERROR) + << "maxRetransmits and maxRetransmitTime should not be both set."; + return false; + } + + switch (config_.open_handshake_role) { + case webrtc::InternalDataChannelInit::kNone: // pre-negotiated + handshake_state_ = kHandshakeReady; + break; + case webrtc::InternalDataChannelInit::kOpener: + handshake_state_ = kHandshakeShouldSendOpen; + break; + case webrtc::InternalDataChannelInit::kAcker: + handshake_state_ = kHandshakeShouldSendAck; + break; + } + + // Try to connect to the transport in case the transport channel already + // exists. 
+ OnTransportChannelCreated(); + + // Checks if the transport is ready to send because the initial channel + // ready signal may have been sent before the DataChannel creation. + // This has to be done async because the upper layer objects (e.g. + // Chrome glue and WebKit) are not wired up properly until after this + // function returns. + if (provider_->ReadyToSendData()) { + AddRef(); + rtc::Thread::Current()->PostTask(ToQueuedTask( + [this] { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (state_ != kClosed) + OnTransportReady(true); + }, + [this] { Release(); })); + } + + return true; +} + +SctpDataChannel::~SctpDataChannel() { + RTC_DCHECK_RUN_ON(signaling_thread_); +} + +void SctpDataChannel::RegisterObserver(DataChannelObserver* observer) { + RTC_DCHECK_RUN_ON(signaling_thread_); + observer_ = observer; + DeliverQueuedReceivedData(); +} + +void SctpDataChannel::UnregisterObserver() { + RTC_DCHECK_RUN_ON(signaling_thread_); + observer_ = nullptr; +} + +bool SctpDataChannel::reliable() const { + // May be called on any thread. + return !config_.maxRetransmits && !config_.maxRetransmitTime; +} + +uint64_t SctpDataChannel::buffered_amount() const { + RTC_DCHECK_RUN_ON(signaling_thread_); + return buffered_amount_; +} + +void SctpDataChannel::Close() { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (state_ == kClosed) + return; + SetState(kClosing); + // Will send queued data before beginning the underlying closing procedure. 
+ UpdateState(); +} + +SctpDataChannel::DataState SctpDataChannel::state() const { + RTC_DCHECK_RUN_ON(signaling_thread_); + return state_; +} + +RTCError SctpDataChannel::error() const { + RTC_DCHECK_RUN_ON(signaling_thread_); + return error_; +} + +uint32_t SctpDataChannel::messages_sent() const { + RTC_DCHECK_RUN_ON(signaling_thread_); + return messages_sent_; +} + +uint64_t SctpDataChannel::bytes_sent() const { + RTC_DCHECK_RUN_ON(signaling_thread_); + return bytes_sent_; +} + +uint32_t SctpDataChannel::messages_received() const { + RTC_DCHECK_RUN_ON(signaling_thread_); + return messages_received_; +} + +uint64_t SctpDataChannel::bytes_received() const { + RTC_DCHECK_RUN_ON(signaling_thread_); + return bytes_received_; +} + +bool SctpDataChannel::Send(const DataBuffer& buffer) { + RTC_DCHECK_RUN_ON(signaling_thread_); + // TODO(bugs.webrtc.org/11547): Expect this method to be called on the network + // thread. Bring buffer management etc to the network thread and keep the + // operational state management on the signaling thread. + + if (state_ != kOpen) { + return false; + } + + buffered_amount_ += buffer.size(); + + // If the queue is non-empty, we're waiting for SignalReadyToSend, + // so just add to the end of the queue and keep waiting. + if (!queued_send_data_.Empty()) { + if (!QueueSendDataMessage(buffer)) { + RTC_LOG(LS_ERROR) << "Closing the DataChannel due to a failure to queue " + "additional data."; + // https://w3c.github.io/webrtc-pc/#dom-rtcdatachannel-send step 5 + // Note that the spec doesn't explicitly say to close in this situation. + CloseAbruptlyWithError(RTCError(RTCErrorType::RESOURCE_EXHAUSTED, + "Unable to queue data for sending")); + } + return true; + } + + SendDataMessage(buffer, true); + + // Always return true for SCTP DataChannel per the spec. 
+ return true; +} + +void SctpDataChannel::SetSctpSid(int sid) { + RTC_DCHECK_RUN_ON(signaling_thread_); + RTC_DCHECK_LT(config_.id, 0); + RTC_DCHECK_GE(sid, 0); + RTC_DCHECK_NE(handshake_state_, kHandshakeWaitingForAck); + RTC_DCHECK_EQ(state_, kConnecting); + + if (config_.id == sid) { + return; + } + + const_cast(config_).id = sid; + provider_->AddSctpDataStream(sid); +} + +void SctpDataChannel::OnClosingProcedureStartedRemotely(int sid) { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (sid == config_.id && state_ != kClosing && state_ != kClosed) { + // Don't bother sending queued data since the side that initiated the + // closure wouldn't receive it anyway. See crbug.com/559394 for a lengthy + // discussion about this. + queued_send_data_.Clear(); + queued_control_data_.Clear(); + // Just need to change state to kClosing, SctpTransport will handle the + // rest of the closing procedure and OnClosingProcedureComplete will be + // called later. + started_closing_procedure_ = true; + SetState(kClosing); + } +} + +void SctpDataChannel::OnClosingProcedureComplete(int sid) { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (sid == config_.id) { + // If the closing procedure is complete, we should have finished sending + // all pending data and transitioned to kClosing already. + RTC_DCHECK_EQ(state_, kClosing); + RTC_DCHECK(queued_send_data_.Empty()); + DisconnectFromProvider(); + SetState(kClosed); + } +} + +void SctpDataChannel::OnTransportChannelCreated() { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (!connected_to_provider_) { + connected_to_provider_ = provider_->ConnectDataChannel(this); + } + // The sid may have been unassigned when provider_->ConnectDataChannel was + // done. So always add the streams even if connected_to_provider_ is true. 
+ if (config_.id >= 0) { + provider_->AddSctpDataStream(config_.id); + } +} + +void SctpDataChannel::OnTransportChannelClosed(RTCError error) { + // The SctpTransport is unusable, which could come from multiplie reasons: + // - the SCTP m= section was rejected + // - the DTLS transport is closed + // - the SCTP transport is closed + CloseAbruptlyWithError(std::move(error)); +} + +DataChannelStats SctpDataChannel::GetStats() const { + RTC_DCHECK_RUN_ON(signaling_thread_); + DataChannelStats stats{internal_id_, id(), label(), + protocol(), state(), messages_sent(), + messages_received(), bytes_sent(), bytes_received()}; + return stats; +} + +void SctpDataChannel::OnDataReceived(const cricket::ReceiveDataParams& params, + const rtc::CopyOnWriteBuffer& payload) { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (params.sid != config_.id) { + return; + } + + if (params.type == DataMessageType::kControl) { + if (handshake_state_ != kHandshakeWaitingForAck) { + // Ignore it if we are not expecting an ACK message. + RTC_LOG(LS_WARNING) + << "DataChannel received unexpected CONTROL message, sid = " + << params.sid; + return; + } + if (ParseDataChannelOpenAckMessage(payload)) { + // We can send unordered as soon as we receive the ACK message. + handshake_state_ = kHandshakeReady; + RTC_LOG(LS_INFO) << "DataChannel received OPEN_ACK message, sid = " + << params.sid; + } else { + RTC_LOG(LS_WARNING) + << "DataChannel failed to parse OPEN_ACK message, sid = " + << params.sid; + } + return; + } + + RTC_DCHECK(params.type == DataMessageType::kBinary || + params.type == DataMessageType::kText); + + RTC_LOG(LS_VERBOSE) << "DataChannel received DATA message, sid = " + << params.sid; + // We can send unordered as soon as we receive any DATA message since the + // remote side must have received the OPEN (and old clients do not send + // OPEN_ACK). 
+ if (handshake_state_ == kHandshakeWaitingForAck) { + handshake_state_ = kHandshakeReady; + } + + bool binary = (params.type == webrtc::DataMessageType::kBinary); + auto buffer = std::make_unique(payload, binary); + if (state_ == kOpen && observer_) { + ++messages_received_; + bytes_received_ += buffer->size(); + observer_->OnMessage(*buffer.get()); + } else { + if (queued_received_data_.byte_count() + payload.size() > + kMaxQueuedReceivedDataBytes) { + RTC_LOG(LS_ERROR) << "Queued received data exceeds the max buffer size."; + + queued_received_data_.Clear(); + CloseAbruptlyWithError( + RTCError(RTCErrorType::RESOURCE_EXHAUSTED, + "Queued received data exceeds the max buffer size.")); + + return; + } + queued_received_data_.PushBack(std::move(buffer)); + } +} + +void SctpDataChannel::OnTransportReady(bool writable) { + RTC_DCHECK_RUN_ON(signaling_thread_); + + writable_ = writable; + if (!writable) { + return; + } + + SendQueuedControlMessages(); + SendQueuedDataMessages(); + + UpdateState(); +} + +void SctpDataChannel::CloseAbruptlyWithError(RTCError error) { + RTC_DCHECK_RUN_ON(signaling_thread_); + + if (state_ == kClosed) { + return; + } + + if (connected_to_provider_) { + DisconnectFromProvider(); + } + + // Closing abruptly means any queued data gets thrown away. + buffered_amount_ = 0; + + queued_send_data_.Clear(); + queued_control_data_.Clear(); + + // Still go to "kClosing" before "kClosed", since observers may be expecting + // that. + SetState(kClosing); + error_ = std::move(error); + SetState(kClosed); +} + +void SctpDataChannel::CloseAbruptlyWithDataChannelFailure( + const std::string& message) { + RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, message); + error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); + CloseAbruptlyWithError(std::move(error)); +} + +void SctpDataChannel::UpdateState() { + RTC_DCHECK_RUN_ON(signaling_thread_); + // UpdateState determines what to do from a few state variables. 
Include + // all conditions required for each state transition here for + // clarity. OnTransportReady(true) will send any queued data and then invoke + // UpdateState(). + + switch (state_) { + case kConnecting: { + if (connected_to_provider_) { + if (handshake_state_ == kHandshakeShouldSendOpen) { + rtc::CopyOnWriteBuffer payload; + WriteDataChannelOpenMessage(label_, config_, &payload); + SendControlMessage(payload); + } else if (handshake_state_ == kHandshakeShouldSendAck) { + rtc::CopyOnWriteBuffer payload; + WriteDataChannelOpenAckMessage(&payload); + SendControlMessage(payload); + } + if (writable_ && (handshake_state_ == kHandshakeReady || + handshake_state_ == kHandshakeWaitingForAck)) { + SetState(kOpen); + // If we have received buffers before the channel got writable. + // Deliver them now. + DeliverQueuedReceivedData(); + } + } + break; + } + case kOpen: { + break; + } + case kClosing: { + // Wait for all queued data to be sent before beginning the closing + // procedure. + if (queued_send_data_.Empty() && queued_control_data_.Empty()) { + // For SCTP data channels, we need to wait for the closing procedure + // to complete; after calling RemoveSctpDataStream, + // OnClosingProcedureComplete will end up called asynchronously + // afterwards. 
+ if (connected_to_provider_ && !started_closing_procedure_ && + config_.id >= 0) { + started_closing_procedure_ = true; + provider_->RemoveSctpDataStream(config_.id); + } + } + break; + } + case kClosed: + break; + } +} + +void SctpDataChannel::SetState(DataState state) { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (state_ == state) { + return; + } + + state_ = state; + if (observer_) { + observer_->OnStateChange(); + } + if (state_ == kOpen) { + SignalOpened(this); + } else if (state_ == kClosed) { + SignalClosed(this); + } +} + +void SctpDataChannel::DisconnectFromProvider() { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (!connected_to_provider_) + return; + + provider_->DisconnectDataChannel(this); + connected_to_provider_ = false; +} + +void SctpDataChannel::DeliverQueuedReceivedData() { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (!observer_) { + return; + } + + while (!queued_received_data_.Empty()) { + std::unique_ptr buffer = queued_received_data_.PopFront(); + ++messages_received_; + bytes_received_ += buffer->size(); + observer_->OnMessage(*buffer); + } +} + +void SctpDataChannel::SendQueuedDataMessages() { + RTC_DCHECK_RUN_ON(signaling_thread_); + if (queued_send_data_.Empty()) { + return; + } + + RTC_DCHECK(state_ == kOpen || state_ == kClosing); + + while (!queued_send_data_.Empty()) { + std::unique_ptr buffer = queued_send_data_.PopFront(); + if (!SendDataMessage(*buffer, false)) { + // Return the message to the front of the queue if sending is aborted. + queued_send_data_.PushFront(std::move(buffer)); + break; + } + } +} + +bool SctpDataChannel::SendDataMessage(const DataBuffer& buffer, + bool queue_if_blocked) { + RTC_DCHECK_RUN_ON(signaling_thread_); + SendDataParams send_params; + + send_params.ordered = config_.ordered; + // Send as ordered if it is still going through OPEN/ACK signaling. 
+ if (handshake_state_ != kHandshakeReady && !config_.ordered) { + send_params.ordered = true; + RTC_LOG(LS_VERBOSE) + << "Sending data as ordered for unordered DataChannel " + "because the OPEN_ACK message has not been received."; + } + + send_params.max_rtx_count = config_.maxRetransmits; + send_params.max_rtx_ms = config_.maxRetransmitTime; + send_params.type = + buffer.binary ? DataMessageType::kBinary : DataMessageType::kText; + + cricket::SendDataResult send_result = cricket::SDR_SUCCESS; + bool success = + provider_->SendData(config_.id, send_params, buffer.data, &send_result); + + if (success) { + ++messages_sent_; + bytes_sent_ += buffer.size(); + + RTC_DCHECK(buffered_amount_ >= buffer.size()); + buffered_amount_ -= buffer.size(); + if (observer_ && buffer.size() > 0) { + observer_->OnBufferedAmountChange(buffer.size()); + } + return true; + } + + if (send_result == cricket::SDR_BLOCK) { + if (!queue_if_blocked || QueueSendDataMessage(buffer)) { + return false; + } + } + // Close the channel if the error is not SDR_BLOCK, or if queuing the + // message failed. 
+ RTC_LOG(LS_ERROR) << "Closing the DataChannel due to a failure to send data, " + "send_result = " + << send_result; + CloseAbruptlyWithError( + RTCError(RTCErrorType::NETWORK_ERROR, "Failure to send data")); + + return false; +} + +bool SctpDataChannel::QueueSendDataMessage(const DataBuffer& buffer) { + RTC_DCHECK_RUN_ON(signaling_thread_); + size_t start_buffered_amount = queued_send_data_.byte_count(); + if (start_buffered_amount + buffer.size() > kMaxQueuedSendDataBytes) { + RTC_LOG(LS_ERROR) << "Can't buffer any more data for the data channel."; + return false; + } + queued_send_data_.PushBack(std::make_unique(buffer)); + return true; +} + +void SctpDataChannel::SendQueuedControlMessages() { + RTC_DCHECK_RUN_ON(signaling_thread_); + PacketQueue control_packets; + control_packets.Swap(&queued_control_data_); + + while (!control_packets.Empty()) { + std::unique_ptr buf = control_packets.PopFront(); + SendControlMessage(buf->data); + } +} + +void SctpDataChannel::QueueControlMessage( + const rtc::CopyOnWriteBuffer& buffer) { + RTC_DCHECK_RUN_ON(signaling_thread_); + queued_control_data_.PushBack(std::make_unique(buffer, true)); +} + +bool SctpDataChannel::SendControlMessage(const rtc::CopyOnWriteBuffer& buffer) { + RTC_DCHECK_RUN_ON(signaling_thread_); + RTC_DCHECK(writable_); + RTC_DCHECK_GE(config_.id, 0); + + bool is_open_message = handshake_state_ == kHandshakeShouldSendOpen; + RTC_DCHECK(!is_open_message || !config_.negotiated); + + SendDataParams send_params; + // Send data as ordered before we receive any message from the remote peer to + // make sure the remote peer will not receive any data before it receives the + // OPEN message. 
+ send_params.ordered = config_.ordered || is_open_message; + send_params.type = DataMessageType::kControl; + + cricket::SendDataResult send_result = cricket::SDR_SUCCESS; + bool retval = + provider_->SendData(config_.id, send_params, buffer, &send_result); + if (retval) { + RTC_LOG(LS_VERBOSE) << "Sent CONTROL message on channel " << config_.id; + + if (handshake_state_ == kHandshakeShouldSendAck) { + handshake_state_ = kHandshakeReady; + } else if (handshake_state_ == kHandshakeShouldSendOpen) { + handshake_state_ = kHandshakeWaitingForAck; + } + } else if (send_result == cricket::SDR_BLOCK) { + QueueControlMessage(buffer); + } else { + RTC_LOG(LS_ERROR) << "Closing the DataChannel due to a failure to send" + " the CONTROL message, send_result = " + << send_result; + CloseAbruptlyWithError(RTCError(RTCErrorType::NETWORK_ERROR, + "Failed to send a CONTROL message")); + } + return retval; +} + +// static +void SctpDataChannel::ResetInternalIdAllocatorForTesting(int new_value) { + g_unique_id = new_value; +} + +} // namespace webrtc diff --git a/pc/sctp_data_channel.h b/pc/sctp_data_channel.h new file mode 100644 index 0000000000..b0df48758b --- /dev/null +++ b/pc/sctp_data_channel.h @@ -0,0 +1,289 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef PC_SCTP_DATA_CHANNEL_H_ +#define PC_SCTP_DATA_CHANNEL_H_ + +#include + +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/data_channel_interface.h" +#include "api/priority.h" +#include "api/rtc_error.h" +#include "api/scoped_refptr.h" +#include "api/transport/data_channel_transport_interface.h" +#include "media/base/media_channel.h" +#include "pc/data_channel_utils.h" +#include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/ssl_stream_adapter.h" // For SSLRole +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +class SctpDataChannel; + +// TODO(deadbeef): Get rid of this and have SctpDataChannel depend on +// SctpTransportInternal (pure virtual SctpTransport interface) instead. +class SctpDataChannelProviderInterface { + public: + // Sends the data to the transport. + virtual bool SendData(int sid, + const SendDataParams& params, + const rtc::CopyOnWriteBuffer& payload, + cricket::SendDataResult* result) = 0; + // Connects to the transport signals. + virtual bool ConnectDataChannel(SctpDataChannel* data_channel) = 0; + // Disconnects from the transport signals. + virtual void DisconnectDataChannel(SctpDataChannel* data_channel) = 0; + // Adds the data channel SID to the transport for SCTP. + virtual void AddSctpDataStream(int sid) = 0; + // Begins the closing procedure by sending an outgoing stream reset. Still + // need to wait for callbacks to tell when this completes. + virtual void RemoveSctpDataStream(int sid) = 0; + // Returns true if the transport channel is ready to send data. + virtual bool ReadyToSendData() const = 0; + + protected: + virtual ~SctpDataChannelProviderInterface() {} +}; + +// TODO(tommi): Change to not inherit from DataChannelInit but to have it as +// a const member. Block access to the 'id' member since it cannot be const. 
+struct InternalDataChannelInit : public DataChannelInit { + enum OpenHandshakeRole { kOpener, kAcker, kNone }; + // The default role is kOpener because the default |negotiated| is false. + InternalDataChannelInit() : open_handshake_role(kOpener) {} + explicit InternalDataChannelInit(const DataChannelInit& base); + OpenHandshakeRole open_handshake_role; +}; + +// Helper class to allocate unique IDs for SCTP DataChannels. +class SctpSidAllocator { + public: + // Gets the first unused odd/even id based on the DTLS role. If |role| is + // SSL_CLIENT, the allocated id starts from 0 and takes even numbers; + // otherwise, the id starts from 1 and takes odd numbers. + // Returns false if no ID can be allocated. + bool AllocateSid(rtc::SSLRole role, int* sid); + + // Attempts to reserve a specific sid. Returns false if it's unavailable. + bool ReserveSid(int sid); + + // Indicates that |sid| isn't in use any more, and is thus available again. + void ReleaseSid(int sid); + + private: + // Checks if |sid| is available to be assigned to a new SCTP data channel. + bool IsSidAvailable(int sid) const; + + std::set used_sids_; +}; + +// SctpDataChannel is an implementation of the DataChannelInterface based on +// SctpTransport. It provides an implementation of unreliable or +// reliabledata channels. + +// DataChannel states: +// kConnecting: The channel has been created the transport might not yet be +// ready. +// kOpen: The open handshake has been performed (if relevant) and the data +// channel is able to send messages. +// kClosing: DataChannelInterface::Close has been called, or the remote side +// initiated the closing procedure, but the closing procedure has not +// yet finished. +// kClosed: The closing handshake is finished (possibly initiated from this, +// side, possibly from the peer). +// +// How the closing procedure works for SCTP: +// 1. Alice calls Close(), state changes to kClosing. +// 2. Alice finishes sending any queued data. +// 3. 
Alice calls RemoveSctpDataStream, sends outgoing stream reset. +// 4. Bob receives incoming stream reset; OnClosingProcedureStartedRemotely +// called. +// 5. Bob sends outgoing stream reset. +// 6. Alice receives incoming reset, Bob receives acknowledgement. Both receive +// OnClosingProcedureComplete callback and transition to kClosed. +class SctpDataChannel : public DataChannelInterface, + public sigslot::has_slots<> { + public: + static rtc::scoped_refptr Create( + SctpDataChannelProviderInterface* provider, + const std::string& label, + const InternalDataChannelInit& config, + rtc::Thread* signaling_thread, + rtc::Thread* network_thread); + + // Instantiates an API proxy for a SctpDataChannel instance that will be + // handed out to external callers. + static rtc::scoped_refptr CreateProxy( + rtc::scoped_refptr channel); + + void RegisterObserver(DataChannelObserver* observer) override; + void UnregisterObserver() override; + + std::string label() const override { return label_; } + bool reliable() const override; + bool ordered() const override { return config_.ordered; } + // Backwards compatible accessors + uint16_t maxRetransmitTime() const override { + return config_.maxRetransmitTime ? *config_.maxRetransmitTime + : static_cast(-1); + } + uint16_t maxRetransmits() const override { + return config_.maxRetransmits ? *config_.maxRetransmits + : static_cast(-1); + } + absl::optional maxPacketLifeTime() const override { + return config_.maxRetransmitTime; + } + absl::optional maxRetransmitsOpt() const override { + return config_.maxRetransmits; + } + std::string protocol() const override { return config_.protocol; } + bool negotiated() const override { return config_.negotiated; } + int id() const override { return config_.id; } + Priority priority() const override { + return config_.priority ? 
*config_.priority : Priority::kLow; + } + + virtual int internal_id() const { return internal_id_; } + + uint64_t buffered_amount() const override; + void Close() override; + DataState state() const override; + RTCError error() const override; + uint32_t messages_sent() const override; + uint64_t bytes_sent() const override; + uint32_t messages_received() const override; + uint64_t bytes_received() const override; + bool Send(const DataBuffer& buffer) override; + + // Close immediately, ignoring any queued data or closing procedure. + // This is called when the underlying SctpTransport is being destroyed. + // It is also called by the PeerConnection if SCTP ID assignment fails. + void CloseAbruptlyWithError(RTCError error); + // Specializations of CloseAbruptlyWithError + void CloseAbruptlyWithDataChannelFailure(const std::string& message); + + // Slots for provider to connect signals to. + // + // TODO(deadbeef): Make these private once we're hooking up signals ourselves, + // instead of relying on SctpDataChannelProviderInterface. + + // Called when the SctpTransport's ready to use. That can happen when we've + // finished negotiation, or if the channel was created after negotiation has + // already finished. + void OnTransportReady(bool writable); + + void OnDataReceived(const cricket::ReceiveDataParams& params, + const rtc::CopyOnWriteBuffer& payload); + + // Sets the SCTP sid and adds to transport layer if not set yet. Should only + // be called once. + void SetSctpSid(int sid); + // The remote side started the closing procedure by resetting its outgoing + // stream (our incoming stream). Sets state to kClosing. + void OnClosingProcedureStartedRemotely(int sid); + // The closing procedure is complete; both incoming and outgoing stream + // resets are done and the channel can transition to kClosed. Called + // asynchronously after RemoveSctpDataStream. + void OnClosingProcedureComplete(int sid); + // Called when the transport channel is created. 
+ // Only needs to be called for SCTP data channels. + void OnTransportChannelCreated(); + // Called when the transport channel is unusable. + // This method makes sure the DataChannel is disconnected and changes state + // to kClosed. + void OnTransportChannelClosed(RTCError error); + + DataChannelStats GetStats() const; + + // Emitted when state transitions to kOpen. + sigslot::signal1 SignalOpened; + // Emitted when state transitions to kClosed. + // This signal can be used to tell when the channel's sid is free. + sigslot::signal1 SignalClosed; + + // Reset the allocator for internal ID values for testing, so that + // the internal IDs generated are predictable. Test only. + static void ResetInternalIdAllocatorForTesting(int new_value); + + protected: + SctpDataChannel(const InternalDataChannelInit& config, + SctpDataChannelProviderInterface* client, + const std::string& label, + rtc::Thread* signaling_thread, + rtc::Thread* network_thread); + ~SctpDataChannel() override; + + private: + // The OPEN(_ACK) signaling state. 
+ enum HandshakeState { + kHandshakeInit, + kHandshakeShouldSendOpen, + kHandshakeShouldSendAck, + kHandshakeWaitingForAck, + kHandshakeReady + }; + + bool Init(); + void UpdateState(); + void SetState(DataState state); + void DisconnectFromProvider(); + + void DeliverQueuedReceivedData(); + + void SendQueuedDataMessages(); + bool SendDataMessage(const DataBuffer& buffer, bool queue_if_blocked); + bool QueueSendDataMessage(const DataBuffer& buffer); + + void SendQueuedControlMessages(); + void QueueControlMessage(const rtc::CopyOnWriteBuffer& buffer); + bool SendControlMessage(const rtc::CopyOnWriteBuffer& buffer); + + rtc::Thread* const signaling_thread_; + rtc::Thread* const network_thread_; + const int internal_id_; + const std::string label_; + const InternalDataChannelInit config_; + DataChannelObserver* observer_ RTC_GUARDED_BY(signaling_thread_) = nullptr; + DataState state_ RTC_GUARDED_BY(signaling_thread_) = kConnecting; + RTCError error_ RTC_GUARDED_BY(signaling_thread_); + uint32_t messages_sent_ RTC_GUARDED_BY(signaling_thread_) = 0; + uint64_t bytes_sent_ RTC_GUARDED_BY(signaling_thread_) = 0; + uint32_t messages_received_ RTC_GUARDED_BY(signaling_thread_) = 0; + uint64_t bytes_received_ RTC_GUARDED_BY(signaling_thread_) = 0; + // Number of bytes of data that have been queued using Send(). Increased + // before each transport send and decreased after each successful send. + uint64_t buffered_amount_ RTC_GUARDED_BY(signaling_thread_) = 0; + SctpDataChannelProviderInterface* const provider_ + RTC_GUARDED_BY(signaling_thread_); + HandshakeState handshake_state_ RTC_GUARDED_BY(signaling_thread_) = + kHandshakeInit; + bool connected_to_provider_ RTC_GUARDED_BY(signaling_thread_) = false; + bool writable_ RTC_GUARDED_BY(signaling_thread_) = false; + // Did we already start the graceful SCTP closing procedure? 
+ bool started_closing_procedure_ RTC_GUARDED_BY(signaling_thread_) = false; + // Control messages that always have to get sent out before any queued + // data. + PacketQueue queued_control_data_ RTC_GUARDED_BY(signaling_thread_); + PacketQueue queued_received_data_ RTC_GUARDED_BY(signaling_thread_); + PacketQueue queued_send_data_ RTC_GUARDED_BY(signaling_thread_); +}; + +} // namespace webrtc + +#endif // PC_SCTP_DATA_CHANNEL_H_ diff --git a/pc/sctp_data_channel_transport.cc b/pc/sctp_data_channel_transport.cc index 497e11fcc9..f01f86ebd8 100644 --- a/pc/sctp_data_channel_transport.cc +++ b/pc/sctp_data_channel_transport.cc @@ -9,6 +9,8 @@ */ #include "pc/sctp_data_channel_transport.h" + +#include "absl/types/optional.h" #include "pc/sctp_utils.h" namespace webrtc { @@ -37,18 +39,8 @@ RTCError SctpDataChannelTransport::SendData( int channel_id, const SendDataParams& params, const rtc::CopyOnWriteBuffer& buffer) { - // Map webrtc::SendDataParams to cricket::SendDataParams. - // TODO(mellem): See about unifying these structs. - cricket::SendDataParams sd_params; - sd_params.sid = channel_id; - sd_params.type = ToCricketDataMessageType(params.type); - sd_params.ordered = params.ordered; - sd_params.reliable = !(params.max_rtx_count || params.max_rtx_ms); - sd_params.max_rtx_count = params.max_rtx_count.value_or(-1); - sd_params.max_rtx_ms = params.max_rtx_ms.value_or(-1); - cricket::SendDataResult result; - sctp_transport_->SendData(sd_params, buffer, &result); + sctp_transport_->SendData(channel_id, params, buffer, &result); // TODO(mellem): See about changing the interfaces to not require mapping // SendDataResult to RTCError and back again. 
@@ -93,8 +85,7 @@ void SctpDataChannelTransport::OnDataReceived( const cricket::ReceiveDataParams& params, const rtc::CopyOnWriteBuffer& buffer) { if (sink_) { - sink_->OnDataReceived(params.sid, ToWebrtcDataMessageType(params.type), - buffer); + sink_->OnDataReceived(params.sid, params.type, buffer); } } @@ -111,9 +102,9 @@ void SctpDataChannelTransport::OnClosingProcedureComplete(int channel_id) { } } -void SctpDataChannelTransport::OnClosedAbruptly() { +void SctpDataChannelTransport::OnClosedAbruptly(RTCError error) { if (sink_) { - sink_->OnTransportClosed(); + sink_->OnTransportClosed(error); } } diff --git a/pc/sctp_data_channel_transport.h b/pc/sctp_data_channel_transport.h index 623a490053..4b89205ea1 100644 --- a/pc/sctp_data_channel_transport.h +++ b/pc/sctp_data_channel_transport.h @@ -11,8 +11,11 @@ #ifndef PC_SCTP_DATA_CHANNEL_TRANSPORT_H_ #define PC_SCTP_DATA_CHANNEL_TRANSPORT_H_ +#include "api/rtc_error.h" #include "api/transport/data_channel_transport_interface.h" +#include "media/base/media_channel.h" #include "media/sctp/sctp_transport_internal.h" +#include "rtc_base/copy_on_write_buffer.h" #include "rtc_base/third_party/sigslot/sigslot.h" namespace webrtc { @@ -38,7 +41,7 @@ class SctpDataChannelTransport : public DataChannelTransportInterface, const rtc::CopyOnWriteBuffer& buffer); void OnClosingProcedureStartedRemotely(int channel_id); void OnClosingProcedureComplete(int channel_id); - void OnClosedAbruptly(); + void OnClosedAbruptly(RTCError error); cricket::SctpTransportInternal* const sctp_transport_; diff --git a/pc/sctp_transport.cc b/pc/sctp_transport.cc index 532e91c67d..7d4e4551f1 100644 --- a/pc/sctp_transport.cc +++ b/pc/sctp_transport.cc @@ -13,6 +13,13 @@ #include #include +#include "absl/types/optional.h" +#include "api/dtls_transport_interface.h" +#include "api/sequence_checker.h" +#include "rtc_base/checks.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" + namespace webrtc { SctpTransport::SctpTransport( @@ -39,7 
+46,15 @@ SctpTransport::~SctpTransport() { } SctpTransportInformation SctpTransport::Information() const { - rtc::CritScope scope(&lock_); + // TODO(tommi): Update PeerConnection::GetSctpTransport to hand out a proxy + // to the transport so that we can be sure that methods get called on the + // expected thread. Chromium currently calls this method from + // TransceiverStateSurfacer. + if (!owner_thread_->IsCurrent()) { + return owner_thread_->Invoke( + RTC_FROM_HERE, [this] { return Information(); }); + } + RTC_DCHECK_RUN_ON(owner_thread_); return info_; } @@ -65,111 +80,91 @@ rtc::scoped_refptr SctpTransport::dtls_transport() void SctpTransport::Clear() { RTC_DCHECK_RUN_ON(owner_thread_); RTC_DCHECK(internal()); - { - rtc::CritScope scope(&lock_); - // Note that we delete internal_sctp_transport_, but - // only drop the reference to dtls_transport_. - dtls_transport_ = nullptr; - internal_sctp_transport_ = nullptr; - } + // Note that we delete internal_sctp_transport_, but + // only drop the reference to dtls_transport_. 
+ dtls_transport_ = nullptr; + internal_sctp_transport_ = nullptr; UpdateInformation(SctpTransportState::kClosed); } void SctpTransport::SetDtlsTransport( rtc::scoped_refptr transport) { RTC_DCHECK_RUN_ON(owner_thread_); - SctpTransportState next_state; - { - rtc::CritScope scope(&lock_); - next_state = info_.state(); - dtls_transport_ = transport; - if (internal_sctp_transport_) { - if (transport) { - internal_sctp_transport_->SetDtlsTransport(transport->internal()); - transport->internal()->SignalDtlsState.connect( - this, &SctpTransport::OnDtlsStateChange); - if (info_.state() == SctpTransportState::kNew) { - next_state = SctpTransportState::kConnecting; - } - } else { - internal_sctp_transport_->SetDtlsTransport(nullptr); + SctpTransportState next_state = info_.state(); + dtls_transport_ = transport; + if (internal_sctp_transport_) { + if (transport) { + internal_sctp_transport_->SetDtlsTransport(transport->internal()); + + transport->internal()->SubscribeDtlsTransportState( + [this](cricket::DtlsTransportInternal* transport, + DtlsTransportState state) { + OnDtlsStateChange(transport, state); + }); + if (info_.state() == SctpTransportState::kNew) { + next_state = SctpTransportState::kConnecting; } + } else { + internal_sctp_transport_->SetDtlsTransport(nullptr); } } + UpdateInformation(next_state); } void SctpTransport::Start(int local_port, int remote_port, int max_message_size) { - { - rtc::CritScope scope(&lock_); - // Record max message size on calling thread. 
- info_ = SctpTransportInformation(info_.state(), info_.dtls_transport(), - max_message_size, info_.MaxChannels()); - } - if (owner_thread_->IsCurrent()) { - if (!internal()->Start(local_port, remote_port, max_message_size)) { - RTC_LOG(LS_ERROR) << "Failed to push down SCTP parameters, closing."; - UpdateInformation(SctpTransportState::kClosed); - } - } else { - owner_thread_->Invoke( - RTC_FROM_HERE, rtc::Bind(&SctpTransport::Start, this, local_port, - remote_port, max_message_size)); + RTC_DCHECK_RUN_ON(owner_thread_); + info_ = SctpTransportInformation(info_.state(), info_.dtls_transport(), + max_message_size, info_.MaxChannels()); + + if (!internal()->Start(local_port, remote_port, max_message_size)) { + RTC_LOG(LS_ERROR) << "Failed to push down SCTP parameters, closing."; + UpdateInformation(SctpTransportState::kClosed); } } void SctpTransport::UpdateInformation(SctpTransportState state) { RTC_DCHECK_RUN_ON(owner_thread_); - bool must_send_update; - SctpTransportInformation info_copy(SctpTransportState::kNew); - { - rtc::CritScope scope(&lock_); - must_send_update = (state != info_.state()); - // TODO(https://bugs.webrtc.org/10358): Update max channels from internal - // SCTP transport when available. - if (internal_sctp_transport_) { - info_ = SctpTransportInformation( - state, dtls_transport_, info_.MaxMessageSize(), info_.MaxChannels()); - } else { - info_ = SctpTransportInformation( - state, dtls_transport_, info_.MaxMessageSize(), info_.MaxChannels()); - } - if (observer_ && must_send_update) { - info_copy = info_; - } + bool must_send_update = (state != info_.state()); + // TODO(https://bugs.webrtc.org/10358): Update max channels from internal + // SCTP transport when available. 
+ if (internal_sctp_transport_) { + info_ = SctpTransportInformation( + state, dtls_transport_, info_.MaxMessageSize(), info_.MaxChannels()); + } else { + info_ = SctpTransportInformation( + state, dtls_transport_, info_.MaxMessageSize(), info_.MaxChannels()); } - // We call the observer without holding the lock. + if (observer_ && must_send_update) { - observer_->OnStateChange(info_copy); + observer_->OnStateChange(info_); } } void SctpTransport::OnAssociationChangeCommunicationUp() { RTC_DCHECK_RUN_ON(owner_thread_); - { - rtc::CritScope scope(&lock_); - RTC_DCHECK(internal_sctp_transport_); - if (internal_sctp_transport_->max_outbound_streams() && - internal_sctp_transport_->max_inbound_streams()) { - int max_channels = - std::min(*(internal_sctp_transport_->max_outbound_streams()), - *(internal_sctp_transport_->max_inbound_streams())); - // Record max channels. - info_ = SctpTransportInformation(info_.state(), info_.dtls_transport(), - info_.MaxMessageSize(), max_channels); - } + RTC_DCHECK(internal_sctp_transport_); + if (internal_sctp_transport_->max_outbound_streams() && + internal_sctp_transport_->max_inbound_streams()) { + int max_channels = + std::min(*(internal_sctp_transport_->max_outbound_streams()), + *(internal_sctp_transport_->max_inbound_streams())); + // Record max channels. 
+ info_ = SctpTransportInformation(info_.state(), info_.dtls_transport(), + info_.MaxMessageSize(), max_channels); } + UpdateInformation(SctpTransportState::kConnected); } void SctpTransport::OnDtlsStateChange(cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state) { + DtlsTransportState state) { RTC_DCHECK_RUN_ON(owner_thread_); RTC_CHECK(transport == dtls_transport_->internal()); - if (state == cricket::DTLS_TRANSPORT_CLOSED || - state == cricket::DTLS_TRANSPORT_FAILED) { + if (state == DtlsTransportState::kClosed || + state == DtlsTransportState::kFailed) { UpdateInformation(SctpTransportState::kClosed); // TODO(http://bugs.webrtc.org/11090): Close all the data channels } diff --git a/pc/sctp_transport.h b/pc/sctp_transport.h index a13a58c68e..87fde53d97 100644 --- a/pc/sctp_transport.h +++ b/pc/sctp_transport.h @@ -13,10 +13,15 @@ #include +#include "api/dtls_transport_interface.h" #include "api/scoped_refptr.h" #include "api/sctp_transport_interface.h" -#include "media/sctp/sctp_transport.h" +#include "media/sctp/sctp_transport_internal.h" +#include "p2p/base/dtls_transport_internal.h" #include "pc/dtls_transport.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { @@ -47,12 +52,12 @@ class SctpTransport : public SctpTransportInterface, // internal() to be functions on the webrtc::SctpTransport interface, // and make the internal() function private. 
cricket::SctpTransportInternal* internal() { - rtc::CritScope scope(&lock_); + RTC_DCHECK_RUN_ON(owner_thread_); return internal_sctp_transport_.get(); } const cricket::SctpTransportInternal* internal() const { - rtc::CritScope scope(&lock_); + RTC_DCHECK_RUN_ON(owner_thread_); return internal_sctp_transport_.get(); } @@ -66,17 +71,14 @@ class SctpTransport : public SctpTransportInterface, void OnInternalClosingProcedureStartedRemotely(int sid); void OnInternalClosingProcedureComplete(int sid); void OnDtlsStateChange(cricket::DtlsTransportInternal* transport, - cricket::DtlsTransportState state); + DtlsTransportState state); - // Note - owner_thread never changes, but can't be const if we do - // Invoke() on it. - rtc::Thread* owner_thread_; - rtc::CriticalSection lock_; - // Variables accessible off-thread, guarded by lock_ - SctpTransportInformation info_ RTC_GUARDED_BY(lock_); + // NOTE: |owner_thread_| is the thread that the SctpTransport object is + // constructed on. In the context of PeerConnection, it's the network thread. 
+ rtc::Thread* const owner_thread_; + SctpTransportInformation info_ RTC_GUARDED_BY(owner_thread_); std::unique_ptr internal_sctp_transport_ - RTC_GUARDED_BY(lock_); - // Variables only accessed on-thread + RTC_GUARDED_BY(owner_thread_); SctpTransportObserverInterface* observer_ RTC_GUARDED_BY(owner_thread_) = nullptr; rtc::scoped_refptr dtls_transport_ diff --git a/pc/sctp_transport_unittest.cc b/pc/sctp_transport_unittest.cc index f3070cd9a7..679b481f4c 100644 --- a/pc/sctp_transport_unittest.cc +++ b/pc/sctp_transport_unittest.cc @@ -14,6 +14,7 @@ #include #include "absl/memory/memory.h" +#include "api/dtls_transport_interface.h" #include "p2p/base/fake_dtls_transport.h" #include "pc/dtls_transport.h" #include "rtc_base/gunit.h" @@ -38,7 +39,8 @@ class FakeCricketSctpTransport : public cricket::SctpTransportInternal { } bool OpenStream(int sid) override { return true; } bool ResetStream(int sid) override { return true; } - bool SendData(const cricket::SendDataParams& params, + bool SendData(int sid, + const SendDataParams& params, const rtc::CopyOnWriteBuffer& payload, cricket::SendDataResult* result = nullptr) override { return true; @@ -112,8 +114,8 @@ class SctpTransportTest : public ::testing::Test { void CreateTransport() { auto cricket_sctp_transport = absl::WrapUnique(new FakeCricketSctpTransport()); - transport_ = new rtc::RefCountedObject( - std::move(cricket_sctp_transport)); + transport_ = + rtc::make_ref_counted(std::move(cricket_sctp_transport)); } void AddDtlsTransport() { @@ -121,7 +123,7 @@ class SctpTransportTest : public ::testing::Test { std::make_unique( "audio", cricket::ICE_CANDIDATE_COMPONENT_RTP); dtls_transport_ = - new rtc::RefCountedObject(std::move(cricket_transport)); + rtc::make_ref_counted(std::move(cricket_transport)); transport_->SetDtlsTransport(dtls_transport_); } @@ -147,7 +149,7 @@ TEST(SctpTransportSimpleTest, CreateClearDelete) { std::unique_ptr fake_cricket_sctp_transport = absl::WrapUnique(new FakeCricketSctpTransport()); 
rtc::scoped_refptr sctp_transport = - new rtc::RefCountedObject( + rtc::make_ref_counted( std::move(fake_cricket_sctp_transport)); ASSERT_TRUE(sctp_transport->internal()); ASSERT_EQ(SctpTransportState::kNew, sctp_transport->Information().state()); @@ -203,7 +205,7 @@ TEST_F(SctpTransportTest, CloseWhenTransportCloses) { ASSERT_EQ_WAIT(SctpTransportState::kConnected, observer_.State(), kDefaultTimeout); static_cast(dtls_transport_->internal()) - ->SetDtlsState(cricket::DTLS_TRANSPORT_CLOSED); + ->SetDtlsState(DtlsTransportState::kClosed); ASSERT_EQ_WAIT(SctpTransportState::kClosed, observer_.State(), kDefaultTimeout); } diff --git a/pc/sctp_utils.cc b/pc/sctp_utils.cc index 1882a1525f..f7458405ea 100644 --- a/pc/sctp_utils.cc +++ b/pc/sctp_utils.cc @@ -13,8 +13,10 @@ #include #include +#include "absl/types/optional.h" #include "api/priority.h" #include "rtc_base/byte_buffer.h" +#include "rtc_base/checks.h" #include "rtc_base/copy_on_write_buffer.h" #include "rtc_base/logging.h" @@ -228,33 +230,4 @@ void WriteDataChannelOpenAckMessage(rtc::CopyOnWriteBuffer* payload) { payload->SetData(&data, sizeof(data)); } -cricket::DataMessageType ToCricketDataMessageType(DataMessageType type) { - switch (type) { - case DataMessageType::kText: - return cricket::DMT_TEXT; - case DataMessageType::kBinary: - return cricket::DMT_BINARY; - case DataMessageType::kControl: - return cricket::DMT_CONTROL; - default: - return cricket::DMT_NONE; - } - return cricket::DMT_NONE; -} - -DataMessageType ToWebrtcDataMessageType(cricket::DataMessageType type) { - switch (type) { - case cricket::DMT_TEXT: - return DataMessageType::kText; - case cricket::DMT_BINARY: - return DataMessageType::kBinary; - case cricket::DMT_CONTROL: - return DataMessageType::kControl; - case cricket::DMT_NONE: - default: - RTC_NOTREACHED(); - } - return DataMessageType::kControl; -} - } // namespace webrtc diff --git a/pc/sctp_utils.h b/pc/sctp_utils.h index 339ef21163..da854458f4 100644 --- a/pc/sctp_utils.h +++ 
b/pc/sctp_utils.h @@ -16,6 +16,7 @@ #include "api/data_channel_interface.h" #include "api/transport/data_channel_transport_interface.h" #include "media/base/media_channel.h" +#include "rtc_base/copy_on_write_buffer.h" namespace rtc { class CopyOnWriteBuffer; @@ -39,10 +40,6 @@ bool WriteDataChannelOpenMessage(const std::string& label, void WriteDataChannelOpenAckMessage(rtc::CopyOnWriteBuffer* payload); -cricket::DataMessageType ToCricketDataMessageType(DataMessageType type); - -DataMessageType ToWebrtcDataMessageType(cricket::DataMessageType type); - } // namespace webrtc #endif // PC_SCTP_UTILS_H_ diff --git a/pc/sctp_utils_unittest.cc b/pc/sctp_utils_unittest.cc index 690a9dc523..af14fe4f6b 100644 --- a/pc/sctp_utils_unittest.cc +++ b/pc/sctp_utils_unittest.cc @@ -178,15 +178,15 @@ TEST_F(SctpUtilsTest, WriteParseAckMessage) { TEST_F(SctpUtilsTest, TestIsOpenMessage) { rtc::CopyOnWriteBuffer open(1); - open[0] = 0x03; + open.MutableData()[0] = 0x03; EXPECT_TRUE(webrtc::IsOpenMessage(open)); rtc::CopyOnWriteBuffer openAck(1); - openAck[0] = 0x02; + openAck.MutableData()[0] = 0x02; EXPECT_FALSE(webrtc::IsOpenMessage(openAck)); rtc::CopyOnWriteBuffer invalid(1); - invalid[0] = 0x01; + invalid.MutableData()[0] = 0x01; EXPECT_FALSE(webrtc::IsOpenMessage(invalid)); rtc::CopyOnWriteBuffer empty; diff --git a/pc/sdp_offer_answer.cc b/pc/sdp_offer_answer.cc new file mode 100644 index 0000000000..533bd84dbe --- /dev/null +++ b/pc/sdp_offer_answer.cc @@ -0,0 +1,5036 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/sdp_offer_answer.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/memory/memory.h" +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "api/crypto/crypto_options.h" +#include "api/dtls_transport_interface.h" +#include "api/rtp_parameters.h" +#include "api/rtp_receiver_interface.h" +#include "api/rtp_sender_interface.h" +#include "api/video/builtin_video_bitrate_allocator_factory.h" +#include "media/base/codec.h" +#include "media/base/media_engine.h" +#include "media/base/rid_description.h" +#include "p2p/base/ice_transport_internal.h" +#include "p2p/base/p2p_constants.h" +#include "p2p/base/p2p_transport_channel.h" +#include "p2p/base/port.h" +#include "p2p/base/transport_description.h" +#include "p2p/base/transport_description_factory.h" +#include "p2p/base/transport_info.h" +#include "pc/data_channel_utils.h" +#include "pc/dtls_transport.h" +#include "pc/media_stream.h" +#include "pc/media_stream_proxy.h" +#include "pc/peer_connection.h" +#include "pc/peer_connection_message_handler.h" +#include "pc/rtp_media_utils.h" +#include "pc/rtp_sender.h" +#include "pc/rtp_transport_internal.h" +#include "pc/simulcast_description.h" +#include "pc/stats_collector.h" +#include "pc/usage_pattern.h" +#include "pc/webrtc_session_description_factory.h" +#include "rtc_base/helpers.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/rtc_certificate.h" +#include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/string_encode.h" +#include "rtc_base/strings/string_builder.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/trace_event.h" +#include "system_wrappers/include/field_trial.h" +#include "system_wrappers/include/metrics.h" + +using cricket::ContentInfo; +using cricket::ContentInfos; +using cricket::MediaContentDescription; +using cricket::MediaProtocolType; 
+using cricket::RidDescription; +using cricket::RidDirection; +using cricket::SessionDescription; +using cricket::SimulcastDescription; +using cricket::SimulcastLayer; +using cricket::SimulcastLayerList; +using cricket::StreamParams; +using cricket::TransportInfo; + +using cricket::LOCAL_PORT_TYPE; +using cricket::PRFLX_PORT_TYPE; +using cricket::RELAY_PORT_TYPE; +using cricket::STUN_PORT_TYPE; + +namespace webrtc { + +namespace { + +typedef webrtc::PeerConnectionInterface::RTCOfferAnswerOptions + RTCOfferAnswerOptions; + +constexpr const char* kAlwaysAllowPayloadTypeDemuxingFieldTrialName = + "WebRTC-AlwaysAllowPayloadTypeDemuxing"; + +// Error messages +const char kInvalidSdp[] = "Invalid session description."; +const char kInvalidCandidates[] = "Description contains invalid candidates."; +const char kBundleWithoutRtcpMux[] = + "rtcp-mux must be enabled when BUNDLE " + "is enabled."; +const char kMlineMismatchInAnswer[] = + "The order of m-lines in answer doesn't match order in offer. Rejecting " + "answer."; +const char kMlineMismatchInSubsequentOffer[] = + "The order of m-lines in subsequent offer doesn't match order from " + "previous offer/answer."; +const char kSdpWithoutIceUfragPwd[] = + "Called with SDP without ice-ufrag and ice-pwd."; +const char kSdpWithoutDtlsFingerprint[] = + "Called with SDP without DTLS fingerprint."; +const char kSdpWithoutSdesCrypto[] = "Called with SDP without SDES crypto."; + +const char kSessionError[] = "Session error code: "; +const char kSessionErrorDesc[] = "Session error description: "; + +// UMA metric names. +const char kSimulcastVersionApplyLocalDescription[] = + "WebRTC.PeerConnection.Simulcast.ApplyLocalDescription"; +const char kSimulcastVersionApplyRemoteDescription[] = + "WebRTC.PeerConnection.Simulcast.ApplyRemoteDescription"; +const char kSimulcastDisabled[] = "WebRTC.PeerConnection.Simulcast.Disabled"; + +// The length of RTCP CNAMEs. 
+static const int kRtcpCnameLength = 16; + +const char kDefaultStreamId[] = "default"; +// NOTE: Duplicated in peer_connection.cc: +static const char kDefaultAudioSenderId[] = "defaulta0"; +static const char kDefaultVideoSenderId[] = "defaultv0"; + +void NoteAddIceCandidateResult(int result) { + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.AddIceCandidate", result, + kAddIceCandidateMax); +} + +void NoteKeyProtocolAndMedia(KeyExchangeProtocolType protocol_type, + cricket::MediaType media_type) { + // Array of structs needed to map {KeyExchangeProtocolType, + // cricket::MediaType} to KeyExchangeProtocolMedia without using std::map in + // order to avoid -Wglobal-constructors and -Wexit-time-destructors. + static constexpr struct { + KeyExchangeProtocolType protocol_type; + cricket::MediaType media_type; + KeyExchangeProtocolMedia protocol_media; + } kEnumCounterKeyProtocolMediaMap[] = { + {kEnumCounterKeyProtocolDtls, cricket::MEDIA_TYPE_AUDIO, + kEnumCounterKeyProtocolMediaTypeDtlsAudio}, + {kEnumCounterKeyProtocolDtls, cricket::MEDIA_TYPE_VIDEO, + kEnumCounterKeyProtocolMediaTypeDtlsVideo}, + {kEnumCounterKeyProtocolDtls, cricket::MEDIA_TYPE_DATA, + kEnumCounterKeyProtocolMediaTypeDtlsData}, + {kEnumCounterKeyProtocolSdes, cricket::MEDIA_TYPE_AUDIO, + kEnumCounterKeyProtocolMediaTypeSdesAudio}, + {kEnumCounterKeyProtocolSdes, cricket::MEDIA_TYPE_VIDEO, + kEnumCounterKeyProtocolMediaTypeSdesVideo}, + {kEnumCounterKeyProtocolSdes, cricket::MEDIA_TYPE_DATA, + kEnumCounterKeyProtocolMediaTypeSdesData}, + }; + + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.KeyProtocol", protocol_type, + kEnumCounterKeyProtocolMax); + + for (const auto& i : kEnumCounterKeyProtocolMediaMap) { + if (i.protocol_type == protocol_type && i.media_type == media_type) { + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.KeyProtocolByMedia", + i.protocol_media, + kEnumCounterKeyProtocolMediaTypeMax); + } + } +} + +std::map GetBundleGroupsByMid( + const SessionDescription* desc) { + 
std::vector bundle_groups = + desc->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE); + std::map bundle_groups_by_mid; + for (const cricket::ContentGroup* bundle_group : bundle_groups) { + for (const std::string& content_name : bundle_group->content_names()) { + bundle_groups_by_mid[content_name] = bundle_group; + } + } + return bundle_groups_by_mid; +} + +// Returns true if |new_desc| requests an ICE restart (i.e., new ufrag/pwd). +bool CheckForRemoteIceRestart(const SessionDescriptionInterface* old_desc, + const SessionDescriptionInterface* new_desc, + const std::string& content_name) { + if (!old_desc) { + return false; + } + const SessionDescription* new_sd = new_desc->description(); + const SessionDescription* old_sd = old_desc->description(); + const ContentInfo* cinfo = new_sd->GetContentByName(content_name); + if (!cinfo || cinfo->rejected) { + return false; + } + // If the content isn't rejected, check if ufrag and password has changed. + const cricket::TransportDescription* new_transport_desc = + new_sd->GetTransportDescriptionByName(content_name); + const cricket::TransportDescription* old_transport_desc = + old_sd->GetTransportDescriptionByName(content_name); + if (!new_transport_desc || !old_transport_desc) { + // No transport description exists. This is not an ICE restart. + return false; + } + if (cricket::IceCredentialsChanged( + old_transport_desc->ice_ufrag, old_transport_desc->ice_pwd, + new_transport_desc->ice_ufrag, new_transport_desc->ice_pwd)) { + RTC_LOG(LS_INFO) << "Remote peer requests ICE restart for " << content_name + << "."; + return true; + } + return false; +} + +// Generates a string error message for SetLocalDescription/SetRemoteDescription +// from an RTCError. +std::string GetSetDescriptionErrorMessage(cricket::ContentSource source, + SdpType type, + const RTCError& error) { + rtc::StringBuilder oss; + oss << "Failed to set " << (source == cricket::CS_LOCAL ? 
"local" : "remote") + << " " << SdpTypeToString(type) << " sdp: " << error.message(); + return oss.Release(); +} + +std::string GetStreamIdsString(rtc::ArrayView stream_ids) { + std::string output = "streams=["; + const char* separator = ""; + for (const auto& stream_id : stream_ids) { + output.append(separator).append(stream_id); + separator = ", "; + } + output.append("]"); + return output; +} + +void ReportSimulcastApiVersion(const char* name, + const SessionDescription& session) { + bool has_legacy = false; + bool has_spec_compliant = false; + for (const ContentInfo& content : session.contents()) { + if (!content.media_description()) { + continue; + } + has_spec_compliant |= content.media_description()->HasSimulcast(); + for (const StreamParams& sp : content.media_description()->streams()) { + has_legacy |= sp.has_ssrc_group(cricket::kSimSsrcGroupSemantics); + } + } + + if (has_legacy) { + RTC_HISTOGRAM_ENUMERATION(name, kSimulcastApiVersionLegacy, + kSimulcastApiVersionMax); + } + if (has_spec_compliant) { + RTC_HISTOGRAM_ENUMERATION(name, kSimulcastApiVersionSpecCompliant, + kSimulcastApiVersionMax); + } + if (!has_legacy && !has_spec_compliant) { + RTC_HISTOGRAM_ENUMERATION(name, kSimulcastApiVersionNone, + kSimulcastApiVersionMax); + } +} + +const ContentInfo* FindTransceiverMSection( + RtpTransceiver* transceiver, + const SessionDescriptionInterface* session_description) { + return transceiver->mid() + ? session_description->description()->GetContentByName( + *transceiver->mid()) + : nullptr; +} + +// If the direction is "recvonly" or "inactive", treat the description +// as containing no streams. +// See: https://code.google.com/p/webrtc/issues/detail?id=5054 +std::vector GetActiveStreams( + const cricket::MediaContentDescription* desc) { + return RtpTransceiverDirectionHasSend(desc->direction()) + ? desc->streams() + : std::vector(); +} + +// Logic to decide if an m= section can be recycled. 
This means that the new +// m= section is not rejected, but the old local or remote m= section is +// rejected. |old_content_one| and |old_content_two| refer to the m= section +// of the old remote and old local descriptions in no particular order. +// We need to check both the old local and remote because either +// could be the most current from the latest negotation. +bool IsMediaSectionBeingRecycled(SdpType type, + const ContentInfo& content, + const ContentInfo* old_content_one, + const ContentInfo* old_content_two) { + return type == SdpType::kOffer && !content.rejected && + ((old_content_one && old_content_one->rejected) || + (old_content_two && old_content_two->rejected)); +} + +// Verify that the order of media sections in |new_desc| matches +// |current_desc|. The number of m= sections in |new_desc| should be no +// less than |current_desc|. In the case of checking an answer's +// |new_desc|, the |current_desc| is the last offer that was set as the +// local or remote. In the case of checking an offer's |new_desc| we +// check against the local and remote descriptions stored from the last +// negotiation, because either of these could be the most up to date for +// possible rejected m sections. These are the |current_desc| and +// |secondary_current_desc|. 
+bool MediaSectionsInSameOrder(const SessionDescription& current_desc, + const SessionDescription* secondary_current_desc, + const SessionDescription& new_desc, + const SdpType type) { + if (current_desc.contents().size() > new_desc.contents().size()) { + return false; + } + + for (size_t i = 0; i < current_desc.contents().size(); ++i) { + const cricket::ContentInfo* secondary_content_info = nullptr; + if (secondary_current_desc && + i < secondary_current_desc->contents().size()) { + secondary_content_info = &secondary_current_desc->contents()[i]; + } + if (IsMediaSectionBeingRecycled(type, new_desc.contents()[i], + ¤t_desc.contents()[i], + secondary_content_info)) { + // For new offer descriptions, if the media section can be recycled, it's + // valid for the MID and media type to change. + continue; + } + if (new_desc.contents()[i].name != current_desc.contents()[i].name) { + return false; + } + const MediaContentDescription* new_desc_mdesc = + new_desc.contents()[i].media_description(); + const MediaContentDescription* current_desc_mdesc = + current_desc.contents()[i].media_description(); + if (new_desc_mdesc->type() != current_desc_mdesc->type()) { + return false; + } + } + return true; +} + +bool MediaSectionsHaveSameCount(const SessionDescription& desc1, + const SessionDescription& desc2) { + return desc1.contents().size() == desc2.contents().size(); +} +// Checks that each non-rejected content has SDES crypto keys or a DTLS +// fingerprint, unless it's in a BUNDLE group, in which case only the +// BUNDLE-tag section (first media section/description in the BUNDLE group) +// needs a ufrag and pwd. Mismatches, such as replying with a DTLS fingerprint +// to SDES keys, will be caught in JsepTransport negotiation, and backstopped +// by Channel's |srtp_required| check. 
+RTCError VerifyCrypto(const SessionDescription* desc, + bool dtls_enabled, + const std::map& + bundle_groups_by_mid) { + for (const cricket::ContentInfo& content_info : desc->contents()) { + if (content_info.rejected) { + continue; + } + // Note what media is used with each crypto protocol, for all sections. + NoteKeyProtocolAndMedia(dtls_enabled ? webrtc::kEnumCounterKeyProtocolDtls + : webrtc::kEnumCounterKeyProtocolSdes, + content_info.media_description()->type()); + const std::string& mid = content_info.name; + auto it = bundle_groups_by_mid.find(mid); + const cricket::ContentGroup* bundle = + it != bundle_groups_by_mid.end() ? it->second : nullptr; + if (bundle && mid != *(bundle->FirstContentName())) { + // This isn't the first media section in the BUNDLE group, so it's not + // required to have crypto attributes, since only the crypto attributes + // from the first section actually get used. + continue; + } + + // If the content isn't rejected or bundled into another m= section, crypto + // must be present. + const MediaContentDescription* media = content_info.media_description(); + const TransportInfo* tinfo = desc->GetTransportInfoByName(mid); + if (!media || !tinfo) { + // Something is not right. 
+ LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, kInvalidSdp); + } + if (dtls_enabled) { + if (!tinfo->description.identity_fingerprint) { + RTC_LOG(LS_WARNING) + << "Session description must have DTLS fingerprint if " + "DTLS enabled."; + return RTCError(RTCErrorType::INVALID_PARAMETER, + kSdpWithoutDtlsFingerprint); + } + } else { + if (media->cryptos().empty()) { + RTC_LOG(LS_WARNING) + << "Session description must have SDES when DTLS disabled."; + return RTCError(RTCErrorType::INVALID_PARAMETER, kSdpWithoutSdesCrypto); + } + } + } + return RTCError::OK(); +} + +// Checks that each non-rejected content has ice-ufrag and ice-pwd set, unless +// it's in a BUNDLE group, in which case only the BUNDLE-tag section (first +// media section/description in the BUNDLE group) needs a ufrag and pwd. +bool VerifyIceUfragPwdPresent( + const SessionDescription* desc, + const std::map& + bundle_groups_by_mid) { + for (const cricket::ContentInfo& content_info : desc->contents()) { + if (content_info.rejected) { + continue; + } + const std::string& mid = content_info.name; + auto it = bundle_groups_by_mid.find(mid); + const cricket::ContentGroup* bundle = + it != bundle_groups_by_mid.end() ? it->second : nullptr; + if (bundle && mid != *(bundle->FirstContentName())) { + // This isn't the first media section in the BUNDLE group, so it's not + // required to have ufrag/password, since only the ufrag/password from + // the first section actually get used. + continue; + } + + // If the content isn't rejected or bundled into another m= section, + // ice-ufrag and ice-pwd must be present. + const TransportInfo* tinfo = desc->GetTransportInfoByName(mid); + if (!tinfo) { + // Something is not right. 
+ RTC_LOG(LS_ERROR) << kInvalidSdp; + return false; + } + if (tinfo->description.ice_ufrag.empty() || + tinfo->description.ice_pwd.empty()) { + RTC_LOG(LS_ERROR) << "Session description must have ice ufrag and pwd."; + return false; + } + } + return true; +} + +RTCError ValidateMids(const cricket::SessionDescription& description) { + std::set mids; + for (const cricket::ContentInfo& content : description.contents()) { + if (content.name.empty()) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "A media section is missing a MID attribute."); + } + if (!mids.insert(content.name).second) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "Duplicate a=mid value '" + content.name + "'."); + } + } + return RTCError::OK(); +} + +bool IsValidOfferToReceiveMedia(int value) { + typedef PeerConnectionInterface::RTCOfferAnswerOptions Options; + return (value >= Options::kUndefined) && + (value <= Options::kMaxOfferToReceiveMedia); +} + +bool ValidateOfferAnswerOptions( + const PeerConnectionInterface::RTCOfferAnswerOptions& rtc_options) { + return IsValidOfferToReceiveMedia(rtc_options.offer_to_receive_audio) && + IsValidOfferToReceiveMedia(rtc_options.offer_to_receive_video); +} + +// Map internal signaling state name to spec name: +// https://w3c.github.io/webrtc-pc/#rtcsignalingstate-enum +std::string GetSignalingStateString( + PeerConnectionInterface::SignalingState state) { + switch (state) { + case PeerConnectionInterface::kStable: + return "stable"; + case PeerConnectionInterface::kHaveLocalOffer: + return "have-local-offer"; + case PeerConnectionInterface::kHaveLocalPrAnswer: + return "have-local-pranswer"; + case PeerConnectionInterface::kHaveRemoteOffer: + return "have-remote-offer"; + case PeerConnectionInterface::kHaveRemotePrAnswer: + return "have-remote-pranswer"; + case PeerConnectionInterface::kClosed: + return "closed"; + } + RTC_NOTREACHED(); + return ""; +} + +// This method will extract any send encodings that were sent by the remote +// 
connection. This is currently only relevant for Simulcast scenario (where +// the number of layers may be communicated by the server). +std::vector GetSendEncodingsFromRemoteDescription( + const MediaContentDescription& desc) { + if (!desc.HasSimulcast()) { + return {}; + } + std::vector result; + const SimulcastDescription& simulcast = desc.simulcast_description(); + + // This is a remote description, the parameters we are after should appear + // as receive streams. + for (const auto& alternatives : simulcast.receive_layers()) { + RTC_DCHECK(!alternatives.empty()); + // There is currently no way to specify or choose from alternatives. + // We will always use the first alternative, which is the most preferred. + const SimulcastLayer& layer = alternatives[0]; + RtpEncodingParameters parameters; + parameters.rid = layer.rid; + parameters.active = !layer.is_paused; + result.push_back(parameters); + } + + return result; +} + +RTCError UpdateSimulcastLayerStatusInSender( + const std::vector& layers, + rtc::scoped_refptr sender) { + RTC_DCHECK(sender); + RtpParameters parameters = sender->GetParametersInternal(); + std::vector disabled_layers; + + // The simulcast envelope cannot be changed, only the status of the streams. + // So we will iterate over the send encodings rather than the layers. + for (RtpEncodingParameters& encoding : parameters.encodings) { + auto iter = std::find_if(layers.begin(), layers.end(), + [&encoding](const SimulcastLayer& layer) { + return layer.rid == encoding.rid; + }); + // A layer that cannot be found may have been removed by the remote party. 
+ if (iter == layers.end()) { + disabled_layers.push_back(encoding.rid); + continue; + } + + encoding.active = !iter->is_paused; + } + + RTCError result = sender->SetParametersInternal(parameters); + if (result.ok()) { + result = sender->DisableEncodingLayers(disabled_layers); + } + + return result; +} + +bool SimulcastIsRejected(const ContentInfo* local_content, + const MediaContentDescription& answer_media_desc, + bool enable_encrypted_rtp_header_extensions) { + bool simulcast_offered = local_content && + local_content->media_description() && + local_content->media_description()->HasSimulcast(); + bool simulcast_answered = answer_media_desc.HasSimulcast(); + bool rids_supported = RtpExtension::FindHeaderExtensionByUri( + answer_media_desc.rtp_header_extensions(), RtpExtension::kRidUri, + enable_encrypted_rtp_header_extensions + ? RtpExtension::Filter::kPreferEncryptedExtension + : RtpExtension::Filter::kDiscardEncryptedExtension); + return simulcast_offered && (!simulcast_answered || !rids_supported); +} + +RTCError DisableSimulcastInSender( + rtc::scoped_refptr sender) { + RTC_DCHECK(sender); + RtpParameters parameters = sender->GetParametersInternal(); + if (parameters.encodings.size() <= 1) { + return RTCError::OK(); + } + + std::vector disabled_layers; + std::transform( + parameters.encodings.begin() + 1, parameters.encodings.end(), + std::back_inserter(disabled_layers), + [](const RtpEncodingParameters& encoding) { return encoding.rid; }); + return sender->DisableEncodingLayers(disabled_layers); +} + +// The SDP parser used to populate these values by default for the 'content +// name' if an a=mid line was absent. 
+absl::string_view GetDefaultMidForPlanB(cricket::MediaType media_type) { + switch (media_type) { + case cricket::MEDIA_TYPE_AUDIO: + return cricket::CN_AUDIO; + case cricket::MEDIA_TYPE_VIDEO: + return cricket::CN_VIDEO; + case cricket::MEDIA_TYPE_DATA: + return cricket::CN_DATA; + case cricket::MEDIA_TYPE_UNSUPPORTED: + return "not supported"; + } + RTC_NOTREACHED(); + return ""; +} + +// Add options to |[audio/video]_media_description_options| from |senders|. +void AddPlanBRtpSenderOptions( + const std::vector>>& senders, + cricket::MediaDescriptionOptions* audio_media_description_options, + cricket::MediaDescriptionOptions* video_media_description_options, + int num_sim_layers) { + for (const auto& sender : senders) { + if (sender->media_type() == cricket::MEDIA_TYPE_AUDIO) { + if (audio_media_description_options) { + audio_media_description_options->AddAudioSender( + sender->id(), sender->internal()->stream_ids()); + } + } else { + RTC_DCHECK(sender->media_type() == cricket::MEDIA_TYPE_VIDEO); + if (video_media_description_options) { + video_media_description_options->AddVideoSender( + sender->id(), sender->internal()->stream_ids(), {}, + SimulcastLayerList(), num_sim_layers); + } + } + } +} + +cricket::MediaDescriptionOptions GetMediaDescriptionOptionsForTransceiver( + RtpTransceiver* transceiver, + const std::string& mid, + bool is_create_offer) { + // NOTE: a stopping transceiver should be treated as a stopped one in + // createOffer as specified in + // https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-createoffer. + bool stopped = + is_create_offer ? transceiver->stopping() : transceiver->stopped(); + cricket::MediaDescriptionOptions media_description_options( + transceiver->media_type(), mid, transceiver->direction(), stopped); + media_description_options.codec_preferences = + transceiver->codec_preferences(); + media_description_options.header_extensions = + transceiver->HeaderExtensionsToOffer(); + // This behavior is specified in JSEP. 
The gist is that: + // 1. The MSID is included if the RtpTransceiver's direction is sendonly or + // sendrecv. + // 2. If the MSID is included, then it must be included in any subsequent + // offer/answer exactly the same until the RtpTransceiver is stopped. + if (stopped || (!RtpTransceiverDirectionHasSend(transceiver->direction()) && + !transceiver->has_ever_been_used_to_send())) { + return media_description_options; + } + + cricket::SenderOptions sender_options; + sender_options.track_id = transceiver->sender()->id(); + sender_options.stream_ids = transceiver->sender()->stream_ids(); + + // The following sets up RIDs and Simulcast. + // RIDs are included if Simulcast is requested or if any RID was specified. + RtpParameters send_parameters = + transceiver->sender_internal()->GetParametersInternal(); + bool has_rids = std::any_of(send_parameters.encodings.begin(), + send_parameters.encodings.end(), + [](const RtpEncodingParameters& encoding) { + return !encoding.rid.empty(); + }); + + std::vector send_rids; + SimulcastLayerList send_layers; + for (const RtpEncodingParameters& encoding : send_parameters.encodings) { + if (encoding.rid.empty()) { + continue; + } + send_rids.push_back(RidDescription(encoding.rid, RidDirection::kSend)); + send_layers.AddLayer(SimulcastLayer(encoding.rid, !encoding.active)); + } + + if (has_rids) { + sender_options.rids = send_rids; + } + + sender_options.simulcast_layers = send_layers; + // When RIDs are configured, we must set num_sim_layers to 0 to. + // Otherwise, num_sim_layers must be 1 because either there is no + // simulcast, or simulcast is acheived by munging the SDP. + sender_options.num_sim_layers = has_rids ? 0 : 1; + media_description_options.sender_options.push_back(sender_options); + + return media_description_options; +} + +// Returns the ContentInfo at mline index |i|, or null if none exists. 
+const ContentInfo* GetContentByIndex(const SessionDescriptionInterface* sdesc, + size_t i) { + if (!sdesc) { + return nullptr; + } + const ContentInfos& contents = sdesc->description()->contents(); + return (i < contents.size() ? &contents[i] : nullptr); +} + +// From |rtc_options|, fill parts of |session_options| shared by all generated +// m= sectionss (in other words, nothing that involves a map/array). +void ExtractSharedMediaSessionOptions( + const PeerConnectionInterface::RTCOfferAnswerOptions& rtc_options, + cricket::MediaSessionOptions* session_options) { + session_options->vad_enabled = rtc_options.voice_activity_detection; + session_options->bundle_enabled = rtc_options.use_rtp_mux; + session_options->raw_packetization_for_video = + rtc_options.raw_packetization_for_video; +} + +// Generate a RTCP CNAME when a PeerConnection is created. +std::string GenerateRtcpCname() { + std::string cname; + if (!rtc::CreateRandomString(kRtcpCnameLength, &cname)) { + RTC_LOG(LS_ERROR) << "Failed to generate CNAME."; + RTC_NOTREACHED(); + } + return cname; +} + +// Check if we can send |new_stream| on a PeerConnection. +bool CanAddLocalMediaStream(webrtc::StreamCollectionInterface* current_streams, + webrtc::MediaStreamInterface* new_stream) { + if (!new_stream || !current_streams) { + return false; + } + if (current_streams->find(new_stream->id()) != nullptr) { + RTC_LOG(LS_ERROR) << "MediaStream with ID " << new_stream->id() + << " is already added."; + return false; + } + return true; +} + +rtc::scoped_refptr LookupDtlsTransportByMid( + rtc::Thread* network_thread, + JsepTransportController* controller, + const std::string& mid) { + // TODO(tommi): Can we post this (and associated operations where this + // function is called) to the network thread and avoid this Invoke? 
+ // We might be able to simplify a few things if we set the transport on + // the network thread and then update the implementation to check that + // the set_ and relevant get methods are always called on the network + // thread (we'll need to update proxy maps). + return network_thread->Invoke>( + RTC_FROM_HERE, + [controller, &mid] { return controller->LookupDtlsTransportByMid(mid); }); +} + +bool ContentHasHeaderExtension(const cricket::ContentInfo& content_info, + absl::string_view header_extension_uri) { + for (const RtpExtension& rtp_header_extension : + content_info.media_description()->rtp_header_extensions()) { + if (rtp_header_extension.uri == header_extension_uri) { + return true; + } + } + return false; +} + +} // namespace + +// Used by parameterless SetLocalDescription() to create an offer or answer. +// Upon completion of creating the session description, SetLocalDescription() is +// invoked with the result. +class SdpOfferAnswerHandler::ImplicitCreateSessionDescriptionObserver + : public CreateSessionDescriptionObserver { + public: + ImplicitCreateSessionDescriptionObserver( + rtc::WeakPtr sdp_handler, + rtc::scoped_refptr + set_local_description_observer) + : sdp_handler_(std::move(sdp_handler)), + set_local_description_observer_( + std::move(set_local_description_observer)) {} + ~ImplicitCreateSessionDescriptionObserver() override { + RTC_DCHECK(was_called_); + } + + void SetOperationCompleteCallback( + std::function operation_complete_callback) { + operation_complete_callback_ = std::move(operation_complete_callback); + } + + bool was_called() const { return was_called_; } + + void OnSuccess(SessionDescriptionInterface* desc_ptr) override { + RTC_DCHECK(!was_called_); + std::unique_ptr desc(desc_ptr); + was_called_ = true; + + // Abort early if |pc_| is no longer valid. 
+ if (!sdp_handler_) { + operation_complete_callback_(); + return; + } + // DoSetLocalDescription() is a synchronous operation that invokes + // |set_local_description_observer_| with the result. + sdp_handler_->DoSetLocalDescription( + std::move(desc), std::move(set_local_description_observer_)); + operation_complete_callback_(); + } + + void OnFailure(RTCError error) override { + RTC_DCHECK(!was_called_); + was_called_ = true; + set_local_description_observer_->OnSetLocalDescriptionComplete(RTCError( + error.type(), std::string("SetLocalDescription failed to create " + "session description - ") + + error.message())); + operation_complete_callback_(); + } + + private: + bool was_called_ = false; + rtc::WeakPtr sdp_handler_; + rtc::scoped_refptr + set_local_description_observer_; + std::function operation_complete_callback_; +}; + +// Wraps a CreateSessionDescriptionObserver and an OperationsChain operation +// complete callback. When the observer is invoked, the wrapped observer is +// invoked followed by invoking the completion callback. +class CreateSessionDescriptionObserverOperationWrapper + : public CreateSessionDescriptionObserver { + public: + CreateSessionDescriptionObserverOperationWrapper( + rtc::scoped_refptr observer, + std::function operation_complete_callback) + : observer_(std::move(observer)), + operation_complete_callback_(std::move(operation_complete_callback)) { + RTC_DCHECK(observer_); + } + ~CreateSessionDescriptionObserverOperationWrapper() override { +#if RTC_DCHECK_IS_ON + RTC_DCHECK(was_called_); +#endif + } + + void OnSuccess(SessionDescriptionInterface* desc) override { +#if RTC_DCHECK_IS_ON + RTC_DCHECK(!was_called_); + was_called_ = true; +#endif // RTC_DCHECK_IS_ON + // Completing the operation before invoking the observer allows the observer + // to execute SetLocalDescription() without delay. 
+ operation_complete_callback_(); + observer_->OnSuccess(desc); + } + + void OnFailure(RTCError error) override { +#if RTC_DCHECK_IS_ON + RTC_DCHECK(!was_called_); + was_called_ = true; +#endif // RTC_DCHECK_IS_ON + operation_complete_callback_(); + observer_->OnFailure(std::move(error)); + } + + private: +#if RTC_DCHECK_IS_ON + bool was_called_ = false; +#endif // RTC_DCHECK_IS_ON + rtc::scoped_refptr observer_; + std::function operation_complete_callback_; +}; + +// Wrapper for SetSessionDescriptionObserver that invokes the success or failure +// callback in a posted message handled by the peer connection. This introduces +// a delay that prevents recursive API calls by the observer, but this also +// means that the PeerConnection can be modified before the observer sees the +// result of the operation. This is ill-advised for synchronizing states. +// +// Implements both the SetLocalDescriptionObserverInterface and the +// SetRemoteDescriptionObserverInterface. +class SdpOfferAnswerHandler::SetSessionDescriptionObserverAdapter + : public SetLocalDescriptionObserverInterface, + public SetRemoteDescriptionObserverInterface { + public: + SetSessionDescriptionObserverAdapter( + rtc::WeakPtr handler, + rtc::scoped_refptr inner_observer) + : handler_(std::move(handler)), + inner_observer_(std::move(inner_observer)) {} + + // SetLocalDescriptionObserverInterface implementation. + void OnSetLocalDescriptionComplete(RTCError error) override { + OnSetDescriptionComplete(std::move(error)); + } + // SetRemoteDescriptionObserverInterface implementation. 
+ void OnSetRemoteDescriptionComplete(RTCError error) override { + OnSetDescriptionComplete(std::move(error)); + } + + private: + void OnSetDescriptionComplete(RTCError error) { + if (!handler_) + return; + if (error.ok()) { + handler_->pc_->message_handler()->PostSetSessionDescriptionSuccess( + inner_observer_); + } else { + handler_->pc_->message_handler()->PostSetSessionDescriptionFailure( + inner_observer_, std::move(error)); + } + } + + rtc::WeakPtr handler_; + rtc::scoped_refptr inner_observer_; +}; + +class SdpOfferAnswerHandler::LocalIceCredentialsToReplace { + public: + // Sets the ICE credentials that need restarting to the ICE credentials of + // the current and pending descriptions. + void SetIceCredentialsFromLocalDescriptions( + const SessionDescriptionInterface* current_local_description, + const SessionDescriptionInterface* pending_local_description) { + ice_credentials_.clear(); + if (current_local_description) { + AppendIceCredentialsFromSessionDescription(*current_local_description); + } + if (pending_local_description) { + AppendIceCredentialsFromSessionDescription(*pending_local_description); + } + } + + void ClearIceCredentials() { ice_credentials_.clear(); } + + // Returns true if we have ICE credentials that need restarting. + bool HasIceCredentials() const { return !ice_credentials_.empty(); } + + // Returns true if |local_description| shares no ICE credentials with the + // ICE credentials that need restarting. 
+ bool SatisfiesIceRestart( + const SessionDescriptionInterface& local_description) const { + for (const auto& transport_info : + local_description.description()->transport_infos()) { + if (ice_credentials_.find(std::make_pair( + transport_info.description.ice_ufrag, + transport_info.description.ice_pwd)) != ice_credentials_.end()) { + return false; + } + } + return true; + } + + private: + void AppendIceCredentialsFromSessionDescription( + const SessionDescriptionInterface& desc) { + for (const auto& transport_info : desc.description()->transport_infos()) { + ice_credentials_.insert( + std::make_pair(transport_info.description.ice_ufrag, + transport_info.description.ice_pwd)); + } + } + + std::set> ice_credentials_; +}; + +SdpOfferAnswerHandler::SdpOfferAnswerHandler(PeerConnection* pc) + : pc_(pc), + local_streams_(StreamCollection::Create()), + remote_streams_(StreamCollection::Create()), + operations_chain_(rtc::OperationsChain::Create()), + rtcp_cname_(GenerateRtcpCname()), + local_ice_credentials_to_replace_(new LocalIceCredentialsToReplace()), + weak_ptr_factory_(this) { + operations_chain_->SetOnChainEmptyCallback( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr()]() { + if (!this_weak_ptr) + return; + this_weak_ptr->OnOperationsChainEmpty(); + }); +} + +SdpOfferAnswerHandler::~SdpOfferAnswerHandler() {} + +// Static +std::unique_ptr SdpOfferAnswerHandler::Create( + PeerConnection* pc, + const PeerConnectionInterface::RTCConfiguration& configuration, + PeerConnectionDependencies& dependencies) { + auto handler = absl::WrapUnique(new SdpOfferAnswerHandler(pc)); + handler->Initialize(configuration, dependencies); + return handler; +} + +void SdpOfferAnswerHandler::Initialize( + const PeerConnectionInterface::RTCConfiguration& configuration, + PeerConnectionDependencies& dependencies) { + RTC_DCHECK_RUN_ON(signaling_thread()); + video_options_.screencast_min_bitrate_kbps = + configuration.screencast_min_bitrate; + audio_options_.combined_audio_video_bwe = + 
configuration.combined_audio_video_bwe; + + audio_options_.audio_jitter_buffer_max_packets = + configuration.audio_jitter_buffer_max_packets; + + audio_options_.audio_jitter_buffer_fast_accelerate = + configuration.audio_jitter_buffer_fast_accelerate; + + audio_options_.audio_jitter_buffer_min_delay_ms = + configuration.audio_jitter_buffer_min_delay_ms; + + audio_options_.audio_jitter_buffer_enable_rtx_handling = + configuration.audio_jitter_buffer_enable_rtx_handling; + + // Obtain a certificate from RTCConfiguration if any were provided (optional). + rtc::scoped_refptr certificate; + if (!configuration.certificates.empty()) { + // TODO(hbos,torbjorng): Decide on certificate-selection strategy instead of + // just picking the first one. The decision should be made based on the DTLS + // handshake. The DTLS negotiations need to know about all certificates. + certificate = configuration.certificates[0]; + } + + webrtc_session_desc_factory_ = + std::make_unique( + signaling_thread(), channel_manager(), this, pc_->session_id(), + pc_->dtls_enabled(), std::move(dependencies.cert_generator), + certificate, &ssrc_generator_, + [this](const rtc::scoped_refptr& certificate) { + transport_controller()->SetLocalCertificate(certificate); + }); + + if (pc_->options()->disable_encryption) { + webrtc_session_desc_factory_->SetSdesPolicy(cricket::SEC_DISABLED); + } + + webrtc_session_desc_factory_->set_enable_encrypted_rtp_header_extensions( + pc_->GetCryptoOptions().srtp.enable_encrypted_rtp_header_extensions); + webrtc_session_desc_factory_->set_is_unified_plan(IsUnifiedPlan()); + + if (dependencies.video_bitrate_allocator_factory) { + video_bitrate_allocator_factory_ = + std::move(dependencies.video_bitrate_allocator_factory); + } else { + video_bitrate_allocator_factory_ = + CreateBuiltinVideoBitrateAllocatorFactory(); + } +} + +// ================================================================== +// Access to pc_ variables +cricket::ChannelManager* 
SdpOfferAnswerHandler::channel_manager() const { + return pc_->channel_manager(); +} +TransceiverList* SdpOfferAnswerHandler::transceivers() { + if (!pc_->rtp_manager()) { + return nullptr; + } + return pc_->rtp_manager()->transceivers(); +} +const TransceiverList* SdpOfferAnswerHandler::transceivers() const { + if (!pc_->rtp_manager()) { + return nullptr; + } + return pc_->rtp_manager()->transceivers(); +} +JsepTransportController* SdpOfferAnswerHandler::transport_controller() { + return pc_->transport_controller(); +} +const JsepTransportController* SdpOfferAnswerHandler::transport_controller() + const { + return pc_->transport_controller(); +} +DataChannelController* SdpOfferAnswerHandler::data_channel_controller() { + return pc_->data_channel_controller(); +} +const DataChannelController* SdpOfferAnswerHandler::data_channel_controller() + const { + return pc_->data_channel_controller(); +} +cricket::PortAllocator* SdpOfferAnswerHandler::port_allocator() { + return pc_->port_allocator(); +} +const cricket::PortAllocator* SdpOfferAnswerHandler::port_allocator() const { + return pc_->port_allocator(); +} +RtpTransmissionManager* SdpOfferAnswerHandler::rtp_manager() { + return pc_->rtp_manager(); +} +const RtpTransmissionManager* SdpOfferAnswerHandler::rtp_manager() const { + return pc_->rtp_manager(); +} + +// =================================================================== + +void SdpOfferAnswerHandler::PrepareForShutdown() { + RTC_DCHECK_RUN_ON(signaling_thread()); + weak_ptr_factory_.InvalidateWeakPtrs(); +} + +void SdpOfferAnswerHandler::Close() { + ChangeSignalingState(PeerConnectionInterface::kClosed); +} + +void SdpOfferAnswerHandler::RestartIce() { + RTC_DCHECK_RUN_ON(signaling_thread()); + local_ice_credentials_to_replace_->SetIceCredentialsFromLocalDescriptions( + current_local_description(), pending_local_description()); + UpdateNegotiationNeeded(); +} + +rtc::Thread* SdpOfferAnswerHandler::signaling_thread() const { + return pc_->signaling_thread(); 
+} + +void SdpOfferAnswerHandler::CreateOffer( + CreateSessionDescriptionObserver* observer, + const PeerConnectionInterface::RTCOfferAnswerOptions& options) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Chain this operation. If asynchronous operations are pending on the chain, + // this operation will be queued to be invoked, otherwise the contents of the + // lambda will execute immediately. + operations_chain_->ChainOperation( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), + observer_refptr = + rtc::scoped_refptr(observer), + options](std::function operations_chain_callback) { + // Abort early if |this_weak_ptr| is no longer valid. + if (!this_weak_ptr) { + observer_refptr->OnFailure( + RTCError(RTCErrorType::INTERNAL_ERROR, + "CreateOffer failed because the session was shut down")); + operations_chain_callback(); + return; + } + // The operation completes asynchronously when the wrapper is invoked. + rtc::scoped_refptr + observer_wrapper(new rtc::RefCountedObject< + CreateSessionDescriptionObserverOperationWrapper>( + std::move(observer_refptr), + std::move(operations_chain_callback))); + this_weak_ptr->DoCreateOffer(options, observer_wrapper); + }); +} + +void SdpOfferAnswerHandler::SetLocalDescription( + SetSessionDescriptionObserver* observer, + SessionDescriptionInterface* desc_ptr) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Chain this operation. If asynchronous operations are pending on the chain, + // this operation will be queued to be invoked, otherwise the contents of the + // lambda will execute immediately. + operations_chain_->ChainOperation( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), + observer_refptr = + rtc::scoped_refptr(observer), + desc = std::unique_ptr(desc_ptr)]( + std::function operations_chain_callback) mutable { + // Abort early if |this_weak_ptr| is no longer valid. 
+ if (!this_weak_ptr) { + // For consistency with SetSessionDescriptionObserverAdapter whose + // posted messages don't get processed when the PC is destroyed, we + // do not inform |observer_refptr| that the operation failed. + operations_chain_callback(); + return; + } + // SetSessionDescriptionObserverAdapter takes care of making sure the + // |observer_refptr| is invoked in a posted message. + this_weak_ptr->DoSetLocalDescription( + std::move(desc), + rtc::scoped_refptr( + new rtc::RefCountedObject( + this_weak_ptr, observer_refptr))); + // For backwards-compatibility reasons, we declare the operation as + // completed here (rather than in a post), so that the operation chain + // is not blocked by this operation when the observer is invoked. This + // allows the observer to trigger subsequent offer/answer operations + // synchronously if the operation chain is now empty. + operations_chain_callback(); + }); +} + +void SdpOfferAnswerHandler::SetLocalDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Chain this operation. If asynchronous operations are pending on the chain, + // this operation will be queued to be invoked, otherwise the contents of the + // lambda will execute immediately. + operations_chain_->ChainOperation( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), observer, + desc = std::move(desc)]( + std::function operations_chain_callback) mutable { + // Abort early if |this_weak_ptr| is no longer valid. + if (!this_weak_ptr) { + observer->OnSetLocalDescriptionComplete(RTCError( + RTCErrorType::INTERNAL_ERROR, + "SetLocalDescription failed because the session was shut down")); + operations_chain_callback(); + return; + } + this_weak_ptr->DoSetLocalDescription(std::move(desc), observer); + // DoSetLocalDescription() is implemented as a synchronous operation.
+ // The |observer| will already have been informed that it completed, and + // we can mark this operation as complete without any loose ends. + operations_chain_callback(); + }); +} + +void SdpOfferAnswerHandler::SetLocalDescription( + SetSessionDescriptionObserver* observer) { + RTC_DCHECK_RUN_ON(signaling_thread()); + SetLocalDescription( + new rtc::RefCountedObject( + weak_ptr_factory_.GetWeakPtr(), observer)); +} + +void SdpOfferAnswerHandler::SetLocalDescription( + rtc::scoped_refptr observer) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // The |create_sdp_observer| handles performing DoSetLocalDescription() with + // the resulting description as well as completing the operation. + rtc::scoped_refptr + create_sdp_observer( + new rtc::RefCountedObject( + weak_ptr_factory_.GetWeakPtr(), observer)); + // Chain this operation. If asynchronous operations are pending on the chain, + // this operation will be queued to be invoked, otherwise the contents of the + // lambda will execute immediately. + operations_chain_->ChainOperation( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), + create_sdp_observer](std::function operations_chain_callback) { + // The |create_sdp_observer| is responsible for completing the + // operation. + create_sdp_observer->SetOperationCompleteCallback( + std::move(operations_chain_callback)); + // Abort early if |this_weak_ptr| is no longer valid. This triggers the + // same code path as if DoCreateOffer() or DoCreateAnswer() failed. 
+ if (!this_weak_ptr) { + create_sdp_observer->OnFailure(RTCError( + RTCErrorType::INTERNAL_ERROR, + "SetLocalDescription failed because the session was shut down")); + return; + } + switch (this_weak_ptr->signaling_state()) { + case PeerConnectionInterface::kStable: + case PeerConnectionInterface::kHaveLocalOffer: + case PeerConnectionInterface::kHaveRemotePrAnswer: + // TODO(hbos): If [LastCreatedOffer] exists and still represents the + // current state of the system, use that instead of creating another + // offer. + this_weak_ptr->DoCreateOffer( + PeerConnectionInterface::RTCOfferAnswerOptions(), + create_sdp_observer); + break; + case PeerConnectionInterface::kHaveLocalPrAnswer: + case PeerConnectionInterface::kHaveRemoteOffer: + // TODO(hbos): If [LastCreatedAnswer] exists and still represents + // the current state of the system, use that instead of creating + // another answer. + this_weak_ptr->DoCreateAnswer( + PeerConnectionInterface::RTCOfferAnswerOptions(), + create_sdp_observer); + break; + case PeerConnectionInterface::kClosed: + create_sdp_observer->OnFailure(RTCError( + RTCErrorType::INVALID_STATE, + "SetLocalDescription called when PeerConnection is closed.")); + break; + } + }); +} + +RTCError SdpOfferAnswerHandler::ApplyLocalDescription( + std::unique_ptr desc, + const std::map& + bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::ApplyLocalDescription"); + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(desc); + + // Update stats here so that we have the most recent stats for tracks and + // streams that might be removed by updating the session description. + pc_->stats()->UpdateStats(PeerConnectionInterface::kStatsOutputLevelStandard); + + // Take a reference to the old local description since it's used below to + // compare against the new local description. 
When setting the new local + // description, grab ownership of the replaced session description in case it + // is the same as |old_local_description|, to keep it alive for the duration + // of the method. + const SessionDescriptionInterface* old_local_description = + local_description(); + std::unique_ptr replaced_local_description; + SdpType type = desc->GetType(); + if (type == SdpType::kAnswer) { + replaced_local_description = pending_local_description_ + ? std::move(pending_local_description_) + : std::move(current_local_description_); + current_local_description_ = std::move(desc); + pending_local_description_ = nullptr; + current_remote_description_ = std::move(pending_remote_description_); + } else { + replaced_local_description = std::move(pending_local_description_); + pending_local_description_ = std::move(desc); + } + // The session description to apply now must be accessed by + // |local_description()|. + RTC_DCHECK(local_description()); + + // Report statistics about any use of simulcast. + ReportSimulcastApiVersion(kSimulcastVersionApplyLocalDescription, + *local_description()->description()); + + if (!is_caller_) { + if (remote_description()) { + // Remote description was applied first, so this PC is the callee. + is_caller_ = false; + } else { + // Local description is applied first, so this PC is the caller. 
+ is_caller_ = true; + } + } + + RTCError error = PushdownTransportDescription(cricket::CS_LOCAL, type); + if (!error.ok()) { + return error; + } + + if (IsUnifiedPlan()) { + RTCError error = UpdateTransceiversAndDataChannels( + cricket::CS_LOCAL, *local_description(), old_local_description, + remote_description(), bundle_groups_by_mid); + if (!error.ok()) { + return error; + } + std::vector> remove_list; + std::vector> removed_streams; + for (const auto& transceiver_ext : transceivers()->List()) { + auto transceiver = transceiver_ext->internal(); + if (transceiver->stopped()) { + continue; + } + + // 2.2.7.1.1.(6-9): Set sender and receiver's transport slots. + // Note that code paths that don't set MID won't be able to use + // information about DTLS transports. + if (transceiver->mid()) { + auto dtls_transport = LookupDtlsTransportByMid( + pc_->network_thread(), transport_controller(), *transceiver->mid()); + transceiver->sender_internal()->set_transport(dtls_transport); + transceiver->receiver_internal()->set_transport(dtls_transport); + } + + const ContentInfo* content = + FindMediaSectionForTransceiver(transceiver, local_description()); + if (!content) { + continue; + } + const MediaContentDescription* media_desc = content->media_description(); + // 2.2.7.1.6: If description is of type "answer" or "pranswer", then run + // the following steps: + if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { + // 2.2.7.1.6.1: If direction is "sendonly" or "inactive", and + // transceiver's [[FiredDirection]] slot is either "sendrecv" or + // "recvonly", process the removal of a remote track for the media + // description, given transceiver, removeList, and muteTracks. 
+ if (!RtpTransceiverDirectionHasRecv(media_desc->direction()) && + (transceiver->fired_direction() && + RtpTransceiverDirectionHasRecv(*transceiver->fired_direction()))) { + ProcessRemovalOfRemoteTrack(transceiver_ext, &remove_list, + &removed_streams); + } + // 2.2.7.1.6.2: Set transceiver's [[CurrentDirection]] and + // [[FiredDirection]] slots to direction. + transceiver->set_current_direction(media_desc->direction()); + transceiver->set_fired_direction(media_desc->direction()); + } + } + auto observer = pc_->Observer(); + for (const auto& transceiver : remove_list) { + observer->OnRemoveTrack(transceiver->receiver()); + } + for (const auto& stream : removed_streams) { + observer->OnRemoveStream(stream); + } + } else { + // Media channels will be created only when offer is set. These may use new + // transports just created by PushdownTransportDescription. + if (type == SdpType::kOffer) { + // TODO(bugs.webrtc.org/4676) - Handle CreateChannel failure, as new local + // description is applied. Restore back to old description. + RTCError error = CreateChannels(*local_description()->description()); + if (!error.ok()) { + return error; + } + } + // Remove unused channels if MediaContentDescription is rejected. + RemoveUnusedChannels(local_description()->description()); + } + + error = UpdateSessionState(type, cricket::CS_LOCAL, + local_description()->description(), + bundle_groups_by_mid); + if (!error.ok()) { + return error; + } + + if (remote_description()) { + // Now that we have a local description, we can push down remote candidates. + UseCandidatesInSessionDescription(remote_description()); + } + + pending_ice_restarts_.clear(); + if (session_error() != SessionError::kNone) { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, GetSessionErrorMsg()); + } + + // If setting the description decided our SSL role, allocate any necessary + // SCTP sids. 
+ rtc::SSLRole role; + if (pc_->GetSctpSslRole(&role)) { + data_channel_controller()->AllocateSctpSids(role); + } + + if (IsUnifiedPlan()) { + // We must use List and not ListInternal here because + // transceivers()->StableState() is indexed by the non-internal refptr. + for (const auto& transceiver_ext : transceivers()->List()) { + auto transceiver = transceiver_ext->internal(); + if (transceiver->stopped()) { + continue; + } + const ContentInfo* content = + FindMediaSectionForTransceiver(transceiver, local_description()); + if (!content) { + continue; + } + cricket::ChannelInterface* channel = transceiver->channel(); + if (content->rejected || !channel || channel->local_streams().empty()) { + // 0 is a special value meaning "this sender has no associated send + // stream". Need to call this so the sender won't attempt to configure + // a no longer existing stream and run into DCHECKs in the lower + // layers. + transceiver->sender_internal()->SetSsrc(0); + } else { + // Get the StreamParams from the channel which could generate SSRCs. + const std::vector& streams = channel->local_streams(); + transceiver->sender_internal()->set_stream_ids(streams[0].stream_ids()); + auto encodings = transceiver->sender_internal()->init_send_encodings(); + transceiver->sender_internal()->SetSsrc(streams[0].first_ssrc()); + if (!encodings.empty()) { + transceivers() + ->StableState(transceiver_ext) + ->SetInitSendEncodings(encodings); + } + } + } + } else { + // Plan B semantics. + + // Update state and SSRC of local MediaStreams and DataChannels based on the + // local session description. 
+ const cricket::ContentInfo* audio_content = + GetFirstAudioContent(local_description()->description()); + if (audio_content) { + if (audio_content->rejected) { + RemoveSenders(cricket::MEDIA_TYPE_AUDIO); + } else { + const cricket::AudioContentDescription* audio_desc = + audio_content->media_description()->as_audio(); + UpdateLocalSenders(audio_desc->streams(), audio_desc->type()); + } + } + + const cricket::ContentInfo* video_content = + GetFirstVideoContent(local_description()->description()); + if (video_content) { + if (video_content->rejected) { + RemoveSenders(cricket::MEDIA_TYPE_VIDEO); + } else { + const cricket::VideoContentDescription* video_desc = + video_content->media_description()->as_video(); + UpdateLocalSenders(video_desc->streams(), video_desc->type()); + } + } + } + + // This function does nothing with data content. + + if (type == SdpType::kAnswer && + local_ice_credentials_to_replace_->SatisfiesIceRestart( + *current_local_description_)) { + local_ice_credentials_to_replace_->ClearIceCredentials(); + } + + return RTCError::OK(); +} + +void SdpOfferAnswerHandler::SetRemoteDescription( + SetSessionDescriptionObserver* observer, + SessionDescriptionInterface* desc_ptr) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Chain this operation. If asynchronous operations are pending on the chain, + // this operation will be queued to be invoked, otherwise the contents of the + // lambda will execute immediately. + operations_chain_->ChainOperation( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), + observer_refptr = + rtc::scoped_refptr(observer), + desc = std::unique_ptr(desc_ptr)]( + std::function operations_chain_callback) mutable { + // Abort early if |this_weak_ptr| is no longer valid. + if (!this_weak_ptr) { + // For consistency with SetSessionDescriptionObserverAdapter whose + // posted messages doesn't get processed when the PC is destroyed, we + // do not inform |observer_refptr| that the operation failed. 
+ operations_chain_callback(); + return; + } + // SetSessionDescriptionObserverAdapter takes care of making sure the + // |observer_refptr| is invoked in a posted message. + this_weak_ptr->DoSetRemoteDescription( + std::move(desc), + rtc::scoped_refptr( + new rtc::RefCountedObject( + this_weak_ptr, observer_refptr))); + // For backwards-compatibility reasons, we declare the operation as + // completed here (rather than in a post), so that the operation chain + // is not blocked by this operation when the observer is invoked. This + // allows the observer to trigger subsequent offer/answer operations + // synchronously if the operation chain is now empty. + operations_chain_callback(); + }); +} + +void SdpOfferAnswerHandler::SetRemoteDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Chain this operation. If asynchronous operations are pending on the chain, + // this operation will be queued to be invoked, otherwise the contents of the + // lambda will execute immediately. + operations_chain_->ChainOperation( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), observer, + desc = std::move(desc)]( + std::function operations_chain_callback) mutable { + // Abort early if |this_weak_ptr| is no longer valid. + if (!this_weak_ptr) { + observer->OnSetRemoteDescriptionComplete(RTCError( + RTCErrorType::INTERNAL_ERROR, + "SetRemoteDescription failed because the session was shut down")); + operations_chain_callback(); + return; + } + this_weak_ptr->DoSetRemoteDescription(std::move(desc), + std::move(observer)); + // DoSetRemoteDescription() is implemented as a synchronous operation. + // The |observer| will already have been informed that it completed, and + // we can mark this operation as complete without any loose ends.
+ operations_chain_callback(); + }); +} + +RTCError SdpOfferAnswerHandler::ApplyRemoteDescription( + std::unique_ptr desc, + const std::map& + bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::ApplyRemoteDescription"); + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(desc); + + // Update stats here so that we have the most recent stats for tracks and + // streams that might be removed by updating the session description. + pc_->stats()->UpdateStats(PeerConnectionInterface::kStatsOutputLevelStandard); + + // Take a reference to the old remote description since it's used below to + // compare against the new remote description. When setting the new remote + // description, grab ownership of the replaced session description in case it + // is the same as |old_remote_description|, to keep it alive for the duration + // of the method. + const SessionDescriptionInterface* old_remote_description = + remote_description(); + std::unique_ptr replaced_remote_description; + SdpType type = desc->GetType(); + if (type == SdpType::kAnswer) { + replaced_remote_description = pending_remote_description_ + ? std::move(pending_remote_description_) + : std::move(current_remote_description_); + current_remote_description_ = std::move(desc); + pending_remote_description_ = nullptr; + current_local_description_ = std::move(pending_local_description_); + } else { + replaced_remote_description = std::move(pending_remote_description_); + pending_remote_description_ = std::move(desc); + } + // The session description to apply now must be accessed by + // |remote_description()|. + RTC_DCHECK(remote_description()); + + // Report statistics about any use of simulcast. + ReportSimulcastApiVersion(kSimulcastVersionApplyRemoteDescription, + *remote_description()->description()); + + RTCError error = PushdownTransportDescription(cricket::CS_REMOTE, type); + if (!error.ok()) { + return error; + } + // Transport and Media channels will be created only when offer is set. 
+ if (IsUnifiedPlan()) { + RTCError error = UpdateTransceiversAndDataChannels( + cricket::CS_REMOTE, *remote_description(), local_description(), + old_remote_description, bundle_groups_by_mid); + if (!error.ok()) { + return error; + } + } else { + // Media channels will be created only when offer is set. These may use new + // transports just created by PushdownTransportDescription. + if (type == SdpType::kOffer) { + // TODO(mallinath) - Handle CreateChannel failure, as new local + // description is applied. Restore back to old description. + RTCError error = CreateChannels(*remote_description()->description()); + if (!error.ok()) { + return error; + } + } + // Remove unused channels if MediaContentDescription is rejected. + RemoveUnusedChannels(remote_description()->description()); + } + + // NOTE: Candidates allocation will be initiated only when + // SetLocalDescription is called. + error = UpdateSessionState(type, cricket::CS_REMOTE, + remote_description()->description(), + bundle_groups_by_mid); + if (!error.ok()) { + return error; + } + + if (local_description() && + !UseCandidatesInSessionDescription(remote_description())) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, kInvalidCandidates); + } + + if (old_remote_description) { + for (const cricket::ContentInfo& content : + old_remote_description->description()->contents()) { + // Check if this new SessionDescription contains new ICE ufrag and + // password that indicates the remote peer requests an ICE restart. + // TODO(deadbeef): When we start storing both the current and pending + // remote description, this should reset pending_ice_restarts and compare + // against the current description. + if (CheckForRemoteIceRestart(old_remote_description, remote_description(), + content.name)) { + if (type == SdpType::kOffer) { + pending_ice_restarts_.insert(content.name); + } + } else { + // We retain all received candidates only if ICE is not restarted. 
+ // When ICE is restarted, all previous candidates belong to an old + // generation and should not be kept. + // TODO(deadbeef): This goes against the W3C spec which says the remote + // description should only contain candidates from the last set remote + // description plus any candidates added since then. We should remove + // this once we're sure it won't break anything. + WebRtcSessionDescriptionFactory::CopyCandidatesFromSessionDescription( + old_remote_description, content.name, mutable_remote_description()); + } + } + } + + if (session_error() != SessionError::kNone) { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, GetSessionErrorMsg()); + } + + // Set the ICE connection state to connecting since the connection may + // become writable with peer reflexive candidates before any remote candidate + // is signaled. + // TODO(pthatcher): This is a short-term solution for crbug/446908. A real fix + // is to have a new signal that indicates a change in checking state from the + // transport and expose a new checking() member from transport that can be + // read to determine the current checking state. The existing SignalConnecting + // actually means "gathering candidates", so cannot be used here. + if (remote_description()->GetType() != SdpType::kOffer && + remote_description()->number_of_mediasections() > 0u && + pc_->ice_connection_state() == + PeerConnectionInterface::kIceConnectionNew) { + pc_->SetIceConnectionState(PeerConnectionInterface::kIceConnectionChecking); + } + + // If setting the description decided our SSL role, allocate any necessary + // SCTP sids.
+ rtc::SSLRole role; + if (pc_->GetSctpSslRole(&role)) { + data_channel_controller()->AllocateSctpSids(role); + } + + if (IsUnifiedPlan()) { + std::vector> + now_receiving_transceivers; + std::vector> remove_list; + std::vector> added_streams; + std::vector> removed_streams; + for (const auto& transceiver_ext : transceivers()->List()) { + const auto transceiver = transceiver_ext->internal(); + const ContentInfo* content = + FindMediaSectionForTransceiver(transceiver, remote_description()); + if (!content) { + continue; + } + const MediaContentDescription* media_desc = content->media_description(); + RtpTransceiverDirection local_direction = + RtpTransceiverDirectionReversed(media_desc->direction()); + // Roughly the same as steps 2.2.8.6 of section 4.4.1.6 "Set the + // RTCSessionDescription: Set the associated remote streams given + // transceiver.[[Receiver]], msids, addList, and removeList". + // https://w3c.github.io/webrtc-pc/#set-the-rtcsessiondescription + if (RtpTransceiverDirectionHasRecv(local_direction)) { + std::vector stream_ids; + if (!media_desc->streams().empty()) { + // The remote description has signaled the stream IDs. + stream_ids = media_desc->streams()[0].stream_ids(); + } + transceivers() + ->StableState(transceiver_ext) + ->SetRemoteStreamIdsIfUnset(transceiver->receiver()->stream_ids()); + + RTC_LOG(LS_INFO) << "Processing the MSIDs for MID=" << content->name + << " (" << GetStreamIdsString(stream_ids) << ")."; + SetAssociatedRemoteStreams(transceiver->receiver_internal(), stream_ids, + &added_streams, &removed_streams); + // From the WebRTC specification, steps 2.2.8.5/6 of section 4.4.1.6 + // "Set the RTCSessionDescription: If direction is sendrecv or recvonly, + // and transceiver's current direction is neither sendrecv nor recvonly, + // process the addition of a remote track for the media description. 
+ if (!transceiver->fired_direction() || + !RtpTransceiverDirectionHasRecv(*transceiver->fired_direction())) { + RTC_LOG(LS_INFO) + << "Processing the addition of a remote track for MID=" + << content->name << "."; + now_receiving_transceivers.push_back(transceiver); + } + } + // 2.2.8.1.9: If direction is "sendonly" or "inactive", and transceiver's + // [[FiredDirection]] slot is either "sendrecv" or "recvonly", process the + // removal of a remote track for the media description, given transceiver, + // removeList, and muteTracks. + if (!RtpTransceiverDirectionHasRecv(local_direction) && + (transceiver->fired_direction() && + RtpTransceiverDirectionHasRecv(*transceiver->fired_direction()))) { + ProcessRemovalOfRemoteTrack(transceiver_ext, &remove_list, + &removed_streams); + } + // 2.2.8.1.10: Set transceiver's [[FiredDirection]] slot to direction. + transceiver->set_fired_direction(local_direction); + // 2.2.8.1.11: If description is of type "answer" or "pranswer", then run + // the following steps: + if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { + // 2.2.8.1.11.1: Set transceiver's [[CurrentDirection]] slot to + // direction. + transceiver->set_current_direction(local_direction); + // 2.2.8.1.11.[3-6]: Set the transport internal slots. + if (transceiver->mid()) { + auto dtls_transport = LookupDtlsTransportByMid(pc_->network_thread(), + transport_controller(), + *transceiver->mid()); + transceiver->sender_internal()->set_transport(dtls_transport); + transceiver->receiver_internal()->set_transport(dtls_transport); + } + } + // 2.2.8.1.12: If the media description is rejected, and transceiver is + // not already stopped, stop the RTCRtpTransceiver transceiver. 
+ if (content->rejected && !transceiver->stopped()) { + RTC_LOG(LS_INFO) << "Stopping transceiver for MID=" << content->name + << " since the media section was rejected."; + transceiver->StopTransceiverProcedure(); + } + if (!content->rejected && + RtpTransceiverDirectionHasRecv(local_direction)) { + if (!media_desc->streams().empty() && + media_desc->streams()[0].has_ssrcs()) { + uint32_t ssrc = media_desc->streams()[0].first_ssrc(); + transceiver->receiver_internal()->SetupMediaChannel(ssrc); + } else { + transceiver->receiver_internal()->SetupUnsignaledMediaChannel(); + } + } + } + // Once all processing has finished, fire off callbacks. + auto observer = pc_->Observer(); + for (const auto& transceiver : now_receiving_transceivers) { + pc_->stats()->AddTrack(transceiver->receiver()->track()); + observer->OnTrack(transceiver); + observer->OnAddTrack(transceiver->receiver(), + transceiver->receiver()->streams()); + } + for (const auto& stream : added_streams) { + observer->OnAddStream(stream); + } + for (const auto& transceiver : remove_list) { + observer->OnRemoveTrack(transceiver->receiver()); + } + for (const auto& stream : removed_streams) { + observer->OnRemoveStream(stream); + } + } + + const cricket::ContentInfo* audio_content = + GetFirstAudioContent(remote_description()->description()); + const cricket::ContentInfo* video_content = + GetFirstVideoContent(remote_description()->description()); + const cricket::AudioContentDescription* audio_desc = + GetFirstAudioContentDescription(remote_description()->description()); + const cricket::VideoContentDescription* video_desc = + GetFirstVideoContentDescription(remote_description()->description()); + + // Check if the descriptions include streams, just in case the peer supports + // MSID, but doesn't indicate so with "a=msid-semantic". 
+ if (remote_description()->description()->msid_supported() || + (audio_desc && !audio_desc->streams().empty()) || + (video_desc && !video_desc->streams().empty())) { + remote_peer_supports_msid_ = true; + } + + // We wait to signal new streams until we finish processing the description, + // since only at that point will new streams have all their tracks. + rtc::scoped_refptr new_streams(StreamCollection::Create()); + + if (!IsUnifiedPlan()) { + // TODO(steveanton): When removing RTP senders/receivers in response to a + // rejected media section, there is some cleanup logic that expects the + // voice/ video channel to still be set. But in this method the voice/video + // channel would have been destroyed by the SetRemoteDescription caller + // above so the cleanup that relies on them fails to run. The RemoveSenders + // calls should be moved to right before the DestroyChannel calls to fix + // this. + + // Find all audio rtp streams and create corresponding remote AudioTracks + // and MediaStreams. + if (audio_content) { + if (audio_content->rejected) { + RemoveSenders(cricket::MEDIA_TYPE_AUDIO); + } else { + bool default_audio_track_needed = + !remote_peer_supports_msid_ && + RtpTransceiverDirectionHasSend(audio_desc->direction()); + UpdateRemoteSendersList(GetActiveStreams(audio_desc), + default_audio_track_needed, audio_desc->type(), + new_streams); + } + } + + // Find all video rtp streams and create corresponding remote VideoTracks + // and MediaStreams. + if (video_content) { + if (video_content->rejected) { + RemoveSenders(cricket::MEDIA_TYPE_VIDEO); + } else { + bool default_video_track_needed = + !remote_peer_supports_msid_ && + RtpTransceiverDirectionHasSend(video_desc->direction()); + UpdateRemoteSendersList(GetActiveStreams(video_desc), + default_video_track_needed, video_desc->type(), + new_streams); + } + } + + // Iterate new_streams and notify the observer about new MediaStreams. 
+ auto observer = pc_->Observer(); + for (size_t i = 0; i < new_streams->count(); ++i) { + MediaStreamInterface* new_stream = new_streams->at(i); + pc_->stats()->AddStream(new_stream); + observer->OnAddStream( + rtc::scoped_refptr(new_stream)); + } + + UpdateEndedRemoteMediaStreams(); + } + + if (type == SdpType::kAnswer && + local_ice_credentials_to_replace_->SatisfiesIceRestart( + *current_local_description_)) { + local_ice_credentials_to_replace_->ClearIceCredentials(); + } + + return RTCError::OK(); +} + +void SdpOfferAnswerHandler::DoSetLocalDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer) { + RTC_DCHECK_RUN_ON(signaling_thread()); + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::DoSetLocalDescription"); + + if (!observer) { + RTC_LOG(LS_ERROR) << "SetLocalDescription - observer is NULL."; + return; + } + + if (!desc) { + observer->OnSetLocalDescriptionComplete( + RTCError(RTCErrorType::INTERNAL_ERROR, "SessionDescription is NULL.")); + return; + } + + // If a session error has occurred the PeerConnection is in a possibly + // inconsistent state so fail right away. + if (session_error() != SessionError::kNone) { + std::string error_message = GetSessionErrorMsg(); + RTC_LOG(LS_ERROR) << "SetLocalDescription: " << error_message; + observer->OnSetLocalDescriptionComplete( + RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); + return; + } + + // For SLD we support only explicit rollback. 
+ if (desc->GetType() == SdpType::kRollback) { + if (IsUnifiedPlan()) { + observer->OnSetLocalDescriptionComplete(Rollback(desc->GetType())); + } else { + observer->OnSetLocalDescriptionComplete( + RTCError(RTCErrorType::UNSUPPORTED_OPERATION, + "Rollback not supported in Plan B")); + } + return; + } + + std::map bundle_groups_by_mid = + GetBundleGroupsByMid(desc->description()); + RTCError error = ValidateSessionDescription(desc.get(), cricket::CS_LOCAL, + bundle_groups_by_mid); + if (!error.ok()) { + std::string error_message = GetSetDescriptionErrorMessage( + cricket::CS_LOCAL, desc->GetType(), error); + RTC_LOG(LS_ERROR) << error_message; + observer->OnSetLocalDescriptionComplete( + RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); + return; + } + + // Grab the description type before moving ownership to ApplyLocalDescription, + // which may destroy it before returning. + const SdpType type = desc->GetType(); + + error = ApplyLocalDescription(std::move(desc), bundle_groups_by_mid); + // |desc| may be destroyed at this point. + + if (!error.ok()) { + // If ApplyLocalDescription fails, the PeerConnection could be in an + // inconsistent state, so act conservatively here and set the session error + // so that future calls to SetLocalDescription/SetRemoteDescription fail. + SetSessionError(SessionError::kContent, error.message()); + std::string error_message = + GetSetDescriptionErrorMessage(cricket::CS_LOCAL, type, error); + RTC_LOG(LS_ERROR) << error_message; + observer->OnSetLocalDescriptionComplete( + RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); + return; + } + RTC_DCHECK(local_description()); + + if (local_description()->GetType() == SdpType::kAnswer) { + RemoveStoppedTransceivers(); + + // TODO(deadbeef): We already had to hop to the network thread for + // MaybeStartGathering... 
+ pc_->network_thread()->Invoke( + RTC_FROM_HERE, [this] { port_allocator()->DiscardCandidatePool(); }); + // Make UMA notes about what was agreed to. + ReportNegotiatedSdpSemantics(*local_description()); + } + + observer->OnSetLocalDescriptionComplete(RTCError::OK()); + pc_->NoteUsageEvent(UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED); + + // Check if negotiation is needed. We must do this after informing the + // observer that SetLocalDescription() has completed to ensure negotiation is + // not needed prior to the promise resolving. + if (IsUnifiedPlan()) { + bool was_negotiation_needed = is_negotiation_needed_; + UpdateNegotiationNeeded(); + if (signaling_state() == PeerConnectionInterface::kStable && + was_negotiation_needed && is_negotiation_needed_) { + // Legacy version. + pc_->Observer()->OnRenegotiationNeeded(); + // Spec-compliant version; the event may get invalidated before firing. + GenerateNegotiationNeededEvent(); + } + } + + // MaybeStartGathering needs to be called after informing the observer so that + // we don't signal any candidates before signaling that SetLocalDescription + // completed. + transport_controller()->MaybeStartGathering(); +} + +void SdpOfferAnswerHandler::DoCreateOffer( + const PeerConnectionInterface::RTCOfferAnswerOptions& options, + rtc::scoped_refptr observer) { + RTC_DCHECK_RUN_ON(signaling_thread()); + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::DoCreateOffer"); + + if (!observer) { + RTC_LOG(LS_ERROR) << "CreateOffer - observer is NULL."; + return; + } + + if (pc_->IsClosed()) { + std::string error = "CreateOffer called when PeerConnection is closed."; + RTC_LOG(LS_ERROR) << error; + pc_->message_handler()->PostCreateSessionDescriptionFailure( + observer, RTCError(RTCErrorType::INVALID_STATE, std::move(error))); + return; + } + + // If a session error has occurred the PeerConnection is in a possibly + // inconsistent state so fail right away. 
+ if (session_error() != SessionError::kNone) { + std::string error_message = GetSessionErrorMsg(); + RTC_LOG(LS_ERROR) << "CreateOffer: " << error_message; + pc_->message_handler()->PostCreateSessionDescriptionFailure( + observer, + RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); + return; + } + + if (!ValidateOfferAnswerOptions(options)) { + std::string error = "CreateOffer called with invalid options."; + RTC_LOG(LS_ERROR) << error; + pc_->message_handler()->PostCreateSessionDescriptionFailure( + observer, RTCError(RTCErrorType::INVALID_PARAMETER, std::move(error))); + return; + } + + // Legacy handling for offer_to_receive_audio and offer_to_receive_video. + // Specified in WebRTC section 4.4.3.2 "Legacy configuration extensions". + if (IsUnifiedPlan()) { + RTCError error = HandleLegacyOfferOptions(options); + if (!error.ok()) { + pc_->message_handler()->PostCreateSessionDescriptionFailure( + observer, std::move(error)); + return; + } + } + + cricket::MediaSessionOptions session_options; + GetOptionsForOffer(options, &session_options); + webrtc_session_desc_factory_->CreateOffer(observer, options, session_options); +} + +void SdpOfferAnswerHandler::CreateAnswer( + CreateSessionDescriptionObserver* observer, + const PeerConnectionInterface::RTCOfferAnswerOptions& options) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::CreateAnswer"); + RTC_DCHECK_RUN_ON(signaling_thread()); + // Chain this operation. If asynchronous operations are pending on the chain, + // this operation will be queued to be invoked, otherwise the contents of the + // lambda will execute immediately. + operations_chain_->ChainOperation( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), + observer_refptr = + rtc::scoped_refptr(observer), + options](std::function operations_chain_callback) { + // Abort early if |this_weak_ptr| is no longer valid. 
+ if (!this_weak_ptr) { + observer_refptr->OnFailure(RTCError( + RTCErrorType::INTERNAL_ERROR, + "CreateAnswer failed because the session was shut down")); + operations_chain_callback(); + return; + } + // The operation completes asynchronously when the wrapper is invoked. + rtc::scoped_refptr + observer_wrapper(new rtc::RefCountedObject< + CreateSessionDescriptionObserverOperationWrapper>( + std::move(observer_refptr), + std::move(operations_chain_callback))); + this_weak_ptr->DoCreateAnswer(options, observer_wrapper); + }); +} + +void SdpOfferAnswerHandler::DoCreateAnswer( + const PeerConnectionInterface::RTCOfferAnswerOptions& options, + rtc::scoped_refptr observer) { + RTC_DCHECK_RUN_ON(signaling_thread()); + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::DoCreateAnswer"); + if (!observer) { + RTC_LOG(LS_ERROR) << "CreateAnswer - observer is NULL."; + return; + } + + // If a session error has occurred the PeerConnection is in a possibly + // inconsistent state so fail right away. + if (session_error() != SessionError::kNone) { + std::string error_message = GetSessionErrorMsg(); + RTC_LOG(LS_ERROR) << "CreateAnswer: " << error_message; + pc_->message_handler()->PostCreateSessionDescriptionFailure( + observer, + RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); + return; + } + + if (!(signaling_state_ == PeerConnectionInterface::kHaveRemoteOffer || + signaling_state_ == PeerConnectionInterface::kHaveLocalPrAnswer)) { + std::string error = + "PeerConnection cannot create an answer in a state other than " + "have-remote-offer or have-local-pranswer."; + RTC_LOG(LS_ERROR) << error; + pc_->message_handler()->PostCreateSessionDescriptionFailure( + observer, RTCError(RTCErrorType::INVALID_STATE, std::move(error))); + return; + } + + // The remote description should be set if we're in the right state. 
+ RTC_DCHECK(remote_description()); + + if (IsUnifiedPlan()) { + if (options.offer_to_receive_audio != + PeerConnectionInterface::RTCOfferAnswerOptions::kUndefined) { + RTC_LOG(LS_WARNING) << "CreateAnswer: offer_to_receive_audio is not " + "supported with Unified Plan semantics. Use the " + "RtpTransceiver API instead."; + } + if (options.offer_to_receive_video != + PeerConnectionInterface::RTCOfferAnswerOptions::kUndefined) { + RTC_LOG(LS_WARNING) << "CreateAnswer: offer_to_receive_video is not " + "supported with Unified Plan semantics. Use the " + "RtpTransceiver API instead."; + } + } + + cricket::MediaSessionOptions session_options; + GetOptionsForAnswer(options, &session_options); + webrtc_session_desc_factory_->CreateAnswer(observer, session_options); +} + +void SdpOfferAnswerHandler::DoSetRemoteDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer) { + RTC_DCHECK_RUN_ON(signaling_thread()); + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::DoSetRemoteDescription"); + + if (!observer) { + RTC_LOG(LS_ERROR) << "SetRemoteDescription - observer is NULL."; + return; + } + + if (!desc) { + observer->OnSetRemoteDescriptionComplete(RTCError( + RTCErrorType::INVALID_PARAMETER, "SessionDescription is NULL.")); + return; + } + + // If a session error has occurred the PeerConnection is in a possibly + // inconsistent state so fail right away. + if (session_error() != SessionError::kNone) { + std::string error_message = GetSessionErrorMsg(); + RTC_LOG(LS_ERROR) << "SetRemoteDescription: " << error_message; + observer->OnSetRemoteDescriptionComplete( + RTCError(RTCErrorType::INTERNAL_ERROR, std::move(error_message))); + return; + } + if (IsUnifiedPlan()) { + if (pc_->configuration()->enable_implicit_rollback) { + if (desc->GetType() == SdpType::kOffer && + signaling_state() == PeerConnectionInterface::kHaveLocalOffer) { + Rollback(desc->GetType()); + } + } + // Explicit rollback. 
+ if (desc->GetType() == SdpType::kRollback) { + observer->OnSetRemoteDescriptionComplete(Rollback(desc->GetType())); + return; + } + } else if (desc->GetType() == SdpType::kRollback) { + observer->OnSetRemoteDescriptionComplete( + RTCError(RTCErrorType::UNSUPPORTED_OPERATION, + "Rollback not supported in Plan B")); + return; + } + if (desc->GetType() == SdpType::kOffer || + desc->GetType() == SdpType::kAnswer) { + // Report to UMA the format of the received offer or answer. + pc_->ReportSdpFormatReceived(*desc); + pc_->ReportSdpBundleUsage(*desc); + } + + // Handle remote descriptions missing a=mid lines for interop with legacy end + // points. + FillInMissingRemoteMids(desc->description()); + + std::map bundle_groups_by_mid = + GetBundleGroupsByMid(desc->description()); + RTCError error = ValidateSessionDescription(desc.get(), cricket::CS_REMOTE, + bundle_groups_by_mid); + if (!error.ok()) { + std::string error_message = GetSetDescriptionErrorMessage( + cricket::CS_REMOTE, desc->GetType(), error); + RTC_LOG(LS_ERROR) << error_message; + observer->OnSetRemoteDescriptionComplete( + RTCError(error.type(), std::move(error_message))); + return; + } + + // Grab the description type before moving ownership to + // ApplyRemoteDescription, which may destroy it before returning. + const SdpType type = desc->GetType(); + + error = ApplyRemoteDescription(std::move(desc), bundle_groups_by_mid); + // |desc| may be destroyed at this point. + + if (!error.ok()) { + // If ApplyRemoteDescription fails, the PeerConnection could be in an + // inconsistent state, so act conservatively here and set the session error + // so that future calls to SetLocalDescription/SetRemoteDescription fail. 
+ SetSessionError(SessionError::kContent, error.message()); + std::string error_message = + GetSetDescriptionErrorMessage(cricket::CS_REMOTE, type, error); + RTC_LOG(LS_ERROR) << error_message; + observer->OnSetRemoteDescriptionComplete( + RTCError(error.type(), std::move(error_message))); + return; + } + RTC_DCHECK(remote_description()); + + if (type == SdpType::kAnswer) { + RemoveStoppedTransceivers(); + // TODO(deadbeef): We already had to hop to the network thread for + // MaybeStartGathering... + pc_->network_thread()->Invoke( + RTC_FROM_HERE, [this] { port_allocator()->DiscardCandidatePool(); }); + // Make UMA notes about what was agreed to. + ReportNegotiatedSdpSemantics(*remote_description()); + } + + observer->OnSetRemoteDescriptionComplete(RTCError::OK()); + pc_->NoteUsageEvent(UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED); + + // Check if negotiation is needed. We must do this after informing the + // observer that SetRemoteDescription() has completed to ensure negotiation is + // not needed prior to the promise resolving. + if (IsUnifiedPlan()) { + bool was_negotiation_needed = is_negotiation_needed_; + UpdateNegotiationNeeded(); + if (signaling_state() == PeerConnectionInterface::kStable && + was_negotiation_needed && is_negotiation_needed_) { + // Legacy version. + pc_->Observer()->OnRenegotiationNeeded(); + // Spec-compliant version; the event may get invalidated before firing. 
+ GenerateNegotiationNeededEvent(); + } + } +} + +void SdpOfferAnswerHandler::SetAssociatedRemoteStreams( + rtc::scoped_refptr receiver, + const std::vector& stream_ids, + std::vector>* added_streams, + std::vector>* removed_streams) { + RTC_DCHECK_RUN_ON(signaling_thread()); + std::vector> media_streams; + for (const std::string& stream_id : stream_ids) { + rtc::scoped_refptr stream = + remote_streams_->find(stream_id); + if (!stream) { + stream = MediaStreamProxy::Create(rtc::Thread::Current(), + MediaStream::Create(stream_id)); + remote_streams_->AddStream(stream); + added_streams->push_back(stream); + } + media_streams.push_back(stream); + } + // Special case: "a=msid" missing, use random stream ID. + if (media_streams.empty() && + !(remote_description()->description()->msid_signaling() & + cricket::kMsidSignalingMediaSection)) { + if (!missing_msid_default_stream_) { + missing_msid_default_stream_ = MediaStreamProxy::Create( + rtc::Thread::Current(), MediaStream::Create(rtc::CreateRandomUuid())); + added_streams->push_back(missing_msid_default_stream_); + } + media_streams.push_back(missing_msid_default_stream_); + } + std::vector> previous_streams = + receiver->streams(); + // SetStreams() will add/remove the receiver's track to/from the streams. This + // differs from the spec - the spec uses an "addList" and "removeList" to + // update the stream-track relationships in a later step. We do this earlier, + // changing the order of things, but the end-result is the same. + // TODO(hbos): When we remove remote_streams(), use set_stream_ids() + // instead. 
https://crbug.com/webrtc/9480 + receiver->SetStreams(media_streams); + RemoveRemoteStreamsIfEmpty(previous_streams, removed_streams); +} + +bool SdpOfferAnswerHandler::AddIceCandidate( + const IceCandidateInterface* ice_candidate) { + const AddIceCandidateResult result = AddIceCandidateInternal(ice_candidate); + NoteAddIceCandidateResult(result); + // If the return value is kAddIceCandidateFailNotReady, the candidate has been + // added, although not 'ready', but that's a success. + return result == kAddIceCandidateSuccess || + result == kAddIceCandidateFailNotReady; +} + +AddIceCandidateResult SdpOfferAnswerHandler::AddIceCandidateInternal( + const IceCandidateInterface* ice_candidate) { + RTC_DCHECK_RUN_ON(signaling_thread()); + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::AddIceCandidate"); + if (pc_->IsClosed()) { + RTC_LOG(LS_ERROR) << "AddIceCandidate: PeerConnection is closed."; + return kAddIceCandidateFailClosed; + } + + if (!remote_description()) { + RTC_LOG(LS_ERROR) << "AddIceCandidate: ICE candidates can't be added " + "without any remote session description."; + return kAddIceCandidateFailNoRemoteDescription; + } + + if (!ice_candidate) { + RTC_LOG(LS_ERROR) << "AddIceCandidate: Candidate is null."; + return kAddIceCandidateFailNullCandidate; + } + + bool valid = false; + bool ready = ReadyToUseRemoteCandidate(ice_candidate, nullptr, &valid); + if (!valid) { + return kAddIceCandidateFailNotValid; + } + + // Add this candidate to the remote session description. 
+ if (!mutable_remote_description()->AddCandidate(ice_candidate)) { + RTC_LOG(LS_ERROR) << "AddIceCandidate: Candidate cannot be used."; + return kAddIceCandidateFailInAddition; + } + + if (!ready) { + RTC_LOG(LS_INFO) << "AddIceCandidate: Not ready to use candidate."; + return kAddIceCandidateFailNotReady; + } + + if (!UseCandidate(ice_candidate)) { + return kAddIceCandidateFailNotUsable; + } + + pc_->NoteUsageEvent(UsageEvent::ADD_ICE_CANDIDATE_SUCCEEDED); + + return kAddIceCandidateSuccess; +} + +void SdpOfferAnswerHandler::AddIceCandidate( + std::unique_ptr candidate, + std::function callback) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::AddIceCandidate"); + RTC_DCHECK_RUN_ON(signaling_thread()); + // Chain this operation. If asynchronous operations are pending on the chain, + // this operation will be queued to be invoked, otherwise the contents of the + // lambda will execute immediately. + operations_chain_->ChainOperation( + [this_weak_ptr = weak_ptr_factory_.GetWeakPtr(), + candidate = std::move(candidate), callback = std::move(callback)]( + std::function operations_chain_callback) { + auto result = + this_weak_ptr + ? this_weak_ptr->AddIceCandidateInternal(candidate.get()) + : kAddIceCandidateFailClosed; + NoteAddIceCandidateResult(result); + operations_chain_callback(); + if (result == kAddIceCandidateFailClosed) { + callback(RTCError( + RTCErrorType::INVALID_STATE, + "AddIceCandidate failed because the session was shut down")); + } else if (result != kAddIceCandidateSuccess && + result != kAddIceCandidateFailNotReady) { + // Fail with an error type and message consistent with Chromium. + // TODO(hbos): Fail with error types according to spec. 
+ callback(RTCError(RTCErrorType::UNSUPPORTED_OPERATION, + "Error processing ICE candidate")); + } else { + callback(RTCError::OK()); + } + }); +} + +bool SdpOfferAnswerHandler::RemoveIceCandidates( + const std::vector& candidates) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::RemoveIceCandidates"); + RTC_DCHECK_RUN_ON(signaling_thread()); + if (pc_->IsClosed()) { + RTC_LOG(LS_ERROR) << "RemoveIceCandidates: PeerConnection is closed."; + return false; + } + + if (!remote_description()) { + RTC_LOG(LS_ERROR) << "RemoveIceCandidates: ICE candidates can't be removed " + "without any remote session description."; + return false; + } + + if (candidates.empty()) { + RTC_LOG(LS_ERROR) << "RemoveIceCandidates: candidates are empty."; + return false; + } + + size_t number_removed = + mutable_remote_description()->RemoveCandidates(candidates); + if (number_removed != candidates.size()) { + RTC_LOG(LS_ERROR) + << "RemoveIceCandidates: Failed to remove candidates. Requested " + << candidates.size() << " but only " << number_removed + << " are removed."; + } + + // Remove the candidates from the transport controller. + RTCError error = transport_controller()->RemoveRemoteCandidates(candidates); + if (!error.ok()) { + RTC_LOG(LS_ERROR) + << "RemoveIceCandidates: Error when removing remote candidates: " + << error.message(); + } + return true; +} + +void SdpOfferAnswerHandler::AddLocalIceCandidate( + const JsepIceCandidate* candidate) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (local_description()) { + mutable_local_description()->AddCandidate(candidate); + } +} + +void SdpOfferAnswerHandler::RemoveLocalIceCandidates( + const std::vector& candidates) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (local_description()) { + mutable_local_description()->RemoveCandidates(candidates); + } +} + +const SessionDescriptionInterface* SdpOfferAnswerHandler::local_description() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return pending_local_description_ ? 
pending_local_description_.get() + : current_local_description_.get(); +} + +const SessionDescriptionInterface* SdpOfferAnswerHandler::remote_description() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return pending_remote_description_ ? pending_remote_description_.get() + : current_remote_description_.get(); +} + +const SessionDescriptionInterface* +SdpOfferAnswerHandler::current_local_description() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return current_local_description_.get(); +} + +const SessionDescriptionInterface* +SdpOfferAnswerHandler::current_remote_description() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return current_remote_description_.get(); +} + +const SessionDescriptionInterface* +SdpOfferAnswerHandler::pending_local_description() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return pending_local_description_.get(); +} + +const SessionDescriptionInterface* +SdpOfferAnswerHandler::pending_remote_description() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return pending_remote_description_.get(); +} + +PeerConnectionInterface::SignalingState SdpOfferAnswerHandler::signaling_state() + const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return signaling_state_; +} + +void SdpOfferAnswerHandler::ChangeSignalingState( + PeerConnectionInterface::SignalingState signaling_state) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::ChangeSignalingState"); + RTC_DCHECK_RUN_ON(signaling_thread()); + if (signaling_state_ == signaling_state) { + return; + } + RTC_LOG(LS_INFO) << "Session: " << pc_->session_id() << " Old state: " + << GetSignalingStateString(signaling_state_) + << " New state: " + << GetSignalingStateString(signaling_state); + signaling_state_ = signaling_state; + pc_->Observer()->OnSignalingChange(signaling_state_); +} + +RTCError SdpOfferAnswerHandler::UpdateSessionState( + SdpType type, + cricket::ContentSource source, + const cricket::SessionDescription* description, + const std::map& + bundle_groups_by_mid) { 
+ RTC_DCHECK_RUN_ON(signaling_thread()); + + // If there's already a pending error then no state transition should happen. + // But all call-sites should be verifying this before calling us! + RTC_DCHECK(session_error() == SessionError::kNone); + + // If this is answer-ish we're ready to let media flow. + if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { + EnableSending(); + } + + // Update the signaling state according to the specified state machine (see + // https://w3c.github.io/webrtc-pc/#rtcsignalingstate-enum). + if (type == SdpType::kOffer) { + ChangeSignalingState(source == cricket::CS_LOCAL + ? PeerConnectionInterface::kHaveLocalOffer + : PeerConnectionInterface::kHaveRemoteOffer); + } else if (type == SdpType::kPrAnswer) { + ChangeSignalingState(source == cricket::CS_LOCAL + ? PeerConnectionInterface::kHaveLocalPrAnswer + : PeerConnectionInterface::kHaveRemotePrAnswer); + } else { + RTC_DCHECK(type == SdpType::kAnswer); + ChangeSignalingState(PeerConnectionInterface::kStable); + transceivers()->DiscardStableStates(); + } + + // Update internal objects according to the session description's media + // descriptions. + return PushdownMediaDescription(type, source, bundle_groups_by_mid); +} + +bool SdpOfferAnswerHandler::ShouldFireNegotiationNeededEvent( + uint32_t event_id) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Plan B? Always fire to conform with useless legacy behavior. + if (!IsUnifiedPlan()) { + return true; + } + // The event ID has been invalidated. Either negotiation is no longer needed + // or a newer negotiation needed event has been generated. + if (event_id != negotiation_needed_event_id_) { + return false; + } + // The chain is no longer empty, update negotiation needed when it becomes + // empty. This should generate a newer negotiation needed event, making this + // one obsolete. 
+ if (!operations_chain_->IsEmpty()) { + // Since we just suppressed an event that would have been fired, if + // negotiation is still needed by the time the chain becomes empty again, we + // must make sure to generate another event if negotiation is needed then. + // This happens when |is_negotiation_needed_| goes from false to true, so we + // set it to false until UpdateNegotiationNeeded() is called. + is_negotiation_needed_ = false; + update_negotiation_needed_on_empty_chain_ = true; + return false; + } + // We must not fire if the signaling state is no longer "stable". If + // negotiation is still needed when we return to "stable", a new negotiation + // needed event will be generated, so this one can safely be suppressed. + if (signaling_state_ != PeerConnectionInterface::kStable) { + return false; + } + // All checks have passed - please fire "negotiationneeded" now! + return true; +} + +rtc::scoped_refptr +SdpOfferAnswerHandler::local_streams() { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_CHECK(!IsUnifiedPlan()) << "local_streams is not available with Unified " + "Plan SdpSemantics. Please use GetSenders " + "instead."; + return local_streams_; +} + +rtc::scoped_refptr +SdpOfferAnswerHandler::remote_streams() { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_CHECK(!IsUnifiedPlan()) << "remote_streams is not available with Unified " + "Plan SdpSemantics. Please use GetReceivers " + "instead."; + return remote_streams_; +} + +bool SdpOfferAnswerHandler::AddStream(MediaStreamInterface* local_stream) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_CHECK(!IsUnifiedPlan()) << "AddStream is not available with Unified Plan " + "SdpSemantics. 
Please use AddTrack instead."; + if (pc_->IsClosed()) { + return false; + } + if (!CanAddLocalMediaStream(local_streams_, local_stream)) { + return false; + } + + local_streams_->AddStream(local_stream); + MediaStreamObserver* observer = new MediaStreamObserver(local_stream); + observer->SignalAudioTrackAdded.connect( + this, &SdpOfferAnswerHandler::OnAudioTrackAdded); + observer->SignalAudioTrackRemoved.connect( + this, &SdpOfferAnswerHandler::OnAudioTrackRemoved); + observer->SignalVideoTrackAdded.connect( + this, &SdpOfferAnswerHandler::OnVideoTrackAdded); + observer->SignalVideoTrackRemoved.connect( + this, &SdpOfferAnswerHandler::OnVideoTrackRemoved); + stream_observers_.push_back(std::unique_ptr(observer)); + + for (const auto& track : local_stream->GetAudioTracks()) { + rtp_manager()->AddAudioTrack(track.get(), local_stream); + } + for (const auto& track : local_stream->GetVideoTracks()) { + rtp_manager()->AddVideoTrack(track.get(), local_stream); + } + + pc_->stats()->AddStream(local_stream); + UpdateNegotiationNeeded(); + return true; +} + +void SdpOfferAnswerHandler::RemoveStream(MediaStreamInterface* local_stream) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_CHECK(!IsUnifiedPlan()) << "RemoveStream is not available with Unified " + "Plan SdpSemantics. 
Please use RemoveTrack " + "instead."; + TRACE_EVENT0("webrtc", "PeerConnection::RemoveStream"); + if (!pc_->IsClosed()) { + for (const auto& track : local_stream->GetAudioTracks()) { + rtp_manager()->RemoveAudioTrack(track.get(), local_stream); + } + for (const auto& track : local_stream->GetVideoTracks()) { + rtp_manager()->RemoveVideoTrack(track.get(), local_stream); + } + } + local_streams_->RemoveStream(local_stream); + stream_observers_.erase( + std::remove_if( + stream_observers_.begin(), stream_observers_.end(), + [local_stream](const std::unique_ptr& observer) { + return observer->stream()->id().compare(local_stream->id()) == 0; + }), + stream_observers_.end()); + + if (pc_->IsClosed()) { + return; + } + UpdateNegotiationNeeded(); +} + +void SdpOfferAnswerHandler::OnAudioTrackAdded(AudioTrackInterface* track, + MediaStreamInterface* stream) { + if (pc_->IsClosed()) { + return; + } + rtp_manager()->AddAudioTrack(track, stream); + UpdateNegotiationNeeded(); +} + +void SdpOfferAnswerHandler::OnAudioTrackRemoved(AudioTrackInterface* track, + MediaStreamInterface* stream) { + if (pc_->IsClosed()) { + return; + } + rtp_manager()->RemoveAudioTrack(track, stream); + UpdateNegotiationNeeded(); +} + +void SdpOfferAnswerHandler::OnVideoTrackAdded(VideoTrackInterface* track, + MediaStreamInterface* stream) { + if (pc_->IsClosed()) { + return; + } + rtp_manager()->AddVideoTrack(track, stream); + UpdateNegotiationNeeded(); +} + +void SdpOfferAnswerHandler::OnVideoTrackRemoved(VideoTrackInterface* track, + MediaStreamInterface* stream) { + if (pc_->IsClosed()) { + return; + } + rtp_manager()->RemoveVideoTrack(track, stream); + UpdateNegotiationNeeded(); +} + +RTCError SdpOfferAnswerHandler::Rollback(SdpType desc_type) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::Rollback"); + auto state = signaling_state(); + if (state != PeerConnectionInterface::kHaveLocalOffer && + state != PeerConnectionInterface::kHaveRemoteOffer) { + return 
RTCError(RTCErrorType::INVALID_STATE, + "Called in wrong signalingState: " + + GetSignalingStateString(signaling_state())); + } + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(IsUnifiedPlan()); + std::vector> all_added_streams; + std::vector> all_removed_streams; + std::vector> removed_receivers; + + for (auto&& transceivers_stable_state_pair : transceivers()->StableStates()) { + auto transceiver = transceivers_stable_state_pair.first; + auto state = transceivers_stable_state_pair.second; + + if (state.remote_stream_ids()) { + std::vector> added_streams; + std::vector> removed_streams; + SetAssociatedRemoteStreams(transceiver->internal()->receiver_internal(), + state.remote_stream_ids().value(), + &added_streams, &removed_streams); + all_added_streams.insert(all_added_streams.end(), added_streams.begin(), + added_streams.end()); + all_removed_streams.insert(all_removed_streams.end(), + removed_streams.begin(), + removed_streams.end()); + if (!state.has_m_section() && !state.newly_created()) { + continue; + } + } + + RTC_DCHECK(transceiver->internal()->mid().has_value()); + DestroyTransceiverChannel(transceiver); + + if (signaling_state() == PeerConnectionInterface::kHaveRemoteOffer && + transceiver->receiver()) { + removed_receivers.push_back(transceiver->receiver()); + } + if (state.newly_created()) { + if (transceiver->internal()->reused_for_addtrack()) { + transceiver->internal()->set_created_by_addtrack(true); + } else { + transceivers()->Remove(transceiver); + } + } + if (state.init_send_encodings()) { + transceiver->internal()->sender_internal()->set_init_send_encodings( + state.init_send_encodings().value()); + } + transceiver->internal()->sender_internal()->set_transport(nullptr); + transceiver->internal()->receiver_internal()->set_transport(nullptr); + transceiver->internal()->set_mid(state.mid()); + transceiver->internal()->set_mline_index(state.mline_index()); + } + transport_controller()->RollbackTransports(); + 
transceivers()->DiscardStableStates(); + pending_local_description_.reset(); + pending_remote_description_.reset(); + ChangeSignalingState(PeerConnectionInterface::kStable); + + // Once all processing has finished, fire off callbacks. + for (const auto& receiver : removed_receivers) { + pc_->Observer()->OnRemoveTrack(receiver); + } + for (const auto& stream : all_added_streams) { + pc_->Observer()->OnAddStream(stream); + } + for (const auto& stream : all_removed_streams) { + pc_->Observer()->OnRemoveStream(stream); + } + + // The assumption is that in case of implicit rollback UpdateNegotiationNeeded + // gets called in SetRemoteDescription. + if (desc_type == SdpType::kRollback) { + UpdateNegotiationNeeded(); + if (is_negotiation_needed_) { + // Legacy version. + pc_->Observer()->OnRenegotiationNeeded(); + // Spec-compliant version; the event may get invalidated before firing. + GenerateNegotiationNeededEvent(); + } + } + return RTCError::OK(); +} + +bool SdpOfferAnswerHandler::IsUnifiedPlan() const { + return pc_->IsUnifiedPlan(); +} + +void SdpOfferAnswerHandler::OnOperationsChainEmpty() { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (pc_->IsClosed() || !update_negotiation_needed_on_empty_chain_) + return; + update_negotiation_needed_on_empty_chain_ = false; + // Firing when chain is empty is only supported in Unified Plan to avoid Plan + // B regressions. (In Plan B, onnegotiationneeded is already broken anyway, so + // firing it even more might just be confusing.) 
+ if (IsUnifiedPlan()) { + UpdateNegotiationNeeded(); + } +} + +absl::optional SdpOfferAnswerHandler::is_caller() { + RTC_DCHECK_RUN_ON(signaling_thread()); + return is_caller_; +} + +bool SdpOfferAnswerHandler::HasNewIceCredentials() { + RTC_DCHECK_RUN_ON(signaling_thread()); + return local_ice_credentials_to_replace_->HasIceCredentials(); +} + +bool SdpOfferAnswerHandler::IceRestartPending( + const std::string& content_name) const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return pending_ice_restarts_.find(content_name) != + pending_ice_restarts_.end(); +} + +bool SdpOfferAnswerHandler::NeedsIceRestart( + const std::string& content_name) const { + return pc_->NeedsIceRestart(content_name); +} + +absl::optional SdpOfferAnswerHandler::GetDtlsRole( + const std::string& mid) const { + return transport_controller()->GetDtlsRole(mid); +} + +void SdpOfferAnswerHandler::UpdateNegotiationNeeded() { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (!IsUnifiedPlan()) { + pc_->Observer()->OnRenegotiationNeeded(); + GenerateNegotiationNeededEvent(); + return; + } + + // In the spec, a task is queued here to run the following steps - this is + // meant to ensure we do not fire onnegotiationneeded prematurely if multiple + // changes are being made at once. In order to support Chromium's + // implementation where the JavaScript representation of the PeerConnection + // lives on a separate thread though, the queuing of a task is instead + // performed by the PeerConnectionObserver posting from the signaling thread + // to the JavaScript main thread that negotiation is needed. And because the + // Operations Chain lives on the WebRTC signaling thread, + // ShouldFireNegotiationNeededEvent() must be called before firing the event + // to ensure the Operations Chain is still empty and the event has not been + // invalidated. + + // If connection's [[IsClosed]] slot is true, abort these steps. 
+ if (pc_->IsClosed()) + return; + + // If connection's signaling state is not "stable", abort these steps. + if (signaling_state() != PeerConnectionInterface::kStable) + return; + + // NOTE + // The negotiation-needed flag will be updated once the state transitions to + // "stable", as part of the steps for setting an RTCSessionDescription. + + // If the result of checking if negotiation is needed is false, clear the + // negotiation-needed flag by setting connection's [[NegotiationNeeded]] slot + // to false, and abort these steps. + bool is_negotiation_needed = CheckIfNegotiationIsNeeded(); + if (!is_negotiation_needed) { + is_negotiation_needed_ = false; + // Invalidate any negotiation needed event that may previosuly have been + // generated. + ++negotiation_needed_event_id_; + return; + } + + // If connection's [[NegotiationNeeded]] slot is already true, abort these + // steps. + if (is_negotiation_needed_) + return; + + // Set connection's [[NegotiationNeeded]] slot to true. + is_negotiation_needed_ = true; + + // Queue a task that runs the following steps: + // If connection's [[IsClosed]] slot is true, abort these steps. + // If connection's [[NegotiationNeeded]] slot is false, abort these steps. + // Fire an event named negotiationneeded at connection. + pc_->Observer()->OnRenegotiationNeeded(); + // Fire the spec-compliant version; when ShouldFireNegotiationNeededEvent() is + // used in the task queued by the observer, this event will only fire when the + // chain is empty. + GenerateNegotiationNeededEvent(); +} + +bool SdpOfferAnswerHandler::CheckIfNegotiationIsNeeded() { + RTC_DCHECK_RUN_ON(signaling_thread()); + // 1. If any implementation-specific negotiation is required, as described at + // the start of this section, return true. + + // 2. If connection.[[LocalIceCredentialsToReplace]] is not empty, return + // true. + if (local_ice_credentials_to_replace_->HasIceCredentials()) { + return true; + } + + // 3. 
Let description be connection.[[CurrentLocalDescription]]. + const SessionDescriptionInterface* description = current_local_description(); + if (!description) + return true; + + // 4. If connection has created any RTCDataChannels, and no m= section in + // description has been negotiated yet for data, return true. + if (data_channel_controller()->HasSctpDataChannels()) { + if (!cricket::GetFirstDataContent(description->description()->contents())) + return true; + } + + // 5. For each transceiver in connection's set of transceivers, perform the + // following checks: + for (const auto& transceiver : transceivers()->ListInternal()) { + const ContentInfo* current_local_msection = + FindTransceiverMSection(transceiver, description); + + const ContentInfo* current_remote_msection = + FindTransceiverMSection(transceiver, current_remote_description()); + + // 5.4 If transceiver is stopped and is associated with an m= section, + // but the associated m= section is not yet rejected in + // connection.[[CurrentLocalDescription]] or + // connection.[[CurrentRemoteDescription]], return true. + if (transceiver->stopped()) { + RTC_DCHECK(transceiver->stopping()); + if (current_local_msection && !current_local_msection->rejected && + ((current_remote_msection && !current_remote_msection->rejected) || + !current_remote_msection)) { + return true; + } + continue; + } + + // 5.1 If transceiver.[[Stopping]] is true and transceiver.[[Stopped]] is + // false, return true. + if (transceiver->stopping() && !transceiver->stopped()) + return true; + + // 5.2 If transceiver isn't stopped and isn't yet associated with an m= + // section in description, return true. 
+ if (!current_local_msection) + return true; + + const MediaContentDescription* current_local_media_description = + current_local_msection->media_description(); + // 5.3 If transceiver isn't stopped and is associated with an m= section + // in description then perform the following checks: + + // 5.3.1 If transceiver.[[Direction]] is "sendrecv" or "sendonly", and the + // associated m= section in description either doesn't contain a single + // "a=msid" line, or the number of MSIDs from the "a=msid" lines in this + // m= section, or the MSID values themselves, differ from what is in + // transceiver.sender.[[AssociatedMediaStreamIds]], return true. + if (RtpTransceiverDirectionHasSend(transceiver->direction())) { + if (current_local_media_description->streams().size() == 0) + return true; + + std::vector msection_msids; + for (const auto& stream : current_local_media_description->streams()) { + for (const std::string& msid : stream.stream_ids()) + msection_msids.push_back(msid); + } + + std::vector transceiver_msids = + transceiver->sender()->stream_ids(); + if (msection_msids.size() != transceiver_msids.size()) + return true; + + absl::c_sort(transceiver_msids); + absl::c_sort(msection_msids); + if (transceiver_msids != msection_msids) + return true; + } + + // 5.3.2 If description is of type "offer", and the direction of the + // associated m= section in neither connection.[[CurrentLocalDescription]] + // nor connection.[[CurrentRemoteDescription]] matches + // transceiver.[[Direction]], return true. 
+ if (description->GetType() == SdpType::kOffer) { + if (!current_remote_description()) + return true; + + if (!current_remote_msection) + return true; + + RtpTransceiverDirection current_local_direction = + current_local_media_description->direction(); + RtpTransceiverDirection current_remote_direction = + current_remote_msection->media_description()->direction(); + if (transceiver->direction() != current_local_direction && + transceiver->direction() != + RtpTransceiverDirectionReversed(current_remote_direction)) { + return true; + } + } + + // 5.3.3 If description is of type "answer", and the direction of the + // associated m= section in the description does not match + // transceiver.[[Direction]] intersected with the offered direction (as + // described in [JSEP] (section 5.3.1.)), return true. + if (description->GetType() == SdpType::kAnswer) { + if (!remote_description()) + return true; + + const ContentInfo* offered_remote_msection = + FindTransceiverMSection(transceiver, remote_description()); + + RtpTransceiverDirection offered_direction = + offered_remote_msection + ? offered_remote_msection->media_description()->direction() + : RtpTransceiverDirection::kInactive; + + if (current_local_media_description->direction() != + (RtpTransceiverDirectionIntersection( + transceiver->direction(), + RtpTransceiverDirectionReversed(offered_direction)))) { + return true; + } + } + } + + // If all the preceding checks were performed and true was not returned, + // nothing remains to be negotiated; return false. 
+ return false; +} + +void SdpOfferAnswerHandler::GenerateNegotiationNeededEvent() { + RTC_DCHECK_RUN_ON(signaling_thread()); + ++negotiation_needed_event_id_; + pc_->Observer()->OnNegotiationNeededEvent(negotiation_needed_event_id_); +} + +RTCError SdpOfferAnswerHandler::ValidateSessionDescription( + const SessionDescriptionInterface* sdesc, + cricket::ContentSource source, + const std::map& + bundle_groups_by_mid) { + if (session_error() != SessionError::kNone) { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, GetSessionErrorMsg()); + } + + if (!sdesc || !sdesc->description()) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, kInvalidSdp); + } + + SdpType type = sdesc->GetType(); + if ((source == cricket::CS_LOCAL && !ExpectSetLocalDescription(type)) || + (source == cricket::CS_REMOTE && !ExpectSetRemoteDescription(type))) { + LOG_AND_RETURN_ERROR( + RTCErrorType::INVALID_STATE, + "Called in wrong state: " + GetSignalingStateString(signaling_state())); + } + + RTCError error = ValidateMids(*sdesc->description()); + if (!error.ok()) { + return error; + } + + // Verify crypto settings. + std::string crypto_error; + if (webrtc_session_desc_factory_->SdesPolicy() == cricket::SEC_REQUIRED || + pc_->dtls_enabled()) { + RTCError crypto_error = VerifyCrypto( + sdesc->description(), pc_->dtls_enabled(), bundle_groups_by_mid); + if (!crypto_error.ok()) { + return crypto_error; + } + } + + // Verify ice-ufrag and ice-pwd. + if (!VerifyIceUfragPwdPresent(sdesc->description(), bundle_groups_by_mid)) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + kSdpWithoutIceUfragPwd); + } + + if (!pc_->ValidateBundleSettings(sdesc->description(), + bundle_groups_by_mid)) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + kBundleWithoutRtcpMux); + } + + // TODO(skvlad): When the local rtcp-mux policy is Require, reject any + // m-lines that do not rtcp-mux enabled. + + // Verify m-lines in Answer when compared against Offer. 
+ if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) { + // With an answer we want to compare the new answer session description with + // the offer's session description from the current negotiation. + const cricket::SessionDescription* offer_desc = + (source == cricket::CS_LOCAL) ? remote_description()->description() + : local_description()->description(); + if (!MediaSectionsHaveSameCount(*offer_desc, *sdesc->description()) || + !MediaSectionsInSameOrder(*offer_desc, nullptr, *sdesc->description(), + type)) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + kMlineMismatchInAnswer); + } + } else { + // The re-offers should respect the order of m= sections in current + // description. See RFC3264 Section 8 paragraph 4 for more details. + // With a re-offer, either the current local or current remote descriptions + // could be the most up to date, so we would like to check against both of + // them if they exist. It could be the case that one of them has a 0 port + // for a media section, but the other does not. This is important to check + // against in the case that we are recycling an m= section. + const cricket::SessionDescription* current_desc = nullptr; + const cricket::SessionDescription* secondary_current_desc = nullptr; + if (local_description()) { + current_desc = local_description()->description(); + if (remote_description()) { + secondary_current_desc = remote_description()->description(); + } + } else if (remote_description()) { + current_desc = remote_description()->description(); + } + if (current_desc && + !MediaSectionsInSameOrder(*current_desc, secondary_current_desc, + *sdesc->description(), type)) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + kMlineMismatchInSubsequentOffer); + } + } + + if (IsUnifiedPlan()) { + // Ensure that each audio and video media section has at most one + // "StreamParams". 
This will return an error if receiving a session + // description from a "Plan B" endpoint which adds multiple tracks of the + // same type. With Unified Plan, there can only be at most one track per + // media section. + for (const ContentInfo& content : sdesc->description()->contents()) { + const MediaContentDescription& desc = *content.media_description(); + if ((desc.type() == cricket::MEDIA_TYPE_AUDIO || + desc.type() == cricket::MEDIA_TYPE_VIDEO) && + desc.streams().size() > 1u) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "Media section has more than one track specified " + "with a=ssrc lines which is not supported with " + "Unified Plan."); + } + } + } + + return RTCError::OK(); +} + +RTCError SdpOfferAnswerHandler::UpdateTransceiversAndDataChannels( + cricket::ContentSource source, + const SessionDescriptionInterface& new_session, + const SessionDescriptionInterface* old_local_description, + const SessionDescriptionInterface* old_remote_description, + const std::map& + bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", + "SdpOfferAnswerHandler::UpdateTransceiversAndDataChannels"); + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(IsUnifiedPlan()); + + if (new_session.GetType() == SdpType::kOffer) { + // If the BUNDLE policy is max-bundle, then we know for sure that all + // transports will be bundled from the start. Return an error if max-bundle + // is specified but the session description does not have a BUNDLE group. 
+ if (pc_->configuration()->bundle_policy == + PeerConnectionInterface::kBundlePolicyMaxBundle && + bundle_groups_by_mid.empty()) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "max-bundle configured but session description " + "has no BUNDLE group"); + } + } + + const ContentInfos& new_contents = new_session.description()->contents(); + for (size_t i = 0; i < new_contents.size(); ++i) { + const cricket::ContentInfo& new_content = new_contents[i]; + cricket::MediaType media_type = new_content.media_description()->type(); + mid_generator_.AddKnownId(new_content.name); + auto it = bundle_groups_by_mid.find(new_content.name); + const cricket::ContentGroup* bundle_group = + it != bundle_groups_by_mid.end() ? it->second : nullptr; + if (media_type == cricket::MEDIA_TYPE_AUDIO || + media_type == cricket::MEDIA_TYPE_VIDEO) { + const cricket::ContentInfo* old_local_content = nullptr; + if (old_local_description && + i < old_local_description->description()->contents().size()) { + old_local_content = + &old_local_description->description()->contents()[i]; + } + const cricket::ContentInfo* old_remote_content = nullptr; + if (old_remote_description && + i < old_remote_description->description()->contents().size()) { + old_remote_content = + &old_remote_description->description()->contents()[i]; + } + auto transceiver_or_error = + AssociateTransceiver(source, new_session.GetType(), i, new_content, + old_local_content, old_remote_content); + if (!transceiver_or_error.ok()) { + // In the case where a transceiver is rejected locally, we don't + // expect to find a transceiver, but might find it in the case + // where state is still "stopping", not "stopped". 
+ if (new_content.rejected) { + continue; + } + return transceiver_or_error.MoveError(); + } + auto transceiver = transceiver_or_error.MoveValue(); + RTCError error = + UpdateTransceiverChannel(transceiver, new_content, bundle_group); + if (!error.ok()) { + return error; + } + } else if (media_type == cricket::MEDIA_TYPE_DATA) { + if (pc_->GetDataMid() && new_content.name != *(pc_->GetDataMid())) { + // Ignore all but the first data section. + RTC_LOG(LS_INFO) << "Ignoring data media section with MID=" + << new_content.name; + continue; + } + RTCError error = UpdateDataChannel(source, new_content, bundle_group); + if (!error.ok()) { + return error; + } + } else if (media_type == cricket::MEDIA_TYPE_UNSUPPORTED) { + RTC_LOG(LS_INFO) << "Ignoring unsupported media type"; + } else { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, + "Unknown section type."); + } + } + + return RTCError::OK(); +} + +RTCErrorOr>> +SdpOfferAnswerHandler::AssociateTransceiver( + cricket::ContentSource source, + SdpType type, + size_t mline_index, + const ContentInfo& content, + const ContentInfo* old_local_content, + const ContentInfo* old_remote_content) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::AssociateTransceiver"); + RTC_DCHECK(IsUnifiedPlan()); +#if RTC_DCHECK_IS_ON + // If this is an offer then the m= section might be recycled. If the m= + // section is being recycled (defined as: rejected in the current local or + // remote description and not rejected in new description), the transceiver + // should have been removed by RemoveStoppedTransceivers(). + if (IsMediaSectionBeingRecycled(type, content, old_local_content, + old_remote_content)) { + const std::string& old_mid = + (old_local_content && old_local_content->rejected) + ? 
old_local_content->name + : old_remote_content->name; + auto old_transceiver = transceivers()->FindByMid(old_mid); + // The transceiver should be disassociated in RemoveStoppedTransceivers() + RTC_DCHECK(!old_transceiver); + } +#endif + + const MediaContentDescription* media_desc = content.media_description(); + auto transceiver = transceivers()->FindByMid(content.name); + if (source == cricket::CS_LOCAL) { + // Find the RtpTransceiver that corresponds to this m= section, using the + // mapping between transceivers and m= section indices established when + // creating the offer. + if (!transceiver) { + transceiver = transceivers()->FindByMLineIndex(mline_index); + } + if (!transceiver) { + // This may happen normally when media sections are rejected. + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, + "Transceiver not found based on m-line index"); + } + } else { + RTC_DCHECK_EQ(source, cricket::CS_REMOTE); + // If the m= section is sendrecv or recvonly, and there are RtpTransceivers + // of the same type... + // When simulcast is requested, a transceiver cannot be associated because + // AddTrack cannot be called to initialize it. + if (!transceiver && + RtpTransceiverDirectionHasRecv(media_desc->direction()) && + !media_desc->HasSimulcast()) { + transceiver = FindAvailableTransceiverToReceive(media_desc->type()); + } + // If no RtpTransceiver was found in the previous step, create one with a + // recvonly direction. 
+ if (!transceiver) { + RTC_LOG(LS_INFO) << "Adding " + << cricket::MediaTypeToString(media_desc->type()) + << " transceiver for MID=" << content.name + << " at i=" << mline_index + << " in response to the remote description."; + std::string sender_id = rtc::CreateRandomUuid(); + std::vector send_encodings = + GetSendEncodingsFromRemoteDescription(*media_desc); + auto sender = rtp_manager()->CreateSender(media_desc->type(), sender_id, + nullptr, {}, send_encodings); + std::string receiver_id; + if (!media_desc->streams().empty()) { + receiver_id = media_desc->streams()[0].id; + } else { + receiver_id = rtc::CreateRandomUuid(); + } + auto receiver = + rtp_manager()->CreateReceiver(media_desc->type(), receiver_id); + transceiver = rtp_manager()->CreateAndAddTransceiver(sender, receiver); + transceiver->internal()->set_direction( + RtpTransceiverDirection::kRecvOnly); + if (type == SdpType::kOffer) { + transceivers()->StableState(transceiver)->set_newly_created(); + } + } + + RTC_DCHECK(transceiver); + + // Check if the offer indicated simulcast but the answer rejected it. + // This can happen when simulcast is not supported on the remote party. + if (SimulcastIsRejected(old_local_content, *media_desc, + pc_->GetCryptoOptions() + .srtp.enable_encrypted_rtp_header_extensions)) { + RTC_HISTOGRAM_BOOLEAN(kSimulcastDisabled, true); + RTCError error = + DisableSimulcastInSender(transceiver->internal()->sender_internal()); + if (!error.ok()) { + RTC_LOG(LS_ERROR) << "Failed to remove rejected simulcast."; + return std::move(error); + } + } + } + + if (transceiver->media_type() != media_desc->type()) { + LOG_AND_RETURN_ERROR( + RTCErrorType::INVALID_PARAMETER, + "Transceiver type does not match media description type."); + } + + if (media_desc->HasSimulcast()) { + std::vector layers = + source == cricket::CS_LOCAL + ? 
media_desc->simulcast_description().send_layers().GetAllLayers() + : media_desc->simulcast_description() + .receive_layers() + .GetAllLayers(); + RTCError error = UpdateSimulcastLayerStatusInSender( + layers, transceiver->internal()->sender_internal()); + if (!error.ok()) { + RTC_LOG(LS_ERROR) << "Failed updating status for simulcast layers."; + return std::move(error); + } + } + if (type == SdpType::kOffer) { + bool state_changes = transceiver->internal()->mid() != content.name || + transceiver->internal()->mline_index() != mline_index; + if (state_changes) { + transceivers() + ->StableState(transceiver) + ->SetMSectionIfUnset(transceiver->internal()->mid(), + transceiver->internal()->mline_index()); + } + } + // Associate the found or created RtpTransceiver with the m= section by + // setting the value of the RtpTransceiver's mid property to the MID of the m= + // section, and establish a mapping between the transceiver and the index of + // the m= section. + transceiver->internal()->set_mid(content.name); + transceiver->internal()->set_mline_index(mline_index); + return std::move(transceiver); +} + +RTCError SdpOfferAnswerHandler::UpdateTransceiverChannel( + rtc::scoped_refptr> + transceiver, + const cricket::ContentInfo& content, + const cricket::ContentGroup* bundle_group) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::UpdateTransceiverChannel"); + RTC_DCHECK(IsUnifiedPlan()); + RTC_DCHECK(transceiver); + cricket::ChannelInterface* channel = transceiver->internal()->channel(); + if (content.rejected) { + if (channel) { + transceiver->internal()->SetChannel(nullptr); + DestroyChannelInterface(channel); + } + } else { + if (!channel) { + if (transceiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { + channel = CreateVoiceChannel(content.name); + } else { + RTC_DCHECK_EQ(cricket::MEDIA_TYPE_VIDEO, transceiver->media_type()); + channel = CreateVideoChannel(content.name); + } + if (!channel) { + LOG_AND_RETURN_ERROR( + RTCErrorType::INTERNAL_ERROR, + "Failed to 
create channel for mid=" + content.name); + } + transceiver->internal()->SetChannel(channel); + } + } + return RTCError::OK(); +} + +RTCError SdpOfferAnswerHandler::UpdateDataChannel( + cricket::ContentSource source, + const cricket::ContentInfo& content, + const cricket::ContentGroup* bundle_group) { + if (content.rejected) { + RTC_LOG(LS_INFO) << "Rejected data channel transport with mid=" + << content.mid(); + + rtc::StringBuilder sb; + sb << "Rejected data channel transport with mid=" << content.mid(); + RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, sb.Release()); + error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); + DestroyDataChannelTransport(error); + } else { + if (!data_channel_controller()->data_channel_transport()) { + RTC_LOG(LS_INFO) << "Creating data channel, mid=" << content.mid(); + if (!CreateDataChannel(content.name)) { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, + "Failed to create data channel."); + } + } + } + return RTCError::OK(); +} + +bool SdpOfferAnswerHandler::ExpectSetLocalDescription(SdpType type) { + PeerConnectionInterface::SignalingState state = signaling_state(); + if (type == SdpType::kOffer) { + return (state == PeerConnectionInterface::kStable) || + (state == PeerConnectionInterface::kHaveLocalOffer); + } else { + RTC_DCHECK(type == SdpType::kPrAnswer || type == SdpType::kAnswer); + return (state == PeerConnectionInterface::kHaveRemoteOffer) || + (state == PeerConnectionInterface::kHaveLocalPrAnswer); + } +} + +bool SdpOfferAnswerHandler::ExpectSetRemoteDescription(SdpType type) { + PeerConnectionInterface::SignalingState state = signaling_state(); + if (type == SdpType::kOffer) { + return (state == PeerConnectionInterface::kStable) || + (state == PeerConnectionInterface::kHaveRemoteOffer); + } else { + RTC_DCHECK(type == SdpType::kPrAnswer || type == SdpType::kAnswer); + return (state == PeerConnectionInterface::kHaveLocalOffer) || + (state == PeerConnectionInterface::kHaveRemotePrAnswer); + } 
+} + +void SdpOfferAnswerHandler::FillInMissingRemoteMids( + cricket::SessionDescription* new_remote_description) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(new_remote_description); + const cricket::ContentInfos no_infos; + const cricket::ContentInfos& local_contents = + (local_description() ? local_description()->description()->contents() + : no_infos); + const cricket::ContentInfos& remote_contents = + (remote_description() ? remote_description()->description()->contents() + : no_infos); + for (size_t i = 0; i < new_remote_description->contents().size(); ++i) { + cricket::ContentInfo& content = new_remote_description->contents()[i]; + if (!content.name.empty()) { + continue; + } + std::string new_mid; + absl::string_view source_explanation; + if (IsUnifiedPlan()) { + if (i < local_contents.size()) { + new_mid = local_contents[i].name; + source_explanation = "from the matching local media section"; + } else if (i < remote_contents.size()) { + new_mid = remote_contents[i].name; + source_explanation = "from the matching previous remote media section"; + } else { + new_mid = mid_generator_.GenerateString(); + source_explanation = "generated just now"; + } + } else { + new_mid = std::string( + GetDefaultMidForPlanB(content.media_description()->type())); + source_explanation = "to match pre-existing behavior"; + } + RTC_DCHECK(!new_mid.empty()); + content.name = new_mid; + new_remote_description->transport_infos()[i].content_name = new_mid; + RTC_LOG(LS_INFO) << "SetRemoteDescription: Remote media section at i=" << i + << " is missing an a=mid line. 
Filling in the value '" + << new_mid << "' " << source_explanation << "."; + } +} + +rtc::scoped_refptr> +SdpOfferAnswerHandler::FindAvailableTransceiverToReceive( + cricket::MediaType media_type) const { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(IsUnifiedPlan()); + // From JSEP section 5.10 (Applying a Remote Description): + // If the m= section is sendrecv or recvonly, and there are RtpTransceivers of + // the same type that were added to the PeerConnection by addTrack and are not + // associated with any m= section and are not stopped, find the first such + // RtpTransceiver. + for (auto transceiver : transceivers()->List()) { + if (transceiver->media_type() == media_type && + transceiver->internal()->created_by_addtrack() && !transceiver->mid() && + !transceiver->stopped()) { + return transceiver; + } + } + return nullptr; +} + +const cricket::ContentInfo* +SdpOfferAnswerHandler::FindMediaSectionForTransceiver( + const RtpTransceiver* transceiver, + const SessionDescriptionInterface* sdesc) const { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(transceiver); + RTC_DCHECK(sdesc); + if (IsUnifiedPlan()) { + if (!transceiver->mid()) { + // This transceiver is not associated with a media section yet. + return nullptr; + } + return sdesc->description()->GetContentByName(*transceiver->mid()); + } else { + // Plan B only allows at most one audio and one video section, so use the + // first media section of that type. 
+ return cricket::GetFirstMediaContent(sdesc->description()->contents(), + transceiver->media_type()); + } +} + +void SdpOfferAnswerHandler::GetOptionsForOffer( + const PeerConnectionInterface::RTCOfferAnswerOptions& offer_answer_options, + cricket::MediaSessionOptions* session_options) { + RTC_DCHECK_RUN_ON(signaling_thread()); + ExtractSharedMediaSessionOptions(offer_answer_options, session_options); + + if (IsUnifiedPlan()) { + GetOptionsForUnifiedPlanOffer(offer_answer_options, session_options); + } else { + GetOptionsForPlanBOffer(offer_answer_options, session_options); + } + + // Apply ICE restart flag and renomination flag. + bool ice_restart = offer_answer_options.ice_restart || HasNewIceCredentials(); + for (auto& options : session_options->media_description_options) { + options.transport_options.ice_restart = ice_restart; + options.transport_options.enable_ice_renomination = + pc_->configuration()->enable_ice_renomination; + } + + session_options->rtcp_cname = rtcp_cname_; + session_options->crypto_options = pc_->GetCryptoOptions(); + session_options->pooled_ice_credentials = + pc_->network_thread()->Invoke>( + RTC_FROM_HERE, + [this] { return port_allocator()->GetPooledIceCredentials(); }); + session_options->offer_extmap_allow_mixed = + pc_->configuration()->offer_extmap_allow_mixed; + + // Allow fallback for using obsolete SCTP syntax. + // Note that the default in |session_options| is true, while + // the default in |options| is false. + session_options->use_obsolete_sctp_sdp = + offer_answer_options.use_obsolete_sctp_sdp; +} + +void SdpOfferAnswerHandler::GetOptionsForPlanBOffer( + const PeerConnectionInterface::RTCOfferAnswerOptions& offer_answer_options, + cricket::MediaSessionOptions* session_options) { + // Figure out transceiver directional preferences. 
+ bool send_audio = + !rtp_manager()->GetAudioTransceiver()->internal()->senders().empty(); + bool send_video = + !rtp_manager()->GetVideoTransceiver()->internal()->senders().empty(); + + // By default, generate sendrecv/recvonly m= sections. + bool recv_audio = true; + bool recv_video = true; + + // By default, only offer a new m= section if we have media to send with it. + bool offer_new_audio_description = send_audio; + bool offer_new_video_description = send_video; + bool offer_new_data_description = + data_channel_controller()->HasDataChannels(); + + // The "offer_to_receive_X" options allow those defaults to be overridden. + if (offer_answer_options.offer_to_receive_audio != + PeerConnectionInterface::RTCOfferAnswerOptions::kUndefined) { + recv_audio = (offer_answer_options.offer_to_receive_audio > 0); + offer_new_audio_description = + offer_new_audio_description || + (offer_answer_options.offer_to_receive_audio > 0); + } + if (offer_answer_options.offer_to_receive_video != + RTCOfferAnswerOptions::kUndefined) { + recv_video = (offer_answer_options.offer_to_receive_video > 0); + offer_new_video_description = + offer_new_video_description || + (offer_answer_options.offer_to_receive_video > 0); + } + + absl::optional audio_index; + absl::optional video_index; + absl::optional data_index; + // If a current description exists, generate m= sections in the same order, + // using the first audio/video/data section that appears and rejecting + // extraneous ones. + if (local_description()) { + GenerateMediaDescriptionOptions( + local_description(), + RtpTransceiverDirectionFromSendRecv(send_audio, recv_audio), + RtpTransceiverDirectionFromSendRecv(send_video, recv_video), + &audio_index, &video_index, &data_index, session_options); + } + + // Add audio/video/data m= sections to the end if needed. 
+ if (!audio_index && offer_new_audio_description) { + cricket::MediaDescriptionOptions options( + cricket::MEDIA_TYPE_AUDIO, cricket::CN_AUDIO, + RtpTransceiverDirectionFromSendRecv(send_audio, recv_audio), false); + options.header_extensions = + channel_manager()->GetSupportedAudioRtpHeaderExtensions(); + session_options->media_description_options.push_back(options); + audio_index = session_options->media_description_options.size() - 1; + } + if (!video_index && offer_new_video_description) { + cricket::MediaDescriptionOptions options( + cricket::MEDIA_TYPE_VIDEO, cricket::CN_VIDEO, + RtpTransceiverDirectionFromSendRecv(send_video, recv_video), false); + options.header_extensions = + channel_manager()->GetSupportedVideoRtpHeaderExtensions(); + session_options->media_description_options.push_back(options); + video_index = session_options->media_description_options.size() - 1; + } + if (!data_index && offer_new_data_description) { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForActiveData(cricket::CN_DATA)); + data_index = session_options->media_description_options.size() - 1; + } + + cricket::MediaDescriptionOptions* audio_media_description_options = + !audio_index ? nullptr + : &session_options->media_description_options[*audio_index]; + cricket::MediaDescriptionOptions* video_media_description_options = + !video_index ? nullptr + : &session_options->media_description_options[*video_index]; + + AddPlanBRtpSenderOptions(rtp_manager()->GetSendersInternal(), + audio_media_description_options, + video_media_description_options, + offer_answer_options.num_simulcast_layers); +} + +void SdpOfferAnswerHandler::GetOptionsForUnifiedPlanOffer( + const RTCOfferAnswerOptions& offer_answer_options, + cricket::MediaSessionOptions* session_options) { + // Rules for generating an offer are dictated by JSEP sections 5.2.1 (Initial + // Offers) and 5.2.2 (Subsequent Offers). 
+ RTC_DCHECK_EQ(session_options->media_description_options.size(), 0); + const ContentInfos no_infos; + const ContentInfos& local_contents = + (local_description() ? local_description()->description()->contents() + : no_infos); + const ContentInfos& remote_contents = + (remote_description() ? remote_description()->description()->contents() + : no_infos); + // The mline indices that can be recycled. New transceivers should reuse these + // slots first. + std::queue recycleable_mline_indices; + // First, go through each media section that exists in either the local or + // remote description and generate a media section in this offer for the + // associated transceiver. If a media section can be recycled, generate a + // default, rejected media section here that can be later overwritten. + for (size_t i = 0; + i < std::max(local_contents.size(), remote_contents.size()); ++i) { + // Either |local_content| or |remote_content| is non-null. + const ContentInfo* local_content = + (i < local_contents.size() ? &local_contents[i] : nullptr); + const ContentInfo* current_local_content = + GetContentByIndex(current_local_description(), i); + const ContentInfo* remote_content = + (i < remote_contents.size() ? &remote_contents[i] : nullptr); + const ContentInfo* current_remote_content = + GetContentByIndex(current_remote_description(), i); + bool had_been_rejected = + (current_local_content && current_local_content->rejected) || + (current_remote_content && current_remote_content->rejected); + const std::string& mid = + (local_content ? local_content->name : remote_content->name); + cricket::MediaType media_type = + (local_content ? local_content->media_description()->type() + : remote_content->media_description()->type()); + if (media_type == cricket::MEDIA_TYPE_AUDIO || + media_type == cricket::MEDIA_TYPE_VIDEO) { + // A media section is considered eligible for recycling if it is marked as + // rejected in either the current local or current remote description. 
+ auto transceiver = transceivers()->FindByMid(mid); + if (!transceiver) { + // No associated transceiver. The media section has been stopped. + recycleable_mline_indices.push(i); + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions(media_type, mid, + RtpTransceiverDirection::kInactive, + /*stopped=*/true)); + } else { + // NOTE: a stopping transceiver should be treated as a stopped one in + // createOffer as specified in + // https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-createoffer. + if (had_been_rejected && transceiver->stopping()) { + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions( + transceiver->media_type(), mid, + RtpTransceiverDirection::kInactive, + /*stopped=*/true)); + recycleable_mline_indices.push(i); + } else { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForTransceiver( + transceiver->internal(), mid, + /*is_create_offer=*/true)); + // CreateOffer shouldn't really cause any state changes in + // PeerConnection, but we need a way to match new transceivers to new + // media sections in SetLocalDescription and JSEP specifies this is + // done by recording the index of the media section generated for the + // transceiver in the offer. 
+ transceiver->internal()->set_mline_index(i); + } + } + } else if (media_type == cricket::MEDIA_TYPE_UNSUPPORTED) { + RTC_DCHECK(local_content->rejected); + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions(media_type, mid, + RtpTransceiverDirection::kInactive, + /*stopped=*/true)); + } else { + RTC_CHECK_EQ(cricket::MEDIA_TYPE_DATA, media_type); + if (had_been_rejected) { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForRejectedData(mid)); + } else { + RTC_CHECK(pc_->GetDataMid()); + if (mid == *(pc_->GetDataMid())) { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForActiveData(mid)); + } else { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForRejectedData(mid)); + } + } + } + } + + // Next, look for transceivers that are newly added (that is, are not stopped + // and not associated). Reuse media sections marked as recyclable first, + // otherwise append to the end of the offer. New media sections should be + // added in the order they were added to the PeerConnection. + for (const auto& transceiver : transceivers()->ListInternal()) { + if (transceiver->mid() || transceiver->stopping()) { + continue; + } + size_t mline_index; + if (!recycleable_mline_indices.empty()) { + mline_index = recycleable_mline_indices.front(); + recycleable_mline_indices.pop(); + session_options->media_description_options[mline_index] = + GetMediaDescriptionOptionsForTransceiver( + transceiver, mid_generator_.GenerateString(), + /*is_create_offer=*/true); + } else { + mline_index = session_options->media_description_options.size(); + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForTransceiver( + transceiver, mid_generator_.GenerateString(), + /*is_create_offer=*/true)); + } + // See comment above for why CreateOffer changes the transceiver's state. 
+ transceiver->set_mline_index(mline_index); + } + // Lastly, add a m-section if we have local data channels and an m section + // does not already exist. + if (!pc_->GetDataMid() && data_channel_controller()->HasDataChannels()) { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForActiveData( + mid_generator_.GenerateString())); + } +} + +void SdpOfferAnswerHandler::GetOptionsForAnswer( + const RTCOfferAnswerOptions& offer_answer_options, + cricket::MediaSessionOptions* session_options) { + RTC_DCHECK_RUN_ON(signaling_thread()); + ExtractSharedMediaSessionOptions(offer_answer_options, session_options); + + if (IsUnifiedPlan()) { + GetOptionsForUnifiedPlanAnswer(offer_answer_options, session_options); + } else { + GetOptionsForPlanBAnswer(offer_answer_options, session_options); + } + + // Apply ICE renomination flag. + for (auto& options : session_options->media_description_options) { + options.transport_options.enable_ice_renomination = + pc_->configuration()->enable_ice_renomination; + } + + session_options->rtcp_cname = rtcp_cname_; + session_options->crypto_options = pc_->GetCryptoOptions(); + session_options->pooled_ice_credentials = + pc_->network_thread()->Invoke>( + RTC_FROM_HERE, + [this] { return port_allocator()->GetPooledIceCredentials(); }); +} + +void SdpOfferAnswerHandler::GetOptionsForPlanBAnswer( + const PeerConnectionInterface::RTCOfferAnswerOptions& offer_answer_options, + cricket::MediaSessionOptions* session_options) { + // Figure out transceiver directional preferences. + bool send_audio = + !rtp_manager()->GetAudioTransceiver()->internal()->senders().empty(); + bool send_video = + !rtp_manager()->GetVideoTransceiver()->internal()->senders().empty(); + + // By default, generate sendrecv/recvonly m= sections. The direction is also + // restricted by the direction in the offer. + bool recv_audio = true; + bool recv_video = true; + + // The "offer_to_receive_X" options allow those defaults to be overridden. 
+ if (offer_answer_options.offer_to_receive_audio !=
+ RTCOfferAnswerOptions::kUndefined) {
+ recv_audio = (offer_answer_options.offer_to_receive_audio > 0);
+ }
+ if (offer_answer_options.offer_to_receive_video !=
+ RTCOfferAnswerOptions::kUndefined) {
+ recv_video = (offer_answer_options.offer_to_receive_video > 0);
+ }
+
+ absl::optional<size_t> audio_index;
+ absl::optional<size_t> video_index;
+ absl::optional<size_t> data_index;
+
+ // Generate m= sections that match those in the offer.
+ // Note that mediasession.cc will handle intersecting our preferred
+ // direction with the offered direction.
+ GenerateMediaDescriptionOptions(
+ remote_description(),
+ RtpTransceiverDirectionFromSendRecv(send_audio, recv_audio),
+ RtpTransceiverDirectionFromSendRecv(send_video, recv_video), &audio_index,
+ &video_index, &data_index, session_options);
+
+ cricket::MediaDescriptionOptions* audio_media_description_options =
+ !audio_index ? nullptr
+ : &session_options->media_description_options[*audio_index];
+ cricket::MediaDescriptionOptions* video_media_description_options =
+ !video_index ? nullptr
+ : &session_options->media_description_options[*video_index];
+
+ AddPlanBRtpSenderOptions(rtp_manager()->GetSendersInternal(),
+ audio_media_description_options,
+ video_media_description_options,
+ offer_answer_options.num_simulcast_layers);
+}
+
+void SdpOfferAnswerHandler::GetOptionsForUnifiedPlanAnswer(
+ const PeerConnectionInterface::RTCOfferAnswerOptions& offer_answer_options,
+ cricket::MediaSessionOptions* session_options) {
+ // Rules for generating an answer are dictated by JSEP sections 5.3.1 (Initial
+ // Answers) and 5.3.2 (Subsequent Answers). 
+ RTC_DCHECK(remote_description()); + RTC_DCHECK(remote_description()->GetType() == SdpType::kOffer); + for (const ContentInfo& content : + remote_description()->description()->contents()) { + cricket::MediaType media_type = content.media_description()->type(); + if (media_type == cricket::MEDIA_TYPE_AUDIO || + media_type == cricket::MEDIA_TYPE_VIDEO) { + auto transceiver = transceivers()->FindByMid(content.name); + if (transceiver) { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForTransceiver( + transceiver->internal(), content.name, + /*is_create_offer=*/false)); + } else { + // This should only happen with rejected transceivers. + RTC_DCHECK(content.rejected); + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions(media_type, content.name, + RtpTransceiverDirection::kInactive, + /*stopped=*/true)); + } + } else if (media_type == cricket::MEDIA_TYPE_UNSUPPORTED) { + RTC_DCHECK(content.rejected); + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions(media_type, content.name, + RtpTransceiverDirection::kInactive, + /*stopped=*/true)); + } else { + RTC_CHECK_EQ(cricket::MEDIA_TYPE_DATA, media_type); + // Reject all data sections if data channels are disabled. + // Reject a data section if it has already been rejected. + // Reject all data sections except for the first one. 
+ if (content.rejected || content.name != *(pc_->GetDataMid())) { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForRejectedData(content.name)); + } else { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForActiveData(content.name)); + } + } + } +} + +const char* SdpOfferAnswerHandler::SessionErrorToString( + SessionError error) const { + switch (error) { + case SessionError::kNone: + return "ERROR_NONE"; + case SessionError::kContent: + return "ERROR_CONTENT"; + case SessionError::kTransport: + return "ERROR_TRANSPORT"; + } + RTC_NOTREACHED(); + return ""; +} + +std::string SdpOfferAnswerHandler::GetSessionErrorMsg() { + RTC_DCHECK_RUN_ON(signaling_thread()); + rtc::StringBuilder desc; + desc << kSessionError << SessionErrorToString(session_error()) << ". "; + desc << kSessionErrorDesc << session_error_desc() << "."; + return desc.Release(); +} + +void SdpOfferAnswerHandler::SetSessionError(SessionError error, + const std::string& error_desc) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (error != session_error_) { + session_error_ = error; + session_error_desc_ = error_desc; + } +} + +RTCError SdpOfferAnswerHandler::HandleLegacyOfferOptions( + const PeerConnectionInterface::RTCOfferAnswerOptions& options) { + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(IsUnifiedPlan()); + + if (options.offer_to_receive_audio == 0) { + RemoveRecvDirectionFromReceivingTransceiversOfType( + cricket::MEDIA_TYPE_AUDIO); + } else if (options.offer_to_receive_audio == 1) { + AddUpToOneReceivingTransceiverOfType(cricket::MEDIA_TYPE_AUDIO); + } else if (options.offer_to_receive_audio > 1) { + LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_PARAMETER, + "offer_to_receive_audio > 1 is not supported."); + } + + if (options.offer_to_receive_video == 0) { + RemoveRecvDirectionFromReceivingTransceiversOfType( + cricket::MEDIA_TYPE_VIDEO); + } else if (options.offer_to_receive_video == 1) { + 
AddUpToOneReceivingTransceiverOfType(cricket::MEDIA_TYPE_VIDEO); + } else if (options.offer_to_receive_video > 1) { + LOG_AND_RETURN_ERROR(RTCErrorType::UNSUPPORTED_PARAMETER, + "offer_to_receive_video > 1 is not supported."); + } + + return RTCError::OK(); +} + +void SdpOfferAnswerHandler::RemoveRecvDirectionFromReceivingTransceiversOfType( + cricket::MediaType media_type) { + for (const auto& transceiver : GetReceivingTransceiversOfType(media_type)) { + RtpTransceiverDirection new_direction = + RtpTransceiverDirectionWithRecvSet(transceiver->direction(), false); + if (new_direction != transceiver->direction()) { + RTC_LOG(LS_INFO) << "Changing " << cricket::MediaTypeToString(media_type) + << " transceiver (MID=" + << transceiver->mid().value_or("") << ") from " + << RtpTransceiverDirectionToString( + transceiver->direction()) + << " to " + << RtpTransceiverDirectionToString(new_direction) + << " since CreateOffer specified offer_to_receive=0"; + transceiver->internal()->set_direction(new_direction); + } + } +} + +void SdpOfferAnswerHandler::AddUpToOneReceivingTransceiverOfType( + cricket::MediaType media_type) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (GetReceivingTransceiversOfType(media_type).empty()) { + RTC_LOG(LS_INFO) + << "Adding one recvonly " << cricket::MediaTypeToString(media_type) + << " transceiver since CreateOffer specified offer_to_receive=1"; + RtpTransceiverInit init; + init.direction = RtpTransceiverDirection::kRecvOnly; + pc_->AddTransceiver(media_type, nullptr, init, + /*update_negotiation_needed=*/false); + } +} + +std::vector>> +SdpOfferAnswerHandler::GetReceivingTransceiversOfType( + cricket::MediaType media_type) { + std::vector< + rtc::scoped_refptr>> + receiving_transceivers; + for (const auto& transceiver : transceivers()->List()) { + if (!transceiver->stopped() && transceiver->media_type() == media_type && + RtpTransceiverDirectionHasRecv(transceiver->direction())) { + receiving_transceivers.push_back(transceiver); + } + } + 
return receiving_transceivers; +} + +void SdpOfferAnswerHandler::ProcessRemovalOfRemoteTrack( + rtc::scoped_refptr> + transceiver, + std::vector>* remove_list, + std::vector>* removed_streams) { + RTC_DCHECK(transceiver->mid()); + RTC_LOG(LS_INFO) << "Processing the removal of a track for MID=" + << *transceiver->mid(); + std::vector> previous_streams = + transceiver->internal()->receiver_internal()->streams(); + // This will remove the remote track from the streams. + transceiver->internal()->receiver_internal()->set_stream_ids({}); + remove_list->push_back(transceiver); + RemoveRemoteStreamsIfEmpty(previous_streams, removed_streams); +} + +void SdpOfferAnswerHandler::RemoveRemoteStreamsIfEmpty( + const std::vector>& remote_streams, + std::vector>* removed_streams) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // TODO(https://crbug.com/webrtc/9480): When we use stream IDs instead of + // streams, see if the stream was removed by checking if this was the last + // receiver with that stream ID. + for (const auto& remote_stream : remote_streams) { + if (remote_stream->GetAudioTracks().empty() && + remote_stream->GetVideoTracks().empty()) { + remote_streams_->RemoveStream(remote_stream); + removed_streams->push_back(remote_stream); + } + } +} + +void SdpOfferAnswerHandler::RemoveSenders(cricket::MediaType media_type) { + RTC_DCHECK_RUN_ON(signaling_thread()); + UpdateLocalSenders(std::vector(), media_type); + UpdateRemoteSendersList(std::vector(), false, + media_type, nullptr); +} + +void SdpOfferAnswerHandler::UpdateLocalSenders( + const std::vector& streams, + cricket::MediaType media_type) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::UpdateLocalSenders"); + RTC_DCHECK_RUN_ON(signaling_thread()); + std::vector* current_senders = + rtp_manager()->GetLocalSenderInfos(media_type); + + // Find removed tracks. I.e., tracks where the track id, stream id or ssrc + // don't match the new StreamParam. 
+ for (auto sender_it = current_senders->begin(); + sender_it != current_senders->end(); + /* incremented manually */) { + const RtpSenderInfo& info = *sender_it; + const cricket::StreamParams* params = + cricket::GetStreamBySsrc(streams, info.first_ssrc); + if (!params || params->id != info.sender_id || + params->first_stream_id() != info.stream_id) { + rtp_manager()->OnLocalSenderRemoved(info, media_type); + sender_it = current_senders->erase(sender_it); + } else { + ++sender_it; + } + } + + // Find new and active senders. + for (const cricket::StreamParams& params : streams) { + // The sync_label is the MediaStream label and the |stream.id| is the + // sender id. + const std::string& stream_id = params.first_stream_id(); + const std::string& sender_id = params.id; + uint32_t ssrc = params.first_ssrc(); + const RtpSenderInfo* sender_info = + rtp_manager()->FindSenderInfo(*current_senders, stream_id, sender_id); + if (!sender_info) { + current_senders->push_back(RtpSenderInfo(stream_id, sender_id, ssrc)); + rtp_manager()->OnLocalSenderAdded(current_senders->back(), media_type); + } + } +} + +void SdpOfferAnswerHandler::UpdateRemoteSendersList( + const cricket::StreamParamsVec& streams, + bool default_sender_needed, + cricket::MediaType media_type, + StreamCollection* new_streams) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::UpdateRemoteSendersList"); + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(!IsUnifiedPlan()); + + std::vector* current_senders = + rtp_manager()->GetRemoteSenderInfos(media_type); + + // Find removed senders. I.e., senders where the sender id or ssrc don't match + // the new StreamParam. 
+ for (auto sender_it = current_senders->begin();
+ sender_it != current_senders->end();
+ /* incremented manually */) {
+ const RtpSenderInfo& info = *sender_it;
+ const cricket::StreamParams* params =
+ cricket::GetStreamBySsrc(streams, info.first_ssrc);
+ std::string params_stream_id;
+ if (params) {
+ params_stream_id =
+ (!params->first_stream_id().empty() ? params->first_stream_id()
+ : kDefaultStreamId);
+ }
+ bool sender_exists = params && params->id == info.sender_id &&
+ params_stream_id == info.stream_id;
+ // If this is a default track, and we still need it, don't remove it.
+ if ((info.stream_id == kDefaultStreamId && default_sender_needed) ||
+ sender_exists) {
+ ++sender_it;
+ } else {
+ rtp_manager()->OnRemoteSenderRemoved(
+ info, remote_streams_->find(info.stream_id), media_type);
+ sender_it = current_senders->erase(sender_it);
+ }
+ }
+
+ // Find new and active senders.
+ for (const cricket::StreamParams& params : streams) {
+ if (!params.has_ssrcs()) {
+ // The remote endpoint has streams, but didn't signal ssrcs. For an active
+ // sender, this means it is coming from a Unified Plan endpoint, so we just
+ // create a default.
+ default_sender_needed = true;
+ break;
+ }
+
+ // |params.id| is the sender id and the stream id uses the first of
+ // |params.stream_ids|. The remote description could come from a Unified
+ // Plan endpoint, with multiple or no stream_ids() signaled. Since this is
+ // not supported in Plan B, we just take the first here and create the
+ // default stream ID if none is specified.
+ const std::string& stream_id =
+ (!params.first_stream_id().empty() ? params.first_stream_id()
+ : kDefaultStreamId);
+ const std::string& sender_id = params.id;
+ uint32_t ssrc = params.first_ssrc();
+
+ rtc::scoped_refptr<MediaStreamInterface> stream =
+ remote_streams_->find(stream_id);
+ if (!stream) {
+ // This is a new MediaStream. Create a new remote MediaStream. 
+ stream = MediaStreamProxy::Create(rtc::Thread::Current(), + MediaStream::Create(stream_id)); + remote_streams_->AddStream(stream); + new_streams->AddStream(stream); + } + + const RtpSenderInfo* sender_info = + rtp_manager()->FindSenderInfo(*current_senders, stream_id, sender_id); + if (!sender_info) { + current_senders->push_back(RtpSenderInfo(stream_id, sender_id, ssrc)); + rtp_manager()->OnRemoteSenderAdded(current_senders->back(), stream, + media_type); + } + } + + // Add default sender if necessary. + if (default_sender_needed) { + rtc::scoped_refptr default_stream = + remote_streams_->find(kDefaultStreamId); + if (!default_stream) { + // Create the new default MediaStream. + default_stream = MediaStreamProxy::Create( + rtc::Thread::Current(), MediaStream::Create(kDefaultStreamId)); + remote_streams_->AddStream(default_stream); + new_streams->AddStream(default_stream); + } + std::string default_sender_id = (media_type == cricket::MEDIA_TYPE_AUDIO) + ? kDefaultAudioSenderId + : kDefaultVideoSenderId; + const RtpSenderInfo* default_sender_info = rtp_manager()->FindSenderInfo( + *current_senders, kDefaultStreamId, default_sender_id); + if (!default_sender_info) { + current_senders->push_back( + RtpSenderInfo(kDefaultStreamId, default_sender_id, /*ssrc=*/0)); + rtp_manager()->OnRemoteSenderAdded(current_senders->back(), + default_stream, media_type); + } + } +} + +void SdpOfferAnswerHandler::EnableSending() { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::EnableSending"); + RTC_DCHECK_RUN_ON(signaling_thread()); + for (const auto& transceiver : transceivers()->ListInternal()) { + cricket::ChannelInterface* channel = transceiver->channel(); + if (channel) { + channel->Enable(true); + } + } +} + +RTCError SdpOfferAnswerHandler::PushdownMediaDescription( + SdpType type, + cricket::ContentSource source, + const std::map& + bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::PushdownMediaDescription"); + const SessionDescriptionInterface* sdesc 
= 
+ (source == cricket::CS_LOCAL ? local_description()
+ : remote_description());
+ RTC_DCHECK_RUN_ON(signaling_thread());
+ RTC_DCHECK(sdesc);
+
+ if (!UpdatePayloadTypeDemuxingState(source, bundle_groups_by_mid)) {
+ // Note that this is never expected to fail, since RtpDemuxer doesn't return
+ // an error when changing payload type demux criteria, which is all this
+ // does.
+ LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR,
+ "Failed to update payload type demuxing state.");
+ }
+
+ // Push down the new SDP media section for each audio/video transceiver.
+ auto rtp_transceivers = transceivers()->ListInternal();
+ std::vector<
+ std::pair<cricket::ChannelInterface*, const MediaContentDescription*>>
+ channels;
+ for (const auto& transceiver : rtp_transceivers) {
+ const ContentInfo* content_info =
+ FindMediaSectionForTransceiver(transceiver, sdesc);
+ cricket::ChannelInterface* channel = transceiver->channel();
+ if (!channel || !content_info || content_info->rejected) {
+ continue;
+ }
+ const MediaContentDescription* content_desc =
+ content_info->media_description();
+ if (!content_desc) {
+ continue;
+ }
+
+ transceiver->OnNegotiationUpdate(type, content_desc);
+ channels.push_back(std::make_pair(channel, content_desc));
+ }
+
+ // This for-loop of invokes helps avoid audio impairment during re-negotiations.
+ // One of the causes is that downstairs decoder creation is synchronous at the
+ // moment, and that a decoder is created for each codec listed in the SDP.
+ //
+ // TODO(bugs.webrtc.org/12840): consider merging the invokes again after
+ // these projects have shipped:
+ // - bugs.webrtc.org/12462
+ // - crbug.com/1157227
+ // - crbug.com/1187289
+ for (const auto& entry : channels) {
+ RTCError error =
+ pc_->worker_thread()->Invoke<RTCError>(RTC_FROM_HERE, [&]() {
+ std::string error;
+ bool success =
+ (source == cricket::CS_LOCAL)
+ ? 
entry.first->SetLocalContent(entry.second, type, &error) + : entry.first->SetRemoteContent(entry.second, type, &error); + if (!success) { + LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, error); + } + return RTCError::OK(); + }); + if (!error.ok()) { + return error; + } + } + + // Need complete offer/answer with an SCTP m= section before starting SCTP, + // according to https://tools.ietf.org/html/draft-ietf-mmusic-sctp-sdp-19 + if (pc_->sctp_mid() && local_description() && remote_description()) { + auto local_sctp_description = cricket::GetFirstSctpDataContentDescription( + local_description()->description()); + auto remote_sctp_description = cricket::GetFirstSctpDataContentDescription( + remote_description()->description()); + if (local_sctp_description && remote_sctp_description) { + int max_message_size; + // A remote max message size of zero means "any size supported". + // We configure the connection with our own max message size. + if (remote_sctp_description->max_message_size() == 0) { + max_message_size = local_sctp_description->max_message_size(); + } else { + max_message_size = + std::min(local_sctp_description->max_message_size(), + remote_sctp_description->max_message_size()); + } + pc_->StartSctpTransport(local_sctp_description->port(), + remote_sctp_description->port(), + max_message_size); + } + } + + return RTCError::OK(); +} + +RTCError SdpOfferAnswerHandler::PushdownTransportDescription( + cricket::ContentSource source, + SdpType type) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::PushdownTransportDescription"); + RTC_DCHECK_RUN_ON(signaling_thread()); + + if (source == cricket::CS_LOCAL) { + const SessionDescriptionInterface* sdesc = local_description(); + RTC_DCHECK(sdesc); + return transport_controller()->SetLocalDescription(type, + sdesc->description()); + } else { + const SessionDescriptionInterface* sdesc = remote_description(); + RTC_DCHECK(sdesc); + return transport_controller()->SetRemoteDescription(type, + 
sdesc->description()); + } +} + +void SdpOfferAnswerHandler::RemoveStoppedTransceivers() { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::RemoveStoppedTransceivers"); + RTC_DCHECK_RUN_ON(signaling_thread()); + // 3.2.10.1: For each transceiver in the connection's set of transceivers + // run the following steps: + if (!IsUnifiedPlan()) + return; + // Traverse a copy of the transceiver list. + auto transceiver_list = transceivers()->List(); + for (auto transceiver : transceiver_list) { + // 3.2.10.1.1: If transceiver is stopped, associated with an m= section + // and the associated m= section is rejected in + // connection.[[CurrentLocalDescription]] or + // connection.[[CurrentRemoteDescription]], remove the + // transceiver from the connection's set of transceivers. + if (!transceiver->stopped()) { + continue; + } + const ContentInfo* local_content = FindMediaSectionForTransceiver( + transceiver->internal(), local_description()); + const ContentInfo* remote_content = FindMediaSectionForTransceiver( + transceiver->internal(), remote_description()); + if ((local_content && local_content->rejected) || + (remote_content && remote_content->rejected)) { + RTC_LOG(LS_INFO) << "Dissociating transceiver" + " since the media section is being recycled."; + transceiver->internal()->set_mid(absl::nullopt); + transceiver->internal()->set_mline_index(absl::nullopt); + } else if (!local_content && !remote_content) { + // TODO(bugs.webrtc.org/11973): Consider if this should be removed already + // See https://github.com/w3c/webrtc-pc/issues/2576 + RTC_LOG(LS_INFO) + << "Dropping stopped transceiver that was never associated"; + } + transceivers()->Remove(transceiver); + } +} + +void SdpOfferAnswerHandler::RemoveUnusedChannels( + const SessionDescription* desc) { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Destroy video channel first since it may have a pointer to the + // voice channel. 
+ const cricket::ContentInfo* video_info = cricket::GetFirstVideoContent(desc); + if (!video_info || video_info->rejected) { + DestroyTransceiverChannel(rtp_manager()->GetVideoTransceiver()); + } + + const cricket::ContentInfo* audio_info = cricket::GetFirstAudioContent(desc); + if (!audio_info || audio_info->rejected) { + DestroyTransceiverChannel(rtp_manager()->GetAudioTransceiver()); + } + + const cricket::ContentInfo* data_info = cricket::GetFirstDataContent(desc); + if (!data_info) { + RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, + "No data channel section in the description."); + error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); + DestroyDataChannelTransport(error); + } else if (data_info->rejected) { + rtc::StringBuilder sb; + sb << "Rejected data channel with mid=" << data_info->name << "."; + + RTCError error(RTCErrorType::OPERATION_ERROR_WITH_DATA, sb.Release()); + error.set_error_detail(RTCErrorDetailType::DATA_CHANNEL_FAILURE); + DestroyDataChannelTransport(error); + } +} + +void SdpOfferAnswerHandler::ReportNegotiatedSdpSemantics( + const SessionDescriptionInterface& answer) { + SdpSemanticNegotiated semantics_negotiated; + switch (answer.description()->msid_signaling()) { + case 0: + semantics_negotiated = kSdpSemanticNegotiatedNone; + break; + case cricket::kMsidSignalingMediaSection: + semantics_negotiated = kSdpSemanticNegotiatedUnifiedPlan; + break; + case cricket::kMsidSignalingSsrcAttribute: + semantics_negotiated = kSdpSemanticNegotiatedPlanB; + break; + case cricket::kMsidSignalingMediaSection | + cricket::kMsidSignalingSsrcAttribute: + semantics_negotiated = kSdpSemanticNegotiatedMixed; + break; + default: + RTC_NOTREACHED(); + } + RTC_HISTOGRAM_ENUMERATION("WebRTC.PeerConnection.SdpSemanticNegotiated", + semantics_negotiated, kSdpSemanticNegotiatedMax); +} + +void SdpOfferAnswerHandler::UpdateEndedRemoteMediaStreams() { + RTC_DCHECK_RUN_ON(signaling_thread()); + std::vector> streams_to_remove; + for (size_t i = 0; 
i < remote_streams_->count(); ++i) { + MediaStreamInterface* stream = remote_streams_->at(i); + if (stream->GetAudioTracks().empty() && stream->GetVideoTracks().empty()) { + streams_to_remove.push_back(stream); + } + } + + for (auto& stream : streams_to_remove) { + remote_streams_->RemoveStream(stream); + pc_->Observer()->OnRemoveStream(std::move(stream)); + } +} + +bool SdpOfferAnswerHandler::UseCandidatesInSessionDescription( + const SessionDescriptionInterface* remote_desc) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (!remote_desc) { + return true; + } + bool ret = true; + + for (size_t m = 0; m < remote_desc->number_of_mediasections(); ++m) { + const IceCandidateCollection* candidates = remote_desc->candidates(m); + for (size_t n = 0; n < candidates->count(); ++n) { + const IceCandidateInterface* candidate = candidates->at(n); + bool valid = false; + if (!ReadyToUseRemoteCandidate(candidate, remote_desc, &valid)) { + if (valid) { + RTC_LOG(LS_INFO) + << "UseCandidatesInSessionDescription: Not ready to use " + "candidate."; + } + continue; + } + ret = UseCandidate(candidate); + if (!ret) { + break; + } + } + } + return ret; +} + +bool SdpOfferAnswerHandler::UseCandidate( + const IceCandidateInterface* candidate) { + RTC_DCHECK_RUN_ON(signaling_thread()); + + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + + RTCErrorOr result = + FindContentInfo(remote_description(), candidate); + if (!result.ok()) + return false; + + const cricket::Candidate& c = candidate->candidate(); + RTCError error = cricket::VerifyCandidate(c); + if (!error.ok()) { + RTC_LOG(LS_WARNING) << "Invalid candidate: " << c.ToString(); + return true; + } + + pc_->AddRemoteCandidate(result.value()->name, c); + + return true; +} + +// We need to check the local/remote description for the Transport instead of +// the session, because a new Transport added during renegotiation may have +// them unset while the session has them set from the previous negotiation. 
+// Not doing so may trigger the auto generation of transport description and +// mess up DTLS identity information, ICE credential, etc. +bool SdpOfferAnswerHandler::ReadyToUseRemoteCandidate( + const IceCandidateInterface* candidate, + const SessionDescriptionInterface* remote_desc, + bool* valid) { + RTC_DCHECK_RUN_ON(signaling_thread()); + *valid = true; + + const SessionDescriptionInterface* current_remote_desc = + remote_desc ? remote_desc : remote_description(); + + if (!current_remote_desc) { + return false; + } + + RTCErrorOr result = + FindContentInfo(current_remote_desc, candidate); + if (!result.ok()) { + RTC_LOG(LS_ERROR) << "ReadyToUseRemoteCandidate: Invalid candidate. " + << result.error().message(); + + *valid = false; + return false; + } + + return true; +} + +RTCErrorOr SdpOfferAnswerHandler::FindContentInfo( + const SessionDescriptionInterface* description, + const IceCandidateInterface* candidate) { + if (!candidate->sdp_mid().empty()) { + auto& contents = description->description()->contents(); + auto it = absl::c_find_if( + contents, [candidate](const cricket::ContentInfo& content_info) { + return content_info.mid() == candidate->sdp_mid(); + }); + if (it == contents.end()) { + return RTCError( + RTCErrorType::INVALID_PARAMETER, + "Mid " + candidate->sdp_mid() + + " specified but no media section with that mid found."); + } else { + return &*it; + } + } else if (candidate->sdp_mline_index() >= 0) { + size_t mediacontent_index = + static_cast(candidate->sdp_mline_index()); + size_t content_size = description->description()->contents().size(); + if (mediacontent_index < content_size) { + return &description->description()->contents()[mediacontent_index]; + } else { + return RTCError(RTCErrorType::INVALID_RANGE, + "Media line index (" + + rtc::ToString(candidate->sdp_mline_index()) + + ") out of range (number of mlines: " + + rtc::ToString(content_size) + ")."); + } + } + + return RTCError(RTCErrorType::INVALID_PARAMETER, + "Neither 
sdp_mline_index nor sdp_mid specified."); +} + +RTCError SdpOfferAnswerHandler::CreateChannels(const SessionDescription& desc) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::CreateChannels"); + // Creating the media channels. Transports should already have been created + // at this point. + RTC_DCHECK_RUN_ON(signaling_thread()); + const cricket::ContentInfo* voice = cricket::GetFirstAudioContent(&desc); + if (voice && !voice->rejected && + !rtp_manager()->GetAudioTransceiver()->internal()->channel()) { + cricket::VoiceChannel* voice_channel = CreateVoiceChannel(voice->name); + if (!voice_channel) { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, + "Failed to create voice channel."); + } + rtp_manager()->GetAudioTransceiver()->internal()->SetChannel(voice_channel); + } + + const cricket::ContentInfo* video = cricket::GetFirstVideoContent(&desc); + if (video && !video->rejected && + !rtp_manager()->GetVideoTransceiver()->internal()->channel()) { + cricket::VideoChannel* video_channel = CreateVideoChannel(video->name); + if (!video_channel) { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, + "Failed to create video channel."); + } + rtp_manager()->GetVideoTransceiver()->internal()->SetChannel(video_channel); + } + + const cricket::ContentInfo* data = cricket::GetFirstDataContent(&desc); + if (data && !data->rejected && + !data_channel_controller()->data_channel_transport()) { + if (!CreateDataChannel(data->name)) { + LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, + "Failed to create data channel."); + } + } + + return RTCError::OK(); +} + +// TODO(steveanton): Perhaps this should be managed by the RtpTransceiver. 
+cricket::VoiceChannel* SdpOfferAnswerHandler::CreateVoiceChannel( + const std::string& mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::CreateVoiceChannel"); + RTC_DCHECK_RUN_ON(signaling_thread()); + if (!channel_manager()->media_engine()) + return nullptr; + + RtpTransportInternal* rtp_transport = pc_->GetRtpTransport(mid); + + // TODO(bugs.webrtc.org/11992): CreateVoiceChannel internally switches to the + // worker thread. We shouldn't be using the |call_ptr_| hack here but simply + // be on the worker thread and use |call_| (update upstream code). + return channel_manager()->CreateVoiceChannel( + pc_->call_ptr(), pc_->configuration()->media_config, rtp_transport, + signaling_thread(), mid, pc_->SrtpRequired(), pc_->GetCryptoOptions(), + &ssrc_generator_, audio_options()); +} + +// TODO(steveanton): Perhaps this should be managed by the RtpTransceiver. +cricket::VideoChannel* SdpOfferAnswerHandler::CreateVideoChannel( + const std::string& mid) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::CreateVideoChannel"); + RTC_DCHECK_RUN_ON(signaling_thread()); + if (!channel_manager()->media_engine()) + return nullptr; + + // NOTE: This involves a non-ideal hop (Invoke) over to the network thread. + RtpTransportInternal* rtp_transport = pc_->GetRtpTransport(mid); + + // TODO(bugs.webrtc.org/11992): CreateVideoChannel internally switches to the + // worker thread. We shouldn't be using the |call_ptr_| hack here but simply + // be on the worker thread and use |call_| (update upstream code). 
+ return channel_manager()->CreateVideoChannel( + pc_->call_ptr(), pc_->configuration()->media_config, rtp_transport, + signaling_thread(), mid, pc_->SrtpRequired(), pc_->GetCryptoOptions(), + &ssrc_generator_, video_options(), + video_bitrate_allocator_factory_.get()); +} + +bool SdpOfferAnswerHandler::CreateDataChannel(const std::string& mid) { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (!pc_->network_thread()->Invoke(RTC_FROM_HERE, [this, &mid] { + RTC_DCHECK_RUN_ON(pc_->network_thread()); + return pc_->SetupDataChannelTransport_n(mid); + })) { + return false; + } + // TODO(tommi): Is this necessary? SetupDataChannelTransport_n() above + // will have queued up updating the transport name on the signaling thread + // and could update the mid at the same time. This here is synchronous + // though, but it changes the state of PeerConnection and makes it be + // out of sync (transport name not set while the mid is set). + pc_->SetSctpDataMid(mid); + return true; +} + +void SdpOfferAnswerHandler::DestroyTransceiverChannel( + rtc::scoped_refptr> + transceiver) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::DestroyTransceiverChannel"); + RTC_DCHECK(transceiver); + RTC_LOG_THREAD_BLOCK_COUNT(); + + // TODO(tommi): We're currently on the signaling thread. + // There are multiple hops to the worker ahead. + // Consider if we can make the call to SetChannel() on the worker thread + // (and require that to be the context it's always called in) and also + // call DestroyChannelInterface there, since it also needs to hop to the + // worker. + + cricket::ChannelInterface* channel = transceiver->internal()->channel(); + RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN(0); + if (channel) { + // TODO(tommi): VideoRtpReceiver::SetMediaChannel blocks and jumps to the + // worker thread. When being set to nullptr, there are additional + // blocking calls to e.g. ClearRecordableEncodedFrameCallback which triggers + // another blocking call or Stop() for video channels. 
+ // The channel object also needs to be de-initialized on the network thread + // so if ownership of the channel object lies with the transceiver, we could + // un-set the channel pointer and uninitialize/destruct the channel object + // at the same time, rather than in separate steps. + transceiver->internal()->SetChannel(nullptr); + // TODO(tommi): All channel objects end up getting deleted on the + // worker thread (ideally should be on the network thread but the + // MediaChannel objects are tied to the worker. Can the teardown be done + // asynchronously across the threads rather than blocking? + DestroyChannelInterface(channel); + } +} + +void SdpOfferAnswerHandler::DestroyDataChannelTransport(RTCError error) { + RTC_DCHECK_RUN_ON(signaling_thread()); + const bool has_sctp = pc_->sctp_mid().has_value(); + + if (has_sctp) + data_channel_controller()->OnTransportChannelClosed(error); + + pc_->network_thread()->Invoke(RTC_FROM_HERE, [this] { + RTC_DCHECK_RUN_ON(pc_->network_thread()); + pc_->TeardownDataChannelTransport_n(); + }); + + if (has_sctp) + pc_->ResetSctpDataMid(); +} + +void SdpOfferAnswerHandler::DestroyChannelInterface( + cricket::ChannelInterface* channel) { + TRACE_EVENT0("webrtc", "SdpOfferAnswerHandler::DestroyChannelInterface"); + RTC_DCHECK_RUN_ON(signaling_thread()); + RTC_DCHECK(channel_manager()->media_engine()); + RTC_DCHECK(channel); + + // TODO(bugs.webrtc.org/11992): All the below methods should be called on the + // worker thread. (they switch internally anyway). Change + // DestroyChannelInterface to either be called on the worker thread, or do + // this asynchronously on the worker. 
+ RTC_LOG_THREAD_BLOCK_COUNT(); + + switch (channel->media_type()) { + case cricket::MEDIA_TYPE_AUDIO: + channel_manager()->DestroyVoiceChannel( + static_cast(channel)); + break; + case cricket::MEDIA_TYPE_VIDEO: + channel_manager()->DestroyVideoChannel( + static_cast(channel)); + break; + case cricket::MEDIA_TYPE_DATA: + RTC_NOTREACHED() + << "Trying to destroy datachannel through DestroyChannelInterface"; + break; + default: + RTC_NOTREACHED() << "Unknown media type: " << channel->media_type(); + break; + } + + // TODO(tommi): Figure out why we can get 2 blocking calls when running + // PeerConnectionCryptoTest.CreateAnswerWithDifferentSslRoles. + // and 3 when running + // PeerConnectionCryptoTest.CreateAnswerWithDifferentSslRoles + // RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN(1); +} + +void SdpOfferAnswerHandler::DestroyAllChannels() { + RTC_DCHECK_RUN_ON(signaling_thread()); + if (!transceivers()) { + return; + } + + RTC_LOG_THREAD_BLOCK_COUNT(); + + // Destroy video channels first since they may have a pointer to a voice + // channel. 
+ auto list = transceivers()->List(); + RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN(0); + + for (const auto& transceiver : list) { + if (transceiver->media_type() == cricket::MEDIA_TYPE_VIDEO) { + DestroyTransceiverChannel(transceiver); + } + } + for (const auto& transceiver : list) { + if (transceiver->media_type() == cricket::MEDIA_TYPE_AUDIO) { + DestroyTransceiverChannel(transceiver); + } + } + + DestroyDataChannelTransport({}); +} + +void SdpOfferAnswerHandler::GenerateMediaDescriptionOptions( + const SessionDescriptionInterface* session_desc, + RtpTransceiverDirection audio_direction, + RtpTransceiverDirection video_direction, + absl::optional* audio_index, + absl::optional* video_index, + absl::optional* data_index, + cricket::MediaSessionOptions* session_options) { + RTC_DCHECK_RUN_ON(signaling_thread()); + for (const cricket::ContentInfo& content : + session_desc->description()->contents()) { + if (IsAudioContent(&content)) { + // If we already have an audio m= section, reject this extra one. + if (*audio_index) { + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions( + cricket::MEDIA_TYPE_AUDIO, content.name, + RtpTransceiverDirection::kInactive, /*stopped=*/true)); + } else { + bool stopped = (audio_direction == RtpTransceiverDirection::kInactive); + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions(cricket::MEDIA_TYPE_AUDIO, + content.name, audio_direction, + stopped)); + *audio_index = session_options->media_description_options.size() - 1; + } + session_options->media_description_options.back().header_extensions = + channel_manager()->GetSupportedAudioRtpHeaderExtensions(); + } else if (IsVideoContent(&content)) { + // If we already have an video m= section, reject this extra one. 
+ if (*video_index) { + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions( + cricket::MEDIA_TYPE_VIDEO, content.name, + RtpTransceiverDirection::kInactive, /*stopped=*/true)); + } else { + bool stopped = (video_direction == RtpTransceiverDirection::kInactive); + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions(cricket::MEDIA_TYPE_VIDEO, + content.name, video_direction, + stopped)); + *video_index = session_options->media_description_options.size() - 1; + } + session_options->media_description_options.back().header_extensions = + channel_manager()->GetSupportedVideoRtpHeaderExtensions(); + } else if (IsUnsupportedContent(&content)) { + session_options->media_description_options.push_back( + cricket::MediaDescriptionOptions(cricket::MEDIA_TYPE_UNSUPPORTED, + content.name, + RtpTransceiverDirection::kInactive, + /*stopped=*/true)); + } else { + RTC_DCHECK(IsDataContent(&content)); + // If we already have a data m= section, reject this extra one. + if (*data_index) { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForRejectedData(content.name)); + } else { + session_options->media_description_options.push_back( + GetMediaDescriptionOptionsForActiveData(content.name)); + *data_index = session_options->media_description_options.size() - 1; + } + } + } + } + + cricket::MediaDescriptionOptions + SdpOfferAnswerHandler::GetMediaDescriptionOptionsForActiveData( + const std::string& mid) const { + RTC_DCHECK_RUN_ON(signaling_thread()); + // Direction for data sections is meaningless, but legacy endpoints might + // expect sendrecv. 
+ cricket::MediaDescriptionOptions options(cricket::MEDIA_TYPE_DATA, mid, + RtpTransceiverDirection::kSendRecv, + /*stopped=*/false); + return options; +} + +cricket::MediaDescriptionOptions +SdpOfferAnswerHandler::GetMediaDescriptionOptionsForRejectedData( + const std::string& mid) const { + RTC_DCHECK_RUN_ON(signaling_thread()); + cricket::MediaDescriptionOptions options(cricket::MEDIA_TYPE_DATA, mid, + RtpTransceiverDirection::kInactive, + /*stopped=*/true); + return options; +} + +bool SdpOfferAnswerHandler::UpdatePayloadTypeDemuxingState( + cricket::ContentSource source, + const std::map& + bundle_groups_by_mid) { + TRACE_EVENT0("webrtc", + "SdpOfferAnswerHandler::UpdatePayloadTypeDemuxingState"); + RTC_DCHECK_RUN_ON(signaling_thread()); + // We may need to delete any created default streams and disable creation of + // new ones on the basis of payload type. This is needed to avoid SSRC + // collisions in Call's RtpDemuxer, in the case that a transceiver has + // created a default stream, and then some other channel gets the SSRC + // signaled in the corresponding Unified Plan "m=" section. Specifically, we + // need to disable payload type based demuxing when two bundled "m=" sections + // are using the same payload type(s). For more context + // see https://bugs.chromium.org/p/webrtc/issues/detail?id=11477 + const SessionDescriptionInterface* sdesc = + (source == cricket::CS_LOCAL ? local_description() + : remote_description()); + struct PayloadTypes { + std::set audio_payload_types; + std::set video_payload_types; + bool pt_demuxing_possible_audio = true; + bool pt_demuxing_possible_video = true; + }; + std::map payload_types_by_bundle; + // If the MID is missing from *any* receiving m= section, this is set to true. 
+ bool mid_header_extension_missing_audio = false; + bool mid_header_extension_missing_video = false; + for (auto& content_info : sdesc->description()->contents()) { + auto it = bundle_groups_by_mid.find(content_info.name); + const cricket::ContentGroup* bundle_group = + it != bundle_groups_by_mid.end() ? it->second : nullptr; + // If this m= section isn't bundled, it's safe to demux by payload type + // since other m= sections using the same payload type will also be using + // different transports. + if (!bundle_group) { + continue; + } + PayloadTypes* payload_types = &payload_types_by_bundle[bundle_group]; + if (content_info.rejected || + (source == cricket::ContentSource::CS_LOCAL && + !RtpTransceiverDirectionHasRecv( + content_info.media_description()->direction())) || + (source == cricket::ContentSource::CS_REMOTE && + !RtpTransceiverDirectionHasSend( + content_info.media_description()->direction()))) { + // Ignore transceivers that are not receiving. + continue; + } + switch (content_info.media_description()->type()) { + case cricket::MediaType::MEDIA_TYPE_AUDIO: { + if (!mid_header_extension_missing_audio) { + mid_header_extension_missing_audio = + !ContentHasHeaderExtension(content_info, RtpExtension::kMidUri); + } + const cricket::AudioContentDescription* audio_desc = + content_info.media_description()->as_audio(); + for (const cricket::AudioCodec& audio : audio_desc->codecs()) { + if (payload_types->audio_payload_types.count(audio.id)) { + // Two m= sections are using the same payload type, thus demuxing + // by payload type is not possible. 
+ payload_types->pt_demuxing_possible_audio = false; + } + payload_types->audio_payload_types.insert(audio.id); + } + break; + } + case cricket::MediaType::MEDIA_TYPE_VIDEO: { + if (!mid_header_extension_missing_video) { + mid_header_extension_missing_video = + !ContentHasHeaderExtension(content_info, RtpExtension::kMidUri); + } + const cricket::VideoContentDescription* video_desc = + content_info.media_description()->as_video(); + for (const cricket::VideoCodec& video : video_desc->codecs()) { + if (payload_types->video_payload_types.count(video.id)) { + // Two m= sections are using the same payload type, thus demuxing + // by payload type is not possible. + payload_types->pt_demuxing_possible_video = false; + } + payload_types->video_payload_types.insert(video.id); + } + break; + } + default: + // Ignore data channels. + continue; + } + } + + // Gather all updates ahead of time so that all channels can be updated in a + // single Invoke; necessary due to thread guards. + std::vector> + channels_to_update; + for (const auto& transceiver : transceivers()->ListInternal()) { + cricket::ChannelInterface* channel = transceiver->channel(); + const ContentInfo* content = + FindMediaSectionForTransceiver(transceiver, sdesc); + if (!channel || !content) { + continue; + } + RtpTransceiverDirection local_direction = + content->media_description()->direction(); + if (source == cricket::CS_REMOTE) { + local_direction = RtpTransceiverDirectionReversed(local_direction); + } + channels_to_update.emplace_back(local_direction, transceiver->channel()); + } + + if (channels_to_update.empty()) { + return true; + } + + // In Unified Plan, payload type demuxing is useful for legacy endpoints that + // don't support the MID header extension, but it can also cause incorrect + // forwarding of packets when going from one m= section to multiple m= + // sections in the same BUNDLE. 
This only happens if media arrives prior to + // negotiation, but this can cause missing video and unsignalled ssrc bugs + // severe enough to warrant disabling PT demuxing in such cases. Therefore, if + // a MID header extension is present on all m= sections for a given kind + // (audio/video) then we use that as an OK to disable payload type demuxing in + // BUNDLEs of that kind. However if PT demuxing was ever turned on (e.g. MID + // was ever removed on ANY m= section of that kind) then we continue to allow + // PT demuxing in order to prevent disabling it in follow-up O/A exchanges and + // allowing early media by PT. + bool bundled_pt_demux_allowed_audio = !IsUnifiedPlan() || + mid_header_extension_missing_audio || + pt_demuxing_has_been_used_audio_; + bool bundled_pt_demux_allowed_video = !IsUnifiedPlan() || + mid_header_extension_missing_video || + pt_demuxing_has_been_used_video_; + // Kill switch for the above change. + if (field_trial::IsEnabled(kAlwaysAllowPayloadTypeDemuxingFieldTrialName)) { + // TODO(https://crbug.com/webrtc/12814): If disabling PT-based demux does + // not trigger regressions, remove this kill switch. + bundled_pt_demux_allowed_audio = true; + bundled_pt_demux_allowed_video = true; + } + + return pc_->worker_thread()->Invoke( + RTC_FROM_HERE, + [&channels_to_update, &bundle_groups_by_mid, &payload_types_by_bundle, + bundled_pt_demux_allowed_audio, bundled_pt_demux_allowed_video, + pt_demuxing_has_been_used_audio = &pt_demuxing_has_been_used_audio_, + pt_demuxing_has_been_used_video = &pt_demuxing_has_been_used_video_]() { + for (const auto& it : channels_to_update) { + RtpTransceiverDirection local_direction = it.first; + cricket::ChannelInterface* channel = it.second; + cricket::MediaType media_type = channel->media_type(); + auto bundle_it = bundle_groups_by_mid.find(channel->content_name()); + const cricket::ContentGroup* bundle_group = + bundle_it != bundle_groups_by_mid.end() ? 
bundle_it->second + : nullptr; + if (media_type == cricket::MediaType::MEDIA_TYPE_AUDIO) { + bool pt_demux_enabled = + RtpTransceiverDirectionHasRecv(local_direction) && + (!bundle_group || (bundled_pt_demux_allowed_audio && + payload_types_by_bundle[bundle_group] + .pt_demuxing_possible_audio)); + if (pt_demux_enabled) { + *pt_demuxing_has_been_used_audio = true; + } + if (!channel->SetPayloadTypeDemuxingEnabled(pt_demux_enabled)) { + return false; + } + } else if (media_type == cricket::MediaType::MEDIA_TYPE_VIDEO) { + bool pt_demux_enabled = + RtpTransceiverDirectionHasRecv(local_direction) && + (!bundle_group || (bundled_pt_demux_allowed_video && + payload_types_by_bundle[bundle_group] + .pt_demuxing_possible_video)); + if (pt_demux_enabled) { + *pt_demuxing_has_been_used_video = true; + } + if (!channel->SetPayloadTypeDemuxingEnabled(pt_demux_enabled)) { + return false; + } + } + } + return true; + }); +} + +} // namespace webrtc diff --git a/pc/sdp_offer_answer.h b/pc/sdp_offer_answer.h new file mode 100644 index 0000000000..f86b900b91 --- /dev/null +++ b/pc/sdp_offer_answer.h @@ -0,0 +1,685 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef PC_SDP_OFFER_ANSWER_H_ +#define PC_SDP_OFFER_ANSWER_H_ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/audio_options.h" +#include "api/candidate.h" +#include "api/jsep.h" +#include "api/jsep_ice_candidate.h" +#include "api/media_stream_interface.h" +#include "api/media_types.h" +#include "api/peer_connection_interface.h" +#include "api/rtc_error.h" +#include "api/rtp_transceiver_direction.h" +#include "api/rtp_transceiver_interface.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/set_local_description_observer_interface.h" +#include "api/set_remote_description_observer_interface.h" +#include "api/transport/data_channel_transport_interface.h" +#include "api/turn_customizer.h" +#include "api/uma_metrics.h" +#include "api/video/video_bitrate_allocator_factory.h" +#include "media/base/media_channel.h" +#include "media/base/stream_params.h" +#include "p2p/base/port_allocator.h" +#include "pc/channel.h" +#include "pc/channel_interface.h" +#include "pc/channel_manager.h" +#include "pc/data_channel_controller.h" +#include "pc/ice_server_parsing.h" +#include "pc/jsep_transport_controller.h" +#include "pc/media_session.h" +#include "pc/media_stream_observer.h" +#include "pc/peer_connection_factory.h" +#include "pc/peer_connection_internal.h" +#include "pc/rtc_stats_collector.h" +#include "pc/rtp_receiver.h" +#include "pc/rtp_sender.h" +#include "pc/rtp_transceiver.h" +#include "pc/rtp_transmission_manager.h" +#include "pc/sctp_transport.h" +#include "pc/sdp_state_provider.h" +#include "pc/session_description.h" +#include "pc/stats_collector.h" +#include "pc/stream_collection.h" +#include "pc/transceiver_list.h" +#include "pc/webrtc_session_description_factory.h" +#include "rtc_base/checks.h" +#include "rtc_base/experiments/field_trial_parser.h" +#include "rtc_base/operations_chain.h" +#include "rtc_base/race_checker.h" +#include 
"rtc_base/rtc_certificate.h" +#include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" +#include "rtc_base/unique_id_generator.h" +#include "rtc_base/weak_ptr.h" + +namespace webrtc { + +// SdpOfferAnswerHandler is a component +// of the PeerConnection object as defined +// by the PeerConnectionInterface API surface. +// The class is responsible for the following: +// - Parsing and interpreting SDP. +// - Generating offers and answers based on the current state. +// This class lives on the signaling thread. +class SdpOfferAnswerHandler : public SdpStateProvider, + public sigslot::has_slots<> { + public: + ~SdpOfferAnswerHandler(); + + // Creates an SdpOfferAnswerHandler. Modifies dependencies. + static std::unique_ptr Create( + PeerConnection* pc, + const PeerConnectionInterface::RTCConfiguration& configuration, + PeerConnectionDependencies& dependencies); + + void ResetSessionDescFactory() { + RTC_DCHECK_RUN_ON(signaling_thread()); + webrtc_session_desc_factory_.reset(); + } + const WebRtcSessionDescriptionFactory* webrtc_session_desc_factory() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return webrtc_session_desc_factory_.get(); + } + + // Change signaling state to Closed, and perform appropriate actions. + void Close(); + + // Called as part of destroying the owning PeerConnection. 
+ void PrepareForShutdown(); + + // Implementation of SdpStateProvider + PeerConnectionInterface::SignalingState signaling_state() const override; + + const SessionDescriptionInterface* local_description() const override; + const SessionDescriptionInterface* remote_description() const override; + const SessionDescriptionInterface* current_local_description() const override; + const SessionDescriptionInterface* current_remote_description() + const override; + const SessionDescriptionInterface* pending_local_description() const override; + const SessionDescriptionInterface* pending_remote_description() + const override; + + bool NeedsIceRestart(const std::string& content_name) const override; + bool IceRestartPending(const std::string& content_name) const override; + absl::optional GetDtlsRole( + const std::string& mid) const override; + + void RestartIce(); + + // JSEP01 + void CreateOffer( + CreateSessionDescriptionObserver* observer, + const PeerConnectionInterface::RTCOfferAnswerOptions& options); + void CreateAnswer( + CreateSessionDescriptionObserver* observer, + const PeerConnectionInterface::RTCOfferAnswerOptions& options); + + void SetLocalDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer); + void SetLocalDescription( + rtc::scoped_refptr observer); + void SetLocalDescription(SetSessionDescriptionObserver* observer, + SessionDescriptionInterface* desc); + void SetLocalDescription(SetSessionDescriptionObserver* observer); + + void SetRemoteDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer); + void SetRemoteDescription(SetSessionDescriptionObserver* observer, + SessionDescriptionInterface* desc); + + PeerConnectionInterface::RTCConfiguration GetConfiguration(); + RTCError SetConfiguration( + const PeerConnectionInterface::RTCConfiguration& configuration); + bool AddIceCandidate(const IceCandidateInterface* candidate); + void AddIceCandidate(std::unique_ptr candidate, + std::function callback); + bool RemoveIceCandidates(const 
std::vector& candidates); + // Adds a locally generated candidate to the local description. + void AddLocalIceCandidate(const JsepIceCandidate* candidate); + void RemoveLocalIceCandidates( + const std::vector& candidates); + bool ShouldFireNegotiationNeededEvent(uint32_t event_id); + + bool AddStream(MediaStreamInterface* local_stream); + void RemoveStream(MediaStreamInterface* local_stream); + + absl::optional is_caller(); + bool HasNewIceCredentials(); + void UpdateNegotiationNeeded(); + + // Destroys all BaseChannels and destroys the SCTP data channel, if present. + void DestroyAllChannels(); + + rtc::scoped_refptr local_streams(); + rtc::scoped_refptr remote_streams(); + + private: + class ImplicitCreateSessionDescriptionObserver; + + friend class ImplicitCreateSessionDescriptionObserver; + class SetSessionDescriptionObserverAdapter; + + friend class SetSessionDescriptionObserverAdapter; + + enum class SessionError { + kNone, // No error. + kContent, // Error in BaseChannel SetLocalContent/SetRemoteContent. + kTransport, // Error from the underlying transport. + }; + + // Represents the [[LocalIceCredentialsToReplace]] internal slot in the spec. + // It makes the next CreateOffer() produce new ICE credentials even if + // RTCOfferAnswerOptions::ice_restart is false. + // https://w3c.github.io/webrtc-pc/#dfn-localufragstoreplace + // TODO(hbos): When JsepTransportController/JsepTransport supports rollback, + // move this type of logic to JsepTransportController/JsepTransport. + class LocalIceCredentialsToReplace; + + // Only called by the Create() function. + explicit SdpOfferAnswerHandler(PeerConnection* pc); + // Called from the `Create()` function. Can only be called + // once. Modifies dependencies. 
+ void Initialize( + const PeerConnectionInterface::RTCConfiguration& configuration, + PeerConnectionDependencies& dependencies); + + rtc::Thread* signaling_thread() const; + // Non-const versions of local_description()/remote_description(), for use + // internally. + SessionDescriptionInterface* mutable_local_description() + RTC_RUN_ON(signaling_thread()) { + return pending_local_description_ ? pending_local_description_.get() + : current_local_description_.get(); + } + SessionDescriptionInterface* mutable_remote_description() + RTC_RUN_ON(signaling_thread()) { + return pending_remote_description_ ? pending_remote_description_.get() + : current_remote_description_.get(); + } + + // Synchronous implementations of SetLocalDescription/SetRemoteDescription + // that return an RTCError instead of invoking a callback. + RTCError ApplyLocalDescription( + std::unique_ptr desc, + const std::map& + bundle_groups_by_mid); + RTCError ApplyRemoteDescription( + std::unique_ptr desc, + const std::map& + bundle_groups_by_mid); + + // Implementation of the offer/answer exchange operations. These are chained + // onto the |operations_chain_| when the public CreateOffer(), CreateAnswer(), + // SetLocalDescription() and SetRemoteDescription() methods are invoked. + void DoCreateOffer( + const PeerConnectionInterface::RTCOfferAnswerOptions& options, + rtc::scoped_refptr observer); + void DoCreateAnswer( + const PeerConnectionInterface::RTCOfferAnswerOptions& options, + rtc::scoped_refptr observer); + void DoSetLocalDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer); + void DoSetRemoteDescription( + std::unique_ptr desc, + rtc::scoped_refptr observer); + + // Update the state, signaling if necessary. 
+ void ChangeSignalingState( + PeerConnectionInterface::SignalingState signaling_state); + + RTCError UpdateSessionState( + SdpType type, + cricket::ContentSource source, + const cricket::SessionDescription* description, + const std::map& + bundle_groups_by_mid); + + bool IsUnifiedPlan() const RTC_RUN_ON(signaling_thread()); + + // Signals from MediaStreamObserver. + void OnAudioTrackAdded(AudioTrackInterface* track, + MediaStreamInterface* stream) + RTC_RUN_ON(signaling_thread()); + void OnAudioTrackRemoved(AudioTrackInterface* track, + MediaStreamInterface* stream) + RTC_RUN_ON(signaling_thread()); + void OnVideoTrackAdded(VideoTrackInterface* track, + MediaStreamInterface* stream) + RTC_RUN_ON(signaling_thread()); + void OnVideoTrackRemoved(VideoTrackInterface* track, + MediaStreamInterface* stream) + RTC_RUN_ON(signaling_thread()); + + // | desc_type | is the type of the description that caused the rollback. + RTCError Rollback(SdpType desc_type); + void OnOperationsChainEmpty(); + + // Runs the algorithm **set the associated remote streams** specified in + // https://w3c.github.io/webrtc-pc/#set-associated-remote-streams. + void SetAssociatedRemoteStreams( + rtc::scoped_refptr receiver, + const std::vector& stream_ids, + std::vector>* added_streams, + std::vector>* removed_streams); + + bool CheckIfNegotiationIsNeeded(); + void GenerateNegotiationNeededEvent(); + // Helper method which verifies SDP. + RTCError ValidateSessionDescription( + const SessionDescriptionInterface* sdesc, + cricket::ContentSource source, + const std::map& + bundle_groups_by_mid) RTC_RUN_ON(signaling_thread()); + + // Updates the local RtpTransceivers according to the JSEP rules. Called as + // part of setting the local/remote description. 
+ RTCError UpdateTransceiversAndDataChannels( + cricket::ContentSource source, + const SessionDescriptionInterface& new_session, + const SessionDescriptionInterface* old_local_description, + const SessionDescriptionInterface* old_remote_description, + const std::map& + bundle_groups_by_mid); + + // Associate the given transceiver according to the JSEP rules. + RTCErrorOr< + rtc::scoped_refptr>> + AssociateTransceiver(cricket::ContentSource source, + SdpType type, + size_t mline_index, + const cricket::ContentInfo& content, + const cricket::ContentInfo* old_local_content, + const cricket::ContentInfo* old_remote_content) + RTC_RUN_ON(signaling_thread()); + + // Returns the media section in the given session description that is + // associated with the RtpTransceiver. Returns null if none found or this + // RtpTransceiver is not associated. Logic varies depending on the + // SdpSemantics specified in the configuration. + const cricket::ContentInfo* FindMediaSectionForTransceiver( + const RtpTransceiver* transceiver, + const SessionDescriptionInterface* sdesc) const; + + // Either creates or destroys the transceiver's BaseChannel according to the + // given media section. + RTCError UpdateTransceiverChannel( + rtc::scoped_refptr> + transceiver, + const cricket::ContentInfo& content, + const cricket::ContentGroup* bundle_group) RTC_RUN_ON(signaling_thread()); + + // Either creates or destroys the local data channel according to the given + // media section. + RTCError UpdateDataChannel(cricket::ContentSource source, + const cricket::ContentInfo& content, + const cricket::ContentGroup* bundle_group) + RTC_RUN_ON(signaling_thread()); + // Check if a call to SetLocalDescription is acceptable with a session + // description of the given type. + bool ExpectSetLocalDescription(SdpType type); + // Check if a call to SetRemoteDescription is acceptable with a session + // description of the given type. 
+ bool ExpectSetRemoteDescription(SdpType type); + + // The offer/answer machinery assumes the media section MID is present and + // unique. To support legacy end points that do not supply a=mid lines, this + // method will modify the session description to add MIDs generated according + // to the SDP semantics. + void FillInMissingRemoteMids(cricket::SessionDescription* remote_description); + + // Returns an RtpTransceiver, if available, that can be used to receive the + // given media type according to JSEP rules. + rtc::scoped_refptr> + FindAvailableTransceiverToReceive(cricket::MediaType media_type) const; + + // Returns a MediaSessionOptions struct with options decided by |options|, + // the local MediaStreams and DataChannels. + void GetOptionsForOffer(const PeerConnectionInterface::RTCOfferAnswerOptions& + offer_answer_options, + cricket::MediaSessionOptions* session_options); + void GetOptionsForPlanBOffer( + const PeerConnectionInterface::RTCOfferAnswerOptions& + offer_answer_options, + cricket::MediaSessionOptions* session_options) + RTC_RUN_ON(signaling_thread()); + void GetOptionsForUnifiedPlanOffer( + const PeerConnectionInterface::RTCOfferAnswerOptions& + offer_answer_options, + cricket::MediaSessionOptions* session_options) + RTC_RUN_ON(signaling_thread()); + + // Returns a MediaSessionOptions struct with options decided by + // |constraints|, the local MediaStreams and DataChannels. 
+ void GetOptionsForAnswer(const PeerConnectionInterface::RTCOfferAnswerOptions& + offer_answer_options, + cricket::MediaSessionOptions* session_options); + void GetOptionsForPlanBAnswer( + const PeerConnectionInterface::RTCOfferAnswerOptions& + offer_answer_options, + cricket::MediaSessionOptions* session_options) + RTC_RUN_ON(signaling_thread()); + void GetOptionsForUnifiedPlanAnswer( + const PeerConnectionInterface::RTCOfferAnswerOptions& + offer_answer_options, + cricket::MediaSessionOptions* session_options) + RTC_RUN_ON(signaling_thread()); + + const char* SessionErrorToString(SessionError error) const; + std::string GetSessionErrorMsg(); + // Returns the last error in the session. See the enum above for details. + SessionError session_error() const { + RTC_DCHECK_RUN_ON(signaling_thread()); + return session_error_; + } + const std::string& session_error_desc() const { return session_error_desc_; } + + RTCError HandleLegacyOfferOptions( + const PeerConnectionInterface::RTCOfferAnswerOptions& options); + void RemoveRecvDirectionFromReceivingTransceiversOfType( + cricket::MediaType media_type) RTC_RUN_ON(signaling_thread()); + void AddUpToOneReceivingTransceiverOfType(cricket::MediaType media_type); + + std::vector< + rtc::scoped_refptr>> + GetReceivingTransceiversOfType(cricket::MediaType media_type) + RTC_RUN_ON(signaling_thread()); + + // Runs the algorithm specified in + // https://w3c.github.io/webrtc-pc/#process-remote-track-removal + // This method will update the following lists: + // |remove_list| is the list of transceivers for which the receiving track is + // being removed. + // |removed_streams| is the list of streams which no longer have a receiving + // track so should be removed. 
+ void ProcessRemovalOfRemoteTrack( + const rtc::scoped_refptr> + transceiver, + std::vector>* remove_list, + std::vector>* removed_streams); + + void RemoveRemoteStreamsIfEmpty( + const std::vector>& + remote_streams, + std::vector>* removed_streams); + + // Remove all local and remote senders of type |media_type|. + // Called when a media type is rejected (m-line set to port 0). + void RemoveSenders(cricket::MediaType media_type); + + // Loops through the vector of |streams| and finds added and removed + // StreamParams since last time this method was called. + // For each new or removed StreamParam, OnLocalSenderSeen or + // OnLocalSenderRemoved is invoked. + void UpdateLocalSenders(const std::vector& streams, + cricket::MediaType media_type); + + // Makes sure a MediaStreamTrack is created for each StreamParam in |streams|, + // and existing MediaStreamTracks are removed if there is no corresponding + // StreamParam. If |default_track_needed| is true, a default MediaStreamTrack + // is created if it doesn't exist; if false, it's removed if it exists. + // |media_type| is the type of the |streams| and can be either audio or video. + // If a new MediaStream is created it is added to |new_streams|. + void UpdateRemoteSendersList( + const std::vector& streams, + bool default_track_needed, + cricket::MediaType media_type, + StreamCollection* new_streams); + + // Enables media channels to allow sending of media. + // This enables media to flow on all configured audio/video channels. + void EnableSending(); + // Push the media parts of the local or remote session description + // down to all of the channels. + RTCError PushdownMediaDescription( + SdpType type, + cricket::ContentSource source, + const std::map& + bundle_groups_by_mid); + + RTCError PushdownTransportDescription(cricket::ContentSource source, + SdpType type); + // Helper function to remove stopped transceivers. 
+ void RemoveStoppedTransceivers(); + // Deletes the corresponding channel of contents that don't exist in |desc|. + // |desc| can be null. This means that all channels are deleted. + void RemoveUnusedChannels(const cricket::SessionDescription* desc); + + // Report inferred negotiated SDP semantics from a local/remote answer to the + // UMA observer. + void ReportNegotiatedSdpSemantics(const SessionDescriptionInterface& answer); + + // Finds remote MediaStreams without any tracks and removes them from + // |remote_streams_| and notifies the observer that the MediaStreams no longer + // exist. + void UpdateEndedRemoteMediaStreams(); + + // Uses all remote candidates in |remote_desc| in this session. + bool UseCandidatesInSessionDescription( + const SessionDescriptionInterface* remote_desc); + // Uses |candidate| in this session. + bool UseCandidate(const IceCandidateInterface* candidate); + // Returns true if we are ready to push down the remote candidate. + // |remote_desc| is the new remote description, or NULL if the current remote + // description should be used. Output |valid| is true if the candidate media + // index is valid. + bool ReadyToUseRemoteCandidate(const IceCandidateInterface* candidate, + const SessionDescriptionInterface* remote_desc, + bool* valid); + + RTCErrorOr FindContentInfo( + const SessionDescriptionInterface* description, + const IceCandidateInterface* candidate) RTC_RUN_ON(signaling_thread()); + + // Functions for dealing with transports. + // Note that cricket code uses the term "channel" for what other code + // refers to as "transport". + + // Allocates media channels based on the |desc|. If |desc| doesn't have + // the BUNDLE option, this method will disable BUNDLE in PortAllocator. + // This method will also delete any existing media channels before creating. + RTCError CreateChannels(const cricket::SessionDescription& desc); + + // Helper methods to create media channels. 
+ cricket::VoiceChannel* CreateVoiceChannel(const std::string& mid); + cricket::VideoChannel* CreateVideoChannel(const std::string& mid); + bool CreateDataChannel(const std::string& mid); + + // Destroys and clears the BaseChannel associated with the given transceiver, + // if such channel is set. + void DestroyTransceiverChannel( + rtc::scoped_refptr> + transceiver); + + // Destroys the RTP data channel transport and/or the SCTP data channel + // transport and clears it. + void DestroyDataChannelTransport(RTCError error); + + // Destroys the given ChannelInterface. + // The channel cannot be accessed after this method is called. + void DestroyChannelInterface(cricket::ChannelInterface* channel); + // Generates MediaDescriptionOptions for the |session_opts| based on existing + // local description or remote description. + + void GenerateMediaDescriptionOptions( + const SessionDescriptionInterface* session_desc, + RtpTransceiverDirection audio_direction, + RtpTransceiverDirection video_direction, + absl::optional* audio_index, + absl::optional* video_index, + absl::optional* data_index, + cricket::MediaSessionOptions* session_options); + + // Generates the active MediaDescriptionOptions for the local data channel + // given the specified MID. + cricket::MediaDescriptionOptions GetMediaDescriptionOptionsForActiveData( + const std::string& mid) const; + + // Generates the rejected MediaDescriptionOptions for the local data channel + // given the specified MID. + cricket::MediaDescriptionOptions GetMediaDescriptionOptionsForRejectedData( + const std::string& mid) const; + + // Based on number of transceivers per media type, enabled or disable + // payload type based demuxing in the affected channels. 
+ bool UpdatePayloadTypeDemuxingState( + cricket::ContentSource source, + const std::map& + bundle_groups_by_mid); + + // ================================================================== + // Access to pc_ variables + cricket::ChannelManager* channel_manager() const; + TransceiverList* transceivers(); + const TransceiverList* transceivers() const; + DataChannelController* data_channel_controller(); + const DataChannelController* data_channel_controller() const; + cricket::PortAllocator* port_allocator(); + const cricket::PortAllocator* port_allocator() const; + RtpTransmissionManager* rtp_manager(); + const RtpTransmissionManager* rtp_manager() const; + JsepTransportController* transport_controller(); + const JsepTransportController* transport_controller() const; + // =================================================================== + const cricket::AudioOptions& audio_options() { return audio_options_; } + const cricket::VideoOptions& video_options() { return video_options_; } + + PeerConnection* const pc_; + + std::unique_ptr webrtc_session_desc_factory_ + RTC_GUARDED_BY(signaling_thread()); + + std::unique_ptr current_local_description_ + RTC_GUARDED_BY(signaling_thread()); + std::unique_ptr pending_local_description_ + RTC_GUARDED_BY(signaling_thread()); + std::unique_ptr current_remote_description_ + RTC_GUARDED_BY(signaling_thread()); + std::unique_ptr pending_remote_description_ + RTC_GUARDED_BY(signaling_thread()); + + PeerConnectionInterface::SignalingState signaling_state_ + RTC_GUARDED_BY(signaling_thread()) = PeerConnectionInterface::kStable; + + // Whether this peer is the caller. Set when the local description is applied. + absl::optional is_caller_ RTC_GUARDED_BY(signaling_thread()); + + // Streams added via AddStream. + const rtc::scoped_refptr local_streams_ + RTC_GUARDED_BY(signaling_thread()); + // Streams created as a result of SetRemoteDescription. 
+ const rtc::scoped_refptr remote_streams_ + RTC_GUARDED_BY(signaling_thread()); + + std::vector> stream_observers_ + RTC_GUARDED_BY(signaling_thread()); + + // The operations chain is used by the offer/answer exchange methods to ensure + // they are executed in the right order. For example, if + // SetRemoteDescription() is invoked while CreateOffer() is still pending, the + // SRD operation will not start until CreateOffer() has completed. See + // https://w3c.github.io/webrtc-pc/#dfn-operations-chain. + rtc::scoped_refptr operations_chain_ + RTC_GUARDED_BY(signaling_thread()); + + // One PeerConnection has only one RTCP CNAME. + // https://tools.ietf.org/html/draft-ietf-rtcweb-rtp-usage-26#section-4.9 + const std::string rtcp_cname_; + + // MIDs will be generated using this generator which will keep track of + // all the MIDs that have been seen over the life of the PeerConnection. + rtc::UniqueStringGenerator mid_generator_ RTC_GUARDED_BY(signaling_thread()); + + // List of content names for which the remote side triggered an ICE restart. + std::set pending_ice_restarts_ + RTC_GUARDED_BY(signaling_thread()); + + std::unique_ptr + local_ice_credentials_to_replace_ RTC_GUARDED_BY(signaling_thread()); + + bool remote_peer_supports_msid_ RTC_GUARDED_BY(signaling_thread()) = false; + bool is_negotiation_needed_ RTC_GUARDED_BY(signaling_thread()) = false; + uint32_t negotiation_needed_event_id_ = 0; + bool update_negotiation_needed_on_empty_chain_ + RTC_GUARDED_BY(signaling_thread()) = false; + // If PT demuxing is successfully negotiated one time we will allow PT + // demuxing for the rest of the session so that PT-based apps default to PT + // demuxing in follow-up O/A exchanges. + bool pt_demuxing_has_been_used_audio_ = false; + bool pt_demuxing_has_been_used_video_ = false; + + // In Unified Plan, if we encounter remote SDP that does not contain an a=msid + // line we create and use a stream with a random ID for our receivers. 
This is + // to support legacy endpoints that do not support the a=msid attribute (as + // opposed to streamless tracks with "a=msid:-"). + rtc::scoped_refptr missing_msid_default_stream_ + RTC_GUARDED_BY(signaling_thread()); + + // Updates the error state, signaling if necessary. + void SetSessionError(SessionError error, const std::string& error_desc); + + // Implements AddIceCandidate without reporting usage, but returns the + // particular success/error value that should be reported (and can be utilized + // for other purposes). + AddIceCandidateResult AddIceCandidateInternal( + const IceCandidateInterface* candidate); + + SessionError session_error_ RTC_GUARDED_BY(signaling_thread()) = + SessionError::kNone; + std::string session_error_desc_ RTC_GUARDED_BY(signaling_thread()); + + // Member variables for caching global options. + cricket::AudioOptions audio_options_ RTC_GUARDED_BY(signaling_thread()); + cricket::VideoOptions video_options_ RTC_GUARDED_BY(signaling_thread()); + + // This object should be used to generate any SSRC that is not explicitly + // specified by the user (or by the remote party). + // The generator is not used directly, instead it is passed on to the + // channel manager and the session description factory. + // TODO(bugs.webrtc.org/12666): This variable is used from both the signaling + // and worker threads. See if we can't restrict usage to a single thread. + rtc::UniqueRandomIdGenerator ssrc_generator_; + + // A video bitrate allocator factory. + // This can be injected using the PeerConnectionDependencies, + // or else the CreateBuiltinVideoBitrateAllocatorFactory() will be called. + // Note that one can still choose to override this in a MediaEngine + // if one wants too. 
+ std::unique_ptr + video_bitrate_allocator_factory_; + + rtc::WeakPtrFactory weak_ptr_factory_ + RTC_GUARDED_BY(signaling_thread()); +}; + +} // namespace webrtc + +#endif // PC_SDP_OFFER_ANSWER_H_ diff --git a/pc/sdp_serializer.cc b/pc/sdp_serializer.cc index 7ebaffda86..107431627c 100644 --- a/pc/sdp_serializer.cc +++ b/pc/sdp_serializer.cc @@ -10,12 +10,14 @@ #include "pc/sdp_serializer.h" +#include +#include #include #include #include #include "absl/algorithm/container.h" -#include "api/jsep.h" +#include "absl/types/optional.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "rtc_base/checks.h" #include "rtc_base/string_encode.h" diff --git a/pc/sdp_serializer.h b/pc/sdp_serializer.h index 476ebafbdc..1223cd1af7 100644 --- a/pc/sdp_serializer.h +++ b/pc/sdp_serializer.h @@ -17,6 +17,7 @@ #include "api/rtc_error.h" #include "media/base/rid_description.h" #include "pc/session_description.h" +#include "pc/simulcast_description.h" namespace webrtc { diff --git a/pc/sdp_state_provider.h b/pc/sdp_state_provider.h new file mode 100644 index 0000000000..23ffc91bd9 --- /dev/null +++ b/pc/sdp_state_provider.h @@ -0,0 +1,54 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_SDP_STATE_PROVIDER_H_ +#define PC_SDP_STATE_PROVIDER_H_ + +#include + +#include "api/jsep.h" +#include "api/peer_connection_interface.h" + +namespace webrtc { + +// This interface provides access to the state of an SDP offer/answer +// negotiation. +// +// All the functions are const, so using this interface serves as +// assurance that the user is not modifying the state. 
+class SdpStateProvider { + public: + virtual ~SdpStateProvider() {} + + virtual PeerConnectionInterface::SignalingState signaling_state() const = 0; + + virtual const SessionDescriptionInterface* local_description() const = 0; + virtual const SessionDescriptionInterface* remote_description() const = 0; + virtual const SessionDescriptionInterface* current_local_description() + const = 0; + virtual const SessionDescriptionInterface* current_remote_description() + const = 0; + virtual const SessionDescriptionInterface* pending_local_description() + const = 0; + virtual const SessionDescriptionInterface* pending_remote_description() + const = 0; + + // Whether an ICE restart has been asked for. Used in CreateOffer. + virtual bool NeedsIceRestart(const std::string& content_name) const = 0; + // Whether an ICE restart was indicated in the remote offer. + // Used in CreateAnswer. + virtual bool IceRestartPending(const std::string& content_name) const = 0; + virtual absl::optional GetDtlsRole( + const std::string& mid) const = 0; +}; + +} // namespace webrtc + +#endif // PC_SDP_STATE_PROVIDER_H_ diff --git a/pc/sdp_utils.cc b/pc/sdp_utils.cc index f5385a6529..b750b04a46 100644 --- a/pc/sdp_utils.cc +++ b/pc/sdp_utils.cc @@ -11,10 +11,10 @@ #include "pc/sdp_utils.h" #include -#include #include #include "api/jsep_session_description.h" +#include "rtc_base/checks.h" namespace webrtc { diff --git a/pc/sdp_utils.h b/pc/sdp_utils.h index fc4b289f91..effd7cd034 100644 --- a/pc/sdp_utils.h +++ b/pc/sdp_utils.h @@ -16,6 +16,7 @@ #include #include "api/jsep.h" +#include "p2p/base/transport_info.h" #include "pc/session_description.h" #include "rtc_base/system/rtc_export.h" diff --git a/pc/session_description.cc b/pc/session_description.cc index 87d6667270..7b878cbf7b 100644 --- a/pc/session_description.cc +++ b/pc/session_description.cc @@ -10,12 +10,10 @@ #include "pc/session_description.h" -#include #include #include "absl/algorithm/container.h" #include "absl/memory/memory.h" 
-#include "pc/media_protocol_names.h" #include "rtc_base/checks.h" namespace cricket { @@ -87,6 +85,18 @@ bool ContentGroup::RemoveContentName(const std::string& content_name) { return true; } +std::string ContentGroup::ToString() const { + rtc::StringBuilder acc; + acc << semantics_ << "("; + if (!content_names_.empty()) { + for (const auto& name : content_names_) { + acc << name << " "; + } + } + acc << ")"; + return acc.Release(); +} + SessionDescription::SessionDescription() = default; SessionDescription::SessionDescription(const SessionDescription&) = default; @@ -261,6 +271,17 @@ const ContentGroup* SessionDescription::GetGroupByName( return NULL; } +std::vector SessionDescription::GetGroupsByName( + const std::string& name) const { + std::vector content_groups; + for (const ContentGroup& content_group : content_groups_) { + if (content_group.semantics() == name) { + content_groups.push_back(&content_group); + } + } + return content_groups; +} + ContentInfo::~ContentInfo() { } diff --git a/pc/session_description.h b/pc/session_description.h index bfd19b8c7a..a20caf624a 100644 --- a/pc/session_description.h +++ b/pc/session_description.h @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -24,14 +25,18 @@ #include "api/crypto_params.h" #include "api/media_types.h" #include "api/rtp_parameters.h" +#include "api/rtp_transceiver_direction.h" #include "api/rtp_transceiver_interface.h" +#include "media/base/codec.h" #include "media/base/media_channel.h" +#include "media/base/media_constants.h" +#include "media/base/rid_description.h" #include "media/base/stream_params.h" #include "p2p/base/transport_description.h" #include "p2p/base/transport_info.h" #include "pc/media_protocol_names.h" #include "pc/simulcast_description.h" -#include "rtc_base/deprecation.h" +#include "rtc_base/checks.h" #include "rtc_base/socket_address.h" #include "rtc_base/system/rtc_export.h" @@ -39,7 +44,6 @@ namespace cricket { typedef std::vector AudioCodecs; typedef 
std::vector VideoCodecs; -typedef std::vector RtpDataCodecs; typedef std::vector CryptoParamsVec; typedef std::vector RtpHeaderExtensions; @@ -55,8 +59,8 @@ const int kAutoBandwidth = -1; class AudioContentDescription; class VideoContentDescription; -class RtpDataContentDescription; class SctpDataContentDescription; +class UnsupportedContentDescription; // Describes a session description media section. There are subclasses for each // media type (audio, video, data) that will have additional information. @@ -77,14 +81,14 @@ class MediaContentDescription { virtual VideoContentDescription* as_video() { return nullptr; } virtual const VideoContentDescription* as_video() const { return nullptr; } - virtual RtpDataContentDescription* as_rtp_data() { return nullptr; } - virtual const RtpDataContentDescription* as_rtp_data() const { - return nullptr; - } - virtual SctpDataContentDescription* as_sctp() { return nullptr; } virtual const SctpDataContentDescription* as_sctp() const { return nullptr; } + virtual UnsupportedContentDescription* as_unsupported() { return nullptr; } + virtual const UnsupportedContentDescription* as_unsupported() const { + return nullptr; + } + virtual bool has_codecs() const = 0; // Copy operator that returns an unique_ptr. @@ -126,6 +130,10 @@ class MediaContentDescription { virtual int bandwidth() const { return bandwidth_; } virtual void set_bandwidth(int bandwidth) { bandwidth_ = bandwidth; } + virtual std::string bandwidth_type() const { return bandwidth_type_; } + virtual void set_bandwidth_type(std::string bandwidth_type) { + bandwidth_type_ = bandwidth_type; + } virtual const std::vector& cryptos() const { return cryptos_; } virtual void AddCrypto(const CryptoParams& params) { @@ -135,6 +143,11 @@ class MediaContentDescription { cryptos_ = cryptos; } + // List of RTP header extensions. URIs are **NOT** guaranteed to be unique + // as they can appear twice when both encrypted and non-encrypted extensions + // are present. 
+ // Use RtpExtension::FindHeaderExtensionByUri for finding and + // RtpExtension::DeduplicateHeaderExtensions for filtering. virtual const RtpHeaderExtensions& rtp_header_extensions() const { return rtp_header_extensions_; } @@ -147,13 +160,6 @@ class MediaContentDescription { rtp_header_extensions_.push_back(ext); rtp_header_extensions_set_ = true; } - virtual void AddRtpHeaderExtension(const cricket::RtpHeaderExtension& ext) { - webrtc::RtpExtension webrtc_extension; - webrtc_extension.uri = ext.uri; - webrtc_extension.id = ext.id; - rtp_header_extensions_.push_back(webrtc_extension); - rtp_header_extensions_set_ = true; - } virtual void ClearRtpHeaderExtensions() { rtp_header_extensions_.clear(); rtp_header_extensions_set_ = true; @@ -253,18 +259,12 @@ class MediaContentDescription { receive_rids_ = rids; } - virtual const absl::optional& alt_protocol() const { - return alt_protocol_; - } - virtual void set_alt_protocol(const absl::optional& protocol) { - alt_protocol_ = protocol; - } - protected: bool rtcp_mux_ = false; bool rtcp_reduced_size_ = false; bool remote_estimate_ = false; int bandwidth_ = kAutoBandwidth; + std::string bandwidth_type_ = kApplicationSpecificBandwidth; std::string protocol_; std::vector cryptos_; std::vector rtp_header_extensions_; @@ -274,16 +274,11 @@ class MediaContentDescription { webrtc::RtpTransceiverDirection direction_ = webrtc::RtpTransceiverDirection::kSendRecv; rtc::SocketAddress connection_address_; - // Mixed one- and two-byte header not included in offer on media level or - // session level, but we will respond that we support it. The plan is to add - // it to our offer on session level. See todo in SessionDescription. - ExtmapAllowMixed extmap_allow_mixed_enum_ = kNo; + ExtmapAllowMixed extmap_allow_mixed_enum_ = kMedia; SimulcastDescription simulcast_; std::vector receive_rids_; - absl::optional alt_protocol_; - private: // Copy function that returns a raw pointer. Caller will assert ownership. 
// Should only be called by the Clone() function. Must be implemented @@ -364,20 +359,6 @@ class VideoContentDescription : public MediaContentDescriptionImpl { } }; -class RtpDataContentDescription - : public MediaContentDescriptionImpl { - public: - RtpDataContentDescription() {} - MediaType type() const override { return MEDIA_TYPE_DATA; } - RtpDataContentDescription* as_rtp_data() override { return this; } - const RtpDataContentDescription* as_rtp_data() const override { return this; } - - private: - RtpDataContentDescription* CloneInternal() const override { - return new RtpDataContentDescription(*this); - } -}; - class SctpDataContentDescription : public MediaContentDescription { public: SctpDataContentDescription() {} @@ -416,13 +397,37 @@ class SctpDataContentDescription : public MediaContentDescription { int max_message_size_ = 64 * 1024; }; +class UnsupportedContentDescription : public MediaContentDescription { + public: + explicit UnsupportedContentDescription(const std::string& media_type) + : media_type_(media_type) {} + MediaType type() const override { return MEDIA_TYPE_UNSUPPORTED; } + + UnsupportedContentDescription* as_unsupported() override { return this; } + const UnsupportedContentDescription* as_unsupported() const override { + return this; + } + + bool has_codecs() const override { return false; } + const std::string& media_type() const { return media_type_; } + + private: + UnsupportedContentDescription* CloneInternal() const override { + return new UnsupportedContentDescription(*this); + } + + std::string media_type_; +}; + // Protocol used for encoding media. This is the "top level" protocol that may // be wrapped by zero or many transport protocols (UDP, ICE, etc.). enum class MediaProtocolType { - kRtp, // Section will use the RTP protocol (e.g., for audio or video). - // https://tools.ietf.org/html/rfc3550 - kSctp // Section will use the SCTP protocol (e.g., for a data channel). 
- // https://tools.ietf.org/html/rfc4960 + kRtp, // Section will use the RTP protocol (e.g., for audio or video). + // https://tools.ietf.org/html/rfc3550 + kSctp, // Section will use the SCTP protocol (e.g., for a data channel). + // https://tools.ietf.org/html/rfc4960 + kOther // Section will use another top protocol which is not + // explicitly supported. }; // Represents a session description section. Most information about the section @@ -483,6 +488,8 @@ class ContentGroup { bool HasContentName(const std::string& content_name) const; void AddContentName(const std::string& content_name); bool RemoveContentName(const std::string& content_name); + // for debugging + std::string ToString() const; private: std::string semantics_; @@ -567,6 +574,8 @@ class SessionDescription { // Group accessors. const ContentGroups& groups() const { return content_groups_; } const ContentGroup* GetGroupByName(const std::string& name) const; + std::vector GetGroupsByName( + const std::string& name) const; bool HasGroup(const std::string& name) const; // Group mutators. @@ -613,12 +622,7 @@ class SessionDescription { // Default to what Plan B would do. // TODO(bugs.webrtc.org/8530): Change default to kMsidSignalingMediaSection. int msid_signaling_ = kMsidSignalingSsrcAttribute; - // TODO(webrtc:9985): Activate mixed one- and two-byte header extension in - // offer at session level. It's currently not included in offer by default - // because clients prior to https://bugs.webrtc.org/9712 cannot parse this - // correctly. If it's included in offer to us we will respond that we support - // it. 
- bool extmap_allow_mixed_ = false; + bool extmap_allow_mixed_ = true; }; // Indicates whether a session description was sent by the local client or diff --git a/pc/session_description_unittest.cc b/pc/session_description_unittest.cc index 75e0974ecd..00ce538398 100644 --- a/pc/session_description_unittest.cc +++ b/pc/session_description_unittest.cc @@ -17,7 +17,8 @@ namespace cricket { TEST(MediaContentDescriptionTest, ExtmapAllowMixedDefaultValue) { VideoContentDescription video_desc; - EXPECT_EQ(MediaContentDescription::kNo, video_desc.extmap_allow_mixed_enum()); + EXPECT_EQ(MediaContentDescription::kMedia, + video_desc.extmap_allow_mixed_enum()); } TEST(MediaContentDescriptionTest, SetExtmapAllowMixed) { @@ -129,16 +130,6 @@ TEST(SessionDescriptionTest, AddContentTransfersExtmapAllowMixedSetting) { EXPECT_EQ(MediaContentDescription::kSession, session_desc.GetContentDescriptionByName("video") ->extmap_allow_mixed_enum()); - - // Session level setting overrides media level when new content is added. 
- std::unique_ptr data_desc = - std::make_unique(); - data_desc->set_extmap_allow_mixed_enum(MediaContentDescription::kMedia); - session_desc.AddContent("data", MediaProtocolType::kRtp, - std::move(data_desc)); - EXPECT_EQ(MediaContentDescription::kSession, - session_desc.GetContentDescriptionByName("data") - ->extmap_allow_mixed_enum()); } } // namespace cricket diff --git a/pc/simulcast_description.cc b/pc/simulcast_description.cc index 8b510febaa..0ae3e2074e 100644 --- a/pc/simulcast_description.cc +++ b/pc/simulcast_description.cc @@ -10,8 +10,6 @@ #include "pc/simulcast_description.h" -#include - #include "rtc_base/checks.h" namespace cricket { diff --git a/pc/simulcast_description.h b/pc/simulcast_description.h index 1337a9ce4d..f7ae28837e 100644 --- a/pc/simulcast_description.h +++ b/pc/simulcast_description.h @@ -11,6 +11,8 @@ #ifndef PC_SIMULCAST_DESCRIPTION_H_ #define PC_SIMULCAST_DESCRIPTION_H_ +#include + #include #include diff --git a/pc/srtp_filter.cc b/pc/srtp_filter.cc index bd48eac83d..2f8d06cbea 100644 --- a/pc/srtp_filter.cc +++ b/pc/srtp_filter.cc @@ -11,8 +11,8 @@ #include "pc/srtp_filter.h" #include - #include +#include #include "absl/strings/match.h" #include "rtc_base/logging.h" @@ -210,9 +210,9 @@ bool SrtpFilter::ApplySendParams(const CryptoParams& send_params) { int send_key_len, send_salt_len; if (!rtc::GetSrtpKeyAndSaltLengths(*send_cipher_suite_, &send_key_len, &send_salt_len)) { - RTC_LOG(LS_WARNING) << "Could not get lengths for crypto suite(s):" - " send cipher_suite " - << send_params.cipher_suite; + RTC_LOG(LS_ERROR) << "Could not get lengths for crypto suite(s):" + " send cipher_suite " + << send_params.cipher_suite; return false; } @@ -241,9 +241,9 @@ bool SrtpFilter::ApplyRecvParams(const CryptoParams& recv_params) { int recv_key_len, recv_salt_len; if (!rtc::GetSrtpKeyAndSaltLengths(*recv_cipher_suite_, &recv_key_len, &recv_salt_len)) { - RTC_LOG(LS_WARNING) << "Could not get lengths for crypto suite(s):" - " recv cipher_suite 
" - << recv_params.cipher_suite; + RTC_LOG(LS_ERROR) << "Could not get lengths for crypto suite(s):" + " recv cipher_suite " + << recv_params.cipher_suite; return false; } diff --git a/pc/srtp_filter.h b/pc/srtp_filter.h index 5b6c99dcb5..f1e164936c 100644 --- a/pc/srtp_filter.h +++ b/pc/srtp_filter.h @@ -11,6 +11,9 @@ #ifndef PC_SRTP_FILTER_H_ #define PC_SRTP_FILTER_H_ +#include +#include + #include #include #include @@ -21,12 +24,11 @@ #include "api/array_view.h" #include "api/crypto_params.h" #include "api/jsep.h" +#include "api/sequence_checker.h" #include "pc/session_description.h" #include "rtc_base/buffer.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/ssl_stream_adapter.h" -#include "rtc_base/thread_checker.h" // Forward declaration to avoid pulling in libsrtp headers here struct srtp_event_data_t; diff --git a/pc/srtp_session.cc b/pc/srtp_session.cc index 5ded455ee5..45f6b67d12 100644 --- a/pc/srtp_session.cc +++ b/pc/srtp_session.cc @@ -10,12 +10,16 @@ #include "pc/srtp_session.h" +#include + #include "absl/base/attributes.h" #include "media/base/rtp_utils.h" #include "pc/external_hmac.h" -#include "rtc_base/critical_section.h" #include "rtc_base/logging.h" #include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/string_encode.h" +#include "rtc_base/time_utils.h" +#include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" #include "third_party/libsrtp/include/srtp.h" #include "third_party/libsrtp/include/srtp_priv.h" @@ -27,7 +31,9 @@ namespace cricket { // in srtp.h. 
constexpr int kSrtpErrorCodeBoundary = 28; -SrtpSession::SrtpSession() {} +SrtpSession::SrtpSession() { + dump_plain_rtp_ = webrtc::field_trial::IsEnabled("WebRTC-Debugging-RtpDump"); +} SrtpSession::~SrtpSession() { if (session_) { @@ -74,12 +80,19 @@ bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len) { return false; } + // Note: the need_len differs from the libsrtp recommendatіon to ensure + // SRTP_MAX_TRAILER_LEN bytes of free space after the data. WebRTC + // never includes a MKI, therefore the amount of bytes added by the + // srtp_protect call is known in advance and depends on the cipher suite. int need_len = in_len + rtp_auth_tag_len_; // NOLINT if (max_len < need_len) { RTC_LOG(LS_WARNING) << "Failed to protect SRTP packet: The buffer length " << max_len << " is less than the needed " << need_len; return false; } + if (dump_plain_rtp_) { + DumpPacket(p, in_len, /*outbound=*/true); + } *out_len = in_len; int err = srtp_protect(session_, p, out_len); @@ -113,12 +126,19 @@ bool SrtpSession::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) { return false; } + // Note: the need_len differs from the libsrtp recommendatіon to ensure + // SRTP_MAX_TRAILER_LEN bytes of free space after the data. WebRTC + // never includes a MKI, therefore the amount of bytes added by the + // srtp_protect_rtp call is known in advance and depends on the cipher suite. 
int need_len = in_len + sizeof(uint32_t) + rtcp_auth_tag_len_; // NOLINT if (max_len < need_len) { RTC_LOG(LS_WARNING) << "Failed to protect SRTCP packet: The buffer length " << max_len << " is less than the needed " << need_len; return false; } + if (dump_plain_rtp_) { + DumpPacket(p, in_len, /*outbound=*/true); + } *out_len = in_len; int err = srtp_protect_rtcp(session_, p, out_len); @@ -152,6 +172,9 @@ bool SrtpSession::UnprotectRtp(void* p, int in_len, int* out_len) { static_cast(err), kSrtpErrorCodeBoundary); return false; } + if (dump_plain_rtp_) { + DumpPacket(p, *out_len, /*outbound=*/false); + } return true; } @@ -170,6 +193,9 @@ bool SrtpSession::UnprotectRtcp(void* p, int in_len, int* out_len) { static_cast(err), kSrtpErrorCodeBoundary); return false; } + if (dump_plain_rtp_) { + DumpPacket(p, *out_len, /*outbound=*/false); + } return true; } @@ -243,42 +269,18 @@ bool SrtpSession::DoSetKey(int type, srtp_policy_t policy; memset(&policy, 0, sizeof(policy)); - if (cs == rtc::SRTP_AES128_CM_SHA1_80) { - srtp_crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp); - srtp_crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp); - } else if (cs == rtc::SRTP_AES128_CM_SHA1_32) { - // RTP HMAC is shortened to 32 bits, but RTCP remains 80 bits. - srtp_crypto_policy_set_aes_cm_128_hmac_sha1_32(&policy.rtp); - srtp_crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp); - } else if (cs == rtc::SRTP_AEAD_AES_128_GCM) { - srtp_crypto_policy_set_aes_gcm_128_16_auth(&policy.rtp); - srtp_crypto_policy_set_aes_gcm_128_16_auth(&policy.rtcp); - } else if (cs == rtc::SRTP_AEAD_AES_256_GCM) { - srtp_crypto_policy_set_aes_gcm_256_16_auth(&policy.rtp); - srtp_crypto_policy_set_aes_gcm_256_16_auth(&policy.rtcp); - } else { - RTC_LOG(LS_WARNING) << "Failed to " << (session_ ? 
"update" : "create") - << " SRTP session: unsupported cipher_suite " << cs; - return false; - } - - int expected_key_len; - int expected_salt_len; - if (!rtc::GetSrtpKeyAndSaltLengths(cs, &expected_key_len, - &expected_salt_len)) { - // This should never happen. - RTC_NOTREACHED(); - RTC_LOG(LS_WARNING) - << "Failed to " << (session_ ? "update" : "create") - << " SRTP session: unsupported cipher_suite without length information" - << cs; + if (!(srtp_crypto_policy_set_from_profile_for_rtp( + &policy.rtp, (srtp_profile_t)cs) == srtp_err_status_ok && + srtp_crypto_policy_set_from_profile_for_rtcp( + &policy.rtcp, (srtp_profile_t)cs) == srtp_err_status_ok)) { + RTC_LOG(LS_ERROR) << "Failed to " << (session_ ? "update" : "create") + << " SRTP session: unsupported cipher_suite " << cs; return false; } - if (!key || - len != static_cast(expected_key_len + expected_salt_len)) { - RTC_LOG(LS_WARNING) << "Failed to " << (session_ ? "update" : "create") - << " SRTP session: invalid key"; + if (!key || len != static_cast(policy.rtp.cipher_key_len)) { + RTC_LOG(LS_ERROR) << "Failed to " << (session_ ? 
"update" : "create") + << " SRTP session: invalid key"; return false; } @@ -364,16 +366,16 @@ bool SrtpSession::UpdateKey(int type, } ABSL_CONST_INIT int g_libsrtp_usage_count = 0; -ABSL_CONST_INIT rtc::GlobalLock g_libsrtp_lock; +ABSL_CONST_INIT webrtc::GlobalMutex g_libsrtp_lock(absl::kConstInit); void ProhibitLibsrtpInitialization() { - rtc::GlobalLockScope ls(&g_libsrtp_lock); + webrtc::GlobalMutexLock ls(&g_libsrtp_lock); ++g_libsrtp_usage_count; } // static bool SrtpSession::IncrementLibsrtpUsageCountAndMaybeInit() { - rtc::GlobalLockScope ls(&g_libsrtp_lock); + webrtc::GlobalMutexLock ls(&g_libsrtp_lock); RTC_DCHECK_GE(g_libsrtp_usage_count, 0); if (g_libsrtp_usage_count == 0) { @@ -402,7 +404,7 @@ bool SrtpSession::IncrementLibsrtpUsageCountAndMaybeInit() { // static void SrtpSession::DecrementLibsrtpUsageCountAndMaybeDeinit() { - rtc::GlobalLockScope ls(&g_libsrtp_lock); + webrtc::GlobalMutexLock ls(&g_libsrtp_lock); RTC_DCHECK_GE(g_libsrtp_usage_count, 1); if (--g_libsrtp_usage_count == 0) { @@ -445,4 +447,26 @@ void SrtpSession::HandleEventThunk(srtp_event_data_t* ev) { } } +// Logs the unencrypted packet in text2pcap format. This can then be +// extracted by searching for RTP_DUMP +// grep RTP_DUMP chrome_debug.log > in.txt +// and converted to pcap using +// text2pcap -D -u 1000,2000 -t %H:%M:%S. in.txt out.pcap +// The resulting file can be replayed using the WebRTC video_replay tool and +// be inspected in Wireshark using the RTP, VP8 and H264 dissectors. +void SrtpSession::DumpPacket(const void* buf, int len, bool outbound) { + int64_t time_of_day = rtc::TimeUTCMillis() % (24 * 3600 * 1000); + int64_t hours = time_of_day / (3600 * 1000); + int64_t minutes = (time_of_day / (60 * 1000)) % 60; + int64_t seconds = (time_of_day / 1000) % 60; + int64_t millis = time_of_day % 1000; + RTC_LOG(LS_VERBOSE) << "\n" << (outbound ? 
"O" : "I") << " " + << std::setfill('0') << std::setw(2) << hours << ":" + << std::setfill('0') << std::setw(2) << minutes << ":" + << std::setfill('0') << std::setw(2) << seconds << "." + << std::setfill('0') << std::setw(3) << millis << " " + << "000000 " << rtc::hex_encode_with_delimiter((const char *)buf, len, ' ') + << " # RTP_DUMP"; +} + } // namespace cricket diff --git a/pc/srtp_session.h b/pc/srtp_session.h index 0a26c02c9f..0396412481 100644 --- a/pc/srtp_session.h +++ b/pc/srtp_session.h @@ -14,7 +14,9 @@ #include #include "api/scoped_refptr.h" -#include "rtc_base/thread_checker.h" +#include "api/sequence_checker.h" +#include "rtc_base/constructor_magic.h" +#include "rtc_base/synchronization/mutex.h" // Forward declaration to avoid pulling in libsrtp headers here struct srtp_event_data_t; @@ -107,6 +109,10 @@ class SrtpSession { // Returns send stream current packet index from srtp db. bool GetSendStreamPacketIndex(void* data, int in_len, int64_t* index); + // Writes unencrypted packets in text2pcap format to the log file + // for debugging. + void DumpPacket(const void* buf, int len, bool outbound); + // These methods are responsible for initializing libsrtp (if the usage count // is incremented from 0 to 1) or deinitializing it (when decremented from 1 // to 0). @@ -118,16 +124,23 @@ class SrtpSession { void HandleEvent(const srtp_event_data_t* ev); static void HandleEventThunk(srtp_event_data_t* ev); - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; srtp_ctx_t_* session_ = nullptr; + + // Overhead of the SRTP auth tag for RTP and RTCP in bytes. + // Depends on the cipher suite used and is usually the same with the exception + // of the CS_AES_CM_128_HMAC_SHA1_32 cipher suite. The additional four bytes + // required for RTCP protection are not included. 
int rtp_auth_tag_len_ = 0; int rtcp_auth_tag_len_ = 0; + bool inited_ = false; - static rtc::GlobalLock lock_; + static webrtc::GlobalMutex lock_; int last_send_seq_num_ = -1; bool external_auth_active_ = false; bool external_auth_enabled_ = false; int decryption_failure_count_ = 0; + bool dump_plain_rtp_ = false; RTC_DISALLOW_COPY_AND_ASSIGN(SrtpSession); }; diff --git a/pc/srtp_transport.cc b/pc/srtp_transport.cc index 71a58d0850..c90b3fa227 100644 --- a/pc/srtp_transport.cc +++ b/pc/srtp_transport.cc @@ -10,7 +10,6 @@ #include "pc/srtp_transport.h" -#include #include #include @@ -128,7 +127,7 @@ bool SrtpTransport::SendRtpPacket(rtc::CopyOnWriteBuffer* packet, rtc::PacketOptions updated_options = options; TRACE_EVENT0("webrtc", "SRTP Encode"); bool res; - uint8_t* data = packet->data(); + uint8_t* data = packet->MutableData(); int len = rtc::checked_cast(packet->size()); // If ENABLE_EXTERNAL_AUTH flag is on then packet authentication is not done // inside libsrtp for a RTP packet. A external HMAC module will be writing @@ -185,7 +184,7 @@ bool SrtpTransport::SendRtcpPacket(rtc::CopyOnWriteBuffer* packet, } TRACE_EVENT0("webrtc", "SRTP Encode"); - uint8_t* data = packet->data(); + uint8_t* data = packet->MutableData(); int len = rtc::checked_cast(packet->size()); if (!ProtectRtcp(data, len, static_cast(packet->capacity()), &len)) { int type = -1; @@ -202,13 +201,13 @@ bool SrtpTransport::SendRtcpPacket(rtc::CopyOnWriteBuffer* packet, void SrtpTransport::OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) { + TRACE_EVENT0("webrtc", "SrtpTransport::OnRtpPacketReceived"); if (!IsSrtpActive()) { RTC_LOG(LS_WARNING) << "Inactive SRTP transport received an RTP packet. 
Drop it."; return; } - TRACE_EVENT0("webrtc", "SRTP Decode"); - char* data = packet.data(); + char* data = packet.MutableData(); int len = rtc::checked_cast(packet.size()); if (!UnprotectRtp(data, len, &len)) { int seq_num = -1; @@ -234,13 +233,13 @@ void SrtpTransport::OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet, void SrtpTransport::OnRtcpPacketReceived(rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) { + TRACE_EVENT0("webrtc", "SrtpTransport::OnRtcpPacketReceived"); if (!IsSrtpActive()) { RTC_LOG(LS_WARNING) << "Inactive SRTP transport received an RTCP packet. Drop it."; return; } - TRACE_EVENT0("webrtc", "SRTP Decode"); - char* data = packet.data(); + char* data = packet.MutableData(); int len = rtc::checked_cast(packet.size()); if (!UnprotectRtcp(data, len, &len)) { int type = -1; @@ -268,7 +267,7 @@ void SrtpTransport::OnNetworkRouteChanged( void SrtpTransport::OnWritableState( rtc::PacketTransportInternal* packet_transport) { - SignalWritableState(IsWritable(/*rtcp=*/true) && IsWritable(/*rtcp=*/true)); + SignalWritableState(IsWritable(/*rtcp=*/false) && IsWritable(/*rtcp=*/true)); } bool SrtpTransport::SetRtpParams(int send_cs, diff --git a/pc/stats_collector.cc b/pc/stats_collector.cc index 0509c6dc19..eb2176ed38 100644 --- a/pc/stats_collector.cc +++ b/pc/stats_collector.cc @@ -10,15 +10,47 @@ #include "pc/stats_collector.h" +#include +#include + #include #include #include #include +#include "absl/types/optional.h" +#include "api/audio_codecs/audio_encoder.h" +#include "api/candidate.h" +#include "api/data_channel_interface.h" +#include "api/media_types.h" +#include "api/rtp_receiver_interface.h" +#include "api/rtp_sender_interface.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/video/video_content_type.h" +#include "api/video/video_timing.h" +#include "call/call.h" +#include "media/base/media_channel.h" +#include "modules/audio_processing/include/audio_processing_statistics.h" +#include 
"p2p/base/ice_transport_internal.h" +#include "p2p/base/p2p_constants.h" #include "pc/channel.h" -#include "pc/peer_connection.h" +#include "pc/channel_interface.h" +#include "pc/data_channel_utils.h" +#include "pc/rtp_receiver.h" +#include "pc/rtp_transceiver.h" +#include "pc/transport_stats.h" #include "rtc_base/checks.h" -#include "rtc_base/third_party/base64/base64.h" +#include "rtc_base/ip_address.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" +#include "rtc_base/rtc_certificate.h" +#include "rtc_base/socket_address.h" +#include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/string_encode.h" +#include "rtc_base/thread.h" +#include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" namespace webrtc { @@ -287,6 +319,10 @@ void ExtractStats(const cricket::VideoReceiverInfo& info, if (info.qp_sum) report->AddInt64(StatsReport::kStatsValueNameQpSum, *info.qp_sum); + if (info.nacks_sent) { + report->AddInt(StatsReport::kStatsValueNameNacksSent, *info.nacks_sent); + } + const IntForAdd ints[] = { {StatsReport::kStatsValueNameCurrentDelayMs, info.current_delay_ms}, {StatsReport::kStatsValueNameDecodeMs, info.decode_ms}, @@ -300,7 +336,6 @@ void ExtractStats(const cricket::VideoReceiverInfo& info, {StatsReport::kStatsValueNameMaxDecodeMs, info.max_decode_ms}, {StatsReport::kStatsValueNameMinPlayoutDelayMs, info.min_playout_delay_ms}, - {StatsReport::kStatsValueNameNacksSent, info.nacks_sent}, {StatsReport::kStatsValueNamePacketsLost, info.packets_lost}, {StatsReport::kStatsValueNamePacketsReceived, info.packets_rcvd}, {StatsReport::kStatsValueNamePlisSent, info.plis_sent}, @@ -509,7 +544,7 @@ StatsCollector::StatsCollector(PeerConnectionInternal* pc) } StatsCollector::~StatsCollector() { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); } // Wallclock time in ms. 
@@ -520,7 +555,7 @@ double StatsCollector::GetTimeNow() { // Adds a MediaStream with tracks that can be used as a |selector| in a call // to GetStats. void StatsCollector::AddStream(MediaStreamInterface* stream) { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); RTC_DCHECK(stream != NULL); CreateTrackReports(stream->GetAudioTracks(), &reports_, @@ -543,7 +578,7 @@ void StatsCollector::AddTrack(MediaStreamTrackInterface* track) { void StatsCollector::AddLocalAudioTrack(AudioTrackInterface* audio_track, uint32_t ssrc) { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); RTC_DCHECK(audio_track != NULL); #if RTC_DCHECK_IS_ON for (const auto& track : local_audio_tracks_) @@ -577,7 +612,7 @@ void StatsCollector::RemoveLocalAudioTrack(AudioTrackInterface* audio_track, void StatsCollector::GetStats(MediaStreamTrackInterface* track, StatsReports* reports) { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); RTC_DCHECK(reports != NULL); RTC_DCHECK(reports->empty()); @@ -617,26 +652,33 @@ void StatsCollector::GetStats(MediaStreamTrackInterface* track, void StatsCollector::UpdateStats( PeerConnectionInterface::StatsOutputLevel level) { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); - double time_now = GetTimeNow(); - // Calls to UpdateStats() that occur less than kMinGatherStatsPeriod number of - // ms apart will be ignored. - const double kMinGatherStatsPeriod = 50; - if (stats_gathering_started_ != 0 && - stats_gathering_started_ + kMinGatherStatsPeriod > time_now) { + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); + // Calls to UpdateStats() that occur less than kMinGatherStatsPeriodMs apart + // will be ignored. Using a monotonic clock specifically for this, while using + // a UTC clock for the reports themselves. 
+ const int64_t kMinGatherStatsPeriodMs = 50; + int64_t cache_now_ms = rtc::TimeMillis(); + if (cache_timestamp_ms_ != 0 && + cache_timestamp_ms_ + kMinGatherStatsPeriodMs > cache_now_ms) { return; } - stats_gathering_started_ = time_now; + cache_timestamp_ms_ = cache_now_ms; + stats_gathering_started_ = GetTimeNow(); + + // TODO(tommi): ExtractSessionInfo now has a single hop to the network thread + // to fetch stats, then applies them on the signaling thread. See if we need + // to do this synchronously or if updating the stats without blocking is safe. + std::map transport_names_by_mid = + ExtractSessionInfo(); // TODO(tommi): All of these hop over to the worker thread to fetch - // information. We could use an AsyncInvoker to run all of these and post + // information. We could post a task to run all of these and post // the information back to the signaling thread where we can create and // update stats reports. That would also clean up the threading story a bit // since we'd be creating/updating the stats report objects consistently on // the same thread (this class has no locks right now). - ExtractSessionInfo(); ExtractBweInfo(); - ExtractMediaInfo(); + ExtractMediaInfo(transport_names_by_mid); ExtractSenderInfo(); ExtractDataInfo(); UpdateTrackReports(); @@ -647,7 +689,7 @@ StatsReport* StatsCollector::PrepareReport(bool local, const std::string& track_id, const StatsReport::Id& transport_id, StatsReport::Direction direction) { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); StatsReport::Id id(StatsReport::NewIdWithDirection( local ? 
StatsReport::kStatsReportTypeSsrc : StatsReport::kStatsReportTypeRemoteSsrc, @@ -670,7 +712,7 @@ StatsReport* StatsCollector::PrepareReport(bool local, } StatsReport* StatsCollector::PrepareADMReport() { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); StatsReport::Id id(StatsReport::NewTypedId( StatsReport::kStatsReportTypeSession, pc_->session_id())); StatsReport* report = reports_.FindOrAddNew(id); @@ -684,7 +726,7 @@ bool StatsCollector::IsValidTrack(const std::string& track_id) { StatsReport* StatsCollector::AddCertificateReports( std::unique_ptr cert_stats) { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); StatsReport* first_report = nullptr; StatsReport* prev_report = nullptr; @@ -772,7 +814,7 @@ StatsReport* StatsCollector::AddConnectionInfoReport( StatsReport* StatsCollector::AddCandidateReport( const cricket::CandidateStats& candidate_stats, bool local) { - const auto& candidate = candidate_stats.candidate; + const auto& candidate = candidate_stats.candidate(); StatsReport::Id id(StatsReport::NewCandidateId(local, candidate.id())); StatsReport* report = reports_.Find(id); if (!report) { @@ -795,8 +837,8 @@ StatsReport* StatsCollector::AddCandidateReport( } report->set_timestamp(stats_gathering_started_); - if (local && candidate_stats.stun_stats.has_value()) { - const auto& stun_stats = candidate_stats.stun_stats.value(); + if (local && candidate_stats.stun_stats().has_value()) { + const auto& stun_stats = candidate_stats.stun_stats().value(); report->AddInt64(StatsReport::kStatsValueNameSentStunKeepaliveRequests, stun_stats.stun_binding_requests_sent); report->AddInt64(StatsReport::kStatsValueNameRecvStunKeepaliveResponses, @@ -810,35 +852,58 @@ StatsReport* StatsCollector::AddCandidateReport( return report; } -void StatsCollector::ExtractSessionInfo() { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); +std::map StatsCollector::ExtractSessionInfo() { + 
TRACE_EVENT0("webrtc", "StatsCollector::ExtractSessionInfo"); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); - // Extract information from the base session. - StatsReport::Id id(StatsReport::NewTypedId( - StatsReport::kStatsReportTypeSession, pc_->session_id())); - StatsReport* report = reports_.ReplaceOrAddNew(id); - report->set_timestamp(stats_gathering_started_); - report->AddBoolean(StatsReport::kStatsValueNameInitiator, - pc_->initial_offerer()); + SessionStats stats; + auto transceivers = pc_->GetTransceiversInternal(); + pc_->network_thread()->Invoke( + RTC_FROM_HERE, [&, sctp_transport_name = pc_->sctp_transport_name(), + sctp_mid = pc_->sctp_mid()]() mutable { + stats = ExtractSessionInfo_n( + transceivers, std::move(sctp_transport_name), std::move(sctp_mid)); + }); - cricket::CandidateStatsList pooled_candidate_stats_list = - pc_->GetPooledCandidateStats(); + ExtractSessionInfo_s(stats); - for (const cricket::CandidateStats& stats : pooled_candidate_stats_list) { - AddCandidateReport(stats, true); + return std::move(stats.transport_names_by_mid); +} + +StatsCollector::SessionStats StatsCollector::ExtractSessionInfo_n( + const std::vector>>& transceivers, + absl::optional sctp_transport_name, + absl::optional sctp_mid) { + TRACE_EVENT0("webrtc", "StatsCollector::ExtractSessionInfo_n"); + RTC_DCHECK_RUN_ON(pc_->network_thread()); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + SessionStats stats; + stats.candidate_stats = pc_->GetPooledCandidateStats(); + for (auto& transceiver : transceivers) { + cricket::ChannelInterface* channel = transceiver->internal()->channel(); + if (channel) { + stats.transport_names_by_mid[channel->content_name()] = + channel->transport_name(); + } + } + + if (sctp_transport_name) { + RTC_DCHECK(sctp_mid); + stats.transport_names_by_mid[*sctp_mid] = *sctp_transport_name; } std::set transport_names; - for (const auto& entry : pc_->GetTransportNamesByMid()) { + for (const auto& entry : stats.transport_names_by_mid) { 
transport_names.insert(entry.second); } std::map transport_stats_by_name = pc_->GetTransportStatsByNames(transport_names); - for (const auto& entry : transport_stats_by_name) { - const std::string& transport_name = entry.first; - const cricket::TransportStats& transport_stats = entry.second; + for (auto& entry : transport_stats_by_name) { + stats.transport_stats.emplace_back(entry.first, std::move(entry.second)); + TransportStats& transport = stats.transport_stats.back(); // Attempt to get a copy of the certificates from the transport and // expose them in stats reports. All channels in a transport share the @@ -846,24 +911,59 @@ void StatsCollector::ExtractSessionInfo() { // StatsReport::Id local_cert_report_id, remote_cert_report_id; rtc::scoped_refptr certificate; - if (pc_->GetLocalCertificate(transport_name, &certificate)) { - StatsReport* r = AddCertificateReports( - certificate->GetSSLCertificateChain().GetStats()); - if (r) - local_cert_report_id = r->id(); + if (pc_->GetLocalCertificate(transport.name, &certificate)) { + transport.local_cert_stats = + certificate->GetSSLCertificateChain().GetStats(); } std::unique_ptr remote_cert_chain = - pc_->GetRemoteSSLCertChain(transport_name); + pc_->GetRemoteSSLCertChain(transport.name); if (remote_cert_chain) { - StatsReport* r = AddCertificateReports(remote_cert_chain->GetStats()); + transport.remote_cert_stats = remote_cert_chain->GetStats(); + } + } + + return stats; +} + +void StatsCollector::ExtractSessionInfo_s(SessionStats& session_stats) { + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + + StatsReport::Id id(StatsReport::NewTypedId( + StatsReport::kStatsReportTypeSession, pc_->session_id())); + StatsReport* report = reports_.ReplaceOrAddNew(id); + report->set_timestamp(stats_gathering_started_); + report->AddBoolean(StatsReport::kStatsValueNameInitiator, + pc_->initial_offerer()); + + for (const cricket::CandidateStats& stats : 
session_stats.candidate_stats) { + AddCandidateReport(stats, true); + } + + for (auto& transport : session_stats.transport_stats) { + // Attempt to get a copy of the certificates from the transport and + // expose them in stats reports. All channels in a transport share the + // same local and remote certificates. + // + StatsReport::Id local_cert_report_id, remote_cert_report_id; + if (transport.local_cert_stats) { + StatsReport* r = + AddCertificateReports(std::move(transport.local_cert_stats)); + if (r) + local_cert_report_id = r->id(); + } + + if (transport.remote_cert_stats) { + StatsReport* r = + AddCertificateReports(std::move(transport.remote_cert_stats)); if (r) remote_cert_report_id = r->id(); } - for (const auto& channel_iter : transport_stats.channel_stats) { + for (const auto& channel_iter : transport.stats.channel_stats) { StatsReport::Id id( - StatsReport::NewComponentId(transport_name, channel_iter.component)); + StatsReport::NewComponentId(transport.name, channel_iter.component)); StatsReport* channel_report = reports_.ReplaceOrAddNew(id); channel_report->set_timestamp(stats_gathering_started_); channel_report->AddInt(StatsReport::kStatsValueNameComponent, @@ -906,7 +1006,7 @@ void StatsCollector::ExtractSessionInfo() { for (const cricket::ConnectionInfo& info : channel_iter.ice_transport_stats.connection_infos) { StatsReport* connection_report = AddConnectionInfoReport( - transport_name, channel_iter.component, connection_id++, + transport.name, channel_iter.component, connection_id++, channel_report->id(), info); if (info.best_connection) { channel_report->AddId( @@ -919,7 +1019,7 @@ void StatsCollector::ExtractSessionInfo() { } void StatsCollector::ExtractBweInfo() { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); if (pc_->signaling_state() == PeerConnectionInterface::kClosed) return; @@ -932,16 +1032,25 @@ void StatsCollector::ExtractBweInfo() { // Fill in target encoder bitrate, actual encoder 
bitrate, rtx bitrate, etc. // TODO(holmer): Also fill this in for audio. - for (const auto& transceiver : pc_->GetTransceiversInternal()) { + auto transceivers = pc_->GetTransceiversInternal(); + std::vector video_channels; + for (const auto& transceiver : transceivers) { if (transceiver->media_type() != cricket::MEDIA_TYPE_VIDEO) { continue; } auto* video_channel = static_cast(transceiver->internal()->channel()); - if (!video_channel) { - continue; + if (video_channel) { + video_channels.push_back(video_channel); } - video_channel->FillBitrateInfo(&bwe_info); + } + + if (!video_channels.empty()) { + pc_->worker_thread()->Invoke(RTC_FROM_HERE, [&] { + for (const auto& channel : video_channels) { + channel->FillBitrateInfo(&bwe_info); + } + }); } StatsReport::Id report_id(StatsReport::NewBandwidthEstimationId()); @@ -991,7 +1100,8 @@ class VoiceMediaChannelStatsGatherer final : public MediaChannelStatsGatherer { } bool GetStatsOnWorkerThread() override { - return voice_media_channel_->GetStats(&voice_media_info); + return voice_media_channel_->GetStats(&voice_media_info, + /*get_and_clear_legacy_stats=*/true); } void ExtractStats(StatsCollector* collector) const override { @@ -1053,14 +1163,16 @@ std::unique_ptr CreateMediaChannelStatsGatherer( } // namespace -void StatsCollector::ExtractMediaInfo() { +void StatsCollector::ExtractMediaInfo( + const std::map& transport_names_by_mid) { RTC_DCHECK_RUN_ON(pc_->signaling_thread()); std::vector> gatherers; + auto transceivers = pc_->GetTransceiversInternal(); { rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; - for (const auto& transceiver : pc_->GetTransceiversInternal()) { + for (const auto& transceiver : transceivers) { cricket::ChannelInterface* channel = transceiver->internal()->channel(); if (!channel) { continue; @@ -1068,22 +1180,40 @@ void StatsCollector::ExtractMediaInfo() { std::unique_ptr gatherer = CreateMediaChannelStatsGatherer(channel->media_channel()); gatherer->mid = channel->content_name(); - 
gatherer->transport_name = channel->transport_name(); + gatherer->transport_name = transport_names_by_mid.at(gatherer->mid); + for (const auto& sender : transceiver->internal()->senders()) { - std::string track_id = (sender->track() ? sender->track()->id() : ""); + auto track = sender->track(); + std::string track_id = (track ? track->id() : ""); gatherer->sender_track_id_by_ssrc.insert( std::make_pair(sender->ssrc(), track_id)); } - for (const auto& receiver : transceiver->internal()->receivers()) { - gatherer->receiver_track_id_by_ssrc.insert(std::make_pair( - receiver->internal()->ssrc(), receiver->track()->id())); - } + + // Populating `receiver_track_id_by_ssrc` will be done on the worker + // thread as the `ssrc` property of the receiver needs to be accessed + // there. + gatherers.push_back(std::move(gatherer)); } } pc_->worker_thread()->Invoke(RTC_FROM_HERE, [&] { rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + // Populate `receiver_track_id_by_ssrc` for the gatherers. + int i = 0; + for (const auto& transceiver : transceivers) { + cricket::ChannelInterface* channel = transceiver->internal()->channel(); + if (!channel) + continue; + MediaChannelStatsGatherer* gatherer = gatherers[i++].get(); + RTC_DCHECK_EQ(gatherer->mid, channel->content_name()); + + for (const auto& receiver : transceiver->internal()->receivers()) { + gatherer->receiver_track_id_by_ssrc.insert(std::make_pair( + receiver->internal()->ssrc(), receiver->track()->id())); + } + } + for (auto it = gatherers.begin(); it != gatherers.end(); /* incremented manually */) { MediaChannelStatsGatherer* gatherer = it->get(); @@ -1109,7 +1239,7 @@ void StatsCollector::ExtractMediaInfo() { } void StatsCollector::ExtractSenderInfo() { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); for (const auto& sender : pc_->GetSenders()) { // TODO(nisse): SSRC == 0 currently means none. 
Delete check when @@ -1142,30 +1272,31 @@ void StatsCollector::ExtractSenderInfo() { } void StatsCollector::ExtractDataInfo() { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; - for (const auto& dc : pc_->sctp_data_channels()) { + std::vector data_stats = pc_->GetDataChannelStats(); + for (const auto& stats : data_stats) { StatsReport::Id id(StatsReport::NewTypedIntId( - StatsReport::kStatsReportTypeDataChannel, dc->id())); + StatsReport::kStatsReportTypeDataChannel, stats.id)); StatsReport* report = reports_.ReplaceOrAddNew(id); report->set_timestamp(stats_gathering_started_); - report->AddString(StatsReport::kStatsValueNameLabel, dc->label()); + report->AddString(StatsReport::kStatsValueNameLabel, stats.label); // Filter out the initial id (-1). - if (dc->id() >= 0) { - report->AddInt(StatsReport::kStatsValueNameDataChannelId, dc->id()); + if (stats.id >= 0) { + report->AddInt(StatsReport::kStatsValueNameDataChannelId, stats.id); } - report->AddString(StatsReport::kStatsValueNameProtocol, dc->protocol()); + report->AddString(StatsReport::kStatsValueNameProtocol, stats.protocol); report->AddString(StatsReport::kStatsValueNameState, - DataChannelInterface::DataStateString(dc->state())); + DataChannelInterface::DataStateString(stats.state)); } } StatsReport* StatsCollector::GetReport(const StatsReport::StatsType& type, const std::string& id, StatsReport::Direction direction) { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); RTC_DCHECK(type == StatsReport::kStatsReportTypeSsrc || type == StatsReport::kStatsReportTypeRemoteSsrc); return reports_.Find(StatsReport::NewIdWithDirection(type, id, direction)); @@ -1173,7 +1304,7 @@ StatsReport* StatsCollector::GetReport(const StatsReport::StatsType& type, void StatsCollector::UpdateStatsFromExistingLocalAudioTracks( bool has_remote_tracks) { - 
RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); // Loop through the existing local audio tracks. for (const auto& it : local_audio_tracks_) { AudioTrackInterface* track = it.first; @@ -1201,7 +1332,7 @@ void StatsCollector::UpdateStatsFromExistingLocalAudioTracks( void StatsCollector::UpdateReportFromAudioTrack(AudioTrackInterface* track, StatsReport* report, bool has_remote_tracks) { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); RTC_DCHECK(track != NULL); // Don't overwrite report values if they're not available. @@ -1223,7 +1354,7 @@ void StatsCollector::UpdateReportFromAudioTrack(AudioTrackInterface* track, } void StatsCollector::UpdateTrackReports() { - RTC_DCHECK(pc_->signaling_thread()->IsCurrent()); + RTC_DCHECK_RUN_ON(pc_->signaling_thread()); rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; @@ -1234,7 +1365,7 @@ void StatsCollector::UpdateTrackReports() { } void StatsCollector::ClearUpdateStatsCacheForTest() { - stats_gathering_started_ = 0; + cache_timestamp_ms_ = 0; } } // namespace webrtc diff --git a/pc/stats_collector.h b/pc/stats_collector.h index 041fe2f8fe..2fd5d9d8f8 100644 --- a/pc/stats_collector.h +++ b/pc/stats_collector.h @@ -16,6 +16,8 @@ #include +#include +#include #include #include #include @@ -25,8 +27,10 @@ #include "api/media_stream_interface.h" #include "api/peer_connection_interface.h" #include "api/stats_types.h" +#include "p2p/base/connection_info.h" #include "p2p/base/port.h" #include "pc/peer_connection_internal.h" +#include "pc/stats_collector_interface.h" #include "rtc_base/network_constants.h" #include "rtc_base/ssl_certificate.h" @@ -44,37 +48,40 @@ const char* AdapterTypeToStatsType(rtc::AdapterType type); // A mapping between track ids and their StatsReport. 
typedef std::map TrackIdMap; -class StatsCollector { +class StatsCollector : public StatsCollectorInterface { public: // The caller is responsible for ensuring that the pc outlives the // StatsCollector instance. explicit StatsCollector(PeerConnectionInternal* pc); virtual ~StatsCollector(); - // Adds a MediaStream with tracks that can be used as a |selector| in a call + // Adds a MediaStream with tracks that can be used as a `selector` in a call // to GetStats. void AddStream(MediaStreamInterface* stream); void AddTrack(MediaStreamTrackInterface* track); // Adds a local audio track that is used for getting some voice statistics. - void AddLocalAudioTrack(AudioTrackInterface* audio_track, uint32_t ssrc); + void AddLocalAudioTrack(AudioTrackInterface* audio_track, + uint32_t ssrc) override; // Removes a local audio tracks that is used for getting some voice // statistics. - void RemoveLocalAudioTrack(AudioTrackInterface* audio_track, uint32_t ssrc); + void RemoveLocalAudioTrack(AudioTrackInterface* audio_track, + uint32_t ssrc) override; // Gather statistics from the session and store them for future use. void UpdateStats(PeerConnectionInterface::StatsOutputLevel level); // Gets a StatsReports of the last collected stats. Note that UpdateStats must - // be called before this function to get the most recent stats. |selector| is + // be called before this function to get the most recent stats. `selector` is // a track label or empty string. The most recent reports are stored in - // |reports|. + // `reports`. // TODO(tommi): Change this contract to accept a callback object instead - // of filling in |reports|. As is, there's a requirement that the caller - // uses |reports| immediately without allowing any async activity on + // of filling in `reports`. As is, there's a requirement that the caller + // uses `reports` immediately without allowing any async activity on // the thread (message handling etc) and then discard the results. 
- void GetStats(MediaStreamTrackInterface* track, StatsReports* reports); + void GetStats(MediaStreamTrackInterface* track, + StatsReports* reports) override; // Prepare a local or remote SSRC report for the given ssrc. Used internally // in the ExtractStatsFromList template. @@ -99,19 +106,48 @@ class StatsCollector { private: friend class StatsCollectorTest; + // Struct that's populated on the network thread and carries the values to + // the signaling thread where the stats are added to the stats reports. + struct TransportStats { + TransportStats() = default; + TransportStats(std::string transport_name, + cricket::TransportStats transport_stats) + : name(std::move(transport_name)), stats(std::move(transport_stats)) {} + TransportStats(TransportStats&&) = default; + TransportStats(const TransportStats&) = delete; + + std::string name; + cricket::TransportStats stats; + std::unique_ptr local_cert_stats; + std::unique_ptr remote_cert_stats; + }; + + struct SessionStats { + SessionStats() = default; + SessionStats(SessionStats&&) = default; + SessionStats(const SessionStats&) = delete; + + SessionStats& operator=(SessionStats&&) = default; + SessionStats& operator=(SessionStats&) = delete; + + cricket::CandidateStatsList candidate_stats; + std::vector transport_stats; + std::map transport_names_by_mid; + }; + // Overridden in unit tests to fake timing. virtual double GetTimeNow(); bool CopySelectedReports(const std::string& selector, StatsReports* reports); - // Helper method for creating IceCandidate report. |is_local| indicates + // Helper method for creating IceCandidate report. `is_local` indicates // whether this candidate is local or remote. StatsReport* AddCandidateReport( const cricket::CandidateStats& candidate_stats, bool local); // Adds a report for this certificate and every certificate in its chain, and - // returns the leaf certificate's report (|cert_stats|'s report). + // returns the leaf certificate's report (`cert_stats`'s report). 
StatsReport* AddCertificateReports( std::unique_ptr cert_stats); @@ -122,9 +158,14 @@ class StatsCollector { const cricket::ConnectionInfo& info); void ExtractDataInfo(); - void ExtractSessionInfo(); + + // Returns the `transport_names_by_mid` member from the SessionStats as + // gathered and used to populate the stats. + std::map ExtractSessionInfo(); + void ExtractBweInfo(); - void ExtractMediaInfo(); + void ExtractMediaInfo( + const std::map& transport_names_by_mid); void ExtractSenderInfo(); webrtc::StatsReport* GetReport(const StatsReport::StatsType& type, const std::string& id, @@ -139,11 +180,19 @@ class StatsCollector { // Helper method to update the timestamp of track records. void UpdateTrackReports(); + SessionStats ExtractSessionInfo_n( + const std::vector>>& transceivers, + absl::optional sctp_transport_name, + absl::optional sctp_mid); + void ExtractSessionInfo_s(SessionStats& session_stats); + // A collection for all of our stats reports. StatsCollection reports_; TrackIdMap track_ids_; // Raw pointer to the peer connection the statistics are gathered from. PeerConnectionInternal* const pc_; + int64_t cache_timestamp_ms_ = 0; double stats_gathering_started_; const bool use_standard_bytes_stats_; diff --git a/pc/stats_collector_interface.h b/pc/stats_collector_interface.h new file mode 100644 index 0000000000..4d5c98a4ab --- /dev/null +++ b/pc/stats_collector_interface.h @@ -0,0 +1,43 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +// This file contains an interface for the (obsolete) StatsCollector class that +// is used by compilation units that do not wish to depend on the StatsCollector +// implementation. + +#ifndef PC_STATS_COLLECTOR_INTERFACE_H_ +#define PC_STATS_COLLECTOR_INTERFACE_H_ + +#include + +#include "api/media_stream_interface.h" +#include "api/stats_types.h" + +namespace webrtc { + +class StatsCollectorInterface { + public: + virtual ~StatsCollectorInterface() {} + + // Adds a local audio track that is used for getting some voice statistics. + virtual void AddLocalAudioTrack(AudioTrackInterface* audio_track, + uint32_t ssrc) = 0; + + // Removes a local audio tracks that is used for getting some voice + // statistics. + virtual void RemoveLocalAudioTrack(AudioTrackInterface* audio_track, + uint32_t ssrc) = 0; + virtual void GetStats(MediaStreamTrackInterface* track, + StatsReports* reports) = 0; +}; + +} // namespace webrtc + +#endif // PC_STATS_COLLECTOR_INTERFACE_H_ diff --git a/pc/stats_collector_unittest.cc b/pc/stats_collector_unittest.cc index ab469729ae..c630c3af6c 100644 --- a/pc/stats_collector_unittest.cc +++ b/pc/stats_collector_unittest.cc @@ -19,13 +19,13 @@ #include "api/audio_codecs/audio_encoder.h" #include "api/candidate.h" #include "api/data_channel_interface.h" +#include "api/media_stream_track.h" #include "api/scoped_refptr.h" #include "call/call.h" #include "media/base/media_channel.h" #include "modules/audio_processing/include/audio_processing_statistics.h" -#include "pc/data_channel.h" #include "pc/media_stream.h" -#include "pc/media_stream_track.h" +#include "pc/sctp_data_channel.h" #include "pc/test/fake_peer_connection_for_stats.h" #include "pc/test/fake_video_track_source.h" #include "pc/test/mock_rtp_receiver_internal.h" @@ -96,7 +96,7 @@ class FakeAudioTrack : public MediaStreamTrack { public: explicit FakeAudioTrack(const std::string& id) : MediaStreamTrack(id), - processor_(new rtc::RefCountedObject()) {} + 
processor_(rtc::make_ref_counted()) {} std::string kind() const override { return "audio"; } AudioSourceInterface* GetSource() const override { return NULL; } void AddSink(AudioTrackSinkInterface* sink) override {} @@ -134,8 +134,7 @@ class FakeAudioTrackWithInitValue public: explicit FakeAudioTrackWithInitValue(const std::string& id) : MediaStreamTrack(id), - processor_( - new rtc::RefCountedObject()) {} + processor_(rtc::make_ref_counted()) {} std::string kind() const override { return "audio"; } AudioSourceInterface* GetSource() const override { return NULL; } void AddSink(AudioTrackSinkInterface* sink) override {} @@ -600,7 +599,7 @@ class StatsCollectorForTest : public StatsCollector { class StatsCollectorTest : public ::testing::Test { protected: rtc::scoped_refptr CreatePeerConnection() { - return new rtc::RefCountedObject(); + return rtc::make_ref_counted(); } std::unique_ptr CreateStatsCollector( @@ -738,8 +737,7 @@ class StatsCollectorTest : public ::testing::Test { static rtc::scoped_refptr CreateMockSender( rtc::scoped_refptr track, uint32_t ssrc) { - rtc::scoped_refptr sender( - new rtc::RefCountedObject()); + auto sender = rtc::make_ref_counted(); EXPECT_CALL(*sender, track()).WillRepeatedly(Return(track)); EXPECT_CALL(*sender, ssrc()).WillRepeatedly(Return(ssrc)); EXPECT_CALL(*sender, media_type()) @@ -753,8 +751,7 @@ static rtc::scoped_refptr CreateMockSender( static rtc::scoped_refptr CreateMockReceiver( rtc::scoped_refptr track, uint32_t ssrc) { - rtc::scoped_refptr receiver( - new rtc::RefCountedObject()); + auto receiver = rtc::make_ref_counted(); EXPECT_CALL(*receiver, track()).WillRepeatedly(Return(track)); EXPECT_CALL(*receiver, ssrc()).WillRepeatedly(Return(ssrc)); EXPECT_CALL(*receiver, media_type()) @@ -808,7 +805,7 @@ class StatsCollectorTrackTest : public StatsCollectorTest, rtc::scoped_refptr AddOutgoingAudioTrack( FakePeerConnectionForStats* pc, StatsCollectorForTest* stats) { - audio_track_ = new rtc::RefCountedObject(kLocalTrackId); 
+ audio_track_ = rtc::make_ref_counted(kLocalTrackId); if (GetParam()) { if (!stream_) stream_ = MediaStream::Create("streamid"); @@ -823,7 +820,7 @@ class StatsCollectorTrackTest : public StatsCollectorTest, // Adds a incoming audio track with a given SSRC into the stats. void AddIncomingAudioTrack(FakePeerConnectionForStats* pc, StatsCollectorForTest* stats) { - audio_track_ = new rtc::RefCountedObject(kRemoteTrackId); + audio_track_ = rtc::make_ref_counted(kRemoteTrackId); if (GetParam()) { if (stream_ == NULL) stream_ = MediaStream::Create("streamid"); @@ -1483,8 +1480,8 @@ TEST_P(StatsCollectorTrackTest, FilterOutNegativeInitialValues) { // Create a local stream with a local audio track and adds it to the stats. stream_ = MediaStream::Create("streamid"); - rtc::scoped_refptr local_track( - new rtc::RefCountedObject(kLocalTrackId)); + auto local_track = + rtc::make_ref_counted(kLocalTrackId); stream_->AddTrack(local_track); pc->AddSender(CreateMockSender(local_track, kSsrcOfTrack)); if (GetParam()) { @@ -1495,8 +1492,8 @@ TEST_P(StatsCollectorTrackTest, FilterOutNegativeInitialValues) { // Create a remote stream with a remote audio track and adds it to the stats. rtc::scoped_refptr remote_stream( MediaStream::Create("remotestreamid")); - rtc::scoped_refptr remote_track( - new rtc::RefCountedObject(kRemoteTrackId)); + auto remote_track = + rtc::make_ref_counted(kRemoteTrackId); remote_stream->AddTrack(remote_track); pc->AddReceiver(CreateMockReceiver(remote_track, kSsrcOfTrack)); if (GetParam()) { @@ -1665,8 +1662,7 @@ TEST_P(StatsCollectorTrackTest, LocalAndRemoteTracksWithSameSsrc) { // Create a remote stream with a remote audio track and adds it to the stats. 
rtc::scoped_refptr remote_stream( MediaStream::Create("remotestreamid")); - rtc::scoped_refptr remote_track( - new rtc::RefCountedObject(kRemoteTrackId)); + auto remote_track = rtc::make_ref_counted(kRemoteTrackId); pc->AddReceiver(CreateMockReceiver(remote_track, kSsrcOfTrack)); remote_stream->AddTrack(remote_track); stats->AddStream(remote_stream); @@ -1755,8 +1751,7 @@ TEST_P(StatsCollectorTrackTest, TwoLocalTracksWithSameSsrc) { // Create a new audio track and adds it to the stream and stats. static const std::string kNewTrackId = "new_track_id"; - rtc::scoped_refptr new_audio_track( - new rtc::RefCountedObject(kNewTrackId)); + auto new_audio_track = rtc::make_ref_counted(kNewTrackId); pc->AddSender(CreateMockSender(new_audio_track, kSsrcOfTrack)); stream_->AddTrack(new_audio_track); @@ -1785,8 +1780,8 @@ TEST_P(StatsCollectorTrackTest, TwoLocalSendersWithSameTrack) { auto pc = CreatePeerConnection(); auto stats = CreateStatsCollector(pc); - rtc::scoped_refptr local_track( - new rtc::RefCountedObject(kLocalTrackId)); + auto local_track = + rtc::make_ref_counted(kLocalTrackId); pc->AddSender(CreateMockSender(local_track, kFirstSsrc)); stats->AddLocalAudioTrack(local_track.get(), kFirstSsrc); pc->AddSender(CreateMockSender(local_track, kSecondSsrc)); diff --git a/pc/stream_collection.h b/pc/stream_collection.h index 28cd46fc5d..9bbf957efd 100644 --- a/pc/stream_collection.h +++ b/pc/stream_collection.h @@ -22,16 +22,12 @@ namespace webrtc { class StreamCollection : public StreamCollectionInterface { public: static rtc::scoped_refptr Create() { - rtc::RefCountedObject* implementation = - new rtc::RefCountedObject(); - return implementation; + return rtc::make_ref_counted(); } static rtc::scoped_refptr Create( StreamCollection* streams) { - rtc::RefCountedObject* implementation = - new rtc::RefCountedObject(streams); - return implementation; + return rtc::make_ref_counted(streams); } virtual size_t count() { return media_streams_.size(); } diff --git 
a/pc/test/fake_audio_capture_module.cc b/pc/test/fake_audio_capture_module.cc index 1a7efd4ad1..214ed6b523 100644 --- a/pc/test/fake_audio_capture_module.cc +++ b/pc/test/fake_audio_capture_module.cc @@ -58,8 +58,7 @@ FakeAudioCaptureModule::~FakeAudioCaptureModule() { } rtc::scoped_refptr FakeAudioCaptureModule::Create() { - rtc::scoped_refptr capture_module( - new rtc::RefCountedObject()); + auto capture_module = rtc::make_ref_counted(); if (!capture_module->Initialize()) { return nullptr; } @@ -67,7 +66,7 @@ rtc::scoped_refptr FakeAudioCaptureModule::Create() { } int FakeAudioCaptureModule::frames_received() const { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return frames_received_; } @@ -79,7 +78,7 @@ int32_t FakeAudioCaptureModule::ActiveAudioLayer( int32_t FakeAudioCaptureModule::RegisterAudioCallback( webrtc::AudioTransport* audio_callback) { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); audio_callback_ = audio_callback; return 0; } @@ -183,7 +182,7 @@ int32_t FakeAudioCaptureModule::StartPlayout() { return -1; } { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); playing_ = true; } bool start = true; @@ -194,7 +193,7 @@ int32_t FakeAudioCaptureModule::StartPlayout() { int32_t FakeAudioCaptureModule::StopPlayout() { bool start = false; { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); playing_ = false; start = ShouldStartProcessing(); } @@ -203,7 +202,7 @@ int32_t FakeAudioCaptureModule::StopPlayout() { } bool FakeAudioCaptureModule::Playing() const { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return playing_; } @@ -212,7 +211,7 @@ int32_t FakeAudioCaptureModule::StartRecording() { return -1; } { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); recording_ = true; } bool start = true; @@ -223,7 +222,7 @@ int32_t FakeAudioCaptureModule::StartRecording() { int32_t FakeAudioCaptureModule::StopRecording() { bool start = false; { - rtc::CritScope cs(&crit_); + 
webrtc::MutexLock lock(&mutex_); recording_ = false; start = ShouldStartProcessing(); } @@ -232,7 +231,7 @@ int32_t FakeAudioCaptureModule::StopRecording() { } bool FakeAudioCaptureModule::Recording() const { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return recording_; } @@ -290,13 +289,13 @@ int32_t FakeAudioCaptureModule::MicrophoneVolumeIsAvailable( } int32_t FakeAudioCaptureModule::SetMicrophoneVolume(uint32_t volume) { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); current_mic_level_ = volume; return 0; } int32_t FakeAudioCaptureModule::MicrophoneVolume(uint32_t* volume) const { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); *volume = current_mic_level_; return 0; } @@ -452,7 +451,7 @@ void FakeAudioCaptureModule::UpdateProcessing(bool start) { process_thread_.reset(nullptr); process_thread_checker_.Detach(); } - rtc::CritScope lock(&crit_); + webrtc::MutexLock lock(&mutex_); started_ = false; } } @@ -460,7 +459,7 @@ void FakeAudioCaptureModule::UpdateProcessing(bool start) { void FakeAudioCaptureModule::StartProcessP() { RTC_DCHECK_RUN_ON(&process_thread_checker_); { - rtc::CritScope lock(&crit_); + webrtc::MutexLock lock(&mutex_); if (started_) { // Already started. 
return; @@ -472,7 +471,7 @@ void FakeAudioCaptureModule::StartProcessP() { void FakeAudioCaptureModule::ProcessFrameP() { RTC_DCHECK_RUN_ON(&process_thread_checker_); { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); if (!started_) { next_frame_time_ = rtc::TimeMillis(); started_ = true; diff --git a/pc/test/fake_audio_capture_module.h b/pc/test/fake_audio_capture_module.h index 498b6daf61..d2db3d666d 100644 --- a/pc/test/fake_audio_capture_module.h +++ b/pc/test/fake_audio_capture_module.h @@ -20,20 +20,27 @@ #ifndef PC_TEST_FAKE_AUDIO_CAPTURE_MODULE_H_ #define PC_TEST_FAKE_AUDIO_CAPTURE_MODULE_H_ +#include +#include + #include #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "modules/audio_device/include/audio_device.h" -#include "rtc_base/critical_section.h" +#include "modules/audio_device/include/audio_device_defines.h" #include "rtc_base/message_handler.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" +#include "rtc_base/thread_message.h" namespace rtc { class Thread; } // namespace rtc class FakeAudioCaptureModule : public webrtc::AudioDeviceModule, - public rtc::MessageHandler { + public rtc::MessageHandlerAutoCleanup { public: typedef uint16_t Sample; @@ -48,13 +55,13 @@ class FakeAudioCaptureModule : public webrtc::AudioDeviceModule, // Returns the number of frames that have been successfully pulled by the // instance. Note that correctly detecting success can only be done if the // pulled frame was generated/pushed from a FakeAudioCaptureModule. - int frames_received() const RTC_LOCKS_EXCLUDED(crit_); + int frames_received() const RTC_LOCKS_EXCLUDED(mutex_); int32_t ActiveAudioLayer(AudioLayer* audio_layer) const override; // Note: Calling this method from a callback may result in deadlock. 
int32_t RegisterAudioCallback(webrtc::AudioTransport* audio_callback) override - RTC_LOCKS_EXCLUDED(crit_); + RTC_LOCKS_EXCLUDED(mutex_); int32_t Init() override; int32_t Terminate() override; @@ -81,12 +88,12 @@ class FakeAudioCaptureModule : public webrtc::AudioDeviceModule, int32_t InitRecording() override; bool RecordingIsInitialized() const override; - int32_t StartPlayout() RTC_LOCKS_EXCLUDED(crit_) override; - int32_t StopPlayout() RTC_LOCKS_EXCLUDED(crit_) override; - bool Playing() const RTC_LOCKS_EXCLUDED(crit_) override; - int32_t StartRecording() RTC_LOCKS_EXCLUDED(crit_) override; - int32_t StopRecording() RTC_LOCKS_EXCLUDED(crit_) override; - bool Recording() const RTC_LOCKS_EXCLUDED(crit_) override; + int32_t StartPlayout() RTC_LOCKS_EXCLUDED(mutex_) override; + int32_t StopPlayout() RTC_LOCKS_EXCLUDED(mutex_) override; + bool Playing() const RTC_LOCKS_EXCLUDED(mutex_) override; + int32_t StartRecording() RTC_LOCKS_EXCLUDED(mutex_) override; + int32_t StopRecording() RTC_LOCKS_EXCLUDED(mutex_) override; + bool Recording() const RTC_LOCKS_EXCLUDED(mutex_) override; int32_t InitSpeaker() override; bool SpeakerIsInitialized() const override; @@ -101,9 +108,9 @@ class FakeAudioCaptureModule : public webrtc::AudioDeviceModule, int32_t MicrophoneVolumeIsAvailable(bool* available) override; int32_t SetMicrophoneVolume(uint32_t volume) - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; int32_t MicrophoneVolume(uint32_t* volume) const - RTC_LOCKS_EXCLUDED(crit_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; int32_t MaxMicrophoneVolume(uint32_t* max_volume) const override; int32_t MinMicrophoneVolume(uint32_t* min_volume) const override; @@ -173,28 +180,28 @@ class FakeAudioCaptureModule : public webrtc::AudioDeviceModule, // Returns true/false depending on if recording or playback has been // enabled/started. 
- bool ShouldStartProcessing() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + bool ShouldStartProcessing() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Starts or stops the pushing and pulling of audio frames. - void UpdateProcessing(bool start) RTC_LOCKS_EXCLUDED(crit_); + void UpdateProcessing(bool start) RTC_LOCKS_EXCLUDED(mutex_); // Starts the periodic calling of ProcessFrame() in a thread safe way. void StartProcessP(); // Periodcally called function that ensures that frames are pulled and pushed // periodically if enabled/started. - void ProcessFrameP() RTC_LOCKS_EXCLUDED(crit_); + void ProcessFrameP() RTC_LOCKS_EXCLUDED(mutex_); // Pulls frames from the registered webrtc::AudioTransport. - void ReceiveFrameP() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + void ReceiveFrameP() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Pushes frames to the registered webrtc::AudioTransport. - void SendFrameP() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + void SendFrameP() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Callback for playout and recording. - webrtc::AudioTransport* audio_callback_ RTC_GUARDED_BY(crit_); + webrtc::AudioTransport* audio_callback_ RTC_GUARDED_BY(mutex_); bool recording_ RTC_GUARDED_BY( - crit_); // True when audio is being pushed from the instance. + mutex_); // True when audio is being pushed from the instance. bool playing_ RTC_GUARDED_BY( - crit_); // True when audio is being pulled by the instance. + mutex_); // True when audio is being pulled by the instance. bool play_is_initialized_; // True when the instance is ready to pull audio. bool rec_is_initialized_; // True when the instance is ready to push audio. @@ -202,12 +209,12 @@ class FakeAudioCaptureModule : public webrtc::AudioDeviceModule, // Input to and output from RecordedDataIsAvailable(..) makes it possible to // modify the current mic level. The implementation does not care about the // mic level so it just feeds back what it receives. 
- uint32_t current_mic_level_ RTC_GUARDED_BY(crit_); + uint32_t current_mic_level_ RTC_GUARDED_BY(mutex_); // next_frame_time_ is updated in a non-drifting manner to indicate the next // wall clock time the next frame should be generated and received. started_ // ensures that next_frame_time_ can be initialized properly on first call. - bool started_ RTC_GUARDED_BY(crit_); + bool started_ RTC_GUARDED_BY(mutex_); int64_t next_frame_time_ RTC_GUARDED_BY(process_thread_checker_); std::unique_ptr process_thread_; @@ -224,7 +231,7 @@ class FakeAudioCaptureModule : public webrtc::AudioDeviceModule, // Protects variables that are accessed from process_thread_ and // the main thread. - rtc::CriticalSection crit_; + mutable webrtc::Mutex mutex_; webrtc::SequenceChecker process_thread_checker_; }; diff --git a/pc/test/fake_audio_capture_module_unittest.cc b/pc/test/fake_audio_capture_module_unittest.cc index 8dd252a733..63b41cdded 100644 --- a/pc/test/fake_audio_capture_module_unittest.cc +++ b/pc/test/fake_audio_capture_module_unittest.cc @@ -15,8 +15,8 @@ #include #include "api/scoped_refptr.h" -#include "rtc_base/critical_section.h" #include "rtc_base/gunit.h" +#include "rtc_base/synchronization/mutex.h" #include "test/gtest.h" class FakeAdmTest : public ::testing::Test, public webrtc::AudioTransport { @@ -45,7 +45,7 @@ class FakeAdmTest : public ::testing::Test, public webrtc::AudioTransport { const uint32_t currentMicLevel, const bool keyPressed, uint32_t& newMicLevel) override { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); rec_buffer_bytes_ = nSamples * nBytesPerSample; if ((rec_buffer_bytes_ == 0) || (rec_buffer_bytes_ > @@ -77,7 +77,7 @@ class FakeAdmTest : public ::testing::Test, public webrtc::AudioTransport { size_t& nSamplesOut, int64_t* elapsed_time_ms, int64_t* ntp_time_ms) override { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); ++pull_iterations_; const size_t audio_buffer_size = nSamples * nBytesPerSample; const size_t 
bytes_out = @@ -91,11 +91,11 @@ class FakeAdmTest : public ::testing::Test, public webrtc::AudioTransport { } int push_iterations() const { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return push_iterations_; } int pull_iterations() const { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return pull_iterations_; } @@ -115,7 +115,7 @@ class FakeAdmTest : public ::testing::Test, public webrtc::AudioTransport { return min_buffer_size; } - rtc::CriticalSection crit_; + mutable webrtc::Mutex mutex_; int push_iterations_; int pull_iterations_; diff --git a/pc/test/fake_data_channel_provider.h b/pc/test/fake_data_channel_provider.h index 2ada4a992d..f9e9e91d48 100644 --- a/pc/test/fake_data_channel_provider.h +++ b/pc/test/fake_data_channel_provider.h @@ -13,10 +13,11 @@ #include -#include "pc/data_channel.h" +#include "pc/sctp_data_channel.h" #include "rtc_base/checks.h" -class FakeDataChannelProvider : public webrtc::DataChannelProviderInterface { +class FakeDataChannelProvider + : public webrtc::SctpDataChannelProviderInterface { public: FakeDataChannelProvider() : send_blocked_(false), @@ -25,7 +26,8 @@ class FakeDataChannelProvider : public webrtc::DataChannelProviderInterface { transport_error_(false) {} virtual ~FakeDataChannelProvider() {} - bool SendData(const cricket::SendDataParams& params, + bool SendData(int sid, + const webrtc::SendDataParams& params, const rtc::CopyOnWriteBuffer& payload, cricket::SendDataResult* result) override { RTC_CHECK(ready_to_send_); @@ -35,16 +37,17 @@ class FakeDataChannelProvider : public webrtc::DataChannelProviderInterface { return false; } - if (transport_error_ || payload.size() == 0) { + if (transport_error_) { *result = cricket::SDR_ERROR; return false; } + last_sid_ = sid; last_send_data_params_ = params; return true; } - bool ConnectDataChannel(webrtc::DataChannel* data_channel) override { + bool ConnectDataChannel(webrtc::SctpDataChannel* data_channel) override { 
RTC_CHECK(connected_channels_.find(data_channel) == connected_channels_.end()); if (!transport_available_) { @@ -55,7 +58,7 @@ class FakeDataChannelProvider : public webrtc::DataChannelProviderInterface { return true; } - void DisconnectDataChannel(webrtc::DataChannel* data_channel) override { + void DisconnectDataChannel(webrtc::SctpDataChannel* data_channel) override { RTC_CHECK(connected_channels_.find(data_channel) != connected_channels_.end()); RTC_LOG(LS_INFO) << "DataChannel disconnected " << data_channel; @@ -77,7 +80,7 @@ class FakeDataChannelProvider : public webrtc::DataChannelProviderInterface { recv_ssrcs_.erase(sid); // Unlike the real SCTP transport, act like the closing procedure finished // instantly, doing the same snapshot thing as below. - for (webrtc::DataChannel* ch : std::set( + for (webrtc::SctpDataChannel* ch : std::set( connected_channels_.begin(), connected_channels_.end())) { if (connected_channels_.count(ch)) { ch->OnClosingProcedureComplete(sid); @@ -93,12 +96,12 @@ class FakeDataChannelProvider : public webrtc::DataChannelProviderInterface { if (!blocked) { // Take a snapshot of the connected channels and check to see whether // each value is still in connected_channels_ before calling - // OnChannelReady(). This avoids problems where the set gets modified - // in response to OnChannelReady(). - for (webrtc::DataChannel* ch : std::set( + // OnTransportReady(). This avoids problems where the set gets modified + // in response to OnTransportReady(). 
+ for (webrtc::SctpDataChannel* ch : std::set( connected_channels_.begin(), connected_channels_.end())) { if (connected_channels_.count(ch)) { - ch->OnChannelReady(true); + ch->OnTransportReady(true); } } } @@ -116,21 +119,22 @@ class FakeDataChannelProvider : public webrtc::DataChannelProviderInterface { RTC_CHECK(transport_available_); ready_to_send_ = ready; if (ready) { - std::set::iterator it; + std::set::iterator it; for (it = connected_channels_.begin(); it != connected_channels_.end(); ++it) { - (*it)->OnChannelReady(true); + (*it)->OnTransportReady(true); } } } void set_transport_error() { transport_error_ = true; } - cricket::SendDataParams last_send_data_params() const { + int last_sid() const { return last_sid_; } + const webrtc::SendDataParams& last_send_data_params() const { return last_send_data_params_; } - bool IsConnected(webrtc::DataChannel* data_channel) const { + bool IsConnected(webrtc::SctpDataChannel* data_channel) const { return connected_channels_.find(data_channel) != connected_channels_.end(); } @@ -143,12 +147,13 @@ class FakeDataChannelProvider : public webrtc::DataChannelProviderInterface { } private: - cricket::SendDataParams last_send_data_params_; + int last_sid_; + webrtc::SendDataParams last_send_data_params_; bool send_blocked_; bool transport_available_; bool ready_to_send_; bool transport_error_; - std::set connected_channels_; + std::set connected_channels_; std::set send_ssrcs_; std::set recv_ssrcs_; }; diff --git a/pc/test/fake_peer_connection_base.h b/pc/test/fake_peer_connection_base.h index f4b27f03e1..7970dd0f0f 100644 --- a/pc/test/fake_peer_connection_base.h +++ b/pc/test/fake_peer_connection_base.h @@ -120,10 +120,11 @@ class FakePeerConnectionBase : public PeerConnectionInternal { return nullptr; } - rtc::scoped_refptr CreateDataChannel( + RTCErrorOr> CreateDataChannelOrError( const std::string& label, const DataChannelInit* config) override { - return nullptr; + return RTCError(RTCErrorType::UNSUPPORTED_OPERATION, 
+ "Fake function called"); } const SessionDescriptionInterface* local_description() const override { @@ -248,23 +249,16 @@ class FakePeerConnectionBase : public PeerConnectionInternal { return {}; } - sigslot::signal1& SignalDataChannelCreated() override { - return SignalDataChannelCreated_; - } - - cricket::RtpDataChannel* rtp_data_channel() const override { return nullptr; } - - std::vector> sctp_data_channels() - const override { - return {}; + sigslot::signal1& SignalSctpDataChannelCreated() override { + return SignalSctpDataChannelCreated_; } absl::optional sctp_transport_name() const override { return absl::nullopt; } - std::map GetTransportNamesByMid() const override { - return {}; + absl::optional sctp_mid() const override { + return absl::nullopt; } std::map GetTransportStatsByNames( @@ -299,7 +293,7 @@ class FakePeerConnectionBase : public PeerConnectionInternal { } protected: - sigslot::signal1 SignalDataChannelCreated_; + sigslot::signal1 SignalSctpDataChannelCreated_; }; } // namespace webrtc diff --git a/pc/test/fake_peer_connection_for_stats.h b/pc/test/fake_peer_connection_for_stats.h index c6391583f5..4cdbd82162 100644 --- a/pc/test/fake_peer_connection_for_stats.h +++ b/pc/test/fake_peer_connection_for_stats.h @@ -28,15 +28,18 @@ namespace webrtc { // Fake VoiceMediaChannel where the result of GetStats can be configured. class FakeVoiceMediaChannelForStats : public cricket::FakeVoiceMediaChannel { public: - FakeVoiceMediaChannelForStats() - : cricket::FakeVoiceMediaChannel(nullptr, cricket::AudioOptions()) {} + explicit FakeVoiceMediaChannelForStats(TaskQueueBase* network_thread) + : cricket::FakeVoiceMediaChannel(nullptr, + cricket::AudioOptions(), + network_thread) {} void SetStats(const cricket::VoiceMediaInfo& voice_info) { stats_ = voice_info; } // VoiceMediaChannel overrides. 
- bool GetStats(cricket::VoiceMediaInfo* info) override { + bool GetStats(cricket::VoiceMediaInfo* info, + bool get_and_clear_legacy_stats) override { if (stats_) { *info = *stats_; return true; @@ -51,8 +54,10 @@ class FakeVoiceMediaChannelForStats : public cricket::FakeVoiceMediaChannel { // Fake VideoMediaChannel where the result of GetStats can be configured. class FakeVideoMediaChannelForStats : public cricket::FakeVideoMediaChannel { public: - FakeVideoMediaChannelForStats() - : cricket::FakeVideoMediaChannel(nullptr, cricket::VideoOptions()) {} + explicit FakeVideoMediaChannelForStats(TaskQueueBase* network_thread) + : cricket::FakeVideoMediaChannel(nullptr, + cricket::VideoOptions(), + network_thread) {} void SetStats(const cricket::VideoMediaInfo& video_info) { stats_ = video_info; @@ -74,6 +79,64 @@ class FakeVideoMediaChannelForStats : public cricket::FakeVideoMediaChannel { constexpr bool kDefaultRtcpMuxRequired = true; constexpr bool kDefaultSrtpRequired = true; +class VoiceChannelForTesting : public cricket::VoiceChannel { + public: + VoiceChannelForTesting(rtc::Thread* worker_thread, + rtc::Thread* network_thread, + rtc::Thread* signaling_thread, + std::unique_ptr channel, + const std::string& content_name, + bool srtp_required, + webrtc::CryptoOptions crypto_options, + rtc::UniqueRandomIdGenerator* ssrc_generator, + std::string transport_name) + : VoiceChannel(worker_thread, + network_thread, + signaling_thread, + std::move(channel), + content_name, + srtp_required, + std::move(crypto_options), + ssrc_generator), + test_transport_name_(std::move(transport_name)) {} + + private: + const std::string& transport_name() const override { + return test_transport_name_; + } + + const std::string test_transport_name_; +}; + +class VideoChannelForTesting : public cricket::VideoChannel { + public: + VideoChannelForTesting(rtc::Thread* worker_thread, + rtc::Thread* network_thread, + rtc::Thread* signaling_thread, + std::unique_ptr channel, + const std::string& 
content_name, + bool srtp_required, + webrtc::CryptoOptions crypto_options, + rtc::UniqueRandomIdGenerator* ssrc_generator, + std::string transport_name) + : VideoChannel(worker_thread, + network_thread, + signaling_thread, + std::move(channel), + content_name, + srtp_required, + std::move(crypto_options), + ssrc_generator), + test_transport_name_(std::move(transport_name)) {} + + private: + const std::string& transport_name() const override { + return test_transport_name_; + } + + const std::string test_transport_name_; +}; + // This class is intended to be fed into the StatsCollector and // RTCStatsCollector so that the stats functionality can be unit tested. // Individual tests can configure this fake as needed to simulate scenarios @@ -119,7 +182,7 @@ class FakePeerConnectionForStats : public FakePeerConnectionBase { // TODO(steveanton): Switch tests to use RtpTransceivers directly. auto receiver_proxy = RtpReceiverProxyWithInternal::Create( - signaling_thread_, receiver); + signaling_thread_, worker_thread_, receiver); GetOrCreateFirstTransceiverOfType(receiver->media_type()) ->internal() ->AddReceiver(receiver_proxy); @@ -137,13 +200,12 @@ class FakePeerConnectionForStats : public FakePeerConnectionBase { const std::string& transport_name) { RTC_DCHECK(!voice_channel_); auto voice_media_channel = - std::make_unique(); + std::make_unique(network_thread_); auto* voice_media_channel_ptr = voice_media_channel.get(); - voice_channel_ = std::make_unique( + voice_channel_ = std::make_unique( worker_thread_, network_thread_, signaling_thread_, std::move(voice_media_channel), mid, kDefaultSrtpRequired, - webrtc::CryptoOptions(), &ssrc_generator_); - voice_channel_->set_transport_name_for_testing(transport_name); + webrtc::CryptoOptions(), &ssrc_generator_, transport_name); GetOrCreateFirstTransceiverOfType(cricket::MEDIA_TYPE_AUDIO) ->internal() ->SetChannel(voice_channel_.get()); @@ -155,13 +217,12 @@ class FakePeerConnectionForStats : public FakePeerConnectionBase { 
const std::string& transport_name) { RTC_DCHECK(!video_channel_); auto video_media_channel = - std::make_unique(); + std::make_unique(network_thread_); auto video_media_channel_ptr = video_media_channel.get(); - video_channel_ = std::make_unique( + video_channel_ = std::make_unique( worker_thread_, network_thread_, signaling_thread_, std::move(video_media_channel), mid, kDefaultSrtpRequired, - webrtc::CryptoOptions(), &ssrc_generator_); - video_channel_->set_transport_name_for_testing(transport_name); + webrtc::CryptoOptions(), &ssrc_generator_, transport_name); GetOrCreateFirstTransceiverOfType(cricket::MEDIA_TYPE_VIDEO) ->internal() ->SetChannel(video_channel_.get()); @@ -174,11 +235,13 @@ class FakePeerConnectionForStats : public FakePeerConnectionBase { void AddSctpDataChannel(const std::string& label, const InternalDataChannelInit& init) { - AddSctpDataChannel(DataChannel::Create(&data_channel_provider_, - cricket::DCT_SCTP, label, init)); + // TODO(bugs.webrtc.org/11547): Supply a separate network thread. 
+ AddSctpDataChannel(SctpDataChannel::Create(&data_channel_provider_, label, + init, rtc::Thread::Current(), + rtc::Thread::Current())); } - void AddSctpDataChannel(rtc::scoped_refptr data_channel) { + void AddSctpDataChannel(rtc::scoped_refptr data_channel) { sctp_data_channels_.push_back(data_channel); } @@ -257,30 +320,21 @@ class FakePeerConnectionForStats : public FakePeerConnectionBase { return transceivers_; } - std::vector> sctp_data_channels() - const override { - return sctp_data_channels_; + std::vector GetDataChannelStats() const override { + RTC_DCHECK_RUN_ON(signaling_thread()); + std::vector stats; + for (const auto& channel : sctp_data_channels_) + stats.push_back(channel->GetStats()); + return stats; } cricket::CandidateStatsList GetPooledCandidateStats() const override { return {}; } - std::map GetTransportNamesByMid() const override { - std::map transport_names_by_mid; - if (voice_channel_) { - transport_names_by_mid[voice_channel_->content_name()] = - voice_channel_->transport_name(); - } - if (video_channel_) { - transport_names_by_mid[video_channel_->content_name()] = - video_channel_->transport_name(); - } - return transport_names_by_mid; - } - std::map GetTransportStatsByNames( const std::set& transport_names) override { + RTC_DCHECK_RUN_ON(network_thread_); std::map transport_stats_by_name; for (const std::string& transport_name : transport_names) { transport_stats_by_name[transport_name] = @@ -338,7 +392,8 @@ class FakePeerConnectionForStats : public FakePeerConnectionBase { } } auto transceiver = RtpTransceiverProxyWithInternal::Create( - signaling_thread_, new RtpTransceiver(media_type)); + signaling_thread_, + new RtpTransceiver(media_type, channel_manager_.get())); transceivers_.push_back(transceiver); return transceiver; } @@ -347,6 +402,12 @@ class FakePeerConnectionForStats : public FakePeerConnectionBase { rtc::Thread* const worker_thread_; rtc::Thread* const signaling_thread_; + std::unique_ptr channel_manager_ = + 
cricket::ChannelManager::Create(nullptr /* MediaEngineInterface */, + true, + worker_thread_, + network_thread_); + rtc::scoped_refptr local_streams_; rtc::scoped_refptr remote_streams_; @@ -359,7 +420,7 @@ class FakePeerConnectionForStats : public FakePeerConnectionBase { std::unique_ptr voice_channel_; std::unique_ptr video_channel_; - std::vector> sctp_data_channels_; + std::vector> sctp_data_channels_; std::map transport_stats_by_name_; diff --git a/pc/test/fake_periodic_video_source.h b/pc/test/fake_periodic_video_source.h index 1684ca4adb..871c29cbae 100644 --- a/pc/test/fake_periodic_video_source.h +++ b/pc/test/fake_periodic_video_source.h @@ -16,6 +16,7 @@ #include "api/video/video_source_interface.h" #include "media/base/fake_frame_source.h" #include "media/base/video_broadcaster.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/task_utils/repeating_task.h" @@ -59,6 +60,11 @@ class FakePeriodicVideoSource final }); } + rtc::VideoSinkWants wants() const { + MutexLock lock(&mutex_); + return wants_; + } + void RemoveSink(rtc::VideoSinkInterface* sink) override { RTC_DCHECK(thread_checker_.IsCurrent()); broadcaster_.RemoveSink(sink); @@ -67,6 +73,10 @@ class FakePeriodicVideoSource final void AddOrUpdateSink(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) override { RTC_DCHECK(thread_checker_.IsCurrent()); + { + MutexLock lock(&mutex_); + wants_ = wants; + } broadcaster_.AddOrUpdateSink(sink, wants); } @@ -76,10 +86,12 @@ class FakePeriodicVideoSource final } private: - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; rtc::VideoBroadcaster broadcaster_; cricket::FakeFrameSource frame_source_; + mutable Mutex mutex_; + rtc::VideoSinkWants wants_ RTC_GUARDED_BY(&mutex_); std::unique_ptr task_queue_; }; diff --git a/pc/test/fake_periodic_video_track_source.h b/pc/test/fake_periodic_video_track_source.h index cc406d6d3f..98a456f232 100644 --- 
a/pc/test/fake_periodic_video_track_source.h +++ b/pc/test/fake_periodic_video_track_source.h @@ -29,6 +29,10 @@ class FakePeriodicVideoTrackSource : public VideoTrackSource { ~FakePeriodicVideoTrackSource() = default; + const FakePeriodicVideoSource& fake_periodic_source() const { + return source_; + } + protected: rtc::VideoSourceInterface* source() override { return &source_; } diff --git a/pc/test/fake_rtc_certificate_generator.h b/pc/test/fake_rtc_certificate_generator.h index 9c43ba9726..b591c4c4ab 100644 --- a/pc/test/fake_rtc_certificate_generator.h +++ b/pc/test/fake_rtc_certificate_generator.h @@ -83,7 +83,7 @@ static const rtc::RTCCertificatePEM kRsaPems[] = { // ECDSA with EC_NIST_P256. // These PEM strings were created by generating an identity with -// |SSLIdentity::Generate| and invoking |identity->PrivateKeyToPEMString()|, +// |SSLIdentity::Create| and invoking |identity->PrivateKeyToPEMString()|, // |identity->PublicKeyToPEMString()| and // |identity->certificate().ToPEMString()|. 
static const rtc::RTCCertificatePEM kEcdsaPems[] = { @@ -118,7 +118,7 @@ static const rtc::RTCCertificatePEM kEcdsaPems[] = { class FakeRTCCertificateGenerator : public rtc::RTCCertificateGeneratorInterface, - public rtc::MessageHandler { + public rtc::MessageHandlerAutoCleanup { public: typedef rtc::TypedMessageData< rtc::scoped_refptr > diff --git a/pc/test/fake_video_track_source.h b/pc/test/fake_video_track_source.h index d6562313c5..2042c39175 100644 --- a/pc/test/fake_video_track_source.h +++ b/pc/test/fake_video_track_source.h @@ -22,7 +22,7 @@ namespace webrtc { class FakeVideoTrackSource : public VideoTrackSource { public: static rtc::scoped_refptr Create(bool is_screencast) { - return new rtc::RefCountedObject(is_screencast); + return rtc::make_ref_counted(is_screencast); } static rtc::scoped_refptr Create() { diff --git a/pc/test/integration_test_helpers.cc b/pc/test/integration_test_helpers.cc new file mode 100644 index 0000000000..10e4f455ba --- /dev/null +++ b/pc/test/integration_test_helpers.cc @@ -0,0 +1,59 @@ +/* + * Copyright 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/test/integration_test_helpers.h" + +namespace webrtc { + +PeerConnectionInterface::RTCOfferAnswerOptions IceRestartOfferAnswerOptions() { + PeerConnectionInterface::RTCOfferAnswerOptions options; + options.ice_restart = true; + return options; +} + +void RemoveSsrcsAndMsids(cricket::SessionDescription* desc) { + for (ContentInfo& content : desc->contents()) { + content.media_description()->mutable_streams().clear(); + } + desc->set_msid_supported(false); + desc->set_msid_signaling(0); +} + +void RemoveSsrcsAndKeepMsids(cricket::SessionDescription* desc) { + for (ContentInfo& content : desc->contents()) { + std::string track_id; + std::vector stream_ids; + if (!content.media_description()->streams().empty()) { + const StreamParams& first_stream = + content.media_description()->streams()[0]; + track_id = first_stream.id; + stream_ids = first_stream.stream_ids(); + } + content.media_description()->mutable_streams().clear(); + StreamParams new_stream; + new_stream.id = track_id; + new_stream.set_stream_ids(stream_ids); + content.media_description()->AddStream(new_stream); + } +} + +int FindFirstMediaStatsIndexByKind( + const std::string& kind, + const std::vector& + media_stats_vec) { + for (size_t i = 0; i < media_stats_vec.size(); i++) { + if (media_stats_vec[i]->kind.ValueToString() == kind) { + return i; + } + } + return -1; +} + +} // namespace webrtc diff --git a/pc/test/integration_test_helpers.h b/pc/test/integration_test_helpers.h new file mode 100644 index 0000000000..117f1b428b --- /dev/null +++ b/pc/test/integration_test_helpers.h @@ -0,0 +1,1852 @@ +/* + * Copyright 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_TEST_INTEGRATION_TEST_HELPERS_H_ +#define PC_TEST_INTEGRATION_TEST_HELPERS_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "absl/types/optional.h" +#include "api/audio_options.h" +#include "api/call/call_factory_interface.h" +#include "api/candidate.h" +#include "api/crypto/crypto_options.h" +#include "api/data_channel_interface.h" +#include "api/ice_transport_interface.h" +#include "api/jsep.h" +#include "api/media_stream_interface.h" +#include "api/media_types.h" +#include "api/peer_connection_interface.h" +#include "api/rtc_error.h" +#include "api/rtc_event_log/rtc_event_log_factory.h" +#include "api/rtc_event_log/rtc_event_log_factory_interface.h" +#include "api/rtc_event_log_output.h" +#include "api/rtp_receiver_interface.h" +#include "api/rtp_sender_interface.h" +#include "api/rtp_transceiver_interface.h" +#include "api/scoped_refptr.h" +#include "api/stats/rtc_stats.h" +#include "api/stats/rtc_stats_report.h" +#include "api/stats/rtcstats_objects.h" +#include "api/task_queue/default_task_queue_factory.h" +#include "api/task_queue/task_queue_factory.h" +#include "api/transport/field_trial_based_config.h" +#include "api/transport/webrtc_key_value_config.h" +#include "api/uma_metrics.h" +#include "api/video/video_rotation.h" +#include "api/video_codecs/sdp_video_format.h" +#include "api/video_codecs/video_decoder_factory.h" +#include "api/video_codecs/video_encoder_factory.h" +#include "call/call.h" +#include "logging/rtc_event_log/fake_rtc_event_log_factory.h" +#include "media/base/media_engine.h" +#include "media/base/stream_params.h" +#include "media/engine/fake_webrtc_video_engine.h" +#include "media/engine/webrtc_media_engine.h" +#include "media/engine/webrtc_media_engine_defaults.h" +#include 
"modules/audio_device/include/audio_device.h" +#include "modules/audio_processing/include/audio_processing.h" +#include "modules/audio_processing/test/audio_processing_builder_for_testing.h" +#include "p2p/base/fake_ice_transport.h" +#include "p2p/base/ice_transport_internal.h" +#include "p2p/base/mock_async_resolver.h" +#include "p2p/base/p2p_constants.h" +#include "p2p/base/port.h" +#include "p2p/base/port_allocator.h" +#include "p2p/base/port_interface.h" +#include "p2p/base/test_stun_server.h" +#include "p2p/base/test_turn_customizer.h" +#include "p2p/base/test_turn_server.h" +#include "p2p/client/basic_port_allocator.h" +#include "pc/dtmf_sender.h" +#include "pc/local_audio_source.h" +#include "pc/media_session.h" +#include "pc/peer_connection.h" +#include "pc/peer_connection_factory.h" +#include "pc/peer_connection_proxy.h" +#include "pc/rtp_media_utils.h" +#include "pc/session_description.h" +#include "pc/test/fake_audio_capture_module.h" +#include "pc/test/fake_periodic_video_source.h" +#include "pc/test/fake_periodic_video_track_source.h" +#include "pc/test/fake_rtc_certificate_generator.h" +#include "pc/test/fake_video_track_renderer.h" +#include "pc/test/mock_peer_connection_observers.h" +#include "pc/video_track_source.h" +#include "rtc_base/checks.h" +#include "rtc_base/fake_clock.h" +#include "rtc_base/fake_mdns_responder.h" +#include "rtc_base/fake_network.h" +#include "rtc_base/firewall_socket_server.h" +#include "rtc_base/gunit.h" +#include "rtc_base/helpers.h" +#include "rtc_base/ip_address.h" +#include "rtc_base/location.h" +#include "rtc_base/logging.h" +#include "rtc_base/mdns_responder_interface.h" +#include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/rtc_certificate_generator.h" +#include "rtc_base/socket_address.h" +#include "rtc_base/ssl_stream_adapter.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include 
"rtc_base/test_certificate_verifier.h" +#include "rtc_base/thread.h" +#include "rtc_base/time_utils.h" +#include "rtc_base/virtual_socket_server.h" +#include "system_wrappers/include/metrics.h" +#include "test/field_trial.h" +#include "test/gmock.h" + +namespace webrtc { + +using ::cricket::ContentInfo; +using ::cricket::StreamParams; +using ::rtc::SocketAddress; +using ::testing::_; +using ::testing::Combine; +using ::testing::Contains; +using ::testing::DoAll; +using ::testing::ElementsAre; +using ::testing::NiceMock; +using ::testing::Return; +using ::testing::SetArgPointee; +using ::testing::UnorderedElementsAreArray; +using ::testing::Values; +using RTCConfiguration = PeerConnectionInterface::RTCConfiguration; + +static const int kDefaultTimeout = 10000; +static const int kMaxWaitForStatsMs = 3000; +static const int kMaxWaitForActivationMs = 5000; +static const int kMaxWaitForFramesMs = 10000; +// Default number of audio/video frames to wait for before considering a test +// successful. +static const int kDefaultExpectedAudioFrameCount = 3; +static const int kDefaultExpectedVideoFrameCount = 3; + +static const char kDataChannelLabel[] = "data_channel"; + +// SRTP cipher name negotiated by the tests. This must be updated if the +// default changes. +static const int kDefaultSrtpCryptoSuite = rtc::SRTP_AES128_CM_SHA1_80; +static const int kDefaultSrtpCryptoSuiteGcm = rtc::SRTP_AEAD_AES_256_GCM; + +static const SocketAddress kDefaultLocalAddress("192.168.1.1", 0); + +// Helper function for constructing offer/answer options to initiate an ICE +// restart. +PeerConnectionInterface::RTCOfferAnswerOptions IceRestartOfferAnswerOptions(); + +// Remove all stream information (SSRCs, track IDs, etc.) and "msid-semantic" +// attribute from received SDP, simulating a legacy endpoint. 
+void RemoveSsrcsAndMsids(cricket::SessionDescription* desc); + +// Removes all stream information besides the stream ids, simulating an +// endpoint that only signals a=msid lines to convey stream_ids. +void RemoveSsrcsAndKeepMsids(cricket::SessionDescription* desc); + +int FindFirstMediaStatsIndexByKind( + const std::string& kind, + const std::vector& + media_stats_vec); + +class SignalingMessageReceiver { + public: + virtual void ReceiveSdpMessage(SdpType type, const std::string& msg) = 0; + virtual void ReceiveIceMessage(const std::string& sdp_mid, + int sdp_mline_index, + const std::string& msg) = 0; + + protected: + SignalingMessageReceiver() {} + virtual ~SignalingMessageReceiver() {} +}; + +class MockRtpReceiverObserver : public webrtc::RtpReceiverObserverInterface { + public: + explicit MockRtpReceiverObserver(cricket::MediaType media_type) + : expected_media_type_(media_type) {} + + void OnFirstPacketReceived(cricket::MediaType media_type) override { + ASSERT_EQ(expected_media_type_, media_type); + first_packet_received_ = true; + } + + bool first_packet_received() const { return first_packet_received_; } + + virtual ~MockRtpReceiverObserver() {} + + private: + bool first_packet_received_ = false; + cricket::MediaType expected_media_type_; +}; + +// Helper class that wraps a peer connection, observes it, and can accept +// signaling messages from another wrapper. +// +// Uses a fake network, fake A/V capture, and optionally fake +// encoders/decoders, though they aren't used by default since they don't +// advertise support of any codecs. +// TODO(steveanton): See how this could become a subclass of +// PeerConnectionWrapper defined in peerconnectionwrapper.h. +class PeerConnectionIntegrationWrapper : public webrtc::PeerConnectionObserver, + public SignalingMessageReceiver { + public: + // Different factory methods for convenience. 
+ // TODO(deadbeef): Could use the pattern of: + // + // PeerConnectionIntegrationWrapper = + // WrapperBuilder.WithConfig(...).WithOptions(...).build(); + // + // To reduce some code duplication. + static PeerConnectionIntegrationWrapper* CreateWithDtlsIdentityStore( + const std::string& debug_name, + std::unique_ptr cert_generator, + rtc::Thread* network_thread, + rtc::Thread* worker_thread) { + PeerConnectionIntegrationWrapper* client( + new PeerConnectionIntegrationWrapper(debug_name)); + webrtc::PeerConnectionDependencies dependencies(nullptr); + dependencies.cert_generator = std::move(cert_generator); + if (!client->Init(nullptr, nullptr, std::move(dependencies), network_thread, + worker_thread, nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false)) { + delete client; + return nullptr; + } + return client; + } + + webrtc::PeerConnectionFactoryInterface* pc_factory() const { + return peer_connection_factory_.get(); + } + + webrtc::PeerConnectionInterface* pc() const { return peer_connection_.get(); } + + // If a signaling message receiver is set (via ConnectFakeSignaling), this + // will set the whole offer/answer exchange in motion. Just need to wait for + // the signaling state to reach "stable". + void CreateAndSetAndSignalOffer() { + auto offer = CreateOfferAndWait(); + ASSERT_NE(nullptr, offer); + EXPECT_TRUE(SetLocalDescriptionAndSendSdpMessage(std::move(offer))); + } + + // Sets the options to be used when CreateAndSetAndSignalOffer is called, or + // when a remote offer is received (via fake signaling) and an answer is + // generated. By default, uses default options. + void SetOfferAnswerOptions( + const PeerConnectionInterface::RTCOfferAnswerOptions& options) { + offer_answer_options_ = options; + } + + // Set a callback to be invoked when SDP is received via the fake signaling + // channel, which provides an opportunity to munge (modify) the SDP. 
This is + // used to test SDP being applied that a PeerConnection would normally not + // generate, but a non-JSEP endpoint might. + void SetReceivedSdpMunger( + std::function munger) { + received_sdp_munger_ = std::move(munger); + } + + // Similar to the above, but this is run on SDP immediately after it's + // generated. + void SetGeneratedSdpMunger( + std::function munger) { + generated_sdp_munger_ = std::move(munger); + } + + // Set a callback to be invoked when a remote offer is received via the fake + // signaling channel. This provides an opportunity to change the + // PeerConnection state before an answer is created and sent to the caller. + void SetRemoteOfferHandler(std::function handler) { + remote_offer_handler_ = std::move(handler); + } + + void SetRemoteAsyncResolver(rtc::MockAsyncResolver* resolver) { + remote_async_resolver_ = resolver; + } + + // Every ICE connection state in order that has been seen by the observer. + std::vector + ice_connection_state_history() const { + return ice_connection_state_history_; + } + void clear_ice_connection_state_history() { + ice_connection_state_history_.clear(); + } + + // Every standardized ICE connection state in order that has been seen by the + // observer. + std::vector + standardized_ice_connection_state_history() const { + return standardized_ice_connection_state_history_; + } + + // Every PeerConnection state in order that has been seen by the observer. + std::vector + peer_connection_state_history() const { + return peer_connection_state_history_; + } + + // Every ICE gathering state in order that has been seen by the observer. + std::vector + ice_gathering_state_history() const { + return ice_gathering_state_history_; + } + std::vector + ice_candidate_pair_change_history() const { + return ice_candidate_pair_change_history_; + } + + // Every PeerConnection signaling state in order that has been seen by the + // observer. 
+ std::vector + peer_connection_signaling_state_history() const { + return peer_connection_signaling_state_history_; + } + + void AddAudioVideoTracks() { + AddAudioTrack(); + AddVideoTrack(); + } + + rtc::scoped_refptr AddAudioTrack() { + return AddTrack(CreateLocalAudioTrack()); + } + + rtc::scoped_refptr AddVideoTrack() { + return AddTrack(CreateLocalVideoTrack()); + } + + rtc::scoped_refptr CreateLocalAudioTrack() { + cricket::AudioOptions options; + // Disable highpass filter so that we can get all the test audio frames. + options.highpass_filter = false; + rtc::scoped_refptr source = + peer_connection_factory_->CreateAudioSource(options); + // TODO(perkj): Test audio source when it is implemented. Currently audio + // always use the default input. + return peer_connection_factory_->CreateAudioTrack(rtc::CreateRandomUuid(), + source); + } + + rtc::scoped_refptr CreateLocalVideoTrack() { + webrtc::FakePeriodicVideoSource::Config config; + config.timestamp_offset_ms = rtc::TimeMillis(); + return CreateLocalVideoTrackInternal(config); + } + + rtc::scoped_refptr + CreateLocalVideoTrackWithConfig( + webrtc::FakePeriodicVideoSource::Config config) { + return CreateLocalVideoTrackInternal(config); + } + + rtc::scoped_refptr + CreateLocalVideoTrackWithRotation(webrtc::VideoRotation rotation) { + webrtc::FakePeriodicVideoSource::Config config; + config.rotation = rotation; + config.timestamp_offset_ms = rtc::TimeMillis(); + return CreateLocalVideoTrackInternal(config); + } + + rtc::scoped_refptr AddTrack( + rtc::scoped_refptr track, + const std::vector& stream_ids = {}) { + auto result = pc()->AddTrack(track, stream_ids); + EXPECT_EQ(RTCErrorType::NONE, result.error().type()); + return result.MoveValue(); + } + + std::vector> GetReceiversOfType( + cricket::MediaType media_type) { + std::vector> receivers; + for (const auto& receiver : pc()->GetReceivers()) { + if (receiver->media_type() == media_type) { + receivers.push_back(receiver); + } + } + return receivers; + } + 
+ rtc::scoped_refptr GetFirstTransceiverOfType( + cricket::MediaType media_type) { + for (auto transceiver : pc()->GetTransceivers()) { + if (transceiver->receiver()->media_type() == media_type) { + return transceiver; + } + } + return nullptr; + } + + bool SignalingStateStable() { + return pc()->signaling_state() == webrtc::PeerConnectionInterface::kStable; + } + + void CreateDataChannel() { CreateDataChannel(nullptr); } + + void CreateDataChannel(const webrtc::DataChannelInit* init) { + CreateDataChannel(kDataChannelLabel, init); + } + + void CreateDataChannel(const std::string& label, + const webrtc::DataChannelInit* init) { + data_channel_ = pc()->CreateDataChannel(label, init); + ASSERT_TRUE(data_channel_.get() != nullptr); + data_observer_.reset(new MockDataChannelObserver(data_channel_)); + } + + DataChannelInterface* data_channel() { return data_channel_; } + const MockDataChannelObserver* data_observer() const { + return data_observer_.get(); + } + + int audio_frames_received() const { + return fake_audio_capture_module_->frames_received(); + } + + // Takes minimum of video frames received for each track. + // + // Can be used like: + // EXPECT_GE(expected_frames, min_video_frames_received_per_track()); + // + // To ensure that all video tracks received at least a certain number of + // frames. + int min_video_frames_received_per_track() const { + int min_frames = INT_MAX; + if (fake_video_renderers_.empty()) { + return 0; + } + + for (const auto& pair : fake_video_renderers_) { + min_frames = std::min(min_frames, pair.second->num_rendered_frames()); + } + return min_frames; + } + + // Returns a MockStatsObserver in a state after stats gathering finished, + // which can be used to access the gathered stats. 
+ rtc::scoped_refptr OldGetStatsForTrack( + webrtc::MediaStreamTrackInterface* track) { + auto observer = rtc::make_ref_counted(); + EXPECT_TRUE(peer_connection_->GetStats( + observer, nullptr, PeerConnectionInterface::kStatsOutputLevelStandard)); + EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout); + return observer; + } + + // Version that doesn't take a track "filter", and gathers all stats. + rtc::scoped_refptr OldGetStats() { + return OldGetStatsForTrack(nullptr); + } + + // Synchronously gets stats and returns them. If it times out, fails the test + // and returns null. + rtc::scoped_refptr NewGetStats() { + auto callback = + rtc::make_ref_counted(); + peer_connection_->GetStats(callback); + EXPECT_TRUE_WAIT(callback->called(), kDefaultTimeout); + return callback->report(); + } + + int rendered_width() { + EXPECT_FALSE(fake_video_renderers_.empty()); + return fake_video_renderers_.empty() + ? 0 + : fake_video_renderers_.begin()->second->width(); + } + + int rendered_height() { + EXPECT_FALSE(fake_video_renderers_.empty()); + return fake_video_renderers_.empty() + ? 0 + : fake_video_renderers_.begin()->second->height(); + } + + double rendered_aspect_ratio() { + if (rendered_height() == 0) { + return 0.0; + } + return static_cast(rendered_width()) / rendered_height(); + } + + webrtc::VideoRotation rendered_rotation() { + EXPECT_FALSE(fake_video_renderers_.empty()); + return fake_video_renderers_.empty() + ? webrtc::kVideoRotation_0 + : fake_video_renderers_.begin()->second->rotation(); + } + + int local_rendered_width() { + return local_video_renderer_ ? local_video_renderer_->width() : 0; + } + + int local_rendered_height() { + return local_video_renderer_ ? 
local_video_renderer_->height() : 0; + } + + double local_rendered_aspect_ratio() { + if (local_rendered_height() == 0) { + return 0.0; + } + return static_cast(local_rendered_width()) / + local_rendered_height(); + } + + size_t number_of_remote_streams() { + if (!pc()) { + return 0; + } + return pc()->remote_streams()->count(); + } + + StreamCollectionInterface* remote_streams() const { + if (!pc()) { + ADD_FAILURE(); + return nullptr; + } + return pc()->remote_streams(); + } + + StreamCollectionInterface* local_streams() { + if (!pc()) { + ADD_FAILURE(); + return nullptr; + } + return pc()->local_streams(); + } + + webrtc::PeerConnectionInterface::SignalingState signaling_state() { + return pc()->signaling_state(); + } + + webrtc::PeerConnectionInterface::IceConnectionState ice_connection_state() { + return pc()->ice_connection_state(); + } + + webrtc::PeerConnectionInterface::IceConnectionState + standardized_ice_connection_state() { + return pc()->standardized_ice_connection_state(); + } + + webrtc::PeerConnectionInterface::IceGatheringState ice_gathering_state() { + return pc()->ice_gathering_state(); + } + + // Returns a MockRtpReceiverObserver for each RtpReceiver returned by + // GetReceivers. They're updated automatically when a remote offer/answer + // from the fake signaling channel is applied, or when + // ResetRtpReceiverObservers below is called. 
+ const std::vector>& + rtp_receiver_observers() { + return rtp_receiver_observers_; + } + + void ResetRtpReceiverObservers() { + rtp_receiver_observers_.clear(); + for (const rtc::scoped_refptr& receiver : + pc()->GetReceivers()) { + std::unique_ptr observer( + new MockRtpReceiverObserver(receiver->media_type())); + receiver->SetObserver(observer.get()); + rtp_receiver_observers_.push_back(std::move(observer)); + } + } + + rtc::FakeNetworkManager* network_manager() const { + return fake_network_manager_.get(); + } + cricket::PortAllocator* port_allocator() const { return port_allocator_; } + + webrtc::FakeRtcEventLogFactory* event_log_factory() const { + return event_log_factory_; + } + + const cricket::Candidate& last_candidate_gathered() const { + return last_candidate_gathered_; + } + const cricket::IceCandidateErrorEvent& error_event() const { + return error_event_; + } + + // Sets the mDNS responder for the owned fake network manager and keeps a + // reference to the responder. + void SetMdnsResponder( + std::unique_ptr mdns_responder) { + RTC_DCHECK(mdns_responder != nullptr); + mdns_responder_ = mdns_responder.get(); + network_manager()->set_mdns_responder(std::move(mdns_responder)); + } + + // Returns null on failure. + std::unique_ptr CreateOfferAndWait() { + auto observer = + rtc::make_ref_counted(); + pc()->CreateOffer(observer, offer_answer_options_); + return WaitForDescriptionFromObserver(observer); + } + bool Rollback() { + return SetRemoteDescription( + webrtc::CreateSessionDescription(SdpType::kRollback, "")); + } + + // Functions for querying stats. + void StartWatchingDelayStats() { + // Get the baseline numbers for audio_packets and audio_delay. 
+ auto received_stats = NewGetStats(); + auto track_stats = + received_stats->GetStatsOfType()[0]; + ASSERT_TRUE(track_stats->relative_packet_arrival_delay.is_defined()); + auto rtp_stats = + received_stats->GetStatsOfType()[0]; + ASSERT_TRUE(rtp_stats->packets_received.is_defined()); + ASSERT_TRUE(rtp_stats->track_id.is_defined()); + audio_track_stats_id_ = track_stats->id(); + ASSERT_TRUE(received_stats->Get(audio_track_stats_id_)); + rtp_stats_id_ = rtp_stats->id(); + ASSERT_EQ(audio_track_stats_id_, *rtp_stats->track_id); + audio_packets_stat_ = *rtp_stats->packets_received; + audio_delay_stat_ = *track_stats->relative_packet_arrival_delay; + audio_samples_stat_ = *track_stats->total_samples_received; + audio_concealed_stat_ = *track_stats->concealed_samples; + } + + void UpdateDelayStats(std::string tag, int desc_size) { + auto report = NewGetStats(); + auto track_stats = + report->GetAs(audio_track_stats_id_); + ASSERT_TRUE(track_stats); + auto rtp_stats = + report->GetAs(rtp_stats_id_); + ASSERT_TRUE(rtp_stats); + auto delta_packets = *rtp_stats->packets_received - audio_packets_stat_; + auto delta_rpad = + *track_stats->relative_packet_arrival_delay - audio_delay_stat_; + auto recent_delay = delta_packets > 0 ? delta_rpad / delta_packets : -1; + // The purpose of these checks is to sound the alarm early if we introduce + // serious regressions. The numbers are not acceptable for production, but + // occur on slow bots. + // + // An average relative packet arrival delay over the renegotiation of + // > 100 ms indicates that something is dramatically wrong, and will impact + // quality for sure. 
+ // Worst bots: + // linux_x86_dbg at 0.206 +#if !defined(NDEBUG) + EXPECT_GT(0.25, recent_delay) << tag << " size " << desc_size; +#else + EXPECT_GT(0.1, recent_delay) << tag << " size " << desc_size; +#endif + auto delta_samples = + *track_stats->total_samples_received - audio_samples_stat_; + auto delta_concealed = + *track_stats->concealed_samples - audio_concealed_stat_; + // These limits should be adjusted down as we improve: + // + // Concealing more than 4000 samples during a renegotiation is unacceptable. + // But some bots are slow. + + // Worst bots: + // linux_more_configs bot at conceal count 5184 + // android_arm_rel at conceal count 9241 + // linux_x86_dbg at 15174 +#if !defined(NDEBUG) + EXPECT_GT(18000U, delta_concealed) << "Concealed " << delta_concealed + << " of " << delta_samples << " samples"; +#else + EXPECT_GT(15000U, delta_concealed) << "Concealed " << delta_concealed + << " of " << delta_samples << " samples"; +#endif + // Concealing more than 20% of samples during a renegotiation is + // unacceptable. 
+ // Worst bots: + // linux_more_configs bot at conceal rate 0.516 + // linux_x86_dbg bot at conceal rate 0.854 + if (delta_samples > 0) { +#if !defined(NDEBUG) + EXPECT_GT(0.95, 1.0 * delta_concealed / delta_samples) + << "Concealed " << delta_concealed << " of " << delta_samples + << " samples"; +#else + EXPECT_GT(0.6, 1.0 * delta_concealed / delta_samples) + << "Concealed " << delta_concealed << " of " << delta_samples + << " samples"; +#endif + } + // Increment trailing counters + audio_packets_stat_ = *rtp_stats->packets_received; + audio_delay_stat_ = *track_stats->relative_packet_arrival_delay; + audio_samples_stat_ = *track_stats->total_samples_received; + audio_concealed_stat_ = *track_stats->concealed_samples; + } + + private: + explicit PeerConnectionIntegrationWrapper(const std::string& debug_name) + : debug_name_(debug_name) {} + + bool Init(const PeerConnectionFactory::Options* options, + const PeerConnectionInterface::RTCConfiguration* config, + webrtc::PeerConnectionDependencies dependencies, + rtc::Thread* network_thread, + rtc::Thread* worker_thread, + std::unique_ptr event_log_factory, + bool reset_encoder_factory, + bool reset_decoder_factory) { + // There's an error in this test code if Init ends up being called twice. 
+ RTC_DCHECK(!peer_connection_); + RTC_DCHECK(!peer_connection_factory_); + + fake_network_manager_.reset(new rtc::FakeNetworkManager()); + fake_network_manager_->AddInterface(kDefaultLocalAddress); + + std::unique_ptr port_allocator( + new cricket::BasicPortAllocator(fake_network_manager_.get())); + port_allocator_ = port_allocator.get(); + fake_audio_capture_module_ = FakeAudioCaptureModule::Create(); + if (!fake_audio_capture_module_) { + return false; + } + rtc::Thread* const signaling_thread = rtc::Thread::Current(); + + webrtc::PeerConnectionFactoryDependencies pc_factory_dependencies; + pc_factory_dependencies.network_thread = network_thread; + pc_factory_dependencies.worker_thread = worker_thread; + pc_factory_dependencies.signaling_thread = signaling_thread; + pc_factory_dependencies.task_queue_factory = + webrtc::CreateDefaultTaskQueueFactory(); + pc_factory_dependencies.trials = std::make_unique(); + cricket::MediaEngineDependencies media_deps; + media_deps.task_queue_factory = + pc_factory_dependencies.task_queue_factory.get(); + media_deps.adm = fake_audio_capture_module_; + webrtc::SetMediaEngineDefaults(&media_deps); + + if (reset_encoder_factory) { + media_deps.video_encoder_factory.reset(); + } + if (reset_decoder_factory) { + media_deps.video_decoder_factory.reset(); + } + + if (!media_deps.audio_processing) { + // If the standard Creation method for APM returns a null pointer, instead + // use the builder for testing to create an APM object. 
+ media_deps.audio_processing = AudioProcessingBuilderForTesting().Create(); + } + + media_deps.trials = pc_factory_dependencies.trials.get(); + + pc_factory_dependencies.media_engine = + cricket::CreateMediaEngine(std::move(media_deps)); + pc_factory_dependencies.call_factory = webrtc::CreateCallFactory(); + if (event_log_factory) { + event_log_factory_ = event_log_factory.get(); + pc_factory_dependencies.event_log_factory = std::move(event_log_factory); + } else { + pc_factory_dependencies.event_log_factory = + std::make_unique( + pc_factory_dependencies.task_queue_factory.get()); + } + peer_connection_factory_ = webrtc::CreateModularPeerConnectionFactory( + std::move(pc_factory_dependencies)); + + if (!peer_connection_factory_) { + return false; + } + if (options) { + peer_connection_factory_->SetOptions(*options); + } + if (config) { + sdp_semantics_ = config->sdp_semantics; + } + + dependencies.allocator = std::move(port_allocator); + peer_connection_ = CreatePeerConnection(config, std::move(dependencies)); + return peer_connection_.get() != nullptr; + } + + rtc::scoped_refptr CreatePeerConnection( + const PeerConnectionInterface::RTCConfiguration* config, + webrtc::PeerConnectionDependencies dependencies) { + PeerConnectionInterface::RTCConfiguration modified_config; + // If |config| is null, this will result in a default configuration being + // used. + if (config) { + modified_config = *config; + } + // Disable resolution adaptation; we don't want it interfering with the + // test results. + // TODO(deadbeef): Do something more robust. Since we're testing for aspect + // ratios and not specific resolutions, is this even necessary? 
+ modified_config.set_cpu_adaptation(false); + + dependencies.observer = this; + return peer_connection_factory_->CreatePeerConnection( + modified_config, std::move(dependencies)); + } + + void set_signaling_message_receiver( + SignalingMessageReceiver* signaling_message_receiver) { + signaling_message_receiver_ = signaling_message_receiver; + } + + void set_signaling_delay_ms(int delay_ms) { signaling_delay_ms_ = delay_ms; } + + void set_signal_ice_candidates(bool signal) { + signal_ice_candidates_ = signal; + } + + rtc::scoped_refptr CreateLocalVideoTrackInternal( + webrtc::FakePeriodicVideoSource::Config config) { + // Set max frame rate to 10fps to reduce the risk of test flakiness. + // TODO(deadbeef): Do something more robust. + config.frame_interval_ms = 100; + + video_track_sources_.emplace_back( + rtc::make_ref_counted( + config, false /* remote */)); + rtc::scoped_refptr track( + peer_connection_factory_->CreateVideoTrack( + rtc::CreateRandomUuid(), video_track_sources_.back())); + if (!local_video_renderer_) { + local_video_renderer_.reset(new webrtc::FakeVideoTrackRenderer(track)); + } + return track; + } + + void HandleIncomingOffer(const std::string& msg) { + RTC_LOG(LS_INFO) << debug_name_ << ": HandleIncomingOffer"; + std::unique_ptr desc = + webrtc::CreateSessionDescription(SdpType::kOffer, msg); + if (received_sdp_munger_) { + received_sdp_munger_(desc->description()); + } + + EXPECT_TRUE(SetRemoteDescription(std::move(desc))); + // Setting a remote description may have changed the number of receivers, + // so reset the receiver observers. 
+ ResetRtpReceiverObservers(); + if (remote_offer_handler_) { + remote_offer_handler_(); + } + auto answer = CreateAnswer(); + ASSERT_NE(nullptr, answer); + EXPECT_TRUE(SetLocalDescriptionAndSendSdpMessage(std::move(answer))); + } + + void HandleIncomingAnswer(const std::string& msg) { + RTC_LOG(LS_INFO) << debug_name_ << ": HandleIncomingAnswer"; + std::unique_ptr desc = + webrtc::CreateSessionDescription(SdpType::kAnswer, msg); + if (received_sdp_munger_) { + received_sdp_munger_(desc->description()); + } + + EXPECT_TRUE(SetRemoteDescription(std::move(desc))); + // Set the RtpReceiverObserver after receivers are created. + ResetRtpReceiverObservers(); + } + + // Returns null on failure. + std::unique_ptr CreateAnswer() { + auto observer = + rtc::make_ref_counted(); + pc()->CreateAnswer(observer, offer_answer_options_); + return WaitForDescriptionFromObserver(observer); + } + + std::unique_ptr WaitForDescriptionFromObserver( + MockCreateSessionDescriptionObserver* observer) { + EXPECT_EQ_WAIT(true, observer->called(), kDefaultTimeout); + if (!observer->result()) { + return nullptr; + } + auto description = observer->MoveDescription(); + if (generated_sdp_munger_) { + generated_sdp_munger_(description->description()); + } + return description; + } + + // Setting the local description and sending the SDP message over the fake + // signaling channel are combined into the same method because the SDP + // message needs to be sent as soon as SetLocalDescription finishes, without + // waiting for the observer to be called. This ensures that ICE candidates + // don't outrace the description. 
+ bool SetLocalDescriptionAndSendSdpMessage( + std::unique_ptr desc) { + auto observer = rtc::make_ref_counted(); + RTC_LOG(LS_INFO) << debug_name_ << ": SetLocalDescriptionAndSendSdpMessage"; + SdpType type = desc->GetType(); + std::string sdp; + EXPECT_TRUE(desc->ToString(&sdp)); + RTC_LOG(LS_INFO) << debug_name_ << ": local SDP contents=\n" << sdp; + pc()->SetLocalDescription(observer, desc.release()); + RemoveUnusedVideoRenderers(); + // As mentioned above, we need to send the message immediately after + // SetLocalDescription. + SendSdpMessage(type, sdp); + EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout); + return true; + } + + bool SetRemoteDescription(std::unique_ptr desc) { + auto observer = rtc::make_ref_counted(); + RTC_LOG(LS_INFO) << debug_name_ << ": SetRemoteDescription"; + pc()->SetRemoteDescription(observer, desc.release()); + RemoveUnusedVideoRenderers(); + EXPECT_TRUE_WAIT(observer->called(), kDefaultTimeout); + return observer->result(); + } + + // This is a work around to remove unused fake_video_renderers from + // transceivers that have either stopped or are no longer receiving. + void RemoveUnusedVideoRenderers() { + if (sdp_semantics_ != SdpSemantics::kUnifiedPlan) { + return; + } + auto transceivers = pc()->GetTransceivers(); + std::set active_renderers; + for (auto& transceiver : transceivers) { + // Note - we don't check for direction here. This function is called + // before direction is set, and in that case, we should not remove + // the renderer. + if (transceiver->receiver()->media_type() == cricket::MEDIA_TYPE_VIDEO) { + active_renderers.insert(transceiver->receiver()->track()->id()); + } + } + for (auto it = fake_video_renderers_.begin(); + it != fake_video_renderers_.end();) { + // Remove fake video renderers belonging to any non-active transceivers. 
+ if (!active_renderers.count(it->first)) { + it = fake_video_renderers_.erase(it); + } else { + it++; + } + } + } + + // Simulate sending a blob of SDP with delay |signaling_delay_ms_| (0 by + // default). + void SendSdpMessage(SdpType type, const std::string& msg) { + if (signaling_delay_ms_ == 0) { + RelaySdpMessageIfReceiverExists(type, msg); + } else { + rtc::Thread::Current()->PostDelayedTask( + ToQueuedTask(task_safety_.flag(), + [this, type, msg] { + RelaySdpMessageIfReceiverExists(type, msg); + }), + signaling_delay_ms_); + } + } + + void RelaySdpMessageIfReceiverExists(SdpType type, const std::string& msg) { + if (signaling_message_receiver_) { + signaling_message_receiver_->ReceiveSdpMessage(type, msg); + } + } + + // Simulate trickling an ICE candidate with delay |signaling_delay_ms_| (0 by + // default). + void SendIceMessage(const std::string& sdp_mid, + int sdp_mline_index, + const std::string& msg) { + if (signaling_delay_ms_ == 0) { + RelayIceMessageIfReceiverExists(sdp_mid, sdp_mline_index, msg); + } else { + rtc::Thread::Current()->PostDelayedTask( + ToQueuedTask(task_safety_.flag(), + [this, sdp_mid, sdp_mline_index, msg] { + RelayIceMessageIfReceiverExists(sdp_mid, + sdp_mline_index, msg); + }), + signaling_delay_ms_); + } + } + + void RelayIceMessageIfReceiverExists(const std::string& sdp_mid, + int sdp_mline_index, + const std::string& msg) { + if (signaling_message_receiver_) { + signaling_message_receiver_->ReceiveIceMessage(sdp_mid, sdp_mline_index, + msg); + } + } + + // SignalingMessageReceiver callbacks. 
+ void ReceiveSdpMessage(SdpType type, const std::string& msg) override { + if (type == SdpType::kOffer) { + HandleIncomingOffer(msg); + } else { + HandleIncomingAnswer(msg); + } + } + + void ReceiveIceMessage(const std::string& sdp_mid, + int sdp_mline_index, + const std::string& msg) override { + RTC_LOG(LS_INFO) << debug_name_ << ": ReceiveIceMessage"; + std::unique_ptr candidate( + webrtc::CreateIceCandidate(sdp_mid, sdp_mline_index, msg, nullptr)); + EXPECT_TRUE(pc()->AddIceCandidate(candidate.get())); + } + + // PeerConnectionObserver callbacks. + void OnSignalingChange( + webrtc::PeerConnectionInterface::SignalingState new_state) override { + EXPECT_EQ(pc()->signaling_state(), new_state); + peer_connection_signaling_state_history_.push_back(new_state); + } + void OnAddTrack(rtc::scoped_refptr receiver, + const std::vector>& + streams) override { + if (receiver->media_type() == cricket::MEDIA_TYPE_VIDEO) { + rtc::scoped_refptr video_track( + static_cast(receiver->track().get())); + ASSERT_TRUE(fake_video_renderers_.find(video_track->id()) == + fake_video_renderers_.end()); + fake_video_renderers_[video_track->id()] = + std::make_unique(video_track); + } + } + void OnRemoveTrack( + rtc::scoped_refptr receiver) override { + if (receiver->media_type() == cricket::MEDIA_TYPE_VIDEO) { + auto it = fake_video_renderers_.find(receiver->track()->id()); + if (it != fake_video_renderers_.end()) { + fake_video_renderers_.erase(it); + } else { + RTC_LOG(LS_ERROR) << "OnRemoveTrack called for non-active renderer"; + } + } + } + void OnRenegotiationNeeded() override {} + void OnIceConnectionChange( + webrtc::PeerConnectionInterface::IceConnectionState new_state) override { + EXPECT_EQ(pc()->ice_connection_state(), new_state); + ice_connection_state_history_.push_back(new_state); + } + void OnStandardizedIceConnectionChange( + webrtc::PeerConnectionInterface::IceConnectionState new_state) override { + standardized_ice_connection_state_history_.push_back(new_state); + } + 
void OnConnectionChange( + webrtc::PeerConnectionInterface::PeerConnectionState new_state) override { + peer_connection_state_history_.push_back(new_state); + } + + void OnIceGatheringChange( + webrtc::PeerConnectionInterface::IceGatheringState new_state) override { + EXPECT_EQ(pc()->ice_gathering_state(), new_state); + ice_gathering_state_history_.push_back(new_state); + } + + void OnIceSelectedCandidatePairChanged( + const cricket::CandidatePairChangeEvent& event) { + ice_candidate_pair_change_history_.push_back(event); + } + + void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) override { + RTC_LOG(LS_INFO) << debug_name_ << ": OnIceCandidate"; + + if (remote_async_resolver_) { + const auto& local_candidate = candidate->candidate(); + if (local_candidate.address().IsUnresolvedIP()) { + RTC_DCHECK(local_candidate.type() == cricket::LOCAL_PORT_TYPE); + rtc::SocketAddress resolved_addr(local_candidate.address()); + const auto resolved_ip = mdns_responder_->GetMappedAddressForName( + local_candidate.address().hostname()); + RTC_DCHECK(!resolved_ip.IsNil()); + resolved_addr.SetResolvedIP(resolved_ip); + EXPECT_CALL(*remote_async_resolver_, GetResolvedAddress(_, _)) + .WillOnce(DoAll(SetArgPointee<1>(resolved_addr), Return(true))); + EXPECT_CALL(*remote_async_resolver_, Destroy(_)); + } + } + + std::string ice_sdp; + EXPECT_TRUE(candidate->ToString(&ice_sdp)); + if (signaling_message_receiver_ == nullptr || !signal_ice_candidates_) { + // Remote party may be deleted. 
+ return; + } + SendIceMessage(candidate->sdp_mid(), candidate->sdp_mline_index(), ice_sdp); + last_candidate_gathered_ = candidate->candidate(); + } + void OnIceCandidateError(const std::string& address, + int port, + const std::string& url, + int error_code, + const std::string& error_text) override { + error_event_ = cricket::IceCandidateErrorEvent(address, port, url, + error_code, error_text); + } + void OnDataChannel( + rtc::scoped_refptr data_channel) override { + RTC_LOG(LS_INFO) << debug_name_ << ": OnDataChannel"; + data_channel_ = data_channel; + data_observer_.reset(new MockDataChannelObserver(data_channel)); + } + + std::string debug_name_; + + std::unique_ptr fake_network_manager_; + // Reference to the mDNS responder owned by |fake_network_manager_| after set. + webrtc::FakeMdnsResponder* mdns_responder_ = nullptr; + + rtc::scoped_refptr peer_connection_; + rtc::scoped_refptr + peer_connection_factory_; + + cricket::PortAllocator* port_allocator_; + // Needed to keep track of number of frames sent. + rtc::scoped_refptr fake_audio_capture_module_; + // Needed to keep track of number of frames received. + std::map> + fake_video_renderers_; + // Needed to ensure frames aren't received for removed tracks. + std::vector> + removed_fake_video_renderers_; + + // For remote peer communication. + SignalingMessageReceiver* signaling_message_receiver_ = nullptr; + int signaling_delay_ms_ = 0; + bool signal_ice_candidates_ = true; + cricket::Candidate last_candidate_gathered_; + cricket::IceCandidateErrorEvent error_event_; + + // Store references to the video sources we've created, so that we can stop + // them, if required. + std::vector> + video_track_sources_; + // |local_video_renderer_| attached to the first created local video track. 
+ std::unique_ptr local_video_renderer_; + + SdpSemantics sdp_semantics_; + PeerConnectionInterface::RTCOfferAnswerOptions offer_answer_options_; + std::function received_sdp_munger_; + std::function generated_sdp_munger_; + std::function remote_offer_handler_; + rtc::MockAsyncResolver* remote_async_resolver_ = nullptr; + rtc::scoped_refptr data_channel_; + std::unique_ptr data_observer_; + + std::vector> rtp_receiver_observers_; + + std::vector + ice_connection_state_history_; + std::vector + standardized_ice_connection_state_history_; + std::vector + peer_connection_state_history_; + std::vector + ice_gathering_state_history_; + std::vector + ice_candidate_pair_change_history_; + std::vector + peer_connection_signaling_state_history_; + webrtc::FakeRtcEventLogFactory* event_log_factory_; + + // Variables for tracking delay stats on an audio track + int audio_packets_stat_ = 0; + double audio_delay_stat_ = 0.0; + uint64_t audio_samples_stat_ = 0; + uint64_t audio_concealed_stat_ = 0; + std::string rtp_stats_id_; + std::string audio_track_stats_id_; + + ScopedTaskSafety task_safety_; + + friend class PeerConnectionIntegrationBaseTest; +}; + +class MockRtcEventLogOutput : public webrtc::RtcEventLogOutput { + public: + virtual ~MockRtcEventLogOutput() = default; + MOCK_METHOD(bool, IsActive, (), (const, override)); + MOCK_METHOD(bool, Write, (const std::string&), (override)); +}; + +// This helper object is used for both specifying how many audio/video frames +// are expected to be received for a caller/callee. It provides helper functions +// to specify these expectations. The object initially starts in a state of no +// expectations. 
+class MediaExpectations {
+ public:
+  enum ExpectFrames {
+    kExpectSomeFrames,
+    kExpectNoFrames,
+    kNoExpectation,
+  };
+
+  void ExpectBidirectionalAudioAndVideo() {
+    ExpectBidirectionalAudio();
+    ExpectBidirectionalVideo();
+  }
+
+  void ExpectBidirectionalAudio() {
+    CallerExpectsSomeAudio();
+    CalleeExpectsSomeAudio();
+  }
+
+  void ExpectNoAudio() {
+    CallerExpectsNoAudio();
+    CalleeExpectsNoAudio();
+  }
+
+  void ExpectBidirectionalVideo() {
+    CallerExpectsSomeVideo();
+    CalleeExpectsSomeVideo();
+  }
+
+  void ExpectNoVideo() {
+    CallerExpectsNoVideo();
+    CalleeExpectsNoVideo();
+  }
+
+  void CallerExpectsSomeAudioAndVideo() {
+    CallerExpectsSomeAudio();
+    CallerExpectsSomeVideo();
+  }
+
+  void CalleeExpectsSomeAudioAndVideo() {
+    CalleeExpectsSomeAudio();
+    CalleeExpectsSomeVideo();
+  }
+
+  // Caller's audio functions.
+  void CallerExpectsSomeAudio(
+      int expected_audio_frames = kDefaultExpectedAudioFrameCount) {
+    caller_audio_expectation_ = kExpectSomeFrames;
+    caller_audio_frames_expected_ = expected_audio_frames;
+  }
+
+  void CallerExpectsNoAudio() {
+    caller_audio_expectation_ = kExpectNoFrames;
+    caller_audio_frames_expected_ = 0;
+  }
+
+  // Caller's video functions.
+  void CallerExpectsSomeVideo(
+      int expected_video_frames = kDefaultExpectedVideoFrameCount) {
+    caller_video_expectation_ = kExpectSomeFrames;
+    caller_video_frames_expected_ = expected_video_frames;
+  }
+
+  void CallerExpectsNoVideo() {
+    caller_video_expectation_ = kExpectNoFrames;
+    caller_video_frames_expected_ = 0;
+  }
+
+  // Callee's audio functions.
+  void CalleeExpectsSomeAudio(
+      int expected_audio_frames = kDefaultExpectedAudioFrameCount) {
+    callee_audio_expectation_ = kExpectSomeFrames;
+    callee_audio_frames_expected_ = expected_audio_frames;
+  }
+
+  void CalleeExpectsNoAudio() {
+    callee_audio_expectation_ = kExpectNoFrames;
+    callee_audio_frames_expected_ = 0;
+  }
+
+  // Callee's video functions.
+  void CalleeExpectsSomeVideo(
+      int expected_video_frames = kDefaultExpectedVideoFrameCount) {
+    callee_video_expectation_ = kExpectSomeFrames;
+    callee_video_frames_expected_ = expected_video_frames;
+  }
+
+  void CalleeExpectsNoVideo() {
+    callee_video_expectation_ = kExpectNoFrames;
+    callee_video_frames_expected_ = 0;
+  }
+
+  ExpectFrames caller_audio_expectation_ = kNoExpectation;
+  ExpectFrames caller_video_expectation_ = kNoExpectation;
+  ExpectFrames callee_audio_expectation_ = kNoExpectation;
+  ExpectFrames callee_video_expectation_ = kNoExpectation;
+  int caller_audio_frames_expected_ = 0;
+  int caller_video_frames_expected_ = 0;
+  int callee_audio_frames_expected_ = 0;
+  int callee_video_frames_expected_ = 0;
+};
+
+// IceTransportInterface implementation backed by a cricket::FakeIceTransport,
+// so tests can hand PeerConnection an ICE transport they control.
+// NOTE(review): the template arguments in this hunk had been stripped by the
+// extraction (`std::make_unique(` etc.); they are restored here.
+class MockIceTransport : public webrtc::IceTransportInterface {
+ public:
+  MockIceTransport(const std::string& name, int component)
+      : internal_(std::make_unique<cricket::FakeIceTransport>(
+            name,
+            component,
+            nullptr /* network_thread */)) {}
+  ~MockIceTransport() = default;
+  cricket::IceTransportInternal* internal() { return internal_.get(); }
+
+ private:
+  std::unique_ptr<cricket::FakeIceTransport> internal_;
+};
+
+// Factory that hands out MockIceTransports and records (via gmock) each
+// CreateIceTransport() call, so tests can verify how many transports a
+// PeerConnection created.
+class MockIceTransportFactory : public IceTransportFactory {
+ public:
+  ~MockIceTransportFactory() override = default;
+  rtc::scoped_refptr<IceTransportInterface> CreateIceTransport(
+      const std::string& transport_name,
+      int component,
+      IceTransportInit init) {
+    RecordIceTransportCreated();
+    return rtc::make_ref_counted<MockIceTransport>(transport_name, component);
+  }
+  MOCK_METHOD(void, RecordIceTransportCreated, ());
+};
+
+// Tests two PeerConnections connecting to each other end-to-end, using a
+// virtual network, fake A/V capture and fake encoder/decoders. The
+// PeerConnections share the threads/socket servers, but use separate versions
+// of everything else (including "PeerConnectionFactory"s).
+class PeerConnectionIntegrationBaseTest : public ::testing::Test { + public: + PeerConnectionIntegrationBaseTest( + SdpSemantics sdp_semantics, + absl::optional field_trials = absl::nullopt) + : sdp_semantics_(sdp_semantics), + ss_(new rtc::VirtualSocketServer()), + fss_(new rtc::FirewallSocketServer(ss_.get())), + network_thread_(new rtc::Thread(fss_.get())), + worker_thread_(rtc::Thread::Create()), + field_trials_(field_trials.has_value() + ? new test::ScopedFieldTrials(*field_trials) + : nullptr) { + network_thread_->SetName("PCNetworkThread", this); + worker_thread_->SetName("PCWorkerThread", this); + RTC_CHECK(network_thread_->Start()); + RTC_CHECK(worker_thread_->Start()); + webrtc::metrics::Reset(); + } + + ~PeerConnectionIntegrationBaseTest() { + // The PeerConnections should be deleted before the TurnCustomizers. + // A TurnPort is created with a raw pointer to a TurnCustomizer. The + // TurnPort has the same lifetime as the PeerConnection, so it's expected + // that the TurnCustomizer outlives the life of the PeerConnection or else + // when Send() is called it will hit a seg fault. + if (caller_) { + caller_->set_signaling_message_receiver(nullptr); + caller_->pc()->Close(); + delete SetCallerPcWrapperAndReturnCurrent(nullptr); + } + if (callee_) { + callee_->set_signaling_message_receiver(nullptr); + callee_->pc()->Close(); + delete SetCalleePcWrapperAndReturnCurrent(nullptr); + } + + // If turn servers were created for the test they need to be destroyed on + // the network thread. + network_thread()->Invoke(RTC_FROM_HERE, [this] { + turn_servers_.clear(); + turn_customizers_.clear(); + }); + } + + bool SignalingStateStable() { + return caller_->SignalingStateStable() && callee_->SignalingStateStable(); + } + + bool DtlsConnected() { + // TODO(deadbeef): kIceConnectionConnected currently means both ICE and DTLS + // are connected. This is an important distinction. Once we have separate + // ICE and DTLS state, this check needs to use the DTLS state. 
+ return (callee()->ice_connection_state() == + webrtc::PeerConnectionInterface::kIceConnectionConnected || + callee()->ice_connection_state() == + webrtc::PeerConnectionInterface::kIceConnectionCompleted) && + (caller()->ice_connection_state() == + webrtc::PeerConnectionInterface::kIceConnectionConnected || + caller()->ice_connection_state() == + webrtc::PeerConnectionInterface::kIceConnectionCompleted); + } + + // When |event_log_factory| is null, the default implementation of the event + // log factory will be used. + std::unique_ptr CreatePeerConnectionWrapper( + const std::string& debug_name, + const PeerConnectionFactory::Options* options, + const RTCConfiguration* config, + webrtc::PeerConnectionDependencies dependencies, + std::unique_ptr event_log_factory, + bool reset_encoder_factory, + bool reset_decoder_factory) { + RTCConfiguration modified_config; + if (config) { + modified_config = *config; + } + modified_config.sdp_semantics = sdp_semantics_; + if (!dependencies.cert_generator) { + dependencies.cert_generator = + std::make_unique(); + } + std::unique_ptr client( + new PeerConnectionIntegrationWrapper(debug_name)); + + if (!client->Init(options, &modified_config, std::move(dependencies), + network_thread_.get(), worker_thread_.get(), + std::move(event_log_factory), reset_encoder_factory, + reset_decoder_factory)) { + return nullptr; + } + return client; + } + + std::unique_ptr + CreatePeerConnectionWrapperWithFakeRtcEventLog( + const std::string& debug_name, + const PeerConnectionFactory::Options* options, + const RTCConfiguration* config, + webrtc::PeerConnectionDependencies dependencies) { + return CreatePeerConnectionWrapper( + debug_name, options, config, std::move(dependencies), + std::make_unique(), + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + } + + bool CreatePeerConnectionWrappers() { + return CreatePeerConnectionWrappersWithConfig( + PeerConnectionInterface::RTCConfiguration(), + 
PeerConnectionInterface::RTCConfiguration()); + } + + bool CreatePeerConnectionWrappersWithSdpSemantics( + SdpSemantics caller_semantics, + SdpSemantics callee_semantics) { + // Can't specify the sdp_semantics in the passed-in configuration since it + // will be overwritten by CreatePeerConnectionWrapper with whatever is + // stored in sdp_semantics_. So get around this by modifying the instance + // variable before calling CreatePeerConnectionWrapper for the caller and + // callee PeerConnections. + SdpSemantics original_semantics = sdp_semantics_; + sdp_semantics_ = caller_semantics; + caller_ = CreatePeerConnectionWrapper( + "Caller", nullptr, nullptr, webrtc::PeerConnectionDependencies(nullptr), + nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + sdp_semantics_ = callee_semantics; + callee_ = CreatePeerConnectionWrapper( + "Callee", nullptr, nullptr, webrtc::PeerConnectionDependencies(nullptr), + nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + sdp_semantics_ = original_semantics; + return caller_ && callee_; + } + + bool CreatePeerConnectionWrappersWithConfig( + const PeerConnectionInterface::RTCConfiguration& caller_config, + const PeerConnectionInterface::RTCConfiguration& callee_config) { + caller_ = CreatePeerConnectionWrapper( + "Caller", nullptr, &caller_config, + webrtc::PeerConnectionDependencies(nullptr), nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + callee_ = CreatePeerConnectionWrapper( + "Callee", nullptr, &callee_config, + webrtc::PeerConnectionDependencies(nullptr), nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + return caller_ && callee_; + } + + bool CreatePeerConnectionWrappersWithConfigAndDeps( + const PeerConnectionInterface::RTCConfiguration& caller_config, + webrtc::PeerConnectionDependencies caller_dependencies, + const PeerConnectionInterface::RTCConfiguration& callee_config, + 
webrtc::PeerConnectionDependencies callee_dependencies) { + caller_ = + CreatePeerConnectionWrapper("Caller", nullptr, &caller_config, + std::move(caller_dependencies), nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + callee_ = + CreatePeerConnectionWrapper("Callee", nullptr, &callee_config, + std::move(callee_dependencies), nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + return caller_ && callee_; + } + + bool CreatePeerConnectionWrappersWithOptions( + const PeerConnectionFactory::Options& caller_options, + const PeerConnectionFactory::Options& callee_options) { + caller_ = CreatePeerConnectionWrapper( + "Caller", &caller_options, nullptr, + webrtc::PeerConnectionDependencies(nullptr), nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + callee_ = CreatePeerConnectionWrapper( + "Callee", &callee_options, nullptr, + webrtc::PeerConnectionDependencies(nullptr), nullptr, + /*reset_encoder_factory=*/false, + /*reset_decoder_factory=*/false); + return caller_ && callee_; + } + + bool CreatePeerConnectionWrappersWithFakeRtcEventLog() { + PeerConnectionInterface::RTCConfiguration default_config; + caller_ = CreatePeerConnectionWrapperWithFakeRtcEventLog( + "Caller", nullptr, &default_config, + webrtc::PeerConnectionDependencies(nullptr)); + callee_ = CreatePeerConnectionWrapperWithFakeRtcEventLog( + "Callee", nullptr, &default_config, + webrtc::PeerConnectionDependencies(nullptr)); + return caller_ && callee_; + } + + std::unique_ptr + CreatePeerConnectionWrapperWithAlternateKey() { + std::unique_ptr cert_generator( + new FakeRTCCertificateGenerator()); + cert_generator->use_alternate_key(); + + webrtc::PeerConnectionDependencies dependencies(nullptr); + dependencies.cert_generator = std::move(cert_generator); + return CreatePeerConnectionWrapper("New Peer", nullptr, nullptr, + std::move(dependencies), nullptr, + /*reset_encoder_factory=*/false, + 
/*reset_decoder_factory=*/false); + } + + bool CreateOneDirectionalPeerConnectionWrappers(bool caller_to_callee) { + caller_ = CreatePeerConnectionWrapper( + "Caller", nullptr, nullptr, webrtc::PeerConnectionDependencies(nullptr), + nullptr, + /*reset_encoder_factory=*/!caller_to_callee, + /*reset_decoder_factory=*/caller_to_callee); + callee_ = CreatePeerConnectionWrapper( + "Callee", nullptr, nullptr, webrtc::PeerConnectionDependencies(nullptr), + nullptr, + /*reset_encoder_factory=*/caller_to_callee, + /*reset_decoder_factory=*/!caller_to_callee); + return caller_ && callee_; + } + + cricket::TestTurnServer* CreateTurnServer( + rtc::SocketAddress internal_address, + rtc::SocketAddress external_address, + cricket::ProtocolType type = cricket::ProtocolType::PROTO_UDP, + const std::string& common_name = "test turn server") { + rtc::Thread* thread = network_thread(); + std::unique_ptr turn_server = + network_thread()->Invoke>( + RTC_FROM_HERE, + [thread, internal_address, external_address, type, common_name] { + return std::make_unique( + thread, internal_address, external_address, type, + /*ignore_bad_certs=*/true, common_name); + }); + turn_servers_.push_back(std::move(turn_server)); + // Interactions with the turn server should be done on the network thread. + return turn_servers_.back().get(); + } + + cricket::TestTurnCustomizer* CreateTurnCustomizer() { + std::unique_ptr turn_customizer = + network_thread()->Invoke>( + RTC_FROM_HERE, + [] { return std::make_unique(); }); + turn_customizers_.push_back(std::move(turn_customizer)); + // Interactions with the turn customizer should be done on the network + // thread. + return turn_customizers_.back().get(); + } + + // Checks that the function counters for a TestTurnCustomizer are greater than + // 0. 
+ void ExpectTurnCustomizerCountersIncremented( + cricket::TestTurnCustomizer* turn_customizer) { + unsigned int allow_channel_data_counter = + network_thread()->Invoke( + RTC_FROM_HERE, [turn_customizer] { + return turn_customizer->allow_channel_data_cnt_; + }); + EXPECT_GT(allow_channel_data_counter, 0u); + unsigned int modify_counter = network_thread()->Invoke( + RTC_FROM_HERE, + [turn_customizer] { return turn_customizer->modify_cnt_; }); + EXPECT_GT(modify_counter, 0u); + } + + // Once called, SDP blobs and ICE candidates will be automatically signaled + // between PeerConnections. + void ConnectFakeSignaling() { + caller_->set_signaling_message_receiver(callee_.get()); + callee_->set_signaling_message_receiver(caller_.get()); + } + + // Once called, SDP blobs will be automatically signaled between + // PeerConnections. Note that ICE candidates will not be signaled unless they + // are in the exchanged SDP blobs. + void ConnectFakeSignalingForSdpOnly() { + ConnectFakeSignaling(); + SetSignalIceCandidates(false); + } + + void SetSignalingDelayMs(int delay_ms) { + caller_->set_signaling_delay_ms(delay_ms); + callee_->set_signaling_delay_ms(delay_ms); + } + + void SetSignalIceCandidates(bool signal) { + caller_->set_signal_ice_candidates(signal); + callee_->set_signal_ice_candidates(signal); + } + + // Messages may get lost on the unreliable DataChannel, so we send multiple + // times to avoid test flakiness. + void SendRtpDataWithRetries(webrtc::DataChannelInterface* dc, + const std::string& data, + int retries) { + for (int i = 0; i < retries; ++i) { + dc->Send(DataBuffer(data)); + } + } + + rtc::Thread* network_thread() { return network_thread_.get(); } + + rtc::VirtualSocketServer* virtual_socket_server() { return ss_.get(); } + + PeerConnectionIntegrationWrapper* caller() { return caller_.get(); } + + // Set the |caller_| to the |wrapper| passed in and return the + // original |caller_|. 
+ PeerConnectionIntegrationWrapper* SetCallerPcWrapperAndReturnCurrent( + PeerConnectionIntegrationWrapper* wrapper) { + PeerConnectionIntegrationWrapper* old = caller_.release(); + caller_.reset(wrapper); + return old; + } + + PeerConnectionIntegrationWrapper* callee() { return callee_.get(); } + + // Set the |callee_| to the |wrapper| passed in and return the + // original |callee_|. + PeerConnectionIntegrationWrapper* SetCalleePcWrapperAndReturnCurrent( + PeerConnectionIntegrationWrapper* wrapper) { + PeerConnectionIntegrationWrapper* old = callee_.release(); + callee_.reset(wrapper); + return old; + } + + void SetPortAllocatorFlags(uint32_t caller_flags, uint32_t callee_flags) { + network_thread()->Invoke(RTC_FROM_HERE, [this, caller_flags] { + caller()->port_allocator()->set_flags(caller_flags); + }); + network_thread()->Invoke(RTC_FROM_HERE, [this, callee_flags] { + callee()->port_allocator()->set_flags(callee_flags); + }); + } + + rtc::FirewallSocketServer* firewall() const { return fss_.get(); } + + // Expects the provided number of new frames to be received within + // kMaxWaitForFramesMs. The new expected frames are specified in + // |media_expectations|. Returns false if any of the expectations were + // not met. + bool ExpectNewFrames(const MediaExpectations& media_expectations) { + // Make sure there are no bogus tracks confusing the issue. + caller()->RemoveUnusedVideoRenderers(); + callee()->RemoveUnusedVideoRenderers(); + // First initialize the expected frame counts based upon the current + // frame count. 
+ int total_caller_audio_frames_expected = caller()->audio_frames_received(); + if (media_expectations.caller_audio_expectation_ == + MediaExpectations::kExpectSomeFrames) { + total_caller_audio_frames_expected += + media_expectations.caller_audio_frames_expected_; + } + int total_caller_video_frames_expected = + caller()->min_video_frames_received_per_track(); + if (media_expectations.caller_video_expectation_ == + MediaExpectations::kExpectSomeFrames) { + total_caller_video_frames_expected += + media_expectations.caller_video_frames_expected_; + } + int total_callee_audio_frames_expected = callee()->audio_frames_received(); + if (media_expectations.callee_audio_expectation_ == + MediaExpectations::kExpectSomeFrames) { + total_callee_audio_frames_expected += + media_expectations.callee_audio_frames_expected_; + } + int total_callee_video_frames_expected = + callee()->min_video_frames_received_per_track(); + if (media_expectations.callee_video_expectation_ == + MediaExpectations::kExpectSomeFrames) { + total_callee_video_frames_expected += + media_expectations.callee_video_frames_expected_; + } + + // Wait for the expected frames. + EXPECT_TRUE_WAIT(caller()->audio_frames_received() >= + total_caller_audio_frames_expected && + caller()->min_video_frames_received_per_track() >= + total_caller_video_frames_expected && + callee()->audio_frames_received() >= + total_callee_audio_frames_expected && + callee()->min_video_frames_received_per_track() >= + total_callee_video_frames_expected, + kMaxWaitForFramesMs); + bool expectations_correct = + caller()->audio_frames_received() >= + total_caller_audio_frames_expected && + caller()->min_video_frames_received_per_track() >= + total_caller_video_frames_expected && + callee()->audio_frames_received() >= + total_callee_audio_frames_expected && + callee()->min_video_frames_received_per_track() >= + total_callee_video_frames_expected; + + // After the combined wait, print out a more detailed message upon + // failure. 
+ EXPECT_GE(caller()->audio_frames_received(), + total_caller_audio_frames_expected); + EXPECT_GE(caller()->min_video_frames_received_per_track(), + total_caller_video_frames_expected); + EXPECT_GE(callee()->audio_frames_received(), + total_callee_audio_frames_expected); + EXPECT_GE(callee()->min_video_frames_received_per_track(), + total_callee_video_frames_expected); + + // We want to make sure nothing unexpected was received. + if (media_expectations.caller_audio_expectation_ == + MediaExpectations::kExpectNoFrames) { + EXPECT_EQ(caller()->audio_frames_received(), + total_caller_audio_frames_expected); + if (caller()->audio_frames_received() != + total_caller_audio_frames_expected) { + expectations_correct = false; + } + } + if (media_expectations.caller_video_expectation_ == + MediaExpectations::kExpectNoFrames) { + EXPECT_EQ(caller()->min_video_frames_received_per_track(), + total_caller_video_frames_expected); + if (caller()->min_video_frames_received_per_track() != + total_caller_video_frames_expected) { + expectations_correct = false; + } + } + if (media_expectations.callee_audio_expectation_ == + MediaExpectations::kExpectNoFrames) { + EXPECT_EQ(callee()->audio_frames_received(), + total_callee_audio_frames_expected); + if (callee()->audio_frames_received() != + total_callee_audio_frames_expected) { + expectations_correct = false; + } + } + if (media_expectations.callee_video_expectation_ == + MediaExpectations::kExpectNoFrames) { + EXPECT_EQ(callee()->min_video_frames_received_per_track(), + total_callee_video_frames_expected); + if (callee()->min_video_frames_received_per_track() != + total_callee_video_frames_expected) { + expectations_correct = false; + } + } + return expectations_correct; + } + + void ClosePeerConnections() { + if (caller()) + caller()->pc()->Close(); + if (callee()) + callee()->pc()->Close(); + } + + void TestNegotiatedCipherSuite( + const PeerConnectionFactory::Options& caller_options, + const PeerConnectionFactory::Options& 
callee_options, + int expected_cipher_suite) { + ASSERT_TRUE(CreatePeerConnectionWrappersWithOptions(caller_options, + callee_options)); + ConnectFakeSignaling(); + caller()->AddAudioVideoTracks(); + callee()->AddAudioVideoTracks(); + caller()->CreateAndSetAndSignalOffer(); + ASSERT_TRUE_WAIT(DtlsConnected(), kDefaultTimeout); + EXPECT_EQ_WAIT(rtc::SrtpCryptoSuiteToName(expected_cipher_suite), + caller()->OldGetStats()->SrtpCipher(), kDefaultTimeout); + // TODO(bugs.webrtc.org/9456): Fix it. + EXPECT_METRIC_EQ(1, webrtc::metrics::NumEvents( + "WebRTC.PeerConnection.SrtpCryptoSuite.Audio", + expected_cipher_suite)); + } + + void TestGcmNegotiationUsesCipherSuite(bool local_gcm_enabled, + bool remote_gcm_enabled, + bool aes_ctr_enabled, + int expected_cipher_suite) { + PeerConnectionFactory::Options caller_options; + caller_options.crypto_options.srtp.enable_gcm_crypto_suites = + local_gcm_enabled; + caller_options.crypto_options.srtp.enable_aes128_sha1_80_crypto_cipher = + aes_ctr_enabled; + PeerConnectionFactory::Options callee_options; + callee_options.crypto_options.srtp.enable_gcm_crypto_suites = + remote_gcm_enabled; + callee_options.crypto_options.srtp.enable_aes128_sha1_80_crypto_cipher = + aes_ctr_enabled; + TestNegotiatedCipherSuite(caller_options, callee_options, + expected_cipher_suite); + } + + protected: + SdpSemantics sdp_semantics_; + + private: + // |ss_| is used by |network_thread_| so it must be destroyed later. + std::unique_ptr ss_; + std::unique_ptr fss_; + // |network_thread_| and |worker_thread_| are used by both + // |caller_| and |callee_| so they must be destroyed + // later. + std::unique_ptr network_thread_; + std::unique_ptr worker_thread_; + // The turn servers and turn customizers should be accessed & deleted on the + // network thread to avoid a race with the socket read/write that occurs + // on the network thread. 
+ std::vector> turn_servers_; + std::vector> turn_customizers_; + std::unique_ptr caller_; + std::unique_ptr callee_; + std::unique_ptr field_trials_; +}; + +} // namespace webrtc + +#endif // PC_TEST_INTEGRATION_TEST_HELPERS_H_ diff --git a/pc/test/mock_channel_interface.h b/pc/test/mock_channel_interface.h index 2df3baee47..6faba5c8fc 100644 --- a/pc/test/mock_channel_interface.h +++ b/pc/test/mock_channel_interface.h @@ -28,11 +28,10 @@ class MockChannelInterface : public cricket::ChannelInterface { MOCK_METHOD(MediaChannel*, media_channel, (), (const, override)); MOCK_METHOD(const std::string&, transport_name, (), (const, override)); MOCK_METHOD(const std::string&, content_name, (), (const, override)); - MOCK_METHOD(bool, enabled, (), (const, override)); - MOCK_METHOD(bool, Enable, (bool), (override)); - MOCK_METHOD(sigslot::signal1&, - SignalFirstPacketReceived, - (), + MOCK_METHOD(void, Enable, (bool), (override)); + MOCK_METHOD(void, + SetFirstPacketReceivedCallback, + (std::function), (override)); MOCK_METHOD(bool, SetLocalContent, @@ -46,6 +45,7 @@ class MockChannelInterface : public cricket::ChannelInterface { webrtc::SdpType, std::string*), (override)); + MOCK_METHOD(bool, SetPayloadTypeDemuxingEnabled, (bool), (override)); MOCK_METHOD(const std::vector&, local_streams, (), diff --git a/pc/test/mock_data_channel.h b/pc/test/mock_data_channel.h index 9ca018af14..ab4b0073da 100644 --- a/pc/test/mock_data_channel.h +++ b/pc/test/mock_data_channel.h @@ -13,24 +13,39 @@ #include -#include "pc/data_channel.h" +#include "pc/sctp_data_channel.h" #include "test/gmock.h" namespace webrtc { -class MockDataChannel : public rtc::RefCountedObject { +class MockSctpDataChannel : public rtc::RefCountedObject { public: - MockDataChannel(int id, DataState state) - : MockDataChannel(id, "MockDataChannel", state, "udp", 0, 0, 0, 0) {} - MockDataChannel(int id, - const std::string& label, - DataState state, - const std::string& protocol, - uint32_t messages_sent, - uint64_t 
bytes_sent, - uint32_t messages_received, - uint64_t bytes_received) - : rtc::RefCountedObject(nullptr, cricket::DCT_NONE, label) { + MockSctpDataChannel(int id, DataState state) + : MockSctpDataChannel(id, + "MockSctpDataChannel", + state, + "udp", + 0, + 0, + 0, + 0) {} + MockSctpDataChannel( + int id, + const std::string& label, + DataState state, + const std::string& protocol, + uint32_t messages_sent, + uint64_t bytes_sent, + uint32_t messages_received, + uint64_t bytes_received, + const InternalDataChannelInit& config = InternalDataChannelInit(), + rtc::Thread* signaling_thread = rtc::Thread::Current(), + rtc::Thread* network_thread = rtc::Thread::Current()) + : rtc::RefCountedObject(config, + nullptr, + label, + signaling_thread, + network_thread) { EXPECT_CALL(*this, id()).WillRepeatedly(::testing::Return(id)); EXPECT_CALL(*this, state()).WillRepeatedly(::testing::Return(state)); EXPECT_CALL(*this, protocol()).WillRepeatedly(::testing::Return(protocol)); diff --git a/pc/test/mock_delayable.h b/pc/test/mock_delayable.h deleted file mode 100644 index bef07c1970..0000000000 --- a/pc/test/mock_delayable.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef PC_TEST_MOCK_DELAYABLE_H_ -#define PC_TEST_MOCK_DELAYABLE_H_ - -#include - -#include "absl/types/optional.h" -#include "media/base/delayable.h" -#include "test/gmock.h" - -namespace webrtc { - -class MockDelayable : public cricket::Delayable { - public: - MOCK_METHOD(bool, - SetBaseMinimumPlayoutDelayMs, - (uint32_t ssrc, int delay_ms), - (override)); - MOCK_METHOD(absl::optional, - GetBaseMinimumPlayoutDelayMs, - (uint32_t ssrc), - (const, override)); -}; - -} // namespace webrtc - -#endif // PC_TEST_MOCK_DELAYABLE_H_ diff --git a/pc/test/mock_peer_connection_observers.h b/pc/test/mock_peer_connection_observers.h index 2017735dc7..413339dbf7 100644 --- a/pc/test/mock_peer_connection_observers.h +++ b/pc/test/mock_peer_connection_observers.h @@ -85,6 +85,9 @@ class MockPeerConnectionObserver : public PeerConnectionObserver { remote_streams_->RemoveStream(stream); } void OnRenegotiationNeeded() override { renegotiation_needed_ = true; } + void OnNegotiationNeededEvent(uint32_t event_id) override { + latest_negotiation_needed_event_ = event_id; + } void OnDataChannel( rtc::scoped_refptr data_channel) override { last_datachannel_ = data_channel; @@ -214,8 +217,18 @@ class MockPeerConnectionObserver : public PeerConnectionObserver { return candidates; } - bool negotiation_needed() const { return renegotiation_needed_; } - void clear_negotiation_needed() { renegotiation_needed_ = false; } + bool legacy_renegotiation_needed() const { return renegotiation_needed_; } + void clear_legacy_renegotiation_needed() { renegotiation_needed_ = false; } + + bool has_negotiation_needed_event() { + return latest_negotiation_needed_event_.has_value(); + } + uint32_t latest_negotiation_needed_event() { + return latest_negotiation_needed_event_.value_or(0u); + } + void clear_latest_negotiation_needed_event() { + latest_negotiation_needed_event_ = absl::nullopt; + } rtc::scoped_refptr pc_; PeerConnectionInterface::SignalingState state_; @@ -223,6 +236,7 @@ class 
MockPeerConnectionObserver : public PeerConnectionObserver { rtc::scoped_refptr last_datachannel_; rtc::scoped_refptr remote_streams_; bool renegotiation_needed_ = false; + absl::optional latest_negotiation_needed_event_; bool ice_gathering_complete_ = false; bool ice_connected_ = false; bool callback_triggered_ = false; @@ -272,7 +286,7 @@ class MockSetSessionDescriptionObserver : public webrtc::SetSessionDescriptionObserver { public: static rtc::scoped_refptr Create() { - return new rtc::RefCountedObject(); + return rtc::make_ref_counted(); } MockSetSessionDescriptionObserver() @@ -297,7 +311,26 @@ class MockSetSessionDescriptionObserver std::string error_; }; -class MockSetRemoteDescriptionObserver +class FakeSetLocalDescriptionObserver + : public rtc::RefCountedObject { + public: + bool called() const { return error_.has_value(); } + RTCError& error() { + RTC_DCHECK(error_.has_value()); + return *error_; + } + + // SetLocalDescriptionObserverInterface implementation. + void OnSetLocalDescriptionComplete(RTCError error) override { + error_ = std::move(error); + } + + private: + // Set on complete, on success this is set to an RTCError::OK() error. 
+ absl::optional error_; +}; + +class FakeSetRemoteDescriptionObserver : public rtc::RefCountedObject { public: bool called() const { return error_.has_value(); } @@ -318,32 +351,51 @@ class MockSetRemoteDescriptionObserver class MockDataChannelObserver : public webrtc::DataChannelObserver { public: + struct Message { + std::string data; + bool binary; + }; + explicit MockDataChannelObserver(webrtc::DataChannelInterface* channel) : channel_(channel) { channel_->RegisterObserver(this); - state_ = channel_->state(); + states_.push_back(channel_->state()); } virtual ~MockDataChannelObserver() { channel_->UnregisterObserver(); } void OnBufferedAmountChange(uint64_t previous_amount) override {} - void OnStateChange() override { state_ = channel_->state(); } + void OnStateChange() override { states_.push_back(channel_->state()); } void OnMessage(const DataBuffer& buffer) override { messages_.push_back( - std::string(buffer.data.data(), buffer.data.size())); + {std::string(buffer.data.data(), buffer.data.size()), + buffer.binary}); } - bool IsOpen() const { return state_ == DataChannelInterface::kOpen; } - std::vector messages() const { return messages_; } + bool IsOpen() const { return state() == DataChannelInterface::kOpen; } + std::vector messages() const { return messages_; } std::string last_message() const { - return messages_.empty() ? 
std::string() : messages_.back(); + if (messages_.empty()) + return {}; + + return messages_.back().data; + } + bool last_message_is_binary() const { + if (messages_.empty()) + return false; + return messages_.back().binary; } size_t received_message_count() const { return messages_.size(); } + DataChannelInterface::DataState state() const { return states_.back(); } + const std::vector& states() const { + return states_; + } + private: rtc::scoped_refptr channel_; - DataChannelInterface::DataState state_; - std::vector messages_; + std::vector states_; + std::vector messages_; }; class MockStatsObserver : public webrtc::StatsObserver { diff --git a/pc/test/mock_rtp_receiver_internal.h b/pc/test/mock_rtp_receiver_internal.h index 779dcdcf08..ba244039af 100644 --- a/pc/test/mock_rtp_receiver_internal.h +++ b/pc/test/mock_rtp_receiver_internal.h @@ -57,6 +57,7 @@ class MockRtpReceiverInternal : public RtpReceiverInternal { // RtpReceiverInternal methods. MOCK_METHOD(void, Stop, (), (override)); + MOCK_METHOD(void, StopAndEndTrack, (), (override)); MOCK_METHOD(void, SetMediaChannel, (cricket::MediaChannel*), (override)); MOCK_METHOD(void, SetupMediaChannel, (uint32_t), (override)); MOCK_METHOD(void, SetupUnsignaledMediaChannel, (), (override)); diff --git a/pc/test/mock_rtp_sender_internal.h b/pc/test/mock_rtp_sender_internal.h index 1a31c5dac6..5e7670ebf0 100644 --- a/pc/test/mock_rtp_sender_internal.h +++ b/pc/test/mock_rtp_sender_internal.h @@ -65,23 +65,17 @@ class MockRtpSenderInternal : public RtpSenderInternal { (const, override)); // RtpSenderInternal methods. 
- MOCK_METHOD(void, SetMediaChannel, (cricket::MediaChannel*), (override)); - MOCK_METHOD(void, SetSsrc, (uint32_t), (override)); - MOCK_METHOD(void, - set_stream_ids, - (const std::vector&), - (override)); - MOCK_METHOD(void, SetStreams, (const std::vector&), (override)); - MOCK_METHOD(void, - set_init_send_encodings, - (const std::vector&), - (override)); - MOCK_METHOD(void, Stop, (), (override)); - MOCK_METHOD(int, AttachmentId, (), (const, override)); - MOCK_METHOD(RTCError, - DisableEncodingLayers, - (const std::vector&), - (override)); + MOCK_METHOD1(SetMediaChannel, void(cricket::MediaChannel*)); + MOCK_METHOD1(SetSsrc, void(uint32_t)); + MOCK_METHOD1(set_stream_ids, void(const std::vector&)); + MOCK_METHOD1(SetStreams, void(const std::vector&)); + MOCK_METHOD1(set_init_send_encodings, + void(const std::vector&)); + MOCK_METHOD0(Stop, void()); + MOCK_CONST_METHOD0(AttachmentId, int()); + MOCK_METHOD1(DisableEncodingLayers, + RTCError(const std::vector&)); + MOCK_METHOD0(SetTransceiverAsStopped, void()); }; } // namespace webrtc diff --git a/pc/test/peer_connection_test_wrapper.cc b/pc/test/peer_connection_test_wrapper.cc index 4f0d72e667..8fdfb1bbb8 100644 --- a/pc/test/peer_connection_test_wrapper.cc +++ b/pc/test/peer_connection_test_wrapper.cc @@ -20,6 +20,7 @@ #include "absl/types/optional.h" #include "api/audio/audio_mixer.h" #include "api/create_peerconnection_factory.h" +#include "api/sequence_checker.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" #include "api/video_codecs/video_decoder_factory.h" @@ -37,7 +38,6 @@ #include "rtc_base/ref_counted_object.h" #include "rtc_base/rtc_certificate_generator.h" #include "rtc_base/string_encode.h" -#include "rtc_base/thread_checker.h" #include "rtc_base/time_utils.h" #include "test/gtest.h" @@ -80,7 +80,8 @@ PeerConnectionTestWrapper::PeerConnectionTestWrapper( rtc::Thread* worker_thread) : name_(name), network_thread_(network_thread), 
- worker_thread_(worker_thread) { + worker_thread_(worker_thread), + pending_negotiation_(false) { pc_thread_checker_.Detach(); } @@ -122,17 +123,42 @@ bool PeerConnectionTestWrapper::CreatePc( std::unique_ptr cert_generator( new FakeRTCCertificateGenerator()); - peer_connection_ = peer_connection_factory_->CreatePeerConnection( - config, std::move(port_allocator), std::move(cert_generator), this); - - return peer_connection_.get() != NULL; + webrtc::PeerConnectionDependencies deps(this); + deps.allocator = std::move(port_allocator); + deps.cert_generator = std::move(cert_generator); + auto result = peer_connection_factory_->CreatePeerConnectionOrError( + config, std::move(deps)); + if (result.ok()) { + peer_connection_ = result.MoveValue(); + return true; + } else { + return false; + } } rtc::scoped_refptr PeerConnectionTestWrapper::CreateDataChannel( const std::string& label, const webrtc::DataChannelInit& init) { - return peer_connection_->CreateDataChannel(label, &init); + auto result = peer_connection_->CreateDataChannelOrError(label, &init); + if (!result.ok()) { + RTC_LOG(LS_ERROR) << "CreateDataChannel failed: " + << ToString(result.error().type()) << " " + << result.error().message(); + return nullptr; + } + return result.MoveValue(); +} + +void PeerConnectionTestWrapper::WaitForNegotiation() { + EXPECT_TRUE_WAIT(!pending_negotiation_, kMaxWait); +} + +void PeerConnectionTestWrapper::OnSignalingChange( + webrtc::PeerConnectionInterface::SignalingState new_state) { + if (new_state == webrtc::PeerConnectionInterface::SignalingState::kStable) { + pending_negotiation_ = false; + } } void PeerConnectionTestWrapper::OnAddTrack( @@ -182,6 +208,7 @@ void PeerConnectionTestWrapper::OnSuccess(SessionDescriptionInterface* desc) { void PeerConnectionTestWrapper::CreateOffer( const webrtc::PeerConnectionInterface::RTCOfferAnswerOptions& options) { RTC_LOG(LS_INFO) << "PeerConnectionTestWrapper " << name_ << ": CreateOffer."; + pending_negotiation_ = true; 
peer_connection_->CreateOffer(this, options); } @@ -189,6 +216,7 @@ void PeerConnectionTestWrapper::CreateAnswer( const webrtc::PeerConnectionInterface::RTCOfferAnswerOptions& options) { RTC_LOG(LS_INFO) << "PeerConnectionTestWrapper " << name_ << ": CreateAnswer."; + pending_negotiation_ = true; peer_connection_->CreateAnswer(this, options); } @@ -207,8 +235,7 @@ void PeerConnectionTestWrapper::SetLocalDescription(SdpType type, << ": SetLocalDescription " << webrtc::SdpTypeToString(type) << " " << sdp; - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = rtc::make_ref_counted(); peer_connection_->SetLocalDescription( observer, webrtc::CreateSessionDescription(type, sdp).release()); } @@ -219,8 +246,7 @@ void PeerConnectionTestWrapper::SetRemoteDescription(SdpType type, << ": SetRemoteDescription " << webrtc::SdpTypeToString(type) << " " << sdp; - rtc::scoped_refptr observer( - new rtc::RefCountedObject()); + auto observer = rtc::make_ref_counted(); peer_connection_->SetRemoteDescription( observer, webrtc::CreateSessionDescription(type, sdp).release()); } @@ -317,9 +343,8 @@ PeerConnectionTestWrapper::GetUserMedia( config.frame_interval_ms = 100; config.timestamp_offset_ms = rtc::TimeMillis(); - rtc::scoped_refptr source = - new rtc::RefCountedObject( - config, /* remote */ false); + auto source = rtc::make_ref_counted( + config, /* remote */ false); std::string videotrack_label = stream_id + kVideoTrackLabelBase; rtc::scoped_refptr video_track( diff --git a/pc/test/peer_connection_test_wrapper.h b/pc/test/peer_connection_test_wrapper.h index 2dc88e9309..4abf6c9ea5 100644 --- a/pc/test/peer_connection_test_wrapper.h +++ b/pc/test/peer_connection_test_wrapper.h @@ -25,11 +25,11 @@ #include "api/rtc_error.h" #include "api/rtp_receiver_interface.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "pc/test/fake_audio_capture_module.h" #include "pc/test/fake_video_track_renderer.h" #include 
"rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" class PeerConnectionTestWrapper : public webrtc::PeerConnectionObserver, @@ -49,15 +49,21 @@ class PeerConnectionTestWrapper rtc::scoped_refptr audio_encoder_factory, rtc::scoped_refptr audio_decoder_factory); + rtc::scoped_refptr pc_factory() + const { + return peer_connection_factory_; + } webrtc::PeerConnectionInterface* pc() { return peer_connection_.get(); } rtc::scoped_refptr CreateDataChannel( const std::string& label, const webrtc::DataChannelInit& init); + void WaitForNegotiation(); + // Implements PeerConnectionObserver. void OnSignalingChange( - webrtc::PeerConnectionInterface::SignalingState new_state) override {} + webrtc::PeerConnectionInterface::SignalingState new_state) override; void OnAddTrack( rtc::scoped_refptr receiver, const std::vector>& @@ -114,13 +120,14 @@ class PeerConnectionTestWrapper std::string name_; rtc::Thread* const network_thread_; rtc::Thread* const worker_thread_; - rtc::ThreadChecker pc_thread_checker_; + webrtc::SequenceChecker pc_thread_checker_; rtc::scoped_refptr peer_connection_; rtc::scoped_refptr peer_connection_factory_; rtc::scoped_refptr fake_audio_capture_module_; std::unique_ptr renderer_; int num_get_user_media_calls_ = 0; + bool pending_negotiation_; }; #endif // PC_TEST_PEER_CONNECTION_TEST_WRAPPER_H_ diff --git a/pc/test/rtc_stats_obtainer.h b/pc/test/rtc_stats_obtainer.h index 95201f6649..4da23c6628 100644 --- a/pc/test/rtc_stats_obtainer.h +++ b/pc/test/rtc_stats_obtainer.h @@ -20,8 +20,7 @@ class RTCStatsObtainer : public RTCStatsCollectorCallback { public: static rtc::scoped_refptr Create( rtc::scoped_refptr* report_ptr = nullptr) { - return rtc::scoped_refptr( - new rtc::RefCountedObject(report_ptr)); + return rtc::make_ref_counted(report_ptr); } void OnStatsDelivered( @@ -43,7 +42,7 @@ class RTCStatsObtainer : public RTCStatsCollectorCallback { : report_ptr_(report_ptr) {} private: - 
rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; rtc::scoped_refptr report_; rtc::scoped_refptr* report_ptr_; }; diff --git a/pc/test/test_sdp_strings.h b/pc/test/test_sdp_strings.h index 849757d300..6394ac5f5e 100644 --- a/pc/test/test_sdp_strings.h +++ b/pc/test/test_sdp_strings.h @@ -60,7 +60,7 @@ static const char kFireFoxSdpOffer[] = "a=candidate:4 2 UDP 2113667326 10.0.254.2 58890 typ host\r\n" "a=candidate:5 2 UDP 1694302206 74.95.2.170 33611 typ srflx raddr" " 10.0.254.2 rport 58890\r\n" -#ifdef HAVE_SCTP +#ifdef WEBRTC_HAVE_SCTP "m=application 45536 DTLS/SCTP 5000\r\n" "c=IN IP4 74.95.2.170\r\n" "a=fmtp:5000 protocol=webrtc-datachannel;streams=16\r\n" diff --git a/pc/track_media_info_map.cc b/pc/track_media_info_map.cc index ca923a030d..66f4c461df 100644 --- a/pc/track_media_info_map.cc +++ b/pc/track_media_info_map.cc @@ -10,10 +10,17 @@ #include "pc/track_media_info_map.h" +#include #include #include #include +#include "api/media_types.h" +#include "api/rtp_parameters.h" +#include "media/base/stream_params.h" +#include "rtc_base/checks.h" +#include "rtc_base/thread.h" + namespace webrtc { namespace { @@ -43,20 +50,12 @@ void GetAudioAndVideoTrackBySsrc( RTC_DCHECK(local_video_track_by_ssrc->empty()); RTC_DCHECK(remote_audio_track_by_ssrc->empty()); RTC_DCHECK(remote_video_track_by_ssrc->empty()); - // TODO(hbos): RTP senders/receivers uses a proxy to the signaling thread, and - // our sender/receiver implementations invokes on the worker thread. (This - // means one thread jump if on signaling thread and two thread jumps if on any - // other threads). Is there a way to avoid thread jump(s) on a per - // sender/receiver, per method basis? for (const auto& rtp_sender : rtp_senders) { cricket::MediaType media_type = rtp_sender->media_type(); MediaStreamTrackInterface* track = rtp_sender->track(); if (!track) { continue; } - RTC_DCHECK_EQ(track->kind(), media_type == cricket::MEDIA_TYPE_AUDIO - ? 
MediaStreamTrackInterface::kAudioKind - : MediaStreamTrackInterface::kVideoKind); // TODO(deadbeef): |ssrc| should be removed in favor of |GetParameters|. uint32_t ssrc = rtp_sender->ssrc(); if (ssrc != 0) { @@ -77,9 +76,6 @@ void GetAudioAndVideoTrackBySsrc( cricket::MediaType media_type = rtp_receiver->media_type(); MediaStreamTrackInterface* track = rtp_receiver->track(); RTC_DCHECK(track); - RTC_DCHECK_EQ(track->kind(), media_type == cricket::MEDIA_TYPE_AUDIO - ? MediaStreamTrackInterface::kAudioKind - : MediaStreamTrackInterface::kVideoKind); RtpParameters params = rtp_receiver->GetParameters(); for (const RtpEncodingParameters& encoding : params.encodings) { if (!encoding.ssrc) { @@ -115,6 +111,8 @@ TrackMediaInfoMap::TrackMediaInfoMap( const std::vector>& rtp_receivers) : voice_media_info_(std::move(voice_media_info)), video_media_info_(std::move(video_media_info)) { + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + std::map local_audio_track_by_ssrc; std::map local_video_track_by_ssrc; std::map remote_audio_track_by_ssrc; diff --git a/pc/track_media_info_map.h b/pc/track_media_info_map.h index 542501eb16..c8c6da2701 100644 --- a/pc/track_media_info_map.h +++ b/pc/track_media_info_map.h @@ -11,12 +11,16 @@ #ifndef PC_TRACK_MEDIA_INFO_MAP_H_ #define PC_TRACK_MEDIA_INFO_MAP_H_ +#include + #include #include #include #include +#include "absl/types/optional.h" #include "api/media_stream_interface.h" +#include "api/scoped_refptr.h" #include "media/base/media_channel.h" #include "pc/rtp_receiver.h" #include "pc/rtp_sender.h" diff --git a/pc/track_media_info_map_unittest.cc b/pc/track_media_info_map_unittest.cc index 0cb1e0e277..1d5caacddb 100644 --- a/pc/track_media_info_map_unittest.cc +++ b/pc/track_media_info_map_unittest.cc @@ -31,6 +31,45 @@ namespace webrtc { namespace { +class MockVideoTrack : public VideoTrackInterface { + public: + // NotifierInterface + MOCK_METHOD(void, + RegisterObserver, + (ObserverInterface * observer), + (override)); + 
MOCK_METHOD(void, + UnregisterObserver, + (ObserverInterface * observer), + (override)); + + // MediaStreamTrackInterface + MOCK_METHOD(std::string, kind, (), (const, override)); + MOCK_METHOD(std::string, id, (), (const, override)); + MOCK_METHOD(bool, enabled, (), (const, override)); + MOCK_METHOD(bool, set_enabled, (bool enable), (override)); + MOCK_METHOD(TrackState, state, (), (const, override)); + + // VideoSourceInterface + MOCK_METHOD(void, + AddOrUpdateSink, + (rtc::VideoSinkInterface * sink, + const rtc::VideoSinkWants& wants), + (override)); + // RemoveSink must guarantee that at the time the method returns, + // there is no current and no future calls to VideoSinkInterface::OnFrame. + MOCK_METHOD(void, + RemoveSink, + (rtc::VideoSinkInterface * sink), + (override)); + + // VideoTrackInterface + MOCK_METHOD(VideoTrackSourceInterface*, GetSource, (), (const, override)); + + MOCK_METHOD(ContentHint, content_hint, (), (const, override)); + MOCK_METHOD(void, set_content_hint, (ContentHint hint), (override)); +}; + RtpParameters CreateRtpParametersWithSsrcs( std::initializer_list ssrcs) { RtpParameters params; @@ -52,8 +91,7 @@ rtc::scoped_refptr CreateMockRtpSender( } else { first_ssrc = 0; } - rtc::scoped_refptr sender( - new rtc::RefCountedObject()); + auto sender = rtc::make_ref_counted(); EXPECT_CALL(*sender, track()) .WillRepeatedly(::testing::Return(std::move(track))); EXPECT_CALL(*sender, ssrc()).WillRepeatedly(::testing::Return(first_ssrc)); @@ -69,8 +107,7 @@ rtc::scoped_refptr CreateMockRtpReceiver( cricket::MediaType media_type, std::initializer_list ssrcs, rtc::scoped_refptr track) { - rtc::scoped_refptr receiver( - new rtc::RefCountedObject()); + auto receiver = rtc::make_ref_counted(); EXPECT_CALL(*receiver, track()) .WillRepeatedly(::testing::Return(std::move(track))); EXPECT_CALL(*receiver, media_type()) @@ -81,23 +118,35 @@ rtc::scoped_refptr CreateMockRtpReceiver( return receiver; } +rtc::scoped_refptr CreateVideoTrack( + const std::string& 
id) { + return VideoTrack::Create(id, FakeVideoTrackSource::Create(false), + rtc::Thread::Current()); +} + +rtc::scoped_refptr CreateMockVideoTrack( + const std::string& id) { + auto track = rtc::make_ref_counted(); + EXPECT_CALL(*track, kind()) + .WillRepeatedly(::testing::Return(VideoTrack::kVideoKind)); + return track; +} + class TrackMediaInfoMapTest : public ::testing::Test { public: TrackMediaInfoMapTest() : TrackMediaInfoMapTest(true) {} - explicit TrackMediaInfoMapTest(bool use_current_thread) + explicit TrackMediaInfoMapTest(bool use_real_video_track) : voice_media_info_(new cricket::VoiceMediaInfo()), video_media_info_(new cricket::VideoMediaInfo()), local_audio_track_(AudioTrack::Create("LocalAudioTrack", nullptr)), remote_audio_track_(AudioTrack::Create("RemoteAudioTrack", nullptr)), - local_video_track_(VideoTrack::Create( - "LocalVideoTrack", - FakeVideoTrackSource::Create(false), - use_current_thread ? rtc::Thread::Current() : nullptr)), - remote_video_track_(VideoTrack::Create( - "RemoteVideoTrack", - FakeVideoTrackSource::Create(false), - use_current_thread ? rtc::Thread::Current() : nullptr)) {} + local_video_track_(use_real_video_track + ? CreateVideoTrack("LocalVideoTrack") + : CreateMockVideoTrack("LocalVideoTrack")), + remote_video_track_(use_real_video_track + ? 
CreateVideoTrack("RemoteVideoTrack") + : CreateMockVideoTrack("LocalVideoTrack")) {} ~TrackMediaInfoMapTest() { // If we have a map the ownership has been passed to the map, only delete if @@ -181,8 +230,8 @@ class TrackMediaInfoMapTest : public ::testing::Test { std::unique_ptr map_; rtc::scoped_refptr local_audio_track_; rtc::scoped_refptr remote_audio_track_; - rtc::scoped_refptr local_video_track_; - rtc::scoped_refptr remote_video_track_; + rtc::scoped_refptr local_video_track_; + rtc::scoped_refptr remote_video_track_; }; } // namespace diff --git a/pc/transceiver_list.cc b/pc/transceiver_list.cc new file mode 100644 index 0000000000..235c9af036 --- /dev/null +++ b/pc/transceiver_list.cc @@ -0,0 +1,86 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/transceiver_list.h" + +#include "rtc_base/checks.h" + +namespace webrtc { + +void TransceiverStableState::set_newly_created() { + RTC_DCHECK(!has_m_section_); + newly_created_ = true; +} + +void TransceiverStableState::SetMSectionIfUnset( + absl::optional mid, + absl::optional mline_index) { + if (!has_m_section_) { + mid_ = mid; + mline_index_ = mline_index; + has_m_section_ = true; + } +} + +void TransceiverStableState::SetRemoteStreamIdsIfUnset( + const std::vector& ids) { + if (!remote_stream_ids_.has_value()) { + remote_stream_ids_ = ids; + } +} + +void TransceiverStableState::SetInitSendEncodings( + const std::vector& encodings) { + init_send_encodings_ = encodings; +} + +std::vector TransceiverList::ListInternal() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + std::vector internals; + for (auto transceiver : transceivers_) { + internals.push_back(transceiver->internal()); + } + return internals; +} + +RtpTransceiverProxyRefPtr TransceiverList::FindBySender( + rtc::scoped_refptr sender) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (auto transceiver : transceivers_) { + if (transceiver->sender() == sender) { + return transceiver; + } + } + return nullptr; +} + +RtpTransceiverProxyRefPtr TransceiverList::FindByMid( + const std::string& mid) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (auto transceiver : transceivers_) { + if (transceiver->mid() == mid) { + return transceiver; + } + } + return nullptr; +} + +RtpTransceiverProxyRefPtr TransceiverList::FindByMLineIndex( + size_t mline_index) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (auto transceiver : transceivers_) { + if (transceiver->internal()->mline_index() == mline_index) { + return transceiver; + } + } + return nullptr; +} + +} // namespace webrtc diff --git a/pc/transceiver_list.h b/pc/transceiver_list.h new file mode 100644 index 0000000000..568c9c7e7a --- /dev/null +++ b/pc/transceiver_list.h @@ -0,0 +1,148 @@ +/* + * Copyright 2020 The WebRTC 
project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_TRANSCEIVER_LIST_H_ +#define PC_TRANSCEIVER_LIST_H_ + +#include + +#include +#include +#include +#include + +#include "absl/types/optional.h" +#include "api/media_types.h" +#include "api/rtc_error.h" +#include "api/rtp_parameters.h" +#include "api/rtp_sender_interface.h" +#include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "pc/rtp_transceiver.h" +#include "rtc_base/checks.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +typedef rtc::scoped_refptr> + RtpTransceiverProxyRefPtr; + +// Captures partial state to be used for rollback. Applicable only in +// Unified Plan. 
+class TransceiverStableState { + public: + TransceiverStableState() {} + void set_newly_created(); + void SetMSectionIfUnset(absl::optional mid, + absl::optional mline_index); + void SetRemoteStreamIdsIfUnset(const std::vector& ids); + void SetInitSendEncodings( + const std::vector& encodings); + absl::optional mid() const { return mid_; } + absl::optional mline_index() const { return mline_index_; } + absl::optional> remote_stream_ids() const { + return remote_stream_ids_; + } + absl::optional> init_send_encodings() + const { + return init_send_encodings_; + } + bool has_m_section() const { return has_m_section_; } + bool newly_created() const { return newly_created_; } + + private: + absl::optional mid_; + absl::optional mline_index_; + absl::optional> remote_stream_ids_; + absl::optional> init_send_encodings_; + // Indicates that mid value from stable state has been captured and + // that rollback has to restore the transceiver. Also protects against + // subsequent overwrites. + bool has_m_section_ = false; + // Indicates that the transceiver was created as part of applying a + // description to track potential need for removing transceiver during + // rollback. + bool newly_created_ = false; +}; + +// This class encapsulates the active list of transceivers on a +// PeerConnection, and offers convenient functions on that list. +// It is a single-thread class; all operations must be performed +// on the same thread. +class TransceiverList { + public: + // Returns a copy of the currently active list of transceivers. The + // list consists of rtc::scoped_refptrs, which will keep the transceivers + // from being deallocated, even if they are removed from the TransceiverList. + std::vector List() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return transceivers_; + } + // As above, but does not check thread ownership. Unsafe. 
+ // TODO(bugs.webrtc.org/12692): Refactor and remove + std::vector UnsafeList() const { + return transceivers_; + } + + // Returns a list of the internal() pointers of the currently active list + // of transceivers. These raw pointers are not thread-safe, so need to + // be consumed on the same thread. + std::vector ListInternal() const; + + void Add(RtpTransceiverProxyRefPtr transceiver) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + transceivers_.push_back(transceiver); + } + void Remove(RtpTransceiverProxyRefPtr transceiver) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + transceivers_.erase( + std::remove(transceivers_.begin(), transceivers_.end(), transceiver), + transceivers_.end()); + } + RtpTransceiverProxyRefPtr FindBySender( + rtc::scoped_refptr sender) const; + RtpTransceiverProxyRefPtr FindByMid(const std::string& mid) const; + RtpTransceiverProxyRefPtr FindByMLineIndex(size_t mline_index) const; + + // Find or create the stable state for a transceiver. + TransceiverStableState* StableState(RtpTransceiverProxyRefPtr transceiver) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return &(transceiver_stable_states_by_transceivers_[transceiver]); + } + + void DiscardStableStates() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + transceiver_stable_states_by_transceivers_.clear(); + } + + std::map& StableStates() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return transceiver_stable_states_by_transceivers_; + } + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + std::vector transceivers_; + // TODO(bugs.webrtc.org/12692): Add RTC_GUARDED_BY(sequence_checker_); + + // Holds changes made to transceivers during applying descriptors for + // potential rollback. Gets cleared once signaling state goes to stable. + std::map + transceiver_stable_states_by_transceivers_ + RTC_GUARDED_BY(sequence_checker_); + // Holds remote stream ids for transceivers from stable state. 
+ std::map> + remote_stream_ids_by_transceivers_ RTC_GUARDED_BY(sequence_checker_); +}; + +} // namespace webrtc + +#endif // PC_TRANSCEIVER_LIST_H_ diff --git a/pc/transport_stats.h b/pc/transport_stats.h index 7cb95f4ad2..173af91fba 100644 --- a/pc/transport_stats.h +++ b/pc/transport_stats.h @@ -14,6 +14,7 @@ #include #include +#include "api/dtls_transport_interface.h" #include "p2p/base/dtls_transport_internal.h" #include "p2p/base/ice_transport_internal.h" #include "p2p/base/port.h" @@ -30,7 +31,7 @@ struct TransportChannelStats { int ssl_version_bytes = 0; int srtp_crypto_suite = rtc::SRTP_INVALID_CRYPTO_SUITE; int ssl_cipher_suite = rtc::TLS_NULL_WITH_NULL_NULL; - DtlsTransportState dtls_state = DTLS_TRANSPORT_NEW; + webrtc::DtlsTransportState dtls_state = webrtc::DtlsTransportState::kNew; IceTransportStats ice_transport_stats; }; diff --git a/pc/usage_pattern.cc b/pc/usage_pattern.cc new file mode 100644 index 0000000000..848472148f --- /dev/null +++ b/pc/usage_pattern.cc @@ -0,0 +1,49 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "pc/usage_pattern.h" + +#include "api/peer_connection_interface.h" +#include "rtc_base/logging.h" +#include "system_wrappers/include/metrics.h" + +namespace webrtc { + +void UsagePattern::NoteUsageEvent(UsageEvent event) { + usage_event_accumulator_ |= static_cast(event); +} + +void UsagePattern::ReportUsagePattern(PeerConnectionObserver* observer) const { + RTC_DLOG(LS_INFO) << "Usage signature is " << usage_event_accumulator_; + RTC_HISTOGRAM_ENUMERATION_SPARSE("WebRTC.PeerConnection.UsagePattern", + usage_event_accumulator_, + static_cast(UsageEvent::MAX_VALUE)); + const int bad_bits = + static_cast(UsageEvent::SET_LOCAL_DESCRIPTION_SUCCEEDED) | + static_cast(UsageEvent::CANDIDATE_COLLECTED); + const int good_bits = + static_cast(UsageEvent::SET_REMOTE_DESCRIPTION_SUCCEEDED) | + static_cast(UsageEvent::REMOTE_CANDIDATE_ADDED) | + static_cast(UsageEvent::ICE_STATE_CONNECTED); + if ((usage_event_accumulator_ & bad_bits) == bad_bits && + (usage_event_accumulator_ & good_bits) == 0) { + // If called after close(), we can't report, because observer may have + // been deallocated, and therefore pointer is null. Write to log instead. + if (observer) { + observer->OnInterestingUsage(usage_event_accumulator_); + } else { + RTC_LOG(LS_INFO) << "Interesting usage signature " + << usage_event_accumulator_ + << " observed after observer shutdown"; + } + } +} + +} // namespace webrtc diff --git a/pc/usage_pattern.h b/pc/usage_pattern.h new file mode 100644 index 0000000000..0182999d6b --- /dev/null +++ b/pc/usage_pattern.h @@ -0,0 +1,77 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef PC_USAGE_PATTERN_H_ +#define PC_USAGE_PATTERN_H_ + +#include "api/peer_connection_interface.h" + +namespace webrtc { + +class PeerConnectionObserver; + +// A bit in the usage pattern is registered when its defining event occurs +// at least once. +enum class UsageEvent : int { + TURN_SERVER_ADDED = 0x01, + STUN_SERVER_ADDED = 0x02, + DATA_ADDED = 0x04, + AUDIO_ADDED = 0x08, + VIDEO_ADDED = 0x10, + // |SetLocalDescription| returns successfully. + SET_LOCAL_DESCRIPTION_SUCCEEDED = 0x20, + // |SetRemoteDescription| returns successfully. + SET_REMOTE_DESCRIPTION_SUCCEEDED = 0x40, + // A local candidate (with type host, server-reflexive, or relay) is + // collected. + CANDIDATE_COLLECTED = 0x80, + // A remote candidate is successfully added via |AddIceCandidate|. + ADD_ICE_CANDIDATE_SUCCEEDED = 0x100, + ICE_STATE_CONNECTED = 0x200, + CLOSE_CALLED = 0x400, + // A local candidate with private IP is collected. + PRIVATE_CANDIDATE_COLLECTED = 0x800, + // A remote candidate with private IP is added, either via AddIceCandidate + // or from the remote description. + REMOTE_PRIVATE_CANDIDATE_ADDED = 0x1000, + // A local mDNS candidate is collected. + MDNS_CANDIDATE_COLLECTED = 0x2000, + // A remote mDNS candidate is added, either via AddIceCandidate or from the + // remote description. + REMOTE_MDNS_CANDIDATE_ADDED = 0x4000, + // A local candidate with IPv6 address is collected. + IPV6_CANDIDATE_COLLECTED = 0x8000, + // A remote candidate with IPv6 address is added, either via AddIceCandidate + // or from the remote description. + REMOTE_IPV6_CANDIDATE_ADDED = 0x10000, + // A remote candidate (with type host, server-reflexive, or relay) is + // successfully added, either via AddIceCandidate or from the remote + // description. + REMOTE_CANDIDATE_ADDED = 0x20000, + // An explicit host-host candidate pair is selected, i.e. both the local and + // the remote candidates have the host type. 
This does not include candidate + // pairs formed with equivalent prflx remote candidates, e.g. a host-prflx + // pair where the prflx candidate has the same base as a host candidate of + // the remote peer. + DIRECT_CONNECTION_SELECTED = 0x40000, + MAX_VALUE = 0x80000, +}; + +class UsagePattern { + public: + void NoteUsageEvent(UsageEvent event); + void ReportUsagePattern(PeerConnectionObserver* observer) const; + + private: + int usage_event_accumulator_ = 0; +}; + +} // namespace webrtc +#endif // PC_USAGE_PATTERN_H_ diff --git a/pc/used_ids.h b/pc/used_ids.h index 78e64caa41..62b2faa018 100644 --- a/pc/used_ids.h +++ b/pc/used_ids.h @@ -60,7 +60,9 @@ class UsedIds { } protected: - bool IsIdUsed(int new_id) { return id_set_.find(new_id) != id_set_.end(); } + virtual bool IsIdUsed(int new_id) { + return id_set_.find(new_id) != id_set_.end(); + } const int min_allowed_id_; const int max_allowed_id_; @@ -92,11 +94,24 @@ class UsedIds { class UsedPayloadTypes : public UsedIds { public: UsedPayloadTypes() - : UsedIds(kDynamicPayloadTypeMin, kDynamicPayloadTypeMax) {} + : UsedIds(kFirstDynamicPayloadTypeLowerRange, + kLastDynamicPayloadTypeUpperRange) {} + + protected: + bool IsIdUsed(int new_id) override { + // Range marked for RTCP avoidance is "used". 
+ if (new_id > kLastDynamicPayloadTypeLowerRange && + new_id < kFirstDynamicPayloadTypeUpperRange) + return true; + return UsedIds::IsIdUsed(new_id); + } private: - static const int kDynamicPayloadTypeMin = 96; - static const int kDynamicPayloadTypeMax = 127; + static const int kFirstDynamicPayloadTypeLowerRange = 35; + static const int kLastDynamicPayloadTypeLowerRange = 63; + + static const int kFirstDynamicPayloadTypeUpperRange = 96; + static const int kLastDynamicPayloadTypeUpperRange = 127; }; // Helper class used for finding duplicate RTP Header extension ids among diff --git a/pc/video_rtp_receiver.cc b/pc/video_rtp_receiver.cc index f093bf4b33..8db4d9f02f 100644 --- a/pc/video_rtp_receiver.cc +++ b/pc/video_rtp_receiver.cc @@ -15,17 +15,12 @@ #include #include -#include "api/media_stream_proxy.h" -#include "api/media_stream_track_proxy.h" -#include "api/video_track_source_proxy.h" -#include "pc/jitter_buffer_delay.h" -#include "pc/jitter_buffer_delay_proxy.h" -#include "pc/media_stream.h" +#include "api/video/recordable_encoded_frame.h" +#include "api/video_track_source_proxy_factory.h" #include "pc/video_track.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" -#include "rtc_base/trace_event.h" namespace webrtc { @@ -42,116 +37,139 @@ VideoRtpReceiver::VideoRtpReceiver( const std::vector>& streams) : worker_thread_(worker_thread), id_(receiver_id), - source_(new RefCountedObject(this)), - track_(VideoTrackProxy::Create( + source_(rtc::make_ref_counted(&source_callback_)), + track_(VideoTrackProxyWithInternal::Create( rtc::Thread::Current(), worker_thread, - VideoTrack::Create( - receiver_id, - VideoTrackSourceProxy::Create(rtc::Thread::Current(), - worker_thread, - source_), - worker_thread))), - attachment_id_(GenerateUniqueId()), - delay_(JitterBufferDelayProxy::Create( - rtc::Thread::Current(), - worker_thread, - new rtc::RefCountedObject(worker_thread))) { + VideoTrack::Create(receiver_id, + 
CreateVideoTrackSourceProxy(rtc::Thread::Current(), + worker_thread, + source_), + worker_thread))), + attachment_id_(GenerateUniqueId()) { RTC_DCHECK(worker_thread_); SetStreams(streams); - source_->SetState(MediaSourceInterface::kLive); + RTC_DCHECK_EQ(source_->state(), MediaSourceInterface::kLive); } VideoRtpReceiver::~VideoRtpReceiver() { - // Since cricket::VideoRenderer is not reference counted, - // we need to remove it from the channel before we are deleted. - Stop(); - // Make sure we can't be called by the |source_| anymore. - worker_thread_->Invoke(RTC_FROM_HERE, - [this] { source_->ClearCallback(); }); + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + RTC_DCHECK(stopped_); + RTC_DCHECK(!media_channel_); } std::vector VideoRtpReceiver::stream_ids() const { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); std::vector stream_ids(streams_.size()); for (size_t i = 0; i < streams_.size(); ++i) stream_ids[i] = streams_[i]->id(); return stream_ids; } +rtc::scoped_refptr VideoRtpReceiver::dtls_transport() + const { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + return dtls_transport_; +} + +std::vector> +VideoRtpReceiver::streams() const { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + return streams_; +} + RtpParameters VideoRtpReceiver::GetParameters() const { - if (!media_channel_ || stopped_) { + RTC_DCHECK_RUN_ON(worker_thread_); + if (!media_channel_) return RtpParameters(); - } - return worker_thread_->Invoke(RTC_FROM_HERE, [&] { - return ssrc_ ? media_channel_->GetRtpReceiveParameters(*ssrc_) - : media_channel_->GetDefaultRtpReceiveParameters(); - }); + return ssrc_ ? media_channel_->GetRtpReceiveParameters(*ssrc_) + : media_channel_->GetDefaultRtpReceiveParameters(); } void VideoRtpReceiver::SetFrameDecryptor( rtc::scoped_refptr frame_decryptor) { + RTC_DCHECK_RUN_ON(worker_thread_); frame_decryptor_ = std::move(frame_decryptor); // Special Case: Set the frame decryptor to any value on any existing channel. 
- if (media_channel_ && ssrc_.has_value() && !stopped_) { - worker_thread_->Invoke(RTC_FROM_HERE, [&] { - media_channel_->SetFrameDecryptor(*ssrc_, frame_decryptor_); - }); + if (media_channel_ && ssrc_) { + media_channel_->SetFrameDecryptor(*ssrc_, frame_decryptor_); } } rtc::scoped_refptr VideoRtpReceiver::GetFrameDecryptor() const { + RTC_DCHECK_RUN_ON(worker_thread_); return frame_decryptor_; } void VideoRtpReceiver::SetDepacketizerToDecoderFrameTransformer( rtc::scoped_refptr frame_transformer) { - worker_thread_->Invoke(RTC_FROM_HERE, [&] { - RTC_DCHECK_RUN_ON(worker_thread_); - frame_transformer_ = std::move(frame_transformer); - if (media_channel_ && !stopped_) { - media_channel_->SetDepacketizerToDecoderFrameTransformer( - ssrc_.value_or(0), frame_transformer_); - } - }); + RTC_DCHECK_RUN_ON(worker_thread_); + frame_transformer_ = std::move(frame_transformer); + if (media_channel_) { + media_channel_->SetDepacketizerToDecoderFrameTransformer( + ssrc_.value_or(0), frame_transformer_); + } } void VideoRtpReceiver::Stop() { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); // TODO(deadbeef): Need to do more here to fully stop receiving packets. - if (stopped_) { - return; + + if (!stopped_) { + source_->SetState(MediaSourceInterface::kEnded); + stopped_ = true; } - source_->SetState(MediaSourceInterface::kEnded); - if (!media_channel_) { - RTC_LOG(LS_WARNING) << "VideoRtpReceiver::Stop: No video channel exists."; - } else { - // Allow that SetSink fails. This is the normal case when the underlying - // media channel has already been deleted. 
- worker_thread_->Invoke(RTC_FROM_HERE, [&] { - RTC_DCHECK_RUN_ON(worker_thread_); + + worker_thread_->Invoke(RTC_FROM_HERE, [&] { + RTC_DCHECK_RUN_ON(worker_thread_); + if (media_channel_) { SetSink(nullptr); - }); - } - delay_->OnStop(); - stopped_ = true; + SetMediaChannel_w(nullptr); + } + source_->ClearCallback(); + }); +} + +void VideoRtpReceiver::StopAndEndTrack() { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + Stop(); + track_->internal()->set_ended(); } void VideoRtpReceiver::RestartMediaChannel(absl::optional ssrc) { - RTC_DCHECK(media_channel_); - if (!stopped_ && ssrc_ == ssrc) { - return; - } - worker_thread_->Invoke(RTC_FROM_HERE, [&] { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + + // `stopped_` will be `true` on construction. RestartMediaChannel + // can in this case function like "ensure started" and flip `stopped_` + // to false. + + // TODO(tommi): Can we restart the media channel without blocking? + bool ok = worker_thread_->Invoke(RTC_FROM_HERE, [&, was_stopped = + stopped_] { RTC_DCHECK_RUN_ON(worker_thread_); - if (!stopped_) { + if (!media_channel_) { + // Ignore further negotiations if we've already been stopped and don't + // have an associated media channel. + RTC_DCHECK(was_stopped); + return false; // Can't restart. + } + + if (!was_stopped && ssrc_ == ssrc) { + // Already running with that ssrc. + return true; + } + + // Disconnect from the previous ssrc. + if (!was_stopped) { SetSink(nullptr); } + bool encoded_sink_enabled = saved_encoded_sink_enabled_; SetEncodedSinkEnabled(false); - stopped_ = false; - - ssrc_ = ssrc; + // Set up the new ssrc. 
+ ssrc_ = std::move(ssrc); SetSink(source_->sink()); if (encoded_sink_enabled) { SetEncodedSinkEnabled(true); @@ -161,47 +179,62 @@ void VideoRtpReceiver::RestartMediaChannel(absl::optional ssrc) { media_channel_->SetDepacketizerToDecoderFrameTransformer( ssrc_.value_or(0), frame_transformer_); } + + if (media_channel_ && ssrc_) { + if (frame_decryptor_) { + media_channel_->SetFrameDecryptor(*ssrc_, frame_decryptor_); + } + + media_channel_->SetBaseMinimumPlayoutDelayMs(*ssrc_, delay_.GetMs()); + } + + return true; }); - // Attach any existing frame decryptor to the media channel. - MaybeAttachFrameDecryptorToMediaChannel( - ssrc, worker_thread_, frame_decryptor_, media_channel_, stopped_); - // TODO(bugs.webrtc.org/8694): Stop using 0 to mean unsignalled SSRC - // value. - delay_->OnStart(media_channel_, ssrc.value_or(0)); + if (!ok) + return; + + stopped_ = false; } +// RTC_RUN_ON(worker_thread_) void VideoRtpReceiver::SetSink(rtc::VideoSinkInterface* sink) { - RTC_DCHECK(media_channel_); if (ssrc_) { media_channel_->SetSink(*ssrc_, sink); - return; + } else { + media_channel_->SetDefaultSink(sink); } - media_channel_->SetDefaultSink(sink); } void VideoRtpReceiver::SetupMediaChannel(uint32_t ssrc) { - if (!media_channel_) { - RTC_LOG(LS_ERROR) - << "VideoRtpReceiver::SetupMediaChannel: No video channel exists."; - } + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); RestartMediaChannel(ssrc); } void VideoRtpReceiver::SetupUnsignaledMediaChannel() { - if (!media_channel_) { - RTC_LOG(LS_ERROR) << "VideoRtpReceiver::SetupUnsignaledMediaChannel: No " - "video channel exists."; - } + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); RestartMediaChannel(absl::nullopt); } +uint32_t VideoRtpReceiver::ssrc() const { + RTC_DCHECK_RUN_ON(worker_thread_); + return ssrc_.value_or(0); +} + void VideoRtpReceiver::set_stream_ids(std::vector stream_ids) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); SetStreams(CreateStreamsFromIds(std::move(stream_ids))); } +void 
VideoRtpReceiver::set_transport( + rtc::scoped_refptr dtls_transport) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + dtls_transport_ = std::move(dtls_transport); +} + void VideoRtpReceiver::SetStreams( const std::vector>& streams) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); // Remove remote track from any streams that are going away. for (const auto& existing_stream : streams_) { bool removed = true; @@ -234,6 +267,7 @@ void VideoRtpReceiver::SetStreams( } void VideoRtpReceiver::SetObserver(RtpReceiverObserverInterface* observer) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); observer_ = observer; // Deliver any notifications the observer may have missed by being set late. if (received_first_packet_ && observer_) { @@ -243,40 +277,57 @@ void VideoRtpReceiver::SetObserver(RtpReceiverObserverInterface* observer) { void VideoRtpReceiver::SetJitterBufferMinimumDelay( absl::optional delay_seconds) { - delay_->Set(delay_seconds); + RTC_DCHECK_RUN_ON(worker_thread_); + delay_.Set(delay_seconds); + if (media_channel_ && ssrc_) + media_channel_->SetBaseMinimumPlayoutDelayMs(*ssrc_, delay_.GetMs()); } void VideoRtpReceiver::SetMediaChannel(cricket::MediaChannel* media_channel) { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); RTC_DCHECK(media_channel == nullptr || media_channel->media_type() == media_type()); + + if (stopped_ && !media_channel) + return; + worker_thread_->Invoke(RTC_FROM_HERE, [&] { RTC_DCHECK_RUN_ON(worker_thread_); - bool encoded_sink_enabled = saved_encoded_sink_enabled_; - if (encoded_sink_enabled && media_channel_) { - // Turn off the old sink, if any. 
- SetEncodedSinkEnabled(false); - } + SetMediaChannel_w(media_channel); + }); +} - media_channel_ = static_cast(media_channel); +// RTC_RUN_ON(worker_thread_) +void VideoRtpReceiver::SetMediaChannel_w(cricket::MediaChannel* media_channel) { + if (media_channel == media_channel_) + return; - if (media_channel_) { - if (saved_generate_keyframe_) { - // TODO(bugs.webrtc.org/8694): Stop using 0 to mean unsignalled SSRC - media_channel_->GenerateKeyFrame(ssrc_.value_or(0)); - saved_generate_keyframe_ = false; - } - if (encoded_sink_enabled) { - SetEncodedSinkEnabled(true); - } - if (frame_transformer_) { - media_channel_->SetDepacketizerToDecoderFrameTransformer( - ssrc_.value_or(0), frame_transformer_); - } + bool encoded_sink_enabled = saved_encoded_sink_enabled_; + if (encoded_sink_enabled && media_channel_) { + // Turn off the old sink, if any. + SetEncodedSinkEnabled(false); + } + + media_channel_ = static_cast(media_channel); + + if (media_channel_) { + if (saved_generate_keyframe_) { + // TODO(bugs.webrtc.org/8694): Stop using 0 to mean unsignalled SSRC + media_channel_->GenerateKeyFrame(ssrc_.value_or(0)); + saved_generate_keyframe_ = false; } - }); + if (encoded_sink_enabled) { + SetEncodedSinkEnabled(true); + } + if (frame_transformer_) { + media_channel_->SetDepacketizerToDecoderFrameTransformer( + ssrc_.value_or(0), frame_transformer_); + } + } } void VideoRtpReceiver::NotifyFirstPacketReceived() { + RTC_DCHECK_RUN_ON(&signaling_thread_checker_); if (observer_) { observer_->OnFirstPacketReceived(media_type()); } @@ -284,11 +335,10 @@ void VideoRtpReceiver::NotifyFirstPacketReceived() { } std::vector VideoRtpReceiver::GetSources() const { - if (!media_channel_ || !ssrc_ || stopped_) { - return {}; - } - return worker_thread_->Invoke>( - RTC_FROM_HERE, [&] { return media_channel_->GetSources(*ssrc_); }); + RTC_DCHECK_RUN_ON(worker_thread_); + if (!ssrc_ || !media_channel_) + return std::vector(); + return media_channel_->GetSources(*ssrc_); } void 
VideoRtpReceiver::OnGenerateKeyFrame() { @@ -314,20 +364,21 @@ void VideoRtpReceiver::OnEncodedSinkEnabled(bool enable) { saved_encoded_sink_enabled_ = enable; } +// RTC_RUN_ON(worker_thread_) void VideoRtpReceiver::SetEncodedSinkEnabled(bool enable) { - if (media_channel_) { - if (enable) { - // TODO(bugs.webrtc.org/8694): Stop using 0 to mean unsignalled SSRC - auto source = source_; - media_channel_->SetRecordableEncodedFrameCallback( - ssrc_.value_or(0), - [source = std::move(source)](const RecordableEncodedFrame& frame) { - source->BroadcastRecordableEncodedFrame(frame); - }); - } else { - // TODO(bugs.webrtc.org/8694): Stop using 0 to mean unsignalled SSRC - media_channel_->ClearRecordableEncodedFrameCallback(ssrc_.value_or(0)); - } + if (!media_channel_) + return; + + // TODO(bugs.webrtc.org/8694): Stop using 0 to mean unsignalled SSRC + const auto ssrc = ssrc_.value_or(0); + + if (enable) { + media_channel_->SetRecordableEncodedFrameCallback( + ssrc, [source = source_](const RecordableEncodedFrame& frame) { + source->BroadcastRecordableEncodedFrame(frame); + }); + } else { + media_channel_->ClearRecordableEncodedFrameCallback(ssrc); } } diff --git a/pc/video_rtp_receiver.h b/pc/video_rtp_receiver.h index f66a8a7892..f59db7a840 100644 --- a/pc/video_rtp_receiver.h +++ b/pc/video_rtp_receiver.h @@ -18,26 +18,32 @@ #include "absl/types/optional.h" #include "api/crypto/frame_decryptor_interface.h" +#include "api/dtls_transport_interface.h" #include "api/frame_transformer_interface.h" #include "api/media_stream_interface.h" #include "api/media_types.h" #include "api/rtp_parameters.h" #include "api/rtp_receiver_interface.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" +#include "api/transport/rtp/rtp_source.h" #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" #include "api/video/video_source_interface.h" #include "media/base/media_channel.h" -#include "pc/jitter_buffer_delay_interface.h" +#include 
"pc/jitter_buffer_delay.h" +#include "pc/media_stream_track_proxy.h" #include "pc/rtp_receiver.h" #include "pc/video_rtp_track_source.h" +#include "pc/video_track.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { -class VideoRtpReceiver : public rtc::RefCountedObject, - public VideoRtpTrackSource::Callback { +class VideoRtpReceiver : public RtpReceiverInternal { public: // An SSRC of 0 will create a receiver that will match the first SSRC it // sees. Must be called on signaling thread. @@ -53,23 +59,16 @@ class VideoRtpReceiver : public rtc::RefCountedObject, virtual ~VideoRtpReceiver(); - rtc::scoped_refptr video_track() const { - return track_.get(); - } + rtc::scoped_refptr video_track() const { return track_; } // RtpReceiverInterface implementation rtc::scoped_refptr track() const override { - return track_.get(); - } - rtc::scoped_refptr dtls_transport() const override { - return dtls_transport_; + return track_; } + rtc::scoped_refptr dtls_transport() const override; std::vector stream_ids() const override; std::vector> streams() - const override { - return streams_; - } - + const override; cricket::MediaType media_type() const override { return cricket::MEDIA_TYPE_VIDEO; } @@ -89,15 +88,14 @@ class VideoRtpReceiver : public rtc::RefCountedObject, // RtpReceiverInternal implementation. 
void Stop() override; + void StopAndEndTrack() override; void SetupMediaChannel(uint32_t ssrc) override; void SetupUnsignaledMediaChannel() override; - uint32_t ssrc() const override { return ssrc_.value_or(0); } + uint32_t ssrc() const override; void NotifyFirstPacketReceived() override; void set_stream_ids(std::vector stream_ids) override; void set_transport( - rtc::scoped_refptr dtls_transport) override { - dtls_transport_ = dtls_transport; - } + rtc::scoped_refptr dtls_transport) override; void SetStreams(const std::vector>& streams) override; @@ -116,33 +114,68 @@ class VideoRtpReceiver : public rtc::RefCountedObject, void RestartMediaChannel(absl::optional ssrc); void SetSink(rtc::VideoSinkInterface* sink) RTC_RUN_ON(worker_thread_); + void SetMediaChannel_w(cricket::MediaChannel* media_channel) + RTC_RUN_ON(worker_thread_); // VideoRtpTrackSource::Callback - void OnGenerateKeyFrame() override; - void OnEncodedSinkEnabled(bool enable) override; + void OnGenerateKeyFrame(); + void OnEncodedSinkEnabled(bool enable); + void SetEncodedSinkEnabled(bool enable) RTC_RUN_ON(worker_thread_); + class SourceCallback : public VideoRtpTrackSource::Callback { + public: + explicit SourceCallback(VideoRtpReceiver* receiver) : receiver_(receiver) {} + ~SourceCallback() override = default; + + private: + void OnGenerateKeyFrame() override { receiver_->OnGenerateKeyFrame(); } + void OnEncodedSinkEnabled(bool enable) override { + receiver_->OnEncodedSinkEnabled(enable); + } + + VideoRtpReceiver* const receiver_; + } source_callback_{this}; + + RTC_NO_UNIQUE_ADDRESS SequenceChecker signaling_thread_checker_; rtc::Thread* const worker_thread_; const std::string id_; - cricket::VideoMediaChannel* media_channel_ = nullptr; - absl::optional ssrc_; + // See documentation for `stopped_` below for when a valid media channel + // has been assigned and when this pointer will be null. 
+ cricket::VideoMediaChannel* media_channel_ RTC_GUARDED_BY(worker_thread_) = + nullptr; + absl::optional ssrc_ RTC_GUARDED_BY(worker_thread_); // |source_| is held here to be able to change the state of the source when // the VideoRtpReceiver is stopped. - rtc::scoped_refptr source_; - rtc::scoped_refptr track_; - std::vector> streams_; - bool stopped_ = true; - RtpReceiverObserverInterface* observer_ = nullptr; - bool received_first_packet_ = false; - int attachment_id_ = 0; - rtc::scoped_refptr frame_decryptor_; - rtc::scoped_refptr dtls_transport_; + const rtc::scoped_refptr source_; + const rtc::scoped_refptr> track_; + std::vector> streams_ + RTC_GUARDED_BY(&signaling_thread_checker_); + // `stopped` is state that's used on the signaling thread to indicate whether + // a valid `media_channel_` has been assigned and configured. When an instance + // of VideoRtpReceiver is initially created, `stopped_` is true and will + // remain true until either `SetupMediaChannel` or + // `SetupUnsignaledMediaChannel` is called after assigning a media channel. + // After that, `stopped_` will remain false until `Stop()` is called. + // Note, for checking the state of the class on the worker thread, + // check `media_channel_` instead, as that's the main worker thread state. + bool stopped_ RTC_GUARDED_BY(&signaling_thread_checker_) = true; + RtpReceiverObserverInterface* observer_ + RTC_GUARDED_BY(&signaling_thread_checker_) = nullptr; + bool received_first_packet_ RTC_GUARDED_BY(&signaling_thread_checker_) = + false; + const int attachment_id_; + rtc::scoped_refptr frame_decryptor_ + RTC_GUARDED_BY(worker_thread_); + rtc::scoped_refptr dtls_transport_ + RTC_GUARDED_BY(&signaling_thread_checker_); rtc::scoped_refptr frame_transformer_ RTC_GUARDED_BY(worker_thread_); - // Allows to thread safely change jitter buffer delay. Handles caching cases + // Stores the minimum jitter buffer delay. Handles caching cases // if |SetJitterBufferMinimumDelay| is called before start. 
- rtc::scoped_refptr delay_; + JitterBufferDelay delay_ RTC_GUARDED_BY(worker_thread_); + // Records if we should generate a keyframe when |media_channel_| gets set up // or switched. bool saved_generate_keyframe_ RTC_GUARDED_BY(worker_thread_) = false; diff --git a/pc/video_rtp_receiver_unittest.cc b/pc/video_rtp_receiver_unittest.cc index b3eb6e6e35..3a8099d30f 100644 --- a/pc/video_rtp_receiver_unittest.cc +++ b/pc/video_rtp_receiver_unittest.cc @@ -17,8 +17,10 @@ #include "test/gmock.h" using ::testing::_; +using ::testing::AnyNumber; using ::testing::InSequence; using ::testing::Mock; +using ::testing::NiceMock; using ::testing::SaveArg; using ::testing::StrictMock; @@ -29,9 +31,11 @@ class VideoRtpReceiverTest : public testing::Test { protected: class MockVideoMediaChannel : public cricket::FakeVideoMediaChannel { public: - MockVideoMediaChannel(cricket::FakeVideoEngine* engine, - const cricket::VideoOptions& options) - : FakeVideoMediaChannel(engine, options) {} + MockVideoMediaChannel( + cricket::FakeVideoEngine* engine, + const cricket::VideoOptions& options, + TaskQueueBase* network_thread = rtc::Thread::Current()) + : FakeVideoMediaChannel(engine, options, network_thread) {} MOCK_METHOD(void, SetRecordableEncodedFrameCallback, (uint32_t, std::function), @@ -51,19 +55,26 @@ class VideoRtpReceiverTest : public testing::Test { VideoRtpReceiverTest() : worker_thread_(rtc::Thread::Create()), channel_(nullptr, cricket::VideoOptions()), - receiver_(new VideoRtpReceiver(worker_thread_.get(), - "receiver", - {"stream"})) { + receiver_(rtc::make_ref_counted( + worker_thread_.get(), + std::string("receiver"), + std::vector({"stream"}))) { worker_thread_->Start(); receiver_->SetMediaChannel(&channel_); } + ~VideoRtpReceiverTest() override { + // Clear expectations that tests may have set up before calling Stop(). 
+ Mock::VerifyAndClearExpectations(&channel_); + receiver_->Stop(); + } + webrtc::VideoTrackSourceInterface* Source() { return receiver_->streams()[0]->FindVideoTrack("receiver")->GetSource(); } std::unique_ptr worker_thread_; - MockVideoMediaChannel channel_; + NiceMock channel_; rtc::scoped_refptr receiver_; }; @@ -96,6 +107,10 @@ TEST_F(VideoRtpReceiverTest, // Switching to a new channel should now not cause calls to GenerateKeyFrame. StrictMock channel4(nullptr, cricket::VideoOptions()); receiver_->SetMediaChannel(&channel4); + + // We must call Stop() here since the mock media channels live on the stack + // and `receiver_` still has a pointer to those objects. + receiver_->Stop(); } TEST_F(VideoRtpReceiverTest, EnablesEncodedOutput) { @@ -129,6 +144,10 @@ TEST_F(VideoRtpReceiverTest, DisablesEnablesEncodedOutputOnChannelSwitch) { Source()->RemoveEncodedSink(&sink); StrictMock channel3(nullptr, cricket::VideoOptions()); receiver_->SetMediaChannel(&channel3); + + // We must call Stop() here since the mock media channels live on the stack + // and `receiver_` still has a pointer to those objects. 
+ receiver_->Stop(); } TEST_F(VideoRtpReceiverTest, BroadcastsEncodedFramesWhenEnabled) { diff --git a/pc/video_rtp_track_source.cc b/pc/video_rtp_track_source.cc index 2f15c42b4d..bcfcdcbdf9 100644 --- a/pc/video_rtp_track_source.cc +++ b/pc/video_rtp_track_source.cc @@ -10,6 +10,12 @@ #include "pc/video_rtp_track_source.h" +#include + +#include + +#include "rtc_base/checks.h" + namespace webrtc { VideoRtpTrackSource::VideoRtpTrackSource(Callback* callback) @@ -31,7 +37,7 @@ rtc::VideoSinkInterface* VideoRtpTrackSource::sink() { void VideoRtpTrackSource::BroadcastRecordableEncodedFrame( const RecordableEncodedFrame& frame) const { - rtc::CritScope cs(&mu_); + MutexLock lock(&mu_); for (rtc::VideoSinkInterface* sink : encoded_sinks_) { sink->OnFrame(frame); } @@ -54,7 +60,7 @@ void VideoRtpTrackSource::AddEncodedSink( RTC_DCHECK(sink); size_t size = 0; { - rtc::CritScope cs(&mu_); + MutexLock lock(&mu_); RTC_DCHECK(std::find(encoded_sinks_.begin(), encoded_sinks_.end(), sink) == encoded_sinks_.end()); encoded_sinks_.push_back(sink); @@ -70,7 +76,7 @@ void VideoRtpTrackSource::RemoveEncodedSink( RTC_DCHECK_RUN_ON(&worker_sequence_checker_); size_t size = 0; { - rtc::CritScope cs(&mu_); + MutexLock lock(&mu_); auto it = std::find(encoded_sinks_.begin(), encoded_sinks_.end(), sink); if (it != encoded_sinks_.end()) { encoded_sinks_.erase(it); diff --git a/pc/video_rtp_track_source.h b/pc/video_rtp_track_source.h index e62cda70c3..47b7bc9eef 100644 --- a/pc/video_rtp_track_source.h +++ b/pc/video_rtp_track_source.h @@ -13,10 +13,17 @@ #include +#include "api/sequence_checker.h" +#include "api/video/recordable_encoded_frame.h" +#include "api/video/video_frame.h" +#include "api/video/video_sink_interface.h" +#include "api/video/video_source_interface.h" #include "media/base/video_broadcaster.h" #include "pc/video_track_source.h" -#include "rtc_base/callback.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/constructor_magic.h" +#include 
"rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/thread_annotations.h" namespace webrtc { @@ -67,12 +74,12 @@ class VideoRtpTrackSource : public VideoTrackSource { rtc::VideoSinkInterface* sink) override; private: - SequenceChecker worker_sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_sequence_checker_; // |broadcaster_| is needed since the decoder can only handle one sink. // It might be better if the decoder can handle multiple sinks and consider // the VideoSinkWants. rtc::VideoBroadcaster broadcaster_; - rtc::CriticalSection mu_; + mutable Mutex mu_; std::vector*> encoded_sinks_ RTC_GUARDED_BY(mu_); Callback* callback_ RTC_GUARDED_BY(worker_sequence_checker_); diff --git a/pc/video_rtp_track_source_unittest.cc b/pc/video_rtp_track_source_unittest.cc index ea1b4cacf8..5666b77d5f 100644 --- a/pc/video_rtp_track_source_unittest.cc +++ b/pc/video_rtp_track_source_unittest.cc @@ -30,9 +30,7 @@ class MockSink : public rtc::VideoSinkInterface { rtc::scoped_refptr MakeSource( VideoRtpTrackSource::Callback* callback) { - rtc::scoped_refptr source( - new rtc::RefCountedObject(callback)); - return source; + return rtc::make_ref_counted(callback); } TEST(VideoRtpTrackSourceTest, CreatesWithRemoteAtttributeSet) { diff --git a/pc/video_track.cc b/pc/video_track.cc index 55356e7046..d0246faa87 100644 --- a/pc/video_track.cc +++ b/pc/video_track.cc @@ -11,9 +11,11 @@ #include "pc/video_track.h" #include +#include #include #include "api/notifier.h" +#include "api/sequence_checker.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/ref_counted_object.h" @@ -27,10 +29,16 @@ VideoTrack::VideoTrack(const std::string& label, worker_thread_(worker_thread), video_source_(video_source), content_hint_(ContentHint::kNone) { + RTC_DCHECK_RUN_ON(&signaling_thread_); + // Detach the thread checker for VideoSourceBaseGuarded since we'll make calls + // to VideoSourceBaseGuarded on the 
worker thread, but we're currently on the + // signaling thread. + source_sequence_.Detach(); video_source_->RegisterObserver(this); } VideoTrack::~VideoTrack() { + RTC_DCHECK_RUN_ON(&signaling_thread_); video_source_->UnregisterObserver(this); } @@ -42,26 +50,31 @@ std::string VideoTrack::kind() const { // thread. void VideoTrack::AddOrUpdateSink(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) { - RTC_DCHECK(worker_thread_->IsCurrent()); - VideoSourceBase::AddOrUpdateSink(sink, wants); + RTC_DCHECK_RUN_ON(worker_thread_); + VideoSourceBaseGuarded::AddOrUpdateSink(sink, wants); rtc::VideoSinkWants modified_wants = wants; modified_wants.black_frames = !enabled(); video_source_->AddOrUpdateSink(sink, modified_wants); } void VideoTrack::RemoveSink(rtc::VideoSinkInterface* sink) { - RTC_DCHECK(worker_thread_->IsCurrent()); - VideoSourceBase::RemoveSink(sink); + RTC_DCHECK_RUN_ON(worker_thread_); + VideoSourceBaseGuarded::RemoveSink(sink); video_source_->RemoveSink(sink); } +VideoTrackSourceInterface* VideoTrack::GetSource() const { + // Callable from any thread. 
+ return video_source_.get(); +} + VideoTrackInterface::ContentHint VideoTrack::content_hint() const { - RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); return content_hint_; } void VideoTrack::set_content_hint(ContentHint hint) { - RTC_DCHECK_RUN_ON(&signaling_thread_checker_); + RTC_DCHECK_RUN_ON(worker_thread_); if (content_hint_ == hint) return; content_hint_ = hint; @@ -69,34 +82,43 @@ void VideoTrack::set_content_hint(ContentHint hint) { } bool VideoTrack::set_enabled(bool enable) { - RTC_DCHECK(signaling_thread_checker_.IsCurrent()); - worker_thread_->Invoke(RTC_FROM_HERE, [enable, this] { - RTC_DCHECK(worker_thread_->IsCurrent()); - for (auto& sink_pair : sink_pairs()) { - rtc::VideoSinkWants modified_wants = sink_pair.wants; - modified_wants.black_frames = !enable; - video_source_->AddOrUpdateSink(sink_pair.sink, modified_wants); - } - }); + RTC_DCHECK_RUN_ON(worker_thread_); + for (auto& sink_pair : sink_pairs()) { + rtc::VideoSinkWants modified_wants = sink_pair.wants; + modified_wants.black_frames = !enable; + video_source_->AddOrUpdateSink(sink_pair.sink, modified_wants); + } return MediaStreamTrack::set_enabled(enable); } +bool VideoTrack::enabled() const { + RTC_DCHECK_RUN_ON(worker_thread_); + return MediaStreamTrack::enabled(); +} + +MediaStreamTrackInterface::TrackState VideoTrack::state() const { + RTC_DCHECK_RUN_ON(worker_thread_); + return MediaStreamTrack::state(); +} + void VideoTrack::OnChanged() { - RTC_DCHECK(signaling_thread_checker_.IsCurrent()); - if (video_source_->state() == MediaSourceInterface::kEnded) { - set_state(kEnded); - } else { - set_state(kLive); - } + RTC_DCHECK_RUN_ON(&signaling_thread_); + worker_thread_->Invoke( + RTC_FROM_HERE, [this, state = video_source_->state()]() { + // TODO(tommi): Calling set_state() this way isn't ideal since we're + // currently blocking the signaling thread and set_state() may + // internally fire notifications via `FireOnChanged()` which may further + // 
amplify the blocking effect on the signaling thread. + rtc::Thread::ScopedDisallowBlockingCalls no_blocking_calls; + set_state(state == MediaSourceInterface::kEnded ? kEnded : kLive); + }); } rtc::scoped_refptr VideoTrack::Create( const std::string& id, VideoTrackSourceInterface* source, rtc::Thread* worker_thread) { - rtc::RefCountedObject* track = - new rtc::RefCountedObject(id, source, worker_thread); - return track; + return rtc::make_ref_counted(id, source, worker_thread); } } // namespace webrtc diff --git a/pc/video_track.h b/pc/video_track.h index 90e0758a6c..e840c8097f 100644 --- a/pc/video_track.h +++ b/pc/video_track.h @@ -14,20 +14,20 @@ #include #include "api/media_stream_interface.h" +#include "api/media_stream_track.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" #include "api/video/video_source_interface.h" #include "media/base/video_source_base.h" -#include "pc/media_stream_track.h" #include "rtc_base/thread.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" namespace webrtc { class VideoTrack : public MediaStreamTrack, - public rtc::VideoSourceBase, + public rtc::VideoSourceBaseGuarded, public ObserverInterface { public: static rtc::scoped_refptr Create( @@ -38,13 +38,13 @@ class VideoTrack : public MediaStreamTrack, void AddOrUpdateSink(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) override; void RemoveSink(rtc::VideoSinkInterface* sink) override; + VideoTrackSourceInterface* GetSource() const override; - VideoTrackSourceInterface* GetSource() const override { - return video_source_.get(); - } ContentHint content_hint() const override; void set_content_hint(ContentHint hint) override; bool set_enabled(bool enable) override; + bool enabled() const override; + MediaStreamTrackInterface::TrackState state() const override; std::string kind() const override; protected: @@ -57,10 +57,10 @@ class 
VideoTrack : public MediaStreamTrack, // Implements ObserverInterface. Observes |video_source_| state. void OnChanged() override; + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker signaling_thread_; rtc::Thread* const worker_thread_; - rtc::ThreadChecker signaling_thread_checker_; - rtc::scoped_refptr video_source_; - ContentHint content_hint_ RTC_GUARDED_BY(signaling_thread_checker_); + const rtc::scoped_refptr video_source_; + ContentHint content_hint_ RTC_GUARDED_BY(worker_thread_); }; } // namespace webrtc diff --git a/pc/video_track_source.cc b/pc/video_track_source.cc index f45d44aa32..d15eaaf43c 100644 --- a/pc/video_track_source.cc +++ b/pc/video_track_source.cc @@ -15,7 +15,7 @@ namespace webrtc { VideoTrackSource::VideoTrackSource(bool remote) - : state_(kInitializing), remote_(remote) { + : state_(kLive), remote_(remote) { worker_thread_checker_.Detach(); } diff --git a/pc/video_track_source.h b/pc/video_track_source.h index 27331eac4f..4a29381c4c 100644 --- a/pc/video_track_source.h +++ b/pc/video_track_source.h @@ -11,12 +11,16 @@ #ifndef PC_VIDEO_TRACK_SOURCE_H_ #define PC_VIDEO_TRACK_SOURCE_H_ +#include "absl/types/optional.h" #include "api/media_stream_interface.h" #include "api/notifier.h" +#include "api/sequence_checker.h" +#include "api/video/recordable_encoded_frame.h" +#include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" +#include "api/video/video_source_interface.h" #include "media/base/media_channel.h" #include "rtc_base/system/rtc_export.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -52,7 +56,7 @@ class RTC_EXPORT VideoTrackSource : public Notifier { virtual rtc::VideoSourceInterface* source() = 0; private: - rtc::ThreadChecker worker_thread_checker_; + SequenceChecker worker_thread_checker_; SourceState state_; const bool remote_; }; diff --git a/pc/video_track_source_proxy.cc b/pc/video_track_source_proxy.cc new file mode 100644 index 0000000000..309c1f20f8 --- /dev/null +++ 
b/pc/video_track_source_proxy.cc @@ -0,0 +1,25 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "pc/video_track_source_proxy.h" + +#include "api/media_stream_interface.h" +#include "api/video_track_source_proxy_factory.h" + +namespace webrtc { + +rtc::scoped_refptr CreateVideoTrackSourceProxy( + rtc::Thread* signaling_thread, + rtc::Thread* worker_thread, + VideoTrackSourceInterface* source) { + return VideoTrackSourceProxy::Create(signaling_thread, worker_thread, source); +} + +} // namespace webrtc diff --git a/pc/video_track_source_proxy.h b/pc/video_track_source_proxy.h new file mode 100644 index 0000000000..8914dd0525 --- /dev/null +++ b/pc/video_track_source_proxy.h @@ -0,0 +1,49 @@ +/* + * Copyright 2012 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef PC_VIDEO_TRACK_SOURCE_PROXY_H_ +#define PC_VIDEO_TRACK_SOURCE_PROXY_H_ + +#include "api/media_stream_interface.h" +#include "pc/proxy.h" + +namespace webrtc { + +// Makes sure the real VideoTrackSourceInterface implementation is destroyed on +// the signaling thread and marshals all method calls to the signaling thread. +// TODO(deadbeef): Move this to .cc file. What threads methods are called on is +// an implementation detail. 
+BEGIN_PROXY_MAP(VideoTrackSource) +PROXY_PRIMARY_THREAD_DESTRUCTOR() +PROXY_CONSTMETHOD0(SourceState, state) +BYPASS_PROXY_CONSTMETHOD0(bool, remote) +BYPASS_PROXY_CONSTMETHOD0(bool, is_screencast) +PROXY_CONSTMETHOD0(absl::optional, needs_denoising) +PROXY_METHOD1(bool, GetStats, Stats*) +PROXY_SECONDARY_METHOD2(void, + AddOrUpdateSink, + rtc::VideoSinkInterface*, + const rtc::VideoSinkWants&) +PROXY_SECONDARY_METHOD1(void, RemoveSink, rtc::VideoSinkInterface*) +PROXY_METHOD1(void, RegisterObserver, ObserverInterface*) +PROXY_METHOD1(void, UnregisterObserver, ObserverInterface*) +PROXY_CONSTMETHOD0(bool, SupportsEncodedOutput) +PROXY_SECONDARY_METHOD0(void, GenerateKeyFrame) +PROXY_SECONDARY_METHOD1(void, + AddEncodedSink, + rtc::VideoSinkInterface*) +PROXY_SECONDARY_METHOD1(void, + RemoveEncodedSink, + rtc::VideoSinkInterface*) +END_PROXY_MAP(VideoTrackSource) + +} // namespace webrtc + +#endif // PC_VIDEO_TRACK_SOURCE_PROXY_H_ diff --git a/pc/video_track_unittest.cc b/pc/video_track_unittest.cc index f86bec8321..ab094ec487 100644 --- a/pc/video_track_unittest.cc +++ b/pc/video_track_unittest.cc @@ -32,7 +32,7 @@ class VideoTrackTest : public ::testing::Test { public: VideoTrackTest() : frame_source_(640, 480, rtc::kNumMicrosecsPerSec / 30) { static const char kVideoTrackId[] = "track_id"; - video_track_source_ = new rtc::RefCountedObject( + video_track_source_ = rtc::make_ref_counted( /*is_screencast=*/false); video_track_ = VideoTrack::Create(kVideoTrackId, video_track_source_, rtc::Thread::Current()); diff --git a/pc/webrtc_sdp.cc b/pc/webrtc_sdp.cc index f77327faf1..379b2f30c2 100644 --- a/pc/webrtc_sdp.cc +++ b/pc/webrtc_sdp.cc @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -24,29 +25,46 @@ #include #include "absl/algorithm/container.h" -#include "absl/strings/match.h" #include "api/candidate.h" #include "api/crypto_params.h" #include "api/jsep_ice_candidate.h" #include "api/jsep_session_description.h" #include 
"api/media_types.h" // for RtpExtension +#include "absl/types/optional.h" +#include "api/rtc_error.h" #include "api/rtp_parameters.h" +#include "api/rtp_transceiver_direction.h" #include "media/base/codec.h" #include "media/base/media_constants.h" +#include "media/base/rid_description.h" #include "media/base/rtp_utils.h" +#include "media/base/stream_params.h" #include "media/sctp/sctp_transport_internal.h" +#include "p2p/base/candidate_pair_interface.h" +#include "p2p/base/ice_transport_internal.h" #include "p2p/base/p2p_constants.h" #include "p2p/base/port.h" +#include "p2p/base/port_interface.h" +#include "p2p/base/transport_description.h" +#include "p2p/base/transport_info.h" +#include "pc/media_protocol_names.h" #include "pc/media_session.h" #include "pc/sdp_serializer.h" +#include "pc/session_description.h" +#include "pc/simulcast_description.h" #include "rtc_base/arraysize.h" #include "rtc_base/checks.h" +#include "rtc_base/helpers.h" +#include "rtc_base/ip_address.h" #include "rtc_base/logging.h" -#include "rtc_base/message_digest.h" +#include "rtc_base/net_helper.h" +#include "rtc_base/network_constants.h" +#include "rtc_base/socket_address.h" +#include "rtc_base/ssl_fingerprint.h" +#include "rtc_base/string_encode.h" #include "rtc_base/string_utils.h" #include "rtc_base/strings/string_builder.h" -#include "rtc_base/third_party/base64/base64.h" using cricket::AudioContentDescription; using cricket::Candidate; @@ -55,14 +73,15 @@ using cricket::ContentInfo; using cricket::CryptoParams; using cricket::ICE_CANDIDATE_COMPONENT_RTCP; using cricket::ICE_CANDIDATE_COMPONENT_RTP; +using cricket::kApplicationSpecificBandwidth; using cricket::kCodecParamMaxPTime; using cricket::kCodecParamMinPTime; using cricket::kCodecParamPTime; +using cricket::kTransportSpecificBandwidth; using cricket::MediaContentDescription; using cricket::MediaProtocolType; using cricket::MediaType; using cricket::RidDescription; -using cricket::RtpDataContentDescription; using 
cricket::RtpHeaderExtensions; using cricket::SctpDataContentDescription; using cricket::SimulcastDescription; @@ -73,13 +92,10 @@ using cricket::StreamParams; using cricket::StreamParamsVec; using cricket::TransportDescription; using cricket::TransportInfo; +using cricket::UnsupportedContentDescription; using cricket::VideoContentDescription; using rtc::SocketAddress; -namespace cricket { -class SessionDescription; -} - // TODO(deadbeef): Switch to using anonymous namespace rather than declaring // everything "static". namespace webrtc { @@ -90,6 +106,15 @@ namespace webrtc { // the form: // = // where MUST be exactly one case-significant character. + +// Legal characters in a value (RFC 4566 section 9): +// token-char = %x21 / %x23-27 / %x2A-2B / %x2D-2E / %x30-39 +// / %x41-5A / %x5E-7E +static const char kLegalTokenCharacters[] = + "!#$%&'*+-." // %x21, %x23-27, %x2A-2B, %x2D-2E + "0123456789" // %x30-39 + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" // %x41-5A + "^_`abcdefghijklmnopqrstuvwxyz{|}~"; // %x5E-7E static const int kLinePrefixLength = 2; // Length of = static const char kLineTypeVersion = 'v'; static const char kLineTypeOrigin = 'o'; @@ -224,17 +249,9 @@ static const char kMediaPortRejected[] = "0"; // Use IPV4 per default. static const char kDummyAddress[] = "0.0.0.0"; static const char kDummyPort[] = "9"; -// RFC 3556 -static const char kApplicationSpecificMaximum[] = "AS"; static const char kDefaultSctpmapProtocol[] = "webrtc-datachannel"; -// This is a non-standardized setting for plugin transports. -static const char kOpaqueTransportParametersLine[] = "x-opaque"; - -// This is a non-standardized setting for plugin transports. -static const char kAltProtocolLine[] = "x-alt-protocol"; - // RTP payload type is in the 0-127 range. Use -1 to indicate "all" payload // types. 
const int kWildcardPayloadType = -1; @@ -282,9 +299,6 @@ static bool ParseSessionDescription(const std::string& message, rtc::SocketAddress* connection_addr, cricket::SessionDescription* desc, SdpParseError* error); -static bool ParseGroupAttribute(const std::string& line, - cricket::SessionDescription* desc, - SdpParseError* error); static bool ParseMediaDescription( const std::string& message, const TransportDescription& session_td, @@ -308,6 +322,9 @@ static bool ParseContent( TransportDescription* transport, std::vector>* candidates, SdpParseError* error); +static bool ParseGroupAttribute(const std::string& line, + cricket::SessionDescription* desc, + SdpParseError* error); static bool ParseSsrcAttribute(const std::string& line, SsrcInfoVec* ssrc_infos, int* msid_signaling, @@ -523,25 +540,6 @@ static void InitAttrLine(const std::string& attribute, rtc::StringBuilder* os) { InitLine(kLineTypeAttributes, attribute, os); } -// Adds an x-otp SDP attribute line based on opaque transport parameters. -static void AddOpaqueTransportLine( - const cricket::OpaqueTransportParameters params, - std::string* message) { - rtc::StringBuilder os; - InitAttrLine(kOpaqueTransportParametersLine, &os); - os << kSdpDelimiterColon << params.protocol << kSdpDelimiterColon - << rtc::Base64::Encode(params.parameters); - AddLine(os.str(), message); -} - -static void AddAltProtocolLine(const std::string& protocol, - std::string* message) { - rtc::StringBuilder os; - InitAttrLine(kAltProtocolLine, &os); - os << kSdpDelimiterColon << protocol; - AddLine(os.str(), message); -} - // Writes a SDP attribute line based on |attribute| and |value| to |message|. 
static void AddAttributeLine(const std::string& attribute, int value, @@ -629,6 +627,22 @@ static bool GetValue(const std::string& message, return true; } +// Get a single [token] from : +static bool GetSingleTokenValue(const std::string& message, + const std::string& attribute, + std::string* value, + SdpParseError* error) { + if (!GetValue(message, attribute, value, error)) { + return false; + } + if (strspn(value->c_str(), kLegalTokenCharacters) != value->size()) { + rtc::StringBuilder description; + description << "Illegal character found in the value of " << attribute; + return ParseFailed(message, description.str(), error); + } + return true; +} + static bool CaseInsensitiveFind(std::string str1, std::string str2) { absl::c_transform(str1, str1.begin(), ::tolower); absl::c_transform(str2, str2.begin(), ::tolower); @@ -886,11 +900,11 @@ std::string SdpSerialize(const JsepSessionDescription& jdesc) { // Time Description. AddLine(kTimeDescription, &message); - // Group - if (desc->HasGroup(cricket::GROUP_TYPE_BUNDLE)) { + // BUNDLE Groups + std::vector groups = + desc->GetGroupsByName(cricket::GROUP_TYPE_BUNDLE); + for (const cricket::ContentGroup* group : groups) { std::string group_line = kAttrGroup; - const cricket::ContentGroup* group = - desc->GetGroupByName(cricket::GROUP_TYPE_BUNDLE); RTC_DCHECK(group != NULL); for (const std::string& content_name : group->content_names()) { group_line.append(" "); @@ -1371,30 +1385,24 @@ void BuildMediaDescription(const ContentInfo* content_info, // RFC 4566 // m= // fmt is a list of payload type numbers that MAY be used in the session. 
- const char* type = NULL; - if (media_type == cricket::MEDIA_TYPE_AUDIO) - type = kMediaTypeAudio; - else if (media_type == cricket::MEDIA_TYPE_VIDEO) - type = kMediaTypeVideo; - else if (media_type == cricket::MEDIA_TYPE_DATA) - type = kMediaTypeData; - else - RTC_NOTREACHED(); - + std::string type; std::string fmt; if (media_type == cricket::MEDIA_TYPE_VIDEO) { + type = kMediaTypeVideo; const VideoContentDescription* video_desc = media_desc->as_video(); for (const cricket::VideoCodec& codec : video_desc->codecs()) { fmt.append(" "); fmt.append(rtc::ToString(codec.id)); } } else if (media_type == cricket::MEDIA_TYPE_AUDIO) { + type = kMediaTypeAudio; const AudioContentDescription* audio_desc = media_desc->as_audio(); for (const cricket::AudioCodec& codec : audio_desc->codecs()) { fmt.append(" "); fmt.append(rtc::ToString(codec.id)); } } else if (media_type == cricket::MEDIA_TYPE_DATA) { + type = kMediaTypeData; const cricket::SctpDataContentDescription* sctp_data_desc = media_desc->as_sctp(); if (sctp_data_desc) { @@ -1406,13 +1414,14 @@ void BuildMediaDescription(const ContentInfo* content_info, fmt.append(kDefaultSctpmapProtocol); } } else { - const RtpDataContentDescription* rtp_data_desc = - media_desc->as_rtp_data(); - for (const cricket::RtpDataCodec& codec : rtp_data_desc->codecs()) { - fmt.append(" "); - fmt.append(rtc::ToString(codec.id)); - } + RTC_NOTREACHED() << "Data description without SCTP"; } + } else if (media_type == cricket::MEDIA_TYPE_UNSUPPORTED) { + const UnsupportedContentDescription* unsupported_desc = + media_desc->as_unsupported(); + type = unsupported_desc->media_type(); + } else { + RTC_NOTREACHED(); } // The fmt must never be empty. If no codecs are found, set the fmt attribute // to 0. 
@@ -1461,10 +1470,18 @@ void BuildMediaDescription(const ContentInfo* content_info, AddLine(os.str(), message); // RFC 4566 - // b=AS: - if (media_desc->bandwidth() >= 1000) { - InitLine(kLineTypeSessionBandwidth, kApplicationSpecificMaximum, &os); - os << kSdpDelimiterColon << (media_desc->bandwidth() / 1000); + // b=AS: or + // b=TIAS: + int bandwidth = media_desc->bandwidth(); + std::string bandwidth_type = media_desc->bandwidth_type(); + if (bandwidth_type == kApplicationSpecificBandwidth && bandwidth >= 1000) { + InitLine(kLineTypeSessionBandwidth, bandwidth_type, &os); + bandwidth /= 1000; + os << kSdpDelimiterColon << bandwidth; + AddLine(os.str(), message); + } else if (bandwidth_type == kTransportSpecificBandwidth && bandwidth > 0) { + InitLine(kLineTypeSessionBandwidth, bandwidth_type, &os); + os << kSdpDelimiterColon << bandwidth; AddLine(os.str(), message); } @@ -1532,15 +1549,6 @@ void BuildMediaDescription(const ContentInfo* content_info, AddLine(os.str(), message); } } - - if (transport_info->description.opaque_parameters) { - AddOpaqueTransportLine(*transport_info->description.opaque_parameters, - message); - } - } - - if (media_desc->alt_protocol()) { - AddAltProtocolLine(*media_desc->alt_protocol(), message); } // RFC 3388 @@ -1594,6 +1602,8 @@ void BuildRtpContentAttributes(const MediaContentDescription* media_desc, // RFC 3264 // a=sendrecv || a=sendonly || a=sendrecv || a=inactive switch (media_desc->direction()) { + // Special case that for sdp purposes should be treated same as inactive. + case RtpTransceiverDirection::kStopped: case RtpTransceiverDirection::kInactive: InitAttrLine(kAttributeInactive, &os); break; @@ -1606,9 +1616,7 @@ void BuildRtpContentAttributes(const MediaContentDescription* media_desc, case RtpTransceiverDirection::kSendRecv: InitAttrLine(kAttributeSendRecv, &os); break; - case RtpTransceiverDirection::kStopped: default: - // kStopped shouldn't be used in signalling. 
RTC_NOTREACHED(); InitAttrLine(kAttributeSendRecv, &os); break; @@ -1796,8 +1804,13 @@ void WriteRtcpFbHeader(int payload_type, rtc::StringBuilder* os) { void WriteFmtpParameter(const std::string& parameter_name, const std::string& parameter_value, rtc::StringBuilder* os) { - // fmtp parameters: |parameter_name|=|parameter_value| - *os << parameter_name << kSdpDelimiterEqual << parameter_value; + if (parameter_name == "") { + // RFC 2198 and RFC 4733 don't use key-value pairs. + *os << parameter_value; + } else { + // fmtp parameters: |parameter_name|=|parameter_value| + *os << parameter_name << kSdpDelimiterEqual << parameter_value; + } } bool IsFmtpParam(const std::string& name) { @@ -1953,19 +1966,6 @@ void BuildRtpMap(const MediaContentDescription* media_desc, ptime = std::max(ptime, max_minptime); AddAttributeLine(kCodecParamPTime, ptime, message); } - } else if (media_type == cricket::MEDIA_TYPE_DATA) { - if (media_desc->as_rtp_data()) { - for (const cricket::RtpDataCodec& codec : - media_desc->as_rtp_data()->codecs()) { - // RFC 4566 - // a=rtpmap: / - // [/] - InitAttrLine(kAttributeRtpmap, &os); - os << kSdpDelimiterColon << codec.id << " " << codec.name << "/" - << codec.clockrate; - AddLine(os.str(), message); - } - } } } @@ -2105,32 +2105,6 @@ bool ParseConnectionData(const std::string& line, return true; } -bool ParseOpaqueTransportLine(const std::string& line, - std::string* protocol, - std::string* transport_parameters, - SdpParseError* error) { - std::string value; - if (!GetValue(line, kOpaqueTransportParametersLine, &value, error)) { - return false; - } - std::string tmp_parameters; - if (!rtc::tokenize_first(value, kSdpDelimiterColonChar, protocol, - &tmp_parameters)) { - return ParseFailedGetValue(line, kOpaqueTransportParametersLine, error); - } - if (!rtc::Base64::Decode(tmp_parameters, rtc::Base64::DO_STRICT, - transport_parameters, nullptr)) { - return ParseFailedGetValue(line, kOpaqueTransportParametersLine, error); - } - return true; -} - 
-bool ParseAltProtocolLine(const std::string& line, - std::string* protocol, - SdpParseError* error) { - return GetValue(line, kAltProtocolLine, protocol, error); -} - bool ParseSessionDescription(const std::string& message, size_t* pos, std::string* session_id, @@ -2319,12 +2293,6 @@ static bool ParseFingerprintAttribute( const std::string& line, std::unique_ptr* fingerprint, SdpParseError* error) { - if (!IsLineType(line, kLineTypeAttributes) || - !HasAttribute(line, kAttributeFingerprint)) { - return ParseFailedExpectLine(line, 0, kLineTypeAttributes, - kAttributeFingerprint, error); - } - std::vector fields; rtc::split(line.substr(kLinePrefixLength), kSdpDelimiterSpaceChar, &fields); const size_t expected_fields = 2; @@ -2622,6 +2590,7 @@ static std::unique_ptr ParseContentDescription( std::vector>* candidates, webrtc::SdpParseError* error) { auto media_desc = std::make_unique(); + media_desc->set_extmap_allow_mixed_enum(MediaContentDescription::kNo); if (!ParseContent(message, media_type, mline_index, protocol, payload_types, pos, content_name, bundle_only, msid_signaling, media_desc.get(), transport, candidates, error)) { @@ -2685,18 +2654,12 @@ bool ParseMediaDescription( if (!rtc::FromString(fields[1], &port) || !IsValidPort(port)) { return ParseFailed(line, "The port number is invalid", error); } - std::string protocol = fields[2]; + const std::string& protocol = fields[2]; // std::vector payload_types; if (cricket::IsRtpProtocol(protocol)) { for (size_t j = 3; j < fields.size(); ++j) { - // TODO(wu): Remove when below bug is fixed. 
- // https://bugzilla.mozilla.org/show_bug.cgi?id=996329 - if (fields[j].empty() && j == fields.size() - 1) { - continue; - } - int pl = 0; if (!GetPayloadTypeFromString(line, fields[j], &pl, error)) { return false; @@ -2716,17 +2679,22 @@ bool ParseMediaDescription( std::string content_name; bool bundle_only = false; int section_msid_signaling = 0; - if (HasAttribute(line, kMediaTypeVideo)) { + const std::string& media_type = fields[0]; + if ((media_type == kMediaTypeVideo || media_type == kMediaTypeAudio) && + !cricket::IsRtpProtocol(protocol)) { + return ParseFailed(line, "Unsupported protocol for media type", error); + } + if (media_type == kMediaTypeVideo) { content = ParseContentDescription( message, cricket::MEDIA_TYPE_VIDEO, mline_index, protocol, payload_types, pos, &content_name, &bundle_only, §ion_msid_signaling, &transport, candidates, error); - } else if (HasAttribute(line, kMediaTypeAudio)) { + } else if (media_type == kMediaTypeAudio) { content = ParseContentDescription( message, cricket::MEDIA_TYPE_AUDIO, mline_index, protocol, payload_types, pos, &content_name, &bundle_only, §ion_msid_signaling, &transport, candidates, error); - } else if (HasAttribute(line, kMediaTypeData)) { + } else if (media_type == kMediaTypeData) { if (cricket::IsDtlsSctp(protocol)) { // The draft-03 format is: // m=application DTLS/SCTP ... 
@@ -2753,17 +2721,21 @@ bool ParseMediaDescription( data_desc->set_protocol(protocol); content = std::move(data_desc); } else { - // RTP - std::unique_ptr data_desc = - ParseContentDescription( - message, cricket::MEDIA_TYPE_DATA, mline_index, protocol, - payload_types, pos, &content_name, &bundle_only, - §ion_msid_signaling, &transport, candidates, error); - content = std::move(data_desc); + return ParseFailed(line, "Unsupported protocol for media type", error); } } else { RTC_LOG(LS_WARNING) << "Unsupported media type: " << line; - continue; + auto unsupported_desc = + std::make_unique(media_type); + if (!ParseContent(message, cricket::MEDIA_TYPE_UNSUPPORTED, mline_index, + protocol, payload_types, pos, &content_name, + &bundle_only, §ion_msid_signaling, + unsupported_desc.get(), &transport, candidates, + error)) { + return false; + } + unsupported_desc->set_protocol(protocol); + content = std::move(unsupported_desc); } if (!content.get()) { // ParseContentDescription returns NULL if failed. @@ -2791,7 +2763,9 @@ bool ParseMediaDescription( content_rejected = port_rejected; } - if (cricket::IsRtpProtocol(protocol) && !content->as_sctp()) { + if (content->as_unsupported()) { + content_rejected = true; + } else if (cricket::IsRtpProtocol(protocol) && !content->as_sctp()) { content->set_protocol(protocol); // Set the extmap. 
if (!session_extmaps.empty() && @@ -3043,45 +3017,46 @@ bool ParseContent(const std::string& message, // b=* (zero or more bandwidth information lines) if (IsLineType(line, kLineTypeSessionBandwidth)) { std::string bandwidth; - if (HasAttribute(line, kApplicationSpecificMaximum)) { - if (!GetValue(line, kApplicationSpecificMaximum, &bandwidth, error)) { - return false; - } else { - int b = 0; - if (!GetValueFromString(line, bandwidth, &b, error)) { - return false; - } - // TODO(deadbeef): Historically, applications may be setting a value - // of -1 to mean "unset any previously set bandwidth limit", even - // though ommitting the "b=AS" entirely will do just that. Once we've - // transitioned applications to doing the right thing, it would be - // better to treat this as a hard error instead of just ignoring it. - if (b == -1) { - RTC_LOG(LS_WARNING) - << "Ignoring \"b=AS:-1\"; will be treated as \"no " - "bandwidth limit\"."; - continue; - } - if (b < 0) { - return ParseFailed(line, "b=AS value can't be negative.", error); - } - // We should never use more than the default bandwidth for RTP-based - // data channels. Don't allow SDP to set the bandwidth, because - // that would give JS the opportunity to "break the Internet". - // See: https://code.google.com/p/chromium/issues/detail?id=280726 - if (media_type == cricket::MEDIA_TYPE_DATA && - cricket::IsRtpProtocol(protocol) && - b > cricket::kDataMaxBandwidth / 1000) { - rtc::StringBuilder description; - description << "RTP-based data channels may not send more than " - << cricket::kDataMaxBandwidth / 1000 << "kbps."; - return ParseFailed(line, description.str(), error); - } - // Prevent integer overflow. 
- b = std::min(b, INT_MAX / 1000); - media_desc->set_bandwidth(b * 1000); - } + std::string bandwidth_type; + if (!rtc::tokenize_first(line.substr(kLinePrefixLength), + kSdpDelimiterColonChar, &bandwidth_type, + &bandwidth)) { + return ParseFailed( + line, + "b= syntax error, does not match b=:.", + error); + } + if (!(bandwidth_type == kApplicationSpecificBandwidth || + bandwidth_type == kTransportSpecificBandwidth)) { + // Ignore unknown bandwidth types. + continue; + } + int b = 0; + if (!GetValueFromString(line, bandwidth, &b, error)) { + return false; + } + // TODO(deadbeef): Historically, applications may be setting a value + // of -1 to mean "unset any previously set bandwidth limit", even + // though ommitting the "b=AS" entirely will do just that. Once we've + // transitioned applications to doing the right thing, it would be + // better to treat this as a hard error instead of just ignoring it. + if (bandwidth_type == kApplicationSpecificBandwidth && b == -1) { + RTC_LOG(LS_WARNING) << "Ignoring \"b=AS:-1\"; will be treated as \"no " + "bandwidth limit\"."; + continue; } + if (b < 0) { + return ParseFailed( + line, "b=" + bandwidth_type + " value can't be negative.", error); + } + // Convert values. Prevent integer overflow. + if (bandwidth_type == kApplicationSpecificBandwidth) { + b = std::min(b, INT_MAX / 1000) * 1000; + } else { + b = std::min(b, INT_MAX); + } + media_desc->set_bandwidth(b); + media_desc->set_bandwidth_type(bandwidth_type); continue; } @@ -3097,7 +3072,7 @@ bool ParseContent(const std::string& message, if (!IsLineType(line, kLineTypeAttributes)) { // TODO(deadbeef): Handle other lines if needed. - RTC_LOG(LS_INFO) << "Ignored line: " << line; + RTC_LOG(LS_VERBOSE) << "Ignored line: " << line; continue; } @@ -3107,7 +3082,7 @@ bool ParseContent(const std::string& message, // mid-attribute = "a=mid:" identification-tag // identification-tag = token // Use the mid identification-tag as the content name. 
- if (!GetValue(line, kAttributeMid, &mline_id, error)) { + if (!GetSingleTokenValue(line, kAttributeMid, &mline_id, error)) { return false; } *content_name = mline_id; @@ -3137,19 +3112,6 @@ bool ParseContent(const std::string& message, if (!ParseIceOptions(line, &transport->transport_options, error)) { return false; } - } else if (HasAttribute(line, kOpaqueTransportParametersLine)) { - transport->opaque_parameters = cricket::OpaqueTransportParameters(); - if (!ParseOpaqueTransportLine( - line, &transport->opaque_parameters->protocol, - &transport->opaque_parameters->parameters, error)) { - return false; - } - } else if (HasAttribute(line, kAltProtocolLine)) { - std::string alt_protocol; - if (!ParseAltProtocolLine(line, &alt_protocol, error)) { - return false; - } - media_desc->set_alt_protocol(alt_protocol); } else if (HasAttribute(line, kAttributeFmtp)) { if (!ParseFmtpAttributes(line, media_type, media_desc, error)) { return false; @@ -3165,37 +3127,33 @@ bool ParseContent(const std::string& message, return false; } } else if (cricket::IsDtlsSctp(protocol) && - HasAttribute(line, kAttributeSctpPort)) { - if (media_type != cricket::MEDIA_TYPE_DATA) { - return ParseFailed( - line, "sctp-port attribute found in non-data media description.", - error); - } - if (media_desc->as_sctp()->use_sctpmap()) { - return ParseFailed( - line, "sctp-port attribute can't be used with sctpmap.", error); - } - int sctp_port; - if (!ParseSctpPort(line, &sctp_port, error)) { - return false; - } - media_desc->as_sctp()->set_port(sctp_port); - } else if (cricket::IsDtlsSctp(protocol) && - HasAttribute(line, kAttributeMaxMessageSize)) { - if (media_type != cricket::MEDIA_TYPE_DATA) { - return ParseFailed( - line, - "max-message-size attribute found in non-data media description.", - error); - } - int max_message_size; - if (!ParseSctpMaxMessageSize(line, &max_message_size, error)) { - return false; + media_type == cricket::MEDIA_TYPE_DATA) { + // + // SCTP specific attributes + // + if 
(HasAttribute(line, kAttributeSctpPort)) { + if (media_desc->as_sctp()->use_sctpmap()) { + return ParseFailed( + line, "sctp-port attribute can't be used with sctpmap.", error); + } + int sctp_port; + if (!ParseSctpPort(line, &sctp_port, error)) { + return false; + } + media_desc->as_sctp()->set_port(sctp_port); + } else if (HasAttribute(line, kAttributeMaxMessageSize)) { + int max_message_size; + if (!ParseSctpMaxMessageSize(line, &max_message_size, error)) { + return false; + } + media_desc->as_sctp()->set_max_message_size(max_message_size); + } else if (HasAttribute(line, kAttributeSctpmap)) { + // Ignore a=sctpmap: from early versions of draft-ietf-mmusic-sctp-sdp + continue; } - media_desc->as_sctp()->set_max_message_size(max_message_size); } else if (cricket::IsRtpProtocol(protocol)) { // - // RTP specific attrubtes + // RTP specific attributes // if (HasAttribute(line, kAttributeRtcpMux)) { media_desc->set_rtcp_mux(true); @@ -3311,14 +3269,18 @@ bool ParseContent(const std::string& message, } simulcast = error_or_simulcast.value(); + } else if (HasAttribute(line, kAttributeRtcp)) { + // Ignore and do not log a=rtcp line. + // JSEP section 5.8.2 (media section parsing) says to ignore it. + continue; } else { // Unrecognized attribute in RTP protocol. - RTC_LOG(LS_INFO) << "Ignored line: " << line; + RTC_LOG(LS_VERBOSE) << "Ignored line: " << line; continue; } } else { // Only parse lines that we are interested of. 
- RTC_LOG(LS_INFO) << "Ignored line: " << line; + RTC_LOG(LS_VERBOSE) << "Ignored line: " << line; continue; } } @@ -3667,11 +3629,6 @@ bool ParseRtpmapAttribute(const std::string& line, AudioContentDescription* audio_desc = media_desc->as_audio(); UpdateCodec(payload_type, encoding_name, clock_rate, 0, channels, audio_desc); - } else if (media_type == cricket::MEDIA_TYPE_DATA) { - RtpDataContentDescription* data_desc = media_desc->as_rtp_data(); - if (data_desc) { - data_desc->AddCodec(cricket::RtpDataCodec(payload_type, encoding_name)); - } } return true; } @@ -3681,8 +3638,10 @@ bool ParseFmtpParam(const std::string& line, std::string* value, SdpParseError* error) { if (!rtc::tokenize_first(line, kSdpDelimiterEqualChar, parameter, value)) { - ParseFailed(line, "Unable to parse fmtp parameter. \'=\' missing.", error); - return false; + // Support for non-key-value lines like RFC 2198 or RFC 4733. + *parameter = ""; + *value = line; + return true; } // a=fmtp: =; =; ... return true; @@ -3700,7 +3659,7 @@ bool ParseFmtpAttributes(const std::string& line, std::string line_payload; std::string line_params; - // RFC 5576 + // https://tools.ietf.org/html/rfc4566#section-6 // a=fmtp: // At least two fields, whereas the second one is any of the optional // parameters. @@ -3729,17 +3688,15 @@ bool ParseFmtpAttributes(const std::string& line, cricket::CodecParameterMap codec_params; for (auto& iter : fields) { - if (iter.find(kSdpDelimiterEqual) == std::string::npos) { - // Only fmtps with equals are currently supported. Other fmtp types - // should be ignored. Unknown fmtps do not constitute an error. 
- continue; - } - std::string name; std::string value; if (!ParseFmtpParam(rtc::string_trim(iter), &name, &value, error)) { return false; } + if (codec_params.find(name) != codec_params.end()) { + RTC_LOG(LS_INFO) << "Overwriting duplicate fmtp parameter with key \"" + << name << "\"."; + } codec_params[name] = value; } diff --git a/pc/webrtc_sdp.h b/pc/webrtc_sdp.h index 588e02f139..aa3317f341 100644 --- a/pc/webrtc_sdp.h +++ b/pc/webrtc_sdp.h @@ -22,7 +22,12 @@ #include +#include "api/candidate.h" +#include "api/jsep.h" +#include "api/jsep_ice_candidate.h" +#include "api/jsep_session_description.h" #include "media/base/codec.h" +#include "rtc_base/strings/string_builder.h" #include "rtc_base/system/rtc_export.h" namespace cricket { diff --git a/pc/webrtc_sdp_unittest.cc b/pc/webrtc_sdp_unittest.cc index a2ad4b8bdc..266fd3dfd6 100644 --- a/pc/webrtc_sdp_unittest.cc +++ b/pc/webrtc_sdp_unittest.cc @@ -56,7 +56,6 @@ using cricket::Candidate; using cricket::ContentGroup; using cricket::ContentInfo; using cricket::CryptoParams; -using cricket::DataCodec; using cricket::ICE_CANDIDATE_COMPONENT_RTCP; using cricket::ICE_CANDIDATE_COMPONENT_RTP; using cricket::kFecSsrcGroupSemantics; @@ -65,7 +64,6 @@ using cricket::MediaProtocolType; using cricket::RELAY_PORT_TYPE; using cricket::RidDescription; using cricket::RidDirection; -using cricket::RtpDataContentDescription; using cricket::SctpDataContentDescription; using cricket::SessionDescription; using cricket::SimulcastDescription; @@ -153,6 +151,7 @@ static const char kSdpFullString[] = "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" "s=-\r\n" "t=0 0\r\n" + "a=extmap-allow-mixed\r\n" "a=msid-semantic: WMS local_stream_1\r\n" "m=audio 2345 RTP/SAVPF 111 103 104\r\n" "c=IN IP4 74.125.127.126\r\n" @@ -223,6 +222,7 @@ static const char kSdpString[] = "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" "s=-\r\n" "t=0 0\r\n" + "a=extmap-allow-mixed\r\n" "a=msid-semantic: WMS 
local_stream_1\r\n" "m=audio 9 RTP/SAVPF 111 103 104\r\n" "c=IN IP4 0.0.0.0\r\n" @@ -261,22 +261,6 @@ static const char kSdpString[] = "a=ssrc:3 mslabel:local_stream_1\r\n" "a=ssrc:3 label:video_track_id_1\r\n"; -static const char kSdpRtpDataChannelString[] = - "m=application 9 RTP/SAVPF 101\r\n" - "c=IN IP4 0.0.0.0\r\n" - "a=rtcp:9 IN IP4 0.0.0.0\r\n" - "a=ice-ufrag:ufrag_data\r\n" - "a=ice-pwd:pwd_data\r\n" - "a=mid:data_content_name\r\n" - "a=sendrecv\r\n" - "a=crypto:1 AES_CM_128_HMAC_SHA1_80 " - "inline:FvLcvU2P3ZWmQxgPAgcDu7Zl9vftYElFOjEzhWs5\r\n" - "a=rtpmap:101 google-data/90000\r\n" - "a=ssrc:10 cname:data_channel_cname\r\n" - "a=ssrc:10 msid:data_channel data_channeld0\r\n" - "a=ssrc:10 mslabel:data_channel\r\n" - "a=ssrc:10 label:data_channeld0\r\n"; - // draft-ietf-mmusic-sctp-sdp-03 static const char kSdpSctpDataChannelString[] = "m=application 9 UDP/DTLS/SCTP 5000\r\n" @@ -373,6 +357,7 @@ static const char kBundleOnlySdpFullString[] = "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" "s=-\r\n" "t=0 0\r\n" + "a=extmap-allow-mixed\r\n" "a=group:BUNDLE audio_content_name video_content_name\r\n" "a=msid-semantic: WMS local_stream_1\r\n" "m=audio 2345 RTP/SAVPF 111 103 104\r\n" @@ -433,6 +418,7 @@ static const char kPlanBSdpFullString[] = "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" "s=-\r\n" "t=0 0\r\n" + "a=extmap-allow-mixed\r\n" "a=msid-semantic: WMS local_stream_1 local_stream_2\r\n" "m=audio 2345 RTP/SAVPF 111 103 104\r\n" "c=IN IP4 74.125.127.126\r\n" @@ -516,6 +502,7 @@ static const char kUnifiedPlanSdpFullString[] = "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" "s=-\r\n" "t=0 0\r\n" + "a=extmap-allow-mixed\r\n" "a=msid-semantic: WMS local_stream_1\r\n" // Audio track 1, stream 1 (with candidates). 
"m=audio 2345 RTP/SAVPF 111 103 104\r\n" @@ -628,6 +615,7 @@ static const char kUnifiedPlanSdpFullStringWithSpecialMsid[] = "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" "s=-\r\n" "t=0 0\r\n" + "a=extmap-allow-mixed\r\n" "a=msid-semantic: WMS local_stream_1\r\n" // Audio track 1, with 1 stream id. "m=audio 2345 RTP/SAVPF 111 103 104\r\n" @@ -900,12 +888,6 @@ static const uint32_t kVideoTrack3Ssrc = 6; static const char kAudioTrackId3[] = "audio_track_id_3"; static const uint32_t kAudioTrack3Ssrc = 7; -// DataChannel -static const char kDataChannelLabel[] = "data_channel"; -static const char kDataChannelMsid[] = "data_channeld0"; -static const char kDataChannelCname[] = "data_channel_cname"; -static const uint32_t kDataChannelSsrc = 10; - // Candidate static const char kDummyMid[] = "dummy_mid"; static const int kDummyIndex = 123; @@ -938,15 +920,16 @@ static void Replace(const std::string& line, absl::StrReplaceAll({{line, newlines}}, message); } -// Expect fail to parase |bad_sdp| and expect |bad_part| be part of the error -// message. +// Expect a parse failure on the line containing |bad_part| when attempting to +// parse |bad_sdp|. static void ExpectParseFailure(const std::string& bad_sdp, const std::string& bad_part) { JsepSessionDescription desc(kDummyType); SdpParseError error; bool ret = webrtc::SdpDeserialize(bad_sdp, &desc, &error); - EXPECT_FALSE(ret); - EXPECT_NE(std::string::npos, error.line.find(bad_part.c_str())); + ASSERT_FALSE(ret); + EXPECT_NE(std::string::npos, error.line.find(bad_part.c_str())) + << "Did not find " << bad_part << " in " << error.line; } // Expect fail to parse kSdpFullString if replace |good_part| with |bad_part|. 
@@ -1293,8 +1276,7 @@ class WebRtcSdpTest : public ::testing::Test { "inline:NzB4d1BINUAvLEw6UzF3WSJ+PSdFcGdUJShpX1Zj|2^20|1:32", "dummy_session_params")); audio->set_protocol(cricket::kMediaProtocolSavpf); - AudioCodec opus(111, "opus", 48000, 0, 2); - audio->AddCodec(opus); + audio->AddCodec(AudioCodec(111, "opus", 48000, 0, 2)); audio->AddCodec(AudioCodec(103, "ISAC", 16000, 0, 1)); audio->AddCodec(AudioCodec(104, "ISAC", 32000, 0, 1)); return audio; @@ -1460,11 +1442,6 @@ class WebRtcSdpTest : public ::testing::Test { simulcast2.receive_layers().size()); } - void CompareRtpDataContentDescription(const RtpDataContentDescription* dcd1, - const RtpDataContentDescription* dcd2) { - CompareMediaContentDescription(dcd1, dcd2); - } - void CompareSctpDataContentDescription( const SctpDataContentDescription* dcd1, const SctpDataContentDescription* dcd2) { @@ -1515,21 +1492,11 @@ class WebRtcSdpTest : public ::testing::Test { const SctpDataContentDescription* scd2 = c2.media_description()->as_sctp(); CompareSctpDataContentDescription(scd1, scd2); - } else { - if (IsDataContent(&c1)) { - const RtpDataContentDescription* dcd1 = - c1.media_description()->as_rtp_data(); - const RtpDataContentDescription* dcd2 = - c2.media_description()->as_rtp_data(); - CompareRtpDataContentDescription(dcd1, dcd2); - } } CompareSimulcastDescription( c1.media_description()->simulcast_description(), c2.media_description()->simulcast_description()); - EXPECT_EQ(c1.media_description()->alt_protocol(), - c2.media_description()->alt_protocol()); } // group @@ -1584,8 +1551,6 @@ class WebRtcSdpTest : public ::testing::Test { } EXPECT_EQ(transport1.description.transport_options, transport2.description.transport_options); - EXPECT_EQ(transport1.description.opaque_parameters, - transport2.description.opaque_parameters); } // global attributes @@ -1679,23 +1644,6 @@ class WebRtcSdpTest : public ::testing::Test { desc_.AddTransportInfo(transport_info); } - void AddOpaqueTransportParameters(const 
std::string& content_name, - cricket::OpaqueTransportParameters params) { - ASSERT_TRUE(desc_.GetTransportInfoByName(content_name) != NULL); - cricket::TransportInfo info = *(desc_.GetTransportInfoByName(content_name)); - desc_.RemoveTransportInfoByName(content_name); - info.description.opaque_parameters = params; - desc_.AddTransportInfo(info); - } - - void AddAltProtocol(const std::string& content_name, - const std::string& alt_protocol) { - ASSERT_TRUE(desc_.GetTransportInfoByName(content_name) != NULL); - cricket::MediaContentDescription* description = - desc_.GetContentDescriptionByName(content_name); - description->set_alt_protocol(alt_protocol); - } - void AddFingerprint() { desc_.RemoveTransportInfoByName(kAudioContentName); desc_.RemoveTransportInfoByName(kVideoContentName); @@ -1831,28 +1779,6 @@ class WebRtcSdpTest : public ::testing::Test { kDataContentName, TransportDescription(kUfragData, kPwdData))); } - void AddRtpDataChannel() { - std::unique_ptr data( - new RtpDataContentDescription()); - data_desc_ = data.get(); - - data_desc_->AddCodec(DataCodec(101, "google-data")); - StreamParams data_stream; - data_stream.id = kDataChannelMsid; - data_stream.cname = kDataChannelCname; - data_stream.set_stream_ids({kDataChannelLabel}); - data_stream.ssrcs.push_back(kDataChannelSsrc); - data_desc_->AddStream(data_stream); - data_desc_->AddCrypto( - CryptoParams(1, "AES_CM_128_HMAC_SHA1_80", - "inline:FvLcvU2P3ZWmQxgPAgcDu7Zl9vftYElFOjEzhWs5", "")); - data_desc_->set_protocol(cricket::kMediaProtocolSavpf); - desc_.AddContent(kDataContentName, MediaProtocolType::kRtp, - std::move(data)); - desc_.AddTransportInfo(TransportInfo( - kDataContentName, TransportDescription(kUfragData, kPwdData))); - } - bool TestDeserializeDirection(RtpTransceiverDirection direction) { std::string new_sdp = kSdpFullString; ReplaceDirection(direction, &new_sdp); @@ -1955,13 +1881,14 @@ class WebRtcSdpTest : public ::testing::Test { // description. 
"a=msid-semantic: WMS\r\n" // Pl type 111 preferred. - "m=audio 9 RTP/SAVPF 111 104 103\r\n" + "m=audio 9 RTP/SAVPF 111 104 103 105\r\n" // Pltype 111 listed before 103 and 104 in the map. "a=rtpmap:111 opus/48000/2\r\n" // Pltype 103 listed before 104. "a=rtpmap:103 ISAC/16000\r\n" "a=rtpmap:104 ISAC/32000\r\n" - "a=fmtp:111 0-15,66,70\r\n" + "a=rtpmap:105 telephone-event/8000\r\n" + "a=fmtp:105 0-15,66,70\r\n" "a=fmtp:111 "; std::ostringstream os; os << "minptime=" << params.min_ptime << "; stereo=" << params.stereo @@ -1979,7 +1906,8 @@ class WebRtcSdpTest : public ::testing::Test { os.clear(); os.str(""); // Pl type 100 preferred. - os << "m=video 9 RTP/SAVPF 99 95\r\n" + os << "m=video 9 RTP/SAVPF 99 95 96\r\n" + "a=rtpmap:96 VP9/90000\r\n" // out-of-order wrt the m= line. "a=rtpmap:99 VP8/90000\r\n" "a=rtpmap:95 RTX/90000\r\n" "a=fmtp:95 apt=99;\r\n"; @@ -2008,6 +1936,14 @@ class WebRtcSdpTest : public ::testing::Test { VerifyCodecParameter(codec.params, "maxptime", params.max_ptime); } + cricket::AudioCodec dtmf = acd->codecs()[3]; + EXPECT_EQ("telephone-event", dtmf.name); + EXPECT_EQ(105, dtmf.id); + EXPECT_EQ(3u, + dtmf.params.size()); // ptime and max_ptime count as parameters. + EXPECT_EQ(dtmf.params.begin()->first, ""); + EXPECT_EQ(dtmf.params.begin()->second, "0-15,66,70"); + const VideoContentDescription* vcd = GetFirstVideoContentDescription(jdesc_output->description()); ASSERT_TRUE(vcd); @@ -2019,6 +1955,10 @@ class WebRtcSdpTest : public ::testing::Test { EXPECT_EQ("RTX", rtx.name); EXPECT_EQ(95, rtx.id); VerifyCodecParameter(rtx.params, "apt", vp8.id); + // VP9 is listed last in the m= line so should come after VP8 and RTX. 
+ cricket::VideoCodec vp9 = vcd->codecs()[2]; + EXPECT_EQ("VP9", vp9.name); + EXPECT_EQ(96, vp9.id); } void TestDeserializeRtcpFb(JsepSessionDescription* jdesc_output, @@ -2109,7 +2049,6 @@ class WebRtcSdpTest : public ::testing::Test { SessionDescription desc_; AudioContentDescription* audio_desc_; VideoContentDescription* video_desc_; - RtpDataContentDescription* data_desc_; SctpDataContentDescription* sctp_desc_; Candidates candidates_; std::unique_ptr jcandidate_; @@ -2185,33 +2124,52 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithoutCandidates) { EXPECT_EQ(std::string(kSdpString), message); } -TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBundle) { - ContentGroup group(cricket::GROUP_TYPE_BUNDLE); - group.AddContentName(kAudioContentName); - group.AddContentName(kVideoContentName); - desc_.AddGroup(group); +TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBundles) { + ContentGroup group1(cricket::GROUP_TYPE_BUNDLE); + group1.AddContentName(kAudioContentName); + group1.AddContentName(kVideoContentName); + desc_.AddGroup(group1); + ContentGroup group2(cricket::GROUP_TYPE_BUNDLE); + group2.AddContentName(kAudioContentName2); + desc_.AddGroup(group2); ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), jdesc_.session_version())); std::string message = webrtc::SdpSerialize(jdesc_); std::string sdp_with_bundle = kSdpFullString; InjectAfter(kSessionTime, - "a=group:BUNDLE audio_content_name video_content_name\r\n", + "a=group:BUNDLE audio_content_name video_content_name\r\n" + "a=group:BUNDLE audio_content_name_2\r\n", &sdp_with_bundle); EXPECT_EQ(sdp_with_bundle, message); } TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithBandwidth) { VideoContentDescription* vcd = GetFirstVideoContentDescription(&desc_); - vcd->set_bandwidth(100 * 1000); + vcd->set_bandwidth(100 * 1000 + 755); // Integer division will drop the 755. 
+ vcd->set_bandwidth_type("AS"); AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_); - acd->set_bandwidth(50 * 1000); + acd->set_bandwidth(555); + acd->set_bandwidth_type("TIAS"); ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), jdesc_.session_version())); std::string message = webrtc::SdpSerialize(jdesc_); std::string sdp_with_bandwidth = kSdpFullString; InjectAfter("c=IN IP4 74.125.224.39\r\n", "b=AS:100\r\n", &sdp_with_bandwidth); - InjectAfter("c=IN IP4 74.125.127.126\r\n", "b=AS:50\r\n", + InjectAfter("c=IN IP4 74.125.127.126\r\n", "b=TIAS:555\r\n", + &sdp_with_bandwidth); + EXPECT_EQ(sdp_with_bandwidth, message); +} + +// Should default to b=AS if bandwidth_type isn't set. +TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithMissingBandwidthType) { + VideoContentDescription* vcd = GetFirstVideoContentDescription(&desc_); + vcd->set_bandwidth(100 * 1000); + ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), + jdesc_.session_version())); + std::string message = webrtc::SdpSerialize(jdesc_); + std::string sdp_with_bandwidth = kSdpFullString; + InjectAfter("c=IN IP4 74.125.224.39\r\n", "b=AS:100\r\n", &sdp_with_bandwidth); EXPECT_EQ(sdp_with_bandwidth, message); } @@ -2236,41 +2194,6 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithIceOptions) { EXPECT_EQ(sdp_with_ice_options, message); } -TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithOpaqueTransportParams) { - cricket::OpaqueTransportParameters params; - params.protocol = "foo"; - params.parameters = "test64"; - AddOpaqueTransportParameters(kAudioContentName, params); - AddOpaqueTransportParameters(kVideoContentName, params); - - ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), - jdesc_.session_version())); - std::string message = webrtc::SdpSerialize(jdesc_); - - std::string sdp_with_transport_parameters = kSdpFullString; - InjectAfter(kAttributeIcePwdVoice, "a=x-opaque:foo:dGVzdDY0\r\n", - &sdp_with_transport_parameters); - 
InjectAfter(kAttributeIcePwdVideo, "a=x-opaque:foo:dGVzdDY0\r\n", - &sdp_with_transport_parameters); - EXPECT_EQ(message, sdp_with_transport_parameters); -} - -TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithAltProtocol) { - AddAltProtocol(kAudioContentName, "foo"); - AddAltProtocol(kVideoContentName, "bar"); - - ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), - jdesc_.session_version())); - std::string message = webrtc::SdpSerialize(jdesc_); - - std::string sdp_with_alt_protocol = kSdpFullString; - InjectAfter(kAttributeIcePwdVoice, "a=x-alt-protocol:foo\r\n", - &sdp_with_alt_protocol); - InjectAfter(kAttributeIcePwdVideo, "a=x-alt-protocol:bar\r\n", - &sdp_with_alt_protocol); - EXPECT_EQ(message, sdp_with_alt_protocol); -} - TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithRecvOnlyContent) { EXPECT_TRUE(TestSerializeDirection(RtpTransceiverDirection::kRecvOnly)); } @@ -2295,18 +2218,6 @@ TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithAudioVideoRejected) { EXPECT_TRUE(TestSerializeRejected(true, true)); } -TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithRtpDataChannel) { - AddRtpDataChannel(); - JsepSessionDescription jsep_desc(kDummyType); - - MakeDescriptionWithoutCandidates(&jsep_desc); - std::string message = webrtc::SdpSerialize(jsep_desc); - - std::string expected_sdp = kSdpString; - expected_sdp.append(kSdpRtpDataChannelString); - EXPECT_EQ(expected_sdp, message); -} - TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithSctpDataChannel) { bool use_sctpmap = true; AddSctpDataChannel(use_sctpmap); @@ -2353,21 +2264,6 @@ TEST_F(WebRtcSdpTest, SerializeWithSctpDataChannelAndNewPort) { EXPECT_EQ(expected_sdp, message); } -TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithDataChannelAndBandwidth) { - JsepSessionDescription jsep_desc(kDummyType); - AddRtpDataChannel(); - data_desc_->set_bandwidth(100 * 1000); - MakeDescriptionWithoutCandidates(&jsep_desc); - std::string message = webrtc::SdpSerialize(jsep_desc); - - 
std::string expected_sdp = kSdpString; - expected_sdp.append(kSdpRtpDataChannelString); - // Serializing data content shouldn't ignore bandwidth settings. - InjectAfter("m=application 9 RTP/SAVPF 101\r\nc=IN IP4 0.0.0.0\r\n", - "b=AS:100\r\n", &expected_sdp); - EXPECT_EQ(expected_sdp, message); -} - TEST_F(WebRtcSdpTest, SerializeSessionDescriptionWithExtmapAllowMixed) { jdesc_.description()->set_extmap_allow_mixed(true); TestSerialize(jdesc_); @@ -2445,8 +2341,6 @@ TEST_F(WebRtcSdpTest, SerializeHostnameCandidate) { EXPECT_EQ(std::string(kRawHostnameCandidate), message); } -// TODO(mallinath) : Enable this test once WebRTCSdp capable of parsing -// RFC 6544. TEST_F(WebRtcSdpTest, SerializeTcpCandidates) { Candidate candidate(ICE_CANDIDATE_COMPONENT_RTP, "tcp", rtc::SocketAddress("192.168.1.5", 9), kCandidatePriority, @@ -2662,6 +2556,41 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithBandwidth) { EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_bandwidth)); } +TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithTiasBandwidth) { + JsepSessionDescription jdesc_with_bandwidth(kDummyType); + std::string sdp_with_bandwidth = kSdpFullString; + InjectAfter("a=mid:video_content_name\r\na=sendrecv\r\n", "b=TIAS:100000\r\n", + &sdp_with_bandwidth); + InjectAfter("a=mid:audio_content_name\r\na=sendrecv\r\n", "b=TIAS:50000\r\n", + &sdp_with_bandwidth); + EXPECT_TRUE(SdpDeserialize(sdp_with_bandwidth, &jdesc_with_bandwidth)); + VideoContentDescription* vcd = GetFirstVideoContentDescription(&desc_); + vcd->set_bandwidth(100 * 1000); + AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_); + acd->set_bandwidth(50 * 1000); + ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), + jdesc_.session_version())); + EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_bandwidth)); +} + +TEST_F(WebRtcSdpTest, + DeserializeSessionDescriptionWithUnknownBandwidthModifier) { + JsepSessionDescription jdesc_with_bandwidth(kDummyType); + 
std::string sdp_with_bandwidth = kSdpFullString; + InjectAfter("a=mid:video_content_name\r\na=sendrecv\r\n", + "b=unknown:100000\r\n", &sdp_with_bandwidth); + InjectAfter("a=mid:audio_content_name\r\na=sendrecv\r\n", + "b=unknown:50000\r\n", &sdp_with_bandwidth); + EXPECT_TRUE(SdpDeserialize(sdp_with_bandwidth, &jdesc_with_bandwidth)); + VideoContentDescription* vcd = GetFirstVideoContentDescription(&desc_); + vcd->set_bandwidth(-1); + AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_); + acd->set_bandwidth(-1); + ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), + jdesc_.session_version())); + EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_bandwidth)); +} + TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithIceOptions) { JsepSessionDescription jdesc_with_ice_options(kDummyType); std::string sdp_with_ice_options = kSdpFullString; @@ -2685,48 +2614,6 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithIceOptions) { EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_ice_options)); } -TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithOpaqueTransportParams) { - std::string sdp_with_transport_parameters = kSdpFullString; - InjectAfter(kAttributeIcePwdVoice, "a=x-opaque:foo:dGVzdDY0\r\n", - &sdp_with_transport_parameters); - InjectAfter(kAttributeIcePwdVideo, "a=x-opaque:foo:dGVzdDY0\r\n", - &sdp_with_transport_parameters); - - JsepSessionDescription jdesc_with_transport_parameters(kDummyType); - EXPECT_TRUE(SdpDeserialize(sdp_with_transport_parameters, - &jdesc_with_transport_parameters)); - - cricket::OpaqueTransportParameters params; - params.protocol = "foo"; - params.parameters = "test64"; - - AddOpaqueTransportParameters(kAudioContentName, params); - AddOpaqueTransportParameters(kVideoContentName, params); - - ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), - jdesc_.session_version())); - EXPECT_TRUE( - CompareSessionDescription(jdesc_, jdesc_with_transport_parameters)); -} - 
-TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithAltProtocol) { - std::string sdp_with_alt_protocol = kSdpFullString; - InjectAfter(kAttributeIcePwdVoice, "a=x-alt-protocol:foo\r\n", - &sdp_with_alt_protocol); - InjectAfter(kAttributeIcePwdVideo, "a=x-alt-protocol:bar\r\n", - &sdp_with_alt_protocol); - - JsepSessionDescription jdesc_with_alt_protocol(kDummyType); - EXPECT_TRUE(SdpDeserialize(sdp_with_alt_protocol, &jdesc_with_alt_protocol)); - - AddAltProtocol(kAudioContentName, "foo"); - AddAltProtocol(kVideoContentName, "bar"); - - ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), - jdesc_.session_version())); - EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_with_alt_protocol)); -} - TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithUfragPwd) { // Remove the original ice-ufrag and ice-pwd JsepSessionDescription jdesc_with_ufrag_pwd(kDummyType); @@ -2793,10 +2680,9 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutMsid) { TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithExtmapAllowMixed) { jdesc_.description()->set_extmap_allow_mixed(true); std::string sdp_with_extmap_allow_mixed = kSdpFullString; - InjectAfter("t=0 0\r\n", kExtmapAllowMixed, &sdp_with_extmap_allow_mixed); // Deserialize JsepSessionDescription jdesc_deserialized(kDummyType); - EXPECT_TRUE(SdpDeserialize(sdp_with_extmap_allow_mixed, &jdesc_deserialized)); + ASSERT_TRUE(SdpDeserialize(sdp_with_extmap_allow_mixed, &jdesc_deserialized)); // Verify EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_deserialized)); } @@ -2804,9 +2690,10 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithExtmapAllowMixed) { TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutExtmapAllowMixed) { jdesc_.description()->set_extmap_allow_mixed(false); std::string sdp_without_extmap_allow_mixed = kSdpFullString; + Replace(kExtmapAllowMixed, "", &sdp_without_extmap_allow_mixed); // Deserialize JsepSessionDescription jdesc_deserialized(kDummyType); - EXPECT_TRUE( + 
ASSERT_TRUE( SdpDeserialize(sdp_without_extmap_allow_mixed, &jdesc_deserialized)); // Verify EXPECT_TRUE(CompareSessionDescription(jdesc_, jdesc_deserialized)); @@ -2947,21 +2834,6 @@ TEST_F(WebRtcSdpTest, DeserializeInvalidCandidiate) { EXPECT_FALSE(SdpDeserializeCandidate(kSdpTcpInvalidCandidate, &jcandidate)); } -TEST_F(WebRtcSdpTest, DeserializeSdpWithRtpDataChannels) { - AddRtpDataChannel(); - JsepSessionDescription jdesc(kDummyType); - ASSERT_TRUE(jdesc.Initialize(desc_.Clone(), kSessionId, kSessionVersion)); - - std::string sdp_with_data = kSdpString; - sdp_with_data.append(kSdpRtpDataChannelString); - JsepSessionDescription jdesc_output(kDummyType); - - // Deserialize - EXPECT_TRUE(SdpDeserialize(sdp_with_data, &jdesc_output)); - // Verify - EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output)); -} - TEST_F(WebRtcSdpTest, DeserializeSdpWithSctpDataChannels) { bool use_sctpmap = true; AddSctpDataChannel(use_sctpmap); @@ -3017,6 +2889,25 @@ TEST_F(WebRtcSdpTest, DeserializeSdpWithSctpDataChannelsWithSctpColonPort) { EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output)); } +TEST_F(WebRtcSdpTest, DeserializeSdpWithSctpDataChannelsButWrongMediaType) { + bool use_sctpmap = true; + AddSctpDataChannel(use_sctpmap); + JsepSessionDescription jdesc(kDummyType); + ASSERT_TRUE(jdesc.Initialize(desc_.Clone(), kSessionId, kSessionVersion)); + + std::string sdp = kSdpSessionString; + sdp += kSdpSctpDataChannelString; + + const char needle[] = "m=application "; + sdp.replace(sdp.find(needle), strlen(needle), "m=application:bogus "); + + JsepSessionDescription jdesc_output(kDummyType); + EXPECT_TRUE(SdpDeserialize(sdp, &jdesc_output)); + + EXPECT_EQ(1u, jdesc_output.description()->contents().size()); + EXPECT_TRUE(jdesc_output.description()->contents()[0].rejected); +} + // Helper function to set the max-message-size parameter in the // SCTP data codec. 
void MutateJsepSctpMaxMessageSize(const SessionDescription& desc, @@ -3103,8 +2994,9 @@ TEST_F(WebRtcSdpTest, DeserializeSdpWithRtpmapAttribute) { } TEST_F(WebRtcSdpTest, DeserializeSdpWithStrangeApplicationProtocolNames) { - static const char* bad_strings[] = {"DTLS/SCTPRTP/", "obviously-bogus", - "UDP/TL/RTSP/SAVPF", "UDP/TL/RTSP/S"}; + static const char* bad_strings[] = { + "DTLS/SCTPRTP/", "obviously-bogus", "UDP/TL/RTSP/SAVPF", + "UDP/TL/RTSP/S", "DTLS/SCTP/RTP/FOO", "obviously-bogus/RTP/"}; for (auto proto : bad_strings) { std::string sdp_with_data = kSdpString; sdp_with_data.append("m=application 9 "); @@ -3114,18 +3006,6 @@ TEST_F(WebRtcSdpTest, DeserializeSdpWithStrangeApplicationProtocolNames) { EXPECT_FALSE(SdpDeserialize(sdp_with_data, &jdesc_output)) << "Parsing should have failed on " << proto; } - // The following strings are strange, but acceptable as RTP. - static const char* weird_strings[] = {"DTLS/SCTP/RTP/FOO", - "obviously-bogus/RTP/"}; - for (auto proto : weird_strings) { - std::string sdp_with_data = kSdpString; - sdp_with_data.append("m=application 9 "); - sdp_with_data.append(proto); - sdp_with_data.append(" 47\r\n"); - JsepSessionDescription jdesc_output(kDummyType); - EXPECT_TRUE(SdpDeserialize(sdp_with_data, &jdesc_output)) - << "Parsing should have succeeded on " << proto; - } } // For crbug/344475. @@ -3183,21 +3063,6 @@ TEST_F(WebRtcSdpTest, EXPECT_TRUE(CompareSessionDescription(jdesc, jdesc_output)); } -TEST_F(WebRtcSdpTest, DeserializeSdpWithRtpDataChannelsAndBandwidth) { - // We want to test that deserializing data content limits bandwidth - // settings (it should never be greater than the default). - // This should prevent someone from using unlimited data bandwidth through - // JS and "breaking the Internet". 
- // See: https://code.google.com/p/chromium/issues/detail?id=280726 - std::string sdp_with_bandwidth = kSdpString; - sdp_with_bandwidth.append(kSdpRtpDataChannelString); - InjectAfter("a=mid:data_content_name\r\n", "b=AS:100\r\n", - &sdp_with_bandwidth); - JsepSessionDescription jdesc_with_bandwidth(kDummyType); - - EXPECT_FALSE(SdpDeserialize(sdp_with_bandwidth, &jdesc_with_bandwidth)); -} - TEST_F(WebRtcSdpTest, DeserializeSdpWithSctpDataChannelsAndBandwidth) { bool use_sctpmap = true; AddSctpDataChannel(use_sctpmap); @@ -3365,6 +3230,7 @@ TEST_F(WebRtcSdpTest, DeserializeBrokenSdp) { // Broken media description ExpectParseFailure("m=audio", "c=IN IP4 74.125.224.39"); ExpectParseFailure("m=video", kSdpDestroyer); + ExpectParseFailure("m=", "c=IN IP4 74.125.224.39"); // Invalid lines ExpectParseFailure("a=candidate", kSdpEmptyType); @@ -3409,6 +3275,13 @@ TEST_F(WebRtcSdpTest, DeserializeSdpWithInvalidAttributeValue) { // bandwidth ExpectParseFailureWithNewLines("a=mid:video_content_name\r\n", "b=AS:badvalue\r\n", "b=AS:badvalue"); + ExpectParseFailureWithNewLines("a=mid:video_content_name\r\n", "b=AS\r\n", + "b=AS"); + ExpectParseFailureWithNewLines("a=mid:video_content_name\r\n", "b=AS:\r\n", + "b=AS:"); + ExpectParseFailureWithNewLines("a=mid:video_content_name\r\n", + "b=AS:12:34\r\n", "b=AS:12:34"); + // rtcp-fb ExpectParseFailureWithNewLines("a=mid:video_content_name\r\n", "a=rtcp-fb:badvalue nack\r\n", @@ -3673,6 +3546,28 @@ TEST_F(WebRtcSdpTest, SerializeAudioFmtpWithPTimeAndMaxPTime) { EXPECT_EQ(sdp_with_fmtp, message); } +TEST_F(WebRtcSdpTest, SerializeAudioFmtpWithTelephoneEvent) { + AudioContentDescription* acd = GetFirstAudioContentDescription(&desc_); + + cricket::AudioCodecs codecs = acd->codecs(); + cricket::AudioCodec dtmf(105, "telephone-event", 8000, 0, 1); + dtmf.params[""] = "0-15"; + codecs.push_back(dtmf); + acd->set_codecs(codecs); + + ASSERT_TRUE(jdesc_.Initialize(desc_.Clone(), jdesc_.session_id(), + jdesc_.session_version())); + 
std::string message = webrtc::SdpSerialize(jdesc_); + std::string sdp_with_fmtp = kSdpFullString; + InjectAfter("m=audio 2345 RTP/SAVPF 111 103 104", " 105", &sdp_with_fmtp); + InjectAfter( + "a=rtpmap:104 ISAC/32000\r\n", + "a=rtpmap:105 telephone-event/8000\r\n" // No comma here. String merging! + "a=fmtp:105 0-15\r\n", + &sdp_with_fmtp); + EXPECT_EQ(sdp_with_fmtp, message); +} + TEST_F(WebRtcSdpTest, SerializeVideoFmtp) { VideoContentDescription* vcd = GetFirstVideoContentDescription(&desc_); @@ -4044,24 +3939,6 @@ TEST_F(WebRtcSdpTest, SerializeBothMediaSectionAndSsrcAttributeMsid) { EXPECT_NE(std::string::npos, sdp.find(kSsrcAttributeMsidLine)); } -// Regression test for heap overflow bug: -// https://bugs.chromium.org/p/chromium/issues/detail?id=647916 -TEST_F(WebRtcSdpTest, DeserializeSctpPortInVideoDescription) { - // The issue occurs when the sctp-port attribute is found in a video - // description. The actual heap overflow occurs when parsing the fmtp line. - static const char kSdpWithSctpPortInVideoDescription[] = - "v=0\r\n" - "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" - "s=-\r\n" - "t=0 0\r\n" - "m=video 9 UDP/DTLS/SCTP 120\r\n" - "a=sctp-port 5000\r\n" - "a=fmtp:108 foo=10\r\n"; - - ExpectParseFailure(std::string(kSdpWithSctpPortInVideoDescription), - "sctp-port"); -} - // Regression test for integer overflow bug: // https://bugs.chromium.org/p/chromium/issues/detail?id=648071 TEST_F(WebRtcSdpTest, DeserializeLargeBandwidthLimit) { @@ -4716,3 +4593,73 @@ TEST_F(WebRtcSdpTest, DeserializeSessionDescriptionWithoutCname) { jdesc_.session_version())); EXPECT_TRUE(CompareSessionDescription(jdesc_, new_jdesc)); } + +TEST_F(WebRtcSdpTest, DeserializeSdpWithUnsupportedMediaType) { + std::string sdp = kSdpSessionString; + sdp += + "m=bogus 9 RTP/SAVPF 0 8\r\n" + "c=IN IP4 0.0.0.0\r\n" + "a=mid:bogusmid\r\n"; + sdp += + "m=audio/something 9 RTP/SAVPF 0 8\r\n" + "c=IN IP4 0.0.0.0\r\n" + "a=mid:somethingmid\r\n"; + + 
JsepSessionDescription jdesc_output(kDummyType); + EXPECT_TRUE(SdpDeserialize(sdp, &jdesc_output)); + + ASSERT_EQ(2u, jdesc_output.description()->contents().size()); + ASSERT_NE(nullptr, jdesc_output.description() + ->contents()[0] + .media_description() + ->as_unsupported()); + ASSERT_NE(nullptr, jdesc_output.description() + ->contents()[1] + .media_description() + ->as_unsupported()); + + EXPECT_TRUE(jdesc_output.description()->contents()[0].rejected); + EXPECT_TRUE(jdesc_output.description()->contents()[1].rejected); + + EXPECT_EQ(jdesc_output.description()->contents()[0].name, "bogusmid"); + EXPECT_EQ(jdesc_output.description()->contents()[1].name, "somethingmid"); +} + +TEST_F(WebRtcSdpTest, MediaTypeProtocolMismatch) { + std::string sdp = + "v=0\r\n" + "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" + "s=-\r\n" + "t=0 0\r\n"; + + ExpectParseFailure(std::string(sdp + "m=audio 9 UDP/DTLS/SCTP 120\r\n"), + "m=audio"); + ExpectParseFailure(std::string(sdp + "m=video 9 UDP/DTLS/SCTP 120\r\n"), + "m=video"); + ExpectParseFailure(std::string(sdp + "m=video 9 SOMETHING 120\r\n"), + "m=video"); + ExpectParseFailure(std::string(sdp + "m=application 9 SOMETHING 120\r\n"), + "m=application"); +} + +// Regression test for: +// https://bugs.chromium.org/p/chromium/issues/detail?id=1171965 +TEST_F(WebRtcSdpTest, SctpPortInUnsupportedContent) { + std::string sdp = + "v=0\r\n" + "o=- 18446744069414584320 18446462598732840960 IN IP4 127.0.0.1\r\n" + "s=-\r\n" + "t=0 0\r\n" + "m=o 1 DTLS/SCTP 5000\r\n" + "a=sctp-port\r\n"; + + JsepSessionDescription jdesc_output(kDummyType); + EXPECT_TRUE(SdpDeserialize(sdp, &jdesc_output)); +} + +TEST_F(WebRtcSdpTest, IllegalMidCharacterValue) { + std::string sdp = kSdpString; + // [ is an illegal token value. 
+ Replace("a=mid:", "a=mid:[]", &sdp); + ExpectParseFailure(std::string(sdp), "a=mid:[]"); +} diff --git a/pc/webrtc_session_description_factory.cc b/pc/webrtc_session_description_factory.cc index aaef7fdeb6..33826347ff 100644 --- a/pc/webrtc_session_description_factory.cc +++ b/pc/webrtc_session_description_factory.cc @@ -11,9 +11,10 @@ #include "pc/webrtc_session_description_factory.h" #include - +#include #include #include +#include #include #include @@ -22,6 +23,7 @@ #include "api/jsep.h" #include "api/jsep_session_description.h" #include "api/rtc_error.h" +#include "pc/sdp_state_provider.h" #include "pc/session_description.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" @@ -125,11 +127,14 @@ void WebRtcSessionDescriptionFactory::CopyCandidatesFromSessionDescription( WebRtcSessionDescriptionFactory::WebRtcSessionDescriptionFactory( rtc::Thread* signaling_thread, cricket::ChannelManager* channel_manager, - PeerConnectionInternal* pc, + const SdpStateProvider* sdp_info, const std::string& session_id, + bool dtls_enabled, std::unique_ptr cert_generator, const rtc::scoped_refptr& certificate, - UniqueRandomIdGenerator* ssrc_generator) + UniqueRandomIdGenerator* ssrc_generator, + std::function&)> + on_certificate_ready) : signaling_thread_(signaling_thread), session_desc_factory_(channel_manager, &transport_desc_factory_, @@ -139,20 +144,21 @@ WebRtcSessionDescriptionFactory::WebRtcSessionDescriptionFactory( // to just use a random number as session id and start version from // |kInitSessionVersion|. session_version_(kInitSessionVersion), - cert_generator_(std::move(cert_generator)), - pc_(pc), + cert_generator_(dtls_enabled ? 
std::move(cert_generator) : nullptr), + sdp_info_(sdp_info), session_id_(session_id), - certificate_request_state_(CERTIFICATE_NOT_NEEDED) { + certificate_request_state_(CERTIFICATE_NOT_NEEDED), + on_certificate_ready_(on_certificate_ready) { RTC_DCHECK(signaling_thread_); - RTC_DCHECK(!(cert_generator_ && certificate)); - bool dtls_enabled = cert_generator_ || certificate; - // SRTP-SDES is disabled if DTLS is on. - SetSdesPolicy(dtls_enabled ? cricket::SEC_DISABLED : cricket::SEC_REQUIRED); + if (!dtls_enabled) { + SetSdesPolicy(cricket::SEC_REQUIRED); RTC_LOG(LS_VERBOSE) << "DTLS-SRTP disabled."; return; } + // SRTP-SDES is disabled if DTLS is on. + SetSdesPolicy(cricket::SEC_DISABLED); if (certificate) { // Use |certificate|. certificate_request_state_ = CERTIFICATE_WAITING; @@ -168,8 +174,7 @@ WebRtcSessionDescriptionFactory::WebRtcSessionDescriptionFactory( // Generate certificate. certificate_request_state_ = CERTIFICATE_WAITING; - rtc::scoped_refptr callback( - new rtc::RefCountedObject()); + auto callback = rtc::make_ref_counted(); callback->SignalRequestFailed.connect( this, &WebRtcSessionDescriptionFactory::OnCertificateRequestFailed); callback->SignalCertificateReady.connect( @@ -188,7 +193,7 @@ WebRtcSessionDescriptionFactory::WebRtcSessionDescriptionFactory( } WebRtcSessionDescriptionFactory::~WebRtcSessionDescriptionFactory() { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); // Fail any requests that were asked for before identity generation completed. 
FailPendingRequests(kFailedDueToSessionShutdown); @@ -216,6 +221,7 @@ void WebRtcSessionDescriptionFactory::CreateOffer( CreateSessionDescriptionObserver* observer, const PeerConnectionInterface::RTCOfferAnswerOptions& options, const cricket::MediaSessionOptions& session_options) { + RTC_DCHECK_RUN_ON(signaling_thread_); std::string error = "CreateOffer"; if (certificate_request_state_ == CERTIFICATE_FAILED) { error += kFailedDueToIdentityFailed; @@ -252,13 +258,13 @@ void WebRtcSessionDescriptionFactory::CreateAnswer( PostCreateSessionDescriptionFailed(observer, error); return; } - if (!pc_->remote_description()) { + if (!sdp_info_->remote_description()) { error += " can't be called before SetRemoteDescription."; RTC_LOG(LS_ERROR) << error; PostCreateSessionDescriptionFailed(observer, error); return; } - if (pc_->remote_description()->GetType() != SdpType::kOffer) { + if (sdp_info_->remote_description()->GetType() != SdpType::kOffer) { error += " failed because remote_description is not an offer."; RTC_LOG(LS_ERROR) << error; PostCreateSessionDescriptionFailed(observer, error); @@ -325,12 +331,12 @@ void WebRtcSessionDescriptionFactory::OnMessage(rtc::Message* msg) { void WebRtcSessionDescriptionFactory::InternalCreateOffer( CreateSessionDescriptionRequest request) { - if (pc_->local_description()) { + if (sdp_info_->local_description()) { // If the needs-ice-restart flag is set as described by JSEP, we should // generate an offer with a new ufrag/password to trigger an ICE restart. for (cricket::MediaDescriptionOptions& options : request.options.media_description_options) { - if (pc_->NeedsIceRestart(options.mid)) { + if (sdp_info_->NeedsIceRestart(options.mid)) { options.transport_options.ice_restart = true; } } @@ -338,8 +344,8 @@ void WebRtcSessionDescriptionFactory::InternalCreateOffer( std::unique_ptr desc = session_desc_factory_.CreateOffer( - request.options, pc_->local_description() - ? 
pc_->local_description()->description() + request.options, sdp_info_->local_description() + ? sdp_info_->local_description()->description() : nullptr); if (!desc) { PostCreateSessionDescriptionFailed(request.observer, @@ -360,11 +366,11 @@ void WebRtcSessionDescriptionFactory::InternalCreateOffer( auto offer = std::make_unique( SdpType::kOffer, std::move(desc), session_id_, rtc::ToString(session_version_++)); - if (pc_->local_description()) { + if (sdp_info_->local_description()) { for (const cricket::MediaDescriptionOptions& options : request.options.media_description_options) { if (!options.transport_options.ice_restart) { - CopyCandidatesFromSessionDescription(pc_->local_description(), + CopyCandidatesFromSessionDescription(sdp_info_->local_description(), options.mid, offer.get()); } } @@ -374,31 +380,34 @@ void WebRtcSessionDescriptionFactory::InternalCreateOffer( void WebRtcSessionDescriptionFactory::InternalCreateAnswer( CreateSessionDescriptionRequest request) { - if (pc_->remote_description()) { + if (sdp_info_->remote_description()) { for (cricket::MediaDescriptionOptions& options : request.options.media_description_options) { // According to http://tools.ietf.org/html/rfc5245#section-9.2.1.1 // an answer should also contain new ICE ufrag and password if an offer // has been received with new ufrag and password. options.transport_options.ice_restart = - pc_->IceRestartPending(options.mid); - // We should pass the current SSL role to the transport description + sdp_info_->IceRestartPending(options.mid); + // We should pass the current DTLS role to the transport description // factory, if there is already an existing ongoing session. 
- rtc::SSLRole ssl_role; - if (pc_->GetSslRole(options.mid, &ssl_role)) { + absl::optional dtls_role = + sdp_info_->GetDtlsRole(options.mid); + if (dtls_role) { options.transport_options.prefer_passive_role = - (rtc::SSL_SERVER == ssl_role); + (rtc::SSL_SERVER == *dtls_role); } } } std::unique_ptr desc = session_desc_factory_.CreateAnswer( - pc_->remote_description() ? pc_->remote_description()->description() - : nullptr, + sdp_info_->remote_description() + ? sdp_info_->remote_description()->description() + : nullptr, request.options, - pc_->local_description() ? pc_->local_description()->description() - : nullptr); + sdp_info_->local_description() + ? sdp_info_->local_description()->description() + : nullptr); if (!desc) { PostCreateSessionDescriptionFailed(request.observer, "Failed to initialize the answer."); @@ -416,13 +425,13 @@ void WebRtcSessionDescriptionFactory::InternalCreateAnswer( auto answer = std::make_unique( SdpType::kAnswer, std::move(desc), session_id_, rtc::ToString(session_version_++)); - if (pc_->local_description()) { + if (sdp_info_->local_description()) { // Include all local ICE candidates in the SessionDescription unless // the remote peer has requested an ICE restart. 
for (const cricket::MediaDescriptionOptions& options : request.options.media_description_options) { if (!options.transport_options.ice_restart) { - CopyCandidatesFromSessionDescription(pc_->local_description(), + CopyCandidatesFromSessionDescription(sdp_info_->local_description(), options.mid, answer.get()); } } @@ -432,7 +441,7 @@ void WebRtcSessionDescriptionFactory::InternalCreateAnswer( void WebRtcSessionDescriptionFactory::FailPendingRequests( const std::string& reason) { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); while (!create_session_description_requests_.empty()) { const CreateSessionDescriptionRequest& request = create_session_description_requests_.front(); @@ -467,7 +476,7 @@ void WebRtcSessionDescriptionFactory::PostCreateSessionDescriptionSucceeded( } void WebRtcSessionDescriptionFactory::OnCertificateRequestFailed() { - RTC_DCHECK(signaling_thread_->IsCurrent()); + RTC_DCHECK_RUN_ON(signaling_thread_); RTC_LOG(LS_ERROR) << "Asynchronous certificate generation request failed."; certificate_request_state_ = CERTIFICATE_FAILED; @@ -481,7 +490,8 @@ void WebRtcSessionDescriptionFactory::SetCertificate( RTC_LOG(LS_VERBOSE) << "Setting new certificate."; certificate_request_state_ = CERTIFICATE_SUCCEEDED; - SignalCertificateReady(certificate); + + on_certificate_ready_(certificate); transport_desc_factory_.set_certificate(certificate); transport_desc_factory_.set_secure(cricket::SEC_ENABLED); diff --git a/pc/webrtc_session_description_factory.h b/pc/webrtc_session_description_factory.h index f70b847b4e..bd2636c0dd 100644 --- a/pc/webrtc_session_description_factory.h +++ b/pc/webrtc_session_description_factory.h @@ -13,6 +13,7 @@ #include +#include #include #include #include @@ -22,22 +23,23 @@ #include "api/scoped_refptr.h" #include "p2p/base/transport_description.h" #include "p2p/base/transport_description_factory.h" +#include "pc/channel_manager.h" #include "pc/media_session.h" -#include 
"pc/peer_connection_internal.h" +#include "pc/sdp_state_provider.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/message_handler.h" #include "rtc_base/rtc_certificate.h" #include "rtc_base/rtc_certificate_generator.h" #include "rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/thread.h" +#include "rtc_base/thread_message.h" #include "rtc_base/unique_id_generator.h" namespace webrtc { // DTLS certificate request callback class. class WebRtcCertificateGeneratorCallback - : public rtc::RTCCertificateGeneratorCallback, - public sigslot::has_slots<> { + : public rtc::RTCCertificateGeneratorCallback { public: // |rtc::RTCCertificateGeneratorCallback| overrides. void OnSuccess( @@ -80,11 +82,14 @@ class WebRtcSessionDescriptionFactory : public rtc::MessageHandler, WebRtcSessionDescriptionFactory( rtc::Thread* signaling_thread, cricket::ChannelManager* channel_manager, - PeerConnectionInternal* pc, + const SdpStateProvider* sdp_info, const std::string& session_id, + bool dtls_enabled, std::unique_ptr cert_generator, const rtc::scoped_refptr& certificate, - rtc::UniqueRandomIdGenerator* ssrc_generator); + rtc::UniqueRandomIdGenerator* ssrc_generator, + std::function&)> + on_certificate_ready); virtual ~WebRtcSessionDescriptionFactory(); static void CopyCandidatesFromSessionDescription( @@ -110,9 +115,6 @@ class WebRtcSessionDescriptionFactory : public rtc::MessageHandler, session_desc_factory_.set_is_unified_plan(is_unified_plan); } - sigslot::signal1&> - SignalCertificateReady; - // For testing. bool waiting_for_certificate_for_testing() const { return certificate_request_state_ == CERTIFICATE_WAITING; @@ -151,12 +153,13 @@ class WebRtcSessionDescriptionFactory : public rtc::MessageHandler, cricket::MediaSessionDescriptionFactory session_desc_factory_; uint64_t session_version_; const std::unique_ptr cert_generator_; - // TODO(jiayl): remove the dependency on peer connection once bug 2264 is - // fixed. 
- PeerConnectionInternal* const pc_; + const SdpStateProvider* sdp_info_; const std::string session_id_; CertificateRequestState certificate_request_state_; + std::function&)> + on_certificate_ready_; + RTC_DISALLOW_COPY_AND_ASSIGN(WebRtcSessionDescriptionFactory); }; } // namespace webrtc diff --git a/presubmit_test.py b/presubmit_test.py index 287071c1a3..bb93765f28 100755 --- a/presubmit_test.py +++ b/presubmit_test.py @@ -20,146 +20,145 @@ class CheckBugEntryFieldTest(unittest.TestCase): - def testCommitMessageBugEntryWithNoError(self): - mock_input_api = MockInputApi() - mock_output_api = MockOutputApi() - mock_input_api.change = MockChange([], ['webrtc:1234']) - errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, - mock_output_api) - self.assertEqual(0, len(errors)) - - def testCommitMessageBugEntryReturnError(self): - mock_input_api = MockInputApi() - mock_output_api = MockOutputApi() - mock_input_api.change = MockChange([], ['webrtc:1234', 'webrtc=4321']) - errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, - mock_output_api) - self.assertEqual(1, len(errors)) - self.assertEqual(('Bogus Bug entry: webrtc=4321. Please specify' - ' the issue tracker prefix and the issue number,' - ' separated by a colon, e.g. webrtc:123 or' - ' chromium:12345.'), str(errors[0])) - - def testCommitMessageBugEntryWithoutPrefix(self): - mock_input_api = MockInputApi() - mock_output_api = MockOutputApi() - mock_input_api.change = MockChange([], ['1234']) - errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, - mock_output_api) - self.assertEqual(1, len(errors)) - self.assertEqual(('Bug entry requires issue tracker prefix, ' - 'e.g. 
webrtc:1234'), str(errors[0])) - - def testCommitMessageBugEntryIsNone(self): - mock_input_api = MockInputApi() - mock_output_api = MockOutputApi() - mock_input_api.change = MockChange([], ['None']) - errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, - mock_output_api) - self.assertEqual(0, len(errors)) - - def testCommitMessageBugEntrySupportInternalBugReference(self): - mock_input_api = MockInputApi() - mock_output_api = MockOutputApi() - mock_input_api.change.BUG = 'b/12345' - errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, - mock_output_api) - self.assertEqual(0, len(errors)) - mock_input_api.change.BUG = 'b/12345, webrtc:1234' - errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, - mock_output_api) - self.assertEqual(0, len(errors)) + def testCommitMessageBugEntryWithNoError(self): + mock_input_api = MockInputApi() + mock_output_api = MockOutputApi() + mock_input_api.change = MockChange([], ['webrtc:1234']) + errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, + mock_output_api) + self.assertEqual(0, len(errors)) + + def testCommitMessageBugEntryReturnError(self): + mock_input_api = MockInputApi() + mock_output_api = MockOutputApi() + mock_input_api.change = MockChange([], ['webrtc:1234', 'webrtc=4321']) + errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, + mock_output_api) + self.assertEqual(1, len(errors)) + self.assertEqual(('Bogus Bug entry: webrtc=4321. Please specify' + ' the issue tracker prefix and the issue number,' + ' separated by a colon, e.g. webrtc:123 or' + ' chromium:12345.'), str(errors[0])) + + def testCommitMessageBugEntryWithoutPrefix(self): + mock_input_api = MockInputApi() + mock_output_api = MockOutputApi() + mock_input_api.change = MockChange([], ['1234']) + errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, + mock_output_api) + self.assertEqual(1, len(errors)) + self.assertEqual(('Bug entry requires issue tracker prefix, ' + 'e.g. 
webrtc:1234'), str(errors[0])) + + def testCommitMessageBugEntryIsNone(self): + mock_input_api = MockInputApi() + mock_output_api = MockOutputApi() + mock_input_api.change = MockChange([], ['None']) + errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, + mock_output_api) + self.assertEqual(0, len(errors)) + + def testCommitMessageBugEntrySupportInternalBugReference(self): + mock_input_api = MockInputApi() + mock_output_api = MockOutputApi() + mock_input_api.change.BUG = 'b/12345' + errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, + mock_output_api) + self.assertEqual(0, len(errors)) + mock_input_api.change.BUG = 'b/12345, webrtc:1234' + errors = PRESUBMIT.CheckCommitMessageBugEntry(mock_input_api, + mock_output_api) + self.assertEqual(0, len(errors)) class CheckNewlineAtTheEndOfProtoFilesTest(unittest.TestCase): - - def setUp(self): - self.tmp_dir = tempfile.mkdtemp() - self.proto_file_path = os.path.join(self.tmp_dir, 'foo.proto') - self.input_api = MockInputApi() - self.output_api = MockOutputApi() - - def tearDown(self): - shutil.rmtree(self.tmp_dir, ignore_errors=True) - - def testErrorIfProtoFileDoesNotEndWithNewline(self): - self._GenerateProtoWithoutNewlineAtTheEnd() - self.input_api.files = [MockFile(self.proto_file_path)] - errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(self.input_api, - self.output_api, - lambda x: True) - self.assertEqual(1, len(errors)) - self.assertEqual( - 'File %s must end with exactly one newline.' 
% self.proto_file_path, - str(errors[0])) - - def testNoErrorIfProtoFileEndsWithNewline(self): - self._GenerateProtoWithNewlineAtTheEnd() - self.input_api.files = [MockFile(self.proto_file_path)] - errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles(self.input_api, - self.output_api, - lambda x: True) - self.assertEqual(0, len(errors)) - - def _GenerateProtoWithNewlineAtTheEnd(self): - with open(self.proto_file_path, 'w') as f: - f.write(textwrap.dedent(""" + def setUp(self): + self.tmp_dir = tempfile.mkdtemp() + self.proto_file_path = os.path.join(self.tmp_dir, 'foo.proto') + self.input_api = MockInputApi() + self.output_api = MockOutputApi() + + def tearDown(self): + shutil.rmtree(self.tmp_dir, ignore_errors=True) + + def testErrorIfProtoFileDoesNotEndWithNewline(self): + self._GenerateProtoWithoutNewlineAtTheEnd() + self.input_api.files = [MockFile(self.proto_file_path)] + errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles( + self.input_api, self.output_api, lambda x: True) + self.assertEqual(1, len(errors)) + self.assertEqual( + 'File %s must end with exactly one newline.' 
% + self.proto_file_path, str(errors[0])) + + def testNoErrorIfProtoFileEndsWithNewline(self): + self._GenerateProtoWithNewlineAtTheEnd() + self.input_api.files = [MockFile(self.proto_file_path)] + errors = PRESUBMIT.CheckNewlineAtTheEndOfProtoFiles( + self.input_api, self.output_api, lambda x: True) + self.assertEqual(0, len(errors)) + + def _GenerateProtoWithNewlineAtTheEnd(self): + with open(self.proto_file_path, 'w') as f: + f.write( + textwrap.dedent(""" syntax = "proto2"; option optimize_for = LITE_RUNTIME; package webrtc.audioproc; """)) - def _GenerateProtoWithoutNewlineAtTheEnd(self): - with open(self.proto_file_path, 'w') as f: - f.write(textwrap.dedent(""" + def _GenerateProtoWithoutNewlineAtTheEnd(self): + with open(self.proto_file_path, 'w') as f: + f.write( + textwrap.dedent(""" syntax = "proto2"; option optimize_for = LITE_RUNTIME; package webrtc.audioproc;""")) class CheckNoMixingSourcesTest(unittest.TestCase): + def setUp(self): + self.tmp_dir = tempfile.mkdtemp() + self.file_path = os.path.join(self.tmp_dir, 'BUILD.gn') + self.input_api = MockInputApi() + self.output_api = MockOutputApi() - def setUp(self): - self.tmp_dir = tempfile.mkdtemp() - self.file_path = os.path.join(self.tmp_dir, 'BUILD.gn') - self.input_api = MockInputApi() - self.output_api = MockOutputApi() - - def tearDown(self): - shutil.rmtree(self.tmp_dir, ignore_errors=True) + def tearDown(self): + shutil.rmtree(self.tmp_dir, ignore_errors=True) - def testErrorIfCAndCppAreMixed(self): - self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.cc', 'bar.h']) + def testErrorIfCAndCppAreMixed(self): + self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.cc', 'bar.h']) - def testErrorIfCAndObjCAreMixed(self): - self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.m', 'bar.h']) + def testErrorIfCAndObjCAreMixed(self): + self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.m', 'bar.h']) - def testErrorIfCAndObjCppAreMixed(self): - self._AssertNumberOfErrorsWithSources(1, 
['foo.c', 'bar.mm', 'bar.h']) + def testErrorIfCAndObjCppAreMixed(self): + self._AssertNumberOfErrorsWithSources(1, ['foo.c', 'bar.mm', 'bar.h']) - def testErrorIfCppAndObjCAreMixed(self): - self._AssertNumberOfErrorsWithSources(1, ['foo.cc', 'bar.m', 'bar.h']) + def testErrorIfCppAndObjCAreMixed(self): + self._AssertNumberOfErrorsWithSources(1, ['foo.cc', 'bar.m', 'bar.h']) - def testErrorIfCppAndObjCppAreMixed(self): - self._AssertNumberOfErrorsWithSources(1, ['foo.cc', 'bar.mm', 'bar.h']) + def testErrorIfCppAndObjCppAreMixed(self): + self._AssertNumberOfErrorsWithSources(1, ['foo.cc', 'bar.mm', 'bar.h']) - def testNoErrorIfOnlyC(self): - self._AssertNumberOfErrorsWithSources(0, ['foo.c', 'bar.c', 'bar.h']) + def testNoErrorIfOnlyC(self): + self._AssertNumberOfErrorsWithSources(0, ['foo.c', 'bar.c', 'bar.h']) - def testNoErrorIfOnlyCpp(self): - self._AssertNumberOfErrorsWithSources(0, ['foo.cc', 'bar.cc', 'bar.h']) + def testNoErrorIfOnlyCpp(self): + self._AssertNumberOfErrorsWithSources(0, ['foo.cc', 'bar.cc', 'bar.h']) - def testNoErrorIfOnlyObjC(self): - self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.m', 'bar.h']) + def testNoErrorIfOnlyObjC(self): + self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.m', 'bar.h']) - def testNoErrorIfOnlyObjCpp(self): - self._AssertNumberOfErrorsWithSources(0, ['foo.mm', 'bar.mm', 'bar.h']) + def testNoErrorIfOnlyObjCpp(self): + self._AssertNumberOfErrorsWithSources(0, ['foo.mm', 'bar.mm', 'bar.h']) - def testNoErrorIfObjCAndObjCppAreMixed(self): - self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.mm', 'bar.h']) + def testNoErrorIfObjCAndObjCppAreMixed(self): + self._AssertNumberOfErrorsWithSources(0, ['foo.m', 'bar.mm', 'bar.h']) - def testNoErrorIfSourcesAreInExclusiveIfBranches(self): - self._GenerateBuildFile(textwrap.dedent(""" + def testNoErrorIfSourcesAreInExclusiveIfBranches(self): + self._GenerateBuildFile( + textwrap.dedent(""" rtc_library("bar_foo") { if (is_win) { sources = [ @@ -185,14 +184,15 
@@ def testNoErrorIfSourcesAreInExclusiveIfBranches(self): } } """)) - self.input_api.files = [MockFile(self.file_path)] - errors = PRESUBMIT.CheckNoMixingSources(self.input_api, - [MockFile(self.file_path)], - self.output_api) - self.assertEqual(0, len(errors)) - - def testErrorIfSourcesAreNotInExclusiveIfBranches(self): - self._GenerateBuildFile(textwrap.dedent(""" + self.input_api.files = [MockFile(self.file_path)] + errors = PRESUBMIT.CheckNoMixingSources(self.input_api, + [MockFile(self.file_path)], + self.output_api) + self.assertEqual(0, len(errors)) + + def testErrorIfSourcesAreNotInExclusiveIfBranches(self): + self._GenerateBuildFile( + textwrap.dedent(""" rtc_library("bar_foo") { if (is_win) { sources = [ @@ -224,21 +224,23 @@ def testErrorIfSourcesAreNotInExclusiveIfBranches(self): } } """)) - self.input_api.files = [MockFile(self.file_path)] - errors = PRESUBMIT.CheckNoMixingSources(self.input_api, - [MockFile(self.file_path)], - self.output_api) - self.assertEqual(1, len(errors)) - self.assertTrue('bar.cc' in str(errors[0])) - self.assertTrue('bar.mm' in str(errors[0])) - self.assertTrue('foo.cc' in str(errors[0])) - self.assertTrue('foo.mm' in str(errors[0])) - self.assertTrue('bar.m' in str(errors[0])) - self.assertTrue('bar.c' in str(errors[0])) - - def _AssertNumberOfErrorsWithSources(self, number_of_errors, sources): - assert len(sources) == 3, 'This function accepts a list of 3 source files' - self._GenerateBuildFile(textwrap.dedent(""" + self.input_api.files = [MockFile(self.file_path)] + errors = PRESUBMIT.CheckNoMixingSources(self.input_api, + [MockFile(self.file_path)], + self.output_api) + self.assertEqual(1, len(errors)) + self.assertTrue('bar.cc' in str(errors[0])) + self.assertTrue('bar.mm' in str(errors[0])) + self.assertTrue('foo.cc' in str(errors[0])) + self.assertTrue('foo.mm' in str(errors[0])) + self.assertTrue('bar.m' in str(errors[0])) + self.assertTrue('bar.c' in str(errors[0])) + + def _AssertNumberOfErrorsWithSources(self, 
number_of_errors, sources): + assert len( + sources) == 3, 'This function accepts a list of 3 source files' + self._GenerateBuildFile( + textwrap.dedent(""" rtc_static_library("bar_foo") { sources = [ "%s", @@ -254,20 +256,20 @@ def _AssertNumberOfErrorsWithSources(self, number_of_errors, sources): ], } """ % (tuple(sources) * 2))) - self.input_api.files = [MockFile(self.file_path)] - errors = PRESUBMIT.CheckNoMixingSources(self.input_api, - [MockFile(self.file_path)], - self.output_api) - self.assertEqual(number_of_errors, len(errors)) - if number_of_errors == 1: - for source in sources: - if not source.endswith('.h'): - self.assertTrue(source in str(errors[0])) + self.input_api.files = [MockFile(self.file_path)] + errors = PRESUBMIT.CheckNoMixingSources(self.input_api, + [MockFile(self.file_path)], + self.output_api) + self.assertEqual(number_of_errors, len(errors)) + if number_of_errors == 1: + for source in sources: + if not source.endswith('.h'): + self.assertTrue(source in str(errors[0])) - def _GenerateBuildFile(self, content): - with open(self.file_path, 'w') as f: - f.write(content) + def _GenerateBuildFile(self, content): + with open(self.file_path, 'w') as f: + f.write(content) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/presubmit_test_mocks.py b/presubmit_test_mocks.py index 510a553158..b15eb74dd8 100644 --- a/presubmit_test_mocks.py +++ b/presubmit_test_mocks.py @@ -14,117 +14,125 @@ class MockInputApi(object): - """Mock class for the InputApi class. + """Mock class for the InputApi class. This class can be used for unittests for presubmit by initializing the files attribute as the list of changed files. 
""" - def __init__(self): - self.change = MockChange([], []) - self.files = [] - self.presubmit_local_path = os.path.dirname(__file__) + def __init__(self): + self.change = MockChange([], []) + self.files = [] + self.presubmit_local_path = os.path.dirname(__file__) - def AffectedSourceFiles(self, file_filter=None): - return self.AffectedFiles(file_filter=file_filter) + def AffectedSourceFiles(self, file_filter=None): + return self.AffectedFiles(file_filter=file_filter) - def AffectedFiles(self, file_filter=None, include_deletes=False): - # pylint: disable=unused-argument - return self.files + def AffectedFiles(self, file_filter=None, include_deletes=False): + # pylint: disable=unused-argument + return self.files - @classmethod - def FilterSourceFile(cls, affected_file, white_list=(), black_list=()): - # pylint: disable=unused-argument - return True + @classmethod + def FilterSourceFile(cls, + affected_file, + files_to_check=(), + files_to_skip=()): + # pylint: disable=unused-argument + return True - def PresubmitLocalPath(self): - return self.presubmit_local_path + def PresubmitLocalPath(self): + return self.presubmit_local_path - def ReadFile(self, affected_file, mode='rU'): - filename = affected_file.AbsoluteLocalPath() - for f in self.files: - if f.LocalPath() == filename: - with open(filename, mode) as f: - return f.read() - # Otherwise, file is not in our mock API. - raise IOError, "No such file or directory: '%s'" % filename + def ReadFile(self, affected_file, mode='rU'): + filename = affected_file.AbsoluteLocalPath() + for f in self.files: + if f.LocalPath() == filename: + with open(filename, mode) as f: + return f.read() + # Otherwise, file is not in our mock API. + raise IOError, "No such file or directory: '%s'" % filename class MockOutputApi(object): - """Mock class for the OutputApi class. + """Mock class for the OutputApi class. An instance of this class can be passed to presubmit unittests for outputing various types of results. 
""" - class PresubmitResult(object): - def __init__(self, message, items=None, long_text=''): - self.message = message - self.items = items - self.long_text = long_text + class PresubmitResult(object): + def __init__(self, message, items=None, long_text=''): + self.message = message + self.items = items + self.long_text = long_text - def __repr__(self): - return self.message + def __repr__(self): + return self.message - class PresubmitError(PresubmitResult): - def __init__(self, message, items=None, long_text=''): - MockOutputApi.PresubmitResult.__init__(self, message, items, long_text) - self.type = 'error' + class PresubmitError(PresubmitResult): + def __init__(self, message, items=None, long_text=''): + MockOutputApi.PresubmitResult.__init__(self, message, items, + long_text) + self.type = 'error' class MockChange(object): - """Mock class for Change class. + """Mock class for Change class. This class can be used in presubmit unittests to mock the query of the current change. """ - def __init__(self, changed_files, bugs_from_description, tags=None): - self._changed_files = changed_files - self._bugs_from_description = bugs_from_description - self.tags = dict() if not tags else tags + def __init__(self, changed_files, bugs_from_description, tags=None): + self._changed_files = changed_files + self._bugs_from_description = bugs_from_description + self.tags = dict() if not tags else tags - def BugsFromDescription(self): - return self._bugs_from_description + def BugsFromDescription(self): + return self._bugs_from_description - def __getattr__(self, attr): - """Return tags directly as attributes on the object.""" - if not re.match(r"^[A-Z_]*$", attr): - raise AttributeError(self, attr) - return self.tags.get(attr) + def __getattr__(self, attr): + """Return tags directly as attributes on the object.""" + if not re.match(r"^[A-Z_]*$", attr): + raise AttributeError(self, attr) + return self.tags.get(attr) class MockFile(object): - """Mock class for the File class. 
+ """Mock class for the File class. This class can be used to form the mock list of changed files in MockInputApi for presubmit unittests. """ - def __init__(self, local_path, new_contents=None, old_contents=None, - action='A'): - if new_contents is None: - new_contents = ["Data"] - self._local_path = local_path - self._new_contents = new_contents - self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)] - self._action = action - self._old_contents = old_contents - - def Action(self): - return self._action - - def ChangedContents(self): - return self._changed_contents - - def NewContents(self): - return self._new_contents - - def LocalPath(self): - return self._local_path - - def AbsoluteLocalPath(self): - return self._local_path - - def OldContents(self): - return self._old_contents + def __init__(self, + local_path, + new_contents=None, + old_contents=None, + action='A'): + if new_contents is None: + new_contents = ["Data"] + self._local_path = local_path + self._new_contents = new_contents + self._changed_contents = [(i + 1, l) + for i, l in enumerate(new_contents)] + self._action = action + self._old_contents = old_contents + + def Action(self): + return self._action + + def ChangedContents(self): + return self._changed_contents + + def NewContents(self): + return self._new_contents + + def LocalPath(self): + return self._local_path + + def AbsoluteLocalPath(self): + return self._local_path + + def OldContents(self): + return self._old_contents diff --git a/pylintrc b/pylintrc index 9809ebe998..f26c84adce 100644 --- a/pylintrc +++ b/pylintrc @@ -97,9 +97,6 @@ max-line-length=80 # Maximum number of lines in a module max-module-lines=1000 -# We use two spaces for indents, instead of the usual four spaces or tab. 
-indent-string=' ' - [BASIC] diff --git a/resources/audio_processing/agc2/rnn_vad/gru_in.dat.sha1 b/resources/audio_processing/agc2/rnn_vad/gru_in.dat.sha1 new file mode 100644 index 0000000000..f78c40e6c4 --- /dev/null +++ b/resources/audio_processing/agc2/rnn_vad/gru_in.dat.sha1 @@ -0,0 +1 @@ +402abf7a4e5d35abb78906fff2b3f4d8d24aa629 \ No newline at end of file diff --git a/resources/audio_processing/output_data_fixed.pb.sha1 b/resources/audio_processing/output_data_fixed.pb.sha1 index f27905087e..43e68303ac 100644 --- a/resources/audio_processing/output_data_fixed.pb.sha1 +++ b/resources/audio_processing/output_data_fixed.pb.sha1 @@ -1 +1 @@ -4010b1fe15eda1b42968cdb3f9fed399e1aa7197 \ No newline at end of file +0ff9ab4d46929552e21d16f266f9eba42575ba8d \ No newline at end of file diff --git a/resources/audio_processing/output_data_float.pb.sha1 b/resources/audio_processing/output_data_float.pb.sha1 index b8312fc58f..d3375949ac 100644 --- a/resources/audio_processing/output_data_float.pb.sha1 +++ b/resources/audio_processing/output_data_float.pb.sha1 @@ -1 +1 @@ -d22d4b0bc8f59aa27da61e158b9d35596f3844f5 \ No newline at end of file +749efdfd1e3c3ace434b3673dac9ce4938534449 \ No newline at end of file diff --git a/resources/audio_processing/output_data_float_avx2.pb.sha1 b/resources/audio_processing/output_data_float_avx2.pb.sha1 new file mode 100644 index 0000000000..79a95efc0e --- /dev/null +++ b/resources/audio_processing/output_data_float_avx2.pb.sha1 @@ -0,0 +1 @@ +78c1a84de332173863c997538aa19b8cdcba5020 \ No newline at end of file diff --git a/resources/audio_processing/test/py_quality_assessment/BUILD.gn b/resources/audio_processing/test/py_quality_assessment/BUILD.gn index 5f2d34dd49..594efb05bb 100644 --- a/resources/audio_processing/test/py_quality_assessment/BUILD.gn +++ b/resources/audio_processing/test/py_quality_assessment/BUILD.gn @@ -8,7 +8,7 @@ import("../../../../webrtc.gni") -if (rtc_include_tests) { +if (rtc_include_tests && 
!build_with_chromium) { copy("noise_tracks") { testonly = true sources = [ "noise_tracks/city.wav" ] diff --git a/rtc_base/BUILD.gn b/rtc_base/BUILD.gn index 2d90898ce7..8dc89fafba 100644 --- a/rtc_base/BUILD.gn +++ b/rtc_base/BUILD.gn @@ -8,6 +8,7 @@ import("//build/config/crypto.gni") import("//build/config/ui.gni") +import("//third_party/google_benchmark/buildconfig.gni") import("../webrtc.gni") if (is_android) { @@ -15,7 +16,7 @@ if (is_android) { import("//build/config/android/rules.gni") } -config("rtc_base_chromium_config") { +config("threading_chromium_config") { defines = [ "NO_MAIN_THREAD_WRAPPING" ] } @@ -43,6 +44,25 @@ rtc_source_set("ignore_wundef") { sources = [ "ignore_wundef.h" ] } +rtc_source_set("untyped_function") { + sources = [ "untyped_function.h" ] + deps = [ "system:assume" ] +} + +rtc_source_set("callback_list") { + sources = [ + "callback_list.cc", + "callback_list.h", + ] + deps = [ + ":checks", + ":untyped_function", + "../api:function_view", + "system:assume", + "system:inline", + ] +} + # The subset of rtc_base approved for use outside of libjingle. # TODO(bugs.webrtc.org/9838): Create small and focused build targets and remove # the old concept of rtc_base and rtc_base_approved. 
@@ -55,16 +75,20 @@ rtc_library("rtc_base_approved") { ":type_traits", "../api:array_view", "../api:scoped_refptr", + "../api:sequence_checker", + "synchronization:mutex", "system:arch", + "system:no_unique_address", "system:rtc_export", - "system:unused", "third_party/base64", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/types:optional", ] public_deps = [] # no-presubmit-check TODO(webrtc:8603) sources = [ - "bind.h", "bit_buffer.cc", "bit_buffer.h", "buffer.h", @@ -77,9 +101,9 @@ rtc_library("rtc_base_approved") { "copy_on_write_buffer.h", "event_tracer.cc", "event_tracer.h", + "hash.h", "location.cc", "location.h", - "message_buffer_reader.h", "numerics/histogram_percentile_counter.cc", "numerics/histogram_percentile_counter.h", "numerics/mod_ops.h", @@ -105,12 +129,26 @@ rtc_library("rtc_base_approved") { if (is_win) { sources += [ + "win/get_activation_factory.cc", + "win/get_activation_factory.h", + "win/hstring.cc", + "win/hstring.h", + "win/scoped_com_initializer.cc", + "win/scoped_com_initializer.h", "win/windows_version.cc", "win/windows_version.h", ] data_deps = [ "//build/win:runtime_libs" ] } + # These files add a dependency on the Win10 SDK v10.0.10240. + if (rtc_enable_win_wgc) { + sources += [ + "win/create_direct3d_device.cc", + "win/create_direct3d_device.h", + ] + } + if (is_nacl) { public_deps += # no-presubmit-check TODO(webrtc:8603) [ "//native_client_sdk/src/libraries/nacl_io" ] @@ -122,7 +160,6 @@ rtc_library("rtc_base_approved") { public_deps += [ # no-presubmit-check TODO(webrtc:8603) ":atomicops", - ":criticalsection", ":logging", ":macromagic", ":platform_thread", @@ -131,19 +168,16 @@ rtc_library("rtc_base_approved") { ":rtc_event", ":safe_conversions", ":stringutils", - ":thread_checker", ":timeutils", + "../api:sequence_checker", ] } rtc_source_set("macromagic") { - # TODO(bugs.webrtc.org/9606): This should not be public. 
- visibility = [ "*" ] sources = [ "arraysize.h", "constructor_magic.h", "format_macros.h", - "stringize_macros.h", "thread_annotations.h", ] deps = [ "system:arch" ] @@ -164,20 +198,23 @@ rtc_source_set("refcount") { "ref_counted_object.h", "ref_counter.h", ] - deps = [ ":macromagic" ] + deps = [ + ":macromagic", + "../api:scoped_refptr", + ] } rtc_library("criticalsection") { sources = [ - "critical_section.cc", - "critical_section.h", + "deprecated/recursive_critical_section.cc", + "deprecated/recursive_critical_section.h", ] deps = [ ":atomicops", ":checks", ":macromagic", ":platform_thread_types", - "system:rtc_export", + "synchronization:yield", "system:unused", ] } @@ -186,9 +223,10 @@ rtc_library("platform_thread") { visibility = [ ":rtc_base_approved", ":rtc_task_queue_libevent", - ":rtc_task_queue_win", ":rtc_task_queue_stdlib", - "synchronization:sequence_checker", + ":rtc_task_queue_win", + "../api:sequence_checker", + "synchronization:mutex", ] sources = [ "platform_thread.cc", @@ -200,9 +238,13 @@ rtc_library("platform_thread") { ":macromagic", ":platform_thread_types", ":rtc_event", - ":thread_checker", ":timeutils", + "../api:sequence_checker", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", ] } @@ -226,8 +268,8 @@ rtc_library("rtc_event") { ":checks", "synchronization:yield_policy", "system:warn_current_thread_is_deadlocked", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } } @@ -236,11 +278,13 @@ rtc_library("logging") { libs = [] deps = [ ":checks", - ":criticalsection", ":macromagic", ":platform_thread_types", ":stringutils", ":timeutils", + "synchronization:mutex", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/meta:type_traits", "//third_party/abseil-cpp/absl/strings", @@ -265,27 +309,15 @@ rtc_library("logging") { 
deps += [ "system:inline" ] if (is_mac) { - libs += [ "Foundation.framework" ] + frameworks = [ "Foundation.framework" ] } - # logging.h needs the deprecation header while downstream projects are - # removing code that depends on logging implementation details. - deps += [ ":deprecation" ] - if (is_android) { libs += [ "log" ] } } } -rtc_source_set("thread_checker") { - sources = [ "thread_checker.h" ] - deps = [ - ":deprecation", - "synchronization:sequence_checker", - ] -} - rtc_source_set("atomicops") { sources = [ "atomic_ops.h" ] } @@ -302,6 +334,8 @@ rtc_library("checks") { ":safe_compare", "system:inline", "system:rtc_export", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/meta:type_traits", "//third_party/abseil-cpp/absl/strings", ] @@ -318,13 +352,14 @@ rtc_library("rate_limiter") { deps = [ ":rtc_base_approved", "../system_wrappers", - "//third_party/abseil-cpp/absl/types:optional", + "synchronization:mutex", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_source_set("sanitizer") { sources = [ "sanitizer.h" ] - deps = [ "//third_party/abseil-cpp/absl/meta:type_traits" ] + absl_deps = [ "//third_party/abseil-cpp/absl/meta:type_traits" ] } rtc_source_set("bounded_inline_vector") { @@ -366,6 +401,8 @@ rtc_source_set("safe_conversions") { rtc_library("timeutils") { visibility = [ "*" ] sources = [ + "system_time.cc", + "system_time.h", "time_utils.cc", "time_utils.h", ] @@ -375,6 +412,10 @@ rtc_library("timeutils") { ":stringutils", "system:rtc_export", ] + if (rtc_exclude_system_time) { + defines = [ "WEBRTC_EXCLUDE_SYSTEM_TIME" ] + } + libs = [] if (is_win) { libs += [ "winmm.lib" ] @@ -399,6 +440,8 @@ rtc_library("stringutils") { ":macromagic", ":safe_minmax", "../api:array_view", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -419,10 +462,6 @@ rtc_source_set("type_traits") { sources = [ "type_traits.h" ] } -rtc_source_set("deprecation") { - sources = [ 
"deprecation.h" ] -} - rtc_library("rtc_task_queue") { visibility = [ "*" ] sources = [ @@ -434,8 +473,8 @@ rtc_library("rtc_task_queue") { "../api/task_queue", "system:rtc_export", "task_utils:to_queued_task", - "//third_party/abseil-cpp/absl/memory", ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] } rtc_source_set("rtc_operations_chain") { @@ -449,8 +488,10 @@ rtc_source_set("rtc_operations_chain") { ":macromagic", ":refcount", "../api:scoped_refptr", - "synchronization:sequence_checker", + "../api:sequence_checker", + "system:no_unique_address", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } if (rtc_enable_libevent) { @@ -462,7 +503,6 @@ if (rtc_enable_libevent) { ] deps = [ ":checks", - ":criticalsection", ":logging", ":macromagic", ":platform_thread", @@ -470,6 +510,9 @@ if (rtc_enable_libevent) { ":safe_conversions", ":timeutils", "../api/task_queue", + "synchronization:mutex", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/container:inlined_vector", "//third_party/abseil-cpp/absl/strings", ] @@ -490,9 +533,10 @@ if (is_mac || is_ios) { ":checks", ":logging", "../api/task_queue", + "synchronization:mutex", "system:gcd_helpers", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } } @@ -505,7 +549,6 @@ if (is_win) { ] deps = [ ":checks", - ":criticalsection", ":logging", ":macromagic", ":platform_thread", @@ -513,7 +556,11 @@ if (is_win) { ":safe_conversions", ":timeutils", "../api/task_queue", + "synchronization:mutex", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", ] } } @@ -525,7 +572,6 @@ rtc_library("rtc_task_queue_stdlib") { ] deps = [ ":checks", - ":criticalsection", ":logging", ":macromagic", ":platform_thread", @@ -533,8 +579,9 @@ rtc_library("rtc_task_queue_stdlib") { ":safe_conversions", ":timeutils", "../api/task_queue", - "//third_party/abseil-cpp/absl/strings", + "synchronization:mutex", ] + 
absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_library("weak_ptr") { @@ -545,7 +592,8 @@ rtc_library("weak_ptr") { deps = [ ":refcount", "../api:scoped_refptr", - "synchronization:sequence_checker", + "../api:sequence_checker", + "system:no_unique_address", ] } @@ -553,8 +601,6 @@ rtc_library("rtc_numerics") { sources = [ "numerics/event_based_exponential_moving_average.cc", "numerics/event_based_exponential_moving_average.h", - "numerics/event_rate_counter.cc", - "numerics/event_rate_counter.h", "numerics/exp_filter.cc", "numerics/exp_filter.h", "numerics/math_utils.h", @@ -563,32 +609,34 @@ rtc_library("rtc_numerics") { "numerics/moving_median_filter.h", "numerics/percentile_filter.h", "numerics/running_statistics.h", - "numerics/sample_stats.cc", - "numerics/sample_stats.h", - "numerics/samples_stats_counter.cc", - "numerics/samples_stats_counter.h", "numerics/sequence_number_util.h", ] deps = [ ":checks", ":rtc_base_approved", - ":safe_compare", - "../api:array_view", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_library("rtc_stats_counters") { + sources = [ + "numerics/event_rate_counter.cc", + "numerics/event_rate_counter.h", + "numerics/sample_stats.cc", + "numerics/sample_stats.h", + ] + deps = [ + "../api/numerics", "../api/units:data_rate", "../api/units:time_delta", "../api/units:timestamp", - "//third_party/abseil-cpp/absl/algorithm:container", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [] } config("rtc_json_suppressions") { if (!is_win || is_clang) { cflags_cc = [ - # TODO(bugs.webrtc.org/10770): Update jsoncpp API usage and remove - # -Wno-deprecated-declarations. - "-Wno-deprecated-declarations", - # TODO(bugs.webrtc.org/10814): Remove -Wno-undef as soon as it get # removed upstream. 
"-Wno-undef", @@ -618,137 +666,234 @@ rtc_library("rtc_json") { } } -rtc_source_set("net_helpers") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "net_helpers.cc", - # "net_helpers.h", - # ] +rtc_library("net_helpers") { + sources = [ + "net_helpers.cc", + "net_helpers.h", + ] + deps = [] + if (is_android) { + deps += [ ":ifaddrs_android" ] + } + if (is_win) { + deps += [ ":win32" ] + } } -rtc_source_set("async_resolver_interface") { +rtc_library("async_resolver_interface") { visibility = [ "*" ] - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "async_resolver_interface.cc", - # "async_resolver_interface.h", - # ] + sources = [ + "async_resolver_interface.cc", + "async_resolver_interface.h", + ] + deps = [ + ":socket_address", + "system:rtc_export", + "third_party/sigslot", + ] } -rtc_source_set("ip_address") { +rtc_library("ip_address") { visibility = [ "*" ] - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "ip_address.cc", - # "ip_address.h", - # ] + sources = [ + "ip_address.cc", + "ip_address.h", + ] + deps = [ + ":net_helpers", + ":rtc_base_approved", + ":stringutils", + "system:rtc_export", + ] + if (is_win) { + deps += [ ":win32" ] + } } -rtc_source_set("socket_address") { +rtc_library("socket_address") { visibility = [ "*" ] - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "socket_address.cc", - # "socket_address.h", - # ] + sources = [ + "socket_address.cc", + "socket_address.h", + ] + deps = [ + ":checks", + ":ip_address", + ":logging", + ":net_helpers", + ":rtc_base_approved", + ":safe_conversions", + ":stringutils", + "system:rtc_export", + ] + if (is_win) { + deps += [ ":win32" ] + } } -rtc_source_set("null_socket_server") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # 
the following files: - # sources = [ - # "null_socket_server.cc", - # "null_socket_server.h", - # ] +rtc_library("null_socket_server") { + sources = [ + "null_socket_server.cc", + "null_socket_server.h", + ] + deps = [ + ":async_socket", + ":checks", + ":rtc_event", + ":socket", + ":socket_server", + "system:rtc_export", + ] } rtc_source_set("socket_server") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "socket_server.h", - # ] + sources = [ "socket_server.h" ] + deps = [ ":socket_factory" ] } -rtc_source_set("threading") { +rtc_library("threading") { visibility = [ "*" ] - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "asyncresolver.cc", - # "asyncresolver.h", - # "defaultsocketserver.cc", - # "defaultsocketserver.h", - # "message_handler.cc", - # "message_handler.h", - # "network_monitor.cc", - # "network_monitor.h", - # "physical_socket_server.cc", - # "physical_socket_server.h", - # "signal_thread.cc", - # "signal_thread.h", - # "thread.cc", - # "thread.h", - # ] + + if (build_with_chromium) { + public_configs = [ ":threading_chromium_config" ] + } + + sources = [ + "async_resolver.cc", + "async_resolver.h", + "internal/default_socket_server.cc", + "internal/default_socket_server.h", + "message_handler.cc", + "message_handler.h", + "network_monitor.cc", + "network_monitor.h", + "network_monitor_factory.cc", + "network_monitor_factory.h", + "physical_socket_server.cc", + "physical_socket_server.h", + "thread.cc", + "thread.h", + "thread_message.h", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] + deps = [ + ":async_resolver_interface", + ":atomicops", + ":checks", + ":criticalsection", + ":ip_address", + ":logging", + ":macromagic", + ":network_constants", + ":null_socket_server", + ":platform_thread_types", + ":rtc_base_approved", + ":rtc_event", + ":rtc_task_queue", + ":socket_address", + 
":socket_server", + ":timeutils", + "../api:function_view", + "../api:refcountedbase", + "../api:scoped_refptr", + "../api:sequence_checker", + "../api/task_queue", + "synchronization:mutex", + "system:no_unique_address", + "system:rtc_export", + "task_utils:pending_task_safety_flag", + "task_utils:to_queued_task", + "third_party/sigslot", + ] + if (is_android) { + deps += [ ":ifaddrs_android" ] + } + if (is_win) { + deps += [ ":win32" ] + } + if (is_mac || is_ios) { + deps += [ "system:cocoa_threading" ] + } } rtc_source_set("socket_factory") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "socket_factory.h", - # ] + sources = [ "socket_factory.h" ] + deps = [ + ":async_socket", + ":socket", + ] } -rtc_source_set("async_socket") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "async_socket.cc", - # "async_socket.h", - # ] +rtc_library("async_socket") { + sources = [ + "async_socket.cc", + "async_socket.h", + ] + deps = [ + ":checks", + ":socket", + ":socket_address", + "third_party/sigslot", + ] } -rtc_source_set("socket") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "socket.cc", - # "socket.h", - # ] +rtc_library("socket") { + sources = [ + "socket.cc", + "socket.h", + ] + deps = [ + ":macromagic", + ":socket_address", + ] + if (is_win) { + deps += [ ":win32" ] + } } rtc_source_set("network_constants") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "network_constants.h", - # ] + sources = [ + "network_constants.cc", + "network_constants.h", + ] + deps = [ ":checks" ] } if (is_android) { - rtc_source_set("ifaddrs_android") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "ifaddrs_android.cc", - # "ifaddrs_android.h", - # ] + 
rtc_library("ifaddrs_android") { + sources = [ + "ifaddrs_android.cc", + "ifaddrs_android.h", + ] + libs = [ + "log", + "GLESv2", + ] } } if (is_win) { - rtc_source_set("win32") { - # TODO(bugs.webrtc.org/9987): This build target will soon contain - # the following files: - # sources = [ - # "win32.cc", - # "win32.h", - # ] + rtc_library("win32") { + sources = [ + "win32.cc", + "win32.h", + "win32_window.cc", + "win32_window.h", + ] + + deps = [ + ":checks", + ":macromagic", + ":rtc_base_approved", + ] + + libs = [ + "crypt32.lib", + "iphlpapi.lib", + "secur32.lib", + ] + + defines = [ "_CRT_NONSTDC_NO_DEPRECATE" ] } } @@ -759,22 +904,42 @@ rtc_library("rtc_base") { libs = [] defines = [] deps = [ + ":async_resolver_interface", + ":async_socket", ":checks", - ":deprecation", + ":ip_address", + ":network_constants", + ":null_socket_server", + ":rtc_task_queue", + ":socket", + ":socket_address", + ":socket_factory", + ":socket_server", ":stringutils", + ":threading", "../api:array_view", "../api:function_view", + "../api:refcountedbase", "../api:scoped_refptr", + "../api:sequence_checker", + "../api/numerics", "../api/task_queue", "../system_wrappers:field_trial", "network:sent_packet", + "synchronization:mutex", "system:file_wrapper", "system:inline", + "system:no_unique_address", "system:rtc_export", + "task_utils:pending_task_safety_flag", + "task_utils:repeating_task", "task_utils:to_queued_task", "third_party/base64", "third_party/sigslot", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/container:flat_hash_map", "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", @@ -788,10 +953,6 @@ rtc_library("rtc_base") { "async_invoker_inl.h", "async_packet_socket.cc", "async_packet_socket.h", - "async_resolver_interface.cc", - "async_resolver_interface.h", - "async_socket.cc", - "async_socket.h", "async_tcp_socket.cc", 
"async_tcp_socket.h", "async_udp_socket.cc", @@ -809,64 +970,39 @@ rtc_library("rtc_base") { "helpers.h", "http_common.cc", "http_common.h", - "ip_address.cc", - "ip_address.h", - "keep_ref_until_done.h", "mdns_responder_interface.h", "message_digest.cc", "message_digest.h", - "message_handler.cc", - "message_handler.h", "net_helper.cc", "net_helper.h", - "net_helpers.cc", - "net_helpers.h", "network.cc", "network.h", - "network_constants.cc", - "network_constants.h", - "network_monitor.cc", - "network_monitor.h", "network_route.cc", "network_route.h", - "null_socket_server.cc", - "null_socket_server.h", "openssl.h", "openssl_adapter.cc", "openssl_adapter.h", - "openssl_certificate.cc", - "openssl_certificate.h", "openssl_digest.cc", "openssl_digest.h", - "openssl_identity.cc", - "openssl_identity.h", + "openssl_key_pair.cc", + "openssl_key_pair.h", "openssl_session_cache.cc", "openssl_session_cache.h", "openssl_stream_adapter.cc", "openssl_stream_adapter.h", "openssl_utility.cc", "openssl_utility.h", - "physical_socket_server.cc", - "physical_socket_server.h", "proxy_info.cc", "proxy_info.h", "rtc_certificate.cc", "rtc_certificate.h", "rtc_certificate_generator.cc", "rtc_certificate_generator.h", - "signal_thread.cc", - "signal_thread.h", "sigslot_repeater.h", - "socket.cc", - "socket.h", "socket_adapters.cc", "socket_adapters.h", - "socket_address.cc", - "socket_address.h", "socket_address_pair.cc", "socket_address_pair.h", - "socket_factory.h", - "socket_server.h", "ssl_adapter.cc", "ssl_adapter.h", "ssl_certificate.cc", @@ -879,22 +1015,33 @@ rtc_library("rtc_base") { "ssl_stream_adapter.h", "stream.cc", "stream.h", - "thread.cc", - "thread.h", - "thread_message.h", "unique_id_generator.cc", "unique_id_generator.h", ] + # If we are building the SSL library ourselves, we know it's BoringSSL. 
+ if (rtc_build_ssl) { + sources += [ + "boringssl_certificate.cc", + "boringssl_certificate.h", + "boringssl_identity.cc", + "boringssl_identity.h", + ] + } else { + sources += [ + "openssl_certificate.cc", + "openssl_certificate.h", + "openssl_identity.cc", + "openssl_identity.h", + ] + } + if (build_with_chromium) { include_dirs = [ "../../boringssl/src/include" ] - public_configs += [ ":rtc_base_chromium_config" ] } else { sources += [ - "callback.h", "log_sinks.cc", "log_sinks.h", - "numerics/math_utils.h", "rolling_accumulator.h", "ssl_roots.h", ] @@ -919,23 +1066,14 @@ rtc_library("rtc_base") { } if (is_android) { - sources += [ - "ifaddrs_android.cc", - "ifaddrs_android.h", - ] - - libs += [ - "log", - "GLESv2", - ] + deps += [ ":ifaddrs_android" ] } if (is_ios || is_mac) { sources += [ "mac_ifaddrs_converter.cc" ] - deps += [ "system:cocoa_threading" ] } - if (is_linux) { + if (is_linux || is_chromeos) { libs += [ "dl", "rt", @@ -943,7 +1081,7 @@ rtc_library("rtc_base") { } if (is_ios) { - libs += [ + frameworks = [ "CFNetwork.framework", "Foundation.framework", "Security.framework", @@ -953,20 +1091,7 @@ rtc_library("rtc_base") { } if (is_win) { - sources += [ - "win32.cc", - "win32.h", - "win32_window.cc", - "win32_window.h", - ] - - libs += [ - "crypt32.lib", - "iphlpapi.lib", - "secur32.lib", - ] - - defines += [ "_CRT_NONSTDC_NO_DEPRECATE" ] + deps += [ ":win32" ] } if (is_posix || is_fuchsia) { @@ -1000,9 +1125,10 @@ rtc_library("gunit_helpers") { ":rtc_base", ":rtc_base_tests_utils", ":stringutils", + ":threading", "../test:test_support", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_library("testclient") { @@ -1012,12 +1138,25 @@ rtc_library("testclient") { "test_client.h", ] deps = [ - ":criticalsection", ":gunit_helpers", - ":macromagic", ":rtc_base", ":rtc_base_tests_utils", + ":threading", ":timeutils", + "synchronization:mutex", + ] +} + +rtc_library("callback_list_unittests") { + 
testonly = true + + sources = [ "callback_list_unittest.cc" ] + deps = [ + ":callback_list", + ":gunit_helpers", + ":rtc_base", + "../api:function_view", + "../test:test_support", ] } @@ -1061,12 +1200,23 @@ rtc_library("rtc_base_tests_utils") { "virtual_socket_server.h", ] deps = [ + ":async_socket", ":checks", + ":ip_address", ":rtc_base", + ":socket", + ":socket_address", + ":socket_factory", + ":socket_server", + ":threading", "../api/units:time_delta", "../api/units:timestamp", "memory:fifo_buffer", + "synchronization:mutex", + "task_utils:to_queued_task", "third_party/sigslot", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/memory", ] @@ -1081,15 +1231,14 @@ rtc_library("task_queue_for_test") { ] deps = [ ":checks", - ":macromagic", ":rtc_base_approved", ":rtc_event", ":rtc_task_queue", "../api/task_queue", "../api/task_queue:default_task_queue_factory", "task_utils:to_queued_task", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } if (rtc_include_tests) { @@ -1101,135 +1250,17 @@ if (rtc_include_tests) { ":rtc_base", ":rtc_base_tests_utils", "../test:test_support", + "synchronization:mutex", "third_party/sigslot", ] } - rtc_library("rtc_base_nonparallel_tests") { + rtc_library("untyped_function_unittest") { testonly = true - - sources = [ - "cpu_time_unittest.cc", - "file_rotating_stream_unittest.cc", - "null_socket_server_unittest.cc", - "physical_socket_server_unittest.cc", - "socket_address_unittest.cc", - "socket_unittest.cc", - "socket_unittest.h", - ] + sources = [ "untyped_function_unittest.cc" ] deps = [ - ":checks", - ":gunit_helpers", - ":rtc_base", - ":rtc_base_tests_utils", - ":testclient", - "../system_wrappers", - "../test:fileutils", - "../test:test_main", + ":untyped_function", "../test:test_support", - "third_party/sigslot", - "//testing/gtest", - "//third_party/abseil-cpp/absl/memory", - ] - if (is_win) { - sources += [ 
"win32_socket_server_unittest.cc" ] - } - } - - rtc_library("rtc_base_approved_unittests") { - testonly = true - sources = [ - "atomic_ops_unittest.cc", - "base64_unittest.cc", - "bind_unittest.cc", - "bit_buffer_unittest.cc", - "bounded_inline_vector_unittest.cc", - "buffer_queue_unittest.cc", - "buffer_unittest.cc", - "byte_buffer_unittest.cc", - "byte_order_unittest.cc", - "checks_unittest.cc", - "copy_on_write_buffer_unittest.cc", - "critical_section_unittest.cc", - "event_tracer_unittest.cc", - "event_unittest.cc", - "logging_unittest.cc", - "numerics/divide_round_unittest.cc", - "numerics/histogram_percentile_counter_unittest.cc", - "numerics/mod_ops_unittest.cc", - "numerics/moving_max_counter_unittest.cc", - "numerics/safe_compare_unittest.cc", - "numerics/safe_minmax_unittest.cc", - "numerics/sample_counter_unittest.cc", - "one_time_event_unittest.cc", - "platform_thread_unittest.cc", - "random_unittest.cc", - "rate_limiter_unittest.cc", - "rate_statistics_unittest.cc", - "rate_tracker_unittest.cc", - "ref_counted_object_unittest.cc", - "sanitizer_unittest.cc", - "string_encode_unittest.cc", - "string_to_number_unittest.cc", - "string_utils_unittest.cc", - "stringize_macros_unittest.cc", - "strings/string_builder_unittest.cc", - "strings/string_format_unittest.cc", - "swap_queue_unittest.cc", - "thread_annotations_unittest.cc", - "thread_checker_unittest.cc", - "time_utils_unittest.cc", - "timestamp_aligner_unittest.cc", - "virtual_socket_unittest.cc", - "zero_memory_unittest.cc", - ] - if (is_win) { - sources += [ "win/windows_version_unittest.cc" ] - } - deps = [ - ":bounded_inline_vector", - ":checks", - ":divide_round", - ":gunit_helpers", - ":rate_limiter", - ":rtc_base", - ":rtc_base_approved", - ":rtc_base_tests_utils", - ":rtc_task_queue", - ":safe_compare", - ":safe_minmax", - ":sanitizer", - ":stringutils", - ":testclient", - "../api:array_view", - "../api:scoped_refptr", - "../api/units:time_delta", - "../system_wrappers", - "../test:fileutils", 
- "../test:test_main", - "../test:test_support", - "memory:unittests", - "task_utils:to_queued_task", - "third_party/base64", - "third_party/sigslot", - "//third_party/abseil-cpp/absl/base:core_headers", - "//third_party/abseil-cpp/absl/memory", - ] - } - - rtc_library("rtc_task_queue_unittests") { - testonly = true - - sources = [ "task_queue_unittest.cc" ] - deps = [ - ":gunit_helpers", - ":rtc_base_approved", - ":rtc_base_tests_utils", - ":rtc_task_queue", - ":task_queue_for_test", - "../test:test_main", - "../test:test_support", - "//third_party/abseil-cpp/absl/memory", ] } @@ -1238,138 +1269,303 @@ if (rtc_include_tests) { sources = [ "operations_chain_unittest.cc" ] deps = [ + ":gunit_helpers", ":rtc_base", ":rtc_base_approved", ":rtc_event", ":rtc_operations_chain", + ":threading", "../test:test_support", ] } - rtc_library("weak_ptr_unittests") { - testonly = true - - sources = [ "weak_ptr_unittest.cc" ] - deps = [ - ":gunit_helpers", - ":rtc_base_approved", - ":rtc_base_tests_utils", - ":rtc_event", - ":task_queue_for_test", - ":weak_ptr", - "../test:test_main", - "../test:test_support", - ] - } + if (!build_with_chromium) { + rtc_library("rtc_base_nonparallel_tests") { + testonly = true + + sources = [ + "cpu_time_unittest.cc", + "file_rotating_stream_unittest.cc", + "null_socket_server_unittest.cc", + "physical_socket_server_unittest.cc", + "socket_address_unittest.cc", + "socket_unittest.cc", + "socket_unittest.h", + ] + deps = [ + ":async_socket", + ":checks", + ":gunit_helpers", + ":ip_address", + ":net_helpers", + ":null_socket_server", + ":rtc_base", + ":rtc_base_tests_utils", + ":socket", + ":socket_address", + ":socket_server", + ":testclient", + ":threading", + "../system_wrappers", + "../test:fileutils", + "../test:test_main", + "../test:test_support", + "third_party/sigslot", + "//testing/gtest", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + if (is_win) { + sources += [ "win32_socket_server_unittest.cc" ] + } + } - 
rtc_library("rtc_numerics_unittests") { - testonly = true + rtc_library("rtc_base_approved_unittests") { + testonly = true + sources = [ + "atomic_ops_unittest.cc", + "base64_unittest.cc", + "bit_buffer_unittest.cc", + "bounded_inline_vector_unittest.cc", + "buffer_queue_unittest.cc", + "buffer_unittest.cc", + "byte_buffer_unittest.cc", + "byte_order_unittest.cc", + "checks_unittest.cc", + "copy_on_write_buffer_unittest.cc", + "deprecated/recursive_critical_section_unittest.cc", + "event_tracer_unittest.cc", + "event_unittest.cc", + "hash_unittest.cc", + "logging_unittest.cc", + "numerics/divide_round_unittest.cc", + "numerics/histogram_percentile_counter_unittest.cc", + "numerics/mod_ops_unittest.cc", + "numerics/moving_max_counter_unittest.cc", + "numerics/safe_compare_unittest.cc", + "numerics/safe_minmax_unittest.cc", + "numerics/sample_counter_unittest.cc", + "one_time_event_unittest.cc", + "platform_thread_unittest.cc", + "random_unittest.cc", + "rate_limiter_unittest.cc", + "rate_statistics_unittest.cc", + "rate_tracker_unittest.cc", + "ref_counted_object_unittest.cc", + "sanitizer_unittest.cc", + "string_encode_unittest.cc", + "string_to_number_unittest.cc", + "string_utils_unittest.cc", + "strings/string_builder_unittest.cc", + "strings/string_format_unittest.cc", + "swap_queue_unittest.cc", + "thread_annotations_unittest.cc", + "time_utils_unittest.cc", + "timestamp_aligner_unittest.cc", + "virtual_socket_unittest.cc", + "zero_memory_unittest.cc", + ] + if (is_win) { + sources += [ "win/windows_version_unittest.cc" ] + } + deps = [ + ":async_socket", + ":bounded_inline_vector", + ":checks", + ":criticalsection", + ":divide_round", + ":gunit_helpers", + ":ip_address", + ":null_socket_server", + ":rate_limiter", + ":rtc_base", + ":rtc_base_approved", + ":rtc_base_tests_utils", + ":rtc_numerics", + ":rtc_task_queue", + ":safe_compare", + ":safe_minmax", + ":sanitizer", + ":socket", + ":socket_address", + ":socket_server", + ":stringutils", + ":testclient", + 
":threading", + "../api:array_view", + "../api:scoped_refptr", + "../api/numerics", + "../api/units:time_delta", + "../system_wrappers", + "../test:fileutils", + "../test:test_main", + "../test:test_support", + "containers:unittests", + "memory:unittests", + "synchronization:mutex", + "task_utils:to_queued_task", + "third_party/base64", + "third_party/sigslot", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/base:core_headers", + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/types:optional", + ] + } - sources = [ - "numerics/event_based_exponential_moving_average_unittest.cc", - "numerics/exp_filter_unittest.cc", - "numerics/moving_average_unittest.cc", - "numerics/moving_median_filter_unittest.cc", - "numerics/percentile_filter_unittest.cc", - "numerics/running_statistics_unittest.cc", - "numerics/samples_stats_counter_unittest.cc", - "numerics/sequence_number_util_unittest.cc", - ] - deps = [ - ":rtc_base_approved", - ":rtc_numerics", - "../test:test_main", - "../test:test_support", - "//third_party/abseil-cpp/absl/algorithm:container", - ] - } + rtc_library("rtc_task_queue_unittests") { + testonly = true + + sources = [ "task_queue_unittest.cc" ] + deps = [ + ":gunit_helpers", + ":rtc_base_approved", + ":rtc_base_tests_utils", + ":rtc_task_queue", + ":task_queue_for_test", + "../test:test_main", + "../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + } - rtc_library("rtc_json_unittests") { - testonly = true + rtc_library("weak_ptr_unittests") { + testonly = true + + sources = [ "weak_ptr_unittest.cc" ] + deps = [ + ":gunit_helpers", + ":rtc_base_approved", + ":rtc_base_tests_utils", + ":rtc_event", + ":task_queue_for_test", + ":weak_ptr", + "../test:test_main", + "../test:test_support", + ] + } - sources = [ "strings/json_unittest.cc" ] - deps = [ - ":gunit_helpers", - ":rtc_base_tests_utils", - ":rtc_json", - "../test:test_main", - "../test:test_support", - ] - } + 
rtc_library("rtc_numerics_unittests") { + testonly = true + + sources = [ + "numerics/event_based_exponential_moving_average_unittest.cc", + "numerics/exp_filter_unittest.cc", + "numerics/moving_average_unittest.cc", + "numerics/moving_median_filter_unittest.cc", + "numerics/percentile_filter_unittest.cc", + "numerics/running_statistics_unittest.cc", + "numerics/sequence_number_util_unittest.cc", + ] + deps = [ + ":rtc_base_approved", + ":rtc_numerics", + "../test:test_main", + "../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] + } - rtc_library("rtc_base_unittests") { - testonly = true - defines = [] + rtc_library("rtc_json_unittests") { + testonly = true - sources = [ - "callback_unittest.cc", - "crc32_unittest.cc", - "data_rate_limiter_unittest.cc", - "fake_clock_unittest.cc", - "helpers_unittest.cc", - "ip_address_unittest.cc", - "memory_usage_unittest.cc", - "message_digest_unittest.cc", - "nat_unittest.cc", - "network_route_unittest.cc", - "network_unittest.cc", - "proxy_unittest.cc", - "rolling_accumulator_unittest.cc", - "rtc_certificate_generator_unittest.cc", - "rtc_certificate_unittest.cc", - "signal_thread_unittest.cc", - "sigslot_tester_unittest.cc", - "test_client_unittest.cc", - "thread_unittest.cc", - "unique_id_generator_unittest.cc", - ] - if (is_win) { - sources += [ - "win32_unittest.cc", - "win32_window_unittest.cc", + sources = [ "strings/json_unittest.cc" ] + deps = [ + ":gunit_helpers", + ":rtc_base_tests_utils", + ":rtc_json", + "../test:test_main", + "../test:test_support", ] } - if (is_posix || is_fuchsia) { - sources += [ - "openssl_adapter_unittest.cc", - "openssl_session_cache_unittest.cc", - "openssl_utility_unittest.cc", - "ssl_adapter_unittest.cc", - "ssl_identity_unittest.cc", - "ssl_stream_adapter_unittest.cc", + + rtc_library("rtc_base_unittests") { + testonly = true + defines = [] + + sources = [ + "crc32_unittest.cc", + "data_rate_limiter_unittest.cc", + "fake_clock_unittest.cc", + 
"helpers_unittest.cc", + "ip_address_unittest.cc", + "memory_usage_unittest.cc", + "message_digest_unittest.cc", + "nat_unittest.cc", + "network_route_unittest.cc", + "network_unittest.cc", + "proxy_unittest.cc", + "rolling_accumulator_unittest.cc", + "rtc_certificate_generator_unittest.cc", + "rtc_certificate_unittest.cc", + "sigslot_tester_unittest.cc", + "test_client_unittest.cc", + "thread_unittest.cc", + "unique_id_generator_unittest.cc", ] - } - deps = [ - ":checks", - ":gunit_helpers", - ":rtc_base_tests_utils", - ":stringutils", - ":testclient", - "../api:array_view", - "../api/task_queue", - "../api/task_queue:task_queue_test", - "../test:field_trial", - "../test:fileutils", - "../test:rtc_expect_death", - "../test:test_main", - "../test:test_support", - "memory:fifo_buffer", - "synchronization:synchronization_unittests", - "task_utils:to_queued_task", - "third_party/sigslot", - "//third_party/abseil-cpp/absl/algorithm:container", - "//third_party/abseil-cpp/absl/memory", - "//third_party/abseil-cpp/absl/strings", - "//third_party/abseil-cpp/absl/types:optional", - ] - public_deps = [ ":rtc_base" ] # no-presubmit-check TODO(webrtc:8603) - if (build_with_chromium) { - include_dirs = [ "../../boringssl/src/include" ] - } - if (rtc_build_ssl) { - deps += [ "//third_party/boringssl" ] - } else { - configs += [ ":external_ssl_library" ] + deps = [ + ":async_socket", + ":checks", + ":gunit_helpers", + ":ip_address", + ":net_helpers", + ":null_socket_server", + ":rtc_base_tests_utils", + ":socket_address", + ":socket_factory", + ":socket_server", + ":stringutils", + ":testclient", + ":threading", + "../api:array_view", + "../api/task_queue", + "../api/task_queue:task_queue_test", + "../test:field_trial", + "../test:fileutils", + "../test:rtc_expect_death", + "../test:test_main", + "../test:test_support", + "memory:fifo_buffer", + "synchronization:mutex", + "task_utils:pending_task_safety_flag", + "task_utils:to_queued_task", + "third_party/sigslot", + ] + if 
(enable_google_benchmarks) { + deps += [ "synchronization:synchronization_unittests" ] + } + if (is_win) { + sources += [ + "win32_unittest.cc", + "win32_window_unittest.cc", + ] + deps += [ ":win32" ] + } + if (is_posix || is_fuchsia) { + sources += [ + "openssl_adapter_unittest.cc", + "openssl_session_cache_unittest.cc", + "openssl_utility_unittest.cc", + "ssl_adapter_unittest.cc", + "ssl_identity_unittest.cc", + "ssl_stream_adapter_unittest.cc", + ] + } + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] + public_deps = [ ":rtc_base" ] # no-presubmit-check TODO(webrtc:8603) + if (build_with_chromium) { + include_dirs = [ "../../boringssl/src/include" ] + } + if (rtc_build_ssl) { + deps += [ "//third_party/boringssl" ] + } else { + configs += [ ":external_ssl_library" ] + } } } } @@ -1388,4 +1584,8 @@ if (is_android) { "//third_party/android_deps:com_android_support_support_annotations_java", ] } + java_cpp_enum("network_monitor_enums") { + sources = [ "network_monitor.h" ] + visibility = [ "*" ] + } } diff --git a/rtc_base/DEPS b/rtc_base/DEPS index 679d06dfc8..3fdc4bc10e 100644 --- a/rtc_base/DEPS +++ b/rtc_base/DEPS @@ -1,8 +1,8 @@ include_rules = [ "+base/third_party/libevent", "+json", - "+third_party/jsoncpp", "+system_wrappers", + "+third_party/jsoncpp", ] specific_include_rules = { @@ -12,4 +12,7 @@ specific_include_rules = { "gunit\.h": [ "+testing/base/public/gunit.h" ], + "logging\.cc": [ + "+absl/synchronization" + ], } diff --git a/rtc_base/OWNERS b/rtc_base/OWNERS index 107bbcd812..ce7968ca72 100644 --- a/rtc_base/OWNERS +++ b/rtc_base/OWNERS @@ -1,10 +1,8 @@ hta@webrtc.org juberti@webrtc.org -kwiberg@webrtc.org mflodman@webrtc.org -qingsi@webrtc.org -sergeyu@chromium.org tommi@webrtc.org +mbonadei@webrtc.org per-file rate_statistics*=sprang@webrtc.org per-file 
rate_statistics*=stefan@webrtc.org diff --git a/rtc_base/async_invoker.cc b/rtc_base/async_invoker.cc index 26f8c523ab..87d039373d 100644 --- a/rtc_base/async_invoker.cc +++ b/rtc_base/async_invoker.cc @@ -15,12 +15,12 @@ namespace rtc { -AsyncInvoker::AsyncInvoker() +DEPRECATED_AsyncInvoker::DEPRECATED_AsyncInvoker() : pending_invocations_(0), - invocation_complete_(new RefCountedObject()), + invocation_complete_(make_ref_counted()), destroying_(false) {} -AsyncInvoker::~AsyncInvoker() { +DEPRECATED_AsyncInvoker::~DEPRECATED_AsyncInvoker() { destroying_.store(true, std::memory_order_relaxed); // Messages for this need to be cleared *before* our destructor is complete. ThreadManager::Clear(this); @@ -37,7 +37,7 @@ AsyncInvoker::~AsyncInvoker() { } } -void AsyncInvoker::OnMessage(Message* msg) { +void DEPRECATED_AsyncInvoker::OnMessage(Message* msg) { // Get the AsyncClosure shared ptr from this message's data. ScopedMessageData* data = static_cast*>(msg->pdata); @@ -46,7 +46,8 @@ void AsyncInvoker::OnMessage(Message* msg) { delete data; } -void AsyncInvoker::Flush(Thread* thread, uint32_t id /*= MQID_ANY*/) { +void DEPRECATED_AsyncInvoker::Flush(Thread* thread, + uint32_t id /*= MQID_ANY*/) { // If the destructor is waiting for invocations to finish, don't start // running even more tasks. if (destroying_.load(std::memory_order_relaxed)) @@ -55,7 +56,7 @@ void AsyncInvoker::Flush(Thread* thread, uint32_t id /*= MQID_ANY*/) { // Run this on |thread| to reduce the number of context switches. 
if (Thread::Current() != thread) { thread->Invoke(RTC_FROM_HERE, - Bind(&AsyncInvoker::Flush, this, thread, id)); + [this, thread, id] { Flush(thread, id); }); return; } @@ -67,14 +68,14 @@ void AsyncInvoker::Flush(Thread* thread, uint32_t id /*= MQID_ANY*/) { } } -void AsyncInvoker::Clear() { +void DEPRECATED_AsyncInvoker::Clear() { ThreadManager::Clear(this); } -void AsyncInvoker::DoInvoke(const Location& posted_from, - Thread* thread, - std::unique_ptr closure, - uint32_t id) { +void DEPRECATED_AsyncInvoker::DoInvoke(const Location& posted_from, + Thread* thread, + std::unique_ptr closure, + uint32_t id) { if (destroying_.load(std::memory_order_relaxed)) { // Note that this may be expected, if the application is AsyncInvoking // tasks that AsyncInvoke other tasks. But otherwise it indicates a race @@ -87,11 +88,12 @@ void AsyncInvoker::DoInvoke(const Location& posted_from, new ScopedMessageData(std::move(closure))); } -void AsyncInvoker::DoInvokeDelayed(const Location& posted_from, - Thread* thread, - std::unique_ptr closure, - uint32_t delay_ms, - uint32_t id) { +void DEPRECATED_AsyncInvoker::DoInvokeDelayed( + const Location& posted_from, + Thread* thread, + std::unique_ptr closure, + uint32_t delay_ms, + uint32_t id) { if (destroying_.load(std::memory_order_relaxed)) { // See above comment. 
RTC_LOG(LS_WARNING) << "Tried to invoke while destroying the invoker."; @@ -101,29 +103,7 @@ void AsyncInvoker::DoInvokeDelayed(const Location& posted_from, new ScopedMessageData(std::move(closure))); } -GuardedAsyncInvoker::GuardedAsyncInvoker() : thread_(Thread::Current()) { - thread_->SignalQueueDestroyed.connect(this, - &GuardedAsyncInvoker::ThreadDestroyed); -} - -GuardedAsyncInvoker::~GuardedAsyncInvoker() {} - -bool GuardedAsyncInvoker::Flush(uint32_t id) { - CritScope cs(&crit_); - if (thread_ == nullptr) - return false; - invoker_.Flush(thread_, id); - return true; -} - -void GuardedAsyncInvoker::ThreadDestroyed() { - CritScope cs(&crit_); - // We should never get more than one notification about the thread dying. - RTC_DCHECK(thread_ != nullptr); - thread_ = nullptr; -} - -AsyncClosure::AsyncClosure(AsyncInvoker* invoker) +AsyncClosure::AsyncClosure(DEPRECATED_AsyncInvoker* invoker) : invoker_(invoker), invocation_complete_(invoker_->invocation_complete_) { invoker_->pending_invocations_.fetch_add(1, std::memory_order_relaxed); } diff --git a/rtc_base/async_invoker.h b/rtc_base/async_invoker.h index f15955d811..fd42ca76de 100644 --- a/rtc_base/async_invoker.h +++ b/rtc_base/async_invoker.h @@ -15,9 +15,9 @@ #include #include +#include "absl/base/attributes.h" #include "api/scoped_refptr.h" #include "rtc_base/async_invoker_inl.h" -#include "rtc_base/bind.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/event.h" #include "rtc_base/ref_counted_object.h" @@ -87,10 +87,10 @@ namespace rtc { // destruction. This can be done by starting each chain of invocations on the // same thread on which it will be destroyed, or by using some other // synchronization method. 
-class AsyncInvoker : public MessageHandler { +class DEPRECATED_AsyncInvoker : public MessageHandlerAutoCleanup { public: - AsyncInvoker(); - ~AsyncInvoker() override; + DEPRECATED_AsyncInvoker(); + ~DEPRECATED_AsyncInvoker() override; // Call |functor| asynchronously on |thread|, with no callback upon // completion. Returns immediately. @@ -157,7 +157,7 @@ class AsyncInvoker : public MessageHandler { // an AsyncClosure's destructor that's about to call // "invocation_complete_->Set()", it's not dereferenced after being // destroyed. - scoped_refptr> invocation_complete_; + rtc::Ref::Ptr invocation_complete_; // This flag is used to ensure that if an application AsyncInvokes tasks that // recursively AsyncInvoke other tasks ad infinitum, the cycle eventually @@ -166,99 +166,11 @@ class AsyncInvoker : public MessageHandler { friend class AsyncClosure; - RTC_DISALLOW_COPY_AND_ASSIGN(AsyncInvoker); + RTC_DISALLOW_COPY_AND_ASSIGN(DEPRECATED_AsyncInvoker); }; -// Similar to AsyncInvoker, but guards against the Thread being destroyed while -// there are outstanding dangling pointers to it. It will connect to the current -// thread in the constructor, and will get notified when that thread is -// destroyed. After GuardedAsyncInvoker is constructed, it can be used from -// other threads to post functors to the thread it was constructed on. If that -// thread dies, any further calls to AsyncInvoke() will be safely ignored. -class GuardedAsyncInvoker : public sigslot::has_slots<> { - public: - GuardedAsyncInvoker(); - ~GuardedAsyncInvoker() override; - - // Synchronously execute all outstanding calls we own, and wait for calls to - // complete before returning. Optionally filter by message id. The destructor - // will not wait for outstanding calls, so if that behavior is desired, call - // Flush() first. Returns false if the thread has died. - bool Flush(uint32_t id = MQID_ANY); - - // Call |functor| asynchronously with no callback upon completion. Returns - // immediately. 
Returns false if the thread has died. - template - bool AsyncInvoke(const Location& posted_from, - FunctorT&& functor, - uint32_t id = 0) { - CritScope cs(&crit_); - if (thread_ == nullptr) - return false; - invoker_.AsyncInvoke( - posted_from, thread_, std::forward(functor), id); - return true; - } - - // Call |functor| asynchronously with |delay_ms|, with no callback upon - // completion. Returns immediately. Returns false if the thread has died. - template - bool AsyncInvokeDelayed(const Location& posted_from, - FunctorT&& functor, - uint32_t delay_ms, - uint32_t id = 0) { - CritScope cs(&crit_); - if (thread_ == nullptr) - return false; - invoker_.AsyncInvokeDelayed( - posted_from, thread_, std::forward(functor), delay_ms, id); - return true; - } - - // Call |functor| asynchronously, calling |callback| when done. Returns false - // if the thread has died. - template - bool AsyncInvoke(const Location& posted_from, - const Location& callback_posted_from, - FunctorT&& functor, - void (HostT::*callback)(ReturnT), - HostT* callback_host, - uint32_t id = 0) { - CritScope cs(&crit_); - if (thread_ == nullptr) - return false; - invoker_.AsyncInvoke( - posted_from, callback_posted_from, thread_, - std::forward(functor), callback, callback_host, id); - return true; - } - - // Call |functor| asynchronously calling |callback| when done. Overloaded for - // void return. Returns false if the thread has died. - template - bool AsyncInvoke(const Location& posted_from, - const Location& callback_posted_from, - FunctorT&& functor, - void (HostT::*callback)(), - HostT* callback_host, - uint32_t id = 0) { - CritScope cs(&crit_); - if (thread_ == nullptr) - return false; - invoker_.AsyncInvoke( - posted_from, callback_posted_from, thread_, - std::forward(functor), callback, callback_host, id); - return true; - } - - private: - // Callback when |thread_| is destroyed. 
- void ThreadDestroyed(); - - CriticalSection crit_; - Thread* thread_ RTC_GUARDED_BY(crit_); - AsyncInvoker invoker_ RTC_GUARDED_BY(crit_); -}; +using AsyncInvoker ABSL_DEPRECATED("bugs.webrtc.org/12339") = + DEPRECATED_AsyncInvoker; } // namespace rtc diff --git a/rtc_base/async_invoker_inl.h b/rtc_base/async_invoker_inl.h index bd9b0d1aa1..9fb328782c 100644 --- a/rtc_base/async_invoker_inl.h +++ b/rtc_base/async_invoker_inl.h @@ -12,8 +12,6 @@ #define RTC_BASE_ASYNC_INVOKER_INL_H_ #include "api/scoped_refptr.h" -#include "rtc_base/bind.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "rtc_base/message_handler.h" #include "rtc_base/ref_counted_object.h" @@ -23,32 +21,33 @@ namespace rtc { -class AsyncInvoker; +class DEPRECATED_AsyncInvoker; // Helper class for AsyncInvoker. Runs a task and triggers a callback // on the calling thread if necessary. class AsyncClosure { public: - explicit AsyncClosure(AsyncInvoker* invoker); + explicit AsyncClosure(DEPRECATED_AsyncInvoker* invoker); virtual ~AsyncClosure(); // Runs the asynchronous task, and triggers a callback to the calling // thread if needed. Should be called from the target thread. virtual void Execute() = 0; protected: - AsyncInvoker* invoker_; + DEPRECATED_AsyncInvoker* invoker_; // Reference counted so that if the AsyncInvoker destructor finishes before // an AsyncClosure's destructor that's about to call // "invocation_complete_->Set()", it's not dereferenced after being // destroyed. - scoped_refptr> invocation_complete_; + rtc::Ref::Ptr invocation_complete_; }; // Simple closure that doesn't trigger a callback for the calling thread. 
template class FireAndForgetAsyncClosure : public AsyncClosure { public: - explicit FireAndForgetAsyncClosure(AsyncInvoker* invoker, FunctorT&& functor) + explicit FireAndForgetAsyncClosure(DEPRECATED_AsyncInvoker* invoker, + FunctorT&& functor) : AsyncClosure(invoker), functor_(std::forward(functor)) {} virtual void Execute() { functor_(); } diff --git a/rtc_base/async_resolver.cc b/rtc_base/async_resolver.cc new file mode 100644 index 0000000000..d482b4e681 --- /dev/null +++ b/rtc_base/async_resolver.cc @@ -0,0 +1,206 @@ +/* + * Copyright 2008 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/async_resolver.h" + +#include +#include +#include + +#include "api/ref_counted_base.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/thread_annotations.h" + +#if defined(WEBRTC_WIN) +#include +#include + +#include "rtc_base/win32.h" +#endif +#if defined(WEBRTC_POSIX) && !defined(__native_client__) +#if defined(WEBRTC_ANDROID) +#include "rtc_base/ifaddrs_android.h" +#else +#include +#endif +#endif // defined(WEBRTC_POSIX) && !defined(__native_client__) + +#include "api/task_queue/task_queue_base.h" +#include "rtc_base/ip_address.h" +#include "rtc_base/logging.h" +#include "rtc_base/platform_thread.h" +#include "rtc_base/task_queue.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/third_party/sigslot/sigslot.h" // for signal_with_thread... 
+ +namespace rtc { + +int ResolveHostname(const std::string& hostname, + int family, + std::vector* addresses) { +#ifdef __native_client__ + RTC_NOTREACHED(); + RTC_LOG(LS_WARNING) << "ResolveHostname() is not implemented for NaCl"; + return -1; +#else // __native_client__ + if (!addresses) { + return -1; + } + addresses->clear(); + struct addrinfo* result = nullptr; + struct addrinfo hints = {0}; + hints.ai_family = family; + // |family| here will almost always be AF_UNSPEC, because |family| comes from + // AsyncResolver::addr_.family(), which comes from a SocketAddress constructed + // with a hostname. When a SocketAddress is constructed with a hostname, its + // family is AF_UNSPEC. However, if someday in the future we construct + // a SocketAddress with both a hostname and a family other than AF_UNSPEC, + // then it would be possible to get a specific family value here. + + // The behavior of AF_UNSPEC is roughly "get both ipv4 and ipv6", as + // documented by the various operating systems: + // Linux: http://man7.org/linux/man-pages/man3/getaddrinfo.3.html + // Windows: https://msdn.microsoft.com/en-us/library/windows/desktop/ + // ms738520(v=vs.85).aspx + // Mac: https://developer.apple.com/legacy/library/documentation/Darwin/ + // Reference/ManPages/man3/getaddrinfo.3.html + // Android (source code, not documentation): + // https://android.googlesource.com/platform/bionic/+/ + // 7e0bfb511e85834d7c6cb9631206b62f82701d60/libc/netbsd/net/getaddrinfo.c#1657 + hints.ai_flags = AI_ADDRCONFIG; + int ret = getaddrinfo(hostname.c_str(), nullptr, &hints, &result); + if (ret != 0) { + return ret; + } + struct addrinfo* cursor = result; + for (; cursor; cursor = cursor->ai_next) { + if (family == AF_UNSPEC || cursor->ai_family == family) { + IPAddress ip; + if (IPFromAddrInfo(cursor, &ip)) { + addresses->push_back(ip); + } + } + } + freeaddrinfo(result); + return 0; +#endif // !__native_client__ +} + +struct AsyncResolver::State : public RefCountedBase { + 
webrtc::Mutex mutex; + enum class Status { + kLive, + kDead + } status RTC_GUARDED_BY(mutex) = Status::kLive; +}; + +AsyncResolver::AsyncResolver() : error_(-1), state_(new State) {} + +AsyncResolver::~AsyncResolver() { + RTC_DCHECK_RUN_ON(&sequence_checker_); + + // Ensure the thread isn't using a stale reference to the current task queue, + // or calling into ResolveDone post destruction. + webrtc::MutexLock lock(&state_->mutex); + state_->status = State::Status::kDead; +} + +void RunResolution(void* obj) { + std::function* function_ptr = + static_cast*>(obj); + (*function_ptr)(); + delete function_ptr; +} + +void AsyncResolver::Start(const SocketAddress& addr) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!destroy_called_); + addr_ = addr; + PlatformThread::SpawnDetached( + [this, addr, caller_task_queue = webrtc::TaskQueueBase::Current(), + state = state_] { + std::vector addresses; + int error = + ResolveHostname(addr.hostname().c_str(), addr.family(), &addresses); + webrtc::MutexLock lock(&state->mutex); + if (state->status == State::Status::kLive) { + caller_task_queue->PostTask(webrtc::ToQueuedTask( + [this, error, addresses = std::move(addresses), state] { + bool live; + { + // ResolveDone can lead to instance destruction, so make sure + // we don't deadlock. 
+ webrtc::MutexLock lock(&state->mutex); + live = state->status == State::Status::kLive; + } + if (live) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + ResolveDone(std::move(addresses), error); + } + })); + } + }, + "AsyncResolver"); +} + +bool AsyncResolver::GetResolvedAddress(int family, SocketAddress* addr) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!destroy_called_); + if (error_ != 0 || addresses_.empty()) + return false; + + *addr = addr_; + for (size_t i = 0; i < addresses_.size(); ++i) { + if (family == addresses_[i].family()) { + addr->SetResolvedIP(addresses_[i]); + return true; + } + } + return false; +} + +int AsyncResolver::GetError() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!destroy_called_); + return error_; +} + +void AsyncResolver::Destroy(bool wait) { + // Some callers have trouble guaranteeing that Destroy is called on the + // sequence guarded by |sequence_checker_|. + // RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!destroy_called_); + destroy_called_ = true; + MaybeSelfDestruct(); +} + +const std::vector& AsyncResolver::addresses() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_DCHECK(!destroy_called_); + return addresses_; +} + +void AsyncResolver::ResolveDone(std::vector addresses, int error) { + addresses_ = addresses; + error_ = error; + recursion_check_ = true; + SignalDone(this); + MaybeSelfDestruct(); +} + +void AsyncResolver::MaybeSelfDestruct() { + if (!recursion_check_) { + delete this; + } else { + recursion_check_ = false; + } +} + +} // namespace rtc diff --git a/rtc_base/async_resolver.h b/rtc_base/async_resolver.h new file mode 100644 index 0000000000..0c053eed81 --- /dev/null +++ b/rtc_base/async_resolver.h @@ -0,0 +1,75 @@ +/* + * Copyright 2008 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_ASYNC_RESOLVER_H_ +#define RTC_BASE_ASYNC_RESOLVER_H_ + +#if defined(WEBRTC_POSIX) +#include +#elif WEBRTC_WIN +#include // NOLINT +#endif + +#include + +#include "api/sequence_checker.h" +#include "rtc_base/async_resolver_interface.h" +#include "rtc_base/event.h" +#include "rtc_base/ip_address.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/socket_address.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/system/rtc_export.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" + +namespace rtc { + +// AsyncResolver will perform async DNS resolution, signaling the result on +// the SignalDone from AsyncResolverInterface when the operation completes. +// +// This class is thread-compatible, and all methods and destruction needs to +// happen from the same rtc::Thread, except for Destroy which is allowed to +// happen on another context provided it's not happening concurrently to another +// public API call, and is the last access to the object. +class RTC_EXPORT AsyncResolver : public AsyncResolverInterface { + public: + AsyncResolver(); + ~AsyncResolver() override; + + void Start(const SocketAddress& addr) override; + bool GetResolvedAddress(int family, SocketAddress* addr) const override; + int GetError() const override; + void Destroy(bool wait) override; + + const std::vector& addresses() const; + + private: + // Fwd decl. 
+ struct State; + + void ResolveDone(std::vector addresses, int error) + RTC_EXCLUSIVE_LOCKS_REQUIRED(sequence_checker_); + void MaybeSelfDestruct(); + + SocketAddress addr_ RTC_GUARDED_BY(sequence_checker_); + std::vector addresses_ RTC_GUARDED_BY(sequence_checker_); + int error_ RTC_GUARDED_BY(sequence_checker_); + bool recursion_check_ = + false; // Protects against SignalDone calling into Destroy. + bool destroy_called_ = false; + scoped_refptr state_; + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker sequence_checker_; +}; + +} // namespace rtc + +#endif // RTC_BASE_ASYNC_RESOLVER_H_ diff --git a/rtc_base/bind.h b/rtc_base/bind.h deleted file mode 100644 index b61d189f7a..0000000000 --- a/rtc_base/bind.h +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright 2012 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// Bind() is an overloaded function that converts method calls into function -// objects (aka functors). The method object is captured as a scoped_refptr<> if -// possible, and as a raw pointer otherwise. Any arguments to the method are -// captured by value. The return value of Bind is a stateful, nullary function -// object. Care should be taken about the lifetime of objects captured by -// Bind(); the returned functor knows nothing about the lifetime of a non -// ref-counted method object or any arguments passed by pointer, and calling the -// functor with a destroyed object will surely do bad things. -// -// To prevent the method object from being captured as a scoped_refptr<>, you -// can use Unretained. 
But this should only be done when absolutely necessary, -// and when the caller knows the extra reference isn't needed. -// -// Example usage: -// struct Foo { -// int Test1() { return 42; } -// int Test2() const { return 52; } -// int Test3(int x) { return x*x; } -// float Test4(int x, float y) { return x + y; } -// }; -// -// int main() { -// Foo foo; -// cout << rtc::Bind(&Foo::Test1, &foo)() << endl; -// cout << rtc::Bind(&Foo::Test2, &foo)() << endl; -// cout << rtc::Bind(&Foo::Test3, &foo, 3)() << endl; -// cout << rtc::Bind(&Foo::Test4, &foo, 7, 8.5f)() << endl; -// } -// -// Example usage of ref counted objects: -// struct Bar { -// int AddRef(); -// int Release(); -// -// void Test() {} -// void BindThis() { -// // The functor passed to AsyncInvoke() will keep this object alive. -// invoker.AsyncInvoke(RTC_FROM_HERE,rtc::Bind(&Bar::Test, this)); -// } -// }; -// -// int main() { -// rtc::scoped_refptr bar = new rtc::RefCountedObject(); -// auto functor = rtc::Bind(&Bar::Test, bar); -// bar = nullptr; -// // The functor stores an internal scoped_refptr, so this is safe. -// functor(); -// } -// - -#ifndef RTC_BASE_BIND_H_ -#define RTC_BASE_BIND_H_ - -#include -#include - -#include "api/scoped_refptr.h" - -#define NONAME - -namespace rtc { -namespace detail { -// This is needed because the template parameters in Bind can't be resolved -// if they're used both as parameters of the function pointer type and as -// parameters to Bind itself: the function pointer parameters are exact -// matches to the function prototype, but the parameters to bind have -// references stripped. This trick allows the compiler to dictate the Bind -// parameter types rather than deduce them. -template -struct identity { - typedef T type; -}; - -// IsRefCounted::value will be true for types that can be used in -// rtc::scoped_refptr, i.e. types that implements nullary functions AddRef() -// and Release(), regardless of their return types. 
AddRef() and Release() can -// be defined in T or any superclass of T. -template -class IsRefCounted { - // This is a complex implementation detail done with SFINAE. - - // Define types such that sizeof(Yes) != sizeof(No). - struct Yes { - char dummy[1]; - }; - struct No { - char dummy[2]; - }; - // Define two overloaded template functions with return types of different - // size. This way, we can use sizeof() on the return type to determine which - // function the compiler would have chosen. One function will be preferred - // over the other if it is possible to create it without compiler errors, - // otherwise the compiler will simply remove it, and default to the less - // preferred function. - template - static Yes test(R* r, decltype(r->AddRef(), r->Release(), 42)); - template - static No test(...); - - public: - // Trick the compiler to tell if it's possible to call AddRef() and Release(). - static const bool value = sizeof(test((T*)nullptr, 42)) == sizeof(Yes); -}; - -// TernaryTypeOperator is a helper class to select a type based on a static bool -// value. -template -struct TernaryTypeOperator {}; - -template -struct TernaryTypeOperator { - typedef IfTrueT type; -}; - -template -struct TernaryTypeOperator { - typedef IfFalseT type; -}; - -// PointerType::type will be scoped_refptr for ref counted types, and T* -// otherwise. -template -struct PointerType { - typedef typename TernaryTypeOperator::value, - scoped_refptr, - T*>::type type; -}; - -template -class UnretainedWrapper { - public: - explicit UnretainedWrapper(T* o) : ptr_(o) {} - T* get() const { return ptr_; } - - private: - T* ptr_; -}; - -} // namespace detail - -template -static inline detail::UnretainedWrapper Unretained(T* o) { - return detail::UnretainedWrapper(o); -} - -template -class MethodFunctor { - public: - MethodFunctor(MethodT method, ObjectT* object, Args... args) - : method_(method), object_(object), args_(args...) 
{} - R operator()() const { - return CallMethod(std::index_sequence_for()); - } - - private: - template - R CallMethod(std::index_sequence) const { - return (object_->*method_)(std::get(args_)...); - } - - MethodT method_; - typename detail::PointerType::type object_; - typename std::tuple::type...> args_; -}; - -template -class UnretainedMethodFunctor { - public: - UnretainedMethodFunctor(MethodT method, - detail::UnretainedWrapper object, - Args... args) - : method_(method), object_(object.get()), args_(args...) {} - R operator()() const { - return CallMethod(std::index_sequence_for()); - } - - private: - template - R CallMethod(std::index_sequence) const { - return (object_->*method_)(std::get(args_)...); - } - - MethodT method_; - ObjectT* object_; - typename std::tuple::type...> args_; -}; - -template -class Functor { - public: - Functor(const FunctorT& functor, Args... args) - : functor_(functor), args_(args...) {} - R operator()() const { - return CallFunction(std::index_sequence_for()); - } - - private: - template - R CallFunction(std::index_sequence) const { - return functor_(std::get(args_)...); - } - - FunctorT functor_; - typename std::tuple::type...> args_; -}; - -#define FP_T(x) R (ObjectT::*x)(Args...) - -template -MethodFunctor Bind( - FP_T(method), - ObjectT* object, - typename detail::identity::type... args) { - return MethodFunctor(method, object, - args...); -} - -template -MethodFunctor Bind( - FP_T(method), - const scoped_refptr& object, - typename detail::identity::type... args) { - return MethodFunctor(method, object.get(), - args...); -} - -template -UnretainedMethodFunctor Bind( - FP_T(method), - detail::UnretainedWrapper object, - typename detail::identity::type... args) { - return UnretainedMethodFunctor( - method, object, args...); -} - -#undef FP_T -#define FP_T(x) R (ObjectT::*x)(Args...) const - -template -MethodFunctor Bind( - FP_T(method), - const ObjectT* object, - typename detail::identity::type... 
args) { - return MethodFunctor(method, object, - args...); -} -template -UnretainedMethodFunctor Bind( - FP_T(method), - detail::UnretainedWrapper object, - typename detail::identity::type... args) { - return UnretainedMethodFunctor( - method, object, args...); -} - -#undef FP_T -#define FP_T(x) R (*x)(Args...) - -template -Functor Bind( - FP_T(function), - typename detail::identity::type... args) { - return Functor(function, args...); -} - -#undef FP_T - -} // namespace rtc - -#undef NONAME - -#endif // RTC_BASE_BIND_H_ diff --git a/rtc_base/bind_unittest.cc b/rtc_base/bind_unittest.cc deleted file mode 100644 index 664cb54500..0000000000 --- a/rtc_base/bind_unittest.cc +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright 2004 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "rtc_base/bind.h" - -#include - -#include "rtc_base/ref_count.h" -#include "rtc_base/ref_counted_object.h" -#include "test/gtest.h" - -namespace rtc { - -namespace { - -struct LifeTimeCheck; - -struct MethodBindTester { - void NullaryVoid() { ++call_count; } - int NullaryInt() { - ++call_count; - return 1; - } - int NullaryConst() const { - ++call_count; - return 2; - } - void UnaryVoid(int dummy) { ++call_count; } - template - T Identity(T value) { - ++call_count; - return value; - } - int UnaryByPointer(int* value) const { - ++call_count; - return ++(*value); - } - int UnaryByRef(const int& value) const { - ++call_count; - return ++const_cast(value); - } - int Multiply(int a, int b) const { - ++call_count; - return a * b; - } - void RefArgument(const scoped_refptr& object) { - EXPECT_TRUE(object.get() != nullptr); - } - - mutable int call_count; -}; - -struct A { - int dummy; -}; -struct B : public RefCountInterface { - int dummy; -}; -struct C : public A, B {}; -struct D { - int AddRef(); -}; -struct E : public D { - int Release(); -}; -struct F { - void AddRef(); - void Release(); -}; - -struct LifeTimeCheck { - LifeTimeCheck() : ref_count_(0) {} - void AddRef() { ++ref_count_; } - void Release() { --ref_count_; } - void NullaryVoid() {} - int ref_count_; -}; - -int Return42() { - return 42; -} -int Negate(int a) { - return -a; -} -int Multiply(int a, int b) { - return a * b; -} - -} // namespace - -// Try to catch any problem with scoped_refptr type deduction in rtc::Bind at -// compile time. 
-#define EXPECT_IS_CAPTURED_AS_PTR(T) \ - static_assert(std::is_same::type, T*>::value, \ - "PointerType") -#define EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(T) \ - static_assert( \ - std::is_same::type, scoped_refptr>::value, \ - "PointerType") - -EXPECT_IS_CAPTURED_AS_PTR(void); -EXPECT_IS_CAPTURED_AS_PTR(int); -EXPECT_IS_CAPTURED_AS_PTR(double); -EXPECT_IS_CAPTURED_AS_PTR(A); -EXPECT_IS_CAPTURED_AS_PTR(D); -EXPECT_IS_CAPTURED_AS_PTR(RefCountInterface*); -EXPECT_IS_CAPTURED_AS_PTR( - decltype(Unretained>)); - -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(RefCountInterface); -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(B); -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(C); -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(E); -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(F); -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(RefCountedObject); -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(RefCountedObject); -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(RefCountedObject); -EXPECT_IS_CAPTURED_AS_SCOPED_REFPTR(const RefCountedObject); - -TEST(BindTest, BindToMethod) { - MethodBindTester object = {0}; - EXPECT_EQ(0, object.call_count); - Bind(&MethodBindTester::NullaryVoid, &object)(); - EXPECT_EQ(1, object.call_count); - EXPECT_EQ(1, Bind(&MethodBindTester::NullaryInt, &object)()); - EXPECT_EQ(2, object.call_count); - EXPECT_EQ(2, Bind(&MethodBindTester::NullaryConst, - static_cast(&object))()); - EXPECT_EQ(3, object.call_count); - Bind(&MethodBindTester::UnaryVoid, &object, 5)(); - EXPECT_EQ(4, object.call_count); - EXPECT_EQ(100, Bind(&MethodBindTester::Identity, &object, 100)()); - EXPECT_EQ(5, object.call_count); - const std::string string_value("test string"); - EXPECT_EQ(string_value, Bind(&MethodBindTester::Identity, - &object, string_value)()); - EXPECT_EQ(6, object.call_count); - int value = 11; - // Bind binds by value, even if the method signature is by reference, so - // "reference" binds require pointers. 
- EXPECT_EQ(12, Bind(&MethodBindTester::UnaryByPointer, &object, &value)()); - EXPECT_EQ(12, value); - EXPECT_EQ(7, object.call_count); - // It's possible to bind to a function that takes a const reference, though - // the capture will be a copy. See UnaryByRef hackery above where it removes - // the const to make sure the underlying storage is, in fact, a copy. - EXPECT_EQ(13, Bind(&MethodBindTester::UnaryByRef, &object, value)()); - // But the original value is unmodified. - EXPECT_EQ(12, value); - EXPECT_EQ(8, object.call_count); - EXPECT_EQ(56, Bind(&MethodBindTester::Multiply, &object, 7, 8)()); - EXPECT_EQ(9, object.call_count); -} - -TEST(BindTest, BindToFunction) { - EXPECT_EQ(42, Bind(&Return42)()); - EXPECT_EQ(3, Bind(&Negate, -3)()); - EXPECT_EQ(56, Bind(&Multiply, 8, 7)()); -} - -// Test Bind where method object implements RefCountInterface and is passed as a -// pointer. -TEST(BindTest, CapturePointerAsScopedRefPtr) { - LifeTimeCheck object; - EXPECT_EQ(object.ref_count_, 0); - scoped_refptr scoped_object(&object); - EXPECT_EQ(object.ref_count_, 1); - { - auto functor = Bind(&LifeTimeCheck::NullaryVoid, &object); - EXPECT_EQ(object.ref_count_, 2); - scoped_object = nullptr; - EXPECT_EQ(object.ref_count_, 1); - } - EXPECT_EQ(object.ref_count_, 0); -} - -// Test Bind where method object implements RefCountInterface and is passed as a -// scoped_refptr<>. -TEST(BindTest, CaptureScopedRefPtrAsScopedRefPtr) { - LifeTimeCheck object; - EXPECT_EQ(object.ref_count_, 0); - scoped_refptr scoped_object(&object); - EXPECT_EQ(object.ref_count_, 1); - { - auto functor = Bind(&LifeTimeCheck::NullaryVoid, scoped_object); - EXPECT_EQ(object.ref_count_, 2); - scoped_object = nullptr; - EXPECT_EQ(object.ref_count_, 1); - } - EXPECT_EQ(object.ref_count_, 0); -} - -// Test Bind where method object is captured as scoped_refptr<> and the functor -// dies while there are references left. 
-TEST(BindTest, FunctorReleasesObjectOnDestruction) { - LifeTimeCheck object; - EXPECT_EQ(object.ref_count_, 0); - scoped_refptr scoped_object(&object); - EXPECT_EQ(object.ref_count_, 1); - Bind(&LifeTimeCheck::NullaryVoid, &object)(); - EXPECT_EQ(object.ref_count_, 1); - scoped_object = nullptr; - EXPECT_EQ(object.ref_count_, 0); -} - -// Test Bind with scoped_refptr<> argument. -TEST(BindTest, ScopedRefPointerArgument) { - LifeTimeCheck object; - EXPECT_EQ(object.ref_count_, 0); - scoped_refptr scoped_object(&object); - EXPECT_EQ(object.ref_count_, 1); - { - MethodBindTester bind_tester; - auto functor = - Bind(&MethodBindTester::RefArgument, &bind_tester, scoped_object); - EXPECT_EQ(object.ref_count_, 2); - } - EXPECT_EQ(object.ref_count_, 1); - scoped_object = nullptr; - EXPECT_EQ(object.ref_count_, 0); -} - -namespace { - -const int* Ref(const int& a) { - return &a; -} - -} // anonymous namespace - -// Test Bind with non-scoped_refptr<> reference argument, which should be -// modified to a non-reference capture. -TEST(BindTest, RefArgument) { - const int x = 42; - EXPECT_EQ(&x, Ref(x)); - // Bind() should make a copy of |x|, i.e. the pointers should be different. 
- auto functor = Bind(&Ref, x); - EXPECT_NE(&x, functor()); -} - -} // namespace rtc diff --git a/rtc_base/bit_buffer.cc b/rtc_base/bit_buffer.cc index a6dc1c7ab8..d212ef5637 100644 --- a/rtc_base/bit_buffer.cc +++ b/rtc_base/bit_buffer.cc @@ -83,36 +83,36 @@ uint64_t BitBuffer::RemainingBitCount() const { return (static_cast(byte_count_) - byte_offset_) * 8 - bit_offset_; } -bool BitBuffer::ReadUInt8(uint8_t* val) { +bool BitBuffer::ReadUInt8(uint8_t& val) { uint32_t bit_val; - if (!ReadBits(&bit_val, sizeof(uint8_t) * 8)) { + if (!ReadBits(sizeof(uint8_t) * 8, bit_val)) { return false; } RTC_DCHECK(bit_val <= std::numeric_limits::max()); - *val = static_cast(bit_val); + val = static_cast(bit_val); return true; } -bool BitBuffer::ReadUInt16(uint16_t* val) { +bool BitBuffer::ReadUInt16(uint16_t& val) { uint32_t bit_val; - if (!ReadBits(&bit_val, sizeof(uint16_t) * 8)) { + if (!ReadBits(sizeof(uint16_t) * 8, bit_val)) { return false; } RTC_DCHECK(bit_val <= std::numeric_limits::max()); - *val = static_cast(bit_val); + val = static_cast(bit_val); return true; } -bool BitBuffer::ReadUInt32(uint32_t* val) { - return ReadBits(val, sizeof(uint32_t) * 8); +bool BitBuffer::ReadUInt32(uint32_t& val) { + return ReadBits(sizeof(uint32_t) * 8, val); } -bool BitBuffer::PeekBits(uint32_t* val, size_t bit_count) { +bool BitBuffer::PeekBits(size_t bit_count, uint32_t& val) { // TODO(nisse): Could allow bit_count == 0 and always return success. But // current code reads one byte beyond end of buffer in the case that // RemainingBitCount() == 0 and bit_count == 0. RTC_DCHECK(bit_count > 0); - if (!val || bit_count > RemainingBitCount() || bit_count > 32) { + if (bit_count > RemainingBitCount() || bit_count > 32) { return false; } const uint8_t* bytes = bytes_ + byte_offset_; @@ -121,7 +121,7 @@ bool BitBuffer::PeekBits(uint32_t* val, size_t bit_count) { // If we're reading fewer bits than what's left in the current byte, just // return the portion of this byte that we need. 
if (bit_count < remaining_bits_in_current_byte) { - *val = HighestBits(bits, bit_offset_ + bit_count); + val = HighestBits(bits, bit_offset_ + bit_count); return true; } // Otherwise, subtract what we've read from the bit count and read as many @@ -137,12 +137,50 @@ bool BitBuffer::PeekBits(uint32_t* val, size_t bit_count) { bits <<= bit_count; bits |= HighestBits(*bytes, bit_count); } - *val = bits; + val = bits; return true; } -bool BitBuffer::ReadBits(uint32_t* val, size_t bit_count) { - return PeekBits(val, bit_count) && ConsumeBits(bit_count); +bool BitBuffer::PeekBits(size_t bit_count, uint64_t& val) { + // TODO(nisse): Could allow bit_count == 0 and always return success. But + // current code reads one byte beyond end of buffer in the case that + // RemainingBitCount() == 0 and bit_count == 0. + RTC_DCHECK(bit_count > 0); + if (bit_count > RemainingBitCount() || bit_count > 64) { + return false; + } + const uint8_t* bytes = bytes_ + byte_offset_; + size_t remaining_bits_in_current_byte = 8 - bit_offset_; + uint64_t bits = LowestBits(*bytes++, remaining_bits_in_current_byte); + // If we're reading fewer bits than what's left in the current byte, just + // return the portion of this byte that we need. + if (bit_count < remaining_bits_in_current_byte) { + val = HighestBits(bits, bit_offset_ + bit_count); + return true; + } + // Otherwise, subtract what we've read from the bit count and read as many + // full bytes as we can into bits. + bit_count -= remaining_bits_in_current_byte; + while (bit_count >= 8) { + bits = (bits << 8) | *bytes++; + bit_count -= 8; + } + // Whatever we have left is smaller than a byte, so grab just the bits we need + // and shift them into the lowest bits. 
+ if (bit_count > 0) { + bits <<= bit_count; + bits |= HighestBits(*bytes, bit_count); + } + val = bits; + return true; +} + +bool BitBuffer::ReadBits(size_t bit_count, uint32_t& val) { + return PeekBits(bit_count, val) && ConsumeBits(bit_count); +} + +bool BitBuffer::ReadBits(size_t bit_count, uint64_t& val) { + return PeekBits(bit_count, val) && ConsumeBits(bit_count); } bool BitBuffer::ConsumeBytes(size_t byte_count) { @@ -159,33 +197,36 @@ bool BitBuffer::ConsumeBits(size_t bit_count) { return true; } -bool BitBuffer::ReadNonSymmetric(uint32_t* val, uint32_t num_values) { +bool BitBuffer::ReadNonSymmetric(uint32_t num_values, uint32_t& val) { RTC_DCHECK_GT(num_values, 0); RTC_DCHECK_LE(num_values, uint32_t{1} << 31); + if (num_values == 1) { + // When there is only one possible value, it requires zero bits to store it. + // But ReadBits doesn't support reading zero bits. + val = 0; + return true; + } size_t count_bits = CountBits(num_values); uint32_t num_min_bits_values = (uint32_t{1} << count_bits) - num_values; - if (!ReadBits(val, count_bits - 1)) { + if (!ReadBits(count_bits - 1, val)) { return false; } - if (*val < num_min_bits_values) { + if (val < num_min_bits_values) { return true; } uint32_t extra_bit; - if (!ReadBits(&extra_bit, /*bit_count=*/1)) { + if (!ReadBits(/*bit_count=*/1, extra_bit)) { return false; } - *val = (*val << 1) + extra_bit - num_min_bits_values; + val = (val << 1) + extra_bit - num_min_bits_values; return true; } -bool BitBuffer::ReadExponentialGolomb(uint32_t* val) { - if (!val) { - return false; - } +bool BitBuffer::ReadExponentialGolomb(uint32_t& val) { // Store off the current byte/bit offset, in case we want to restore them due // to a failed parse. size_t original_byte_offset = byte_offset_; @@ -194,35 +235,35 @@ bool BitBuffer::ReadExponentialGolomb(uint32_t* val) { // Count the number of leading 0 bits by peeking/consuming them one at a time. 
size_t zero_bit_count = 0; uint32_t peeked_bit; - while (PeekBits(&peeked_bit, 1) && peeked_bit == 0) { + while (PeekBits(1, peeked_bit) && peeked_bit == 0) { zero_bit_count++; ConsumeBits(1); } // We should either be at the end of the stream, or the next bit should be 1. - RTC_DCHECK(!PeekBits(&peeked_bit, 1) || peeked_bit == 1); + RTC_DCHECK(!PeekBits(1, peeked_bit) || peeked_bit == 1); // The bit count of the value is the number of zeros + 1. Make sure that many // bits fits in a uint32_t and that we have enough bits left for it, and then // read the value. size_t value_bit_count = zero_bit_count + 1; - if (value_bit_count > 32 || !ReadBits(val, value_bit_count)) { + if (value_bit_count > 32 || !ReadBits(value_bit_count, val)) { RTC_CHECK(Seek(original_byte_offset, original_bit_offset)); return false; } - *val -= 1; + val -= 1; return true; } -bool BitBuffer::ReadSignedExponentialGolomb(int32_t* val) { +bool BitBuffer::ReadSignedExponentialGolomb(int32_t& val) { uint32_t unsigned_val; - if (!ReadExponentialGolomb(&unsigned_val)) { + if (!ReadExponentialGolomb(unsigned_val)) { return false; } if ((unsigned_val & 1) == 0) { - *val = -static_cast(unsigned_val / 2); + val = -static_cast(unsigned_val / 2); } else { - *val = (unsigned_val + 1) / 2; + val = (unsigned_val + 1) / 2; } return true; } @@ -308,6 +349,11 @@ bool BitBufferWriter::WriteBits(uint64_t val, size_t bit_count) { bool BitBufferWriter::WriteNonSymmetric(uint32_t val, uint32_t num_values) { RTC_DCHECK_LT(val, num_values); RTC_DCHECK_LE(num_values, uint32_t{1} << 31); + if (num_values == 1) { + // When there is only one possible value, it requires zero bits to store it. + // But WriteBits doesn't support writing zero bits. 
+ return true; + } size_t count_bits = CountBits(num_values); uint32_t num_min_bits_values = (uint32_t{1} << count_bits) - num_values; diff --git a/rtc_base/bit_buffer.h b/rtc_base/bit_buffer.h index de7bf02d56..388218e698 100644 --- a/rtc_base/bit_buffer.h +++ b/rtc_base/bit_buffer.h @@ -14,6 +14,7 @@ #include // For size_t. #include // For integer types. +#include "absl/base/attributes.h" #include "rtc_base/constructor_magic.h" namespace rtc { @@ -38,18 +39,35 @@ class BitBuffer { // Reads byte-sized values from the buffer. Returns false if there isn't // enough data left for the specified type. - bool ReadUInt8(uint8_t* val); - bool ReadUInt16(uint16_t* val); - bool ReadUInt32(uint32_t* val); + bool ReadUInt8(uint8_t& val); + bool ReadUInt16(uint16_t& val); + bool ReadUInt32(uint32_t& val); + ABSL_DEPRECATED("") bool ReadUInt8(uint8_t* val) { + return val ? ReadUInt8(*val) : false; + } + ABSL_DEPRECATED("") bool ReadUInt16(uint16_t* val) { + return val ? ReadUInt16(*val) : false; + } + ABSL_DEPRECATED("") bool ReadUInt32(uint32_t* val) { + return val ? ReadUInt32(*val) : false; + } // Reads bit-sized values from the buffer. Returns false if there isn't enough // data left for the specified bit count. - bool ReadBits(uint32_t* val, size_t bit_count); + bool ReadBits(size_t bit_count, uint32_t& val); + bool ReadBits(size_t bit_count, uint64_t& val); + ABSL_DEPRECATED("") bool ReadBits(uint32_t* val, size_t bit_count) { + return val ? ReadBits(bit_count, *val) : false; + } // Peeks bit-sized values from the buffer. Returns false if there isn't enough // data left for the specified number of bits. Doesn't move the current // offset. - bool PeekBits(uint32_t* val, size_t bit_count); + bool PeekBits(size_t bit_count, uint32_t& val); + bool PeekBits(size_t bit_count, uint64_t& val); + ABSL_DEPRECATED("") bool PeekBits(uint32_t* val, size_t bit_count) { + return val ? PeekBits(bit_count, *val) : false; + } // Reads value in range [0, num_values - 1]. 
// This encoding is similar to ReadBits(val, Ceil(Log2(num_values)), @@ -61,7 +79,11 @@ class BitBuffer { // Value v in range [k, num_values - 1] is encoded as (v+k) in n bits. // https://aomediacodec.github.io/av1-spec/#nsn // Returns false if there isn't enough data left. - bool ReadNonSymmetric(uint32_t* val, uint32_t num_values); + bool ReadNonSymmetric(uint32_t num_values, uint32_t& val); + ABSL_DEPRECATED("") + bool ReadNonSymmetric(uint32_t* val, uint32_t num_values) { + return val ? ReadNonSymmetric(num_values, *val) : false; + } // Reads the exponential golomb encoded value at the current offset. // Exponential golomb values are encoded as: @@ -71,11 +93,18 @@ class BitBuffer { // and increment the result by 1. // Returns false if there isn't enough data left for the specified type, or if // the value wouldn't fit in a uint32_t. - bool ReadExponentialGolomb(uint32_t* val); + bool ReadExponentialGolomb(uint32_t& val); + ABSL_DEPRECATED("") bool ReadExponentialGolomb(uint32_t* val) { + return val ? ReadExponentialGolomb(*val) : false; + } + // Reads signed exponential golomb values at the current offset. Signed // exponential golomb values are just the unsigned values mapped to the // sequence 0, 1, -1, 2, -2, etc. in order. - bool ReadSignedExponentialGolomb(int32_t* val); + bool ReadSignedExponentialGolomb(int32_t& val); + ABSL_DEPRECATED("") bool ReadSignedExponentialGolomb(int32_t* val) { + return val ? ReadSignedExponentialGolomb(*val) : false; + } // Moves current position |byte_count| bytes forward. Returns false if // there aren't enough bytes left in the buffer. 
diff --git a/rtc_base/bit_buffer_unittest.cc b/rtc_base/bit_buffer_unittest.cc index b3521b4951..e6bb4270c7 100644 --- a/rtc_base/bit_buffer_unittest.cc +++ b/rtc_base/bit_buffer_unittest.cc @@ -49,13 +49,13 @@ TEST(BitBufferTest, ReadBytesAligned) { uint16_t val16; uint32_t val32; BitBuffer buffer(bytes, 8); - EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_TRUE(buffer.ReadUInt8(val8)); EXPECT_EQ(0x0Au, val8); - EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_TRUE(buffer.ReadUInt8(val8)); EXPECT_EQ(0xBCu, val8); - EXPECT_TRUE(buffer.ReadUInt16(&val16)); + EXPECT_TRUE(buffer.ReadUInt16(val16)); EXPECT_EQ(0xDEF1u, val16); - EXPECT_TRUE(buffer.ReadUInt32(&val32)); + EXPECT_TRUE(buffer.ReadUInt32(val32)); EXPECT_EQ(0x23456789u, val32); } @@ -68,13 +68,13 @@ TEST(BitBufferTest, ReadBytesOffset4) { BitBuffer buffer(bytes, 9); EXPECT_TRUE(buffer.ConsumeBits(4)); - EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_TRUE(buffer.ReadUInt8(val8)); EXPECT_EQ(0xABu, val8); - EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_TRUE(buffer.ReadUInt8(val8)); EXPECT_EQ(0xCDu, val8); - EXPECT_TRUE(buffer.ReadUInt16(&val16)); + EXPECT_TRUE(buffer.ReadUInt16(val16)); EXPECT_EQ(0xEF12u, val16); - EXPECT_TRUE(buffer.ReadUInt32(&val32)); + EXPECT_TRUE(buffer.ReadUInt32(val32)); EXPECT_EQ(0x34567890u, val32); } @@ -102,15 +102,15 @@ TEST(BitBufferTest, ReadBytesOffset3) { uint32_t val32; BitBuffer buffer(bytes, 8); EXPECT_TRUE(buffer.ConsumeBits(3)); - EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_TRUE(buffer.ReadUInt8(val8)); EXPECT_EQ(0xFEu, val8); - EXPECT_TRUE(buffer.ReadUInt16(&val16)); + EXPECT_TRUE(buffer.ReadUInt16(val16)); EXPECT_EQ(0xDCBAu, val16); - EXPECT_TRUE(buffer.ReadUInt32(&val32)); + EXPECT_TRUE(buffer.ReadUInt32(val32)); EXPECT_EQ(0x98765432u, val32); // 5 bits left unread. Not enough to read a uint8_t. 
EXPECT_EQ(5u, buffer.RemainingBitCount()); - EXPECT_FALSE(buffer.ReadUInt8(&val8)); + EXPECT_FALSE(buffer.ReadUInt8(val8)); } TEST(BitBufferTest, ReadBits) { @@ -120,29 +120,61 @@ TEST(BitBufferTest, ReadBits) { const uint8_t bytes[] = {0x4D, 0x32}; uint32_t val; BitBuffer buffer(bytes, 2); - EXPECT_TRUE(buffer.ReadBits(&val, 3)); + EXPECT_TRUE(buffer.ReadBits(3, val)); // 0b010 EXPECT_EQ(0x2u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 2)); + EXPECT_TRUE(buffer.ReadBits(2, val)); // 0b01 EXPECT_EQ(0x1u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 7)); + EXPECT_TRUE(buffer.ReadBits(7, val)); // 0b1010011 EXPECT_EQ(0x53u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 2)); + EXPECT_TRUE(buffer.ReadBits(2, val)); // 0b00 EXPECT_EQ(0x0u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 1)); + EXPECT_TRUE(buffer.ReadBits(1, val)); // 0b1 EXPECT_EQ(0x1u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 1)); + EXPECT_TRUE(buffer.ReadBits(1, val)); // 0b0 EXPECT_EQ(0x0u, val); - EXPECT_FALSE(buffer.ReadBits(&val, 1)); + EXPECT_FALSE(buffer.ReadBits(1, val)); } -TEST(BitBufferTest, SetOffsetValues) { +TEST(BitBufferTest, ReadBits64) { + const uint8_t bytes[] = {0x4D, 0x32, 0xAB, 0x54, 0x00, 0xFF, 0xFE, 0x01, + 0xAB, 0xCD, 0xEF, 0x01, 0x23, 0x45, 0x67, 0x89}; + BitBuffer buffer(bytes, 16); + uint64_t val; + + // Peek and read first 33 bits. + EXPECT_TRUE(buffer.PeekBits(33, val)); + EXPECT_EQ(0x4D32AB5400FFFE01ull >> (64 - 33), val); + val = 0; + EXPECT_TRUE(buffer.ReadBits(33, val)); + EXPECT_EQ(0x4D32AB5400FFFE01ull >> (64 - 33), val); + + // Peek and read next 31 bits. + constexpr uint64_t kMask31Bits = (1ull << 32) - 1; + EXPECT_TRUE(buffer.PeekBits(31, val)); + EXPECT_EQ(0x4D32AB5400FFFE01ull & kMask31Bits, val); + val = 0; + EXPECT_TRUE(buffer.ReadBits(31, val)); + EXPECT_EQ(0x4D32AB5400FFFE01ull & kMask31Bits, val); + + // Peek and read remaining 64 bits. 
+ EXPECT_TRUE(buffer.PeekBits(64, val)); + EXPECT_EQ(0xABCDEF0123456789ull, val); + val = 0; + EXPECT_TRUE(buffer.ReadBits(64, val)); + EXPECT_EQ(0xABCDEF0123456789ull, val); + + // Nothing more to read. + EXPECT_FALSE(buffer.ReadBits(1, val)); +} + +TEST(BitBufferDeathTest, SetOffsetValues) { uint8_t bytes[4] = {0}; BitBufferWriter buffer(bytes, 4); @@ -187,10 +219,10 @@ TEST(BitBufferTest, ReadNonSymmetricSameNumberOfBitsWhenNumValuesPowerOf2) { uint32_t values[4]; ASSERT_EQ(reader.RemainingBitCount(), 16u); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[0], /*num_values=*/1 << 4)); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[1], /*num_values=*/1 << 4)); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[2], /*num_values=*/1 << 4)); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[3], /*num_values=*/1 << 4)); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/1 << 4, values[0])); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/1 << 4, values[1])); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/1 << 4, values[2])); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/1 << 4, values[3])); ASSERT_EQ(reader.RemainingBitCount(), 0u); EXPECT_THAT(values, ElementsAre(0xf, 0x3, 0xa, 0x0)); @@ -244,16 +276,38 @@ TEST(BitBufferWriterTest, NonSymmetricReadsMatchesWrites) { rtc::BitBuffer reader(bytes, 2); uint32_t values[6]; - EXPECT_TRUE(reader.ReadNonSymmetric(&values[0], /*num_values=*/6)); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[1], /*num_values=*/6)); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[2], /*num_values=*/6)); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[3], /*num_values=*/6)); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[4], /*num_values=*/6)); - EXPECT_TRUE(reader.ReadNonSymmetric(&values[5], /*num_values=*/6)); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/6, values[0])); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/6, values[1])); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/6, values[2])); + 
EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/6, values[3])); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/6, values[4])); + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/6, values[5])); EXPECT_THAT(values, ElementsAre(0, 1, 2, 3, 4, 5)); } +TEST(BitBufferTest, ReadNonSymmetricOnlyValueConsumesNoBits) { + const uint8_t bytes[2] = {}; + BitBuffer reader(bytes, 2); + uint32_t value = 0xFFFFFFFF; + ASSERT_EQ(reader.RemainingBitCount(), 16u); + + EXPECT_TRUE(reader.ReadNonSymmetric(/*num_values=*/1, value)); + + EXPECT_EQ(value, 0u); + EXPECT_EQ(reader.RemainingBitCount(), 16u); +} + +TEST(BitBufferWriterTest, WriteNonSymmetricOnlyValueConsumesNoBits) { + uint8_t bytes[2] = {}; + BitBufferWriter writer(bytes, 2); + ASSERT_EQ(writer.RemainingBitCount(), 16u); + + EXPECT_TRUE(writer.WriteNonSymmetric(0, /*num_values=*/1)); + + EXPECT_EQ(writer.RemainingBitCount(), 16u); +} + uint64_t GolombEncoded(uint32_t val) { val++; uint32_t bit_counter = val; @@ -280,7 +334,7 @@ TEST(BitBufferTest, GolombUint32Values) { byteBuffer.WriteUInt64(encoded_val); uint32_t decoded_val; EXPECT_TRUE(buffer.Seek(0, 0)); - EXPECT_TRUE(buffer.ReadExponentialGolomb(&decoded_val)); + EXPECT_TRUE(buffer.ReadExponentialGolomb(decoded_val)); EXPECT_EQ(i, decoded_val); } } @@ -297,7 +351,7 @@ TEST(BitBufferTest, SignedGolombValues) { for (size_t i = 0; i < sizeof(golomb_bits); ++i) { BitBuffer buffer(&golomb_bits[i], 1); int32_t decoded_val; - ASSERT_TRUE(buffer.ReadSignedExponentialGolomb(&decoded_val)); + ASSERT_TRUE(buffer.ReadSignedExponentialGolomb(decoded_val)); EXPECT_EQ(expected[i], decoded_val) << "Mismatch in expected/decoded value for golomb_bits[" << i << "]: " << static_cast(golomb_bits[i]); @@ -310,13 +364,13 @@ TEST(BitBufferTest, NoGolombOverread) { // If it didn't, the above buffer would be valid at 3 bytes. 
BitBuffer buffer(bytes, 1); uint32_t decoded_val; - EXPECT_FALSE(buffer.ReadExponentialGolomb(&decoded_val)); + EXPECT_FALSE(buffer.ReadExponentialGolomb(decoded_val)); BitBuffer longer_buffer(bytes, 2); - EXPECT_FALSE(longer_buffer.ReadExponentialGolomb(&decoded_val)); + EXPECT_FALSE(longer_buffer.ReadExponentialGolomb(decoded_val)); BitBuffer longest_buffer(bytes, 3); - EXPECT_TRUE(longest_buffer.ReadExponentialGolomb(&decoded_val)); + EXPECT_TRUE(longest_buffer.ReadExponentialGolomb(decoded_val)); // Golomb should have read 9 bits, so 0x01FF, and since it is golomb, the // result is 0x01FF - 1 = 0x01FE. EXPECT_EQ(0x01FEu, decoded_val); @@ -338,20 +392,20 @@ TEST(BitBufferWriterTest, SymmetricReadWrite) { EXPECT_TRUE(buffer.Seek(0, 0)); uint32_t val; - EXPECT_TRUE(buffer.ReadBits(&val, 3)); + EXPECT_TRUE(buffer.ReadBits(3, val)); EXPECT_EQ(0x2u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 2)); + EXPECT_TRUE(buffer.ReadBits(2, val)); EXPECT_EQ(0x1u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 7)); + EXPECT_TRUE(buffer.ReadBits(7, val)); EXPECT_EQ(0x53u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 2)); + EXPECT_TRUE(buffer.ReadBits(2, val)); EXPECT_EQ(0x0u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 1)); + EXPECT_TRUE(buffer.ReadBits(1, val)); EXPECT_EQ(0x1u, val); - EXPECT_TRUE(buffer.ReadBits(&val, 17)); + EXPECT_TRUE(buffer.ReadBits(17, val)); EXPECT_EQ(0x1ABCDu, val); // And there should be nothing left. 
- EXPECT_FALSE(buffer.ReadBits(&val, 1)); + EXPECT_FALSE(buffer.ReadBits(1, val)); } TEST(BitBufferWriterTest, SymmetricBytesMisaligned) { @@ -368,11 +422,11 @@ TEST(BitBufferWriterTest, SymmetricBytesMisaligned) { uint8_t val8; uint16_t val16; uint32_t val32; - EXPECT_TRUE(buffer.ReadUInt8(&val8)); + EXPECT_TRUE(buffer.ReadUInt8(val8)); EXPECT_EQ(0x12u, val8); - EXPECT_TRUE(buffer.ReadUInt16(&val16)); + EXPECT_TRUE(buffer.ReadUInt16(val16)); EXPECT_EQ(0x3456u, val16); - EXPECT_TRUE(buffer.ReadUInt32(&val32)); + EXPECT_TRUE(buffer.ReadUInt32(val32)); EXPECT_EQ(0x789ABCDEu, val32); } @@ -386,7 +440,7 @@ TEST(BitBufferWriterTest, SymmetricGolomb) { buffer.Seek(0, 0); for (size_t i = 0; i < arraysize(test_string); ++i) { uint32_t val; - EXPECT_TRUE(buffer.ReadExponentialGolomb(&val)); + EXPECT_TRUE(buffer.ReadExponentialGolomb(val)); EXPECT_LE(val, std::numeric_limits::max()); EXPECT_EQ(test_string[i], static_cast(val)); } diff --git a/rtc_base/boringssl_certificate.cc b/rtc_base/boringssl_certificate.cc new file mode 100644 index 0000000000..bb14036a3e --- /dev/null +++ b/rtc_base/boringssl_certificate.cc @@ -0,0 +1,410 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/boringssl_certificate.h" + +#if defined(WEBRTC_WIN) +// Must be included first before openssl headers. 
+#include "rtc_base/win32.h" // NOLINT +#endif // WEBRTC_WIN + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/helpers.h" +#include "rtc_base/logging.h" +#include "rtc_base/message_digest.h" +#include "rtc_base/openssl_digest.h" +#include "rtc_base/openssl_key_pair.h" +#include "rtc_base/openssl_utility.h" + +namespace rtc { +namespace { + +// List of OIDs of signature algorithms accepted by WebRTC. +// Taken from openssl/nid.h. +static const uint8_t kMD5WithRSA[] = {0x2b, 0x0e, 0x03, 0x02, 0x03}; +static const uint8_t kMD5WithRSAEncryption[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x04}; +static const uint8_t kECDSAWithSHA1[] = {0x2a, 0x86, 0x48, 0xce, + 0x3d, 0x04, 0x01}; +static const uint8_t kDSAWithSHA1[] = {0x2a, 0x86, 0x48, 0xce, + 0x38, 0x04, 0x03}; +static const uint8_t kDSAWithSHA1_2[] = {0x2b, 0x0e, 0x03, 0x02, 0x1b}; +static const uint8_t kSHA1WithRSA[] = {0x2b, 0x0e, 0x03, 0x02, 0x1d}; +static const uint8_t kSHA1WithRSAEncryption[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05}; +static const uint8_t kECDSAWithSHA224[] = {0x2a, 0x86, 0x48, 0xce, + 0x3d, 0x04, 0x03, 0x01}; +static const uint8_t kSHA224WithRSAEncryption[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x0e}; +static const uint8_t kDSAWithSHA224[] = {0x60, 0x86, 0x48, 0x01, 0x65, + 0x03, 0x04, 0x03, 0x01}; +static const uint8_t kECDSAWithSHA256[] = {0x2a, 0x86, 0x48, 0xce, + 0x3d, 0x04, 0x03, 0x02}; +static const uint8_t kSHA256WithRSAEncryption[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x0b}; +static const uint8_t kDSAWithSHA256[] = {0x60, 0x86, 0x48, 0x01, 0x65, + 0x03, 0x04, 0x03, 0x02}; +static const uint8_t kECDSAWithSHA384[] = {0x2a, 0x86, 0x48, 0xce, + 0x3d, 0x04, 0x03, 0x03}; +static const uint8_t kSHA384WithRSAEncryption[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x0c}; +static const uint8_t 
kECDSAWithSHA512[] = {0x2a, 0x86, 0x48, 0xce, + 0x3d, 0x04, 0x03, 0x04}; +static const uint8_t kSHA512WithRSAEncryption[] = {0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x0d}; + +#if !defined(NDEBUG) +// Print a certificate to the log, for debugging. +static void PrintCert(BoringSSLCertificate* cert) { + // Since we're using CRYPTO_BUFFER, we can't use X509_print_ex, so we'll just + // print the PEM string. + RTC_DLOG(LS_VERBOSE) << "PEM representation of certificate:\n" + << cert->ToPEMString(); +} +#endif + +bool AddSHA256SignatureAlgorithm(CBB* cbb, KeyType key_type) { + // An AlgorithmIdentifier is described in RFC 5280, 4.1.1.2. + CBB sequence, oid, params; + if (!CBB_add_asn1(cbb, &sequence, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&sequence, &oid, CBS_ASN1_OBJECT)) { + return false; + } + + switch (key_type) { + case KT_RSA: + if (!CBB_add_bytes(&oid, kSHA256WithRSAEncryption, + sizeof(kSHA256WithRSAEncryption)) || + !CBB_add_asn1(&sequence, ¶ms, CBS_ASN1_NULL)) { + return false; + } + break; + case KT_ECDSA: + if (!CBB_add_bytes(&oid, kECDSAWithSHA256, sizeof(kECDSAWithSHA256))) { + return false; + } + break; + default: + RTC_NOTREACHED(); + return false; + } + if (!CBB_flush(cbb)) { + return false; + } + return true; +} + +// Adds an X.509 Common Name to |cbb|. +bool AddCommonName(CBB* cbb, const std::string& common_name) { + // See RFC 4519. + static const uint8_t kCommonName[] = {0x55, 0x04, 0x03}; + + if (common_name.empty()) { + RTC_LOG(LS_ERROR) << "Common name cannot be empty."; + return false; + } + + // See RFC 5280, section 4.1.2.4. 
+ CBB rdns; + if (!CBB_add_asn1(cbb, &rdns, CBS_ASN1_SEQUENCE)) { + return false; + } + + CBB rdn, attr, type, value; + if (!CBB_add_asn1(&rdns, &rdn, CBS_ASN1_SET) || + !CBB_add_asn1(&rdn, &attr, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&attr, &type, CBS_ASN1_OBJECT) || + !CBB_add_bytes(&type, kCommonName, sizeof(kCommonName)) || + !CBB_add_asn1(&attr, &value, CBS_ASN1_UTF8STRING) || + !CBB_add_bytes(&value, + reinterpret_cast(common_name.c_str()), + common_name.size()) || + !CBB_flush(cbb)) { + return false; + } + + return true; +} + +bool AddTime(CBB* cbb, time_t time) { + bssl::UniquePtr asn1_time(ASN1_TIME_new()); + if (!asn1_time) { + return false; + } + + if (!ASN1_TIME_set(asn1_time.get(), time)) { + return false; + } + + unsigned tag; + switch (asn1_time->type) { + case V_ASN1_UTCTIME: + tag = CBS_ASN1_UTCTIME; + break; + case V_ASN1_GENERALIZEDTIME: + tag = CBS_ASN1_GENERALIZEDTIME; + break; + default: + return false; + } + + CBB child; + if (!CBB_add_asn1(cbb, &child, tag) || + !CBB_add_bytes(&child, asn1_time->data, asn1_time->length) || + !CBB_flush(cbb)) { + return false; + } + + return true; +} + +// Generate a self-signed certificate, with the public key from the +// given key pair. Caller is responsible for freeing the returned object. +static bssl::UniquePtr MakeCertificate( + EVP_PKEY* pkey, + const SSLIdentityParams& params) { + RTC_LOG(LS_INFO) << "Making certificate for " << params.common_name; + + // See RFC 5280, section 4.1. First, construct the TBSCertificate. 
+ bssl::ScopedCBB cbb; + CBB tbs_cert, version, validity; + uint8_t* tbs_cert_bytes; + size_t tbs_cert_len; + uint64_t serial_number; + if (!CBB_init(cbb.get(), 64) || + !CBB_add_asn1(cbb.get(), &tbs_cert, CBS_ASN1_SEQUENCE) || + !CBB_add_asn1(&tbs_cert, &version, + CBS_ASN1_CONTEXT_SPECIFIC | CBS_ASN1_CONSTRUCTED | 0) || + !CBB_add_asn1_uint64(&version, 2) || + !RAND_bytes(reinterpret_cast(&serial_number), + sizeof(serial_number)) || + !CBB_add_asn1_uint64(&tbs_cert, serial_number) || + !AddSHA256SignatureAlgorithm(&tbs_cert, params.key_params.type()) || + !AddCommonName(&tbs_cert, params.common_name) || // issuer + !CBB_add_asn1(&tbs_cert, &validity, CBS_ASN1_SEQUENCE) || + !AddTime(&validity, params.not_before) || + !AddTime(&validity, params.not_after) || + !AddCommonName(&tbs_cert, params.common_name) || // subject + !EVP_marshal_public_key(&tbs_cert, pkey) || // subjectPublicKeyInfo + !CBB_finish(cbb.get(), &tbs_cert_bytes, &tbs_cert_len)) { + return nullptr; + } + + bssl::UniquePtr delete_tbs_cert_bytes(tbs_cert_bytes); + + // Sign the TBSCertificate and write the entire certificate. + CBB cert, signature; + bssl::ScopedEVP_MD_CTX ctx; + uint8_t* sig_out; + size_t sig_len; + uint8_t* cert_bytes; + size_t cert_len; + if (!CBB_init(cbb.get(), tbs_cert_len) || + !CBB_add_asn1(cbb.get(), &cert, CBS_ASN1_SEQUENCE) || + !CBB_add_bytes(&cert, tbs_cert_bytes, tbs_cert_len) || + !AddSHA256SignatureAlgorithm(&cert, params.key_params.type()) || + !CBB_add_asn1(&cert, &signature, CBS_ASN1_BITSTRING) || + !CBB_add_u8(&signature, 0 /* no unused bits */) || + !EVP_DigestSignInit(ctx.get(), nullptr, EVP_sha256(), nullptr, pkey) || + // Compute the maximum signature length. + !EVP_DigestSign(ctx.get(), nullptr, &sig_len, tbs_cert_bytes, + tbs_cert_len) || + !CBB_reserve(&signature, &sig_out, sig_len) || + // Actually sign the TBSCertificate. 
+ !EVP_DigestSign(ctx.get(), sig_out, &sig_len, tbs_cert_bytes, + tbs_cert_len) || + !CBB_did_write(&signature, sig_len) || + !CBB_finish(cbb.get(), &cert_bytes, &cert_len)) { + return nullptr; + } + bssl::UniquePtr delete_cert_bytes(cert_bytes); + + RTC_LOG(LS_INFO) << "Returning certificate"; + return bssl::UniquePtr( + CRYPTO_BUFFER_new(cert_bytes, cert_len, openssl::GetBufferPool())); +} + +} // namespace + +BoringSSLCertificate::BoringSSLCertificate( + bssl::UniquePtr cert_buffer) + : cert_buffer_(std::move(cert_buffer)) { + RTC_DCHECK(cert_buffer_ != nullptr); +} + +std::unique_ptr BoringSSLCertificate::Generate( + OpenSSLKeyPair* key_pair, + const SSLIdentityParams& params) { + SSLIdentityParams actual_params(params); + if (actual_params.common_name.empty()) { + // Use a random string, arbitrarily 8 chars long. + actual_params.common_name = CreateRandomString(8); + } + bssl::UniquePtr cert_buffer = + MakeCertificate(key_pair->pkey(), actual_params); + if (!cert_buffer) { + openssl::LogSSLErrors("Generating certificate"); + return nullptr; + } + auto ret = std::make_unique(std::move(cert_buffer)); +#if !defined(NDEBUG) + PrintCert(ret.get()); +#endif + return ret; +} + +std::unique_ptr BoringSSLCertificate::FromPEMString( + const std::string& pem_string) { + std::string der; + if (!SSLIdentity::PemToDer(kPemTypeCertificate, pem_string, &der)) { + return nullptr; + } + bssl::UniquePtr cert_buffer( + CRYPTO_BUFFER_new(reinterpret_cast(der.c_str()), + der.length(), openssl::GetBufferPool())); + if (!cert_buffer) { + return nullptr; + } + return std::make_unique(std::move(cert_buffer)); +} + +#define OID_MATCHES(oid, oid_other) \ + (CBS_len(&oid) == sizeof(oid_other) && \ + 0 == memcmp(CBS_data(&oid), oid_other, sizeof(oid_other))) + +bool BoringSSLCertificate::GetSignatureDigestAlgorithm( + std::string* algorithm) const { + CBS oid; + if (!openssl::ParseCertificate(cert_buffer_.get(), &oid, nullptr)) { + RTC_LOG(LS_ERROR) << "Failed to parse certificate."; + 
return false; + } + if (OID_MATCHES(oid, kMD5WithRSA) || + OID_MATCHES(oid, kMD5WithRSAEncryption)) { + *algorithm = DIGEST_MD5; + return true; + } + if (OID_MATCHES(oid, kECDSAWithSHA1) || OID_MATCHES(oid, kDSAWithSHA1) || + OID_MATCHES(oid, kDSAWithSHA1_2) || OID_MATCHES(oid, kSHA1WithRSA) || + OID_MATCHES(oid, kSHA1WithRSAEncryption)) { + *algorithm = DIGEST_SHA_1; + return true; + } + if (OID_MATCHES(oid, kECDSAWithSHA224) || + OID_MATCHES(oid, kSHA224WithRSAEncryption) || + OID_MATCHES(oid, kDSAWithSHA224)) { + *algorithm = DIGEST_SHA_224; + return true; + } + if (OID_MATCHES(oid, kECDSAWithSHA256) || + OID_MATCHES(oid, kSHA256WithRSAEncryption) || + OID_MATCHES(oid, kDSAWithSHA256)) { + *algorithm = DIGEST_SHA_256; + return true; + } + if (OID_MATCHES(oid, kECDSAWithSHA384) || + OID_MATCHES(oid, kSHA384WithRSAEncryption)) { + *algorithm = DIGEST_SHA_384; + return true; + } + if (OID_MATCHES(oid, kECDSAWithSHA512) || + OID_MATCHES(oid, kSHA512WithRSAEncryption)) { + *algorithm = DIGEST_SHA_512; + return true; + } + // Unknown algorithm. There are several unhandled options that are less + // common and more complex. 
+ RTC_LOG(LS_ERROR) << "Unknown signature algorithm."; + algorithm->clear(); + return false; +} + +bool BoringSSLCertificate::ComputeDigest(const std::string& algorithm, + unsigned char* digest, + size_t size, + size_t* length) const { + return ComputeDigest(cert_buffer_.get(), algorithm, digest, size, length); +} + +bool BoringSSLCertificate::ComputeDigest(const CRYPTO_BUFFER* cert_buffer, + const std::string& algorithm, + unsigned char* digest, + size_t size, + size_t* length) { + const EVP_MD* md = nullptr; + unsigned int n = 0; + if (!OpenSSLDigest::GetDigestEVP(algorithm, &md)) { + return false; + } + if (size < static_cast(EVP_MD_size(md))) { + return false; + } + if (!EVP_Digest(CRYPTO_BUFFER_data(cert_buffer), + CRYPTO_BUFFER_len(cert_buffer), digest, &n, md, nullptr)) { + return false; + } + *length = n; + return true; +} + +BoringSSLCertificate::~BoringSSLCertificate() {} + +std::unique_ptr BoringSSLCertificate::Clone() const { + return std::make_unique( + bssl::UpRef(cert_buffer_.get())); +} + +std::string BoringSSLCertificate::ToPEMString() const { + return SSLIdentity::DerToPem(kPemTypeCertificate, + CRYPTO_BUFFER_data(cert_buffer_.get()), + CRYPTO_BUFFER_len(cert_buffer_.get())); +} + +void BoringSSLCertificate::ToDER(Buffer* der_buffer) const { + der_buffer->SetData(CRYPTO_BUFFER_data(cert_buffer_.get()), + CRYPTO_BUFFER_len(cert_buffer_.get())); +} + +bool BoringSSLCertificate::operator==(const BoringSSLCertificate& other) const { + return CRYPTO_BUFFER_len(cert_buffer_.get()) == + CRYPTO_BUFFER_len(other.cert_buffer_.get()) && + 0 == memcmp(CRYPTO_BUFFER_data(cert_buffer_.get()), + CRYPTO_BUFFER_data(other.cert_buffer_.get()), + CRYPTO_BUFFER_len(cert_buffer_.get())); +} + +bool BoringSSLCertificate::operator!=(const BoringSSLCertificate& other) const { + return !(*this == other); +} + +int64_t BoringSSLCertificate::CertificateExpirationTime() const { + int64_t ret; + if (!openssl::ParseCertificate(cert_buffer_.get(), nullptr, &ret)) { + 
RTC_LOG(LS_ERROR) << "Failed to parse certificate."; + return -1; + } + return ret; +} + +} // namespace rtc diff --git a/rtc_base/boringssl_certificate.h b/rtc_base/boringssl_certificate.h new file mode 100644 index 0000000000..740763dc69 --- /dev/null +++ b/rtc_base/boringssl_certificate.h @@ -0,0 +1,80 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_BORINGSSL_CERTIFICATE_H_ +#define RTC_BASE_BORINGSSL_CERTIFICATE_H_ + +#include +#include +#include + +#include +#include + +#include "rtc_base/buffer.h" +#include "rtc_base/constructor_magic.h" +#include "rtc_base/ssl_certificate.h" +#include "rtc_base/ssl_identity.h" + +namespace rtc { + +class OpenSSLKeyPair; + +// BoringSSLCertificate encapsulates a BoringSSL CRYPTO_BUFFER object holding a +// certificate, which is also reference counted inside the BoringSSL library. +// This offers binary size and memory improvements over the OpenSSL X509 +// object. 
+class BoringSSLCertificate final : public SSLCertificate { + public: + explicit BoringSSLCertificate(bssl::UniquePtr cert_buffer); + + static std::unique_ptr Generate( + OpenSSLKeyPair* key_pair, + const SSLIdentityParams& params); + static std::unique_ptr FromPEMString( + const std::string& pem_string); + + ~BoringSSLCertificate() override; + + std::unique_ptr Clone() const override; + + CRYPTO_BUFFER* cert_buffer() const { return cert_buffer_.get(); } + + std::string ToPEMString() const override; + void ToDER(Buffer* der_buffer) const override; + bool operator==(const BoringSSLCertificate& other) const; + bool operator!=(const BoringSSLCertificate& other) const; + + // Compute the digest of the certificate given |algorithm|. + bool ComputeDigest(const std::string& algorithm, + unsigned char* digest, + size_t size, + size_t* length) const override; + + // Compute the digest of a certificate as a CRYPTO_BUFFER. + static bool ComputeDigest(const CRYPTO_BUFFER* cert_buffer, + const std::string& algorithm, + unsigned char* digest, + size_t size, + size_t* length); + + bool GetSignatureDigestAlgorithm(std::string* algorithm) const override; + + int64_t CertificateExpirationTime() const override; + + private: + // A handle to the DER encoded certificate data. + bssl::UniquePtr cert_buffer_; + RTC_DISALLOW_COPY_AND_ASSIGN(BoringSSLCertificate); +}; + +} // namespace rtc + +#endif // RTC_BASE_BORINGSSL_CERTIFICATE_H_ diff --git a/rtc_base/boringssl_identity.cc b/rtc_base/boringssl_identity.cc new file mode 100644 index 0000000000..d22c8ce529 --- /dev/null +++ b/rtc_base/boringssl_identity.cc @@ -0,0 +1,215 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/boringssl_identity.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "absl/memory/memory.h" +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/openssl.h" +#include "rtc_base/openssl_utility.h" + +namespace rtc { + +BoringSSLIdentity::BoringSSLIdentity( + std::unique_ptr key_pair, + std::unique_ptr certificate) + : key_pair_(std::move(key_pair)) { + RTC_DCHECK(key_pair_ != nullptr); + RTC_DCHECK(certificate != nullptr); + std::vector> certs; + certs.push_back(std::move(certificate)); + cert_chain_.reset(new SSLCertChain(std::move(certs))); +} + +BoringSSLIdentity::BoringSSLIdentity(std::unique_ptr key_pair, + std::unique_ptr cert_chain) + : key_pair_(std::move(key_pair)), cert_chain_(std::move(cert_chain)) { + RTC_DCHECK(key_pair_ != nullptr); + RTC_DCHECK(cert_chain_ != nullptr); +} + +BoringSSLIdentity::~BoringSSLIdentity() = default; + +std::unique_ptr BoringSSLIdentity::CreateInternal( + const SSLIdentityParams& params) { + auto key_pair = OpenSSLKeyPair::Generate(params.key_params); + if (key_pair) { + std::unique_ptr certificate( + BoringSSLCertificate::Generate(key_pair.get(), params)); + if (certificate != nullptr) { + return absl::WrapUnique( + new BoringSSLIdentity(std::move(key_pair), std::move(certificate))); + } + } + RTC_LOG(LS_ERROR) << "Identity generation failed."; + return nullptr; +} + +// static +std::unique_ptr BoringSSLIdentity::CreateWithExpiration( + const std::string& common_name, + const KeyParams& key_params, + time_t certificate_lifetime) { + SSLIdentityParams params; + params.key_params = key_params; + params.common_name = common_name; + time_t now = time(nullptr); + params.not_before = now + kCertificateWindowInSeconds; + params.not_after = now + certificate_lifetime; + if 
(params.not_before > params.not_after) + return nullptr; + return CreateInternal(params); +} + +std::unique_ptr BoringSSLIdentity::CreateForTest( + const SSLIdentityParams& params) { + return CreateInternal(params); +} + +std::unique_ptr BoringSSLIdentity::CreateFromPEMStrings( + const std::string& private_key, + const std::string& certificate) { + std::unique_ptr cert( + BoringSSLCertificate::FromPEMString(certificate)); + if (!cert) { + RTC_LOG(LS_ERROR) + << "Failed to create BoringSSLCertificate from PEM string."; + return nullptr; + } + + auto key_pair = OpenSSLKeyPair::FromPrivateKeyPEMString(private_key); + if (!key_pair) { + RTC_LOG(LS_ERROR) << "Failed to create key pair from PEM string."; + return nullptr; + } + + return absl::WrapUnique( + new BoringSSLIdentity(std::move(key_pair), std::move(cert))); +} + +std::unique_ptr BoringSSLIdentity::CreateFromPEMChainStrings( + const std::string& private_key, + const std::string& certificate_chain) { + bssl::UniquePtr bio( + BIO_new_mem_buf(certificate_chain.data(), + rtc::dchecked_cast(certificate_chain.size()))); + if (!bio) { + return nullptr; + } + BIO_set_mem_eof_return(bio.get(), 0); + std::vector> certs; + while (true) { + char* name; + char* header; + unsigned char* data; + long len; // NOLINT + int ret = PEM_read_bio(bio.get(), &name, &header, &data, &len); + if (ret == 0) { + uint32_t err = ERR_peek_error(); + if (ERR_GET_LIB(err) == ERR_LIB_PEM && + ERR_GET_REASON(err) == PEM_R_NO_START_LINE) { + break; + } + RTC_LOG(LS_ERROR) << "Failed to parse certificate from PEM string."; + return nullptr; + } + bssl::UniquePtr owned_name(name); + bssl::UniquePtr owned_header(header); + bssl::UniquePtr owned_data(data); + if (strcmp(owned_name.get(), PEM_STRING_X509) != 0) { + RTC_LOG(LS_ERROR) + << "Non-certificate found while parsing certificate chain: " + << owned_name.get(); + return nullptr; + } + bssl::UniquePtr crypto_buffer( + CRYPTO_BUFFER_new(data, len, openssl::GetBufferPool())); + if (!crypto_buffer) { 
+ return nullptr; + } + certs.emplace_back(new BoringSSLCertificate(std::move(crypto_buffer))); + } + if (certs.empty()) { + RTC_LOG(LS_ERROR) << "Found no certificates in PEM string."; + return nullptr; + } + + auto key_pair = OpenSSLKeyPair::FromPrivateKeyPEMString(private_key); + if (!key_pair) { + RTC_LOG(LS_ERROR) << "Failed to create key pair from PEM string."; + return nullptr; + } + + return absl::WrapUnique(new BoringSSLIdentity( + std::move(key_pair), std::make_unique(std::move(certs)))); +} + +const BoringSSLCertificate& BoringSSLIdentity::certificate() const { + return *static_cast(&cert_chain_->Get(0)); +} + +const SSLCertChain& BoringSSLIdentity::cert_chain() const { + return *cert_chain_.get(); +} + +std::unique_ptr BoringSSLIdentity::CloneInternal() const { + // We cannot use std::make_unique here because the referenced + // BoringSSLIdentity constructor is private. + return absl::WrapUnique( + new BoringSSLIdentity(key_pair_->Clone(), cert_chain_->Clone())); +} + +bool BoringSSLIdentity::ConfigureIdentity(SSL_CTX* ctx) { + std::vector cert_buffers; + for (size_t i = 0; i < cert_chain_->GetSize(); ++i) { + cert_buffers.push_back( + static_cast(&cert_chain_->Get(i)) + ->cert_buffer()); + } + // 1 is the documented success return code. 
+ if (1 != SSL_CTX_set_chain_and_key(ctx, &cert_buffers[0], cert_buffers.size(), + key_pair_->pkey(), nullptr)) { + openssl::LogSSLErrors("Configuring key and certificate"); + return false; + } + return true; +} + +std::string BoringSSLIdentity::PrivateKeyToPEMString() const { + return key_pair_->PrivateKeyToPEMString(); +} + +std::string BoringSSLIdentity::PublicKeyToPEMString() const { + return key_pair_->PublicKeyToPEMString(); +} + +bool BoringSSLIdentity::operator==(const BoringSSLIdentity& other) const { + return *this->key_pair_ == *other.key_pair_ && + this->certificate() == other.certificate(); +} + +bool BoringSSLIdentity::operator!=(const BoringSSLIdentity& other) const { + return !(*this == other); +} + +} // namespace rtc diff --git a/rtc_base/boringssl_identity.h b/rtc_base/boringssl_identity.h new file mode 100644 index 0000000000..71b29b486d --- /dev/null +++ b/rtc_base/boringssl_identity.h @@ -0,0 +1,76 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_BORINGSSL_IDENTITY_H_ +#define RTC_BASE_BORINGSSL_IDENTITY_H_ + +#include + +#include +#include +#include + +#include "rtc_base/boringssl_certificate.h" +#include "rtc_base/constructor_magic.h" +#include "rtc_base/openssl_key_pair.h" +#include "rtc_base/ssl_certificate.h" +#include "rtc_base/ssl_identity.h" + +namespace rtc { + +// Holds a keypair and certificate together, and a method to generate them +// consistently. Uses CRYPTO_BUFFER instead of X509, which offers binary size +// and memory improvements. 
+class BoringSSLIdentity final : public SSLIdentity { + public: + static std::unique_ptr CreateWithExpiration( + const std::string& common_name, + const KeyParams& key_params, + time_t certificate_lifetime); + static std::unique_ptr CreateForTest( + const SSLIdentityParams& params); + static std::unique_ptr CreateFromPEMStrings( + const std::string& private_key, + const std::string& certificate); + static std::unique_ptr CreateFromPEMChainStrings( + const std::string& private_key, + const std::string& certificate_chain); + ~BoringSSLIdentity() override; + + const BoringSSLCertificate& certificate() const override; + const SSLCertChain& cert_chain() const override; + + // Configure an SSL context object to use our key and certificate. + bool ConfigureIdentity(SSL_CTX* ctx); + + std::string PrivateKeyToPEMString() const override; + std::string PublicKeyToPEMString() const override; + bool operator==(const BoringSSLIdentity& other) const; + bool operator!=(const BoringSSLIdentity& other) const; + + private: + BoringSSLIdentity(std::unique_ptr key_pair, + std::unique_ptr certificate); + BoringSSLIdentity(std::unique_ptr key_pair, + std::unique_ptr cert_chain); + std::unique_ptr CloneInternal() const override; + + static std::unique_ptr CreateInternal( + const SSLIdentityParams& params); + + std::unique_ptr key_pair_; + std::unique_ptr cert_chain_; + + RTC_DISALLOW_COPY_AND_ASSIGN(BoringSSLIdentity); +}; + +} // namespace rtc + +#endif // RTC_BASE_BORINGSSL_IDENTITY_H_ diff --git a/rtc_base/buffer.h b/rtc_base/buffer.h index 3048b9179f..d1639e2f71 100644 --- a/rtc_base/buffer.h +++ b/rtc_base/buffer.h @@ -370,7 +370,9 @@ class BufferT { : capacity; std::unique_ptr new_data(new T[new_capacity]); - std::memcpy(new_data.get(), data_.get(), size_ * sizeof(T)); + if (data_ != nullptr) { + std::memcpy(new_data.get(), data_.get(), size_ * sizeof(T)); + } MaybeZeroCompleteBuffer(); data_ = std::move(new_data); capacity_ = new_capacity; diff --git a/rtc_base/buffer_queue.cc 
b/rtc_base/buffer_queue.cc index 445045ceea..7879e933c7 100644 --- a/rtc_base/buffer_queue.cc +++ b/rtc_base/buffer_queue.cc @@ -21,23 +21,20 @@ BufferQueue::BufferQueue(size_t capacity, size_t default_size) : capacity_(capacity), default_size_(default_size) {} BufferQueue::~BufferQueue() { - CritScope cs(&crit_); - - for (Buffer* buffer : queue_) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + for (Buffer* buffer : queue_) delete buffer; - } - for (Buffer* buffer : free_list_) { + for (Buffer* buffer : free_list_) delete buffer; - } } size_t BufferQueue::size() const { - CritScope cs(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); return queue_.size(); } void BufferQueue::Clear() { - CritScope cs(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); while (!queue_.empty()) { free_list_.push_back(queue_.front()); queue_.pop_front(); @@ -45,36 +42,30 @@ void BufferQueue::Clear() { } bool BufferQueue::ReadFront(void* buffer, size_t bytes, size_t* bytes_read) { - CritScope cs(&crit_); - if (queue_.empty()) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (queue_.empty()) return false; - } - bool was_writable = queue_.size() < capacity_; Buffer* packet = queue_.front(); queue_.pop_front(); bytes = std::min(bytes, packet->size()); memcpy(buffer, packet->data(), bytes); - if (bytes_read) { + + if (bytes_read) *bytes_read = bytes; - } + free_list_.push_back(packet); - if (!was_writable) { - NotifyWritableForTest(); - } return true; } bool BufferQueue::WriteBack(const void* buffer, size_t bytes, size_t* bytes_written) { - CritScope cs(&crit_); - if (queue_.size() == capacity_) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (queue_.size() == capacity_) return false; - } - bool was_readable = !queue_.empty(); Buffer* packet; if (!free_list_.empty()) { packet = free_list_.back(); @@ -84,13 +75,10 @@ bool BufferQueue::WriteBack(const void* buffer, } packet->SetData(static_cast(buffer), bytes); - if (bytes_written) { + if (bytes_written) *bytes_written = bytes; - } + 
queue_.push_back(packet); - if (!was_readable) { - NotifyReadableForTest(); - } return true; } diff --git a/rtc_base/buffer_queue.h b/rtc_base/buffer_queue.h index 5cb18d0220..09c6c4f734 100644 --- a/rtc_base/buffer_queue.h +++ b/rtc_base/buffer_queue.h @@ -16,18 +16,19 @@ #include #include +#include "api/sequence_checker.h" #include "rtc_base/buffer.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread_annotations.h" namespace rtc { -class BufferQueue { +class BufferQueue final { public: // Creates a buffer queue with a given capacity and default buffer size. BufferQueue(size_t capacity, size_t default_size); - virtual ~BufferQueue(); + ~BufferQueue(); // Return number of queued buffers. size_t size() const; @@ -44,17 +45,22 @@ class BufferQueue { // Returns true unless no data could be written. bool WriteBack(const void* data, size_t bytes, size_t* bytes_written); - protected: - // These methods are called when the state of the queue changes. 
- virtual void NotifyReadableForTest() {} - virtual void NotifyWritableForTest() {} + bool is_writable() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return queue_.size() < capacity_; + } + + bool is_readable() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return !queue_.empty(); + } private: - size_t capacity_; - size_t default_size_; - CriticalSection crit_; - std::deque queue_ RTC_GUARDED_BY(crit_); - std::vector free_list_ RTC_GUARDED_BY(crit_); + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker sequence_checker_; + const size_t capacity_; + const size_t default_size_; + std::deque queue_ RTC_GUARDED_BY(sequence_checker_); + std::vector free_list_ RTC_GUARDED_BY(sequence_checker_); RTC_DISALLOW_COPY_AND_ASSIGN(BufferQueue); }; diff --git a/rtc_base/buffer_unittest.cc b/rtc_base/buffer_unittest.cc index 3e7396dd2c..8beae43cf9 100644 --- a/rtc_base/buffer_unittest.cc +++ b/rtc_base/buffer_unittest.cc @@ -447,7 +447,7 @@ TEST(BufferTest, TestStruct) { EXPECT_EQ(kObsidian, buf[2].stone); } -TEST(BufferTest, DieOnUseAfterMove) { +TEST(BufferDeathTest, DieOnUseAfterMove) { Buffer buf(17); Buffer buf2 = std::move(buf); EXPECT_EQ(buf2.size(), 17u); diff --git a/rtc_base/callback.h b/rtc_base/callback.h deleted file mode 100644 index 47512214e3..0000000000 --- a/rtc_base/callback.h +++ /dev/null @@ -1,250 +0,0 @@ -// This file was GENERATED by command: -// pump.py callback.h.pump -// DO NOT EDIT BY HAND!!! - -/* - * Copyright 2012 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -// To generate callback.h from callback.h.pump, execute: -// ../third_party/googletest/src/googletest/scripts/pump.py callback.h.pump - -// Callbacks are callable object containers. They can hold a function pointer -// or a function object and behave like a value type. Internally, data is -// reference-counted, making copies and pass-by-value inexpensive. -// -// Callbacks are typed using template arguments. The format is: -// CallbackN -// where N is the number of arguments supplied to the callable object. -// Callbacks are invoked using operator(), just like a function or a function -// object. Default-constructed callbacks are "empty," and executing an empty -// callback does nothing. A callback can be made empty by assigning it from -// a default-constructed callback. -// -// Callbacks are similar in purpose to std::function (which isn't available on -// all platforms we support) and a lightweight alternative to sigslots. Since -// they effectively hide the type of the object they call, they're useful in -// breaking dependencies between objects that need to interact with one another. -// Notably, they can hold the results of Bind(), std::bind*, etc, without -// needing -// to know the resulting object type of those calls. -// -// Sigslots, on the other hand, provide a fuller feature set, such as multiple -// subscriptions to a signal, optional thread-safety, and lifetime tracking of -// slots. When these features are needed, choose sigslots. 
-// -// Example: -// int sqr(int x) { return x * x; } -// struct AddK { -// int k; -// int operator()(int x) const { return x + k; } -// } add_k = {5}; -// -// Callback1 my_callback; -// cout << my_callback.empty() << endl; // true -// -// my_callback = Callback1(&sqr); -// cout << my_callback.empty() << endl; // false -// cout << my_callback(3) << endl; // 9 -// -// my_callback = Callback1(add_k); -// cout << my_callback(10) << endl; // 15 -// -// my_callback = Callback1(); -// cout << my_callback.empty() << endl; // true - -#ifndef RTC_BASE_CALLBACK_H_ -#define RTC_BASE_CALLBACK_H_ - -#include "api/scoped_refptr.h" -#include "rtc_base/ref_count.h" -#include "rtc_base/ref_counted_object.h" - -namespace rtc { - -template -class Callback0 { - public: - // Default copy operations are appropriate for this class. - Callback0() {} - template - Callback0(const T& functor) - : helper_(new RefCountedObject >(functor)) {} - R operator()() { - if (empty()) - return R(); - return helper_->Run(); - } - bool empty() const { return !helper_; } - - private: - struct Helper : RefCountInterface { - virtual ~Helper() {} - virtual R Run() = 0; - }; - template - struct HelperImpl : Helper { - explicit HelperImpl(const T& functor) : functor_(functor) {} - virtual R Run() { return functor_(); } - T functor_; - }; - scoped_refptr helper_; -}; - -template -class Callback1 { - public: - // Default copy operations are appropriate for this class. 
- Callback1() {} - template - Callback1(const T& functor) - : helper_(new RefCountedObject >(functor)) {} - R operator()(P1 p1) { - if (empty()) - return R(); - return helper_->Run(p1); - } - bool empty() const { return !helper_; } - - private: - struct Helper : RefCountInterface { - virtual ~Helper() {} - virtual R Run(P1 p1) = 0; - }; - template - struct HelperImpl : Helper { - explicit HelperImpl(const T& functor) : functor_(functor) {} - virtual R Run(P1 p1) { return functor_(p1); } - T functor_; - }; - scoped_refptr helper_; -}; - -template -class Callback2 { - public: - // Default copy operations are appropriate for this class. - Callback2() {} - template - Callback2(const T& functor) - : helper_(new RefCountedObject >(functor)) {} - R operator()(P1 p1, P2 p2) { - if (empty()) - return R(); - return helper_->Run(p1, p2); - } - bool empty() const { return !helper_; } - - private: - struct Helper : RefCountInterface { - virtual ~Helper() {} - virtual R Run(P1 p1, P2 p2) = 0; - }; - template - struct HelperImpl : Helper { - explicit HelperImpl(const T& functor) : functor_(functor) {} - virtual R Run(P1 p1, P2 p2) { return functor_(p1, p2); } - T functor_; - }; - scoped_refptr helper_; -}; - -template -class Callback3 { - public: - // Default copy operations are appropriate for this class. 
- Callback3() {} - template - Callback3(const T& functor) - : helper_(new RefCountedObject >(functor)) {} - R operator()(P1 p1, P2 p2, P3 p3) { - if (empty()) - return R(); - return helper_->Run(p1, p2, p3); - } - bool empty() const { return !helper_; } - - private: - struct Helper : RefCountInterface { - virtual ~Helper() {} - virtual R Run(P1 p1, P2 p2, P3 p3) = 0; - }; - template - struct HelperImpl : Helper { - explicit HelperImpl(const T& functor) : functor_(functor) {} - virtual R Run(P1 p1, P2 p2, P3 p3) { return functor_(p1, p2, p3); } - T functor_; - }; - scoped_refptr helper_; -}; - -template -class Callback4 { - public: - // Default copy operations are appropriate for this class. - Callback4() {} - template - Callback4(const T& functor) - : helper_(new RefCountedObject >(functor)) {} - R operator()(P1 p1, P2 p2, P3 p3, P4 p4) { - if (empty()) - return R(); - return helper_->Run(p1, p2, p3, p4); - } - bool empty() const { return !helper_; } - - private: - struct Helper : RefCountInterface { - virtual ~Helper() {} - virtual R Run(P1 p1, P2 p2, P3 p3, P4 p4) = 0; - }; - template - struct HelperImpl : Helper { - explicit HelperImpl(const T& functor) : functor_(functor) {} - virtual R Run(P1 p1, P2 p2, P3 p3, P4 p4) { - return functor_(p1, p2, p3, p4); - } - T functor_; - }; - scoped_refptr helper_; -}; - -template -class Callback5 { - public: - // Default copy operations are appropriate for this class. 
- Callback5() {} - template - Callback5(const T& functor) - : helper_(new RefCountedObject >(functor)) {} - R operator()(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) { - if (empty()) - return R(); - return helper_->Run(p1, p2, p3, p4, p5); - } - bool empty() const { return !helper_; } - - private: - struct Helper : RefCountInterface { - virtual ~Helper() {} - virtual R Run(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) = 0; - }; - template - struct HelperImpl : Helper { - explicit HelperImpl(const T& functor) : functor_(functor) {} - virtual R Run(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) { - return functor_(p1, p2, p3, p4, p5); - } - T functor_; - }; - scoped_refptr helper_; -}; -} // namespace rtc - -#endif // RTC_BASE_CALLBACK_H_ diff --git a/rtc_base/callback.h.pump b/rtc_base/callback.h.pump deleted file mode 100644 index dc5fb3ae1d..0000000000 --- a/rtc_base/callback.h.pump +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2012 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// To generate callback.h from callback.h.pump, execute: -// ../third_party/googletest/src/googletest/scripts/pump.py callback.h.pump - -// Callbacks are callable object containers. They can hold a function pointer -// or a function object and behave like a value type. Internally, data is -// reference-counted, making copies and pass-by-value inexpensive. -// -// Callbacks are typed using template arguments. The format is: -// CallbackN -// where N is the number of arguments supplied to the callable object. -// Callbacks are invoked using operator(), just like a function or a function -// object. 
Default-constructed callbacks are "empty," and executing an empty -// callback does nothing. A callback can be made empty by assigning it from -// a default-constructed callback. -// -// Callbacks are similar in purpose to std::function (which isn't available on -// all platforms we support) and a lightweight alternative to sigslots. Since -// they effectively hide the type of the object they call, they're useful in -// breaking dependencies between objects that need to interact with one another. -// Notably, they can hold the results of Bind(), std::bind*, etc, without needing -// to know the resulting object type of those calls. -// -// Sigslots, on the other hand, provide a fuller feature set, such as multiple -// subscriptions to a signal, optional thread-safety, and lifetime tracking of -// slots. When these features are needed, choose sigslots. -// -// Example: -// int sqr(int x) { return x * x; } -// struct AddK { -// int k; -// int operator()(int x) const { return x + k; } -// } add_k = {5}; -// -// Callback1 my_callback; -// cout << my_callback.empty() << endl; // true -// -// my_callback = Callback1(&sqr); -// cout << my_callback.empty() << endl; // false -// cout << my_callback(3) << endl; // 9 -// -// my_callback = Callback1(add_k); -// cout << my_callback(10) << endl; // 15 -// -// my_callback = Callback1(); -// cout << my_callback.empty() << endl; // true - -#ifndef RTC_BASE_CALLBACK_H_ -#define RTC_BASE_CALLBACK_H_ - -#include "rtc_base/ref_count.h" -#include "rtc_base/ref_counted_object.h" -#include "api/scoped_refptr.h" - -namespace rtc { - -$var n = 5 -$range i 0..n -$for i [[ -$range j 1..i - -template -class Callback$i { - public: - // Default copy operations are appropriate for this class. 
- Callback$i() {} - template Callback$i(const T& functor) - : helper_(new RefCountedObject< HelperImpl >(functor)) {} - R operator()($for j , [[P$j p$j]]) { - if (empty()) - return R(); - return helper_->Run($for j , [[p$j]]); - } - bool empty() const { return !helper_; } - - private: - struct Helper : RefCountInterface { - virtual ~Helper() {} - virtual R Run($for j , [[P$j p$j]]) = 0; - }; - template struct HelperImpl : Helper { - explicit HelperImpl(const T& functor) : functor_(functor) {} - virtual R Run($for j , [[P$j p$j]]) { - return functor_($for j , [[p$j]]); - } - T functor_; - }; - scoped_refptr helper_; -}; - -]] -} // namespace rtc - -#endif // RTC_BASE_CALLBACK_H_ diff --git a/rtc_base/callback_list.cc b/rtc_base/callback_list.cc new file mode 100644 index 0000000000..88d0b6fc71 --- /dev/null +++ b/rtc_base/callback_list.cc @@ -0,0 +1,107 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/callback_list.h" + +#include "rtc_base/checks.h" + +namespace webrtc { +namespace callback_list_impl { + +CallbackListReceivers::CallbackListReceivers() = default; + +CallbackListReceivers::~CallbackListReceivers() { + RTC_CHECK(!send_in_progress_); +} + +void CallbackListReceivers::RemoveReceivers(const void* removal_tag) { + RTC_CHECK(!send_in_progress_); + RTC_DCHECK(removal_tag != nullptr); + + // We divide the receivers_ vector into three regions: from right to left, the + // "keep" region, the "todo" region, and the "remove" region. The "todo" + // region initially covers the whole vector. + size_t first_todo = 0; // First element of the "todo" + // region. 
+ size_t first_remove = receivers_.size(); // First element of the "remove" + // region. + + // Loop until the "todo" region is empty. + while (first_todo != first_remove) { + if (receivers_[first_todo].removal_tag != removal_tag) { + // The first element of the "todo" region should be kept. Move the + // "keep"/"todo" boundary. + ++first_todo; + } else if (receivers_[first_remove - 1].removal_tag == removal_tag) { + // The last element of the "todo" region should be removed. Move the + // "todo"/"remove" boundary. + --first_remove; + } else { + // The first element of the "todo" region should be removed, and the last + // element of the "todo" region should be kept. Swap them, and then shrink + // the "todo" region from both ends. + RTC_DCHECK_NE(first_todo, first_remove - 1); + using std::swap; + swap(receivers_[first_todo], receivers_[first_remove - 1]); + RTC_DCHECK_NE(receivers_[first_todo].removal_tag, removal_tag); + ++first_todo; + RTC_DCHECK_EQ(receivers_[first_remove - 1].removal_tag, removal_tag); + --first_remove; + } + } + + // Discard the remove region. 
+ receivers_.resize(first_remove); +} + +void CallbackListReceivers::Foreach( + rtc::FunctionView fv) { + RTC_CHECK(!send_in_progress_); + send_in_progress_ = true; + for (auto& r : receivers_) { + fv(r.function); + } + send_in_progress_ = false; +} + +template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::TrivialUntypedFunctionArgs<1>); +template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::TrivialUntypedFunctionArgs<2>); +template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::TrivialUntypedFunctionArgs<3>); +template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::TrivialUntypedFunctionArgs<4>); +template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::NontrivialUntypedFunctionArgs); +template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::FunctionPointerUntypedFunctionArgs); + +template void CallbackListReceivers::AddReceiver( + UntypedFunction::TrivialUntypedFunctionArgs<1>); +template void CallbackListReceivers::AddReceiver( + UntypedFunction::TrivialUntypedFunctionArgs<2>); +template void CallbackListReceivers::AddReceiver( + UntypedFunction::TrivialUntypedFunctionArgs<3>); +template void CallbackListReceivers::AddReceiver( + UntypedFunction::TrivialUntypedFunctionArgs<4>); +template void CallbackListReceivers::AddReceiver( + UntypedFunction::NontrivialUntypedFunctionArgs); +template void CallbackListReceivers::AddReceiver( + UntypedFunction::FunctionPointerUntypedFunctionArgs); + +} // namespace callback_list_impl +} // namespace webrtc diff --git a/rtc_base/callback_list.h b/rtc_base/callback_list.h new file mode 100644 index 0000000000..18d48b02ee --- /dev/null +++ b/rtc_base/callback_list.h @@ -0,0 +1,214 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_CALLBACK_LIST_H_ +#define RTC_BASE_CALLBACK_LIST_H_ + +#include +#include + +#include "api/function_view.h" +#include "rtc_base/checks.h" +#include "rtc_base/system/assume.h" +#include "rtc_base/system/inline.h" +#include "rtc_base/untyped_function.h" + +namespace webrtc { +namespace callback_list_impl { + +class CallbackListReceivers { + public: + CallbackListReceivers(); + CallbackListReceivers(const CallbackListReceivers&) = delete; + CallbackListReceivers& operator=(const CallbackListReceivers&) = delete; + CallbackListReceivers(CallbackListReceivers&&) = delete; + CallbackListReceivers& operator=(CallbackListReceivers&&) = delete; + ~CallbackListReceivers(); + + template + RTC_NO_INLINE void AddReceiver(const void* removal_tag, + UntypedFunctionArgsT args) { + RTC_CHECK(!send_in_progress_); + RTC_DCHECK(removal_tag != nullptr); + receivers_.push_back({removal_tag, UntypedFunction::Create(args)}); + } + + template + RTC_NO_INLINE void AddReceiver(UntypedFunctionArgsT args) { + RTC_CHECK(!send_in_progress_); + receivers_.push_back({nullptr, UntypedFunction::Create(args)}); + } + + void RemoveReceivers(const void* removal_tag); + + void Foreach(rtc::FunctionView fv); + + private: + struct Callback { + const void* removal_tag; + UntypedFunction function; + }; + std::vector receivers_; + bool send_in_progress_ = false; +}; + +extern template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::TrivialUntypedFunctionArgs<1>); +extern template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::TrivialUntypedFunctionArgs<2>); +extern template void 
CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::TrivialUntypedFunctionArgs<3>); +extern template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::TrivialUntypedFunctionArgs<4>); +extern template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::NontrivialUntypedFunctionArgs); +extern template void CallbackListReceivers::AddReceiver( + const void*, + UntypedFunction::FunctionPointerUntypedFunctionArgs); + +extern template void CallbackListReceivers::AddReceiver( + UntypedFunction::TrivialUntypedFunctionArgs<1>); +extern template void CallbackListReceivers::AddReceiver( + UntypedFunction::TrivialUntypedFunctionArgs<2>); +extern template void CallbackListReceivers::AddReceiver( + UntypedFunction::TrivialUntypedFunctionArgs<3>); +extern template void CallbackListReceivers::AddReceiver( + UntypedFunction::TrivialUntypedFunctionArgs<4>); +extern template void CallbackListReceivers::AddReceiver( + UntypedFunction::NontrivialUntypedFunctionArgs); +extern template void CallbackListReceivers::AddReceiver( + UntypedFunction::FunctionPointerUntypedFunctionArgs); + +} // namespace callback_list_impl + +// A collection of receivers (callable objects) that can be called all at once. +// Optimized for minimal binary size. The template arguments dictate what +// signature the callbacks must have; for example, a CallbackList +// will require callbacks with signature void(int, float). +// +// CallbackList is neither copyable nor movable (could easily be made movable if +// necessary). Callbacks must be movable, but need not be copyable. +// +// Usage example: +// +// // Declaration (usually a member variable). +// CallbackList foo_; +// +// // Register callbacks. This can be done zero or more times. The +// // callbacks must accept the arguments types listed in the CallbackList's +// // template argument list, and must return void. +// foo_.AddReceiver([...](int a, float b) {...}); // Lambda. 
+// foo_.AddReceiver(SomeFunction); // Function pointer. +// +// // Call the zero or more receivers, one after the other. +// foo_.Send(17, 3.14); +// +// Callback lifetime considerations +// -------------------------------- +// +// CallbackList::AddReceiver() takes ownership of the given callback by moving +// it in place. The callback can be any callable object; in particular, it may +// have a nontrivial destructor, which will be run when the CallbackList is +// destroyed. The callback may thus access data via any type of smart pointer, +// expressing e.g. unique, shared, or weak ownership. Of course, if the data is +// guaranteed to outlive the callback, a plain raw pointer can be used. +// +// Take care when trying to have the callback own reference-counted data. The +// CallbackList will keep the callback alive, and the callback will keep its +// data alive, so as usual with reference-counted ownership, keep an eye out for +// cycles! +// +// Thread safety +// ------------- +// +// Like most C++ types, CallbackList is thread compatible: it's not safe to +// access it concurrently from multiple threads, but it can be made safe if it +// is protected by a mutex, for example. +// +// Excercise some care when deciding what mutexes to hold when you call +// CallbackList::Send(). In particular, do not hold mutexes that callbacks may +// need to grab. If a larger object has a CallbackList member and a single mutex +// that protects all of its data members, this may e.g. make it necessary to +// protect its CallbackList with a separate mutex; otherwise, there will be a +// deadlock if the callbacks try to access the object. +// +// CallbackList as a class data member +// ----------------------------------- +// +// CallbackList is a normal C++ data type, and should be private when it is a +// data member of a class. 
For thread safety reasons (see above), it is likely +// best to not have an accessor for the entire CallbackList, and instead only +// allow callers to add callbacks: +// +// template +// void AddFooCallback(F&& callback) { +// // Maybe grab a mutex here? +// foo_callbacks_.AddReceiver(std::forward(callback)); +// } +// +template +class CallbackList { + public: + CallbackList() = default; + CallbackList(const CallbackList&) = delete; + CallbackList& operator=(const CallbackList&) = delete; + CallbackList(CallbackList&&) = delete; + CallbackList& operator=(CallbackList&&) = delete; + + // Adds a new receiver. The receiver (a callable object or a function pointer) + // must be movable, but need not be copyable. Its call signature should be + // `void(ArgT...)`. The removal tag is a pointer to an arbitrary object that + // you own, and that will stay alive until the CallbackList is gone, or until + // all receivers using it as a removal tag have been removed; you can use it + // to remove the receiver. + template + void AddReceiver(const void* removal_tag, F&& f) { + receivers_.AddReceiver( + removal_tag, + UntypedFunction::PrepareArgs(std::forward(f))); + } + + // Adds a new receiver with no removal tag. + template + void AddReceiver(F&& f) { + receivers_.AddReceiver( + UntypedFunction::PrepareArgs(std::forward(f))); + } + + // Removes all receivers that were added with the given removal tag. + void RemoveReceivers(const void* removal_tag) { + receivers_.RemoveReceivers(removal_tag); + } + + // Calls all receivers with the given arguments. While the Send is in + // progress, no method calls are allowed; specifically, this means that the + // callbacks may not do anything with this CallbackList instance. + // + // Note: Receivers are called serially, but not necessarily in the same order + // they were added. + template + void Send(ArgU&&... 
args) { + receivers_.Foreach([&](UntypedFunction& f) { + f.Call(std::forward(args)...); + }); + } + + private: + callback_list_impl::CallbackListReceivers receivers_; +}; + +} // namespace webrtc + +#endif // RTC_BASE_CALLBACK_LIST_H_ diff --git a/rtc_base/callback_list_unittest.cc b/rtc_base/callback_list_unittest.cc new file mode 100644 index 0000000000..665d779739 --- /dev/null +++ b/rtc_base/callback_list_unittest.cc @@ -0,0 +1,256 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include +#include + +#include "api/function_view.h" +#include "rtc_base/callback_list.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +TEST(CallbackList, NoRecieverSingleMessageTest) { + CallbackList c; + + c.Send("message"); +} + +TEST(CallbackList, MultipleParameterMessageTest) { + CallbackList + c; + std::string str = "messege"; + int i = 10; + + c.Send(str, "message1", "message0", 123, &i, str); +} + +TEST(CallbackList, NoParameterMessageTest) { + CallbackList<> c; + + c.Send(); +} + +TEST(CallbackList, ReferenceTest) { + CallbackList c; + int index = 1; + + c.AddReceiver([](int& index) { index++; }); + c.Send(index); + + EXPECT_EQ(index, 2); +} + +enum State { + kNew, + kChecking, +}; + +TEST(CallbackList, SingleEnumValueTest) { + CallbackList c; + State s1 = kNew; + int index = 0; + + c.AddReceiver([&index](State s) { index++; }); + c.Send(s1); + + EXPECT_EQ(index, 1); +} + +TEST(CallbackList, SingleEnumReferenceTest) { + CallbackList c; + State s = kNew; + + c.AddReceiver([](State& s) { s = kChecking; }); + c.Send(s); + + EXPECT_EQ(s, kChecking); +} + +TEST(CallbackList, ConstReferenceTest) { + 
CallbackList c; + int i = 0; + int index = 1; + + c.AddReceiver([&i](const int& index) { i = index; }); + c.Send(index); + + EXPECT_EQ(i, 1); +} + +TEST(CallbackList, PointerTest) { + CallbackList c; + int index = 1; + + c.AddReceiver([](int* index) { (*index)++; }); + c.Send(&index); + + EXPECT_EQ(index, 2); +} + +TEST(CallbackList, CallByValue) { + CallbackList c; + int x = 17; + + c.AddReceiver([&x](int n) { x += n; }); + int y = 89; + c.Send(y); + + EXPECT_EQ(x, 106); +} + +void PlusOne(int& a) { + a++; +} + +TEST(CallbackList, FunctionPtrTest) { + CallbackList c; + int index = 1; + + c.AddReceiver(PlusOne); + c.Send(index); + + EXPECT_EQ(index, 2); +} + +struct LargeNonTrivial { + int a[17]; + + LargeNonTrivial() = default; + LargeNonTrivial(LargeNonTrivial&& m) {} + ~LargeNonTrivial() = default; + + void operator()(int& a) { a = 1; } +}; + +TEST(CallbackList, LargeNonTrivialTest) { + CallbackList c; + int i = 0; + static_assert(sizeof(LargeNonTrivial) > UntypedFunction::kInlineStorageSize, + ""); + c.AddReceiver(LargeNonTrivial()); + c.Send(i); + + EXPECT_EQ(i, 1); +} + +struct LargeTrivial { + int a[17]; + void operator()(int& x) { x = 1; } +}; + +TEST(CallbackList, LargeTrivial) { + CallbackList c; + LargeTrivial lt; + int i = 0; + + static_assert(sizeof(lt) > UntypedFunction::kInlineStorageSize, ""); + c.AddReceiver(lt); + c.Send(i); + + EXPECT_EQ(i, 1); +} + +struct OnlyNonTriviallyConstructible { + OnlyNonTriviallyConstructible() = default; + OnlyNonTriviallyConstructible(OnlyNonTriviallyConstructible&& m) {} + + void operator()(int& a) { a = 1; } +}; + +TEST(CallbackList, OnlyNonTriviallyMoveConstructible) { + CallbackList c; + int i = 0; + + c.AddReceiver(OnlyNonTriviallyConstructible()); + c.Send(i); + + EXPECT_EQ(i, 1); +} + +TEST(CallbackList, MultipleReceiverSendTest) { + CallbackList c; + std::function plus = PlusOne; + int index = 1; + + c.AddReceiver(plus); + c.AddReceiver([](int& i) { i--; }); + c.AddReceiver(plus); + c.AddReceiver(plus); + 
c.Send(index); + c.Send(index); + + EXPECT_EQ(index, 5); +} + +class A { + public: + void increment(int& i) const { i++; } +}; + +TEST(CallbackList, MemberFunctionTest) { + CallbackList c; + A a; + int index = 1; + + c.AddReceiver([&a](int& i) { a.increment(i); }); + c.Send(index); + + EXPECT_EQ(index, 2); +} + +// todo(glahiru): Add a test case to catch some error for Karl's first fix + +TEST(CallbackList, RemoveOneReceiver) { + int removal_tag[2]; + CallbackList<> c; + int accumulator = 0; + c.AddReceiver([&accumulator] { accumulator += 1; }); + c.AddReceiver(&removal_tag[0], [&accumulator] { accumulator += 10; }); + c.AddReceiver(&removal_tag[1], [&accumulator] { accumulator += 100; }); + c.Send(); + EXPECT_EQ(accumulator, 111); + c.RemoveReceivers(&removal_tag[0]); + c.Send(); + EXPECT_EQ(accumulator, 212); +} + +TEST(CallbackList, RemoveZeroReceivers) { + int removal_tag[3]; + CallbackList<> c; + int accumulator = 0; + c.AddReceiver([&accumulator] { accumulator += 1; }); + c.AddReceiver(&removal_tag[0], [&accumulator] { accumulator += 10; }); + c.AddReceiver(&removal_tag[1], [&accumulator] { accumulator += 100; }); + c.Send(); + EXPECT_EQ(accumulator, 111); + c.RemoveReceivers(&removal_tag[2]); + c.Send(); + EXPECT_EQ(accumulator, 222); +} + +TEST(CallbackList, RemoveManyReceivers) { + int removal_tag; + CallbackList<> c; + int accumulator = 0; + c.AddReceiver([&accumulator] { accumulator += 1; }); + c.AddReceiver(&removal_tag, [&accumulator] { accumulator += 10; }); + c.AddReceiver([&accumulator] { accumulator += 100; }); + c.AddReceiver(&removal_tag, [&accumulator] { accumulator += 1000; }); + c.Send(); + EXPECT_EQ(accumulator, 1111); + c.RemoveReceivers(&removal_tag); + c.Send(); + EXPECT_EQ(accumulator, 1212); +} + +} // namespace +} // namespace webrtc diff --git a/rtc_base/callback_unittest.cc b/rtc_base/callback_unittest.cc deleted file mode 100644 index 876729570c..0000000000 --- a/rtc_base/callback_unittest.cc +++ /dev/null @@ -1,147 +0,0 @@ -/* - * 
Copyright 2004 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "rtc_base/callback.h" - -#include "rtc_base/bind.h" -#include "rtc_base/keep_ref_until_done.h" -#include "rtc_base/ref_count.h" -#include "test/gtest.h" - -namespace rtc { - -namespace { - -void f() {} -int g() { - return 42; -} -int h(int x) { - return x * x; -} -void i(int& x) { - x *= x; -} // NOLINT: Testing refs - -struct BindTester { - int a() { return 24; } - int b(int x) const { return x * x; } -}; - -class RefCountedBindTester : public RefCountInterface { - public: - RefCountedBindTester() : count_(0) {} - void AddRef() const override { ++count_; } - RefCountReleaseStatus Release() const override { - --count_; - return count_ == 0 ? RefCountReleaseStatus::kDroppedLastRef - : RefCountReleaseStatus::kOtherRefsRemained; - } - int RefCount() const { return count_; } - - private: - mutable int count_; -}; - -} // namespace - -TEST(CallbackTest, VoidReturn) { - Callback0 cb; - EXPECT_TRUE(cb.empty()); - cb(); // Executing an empty callback should not crash. - cb = Callback0(&f); - EXPECT_FALSE(cb.empty()); - cb(); -} - -TEST(CallbackTest, IntReturn) { - Callback0 cb; - EXPECT_TRUE(cb.empty()); - cb = Callback0(&g); - EXPECT_FALSE(cb.empty()); - EXPECT_EQ(42, cb()); - EXPECT_EQ(42, cb()); -} - -TEST(CallbackTest, OneParam) { - Callback1 cb1(&h); - EXPECT_FALSE(cb1.empty()); - EXPECT_EQ(9, cb1(-3)); - EXPECT_EQ(100, cb1(10)); - - // Try clearing a callback. - cb1 = Callback1(); - EXPECT_TRUE(cb1.empty()); - - // Try a callback with a ref parameter. 
- Callback1 cb2(&i); - int x = 3; - cb2(x); - EXPECT_EQ(9, x); - cb2(x); - EXPECT_EQ(81, x); -} - -TEST(CallbackTest, WithBind) { - BindTester t; - Callback0 cb1 = Bind(&BindTester::a, &t); - EXPECT_EQ(24, cb1()); - EXPECT_EQ(24, cb1()); - cb1 = Bind(&BindTester::b, &t, 10); - EXPECT_EQ(100, cb1()); - EXPECT_EQ(100, cb1()); - cb1 = Bind(&BindTester::b, &t, 5); - EXPECT_EQ(25, cb1()); - EXPECT_EQ(25, cb1()); -} - -TEST(KeepRefUntilDoneTest, simple) { - RefCountedBindTester t; - EXPECT_EQ(0, t.RefCount()); - { - Callback0 cb = KeepRefUntilDone(&t); - EXPECT_EQ(1, t.RefCount()); - cb(); - EXPECT_EQ(1, t.RefCount()); - cb(); - EXPECT_EQ(1, t.RefCount()); - } - EXPECT_EQ(0, t.RefCount()); -} - -TEST(KeepRefUntilDoneTest, copy) { - RefCountedBindTester t; - EXPECT_EQ(0, t.RefCount()); - Callback0 cb2; - { - Callback0 cb = KeepRefUntilDone(&t); - EXPECT_EQ(1, t.RefCount()); - cb2 = cb; - } - EXPECT_EQ(1, t.RefCount()); - cb2 = Callback0(); - EXPECT_EQ(0, t.RefCount()); -} - -TEST(KeepRefUntilDoneTest, scopedref) { - RefCountedBindTester t; - EXPECT_EQ(0, t.RefCount()); - { - scoped_refptr t_scoped_ref(&t); - Callback0 cb = KeepRefUntilDone(t_scoped_ref); - t_scoped_ref = nullptr; - EXPECT_EQ(1, t.RefCount()); - cb(); - EXPECT_EQ(1, t.RefCount()); - } - EXPECT_EQ(0, t.RefCount()); -} - -} // namespace rtc diff --git a/rtc_base/checks.cc b/rtc_base/checks.cc index e5fc2ed416..239ea9f0da 100644 --- a/rtc_base/checks.cc +++ b/rtc_base/checks.cc @@ -36,6 +36,21 @@ #include "rtc_base/checks.h" namespace { + +RTC_NORETURN void WriteFatalLogAndAbort(const std::string& output) { + const char* output_c = output.c_str(); +#if defined(WEBRTC_ANDROID) + __android_log_print(ANDROID_LOG_ERROR, RTC_LOG_TAG_ANDROID, "%s\n", output_c); +#endif + fflush(stdout); + fprintf(stderr, "%s", output_c); + fflush(stderr); +#if defined(WEBRTC_WIN) + DebugBreak(); +#endif + abort(); +} + #if defined(__GNUC__) __attribute__((__format__(__printf__, 2, 3))) #endif @@ -149,19 +164,7 @@ RTC_NORETURN void 
FatalLog(const char* file, va_end(args); - const char* output = s.c_str(); - -#if defined(WEBRTC_ANDROID) - __android_log_print(ANDROID_LOG_ERROR, RTC_LOG_TAG_ANDROID, "%s\n", output); -#endif - - fflush(stdout); - fprintf(stderr, "%s", output); - fflush(stderr); -#if defined(WEBRTC_WIN) - DebugBreak(); -#endif - abort(); + WriteFatalLogAndAbort(s); } #else // RTC_CHECK_MSG_ENABLED RTC_NORETURN void FatalLog(const char* file, int line) { @@ -174,21 +177,39 @@ RTC_NORETURN void FatalLog(const char* file, int line) { "# Check failed.\n" "# ", file, line, LAST_SYSTEM_ERROR); - const char* output = s.c_str(); + WriteFatalLogAndAbort(s); +} +#endif // RTC_CHECK_MSG_ENABLED -#if defined(WEBRTC_ANDROID) - __android_log_print(ANDROID_LOG_ERROR, RTC_LOG_TAG_ANDROID, "%s\n", output); -#endif +#if RTC_DCHECK_IS_ON - fflush(stdout); - fprintf(stderr, "%s", output); - fflush(stderr); -#if defined(WEBRTC_WIN) - DebugBreak(); -#endif - abort(); +RTC_NORETURN void UnreachableCodeReached(const char* file, int line) { + std::string s; + AppendFormat(&s, + "\n\n" + "#\n" + "# Unreachable code reached: %s, line %d\n" + "# last system error: %u\n" + "# ", + file, line, LAST_SYSTEM_ERROR); + WriteFatalLogAndAbort(s); } -#endif // RTC_CHECK_MSG_ENABLED + +#else // !RTC_DCHECK_IS_ON + +RTC_NORETURN void UnreachableCodeReached() { + std::string s; + AppendFormat(&s, + "\n\n" + "#\n" + "# Unreachable code reached (file and line unknown)\n" + "# last system error: %u\n" + "# ", + LAST_SYSTEM_ERROR); + WriteFatalLogAndAbort(s); +} + +#endif // !RTC_DCHECK_IS_ON } // namespace webrtc_checks_impl } // namespace rtc diff --git a/rtc_base/checks.h b/rtc_base/checks.h index 2fde3f6640..21fca7e40f 100644 --- a/rtc_base/checks.h +++ b/rtc_base/checks.h @@ -69,7 +69,7 @@ RTC_NORETURN void rtc_FatalMessage(const char* file, int line, const char* msg); // the reason that it's better to terminate might simply be that the error // handling code isn't in place yet; in production, the reason might be that 
// the author of the code truly believes that x will always be true, but that -// she recognizes that if she is wrong, abrupt and unpleasant process +// they recognizes that if they are wrong, abrupt and unpleasant process // termination is still better than carrying on with the assumption violated. // // RTC_CHECK always evaluates its argument, so it's OK for x to have side @@ -95,7 +95,7 @@ RTC_NORETURN void rtc_FatalMessage(const char* file, int line, const char* msg); // messages if the condition doesn't hold. Prefer them to raw RTC_CHECK and // RTC_DCHECK. // -// - FATAL() aborts unconditionally. +// - RTC_FATAL() aborts unconditionally. namespace rtc { namespace webrtc_checks_impl { @@ -338,6 +338,22 @@ class FatalLogCall final { const char* message_; }; +#if RTC_DCHECK_IS_ON + +// Be helpful, and include file and line in the RTC_CHECK_NOTREACHED error +// message. +#define RTC_UNREACHABLE_FILE_AND_LINE_CALL_ARGS __FILE__, __LINE__ +RTC_NORETURN RTC_EXPORT void UnreachableCodeReached(const char* file, int line); + +#else + +// Be mindful of binary size, and don't include file and line in the +// RTC_CHECK_NOTREACHED error message. +#define RTC_UNREACHABLE_FILE_AND_LINE_CALL_ARGS +RTC_NORETURN RTC_EXPORT void UnreachableCodeReached(); + +#endif + } // namespace webrtc_checks_impl // The actual stream used isn't important. We reference |ignored| in the code @@ -430,8 +446,15 @@ class FatalLogCall final { #define RTC_UNREACHABLE_CODE_HIT false #define RTC_NOTREACHED() RTC_DCHECK(RTC_UNREACHABLE_CODE_HIT) -// TODO(bugs.webrtc.org/8454): Add an RTC_ prefix or rename differently. -#define FATAL() \ +// Kills the process with an error message. Never returns. Use when you wish to +// assert that a point in the code is never reached. 
+#define RTC_CHECK_NOTREACHED() \ + do { \ + ::rtc::webrtc_checks_impl::UnreachableCodeReached( \ + RTC_UNREACHABLE_FILE_AND_LINE_CALL_ARGS); \ + } while (0) + +#define RTC_FATAL() \ ::rtc::webrtc_checks_impl::FatalLogCall(__FILE__, __LINE__, \ "FATAL()") & \ ::rtc::webrtc_checks_impl::LogStreamer<>() diff --git a/rtc_base/checks_unittest.cc b/rtc_base/checks_unittest.cc index e6e094e597..95deba9f1c 100644 --- a/rtc_base/checks_unittest.cc +++ b/rtc_base/checks_unittest.cc @@ -19,9 +19,9 @@ TEST(ChecksTest, ExpressionNotEvaluatedWhenCheckPassing) { } #if GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -TEST(ChecksTest, Checks) { +TEST(ChecksDeathTest, Checks) { #if RTC_CHECK_MSG_ENABLED - EXPECT_DEATH(FATAL() << "message", + EXPECT_DEATH(RTC_FATAL() << "message", "\n\n#\n" "# Fatal error in: \\S+, line \\w+\n" "# last system error: \\w+\n" @@ -45,7 +45,7 @@ TEST(ChecksTest, Checks) { "# Check failed: false\n" "# Hi there!"); #else - EXPECT_DEATH(FATAL() << "message", + EXPECT_DEATH(RTC_FATAL() << "message", "\n\n#\n" "# Fatal error in: \\S+, line \\w+\n" "# last system error: \\w+\n" diff --git a/rtc_base/constructor_magic.h b/rtc_base/constructor_magic.h index e63c2ff628..8d12a7b135 100644 --- a/rtc_base/constructor_magic.h +++ b/rtc_base/constructor_magic.h @@ -11,24 +11,10 @@ #ifndef RTC_BASE_CONSTRUCTOR_MAGIC_H_ #define RTC_BASE_CONSTRUCTOR_MAGIC_H_ -// Put this in the declarations for a class to be unassignable. -#define RTC_DISALLOW_ASSIGN(TypeName) \ - TypeName& operator=(const TypeName&) = delete - // A macro to disallow the copy constructor and operator= functions. This should // be used in the declarations for a class. #define RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) \ TypeName(const TypeName&) = delete; \ - RTC_DISALLOW_ASSIGN(TypeName) - -// A macro to disallow all the implicit constructors, namely the default -// constructor, copy constructor and operator= functions. 
-// -// This should be used in the declarations for a class that wants to prevent -// anyone from instantiating it. This is especially useful for classes -// containing only static methods. -#define RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \ - TypeName() = delete; \ - RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) + TypeName& operator=(const TypeName&) = delete #endif // RTC_BASE_CONSTRUCTOR_MAGIC_H_ diff --git a/rtc_base/containers/BUILD.gn b/rtc_base/containers/BUILD.gn new file mode 100644 index 0000000000..f303e706e4 --- /dev/null +++ b/rtc_base/containers/BUILD.gn @@ -0,0 +1,59 @@ +# Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../webrtc.gni") + +rtc_library("flat_containers_internal") { + sources = [ + "as_const.h", + "flat_tree.cc", + "flat_tree.h", + "identity.h", + "invoke.h", + "move_only_int.h", + "not_fn.h", + "void_t.h", + ] + deps = [ + "..:checks", + "../system:no_unique_address", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] + visibility = [ ":*" ] +} + +rtc_source_set("flat_set") { + sources = [ "flat_set.h" ] + deps = [ ":flat_containers_internal" ] +} + +rtc_source_set("flat_map") { + sources = [ "flat_map.h" ] + deps = [ + ":flat_containers_internal", + "..:checks", + ] +} + +rtc_library("unittests") { + testonly = true + sources = [ + "flat_map_unittest.cc", + "flat_set_unittest.cc", + "flat_tree_unittest.cc", + ] + deps = [ + ":flat_containers_internal", + ":flat_map", + ":flat_set", + "../../test:test_support", + "//testing/gmock:gmock", + "//testing/gtest:gtest", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container" ] +} diff --git 
a/rtc_base/containers/as_const.h b/rtc_base/containers/as_const.h new file mode 100644 index 0000000000..a41b3bc378 --- /dev/null +++ b/rtc_base/containers/as_const.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_AS_CONST_H_ +#define RTC_BASE_CONTAINERS_AS_CONST_H_ + +#include + +namespace webrtc { + +// C++14 implementation of C++17's std::as_const(): +// https://en.cppreference.com/w/cpp/utility/as_const +template +constexpr std::add_const_t& as_const(T& t) noexcept { + return t; +} + +template +void as_const(const T&& t) = delete; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_AS_CONST_H_ diff --git a/rtc_base/containers/flat_map.h b/rtc_base/containers/flat_map.h new file mode 100644 index 0000000000..1dfae51655 --- /dev/null +++ b/rtc_base/containers/flat_map.h @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. 
+ +#ifndef RTC_BASE_CONTAINERS_FLAT_MAP_H_ +#define RTC_BASE_CONTAINERS_FLAT_MAP_H_ + +#include +#include +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/containers/flat_tree.h" + +namespace webrtc { + +namespace flat_containers_internal { + +// An implementation of the flat_tree GetKeyFromValue template parameter that +// extracts the key as the first element of a pair. +struct GetFirst { + template + constexpr const Key& operator()(const std::pair& p) const { + return p.first; + } +}; + +} // namespace flat_containers_internal + +// flat_map is a container with a std::map-like interface that stores its +// contents in a sorted container, by default a vector. +// +// Its implementation mostly tracks the corresponding standardization proposal +// https://wg21.link/P0429, except that the storage of keys and values is not +// split. +// +// PROS +// +// - Good memory locality. +// - Low overhead, especially for smaller maps. +// - Performance is good for more workloads than you might expect (see +// //base/containers/README.md in Chromium repository) +// - Supports C++14 map interface. +// +// CONS +// +// - Inserts and removals are O(n). +// +// IMPORTANT NOTES +// +// - Iterators are invalidated across mutations. This means that the following +// line of code has undefined behavior since adding a new element could +// resize the container, invalidating all iterators: +// container["new element"] = it.second; +// - If possible, construct a flat_map in one operation by inserting into +// a container and moving that container into the flat_map constructor. +// +// QUICK REFERENCE +// +// Most of the core functionality is inherited from flat_tree. Please see +// flat_tree.h for more details for most of these functions. 
As a quick +// reference, the functions available are: +// +// Constructors (inputs need not be sorted): +// flat_map(const flat_map&); +// flat_map(flat_map&&); +// flat_map(InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_map(const container_type& items, +// const Compare& compare = Compare()); +// flat_map(container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. +// flat_map(std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Constructors (inputs need to be sorted): +// flat_map(sorted_unique_t, +// InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_map(sorted_unique_t, +// const container_type& items, +// const Compare& compare = Compare()); +// flat_map(sorted_unique_t, +// container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. +// flat_map(sorted_unique_t, +// std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Assignment functions: +// flat_map& operator=(const flat_map&); +// flat_map& operator=(flat_map&&); +// flat_map& operator=(initializer_list); +// +// Memory management functions: +// void reserve(size_t); +// size_t capacity() const; +// void shrink_to_fit(); +// +// Size management functions: +// void clear(); +// size_t size() const; +// size_t max_size() const; +// bool empty() const; +// +// Iterator functions: +// iterator begin(); +// const_iterator begin() const; +// const_iterator cbegin() const; +// iterator end(); +// const_iterator end() const; +// const_iterator cend() const; +// reverse_iterator rbegin(); +// const reverse_iterator rbegin() const; +// const_reverse_iterator crbegin() const; +// reverse_iterator rend(); +// const_reverse_iterator rend() const; +// const_reverse_iterator crend() const; +// +// Insert and accessor functions: +// mapped_type& operator[](const key_type&); +// mapped_type& operator[](key_type&&); +// mapped_type& at(const K&); 
+// const mapped_type& at(const K&) const; +// pair insert(const value_type&); +// pair insert(value_type&&); +// iterator insert(const_iterator hint, const value_type&); +// iterator insert(const_iterator hint, value_type&&); +// void insert(InputIterator first, InputIterator last); +// pair insert_or_assign(K&&, M&&); +// iterator insert_or_assign(const_iterator hint, K&&, M&&); +// pair emplace(Args&&...); +// iterator emplace_hint(const_iterator, Args&&...); +// pair try_emplace(K&&, Args&&...); +// iterator try_emplace(const_iterator hint, K&&, Args&&...); + +// Underlying type functions: +// container_type extract() &&; +// void replace(container_type&&); +// +// Erase functions: +// iterator erase(iterator); +// iterator erase(const_iterator); +// iterator erase(const_iterator first, const_iterator& last); +// template size_t erase(const K& key); +// +// Comparators (see std::map documentation). +// key_compare key_comp() const; +// value_compare value_comp() const; +// +// Search functions: +// template size_t count(const K&) const; +// template iterator find(const K&); +// template const_iterator find(const K&) const; +// template bool contains(const K&) const; +// template pair equal_range(const K&); +// template iterator lower_bound(const K&); +// template const_iterator lower_bound(const K&) const; +// template iterator upper_bound(const K&); +// template const_iterator upper_bound(const K&) const; +// +// General functions: +// void swap(flat_map&); +// +// Non-member operators: +// bool operator==(const flat_map&, const flat_map); +// bool operator!=(const flat_map&, const flat_map); +// bool operator<(const flat_map&, const flat_map); +// bool operator>(const flat_map&, const flat_map); +// bool operator>=(const flat_map&, const flat_map); +// bool operator<=(const flat_map&, const flat_map); +// +template , + class Container = std::vector>> +class flat_map : public ::webrtc::flat_containers_internal::flat_tree< + Key, + 
flat_containers_internal::GetFirst, + Compare, + Container> { + private: + using tree = typename ::webrtc::flat_containers_internal:: + flat_tree; + + public: + using key_type = typename tree::key_type; + using mapped_type = Mapped; + using value_type = typename tree::value_type; + using reference = typename Container::reference; + using const_reference = typename Container::const_reference; + using size_type = typename Container::size_type; + using difference_type = typename Container::difference_type; + using iterator = typename tree::iterator; + using const_iterator = typename tree::const_iterator; + using reverse_iterator = typename tree::reverse_iterator; + using const_reverse_iterator = typename tree::const_reverse_iterator; + using container_type = typename tree::container_type; + + // -------------------------------------------------------------------------- + // Lifetime and assignments. + // + // Note: we explicitly bring operator= in because otherwise + // flat_map<...> x; + // x = {...}; + // Would first create a flat_map and then move assign it. This most likely + // would be optimized away but still affects our debug builds. + + using tree::tree; + using tree::operator=; + + // Out-of-bound calls to at() will CHECK. + template + mapped_type& at(const K& key); + template + const mapped_type& at(const K& key) const; + + // -------------------------------------------------------------------------- + // Map-specific insert operations. + // + // Normal insert() functions are inherited from flat_tree. + // + // Assume that every operation invalidates iterators and references. + // Insertion of one element can take O(size). + + mapped_type& operator[](const key_type& key); + mapped_type& operator[](key_type&& key); + + template + std::pair insert_or_assign(K&& key, M&& obj); + template + iterator insert_or_assign(const_iterator hint, K&& key, M&& obj); + + template + std::enable_if_t::value, + std::pair> + try_emplace(K&& key, Args&&... 
args); + + template + std::enable_if_t::value, iterator> + try_emplace(const_iterator hint, K&& key, Args&&... args); + + // -------------------------------------------------------------------------- + // General operations. + // + // Assume that swap invalidates iterators and references. + + void swap(flat_map& other) noexcept; + + friend void swap(flat_map& lhs, flat_map& rhs) noexcept { lhs.swap(rhs); } +}; + +// ---------------------------------------------------------------------------- +// Lookups. + +template +template +auto flat_map::at(const K& key) + -> mapped_type& { + iterator found = tree::find(key); + RTC_CHECK(found != tree::end()); + return found->second; +} + +template +template +auto flat_map::at(const K& key) const + -> const mapped_type& { + const_iterator found = tree::find(key); + RTC_CHECK(found != tree::cend()); + return found->second; +} + +// ---------------------------------------------------------------------------- +// Insert operations. + +template +auto flat_map::operator[](const key_type& key) + -> mapped_type& { + iterator found = tree::lower_bound(key); + if (found == tree::end() || tree::key_comp()(key, found->first)) + found = tree::unsafe_emplace(found, key, mapped_type()); + return found->second; +} + +template +auto flat_map::operator[](key_type&& key) + -> mapped_type& { + iterator found = tree::lower_bound(key); + if (found == tree::end() || tree::key_comp()(key, found->first)) + found = tree::unsafe_emplace(found, std::move(key), mapped_type()); + return found->second; +} + +template +template +auto flat_map::insert_or_assign(K&& key, + M&& obj) + -> std::pair { + auto result = + tree::emplace_key_args(key, std::forward(key), std::forward(obj)); + if (!result.second) + result.first->second = std::forward(obj); + return result; +} + +template +template +auto flat_map::insert_or_assign( + const_iterator hint, + K&& key, + M&& obj) -> iterator { + auto result = tree::emplace_hint_key_args(hint, key, std::forward(key), + 
std::forward(obj)); + if (!result.second) + result.first->second = std::forward(obj); + return result.first; +} + +template +template +auto flat_map::try_emplace(K&& key, + Args&&... args) + -> std::enable_if_t::value, + std::pair> { + return tree::emplace_key_args( + key, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); +} + +template +template +auto flat_map::try_emplace(const_iterator hint, + K&& key, + Args&&... args) + -> std::enable_if_t::value, iterator> { + return tree::emplace_hint_key_args( + hint, key, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)) + .first; +} + +// ---------------------------------------------------------------------------- +// General operations. + +template +void flat_map::swap(flat_map& other) noexcept { + tree::swap(other); +} + +// Erases all elements that match predicate. It has O(size) complexity. +// +// flat_map last_times; +// ... +// EraseIf(last_times, +// [&](const auto& element) { return now - element.second > kLimit; }); + +// NOLINTNEXTLINE(misc-unused-using-decls) +using ::webrtc::flat_containers_internal::EraseIf; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_FLAT_MAP_H_ diff --git a/rtc_base/containers/flat_map_unittest.cc b/rtc_base/containers/flat_map_unittest.cc new file mode 100644 index 0000000000..8f0b77fc30 --- /dev/null +++ b/rtc_base/containers/flat_map_unittest.cc @@ -0,0 +1,454 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. 
+ +#include "rtc_base/containers/flat_map.h" + +#include +#include +#include + +#include "rtc_base/containers/move_only_int.h" +#include "test/gmock.h" +#include "test/gtest.h" + +// A flat_map is basically a interface to flat_tree. So several basic +// operations are tested to make sure things are set up properly, but the bulk +// of the tests are in flat_tree_unittests.cc. + +using ::testing::ElementsAre; + +namespace webrtc { + +namespace { + +struct Unsortable { + int value; +}; + +bool operator==(const Unsortable& lhs, const Unsortable& rhs) { + return lhs.value == rhs.value; +} + +bool operator<(const Unsortable& lhs, const Unsortable& rhs) = delete; +bool operator<=(const Unsortable& lhs, const Unsortable& rhs) = delete; +bool operator>(const Unsortable& lhs, const Unsortable& rhs) = delete; +bool operator>=(const Unsortable& lhs, const Unsortable& rhs) = delete; + +TEST(FlatMap, IncompleteType) { + struct A { + using Map = flat_map; + int data; + Map set_with_incomplete_type; + Map::iterator it; + Map::const_iterator cit; + + // We do not declare operator< because clang complains that it's unused. 
+ }; + + A a; +} + +TEST(FlatMap, RangeConstructor) { + flat_map::value_type input_vals[] = { + {1, 1}, {1, 2}, {1, 3}, {2, 1}, {2, 2}, {2, 3}, {3, 1}, {3, 2}, {3, 3}}; + + flat_map first(std::begin(input_vals), std::end(input_vals)); + EXPECT_THAT(first, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 1), + std::make_pair(3, 1))); +} + +TEST(FlatMap, MoveConstructor) { + using pair = std::pair; + + flat_map original; + original.insert(pair(MoveOnlyInt(1), MoveOnlyInt(1))); + original.insert(pair(MoveOnlyInt(2), MoveOnlyInt(2))); + original.insert(pair(MoveOnlyInt(3), MoveOnlyInt(3))); + original.insert(pair(MoveOnlyInt(4), MoveOnlyInt(4))); + + flat_map moved(std::move(original)); + + EXPECT_EQ(1U, moved.count(MoveOnlyInt(1))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(2))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(3))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(4))); +} + +TEST(FlatMap, VectorConstructor) { + using IntPair = std::pair; + using IntMap = flat_map; + std::vector vect{{1, 1}, {1, 2}, {2, 1}}; + IntMap map(std::move(vect)); + EXPECT_THAT(map, ElementsAre(IntPair(1, 1), IntPair(2, 1))); +} + +TEST(FlatMap, InitializerListConstructor) { + flat_map cont( + {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {1, 2}, {10, 10}, {8, 8}}); + EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2), + std::make_pair(3, 3), std::make_pair(4, 4), + std::make_pair(5, 5), std::make_pair(8, 8), + std::make_pair(10, 10))); +} + +TEST(FlatMap, SortedRangeConstructor) { + using PairType = std::pair; + using MapType = flat_map; + MapType::value_type input_vals[] = {{1, {1}}, {2, {1}}, {3, {1}}}; + MapType map(sorted_unique, std::begin(input_vals), std::end(input_vals)); + EXPECT_THAT( + map, ElementsAre(PairType(1, {1}), PairType(2, {1}), PairType(3, {1}))); +} + +TEST(FlatMap, SortedCopyFromVectorConstructor) { + using PairType = std::pair; + using MapType = flat_map; + std::vector vect{{1, {1}}, {2, {1}}}; + MapType map(sorted_unique, vect); + EXPECT_THAT(map, 
ElementsAre(PairType(1, {1}), PairType(2, {1}))); +} + +TEST(FlatMap, SortedMoveFromVectorConstructor) { + using PairType = std::pair; + using MapType = flat_map; + std::vector vect{{1, {1}}, {2, {1}}}; + MapType map(sorted_unique, std::move(vect)); + EXPECT_THAT(map, ElementsAre(PairType(1, {1}), PairType(2, {1}))); +} + +TEST(FlatMap, SortedInitializerListConstructor) { + using PairType = std::pair; + flat_map map( + sorted_unique, + {{1, {1}}, {2, {2}}, {3, {3}}, {4, {4}}, {5, {5}}, {8, {8}}, {10, {10}}}); + EXPECT_THAT(map, + ElementsAre(PairType(1, {1}), PairType(2, {2}), PairType(3, {3}), + PairType(4, {4}), PairType(5, {5}), PairType(8, {8}), + PairType(10, {10}))); +} + +TEST(FlatMap, InitializerListAssignment) { + flat_map cont; + cont = {{1, 1}, {2, 2}}; + EXPECT_THAT(cont, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2))); +} + +TEST(FlatMap, InsertFindSize) { + flat_map s; + s.insert(std::make_pair(1, 1)); + s.insert(std::make_pair(1, 1)); + s.insert(std::make_pair(2, 2)); + + EXPECT_EQ(2u, s.size()); + EXPECT_EQ(std::make_pair(1, 1), *s.find(1)); + EXPECT_EQ(std::make_pair(2, 2), *s.find(2)); + EXPECT_EQ(s.end(), s.find(7)); +} + +TEST(FlatMap, CopySwap) { + flat_map original; + original.insert({1, 1}); + original.insert({2, 2}); + EXPECT_THAT(original, + ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2))); + + flat_map copy(original); + EXPECT_THAT(copy, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2))); + + copy.erase(copy.begin()); + copy.insert({10, 10}); + EXPECT_THAT(copy, ElementsAre(std::make_pair(2, 2), std::make_pair(10, 10))); + + original.swap(copy); + EXPECT_THAT(original, + ElementsAre(std::make_pair(2, 2), std::make_pair(10, 10))); + EXPECT_THAT(copy, ElementsAre(std::make_pair(1, 1), std::make_pair(2, 2))); +} + +// operator[](const Key&) +TEST(FlatMap, SubscriptConstKey) { + flat_map m; + + // Default construct elements that don't exist yet. 
+ int& s = m["a"]; + EXPECT_EQ(0, s); + EXPECT_EQ(1u, m.size()); + + // The returned mapped reference should refer into the map. + s = 22; + EXPECT_EQ(22, m["a"]); + + // Overwrite existing elements. + m["a"] = 44; + EXPECT_EQ(44, m["a"]); +} + +// operator[](Key&&) +TEST(FlatMap, SubscriptMoveOnlyKey) { + flat_map m; + + // Default construct elements that don't exist yet. + int& s = m[MoveOnlyInt(1)]; + EXPECT_EQ(0, s); + EXPECT_EQ(1u, m.size()); + + // The returned mapped reference should refer into the map. + s = 22; + EXPECT_EQ(22, m[MoveOnlyInt(1)]); + + // Overwrite existing elements. + m[MoveOnlyInt(1)] = 44; + EXPECT_EQ(44, m[MoveOnlyInt(1)]); +} + +// Mapped& at(const Key&) +// const Mapped& at(const Key&) const +TEST(FlatMap, AtFunction) { + flat_map m = {{1, "a"}, {2, "b"}}; + + // Basic Usage. + EXPECT_EQ("a", m.at(1)); + EXPECT_EQ("b", m.at(2)); + + // Const reference works. + const std::string& const_ref = webrtc::as_const(m).at(1); + EXPECT_EQ("a", const_ref); + + // Reference works, can operate on the string. + m.at(1)[0] = 'x'; + EXPECT_EQ("x", m.at(1)); + + // Out-of-bounds will CHECK. + EXPECT_DEATH_IF_SUPPORTED(m.at(-1), ""); + EXPECT_DEATH_IF_SUPPORTED({ m.at(-1)[0] = 'z'; }, ""); + + // Heterogeneous look-up works. + flat_map m2 = {{"a", 1}, {"b", 2}}; + EXPECT_EQ(1, m2.at(absl::string_view("a"))); + EXPECT_EQ(2, webrtc::as_const(m2).at(absl::string_view("b"))); +} + +// insert_or_assign(K&&, M&&) +TEST(FlatMap, InsertOrAssignMoveOnlyKey) { + flat_map m; + + // Initial insertion should return an iterator to the element and set the + // second pair member to |true|. The inserted key and value should be moved + // from. 
+ MoveOnlyInt key(1); + MoveOnlyInt val(22); + auto result = m.insert_or_assign(std::move(key), std::move(val)); + EXPECT_EQ(1, result.first->first.data()); + EXPECT_EQ(22, result.first->second.data()); + EXPECT_TRUE(result.second); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(0, key.data()); // moved from + EXPECT_EQ(0, val.data()); // moved from + + // Second call with same key should result in an assignment, overwriting the + // old value. Assignment should be indicated by setting the second pair member + // to |false|. Only the inserted value should be moved from, the key should be + // left intact. + key = MoveOnlyInt(1); + val = MoveOnlyInt(44); + result = m.insert_or_assign(std::move(key), std::move(val)); + EXPECT_EQ(1, result.first->first.data()); + EXPECT_EQ(44, result.first->second.data()); + EXPECT_FALSE(result.second); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(1, key.data()); // not moved from + EXPECT_EQ(0, val.data()); // moved from + + // Check that random insertion results in sorted range. + flat_map map; + for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) { + map.insert_or_assign(MoveOnlyInt(i), i); + EXPECT_TRUE(absl::c_is_sorted(map)); + } +} + +// insert_or_assign(const_iterator hint, K&&, M&&) +TEST(FlatMap, InsertOrAssignMoveOnlyKeyWithHint) { + flat_map m; + + // Initial insertion should return an iterator to the element. The inserted + // key and value should be moved from. + MoveOnlyInt key(1); + MoveOnlyInt val(22); + auto result = m.insert_or_assign(m.end(), std::move(key), std::move(val)); + EXPECT_EQ(1, result->first.data()); + EXPECT_EQ(22, result->second.data()); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(0, key.data()); // moved from + EXPECT_EQ(0, val.data()); // moved from + + // Second call with same key should result in an assignment, overwriting the + // old value. Only the inserted value should be moved from, the key should be + // left intact. 
+ key = MoveOnlyInt(1); + val = MoveOnlyInt(44); + result = m.insert_or_assign(m.end(), std::move(key), std::move(val)); + EXPECT_EQ(1, result->first.data()); + EXPECT_EQ(44, result->second.data()); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(1, key.data()); // not moved from + EXPECT_EQ(0, val.data()); // moved from + + // Check that random insertion results in sorted range. + flat_map map; + for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) { + map.insert_or_assign(map.end(), MoveOnlyInt(i), i); + EXPECT_TRUE(absl::c_is_sorted(map)); + } +} + +// try_emplace(K&&, Args&&...) +TEST(FlatMap, TryEmplaceMoveOnlyKey) { + flat_map> m; + + // Trying to emplace into an empty map should succeed. Insertion should return + // an iterator to the element and set the second pair member to |true|. The + // inserted key and value should be moved from. + MoveOnlyInt key(1); + MoveOnlyInt val1(22); + MoveOnlyInt val2(44); + // Test piecewise construction of mapped_type. + auto result = m.try_emplace(std::move(key), std::move(val1), std::move(val2)); + EXPECT_EQ(1, result.first->first.data()); + EXPECT_EQ(22, result.first->second.first.data()); + EXPECT_EQ(44, result.first->second.second.data()); + EXPECT_TRUE(result.second); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(0, key.data()); // moved from + EXPECT_EQ(0, val1.data()); // moved from + EXPECT_EQ(0, val2.data()); // moved from + + // Second call with same key should result in a no-op, returning an iterator + // to the existing element and returning false as the second pair member. + // Key and values that were attempted to be inserted should be left intact. + key = MoveOnlyInt(1); + auto paired_val = std::make_pair(MoveOnlyInt(33), MoveOnlyInt(55)); + // Test construction of mapped_type from pair. 
+ result = m.try_emplace(std::move(key), std::move(paired_val)); + EXPECT_EQ(1, result.first->first.data()); + EXPECT_EQ(22, result.first->second.first.data()); + EXPECT_EQ(44, result.first->second.second.data()); + EXPECT_FALSE(result.second); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(1, key.data()); // not moved from + EXPECT_EQ(33, paired_val.first.data()); // not moved from + EXPECT_EQ(55, paired_val.second.data()); // not moved from + + // Check that random insertion results in sorted range. + flat_map map; + for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) { + map.try_emplace(MoveOnlyInt(i), i); + EXPECT_TRUE(absl::c_is_sorted(map)); + } +} + +// try_emplace(const_iterator hint, K&&, Args&&...) +TEST(FlatMap, TryEmplaceMoveOnlyKeyWithHint) { + flat_map> m; + + // Trying to emplace into an empty map should succeed. Insertion should return + // an iterator to the element. The inserted key and value should be moved + // from. + MoveOnlyInt key(1); + MoveOnlyInt val1(22); + MoveOnlyInt val2(44); + // Test piecewise construction of mapped_type. + auto result = + m.try_emplace(m.end(), std::move(key), std::move(val1), std::move(val2)); + EXPECT_EQ(1, result->first.data()); + EXPECT_EQ(22, result->second.first.data()); + EXPECT_EQ(44, result->second.second.data()); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(0, key.data()); // moved from + EXPECT_EQ(0, val1.data()); // moved from + EXPECT_EQ(0, val2.data()); // moved from + + // Second call with same key should result in a no-op, returning an iterator + // to the existing element. Key and values that were attempted to be inserted + // should be left intact. + key = MoveOnlyInt(1); + val1 = MoveOnlyInt(33); + val2 = MoveOnlyInt(55); + auto paired_val = std::make_pair(MoveOnlyInt(33), MoveOnlyInt(55)); + // Test construction of mapped_type from pair. 
+ result = m.try_emplace(m.end(), std::move(key), std::move(paired_val)); + EXPECT_EQ(1, result->first.data()); + EXPECT_EQ(22, result->second.first.data()); + EXPECT_EQ(44, result->second.second.data()); + EXPECT_EQ(1u, m.size()); + EXPECT_EQ(1, key.data()); // not moved from + EXPECT_EQ(33, paired_val.first.data()); // not moved from + EXPECT_EQ(55, paired_val.second.data()); // not moved from + + // Check that random insertion results in sorted range. + flat_map map; + for (int i : {3, 1, 5, 6, 8, 7, 0, 9, 4, 2}) { + map.try_emplace(map.end(), MoveOnlyInt(i), i); + EXPECT_TRUE(absl::c_is_sorted(map)); + } +} + +TEST(FlatMap, UsingTransparentCompare) { + using ExplicitInt = MoveOnlyInt; + flat_map m; + const auto& m1 = m; + int x = 0; + + // Check if we can use lookup functions without converting to key_type. + // Correctness is checked in flat_tree tests. + m.count(x); + m1.count(x); + m.find(x); + m1.find(x); + m.equal_range(x); + m1.equal_range(x); + m.lower_bound(x); + m1.lower_bound(x); + m.upper_bound(x); + m1.upper_bound(x); + m.erase(x); + + // Check if we broke overload resolution. 
+  m.emplace(ExplicitInt(0), 0);
+  m.emplace(ExplicitInt(1), 0);
+  m.erase(m.begin());
+  m.erase(m.cbegin());
+}
+
+TEST(FlatMap, SupportsEraseIf) {
+  flat_map<MoveOnlyInt, MoveOnlyInt> m;
+  m.insert(std::make_pair(MoveOnlyInt(1), MoveOnlyInt(1)));
+  m.insert(std::make_pair(MoveOnlyInt(2), MoveOnlyInt(2)));
+  m.insert(std::make_pair(MoveOnlyInt(3), MoveOnlyInt(3)));
+  m.insert(std::make_pair(MoveOnlyInt(4), MoveOnlyInt(4)));
+  m.insert(std::make_pair(MoveOnlyInt(5), MoveOnlyInt(5)));
+
+  EraseIf(m, [to_be_removed = MoveOnlyInt(2)](
+                 const std::pair<MoveOnlyInt, MoveOnlyInt>& e) {
+    return e.first == to_be_removed;
+  });
+
+  EXPECT_EQ(m.size(), 4u);
+  ASSERT_TRUE(m.find(MoveOnlyInt(1)) != m.end());
+  ASSERT_FALSE(m.find(MoveOnlyInt(2)) != m.end());
+  ASSERT_TRUE(m.find(MoveOnlyInt(3)) != m.end());
+  ASSERT_TRUE(m.find(MoveOnlyInt(4)) != m.end());
+  ASSERT_TRUE(m.find(MoveOnlyInt(5)) != m.end());
+}
+
+}  // namespace
+}  // namespace webrtc
diff --git a/rtc_base/containers/flat_set.h b/rtc_base/containers/flat_set.h
new file mode 100644
index 0000000000..e088cc5314
--- /dev/null
+++ b/rtc_base/containers/flat_set.h
@@ -0,0 +1,178 @@
+/*
+ *  Copyright (c) 2021 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This implementation is borrowed from Chromium.
+
+#ifndef RTC_BASE_CONTAINERS_FLAT_SET_H_
+#define RTC_BASE_CONTAINERS_FLAT_SET_H_
+
+#include <functional>
+#include <vector>
+
+#include "rtc_base/containers/flat_tree.h"
+#include "rtc_base/containers/identity.h"
+
+namespace webrtc {
+
+// flat_set is a container with a std::set-like interface that stores its
+// contents in a sorted container, by default a vector.
+// +// Its implementation mostly tracks the corresponding standardization proposal +// https://wg21.link/P1222. +// +// +// PROS +// +// - Good memory locality. +// - Low overhead, especially for smaller sets. +// - Performance is good for more workloads than you might expect (see +// //base/containers/README.md in Chromium repository) +// - Supports C++14 set interface. +// +// CONS +// +// - Inserts and removals are O(n). +// +// IMPORTANT NOTES +// +// - Iterators are invalidated across mutations. +// - If possible, construct a flat_set in one operation by inserting into +// a container and moving that container into the flat_set constructor. +// - For multiple removals use base::EraseIf() which is O(n) rather than +// O(n * removed_items). +// +// QUICK REFERENCE +// +// Most of the core functionality is inherited from flat_tree. Please see +// flat_tree.h for more details for most of these functions. As a quick +// reference, the functions available are: +// +// Constructors (inputs need not be sorted): +// flat_set(const flat_set&); +// flat_set(flat_set&&); +// flat_set(InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_set(const container_type& items, +// const Compare& compare = Compare()); +// flat_set(container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. +// flat_set(std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Constructors (inputs need to be sorted): +// flat_set(sorted_unique_t, +// InputIterator first, InputIterator last, +// const Compare& compare = Compare()); +// flat_set(sorted_unique_t, +// const container_type& items, +// const Compare& compare = Compare()); +// flat_set(sorted_unique_t, +// container_type&& items, +// const Compare& compare = Compare()); // Re-use storage. 
+// flat_set(sorted_unique_t, +// std::initializer_list ilist, +// const Compare& comp = Compare()); +// +// Assignment functions: +// flat_set& operator=(const flat_set&); +// flat_set& operator=(flat_set&&); +// flat_set& operator=(initializer_list); +// +// Memory management functions: +// void reserve(size_t); +// size_t capacity() const; +// void shrink_to_fit(); +// +// Size management functions: +// void clear(); +// size_t size() const; +// size_t max_size() const; +// bool empty() const; +// +// Iterator functions: +// iterator begin(); +// const_iterator begin() const; +// const_iterator cbegin() const; +// iterator end(); +// const_iterator end() const; +// const_iterator cend() const; +// reverse_iterator rbegin(); +// const reverse_iterator rbegin() const; +// const_reverse_iterator crbegin() const; +// reverse_iterator rend(); +// const_reverse_iterator rend() const; +// const_reverse_iterator crend() const; +// +// Insert and accessor functions: +// pair insert(const key_type&); +// pair insert(key_type&&); +// void insert(InputIterator first, InputIterator last); +// iterator insert(const_iterator hint, const key_type&); +// iterator insert(const_iterator hint, key_type&&); +// pair emplace(Args&&...); +// iterator emplace_hint(const_iterator, Args&&...); +// +// Underlying type functions: +// container_type extract() &&; +// void replace(container_type&&); +// +// Erase functions: +// iterator erase(iterator); +// iterator erase(const_iterator); +// iterator erase(const_iterator first, const_iterator& last); +// template size_t erase(const K& key); +// +// Comparators (see std::set documentation). 
+// key_compare key_comp() const; +// value_compare value_comp() const; +// +// Search functions: +// template size_t count(const K&) const; +// template iterator find(const K&); +// template const_iterator find(const K&) const; +// template bool contains(const K&) const; +// template pair equal_range(K&); +// template iterator lower_bound(const K&); +// template const_iterator lower_bound(const K&) const; +// template iterator upper_bound(const K&); +// template const_iterator upper_bound(const K&) const; +// +// General functions: +// void swap(flat_set&); +// +// Non-member operators: +// bool operator==(const flat_set&, const flat_set); +// bool operator!=(const flat_set&, const flat_set); +// bool operator<(const flat_set&, const flat_set); +// bool operator>(const flat_set&, const flat_set); +// bool operator>=(const flat_set&, const flat_set); +// bool operator<=(const flat_set&, const flat_set); +// +template , + class Container = std::vector> +using flat_set = typename ::webrtc::flat_containers_internal:: + flat_tree; + +// ---------------------------------------------------------------------------- +// General operations. + +// Erases all elements that match predicate. It has O(size) complexity. +// +// flat_set numbers; +// ... +// EraseIf(numbers, [](int number) { return number % 2 == 1; }); + +// NOLINTNEXTLINE(misc-unused-using-decls) +using ::webrtc::flat_containers_internal::EraseIf; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_FLAT_SET_H_ diff --git a/rtc_base/containers/flat_set_unittest.cc b/rtc_base/containers/flat_set_unittest.cc new file mode 100644 index 0000000000..617db92440 --- /dev/null +++ b/rtc_base/containers/flat_set_unittest.cc @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#include "rtc_base/containers/flat_set.h" + +#include +#include +#include +#include + +#include "rtc_base/containers/move_only_int.h" +#include "test/gmock.h" +#include "test/gtest.h" + +// A flat_set is basically a interface to flat_tree. So several basic +// operations are tested to make sure things are set up properly, but the bulk +// of the tests are in flat_tree_unittests.cc. + +using ::testing::ElementsAre; + +namespace webrtc { +namespace { + +TEST(FlatSet, IncompleteType) { + struct A { + using Set = flat_set; + int data; + Set set_with_incomplete_type; + Set::iterator it; + Set::const_iterator cit; + + // We do not declare operator< because clang complains that it's unused. + }; + + A a; +} + +TEST(FlatSet, RangeConstructor) { + flat_set::value_type input_vals[] = {1, 1, 1, 2, 2, 2, 3, 3, 3}; + + flat_set cont(std::begin(input_vals), std::end(input_vals)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3)); +} + +TEST(FlatSet, MoveConstructor) { + int input_range[] = {1, 2, 3, 4}; + + flat_set original(std::begin(input_range), + std::end(input_range)); + flat_set moved(std::move(original)); + + EXPECT_EQ(1U, moved.count(MoveOnlyInt(1))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(2))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(3))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(4))); +} + +TEST(FlatSet, InitializerListConstructor) { + flat_set cont({1, 2, 3, 4, 5, 6, 10, 8}); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); +} + +TEST(FlatSet, InsertFindSize) { + flat_set s; + s.insert(1); + s.insert(1); + s.insert(2); + + EXPECT_EQ(2u, s.size()); + EXPECT_EQ(1, *s.find(1)); + EXPECT_EQ(2, *s.find(2)); + EXPECT_EQ(s.end(), s.find(7)); +} + +TEST(FlatSet, CopySwap) { + flat_set original; + original.insert(1); + 
original.insert(2); + EXPECT_THAT(original, ElementsAre(1, 2)); + + flat_set copy(original); + EXPECT_THAT(copy, ElementsAre(1, 2)); + + copy.erase(copy.begin()); + copy.insert(10); + EXPECT_THAT(copy, ElementsAre(2, 10)); + + original.swap(copy); + EXPECT_THAT(original, ElementsAre(2, 10)); + EXPECT_THAT(copy, ElementsAre(1, 2)); +} + +TEST(FlatSet, UsingTransparentCompare) { + using ExplicitInt = webrtc::MoveOnlyInt; + flat_set s; + const auto& s1 = s; + int x = 0; + + // Check if we can use lookup functions without converting to key_type. + // Correctness is checked in flat_tree tests. + s.count(x); + s1.count(x); + s.find(x); + s1.find(x); + s.equal_range(x); + s1.equal_range(x); + s.lower_bound(x); + s1.lower_bound(x); + s.upper_bound(x); + s1.upper_bound(x); + s.erase(x); + + // Check if we broke overload resolution. + s.emplace(0); + s.emplace(1); + s.erase(s.begin()); + s.erase(s.cbegin()); +} + +TEST(FlatSet, SupportsEraseIf) { + flat_set s; + s.emplace(MoveOnlyInt(1)); + s.emplace(MoveOnlyInt(2)); + s.emplace(MoveOnlyInt(3)); + s.emplace(MoveOnlyInt(4)); + s.emplace(MoveOnlyInt(5)); + + EraseIf(s, [to_be_removed = MoveOnlyInt(2)](const MoveOnlyInt& elem) { + return elem == to_be_removed; + }); + + EXPECT_EQ(s.size(), 4u); + ASSERT_TRUE(s.find(MoveOnlyInt(1)) != s.end()); + ASSERT_FALSE(s.find(MoveOnlyInt(2)) != s.end()); + ASSERT_TRUE(s.find(MoveOnlyInt(3)) != s.end()); + ASSERT_TRUE(s.find(MoveOnlyInt(4)) != s.end()); + ASSERT_TRUE(s.find(MoveOnlyInt(5)) != s.end()); +} +} // namespace +} // namespace webrtc diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCAudioTrack.h b/rtc_base/containers/flat_tree.cc similarity index 58% rename from sdk/objc/Framework/Headers/WebRTC/RTCAudioTrack.h rename to rtc_base/containers/flat_tree.cc index 88515bb3fe..9e86db191a 100644 --- a/sdk/objc/Framework/Headers/WebRTC/RTCAudioTrack.h +++ b/rtc_base/containers/flat_tree.cc @@ -1,5 +1,5 @@ /* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. 
+ * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,4 +8,12 @@ * be found in the AUTHORS file in the root of the source tree. */ -#import "api/peerconnection/RTCAudioTrack.h" +// This implementation is borrowed from Chromium. + +#include "rtc_base/containers/flat_tree.h" + +namespace webrtc { + +sorted_unique_t sorted_unique; + +} // namespace webrtc diff --git a/rtc_base/containers/flat_tree.h b/rtc_base/containers/flat_tree.h new file mode 100644 index 0000000000..1b02cce1b4 --- /dev/null +++ b/rtc_base/containers/flat_tree.h @@ -0,0 +1,1102 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_FLAT_TREE_H_ +#define RTC_BASE_CONTAINERS_FLAT_TREE_H_ + +#include +#include +#include +#include +#include + +#include "absl/algorithm/container.h" +#include "rtc_base/checks.h" +#include "rtc_base/containers/as_const.h" +#include "rtc_base/containers/not_fn.h" +#include "rtc_base/containers/void_t.h" +#include "rtc_base/system/no_unique_address.h" + +namespace webrtc { +// Tag type that allows skipping the sort_and_unique step when constructing a +// flat_tree in case the underlying container is already sorted and has no +// duplicate elements. 
+struct sorted_unique_t { + constexpr sorted_unique_t() = default; +}; +extern sorted_unique_t sorted_unique; + +namespace flat_containers_internal { + +// Helper functions used in RTC_DCHECKs below to make sure that inputs tagged +// with sorted_unique are indeed sorted and unique. +template +constexpr bool is_sorted_and_unique(const Range& range, Comp comp) { + // Being unique implies that there are no adjacent elements that + // compare equal. So this checks that each element is strictly less + // than the element after it. + return absl::c_adjacent_find(range, webrtc::not_fn(comp)) == std::end(range); +} + +// This is a convenience trait inheriting from std::true_type if Iterator is at +// least a ForwardIterator and thus supports multiple passes over a range. +template +using is_multipass = + std::is_base_of::iterator_category>; + +// Uses SFINAE to detect whether type has is_transparent member. +template +struct IsTransparentCompare : std::false_type {}; +template +struct IsTransparentCompare> + : std::true_type {}; + +// Helper inspired by C++20's std::to_array to convert a C-style array to a +// std::array. As opposed to the C++20 version this implementation does not +// provide an overload for rvalues and does not strip cv qualifers from the +// returned std::array::value_type. The returned value_type needs to be +// specified explicitly, allowing the construction of std::arrays with const +// elements. +// +// Reference: https://en.cppreference.com/w/cpp/container/array/to_array +template +constexpr std::array ToArrayImpl(const T (&data)[N], + std::index_sequence) { + return {{data[I]...}}; +} + +template +constexpr std::array ToArray(const T (&data)[N]) { + return ToArrayImpl(data, std::make_index_sequence()); +} + +// std::pair's operator= is not constexpr prior to C++20. Thus we need this +// small helper to invoke operator= on the .first and .second member explicitly. 
+template +constexpr void Assign(T& lhs, T&& rhs) { + lhs = std::move(rhs); +} + +template +constexpr void Assign(std::pair& lhs, std::pair&& rhs) { + Assign(lhs.first, std::move(rhs.first)); + Assign(lhs.second, std::move(rhs.second)); +} + +// constexpr swap implementation. std::swap is not constexpr prior to C++20. +template +constexpr void Swap(T& lhs, T& rhs) { + T tmp = std::move(lhs); + Assign(lhs, std::move(rhs)); + Assign(rhs, std::move(tmp)); +} + +// constexpr prev implementation. std::prev is not constexpr prior to C++17. +template +constexpr BidirIt Prev(BidirIt it) { + return --it; +} + +// constexpr next implementation. std::next is not constexpr prior to C++17. +template +constexpr InputIt Next(InputIt it) { + return ++it; +} + +// constexpr sort implementation. std::sort is not constexpr prior to C++20. +// While insertion sort has a quadratic worst case complexity, it was chosen +// because it has linear complexity for nearly sorted data, is stable, and +// simple to implement. +template +constexpr void InsertionSort(BidirIt first, BidirIt last, const Compare& comp) { + if (first == last) + return; + + for (auto it = Next(first); it != last; ++it) { + for (auto curr = it; curr != first && comp(*curr, *Prev(curr)); --curr) + Swap(*curr, *Prev(curr)); + } +} + +// Implementation ------------------------------------------------------------- + +// Implementation for the sorted associative flat_set and flat_map using a +// sorted vector as the backing store. Do not use directly. +// +// The use of "value" in this is like std::map uses, meaning it's the thing +// contained (in the case of map it's a pair). The Key is how +// things are looked up. In the case of a set, Key == Value. In the case of +// a map, the Key is a component of a Value. +// +// The helper class GetKeyFromValue provides the means to extract a key from a +// value for comparison purposes. It should implement: +// const Key& operator()(const Value&). 
+template +class flat_tree { + public: + // -------------------------------------------------------------------------- + // Types. + // + using key_type = Key; + using key_compare = KeyCompare; + using value_type = typename Container::value_type; + + // Wraps the templated key comparison to compare values. + struct value_compare { + constexpr bool operator()(const value_type& left, + const value_type& right) const { + GetKeyFromValue extractor; + return comp(extractor(left), extractor(right)); + } + + RTC_NO_UNIQUE_ADDRESS key_compare comp; + }; + + using pointer = typename Container::pointer; + using const_pointer = typename Container::const_pointer; + using reference = typename Container::reference; + using const_reference = typename Container::const_reference; + using size_type = typename Container::size_type; + using difference_type = typename Container::difference_type; + using iterator = typename Container::iterator; + using const_iterator = typename Container::const_iterator; + using reverse_iterator = typename Container::reverse_iterator; + using const_reverse_iterator = typename Container::const_reverse_iterator; + using container_type = Container; + + // -------------------------------------------------------------------------- + // Lifetime. + // + // Constructors that take range guarantee O(N * log^2(N)) + O(N) complexity + // and take O(N * log(N)) + O(N) if extra memory is available (N is a range + // length). + // + // Assume that move constructors invalidate iterators and references. + // + // The constructors that take ranges, lists, and vectors do not require that + // the input be sorted. + // + // When passing the webrtc::sorted_unique tag as the first argument no sort + // and unique step takes places. This is useful if the underlying container + // already has the required properties. 
+ + flat_tree() = default; + flat_tree(const flat_tree&) = default; + flat_tree(flat_tree&&) = default; + + explicit flat_tree(const key_compare& comp); + + template + flat_tree(InputIterator first, + InputIterator last, + const key_compare& comp = key_compare()); + + flat_tree(const container_type& items, + const key_compare& comp = key_compare()); + + explicit flat_tree(container_type&& items, + const key_compare& comp = key_compare()); + + flat_tree(std::initializer_list ilist, + const key_compare& comp = key_compare()); + + template + flat_tree(sorted_unique_t, + InputIterator first, + InputIterator last, + const key_compare& comp = key_compare()); + + flat_tree(sorted_unique_t, + const container_type& items, + const key_compare& comp = key_compare()); + + constexpr flat_tree(sorted_unique_t, + container_type&& items, + const key_compare& comp = key_compare()); + + flat_tree(sorted_unique_t, + std::initializer_list ilist, + const key_compare& comp = key_compare()); + + ~flat_tree() = default; + + // -------------------------------------------------------------------------- + // Assignments. + // + // Assume that move assignment invalidates iterators and references. + + flat_tree& operator=(const flat_tree&) = default; + flat_tree& operator=(flat_tree&&) = default; + // Takes the first if there are duplicates in the initializer list. + flat_tree& operator=(std::initializer_list ilist); + + // -------------------------------------------------------------------------- + // Memory management. + // + // Beware that shrink_to_fit() simply forwards the request to the + // container_type and its implementation is free to optimize otherwise and + // leave capacity() to be greater that its size. + // + // reserve() and shrink_to_fit() invalidate iterators and references. + + void reserve(size_type new_capacity); + size_type capacity() const; + void shrink_to_fit(); + + // -------------------------------------------------------------------------- + // Size management. 
+ // + // clear() leaves the capacity() of the flat_tree unchanged. + + void clear(); + + constexpr size_type size() const; + constexpr size_type max_size() const; + constexpr bool empty() const; + + // -------------------------------------------------------------------------- + // Iterators. + // + // Iterators follow the ordering defined by the key comparator used in + // construction of the flat_tree. + + iterator begin(); + constexpr const_iterator begin() const; + const_iterator cbegin() const; + + iterator end(); + constexpr const_iterator end() const; + const_iterator cend() const; + + reverse_iterator rbegin(); + const_reverse_iterator rbegin() const; + const_reverse_iterator crbegin() const; + + reverse_iterator rend(); + const_reverse_iterator rend() const; + const_reverse_iterator crend() const; + + // -------------------------------------------------------------------------- + // Insert operations. + // + // Assume that every operation invalidates iterators and references. + // Insertion of one element can take O(size). Capacity of flat_tree grows in + // an implementation-defined manner. + // + // NOTE: Prefer to build a new flat_tree from a std::vector (or similar) + // instead of calling insert() repeatedly. + + std::pair insert(const value_type& val); + std::pair insert(value_type&& val); + + iterator insert(const_iterator position_hint, const value_type& x); + iterator insert(const_iterator position_hint, value_type&& x); + + // This method inserts the values from the range [first, last) into the + // current tree. + template + void insert(InputIterator first, InputIterator last); + + template + std::pair emplace(Args&&... args); + + template + iterator emplace_hint(const_iterator position_hint, Args&&... args); + + // -------------------------------------------------------------------------- + // Underlying type operations. + // + // Assume that either operation invalidates iterators and references. 
+ + // Extracts the container_type and returns it to the caller. Ensures that + // `this` is `empty()` afterwards. + container_type extract() &&; + + // Replaces the container_type with `body`. Expects that `body` is sorted + // and has no repeated elements with regard to value_comp(). + void replace(container_type&& body); + + // -------------------------------------------------------------------------- + // Erase operations. + // + // Assume that every operation invalidates iterators and references. + // + // erase(position), erase(first, last) can take O(size). + // erase(key) may take O(size) + O(log(size)). + // + // Prefer webrtc::EraseIf() or some other variation on erase(remove(), end()) + // idiom when deleting multiple non-consecutive elements. + + iterator erase(iterator position); + // Artificially templatized to break ambiguity if `iterator` and + // `const_iterator` are the same type. + template + iterator erase(const_iterator position); + iterator erase(const_iterator first, const_iterator last); + template + size_type erase(const K& key); + + // -------------------------------------------------------------------------- + // Comparators. + + constexpr key_compare key_comp() const; + constexpr value_compare value_comp() const; + + // -------------------------------------------------------------------------- + // Search operations. + // + // Search operations have O(log(size)) complexity. 
+ + template + size_type count(const K& key) const; + + template + iterator find(const K& key); + + template + const_iterator find(const K& key) const; + + template + bool contains(const K& key) const; + + template + std::pair equal_range(const K& key); + + template + std::pair equal_range(const K& key) const; + + template + iterator lower_bound(const K& key); + + template + const_iterator lower_bound(const K& key) const; + + template + iterator upper_bound(const K& key); + + template + const_iterator upper_bound(const K& key) const; + + // -------------------------------------------------------------------------- + // General operations. + // + // Assume that swap invalidates iterators and references. + // + // Implementation note: currently we use operator==() and operator<() on + // std::vector, because they have the same contract we need, so we use them + // directly for brevity and in case it is more optimal than calling equal() + // and lexicographical_compare(). If the underlying container type is changed, + // this code may need to be modified. + + void swap(flat_tree& other) noexcept; + + friend bool operator==(const flat_tree& lhs, const flat_tree& rhs) { + return lhs.body_ == rhs.body_; + } + + friend bool operator!=(const flat_tree& lhs, const flat_tree& rhs) { + return !(lhs == rhs); + } + + friend bool operator<(const flat_tree& lhs, const flat_tree& rhs) { + return lhs.body_ < rhs.body_; + } + + friend bool operator>(const flat_tree& lhs, const flat_tree& rhs) { + return rhs < lhs; + } + + friend bool operator>=(const flat_tree& lhs, const flat_tree& rhs) { + return !(lhs < rhs); + } + + friend bool operator<=(const flat_tree& lhs, const flat_tree& rhs) { + return !(lhs > rhs); + } + + friend void swap(flat_tree& lhs, flat_tree& rhs) noexcept { lhs.swap(rhs); } + + protected: + // Emplaces a new item into the tree that is known not to be in it. This + // is for implementing map operator[]. 
+ template + iterator unsafe_emplace(const_iterator position, Args&&... args); + + // Attempts to emplace a new element with key |key|. Only if |key| is not yet + // present, construct value_type from |args| and insert it. Returns an + // iterator to the element with key |key| and a bool indicating whether an + // insertion happened. + template + std::pair emplace_key_args(const K& key, Args&&... args); + + // Similar to |emplace_key_args|, but checks |hint| first as a possible + // insertion position. + template + std::pair emplace_hint_key_args(const_iterator hint, + const K& key, + Args&&... args); + + private: + // Helper class for e.g. lower_bound that can compare a value on the left + // to a key on the right. + struct KeyValueCompare { + // The key comparison object must outlive this class. + explicit KeyValueCompare(const key_compare& comp) : comp_(comp) {} + + template + bool operator()(const T& lhs, const U& rhs) const { + return comp_(extract_if_value_type(lhs), extract_if_value_type(rhs)); + } + + private: + const key_type& extract_if_value_type(const value_type& v) const { + GetKeyFromValue extractor; + return extractor(v); + } + + template + const K& extract_if_value_type(const K& k) const { + return k; + } + + const key_compare& comp_; + }; + + iterator const_cast_it(const_iterator c_it) { + auto distance = std::distance(cbegin(), c_it); + return std::next(begin(), distance); + } + + // This method is inspired by both std::map::insert(P&&) and + // std::map::insert_or_assign(const K&, V&&). It inserts val if an equivalent + // element is not present yet, otherwise it overwrites. It returns an iterator + // to the modified element and a flag indicating whether insertion or + // assignment happened. 
+ template + std::pair insert_or_assign(V&& val) { + auto position = lower_bound(GetKeyFromValue()(val)); + + if (position == end() || value_comp()(val, *position)) + return {body_.emplace(position, std::forward(val)), true}; + + *position = std::forward(val); + return {position, false}; + } + + // This method is similar to insert_or_assign, with the following differences: + // - Instead of searching [begin(), end()) it only searches [first, last). + // - In case no equivalent element is found, val is appended to the end of the + // underlying body and an iterator to the next bigger element in [first, + // last) is returned. + template + std::pair append_or_assign(iterator first, + iterator last, + V&& val) { + auto position = std::lower_bound(first, last, val, value_comp()); + + if (position == last || value_comp()(val, *position)) { + // emplace_back might invalidate position, which is why distance needs to + // be cached. + const difference_type distance = std::distance(begin(), position); + body_.emplace_back(std::forward(val)); + return {std::next(begin(), distance), true}; + } + + *position = std::forward(val); + return {position, false}; + } + + // This method is similar to insert, with the following differences: + // - Instead of searching [begin(), end()) it only searches [first, last). + // - In case no equivalent element is found, val is appended to the end of the + // underlying body and an iterator to the next bigger element in [first, + // last) is returned. + template + std::pair append_unique(iterator first, + iterator last, + V&& val) { + auto position = std::lower_bound(first, last, val, value_comp()); + + if (position == last || value_comp()(val, *position)) { + // emplace_back might invalidate position, which is why distance needs to + // be cached. 
+ const difference_type distance = std::distance(begin(), position); + body_.emplace_back(std::forward(val)); + return {std::next(begin(), distance), true}; + } + + return {position, false}; + } + + void sort_and_unique(iterator first, iterator last) { + // Preserve stability for the unique code below. + std::stable_sort(first, last, value_comp()); + + // lhs is already <= rhs due to sort, therefore !(lhs < rhs) <=> lhs == rhs. + auto equal_comp = webrtc::not_fn(value_comp()); + erase(std::unique(first, last, equal_comp), last); + } + + void sort_and_unique() { sort_and_unique(begin(), end()); } + + // To support comparators that may not be possible to default-construct, we + // have to store an instance of Compare. Since Compare commonly is stateless, + // we use the RTC_NO_UNIQUE_ADDRESS attribute to save space. + RTC_NO_UNIQUE_ADDRESS key_compare comp_; + // Declare after |key_compare_comp_| to workaround GCC ICE. For details + // see https://crbug.com/1156268 + container_type body_; + + // If the compare is not transparent we want to construct key_type once. + template + using KeyTypeOrK = typename std:: + conditional::value, K, key_type>::type; +}; + +// ---------------------------------------------------------------------------- +// Lifetime. 
+ +template +flat_tree::flat_tree( + const KeyCompare& comp) + : comp_(comp) {} + +template +template +flat_tree::flat_tree( + InputIterator first, + InputIterator last, + const KeyCompare& comp) + : comp_(comp), body_(first, last) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + const container_type& items, + const KeyCompare& comp) + : comp_(comp), body_(items) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + container_type&& items, + const KeyCompare& comp) + : comp_(comp), body_(std::move(items)) { + sort_and_unique(); +} + +template +flat_tree::flat_tree( + std::initializer_list ilist, + const KeyCompare& comp) + : flat_tree(std::begin(ilist), std::end(ilist), comp) {} + +template +template +flat_tree::flat_tree( + sorted_unique_t, + InputIterator first, + InputIterator last, + const KeyCompare& comp) + : comp_(comp), body_(first, last) { + RTC_DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +flat_tree::flat_tree( + sorted_unique_t, + const container_type& items, + const KeyCompare& comp) + : comp_(comp), body_(items) { + RTC_DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +constexpr flat_tree::flat_tree( + sorted_unique_t, + container_type&& items, + const KeyCompare& comp) + : comp_(comp), body_(std::move(items)) { + RTC_DCHECK(is_sorted_and_unique(*this, value_comp())); +} + +template +flat_tree::flat_tree( + sorted_unique_t, + std::initializer_list ilist, + const KeyCompare& comp) + : flat_tree(sorted_unique, std::begin(ilist), std::end(ilist), comp) {} + +// ---------------------------------------------------------------------------- +// Assignments. + +template +auto flat_tree::operator=( + std::initializer_list ilist) -> flat_tree& { + body_ = ilist; + sort_and_unique(); + return *this; +} + +// ---------------------------------------------------------------------------- +// Memory management. 
+ +template +void flat_tree::reserve( + size_type new_capacity) { + body_.reserve(new_capacity); +} + +template +auto flat_tree::capacity() const + -> size_type { + return body_.capacity(); +} + +template +void flat_tree::shrink_to_fit() { + body_.shrink_to_fit(); +} + +// ---------------------------------------------------------------------------- +// Size management. + +template +void flat_tree::clear() { + body_.clear(); +} + +template +constexpr auto flat_tree::size() + const -> size_type { + return body_.size(); +} + +template +constexpr auto +flat_tree::max_size() const + -> size_type { + return body_.max_size(); +} + +template +constexpr bool flat_tree::empty() + const { + return body_.empty(); +} + +// ---------------------------------------------------------------------------- +// Iterators. + +template +auto flat_tree::begin() + -> iterator { + return body_.begin(); +} + +template +constexpr auto flat_tree::begin() + const -> const_iterator { + return std::begin(body_); +} + +template +auto flat_tree::cbegin() const + -> const_iterator { + return body_.cbegin(); +} + +template +auto flat_tree::end() -> iterator { + return body_.end(); +} + +template +constexpr auto flat_tree::end() + const -> const_iterator { + return std::end(body_); +} + +template +auto flat_tree::cend() const + -> const_iterator { + return body_.cend(); +} + +template +auto flat_tree::rbegin() + -> reverse_iterator { + return body_.rbegin(); +} + +template +auto flat_tree::rbegin() const + -> const_reverse_iterator { + return body_.rbegin(); +} + +template +auto flat_tree::crbegin() const + -> const_reverse_iterator { + return body_.crbegin(); +} + +template +auto flat_tree::rend() + -> reverse_iterator { + return body_.rend(); +} + +template +auto flat_tree::rend() const + -> const_reverse_iterator { + return body_.rend(); +} + +template +auto flat_tree::crend() const + -> const_reverse_iterator { + return body_.crend(); +} + +// 
---------------------------------------------------------------------------- +// Insert operations. +// +// Currently we use position_hint the same way as eastl or boost: +// https://github.com/electronicarts/EASTL/blob/master/include/EASTL/vector_set.h#L493 + +template +auto flat_tree::insert( + const value_type& val) -> std::pair { + return emplace_key_args(GetKeyFromValue()(val), val); +} + +template +auto flat_tree::insert( + value_type&& val) -> std::pair { + return emplace_key_args(GetKeyFromValue()(val), std::move(val)); +} + +template +auto flat_tree::insert( + const_iterator position_hint, + const value_type& val) -> iterator { + return emplace_hint_key_args(position_hint, GetKeyFromValue()(val), val) + .first; +} + +template +auto flat_tree::insert( + const_iterator position_hint, + value_type&& val) -> iterator { + return emplace_hint_key_args(position_hint, GetKeyFromValue()(val), + std::move(val)) + .first; +} + +template +template +void flat_tree::insert( + InputIterator first, + InputIterator last) { + if (first == last) + return; + + // Dispatch to single element insert if the input range contains a single + // element. + if (is_multipass() && std::next(first) == last) { + insert(end(), *first); + return; + } + + // Provide a convenience lambda to obtain an iterator pointing past the last + // old element. This needs to be dynamic due to possible re-allocations. + auto middle = [this, size = size()] { return std::next(begin(), size); }; + + // For batch updates initialize the first insertion point. + difference_type pos_first_new = size(); + + // Loop over the input range while appending new values and overwriting + // existing ones, if applicable. Keep track of the first insertion point. 
+ for (; first != last; ++first) { + std::pair result = append_unique(begin(), middle(), *first); + if (result.second) { + pos_first_new = + std::min(pos_first_new, std::distance(begin(), result.first)); + } + } + + // The new elements might be unordered and contain duplicates, so post-process + // the just inserted elements and merge them with the rest, inserting them at + // the previously found spot. + sort_and_unique(middle(), end()); + std::inplace_merge(std::next(begin(), pos_first_new), middle(), end(), + value_comp()); +} + +template +template +auto flat_tree::emplace( + Args&&... args) -> std::pair { + return insert(value_type(std::forward(args)...)); +} + +template +template +auto flat_tree::emplace_hint( + const_iterator position_hint, + Args&&... args) -> iterator { + return insert(position_hint, value_type(std::forward(args)...)); +} + +// ---------------------------------------------------------------------------- +// Underlying type operations. + +template +auto flat_tree:: + extract() && -> container_type { + return std::exchange(body_, container_type()); +} + +template +void flat_tree::replace( + container_type&& body) { + // Ensure that `body` is sorted and has no repeated elements according to + // `value_comp()`. + RTC_DCHECK(is_sorted_and_unique(body, value_comp())); + body_ = std::move(body); +} + +// ---------------------------------------------------------------------------- +// Erase operations. 
+ +template +auto flat_tree::erase( + iterator position) -> iterator { + RTC_CHECK(position != body_.end()); + return body_.erase(position); +} + +template +template +auto flat_tree::erase( + const_iterator position) -> iterator { + RTC_CHECK(position != body_.end()); + return body_.erase(position); +} + +template +template +auto flat_tree::erase(const K& val) + -> size_type { + auto eq_range = equal_range(val); + auto res = std::distance(eq_range.first, eq_range.second); + erase(eq_range.first, eq_range.second); + return res; +} + +template +auto flat_tree::erase( + const_iterator first, + const_iterator last) -> iterator { + return body_.erase(first, last); +} + +// ---------------------------------------------------------------------------- +// Comparators. + +template +constexpr auto +flat_tree::key_comp() const + -> key_compare { + return comp_; +} + +template +constexpr auto +flat_tree::value_comp() const + -> value_compare { + return value_compare{comp_}; +} + +// ---------------------------------------------------------------------------- +// Search operations. + +template +template +auto flat_tree::count( + const K& key) const -> size_type { + auto eq_range = equal_range(key); + return std::distance(eq_range.first, eq_range.second); +} + +template +template +auto flat_tree::find(const K& key) + -> iterator { + return const_cast_it(webrtc::as_const(*this).find(key)); +} + +template +template +auto flat_tree::find( + const K& key) const -> const_iterator { + auto eq_range = equal_range(key); + return (eq_range.first == eq_range.second) ? 
end() : eq_range.first; +} + +template +template +bool flat_tree::contains( + const K& key) const { + auto lower = lower_bound(key); + return lower != end() && !comp_(key, GetKeyFromValue()(*lower)); +} + +template +template +auto flat_tree::equal_range( + const K& key) -> std::pair { + auto res = webrtc::as_const(*this).equal_range(key); + return {const_cast_it(res.first), const_cast_it(res.second)}; +} + +template +template +auto flat_tree::equal_range( + const K& key) const -> std::pair { + auto lower = lower_bound(key); + + KeyValueCompare comp(comp_); + if (lower == end() || comp(key, *lower)) + return {lower, lower}; + + return {lower, std::next(lower)}; +} + +template +template +auto flat_tree::lower_bound( + const K& key) -> iterator { + return const_cast_it(webrtc::as_const(*this).lower_bound(key)); +} + +template +template +auto flat_tree::lower_bound( + const K& key) const -> const_iterator { + static_assert(std::is_convertible&, const K&>::value, + "Requested type cannot be bound to the container's key_type " + "which is required for a non-transparent compare."); + + const KeyTypeOrK& key_ref = key; + + KeyValueCompare comp(comp_); + return absl::c_lower_bound(*this, key_ref, comp); +} + +template +template +auto flat_tree::upper_bound( + const K& key) -> iterator { + return const_cast_it(webrtc::as_const(*this).upper_bound(key)); +} + +template +template +auto flat_tree::upper_bound( + const K& key) const -> const_iterator { + static_assert(std::is_convertible&, const K&>::value, + "Requested type cannot be bound to the container's key_type " + "which is required for a non-transparent compare."); + + const KeyTypeOrK& key_ref = key; + + KeyValueCompare comp(comp_); + return absl::c_upper_bound(*this, key_ref, comp); +} + +// ---------------------------------------------------------------------------- +// General operations. 
+ +template +void flat_tree::swap( + flat_tree& other) noexcept { + std::swap(*this, other); +} + +template +template +auto flat_tree::unsafe_emplace( + const_iterator position, + Args&&... args) -> iterator { + return body_.emplace(position, std::forward(args)...); +} + +template +template +auto flat_tree::emplace_key_args( + const K& key, + Args&&... args) -> std::pair { + auto lower = lower_bound(key); + if (lower == end() || comp_(key, GetKeyFromValue()(*lower))) + return {unsafe_emplace(lower, std::forward(args)...), true}; + return {lower, false}; +} + +template +template +auto flat_tree:: + emplace_hint_key_args(const_iterator hint, const K& key, Args&&... args) + -> std::pair { + KeyValueCompare comp(comp_); + if ((hint == begin() || comp(*std::prev(hint), key))) { + if (hint == end() || comp(key, *hint)) { + // *(hint - 1) < key < *hint => key did not exist and hint is correct. + return {unsafe_emplace(hint, std::forward(args)...), true}; + } + if (!comp(*hint, key)) { + // key == *hint => no-op, return correct hint. + return {const_cast_it(hint), false}; + } + } + // hint was not helpful, dispatch to hintless version. + return emplace_key_args(key, std::forward(args)...); +} + +// ---------------------------------------------------------------------------- +// Free functions. + +// Erases all elements that match predicate. It has O(size) complexity. 
+template +size_t EraseIf( + webrtc::flat_containers_internal:: + flat_tree& container, + Predicate pred) { + auto it = std::remove_if(container.begin(), container.end(), + std::forward(pred)); + size_t removed = std::distance(it, container.end()); + container.erase(it, container.end()); + return removed; +} + +} // namespace flat_containers_internal +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_FLAT_TREE_H_ diff --git a/rtc_base/containers/flat_tree_unittest.cc b/rtc_base/containers/flat_tree_unittest.cc new file mode 100644 index 0000000000..9bb803d16d --- /dev/null +++ b/rtc_base/containers/flat_tree_unittest.cc @@ -0,0 +1,1484 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#include "rtc_base/containers/flat_tree.h" + +// Following tests are ported and extended tests from libcpp for std::set. +// They can be found here: +// https://github.com/llvm/llvm-project/tree/main/libcxx/test/std/containers/associative/set +// +// Not ported tests: +// * No tests with PrivateConstructor and std::less<> changed to std::less +// These tests have to do with C++14 std::less<> +// http://en.cppreference.com/w/cpp/utility/functional/less_void +// and add support for templated versions of lookup functions. +// Because we use same implementation, we figured that it's OK just to check +// compilation and this is what we do in flat_set_unittest/flat_map_unittest. +// * No tests for max_size() +// Has to do with allocator support. +// * No tests with DefaultOnly. 
+// Standard containers allocate each element in the separate node on the heap +// and then manipulate these nodes. Flat containers store their elements in +// contiguous memory and move them around, type is required to be movable. +// * No tests for N3644. +// This proposal suggests that all default constructed iterators compare +// equal. Currently we use std::vector iterators and they don't implement +// this. +// * No tests with min_allocator and no tests counting allocations. +// Flat sets currently don't support allocators. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rtc_base/containers/identity.h" +#include "rtc_base/containers/move_only_int.h" +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace flat_containers_internal { +namespace { + +template +class InputIterator { + public: + using iterator_category = std::input_iterator_tag; + using value_type = typename std::iterator_traits::value_type; + using difference_type = typename std::iterator_traits::difference_type; + using pointer = It; + using reference = typename std::iterator_traits::reference; + + InputIterator() : it_() {} + explicit InputIterator(It it) : it_(it) {} + + reference operator*() const { return *it_; } + pointer operator->() const { return it_; } + + InputIterator& operator++() { + ++it_; + return *this; + } + InputIterator operator++(int) { + InputIterator tmp(*this); + ++(*this); + return tmp; + } + + friend bool operator==(const InputIterator& lhs, const InputIterator& rhs) { + return lhs.it_ == rhs.it_; + } + friend bool operator!=(const InputIterator& lhs, const InputIterator& rhs) { + return !(lhs == rhs); + } + + private: + It it_; +}; + +template +InputIterator MakeInputIterator(It it) { + return InputIterator(it); +} + +class Emplaceable { + public: + Emplaceable() : Emplaceable(0, 0.0) {} + Emplaceable(int i, double d) : int_(i), double_(d) {} + Emplaceable(Emplaceable&& other) : int_(other.int_), 
double_(other.double_) { + other.int_ = 0; + other.double_ = 0.0; + } + Emplaceable(const Emplaceable&) = delete; + Emplaceable& operator=(const Emplaceable&) = delete; + + Emplaceable& operator=(Emplaceable&& other) { + int_ = other.int_; + other.int_ = 0; + double_ = other.double_; + other.double_ = 0.0; + return *this; + } + + friend bool operator==(const Emplaceable& lhs, const Emplaceable& rhs) { + return std::tie(lhs.int_, lhs.double_) == std::tie(rhs.int_, rhs.double_); + } + + friend bool operator<(const Emplaceable& lhs, const Emplaceable& rhs) { + return std::tie(lhs.int_, lhs.double_) < std::tie(rhs.int_, rhs.double_); + } + + private: + int int_; + double double_; +}; + +struct TemplateConstructor { + template + explicit TemplateConstructor(const T&) {} + + friend bool operator<(const TemplateConstructor&, + const TemplateConstructor&) { + return false; + } +}; + +class NonDefaultConstructibleCompare { + public: + explicit NonDefaultConstructibleCompare(int) {} + + template + bool operator()(const T& lhs, const T& rhs) const { + return std::less()(lhs, rhs); + } +}; + +template +struct LessByFirst { + bool operator()(const PairType& lhs, const PairType& rhs) const { + return lhs.first < rhs.first; + } +}; + +// Common test trees. 
+template +using TypedTree = flat_tree, + ContainerT>; +using IntTree = TypedTree>; +using IntPair = std::pair; +using IntPairTree = + flat_tree, std::vector>; +using MoveOnlyTree = + flat_tree, std::vector>; +using EmplaceableTree = + flat_tree, std::vector>; +using ReversedTree = + flat_tree, std::vector>; + +using TreeWithStrangeCompare = + flat_tree>; + +using ::testing::ElementsAre; +using ::testing::IsEmpty; + +template +class FlatTreeTest : public testing::Test {}; +TYPED_TEST_SUITE_P(FlatTreeTest); + +TEST(FlatTree, IsMultipass) { + static_assert(!is_multipass>(), + "InputIterator is not multipass"); + static_assert(!is_multipass>(), + "OutputIterator is not multipass"); + + static_assert(is_multipass::iterator>(), + "ForwardIterator is multipass"); + static_assert(is_multipass::iterator>(), + "BidirectionalIterator is multipass"); + static_assert(is_multipass::iterator>(), + "RandomAccessIterator is multipass"); +} + +// Tests that the compiler generated move operators propagate noexcept +// specifiers. +TEST(FlatTree, NoExcept) { + struct MoveThrows { + MoveThrows(MoveThrows&&) noexcept(false) {} + MoveThrows& operator=(MoveThrows&&) noexcept(false) { return *this; } + }; + + using MoveThrowsTree = + flat_tree, std::array>; + + static_assert(std::is_nothrow_move_constructible::value, + "Error: IntTree is not nothrow move constructible"); + static_assert(std::is_nothrow_move_assignable::value, + "Error: IntTree is not nothrow move assignable"); + + static_assert(!std::is_nothrow_move_constructible::value, + "Error: MoveThrowsTree is nothrow move constructible"); + static_assert(!std::is_nothrow_move_assignable::value, + "Error: MoveThrowsTree is nothrow move assignable"); +} + +// ---------------------------------------------------------------------------- +// Class. + +// Check that flat_tree and its iterators can be instantiated with an +// incomplete type. 
+ +TEST(FlatTree, IncompleteType) { + struct A { + using Tree = flat_tree, std::vector>; + int data; + Tree set_with_incomplete_type; + Tree::iterator it; + Tree::const_iterator cit; + + // We do not declare operator< because clang complains that it's unused. + }; + + A a; +} + +TEST(FlatTree, Stability) { + using Pair = std::pair; + + using Tree = flat_tree, std::vector>; + + // Constructors are stable. + Tree cont({{0, 0}, {1, 0}, {0, 1}, {2, 0}, {0, 2}, {1, 1}}); + + auto AllOfSecondsAreZero = [&cont] { + return absl::c_all_of(cont, + [](const Pair& elem) { return elem.second == 0; }); + }; + + EXPECT_TRUE(AllOfSecondsAreZero()) << "constructor should be stable"; + + // Should not replace existing. + cont.insert(Pair(0, 2)); + cont.insert(Pair(1, 2)); + cont.insert(Pair(2, 2)); + + EXPECT_TRUE(AllOfSecondsAreZero()) << "insert should be stable"; + + cont.insert(Pair(3, 0)); + cont.insert(Pair(3, 2)); + + EXPECT_TRUE(AllOfSecondsAreZero()) << "insert should be stable"; +} + +// ---------------------------------------------------------------------------- +// Types. + +// key_type +// key_compare +// value_type +// value_compare +// pointer +// const_pointer +// reference +// const_reference +// size_type +// difference_type +// iterator +// const_iterator +// reverse_iterator +// const_reverse_iterator + +TEST(FlatTree, Types) { + // These are guaranteed to be portable. + static_assert((std::is_same::value), ""); + static_assert((std::is_same::value), ""); + static_assert((std::is_same, IntTree::key_compare>::value), ""); + static_assert((std::is_same::value), ""); + static_assert((std::is_same::value), + ""); + static_assert((std::is_same::value), ""); + static_assert((std::is_same::value), ""); +} + +// ---------------------------------------------------------------------------- +// Lifetime. 
+ +// flat_tree() +// flat_tree(const Compare& comp) + +TYPED_TEST_P(FlatTreeTest, DefaultConstructor) { + { + TypedTree cont; + EXPECT_THAT(cont, ElementsAre()); + } + + { + TreeWithStrangeCompare cont(NonDefaultConstructibleCompare(0)); + EXPECT_THAT(cont, ElementsAre()); + } +} + +// flat_tree(const flat_tree& x) + +TYPED_TEST_P(FlatTreeTest, CopyConstructor) { + TypedTree original({1, 2, 3, 4}); + TypedTree copied(original); + + EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4)); + + EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4)); + EXPECT_THAT(original, ElementsAre(1, 2, 3, 4)); + EXPECT_EQ(original, copied); +} + +// flat_tree(flat_tree&& x) + +TEST(FlatTree, MoveConstructor) { + int input_range[] = {1, 2, 3, 4}; + + MoveOnlyTree original(std::begin(input_range), std::end(input_range)); + MoveOnlyTree moved(std::move(original)); + + EXPECT_EQ(1U, moved.count(MoveOnlyInt(1))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(2))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(3))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(4))); +} + +// flat_tree(InputIterator first, +// InputIterator last, +// const Compare& comp = Compare()) + +TEST(FlatTree, RangeConstructor) { + { + IntPair input_vals[] = {{1, 1}, {1, 2}, {2, 1}, {2, 2}, {1, 3}, + {2, 3}, {3, 1}, {3, 2}, {3, 3}}; + + IntPairTree first_of(MakeInputIterator(std::begin(input_vals)), + MakeInputIterator(std::end(input_vals))); + EXPECT_THAT(first_of, + ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1))); + } + { + TreeWithStrangeCompare::value_type input_vals[] = {1, 1, 1, 2, 2, + 2, 3, 3, 3}; + + TreeWithStrangeCompare cont(MakeInputIterator(std::begin(input_vals)), + MakeInputIterator(std::end(input_vals)), + NonDefaultConstructibleCompare(0)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3)); + } +} + +// flat_tree(const container_type&) + +TYPED_TEST_P(FlatTreeTest, ContainerCopyConstructor) { + TypeParam items = {1, 2, 3, 4}; + TypedTree tree(items); + + EXPECT_THAT(tree, ElementsAre(1, 2, 3, 4)); + EXPECT_THAT(items, 
ElementsAre(1, 2, 3, 4)); +} + +// flat_tree(container_type&&) + +TEST(FlatTree, ContainerMoveConstructor) { + using Pair = std::pair; + + // Construct an unsorted vector with a duplicate item in it. Sorted by the + // first item, the second allows us to test for stability. Using a move + // only type to ensure the vector is not copied. + std::vector storage; + storage.push_back(Pair(2, MoveOnlyInt(0))); + storage.push_back(Pair(1, MoveOnlyInt(0))); + storage.push_back(Pair(2, MoveOnlyInt(1))); + + using Tree = flat_tree, std::vector>; + Tree tree(std::move(storage)); + + // The list should be two items long, with only the first "2" saved. + ASSERT_EQ(2u, tree.size()); + const Pair& zeroth = *tree.begin(); + ASSERT_EQ(1, zeroth.first); + ASSERT_EQ(0, zeroth.second.data()); + + const Pair& first = *(tree.begin() + 1); + ASSERT_EQ(2, first.first); + ASSERT_EQ(0, first.second.data()); +} + +// flat_tree(std::initializer_list ilist, +// const Compare& comp = Compare()) + +TYPED_TEST_P(FlatTreeTest, InitializerListConstructor) { + { + TypedTree cont({1, 2, 3, 4, 5, 6, 10, 8}); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + TypedTree cont({1, 2, 3, 4, 5, 6, 10, 8}); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + TreeWithStrangeCompare cont({1, 2, 3, 4, 5, 6, 10, 8}, + NonDefaultConstructibleCompare(0)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); + } + { + IntPairTree first_of({{1, 1}, {2, 1}, {1, 2}}); + EXPECT_THAT(first_of, ElementsAre(IntPair(1, 1), IntPair(2, 1))); + } +} + +// flat_tree(sorted_unique_t, +// InputIterator first, +// InputIterator last, +// const Compare& comp = Compare()) + +TEST(FlatTree, SortedUniqueRangeConstructor) { + { + IntPair input_vals[] = {{1, 1}, {2, 1}, {3, 1}}; + + IntPairTree first_of(sorted_unique, + MakeInputIterator(std::begin(input_vals)), + MakeInputIterator(std::end(input_vals))); + EXPECT_THAT(first_of, + ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1))); + } + 
{
+    TreeWithStrangeCompare::value_type input_vals[] = {1, 2, 3};
+
+    TreeWithStrangeCompare cont(sorted_unique,
+                                MakeInputIterator(std::begin(input_vals)),
+                                MakeInputIterator(std::end(input_vals)),
+                                NonDefaultConstructibleCompare(0));
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3));
+  }
+}
+
+// flat_tree(sorted_unique_t, const container_type&)
+
+TYPED_TEST_P(FlatTreeTest, SortedUniqueContainerCopyConstructor) {
+  TypeParam items = {1, 2, 3, 4};
+  TypedTree<TypeParam> tree(sorted_unique, items);
+
+  EXPECT_THAT(tree, ElementsAre(1, 2, 3, 4));
+  EXPECT_THAT(items, ElementsAre(1, 2, 3, 4));
+}
+
+// flat_tree(sorted_unique_t, std::vector<value_type>&&)
+
+TEST(FlatTree, SortedUniqueVectorMoveConstructor) {
+  using Pair = std::pair<int, MoveOnlyInt>;
+
+  std::vector<Pair> storage;
+  storage.push_back(Pair(1, MoveOnlyInt(0)));
+  storage.push_back(Pair(2, MoveOnlyInt(0)));
+
+  using Tree = flat_tree<Pair, identity, LessByFirst<Pair>, std::vector<Pair>>;
+  Tree tree(sorted_unique, std::move(storage));
+
+  ASSERT_EQ(2u, tree.size());
+  const Pair& zeroth = *tree.begin();
+  ASSERT_EQ(1, zeroth.first);
+  ASSERT_EQ(0, zeroth.second.data());
+
+  const Pair& first = *(tree.begin() + 1);
+  ASSERT_EQ(2, first.first);
+  ASSERT_EQ(0, first.second.data());
+}
+
+// flat_tree(sorted_unique_t,
+//           std::initializer_list<value_type> ilist,
+//           const Compare& comp = Compare())
+
+TYPED_TEST_P(FlatTreeTest, SortedUniqueInitializerListConstructor) {
+  {
+    TypedTree<TypeParam> cont(sorted_unique, {1, 2, 3, 4, 5, 6, 8, 10});
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+  }
+  {
+    TypedTree<TypeParam> cont(sorted_unique, {1, 2, 3, 4, 5, 6, 8, 10});
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+  }
+  {
+    TreeWithStrangeCompare cont(sorted_unique, {1, 2, 3, 4, 5, 6, 8, 10},
+                                NonDefaultConstructibleCompare(0));
+    EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10));
+  }
+  {
+    IntPairTree first_of(sorted_unique, {{1, 1}, {2, 1}});
+    EXPECT_THAT(first_of, ElementsAre(IntPair(1, 1), IntPair(2, 1)));
+  }
+}
+
+// ----------------------------------------------------------------------------
+// Assignments. + +// flat_tree& operator=(const flat_tree&) + +TYPED_TEST_P(FlatTreeTest, CopyAssignable) { + TypedTree original({1, 2, 3, 4}); + TypedTree copied; + copied = original; + + EXPECT_THAT(copied, ElementsAre(1, 2, 3, 4)); + EXPECT_THAT(original, ElementsAre(1, 2, 3, 4)); + EXPECT_EQ(original, copied); +} + +// flat_tree& operator=(flat_tree&&) + +TEST(FlatTree, MoveAssignable) { + int input_range[] = {1, 2, 3, 4}; + + MoveOnlyTree original(std::begin(input_range), std::end(input_range)); + MoveOnlyTree moved; + moved = std::move(original); + + EXPECT_EQ(1U, moved.count(MoveOnlyInt(1))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(2))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(3))); + EXPECT_EQ(1U, moved.count(MoveOnlyInt(4))); +} + +// flat_tree& operator=(std::initializer_list ilist) + +TYPED_TEST_P(FlatTreeTest, InitializerListAssignable) { + TypedTree cont({0}); + cont = {1, 2, 3, 4, 5, 6, 10, 8}; + + EXPECT_EQ(0U, cont.count(0)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 8, 10)); +} + +// -------------------------------------------------------------------------- +// Memory management. + +// void reserve(size_type new_capacity) + +TEST(FlatTreeTest, Reserve) { + IntTree cont({1, 2, 3}); + + cont.reserve(5); + EXPECT_LE(5U, cont.capacity()); +} + +// size_type capacity() const + +TEST(FlatTreeTest, Capacity) { + IntTree cont({1, 2, 3}); + + EXPECT_LE(cont.size(), cont.capacity()); + cont.reserve(5); + EXPECT_LE(cont.size(), cont.capacity()); +} + +// void shrink_to_fit() + +TEST(FlatTreeTest, ShrinkToFit) { + IntTree cont({1, 2, 3}); + + IntTree::size_type capacity_before = cont.capacity(); + cont.shrink_to_fit(); + EXPECT_GE(capacity_before, cont.capacity()); +} + +// ---------------------------------------------------------------------------- +// Size management. 
+ +// void clear() + +TYPED_TEST_P(FlatTreeTest, Clear) { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + cont.clear(); + EXPECT_THAT(cont, ElementsAre()); +} + +// size_type size() const + +TYPED_TEST_P(FlatTreeTest, Size) { + TypedTree cont; + + EXPECT_EQ(0U, cont.size()); + cont.insert(2); + EXPECT_EQ(1U, cont.size()); + cont.insert(1); + EXPECT_EQ(2U, cont.size()); + cont.insert(3); + EXPECT_EQ(3U, cont.size()); + cont.erase(cont.begin()); + EXPECT_EQ(2U, cont.size()); + cont.erase(cont.begin()); + EXPECT_EQ(1U, cont.size()); + cont.erase(cont.begin()); + EXPECT_EQ(0U, cont.size()); +} + +// bool empty() const + +TYPED_TEST_P(FlatTreeTest, Empty) { + TypedTree cont; + + EXPECT_TRUE(cont.empty()); + cont.insert(1); + EXPECT_FALSE(cont.empty()); + cont.clear(); + EXPECT_TRUE(cont.empty()); +} + +// ---------------------------------------------------------------------------- +// Iterators. + +// iterator begin() +// const_iterator begin() const +// iterator end() +// const_iterator end() const +// +// reverse_iterator rbegin() +// const_reverse_iterator rbegin() const +// reverse_iterator rend() +// const_reverse_iterator rend() const +// +// const_iterator cbegin() const +// const_iterator cend() const +// const_reverse_iterator crbegin() const +// const_reverse_iterator crend() const + +TYPED_TEST_P(FlatTreeTest, Iterators) { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + + auto size = + static_cast::difference_type>(cont.size()); + + EXPECT_EQ(size, std::distance(cont.begin(), cont.end())); + EXPECT_EQ(size, std::distance(cont.cbegin(), cont.cend())); + EXPECT_EQ(size, std::distance(cont.rbegin(), cont.rend())); + EXPECT_EQ(size, std::distance(cont.crbegin(), cont.crend())); + + { + auto it = cont.begin(); + auto c_it = cont.cbegin(); + EXPECT_EQ(it, c_it); + for (int j = 1; it != cont.end(); ++it, ++c_it, ++j) { + EXPECT_EQ(j, *it); + EXPECT_EQ(j, *c_it); + } + } + { + auto rit = cont.rbegin(); + auto c_rit = cont.crbegin(); + EXPECT_EQ(rit, c_rit); + for (int j 
= static_cast<int>(size); rit != cont.rend();
+         ++rit, ++c_rit, --j) {
+      EXPECT_EQ(j, *rit);
+      EXPECT_EQ(j, *c_rit);
+    }
+  }
+}
+
+// ----------------------------------------------------------------------------
+// Insert operations.
+
+// pair<iterator, bool> insert(const value_type& val)
+
+TYPED_TEST_P(FlatTreeTest, InsertLValue) {
+  TypedTree<TypeParam> cont;
+
+  int value = 2;
+  std::pair<typename TypedTree<TypeParam>::iterator, bool> result =
+      cont.insert(value);
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(cont.begin(), result.first);
+  EXPECT_EQ(1U, cont.size());
+  EXPECT_EQ(2, *result.first);
+
+  value = 1;
+  result = cont.insert(value);
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(cont.begin(), result.first);
+  EXPECT_EQ(2U, cont.size());
+  EXPECT_EQ(1, *result.first);
+
+  value = 3;
+  result = cont.insert(value);
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(std::prev(cont.end()), result.first);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, *result.first);
+
+  value = 3;
+  result = cont.insert(value);
+  EXPECT_FALSE(result.second);
+  EXPECT_EQ(std::prev(cont.end()), result.first);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, *result.first);
+}
+
+// pair<iterator, bool> insert(value_type&& val)
+
+TEST(FlatTree, InsertRValue) {
+  MoveOnlyTree cont;
+
+  std::pair<MoveOnlyTree::iterator, bool> result = cont.insert(MoveOnlyInt(2));
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(cont.begin(), result.first);
+  EXPECT_EQ(1U, cont.size());
+  EXPECT_EQ(2, result.first->data());
+
+  result = cont.insert(MoveOnlyInt(1));
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(cont.begin(), result.first);
+  EXPECT_EQ(2U, cont.size());
+  EXPECT_EQ(1, result.first->data());
+
+  result = cont.insert(MoveOnlyInt(3));
+  EXPECT_TRUE(result.second);
+  EXPECT_EQ(std::prev(cont.end()), result.first);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, result.first->data());
+
+  result = cont.insert(MoveOnlyInt(3));
+  EXPECT_FALSE(result.second);
+  EXPECT_EQ(std::prev(cont.end()), result.first);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, result.first->data());
+}
+
+// iterator insert(const_iterator position_hint,
const value_type& val)
+
+TYPED_TEST_P(FlatTreeTest, InsertPositionLValue) {
+  TypedTree<TypeParam> cont;
+
+  auto result = cont.insert(cont.cend(), 2);
+  EXPECT_EQ(cont.begin(), result);
+  EXPECT_EQ(1U, cont.size());
+  EXPECT_EQ(2, *result);
+
+  result = cont.insert(cont.cend(), 1);
+  EXPECT_EQ(cont.begin(), result);
+  EXPECT_EQ(2U, cont.size());
+  EXPECT_EQ(1, *result);
+
+  result = cont.insert(cont.cend(), 3);
+  EXPECT_EQ(std::prev(cont.end()), result);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, *result);
+
+  result = cont.insert(cont.cend(), 3);
+  EXPECT_EQ(std::prev(cont.end()), result);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, *result);
+}
+
+// iterator insert(const_iterator position_hint, value_type&& val)
+
+TEST(FlatTree, InsertPositionRValue) {
+  MoveOnlyTree cont;
+
+  auto result = cont.insert(cont.cend(), MoveOnlyInt(2));
+  EXPECT_EQ(cont.begin(), result);
+  EXPECT_EQ(1U, cont.size());
+  EXPECT_EQ(2, result->data());
+
+  result = cont.insert(cont.cend(), MoveOnlyInt(1));
+  EXPECT_EQ(cont.begin(), result);
+  EXPECT_EQ(2U, cont.size());
+  EXPECT_EQ(1, result->data());
+
+  result = cont.insert(cont.cend(), MoveOnlyInt(3));
+  EXPECT_EQ(std::prev(cont.end()), result);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, result->data());
+
+  result = cont.insert(cont.cend(), MoveOnlyInt(3));
+  EXPECT_EQ(std::prev(cont.end()), result);
+  EXPECT_EQ(3U, cont.size());
+  EXPECT_EQ(3, result->data());
+}
+
+// template <class InputIterator>
+// void insert(InputIterator first, InputIterator last);
+
+TEST(FlatTree, InsertIterIter) {
+  struct GetKeyFromIntIntPair {
+    const int& operator()(const std::pair<int, int>& p) const {
+      return p.first;
+    }
+  };
+
+  using IntIntMap = flat_tree<int, GetKeyFromIntIntPair, std::less<int>,
+                              std::vector<IntPair>>;
+
+  {
+    IntIntMap cont;
+    IntPair int_pairs[] = {{3, 1}, {1, 1}, {4, 1}, {2, 1}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    std::vector<IntPair> int_pairs;
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{1, 1}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{5, 1}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1), IntPair(5, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{3, 2}, {1, 2}, {4, 2}, {2, 2}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1)));
+  }
+
+  {
+    IntIntMap cont({{1, 1}, {2, 1}, {3, 1}, {4, 1}});
+    IntPair int_pairs[] = {{3, 2}, {1, 2}, {4, 2}, {2, 2}, {7, 2}, {6, 2},
+                           {8, 2}, {5, 2}, {5, 3}, {6, 3}, {7, 3}, {8, 3}};
+    cont.insert(std::begin(int_pairs), std::end(int_pairs));
+    EXPECT_THAT(cont, ElementsAre(IntPair(1, 1), IntPair(2, 1), IntPair(3, 1),
+                                  IntPair(4, 1), IntPair(5, 2), IntPair(6, 2),
+                                  IntPair(7, 2), IntPair(8, 2)));
+  }
+}
+
+// template <class... Args>
+// pair<iterator, bool> emplace(Args&&...
args)
+
+TYPED_TEST_P(FlatTreeTest, Emplace) {
+  {
+    EmplaceableTree cont;
+
+    std::pair<EmplaceableTree::iterator, bool> result = cont.emplace();
+    EXPECT_TRUE(result.second);
+    EXPECT_EQ(cont.begin(), result.first);
+    EXPECT_EQ(1U, cont.size());
+    EXPECT_EQ(Emplaceable(), *cont.begin());
+
+    result = cont.emplace(2, 3.5);
+    EXPECT_TRUE(result.second);
+    EXPECT_EQ(std::next(cont.begin()), result.first);
+    EXPECT_EQ(2U, cont.size());
+    EXPECT_EQ(Emplaceable(2, 3.5), *result.first);
+
+    result = cont.emplace(2, 3.5);
+    EXPECT_FALSE(result.second);
+    EXPECT_EQ(std::next(cont.begin()), result.first);
+    EXPECT_EQ(2U, cont.size());
+    EXPECT_EQ(Emplaceable(2, 3.5), *result.first);
+  }
+  {
+    TypedTree<TypeParam> cont;
+
+    std::pair<typename TypedTree<TypeParam>::iterator, bool> result =
+        cont.emplace(2);
+    EXPECT_TRUE(result.second);
+    EXPECT_EQ(cont.begin(), result.first);
+    EXPECT_EQ(1U, cont.size());
+    EXPECT_EQ(2, *result.first);
+  }
+}
+
+// template <class... Args>
+// iterator emplace_hint(const_iterator position_hint, Args&&... args)
+
+TYPED_TEST_P(FlatTreeTest, EmplacePosition) {
+  {
+    EmplaceableTree cont;
+
+    auto result = cont.emplace_hint(cont.cend());
+    EXPECT_EQ(cont.begin(), result);
+    EXPECT_EQ(1U, cont.size());
+    EXPECT_EQ(Emplaceable(), *cont.begin());
+
+    result = cont.emplace_hint(cont.cend(), 2, 3.5);
+    EXPECT_EQ(std::next(cont.begin()), result);
+    EXPECT_EQ(2U, cont.size());
+    EXPECT_EQ(Emplaceable(2, 3.5), *result);
+
+    result = cont.emplace_hint(cont.cbegin(), 2, 3.5);
+    EXPECT_EQ(std::next(cont.begin()), result);
+    EXPECT_EQ(2U, cont.size());
+    EXPECT_EQ(Emplaceable(2, 3.5), *result);
+  }
+  {
+    TypedTree<TypeParam> cont;
+
+    auto result = cont.emplace_hint(cont.cend(), 2);
+    EXPECT_EQ(cont.begin(), result);
+    EXPECT_EQ(1U, cont.size());
+    EXPECT_EQ(2, *result);
+  }
+}
+
+// ----------------------------------------------------------------------------
+// Underlying type operations.
+ +// underlying_type extract() && +TYPED_TEST_P(FlatTreeTest, Extract) { + TypedTree cont; + cont.emplace(3); + cont.emplace(1); + cont.emplace(2); + cont.emplace(4); + + TypeParam body = std::move(cont).extract(); + EXPECT_THAT(cont, IsEmpty()); + EXPECT_THAT(body, ElementsAre(1, 2, 3, 4)); +} + +// replace(underlying_type&&) +TYPED_TEST_P(FlatTreeTest, Replace) { + TypeParam body = {1, 2, 3, 4}; + TypedTree cont; + cont.replace(std::move(body)); + + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4)); +} + +// ---------------------------------------------------------------------------- +// Erase operations. + +// iterator erase(const_iterator position_hint) + +TYPED_TEST_P(FlatTreeTest, ErasePosition) { + { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + + auto it = cont.erase(std::next(cont.cbegin(), 3)); + EXPECT_EQ(std::next(cont.begin(), 3), it); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 0)); + EXPECT_EQ(cont.begin(), it); + EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 5)); + EXPECT_EQ(cont.end(), it); + EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7)); + + it = cont.erase(std::next(cont.cbegin(), 1)); + EXPECT_EQ(std::next(cont.begin()), it); + EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7)); + + it = cont.erase(std::next(cont.cbegin(), 2)); + EXPECT_EQ(std::next(cont.begin(), 2), it); + EXPECT_THAT(cont, ElementsAre(2, 5, 7)); + + it = cont.erase(std::next(cont.cbegin(), 2)); + EXPECT_EQ(std::next(cont.begin(), 2), it); + EXPECT_THAT(cont, ElementsAre(2, 5)); + + it = cont.erase(std::next(cont.cbegin(), 0)); + EXPECT_EQ(std::next(cont.begin(), 0), it); + EXPECT_THAT(cont, ElementsAre(5)); + + it = cont.erase(cont.cbegin()); + EXPECT_EQ(cont.begin(), it); + EXPECT_EQ(cont.end(), it); + } + // This is LWG #2059. + // There is a potential ambiguity between erase with an iterator and erase + // with a key, if key has a templated constructor. 
+ { + using T = TemplateConstructor; + + flat_tree, std::vector> cont; + T v(0); + + auto it = cont.find(v); + if (it != cont.end()) + cont.erase(it); + } +} + +// iterator erase(const_iterator first, const_iterator last) + +TYPED_TEST_P(FlatTreeTest, EraseRange) { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + + auto it = + cont.erase(std::next(cont.cbegin(), 5), std::next(cont.cbegin(), 5)); + EXPECT_EQ(std::next(cont.begin(), 5), it); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 3), std::next(cont.cbegin(), 4)); + EXPECT_EQ(std::next(cont.begin(), 3), it); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 2), std::next(cont.cbegin(), 5)); + EXPECT_EQ(std::next(cont.begin(), 2), it); + EXPECT_THAT(cont, ElementsAre(1, 2, 7, 8)); + + it = cont.erase(std::next(cont.cbegin(), 0), std::next(cont.cbegin(), 2)); + EXPECT_EQ(std::next(cont.begin(), 0), it); + EXPECT_THAT(cont, ElementsAre(7, 8)); + + it = cont.erase(cont.cbegin(), cont.cend()); + EXPECT_EQ(cont.begin(), it); + EXPECT_EQ(cont.end(), it); +} + +// size_type erase(const key_type& key) + +TYPED_TEST_P(FlatTreeTest, EraseKey) { + TypedTree cont({1, 2, 3, 4, 5, 6, 7, 8}); + + EXPECT_EQ(0U, cont.erase(9)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8)); + + EXPECT_EQ(1U, cont.erase(4)); + EXPECT_THAT(cont, ElementsAre(1, 2, 3, 5, 6, 7, 8)); + + EXPECT_EQ(1U, cont.erase(1)); + EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7, 8)); + + EXPECT_EQ(1U, cont.erase(8)); + EXPECT_THAT(cont, ElementsAre(2, 3, 5, 6, 7)); + + EXPECT_EQ(1U, cont.erase(3)); + EXPECT_THAT(cont, ElementsAre(2, 5, 6, 7)); + + EXPECT_EQ(1U, cont.erase(6)); + EXPECT_THAT(cont, ElementsAre(2, 5, 7)); + + EXPECT_EQ(1U, cont.erase(7)); + EXPECT_THAT(cont, ElementsAre(2, 5)); + + EXPECT_EQ(1U, cont.erase(2)); + EXPECT_THAT(cont, ElementsAre(5)); + + EXPECT_EQ(1U, cont.erase(5)); + EXPECT_THAT(cont, ElementsAre()); +} + 
+TYPED_TEST_P(FlatTreeTest, EraseEndDeath) { + { + TypedTree tree; + ASSERT_DEATH_IF_SUPPORTED(tree.erase(tree.cend()), ""); + } + + { + TypedTree tree = {1, 2, 3, 4}; + ASSERT_DEATH_IF_SUPPORTED(tree.erase(tree.find(5)), ""); + } +} + +// ---------------------------------------------------------------------------- +// Comparators. + +// key_compare key_comp() const + +TEST(FlatTree, KeyComp) { + ReversedTree cont({1, 2, 3, 4, 5}); + + EXPECT_TRUE(absl::c_is_sorted(cont, cont.key_comp())); + int new_elements[] = {6, 7, 8, 9, 10}; + std::copy(std::begin(new_elements), std::end(new_elements), + std::inserter(cont, cont.end())); + EXPECT_TRUE(absl::c_is_sorted(cont, cont.key_comp())); +} + +// value_compare value_comp() const + +TEST(FlatTree, ValueComp) { + ReversedTree cont({1, 2, 3, 4, 5}); + + EXPECT_TRUE(absl::c_is_sorted(cont, cont.value_comp())); + int new_elements[] = {6, 7, 8, 9, 10}; + std::copy(std::begin(new_elements), std::end(new_elements), + std::inserter(cont, cont.end())); + EXPECT_TRUE(absl::c_is_sorted(cont, cont.value_comp())); +} + +// ---------------------------------------------------------------------------- +// Search operations. 
+ +// size_type count(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, Count) { + const TypedTree cont({5, 6, 7, 8, 9, 10, 11, 12}); + + EXPECT_EQ(1U, cont.count(5)); + EXPECT_EQ(1U, cont.count(6)); + EXPECT_EQ(1U, cont.count(7)); + EXPECT_EQ(1U, cont.count(8)); + EXPECT_EQ(1U, cont.count(9)); + EXPECT_EQ(1U, cont.count(10)); + EXPECT_EQ(1U, cont.count(11)); + EXPECT_EQ(1U, cont.count(12)); + EXPECT_EQ(0U, cont.count(4)); +} + +// iterator find(const key_type& key) +// const_iterator find(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, Find) { + { + TypedTree cont({5, 6, 7, 8, 9, 10, 11, 12}); + + EXPECT_EQ(cont.begin(), cont.find(5)); + EXPECT_EQ(std::next(cont.begin()), cont.find(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4)); + } + { + const TypedTree cont({5, 6, 7, 8, 9, 10, 11, 12}); + + EXPECT_EQ(cont.begin(), cont.find(5)); + EXPECT_EQ(std::next(cont.begin()), cont.find(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.find(7)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.find(8)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.find(9)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.find(10)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.find(11)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.find(12)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.find(4)); + } +} + +// bool contains(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, Contains) { + const TypedTree cont({5, 6, 7, 8, 9, 10, 11, 12}); + + EXPECT_TRUE(cont.contains(5)); + EXPECT_TRUE(cont.contains(6)); + EXPECT_TRUE(cont.contains(7)); + EXPECT_TRUE(cont.contains(8)); + EXPECT_TRUE(cont.contains(9)); + EXPECT_TRUE(cont.contains(10)); + 
EXPECT_TRUE(cont.contains(11)); + EXPECT_TRUE(cont.contains(12)); + EXPECT_FALSE(cont.contains(4)); +} + +// pair equal_range(const key_type& key) +// pair equal_range(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, EqualRange) { + { + TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + std::pair::iterator, + typename TypedTree::iterator> + result = cont.equal_range(5); + EXPECT_EQ(std::next(cont.begin(), 0), result.first); + EXPECT_EQ(std::next(cont.begin(), 1), result.second); + result = cont.equal_range(7); + EXPECT_EQ(std::next(cont.begin(), 1), result.first); + EXPECT_EQ(std::next(cont.begin(), 2), result.second); + result = cont.equal_range(9); + EXPECT_EQ(std::next(cont.begin(), 2), result.first); + EXPECT_EQ(std::next(cont.begin(), 3), result.second); + result = cont.equal_range(11); + EXPECT_EQ(std::next(cont.begin(), 3), result.first); + EXPECT_EQ(std::next(cont.begin(), 4), result.second); + result = cont.equal_range(13); + EXPECT_EQ(std::next(cont.begin(), 4), result.first); + EXPECT_EQ(std::next(cont.begin(), 5), result.second); + result = cont.equal_range(15); + EXPECT_EQ(std::next(cont.begin(), 5), result.first); + EXPECT_EQ(std::next(cont.begin(), 6), result.second); + result = cont.equal_range(17); + EXPECT_EQ(std::next(cont.begin(), 6), result.first); + EXPECT_EQ(std::next(cont.begin(), 7), result.second); + result = cont.equal_range(19); + EXPECT_EQ(std::next(cont.begin(), 7), result.first); + EXPECT_EQ(std::next(cont.begin(), 8), result.second); + result = cont.equal_range(4); + EXPECT_EQ(std::next(cont.begin(), 0), result.first); + EXPECT_EQ(std::next(cont.begin(), 0), result.second); + result = cont.equal_range(6); + EXPECT_EQ(std::next(cont.begin(), 1), result.first); + EXPECT_EQ(std::next(cont.begin(), 1), result.second); + result = cont.equal_range(8); + EXPECT_EQ(std::next(cont.begin(), 2), result.first); + EXPECT_EQ(std::next(cont.begin(), 2), result.second); + result = cont.equal_range(10); + EXPECT_EQ(std::next(cont.begin(), 3), 
result.first); + EXPECT_EQ(std::next(cont.begin(), 3), result.second); + result = cont.equal_range(12); + EXPECT_EQ(std::next(cont.begin(), 4), result.first); + EXPECT_EQ(std::next(cont.begin(), 4), result.second); + result = cont.equal_range(14); + EXPECT_EQ(std::next(cont.begin(), 5), result.first); + EXPECT_EQ(std::next(cont.begin(), 5), result.second); + result = cont.equal_range(16); + EXPECT_EQ(std::next(cont.begin(), 6), result.first); + EXPECT_EQ(std::next(cont.begin(), 6), result.second); + result = cont.equal_range(18); + EXPECT_EQ(std::next(cont.begin(), 7), result.first); + EXPECT_EQ(std::next(cont.begin(), 7), result.second); + result = cont.equal_range(20); + EXPECT_EQ(std::next(cont.begin(), 8), result.first); + EXPECT_EQ(std::next(cont.begin(), 8), result.second); + } + { + const TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + std::pair::const_iterator, + typename TypedTree::const_iterator> + result = cont.equal_range(5); + EXPECT_EQ(std::next(cont.begin(), 0), result.first); + EXPECT_EQ(std::next(cont.begin(), 1), result.second); + result = cont.equal_range(7); + EXPECT_EQ(std::next(cont.begin(), 1), result.first); + EXPECT_EQ(std::next(cont.begin(), 2), result.second); + result = cont.equal_range(9); + EXPECT_EQ(std::next(cont.begin(), 2), result.first); + EXPECT_EQ(std::next(cont.begin(), 3), result.second); + result = cont.equal_range(11); + EXPECT_EQ(std::next(cont.begin(), 3), result.first); + EXPECT_EQ(std::next(cont.begin(), 4), result.second); + result = cont.equal_range(13); + EXPECT_EQ(std::next(cont.begin(), 4), result.first); + EXPECT_EQ(std::next(cont.begin(), 5), result.second); + result = cont.equal_range(15); + EXPECT_EQ(std::next(cont.begin(), 5), result.first); + EXPECT_EQ(std::next(cont.begin(), 6), result.second); + result = cont.equal_range(17); + EXPECT_EQ(std::next(cont.begin(), 6), result.first); + EXPECT_EQ(std::next(cont.begin(), 7), result.second); + result = cont.equal_range(19); + EXPECT_EQ(std::next(cont.begin(), 
7), result.first); + EXPECT_EQ(std::next(cont.begin(), 8), result.second); + result = cont.equal_range(4); + EXPECT_EQ(std::next(cont.begin(), 0), result.first); + EXPECT_EQ(std::next(cont.begin(), 0), result.second); + result = cont.equal_range(6); + EXPECT_EQ(std::next(cont.begin(), 1), result.first); + EXPECT_EQ(std::next(cont.begin(), 1), result.second); + result = cont.equal_range(8); + EXPECT_EQ(std::next(cont.begin(), 2), result.first); + EXPECT_EQ(std::next(cont.begin(), 2), result.second); + result = cont.equal_range(10); + EXPECT_EQ(std::next(cont.begin(), 3), result.first); + EXPECT_EQ(std::next(cont.begin(), 3), result.second); + result = cont.equal_range(12); + EXPECT_EQ(std::next(cont.begin(), 4), result.first); + EXPECT_EQ(std::next(cont.begin(), 4), result.second); + result = cont.equal_range(14); + EXPECT_EQ(std::next(cont.begin(), 5), result.first); + EXPECT_EQ(std::next(cont.begin(), 5), result.second); + result = cont.equal_range(16); + EXPECT_EQ(std::next(cont.begin(), 6), result.first); + EXPECT_EQ(std::next(cont.begin(), 6), result.second); + result = cont.equal_range(18); + EXPECT_EQ(std::next(cont.begin(), 7), result.first); + EXPECT_EQ(std::next(cont.begin(), 7), result.second); + result = cont.equal_range(20); + EXPECT_EQ(std::next(cont.begin(), 8), result.first); + EXPECT_EQ(std::next(cont.begin(), 8), result.second); + } +} + +// iterator lower_bound(const key_type& key); +// const_iterator lower_bound(const key_type& key) const; + +TYPED_TEST_P(FlatTreeTest, LowerBound) { + { + TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + EXPECT_EQ(cont.begin(), cont.lower_bound(5)); + EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15)); + EXPECT_EQ(std::next(cont.begin(), 6), 
cont.lower_bound(17)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19)); + EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4)); + EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20)); + } + { + const TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + EXPECT_EQ(cont.begin(), cont.lower_bound(5)); + EXPECT_EQ(std::next(cont.begin()), cont.lower_bound(7)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(9)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(11)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(13)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(15)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(17)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(19)); + EXPECT_EQ(std::next(cont.begin(), 0), cont.lower_bound(4)); + EXPECT_EQ(std::next(cont.begin(), 1), cont.lower_bound(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.lower_bound(8)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.lower_bound(10)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.lower_bound(12)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.lower_bound(14)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.lower_bound(16)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.lower_bound(18)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.lower_bound(20)); + } +} + +// iterator upper_bound(const key_type& key) +// const_iterator upper_bound(const key_type& key) const + +TYPED_TEST_P(FlatTreeTest, UpperBound) { + { + TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + 
EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19)); + EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4)); + EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(14)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20)); + } + { + const TypedTree cont({5, 7, 9, 11, 13, 15, 17, 19}); + + EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(5)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(7)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(9)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(11)); + EXPECT_EQ(std::next(cont.begin(), 5), cont.upper_bound(13)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(15)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(17)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(19)); + EXPECT_EQ(std::next(cont.begin(), 0), cont.upper_bound(4)); + EXPECT_EQ(std::next(cont.begin(), 1), cont.upper_bound(6)); + EXPECT_EQ(std::next(cont.begin(), 2), cont.upper_bound(8)); + EXPECT_EQ(std::next(cont.begin(), 3), cont.upper_bound(10)); + EXPECT_EQ(std::next(cont.begin(), 4), cont.upper_bound(12)); + EXPECT_EQ(std::next(cont.begin(), 5), 
cont.upper_bound(14)); + EXPECT_EQ(std::next(cont.begin(), 6), cont.upper_bound(16)); + EXPECT_EQ(std::next(cont.begin(), 7), cont.upper_bound(18)); + EXPECT_EQ(std::next(cont.begin(), 8), cont.upper_bound(20)); + } +} + +// ---------------------------------------------------------------------------- +// General operations. + +// void swap(flat_tree& other) +// void swap(flat_tree& lhs, flat_tree& rhs) + +TYPED_TEST_P(FlatTreeTest, Swap) { + TypedTree x({1, 2, 3}); + TypedTree y({4}); + swap(x, y); + EXPECT_THAT(x, ElementsAre(4)); + EXPECT_THAT(y, ElementsAre(1, 2, 3)); + + y.swap(x); + EXPECT_THAT(x, ElementsAre(1, 2, 3)); + EXPECT_THAT(y, ElementsAre(4)); +} + +// bool operator==(const flat_tree& lhs, const flat_tree& rhs) +// bool operator!=(const flat_tree& lhs, const flat_tree& rhs) +// bool operator<(const flat_tree& lhs, const flat_tree& rhs) +// bool operator>(const flat_tree& lhs, const flat_tree& rhs) +// bool operator<=(const flat_tree& lhs, const flat_tree& rhs) +// bool operator>=(const flat_tree& lhs, const flat_tree& rhs) + +TEST(FlatTree, Comparison) { + // Provided comparator does not participate in comparison. 
+ ReversedTree biggest({3}); + ReversedTree smallest({1}); + ReversedTree middle({1, 2}); + + EXPECT_EQ(biggest, biggest); + EXPECT_NE(biggest, smallest); + EXPECT_LT(smallest, middle); + EXPECT_LE(smallest, middle); + EXPECT_LE(middle, middle); + EXPECT_GT(biggest, middle); + EXPECT_GE(biggest, middle); + EXPECT_GE(biggest, biggest); +} + +TYPED_TEST_P(FlatTreeTest, SupportsEraseIf) { + TypedTree x; + EXPECT_EQ(0u, EraseIf(x, [](int) { return false; })); + EXPECT_THAT(x, ElementsAre()); + + x = {1, 2, 3}; + EXPECT_EQ(1u, EraseIf(x, [](int elem) { return !(elem & 1); })); + EXPECT_THAT(x, ElementsAre(1, 3)); + + x = {1, 2, 3, 4}; + EXPECT_EQ(2u, EraseIf(x, [](int elem) { return elem & 1; })); + EXPECT_THAT(x, ElementsAre(2, 4)); +} + +REGISTER_TYPED_TEST_SUITE_P(FlatTreeTest, + DefaultConstructor, + CopyConstructor, + ContainerCopyConstructor, + InitializerListConstructor, + SortedUniqueContainerCopyConstructor, + SortedUniqueInitializerListConstructor, + CopyAssignable, + InitializerListAssignable, + Clear, + Size, + Empty, + Iterators, + InsertLValue, + InsertPositionLValue, + Emplace, + EmplacePosition, + Extract, + Replace, + ErasePosition, + EraseRange, + EraseKey, + EraseEndDeath, + Count, + Find, + Contains, + EqualRange, + LowerBound, + UpperBound, + Swap, + SupportsEraseIf); + +using IntSequenceContainers = + ::testing::Types, std::vector>; +INSTANTIATE_TYPED_TEST_SUITE_P(My, FlatTreeTest, IntSequenceContainers); + +} // namespace +} // namespace flat_containers_internal +} // namespace webrtc diff --git a/rtc_base/containers/identity.h b/rtc_base/containers/identity.h new file mode 100644 index 0000000000..29592931bd --- /dev/null +++ b/rtc_base/containers/identity.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_IDENTITY_H_ +#define RTC_BASE_CONTAINERS_IDENTITY_H_ + +#include + +namespace webrtc { + +// Implementation of C++20's std::identity. +// +// Reference: +// - https://en.cppreference.com/w/cpp/utility/functional/identity +// - https://wg21.link/func.identity +struct identity { + template + constexpr T&& operator()(T&& t) const noexcept { + return std::forward(t); + } + + using is_transparent = void; +}; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_IDENTITY_H_ diff --git a/rtc_base/containers/invoke.h b/rtc_base/containers/invoke.h new file mode 100644 index 0000000000..5d17a70beb --- /dev/null +++ b/rtc_base/containers/invoke.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_INVOKE_H_ +#define RTC_BASE_CONTAINERS_INVOKE_H_ + +#include +#include + +namespace webrtc { + +namespace invoke_internal { + +// Helper struct and alias to deduce the class type from a member function +// pointer or member object pointer. +template +struct member_pointer_class {}; + +template +struct member_pointer_class { + using type = ClassT; +}; + +template +using member_pointer_class_t = typename member_pointer_class::type; + +// Utility struct to detect specializations of std::reference_wrapper. 
+template +struct is_reference_wrapper : std::false_type {}; + +template +struct is_reference_wrapper> : std::true_type {}; + +// Small helpers used below in invoke_internal::invoke to make the SFINAE more +// concise. +template +const bool& IsMemFunPtr = + std::is_member_function_pointer>::value; + +template +const bool& IsMemObjPtr = std::is_member_object_pointer>::value; + +template >> +const bool& IsMemPtrToBaseOf = + std::is_base_of>::value; + +template +const bool& IsRefWrapper = is_reference_wrapper>::value; + +template +using EnableIf = std::enable_if_t; + +// Invokes a member function pointer on a reference to an object of a suitable +// type. Covers bullet 1 of the INVOKE definition. +// +// Reference: https://wg21.link/func.require#1.1 +template && IsMemPtrToBaseOf> = true> +constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1, Args&&... args) { + return (std::forward(t1).*f)(std::forward(args)...); +} + +// Invokes a member function pointer on a std::reference_wrapper to an object of +// a suitable type. Covers bullet 2 of the INVOKE definition. +// +// Reference: https://wg21.link/func.require#1.2 +template && IsRefWrapper> = true> +constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1, Args&&... args) { + return (t1.get().*f)(std::forward(args)...); +} + +// Invokes a member function pointer on a pointer-like type to an object of a +// suitable type. Covers bullet 3 of the INVOKE definition. +// +// Reference: https://wg21.link/func.require#1.3 +template && !IsMemPtrToBaseOf && + !IsRefWrapper> = true> +constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1, Args&&... args) { + return ((*std::forward(t1)).*f)(std::forward(args)...); +} + +// Invokes a member object pointer on a reference to an object of a suitable +// type. Covers bullet 4 of the INVOKE definition. 
+// +// Reference: https://wg21.link/func.require#1.4 +template && IsMemPtrToBaseOf> = true> +constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1) { + return std::forward(t1).*f; +} + +// Invokes a member object pointer on a std::reference_wrapper to an object of +// a suitable type. Covers bullet 5 of the INVOKE definition. +// +// Reference: https://wg21.link/func.require#1.5 +template && IsRefWrapper> = true> +constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1) { + return t1.get().*f; +} + +// Invokes a member object pointer on a pointer-like type to an object of a +// suitable type. Covers bullet 6 of the INVOKE definition. +// +// Reference: https://wg21.link/func.require#1.6 +template && !IsMemPtrToBaseOf && + !IsRefWrapper> = true> +constexpr decltype(auto) InvokeImpl(F&& f, T1&& t1) { + return (*std::forward(t1)).*f; +} + +// Invokes a regular function or function object. Covers bullet 7 of the INVOKE +// definition. +// +// Reference: https://wg21.link/func.require#1.7 +template +constexpr decltype(auto) InvokeImpl(F&& f, Args&&... args) { + return std::forward(f)(std::forward(args)...); +} + +} // namespace invoke_internal + +// Implementation of C++17's std::invoke. This is not based on implementation +// referenced in original std::invoke proposal, but rather a manual +// implementation, so that it can be constexpr. +// +// References: +// - https://wg21.link/n4169#implementability +// - https://en.cppreference.com/w/cpp/utility/functional/invoke +// - https://wg21.link/func.invoke +template +constexpr decltype(auto) invoke(F&& f, Args&&... 
args) { + return invoke_internal::InvokeImpl(std::forward(f), + std::forward(args)...); +} + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_INVOKE_H_ diff --git a/rtc_base/containers/move_only_int.h b/rtc_base/containers/move_only_int.h new file mode 100644 index 0000000000..8f745aa688 --- /dev/null +++ b/rtc_base/containers/move_only_int.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_MOVE_ONLY_INT_H_ +#define RTC_BASE_CONTAINERS_MOVE_ONLY_INT_H_ + +namespace webrtc { + +// A move-only class that holds an integer. This is designed for testing +// containers. See also CopyOnlyInt. 
+class MoveOnlyInt { + public: + explicit MoveOnlyInt(int data = 1) : data_(data) {} + MoveOnlyInt(const MoveOnlyInt& other) = delete; + MoveOnlyInt& operator=(const MoveOnlyInt& other) = delete; + MoveOnlyInt(MoveOnlyInt&& other) : data_(other.data_) { other.data_ = 0; } + ~MoveOnlyInt() { data_ = 0; } + + MoveOnlyInt& operator=(MoveOnlyInt&& other) { + data_ = other.data_; + other.data_ = 0; + return *this; + } + + friend bool operator==(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return lhs.data_ == rhs.data_; + } + + friend bool operator!=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !operator==(lhs, rhs); + } + + friend bool operator<(const MoveOnlyInt& lhs, int rhs) { + return lhs.data_ < rhs; + } + + friend bool operator<(int lhs, const MoveOnlyInt& rhs) { + return lhs < rhs.data_; + } + + friend bool operator<(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return lhs.data_ < rhs.data_; + } + + friend bool operator>(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return rhs < lhs; + } + + friend bool operator<=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !(rhs < lhs); + } + + friend bool operator>=(const MoveOnlyInt& lhs, const MoveOnlyInt& rhs) { + return !(lhs < rhs); + } + + int data() const { return data_; } + + private: + volatile int data_; +}; + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_MOVE_ONLY_INT_H_ diff --git a/rtc_base/containers/not_fn.h b/rtc_base/containers/not_fn.h new file mode 100644 index 0000000000..39cfd2763c --- /dev/null +++ b/rtc_base/containers/not_fn.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +// This implementation is borrowed from Chromium. + +#ifndef RTC_BASE_CONTAINERS_NOT_FN_H_ +#define RTC_BASE_CONTAINERS_NOT_FN_H_ + +#include +#include + +#include "rtc_base/containers/invoke.h" + +namespace webrtc { + +namespace not_fn_internal { + +template +struct NotFnImpl { + F f; + + template + constexpr decltype(auto) operator()(Args&&... args) & noexcept { + return !webrtc::invoke(f, std::forward(args)...); + } + + template + constexpr decltype(auto) operator()(Args&&... args) const& noexcept { + return !webrtc::invoke(f, std::forward(args)...); + } + + template + constexpr decltype(auto) operator()(Args&&... args) && noexcept { + return !webrtc::invoke(std::move(f), std::forward(args)...); + } + + template + constexpr decltype(auto) operator()(Args&&... args) const&& noexcept { + return !webrtc::invoke(std::move(f), std::forward(args)...); + } +}; + +} // namespace not_fn_internal + +// Implementation of C++17's std::not_fn. +// +// Reference: +// - https://en.cppreference.com/w/cpp/utility/functional/not_fn +// - https://wg21.link/func.not.fn +template +constexpr not_fn_internal::NotFnImpl> not_fn(F&& f) { + return {std::forward(f)}; +} + +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_NOT_FN_H_ diff --git a/rtc_base/containers/void_t.h b/rtc_base/containers/void_t.h new file mode 100644 index 0000000000..62c57d4bec --- /dev/null +++ b/rtc_base/containers/void_t.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// This implementation is borrowed from Chromium. 
+ +#ifndef RTC_BASE_CONTAINERS_VOID_T_H_ +#define RTC_BASE_CONTAINERS_VOID_T_H_ + +namespace webrtc { +namespace void_t_internal { +// Implementation detail of webrtc::void_t below. +template +struct make_void { + using type = void; +}; + +} // namespace void_t_internal + +// webrtc::void_t is an implementation of std::void_t from C++17. +// +// We use |webrtc::void_t_internal::make_void| as a helper struct to avoid a +// C++14 defect: +// http://en.cppreference.com/w/cpp/types/void_t +// http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558 +template +using void_t = typename ::webrtc::void_t_internal::make_void::type; +} // namespace webrtc + +#endif // RTC_BASE_CONTAINERS_VOID_T_H_ diff --git a/rtc_base/copy_on_write_buffer.cc b/rtc_base/copy_on_write_buffer.cc index 73182a12b1..f3cc710f85 100644 --- a/rtc_base/copy_on_write_buffer.cc +++ b/rtc_base/copy_on_write_buffer.cc @@ -32,16 +32,15 @@ CopyOnWriteBuffer::CopyOnWriteBuffer(const std::string& s) : CopyOnWriteBuffer(s.data(), s.length()) {} CopyOnWriteBuffer::CopyOnWriteBuffer(size_t size) - : buffer_(size > 0 ? new RefCountedObject(size) : nullptr), + : buffer_(size > 0 ? new RefCountedBuffer(size) : nullptr), offset_(0), size_(size) { RTC_DCHECK(IsConsistent()); } CopyOnWriteBuffer::CopyOnWriteBuffer(size_t size, size_t capacity) - : buffer_(size > 0 || capacity > 0 - ? new RefCountedObject(size, capacity) - : nullptr), + : buffer_(size > 0 || capacity > 0 ? 
new RefCountedBuffer(size, capacity) + : nullptr), offset_(0), size_(size) { RTC_DCHECK(IsConsistent()); @@ -61,7 +60,7 @@ void CopyOnWriteBuffer::SetSize(size_t size) { RTC_DCHECK(IsConsistent()); if (!buffer_) { if (size > 0) { - buffer_ = new RefCountedObject(size); + buffer_ = new RefCountedBuffer(size); offset_ = 0; size_ = size; } @@ -84,7 +83,7 @@ void CopyOnWriteBuffer::EnsureCapacity(size_t new_capacity) { RTC_DCHECK(IsConsistent()); if (!buffer_) { if (new_capacity > 0) { - buffer_ = new RefCountedObject(0, new_capacity); + buffer_ = new RefCountedBuffer(0, new_capacity); offset_ = 0; size_ = 0; } @@ -105,7 +104,7 @@ void CopyOnWriteBuffer::Clear() { if (buffer_->HasOneRef()) { buffer_->Clear(); } else { - buffer_ = new RefCountedObject(0, capacity()); + buffer_ = new RefCountedBuffer(0, capacity()); } offset_ = 0; size_ = 0; @@ -117,8 +116,8 @@ void CopyOnWriteBuffer::UnshareAndEnsureCapacity(size_t new_capacity) { return; } - buffer_ = new RefCountedObject(buffer_->data() + offset_, size_, - new_capacity); + buffer_ = + new RefCountedBuffer(buffer_->data() + offset_, size_, new_capacity); offset_ = 0; RTC_DCHECK(IsConsistent()); } diff --git a/rtc_base/copy_on_write_buffer.h b/rtc_base/copy_on_write_buffer.h index 68c6ad53d6..526cbe5c5c 100644 --- a/rtc_base/copy_on_write_buffer.h +++ b/rtc_base/copy_on_write_buffer.h @@ -86,7 +86,7 @@ class RTC_EXPORT CopyOnWriteBuffer { template ::value>::type* = nullptr> - T* data() { + T* MutableData() { RTC_DCHECK(IsConsistent()); if (!buffer_) { return nullptr; @@ -146,11 +146,6 @@ class RTC_EXPORT CopyOnWriteBuffer { return !(*this == buf); } - uint8_t& operator[](size_t index) { - RTC_DCHECK_LT(index, size()); - return data()[index]; - } - uint8_t operator[](size_t index) const { RTC_DCHECK_LT(index, size()); return cdata()[index]; @@ -164,9 +159,9 @@ class RTC_EXPORT CopyOnWriteBuffer { void SetData(const T* data, size_t size) { RTC_DCHECK(IsConsistent()); if (!buffer_) { - buffer_ = size > 0 ? 
new RefCountedObject(data, size) : nullptr; + buffer_ = size > 0 ? new RefCountedBuffer(data, size) : nullptr; } else if (!buffer_->HasOneRef()) { - buffer_ = new RefCountedObject(data, size, capacity()); + buffer_ = new RefCountedBuffer(data, size, capacity()); } else { buffer_->SetData(data, size); } @@ -201,7 +196,7 @@ class RTC_EXPORT CopyOnWriteBuffer { void AppendData(const T* data, size_t size) { RTC_DCHECK(IsConsistent()); if (!buffer_) { - buffer_ = new RefCountedObject(data, size); + buffer_ = new RefCountedBuffer(data, size); offset_ = 0; size_ = size; RTC_DCHECK(IsConsistent()); @@ -247,7 +242,7 @@ class RTC_EXPORT CopyOnWriteBuffer { // Swaps two buffers. friend void swap(CopyOnWriteBuffer& a, CopyOnWriteBuffer& b) { - std::swap(a.buffer_, b.buffer_); + a.buffer_.swap(b.buffer_); std::swap(a.offset_, b.offset_); std::swap(a.size_, b.size_); } @@ -262,6 +257,7 @@ class RTC_EXPORT CopyOnWriteBuffer { } private: + using RefCountedBuffer = FinalRefCountedObject; // Create a copy of the underlying data if it is referenced from other Buffer // objects or there is not enough capacity. void UnshareAndEnsureCapacity(size_t new_capacity); @@ -277,7 +273,7 @@ class RTC_EXPORT CopyOnWriteBuffer { } // buffer_ is either null, or points to an rtc::Buffer with capacity > 0. - scoped_refptr> buffer_; + scoped_refptr buffer_; // This buffer may represent a slice of a original data. size_t offset_; // Offset of a current slice in the original data in buffer_. // Should be 0 if the buffer_ is empty. 
diff --git a/rtc_base/copy_on_write_buffer_unittest.cc b/rtc_base/copy_on_write_buffer_unittest.cc index b35cd79454..d3978686a8 100644 --- a/rtc_base/copy_on_write_buffer_unittest.cc +++ b/rtc_base/copy_on_write_buffer_unittest.cc @@ -261,56 +261,44 @@ TEST(CopyOnWriteBufferTest, ClearDoesntChangeCapacity) { EXPECT_EQ(10u, buf2.capacity()); } -TEST(CopyOnWriteBufferTest, TestConstDataAccessor) { +TEST(CopyOnWriteBufferTest, DataAccessorDoesntCloneData) { CopyOnWriteBuffer buf1(kTestData, 3, 10); CopyOnWriteBuffer buf2(buf1); - // .cdata() doesn't clone data. - const uint8_t* cdata1 = buf1.cdata(); - const uint8_t* cdata2 = buf2.cdata(); - EXPECT_EQ(cdata1, cdata2); - - // Non-const .data() clones data if shared. - const uint8_t* data1 = buf1.data(); - const uint8_t* data2 = buf2.data(); - EXPECT_NE(data1, data2); - // buf1 was cloned above. - EXPECT_NE(data1, cdata1); - // Therefore buf2 was no longer sharing data and was not cloned. - EXPECT_EQ(data2, cdata1); + EXPECT_EQ(buf1.data(), buf2.data()); } -TEST(CopyOnWriteBufferTest, TestBacketRead) { +TEST(CopyOnWriteBufferTest, MutableDataClonesDataWhenShared) { CopyOnWriteBuffer buf1(kTestData, 3, 10); CopyOnWriteBuffer buf2(buf1); + const uint8_t* cdata = buf1.data(); - EnsureBuffersShareData(buf1, buf2); - // Non-const reads clone the data if shared. - for (size_t i = 0; i != 3u; ++i) { - EXPECT_EQ(buf1[i], kTestData[i]); - } - EnsureBuffersDontShareData(buf1, buf2); + uint8_t* data1 = buf1.MutableData(); + uint8_t* data2 = buf2.MutableData(); + // buf1 was cloned above. + EXPECT_NE(data1, cdata); + // Therefore buf2 was no longer sharing data and was not cloned. 
+ EXPECT_EQ(data2, cdata); } -TEST(CopyOnWriteBufferTest, TestBacketReadConst) { +TEST(CopyOnWriteBufferTest, SeveralReads) { CopyOnWriteBuffer buf1(kTestData, 3, 10); CopyOnWriteBuffer buf2(buf1); EnsureBuffersShareData(buf1, buf2); - const CopyOnWriteBuffer& cbuf1 = buf1; for (size_t i = 0; i != 3u; ++i) { - EXPECT_EQ(cbuf1[i], kTestData[i]); + EXPECT_EQ(buf1[i], kTestData[i]); } EnsureBuffersShareData(buf1, buf2); } -TEST(CopyOnWriteBufferTest, TestBacketWrite) { +TEST(CopyOnWriteBufferTest, SeveralWrites) { CopyOnWriteBuffer buf1(kTestData, 3, 10); CopyOnWriteBuffer buf2(buf1); EnsureBuffersShareData(buf1, buf2); for (size_t i = 0; i != 3u; ++i) { - buf1[i] = kTestData[i] + 1; + buf1.MutableData()[i] = kTestData[i] + 1; } EXPECT_EQ(buf1.size(), 3u); EXPECT_EQ(buf1.capacity(), 10u); @@ -335,7 +323,7 @@ TEST(CopyOnWriteBufferTest, NoCopyDataOnSlice) { TEST(CopyOnWriteBufferTest, WritingCopiesData) { CopyOnWriteBuffer buf(kTestData, 10, 10); CopyOnWriteBuffer slice = buf.Slice(3, 4); - slice[0] = 0xaa; + slice.MutableData()[0] = 0xaa; EXPECT_NE(buf.cdata() + 3, slice.cdata()); EXPECT_EQ(0, memcmp(buf.cdata(), kTestData, 10)); } @@ -343,7 +331,7 @@ TEST(CopyOnWriteBufferTest, WritingCopiesData) { TEST(CopyOnWriteBufferTest, WritingToBufferDoesntAffectsSlice) { CopyOnWriteBuffer buf(kTestData, 10, 10); CopyOnWriteBuffer slice = buf.Slice(3, 4); - buf[0] = 0xaa; + buf.MutableData()[0] = 0xaa; EXPECT_NE(buf.cdata() + 3, slice.cdata()); EXPECT_EQ(0, memcmp(slice.cdata(), kTestData + 3, 4)); } @@ -361,7 +349,7 @@ TEST(CopyOnWriteBufferTest, SlicesAreIndependent) { CopyOnWriteBuffer buf(kTestData, 10, 10); CopyOnWriteBuffer slice = buf.Slice(3, 7); CopyOnWriteBuffer slice2 = buf.Slice(3, 7); - slice2[0] = 0xaa; + slice2.MutableData()[0] = 0xaa; EXPECT_EQ(buf.cdata() + 3, slice.cdata()); } diff --git a/rtc_base/cpu_time_unittest.cc b/rtc_base/cpu_time_unittest.cc index 675e86307c..94f82f4306 100644 --- a/rtc_base/cpu_time_unittest.cc +++ b/rtc_base/cpu_time_unittest.cc @@ 
-30,8 +30,7 @@ const int kProcessingTimeMillisecs = 500; const int kWorkingThreads = 2; // Consumes approximately kProcessingTimeMillisecs of CPU time in single thread. -void WorkingFunction(void* counter_pointer) { - int64_t* counter = reinterpret_cast(counter_pointer); +void WorkingFunction(int64_t* counter) { *counter = 0; int64_t stop_cpu_time = rtc::GetThreadCpuTimeNanos() + @@ -62,14 +61,12 @@ TEST(CpuTimeTest, MAYBE_TEST(TwoThreads)) { int64_t thread_start_time_nanos = GetThreadCpuTimeNanos(); int64_t counter1; int64_t counter2; - PlatformThread thread1(WorkingFunction, reinterpret_cast(&counter1), - "Thread1"); - PlatformThread thread2(WorkingFunction, reinterpret_cast(&counter2), - "Thread2"); - thread1.Start(); - thread2.Start(); - thread1.Stop(); - thread2.Stop(); + auto thread1 = PlatformThread::SpawnJoinable( + [&counter1] { WorkingFunction(&counter1); }, "Thread1"); + auto thread2 = PlatformThread::SpawnJoinable( + [&counter2] { WorkingFunction(&counter2); }, "Thread2"); + thread1.Finalize(); + thread2.Finalize(); EXPECT_GE(counter1, 0); EXPECT_GE(counter2, 0); diff --git a/rtc_base/critical_section.cc b/rtc_base/deprecated/recursive_critical_section.cc similarity index 80% rename from rtc_base/critical_section.cc rename to rtc_base/deprecated/recursive_critical_section.cc index 1969edefa5..068b9aa808 100644 --- a/rtc_base/critical_section.cc +++ b/rtc_base/deprecated/recursive_critical_section.cc @@ -8,17 +8,16 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "rtc_base/critical_section.h" +#include "rtc_base/deprecated/recursive_critical_section.h" #include #include "rtc_base/atomic_ops.h" #include "rtc_base/checks.h" #include "rtc_base/platform_thread_types.h" +#include "rtc_base/synchronization/yield.h" #include "rtc_base/system/unused.h" -// TODO(tommi): Split this file up to per-platform implementation files. 
- #if RTC_DCHECK_IS_ON #define RTC_CS_DEBUG_CODE(x) x #else // !RTC_DCHECK_IS_ON @@ -27,7 +26,7 @@ namespace rtc { -CriticalSection::CriticalSection() { +RecursiveCriticalSection::RecursiveCriticalSection() { #if defined(WEBRTC_WIN) InitializeCriticalSection(&crit_); #elif defined(WEBRTC_POSIX) @@ -42,7 +41,7 @@ CriticalSection::CriticalSection() { pthread_mutexattr_settype(&mutex_attribute, PTHREAD_MUTEX_RECURSIVE); #if defined(WEBRTC_MAC) pthread_mutexattr_setpolicy_np(&mutex_attribute, - _PTHREAD_MUTEX_POLICY_FAIRSHARE); + _PTHREAD_MUTEX_POLICY_FIRSTFIT); #endif pthread_mutex_init(&mutex_, &mutex_attribute); pthread_mutexattr_destroy(&mutex_attribute); @@ -56,7 +55,7 @@ CriticalSection::CriticalSection() { #endif } -CriticalSection::~CriticalSection() { +RecursiveCriticalSection::~RecursiveCriticalSection() { #if defined(WEBRTC_WIN) DeleteCriticalSection(&crit_); #elif defined(WEBRTC_POSIX) @@ -70,7 +69,7 @@ CriticalSection::~CriticalSection() { #endif } -void CriticalSection::Enter() const RTC_EXCLUSIVE_LOCK_FUNCTION() { +void RecursiveCriticalSection::Enter() const RTC_EXCLUSIVE_LOCK_FUNCTION() { #if defined(WEBRTC_WIN) EnterCriticalSection(&crit_); #elif defined(WEBRTC_POSIX) @@ -129,7 +128,8 @@ void CriticalSection::Enter() const RTC_EXCLUSIVE_LOCK_FUNCTION() { #endif } -bool CriticalSection::TryEnter() const RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) { +bool RecursiveCriticalSection::TryEnter() const + RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) { #if defined(WEBRTC_WIN) return TryEnterCriticalSection(&crit_) != FALSE; #elif defined(WEBRTC_POSIX) @@ -162,7 +162,7 @@ bool CriticalSection::TryEnter() const RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) { #endif } -void CriticalSection::Leave() const RTC_UNLOCK_FUNCTION() { +void RecursiveCriticalSection::Leave() const RTC_UNLOCK_FUNCTION() { RTC_DCHECK(CurrentThreadIsOwner()); #if defined(WEBRTC_WIN) LeaveCriticalSection(&crit_); @@ -190,7 +190,7 @@ void CriticalSection::Leave() const RTC_UNLOCK_FUNCTION() { #endif } -bool 
CriticalSection::CurrentThreadIsOwner() const { +bool RecursiveCriticalSection::CurrentThreadIsOwner() const { #if defined(WEBRTC_WIN) // OwningThread has type HANDLE but actually contains the Thread ID: // http://stackoverflow.com/questions/12675301/why-is-the-owningthread-member-of-critical-section-of-type-handle-when-it-is-de @@ -209,41 +209,11 @@ bool CriticalSection::CurrentThreadIsOwner() const { #endif } -CritScope::CritScope(const CriticalSection* cs) : cs_(cs) { +CritScope::CritScope(const RecursiveCriticalSection* cs) : cs_(cs) { cs_->Enter(); } CritScope::~CritScope() { cs_->Leave(); } -void GlobalLock::Lock() { -#if !defined(WEBRTC_WIN) && \ - (!defined(WEBRTC_MAC) || RTC_USE_NATIVE_MUTEX_ON_MAC) - const struct timespec ts_null = {0}; -#endif - - while (AtomicOps::CompareAndSwap(&lock_acquired_, 0, 1)) { -#if defined(WEBRTC_WIN) - ::Sleep(0); -#elif defined(WEBRTC_MAC) && !RTC_USE_NATIVE_MUTEX_ON_MAC - sched_yield(); -#else - nanosleep(&ts_null, nullptr); -#endif - } -} - -void GlobalLock::Unlock() { - int old_value = AtomicOps::CompareAndSwap(&lock_acquired_, 1, 0); - RTC_DCHECK_EQ(1, old_value) << "Unlock called without calling Lock first"; -} - -GlobalLockScope::GlobalLockScope(GlobalLock* lock) : lock_(lock) { - lock_->Lock(); -} - -GlobalLockScope::~GlobalLockScope() { - lock_->Unlock(); -} - } // namespace rtc diff --git a/rtc_base/critical_section.h b/rtc_base/deprecated/recursive_critical_section.h similarity index 71% rename from rtc_base/critical_section.h rename to rtc_base/deprecated/recursive_critical_section.h index cf10463bdf..c044c732b9 100644 --- a/rtc_base/critical_section.h +++ b/rtc_base/deprecated/recursive_critical_section.h @@ -8,13 +8,11 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef RTC_BASE_CRITICAL_SECTION_H_ -#define RTC_BASE_CRITICAL_SECTION_H_ +#ifndef RTC_BASE_DEPRECATED_RECURSIVE_CRITICAL_SECTION_H_ +#define RTC_BASE_DEPRECATED_RECURSIVE_CRITICAL_SECTION_H_ -#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/platform_thread_types.h" -#include "rtc_base/system/rtc_export.h" #include "rtc_base/thread_annotations.h" #if defined(WEBRTC_WIN) @@ -43,13 +41,18 @@ namespace rtc { +// NOTE: This class is deprecated. Please use webrtc::Mutex instead! +// Search using https://www.google.com/?q=recursive+lock+considered+harmful +// to find the reasons. +// // Locking methods (Enter, TryEnter, Leave)are const to permit protecting -// members inside a const context without requiring mutable CriticalSections -// everywhere. CriticalSection is reentrant lock. -class RTC_LOCKABLE RTC_EXPORT CriticalSection { +// members inside a const context without requiring mutable +// RecursiveCriticalSections everywhere. RecursiveCriticalSection is +// reentrant lock. +class RTC_LOCKABLE RecursiveCriticalSection { public: - CriticalSection(); - ~CriticalSection(); + RecursiveCriticalSection(); + ~RecursiveCriticalSection(); void Enter() const RTC_EXCLUSIVE_LOCK_FUNCTION(); bool TryEnter() const RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true); @@ -87,37 +90,15 @@ class RTC_LOCKABLE RTC_EXPORT CriticalSection { // CritScope, for serializing execution through a scope. class RTC_SCOPED_LOCKABLE CritScope { public: - explicit CritScope(const CriticalSection* cs) RTC_EXCLUSIVE_LOCK_FUNCTION(cs); + explicit CritScope(const RecursiveCriticalSection* cs) + RTC_EXCLUSIVE_LOCK_FUNCTION(cs); ~CritScope() RTC_UNLOCK_FUNCTION(); private: - const CriticalSection* const cs_; + const RecursiveCriticalSection* const cs_; RTC_DISALLOW_COPY_AND_ASSIGN(CritScope); }; -// A lock used to protect global variables. Do NOT use for other purposes. 
-class RTC_LOCKABLE GlobalLock { - public: - constexpr GlobalLock() : lock_acquired_(0) {} - - void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(); - void Unlock() RTC_UNLOCK_FUNCTION(); - - private: - volatile int lock_acquired_; -}; - -// GlobalLockScope, for serializing execution through a scope. -class RTC_SCOPED_LOCKABLE GlobalLockScope { - public: - explicit GlobalLockScope(GlobalLock* lock) RTC_EXCLUSIVE_LOCK_FUNCTION(lock); - ~GlobalLockScope() RTC_UNLOCK_FUNCTION(); - - private: - GlobalLock* const lock_; - RTC_DISALLOW_COPY_AND_ASSIGN(GlobalLockScope); -}; - } // namespace rtc -#endif // RTC_BASE_CRITICAL_SECTION_H_ +#endif // RTC_BASE_DEPRECATED_RECURSIVE_CRITICAL_SECTION_H_ diff --git a/rtc_base/critical_section_unittest.cc b/rtc_base/deprecated/recursive_critical_section_unittest.cc similarity index 88% rename from rtc_base/critical_section_unittest.cc rename to rtc_base/deprecated/recursive_critical_section_unittest.cc index 16aefd2740..9256a76f58 100644 --- a/rtc_base/critical_section_unittest.cc +++ b/rtc_base/deprecated/recursive_critical_section_unittest.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "rtc_base/critical_section.h" +#include "rtc_base/deprecated/recursive_critical_section.h" #include #include @@ -78,7 +78,7 @@ class CompareAndSwapVerifier { int zero_count_; }; -class RunnerBase : public MessageHandler { +class RunnerBase : public MessageHandlerAutoCleanup { public: explicit RunnerBase(int value) : threads_active_(0), @@ -124,7 +124,7 @@ class RTC_LOCKABLE CriticalSectionLock { void Unlock() RTC_UNLOCK_FUNCTION() { cs_.Leave(); } private: - CriticalSection cs_; + RecursiveCriticalSection cs_; }; template @@ -183,7 +183,7 @@ class AtomicOpRunner : public RunnerBase { } private: - CriticalSection all_values_crit_; + RecursiveCriticalSection all_values_crit_; Verifier verifier_; }; @@ -282,26 +282,7 @@ TEST(AtomicOpsTest, CompareAndSwap) { EXPECT_EQ(1, runner.shared_value()); } -TEST(GlobalLockTest, CanHaveStaticStorageDuration) { - static_assert(std::is_trivially_destructible::value, ""); - ABSL_CONST_INIT static GlobalLock global_lock; - global_lock.Lock(); - global_lock.Unlock(); -} - -TEST(GlobalLockTest, Basic) { - // Create and start lots of threads. - LockRunner runner; - std::vector> threads; - StartThreads(&threads, &runner); - runner.SetExpectedThreadCount(kNumThreads); - - // Release the hounds! - EXPECT_TRUE(runner.Run()); - EXPECT_EQ(0, runner.shared_value()); -} - -TEST(CriticalSectionTest, Basic) { +TEST(RecursiveCriticalSectionTest, Basic) { // Create and start lots of threads. 
LockRunner runner; std::vector> threads; @@ -339,7 +320,7 @@ class PerfTestData { private: uint8_t cache_line_barrier_1_[64]; - CriticalSection lock_; + RecursiveCriticalSection lock_; uint8_t cache_line_barrier_2_[64]; int64_t my_counter_ = 0; const int expected_count_; @@ -348,33 +329,28 @@ class PerfTestData { class PerfTestThread { public: - PerfTestThread() : thread_(&ThreadFunc, this, "CsPerf") {} - void Start(PerfTestData* data, int repeats, int id) { - RTC_DCHECK(!thread_.IsRunning()); RTC_DCHECK(!data_); data_ = data; repeats_ = repeats; my_id_ = id; - thread_.Start(); + thread_ = PlatformThread::SpawnJoinable( + [this] { + for (int i = 0; i < repeats_; ++i) + data_->AddToCounter(my_id_); + }, + "CsPerf"); } void Stop() { - RTC_DCHECK(thread_.IsRunning()); RTC_DCHECK(data_); - thread_.Stop(); + thread_.Finalize(); repeats_ = 0; data_ = nullptr; my_id_ = 0; } private: - static void ThreadFunc(void* param) { - PerfTestThread* me = static_cast(param); - for (int i = 0; i < me->repeats_; ++i) - me->data_->AddToCounter(me->my_id_); - } - PlatformThread thread_; PerfTestData* data_ = nullptr; int repeats_ = 0; @@ -391,7 +367,7 @@ class PerfTestThread { // user 1m20.575s // sys 3m48.872s // Unit test output: -// [ OK ] CriticalSectionTest.Performance (294375 ms) +// [ OK ] RecursiveCriticalSectionTest.Performance (294375 ms) // // Native mutex implementation using first fit policy (current macOS default): // Approximate CPU usage: @@ -399,7 +375,7 @@ class PerfTestThread { // user 0m12.738s // sys 0m31.207s // Unit test output: -// [ OK ] CriticalSectionTest.Performance (11444 ms) +// [ OK ] RecursiveCriticalSectionTest.Performance (11444 ms) // // Special partially spin lock based implementation: // Approximate CPU usage: @@ -407,10 +383,10 @@ class PerfTestThread { // user 0m3.014s // sys 0m4.495s // Unit test output: -// [ OK ] CriticalSectionTest.Performance (1885 ms) +// [ OK ] RecursiveCriticalSectionTest.Performance (1885 ms) // // The test is disabled by 
default to avoid unecessarily loading the bots. -TEST(CriticalSectionTest, DISABLED_Performance) { +TEST(RecursiveCriticalSectionTest, DISABLED_Performance) { PerfTestThread threads[8]; Event event; diff --git a/rtc_base/deprecation.h b/rtc_base/deprecation.h deleted file mode 100644 index f285ab04bb..0000000000 --- a/rtc_base/deprecation.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef RTC_BASE_DEPRECATION_H_ -#define RTC_BASE_DEPRECATION_H_ - -// Annotate the declarations of deprecated functions with this to cause a -// compiler warning when they're used. Like so: -// -// RTC_DEPRECATED std::pony PonyPlz(const std::pony_spec& ps); -// -// NOTE 1: The annotation goes on the declaration in the .h file, not the -// definition in the .cc file! -// -// NOTE 2: In order to keep unit testing the deprecated function without -// getting warnings, do something like this: -// -// std::pony DEPRECATED_PonyPlz(const std::pony_spec& ps); -// RTC_DEPRECATED inline std::pony PonyPlz(const std::pony_spec& ps) { -// return DEPRECATED_PonyPlz(ps); -// } -// -// In other words, rename the existing function, and provide an inline wrapper -// using the original name that calls it. That way, callers who are willing to -// call it using the DEPRECATED_-prefixed name don't get the warning. -// -// TODO(kwiberg): Remove this when we can use [[deprecated]] from C++14. -#if defined(_MSC_VER) -// Note: Deprecation warnings seem to fail to trigger on Windows -// (https://bugs.chromium.org/p/webrtc/issues/detail?id=5368). 
-#define RTC_DEPRECATED __declspec(deprecated) -#elif defined(__GNUC__) -#define RTC_DEPRECATED __attribute__((__deprecated__)) -#else -#define RTC_DEPRECATED -#endif - -#endif // RTC_BASE_DEPRECATION_H_ diff --git a/rtc_base/event_tracer.cc b/rtc_base/event_tracer.cc index d23af21421..1a2b41ec5c 100644 --- a/rtc_base/event_tracer.cc +++ b/rtc_base/event_tracer.cc @@ -17,15 +17,15 @@ #include #include +#include "api/sequence_checker.h" #include "rtc_base/atomic_ops.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" #include "rtc_base/platform_thread.h" #include "rtc_base/platform_thread_types.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "rtc_base/time_utils.h" #include "rtc_base/trace_event.h" @@ -79,19 +79,12 @@ namespace rtc { namespace tracing { namespace { -static void EventTracingThreadFunc(void* params); - // Atomic-int fast path for avoiding logging when disabled. static volatile int g_event_logging_active = 0; // TODO(pbos): Log metadata for all threads, etc. 
class EventLogger final { public: - EventLogger() - : logging_thread_(EventTracingThreadFunc, - this, - "EventTracingThread", - kLowPriority) {} ~EventLogger() { RTC_DCHECK(thread_checker_.IsCurrent()); } void AddTraceEvent(const char* name, @@ -120,7 +113,7 @@ class EventLogger final { arg.value.as_string = str_copy; } } - rtc::CritScope lock(&crit_); + webrtc::MutexLock lock(&mutex_); trace_events_.push_back( {name, category_enabled, phase, args, timestamp, 1, thread_id}); } @@ -136,7 +129,7 @@ class EventLogger final { bool shutting_down = shutdown_event_.Wait(kLoggingIntervalMs); std::vector events; { - rtc::CritScope lock(&crit_); + webrtc::MutexLock lock(&mutex_); trace_events_.swap(events); } std::string args_str; @@ -196,7 +189,7 @@ class EventLogger final { output_file_ = file; output_file_owned_ = owned; { - rtc::CritScope lock(&crit_); + webrtc::MutexLock lock(&mutex_); // Since the atomic fast-path for adding events to the queue can be // bypassed while the logging thread is shutting down there may be some // stale events in the queue, hence the vector needs to be cleared to not @@ -209,7 +202,8 @@ class EventLogger final { rtc::AtomicOps::CompareAndSwap(&g_event_logging_active, 0, 1)); // Finally start, everything should be set up now. - logging_thread_.Start(); + logging_thread_ = + PlatformThread::SpawnJoinable([this] { Log(); }, "EventTracingThread"); TRACE_EVENT_INSTANT0("webrtc", "EventLogger::Start"); } @@ -223,7 +217,7 @@ class EventLogger final { // Wake up logging thread to finish writing. shutdown_event_.Set(); // Join the logging thread. 
- logging_thread_.Stop(); + logging_thread_.Finalize(); } private: @@ -317,19 +311,15 @@ class EventLogger final { return output; } - rtc::CriticalSection crit_; - std::vector trace_events_ RTC_GUARDED_BY(crit_); + webrtc::Mutex mutex_; + std::vector trace_events_ RTC_GUARDED_BY(mutex_); rtc::PlatformThread logging_thread_; rtc::Event shutdown_event_; - rtc::ThreadChecker thread_checker_; + webrtc::SequenceChecker thread_checker_; FILE* output_file_ = nullptr; bool output_file_owned_ = false; }; -static void EventTracingThreadFunc(void* params) { - static_cast(params)->Log(); -} - static EventLogger* volatile g_event_logger = nullptr; static const char* const kDisabledTracePrefix = TRACE_DISABLED_BY_DEFAULT(""); const unsigned char* InternalGetCategoryEnabled(const char* name) { diff --git a/rtc_base/event_tracer_unittest.cc b/rtc_base/event_tracer_unittest.cc index 79cc9c0788..f4d41e4e7c 100644 --- a/rtc_base/event_tracer_unittest.cc +++ b/rtc_base/event_tracer_unittest.cc @@ -10,7 +10,7 @@ #include "rtc_base/event_tracer.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "rtc_base/trace_event.h" #include "test/gtest.h" @@ -20,17 +20,17 @@ namespace { class TestStatistics { public: void Reset() { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); events_logged_ = 0; } void Increment() { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); ++events_logged_; } int Count() const { - rtc::CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return events_logged_; } @@ -41,8 +41,8 @@ class TestStatistics { } private: - rtc::CriticalSection crit_; - int events_logged_ RTC_GUARDED_BY(crit_) = 0; + mutable webrtc::Mutex mutex_; + int events_logged_ RTC_GUARDED_BY(mutex_) = 0; }; } // namespace diff --git a/rtc_base/event_unittest.cc b/rtc_base/event_unittest.cc index 31118877cf..a634d6e426 100644 --- a/rtc_base/event_unittest.cc +++ 
b/rtc_base/event_unittest.cc @@ -43,22 +43,21 @@ TEST(EventTest, AutoReset) { class SignalerThread { public: - SignalerThread() : thread_(&ThreadFn, this, "EventPerf") {} void Start(Event* writer, Event* reader) { writer_ = writer; reader_ = reader; - thread_.Start(); + thread_ = PlatformThread::SpawnJoinable( + [this] { + while (!stop_event_.Wait(0)) { + writer_->Set(); + reader_->Wait(Event::kForever); + } + }, + "EventPerf"); } void Stop() { stop_event_.Set(); - thread_.Stop(); - } - static void ThreadFn(void* param) { - auto* me = static_cast(param); - while (!me->stop_event_.Wait(0)) { - me->writer_->Set(); - me->reader_->Wait(Event::kForever); - } + thread_.Finalize(); } Event stop_event_; Event* writer_; diff --git a/rtc_base/experiments/BUILD.gn b/rtc_base/experiments/BUILD.gn index bb3e0ce8ae..b0a729abfe 100644 --- a/rtc_base/experiments/BUILD.gn +++ b/rtc_base/experiments/BUILD.gn @@ -17,8 +17,8 @@ rtc_library("alr_experiment") { "../:rtc_base_approved", "../../api/transport:field_trial_based_config", "../../api/transport:webrtc_key_value_config", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("field_trial_parser") { @@ -40,6 +40,8 @@ rtc_library("field_trial_parser") { "../../rtc_base:logging", "../../rtc_base:safe_conversions", "../../rtc_base:stringutils", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings:strings", "//third_party/abseil-cpp/absl/types:optional", @@ -57,8 +59,8 @@ rtc_library("quality_rampup_experiment") { "../../api/transport:field_trial_based_config", "../../api/transport:webrtc_key_value_config", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("quality_scaler_settings") { @@ -72,8 +74,8 @@ rtc_library("quality_scaler_settings") { "../../api/transport:field_trial_based_config", 
"../../api/transport:webrtc_key_value_config", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("quality_scaling_experiment") { @@ -85,8 +87,8 @@ rtc_library("quality_scaling_experiment") { "../:rtc_base_approved", "../../api/video_codecs:video_codecs_api", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("normalize_simulcast_size_experiment") { @@ -97,8 +99,8 @@ rtc_library("normalize_simulcast_size_experiment") { deps = [ "../:rtc_base_approved", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("balanced_degradation_settings") { @@ -111,8 +113,8 @@ rtc_library("balanced_degradation_settings") { "../:rtc_base_approved", "../../api/video_codecs:video_codecs_api", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("cpu_speed_experiment") { @@ -121,10 +123,25 @@ rtc_library("cpu_speed_experiment") { "cpu_speed_experiment.h", ] deps = [ + ":field_trial_parser", "../:rtc_base_approved", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] +} + +rtc_library("encoder_info_settings") { + sources = [ + "encoder_info_settings.cc", + "encoder_info_settings.h", + ] + deps = [ + ":field_trial_parser", + "../:rtc_base_approved", + "../../api/video_codecs:video_codecs_api", + "../../system_wrappers:field_trial", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("rtt_mult_experiment") { @@ -135,8 +152,8 @@ rtc_library("rtt_mult_experiment") { deps = [ "../:rtc_base_approved", 
"../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("jitter_upper_bound_experiment") { @@ -147,8 +164,8 @@ rtc_library("jitter_upper_bound_experiment") { deps = [ "../:rtc_base_approved", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("rate_control_settings") { @@ -164,6 +181,8 @@ rtc_library("rate_control_settings") { "../../api/units:data_size", "../../api/video_codecs:video_codecs_api", "../../system_wrappers:field_trial", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -178,8 +197,8 @@ rtc_library("keyframe_interval_settings_experiment") { ":field_trial_parser", "../../api/transport:field_trial_based_config", "../../api/transport:webrtc_key_value_config", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("stable_target_rate_experiment") { @@ -192,8 +211,8 @@ rtc_library("stable_target_rate_experiment") { ":rate_control_settings", "../../api/transport:field_trial_based_config", "../../api/transport:webrtc_key_value_config", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("min_video_bitrate_experiment") { @@ -208,17 +227,18 @@ rtc_library("min_video_bitrate_experiment") { "../../rtc_base:checks", "../../rtc_base:logging", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } -if (rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { rtc_library("experiments_unittests") { testonly = true sources = [ "balanced_degradation_settings_unittest.cc", "cpu_speed_experiment_unittest.cc", + 
"encoder_info_settings_unittest.cc", "field_trial_list_unittest.cc", "field_trial_parser_unittest.cc", "field_trial_units_unittest.cc", @@ -236,6 +256,7 @@ if (rtc_include_tests) { deps = [ ":balanced_degradation_settings", ":cpu_speed_experiment", + ":encoder_info_settings", ":field_trial_parser", ":keyframe_interval_settings_experiment", ":min_video_bitrate_experiment", @@ -255,7 +276,7 @@ if (rtc_include_tests) { "../../test:field_trial", "../../test:test_main", "../../test:test_support", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } } diff --git a/rtc_base/experiments/balanced_degradation_settings.cc b/rtc_base/experiments/balanced_degradation_settings.cc index 7a1e8913cc..90d44efb10 100644 --- a/rtc_base/experiments/balanced_degradation_settings.cc +++ b/rtc_base/experiments/balanced_degradation_settings.cc @@ -34,11 +34,11 @@ std::vector DefaultConfigs() { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, - {480 * 270, + {480 * 360, 10, 0, 0, - BalancedDegradationSettings::kNoFpsDiff, + 1, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, @@ -48,7 +48,7 @@ std::vector DefaultConfigs() { 15, 0, 0, - BalancedDegradationSettings::kNoFpsDiff, + 1, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, @@ -93,7 +93,8 @@ bool IsValid(const BalancedDegradationSettings::CodecTypeSpecific& config1, bool IsValid(const std::vector& configs) { if (configs.size() <= 1) { - RTC_LOG(LS_WARNING) << "Unsupported size, value ignored."; + if (configs.size() == 1) + RTC_LOG(LS_WARNING) << "Unsupported size, value ignored."; return false; } for (const auto& config : configs) { diff --git a/rtc_base/experiments/balanced_degradation_settings_unittest.cc b/rtc_base/experiments/balanced_degradation_settings_unittest.cc index 5721445ee4..92833ee98c 100644 --- a/rtc_base/experiments/balanced_degradation_settings_unittest.cc +++ b/rtc_base/experiments/balanced_degradation_settings_unittest.cc @@ -34,11 +34,11 @@ 
void VerifyIsDefault( {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}}, BalancedDegradationSettings::Config{ - 480 * 270, + 480 * 360, 10, 0, 0, - BalancedDegradationSettings::kNoFpsDiff, + 1, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, @@ -49,7 +49,7 @@ void VerifyIsDefault( 15, 0, 0, - BalancedDegradationSettings::kNoFpsDiff, + 1, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, diff --git a/rtc_base/experiments/cpu_speed_experiment.cc b/rtc_base/experiments/cpu_speed_experiment.cc index 6d5650acc8..7e61255260 100644 --- a/rtc_base/experiments/cpu_speed_experiment.cc +++ b/rtc_base/experiments/cpu_speed_experiment.cc @@ -12,8 +12,7 @@ #include -#include - +#include "rtc_base/experiments/field_trial_list.h" #include "rtc_base/logging.h" #include "system_wrappers/include/field_trial.h" @@ -22,30 +21,17 @@ namespace { constexpr char kFieldTrial[] = "WebRTC-VP8-CpuSpeed-Arm"; constexpr int kMinSetting = -16; constexpr int kMaxSetting = -1; -} // namespace -absl::optional> -CpuSpeedExperiment::GetConfigs() { - if (!webrtc::field_trial::IsEnabled(kFieldTrial)) - return absl::nullopt; - - const std::string group = webrtc::field_trial::FindFullName(kFieldTrial); - if (group.empty()) - return absl::nullopt; - - std::vector configs(3); - if (sscanf(group.c_str(), "Enabled-%d,%d,%d,%d,%d,%d", &(configs[0].pixels), - &(configs[0].cpu_speed), &(configs[1].pixels), - &(configs[1].cpu_speed), &(configs[2].pixels), - &(configs[2].cpu_speed)) != 6) { - RTC_LOG(LS_WARNING) << "Too few parameters provided."; - return absl::nullopt; +std::vector GetValidOrEmpty( + const std::vector& configs) { + if (configs.empty()) { + return {}; } for (const auto& config : configs) { if (config.cpu_speed < kMinSetting || config.cpu_speed > kMaxSetting) { RTC_LOG(LS_WARNING) << "Unsupported cpu speed setting, value ignored."; - return absl::nullopt; + return {}; } } @@ -53,20 +39,51 @@ CpuSpeedExperiment::GetConfigs() { if (configs[i].pixels < configs[i - 1].pixels || configs[i].cpu_speed > configs[i - 
1].cpu_speed) { RTC_LOG(LS_WARNING) << "Invalid parameter value provided."; - return absl::nullopt; + return {}; } } - return absl::optional>(configs); + return configs; } -int CpuSpeedExperiment::GetValue(int pixels, - const std::vector& configs) { +bool HasLeCores(const std::vector& configs) { for (const auto& config : configs) { + if (config.cpu_speed_le_cores == 0) + return false; + } + return true; +} +} // namespace + +CpuSpeedExperiment::CpuSpeedExperiment() : cores_("cores") { + FieldTrialStructList configs( + {FieldTrialStructMember("pixels", [](Config* c) { return &c->pixels; }), + FieldTrialStructMember("cpu_speed", + [](Config* c) { return &c->cpu_speed; }), + FieldTrialStructMember( + "cpu_speed_le_cores", + [](Config* c) { return &c->cpu_speed_le_cores; })}, + {}); + ParseFieldTrial({&configs, &cores_}, field_trial::FindFullName(kFieldTrial)); + + configs_ = GetValidOrEmpty(configs.Get()); +} + +CpuSpeedExperiment::~CpuSpeedExperiment() {} + +absl::optional CpuSpeedExperiment::GetValue(int pixels, + int num_cores) const { + if (configs_.empty()) + return absl::nullopt; + + bool use_le = HasLeCores(configs_) && cores_ && num_cores <= cores_.Value(); + + for (const auto& config : configs_) { if (pixels <= config.pixels) - return config.cpu_speed; + return use_le ? 
absl::optional(config.cpu_speed_le_cores) + : absl::optional(config.cpu_speed); } - return kMinSetting; + return absl::optional(kMinSetting); } } // namespace webrtc diff --git a/rtc_base/experiments/cpu_speed_experiment.h b/rtc_base/experiments/cpu_speed_experiment.h index e6c8340943..7c7268c559 100644 --- a/rtc_base/experiments/cpu_speed_experiment.h +++ b/rtc_base/experiments/cpu_speed_experiment.h @@ -15,25 +15,49 @@ #include "absl/types/optional.h" +#include "rtc_base/experiments/field_trial_parser.h" + namespace webrtc { class CpuSpeedExperiment { public: - struct Config { - bool operator==(const Config& o) const { - return pixels == o.pixels && cpu_speed == o.cpu_speed; - } + CpuSpeedExperiment(); + ~CpuSpeedExperiment(); + + // Example: + // WebRTC-VP8-CpuSpeed-Arm/pixels:100|200|300,cpu_speed:-1|-2|-3/ + // pixels <= 100 -> cpu speed: -1 + // pixels <= 200 -> cpu speed: -2 + // pixels <= 300 -> cpu speed: -3 - int pixels; // The video frame size. - int cpu_speed; // The |cpu_speed| to be used if the frame size is less - // than or equal to |pixels|. + // WebRTC-VP8-CpuSpeed-Arm/pixels:100|200|300,cpu_speed:-1|-2|-3/, + // cpu_speed_le_cores:-4|-5|-6,cores:3/ + // If |num_cores| > 3 + // pixels <= 100 -> cpu speed: -1 + // pixels <= 200 -> cpu speed: -2 + // pixels <= 300 -> cpu speed: -3 + // else + // pixels <= 100 -> cpu speed: -4 + // pixels <= 200 -> cpu speed: -5 + // pixels <= 300 -> cpu speed: -6 + + struct Config { + int pixels = 0; // The video frame size. + int cpu_speed = 0; // The |cpu_speed| to be used if the frame size is less + // than or equal to |pixels|. + // Optional. + int cpu_speed_le_cores = 0; // Same as |cpu_speed| above but only used if + // |num_cores| <= |cores_|. }; - // Returns the configurations from field trial on success. - static absl::optional> GetConfigs(); + // Gets the cpu speed based on |pixels| and |num_cores|. 
+ absl::optional GetValue(int pixels, int num_cores) const; + + private: + std::vector configs_; - // Gets the cpu speed from the |configs| based on |pixels|. - static int GetValue(int pixels, const std::vector& configs); + // Threshold for when to use |cpu_speed_le_cores|. + FieldTrialOptional cores_; }; } // namespace webrtc diff --git a/rtc_base/experiments/cpu_speed_experiment_unittest.cc b/rtc_base/experiments/cpu_speed_experiment_unittest.cc index edc782c0ad..2105da3818 100644 --- a/rtc_base/experiments/cpu_speed_experiment_unittest.cc +++ b/rtc_base/experiments/cpu_speed_experiment_unittest.cc @@ -16,70 +16,91 @@ namespace webrtc { -TEST(CpuSpeedExperimentTest, GetConfigsFailsIfNotEnabled) { - EXPECT_FALSE(CpuSpeedExperiment::GetConfigs()); +TEST(CpuSpeedExperimentTest, NoValueIfNotEnabled) { + CpuSpeedExperiment cpu_speed_config; + EXPECT_FALSE(cpu_speed_config.GetValue(1, /*num_cores=*/1)); } -TEST(CpuSpeedExperimentTest, GetConfigsFailsForTooFewParameters) { +TEST(CpuSpeedExperimentTest, GetValue) { webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-VP8-CpuSpeed-Arm/Enabled-1000,-1,2000,-10,3000/"); - EXPECT_FALSE(CpuSpeedExperiment::GetConfigs()); + "WebRTC-VP8-CpuSpeed-Arm/pixels:1000,cpu_speed:-12,cores:4/"); + + CpuSpeedExperiment cpu_speed_config; + EXPECT_EQ(-12, cpu_speed_config.GetValue(1, /*num_cores=*/1)); + EXPECT_EQ(-12, cpu_speed_config.GetValue(1000, /*num_cores=*/1)); + EXPECT_EQ(-16, cpu_speed_config.GetValue(1001, /*num_cores=*/1)); } -TEST(CpuSpeedExperimentTest, GetConfigs) { +TEST(CpuSpeedExperimentTest, GetValueWithList) { webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-VP8-CpuSpeed-Arm/Enabled-1000,-1,2000,-10,3000,-16/"); - - const absl::optional> kConfigs = - CpuSpeedExperiment::GetConfigs(); - ASSERT_TRUE(kConfigs); - EXPECT_THAT(*kConfigs, - ::testing::ElementsAre(CpuSpeedExperiment::Config{1000, -1}, - CpuSpeedExperiment::Config{2000, -10}, - CpuSpeedExperiment::Config{3000, -16})); + 
"WebRTC-VP8-CpuSpeed-Arm/pixels:1000|2000|3000,cpu_speed:-1|-10|-16/"); + + CpuSpeedExperiment cpu_speed_config; + EXPECT_EQ(-1, cpu_speed_config.GetValue(1, /*num_cores=*/1)); + EXPECT_EQ(-1, cpu_speed_config.GetValue(1000, /*num_cores=*/1)); + EXPECT_EQ(-10, cpu_speed_config.GetValue(1001, /*num_cores=*/1)); + EXPECT_EQ(-10, cpu_speed_config.GetValue(2000, /*num_cores=*/1)); + EXPECT_EQ(-16, cpu_speed_config.GetValue(2001, /*num_cores=*/1)); + EXPECT_EQ(-16, cpu_speed_config.GetValue(3000, /*num_cores=*/1)); + EXPECT_EQ(-16, cpu_speed_config.GetValue(3001, /*num_cores=*/1)); } -TEST(CpuSpeedExperimentTest, GetValue) { +TEST(CpuSpeedExperimentTest, GetValueWithCores) { webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-VP8-CpuSpeed-Arm/Enabled-1000,-5,2000,-10,3000,-12/"); - - const absl::optional> kConfigs = - CpuSpeedExperiment::GetConfigs(); - ASSERT_TRUE(kConfigs); - ASSERT_EQ(3u, (*kConfigs).size()); - EXPECT_EQ(-5, CpuSpeedExperiment::GetValue(1, *kConfigs)); - EXPECT_EQ(-5, CpuSpeedExperiment::GetValue(1000, *kConfigs)); - EXPECT_EQ(-10, CpuSpeedExperiment::GetValue(1000 + 1, *kConfigs)); - EXPECT_EQ(-10, CpuSpeedExperiment::GetValue(2000, *kConfigs)); - EXPECT_EQ(-12, CpuSpeedExperiment::GetValue(2000 + 1, *kConfigs)); - EXPECT_EQ(-12, CpuSpeedExperiment::GetValue(3000, *kConfigs)); - EXPECT_EQ(-16, CpuSpeedExperiment::GetValue(3000 + 1, *kConfigs)); + "WebRTC-VP8-CpuSpeed-Arm/" + "pixels:1000|2000|3000,cpu_speed:-1|-10|-16," + "cpu_speed_le_cores:-5|-11|-16,cores:2/"); + + CpuSpeedExperiment cpu_speed_config; + EXPECT_EQ(-5, cpu_speed_config.GetValue(1000, /*num_cores=*/1)); + EXPECT_EQ(-11, cpu_speed_config.GetValue(2000, /*num_cores=*/2)); + EXPECT_EQ(-1, cpu_speed_config.GetValue(1000, /*num_cores=*/3)); + EXPECT_EQ(-10, cpu_speed_config.GetValue(2000, /*num_cores=*/4)); } -TEST(CpuSpeedExperimentTest, GetConfigsFailsForTooSmallValue) { +TEST(CpuSpeedExperimentTest, GetValueWithCoresUnconfigured) { + webrtc::test::ScopedFieldTrials field_trials( + 
"WebRTC-VP8-CpuSpeed-Arm/" + "pixels:1000|2000|3000,cpu_speed:-1|-10|-16," + "cpu_speed_le_cores:-5|-11|-16/"); + + CpuSpeedExperiment cpu_speed_config; + EXPECT_EQ(-1, cpu_speed_config.GetValue(1000, /*num_cores=*/1)); + EXPECT_EQ(-10, cpu_speed_config.GetValue(2000, /*num_cores=*/2)); +} + +TEST(CpuSpeedExperimentTest, GetValueFailsForTooSmallValue) { // Supported range: [-16, -1]. webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-VP8-CpuSpeed-Arm/Enabled-1000,-1,2000,-10,3000,-17/"); - EXPECT_FALSE(CpuSpeedExperiment::GetConfigs()); + "WebRTC-VP8-CpuSpeed-Arm/pixels:1000|2000|3000,cpu_speed:-1|-10|-17/"); + + CpuSpeedExperiment cpu_speed_config; + EXPECT_FALSE(cpu_speed_config.GetValue(1, /*num_cores=*/1)); } -TEST(CpuSpeedExperimentTest, GetConfigsFailsForTooLargeValue) { +TEST(CpuSpeedExperimentTest, GetValueFailsForTooLargeValue) { // Supported range: [-16, -1]. webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-VP8-CpuSpeed-Arm/Enabled-1000,0,2000,-10,3000,-16/"); - EXPECT_FALSE(CpuSpeedExperiment::GetConfigs()); + "WebRTC-VP8-CpuSpeed-Arm/pixels:1000|2000|3000,cpu_speed:0|-10|-16/"); + + CpuSpeedExperiment cpu_speed_config; + EXPECT_FALSE(cpu_speed_config.GetValue(1, /*num_cores=*/1)); } -TEST(CpuSpeedExperimentTest, GetConfigsFailsIfPixelsDecreasing) { +TEST(CpuSpeedExperimentTest, GetValueFailsIfPixelsDecreases) { webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-VP8-CpuSpeed-Arm/Enabled-1000,-5,999,-10,3000,-16/"); - EXPECT_FALSE(CpuSpeedExperiment::GetConfigs()); + "WebRTC-VP8-CpuSpeed-Arm/pixels:1000|999|3000,cpu_speed:-5|-10|-16/"); + + CpuSpeedExperiment cpu_speed_config; + EXPECT_FALSE(cpu_speed_config.GetValue(1, /*num_cores=*/1)); } -TEST(CpuSpeedExperimentTest, GetConfigsFailsIfCpuSpeedIncreasing) { +TEST(CpuSpeedExperimentTest, GetValueFailsIfCpuSpeedIncreases) { webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-VP8-CpuSpeed-Arm/Enabled-1000,-5,2000,-4,3000,-16/"); - EXPECT_FALSE(CpuSpeedExperiment::GetConfigs()); + 
"WebRTC-VP8-CpuSpeed-Arm/pixels:1000|2000|3000,cpu_speed:-5|-4|-16/"); + + CpuSpeedExperiment cpu_speed_config; + EXPECT_FALSE(cpu_speed_config.GetValue(1, /*num_cores=*/1)); } } // namespace webrtc diff --git a/rtc_base/experiments/encoder_info_settings.cc b/rtc_base/experiments/encoder_info_settings.cc new file mode 100644 index 0000000000..9e1a5190a3 --- /dev/null +++ b/rtc_base/experiments/encoder_info_settings.cc @@ -0,0 +1,120 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/experiments/encoder_info_settings.h" + +#include + +#include "rtc_base/experiments/field_trial_list.h" +#include "rtc_base/logging.h" +#include "system_wrappers/include/field_trial.h" + +namespace webrtc { +namespace { + +std::vector ToResolutionBitrateLimits( + const std::vector& limits) { + std::vector result; + for (const auto& limit : limits) { + result.push_back(VideoEncoder::ResolutionBitrateLimits( + limit.frame_size_pixels, limit.min_start_bitrate_bps, + limit.min_bitrate_bps, limit.max_bitrate_bps)); + } + return result; +} + +} // namespace + +// Default bitrate limits for simulcast with one active stream: +// {frame_size_pixels, min_start_bitrate_bps, min_bitrate_bps, max_bitrate_bps}. +std::vector +EncoderInfoSettings::GetDefaultSinglecastBitrateLimits( + VideoCodecType codec_type) { + // Specific limits for VP9. Other codecs use VP8 limits. 
+ if (codec_type == kVideoCodecVP9) { + return {{320 * 180, 0, 30000, 150000}, + {480 * 270, 120000, 30000, 300000}, + {640 * 360, 190000, 30000, 420000}, + {960 * 540, 350000, 30000, 1000000}, + {1280 * 720, 480000, 30000, 1500000}}; + } + + return {{320 * 180, 0, 30000, 300000}, + {480 * 270, 200000, 30000, 500000}, + {640 * 360, 300000, 30000, 800000}, + {960 * 540, 500000, 30000, 1500000}, + {1280 * 720, 900000, 30000, 2500000}}; +} + +absl::optional +EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + VideoCodecType codec_type, + int frame_size_pixels) { + VideoEncoder::EncoderInfo info; + info.resolution_bitrate_limits = + GetDefaultSinglecastBitrateLimits(codec_type); + return info.GetEncoderBitrateLimitsForResolution(frame_size_pixels); +} + +EncoderInfoSettings::EncoderInfoSettings(std::string name) + : requested_resolution_alignment_("requested_resolution_alignment"), + apply_alignment_to_all_simulcast_layers_( + "apply_alignment_to_all_simulcast_layers") { + FieldTrialStructList bitrate_limits( + {FieldTrialStructMember( + "frame_size_pixels", + [](BitrateLimit* b) { return &b->frame_size_pixels; }), + FieldTrialStructMember( + "min_start_bitrate_bps", + [](BitrateLimit* b) { return &b->min_start_bitrate_bps; }), + FieldTrialStructMember( + "min_bitrate_bps", + [](BitrateLimit* b) { return &b->min_bitrate_bps; }), + FieldTrialStructMember( + "max_bitrate_bps", + [](BitrateLimit* b) { return &b->max_bitrate_bps; })}, + {}); + + if (field_trial::FindFullName(name).empty()) { + // Encoder name not found, use common string applying to all encoders. 
+ name = "WebRTC-GetEncoderInfoOverride"; + } + + ParseFieldTrial({&bitrate_limits, &requested_resolution_alignment_, + &apply_alignment_to_all_simulcast_layers_}, + field_trial::FindFullName(name)); + + resolution_bitrate_limits_ = ToResolutionBitrateLimits(bitrate_limits.Get()); +} + +absl::optional EncoderInfoSettings::requested_resolution_alignment() + const { + if (requested_resolution_alignment_ && + requested_resolution_alignment_.Value() < 1) { + RTC_LOG(LS_WARNING) << "Unsupported alignment value, ignored."; + return absl::nullopt; + } + return requested_resolution_alignment_.GetOptional(); +} + +EncoderInfoSettings::~EncoderInfoSettings() {} + +SimulcastEncoderAdapterEncoderInfoSettings:: + SimulcastEncoderAdapterEncoderInfoSettings() + : EncoderInfoSettings( + "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride") {} + +LibvpxVp8EncoderInfoSettings::LibvpxVp8EncoderInfoSettings() + : EncoderInfoSettings("WebRTC-VP8-GetEncoderInfoOverride") {} + +LibvpxVp9EncoderInfoSettings::LibvpxVp9EncoderInfoSettings() + : EncoderInfoSettings("WebRTC-VP9-GetEncoderInfoOverride") {} + +} // namespace webrtc diff --git a/rtc_base/experiments/encoder_info_settings.h b/rtc_base/experiments/encoder_info_settings.h new file mode 100644 index 0000000000..9cbb5875bb --- /dev/null +++ b/rtc_base/experiments/encoder_info_settings.h @@ -0,0 +1,83 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef RTC_BASE_EXPERIMENTS_ENCODER_INFO_SETTINGS_H_ +#define RTC_BASE_EXPERIMENTS_ENCODER_INFO_SETTINGS_H_ + +#include +#include + +#include "absl/types/optional.h" +#include "api/video_codecs/video_encoder.h" +#include "rtc_base/experiments/field_trial_parser.h" + +namespace webrtc { + +class EncoderInfoSettings { + public: + virtual ~EncoderInfoSettings(); + + // Bitrate limits per resolution. + struct BitrateLimit { + int frame_size_pixels = 0; // The video frame size. + int min_start_bitrate_bps = 0; // The minimum bitrate to start encoding. + int min_bitrate_bps = 0; // The minimum bitrate. + int max_bitrate_bps = 0; // The maximum bitrate. + }; + + absl::optional requested_resolution_alignment() const; + bool apply_alignment_to_all_simulcast_layers() const { + return apply_alignment_to_all_simulcast_layers_.Get(); + } + std::vector resolution_bitrate_limits() + const { + return resolution_bitrate_limits_; + } + + static std::vector + GetDefaultSinglecastBitrateLimits(VideoCodecType codec_type); + + static absl::optional + GetDefaultSinglecastBitrateLimitsForResolution(VideoCodecType codec_type, + int frame_size_pixels); + + protected: + explicit EncoderInfoSettings(std::string name); + + private: + FieldTrialOptional requested_resolution_alignment_; + FieldTrialFlag apply_alignment_to_all_simulcast_layers_; + std::vector resolution_bitrate_limits_; +}; + +// EncoderInfo settings for SimulcastEncoderAdapter. +class SimulcastEncoderAdapterEncoderInfoSettings : public EncoderInfoSettings { + public: + SimulcastEncoderAdapterEncoderInfoSettings(); + ~SimulcastEncoderAdapterEncoderInfoSettings() override {} +}; + +// EncoderInfo settings for LibvpxVp8Encoder. +class LibvpxVp8EncoderInfoSettings : public EncoderInfoSettings { + public: + LibvpxVp8EncoderInfoSettings(); + ~LibvpxVp8EncoderInfoSettings() override {} +}; + +// EncoderInfo settings for LibvpxVp9Encoder. 
+class LibvpxVp9EncoderInfoSettings : public EncoderInfoSettings { + public: + LibvpxVp9EncoderInfoSettings(); + ~LibvpxVp9EncoderInfoSettings() override {} +}; + +} // namespace webrtc + +#endif // RTC_BASE_EXPERIMENTS_ENCODER_INFO_SETTINGS_H_ diff --git a/rtc_base/experiments/encoder_info_settings_unittest.cc b/rtc_base/experiments/encoder_info_settings_unittest.cc new file mode 100644 index 0000000000..aabb68718c --- /dev/null +++ b/rtc_base/experiments/encoder_info_settings_unittest.cc @@ -0,0 +1,102 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/experiments/encoder_info_settings.h" + +#include "rtc_base/gunit.h" +#include "test/field_trial.h" +#include "test/gmock.h" + +namespace webrtc { + +TEST(SimulcastEncoderAdapterSettingsTest, NoValuesWithoutFieldTrial) { + SimulcastEncoderAdapterEncoderInfoSettings settings; + EXPECT_EQ(absl::nullopt, settings.requested_resolution_alignment()); + EXPECT_FALSE(settings.apply_alignment_to_all_simulcast_layers()); + EXPECT_TRUE(settings.resolution_bitrate_limits().empty()); +} + +TEST(SimulcastEncoderAdapterSettingsTest, NoValueForInvalidAlignment) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride/" + "requested_resolution_alignment:0/"); + + SimulcastEncoderAdapterEncoderInfoSettings settings; + EXPECT_EQ(absl::nullopt, settings.requested_resolution_alignment()); +} + +TEST(SimulcastEncoderAdapterSettingsTest, GetResolutionAlignment) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride/" + "requested_resolution_alignment:2/"); + + 
SimulcastEncoderAdapterEncoderInfoSettings settings; + EXPECT_EQ(2, settings.requested_resolution_alignment()); + EXPECT_FALSE(settings.apply_alignment_to_all_simulcast_layers()); + EXPECT_TRUE(settings.resolution_bitrate_limits().empty()); +} + +TEST(SimulcastEncoderAdapterSettingsTest, GetApplyAlignment) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride/" + "requested_resolution_alignment:3," + "apply_alignment_to_all_simulcast_layers/"); + + SimulcastEncoderAdapterEncoderInfoSettings settings; + EXPECT_EQ(3, settings.requested_resolution_alignment()); + EXPECT_TRUE(settings.apply_alignment_to_all_simulcast_layers()); + EXPECT_TRUE(settings.resolution_bitrate_limits().empty()); +} + +TEST(SimulcastEncoderAdapterSettingsTest, GetResolutionBitrateLimits) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride/" + "frame_size_pixels:123," + "min_start_bitrate_bps:11000," + "min_bitrate_bps:44000," + "max_bitrate_bps:77000/"); + + SimulcastEncoderAdapterEncoderInfoSettings settings; + EXPECT_EQ(absl::nullopt, settings.requested_resolution_alignment()); + EXPECT_FALSE(settings.apply_alignment_to_all_simulcast_layers()); + EXPECT_THAT(settings.resolution_bitrate_limits(), + ::testing::ElementsAre(VideoEncoder::ResolutionBitrateLimits{ + 123, 11000, 44000, 77000})); +} + +TEST(SimulcastEncoderAdapterSettingsTest, GetResolutionBitrateLimitsWithList) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-SimulcastEncoderAdapter-GetEncoderInfoOverride/" + "frame_size_pixels:123|456|789," + "min_start_bitrate_bps:11000|22000|33000," + "min_bitrate_bps:44000|55000|66000," + "max_bitrate_bps:77000|88000|99000/"); + + SimulcastEncoderAdapterEncoderInfoSettings settings; + EXPECT_THAT( + settings.resolution_bitrate_limits(), + ::testing::ElementsAre( + VideoEncoder::ResolutionBitrateLimits{123, 11000, 44000, 77000}, + VideoEncoder::ResolutionBitrateLimits{456, 22000, 
55000, 88000}, + VideoEncoder::ResolutionBitrateLimits{789, 33000, 66000, 99000})); +} + +TEST(EncoderSettingsTest, CommonSettingsUsedIfEncoderNameUnspecified) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-VP8-GetEncoderInfoOverride/requested_resolution_alignment:2/" + "WebRTC-GetEncoderInfoOverride/requested_resolution_alignment:3/"); + + LibvpxVp8EncoderInfoSettings vp8_settings; + EXPECT_EQ(2, vp8_settings.requested_resolution_alignment()); + LibvpxVp9EncoderInfoSettings vp9_settings; + EXPECT_EQ(3, vp9_settings.requested_resolution_alignment()); +} + +} // namespace webrtc diff --git a/rtc_base/experiments/field_trial_parser.cc b/rtc_base/experiments/field_trial_parser.cc index b88d0f97c4..8fc89cec8f 100644 --- a/rtc_base/experiments/field_trial_parser.cc +++ b/rtc_base/experiments/field_trial_parser.cc @@ -83,7 +83,10 @@ void ParseFieldTrial( RTC_LOG(LS_WARNING) << "Failed to read empty key field with value '" << key << "' in trial: \"" << trial_string << "\""; - } else { + } else if (key.empty() || key[0] != '_') { + // "_" is used to prefix keys that are part of the string for + // debugging purposes but not necessarily used. + // e.g. 
WebRTC-Experiment/param: value, _DebuggingString RTC_LOG(LS_INFO) << "No field with key: '" << key << "' (found in trial: \"" << trial_string << "\")"; std::string valid_keys; diff --git a/rtc_base/experiments/keyframe_interval_settings.cc b/rtc_base/experiments/keyframe_interval_settings.cc index 2f19a1c53f..76c85cbbad 100644 --- a/rtc_base/experiments/keyframe_interval_settings.cc +++ b/rtc_base/experiments/keyframe_interval_settings.cc @@ -22,11 +22,8 @@ constexpr char kFieldTrialName[] = "WebRTC-KeyframeInterval"; KeyframeIntervalSettings::KeyframeIntervalSettings( const WebRtcKeyValueConfig* const key_value_config) - : min_keyframe_send_interval_ms_("min_keyframe_send_interval_ms"), - max_wait_for_keyframe_ms_("max_wait_for_keyframe_ms"), - max_wait_for_frame_ms_("max_wait_for_frame_ms") { - ParseFieldTrial({&min_keyframe_send_interval_ms_, &max_wait_for_keyframe_ms_, - &max_wait_for_frame_ms_}, + : min_keyframe_send_interval_ms_("min_keyframe_send_interval_ms") { + ParseFieldTrial({&min_keyframe_send_interval_ms_}, key_value_config->Lookup(kFieldTrialName)); } @@ -39,13 +36,4 @@ absl::optional KeyframeIntervalSettings::MinKeyframeSendIntervalMs() const { return min_keyframe_send_interval_ms_.GetOptional(); } - -absl::optional KeyframeIntervalSettings::MaxWaitForKeyframeMs() const { - return max_wait_for_keyframe_ms_.GetOptional(); -} - -absl::optional KeyframeIntervalSettings::MaxWaitForFrameMs() const { - return max_wait_for_frame_ms_.GetOptional(); -} - } // namespace webrtc diff --git a/rtc_base/experiments/keyframe_interval_settings.h b/rtc_base/experiments/keyframe_interval_settings.h index 7c8d6d364a..3f253f0022 100644 --- a/rtc_base/experiments/keyframe_interval_settings.h +++ b/rtc_base/experiments/keyframe_interval_settings.h @@ -17,6 +17,9 @@ namespace webrtc { +// TODO(bugs.webrtc.org/10427): Remove and replace with proper configuration +// parameter, or move to using FIR if intent is to avoid triggering multiple +// times to PLIs corresponding to 
the same request when RTT is large. class KeyframeIntervalSettings final { public: static KeyframeIntervalSettings ParseFromFieldTrials(); @@ -25,22 +28,11 @@ class KeyframeIntervalSettings final { // The encoded keyframe send rate is <= 1/MinKeyframeSendIntervalMs(). absl::optional MinKeyframeSendIntervalMs() const; - // Receiver side. - // The keyframe request send rate is - // - when we have not received a key frame at all: - // <= 1/MaxWaitForKeyframeMs() - // - when we have not received a frame recently: - // <= 1/MaxWaitForFrameMs() - absl::optional MaxWaitForKeyframeMs() const; - absl::optional MaxWaitForFrameMs() const; - private: explicit KeyframeIntervalSettings( const WebRtcKeyValueConfig* key_value_config); FieldTrialOptional min_keyframe_send_interval_ms_; - FieldTrialOptional max_wait_for_keyframe_ms_; - FieldTrialOptional max_wait_for_frame_ms_; }; } // namespace webrtc diff --git a/rtc_base/experiments/keyframe_interval_settings_unittest.cc b/rtc_base/experiments/keyframe_interval_settings_unittest.cc index 7d89a4c000..25cebbcd70 100644 --- a/rtc_base/experiments/keyframe_interval_settings_unittest.cc +++ b/rtc_base/experiments/keyframe_interval_settings_unittest.cc @@ -27,60 +27,16 @@ TEST(KeyframeIntervalSettingsTest, ParsesMinKeyframeSendIntervalMs) { 100); } -TEST(KeyframeIntervalSettingsTest, ParsesMaxWaitForKeyframeMs) { - EXPECT_FALSE( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForKeyframeMs()); - - test::ScopedFieldTrials field_trials( - "WebRTC-KeyframeInterval/max_wait_for_keyframe_ms:100/"); - EXPECT_EQ( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForKeyframeMs(), - 100); -} - -TEST(KeyframeIntervalSettingsTest, ParsesMaxWaitForFrameMs) { - EXPECT_FALSE( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForFrameMs()); - - test::ScopedFieldTrials field_trials( - "WebRTC-KeyframeInterval/max_wait_for_frame_ms:100/"); - EXPECT_EQ( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForFrameMs(), - 
100); -} - -TEST(KeyframeIntervalSettingsTest, ParsesAllValues) { - test::ScopedFieldTrials field_trials( - "WebRTC-KeyframeInterval/" - "min_keyframe_send_interval_ms:100," - "max_wait_for_keyframe_ms:101," - "max_wait_for_frame_ms:102/"); - EXPECT_EQ(KeyframeIntervalSettings::ParseFromFieldTrials() - .MinKeyframeSendIntervalMs(), - 100); - EXPECT_EQ( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForKeyframeMs(), - 101); - EXPECT_EQ( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForFrameMs(), - 102); -} - -TEST(KeyframeIntervalSettingsTest, DoesNotParseAllValuesWhenIncorrectlySet) { - EXPECT_FALSE( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForFrameMs()); +TEST(KeyframeIntervalSettingsTest, DoesNotParseIncorrectValues) { + EXPECT_FALSE(KeyframeIntervalSettings::ParseFromFieldTrials() + .MinKeyframeSendIntervalMs()); test::ScopedFieldTrials field_trials( - "WebRTC-KeyframeInterval/" - "min_keyframe_send_interval_ms:a," - "max_wait_for_keyframe_ms:b," - "max_wait_for_frame_ms:c/"); + "WebRTC-KeyframeInterval/min_keyframe_send_interval_ms:a/"); + EXPECT_FALSE(KeyframeIntervalSettings::ParseFromFieldTrials() + .MinKeyframeSendIntervalMs()); EXPECT_FALSE(KeyframeIntervalSettings::ParseFromFieldTrials() .MinKeyframeSendIntervalMs()); - EXPECT_FALSE( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForKeyframeMs()); - EXPECT_FALSE( - KeyframeIntervalSettings::ParseFromFieldTrials().MaxWaitForFrameMs()); } } // namespace diff --git a/rtc_base/experiments/quality_rampup_experiment.cc b/rtc_base/experiments/quality_rampup_experiment.cc index caf7e62368..ee6675c924 100644 --- a/rtc_base/experiments/quality_rampup_experiment.cc +++ b/rtc_base/experiments/quality_rampup_experiment.cc @@ -70,4 +70,8 @@ bool QualityRampupExperiment::BwHigh(int64_t now_ms, return (now_ms - *start_ms_) >= min_duration_ms_.Value(); } +bool QualityRampupExperiment::Enabled() const { + return min_pixels_ || min_duration_ms_ || max_bitrate_kbps_; +} + } 
// namespace webrtc diff --git a/rtc_base/experiments/quality_rampup_experiment.h b/rtc_base/experiments/quality_rampup_experiment.h index ff9d7d38e5..9d46901104 100644 --- a/rtc_base/experiments/quality_rampup_experiment.h +++ b/rtc_base/experiments/quality_rampup_experiment.h @@ -33,6 +33,8 @@ class QualityRampupExperiment final { // (max_bitrate_factor_) above |max_bitrate_kbps_| for |min_duration_ms_|. bool BwHigh(int64_t now_ms, uint32_t available_bw_kbps); + bool Enabled() const; + private: explicit QualityRampupExperiment( const WebRtcKeyValueConfig* const key_value_config); diff --git a/rtc_base/experiments/quality_scaler_settings.cc b/rtc_base/experiments/quality_scaler_settings.cc index c8d83ebe4d..d2443b05ce 100644 --- a/rtc_base/experiments/quality_scaler_settings.cc +++ b/rtc_base/experiments/quality_scaler_settings.cc @@ -21,14 +21,17 @@ const double kMinScaleFactor = 0.01; QualityScalerSettings::QualityScalerSettings( const WebRtcKeyValueConfig* const key_value_config) - : min_frames_("min_frames"), + : sampling_period_ms_("sampling_period_ms"), + average_qp_window_("average_qp_window"), + min_frames_("min_frames"), initial_scale_factor_("initial_scale_factor"), scale_factor_("scale_factor"), initial_bitrate_interval_ms_("initial_bitrate_interval_ms"), initial_bitrate_factor_("initial_bitrate_factor") { ParseFieldTrial( - {&min_frames_, &initial_scale_factor_, &scale_factor_, - &initial_bitrate_interval_ms_, &initial_bitrate_factor_}, + {&sampling_period_ms_, &average_qp_window_, &min_frames_, + &initial_scale_factor_, &scale_factor_, &initial_bitrate_interval_ms_, + &initial_bitrate_factor_}, key_value_config->Lookup("WebRTC-Video-QualityScalerSettings")); } @@ -37,6 +40,22 @@ QualityScalerSettings QualityScalerSettings::ParseFromFieldTrials() { return QualityScalerSettings(&field_trial_config); } +absl::optional QualityScalerSettings::SamplingPeriodMs() const { + if (sampling_period_ms_ && sampling_period_ms_.Value() <= 0) { + RTC_LOG(LS_WARNING) 
<< "Unsupported sampling_period_ms value, ignored."; + return absl::nullopt; + } + return sampling_period_ms_.GetOptional(); +} + +absl::optional QualityScalerSettings::AverageQpWindow() const { + if (average_qp_window_ && average_qp_window_.Value() <= 0) { + RTC_LOG(LS_WARNING) << "Unsupported average_qp_window value, ignored."; + return absl::nullopt; + } + return average_qp_window_.GetOptional(); +} + absl::optional QualityScalerSettings::MinFrames() const { if (min_frames_ && min_frames_.Value() < kMinFrames) { RTC_LOG(LS_WARNING) << "Unsupported min_frames value, ignored."; diff --git a/rtc_base/experiments/quality_scaler_settings.h b/rtc_base/experiments/quality_scaler_settings.h index e3b12c54e3..b4b6a427a0 100644 --- a/rtc_base/experiments/quality_scaler_settings.h +++ b/rtc_base/experiments/quality_scaler_settings.h @@ -21,6 +21,8 @@ class QualityScalerSettings final { public: static QualityScalerSettings ParseFromFieldTrials(); + absl::optional SamplingPeriodMs() const; + absl::optional AverageQpWindow() const; absl::optional MinFrames() const; absl::optional InitialScaleFactor() const; absl::optional ScaleFactor() const; @@ -31,6 +33,8 @@ class QualityScalerSettings final { explicit QualityScalerSettings( const WebRtcKeyValueConfig* const key_value_config); + FieldTrialOptional sampling_period_ms_; + FieldTrialOptional average_qp_window_; FieldTrialOptional min_frames_; FieldTrialOptional initial_scale_factor_; FieldTrialOptional scale_factor_; diff --git a/rtc_base/experiments/quality_scaling_experiment.cc b/rtc_base/experiments/quality_scaling_experiment.cc index ca58ba858a..7d5722bbe3 100644 --- a/rtc_base/experiments/quality_scaling_experiment.cc +++ b/rtc_base/experiments/quality_scaling_experiment.cc @@ -25,6 +25,11 @@ constexpr int kMaxVp9Qp = 255; constexpr int kMaxH264Qp = 51; constexpr int kMaxGenericQp = 255; +#if !defined(WEBRTC_IOS) +constexpr char kDefaultQualityScalingSetttings[] = + "Enabled-29,95,149,205,24,37,26,36,0.9995,0.9999,1"; 
+#endif + absl::optional GetThresholds(int low, int high, int max) { @@ -38,15 +43,22 @@ absl::optional GetThresholds(int low, } // namespace bool QualityScalingExperiment::Enabled() { +#if defined(WEBRTC_IOS) return webrtc::field_trial::IsEnabled(kFieldTrial); +#else + return !webrtc::field_trial::IsDisabled(kFieldTrial); +#endif } absl::optional QualityScalingExperiment::ParseSettings() { - const std::string group = webrtc::field_trial::FindFullName(kFieldTrial); + std::string group = webrtc::field_trial::FindFullName(kFieldTrial); + // TODO(http://crbug.com/webrtc/12401): Completely remove the experiment code + // after few releases. +#if !defined(WEBRTC_IOS) if (group.empty()) - return absl::nullopt; - + group = kDefaultQualityScalingSetttings; +#endif Settings s; if (sscanf(group.c_str(), "Enabled-%d,%d,%d,%d,%d,%d,%d,%d,%f,%f,%d", &s.vp8_low, &s.vp8_high, &s.vp9_low, &s.vp9_high, &s.h264_low, diff --git a/rtc_base/experiments/quality_scaling_experiment_unittest.cc b/rtc_base/experiments/quality_scaling_experiment_unittest.cc index 7a345b629f..4507f1514f 100644 --- a/rtc_base/experiments/quality_scaling_experiment_unittest.cc +++ b/rtc_base/experiments/quality_scaling_experiment_unittest.cc @@ -38,10 +38,18 @@ void ExpectEqualConfig(QualityScalingExperiment::Config a, } } // namespace -TEST(QualityScalingExperimentTest, DisabledWithoutFieldTrial) { +#if !defined(WEBRTC_IOS) +// TODO(bugs.webrtc.org/12401): investigate why QualityScaler kicks in on iOS. 
+TEST(QualityScalingExperimentTest, DefaultEnabledWithoutFieldTrial) { + webrtc::test::ScopedFieldTrials field_trials(""); + EXPECT_TRUE(QualityScalingExperiment::Enabled()); +} +#else +TEST(QualityScalingExperimentTest, DefaultDisabledWithoutFieldTrialIOS) { webrtc::test::ScopedFieldTrials field_trials(""); EXPECT_FALSE(QualityScalingExperiment::Enabled()); } +#endif TEST(QualityScalingExperimentTest, EnabledWithFieldTrial) { webrtc::test::ScopedFieldTrials field_trials( @@ -59,10 +67,19 @@ TEST(QualityScalingExperimentTest, ParseSettings) { ExpectEqualSettings(kExpected, *settings); } +#if !defined(WEBRTC_IOS) +// TODO(bugs.webrtc.org/12401): investigate why QualityScaler kicks in on iOS. +TEST(QualityScalingExperimentTest, ParseSettingsUsesDefaultsWithoutFieldTrial) { + webrtc::test::ScopedFieldTrials field_trials(""); + // Uses some default hard coded values. + EXPECT_TRUE(QualityScalingExperiment::ParseSettings()); +} +#else TEST(QualityScalingExperimentTest, ParseSettingsFailsWithoutFieldTrial) { webrtc::test::ScopedFieldTrials field_trials(""); EXPECT_FALSE(QualityScalingExperiment::ParseSettings()); } +#endif TEST(QualityScalingExperimentTest, ParseSettingsFailsWithInvalidFieldTrial) { webrtc::test::ScopedFieldTrials field_trials( diff --git a/rtc_base/experiments/rate_control_settings.cc b/rtc_base/experiments/rate_control_settings.cc index 71c2eb1985..bed194e683 100644 --- a/rtc_base/experiments/rate_control_settings.cc +++ b/rtc_base/experiments/rate_control_settings.cc @@ -24,10 +24,13 @@ namespace webrtc { namespace { -const int kDefaultAcceptedQueueMs = 250; +const int kDefaultAcceptedQueueMs = 350; const int kDefaultMinPushbackTargetBitrateBps = 30000; +const char kCongestionWindowDefaultFieldTrialString[] = + "QueueSize:350,MinBitrate:30000,DropFrame:true"; + const char kUseBaseHeavyVp8Tl3RateAllocationFieldTrialName[] = "WebRTC-UseBaseHeavyVP8TL3RateAllocation"; @@ -75,27 +78,29 @@ constexpr char VideoRateControlConfig::kKey[]; std::unique_ptr 
VideoRateControlConfig::Parser() { // The empty comments ensures that each pair is on a separate line. return StructParametersParser::Create( - "pacing_factor", &pacing_factor, // - "alr_probing", &alr_probing, // - "vp8_qp_max", &vp8_qp_max, // - "vp8_min_pixels", &vp8_min_pixels, // - "trust_vp8", &trust_vp8, // - "trust_vp9", &trust_vp9, // - "video_hysteresis", &video_hysteresis, // - "screenshare_hysteresis", &screenshare_hysteresis, // - "probe_max_allocation", &probe_max_allocation, // - "bitrate_adjuster", &bitrate_adjuster, // - "adjuster_use_headroom", &adjuster_use_headroom, // - "vp8_s0_boost", &vp8_s0_boost, // - "vp8_base_heavy_tl3_alloc", &vp8_base_heavy_tl3_alloc, // - "vp8_dynamic_rate", &vp8_dynamic_rate, // - "vp9_dynamic_rate", &vp9_dynamic_rate); + "pacing_factor", &pacing_factor, // + "alr_probing", &alr_probing, // + "vp8_qp_max", &vp8_qp_max, // + "vp8_min_pixels", &vp8_min_pixels, // + "trust_vp8", &trust_vp8, // + "trust_vp9", &trust_vp9, // + "video_hysteresis", &video_hysteresis, // + "screenshare_hysteresis", &screenshare_hysteresis, // + "probe_max_allocation", &probe_max_allocation, // + "bitrate_adjuster", &bitrate_adjuster, // + "adjuster_use_headroom", &adjuster_use_headroom, // + "vp8_s0_boost", &vp8_s0_boost, // + "vp8_base_heavy_tl3_alloc", &vp8_base_heavy_tl3_alloc); } RateControlSettings::RateControlSettings( - const WebRtcKeyValueConfig* const key_value_config) - : congestion_window_config_(CongestionWindowConfig::Parse( - key_value_config->Lookup(CongestionWindowConfig::kKey))) { + const WebRtcKeyValueConfig* const key_value_config) { + std::string congestion_window_config = + key_value_config->Lookup(CongestionWindowConfig::kKey).empty() + ? 
kCongestionWindowDefaultFieldTrialString + : key_value_config->Lookup(CongestionWindowConfig::kKey); + congestion_window_config_ = + CongestionWindowConfig::Parse(congestion_window_config); video_config_.vp8_base_heavy_tl3_alloc = IsEnabled( key_value_config, kUseBaseHeavyVp8Tl3RateAllocationFieldTrialName); ParseHysteresisFactor(key_value_config, kVideoHysteresisFieldTrialname, @@ -182,18 +187,10 @@ bool RateControlSettings::Vp8BoostBaseLayerQuality() const { return video_config_.vp8_s0_boost; } -bool RateControlSettings::Vp8DynamicRateSettings() const { - return video_config_.vp8_dynamic_rate; -} - bool RateControlSettings::LibvpxVp9TrustedRateController() const { return video_config_.trust_vp9; } -bool RateControlSettings::Vp9DynamicRateSettings() const { - return video_config_.vp9_dynamic_rate; -} - double RateControlSettings::GetSimulcastHysteresisFactor( VideoCodecMode mode) const { if (mode == VideoCodecMode::kScreensharing) { diff --git a/rtc_base/experiments/rate_control_settings.h b/rtc_base/experiments/rate_control_settings.h index 6898bf6dd3..1c38e927dc 100644 --- a/rtc_base/experiments/rate_control_settings.h +++ b/rtc_base/experiments/rate_control_settings.h @@ -36,18 +36,16 @@ struct VideoRateControlConfig { bool alr_probing = false; absl::optional vp8_qp_max; absl::optional vp8_min_pixels; - bool trust_vp8 = false; - bool trust_vp9 = false; - double video_hysteresis = 1.0; + bool trust_vp8 = true; + bool trust_vp9 = true; + double video_hysteresis = 1.2; // Default to 35% hysteresis for simulcast screenshare. 
double screenshare_hysteresis = 1.35; bool probe_max_allocation = true; - bool bitrate_adjuster = false; - bool adjuster_use_headroom = false; - bool vp8_s0_boost = true; + bool bitrate_adjuster = true; + bool adjuster_use_headroom = true; + bool vp8_s0_boost = false; bool vp8_base_heavy_tl3_alloc = false; - bool vp8_dynamic_rate = false; - bool vp9_dynamic_rate = false; std::unique_ptr Parser(); }; @@ -98,7 +96,7 @@ class RateControlSettings final { explicit RateControlSettings( const WebRtcKeyValueConfig* const key_value_config); - const CongestionWindowConfig congestion_window_config_; + CongestionWindowConfig congestion_window_config_; VideoRateControlConfig video_config_; }; diff --git a/rtc_base/experiments/rate_control_settings_unittest.cc b/rtc_base/experiments/rate_control_settings_unittest.cc index b769c46a04..84e5825224 100644 --- a/rtc_base/experiments/rate_control_settings_unittest.cc +++ b/rtc_base/experiments/rate_control_settings_unittest.cc @@ -20,7 +20,7 @@ namespace webrtc { namespace { TEST(RateControlSettingsTest, CongestionWindow) { - EXPECT_FALSE( + EXPECT_TRUE( RateControlSettings::ParseFromFieldTrials().UseCongestionWindow()); test::ScopedFieldTrials field_trials( @@ -32,8 +32,8 @@ TEST(RateControlSettingsTest, CongestionWindow) { } TEST(RateControlSettingsTest, CongestionWindowPushback) { - EXPECT_FALSE(RateControlSettings::ParseFromFieldTrials() - .UseCongestionWindowPushback()); + EXPECT_TRUE(RateControlSettings::ParseFromFieldTrials() + .UseCongestionWindowPushback()); test::ScopedFieldTrials field_trials( "WebRTC-CongestionWindow/QueueSize:100,MinBitrate:100000/"); @@ -44,6 +44,29 @@ TEST(RateControlSettingsTest, CongestionWindowPushback) { 100000u); } +TEST(RateControlSettingsTest, CongestionWindowPushbackDropframe) { + EXPECT_TRUE(RateControlSettings::ParseFromFieldTrials() + .UseCongestionWindowPushback()); + + test::ScopedFieldTrials field_trials( + "WebRTC-CongestionWindow/" + "QueueSize:100,MinBitrate:100000,DropFrame:true/"); + 
const RateControlSettings settings_after = + RateControlSettings::ParseFromFieldTrials(); + EXPECT_TRUE(settings_after.UseCongestionWindowPushback()); + EXPECT_EQ(settings_after.CongestionWindowMinPushbackTargetBitrateBps(), + 100000u); + EXPECT_TRUE(settings_after.UseCongestionWindowDropFrameOnly()); +} + +TEST(RateControlSettingsTest, CongestionWindowPushbackDefaultConfig) { + const RateControlSettings settings = + RateControlSettings::ParseFromFieldTrials(); + EXPECT_TRUE(settings.UseCongestionWindowPushback()); + EXPECT_EQ(settings.CongestionWindowMinPushbackTargetBitrateBps(), 30000u); + EXPECT_TRUE(settings.UseCongestionWindowDropFrameOnly()); +} + TEST(RateControlSettingsTest, PacingFactor) { EXPECT_FALSE(RateControlSettings::ParseFromFieldTrials().GetPacingFactor()); @@ -99,15 +122,15 @@ TEST(RateControlSettingsTest, DoesNotGetTooSmallLibvpxVp8MinPixelValue) { TEST(RateControlSettingsTest, LibvpxTrustedRateController) { const RateControlSettings settings_before = RateControlSettings::ParseFromFieldTrials(); - EXPECT_FALSE(settings_before.LibvpxVp8TrustedRateController()); - EXPECT_FALSE(settings_before.LibvpxVp9TrustedRateController()); + EXPECT_TRUE(settings_before.LibvpxVp8TrustedRateController()); + EXPECT_TRUE(settings_before.LibvpxVp9TrustedRateController()); test::ScopedFieldTrials field_trials( - "WebRTC-VideoRateControl/trust_vp8:1,trust_vp9:1/"); + "WebRTC-VideoRateControl/trust_vp8:0,trust_vp9:0/"); const RateControlSettings settings_after = RateControlSettings::ParseFromFieldTrials(); - EXPECT_TRUE(settings_after.LibvpxVp8TrustedRateController()); - EXPECT_TRUE(settings_after.LibvpxVp9TrustedRateController()); + EXPECT_FALSE(settings_after.LibvpxVp8TrustedRateController()); + EXPECT_FALSE(settings_after.LibvpxVp9TrustedRateController()); } TEST(RateControlSettingsTest, Vp8BaseHeavyTl3RateAllocationLegacyKey) { @@ -154,10 +177,10 @@ TEST(RateControlSettingsTest, GetSimulcastHysteresisFactor) { RateControlSettings::ParseFromFieldTrials(); 
EXPECT_DOUBLE_EQ(settings_before.GetSimulcastHysteresisFactor( VideoCodecMode::kRealtimeVideo), - 1.0); + 1.2); EXPECT_DOUBLE_EQ(settings_before.GetSimulcastHysteresisFactor( VideoEncoderConfig::ContentType::kRealtimeVideo), - 1.0); + 1.2); EXPECT_DOUBLE_EQ(settings_before.GetSimulcastHysteresisFactor( VideoCodecMode::kScreensharing), 1.35); @@ -167,16 +190,16 @@ TEST(RateControlSettingsTest, GetSimulcastHysteresisFactor) { test::ScopedFieldTrials field_trials( "WebRTC-VideoRateControl/" - "video_hysteresis:1.2,screenshare_hysteresis:1.4/"); + "video_hysteresis:1.0,screenshare_hysteresis:1.4/"); const RateControlSettings settings_after = RateControlSettings::ParseFromFieldTrials(); EXPECT_DOUBLE_EQ(settings_after.GetSimulcastHysteresisFactor( VideoCodecMode::kRealtimeVideo), - 1.2); + 1.0); EXPECT_DOUBLE_EQ(settings_after.GetSimulcastHysteresisFactor( VideoEncoderConfig::ContentType::kRealtimeVideo), - 1.2); + 1.0); EXPECT_DOUBLE_EQ(settings_after.GetSimulcastHysteresisFactor( VideoCodecMode::kScreensharing), 1.4); @@ -196,16 +219,16 @@ TEST(RateControlSettingsTest, TriggerProbeOnMaxAllocatedBitrateChange) { } TEST(RateControlSettingsTest, UseEncoderBitrateAdjuster) { - // Should be off by default. - EXPECT_FALSE( + // Should be on by default. + EXPECT_TRUE( RateControlSettings::ParseFromFieldTrials().UseEncoderBitrateAdjuster()); { - // Can be turned on via field trial. + // Can be turned off via field trial. 
test::ScopedFieldTrials field_trials( - "WebRTC-VideoRateControl/bitrate_adjuster:true/"); - EXPECT_TRUE(RateControlSettings::ParseFromFieldTrials() - .UseEncoderBitrateAdjuster()); + "WebRTC-VideoRateControl/bitrate_adjuster:false/"); + EXPECT_FALSE(RateControlSettings::ParseFromFieldTrials() + .UseEncoderBitrateAdjuster()); } } diff --git a/rtc_base/experiments/stable_target_rate_experiment_unittest.cc b/rtc_base/experiments/stable_target_rate_experiment_unittest.cc index 71e757d68c..dbd841840d 100644 --- a/rtc_base/experiments/stable_target_rate_experiment_unittest.cc +++ b/rtc_base/experiments/stable_target_rate_experiment_unittest.cc @@ -19,7 +19,7 @@ TEST(StableBweExperimentTest, Default) { StableTargetRateExperiment config = StableTargetRateExperiment::ParseFromFieldTrials(); EXPECT_FALSE(config.IsEnabled()); - EXPECT_EQ(config.GetVideoHysteresisFactor(), 1.0); + EXPECT_EQ(config.GetVideoHysteresisFactor(), 1.2); EXPECT_EQ(config.GetScreenshareHysteresisFactor(), 1.35); } @@ -30,7 +30,7 @@ TEST(StableBweExperimentTest, EnabledNoHysteresis) { StableTargetRateExperiment config = StableTargetRateExperiment::ParseFromFieldTrials(); EXPECT_TRUE(config.IsEnabled()); - EXPECT_EQ(config.GetVideoHysteresisFactor(), 1.0); + EXPECT_EQ(config.GetVideoHysteresisFactor(), 1.2); EXPECT_EQ(config.GetScreenshareHysteresisFactor(), 1.35); } diff --git a/rtc_base/experiments/struct_parameters_parser.cc b/rtc_base/experiments/struct_parameters_parser.cc index 2605da8fef..d62eb6f1ea 100644 --- a/rtc_base/experiments/struct_parameters_parser.cc +++ b/rtc_base/experiments/struct_parameters_parser.cc @@ -107,7 +107,10 @@ void StructParametersParser::Parse(absl::string_view src) { break; } } - if (!found) { + // "_" is used to prefix keys that are part of the string for + // debugging purposes but not necessarily used. + // e.g. 
WebRTC-Experiment/param: value, _DebuggingString + if (!found && (key.empty() || key[0] != '_')) { RTC_LOG(LS_INFO) << "No field with key: '" << key << "' (found in trial: \"" << src << "\")"; } diff --git a/rtc_base/fake_clock.cc b/rtc_base/fake_clock.cc index e242e8e659..652a5afa3a 100644 --- a/rtc_base/fake_clock.cc +++ b/rtc_base/fake_clock.cc @@ -16,18 +16,18 @@ namespace rtc { int64_t FakeClock::TimeNanos() const { - CritScope cs(&lock_); + webrtc::MutexLock lock(&lock_); return time_ns_; } void FakeClock::SetTime(webrtc::Timestamp new_time) { - CritScope cs(&lock_); + webrtc::MutexLock lock(&lock_); RTC_DCHECK(new_time.us() * 1000 >= time_ns_); time_ns_ = new_time.us() * 1000; } void FakeClock::AdvanceTime(webrtc::TimeDelta delta) { - CritScope cs(&lock_); + webrtc::MutexLock lock(&lock_); time_ns_ += delta.ns(); } diff --git a/rtc_base/fake_clock.h b/rtc_base/fake_clock.h index 0ab9a937a8..edb507becb 100644 --- a/rtc_base/fake_clock.h +++ b/rtc_base/fake_clock.h @@ -15,7 +15,7 @@ #include "api/units/time_delta.h" #include "api/units/timestamp.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "rtc_base/time_utils.h" @@ -43,7 +43,7 @@ class FakeClock : public ClockInterface { void AdvanceTime(webrtc::TimeDelta delta); private: - CriticalSection lock_; + mutable webrtc::Mutex lock_; int64_t time_ns_ RTC_GUARDED_BY(lock_) = 0; }; diff --git a/rtc_base/fake_mdns_responder.h b/rtc_base/fake_mdns_responder.h index 42908764ab..1f87cf4b81 100644 --- a/rtc_base/fake_mdns_responder.h +++ b/rtc_base/fake_mdns_responder.h @@ -15,14 +15,17 @@ #include #include -#include "rtc_base/async_invoker.h" #include "rtc_base/ip_address.h" #include "rtc_base/location.h" #include "rtc_base/mdns_responder_interface.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/thread.h" namespace webrtc { +// This class posts tasks on the given `thread` to invoke callbacks. 
It's the +// callback's responsibility to be aware of potential destruction of state it +// depends on, e.g., using WeakPtrFactory or PendingTaskSafetyFlag. class FakeMdnsResponder : public MdnsResponderInterface { public: explicit FakeMdnsResponder(rtc::Thread* thread) : thread_(thread) {} @@ -37,9 +40,8 @@ class FakeMdnsResponder : public MdnsResponderInterface { name = std::to_string(next_available_id_++) + ".local"; addr_name_map_[addr] = name; } - invoker_.AsyncInvoke( - RTC_FROM_HERE, thread_, - [callback, addr, name]() { callback(addr, name); }); + thread_->PostTask( + ToQueuedTask([callback, addr, name]() { callback(addr, name); })); } void RemoveNameForAddress(const rtc::IPAddress& addr, NameRemovedCallback callback) override { @@ -48,8 +50,7 @@ class FakeMdnsResponder : public MdnsResponderInterface { addr_name_map_.erase(it); } bool result = it != addr_name_map_.end(); - invoker_.AsyncInvoke(RTC_FROM_HERE, thread_, - [callback, result]() { callback(result); }); + thread_->PostTask(ToQueuedTask([callback, result]() { callback(result); })); } rtc::IPAddress GetMappedAddressForName(const std::string& name) const { @@ -64,8 +65,7 @@ class FakeMdnsResponder : public MdnsResponderInterface { private: uint32_t next_available_id_ = 0; std::map addr_name_map_; - rtc::Thread* thread_; - rtc::AsyncInvoker invoker_; + rtc::Thread* const thread_; }; } // namespace webrtc diff --git a/rtc_base/fake_network.h b/rtc_base/fake_network.h index 040b24205e..1bbdd460a0 100644 --- a/rtc_base/fake_network.h +++ b/rtc_base/fake_network.h @@ -31,7 +31,8 @@ const int kFakeIPv4NetworkPrefixLength = 24; const int kFakeIPv6NetworkPrefixLength = 64; // Fake network manager that allows us to manually specify the IPs to use. 
-class FakeNetworkManager : public NetworkManagerBase, public MessageHandler { +class FakeNetworkManager : public NetworkManagerBase, + public MessageHandlerAutoCleanup { public: FakeNetworkManager() {} @@ -69,10 +70,11 @@ class FakeNetworkManager : public NetworkManagerBase, public MessageHandler { ++start_count_; if (start_count_ == 1) { sent_first_update_ = false; - rtc::Thread::Current()->Post(RTC_FROM_HERE, this); + rtc::Thread::Current()->Post(RTC_FROM_HERE, this, kUpdateNetworksMessage); } else { if (sent_first_update_) { - SignalNetworksChanged(); + rtc::Thread::Current()->Post(RTC_FROM_HERE, this, + kSignalNetworksMessage); } } } @@ -80,7 +82,15 @@ class FakeNetworkManager : public NetworkManagerBase, public MessageHandler { void StopUpdating() override { --start_count_; } // MessageHandler interface. - void OnMessage(Message* msg) override { DoUpdateNetworks(); } + void OnMessage(Message* msg) override { + if (msg->message_id == kUpdateNetworksMessage) { + DoUpdateNetworks(); + } else if (msg->message_id == kSignalNetworksMessage) { + SignalNetworksChanged(); + } else { + RTC_CHECK(false); + } + } using NetworkManagerBase::set_default_local_addresses; using NetworkManagerBase::set_enumeration_permission; @@ -128,6 +138,9 @@ class FakeNetworkManager : public NetworkManagerBase, public MessageHandler { int start_count_ = 0; bool sent_first_update_ = false; + static constexpr uint32_t kUpdateNetworksMessage = 1; + static constexpr uint32_t kSignalNetworksMessage = 2; + std::unique_ptr mdns_responder_; }; diff --git a/rtc_base/file_rotating_stream.cc b/rtc_base/file_rotating_stream.cc index 826e6745f3..b7d64ba92d 100644 --- a/rtc_base/file_rotating_stream.cc +++ b/rtc_base/file_rotating_stream.cc @@ -193,49 +193,40 @@ FileRotatingStream::FileRotatingStream(const std::string& dir_path, FileRotatingStream::~FileRotatingStream() {} -StreamState FileRotatingStream::GetState() const { - return (file_.is_open() ? 
SS_OPEN : SS_CLOSED); +bool FileRotatingStream::IsOpen() const { + return file_.is_open(); } -StreamResult FileRotatingStream::Read(void* buffer, - size_t buffer_len, - size_t* read, - int* error) { - RTC_DCHECK(buffer); - RTC_NOTREACHED(); - return SR_EOS; -} - -StreamResult FileRotatingStream::Write(const void* data, - size_t data_len, - size_t* written, - int* error) { +bool FileRotatingStream::Write(const void* data, size_t data_len) { if (!file_.is_open()) { std::fprintf(stderr, "Open() must be called before Write.\n"); - return SR_ERROR; + return false; } - // Write as much as will fit in to the current file. - RTC_DCHECK_LT(current_bytes_written_, max_file_size_); - size_t remaining_bytes = max_file_size_ - current_bytes_written_; - size_t write_length = std::min(data_len, remaining_bytes); + while (data_len > 0) { + // Write as much as will fit in to the current file. + RTC_DCHECK_LT(current_bytes_written_, max_file_size_); + size_t remaining_bytes = max_file_size_ - current_bytes_written_; + size_t write_length = std::min(data_len, remaining_bytes); + + if (!file_.Write(data, write_length)) { + return false; + } + if (disable_buffering_ && !file_.Flush()) { + return false; + } - if (!file_.Write(data, write_length)) { - return SR_ERROR; - } - if (disable_buffering_ && !file_.Flush()) { - return SR_ERROR; - } + current_bytes_written_ += write_length; - current_bytes_written_ += write_length; - if (written) { - *written = write_length; - } - // If we're done with this file, rotate it out. - if (current_bytes_written_ >= max_file_size_) { - RTC_DCHECK_EQ(current_bytes_written_, max_file_size_); - RotateFiles(); + // If we're done with this file, rotate it out. 
+ if (current_bytes_written_ >= max_file_size_) { + RTC_DCHECK_EQ(current_bytes_written_, max_file_size_); + RotateFiles(); + } + data_len -= write_length; + data = + static_cast(static_cast(data) + write_length); } - return SR_SUCCESS; + return true; } bool FileRotatingStream::Flush() { diff --git a/rtc_base/file_rotating_stream.h b/rtc_base/file_rotating_stream.h index 117cf2019a..88461e344f 100644 --- a/rtc_base/file_rotating_stream.h +++ b/rtc_base/file_rotating_stream.h @@ -18,7 +18,6 @@ #include #include "rtc_base/constructor_magic.h" -#include "rtc_base/stream.h" #include "rtc_base/system/file_wrapper.h" namespace rtc { @@ -27,13 +26,8 @@ namespace rtc { // constructor. It rotates the files once the current file is full. The // individual file size and the number of files used is configurable in the // constructor. Open() must be called before using this stream. -class FileRotatingStream : public StreamInterface { +class FileRotatingStream { public: - // Use this constructor for reading a directory previously written to with - // this stream. - FileRotatingStream(const std::string& dir_path, - const std::string& file_prefix); - // Use this constructor for writing to a directory. Files in the directory // matching the prefix will be deleted on open. FileRotatingStream(const std::string& dir_path, @@ -41,20 +35,13 @@ class FileRotatingStream : public StreamInterface { size_t max_file_size, size_t num_files); - ~FileRotatingStream() override; - - // StreamInterface methods. - StreamState GetState() const override; - StreamResult Read(void* buffer, - size_t buffer_len, - size_t* read, - int* error) override; - StreamResult Write(const void* data, - size_t data_len, - size_t* written, - int* error) override; - bool Flush() override; - void Close() override; + virtual ~FileRotatingStream(); + + bool IsOpen() const; + + bool Write(const void* data, size_t data_len); + bool Flush(); + void Close(); // Opens the appropriate file(s). Call this before using the stream. 
bool Open(); @@ -63,6 +50,8 @@ class FileRotatingStream : public StreamInterface { // enabled by default for performance. bool DisableBuffering(); + // Below two methods are public for testing only. + // Returns the path used for the i-th newest file, where the 0th file is the // newest file. The file may or may not exist, this is just used for // formatting. Index must be less than GetNumFiles(). @@ -72,8 +61,6 @@ class FileRotatingStream : public StreamInterface { size_t GetNumFiles() const { return file_names_.size(); } protected: - size_t GetMaxFileSize() const { return max_file_size_; } - void SetMaxFileSize(size_t size) { max_file_size_ = size; } size_t GetRotationIndex() const { return rotation_index_; } diff --git a/rtc_base/file_rotating_stream_unittest.cc b/rtc_base/file_rotating_stream_unittest.cc index c2ba06773a..849b111148 100644 --- a/rtc_base/file_rotating_stream_unittest.cc +++ b/rtc_base/file_rotating_stream_unittest.cc @@ -72,7 +72,7 @@ class MAYBE_FileRotatingStreamTest : public ::testing::Test { // Writes the data to the stream and flushes it. void WriteAndFlush(const void* data, const size_t data_len) { - EXPECT_EQ(SR_SUCCESS, stream_->WriteAll(data, data_len, nullptr, nullptr)); + EXPECT_TRUE(stream_->Write(data, data_len)); EXPECT_TRUE(stream_->Flush()); } @@ -114,11 +114,11 @@ const size_t MAYBE_FileRotatingStreamTest::kMaxFileSize = 2; TEST_F(MAYBE_FileRotatingStreamTest, State) { Init("FileRotatingStreamTestState", kFilePrefix, kMaxFileSize, 3); - EXPECT_EQ(SS_CLOSED, stream_->GetState()); + EXPECT_FALSE(stream_->IsOpen()); ASSERT_TRUE(stream_->Open()); - EXPECT_EQ(SS_OPEN, stream_->GetState()); + EXPECT_TRUE(stream_->IsOpen()); stream_->Close(); - EXPECT_EQ(SS_CLOSED, stream_->GetState()); + EXPECT_FALSE(stream_->IsOpen()); } // Tests that nothing is written to file when data of length zero is written. 
@@ -277,7 +277,7 @@ class MAYBE_CallSessionFileRotatingStreamTest : public ::testing::Test { // Writes the data to the stream and flushes it. void WriteAndFlush(const void* data, const size_t data_len) { - EXPECT_EQ(SR_SUCCESS, stream_->WriteAll(data, data_len, nullptr, nullptr)); + EXPECT_TRUE(stream_->Write(data, data_len)); EXPECT_TRUE(stream_->Flush()); } @@ -334,8 +334,7 @@ TEST_F(MAYBE_CallSessionFileRotatingStreamTest, WriteAndReadLarge) { std::unique_ptr buffer(new uint8_t[buffer_size]); for (int i = 0; i < 8; i++) { memset(buffer.get(), i, buffer_size); - EXPECT_EQ(SR_SUCCESS, - stream_->WriteAll(buffer.get(), buffer_size, nullptr, nullptr)); + EXPECT_TRUE(stream_->Write(buffer.get(), buffer_size)); } const int expected_vals[] = {0, 1, 2, 6, 7}; @@ -369,8 +368,7 @@ TEST_F(MAYBE_CallSessionFileRotatingStreamTest, WriteAndReadFirstHalf) { std::unique_ptr buffer(new uint8_t[buffer_size]); for (int i = 0; i < 2; i++) { memset(buffer.get(), i, buffer_size); - EXPECT_EQ(SR_SUCCESS, - stream_->WriteAll(buffer.get(), buffer_size, nullptr, nullptr)); + EXPECT_TRUE(stream_->Write(buffer.get(), buffer_size)); } const int expected_vals[] = {0, 1}; diff --git a/rtc_base/firewall_socket_server.cc b/rtc_base/firewall_socket_server.cc index fc7917613c..8f44753760 100644 --- a/rtc_base/firewall_socket_server.cc +++ b/rtc_base/firewall_socket_server.cc @@ -163,19 +163,19 @@ void FirewallSocketServer::AddRule(bool allow, r.p = p; r.src = src; r.dst = dst; - CritScope scope(&crit_); + webrtc::MutexLock scope(&mutex_); rules_.push_back(r); } void FirewallSocketServer::ClearRules() { - CritScope scope(&crit_); + webrtc::MutexLock scope(&mutex_); rules_.clear(); } bool FirewallSocketServer::Check(FirewallProtocol p, const SocketAddress& src, const SocketAddress& dst) { - CritScope scope(&crit_); + webrtc::MutexLock scope(&mutex_); for (size_t i = 0; i < rules_.size(); ++i) { const Rule& r = rules_[i]; if ((r.p != p) && (r.p != FP_ANY)) @@ -239,12 +239,12 @@ 
FirewallManager::~FirewallManager() { } void FirewallManager::AddServer(FirewallSocketServer* server) { - CritScope scope(&crit_); + webrtc::MutexLock scope(&mutex_); servers_.push_back(server); } void FirewallManager::RemoveServer(FirewallSocketServer* server) { - CritScope scope(&crit_); + webrtc::MutexLock scope(&mutex_); servers_.erase(std::remove(servers_.begin(), servers_.end(), server), servers_.end()); } @@ -253,7 +253,7 @@ void FirewallManager::AddRule(bool allow, FirewallProtocol p, FirewallDirection d, const SocketAddress& addr) { - CritScope scope(&crit_); + webrtc::MutexLock scope(&mutex_); for (std::vector::const_iterator it = servers_.begin(); it != servers_.end(); ++it) { (*it)->AddRule(allow, p, d, addr); @@ -261,7 +261,7 @@ void FirewallManager::AddRule(bool allow, } void FirewallManager::ClearRules() { - CritScope scope(&crit_); + webrtc::MutexLock scope(&mutex_); for (std::vector::const_iterator it = servers_.begin(); it != servers_.end(); ++it) { (*it)->ClearRules(); diff --git a/rtc_base/firewall_socket_server.h b/rtc_base/firewall_socket_server.h index d174033e01..23b91d6ad3 100644 --- a/rtc_base/firewall_socket_server.h +++ b/rtc_base/firewall_socket_server.h @@ -14,11 +14,11 @@ #include #include "rtc_base/async_socket.h" -#include "rtc_base/critical_section.h" #include "rtc_base/ip_address.h" #include "rtc_base/socket.h" #include "rtc_base/socket_address.h" #include "rtc_base/socket_server.h" +#include "rtc_base/synchronization/mutex.h" namespace rtc { @@ -90,7 +90,7 @@ class FirewallSocketServer : public SocketServer { private: SocketServer* server_; FirewallManager* manager_; - CriticalSection crit_; + webrtc::Mutex mutex_; struct Rule { bool allow; FirewallProtocol p; @@ -123,7 +123,7 @@ class FirewallManager { void ClearRules(); private: - CriticalSection crit_; + webrtc::Mutex mutex_; std::vector servers_; }; diff --git a/rtc_base/hash.h b/rtc_base/hash.h new file mode 100644 index 0000000000..56d581cdf1 --- /dev/null +++ 
b/rtc_base/hash.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#ifndef RTC_BASE_HASH_H_ +#define RTC_BASE_HASH_H_ + +#include + +#include +#include + +namespace webrtc { + +// A custom hash function for std::pair, to be able to be used as key in a +// std::unordered_map. If absl::flat_hash_map would ever be used, this is +// unnecessary as it already has a hash function for std::pair. +struct PairHash { + template + size_t operator()(const std::pair& p) const { + return (3 * std::hash{}(p.first)) ^ std::hash{}(p.second); + } +}; + +} // namespace webrtc + +#endif // RTC_BASE_HASH_H_ diff --git a/rtc_base/hash_unittest.cc b/rtc_base/hash_unittest.cc new file mode 100644 index 0000000000..e86c8a8586 --- /dev/null +++ b/rtc_base/hash_unittest.cc @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "rtc_base/hash.h" + +#include +#include +#include + +#include "test/gmock.h" + +namespace webrtc { +namespace { + +TEST(PairHashTest, CanInsertIntoSet) { + using MyPair = std::pair; + + std::unordered_set pairs; + + pairs.insert({1, 2}); + pairs.insert({3, 4}); + + EXPECT_NE(pairs.find({1, 2}), pairs.end()); + EXPECT_NE(pairs.find({3, 4}), pairs.end()); + EXPECT_EQ(pairs.find({1, 3}), pairs.end()); + EXPECT_EQ(pairs.find({3, 3}), pairs.end()); +} + +TEST(PairHashTest, CanInsertIntoMap) { + using MyPair = std::pair; + + std::unordered_map pairs; + + pairs[{"1", 2}] = 99; + pairs[{"3", 4}] = 100; + + EXPECT_EQ((pairs[{"1", 2}]), 99); + EXPECT_EQ((pairs[{"3", 4}]), 100); + EXPECT_EQ(pairs.find({"1", 3}), pairs.end()); + EXPECT_EQ(pairs.find({"3", 3}), pairs.end()); +} +} // namespace +} // namespace webrtc diff --git a/rtc_base/internal/default_socket_server.cc b/rtc_base/internal/default_socket_server.cc new file mode 100644 index 0000000000..5632b989fc --- /dev/null +++ b/rtc_base/internal/default_socket_server.cc @@ -0,0 +1,33 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "rtc_base/internal/default_socket_server.h" + +#include + +#include "rtc_base/socket_server.h" + +#if defined(__native_client__) +#include "rtc_base/null_socket_server.h" +#else +#include "rtc_base/physical_socket_server.h" +#endif + +namespace rtc { + +std::unique_ptr CreateDefaultSocketServer() { +#if defined(__native_client__) + return std::unique_ptr(new rtc::NullSocketServer); +#else + return std::unique_ptr(new rtc::PhysicalSocketServer); +#endif +} + +} // namespace rtc diff --git a/rtc_base/internal/default_socket_server.h b/rtc_base/internal/default_socket_server.h new file mode 100644 index 0000000000..5b3489f613 --- /dev/null +++ b/rtc_base/internal/default_socket_server.h @@ -0,0 +1,24 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_INTERNAL_DEFAULT_SOCKET_SERVER_H_ +#define RTC_BASE_INTERNAL_DEFAULT_SOCKET_SERVER_H_ + +#include + +#include "rtc_base/socket_server.h" + +namespace rtc { + +std::unique_ptr CreateDefaultSocketServer(); + +} // namespace rtc + +#endif // RTC_BASE_INTERNAL_DEFAULT_SOCKET_SERVER_H_ diff --git a/rtc_base/ip_address.cc b/rtc_base/ip_address.cc index 9dd534c2b5..86f42e0bf9 100644 --- a/rtc_base/ip_address.cc +++ b/rtc_base/ip_address.cc @@ -20,8 +20,9 @@ #include #endif -#include "rtc_base/byte_order.h" #include "rtc_base/ip_address.h" + +#include "rtc_base/byte_order.h" #include "rtc_base/net_helpers.h" #include "rtc_base/string_utils.h" @@ -148,10 +149,6 @@ std::string IPAddress::ToString() const { } std::string IPAddress::ToSensitiveString() const { -#if !defined(NDEBUG) - // Return non-stripped in debug. 
- return ToString(); -#else switch (family_) { case AF_INET: { std::string address = ToString(); @@ -175,7 +172,6 @@ std::string IPAddress::ToSensitiveString() const { } } return std::string(); -#endif } IPAddress IPAddress::Normalized() const { diff --git a/rtc_base/ip_address.h b/rtc_base/ip_address.h index ae135a69dc..8725417393 100644 --- a/rtc_base/ip_address.h +++ b/rtc_base/ip_address.h @@ -80,12 +80,12 @@ class RTC_EXPORT IPAddress { bool operator<(const IPAddress& other) const; bool operator>(const IPAddress& other) const; -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& os) { // no-presubmit-check TODO(webrtc:8982) return os << ToString(); } -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST int family() const { return family_; } in_addr ipv4_address() const; diff --git a/rtc_base/ip_address_unittest.cc b/rtc_base/ip_address_unittest.cc index d79a7b4bd6..f94649cfee 100644 --- a/rtc_base/ip_address_unittest.cc +++ b/rtc_base/ip_address_unittest.cc @@ -938,15 +938,9 @@ TEST(IPAddressTest, TestToSensitiveString) { EXPECT_EQ(kIPv4PublicAddrString, addr_v4.ToString()); EXPECT_EQ(kIPv6PublicAddrString, addr_v6.ToString()); EXPECT_EQ(kIPv6PublicAddr2String, addr_v6_2.ToString()); -#if defined(NDEBUG) EXPECT_EQ(kIPv4PublicAddrAnonymizedString, addr_v4.ToSensitiveString()); EXPECT_EQ(kIPv6PublicAddrAnonymizedString, addr_v6.ToSensitiveString()); EXPECT_EQ(kIPv6PublicAddr2AnonymizedString, addr_v6_2.ToSensitiveString()); -#else - EXPECT_EQ(kIPv4PublicAddrString, addr_v4.ToSensitiveString()); - EXPECT_EQ(kIPv6PublicAddrString, addr_v6.ToSensitiveString()); - EXPECT_EQ(kIPv6PublicAddr2String, addr_v6_2.ToSensitiveString()); -#endif // defined(NDEBUG) } TEST(IPAddressTest, TestInterfaceAddress) { diff --git a/rtc_base/java/src/org/webrtc/OWNERS b/rtc_base/java/src/org/webrtc/OWNERS index 299e8b20ec..109bea2725 100644 --- a/rtc_base/java/src/org/webrtc/OWNERS +++ 
b/rtc_base/java/src/org/webrtc/OWNERS @@ -1,2 +1,2 @@ magjed@webrtc.org -sakal@webrtc.org +xalep@webrtc.org diff --git a/rtc_base/keep_ref_until_done.h b/rtc_base/keep_ref_until_done.h deleted file mode 100644 index 7bebd82374..0000000000 --- a/rtc_base/keep_ref_until_done.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2015 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef RTC_BASE_KEEP_REF_UNTIL_DONE_H_ -#define RTC_BASE_KEEP_REF_UNTIL_DONE_H_ - -#include "api/scoped_refptr.h" -#include "rtc_base/bind.h" -#include "rtc_base/callback.h" -#include "rtc_base/ref_count.h" - -namespace rtc { - -namespace impl { -template -static inline void DoNothing(const scoped_refptr& object) {} -} // namespace impl - -// KeepRefUntilDone keeps a reference to |object| until the returned -// callback goes out of scope. If the returned callback is copied, the -// reference will be released when the last callback goes out of scope. 
-template -static inline Callback0 KeepRefUntilDone(ObjectT* object) { - return rtc::Bind(&impl::DoNothing, scoped_refptr(object)); -} - -template -static inline Callback0 KeepRefUntilDone( - const scoped_refptr& object) { - return rtc::Bind(&impl::DoNothing, object); -} - -} // namespace rtc - -#endif // RTC_BASE_KEEP_REF_UNTIL_DONE_H_ diff --git a/rtc_base/location.h b/rtc_base/location.h index ad8f479135..ff1eea95a9 100644 --- a/rtc_base/location.h +++ b/rtc_base/location.h @@ -13,7 +13,6 @@ #include -#include "rtc_base/stringize_macros.h" #include "rtc_base/system/rtc_export.h" namespace rtc { diff --git a/rtc_base/log_sinks.cc b/rtc_base/log_sinks.cc index a3019b9786..4365142517 100644 --- a/rtc_base/log_sinks.cc +++ b/rtc_base/log_sinks.cc @@ -16,7 +16,6 @@ #include #include "rtc_base/checks.h" -#include "rtc_base/stream.h" namespace rtc { @@ -37,23 +36,23 @@ FileRotatingLogSink::FileRotatingLogSink(FileRotatingStream* stream) FileRotatingLogSink::~FileRotatingLogSink() {} void FileRotatingLogSink::OnLogMessage(const std::string& message) { - if (stream_->GetState() != SS_OPEN) { + if (!stream_->IsOpen()) { std::fprintf(stderr, "Init() must be called before adding this sink.\n"); return; } - stream_->WriteAll(message.c_str(), message.size(), nullptr, nullptr); + stream_->Write(message.c_str(), message.size()); } void FileRotatingLogSink::OnLogMessage(const std::string& message, LoggingSeverity sev, const char* tag) { - if (stream_->GetState() != SS_OPEN) { + if (!stream_->IsOpen()) { std::fprintf(stderr, "Init() must be called before adding this sink.\n"); return; } - stream_->WriteAll(tag, strlen(tag), nullptr, nullptr); - stream_->WriteAll(": ", 2, nullptr, nullptr); - stream_->WriteAll(message.c_str(), message.size(), nullptr, nullptr); + stream_->Write(tag, strlen(tag)); + stream_->Write(": ", 2); + stream_->Write(message.c_str(), message.size()); } bool FileRotatingLogSink::Init() { diff --git a/rtc_base/logging.cc b/rtc_base/logging.cc index 
ff7369dd5c..a333d83970 100644 --- a/rtc_base/logging.cc +++ b/rtc_base/logging.cc @@ -33,6 +33,7 @@ static const int kMaxLogLineSize = 1024 - 60; #endif // WEBRTC_MAC && !defined(WEBRTC_IOS) || WEBRTC_ANDROID +#include #include #include @@ -42,14 +43,25 @@ static const int kMaxLogLineSize = 1024 - 60; #include "absl/base/attributes.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/platform_thread_types.h" #include "rtc_base/string_encode.h" #include "rtc_base/string_utils.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "rtc_base/time_utils.h" +#if defined(WEBRTC_RACE_CHECK_MUTEX) +#if defined(WEBRTC_ABSL_MUTEX) +#error Please only define one of WEBRTC_RACE_CHECK_MUTEX and WEBRTC_ABSL_MUTEX. +#endif +#include "absl/base/const_init.h" +#include "absl/synchronization/mutex.h" // nogncheck +using LoggingMutexLock = ::absl::MutexLock; +#else +using LoggingMutexLock = ::webrtc::MutexLock; +#endif // if defined(WEBRTC_RACE_CHECK_MUTEX) + namespace rtc { namespace { // By default, release builds don't log, debug builds at info level @@ -72,7 +84,17 @@ const char* FilenameFromPath(const char* file) { } // Global lock for log subsystem, only needed to serialize access to streams_. -CriticalSection g_log_crit; +// TODO(bugs.webrtc.org/11665): this is not currently constant initialized and +// trivially destructible. +#if defined(WEBRTC_RACE_CHECK_MUTEX) +// When WEBRTC_RACE_CHECK_MUTEX is defined, even though WebRTC objects are +// invoked serially, the logging is static, invoked concurrently and hence needs +// protection. 
+absl::Mutex g_log_mutex_(absl::kConstInit); +#else +webrtc::Mutex g_log_mutex_; +#endif + } // namespace ///////////////////////////////////////////////////////////////////////////// @@ -85,8 +107,9 @@ bool LogMessage::log_to_stderr_ = true; // Note: we explicitly do not clean this up, because of the uncertain ordering // of destructors at program exit. Let the person who sets the stream trigger // cleanup by setting to null, or let it leak (safe at program exit). -ABSL_CONST_INIT LogSink* LogMessage::streams_ RTC_GUARDED_BY(g_log_crit) = +ABSL_CONST_INIT LogSink* LogMessage::streams_ RTC_GUARDED_BY(g_log_mutex_) = nullptr; +ABSL_CONST_INIT std::atomic LogMessage::streams_empty_ = {true}; // Boolean options default to false (0) bool LogMessage::thread_, LogMessage::timestamp_; @@ -107,9 +130,13 @@ LogMessage::LogMessage(const char* file, // Also ensure WallClockStartTime is initialized, so that it matches // LogStartTime. WallClockStartTime(); - print_stream_ << "[" << rtc::LeftPad('0', 3, rtc::ToString(time / 1000)) - << ":" << rtc::LeftPad('0', 3, rtc::ToString(time % 1000)) - << "] "; + // TODO(kwiberg): Switch to absl::StrFormat, if binary size is ok. + char timestamp[50]; // Maximum string length of an int64_t is 20. 
+ int len = + snprintf(timestamp, sizeof(timestamp), "[%03" PRId64 ":%03" PRId64 "]", + time / 1000, time % 1000); + RTC_DCHECK_LT(len, sizeof(timestamp)); + print_stream_ << timestamp; } if (thread_) { @@ -193,7 +220,7 @@ LogMessage::~LogMessage() { #endif } - CritScope cs(&g_log_crit); + LoggingMutexLock lock(&g_log_mutex_); for (LogSink* entry = streams_; entry != nullptr; entry = entry->next_) { if (severity_ >= entry->min_severity_) { #if defined(WEBRTC_ANDROID) @@ -242,7 +269,7 @@ void LogMessage::LogTimestamps(bool on) { void LogMessage::LogToDebug(LoggingSeverity min_sev) { g_dbg_sev = min_sev; - CritScope cs(&g_log_crit); + LoggingMutexLock lock(&g_log_mutex_); UpdateMinLogSeverity(); } @@ -251,7 +278,7 @@ void LogMessage::SetLogToStderr(bool log_to_stderr) { } int LogMessage::GetLogToStream(LogSink* stream) { - CritScope cs(&g_log_crit); + LoggingMutexLock lock(&g_log_mutex_); LoggingSeverity sev = LS_NONE; for (LogSink* entry = streams_; entry != nullptr; entry = entry->next_) { if (stream == nullptr || stream == entry) { @@ -262,15 +289,16 @@ int LogMessage::GetLogToStream(LogSink* stream) { } void LogMessage::AddLogToStream(LogSink* stream, LoggingSeverity min_sev) { - CritScope cs(&g_log_crit); + LoggingMutexLock lock(&g_log_mutex_); stream->min_severity_ = min_sev; stream->next_ = streams_; streams_ = stream; + streams_empty_.store(false, std::memory_order_relaxed); UpdateMinLogSeverity(); } void LogMessage::RemoveLogToStream(LogSink* stream) { - CritScope cs(&g_log_crit); + LoggingMutexLock lock(&g_log_mutex_); for (LogSink** entry = &streams_; *entry != nullptr; entry = &(*entry)->next_) { if (*entry == stream) { @@ -278,6 +306,7 @@ void LogMessage::RemoveLogToStream(LogSink* stream) { break; } } + streams_empty_.store(streams_ == nullptr, std::memory_order_relaxed); UpdateMinLogSeverity(); } @@ -331,7 +360,7 @@ void LogMessage::ConfigureLogging(const char* params) { } void LogMessage::UpdateMinLogSeverity() - 
RTC_EXCLUSIVE_LOCKS_REQUIRED(g_log_crit) { + RTC_EXCLUSIVE_LOCKS_REQUIRED(g_log_mutex_) { LoggingSeverity min_sev = g_dbg_sev; for (LogSink* entry = streams_; entry != nullptr; entry = entry->next_) { min_sev = std::min(min_sev, entry->min_severity_); @@ -435,12 +464,7 @@ void LogMessage::OutputToDebug(const std::string& str, bool LogMessage::IsNoop(LoggingSeverity severity) { if (severity >= g_dbg_sev || severity >= g_min_sev) return false; - - // TODO(tommi): We're grabbing this lock for every LogMessage instance that - // is going to be logged. This introduces unnecessary synchronization for - // a feature that's mostly used for testing. - CritScope cs(&g_log_crit); - return streams_ == nullptr; + return streams_empty_.load(std::memory_order_relaxed); } void LogMessage::FinishPrintStream() { @@ -481,11 +505,6 @@ void Log(const LogArgType* fmt, ...) { } } - if (LogMessage::IsNoop(meta.meta.Severity())) { - va_end(args); - return; - } - LogMessage log_message(meta.meta.File(), meta.meta.Line(), meta.meta.Severity(), meta.err_ctx, meta.err); if (tag) { diff --git a/rtc_base/logging.h b/rtc_base/logging.h index 0aa1e676d1..e21c30e21a 100644 --- a/rtc_base/logging.h +++ b/rtc_base/logging.h @@ -46,14 +46,15 @@ #include +#include #include // no-presubmit-check TODO(webrtc:8982) #include #include +#include "absl/base/attributes.h" #include "absl/meta/type_traits.h" #include "absl/strings/string_view.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/deprecation.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/system/inline.h" @@ -433,7 +434,7 @@ class LogMessage { // DEPRECATED - DO NOT USE - PLEASE USE THE MACROS INSTEAD OF THE CLASS. // Android code should use the 'const char*' version since tags are static // and we want to avoid allocating a std::string copy per log line. 
- RTC_DEPRECATED + ABSL_DEPRECATED("Use RTC_LOG macros instead of accessing this class directly") LogMessage(const char* file, int line, LoggingSeverity sev, @@ -463,9 +464,14 @@ class LogMessage { static void SetLogToStderr(bool log_to_stderr); // Stream: Any non-blocking stream interface. // Installs the |stream| to collect logs with severtiy |min_sev| or higher. - // |stream| must live until deinstalled by RemoveLogToStream + // |stream| must live until deinstalled by RemoveLogToStream. + // If |stream| is the first stream added to the system, we might miss some + // early concurrent log statement happening from another thread happening near + // this instant. static void AddLogToStream(LogSink* stream, LoggingSeverity min_sev); - // Removes the specified stream, without destroying it. + // Removes the specified stream, without destroying it. When the method + // has completed, it's guaranteed that |stream| will receive no more logging + // calls. static void RemoveLogToStream(LogSink* stream); // Returns the severity for the specified stream, of if none is specified, // the minimum stream severity. @@ -481,6 +487,12 @@ class LogMessage { // |streams_| collection is empty, the LogMessage will be considered a noop // LogMessage. static bool IsNoop(LoggingSeverity severity); + // Version of IsNoop that uses fewer instructions at the call site, since the + // caller doesn't have to pass an argument. + template + RTC_NO_INLINE static bool IsNoop() { + return IsNoop(S); + } #else // Next methods do nothing; no one will call these functions. LogMessage(const char* file, int line, LoggingSeverity sev) {} @@ -496,7 +508,7 @@ class LogMessage { // DEPRECATED - DO NOT USE - PLEASE USE THE MACROS INSTEAD OF THE CLASS. // Android code should use the 'const char*' version since tags are static // and we want to avoid allocating a std::string copy per log line. 
- RTC_DEPRECATED + ABSL_DEPRECATED("Use RTC_LOG macros instead of accessing this class directly") LogMessage(const char* file, int line, LoggingSeverity sev, @@ -519,7 +531,11 @@ class LogMessage { inline static int GetLogToStream(LogSink* stream = nullptr) { return 0; } inline static int GetMinLogSeverity() { return 0; } inline static void ConfigureLogging(const char* params) {} - inline static bool IsNoop(LoggingSeverity severity) { return true; } + static constexpr bool IsNoop(LoggingSeverity severity) { return true; } + template + static constexpr bool IsNoop() { + return IsNoop(S); + } #endif // RTC_LOG_ENABLED() private: @@ -557,6 +573,12 @@ class LogMessage { // The output streams and their associated severities static LogSink* streams_; + // Holds true with high probability if |streams_| is empty, false with high + // probability otherwise. Operated on with std::memory_order_relaxed because + // it's ok to lose or log some additional statements near the instant streams + // are added/removed. + static std::atomic streams_empty_; + // Flags for formatting options static bool thread_, timestamp_; @@ -586,16 +608,18 @@ class LogMessage { // Logging Helpers ////////////////////////////////////////////////////////////////////// -#define RTC_LOG_FILE_LINE(sev, file, line) \ - RTC_LOG_ENABLED() && \ - ::rtc::webrtc_logging_impl::LogCall() & \ - ::rtc::webrtc_logging_impl::LogStreamer<>() \ - << ::rtc::webrtc_logging_impl::LogMetadata(file, line, sev) +#define RTC_LOG_FILE_LINE(sev, file, line) \ + ::rtc::webrtc_logging_impl::LogCall() & \ + ::rtc::webrtc_logging_impl::LogStreamer<>() \ + << ::rtc::webrtc_logging_impl::LogMetadata(file, line, sev) -#define RTC_LOG(sev) RTC_LOG_FILE_LINE(::rtc::sev, __FILE__, __LINE__) +#define RTC_LOG(sev) \ + !rtc::LogMessage::IsNoop<::rtc::sev>() && \ + RTC_LOG_FILE_LINE(::rtc::sev, __FILE__, __LINE__) // The _V version is for when a variable is passed in. 
-#define RTC_LOG_V(sev) RTC_LOG_FILE_LINE(sev, __FILE__, __LINE__) +#define RTC_LOG_V(sev) \ + !rtc::LogMessage::IsNoop(sev) && RTC_LOG_FILE_LINE(sev, __FILE__, __LINE__) // The _F version prefixes the message with the current function name. #if (defined(__GNUC__) && !defined(NDEBUG)) || defined(WANT_PRETTY_LOG_F) @@ -614,11 +638,12 @@ inline bool LogCheckLevel(LoggingSeverity sev) { return (LogMessage::GetMinLogSeverity() <= sev); } -#define RTC_LOG_E(sev, ctx, err) \ - RTC_LOG_ENABLED() && ::rtc::webrtc_logging_impl::LogCall() & \ - ::rtc::webrtc_logging_impl::LogStreamer<>() \ - << ::rtc::webrtc_logging_impl::LogMetadataErr { \ - {__FILE__, __LINE__, ::rtc::sev}, ::rtc::ERRCTX_##ctx, (err) \ +#define RTC_LOG_E(sev, ctx, err) \ + !rtc::LogMessage::IsNoop<::rtc::sev>() && \ + ::rtc::webrtc_logging_impl::LogCall() & \ + ::rtc::webrtc_logging_impl::LogStreamer<>() \ + << ::rtc::webrtc_logging_impl::LogMetadataErr { \ + {__FILE__, __LINE__, ::rtc::sev}, ::rtc::ERRCTX_##ctx, (err) \ } #define RTC_LOG_T(sev) RTC_LOG(sev) << this << ": " @@ -651,11 +676,12 @@ inline const char* AdaptString(const std::string& str) { } } // namespace webrtc_logging_impl -#define RTC_LOG_TAG(sev, tag) \ - RTC_LOG_ENABLED() && ::rtc::webrtc_logging_impl::LogCall() & \ - ::rtc::webrtc_logging_impl::LogStreamer<>() \ - << ::rtc::webrtc_logging_impl::LogMetadataTag { \ - sev, ::rtc::webrtc_logging_impl::AdaptString(tag) \ +#define RTC_LOG_TAG(sev, tag) \ + !rtc::LogMessage::IsNoop(sev) && \ + ::rtc::webrtc_logging_impl::LogCall() & \ + ::rtc::webrtc_logging_impl::LogStreamer<>() \ + << ::rtc::webrtc_logging_impl::LogMetadataTag { \ + sev, ::rtc::webrtc_logging_impl::AdaptString(tag) \ } #else diff --git a/rtc_base/logging_unittest.cc b/rtc_base/logging_unittest.cc index a66f8b5608..dc1208f3f6 100644 --- a/rtc_base/logging_unittest.cc +++ b/rtc_base/logging_unittest.cc @@ -20,94 +20,23 @@ #include "rtc_base/checks.h" #include "rtc_base/event.h" #include "rtc_base/platform_thread.h" -#include 
"rtc_base/stream.h" #include "rtc_base/time_utils.h" #include "test/gtest.h" namespace rtc { -namespace { - -class StringStream : public StreamInterface { +class LogSinkImpl : public LogSink { public: - explicit StringStream(std::string* str); - explicit StringStream(const std::string& str); - - StreamState GetState() const override; - StreamResult Read(void* buffer, - size_t buffer_len, - size_t* read, - int* error) override; - StreamResult Write(const void* data, - size_t data_len, - size_t* written, - int* error) override; - void Close() override; - - private: - std::string& str_; - size_t read_pos_; - bool read_only_; -}; - -StringStream::StringStream(std::string* str) - : str_(*str), read_pos_(0), read_only_(false) {} - -StringStream::StringStream(const std::string& str) - : str_(const_cast(str)), read_pos_(0), read_only_(true) {} - -StreamState StringStream::GetState() const { - return SS_OPEN; -} - -StreamResult StringStream::Read(void* buffer, - size_t buffer_len, - size_t* read, - int* error) { - size_t available = std::min(buffer_len, str_.size() - read_pos_); - if (!available) - return SR_EOS; - memcpy(buffer, str_.data() + read_pos_, available); - read_pos_ += available; - if (read) - *read = available; - return SR_SUCCESS; -} - -StreamResult StringStream::Write(const void* data, - size_t data_len, - size_t* written, - int* error) { - if (read_only_) { - if (error) { - *error = -1; - } - return SR_ERROR; - } - str_.append(static_cast(data), - static_cast(data) + data_len); - if (written) - *written = data_len; - return SR_SUCCESS; -} - -void StringStream::Close() {} - -} // namespace - -template -class LogSinkImpl : public LogSink, public Base { - public: - LogSinkImpl() {} + explicit LogSinkImpl(std::string* log_data) : log_data_(log_data) {} template - explicit LogSinkImpl(P* p) : Base(p) {} + explicit LogSinkImpl(P* p) {} private: void OnLogMessage(const std::string& message) override { - static_cast(this)->WriteAll(message.data(), message.size(), 
nullptr, - nullptr); + log_data_->append(message); } + std::string* const log_data_; }; class LogMessageForTesting : public LogMessage { @@ -145,7 +74,7 @@ TEST(LogTest, SingleStream) { int sev = LogMessage::GetLogToStream(nullptr); std::string str; - LogSinkImpl stream(&str); + LogSinkImpl stream(&str); LogMessage::AddLogToStream(&stream, LS_INFO); EXPECT_EQ(LS_INFO, LogMessage::GetLogToStream(&stream)); @@ -207,7 +136,7 @@ TEST(LogTest, MultipleStreams) { int sev = LogMessage::GetLogToStream(nullptr); std::string str1, str2; - LogSinkImpl stream1(&str1), stream2(&str2); + LogSinkImpl stream1(&str1), stream2(&str2); LogMessage::AddLogToStream(&stream1, LS_INFO); LogMessage::AddLogToStream(&stream2, LS_VERBOSE); EXPECT_EQ(LS_INFO, LogMessage::GetLogToStream(&stream1)); @@ -231,18 +160,13 @@ TEST(LogTest, MultipleStreams) { class LogThread { public: - LogThread() : thread_(&ThreadEntry, this, "LogThread") {} - ~LogThread() { thread_.Stop(); } - - void Start() { thread_.Start(); } + void Start() { + thread_ = PlatformThread::SpawnJoinable( + [] { RTC_LOG(LS_VERBOSE) << "RTC_LOG"; }, "LogThread"); + } private: - void Run() { RTC_LOG(LS_VERBOSE) << "RTC_LOG"; } - - static void ThreadEntry(void* p) { static_cast(p)->Run(); } - PlatformThread thread_; - Event event_; }; // Ensure we don't crash when adding/removing streams while threads are going. 
@@ -256,7 +180,7 @@ TEST(LogTest, MultipleThreads) { thread3.Start(); std::string s1, s2, s3; - LogSinkImpl stream1(&s1), stream2(&s2), stream3(&s3); + LogSinkImpl stream1(&s1), stream2(&s2), stream3(&s3); for (int i = 0; i < 1000; ++i) { LogMessage::AddLogToStream(&stream1, LS_WARNING); LogMessage::AddLogToStream(&stream2, LS_INFO); @@ -303,7 +227,7 @@ TEST(LogTest, CheckFilePathParsed) { #if defined(WEBRTC_ANDROID) TEST(LogTest, CheckTagAddedToStringInDefaultOnLogMessageAndroid) { std::string str; - LogSinkImpl stream(&str); + LogSinkImpl stream(&str); LogMessage::AddLogToStream(&stream, LS_INFO); EXPECT_EQ(LS_INFO, LogMessage::GetLogToStream(&stream)); @@ -316,7 +240,7 @@ TEST(LogTest, CheckTagAddedToStringInDefaultOnLogMessageAndroid) { // Test the time required to write 1000 80-character logs to a string. TEST(LogTest, Perf) { std::string str; - LogSinkImpl stream(&str); + LogSinkImpl stream(&str); LogMessage::AddLogToStream(&stream, LS_VERBOSE); const std::string message(80, 'X'); @@ -336,7 +260,6 @@ TEST(LogTest, Perf) { finish = TimeMillis(); LogMessage::RemoveLogToStream(&stream); - stream.Close(); EXPECT_EQ(str.size(), (message.size() + logging_overhead) * kRepetitions); RTC_LOG(LS_INFO) << "Total log time: " << TimeDiff(finish, start) @@ -348,7 +271,7 @@ TEST(LogTest, Perf) { TEST(LogTest, EnumsAreSupported) { enum class TestEnum { kValue0 = 0, kValue1 = 1 }; std::string str; - LogSinkImpl stream(&str); + LogSinkImpl stream(&str); LogMessage::AddLogToStream(&stream, LS_INFO); RTC_LOG(LS_INFO) << "[" << TestEnum::kValue0 << "]"; EXPECT_NE(std::string::npos, str.find("[0]")); @@ -356,8 +279,21 @@ TEST(LogTest, EnumsAreSupported) { RTC_LOG(LS_INFO) << "[" << TestEnum::kValue1 << "]"; EXPECT_NE(std::string::npos, str.find("[1]")); LogMessage::RemoveLogToStream(&stream); - stream.Close(); +} + +TEST(LogTest, NoopSeverityDoesNotRunStringFormatting) { + if (!LogMessage::IsNoop(LS_VERBOSE)) { + RTC_LOG(LS_WARNING) << "Skipping test since verbose logging is 
turned on."; + return; + } + bool was_called = false; + auto cb = [&was_called]() { + was_called = true; + return "This could be an expensive callback."; + }; + RTC_LOG(LS_VERBOSE) << "This should not be logged: " << cb(); + EXPECT_FALSE(was_called); } } // namespace rtc -#endif +#endif // RTC_LOG_ENABLED() diff --git a/rtc_base/memory/BUILD.gn b/rtc_base/memory/BUILD.gn index aa905c6f70..ee66ac0df8 100644 --- a/rtc_base/memory/BUILD.gn +++ b/rtc_base/memory/BUILD.gn @@ -20,18 +20,25 @@ rtc_library("aligned_malloc") { deps = [ "..:checks" ] } +# Test only utility. rtc_library("fifo_buffer") { + testonly = true visibility = [ - "../../p2p:rtc_p2p", + ":unittests", "..:rtc_base_tests_utils", "..:rtc_base_unittests", - ":unittests", ] sources = [ "fifo_buffer.cc", "fifo_buffer.h", ] - deps = [ "..:rtc_base" ] + deps = [ + "..:rtc_base", + "..:threading", + "../synchronization:mutex", + "../task_utils:pending_task_safety_flag", + "../task_utils:to_queued_task", + ] } rtc_library("unittests") { diff --git a/rtc_base/memory/fifo_buffer.cc b/rtc_base/memory/fifo_buffer.cc index 44fb032e57..3fbea8dc20 100644 --- a/rtc_base/memory/fifo_buffer.cc +++ b/rtc_base/memory/fifo_buffer.cc @@ -39,13 +39,13 @@ FifoBuffer::FifoBuffer(size_t size, Thread* owner) FifoBuffer::~FifoBuffer() {} bool FifoBuffer::GetBuffered(size_t* size) const { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); *size = data_length_; return true; } bool FifoBuffer::SetCapacity(size_t size) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); if (data_length_ > size) { return false; } @@ -67,7 +67,7 @@ StreamResult FifoBuffer::ReadOffset(void* buffer, size_t bytes, size_t offset, size_t* bytes_read) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return ReadOffsetLocked(buffer, bytes, offset, bytes_read); } @@ -75,12 +75,12 @@ StreamResult FifoBuffer::WriteOffset(const void* buffer, size_t bytes, size_t offset, size_t* bytes_written) { - CritScope cs(&crit_); + webrtc::MutexLock 
lock(&mutex_); return WriteOffsetLocked(buffer, bytes, offset, bytes_written); } StreamState FifoBuffer::GetState() const { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return state_; } @@ -88,7 +88,7 @@ StreamResult FifoBuffer::Read(void* buffer, size_t bytes, size_t* bytes_read, int* error) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); const bool was_writable = data_length_ < buffer_length_; size_t copy = 0; StreamResult result = ReadOffsetLocked(buffer, bytes, 0, ©); @@ -104,7 +104,7 @@ StreamResult FifoBuffer::Read(void* buffer, // if we were full before, and now we're not, post an event if (!was_writable && copy > 0) { - PostEvent(owner_, SE_WRITE, 0); + PostEvent(SE_WRITE, 0); } } return result; @@ -114,7 +114,7 @@ StreamResult FifoBuffer::Write(const void* buffer, size_t bytes, size_t* bytes_written, int* error) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); const bool was_readable = (data_length_ > 0); size_t copy = 0; @@ -129,19 +129,19 @@ StreamResult FifoBuffer::Write(const void* buffer, // if we didn't have any data to read before, and now we do, post an event if (!was_readable && copy > 0) { - PostEvent(owner_, SE_READ, 0); + PostEvent(SE_READ, 0); } } return result; } void FifoBuffer::Close() { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); state_ = SS_CLOSED; } const void* FifoBuffer::GetReadData(size_t* size) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); *size = (read_position_ + data_length_ <= buffer_length_) ? 
data_length_ : buffer_length_ - read_position_; @@ -149,18 +149,18 @@ const void* FifoBuffer::GetReadData(size_t* size) { } void FifoBuffer::ConsumeReadData(size_t size) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); RTC_DCHECK(size <= data_length_); const bool was_writable = data_length_ < buffer_length_; read_position_ = (read_position_ + size) % buffer_length_; data_length_ -= size; if (!was_writable && size > 0) { - PostEvent(owner_, SE_WRITE, 0); + PostEvent(SE_WRITE, 0); } } void* FifoBuffer::GetWriteBuffer(size_t* size) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); if (state_ == SS_CLOSED) { return nullptr; } @@ -180,17 +180,17 @@ void* FifoBuffer::GetWriteBuffer(size_t* size) { } void FifoBuffer::ConsumeWriteBuffer(size_t size) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); RTC_DCHECK(size <= buffer_length_ - data_length_); const bool was_readable = (data_length_ > 0); data_length_ += size; if (!was_readable && size > 0) { - PostEvent(owner_, SE_READ, 0); + PostEvent(SE_READ, 0); } } bool FifoBuffer::GetWriteRemaining(size_t* size) const { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); *size = buffer_length_ - data_length_; return true; } diff --git a/rtc_base/memory/fifo_buffer.h b/rtc_base/memory/fifo_buffer.h index f859815c70..bf2edf6e24 100644 --- a/rtc_base/memory/fifo_buffer.h +++ b/rtc_base/memory/fifo_buffer.h @@ -14,6 +14,9 @@ #include #include "rtc_base/stream.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" namespace rtc { @@ -97,13 +100,19 @@ class FifoBuffer final : public StreamInterface { bool GetWriteRemaining(size_t* size) const; private: + void PostEvent(int events, int err) { + owner_->PostTask(webrtc::ToQueuedTask(task_safety_, [this, events, err]() { + SignalEvent(this, events, err); + })); + } + // Helper method that implements ReadOffset. 
Caller must acquire a lock // when calling this method. StreamResult ReadOffsetLocked(void* buffer, size_t bytes, size_t offset, size_t* bytes_read) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Helper method that implements WriteOffset. Caller must acquire a lock // when calling this method. @@ -111,22 +120,24 @@ class FifoBuffer final : public StreamInterface { size_t bytes, size_t offset, size_t* bytes_written) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + + webrtc::ScopedTaskSafety task_safety_; // keeps the opened/closed state of the stream - StreamState state_ RTC_GUARDED_BY(crit_); + StreamState state_ RTC_GUARDED_BY(mutex_); // the allocated buffer - std::unique_ptr buffer_ RTC_GUARDED_BY(crit_); + std::unique_ptr buffer_ RTC_GUARDED_BY(mutex_); // size of the allocated buffer - size_t buffer_length_ RTC_GUARDED_BY(crit_); + size_t buffer_length_ RTC_GUARDED_BY(mutex_); // amount of readable data in the buffer - size_t data_length_ RTC_GUARDED_BY(crit_); + size_t data_length_ RTC_GUARDED_BY(mutex_); // offset to the readable data - size_t read_position_ RTC_GUARDED_BY(crit_); + size_t read_position_ RTC_GUARDED_BY(mutex_); // stream callbacks are dispatched on this thread - Thread* owner_; + Thread* const owner_; // object lock - CriticalSection crit_; + mutable webrtc::Mutex mutex_; RTC_DISALLOW_COPY_AND_ASSIGN(FifoBuffer); }; diff --git a/rtc_base/message_buffer_reader.h b/rtc_base/message_buffer_reader.h deleted file mode 100644 index 32b8f336b1..0000000000 --- a/rtc_base/message_buffer_reader.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef RTC_BASE_MESSAGE_BUFFER_READER_H_ -#define RTC_BASE_MESSAGE_BUFFER_READER_H_ - -#include "rtc_base/byte_buffer.h" - -namespace webrtc { - -// A simple subclass of the ByteBufferReader that exposes the starting address -// of the message and its length, so that we can recall previously parsed data. -class MessageBufferReader : public rtc::ByteBufferReader { - public: - MessageBufferReader(const char* bytes, size_t len) - : rtc::ByteBufferReader(bytes, len) {} - ~MessageBufferReader() = default; - - // Starting address of the message. - const char* MessageData() const { return bytes_; } - // Total length of the message. Note that this is different from Length(), - // which is the length of the remaining message from the current offset. - size_t MessageLength() const { return size_; } - // Current offset in the message. - size_t CurrentOffset() const { return start_; } -}; - -} // namespace webrtc - -#endif // RTC_BASE_MESSAGE_BUFFER_READER_H_ diff --git a/rtc_base/message_digest.h b/rtc_base/message_digest.h index 36f00b5273..691330e23c 100644 --- a/rtc_base/message_digest.h +++ b/rtc_base/message_digest.h @@ -45,7 +45,8 @@ class MessageDigestFactory { static MessageDigest* Create(const std::string& alg); }; -// A whitelist of approved digest algorithms from RFC 4572 (FIPS 180). +// A check that an algorithm is in a list of approved digest algorithms +// from RFC 4572 (FIPS 180). bool IsFips180DigestAlgorithm(const std::string& alg); // Functions to create hashes. 
diff --git a/rtc_base/message_handler.cc b/rtc_base/message_handler.cc index 18a06e241d..e6e973dbd9 100644 --- a/rtc_base/message_handler.cc +++ b/rtc_base/message_handler.cc @@ -14,7 +14,16 @@ namespace rtc { -MessageHandler::~MessageHandler() { +MessageHandlerAutoCleanup::MessageHandlerAutoCleanup() {} + +MessageHandlerAutoCleanup::~MessageHandlerAutoCleanup() { + // Note that even though this clears currently pending messages for the + // message handler, it's still racy since it doesn't prevent threads that + // might be in the process of posting new messages with would-be dangling + // pointers. + // This is related to the design of Message having a raw pointer. + // We could consider whether it would be safer to require message handlers + // to be reference counted (as some are). ThreadManager::Clear(this); } diff --git a/rtc_base/message_handler.h b/rtc_base/message_handler.h index 85cb785485..62c8344e1f 100644 --- a/rtc_base/message_handler.h +++ b/rtc_base/message_handler.h @@ -21,17 +21,27 @@ namespace rtc { struct Message; -// Messages get dispatched to a MessageHandler +// MessageQueue/Thread Messages get dispatched via the MessageHandler interface. class RTC_EXPORT MessageHandler { public: - virtual ~MessageHandler(); + virtual ~MessageHandler() {} virtual void OnMessage(Message* msg) = 0; +}; + +// Warning: Provided for backwards compatibility. +// +// This class performs expensive cleanup in the dtor that will affect all +// instances of Thread (and their pending message queues) and will block the +// current thread as well as all other threads. 
+class RTC_EXPORT MessageHandlerAutoCleanup : public MessageHandler { + public: + ~MessageHandlerAutoCleanup() override; protected: - MessageHandler() {} + MessageHandlerAutoCleanup(); private: - RTC_DISALLOW_COPY_AND_ASSIGN(MessageHandler); + RTC_DISALLOW_COPY_AND_ASSIGN(MessageHandlerAutoCleanup); }; } // namespace rtc diff --git a/rtc_base/nat_server.cc b/rtc_base/nat_server.cc index 323a787ee0..725a57be9f 100644 --- a/rtc_base/nat_server.cc +++ b/rtc_base/nat_server.cc @@ -174,7 +174,7 @@ void NATServer::OnInternalUDPPacket(AsyncPacketSocket* socket, RTC_DCHECK(iter != int_map_->end()); // Allow the destination to send packets back to the source. - iter->second->WhitelistInsert(dest_addr); + iter->second->AllowlistInsert(dest_addr); // Send the packet to its intended destination. rtc::PacketOptions options; @@ -227,29 +227,29 @@ void NATServer::Translate(const SocketAddressPair& route) { bool NATServer::ShouldFilterOut(TransEntry* entry, const SocketAddress& ext_addr) { - return entry->WhitelistContains(ext_addr); + return entry->AllowlistContains(ext_addr); } NATServer::TransEntry::TransEntry(const SocketAddressPair& r, AsyncUDPSocket* s, NAT* nat) : route(r), socket(s) { - whitelist = new AddressSet(AddrCmp(nat)); + allowlist = new AddressSet(AddrCmp(nat)); } NATServer::TransEntry::~TransEntry() { - delete whitelist; + delete allowlist; delete socket; } -void NATServer::TransEntry::WhitelistInsert(const SocketAddress& addr) { - CritScope cs(&crit_); - whitelist->insert(addr); +void NATServer::TransEntry::AllowlistInsert(const SocketAddress& addr) { + webrtc::MutexLock lock(&mutex_); + allowlist->insert(addr); } -bool NATServer::TransEntry::WhitelistContains(const SocketAddress& ext_addr) { - CritScope cs(&crit_); - return whitelist->find(ext_addr) == whitelist->end(); +bool NATServer::TransEntry::AllowlistContains(const SocketAddress& ext_addr) { + webrtc::MutexLock lock(&mutex_); + return allowlist->find(ext_addr) == allowlist->end(); } } // namespace rtc 
diff --git a/rtc_base/nat_server.h b/rtc_base/nat_server.h index 46f01e9761..5078fbb2c1 100644 --- a/rtc_base/nat_server.h +++ b/rtc_base/nat_server.h @@ -20,6 +20,7 @@ #include "rtc_base/proxy_server.h" #include "rtc_base/socket_address_pair.h" #include "rtc_base/socket_factory.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread.h" namespace rtc { @@ -96,13 +97,13 @@ class NATServer : public sigslot::has_slots<> { TransEntry(const SocketAddressPair& r, AsyncUDPSocket* s, NAT* nat); ~TransEntry(); - void WhitelistInsert(const SocketAddress& addr); - bool WhitelistContains(const SocketAddress& ext_addr); + void AllowlistInsert(const SocketAddress& addr); + bool AllowlistContains(const SocketAddress& ext_addr); SocketAddressPair route; AsyncUDPSocket* socket; - AddressSet* whitelist; - CriticalSection crit_; + AddressSet* allowlist; + webrtc::Mutex mutex_; }; typedef std::map InternalMap; diff --git a/rtc_base/nat_socket_factory.cc b/rtc_base/nat_socket_factory.cc index 7c853e9c99..effbb5a6c3 100644 --- a/rtc_base/nat_socket_factory.cc +++ b/rtc_base/nat_socket_factory.cc @@ -230,10 +230,10 @@ class NATSocket : public AsyncSocket, public sigslot::has_slots<> { return connected_ ? CS_CONNECTED : CS_CLOSED; } int GetOption(Option opt, int* value) override { - return socket_->GetOption(opt, value); + return socket_ ? socket_->GetOption(opt, value) : -1; } int SetOption(Option opt, int value) override { - return socket_->SetOption(opt, value); + return socket_ ? socket_->SetOption(opt, value) : -1; } void OnConnectEvent(AsyncSocket* socket) { @@ -428,14 +428,15 @@ NATSocketServer::Translator::Translator(NATSocketServer* server, // Create a new private network, and a NATServer running on the private // network that bridges to the external network. Also tell the private // network to use the same message queue as us. 
- VirtualSocketServer* internal_server = new VirtualSocketServer(); - internal_server->SetMessageQueue(server_->queue()); - internal_factory_.reset(internal_server); - nat_server_.reset(new NATServer(type, internal_server, int_ip, int_ip, - ext_factory, ext_ip)); + internal_server_ = std::make_unique(); + internal_server_->SetMessageQueue(server_->queue()); + nat_server_ = std::make_unique( + type, internal_server_.get(), int_ip, int_ip, ext_factory, ext_ip); } -NATSocketServer::Translator::~Translator() = default; +NATSocketServer::Translator::~Translator() { + internal_server_->SetMessageQueue(nullptr); +} NATSocketServer::Translator* NATSocketServer::Translator::GetTranslator( const SocketAddress& ext_ip) { diff --git a/rtc_base/nat_socket_factory.h b/rtc_base/nat_socket_factory.h index e649d19a8e..70030d834e 100644 --- a/rtc_base/nat_socket_factory.h +++ b/rtc_base/nat_socket_factory.h @@ -107,7 +107,7 @@ class NATSocketServer : public SocketServer, public NATInternalSocketFactory { const SocketAddress& ext_addr); ~Translator(); - SocketFactory* internal_factory() { return internal_factory_.get(); } + SocketFactory* internal_factory() { return internal_server_.get(); } SocketAddress internal_udp_address() const { return nat_server_->internal_udp_address(); } @@ -129,7 +129,7 @@ class NATSocketServer : public SocketServer, public NATInternalSocketFactory { private: NATSocketServer* server_; - std::unique_ptr internal_factory_; + std::unique_ptr internal_server_; std::unique_ptr nat_server_; TranslatorMap nats_; std::set clients_; diff --git a/rtc_base/net_helpers.cc b/rtc_base/net_helpers.cc index 6ff3791738..bec854af03 100644 --- a/rtc_base/net_helpers.cc +++ b/rtc_base/net_helpers.cc @@ -19,6 +19,7 @@ #include "rtc_base/win32.h" #endif #if defined(WEBRTC_POSIX) && !defined(__native_client__) +#include #if defined(WEBRTC_ANDROID) #include "rtc_base/ifaddrs_android.h" #else @@ -26,105 +27,8 @@ #endif #endif // defined(WEBRTC_POSIX) && !defined(__native_client__) 
-#include "rtc_base/logging.h" -#include "rtc_base/signal_thread.h" -#include "rtc_base/third_party/sigslot/sigslot.h" // for signal_with_thread... - namespace rtc { -int ResolveHostname(const std::string& hostname, - int family, - std::vector* addresses) { -#ifdef __native_client__ - RTC_NOTREACHED(); - RTC_LOG(LS_WARNING) << "ResolveHostname() is not implemented for NaCl"; - return -1; -#else // __native_client__ - if (!addresses) { - return -1; - } - addresses->clear(); - struct addrinfo* result = nullptr; - struct addrinfo hints = {0}; - hints.ai_family = family; - // |family| here will almost always be AF_UNSPEC, because |family| comes from - // AsyncResolver::addr_.family(), which comes from a SocketAddress constructed - // with a hostname. When a SocketAddress is constructed with a hostname, its - // family is AF_UNSPEC. However, if someday in the future we construct - // a SocketAddress with both a hostname and a family other than AF_UNSPEC, - // then it would be possible to get a specific family value here. 
- - // The behavior of AF_UNSPEC is roughly "get both ipv4 and ipv6", as - // documented by the various operating systems: - // Linux: http://man7.org/linux/man-pages/man3/getaddrinfo.3.html - // Windows: https://msdn.microsoft.com/en-us/library/windows/desktop/ - // ms738520(v=vs.85).aspx - // Mac: https://developer.apple.com/legacy/library/documentation/Darwin/ - // Reference/ManPages/man3/getaddrinfo.3.html - // Android (source code, not documentation): - // https://android.googlesource.com/platform/bionic/+/ - // 7e0bfb511e85834d7c6cb9631206b62f82701d60/libc/netbsd/net/getaddrinfo.c#1657 - hints.ai_flags = AI_ADDRCONFIG; - int ret = getaddrinfo(hostname.c_str(), nullptr, &hints, &result); - if (ret != 0) { - return ret; - } - struct addrinfo* cursor = result; - for (; cursor; cursor = cursor->ai_next) { - if (family == AF_UNSPEC || cursor->ai_family == family) { - IPAddress ip; - if (IPFromAddrInfo(cursor, &ip)) { - addresses->push_back(ip); - } - } - } - freeaddrinfo(result); - return 0; -#endif // !__native_client__ -} - -// AsyncResolver -AsyncResolver::AsyncResolver() : SignalThread(), error_(-1) {} - -AsyncResolver::~AsyncResolver() = default; - -void AsyncResolver::Start(const SocketAddress& addr) { - addr_ = addr; - // SignalThred Start will kickoff the resolve process. 
- SignalThread::Start(); -} - -bool AsyncResolver::GetResolvedAddress(int family, SocketAddress* addr) const { - if (error_ != 0 || addresses_.empty()) - return false; - - *addr = addr_; - for (size_t i = 0; i < addresses_.size(); ++i) { - if (family == addresses_[i].family()) { - addr->SetResolvedIP(addresses_[i]); - return true; - } - } - return false; -} - -int AsyncResolver::GetError() const { - return error_; -} - -void AsyncResolver::Destroy(bool wait) { - SignalThread::Destroy(wait); -} - -void AsyncResolver::DoWork() { - error_ = - ResolveHostname(addr_.hostname().c_str(), addr_.family(), &addresses_); -} - -void AsyncResolver::OnWorkDone() { - SignalDone(this); -} - const char* inet_ntop(int af, const void* src, char* dst, socklen_t size) { #if defined(WEBRTC_WIN) return win32_inet_ntop(af, src, dst, size); @@ -149,7 +53,7 @@ bool HasIPv4Enabled() { return false; } for (struct ifaddrs* cur = ifa; cur != nullptr; cur = cur->ifa_next) { - if (cur->ifa_addr->sa_family == AF_INET) { + if (cur->ifa_addr != nullptr && cur->ifa_addr->sa_family == AF_INET) { has_ipv4 = true; break; } @@ -208,7 +112,7 @@ bool HasIPv6Enabled() { return false; } for (struct ifaddrs* cur = ifa; cur != nullptr; cur = cur->ifa_next) { - if (cur->ifa_addr->sa_family == AF_INET6) { + if (cur->ifa_addr != nullptr && cur->ifa_addr->sa_family == AF_INET6) { has_ipv6 = true; break; } diff --git a/rtc_base/net_helpers.h b/rtc_base/net_helpers.h index 1e06940be7..4ed84786b3 100644 --- a/rtc_base/net_helpers.h +++ b/rtc_base/net_helpers.h @@ -15,44 +15,12 @@ #include #elif WEBRTC_WIN #include // NOLINT -#endif - -#include -#include "rtc_base/async_resolver_interface.h" -#include "rtc_base/ip_address.h" -#include "rtc_base/signal_thread.h" -#include "rtc_base/socket_address.h" -#include "rtc_base/system/rtc_export.h" +#include "rtc_base/win32.h" +#endif namespace rtc { -// AsyncResolver will perform async DNS resolution, signaling the result on -// the SignalDone from AsyncResolverInterface when 
the operation completes. -class RTC_EXPORT AsyncResolver : public SignalThread, - public AsyncResolverInterface { - public: - AsyncResolver(); - ~AsyncResolver() override; - - void Start(const SocketAddress& addr) override; - bool GetResolvedAddress(int family, SocketAddress* addr) const override; - int GetError() const override; - void Destroy(bool wait) override; - - const std::vector& addresses() const { return addresses_; } - void set_error(int error) { error_ = error; } - - protected: - void DoWork() override; - void OnWorkDone() override; - - private: - SocketAddress addr_; - std::vector addresses_; - int error_; -}; - // rtc namespaced wrappers for inet_ntop and inet_pton so we can avoid // the windows-native versions of these. const char* inet_ntop(int af, const void* src, char* dst, socklen_t size); @@ -60,6 +28,7 @@ int inet_pton(int af, const char* src, void* dst); bool HasIPv4Enabled(); bool HasIPv6Enabled(); + } // namespace rtc #endif // RTC_BASE_NET_HELPERS_H_ diff --git a/rtc_base/network.cc b/rtc_base/network.cc index 64aee4bdae..f4a349bae0 100644 --- a/rtc_base/network.cc +++ b/rtc_base/network.cc @@ -131,7 +131,7 @@ uint16_t ComputeNetworkCostByType(int type, } #if !defined(__native_client__) -bool IsIgnoredIPv6(const InterfaceAddress& ip) { +bool IsIgnoredIPv6(bool allow_mac_based_ipv6, const InterfaceAddress& ip) { if (ip.family() != AF_INET6) { return false; } @@ -144,7 +144,7 @@ bool IsIgnoredIPv6(const InterfaceAddress& ip) { } // Any MAC based IPv6 should be avoided to prevent the MAC tracking. 
- if (IPIsMacBased(ip)) { + if (IPIsMacBased(ip) && !allow_mac_based_ipv6) { return true; } @@ -212,7 +212,8 @@ AdapterType GetAdapterTypeFromName(const char* network_name) { return ADAPTER_TYPE_ETHERNET; } - if (MatchTypeNameWithIndexPattern(network_name, "wlan")) { + if (MatchTypeNameWithIndexPattern(network_name, "wlan") || + MatchTypeNameWithIndexPattern(network_name, "v4-wlan")) { return ADAPTER_TYPE_WIFI; } @@ -265,7 +266,9 @@ webrtc::MdnsResponderInterface* NetworkManager::GetMdnsResponder() const { } NetworkManagerBase::NetworkManagerBase() - : enumeration_permission_(NetworkManager::ENUMERATION_ALLOWED) {} + : enumeration_permission_(NetworkManager::ENUMERATION_ALLOWED), + signal_network_preference_change_(webrtc::field_trial::IsEnabled( + "WebRTC-SignalNetworkPreferenceChange")) {} NetworkManagerBase::~NetworkManagerBase() { for (const auto& kv : networks_map_) { @@ -382,6 +385,12 @@ void NetworkManagerBase::MergeNetworkList(const NetworkList& new_networks, if (!existing_net->active()) { *changed = true; } + if (net->network_preference() != existing_net->network_preference()) { + existing_net->set_network_preference(net->network_preference()); + if (signal_network_preference_change_) { + *changed = true; + } + } RTC_DCHECK(net->active()); if (existing_net != net) { delete net; @@ -470,12 +479,20 @@ Network* NetworkManagerBase::GetNetworkFromAddress( return nullptr; } -BasicNetworkManager::BasicNetworkManager() - : thread_(nullptr), sent_first_update_(false), start_count_(0) {} +BasicNetworkManager::BasicNetworkManager() : BasicNetworkManager(nullptr) {} + +BasicNetworkManager::BasicNetworkManager( + NetworkMonitorFactory* network_monitor_factory) + : network_monitor_factory_(network_monitor_factory), + allow_mac_based_ipv6_( + webrtc::field_trial::IsEnabled("WebRTC-AllowMACBasedIPv6")), + bind_using_ifname_( + !webrtc::field_trial::IsDisabled("WebRTC-BindUsingInterfaceName")) {} BasicNetworkManager::~BasicNetworkManager() {} void 
BasicNetworkManager::OnNetworksChanged() { + RTC_DCHECK_RUN_ON(thread_); RTC_LOG(LS_INFO) << "Network change was observed"; UpdateNetworksOnce(); } @@ -523,7 +540,7 @@ void BasicNetworkManager::ConvertIfAddrs(struct ifaddrs* interfaces, // Special case for IPv6 address. if (cursor->ifa_addr->sa_family == AF_INET6) { - if (IsIgnoredIPv6(ip)) { + if (IsIgnoredIPv6(allow_mac_based_ipv6_, ip)) { continue; } scope_id = @@ -532,6 +549,7 @@ void BasicNetworkManager::ConvertIfAddrs(struct ifaddrs* interfaces, AdapterType adapter_type = ADAPTER_TYPE_UNKNOWN; AdapterType vpn_underlying_adapter_type = ADAPTER_TYPE_UNKNOWN; + NetworkPreference network_preference = NetworkPreference::NEUTRAL; if (cursor->ifa_flags & IFF_LOOPBACK) { adapter_type = ADAPTER_TYPE_LOOPBACK; } else { @@ -539,6 +557,8 @@ void BasicNetworkManager::ConvertIfAddrs(struct ifaddrs* interfaces, // Otherwise, get the adapter type based on a few name matching rules. if (network_monitor_) { adapter_type = network_monitor_->GetAdapterType(cursor->ifa_name); + network_preference = + network_monitor_->GetNetworkPreference(cursor->ifa_name); } if (adapter_type == ADAPTER_TYPE_UNKNOWN) { adapter_type = GetAdapterTypeFromName(cursor->ifa_name); @@ -564,6 +584,7 @@ void BasicNetworkManager::ConvertIfAddrs(struct ifaddrs* interfaces, network->AddIP(ip); network->set_ignored(IsIgnoredNetwork(*network)); network->set_underlying_type_for_vpn(vpn_underlying_adapter_type); + network->set_network_preference(network_preference); if (include_ignored || !network->ignored()) { current_networks[key] = network.get(); networks->push_back(network.release()); @@ -576,6 +597,7 @@ void BasicNetworkManager::ConvertIfAddrs(struct ifaddrs* interfaces, existing_network->set_underlying_type_for_vpn( vpn_underlying_adapter_type); } + existing_network->set_network_preference(network_preference); } } } @@ -696,7 +718,7 @@ bool BasicNetworkManager::CreateNetworks(bool include_ignored, scope_id = v6_addr->sin6_scope_id; ip = 
IPAddress(v6_addr->sin6_addr); - if (IsIgnoredIPv6(InterfaceAddress(ip))) { + if (IsIgnoredIPv6(allow_mac_based_ipv6_, InterfaceAddress(ip))) { continue; } @@ -789,6 +811,11 @@ bool BasicNetworkManager::IsIgnoredNetwork(const Network& network) const { } #endif + if (network_monitor_ && + !network_monitor_->IsAdapterAvailable(network.name())) { + return true; + } + // Ignore any networks with a 0.x.y.z IP if (network.prefix().family() == AF_INET) { return (network.prefix().v4AddressAsHostOrderInteger() < 0x01000000); @@ -799,6 +826,8 @@ bool BasicNetworkManager::IsIgnoredNetwork(const Network& network) const { void BasicNetworkManager::StartUpdating() { thread_ = Thread::Current(); + // Redundant but necessary for thread annotations. + RTC_DCHECK_RUN_ON(thread_); if (start_count_) { // If network interfaces are already discovered and signal is sent, // we should trigger network signal immediately for the new clients @@ -813,7 +842,7 @@ void BasicNetworkManager::StartUpdating() { } void BasicNetworkManager::StopUpdating() { - RTC_DCHECK(Thread::Current() == thread_); + RTC_DCHECK_RUN_ON(thread_); if (!start_count_) return; @@ -826,18 +855,26 @@ void BasicNetworkManager::StopUpdating() { } void BasicNetworkManager::StartNetworkMonitor() { - NetworkMonitorFactory* factory = NetworkMonitorFactory::GetFactory(); - if (factory == nullptr) { + if (network_monitor_factory_ == nullptr) { return; } if (!network_monitor_) { - network_monitor_.reset(factory->CreateNetworkMonitor()); + network_monitor_.reset(network_monitor_factory_->CreateNetworkMonitor()); if (!network_monitor_) { return; } network_monitor_->SignalNetworksChanged.connect( this, &BasicNetworkManager::OnNetworksChanged); } + + if (network_monitor_->SupportsBindSocketToNetwork()) { + // Set NetworkBinder on SocketServer so that + // PhysicalSocket::Bind will call + // BasicNetworkManager::BindSocketToNetwork(), (that will lookup interface + // name and then call network_monitor_->BindSocketToNetwork()). 
+ thread_->socketserver()->set_network_binder(this); + } + network_monitor_->Start(); } @@ -846,9 +883,17 @@ void BasicNetworkManager::StopNetworkMonitor() { return; } network_monitor_->Stop(); + + if (network_monitor_->SupportsBindSocketToNetwork()) { + // Reset NetworkBinder on SocketServer. + if (thread_->socketserver()->network_binder() == this) { + thread_->socketserver()->set_network_binder(nullptr); + } + } } void BasicNetworkManager::OnMessage(Message* msg) { + RTC_DCHECK_RUN_ON(thread_); switch (msg->message_id) { case kUpdateNetworksMessage: { UpdateNetworksContinually(); @@ -864,7 +909,6 @@ void BasicNetworkManager::OnMessage(Message* msg) { } IPAddress BasicNetworkManager::QueryDefaultLocalAddress(int family) const { - RTC_DCHECK(thread_ == Thread::Current()); RTC_DCHECK(thread_->socketserver() != nullptr); RTC_DCHECK(family == AF_INET || family == AF_INET6); @@ -893,8 +937,6 @@ void BasicNetworkManager::UpdateNetworksOnce() { if (!start_count_) return; - RTC_DCHECK(Thread::Current() == thread_); - NetworkList list; if (!CreateNetworks(false, &list)) { SignalError(); @@ -918,6 +960,7 @@ void BasicNetworkManager::UpdateNetworksContinually() { } void BasicNetworkManager::DumpNetworks() { + RTC_DCHECK_RUN_ON(thread_); NetworkList list; GetNetworks(&list); RTC_LOG(LS_INFO) << "NetworkManager detected " << list.size() << " networks:"; @@ -928,6 +971,20 @@ void BasicNetworkManager::DumpNetworks() { } } +NetworkBindingResult BasicNetworkManager::BindSocketToNetwork( + int socket_fd, + const IPAddress& address) { + RTC_DCHECK_RUN_ON(thread_); + std::string if_name; + if (bind_using_ifname_) { + Network* net = GetNetworkFromAddress(address); + if (net != nullptr) { + if_name = net->name(); + } + } + return network_monitor_->BindSocketToNetwork(socket_fd, address, if_name); +} + Network::Network(const std::string& name, const std::string& desc, const IPAddress& prefix, diff --git a/rtc_base/network.h b/rtc_base/network.h index a67d2a2339..8b6b6235fa 100644 --- 
a/rtc_base/network.h +++ b/rtc_base/network.h @@ -19,12 +19,15 @@ #include #include +#include "api/sequence_checker.h" #include "rtc_base/ip_address.h" #include "rtc_base/mdns_responder_interface.h" #include "rtc_base/message_handler.h" #include "rtc_base/network_monitor.h" +#include "rtc_base/network_monitor_factory.h" #include "rtc_base/system/rtc_export.h" #include "rtc_base/third_party/sigslot/sigslot.h" +#include "rtc_base/thread_annotations.h" #if defined(WEBRTC_POSIX) struct ifaddrs; @@ -191,11 +194,11 @@ class RTC_EXPORT NetworkManagerBase : public NetworkManager { void set_default_local_addresses(const IPAddress& ipv4, const IPAddress& ipv6); + Network* GetNetworkFromAddress(const rtc::IPAddress& ip) const; + private: friend class NetworkTest; - Network* GetNetworkFromAddress(const rtc::IPAddress& ip) const; - EnumerationPermission enumeration_permission_; NetworkList networks_; @@ -212,15 +215,21 @@ class RTC_EXPORT NetworkManagerBase : public NetworkManager { // network id 0 because we only compare the network ids in the old and the new // best connections in the transport channel. uint16_t next_available_network_id_ = 1; + + // True if calling network_preference() with a changed value + // should result in firing the SignalNetworkChanged signal. + bool signal_network_preference_change_ = false; }; // Basic implementation of the NetworkManager interface that gets list // of networks using OS APIs. class RTC_EXPORT BasicNetworkManager : public NetworkManagerBase, - public MessageHandler, + public MessageHandlerAutoCleanup, + public NetworkBinderInterface, public sigslot::has_slots<> { public: BasicNetworkManager(); + explicit BasicNetworkManager(NetworkMonitorFactory* network_monitor_factory); ~BasicNetworkManager() override; void StartUpdating() override; @@ -234,51 +243,68 @@ class RTC_EXPORT BasicNetworkManager : public NetworkManagerBase, // Sets the network ignore list, which is empty by default. 
Any network on the // ignore list will be filtered from network enumeration results. + // Should be called only before initialization. void set_network_ignore_list(const std::vector& list) { + RTC_DCHECK(thread_ == nullptr); network_ignore_list_ = list; } + // Bind a socket to interface that ip address belong to. + // Implementation look up interface name and calls + // BindSocketToNetwork on NetworkMonitor. + // The interface name is needed as e.g ipv4 over ipv6 addresses + // are not exposed using Android functions, but it is possible + // bind an ipv4 address to the interface. + NetworkBindingResult BindSocketToNetwork(int socket_fd, + const IPAddress& address) override; + protected: #if defined(WEBRTC_POSIX) // Separated from CreateNetworks for tests. void ConvertIfAddrs(ifaddrs* interfaces, IfAddrsConverter* converter, bool include_ignored, - NetworkList* networks) const; + NetworkList* networks) const RTC_RUN_ON(thread_); #endif // defined(WEBRTC_POSIX) // Creates a network object for each network available on the machine. - bool CreateNetworks(bool include_ignored, NetworkList* networks) const; + bool CreateNetworks(bool include_ignored, NetworkList* networks) const + RTC_RUN_ON(thread_); // Determines if a network should be ignored. This should only be determined // based on the network's property instead of any individual IP. - bool IsIgnoredNetwork(const Network& network) const; + bool IsIgnoredNetwork(const Network& network) const RTC_RUN_ON(thread_); // This function connects a UDP socket to a public address and returns the // local address associated it. Since it binds to the "any" address // internally, it returns the default local address on a multi-homed endpoint. - IPAddress QueryDefaultLocalAddress(int family) const; + IPAddress QueryDefaultLocalAddress(int family) const RTC_RUN_ON(thread_); private: friend class NetworkTest; // Creates a network monitor and listens for network updates. 
- void StartNetworkMonitor(); + void StartNetworkMonitor() RTC_RUN_ON(thread_); // Stops and removes the network monitor. - void StopNetworkMonitor(); + void StopNetworkMonitor() RTC_RUN_ON(thread_); // Called when it receives updates from the network monitor. void OnNetworksChanged(); // Updates the networks and reschedules the next update. - void UpdateNetworksContinually(); + void UpdateNetworksContinually() RTC_RUN_ON(thread_); // Only updates the networks; does not reschedule the next update. - void UpdateNetworksOnce(); + void UpdateNetworksOnce() RTC_RUN_ON(thread_); - Thread* thread_; - bool sent_first_update_; - int start_count_; + Thread* thread_ = nullptr; + bool sent_first_update_ = true; + int start_count_ = 0; std::vector network_ignore_list_; - std::unique_ptr network_monitor_; + NetworkMonitorFactory* network_monitor_factory_ RTC_GUARDED_BY(thread_) = + nullptr; + std::unique_ptr network_monitor_ + RTC_GUARDED_BY(thread_); + bool allow_mac_based_ipv6_ RTC_GUARDED_BY(thread_) = false; + bool bind_using_ifname_ RTC_GUARDED_BY(thread_) = false; }; // Represents a Unix-type network interface, with a name and single address. @@ -296,9 +322,13 @@ class RTC_EXPORT Network { AdapterType type); Network(const Network&); ~Network(); + // This signal is fired whenever type() or underlying_type_for_vpn() changes. sigslot::signal1 SignalTypeChanged; + // This signal is fired whenever network preference changes. + sigslot::signal1 SignalNetworkPreferenceChanged; + const DefaultLocalAddressProvider* default_local_address_provider() { return default_local_address_provider_; } @@ -443,6 +473,17 @@ class RTC_EXPORT Network { } } + // Property set by operating system/firmware that has information + // about connection strength to e.g WIFI router or CELL base towers. 
+ NetworkPreference network_preference() const { return network_preference_; } + void set_network_preference(NetworkPreference val) { + if (network_preference_ == val) { + return; + } + network_preference_ = val; + SignalNetworkPreferenceChanged(this); + } + // Debugging description of this network std::string ToString() const; @@ -463,6 +504,7 @@ class RTC_EXPORT Network { bool active_ = true; uint16_t id_ = 0; bool use_differentiated_cellular_costs_ = false; + NetworkPreference network_preference_ = NetworkPreference::NEUTRAL; friend class NetworkManager; }; diff --git a/rtc_base/network/BUILD.gn b/rtc_base/network/BUILD.gn index 1d06defb3b..35ae3d45f7 100644 --- a/rtc_base/network/BUILD.gn +++ b/rtc_base/network/BUILD.gn @@ -13,8 +13,6 @@ rtc_library("sent_packet") { "sent_packet.cc", "sent_packet.h", ] - deps = [ - "../system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", - ] + deps = [ "../system:rtc_export" ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } diff --git a/rtc_base/network_monitor.cc b/rtc_base/network_monitor.cc index 4eb52901f3..70c2ad5020 100644 --- a/rtc_base/network_monitor.cc +++ b/rtc_base/network_monitor.cc @@ -10,62 +10,21 @@ #include "rtc_base/network_monitor.h" -#include - #include "rtc_base/checks.h" -#include "rtc_base/location.h" -#include "rtc_base/logging.h" - -namespace { -const uint32_t UPDATE_NETWORKS_MESSAGE = 1; - -// This is set by NetworkMonitorFactory::SetFactory and the caller of -// NetworkMonitorFactory::SetFactory must be responsible for calling -// ReleaseFactory to destroy the factory. 
-rtc::NetworkMonitorFactory* network_monitor_factory = nullptr; -} // namespace namespace rtc { -NetworkMonitorInterface::NetworkMonitorInterface() {} - -NetworkMonitorInterface::~NetworkMonitorInterface() {} - -NetworkMonitorBase::NetworkMonitorBase() : worker_thread_(Thread::Current()) {} -NetworkMonitorBase::~NetworkMonitorBase() {} - -void NetworkMonitorBase::OnNetworksChanged() { - RTC_LOG(LS_VERBOSE) << "Network change is received at the network monitor"; - worker_thread_->Post(RTC_FROM_HERE, this, UPDATE_NETWORKS_MESSAGE); -} - -void NetworkMonitorBase::OnMessage(Message* msg) { - RTC_DCHECK(msg->message_id == UPDATE_NETWORKS_MESSAGE); - SignalNetworksChanged(); -} - -AdapterType NetworkMonitorBase::GetVpnUnderlyingAdapterType( - const std::string& interface_name) { - return ADAPTER_TYPE_UNKNOWN; -} - -NetworkMonitorFactory::NetworkMonitorFactory() {} -NetworkMonitorFactory::~NetworkMonitorFactory() {} - -void NetworkMonitorFactory::SetFactory(NetworkMonitorFactory* factory) { - if (network_monitor_factory != nullptr) { - delete network_monitor_factory; - } - network_monitor_factory = factory; -} -void NetworkMonitorFactory::ReleaseFactory(NetworkMonitorFactory* factory) { - if (factory == network_monitor_factory) { - SetFactory(nullptr); +const char* NetworkPreferenceToString(NetworkPreference preference) { + switch (preference) { + case NetworkPreference::NEUTRAL: + return "NEUTRAL"; + case NetworkPreference::NOT_PREFERRED: + return "NOT_PREFERRED"; } + RTC_CHECK_NOTREACHED(); } -NetworkMonitorFactory* NetworkMonitorFactory::GetFactory() { - return network_monitor_factory; -} +NetworkMonitorInterface::NetworkMonitorInterface() {} +NetworkMonitorInterface::~NetworkMonitorInterface() {} } // namespace rtc diff --git a/rtc_base/network_monitor.h b/rtc_base/network_monitor.h index ed4464db55..dddc2f60f4 100644 --- a/rtc_base/network_monitor.h +++ b/rtc_base/network_monitor.h @@ -13,7 +13,6 @@ #include "rtc_base/network_constants.h" #include 
"rtc_base/third_party/sigslot/sigslot.h" -#include "rtc_base/thread.h" namespace rtc { @@ -27,6 +26,18 @@ enum class NetworkBindingResult { NETWORK_CHANGED = -4 }; +// NetworkPreference property set by operating system/firmware that has +// information about connection strength to e.g WIFI router or CELL base towers. +// GENERATED_JAVA_ENUM_PACKAGE: org.webrtc +enum class NetworkPreference { + NEUTRAL = 0, + NOT_PREFERRED = -1, +}; + +const char* NetworkPreferenceToString(NetworkPreference preference); + +// This interface is set onto a socket server, +// where only the ip address is known at the time of binding. class NetworkBinderInterface { public: // Binds a socket to the network that is attached to |address| so that all @@ -53,8 +64,7 @@ class NetworkBinderInterface { * * Memory consideration: * NetworkMonitor is owned by the caller (NetworkManager). The global network - * monitor factory is owned by the factory itself but needs to be released from - * the factory creator. + * monitor factory is owned by the PeerConnectionFactory. */ // Generic network monitor interface. It starts and stops monitoring network // changes, and fires the SignalNetworksChanged event when networks change. @@ -68,54 +78,38 @@ class NetworkMonitorInterface { virtual void Start() = 0; virtual void Stop() = 0; - // Implementations should call this method on the base when networks change, - // and the base will fire SignalNetworksChanged on the right thread. 
- virtual void OnNetworksChanged() = 0; - virtual AdapterType GetAdapterType(const std::string& interface_name) = 0; virtual AdapterType GetVpnUnderlyingAdapterType( const std::string& interface_name) = 0; -}; - -class NetworkMonitorBase : public NetworkMonitorInterface, - public MessageHandler, - public sigslot::has_slots<> { - public: - NetworkMonitorBase(); - ~NetworkMonitorBase() override; - - void OnNetworksChanged() override; - - void OnMessage(Message* msg) override; - - AdapterType GetVpnUnderlyingAdapterType( - const std::string& interface_name) override; - - protected: - Thread* worker_thread() { return worker_thread_; } - private: - Thread* worker_thread_; -}; - -/* - * NetworkMonitorFactory creates NetworkMonitors. - */ -class NetworkMonitorFactory { - public: - // This is not thread-safe; it should be called once (or once per audio/video - // call) during the call initialization. - static void SetFactory(NetworkMonitorFactory* factory); - - static void ReleaseFactory(NetworkMonitorFactory* factory); - static NetworkMonitorFactory* GetFactory(); - - virtual NetworkMonitorInterface* CreateNetworkMonitor() = 0; + virtual NetworkPreference GetNetworkPreference( + const std::string& interface_name) = 0; - virtual ~NetworkMonitorFactory(); + // Does |this| NetworkMonitorInterface implement BindSocketToNetwork? + // Only Android returns true. + virtual bool SupportsBindSocketToNetwork() const { return false; } - protected: - NetworkMonitorFactory(); + // Bind a socket to an interface specified by ip address and/or interface + // name. Only implemented on Android. + virtual NetworkBindingResult BindSocketToNetwork( + int socket_fd, + const IPAddress& address, + const std::string& interface_name) { + return NetworkBindingResult::NOT_IMPLEMENTED; + } + + // Is this interface available to use? WebRTC shouldn't attempt to use it if + // this returns false. + // + // It's possible for this status to change, in which case + // SignalNetworksChanged will be fired. 
+ // + // These specific use case this was added for was a phone with two SIM cards, + // where attempting to use all interfaces returned from getifaddrs caused the + // connection to be dropped. + virtual bool IsAdapterAvailable(const std::string& interface_name) { + return true; + } }; } // namespace rtc diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h b/rtc_base/network_monitor_factory.cc similarity index 58% rename from sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h rename to rtc_base/network_monitor_factory.cc index 64c49977c0..9fac4d95a0 100644 --- a/sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h +++ b/rtc_base/network_monitor_factory.cc @@ -1,5 +1,5 @@ /* - * Copyright 2016 The WebRTC Project Authors. All rights reserved. + * Copyright 2020 The WebRTC Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,4 +8,11 @@ * be found in the AUTHORS file in the root of the source tree. */ -#import "components/audio/RTCAudioSession.h" +#include "rtc_base/network_monitor_factory.h" + +namespace rtc { + +NetworkMonitorFactory::NetworkMonitorFactory() {} +NetworkMonitorFactory::~NetworkMonitorFactory() {} + +} // namespace rtc diff --git a/rtc_base/network_monitor_factory.h b/rtc_base/network_monitor_factory.h new file mode 100644 index 0000000000..dadcd4aa8a --- /dev/null +++ b/rtc_base/network_monitor_factory.h @@ -0,0 +1,37 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef RTC_BASE_NETWORK_MONITOR_FACTORY_H_ +#define RTC_BASE_NETWORK_MONITOR_FACTORY_H_ + +namespace rtc { + +// Forward declaring this so it's not part of the API surface; it's only +// expected to be used by Android/iOS SDK code. +class NetworkMonitorInterface; + +/* + * NetworkMonitorFactory creates NetworkMonitors. + * Note that CreateNetworkMonitor is expected to be called on the network + * thread with the returned object only being used on that thread thereafter. + */ +class NetworkMonitorFactory { + public: + virtual NetworkMonitorInterface* CreateNetworkMonitor() = 0; + + virtual ~NetworkMonitorFactory(); + + protected: + NetworkMonitorFactory(); +}; + +} // namespace rtc + +#endif // RTC_BASE_NETWORK_MONITOR_FACTORY_H_ diff --git a/rtc_base/network_unittest.cc b/rtc_base/network_unittest.cc index cd693563e7..75856634ab 100644 --- a/rtc_base/network_unittest.cc +++ b/rtc_base/network_unittest.cc @@ -12,13 +12,16 @@ #include +#include #include #include +#include "absl/algorithm/container.h" #include "absl/strings/match.h" #include "rtc_base/checks.h" #include "rtc_base/net_helpers.h" #include "rtc_base/network_monitor.h" +#include "rtc_base/network_monitor_factory.h" #if defined(WEBRTC_POSIX) #include #include @@ -30,6 +33,7 @@ #if defined(WEBRTC_WIN) #include "rtc_base/logging.h" // For RTC_LOG_GLE #endif +#include "test/field_trial.h" using ::testing::Contains; using ::testing::Not; @@ -40,7 +44,7 @@ namespace rtc { namespace { -class FakeNetworkMonitor : public NetworkMonitorBase { +class FakeNetworkMonitor : public NetworkMonitorInterface { public: void Start() override { started_ = true; } void Stop() override { started_ = false; } @@ -56,9 +60,51 @@ class FakeNetworkMonitor : public NetworkMonitorBase { } return ADAPTER_TYPE_UNKNOWN; } + AdapterType GetVpnUnderlyingAdapterType(const std::string& if_name) override { + return ADAPTER_TYPE_UNKNOWN; + } + NetworkPreference GetNetworkPreference(const std::string& if_name) override { + return 
NetworkPreference::NEUTRAL; + } + + bool IsAdapterAvailable(const std::string& if_name) override { + return absl::c_count(unavailable_adapters_, if_name) == 0; + } + + // Used to test IsAdapterAvailable. + void set_unavailable_adapters(std::vector unavailable_adapters) { + unavailable_adapters_ = unavailable_adapters; + } + + bool SupportsBindSocketToNetwork() const override { return true; } + + NetworkBindingResult BindSocketToNetwork( + int socket_fd, + const IPAddress& address, + const std::string& if_name) override { + if (absl::c_count(addresses_, address) > 0) { + return NetworkBindingResult::SUCCESS; + } + + for (auto const& iter : adapters_) { + if (if_name.find(iter) != std::string::npos) { + return NetworkBindingResult::SUCCESS; + } + } + return NetworkBindingResult::ADDRESS_NOT_FOUND; + } + + void set_ip_addresses(std::vector addresses) { + addresses_ = addresses; + } + + void set_adapters(std::vector adapters) { adapters_ = adapters; } private: bool started_ = false; + std::vector adapters_; + std::vector unavailable_adapters_; + std::vector addresses_; }; class FakeNetworkMonitorFactory : public NetworkMonitorFactory { @@ -100,18 +146,27 @@ class NetworkTest : public ::testing::Test, public sigslot::has_slots<> { bool IsIgnoredNetwork(BasicNetworkManager& network_manager, const Network& network) { + RTC_DCHECK_RUN_ON(network_manager.thread_); return network_manager.IsIgnoredNetwork(network); } + IPAddress QueryDefaultLocalAddress(BasicNetworkManager& network_manager, + int family) { + RTC_DCHECK_RUN_ON(network_manager.thread_); + return network_manager.QueryDefaultLocalAddress(family); + } + NetworkManager::NetworkList GetNetworks( const BasicNetworkManager& network_manager, bool include_ignored) { + RTC_DCHECK_RUN_ON(network_manager.thread_); NetworkManager::NetworkList list; network_manager.CreateNetworks(include_ignored, &list); return list; } FakeNetworkMonitor* GetNetworkMonitor(BasicNetworkManager& network_manager) { + 
RTC_DCHECK_RUN_ON(network_manager.thread_); return static_cast( network_manager.network_monitor_.get()); } @@ -136,6 +191,7 @@ class NetworkTest : public ::testing::Test, public sigslot::has_slots<> { struct ifaddrs* interfaces, bool include_ignored, NetworkManager::NetworkList* networks) { + RTC_DCHECK_RUN_ON(network_manager.thread_); // Use the base IfAddrsConverter for test cases. std::unique_ptr ifaddrs_converter(new IfAddrsConverter()); network_manager.ConvertIfAddrs(interfaces, ifaddrs_converter.get(), @@ -247,6 +303,8 @@ class NetworkTest : public ::testing::Test, public sigslot::has_slots<> { class TestBasicNetworkManager : public BasicNetworkManager { public: + TestBasicNetworkManager(NetworkMonitorFactory* network_monitor_factory) + : BasicNetworkManager(network_monitor_factory) {} using BasicNetworkManager::QueryDefaultLocalAddress; using BasicNetworkManager::set_default_local_addresses; }; @@ -268,6 +326,7 @@ TEST_F(NetworkTest, TestIsIgnoredNetworkIgnoresIPsStartingWith0) { Network ipv4_network2("test_eth1", "Test Network Adapter 2", IPAddress(0x010000U), 24, ADAPTER_TYPE_ETHERNET); BasicNetworkManager network_manager; + network_manager.StartUpdating(); EXPECT_FALSE(IsIgnoredNetwork(network_manager, ipv4_network1)); EXPECT_TRUE(IsIgnoredNetwork(network_manager, ipv4_network2)); } @@ -278,14 +337,18 @@ TEST_F(NetworkTest, TestIgnoreList) { 24); Network include_me("include_me", "Include me please!", IPAddress(0x12345600U), 24); - BasicNetworkManager network_manager; - EXPECT_FALSE(IsIgnoredNetwork(network_manager, ignore_me)); - EXPECT_FALSE(IsIgnoredNetwork(network_manager, include_me)); + BasicNetworkManager default_network_manager; + default_network_manager.StartUpdating(); + EXPECT_FALSE(IsIgnoredNetwork(default_network_manager, ignore_me)); + EXPECT_FALSE(IsIgnoredNetwork(default_network_manager, include_me)); + + BasicNetworkManager ignoring_network_manager; std::vector ignore_list; ignore_list.push_back("ignore_me"); - 
network_manager.set_network_ignore_list(ignore_list); - EXPECT_TRUE(IsIgnoredNetwork(network_manager, ignore_me)); - EXPECT_FALSE(IsIgnoredNetwork(network_manager, include_me)); + ignoring_network_manager.set_network_ignore_list(ignore_list); + ignoring_network_manager.StartUpdating(); + EXPECT_TRUE(IsIgnoredNetwork(ignoring_network_manager, ignore_me)); + EXPECT_FALSE(IsIgnoredNetwork(ignoring_network_manager, include_me)); } // Test is failing on Windows opt: b/11288214 @@ -649,6 +712,7 @@ TEST_F(NetworkTest, TestMultiplePublicNetworksOnOneInterfaceMerge) { // Test that DumpNetworks does not crash. TEST_F(NetworkTest, TestCreateAndDumpNetworks) { BasicNetworkManager manager; + manager.StartUpdating(); NetworkManager::NetworkList list = GetNetworks(manager, true); bool changed; MergeNetworkList(manager, list, &changed); @@ -657,6 +721,7 @@ TEST_F(NetworkTest, TestCreateAndDumpNetworks) { TEST_F(NetworkTest, TestIPv6Toggle) { BasicNetworkManager manager; + manager.StartUpdating(); bool ipv6_found = false; NetworkManager::NetworkList list; list = GetNetworks(manager, true); @@ -753,6 +818,7 @@ TEST_F(NetworkTest, TestConvertIfAddrsNoAddress) { NetworkManager::NetworkList result; BasicNetworkManager manager; + manager.StartUpdating(); CallConvertIfAddrs(manager, &list, true, &result); EXPECT_TRUE(result.empty()); } @@ -768,6 +834,7 @@ TEST_F(NetworkTest, TestConvertIfAddrsMultiAddressesOnOneInterface) { "FFFF:FFFF:FFFF:FFFF::", 0); NetworkManager::NetworkList result; BasicNetworkManager manager; + manager.StartUpdating(); CallConvertIfAddrs(manager, list, true, &result); EXPECT_EQ(1U, result.size()); bool changed; @@ -787,46 +854,35 @@ TEST_F(NetworkTest, TestConvertIfAddrsNotRunning) { NetworkManager::NetworkList result; BasicNetworkManager manager; + manager.StartUpdating(); CallConvertIfAddrs(manager, &list, true, &result); EXPECT_TRUE(result.empty()); } -// Tests that the network type can be updated after the network monitor is -// started. 
+// Tests that the network type can be determined from the network monitor when +// it would otherwise be unknown. TEST_F(NetworkTest, TestGetAdapterTypeFromNetworkMonitor) { - char if_name1[20] = "wifi0"; - std::string ipv6_address1 = "1000:2000:3000:4000:0:0:0:1"; - std::string ipv6_address2 = "1000:2000:3000:8000:0:0:0:1"; + char if_name[20] = "wifi0"; + std::string ipv6_address = "1000:2000:3000:4000:0:0:0:1"; std::string ipv6_mask = "FFFF:FFFF:FFFF:FFFF::"; - BasicNetworkManager manager; - // A network created before the network monitor is started will get - // UNKNOWN type. - ifaddrs* addr_list = - InstallIpv6Network(if_name1, ipv6_address1, ipv6_mask, manager); - EXPECT_EQ(ADAPTER_TYPE_UNKNOWN, GetAdapterType(manager)); + BasicNetworkManager manager_without_monitor; + manager_without_monitor.StartUpdating(); + // A network created without a network monitor will get UNKNOWN type. + ifaddrs* addr_list = InstallIpv6Network(if_name, ipv6_address, ipv6_mask, + manager_without_monitor); + EXPECT_EQ(ADAPTER_TYPE_UNKNOWN, GetAdapterType(manager_without_monitor)); ReleaseIfAddrs(addr_list); - // Note: Do not call ClearNetworks here in order to test that the type - // of an existing network can be changed after the network monitor starts - // and detects the network type correctly. - - // After the network monitor starts, the type will be updated. - FakeNetworkMonitorFactory* factory = new FakeNetworkMonitorFactory(); - NetworkMonitorFactory::SetFactory(factory); - // This brings up the hook with the network monitor. - manager.StartUpdating(); + + // With the fake network monitor the type should be correctly determined. + FakeNetworkMonitorFactory factory; + BasicNetworkManager manager_with_monitor(&factory); + manager_with_monitor.StartUpdating(); // Add the same ipv6 address as before but it has the right network type // detected by the network monitor now. 
- addr_list = InstallIpv6Network(if_name1, ipv6_address1, ipv6_mask, manager); - EXPECT_EQ(ADAPTER_TYPE_WIFI, GetAdapterType(manager)); + addr_list = InstallIpv6Network(if_name, ipv6_address, ipv6_mask, + manager_with_monitor); + EXPECT_EQ(ADAPTER_TYPE_WIFI, GetAdapterType(manager_with_monitor)); ReleaseIfAddrs(addr_list); - ClearNetworks(manager); - - // Add another network with the type inferred from the network monitor. - char if_name2[20] = "cellular0"; - addr_list = InstallIpv6Network(if_name2, ipv6_address2, ipv6_mask, manager); - EXPECT_EQ(ADAPTER_TYPE_CELLULAR, GetAdapterType(manager)); - ReleaseIfAddrs(addr_list); - ClearNetworks(manager); } // Test that the network type can be determined based on name matching in @@ -839,6 +895,7 @@ TEST_F(NetworkTest, TestGetAdapterTypeFromNameMatching) { std::string ipv6_address2 = "1000:2000:3000:8000:0:0:0:1"; std::string ipv6_mask = "FFFF:FFFF:FFFF:FFFF::"; BasicNetworkManager manager; + manager.StartUpdating(); // IPSec interface; name is in form "ipsec". char if_name[20] = "ipsec11"; @@ -899,6 +956,41 @@ TEST_F(NetworkTest, TestGetAdapterTypeFromNameMatching) { ReleaseIfAddrs(addr_list); #endif } + +// Test that an adapter won't be included in the network list if there's a +// network monitor that says it's unavailable. +TEST_F(NetworkTest, TestNetworkMonitorIsAdapterAvailable) { + char if_name1[20] = "pdp_ip0"; + char if_name2[20] = "pdp_ip1"; + ifaddrs* list = nullptr; + list = AddIpv6Address(list, if_name1, "1000:2000:3000:4000:0:0:0:1", + "FFFF:FFFF:FFFF:FFFF::", 0); + list = AddIpv6Address(list, if_name2, "1000:2000:3000:4000:0:0:0:2", + "FFFF:FFFF:FFFF:FFFF::", 0); + NetworkManager::NetworkList result; + + // Sanity check that both interfaces are included by default. 
+ FakeNetworkMonitorFactory factory; + BasicNetworkManager manager(&factory); + manager.StartUpdating(); + CallConvertIfAddrs(manager, list, /*include_ignored=*/false, &result); + EXPECT_EQ(2u, result.size()); + bool changed; + // This ensures we release the objects created in CallConvertIfAddrs. + MergeNetworkList(manager, result, &changed); + result.clear(); + + // Now simulate one interface being unavailable. + FakeNetworkMonitor* network_monitor = GetNetworkMonitor(manager); + network_monitor->set_unavailable_adapters({if_name1}); + CallConvertIfAddrs(manager, list, /*include_ignored=*/false, &result); + EXPECT_EQ(1u, result.size()); + EXPECT_EQ(if_name2, result[0]->name()); + + MergeNetworkList(manager, result, &changed); + ReleaseIfAddrs(list); +} + #endif // defined(WEBRTC_POSIX) // Test MergeNetworkList successfully combines all IPs for the same @@ -1022,11 +1114,10 @@ TEST_F(NetworkTest, TestIPv6Selection) { } TEST_F(NetworkTest, TestNetworkMonitoring) { - BasicNetworkManager manager; + FakeNetworkMonitorFactory factory; + BasicNetworkManager manager(&factory); manager.SignalNetworksChanged.connect(static_cast(this), &NetworkTest::OnNetworksChanged); - FakeNetworkMonitorFactory* factory = new FakeNetworkMonitorFactory(); - NetworkMonitorFactory::SetFactory(factory); manager.StartUpdating(); FakeNetworkMonitor* network_monitor = GetNetworkMonitor(manager); EXPECT_TRUE(network_monitor && network_monitor->started()); @@ -1037,14 +1128,12 @@ TEST_F(NetworkTest, TestNetworkMonitoring) { ClearNetworks(manager); // Network manager is started, so the callback is called when the network // monitor fires the network-change event. - network_monitor->OnNetworksChanged(); + network_monitor->SignalNetworksChanged(); EXPECT_TRUE_WAIT(callback_called_, 1000); // Network manager is stopped. 
manager.StopUpdating(); EXPECT_FALSE(GetNetworkMonitor(manager)->started()); - - NetworkMonitorFactory::ReleaseFactory(factory); } // Fails on Android: https://bugs.chromium.org/p/webrtc/issues/detail?id=4364. @@ -1055,11 +1144,10 @@ TEST_F(NetworkTest, TestNetworkMonitoring) { #endif TEST_F(NetworkTest, MAYBE_DefaultLocalAddress) { IPAddress ip; - TestBasicNetworkManager manager; + FakeNetworkMonitorFactory factory; + TestBasicNetworkManager manager(&factory); manager.SignalNetworksChanged.connect(static_cast(this), &NetworkTest::OnNetworksChanged); - FakeNetworkMonitorFactory* factory = new FakeNetworkMonitorFactory(); - NetworkMonitorFactory::SetFactory(factory); manager.StartUpdating(); EXPECT_TRUE_WAIT(callback_called_, 1000); @@ -1070,12 +1158,12 @@ TEST_F(NetworkTest, MAYBE_DefaultLocalAddress) { EXPECT_TRUE(!networks.empty()); for (const auto* network : networks) { if (network->GetBestIP().family() == AF_INET) { - EXPECT_TRUE(manager.QueryDefaultLocalAddress(AF_INET) != IPAddress()); + EXPECT_TRUE(QueryDefaultLocalAddress(manager, AF_INET) != IPAddress()); } else if (network->GetBestIP().family() == AF_INET6 && !IPIsLoopback(network->GetBestIP())) { // Existence of an IPv6 loopback address doesn't mean it has IPv6 network // enabled. - EXPECT_TRUE(manager.QueryDefaultLocalAddress(AF_INET6) != IPAddress()); + EXPECT_TRUE(QueryDefaultLocalAddress(manager, AF_INET6) != IPAddress()); } } @@ -1179,4 +1267,83 @@ TEST_F(NetworkTest, TestWhenNetworkListChangeReturnsChangedFlag) { } } +#if defined(WEBRTC_POSIX) +TEST_F(NetworkTest, IgnoresMACBasedIPv6Address) { + std::string ipv6_address = "2607:fc20:f340:1dc8:214:22ff:fe01:2345"; + std::string ipv6_mask = "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"; + BasicNetworkManager manager; + manager.StartUpdating(); + + // IPSec interface; name is in form "ipsec". 
+ char if_name[20] = "ipsec11"; + ifaddrs* addr_list = + InstallIpv6Network(if_name, ipv6_address, ipv6_mask, manager); + + BasicNetworkManager::NetworkList list; + manager.GetNetworks(&list); + EXPECT_EQ(list.size(), 0u); + ReleaseIfAddrs(addr_list); +} + +TEST_F(NetworkTest, WebRTC_AllowMACBasedIPv6Address) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-AllowMACBasedIPv6/Enabled/"); + std::string ipv6_address = "2607:fc20:f340:1dc8:214:22ff:fe01:2345"; + std::string ipv6_mask = "FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF"; + BasicNetworkManager manager; + manager.StartUpdating(); + + // IPSec interface; name is in form "ipsec". + char if_name[20] = "ipsec11"; + ifaddrs* addr_list = + InstallIpv6Network(if_name, ipv6_address, ipv6_mask, manager); + + BasicNetworkManager::NetworkList list; + manager.GetNetworks(&list); + EXPECT_EQ(list.size(), 1u); + ReleaseIfAddrs(addr_list); +} +#endif + +#if defined(WEBRTC_POSIX) +TEST_F(NetworkTest, WebRTC_BindUsingInterfaceName) { + char if_name1[20] = "wlan0"; + char if_name2[20] = "v4-wlan0"; + ifaddrs* list = nullptr; + list = AddIpv6Address(list, if_name1, "1000:2000:3000:4000:0:0:0:1", + "FFFF:FFFF:FFFF:FFFF::", 0); + list = AddIpv4Address(list, if_name2, "192.168.0.2", "255.255.255.255"); + NetworkManager::NetworkList result; + + // Sanity check that both interfaces are included by default. + FakeNetworkMonitorFactory factory; + BasicNetworkManager manager(&factory); + manager.StartUpdating(); + CallConvertIfAddrs(manager, list, /*include_ignored=*/false, &result); + EXPECT_EQ(2u, result.size()); + ReleaseIfAddrs(list); + bool changed; + // This ensures we release the objects created in CallConvertIfAddrs. 
+ MergeNetworkList(manager, result, &changed); + result.clear(); + + FakeNetworkMonitor* network_monitor = GetNetworkMonitor(manager); + + IPAddress ipv6; + EXPECT_TRUE(IPFromString("1000:2000:3000:4000:0:0:0:1", &ipv6)); + IPAddress ipv4; + EXPECT_TRUE(IPFromString("192.168.0.2", &ipv4)); + + // The network monitor only knwos about the ipv6 address, interface. + network_monitor->set_adapters({"wlan0"}); + network_monitor->set_ip_addresses({ipv6}); + EXPECT_EQ(manager.BindSocketToNetwork(/* fd */ 77, ipv6), + NetworkBindingResult::SUCCESS); + + // But it will bind anyway using string matching... + EXPECT_EQ(manager.BindSocketToNetwork(/* fd */ 77, ipv4), + NetworkBindingResult::SUCCESS); +} +#endif + } // namespace rtc diff --git a/rtc_base/null_socket_server_unittest.cc b/rtc_base/null_socket_server_unittest.cc index 39c16313b1..a875d6c284 100644 --- a/rtc_base/null_socket_server_unittest.cc +++ b/rtc_base/null_socket_server_unittest.cc @@ -25,7 +25,8 @@ namespace rtc { static const uint32_t kTimeout = 5000U; -class NullSocketServerTest : public ::testing::Test, public MessageHandler { +class NullSocketServerTest : public ::testing::Test, + public MessageHandlerAutoCleanup { protected: void OnMessage(Message* message) override { ss_.WakeUp(); } diff --git a/rtc_base/numerics/math_utils.h b/rtc_base/numerics/math_utils.h index 4bf48e22bb..0f1d51b090 100644 --- a/rtc_base/numerics/math_utils.h +++ b/rtc_base/numerics/math_utils.h @@ -8,14 +8,16 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef RTC_BASE_NUMERICS_MATH_UTILS_H_ -#define RTC_BASE_NUMERICS_MATH_UTILS_H_ +#ifndef API_NUMERICS_MATH_UTILS_H_ +#define API_NUMERICS_MATH_UTILS_H_ #include #include #include "rtc_base/checks.h" +namespace webrtc { +namespace webrtc_impl { // Given two numbers |x| and |y| such that x >= y, computes the difference // x - y without causing undefined behavior due to signed overflow. 
template @@ -67,4 +69,7 @@ constexpr T minus_infinity_or_min() { return std::numeric_limits::min(); } -#endif // RTC_BASE_NUMERICS_MATH_UTILS_H_ +} // namespace webrtc_impl +} // namespace webrtc + +#endif // API_NUMERICS_MATH_UTILS_H_ diff --git a/rtc_base/numerics/running_statistics.h b/rtc_base/numerics/running_statistics.h index 4a3516d3f6..bbcc7e2a73 100644 --- a/rtc_base/numerics/running_statistics.h +++ b/rtc_base/numerics/running_statistics.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef RTC_BASE_NUMERICS_RUNNING_STATISTICS_H_ -#define RTC_BASE_NUMERICS_RUNNING_STATISTICS_H_ +#ifndef API_NUMERICS_RUNNING_STATISTICS_H_ +#define API_NUMERICS_RUNNING_STATISTICS_H_ #include #include @@ -20,6 +20,7 @@ #include "rtc_base/numerics/math_utils.h" namespace webrtc { +namespace webrtc_impl { // tl;dr: Robust and efficient online computation of statistics, // using Welford's method for variance. [1] @@ -154,6 +155,7 @@ class RunningStatistics { double cumul_ = 0; // Variance * size_, sometimes noted m2. }; +} // namespace webrtc_impl } // namespace webrtc -#endif // RTC_BASE_NUMERICS_RUNNING_STATISTICS_H_ +#endif // API_NUMERICS_RUNNING_STATISTICS_H_ diff --git a/rtc_base/numerics/running_statistics_unittest.cc b/rtc_base/numerics/running_statistics_unittest.cc index 26dc7133e0..d593f3fc5a 100644 --- a/rtc_base/numerics/running_statistics_unittest.cc +++ b/rtc_base/numerics/running_statistics_unittest.cc @@ -21,6 +21,7 @@ // Tests were copied from samples_stats_counter_unittest.cc. 
namespace webrtc { +namespace webrtc_impl { namespace { RunningStatistics CreateStatsFilledWithIntsFrom1ToN(int n) { @@ -55,8 +56,6 @@ class RunningStatisticsTest : public ::testing::TestWithParam {}; constexpr int SIZE_FOR_MERGE = 5; -} // namespace - TEST(RunningStatistics, FullSimpleTest) { auto stats = CreateStatsFilledWithIntsFrom1ToN(100); @@ -192,4 +191,6 @@ INSTANTIATE_TEST_SUITE_P(RunningStatisticsTests, RunningStatisticsTest, ::testing::Range(0, SIZE_FOR_MERGE + 1)); +} // namespace +} // namespace webrtc_impl } // namespace webrtc diff --git a/rtc_base/numerics/safe_conversions.h b/rtc_base/numerics/safe_conversions.h index 5d58672510..e00219cbd7 100644 --- a/rtc_base/numerics/safe_conversions.h +++ b/rtc_base/numerics/safe_conversions.h @@ -63,12 +63,10 @@ inline constexpr Dst saturated_cast(Src value) { // Should fail only on attempting to assign NaN to a saturated integer. case internal::TYPE_INVALID: - FATAL(); - return std::numeric_limits::max(); + RTC_CHECK_NOTREACHED(); } - FATAL(); - return static_cast(value); + RTC_CHECK_NOTREACHED(); } } // namespace rtc diff --git a/rtc_base/numerics/sample_stats.h b/rtc_base/numerics/sample_stats.h index f6347414b0..39af1c6a37 100644 --- a/rtc_base/numerics/sample_stats.h +++ b/rtc_base/numerics/sample_stats.h @@ -10,10 +10,10 @@ #ifndef RTC_BASE_NUMERICS_SAMPLE_STATS_H_ #define RTC_BASE_NUMERICS_SAMPLE_STATS_H_ +#include "api/numerics/samples_stats_counter.h" #include "api/units/data_rate.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" -#include "rtc_base/numerics/samples_stats_counter.h" namespace webrtc { template diff --git a/rtc_base/one_time_event.h b/rtc_base/one_time_event.h index c5ccbf6933..d33ddbd587 100644 --- a/rtc_base/one_time_event.h +++ b/rtc_base/one_time_event.h @@ -11,7 +11,7 @@ #ifndef RTC_BASE_ONE_TIME_EVENT_H_ #define RTC_BASE_ONE_TIME_EVENT_H_ -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { // Provides a 
simple way to perform an operation (such as logging) one @@ -26,7 +26,7 @@ class OneTimeEvent { public: OneTimeEvent() {} bool operator()() { - rtc::CritScope cs(&critsect_); + MutexLock lock(&mutex_); if (happened_) { return false; } @@ -36,7 +36,7 @@ class OneTimeEvent { private: bool happened_ = false; - rtc::CriticalSection critsect_; + Mutex mutex_; }; // A non-thread-safe, ligher-weight version of the OneTimeEvent class. diff --git a/rtc_base/openssl_adapter.cc b/rtc_base/openssl_adapter.cc index 8fd882c2b3..c381f04899 100644 --- a/rtc_base/openssl_adapter.cc +++ b/rtc_base/openssl_adapter.cc @@ -13,6 +13,9 @@ #include #include #include +#ifdef OPENSSL_IS_BORINGSSL +#include +#endif #include #include #include @@ -20,13 +23,24 @@ #include +// Use CRYPTO_BUFFER APIs if available and we have no dependency on X509 +// objects. +#if defined(OPENSSL_IS_BORINGSSL) && \ + defined(WEBRTC_EXCLUDE_BUILT_IN_SSL_ROOT_CERTS) +#define WEBRTC_USE_CRYPTO_BUFFER_CALLBACK +#endif + #include "absl/memory/memory.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/openssl.h" -#include "rtc_base/openssl_certificate.h" +#ifdef OPENSSL_IS_BORINGSSL +#include "rtc_base/boringssl_identity.h" +#else +#include "rtc_base/openssl_identity.h" +#endif #include "rtc_base/openssl_utility.h" #include "rtc_base/string_encode.h" #include "rtc_base/thread.h" @@ -223,8 +237,13 @@ void OpenSSLAdapter::SetCertVerifier( void OpenSSLAdapter::SetIdentity(std::unique_ptr identity) { RTC_DCHECK(!identity_); +#ifdef OPENSSL_IS_BORINGSSL + identity_ = + absl::WrapUnique(static_cast(identity.release())); +#else identity_ = absl::WrapUnique(static_cast(identity.release())); +#endif } void OpenSSLAdapter::SetRole(SSLRole role) { @@ -270,8 +289,8 @@ int OpenSSLAdapter::BeginSSL() { RTC_LOG(LS_INFO) << "OpenSSLAdapter::BeginSSL: " << ssl_host_name_; RTC_DCHECK(state_ == SSL_CONNECTING); - int err = 0; - 
BIO* bio = nullptr; + // Cleanup action to deal with on error cleanup a bit cleaner. + EarlyExitCatcher early_exit_catcher(*this); // First set up the context. We should either have a factory, with its own // pre-existing context, or be running standalone, in which case we will @@ -282,26 +301,22 @@ int OpenSSLAdapter::BeginSSL() { } if (!ssl_ctx_) { - err = -1; - goto ssl_error; + return -1; } if (identity_ && !identity_->ConfigureIdentity(ssl_ctx_)) { - SSL_CTX_free(ssl_ctx_); - err = -1; - goto ssl_error; + return -1; } - bio = BIO_new_socket(socket_); + std::unique_ptr bio{BIO_new_socket(socket_), + ::BIO_free}; if (!bio) { - err = -1; - goto ssl_error; + return -1; } ssl_ = SSL_new(ssl_ctx_); if (!ssl_) { - err = -1; - goto ssl_error; + return -1; } SSL_set_app_data(ssl_, this); @@ -327,8 +342,7 @@ int OpenSSLAdapter::BeginSSL() { if (cached) { if (SSL_set_session(ssl_, cached) == 0) { RTC_LOG(LS_WARNING) << "Failed to apply SSL session from cache"; - err = -1; - goto ssl_error; + return -1; } RTC_LOG(LS_INFO) << "Attempting to resume SSL session to " @@ -358,24 +372,16 @@ int OpenSSLAdapter::BeginSSL() { // Now that the initial config is done, transfer ownership of |bio| to the // SSL object. If ContinueSSL() fails, the bio will be freed in Cleanup(). - SSL_set_bio(ssl_, bio, bio); - bio = nullptr; + SSL_set_bio(ssl_, bio.get(), bio.get()); + bio.release(); // Do the connect. - err = ContinueSSL(); + int err = ContinueSSL(); if (err != 0) { - goto ssl_error; - } - - return err; - -ssl_error: - Cleanup(); - if (bio) { - BIO_free(bio); + return err; } - - return err; + early_exit_catcher.disable(); + return 0; } int OpenSSLAdapter::ContinueSSL() { @@ -797,7 +803,70 @@ void OpenSSLAdapter::SSLInfoCallback(const SSL* s, int where, int ret) { #endif +#ifdef WEBRTC_USE_CRYPTO_BUFFER_CALLBACK +// static +enum ssl_verify_result_t OpenSSLAdapter::SSLVerifyCallback(SSL* ssl, + uint8_t* out_alert) { + // Get our stream pointer from the SSL context. 
+ OpenSSLAdapter* stream = + reinterpret_cast(SSL_get_app_data(ssl)); + + ssl_verify_result_t ret = stream->SSLVerifyInternal(ssl, out_alert); + + // Should only be used for debugging and development. + if (ret != ssl_verify_ok && stream->ignore_bad_cert_) { + RTC_DLOG(LS_WARNING) << "Ignoring cert error while verifying cert chain"; + return ssl_verify_ok; + } + + return ret; +} + +enum ssl_verify_result_t OpenSSLAdapter::SSLVerifyInternal(SSL* ssl, + uint8_t* out_alert) { + if (ssl_cert_verifier_ == nullptr) { + RTC_LOG(LS_WARNING) << "Built-in trusted root certificates disabled but no " + "SSL verify callback provided."; + return ssl_verify_invalid; + } + + RTC_LOG(LS_INFO) << "Invoking SSL Verify Callback."; + const STACK_OF(CRYPTO_BUFFER)* chain = SSL_get0_peer_certificates(ssl); + if (sk_CRYPTO_BUFFER_num(chain) == 0) { + RTC_LOG(LS_ERROR) << "Peer certificate chain empty?"; + return ssl_verify_invalid; + } + + BoringSSLCertificate cert(bssl::UpRef(sk_CRYPTO_BUFFER_value(chain, 0))); + if (!ssl_cert_verifier_->Verify(cert)) { + RTC_LOG(LS_WARNING) << "Failed to verify certificate using custom callback"; + return ssl_verify_invalid; + } + + custom_cert_verifier_status_ = true; + RTC_LOG(LS_INFO) << "Validated certificate using custom callback"; + return ssl_verify_ok; +} +#else // WEBRTC_USE_CRYPTO_BUFFER_CALLBACK int OpenSSLAdapter::SSLVerifyCallback(int ok, X509_STORE_CTX* store) { + // Get our stream pointer from the store + SSL* ssl = reinterpret_cast( + X509_STORE_CTX_get_ex_data(store, SSL_get_ex_data_X509_STORE_CTX_idx())); + + OpenSSLAdapter* stream = + reinterpret_cast(SSL_get_app_data(ssl)); + ok = stream->SSLVerifyInternal(ok, ssl, store); + + // Should only be used for debugging and development. 
+ if (!ok && stream->ignore_bad_cert_) { + RTC_DLOG(LS_WARNING) << "Ignoring cert error while verifying cert chain"; + return 1; + } + + return ok; +} + +int OpenSSLAdapter::SSLVerifyInternal(int ok, SSL* ssl, X509_STORE_CTX* store) { #if !defined(NDEBUG) if (!ok) { char data[256]; @@ -814,33 +883,40 @@ int OpenSSLAdapter::SSLVerifyCallback(int ok, X509_STORE_CTX* store) { << X509_verify_cert_error_string(err); } #endif - // Get our stream pointer from the store - SSL* ssl = reinterpret_cast( - X509_STORE_CTX_get_ex_data(store, SSL_get_ex_data_X509_STORE_CTX_idx())); - - OpenSSLAdapter* stream = - reinterpret_cast(SSL_get_app_data(ssl)); - - if (!ok && stream->ssl_cert_verifier_ != nullptr) { - RTC_LOG(LS_INFO) << "Invoking SSL Verify Callback."; - const OpenSSLCertificate cert(X509_STORE_CTX_get_current_cert(store)); - if (stream->ssl_cert_verifier_->Verify(cert)) { - stream->custom_cert_verifier_status_ = true; - RTC_LOG(LS_INFO) << "Validated certificate using custom callback"; - ok = true; - } else { - RTC_LOG(LS_INFO) << "Failed to verify certificate using custom callback"; - } + if (ssl_cert_verifier_ == nullptr) { + return ok; } - // Should only be used for debugging and development. - if (!ok && stream->ignore_bad_cert_) { - RTC_DLOG(LS_WARNING) << "Ignoring cert error while verifying cert chain"; - ok = 1; + RTC_LOG(LS_INFO) << "Invoking SSL Verify Callback."; +#ifdef OPENSSL_IS_BORINGSSL + // Convert X509 to CRYPTO_BUFFER. 
+ uint8_t* data = nullptr; + int length = i2d_X509(X509_STORE_CTX_get_current_cert(store), &data); + if (length < 0) { + RTC_LOG(LS_ERROR) << "Failed to encode X509."; + return ok; + } + bssl::UniquePtr owned_data(data); + bssl::UniquePtr crypto_buffer( + CRYPTO_BUFFER_new(data, length, openssl::GetBufferPool())); + if (!crypto_buffer) { + RTC_LOG(LS_ERROR) << "Failed to allocate CRYPTO_BUFFER."; + return ok; + } + const BoringSSLCertificate cert(std::move(crypto_buffer)); +#else + const OpenSSLCertificate cert(X509_STORE_CTX_get_current_cert(store)); +#endif + if (!ssl_cert_verifier_->Verify(cert)) { + RTC_LOG(LS_INFO) << "Failed to verify certificate using custom callback"; + return ok; } - return ok; + custom_cert_verifier_status_ = true; + RTC_LOG(LS_INFO) << "Validated certificate using custom callback"; + return 1; } +#endif // !defined(WEBRTC_USE_CRYPTO_BUFFER_CALLBACK) int OpenSSLAdapter::NewSSLSessionCallback(SSL* ssl, SSL_SESSION* session) { OpenSSLAdapter* stream = @@ -852,8 +928,15 @@ int OpenSSLAdapter::NewSSLSessionCallback(SSL* ssl, SSL_SESSION* session) { } SSL_CTX* OpenSSLAdapter::CreateContext(SSLMode mode, bool enable_cache) { +#ifdef WEBRTC_USE_CRYPTO_BUFFER_CALLBACK + // If X509 objects aren't used, we can use these methods to avoid + // linking the sizable crypto/x509 code. + SSL_CTX* ctx = SSL_CTX_new(mode == SSL_MODE_DTLS ? DTLS_with_buffers_method() + : TLS_with_buffers_method()); +#else SSL_CTX* ctx = SSL_CTX_new(mode == SSL_MODE_DTLS ? DTLS_method() : TLS_method()); +#endif if (ctx == nullptr) { unsigned long error = ERR_get_error(); // NOLINT: type used by OpenSSL. 
RTC_LOG(LS_WARNING) << "SSL_CTX creation failed: " << '"' @@ -877,8 +960,19 @@ SSL_CTX* OpenSSLAdapter::CreateContext(SSLMode mode, bool enable_cache) { SSL_CTX_set_info_callback(ctx, SSLInfoCallback); #endif +#ifdef OPENSSL_IS_BORINGSSL + SSL_CTX_set0_buffer_pool(ctx, openssl::GetBufferPool()); +#endif + +#ifdef WEBRTC_USE_CRYPTO_BUFFER_CALLBACK + SSL_CTX_set_custom_verify(ctx, SSL_VERIFY_PEER, SSLVerifyCallback); +#else SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, SSLVerifyCallback); + // Verify certificate chains up to a depth of 4. This is not + // needed for DTLS-SRTP which uses self-signed certificates + // (so the depth is 0) but is required to support TURN/TLS. SSL_CTX_set_verify_depth(ctx, 4); +#endif // Use defaults, but disable HMAC-SHA256 and HMAC-SHA384 ciphers // (note that SHA256 and SHA384 only select legacy CBC ciphers). // Additionally disable HMAC-SHA1 ciphers in ECDSA. These are the remaining @@ -953,4 +1047,17 @@ OpenSSLAdapter* OpenSSLAdapterFactory::CreateAdapter(AsyncSocket* socket) { ssl_cert_verifier_); } +OpenSSLAdapter::EarlyExitCatcher::EarlyExitCatcher(OpenSSLAdapter& adapter_ptr) + : adapter_ptr_(adapter_ptr) {} + +void OpenSSLAdapter::EarlyExitCatcher::disable() { + disabled_ = true; +} + +OpenSSLAdapter::EarlyExitCatcher::~EarlyExitCatcher() { + if (!disabled_) { + adapter_ptr_.Cleanup(); + } +} + } // namespace rtc diff --git a/rtc_base/openssl_adapter.h b/rtc_base/openssl_adapter.h index 0e76836baf..9b2a36e00f 100644 --- a/rtc_base/openssl_adapter.h +++ b/rtc_base/openssl_adapter.h @@ -11,6 +11,7 @@ #ifndef RTC_BASE_OPENSSL_ADAPTER_H_ #define RTC_BASE_OPENSSL_ADAPTER_H_ +#include #include #include @@ -21,7 +22,11 @@ #include "rtc_base/async_socket.h" #include "rtc_base/buffer.h" #include "rtc_base/message_handler.h" +#ifdef OPENSSL_IS_BORINGSSL +#include "rtc_base/boringssl_identity.h" +#else #include "rtc_base/openssl_identity.h" +#endif #include "rtc_base/openssl_session_cache.h" #include "rtc_base/socket.h" #include 
"rtc_base/socket_address.h" @@ -32,7 +37,8 @@ namespace rtc { -class OpenSSLAdapter final : public SSLAdapter, public MessageHandler { +class OpenSSLAdapter final : public SSLAdapter, + public MessageHandlerAutoCleanup { public: static bool InitializeSSL(); static bool CleanupSSL(); @@ -83,6 +89,16 @@ class OpenSSLAdapter final : public SSLAdapter, public MessageHandler { void OnCloseEvent(AsyncSocket* socket, int err) override; private: + class EarlyExitCatcher { + public: + EarlyExitCatcher(OpenSSLAdapter& adapter_ptr); + void disable(); + ~EarlyExitCatcher(); + + private: + bool disabled_ = false; + OpenSSLAdapter& adapter_ptr_; + }; enum SSLState { SSL_NONE, SSL_WAIT, @@ -108,7 +124,16 @@ class OpenSSLAdapter final : public SSLAdapter, public MessageHandler { // In debug builds, logs info about the state of the SSL connection. static void SSLInfoCallback(const SSL* ssl, int where, int ret); #endif + +#if defined(OPENSSL_IS_BORINGSSL) && \ + defined(WEBRTC_EXCLUDE_BUILT_IN_SSL_ROOT_CERTS) + static enum ssl_verify_result_t SSLVerifyCallback(SSL* ssl, + uint8_t* out_alert); + enum ssl_verify_result_t SSLVerifyInternal(SSL* ssl, uint8_t* out_alert); +#else static int SSLVerifyCallback(int ok, X509_STORE_CTX* store); + int SSLVerifyInternal(int ok, SSL* ssl, X509_STORE_CTX* store); +#endif friend class OpenSSLStreamAdapter; // for custom_verify_callback_; // If the SSL_CTX was created with |enable_cache| set to true, this callback @@ -122,7 +147,12 @@ class OpenSSLAdapter final : public SSLAdapter, public MessageHandler { SSLCertificateVerifier* ssl_cert_verifier_ = nullptr; // The current connection state of the (d)TLS connection. SSLState state_; + +#ifdef OPENSSL_IS_BORINGSSL + std::unique_ptr identity_; +#else std::unique_ptr identity_; +#endif // Indicates whethere this is a client or a server. 
SSLRole role_; bool ssl_read_needs_write_; @@ -182,6 +212,10 @@ class OpenSSLAdapterFactory : public SSLAdapterFactory { friend class OpenSSLAdapter; }; +// The EarlyExitCatcher is responsible for calling OpenSSLAdapter::Cleanup on +// destruction. By doing this we have scoped cleanup which can be disabled if +// there were no errors, aka early exits. + std::string TransformAlpnProtocols(const std::vector& protos); } // namespace rtc diff --git a/rtc_base/openssl_certificate.cc b/rtc_base/openssl_certificate.cc index 9459f76df6..802787dcfb 100644 --- a/rtc_base/openssl_certificate.cc +++ b/rtc_base/openssl_certificate.cc @@ -59,27 +59,30 @@ static X509* MakeCertificate(EVP_PKEY* pkey, const SSLIdentityParams& params) { RTC_LOG(LS_INFO) << "Making certificate for " << params.common_name; ASN1_INTEGER* asn1_serial_number = nullptr; - BIGNUM* serial_number = nullptr; - X509* x509 = nullptr; - X509_NAME* name = nullptr; + std::unique_ptr serial_number{nullptr, + ::BN_free}; + std::unique_ptr x509{nullptr, ::X509_free}; + std::unique_ptr name{ + nullptr, ::X509_NAME_free}; time_t epoch_off = 0; // Time offset since epoch. 
- - if ((x509 = X509_new()) == nullptr) { - goto error; + x509.reset(X509_new()); + if (x509 == nullptr) { + return nullptr; } - if (!X509_set_pubkey(x509, pkey)) { - goto error; + if (!X509_set_pubkey(x509.get(), pkey)) { + return nullptr; } // serial number - temporary reference to serial number inside x509 struct - if ((serial_number = BN_new()) == nullptr || - !BN_pseudo_rand(serial_number, SERIAL_RAND_BITS, 0, 0) || - (asn1_serial_number = X509_get_serialNumber(x509)) == nullptr || - !BN_to_ASN1_INTEGER(serial_number, asn1_serial_number)) { - goto error; + serial_number.reset(BN_new()); + if (serial_number == nullptr || + !BN_pseudo_rand(serial_number.get(), SERIAL_RAND_BITS, 0, 0) || + (asn1_serial_number = X509_get_serialNumber(x509.get())) == nullptr || + !BN_to_ASN1_INTEGER(serial_number.get(), asn1_serial_number)) { + return nullptr; } // Set version to X509.V3 - if (!X509_set_version(x509, 2L)) { - goto error; + if (!X509_set_version(x509.get(), 2L)) { + return nullptr; } // There are a lot of possible components for the name entries. In @@ -89,31 +92,27 @@ static X509* MakeCertificate(EVP_PKEY* pkey, const SSLIdentityParams& params) { // arbitrary common_name. Note that this certificate goes out in // clear during SSL negotiation, so there may be a privacy issue in // putting anything recognizable here. 
- if ((name = X509_NAME_new()) == nullptr || - !X509_NAME_add_entry_by_NID(name, NID_commonName, MBSTRING_UTF8, + name.reset(X509_NAME_new()); + if (name == nullptr || + !X509_NAME_add_entry_by_NID(name.get(), NID_commonName, MBSTRING_UTF8, (unsigned char*)params.common_name.c_str(), -1, -1, 0) || - !X509_set_subject_name(x509, name) || !X509_set_issuer_name(x509, name)) { - goto error; + !X509_set_subject_name(x509.get(), name.get()) || + !X509_set_issuer_name(x509.get(), name.get())) { + return nullptr; } - if (!X509_time_adj(X509_get_notBefore(x509), params.not_before, &epoch_off) || - !X509_time_adj(X509_get_notAfter(x509), params.not_after, &epoch_off)) { - goto error; + if (!X509_time_adj(X509_get_notBefore(x509.get()), params.not_before, + &epoch_off) || + !X509_time_adj(X509_get_notAfter(x509.get()), params.not_after, + &epoch_off)) { + return nullptr; } - if (!X509_sign(x509, pkey, EVP_sha256())) { - goto error; + if (!X509_sign(x509.get(), pkey, EVP_sha256())) { + return nullptr; } - BN_free(serial_number); - X509_NAME_free(name); RTC_LOG(LS_INFO) << "Returning certificate"; - return x509; - -error: - BN_free(serial_number); - X509_NAME_free(name); - X509_free(x509); - return nullptr; + return x509.release(); } } // namespace @@ -244,13 +243,8 @@ std::unique_ptr OpenSSLCertificate::Clone() const { std::string OpenSSLCertificate::ToPEMString() const { BIO* bio = BIO_new(BIO_s_mem()); - if (!bio) { - FATAL() << "Unreachable code."; - } - if (!PEM_write_bio_X509(bio, x509_)) { - BIO_free(bio); - FATAL() << "Unreachable code."; - } + RTC_CHECK(bio); + RTC_CHECK(PEM_write_bio_X509(bio, x509_)); BIO_write(bio, "\0", 1); char* buffer; BIO_get_mem_data(bio, &buffer); @@ -264,13 +258,8 @@ void OpenSSLCertificate::ToDER(Buffer* der_buffer) const { der_buffer->SetSize(0); // Calculates the DER representation of the certificate, from scratch. 
BIO* bio = BIO_new(BIO_s_mem()); - if (!bio) { - FATAL() << "Unreachable code."; - } - if (!i2d_X509_bio(bio, x509_)) { - BIO_free(bio); - FATAL() << "Unreachable code."; - } + RTC_CHECK(bio); + RTC_CHECK(i2d_X509_bio(bio, x509_)); char* data = nullptr; size_t length = BIO_get_mem_data(bio, &data); der_buffer->SetData(data, length); diff --git a/rtc_base/openssl_identity.cc b/rtc_base/openssl_identity.cc index c94df40bfb..3794d981ce 100644 --- a/rtc_base/openssl_identity.cc +++ b/rtc_base/openssl_identity.cc @@ -20,10 +20,8 @@ #endif // WEBRTC_WIN #include -#include #include #include -#include #include #include "absl/memory/memory.h" @@ -35,160 +33,6 @@ namespace rtc { -// We could have exposed a myriad of parameters for the crypto stuff, -// but keeping it simple seems best. - -// Generate a key pair. Caller is responsible for freeing the returned object. -static EVP_PKEY* MakeKey(const KeyParams& key_params) { - RTC_LOG(LS_INFO) << "Making key pair"; - EVP_PKEY* pkey = EVP_PKEY_new(); - if (key_params.type() == KT_RSA) { - int key_length = key_params.rsa_params().mod_size; - BIGNUM* exponent = BN_new(); - RSA* rsa = RSA_new(); - if (!pkey || !exponent || !rsa || - !BN_set_word(exponent, key_params.rsa_params().pub_exp) || - !RSA_generate_key_ex(rsa, key_length, exponent, nullptr) || - !EVP_PKEY_assign_RSA(pkey, rsa)) { - EVP_PKEY_free(pkey); - BN_free(exponent); - RSA_free(rsa); - RTC_LOG(LS_ERROR) << "Failed to make RSA key pair"; - return nullptr; - } - // ownership of rsa struct was assigned, don't free it. - BN_free(exponent); - } else if (key_params.type() == KT_ECDSA) { - if (key_params.ec_curve() == EC_NIST_P256) { - EC_KEY* ec_key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); - - // Ensure curve name is included when EC key is serialized. - // Without this call, OpenSSL versions before 1.1.0 will create - // certificates that don't work for TLS. 
- // This is a no-op for BoringSSL and OpenSSL 1.1.0+ - EC_KEY_set_asn1_flag(ec_key, OPENSSL_EC_NAMED_CURVE); - - if (!pkey || !ec_key || !EC_KEY_generate_key(ec_key) || - !EVP_PKEY_assign_EC_KEY(pkey, ec_key)) { - EVP_PKEY_free(pkey); - EC_KEY_free(ec_key); - RTC_LOG(LS_ERROR) << "Failed to make EC key pair"; - return nullptr; - } - // ownership of ec_key struct was assigned, don't free it. - } else { - // Add generation of any other curves here. - EVP_PKEY_free(pkey); - RTC_LOG(LS_ERROR) << "ECDSA key requested for unknown curve"; - return nullptr; - } - } else { - EVP_PKEY_free(pkey); - RTC_LOG(LS_ERROR) << "Key type requested not understood"; - return nullptr; - } - - RTC_LOG(LS_INFO) << "Returning key pair"; - return pkey; -} - -OpenSSLKeyPair* OpenSSLKeyPair::Generate(const KeyParams& key_params) { - EVP_PKEY* pkey = MakeKey(key_params); - if (!pkey) { - openssl::LogSSLErrors("Generating key pair"); - return nullptr; - } - return new OpenSSLKeyPair(pkey); -} - -OpenSSLKeyPair* OpenSSLKeyPair::FromPrivateKeyPEMString( - const std::string& pem_string) { - BIO* bio = BIO_new_mem_buf(const_cast(pem_string.c_str()), -1); - if (!bio) { - RTC_LOG(LS_ERROR) << "Failed to create a new BIO buffer."; - return nullptr; - } - BIO_set_mem_eof_return(bio, 0); - EVP_PKEY* pkey = - PEM_read_bio_PrivateKey(bio, nullptr, nullptr, const_cast("\0")); - BIO_free(bio); // Frees the BIO, but not the pointed-to string. 
- if (!pkey) { - RTC_LOG(LS_ERROR) << "Failed to create the private key from PEM string."; - return nullptr; - } - if (EVP_PKEY_missing_parameters(pkey) != 0) { - RTC_LOG(LS_ERROR) - << "The resulting key pair is missing public key parameters."; - EVP_PKEY_free(pkey); - return nullptr; - } - return new OpenSSLKeyPair(pkey); -} - -OpenSSLKeyPair::~OpenSSLKeyPair() { - EVP_PKEY_free(pkey_); -} - -OpenSSLKeyPair* OpenSSLKeyPair::GetReference() { - AddReference(); - return new OpenSSLKeyPair(pkey_); -} - -void OpenSSLKeyPair::AddReference() { - EVP_PKEY_up_ref(pkey_); -} - -std::string OpenSSLKeyPair::PrivateKeyToPEMString() const { - BIO* temp_memory_bio = BIO_new(BIO_s_mem()); - if (!temp_memory_bio) { - RTC_LOG_F(LS_ERROR) << "Failed to allocate temporary memory bio"; - RTC_NOTREACHED(); - return ""; - } - if (!PEM_write_bio_PrivateKey(temp_memory_bio, pkey_, nullptr, nullptr, 0, - nullptr, nullptr)) { - RTC_LOG_F(LS_ERROR) << "Failed to write private key"; - BIO_free(temp_memory_bio); - RTC_NOTREACHED(); - return ""; - } - BIO_write(temp_memory_bio, "\0", 1); - char* buffer; - BIO_get_mem_data(temp_memory_bio, &buffer); - std::string priv_key_str = buffer; - BIO_free(temp_memory_bio); - return priv_key_str; -} - -std::string OpenSSLKeyPair::PublicKeyToPEMString() const { - BIO* temp_memory_bio = BIO_new(BIO_s_mem()); - if (!temp_memory_bio) { - RTC_LOG_F(LS_ERROR) << "Failed to allocate temporary memory bio"; - RTC_NOTREACHED(); - return ""; - } - if (!PEM_write_bio_PUBKEY(temp_memory_bio, pkey_)) { - RTC_LOG_F(LS_ERROR) << "Failed to write public key"; - BIO_free(temp_memory_bio); - RTC_NOTREACHED(); - return ""; - } - BIO_write(temp_memory_bio, "\0", 1); - char* buffer; - BIO_get_mem_data(temp_memory_bio, &buffer); - std::string pub_key_str = buffer; - BIO_free(temp_memory_bio); - return pub_key_str; -} - -bool OpenSSLKeyPair::operator==(const OpenSSLKeyPair& other) const { - return EVP_PKEY_cmp(this->pkey_, other.pkey_) == 1; -} - -bool 
OpenSSLKeyPair::operator!=(const OpenSSLKeyPair& other) const { - return !(*this == other); -} - OpenSSLIdentity::OpenSSLIdentity( std::unique_ptr key_pair, std::unique_ptr certificate) @@ -211,8 +55,7 @@ OpenSSLIdentity::~OpenSSLIdentity() = default; std::unique_ptr OpenSSLIdentity::CreateInternal( const SSLIdentityParams& params) { - std::unique_ptr key_pair( - OpenSSLKeyPair::Generate(params.key_params)); + auto key_pair = OpenSSLKeyPair::Generate(params.key_params); if (key_pair) { std::unique_ptr certificate( OpenSSLCertificate::Generate(key_pair.get(), params)); @@ -221,7 +64,7 @@ std::unique_ptr OpenSSLIdentity::CreateInternal( new OpenSSLIdentity(std::move(key_pair), std::move(certificate))); } } - RTC_LOG(LS_INFO) << "Identity generation failed"; + RTC_LOG(LS_ERROR) << "Identity generation failed"; return nullptr; } @@ -256,8 +99,7 @@ std::unique_ptr OpenSSLIdentity::CreateFromPEMStrings( return nullptr; } - std::unique_ptr key_pair( - OpenSSLKeyPair::FromPrivateKeyPEMString(private_key)); + auto key_pair = OpenSSLKeyPair::FromPrivateKeyPEMString(private_key); if (!key_pair) { RTC_LOG(LS_ERROR) << "Failed to create key pair from PEM string."; return nullptr; @@ -298,8 +140,7 @@ std::unique_ptr OpenSSLIdentity::CreateFromPEMChainStrings( return nullptr; } - std::unique_ptr key_pair( - OpenSSLKeyPair::FromPrivateKeyPEMString(private_key)); + auto key_pair = OpenSSLKeyPair::FromPrivateKeyPEMString(private_key); if (!key_pair) { RTC_LOG(LS_ERROR) << "Failed to create key pair from PEM string."; return nullptr; @@ -320,8 +161,8 @@ const SSLCertChain& OpenSSLIdentity::cert_chain() const { std::unique_ptr OpenSSLIdentity::CloneInternal() const { // We cannot use std::make_unique here because the referenced OpenSSLIdentity // constructor is private. 
- return absl::WrapUnique(new OpenSSLIdentity( - absl::WrapUnique(key_pair_->GetReference()), cert_chain_->Clone())); + return absl::WrapUnique( + new OpenSSLIdentity(key_pair_->Clone(), cert_chain_->Clone())); } bool OpenSSLIdentity::ConfigureIdentity(SSL_CTX* ctx) { diff --git a/rtc_base/openssl_identity.h b/rtc_base/openssl_identity.h index a2ac87cf45..00d6c74922 100644 --- a/rtc_base/openssl_identity.h +++ b/rtc_base/openssl_identity.h @@ -17,45 +17,14 @@ #include #include -#include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/openssl_certificate.h" +#include "rtc_base/openssl_key_pair.h" #include "rtc_base/ssl_certificate.h" #include "rtc_base/ssl_identity.h" namespace rtc { -// OpenSSLKeyPair encapsulates an OpenSSL EVP_PKEY* keypair object, -// which is reference counted inside the OpenSSL library. -class OpenSSLKeyPair final { - public: - explicit OpenSSLKeyPair(EVP_PKEY* pkey) : pkey_(pkey) { - RTC_DCHECK(pkey_ != nullptr); - } - - static OpenSSLKeyPair* Generate(const KeyParams& key_params); - // Constructs a key pair from the private key PEM string. This must not result - // in missing public key parameters. Returns null on error. - static OpenSSLKeyPair* FromPrivateKeyPEMString(const std::string& pem_string); - - virtual ~OpenSSLKeyPair(); - - virtual OpenSSLKeyPair* GetReference(); - - EVP_PKEY* pkey() const { return pkey_; } - std::string PrivateKeyToPEMString() const; - std::string PublicKeyToPEMString() const; - bool operator==(const OpenSSLKeyPair& other) const; - bool operator!=(const OpenSSLKeyPair& other) const; - - private: - void AddReference(); - - EVP_PKEY* pkey_; - - RTC_DISALLOW_COPY_AND_ASSIGN(OpenSSLKeyPair); -}; - // Holds a keypair and certificate together, and a method to generate // them consistently. 
class OpenSSLIdentity final : public SSLIdentity { diff --git a/rtc_base/openssl_key_pair.cc b/rtc_base/openssl_key_pair.cc new file mode 100644 index 0000000000..911a751cbe --- /dev/null +++ b/rtc_base/openssl_key_pair.cc @@ -0,0 +1,192 @@ +/* + * Copyright 2004 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/openssl_key_pair.h" + +#include +#include + +#if defined(WEBRTC_WIN) +// Must be included first before openssl headers. +#include "rtc_base/win32.h" // NOLINT +#endif // WEBRTC_WIN + +#include +#include +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" +#include "rtc_base/openssl.h" +#include "rtc_base/openssl_utility.h" + +namespace rtc { + +// We could have exposed a myriad of parameters for the crypto stuff, +// but keeping it simple seems best. + +// Generate a key pair. Caller is responsible for freeing the returned object. +static EVP_PKEY* MakeKey(const KeyParams& key_params) { + RTC_LOG(LS_INFO) << "Making key pair"; + EVP_PKEY* pkey = EVP_PKEY_new(); + if (key_params.type() == KT_RSA) { + int key_length = key_params.rsa_params().mod_size; + BIGNUM* exponent = BN_new(); + RSA* rsa = RSA_new(); + if (!pkey || !exponent || !rsa || + !BN_set_word(exponent, key_params.rsa_params().pub_exp) || + !RSA_generate_key_ex(rsa, key_length, exponent, nullptr) || + !EVP_PKEY_assign_RSA(pkey, rsa)) { + EVP_PKEY_free(pkey); + BN_free(exponent); + RSA_free(rsa); + RTC_LOG(LS_ERROR) << "Failed to make RSA key pair"; + return nullptr; + } + // ownership of rsa struct was assigned, don't free it. 
+ BN_free(exponent); + } else if (key_params.type() == KT_ECDSA) { + if (key_params.ec_curve() == EC_NIST_P256) { + EC_KEY* ec_key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); + if (!ec_key) { + EVP_PKEY_free(pkey); + RTC_LOG(LS_ERROR) << "Failed to allocate EC key"; + return nullptr; + } + + // Ensure curve name is included when EC key is serialized. + // Without this call, OpenSSL versions before 1.1.0 will create + // certificates that don't work for TLS. + // This is a no-op for BoringSSL and OpenSSL 1.1.0+ + EC_KEY_set_asn1_flag(ec_key, OPENSSL_EC_NAMED_CURVE); + + if (!pkey || !ec_key || !EC_KEY_generate_key(ec_key) || + !EVP_PKEY_assign_EC_KEY(pkey, ec_key)) { + EVP_PKEY_free(pkey); + EC_KEY_free(ec_key); + RTC_LOG(LS_ERROR) << "Failed to make EC key pair"; + return nullptr; + } + // ownership of ec_key struct was assigned, don't free it. + } else { + // Add generation of any other curves here. + EVP_PKEY_free(pkey); + RTC_LOG(LS_ERROR) << "ECDSA key requested for unknown curve"; + return nullptr; + } + } else { + EVP_PKEY_free(pkey); + RTC_LOG(LS_ERROR) << "Key type requested not understood"; + return nullptr; + } + + RTC_LOG(LS_INFO) << "Returning key pair"; + return pkey; +} + +std::unique_ptr OpenSSLKeyPair::Generate( + const KeyParams& key_params) { + EVP_PKEY* pkey = MakeKey(key_params); + if (!pkey) { + openssl::LogSSLErrors("Generating key pair"); + return nullptr; + } + return std::make_unique(pkey); +} + +std::unique_ptr OpenSSLKeyPair::FromPrivateKeyPEMString( + const std::string& pem_string) { + BIO* bio = + BIO_new_mem_buf(const_cast(pem_string.data()), pem_string.size()); + if (!bio) { + RTC_LOG(LS_ERROR) << "Failed to create a new BIO buffer."; + return nullptr; + } + BIO_set_mem_eof_return(bio, 0); + EVP_PKEY* pkey = PEM_read_bio_PrivateKey(bio, nullptr, nullptr, nullptr); + BIO_free(bio); // Frees the BIO, but not the pointed-to string. 
+ if (!pkey) { + RTC_LOG(LS_ERROR) << "Failed to create the private key from PEM string."; + return nullptr; + } + if (EVP_PKEY_missing_parameters(pkey) != 0) { + RTC_LOG(LS_ERROR) + << "The resulting key pair is missing public key parameters."; + EVP_PKEY_free(pkey); + return nullptr; + } + return std::make_unique(pkey); +} + +OpenSSLKeyPair::~OpenSSLKeyPair() { + EVP_PKEY_free(pkey_); +} + +std::unique_ptr OpenSSLKeyPair::Clone() { + AddReference(); + return std::make_unique(pkey_); +} + +void OpenSSLKeyPair::AddReference() { + EVP_PKEY_up_ref(pkey_); +} + +std::string OpenSSLKeyPair::PrivateKeyToPEMString() const { + BIO* temp_memory_bio = BIO_new(BIO_s_mem()); + if (!temp_memory_bio) { + RTC_LOG_F(LS_ERROR) << "Failed to allocate temporary memory bio"; + RTC_NOTREACHED(); + return ""; + } + if (!PEM_write_bio_PrivateKey(temp_memory_bio, pkey_, nullptr, nullptr, 0, + nullptr, nullptr)) { + RTC_LOG_F(LS_ERROR) << "Failed to write private key"; + BIO_free(temp_memory_bio); + RTC_NOTREACHED(); + return ""; + } + char* buffer; + size_t len = BIO_get_mem_data(temp_memory_bio, &buffer); + std::string priv_key_str(buffer, len); + BIO_free(temp_memory_bio); + return priv_key_str; +} + +std::string OpenSSLKeyPair::PublicKeyToPEMString() const { + BIO* temp_memory_bio = BIO_new(BIO_s_mem()); + if (!temp_memory_bio) { + RTC_LOG_F(LS_ERROR) << "Failed to allocate temporary memory bio"; + RTC_NOTREACHED(); + return ""; + } + if (!PEM_write_bio_PUBKEY(temp_memory_bio, pkey_)) { + RTC_LOG_F(LS_ERROR) << "Failed to write public key"; + BIO_free(temp_memory_bio); + RTC_NOTREACHED(); + return ""; + } + BIO_write(temp_memory_bio, "\0", 1); + char* buffer; + BIO_get_mem_data(temp_memory_bio, &buffer); + std::string pub_key_str = buffer; + BIO_free(temp_memory_bio); + return pub_key_str; +} + +bool OpenSSLKeyPair::operator==(const OpenSSLKeyPair& other) const { + return EVP_PKEY_cmp(this->pkey_, other.pkey_) == 1; +} + +bool OpenSSLKeyPair::operator!=(const OpenSSLKeyPair& other) 
const { + return !(*this == other); +} + +} // namespace rtc diff --git a/rtc_base/openssl_key_pair.h b/rtc_base/openssl_key_pair.h new file mode 100644 index 0000000000..a84c43b6bd --- /dev/null +++ b/rtc_base/openssl_key_pair.h @@ -0,0 +1,60 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_OPENSSL_KEY_PAIR_H_ +#define RTC_BASE_OPENSSL_KEY_PAIR_H_ + +#include + +#include +#include + +#include "rtc_base/checks.h" +#include "rtc_base/constructor_magic.h" +#include "rtc_base/ssl_identity.h" + +namespace rtc { + +// OpenSSLKeyPair encapsulates an OpenSSL EVP_PKEY* keypair object, +// which is reference counted inside the OpenSSL library. +class OpenSSLKeyPair final { + public: + // Takes ownership of the key. + explicit OpenSSLKeyPair(EVP_PKEY* pkey) : pkey_(pkey) { + RTC_DCHECK(pkey_ != nullptr); + } + + static std::unique_ptr Generate(const KeyParams& key_params); + // Constructs a key pair from the private key PEM string. This must not result + // in missing public key parameters. Returns null on error. 
+ static std::unique_ptr FromPrivateKeyPEMString( + const std::string& pem_string); + + ~OpenSSLKeyPair(); + + std::unique_ptr Clone(); + + EVP_PKEY* pkey() const { return pkey_; } + std::string PrivateKeyToPEMString() const; + std::string PublicKeyToPEMString() const; + bool operator==(const OpenSSLKeyPair& other) const; + bool operator!=(const OpenSSLKeyPair& other) const; + + private: + void AddReference(); + + EVP_PKEY* pkey_; + + RTC_DISALLOW_COPY_AND_ASSIGN(OpenSSLKeyPair); +}; + +} // namespace rtc + +#endif // RTC_BASE_OPENSSL_KEY_PAIR_H_ diff --git a/rtc_base/openssl_session_cache_unittest.cc b/rtc_base/openssl_session_cache_unittest.cc index 1d3084bbc5..0441d5c012 100644 --- a/rtc_base/openssl_session_cache_unittest.cc +++ b/rtc_base/openssl_session_cache_unittest.cc @@ -19,10 +19,28 @@ #include "rtc_base/gunit.h" #include "rtc_base/openssl.h" +namespace { +// Use methods that avoid X509 objects if possible. +SSL_CTX* NewDtlsContext() { +#ifdef OPENSSL_IS_BORINGSSL + return SSL_CTX_new(DTLS_with_buffers_method()); +#else + return SSL_CTX_new(DTLS_method()); +#endif +} +SSL_CTX* NewTlsContext() { +#ifdef OPENSSL_IS_BORINGSSL + return SSL_CTX_new(TLS_with_buffers_method()); +#else + return SSL_CTX_new(TLS_method()); +#endif +} +} // namespace + namespace rtc { TEST(OpenSSLSessionCache, DTLSModeSetCorrectly) { - SSL_CTX* ssl_ctx = SSL_CTX_new(DTLSv1_2_client_method()); + SSL_CTX* ssl_ctx = NewDtlsContext(); OpenSSLSessionCache session_cache(SSL_MODE_DTLS, ssl_ctx); EXPECT_EQ(session_cache.GetSSLMode(), SSL_MODE_DTLS); @@ -31,7 +49,7 @@ TEST(OpenSSLSessionCache, DTLSModeSetCorrectly) { } TEST(OpenSSLSessionCache, TLSModeSetCorrectly) { - SSL_CTX* ssl_ctx = SSL_CTX_new(TLSv1_2_client_method()); + SSL_CTX* ssl_ctx = NewTlsContext(); OpenSSLSessionCache session_cache(SSL_MODE_TLS, ssl_ctx); EXPECT_EQ(session_cache.GetSSLMode(), SSL_MODE_TLS); @@ -40,7 +58,7 @@ TEST(OpenSSLSessionCache, TLSModeSetCorrectly) { } TEST(OpenSSLSessionCache, SSLContextSetCorrectly) { 
- SSL_CTX* ssl_ctx = SSL_CTX_new(DTLSv1_2_client_method()); + SSL_CTX* ssl_ctx = NewDtlsContext(); OpenSSLSessionCache session_cache(SSL_MODE_DTLS, ssl_ctx); EXPECT_EQ(session_cache.GetSSLContext(), ssl_ctx); @@ -49,7 +67,7 @@ TEST(OpenSSLSessionCache, SSLContextSetCorrectly) { } TEST(OpenSSLSessionCache, InvalidLookupReturnsNullptr) { - SSL_CTX* ssl_ctx = SSL_CTX_new(DTLSv1_2_client_method()); + SSL_CTX* ssl_ctx = NewDtlsContext(); OpenSSLSessionCache session_cache(SSL_MODE_DTLS, ssl_ctx); EXPECT_EQ(session_cache.LookupSession("Invalid"), nullptr); @@ -60,7 +78,7 @@ TEST(OpenSSLSessionCache, InvalidLookupReturnsNullptr) { } TEST(OpenSSLSessionCache, SimpleValidSessionLookup) { - SSL_CTX* ssl_ctx = SSL_CTX_new(DTLSv1_2_client_method()); + SSL_CTX* ssl_ctx = NewDtlsContext(); SSL_SESSION* ssl_session = SSL_SESSION_new(ssl_ctx); OpenSSLSessionCache session_cache(SSL_MODE_DTLS, ssl_ctx); @@ -71,7 +89,7 @@ TEST(OpenSSLSessionCache, SimpleValidSessionLookup) { } TEST(OpenSSLSessionCache, AddToExistingReplacesPrevious) { - SSL_CTX* ssl_ctx = SSL_CTX_new(DTLSv1_2_client_method()); + SSL_CTX* ssl_ctx = NewDtlsContext(); SSL_SESSION* ssl_session_1 = SSL_SESSION_new(ssl_ctx); SSL_SESSION* ssl_session_2 = SSL_SESSION_new(ssl_ctx); diff --git a/rtc_base/openssl_stream_adapter.cc b/rtc_base/openssl_stream_adapter.cc index 7f4b79a53a..aa0bc3d40c 100644 --- a/rtc_base/openssl_stream_adapter.cc +++ b/rtc_base/openssl_stream_adapter.cc @@ -21,6 +21,7 @@ #include #endif +#include #include #include #include @@ -31,9 +32,15 @@ #include "rtc_base/openssl.h" #include "rtc_base/openssl_adapter.h" #include "rtc_base/openssl_digest.h" +#ifdef OPENSSL_IS_BORINGSSL +#include "rtc_base/boringssl_identity.h" +#else #include "rtc_base/openssl_identity.h" +#endif +#include "rtc_base/openssl_utility.h" #include "rtc_base/ssl_certificate.h" #include "rtc_base/stream.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/thread.h" #include "rtc_base/time_utils.h" #include 
"system_wrappers/include/field_trial.h" @@ -50,7 +57,6 @@ namespace rtc { namespace { - // SRTP cipher suite table. |internal_name| is used to construct a // colon-separated profile strings which is needed by // SSL_CTX_set_tlsext_use_srtp(). @@ -265,9 +271,25 @@ static long stream_ctrl(BIO* b, int cmd, long num, void* ptr) { // OpenSSLStreamAdapter ///////////////////////////////////////////////////////////////////////////// +static std::atomic g_use_legacy_tls_protocols_override(false); +static std::atomic g_allow_legacy_tls_protocols(false); + +void SetAllowLegacyTLSProtocols(const absl::optional& allow) { + g_use_legacy_tls_protocols_override.store(allow.has_value()); + if (allow.has_value()) + g_allow_legacy_tls_protocols.store(allow.value()); +} + +bool ShouldAllowLegacyTLSProtocols() { + return g_use_legacy_tls_protocols_override.load() + ? g_allow_legacy_tls_protocols.load() + : webrtc::field_trial::IsEnabled("WebRTC-LegacyTlsProtocols"); +} + OpenSSLStreamAdapter::OpenSSLStreamAdapter( std::unique_ptr stream) - : SSLStreamAdapter(std::move(stream)), + : stream_(std::move(stream)), + owner_(rtc::Thread::Current()), state_(SSL_NONE), role_(SSL_CLIENT), ssl_read_needs_write_(false), @@ -278,19 +300,25 @@ OpenSSLStreamAdapter::OpenSSLStreamAdapter( ssl_max_version_(SSL_PROTOCOL_TLS_12), // Default is to support legacy TLS protocols. // This will be changed to default non-support in M82 or M83. 
- support_legacy_tls_protocols_flag_( - !webrtc::field_trial::IsDisabled("WebRTC-LegacyTlsProtocols")) {} + support_legacy_tls_protocols_flag_(ShouldAllowLegacyTLSProtocols()) { + stream_->SignalEvent.connect(this, &OpenSSLStreamAdapter::OnEvent); +} OpenSSLStreamAdapter::~OpenSSLStreamAdapter() { + timeout_task_.Stop(); Cleanup(0); } void OpenSSLStreamAdapter::SetIdentity(std::unique_ptr identity) { RTC_DCHECK(!identity_); +#ifdef OPENSSL_IS_BORINGSSL + identity_.reset(static_cast(identity.release())); +#else identity_.reset(static_cast(identity.release())); +#endif } -OpenSSLIdentity* OpenSSLStreamAdapter::GetIdentityForTesting() const { +SSLIdentity* OpenSSLStreamAdapter::GetIdentityForTesting() const { return identity_.get(); } @@ -493,7 +521,7 @@ int OpenSSLStreamAdapter::StartSSL() { return -1; } - if (StreamAdapterInterface::GetState() != SS_OPEN) { + if (stream_->GetState() != SS_OPEN) { state_ = SSL_WAIT; return 0; } @@ -530,12 +558,12 @@ StreamResult OpenSSLStreamAdapter::Write(const void* data, size_t data_len, size_t* written, int* error) { - RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::Write(" << data_len << ")"; + RTC_DLOG(LS_VERBOSE) << "OpenSSLStreamAdapter::Write(" << data_len << ")"; switch (state_) { case SSL_NONE: // pass-through in clear text - return StreamAdapterInterface::Write(data, data_len, written, error); + return stream_->Write(data, data_len, written, error); case SSL_WAIT: case SSL_CONNECTING: @@ -570,18 +598,18 @@ StreamResult OpenSSLStreamAdapter::Write(const void* data, int ssl_error = SSL_get_error(ssl_, code); switch (ssl_error) { case SSL_ERROR_NONE: - RTC_LOG(LS_VERBOSE) << " -- success"; + RTC_DLOG(LS_VERBOSE) << " -- success"; RTC_DCHECK_GT(code, 0); RTC_DCHECK_LE(code, data_len); if (written) *written = code; return SR_SUCCESS; case SSL_ERROR_WANT_READ: - RTC_LOG(LS_VERBOSE) << " -- error want read"; + RTC_DLOG(LS_VERBOSE) << " -- error want read"; ssl_write_needs_read_ = true; return SR_BLOCK; case SSL_ERROR_WANT_WRITE: - 
RTC_LOG(LS_VERBOSE) << " -- error want write"; + RTC_DLOG(LS_VERBOSE) << " -- error want write"; return SR_BLOCK; case SSL_ERROR_ZERO_RETURN: @@ -599,11 +627,11 @@ StreamResult OpenSSLStreamAdapter::Read(void* data, size_t data_len, size_t* read, int* error) { - RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::Read(" << data_len << ")"; + RTC_DLOG(LS_VERBOSE) << "OpenSSLStreamAdapter::Read(" << data_len << ")"; switch (state_) { case SSL_NONE: // pass-through in clear text - return StreamAdapterInterface::Read(data, data_len, read, error); + return stream_->Read(data, data_len, read, error); case SSL_WAIT: case SSL_CONNECTING: return SR_BLOCK; @@ -637,7 +665,7 @@ StreamResult OpenSSLStreamAdapter::Read(void* data, switch (ssl_error) { case SSL_ERROR_NONE: - RTC_LOG(LS_VERBOSE) << " -- success"; + RTC_DLOG(LS_VERBOSE) << " -- success"; RTC_DCHECK_GT(code, 0); RTC_DCHECK_LE(code, data_len); if (read) { @@ -649,7 +677,7 @@ StreamResult OpenSSLStreamAdapter::Read(void* data, unsigned int pending = SSL_pending(ssl_); if (pending) { - RTC_LOG(LS_INFO) << " -- short DTLS read. flushing"; + RTC_DLOG(LS_INFO) << " -- short DTLS read. 
flushing"; FlushInput(pending); if (error) { *error = SSE_MSG_TRUNC; @@ -659,14 +687,14 @@ StreamResult OpenSSLStreamAdapter::Read(void* data, } return SR_SUCCESS; case SSL_ERROR_WANT_READ: - RTC_LOG(LS_VERBOSE) << " -- error want read"; + RTC_DLOG(LS_VERBOSE) << " -- error want read"; return SR_BLOCK; case SSL_ERROR_WANT_WRITE: - RTC_LOG(LS_VERBOSE) << " -- error want write"; + RTC_DLOG(LS_VERBOSE) << " -- error want write"; ssl_read_needs_write_ = true; return SR_BLOCK; case SSL_ERROR_ZERO_RETURN: - RTC_LOG(LS_VERBOSE) << " -- remote side closed"; + RTC_DLOG(LS_VERBOSE) << " -- remote side closed"; Close(); return SR_EOS; default: @@ -696,7 +724,7 @@ void OpenSSLStreamAdapter::FlushInput(unsigned int left) { return; } - RTC_LOG(LS_VERBOSE) << " -- flushed " << code << " bytes"; + RTC_DLOG(LS_VERBOSE) << " -- flushed " << code << " bytes"; left -= code; } } @@ -707,7 +735,7 @@ void OpenSSLStreamAdapter::Close() { // When we're closed at SSL layer, also close the stream level which // performs necessary clean up. Otherwise, a new incoming packet after // this could overflow the stream buffer. - StreamAdapterInterface::Close(); + stream_->Close(); } StreamState OpenSSLStreamAdapter::GetState() const { @@ -731,10 +759,10 @@ void OpenSSLStreamAdapter::OnEvent(StreamInterface* stream, int err) { int events_to_signal = 0; int signal_error = 0; - RTC_DCHECK(stream == this->stream()); + RTC_DCHECK(stream == stream_.get()); if ((events & SE_OPEN)) { - RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent SE_OPEN"; + RTC_DLOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent SE_OPEN"; if (state_ != SSL_WAIT) { RTC_DCHECK(state_ == SSL_NONE); events_to_signal |= SE_OPEN; @@ -748,9 +776,9 @@ void OpenSSLStreamAdapter::OnEvent(StreamInterface* stream, } if ((events & (SE_READ | SE_WRITE))) { - RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent" - << ((events & SE_READ) ? " SE_READ" : "") - << ((events & SE_WRITE) ? 
" SE_WRITE" : ""); + RTC_DLOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent" + << ((events & SE_READ) ? " SE_READ" : "") + << ((events & SE_WRITE) ? " SE_WRITE" : ""); if (state_ == SSL_NONE) { events_to_signal |= events & (SE_READ | SE_WRITE); } else if (state_ == SSL_CONNECTING) { @@ -761,20 +789,20 @@ void OpenSSLStreamAdapter::OnEvent(StreamInterface* stream, } else if (state_ == SSL_CONNECTED) { if (((events & SE_READ) && ssl_write_needs_read_) || (events & SE_WRITE)) { - RTC_LOG(LS_VERBOSE) << " -- onStreamWriteable"; + RTC_DLOG(LS_VERBOSE) << " -- onStreamWriteable"; events_to_signal |= SE_WRITE; } if (((events & SE_WRITE) && ssl_read_needs_write_) || (events & SE_READ)) { - RTC_LOG(LS_VERBOSE) << " -- onStreamReadable"; + RTC_DLOG(LS_VERBOSE) << " -- onStreamReadable"; events_to_signal |= SE_READ; } } } if ((events & SE_CLOSE)) { - RTC_LOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent(SE_CLOSE, " << err - << ")"; + RTC_DLOG(LS_VERBOSE) << "OpenSSLStreamAdapter::OnEvent(SE_CLOSE, " << err + << ")"; Cleanup(0); events_to_signal |= SE_CLOSE; // SE_CLOSE is the only event that uses the final parameter to OnEvent(). @@ -783,14 +811,48 @@ void OpenSSLStreamAdapter::OnEvent(StreamInterface* stream, } if (events_to_signal) { - StreamAdapterInterface::OnEvent(stream, events_to_signal, signal_error); + // Note that the adapter presents itself as the origin of the stream events, + // since users of the adapter may not recognize the adapted object. + SignalEvent(this, events_to_signal, signal_error); } } +void OpenSSLStreamAdapter::PostEvent(int events, int err) { + owner_->PostTask(webrtc::ToQueuedTask( + task_safety_, [this, events, err]() { SignalEvent(this, events, err); })); +} + +void OpenSSLStreamAdapter::SetTimeout(int delay_ms) { + // We need to accept 0 delay here as well as >0 delay, because + // DTLSv1_get_timeout seems to frequently return 0 ms. 
+ RTC_DCHECK_GE(delay_ms, 0); + RTC_DCHECK(!timeout_task_.Running()); + + timeout_task_ = webrtc::RepeatingTaskHandle::DelayedStart( + owner_, webrtc::TimeDelta::Millis(delay_ms), + [flag = task_safety_.flag(), this]() { + if (flag->alive()) { + RTC_DLOG(LS_INFO) << "DTLS timeout expired"; + timeout_task_.Stop(); + int res = DTLSv1_handle_timeout(ssl_); + if (res > 0) { + RTC_LOG(LS_INFO) << "DTLS retransmission"; + } else if (res < 0) { + RTC_LOG(LS_INFO) << "DTLSv1_handle_timeout() return -1"; + } + ContinueSSL(); + } else { + RTC_NOTREACHED(); + } + // This callback will never run again (stopped above). + return webrtc::TimeDelta::PlusInfinity(); + }); +} + int OpenSSLStreamAdapter::BeginSSL() { RTC_DCHECK(state_ == SSL_CONNECTING); // The underlying stream has opened. - RTC_LOG(LS_INFO) << "BeginSSL with peer."; + RTC_DLOG(LS_INFO) << "BeginSSL with peer."; BIO* bio = nullptr; @@ -801,7 +863,7 @@ int OpenSSLStreamAdapter::BeginSSL() { return -1; } - bio = BIO_new_stream(static_cast(stream())); + bio = BIO_new_stream(stream_.get()); if (!bio) { return -1; } @@ -833,18 +895,18 @@ int OpenSSLStreamAdapter::BeginSSL() { } int OpenSSLStreamAdapter::ContinueSSL() { - RTC_LOG(LS_VERBOSE) << "ContinueSSL"; + RTC_DLOG(LS_VERBOSE) << "ContinueSSL"; RTC_DCHECK(state_ == SSL_CONNECTING); // Clear the DTLS timer - Thread::Current()->Clear(this, MSG_TIMEOUT); + timeout_task_.Stop(); const int code = (role_ == SSL_CLIENT) ? SSL_connect(ssl_) : SSL_accept(ssl_); const int ssl_error = SSL_get_error(ssl_, code); switch (ssl_error) { case SSL_ERROR_NONE: - RTC_LOG(LS_VERBOSE) << " -- success"; + RTC_DLOG(LS_VERBOSE) << " -- success"; // By this point, OpenSSL should have given us a certificate, or errored // out if one was missing. RTC_DCHECK(peer_cert_chain_ || !GetClientAuthEnabled()); @@ -859,34 +921,32 @@ int OpenSSLStreamAdapter::ContinueSSL() { // The caller of ContinueSSL may be the same object listening for these // events and may not be prepared for reentrancy. 
// PostEvent(SE_OPEN | SE_READ | SE_WRITE, 0); - StreamAdapterInterface::OnEvent(stream(), SE_OPEN | SE_READ | SE_WRITE, - 0); + SignalEvent(this, SE_OPEN | SE_READ | SE_WRITE, 0); } break; case SSL_ERROR_WANT_READ: { - RTC_LOG(LS_VERBOSE) << " -- error want read"; + RTC_DLOG(LS_VERBOSE) << " -- error want read"; struct timeval timeout; if (DTLSv1_get_timeout(ssl_, &timeout)) { int delay = timeout.tv_sec * 1000 + timeout.tv_usec / 1000; - - Thread::Current()->PostDelayed(RTC_FROM_HERE, delay, this, MSG_TIMEOUT, - 0); + SetTimeout(delay); } } break; case SSL_ERROR_WANT_WRITE: - RTC_LOG(LS_VERBOSE) << " -- error want write"; + RTC_DLOG(LS_VERBOSE) << " -- error want write"; break; case SSL_ERROR_ZERO_RETURN: default: - RTC_LOG(LS_VERBOSE) << " -- error " << code; SSLHandshakeError ssl_handshake_err = SSLHandshakeError::UNKNOWN; int err_code = ERR_peek_last_error(); if (err_code != 0 && ERR_GET_REASON(err_code) == SSL_R_NO_SHARED_CIPHER) { ssl_handshake_err = SSLHandshakeError::INCOMPATIBLE_CIPHERSUITE; } + RTC_DLOG(LS_VERBOSE) << " -- error " << code << ", " << err_code << ", " + << ERR_GET_REASON(err_code); SignalSSLHandshakeError(ssl_handshake_err); return (ssl_error != 0) ? 
ssl_error : -1; } @@ -904,12 +964,12 @@ void OpenSSLStreamAdapter::Error(const char* context, ssl_error_code_ = err; Cleanup(alert); if (signal) { - StreamAdapterInterface::OnEvent(stream(), SE_CLOSE, err); + SignalEvent(this, SE_CLOSE, err); } } void OpenSSLStreamAdapter::Cleanup(uint8_t alert) { - RTC_LOG(LS_INFO) << "Cleanup"; + RTC_DLOG(LS_INFO) << "Cleanup"; if (state_ != SSL_ERROR) { state_ = SSL_CLOSED; @@ -947,23 +1007,20 @@ void OpenSSLStreamAdapter::Cleanup(uint8_t alert) { peer_cert_chain_.reset(); // Clear the DTLS timer - Thread::Current()->Clear(this, MSG_TIMEOUT); -} - -void OpenSSLStreamAdapter::OnMessage(Message* msg) { - // Process our own messages and then pass others to the superclass - if (MSG_TIMEOUT == msg->message_id) { - RTC_LOG(LS_INFO) << "DTLS timeout expired"; - DTLSv1_handle_timeout(ssl_); - ContinueSSL(); - } else { - StreamInterface::OnMessage(msg); - } + timeout_task_.Stop(); } SSL_CTX* OpenSSLStreamAdapter::SetupSSLContext() { +#ifdef OPENSSL_IS_BORINGSSL + // If X509 objects aren't used, we can use these methods to avoid + // linking the sizable crypto/x509 code, using CRYPTO_BUFFER instead. + SSL_CTX* ctx = + SSL_CTX_new(ssl_mode_ == SSL_MODE_DTLS ? DTLS_with_buffers_method() + : TLS_with_buffers_method()); +#else SSL_CTX* ctx = SSL_CTX_new(ssl_mode_ == SSL_MODE_DTLS ? DTLS_method() : TLS_method()); +#endif if (ctx == nullptr) { return nullptr; } @@ -1001,6 +1058,7 @@ SSL_CTX* OpenSSLStreamAdapter::SetupSSLContext() { if (g_use_time_callback_for_testing) { SSL_CTX_set_current_time_cb(ctx, &TimeCallbackForTesting); } + SSL_CTX_set0_buffer_pool(ctx, openssl::GetBufferPool()); #endif if (identity_ && !identity_->ConfigureIdentity(ctx)) { @@ -1021,11 +1079,16 @@ SSL_CTX* OpenSSLStreamAdapter::SetupSSLContext() { } // Configure a custom certificate verification callback to check the peer - // certificate digest. 
Note the second argument to SSL_CTX_set_verify is to - // override individual errors in the default verification logic, which is not - // what we want here. + // certificate digest. +#ifdef OPENSSL_IS_BORINGSSL + // Use CRYPTO_BUFFER version of the callback if building with BoringSSL. + SSL_CTX_set_custom_verify(ctx, mode, SSLVerifyCallback); +#else + // Note the second argument to SSL_CTX_set_verify is to override individual + // errors in the default verification logic, which is not what we want here. SSL_CTX_set_verify(ctx, mode, nullptr); SSL_CTX_set_cert_verify_callback(ctx, SSLVerifyCallback, nullptr); +#endif // Select list of available ciphers. Note that !SHA256 and !SHA384 only // remove HMAC-SHA256 and HMAC-SHA384 cipher suites, not GCM cipher suites @@ -1050,14 +1113,12 @@ bool OpenSSLStreamAdapter::VerifyPeerCertificate() { RTC_LOG(LS_WARNING) << "Missing digest or peer certificate."; return false; } - const OpenSSLCertificate* leaf_cert = - static_cast(&peer_cert_chain_->Get(0)); unsigned char digest[EVP_MAX_MD_SIZE]; size_t digest_length; - if (!OpenSSLCertificate::ComputeDigest( - leaf_cert->x509(), peer_certificate_digest_algorithm_, digest, - sizeof(digest), &digest_length)) { + if (!peer_cert_chain_->Get(0).ComputeDigest( + peer_certificate_digest_algorithm_, digest, sizeof(digest), + &digest_length)) { RTC_LOG(LS_WARNING) << "Failed to compute peer cert digest."; return false; } @@ -1071,7 +1132,7 @@ bool OpenSSLStreamAdapter::VerifyPeerCertificate() { // Ignore any verification error if the digest matches, since there is no // value in checking the validity of a self-signed cert issued by untrusted // sources. - RTC_LOG(LS_INFO) << "Accepted peer certificate."; + RTC_DLOG(LS_INFO) << "Accepted peer certificate."; peer_certificate_verified_ = true; return true; } @@ -1081,6 +1142,36 @@ std::unique_ptr OpenSSLStreamAdapter::GetPeerSSLCertChain() return peer_cert_chain_ ? 
peer_cert_chain_->Clone() : nullptr; } +#ifdef OPENSSL_IS_BORINGSSL +enum ssl_verify_result_t OpenSSLStreamAdapter::SSLVerifyCallback( + SSL* ssl, + uint8_t* out_alert) { + // Get our OpenSSLStreamAdapter from the context. + OpenSSLStreamAdapter* stream = + reinterpret_cast(SSL_get_app_data(ssl)); + const STACK_OF(CRYPTO_BUFFER)* chain = SSL_get0_peer_certificates(ssl); + // Creates certificate chain. + std::vector> cert_chain; + for (CRYPTO_BUFFER* cert : chain) { + cert_chain.emplace_back(new BoringSSLCertificate(bssl::UpRef(cert))); + } + stream->peer_cert_chain_.reset(new SSLCertChain(std::move(cert_chain))); + + // If the peer certificate digest isn't known yet, we'll wait to verify + // until it's known, and for now just return a success status. + if (stream->peer_certificate_digest_algorithm_.empty()) { + RTC_LOG(LS_INFO) << "Waiting to verify certificate until digest is known."; + // TODO(deadbeef): Use ssl_verify_retry? + return ssl_verify_ok; + } + + if (!stream->VerifyPeerCertificate()) { + return ssl_verify_invalid; + } + + return ssl_verify_ok; +} +#else // OPENSSL_IS_BORINGSSL int OpenSSLStreamAdapter::SSLVerifyCallback(X509_STORE_CTX* store, void* arg) { // Get our SSL structure and OpenSSLStreamAdapter from the store. SSL* ssl = reinterpret_cast( @@ -1088,25 +1179,15 @@ int OpenSSLStreamAdapter::SSLVerifyCallback(X509_STORE_CTX* store, void* arg) { OpenSSLStreamAdapter* stream = reinterpret_cast(SSL_get_app_data(ssl)); -#if defined(OPENSSL_IS_BORINGSSL) - STACK_OF(X509)* chain = SSL_get_peer_full_cert_chain(ssl); - // Creates certificate chain. - std::vector> cert_chain; - for (X509* cert : chain) { - cert_chain.emplace_back(new OpenSSLCertificate(cert)); - } - stream->peer_cert_chain_.reset(new SSLCertChain(std::move(cert_chain))); -#else // Record the peer's certificate. 
X509* cert = X509_STORE_CTX_get0_cert(store); stream->peer_cert_chain_.reset( new SSLCertChain(std::make_unique(cert))); -#endif // If the peer certificate digest isn't known yet, we'll wait to verify // until it's known, and for now just return a success status. if (stream->peer_certificate_digest_algorithm_.empty()) { - RTC_LOG(LS_INFO) << "Waiting to verify certificate until digest is known."; + RTC_DLOG(LS_INFO) << "Waiting to verify certificate until digest is known."; return 1; } @@ -1117,6 +1198,7 @@ int OpenSSLStreamAdapter::SSLVerifyCallback(X509_STORE_CTX* store, void* arg) { return 1; } +#endif // !OPENSSL_IS_BORINGSSL bool OpenSSLStreamAdapter::IsBoringSsl() { #ifdef OPENSSL_IS_BORINGSSL diff --git a/rtc_base/openssl_stream_adapter.h b/rtc_base/openssl_stream_adapter.h index 7ea324321b..58e15e3e6f 100644 --- a/rtc_base/openssl_stream_adapter.h +++ b/rtc_base/openssl_stream_adapter.h @@ -19,11 +19,19 @@ #include #include +#include "absl/types/optional.h" #include "rtc_base/buffer.h" +#ifdef OPENSSL_IS_BORINGSSL +#include "rtc_base/boringssl_identity.h" +#else #include "rtc_base/openssl_identity.h" +#endif #include "rtc_base/ssl_identity.h" #include "rtc_base/ssl_stream_adapter.h" #include "rtc_base/stream.h" +#include "rtc_base/system/rtc_export.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/repeating_task.h" namespace rtc { @@ -55,13 +63,19 @@ class SSLCertChain; /////////////////////////////////////////////////////////////////////////////// +// If |allow| has a value, its value determines if legacy TLS protocols are +// allowed, overriding the default configuration. +// If |allow| has no value, any previous override is removed and the default +// configuration is restored. 
+RTC_EXPORT void SetAllowLegacyTLSProtocols(const absl::optional& allow); + class OpenSSLStreamAdapter final : public SSLStreamAdapter { public: explicit OpenSSLStreamAdapter(std::unique_ptr stream); ~OpenSSLStreamAdapter() override; void SetIdentity(std::unique_ptr identity) override; - OpenSSLIdentity* GetIdentityForTesting() const override; + SSLIdentity* GetIdentityForTesting() const override; // Default argument is for compatibility void SetServerRole(SSLRole role = SSL_SERVER) override; @@ -122,9 +136,6 @@ class OpenSSLStreamAdapter final : public SSLStreamAdapter { // using a fake clock. static void EnableTimeCallbackForTesting(); - protected: - void OnEvent(StreamInterface* stream, int events, int err) override; - private: enum SSLState { // Before calling one of the StartSSL methods, data flows @@ -137,7 +148,10 @@ class OpenSSLStreamAdapter final : public SSLStreamAdapter { SSL_CLOSED // Clean close }; - enum { MSG_TIMEOUT = MSG_MAX + 1 }; + void OnEvent(StreamInterface* stream, int events, int err); + + void PostEvent(int events, int err); + void SetTimeout(int delay_ms); // The following three methods return 0 on success and a negative // error code on failure. The error code may be from OpenSSL or -1 @@ -161,9 +175,6 @@ class OpenSSLStreamAdapter final : public SSLStreamAdapter { void Error(const char* context, int err, uint8_t alert, bool signal); void Cleanup(uint8_t alert); - // Override MessageHandler - void OnMessage(Message* msg) override; - // Flush the input buffers by reading left bytes (for DTLS) void FlushInput(unsigned int left); @@ -171,9 +182,16 @@ class OpenSSLStreamAdapter final : public SSLStreamAdapter { SSL_CTX* SetupSSLContext(); // Verify the peer certificate matches the signaled digest. bool VerifyPeerCertificate(); + +#ifdef OPENSSL_IS_BORINGSSL + // SSL certificate verification callback. See SSL_CTX_set_custom_verify. 
+ static enum ssl_verify_result_t SSLVerifyCallback(SSL* ssl, + uint8_t* out_alert); +#else // SSL certificate verification callback. See // SSL_CTX_set_cert_verify_callback. static int SSLVerifyCallback(X509_STORE_CTX* store, void* arg); +#endif bool WaitingToVerifyPeerCertificate() const { return GetClientAuthEnabled() && !peer_certificate_verified_; @@ -184,6 +202,12 @@ class OpenSSLStreamAdapter final : public SSLStreamAdapter { !peer_certificate_digest_value_.empty(); } + const std::unique_ptr stream_; + + rtc::Thread* const owner_; + webrtc::ScopedTaskSafety task_safety_; + webrtc::RepeatingTaskHandle timeout_task_; + SSLState state_; SSLRole role_; int ssl_error_code_; // valid when state_ == SSL_ERROR or SSL_CLOSED @@ -196,7 +220,11 @@ class OpenSSLStreamAdapter final : public SSLStreamAdapter { SSL_CTX* ssl_ctx_; // Our key and certificate. +#ifdef OPENSSL_IS_BORINGSSL + std::unique_ptr identity_; +#else std::unique_ptr identity_; +#endif // The certificate chain that the peer presented. Initially null, until the // connection is established. std::unique_ptr peer_cert_chain_; diff --git a/rtc_base/openssl_utility.cc b/rtc_base/openssl_utility.cc index 1984eb0706..b5d649ca51 100644 --- a/rtc_base/openssl_utility.cc +++ b/rtc_base/openssl_utility.cc @@ -14,6 +14,9 @@ #include "rtc_base/win32.h" // NOLINT #endif // WEBRTC_WIN +#ifdef OPENSSL_IS_BORINGSSL +#include +#endif #include #include #include @@ -23,7 +26,7 @@ #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/openssl.h" -#include "rtc_base/openssl_certificate.h" +#include "rtc_base/ssl_identity.h" #ifndef WEBRTC_EXCLUDE_BUILT_IN_SSL_ROOT_CERTS #include "rtc_base/ssl_roots.h" #endif // WEBRTC_EXCLUDE_BUILT_IN_SSL_ROOT_CERTS @@ -33,6 +36,10 @@ namespace openssl { // Holds various helper methods. 
namespace { + +// TODO(crbug.com/webrtc/11710): When OS certificate verification is available, +// and we don't need VerifyPeerCertMatchesHost, don't compile this in order to +// avoid a dependency on OpenSSL X509 objects (see crbug.com/webrtc/11410). void LogCertificates(SSL* ssl, X509* certificate) { // Logging certificates is extremely verbose. So it is disabled by default. #ifdef LOG_CERTIFICATES @@ -65,6 +72,118 @@ void LogCertificates(SSL* ssl, X509* certificate) { } } // namespace +#ifdef OPENSSL_IS_BORINGSSL +bool ParseCertificate(CRYPTO_BUFFER* cert_buffer, + CBS* signature_algorithm_oid, + int64_t* expiration_time) { + CBS cbs; + CRYPTO_BUFFER_init_CBS(cert_buffer, &cbs); + + // Certificate ::= SEQUENCE { + CBS certificate; + if (!CBS_get_asn1(&cbs, &certificate, CBS_ASN1_SEQUENCE)) { + return false; + } + // tbsCertificate TBSCertificate, + CBS tbs_certificate; + if (!CBS_get_asn1(&certificate, &tbs_certificate, CBS_ASN1_SEQUENCE)) { + return false; + } + // signatureAlgorithm AlgorithmIdentifier, + CBS signature_algorithm; + if (!CBS_get_asn1(&certificate, &signature_algorithm, CBS_ASN1_SEQUENCE)) { + return false; + } + if (!CBS_get_asn1(&signature_algorithm, signature_algorithm_oid, + CBS_ASN1_OBJECT)) { + return false; + } + // signatureValue BIT STRING } + if (!CBS_get_asn1(&certificate, nullptr, CBS_ASN1_BITSTRING)) { + return false; + } + if (CBS_len(&certificate)) { + return false; + } + + // Now parse the inner TBSCertificate. 
+ // version [0] EXPLICIT Version DEFAULT v1, + if (!CBS_get_optional_asn1( + &tbs_certificate, nullptr, nullptr, + CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC)) { + return false; + } + // serialNumber CertificateSerialNumber, + if (!CBS_get_asn1(&tbs_certificate, nullptr, CBS_ASN1_INTEGER)) { + return false; + } + // signature AlgorithmIdentifier + if (!CBS_get_asn1(&tbs_certificate, nullptr, CBS_ASN1_SEQUENCE)) { + return false; + } + // issuer Name, + if (!CBS_get_asn1(&tbs_certificate, nullptr, CBS_ASN1_SEQUENCE)) { + return false; + } + // validity Validity, + CBS validity; + if (!CBS_get_asn1(&tbs_certificate, &validity, CBS_ASN1_SEQUENCE)) { + return false; + } + // Skip over notBefore. + if (!CBS_get_any_asn1_element(&validity, nullptr, nullptr, nullptr)) { + return false; + } + // Parse notAfter. + CBS not_after; + unsigned not_after_tag; + if (!CBS_get_any_asn1(&validity, ¬_after, ¬_after_tag)) { + return false; + } + bool long_format; + if (not_after_tag == CBS_ASN1_UTCTIME) { + long_format = false; + } else if (not_after_tag == CBS_ASN1_GENERALIZEDTIME) { + long_format = true; + } else { + return false; + } + if (expiration_time) { + *expiration_time = + ASN1TimeToSec(CBS_data(¬_after), CBS_len(¬_after), long_format); + } + // subject Name, + if (!CBS_get_asn1_element(&tbs_certificate, nullptr, CBS_ASN1_SEQUENCE)) { + return false; + } + // subjectPublicKeyInfo SubjectPublicKeyInfo, + if (!CBS_get_asn1(&tbs_certificate, nullptr, CBS_ASN1_SEQUENCE)) { + return false; + } + // issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL + if (!CBS_get_optional_asn1(&tbs_certificate, nullptr, nullptr, + 0x01 | CBS_ASN1_CONTEXT_SPECIFIC)) { + return false; + } + // subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL + if (!CBS_get_optional_asn1(&tbs_certificate, nullptr, nullptr, + 0x02 | CBS_ASN1_CONTEXT_SPECIFIC)) { + return false; + } + // extensions [3] EXPLICIT Extensions OPTIONAL + if (!CBS_get_optional_asn1( + &tbs_certificate, nullptr, nullptr, + 
0x03 | CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC)) { + return false; + } + if (CBS_len(&tbs_certificate)) { + return false; + } + + return true; +} +#endif // OPENSSL_IS_BORINGSSL + bool VerifyPeerCertMatchesHost(SSL* ssl, const std::string& host) { if (host.empty()) { RTC_DLOG(LS_ERROR) << "Hostname is empty. Cannot verify peer certificate."; @@ -76,9 +195,28 @@ bool VerifyPeerCertMatchesHost(SSL* ssl, const std::string& host) { return false; } +#ifdef OPENSSL_IS_BORINGSSL + // We can't grab a X509 object directly, as the SSL context may have been + // initialized with TLS_with_buffers_method. + const STACK_OF(CRYPTO_BUFFER)* chain = SSL_get0_peer_certificates(ssl); + if (chain == nullptr || sk_CRYPTO_BUFFER_num(chain) == 0) { + RTC_LOG(LS_ERROR) + << "SSL_get0_peer_certificates failed. This should never happen."; + return false; + } + CRYPTO_BUFFER* leaf = sk_CRYPTO_BUFFER_value(chain, 0); + bssl::UniquePtr x509(X509_parse_from_buffer(leaf)); + if (!x509) { + RTC_LOG(LS_ERROR) << "Failed to parse certificate to X509 object."; + return false; + } + LogCertificates(ssl, x509.get()); + return X509_check_host(x509.get(), host.c_str(), host.size(), 0, nullptr) == + 1; +#else // OPENSSL_IS_BORINGSSL X509* certificate = SSL_get_peer_certificate(ssl); if (certificate == nullptr) { - RTC_DLOG(LS_ERROR) + RTC_LOG(LS_ERROR) << "SSL_get_peer_certificate failed. 
This should never happen."; return false; } @@ -89,6 +227,7 @@ bool VerifyPeerCertMatchesHost(SSL* ssl, const std::string& host) { X509_check_host(certificate, host.c_str(), host.size(), 0, nullptr) == 1; X509_free(certificate); return is_valid_cert_name; +#endif // !defined(OPENSSL_IS_BORINGSSL) } void LogSSLErrors(const std::string& prefix) { @@ -123,5 +262,12 @@ bool LoadBuiltinSSLRootCertificates(SSL_CTX* ctx) { } #endif // WEBRTC_EXCLUDE_BUILT_IN_SSL_ROOT_CERTS +#ifdef OPENSSL_IS_BORINGSSL +CRYPTO_BUFFER_POOL* GetBufferPool() { + static CRYPTO_BUFFER_POOL* instance = CRYPTO_BUFFER_POOL_new(); + return instance; +} +#endif + } // namespace openssl } // namespace rtc diff --git a/rtc_base/openssl_utility.h b/rtc_base/openssl_utility.h index 022294d4bb..ee29ccd602 100644 --- a/rtc_base/openssl_utility.h +++ b/rtc_base/openssl_utility.h @@ -20,8 +20,21 @@ namespace rtc { // to OpenSSL that are commonly used and don't require global state should be // placed here. namespace openssl { + +#ifdef OPENSSL_IS_BORINGSSL +// Does minimal parsing of a certificate (only verifying the presence of major +// fields), primarily for the purpose of extracting the relevant out +// parameters. Any that the caller is uninterested in can be null. +bool ParseCertificate(CRYPTO_BUFFER* cert_buffer, + CBS* signature_algorithm_oid, + int64_t* expiration_time); +#endif + // Verifies that the hostname provided matches that in the peer certificate // attached to this SSL state. +// TODO(crbug.com/webrtc/11710): When OS certificate verification is available, +// skip compiling this as it adds a dependency on OpenSSL X509 objects, which we +// are trying to avoid in favor of CRYPTO_BUFFERs (see crbug.com/webrtc/11410). bool VerifyPeerCertMatchesHost(SSL* ssl, const std::string& host); // Logs all the errors in the OpenSSL errror queue from the current thread. 
A @@ -35,6 +48,10 @@ void LogSSLErrors(const std::string& prefix); bool LoadBuiltinSSLRootCertificates(SSL_CTX* ssl_ctx); #endif // WEBRTC_EXCLUDE_BUILT_IN_SSL_ROOT_CERTS +#ifdef OPENSSL_IS_BORINGSSL +CRYPTO_BUFFER_POOL* GetBufferPool(); +#endif + } // namespace openssl } // namespace rtc diff --git a/rtc_base/openssl_utility_unittest.cc b/rtc_base/openssl_utility_unittest.cc index 9c9b9717ab..d090524cde 100644 --- a/rtc_base/openssl_utility_unittest.cc +++ b/rtc_base/openssl_utility_unittest.cc @@ -24,8 +24,12 @@ #include #include #include +#ifdef OPENSSL_IS_BORINGSSL +#include +#else #include #include +#endif #include "rtc_base/arraysize.h" #include "rtc_base/checks.h" @@ -169,14 +173,17 @@ const unsigned char kFakeSSLCertificateLegacy[] = { 0x84, 0x0b, 0xc7, 0x15, 0x86, 0xc3, 0xfc, 0x48, 0x55, 0xb5, 0x81, 0x94, 0x73, 0xbd, 0x18, 0xcd, 0x9d, 0x92, 0x47, 0xaa, 0xfd, 0x18}; +#ifdef OPENSSL_IS_BORINGSSL +enum ssl_verify_result_t DummyVerifyCallback(SSL* ssl, uint8_t* out_alert) { + return ssl_verify_ok; +} +#endif + // Creates a client SSL that has completed handshaking with a server that uses // the specified certificate (which must have private key kFakeSSLPrivateKey). // The server is deallocated. This client will have a peer certificate available // and is thus suitable for testing VerifyPeerCertMatchesHost. 
SSL* CreateSSLWithPeerCertificate(const unsigned char* cert, size_t cert_len) { - X509* x509 = - d2i_X509(nullptr, &cert, checked_cast(cert_len)); // NOLINT - RTC_CHECK(x509); const unsigned char* key_ptr = kFakeSSLPrivateKey; EVP_PKEY* key = d2i_PrivateKey( @@ -184,14 +191,33 @@ SSL* CreateSSLWithPeerCertificate(const unsigned char* cert, size_t cert_len) { checked_cast(arraysize(kFakeSSLPrivateKey))); // NOLINT RTC_CHECK(key); - SSL_CTX* ctx = SSL_CTX_new(SSLv23_method()); +#ifdef OPENSSL_IS_BORINGSSL + SSL_CTX* ctx = SSL_CTX_new(TLS_with_buffers_method()); +#else + SSL_CTX* ctx = SSL_CTX_new(TLS_method()); +#endif SSL* client = SSL_new(ctx); SSL* server = SSL_new(ctx); SSL_set_connect_state(client); SSL_set_accept_state(server); +#ifdef OPENSSL_IS_BORINGSSL + bssl::UniquePtr cert_buffer(CRYPTO_BUFFER_new( + static_cast(cert), cert_len, openssl::GetBufferPool())); + RTC_CHECK(cert_buffer); + std::vector cert_buffers; + cert_buffers.push_back(cert_buffer.get()); + RTC_CHECK(1 == SSL_set_chain_and_key(server, cert_buffers.data(), + cert_buffers.size(), key, nullptr)); + // When using crypto buffers we don't get any built-in verification. 
+ SSL_set_custom_verify(client, SSL_VERIFY_PEER, DummyVerifyCallback); +#else + X509* x509 = + d2i_X509(nullptr, &cert, checked_cast(cert_len)); // NOLINT + RTC_CHECK(x509); RTC_CHECK(SSL_use_certificate(server, x509)); RTC_CHECK(SSL_use_PrivateKey(server, key)); +#endif BIO* bio1; BIO* bio2; @@ -221,13 +247,19 @@ SSL* CreateSSLWithPeerCertificate(const unsigned char* cert, size_t cert_len) { SSL_free(server); SSL_CTX_free(ctx); EVP_PKEY_free(key); +#ifndef OPENSSL_IS_BORINGSSL X509_free(x509); +#endif return client; } } // namespace TEST(OpenSSLUtilityTest, VerifyPeerCertMatchesHostFailsOnNoPeerCertificate) { - SSL_CTX* ssl_ctx = SSL_CTX_new(DTLSv1_2_client_method()); +#ifdef OPENSSL_IS_BORINGSSL + SSL_CTX* ssl_ctx = SSL_CTX_new(DTLS_with_buffers_method()); +#else + SSL_CTX* ssl_ctx = SSL_CTX_new(DTLS_method()); +#endif SSL* ssl = SSL_new(ssl_ctx); EXPECT_FALSE(openssl::VerifyPeerCertMatchesHost(ssl, "webrtc.org")); diff --git a/rtc_base/operations_chain.cc b/rtc_base/operations_chain.cc index 68ee20babc..f707d339b6 100644 --- a/rtc_base/operations_chain.cc +++ b/rtc_base/operations_chain.cc @@ -19,12 +19,14 @@ OperationsChain::CallbackHandle::CallbackHandle( : operations_chain_(std::move(operations_chain)) {} OperationsChain::CallbackHandle::~CallbackHandle() { +#if RTC_DCHECK_IS_ON RTC_DCHECK(has_run_); +#endif } void OperationsChain::CallbackHandle::OnOperationComplete() { +#if RTC_DCHECK_IS_ON RTC_DCHECK(!has_run_); -#ifdef RTC_DCHECK_IS_ON has_run_ = true; #endif // RTC_DCHECK_IS_ON operations_chain_->OnOperationComplete(); @@ -49,6 +51,17 @@ OperationsChain::~OperationsChain() { RTC_DCHECK(chained_operations_.empty()); } +void OperationsChain::SetOnChainEmptyCallback( + std::function on_chain_empty_callback) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + on_chain_empty_callback_ = std::move(on_chain_empty_callback); +} + +bool OperationsChain::IsEmpty() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return chained_operations_.empty(); +} + std::function 
OperationsChain::CreateOperationsChainCallback() { return [handle = rtc::scoped_refptr( new CallbackHandle(this))]() { handle->OnOperationComplete(); }; @@ -59,9 +72,12 @@ void OperationsChain::OnOperationComplete() { // The front element is the operation that just completed, remove it. RTC_DCHECK(!chained_operations_.empty()); chained_operations_.pop(); - // If there are any other operations chained, execute the next one. + // If there are any other operations chained, execute the next one. Otherwise, + // invoke the "on chain empty" callback if it has been set. if (!chained_operations_.empty()) { chained_operations_.front()->Run(); + } else if (on_chain_empty_callback_.has_value()) { + on_chain_empty_callback_.value()(); } } diff --git a/rtc_base/operations_chain.h b/rtc_base/operations_chain.h index b6ec46e04a..3dc5995114 100644 --- a/rtc_base/operations_chain.h +++ b/rtc_base/operations_chain.h @@ -18,12 +18,14 @@ #include #include +#include "absl/types/optional.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/ref_count.h" #include "rtc_base/ref_counted_object.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" namespace rtc { @@ -49,11 +51,15 @@ class OperationWithFunctor final : public Operation { : functor_(std::forward(functor)), callback_(std::move(callback)) {} - ~OperationWithFunctor() override { RTC_DCHECK(has_run_); } + ~OperationWithFunctor() override { +#if RTC_DCHECK_IS_ON + RTC_DCHECK(has_run_); +#endif // RTC_DCHECK_IS_ON + } void Run() override { +#if RTC_DCHECK_IS_ON RTC_DCHECK(!has_run_); -#ifdef RTC_DCHECK_IS_ON has_run_ = true; #endif // RTC_DCHECK_IS_ON // The functor being executed may invoke the callback synchronously, @@ -69,7 +75,7 @@ class OperationWithFunctor final : public Operation { private: typename std::remove_reference::type functor_; std::function callback_; -#ifdef 
RTC_DCHECK_IS_ON +#if RTC_DCHECK_IS_ON bool has_run_ = false; #endif // RTC_DCHECK_IS_ON }; @@ -112,6 +118,9 @@ class OperationsChain final : public RefCountedObject { static scoped_refptr Create(); ~OperationsChain(); + void SetOnChainEmptyCallback(std::function on_chain_empty_callback); + bool IsEmpty() const; + // Chains an operation. Chained operations are executed in FIFO order. The // operation starts when |functor| is executed by the OperationsChain and is // contractually obligated to invoke the callback passed to it when the @@ -163,7 +172,7 @@ class OperationsChain final : public RefCountedObject { private: scoped_refptr operations_chain_; -#ifdef RTC_DCHECK_IS_ON +#if RTC_DCHECK_IS_ON bool has_run_ = false; #endif // RTC_DCHECK_IS_ON @@ -175,12 +184,14 @@ class OperationsChain final : public RefCountedObject { std::function CreateOperationsChainCallback(); void OnOperationComplete(); - webrtc::SequenceChecker sequence_checker_; + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker sequence_checker_; // FIFO-list of operations that are chained. An operation that is executing // remains on this list until it has completed by invoking the callback passed // to it. 
std::queue> chained_operations_ RTC_GUARDED_BY(sequence_checker_); + absl::optional> on_chain_empty_callback_ + RTC_GUARDED_BY(sequence_checker_); RTC_DISALLOW_COPY_AND_ASSIGN(OperationsChain); }; diff --git a/rtc_base/operations_chain_unittest.cc b/rtc_base/operations_chain_unittest.cc index 968f94c060..792a2c76ff 100644 --- a/rtc_base/operations_chain_unittest.cc +++ b/rtc_base/operations_chain_unittest.cc @@ -10,13 +10,14 @@ #include "rtc_base/operations_chain.h" +#include #include #include #include #include -#include "rtc_base/bind.h" #include "rtc_base/event.h" +#include "rtc_base/gunit.h" #include "rtc_base/thread.h" #include "test/gmock.h" #include "test/gtest.h" @@ -25,6 +26,12 @@ namespace rtc { using ::testing::ElementsAre; +namespace { + +constexpr int kDefaultTimeout = 3000; + +} // namespace + class OperationTracker { public: OperationTracker() : background_thread_(Thread::Create()) { @@ -120,6 +127,31 @@ class OperationTrackerProxy { return event; } + void SetOnChainEmptyCallback(std::function on_chain_empty_callback) { + Event event; + operations_chain_thread_->PostTask( + RTC_FROM_HERE, + [this, &event, + on_chain_empty_callback = std::move(on_chain_empty_callback)]() { + operations_chain_->SetOnChainEmptyCallback( + std::move(on_chain_empty_callback)); + event.Set(); + }); + event.Wait(Event::kForever); + } + + bool IsEmpty() { + Event event; + bool is_empty = false; + operations_chain_thread_->PostTask( + RTC_FROM_HERE, [this, &event, &is_empty]() { + is_empty = operations_chain_->IsEmpty(); + event.Set(); + }); + event.Wait(Event::kForever); + return is_empty; + } + std::unique_ptr ReleaseOperationChain() { std::unique_ptr event = std::make_unique(); operations_chain_thread_->PostTask(RTC_FROM_HERE, @@ -326,6 +358,87 @@ TEST(OperationsChainTest, OperationsAreExecutedInOrder) { operation6_completed_event.get())); } +TEST(OperationsChainTest, IsEmpty) { + OperationTrackerProxy operation_tracker_proxy; + 
operation_tracker_proxy.Initialize()->Wait(Event::kForever); + + // The chain is initially empty. + EXPECT_TRUE(operation_tracker_proxy.IsEmpty()); + // Chain a single event. + Event unblock_async_operation_event0; + auto async_operation_completed_event0 = + operation_tracker_proxy.PostAsynchronousOperation( + &unblock_async_operation_event0); + // The chain is not empty while an event is pending. + EXPECT_FALSE(operation_tracker_proxy.IsEmpty()); + // Completing the operation empties the chain. + unblock_async_operation_event0.Set(); + async_operation_completed_event0->Wait(Event::kForever); + EXPECT_TRUE(operation_tracker_proxy.IsEmpty()); + + // Chain multiple events. + Event unblock_async_operation_event1; + auto async_operation_completed_event1 = + operation_tracker_proxy.PostAsynchronousOperation( + &unblock_async_operation_event1); + Event unblock_async_operation_event2; + auto async_operation_completed_event2 = + operation_tracker_proxy.PostAsynchronousOperation( + &unblock_async_operation_event2); + // Again, the chain is not empty while an event is pending. + EXPECT_FALSE(operation_tracker_proxy.IsEmpty()); + // Upon completing the first event, the chain is still not empty. + unblock_async_operation_event1.Set(); + async_operation_completed_event1->Wait(Event::kForever); + EXPECT_FALSE(operation_tracker_proxy.IsEmpty()); + // Completing the last evenet empties the chain. + unblock_async_operation_event2.Set(); + async_operation_completed_event2->Wait(Event::kForever); + EXPECT_TRUE(operation_tracker_proxy.IsEmpty()); +} + +TEST(OperationsChainTest, OnChainEmptyCallback) { + OperationTrackerProxy operation_tracker_proxy; + operation_tracker_proxy.Initialize()->Wait(Event::kForever); + + std::atomic on_empty_callback_counter(0u); + operation_tracker_proxy.SetOnChainEmptyCallback( + [&on_empty_callback_counter] { ++on_empty_callback_counter; }); + + // Chain a single event. 
+ Event unblock_async_operation_event0; + auto async_operation_completed_event0 = + operation_tracker_proxy.PostAsynchronousOperation( + &unblock_async_operation_event0); + // The callback is not invoked until the operation has completed. + EXPECT_EQ(0u, on_empty_callback_counter); + // Completing the operation empties the chain, invoking the callback. + unblock_async_operation_event0.Set(); + async_operation_completed_event0->Wait(Event::kForever); + EXPECT_TRUE_WAIT(1u == on_empty_callback_counter, kDefaultTimeout); + + // Chain multiple events. + Event unblock_async_operation_event1; + auto async_operation_completed_event1 = + operation_tracker_proxy.PostAsynchronousOperation( + &unblock_async_operation_event1); + Event unblock_async_operation_event2; + auto async_operation_completed_event2 = + operation_tracker_proxy.PostAsynchronousOperation( + &unblock_async_operation_event2); + // Again, the callback is not invoked until the operation has completed. + EXPECT_TRUE_WAIT(1u == on_empty_callback_counter, kDefaultTimeout); + // Upon completing the first event, the chain is still not empty, so the + // callback must not be invoked yet. + unblock_async_operation_event1.Set(); + async_operation_completed_event1->Wait(Event::kForever); + EXPECT_TRUE_WAIT(1u == on_empty_callback_counter, kDefaultTimeout); + // Completing the last evenet empties the chain, invoking the callback. 
+ unblock_async_operation_event2.Set(); + async_operation_completed_event2->Wait(Event::kForever); + EXPECT_TRUE_WAIT(2u == on_empty_callback_counter, kDefaultTimeout); +} + TEST(OperationsChainTest, SafeToReleaseReferenceToOperationChainWhileOperationIsPending) { OperationTrackerProxy operation_tracker_proxy; @@ -369,14 +482,15 @@ TEST(OperationsChainTest, FunctorIsNotDestroyedWhileExecuting) { #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -TEST(OperationsChainTest, OperationNotInvokingCallbackShouldCrash) { +TEST(OperationsChainDeathTest, OperationNotInvokingCallbackShouldCrash) { scoped_refptr operations_chain = OperationsChain::Create(); EXPECT_DEATH( operations_chain->ChainOperation([](std::function callback) {}), ""); } -TEST(OperationsChainTest, OperationInvokingCallbackMultipleTimesShouldCrash) { +TEST(OperationsChainDeathTest, + OperationInvokingCallbackMultipleTimesShouldCrash) { scoped_refptr operations_chain = OperationsChain::Create(); EXPECT_DEATH( operations_chain->ChainOperation([](std::function callback) { diff --git a/rtc_base/physical_socket_server.cc b/rtc_base/physical_socket_server.cc index cf65300b4a..7904548041 100644 --- a/rtc_base/physical_socket_server.cc +++ b/rtc_base/physical_socket_server.cc @@ -48,6 +48,7 @@ #include "rtc_base/logging.h" #include "rtc_base/network_monitor.h" #include "rtc_base/null_socket_server.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" #if defined(WEBRTC_LINUX) @@ -103,15 +104,21 @@ typedef char* SockOptArg; #endif #endif -namespace rtc { +namespace { +class ScopedSetTrue { + public: + ScopedSetTrue(bool* value) : value_(value) { + RTC_DCHECK(!*value_); + *value_ = true; + } + ~ScopedSetTrue() { *value_ = false; } -std::unique_ptr SocketServer::CreateDefault() { -#if defined(__native_client__) - return std::unique_ptr(new rtc::NullSocketServer); -#else - return std::unique_ptr(new rtc::PhysicalSocketServer); -#endif -} + private: + bool* value_; +}; 
+} // namespace + +namespace rtc { PhysicalSocket::PhysicalSocket(PhysicalSocketServer* ss, SOCKET s) : ss_(ss), @@ -267,12 +274,12 @@ int PhysicalSocket::DoConnect(const SocketAddress& connect_addr) { } int PhysicalSocket::GetError() const { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); return error_; } void PhysicalSocket::SetError(int error) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); error_ = error; } @@ -321,10 +328,17 @@ int PhysicalSocket::SetOption(Option opt, int value) { #if defined(WEBRTC_POSIX) if (sopt == IPV6_TCLASS) { // Set the IPv4 option in all cases to support dual-stack sockets. + // Don't bother checking the return code, as this is expected to fail if + // it's not actually dual-stack. ::setsockopt(s_, IPPROTO_IP, IP_TOS, (SockOptArg)&value, sizeof(value)); } #endif - return ::setsockopt(s_, slevel, sopt, (SockOptArg)&value, sizeof(value)); + int result = + ::setsockopt(s_, slevel, sopt, (SockOptArg)&value, sizeof(value)); + if (result != 0) { + UpdateLastError(); + } + return result; } int PhysicalSocket::Send(const void* pv, size_t cb) { @@ -746,21 +760,14 @@ uint32_t SocketDispatcher::GetRequestedEvents() { return enabled_events(); } -void SocketDispatcher::OnPreEvent(uint32_t ff) { +#if defined(WEBRTC_WIN) + +void SocketDispatcher::OnEvent(uint32_t ff, int err) { if ((ff & DE_CONNECT) != 0) state_ = CS_CONNECTED; -#if defined(WEBRTC_WIN) -// We set CS_CLOSED from CheckSignalClose. -#elif defined(WEBRTC_POSIX) - if ((ff & DE_CLOSE) != 0) - state_ = CS_CLOSED; -#endif -} - -#if defined(WEBRTC_WIN) + // We set CS_CLOSED from CheckSignalClose. -void SocketDispatcher::OnEvent(uint32_t ff, int err) { int cache_id = id_; // Make sure we deliver connect/accept first. Otherwise, consumers may see // something like a READ followed by a CONNECT, which would be odd. 
@@ -795,6 +802,12 @@ void SocketDispatcher::OnEvent(uint32_t ff, int err) { #elif defined(WEBRTC_POSIX) void SocketDispatcher::OnEvent(uint32_t ff, int err) { + if ((ff & DE_CONNECT) != 0) + state_ = CS_CONNECTED; + + if ((ff & DE_CLOSE) != 0) + state_ = CS_CLOSED; + #if defined(WEBRTC_USE_EPOLL) // Remember currently enabled events so we can combine multiple changes // into one update call later. @@ -835,7 +848,7 @@ void SocketDispatcher::OnEvent(uint32_t ff, int err) { #if defined(WEBRTC_USE_EPOLL) -static int GetEpollEvents(uint32_t ff) { +inline static int GetEpollEvents(uint32_t ff) { int events = 0; if (ff & (DE_READ | DE_ACCEPT)) { events |= EPOLLIN; @@ -906,22 +919,32 @@ int SocketDispatcher::Close() { } #if defined(WEBRTC_POSIX) -class EventDispatcher : public Dispatcher { +// Sets the value of a boolean value to false when signaled. +class Signaler : public Dispatcher { public: - EventDispatcher(PhysicalSocketServer* ss) : ss_(ss), fSignaled_(false) { - if (pipe(afd_) < 0) - RTC_LOG(LERROR) << "pipe failed"; + Signaler(PhysicalSocketServer* ss, bool& flag_to_clear) + : ss_(ss), + afd_([] { + std::array afd = {-1, -1}; + + if (pipe(afd.data()) < 0) { + RTC_LOG(LERROR) << "pipe failed"; + } + return afd; + }()), + fSignaled_(false), + flag_to_clear_(flag_to_clear) { ss_->Add(this); } - ~EventDispatcher() override { + ~Signaler() override { ss_->Remove(this); close(afd_[0]); close(afd_[1]); } virtual void Signal() { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); if (!fSignaled_) { const uint8_t b[1] = {0}; const ssize_t res = write(afd_[1], b, sizeof(b)); @@ -932,30 +955,30 @@ class EventDispatcher : public Dispatcher { uint32_t GetRequestedEvents() override { return DE_READ; } - void OnPreEvent(uint32_t ff) override { + void OnEvent(uint32_t ff, int err) override { // It is not possible to perfectly emulate an auto-resetting event with // pipes. This simulates it by resetting before the event is handled. 
- CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); if (fSignaled_) { uint8_t b[4]; // Allow for reading more than 1 byte, but expect 1. const ssize_t res = read(afd_[0], b, sizeof(b)); RTC_DCHECK_EQ(1, res); fSignaled_ = false; } + flag_to_clear_ = false; } - void OnEvent(uint32_t ff, int err) override { RTC_NOTREACHED(); } - int GetDescriptor() override { return afd_[0]; } bool IsDescriptorClosed() override { return false; } private: - PhysicalSocketServer* ss_; - int afd_[2]; - bool fSignaled_; - CriticalSection crit_; + PhysicalSocketServer* const ss_; + const std::array afd_; + bool fSignaled_ RTC_GUARDED_BY(mutex_); + webrtc::Mutex mutex_; + bool& flag_to_clear_; }; #endif // WEBRTC_POSIX @@ -974,16 +997,18 @@ static uint32_t FlagsToEvents(uint32_t events) { return ffFD; } -class EventDispatcher : public Dispatcher { +// Sets the value of a boolean value to false when signaled. +class Signaler : public Dispatcher { public: - EventDispatcher(PhysicalSocketServer* ss) : ss_(ss) { + Signaler(PhysicalSocketServer* ss, bool& flag_to_clear) + : ss_(ss), flag_to_clear_(flag_to_clear) { hev_ = WSACreateEvent(); if (hev_) { ss_->Add(this); } } - ~EventDispatcher() override { + ~Signaler() override { if (hev_ != nullptr) { ss_->Remove(this); WSACloseEvent(hev_); @@ -998,9 +1023,10 @@ class EventDispatcher : public Dispatcher { uint32_t GetRequestedEvents() override { return 0; } - void OnPreEvent(uint32_t ff) override { WSAResetEvent(hev_); } - - void OnEvent(uint32_t ff, int err) override {} + void OnEvent(uint32_t ff, int err) override { + WSAResetEvent(hev_); + flag_to_clear_ = false; + } WSAEVENT GetWSAEvent() override { return hev_; } @@ -1011,24 +1037,10 @@ class EventDispatcher : public Dispatcher { private: PhysicalSocketServer* ss_; WSAEVENT hev_; + bool& flag_to_clear_; }; #endif // WEBRTC_WIN -// Sets the value of a boolean value to false when signaled. 
-class Signaler : public EventDispatcher { - public: - Signaler(PhysicalSocketServer* ss, bool* pf) : EventDispatcher(ss), pf_(pf) {} - ~Signaler() override {} - - void OnEvent(uint32_t ff, int err) override { - if (pf_) - *pf_ = false; - } - - private: - bool* pf_; -}; - PhysicalSocketServer::PhysicalSocketServer() : #if defined(WEBRTC_USE_EPOLL) @@ -1048,7 +1060,8 @@ PhysicalSocketServer::PhysicalSocketServer() // Note that -1 == INVALID_SOCKET, the alias used by later checks. } #endif - signal_wakeup_ = new Signaler(this, &fWait_); + // The `fWait_` flag to be cleared by the Signaler. + signal_wakeup_ = new Signaler(this, fWait_); } PhysicalSocketServer::~PhysicalSocketServer() { @@ -1061,7 +1074,8 @@ PhysicalSocketServer::~PhysicalSocketServer() { close(epoll_fd_); } #endif - RTC_DCHECK(dispatchers_.empty()); + RTC_DCHECK(dispatcher_by_key_.empty()); + RTC_DCHECK(key_by_dispatcher_.empty()); } void PhysicalSocketServer::WakeUp() { @@ -1100,45 +1114,32 @@ AsyncSocket* PhysicalSocketServer::WrapSocket(SOCKET s) { void PhysicalSocketServer::Add(Dispatcher* pdispatcher) { CritScope cs(&crit_); - if (processing_dispatchers_) { - // A dispatcher is being added while a "Wait" call is processing the - // list of socket events. - // Defer adding to "dispatchers_" set until processing is done to avoid - // invalidating the iterator in "Wait". 
- pending_remove_dispatchers_.erase(pdispatcher); - pending_add_dispatchers_.insert(pdispatcher); - } else { - dispatchers_.insert(pdispatcher); + if (key_by_dispatcher_.count(pdispatcher)) { + RTC_LOG(LS_WARNING) + << "PhysicalSocketServer asked to add a duplicate dispatcher."; + return; } + uint64_t key = next_dispatcher_key_++; + dispatcher_by_key_.emplace(key, pdispatcher); + key_by_dispatcher_.emplace(pdispatcher, key); #if defined(WEBRTC_USE_EPOLL) if (epoll_fd_ != INVALID_SOCKET) { - AddEpoll(pdispatcher); + AddEpoll(pdispatcher, key); } #endif // WEBRTC_USE_EPOLL } void PhysicalSocketServer::Remove(Dispatcher* pdispatcher) { CritScope cs(&crit_); - if (processing_dispatchers_) { - // A dispatcher is being removed while a "Wait" call is processing the - // list of socket events. - // Defer removal from "dispatchers_" set until processing is done to avoid - // invalidating the iterator in "Wait". - if (!pending_add_dispatchers_.erase(pdispatcher) && - dispatchers_.find(pdispatcher) == dispatchers_.end()) { - RTC_LOG(LS_WARNING) << "PhysicalSocketServer asked to remove a unknown " - "dispatcher, potentially from a duplicate call to " - "Add."; - return; - } - - pending_remove_dispatchers_.insert(pdispatcher); - } else if (!dispatchers_.erase(pdispatcher)) { + if (!key_by_dispatcher_.count(pdispatcher)) { RTC_LOG(LS_WARNING) << "PhysicalSocketServer asked to remove a unknown " "dispatcher, potentially from a duplicate call to Add."; return; } + uint64_t key = key_by_dispatcher_.at(pdispatcher); + key_by_dispatcher_.erase(pdispatcher); + dispatcher_by_key_.erase(key); #if defined(WEBRTC_USE_EPOLL) if (epoll_fd_ != INVALID_SOCKET) { RemoveEpoll(pdispatcher); @@ -1152,34 +1153,22 @@ void PhysicalSocketServer::Update(Dispatcher* pdispatcher) { return; } + // Don't update dispatchers that haven't yet been added. 
CritScope cs(&crit_); - if (dispatchers_.find(pdispatcher) == dispatchers_.end()) { + if (!key_by_dispatcher_.count(pdispatcher)) { return; } - UpdateEpoll(pdispatcher); + UpdateEpoll(pdispatcher, key_by_dispatcher_.at(pdispatcher)); #endif } -void PhysicalSocketServer::AddRemovePendingDispatchers() { - if (!pending_add_dispatchers_.empty()) { - for (Dispatcher* pdispatcher : pending_add_dispatchers_) { - dispatchers_.insert(pdispatcher); - } - pending_add_dispatchers_.clear(); - } - - if (!pending_remove_dispatchers_.empty()) { - for (Dispatcher* pdispatcher : pending_remove_dispatchers_) { - dispatchers_.erase(pdispatcher); - } - pending_remove_dispatchers_.clear(); - } -} - #if defined(WEBRTC_POSIX) bool PhysicalSocketServer::Wait(int cmsWait, bool process_io) { + // We don't support reentrant waiting. + RTC_DCHECK(!waiting_); + ScopedSetTrue s(&waiting_); #if defined(WEBRTC_USE_EPOLL) // We don't keep a dedicated "epoll" descriptor containing only the non-IO // (i.e. signaling) dispatcher, so "poll" will be used instead of the default @@ -1205,6 +1194,9 @@ static void ProcessEvents(Dispatcher* dispatcher, &len); } + // Most often the socket is writable or readable or both, so make a single + // virtual call to get requested events + const uint32_t requested_events = dispatcher->GetRequestedEvents(); uint32_t ff = 0; // Check readable descriptors. If we're waiting on an accept, signal @@ -1212,7 +1204,7 @@ static void ProcessEvents(Dispatcher* dispatcher, // readable or really closed. // TODO(pthatcher): Only peek at TCP descriptors. if (readable) { - if (dispatcher->GetRequestedEvents() & DE_ACCEPT) { + if (requested_events & DE_ACCEPT) { ff |= DE_ACCEPT; } else if (errcode || dispatcher->IsDescriptorClosed()) { ff |= DE_CLOSE; @@ -1224,7 +1216,7 @@ static void ProcessEvents(Dispatcher* dispatcher, // Check writable descriptors. If we're waiting on a connect, detect // success versus failure by the reaped error code. 
if (writable) { - if (dispatcher->GetRequestedEvents() & DE_CONNECT) { + if (requested_events & DE_CONNECT) { if (!errcode) { ff |= DE_CONNECT; } else { @@ -1237,7 +1229,6 @@ static void ProcessEvents(Dispatcher* dispatcher, // Tell the descriptor about the event. if (ff != 0) { - dispatcher->OnPreEvent(ff); dispatcher->OnEvent(ff, errcode); } } @@ -1258,13 +1249,9 @@ bool PhysicalSocketServer::WaitSelect(int cmsWait, bool process_io) { stop_us = rtc::TimeMicros() + cmsWait * 1000; } - // Zero all fd_sets. Don't need to do this inside the loop since - // select() zeros the descriptors not signaled fd_set fdsRead; - FD_ZERO(&fdsRead); fd_set fdsWrite; - FD_ZERO(&fdsWrite); // Explicitly unpoison these FDs on MemorySanitizer which doesn't handle the // inline assembly in FD_ZERO. // http://crbug.com/344505 @@ -1276,16 +1263,22 @@ bool PhysicalSocketServer::WaitSelect(int cmsWait, bool process_io) { fWait_ = true; while (fWait_) { + // Zero all fd_sets. Although select() zeros the descriptors not signaled, + // we may need to do this for dispatchers that were deleted while + // iterating. + FD_ZERO(&fdsRead); + FD_ZERO(&fdsWrite); int fdmax = -1; { CritScope cr(&crit_); - // TODO(jbauch): Support re-entrant waiting. - RTC_DCHECK(!processing_dispatchers_); - for (Dispatcher* pdispatcher : dispatchers_) { + current_dispatcher_keys_.clear(); + for (auto const& kv : dispatcher_by_key_) { + uint64_t key = kv.first; + Dispatcher* pdispatcher = kv.second; // Query dispatchers for read and write wait state - RTC_DCHECK(pdispatcher); if (!process_io && (pdispatcher != signal_wakeup_)) continue; + current_dispatcher_keys_.push_back(key); int fd = pdispatcher->GetDescriptor(); // "select"ing a file descriptor that is equal to or larger than // FD_SETSIZE will result in undefined behavior. 
@@ -1323,8 +1316,14 @@ bool PhysicalSocketServer::WaitSelect(int cmsWait, bool process_io) { } else { // We have signaled descriptors CritScope cr(&crit_); - processing_dispatchers_ = true; - for (Dispatcher* pdispatcher : dispatchers_) { + // Iterate only on the dispatchers whose sockets were passed into + // WSAEventSelect; this avoids the ABA problem (a socket being + // destroyed and a new one created with the same file descriptor). + for (uint64_t key : current_dispatcher_keys_) { + if (!dispatcher_by_key_.count(key)) + continue; + Dispatcher* pdispatcher = dispatcher_by_key_.at(key); + int fd = pdispatcher->GetDescriptor(); bool readable = FD_ISSET(fd, &fdsRead); @@ -1340,11 +1339,6 @@ bool PhysicalSocketServer::WaitSelect(int cmsWait, bool process_io) { // The error code can be signaled through reads or writes. ProcessEvents(pdispatcher, readable, writable, readable || writable); } - - processing_dispatchers_ = false; - // Process deferred dispatchers that have been added/removed while the - // events were handled above. - AddRemovePendingDispatchers(); } // Recalc the time remaining to wait. Doing it here means it doesn't get @@ -1365,13 +1359,7 @@ bool PhysicalSocketServer::WaitSelect(int cmsWait, bool process_io) { #if defined(WEBRTC_USE_EPOLL) -// Initial number of events to process with one call to "epoll_wait". -static const size_t kInitialEpollEvents = 128; - -// Maximum number of events to process with one call to "epoll_wait". 
-static const size_t kMaxEpollEvents = 8192; - -void PhysicalSocketServer::AddEpoll(Dispatcher* pdispatcher) { +void PhysicalSocketServer::AddEpoll(Dispatcher* pdispatcher, uint64_t key) { RTC_DCHECK(epoll_fd_ != INVALID_SOCKET); int fd = pdispatcher->GetDescriptor(); RTC_DCHECK(fd != INVALID_SOCKET); @@ -1381,7 +1369,7 @@ void PhysicalSocketServer::AddEpoll(Dispatcher* pdispatcher) { struct epoll_event event = {0}; event.events = GetEpollEvents(pdispatcher->GetRequestedEvents()); - event.data.ptr = pdispatcher; + event.data.u64 = key; int err = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, fd, &event); RTC_DCHECK_EQ(err, 0); if (err == -1) { @@ -1410,7 +1398,7 @@ void PhysicalSocketServer::RemoveEpoll(Dispatcher* pdispatcher) { } } -void PhysicalSocketServer::UpdateEpoll(Dispatcher* pdispatcher) { +void PhysicalSocketServer::UpdateEpoll(Dispatcher* pdispatcher, uint64_t key) { RTC_DCHECK(epoll_fd_ != INVALID_SOCKET); int fd = pdispatcher->GetDescriptor(); RTC_DCHECK(fd != INVALID_SOCKET); @@ -1420,7 +1408,7 @@ void PhysicalSocketServer::UpdateEpoll(Dispatcher* pdispatcher) { struct epoll_event event = {0}; event.events = GetEpollEvents(pdispatcher->GetRequestedEvents()); - event.data.ptr = pdispatcher; + event.data.u64 = key; int err = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, fd, &event); RTC_DCHECK_EQ(err, 0); if (err == -1) { @@ -1437,20 +1425,13 @@ bool PhysicalSocketServer::WaitEpoll(int cmsWait) { tvStop = TimeAfter(cmsWait); } - if (epoll_events_.empty()) { - // The initial space to receive events is created only if epoll is used. 
- epoll_events_.resize(kInitialEpollEvents); - } - fWait_ = true; - while (fWait_) { // Wait then call handlers as appropriate // < 0 means error // 0 means timeout // > 0 means count of descriptors ready - int n = epoll_wait(epoll_fd_, &epoll_events_[0], - static_cast(epoll_events_.size()), + int n = epoll_wait(epoll_fd_, epoll_events_.data(), epoll_events_.size(), static_cast(tvWait)); if (n < 0) { if (errno != EINTR) { @@ -1469,11 +1450,12 @@ bool PhysicalSocketServer::WaitEpoll(int cmsWait) { CritScope cr(&crit_); for (int i = 0; i < n; ++i) { const epoll_event& event = epoll_events_[i]; - Dispatcher* pdispatcher = static_cast(event.data.ptr); - if (dispatchers_.find(pdispatcher) == dispatchers_.end()) { + uint64_t key = event.data.u64; + if (!dispatcher_by_key_.count(key)) { // The dispatcher for this socket no longer exists. continue; } + Dispatcher* pdispatcher = dispatcher_by_key_.at(key); bool readable = (event.events & (EPOLLIN | EPOLLPRI)); bool writable = (event.events & EPOLLOUT); @@ -1483,16 +1465,9 @@ bool PhysicalSocketServer::WaitEpoll(int cmsWait) { } } - if (static_cast(n) == epoll_events_.size() && - epoll_events_.size() < kMaxEpollEvents) { - // We used the complete space to receive events, increase size for future - // iterations. - epoll_events_.resize(std::max(epoll_events_.size() * 2, kMaxEpollEvents)); - } - if (cmsWait != kForever) { tvWait = TimeDiff(tvStop, TimeMillis()); - if (tvWait < 0) { + if (tvWait <= 0) { // Return success on timeout. return true; } @@ -1575,6 +1550,10 @@ bool PhysicalSocketServer::WaitPoll(int cmsWait, Dispatcher* dispatcher) { #if defined(WEBRTC_WIN) bool PhysicalSocketServer::Wait(int cmsWait, bool process_io) { + // We don't support reentrant waiting. 
+ RTC_DCHECK(!waiting_); + ScopedSetTrue s(&waiting_); + int64_t cmsTotal = cmsWait; int64_t cmsElapsed = 0; int64_t msStart = Time(); @@ -1582,37 +1561,40 @@ bool PhysicalSocketServer::Wait(int cmsWait, bool process_io) { fWait_ = true; while (fWait_) { std::vector events; - std::vector event_owners; + std::vector event_owners; events.push_back(socket_ev_); { CritScope cr(&crit_); - // TODO(jbauch): Support re-entrant waiting. - RTC_DCHECK(!processing_dispatchers_); - - // Calling "CheckSignalClose" might remove a closed dispatcher from the - // set. This must be deferred to prevent invalidating the iterator. - processing_dispatchers_ = true; - for (Dispatcher* disp : dispatchers_) { + // Get a snapshot of all current dispatchers; this is used to avoid the + // ABA problem (see later comment) and avoids the dispatcher_by_key_ + // iterator being invalidated by calling CheckSignalClose, which may + // remove the dispatcher from the list. + current_dispatcher_keys_.clear(); + for (auto const& kv : dispatcher_by_key_) { + current_dispatcher_keys_.push_back(kv.first); + } + for (uint64_t key : current_dispatcher_keys_) { + if (!dispatcher_by_key_.count(key)) { + continue; + } + Dispatcher* disp = dispatcher_by_key_.at(key); + if (!disp) + continue; if (!process_io && (disp != signal_wakeup_)) continue; SOCKET s = disp->GetSocket(); if (disp->CheckSignalClose()) { - // We just signalled close, don't poll this socket + // We just signalled close, don't poll this socket. } else if (s != INVALID_SOCKET) { WSAEventSelect(s, events[0], FlagsToEvents(disp->GetRequestedEvents())); } else { events.push_back(disp->GetWSAEvent()); - event_owners.push_back(disp); + event_owners.push_back(key); } } - - processing_dispatchers_ = false; - // Process deferred dispatchers that have been added/removed while the - // events were handled above. - AddRemovePendingDispatchers(); } // Which is shorter, the delay wait or the asked wait? 
@@ -1644,15 +1626,22 @@ bool PhysicalSocketServer::Wait(int cmsWait, bool process_io) { int index = dw - WSA_WAIT_EVENT_0; if (index > 0) { --index; // The first event is the socket event - Dispatcher* disp = event_owners[index]; - // The dispatcher could have been removed while waiting for events. - if (dispatchers_.find(disp) != dispatchers_.end()) { - disp->OnPreEvent(0); - disp->OnEvent(0, 0); + uint64_t key = event_owners[index]; + if (!dispatcher_by_key_.count(key)) { + // The dispatcher could have been removed while waiting for events. + continue; } + Dispatcher* disp = dispatcher_by_key_.at(key); + disp->OnEvent(0, 0); } else if (process_io) { - processing_dispatchers_ = true; - for (Dispatcher* disp : dispatchers_) { + // Iterate only on the dispatchers whose sockets were passed into + // WSAEventSelect; this avoids the ABA problem (a socket being + // destroyed and a new one created with the same SOCKET handle). + for (uint64_t key : current_dispatcher_keys_) { + if (!dispatcher_by_key_.count(key)) { + continue; + } + Dispatcher* disp = dispatcher_by_key_.at(key); SOCKET s = disp->GetSocket(); if (s == INVALID_SOCKET) continue; @@ -1713,16 +1702,10 @@ bool PhysicalSocketServer::Wait(int cmsWait, bool process_io) { errcode = wsaEvents.iErrorCode[FD_CLOSE_BIT]; } if (ff != 0) { - disp->OnPreEvent(ff); disp->OnEvent(ff, errcode); } } } - - processing_dispatchers_ = false; - // Process deferred dispatchers that have been added/removed while the - // events were handled above. 
- AddRemovePendingDispatchers(); } // Reset the network event until new activity occurs diff --git a/rtc_base/physical_socket_server.h b/rtc_base/physical_socket_server.h index 2f53d4d4fe..4b7957eb20 100644 --- a/rtc_base/physical_socket_server.h +++ b/rtc_base/physical_socket_server.h @@ -16,13 +16,16 @@ #define WEBRTC_USE_EPOLL 1 #endif +#include #include -#include +#include #include -#include "rtc_base/critical_section.h" -#include "rtc_base/net_helpers.h" +#include "rtc_base/async_resolver.h" +#include "rtc_base/async_resolver_interface.h" +#include "rtc_base/deprecated/recursive_critical_section.h" #include "rtc_base/socket_server.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/system/rtc_export.h" #include "rtc_base/thread_annotations.h" @@ -47,7 +50,6 @@ class Dispatcher { public: virtual ~Dispatcher() {} virtual uint32_t GetRequestedEvents() = 0; - virtual void OnPreEvent(uint32_t ff) = 0; virtual void OnEvent(uint32_t ff, int err) = 0; #if defined(WEBRTC_WIN) virtual WSAEVENT GetWSAEvent() = 0; @@ -81,33 +83,51 @@ class RTC_EXPORT PhysicalSocketServer : public SocketServer { void Update(Dispatcher* dispatcher); private: - typedef std::set DispatcherSet; - - void AddRemovePendingDispatchers() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + // The number of events to process with one call to "epoll_wait". + static constexpr size_t kNumEpollEvents = 128; #if defined(WEBRTC_POSIX) bool WaitSelect(int cms, bool process_io); #endif // WEBRTC_POSIX #if defined(WEBRTC_USE_EPOLL) - void AddEpoll(Dispatcher* dispatcher); + void AddEpoll(Dispatcher* dispatcher, uint64_t key); void RemoveEpoll(Dispatcher* dispatcher); - void UpdateEpoll(Dispatcher* dispatcher); + void UpdateEpoll(Dispatcher* dispatcher, uint64_t key); bool WaitEpoll(int cms); bool WaitPoll(int cms, Dispatcher* dispatcher); + // This array is accessed in isolation by a thread calling into Wait(). 
+ // It's useless to use a SequenceChecker to guard it because a socket + // server can outlive the thread it's bound to, forcing the Wait call + // to have to reset the sequence checker on Wait calls. + std::array epoll_events_; const int epoll_fd_ = INVALID_SOCKET; - std::vector epoll_events_; #endif // WEBRTC_USE_EPOLL - DispatcherSet dispatchers_ RTC_GUARDED_BY(crit_); - DispatcherSet pending_add_dispatchers_ RTC_GUARDED_BY(crit_); - DispatcherSet pending_remove_dispatchers_ RTC_GUARDED_BY(crit_); - bool processing_dispatchers_ RTC_GUARDED_BY(crit_) = false; + // uint64_t keys are used to uniquely identify a dispatcher in order to avoid + // the ABA problem during the epoll loop (a dispatcher being destroyed and + // replaced by one with the same address). + uint64_t next_dispatcher_key_ RTC_GUARDED_BY(crit_) = 0; + std::unordered_map dispatcher_by_key_ + RTC_GUARDED_BY(crit_); + // Reverse lookup necessary for removals/updates. + std::unordered_map key_by_dispatcher_ + RTC_GUARDED_BY(crit_); + // A list of dispatcher keys that we're interested in for the current + // select() or WSAWaitForMultipleEvents() loop. Again, used to avoid the ABA + // problem (a socket being destroyed and a new one created with the same + // handle, erroneously receiving the events from the destroyed socket). + // + // Kept as a member variable just for efficiency. + std::vector current_dispatcher_keys_; Signaler* signal_wakeup_; // Assigned in constructor only - CriticalSection crit_; + RecursiveCriticalSection crit_; #if defined(WEBRTC_WIN) const WSAEVENT socket_ev_; #endif bool fWait_; + // Are we currently in a select()/epoll()/WSAWaitForMultipleEvents loop? + // Used for a DCHECK, because we don't support reentrant waiting. 
+ bool waiting_ = false; }; class PhysicalSocket : public AsyncSocket, public sigslot::has_slots<> { @@ -183,8 +203,8 @@ class PhysicalSocket : public AsyncSocket, public sigslot::has_slots<> { SOCKET s_; bool udp_; int family_ = 0; - CriticalSection crit_; - int error_ RTC_GUARDED_BY(crit_); + mutable webrtc::Mutex mutex_; + int error_ RTC_GUARDED_BY(mutex_); ConnState state_; AsyncResolver* resolver_; @@ -217,7 +237,6 @@ class SocketDispatcher : public Dispatcher, public PhysicalSocket { #endif uint32_t GetRequestedEvents() override; - void OnPreEvent(uint32_t ff) override; void OnEvent(uint32_t ff, int err) override; int Close() override; diff --git a/rtc_base/physical_socket_server_unittest.cc b/rtc_base/physical_socket_server_unittest.cc index 586b9db292..3762762f85 100644 --- a/rtc_base/physical_socket_server_unittest.cc +++ b/rtc_base/physical_socket_server_unittest.cc @@ -18,6 +18,7 @@ #include "rtc_base/gunit.h" #include "rtc_base/ip_address.h" #include "rtc_base/logging.h" +#include "rtc_base/net_helpers.h" #include "rtc_base/network_monitor.h" #include "rtc_base/socket_unittest.h" #include "rtc_base/test_utils.h" @@ -381,6 +382,15 @@ TEST_F(PhysicalSocketTest, TestCloseInClosedCallbackIPv6) { SocketTest::TestCloseInClosedCallbackIPv6(); } +TEST_F(PhysicalSocketTest, TestDeleteInReadCallbackIPv4) { + MAYBE_SKIP_IPV4; + SocketTest::TestDeleteInReadCallbackIPv4(); +} + +TEST_F(PhysicalSocketTest, TestDeleteInReadCallbackIPv6) { + SocketTest::TestDeleteInReadCallbackIPv6(); +} + TEST_F(PhysicalSocketTest, TestSocketServerWaitIPv4) { MAYBE_SKIP_IPV4; SocketTest::TestSocketServerWaitIPv4(); diff --git a/rtc_base/platform_thread.cc b/rtc_base/platform_thread.cc index 8a5f2c9d6d..6d369d747e 100644 --- a/rtc_base/platform_thread.cc +++ b/rtc_base/platform_thread.cc @@ -10,131 +10,37 @@ #include "rtc_base/platform_thread.h" +#include +#include + #if !defined(WEBRTC_WIN) #include #endif -#include -#include - -#include #include "rtc_base/checks.h" namespace rtc { 
namespace { -#if !defined(WEBRTC_WIN) -struct ThreadAttributes { - ThreadAttributes() { pthread_attr_init(&attr); } - ~ThreadAttributes() { pthread_attr_destroy(&attr); } - pthread_attr_t* operator&() { return &attr; } - pthread_attr_t attr; -}; -#endif // defined(WEBRTC_WIN) -} // namespace - -PlatformThread::PlatformThread(ThreadRunFunction func, - void* obj, - absl::string_view thread_name, - ThreadPriority priority /*= kNormalPriority*/) - : run_function_(func), priority_(priority), obj_(obj), name_(thread_name) { - RTC_DCHECK(func); - RTC_DCHECK(!name_.empty()); - // TODO(tommi): Consider lowering the limit to 15 (limit on Linux). - RTC_DCHECK(name_.length() < 64); - spawned_thread_checker_.Detach(); -} - -PlatformThread::~PlatformThread() { - RTC_DCHECK(thread_checker_.IsCurrent()); -#if defined(WEBRTC_WIN) - RTC_DCHECK(!thread_); - RTC_DCHECK(!thread_id_); -#endif // defined(WEBRTC_WIN) -} - -#if defined(WEBRTC_WIN) -DWORD WINAPI PlatformThread::StartThread(void* param) { - // The GetLastError() function only returns valid results when it is called - // after a Win32 API function that returns a "failed" result. A crash dump - // contains the result from GetLastError() and to make sure it does not - // falsely report a Windows error we call SetLastError here. - ::SetLastError(ERROR_SUCCESS); - static_cast(param)->Run(); - return 0; -} -#else -void* PlatformThread::StartThread(void* param) { - static_cast(param)->Run(); - return 0; -} -#endif // defined(WEBRTC_WIN) - -void PlatformThread::Start() { - RTC_DCHECK(thread_checker_.IsCurrent()); - RTC_DCHECK(!thread_) << "Thread already started?"; -#if defined(WEBRTC_WIN) - // See bug 2902 for background on STACK_SIZE_PARAM_IS_A_RESERVATION. - // Set the reserved stack stack size to 1M, which is the default on Windows - // and Linux. 
- thread_ = ::CreateThread(nullptr, 1024 * 1024, &StartThread, this, - STACK_SIZE_PARAM_IS_A_RESERVATION, &thread_id_); - RTC_CHECK(thread_) << "CreateThread failed"; - RTC_DCHECK(thread_id_); -#else - ThreadAttributes attr; - // Set the stack stack size to 1M. - pthread_attr_setstacksize(&attr, 1024 * 1024); - RTC_CHECK_EQ(0, pthread_create(&thread_, &attr, &StartThread, this)); -#endif // defined(WEBRTC_WIN) -} -bool PlatformThread::IsRunning() const { - RTC_DCHECK(thread_checker_.IsCurrent()); #if defined(WEBRTC_WIN) - return thread_ != nullptr; -#else - return thread_ != 0; -#endif // defined(WEBRTC_WIN) -} - -PlatformThreadRef PlatformThread::GetThreadRef() const { -#if defined(WEBRTC_WIN) - return thread_id_; -#else - return thread_; -#endif // defined(WEBRTC_WIN) -} - -void PlatformThread::Stop() { - RTC_DCHECK(thread_checker_.IsCurrent()); - if (!IsRunning()) - return; - -#if defined(WEBRTC_WIN) - WaitForSingleObject(thread_, INFINITE); - CloseHandle(thread_); - thread_ = nullptr; - thread_id_ = 0; -#else - RTC_CHECK_EQ(0, pthread_join(thread_, nullptr)); - thread_ = 0; -#endif // defined(WEBRTC_WIN) - spawned_thread_checker_.Detach(); -} - -void PlatformThread::Run() { - // Attach the worker thread checker to this thread. 
- RTC_DCHECK(spawned_thread_checker_.IsCurrent()); - rtc::SetCurrentThreadName(name_.c_str()); - SetPriority(priority_); - run_function_(obj_); +int Win32PriorityFromThreadPriority(ThreadPriority priority) { + switch (priority) { + case ThreadPriority::kLow: + return THREAD_PRIORITY_BELOW_NORMAL; + case ThreadPriority::kNormal: + return THREAD_PRIORITY_NORMAL; + case ThreadPriority::kHigh: + return THREAD_PRIORITY_ABOVE_NORMAL; + case ThreadPriority::kRealtime: + return THREAD_PRIORITY_TIME_CRITICAL; + } } +#endif -bool PlatformThread::SetPriority(ThreadPriority priority) { - RTC_DCHECK(spawned_thread_checker_.IsCurrent()); - +bool SetPriority(ThreadPriority priority) { #if defined(WEBRTC_WIN) - return SetThreadPriority(thread_, priority) != FALSE; + return SetThreadPriority(GetCurrentThread(), + Win32PriorityFromThreadPriority(priority)) != FALSE; #elif defined(__native_client__) || defined(WEBRTC_FUCHSIA) // Setting thread priorities is not supported in NaCl or Fuchsia. return true; @@ -158,35 +64,148 @@ bool PlatformThread::SetPriority(ThreadPriority priority) { const int top_prio = max_prio - 1; const int low_prio = min_prio + 1; switch (priority) { - case kLowPriority: + case ThreadPriority::kLow: param.sched_priority = low_prio; break; - case kNormalPriority: + case ThreadPriority::kNormal: // The -1 ensures that the kHighPriority is always greater or equal to // kNormalPriority. 
param.sched_priority = (low_prio + top_prio - 1) / 2; break; - case kHighPriority: + case ThreadPriority::kHigh: param.sched_priority = std::max(top_prio - 2, low_prio); break; - case kHighestPriority: - param.sched_priority = std::max(top_prio - 1, low_prio); - break; - case kRealtimePriority: + case ThreadPriority::kRealtime: param.sched_priority = top_prio; break; } - return pthread_setschedparam(thread_, policy, ¶m) == 0; + return pthread_setschedparam(pthread_self(), policy, ¶m) == 0; #endif // defined(WEBRTC_WIN) } #if defined(WEBRTC_WIN) -bool PlatformThread::QueueAPC(PAPCFUNC function, ULONG_PTR data) { - RTC_DCHECK(thread_checker_.IsCurrent()); - RTC_DCHECK(IsRunning()); +DWORD WINAPI RunPlatformThread(void* param) { + // The GetLastError() function only returns valid results when it is called + // after a Win32 API function that returns a "failed" result. A crash dump + // contains the result from GetLastError() and to make sure it does not + // falsely report a Windows error we call SetLastError here. 
+ ::SetLastError(ERROR_SUCCESS); + auto function = static_cast*>(param); + (*function)(); + delete function; + return 0; +} +#else +void* RunPlatformThread(void* param) { + auto function = static_cast*>(param); + (*function)(); + delete function; + return 0; +} +#endif // defined(WEBRTC_WIN) + +} // namespace + +PlatformThread::PlatformThread(Handle handle, bool joinable) + : handle_(handle), joinable_(joinable) {} + +PlatformThread::PlatformThread(PlatformThread&& rhs) + : handle_(rhs.handle_), joinable_(rhs.joinable_) { + rhs.handle_ = absl::nullopt; +} + +PlatformThread& PlatformThread::operator=(PlatformThread&& rhs) { + Finalize(); + handle_ = rhs.handle_; + joinable_ = rhs.joinable_; + rhs.handle_ = absl::nullopt; + return *this; +} + +PlatformThread::~PlatformThread() { + Finalize(); +} + +PlatformThread PlatformThread::SpawnJoinable( + std::function thread_function, + absl::string_view name, + ThreadAttributes attributes) { + return SpawnThread(std::move(thread_function), name, attributes, + /*joinable=*/true); +} + +PlatformThread PlatformThread::SpawnDetached( + std::function thread_function, + absl::string_view name, + ThreadAttributes attributes) { + return SpawnThread(std::move(thread_function), name, attributes, + /*joinable=*/false); +} + +absl::optional PlatformThread::GetHandle() const { + return handle_; +} - return QueueUserAPC(function, thread_, data) != FALSE; +#if defined(WEBRTC_WIN) +bool PlatformThread::QueueAPC(PAPCFUNC function, ULONG_PTR data) { + RTC_DCHECK(handle_.has_value()); + return handle_.has_value() ? 
QueueUserAPC(function, *handle_, data) != FALSE + : false; } #endif +void PlatformThread::Finalize() { + if (!handle_.has_value()) + return; +#if defined(WEBRTC_WIN) + if (joinable_) + WaitForSingleObject(*handle_, INFINITE); + CloseHandle(*handle_); +#else + if (joinable_) + RTC_CHECK_EQ(0, pthread_join(*handle_, nullptr)); +#endif + handle_ = absl::nullopt; +} + +PlatformThread PlatformThread::SpawnThread( + std::function thread_function, + absl::string_view name, + ThreadAttributes attributes, + bool joinable) { + RTC_DCHECK(thread_function); + RTC_DCHECK(!name.empty()); + // TODO(tommi): Consider lowering the limit to 15 (limit on Linux). + RTC_DCHECK(name.length() < 64); + auto start_thread_function_ptr = + new std::function([thread_function = std::move(thread_function), + name = std::string(name), attributes] { + rtc::SetCurrentThreadName(name.c_str()); + SetPriority(attributes.priority); + thread_function(); + }); +#if defined(WEBRTC_WIN) + // See bug 2902 for background on STACK_SIZE_PARAM_IS_A_RESERVATION. + // Set the reserved stack stack size to 1M, which is the default on Windows + // and Linux. + DWORD thread_id = 0; + PlatformThread::Handle handle = ::CreateThread( + nullptr, 1024 * 1024, &RunPlatformThread, start_thread_function_ptr, + STACK_SIZE_PARAM_IS_A_RESERVATION, &thread_id); + RTC_CHECK(handle) << "CreateThread failed"; +#else + pthread_attr_t attr; + pthread_attr_init(&attr); + // Set the stack stack size to 1M. + pthread_attr_setstacksize(&attr, 1024 * 1024); + pthread_attr_setdetachstate( + &attr, joinable ? 
PTHREAD_CREATE_JOINABLE : PTHREAD_CREATE_DETACHED); + PlatformThread::Handle handle; + RTC_CHECK_EQ(0, pthread_create(&handle, &attr, &RunPlatformThread, + start_thread_function_ptr)); + pthread_attr_destroy(&attr); +#endif // defined(WEBRTC_WIN) + return PlatformThread(handle, joinable); +} + } // namespace rtc diff --git a/rtc_base/platform_thread.h b/rtc_base/platform_thread.h index 4968de9ee5..11ccfae3d0 100644 --- a/rtc_base/platform_thread.h +++ b/rtc_base/platform_thread.h @@ -11,92 +11,101 @@ #ifndef RTC_BASE_PLATFORM_THREAD_H_ #define RTC_BASE_PLATFORM_THREAD_H_ -#ifndef WEBRTC_WIN -#include -#endif +#include #include #include "absl/strings/string_view.h" -#include "rtc_base/constructor_magic.h" +#include "absl/types/optional.h" #include "rtc_base/platform_thread_types.h" -#include "rtc_base/thread_checker.h" namespace rtc { -// Callback function that the spawned thread will enter once spawned. -typedef void (*ThreadRunFunction)(void*); +enum class ThreadPriority { + kLow = 1, + kNormal, + kHigh, + kRealtime, +}; -enum ThreadPriority { -#ifdef WEBRTC_WIN - kLowPriority = THREAD_PRIORITY_BELOW_NORMAL, - kNormalPriority = THREAD_PRIORITY_NORMAL, - kHighPriority = THREAD_PRIORITY_ABOVE_NORMAL, - kHighestPriority = THREAD_PRIORITY_HIGHEST, - kRealtimePriority = THREAD_PRIORITY_TIME_CRITICAL -#else - kLowPriority = 1, - kNormalPriority = 2, - kHighPriority = 3, - kHighestPriority = 4, - kRealtimePriority = 5 -#endif +struct ThreadAttributes { + ThreadPriority priority = ThreadPriority::kNormal; + ThreadAttributes& SetPriority(ThreadPriority priority_param) { + priority = priority_param; + return *this; + } }; -// Represents a simple worker thread. The implementation must be assumed -// to be single threaded, meaning that all methods of the class, must be -// called from the same thread, including instantiation. -class PlatformThread { +// Represents a simple worker thread. 
+class PlatformThread final { public: - PlatformThread(ThreadRunFunction func, - void* obj, - absl::string_view thread_name, - ThreadPriority priority = kNormalPriority); + // Handle is the base platform thread handle. +#if defined(WEBRTC_WIN) + using Handle = HANDLE; +#else + using Handle = pthread_t; +#endif // defined(WEBRTC_WIN) + // This ctor creates the PlatformThread with an unset handle (returning true + // in empty()) and is provided for convenience. + // TODO(bugs.webrtc.org/12727) Look into if default and move support can be + // removed. + PlatformThread() = default; + + // Moves |rhs| into this, storing an empty state in |rhs|. + // TODO(bugs.webrtc.org/12727) Look into if default and move support can be + // removed. + PlatformThread(PlatformThread&& rhs); + + // Moves |rhs| into this, storing an empty state in |rhs|. + // TODO(bugs.webrtc.org/12727) Look into if default and move support can be + // removed. + PlatformThread& operator=(PlatformThread&& rhs); + + // For a PlatformThread that's been spawned joinable, the destructor suspends + // the calling thread until the created thread exits unless the thread has + // already exited. virtual ~PlatformThread(); - const std::string& name() const { return name_; } - - // Spawns a thread and tries to set thread priority according to the priority - // from when CreateThread was called. - void Start(); + // Finalizes any allocated resources. + // For a PlatformThread that's been spawned joinable, Finalize() suspends + // the calling thread until the created thread exits unless the thread has + // already exited. + // empty() returns true after completion. + void Finalize(); + + // Returns true if default constructed, moved from, or Finalize()ed. + bool empty() const { return !handle_.has_value(); } + + // Creates a started joinable thread which will be joined when the returned + // PlatformThread destructs or Finalize() is called. 
+ static PlatformThread SpawnJoinable( + std::function thread_function, + absl::string_view name, + ThreadAttributes attributes = ThreadAttributes()); + + // Creates a started detached thread. The caller has to use external + // synchronization as nothing is provided by the PlatformThread construct. + static PlatformThread SpawnDetached( + std::function thread_function, + absl::string_view name, + ThreadAttributes attributes = ThreadAttributes()); + + // Returns the base platform thread handle of this thread. + absl::optional GetHandle() const; - bool IsRunning() const; - - // Returns an identifier for the worker thread that can be used to do - // thread checks. - PlatformThreadRef GetThreadRef() const; - - // Stops (joins) the spawned thread. - void Stop(); - - protected: #if defined(WEBRTC_WIN) - // Exposed to derived classes to allow for special cases specific to Windows. + // Queue a Windows APC function that runs when the thread is alertable. bool QueueAPC(PAPCFUNC apc_function, ULONG_PTR data); #endif private: - void Run(); - bool SetPriority(ThreadPriority priority); - - ThreadRunFunction const run_function_ = nullptr; - const ThreadPriority priority_ = kNormalPriority; - void* const obj_; - // TODO(pbos): Make sure call sites use string literals and update to a const - // char* instead of a std::string. 
- const std::string name_; - rtc::ThreadChecker thread_checker_; - rtc::ThreadChecker spawned_thread_checker_; -#if defined(WEBRTC_WIN) - static DWORD WINAPI StartThread(void* param); - - HANDLE thread_ = nullptr; - DWORD thread_id_ = 0; -#else - static void* StartThread(void* param); - - pthread_t thread_ = 0; -#endif // defined(WEBRTC_WIN) - RTC_DISALLOW_COPY_AND_ASSIGN(PlatformThread); + PlatformThread(Handle handle, bool joinable); + static PlatformThread SpawnThread(std::function thread_function, + absl::string_view name, + ThreadAttributes attributes, + bool joinable); + + absl::optional handle_; + bool joinable_ = false; }; } // namespace rtc diff --git a/rtc_base/platform_thread_unittest.cc b/rtc_base/platform_thread_unittest.cc index 3f0408aa4b..b60d2131b7 100644 --- a/rtc_base/platform_thread_unittest.cc +++ b/rtc_base/platform_thread_unittest.cc @@ -10,52 +10,99 @@ #include "rtc_base/platform_thread.h" +#include "absl/types/optional.h" +#include "rtc_base/event.h" #include "system_wrappers/include/sleep.h" -#include "test/gtest.h" +#include "test/gmock.h" namespace rtc { -namespace { -void NullRunFunction(void* obj) {} +TEST(PlatformThreadTest, DefaultConstructedIsEmpty) { + PlatformThread thread; + EXPECT_EQ(thread.GetHandle(), absl::nullopt); + EXPECT_TRUE(thread.empty()); +} -// Function that sets a boolean. 
-void SetFlagRunFunction(void* obj) { - bool* obj_as_bool = static_cast(obj); - *obj_as_bool = true; +TEST(PlatformThreadTest, StartFinalize) { + PlatformThread thread = PlatformThread::SpawnJoinable([] {}, "1"); + EXPECT_NE(thread.GetHandle(), absl::nullopt); + EXPECT_FALSE(thread.empty()); + thread.Finalize(); + EXPECT_TRUE(thread.empty()); + rtc::Event done; + thread = PlatformThread::SpawnDetached([&] { done.Set(); }, "2"); + EXPECT_FALSE(thread.empty()); + thread.Finalize(); + EXPECT_TRUE(thread.empty()); + done.Wait(30000); } -} // namespace +TEST(PlatformThreadTest, MovesEmpty) { + PlatformThread thread1; + PlatformThread thread2 = std::move(thread1); + EXPECT_TRUE(thread1.empty()); + EXPECT_TRUE(thread2.empty()); +} -TEST(PlatformThreadTest, StartStop) { - PlatformThread thread(&NullRunFunction, nullptr, "PlatformThreadTest"); - EXPECT_TRUE(thread.name() == "PlatformThreadTest"); - EXPECT_TRUE(thread.GetThreadRef() == 0); - thread.Start(); - EXPECT_TRUE(thread.GetThreadRef() != 0); - thread.Stop(); - EXPECT_TRUE(thread.GetThreadRef() == 0); +TEST(PlatformThreadTest, MovesHandles) { + PlatformThread thread1 = PlatformThread::SpawnJoinable([] {}, "1"); + PlatformThread thread2 = std::move(thread1); + EXPECT_TRUE(thread1.empty()); + EXPECT_FALSE(thread2.empty()); + rtc::Event done; + thread1 = PlatformThread::SpawnDetached([&] { done.Set(); }, "2"); + thread2 = std::move(thread1); + EXPECT_TRUE(thread1.empty()); + EXPECT_FALSE(thread2.empty()); + done.Wait(30000); } -TEST(PlatformThreadTest, StartStop2) { - PlatformThread thread1(&NullRunFunction, nullptr, "PlatformThreadTest1"); - PlatformThread thread2(&NullRunFunction, nullptr, "PlatformThreadTest2"); - EXPECT_TRUE(thread1.GetThreadRef() == thread2.GetThreadRef()); - thread1.Start(); - thread2.Start(); - EXPECT_TRUE(thread1.GetThreadRef() != thread2.GetThreadRef()); - thread2.Stop(); - thread1.Stop(); +TEST(PlatformThreadTest, + TwoThreadHandlesAreDifferentWhenStartedAndEqualWhenJoined) { + PlatformThread 
thread1 = PlatformThread(); + PlatformThread thread2 = PlatformThread(); + EXPECT_EQ(thread1.GetHandle(), thread2.GetHandle()); + thread1 = PlatformThread::SpawnJoinable([] {}, "1"); + thread2 = PlatformThread::SpawnJoinable([] {}, "2"); + EXPECT_NE(thread1.GetHandle(), thread2.GetHandle()); + thread1.Finalize(); + EXPECT_NE(thread1.GetHandle(), thread2.GetHandle()); + thread2.Finalize(); + EXPECT_EQ(thread1.GetHandle(), thread2.GetHandle()); } TEST(PlatformThreadTest, RunFunctionIsCalled) { bool flag = false; - PlatformThread thread(&SetFlagRunFunction, &flag, "RunFunctionIsCalled"); - thread.Start(); + PlatformThread::SpawnJoinable([&] { flag = true; }, "T"); + EXPECT_TRUE(flag); +} - // At this point, the flag may be either true or false. - thread.Stop(); +TEST(PlatformThreadTest, JoinsThread) { + // This test flakes if there are problems with the join implementation. + rtc::Event event; + PlatformThread::SpawnJoinable([&] { event.Set(); }, "T"); + EXPECT_TRUE(event.Wait(/*give_up_after_ms=*/0)); +} - // We expect the thread to have run at least once. +TEST(PlatformThreadTest, StopsBeforeDetachedThreadExits) { + // This test flakes if there are problems with the detached thread + // implementation. 
+ bool flag = false; + rtc::Event thread_started; + rtc::Event thread_continue; + rtc::Event thread_exiting; + PlatformThread::SpawnDetached( + [&] { + thread_started.Set(); + thread_continue.Wait(Event::kForever); + flag = true; + thread_exiting.Set(); + }, + "T"); + thread_started.Wait(Event::kForever); + EXPECT_FALSE(flag); + thread_continue.Set(); + thread_exiting.Wait(Event::kForever); EXPECT_TRUE(flag); } diff --git a/rtc_base/random.cc b/rtc_base/random.cc index 5deb621727..5206b817f3 100644 --- a/rtc_base/random.cc +++ b/rtc_base/random.cc @@ -49,14 +49,14 @@ int32_t Random::Rand(int32_t low, int32_t high) { template <> float Random::Rand() { double result = NextOutput() - 1; - result = result / 0xFFFFFFFFFFFFFFFEull; + result = result / static_cast(0xFFFFFFFFFFFFFFFFull); return static_cast(result); } template <> double Random::Rand() { double result = NextOutput() - 1; - result = result / 0xFFFFFFFFFFFFFFFEull; + result = result / static_cast(0xFFFFFFFFFFFFFFFFull); return result; } @@ -72,8 +72,10 @@ double Random::Gaussian(double mean, double standard_deviation) { // in the range [1, 2^64-1]. Normally this behavior is a bit frustrating, // but here it is exactly what we need. 
const double kPi = 3.14159265358979323846; - double u1 = static_cast(NextOutput()) / 0xFFFFFFFFFFFFFFFFull; - double u2 = static_cast(NextOutput()) / 0xFFFFFFFFFFFFFFFFull; + double u1 = static_cast(NextOutput()) / + static_cast(0xFFFFFFFFFFFFFFFFull); + double u2 = static_cast(NextOutput()) / + static_cast(0xFFFFFFFFFFFFFFFFull); return mean + standard_deviation * sqrt(-2 * log(u1)) * cos(2 * kPi * u2); } diff --git a/rtc_base/random.h b/rtc_base/random.h index 93241a3e97..b3b9fd1608 100644 --- a/rtc_base/random.h +++ b/rtc_base/random.h @@ -16,7 +16,6 @@ #include #include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" namespace webrtc { @@ -35,6 +34,10 @@ class Random { // See also discussion here: https://codereview.webrtc.org/1623543002/ explicit Random(uint64_t seed); + Random() = delete; + Random(const Random&) = delete; + Random& operator=(const Random&) = delete; + // Return pseudo-random integer of the specified type. // We need to limit the size to 32 bits to keep the output close to uniform. template @@ -63,7 +66,8 @@ class Random { double Exponential(double lambda); private: - // Outputs a nonzero 64-bit random number. + // Outputs a nonzero 64-bit random number using Xorshift algorithm. + // https://en.wikipedia.org/wiki/Xorshift uint64_t NextOutput() { state_ ^= state_ >> 12; state_ ^= state_ << 25; @@ -73,8 +77,6 @@ class Random { } uint64_t state_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Random); }; // Return pseudo-random number in the interval [0.0, 1.0). 
diff --git a/rtc_base/random_unittest.cc b/rtc_base/random_unittest.cc index f94b0c13fe..4eb6f754eb 100644 --- a/rtc_base/random_unittest.cc +++ b/rtc_base/random_unittest.cc @@ -120,7 +120,7 @@ void BucketTestSignedInterval(unsigned int bucket_count, ASSERT_GE(high, low); ASSERT_GE(bucket_count, 2u); - uint32_t interval = unsigned_difference(high, low) + 1; + uint32_t interval = webrtc_impl::unsigned_difference(high, low) + 1; uint32_t numbers_per_bucket; if (interval == 0) { // The computation high - low + 1 should be 2^32 but overflowed @@ -136,7 +136,8 @@ void BucketTestSignedInterval(unsigned int bucket_count, int32_t sample = prng->Rand(low, high); EXPECT_LE(low, sample); EXPECT_GE(high, sample); - buckets[unsigned_difference(sample, low) / numbers_per_bucket]++; + buckets[webrtc_impl::unsigned_difference(sample, low) / + numbers_per_bucket]++; } for (unsigned int i = 0; i < bucket_count; i++) { diff --git a/rtc_base/rate_limiter.cc b/rtc_base/rate_limiter.cc index 7394c3eb89..0f3f343aed 100644 --- a/rtc_base/rate_limiter.cc +++ b/rtc_base/rate_limiter.cc @@ -31,7 +31,7 @@ RateLimiter::~RateLimiter() {} // calling SetMaxRate() and a timed maintenance thread periodically updating // the RTT. bool RateLimiter::TryUseRate(size_t packet_size_bytes) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); int64_t now_ms = clock_->TimeInMilliseconds(); absl::optional current_rate = current_rate_.Rate(now_ms); if (current_rate) { @@ -53,14 +53,14 @@ bool RateLimiter::TryUseRate(size_t packet_size_bytes) { } void RateLimiter::SetMaxRate(uint32_t max_rate_bps) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); max_rate_bps_ = max_rate_bps; } // Set the window size over which to measure the current bitrate. // For retransmissions, this is typically the RTT. 
bool RateLimiter::SetWindowSize(int64_t window_size_ms) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); window_size_ms_ = window_size_ms; return current_rate_.SetWindowSize(window_size_ms, clock_->TimeInMilliseconds()); diff --git a/rtc_base/rate_limiter.h b/rtc_base/rate_limiter.h index 1c956d788b..9bbe21f9ca 100644 --- a/rtc_base/rate_limiter.h +++ b/rtc_base/rate_limiter.h @@ -14,9 +14,8 @@ #include #include -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/rate_statistics.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" namespace webrtc { @@ -29,6 +28,11 @@ class Clock; class RateLimiter { public: RateLimiter(Clock* clock, int64_t max_window_ms); + + RateLimiter() = delete; + RateLimiter(const RateLimiter&) = delete; + RateLimiter& operator=(const RateLimiter&) = delete; + ~RateLimiter(); // Try to use rate to send bytes. Returns true on success and if so updates @@ -45,12 +49,10 @@ class RateLimiter { private: Clock* const clock_; - rtc::CriticalSection lock_; + Mutex lock_; RateStatistics current_rate_ RTC_GUARDED_BY(lock_); int64_t window_size_ms_ RTC_GUARDED_BY(lock_); uint32_t max_rate_bps_ RTC_GUARDED_BY(lock_); - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RateLimiter); }; } // namespace webrtc diff --git a/rtc_base/rate_limiter_unittest.cc b/rtc_base/rate_limiter_unittest.cc index 8ebf8aa67b..eda644b4ca 100644 --- a/rtc_base/rate_limiter_unittest.cc +++ b/rtc_base/rate_limiter_unittest.cc @@ -127,10 +127,6 @@ class ThreadTask { rtc::Event end_signal_; }; -void RunTask(void* thread_task) { - reinterpret_cast(thread_task)->Run(); -} - TEST_F(RateLimitTest, MultiThreadedUsage) { // Simple sanity test, with different threads calling the various methods. 
// Runs a few simple tasks, each on its own thread, but coordinated with @@ -149,8 +145,8 @@ TEST_F(RateLimitTest, MultiThreadedUsage) { EXPECT_TRUE(rate_limiter_->SetWindowSize(kWindowSizeMs / 2)); } } set_window_size_task(rate_limiter.get()); - rtc::PlatformThread thread1(RunTask, &set_window_size_task, "Thread1"); - thread1.Start(); + auto thread1 = rtc::PlatformThread::SpawnJoinable( + [&set_window_size_task] { set_window_size_task.Run(); }, "Thread1"); class SetMaxRateTask : public ThreadTask { public: @@ -160,8 +156,8 @@ TEST_F(RateLimitTest, MultiThreadedUsage) { void DoRun() override { rate_limiter_->SetMaxRate(kMaxRateBps * 2); } } set_max_rate_task(rate_limiter.get()); - rtc::PlatformThread thread2(RunTask, &set_max_rate_task, "Thread2"); - thread2.Start(); + auto thread2 = rtc::PlatformThread::SpawnJoinable( + [&set_max_rate_task] { set_max_rate_task.Run(); }, "Thread2"); class UseRateTask : public ThreadTask { public: @@ -177,8 +173,8 @@ TEST_F(RateLimitTest, MultiThreadedUsage) { SimulatedClock* const clock_; } use_rate_task(rate_limiter.get(), &clock_); - rtc::PlatformThread thread3(RunTask, &use_rate_task, "Thread3"); - thread3.Start(); + auto thread3 = rtc::PlatformThread::SpawnJoinable( + [&use_rate_task] { use_rate_task.Run(); }, "Thread3"); set_window_size_task.start_signal_.Set(); EXPECT_TRUE(set_window_size_task.end_signal_.Wait(kMaxTimeoutMs)); @@ -191,10 +187,6 @@ TEST_F(RateLimitTest, MultiThreadedUsage) { // All rate consumed. 
EXPECT_FALSE(rate_limiter->TryUseRate(1)); - - thread1.Stop(); - thread2.Stop(); - thread3.Stop(); } } // namespace webrtc diff --git a/rtc_base/rate_statistics.cc b/rtc_base/rate_statistics.cc index c4c2e78581..85621fa555 100644 --- a/rtc_base/rate_statistics.cc +++ b/rtc_base/rate_statistics.cc @@ -20,29 +20,26 @@ namespace webrtc { +RateStatistics::Bucket::Bucket(int64_t timestamp) + : sum(0), num_samples(0), timestamp(timestamp) {} + RateStatistics::RateStatistics(int64_t window_size_ms, float scale) - : buckets_(new Bucket[window_size_ms]()), - accumulated_count_(0), + : accumulated_count_(0), + first_timestamp_(-1), num_samples_(0), - oldest_time_(-window_size_ms), - oldest_index_(0), scale_(scale), max_window_size_ms_(window_size_ms), current_window_size_ms_(max_window_size_ms_) {} RateStatistics::RateStatistics(const RateStatistics& other) - : accumulated_count_(other.accumulated_count_), + : buckets_(other.buckets_), + accumulated_count_(other.accumulated_count_), + first_timestamp_(other.first_timestamp_), overflow_(other.overflow_), num_samples_(other.num_samples_), - oldest_time_(other.oldest_time_), - oldest_index_(other.oldest_index_), scale_(other.scale_), max_window_size_ms_(other.max_window_size_ms_), - current_window_size_ms_(other.current_window_size_ms_) { - buckets_ = std::make_unique(other.max_window_size_ms_); - std::copy(other.buckets_.get(), - other.buckets_.get() + other.max_window_size_ms_, buckets_.get()); -} + current_window_size_ms_(other.current_window_size_ms_) {} RateStatistics::RateStatistics(RateStatistics&& other) = default; @@ -52,33 +49,33 @@ void RateStatistics::Reset() { accumulated_count_ = 0; overflow_ = false; num_samples_ = 0; - oldest_time_ = -max_window_size_ms_; - oldest_index_ = 0; + first_timestamp_ = -1; current_window_size_ms_ = max_window_size_ms_; - for (int64_t i = 0; i < max_window_size_ms_; i++) - buckets_[i] = Bucket(); + buckets_.clear(); } void RateStatistics::Update(int64_t count, int64_t now_ms) { - 
RTC_DCHECK_LE(0, count); - if (now_ms < oldest_time_) { - // Too old data is ignored. - return; - } + RTC_DCHECK_GE(count, 0); EraseOld(now_ms); + if (first_timestamp_ == -1) { + first_timestamp_ = now_ms; + } + + if (buckets_.empty() || now_ms != buckets_.back().timestamp) { + if (!buckets_.empty() && now_ms < buckets_.back().timestamp) { + RTC_LOG(LS_WARNING) << "Timestamp " << now_ms + << " is before the last added " + "timestamp in the rate window: " + << buckets_.back().timestamp << ", aligning to that."; + now_ms = buckets_.back().timestamp; + } + buckets_.emplace_back(now_ms); + } + Bucket& last_bucket = buckets_.back(); + last_bucket.sum += count; + ++last_bucket.num_samples; - // First ever sample, reset window to start now. - if (!IsInitialized()) - oldest_time_ = now_ms; - - uint32_t now_offset = rtc::dchecked_cast(now_ms - oldest_time_); - RTC_DCHECK_LT(now_offset, max_window_size_ms_); - uint32_t index = oldest_index_ + now_offset; - if (index >= max_window_size_ms_) - index -= max_window_size_ms_; - buckets_[index].sum += count; - ++buckets_[index].samples; if (std::numeric_limits::max() - accumulated_count_ > count) { accumulated_count_ += count; } else { @@ -92,10 +89,22 @@ absl::optional RateStatistics::Rate(int64_t now_ms) const { // of the members as mutable... const_cast(this)->EraseOld(now_ms); + int active_window_size = 0; + if (first_timestamp_ != -1) { + if (first_timestamp_ <= now_ms - current_window_size_ms_) { + // Count window as full even if no data points currently in view, if the + // data stream started before the window. + active_window_size = current_window_size_ms_; + } else { + // Size of a single bucket is 1ms, so even if now_ms == first_timestmap_ + // the window size should be 1. 
+ active_window_size = now_ms - first_timestamp_ + 1; + } + } + // If window is a single bucket or there is only one sample in a data set that // has not grown to the full window size, or if the accumulator has // overflowed, treat this as rate unavailable. - int active_window_size = now_ms - oldest_time_ + 1; if (num_samples_ == 0 || active_window_size <= 1 || (num_samples_ <= 1 && rtc::SafeLt(active_window_size, current_window_size_ms_)) || @@ -114,43 +123,35 @@ absl::optional RateStatistics::Rate(int64_t now_ms) const { } void RateStatistics::EraseOld(int64_t now_ms) { - if (!IsInitialized()) - return; - // New oldest time that is included in data set. - int64_t new_oldest_time = now_ms - current_window_size_ms_ + 1; - - // New oldest time is older than the current one, no need to cull data. - if (new_oldest_time <= oldest_time_) - return; + const int64_t new_oldest_time = now_ms - current_window_size_ms_ + 1; // Loop over buckets and remove too old data points. - while (num_samples_ > 0 && oldest_time_ < new_oldest_time) { - const Bucket& oldest_bucket = buckets_[oldest_index_]; + while (!buckets_.empty() && buckets_.front().timestamp < new_oldest_time) { + const Bucket& oldest_bucket = buckets_.front(); RTC_DCHECK_GE(accumulated_count_, oldest_bucket.sum); - RTC_DCHECK_GE(num_samples_, oldest_bucket.samples); + RTC_DCHECK_GE(num_samples_, oldest_bucket.num_samples); accumulated_count_ -= oldest_bucket.sum; - num_samples_ -= oldest_bucket.samples; - buckets_[oldest_index_] = Bucket(); - if (++oldest_index_ >= max_window_size_ms_) - oldest_index_ = 0; - ++oldest_time_; + num_samples_ -= oldest_bucket.num_samples; + buckets_.pop_front(); // This does not clear overflow_ even when counter is empty. // TODO(https://bugs.webrtc.org/11247): Consider if overflow_ can be reset. 
} - oldest_time_ = new_oldest_time; } bool RateStatistics::SetWindowSize(int64_t window_size_ms, int64_t now_ms) { if (window_size_ms <= 0 || window_size_ms > max_window_size_ms_) return false; + if (first_timestamp_ != -1) { + // If the window changes (e.g. decreases - removing data point, then + // increases again) we need to update the first timestamp mark as + // otherwise it indicates the window coveres a region of zeros, suddenly + // under-estimating the rate. + first_timestamp_ = std::max(first_timestamp_, now_ms - window_size_ms + 1); + } current_window_size_ms_ = window_size_ms; EraseOld(now_ms); return true; } -bool RateStatistics::IsInitialized() const { - return oldest_time_ != -max_window_size_ms_; -} - } // namespace webrtc diff --git a/rtc_base/rate_statistics.h b/rtc_base/rate_statistics.h index 11c8cee7af..dc8d7f5272 100644 --- a/rtc_base/rate_statistics.h +++ b/rtc_base/rate_statistics.h @@ -14,6 +14,7 @@ #include #include +#include #include #include "absl/types/optional.h" @@ -28,6 +29,10 @@ namespace webrtc { // high; for instance, a 20 Mbit/sec video stream can wrap a 32-bit byte // counter in 14 minutes. +// Note that timestamps used in Update(), Rate() and SetWindowSize() must never +// decrease for two consecutive calls. +// TODO(bugs.webrtc.org/11600): Migrate from int64_t to Timestamp. + class RTC_EXPORT RateStatistics { public: static constexpr float kBpsScale = 8000.0f; @@ -65,19 +70,22 @@ class RTC_EXPORT RateStatistics { private: void EraseOld(int64_t now_ms); - bool IsInitialized() const; - // Counters are kept in buckets (circular buffer), with one bucket - // per millisecond. struct Bucket { + explicit Bucket(int64_t timestamp); int64_t sum; // Sum of all samples in this bucket. - int samples; // Number of samples in this bucket. + int num_samples; // Number of samples in this bucket. + const int64_t timestamp; // Timestamp this bucket corresponds to. 
}; - std::unique_ptr buckets_; + // All buckets within the time window, ordered by time. + std::deque buckets_; - // Total count recorded in buckets. + // Total count recorded in all buckets. int64_t accumulated_count_; + // Timestamp of the first data point seen, or -1 of none seen. + int64_t first_timestamp_; + // True if accumulated_count_ has ever grown too large to be // contained in its integer type. bool overflow_ = false; @@ -85,12 +93,6 @@ class RTC_EXPORT RateStatistics { // The total number of samples in the buckets. int num_samples_; - // Oldest time recorded in buckets. - int64_t oldest_time_; - - // Bucket index of oldest counter recorded in buckets. - int64_t oldest_index_; - // To convert counts/ms to desired units const float scale_; diff --git a/rtc_base/rate_tracker.cc b/rtc_base/rate_tracker.cc index 5c827927f6..e39dadb988 100644 --- a/rtc_base/rate_tracker.cc +++ b/rtc_base/rate_tracker.cc @@ -108,14 +108,18 @@ int64_t RateTracker::TotalSampleCount() const { } void RateTracker::AddSamples(int64_t sample_count) { + AddSamplesAtTime(Time(), sample_count); +} + +void RateTracker::AddSamplesAtTime(int64_t current_time_ms, + int64_t sample_count) { RTC_DCHECK_LE(0, sample_count); EnsureInitialized(); - int64_t current_time = Time(); // Advance the current bucket as needed for the current time, and reset // bucket counts as we advance. - for (size_t i = 0; - i <= bucket_count_ && - current_time >= bucket_start_time_milliseconds_ + bucket_milliseconds_; + for (size_t i = 0; i <= bucket_count_ && + current_time_ms >= + bucket_start_time_milliseconds_ + bucket_milliseconds_; ++i) { bucket_start_time_milliseconds_ += bucket_milliseconds_; current_bucket_ = NextBucketIndex(current_bucket_); @@ -125,7 +129,8 @@ void RateTracker::AddSamples(int64_t sample_count) { // the entire buffer of samples has been expired. 
bucket_start_time_milliseconds_ += bucket_milliseconds_ * - ((current_time - bucket_start_time_milliseconds_) / bucket_milliseconds_); + ((current_time_ms - bucket_start_time_milliseconds_) / + bucket_milliseconds_); // Add all samples in the bucket that includes the current time. sample_buckets_[current_bucket_] += sample_count; total_sample_count_ += sample_count; diff --git a/rtc_base/rate_tracker.h b/rtc_base/rate_tracker.h index e42d40f14f..3b3c23538d 100644 --- a/rtc_base/rate_tracker.h +++ b/rtc_base/rate_tracker.h @@ -47,6 +47,9 @@ class RateTracker { // these samples, and increments the count for that bucket by sample_count. void AddSamples(int64_t sample_count); + // Increment count for bucket at |current_time_ms|. + void AddSamplesAtTime(int64_t current_time_ms, int64_t sample_count); + protected: // overrideable for tests virtual int64_t Time() const; diff --git a/rtc_base/ref_counted_object.h b/rtc_base/ref_counted_object.h index ce18379d50..331132c569 100644 --- a/rtc_base/ref_counted_object.h +++ b/rtc_base/ref_counted_object.h @@ -13,6 +13,7 @@ #include #include +#include "api/scoped_refptr.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/ref_count.h" #include "rtc_base/ref_counter.h" @@ -33,9 +34,9 @@ class RefCountedObject : public T { std::forward(p1), std::forward(args)...) 
{} - virtual void AddRef() const { ref_count_.IncRef(); } + void AddRef() const override { ref_count_.IncRef(); } - virtual RefCountReleaseStatus Release() const { + RefCountReleaseStatus Release() const override { const auto status = ref_count_.DecRef(); if (status == RefCountReleaseStatus::kDroppedLastRef) { delete this; @@ -52,13 +53,146 @@ class RefCountedObject : public T { virtual bool HasOneRef() const { return ref_count_.HasOneRef(); } protected: - virtual ~RefCountedObject() {} + ~RefCountedObject() override {} mutable webrtc::webrtc_impl::RefCounter ref_count_{0}; RTC_DISALLOW_COPY_AND_ASSIGN(RefCountedObject); }; +template +class FinalRefCountedObject final : public T { + public: + using T::T; + // Until c++17 compilers are allowed not to inherit the default constructors. + // Thus the default constructors are forwarded explicitly. + FinalRefCountedObject() = default; + explicit FinalRefCountedObject(const T& other) : T(other) {} + explicit FinalRefCountedObject(T&& other) : T(std::move(other)) {} + FinalRefCountedObject(const FinalRefCountedObject&) = delete; + FinalRefCountedObject& operator=(const FinalRefCountedObject&) = delete; + + void AddRef() const { ref_count_.IncRef(); } + void Release() const { + if (ref_count_.DecRef() == RefCountReleaseStatus::kDroppedLastRef) { + delete this; + } + } + bool HasOneRef() const { return ref_count_.HasOneRef(); } + + private: + ~FinalRefCountedObject() = default; + + mutable webrtc::webrtc_impl::RefCounter ref_count_{0}; +}; + +// General utilities for constructing a reference counted class and the +// appropriate reference count implementation for that class. +// +// These utilities select either the `RefCountedObject` implementation or +// `FinalRefCountedObject` depending on whether the to-be-shared class is +// derived from the RefCountInterface interface or not (respectively). 
+ +// `make_ref_counted`: +// +// Use this when you want to construct a reference counted object of type T and +// get a `scoped_refptr<>` back. Example: +// +// auto p = make_ref_counted("bar", 123); +// +// For a class that inherits from RefCountInterface, this is equivalent to: +// +// auto p = scoped_refptr(new RefCountedObject("bar", 123)); +// +// If the class does not inherit from RefCountInterface, the example is +// equivalent to: +// +// auto p = scoped_refptr>( +// new FinalRefCountedObject("bar", 123)); +// +// In these cases, `make_ref_counted` reduces the amount of boilerplate code but +// also helps with the most commonly intended usage of RefCountedObject whereby +// methods for reference counting, are virtual and designed to satisfy the need +// of an interface. When such a need does not exist, it is more efficient to use +// the `FinalRefCountedObject` template, which does not add the vtable overhead. +// +// Note that in some cases, using RefCountedObject directly may still be what's +// needed. + +// `make_ref_counted` for classes that are convertible to RefCountInterface. +template < + typename T, + typename... Args, + typename std::enable_if::value, + T>::type* = nullptr> +scoped_refptr make_ref_counted(Args&&... args) { + return new RefCountedObject(std::forward(args)...); +} + +// `make_ref_counted` for complete classes that are not convertible to +// RefCountInterface. +template < + typename T, + typename... Args, + typename std::enable_if::value, + T>::type* = nullptr> +scoped_refptr> make_ref_counted(Args&&... 
args) { + return new FinalRefCountedObject(std::forward(args)...); +} + +// `Ref<>`, `Ref<>::Type` and `Ref<>::Ptr`: +// +// `Ref` is a type declaring utility that is compatible with `make_ref_counted` +// and can be used in classes and methods where it's more convenient (or +// readable) to have the compiler figure out the fully fleshed out type for a +// class rather than spell it out verbatim in all places the type occurs (which +// can mean maintenance work if the class layout changes). +// +// Usage examples: +// +// If you want to declare the parameter type that's always compatible with +// this code: +// +// Bar(make_ref_counted()); +// +// You can use `Ref<>::Ptr` to declare a compatible scoped_refptr type: +// +// void Bar(Ref::Ptr p); +// +// This might be more practically useful in templates though. +// +// In rare cases you might need to be able to declare a parameter that's fully +// compatible with the reference counted T type - and just using T* is not +// enough. To give a code example, we can declare a function, `Foo` that is +// compatible with this code: +// auto p = make_ref_counted(); +// Foo(p.get()); +// +// void Foo(Ref::Type* foo_ptr); +// +// Alternatively this would be: +// void Foo(Foo* foo_ptr); +// or +// void Foo(FinalRefCountedObject* foo_ptr); + +// Declares the approprate reference counted type for T depending on whether +// T is convertible to RefCountInterface or not. +// For classes that are convertible, the type will simply be T. +// For classes that cannot be converted to RefCountInterface, the type will be +// FinalRefCountedObject. 
+// This is most useful for declaring a scoped_refptr instance for a class +// that may or may not implement a virtual reference counted interface: +// * scoped_refptr::Type> my_ptr; +template +struct Ref { + typedef typename std::conditional< + std::is_convertible::value, + T, + FinalRefCountedObject>::type Type; + + typedef scoped_refptr Ptr; +}; + } // namespace rtc #endif // RTC_BASE_REF_COUNTED_OBJECT_H_ diff --git a/rtc_base/ref_counted_object_unittest.cc b/rtc_base/ref_counted_object_unittest.cc index eacf731782..ab7bb09191 100644 --- a/rtc_base/ref_counted_object_unittest.cc +++ b/rtc_base/ref_counted_object_unittest.cc @@ -12,6 +12,7 @@ #include #include +#include #include #include "api/scoped_refptr.h" @@ -63,6 +64,20 @@ class RefClassWithMixedValues : public RefCountInterface { std::string c_; }; +class Foo { + public: + Foo() {} + Foo(int i, int j) : foo_(i + j) {} + int foo_ = 0; +}; + +class FooItf : public RefCountInterface { + public: + FooItf() {} + FooItf(int i, int j) : foo_(i + j) {} + int foo_ = 0; +}; + } // namespace TEST(RefCountedObject, HasOneRef) { @@ -95,4 +110,73 @@ TEST(RefCountedObject, SupportMixedTypesInCtor) { EXPECT_EQ(c, ref->c_); } +TEST(FinalRefCountedObject, CanWrapIntoScopedRefptr) { + using WrappedTyped = FinalRefCountedObject; + static_assert(!std::is_polymorphic::value, ""); + scoped_refptr ref(new WrappedTyped()); + EXPECT_TRUE(ref.get()); + EXPECT_TRUE(ref->HasOneRef()); + // Test reference counter is updated on some simple operations. 
+ scoped_refptr ref2 = ref; + EXPECT_FALSE(ref->HasOneRef()); + EXPECT_FALSE(ref2->HasOneRef()); + + ref = nullptr; + EXPECT_TRUE(ref2->HasOneRef()); +} + +TEST(FinalRefCountedObject, CanCreateFromMovedType) { + class MoveOnly { + public: + MoveOnly(int a) : a_(a) {} + MoveOnly(MoveOnly&&) = default; + + int a() { return a_; } + + private: + int a_; + }; + MoveOnly foo(5); + auto ref = make_ref_counted(std::move(foo)); + EXPECT_EQ(ref->a(), 5); +} + +// This test is mostly a compile-time test for scoped_refptr compatibility. +TEST(RefCounted, SmartPointers) { + // Sanity compile-time tests. FooItf is virtual, Foo is not, FooItf inherits + // from RefCountInterface, Foo does not. + static_assert(std::is_base_of::value, ""); + static_assert(!std::is_base_of::value, ""); + static_assert(std::is_polymorphic::value, ""); + static_assert(!std::is_polymorphic::value, ""); + + // Check if Ref generates the expected types for Foo and FooItf. + static_assert(std::is_base_of::Type>::value && + !std::is_same::Type>::value, + ""); + static_assert(std::is_same::Type>::value, ""); + + { + // Test with FooItf, a class that inherits from RefCountInterface. + // Check that we get a valid FooItf reference counted object. + auto p = make_ref_counted(2, 3); + EXPECT_NE(p.get(), nullptr); + EXPECT_EQ(p->foo_, 5); // the FooItf ctor just stores 2+3 in foo_. + + // Use a couple of different ways of declaring what should result in the + // same type as `p` is of. 
+ scoped_refptr::Type> p2 = p; + Ref::Ptr p3 = p; + } + + { + // Same for `Foo` + auto p = make_ref_counted(2, 3); + EXPECT_NE(p.get(), nullptr); + EXPECT_EQ(p->foo_, 5); + scoped_refptr::Type> p2 = p; + Ref::Ptr p3 = p; + } +} + } // namespace rtc diff --git a/rtc_base/rolling_accumulator.h b/rtc_base/rolling_accumulator.h index 015229b04c..241bd72a11 100644 --- a/rtc_base/rolling_accumulator.h +++ b/rtc_base/rolling_accumulator.h @@ -40,7 +40,7 @@ class RollingAccumulator { size_t count() const { return static_cast(stats_.Size()); } void Reset() { - stats_ = webrtc::RunningStatistics(); + stats_ = webrtc::webrtc_impl::RunningStatistics(); next_index_ = 0U; max_ = T(); max_stale_ = false; @@ -129,7 +129,7 @@ class RollingAccumulator { double ComputeVariance() const { return stats_.GetVariance().value_or(0); } private: - webrtc::RunningStatistics stats_; + webrtc::webrtc_impl::RunningStatistics stats_; size_t next_index_; mutable T max_; mutable bool max_stale_; diff --git a/rtc_base/rtc_certificate.cc b/rtc_base/rtc_certificate.cc index 04ae99685d..496b4ac4b4 100644 --- a/rtc_base/rtc_certificate.cc +++ b/rtc_base/rtc_certificate.cc @@ -13,7 +13,6 @@ #include #include "rtc_base/checks.h" -#include "rtc_base/ref_counted_object.h" #include "rtc_base/ssl_certificate.h" #include "rtc_base/ssl_identity.h" #include "rtc_base/time_utils.h" @@ -22,14 +21,14 @@ namespace rtc { scoped_refptr RTCCertificate::Create( std::unique_ptr identity) { - return new RefCountedObject(identity.release()); + return new RTCCertificate(identity.release()); } RTCCertificate::RTCCertificate(SSLIdentity* identity) : identity_(identity) { RTC_DCHECK(identity_); } -RTCCertificate::~RTCCertificate() {} +RTCCertificate::~RTCCertificate() = default; uint64_t RTCCertificate::Expires() const { int64_t expires = GetSSLCertificate().CertificateExpirationTime(); @@ -47,11 +46,6 @@ const SSLCertificate& RTCCertificate::GetSSLCertificate() const { return identity_->certificate(); } -// Deprecated: 
TODO(benwright) - Remove once chromium is updated. -const SSLCertificate& RTCCertificate::ssl_certificate() const { - return identity_->certificate(); -} - const SSLCertChain& RTCCertificate::GetSSLCertificateChain() const { return identity_->cert_chain(); } @@ -67,7 +61,7 @@ scoped_refptr RTCCertificate::FromPEM( SSLIdentity::CreateFromPEMStrings(pem.private_key(), pem.certificate())); if (!identity) return nullptr; - return new RefCountedObject(identity.release()); + return new RTCCertificate(identity.release()); } bool RTCCertificate::operator==(const RTCCertificate& certificate) const { diff --git a/rtc_base/rtc_certificate.h b/rtc_base/rtc_certificate.h index 102385e5a2..fa026ec331 100644 --- a/rtc_base/rtc_certificate.h +++ b/rtc_base/rtc_certificate.h @@ -16,8 +16,9 @@ #include #include +#include "absl/base/attributes.h" +#include "api/ref_counted_base.h" #include "api/scoped_refptr.h" -#include "rtc_base/ref_count.h" #include "rtc_base/system/rtc_export.h" namespace rtc { @@ -49,7 +50,8 @@ class RTCCertificatePEM { // A thin abstraction layer between "lower level crypto stuff" like // SSLCertificate and WebRTC usage. Takes ownership of some lower level objects, // reference counting protects these from premature destruction. -class RTC_EXPORT RTCCertificate : public RefCountInterface { +class RTC_EXPORT RTCCertificate final + : public RefCountedNonVirtual { public: // Takes ownership of |identity|. static scoped_refptr Create( @@ -64,9 +66,6 @@ class RTC_EXPORT RTCCertificate : public RefCountInterface { const SSLCertificate& GetSSLCertificate() const; const SSLCertChain& GetSSLCertificateChain() const; - // Deprecated: TODO(benwright) - Remove once chromium is updated. - const SSLCertificate& ssl_certificate() const; - // TODO(hbos): If possible, remove once RTCCertificate and its // GetSSLCertificate() is used in all relevant places. Should not pass around // raw SSLIdentity* for the sake of accessing SSLIdentity::certificate(). 
@@ -82,12 +81,14 @@ class RTC_EXPORT RTCCertificate : public RefCountInterface { protected: explicit RTCCertificate(SSLIdentity* identity); - ~RTCCertificate() override; + + friend class RefCountedNonVirtual; + ~RTCCertificate(); private: // The SSLIdentity is the owner of the SSLCertificate. To protect our // GetSSLCertificate() we take ownership of |identity_|. - std::unique_ptr identity_; + const std::unique_ptr identity_; }; } // namespace rtc diff --git a/rtc_base/rtc_certificate_generator.cc b/rtc_base/rtc_certificate_generator.cc index 4c9d378dd2..5e1fdcac30 100644 --- a/rtc_base/rtc_certificate_generator.cc +++ b/rtc_base/rtc_certificate_generator.cc @@ -30,75 +30,6 @@ namespace { const char kIdentityName[] = "WebRTC"; const uint64_t kYearInSeconds = 365 * 24 * 60 * 60; -enum { - MSG_GENERATE, - MSG_GENERATE_DONE, -}; - -// Helper class for generating certificates asynchronously; a single task -// instance is responsible for a single asynchronous certificate generation -// request. We are using a separate helper class so that a generation request -// can outlive the |RTCCertificateGenerator| that spawned it. -class RTCCertificateGenerationTask : public RefCountInterface, - public MessageHandler { - public: - RTCCertificateGenerationTask( - Thread* signaling_thread, - Thread* worker_thread, - const KeyParams& key_params, - const absl::optional& expires_ms, - const scoped_refptr& callback) - : signaling_thread_(signaling_thread), - worker_thread_(worker_thread), - key_params_(key_params), - expires_ms_(expires_ms), - callback_(callback) { - RTC_DCHECK(signaling_thread_); - RTC_DCHECK(worker_thread_); - RTC_DCHECK(callback_); - } - ~RTCCertificateGenerationTask() override {} - - // Handles |MSG_GENERATE| and its follow-up |MSG_GENERATE_DONE|. - void OnMessage(Message* msg) override { - switch (msg->message_id) { - case MSG_GENERATE: - RTC_DCHECK(worker_thread_->IsCurrent()); - // Perform the certificate generation work here on the worker thread. 
- certificate_ = RTCCertificateGenerator::GenerateCertificate( - key_params_, expires_ms_); - // Handle callbacks on signaling thread. Pass on the |msg->pdata| - // (which references |this| with ref counting) to that thread. - signaling_thread_->Post(RTC_FROM_HERE, this, MSG_GENERATE_DONE, - msg->pdata); - break; - case MSG_GENERATE_DONE: - RTC_DCHECK(signaling_thread_->IsCurrent()); - // Perform callback with result here on the signaling thread. - if (certificate_) { - callback_->OnSuccess(certificate_); - } else { - callback_->OnFailure(); - } - // Destroy |msg->pdata| which references |this| with ref counting. This - // may result in |this| being deleted - do not touch member variables - // after this line. - delete msg->pdata; - return; - default: - RTC_NOTREACHED(); - } - } - - private: - Thread* const signaling_thread_; - Thread* const worker_thread_; - const KeyParams key_params_; - const absl::optional expires_ms_; - const scoped_refptr callback_; - scoped_refptr certificate_; -}; - } // namespace // static @@ -120,7 +51,7 @@ scoped_refptr RTCCertificateGenerator::GenerateCertificate( expires_s = std::min(expires_s, kYearInSeconds); // TODO(torbjorng): Stop using |time_t|, its type is unspecified. It it safe // to assume it can hold up to a year's worth of seconds (and more), but - // |SSLIdentity::Generate| should stop relying on |time_t|. + // |SSLIdentity::Create| should stop relying on |time_t|. // See bugs.webrtc.org/5720. time_t cert_lifetime_s = static_cast(expires_s); identity = SSLIdentity::Create(kIdentityName, key_params, cert_lifetime_s); @@ -148,13 +79,16 @@ void RTCCertificateGenerator::GenerateCertificateAsync( // Create a new |RTCCertificateGenerationTask| for this generation request. It // is reference counted and referenced by the message data, ensuring it lives // until the task has completed (independent of |RTCCertificateGenerator|). 
- ScopedRefMessageData* msg_data = - new ScopedRefMessageData( - new RefCountedObject( - signaling_thread_, worker_thread_, key_params, expires_ms, - callback)); - worker_thread_->Post(RTC_FROM_HERE, msg_data->data().get(), MSG_GENERATE, - msg_data); + worker_thread_->PostTask(RTC_FROM_HERE, [key_params, expires_ms, + signaling_thread = signaling_thread_, + cb = callback]() { + scoped_refptr certificate = + RTCCertificateGenerator::GenerateCertificate(key_params, expires_ms); + signaling_thread->PostTask( + RTC_FROM_HERE, [cert = std::move(certificate), cb = std::move(cb)]() { + cert ? cb->OnSuccess(cert) : cb->OnFailure(); + }); + }); } } // namespace rtc diff --git a/rtc_base/signal_thread.cc b/rtc_base/signal_thread.cc deleted file mode 100644 index e100fbe179..0000000000 --- a/rtc_base/signal_thread.cc +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2004 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "rtc_base/signal_thread.h" - -#include - -#include "rtc_base/checks.h" -#include "rtc_base/location.h" -#include "rtc_base/null_socket_server.h" -#include "rtc_base/socket_server.h" - -namespace rtc { - -/////////////////////////////////////////////////////////////////////////////// -// SignalThread -/////////////////////////////////////////////////////////////////////////////// - -SignalThread::SignalThread() - : main_(Thread::Current()), worker_(this), state_(kInit), refcount_(1) { - main_->SignalQueueDestroyed.connect(this, - &SignalThread::OnMainThreadDestroyed); - worker_.SetName("SignalThread", this); -} - -SignalThread::~SignalThread() { - RTC_DCHECK(refcount_ == 0); -} - -bool SignalThread::SetName(const std::string& name, const void* obj) { - EnterExit ee(this); - RTC_DCHECK(main_->IsCurrent()); - RTC_DCHECK(kInit == state_); - return worker_.SetName(name, obj); -} - -void SignalThread::Start() { - EnterExit ee(this); - RTC_DCHECK(main_->IsCurrent()); - if (kInit == state_ || kComplete == state_) { - state_ = kRunning; - OnWorkStart(); - worker_.Start(); - } else { - RTC_NOTREACHED(); - } -} - -void SignalThread::Destroy(bool wait) { - EnterExit ee(this); - RTC_DCHECK(main_->IsCurrent()); - if ((kInit == state_) || (kComplete == state_)) { - refcount_--; - } else if (kRunning == state_ || kReleasing == state_) { - state_ = kStopping; - // OnWorkStop() must follow Quit(), so that when the thread wakes up due to - // OWS(), ContinueWork() will return false. - worker_.Quit(); - OnWorkStop(); - if (wait) { - // Release the thread's lock so that it can return from ::Run. 
- cs_.Leave(); - worker_.Stop(); - cs_.Enter(); - refcount_--; - } - } else { - RTC_NOTREACHED(); - } -} - -void SignalThread::Release() { - EnterExit ee(this); - RTC_DCHECK(main_->IsCurrent()); - if (kComplete == state_) { - refcount_--; - } else if (kRunning == state_) { - state_ = kReleasing; - } else { - // if (kInit == state_) use Destroy() - RTC_NOTREACHED(); - } -} - -bool SignalThread::ContinueWork() { - EnterExit ee(this); - RTC_DCHECK(worker_.IsCurrent()); - return worker_.ProcessMessages(0); -} - -void SignalThread::OnMessage(Message* msg) { - EnterExit ee(this); - if (ST_MSG_WORKER_DONE == msg->message_id) { - RTC_DCHECK(main_->IsCurrent()); - OnWorkDone(); - bool do_delete = false; - if (kRunning == state_) { - state_ = kComplete; - } else { - do_delete = true; - } - if (kStopping != state_) { - // Before signaling that the work is done, make sure that the worker - // thread actually is done. We got here because DoWork() finished and - // Run() posted the ST_MSG_WORKER_DONE message. This means the worker - // thread is about to go away anyway, but sometimes it doesn't actually - // finish before SignalWorkDone is processed, and for a reusable - // SignalThread this makes an assert in thread.cc fire. - // - // Calling Stop() on the worker ensures that the OS thread that underlies - // the worker will finish, and will be set to null, enabling us to call - // Start() again. 
- worker_.Stop(); - SignalWorkDone(this); - } - if (do_delete) { - refcount_--; - } - } -} - -SignalThread::Worker::Worker(SignalThread* parent) - : Thread(std::make_unique(), /*do_init=*/false), - parent_(parent) { - DoInit(); -} - -SignalThread::Worker::~Worker() { - Stop(); -} - -void SignalThread::Worker::Run() { - parent_->Run(); -} - -void SignalThread::Run() { - DoWork(); - { - EnterExit ee(this); - if (main_) { - main_->Post(RTC_FROM_HERE, this, ST_MSG_WORKER_DONE); - } - } -} - -void SignalThread::OnMainThreadDestroyed() { - EnterExit ee(this); - main_ = nullptr; -} - -bool SignalThread::Worker::IsProcessingMessagesForTesting() { - return false; -} - -} // namespace rtc diff --git a/rtc_base/signal_thread.h b/rtc_base/signal_thread.h deleted file mode 100644 index d9e8ade9b0..0000000000 --- a/rtc_base/signal_thread.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright 2004 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef RTC_BASE_SIGNAL_THREAD_H_ -#define RTC_BASE_SIGNAL_THREAD_H_ - -#include - -#include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/message_handler.h" -#include "rtc_base/third_party/sigslot/sigslot.h" -#include "rtc_base/thread.h" -#include "rtc_base/thread_annotations.h" - -namespace rtc { - -/////////////////////////////////////////////////////////////////////////////// -// SignalThread - Base class for worker threads. The main thread should call -// Start() to begin work, and then follow one of these models: -// Normal: Wait for SignalWorkDone, and then call Release to destroy. 
-// Cancellation: Call Release(true), to abort the worker thread. -// Fire-and-forget: Call Release(false), which allows the thread to run to -// completion, and then self-destruct without further notification. -// Periodic tasks: Wait for SignalWorkDone, then eventually call Start() -// again to repeat the task. When the instance isn't needed anymore, -// call Release. DoWork, OnWorkStart and OnWorkStop are called again, -// on a new thread. -// The subclass should override DoWork() to perform the background task. By -// periodically calling ContinueWork(), it can check for cancellation. -// OnWorkStart and OnWorkDone can be overridden to do pre- or post-work -// tasks in the context of the main thread. -/////////////////////////////////////////////////////////////////////////////// - -class SignalThread : public sigslot::has_slots<>, protected MessageHandler { - public: - SignalThread(); - - // Context: Main Thread. Call before Start to change the worker's name. - bool SetName(const std::string& name, const void* obj); - - // Context: Main Thread. Call to begin the worker thread. - void Start(); - - // Context: Main Thread. If the worker thread is not running, deletes the - // object immediately. Otherwise, asks the worker thread to abort processing, - // and schedules the object to be deleted once the worker exits. - // SignalWorkDone will not be signalled. If wait is true, does not return - // until the thread is deleted. - void Destroy(bool wait); - - // Context: Main Thread. If the worker thread is complete, deletes the - // object immediately. Otherwise, schedules the object to be deleted once - // the worker thread completes. SignalWorkDone will be signalled. - void Release(); - - // Context: Main Thread. Signalled when work is complete. - sigslot::signal1 SignalWorkDone; - - enum { ST_MSG_WORKER_DONE, ST_MSG_FIRST_AVAILABLE }; - - protected: - ~SignalThread() override; - - Thread* worker() { return &worker_; } - - // Context: Main Thread. 
Subclass should override to do pre-work setup. - virtual void OnWorkStart() {} - - // Context: Worker Thread. Subclass should override to do work. - virtual void DoWork() = 0; - - // Context: Worker Thread. Subclass should call periodically to - // dispatch messages and determine if the thread should terminate. - bool ContinueWork(); - - // Context: Worker Thread. Subclass should override when extra work is - // needed to abort the worker thread. - virtual void OnWorkStop() {} - - // Context: Main Thread. Subclass should override to do post-work cleanup. - virtual void OnWorkDone() {} - - // Context: Any Thread. If subclass overrides, be sure to call the base - // implementation. Do not use (message_id < ST_MSG_FIRST_AVAILABLE) - void OnMessage(Message* msg) override; - - private: - enum State { - kInit, // Initialized, but not started - kRunning, // Started and doing work - kReleasing, // Same as running, but to be deleted when work is done - kComplete, // Work is done - kStopping, // Work is being interrupted - }; - - class Worker : public Thread { - public: - explicit Worker(SignalThread* parent); - ~Worker() override; - void Run() override; - bool IsProcessingMessagesForTesting() override; - - private: - SignalThread* parent_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(Worker); - }; - - class RTC_SCOPED_LOCKABLE EnterExit { - public: - explicit EnterExit(SignalThread* t) RTC_EXCLUSIVE_LOCK_FUNCTION(t->cs_) - : t_(t) { - t_->cs_.Enter(); - // If refcount_ is zero then the object has already been deleted and we - // will be double-deleting it in ~EnterExit()! 
(shouldn't happen) - RTC_DCHECK_NE(0, t_->refcount_); - ++t_->refcount_; - } - ~EnterExit() RTC_UNLOCK_FUNCTION() { - bool d = (0 == --t_->refcount_); - t_->cs_.Leave(); - if (d) - delete t_; - } - - private: - SignalThread* t_; - - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EnterExit); - }; - - void Run(); - void OnMainThreadDestroyed(); - - Thread* main_; - Worker worker_; - CriticalSection cs_; - State state_; - int refcount_; - - RTC_DISALLOW_COPY_AND_ASSIGN(SignalThread); -}; - -/////////////////////////////////////////////////////////////////////////////// - -} // namespace rtc - -#endif // RTC_BASE_SIGNAL_THREAD_H_ diff --git a/rtc_base/signal_thread_unittest.cc b/rtc_base/signal_thread_unittest.cc deleted file mode 100644 index 14761865b8..0000000000 --- a/rtc_base/signal_thread_unittest.cc +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright 2004 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "rtc_base/signal_thread.h" - -#include - -#include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/gunit.h" -#include "rtc_base/null_socket_server.h" -#include "rtc_base/thread.h" -#include "rtc_base/thread_annotations.h" -#include "test/gtest.h" - -namespace rtc { -namespace { - -// 10 seconds. 
-static const int kTimeout = 10000; - -class SignalThreadTest : public ::testing::Test, public sigslot::has_slots<> { - public: - class SlowSignalThread : public SignalThread { - public: - SlowSignalThread(SignalThreadTest* harness) : harness_(harness) {} - - ~SlowSignalThread() override { - EXPECT_EQ(harness_->main_thread_, Thread::Current()); - ++harness_->thread_deleted_; - } - - const SignalThreadTest* harness() { return harness_; } - - protected: - void OnWorkStart() override { - ASSERT_TRUE(harness_ != nullptr); - ++harness_->thread_started_; - EXPECT_EQ(harness_->main_thread_, Thread::Current()); - EXPECT_FALSE(worker()->RunningForTest()); // not started yet - } - - void OnWorkStop() override { - ++harness_->thread_stopped_; - EXPECT_EQ(harness_->main_thread_, Thread::Current()); - EXPECT_TRUE(worker()->RunningForTest()); // not stopped yet - } - - void OnWorkDone() override { - ++harness_->thread_done_; - EXPECT_EQ(harness_->main_thread_, Thread::Current()); - EXPECT_TRUE(worker()->RunningForTest()); // not stopped yet - } - - void DoWork() override { - EXPECT_NE(harness_->main_thread_, Thread::Current()); - EXPECT_EQ(worker(), Thread::Current()); - Thread::Current()->socketserver()->Wait(250, false); - } - - private: - SignalThreadTest* harness_; - RTC_DISALLOW_COPY_AND_ASSIGN(SlowSignalThread); - }; - - void OnWorkComplete(rtc::SignalThread* thread) { - SlowSignalThread* t = static_cast(thread); - EXPECT_EQ(t->harness(), this); - EXPECT_EQ(main_thread_, Thread::Current()); - - ++thread_completed_; - if (!called_release_) { - thread->Release(); - } - } - - void SetUp() override { - main_thread_ = Thread::Current(); - thread_ = new SlowSignalThread(this); - thread_->SignalWorkDone.connect(this, &SignalThreadTest::OnWorkComplete); - called_release_ = false; - thread_started_ = 0; - thread_done_ = 0; - thread_completed_ = 0; - thread_stopped_ = 0; - thread_deleted_ = 0; - } - - void ExpectState(int started, - int done, - int completed, - int stopped, - int 
deleted) { - EXPECT_EQ(started, thread_started_); - EXPECT_EQ(done, thread_done_); - EXPECT_EQ(completed, thread_completed_); - EXPECT_EQ(stopped, thread_stopped_); - EXPECT_EQ(deleted, thread_deleted_); - } - - void ExpectStateWait(int started, - int done, - int completed, - int stopped, - int deleted, - int timeout) { - EXPECT_EQ_WAIT(started, thread_started_, timeout); - EXPECT_EQ_WAIT(done, thread_done_, timeout); - EXPECT_EQ_WAIT(completed, thread_completed_, timeout); - EXPECT_EQ_WAIT(stopped, thread_stopped_, timeout); - EXPECT_EQ_WAIT(deleted, thread_deleted_, timeout); - } - - Thread* main_thread_; - SlowSignalThread* thread_; - bool called_release_; - - int thread_started_; - int thread_done_; - int thread_completed_; - int thread_stopped_; - int thread_deleted_; -}; - -class OwnerThread : public Thread, public sigslot::has_slots<> { - public: - explicit OwnerThread(SignalThreadTest* harness) - : Thread(std::make_unique()), - harness_(harness), - has_run_(false) {} - - ~OwnerThread() override { Stop(); } - - void Run() override { - SignalThreadTest::SlowSignalThread* signal_thread = - new SignalThreadTest::SlowSignalThread(harness_); - signal_thread->SignalWorkDone.connect(this, &OwnerThread::OnWorkDone); - signal_thread->Start(); - Thread::Current()->socketserver()->Wait(100, false); - signal_thread->Release(); - // Delete |signal_thread|. - signal_thread->Destroy(true); - { - rtc::CritScope cs(&crit_); - has_run_ = true; - } - } - - bool has_run() { - rtc::CritScope cs(&crit_); - return has_run_; - } - void OnWorkDone(SignalThread* /*signal_thread*/) { - FAIL() << " This shouldn't get called."; - } - - private: - rtc::CriticalSection crit_; - SignalThreadTest* harness_; - bool has_run_ RTC_GUARDED_BY(crit_); - RTC_DISALLOW_COPY_AND_ASSIGN(OwnerThread); -}; - -// Test for when the main thread goes away while the -// signal thread is still working. This may happen -// when shutting down the process. 
-TEST_F(SignalThreadTest, OwnerThreadGoesAway) { - // We don't use |thread_| for this test, so destroy it. - thread_->Destroy(true); - - { - std::unique_ptr owner(new OwnerThread(this)); - main_thread_ = owner.get(); - owner->Start(); - while (!owner->has_run()) { - Thread::Current()->socketserver()->Wait(10, false); - } - } - // At this point the main thread has gone away. - // Give the SignalThread a little time to do its callback, - // which will crash if the signal thread doesn't handle - // this situation well. - Thread::Current()->socketserver()->Wait(500, false); -} - -TEST_F(SignalThreadTest, ThreadFinishes) { - thread_->Start(); - ExpectState(1, 0, 0, 0, 0); - ExpectStateWait(1, 1, 1, 0, 1, kTimeout); -} - -TEST_F(SignalThreadTest, ReleasedThreadFinishes) { - thread_->Start(); - ExpectState(1, 0, 0, 0, 0); - thread_->Release(); - called_release_ = true; - ExpectState(1, 0, 0, 0, 0); - ExpectStateWait(1, 1, 1, 0, 1, kTimeout); -} - -TEST_F(SignalThreadTest, DestroyedThreadCleansUp) { - thread_->Start(); - ExpectState(1, 0, 0, 0, 0); - thread_->Destroy(true); - ExpectState(1, 0, 0, 1, 1); - Thread::Current()->ProcessMessages(0); - ExpectState(1, 0, 0, 1, 1); -} - -TEST_F(SignalThreadTest, DeferredDestroyedThreadCleansUp) { - thread_->Start(); - ExpectState(1, 0, 0, 0, 0); - thread_->Destroy(false); - ExpectState(1, 0, 0, 1, 0); - ExpectStateWait(1, 1, 0, 1, 1, kTimeout); -} - -} // namespace -} // namespace rtc diff --git a/rtc_base/socket.h b/rtc_base/socket.h index c2d1e3d29a..6b3ad5e9f2 100644 --- a/rtc_base/socket.h +++ b/rtc_base/socket.h @@ -59,6 +59,8 @@ #define ECONNREFUSED WSAECONNREFUSED #undef EHOSTUNREACH #define EHOSTUNREACH WSAEHOSTUNREACH +#undef ENETUNREACH +#define ENETUNREACH WSAENETUNREACH #define SOCKET_EACCES WSAEACCES #endif // WEBRTC_WIN diff --git a/rtc_base/socket_address.cc b/rtc_base/socket_address.cc index 639be52c54..2996ede9d2 100644 --- a/rtc_base/socket_address.cc +++ b/rtc_base/socket_address.cc @@ -178,6 +178,16 @@ 
std::string SocketAddress::ToSensitiveString() const { return sb.str(); } +std::string SocketAddress::ToResolvedSensitiveString() const { + if (IsUnresolvedIP()) { + return ""; + } + char buf[1024]; + rtc::SimpleStringBuilder sb(buf); + sb << ipaddr().ToSensitiveString() << ":" << port(); + return sb.str(); +} + bool SocketAddress::FromString(const std::string& str) { if (str.at(0) == '[') { std::string::size_type closebracket = str.rfind(']'); diff --git a/rtc_base/socket_address.h b/rtc_base/socket_address.h index 6ee3d37bce..570a71281e 100644 --- a/rtc_base/socket_address.h +++ b/rtc_base/socket_address.h @@ -12,9 +12,9 @@ #define RTC_BASE_SOCKET_ADDRESS_H_ #include -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST #include // no-presubmit-check TODO(webrtc:8982) -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST #include "rtc_base/ip_address.h" #include "rtc_base/system/rtc_export.h" @@ -124,15 +124,19 @@ class RTC_EXPORT SocketAddress { // Same as ToString but anonymizes it by hiding the last part. std::string ToSensitiveString() const; + // Returns hostname:port string if address is resolved, otherwise returns + // empty string. + std::string ToResolvedSensitiveString() const; + // Parses hostname:port and [hostname]:port. bool FromString(const std::string& str); -#ifdef UNIT_TEST +#ifdef WEBRTC_UNIT_TEST inline std::ostream& operator<<( // no-presubmit-check TODO(webrtc:8982) std::ostream& os) { // no-presubmit-check TODO(webrtc:8982) return os << HostAsURIString() << ":" << port(); } -#endif // UNIT_TEST +#endif // WEBRTC_UNIT_TEST // Determines whether this represents a missing / any IP address. // That is, 0.0.0.0 or ::. 
diff --git a/rtc_base/socket_address_unittest.cc b/rtc_base/socket_address_unittest.cc index 14da8cb519..d1c911abff 100644 --- a/rtc_base/socket_address_unittest.cc +++ b/rtc_base/socket_address_unittest.cc @@ -323,25 +323,15 @@ TEST(SocketAddressTest, TestToSensitiveString) { EXPECT_EQ("1.2.3.4", addr_v4.HostAsURIString()); EXPECT_EQ("1.2.3.4:5678", addr_v4.ToString()); -#if defined(NDEBUG) EXPECT_EQ("1.2.3.x", addr_v4.HostAsSensitiveURIString()); EXPECT_EQ("1.2.3.x:5678", addr_v4.ToSensitiveString()); -#else - EXPECT_EQ("1.2.3.4", addr_v4.HostAsSensitiveURIString()); - EXPECT_EQ("1.2.3.4:5678", addr_v4.ToSensitiveString()); -#endif // defined(NDEBUG) SocketAddress addr_v6(kTestV6AddrString, 5678); EXPECT_EQ("[" + kTestV6AddrString + "]", addr_v6.HostAsURIString()); EXPECT_EQ(kTestV6AddrFullString, addr_v6.ToString()); -#if defined(NDEBUG) EXPECT_EQ("[" + kTestV6AddrAnonymizedString + "]", addr_v6.HostAsSensitiveURIString()); EXPECT_EQ(kTestV6AddrFullAnonymizedString, addr_v6.ToSensitiveString()); -#else - EXPECT_EQ("[" + kTestV6AddrString + "]", addr_v6.HostAsSensitiveURIString()); - EXPECT_EQ(kTestV6AddrFullString, addr_v6.ToSensitiveString()); -#endif // defined(NDEBUG) } } // namespace rtc diff --git a/rtc_base/socket_server.h b/rtc_base/socket_server.h index 98971e4d84..face04dbc2 100644 --- a/rtc_base/socket_server.h +++ b/rtc_base/socket_server.h @@ -33,9 +33,10 @@ class SocketServer : public SocketFactory { static const int kForever = -1; static std::unique_ptr CreateDefault(); - // When the socket server is installed into a Thread, this function is - // called to allow the socket server to use the thread's message queue for - // any messaging that it might need to perform. + // When the socket server is installed into a Thread, this function is called + // to allow the socket server to use the thread's message queue for any + // messaging that it might need to perform. It is also called with a null + // argument before the thread is destroyed. 
virtual void SetMessageQueue(Thread* queue) {} // Sleeps until: diff --git a/rtc_base/socket_unittest.cc b/rtc_base/socket_unittest.cc index 6ea4b47bd1..82e2f6d4b2 100644 --- a/rtc_base/socket_unittest.cc +++ b/rtc_base/socket_unittest.cc @@ -149,6 +149,15 @@ void SocketTest::TestCloseInClosedCallbackIPv6() { CloseInClosedCallbackInternal(kIPv6Loopback); } +void SocketTest::TestDeleteInReadCallbackIPv4() { + DeleteInReadCallbackInternal(kIPv4Loopback); +} + +void SocketTest::TestDeleteInReadCallbackIPv6() { + MAYBE_SKIP_IPV6; + DeleteInReadCallbackInternal(kIPv6Loopback); +} + void SocketTest::TestSocketServerWaitIPv4() { SocketServerWaitInternal(kIPv4Loopback); } @@ -651,7 +660,43 @@ void SocketTest::CloseInClosedCallbackInternal(const IPAddress& loopback) { EXPECT_TRUE(Socket::CS_CLOSED == client->GetState()); } -class Sleeper : public MessageHandler { +// Helper class specifically for the test below. +class SocketDeleter : public sigslot::has_slots<> { + public: + explicit SocketDeleter(std::unique_ptr socket) + : socket_(std::move(socket)) {} + + void Delete(AsyncSocket* other) { socket_.reset(); } + + bool deleted() const { return socket_ == nullptr; } + + private: + std::unique_ptr socket_; +}; + +// Tested deleting a socket within another socket's read callback. A previous +// iteration of the select loop failed in this situation, if both sockets +// became readable at the same time. +void SocketTest::DeleteInReadCallbackInternal(const IPAddress& loopback) { + std::unique_ptr socket1( + ss_->CreateAsyncSocket(loopback.family(), SOCK_DGRAM)); + std::unique_ptr socket2( + ss_->CreateAsyncSocket(loopback.family(), SOCK_DGRAM)); + EXPECT_EQ(0, socket1->Bind(SocketAddress(loopback, 0))); + EXPECT_EQ(0, socket2->Bind(SocketAddress(loopback, 0))); + EXPECT_EQ(3, socket1->SendTo("foo", 3, socket1->GetLocalAddress())); + EXPECT_EQ(3, socket2->SendTo("bar", 3, socket1->GetLocalAddress())); + // Sleep a while to ensure sends are both completed at the same time. 
+ Thread::SleepMs(1000); + + // Configure the helper class to delete socket 2 when socket 1 has a read + // event. + SocketDeleter deleter(std::move(socket2)); + socket1->SignalReadEvent.connect(&deleter, &SocketDeleter::Delete); + EXPECT_TRUE_WAIT(deleter.deleted(), kTimeout); +} + +class Sleeper : public MessageHandlerAutoCleanup { public: void OnMessage(Message* msg) override { Thread::Current()->SleepMs(500); } }; diff --git a/rtc_base/socket_unittest.h b/rtc_base/socket_unittest.h index 5197ccd82d..91ef39c59e 100644 --- a/rtc_base/socket_unittest.h +++ b/rtc_base/socket_unittest.h @@ -46,6 +46,8 @@ class SocketTest : public ::testing::Test { void TestServerCloseIPv6(); void TestCloseInClosedCallbackIPv4(); void TestCloseInClosedCallbackIPv6(); + void TestDeleteInReadCallbackIPv4(); + void TestDeleteInReadCallbackIPv6(); void TestSocketServerWaitIPv4(); void TestSocketServerWaitIPv6(); void TestTcpIPv4(); @@ -83,6 +85,7 @@ class SocketTest : public ::testing::Test { void ClientCloseDuringConnectInternal(const IPAddress& loopback); void ServerCloseInternal(const IPAddress& loopback); void CloseInClosedCallbackInternal(const IPAddress& loopback); + void DeleteInReadCallbackInternal(const IPAddress& loopback); void SocketServerWaitInternal(const IPAddress& loopback); void SingleFlowControlCallbackInternal(const IPAddress& loopback); void UdpInternal(const IPAddress& loopback); diff --git a/rtc_base/ssl_certificate.cc b/rtc_base/ssl_certificate.cc index db9097b9a3..3f7013ee11 100644 --- a/rtc_base/ssl_certificate.cc +++ b/rtc_base/ssl_certificate.cc @@ -16,7 +16,12 @@ #include "absl/algorithm/container.h" #include "rtc_base/checks.h" -#include "rtc_base/openssl_certificate.h" +#include "rtc_base/openssl.h" +#ifdef OPENSSL_IS_BORINGSSL +#include "rtc_base/boringssl_identity.h" +#else +#include "rtc_base/openssl_identity.h" +#endif #include "rtc_base/ssl_fingerprint.h" #include "rtc_base/third_party/base64/base64.h" @@ -117,7 +122,11 @@ std::unique_ptr 
SSLCertChain::GetStats() const { // static std::unique_ptr SSLCertificate::FromPEMString( const std::string& pem_string) { +#ifdef OPENSSL_IS_BORINGSSL + return BoringSSLCertificate::FromPEMString(pem_string); +#else return OpenSSLCertificate::FromPEMString(pem_string); +#endif } } // namespace rtc diff --git a/rtc_base/ssl_fingerprint.cc b/rtc_base/ssl_fingerprint.cc index 5b261e0f53..358402eb03 100644 --- a/rtc_base/ssl_fingerprint.cc +++ b/rtc_base/ssl_fingerprint.cc @@ -103,9 +103,6 @@ SSLFingerprint::SSLFingerprint(const std::string& algorithm, size_t digest_len) : SSLFingerprint(algorithm, MakeArrayView(digest_in, digest_len)) {} -SSLFingerprint::SSLFingerprint(const SSLFingerprint& from) - : algorithm(from.algorithm), digest(from.digest) {} - bool SSLFingerprint::operator==(const SSLFingerprint& other) const { return algorithm == other.algorithm && digest == other.digest; } diff --git a/rtc_base/ssl_fingerprint.h b/rtc_base/ssl_fingerprint.h index d65d665d83..add3ab7911 100644 --- a/rtc_base/ssl_fingerprint.h +++ b/rtc_base/ssl_fingerprint.h @@ -57,7 +57,8 @@ struct RTC_EXPORT SSLFingerprint { const uint8_t* digest_in, size_t digest_len); - SSLFingerprint(const SSLFingerprint& from); + SSLFingerprint(const SSLFingerprint& from) = default; + SSLFingerprint& operator=(const SSLFingerprint& from) = default; bool operator==(const SSLFingerprint& other) const; diff --git a/rtc_base/ssl_identity.cc b/rtc_base/ssl_identity.cc index 09d25d228e..8d93ecfe23 100644 --- a/rtc_base/ssl_identity.cc +++ b/rtc_base/ssl_identity.cc @@ -11,12 +11,16 @@ // Handling of certificates and keypairs for SSLStreamAdapter's peer mode. 
#include "rtc_base/ssl_identity.h" +#include #include #include -#include #include "rtc_base/checks.h" +#ifdef OPENSSL_IS_BORINGSSL +#include "rtc_base/boringssl_identity.h" +#else #include "rtc_base/openssl_identity.h" +#endif #include "rtc_base/ssl_certificate.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/third_party/base64/base64.h" @@ -213,28 +217,36 @@ std::string SSLIdentity::DerToPem(const std::string& pem_type, std::unique_ptr SSLIdentity::Create(const std::string& common_name, const KeyParams& key_param, time_t certificate_lifetime) { +#ifdef OPENSSL_IS_BORINGSSL + return BoringSSLIdentity::CreateWithExpiration(common_name, key_param, + certificate_lifetime); +#else return OpenSSLIdentity::CreateWithExpiration(common_name, key_param, certificate_lifetime); +#endif } // static std::unique_ptr SSLIdentity::Create(const std::string& common_name, const KeyParams& key_param) { - return OpenSSLIdentity::CreateWithExpiration( - common_name, key_param, kDefaultCertificateLifetimeInSeconds); + return Create(common_name, key_param, kDefaultCertificateLifetimeInSeconds); } // static std::unique_ptr SSLIdentity::Create(const std::string& common_name, KeyType key_type) { - return OpenSSLIdentity::CreateWithExpiration( - common_name, KeyParams(key_type), kDefaultCertificateLifetimeInSeconds); + return Create(common_name, KeyParams(key_type), + kDefaultCertificateLifetimeInSeconds); } // static std::unique_ptr SSLIdentity::CreateForTest( const SSLIdentityParams& params) { +#ifdef OPENSSL_IS_BORINGSSL + return BoringSSLIdentity::CreateForTest(params); +#else return OpenSSLIdentity::CreateForTest(params); +#endif } // Construct an identity from a private key and a certificate. 
@@ -242,7 +254,11 @@ std::unique_ptr SSLIdentity::CreateForTest( std::unique_ptr SSLIdentity::CreateFromPEMStrings( const std::string& private_key, const std::string& certificate) { +#ifdef OPENSSL_IS_BORINGSSL + return BoringSSLIdentity::CreateFromPEMStrings(private_key, certificate); +#else return OpenSSLIdentity::CreateFromPEMStrings(private_key, certificate); +#endif } // Construct an identity from a private key and a certificate chain. @@ -250,13 +266,23 @@ std::unique_ptr SSLIdentity::CreateFromPEMStrings( std::unique_ptr SSLIdentity::CreateFromPEMChainStrings( const std::string& private_key, const std::string& certificate_chain) { +#ifdef OPENSSL_IS_BORINGSSL + return BoringSSLIdentity::CreateFromPEMChainStrings(private_key, + certificate_chain); +#else return OpenSSLIdentity::CreateFromPEMChainStrings(private_key, certificate_chain); +#endif } bool operator==(const SSLIdentity& a, const SSLIdentity& b) { +#ifdef OPENSSL_IS_BORINGSSL + return static_cast(a) == + static_cast(b); +#else return static_cast(a) == static_cast(b); +#endif } bool operator!=(const SSLIdentity& a, const SSLIdentity& b) { return !(a == b); diff --git a/rtc_base/ssl_identity.h b/rtc_base/ssl_identity.h index d078b045a7..a9167ef5eb 100644 --- a/rtc_base/ssl_identity.h +++ b/rtc_base/ssl_identity.h @@ -18,7 +18,6 @@ #include #include -#include "rtc_base/deprecation.h" #include "rtc_base/system/rtc_export.h" namespace rtc { diff --git a/rtc_base/ssl_identity_unittest.cc b/rtc_base/ssl_identity_unittest.cc index 0d9d0fd859..a907bfc3ed 100644 --- a/rtc_base/ssl_identity_unittest.cc +++ b/rtc_base/ssl_identity_unittest.cc @@ -65,7 +65,7 @@ const unsigned char kTestCertSha512[] = { 0x35, 0xce, 0x26, 0x58, 0x4a, 0x33, 0x6d, 0xbc, 0xb6}; // These PEM strings were created by generating an identity with -// |SSLIdentity::Generate| and invoking |identity->PrivateKeyToPEMString()|, +// |SSLIdentity::Create| and invoking |identity->PrivateKeyToPEMString()|, // |identity->PublicKeyToPEMString()| and 
// |identity->certificate().ToPEMString()|. If the crypto library is updated, // and the update changes the string form of the keys, these will have to be @@ -406,6 +406,21 @@ TEST_F(SSLIdentityTest, FromPEMStringsEC) { EXPECT_EQ(kECDSA_CERT_PEM, identity->certificate().ToPEMString()); } +TEST_F(SSLIdentityTest, FromPEMChainStrings) { + // This doesn't form a valid certificate chain, but that doesn't matter for + // the purposes of the test + std::string chain(kRSA_CERT_PEM); + chain.append(kTestCertificate); + std::unique_ptr identity( + SSLIdentity::CreateFromPEMChainStrings(kRSA_PRIVATE_KEY_PEM, chain)); + EXPECT_TRUE(identity); + EXPECT_EQ(kRSA_PRIVATE_KEY_PEM, identity->PrivateKeyToPEMString()); + EXPECT_EQ(kRSA_PUBLIC_KEY_PEM, identity->PublicKeyToPEMString()); + ASSERT_EQ(2u, identity->cert_chain().GetSize()); + EXPECT_EQ(kRSA_CERT_PEM, identity->cert_chain().Get(0).ToPEMString()); + EXPECT_EQ(kTestCertificate, identity->cert_chain().Get(1).ToPEMString()); +} + TEST_F(SSLIdentityTest, CloneIdentityRSA) { TestCloningIdentity(*identity_rsa1_); TestCloningIdentity(*identity_rsa2_); diff --git a/rtc_base/ssl_roots.h b/rtc_base/ssl_roots.h index 23a3836e6b..8f869f4a9e 100644 --- a/rtc_base/ssl_roots.h +++ b/rtc_base/ssl_roots.h @@ -15,8 +15,8 @@ // Google. 
// It was generated with the following command line: -// > python tools/sslroots/generate_sslroots.py -// https://pki.google.com/roots.pem +// > python tools_webrtc/sslroots/generate_sslroots.py +// https://pki.goog/roots.pem // clang-format off // Don't bother formatting generated code, @@ -1699,82 +1699,6 @@ const unsigned char GlobalSign_ECC_Root_CA___R5_certificate[546]={ }; -/* subject:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root */ -/* issuer :/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root */ - - -const unsigned char AddTrust_External_Root_certificate[1082]={ -0x30,0x82,0x04,0x36,0x30,0x82,0x03,0x1E,0xA0,0x03,0x02,0x01,0x02,0x02,0x01,0x01, -0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01,0x01,0x05,0x05,0x00,0x30, -0x6F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31,0x14, -0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75,0x73, -0x74,0x20,0x41,0x42,0x31,0x26,0x30,0x24,0x06,0x03,0x55,0x04,0x0B,0x13,0x1D,0x41, -0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x45,0x78,0x74,0x65,0x72,0x6E,0x61,0x6C, -0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x22,0x30,0x20, -0x06,0x03,0x55,0x04,0x03,0x13,0x19,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20, -0x45,0x78,0x74,0x65,0x72,0x6E,0x61,0x6C,0x20,0x43,0x41,0x20,0x52,0x6F,0x6F,0x74, -0x30,0x1E,0x17,0x0D,0x30,0x30,0x30,0x35,0x33,0x30,0x31,0x30,0x34,0x38,0x33,0x38, -0x5A,0x17,0x0D,0x32,0x30,0x30,0x35,0x33,0x30,0x31,0x30,0x34,0x38,0x33,0x38,0x5A, -0x30,0x6F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53,0x45,0x31, -0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54,0x72,0x75, -0x73,0x74,0x20,0x41,0x42,0x31,0x26,0x30,0x24,0x06,0x03,0x55,0x04,0x0B,0x13,0x1D, -0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x45,0x78,0x74,0x65,0x72,0x6E,0x61, -0x6C,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31,0x22,0x30, 
-0x20,0x06,0x03,0x55,0x04,0x03,0x13,0x19,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74, -0x20,0x45,0x78,0x74,0x65,0x72,0x6E,0x61,0x6C,0x20,0x43,0x41,0x20,0x52,0x6F,0x6F, -0x74,0x30,0x82,0x01,0x22,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D,0x01, -0x01,0x01,0x05,0x00,0x03,0x82,0x01,0x0F,0x00,0x30,0x82,0x01,0x0A,0x02,0x82,0x01, -0x01,0x00,0xB7,0xF7,0x1A,0x33,0xE6,0xF2,0x00,0x04,0x2D,0x39,0xE0,0x4E,0x5B,0xED, -0x1F,0xBC,0x6C,0x0F,0xCD,0xB5,0xFA,0x23,0xB6,0xCE,0xDE,0x9B,0x11,0x33,0x97,0xA4, -0x29,0x4C,0x7D,0x93,0x9F,0xBD,0x4A,0xBC,0x93,0xED,0x03,0x1A,0xE3,0x8F,0xCF,0xE5, -0x6D,0x50,0x5A,0xD6,0x97,0x29,0x94,0x5A,0x80,0xB0,0x49,0x7A,0xDB,0x2E,0x95,0xFD, -0xB8,0xCA,0xBF,0x37,0x38,0x2D,0x1E,0x3E,0x91,0x41,0xAD,0x70,0x56,0xC7,0xF0,0x4F, -0x3F,0xE8,0x32,0x9E,0x74,0xCA,0xC8,0x90,0x54,0xE9,0xC6,0x5F,0x0F,0x78,0x9D,0x9A, -0x40,0x3C,0x0E,0xAC,0x61,0xAA,0x5E,0x14,0x8F,0x9E,0x87,0xA1,0x6A,0x50,0xDC,0xD7, -0x9A,0x4E,0xAF,0x05,0xB3,0xA6,0x71,0x94,0x9C,0x71,0xB3,0x50,0x60,0x0A,0xC7,0x13, -0x9D,0x38,0x07,0x86,0x02,0xA8,0xE9,0xA8,0x69,0x26,0x18,0x90,0xAB,0x4C,0xB0,0x4F, -0x23,0xAB,0x3A,0x4F,0x84,0xD8,0xDF,0xCE,0x9F,0xE1,0x69,0x6F,0xBB,0xD7,0x42,0xD7, -0x6B,0x44,0xE4,0xC7,0xAD,0xEE,0x6D,0x41,0x5F,0x72,0x5A,0x71,0x08,0x37,0xB3,0x79, -0x65,0xA4,0x59,0xA0,0x94,0x37,0xF7,0x00,0x2F,0x0D,0xC2,0x92,0x72,0xDA,0xD0,0x38, -0x72,0xDB,0x14,0xA8,0x45,0xC4,0x5D,0x2A,0x7D,0xB7,0xB4,0xD6,0xC4,0xEE,0xAC,0xCD, -0x13,0x44,0xB7,0xC9,0x2B,0xDD,0x43,0x00,0x25,0xFA,0x61,0xB9,0x69,0x6A,0x58,0x23, -0x11,0xB7,0xA7,0x33,0x8F,0x56,0x75,0x59,0xF5,0xCD,0x29,0xD7,0x46,0xB7,0x0A,0x2B, -0x65,0xB6,0xD3,0x42,0x6F,0x15,0xB2,0xB8,0x7B,0xFB,0xEF,0xE9,0x5D,0x53,0xD5,0x34, -0x5A,0x27,0x02,0x03,0x01,0x00,0x01,0xA3,0x81,0xDC,0x30,0x81,0xD9,0x30,0x1D,0x06, -0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0xAD,0xBD,0x98,0x7A,0x34,0xB4,0x26,0xF7, -0xFA,0xC4,0x26,0x54,0xEF,0x03,0xBD,0xE0,0x24,0xCB,0x54,0x1A,0x30,0x0B,0x06,0x03, -0x55,0x1D,0x0F,0x04,0x04,0x03,0x02,0x01,0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13, 
-0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01,0x01,0xFF,0x30,0x81,0x99,0x06,0x03,0x55, -0x1D,0x23,0x04,0x81,0x91,0x30,0x81,0x8E,0x80,0x14,0xAD,0xBD,0x98,0x7A,0x34,0xB4, -0x26,0xF7,0xFA,0xC4,0x26,0x54,0xEF,0x03,0xBD,0xE0,0x24,0xCB,0x54,0x1A,0xA1,0x73, -0xA4,0x71,0x30,0x6F,0x31,0x0B,0x30,0x09,0x06,0x03,0x55,0x04,0x06,0x13,0x02,0x53, -0x45,0x31,0x14,0x30,0x12,0x06,0x03,0x55,0x04,0x0A,0x13,0x0B,0x41,0x64,0x64,0x54, -0x72,0x75,0x73,0x74,0x20,0x41,0x42,0x31,0x26,0x30,0x24,0x06,0x03,0x55,0x04,0x0B, -0x13,0x1D,0x41,0x64,0x64,0x54,0x72,0x75,0x73,0x74,0x20,0x45,0x78,0x74,0x65,0x72, -0x6E,0x61,0x6C,0x20,0x54,0x54,0x50,0x20,0x4E,0x65,0x74,0x77,0x6F,0x72,0x6B,0x31, -0x22,0x30,0x20,0x06,0x03,0x55,0x04,0x03,0x13,0x19,0x41,0x64,0x64,0x54,0x72,0x75, -0x73,0x74,0x20,0x45,0x78,0x74,0x65,0x72,0x6E,0x61,0x6C,0x20,0x43,0x41,0x20,0x52, -0x6F,0x6F,0x74,0x82,0x01,0x01,0x30,0x0D,0x06,0x09,0x2A,0x86,0x48,0x86,0xF7,0x0D, -0x01,0x01,0x05,0x05,0x00,0x03,0x82,0x01,0x01,0x00,0xB0,0x9B,0xE0,0x85,0x25,0xC2, -0xD6,0x23,0xE2,0x0F,0x96,0x06,0x92,0x9D,0x41,0x98,0x9C,0xD9,0x84,0x79,0x81,0xD9, -0x1E,0x5B,0x14,0x07,0x23,0x36,0x65,0x8F,0xB0,0xD8,0x77,0xBB,0xAC,0x41,0x6C,0x47, -0x60,0x83,0x51,0xB0,0xF9,0x32,0x3D,0xE7,0xFC,0xF6,0x26,0x13,0xC7,0x80,0x16,0xA5, -0xBF,0x5A,0xFC,0x87,0xCF,0x78,0x79,0x89,0x21,0x9A,0xE2,0x4C,0x07,0x0A,0x86,0x35, -0xBC,0xF2,0xDE,0x51,0xC4,0xD2,0x96,0xB7,0xDC,0x7E,0x4E,0xEE,0x70,0xFD,0x1C,0x39, -0xEB,0x0C,0x02,0x51,0x14,0x2D,0x8E,0xBD,0x16,0xE0,0xC1,0xDF,0x46,0x75,0xE7,0x24, -0xAD,0xEC,0xF4,0x42,0xB4,0x85,0x93,0x70,0x10,0x67,0xBA,0x9D,0x06,0x35,0x4A,0x18, -0xD3,0x2B,0x7A,0xCC,0x51,0x42,0xA1,0x7A,0x63,0xD1,0xE6,0xBB,0xA1,0xC5,0x2B,0xC2, -0x36,0xBE,0x13,0x0D,0xE6,0xBD,0x63,0x7E,0x79,0x7B,0xA7,0x09,0x0D,0x40,0xAB,0x6A, -0xDD,0x8F,0x8A,0xC3,0xF6,0xF6,0x8C,0x1A,0x42,0x05,0x51,0xD4,0x45,0xF5,0x9F,0xA7, -0x62,0x21,0x68,0x15,0x20,0x43,0x3C,0x99,0xE7,0x7C,0xBD,0x24,0xD8,0xA9,0x91,0x17, -0x73,0x88,0x3F,0x56,0x1B,0x31,0x38,0x18,0xB4,0x71,0x0F,0x9A,0xCD,0xC8,0x0E,0x9E, 
-0x8E,0x2E,0x1B,0xE1,0x8C,0x98,0x83,0xCB,0x1F,0x31,0xF1,0x44,0x4C,0xC6,0x04,0x73, -0x49,0x76,0x60,0x0F,0xC7,0xF8,0xBD,0x17,0x80,0x6B,0x2E,0xE9,0xCC,0x4C,0x0E,0x5A, -0x9A,0x79,0x0F,0x20,0x0A,0x2E,0xD5,0x9E,0x63,0x26,0x1E,0x55,0x92,0x94,0xD8,0x82, -0x17,0x5A,0x7B,0xD0,0xBC,0xC7,0x8F,0x4E,0x86,0x04, -}; - - /* subject:/C=US/ST=New Jersey/L=Jersey City/O=The USERTRUST Network/CN=USERTrust ECC Certification Authority */ /* issuer :/C=US/ST=New Jersey/L=Jersey City/O=The USERTRUST Network/CN=USERTrust ECC Certification Authority */ @@ -2572,50 +2496,6 @@ const unsigned char Entrust_Root_Certification_Authority___EC1_certificate[765]= }; -/* subject:/C=BE/O=GlobalSign nv-sa/OU=Root CA/CN=GlobalSign Root CA - R8 */ -/* issuer :/C=BE/O=GlobalSign nv-sa/OU=Root CA/CN=GlobalSign Root CA - R8 */ - - -const unsigned char GlobalSign_Root_CA___R8_certificate[567]={ -0x30,0x82,0x02,0x33,0x30,0x82,0x01,0xB9,0xA0,0x03,0x02,0x01,0x02,0x02,0x0E,0x48, -0x1B,0x6A,0x09,0xF4,0xF9,0x60,0x71,0x3A,0xFE,0x81,0xCC,0x86,0xDD,0x30,0x0A,0x06, -0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x30,0x5C,0x31,0x0B,0x30,0x09,0x06, -0x03,0x55,0x04,0x06,0x13,0x02,0x42,0x45,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04, -0x0A,0x13,0x10,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x6E,0x76, -0x2D,0x73,0x61,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x0B,0x13,0x07,0x52,0x6F, -0x6F,0x74,0x20,0x43,0x41,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17, -0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x52,0x6F,0x6F,0x74,0x20, -0x43,0x41,0x20,0x2D,0x20,0x52,0x38,0x30,0x1E,0x17,0x0D,0x31,0x36,0x30,0x36,0x31, -0x35,0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x17,0x0D,0x33,0x36,0x30,0x36,0x31,0x35, -0x30,0x30,0x30,0x30,0x30,0x30,0x5A,0x30,0x5C,0x31,0x0B,0x30,0x09,0x06,0x03,0x55, -0x04,0x06,0x13,0x02,0x42,0x45,0x31,0x19,0x30,0x17,0x06,0x03,0x55,0x04,0x0A,0x13, -0x10,0x47,0x6C,0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x6E,0x76,0x2D,0x73, 
-0x61,0x31,0x10,0x30,0x0E,0x06,0x03,0x55,0x04,0x0B,0x13,0x07,0x52,0x6F,0x6F,0x74, -0x20,0x43,0x41,0x31,0x20,0x30,0x1E,0x06,0x03,0x55,0x04,0x03,0x13,0x17,0x47,0x6C, -0x6F,0x62,0x61,0x6C,0x53,0x69,0x67,0x6E,0x20,0x52,0x6F,0x6F,0x74,0x20,0x43,0x41, -0x20,0x2D,0x20,0x52,0x38,0x30,0x76,0x30,0x10,0x06,0x07,0x2A,0x86,0x48,0xCE,0x3D, -0x02,0x01,0x06,0x05,0x2B,0x81,0x04,0x00,0x22,0x03,0x62,0x00,0x04,0xB8,0xEE,0x7C, -0x30,0x87,0xD8,0x94,0x1F,0x54,0x6B,0x6D,0x98,0x9D,0xFC,0x75,0xFB,0x5B,0x88,0xAB, -0x42,0xBA,0x8D,0x7D,0x39,0x7E,0xDD,0x44,0x3D,0x39,0x3C,0xE1,0x05,0xA1,0x4A,0x64, -0x60,0xAC,0x37,0xA6,0x73,0xB0,0xF9,0xC9,0x45,0x4B,0x0B,0x06,0xD0,0x3A,0xE0,0xF1, -0x6D,0x5F,0xFA,0x5E,0x5B,0x5A,0x52,0xB5,0x76,0xE3,0x46,0xDB,0xD5,0x1E,0x8C,0x74, -0x7A,0x42,0xC9,0x41,0x35,0x4F,0xC6,0xD4,0xE2,0x28,0x60,0xAB,0x34,0x8A,0xCE,0xB1, -0x40,0x23,0x46,0xA5,0xAE,0x19,0x24,0x52,0x7C,0x90,0x55,0x44,0xCE,0xA3,0x42,0x30, -0x40,0x30,0x0E,0x06,0x03,0x55,0x1D,0x0F,0x01,0x01,0xFF,0x04,0x04,0x03,0x02,0x01, -0x06,0x30,0x0F,0x06,0x03,0x55,0x1D,0x13,0x01,0x01,0xFF,0x04,0x05,0x30,0x03,0x01, -0x01,0xFF,0x30,0x1D,0x06,0x03,0x55,0x1D,0x0E,0x04,0x16,0x04,0x14,0x2F,0x3A,0x12, -0x26,0x80,0xE8,0x8A,0xC2,0x50,0x78,0x6D,0x06,0xC4,0x34,0x7E,0xE2,0x49,0x39,0x57, -0x76,0x30,0x0A,0x06,0x08,0x2A,0x86,0x48,0xCE,0x3D,0x04,0x03,0x03,0x03,0x68,0x00, -0x30,0x65,0x02,0x31,0x00,0xC7,0xA1,0x3D,0xB2,0x92,0x90,0xFA,0xCA,0x5D,0xE0,0x27, -0x84,0x82,0x3B,0x21,0xCC,0xF4,0x8D,0xF8,0x94,0x56,0xF2,0x20,0x5F,0x11,0xC0,0xAC, -0xBC,0x5F,0x15,0xA5,0x0B,0xC8,0x16,0x43,0xA7,0xF8,0xC5,0x7F,0x8D,0x20,0xA0,0x7F, -0x5E,0xFC,0x16,0x1C,0x27,0x02,0x30,0x1E,0x8C,0xF5,0x56,0xBF,0x38,0xDB,0x9C,0xE6, -0xA6,0xD7,0x84,0x29,0xE6,0xDF,0x0D,0x53,0x2E,0xE8,0x2B,0x01,0xB7,0x7D,0x09,0x3C, -0xB1,0x32,0x6A,0x1A,0x9A,0xB8,0x0A,0xEA,0xE8,0xAD,0x08,0xF2,0x74,0x39,0xD5,0x2B, -0x22,0x36,0xDC,0xEF,0x46,0x66,0xD8, -}; - - /* subject:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA */ /* issuer :/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA */ @@ -2817,7 +2697,6 @@ const 
unsigned char* const kSSLCertCertificateList[] = { COMODO_Certification_Authority_certificate, GlobalSign_ECC_Root_CA___R4_certificate, GlobalSign_ECC_Root_CA___R5_certificate, - AddTrust_External_Root_certificate, USERTrust_ECC_Certification_Authority_certificate, Entrust_net_Premium_2048_Secure_Server_CA_certificate, AffirmTrust_Premium_ECC_certificate, @@ -2830,7 +2709,6 @@ const unsigned char* const kSSLCertCertificateList[] = { DigiCert_Trusted_Root_G4_certificate, COMODO_ECC_Certification_Authority_certificate, Entrust_Root_Certification_Authority___EC1_certificate, - GlobalSign_Root_CA___R8_certificate, GeoTrust_Global_CA_certificate, DigiCert_Assured_ID_Root_G3_certificate, Go_Daddy_Root_Certificate_Authority___G2_certificate, @@ -2861,7 +2739,6 @@ const size_t kSSLCertCertificateSizeList[] = { 1057, 485, 546, - 1082, 659, 1070, 514, @@ -2874,7 +2751,6 @@ const size_t kSSLCertCertificateSizeList[] = { 1428, 653, 765, - 567, 856, 586, 969, diff --git a/rtc_base/ssl_stream_adapter.cc b/rtc_base/ssl_stream_adapter.cc index 354622e6f0..5730af63d8 100644 --- a/rtc_base/ssl_stream_adapter.cc +++ b/rtc_base/ssl_stream_adapter.cc @@ -95,11 +95,6 @@ std::unique_ptr SSLStreamAdapter::Create( return std::make_unique(std::move(stream)); } -SSLStreamAdapter::SSLStreamAdapter(std::unique_ptr stream) - : StreamAdapterInterface(stream.release()) {} - -SSLStreamAdapter::~SSLStreamAdapter() {} - bool SSLStreamAdapter::GetSslCipherSuite(int* cipher_suite) { return false; } diff --git a/rtc_base/ssl_stream_adapter.h b/rtc_base/ssl_stream_adapter.h index 3da0b09469..6b44c76455 100644 --- a/rtc_base/ssl_stream_adapter.h +++ b/rtc_base/ssl_stream_adapter.h @@ -18,7 +18,6 @@ #include #include "absl/memory/memory.h" -#include "rtc_base/deprecation.h" #include "rtc_base/ssl_certificate.h" #include "rtc_base/ssl_identity.h" #include "rtc_base/stream.h" @@ -93,11 +92,11 @@ bool IsGcmCryptoSuiteName(const std::string& crypto_suite); enum SSLRole { SSL_CLIENT, SSL_SERVER }; enum SSLMode 
{ SSL_MODE_TLS, SSL_MODE_DTLS }; -// Note: TLS_10, TLS_11, and DTLS_10 will all be ignored, and only -// DTLS1_2 will be accepted, if the trial flag -// WebRTC-LegacyTlsProtocols/Disabled/ is passed in. Support for these -// protocol versions will be completely removed in M84 or later. -// TODO(https://bugs.webrtc.org/10261). +// Note: TLS_10, TLS_11, and DTLS_10 will all be ignored, and only DTLS1_2 will +// be accepted unless the trial flag WebRTC-LegacyTlsProtocols/Enabled/ is +// passed in or an explicit override is used. Support for the legacy protocol +// versions will be completely removed in the future. +// See https://bugs.webrtc.org/10261. enum SSLProtocolVersion { SSL_PROTOCOL_NOT_GIVEN = -1, SSL_PROTOCOL_TLS_10 = 0, @@ -119,7 +118,7 @@ enum { SSE_MSG_TRUNC = 0xff0001 }; // Used to send back UMA histogram value. Logged when Dtls handshake fails. enum class SSLHandshakeError { UNKNOWN, INCOMPATIBLE_CIPHERSUITE, MAX_VALUE }; -class SSLStreamAdapter : public StreamAdapterInterface { +class SSLStreamAdapter : public StreamInterface, public sigslot::has_slots<> { public: // Instantiate an SSLStreamAdapter wrapping the given stream, // (using the selected implementation for the platform). @@ -127,8 +126,8 @@ class SSLStreamAdapter : public StreamAdapterInterface { static std::unique_ptr Create( std::unique_ptr stream); - explicit SSLStreamAdapter(std::unique_ptr stream); - ~SSLStreamAdapter() override; + SSLStreamAdapter() = default; + ~SSLStreamAdapter() override = default; // Specify our SSL identity: key and certificate. SSLStream takes ownership // of the SSLIdentity object and will free it when appropriate. 
Should be diff --git a/rtc_base/ssl_stream_adapter_unittest.cc b/rtc_base/ssl_stream_adapter_unittest.cc index f6d20d1607..c580d835c5 100644 --- a/rtc_base/ssl_stream_adapter_unittest.cc +++ b/rtc_base/ssl_stream_adapter_unittest.cc @@ -21,10 +21,13 @@ #include "rtc_base/memory/fifo_buffer.h" #include "rtc_base/memory_stream.h" #include "rtc_base/message_digest.h" +#include "rtc_base/openssl_stream_adapter.h" #include "rtc_base/ssl_adapter.h" #include "rtc_base/ssl_identity.h" #include "rtc_base/ssl_stream_adapter.h" #include "rtc_base/stream.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "test/field_trial.h" using ::testing::Combine; @@ -213,7 +216,15 @@ class SSLDummyStreamBase : public rtc::StreamInterface, out_->Close(); } - protected: + private: + void PostEvent(int events, int err) { + thread_->PostTask(webrtc::ToQueuedTask(task_safety_, [this, events, err]() { + SignalEvent(this, events, err); + })); + } + + webrtc::ScopedTaskSafety task_safety_; + rtc::Thread* const thread_ = rtc::Thread::Current(); SSLStreamAdapterTestBase* test_base_; const std::string side_; rtc::StreamInterface* in_; @@ -230,10 +241,10 @@ class SSLDummyStreamTLS : public SSLDummyStreamBase { : SSLDummyStreamBase(test, side, in, out) {} }; -class BufferQueueStream : public rtc::BufferQueue, public rtc::StreamInterface { +class BufferQueueStream : public rtc::StreamInterface { public: BufferQueueStream(size_t capacity, size_t default_size) - : rtc::BufferQueue(capacity, default_size) {} + : buffer_(capacity, default_size) {} // Implementation of abstract StreamInterface methods. 
@@ -245,9 +256,13 @@ class BufferQueueStream : public rtc::BufferQueue, public rtc::StreamInterface { size_t buffer_len, size_t* read, int* error) override { - if (!ReadFront(buffer, buffer_len, read)) { + const bool was_writable = buffer_.is_writable(); + if (!buffer_.ReadFront(buffer, buffer_len, read)) return rtc::SR_BLOCK; - } + + if (!was_writable) + NotifyWritableForTest(); + return rtc::SR_SUCCESS; } @@ -256,9 +271,13 @@ class BufferQueueStream : public rtc::BufferQueue, public rtc::StreamInterface { size_t data_len, size_t* written, int* error) override { - if (!WriteBack(data, data_len, written)) { + const bool was_readable = buffer_.is_readable(); + if (!buffer_.WriteBack(data, data_len, written)) return rtc::SR_BLOCK; - } + + if (!was_readable) + NotifyReadableForTest(); + return rtc::SR_SUCCESS; } @@ -266,9 +285,19 @@ class BufferQueueStream : public rtc::BufferQueue, public rtc::StreamInterface { void Close() override {} protected: - void NotifyReadableForTest() override { PostEvent(rtc::SE_READ, 0); } + void NotifyReadableForTest() { PostEvent(rtc::SE_READ, 0); } + void NotifyWritableForTest() { PostEvent(rtc::SE_WRITE, 0); } + + private: + void PostEvent(int events, int err) { + thread_->PostTask(webrtc::ToQueuedTask(task_safety_, [this, events, err]() { + SignalEvent(this, events, err); + })); + } - void NotifyWritableForTest() override { PostEvent(rtc::SE_WRITE, 0); } + rtc::Thread* const thread_ = rtc::Thread::Current(); + webrtc::ScopedTaskSafety task_safety_; + rtc::BufferQueue buffer_; }; class SSLDummyStreamDTLS : public SSLDummyStreamBase { @@ -479,8 +508,9 @@ class SSLStreamAdapterTestBase : public ::testing::Test, } } - // This tests that the handshake can complete before the identity is - // verified, and the identity will be verified after the fact. + // This tests that the handshake can complete before the identity is verified, + // and the identity will be verified after the fact. 
It also verifies that + // packets can't be read or written before the identity has been verified. void TestHandshakeWithDelayedIdentity(bool valid_identity) { server_ssl_->SetMode(dtls_ ? rtc::SSL_MODE_DTLS : rtc::SSL_MODE_TLS); client_ssl_->SetMode(dtls_ ? rtc::SSL_MODE_DTLS : rtc::SSL_MODE_TLS); @@ -495,14 +525,9 @@ class SSLStreamAdapterTestBase : public ::testing::Test, } // Start the handshake - int rv; - server_ssl_->SetServerRole(); - rv = server_ssl_->StartSSL(); - ASSERT_EQ(0, rv); - - rv = client_ssl_->StartSSL(); - ASSERT_EQ(0, rv); + ASSERT_EQ(0, server_ssl_->StartSSL()); + ASSERT_EQ(0, client_ssl_->StartSSL()); // Now run the handshake. EXPECT_TRUE_WAIT( @@ -518,16 +543,57 @@ class SSLStreamAdapterTestBase : public ::testing::Test, EXPECT_EQ(rtc::SR_BLOCK, client_ssl_->Write(&packet, 1, &sent, 0)); EXPECT_EQ(rtc::SR_BLOCK, server_ssl_->Write(&packet, 1, &sent, 0)); - // If we set an invalid identity at this point, SetPeerCertificateDigest - // should return false. - SetPeerIdentitiesByDigest(valid_identity, valid_identity); + // Collect both of the certificate digests; needs to be done before calling + // SetPeerCertificateDigest as that may reset the identity. + unsigned char server_digest[20]; + size_t server_digest_len; + unsigned char client_digest[20]; + size_t client_digest_len; + bool rv; + + rv = server_identity()->certificate().ComputeDigest( + rtc::DIGEST_SHA_1, server_digest, 20, &server_digest_len); + ASSERT_TRUE(rv); + rv = client_identity()->certificate().ComputeDigest( + rtc::DIGEST_SHA_1, client_digest, 20, &client_digest_len); + ASSERT_TRUE(rv); + + if (!valid_identity) { + RTC_LOG(LS_INFO) << "Setting bogus digest for client/server certs"; + client_digest[0]++; + server_digest[0]++; + } + + // Set the peer certificate digest for the client. + rtc::SSLPeerCertificateDigestError err; + rtc::SSLPeerCertificateDigestError expected_err = + valid_identity + ? 
rtc::SSLPeerCertificateDigestError::NONE + : rtc::SSLPeerCertificateDigestError::VERIFICATION_FAILED; + rv = client_ssl_->SetPeerCertificateDigest(rtc::DIGEST_SHA_1, server_digest, + server_digest_len, &err); + EXPECT_EQ(expected_err, err); + EXPECT_EQ(valid_identity, rv); // State should then transition to SS_OPEN or SS_CLOSED based on validation // of the identity. if (valid_identity) { EXPECT_EQ(rtc::SS_OPEN, client_ssl_->GetState()); - EXPECT_EQ(rtc::SS_OPEN, server_ssl_->GetState()); + // If the client sends a packet while the server still hasn't verified the + // client identity, the server should continue to return SR_BLOCK. + EXPECT_EQ(rtc::SR_SUCCESS, client_ssl_->Write(&packet, 1, &sent, 0)); + EXPECT_EQ(rtc::SR_BLOCK, server_ssl_->Read(&packet, 1, 0, 0)); } else { EXPECT_EQ(rtc::SS_CLOSED, client_ssl_->GetState()); + } + + // Set the peer certificate digest for the server. + rv = server_ssl_->SetPeerCertificateDigest(rtc::DIGEST_SHA_1, client_digest, + client_digest_len, &err); + EXPECT_EQ(expected_err, err); + EXPECT_EQ(valid_identity, rv); + if (valid_identity) { + EXPECT_EQ(rtc::SS_OPEN, server_ssl_->GetState()); + } else { EXPECT_EQ(rtc::SS_CLOSED, server_ssl_->GetState()); } } @@ -1429,10 +1495,9 @@ TEST_P(SSLStreamAdapterTestDTLS, TestGetSslCipherSuiteDtls12Both) { } // Test getting the used DTLS ciphers. -// DTLS 1.0 is max version for client and server, this will only work if -// legacy is enabled. +// DTLS 1.2 is max version for client and server. 
TEST_P(SSLStreamAdapterTestDTLS, TestGetSslCipherSuite) { - SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_10); + SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_12, rtc::SSL_PROTOCOL_DTLS_12); TestHandshake(); int client_cipher; @@ -1440,8 +1505,8 @@ TEST_P(SSLStreamAdapterTestDTLS, TestGetSslCipherSuite) { int server_cipher; ASSERT_TRUE(GetSslCipherSuite(false, &server_cipher)); - ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_10, GetSslVersion(true)); - ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_10, GetSslVersion(false)); + ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_12, GetSslVersion(true)); + ASSERT_EQ(rtc::SSL_PROTOCOL_DTLS_12, GetSslVersion(false)); ASSERT_EQ(client_cipher, server_cipher); ASSERT_TRUE(rtc::SSLStreamAdapter::IsAcceptableCipher( @@ -1500,7 +1565,7 @@ class SSLStreamAdapterTestDTLSLegacyProtocols } void ConfigureServer(std::string experiment) { - // webrtc::test::ScopedFieldTrials trial(experiment); + webrtc::test::ScopedFieldTrials trial(experiment); server_stream_ = new SSLDummyStreamDTLS(this, "s2c", &server_buffer_, &client_buffer_); server_ssl_ = @@ -1516,8 +1581,8 @@ class SSLStreamAdapterTestDTLSLegacyProtocols // Test getting the used DTLS ciphers. // DTLS 1.2 enabled for neither client nor server -> DTLS 1.0 will be used. TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, TestGetSslCipherSuite) { - ConfigureClient(""); - ConfigureServer(""); + ConfigureClient("WebRTC-LegacyTlsProtocols/Enabled/"); + ConfigureServer("WebRTC-LegacyTlsProtocols/Enabled/"); SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_10); TestHandshake(); @@ -1555,8 +1620,8 @@ TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, // DTLS 1.2 enabled for client only -> DTLS 1.0 will be used. 
TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, TestGetSslCipherSuiteDtls12Client) { - ConfigureClient(""); - ConfigureServer(""); + ConfigureClient("WebRTC-LegacyTlsProtocols/Enabled/"); + ConfigureServer("WebRTC-LegacyTlsProtocols/Enabled/"); SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_12); TestHandshake(); @@ -1574,8 +1639,8 @@ TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, // DTLS 1.2 enabled for server only -> DTLS 1.0 will be used. TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, TestGetSslCipherSuiteDtls12Server) { - ConfigureClient(""); - ConfigureServer(""); + ConfigureClient("WebRTC-LegacyTlsProtocols/Enabled/"); + ConfigureServer("WebRTC-LegacyTlsProtocols/Enabled/"); SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_12, rtc::SSL_PROTOCOL_DTLS_10); TestHandshake(); @@ -1594,8 +1659,8 @@ TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, // This is meant to cause a failure. TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, TestGetSslVersionLegacyDisabledServer10) { - ConfigureClient("WebRTC-LegacyTlsProtocols/Disabled/"); - ConfigureServer(""); + ConfigureClient(""); + ConfigureServer("WebRTC-LegacyTlsProtocols/Enabled/"); SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_12); // Handshake should fail. TestHandshake(false); @@ -1605,8 +1670,8 @@ TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, // DTLS 1.2. This should work. 
TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, TestGetSslVersionLegacyDisabledServer12) { - ConfigureClient("WebRTC-LegacyTlsProtocols/Disabled/"); - ConfigureServer("WebRTC-LegacyTlsProtocols/Disabled/"); + ConfigureClient(""); + ConfigureServer(""); SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_12, rtc::SSL_PROTOCOL_DTLS_12); TestHandshake(); } @@ -1621,12 +1686,53 @@ TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, TestHandshake(); } -// Legacy protocols are disabled, max TLS version is 1.0 +// Legacy protocols are disabled in the client, max TLS version is 1.0 // This should be a configuration error, and handshake should fail. TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, TestGetSslVersionLegacyDisabledClient10Server10) { - ConfigureClient("WebRTC-LegacyTlsProtocols/Disabled/"); - ConfigureServer("WebRTC-LegacyTlsProtocols/Disabled/"); + ConfigureClient(""); + ConfigureServer("WebRTC-LegacyTlsProtocols/Enabled/"); + SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_10); + TestHandshake(false); +} + +// Both client and server have legacy TLS versions enabled and support DTLS 1.0. +// This should work. +TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, + TestGetSslVersionLegacyOverrideEnabledClient10Server10) { + rtc::SetAllowLegacyTLSProtocols(true); + ConfigureClient(""); + ConfigureServer(""); + // Remove override. + rtc::SetAllowLegacyTLSProtocols(absl::nullopt); + SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_10); + TestHandshake(); +} + +// Client has legacy TLS disabled and server has legacy TLS enabled via +// override. Handshake for DTLS 1.0 should fail. +TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, + TestGetSslVersionLegacyOverrideDisabledClient10EnabledServer10) { + rtc::SetAllowLegacyTLSProtocols(false); + ConfigureClient(""); + rtc::SetAllowLegacyTLSProtocols(true); + ConfigureServer(""); + // Remove override. 
+ rtc::SetAllowLegacyTLSProtocols(absl::nullopt); + SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_10); + TestHandshake(false); +} + +// Client has legacy TLS enabled and server has legacy TLS disabled via +// override. Handshake for DTLS 1.0 should fail. +TEST_F(SSLStreamAdapterTestDTLSLegacyProtocols, + TestGetSslVersionLegacyOverrideEnabledClient10DisabledServer10) { + rtc::SetAllowLegacyTLSProtocols(true); + ConfigureClient(""); + rtc::SetAllowLegacyTLSProtocols(false); + ConfigureServer(""); + // Remove override. + rtc::SetAllowLegacyTLSProtocols(absl::nullopt); SetupProtocolVersions(rtc::SSL_PROTOCOL_DTLS_10, rtc::SSL_PROTOCOL_DTLS_10); TestHandshake(false); } diff --git a/rtc_base/stream.cc b/rtc_base/stream.cc index 1b0a4d759b..30c767888c 100644 --- a/rtc_base/stream.cc +++ b/rtc_base/stream.cc @@ -24,7 +24,6 @@ namespace rtc { /////////////////////////////////////////////////////////////////////////////// // StreamInterface /////////////////////////////////////////////////////////////////////////////// -StreamInterface::~StreamInterface() {} StreamResult StreamInterface::WriteAll(const void* data, size_t data_len, @@ -44,91 +43,10 @@ StreamResult StreamInterface::WriteAll(const void* data, return result; } -void StreamInterface::PostEvent(Thread* t, int events, int err) { - t->Post(RTC_FROM_HERE, this, MSG_POST_EVENT, - new StreamEventData(events, err)); -} - -void StreamInterface::PostEvent(int events, int err) { - PostEvent(Thread::Current(), events, err); -} - bool StreamInterface::Flush() { return false; } StreamInterface::StreamInterface() {} -void StreamInterface::OnMessage(Message* msg) { - if (MSG_POST_EVENT == msg->message_id) { - StreamEventData* pe = static_cast(msg->pdata); - SignalEvent(this, pe->events, pe->error); - delete msg->pdata; - } -} - -/////////////////////////////////////////////////////////////////////////////// -// StreamAdapterInterface 
-/////////////////////////////////////////////////////////////////////////////// - -StreamAdapterInterface::StreamAdapterInterface(StreamInterface* stream, - bool owned) - : stream_(stream), owned_(owned) { - if (nullptr != stream_) - stream_->SignalEvent.connect(this, &StreamAdapterInterface::OnEvent); -} - -StreamState StreamAdapterInterface::GetState() const { - return stream_->GetState(); -} -StreamResult StreamAdapterInterface::Read(void* buffer, - size_t buffer_len, - size_t* read, - int* error) { - return stream_->Read(buffer, buffer_len, read, error); -} -StreamResult StreamAdapterInterface::Write(const void* data, - size_t data_len, - size_t* written, - int* error) { - return stream_->Write(data, data_len, written, error); -} -void StreamAdapterInterface::Close() { - stream_->Close(); -} - -bool StreamAdapterInterface::Flush() { - return stream_->Flush(); -} - -void StreamAdapterInterface::Attach(StreamInterface* stream, bool owned) { - if (nullptr != stream_) - stream_->SignalEvent.disconnect(this); - if (owned_) - delete stream_; - stream_ = stream; - owned_ = owned; - if (nullptr != stream_) - stream_->SignalEvent.connect(this, &StreamAdapterInterface::OnEvent); -} - -StreamInterface* StreamAdapterInterface::Detach() { - if (nullptr != stream_) - stream_->SignalEvent.disconnect(this); - StreamInterface* stream = stream_; - stream_ = nullptr; - return stream; -} - -StreamAdapterInterface::~StreamAdapterInterface() { - if (owned_) - delete stream_; -} - -void StreamAdapterInterface::OnEvent(StreamInterface* stream, - int events, - int err) { - SignalEvent(this, events, err); -} - } // namespace rtc diff --git a/rtc_base/stream.h b/rtc_base/stream.h index bfb9dc2c41..70de65a75d 100644 --- a/rtc_base/stream.h +++ b/rtc_base/stream.h @@ -15,7 +15,6 @@ #include "rtc_base/buffer.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/message_handler.h" #include "rtc_base/system/rtc_export.h" #include 
"rtc_base/third_party/sigslot/sigslot.h" @@ -49,16 +48,9 @@ enum StreamResult { SR_ERROR, SR_SUCCESS, SR_BLOCK, SR_EOS }; // SE_WRITE: Data can be written, so Write is likely to not return SR_BLOCK enum StreamEvent { SE_OPEN = 1, SE_READ = 2, SE_WRITE = 4, SE_CLOSE = 8 }; -struct StreamEventData : public MessageData { - int events, error; - StreamEventData(int ev, int er) : events(ev), error(er) {} -}; - -class RTC_EXPORT StreamInterface : public MessageHandler { +class RTC_EXPORT StreamInterface { public: - enum { MSG_POST_EVENT = 0xF1F1, MSG_MAX = MSG_POST_EVENT }; - - ~StreamInterface() override; + virtual ~StreamInterface() {} virtual StreamState GetState() const = 0; @@ -97,13 +89,6 @@ class RTC_EXPORT StreamInterface : public MessageHandler { // certain events will be raised in the future. sigslot::signal3 SignalEvent; - // Like calling SignalEvent, but posts a message to the specified thread, - // which will call SignalEvent. This helps unroll the stack and prevent - // re-entrancy. - void PostEvent(Thread* t, int events, int err); - // Like the aforementioned method, but posts to the current thread. - void PostEvent(int events, int err); - // Return true if flush is successful. virtual bool Flush(); @@ -126,57 +111,10 @@ class RTC_EXPORT StreamInterface : public MessageHandler { protected: StreamInterface(); - // MessageHandler Interface - void OnMessage(Message* msg) override; - private: RTC_DISALLOW_COPY_AND_ASSIGN(StreamInterface); }; -/////////////////////////////////////////////////////////////////////////////// -// StreamAdapterInterface is a convenient base-class for adapting a stream. -// By default, all operations are pass-through. Override the methods that you -// require adaptation. Streams should really be upgraded to reference-counted. -// In the meantime, use the owned flag to indicate whether the adapter should -// own the adapted stream. 
-/////////////////////////////////////////////////////////////////////////////// - -class StreamAdapterInterface : public StreamInterface, - public sigslot::has_slots<> { - public: - explicit StreamAdapterInterface(StreamInterface* stream, bool owned = true); - - // Core Stream Interface - StreamState GetState() const override; - StreamResult Read(void* buffer, - size_t buffer_len, - size_t* read, - int* error) override; - StreamResult Write(const void* data, - size_t data_len, - size_t* written, - int* error) override; - void Close() override; - - bool Flush() override; - - void Attach(StreamInterface* stream, bool owned = true); - StreamInterface* Detach(); - - protected: - ~StreamAdapterInterface() override; - - // Note that the adapter presents itself as the origin of the stream events, - // since users of the adapter may not recognize the adapted object. - virtual void OnEvent(StreamInterface* stream, int events, int err); - StreamInterface* stream() { return stream_; } - - private: - StreamInterface* stream_; - bool owned_; - RTC_DISALLOW_COPY_AND_ASSIGN(StreamAdapterInterface); -}; - } // namespace rtc #endif // RTC_BASE_STREAM_H_ diff --git a/rtc_base/string_utils.cc b/rtc_base/string_utils.cc index dfbb548050..1720c62d5e 100644 --- a/rtc_base/string_utils.cc +++ b/rtc_base/string_utils.cc @@ -50,10 +50,4 @@ std::string ToHex(const int i) { return std::string(buffer); } -std::string LeftPad(char padding, unsigned length, std::string s) { - if (s.length() >= length) - return s; - return std::string(length - s.length(), padding) + s; -} - } // namespace rtc diff --git a/rtc_base/string_utils.h b/rtc_base/string_utils.h index 3518702ec0..d844e5e125 100644 --- a/rtc_base/string_utils.h +++ b/rtc_base/string_utils.h @@ -88,7 +88,42 @@ std::string string_trim(const std::string& s); // TODO(jonasolsson): replace with absl::Hex when that becomes available. 
std::string ToHex(const int i); -std::string LeftPad(char padding, unsigned length, std::string s); +// CompileTimeString comprises of a string-like object which can be used as a +// regular const char* in compile time and supports concatenation. Useful for +// concatenating constexpr strings in for example macro declarations. +namespace rtc_base_string_utils_internal { +template +struct CompileTimeString { + char string[NPlus1] = {0}; + constexpr CompileTimeString() = default; + template + explicit constexpr CompileTimeString(const char (&chars)[MPlus1]) { + char* chars_pointer = string; + for (auto c : chars) + *chars_pointer++ = c; + } + template + constexpr auto Concat(CompileTimeString b) { + CompileTimeString result; + char* chars_pointer = result.string; + for (auto c : string) + *chars_pointer++ = c; + chars_pointer = result.string + NPlus1 - 1; + for (auto c : b.string) + *chars_pointer++ = c; + result.string[NPlus1 + MPlus1 - 2] = 0; + return result; + } + constexpr operator const char*() { return string; } +}; +} // namespace rtc_base_string_utils_internal + +// Makes a constexpr CompileTimeString without having to specify X +// explicitly. 
+template +constexpr auto MakeCompileTimeString(const char (&a)[N]) { + return rtc_base_string_utils_internal::CompileTimeString(a); +} } // namespace rtc diff --git a/rtc_base/string_utils_unittest.cc b/rtc_base/string_utils_unittest.cc index 2fa1f220ac..120f7e60f5 100644 --- a/rtc_base/string_utils_unittest.cc +++ b/rtc_base/string_utils_unittest.cc @@ -39,4 +39,29 @@ TEST(string_toutf, Empty) { #endif // WEBRTC_WIN +TEST(CompileTimeString, MakeActsLikeAString) { + EXPECT_STREQ(MakeCompileTimeString("abc123"), "abc123"); +} + +TEST(CompileTimeString, ConvertibleToStdString) { + EXPECT_EQ(std::string(MakeCompileTimeString("abab")), "abab"); +} + +namespace detail { +constexpr bool StringEquals(const char* a, const char* b) { + while (*a && *a == *b) + a++, b++; + return *a == *b; +} +} // namespace detail + +static_assert(detail::StringEquals(MakeCompileTimeString("handellm"), + "handellm"), + "String should initialize."); + +static_assert(detail::StringEquals(MakeCompileTimeString("abc123").Concat( + MakeCompileTimeString("def456ghi")), + "abc123def456ghi"), + "Strings should concatenate."); + } // namespace rtc diff --git a/rtc_base/stringize_macros.h b/rtc_base/stringize_macros.h deleted file mode 100644 index aee8d14551..0000000000 --- a/rtc_base/stringize_macros.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// Modified from the Chromium original: -// src/base/strings/stringize_macros.h - -// This file defines preprocessor macros for stringizing preprocessor -// symbols (or their output) and manipulating preprocessor symbols -// that define strings. 
- -#ifndef RTC_BASE_STRINGIZE_MACROS_H_ -#define RTC_BASE_STRINGIZE_MACROS_H_ - -// This is not very useful as it does not expand defined symbols if -// called directly. Use its counterpart without the _NO_EXPANSION -// suffix, below. -#define STRINGIZE_NO_EXPANSION(x) #x - -// Use this to quote the provided parameter, first expanding it if it -// is a preprocessor symbol. -// -// For example, if: -// #define A FOO -// #define B(x) myobj->FunctionCall(x) -// -// Then: -// STRINGIZE(A) produces "FOO" -// STRINGIZE(B(y)) produces "myobj->FunctionCall(y)" -#define STRINGIZE(x) STRINGIZE_NO_EXPANSION(x) - -#endif // RTC_BASE_STRINGIZE_MACROS_H_ diff --git a/rtc_base/stringize_macros_unittest.cc b/rtc_base/stringize_macros_unittest.cc deleted file mode 100644 index 78e6b55b2d..0000000000 --- a/rtc_base/stringize_macros_unittest.cc +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "rtc_base/stringize_macros.h" - -#include "test/gtest.h" - -// Macros as per documentation in header file. 
-#define PREPROCESSOR_UTIL_UNITTEST_A FOO -#define PREPROCESSOR_UTIL_UNITTEST_B(x) myobj->FunctionCall(x) -#define PREPROCESSOR_UTIL_UNITTEST_C "foo" - -TEST(StringizeTest, Ansi) { - EXPECT_STREQ("PREPROCESSOR_UTIL_UNITTEST_A", - STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_A)); - EXPECT_STREQ("PREPROCESSOR_UTIL_UNITTEST_B(y)", - STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_B(y))); - EXPECT_STREQ("PREPROCESSOR_UTIL_UNITTEST_C", - STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_C)); - - EXPECT_STREQ("FOO", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_A)); - EXPECT_STREQ("myobj->FunctionCall(y)", - STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_B(y))); - EXPECT_STREQ("\"foo\"", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_C)); -} diff --git a/rtc_base/strings/json.cc b/rtc_base/strings/json.cc index 8a544a0c0d..99664404cf 100644 --- a/rtc_base/strings/json.cc +++ b/rtc_base/strings/json.cc @@ -286,9 +286,9 @@ bool GetDoubleFromJsonObject(const Json::Value& in, } std::string JsonValueToString(const Json::Value& json) { - Json::FastWriter w; - std::string value = w.write(json); - return value.substr(0, value.size() - 1); // trim trailing newline + Json::StreamWriterBuilder builder; + std::string output = Json::writeString(builder, json); + return output.substr(0, output.size() - 1); // trim trailing newline } } // namespace rtc diff --git a/rtc_base/strings/string_builder_unittest.cc b/rtc_base/strings/string_builder_unittest.cc index 84717ad1d1..99dfd86292 100644 --- a/rtc_base/strings/string_builder_unittest.cc +++ b/rtc_base/strings/string_builder_unittest.cc @@ -59,7 +59,7 @@ TEST(SimpleStringBuilder, StdString) { // off. 
#if (GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)) || !RTC_DCHECK_IS_ON -TEST(SimpleStringBuilder, BufferOverrunConstCharP) { +TEST(SimpleStringBuilderDeathTest, BufferOverrunConstCharP) { char sb_buf[4]; SimpleStringBuilder sb(sb_buf); const char* const msg = "This is just too much"; @@ -71,7 +71,7 @@ TEST(SimpleStringBuilder, BufferOverrunConstCharP) { #endif } -TEST(SimpleStringBuilder, BufferOverrunStdString) { +TEST(SimpleStringBuilderDeathTest, BufferOverrunStdString) { char sb_buf[4]; SimpleStringBuilder sb(sb_buf); sb << 12; @@ -84,7 +84,7 @@ TEST(SimpleStringBuilder, BufferOverrunStdString) { #endif } -TEST(SimpleStringBuilder, BufferOverrunInt) { +TEST(SimpleStringBuilderDeathTest, BufferOverrunInt) { char sb_buf[4]; SimpleStringBuilder sb(sb_buf); constexpr int num = -12345; @@ -100,7 +100,7 @@ TEST(SimpleStringBuilder, BufferOverrunInt) { #endif } -TEST(SimpleStringBuilder, BufferOverrunDouble) { +TEST(SimpleStringBuilderDeathTest, BufferOverrunDouble) { char sb_buf[5]; SimpleStringBuilder sb(sb_buf); constexpr double num = 123.456; @@ -113,7 +113,7 @@ TEST(SimpleStringBuilder, BufferOverrunDouble) { #endif } -TEST(SimpleStringBuilder, BufferOverrunConstCharPAlreadyFull) { +TEST(SimpleStringBuilderDeathTest, BufferOverrunConstCharPAlreadyFull) { char sb_buf[4]; SimpleStringBuilder sb(sb_buf); sb << 123; @@ -126,7 +126,7 @@ TEST(SimpleStringBuilder, BufferOverrunConstCharPAlreadyFull) { #endif } -TEST(SimpleStringBuilder, BufferOverrunIntAlreadyFull) { +TEST(SimpleStringBuilderDeathTest, BufferOverrunIntAlreadyFull) { char sb_buf[4]; SimpleStringBuilder sb(sb_buf); sb << "xyz"; diff --git a/rtc_base/swap_queue.h b/rtc_base/swap_queue.h index eb0b1fff0c..3c8149c163 100644 --- a/rtc_base/swap_queue.h +++ b/rtc_base/swap_queue.h @@ -17,8 +17,8 @@ #include #include +#include "absl/base/attributes.h" #include "rtc_base/checks.h" -#include "rtc_base/system/unused.h" namespace webrtc { @@ -127,7 +127,7 @@ class SwapQueue { // When specified, the T given in 
*input must pass the ItemVerifier() test. // The contents of *input after the call are then also guaranteed to pass the // ItemVerifier() test. - bool Insert(T* input) RTC_WARN_UNUSED_RESULT { + ABSL_MUST_USE_RESULT bool Insert(T* input) { RTC_DCHECK(input); RTC_DCHECK(queue_item_verifier_(*input)); @@ -141,7 +141,8 @@ class SwapQueue { return false; } - std::swap(*input, queue_[next_write_index_]); + using std::swap; + swap(*input, queue_[next_write_index_]); // Increment the value of num_elements_ to account for the inserted element. // Release memory ordering prevents the reads and writes to @@ -167,7 +168,7 @@ class SwapQueue { // empty). When specified, The T given in *output must pass the ItemVerifier() // test and the contents of *output after the call are then also guaranteed to // pass the ItemVerifier() test. - bool Remove(T* output) RTC_WARN_UNUSED_RESULT { + ABSL_MUST_USE_RESULT bool Remove(T* output) { RTC_DCHECK(output); RTC_DCHECK(queue_item_verifier_(*output)); @@ -181,7 +182,8 @@ class SwapQueue { return false; } - std::swap(*output, queue_[next_read_index_]); + using std::swap; + swap(*output, queue_[next_read_index_]); // Decrement the value of num_elements_ to account for the removed element. // Release memory ordering prevents the reads and writes to diff --git a/rtc_base/swap_queue_unittest.cc b/rtc_base/swap_queue_unittest.cc index 199ac6b185..3862d850fa 100644 --- a/rtc_base/swap_queue_unittest.cc +++ b/rtc_base/swap_queue_unittest.cc @@ -135,7 +135,7 @@ TEST(SwapQueueTest, SuccessfulItemVerifyFunctor) { } #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -TEST(SwapQueueTest, UnsuccessfulItemVerifyFunctor) { +TEST(SwapQueueDeathTest, UnsuccessfulItemVerifyFunctor) { // Queue item verifier for the test. 
auto minus_2_verifier = [](const int& i) { return i > -2; }; SwapQueue queue(2, minus_2_verifier); @@ -148,7 +148,7 @@ TEST(SwapQueueTest, UnsuccessfulItemVerifyFunctor) { EXPECT_DEATH(result = queue.Insert(&invalid_value), ""); } -TEST(SwapQueueTest, UnSuccessfulItemVerifyInsert) { +TEST(SwapQueueDeathTest, UnSuccessfulItemVerifyInsert) { std::vector template_element(kChunkSize); SwapQueue, SwapQueueItemVerifier, &LengthVerifierFunction>> @@ -158,7 +158,7 @@ TEST(SwapQueueTest, UnSuccessfulItemVerifyInsert) { EXPECT_DEATH(result = queue.Insert(&invalid_chunk), ""); } -TEST(SwapQueueTest, UnSuccessfulItemVerifyRemove) { +TEST(SwapQueueDeathTest, UnSuccessfulItemVerifyRemove) { std::vector template_element(kChunkSize); SwapQueue, SwapQueueItemVerifier, &LengthVerifierFunction>> diff --git a/rtc_base/synchronization/BUILD.gn b/rtc_base/synchronization/BUILD.gn index 3e7b22d4f9..3cddc55c72 100644 --- a/rtc_base/synchronization/BUILD.gn +++ b/rtc_base/synchronization/BUILD.gn @@ -6,40 +6,58 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
+import("//third_party/google_benchmark/buildconfig.gni") import("../../webrtc.gni") if (is_android) { import("//build/config/android/config.gni") import("//build/config/android/rules.gni") } -rtc_library("rw_lock_wrapper") { - public = [ "rw_lock_wrapper.h" ] - sources = [ "rw_lock_wrapper.cc" ] - deps = [ "..:macromagic" ] - if (is_win) { - sources += [ - "rw_lock_win.cc", - "rw_lock_win.h", - ] - deps += [ "..:logging" ] - } else { - sources += [ - "rw_lock_posix.cc", - "rw_lock_posix.h", - ] +rtc_library("yield") { + sources = [ + "yield.cc", + "yield.h", + ] + deps = [] +} + +rtc_library("mutex") { + sources = [ + "mutex.cc", + "mutex.h", + "mutex_critical_section.h", + "mutex_pthread.h", + "mutex_race_check.h", + ] + if (rtc_use_absl_mutex) { + sources += [ "mutex_abseil.h" ] + } + + deps = [ + ":yield", + "..:checks", + "..:macromagic", + "..:platform_thread_types", + "../system:unused", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] + if (rtc_use_absl_mutex) { + absl_deps += [ "//third_party/abseil-cpp/absl/synchronization" ] } } -rtc_library("sequence_checker") { +rtc_library("sequence_checker_internal") { + visibility = [ "../../api:sequence_checker" ] sources = [ - "sequence_checker.cc", - "sequence_checker.h", + "sequence_checker_internal.cc", + "sequence_checker_internal.h", ] deps = [ + ":mutex", "..:checks", - "..:criticalsection", "..:macromagic", "..:platform_thread_types", + "..:stringutils", "../../api/task_queue", "../system:rtc_export", ] @@ -50,36 +68,43 @@ rtc_library("yield_policy") { "yield_policy.cc", "yield_policy.h", ] - deps = [ - "..:checks", + deps = [ "..:checks" ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:config", "//third_party/abseil-cpp/absl/base:core_headers", ] } if (rtc_include_tests) { - rtc_library("synchronization_unittests") { - testonly = true - sources = [ "yield_policy_unittest.cc" ] - deps = [ - ":yield_policy", - "..:rtc_event", - "../../test:test_support", - ] - } - - 
rtc_library("sequence_checker_unittests") { - testonly = true + if (enable_google_benchmarks) { + rtc_library("synchronization_unittests") { + testonly = true + sources = [ + "mutex_unittest.cc", + "yield_policy_unittest.cc", + ] + deps = [ + ":mutex", + ":yield", + ":yield_policy", + "..:checks", + "..:macromagic", + "..:rtc_base", + "..:rtc_event", + "..:threading", + "../../test:test_support", + "//third_party/google_benchmark", + ] + } - sources = [ "sequence_checker_unittest.cc" ] - deps = [ - ":sequence_checker", - "..:checks", - "..:rtc_base_approved", - "..:task_queue_for_test", - "../../api:function_view", - "../../test:test_main", - "../../test:test_support", - ] + rtc_library("mutex_benchmark") { + testonly = true + sources = [ "mutex_benchmark.cc" ] + deps = [ + ":mutex", + "../system:unused", + "//third_party/google_benchmark", + ] + } } } diff --git a/rtc_base/synchronization/DEPS b/rtc_base/synchronization/DEPS new file mode 100644 index 0000000000..4ed1f2444b --- /dev/null +++ b/rtc_base/synchronization/DEPS @@ -0,0 +1,11 @@ +specific_include_rules = { + "mutex_abseil\.h": [ + "+absl/synchronization" + ], + ".*_benchmark\.cc": [ + "+benchmark", + ], + ".*_unittest\.cc": [ + "+benchmark", + ] +} diff --git a/rtc_base/synchronization/mutex.cc b/rtc_base/synchronization/mutex.cc new file mode 100644 index 0000000000..6c2d6ff7f0 --- /dev/null +++ b/rtc_base/synchronization/mutex.cc @@ -0,0 +1,39 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "rtc_base/synchronization/mutex.h" + +#include "rtc_base/checks.h" +#include "rtc_base/synchronization/yield.h" + +namespace webrtc { + +#if !defined(WEBRTC_ABSL_MUTEX) +void GlobalMutex::Lock() { + while (mutex_locked_.exchange(1)) { + YieldCurrentThread(); + } +} + +void GlobalMutex::Unlock() { + int old = mutex_locked_.exchange(0); + RTC_DCHECK_EQ(old, 1) << "Unlock called without calling Lock first"; +} + +GlobalMutexLock::GlobalMutexLock(GlobalMutex* mutex) : mutex_(mutex) { + mutex_->Lock(); +} + +GlobalMutexLock::~GlobalMutexLock() { + mutex_->Unlock(); +} +#endif // #if !defined(WEBRTC_ABSL_MUTEX) + +} // namespace webrtc diff --git a/rtc_base/synchronization/mutex.h b/rtc_base/synchronization/mutex.h new file mode 100644 index 0000000000..e1512e96cc --- /dev/null +++ b/rtc_base/synchronization/mutex.h @@ -0,0 +1,113 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_SYNCHRONIZATION_MUTEX_H_ +#define RTC_BASE_SYNCHRONIZATION_MUTEX_H_ + +#include + +#include "absl/base/attributes.h" +#include "absl/base/const_init.h" +#include "rtc_base/checks.h" +#include "rtc_base/thread_annotations.h" + +#if defined(WEBRTC_RACE_CHECK_MUTEX) +// To use the race check mutex, define WEBRTC_RACE_CHECK_MUTEX globally. This +// also adds a dependency to absl::Mutex from logging.cc due to concurrent +// invocation of the static logging system. 
+#include "rtc_base/synchronization/mutex_race_check.h" +#elif defined(WEBRTC_ABSL_MUTEX) +#include "rtc_base/synchronization/mutex_abseil.h" // nogncheck +#elif defined(WEBRTC_WIN) +#include "rtc_base/synchronization/mutex_critical_section.h" +#elif defined(WEBRTC_POSIX) +#include "rtc_base/synchronization/mutex_pthread.h" +#else +#error Unsupported platform. +#endif + +namespace webrtc { + +// The Mutex guarantees exclusive access and aims to follow Abseil semantics +// (i.e. non-reentrant etc). +class RTC_LOCKABLE Mutex final { + public: + Mutex() = default; + Mutex(const Mutex&) = delete; + Mutex& operator=(const Mutex&) = delete; + + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { + impl_.Lock(); + } + ABSL_MUST_USE_RESULT bool TryLock() RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + return impl_.TryLock(); + } + void Unlock() RTC_UNLOCK_FUNCTION() { + impl_.Unlock(); + } + + private: + MutexImpl impl_; +}; + +// MutexLock, for serializing execution through a scope. +class RTC_SCOPED_LOCKABLE MutexLock final { + public: + MutexLock(const MutexLock&) = delete; + MutexLock& operator=(const MutexLock&) = delete; + + explicit MutexLock(Mutex* mutex) RTC_EXCLUSIVE_LOCK_FUNCTION(mutex) + : mutex_(mutex) { + mutex->Lock(); + } + ~MutexLock() RTC_UNLOCK_FUNCTION() { mutex_->Unlock(); } + + private: + Mutex* mutex_; +}; + +// A mutex used to protect global variables. Do NOT use for other purposes. +#if defined(WEBRTC_ABSL_MUTEX) +using GlobalMutex = absl::Mutex; +using GlobalMutexLock = absl::MutexLock; +#else +class RTC_LOCKABLE GlobalMutex final { + public: + GlobalMutex(const GlobalMutex&) = delete; + GlobalMutex& operator=(const GlobalMutex&) = delete; + + constexpr explicit GlobalMutex(absl::ConstInitType /*unused*/) + : mutex_locked_(0) {} + + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION(); + void Unlock() RTC_UNLOCK_FUNCTION(); + + private: + std::atomic mutex_locked_; // 0 means lock not taken, 1 means taken. 
+}; + +// GlobalMutexLock, for serializing execution through a scope. +class RTC_SCOPED_LOCKABLE GlobalMutexLock final { + public: + GlobalMutexLock(const GlobalMutexLock&) = delete; + GlobalMutexLock& operator=(const GlobalMutexLock&) = delete; + + explicit GlobalMutexLock(GlobalMutex* mutex) + RTC_EXCLUSIVE_LOCK_FUNCTION(mutex_); + ~GlobalMutexLock() RTC_UNLOCK_FUNCTION(); + + private: + GlobalMutex* mutex_; +}; +#endif // if defined(WEBRTC_ABSL_MUTEX) + +} // namespace webrtc + +#endif // RTC_BASE_SYNCHRONIZATION_MUTEX_H_ diff --git a/rtc_base/synchronization/mutex_abseil.h b/rtc_base/synchronization/mutex_abseil.h new file mode 100644 index 0000000000..9247065ae6 --- /dev/null +++ b/rtc_base/synchronization/mutex_abseil.h @@ -0,0 +1,38 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef RTC_BASE_SYNCHRONIZATION_MUTEX_ABSEIL_H_ +#define RTC_BASE_SYNCHRONIZATION_MUTEX_ABSEIL_H_ + +#include "absl/base/attributes.h" +#include "absl/synchronization/mutex.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +class RTC_LOCKABLE MutexImpl final { + public: + MutexImpl() = default; + MutexImpl(const MutexImpl&) = delete; + MutexImpl& operator=(const MutexImpl&) = delete; + + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { mutex_.Lock(); } + ABSL_MUST_USE_RESULT bool TryLock() RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + return mutex_.TryLock(); + } + void Unlock() RTC_UNLOCK_FUNCTION() { mutex_.Unlock(); } + + private: + absl::Mutex mutex_; +}; + +} // namespace webrtc + +#endif // RTC_BASE_SYNCHRONIZATION_MUTEX_ABSEIL_H_ diff --git a/rtc_base/synchronization/mutex_benchmark.cc b/rtc_base/synchronization/mutex_benchmark.cc new file mode 100644 index 0000000000..40adca65d8 --- /dev/null +++ b/rtc_base/synchronization/mutex_benchmark.cc @@ -0,0 +1,95 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "benchmark/benchmark.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/unused.h" + +namespace webrtc { + +class PerfTestData { + public: + PerfTestData() : cache_line_barrier_1_(), cache_line_barrier_2_() { + cache_line_barrier_1_[0]++; // Avoid 'is not used'. + cache_line_barrier_2_[0]++; // Avoid 'is not used'. 
+ } + + int AddToCounter(int add) { + MutexLock mu(&mu_); + my_counter_ += add; + return 0; + } + + private: + uint8_t cache_line_barrier_1_[64]; + Mutex mu_; + uint8_t cache_line_barrier_2_[64]; + int64_t my_counter_ = 0; +}; + +void BM_LockWithMutex(benchmark::State& state) { + static PerfTestData test_data; + for (auto s : state) { + RTC_UNUSED(s); + benchmark::DoNotOptimize(test_data.AddToCounter(2)); + } +} + +BENCHMARK(BM_LockWithMutex)->Threads(1); +BENCHMARK(BM_LockWithMutex)->Threads(2); +BENCHMARK(BM_LockWithMutex)->Threads(4); +BENCHMARK(BM_LockWithMutex)->ThreadPerCpu(); + +} // namespace webrtc + +/* + +Results: + +NB when reproducing: Remember to turn of power management features such as CPU +scaling before running! + +pthreads (Linux): +---------------------------------------------------------------------- +Run on (12 X 4500 MHz CPU s) +CPU Caches: + L1 Data 32 KiB (x6) + L1 Instruction 32 KiB (x6) + L2 Unified 1024 KiB (x6) + L3 Unified 8448 KiB (x1) +Load Average: 0.26, 0.28, 0.44 +---------------------------------------------------------------------- +Benchmark Time CPU Iterations +---------------------------------------------------------------------- +BM_LockWithMutex/threads:1 13.4 ns 13.4 ns 52192906 +BM_LockWithMutex/threads:2 44.2 ns 88.4 ns 8189944 +BM_LockWithMutex/threads:4 52.0 ns 198 ns 3743244 +BM_LockWithMutex/threads:12 84.9 ns 944 ns 733524 + +std::mutex performs like the pthread implementation (Linux). 
+ +Abseil (Linux): +---------------------------------------------------------------------- +Run on (12 X 4500 MHz CPU s) +CPU Caches: + L1 Data 32 KiB (x6) + L1 Instruction 32 KiB (x6) + L2 Unified 1024 KiB (x6) + L3 Unified 8448 KiB (x1) +Load Average: 0.27, 0.24, 0.37 +---------------------------------------------------------------------- +Benchmark Time CPU Iterations +---------------------------------------------------------------------- +BM_LockWithMutex/threads:1 15.0 ns 15.0 ns 46550231 +BM_LockWithMutex/threads:2 91.1 ns 182 ns 4059212 +BM_LockWithMutex/threads:4 40.8 ns 131 ns 5496560 +BM_LockWithMutex/threads:12 37.0 ns 130 ns 5377668 + +*/ diff --git a/rtc_base/synchronization/mutex_critical_section.h b/rtc_base/synchronization/mutex_critical_section.h new file mode 100644 index 0000000000..cb3d6a095c --- /dev/null +++ b/rtc_base/synchronization/mutex_critical_section.h @@ -0,0 +1,55 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_SYNCHRONIZATION_MUTEX_CRITICAL_SECTION_H_ +#define RTC_BASE_SYNCHRONIZATION_MUTEX_CRITICAL_SECTION_H_ + +#if defined(WEBRTC_WIN) +// clang-format off +// clang formating would change include order. + +// Include winsock2.h before including to maintain consistency with +// win32.h. To include win32.h directly, it must be broken out into its own +// build target. +#include +#include +#include // must come after windows headers. 
+// clang-format on + +#include "absl/base/attributes.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +class RTC_LOCKABLE MutexImpl final { + public: + MutexImpl() { InitializeCriticalSection(&critical_section_); } + MutexImpl(const MutexImpl&) = delete; + MutexImpl& operator=(const MutexImpl&) = delete; + ~MutexImpl() { DeleteCriticalSection(&critical_section_); } + + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { + EnterCriticalSection(&critical_section_); + } + ABSL_MUST_USE_RESULT bool TryLock() RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + return TryEnterCriticalSection(&critical_section_) != FALSE; + } + void Unlock() RTC_UNLOCK_FUNCTION() { + LeaveCriticalSection(&critical_section_); + } + + private: + CRITICAL_SECTION critical_section_; +}; + +} // namespace webrtc + +#endif // #if defined(WEBRTC_WIN) +#endif // RTC_BASE_SYNCHRONIZATION_MUTEX_CRITICAL_SECTION_H_ diff --git a/rtc_base/synchronization/mutex_pthread.h b/rtc_base/synchronization/mutex_pthread.h new file mode 100644 index 0000000000..8898ca5348 --- /dev/null +++ b/rtc_base/synchronization/mutex_pthread.h @@ -0,0 +1,54 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef RTC_BASE_SYNCHRONIZATION_MUTEX_PTHREAD_H_ +#define RTC_BASE_SYNCHRONIZATION_MUTEX_PTHREAD_H_ + +#if defined(WEBRTC_POSIX) + +#include +#if defined(WEBRTC_MAC) +#include +#endif + +#include "absl/base/attributes.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +class RTC_LOCKABLE MutexImpl final { + public: + MutexImpl() { + pthread_mutexattr_t mutex_attribute; + pthread_mutexattr_init(&mutex_attribute); +#if defined(WEBRTC_MAC) + pthread_mutexattr_setpolicy_np(&mutex_attribute, + _PTHREAD_MUTEX_POLICY_FIRSTFIT); +#endif + pthread_mutex_init(&mutex_, &mutex_attribute); + pthread_mutexattr_destroy(&mutex_attribute); + } + MutexImpl(const MutexImpl&) = delete; + MutexImpl& operator=(const MutexImpl&) = delete; + ~MutexImpl() { pthread_mutex_destroy(&mutex_); } + + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { pthread_mutex_lock(&mutex_); } + ABSL_MUST_USE_RESULT bool TryLock() RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + return pthread_mutex_trylock(&mutex_) == 0; + } + void Unlock() RTC_UNLOCK_FUNCTION() { pthread_mutex_unlock(&mutex_); } + + private: + pthread_mutex_t mutex_; +}; + +} // namespace webrtc +#endif // #if defined(WEBRTC_POSIX) +#endif // RTC_BASE_SYNCHRONIZATION_MUTEX_PTHREAD_H_ diff --git a/rtc_base/synchronization/mutex_race_check.h b/rtc_base/synchronization/mutex_race_check.h new file mode 100644 index 0000000000..cada6292b5 --- /dev/null +++ b/rtc_base/synchronization/mutex_race_check.h @@ -0,0 +1,65 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef RTC_BASE_SYNCHRONIZATION_MUTEX_RACE_CHECK_H_ +#define RTC_BASE_SYNCHRONIZATION_MUTEX_RACE_CHECK_H_ + +#include + +#include "absl/base/attributes.h" +#include "rtc_base/checks.h" +#include "rtc_base/system/unused.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +// This implementation class is useful when a consuming project can guarantee +// that all WebRTC invocation is happening serially. Additionally, the consuming +// project cannot use WebRTC code that spawn threads or task queues. +// +// The class internally check fails on Lock() if it finds the consumer actually +// invokes WebRTC concurrently. +// +// To use the race check mutex, define WEBRTC_RACE_CHECK_MUTEX globally. This +// also adds a dependency to absl::Mutex from logging.cc because even though +// objects are invoked serially, the logging is static and invoked concurrently +// and hence needs protection. +class RTC_LOCKABLE MutexImpl final { + public: + MutexImpl() = default; + MutexImpl(const MutexImpl&) = delete; + MutexImpl& operator=(const MutexImpl&) = delete; + + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { + bool was_free = free_.exchange(false, std::memory_order_acquire); + RTC_CHECK(was_free) + << "WEBRTC_RACE_CHECK_MUTEX: mutex locked concurrently."; + } + ABSL_MUST_USE_RESULT bool TryLock() RTC_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + bool was_free = free_.exchange(false, std::memory_order_acquire); + return was_free; + } + void Unlock() RTC_UNLOCK_FUNCTION() { + free_.store(true, std::memory_order_release); + } + + private: + // Release-acquire ordering is used. + // - In the Lock methods we're guaranteeing that reads and writes happening + // after the (Try)Lock don't appear to have happened before the Lock (acquire + // ordering). + // - In the Unlock method we're guaranteeing that reads and writes happening + // before the Unlock don't appear to happen after it (release ordering). 
+ std::atomic free_{true}; +}; + +} // namespace webrtc + +#endif // RTC_BASE_SYNCHRONIZATION_MUTEX_RACE_CHECK_H_ diff --git a/rtc_base/synchronization/mutex_unittest.cc b/rtc_base/synchronization/mutex_unittest.cc new file mode 100644 index 0000000000..b8c45d0a8c --- /dev/null +++ b/rtc_base/synchronization/mutex_unittest.cc @@ -0,0 +1,206 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/synchronization/mutex.h" + +#include +#include + +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "rtc_base/checks.h" +#include "rtc_base/event.h" +#include "rtc_base/location.h" +#include "rtc_base/message_handler.h" +#include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/yield.h" +#include "rtc_base/thread.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::rtc::Event; +using ::rtc::Message; +using ::rtc::MessageHandler; +using ::rtc::Thread; + +constexpr int kNumThreads = 16; + +template +class RTC_LOCKABLE RawMutexLocker { + public: + explicit RawMutexLocker(MutexType& mutex) : mutex_(mutex) {} + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { mutex_.Lock(); } + void Unlock() RTC_UNLOCK_FUNCTION() { mutex_.Unlock(); } + + private: + MutexType& mutex_; +}; + +class RTC_LOCKABLE RawMutexTryLocker { + public: + explicit RawMutexTryLocker(Mutex& mutex) : mutex_(mutex) {} + void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { + while (!mutex_.TryLock()) { + YieldCurrentThread(); + } + } + void Unlock() RTC_UNLOCK_FUNCTION() { mutex_.Unlock(); } + + private: + Mutex& mutex_; +}; + +template +class MutexLockLocker { + public: + 
explicit MutexLockLocker(MutexType& mutex) : mutex_(mutex) {} + void Lock() { lock_ = std::make_unique(&mutex_); } + void Unlock() { lock_ = nullptr; } + + private: + MutexType& mutex_; + std::unique_ptr lock_; +}; + +template +class LockRunner : public rtc::MessageHandlerAutoCleanup { + public: + template + explicit LockRunner(Args... args) + : threads_active_(0), + start_event_(true, false), + done_event_(true, false), + shared_value_(0), + mutex_(args...), + locker_(mutex_) {} + + bool Run() { + // Signal all threads to start. + start_event_.Set(); + + // Wait for all threads to finish. + return done_event_.Wait(kLongTime); + } + + void SetExpectedThreadCount(int count) { threads_active_ = count; } + + int shared_value() { + int shared_value; + locker_.Lock(); + shared_value = shared_value_; + locker_.Unlock(); + return shared_value_; + } + + void OnMessage(Message* msg) override { + ASSERT_TRUE(start_event_.Wait(kLongTime)); + locker_.Lock(); + + EXPECT_EQ(0, shared_value_); + int old = shared_value_; + + // Use a loop to increase the chance of race. If the |locker_| + // implementation is faulty, it would be improbable that the error slips + // through. 
+ for (int i = 0; i < kOperationsToRun; ++i) { + benchmark::DoNotOptimize(++shared_value_); + } + EXPECT_EQ(old + kOperationsToRun, shared_value_); + shared_value_ = 0; + + locker_.Unlock(); + if (threads_active_.fetch_sub(1) == 1) { + done_event_.Set(); + } + } + + private: + static constexpr int kLongTime = 10000; // 10 seconds + static constexpr int kOperationsToRun = 1000; + + std::atomic threads_active_; + Event start_event_; + Event done_event_; + int shared_value_; + MutexType mutex_; + MutexLocker locker_; +}; + +void StartThreads(std::vector>& threads, + MessageHandler* handler) { + for (int i = 0; i < kNumThreads; ++i) { + std::unique_ptr thread(Thread::Create()); + thread->Start(); + thread->Post(RTC_FROM_HERE, handler); + threads.push_back(std::move(thread)); + } +} + +TEST(MutexTest, ProtectsSharedResourceWithMutexAndRawMutexLocker) { + std::vector> threads; + LockRunner> runner; + StartThreads(threads, &runner); + runner.SetExpectedThreadCount(kNumThreads); + EXPECT_TRUE(runner.Run()); + EXPECT_EQ(0, runner.shared_value()); +} + +TEST(MutexTest, ProtectsSharedResourceWithMutexAndRawMutexTryLocker) { + std::vector> threads; + LockRunner runner; + StartThreads(threads, &runner); + runner.SetExpectedThreadCount(kNumThreads); + EXPECT_TRUE(runner.Run()); + EXPECT_EQ(0, runner.shared_value()); +} + +TEST(MutexTest, ProtectsSharedResourceWithMutexAndMutexLocker) { + std::vector> threads; + LockRunner> runner; + StartThreads(threads, &runner); + runner.SetExpectedThreadCount(kNumThreads); + EXPECT_TRUE(runner.Run()); + EXPECT_EQ(0, runner.shared_value()); +} + +TEST(MutexTest, ProtectsSharedResourceWithGlobalMutexAndRawMutexLocker) { + std::vector> threads; + LockRunner> runner(absl::kConstInit); + StartThreads(threads, &runner); + runner.SetExpectedThreadCount(kNumThreads); + EXPECT_TRUE(runner.Run()); + EXPECT_EQ(0, runner.shared_value()); +} + +TEST(MutexTest, ProtectsSharedResourceWithGlobalMutexAndMutexLocker) { + std::vector> threads; + LockRunner> 
runner( + absl::kConstInit); + StartThreads(threads, &runner); + runner.SetExpectedThreadCount(kNumThreads); + EXPECT_TRUE(runner.Run()); + EXPECT_EQ(0, runner.shared_value()); +} + +TEST(MutexTest, GlobalMutexCanHaveStaticStorageDuration) { + ABSL_CONST_INIT static GlobalMutex global_lock(absl::kConstInit); + global_lock.Lock(); + global_lock.Unlock(); +} + +} // namespace +} // namespace webrtc diff --git a/rtc_base/synchronization/rw_lock_posix.cc b/rtc_base/synchronization/rw_lock_posix.cc deleted file mode 100644 index 15ef3d706e..0000000000 --- a/rtc_base/synchronization/rw_lock_posix.cc +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "rtc_base/synchronization/rw_lock_posix.h" - -#include - -namespace webrtc { - -RWLockPosix::RWLockPosix() : lock_() {} - -RWLockPosix::~RWLockPosix() { - pthread_rwlock_destroy(&lock_); -} - -RWLockPosix* RWLockPosix::Create() { - RWLockPosix* ret_val = new RWLockPosix(); - if (!ret_val->Init()) { - delete ret_val; - return NULL; - } - return ret_val; -} - -bool RWLockPosix::Init() { - return pthread_rwlock_init(&lock_, 0) == 0; -} - -void RWLockPosix::AcquireLockExclusive() { - pthread_rwlock_wrlock(&lock_); -} - -void RWLockPosix::ReleaseLockExclusive() { - pthread_rwlock_unlock(&lock_); -} - -void RWLockPosix::AcquireLockShared() { - pthread_rwlock_rdlock(&lock_); -} - -void RWLockPosix::ReleaseLockShared() { - pthread_rwlock_unlock(&lock_); -} - -} // namespace webrtc diff --git a/rtc_base/synchronization/rw_lock_posix.h b/rtc_base/synchronization/rw_lock_posix.h deleted file mode 100644 index a103fe7714..0000000000 --- a/rtc_base/synchronization/rw_lock_posix.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef RTC_BASE_SYNCHRONIZATION_RW_LOCK_POSIX_H_ -#define RTC_BASE_SYNCHRONIZATION_RW_LOCK_POSIX_H_ - -#include - -#include "rtc_base/synchronization/rw_lock_wrapper.h" - -namespace webrtc { - -class RWLockPosix : public RWLockWrapper { - public: - static RWLockPosix* Create(); - ~RWLockPosix() override; - - void AcquireLockExclusive() override; - void ReleaseLockExclusive() override; - - void AcquireLockShared() override; - void ReleaseLockShared() override; - - private: - RWLockPosix(); - bool Init(); - - pthread_rwlock_t lock_; -}; - -} // namespace webrtc - -#endif // RTC_BASE_SYNCHRONIZATION_RW_LOCK_POSIX_H_ diff --git a/rtc_base/synchronization/rw_lock_win.cc b/rtc_base/synchronization/rw_lock_win.cc deleted file mode 100644 index 3274c78a94..0000000000 --- a/rtc_base/synchronization/rw_lock_win.cc +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "rtc_base/synchronization/rw_lock_win.h" - -#include "rtc_base/logging.h" - -namespace webrtc { - -RWLockWin::RWLockWin() { - InitializeSRWLock(&lock_); -} - -RWLockWin* RWLockWin::Create() { - return new RWLockWin(); -} - -void RWLockWin::AcquireLockExclusive() { - AcquireSRWLockExclusive(&lock_); -} - -void RWLockWin::ReleaseLockExclusive() { - ReleaseSRWLockExclusive(&lock_); -} - -void RWLockWin::AcquireLockShared() { - AcquireSRWLockShared(&lock_); -} - -void RWLockWin::ReleaseLockShared() { - ReleaseSRWLockShared(&lock_); -} - -} // namespace webrtc diff --git a/rtc_base/synchronization/rw_lock_win.h b/rtc_base/synchronization/rw_lock_win.h deleted file mode 100644 index 43bde1da9b..0000000000 --- a/rtc_base/synchronization/rw_lock_win.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef RTC_BASE_SYNCHRONIZATION_RW_LOCK_WIN_H_ -#define RTC_BASE_SYNCHRONIZATION_RW_LOCK_WIN_H_ - -#include - -#include "rtc_base/synchronization/rw_lock_wrapper.h" - -namespace webrtc { - -class RWLockWin : public RWLockWrapper { - public: - static RWLockWin* Create(); - - void AcquireLockExclusive() override; - void ReleaseLockExclusive() override; - - void AcquireLockShared() override; - void ReleaseLockShared() override; - - private: - RWLockWin(); - - SRWLOCK lock_; -}; - -} // namespace webrtc - -#endif // RTC_BASE_SYNCHRONIZATION_RW_LOCK_WIN_H_ diff --git a/rtc_base/synchronization/rw_lock_wrapper.cc b/rtc_base/synchronization/rw_lock_wrapper.cc deleted file mode 100644 index fb464192a3..0000000000 --- a/rtc_base/synchronization/rw_lock_wrapper.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "rtc_base/synchronization/rw_lock_wrapper.h" - -#if defined(_WIN32) -#include "rtc_base/synchronization/rw_lock_win.h" -#else -#include "rtc_base/synchronization/rw_lock_posix.h" -#endif - -namespace webrtc { - -RWLockWrapper* RWLockWrapper::CreateRWLock() { -#ifdef _WIN32 - return RWLockWin::Create(); -#else - return RWLockPosix::Create(); -#endif -} - -} // namespace webrtc diff --git a/rtc_base/synchronization/rw_lock_wrapper.h b/rtc_base/synchronization/rw_lock_wrapper.h deleted file mode 100644 index 39f52fca35..0000000000 --- a/rtc_base/synchronization/rw_lock_wrapper.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef RTC_BASE_SYNCHRONIZATION_RW_LOCK_WRAPPER_H_ -#define RTC_BASE_SYNCHRONIZATION_RW_LOCK_WRAPPER_H_ - -#include "rtc_base/thread_annotations.h" - -// Note, Windows pre-Vista version of RW locks are not supported natively. For -// these OSs regular critical sections have been used to approximate RW lock -// functionality and will therefore have worse performance. - -namespace webrtc { - -class RTC_LOCKABLE RWLockWrapper { - public: - static RWLockWrapper* CreateRWLock(); - virtual ~RWLockWrapper() {} - - virtual void AcquireLockExclusive() RTC_EXCLUSIVE_LOCK_FUNCTION() = 0; - virtual void ReleaseLockExclusive() RTC_UNLOCK_FUNCTION() = 0; - - virtual void AcquireLockShared() RTC_SHARED_LOCK_FUNCTION() = 0; - virtual void ReleaseLockShared() RTC_UNLOCK_FUNCTION() = 0; -}; - -// RAII extensions of the RW lock. Prevents Acquire/Release missmatches and -// provides more compact locking syntax. 
-class RTC_SCOPED_LOCKABLE ReadLockScoped { - public: - explicit ReadLockScoped(RWLockWrapper& rw_lock) - RTC_SHARED_LOCK_FUNCTION(rw_lock) - : rw_lock_(rw_lock) { - rw_lock_.AcquireLockShared(); - } - - ~ReadLockScoped() RTC_UNLOCK_FUNCTION() { rw_lock_.ReleaseLockShared(); } - - private: - RWLockWrapper& rw_lock_; -}; - -class RTC_SCOPED_LOCKABLE WriteLockScoped { - public: - explicit WriteLockScoped(RWLockWrapper& rw_lock) - RTC_EXCLUSIVE_LOCK_FUNCTION(rw_lock) - : rw_lock_(rw_lock) { - rw_lock_.AcquireLockExclusive(); - } - - ~WriteLockScoped() RTC_UNLOCK_FUNCTION() { rw_lock_.ReleaseLockExclusive(); } - - private: - RWLockWrapper& rw_lock_; -}; - -} // namespace webrtc - -#endif // RTC_BASE_SYNCHRONIZATION_RW_LOCK_WRAPPER_H_ diff --git a/rtc_base/synchronization/sequence_checker.cc b/rtc_base/synchronization/sequence_checker.cc deleted file mode 100644 index d64f32a616..0000000000 --- a/rtc_base/synchronization/sequence_checker.cc +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#include "rtc_base/synchronization/sequence_checker.h" - -#if defined(WEBRTC_MAC) -#include -#endif - -namespace webrtc { -namespace { -// On Mac, returns the label of the current dispatch queue; elsewhere, return -// null. 
-const void* GetSystemQueueRef() { -#if defined(WEBRTC_MAC) - return dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL); -#else - return nullptr; -#endif -} -} // namespace - -SequenceCheckerImpl::SequenceCheckerImpl() - : attached_(true), - valid_thread_(rtc::CurrentThreadRef()), - valid_queue_(TaskQueueBase::Current()), - valid_system_queue_(GetSystemQueueRef()) {} - -SequenceCheckerImpl::~SequenceCheckerImpl() = default; - -bool SequenceCheckerImpl::IsCurrent() const { - const TaskQueueBase* const current_queue = TaskQueueBase::Current(); - const rtc::PlatformThreadRef current_thread = rtc::CurrentThreadRef(); - const void* const current_system_queue = GetSystemQueueRef(); - rtc::CritScope scoped_lock(&lock_); - if (!attached_) { // Previously detached. - attached_ = true; - valid_thread_ = current_thread; - valid_queue_ = current_queue; - valid_system_queue_ = current_system_queue; - return true; - } - if (valid_queue_ || current_queue) { - return valid_queue_ == current_queue; - } - if (valid_system_queue_ && valid_system_queue_ == current_system_queue) { - return true; - } - return rtc::IsThreadRefEqual(valid_thread_, current_thread); -} - -void SequenceCheckerImpl::Detach() { - rtc::CritScope scoped_lock(&lock_); - attached_ = false; - // We don't need to touch the other members here, they will be - // reset on the next call to IsCurrent(). -} - -} // namespace webrtc diff --git a/rtc_base/synchronization/sequence_checker.h b/rtc_base/synchronization/sequence_checker.h deleted file mode 100644 index fe644fa14e..0000000000 --- a/rtc_base/synchronization/sequence_checker.h +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#ifndef RTC_BASE_SYNCHRONIZATION_SEQUENCE_CHECKER_H_ -#define RTC_BASE_SYNCHRONIZATION_SEQUENCE_CHECKER_H_ - -#include "api/task_queue/task_queue_base.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/platform_thread_types.h" -#include "rtc_base/system/rtc_export.h" -#include "rtc_base/thread_annotations.h" - -namespace webrtc { -// Real implementation of SequenceChecker, for use in debug mode, or -// for temporary use in release mode (e.g. to RTC_CHECK on a threading issue -// seen only in the wild). -// -// Note: You should almost always use the SequenceChecker class to get the -// right version for your build configuration. -class RTC_EXPORT SequenceCheckerImpl { - public: - SequenceCheckerImpl(); - ~SequenceCheckerImpl(); - - bool IsCurrent() const; - // Changes the task queue or thread that is checked for in IsCurrent. This can - // be useful when an object may be created on one task queue / thread and then - // used exclusively on another thread. - void Detach(); - - private: - rtc::CriticalSection lock_; - // These are mutable so that IsCurrent can set them. - mutable bool attached_ RTC_GUARDED_BY(lock_); - mutable rtc::PlatformThreadRef valid_thread_ RTC_GUARDED_BY(lock_); - mutable const TaskQueueBase* valid_queue_ RTC_GUARDED_BY(lock_); - mutable const void* valid_system_queue_ RTC_GUARDED_BY(lock_); -}; - -// Do nothing implementation, for use in release mode. -// -// Note: You should almost always use the SequenceChecker class to get the -// right version for your build configuration. -class SequenceCheckerDoNothing { - public: - bool IsCurrent() const { return true; } - void Detach() {} -}; - -// SequenceChecker is a helper class used to help verify that some methods -// of a class are called on the same task queue or thread. 
A -// SequenceChecker is bound to a a task queue if the object is -// created on a task queue, or a thread otherwise. -// -// -// Example: -// class MyClass { -// public: -// void Foo() { -// RTC_DCHECK_RUN_ON(sequence_checker_); -// ... (do stuff) ... -// } -// -// private: -// SequenceChecker sequence_checker_; -// } -// -// In Release mode, IsCurrent will always return true. -#if RTC_DCHECK_IS_ON -class RTC_LOCKABLE SequenceChecker : public SequenceCheckerImpl {}; -#else -class RTC_LOCKABLE SequenceChecker : public SequenceCheckerDoNothing {}; -#endif // RTC_ENABLE_THREAD_CHECKER - -namespace webrtc_seq_check_impl { -// Helper class used by RTC_DCHECK_RUN_ON (see example usage below). -class RTC_SCOPED_LOCKABLE SequenceCheckerScope { - public: - template - explicit SequenceCheckerScope(const ThreadLikeObject* thread_like_object) - RTC_EXCLUSIVE_LOCK_FUNCTION(thread_like_object) {} - SequenceCheckerScope(const SequenceCheckerScope&) = delete; - SequenceCheckerScope& operator=(const SequenceCheckerScope&) = delete; - ~SequenceCheckerScope() RTC_UNLOCK_FUNCTION() {} - - template - static bool IsCurrent(const ThreadLikeObject* thread_like_object) { - return thread_like_object->IsCurrent(); - } -}; -} // namespace webrtc_seq_check_impl -} // namespace webrtc - -// RTC_RUN_ON/RTC_GUARDED_BY/RTC_DCHECK_RUN_ON macros allows to annotate -// variables are accessed from same thread/task queue. -// Using tools designed to check mutexes, it checks at compile time everywhere -// variable is access, there is a run-time dcheck thread/task queue is correct. 
-// -// class ThreadExample { -// public: -// void NeedVar1() { -// RTC_DCHECK_RUN_ON(network_thread_); -// transport_->Send(); -// } -// -// private: -// rtc::Thread* network_thread_; -// int transport_ RTC_GUARDED_BY(network_thread_); -// }; -// -// class SequenceCheckerExample { -// public: -// int CalledFromPacer() RTC_RUN_ON(pacer_sequence_checker_) { -// return var2_; -// } -// -// void CallMeFromPacer() { -// RTC_DCHECK_RUN_ON(&pacer_sequence_checker_) -// << "Should be called from pacer"; -// CalledFromPacer(); -// } -// -// private: -// int pacer_var_ RTC_GUARDED_BY(pacer_sequence_checker_); -// SequenceChecker pacer_sequence_checker_; -// }; -// -// class TaskQueueExample { -// public: -// class Encoder { -// public: -// rtc::TaskQueue* Queue() { return encoder_queue_; } -// void Encode() { -// RTC_DCHECK_RUN_ON(encoder_queue_); -// DoSomething(var_); -// } -// -// private: -// rtc::TaskQueue* const encoder_queue_; -// Frame var_ RTC_GUARDED_BY(encoder_queue_); -// }; -// -// void Encode() { -// // Will fail at runtime when DCHECK is enabled: -// // encoder_->Encode(); -// // Will work: -// rtc::scoped_refptr encoder = encoder_; -// encoder_->Queue()->PostTask([encoder] { encoder->Encode(); }); -// } -// -// private: -// rtc::scoped_refptr encoder_; -// } - -// Document if a function expected to be called from same thread/task queue. -#define RTC_RUN_ON(x) \ - RTC_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x)) - -#define RTC_DCHECK_RUN_ON(x) \ - webrtc::webrtc_seq_check_impl::SequenceCheckerScope seq_check_scope(x); \ - RTC_DCHECK((x)->IsCurrent()) - -#endif // RTC_BASE_SYNCHRONIZATION_SEQUENCE_CHECKER_H_ diff --git a/rtc_base/synchronization/sequence_checker_internal.cc b/rtc_base/synchronization/sequence_checker_internal.cc new file mode 100644 index 0000000000..63badd9538 --- /dev/null +++ b/rtc_base/synchronization/sequence_checker_internal.cc @@ -0,0 +1,116 @@ +/* + * Copyright 2019 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "rtc_base/synchronization/sequence_checker_internal.h" + +#include + +#if defined(WEBRTC_MAC) +#include +#endif + +#include "rtc_base/checks.h" +#include "rtc_base/strings/string_builder.h" + +namespace webrtc { +namespace webrtc_sequence_checker_internal { +namespace { +// On Mac, returns the label of the current dispatch queue; elsewhere, return +// null. +const void* GetSystemQueueRef() { +#if defined(WEBRTC_MAC) + return dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL); +#else + return nullptr; +#endif +} + +} // namespace + +SequenceCheckerImpl::SequenceCheckerImpl() + : attached_(true), + valid_thread_(rtc::CurrentThreadRef()), + valid_queue_(TaskQueueBase::Current()), + valid_system_queue_(GetSystemQueueRef()) {} + +bool SequenceCheckerImpl::IsCurrent() const { + const TaskQueueBase* const current_queue = TaskQueueBase::Current(); + const rtc::PlatformThreadRef current_thread = rtc::CurrentThreadRef(); + const void* const current_system_queue = GetSystemQueueRef(); + MutexLock scoped_lock(&lock_); + if (!attached_) { // Previously detached. 
+ attached_ = true; + valid_thread_ = current_thread; + valid_queue_ = current_queue; + valid_system_queue_ = current_system_queue; + return true; + } + if (valid_queue_ || current_queue) { + return valid_queue_ == current_queue; + } + if (valid_system_queue_ && valid_system_queue_ == current_system_queue) { + return true; + } + return rtc::IsThreadRefEqual(valid_thread_, current_thread); +} + +void SequenceCheckerImpl::Detach() { + MutexLock scoped_lock(&lock_); + attached_ = false; + // We don't need to touch the other members here, they will be + // reset on the next call to IsCurrent(). +} + +#if RTC_DCHECK_IS_ON +std::string SequenceCheckerImpl::ExpectationToString() const { + const TaskQueueBase* const current_queue = TaskQueueBase::Current(); + const rtc::PlatformThreadRef current_thread = rtc::CurrentThreadRef(); + const void* const current_system_queue = GetSystemQueueRef(); + MutexLock scoped_lock(&lock_); + if (!attached_) + return "Checker currently not attached."; + + // The format of the string is meant to compliment the one we have inside of + // FatalLog() (checks.cc). 
Example: + // + // # Expected: TQ: 0x0 SysQ: 0x7fff69541330 Thread: 0x11dcf6dc0 + // # Actual: TQ: 0x7fa8f0604190 SysQ: 0x7fa8f0604a30 Thread: 0x700006f1a000 + // TaskQueue doesn't match + + rtc::StringBuilder message; + message.AppendFormat( + "# Expected: TQ: %p SysQ: %p Thread: %p\n" + "# Actual: TQ: %p SysQ: %p Thread: %p\n", + valid_queue_, valid_system_queue_, + reinterpret_cast(valid_thread_), current_queue, + current_system_queue, reinterpret_cast(current_thread)); + + if ((valid_queue_ || current_queue) && valid_queue_ != current_queue) { + message << "TaskQueue doesn't match\n"; + } else if (valid_system_queue_ && + valid_system_queue_ != current_system_queue) { + message << "System queue doesn't match\n"; + } else if (!rtc::IsThreadRefEqual(valid_thread_, current_thread)) { + message << "Threads don't match\n"; + } + + return message.Release(); +} +#endif // RTC_DCHECK_IS_ON + +std::string ExpectationToString(const SequenceCheckerImpl* checker) { +#if RTC_DCHECK_IS_ON + return checker->ExpectationToString(); +#else + return std::string(); +#endif +} + +} // namespace webrtc_sequence_checker_internal +} // namespace webrtc diff --git a/rtc_base/synchronization/sequence_checker_internal.h b/rtc_base/synchronization/sequence_checker_internal.h new file mode 100644 index 0000000000..f7ac6de125 --- /dev/null +++ b/rtc_base/synchronization/sequence_checker_internal.h @@ -0,0 +1,93 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#ifndef RTC_BASE_SYNCHRONIZATION_SEQUENCE_CHECKER_INTERNAL_H_ +#define RTC_BASE_SYNCHRONIZATION_SEQUENCE_CHECKER_INTERNAL_H_ + +#include +#include + +#include "api/task_queue/task_queue_base.h" +#include "rtc_base/platform_thread_types.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/rtc_export.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { +namespace webrtc_sequence_checker_internal { + +// Real implementation of SequenceChecker, for use in debug mode, or +// for temporary use in release mode (e.g. to RTC_CHECK on a threading issue +// seen only in the wild). +// +// Note: You should almost always use the SequenceChecker class to get the +// right version for your build configuration. +class RTC_EXPORT SequenceCheckerImpl { + public: + SequenceCheckerImpl(); + ~SequenceCheckerImpl() = default; + + bool IsCurrent() const; + // Changes the task queue or thread that is checked for in IsCurrent. This can + // be useful when an object may be created on one task queue / thread and then + // used exclusively on another thread. + void Detach(); + + // Returns a string that is formatted to match with the error string printed + // by RTC_CHECK() when a condition is not met. + // This is used in conjunction with the RTC_DCHECK_RUN_ON() macro. + std::string ExpectationToString() const; + + private: + mutable Mutex lock_; + // These are mutable so that IsCurrent can set them. + mutable bool attached_ RTC_GUARDED_BY(lock_); + mutable rtc::PlatformThreadRef valid_thread_ RTC_GUARDED_BY(lock_); + mutable const TaskQueueBase* valid_queue_ RTC_GUARDED_BY(lock_); + mutable const void* valid_system_queue_ RTC_GUARDED_BY(lock_); +}; + +// Do nothing implementation, for use in release mode. +// +// Note: You should almost always use the SequenceChecker class to get the +// right version for your build configuration. 
+class SequenceCheckerDoNothing { + public: + bool IsCurrent() const { return true; } + void Detach() {} +}; + +// Helper class used by RTC_DCHECK_RUN_ON (see example usage below). +class RTC_SCOPED_LOCKABLE SequenceCheckerScope { + public: + template + explicit SequenceCheckerScope(const ThreadLikeObject* thread_like_object) + RTC_EXCLUSIVE_LOCK_FUNCTION(thread_like_object) {} + SequenceCheckerScope(const SequenceCheckerScope&) = delete; + SequenceCheckerScope& operator=(const SequenceCheckerScope&) = delete; + ~SequenceCheckerScope() RTC_UNLOCK_FUNCTION() {} + + template + static bool IsCurrent(const ThreadLikeObject* thread_like_object) { + return thread_like_object->IsCurrent(); + } +}; + +std::string ExpectationToString(const SequenceCheckerImpl* checker); + +// Catch-all implementation for types other than explicitly supported above. +template +std::string ExpectationToString(const ThreadLikeObject*) { + return std::string(); +} + +} // namespace webrtc_sequence_checker_internal +} // namespace webrtc + +#endif // RTC_BASE_SYNCHRONIZATION_SEQUENCE_CHECKER_INTERNAL_H_ diff --git a/rtc_base/synchronization/yield.cc b/rtc_base/synchronization/yield.cc new file mode 100644 index 0000000000..cbb58d12ab --- /dev/null +++ b/rtc_base/synchronization/yield.cc @@ -0,0 +1,36 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/synchronization/yield.h" + +#if defined(WEBRTC_WIN) +#include +#else +#include +#include +#endif + +namespace webrtc { + +void YieldCurrentThread() { + // TODO(bugs.webrtc.org/11634): use dedicated OS functionality instead of + // sleep for yielding. 
+#if defined(WEBRTC_WIN) + ::Sleep(0); +#elif defined(WEBRTC_MAC) && defined(RTC_USE_NATIVE_MUTEX_ON_MAC) && \ + !RTC_USE_NATIVE_MUTEX_ON_MAC + sched_yield(); +#else + static const struct timespec ts_null = {0}; + nanosleep(&ts_null, nullptr); +#endif +} + +} // namespace webrtc diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecFactory.h b/rtc_base/synchronization/yield.h similarity index 53% rename from sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecFactory.h rename to rtc_base/synchronization/yield.h index 5e8d353aa1..d4f5f99f37 100644 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecFactory.h +++ b/rtc_base/synchronization/yield.h @@ -1,5 +1,5 @@ /* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. + * Copyright 2020 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -7,8 +7,14 @@ * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ +#ifndef RTC_BASE_SYNCHRONIZATION_YIELD_H_ +#define RTC_BASE_SYNCHRONIZATION_YIELD_H_ -#import "base/RTCVideoDecoderFactory.h" -#import "base/RTCVideoEncoderFactory.h" -#import "components/video_codec/RTCDefaultVideoDecoderFactory.h" -#import "components/video_codec/RTCDefaultVideoEncoderFactory.h" +namespace webrtc { + +// Request rescheduling of threads. 
+void YieldCurrentThread(); + +} // namespace webrtc + +#endif // RTC_BASE_SYNCHRONIZATION_YIELD_H_ diff --git a/rtc_base/system/BUILD.gn b/rtc_base/system/BUILD.gn index 79cb301038..c604796e60 100644 --- a/rtc_base/system/BUILD.gn +++ b/rtc_base/system/BUILD.gn @@ -32,6 +32,19 @@ rtc_library("file_wrapper") { ] } +if (rtc_include_tests) { + rtc_library("file_wrapper_unittests") { + testonly = true + sources = [ "file_wrapper_unittest.cc" ] + deps = [ + ":file_wrapper", + "//rtc_base:checks", + "//test:fileutils", + "//test:test_support", + ] + } +} + rtc_source_set("ignore_warnings") { sources = [ "ignore_warnings.h" ] } @@ -44,6 +57,10 @@ rtc_source_set("unused") { sources = [ "unused.h" ] } +rtc_source_set("assume") { + sources = [ "assume.h" ] +} + rtc_source_set("rtc_export") { sources = [ "rtc_export.h", @@ -51,6 +68,10 @@ rtc_source_set("rtc_export") { ] } +rtc_source_set("no_unique_address") { + sources = [ "no_unique_address.h" ] +} + if (is_mac || is_ios) { rtc_library("cocoa_threading") { sources = [ @@ -58,7 +79,7 @@ if (is_mac || is_ios) { "cocoa_threading.mm", ] deps = [ "..:checks" ] - libs = [ "Foundation.framework" ] + frameworks = [ "Foundation.framework" ] } rtc_library("gcd_helpers") { @@ -72,13 +93,14 @@ if (is_mac || is_ios) { rtc_source_set("thread_registry") { sources = [ "thread_registry.h" ] - deps = [ "..:rtc_base_approved" ] + deps = [ + "..:rtc_base_approved", + "../synchronization:mutex", + ] if (is_android && !build_with_chromium) { sources += [ "thread_registry.cc" ] - deps += [ - "../../sdk/android:native_api_stacktrace", - "//third_party/abseil-cpp/absl/base:core_headers", - ] + deps += [ "../../sdk/android:native_api_stacktrace" ] + absl_deps = [ "//third_party/abseil-cpp/absl/base:core_headers" ] } } diff --git a/rtc_base/system/arch.h b/rtc_base/system/arch.h index ed216e660f..be2367b85f 100644 --- a/rtc_base/system/arch.h +++ b/rtc_base/system/arch.h @@ -15,8 +15,9 @@ #define RTC_BASE_SYSTEM_ARCH_H_ // Processor architecture 
detection. For more info on what's defined, see: -// http://msdn.microsoft.com/en-us/library/b0084kay.aspx -// http://www.agner.org/optimize/calling_conventions.pdf +// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros +// https://www.agner.org/optimize/calling_conventions.pdf +// https://sourceforge.net/p/predef/wiki/Architectures/ // or with gcc, run: "echo | gcc -E -dM -" #if defined(_M_X64) || defined(__x86_64__) #define WEBRTC_ARCH_X86_FAMILY @@ -32,17 +33,45 @@ #define WEBRTC_ARCH_X86 #define WEBRTC_ARCH_32_BITS #define WEBRTC_ARCH_LITTLE_ENDIAN -#elif defined(__ARMEL__) +#elif defined(_M_ARM) || defined(__ARMEL__) #define WEBRTC_ARCH_ARM_FAMILY #define WEBRTC_ARCH_32_BITS #define WEBRTC_ARCH_LITTLE_ENDIAN -#elif defined(__MIPSEL__) +#elif defined(__MIPSEL__) || defined(__MIPSEB__) #define WEBRTC_ARCH_MIPS_FAMILY #if defined(__LP64__) #define WEBRTC_ARCH_64_BITS #else #define WEBRTC_ARCH_32_BITS #endif +#if defined(__MIPSEL__) +#define WEBRTC_ARCH_LITTLE_ENDIAN +#else +#define WEBRTC_ARCH_BIG_ENDIAN +#endif +#elif defined(__PPC__) +#if defined(__PPC64__) +#define WEBRTC_ARCH_64_BITS +#else +#define WEBRTC_ARCH_32_BITS +#endif +#if defined(__LITTLE_ENDIAN__) +#define WEBRTC_ARCH_LITTLE_ENDIAN +#else +#define WEBRTC_ARCH_BIG_ENDIAN +#endif +#elif defined(__sparc) || defined(__sparc__) +#if __SIZEOF_LONG__ == 8 +#define WEBRTC_ARCH_64_BITS +#else +#define WEBRTC_ARCH_32_BITS +#endif +#define WEBRTC_ARCH_BIG_ENDIAN +#elif defined(__riscv) && __riscv_xlen == 64 +#define WEBRTC_ARCH_64_BITS +#define WEBRTC_ARCH_LITTLE_ENDIAN +#elif defined(__riscv) && __riscv_xlen == 32 +#define WEBRTC_ARCH_32_BITS #define WEBRTC_ARCH_LITTLE_ENDIAN #elif defined(__pnacl__) #define WEBRTC_ARCH_32_BITS diff --git a/rtc_base/system/assume.h b/rtc_base/system/assume.h new file mode 100644 index 0000000000..231c9e18ad --- /dev/null +++ b/rtc_base/system/assume.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_SYSTEM_ASSUME_H_ +#define RTC_BASE_SYSTEM_ASSUME_H_ + +// Possibly evaluate `p`, promising the compiler that the result is true; the +// compiler is allowed (but not required) to use this information when +// optimizing the code. USE WITH CAUTION! If you promise the compiler things +// that aren't true, it will build a broken binary for you. +// +// As a simple example, the compiler is allowed to transform this +// +// RTC_ASSUME(x == 4); +// return x; +// +// into this +// +// return 4; +// +// It is even allowed to propagate the assumption "backwards in time", if it can +// prove that it must have held at some earlier time. For example, the compiler +// is allowed to transform this +// +// int Add(int x, int y) { +// if (x == 17) +// y += 1; +// RTC_ASSUME(x != 17); +// return x + y; +// } +// +// into this +// +// int Add(int x, int y) { +// return x + y; +// } +// +// since if `x` isn't 17 on the third line of the function body, the test of `x +// == 17` on the first line must fail since nothing can modify the local +// variable `x` in between. +// +// The intended use is to allow the compiler to optimize better. 
For example, +// here we allow the compiler to omit an instruction that ensures correct +// rounding of negative arguments: +// +// int DivBy2(int x) { +// RTC_ASSUME(x >= 0); +// return x / 2; +// } +// +// and here we allow the compiler to possibly omit a null check: +// +// void Delete(int* p) { +// RTC_ASSUME(p != nullptr); +// delete p; +// } +// +// clang-format off +#if defined(__GNUC__) +#define RTC_ASSUME(p) do { if (!(p)) __builtin_unreachable(); } while (0) +#else +#define RTC_ASSUME(p) do {} while (0) +#endif +// clang-format on + +#endif // RTC_BASE_SYSTEM_ASSUME_H_ diff --git a/rtc_base/system/file_wrapper.cc b/rtc_base/system/file_wrapper.cc index 2828790e09..3e49315793 100644 --- a/rtc_base/system/file_wrapper.cc +++ b/rtc_base/system/file_wrapper.cc @@ -89,6 +89,22 @@ bool FileWrapper::SeekTo(int64_t position) { return fseek(file_, rtc::checked_cast(position), SEEK_SET) == 0; } +long FileWrapper::FileSize() { + if (file_ == nullptr) + return -1; + long original_position = ftell(file_); + if (original_position < 0) + return -1; + int seek_error = fseek(file_, 0, SEEK_END); + if (seek_error) + return -1; + long file_size = ftell(file_); + seek_error = fseek(file_, original_position, SEEK_SET); + if (seek_error) + return -1; + return file_size; +} + bool FileWrapper::Flush() { RTC_DCHECK(file_); return fflush(file_) == 0; diff --git a/rtc_base/system/file_wrapper.h b/rtc_base/system/file_wrapper.h index 24c333a6c3..0b293d9a80 100644 --- a/rtc_base/system/file_wrapper.h +++ b/rtc_base/system/file_wrapper.h @@ -14,7 +14,7 @@ #include #include -#include "rtc_base/critical_section.h" +#include // Implementation that can read (exclusive) or write from/to a file. 
@@ -38,7 +38,6 @@ class FileWrapper final { static FileWrapper OpenReadOnly(const std::string& file_name_utf8); static FileWrapper OpenWriteOnly(const char* file_name_utf8, int* error = nullptr); - static FileWrapper OpenWriteOnly(const std::string& file_name_utf8, int* error = nullptr); @@ -87,6 +86,11 @@ class FileWrapper final { // Seek to given position. bool SeekTo(int64_t position); + // Returns the file size or -1 if a size could not be determined. + // (A file size might not exists for non-seekable files or file-like + // objects, for example /dev/tty on unix.) + long FileSize(); + // Returns number of bytes read. Short count indicates EOF or error. size_t Read(void* buf, size_t length); diff --git a/rtc_base/system/file_wrapper_unittest.cc b/rtc_base/system/file_wrapper_unittest.cc new file mode 100644 index 0000000000..980b565c73 --- /dev/null +++ b/rtc_base/system/file_wrapper_unittest.cc @@ -0,0 +1,69 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "rtc_base/system/file_wrapper.h" + +#include "rtc_base/checks.h" +#include "test/gtest.h" +#include "test/testsupport/file_utils.h" + +namespace webrtc { + +TEST(FileWrapper, FileSize) { + auto test_info = ::testing::UnitTest::GetInstance()->current_test_info(); + std::string test_name = + std::string(test_info->test_case_name()) + "_" + test_info->name(); + std::replace(test_name.begin(), test_name.end(), '/', '_'); + const std::string temp_filename = test::OutputPath() + test_name; + + // Write + { + FileWrapper file = FileWrapper::OpenWriteOnly(temp_filename); + ASSERT_TRUE(file.is_open()); + EXPECT_EQ(file.FileSize(), 0); + + EXPECT_TRUE(file.Write("foo", 3)); + EXPECT_EQ(file.FileSize(), 3); + + // FileSize() doesn't change the file size. + EXPECT_EQ(file.FileSize(), 3); + + // FileSize() doesn't move the write position. + EXPECT_TRUE(file.Write("bar", 3)); + EXPECT_EQ(file.FileSize(), 6); + } + + // Read + { + FileWrapper file = FileWrapper::OpenReadOnly(temp_filename); + ASSERT_TRUE(file.is_open()); + EXPECT_EQ(file.FileSize(), 6); + + char buf[10]; + size_t bytes_read = file.Read(buf, 3); + EXPECT_EQ(bytes_read, 3u); + EXPECT_EQ(memcmp(buf, "foo", 3), 0); + + // FileSize() doesn't move the read position. + EXPECT_EQ(file.FileSize(), 6); + + // Attempting to read past the end reads what is available + // and sets the EOF flag. + bytes_read = file.Read(buf, 5); + EXPECT_EQ(bytes_read, 3u); + EXPECT_EQ(memcmp(buf, "bar", 3), 0); + EXPECT_TRUE(file.ReadEof()); + } + + // Clean up temporary file. + remove(temp_filename.c_str()); +} + +} // namespace webrtc diff --git a/rtc_base/system/no_unique_address.h b/rtc_base/system/no_unique_address.h new file mode 100644 index 0000000000..77e7a99526 --- /dev/null +++ b/rtc_base/system/no_unique_address.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_SYSTEM_NO_UNIQUE_ADDRESS_H_ +#define RTC_BASE_SYSTEM_NO_UNIQUE_ADDRESS_H_ + +// RTC_NO_UNIQUE_ADDRESS is a portable annotation to tell the compiler that +// a data member need not have an address distinct from all other non-static +// data members of its class. +// It allows empty types to actually occupy zero bytes as class members, +// instead of occupying at least one byte just so that they get their own +// address. There is almost never any reason not to use it on class members +// that could possibly be empty. +// The macro expands to [[no_unique_address]] if the compiler supports the +// attribute, it expands to nothing otherwise. +// Clang should supports this attribute since C++11, while other compilers +// should add support for it starting from C++20. Among clang compilers, +// clang-cl doesn't support it yet and support is unclear also when the target +// platform is iOS. 
+#if ((defined(__clang__) && !defined(_MSC_VER) && !defined(WEBRTC_IOS)) || \ + __cplusplus > 201703L) +// NOLINTNEXTLINE(whitespace/braces) +#define RTC_NO_UNIQUE_ADDRESS [[no_unique_address]] +#else +#define RTC_NO_UNIQUE_ADDRESS +#endif + +#endif // RTC_BASE_SYSTEM_NO_UNIQUE_ADDRESS_H_ diff --git a/rtc_base/system/thread_registry.cc b/rtc_base/system/thread_registry.cc index 86605446c7..b0e83ca1e9 100644 --- a/rtc_base/system/thread_registry.cc +++ b/rtc_base/system/thread_registry.cc @@ -14,9 +14,9 @@ #include #include "absl/base/attributes.h" -#include "rtc_base/critical_section.h" #include "rtc_base/logging.h" #include "rtc_base/platform_thread_types.h" +#include "rtc_base/synchronization/mutex.h" #include "sdk/android/native_api/stacktrace/stacktrace.h" namespace webrtc { @@ -30,7 +30,7 @@ struct ThreadData { // The map of registered threads, and the lock that protects it. We create the // map on first use, and never destroy it. -ABSL_CONST_INIT rtc::GlobalLock g_thread_registry_lock; +ABSL_CONST_INIT GlobalMutex g_thread_registry_lock(absl::kConstInit); ABSL_CONST_INIT std::map* g_registered_threads = nullptr; @@ -38,7 +38,7 @@ ABSL_CONST_INIT std::map* ScopedRegisterThreadForDebugging::ScopedRegisterThreadForDebugging( rtc::Location location) { - rtc::GlobalLockScope gls(&g_thread_registry_lock); + GlobalMutexLock gls(&g_thread_registry_lock); if (g_registered_threads == nullptr) { g_registered_threads = new std::map(); @@ -49,14 +49,14 @@ ScopedRegisterThreadForDebugging::ScopedRegisterThreadForDebugging( } ScopedRegisterThreadForDebugging::~ScopedRegisterThreadForDebugging() { - rtc::GlobalLockScope gls(&g_thread_registry_lock); + GlobalMutexLock gls(&g_thread_registry_lock); RTC_DCHECK(g_registered_threads != nullptr); const int num_erased = g_registered_threads->erase(this); RTC_DCHECK_EQ(num_erased, 1); } void PrintStackTracesOfRegisteredThreads() { - rtc::GlobalLockScope gls(&g_thread_registry_lock); + GlobalMutexLock gls(&g_thread_registry_lock); if 
(g_registered_threads == nullptr) { return; } diff --git a/rtc_base/system/unused.h b/rtc_base/system/unused.h index a0add4ee29..a5732a7e84 100644 --- a/rtc_base/system/unused.h +++ b/rtc_base/system/unused.h @@ -11,24 +11,9 @@ #ifndef RTC_BASE_SYSTEM_UNUSED_H_ #define RTC_BASE_SYSTEM_UNUSED_H_ -// Annotate a function indicating the caller must examine the return value. -// Use like: -// int foo() RTC_WARN_UNUSED_RESULT; -// To explicitly ignore a result, cast to void. -// TODO(kwiberg): Remove when we can use [[nodiscard]] from C++17. -#if defined(__clang__) -#define RTC_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) -#elif defined(__GNUC__) -// gcc has a __warn_unused_result__ attribute, but you can't quiet it by -// casting to void, so we don't use it. -#define RTC_WARN_UNUSED_RESULT -#else -#define RTC_WARN_UNUSED_RESULT -#endif - // Prevent the compiler from warning about an unused variable. For example: // int result = DoSomething(); -// assert(result == 17); +// RTC_DCHECK(result == 17); // RTC_UNUSED(result); // Note: In most cases it is better to remove the unused variable rather than // suppressing the compiler warning. diff --git a/rtc_base/system_time.cc b/rtc_base/system_time.cc new file mode 100644 index 0000000000..9efe76e3a6 --- /dev/null +++ b/rtc_base/system_time.cc @@ -0,0 +1,97 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// If WEBRTC_EXCLUDE_SYSTEM_TIME is set, an implementation of +// rtc::SystemTimeNanos() must be provided externally. 
+#ifndef WEBRTC_EXCLUDE_SYSTEM_TIME + +#include + +#include + +#if defined(WEBRTC_POSIX) +#include +#if defined(WEBRTC_MAC) +#include +#endif +#endif + +#if defined(WEBRTC_WIN) +// clang-format off +// clang formatting would put last, +// which leads to compilation failure. +#include +#include +#include +// clang-format on +#endif + +#include "rtc_base/checks.h" +#include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/system_time.h" +#include "rtc_base/time_utils.h" + +namespace rtc { + +int64_t SystemTimeNanos() { + int64_t ticks; +#if defined(WEBRTC_MAC) + static mach_timebase_info_data_t timebase; + if (timebase.denom == 0) { + // Get the timebase if this is the first time we run. + // Recommended by Apple's QA1398. + if (mach_timebase_info(&timebase) != KERN_SUCCESS) { + RTC_NOTREACHED(); + } + } + // Use timebase to convert absolute time tick units into nanoseconds. + const auto mul = [](uint64_t a, uint32_t b) -> int64_t { + RTC_DCHECK_NE(b, 0); + RTC_DCHECK_LE(a, std::numeric_limits::max() / b) + << "The multiplication " << a << " * " << b << " overflows"; + return rtc::dchecked_cast(a * b); + }; + ticks = mul(mach_absolute_time(), timebase.numer) / timebase.denom; +#elif defined(WEBRTC_POSIX) + struct timespec ts; + // TODO(deadbeef): Do we need to handle the case when CLOCK_MONOTONIC is not + // supported? + clock_gettime(CLOCK_MONOTONIC, &ts); + ticks = kNumNanosecsPerSec * static_cast(ts.tv_sec) + + static_cast(ts.tv_nsec); +#elif defined(WINUWP) + ticks = WinUwpSystemTimeNanos(); +#elif defined(WEBRTC_WIN) + static volatile LONG last_timegettime = 0; + static volatile int64_t num_wrap_timegettime = 0; + volatile LONG* last_timegettime_ptr = &last_timegettime; + DWORD now = timeGetTime(); + // Atomically update the last gotten time + DWORD old = InterlockedExchange(last_timegettime_ptr, now); + if (now < old) { + // If now is earlier than old, there may have been a race between threads. 
+ // 0x0fffffff ~3.1 days, the code will not take that long to execute + // so it must have been a wrap around. + if (old > 0xf0000000 && now < 0x0fffffff) { + num_wrap_timegettime++; + } + } + ticks = now + (num_wrap_timegettime << 32); + // TODO(deadbeef): Calculate with nanosecond precision. Otherwise, we're + // just wasting a multiply and divide when doing Time() on Windows. + ticks = ticks * kNumNanosecsPerMillisec; +#else +#error Unsupported platform. +#endif + return ticks; +} + +} // namespace rtc +#endif // WEBRTC_EXCLUDE_SYSTEM_TIME diff --git a/rtc_base/system_time.h b/rtc_base/system_time.h new file mode 100644 index 0000000000..d86e94adf4 --- /dev/null +++ b/rtc_base/system_time.h @@ -0,0 +1,22 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_SYSTEM_TIME_H_ +#define RTC_BASE_SYSTEM_TIME_H_ + +namespace rtc { + +// Returns the actual system time, even if a clock is set for testing. +// Useful for timeouts while using a test clock, or for logging. 
+int64_t SystemTimeNanos(); + +} // namespace rtc + +#endif // RTC_BASE_SYSTEM_TIME_H_ diff --git a/rtc_base/task_queue_libevent.cc b/rtc_base/task_queue_libevent.cc index 349a5f21fc..909698611e 100644 --- a/rtc_base/task_queue_libevent.cc +++ b/rtc_base/task_queue_libevent.cc @@ -29,11 +29,11 @@ #include "api/task_queue/task_queue_base.h" #include "base/third_party/libevent/event.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/platform_thread.h" #include "rtc_base/platform_thread_types.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "rtc_base/time_utils.h" @@ -93,16 +93,12 @@ void EventAssign(struct event* ev, rtc::ThreadPriority TaskQueuePriorityToThreadPriority(Priority priority) { switch (priority) { case Priority::HIGH: - return rtc::kRealtimePriority; + return rtc::ThreadPriority::kRealtime; case Priority::LOW: - return rtc::kLowPriority; + return rtc::ThreadPriority::kLow; case Priority::NORMAL: - return rtc::kNormalPriority; - default: - RTC_NOTREACHED(); - break; + return rtc::ThreadPriority::kNormal; } - return rtc::kNormalPriority; } class TaskQueueLibevent final : public TaskQueueBase { @@ -120,7 +116,6 @@ class TaskQueueLibevent final : public TaskQueueBase { ~TaskQueueLibevent() override = default; - static void ThreadMain(void* context); static void OnWakeup(int socket, short flags, void* context); // NOLINT static void RunTimer(int fd, short flags, void* context); // NOLINT @@ -130,7 +125,7 @@ class TaskQueueLibevent final : public TaskQueueBase { event_base* event_base_; event wakeup_event_; rtc::PlatformThread thread_; - rtc::CriticalSection pending_lock_; + Mutex pending_lock_; absl::InlinedVector, 4> pending_ RTC_GUARDED_BY(pending_lock_); // Holds a list of events pending timers for cleanup when the loop exits. 
@@ -172,8 +167,7 @@ class TaskQueueLibevent::SetTimerTask : public QueuedTask { TaskQueueLibevent::TaskQueueLibevent(absl::string_view queue_name, rtc::ThreadPriority priority) - : event_base_(event_base_new()), - thread_(&TaskQueueLibevent::ThreadMain, this, queue_name, priority) { + : event_base_(event_base_new()) { int fds[2]; RTC_CHECK(pipe(fds) == 0); SetNonBlocking(fds[0]); @@ -184,7 +178,18 @@ TaskQueueLibevent::TaskQueueLibevent(absl::string_view queue_name, EventAssign(&wakeup_event_, event_base_, wakeup_pipe_out_, EV_READ | EV_PERSIST, OnWakeup, this); event_add(&wakeup_event_, 0); - thread_.Start(); + thread_ = rtc::PlatformThread::SpawnJoinable( + [this] { + { + CurrentTaskQueueSetter set_current(this); + while (is_active_) + event_base_loop(event_base_, 0); + } + + for (TimerEvent* timer : pending_timers_) + delete timer; + }, + queue_name, rtc::ThreadAttributes().SetPriority(priority)); } void TaskQueueLibevent::Delete() { @@ -199,7 +204,7 @@ void TaskQueueLibevent::Delete() { nanosleep(&ts, nullptr); } - thread_.Stop(); + thread_.Finalize(); event_del(&wakeup_event_); @@ -216,7 +221,7 @@ void TaskQueueLibevent::Delete() { void TaskQueueLibevent::PostTask(std::unique_ptr task) { { - rtc::CritScope lock(&pending_lock_); + MutexLock lock(&pending_lock_); bool had_pending_tasks = !pending_.empty(); pending_.push_back(std::move(task)); @@ -252,20 +257,6 @@ void TaskQueueLibevent::PostDelayedTask(std::unique_ptr task, } } -// static -void TaskQueueLibevent::ThreadMain(void* context) { - TaskQueueLibevent* me = static_cast(context); - - { - CurrentTaskQueueSetter set_current(me); - while (me->is_active_) - event_base_loop(me->event_base_, 0); - } - - for (TimerEvent* timer : me->pending_timers_) - delete timer; -} - // static void TaskQueueLibevent::OnWakeup(int socket, short flags, // NOLINT @@ -282,7 +273,7 @@ void TaskQueueLibevent::OnWakeup(int socket, case kRunTasks: { absl::InlinedVector, 4> tasks; { - rtc::CritScope lock(&me->pending_lock_); + 
MutexLock lock(&me->pending_lock_); tasks.swap(me->pending_); } RTC_DCHECK(!tasks.empty()); diff --git a/rtc_base/task_queue_stdlib.cc b/rtc_base/task_queue_stdlib.cc index 7052f7c6db..41da285ee7 100644 --- a/rtc_base/task_queue_stdlib.cc +++ b/rtc_base/task_queue_stdlib.cc @@ -22,10 +22,10 @@ #include "api/task_queue/queued_task.h" #include "api/task_queue/task_queue_base.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" #include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "rtc_base/time_utils.h" @@ -36,14 +36,11 @@ rtc::ThreadPriority TaskQueuePriorityToThreadPriority( TaskQueueFactory::Priority priority) { switch (priority) { case TaskQueueFactory::Priority::HIGH: - return rtc::kRealtimePriority; + return rtc::ThreadPriority::kRealtime; case TaskQueueFactory::Priority::LOW: - return rtc::kLowPriority; + return rtc::ThreadPriority::kLow; case TaskQueueFactory::Priority::NORMAL: - return rtc::kNormalPriority; - default: - RTC_NOTREACHED(); - return rtc::kNormalPriority; + return rtc::ThreadPriority::kNormal; } } @@ -78,8 +75,6 @@ class TaskQueueStdlib final : public TaskQueueBase { NextTask GetNextTask(); - static void ThreadMain(void* context); - void ProcessTasks(); void NotifyWake(); @@ -87,17 +82,10 @@ class TaskQueueStdlib final : public TaskQueueBase { // Indicates if the thread has started. rtc::Event started_; - // Indicates if the thread has stopped. - rtc::Event stopped_; - // Signaled whenever a new task is pending. rtc::Event flag_notify_; - // Contains the active worker thread assigned to processing - // tasks (including delayed tasks). - rtc::PlatformThread thread_; - - rtc::CriticalSection pending_lock_; + Mutex pending_lock_; // Indicates if the worker thread needs to shutdown now. 
bool thread_should_quit_ RTC_GUARDED_BY(pending_lock_){false}; @@ -119,15 +107,25 @@ class TaskQueueStdlib final : public TaskQueueBase { // std::unique_ptr out of the queue without the presence of a hack. std::map> delayed_queue_ RTC_GUARDED_BY(pending_lock_); + + // Contains the active worker thread assigned to processing + // tasks (including delayed tasks). + // Placing this last ensures the thread doesn't touch uninitialized attributes + // throughout it's lifetime. + rtc::PlatformThread thread_; }; TaskQueueStdlib::TaskQueueStdlib(absl::string_view queue_name, rtc::ThreadPriority priority) : started_(/*manual_reset=*/false, /*initially_signaled=*/false), - stopped_(/*manual_reset=*/false, /*initially_signaled=*/false), flag_notify_(/*manual_reset=*/false, /*initially_signaled=*/false), - thread_(&TaskQueueStdlib::ThreadMain, this, queue_name, priority) { - thread_.Start(); + thread_(rtc::PlatformThread::SpawnJoinable( + [this] { + CurrentTaskQueueSetter set_current(this); + ProcessTasks(); + }, + queue_name, + rtc::ThreadAttributes().SetPriority(priority))) { started_.Wait(rtc::Event::kForever); } @@ -135,20 +133,18 @@ void TaskQueueStdlib::Delete() { RTC_DCHECK(!IsCurrent()); { - rtc::CritScope lock(&pending_lock_); + MutexLock lock(&pending_lock_); thread_should_quit_ = true; } NotifyWake(); - stopped_.Wait(rtc::Event::kForever); - thread_.Stop(); delete this; } void TaskQueueStdlib::PostTask(std::unique_ptr task) { { - rtc::CritScope lock(&pending_lock_); + MutexLock lock(&pending_lock_); OrderId order = thread_posting_order_++; pending_queue_.push(std::pair>( @@ -166,7 +162,7 @@ void TaskQueueStdlib::PostDelayedTask(std::unique_ptr task, delay.next_fire_at_ms_ = fire_at; { - rtc::CritScope lock(&pending_lock_); + MutexLock lock(&pending_lock_); delay.order_ = ++thread_posting_order_; delayed_queue_[delay] = std::move(task); } @@ -179,7 +175,7 @@ TaskQueueStdlib::NextTask TaskQueueStdlib::GetNextTask() { auto tick = rtc::TimeMillis(); - rtc::CritScope 
lock(&pending_lock_); + MutexLock lock(&pending_lock_); if (thread_should_quit_) { result.final_task_ = true; @@ -219,13 +215,6 @@ TaskQueueStdlib::NextTask TaskQueueStdlib::GetNextTask() { return result; } -// static -void TaskQueueStdlib::ThreadMain(void* context) { - TaskQueueStdlib* me = static_cast(context); - CurrentTaskQueueSetter set_current(me); - me->ProcessTasks(); -} - void TaskQueueStdlib::ProcessTasks() { started_.Set(); @@ -250,8 +239,6 @@ void TaskQueueStdlib::ProcessTasks() { else flag_notify_.Wait(task.sleep_time_ms_); } - - stopped_.Set(); } void TaskQueueStdlib::NotifyWake() { diff --git a/rtc_base/task_queue_unittest.cc b/rtc_base/task_queue_unittest.cc index a7148dcdd1..0c79858630 100644 --- a/rtc_base/task_queue_unittest.cc +++ b/rtc_base/task_queue_unittest.cc @@ -21,7 +21,6 @@ #include #include "absl/memory/memory.h" -#include "rtc_base/bind.h" #include "rtc_base/event.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/time_utils.h" @@ -67,7 +66,7 @@ TEST(TaskQueueTest, DISABLED_PostDelayedHighRes) { webrtc::TaskQueueForTest queue(kQueueName, TaskQueue::Priority::HIGH); uint32_t start = Time(); - queue.PostDelayedTask(Bind(&CheckCurrent, &event, &queue), 3); + queue.PostDelayedTask([&event, &queue] { CheckCurrent(&event, &queue); }, 3); EXPECT_TRUE(event.Wait(1000)); uint32_t end = TimeMillis(); // These tests are a little relaxed due to how "powerful" our test bots can diff --git a/rtc_base/task_queue_win.cc b/rtc_base/task_queue_win.cc index 8c11b8764a..d797d478f4 100644 --- a/rtc_base/task_queue_win.cc +++ b/rtc_base/task_queue_win.cc @@ -29,15 +29,17 @@ #include #include "absl/strings/string_view.h" +#include "absl/types/optional.h" #include "api/task_queue/queued_task.h" #include "api/task_queue/task_queue_base.h" #include "rtc_base/arraysize.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/constructor_magic.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" #include 
"rtc_base/numerics/safe_conversions.h" #include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" namespace webrtc { @@ -56,16 +58,12 @@ rtc::ThreadPriority TaskQueuePriorityToThreadPriority( TaskQueueFactory::Priority priority) { switch (priority) { case TaskQueueFactory::Priority::HIGH: - return rtc::kRealtimePriority; + return rtc::ThreadPriority::kRealtime; case TaskQueueFactory::Priority::LOW: - return rtc::kLowPriority; + return rtc::ThreadPriority::kLow; case TaskQueueFactory::Priority::NORMAL: - return rtc::kNormalPriority; - default: - RTC_NOTREACHED(); - break; + return rtc::ThreadPriority::kNormal; } - return rtc::kNormalPriority; } int64_t GetTick() { @@ -167,21 +165,6 @@ class TaskQueueWin : public TaskQueueBase { void RunPendingTasks(); private: - static void ThreadMain(void* context); - - class WorkerThread : public rtc::PlatformThread { - public: - WorkerThread(rtc::ThreadRunFunction func, - void* obj, - absl::string_view thread_name, - rtc::ThreadPriority priority) - : PlatformThread(func, obj, thread_name, priority) {} - - bool QueueAPC(PAPCFUNC apc_function, ULONG_PTR data) { - return rtc::PlatformThread::QueueAPC(apc_function, data); - } - }; - void RunThreadMain(); bool ProcessQueuedMessages(); void RunDueTasks(); @@ -204,8 +187,8 @@ class TaskQueueWin : public TaskQueueBase { greater> timer_tasks_; UINT_PTR timer_id_ = 0; - WorkerThread thread_; - rtc::CriticalSection pending_lock_; + rtc::PlatformThread thread_; + Mutex pending_lock_; std::queue> pending_ RTC_GUARDED_BY(pending_lock_); HANDLE in_queue_; @@ -213,10 +196,12 @@ class TaskQueueWin : public TaskQueueBase { TaskQueueWin::TaskQueueWin(absl::string_view queue_name, rtc::ThreadPriority priority) - : thread_(&TaskQueueWin::ThreadMain, this, queue_name, priority), - in_queue_(::CreateEvent(nullptr, true, false, nullptr)) { + : in_queue_(::CreateEvent(nullptr, true, false, nullptr)) { RTC_DCHECK(in_queue_); - thread_.Start(); + 
thread_ = rtc::PlatformThread::SpawnJoinable( + [this] { RunThreadMain(); }, queue_name, + rtc::ThreadAttributes().SetPriority(priority)); + rtc::Event event(false, false); RTC_CHECK(thread_.QueueAPC(&InitializeQueueThread, reinterpret_cast(&event))); @@ -225,17 +210,19 @@ TaskQueueWin::TaskQueueWin(absl::string_view queue_name, void TaskQueueWin::Delete() { RTC_DCHECK(!IsCurrent()); - while (!::PostThreadMessage(thread_.GetThreadRef(), WM_QUIT, 0, 0)) { + RTC_CHECK(thread_.GetHandle() != absl::nullopt); + while ( + !::PostThreadMessage(GetThreadId(*thread_.GetHandle()), WM_QUIT, 0, 0)) { RTC_CHECK_EQ(ERROR_NOT_ENOUGH_QUOTA, ::GetLastError()); Sleep(1); } - thread_.Stop(); + thread_.Finalize(); ::CloseHandle(in_queue_); delete this; } void TaskQueueWin::PostTask(std::unique_ptr task) { - rtc::CritScope lock(&pending_lock_); + MutexLock lock(&pending_lock_); pending_.push(std::move(task)); ::SetEvent(in_queue_); } @@ -252,7 +239,9 @@ void TaskQueueWin::PostDelayedTask(std::unique_ptr task, // and WPARAM is 32bits in 32bit builds. Otherwise, we could pass the // task pointer and timestamp as LPARAM and WPARAM. 
auto* task_info = new DelayedTaskInfo(milliseconds, std::move(task)); - if (!::PostThreadMessage(thread_.GetThreadRef(), WM_QUEUE_DELAYED_TASK, 0, + RTC_CHECK(thread_.GetHandle() != absl::nullopt); + if (!::PostThreadMessage(GetThreadId(*thread_.GetHandle()), + WM_QUEUE_DELAYED_TASK, 0, reinterpret_cast(task_info))) { delete task_info; } @@ -262,7 +251,7 @@ void TaskQueueWin::RunPendingTasks() { while (true) { std::unique_ptr task; { - rtc::CritScope lock(&pending_lock_); + MutexLock lock(&pending_lock_); if (pending_.empty()) break; task = std::move(pending_.front()); @@ -274,11 +263,6 @@ void TaskQueueWin::RunPendingTasks() { } } -// static -void TaskQueueWin::ThreadMain(void* context) { - static_cast(context)->RunThreadMain(); -} - void TaskQueueWin::RunThreadMain() { CurrentTaskQueueSetter set_current(this); HANDLE handles[2] = {*timer_.event_for_wait(), in_queue_}; diff --git a/rtc_base/task_utils/BUILD.gn b/rtc_base/task_utils/BUILD.gn index 32f72b87df..64a041908e 100644 --- a/rtc_base/task_utils/BUILD.gn +++ b/rtc_base/task_utils/BUILD.gn @@ -14,17 +14,17 @@ rtc_library("repeating_task") { "repeating_task.h", ] deps = [ + ":pending_task_safety_flag", ":to_queued_task", "..:logging", - "..:thread_checker", "..:timeutils", + "../../api:sequence_checker", "../../api/task_queue", "../../api/units:time_delta", "../../api/units:timestamp", "../../system_wrappers:system_wrappers", - "../synchronization:sequence_checker", - "//third_party/abseil-cpp/absl/memory", ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] } rtc_library("pending_task_safety_flag") { @@ -34,10 +34,10 @@ rtc_library("pending_task_safety_flag") { ] deps = [ "..:checks", - "..:refcount", - "..:thread_checker", + "../../api:refcountedbase", "../../api:scoped_refptr", - "../synchronization:sequence_checker", + "../../api:sequence_checker", + "../system:no_unique_address", ] } @@ -82,7 +82,7 @@ if (rtc_include_tests) { ":to_queued_task", "../../api/task_queue", "../../test:test_support", - 
"//third_party/abseil-cpp/absl/memory", ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] } } diff --git a/rtc_base/task_utils/pending_task_safety_flag.cc b/rtc_base/task_utils/pending_task_safety_flag.cc index 4be2131f3f..57b3f6ce88 100644 --- a/rtc_base/task_utils/pending_task_safety_flag.cc +++ b/rtc_base/task_utils/pending_task_safety_flag.cc @@ -10,13 +10,27 @@ #include "rtc_base/task_utils/pending_task_safety_flag.h" -#include "rtc_base/ref_counted_object.h" - namespace webrtc { // static rtc::scoped_refptr PendingTaskSafetyFlag::Create() { - return new rtc::RefCountedObject(); + return new PendingTaskSafetyFlag(true); +} + +rtc::scoped_refptr +PendingTaskSafetyFlag::CreateDetached() { + rtc::scoped_refptr safety_flag( + new PendingTaskSafetyFlag(true)); + safety_flag->main_sequence_.Detach(); + return safety_flag; +} + +rtc::scoped_refptr +PendingTaskSafetyFlag::CreateDetachedInactive() { + rtc::scoped_refptr safety_flag( + new PendingTaskSafetyFlag(false)); + safety_flag->main_sequence_.Detach(); + return safety_flag; } void PendingTaskSafetyFlag::SetNotAlive() { @@ -24,6 +38,11 @@ void PendingTaskSafetyFlag::SetNotAlive() { alive_ = false; } +void PendingTaskSafetyFlag::SetAlive() { + RTC_DCHECK_RUN_ON(&main_sequence_); + alive_ = true; +} + bool PendingTaskSafetyFlag::alive() const { RTC_DCHECK_RUN_ON(&main_sequence_); return alive_; diff --git a/rtc_base/task_utils/pending_task_safety_flag.h b/rtc_base/task_utils/pending_task_safety_flag.h index 580fb3f912..fc1b5bd878 100644 --- a/rtc_base/task_utils/pending_task_safety_flag.h +++ b/rtc_base/task_utils/pending_task_safety_flag.h @@ -11,23 +11,30 @@ #ifndef RTC_BASE_TASK_UTILS_PENDING_TASK_SAFETY_FLAG_H_ #define RTC_BASE_TASK_UTILS_PENDING_TASK_SAFETY_FLAG_H_ +#include "api/ref_counted_base.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "rtc_base/checks.h" -#include "rtc_base/ref_count.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include 
"rtc_base/system/no_unique_address.h" namespace webrtc { -// Use this flag to drop pending tasks that have been posted to the "main" -// thread/TQ and end up running after the owning instance has been -// deleted. The owning instance signals deletion by calling SetNotAlive() from -// its destructor. -// +// The PendingTaskSafetyFlag and the ScopedTaskSafety are designed to address +// the issue where you have a task to be executed later that has references, +// but cannot guarantee that the referenced object is alive when the task is +// executed. + +// This mechanism can be used with tasks that are created and destroyed +// on a single thread / task queue, and with tasks posted to the same +// thread/task queue, but tasks can be posted from any thread/TQ. + +// Typical usage: // When posting a task, post a copy (capture by-value in a lambda) of the flag -// instance and before performing the work, check the |alive()| state. Abort if +// reference and before performing the work, check the |alive()| state. Abort if // alive() returns |false|: // -// // Running outside of the main thread. +// class ExampleClass { +// .... // my_task_queue_->PostTask(ToQueuedTask( // [safety = pending_task_safety_flag_, this]() { // // Now running on the main thread. @@ -35,39 +42,79 @@ namespace webrtc { // return; // MyMethod(); // })); +// .... +// ~ExampleClass() { +// pending_task_safety_flag_->SetNotAlive(); +// } +// scoped_refptr pending_task_safety_flag_ +// = PendingTaskSafetyFlag::Create(); +// } // -// Or implicitly by letting ToQueuedTask do the checking: +// ToQueuedTask has an overload that makes this check automatic: // -// // Running outside of the main thread. // my_task_queue_->PostTask(ToQueuedTask(pending_task_safety_flag_, // [this]() { MyMethod(); })); // -// Note that checking the state only works on the construction/destruction -// thread of the ReceiveStatisticsProxy instance. 
-class PendingTaskSafetyFlag : public rtc::RefCountInterface { +class PendingTaskSafetyFlag final + : public rtc::RefCountedNonVirtual { public: static rtc::scoped_refptr Create(); + // Creates a flag, but with its SequenceChecker initially detached. Hence, it + // may be created on a different thread than the flag will be used on. + static rtc::scoped_refptr CreateDetached(); + + // Same as `CreateDetached()` except the initial state of the returned flag + // will be `!alive()`. + static rtc::scoped_refptr CreateDetachedInactive(); + ~PendingTaskSafetyFlag() = default; void SetNotAlive(); + // The SetAlive method is intended to support Start/Stop/Restart use cases. + // When a class has called SetNotAlive on a flag used for posted tasks, and + // decides it wants to post new tasks and have them run, there are two + // reasonable ways to do that: + // + // (i) Use the below SetAlive method. One subtlety is that any task posted + // prior to SetNotAlive, and still in the queue, is resurrected and will + // run. + // + // (ii) Create a fresh flag, and just drop the reference to the old one. This + // avoids the above problem, and ensures that tasks posted prior to + // SetNotAlive stay cancelled. Instead, there's a potential data race on + // the flag pointer itself. Some synchronization is required between the + // thread overwriting the flag pointer, and the threads that want to post + // tasks and therefore read that same pointer. + void SetAlive(); bool alive() const; protected: - PendingTaskSafetyFlag() = default; + explicit PendingTaskSafetyFlag(bool alive) : alive_(alive) {} private: bool alive_ = true; - SequenceChecker main_sequence_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker main_sequence_; }; -// Makes using PendingTaskSafetyFlag very simple. Automatic PTSF creation -// and signalling of destruction when the ScopedTaskSafety instance goes out -// of scope. -// Should be used by the class that wants tasks dropped after destruction. 
-// Requirements are that the instance be constructed and destructed on +// The ScopedTaskSafety makes using PendingTaskSafetyFlag very simple. +// It does automatic PTSF creation and signalling of destruction when the +// ScopedTaskSafety instance goes out of scope. +// +// ToQueuedTask has an overload that takes a ScopedTaskSafety too, so there +// is no need to explicitly call the "flag" method. +// +// Example usage: +// +// my_task_queue->PostTask(ToQueuedTask(scoped_task_safety, +// [this]() { +// // task goes here +// } +// +// This should be used by the class that wants tasks dropped after destruction. +// The requirement is that the instance has to be constructed and destructed on // the same thread as the potentially dropped tasks would be running on. -class ScopedTaskSafety { +class ScopedTaskSafety final { public: ScopedTaskSafety() = default; ~ScopedTaskSafety() { flag_->SetNotAlive(); } @@ -80,6 +127,21 @@ class ScopedTaskSafety { PendingTaskSafetyFlag::Create(); }; +// Like ScopedTaskSafety, but allows construction on a different thread than +// where the flag will be used. +class ScopedTaskSafetyDetached final { + public: + ScopedTaskSafetyDetached() = default; + ~ScopedTaskSafetyDetached() { flag_->SetNotAlive(); } + + // Returns a new reference to the safety flag. + rtc::scoped_refptr flag() const { return flag_; } + + private: + rtc::scoped_refptr flag_ = + PendingTaskSafetyFlag::CreateDetached(); +}; + } // namespace webrtc #endif // RTC_BASE_TASK_UTILS_PENDING_TASK_SAFETY_FLAG_H_ diff --git a/rtc_base/task_utils/pending_task_safety_flag_unittest.cc b/rtc_base/task_utils/pending_task_safety_flag_unittest.cc index 6df2fe2ffb..07bbea296e 100644 --- a/rtc_base/task_utils/pending_task_safety_flag_unittest.cc +++ b/rtc_base/task_utils/pending_task_safety_flag_unittest.cc @@ -156,8 +156,27 @@ TEST(PendingTaskSafetyFlagTest, PendingTaskDropped) { blocker.Set(); // Run an empty task on tq1 to flush all the queued tasks. 
- tq1.SendTask([]() {}, RTC_FROM_HERE); + tq1.WaitForPreviouslyPostedTasks(); ASSERT_FALSE(owner); EXPECT_FALSE(stuff_done); } + +TEST(PendingTaskSafetyFlagTest, PendingTaskNotAliveInitialized) { + TaskQueueForTest tq("PendingTaskNotAliveInitialized"); + + // Create a new flag that initially not `alive`. + auto flag = PendingTaskSafetyFlag::CreateDetachedInactive(); + tq.SendTask([&flag]() { EXPECT_FALSE(flag->alive()); }, RTC_FROM_HERE); + + bool task_1_ran = false; + bool task_2_ran = false; + tq.PostTask(ToQueuedTask(flag, [&task_1_ran]() { task_1_ran = true; })); + tq.PostTask([&flag]() { flag->SetAlive(); }); + tq.PostTask(ToQueuedTask(flag, [&task_2_ran]() { task_2_ran = true; })); + + tq.WaitForPreviouslyPostedTasks(); + EXPECT_FALSE(task_1_ran); + EXPECT_TRUE(task_2_ran); +} + } // namespace webrtc diff --git a/rtc_base/task_utils/repeating_task.cc b/rtc_base/task_utils/repeating_task.cc index 574e6331f1..9636680cb4 100644 --- a/rtc_base/task_utils/repeating_task.cc +++ b/rtc_base/task_utils/repeating_task.cc @@ -12,32 +12,36 @@ #include "absl/memory/memory.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/time_utils.h" namespace webrtc { namespace webrtc_repeating_task_impl { -RepeatingTaskBase::RepeatingTaskBase(TaskQueueBase* task_queue, - TimeDelta first_delay, - Clock* clock) +RepeatingTaskBase::RepeatingTaskBase( + TaskQueueBase* task_queue, + TimeDelta first_delay, + Clock* clock, + rtc::scoped_refptr alive_flag) : task_queue_(task_queue), clock_(clock), - next_run_time_(clock_->CurrentTime() + first_delay) {} + next_run_time_(clock_->CurrentTime() + first_delay), + alive_flag_(std::move(alive_flag)) {} RepeatingTaskBase::~RepeatingTaskBase() = default; bool RepeatingTaskBase::Run() { RTC_DCHECK_RUN_ON(task_queue_); // Return true to tell the TaskQueue to destruct this object. 
- if (next_run_time_.IsPlusInfinity()) + if (!alive_flag_->alive()) return true; TimeDelta delay = RunClosure(); // The closure might have stopped this task, in which case we return true to // destruct this object. - if (next_run_time_.IsPlusInfinity()) + if (!alive_flag_->alive()) return true; RTC_DCHECK(delay.IsFinite()); @@ -53,33 +57,11 @@ bool RepeatingTaskBase::Run() { return false; } -void RepeatingTaskBase::Stop() { - RTC_DCHECK_RUN_ON(task_queue_); - RTC_DCHECK(next_run_time_.IsFinite()); - next_run_time_ = Timestamp::PlusInfinity(); -} - } // namespace webrtc_repeating_task_impl -RepeatingTaskHandle::RepeatingTaskHandle(RepeatingTaskHandle&& other) - : repeating_task_(other.repeating_task_) { - other.repeating_task_ = nullptr; -} - -RepeatingTaskHandle& RepeatingTaskHandle::operator=( - RepeatingTaskHandle&& other) { - repeating_task_ = other.repeating_task_; - other.repeating_task_ = nullptr; - return *this; -} - -RepeatingTaskHandle::RepeatingTaskHandle( - webrtc_repeating_task_impl::RepeatingTaskBase* repeating_task) - : repeating_task_(repeating_task) {} - void RepeatingTaskHandle::Stop() { if (repeating_task_) { - repeating_task_->Stop(); + repeating_task_->SetNotAlive(); repeating_task_ = nullptr; } } diff --git a/rtc_base/task_utils/repeating_task.h b/rtc_base/task_utils/repeating_task.h index 487b7d19d4..d5066fdb5c 100644 --- a/rtc_base/task_utils/repeating_task.h +++ b/rtc_base/task_utils/repeating_task.h @@ -19,22 +19,19 @@ #include "api/task_queue/task_queue_base.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "system_wrappers/include/clock.h" namespace webrtc { - -class RepeatingTaskHandle; - namespace webrtc_repeating_task_impl { class RepeatingTaskBase : public QueuedTask { public: RepeatingTaskBase(TaskQueueBase* task_queue, TimeDelta first_delay, - Clock* clock); + Clock* clock, + rtc::scoped_refptr alive_flag); ~RepeatingTaskBase() override; - void 
Stop(); - private: virtual TimeDelta RunClosure() = 0; @@ -42,9 +39,10 @@ class RepeatingTaskBase : public QueuedTask { TaskQueueBase* const task_queue_; Clock* const clock_; - // This is always finite, except for the special case where it's PlusInfinity - // to signal that the task should stop. + // This is always finite. Timestamp next_run_time_ RTC_GUARDED_BY(task_queue_); + rtc::scoped_refptr alive_flag_ + RTC_GUARDED_BY(task_queue_); }; // The template closure pattern is based on rtc::ClosureTask. @@ -54,8 +52,12 @@ class RepeatingTaskImpl final : public RepeatingTaskBase { RepeatingTaskImpl(TaskQueueBase* task_queue, TimeDelta first_delay, Closure&& closure, - Clock* clock) - : RepeatingTaskBase(task_queue, first_delay, clock), + Clock* clock, + rtc::scoped_refptr alive_flag) + : RepeatingTaskBase(task_queue, + first_delay, + clock, + std::move(alive_flag)), closure_(std::forward(closure)) { static_assert( std::is_same static RepeatingTaskHandle Start(TaskQueueBase* task_queue, Closure&& closure, Clock* clock = Clock::GetRealTimeClock()) { - auto repeating_task = std::make_unique< - webrtc_repeating_task_impl::RepeatingTaskImpl>( - task_queue, TimeDelta::Zero(), std::forward(closure), clock); - auto* repeating_task_ptr = repeating_task.get(); - task_queue->PostTask(std::move(repeating_task)); - return RepeatingTaskHandle(repeating_task_ptr); + auto alive_flag = PendingTaskSafetyFlag::CreateDetached(); + task_queue->PostTask( + std::make_unique< + webrtc_repeating_task_impl::RepeatingTaskImpl>( + task_queue, TimeDelta::Zero(), std::forward(closure), + clock, alive_flag)); + return RepeatingTaskHandle(std::move(alive_flag)); } // DelayedStart is equivalent to Start except that the first invocation of the @@ -113,12 +114,14 @@ class RepeatingTaskHandle { TimeDelta first_delay, Closure&& closure, Clock* clock = Clock::GetRealTimeClock()) { - auto repeating_task = std::make_unique< - webrtc_repeating_task_impl::RepeatingTaskImpl>( - task_queue, first_delay, 
std::forward(closure), clock); - auto* repeating_task_ptr = repeating_task.get(); - task_queue->PostDelayedTask(std::move(repeating_task), first_delay.ms()); - return RepeatingTaskHandle(repeating_task_ptr); + auto alive_flag = PendingTaskSafetyFlag::CreateDetached(); + task_queue->PostDelayedTask( + std::make_unique< + webrtc_repeating_task_impl::RepeatingTaskImpl>( + task_queue, first_delay, std::forward(closure), clock, + alive_flag), + first_delay.ms()); + return RepeatingTaskHandle(std::move(alive_flag)); } // Stops future invocations of the repeating task closure. Can only be called @@ -127,15 +130,15 @@ class RepeatingTaskHandle { // closure itself. void Stop(); - // Returns true if Start() or DelayedStart() was called most recently. Returns - // false initially and if Stop() or PostStop() was called most recently. + // Returns true until Stop() was called. + // Can only be called from the TaskQueue where the task is running. bool Running() const; private: explicit RepeatingTaskHandle( - webrtc_repeating_task_impl::RepeatingTaskBase* repeating_task); - // Owned by the task queue. 
- webrtc_repeating_task_impl::RepeatingTaskBase* repeating_task_ = nullptr; + rtc::scoped_refptr alive_flag) + : repeating_task_(std::move(alive_flag)) {} + rtc::scoped_refptr repeating_task_; }; } // namespace webrtc diff --git a/rtc_base/task_utils/repeating_task_unittest.cc b/rtc_base/task_utils/repeating_task_unittest.cc index 2fb15d1e5a..b23284f988 100644 --- a/rtc_base/task_utils/repeating_task_unittest.cc +++ b/rtc_base/task_utils/repeating_task_unittest.cc @@ -276,4 +276,22 @@ TEST(RepeatingTaskTest, ClockIntegration) { handle.Stop(); } +TEST(RepeatingTaskTest, CanBeStoppedAfterTaskQueueDeletedTheRepeatingTask) { + std::unique_ptr repeating_task; + + MockTaskQueue task_queue; + EXPECT_CALL(task_queue, PostDelayedTask) + .WillOnce([&](std::unique_ptr task, uint32_t milliseconds) { + repeating_task = std::move(task); + }); + + RepeatingTaskHandle handle = + RepeatingTaskHandle::DelayedStart(&task_queue, TimeDelta::Millis(100), + [] { return TimeDelta::Millis(100); }); + + // shutdown task queue: delete all pending tasks and run 'regular' task. + repeating_task = nullptr; + handle.Stop(); +} + } // namespace webrtc diff --git a/rtc_base/task_utils/to_queued_task.h b/rtc_base/task_utils/to_queued_task.h index 07ab0ebe26..b2e3aae7ae 100644 --- a/rtc_base/task_utils/to_queued_task.h +++ b/rtc_base/task_utils/to_queued_task.h @@ -20,7 +20,7 @@ namespace webrtc { namespace webrtc_new_closure_impl { -// Simple implementation of QueuedTask for use with rtc::Bind and lambdas. +// Simple implementation of QueuedTask for use with lambdas. 
template class ClosureTask : public QueuedTask { public: diff --git a/rtc_base/test_client.cc b/rtc_base/test_client.cc index e5aa9d7987..f23ac2aec0 100644 --- a/rtc_base/test_client.cc +++ b/rtc_base/test_client.cc @@ -75,7 +75,7 @@ std::unique_ptr TestClient::NextPacket(int timeout_ms) { int64_t end = TimeAfter(timeout_ms); while (TimeUntil(end) > 0) { { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); if (packets_.size() != 0) { break; } @@ -85,7 +85,7 @@ std::unique_ptr TestClient::NextPacket(int timeout_ms) { // Return the first packet placed in the queue. std::unique_ptr packet; - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); if (packets_.size() > 0) { packet = std::move(packets_.front()); packets_.erase(packets_.begin()); @@ -149,7 +149,7 @@ void TestClient::OnPacket(AsyncPacketSocket* socket, size_t size, const SocketAddress& remote_addr, const int64_t& packet_time_us) { - CritScope cs(&crit_); + webrtc::MutexLock lock(&mutex_); packets_.push_back( std::make_unique(remote_addr, buf, size, packet_time_us)); } diff --git a/rtc_base/test_client.h b/rtc_base/test_client.h index b45cf005bb..6989fe1d57 100644 --- a/rtc_base/test_client.h +++ b/rtc_base/test_client.h @@ -16,8 +16,8 @@ #include "rtc_base/async_udp_socket.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/fake_clock.h" +#include "rtc_base/synchronization/mutex.h" namespace rtc { @@ -105,7 +105,7 @@ class TestClient : public sigslot::has_slots<> { void AdvanceTime(int ms); ThreadProcessingFakeClock* fake_clock_ = nullptr; - CriticalSection crit_; + webrtc::Mutex mutex_; std::unique_ptr socket_; std::vector> packets_; int ready_to_send_count_ = 0; diff --git a/rtc_base/test_utils.h b/rtc_base/test_utils.h index 4746e962ae..7068e73881 100644 --- a/rtc_base/test_utils.h +++ b/rtc_base/test_utils.h @@ -17,25 +17,23 @@ #include #include "rtc_base/async_socket.h" -#include "rtc_base/stream.h" #include 
"rtc_base/third_party/sigslot/sigslot.h" namespace webrtc { namespace testing { /////////////////////////////////////////////////////////////////////////////// -// StreamSink - Monitor asynchronously signalled events from StreamInterface -// or AsyncSocket (which should probably be a StreamInterface. +// StreamSink - Monitor asynchronously signalled events from AsyncSocket. /////////////////////////////////////////////////////////////////////////////// -// Note: Any event that is an error is treaded as SSE_ERROR instead of that +// Note: Any event that is an error is treated as SSE_ERROR instead of that // event. enum StreamSinkEvent { - SSE_OPEN = rtc::SE_OPEN, - SSE_READ = rtc::SE_READ, - SSE_WRITE = rtc::SE_WRITE, - SSE_CLOSE = rtc::SE_CLOSE, + SSE_OPEN = 1, + SSE_READ = 2, + SSE_WRITE = 4, + SSE_CLOSE = 8, SSE_ERROR = 16 }; @@ -44,24 +42,6 @@ class StreamSink : public sigslot::has_slots<> { StreamSink(); ~StreamSink() override; - void Monitor(rtc::StreamInterface* stream) { - stream->SignalEvent.connect(this, &StreamSink::OnEvent); - events_.erase(stream); - } - void Unmonitor(rtc::StreamInterface* stream) { - stream->SignalEvent.disconnect(this); - // In case you forgot to unmonitor a previous object with this address - events_.erase(stream); - } - bool Check(rtc::StreamInterface* stream, - StreamSinkEvent event, - bool reset = true) { - return DoCheck(stream, event, reset); - } - int Events(rtc::StreamInterface* stream, bool reset = true) { - return DoEvents(stream, reset); - } - void Monitor(rtc::AsyncSocket* socket) { socket->SignalConnectEvent.connect(this, &StreamSink::OnConnectEvent); socket->SignalReadEvent.connect(this, &StreamSink::OnReadEvent); @@ -82,19 +62,10 @@ class StreamSink : public sigslot::has_slots<> { bool reset = true) { return DoCheck(socket, event, reset); } - int Events(rtc::AsyncSocket* socket, bool reset = true) { - return DoEvents(socket, reset); - } private: - typedef std::map EventMap; + typedef std::map EventMap; - void 
OnEvent(rtc::StreamInterface* stream, int events, int error) { - if (error) { - events = SSE_ERROR; - } - AddEvents(stream, events); - } void OnConnectEvent(rtc::AsyncSocket* socket) { AddEvents(socket, SSE_OPEN); } void OnReadEvent(rtc::AsyncSocket* socket) { AddEvents(socket, SSE_READ); } void OnWriteEvent(rtc::AsyncSocket* socket) { AddEvents(socket, SSE_WRITE); } @@ -102,7 +73,7 @@ class StreamSink : public sigslot::has_slots<> { AddEvents(socket, (0 == error) ? SSE_CLOSE : SSE_ERROR); } - void AddEvents(void* obj, int events) { + void AddEvents(rtc::AsyncSocket* obj, int events) { EventMap::iterator it = events_.find(obj); if (events_.end() == it) { events_.insert(EventMap::value_type(obj, events)); @@ -110,7 +81,7 @@ class StreamSink : public sigslot::has_slots<> { it->second |= events; } } - bool DoCheck(void* obj, StreamSinkEvent event, bool reset) { + bool DoCheck(rtc::AsyncSocket* obj, StreamSinkEvent event, bool reset) { EventMap::iterator it = events_.find(obj); if ((events_.end() == it) || (0 == (it->second & event))) { return false; @@ -120,16 +91,6 @@ class StreamSink : public sigslot::has_slots<> { } return true; } - int DoEvents(void* obj, bool reset) { - EventMap::iterator it = events_.find(obj); - if (events_.end() == it) - return 0; - int events = it->second; - if (reset) { - it->second = 0; - } - return events; - } EventMap events_; }; diff --git a/rtc_base/third_party/base64/BUILD.gn b/rtc_base/third_party/base64/BUILD.gn index db03e0273d..969c7c0c64 100644 --- a/rtc_base/third_party/base64/BUILD.gn +++ b/rtc_base/third_party/base64/BUILD.gn @@ -14,5 +14,8 @@ rtc_library("base64") { "base64.cc", "base64.h", ] - deps = [ "../../system:rtc_export" ] + deps = [ + "../..:checks", + "../../system:rtc_export", + ] } diff --git a/rtc_base/third_party/base64/base64.cc b/rtc_base/third_party/base64/base64.cc index 53ff6b9d54..b9acf9a4c9 100644 --- a/rtc_base/third_party/base64/base64.cc +++ b/rtc_base/third_party/base64/base64.cc @@ -19,6 +19,8 @@ 
#include #include +#include "rtc_base/checks.h" + using std::vector; namespace rtc { @@ -95,7 +97,7 @@ bool Base64::IsBase64Encoded(const std::string& str) { void Base64::EncodeFromArray(const void* data, size_t len, std::string* result) { - assert(nullptr != result); + RTC_DCHECK(result); result->clear(); result->resize(((len + 2) / 3) * 4); const unsigned char* byte_data = static_cast(data); @@ -223,15 +225,15 @@ bool Base64::DecodeFromArrayTemplate(const char* data, DecodeFlags flags, T* result, size_t* data_used) { - assert(nullptr != result); - assert(flags <= (DO_PARSE_MASK | DO_PAD_MASK | DO_TERM_MASK)); + RTC_DCHECK(result); + RTC_DCHECK_LE(flags, (DO_PARSE_MASK | DO_PAD_MASK | DO_TERM_MASK)); const DecodeFlags parse_flags = flags & DO_PARSE_MASK; const DecodeFlags pad_flags = flags & DO_PAD_MASK; const DecodeFlags term_flags = flags & DO_TERM_MASK; - assert(0 != parse_flags); - assert(0 != pad_flags); - assert(0 != term_flags); + RTC_DCHECK_NE(0, parse_flags); + RTC_DCHECK_NE(0, pad_flags); + RTC_DCHECK_NE(0, term_flags); result->clear(); result->reserve(len); diff --git a/rtc_base/thread.cc b/rtc_base/thread.cc index 5e48e4b857..8ca9ce76a8 100644 --- a/rtc_base/thread.cc +++ b/rtc_base/thread.cc @@ -29,9 +29,12 @@ #include #include "absl/algorithm/container.h" +#include "api/sequence_checker.h" #include "rtc_base/atomic_ops.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/deprecated/recursive_critical_section.h" +#include "rtc_base/event.h" +#include "rtc_base/internal/default_socket_server.h" #include "rtc_base/logging.h" #include "rtc_base/null_socket_server.h" #include "rtc_base/task_utils/to_queued_task.h" @@ -68,11 +71,9 @@ class ScopedAutoReleasePool { namespace rtc { namespace { -const int kSlowDispatchLoggingThreshold = 50; // 50 ms - class MessageHandlerWithTask final : public MessageHandler { public: - MessageHandlerWithTask() = default; + MessageHandlerWithTask() {} void OnMessage(Message* msg) 
override { static_cast(msg->pdata)->Run(); @@ -87,8 +88,8 @@ class MessageHandlerWithTask final : public MessageHandler { class RTC_SCOPED_LOCKABLE MarkProcessingCritScope { public: - MarkProcessingCritScope(const CriticalSection* cs, size_t* processing) - RTC_EXCLUSIVE_LOCK_FUNCTION(cs) + MarkProcessingCritScope(const RecursiveCriticalSection* cs, + size_t* processing) RTC_EXCLUSIVE_LOCK_FUNCTION(cs) : cs_(cs), processing_(processing) { cs_->Enter(); *processing_ += 1; @@ -100,7 +101,7 @@ class RTC_SCOPED_LOCKABLE MarkProcessingCritScope { } private: - const CriticalSection* const cs_; + const RecursiveCriticalSection* const cs_; size_t* processing_; RTC_DISALLOW_COPY_AND_ASSIGN(MarkProcessingCritScope); @@ -163,13 +164,16 @@ void ThreadManager::RemoveFromSendGraph(Thread* thread) { void ThreadManager::RegisterSendAndCheckForCycles(Thread* source, Thread* target) { + RTC_DCHECK(source); + RTC_DCHECK(target); + CritScope cs(&crit_); std::deque all_targets({target}); // We check the pre-existing who-sends-to-who graph for any path from target // to source. This loop is guaranteed to terminate because per the send graph // invariant, there are no cycles in the graph. - for (auto it = all_targets.begin(); it != all_targets.end(); ++it) { - const auto& targets = send_graph_[*it]; + for (size_t i = 0; i < all_targets.size(); i++) { + const auto& targets = send_graph_[all_targets[i]]; all_targets.insert(all_targets.end(), targets.begin(), targets.end()); } RTC_CHECK_EQ(absl::c_count(all_targets, source), 0) @@ -252,7 +256,7 @@ Thread* Thread::Current() { #ifndef NO_MAIN_THREAD_WRAPPING // Only autowrap the thread which instantiated the ThreadManager. 
if (!thread && manager->IsMainThread()) { - thread = new Thread(SocketServer::CreateDefault()); + thread = new Thread(CreateDefaultSocketServer()); thread->WrapCurrentWithThreadManager(manager, true); } #endif @@ -321,7 +325,7 @@ void rtc::ThreadManager::ChangeCurrentThreadForTest(rtc::Thread* thread) { Thread* ThreadManager::WrapCurrentThread() { Thread* result = CurrentThread(); if (nullptr == result) { - result = new Thread(SocketServer::CreateDefault()); + result = new Thread(CreateDefaultSocketServer()); result->WrapCurrentWithThreadManager(this, true); } return result; @@ -348,6 +352,35 @@ Thread::ScopedDisallowBlockingCalls::~ScopedDisallowBlockingCalls() { thread_->SetAllowBlockingCalls(previous_state_); } +#if RTC_DCHECK_IS_ON +Thread::ScopedCountBlockingCalls::ScopedCountBlockingCalls( + std::function callback) + : thread_(Thread::Current()), + base_blocking_call_count_(thread_->GetBlockingCallCount()), + base_could_be_blocking_call_count_( + thread_->GetCouldBeBlockingCallCount()), + result_callback_(std::move(callback)) {} + +Thread::ScopedCountBlockingCalls::~ScopedCountBlockingCalls() { + if (GetTotalBlockedCallCount() >= min_blocking_calls_for_callback_) { + result_callback_(GetBlockingCallCount(), GetCouldBeBlockingCallCount()); + } +} + +uint32_t Thread::ScopedCountBlockingCalls::GetBlockingCallCount() const { + return thread_->GetBlockingCallCount() - base_blocking_call_count_; +} + +uint32_t Thread::ScopedCountBlockingCalls::GetCouldBeBlockingCallCount() const { + return thread_->GetCouldBeBlockingCallCount() - + base_could_be_blocking_call_count_; +} + +uint32_t Thread::ScopedCountBlockingCalls::GetTotalBlockedCallCount() const { + return GetBlockingCallCount() + GetCouldBeBlockingCallCount(); +} +#endif + Thread::Thread(SocketServer* ss) : Thread(ss, /*do_init=*/true) {} Thread::Thread(std::unique_ptr ss) @@ -396,13 +429,11 @@ void Thread::DoDestroy() { // The signal is done from here to ensure // that it always gets called when the queue // is 
going away. - SignalQueueDestroyed(); - ThreadManager::Remove(this); - ClearInternal(nullptr, MQID_ANY, nullptr); - if (ss_) { ss_->SetMessageQueue(nullptr); } + ThreadManager::Remove(this); + ClearInternal(nullptr, MQID_ANY, nullptr); } SocketServer* Thread::socketserver() { @@ -675,14 +706,18 @@ void Thread::Dispatch(Message* pmsg) { TRACE_EVENT2("webrtc", "Thread::Dispatch", "src_file", pmsg->posted_from.file_name(), "src_func", pmsg->posted_from.function_name()); + RTC_DCHECK_RUN_ON(this); int64_t start_time = TimeMillis(); pmsg->phandler->OnMessage(pmsg); int64_t end_time = TimeMillis(); int64_t diff = TimeDiff(end_time, start_time); - if (diff >= kSlowDispatchLoggingThreshold) { - RTC_LOG(LS_INFO) << "Message took " << diff + if (diff >= dispatch_warning_ms_) { + RTC_LOG(LS_INFO) << "Message to " << name() << " took " << diff << "ms to dispatch. Posted from: " << pmsg->posted_from.ToString(); + // To avoid log spew, move the warning limit to only give warning + // for delays that are larger than the one observed. 
+ dispatch_warning_ms_ = diff + 1; } } @@ -691,7 +726,7 @@ bool Thread::IsCurrent() const { } std::unique_ptr Thread::CreateWithSocketServer() { - return std::unique_ptr(new Thread(SocketServer::CreateDefault())); + return std::unique_ptr(new Thread(CreateDefaultSocketServer())); } std::unique_ptr Thread::Create() { @@ -734,6 +769,16 @@ bool Thread::SetName(const std::string& name, const void* obj) { return true; } +void Thread::SetDispatchWarningMs(int deadline) { + if (!IsCurrent()) { + PostTask(webrtc::ToQueuedTask( + [this, deadline]() { SetDispatchWarningMs(deadline); })); + return; + } + RTC_DCHECK_RUN_ON(this); + dispatch_warning_ms_ = deadline; +} + bool Thread::Start() { RTC_DCHECK(!IsRunning()); @@ -883,51 +928,75 @@ void Thread::Send(const Location& posted_from, msg.message_id = id; msg.pdata = pdata; if (IsCurrent()) { +#if RTC_DCHECK_IS_ON + RTC_DCHECK(this->IsInvokeToThreadAllowed(this)); + RTC_DCHECK_RUN_ON(this); + could_be_blocking_call_count_++; +#endif msg.phandler->OnMessage(&msg); return; } AssertBlockingIsAllowedOnCurrentThread(); - AutoThread thread; Thread* current_thread = Thread::Current(); - RTC_DCHECK(current_thread != nullptr); // AutoThread ensures this + #if RTC_DCHECK_IS_ON - ThreadManager::Instance()->RegisterSendAndCheckForCycles(current_thread, - this); + if (current_thread) { + RTC_DCHECK_RUN_ON(current_thread); + current_thread->blocking_call_count_++; + RTC_DCHECK(current_thread->IsInvokeToThreadAllowed(this)); + ThreadManager::Instance()->RegisterSendAndCheckForCycles(current_thread, + this); + } #endif + + // Perhaps down the line we can get rid of this workaround and always require + // current_thread to be valid when Send() is called. 
+ std::unique_ptr done_event; + if (!current_thread) + done_event.reset(new rtc::Event()); + bool ready = false; - PostTask( - webrtc::ToQueuedTask([msg]() mutable { msg.phandler->OnMessage(&msg); }, - [this, &ready, current_thread] { - CritScope cs(&crit_); - ready = true; - current_thread->socketserver()->WakeUp(); - })); - - bool waited = false; - crit_.Enter(); - while (!ready) { - crit_.Leave(); - current_thread->socketserver()->Wait(kForever, false); - waited = true; + PostTask(webrtc::ToQueuedTask( + [&msg]() mutable { msg.phandler->OnMessage(&msg); }, + [this, &ready, current_thread, done = done_event.get()] { + if (current_thread) { + CritScope cs(&crit_); + ready = true; + current_thread->socketserver()->WakeUp(); + } else { + done->Set(); + } + })); + + if (current_thread) { + bool waited = false; crit_.Enter(); - } - crit_.Leave(); - - // Our Wait loop above may have consumed some WakeUp events for this - // Thread, that weren't relevant to this Send. Losing these WakeUps can - // cause problems for some SocketServers. - // - // Concrete example: - // Win32SocketServer on thread A calls Send on thread B. While processing the - // message, thread B Posts a message to A. We consume the wakeup for that - // Post while waiting for the Send to complete, which means that when we exit - // this loop, we need to issue another WakeUp, or else the Posted message - // won't be processed in a timely manner. - - if (waited) { - current_thread->socketserver()->WakeUp(); + while (!ready) { + crit_.Leave(); + current_thread->socketserver()->Wait(kForever, false); + waited = true; + crit_.Enter(); + } + crit_.Leave(); + + // Our Wait loop above may have consumed some WakeUp events for this + // Thread, that weren't relevant to this Send. Losing these WakeUps can + // cause problems for some SocketServers. + // + // Concrete example: + // Win32SocketServer on thread A calls Send on thread B. While processing + // the message, thread B Posts a message to A. 
We consume the wakeup for + // that Post while waiting for the Send to complete, which means that when + // we exit this loop, we need to issue another WakeUp, or else the Posted + // message won't be processed in a timely manner. + + if (waited) { + current_thread->socketserver()->WakeUp(); + } + } else { + done_event->Wait(rtc::Event::kForever); } } @@ -974,6 +1043,61 @@ void Thread::QueuedTaskHandler::OnMessage(Message* msg) { task.release(); } +void Thread::AllowInvokesToThread(Thread* thread) { +#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) + if (!IsCurrent()) { + PostTask(webrtc::ToQueuedTask( + [thread, this]() { AllowInvokesToThread(thread); })); + return; + } + RTC_DCHECK_RUN_ON(this); + allowed_threads_.push_back(thread); + invoke_policy_enabled_ = true; +#endif +} + +void Thread::DisallowAllInvokes() { +#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) + if (!IsCurrent()) { + PostTask(webrtc::ToQueuedTask([this]() { DisallowAllInvokes(); })); + return; + } + RTC_DCHECK_RUN_ON(this); + allowed_threads_.clear(); + invoke_policy_enabled_ = true; +#endif +} + +#if RTC_DCHECK_IS_ON +uint32_t Thread::GetBlockingCallCount() const { + RTC_DCHECK_RUN_ON(this); + return blocking_call_count_; +} +uint32_t Thread::GetCouldBeBlockingCallCount() const { + RTC_DCHECK_RUN_ON(this); + return could_be_blocking_call_count_; +} +#endif + +// Returns true if no policies added or if there is at least one policy +// that permits invocation to |target| thread. +bool Thread::IsInvokeToThreadAllowed(rtc::Thread* target) { +#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) + RTC_DCHECK_RUN_ON(this); + if (!invoke_policy_enabled_) { + return true; + } + for (const auto* thread : allowed_threads_) { + if (thread == target) { + return true; + } + } + return false; +#else + return true; +#endif +} + void Thread::PostTask(std::unique_ptr task) { // Though Post takes MessageData by raw pointer (last parameter), it still // takes it with ownership. 
@@ -1071,7 +1195,7 @@ MessageHandler* Thread::GetPostTaskMessageHandler() { } AutoThread::AutoThread() - : Thread(SocketServer::CreateDefault(), /*do_init=*/false) { + : Thread(CreateDefaultSocketServer(), /*do_init=*/false) { if (!ThreadManager::Instance()->CurrentThread()) { // DoInit registers with ThreadManager. Do that only if we intend to // be rtc::Thread::Current(), otherwise ProcessAllMessageQueuesInternal will diff --git a/rtc_base/thread.h b/rtc_base/thread.h index e25ed4ea8c..6e68f1a679 100644 --- a/rtc_base/thread.h +++ b/rtc_base/thread.h @@ -29,7 +29,7 @@ #include "api/task_queue/queued_task.h" #include "api/task_queue/task_queue_base.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/deprecated/recursive_critical_section.h" #include "rtc_base/location.h" #include "rtc_base/message_handler.h" #include "rtc_base/platform_thread_types.h" @@ -42,6 +42,35 @@ #include "rtc_base/win32.h" #endif +#if RTC_DCHECK_IS_ON +// Counts how many blocking Thread::Invoke or Thread::Send calls are made from +// within a scope and logs the number of blocking calls at the end of the scope. +#define RTC_LOG_THREAD_BLOCK_COUNT() \ + rtc::Thread::ScopedCountBlockingCalls blocked_call_count_printer( \ + [func = __func__](uint32_t actual_block, uint32_t could_block) { \ + auto total = actual_block + could_block; \ + if (total) { \ + RTC_LOG(LS_WARNING) << "Blocking " << func << ": total=" << total \ + << " (actual=" << actual_block \ + << ", could=" << could_block << ")"; \ + } \ + }) + +// Adds an RTC_DCHECK_LE that checks that the number of blocking calls are +// less than or equal to a specific value. Use to avoid regressing in the +// number of blocking thread calls. +// Note: Use of this macro, requires RTC_LOG_THREAD_BLOCK_COUNT() to be called +// first. 
+#define RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN(x) \ + do { \ + blocked_call_count_printer.set_minimum_call_count_for_callback(x + 1); \ + RTC_DCHECK_LE(blocked_call_count_printer.GetTotalBlockedCallCount(), x); \ + } while (0) +#else +#define RTC_LOG_THREAD_BLOCK_COUNT() +#define RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN(x) +#endif + namespace rtc { class Thread; @@ -82,9 +111,6 @@ class RTC_EXPORT ThreadManager { static void Remove(Thread* message_queue); static void Clear(MessageHandler* handler); - // TODO(nisse): Delete alias, as soon as downstream code is updated. - static void ProcessAllMessageQueues() { ProcessAllMessageQueuesForTesting(); } - // For testing purposes, for use with a simulated clock. // Ensures that all message queues have processed delayed messages // up until the current point in time. @@ -140,7 +166,7 @@ class RTC_EXPORT ThreadManager { // Methods that don't modify the list of message queues may be called in a // re-entrant fashion. "processing_" keeps track of the depth of re-entrant // calls. - CriticalSection crit_; + RecursiveCriticalSection crit_; size_t processing_ RTC_GUARDED_BY(crit_) = 0; #if RTC_DCHECK_IS_ON // Represents all thread seand actions by storing all send targets per thread. 
@@ -215,6 +241,39 @@ class RTC_LOCKABLE RTC_EXPORT Thread : public webrtc::TaskQueueBase { const bool previous_state_; }; +#if RTC_DCHECK_IS_ON + class ScopedCountBlockingCalls { + public: + ScopedCountBlockingCalls(std::function callback); + ScopedCountBlockingCalls(const ScopedDisallowBlockingCalls&) = delete; + ScopedCountBlockingCalls& operator=(const ScopedDisallowBlockingCalls&) = + delete; + ~ScopedCountBlockingCalls(); + + uint32_t GetBlockingCallCount() const; + uint32_t GetCouldBeBlockingCallCount() const; + uint32_t GetTotalBlockedCallCount() const; + + void set_minimum_call_count_for_callback(uint32_t minimum) { + min_blocking_calls_for_callback_ = minimum; + } + + private: + Thread* const thread_; + const uint32_t base_blocking_call_count_; + const uint32_t base_could_be_blocking_call_count_; + // The minimum number of blocking calls required in order to issue the + // result_callback_. This is used by RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN to + // tame log spam. + // By default we always issue the callback, regardless of callback count. + uint32_t min_blocking_calls_for_callback_ = 0; + std::function result_callback_; + }; + + uint32_t GetBlockingCallCount() const; + uint32_t GetCouldBeBlockingCallCount() const; +#endif + SocketServer* socketserver(); // Note: The behavior of Thread has changed. When a thread is stopped, @@ -277,10 +336,6 @@ class RTC_LOCKABLE RTC_EXPORT Thread : public webrtc::TaskQueueBase { } } - // When this signal is sent out, any references to this queue should - // no longer be used. - sigslot::signal0<> SignalQueueDestroyed; - bool IsCurrent() const; // Sleeps the calling thread for the specified number of milliseconds, during @@ -293,6 +348,11 @@ class RTC_LOCKABLE RTC_EXPORT Thread : public webrtc::TaskQueueBase { const std::string& name() const { return name_; } bool SetName(const std::string& name, const void* obj); + // Sets the expected processing time in ms. 
The thread will write + // log messages when Invoke() takes more time than this. + // Default is 50 ms. + void SetDispatchWarningMs(int deadline); + // Starts the execution of the thread. bool Start(); @@ -338,6 +398,19 @@ class RTC_LOCKABLE RTC_EXPORT Thread : public webrtc::TaskQueueBase { InvokeInternal(posted_from, functor); } + // Allows invoke to specified |thread|. Thread never will be dereferenced and + // will be used only for reference-based comparison, so instance can be safely + // deleted. If NDEBUG is defined and DCHECK_ALWAYS_ON is undefined do nothing. + void AllowInvokesToThread(Thread* thread); + + // If NDEBUG is defined and DCHECK_ALWAYS_ON is undefined do nothing. + void DisallowAllInvokes(); + // Returns true if |target| was allowed by AllowInvokesToThread() or if no + // calls were made to AllowInvokesToThread and DisallowAllInvokes. Otherwise + // returns false. + // If NDEBUG is defined and DCHECK_ALWAYS_ON is undefined always returns true. + bool IsInvokeToThreadAllowed(rtc::Thread* target); + // Posts a task to invoke the functor on |this| thread asynchronously, i.e. // without blocking the thread that invoked PostTask(). Ownership of |functor| // is passed and (usually, see below) destroyed on |this| thread after it is @@ -428,13 +501,6 @@ class RTC_LOCKABLE RTC_EXPORT Thread : public webrtc::TaskQueueBase { // irrevocable. Must be called on this thread. void DisallowBlockingCalls() { SetAllowBlockingCalls(false); } -#ifdef WEBRTC_ANDROID - // Sets the per-thread allow-blocking-calls flag to true, sidestepping the - // invariants upheld by DisallowBlockingCalls() and - // ScopedDisallowBlockingCalls. Must be called on this thread. 
- void DEPRECATED_AllowBlockingCalls() { SetAllowBlockingCalls(true); } -#endif - protected: class CurrentThreadSetter : CurrentTaskQueueSetter { public: @@ -519,11 +585,14 @@ class RTC_LOCKABLE RTC_EXPORT Thread : public webrtc::TaskQueueBase { friend class ScopedDisallowBlockingCalls; - CriticalSection* CritForTest() { return &crit_; } + RecursiveCriticalSection* CritForTest() { return &crit_; } private: + static const int kSlowDispatchLoggingThreshold = 50; // 50 ms + class QueuedTaskHandler final : public MessageHandler { public: + QueuedTaskHandler() {} void OnMessage(Message* msg) override; }; @@ -566,7 +635,13 @@ class RTC_LOCKABLE RTC_EXPORT Thread : public webrtc::TaskQueueBase { MessageList messages_ RTC_GUARDED_BY(crit_); PriorityQueue delayed_messages_ RTC_GUARDED_BY(crit_); uint32_t delayed_next_num_ RTC_GUARDED_BY(crit_); - CriticalSection crit_; +#if RTC_DCHECK_IS_ON + uint32_t blocking_call_count_ RTC_GUARDED_BY(this) = 0; + uint32_t could_be_blocking_call_count_ RTC_GUARDED_BY(this) = 0; + std::vector allowed_threads_ RTC_GUARDED_BY(this); + bool invoke_policy_enabled_ RTC_GUARDED_BY(this) = false; +#endif + RecursiveCriticalSection crit_; bool fInitialized_; bool fDestroyed_; @@ -606,13 +681,17 @@ class RTC_LOCKABLE RTC_EXPORT Thread : public webrtc::TaskQueueBase { friend class ThreadManager; + int dispatch_warning_ms_ RTC_GUARDED_BY(this) = kSlowDispatchLoggingThreshold; + RTC_DISALLOW_COPY_AND_ASSIGN(Thread); }; // AutoThread automatically installs itself at construction // uninstalls at destruction, if a Thread object is // _not already_ associated with the current OS thread. - +// +// NOTE: *** This class should only be used by tests *** +// class AutoThread : public Thread { public: AutoThread(); diff --git a/rtc_base/thread_checker.h b/rtc_base/thread_checker.h deleted file mode 100644 index 876a08e38c..0000000000 --- a/rtc_base/thread_checker.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2014 The WebRTC project authors. 
All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// Borrowed from Chromium's src/base/threading/thread_checker.h. - -#ifndef RTC_BASE_THREAD_CHECKER_H_ -#define RTC_BASE_THREAD_CHECKER_H_ - -#include "rtc_base/deprecation.h" -#include "rtc_base/synchronization/sequence_checker.h" - -namespace rtc { -// TODO(srte): Replace usages of this with SequenceChecker. -class ThreadChecker : public webrtc::SequenceChecker { - public: - RTC_DEPRECATED bool CalledOnValidThread() const { return IsCurrent(); } - RTC_DEPRECATED void DetachFromThread() { Detach(); } -}; -} // namespace rtc -#endif // RTC_BASE_THREAD_CHECKER_H_ diff --git a/rtc_base/thread_checker_unittest.cc b/rtc_base/thread_checker_unittest.cc deleted file mode 100644 index b5927043f0..0000000000 --- a/rtc_base/thread_checker_unittest.cc +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -// Borrowed from Chromium's src/base/threading/thread_checker_unittest.cc. 
- -#include "rtc_base/thread_checker.h" - -#include -#include - -#include "rtc_base/checks.h" -#include "rtc_base/constructor_magic.h" -#include "rtc_base/null_socket_server.h" -#include "rtc_base/socket_server.h" -#include "rtc_base/task_queue.h" -#include "rtc_base/thread.h" -#include "test/gtest.h" - -// Duplicated from base/threading/thread_checker.h so that we can be -// good citizens there and undef the macro. -#define ENABLE_THREAD_CHECKER RTC_DCHECK_IS_ON - -namespace rtc { - -namespace { - -// Simple class to exercise the basics of ThreadChecker. -// Both the destructor and DoStuff should verify that they were -// called on the same thread as the constructor. -class ThreadCheckerClass : public ThreadChecker { - public: - ThreadCheckerClass() {} - - // Verifies that it was called on the same thread as the constructor. - void DoStuff() { RTC_DCHECK(IsCurrent()); } - - void Detach() { ThreadChecker::Detach(); } - - static void MethodOnDifferentThreadImpl(); - static void DetachThenCallFromDifferentThreadImpl(); - - private: - RTC_DISALLOW_COPY_AND_ASSIGN(ThreadCheckerClass); -}; - -// Calls ThreadCheckerClass::DoStuff on another thread. -class CallDoStuffOnThread : public Thread { - public: - explicit CallDoStuffOnThread(ThreadCheckerClass* thread_checker_class) - : Thread(std::unique_ptr(new rtc::NullSocketServer())), - thread_checker_class_(thread_checker_class) { - SetName("call_do_stuff_on_thread", nullptr); - } - - void Run() override { thread_checker_class_->DoStuff(); } - - // New method. Needed since Thread::Join is protected, and it is called by - // the TEST. - void Join() { Thread::Join(); } - - private: - ThreadCheckerClass* thread_checker_class_; - - RTC_DISALLOW_COPY_AND_ASSIGN(CallDoStuffOnThread); -}; - -// Deletes ThreadCheckerClass on a different thread. 
-class DeleteThreadCheckerClassOnThread : public Thread { - public: - explicit DeleteThreadCheckerClassOnThread( - std::unique_ptr thread_checker_class) - : Thread(std::unique_ptr(new rtc::NullSocketServer())), - thread_checker_class_(std::move(thread_checker_class)) { - SetName("delete_thread_checker_class_on_thread", nullptr); - } - - void Run() override { thread_checker_class_.reset(); } - - // New method. Needed since Thread::Join is protected, and it is called by - // the TEST. - void Join() { Thread::Join(); } - - bool has_been_deleted() const { return !thread_checker_class_; } - - private: - std::unique_ptr thread_checker_class_; - - RTC_DISALLOW_COPY_AND_ASSIGN(DeleteThreadCheckerClassOnThread); -}; - -} // namespace - -TEST(ThreadCheckerTest, CallsAllowedOnSameThread) { - std::unique_ptr thread_checker_class( - new ThreadCheckerClass); - - // Verify that DoStuff doesn't assert. - thread_checker_class->DoStuff(); - - // Verify that the destructor doesn't assert. - thread_checker_class.reset(); -} - -TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) { - std::unique_ptr thread_checker_class( - new ThreadCheckerClass); - - // Verify that the destructor doesn't assert - // when called on a different thread. - DeleteThreadCheckerClassOnThread delete_on_thread( - std::move(thread_checker_class)); - - EXPECT_FALSE(delete_on_thread.has_been_deleted()); - - delete_on_thread.Start(); - delete_on_thread.Join(); - - EXPECT_TRUE(delete_on_thread.has_been_deleted()); -} - -TEST(ThreadCheckerTest, Detach) { - std::unique_ptr thread_checker_class( - new ThreadCheckerClass); - - // Verify that DoStuff doesn't assert when called on a different thread after - // a call to Detach. 
- thread_checker_class->Detach(); - CallDoStuffOnThread call_on_thread(thread_checker_class.get()); - - call_on_thread.Start(); - call_on_thread.Join(); -} - -#if GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER - -void ThreadCheckerClass::MethodOnDifferentThreadImpl() { - std::unique_ptr thread_checker_class( - new ThreadCheckerClass); - - // DoStuff should assert in debug builds only when called on a - // different thread. - CallDoStuffOnThread call_on_thread(thread_checker_class.get()); - - call_on_thread.Start(); - call_on_thread.Join(); -} - -#if ENABLE_THREAD_CHECKER -TEST(ThreadCheckerDeathTest, MethodNotAllowedOnDifferentThreadInDebug) { - ASSERT_DEATH({ ThreadCheckerClass::MethodOnDifferentThreadImpl(); }, ""); -} -#else -TEST(ThreadCheckerTest, MethodAllowedOnDifferentThreadInRelease) { - ThreadCheckerClass::MethodOnDifferentThreadImpl(); -} -#endif // ENABLE_THREAD_CHECKER - -void ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl() { - std::unique_ptr thread_checker_class( - new ThreadCheckerClass); - - // DoStuff doesn't assert when called on a different thread - // after a call to Detach. - thread_checker_class->Detach(); - CallDoStuffOnThread call_on_thread(thread_checker_class.get()); - - call_on_thread.Start(); - call_on_thread.Join(); - - // DoStuff should assert in debug builds only after moving to - // another thread. - thread_checker_class->DoStuff(); -} - -#if ENABLE_THREAD_CHECKER -TEST(ThreadCheckerDeathTest, DetachFromThreadInDebug) { - ASSERT_DEATH({ ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl(); }, - ""); -} -#else -TEST(ThreadCheckerTest, DetachFromThreadInRelease) { - ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl(); -} -#endif // ENABLE_THREAD_CHECKER - -#endif // GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER - -class ThreadAnnotateTest { - public: - // Next two function should create warnings when compile (e.g. if used with - // specific T). 
- // TODO(danilchap): Find a way to test they do not compile when thread - // annotation checks enabled. - template - void access_var_no_annotate() { - var_thread_ = 42; - } - - template - void access_fun_no_annotate() { - function(); - } - - // Functions below should be able to compile. - void access_var_annotate_thread() { - RTC_DCHECK_RUN_ON(thread_); - var_thread_ = 42; - } - - void access_var_annotate_checker() { - RTC_DCHECK_RUN_ON(&checker_); - var_checker_ = 44; - } - - void access_var_annotate_queue() { - RTC_DCHECK_RUN_ON(queue_); - var_queue_ = 46; - } - - void access_fun_annotate() { - RTC_DCHECK_RUN_ON(thread_); - function(); - } - - void access_fun_and_var() { - RTC_DCHECK_RUN_ON(thread_); - fun_acccess_var(); - } - - private: - void function() RTC_RUN_ON(thread_) {} - void fun_acccess_var() RTC_RUN_ON(thread_) { var_thread_ = 13; } - - rtc::Thread* thread_; - rtc::ThreadChecker checker_; - rtc::TaskQueue* queue_; - - int var_thread_ RTC_GUARDED_BY(thread_); - int var_checker_ RTC_GUARDED_BY(checker_); - int var_queue_ RTC_GUARDED_BY(queue_); -}; - -// Just in case we ever get lumped together with other compilation units. 
-#undef ENABLE_THREAD_CHECKER - -} // namespace rtc diff --git a/rtc_base/thread_unittest.cc b/rtc_base/thread_unittest.cc index e1011f4119..789bdd943e 100644 --- a/rtc_base/thread_unittest.cc +++ b/rtc_base/thread_unittest.cc @@ -19,15 +19,18 @@ #include "rtc_base/atomic_ops.h" #include "rtc_base/event.h" #include "rtc_base/gunit.h" +#include "rtc_base/internal/default_socket_server.h" #include "rtc_base/null_socket_server.h" #include "rtc_base/physical_socket_server.h" #include "rtc_base/socket_address.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/third_party/sigslot/sigslot.h" #include "test/testsupport/rtc_expect_death.h" #if defined(WEBRTC_WIN) #include // NOLINT + #endif namespace rtc { @@ -94,7 +97,7 @@ class SocketClient : public TestGenerator, public sigslot::has_slots<> { }; // Receives messages and sends on a socket. -class MessageClient : public MessageHandler, public TestGenerator { +class MessageClient : public MessageHandlerAutoCleanup, public TestGenerator { public: MessageClient(Thread* pth, Socket* socket) : socket_(socket) {} @@ -161,17 +164,17 @@ class AtomicBool { public: explicit AtomicBool(bool value = false) : flag_(value) {} AtomicBool& operator=(bool value) { - CritScope scoped_lock(&cs_); + webrtc::MutexLock scoped_lock(&mutex_); flag_ = value; return *this; } bool get() const { - CritScope scoped_lock(&cs_); + webrtc::MutexLock scoped_lock(&mutex_); return flag_; } private: - CriticalSection cs_; + mutable webrtc::Mutex mutex_; bool flag_; }; @@ -253,6 +256,81 @@ TEST(ThreadTest, DISABLED_Main) { EXPECT_EQ(55, sock_client.last); } +TEST(ThreadTest, CountBlockingCalls) { + // When the test runs, this will print out: + // (thread_unittest.cc:262): Blocking TestBody: total=2 (actual=1, could=1) + RTC_LOG_THREAD_BLOCK_COUNT(); +#if RTC_DCHECK_IS_ON + rtc::Thread* current = rtc::Thread::Current(); + ASSERT_TRUE(current); + rtc::Thread::ScopedCountBlockingCalls blocked_calls( 
+ [&](uint32_t actual_block, uint32_t could_block) { + EXPECT_EQ(1u, actual_block); + EXPECT_EQ(1u, could_block); + }); + + EXPECT_EQ(0u, blocked_calls.GetBlockingCallCount()); + EXPECT_EQ(0u, blocked_calls.GetCouldBeBlockingCallCount()); + EXPECT_EQ(0u, blocked_calls.GetTotalBlockedCallCount()); + + // Test invoking on the current thread. This should not count as an 'actual' + // invoke, but should still count as an invoke that could block since we + // that the call to Invoke serves a purpose in some configurations (and should + // not be used a general way to call methods on the same thread). + current->Invoke(RTC_FROM_HERE, []() {}); + EXPECT_EQ(0u, blocked_calls.GetBlockingCallCount()); + EXPECT_EQ(1u, blocked_calls.GetCouldBeBlockingCallCount()); + EXPECT_EQ(1u, blocked_calls.GetTotalBlockedCallCount()); + + // Create a new thread to invoke on. + auto thread = Thread::CreateWithSocketServer(); + thread->Start(); + EXPECT_EQ(42, thread->Invoke(RTC_FROM_HERE, []() { return 42; })); + EXPECT_EQ(1u, blocked_calls.GetBlockingCallCount()); + EXPECT_EQ(1u, blocked_calls.GetCouldBeBlockingCallCount()); + EXPECT_EQ(2u, blocked_calls.GetTotalBlockedCallCount()); + thread->Stop(); + RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN(2); +#else + RTC_DCHECK_BLOCK_COUNT_NO_MORE_THAN(0); + RTC_LOG(LS_INFO) << "Test not active in this config"; +#endif +} + +#if RTC_DCHECK_IS_ON +TEST(ThreadTest, CountBlockingCallsOneCallback) { + rtc::Thread* current = rtc::Thread::Current(); + ASSERT_TRUE(current); + bool was_called_back = false; + { + rtc::Thread::ScopedCountBlockingCalls blocked_calls( + [&](uint32_t actual_block, uint32_t could_block) { + was_called_back = true; + }); + current->Invoke(RTC_FROM_HERE, []() {}); + } + EXPECT_TRUE(was_called_back); +} + +TEST(ThreadTest, CountBlockingCallsSkipCallback) { + rtc::Thread* current = rtc::Thread::Current(); + ASSERT_TRUE(current); + bool was_called_back = false; + { + rtc::Thread::ScopedCountBlockingCalls blocked_calls( + [&](uint32_t 
actual_block, uint32_t could_block) { + was_called_back = true; + }); + // Changed `blocked_calls` to not issue the callback if there are 1 or + // fewer blocking calls (i.e. we set the minimum required number to 2). + blocked_calls.set_minimum_call_count_for_callback(2); + current->Invoke(RTC_FROM_HERE, []() {}); + } + // We should not have gotten a call back. + EXPECT_FALSE(was_called_back); +} +#endif + // Test that setting thread names doesn't cause a malfunction. // There's no easy way to verify the name was set properly at this time. TEST(ThreadTest, Names) { @@ -288,6 +366,63 @@ TEST(ThreadTest, Wrap) { ThreadManager::Instance()->SetCurrentThread(current_thread); } +#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) +TEST(ThreadTest, InvokeToThreadAllowedReturnsTrueWithoutPolicies) { + // Create and start the thread. + auto thread1 = Thread::CreateWithSocketServer(); + auto thread2 = Thread::CreateWithSocketServer(); + + thread1->PostTask(ToQueuedTask( + [&]() { EXPECT_TRUE(thread1->IsInvokeToThreadAllowed(thread2.get())); })); + Thread* th_main = Thread::Current(); + th_main->ProcessMessages(100); +} + +TEST(ThreadTest, InvokeAllowedWhenThreadsAdded) { + // Create and start the thread. + auto thread1 = Thread::CreateWithSocketServer(); + auto thread2 = Thread::CreateWithSocketServer(); + auto thread3 = Thread::CreateWithSocketServer(); + auto thread4 = Thread::CreateWithSocketServer(); + + thread1->AllowInvokesToThread(thread2.get()); + thread1->AllowInvokesToThread(thread3.get()); + + thread1->PostTask(ToQueuedTask([&]() { + EXPECT_TRUE(thread1->IsInvokeToThreadAllowed(thread2.get())); + EXPECT_TRUE(thread1->IsInvokeToThreadAllowed(thread3.get())); + EXPECT_FALSE(thread1->IsInvokeToThreadAllowed(thread4.get())); + })); + Thread* th_main = Thread::Current(); + th_main->ProcessMessages(100); +} + +TEST(ThreadTest, InvokesDisallowedWhenDisallowAllInvokes) { + // Create and start the thread. 
+ auto thread1 = Thread::CreateWithSocketServer(); + auto thread2 = Thread::CreateWithSocketServer(); + + thread1->DisallowAllInvokes(); + + thread1->PostTask(ToQueuedTask([&]() { + EXPECT_FALSE(thread1->IsInvokeToThreadAllowed(thread2.get())); + })); + Thread* th_main = Thread::Current(); + th_main->ProcessMessages(100); +} +#endif // (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) + +TEST(ThreadTest, InvokesAllowedByDefault) { + // Create and start the thread. + auto thread1 = Thread::CreateWithSocketServer(); + auto thread2 = Thread::CreateWithSocketServer(); + + thread1->PostTask(ToQueuedTask( + [&]() { EXPECT_TRUE(thread1->IsInvokeToThreadAllowed(thread2.get())); })); + Thread* th_main = Thread::Current(); + th_main->ProcessMessages(100); +} + TEST(ThreadTest, Invoke) { // Create and start the thread. auto thread = Thread::CreateWithSocketServer(); @@ -356,24 +491,24 @@ TEST(ThreadTest, ThreeThreadsInvoke) { explicit LockedBool(bool value) : value_(value) {} void Set(bool value) { - CritScope lock(&crit_); + webrtc::MutexLock lock(&mutex_); value_ = value; } bool Get() { - CritScope lock(&crit_); + webrtc::MutexLock lock(&mutex_); return value_; } private: - CriticalSection crit_; - bool value_ RTC_GUARDED_BY(crit_); + webrtc::Mutex mutex_; + bool value_ RTC_GUARDED_BY(mutex_); }; struct LocalFuncs { static void Set(LockedBool* out) { out->Set(true); } static void InvokeSet(Thread* thread, LockedBool* out) { - thread->Invoke(RTC_FROM_HERE, Bind(&Set, out)); + thread->Invoke(RTC_FROM_HERE, [out] { Set(out); }); } // Set |out| true and call InvokeSet on |thread|. @@ -386,68 +521,41 @@ TEST(ThreadTest, ThreeThreadsInvoke) { // Asynchronously invoke SetAndInvokeSet on |thread1| and wait until // |thread1| starts the call. 
- static void AsyncInvokeSetAndWait(AsyncInvoker* invoker, + static void AsyncInvokeSetAndWait(DEPRECATED_AsyncInvoker* invoker, Thread* thread1, Thread* thread2, LockedBool* out) { - CriticalSection crit; LockedBool async_invoked(false); invoker->AsyncInvoke( - RTC_FROM_HERE, thread1, - Bind(&SetAndInvokeSet, &async_invoked, thread2, out)); + RTC_FROM_HERE, thread1, [&async_invoked, thread2, out] { + SetAndInvokeSet(&async_invoked, thread2, out); + }); EXPECT_TRUE_WAIT(async_invoked.Get(), 2000); } }; - AsyncInvoker invoker; + DEPRECATED_AsyncInvoker invoker; LockedBool thread_a_called(false); // Start the sequence A --(invoke)--> B --(async invoke)--> C --(invoke)--> A. // Thread B returns when C receives the call and C should be blocked until A // starts to process messages. - thread_b->Invoke(RTC_FROM_HERE, - Bind(&LocalFuncs::AsyncInvokeSetAndWait, &invoker, - thread_c.get(), thread_a, &thread_a_called)); + Thread* thread_c_ptr = thread_c.get(); + thread_b->Invoke( + RTC_FROM_HERE, [&invoker, thread_c_ptr, thread_a, &thread_a_called] { + LocalFuncs::AsyncInvokeSetAndWait(&invoker, thread_c_ptr, thread_a, + &thread_a_called); + }); EXPECT_FALSE(thread_a_called.Get()); EXPECT_TRUE_WAIT(thread_a_called.Get(), 2000); } -// Set the name on a thread when the underlying QueueDestroyed signal is -// triggered. This causes an error if the object is already partially -// destroyed. -class SetNameOnSignalQueueDestroyedTester : public sigslot::has_slots<> { - public: - SetNameOnSignalQueueDestroyedTester(Thread* thread) : thread_(thread) { - thread->SignalQueueDestroyed.connect( - this, &SetNameOnSignalQueueDestroyedTester::OnQueueDestroyed); - } - - void OnQueueDestroyed() { - // Makes sure that if we access the Thread while it's being destroyed, that - // it doesn't cause a problem because the vtable has been modified. 
- thread_->SetName("foo", nullptr); - } - - private: - Thread* thread_; -}; - -TEST(ThreadTest, SetNameOnSignalQueueDestroyed) { - auto thread1 = Thread::CreateWithSocketServer(); - SetNameOnSignalQueueDestroyedTester tester1(thread1.get()); - thread1.reset(); - - Thread* thread2 = new AutoThread(); - SetNameOnSignalQueueDestroyedTester tester2(thread2); - delete thread2; -} - class ThreadQueueTest : public ::testing::Test, public Thread { public: - ThreadQueueTest() : Thread(SocketServer::CreateDefault(), true) {} + ThreadQueueTest() : Thread(CreateDefaultSocketServer(), true) {} bool IsLocked_Worker() { if (!CritForTest()->TryEnter()) { return true; @@ -460,8 +568,8 @@ class ThreadQueueTest : public ::testing::Test, public Thread { // succeed, since our critical sections are reentrant. std::unique_ptr worker(Thread::CreateWithSocketServer()); worker->Start(); - return worker->Invoke( - RTC_FROM_HERE, rtc::Bind(&ThreadQueueTest::IsLocked_Worker, this)); + return worker->Invoke(RTC_FROM_HERE, + [this] { return IsLocked_Worker(); }); } }; @@ -497,7 +605,7 @@ static void DelayedPostsWithIdenticalTimesAreProcessedInFifoOrder(Thread* q) { } TEST_F(ThreadQueueTest, DelayedPostsWithIdenticalTimesAreProcessedInFifoOrder) { - Thread q(SocketServer::CreateDefault(), true); + Thread q(CreateDefaultSocketServer(), true); DelayedPostsWithIdenticalTimesAreProcessedInFifoOrder(&q); NullSocketServer nullss; @@ -516,7 +624,7 @@ TEST_F(ThreadQueueTest, DisposeNotLocked) { EXPECT_FALSE(was_locked); } -class DeletedMessageHandler : public MessageHandler { +class DeletedMessageHandler : public MessageHandlerAutoCleanup { public: explicit DeletedMessageHandler(bool* deleted) : deleted_(deleted) {} ~DeletedMessageHandler() override { *deleted_ = true; } @@ -606,12 +714,13 @@ TEST(ThreadManager, ProcessAllMessageQueuesWithClearedQueue) { ThreadManager::ProcessAllMessageQueuesForTesting(); } -class RefCountedHandler : public MessageHandler, public rtc::RefCountInterface { +class 
RefCountedHandler : public MessageHandlerAutoCleanup, + public rtc::RefCountInterface { public: void OnMessage(Message* msg) override {} }; -class EmptyHandler : public MessageHandler { +class EmptyHandler : public MessageHandlerAutoCleanup { public: void OnMessage(Message* msg) override {} }; @@ -652,7 +761,7 @@ class AsyncInvokeTest : public ::testing::Test { }; TEST_F(AsyncInvokeTest, FireAndForget) { - AsyncInvoker invoker; + DEPRECATED_AsyncInvoker invoker; // Create and start the thread. auto thread = Thread::CreateWithSocketServer(); thread->Start(); @@ -664,7 +773,7 @@ TEST_F(AsyncInvokeTest, FireAndForget) { } TEST_F(AsyncInvokeTest, NonCopyableFunctor) { - AsyncInvoker invoker; + DEPRECATED_AsyncInvoker invoker; // Create and start the thread. auto thread = Thread::CreateWithSocketServer(); thread->Start(); @@ -695,7 +804,7 @@ TEST_F(AsyncInvokeTest, KillInvokerDuringExecute) { EXPECT_FALSE(invoker_destroyed); functor_finished.Set(); }; - AsyncInvoker invoker; + DEPRECATED_AsyncInvoker invoker; invoker.AsyncInvoke(RTC_FROM_HERE, thread.get(), functor); functor_started.Wait(Event::kForever); @@ -724,7 +833,7 @@ TEST_F(AsyncInvokeTest, KillInvokerDuringExecuteWithReentrantInvoke) { Thread thread(std::make_unique()); thread.Start(); { - AsyncInvoker invoker; + DEPRECATED_AsyncInvoker invoker; auto reentrant_functor = [&reentrant_functor_run] { reentrant_functor_run = true; }; @@ -743,7 +852,7 @@ TEST_F(AsyncInvokeTest, KillInvokerDuringExecuteWithReentrantInvoke) { } TEST_F(AsyncInvokeTest, Flush) { - AsyncInvoker invoker; + DEPRECATED_AsyncInvoker invoker; AtomicBool flag1; AtomicBool flag2; // Queue two async calls to the current thread. @@ -759,7 +868,7 @@ TEST_F(AsyncInvokeTest, Flush) { } TEST_F(AsyncInvokeTest, FlushWithIds) { - AsyncInvoker invoker; + DEPRECATED_AsyncInvoker invoker; AtomicBool flag1; AtomicBool flag2; // Queue two async calls to the current thread, one with a message id. 
@@ -780,110 +889,6 @@ TEST_F(AsyncInvokeTest, FlushWithIds) { EXPECT_TRUE(flag2.get()); } -class GuardedAsyncInvokeTest : public ::testing::Test { - public: - void IntCallback(int value) { - EXPECT_EQ(expected_thread_, Thread::Current()); - int_value_ = value; - } - void SetExpectedThreadForIntCallback(Thread* thread) { - expected_thread_ = thread; - } - - protected: - constexpr static int kWaitTimeout = 1000; - GuardedAsyncInvokeTest() : int_value_(0), expected_thread_(nullptr) {} - - int int_value_; - Thread* expected_thread_; -}; - -// Functor for creating an invoker. -struct CreateInvoker { - CreateInvoker(std::unique_ptr* invoker) - : invoker_(invoker) {} - void operator()() { invoker_->reset(new GuardedAsyncInvoker()); } - std::unique_ptr* invoker_; -}; - -// Test that we can call AsyncInvoke() after the thread died. -TEST_F(GuardedAsyncInvokeTest, KillThreadFireAndForget) { - // Create and start the thread. - std::unique_ptr thread(Thread::Create()); - thread->Start(); - std::unique_ptr invoker; - // Create the invoker on |thread|. - thread->Invoke(RTC_FROM_HERE, CreateInvoker(&invoker)); - // Kill |thread|. - thread = nullptr; - // Try calling functor. - AtomicBool called; - EXPECT_FALSE(invoker->AsyncInvoke(RTC_FROM_HERE, FunctorB(&called))); - // With thread gone, nothing should happen. - WAIT(called.get(), kWaitTimeout); - EXPECT_FALSE(called.get()); -} - -// The remaining tests check that GuardedAsyncInvoker behaves as AsyncInvoker -// when Thread is still alive. -TEST_F(GuardedAsyncInvokeTest, FireAndForget) { - GuardedAsyncInvoker invoker; - // Try calling functor. - AtomicBool called; - EXPECT_TRUE(invoker.AsyncInvoke(RTC_FROM_HERE, FunctorB(&called))); - EXPECT_TRUE_WAIT(called.get(), kWaitTimeout); -} - -TEST_F(GuardedAsyncInvokeTest, NonCopyableFunctor) { - GuardedAsyncInvoker invoker; - // Try calling functor. 
- AtomicBool called; - EXPECT_TRUE(invoker.AsyncInvoke(RTC_FROM_HERE, FunctorD(&called))); - EXPECT_TRUE_WAIT(called.get(), kWaitTimeout); -} - -TEST_F(GuardedAsyncInvokeTest, Flush) { - GuardedAsyncInvoker invoker; - AtomicBool flag1; - AtomicBool flag2; - // Queue two async calls to the current thread. - EXPECT_TRUE(invoker.AsyncInvoke(RTC_FROM_HERE, FunctorB(&flag1))); - EXPECT_TRUE(invoker.AsyncInvoke(RTC_FROM_HERE, FunctorB(&flag2))); - // Because we haven't pumped messages, these should not have run yet. - EXPECT_FALSE(flag1.get()); - EXPECT_FALSE(flag2.get()); - // Force them to run now. - EXPECT_TRUE(invoker.Flush()); - EXPECT_TRUE(flag1.get()); - EXPECT_TRUE(flag2.get()); -} - -TEST_F(GuardedAsyncInvokeTest, FlushWithIds) { - GuardedAsyncInvoker invoker; - AtomicBool flag1; - AtomicBool flag2; - // Queue two async calls to the current thread, one with a message id. - EXPECT_TRUE(invoker.AsyncInvoke(RTC_FROM_HERE, FunctorB(&flag1), 5)); - EXPECT_TRUE(invoker.AsyncInvoke(RTC_FROM_HERE, FunctorB(&flag2))); - // Because we haven't pumped messages, these should not have run yet. - EXPECT_FALSE(flag1.get()); - EXPECT_FALSE(flag2.get()); - // Execute pending calls with id == 5. - EXPECT_TRUE(invoker.Flush(5)); - EXPECT_TRUE(flag1.get()); - EXPECT_FALSE(flag2.get()); - flag1 = false; - // Execute all pending calls. The id == 5 call should not execute again. 
- EXPECT_TRUE(invoker.Flush()); - EXPECT_FALSE(flag1.get()); - EXPECT_TRUE(flag2.get()); -} - -void ThreadIsCurrent(Thread* thread, bool* result, Event* event) { - *result = thread->IsCurrent(); - event->Set(); -} - void WaitAndSetEvent(Event* wait_event, Event* set_event) { wait_event->Wait(Event::kForever); set_event->Set(); @@ -948,15 +953,6 @@ class DestructionFunctor { bool was_invoked_ = false; }; -TEST(ThreadPostTaskTest, InvokesWithBind) { - std::unique_ptr background_thread(rtc::Thread::Create()); - background_thread->Start(); - - Event event; - background_thread->PostTask(RTC_FROM_HERE, Bind(&Event::Set, &event)); - event.Wait(Event::kForever); -} - TEST(ThreadPostTaskTest, InvokesWithLambda) { std::unique_ptr background_thread(rtc::Thread::Create()); background_thread->Start(); @@ -1059,9 +1055,13 @@ TEST(ThreadPostTaskTest, InvokesOnBackgroundThread) { Event event; bool was_invoked_on_background_thread = false; - background_thread->PostTask(RTC_FROM_HERE, - Bind(&ThreadIsCurrent, background_thread.get(), - &was_invoked_on_background_thread, &event)); + Thread* background_thread_ptr = background_thread.get(); + background_thread->PostTask( + RTC_FROM_HERE, + [background_thread_ptr, &was_invoked_on_background_thread, &event] { + was_invoked_on_background_thread = background_thread_ptr->IsCurrent(); + event.Set(); + }); event.Wait(Event::kForever); EXPECT_TRUE(was_invoked_on_background_thread); @@ -1075,9 +1075,10 @@ TEST(ThreadPostTaskTest, InvokesAsynchronously) { // thread. The second event ensures that the message is processed. 
Event event_set_by_test_thread; Event event_set_by_background_thread; - background_thread->PostTask(RTC_FROM_HERE, - Bind(&WaitAndSetEvent, &event_set_by_test_thread, - &event_set_by_background_thread)); + background_thread->PostTask(RTC_FROM_HERE, [&event_set_by_test_thread, + &event_set_by_background_thread] { + WaitAndSetEvent(&event_set_by_test_thread, &event_set_by_background_thread); + }); event_set_by_test_thread.Set(); event_set_by_background_thread.Wait(Event::kForever); } @@ -1091,12 +1092,12 @@ TEST(ThreadPostTaskTest, InvokesInPostedOrder) { Event third; Event fourth; - background_thread->PostTask(RTC_FROM_HERE, - Bind(&WaitAndSetEvent, &first, &second)); - background_thread->PostTask(RTC_FROM_HERE, - Bind(&WaitAndSetEvent, &second, &third)); - background_thread->PostTask(RTC_FROM_HERE, - Bind(&WaitAndSetEvent, &third, &fourth)); + background_thread->PostTask( + RTC_FROM_HERE, [&first, &second] { WaitAndSetEvent(&first, &second); }); + background_thread->PostTask( + RTC_FROM_HERE, [&second, &third] { WaitAndSetEvent(&second, &third); }); + background_thread->PostTask( + RTC_FROM_HERE, [&third, &fourth] { WaitAndSetEvent(&third, &fourth); }); // All tasks have been posted before the first one is unblocked. 
first.Set(); @@ -1114,8 +1115,10 @@ TEST(ThreadPostDelayedTaskTest, InvokesAsynchronously) { Event event_set_by_background_thread; background_thread->PostDelayedTask( RTC_FROM_HERE, - Bind(&WaitAndSetEvent, &event_set_by_test_thread, - &event_set_by_background_thread), + [&event_set_by_test_thread, &event_set_by_background_thread] { + WaitAndSetEvent(&event_set_by_test_thread, + &event_set_by_background_thread); + }, /*milliseconds=*/10); event_set_by_test_thread.Set(); event_set_by_background_thread.Wait(Event::kForever); @@ -1131,15 +1134,15 @@ TEST(ThreadPostDelayedTaskTest, InvokesInDelayOrder) { Event third; Event fourth; - background_thread->PostDelayedTask(RTC_FROM_HERE, - Bind(&WaitAndSetEvent, &third, &fourth), - /*milliseconds=*/11); - background_thread->PostDelayedTask(RTC_FROM_HERE, - Bind(&WaitAndSetEvent, &first, &second), - /*milliseconds=*/9); - background_thread->PostDelayedTask(RTC_FROM_HERE, - Bind(&WaitAndSetEvent, &second, &third), - /*milliseconds=*/10); + background_thread->PostDelayedTask( + RTC_FROM_HERE, [&third, &fourth] { WaitAndSetEvent(&third, &fourth); }, + /*milliseconds=*/11); + background_thread->PostDelayedTask( + RTC_FROM_HERE, [&first, &second] { WaitAndSetEvent(&first, &second); }, + /*milliseconds=*/9); + background_thread->PostDelayedTask( + RTC_FROM_HERE, [&second, &third] { WaitAndSetEvent(&second, &third); }, + /*milliseconds=*/10); // All tasks have been posted before the first one is unblocked. 
first.Set(); diff --git a/rtc_base/time/BUILD.gn b/rtc_base/time/BUILD.gn index e13ccd35ee..9a1d99b610 100644 --- a/rtc_base/time/BUILD.gn +++ b/rtc_base/time/BUILD.gn @@ -17,5 +17,4 @@ rtc_library("timestamp_extrapolator") { "timestamp_extrapolator.cc", "timestamp_extrapolator.h", ] - deps = [ "../synchronization:rw_lock_wrapper" ] } diff --git a/rtc_base/time/timestamp_extrapolator.cc b/rtc_base/time/timestamp_extrapolator.cc index bf9f726c42..99445284dc 100644 --- a/rtc_base/time/timestamp_extrapolator.cc +++ b/rtc_base/time/timestamp_extrapolator.cc @@ -15,8 +15,7 @@ namespace webrtc { TimestampExtrapolator::TimestampExtrapolator(int64_t start_ms) - : _rwLock(RWLockWrapper::CreateRWLock()), - _startMs(0), + : _startMs(0), _firstTimestamp(0), _wrapArounds(0), _prevUnwrappedTimestamp(-1), @@ -34,12 +33,7 @@ TimestampExtrapolator::TimestampExtrapolator(int64_t start_ms) Reset(start_ms); } -TimestampExtrapolator::~TimestampExtrapolator() { - delete _rwLock; -} - void TimestampExtrapolator::Reset(int64_t start_ms) { - WriteLockScoped wl(*_rwLock); _startMs = start_ms; _prevMs = _startMs; _firstTimestamp = 0; @@ -58,13 +52,10 @@ void TimestampExtrapolator::Reset(int64_t start_ms) { } void TimestampExtrapolator::Update(int64_t tMs, uint32_t ts90khz) { - _rwLock->AcquireLockExclusive(); if (tMs - _prevMs > 10e3) { // Ten seconds without a complete frame. // Reset the extrapolator - _rwLock->ReleaseLockExclusive(); Reset(tMs); - _rwLock->AcquireLockExclusive(); } else { _prevMs = tMs; } @@ -100,7 +91,6 @@ void TimestampExtrapolator::Update(int64_t tMs, uint32_t ts90khz) { if (_prevUnwrappedTimestamp >= 0 && unwrapped_ts90khz < _prevUnwrappedTimestamp) { // Drop reordered frames. 
- _rwLock->ReleaseLockExclusive(); return; } @@ -131,11 +121,9 @@ void TimestampExtrapolator::Update(int64_t tMs, uint32_t ts90khz) { if (_packetCount < _startUpFilterDelayInPackets) { _packetCount++; } - _rwLock->ReleaseLockExclusive(); } int64_t TimestampExtrapolator::ExtrapolateLocalTime(uint32_t timestamp90khz) { - ReadLockScoped rl(*_rwLock); int64_t localTimeMs = 0; CheckForWrapArounds(timestamp90khz); double unwrapped_ts90khz = diff --git a/rtc_base/time/timestamp_extrapolator.h b/rtc_base/time/timestamp_extrapolator.h index 63af57b227..b325d2cbaa 100644 --- a/rtc_base/time/timestamp_extrapolator.h +++ b/rtc_base/time/timestamp_extrapolator.h @@ -13,14 +13,12 @@ #include -#include "rtc_base/synchronization/rw_lock_wrapper.h" - namespace webrtc { +// Not thread safe. class TimestampExtrapolator { public: explicit TimestampExtrapolator(int64_t start_ms); - ~TimestampExtrapolator(); void Update(int64_t tMs, uint32_t ts90khz); int64_t ExtrapolateLocalTime(uint32_t timestamp90khz); void Reset(int64_t start_ms); @@ -28,7 +26,6 @@ class TimestampExtrapolator { private: void CheckForWrapArounds(uint32_t ts90khz); bool DelayChangeDetection(double error); - RWLockWrapper* _rwLock; double _w[2]; double _pP[2][2]; int64_t _startMs; diff --git a/rtc_base/time_utils.cc b/rtc_base/time_utils.cc index 8d919262d3..fe63d3a8ed 100644 --- a/rtc_base/time_utils.cc +++ b/rtc_base/time_utils.cc @@ -12,23 +12,15 @@ #if defined(WEBRTC_POSIX) #include -#if defined(WEBRTC_MAC) -#include -#endif #endif #if defined(WEBRTC_WIN) -// clang-format off -// clang formatting would put last, -// which leads to compilation failure. 
-#include -#include #include -// clang-format on #endif #include "rtc_base/checks.h" #include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/system_time.h" #include "rtc_base/time_utils.h" namespace rtc { @@ -141,61 +133,12 @@ void SyncWithNtp(int64_t time_from_ntp_server_ms) { TimeHelper::SyncWithNtp(time_from_ntp_server_ms); } -#endif // defined(WINUWP) - -int64_t SystemTimeNanos() { - int64_t ticks; -#if defined(WEBRTC_MAC) - static mach_timebase_info_data_t timebase; - if (timebase.denom == 0) { - // Get the timebase if this is the first time we run. - // Recommended by Apple's QA1398. - if (mach_timebase_info(&timebase) != KERN_SUCCESS) { - RTC_NOTREACHED(); - } - } - // Use timebase to convert absolute time tick units into nanoseconds. - const auto mul = [](uint64_t a, uint32_t b) -> int64_t { - RTC_DCHECK_NE(b, 0); - RTC_DCHECK_LE(a, std::numeric_limits::max() / b) - << "The multiplication " << a << " * " << b << " overflows"; - return rtc::dchecked_cast(a * b); - }; - ticks = mul(mach_absolute_time(), timebase.numer) / timebase.denom; -#elif defined(WEBRTC_POSIX) - struct timespec ts; - // TODO(deadbeef): Do we need to handle the case when CLOCK_MONOTONIC is not - // supported? - clock_gettime(CLOCK_MONOTONIC, &ts); - ticks = kNumNanosecsPerSec * static_cast(ts.tv_sec) + - static_cast(ts.tv_nsec); -#elif defined(WINUWP) - ticks = TimeHelper::TicksNs(); -#elif defined(WEBRTC_WIN) - static volatile LONG last_timegettime = 0; - static volatile int64_t num_wrap_timegettime = 0; - volatile LONG* last_timegettime_ptr = &last_timegettime; - DWORD now = timeGetTime(); - // Atomically update the last gotten time - DWORD old = InterlockedExchange(last_timegettime_ptr, now); - if (now < old) { - // If now is earlier than old, there may have been a race between threads. - // 0x0fffffff ~3.1 days, the code will not take that long to execute - // so it must have been a wrap around. 
- if (old > 0xf0000000 && now < 0x0fffffff) { - num_wrap_timegettime++; - } - } - ticks = now + (num_wrap_timegettime << 32); - // TODO(deadbeef): Calculate with nanosecond precision. Otherwise, we're - // just wasting a multiply and divide when doing Time() on Windows. - ticks = ticks * kNumNanosecsPerMillisec; -#else -#error Unsupported platform. -#endif - return ticks; +int64_t WinUwpSystemTimeNanos() { + return TimeHelper::TicksNs(); } +#endif // defined(WINUWP) + int64_t SystemTimeMillis() { return static_cast(SystemTimeNanos() / kNumNanosecsPerMillisec); } @@ -247,7 +190,7 @@ int64_t TimestampWrapAroundHandler::Unwrap(uint32_t ts) { ++num_wrap_; } else if ((ts - last_ts_) > 0xf0000000) { // Backwards wrap. Unwrap with last wrap count and don't update last_ts_. - return ts + ((num_wrap_ - 1) << 32); + return ts + (num_wrap_ - 1) * (int64_t{1} << 32); } last_ts_ = ts; diff --git a/rtc_base/time_utils.h b/rtc_base/time_utils.h index 147ab8daf8..de3c58c815 100644 --- a/rtc_base/time_utils.h +++ b/rtc_base/time_utils.h @@ -16,6 +16,7 @@ #include "rtc_base/checks.h" #include "rtc_base/system/rtc_export.h" +#include "rtc_base/system_time.h" namespace rtc { @@ -61,11 +62,16 @@ RTC_EXPORT ClockInterface* GetClockForTesting(); // Synchronizes the current clock based upon an NTP server's epoch in // milliseconds. void SyncWithNtp(int64_t time_from_ntp_server_ms); + +// Returns the current time in nanoseconds. The clock is synchonized with the +// system wall clock time upon instatiation. It may also be synchronized using +// the SyncWithNtp() function above. Please note that the clock will most likely +// drift away from the system wall clock time as time goes by. +int64_t WinUwpSystemTimeNanos(); #endif // defined(WINUWP) // Returns the actual system time, even if a clock is set for testing. // Useful for timeouts while using a test clock, or for logging. -int64_t SystemTimeNanos(); int64_t SystemTimeMillis(); // Returns the current time in milliseconds in 32 bits. 
diff --git a/rtc_base/unique_id_generator.cc b/rtc_base/unique_id_generator.cc index d41fa8d186..9fa3021c6f 100644 --- a/rtc_base/unique_id_generator.cc +++ b/rtc_base/unique_id_generator.cc @@ -26,6 +26,8 @@ UniqueRandomIdGenerator::UniqueRandomIdGenerator(ArrayView known_ids) UniqueRandomIdGenerator::~UniqueRandomIdGenerator() = default; uint32_t UniqueRandomIdGenerator::GenerateId() { + webrtc::MutexLock lock(&mutex_); + RTC_CHECK_LT(known_ids_.size(), std::numeric_limits::max() - 1); while (true) { auto pair = known_ids_.insert(CreateRandomNonZeroId()); @@ -36,6 +38,7 @@ uint32_t UniqueRandomIdGenerator::GenerateId() { } bool UniqueRandomIdGenerator::AddKnownId(uint32_t value) { + webrtc::MutexLock lock(&mutex_); return known_ids_.insert(value).second; } diff --git a/rtc_base/unique_id_generator.h b/rtc_base/unique_id_generator.h index 836dc70b61..22398fd3f2 100644 --- a/rtc_base/unique_id_generator.h +++ b/rtc_base/unique_id_generator.h @@ -16,6 +16,9 @@ #include #include "api/array_view.h" +#include "api/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" namespace rtc { @@ -47,9 +50,10 @@ class UniqueNumberGenerator { bool AddKnownId(TIntegral value); private: + RTC_NO_UNIQUE_ADDRESS webrtc::SequenceChecker sequence_checker_; static_assert(std::is_integral::value, "Must be integral type."); - TIntegral counter_; - std::set known_ids_; + TIntegral counter_ RTC_GUARDED_BY(sequence_checker_); + std::set known_ids_ RTC_GUARDED_BY(sequence_checker_); }; // This class will generate unique ids. Ids are 32 bit unsigned integers. @@ -76,7 +80,10 @@ class UniqueRandomIdGenerator { bool AddKnownId(uint32_t value); private: - std::set known_ids_; + // TODO(bugs.webrtc.org/12666): This lock is needed due to an instance in + // SdpOfferAnswerHandler being shared between threads. + webrtc::Mutex mutex_; + std::set known_ids_ RTC_GUARDED_BY(&mutex_); }; // This class will generate strings. 
A common use case is for identifiers. @@ -104,18 +111,23 @@ class UniqueStringGenerator { }; template -UniqueNumberGenerator::UniqueNumberGenerator() : counter_(0) {} +UniqueNumberGenerator::UniqueNumberGenerator() : counter_(0) { + sequence_checker_.Detach(); +} template UniqueNumberGenerator::UniqueNumberGenerator( ArrayView known_ids) - : counter_(0), known_ids_(known_ids.begin(), known_ids.end()) {} + : counter_(0), known_ids_(known_ids.begin(), known_ids.end()) { + sequence_checker_.Detach(); +} template UniqueNumberGenerator::~UniqueNumberGenerator() {} template TIntegral UniqueNumberGenerator::GenerateNumber() { + RTC_DCHECK_RUN_ON(&sequence_checker_); while (true) { RTC_CHECK_LT(counter_, std::numeric_limits::max()); auto pair = known_ids_.insert(counter_++); @@ -127,6 +139,7 @@ TIntegral UniqueNumberGenerator::GenerateNumber() { template bool UniqueNumberGenerator::AddKnownId(TIntegral value) { + RTC_DCHECK_RUN_ON(&sequence_checker_); return known_ids_.insert(value).second; } } // namespace rtc diff --git a/rtc_base/unique_id_generator_unittest.cc b/rtc_base/unique_id_generator_unittest.cc index 868b348b11..835a57e162 100644 --- a/rtc_base/unique_id_generator_unittest.cc +++ b/rtc_base/unique_id_generator_unittest.cc @@ -15,6 +15,7 @@ #include "absl/algorithm/container.h" #include "api/array_view.h" +#include "api/task_queue/task_queue_base.h" #include "rtc_base/gunit.h" #include "rtc_base/helpers.h" #include "test/gmock.h" @@ -23,6 +24,21 @@ using ::testing::IsEmpty; using ::testing::Test; namespace rtc { +namespace { +// Utility class that registers itself as the currently active task queue. 
+class FakeTaskQueue : public webrtc::TaskQueueBase { + public: + FakeTaskQueue() : task_queue_setter_(this) {} + + void Delete() override {} + void PostTask(std::unique_ptr task) override {} + void PostDelayedTask(std::unique_ptr task, + uint32_t milliseconds) override {} + + private: + CurrentTaskQueueSetter task_queue_setter_; +}; +} // namespace template class UniqueIdGeneratorTest : public Test {}; @@ -148,4 +164,39 @@ TYPED_TEST(UniqueIdGeneratorTest, EXPECT_FALSE(generator2.AddKnownId(id)); } +// Tests that it's OK to construct the generator in one execution environment +// (thread/task queue) but use it in another. +TEST(UniqueNumberGenerator, UsedOnSecondaryThread) { + const auto* current_tq = webrtc::TaskQueueBase::Current(); + // Construct the generator before `fake_task_queue` to ensure that it is + // constructed in a different execution environment than what + // `fake_task_queue` will represent. + UniqueNumberGenerator generator; + + FakeTaskQueue fake_task_queue; + // Sanity check to make sure we're in a different runtime environment. + ASSERT_NE(current_tq, webrtc::TaskQueueBase::Current()); + + // Generating an id should be fine in this context. + generator.GenerateNumber(); +} + +#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) +TEST(UniqueNumberGeneratorDeathTest, FailsWhenUsedInWrongContext) { + // Instantiate the generator before the `loop`. This ensures that + // thread/sequence checkers will pick up a different thread environment than + // `fake_task_queue` will represent. + UniqueNumberGenerator generator; + // Generate an ID on the current thread. This causes the generator to attach + // to the current thread context. + generator.GenerateNumber(); + + // Instantiate a fake task queue that will register itself as the current tq. + FakeTaskQueue fake_task_queue; + + // Attempting to generate an id should now trigger a dcheck. 
+ EXPECT_DEATH(generator.GenerateNumber(), ""); +} +#endif + } // namespace rtc diff --git a/rtc_base/untyped_function.h b/rtc_base/untyped_function.h new file mode 100644 index 0000000000..c1f59458b9 --- /dev/null +++ b/rtc_base/untyped_function.h @@ -0,0 +1,324 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_UNTYPED_FUNCTION_H_ +#define RTC_BASE_UNTYPED_FUNCTION_H_ + +#include +#include +#include +#include +#include + +#include "rtc_base/system/assume.h" + +namespace webrtc { +namespace webrtc_function_impl { + +using FunVoid = void(); + +// Inline storage size is this many machine words. +enum : size_t { kInlineStorageWords = 4 }; + +union VoidUnion { + void* void_ptr; + FunVoid* fun_ptr; + typename std::aligned_storage::type + inline_storage; +}; + +// Returns the number of elements of the `inline_storage` array required to +// store an object of type T. +template +constexpr size_t InlineStorageSize() { + // sizeof(T) / sizeof(uintptr_t), but rounded up. + return (sizeof(T) + sizeof(uintptr_t) - 1) / sizeof(uintptr_t); +} + +template +struct CallHelpers; +template +struct CallHelpers { + // Return type of the three helpers below. + using return_type = RetT; + // Complete function type of the three helpers below. + using function_type = RetT(VoidUnion*, ArgT...); + // Helper for calling the `void_ptr` case of VoidUnion. + template + static RetT CallVoidPtr(VoidUnion* vu, ArgT... args) { + return (*static_cast(vu->void_ptr))(std::forward(args)...); + } + // Helper for calling the `fun_ptr` case of VoidUnion. + static RetT CallFunPtr(VoidUnion* vu, ArgT... 
args) { + return (reinterpret_cast(vu->fun_ptr))( + std::forward(args)...); + } + // Helper for calling the `inline_storage` case of VoidUnion. + template + static RetT CallInlineStorage(VoidUnion* vu, ArgT... args) { + return (*reinterpret_cast(&vu->inline_storage))( + std::forward(args)...); + } +}; + +} // namespace webrtc_function_impl + +// A class that holds (and owns) any callable. The same function call signature +// must be provided when constructing and calling the object. +// +// The point of not having the call signature as a class template parameter is +// to have one single concrete type for all signatures; this reduces binary +// size. +class UntypedFunction final { + public: + // Callables of at most this size can be stored inline, if they are trivial. + // (Useful in tests and benchmarks; avoid using this in production code.) + enum : size_t { + kInlineStorageSize = sizeof(webrtc_function_impl::VoidUnion::inline_storage) + }; + static_assert(kInlineStorageSize == + webrtc_function_impl::kInlineStorageWords * + sizeof(uintptr_t), + ""); + + // The *UntypedFunctionArgs structs are used to transfer arguments from + // PrepareArgs() to Create(). They are trivial, but may own heap allocations, + // so make sure to pass them to Create() exactly once! + // + // The point of doing Create(PrepareArgs(foo)) instead of just Create(foo) is + // to separate the code that has to be inlined (PrepareArgs) from the code + // that can be noninlined (Create); the *UntypedFunctionArgs types are + // designed to efficiently carry the required information from one to the + // other. + template + struct TrivialUntypedFunctionArgs { + static_assert(N >= 1, ""); + static_assert(N <= webrtc_function_impl::kInlineStorageWords, ""); + // We use an uintptr_t array here instead of std::aligned_storage, because + // the former can be efficiently passed in registers when using + // TrivialUntypedFunctionArgs as a function argument. 
(We can't do the same + // in VoidUnion, because std::aligned_storage but not uintptr_t can be + // legally reinterpret_casted to arbitrary types. + // TrivialUntypedFunctionArgs, on the other hand, only needs to handle + // placement new and memcpy.) + alignas(std::max_align_t) uintptr_t inline_storage[N]; + webrtc_function_impl::FunVoid* call; + }; + struct NontrivialUntypedFunctionArgs { + void* void_ptr; + webrtc_function_impl::FunVoid* call; + void (*del)(webrtc_function_impl::VoidUnion*); + }; + struct FunctionPointerUntypedFunctionArgs { + webrtc_function_impl::FunVoid* fun_ptr; + webrtc_function_impl::FunVoid* call; + }; + + // Create function for lambdas and other callables that are trivial and small; + // it accepts every type of argument except those noted in its enable_if call. + template < + typename Signature, + typename F, + typename F_deref = typename std::remove_reference::type, + typename std::enable_if< + // Not for function pointers; we have another overload for that below. + !std::is_function< + typename std::remove_pointer::type>::value && + + // Not for nullptr; we have a constructor for that below. + !std::is_same::type>::value && + + // Not for UntypedFunction objects; use move construction or + // assignment. + !std::is_same::type>::value && + + // Only for trivial callables that will fit in inline storage. + std::is_trivially_move_constructible::value && + std::is_trivially_destructible::value && + sizeof(F_deref) <= kInlineStorageSize>::type* = nullptr, + size_t InlineSize = webrtc_function_impl::InlineStorageSize()> + static TrivialUntypedFunctionArgs PrepareArgs(F&& f) { + // The callable is trivial and small enough, so we just store its bytes + // in the inline storage. 
+ TrivialUntypedFunctionArgs args; + new (&args.inline_storage) F_deref(std::forward(f)); + args.call = reinterpret_cast( + webrtc_function_impl::CallHelpers< + Signature>::template CallInlineStorage); + return args; + } + template + static UntypedFunction Create(TrivialUntypedFunctionArgs args) { + webrtc_function_impl::VoidUnion vu; + std::memcpy(&vu.inline_storage, args.inline_storage, + sizeof(args.inline_storage)); + return UntypedFunction(vu, args.call, nullptr); + } + + // Create function for lambdas and other callables that are nontrivial or + // large; it accepts every type of argument except those noted in its + // enable_if call. + template ::type, + typename std::enable_if< + // Not for function pointers; we have another overload for that + // below. + !std::is_function< + typename std::remove_pointer::type>::value && + + // Not for nullptr; we have a constructor for that below. + !std::is_same::type>::value && + + // Not for UntypedFunction objects; use move construction or + // assignment. + !std::is_same::type>::value && + + // Only for nontrivial callables, or callables that won't fit in + // inline storage. + !(std::is_trivially_move_constructible::value && + std::is_trivially_destructible::value && + sizeof(F_deref) <= kInlineStorageSize)>::type* = nullptr> + static NontrivialUntypedFunctionArgs PrepareArgs(F&& f) { + // The callable is either nontrivial or too large, so we can't keep it + // in the inline storage; use the heap instead. + NontrivialUntypedFunctionArgs args; + args.void_ptr = new F_deref(std::forward(f)); + args.call = reinterpret_cast( + webrtc_function_impl::CallHelpers::template CallVoidPtr< + F_deref>); + args.del = static_cast( + [](webrtc_function_impl::VoidUnion* vu) { + // Assuming that this pointer isn't null allows the + // compiler to eliminate a null check in the (inlined) + // delete operation. 
+ RTC_ASSUME(vu->void_ptr != nullptr); + delete reinterpret_cast(vu->void_ptr); + }); + return args; + } + static UntypedFunction Create(NontrivialUntypedFunctionArgs args) { + webrtc_function_impl::VoidUnion vu; + vu.void_ptr = args.void_ptr; + return UntypedFunction(vu, args.call, args.del); + } + + // Create function that accepts function pointers. If the argument is null, + // the result is an empty UntypedFunction. + template + static FunctionPointerUntypedFunctionArgs PrepareArgs(Signature* f) { + FunctionPointerUntypedFunctionArgs args; + args.fun_ptr = reinterpret_cast(f); + args.call = reinterpret_cast( + webrtc_function_impl::CallHelpers::CallFunPtr); + return args; + } + static UntypedFunction Create(FunctionPointerUntypedFunctionArgs args) { + webrtc_function_impl::VoidUnion vu; + vu.fun_ptr = args.fun_ptr; + return UntypedFunction(vu, args.fun_ptr == nullptr ? nullptr : args.call, + nullptr); + } + + // Prepares arguments and creates an UntypedFunction in one go. + template + static UntypedFunction Create(F&& f) { + return Create(PrepareArgs(std::forward(f))); + } + + // Default constructor. Creates an empty UntypedFunction. + UntypedFunction() : call_(nullptr), delete_(nullptr) {} + + // Nullptr constructor and assignment. Creates an empty UntypedFunction. + UntypedFunction(std::nullptr_t) // NOLINT(runtime/explicit) + : call_(nullptr), delete_(nullptr) {} + UntypedFunction& operator=(std::nullptr_t) { + call_ = nullptr; + if (delete_) { + delete_(&f_); + delete_ = nullptr; + } + return *this; + } + + // Not copyable. + UntypedFunction(const UntypedFunction&) = delete; + UntypedFunction& operator=(const UntypedFunction&) = delete; + + // Move construction and assignment. 
+ UntypedFunction(UntypedFunction&& other) + : f_(other.f_), call_(other.call_), delete_(other.delete_) { + other.delete_ = nullptr; + } + UntypedFunction& operator=(UntypedFunction&& other) { + if (delete_) { + delete_(&f_); + } + f_ = other.f_; + call_ = other.call_; + delete_ = other.delete_; + other.delete_ = nullptr; + return *this; + } + + ~UntypedFunction() { + if (delete_) { + delete_(&f_); + } + } + + friend void swap(UntypedFunction& a, UntypedFunction& b) { + using std::swap; + swap(a.f_, b.f_); + swap(a.call_, b.call_); + swap(a.delete_, b.delete_); + } + + // Returns true if we have a function, false if we don't (i.e., we're null). + explicit operator bool() const { return call_ != nullptr; } + + template + typename webrtc_function_impl::CallHelpers::return_type Call( + ArgT&&... args) { + return reinterpret_cast< + typename webrtc_function_impl::CallHelpers::function_type*>( + call_)(&f_, std::forward(args)...); + } + + // Returns true iff we don't need to call a destructor. This is guaranteed + // to hold for a moved-from object. + bool IsTriviallyDestructible() { return delete_ == nullptr; } + + private: + UntypedFunction(webrtc_function_impl::VoidUnion f, + webrtc_function_impl::FunVoid* call, + void (*del)(webrtc_function_impl::VoidUnion*)) + : f_(f), call_(call), delete_(del) {} + + // The callable thing, or a pointer to it. + webrtc_function_impl::VoidUnion f_; + + // Pointer to a dispatch function that knows the type of the callable thing + // that's stored in f_, and how to call it. An UntypedFunction object is empty + // (null) iff call_ is null. + webrtc_function_impl::FunVoid* call_; + + // Pointer to a function that knows how to delete the callable thing that's + // stored in f_. Null if `f_` is trivially deletable. 
+ void (*delete_)(webrtc_function_impl::VoidUnion*); +}; + +} // namespace webrtc + +#endif // RTC_BASE_UNTYPED_FUNCTION_H_ diff --git a/rtc_base/untyped_function_unittest.cc b/rtc_base/untyped_function_unittest.cc new file mode 100644 index 0000000000..8ea26e7a43 --- /dev/null +++ b/rtc_base/untyped_function_unittest.cc @@ -0,0 +1,309 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/untyped_function.h" + +#include +#include + +#include "test/gmock.h" +#include "test/gtest.h" + +namespace webrtc { +namespace { + +using ::testing::Pointee; + +TEST(UntypedFunction, Empty1) { + UntypedFunction uf; + EXPECT_FALSE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); +} + +TEST(UntypedFunction, Empty2) { + UntypedFunction uf = nullptr; + EXPECT_FALSE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); +} + +TEST(UntypedFunction, Empty3) { + UntypedFunction uf = UntypedFunction::Create(nullptr); + EXPECT_FALSE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); +} + +TEST(UntypedFunction, CallTrivialWithInt) { + auto uf = UntypedFunction::Create([](int x) { return x + 5; }); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(17), 22); +} + +TEST(UntypedFunction, CallTrivialWithPointer) { + auto uf = UntypedFunction::Create([](int* x) { return *x; }); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + int x = 12; + EXPECT_EQ(uf.Call(&x), 12); +} + +TEST(UntypedFunction, CallTrivialWithReference) { + auto uf = UntypedFunction::Create([](int& x) { x = 3; }); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + int x = 12; + uf.Call(x); + 
EXPECT_EQ(x, 3); +} + +TEST(UntypedFunction, CallTrivialWithRvalueReference) { + auto uf = UntypedFunction::Create([](int&& x) { return x - 2; }); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(34), 32); +} + +TEST(UntypedFunction, CallNontrivialWithInt) { + std::vector list; + auto uf = UntypedFunction::Create([list](int x) mutable { + list.push_back(x); + return list.size(); + }); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(17), 1); + EXPECT_EQ(uf.Call(17), 2); +} + +TEST(UntypedFunction, CallNontrivialWithPointer) { + std::vector list; + auto uf = UntypedFunction::Create([list](int* x) mutable { + list.push_back(*x); + return list.data(); + }); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + int x = 12; + EXPECT_THAT(uf.Call(&x), Pointee(12)); +} + +TEST(UntypedFunction, CallNontrivialWithReference) { + std::vector list = {34, 35, 36}; + auto uf = + UntypedFunction::Create([list](int& x) { x = list[1]; }); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + int x = 12; + uf.Call(x); + EXPECT_EQ(x, 35); +} + +TEST(UntypedFunction, CallNontrivialWithRvalueReference) { + std::vector list; + auto uf = UntypedFunction::Create([list](int&& x) mutable { + list.push_back(x); + return list.size(); + }); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(34), 1); + EXPECT_EQ(uf.Call(34), 2); +} + +int AddFive(int x) { + return x + 5; +} +int DereferencePointer(int* x) { + return *x; +} +void AssignThree(int& x) { + x = 3; +} +int SubtractTwo(int&& x) { + return x - 2; +} + +TEST(UntypedFunction, CallFunctionPointerWithInt) { + auto uf = UntypedFunction::Create(AddFive); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(17), 22); +} + +TEST(UntypedFunction, CallFunctionPointerWithPointer) { + auto uf = UntypedFunction::Create(DereferencePointer); + EXPECT_TRUE(uf); + 
EXPECT_TRUE(uf.IsTriviallyDestructible()); + int x = 12; + EXPECT_EQ(uf.Call(&x), 12); +} + +TEST(UntypedFunction, CallFunctionPointerWithReference) { + auto uf = UntypedFunction::Create(AssignThree); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + int x = 12; + uf.Call(x); + EXPECT_EQ(x, 3); +} + +TEST(UntypedFunction, CallFunctionPointerWithRvalueReference) { + auto uf = UntypedFunction::Create(SubtractTwo); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(34), 32); +} + +TEST(UntypedFunction, CallTrivialWithNoArgs) { + int arr[] = {1, 2, 3}; + static_assert(sizeof(arr) <= UntypedFunction::kInlineStorageSize, ""); + auto uf = UntypedFunction::Create([arr] { return arr[1]; }); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(), 2); +} + +TEST(UntypedFunction, CallLargeTrivialWithNoArgs) { + int arr[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0}; + static_assert(sizeof(arr) > UntypedFunction::kInlineStorageSize, ""); + auto uf = UntypedFunction::Create([arr] { return arr[4]; }); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(), 5); +} + +TEST(UntypedFunction, MoveonlyReturnValue) { + auto uf = UntypedFunction::Create()>( + [] { return std::make_unique(567); }); + EXPECT_THAT(uf.Call()>(), Pointee(567)); +} + +TEST(UntypedFunction, MoveonlyArgument) { + auto uf = UntypedFunction::Create)>( + [](std::unique_ptr x) { return *x + 19; }); + EXPECT_EQ(uf.Call)>(std::make_unique(40)), 59); +} + +TEST(UntypedFunction, MoveOnlyCallable) { + auto uf = UntypedFunction::Create( + [x = std::make_unique(17)] { return ++*x; }); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(), 18); + EXPECT_EQ(uf.Call(), 19); + UntypedFunction uf2 = std::move(uf); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_FALSE(uf2.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(), 20); + 
EXPECT_EQ(uf.Call(), 21); +} + +class Destroyer { + public: + explicit Destroyer(int& destroy_count) : destroy_count_(&destroy_count) {} + ~Destroyer() { ++*destroy_count_; } + int operator()() { return 72; } + int* destroy_count_; +}; + +TEST(UntypedFunction, CallableIsDestroyed) { + int destroy_count = 0; + { + auto uf = UntypedFunction::Create(Destroyer(destroy_count)); + // Destruction count is 1 here, because the temporary we created above was + // destroyed. + EXPECT_EQ(destroy_count, 1); + { + auto uf2 = std::move(uf); + EXPECT_EQ(destroy_count, 1); + } + // `uf2` was destroyed. + EXPECT_EQ(destroy_count, 2); + } + // `uf` was destroyed, but it didn't contain a Destroyer since we moved it to + // `uf2` above. + EXPECT_EQ(destroy_count, 2); +} + +TEST(UntypedFunction, MoveAssign) { + int destroy_count = 0; + auto uf = UntypedFunction::Create(Destroyer(destroy_count)); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + // Destruction count is 1 here, because the temporary we created above was + // destroyed. + EXPECT_EQ(destroy_count, 1); + UntypedFunction uf2 = nullptr; + EXPECT_FALSE(uf2); + EXPECT_TRUE(uf2.IsTriviallyDestructible()); + + uf2 = std::move(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_TRUE(uf2); + EXPECT_FALSE(uf2.IsTriviallyDestructible()); + EXPECT_EQ(destroy_count, 1); // The callable was not destroyed. + EXPECT_EQ(uf2.Call(), 72); + + UntypedFunction uf3 = nullptr; + uf2 = std::move(uf3); + EXPECT_FALSE(uf2); + EXPECT_TRUE(uf2.IsTriviallyDestructible()); + EXPECT_EQ(destroy_count, 2); // The callable was destroyed by the assignment. +} + +TEST(UntypedFunction, NullptrAssign) { + int destroy_count = 0; + auto uf = UntypedFunction::Create(Destroyer(destroy_count)); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + // Destruction count is 1 here, because the temporary we created above was + // destroyed. 
+ EXPECT_EQ(destroy_count, 1); + + uf = nullptr; + EXPECT_FALSE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_EQ(destroy_count, 2); // The callable was destroyed by the assignment. +} + +TEST(UntypedFunction, Swap) { + int x = 13; + auto uf = UntypedFunction::Create([x]() mutable { return ++x; }); + EXPECT_TRUE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + auto y = std::make_unique(113); + auto uf2 = + UntypedFunction::Create([y = std::move(y)] { return ++*y; }); + EXPECT_TRUE(uf2); + EXPECT_FALSE(uf2.IsTriviallyDestructible()); + UntypedFunction uf3 = nullptr; + EXPECT_FALSE(uf3); + EXPECT_TRUE(uf3.IsTriviallyDestructible()); + + EXPECT_EQ(uf.Call(), 14); + swap(uf, uf2); + EXPECT_TRUE(uf); + EXPECT_FALSE(uf.IsTriviallyDestructible()); + EXPECT_TRUE(uf2); + EXPECT_TRUE(uf2.IsTriviallyDestructible()); + EXPECT_EQ(uf.Call(), 114); + EXPECT_EQ(uf2.Call(), 15); + + swap(uf, uf3); + EXPECT_FALSE(uf); + EXPECT_TRUE(uf.IsTriviallyDestructible()); + EXPECT_TRUE(uf3); + EXPECT_FALSE(uf3.IsTriviallyDestructible()); + EXPECT_EQ(uf3.Call(), 115); +} + +} // namespace +} // namespace webrtc diff --git a/rtc_base/virtual_socket_server.cc b/rtc_base/virtual_socket_server.cc index d42873e18b..f5e993645e 100644 --- a/rtc_base/virtual_socket_server.cc +++ b/rtc_base/virtual_socket_server.cc @@ -53,7 +53,6 @@ const int NUM_SAMPLES = 1000; enum { MSG_ID_PACKET, - MSG_ID_ADDRESS_BOUND, MSG_ID_CONNECT, MSG_ID_DISCONNECT, MSG_ID_SIGNALREADEVENT, @@ -148,9 +147,6 @@ int VirtualSocket::Bind(const SocketAddress& addr) { } else { bound_ = true; was_any_ = addr.IsAnyIP(); - // Post a message here such that test case could have chance to - // process the local address. (i.e. SetAlternativeLocalAddress). 
- server_->msg_queue_->Post(RTC_FROM_HERE, this, MSG_ID_ADDRESS_BOUND); } return result; } @@ -167,65 +163,30 @@ int VirtualSocket::Close() { } if (SOCK_STREAM == type_) { + webrtc::MutexLock lock(&mutex_); + // Cancel pending sockets if (listen_queue_) { while (!listen_queue_->empty()) { SocketAddress addr = listen_queue_->front(); // Disconnect listening socket. - server_->Disconnect(server_->LookupBinding(addr)); + server_->Disconnect(addr); listen_queue_->pop_front(); } - delete listen_queue_; listen_queue_ = nullptr; } // Disconnect stream sockets if (CS_CONNECTED == state_) { - // Disconnect remote socket, check if it is a child of a server socket. - VirtualSocket* socket = - server_->LookupConnection(local_addr_, remote_addr_); - if (!socket) { - // Not a server socket child, then see if it is bound. - // TODO(tbd): If this is indeed a server socket that has no - // children this will cause the server socket to be - // closed. This might lead to unexpected results, how to fix this? - socket = server_->LookupBinding(remote_addr_); - } - server_->Disconnect(socket); - - // Remove mapping for both directions. - server_->RemoveConnection(remote_addr_, local_addr_); - server_->RemoveConnection(local_addr_, remote_addr_); + server_->Disconnect(local_addr_, remote_addr_); } // Cancel potential connects - MessageList msgs; - if (server_->msg_queue_) { - server_->msg_queue_->Clear(this, MSG_ID_CONNECT, &msgs); - } - for (MessageList::iterator it = msgs.begin(); it != msgs.end(); ++it) { - RTC_DCHECK(nullptr != it->pdata); - MessageAddress* data = static_cast(it->pdata); - - // Lookup remote side. - VirtualSocket* socket = - server_->LookupConnection(local_addr_, data->addr); - if (socket) { - // Server socket, remote side is a socket retreived by - // accept. Accepted sockets are not bound so we will not - // find it by looking in the bindings table. 
- server_->Disconnect(socket); - server_->RemoveConnection(local_addr_, data->addr); - } else { - server_->Disconnect(server_->LookupBinding(data->addr)); - } - delete data; - } - // Clear incoming packets and disconnect messages - if (server_->msg_queue_) { - server_->msg_queue_->Clear(this); - } + server_->CancelConnects(this); } + // Clear incoming packets and disconnect messages + server_->Clear(this); + state_ = CS_CLOSED; local_addr_.Clear(); remote_addr_.Clear(); @@ -270,6 +231,8 @@ int VirtualSocket::RecvFrom(void* pv, if (timestamp) { *timestamp = -1; } + + webrtc::MutexLock lock(&mutex_); // If we don't have a packet, then either error or wait for one to arrive. if (recv_buffer_.empty()) { if (async_) { @@ -277,9 +240,7 @@ int VirtualSocket::RecvFrom(void* pv, return -1; } while (recv_buffer_.empty()) { - Message msg; - server_->msg_queue_->Get(&msg); - server_->msg_queue_->Dispatch(&msg); + server_->ProcessOneMessage(); } } @@ -299,18 +260,14 @@ int VirtualSocket::RecvFrom(void* pv, // To behave like a real socket, SignalReadEvent should fire in the next // message loop pass if there's still data buffered. if (!recv_buffer_.empty()) { - // Clear the message so it doesn't end up posted multiple times. 
- server_->msg_queue_->Clear(this, MSG_ID_SIGNALREADEVENT); - server_->msg_queue_->Post(RTC_FROM_HERE, this, MSG_ID_SIGNALREADEVENT); + server_->PostSignalReadEvent(this); } if (SOCK_STREAM == type_) { - bool was_full = (recv_buffer_size_ == server_->recv_buffer_capacity_); + bool was_full = (recv_buffer_size_ == server_->recv_buffer_capacity()); recv_buffer_size_ -= data_read; if (was_full) { - VirtualSocket* sender = server_->LookupBinding(remote_addr_); - RTC_DCHECK(nullptr != sender); - server_->SendTcp(sender); + server_->SendTcp(remote_addr_); } } @@ -318,6 +275,7 @@ int VirtualSocket::RecvFrom(void* pv, } int VirtualSocket::Listen(int backlog) { + webrtc::MutexLock lock(&mutex_); RTC_DCHECK(SOCK_STREAM == type_); RTC_DCHECK(CS_CLOSED == state_); if (local_addr_.IsNil()) { @@ -325,12 +283,13 @@ int VirtualSocket::Listen(int backlog) { return -1; } RTC_DCHECK(nullptr == listen_queue_); - listen_queue_ = new ListenQueue; + listen_queue_ = std::make_unique(); state_ = CS_CONNECTING; return 0; } VirtualSocket* VirtualSocket::Accept(SocketAddress* paddr) { + webrtc::MutexLock lock(&mutex_); if (nullptr == listen_queue_) { error_ = EINVAL; return nullptr; @@ -349,7 +308,7 @@ VirtualSocket* VirtualSocket::Accept(SocketAddress* paddr) { delete socket; continue; } - socket->CompleteConnect(remote_addr, false); + socket->CompleteConnect(remote_addr); if (paddr) { *paddr = remote_addr; } @@ -386,49 +345,57 @@ int VirtualSocket::SetOption(Option opt, int value) { } void VirtualSocket::OnMessage(Message* pmsg) { - if (pmsg->message_id == MSG_ID_PACKET) { - RTC_DCHECK(nullptr != pmsg->pdata); - Packet* packet = static_cast(pmsg->pdata); - - recv_buffer_.push_back(packet); - - if (async_) { - SignalReadEvent(this); - } - } else if (pmsg->message_id == MSG_ID_CONNECT) { - RTC_DCHECK(nullptr != pmsg->pdata); - MessageAddress* data = static_cast(pmsg->pdata); - if (listen_queue_ != nullptr) { - listen_queue_->push_back(data->addr); - if (async_) { - SignalReadEvent(this); + 
bool signal_read_event = false; + bool signal_close_event = false; + bool signal_connect_event = false; + int error_to_signal = 0; + { + webrtc::MutexLock lock(&mutex_); + if (pmsg->message_id == MSG_ID_PACKET) { + RTC_DCHECK(nullptr != pmsg->pdata); + Packet* packet = static_cast(pmsg->pdata); + + recv_buffer_.push_back(packet); + signal_read_event = async_; + } else if (pmsg->message_id == MSG_ID_CONNECT) { + RTC_DCHECK(nullptr != pmsg->pdata); + MessageAddress* data = static_cast(pmsg->pdata); + if (listen_queue_ != nullptr) { + listen_queue_->push_back(data->addr); + signal_read_event = async_; + } else if ((SOCK_STREAM == type_) && (CS_CONNECTING == state_)) { + CompleteConnect(data->addr); + signal_connect_event = async_; + } else { + RTC_LOG(LS_VERBOSE) + << "Socket at " << local_addr_.ToString() << " is not listening"; + server_->Disconnect(data->addr); } - } else if ((SOCK_STREAM == type_) && (CS_CONNECTING == state_)) { - CompleteConnect(data->addr, true); - } else { - RTC_LOG(LS_VERBOSE) << "Socket at " << local_addr_.ToString() - << " is not listening"; - server_->Disconnect(server_->LookupBinding(data->addr)); - } - delete data; - } else if (pmsg->message_id == MSG_ID_DISCONNECT) { - RTC_DCHECK(SOCK_STREAM == type_); - if (CS_CLOSED != state_) { - int error = (CS_CONNECTING == state_) ? ECONNREFUSED : 0; - state_ = CS_CLOSED; - remote_addr_.Clear(); - if (async_) { - SignalCloseEvent(this, error); + delete data; + } else if (pmsg->message_id == MSG_ID_DISCONNECT) { + RTC_DCHECK(SOCK_STREAM == type_); + if (CS_CLOSED != state_) { + error_to_signal = (CS_CONNECTING == state_) ? 
ECONNREFUSED : 0; + state_ = CS_CLOSED; + remote_addr_.Clear(); + signal_close_event = async_; } + } else if (pmsg->message_id == MSG_ID_SIGNALREADEVENT) { + signal_read_event = !recv_buffer_.empty(); + } else { + RTC_NOTREACHED(); } - } else if (pmsg->message_id == MSG_ID_ADDRESS_BOUND) { - SignalAddressReady(this, GetLocalAddress()); - } else if (pmsg->message_id == MSG_ID_SIGNALREADEVENT) { - if (!recv_buffer_.empty()) { - SignalReadEvent(this); - } - } else { - RTC_NOTREACHED(); + } + // Signal events without holding `mutex_`, to avoid recursive locking, as well + // as issues with sigslot and lock order. + if (signal_read_event) { + SignalReadEvent(this); + } + if (signal_close_event) { + SignalCloseEvent(this, error_to_signal); + } + if (signal_connect_event) { + SignalConnectEvent(this); } } @@ -463,14 +430,11 @@ int VirtualSocket::InitiateConnect(const SocketAddress& addr, bool use_delay) { return 0; } -void VirtualSocket::CompleteConnect(const SocketAddress& addr, bool notify) { +void VirtualSocket::CompleteConnect(const SocketAddress& addr) { RTC_DCHECK(CS_CONNECTING == state_); remote_addr_ = addr; state_ = CS_CONNECTED; server_->AddConnection(remote_addr_, local_addr_, this); - if (async_ && notify) { - SignalConnectEvent(this); - } } int VirtualSocket::SendUdp(const void* pv, @@ -492,7 +456,7 @@ int VirtualSocket::SendUdp(const void* pv, } int VirtualSocket::SendTcp(const void* pv, size_t cb) { - size_t capacity = server_->send_buffer_capacity_ - send_buffer_.size(); + size_t capacity = server_->send_buffer_capacity() - send_buffer_.size(); if (0 == capacity) { ready_to_send_ = false; error_ = EWOULDBLOCK; @@ -521,6 +485,67 @@ void VirtualSocket::OnSocketServerReadyToSend() { } } +void VirtualSocket::SetToBlocked() { + webrtc::MutexLock lock(&mutex_); + ready_to_send_ = false; + error_ = EWOULDBLOCK; +} + +void VirtualSocket::UpdateRecv(size_t data_size) { + recv_buffer_size_ += data_size; +} + +void VirtualSocket::UpdateSend(size_t data_size) { + 
size_t new_buffer_size = send_buffer_.size() - data_size; + // Avoid undefined access beyond the last element of the vector. + // This only happens when new_buffer_size is 0. + if (data_size < send_buffer_.size()) { + // memmove is required for potentially overlapping source/destination. + memmove(&send_buffer_[0], &send_buffer_[data_size], new_buffer_size); + } + send_buffer_.resize(new_buffer_size); +} + +void VirtualSocket::MaybeSignalWriteEvent(size_t capacity) { + if (!ready_to_send_ && (send_buffer_.size() < capacity)) { + ready_to_send_ = true; + SignalWriteEvent(this); + } +} + +uint32_t VirtualSocket::AddPacket(int64_t cur_time, size_t packet_size) { + network_size_ += packet_size; + uint32_t send_delay = + server_->SendDelay(static_cast(network_size_)); + + NetworkEntry entry; + entry.size = packet_size; + entry.done_time = cur_time + send_delay; + network_.push_back(entry); + + return send_delay; +} + +int64_t VirtualSocket::UpdateOrderedDelivery(int64_t ts) { + // Ensure that new packets arrive after previous ones + ts = std::max(ts, last_delivery_time_); + // A socket should not have both ordered and unordered delivery, so its last + // delivery time only needs to be updated when it has ordered delivery. 
+ last_delivery_time_ = ts; + return ts; +} + +size_t VirtualSocket::PurgeNetworkPackets(int64_t cur_time) { + webrtc::MutexLock lock(&mutex_); + + while (!network_.empty() && (network_.front().done_time <= cur_time)) { + RTC_DCHECK(network_size_ >= network_.front().size); + network_size_ -= network_.front().size; + network_.pop_front(); + } + return network_size_; +} + VirtualSocketServer::VirtualSocketServer() : VirtualSocketServer(nullptr) {} VirtualSocketServer::VirtualSocketServer(ThreadProcessingFakeClock* fake_clock) @@ -594,17 +619,11 @@ AsyncSocket* VirtualSocketServer::CreateAsyncSocket(int family, int type) { } VirtualSocket* VirtualSocketServer::CreateSocketInternal(int family, int type) { - VirtualSocket* socket = new VirtualSocket(this, family, type, true); - SignalSocketCreated(socket); - return socket; + return new VirtualSocket(this, family, type, true); } void VirtualSocketServer::SetMessageQueue(Thread* msg_queue) { msg_queue_ = msg_queue; - if (msg_queue_) { - msg_queue_->SignalQueueDestroyed.connect( - this, &VirtualSocketServer::OnMessageQueueDestroyed); - } } bool VirtualSocketServer::Wait(int cmsWait, bool process_io) { @@ -812,19 +831,98 @@ bool VirtualSocketServer::Disconnect(VirtualSocket* socket) { return false; } +bool VirtualSocketServer::Disconnect(const SocketAddress& addr) { + return Disconnect(LookupBinding(addr)); +} + +bool VirtualSocketServer::Disconnect(const SocketAddress& local_addr, + const SocketAddress& remote_addr) { + // Disconnect remote socket, check if it is a child of a server socket. + VirtualSocket* socket = LookupConnection(local_addr, remote_addr); + if (!socket) { + // Not a server socket child, then see if it is bound. + // TODO(tbd): If this is indeed a server socket that has no + // children this will cause the server socket to be + // closed. This might lead to unexpected results, how to fix this? + socket = LookupBinding(remote_addr); + } + Disconnect(socket); + + // Remove mapping for both directions. 
+ RemoveConnection(remote_addr, local_addr); + RemoveConnection(local_addr, remote_addr); + return socket != nullptr; +} + +void VirtualSocketServer::CancelConnects(VirtualSocket* socket) { + MessageList msgs; + if (msg_queue_) { + msg_queue_->Clear(socket, MSG_ID_CONNECT, &msgs); + } + for (MessageList::iterator it = msgs.begin(); it != msgs.end(); ++it) { + RTC_DCHECK(nullptr != it->pdata); + MessageAddress* data = static_cast(it->pdata); + SocketAddress local_addr = socket->GetLocalAddress(); + // Lookup remote side. + VirtualSocket* socket = LookupConnection(local_addr, data->addr); + if (socket) { + // Server socket, remote side is a socket retreived by + // accept. Accepted sockets are not bound so we will not + // find it by looking in the bindings table. + Disconnect(socket); + RemoveConnection(local_addr, data->addr); + } else { + Disconnect(data->addr); + } + delete data; + } +} + +void VirtualSocketServer::Clear(VirtualSocket* socket) { + // Clear incoming packets and disconnect messages + if (msg_queue_) { + msg_queue_->Clear(socket); + } +} + +void VirtualSocketServer::ProcessOneMessage() { + Message msg; + msg_queue_->Get(&msg); + msg_queue_->Dispatch(&msg); +} + +void VirtualSocketServer::PostSignalReadEvent(VirtualSocket* socket) { + // Clear the message so it doesn't end up posted multiple times. + msg_queue_->Clear(socket, MSG_ID_SIGNALREADEVENT); + msg_queue_->Post(RTC_FROM_HERE, socket, MSG_ID_SIGNALREADEVENT); +} + int VirtualSocketServer::SendUdp(VirtualSocket* socket, const char* data, size_t data_size, const SocketAddress& remote_addr) { ++sent_packets_; if (sending_blocked_) { - CritScope cs(&socket->crit_); - socket->ready_to_send_ = false; - socket->error_ = EWOULDBLOCK; + socket->SetToBlocked(); return -1; } + if (data_size > largest_seen_udp_payload_) { + if (data_size > 1000) { + RTC_LOG(LS_VERBOSE) << "Largest UDP seen is " << data_size; + } + largest_seen_udp_payload_ = data_size; + } + // See if we want to drop this packet. 
+ if (data_size > max_udp_payload_) { + RTC_LOG(LS_VERBOSE) << "Dropping too large UDP payload of size " + << data_size << ", UDP payload limit is " + << max_udp_payload_; + // Return as if send was successful; packet disappears. + return data_size; + } + if (Random() < drop_prob_) { RTC_LOG(LS_VERBOSE) << "Dropping packet: bad luck"; return static_cast(data_size); @@ -854,10 +952,8 @@ int VirtualSocketServer::SendUdp(VirtualSocket* socket, } { - CritScope cs(&socket->crit_); - int64_t cur_time = TimeMillis(); - PurgeNetworkPackets(socket, cur_time); + size_t network_size = socket->PurgeNetworkPackets(cur_time); // Determine whether we have enough bandwidth to accept this packet. To do // this, we need to update the send queue. Once we know it's current size, @@ -868,7 +964,7 @@ int VirtualSocketServer::SendUdp(VirtualSocket* socket, // simulation of what a normal network would do. size_t packet_size = data_size + UDP_HEADER_SIZE; - if (socket->network_size_ + packet_size > network_capacity_) { + if (network_size + packet_size > network_capacity_) { RTC_LOG(LS_VERBOSE) << "Dropping packet: network capacity exceeded"; return static_cast(data_size); } @@ -896,45 +992,36 @@ void VirtualSocketServer::SendTcp(VirtualSocket* socket) { // Lookup the local/remote pair in the connections table. 
VirtualSocket* recipient = - LookupConnection(socket->local_addr_, socket->remote_addr_); + LookupConnection(socket->GetLocalAddress(), socket->GetRemoteAddress()); if (!recipient) { RTC_LOG(LS_VERBOSE) << "Sending data to no one."; return; } - CritScope cs(&socket->crit_); - int64_t cur_time = TimeMillis(); - PurgeNetworkPackets(socket, cur_time); + socket->PurgeNetworkPackets(cur_time); while (true) { - size_t available = recv_buffer_capacity_ - recipient->recv_buffer_size_; + size_t available = recv_buffer_capacity_ - recipient->recv_buffer_size(); size_t max_data_size = std::min(available, TCP_MSS - TCP_HEADER_SIZE); - size_t data_size = std::min(socket->send_buffer_.size(), max_data_size); + size_t data_size = std::min(socket->send_buffer_size(), max_data_size); if (0 == data_size) break; - AddPacketToNetwork(socket, recipient, cur_time, &socket->send_buffer_[0], + AddPacketToNetwork(socket, recipient, cur_time, socket->send_buffer_data(), data_size, TCP_HEADER_SIZE, true); - recipient->recv_buffer_size_ += data_size; - - size_t new_buffer_size = socket->send_buffer_.size() - data_size; - // Avoid undefined access beyond the last element of the vector. - // This only happens when new_buffer_size is 0. - if (data_size < socket->send_buffer_.size()) { - // memmove is required for potentially overlapping source/destination. 
- memmove(&socket->send_buffer_[0], &socket->send_buffer_[data_size], - new_buffer_size); - } - socket->send_buffer_.resize(new_buffer_size); + recipient->UpdateRecv(data_size); + socket->UpdateSend(data_size); } - if (!socket->ready_to_send_ && - (socket->send_buffer_.size() < send_buffer_capacity_)) { - socket->ready_to_send_ = true; - socket->SignalWriteEvent(socket); - } + socket->MaybeSignalWriteEvent(send_buffer_capacity_); +} + +void VirtualSocketServer::SendTcp(const SocketAddress& addr) { + VirtualSocket* sender = LookupBinding(addr); + RTC_DCHECK(nullptr != sender); + SendTcp(sender); } void VirtualSocketServer::AddPacketToNetwork(VirtualSocket* sender, @@ -944,13 +1031,7 @@ void VirtualSocketServer::AddPacketToNetwork(VirtualSocket* sender, size_t data_size, size_t header_size, bool ordered) { - VirtualSocket::NetworkEntry entry; - entry.size = data_size + header_size; - - sender->network_size_ += entry.size; - uint32_t send_delay = SendDelay(static_cast(sender->network_size_)); - entry.done_time = cur_time + send_delay; - sender->network_.push_back(entry); + uint32_t send_delay = sender->AddPacket(cur_time, data_size + header_size); // Find the delay for crossing the many virtual hops of the network. uint32_t transit_delay = GetTransitDelay(sender); @@ -958,7 +1039,7 @@ void VirtualSocketServer::AddPacketToNetwork(VirtualSocket* sender, // When the incoming packet is from a binding of the any address, translate it // to the default route here such that the recipient will see the default // route. 
- SocketAddress sender_addr = sender->local_addr_; + SocketAddress sender_addr = sender->GetLocalAddress(); IPAddress default_ip = GetDefaultRoute(sender_addr.ipaddr().family()); if (sender_addr.IsAnyIP() && !IPIsUnspec(default_ip)) { sender_addr.SetIP(default_ip); @@ -969,25 +1050,11 @@ void VirtualSocketServer::AddPacketToNetwork(VirtualSocket* sender, int64_t ts = TimeAfter(send_delay + transit_delay); if (ordered) { - // Ensure that new packets arrive after previous ones - ts = std::max(ts, sender->last_delivery_time_); - // A socket should not have both ordered and unordered delivery, so its last - // delivery time only needs to be updated when it has ordered delivery. - sender->last_delivery_time_ = ts; + ts = sender->UpdateOrderedDelivery(ts); } msg_queue_->PostAt(RTC_FROM_HERE, ts, recipient, MSG_ID_PACKET, p); } -void VirtualSocketServer::PurgeNetworkPackets(VirtualSocket* socket, - int64_t cur_time) { - while (!socket->network_.empty() && - (socket->network_.front().done_time <= cur_time)) { - RTC_DCHECK(socket->network_size_ >= socket->network_.front().size); - socket->network_size_ -= socket->network_.front().size; - socket->network_.pop_front(); - } -} - uint32_t VirtualSocketServer::SendDelay(uint32_t size) { if (bandwidth_ == 0) return 0; @@ -1017,13 +1084,7 @@ void PrintFunction(std::vector >* f) { #endif // void VirtualSocketServer::UpdateDelayDistribution() { - Function* dist = - CreateDistribution(delay_mean_, delay_stddev_, delay_samples_); - // We take a lock just to make sure we don't leak memory. 
- { - CritScope cs(&delay_crit_); - delay_dist_.reset(dist); - } + delay_dist_ = CreateDistribution(delay_mean_, delay_stddev_, delay_samples_); } static double PI = 4 * atan(1.0); @@ -1042,11 +1103,11 @@ static double Pareto(double x, double min, double k) { } #endif -VirtualSocketServer::Function* VirtualSocketServer::CreateDistribution( - uint32_t mean, - uint32_t stddev, - uint32_t samples) { - Function* f = new Function(); +std::unique_ptr +VirtualSocketServer::CreateDistribution(uint32_t mean, + uint32_t stddev, + uint32_t samples) { + auto f = std::make_unique(); if (0 == stddev) { f->push_back(Point(mean, 1.0)); @@ -1062,7 +1123,7 @@ VirtualSocketServer::Function* VirtualSocketServer::CreateDistribution( f->push_back(Point(x, y)); } } - return Resample(Invert(Accumulate(f)), 0, 1, samples); + return Resample(Invert(Accumulate(std::move(f))), 0, 1, samples); } uint32_t VirtualSocketServer::GetTransitDelay(Socket* socket) { @@ -1091,7 +1152,8 @@ struct FunctionDomainCmp { } }; -VirtualSocketServer::Function* VirtualSocketServer::Accumulate(Function* f) { +std::unique_ptr VirtualSocketServer::Accumulate( + std::unique_ptr f) { RTC_DCHECK(f->size() >= 1); double v = 0; for (Function::size_type i = 0; i < f->size() - 1; ++i) { @@ -1104,7 +1166,8 @@ VirtualSocketServer::Function* VirtualSocketServer::Accumulate(Function* f) { return f; } -VirtualSocketServer::Function* VirtualSocketServer::Invert(Function* f) { +std::unique_ptr VirtualSocketServer::Invert( + std::unique_ptr f) { for (Function::size_type i = 0; i < f->size(); ++i) std::swap((*f)[i].first, (*f)[i].second); @@ -1112,24 +1175,25 @@ VirtualSocketServer::Function* VirtualSocketServer::Invert(Function* f) { return f; } -VirtualSocketServer::Function* VirtualSocketServer::Resample(Function* f, - double x1, - double x2, - uint32_t samples) { - Function* g = new Function(); +std::unique_ptr VirtualSocketServer::Resample( + std::unique_ptr f, + double x1, + double x2, + uint32_t samples) { + auto g = 
std::make_unique(); for (size_t i = 0; i < samples; i++) { double x = x1 + (x2 - x1) * i / (samples - 1); - double y = Evaluate(f, x); + double y = Evaluate(f.get(), x); g->push_back(Point(x, y)); } - delete f; return g; } -double VirtualSocketServer::Evaluate(Function* f, double x) { - Function::iterator iter = absl::c_lower_bound(*f, x, FunctionDomainCmp()); +double VirtualSocketServer::Evaluate(const Function* f, double x) { + Function::const_iterator iter = + absl::c_lower_bound(*f, x, FunctionDomainCmp()); if (iter == f->begin()) { return (*f)[0].second; } else if (iter == f->end()) { diff --git a/rtc_base/virtual_socket_server.h b/rtc_base/virtual_socket_server.h index f45fabf0af..6c58a4bdfe 100644 --- a/rtc_base/virtual_socket_server.h +++ b/rtc_base/virtual_socket_server.h @@ -21,6 +21,7 @@ #include "rtc_base/fake_clock.h" #include "rtc_base/message_handler.h" #include "rtc_base/socket_server.h" +#include "rtc_base/synchronization/mutex.h" namespace rtc { @@ -32,7 +33,7 @@ class SocketAddressPair; // interface can create as many addresses as you want. All of the sockets // created by this network will be able to communicate with one another, unless // they are bound to addresses from incompatible families. -class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { +class VirtualSocketServer : public SocketServer { public: VirtualSocketServer(); // This constructor needs to be used if the test uses a fake clock and @@ -93,6 +94,16 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { drop_prob_ = drop_prob; } + // Controls the maximum UDP payload for the networks simulated + // by this server. Any UDP payload sent that is larger than this will + // be dropped. 
+ size_t max_udp_payload() { return max_udp_payload_; } + void set_max_udp_payload(size_t payload_size) { + max_udp_payload_ = payload_size; + } + + size_t largest_seen_udp_payload() { return largest_seen_udp_payload_; } + // If |blocked| is true, subsequent attempts to send will result in -1 being // returned, with the socket error set to EWOULDBLOCK. // @@ -129,9 +140,9 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { typedef std::pair Point; typedef std::vector Function; - static Function* CreateDistribution(uint32_t mean, - uint32_t stddev, - uint32_t samples); + static std::unique_ptr CreateDistribution(uint32_t mean, + uint32_t stddev, + uint32_t samples); // Similar to Thread::ProcessMessages, but it only processes messages until // there are no immediate messages or pending network traffic. Returns false @@ -150,25 +161,12 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { // socket server. Intended to be used for test assertions. uint32_t sent_packets() const { return sent_packets_; } - // For testing purpose only. Fired when a client socket is created. - sigslot::signal1 SignalSocketCreated; - - protected: - // Returns a new IP not used before in this network. - IPAddress GetNextIP(int family); - uint16_t GetNextPort(); - - VirtualSocket* CreateSocketInternal(int family, int type); - // Binds the given socket to addr, assigning and IP and Port if necessary int Bind(VirtualSocket* socket, SocketAddress* addr); // Binds the given socket to the given (fully-defined) address. int Bind(VirtualSocket* socket, const SocketAddress& addr); - // Find the socket bound to the given address - VirtualSocket* LookupBinding(const SocketAddress& addr); - int Unbind(const SocketAddress& addr, VirtualSocket* socket); // Adds a mapping between this socket pair and the socket. 
@@ -176,13 +174,6 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { const SocketAddress& server, VirtualSocket* socket); - // Find the socket pair corresponding to this server address. - VirtualSocket* LookupConnection(const SocketAddress& client, - const SocketAddress& server); - - void RemoveConnection(const SocketAddress& client, - const SocketAddress& server); - // Connects the given socket to the socket at the given address int Connect(VirtualSocket* socket, const SocketAddress& remote_addr, @@ -191,6 +182,13 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { // Sends a disconnect message to the socket at the given address bool Disconnect(VirtualSocket* socket); + // Lookup address, and disconnect corresponding socket. + bool Disconnect(const SocketAddress& addr); + + // Lookup connection, close corresponding socket. + bool Disconnect(const SocketAddress& local_addr, + const SocketAddress& remote_addr); + // Sends the given packet to the socket at the given address (if one exists). int SendUdp(VirtualSocket* socket, const char* data, @@ -200,6 +198,44 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { // Moves as much data as possible from the sender's buffer to the network void SendTcp(VirtualSocket* socket); + // Like above, but lookup sender by address. + void SendTcp(const SocketAddress& addr); + + // Computes the number of milliseconds required to send a packet of this size. + uint32_t SendDelay(uint32_t size); + + // Cancel attempts to connect to a socket that is being closed. + void CancelConnects(VirtualSocket* socket); + + // Clear incoming messages for a socket that is being closed. + void Clear(VirtualSocket* socket); + + void ProcessOneMessage(); + + void PostSignalReadEvent(VirtualSocket* socket); + + // Sending was previously blocked, but now isn't. 
+ sigslot::signal0<> SignalReadyToSend; + + protected: + // Returns a new IP not used before in this network. + IPAddress GetNextIP(int family); + + // Find the socket bound to the given address + VirtualSocket* LookupBinding(const SocketAddress& addr); + + private: + uint16_t GetNextPort(); + + VirtualSocket* CreateSocketInternal(int family, int type); + + // Find the socket pair corresponding to this server address. + VirtualSocket* LookupConnection(const SocketAddress& client, + const SocketAddress& server); + + void RemoveConnection(const SocketAddress& client, + const SocketAddress& server); + // Places a packet on the network. void AddPacketToNetwork(VirtualSocket* socket, VirtualSocket* recipient, @@ -209,31 +245,19 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { size_t header_size, bool ordered); - // Removes stale packets from the network - void PurgeNetworkPackets(VirtualSocket* socket, int64_t cur_time); - - // Computes the number of milliseconds required to send a packet of this size. - uint32_t SendDelay(uint32_t size); - // If the delay has been set for the address of the socket, returns the set // delay. Otherwise, returns a random transit delay chosen from the // appropriate distribution. uint32_t GetTransitDelay(Socket* socket); - // Basic operations on functions. Those that return a function also take - // ownership of the function given (and hence, may modify or delete it). - static Function* Accumulate(Function* f); - static Function* Invert(Function* f); - static Function* Resample(Function* f, - double x1, - double x2, - uint32_t samples); - static double Evaluate(Function* f, double x); - - // Null out our message queue if it goes away. Necessary in the case where - // our lifetime is greater than that of the thread we are using, since we - // try to send Close messages for all connected sockets when we shutdown. - void OnMessageQueueDestroyed() { msg_queue_ = nullptr; } + // Basic operations on functions. 
+ static std::unique_ptr Accumulate(std::unique_ptr f); + static std::unique_ptr Invert(std::unique_ptr f); + static std::unique_ptr Resample(std::unique_ptr f, + double x1, + double x2, + uint32_t samples); + static double Evaluate(const Function* f, double x); // Determine if two sockets should be able to communicate. // We don't (currently) specify an address family for sockets; instead, @@ -253,12 +277,6 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { // NB: This scheme doesn't permit non-dualstack IPv6 sockets. static bool CanInteractWith(VirtualSocket* local, VirtualSocket* remote); - private: - friend class VirtualSocket; - - // Sending was previously blocked, but now isn't. - sigslot::signal0<> SignalReadyToSend; - typedef std::map AddressMap; typedef std::map ConnectionMap; @@ -294,9 +312,14 @@ class VirtualSocketServer : public SocketServer, public sigslot::has_slots<> { std::map alternative_address_mapping_; std::unique_ptr delay_dist_; - CriticalSection delay_crit_; - double drop_prob_; + // The largest UDP payload permitted on this virtual socket server. + // The default is the max size of IPv4 fragmented UDP packet payload: + // 65535 bytes - 8 bytes UDP header - 20 bytes IP header. + size_t max_udp_payload_ = 65507; + // The largest UDP payload seen so far. + size_t largest_seen_udp_payload_ = 0; + bool sending_blocked_ = false; RTC_DISALLOW_COPY_AND_ASSIGN(VirtualSocketServer); }; @@ -333,11 +356,30 @@ class VirtualSocket : public AsyncSocket, int SetOption(Option opt, int value) override; void OnMessage(Message* pmsg) override; + size_t recv_buffer_size() const { return recv_buffer_size_; } + size_t send_buffer_size() const { return send_buffer_.size(); } + const char* send_buffer_data() const { return send_buffer_.data(); } + + // Used by server sockets to set the local address without binding. 
+ void SetLocalAddress(const SocketAddress& addr); + bool was_any() { return was_any_; } void set_was_any(bool was_any) { was_any_ = was_any; } - // For testing purpose only. Fired when client socket is bound to an address. - sigslot::signal2 SignalAddressReady; + void SetToBlocked(); + + void UpdateRecv(size_t data_size); + void UpdateSend(size_t data_size); + + void MaybeSignalWriteEvent(size_t capacity); + + // Adds a packet to be sent. Returns delay, based on network_size_. + uint32_t AddPacket(int64_t cur_time, size_t packet_size); + + int64_t UpdateOrderedDelivery(int64_t ts); + + // Removes stale packets from the network. Returns current size. + size_t PurgeNetworkPackets(int64_t cur_time); private: struct NetworkEntry { @@ -352,25 +394,23 @@ class VirtualSocket : public AsyncSocket, typedef std::map OptionsMap; int InitiateConnect(const SocketAddress& addr, bool use_delay); - void CompleteConnect(const SocketAddress& addr, bool notify); + void CompleteConnect(const SocketAddress& addr); int SendUdp(const void* pv, size_t cb, const SocketAddress& addr); int SendTcp(const void* pv, size_t cb); - // Used by server sockets to set the local address without binding. - void SetLocalAddress(const SocketAddress& addr); - void OnSocketServerReadyToSend(); - VirtualSocketServer* server_; - int type_; - bool async_; + VirtualSocketServer* const server_; + const int type_; + const bool async_; ConnState state_; int error_; SocketAddress local_addr_; SocketAddress remote_addr_; // Pending sockets which can be Accepted - ListenQueue* listen_queue_; + std::unique_ptr listen_queue_ RTC_GUARDED_BY(mutex_) + RTC_PT_GUARDED_BY(mutex_); // Data which tcp has buffered for sending SendBuffer send_buffer_; @@ -378,8 +418,8 @@ class VirtualSocket : public AsyncSocket, // Set back to true when the socket can send again. 
bool ready_to_send_ = true; - // Critical section to protect the recv_buffer and queue_ - CriticalSection crit_; + // Mutex to protect the recv_buffer and listen_queue_ + webrtc::Mutex mutex_; // Network model that enforces bandwidth and capacity constraints NetworkQueue network_; @@ -389,7 +429,7 @@ class VirtualSocket : public AsyncSocket, int64_t last_delivery_time_ = 0; // Data which has been received from the network - RecvBuffer recv_buffer_; + RecvBuffer recv_buffer_ RTC_GUARDED_BY(mutex_); // The amount of data which is in flight or in recv_buffer_ size_t recv_buffer_size_; @@ -404,8 +444,6 @@ class VirtualSocket : public AsyncSocket, // Store the options that are set OptionsMap options_map_; - - friend class VirtualSocketServer; }; } // namespace rtc diff --git a/rtc_base/virtual_socket_unittest.cc b/rtc_base/virtual_socket_unittest.cc index b274b40857..96a359d187 100644 --- a/rtc_base/virtual_socket_unittest.cc +++ b/rtc_base/virtual_socket_unittest.cc @@ -53,7 +53,7 @@ using webrtc::testing::SSE_WRITE; using webrtc::testing::StreamSink; // Sends at a constant rate but with random packet sizes. 
-struct Sender : public MessageHandler { +struct Sender : public MessageHandlerAutoCleanup { Sender(Thread* th, AsyncSocket* s, uint32_t rt) : thread(th), socket(std::make_unique(s)), @@ -99,7 +99,8 @@ struct Sender : public MessageHandler { char dummy[4096]; }; -struct Receiver : public MessageHandler, public sigslot::has_slots<> { +struct Receiver : public MessageHandlerAutoCleanup, + public sigslot::has_slots<> { Receiver(Thread* th, AsyncSocket* s, uint32_t bw) : thread(th), socket(std::make_unique(s)), @@ -1116,10 +1117,10 @@ TEST_F(VirtualSocketServerTest, CreatesStandardDistribution) { ASSERT_LT(0u, kTestSamples[sidx]); const uint32_t kStdDev = static_cast(kTestDev[didx] * kTestMean[midx]); - VirtualSocketServer::Function* f = + std::unique_ptr f = VirtualSocketServer::CreateDistribution(kTestMean[midx], kStdDev, kTestSamples[sidx]); - ASSERT_TRUE(nullptr != f); + ASSERT_TRUE(nullptr != f.get()); ASSERT_EQ(kTestSamples[sidx], f->size()); double sum = 0; for (uint32_t i = 0; i < f->size(); ++i) { @@ -1138,7 +1139,6 @@ TEST_F(VirtualSocketServerTest, CreatesStandardDistribution) { EXPECT_NEAR(kStdDev, stddev, 0.1 * kStdDev) << "M=" << kTestMean[midx] << " SD=" << kStdDev << " N=" << kTestSamples[sidx]; - delete f; } } } diff --git a/rtc_base/weak_ptr.h b/rtc_base/weak_ptr.h index 3e63a7587d..a9e6b3a990 100644 --- a/rtc_base/weak_ptr.h +++ b/rtc_base/weak_ptr.h @@ -15,9 +15,10 @@ #include #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "rtc_base/ref_count.h" #include "rtc_base/ref_counted_object.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" // The implementation is borrowed from chromium except that it does not // implement SupportsWeakPtr. 
@@ -103,7 +104,7 @@ class WeakReference { ~Flag() override; - ::webrtc::SequenceChecker checker_; + RTC_NO_UNIQUE_ADDRESS ::webrtc::SequenceChecker checker_; bool is_valid_; }; @@ -241,6 +242,10 @@ class WeakPtrFactory { public: explicit WeakPtrFactory(T* ptr) : ptr_(ptr) {} + WeakPtrFactory() = delete; + WeakPtrFactory(const WeakPtrFactory&) = delete; + WeakPtrFactory& operator=(const WeakPtrFactory&) = delete; + ~WeakPtrFactory() { ptr_ = nullptr; } WeakPtr GetWeakPtr() { @@ -263,7 +268,6 @@ class WeakPtrFactory { private: internal::WeakReferenceOwner weak_reference_owner_; T* ptr_; - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory); }; } // namespace rtc diff --git a/rtc_base/win/create_direct3d_device.cc b/rtc_base/win/create_direct3d_device.cc new file mode 100644 index 0000000000..02fe340d56 --- /dev/null +++ b/rtc_base/win/create_direct3d_device.cc @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/win/create_direct3d_device.h" + +#include +#include + +namespace { + +FARPROC LoadD3D11Function(const char* function_name) { + static HMODULE const handle = + ::LoadLibraryExW(L"d3d11.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32); + return handle ? 
::GetProcAddress(handle, function_name) : nullptr; +} + +decltype(&::CreateDirect3D11DeviceFromDXGIDevice) +GetCreateDirect3D11DeviceFromDXGIDevice() { + static decltype(&::CreateDirect3D11DeviceFromDXGIDevice) const function = + reinterpret_cast( + LoadD3D11Function("CreateDirect3D11DeviceFromDXGIDevice")); + return function; +} + +} // namespace + +namespace webrtc { + +bool ResolveCoreWinRTDirect3DDelayload() { + return GetCreateDirect3D11DeviceFromDXGIDevice(); +} + +HRESULT CreateDirect3DDeviceFromDXGIDevice( + IDXGIDevice* dxgi_device, + ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice** + out_d3d11_device) { + decltype(&::CreateDirect3D11DeviceFromDXGIDevice) create_d3d11_device_func = + GetCreateDirect3D11DeviceFromDXGIDevice(); + if (!create_d3d11_device_func) + return E_FAIL; + + Microsoft::WRL::ComPtr inspectableSurface; + HRESULT hr = create_d3d11_device_func(dxgi_device, &inspectableSurface); + if (FAILED(hr)) + return hr; + + return inspectableSurface->QueryInterface(IID_PPV_ARGS(out_d3d11_device)); +} + +} // namespace webrtc diff --git a/rtc_base/win/create_direct3d_device.h b/rtc_base/win/create_direct3d_device.h new file mode 100644 index 0000000000..7c21f8720a --- /dev/null +++ b/rtc_base/win/create_direct3d_device.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_WIN_CREATE_DIRECT3D_DEVICE_H_ +#define RTC_BASE_WIN_CREATE_DIRECT3D_DEVICE_H_ + +#include +#include +#include +#include + +namespace webrtc { + +// Callers must check the return value of ResolveCoreWinRTDirect3DDelayload() +// before using CreateDirect3DDeviceFromDXGIDevice(). 
+bool ResolveCoreWinRTDirect3DDelayload(); + +// Allows for the creating of Direct3D Devices from a DXGI device on versions +// of Windows greater than Win7. +HRESULT CreateDirect3DDeviceFromDXGIDevice( + IDXGIDevice* dxgi_device, + ABI::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice** + out_d3d11_device); + +} // namespace webrtc + +#endif // RTC_BASE_WIN_CREATE_DIRECT3D_DEVICE_H_ diff --git a/rtc_base/win/get_activation_factory.cc b/rtc_base/win/get_activation_factory.cc new file mode 100644 index 0000000000..b3be9abfa7 --- /dev/null +++ b/rtc_base/win/get_activation_factory.cc @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/win/get_activation_factory.h" + +#include +#include + +namespace { + +FARPROC LoadComBaseFunction(const char* function_name) { + static HMODULE const handle = + ::LoadLibraryExW(L"combase.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32); + return handle ? 
::GetProcAddress(handle, function_name) : nullptr; +} + +decltype(&::RoGetActivationFactory) GetRoGetActivationFactoryFunction() { + static decltype(&::RoGetActivationFactory) const function = + reinterpret_cast( + LoadComBaseFunction("RoGetActivationFactory")); + return function; +} + +} // namespace + +namespace webrtc { + +bool ResolveCoreWinRTDelayload() { + return GetRoGetActivationFactoryFunction() && + ResolveCoreWinRTStringDelayload(); +} + +HRESULT RoGetActivationFactoryProxy(HSTRING class_id, + const IID& iid, + void** out_factory) { + auto get_factory_func = GetRoGetActivationFactoryFunction(); + if (!get_factory_func) + return E_FAIL; + return get_factory_func(class_id, iid, out_factory); +} + +} // namespace webrtc diff --git a/rtc_base/win/get_activation_factory.h b/rtc_base/win/get_activation_factory.h new file mode 100644 index 0000000000..08f602f0c4 --- /dev/null +++ b/rtc_base/win/get_activation_factory.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_WIN_GET_ACTIVATION_FACTORY_H_ +#define RTC_BASE_WIN_GET_ACTIVATION_FACTORY_H_ + +#include + +#include "rtc_base/win/hstring.h" + +namespace webrtc { + +// Provides access to Core WinRT functions which may not be available on +// Windows 7. Loads functions dynamically at runtime to prevent library +// dependencies. + +// Callers must check the return value of ResolveCoreWinRTDelayLoad() before +// using these functions. 
+ +bool ResolveCoreWinRTDelayload(); + +HRESULT RoGetActivationFactoryProxy(HSTRING class_id, + const IID& iid, + void** out_factory); + +// Retrieves an activation factory for the type specified. +template +HRESULT GetActivationFactory(InterfaceType** factory) { + HSTRING class_id_hstring; + HRESULT hr = CreateHstring(runtime_class_id, wcslen(runtime_class_id), + &class_id_hstring); + if (FAILED(hr)) + return hr; + + hr = RoGetActivationFactoryProxy(class_id_hstring, IID_PPV_ARGS(factory)); + if (FAILED(hr)) { + DeleteHstring(class_id_hstring); + return hr; + } + + return DeleteHstring(class_id_hstring); +} + +} // namespace webrtc + +#endif // RTC_BASE_WIN_GET_ACTIVATION_FACTORY_H_ diff --git a/rtc_base/win/hstring.cc b/rtc_base/win/hstring.cc new file mode 100644 index 0000000000..5a362a97c9 --- /dev/null +++ b/rtc_base/win/hstring.cc @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/win/hstring.h" + +#include +#include + +namespace { + +FARPROC LoadComBaseFunction(const char* function_name) { + static HMODULE const handle = + ::LoadLibraryExW(L"combase.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32); + return handle ? 
::GetProcAddress(handle, function_name) : nullptr; +} + +decltype(&::WindowsCreateString) GetWindowsCreateString() { + static decltype(&::WindowsCreateString) const function = + reinterpret_cast( + LoadComBaseFunction("WindowsCreateString")); + return function; +} + +decltype(&::WindowsDeleteString) GetWindowsDeleteString() { + static decltype(&::WindowsDeleteString) const function = + reinterpret_cast( + LoadComBaseFunction("WindowsDeleteString")); + return function; +} + +} // namespace + +namespace webrtc { + +bool ResolveCoreWinRTStringDelayload() { + return GetWindowsDeleteString() && GetWindowsCreateString(); +} + +HRESULT CreateHstring(const wchar_t* src, uint32_t len, HSTRING* out_hstr) { + decltype(&::WindowsCreateString) create_string_func = + GetWindowsCreateString(); + if (!create_string_func) + return E_FAIL; + return create_string_func(src, len, out_hstr); +} + +HRESULT DeleteHstring(HSTRING hstr) { + decltype(&::WindowsDeleteString) delete_string_func = + GetWindowsDeleteString(); + if (!delete_string_func) + return E_FAIL; + return delete_string_func(hstr); +} + +} // namespace webrtc diff --git a/rtc_base/win/hstring.h b/rtc_base/win/hstring.h new file mode 100644 index 0000000000..8fb119a9e6 --- /dev/null +++ b/rtc_base/win/hstring.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_WIN_HSTRING_H_ +#define RTC_BASE_WIN_HSTRING_H_ + +#include +#include +#include + +namespace webrtc { + +// Callers must check the return value of ResolveCoreWinRTStringDelayLoad() +// before using these functions. 
+bool ResolveCoreWinRTStringDelayload(); + +HRESULT CreateHstring(const wchar_t* src, uint32_t len, HSTRING* out_hstr); + +HRESULT DeleteHstring(HSTRING hstr); + +} // namespace webrtc + +#endif // RTC_BASE_WIN_HSTRING_H_ diff --git a/rtc_base/win/scoped_com_initializer.cc b/rtc_base/win/scoped_com_initializer.cc new file mode 100644 index 0000000000..81079fb54c --- /dev/null +++ b/rtc_base/win/scoped_com_initializer.cc @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "rtc_base/win/scoped_com_initializer.h" + +#include "rtc_base/checks.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +ScopedCOMInitializer::ScopedCOMInitializer() { + RTC_DLOG(INFO) << "Single-Threaded Apartment (STA) COM thread"; + Initialize(COINIT_APARTMENTTHREADED); +} + +// Constructor for MTA initialization. +ScopedCOMInitializer::ScopedCOMInitializer(SelectMTA mta) { + RTC_DLOG(INFO) << "Multi-Threaded Apartment (MTA) COM thread"; + Initialize(COINIT_MULTITHREADED); +} + +ScopedCOMInitializer::~ScopedCOMInitializer() { + if (Succeeded()) { + CoUninitialize(); + } +} + +void ScopedCOMInitializer::Initialize(COINIT init) { + // Initializes the COM library for use by the calling thread, sets the + // thread's concurrency model, and creates a new apartment for the thread + // if one is required. CoInitializeEx must be called at least once, and is + // usually called only once, for each thread that uses the COM library. 
+ hr_ = CoInitializeEx(NULL, init); + RTC_CHECK_NE(RPC_E_CHANGED_MODE, hr_) + << "Invalid COM thread model change (MTA->STA)"; + // Multiple calls to CoInitializeEx by the same thread are allowed as long + // as they pass the same concurrency flag, but subsequent valid calls + // return S_FALSE. To close the COM library gracefully on a thread, each + // successful call to CoInitializeEx, including any call that returns + // S_FALSE, must be balanced by a corresponding call to CoUninitialize. + if (hr_ == S_OK) { + RTC_DLOG(INFO) + << "The COM library was initialized successfully on this thread"; + } else if (hr_ == S_FALSE) { + RTC_DLOG(WARNING) + << "The COM library is already initialized on this thread"; + } +} + +} // namespace webrtc diff --git a/rtc_base/win/scoped_com_initializer.h b/rtc_base/win/scoped_com_initializer.h new file mode 100644 index 0000000000..2427097b5b --- /dev/null +++ b/rtc_base/win/scoped_com_initializer.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_BASE_WIN_SCOPED_COM_INITIALIZER_H_ +#define RTC_BASE_WIN_SCOPED_COM_INITIALIZER_H_ + +#include + +namespace webrtc { + +// Initializes COM in the constructor (STA or MTA), and uninitializes COM in the +// destructor. Taken from base::win::ScopedCOMInitializer. +// +// WARNING: This should only be used once per thread, ideally scoped to a +// similar lifetime as the thread itself. You should not be using this in +// random utility functions that make COM calls; instead ensure that these +// functions are running on a COM-supporting thread! 
+// See https://msdn.microsoft.com/en-us/library/ms809971.aspx for details. +class ScopedCOMInitializer { + public: + // Enum value provided to initialize the thread as an MTA instead of STA. + // There are two types of apartments, Single Threaded Apartments (STAs) + // and Multi Threaded Apartments (MTAs). Within a given process there can + // be multiple STA’s but there is only one MTA. STA is typically used by + // "GUI applications" and MTA by "worker threads" with no UI message loop. + enum SelectMTA { kMTA }; + + // Constructor for STA initialization. + ScopedCOMInitializer(); + + // Constructor for MTA initialization. + explicit ScopedCOMInitializer(SelectMTA mta); + + ~ScopedCOMInitializer(); + + ScopedCOMInitializer(const ScopedCOMInitializer&) = delete; + ScopedCOMInitializer& operator=(const ScopedCOMInitializer&) = delete; + + bool Succeeded() { return SUCCEEDED(hr_); } + + private: + void Initialize(COINIT init); + + HRESULT hr_; +}; + +} // namespace webrtc + +#endif // RTC_BASE_WIN_SCOPED_COM_INITIALIZER_H_ diff --git a/rtc_base/win/windows_version.cc b/rtc_base/win/windows_version.cc index 2e6c1577ce..42148adeea 100644 --- a/rtc_base/win/windows_version.cc +++ b/rtc_base/win/windows_version.cc @@ -203,8 +203,12 @@ Version MajorMinorBuildToVersion(int major, int minor, int build) { return VERSION_WIN10_RS2; } else if (build < 17134) { return VERSION_WIN10_RS3; - } else { + } else if (build < 17763) { return VERSION_WIN10_RS4; + } else if (build < 18362) { + return VERSION_WIN10_RS5; + } else { + return VERSION_WIN10_19H1; } } else if (major > 6) { RTC_NOTREACHED(); diff --git a/rtc_base/win/windows_version.h b/rtc_base/win/windows_version.h index 1ad319e4cc..33449e2b37 100644 --- a/rtc_base/win/windows_version.h +++ b/rtc_base/win/windows_version.h @@ -43,6 +43,8 @@ enum Version { VERSION_WIN10_RS2 = 10, // Redstone 2: Version 1703, Build 15063. VERSION_WIN10_RS3 = 11, // Redstone 3: Version 1709, Build 16299. 
VERSION_WIN10_RS4 = 12, // Redstone 4: Version 1803, Build 17134. + VERSION_WIN10_RS5 = 13, // Redstone 5: Version 1809, Build 17763. + VERSION_WIN10_19H1 = 14, // 19H1: Version 1903, Build 18362. // On edit, update tools\metrics\histograms\enums.xml "WindowsVersion" and // "GpuBlacklistFeatureTestResultsWindows2". VERSION_WIN_LAST, // Indicates error condition. diff --git a/rtc_base/win32_socket_server.cc b/rtc_base/win32_socket_server.cc index 8a5b93a608..cfe21a3630 100644 --- a/rtc_base/win32_socket_server.cc +++ b/rtc_base/win32_socket_server.cc @@ -733,7 +733,7 @@ bool Win32SocketServer::Wait(int cms, bool process_io) { MSG msg; b = GetMessage(&msg, nullptr, s_wm_wakeup_id, s_wm_wakeup_id); { - CritScope scope(&cs_); + webrtc::MutexLock lock(&mutex_); posted_ = false; } } else { @@ -747,7 +747,7 @@ void Win32SocketServer::WakeUp() { if (wnd_.handle()) { // Set the "message pending" flag, if not already set. { - CritScope scope(&cs_); + webrtc::MutexLock lock(&mutex_); if (posted_) return; posted_ = true; @@ -760,7 +760,7 @@ void Win32SocketServer::WakeUp() { void Win32SocketServer::Pump() { // Clear the "message pending" flag. 
{ - CritScope scope(&cs_); + webrtc::MutexLock lock(&mutex_); posted_ = false; } diff --git a/rtc_base/win32_socket_server.h b/rtc_base/win32_socket_server.h index 92fd68cd83..317acce0d2 100644 --- a/rtc_base/win32_socket_server.h +++ b/rtc_base/win32_socket_server.h @@ -13,10 +13,10 @@ #if defined(WEBRTC_WIN) #include "rtc_base/async_socket.h" -#include "rtc_base/critical_section.h" #include "rtc_base/socket.h" #include "rtc_base/socket_factory.h" #include "rtc_base/socket_server.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread.h" #include "rtc_base/win32_window.h" @@ -123,7 +123,7 @@ class Win32SocketServer : public SocketServer { static const wchar_t kWindowName[]; Thread* message_queue_; MessageWindow wnd_; - CriticalSection cs_; + webrtc::Mutex mutex_; bool posted_; HWND hdlg_; }; diff --git a/rtc_tools/BUILD.gn b/rtc_tools/BUILD.gn index 3d398455bd..b841228a8e 100644 --- a/rtc_tools/BUILD.gn +++ b/rtc_tools/BUILD.gn @@ -17,34 +17,35 @@ group("rtc_tools") { deps = [ ":frame_analyzer", ":video_file_reader", - ":video_quality_analysis", ] if (!build_with_chromium) { deps += [ ":psnr_ssim_analyzer", ":rgba_to_i420_converter", + ":video_quality_analysis", ] - if (rtc_enable_protobuf) { - deps += [ ":chart_proto" ] - } } - - if (rtc_include_tests) { + if (!build_with_chromium && rtc_enable_protobuf) { + deps += [ ":chart_proto" ] + } + if (!build_with_chromium && rtc_include_tests) { deps += [ ":tools_unittests", ":yuv_to_ivf_converter", ] - if (rtc_enable_protobuf) { - if (!build_with_chromium) { - deps += [ ":event_log_visualizer" ] - } - deps += [ - ":audioproc_f", - ":rtp_analyzer", - ":unpack_aecdump", - "network_tester", - ] - } + } + if (rtc_include_tests && rtc_enable_protobuf) { + deps += [ + ":rtp_analyzer", + "network_tester", + ] + } + if (rtc_include_tests && rtc_enable_protobuf && !build_with_chromium) { + deps += [ + ":audioproc_f", + ":event_log_visualizer", + ":unpack_aecdump", + ] } } @@ -56,10 +57,11 @@ 
rtc_library("video_file_reader") { deps = [ "../api:scoped_refptr", "../api/video:video_frame", - "../api/video:video_frame_i420", "../api/video:video_rtp_headers", "../rtc_base:checks", "../rtc_base:rtc_base_approved", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -74,9 +76,10 @@ rtc_library("video_file_writer") { ":video_file_reader", "../api:scoped_refptr", "../api/video:video_frame", - "../api/video:video_frame_i420", "../api/video:video_rtp_headers", "../rtc_base:rtc_base_approved", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] @@ -101,17 +104,22 @@ rtc_library("video_quality_analysis") { "../api:array_view", "../api:scoped_refptr", "../api/video:video_frame", - "../api/video:video_frame_i420", "../api/video:video_rtp_headers", "../common_video", "../rtc_base:checks", "../rtc_base:rtc_base_approved", "../test:perf_test", - "//third_party/abseil-cpp/absl/types:optional", "//third_party/libyuv", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } +# Abseil dependencies are not moved to the absl_deps field deliberately. +# If build_with_chromium is true, the absl_deps replaces the dependencies with +# the "//third_party/abseil-cpp:absl" target. Which doesn't include absl/flags +# (and some others) because they cannot be used in Chromiums. Special exception +# for the "frame_analyzer" target in "third_party/abseil-cpp/absl.gni" allows +# it to be build in chromium. rtc_executable("frame_analyzer") { visibility = [ "*" ] testonly = true @@ -128,6 +136,12 @@ rtc_executable("frame_analyzer") { "//third_party/abseil-cpp/absl/flags:parse", "//third_party/abseil-cpp/absl/strings", ] + + if (build_with_chromium) { + # When building from Chromium, WebRTC's metrics and field trial + # implementations need to be replaced by the Chromium ones. 
+ deps += [ "//third_party/webrtc_overrides:webrtc_component" ] + } } # TODO(bugs.webrtc.org/11474): Enable this on win if needed. For now it @@ -147,6 +161,13 @@ if (!is_component_build) { # This target can be built from Chromium but it doesn't support # is_component_build=true because it depends on WebRTC testonly code # which is not part of //third_party/webrtc_overrides:webrtc_component. + + # Abseil dependencies are not moved to the absl_deps field deliberately. + # If build_with_chromium is true, the absl_deps replaces the dependencies with + # the "//third_party/abseil-cpp:absl" target. Which doesn't include absl/flags + # (and some others) because they cannot be used in Chromiums. Special exception + # for the "frame_analyzer" target in "third_party/abseil-cpp/absl.gni" allows + # it to be build in chromium. rtc_executable("rtp_generator") { visibility = [ "*" ] testonly = true @@ -180,6 +201,7 @@ if (!is_component_build) { "../rtc_base", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_json", + "../rtc_base:threading", "../rtc_base/system:file_wrapper", "../test:fileutils", "../test:rtp_test_utils", @@ -199,6 +221,13 @@ if (!is_component_build) { # This target can be built from Chromium but it doesn't support # is_component_build=true because it depends on WebRTC testonly code # which is not part of //third_party/webrtc_overrides:webrtc_component. + + # Abseil dependencies are not moved to the absl_deps field deliberately. + # If build_with_chromium is true, the absl_deps replaces the dependencies with + # the "//third_party/abseil-cpp:absl" target. Which doesn't include absl/flags + # (and some others) because they cannot be used in Chromiums. Special exception + # for the "frame_analyzer" target in "third_party/abseil-cpp/absl.gni" allows + # it to be build in chromium. 
rtc_executable("video_replay") { visibility = [ "*" ] testonly = true @@ -208,11 +237,14 @@ if (!is_component_build) { "../api/task_queue:default_task_queue_factory", "../api/test/video:function_video_factory", "../api/transport:field_trial_based_config", + "../api/video:video_frame", "../api/video_codecs:video_codecs_api", "../call", "../call:call_interfaces", "../common_video", "../media:rtc_internal_video_codecs", + "../modules/rtp_rtcp:rtp_rtcp_format", + "../modules/video_coding:video_coding_utility", "../rtc_base:checks", "../rtc_base:rtc_json", "../rtc_base:stringutils", @@ -321,6 +353,8 @@ if (!build_with_chromium) { sources = [ "rtc_event_log_visualizer/alerts.cc", "rtc_event_log_visualizer/alerts.h", + "rtc_event_log_visualizer/analyze_audio.cc", + "rtc_event_log_visualizer/analyze_audio.h", "rtc_event_log_visualizer/analyzer.cc", "rtc_event_log_visualizer/analyzer.h", "rtc_event_log_visualizer/analyzer_common.cc", @@ -337,6 +371,7 @@ if (!build_with_chromium) { deps = [ ":chart_proto", "../api:function_view", + "../api:network_state_predictor_api", "../rtc_base:ignore_wundef", # TODO(kwiberg): Remove this dependency. 
@@ -363,160 +398,194 @@ if (!build_with_chromium) { "../rtc_base:rtc_base_approved", "../rtc_base:rtc_numerics", "../rtc_base:stringutils", + "../system_wrappers", + "../test:explicit_key_value_config", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/base:core_headers", + "//third_party/abseil-cpp/absl/functional:bind_front", "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", ] } } } if (rtc_include_tests) { - rtc_executable("yuv_to_ivf_converter") { - visibility = [ "*" ] - testonly = true - sources = [ "converter/yuv_to_ivf_converter.cc" ] - deps = [ - "../api:create_frame_generator", - "../api:frame_generator_api", - "../api/task_queue:default_task_queue_factory", - "../api/video:encoded_image", - "../api/video:video_frame", - "../api/video_codecs:video_codecs_api", - "../media:rtc_media_base", - "../modules/rtp_rtcp:rtp_rtcp_format", - "../modules/video_coding:video_codec_interface", - "../modules/video_coding:video_coding_utility", - "../modules/video_coding:webrtc_h264", - "../modules/video_coding:webrtc_vp8", - "../modules/video_coding:webrtc_vp9", - "../rtc_base:checks", - "../rtc_base:criticalsection", - "../rtc_base:logging", - "../rtc_base:rtc_event", - "../rtc_base:rtc_task_queue", - "../rtc_base/system:file_wrapper", - "../test:video_test_common", - "../test:video_test_support", - "//third_party/abseil-cpp/absl/debugging:failure_signal_handler", - "//third_party/abseil-cpp/absl/debugging:symbolize", - "//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", - "//third_party/abseil-cpp/absl/strings", - ] - } - - if (rtc_enable_protobuf && !build_with_chromium) { - rtc_executable("event_log_visualizer") { + if (!build_with_chromium) { + rtc_executable("yuv_to_ivf_converter") { + visibility = [ "*" ] testonly = true - sources = [ "rtc_event_log_visualizer/main.cc" ] - data = [ - # If --wav_filename is not provided, 
event_log_visualizer uses - # EN_script2_F_sp2_B1.wav by default. This is a good default to use - # for example with flags --plot=all when there is no need to use a - # specific .wav file. - "../resources/audio_processing/conversational_speech/EN_script2_F_sp2_B1.wav", - ] + sources = [ "converter/yuv_to_ivf_converter.cc" ] deps = [ - ":event_log_visualizer_utils", - "../api/neteq:neteq_api", - "../api/rtc_event_log", - "../logging:rtc_event_log_parser", - "../modules/audio_coding:neteq", + "../api:create_frame_generator", + "../api:frame_generator_api", + "../api/task_queue:default_task_queue_factory", + "../api/video:encoded_image", + "../api/video:video_frame", + "../api/video_codecs:video_codecs_api", + "../media:rtc_media_base", "../modules/rtp_rtcp:rtp_rtcp_format", + "../modules/video_coding:video_codec_interface", + "../modules/video_coding:video_coding_utility", + "../modules/video_coding:webrtc_h264", + "../modules/video_coding:webrtc_vp8", + "../modules/video_coding:webrtc_vp9", "../rtc_base:checks", - "../rtc_base:protobuf_utils", - "../rtc_base:rtc_base_approved", - "../system_wrappers:field_trial", - "../test:field_trial", - "../test:fileutils", - "../test:test_support", - "//third_party/abseil-cpp/absl/algorithm:container", - "//third_party/abseil-cpp/absl/flags:config", + "../rtc_base:criticalsection", + "../rtc_base:logging", + "../rtc_base:rtc_event", + "../rtc_base:rtc_task_queue", + "../rtc_base/synchronization:mutex", + "../rtc_base/system:file_wrapper", + "../test:video_test_common", + "../test:video_test_support", + "//third_party/abseil-cpp/absl/debugging:failure_signal_handler", + "//third_party/abseil-cpp/absl/debugging:symbolize", "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/flags:parse", - "//third_party/abseil-cpp/absl/flags:usage", "//third_party/abseil-cpp/absl/strings", ] } - } - tools_unittests_resources = [ - "../resources/foreman_128x96.yuv", - "../resources/foreman_cif.yuv", - 
"../resources/reference_less_video_test_file.y4m", - ] + if (rtc_enable_protobuf) { + rtc_executable("event_log_visualizer") { + testonly = true + sources = [ "rtc_event_log_visualizer/main.cc" ] + data = [ + # If --wav_filename is not provided, event_log_visualizer uses + # EN_script2_F_sp2_B1.wav by default. This is a good default to use + # for example with flags --plot=all when there is no need to use a + # specific .wav file. + "../resources/audio_processing/conversational_speech/EN_script2_F_sp2_B1.wav", + ] + deps = [ + ":event_log_visualizer_utils", + "../api/neteq:neteq_api", + "../api/rtc_event_log", + "../logging:rtc_event_log_parser", + "../modules/audio_coding:neteq", + "../modules/rtp_rtcp:rtp_rtcp_format", + "../rtc_base:checks", + "../rtc_base:protobuf_utils", + "../rtc_base:rtc_base_approved", + "../system_wrappers:field_trial", + "../test:field_trial", + "../test:fileutils", + "../test:test_support", + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/flags:config", + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/flags:parse", + "//third_party/abseil-cpp/absl/flags:usage", + "//third_party/abseil-cpp/absl/strings", + ] + } + } - if (is_ios) { - bundle_data("tools_unittests_bundle_data") { - testonly = true - sources = tools_unittests_resources - outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ] + tools_unittests_resources = [ + "../resources/foreman_128x96.yuv", + "../resources/foreman_cif.yuv", + "../resources/reference_less_video_test_file.y4m", + ] + + if (is_ios) { + bundle_data("tools_unittests_bundle_data") { + testonly = true + sources = tools_unittests_resources + outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ] + } } - } - rtc_test("tools_unittests") { - testonly = true + rtc_test("tools_unittests") { + testonly = true - sources = [ - "frame_analyzer/linear_least_squares_unittest.cc", - "frame_analyzer/reference_less_video_analysis_unittest.cc", - 
"frame_analyzer/video_color_aligner_unittest.cc", - "frame_analyzer/video_geometry_aligner_unittest.cc", - "frame_analyzer/video_quality_analysis_unittest.cc", - "frame_analyzer/video_temporal_aligner_unittest.cc", - "sanitizers_unittest.cc", - "video_file_reader_unittest.cc", - "video_file_writer_unittest.cc", - ] + sources = [ + "frame_analyzer/linear_least_squares_unittest.cc", + "frame_analyzer/reference_less_video_analysis_unittest.cc", + "frame_analyzer/video_color_aligner_unittest.cc", + "frame_analyzer/video_geometry_aligner_unittest.cc", + "frame_analyzer/video_quality_analysis_unittest.cc", + "frame_analyzer/video_temporal_aligner_unittest.cc", + "sanitizers_unittest.cc", + "video_file_reader_unittest.cc", + "video_file_writer_unittest.cc", + ] - deps = [ - ":video_file_reader", - ":video_file_writer", - ":video_quality_analysis", - "../api:scoped_refptr", - "../api/video:video_frame", - "../api/video:video_frame_i420", - "../api/video:video_rtp_headers", - "../common_video", - "../rtc_base", - "../rtc_base:checks", - "../test:fileutils", - "../test:test_main", - "../test:test_support", - "//testing/gtest", - "//third_party/libyuv", - ] + deps = [ + ":video_file_reader", + ":video_file_writer", + ":video_quality_analysis", + "../api:scoped_refptr", + "../api/video:video_frame", + "../api/video:video_rtp_headers", + "../common_video", + "../rtc_base", + "../rtc_base:checks", + "../rtc_base:null_socket_server", + "../rtc_base:threading", + "../test:fileutils", + "../test:test_main", + "../test:test_support", + "//testing/gtest", + "//third_party/libyuv", + ] + + if (!build_with_chromium) { + deps += [ ":reference_less_video_analysis_lib" ] + } - if (!build_with_chromium) { - deps += [ ":reference_less_video_analysis_lib" ] + if (rtc_enable_protobuf) { + deps += [ "network_tester:network_tester_unittests" ] + } + + data = tools_unittests_resources + if (is_android) { + deps += [ "//testing/android/native_test:native_test_support" ] + shard_timeout = 900 + } 
+ if (is_ios) { + deps += [ ":tools_unittests_bundle_data" ] + } } if (rtc_enable_protobuf) { - deps += [ "network_tester:network_tester_unittests" ] - } + rtc_executable("audioproc_f") { + testonly = true + sources = [ "audioproc_f/audioproc_float_main.cc" ] + deps = [ + "../api:audioproc_f_api", + "../modules/audio_processing", + "../modules/audio_processing:api", + "../rtc_base:rtc_base_approved", + ] + } - data = tools_unittests_resources - if (is_android) { - deps += [ "//testing/android/native_test:native_test_support" ] - shard_timeout = 900 - } - if (is_ios) { - deps += [ ":tools_unittests_bundle_data" ] + rtc_executable("unpack_aecdump") { + visibility = [ "*" ] + testonly = true + sources = [ "unpack_aecdump/unpack.cc" ] + + deps = [ + "../api:function_view", + "../common_audio", + "../modules/audio_processing", + "../modules/audio_processing:audioproc_debug_proto", + "../modules/audio_processing:audioproc_debug_proto", + "../modules/audio_processing:audioproc_protobuf_utils", + "../modules/audio_processing:audioproc_test_utils", + "../rtc_base:ignore_wundef", + "../rtc_base:protobuf_utils", + "../rtc_base:rtc_base_approved", + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/flags:parse", + ] + } # unpack_aecdump } } if (rtc_enable_protobuf) { - rtc_executable("audioproc_f") { - testonly = true - sources = [ "audioproc_f/audioproc_float_main.cc" ] - deps = [ - "../api:audioproc_f_api", - "../modules/audio_processing", - "../modules/audio_processing:api", - "../rtc_base:rtc_base_approved", - ] - } - copy("rtp_analyzer") { sources = [ "py_event_log_analyzer/misc.py", @@ -527,26 +596,5 @@ if (rtc_include_tests) { outputs = [ "$root_build_dir/{{source_file_part}}" ] deps = [ "../logging:rtc_event_log_proto" ] } # rtp_analyzer - - rtc_executable("unpack_aecdump") { - visibility = [ "*" ] - testonly = true - sources = [ "unpack_aecdump/unpack.cc" ] - - deps = [ - "../api:function_view", - "../common_audio", - 
"../modules/audio_processing", - "../modules/audio_processing:audioproc_debug_proto", - "../modules/audio_processing:audioproc_debug_proto", - "../modules/audio_processing:audioproc_protobuf_utils", - "../modules/audio_processing:audioproc_test_utils", - "../rtc_base:ignore_wundef", - "../rtc_base:protobuf_utils", - "../rtc_base:rtc_base_approved", - "//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", - ] - } # unpack_aecdump } } diff --git a/rtc_tools/DEPS b/rtc_tools/DEPS index 5ccd86b63b..3cf6080c93 100644 --- a/rtc_tools/DEPS +++ b/rtc_tools/DEPS @@ -29,4 +29,7 @@ specific_include_rules = { "+modules/video_coding/utility/ivf_file_writer.h", "+modules/video_coding/codecs/h264/include/h264.h", ], + ".*video_replay\.cc": [ + "+modules/video_coding/utility/ivf_file_writer.h", + ], } diff --git a/rtc_tools/compare_videos.py b/rtc_tools/compare_videos.py index ee8cf455b2..a54eb42979 100755 --- a/rtc_tools/compare_videos.py +++ b/rtc_tools/compare_videos.py @@ -18,7 +18,6 @@ import sys import tempfile - SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) # Chrome browsertests will throw away stderr; avoid that output gets lost. @@ -26,131 +25,154 @@ def _ParseArgs(): - """Registers the command-line options.""" - usage = 'usage: %prog [options]' - parser = optparse.OptionParser(usage=usage) - - parser.add_option('--label', type='string', default='MY_TEST', - help=('Label of the test, used to identify different ' - 'tests. 
Default: %default')) - parser.add_option('--ref_video', type='string', - help='Reference video to compare with (YUV).') - parser.add_option('--test_video', type='string', - help=('Test video to be compared with the reference ' - 'video (YUV).')) - parser.add_option('--frame_analyzer', type='string', - help='Path to the frame analyzer executable.') - parser.add_option('--aligned_output_file', type='string', - help='Path for output aligned YUV or Y4M file.') - parser.add_option('--vmaf', type='string', - help='Path to VMAF executable.') - parser.add_option('--vmaf_model', type='string', - help='Path to VMAF model.') - parser.add_option('--vmaf_phone_model', action='store_true', - help='Whether to use phone model in VMAF.') - parser.add_option('--yuv_frame_width', type='int', default=640, - help='Width of the YUV file\'s frames. Default: %default') - parser.add_option('--yuv_frame_height', type='int', default=480, - help='Height of the YUV file\'s frames. Default: %default') - parser.add_option('--chartjson_result_file', type='str', default=None, - help='Where to store perf results in chartjson format.') - options, _ = parser.parse_args() - - if not options.ref_video: - parser.error('You must provide a path to the reference video!') - if not os.path.exists(options.ref_video): - parser.error('Cannot find the reference video at %s' % options.ref_video) - - if not options.test_video: - parser.error('You must provide a path to the test video!') - if not os.path.exists(options.test_video): - parser.error('Cannot find the test video at %s' % options.test_video) - - if not options.frame_analyzer: - parser.error('You must provide the path to the frame analyzer executable!') - if not os.path.exists(options.frame_analyzer): - parser.error('Cannot find frame analyzer executable at %s!' 
% - options.frame_analyzer) - - if options.vmaf and not options.vmaf_model: - parser.error('You must provide a path to a VMAF model to use VMAF.') - - return options + """Registers the command-line options.""" + usage = 'usage: %prog [options]' + parser = optparse.OptionParser(usage=usage) + + parser.add_option('--label', + type='string', + default='MY_TEST', + help=('Label of the test, used to identify different ' + 'tests. Default: %default')) + parser.add_option('--ref_video', + type='string', + help='Reference video to compare with (YUV).') + parser.add_option('--test_video', + type='string', + help=('Test video to be compared with the reference ' + 'video (YUV).')) + parser.add_option('--frame_analyzer', + type='string', + help='Path to the frame analyzer executable.') + parser.add_option('--aligned_output_file', + type='string', + help='Path for output aligned YUV or Y4M file.') + parser.add_option('--vmaf', type='string', help='Path to VMAF executable.') + parser.add_option('--vmaf_model', + type='string', + help='Path to VMAF model.') + parser.add_option('--vmaf_phone_model', + action='store_true', + help='Whether to use phone model in VMAF.') + parser.add_option( + '--yuv_frame_width', + type='int', + default=640, + help='Width of the YUV file\'s frames. Default: %default') + parser.add_option( + '--yuv_frame_height', + type='int', + default=480, + help='Height of the YUV file\'s frames. 
Default: %default') + parser.add_option('--chartjson_result_file', + type='str', + default=None, + help='Where to store perf results in chartjson format.') + options, _ = parser.parse_args() + + if not options.ref_video: + parser.error('You must provide a path to the reference video!') + if not os.path.exists(options.ref_video): + parser.error('Cannot find the reference video at %s' % + options.ref_video) + + if not options.test_video: + parser.error('You must provide a path to the test video!') + if not os.path.exists(options.test_video): + parser.error('Cannot find the test video at %s' % options.test_video) + + if not options.frame_analyzer: + parser.error( + 'You must provide the path to the frame analyzer executable!') + if not os.path.exists(options.frame_analyzer): + parser.error('Cannot find frame analyzer executable at %s!' % + options.frame_analyzer) + + if options.vmaf and not options.vmaf_model: + parser.error('You must provide a path to a VMAF model to use VMAF.') + + return options + def _DevNull(): - """On Windows, sometimes the inherited stdin handle from the parent process + """On Windows, sometimes the inherited stdin handle from the parent process fails. Workaround this by passing null to stdin to the subprocesses commands. This function can be used to create the null file handler. 
""" - return open(os.devnull, 'r') + return open(os.devnull, 'r') def _RunFrameAnalyzer(options, yuv_directory=None): - """Run frame analyzer to compare the videos and print output.""" - cmd = [ - options.frame_analyzer, - '--label=%s' % options.label, - '--reference_file=%s' % options.ref_video, - '--test_file=%s' % options.test_video, - '--width=%d' % options.yuv_frame_width, - '--height=%d' % options.yuv_frame_height, - ] - if options.chartjson_result_file: - cmd.append('--chartjson_result_file=%s' % options.chartjson_result_file) - if options.aligned_output_file: - cmd.append('--aligned_output_file=%s' % options.aligned_output_file) - if yuv_directory: - cmd.append('--yuv_directory=%s' % yuv_directory) - frame_analyzer = subprocess.Popen(cmd, stdin=_DevNull(), - stdout=sys.stdout, stderr=sys.stderr) - frame_analyzer.wait() - if frame_analyzer.returncode != 0: - print('Failed to run frame analyzer.') - return frame_analyzer.returncode + """Run frame analyzer to compare the videos and print output.""" + cmd = [ + options.frame_analyzer, + '--label=%s' % options.label, + '--reference_file=%s' % options.ref_video, + '--test_file=%s' % options.test_video, + '--width=%d' % options.yuv_frame_width, + '--height=%d' % options.yuv_frame_height, + ] + if options.chartjson_result_file: + cmd.append('--chartjson_result_file=%s' % + options.chartjson_result_file) + if options.aligned_output_file: + cmd.append('--aligned_output_file=%s' % options.aligned_output_file) + if yuv_directory: + cmd.append('--yuv_directory=%s' % yuv_directory) + frame_analyzer = subprocess.Popen(cmd, + stdin=_DevNull(), + stdout=sys.stdout, + stderr=sys.stderr) + frame_analyzer.wait() + if frame_analyzer.returncode != 0: + print('Failed to run frame analyzer.') + return frame_analyzer.returncode def _RunVmaf(options, yuv_directory, logfile): - """ Run VMAF to compare videos and print output. + """ Run VMAF to compare videos and print output. 
The yuv_directory is assumed to have been populated with a reference and test video in .yuv format, with names according to the label. """ - cmd = [ - options.vmaf, - 'yuv420p', - str(options.yuv_frame_width), - str(options.yuv_frame_height), - os.path.join(yuv_directory, "ref.yuv"), - os.path.join(yuv_directory, "test.yuv"), - options.vmaf_model, - '--log', - logfile, - '--log-fmt', - 'json', - ] - if options.vmaf_phone_model: - cmd.append('--phone-model') - - vmaf = subprocess.Popen(cmd, stdin=_DevNull(), - stdout=sys.stdout, stderr=sys.stderr) - vmaf.wait() - if vmaf.returncode != 0: - print('Failed to run VMAF.') - return 1 - - # Read per-frame scores from VMAF output and print. - with open(logfile) as f: - vmaf_data = json.load(f) - vmaf_scores = [] - for frame in vmaf_data['frames']: - vmaf_scores.append(frame['metrics']['vmaf']) - print('RESULT VMAF: %s=' % options.label, vmaf_scores) - - return 0 + cmd = [ + options.vmaf, + 'yuv420p', + str(options.yuv_frame_width), + str(options.yuv_frame_height), + os.path.join(yuv_directory, "ref.yuv"), + os.path.join(yuv_directory, "test.yuv"), + options.vmaf_model, + '--log', + logfile, + '--log-fmt', + 'json', + ] + if options.vmaf_phone_model: + cmd.append('--phone-model') + + vmaf = subprocess.Popen(cmd, + stdin=_DevNull(), + stdout=sys.stdout, + stderr=sys.stderr) + vmaf.wait() + if vmaf.returncode != 0: + print('Failed to run VMAF.') + return 1 + + # Read per-frame scores from VMAF output and print. + with open(logfile) as f: + vmaf_data = json.load(f) + vmaf_scores = [] + for frame in vmaf_data['frames']: + vmaf_scores.append(frame['metrics']['vmaf']) + print('RESULT VMAF: %s=' % options.label, vmaf_scores) + + return 0 def main(): - """The main function. + """The main function. 
A simple invocation is: ./webrtc/rtc_tools/compare_videos.py @@ -161,27 +183,28 @@ def main(): Running vmaf requires the following arguments: --vmaf, --vmaf_model, --yuv_frame_width, --yuv_frame_height """ - options = _ParseArgs() + options = _ParseArgs() - if options.vmaf: - try: - # Directory to save temporary YUV files for VMAF in frame_analyzer. - yuv_directory = tempfile.mkdtemp() - _, vmaf_logfile = tempfile.mkstemp() + if options.vmaf: + try: + # Directory to save temporary YUV files for VMAF in frame_analyzer. + yuv_directory = tempfile.mkdtemp() + _, vmaf_logfile = tempfile.mkstemp() - # Run frame analyzer to compare the videos and print output. - if _RunFrameAnalyzer(options, yuv_directory=yuv_directory) != 0: - return 1 + # Run frame analyzer to compare the videos and print output. + if _RunFrameAnalyzer(options, yuv_directory=yuv_directory) != 0: + return 1 + + # Run VMAF for further video comparison and print output. + return _RunVmaf(options, yuv_directory, vmaf_logfile) + finally: + shutil.rmtree(yuv_directory) + os.remove(vmaf_logfile) + else: + return _RunFrameAnalyzer(options) - # Run VMAF for further video comparison and print output. 
- return _RunVmaf(options, yuv_directory, vmaf_logfile) - finally: - shutil.rmtree(yuv_directory) - os.remove(vmaf_logfile) - else: - return _RunFrameAnalyzer(options) + return 0 - return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/rtc_tools/converter/yuv_to_ivf_converter.cc b/rtc_tools/converter/yuv_to_ivf_converter.cc index 6f15bd33a2..e4a1e125f8 100644 --- a/rtc_tools/converter/yuv_to_ivf_converter.cc +++ b/rtc_tools/converter/yuv_to_ivf_converter.cc @@ -30,9 +30,9 @@ #include "modules/video_coding/include/video_error_codes.h" #include "modules/video_coding/utility/ivf_file_writer.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/system/file_wrapper.h" #include "rtc_base/task_queue.h" #include "test/testsupport/frame_reader.h" @@ -74,11 +74,10 @@ class IvfFileWriterEncodedCallback : public EncodedImageCallback { ~IvfFileWriterEncodedCallback() { RTC_CHECK(file_writer_->Close()); } Result OnEncodedImage(const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override { + const CodecSpecificInfo* codec_specific_info) override { RTC_CHECK(file_writer_->WriteFrame(encoded_image, video_codec_type_)); - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); received_frames_count_++; RTC_CHECK_LE(received_frames_count_, expected_frames_count_); if (received_frames_count_ % kFrameLogInterval == 0) { @@ -99,7 +98,7 @@ class IvfFileWriterEncodedCallback : public EncodedImageCallback { const VideoCodecType video_codec_type_; const int expected_frames_count_; - rtc::CriticalSection lock_; + Mutex lock_; int received_frames_count_ RTC_GUARDED_BY(lock_) = 0; rtc::Event next_frame_written_; }; diff --git a/rtc_tools/frame_analyzer/video_geometry_aligner.cc b/rtc_tools/frame_analyzer/video_geometry_aligner.cc index 
db397bc3a5..88da26d4d0 100644 --- a/rtc_tools/frame_analyzer/video_geometry_aligner.cc +++ b/rtc_tools/frame_analyzer/video_geometry_aligner.cc @@ -61,7 +61,7 @@ rtc::scoped_refptr CropAndZoom( adjusted_frame->MutableDataY(), adjusted_frame->StrideY(), adjusted_frame->MutableDataU(), adjusted_frame->StrideU(), adjusted_frame->MutableDataV(), adjusted_frame->StrideV(), - frame->width(), frame->height(), libyuv::kFilterBilinear); + frame->width(), frame->height(), libyuv::kFilterBox); return adjusted_frame; } diff --git a/rtc_tools/metrics_plotter.py b/rtc_tools/metrics_plotter.py index 54ccee9c01..3b746ad8ee 100644 --- a/rtc_tools/metrics_plotter.py +++ b/rtc_tools/metrics_plotter.py @@ -24,6 +24,7 @@ } """ +import argparse import fileinput import json import matplotlib.pyplot as plt @@ -38,36 +39,59 @@ def main(): - metrics = [] - for line in fileinput.input(): - line = line.strip() - if line.startswith(LINE_PREFIX): - line = line.replace(LINE_PREFIX, '') - metrics.append(json.loads(line)) - else: - print line - - for metric in metrics: - figure = plt.figure() - figure.canvas.set_window_title(metric[TRACE_NAME]) - - x_values = [] - y_values = [] - start_x = None - for sample in metric['samples']: - if start_x is None: - start_x = sample['time'] - # Time is us, we want to show it in seconds. - x_values.append((sample['time'] - start_x) / MICROSECONDS_IN_SECOND) - y_values.append(sample['value']) - - plt.ylabel('%s (%s)' % (metric[GRAPH_NAME], metric[UNITS])) - plt.xlabel('time (s)') - plt.title(metric[GRAPH_NAME]) - plt.plot(x_values, y_values) - - plt.show() + parser = argparse.ArgumentParser( + description='Plots metrics exported from WebRTC perf tests') + parser.add_argument( + '-m', + '--metrics', + type=str, + nargs='*', + help= + 'Metrics to plot. 
If nothing specified then will plot all available') + args = parser.parse_args() + + metrics_to_plot = set() + if args.metrics: + for metric in args.metrics: + metrics_to_plot.add(metric) + + metrics = [] + for line in fileinput.input('-'): + line = line.strip() + if line.startswith(LINE_PREFIX): + line = line.replace(LINE_PREFIX, '') + metrics.append(json.loads(line)) + else: + print line + + for metric in metrics: + if len(metrics_to_plot + ) > 0 and metric[GRAPH_NAME] not in metrics_to_plot: + continue + + figure = plt.figure() + figure.canvas.set_window_title(metric[TRACE_NAME]) + + x_values = [] + y_values = [] + start_x = None + samples = metric['samples'] + samples.sort(key=lambda x: x['time']) + for sample in samples: + if start_x is None: + start_x = sample['time'] + # Time is us, we want to show it in seconds. + x_values.append( + (sample['time'] - start_x) / MICROSECONDS_IN_SECOND) + y_values.append(sample['value']) + + plt.ylabel('%s (%s)' % (metric[GRAPH_NAME], metric[UNITS])) + plt.xlabel('time (s)') + plt.title(metric[GRAPH_NAME]) + plt.plot(x_values, y_values) + + plt.show() if __name__ == '__main__': - main() + main() diff --git a/rtc_tools/network_tester/BUILD.gn b/rtc_tools/network_tester/BUILD.gn index e02f5d4242..f7982d3eef 100644 --- a/rtc_tools/network_tester/BUILD.gn +++ b/rtc_tools/network_tester/BUILD.gn @@ -39,19 +39,24 @@ if (rtc_enable_protobuf) { deps = [ ":network_tester_config_proto", ":network_tester_packet_proto", + "../../api:sequence_checker", "../../api/task_queue", "../../api/task_queue:default_task_queue_factory", "../../p2p", "../../rtc_base", "../../rtc_base:checks", "../../rtc_base:ignore_wundef", + "../../rtc_base:ip_address", "../../rtc_base:protobuf_utils", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_task_queue", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base:socket_address", + "../../rtc_base:threading", + "../../rtc_base/synchronization:mutex", + 
"../../rtc_base/system:no_unique_address", "../../rtc_base/third_party/sigslot", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } network_tester_unittests_resources = [ @@ -101,7 +106,7 @@ if (is_android) { testonly = true apk_name = "NetworkTesterMobile" android_manifest = "androidapp/AndroidManifest.xml" - min_sdk_version = 16 + min_sdk_version = 21 target_sdk_version = 24 deps = [ @@ -122,6 +127,7 @@ if (is_android) { "androidapp/src/com/google/media/networktester/NetworkTester.java", ] + resources_package = "com.google.media.networktester" deps = [ ":NetworkTesterMobile_resources", "../../rtc_base:base_java", @@ -138,11 +144,11 @@ if (is_android) { "androidapp/res/mipmap-xhdpi/ic_launcher.png", "androidapp/res/mipmap-xxhdpi/ic_launcher.png", "androidapp/res/mipmap-xxxhdpi/ic_launcher.png", + "androidapp/res/values-v17/styles.xml", + "androidapp/res/values-w820dp/dimens.xml", "androidapp/res/values/colors.xml", "androidapp/res/values/dimens.xml", "androidapp/res/values/strings.xml", - "androidapp/res/values-v17/styles.xml", - "androidapp/res/values-w820dp/dimens.xml", ] # Needed for Bazel converter. 
diff --git a/rtc_tools/network_tester/androidapp/AndroidManifest.xml b/rtc_tools/network_tester/androidapp/AndroidManifest.xml index 3839955458..1ff519396b 100755 --- a/rtc_tools/network_tester/androidapp/AndroidManifest.xml +++ b/rtc_tools/network_tester/androidapp/AndroidManifest.xml @@ -4,7 +4,7 @@ - #include +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_factory.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/ignore_wundef.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" #ifdef WEBRTC_NETWORK_TESTER_PROTO @@ -49,7 +50,7 @@ class PacketSender { void UpdateTestSetting(size_t packet_size, int64_t send_interval_ms); private: - SequenceChecker worker_queue_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_queue_checker_; size_t packet_size_ RTC_GUARDED_BY(worker_queue_checker_); int64_t send_interval_ms_ RTC_GUARDED_BY(worker_queue_checker_); int64_t sequence_number_ RTC_GUARDED_BY(worker_queue_checker_); diff --git a/rtc_tools/network_tester/parse_packet_log.py b/rtc_tools/network_tester/parse_packet_log.py index 98fd0f6964..be86e0c88d 100755 --- a/rtc_tools/network_tester/parse_packet_log.py +++ b/rtc_tools/network_tester/parse_packet_log.py @@ -20,128 +20,131 @@ import network_tester_packet_pb2 + def GetSize(file_to_parse): - data = file_to_parse.read(1) - if data == '': - return 0 - return struct.unpack(' - self.window_time): - self.bytes = self.bytes - packet.packet_size - self.packet_window.remove(packet) - - def AddPacket(self, packet): - """This functions returns bits / second""" - self.send_interval = packet.arrival_timestamp - self.latest_packet_time - self.latest_packet_time = packet.arrival_timestamp - self.RemoveOldPackets() - self.packet_window.append(packet) - self.bytes = self.bytes + packet.packet_size - return self.bytes * 8 + def __init__(self): + self.packet_window = [] + self.window_time = 1000000 + self.bytes = 
0 + self.latest_packet_time = 0 + self.send_interval = 0 + + def RemoveOldPackets(self): + for packet in self.packet_window: + if (self.latest_packet_time - packet.arrival_timestamp > + self.window_time): + self.bytes = self.bytes - packet.packet_size + self.packet_window.remove(packet) + + def AddPacket(self, packet): + """This functions returns bits / second""" + self.send_interval = packet.arrival_timestamp - self.latest_packet_time + self.latest_packet_time = packet.arrival_timestamp + self.RemoveOldPackets() + self.packet_window.append(packet) + self.bytes = self.bytes + packet.packet_size + return self.bytes * 8 def CreateReceiveBiratePlot(packets, plot): - bitrate = MovingAverageBitrate() - y = [bitrate.AddPacket(packet) for packet in packets] - plot.grid(True) - plot.set_title("Receive birate [bps]") - plot.plot(GetTimeAxis(packets), y) + bitrate = MovingAverageBitrate() + y = [bitrate.AddPacket(packet) for packet in packets] + plot.grid(True) + plot.set_title("Receive birate [bps]") + plot.plot(GetTimeAxis(packets), y) def CreatePacketlossPlot(packets, plot): - packets_look_up = {} - first_sequence_number = packets[0].sequence_number - last_sequence_number = packets[-1].sequence_number - for packet in packets: - packets_look_up[packet.sequence_number] = packet - y = [] - x = [] - first_arrival_time = 0 - last_arrival_time = 0 - last_arrival_time_diff = 0 - for sequence_number in range(first_sequence_number, last_sequence_number + 1): - if sequence_number in packets_look_up: - y.append(0) - if first_arrival_time == 0: - first_arrival_time = packets_look_up[sequence_number].arrival_timestamp - x_time = (packets_look_up[sequence_number].arrival_timestamp - - first_arrival_time) - if last_arrival_time != 0: - last_arrival_time_diff = x_time - last_arrival_time - last_arrival_time = x_time - x.append(x_time / 1000000.0) - else: - if last_arrival_time != 0 and last_arrival_time_diff != 0: - x.append((last_arrival_time + last_arrival_time_diff) / 1000000.0) - 
y.append(1) - plot.grid(True) - plot.set_title("Lost packets [0/1]") - plot.plot(x, y) + packets_look_up = {} + first_sequence_number = packets[0].sequence_number + last_sequence_number = packets[-1].sequence_number + for packet in packets: + packets_look_up[packet.sequence_number] = packet + y = [] + x = [] + first_arrival_time = 0 + last_arrival_time = 0 + last_arrival_time_diff = 0 + for sequence_number in range(first_sequence_number, + last_sequence_number + 1): + if sequence_number in packets_look_up: + y.append(0) + if first_arrival_time == 0: + first_arrival_time = packets_look_up[ + sequence_number].arrival_timestamp + x_time = (packets_look_up[sequence_number].arrival_timestamp - + first_arrival_time) + if last_arrival_time != 0: + last_arrival_time_diff = x_time - last_arrival_time + last_arrival_time = x_time + x.append(x_time / 1000000.0) + else: + if last_arrival_time != 0 and last_arrival_time_diff != 0: + x.append( + (last_arrival_time + last_arrival_time_diff) / 1000000.0) + y.append(1) + plot.grid(True) + plot.set_title("Lost packets [0/1]") + plot.plot(x, y) def main(): - parser = OptionParser() - parser.add_option("-f", - "--packet_log_file", - dest="packet_log_file", - help="packet_log file to parse") + parser = OptionParser() + parser.add_option("-f", + "--packet_log_file", + dest="packet_log_file", + help="packet_log file to parse") - options = parser.parse_args()[0] + options = parser.parse_args()[0] - packets = ParsePacketLog(options.packet_log_file) - f, plots = plt.subplots(3, sharex=True) - plt.xlabel('time [sec]') - CreateSendTimeDiffPlot(packets, plots[0]) - CreateReceiveBiratePlot(packets, plots[1]) - CreatePacketlossPlot(packets, plots[2]) - f.subplots_adjust(hspace=0.3) - plt.show() + packets = ParsePacketLog(options.packet_log_file) + f, plots = plt.subplots(3, sharex=True) + plt.xlabel('time [sec]') + CreateSendTimeDiffPlot(packets, plots[0]) + CreateReceiveBiratePlot(packets, plots[1]) + CreatePacketlossPlot(packets, plots[2]) + 
f.subplots_adjust(hspace=0.3) + plt.show() if __name__ == "__main__": - main() + main() diff --git a/rtc_tools/network_tester/test_controller.cc b/rtc_tools/network_tester/test_controller.cc index 49b470ce5f..85a5a57bc0 100644 --- a/rtc_tools/network_tester/test_controller.cc +++ b/rtc_tools/network_tester/test_controller.cc @@ -43,7 +43,7 @@ void TestController::SendConnectTo(const std::string& hostname, int port) { NetworkTesterPacket packet; packet.set_type(NetworkTesterPacket::HAND_SHAKING); SendData(packet, absl::nullopt); - rtc::CritScope scoped_lock(&local_test_done_lock_); + MutexLock scoped_lock(&local_test_done_lock_); local_test_done_ = false; remote_test_done_ = false; } @@ -71,13 +71,13 @@ void TestController::OnTestDone() { NetworkTesterPacket packet; packet.set_type(NetworkTesterPacket::TEST_DONE); SendData(packet, absl::nullopt); - rtc::CritScope scoped_lock(&local_test_done_lock_); + MutexLock scoped_lock(&local_test_done_lock_); local_test_done_ = true; } bool TestController::IsTestDone() { RTC_DCHECK_RUN_ON(&test_controller_thread_checker_); - rtc::CritScope scoped_lock(&local_test_done_lock_); + MutexLock scoped_lock(&local_test_done_lock_); return local_test_done_ && remote_test_done_; } @@ -100,7 +100,7 @@ void TestController::OnReadPacket(rtc::AsyncPacketSocket* socket, SendData(packet, absl::nullopt); packet_sender_.reset(new PacketSender(this, config_file_path_)); packet_sender_->StartSending(); - rtc::CritScope scoped_lock(&local_test_done_lock_); + MutexLock scoped_lock(&local_test_done_lock_); local_test_done_ = false; remote_test_done_ = false; break; @@ -108,7 +108,7 @@ void TestController::OnReadPacket(rtc::AsyncPacketSocket* socket, case NetworkTesterPacket::TEST_START: { packet_sender_.reset(new PacketSender(this, config_file_path_)); packet_sender_->StartSending(); - rtc::CritScope scoped_lock(&local_test_done_lock_); + MutexLock scoped_lock(&local_test_done_lock_); local_test_done_ = false; remote_test_done_ = false; break; diff 
--git a/rtc_tools/network_tester/test_controller.h b/rtc_tools/network_tester/test_controller.h index d04158d934..50055fcf4c 100644 --- a/rtc_tools/network_tester/test_controller.h +++ b/rtc_tools/network_tester/test_controller.h @@ -19,23 +19,21 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "p2p/base/basic_packet_socket_factory.h" #include "rtc_base/async_packet_socket.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/ignore_wundef.h" #include "rtc_base/socket_address.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/third_party/sigslot/sigslot.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "rtc_tools/network_tester/packet_logger.h" #include "rtc_tools/network_tester/packet_sender.h" #ifdef WEBRTC_NETWORK_TESTER_PROTO RTC_PUSH_IGNORING_WUNDEF() #include "rtc_tools/network_tester/network_tester_packet.pb.h" - RTC_POP_IGNORING_WUNDEF() using webrtc::network_tester::packet::NetworkTesterPacket; #else @@ -70,12 +68,12 @@ class TestController : public sigslot::has_slots<> { size_t len, const rtc::SocketAddress& remote_addr, const int64_t& packet_time_us); - rtc::ThreadChecker test_controller_thread_checker_; + SequenceChecker test_controller_thread_checker_; SequenceChecker packet_sender_checker_; rtc::BasicPacketSocketFactory socket_factory_; const std::string config_file_path_; PacketLogger packet_logger_; - rtc::CriticalSection local_test_done_lock_; + Mutex local_test_done_lock_; bool local_test_done_ RTC_GUARDED_BY(local_test_done_lock_); bool remote_test_done_; std::array send_data_; diff --git a/rtc_tools/py_event_log_analyzer/misc.py b/rtc_tools/py_event_log_analyzer/misc.py index 629497c018..c21f0c466b 100644 --- a/rtc_tools/py_event_log_analyzer/misc.py +++ b/rtc_tools/py_event_log_analyzer/misc.py @@ -5,7 +5,6 @@ # tree. 
An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Utility functions for calculating statistics. """ @@ -15,18 +14,17 @@ def CountReordered(sequence_numbers): - """Returns number of reordered indices. + """Returns number of reordered indices. A reordered index is an index `i` for which sequence_numbers[i] >= sequence_numbers[i + 1] """ - return sum(1 for (s1, s2) in zip(sequence_numbers, - sequence_numbers[1:]) if - s1 >= s2) + return sum(1 for (s1, s2) in zip(sequence_numbers, sequence_numbers[1:]) + if s1 >= s2) def SsrcNormalizedSizeTable(data_points): - """Counts proportion of data for every SSRC. + """Counts proportion of data for every SSRC. Args: data_points: list of pb_parse.DataPoint @@ -37,14 +35,14 @@ def SsrcNormalizedSizeTable(data_points): SSRC `s` to the total size of all packets. """ - mapping = collections.defaultdict(int) - for point in data_points: - mapping[point.ssrc] += point.size - return NormalizeCounter(mapping) + mapping = collections.defaultdict(int) + for point in data_points: + mapping[point.ssrc] += point.size + return NormalizeCounter(mapping) def NormalizeCounter(counter): - """Returns a normalized version of the dictionary `counter`. + """Returns a normalized version of the dictionary `counter`. Does not modify `counter`. @@ -52,12 +50,12 @@ def NormalizeCounter(counter): A new dictionary, in which every value in `counter` has been divided by the total to sum up to 1. """ - total = sum(counter.values()) - return {key: counter[key] / total for key in counter} + total = sum(counter.values()) + return {key: counter[key] / total for key in counter} def Unwrap(data, mod): - """Returns `data` unwrapped modulo `mod`. Does not modify data. + """Returns `data` unwrapped modulo `mod`. Does not modify data. 
Adds integer multiples of mod to all elements of data except the first, such that all pairs of consecutive elements (a, b) satisfy @@ -66,22 +64,22 @@ def Unwrap(data, mod): E.g. Unwrap([0, 1, 2, 0, 1, 2, 7, 8], 3) -> [0, 1, 2, 3, 4, 5, 4, 5] """ - lst = data[:] - for i in range(1, len(data)): - lst[i] = lst[i - 1] + (lst[i] - lst[i - 1] + - mod // 2) % mod - (mod // 2) - return lst + lst = data[:] + for i in range(1, len(data)): + lst[i] = lst[i - 1] + (lst[i] - lst[i - 1] + mod // 2) % mod - (mod // + 2) + return lst def SsrcDirections(data_points): - ssrc_is_incoming = {} - for point in data_points: - ssrc_is_incoming[point.ssrc] = point.incoming - return ssrc_is_incoming + ssrc_is_incoming = {} + for point in data_points: + ssrc_is_incoming[point.ssrc] = point.incoming + return ssrc_is_incoming # Python 2/3-compatible input function if sys.version_info[0] <= 2: - get_input = raw_input # pylint: disable=invalid-name + get_input = raw_input # pylint: disable=invalid-name else: - get_input = input # pylint: disable=invalid-name + get_input = input # pylint: disable=invalid-name diff --git a/rtc_tools/py_event_log_analyzer/misc_test.py b/rtc_tools/py_event_log_analyzer/misc_test.py index 33449a7076..e855dc7d11 100755 --- a/rtc_tools/py_event_log_analyzer/misc_test.py +++ b/rtc_tools/py_event_log_analyzer/misc_test.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
- """Run the tests with python misc_test.py @@ -22,51 +21,52 @@ class TestMisc(unittest.TestCase): - def testUnwrapMod3(self): - data = [0, 1, 2, 0, -1, -2, -3, -4] - unwrapped_3 = misc.Unwrap(data, 3) - self.assertEqual([0, 1, 2, 3, 2, 1, 0, -1], unwrapped_3) + def testUnwrapMod3(self): + data = [0, 1, 2, 0, -1, -2, -3, -4] + unwrapped_3 = misc.Unwrap(data, 3) + self.assertEqual([0, 1, 2, 3, 2, 1, 0, -1], unwrapped_3) + + def testUnwrapMod4(self): + data = [0, 1, 2, 0, -1, -2, -3, -4] + unwrapped_4 = misc.Unwrap(data, 4) + self.assertEqual([0, 1, 2, 0, -1, -2, -3, -4], unwrapped_4) - def testUnwrapMod4(self): - data = [0, 1, 2, 0, -1, -2, -3, -4] - unwrapped_4 = misc.Unwrap(data, 4) - self.assertEqual([0, 1, 2, 0, -1, -2, -3, -4], unwrapped_4) + def testDataShouldNotChangeAfterUnwrap(self): + data = [0, 1, 2, 0, -1, -2, -3, -4] + _ = misc.Unwrap(data, 4) - def testDataShouldNotChangeAfterUnwrap(self): - data = [0, 1, 2, 0, -1, -2, -3, -4] - _ = misc.Unwrap(data, 4) + self.assertEqual([0, 1, 2, 0, -1, -2, -3, -4], data) - self.assertEqual([0, 1, 2, 0, -1, -2, -3, -4], data) + def testRandomlyMultiplesOfModAdded(self): + # `unwrap` definition says only multiples of mod are added. + random_data = [random.randint(0, 9) for _ in range(100)] - def testRandomlyMultiplesOfModAdded(self): - # `unwrap` definition says only multiples of mod are added. - random_data = [random.randint(0, 9) for _ in range(100)] + for mod in range(1, 100): + random_data_unwrapped_mod = misc.Unwrap(random_data, mod) - for mod in range(1, 100): - random_data_unwrapped_mod = misc.Unwrap(random_data, mod) + for (old_a, a) in zip(random_data, random_data_unwrapped_mod): + self.assertEqual((old_a - a) % mod, 0) - for (old_a, a) in zip(random_data, random_data_unwrapped_mod): - self.assertEqual((old_a - a) % mod, 0) + def testRandomlyAgainstInequalityDefinition(self): + # Data has to satisfy -mod/2 <= difference < mod/2 for every + # difference between consecutive values after unwrap. 
+ random_data = [random.randint(0, 9) for _ in range(100)] - def testRandomlyAgainstInequalityDefinition(self): - # Data has to satisfy -mod/2 <= difference < mod/2 for every - # difference between consecutive values after unwrap. - random_data = [random.randint(0, 9) for _ in range(100)] + for mod in range(1, 100): + random_data_unwrapped_mod = misc.Unwrap(random_data, mod) - for mod in range(1, 100): - random_data_unwrapped_mod = misc.Unwrap(random_data, mod) + for (a, b) in zip(random_data_unwrapped_mod, + random_data_unwrapped_mod[1:]): + self.assertTrue(-mod / 2 <= b - a < mod / 2) - for (a, b) in zip(random_data_unwrapped_mod, - random_data_unwrapped_mod[1:]): - self.assertTrue(-mod / 2 <= b - a < mod / 2) + def testRandomlyDataShouldNotChangeAfterUnwrap(self): + random_data = [random.randint(0, 9) for _ in range(100)] + random_data_copy = random_data[:] + for mod in range(1, 100): + _ = misc.Unwrap(random_data, mod) - def testRandomlyDataShouldNotChangeAfterUnwrap(self): - random_data = [random.randint(0, 9) for _ in range(100)] - random_data_copy = random_data[:] - for mod in range(1, 100): - _ = misc.Unwrap(random_data, mod) + self.assertEqual(random_data, random_data_copy) - self.assertEqual(random_data, random_data_copy) if __name__ == "__main__": - unittest.main() + unittest.main() diff --git a/rtc_tools/py_event_log_analyzer/pb_parse.py b/rtc_tools/py_event_log_analyzer/pb_parse.py index bc835ae023..23e6ae4487 100644 --- a/rtc_tools/py_event_log_analyzer/pb_parse.py +++ b/rtc_tools/py_event_log_analyzer/pb_parse.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
- """Parses protobuf RTC dumps.""" from __future__ import division @@ -14,26 +13,26 @@ class DataPoint(object): - """Simple container class for RTP events.""" + """Simple container class for RTP events.""" - def __init__(self, rtp_header_str, packet_size, - arrival_timestamp_us, incoming): - """Builds a data point by parsing an RTP header, size and arrival time. + def __init__(self, rtp_header_str, packet_size, arrival_timestamp_us, + incoming): + """Builds a data point by parsing an RTP header, size and arrival time. RTP header structure is defined in RFC 3550 section 5.1. """ - self.size = packet_size - self.arrival_timestamp_ms = arrival_timestamp_us / 1000 - self.incoming = incoming - header = struct.unpack_from("!HHII", rtp_header_str, 0) - (first2header_bytes, self.sequence_number, self.timestamp, - self.ssrc) = header - self.payload_type = first2header_bytes & 0b01111111 - self.marker_bit = (first2header_bytes & 0b10000000) >> 7 + self.size = packet_size + self.arrival_timestamp_ms = arrival_timestamp_us / 1000 + self.incoming = incoming + header = struct.unpack_from("!HHII", rtp_header_str, 0) + (first2header_bytes, self.sequence_number, self.timestamp, + self.ssrc) = header + self.payload_type = first2header_bytes & 0b01111111 + self.marker_bit = (first2header_bytes & 0b10000000) >> 7 def ParseProtobuf(file_path): - """Parses RTC event log from protobuf file. + """Parses RTC event log from protobuf file. 
Args: file_path: path to protobuf file of RTC event stream @@ -41,12 +40,12 @@ def ParseProtobuf(file_path): Returns: all RTP packet events from the event stream as a list of DataPoints """ - event_stream = rtc_pb.EventStream() - with open(file_path, "rb") as f: - event_stream.ParseFromString(f.read()) - - return [DataPoint(event.rtp_packet.header, - event.rtp_packet.packet_length, - event.timestamp_us, event.rtp_packet.incoming) - for event in event_stream.stream - if event.HasField("rtp_packet")] + event_stream = rtc_pb.EventStream() + with open(file_path, "rb") as f: + event_stream.ParseFromString(f.read()) + + return [ + DataPoint(event.rtp_packet.header, event.rtp_packet.packet_length, + event.timestamp_us, event.rtp_packet.incoming) + for event in event_stream.stream if event.HasField("rtp_packet") + ] diff --git a/rtc_tools/py_event_log_analyzer/rtp_analyzer.py b/rtc_tools/py_event_log_analyzer/rtp_analyzer.py index ebf4d7fb2a..53f413552a 100644 --- a/rtc_tools/py_event_log_analyzer/rtp_analyzer.py +++ b/rtc_tools/py_event_log_analyzer/rtp_analyzer.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Displays statistics and plots graphs from RTC protobuf dump.""" from __future__ import division @@ -24,13 +23,13 @@ class RTPStatistics(object): - """Has methods for calculating and plotting RTP stream statistics.""" + """Has methods for calculating and plotting RTP stream statistics.""" - BANDWIDTH_SMOOTHING_WINDOW_SIZE = 10 - PLOT_RESOLUTION_MS = 50 + BANDWIDTH_SMOOTHING_WINDOW_SIZE = 10 + PLOT_RESOLUTION_MS = 50 - def __init__(self, data_points): - """Initializes object with data_points and computes simple statistics. + def __init__(self, data_points): + """Initializes object with data_points and computes simple statistics. Computes percentages of number of packets and packet sizes by SSRC. 
@@ -41,238 +40,245 @@ def __init__(self, data_points): """ - self.data_points = data_points - self.ssrc_frequencies = misc.NormalizeCounter( - collections.Counter([pt.ssrc for pt in self.data_points])) - self.ssrc_size_table = misc.SsrcNormalizedSizeTable(self.data_points) - self.bandwidth_kbps = None - self.smooth_bw_kbps = None - - def PrintHeaderStatistics(self): - print("{:>6}{:>14}{:>14}{:>6}{:>6}{:>3}{:>11}".format( - "SeqNo", "TimeStamp", "SendTime", "Size", "PT", "M", "SSRC")) - for point in self.data_points: - print("{:>6}{:>14}{:>14}{:>6}{:>6}{:>3}{:>11}".format( - point.sequence_number, point.timestamp, - int(point.arrival_timestamp_ms), point.size, point.payload_type, - point.marker_bit, "0x{:x}".format(point.ssrc))) - - def PrintSsrcInfo(self, ssrc_id, ssrc): - """Prints packet and size statistics for a given SSRC. + self.data_points = data_points + self.ssrc_frequencies = misc.NormalizeCounter( + collections.Counter([pt.ssrc for pt in self.data_points])) + self.ssrc_size_table = misc.SsrcNormalizedSizeTable(self.data_points) + self.bandwidth_kbps = None + self.smooth_bw_kbps = None + + def PrintHeaderStatistics(self): + print("{:>6}{:>14}{:>14}{:>6}{:>6}{:>3}{:>11}".format( + "SeqNo", "TimeStamp", "SendTime", "Size", "PT", "M", "SSRC")) + for point in self.data_points: + print("{:>6}{:>14}{:>14}{:>6}{:>6}{:>3}{:>11}".format( + point.sequence_number, point.timestamp, + int(point.arrival_timestamp_ms), point.size, + point.payload_type, point.marker_bit, + "0x{:x}".format(point.ssrc))) + + def PrintSsrcInfo(self, ssrc_id, ssrc): + """Prints packet and size statistics for a given SSRC. Args: ssrc_id: textual identifier of SSRC printed beside statistics for it. 
ssrc: SSRC by which to filter data and display statistics """ - filtered_ssrc = [point for point in self.data_points if point.ssrc - == ssrc] - payloads = misc.NormalizeCounter( - collections.Counter([point.payload_type for point in - filtered_ssrc])) - - payload_info = "payload type(s): {}".format( - ", ".join(str(payload) for payload in payloads)) - print("{} 0x{:x} {}, {:.2f}% packets, {:.2f}% data".format( - ssrc_id, ssrc, payload_info, self.ssrc_frequencies[ssrc] * 100, - self.ssrc_size_table[ssrc] * 100)) - print(" packet sizes:") - (bin_counts, bin_bounds) = numpy.histogram([point.size for point in - filtered_ssrc], bins=5, - density=False) - bin_proportions = bin_counts / sum(bin_counts) - print("\n".join([ - " {:.1f} - {:.1f}: {:.2f}%".format(bin_bounds[i], bin_bounds[i + 1], - bin_proportions[i] * 100) - for i in range(len(bin_proportions)) - ])) - - def ChooseSsrc(self): - """Queries user for SSRC.""" - - if len(self.ssrc_frequencies) == 1: - chosen_ssrc = self.ssrc_frequencies.keys()[0] - self.PrintSsrcInfo("", chosen_ssrc) - return chosen_ssrc - - ssrc_is_incoming = misc.SsrcDirections(self.data_points) - incoming = [ssrc for ssrc in ssrc_is_incoming if ssrc_is_incoming[ssrc]] - outgoing = [ssrc for ssrc in ssrc_is_incoming if not ssrc_is_incoming[ssrc]] - - print("\nIncoming:\n") - for (i, ssrc) in enumerate(incoming): - self.PrintSsrcInfo(i, ssrc) - - print("\nOutgoing:\n") - for (i, ssrc) in enumerate(outgoing): - self.PrintSsrcInfo(i + len(incoming), ssrc) - - while True: - chosen_index = int(misc.get_input("choose one> ")) - if 0 <= chosen_index < len(self.ssrc_frequencies): - return (incoming + outgoing)[chosen_index] - else: - print("Invalid index!") - - def FilterSsrc(self, chosen_ssrc): - """Filters and wraps data points. 
+ filtered_ssrc = [ + point for point in self.data_points if point.ssrc == ssrc + ] + payloads = misc.NormalizeCounter( + collections.Counter( + [point.payload_type for point in filtered_ssrc])) + + payload_info = "payload type(s): {}".format(", ".join( + str(payload) for payload in payloads)) + print("{} 0x{:x} {}, {:.2f}% packets, {:.2f}% data".format( + ssrc_id, ssrc, payload_info, self.ssrc_frequencies[ssrc] * 100, + self.ssrc_size_table[ssrc] * 100)) + print(" packet sizes:") + (bin_counts, + bin_bounds) = numpy.histogram([point.size for point in filtered_ssrc], + bins=5, + density=False) + bin_proportions = bin_counts / sum(bin_counts) + print("\n".join([ + " {:.1f} - {:.1f}: {:.2f}%".format(bin_bounds[i], + bin_bounds[i + 1], + bin_proportions[i] * 100) + for i in range(len(bin_proportions)) + ])) + + def ChooseSsrc(self): + """Queries user for SSRC.""" + + if len(self.ssrc_frequencies) == 1: + chosen_ssrc = self.ssrc_frequencies.keys()[0] + self.PrintSsrcInfo("", chosen_ssrc) + return chosen_ssrc + + ssrc_is_incoming = misc.SsrcDirections(self.data_points) + incoming = [ + ssrc for ssrc in ssrc_is_incoming if ssrc_is_incoming[ssrc] + ] + outgoing = [ + ssrc for ssrc in ssrc_is_incoming if not ssrc_is_incoming[ssrc] + ] + + print("\nIncoming:\n") + for (i, ssrc) in enumerate(incoming): + self.PrintSsrcInfo(i, ssrc) + + print("\nOutgoing:\n") + for (i, ssrc) in enumerate(outgoing): + self.PrintSsrcInfo(i + len(incoming), ssrc) + + while True: + chosen_index = int(misc.get_input("choose one> ")) + if 0 <= chosen_index < len(self.ssrc_frequencies): + return (incoming + outgoing)[chosen_index] + else: + print("Invalid index!") + + def FilterSsrc(self, chosen_ssrc): + """Filters and wraps data points. Removes data points with `ssrc != chosen_ssrc`. Unwraps sequence numbers and timestamps for the chosen selection. 
""" - self.data_points = [point for point in self.data_points if - point.ssrc == chosen_ssrc] - unwrapped_sequence_numbers = misc.Unwrap( - [point.sequence_number for point in self.data_points], 2**16 - 1) - for (data_point, sequence_number) in zip(self.data_points, - unwrapped_sequence_numbers): - data_point.sequence_number = sequence_number - - unwrapped_timestamps = misc.Unwrap([point.timestamp for point in - self.data_points], 2**32 - 1) - - for (data_point, timestamp) in zip(self.data_points, - unwrapped_timestamps): - data_point.timestamp = timestamp - - def PrintSequenceNumberStatistics(self): - seq_no_set = set(point.sequence_number for point in - self.data_points) - missing_sequence_numbers = max(seq_no_set) - min(seq_no_set) + ( - 1 - len(seq_no_set)) - print("Missing sequence numbers: {} out of {} ({:.2f}%)".format( - missing_sequence_numbers, - len(seq_no_set), - 100 * missing_sequence_numbers / len(seq_no_set) - )) - print("Duplicated packets: {}".format(len(self.data_points) - - len(seq_no_set))) - print("Reordered packets: {}".format( - misc.CountReordered([point.sequence_number for point in - self.data_points]))) - - def EstimateFrequency(self, always_query_sample_rate): - """Estimates frequency and updates data. 
+ self.data_points = [ + point for point in self.data_points if point.ssrc == chosen_ssrc + ] + unwrapped_sequence_numbers = misc.Unwrap( + [point.sequence_number for point in self.data_points], 2**16 - 1) + for (data_point, sequence_number) in zip(self.data_points, + unwrapped_sequence_numbers): + data_point.sequence_number = sequence_number + + unwrapped_timestamps = misc.Unwrap( + [point.timestamp for point in self.data_points], 2**32 - 1) + + for (data_point, timestamp) in zip(self.data_points, + unwrapped_timestamps): + data_point.timestamp = timestamp + + def PrintSequenceNumberStatistics(self): + seq_no_set = set(point.sequence_number for point in self.data_points) + missing_sequence_numbers = max(seq_no_set) - min(seq_no_set) + ( + 1 - len(seq_no_set)) + print("Missing sequence numbers: {} out of {} ({:.2f}%)".format( + missing_sequence_numbers, len(seq_no_set), + 100 * missing_sequence_numbers / len(seq_no_set))) + print("Duplicated packets: {}".format( + len(self.data_points) - len(seq_no_set))) + print("Reordered packets: {}".format( + misc.CountReordered( + [point.sequence_number for point in self.data_points]))) + + def EstimateFrequency(self, always_query_sample_rate): + """Estimates frequency and updates data. Guesses the most probable frequency by looking at changes in timestamps (RFC 3550 section 5.1), calculates clock drifts and sending time of packets. Updates `self.data_points` with changes in delay and send time. 
""" - delta_timestamp = (self.data_points[-1].timestamp - - self.data_points[0].timestamp) - delta_arr_timestamp = float((self.data_points[-1].arrival_timestamp_ms - - self.data_points[0].arrival_timestamp_ms)) - freq_est = delta_timestamp / delta_arr_timestamp - - freq_vec = [8, 16, 32, 48, 90] - freq = None - for f in freq_vec: - if abs((freq_est - f) / f) < 0.05: - freq = f - - print("Estimated frequency: {:.3f}kHz".format(freq_est)) - if freq is None or always_query_sample_rate: - if not always_query_sample_rate: - print ("Frequency could not be guessed.", end=" ") - freq = int(misc.get_input("Input frequency (in kHz)> ")) - else: - print("Guessed frequency: {}kHz".format(freq)) - - for point in self.data_points: - point.real_send_time_ms = (point.timestamp - - self.data_points[0].timestamp) / freq - point.delay = point.arrival_timestamp_ms - point.real_send_time_ms - - def PrintDurationStatistics(self): - """Prints delay, clock drift and bitrate statistics.""" - - min_delay = min(point.delay for point in self.data_points) - - for point in self.data_points: - point.absdelay = point.delay - min_delay - - stream_duration_sender = self.data_points[-1].real_send_time_ms / 1000 - print("Stream duration at sender: {:.1f} seconds".format( - stream_duration_sender - )) - - arrival_timestamps_ms = [point.arrival_timestamp_ms for point in - self.data_points] - stream_duration_receiver = (max(arrival_timestamps_ms) - - min(arrival_timestamps_ms)) / 1000 - print("Stream duration at receiver: {:.1f} seconds".format( - stream_duration_receiver - )) - - print("Clock drift: {:.2f}%".format( - 100 * (stream_duration_receiver / stream_duration_sender - 1) - )) - - total_size = sum(point.size for point in self.data_points) * 8 / 1000 - print("Send average bitrate: {:.2f} kbps".format( - total_size / stream_duration_sender)) - - print("Receive average bitrate: {:.2f} kbps".format( - total_size / stream_duration_receiver)) - - def RemoveReordered(self): - last = self.data_points[0] 
- data_points_ordered = [last] - for point in self.data_points[1:]: - if point.sequence_number > last.sequence_number and ( - point.real_send_time_ms > last.real_send_time_ms): - data_points_ordered.append(point) - last = point - self.data_points = data_points_ordered - - def ComputeBandwidth(self): - """Computes bandwidth averaged over several consecutive packets. + delta_timestamp = (self.data_points[-1].timestamp - + self.data_points[0].timestamp) + delta_arr_timestamp = float( + (self.data_points[-1].arrival_timestamp_ms - + self.data_points[0].arrival_timestamp_ms)) + freq_est = delta_timestamp / delta_arr_timestamp + + freq_vec = [8, 16, 32, 48, 90] + freq = None + for f in freq_vec: + if abs((freq_est - f) / f) < 0.05: + freq = f + + print("Estimated frequency: {:.3f}kHz".format(freq_est)) + if freq is None or always_query_sample_rate: + if not always_query_sample_rate: + print("Frequency could not be guessed.", end=" ") + freq = int(misc.get_input("Input frequency (in kHz)> ")) + else: + print("Guessed frequency: {}kHz".format(freq)) + + for point in self.data_points: + point.real_send_time_ms = (point.timestamp - + self.data_points[0].timestamp) / freq + point.delay = point.arrival_timestamp_ms - point.real_send_time_ms + + def PrintDurationStatistics(self): + """Prints delay, clock drift and bitrate statistics.""" + + min_delay = min(point.delay for point in self.data_points) + + for point in self.data_points: + point.absdelay = point.delay - min_delay + + stream_duration_sender = self.data_points[-1].real_send_time_ms / 1000 + print("Stream duration at sender: {:.1f} seconds".format( + stream_duration_sender)) + + arrival_timestamps_ms = [ + point.arrival_timestamp_ms for point in self.data_points + ] + stream_duration_receiver = (max(arrival_timestamps_ms) - + min(arrival_timestamps_ms)) / 1000 + print("Stream duration at receiver: {:.1f} seconds".format( + stream_duration_receiver)) + + print("Clock drift: {:.2f}%".format( + 100 * 
(stream_duration_receiver / stream_duration_sender - 1))) + + total_size = sum(point.size for point in self.data_points) * 8 / 1000 + print("Send average bitrate: {:.2f} kbps".format( + total_size / stream_duration_sender)) + + print("Receive average bitrate: {:.2f} kbps".format( + total_size / stream_duration_receiver)) + + def RemoveReordered(self): + last = self.data_points[0] + data_points_ordered = [last] + for point in self.data_points[1:]: + if point.sequence_number > last.sequence_number and ( + point.real_send_time_ms > last.real_send_time_ms): + data_points_ordered.append(point) + last = point + self.data_points = data_points_ordered + + def ComputeBandwidth(self): + """Computes bandwidth averaged over several consecutive packets. The number of consecutive packets used in the average is BANDWIDTH_SMOOTHING_WINDOW_SIZE. Averaging is done with numpy.correlate. """ - start_ms = self.data_points[0].real_send_time_ms - stop_ms = self.data_points[-1].real_send_time_ms - (self.bandwidth_kbps, _) = numpy.histogram( - [point.real_send_time_ms for point in self.data_points], - bins=numpy.arange(start_ms, stop_ms, - RTPStatistics.PLOT_RESOLUTION_MS), - weights=[point.size * 8 / RTPStatistics.PLOT_RESOLUTION_MS - for point in self.data_points] - ) - correlate_filter = (numpy.ones( - RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) / - RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) - self.smooth_bw_kbps = numpy.correlate(self.bandwidth_kbps, correlate_filter) - - def PlotStatistics(self): - """Plots changes in delay and average bandwidth.""" - - start_ms = self.data_points[0].real_send_time_ms - stop_ms = self.data_points[-1].real_send_time_ms - time_axis = numpy.arange(start_ms / 1000, stop_ms / 1000, - RTPStatistics.PLOT_RESOLUTION_MS / 1000) - - delay = CalculateDelay(start_ms, stop_ms, - RTPStatistics.PLOT_RESOLUTION_MS, - self.data_points) - - plt.figure(1) - plt.plot(time_axis, delay[:len(time_axis)]) - plt.xlabel("Send time [s]") - plt.ylabel("Relative transport 
delay [ms]") - - plt.figure(2) - plt.plot(time_axis[:len(self.smooth_bw_kbps)], self.smooth_bw_kbps) - plt.xlabel("Send time [s]") - plt.ylabel("Bandwidth [kbps]") - - plt.show() + start_ms = self.data_points[0].real_send_time_ms + stop_ms = self.data_points[-1].real_send_time_ms + (self.bandwidth_kbps, _) = numpy.histogram( + [point.real_send_time_ms for point in self.data_points], + bins=numpy.arange(start_ms, stop_ms, + RTPStatistics.PLOT_RESOLUTION_MS), + weights=[ + point.size * 8 / RTPStatistics.PLOT_RESOLUTION_MS + for point in self.data_points + ]) + correlate_filter = ( + numpy.ones(RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) / + RTPStatistics.BANDWIDTH_SMOOTHING_WINDOW_SIZE) + self.smooth_bw_kbps = numpy.correlate(self.bandwidth_kbps, + correlate_filter) + + def PlotStatistics(self): + """Plots changes in delay and average bandwidth.""" + + start_ms = self.data_points[0].real_send_time_ms + stop_ms = self.data_points[-1].real_send_time_ms + time_axis = numpy.arange(start_ms / 1000, stop_ms / 1000, + RTPStatistics.PLOT_RESOLUTION_MS / 1000) + + delay = CalculateDelay(start_ms, stop_ms, + RTPStatistics.PLOT_RESOLUTION_MS, + self.data_points) + + plt.figure(1) + plt.plot(time_axis, delay[:len(time_axis)]) + plt.xlabel("Send time [s]") + plt.ylabel("Relative transport delay [ms]") + + plt.figure(2) + plt.plot(time_axis[:len(self.smooth_bw_kbps)], self.smooth_bw_kbps) + plt.xlabel("Send time [s]") + plt.ylabel("Bandwidth [kbps]") + + plt.show() def CalculateDelay(start, stop, step, points): - """Quantizes the time coordinates for the delay. + """Quantizes the time coordinates for the delay. Quantizes points by rounding the timestamps downwards to the nearest point in the time sequence start, start+step, start+2*step... Takes @@ -280,61 +286,67 @@ def CalculateDelay(start, stop, step, points): masked array, in which time points with no value are masked. 
""" - grouped_delays = [[] for _ in numpy.arange(start, stop + step, step)] - rounded_value_index = lambda x: int((x - start) / step) - for point in points: - grouped_delays[rounded_value_index(point.real_send_time_ms) - ].append(point.absdelay) - regularized_delays = [numpy.average(arr) if arr else -1 for arr in - grouped_delays] - return numpy.ma.masked_values(regularized_delays, -1) + grouped_delays = [[] for _ in numpy.arange(start, stop + step, step)] + rounded_value_index = lambda x: int((x - start) / step) + for point in points: + grouped_delays[rounded_value_index(point.real_send_time_ms)].append( + point.absdelay) + regularized_delays = [ + numpy.average(arr) if arr else -1 for arr in grouped_delays + ] + return numpy.ma.masked_values(regularized_delays, -1) def main(): - usage = "Usage: %prog [options] " - parser = optparse.OptionParser(usage=usage) - parser.add_option("--dump_header_to_stdout", - default=False, action="store_true", - help="print header info to stdout; similar to rtp_analyze") - parser.add_option("--query_sample_rate", - default=False, action="store_true", - help="always query user for real sample rate") + usage = "Usage: %prog [options] " + parser = optparse.OptionParser(usage=usage) + parser.add_option( + "--dump_header_to_stdout", + default=False, + action="store_true", + help="print header info to stdout; similar to rtp_analyze") + parser.add_option("--query_sample_rate", + default=False, + action="store_true", + help="always query user for real sample rate") + + parser.add_option("--working_directory", + default=None, + action="store", + help="directory in which to search for relative paths") - parser.add_option("--working_directory", - default=None, action="store", - help="directory in which to search for relative paths") + (options, args) = parser.parse_args() - (options, args) = parser.parse_args() + if len(args) < 1: + parser.print_help() + sys.exit(0) - if len(args) < 1: - parser.print_help() - sys.exit(0) + input_file = args[0] 
- input_file = args[0] + if options.working_directory and not os.path.isabs(input_file): + input_file = os.path.join(options.working_directory, input_file) - if options.working_directory and not os.path.isabs(input_file): - input_file = os.path.join(options.working_directory, input_file) + data_points = pb_parse.ParseProtobuf(input_file) + rtp_stats = RTPStatistics(data_points) - data_points = pb_parse.ParseProtobuf(input_file) - rtp_stats = RTPStatistics(data_points) + if options.dump_header_to_stdout: + print("Printing header info to stdout.", file=sys.stderr) + rtp_stats.PrintHeaderStatistics() + sys.exit(0) - if options.dump_header_to_stdout: - print("Printing header info to stdout.", file=sys.stderr) - rtp_stats.PrintHeaderStatistics() - sys.exit(0) + chosen_ssrc = rtp_stats.ChooseSsrc() + print("Chosen SSRC: 0X{:X}".format(chosen_ssrc)) - chosen_ssrc = rtp_stats.ChooseSsrc() - print("Chosen SSRC: 0X{:X}".format(chosen_ssrc)) + rtp_stats.FilterSsrc(chosen_ssrc) - rtp_stats.FilterSsrc(chosen_ssrc) + print("Statistics:") + rtp_stats.PrintSequenceNumberStatistics() + rtp_stats.EstimateFrequency(options.query_sample_rate) + rtp_stats.PrintDurationStatistics() + rtp_stats.RemoveReordered() + rtp_stats.ComputeBandwidth() + rtp_stats.PlotStatistics() - print("Statistics:") - rtp_stats.PrintSequenceNumberStatistics() - rtp_stats.EstimateFrequency(options.query_sample_rate) - rtp_stats.PrintDurationStatistics() - rtp_stats.RemoveReordered() - rtp_stats.ComputeBandwidth() - rtp_stats.PlotStatistics() if __name__ == "__main__": - main() + main() diff --git a/rtc_tools/py_event_log_analyzer/rtp_analyzer_test.py b/rtc_tools/py_event_log_analyzer/rtp_analyzer_test.py index dc6cb22509..bc93b6912d 100755 --- a/rtc_tools/py_event_log_analyzer/rtp_analyzer_test.py +++ b/rtc_tools/py_event_log_analyzer/rtp_analyzer_test.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. 
All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Run the tests with python rtp_analyzer_test.py @@ -19,43 +18,43 @@ MISSING_NUMPY = False # pylint: disable=invalid-name try: - import numpy - import rtp_analyzer + import numpy + import rtp_analyzer except ImportError: - MISSING_NUMPY = True + MISSING_NUMPY = True FakePoint = collections.namedtuple("FakePoint", ["real_send_time_ms", "absdelay"]) class TestDelay(unittest.TestCase): - def AssertMaskEqual(self, masked_array, data, mask): - self.assertEqual(list(masked_array.data), data) + def AssertMaskEqual(self, masked_array, data, mask): + self.assertEqual(list(masked_array.data), data) - if isinstance(masked_array.mask, numpy.bool_): - array_mask = masked_array.mask - else: - array_mask = list(masked_array.mask) - self.assertEqual(array_mask, mask) + if isinstance(masked_array.mask, numpy.bool_): + array_mask = masked_array.mask + else: + array_mask = list(masked_array.mask) + self.assertEqual(array_mask, mask) - def testCalculateDelaySimple(self): - points = [FakePoint(0, 0), FakePoint(1, 0)] - mask = rtp_analyzer.CalculateDelay(0, 1, 1, points) - self.AssertMaskEqual(mask, [0, 0], False) + def testCalculateDelaySimple(self): + points = [FakePoint(0, 0), FakePoint(1, 0)] + mask = rtp_analyzer.CalculateDelay(0, 1, 1, points) + self.AssertMaskEqual(mask, [0, 0], False) - def testCalculateDelayMissing(self): - points = [FakePoint(0, 0), FakePoint(2, 0)] - mask = rtp_analyzer.CalculateDelay(0, 2, 1, points) - self.AssertMaskEqual(mask, [0, -1, 0], [False, True, False]) + def testCalculateDelayMissing(self): + points = [FakePoint(0, 0), FakePoint(2, 0)] + mask = rtp_analyzer.CalculateDelay(0, 2, 1, points) + self.AssertMaskEqual(mask, [0, -1, 0], [False, True, False]) - def testCalculateDelayBorders(self): - points = [FakePoint(0, 0), FakePoint(2, 0)] - mask = rtp_analyzer.CalculateDelay(0, 3, 2, points) - self.AssertMaskEqual(mask, [0, 0, -1], [False, False, 
True]) + def testCalculateDelayBorders(self): + points = [FakePoint(0, 0), FakePoint(2, 0)] + mask = rtp_analyzer.CalculateDelay(0, 3, 2, points) + self.AssertMaskEqual(mask, [0, 0, -1], [False, False, True]) if __name__ == "__main__": - if MISSING_NUMPY: - print "Missing numpy, skipping test." - else: - unittest.main() + if MISSING_NUMPY: + print "Missing numpy, skipping test." + else: + unittest.main() diff --git a/rtc_tools/rtc_event_log_visualizer/alerts.cc b/rtc_tools/rtc_event_log_visualizer/alerts.cc index f2d9564795..2d1868fa28 100644 --- a/rtc_tools/rtc_event_log_visualizer/alerts.cc +++ b/rtc_tools/rtc_event_log_visualizer/alerts.cc @@ -26,15 +26,6 @@ namespace webrtc { -void TriageHelper::Print(FILE* file) { - fprintf(file, "========== TRIAGE NOTIFICATIONS ==========\n"); - for (const auto& alert : triage_alerts_) { - fprintf(file, "%d %s. First occurrence at %3.3lf\n", alert.second.count, - alert.second.explanation.c_str(), alert.second.first_occurrence); - } - fprintf(file, "========== END TRIAGE NOTIFICATIONS ==========\n"); -} - void TriageHelper::AnalyzeStreamGaps(const ParsedRtcEventLog& parsed_log, PacketDirection direction) { // With 100 packets/s (~800kbps), false positives would require 10 s without @@ -62,10 +53,7 @@ void TriageHelper::AnalyzeStreamGaps(const ParsedRtcEventLog& parsed_log, direction == kIncomingPacket ? TriageAlertType::kIncomingCaptureTimeJump : TriageAlertType::kOutgoingCaptureTimeJump; - const int64_t segment_end_us = - parsed_log.log_segments().empty() - ? std::numeric_limits::max() - : parsed_log.log_segments().front().stop_time_us(); + const int64_t segment_end_us = parsed_log.first_log_segment().stop_time_us(); // Check for gaps in sequence numbers and capture timestamps. for (const auto& stream : parsed_log.rtp_packets_by_ssrc(direction)) { @@ -133,10 +121,7 @@ void TriageHelper::AnalyzeTransmissionGaps(const ParsedRtcEventLog& parsed_log, ? 
TriageAlertType::kIncomingRtcpGap : TriageAlertType::kOutgoingRtcpGap; - const int64_t segment_end_us = - parsed_log.log_segments().empty() - ? std::numeric_limits::max() - : parsed_log.log_segments().front().stop_time_us(); + const int64_t segment_end_us = parsed_log.first_log_segment().stop_time_us(); // TODO(terelius): The parser could provide a list of all packets, ordered // by time, for each direction. @@ -202,10 +187,7 @@ void TriageHelper::AnalyzeLog(const ParsedRtcEventLog& parsed_log) { AnalyzeTransmissionGaps(parsed_log, kIncomingPacket); AnalyzeTransmissionGaps(parsed_log, kOutgoingPacket); - const int64_t segment_end_us = - parsed_log.log_segments().empty() - ? std::numeric_limits::max() - : parsed_log.log_segments().front().stop_time_us(); + const int64_t segment_end_us = parsed_log.first_log_segment().stop_time_us(); int64_t first_occurrence = parsed_log.last_timestamp(); constexpr double kMaxLossFraction = 0.05; @@ -233,4 +215,21 @@ void TriageHelper::AnalyzeLog(const ParsedRtcEventLog& parsed_log) { } } +void TriageHelper::Print(FILE* file) { + fprintf(file, "========== TRIAGE NOTIFICATIONS ==========\n"); + for (const auto& alert : triage_alerts_) { + fprintf(file, "%d %s. 
First occurrence at %3.3lf\n", alert.second.count, + alert.second.explanation.c_str(), alert.second.first_occurrence); + } + fprintf(file, "========== END TRIAGE NOTIFICATIONS ==========\n"); +} + +void TriageHelper::ProcessAlerts( + std::function f) { + for (const auto& alert : triage_alerts_) { + f(alert.second.count, alert.second.first_occurrence, + alert.second.explanation); + } +} + } // namespace webrtc diff --git a/rtc_tools/rtc_event_log_visualizer/alerts.h b/rtc_tools/rtc_event_log_visualizer/alerts.h index 7bd9f05270..d3e41666aa 100644 --- a/rtc_tools/rtc_event_log_visualizer/alerts.h +++ b/rtc_tools/rtc_event_log_visualizer/alerts.h @@ -57,6 +57,8 @@ class TriageHelper { PacketDirection direction); void Print(FILE* file); + void ProcessAlerts(std::function f); + private: AnalyzerConfig config_; std::map triage_alerts_; diff --git a/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc b/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc new file mode 100644 index 0000000000..02184a64ea --- /dev/null +++ b/rtc_tools/rtc_event_log_visualizer/analyze_audio.cc @@ -0,0 +1,500 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "rtc_tools/rtc_event_log_visualizer/analyze_audio.h" + +#include +#include +#include +#include + +#include "modules/audio_coding/neteq/tools/audio_sink.h" +#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h" +#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h" +#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h" +#include "modules/audio_coding/neteq/tools/neteq_test.h" +#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h" +#include "rtc_base/ref_counted_object.h" + +namespace webrtc { + +void CreateAudioEncoderTargetBitrateGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder target bitrate", LineStyle::kLine, + PointStyle::kHighlight); + auto GetAnaBitrateBps = [](const LoggedAudioNetworkAdaptationEvent& ana_event) + -> absl::optional { + if (ana_event.config.bitrate_bps) + return absl::optional( + static_cast(*ana_event.config.bitrate_bps)); + return absl::nullopt; + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaBitrateBps, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin); + plot->SetTitle("Reported audio encoder target bitrate"); +} + +void CreateAudioEncoderFrameLengthGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder frame length", LineStyle::kLine, + PointStyle::kHighlight); + auto GetAnaFrameLengthMs = + [](const LoggedAudioNetworkAdaptationEvent& ana_event) { + if (ana_event.config.frame_length_ms) + return absl::optional( + 
static_cast(*ana_event.config.frame_length_ms)); + return absl::optional(); + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaFrameLengthMs, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin); + plot->SetTitle("Reported audio encoder frame length"); +} + +void CreateAudioEncoderPacketLossGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder uplink packet loss fraction", + LineStyle::kLine, PointStyle::kHighlight); + auto GetAnaPacketLoss = + [](const LoggedAudioNetworkAdaptationEvent& ana_event) { + if (ana_event.config.uplink_packet_loss_fraction) + return absl::optional(static_cast( + *ana_event.config.uplink_packet_loss_fraction)); + return absl::optional(); + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaPacketLoss, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin, + kTopMargin); + plot->SetTitle("Reported audio encoder lost packets"); +} + +void CreateAudioEncoderEnableFecGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder FEC", LineStyle::kLine, + PointStyle::kHighlight); + auto GetAnaFecEnabled = + [](const LoggedAudioNetworkAdaptationEvent& ana_event) { + 
if (ana_event.config.enable_fec) + return absl::optional( + static_cast(*ana_event.config.enable_fec)); + return absl::optional(); + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaFecEnabled, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin); + plot->SetTitle("Reported audio encoder FEC"); +} + +void CreateAudioEncoderEnableDtxGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder DTX", LineStyle::kLine, + PointStyle::kHighlight); + auto GetAnaDtxEnabled = + [](const LoggedAudioNetworkAdaptationEvent& ana_event) { + if (ana_event.config.enable_dtx) + return absl::optional( + static_cast(*ana_event.config.enable_dtx)); + return absl::optional(); + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaDtxEnabled, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin); + plot->SetTitle("Reported audio encoder DTX"); +} + +void CreateAudioEncoderNumChannelsGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot) { + TimeSeries time_series("Audio encoder number of channels", LineStyle::kLine, + PointStyle::kHighlight); + auto GetAnaNumChannels = + [](const LoggedAudioNetworkAdaptationEvent& ana_event) { + if 
(ana_event.config.num_channels) + return absl::optional( + static_cast(*ana_event.config.num_channels)); + return absl::optional(); + }; + auto ToCallTime = [config](const LoggedAudioNetworkAdaptationEvent& packet) { + return config.GetCallTimeSec(packet.log_time_us()); + }; + ProcessPoints( + ToCallTime, GetAnaNumChannels, + parsed_log.audio_network_adaptation_events(), &time_series); + plot->AppendTimeSeries(std::move(time_series)); + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", + kBottomMargin, kTopMargin); + plot->SetTitle("Reported audio encoder number of channels"); +} + +class NetEqStreamInput : public test::NetEqInput { + public: + // Does not take any ownership, and all pointers must refer to valid objects + // that outlive the one constructed. + NetEqStreamInput(const std::vector* packet_stream, + const std::vector* output_events, + absl::optional end_time_ms) + : packet_stream_(*packet_stream), + packet_stream_it_(packet_stream_.begin()), + output_events_it_(output_events->begin()), + output_events_end_(output_events->end()), + end_time_ms_(end_time_ms) { + RTC_DCHECK(packet_stream); + RTC_DCHECK(output_events); + } + + absl::optional NextPacketTime() const override { + if (packet_stream_it_ == packet_stream_.end()) { + return absl::nullopt; + } + if (end_time_ms_ && packet_stream_it_->rtp.log_time_ms() > *end_time_ms_) { + return absl::nullopt; + } + return packet_stream_it_->rtp.log_time_ms(); + } + + absl::optional NextOutputEventTime() const override { + if (output_events_it_ == output_events_end_) { + return absl::nullopt; + } + if (end_time_ms_ && output_events_it_->log_time_ms() > *end_time_ms_) { + return absl::nullopt; + } + return output_events_it_->log_time_ms(); + } + + std::unique_ptr PopPacket() override { + if (packet_stream_it_ == packet_stream_.end()) { + return std::unique_ptr(); + } + 
std::unique_ptr packet_data(new PacketData()); + packet_data->header = packet_stream_it_->rtp.header; + packet_data->time_ms = packet_stream_it_->rtp.log_time_ms(); + + // This is a header-only "dummy" packet. Set the payload to all zeros, with + // length according to the virtual length. + packet_data->payload.SetSize(packet_stream_it_->rtp.total_length - + packet_stream_it_->rtp.header_length); + std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0); + + ++packet_stream_it_; + return packet_data; + } + + void AdvanceOutputEvent() override { + if (output_events_it_ != output_events_end_) { + ++output_events_it_; + } + } + + bool ended() const override { return !NextEventTime(); } + + absl::optional NextHeader() const override { + if (packet_stream_it_ == packet_stream_.end()) { + return absl::nullopt; + } + return packet_stream_it_->rtp.header; + } + + private: + const std::vector& packet_stream_; + std::vector::const_iterator packet_stream_it_; + std::vector::const_iterator output_events_it_; + const std::vector::const_iterator output_events_end_; + const absl::optional end_time_ms_; +}; + +namespace { + +// Factory to create a "replacement decoder" that produces the decoded audio +// by reading from a file rather than from the encoded payloads. 
+class ReplacementAudioDecoderFactory : public AudioDecoderFactory { + public: + ReplacementAudioDecoderFactory(const absl::string_view replacement_file_name, + int file_sample_rate_hz) + : replacement_file_name_(replacement_file_name), + file_sample_rate_hz_(file_sample_rate_hz) {} + + std::vector GetSupportedDecoders() override { + RTC_NOTREACHED(); + return {}; + } + + bool IsSupportedDecoder(const SdpAudioFormat& format) override { + return true; + } + + std::unique_ptr MakeAudioDecoder( + const SdpAudioFormat& format, + absl::optional codec_pair_id) override { + auto replacement_file = std::make_unique( + replacement_file_name_, file_sample_rate_hz_); + replacement_file->set_output_rate_hz(48000); + return std::make_unique( + std::move(replacement_file), 48000, false); + } + + private: + const std::string replacement_file_name_; + const int file_sample_rate_hz_; +}; + +// Creates a NetEq test object and all necessary input and output helpers. Runs +// the test and returns the NetEqDelayAnalyzer object that was used to +// instrument the test. 
+std::unique_ptr CreateNetEqTestAndRun( + const std::vector* packet_stream, + const std::vector* output_events, + absl::optional end_time_ms, + const std::string& replacement_file_name, + int file_sample_rate_hz) { + std::unique_ptr input( + new NetEqStreamInput(packet_stream, output_events, end_time_ms)); + + constexpr int kReplacementPt = 127; + std::set cn_types; + std::set forbidden_types; + input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt, + cn_types, forbidden_types)); + + std::unique_ptr output(new test::VoidAudioSink()); + + rtc::scoped_refptr decoder_factory = + rtc::make_ref_counted( + replacement_file_name, file_sample_rate_hz); + + test::NetEqTest::DecoderMap codecs = { + {kReplacementPt, SdpAudioFormat("l16", 48000, 1)}}; + + std::unique_ptr delay_cb( + new test::NetEqDelayAnalyzer); + std::unique_ptr neteq_stats_getter( + new test::NetEqStatsGetter(std::move(delay_cb))); + test::DefaultNetEqTestErrorCallback error_cb; + test::NetEqTest::Callbacks callbacks; + callbacks.error_callback = &error_cb; + callbacks.post_insert_packet = neteq_stats_getter->delay_analyzer(); + callbacks.get_audio_callback = neteq_stats_getter.get(); + + NetEq::Config config; + test::NetEqTest test(config, decoder_factory, codecs, /*text_log=*/nullptr, + /*factory=*/nullptr, std::move(input), std::move(output), + callbacks); + test.Run(); + return neteq_stats_getter; +} +} // namespace + +NetEqStatsGetterMap SimulateNetEq(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const std::string& replacement_file_name, + int file_sample_rate_hz) { + NetEqStatsGetterMap neteq_stats; + + for (const auto& stream : parsed_log.incoming_rtp_packets_by_ssrc()) { + const uint32_t ssrc = stream.ssrc; + if (!IsAudioSsrc(parsed_log, kIncomingPacket, ssrc)) + continue; + const std::vector* audio_packets = + &stream.incoming_packets; + if (audio_packets == nullptr) { + // No incoming audio stream found. 
+ continue; + } + + RTC_DCHECK(neteq_stats.find(ssrc) == neteq_stats.end()); + + std::map>::const_iterator + output_events_it = parsed_log.audio_playout_events().find(ssrc); + if (output_events_it == parsed_log.audio_playout_events().end()) { + // Could not find output events with SSRC matching the input audio stream. + // Using the first available stream of output events. + output_events_it = parsed_log.audio_playout_events().cbegin(); + } + + int64_t end_time_ms = parsed_log.first_log_segment().stop_time_ms(); + + neteq_stats[ssrc] = CreateNetEqTestAndRun( + audio_packets, &output_events_it->second, end_time_ms, + replacement_file_name, file_sample_rate_hz); + } + + return neteq_stats; +} + +// Given a NetEqStatsGetter and the SSRC that the NetEqStatsGetter was created +// for, this method generates a plot for the jitter buffer delay profile. +void CreateAudioJitterBufferGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + uint32_t ssrc, + const test::NetEqStatsGetter* stats_getter, + Plot* plot) { + test::NetEqDelayAnalyzer::Delays arrival_delay_ms; + test::NetEqDelayAnalyzer::Delays corrected_arrival_delay_ms; + test::NetEqDelayAnalyzer::Delays playout_delay_ms; + test::NetEqDelayAnalyzer::Delays target_delay_ms; + + stats_getter->delay_analyzer()->CreateGraphs( + &arrival_delay_ms, &corrected_arrival_delay_ms, &playout_delay_ms, + &target_delay_ms); + + TimeSeries time_series_packet_arrival("packet arrival delay", + LineStyle::kLine); + TimeSeries time_series_relative_packet_arrival( + "Relative packet arrival delay", LineStyle::kLine); + TimeSeries time_series_play_time("Playout delay", LineStyle::kLine); + TimeSeries time_series_target_time("Target delay", LineStyle::kLine, + PointStyle::kHighlight); + + for (const auto& data : arrival_delay_ms) { + const float x = config.GetCallTimeSec(data.first * 1000); // ms to us. 
+ const float y = data.second; + time_series_packet_arrival.points.emplace_back(TimeSeriesPoint(x, y)); + } + for (const auto& data : corrected_arrival_delay_ms) { + const float x = config.GetCallTimeSec(data.first * 1000); // ms to us. + const float y = data.second; + time_series_relative_packet_arrival.points.emplace_back( + TimeSeriesPoint(x, y)); + } + for (const auto& data : playout_delay_ms) { + const float x = config.GetCallTimeSec(data.first * 1000); // ms to us. + const float y = data.second; + time_series_play_time.points.emplace_back(TimeSeriesPoint(x, y)); + } + for (const auto& data : target_delay_ms) { + const float x = config.GetCallTimeSec(data.first * 1000); // ms to us. + const float y = data.second; + time_series_target_time.points.emplace_back(TimeSeriesPoint(x, y)); + } + + plot->AppendTimeSeries(std::move(time_series_packet_arrival)); + plot->AppendTimeSeries(std::move(time_series_relative_packet_arrival)); + plot->AppendTimeSeries(std::move(time_series_play_time)); + plot->AppendTimeSeries(std::move(time_series_target_time)); + + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, "Relative delay (ms)", kBottomMargin, + kTopMargin); + plot->SetTitle("NetEq timing for " + + GetStreamName(parsed_log, kIncomingPacket, ssrc)); +} + +template +void CreateNetEqStatsGraphInternal( + const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const NetEqStatsGetterMap& neteq_stats, + rtc::FunctionView>*( + const test::NetEqStatsGetter*)> data_extractor, + rtc::FunctionView stats_extractor, + const std::string& plot_name, + Plot* plot) { + std::map time_series; + + for (const auto& st : neteq_stats) { + const uint32_t ssrc = st.first; + const std::vector>* data_vector = + data_extractor(st.second.get()); + for (const auto& data : *data_vector) { + const float time = config.GetCallTimeSec(data.first * 1000); // ms to us. 
+ const float value = stats_extractor(data.second); + time_series[ssrc].points.emplace_back(TimeSeriesPoint(time, value)); + } + } + + for (auto& series : time_series) { + series.second.label = + GetStreamName(parsed_log, kIncomingPacket, series.first); + series.second.line_style = LineStyle::kLine; + plot->AppendTimeSeries(std::move(series.second)); + } + + plot->SetXAxis(config.CallBeginTimeSec(), config.CallEndTimeSec(), "Time (s)", + kLeftMargin, kRightMargin); + plot->SetSuggestedYAxis(0, 1, plot_name, kBottomMargin, kTopMargin); + plot->SetTitle(plot_name); +} + +void CreateNetEqNetworkStatsGraph( + const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const NetEqStatsGetterMap& neteq_stats, + rtc::FunctionView stats_extractor, + const std::string& plot_name, + Plot* plot) { + CreateNetEqStatsGraphInternal( + parsed_log, config, neteq_stats, + [](const test::NetEqStatsGetter* stats_getter) { + return stats_getter->stats(); + }, + stats_extractor, plot_name, plot); +} + +void CreateNetEqLifetimeStatsGraph( + const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const NetEqStatsGetterMap& neteq_stats, + rtc::FunctionView stats_extractor, + const std::string& plot_name, + Plot* plot) { + CreateNetEqStatsGraphInternal( + parsed_log, config, neteq_stats, + [](const test::NetEqStatsGetter* stats_getter) { + return stats_getter->lifetime_stats(); + }, + stats_extractor, plot_name, plot); +} + +} // namespace webrtc diff --git a/rtc_tools/rtc_event_log_visualizer/analyze_audio.h b/rtc_tools/rtc_event_log_visualizer/analyze_audio.h new file mode 100644 index 0000000000..726e84492d --- /dev/null +++ b/rtc_tools/rtc_event_log_visualizer/analyze_audio.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_ +#define RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_ + +#include +#include +#include +#include + +#include "api/function_view.h" +#include "logging/rtc_event_log/rtc_event_log_parser.h" +#include "modules/audio_coding/neteq/tools/neteq_stats_getter.h" +#include "rtc_tools/rtc_event_log_visualizer/analyzer_common.h" +#include "rtc_tools/rtc_event_log_visualizer/plot_base.h" + +namespace webrtc { + +void CreateAudioEncoderTargetBitrateGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot); +void CreateAudioEncoderFrameLengthGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot); +void CreateAudioEncoderPacketLossGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot); +void CreateAudioEncoderEnableFecGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot); +void CreateAudioEncoderEnableDtxGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot); +void CreateAudioEncoderNumChannelsGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + Plot* plot); + +using NetEqStatsGetterMap = + std::map>; +NetEqStatsGetterMap SimulateNetEq(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const std::string& replacement_file_name, + int file_sample_rate_hz); + +void CreateAudioJitterBufferGraph(const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + uint32_t ssrc, + const test::NetEqStatsGetter* stats_getter, + Plot* plot); +void CreateNetEqNetworkStatsGraph( + const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const NetEqStatsGetterMap& neteq_stats_getters, + rtc::FunctionView 
stats_extractor, + const std::string& plot_name, + Plot* plot); +void CreateNetEqLifetimeStatsGraph( + const ParsedRtcEventLog& parsed_log, + const AnalyzerConfig& config, + const NetEqStatsGetterMap& neteq_stats_getters, + rtc::FunctionView stats_extractor, + const std::string& plot_name, + Plot* plot); + +} // namespace webrtc + +#endif // RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZE_AUDIO_H_ diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer.cc b/rtc_tools/rtc_event_log_visualizer/analyzer.cc index 9a9a4553f8..0f727f2815 100644 --- a/rtc_tools/rtc_event_log_visualizer/analyzer.cc +++ b/rtc_tools/rtc_event_log_visualizer/analyzer.cc @@ -19,8 +19,10 @@ #include #include "absl/algorithm/container.h" +#include "absl/functional/bind_front.h" #include "absl/strings/string_view.h" #include "api/function_view.h" +#include "api/network_state_predictor.h" #include "api/transport/field_trial_based_config.h" #include "api/transport/goog_cc_factory.h" #include "call/audio_receive_stream.h" @@ -31,12 +33,6 @@ #include "logging/rtc_event_log/rtc_event_processor.h" #include "logging/rtc_event_log/rtc_stream_config.h" #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor.h" -#include "modules/audio_coding/neteq/tools/audio_sink.h" -#include "modules/audio_coding/neteq/tools/fake_decode_from_file.h" -#include "modules/audio_coding/neteq/tools/neteq_delay_analyzer.h" -#include "modules/audio_coding/neteq/tools/neteq_replacement_input.h" -#include "modules/audio_coding/neteq/tools/neteq_test.h" -#include "modules/audio_coding/neteq/tools/resample_input_audio_file.h" #include "modules/congestion_controller/goog_cc/acknowledged_bitrate_estimator.h" #include "modules/congestion_controller/goog_cc/bitrate_estimator.h" #include "modules/congestion_controller/goog_cc/delay_based_bwe.h" @@ -44,8 +40,6 @@ #include "modules/congestion_controller/rtp/transport_feedback_adapter.h" #include "modules/pacing/paced_sender.h" #include 
"modules/pacing/packet_router.h" -#include "modules/remote_bitrate_estimator/include/bwe_defines.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "modules/rtp_rtcp/source/rtcp_packet.h" #include "modules/rtp_rtcp/source/rtcp_packet/common_header.h" @@ -54,6 +48,7 @@ #include "modules/rtp_rtcp/source/rtcp_packet/sender_report.h" #include "modules/rtp_rtcp/source/rtcp_packet/transport_feedback.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/rtp_utility.h" #include "rtc_base/checks.h" #include "rtc_base/format_macros.h" @@ -62,17 +57,12 @@ #include "rtc_base/rate_statistics.h" #include "rtc_base/strings/string_builder.h" #include "rtc_tools/rtc_event_log_visualizer/log_simulation.h" - -#ifndef BWE_TEST_LOGGING_COMPILE_TIME_ENABLE -#define BWE_TEST_LOGGING_COMPILE_TIME_ENABLE 0 -#endif // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE +#include "test/explicit_key_value_config.h" namespace webrtc { namespace { -const int kNumMicrosecsPerSec = 1000000; - std::string SsrcToString(uint32_t ssrc) { rtc::StringBuilder ss; ss << "SSRC " << ssrc; @@ -168,11 +158,6 @@ absl::optional EstimateRtpClockFrequency( return absl::nullopt; } -constexpr float kLeftMargin = 0.01f; -constexpr float kRightMargin = 0.02f; -constexpr float kBottomMargin = 0.02f; -constexpr float kTopMargin = 0.05f; - absl::optional NetworkDelayDiff_AbsSendTime( const LoggedRtpPacketIncoming& old_packet, const LoggedRtpPacketIncoming& new_packet) { @@ -222,99 +207,6 @@ absl::optional NetworkDelayDiff_CaptureTime( return delay_change; } -// For each element in data_view, use |f()| to extract a y-coordinate and -// store the result in a TimeSeries. 
-template -void ProcessPoints(rtc::FunctionView fx, - rtc::FunctionView(const DataType&)> fy, - const IterableType& data_view, - TimeSeries* result) { - for (size_t i = 0; i < data_view.size(); i++) { - const DataType& elem = data_view[i]; - float x = fx(elem); - absl::optional y = fy(elem); - if (y) - result->points.emplace_back(x, *y); - } -} - -// For each pair of adjacent elements in |data|, use |f()| to extract a -// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate -// will be the time of the second element in the pair. -template -void ProcessPairs( - rtc::FunctionView fx, - rtc::FunctionView(const DataType&, - const DataType&)> fy, - const IterableType& data, - TimeSeries* result) { - for (size_t i = 1; i < data.size(); i++) { - float x = fx(data[i]); - absl::optional y = fy(data[i - 1], data[i]); - if (y) - result->points.emplace_back(x, static_cast(*y)); - } -} - -// For each pair of adjacent elements in |data|, use |f()| to extract a -// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate -// will be the time of the second element in the pair. -template -void AccumulatePairs( - rtc::FunctionView fx, - rtc::FunctionView(const DataType&, - const DataType&)> fy, - const IterableType& data, - TimeSeries* result) { - ResultType sum = 0; - for (size_t i = 1; i < data.size(); i++) { - float x = fx(data[i]); - absl::optional y = fy(data[i - 1], data[i]); - if (y) { - sum += *y; - result->points.emplace_back(x, static_cast(sum)); - } - } -} - -// Calculates a moving average of |data| and stores the result in a TimeSeries. -// A data point is generated every |step| microseconds from |begin_time| -// to |end_time|. The value of each data point is the average of the data -// during the preceding |window_duration_us| microseconds. 
-template -void MovingAverage( - rtc::FunctionView(const DataType&)> fy, - const IterableType& data_view, - AnalyzerConfig config, - TimeSeries* result) { - size_t window_index_begin = 0; - size_t window_index_end = 0; - ResultType sum_in_window = 0; - - for (int64_t t = config.begin_time_; t < config.end_time_ + config.step_; - t += config.step_) { - while (window_index_end < data_view.size() && - data_view[window_index_end].log_time_us() < t) { - absl::optional value = fy(data_view[window_index_end]); - if (value) - sum_in_window += *value; - ++window_index_end; - } - while (window_index_begin < data_view.size() && - data_view[window_index_begin].log_time_us() < - t - config.window_duration_) { - absl::optional value = fy(data_view[window_index_begin]); - if (value) - sum_in_window -= *value; - ++window_index_begin; - } - float window_duration_s = - static_cast(config.window_duration_) / kNumMicrosecsPerSec; - float x = config.GetCallTimeSec(t); - float y = sum_in_window / window_duration_s; - result->points.emplace_back(x, y); - } -} template TimeSeries CreateRtcpTypeTimeSeries(const std::vector& rtcp_list, @@ -465,15 +357,21 @@ EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log, config_.begin_time_ = config_.end_time_ = 0; } - RTC_LOG(LS_INFO) << "Found " << parsed_log_.log_segments().size() - << " (LOG_START, LOG_END) segments in log."; + RTC_LOG(LS_INFO) << "Log is " + << (parsed_log_.last_timestamp() - + parsed_log_.first_timestamp()) / + 1000000 + << " seconds long."; } EventLogAnalyzer::EventLogAnalyzer(const ParsedRtcEventLog& log, const AnalyzerConfig& config) : parsed_log_(log), config_(config) { - RTC_LOG(LS_INFO) << "Found " << parsed_log_.log_segments().size() - << " (LOG_START, LOG_END) segments in log."; + RTC_LOG(LS_INFO) << "Log is " + << (parsed_log_.last_timestamp() - + parsed_log_.first_timestamp()) / + 1000000 + << " seconds long."; } class BitrateObserver : public RemoteBitrateObserver { @@ -548,6 +446,8 @@ void 
EventLogAnalyzer::CreateRtcpTypeGraph(PacketDirection direction, CreateRtcpTypeTimeSeries(parsed_log_.firs(direction), config_, "FIR", 7)); plot->AppendTimeSeries( CreateRtcpTypeTimeSeries(parsed_log_.plis(direction), config_, "PLI", 8)); + plot->AppendTimeSeries( + CreateRtcpTypeTimeSeries(parsed_log_.byes(direction), config_, "BYE", 9)); plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), "Time (s)", kLeftMargin, kRightMargin); plot->SetSuggestedYAxis(0, 1, "RTCP type", kBottomMargin, kTopMargin); @@ -559,7 +459,8 @@ void EventLogAnalyzer::CreateRtcpTypeGraph(PacketDirection direction, {5, "NACK"}, {6, "REMB"}, {7, "FIR"}, - {8, "PLI"}}); + {8, "PLI"}, + {9, "BYE"}}); } template @@ -852,10 +753,7 @@ void EventLogAnalyzer::CreateIncomingDelayGraph(Plot* plot) { << packets.size() << " packets in the stream."; continue; } - int64_t segment_end_us = - parsed_log_.log_segments().empty() - ? std::numeric_limits::max() - : parsed_log_.log_segments().front().stop_time_us(); + int64_t segment_end_us = parsed_log_.first_log_segment().stop_time_us(); absl::optional estimated_frequency = EstimateRtpClockFrequency(packets, segment_end_us); if (!estimated_frequency) @@ -1315,10 +1213,13 @@ void EventLogAnalyzer::CreateSendSideBweSimulationGraph(Plot* plot) { TimeSeries time_series("Delay-based estimate", LineStyle::kStep, PointStyle::kHighlight); - TimeSeries acked_time_series("Acked bitrate", LineStyle::kLine, + TimeSeries acked_time_series("Raw acked bitrate", LineStyle::kLine, PointStyle::kHighlight); - TimeSeries acked_estimate_time_series( - "Acked bitrate estimate", LineStyle::kLine, PointStyle::kHighlight); + TimeSeries robust_time_series("Robust throughput estimate", LineStyle::kLine, + PointStyle::kHighlight); + TimeSeries acked_estimate_time_series("Ackednowledged bitrate estimate", + LineStyle::kLine, + PointStyle::kHighlight); auto rtp_iterator = outgoing_rtp.begin(); auto rtcp_iterator = incoming_rtcp.begin(); @@ -1344,20 +1245,18 @@ void 
EventLogAnalyzer::CreateSendSideBweSimulationGraph(Plot* plot) { return std::numeric_limits::max(); }; - RateStatistics acked_bitrate(250, 8000); -#if !(BWE_TEST_LOGGING_COMPILE_TIME_ENABLE) - FieldTrialBasedConfig field_trial_config_; - // The event_log_visualizer should normally not be compiled with - // BWE_TEST_LOGGING_COMPILE_TIME_ENABLE since the normal plots won't work. - // However, compiling with BWE_TEST_LOGGING, running with --plot=sendside_bwe - // and piping the output to plot_dynamics.py can be used as a hack to get the - // internal state of various BWE components. In this case, it is important - // we don't instantiate the AcknowledgedBitrateEstimator both here and in - // GoogCcNetworkController since that would lead to duplicate outputs. + RateStatistics acked_bitrate(750, 8000); + test::ExplicitKeyValueConfig throughput_config( + "WebRTC-Bwe-RobustThroughputEstimatorSettings/" + "enabled:true,reduce_bias:true,assume_shared_link:false,initial_packets:" + "10,min_packets:25,window_duration:750ms,unacked_weight:0.5/"); + std::unique_ptr + robust_throughput_estimator( + AcknowledgedBitrateEstimatorInterface::Create(&throughput_config)); + FieldTrialBasedConfig field_trial_config; std::unique_ptr acknowledged_bitrate_estimator( - AcknowledgedBitrateEstimatorInterface::Create(&field_trial_config_)); -#endif // !(BWE_TEST_LOGGING_COMPILE_TIME_ENABLE) + AcknowledgedBitrateEstimatorInterface::Create(&field_trial_config)); int64_t time_us = std::min({NextRtpTime(), NextRtcpTime(), NextProcessTime()}); int64_t last_update_us = 0; @@ -1367,24 +1266,40 @@ void EventLogAnalyzer::CreateSendSideBweSimulationGraph(Plot* plot) { RTC_DCHECK_EQ(clock.TimeInMicroseconds(), NextRtpTime()); const RtpPacketType& rtp_packet = *rtp_iterator->second; if (rtp_packet.rtp.header.extension.hasTransportSequenceNumber) { - RTC_DCHECK(rtp_packet.rtp.header.extension.hasTransportSequenceNumber); RtpPacketSendInfo packet_info; - packet_info.ssrc = rtp_packet.rtp.header.ssrc; + 
packet_info.media_ssrc = rtp_packet.rtp.header.ssrc; packet_info.transport_sequence_number = rtp_packet.rtp.header.extension.transportSequenceNumber; packet_info.rtp_sequence_number = rtp_packet.rtp.header.sequenceNumber; packet_info.length = rtp_packet.rtp.total_length; + if (IsRtxSsrc(parsed_log_, PacketDirection::kOutgoingPacket, + rtp_packet.rtp.header.ssrc)) { + // Don't set the optional media type as we don't know if it is + // a retransmission, FEC or padding. + } else if (IsVideoSsrc(parsed_log_, PacketDirection::kOutgoingPacket, + rtp_packet.rtp.header.ssrc)) { + packet_info.packet_type = RtpPacketMediaType::kVideo; + } else if (IsAudioSsrc(parsed_log_, PacketDirection::kOutgoingPacket, + rtp_packet.rtp.header.ssrc)) { + packet_info.packet_type = RtpPacketMediaType::kAudio; + } transport_feedback.AddPacket( packet_info, 0u, // Per packet overhead bytes. Timestamp::Micros(rtp_packet.rtp.log_time_us())); - rtc::SentPacket sent_packet( - rtp_packet.rtp.header.extension.transportSequenceNumber, - rtp_packet.rtp.log_time_us() / 1000); - auto sent_msg = transport_feedback.ProcessSentPacket(sent_packet); - if (sent_msg) - observer.Update(goog_cc->OnSentPacket(*sent_msg)); } + rtc::SentPacket sent_packet; + sent_packet.send_time_ms = rtp_packet.rtp.log_time_ms(); + sent_packet.info.included_in_allocation = true; + sent_packet.info.packet_size_bytes = rtp_packet.rtp.total_length; + if (rtp_packet.rtp.header.extension.hasTransportSequenceNumber) { + sent_packet.packet_id = + rtp_packet.rtp.header.extension.transportSequenceNumber; + sent_packet.info.included_in_feedback = true; + } + auto sent_msg = transport_feedback.ProcessSentPacket(sent_packet); + if (sent_msg) + observer.Update(goog_cc->OnSentPacket(*sent_msg)); ++rtp_iterator; } if (clock.TimeInMicroseconds() >= NextRtcpTime()) { @@ -1399,13 +1314,13 @@ void EventLogAnalyzer::CreateSendSideBweSimulationGraph(Plot* plot) { std::vector feedback = feedback_msg->SortedByReceiveTime(); if (!feedback.empty()) { -#if 
!(BWE_TEST_LOGGING_COMPILE_TIME_ENABLE) acknowledged_bitrate_estimator->IncomingPacketFeedbackVector( feedback); -#endif // !(BWE_TEST_LOGGING_COMPILE_TIME_ENABLE) - for (const PacketResult& packet : feedback) + robust_throughput_estimator->IncomingPacketFeedbackVector(feedback); + for (const PacketResult& packet : feedback) { acked_bitrate.Update(packet.sent_packet.size.bytes(), packet.receive_time.ms()); + } bitrate_bps = acked_bitrate.Rate(feedback.back().receive_time.ms()); } } @@ -1413,12 +1328,14 @@ void EventLogAnalyzer::CreateSendSideBweSimulationGraph(Plot* plot) { float x = config_.GetCallTimeSec(clock.TimeInMicroseconds()); float y = bitrate_bps.value_or(0) / 1000; acked_time_series.points.emplace_back(x, y); -#if !(BWE_TEST_LOGGING_COMPILE_TIME_ENABLE) + y = robust_throughput_estimator->bitrate() + .value_or(DataRate::Zero()) + .kbps(); + robust_time_series.points.emplace_back(x, y); y = acknowledged_bitrate_estimator->bitrate() .value_or(DataRate::Zero()) .kbps(); acked_estimate_time_series.points.emplace_back(x, y); -#endif // !(BWE_TEST_LOGGING_COMPILE_TIME_ENABLE) ++rtcp_iterator; } if (clock.TimeInMicroseconds() >= NextProcessTime()) { @@ -1439,6 +1356,7 @@ void EventLogAnalyzer::CreateSendSideBweSimulationGraph(Plot* plot) { } // Add the data set to the plot. 
plot->AppendTimeSeries(std::move(time_series)); + plot->AppendTimeSeries(std::move(robust_time_series)); plot->AppendTimeSeries(std::move(acked_time_series)); plot->AppendTimeSeriesIfNotEmpty(std::move(acked_estimate_time_series)); @@ -1450,13 +1368,11 @@ void EventLogAnalyzer::CreateSendSideBweSimulationGraph(Plot* plot) { void EventLogAnalyzer::CreateReceiveSideBweSimulationGraph(Plot* plot) { using RtpPacketType = LoggedRtpPacketIncoming; - class RembInterceptingPacketRouter : public PacketRouter { + class RembInterceptor { public: - void OnReceiveBitrateChanged(const std::vector& ssrcs, - uint32_t bitrate_bps) override { + void SendRemb(uint32_t bitrate_bps, std::vector ssrcs) { last_bitrate_bps_ = bitrate_bps; bitrate_updated_ = true; - PacketRouter::OnReceiveBitrateChanged(ssrcs, bitrate_bps); } uint32_t last_bitrate_bps() const { return last_bitrate_bps_; } bool GetAndResetBitrateUpdated() { @@ -1466,8 +1382,10 @@ void EventLogAnalyzer::CreateReceiveSideBweSimulationGraph(Plot* plot) { } private: - uint32_t last_bitrate_bps_; - bool bitrate_updated_; + // We don't know the start bitrate, but assume that it is the default 300 + // kbps. + uint32_t last_bitrate_bps_ = 300000; + bool bitrate_updated_ = false; }; std::multimap incoming_rtp; @@ -1481,10 +1399,10 @@ void EventLogAnalyzer::CreateReceiveSideBweSimulationGraph(Plot* plot) { } SimulatedClock clock(0); - RembInterceptingPacketRouter packet_router; - // TODO(terelius): The PacketRouter is used as the RemoteBitrateObserver. - // Is this intentional? - ReceiveSideCongestionController rscc(&clock, &packet_router); + RembInterceptor remb_interceptor; + ReceiveSideCongestionController rscc( + &clock, [](auto...) {}, + absl::bind_front(&RembInterceptor::SendRemb, &remb_interceptor), nullptr); // TODO(holmer): Log the call config and use that here instead. 
// static const uint32_t kDefaultStartBitrateBps = 300000; // rscc.SetBweBitrates(0, kDefaultStartBitrateBps, -1); @@ -1509,9 +1427,9 @@ void EventLogAnalyzer::CreateReceiveSideBweSimulationGraph(Plot* plot) { float x = config_.GetCallTimeSec(clock.TimeInMicroseconds()); acked_time_series.points.emplace_back(x, y); } - if (packet_router.GetAndResetBitrateUpdated() || + if (remb_interceptor.GetAndResetBitrateUpdated() || clock.TimeInMicroseconds() - last_update_us >= 1e6) { - uint32_t y = packet_router.last_bitrate_bps() / 1000; + uint32_t y = remb_interceptor.last_bitrate_bps() / 1000; float x = config_.GetCallTimeSec(clock.TimeInMicroseconds()); time_series.points.emplace_back(x, y); last_update_us = clock.TimeInMicroseconds(); @@ -1586,10 +1504,7 @@ void EventLogAnalyzer::CreatePacerDelayGraph(Plot* plot) { "pacer delay with less than 2 packets in the stream"; continue; } - int64_t segment_end_us = - parsed_log_.log_segments().empty() - ? std::numeric_limits::max() - : parsed_log_.log_segments().front().stop_time_us(); + int64_t segment_end_us = parsed_log_.first_log_segment().stop_time_us(); absl::optional estimated_frequency = EstimateRtpClockFrequency(packets, segment_end_us); if (!estimated_frequency) @@ -1723,466 +1638,6 @@ void EventLogAnalyzer::CreateSenderAndReceiverReportPlot( plot->SetTitle(title); } -void EventLogAnalyzer::CreateAudioEncoderTargetBitrateGraph(Plot* plot) { - TimeSeries time_series("Audio encoder target bitrate", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaBitrateBps = [](const LoggedAudioNetworkAdaptationEvent& ana_event) - -> absl::optional { - if (ana_event.config.bitrate_bps) - return absl::optional( - static_cast(*ana_event.config.bitrate_bps)); - return absl::nullopt; - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaBitrateBps, - parsed_log_.audio_network_adaptation_events(), 
&time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "Bitrate (bps)", kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder target bitrate"); -} - -void EventLogAnalyzer::CreateAudioEncoderFrameLengthGraph(Plot* plot) { - TimeSeries time_series("Audio encoder frame length", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaFrameLengthMs = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.frame_length_ms) - return absl::optional( - static_cast(*ana_event.config.frame_length_ms)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaFrameLengthMs, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "Frame length (ms)", kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder frame length"); -} - -void EventLogAnalyzer::CreateAudioEncoderPacketLossGraph(Plot* plot) { - TimeSeries time_series("Audio encoder uplink packet loss fraction", - LineStyle::kLine, PointStyle::kHighlight); - auto GetAnaPacketLoss = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.uplink_packet_loss_fraction) - return absl::optional(static_cast( - *ana_event.config.uplink_packet_loss_fraction)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaPacketLoss, - parsed_log_.audio_network_adaptation_events(), &time_series); - 
plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 10, "Percent lost packets", kBottomMargin, - kTopMargin); - plot->SetTitle("Reported audio encoder lost packets"); -} - -void EventLogAnalyzer::CreateAudioEncoderEnableFecGraph(Plot* plot) { - TimeSeries time_series("Audio encoder FEC", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaFecEnabled = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.enable_fec) - return absl::optional( - static_cast(*ana_event.config.enable_fec)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaFecEnabled, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "FEC (false/true)", kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder FEC"); -} - -void EventLogAnalyzer::CreateAudioEncoderEnableDtxGraph(Plot* plot) { - TimeSeries time_series("Audio encoder DTX", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaDtxEnabled = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.enable_dtx) - return absl::optional( - static_cast(*ana_event.config.enable_dtx)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaDtxEnabled, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), 
config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "DTX (false/true)", kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder DTX"); -} - -void EventLogAnalyzer::CreateAudioEncoderNumChannelsGraph(Plot* plot) { - TimeSeries time_series("Audio encoder number of channels", LineStyle::kLine, - PointStyle::kHighlight); - auto GetAnaNumChannels = - [](const LoggedAudioNetworkAdaptationEvent& ana_event) { - if (ana_event.config.num_channels) - return absl::optional( - static_cast(*ana_event.config.num_channels)); - return absl::optional(); - }; - auto ToCallTime = [this](const LoggedAudioNetworkAdaptationEvent& packet) { - return this->config_.GetCallTimeSec(packet.log_time_us()); - }; - ProcessPoints( - ToCallTime, GetAnaNumChannels, - parsed_log_.audio_network_adaptation_events(), &time_series); - plot->AppendTimeSeries(std::move(time_series)); - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "Number of channels (1 (mono)/2 (stereo))", - kBottomMargin, kTopMargin); - plot->SetTitle("Reported audio encoder number of channels"); -} - -class NetEqStreamInput : public test::NetEqInput { - public: - // Does not take any ownership, and all pointers must refer to valid objects - // that outlive the one constructed. 
- NetEqStreamInput(const std::vector* packet_stream, - const std::vector* output_events, - absl::optional end_time_ms) - : packet_stream_(*packet_stream), - packet_stream_it_(packet_stream_.begin()), - output_events_it_(output_events->begin()), - output_events_end_(output_events->end()), - end_time_ms_(end_time_ms) { - RTC_DCHECK(packet_stream); - RTC_DCHECK(output_events); - } - - absl::optional NextPacketTime() const override { - if (packet_stream_it_ == packet_stream_.end()) { - return absl::nullopt; - } - if (end_time_ms_ && packet_stream_it_->rtp.log_time_ms() > *end_time_ms_) { - return absl::nullopt; - } - return packet_stream_it_->rtp.log_time_ms(); - } - - absl::optional NextOutputEventTime() const override { - if (output_events_it_ == output_events_end_) { - return absl::nullopt; - } - if (end_time_ms_ && output_events_it_->log_time_ms() > *end_time_ms_) { - return absl::nullopt; - } - return output_events_it_->log_time_ms(); - } - - std::unique_ptr PopPacket() override { - if (packet_stream_it_ == packet_stream_.end()) { - return std::unique_ptr(); - } - std::unique_ptr packet_data(new PacketData()); - packet_data->header = packet_stream_it_->rtp.header; - packet_data->time_ms = packet_stream_it_->rtp.log_time_ms(); - - // This is a header-only "dummy" packet. Set the payload to all zeros, with - // length according to the virtual length. 
- packet_data->payload.SetSize(packet_stream_it_->rtp.total_length - - packet_stream_it_->rtp.header_length); - std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0); - - ++packet_stream_it_; - return packet_data; - } - - void AdvanceOutputEvent() override { - if (output_events_it_ != output_events_end_) { - ++output_events_it_; - } - } - - bool ended() const override { return !NextEventTime(); } - - absl::optional NextHeader() const override { - if (packet_stream_it_ == packet_stream_.end()) { - return absl::nullopt; - } - return packet_stream_it_->rtp.header; - } - - private: - const std::vector& packet_stream_; - std::vector::const_iterator packet_stream_it_; - std::vector::const_iterator output_events_it_; - const std::vector::const_iterator output_events_end_; - const absl::optional end_time_ms_; -}; - -namespace { - -// Factory to create a "replacement decoder" that produces the decoded audio -// by reading from a file rather than from the encoded payloads. -class ReplacementAudioDecoderFactory : public AudioDecoderFactory { - public: - ReplacementAudioDecoderFactory(const absl::string_view replacement_file_name, - int file_sample_rate_hz) - : replacement_file_name_(replacement_file_name), - file_sample_rate_hz_(file_sample_rate_hz) {} - - std::vector GetSupportedDecoders() override { - RTC_NOTREACHED(); - return {}; - } - - bool IsSupportedDecoder(const SdpAudioFormat& format) override { - return true; - } - - std::unique_ptr MakeAudioDecoder( - const SdpAudioFormat& format, - absl::optional codec_pair_id) override { - auto replacement_file = std::make_unique( - replacement_file_name_, file_sample_rate_hz_); - replacement_file->set_output_rate_hz(48000); - return std::make_unique( - std::move(replacement_file), 48000, false); - } - - private: - const std::string replacement_file_name_; - const int file_sample_rate_hz_; -}; - -// Creates a NetEq test object and all necessary input and output helpers. 
Runs -// the test and returns the NetEqDelayAnalyzer object that was used to -// instrument the test. -std::unique_ptr CreateNetEqTestAndRun( - const std::vector* packet_stream, - const std::vector* output_events, - absl::optional end_time_ms, - const std::string& replacement_file_name, - int file_sample_rate_hz) { - std::unique_ptr input( - new NetEqStreamInput(packet_stream, output_events, end_time_ms)); - - constexpr int kReplacementPt = 127; - std::set cn_types; - std::set forbidden_types; - input.reset(new test::NetEqReplacementInput(std::move(input), kReplacementPt, - cn_types, forbidden_types)); - - NetEq::Config config; - config.max_packets_in_buffer = 200; - config.enable_fast_accelerate = true; - - std::unique_ptr output(new test::VoidAudioSink()); - - rtc::scoped_refptr decoder_factory = - new rtc::RefCountedObject( - replacement_file_name, file_sample_rate_hz); - - test::NetEqTest::DecoderMap codecs = { - {kReplacementPt, SdpAudioFormat("l16", 48000, 1)}}; - - std::unique_ptr delay_cb( - new test::NetEqDelayAnalyzer); - std::unique_ptr neteq_stats_getter( - new test::NetEqStatsGetter(std::move(delay_cb))); - test::DefaultNetEqTestErrorCallback error_cb; - test::NetEqTest::Callbacks callbacks; - callbacks.error_callback = &error_cb; - callbacks.post_insert_packet = neteq_stats_getter->delay_analyzer(); - callbacks.get_audio_callback = neteq_stats_getter.get(); - - test::NetEqTest test(config, decoder_factory, codecs, /*text_log=*/nullptr, - /*factory=*/nullptr, std::move(input), std::move(output), - callbacks); - test.Run(); - return neteq_stats_getter; -} -} // namespace - -EventLogAnalyzer::NetEqStatsGetterMap EventLogAnalyzer::SimulateNetEq( - const std::string& replacement_file_name, - int file_sample_rate_hz) const { - NetEqStatsGetterMap neteq_stats; - - for (const auto& stream : parsed_log_.incoming_rtp_packets_by_ssrc()) { - const uint32_t ssrc = stream.ssrc; - if (!IsAudioSsrc(parsed_log_, kIncomingPacket, ssrc)) - continue; - const std::vector* 
audio_packets = - &stream.incoming_packets; - if (audio_packets == nullptr) { - // No incoming audio stream found. - continue; - } - - RTC_DCHECK(neteq_stats.find(ssrc) == neteq_stats.end()); - - std::map>::const_iterator - output_events_it = parsed_log_.audio_playout_events().find(ssrc); - if (output_events_it == parsed_log_.audio_playout_events().end()) { - // Could not find output events with SSRC matching the input audio stream. - // Using the first available stream of output events. - output_events_it = parsed_log_.audio_playout_events().cbegin(); - } - - absl::optional end_time_ms = - parsed_log_.log_segments().empty() - ? absl::nullopt - : absl::optional( - parsed_log_.log_segments().front().stop_time_ms()); - - neteq_stats[ssrc] = CreateNetEqTestAndRun( - audio_packets, &output_events_it->second, end_time_ms, - replacement_file_name, file_sample_rate_hz); - } - - return neteq_stats; -} - -// Given a NetEqStatsGetter and the SSRC that the NetEqStatsGetter was created -// for, this method generates a plot for the jitter buffer delay profile. 
-void EventLogAnalyzer::CreateAudioJitterBufferGraph( - uint32_t ssrc, - const test::NetEqStatsGetter* stats_getter, - Plot* plot) const { - test::NetEqDelayAnalyzer::Delays arrival_delay_ms; - test::NetEqDelayAnalyzer::Delays corrected_arrival_delay_ms; - test::NetEqDelayAnalyzer::Delays playout_delay_ms; - test::NetEqDelayAnalyzer::Delays target_delay_ms; - - stats_getter->delay_analyzer()->CreateGraphs( - &arrival_delay_ms, &corrected_arrival_delay_ms, &playout_delay_ms, - &target_delay_ms); - - TimeSeries time_series_packet_arrival("packet arrival delay", - LineStyle::kLine); - TimeSeries time_series_relative_packet_arrival( - "Relative packet arrival delay", LineStyle::kLine); - TimeSeries time_series_play_time("Playout delay", LineStyle::kLine); - TimeSeries time_series_target_time("Target delay", LineStyle::kLine, - PointStyle::kHighlight); - - for (const auto& data : arrival_delay_ms) { - const float x = config_.GetCallTimeSec(data.first * 1000); // ms to us. - const float y = data.second; - time_series_packet_arrival.points.emplace_back(TimeSeriesPoint(x, y)); - } - for (const auto& data : corrected_arrival_delay_ms) { - const float x = config_.GetCallTimeSec(data.first * 1000); // ms to us. - const float y = data.second; - time_series_relative_packet_arrival.points.emplace_back( - TimeSeriesPoint(x, y)); - } - for (const auto& data : playout_delay_ms) { - const float x = config_.GetCallTimeSec(data.first * 1000); // ms to us. - const float y = data.second; - time_series_play_time.points.emplace_back(TimeSeriesPoint(x, y)); - } - for (const auto& data : target_delay_ms) { - const float x = config_.GetCallTimeSec(data.first * 1000); // ms to us. 
- const float y = data.second; - time_series_target_time.points.emplace_back(TimeSeriesPoint(x, y)); - } - - plot->AppendTimeSeries(std::move(time_series_packet_arrival)); - plot->AppendTimeSeries(std::move(time_series_relative_packet_arrival)); - plot->AppendTimeSeries(std::move(time_series_play_time)); - plot->AppendTimeSeries(std::move(time_series_target_time)); - - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, "Relative delay (ms)", kBottomMargin, - kTopMargin); - plot->SetTitle("NetEq timing for " + - GetStreamName(parsed_log_, kIncomingPacket, ssrc)); -} - -template -void EventLogAnalyzer::CreateNetEqStatsGraphInternal( - const NetEqStatsGetterMap& neteq_stats, - rtc::FunctionView>*( - const test::NetEqStatsGetter*)> data_extractor, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const { - std::map time_series; - - for (const auto& st : neteq_stats) { - const uint32_t ssrc = st.first; - const std::vector>* data_vector = - data_extractor(st.second.get()); - for (const auto& data : *data_vector) { - const float time = - config_.GetCallTimeSec(data.first * 1000); // ms to us. 
- const float value = stats_extractor(data.second); - time_series[ssrc].points.emplace_back(TimeSeriesPoint(time, value)); - } - } - - for (auto& series : time_series) { - series.second.label = - GetStreamName(parsed_log_, kIncomingPacket, series.first); - series.second.line_style = LineStyle::kLine; - plot->AppendTimeSeries(std::move(series.second)); - } - - plot->SetXAxis(config_.CallBeginTimeSec(), config_.CallEndTimeSec(), - "Time (s)", kLeftMargin, kRightMargin); - plot->SetSuggestedYAxis(0, 1, plot_name, kBottomMargin, kTopMargin); - plot->SetTitle(plot_name); -} - -void EventLogAnalyzer::CreateNetEqNetworkStatsGraph( - const NetEqStatsGetterMap& neteq_stats, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const { - CreateNetEqStatsGraphInternal( - neteq_stats, - [](const test::NetEqStatsGetter* stats_getter) { - return stats_getter->stats(); - }, - stats_extractor, plot_name, plot); -} - -void EventLogAnalyzer::CreateNetEqLifetimeStatsGraph( - const NetEqStatsGetterMap& neteq_stats, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const { - CreateNetEqStatsGraphInternal( - neteq_stats, - [](const test::NetEqStatsGetter* stats_getter) { - return stats_getter->lifetime_stats(); - }, - stats_extractor, plot_name, plot); -} - void EventLogAnalyzer::CreateIceCandidatePairConfigGraph(Plot* plot) { std::map configs_by_cp_id; for (const auto& config : parsed_log_.ice_candidate_pair_configs()) { diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer.h b/rtc_tools/rtc_event_log_visualizer/analyzer.h index ebdfdcc41c..4918cf48e1 100644 --- a/rtc_tools/rtc_event_log_visualizer/analyzer.h +++ b/rtc_tools/rtc_event_log_visualizer/analyzer.h @@ -79,32 +79,6 @@ class EventLogAnalyzer { std::string yaxis_label, Plot* plot); - void CreateAudioEncoderTargetBitrateGraph(Plot* plot); - void CreateAudioEncoderFrameLengthGraph(Plot* plot); - void CreateAudioEncoderPacketLossGraph(Plot* plot); - void 
CreateAudioEncoderEnableFecGraph(Plot* plot); - void CreateAudioEncoderEnableDtxGraph(Plot* plot); - void CreateAudioEncoderNumChannelsGraph(Plot* plot); - - using NetEqStatsGetterMap = - std::map>; - NetEqStatsGetterMap SimulateNetEq(const std::string& replacement_file_name, - int file_sample_rate_hz) const; - - void CreateAudioJitterBufferGraph(uint32_t ssrc, - const test::NetEqStatsGetter* stats_getter, - Plot* plot) const; - void CreateNetEqNetworkStatsGraph( - const NetEqStatsGetterMap& neteq_stats_getters, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const; - void CreateNetEqLifetimeStatsGraph( - const NetEqStatsGetterMap& neteq_stats_getters, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const; - void CreateIceCandidatePairConfigGraph(Plot* plot); void CreateIceConnectivityCheckGraph(Plot* plot); @@ -115,15 +89,6 @@ class EventLogAnalyzer { void PrintNotifications(FILE* file); private: - template - void CreateNetEqStatsGraphInternal( - const NetEqStatsGetterMap& neteq_stats, - rtc::FunctionView>*( - const test::NetEqStatsGetter*)> data_extractor, - rtc::FunctionView stats_extractor, - const std::string& plot_name, - Plot* plot) const; - template void CreateAccumulatedPacketsTimeSeries(Plot* plot, const IterableType& packets, diff --git a/rtc_tools/rtc_event_log_visualizer/analyzer_common.h b/rtc_tools/rtc_event_log_visualizer/analyzer_common.h index 3ac651e69a..d5776acf62 100644 --- a/rtc_tools/rtc_event_log_visualizer/analyzer_common.h +++ b/rtc_tools/rtc_event_log_visualizer/analyzer_common.h @@ -14,10 +14,19 @@ #include #include +#include "absl/types/optional.h" +#include "api/function_view.h" #include "logging/rtc_event_log/rtc_event_log_parser.h" +#include "rtc_tools/rtc_event_log_visualizer/plot_base.h" namespace webrtc { +constexpr int kNumMicrosecsPerSec = 1000000; +constexpr float kLeftMargin = 0.01f; +constexpr float kRightMargin = 0.02f; +constexpr float kBottomMargin = 
0.02f; +constexpr float kTopMargin = 0.05f; + class AnalyzerConfig { public: float GetCallTimeSec(int64_t timestamp_us) const { @@ -74,6 +83,100 @@ std::string GetStreamName(const ParsedRtcEventLog& parsed_log, uint32_t ssrc); std::string GetLayerName(LayerDescription layer); +// For each element in data_view, use |f()| to extract a y-coordinate and +// store the result in a TimeSeries. +template +void ProcessPoints(rtc::FunctionView fx, + rtc::FunctionView(const DataType&)> fy, + const IterableType& data_view, + TimeSeries* result) { + for (size_t i = 0; i < data_view.size(); i++) { + const DataType& elem = data_view[i]; + float x = fx(elem); + absl::optional y = fy(elem); + if (y) + result->points.emplace_back(x, *y); + } +} + +// For each pair of adjacent elements in |data|, use |f()| to extract a +// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate +// will be the time of the second element in the pair. +template +void ProcessPairs( + rtc::FunctionView fx, + rtc::FunctionView(const DataType&, + const DataType&)> fy, + const IterableType& data, + TimeSeries* result) { + for (size_t i = 1; i < data.size(); i++) { + float x = fx(data[i]); + absl::optional y = fy(data[i - 1], data[i]); + if (y) + result->points.emplace_back(x, static_cast(*y)); + } +} + +// For each pair of adjacent elements in |data|, use |f()| to extract a +// y-coordinate and store the result in a TimeSeries. Note that the x-coordinate +// will be the time of the second element in the pair. +template +void AccumulatePairs( + rtc::FunctionView fx, + rtc::FunctionView(const DataType&, + const DataType&)> fy, + const IterableType& data, + TimeSeries* result) { + ResultType sum = 0; + for (size_t i = 1; i < data.size(); i++) { + float x = fx(data[i]); + absl::optional y = fy(data[i - 1], data[i]); + if (y) { + sum += *y; + result->points.emplace_back(x, static_cast(sum)); + } + } +} + +// Calculates a moving average of |data| and stores the result in a TimeSeries. 
+// A data point is generated every |step| microseconds from |begin_time| +// to |end_time|. The value of each data point is the average of the data +// during the preceding |window_duration_us| microseconds. +template +void MovingAverage( + rtc::FunctionView(const DataType&)> fy, + const IterableType& data_view, + AnalyzerConfig config, + TimeSeries* result) { + size_t window_index_begin = 0; + size_t window_index_end = 0; + ResultType sum_in_window = 0; + + for (int64_t t = config.begin_time_; t < config.end_time_ + config.step_; + t += config.step_) { + while (window_index_end < data_view.size() && + data_view[window_index_end].log_time_us() < t) { + absl::optional value = fy(data_view[window_index_end]); + if (value) + sum_in_window += *value; + ++window_index_end; + } + while (window_index_begin < data_view.size() && + data_view[window_index_begin].log_time_us() < + t - config.window_duration_) { + absl::optional value = fy(data_view[window_index_begin]); + if (value) + sum_in_window -= *value; + ++window_index_begin; + } + float window_duration_s = + static_cast(config.window_duration_) / kNumMicrosecsPerSec; + float x = config.GetCallTimeSec(t); + float y = sum_in_window / window_duration_s; + result->points.emplace_back(x, y); + } +} + } // namespace webrtc #endif // RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_ANALYZER_COMMON_H_ diff --git a/rtc_tools/rtc_event_log_visualizer/log_simulation.cc b/rtc_tools/rtc_event_log_visualizer/log_simulation.cc index 0e5b5d04a9..c0b418de4b 100644 --- a/rtc_tools/rtc_event_log_visualizer/log_simulation.cc +++ b/rtc_tools/rtc_event_log_visualizer/log_simulation.cc @@ -14,6 +14,7 @@ #include "logging/rtc_event_log/rtc_event_processor.h" #include "modules/rtp_rtcp/source/time_util.h" +#include "system_wrappers/include/clock.h" namespace webrtc { @@ -83,7 +84,7 @@ void LogBasedNetworkControllerSimulation::OnPacketSent( } RtpPacketSendInfo packet_info; - packet_info.ssrc = packet.ssrc; + packet_info.media_ssrc = packet.ssrc; 
packet_info.transport_sequence_number = packet.transport_seq_no; packet_info.rtp_sequence_number = packet.stream_seq_no; packet_info.length = packet.size; @@ -142,11 +143,13 @@ void LogBasedNetworkControllerSimulation::OnReceiverReport( HandleStateUpdate(controller_->OnTransportLossReport(msg)); } + Clock* clock = Clock::GetRealTimeClock(); TimeDelta rtt = TimeDelta::PlusInfinity(); for (auto& rb : report.rr.report_blocks()) { if (rb.last_sr()) { + Timestamp report_log_time = Timestamp::Micros(report.log_time_us()); uint32_t receive_time_ntp = - CompactNtp(TimeMicrosToNtp(report.log_time_us())); + CompactNtp(clock->ConvertTimestampToNtpTime(report_log_time)); uint32_t rtt_ntp = receive_time_ntp - rb.delay_since_last_sr() - rb.last_sr(); rtt = std::min(rtt, TimeDelta::Millis(CompactNtpRttToMs(rtt_ntp))); diff --git a/rtc_tools/rtc_event_log_visualizer/main.cc b/rtc_tools/rtc_event_log_visualizer/main.cc index 21768c92d1..ab4b7ebac1 100644 --- a/rtc_tools/rtc_event_log_visualizer/main.cc +++ b/rtc_tools/rtc_event_log_visualizer/main.cc @@ -31,10 +31,9 @@ #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_tools/rtc_event_log_visualizer/alerts.h" +#include "rtc_tools/rtc_event_log_visualizer/analyze_audio.h" #include "rtc_tools/rtc_event_log_visualizer/analyzer.h" #include "rtc_tools/rtc_event_log_visualizer/plot_base.h" -#include "rtc_tools/rtc_event_log_visualizer/plot_protobuf.h" -#include "rtc_tools/rtc_event_log_visualizer/plot_python.h" #include "system_wrappers/include/field_trial.h" #include "test/field_trial.h" #include "test/testsupport/file_utils.h" @@ -78,7 +77,7 @@ ABSL_FLAG(bool, ABSL_FLAG(bool, print_triage_alerts, - false, + true, "Print triage alerts, i.e. 
a list of potential problems."); ABSL_FLAG(bool, @@ -230,8 +229,7 @@ int main(int argc, char* argv[]) { {"simulated_neteq_stats", {"simulated_neteq_jitter_buffer_delay", "simulated_neteq_preferred_buffer_size", - "simulated_neteq_concealment_events", - "simulated_neteq_packet_loss_rate", "simulated_neteq_preemptive_rate", + "simulated_neteq_concealment_events", "simulated_neteq_preemptive_rate", "simulated_neteq_accelerate_rate", "simulated_neteq_speech_expand_rate", "simulated_neteq_expand_rate"}}}; @@ -276,13 +274,7 @@ int main(int argc, char* argv[]) { } webrtc::EventLogAnalyzer analyzer(parsed_log, config); - std::unique_ptr collection; - if (absl::GetFlag(FLAGS_protobuf_output)) { - collection.reset(new webrtc::ProtobufPlotCollection()); - } else { - collection.reset( - new webrtc::PythonPlotCollection(absl::GetFlag(FLAGS_shared_xaxis))); - } + webrtc::PlotCollection collection; PlotMap plots; plots.RegisterPlot("incoming_packet_sizes", [&](Plot* plot) { @@ -436,22 +428,22 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("pacer_delay", [&](Plot* plot) { analyzer.CreatePacerDelayGraph(plot); }); plots.RegisterPlot("audio_encoder_bitrate", [&](Plot* plot) { - analyzer.CreateAudioEncoderTargetBitrateGraph(plot); + CreateAudioEncoderTargetBitrateGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_frame_length", [&](Plot* plot) { - analyzer.CreateAudioEncoderFrameLengthGraph(plot); + CreateAudioEncoderFrameLengthGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_packet_loss", [&](Plot* plot) { - analyzer.CreateAudioEncoderPacketLossGraph(plot); + CreateAudioEncoderPacketLossGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_fec", [&](Plot* plot) { - analyzer.CreateAudioEncoderEnableFecGraph(plot); + CreateAudioEncoderEnableFecGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_dtx", [&](Plot* plot) { - analyzer.CreateAudioEncoderEnableDtxGraph(plot); + 
CreateAudioEncoderEnableDtxGraph(parsed_log, config, plot); }); plots.RegisterPlot("audio_encoder_num_channels", [&](Plot* plot) { - analyzer.CreateAudioEncoderNumChannelsGraph(plot); + CreateAudioEncoderNumChannelsGraph(parsed_log, config, plot); }); plots.RegisterPlot("ice_candidate_pair_config", [&](Plot* plot) { @@ -474,14 +466,14 @@ int main(int argc, char* argv[]) { wav_path = webrtc::test::ResourcePath( "audio_processing/conversational_speech/EN_script2_F_sp2_B1", "wav"); } - absl::optional neteq_stats; + absl::optional neteq_stats; plots.RegisterPlot("simulated_neteq_expand_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.expand_rate / 16384.f; }, @@ -490,10 +482,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_speech_expand_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.speech_expand_rate / 16384.f; }, @@ -502,10 +494,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_accelerate_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return 
stats.accelerate_rate / 16384.f; }, @@ -514,34 +506,22 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_preemptive_rate", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.preemptive_rate / 16384.f; }, "Preemptive rate", plot); }); - plots.RegisterPlot("simulated_neteq_packet_loss_rate", [&](Plot* plot) { - if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); - } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, - [](const webrtc::NetEqNetworkStatistics& stats) { - return stats.packet_loss_rate / 16384.f; - }, - "Packet loss rate", plot); - }); - plots.RegisterPlot("simulated_neteq_concealment_events", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqLifetimeStatsGraph( - *neteq_stats, + webrtc::CreateNetEqLifetimeStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqLifetimeStatistics& stats) { return static_cast(stats.concealment_events); }, @@ -550,10 +530,10 @@ int main(int argc, char* argv[]) { plots.RegisterPlot("simulated_neteq_preferred_buffer_size", [&](Plot* plot) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - analyzer.CreateNetEqNetworkStatsGraph( - *neteq_stats, + webrtc::CreateNetEqNetworkStatsGraph( + parsed_log, config, *neteq_stats, [](const webrtc::NetEqNetworkStatistics& stats) { return stats.preferred_buffer_size_ms; }, @@ -600,7 +580,7 @@ int main(int argc, char* argv[]) { for (const auto& plot : plots) { if 
(plot.enabled) { - Plot* output = collection->AppendNewPlot(); + Plot* output = collection.AppendNewPlot(); plot.plot_func(output); output->SetId(plot.label); } @@ -614,17 +594,23 @@ int main(int argc, char* argv[]) { if (absl::c_find(plot_flags, "simulated_neteq_jitter_buffer_delay") != plot_flags.end()) { if (!neteq_stats) { - neteq_stats = analyzer.SimulateNetEq(wav_path, 48000); + neteq_stats = webrtc::SimulateNetEq(parsed_log, config, wav_path, 48000); } - for (webrtc::EventLogAnalyzer::NetEqStatsGetterMap::const_iterator it = - neteq_stats->cbegin(); + for (webrtc::NetEqStatsGetterMap::const_iterator it = neteq_stats->cbegin(); it != neteq_stats->cend(); ++it) { - analyzer.CreateAudioJitterBufferGraph(it->first, it->second.get(), - collection->AppendNewPlot()); + webrtc::CreateAudioJitterBufferGraph(parsed_log, config, it->first, + it->second.get(), + collection.AppendNewPlot()); } } - collection->Draw(); + if (absl::GetFlag(FLAGS_protobuf_output)) { + webrtc::analytics::ChartCollection proto_charts; + collection.ExportProtobuf(&proto_charts); + std::cout << proto_charts.SerializeAsString(); + } else { + collection.PrintPythonCode(absl::GetFlag(FLAGS_shared_xaxis)); + } if (absl::GetFlag(FLAGS_print_triage_alerts)) { webrtc::TriageHelper triage_alerts(config); diff --git a/rtc_tools/rtc_event_log_visualizer/plot_base.cc b/rtc_tools/rtc_event_log_visualizer/plot_base.cc index dfcd26fed5..82533e6eb0 100644 --- a/rtc_tools/rtc_event_log_visualizer/plot_base.cc +++ b/rtc_tools/rtc_event_log_visualizer/plot_base.cc @@ -11,6 +11,7 @@ #include "rtc_tools/rtc_event_log_visualizer/plot_base.h" #include +#include #include "rtc_base/checks.h" @@ -93,4 +94,231 @@ void Plot::AppendTimeSeriesIfNotEmpty(TimeSeries&& time_series) { } } +void Plot::PrintPythonCode() const { + // Write python commands to stdout. 
Intended program usage is + // ./event_log_visualizer event_log160330.dump | python + + if (!series_list_.empty()) { + printf("color_count = %zu\n", series_list_.size()); + printf( + "hls_colors = [(i*1.0/color_count, 0.25+i*0.5/color_count, 0.8) for i " + "in range(color_count)]\n"); + printf("colors = [colorsys.hls_to_rgb(*hls) for hls in hls_colors]\n"); + + for (size_t i = 0; i < series_list_.size(); i++) { + printf("\n# === Series: %s ===\n", series_list_[i].label.c_str()); + // List x coordinates + printf("x%zu = [", i); + if (!series_list_[i].points.empty()) + printf("%.3f", series_list_[i].points[0].x); + for (size_t j = 1; j < series_list_[i].points.size(); j++) + printf(", %.3f", series_list_[i].points[j].x); + printf("]\n"); + + // List y coordinates + printf("y%zu = [", i); + if (!series_list_[i].points.empty()) + printf("%G", series_list_[i].points[0].y); + for (size_t j = 1; j < series_list_[i].points.size(); j++) + printf(", %G", series_list_[i].points[j].y); + printf("]\n"); + + if (series_list_[i].line_style == LineStyle::kBar) { + // There is a plt.bar function that draws bar plots, + // but it is *way* too slow to be useful. 
+ printf( + "plt.vlines(x%zu, [min(t,0) for t in y%zu], [max(t,0) for t in " + "y%zu], color=colors[%zu], label=\'%s\')\n", + i, i, i, i, series_list_[i].label.c_str()); + if (series_list_[i].point_style == PointStyle::kHighlight) { + printf( + "plt.plot(x%zu, y%zu, color=colors[%zu], " + "marker='.', ls=' ')\n", + i, i, i); + } + } else if (series_list_[i].line_style == LineStyle::kLine) { + if (series_list_[i].point_style == PointStyle::kHighlight) { + printf( + "plt.plot(x%zu, y%zu, color=colors[%zu], label=\'%s\', " + "marker='.')\n", + i, i, i, series_list_[i].label.c_str()); + } else { + printf("plt.plot(x%zu, y%zu, color=colors[%zu], label=\'%s\')\n", i, + i, i, series_list_[i].label.c_str()); + } + } else if (series_list_[i].line_style == LineStyle::kStep) { + // Draw lines from (x[0],y[0]) to (x[1],y[0]) to (x[1],y[1]) and so on + // to illustrate the "steps". This can be expressed by duplicating all + // elements except the first in x and the last in y. + printf("xd%zu = [dup for v in x%zu for dup in [v, v]]\n", i, i); + printf("yd%zu = [dup for v in y%zu for dup in [v, v]]\n", i, i); + printf( + "plt.plot(xd%zu[1:], yd%zu[:-1], color=colors[%zu], " + "label=\'%s\')\n", + i, i, i, series_list_[i].label.c_str()); + if (series_list_[i].point_style == PointStyle::kHighlight) { + printf( + "plt.plot(x%zu, y%zu, color=colors[%zu], " + "marker='.', ls=' ')\n", + i, i, i); + } + } else if (series_list_[i].line_style == LineStyle::kNone) { + printf( + "plt.plot(x%zu, y%zu, color=colors[%zu], label=\'%s\', " + "marker='o', ls=' ')\n", + i, i, i, series_list_[i].label.c_str()); + } else { + printf("raise Exception(\"Unknown graph type\")\n"); + } + } + + // IntervalSeries + printf("interval_colors = ['#ff8e82','#5092fc','#c4ffc4','#aaaaaa']\n"); + RTC_CHECK_LE(interval_list_.size(), 4); + // To get the intervals to show up in the legend we have to create patches + // for them. 
+ printf("legend_patches = []\n"); + for (size_t i = 0; i < interval_list_.size(); i++) { + // List intervals + printf("\n# === IntervalSeries: %s ===\n", + interval_list_[i].label.c_str()); + printf("ival%zu = [", i); + if (!interval_list_[i].intervals.empty()) { + printf("(%G, %G)", interval_list_[i].intervals[0].begin, + interval_list_[i].intervals[0].end); + } + for (size_t j = 1; j < interval_list_[i].intervals.size(); j++) { + printf(", (%G, %G)", interval_list_[i].intervals[j].begin, + interval_list_[i].intervals[j].end); + } + printf("]\n"); + + printf("for i in range(0, %zu):\n", interval_list_[i].intervals.size()); + if (interval_list_[i].orientation == IntervalSeries::kVertical) { + printf( + " plt.axhspan(ival%zu[i][0], ival%zu[i][1], " + "facecolor=interval_colors[%zu], " + "alpha=0.3)\n", + i, i, i); + } else { + printf( + " plt.axvspan(ival%zu[i][0], ival%zu[i][1], " + "facecolor=interval_colors[%zu], " + "alpha=0.3)\n", + i, i, i); + } + printf( + "legend_patches.append(mpatches.Patch(ec=\'black\', " + "fc=interval_colors[%zu], label='%s'))\n", + i, interval_list_[i].label.c_str()); + } + } + + printf("plt.xlim(%f, %f)\n", xaxis_min_, xaxis_max_); + printf("plt.ylim(%f, %f)\n", yaxis_min_, yaxis_max_); + printf("plt.xlabel(\'%s\')\n", xaxis_label_.c_str()); + printf("plt.ylabel(\'%s\')\n", yaxis_label_.c_str()); + printf("plt.title(\'%s\')\n", title_.c_str()); + printf("fig = plt.gcf()\n"); + printf("fig.canvas.set_window_title(\'%s\')\n", id_.c_str()); + if (!yaxis_tick_labels_.empty()) { + printf("yaxis_tick_labels = ["); + for (const auto& kv : yaxis_tick_labels_) { + printf("(%f,\"%s\"),", kv.first, kv.second.c_str()); + } + printf("]\n"); + printf("yaxis_tick_labels = list(zip(*yaxis_tick_labels))\n"); + printf("plt.yticks(*yaxis_tick_labels)\n"); + } + if (!series_list_.empty() || !interval_list_.empty()) { + printf("handles, labels = plt.gca().get_legend_handles_labels()\n"); + printf("for lp in legend_patches:\n"); + printf(" 
handles.append(lp)\n"); + printf(" labels.append(lp.get_label())\n"); + printf("plt.legend(handles, labels, loc=\'best\', fontsize=\'small\')\n"); + } +} + +void Plot::ExportProtobuf(webrtc::analytics::Chart* chart) const { + for (size_t i = 0; i < series_list_.size(); i++) { + webrtc::analytics::DataSet* data_set = chart->add_data_sets(); + for (const auto& point : series_list_[i].points) { + data_set->add_x_values(point.x); + } + for (const auto& point : series_list_[i].points) { + data_set->add_y_values(point.y); + } + + if (series_list_[i].line_style == LineStyle::kBar) { + data_set->set_style(webrtc::analytics::ChartStyle::BAR_CHART); + } else if (series_list_[i].line_style == LineStyle::kLine) { + data_set->set_style(webrtc::analytics::ChartStyle::LINE_CHART); + } else if (series_list_[i].line_style == LineStyle::kStep) { + data_set->set_style(webrtc::analytics::ChartStyle::LINE_STEP_CHART); + } else if (series_list_[i].line_style == LineStyle::kNone) { + data_set->set_style(webrtc::analytics::ChartStyle::SCATTER_CHART); + } else { + data_set->set_style(webrtc::analytics::ChartStyle::UNDEFINED); + } + + if (series_list_[i].point_style == PointStyle::kHighlight) + data_set->set_highlight_points(true); + + data_set->set_label(series_list_[i].label); + } + + chart->set_xaxis_min(xaxis_min_); + chart->set_xaxis_max(xaxis_max_); + chart->set_yaxis_min(yaxis_min_); + chart->set_yaxis_max(yaxis_max_); + chart->set_xaxis_label(xaxis_label_); + chart->set_yaxis_label(yaxis_label_); + chart->set_title(title_); + chart->set_id(id_); + + for (const auto& kv : yaxis_tick_labels_) { + webrtc::analytics::TickLabel* tick = chart->add_yaxis_tick_labels(); + tick->set_value(kv.first); + tick->set_label(kv.second); + } +} + +void PlotCollection::PrintPythonCode(bool shared_xaxis) const { + printf("import matplotlib.pyplot as plt\n"); + printf("plt.rcParams.update({'figure.max_open_warning': 0})\n"); + printf("import matplotlib.patches as mpatches\n"); + printf("import 
matplotlib.patheffects as pe\n"); + printf("import colorsys\n"); + for (size_t i = 0; i < plots_.size(); i++) { + printf("plt.figure(%zu)\n", i); + if (shared_xaxis) { + // Link x-axes across all figures for synchronized zooming. + if (i == 0) { + printf("axis0 = plt.subplot(111)\n"); + } else { + printf("plt.subplot(111, sharex=axis0)\n"); + } + } + plots_[i]->PrintPythonCode(); + } + printf("plt.show()\n"); +} + +void PlotCollection::ExportProtobuf( + webrtc::analytics::ChartCollection* collection) const { + for (const auto& plot : plots_) { + // TODO(terelius): Ensure that there is no way to insert plots other than + // ProtobufPlots in a ProtobufPlotCollection. Needed to safely static_cast + // here. + webrtc::analytics::Chart* protobuf_representation = + collection->add_charts(); + plot->ExportProtobuf(protobuf_representation); + } +} + +Plot* PlotCollection::AppendNewPlot() { + plots_.push_back(std::make_unique()); + return plots_.back().get(); +} + } // namespace webrtc diff --git a/rtc_tools/rtc_event_log_visualizer/plot_base.h b/rtc_tools/rtc_event_log_visualizer/plot_base.h index 5e4ebfa522..a26146b5e5 100644 --- a/rtc_tools/rtc_event_log_visualizer/plot_base.h +++ b/rtc_tools/rtc_event_log_visualizer/plot_base.h @@ -15,6 +15,13 @@ #include #include +#include "absl/base/attributes.h" +#include "rtc_base/ignore_wundef.h" + +RTC_PUSH_IGNORING_WUNDEF() +#include "rtc_tools/rtc_event_log_visualizer/proto/chart.pb.h" +RTC_POP_IGNORING_WUNDEF() + namespace webrtc { enum class LineStyle { @@ -94,8 +101,8 @@ class Plot { public: virtual ~Plot() {} - // Overloaded to draw the plot. - virtual void Draw() = 0; + ABSL_DEPRECATED("Use PrintPythonCode() or ExportProtobuf() instead.") + virtual void Draw() {} // Sets the lower x-axis limit to min_value (if left_margin == 0). // Sets the upper x-axis limit to max_value (if right_margin == 0). @@ -158,6 +165,12 @@ class Plot { // Otherwise, the call has no effect and the timeseries is destroyed. 
void AppendTimeSeriesIfNotEmpty(TimeSeries&& time_series); + // Replaces PythonPlot::Draw() + void PrintPythonCode() const; + + // Replaces ProtobufPlot::Draw() + void ExportProtobuf(webrtc::analytics::Chart* chart) const; + protected: float xaxis_min_; float xaxis_max_; @@ -175,8 +188,17 @@ class Plot { class PlotCollection { public: virtual ~PlotCollection() {} - virtual void Draw() = 0; - virtual Plot* AppendNewPlot() = 0; + + ABSL_DEPRECATED("Use PrintPythonCode() or ExportProtobuf() instead.") + virtual void Draw() {} + + virtual Plot* AppendNewPlot(); + + // Replaces PythonPlotCollection::Draw() + void PrintPythonCode(bool shared_xaxis) const; + + // Replaces ProtobufPlotCollections::Draw() + void ExportProtobuf(webrtc::analytics::ChartCollection* collection) const; protected: std::vector> plots_; diff --git a/rtc_tools/rtc_event_log_visualizer/plot_protobuf.cc b/rtc_tools/rtc_event_log_visualizer/plot_protobuf.cc index 9e82c01ba6..0f43191e8b 100644 --- a/rtc_tools/rtc_event_log_visualizer/plot_protobuf.cc +++ b/rtc_tools/rtc_event_log_visualizer/plot_protobuf.cc @@ -24,49 +24,7 @@ ProtobufPlot::~ProtobufPlot() {} void ProtobufPlot::Draw() {} -void ProtobufPlot::ExportProtobuf(webrtc::analytics::Chart* chart) { - for (size_t i = 0; i < series_list_.size(); i++) { - webrtc::analytics::DataSet* data_set = chart->add_data_sets(); - for (const auto& point : series_list_[i].points) { - data_set->add_x_values(point.x); - } - for (const auto& point : series_list_[i].points) { - data_set->add_y_values(point.y); - } - if (series_list_[i].line_style == LineStyle::kBar) { - data_set->set_style(webrtc::analytics::ChartStyle::BAR_CHART); - } else if (series_list_[i].line_style == LineStyle::kLine) { - data_set->set_style(webrtc::analytics::ChartStyle::LINE_CHART); - } else if (series_list_[i].line_style == LineStyle::kStep) { - data_set->set_style(webrtc::analytics::ChartStyle::LINE_STEP_CHART); - } else if (series_list_[i].line_style == LineStyle::kNone) { - 
data_set->set_style(webrtc::analytics::ChartStyle::SCATTER_CHART); - } else { - data_set->set_style(webrtc::analytics::ChartStyle::UNDEFINED); - } - - if (series_list_[i].point_style == PointStyle::kHighlight) - data_set->set_highlight_points(true); - - data_set->set_label(series_list_[i].label); - } - - chart->set_xaxis_min(xaxis_min_); - chart->set_xaxis_max(xaxis_max_); - chart->set_yaxis_min(yaxis_min_); - chart->set_yaxis_max(yaxis_max_); - chart->set_xaxis_label(xaxis_label_); - chart->set_yaxis_label(yaxis_label_); - chart->set_title(title_); - chart->set_id(id_); - - for (const auto& kv : yaxis_tick_labels_) { - webrtc::analytics::TickLabel* tick = chart->add_yaxis_tick_labels(); - tick->set_value(kv.first); - tick->set_label(kv.second); - } -} ProtobufPlotCollection::ProtobufPlotCollection() {} @@ -78,19 +36,6 @@ void ProtobufPlotCollection::Draw() { std::cout << collection.SerializeAsString(); } -void ProtobufPlotCollection::ExportProtobuf( - webrtc::analytics::ChartCollection* collection) { - for (const auto& plot : plots_) { - // TODO(terelius): Ensure that there is no way to insert plots other than - // ProtobufPlots in a ProtobufPlotCollection. Needed to safely static_cast - // here. 
- webrtc::analytics::Chart* protobuf_representation = - collection->add_charts(); - static_cast(plot.get()) - ->ExportProtobuf(protobuf_representation); - } -} - Plot* ProtobufPlotCollection::AppendNewPlot() { Plot* plot = new ProtobufPlot(); plots_.push_back(std::unique_ptr(plot)); diff --git a/rtc_tools/rtc_event_log_visualizer/plot_protobuf.h b/rtc_tools/rtc_event_log_visualizer/plot_protobuf.h index 738247a309..fbe68853a3 100644 --- a/rtc_tools/rtc_event_log_visualizer/plot_protobuf.h +++ b/rtc_tools/rtc_event_log_visualizer/plot_protobuf.h @@ -10,6 +10,7 @@ #ifndef RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_PLOT_PROTOBUF_H_ #define RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_PLOT_PROTOBUF_H_ +#include "absl/base/attributes.h" #include "rtc_base/ignore_wundef.h" RTC_PUSH_IGNORING_WUNDEF() #include "rtc_tools/rtc_event_log_visualizer/proto/chart.pb.h" @@ -23,16 +24,15 @@ class ProtobufPlot final : public Plot { ProtobufPlot(); ~ProtobufPlot() override; void Draw() override; - void ExportProtobuf(webrtc::analytics::Chart* chart); }; -class ProtobufPlotCollection final : public PlotCollection { +class ABSL_DEPRECATED("Use PlotCollection and ExportProtobuf() instead") + ProtobufPlotCollection final : public PlotCollection { public: ProtobufPlotCollection(); ~ProtobufPlotCollection() override; void Draw() override; Plot* AppendNewPlot() override; - void ExportProtobuf(webrtc::analytics::ChartCollection* collection); }; } // namespace webrtc diff --git a/rtc_tools/rtc_event_log_visualizer/plot_python.cc b/rtc_tools/rtc_event_log_visualizer/plot_python.cc index e7cde45f30..b3708110df 100644 --- a/rtc_tools/rtc_event_log_visualizer/plot_python.cc +++ b/rtc_tools/rtc_event_log_visualizer/plot_python.cc @@ -25,149 +25,7 @@ PythonPlot::PythonPlot() {} PythonPlot::~PythonPlot() {} void PythonPlot::Draw() { - // Write python commands to stdout. 
Intended program usage is - // ./event_log_visualizer event_log160330.dump | python - - if (!series_list_.empty()) { - printf("color_count = %zu\n", series_list_.size()); - printf( - "hls_colors = [(i*1.0/color_count, 0.25+i*0.5/color_count, 0.8) for i " - "in range(color_count)]\n"); - printf("colors = [colorsys.hls_to_rgb(*hls) for hls in hls_colors]\n"); - - for (size_t i = 0; i < series_list_.size(); i++) { - printf("\n# === Series: %s ===\n", series_list_[i].label.c_str()); - // List x coordinates - printf("x%zu = [", i); - if (!series_list_[i].points.empty()) - printf("%.3f", series_list_[i].points[0].x); - for (size_t j = 1; j < series_list_[i].points.size(); j++) - printf(", %.3f", series_list_[i].points[j].x); - printf("]\n"); - - // List y coordinates - printf("y%zu = [", i); - if (!series_list_[i].points.empty()) - printf("%G", series_list_[i].points[0].y); - for (size_t j = 1; j < series_list_[i].points.size(); j++) - printf(", %G", series_list_[i].points[j].y); - printf("]\n"); - - if (series_list_[i].line_style == LineStyle::kBar) { - // There is a plt.bar function that draws bar plots, - // but it is *way* too slow to be useful. 
- printf( - "plt.vlines(x%zu, map(lambda t: min(t,0), y%zu), map(lambda t: " - "max(t,0), y%zu), color=colors[%zu], " - "label=\'%s\')\n", - i, i, i, i, series_list_[i].label.c_str()); - if (series_list_[i].point_style == PointStyle::kHighlight) { - printf( - "plt.plot(x%zu, y%zu, color=colors[%zu], " - "marker='.', ls=' ')\n", - i, i, i); - } - } else if (series_list_[i].line_style == LineStyle::kLine) { - if (series_list_[i].point_style == PointStyle::kHighlight) { - printf( - "plt.plot(x%zu, y%zu, color=colors[%zu], label=\'%s\', " - "marker='.')\n", - i, i, i, series_list_[i].label.c_str()); - } else { - printf("plt.plot(x%zu, y%zu, color=colors[%zu], label=\'%s\')\n", i, - i, i, series_list_[i].label.c_str()); - } - } else if (series_list_[i].line_style == LineStyle::kStep) { - // Draw lines from (x[0],y[0]) to (x[1],y[0]) to (x[1],y[1]) and so on - // to illustrate the "steps". This can be expressed by duplicating all - // elements except the first in x and the last in y. - printf("xd%zu = [dup for v in x%zu for dup in [v, v]]\n", i, i); - printf("yd%zu = [dup for v in y%zu for dup in [v, v]]\n", i, i); - printf( - "plt.plot(xd%zu[1:], yd%zu[:-1], color=colors[%zu], " - "label=\'%s\')\n", - i, i, i, series_list_[i].label.c_str()); - if (series_list_[i].point_style == PointStyle::kHighlight) { - printf( - "plt.plot(x%zu, y%zu, color=colors[%zu], " - "marker='.', ls=' ')\n", - i, i, i); - } - } else if (series_list_[i].line_style == LineStyle::kNone) { - printf( - "plt.plot(x%zu, y%zu, color=colors[%zu], label=\'%s\', " - "marker='o', ls=' ')\n", - i, i, i, series_list_[i].label.c_str()); - } else { - printf("raise Exception(\"Unknown graph type\")\n"); - } - } - - // IntervalSeries - printf("interval_colors = ['#ff8e82','#5092fc','#c4ffc4','#aaaaaa']\n"); - RTC_CHECK_LE(interval_list_.size(), 4); - // To get the intervals to show up in the legend we have to create patches - // for them. 
- printf("legend_patches = []\n"); - for (size_t i = 0; i < interval_list_.size(); i++) { - // List intervals - printf("\n# === IntervalSeries: %s ===\n", - interval_list_[i].label.c_str()); - printf("ival%zu = [", i); - if (!interval_list_[i].intervals.empty()) { - printf("(%G, %G)", interval_list_[i].intervals[0].begin, - interval_list_[i].intervals[0].end); - } - for (size_t j = 1; j < interval_list_[i].intervals.size(); j++) { - printf(", (%G, %G)", interval_list_[i].intervals[j].begin, - interval_list_[i].intervals[j].end); - } - printf("]\n"); - - printf("for i in range(0, %zu):\n", interval_list_[i].intervals.size()); - if (interval_list_[i].orientation == IntervalSeries::kVertical) { - printf( - " plt.axhspan(ival%zu[i][0], ival%zu[i][1], " - "facecolor=interval_colors[%zu], " - "alpha=0.3)\n", - i, i, i); - } else { - printf( - " plt.axvspan(ival%zu[i][0], ival%zu[i][1], " - "facecolor=interval_colors[%zu], " - "alpha=0.3)\n", - i, i, i); - } - printf( - "legend_patches.append(mpatches.Patch(ec=\'black\', " - "fc=interval_colors[%zu], label='%s'))\n", - i, interval_list_[i].label.c_str()); - } - } - - printf("plt.xlim(%f, %f)\n", xaxis_min_, xaxis_max_); - printf("plt.ylim(%f, %f)\n", yaxis_min_, yaxis_max_); - printf("plt.xlabel(\'%s\')\n", xaxis_label_.c_str()); - printf("plt.ylabel(\'%s\')\n", yaxis_label_.c_str()); - printf("plt.title(\'%s\')\n", title_.c_str()); - printf("fig = plt.gcf()\n"); - printf("fig.canvas.set_window_title(\'%s\')\n", id_.c_str()); - if (!yaxis_tick_labels_.empty()) { - printf("yaxis_tick_labels = ["); - for (const auto& kv : yaxis_tick_labels_) { - printf("(%f,\"%s\"),", kv.first, kv.second.c_str()); - } - printf("]\n"); - printf("yaxis_tick_labels = list(zip(*yaxis_tick_labels))\n"); - printf("plt.yticks(*yaxis_tick_labels)\n"); - } - if (!series_list_.empty() || !interval_list_.empty()) { - printf("handles, labels = plt.gca().get_legend_handles_labels()\n"); - printf("for lp in legend_patches:\n"); - printf(" 
handles.append(lp)\n"); - printf(" labels.append(lp.get_label())\n"); - printf("plt.legend(handles, labels, loc=\'best\', fontsize=\'small\')\n"); - } + PrintPythonCode(); } PythonPlotCollection::PythonPlotCollection(bool shared_xaxis) @@ -176,24 +34,7 @@ PythonPlotCollection::PythonPlotCollection(bool shared_xaxis) PythonPlotCollection::~PythonPlotCollection() {} void PythonPlotCollection::Draw() { - printf("import matplotlib.pyplot as plt\n"); - printf("plt.rcParams.update({'figure.max_open_warning': 0})\n"); - printf("import matplotlib.patches as mpatches\n"); - printf("import matplotlib.patheffects as pe\n"); - printf("import colorsys\n"); - for (size_t i = 0; i < plots_.size(); i++) { - printf("plt.figure(%zu)\n", i); - if (shared_xaxis_) { - // Link x-axes across all figures for synchronized zooming. - if (i == 0) { - printf("axis0 = plt.subplot(111)\n"); - } else { - printf("plt.subplot(111, sharex=axis0)\n"); - } - } - plots_[i]->Draw(); - } - printf("plt.show()\n"); + PrintPythonCode(shared_xaxis_); } Plot* PythonPlotCollection::AppendNewPlot() { diff --git a/rtc_tools/rtc_event_log_visualizer/plot_python.h b/rtc_tools/rtc_event_log_visualizer/plot_python.h index dcdcf23fcf..6acc436d71 100644 --- a/rtc_tools/rtc_event_log_visualizer/plot_python.h +++ b/rtc_tools/rtc_event_log_visualizer/plot_python.h @@ -10,6 +10,7 @@ #ifndef RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_PLOT_PYTHON_H_ #define RTC_TOOLS_RTC_EVENT_LOG_VISUALIZER_PLOT_PYTHON_H_ +#include "absl/base/attributes.h" #include "rtc_tools/rtc_event_log_visualizer/plot_base.h" namespace webrtc { @@ -21,7 +22,8 @@ class PythonPlot final : public Plot { void Draw() override; }; -class PythonPlotCollection final : public PlotCollection { +class ABSL_DEPRECATED("Use PlotCollection and PrintPythonCode() instead.") + PythonPlotCollection final : public PlotCollection { public: explicit PythonPlotCollection(bool shared_xaxis = false); ~PythonPlotCollection() override; diff --git 
a/rtc_tools/rtp_generator/rtp_generator.cc b/rtc_tools/rtp_generator/rtp_generator.cc index 21826c8dff..c2fc1cff06 100644 --- a/rtc_tools/rtp_generator/rtp_generator.cc +++ b/rtc_tools/rtp_generator/rtp_generator.cc @@ -136,10 +136,15 @@ absl::optional ParseRtpGeneratorOptionsFromFile( } // Parse the file as JSON - Json::Reader json_reader; + Json::CharReaderBuilder builder; Json::Value json; - if (!json_reader.parse(raw_json_buffer.data(), json)) { - RTC_LOG(LS_ERROR) << "Unable to parse the corpus config json file"; + std::string error_message; + std::unique_ptr json_reader(builder.newCharReader()); + if (!json_reader->parse(raw_json_buffer.data(), + raw_json_buffer.data() + raw_json_buffer.size(), + &json, &error_message)) { + RTC_LOG(LS_ERROR) << "Unable to parse the corpus config json file. Error:" + << error_message; return absl::nullopt; } @@ -188,15 +193,17 @@ RtpGenerator::RtpGenerator(const RtpGeneratorOptions& options) PayloadStringToCodecType(video_config.rtp.payload_name); if (video_config.rtp.payload_name == cricket::kVp8CodecName) { VideoCodecVP8 settings = VideoEncoder::GetDefaultVp8Settings(); - encoder_config.encoder_specific_settings = new rtc::RefCountedObject< - VideoEncoderConfig::Vp8EncoderSpecificSettings>(settings); + encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + settings); } else if (video_config.rtp.payload_name == cricket::kVp9CodecName) { VideoCodecVP9 settings = VideoEncoder::GetDefaultVp9Settings(); - encoder_config.encoder_specific_settings = new rtc::RefCountedObject< - VideoEncoderConfig::Vp9EncoderSpecificSettings>(settings); + encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + settings); } else if (video_config.rtp.payload_name == cricket::kH264CodecName) { VideoCodecH264 settings = VideoEncoder::GetDefaultH264Settings(); - encoder_config.encoder_specific_settings = new rtc::RefCountedObject< + encoder_config.encoder_specific_settings = rtc::make_ref_counted< 
VideoEncoderConfig::H264EncoderSpecificSettings>(settings); } encoder_config.video_format.name = video_config.rtp.payload_name; @@ -217,7 +224,7 @@ RtpGenerator::RtpGenerator(const RtpGeneratorOptions& options) } encoder_config.video_stream_factory = - new rtc::RefCountedObject( + rtc::make_ref_counted( video_config.rtp.payload_name, /*max qp*/ 56, /*screencast*/ false, /*screenshare enabled*/ false); diff --git a/rtc_tools/rtp_generator/rtp_generator.h b/rtc_tools/rtp_generator/rtp_generator.h index 6248c6a636..a317bf7278 100644 --- a/rtc_tools/rtp_generator/rtp_generator.h +++ b/rtc_tools/rtp_generator/rtp_generator.h @@ -27,7 +27,6 @@ #include "call/rtp_config.h" #include "call/video_send_stream.h" #include "media/engine/webrtc_video_engine.h" -#include "rtc_base/constructor_magic.h" #include "test/frame_generator_capturer.h" #include "test/rtp_file_reader.h" #include "test/rtp_file_writer.h" @@ -79,6 +78,11 @@ class RtpGenerator final : public webrtc::Transport { public: // Construct a new RtpGenerator using the specified options. explicit RtpGenerator(const RtpGeneratorOptions& options); + + RtpGenerator() = delete; + RtpGenerator(const RtpGenerator&) = delete; + RtpGenerator& operator=(const RtpGenerator&) = delete; + // Cleans up the VideoSendStream. ~RtpGenerator() override; // Generates an rtp_dump that is written out to @@ -113,9 +117,6 @@ class RtpGenerator final : public webrtc::Transport { std::vector durations_ms_; uint32_t start_ms_ = 0; std::unique_ptr task_queue_; - - // This object cannot be copied. - RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(RtpGenerator); }; } // namespace webrtc diff --git a/rtc_tools/sanitizers_unittest.cc b/rtc_tools/sanitizers_unittest.cc index b997bf0c23..9606f42216 100644 --- a/rtc_tools/sanitizers_unittest.cc +++ b/rtc_tools/sanitizers_unittest.cc @@ -110,7 +110,7 @@ void DataRace() { thread2.Join(); // TSan seems to mess with gtest's death detection. // Fail intentionally, and rely on detecting the error message. 
- RTC_CHECK(false); + RTC_CHECK_NOTREACHED(); } TEST(SanitizersDeathTest, ThreadSanitizer) { diff --git a/rtc_tools/testing/build_apprtc.py b/rtc_tools/testing/build_apprtc.py index 367a2602d5..e93b7e06c7 100755 --- a/rtc_tools/testing/build_apprtc.py +++ b/rtc_tools/testing/build_apprtc.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Builds the AppRTC collider using the golang toolchain. The golang toolchain is downloaded by download_apprtc.py. We use that here @@ -24,44 +23,44 @@ import utils - USAGE_STR = "Usage: {} " def _ConfigureApprtcServerToDeveloperMode(app_yaml_path): - for line in fileinput.input(app_yaml_path, inplace=True): - # We can't click past these in browser-based tests, so disable them. - line = line.replace('BYPASS_JOIN_CONFIRMATION: false', - 'BYPASS_JOIN_CONFIRMATION: true') - sys.stdout.write(line) + for line in fileinput.input(app_yaml_path, inplace=True): + # We can't click past these in browser-based tests, so disable them. 
+ line = line.replace('BYPASS_JOIN_CONFIRMATION: false', + 'BYPASS_JOIN_CONFIRMATION: true') + sys.stdout.write(line) def main(argv): - if len(argv) != 4: - return USAGE_STR.format(argv[0]) + if len(argv) != 4: + return USAGE_STR.format(argv[0]) - apprtc_dir = os.path.abspath(argv[1]) - go_root_dir = os.path.abspath(argv[2]) - golang_workspace = os.path.abspath(argv[3]) + apprtc_dir = os.path.abspath(argv[1]) + go_root_dir = os.path.abspath(argv[2]) + golang_workspace = os.path.abspath(argv[3]) - app_yaml_path = os.path.join(apprtc_dir, 'out', 'app_engine', 'app.yaml') - _ConfigureApprtcServerToDeveloperMode(app_yaml_path) + app_yaml_path = os.path.join(apprtc_dir, 'out', 'app_engine', 'app.yaml') + _ConfigureApprtcServerToDeveloperMode(app_yaml_path) - utils.RemoveDirectory(golang_workspace) + utils.RemoveDirectory(golang_workspace) - collider_dir = os.path.join(apprtc_dir, 'src', 'collider') - shutil.copytree(collider_dir, os.path.join(golang_workspace, 'src')) + collider_dir = os.path.join(apprtc_dir, 'src', 'collider') + shutil.copytree(collider_dir, os.path.join(golang_workspace, 'src')) - golang_path = os.path.join(go_root_dir, 'bin', - 'go' + utils.GetExecutableExtension()) - golang_env = os.environ.copy() - golang_env['GOROOT'] = go_root_dir - golang_env['GOPATH'] = golang_workspace - collider_out = os.path.join(golang_workspace, - 'collidermain' + utils.GetExecutableExtension()) - subprocess.check_call([golang_path, 'build', '-o', collider_out, - 'collidermain'], env=golang_env) + golang_path = os.path.join(go_root_dir, 'bin', + 'go' + utils.GetExecutableExtension()) + golang_env = os.environ.copy() + golang_env['GOROOT'] = go_root_dir + golang_env['GOPATH'] = golang_workspace + collider_out = os.path.join( + golang_workspace, 'collidermain' + utils.GetExecutableExtension()) + subprocess.check_call( + [golang_path, 'build', '-o', collider_out, 'collidermain'], + env=golang_env) if __name__ == '__main__': - sys.exit(main(sys.argv)) + 
sys.exit(main(sys.argv)) diff --git a/rtc_tools/testing/download_apprtc.py b/rtc_tools/testing/download_apprtc.py index f6db785275..a77955a3f6 100755 --- a/rtc_tools/testing/download_apprtc.py +++ b/rtc_tools/testing/download_apprtc.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Downloads prebuilt AppRTC and Go from WebRTC storage and unpacks it. Requires that depot_tools is installed and in the PATH. @@ -21,38 +20,37 @@ import utils - SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) def _GetGoArchivePathForPlatform(): - archive_extension = 'zip' if utils.GetPlatform() == 'win' else 'tar.gz' - return os.path.join(utils.GetPlatform(), 'go.%s' % archive_extension) + archive_extension = 'zip' if utils.GetPlatform() == 'win' else 'tar.gz' + return os.path.join(utils.GetPlatform(), 'go.%s' % archive_extension) def main(argv): - if len(argv) > 2: - return 'Usage: %s [output_dir]' % argv[0] + if len(argv) > 2: + return 'Usage: %s [output_dir]' % argv[0] - output_dir = os.path.abspath(argv[1]) if len(argv) > 1 else None + output_dir = os.path.abspath(argv[1]) if len(argv) > 1 else None - apprtc_zip_path = os.path.join(SCRIPT_DIR, 'prebuilt_apprtc.zip') - if os.path.isfile(apprtc_zip_path + '.sha1'): - utils.DownloadFilesFromGoogleStorage(SCRIPT_DIR, auto_platform=False) + apprtc_zip_path = os.path.join(SCRIPT_DIR, 'prebuilt_apprtc.zip') + if os.path.isfile(apprtc_zip_path + '.sha1'): + utils.DownloadFilesFromGoogleStorage(SCRIPT_DIR, auto_platform=False) - if output_dir is not None: - utils.RemoveDirectory(os.path.join(output_dir, 'apprtc')) - utils.UnpackArchiveTo(apprtc_zip_path, output_dir) + if output_dir is not None: + utils.RemoveDirectory(os.path.join(output_dir, 'apprtc')) + utils.UnpackArchiveTo(apprtc_zip_path, output_dir) - golang_path = os.path.join(SCRIPT_DIR, 'golang') - 
golang_zip_path = os.path.join(golang_path, _GetGoArchivePathForPlatform()) - if os.path.isfile(golang_zip_path + '.sha1'): - utils.DownloadFilesFromGoogleStorage(golang_path) + golang_path = os.path.join(SCRIPT_DIR, 'golang') + golang_zip_path = os.path.join(golang_path, _GetGoArchivePathForPlatform()) + if os.path.isfile(golang_zip_path + '.sha1'): + utils.DownloadFilesFromGoogleStorage(golang_path) - if output_dir is not None: - utils.RemoveDirectory(os.path.join(output_dir, 'go')) - utils.UnpackArchiveTo(golang_zip_path, output_dir) + if output_dir is not None: + utils.RemoveDirectory(os.path.join(output_dir, 'go')) + utils.UnpackArchiveTo(golang_zip_path, output_dir) if __name__ == '__main__': - sys.exit(main(sys.argv)) + sys.exit(main(sys.argv)) diff --git a/rtc_tools/testing/setup_apprtc.py b/rtc_tools/testing/setup_apprtc.py index 2b463e004d..387ba694a3 100755 --- a/rtc_tools/testing/setup_apprtc.py +++ b/rtc_tools/testing/setup_apprtc.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """This script sets up AppRTC and its dependencies. Requires that depot_tools is installed and in the PATH. 
@@ -19,27 +18,26 @@ import utils - SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) def main(argv): - if len(argv) == 1: - return 'Usage %s ' % argv[0] + if len(argv) == 1: + return 'Usage %s ' % argv[0] - output_dir = os.path.abspath(argv[1]) + output_dir = os.path.abspath(argv[1]) - download_apprtc_path = os.path.join(SCRIPT_DIR, 'download_apprtc.py') - utils.RunSubprocessWithRetry([sys.executable, download_apprtc_path, - output_dir]) + download_apprtc_path = os.path.join(SCRIPT_DIR, 'download_apprtc.py') + utils.RunSubprocessWithRetry( + [sys.executable, download_apprtc_path, output_dir]) - build_apprtc_path = os.path.join(SCRIPT_DIR, 'build_apprtc.py') - apprtc_dir = os.path.join(output_dir, 'apprtc') - go_dir = os.path.join(output_dir, 'go') - collider_dir = os.path.join(output_dir, 'collider') - utils.RunSubprocessWithRetry([sys.executable, build_apprtc_path, - apprtc_dir, go_dir, collider_dir]) + build_apprtc_path = os.path.join(SCRIPT_DIR, 'build_apprtc.py') + apprtc_dir = os.path.join(output_dir, 'apprtc') + go_dir = os.path.join(output_dir, 'go') + collider_dir = os.path.join(output_dir, 'collider') + utils.RunSubprocessWithRetry( + [sys.executable, build_apprtc_path, apprtc_dir, go_dir, collider_dir]) if __name__ == '__main__': - sys.exit(main(sys.argv)) + sys.exit(main(sys.argv)) diff --git a/rtc_tools/testing/utils.py b/rtc_tools/testing/utils.py index 7968dad62b..8a5de50cf8 100755 --- a/rtc_tools/testing/utils.py +++ b/rtc_tools/testing/utils.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
- """Utilities for all our deps-management stuff.""" from __future__ import absolute_import @@ -23,36 +22,37 @@ def RunSubprocessWithRetry(cmd): - """Invokes the subprocess and backs off exponentially on fail.""" - for i in range(5): - try: - subprocess.check_call(cmd) - return - except subprocess.CalledProcessError as exception: - backoff = pow(2, i) - print('Got %s, retrying in %d seconds...' % (exception, backoff)) - time.sleep(backoff) + """Invokes the subprocess and backs off exponentially on fail.""" + for i in range(5): + try: + subprocess.check_call(cmd) + return + except subprocess.CalledProcessError as exception: + backoff = pow(2, i) + print('Got %s, retrying in %d seconds...' % (exception, backoff)) + time.sleep(backoff) - print('Giving up.') - raise exception + print('Giving up.') + raise exception def DownloadFilesFromGoogleStorage(path, auto_platform=True): - print('Downloading files in %s...' % path) + print('Downloading files in %s...' % path) - extension = 'bat' if 'win32' in sys.platform else 'py' - cmd = ['download_from_google_storage.%s' % extension, - '--bucket=chromium-webrtc-resources', - '--directory', path] - if auto_platform: - cmd += ['--auto_platform', '--recursive'] - subprocess.check_call(cmd) + extension = 'bat' if 'win32' in sys.platform else 'py' + cmd = [ + 'download_from_google_storage.%s' % extension, + '--bucket=chromium-webrtc-resources', '--directory', path + ] + if auto_platform: + cmd += ['--auto_platform', '--recursive'] + subprocess.check_call(cmd) # Code partially copied from # https://cs.chromium.org#chromium/build/scripts/common/chromium_utils.py def RemoveDirectory(*path): - """Recursively removes a directory, even if it's marked read-only. + """Recursively removes a directory, even if it's marked read-only. Remove the directory located at *path, if it exists. @@ -67,62 +67,63 @@ def RemoveDirectory(*path): bit and try again, so we do that too. It's hand-waving, but sometimes it works. 
:/ """ - file_path = os.path.join(*path) - print('Deleting `{}`.'.format(file_path)) - if not os.path.exists(file_path): - print('`{}` does not exist.'.format(file_path)) - return - - if sys.platform == 'win32': - # Give up and use cmd.exe's rd command. - file_path = os.path.normcase(file_path) - for _ in range(3): - print('RemoveDirectory running %s' % (' '.join( - ['cmd.exe', '/c', 'rd', '/q', '/s', file_path]))) - if not subprocess.call(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]): - break - print(' Failed') - time.sleep(3) - return - else: - shutil.rmtree(file_path, ignore_errors=True) + file_path = os.path.join(*path) + print('Deleting `{}`.'.format(file_path)) + if not os.path.exists(file_path): + print('`{}` does not exist.'.format(file_path)) + return + + if sys.platform == 'win32': + # Give up and use cmd.exe's rd command. + file_path = os.path.normcase(file_path) + for _ in range(3): + print('RemoveDirectory running %s' % + (' '.join(['cmd.exe', '/c', 'rd', '/q', '/s', file_path]))) + if not subprocess.call( + ['cmd.exe', '/c', 'rd', '/q', '/s', file_path]): + break + print(' Failed') + time.sleep(3) + return + else: + shutil.rmtree(file_path, ignore_errors=True) def UnpackArchiveTo(archive_path, output_dir): - extension = os.path.splitext(archive_path)[1] - if extension == '.zip': - _UnzipArchiveTo(archive_path, output_dir) - else: - _UntarArchiveTo(archive_path, output_dir) + extension = os.path.splitext(archive_path)[1] + if extension == '.zip': + _UnzipArchiveTo(archive_path, output_dir) + else: + _UntarArchiveTo(archive_path, output_dir) def _UnzipArchiveTo(archive_path, output_dir): - print('Unzipping {} in {}.'.format(archive_path, output_dir)) - zip_file = zipfile.ZipFile(archive_path) - try: - zip_file.extractall(output_dir) - finally: - zip_file.close() + print('Unzipping {} in {}.'.format(archive_path, output_dir)) + zip_file = zipfile.ZipFile(archive_path) + try: + zip_file.extractall(output_dir) + finally: + zip_file.close() def 
_UntarArchiveTo(archive_path, output_dir): - print('Untarring {} in {}.'.format(archive_path, output_dir)) - tar_file = tarfile.open(archive_path, 'r:gz') - try: - tar_file.extractall(output_dir) - finally: - tar_file.close() + print('Untarring {} in {}.'.format(archive_path, output_dir)) + tar_file = tarfile.open(archive_path, 'r:gz') + try: + tar_file.extractall(output_dir) + finally: + tar_file.close() def GetPlatform(): - if sys.platform.startswith('win'): - return 'win' - if sys.platform.startswith('linux'): - return 'linux' - if sys.platform.startswith('darwin'): - return 'mac' - raise Exception("Can't run on platform %s." % sys.platform) + if sys.platform.startswith('win'): + return 'win' + if sys.platform.startswith('linux'): + return 'linux' + if sys.platform.startswith('darwin'): + return 'mac' + raise Exception("Can't run on platform %s." % sys.platform) def GetExecutableExtension(): - return '.exe' if GetPlatform() == 'win' else '' + return '.exe' if GetPlatform() == 'win' else '' diff --git a/rtc_tools/unpack_aecdump/unpack.cc b/rtc_tools/unpack_aecdump/unpack.cc index ba3af129bf..4a98349820 100644 --- a/rtc_tools/unpack_aecdump/unpack.cc +++ b/rtc_tools/unpack_aecdump/unpack.cc @@ -81,6 +81,10 @@ ABSL_FLAG(bool, text, false, "Write non-audio files as text files instead of binary files."); +ABSL_FLAG(bool, + use_init_suffix, + false, + "Use init index instead of capture frame count as file name suffix."); #define PRINT_CONFIG(field_name) \ if (msg.has_##field_name()) { \ @@ -224,6 +228,16 @@ std::vector RuntimeSettingWriters() { })}; } +std::string GetWavFileIndex(int init_index, int frame_count) { + rtc::StringBuilder suffix; + if (absl::GetFlag(FLAGS_use_init_suffix)) { + suffix << "_" << init_index; + } else { + suffix << frame_count; + } + return suffix.str(); +} + } // namespace int do_main(int argc, char* argv[]) { @@ -243,6 +257,7 @@ int do_main(int argc, char* argv[]) { Event event_msg; int frame_count = 0; + int init_count = 0; size_t 
reverse_samples_per_channel = 0; size_t input_samples_per_channel = 0; size_t output_samples_per_channel = 0; @@ -452,9 +467,11 @@ int do_main(int argc, char* argv[]) { return 1; } + ++init_count; const Init msg = event_msg.init(); // These should print out zeros if they're missing. - fprintf(settings_file, "Init at frame: %d\n", frame_count); + fprintf(settings_file, "Init #%d at frame: %d\n", init_count, + frame_count); int input_sample_rate = msg.sample_rate(); fprintf(settings_file, " Input sample rate: %d\n", input_sample_rate); int output_sample_rate = msg.output_sample_rate(); @@ -495,24 +512,24 @@ int do_main(int argc, char* argv[]) { if (!absl::GetFlag(FLAGS_raw)) { // The WAV files need to be reset every time, because they cant change // their sample rate or number of channels. + + std::string suffix = GetWavFileIndex(init_count, frame_count); rtc::StringBuilder reverse_name; - reverse_name << absl::GetFlag(FLAGS_reverse_file) << frame_count - << ".wav"; + reverse_name << absl::GetFlag(FLAGS_reverse_file) << suffix << ".wav"; reverse_wav_file.reset(new WavWriter( reverse_name.str(), reverse_sample_rate, num_reverse_channels)); rtc::StringBuilder input_name; - input_name << absl::GetFlag(FLAGS_input_file) << frame_count << ".wav"; + input_name << absl::GetFlag(FLAGS_input_file) << suffix << ".wav"; input_wav_file.reset(new WavWriter(input_name.str(), input_sample_rate, num_input_channels)); rtc::StringBuilder output_name; - output_name << absl::GetFlag(FLAGS_output_file) << frame_count - << ".wav"; + output_name << absl::GetFlag(FLAGS_output_file) << suffix << ".wav"; output_wav_file.reset(new WavWriter( output_name.str(), output_sample_rate, num_output_channels)); if (WritingCallOrderFile()) { rtc::StringBuilder callorder_name; - callorder_name << absl::GetFlag(FLAGS_callorder_file) << frame_count + callorder_name << absl::GetFlag(FLAGS_callorder_file) << suffix << ".char"; callorder_char_file = OpenFile(callorder_name.str(), "wb"); } diff --git 
a/rtc_tools/video_file_reader.cc b/rtc_tools/video_file_reader.cc index b01fc0fcdd..bfdcba45fa 100644 --- a/rtc_tools/video_file_reader.cc +++ b/rtc_tools/video_file_reader.cc @@ -224,8 +224,8 @@ rtc::scoped_refptr

note: |types| is a list of ConnectionTypes, so that all cellular types can be modified in + * one call. + */ + public void onNetworkPreference(List types, @NetworkPreference int preference); + } + + public ConnectionType getCurrentConnectionType(); + + public boolean supportNetworkCallback(); + + @Nullable public List getActiveNetworkList(); + + public void destroy(); +} diff --git a/media/sctp/noop.cc b/sdk/android/api/org/webrtc/NetworkChangeDetectorFactory.java similarity index 56% rename from media/sctp/noop.cc rename to sdk/android/api/org/webrtc/NetworkChangeDetectorFactory.java index a3523b18b2..14e98b2387 100644 --- a/media/sctp/noop.cc +++ b/sdk/android/api/org/webrtc/NetworkChangeDetectorFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2017 The WebRTC Project Authors. All rights reserved. + * Copyright 2020 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,6 +8,10 @@ * be found in the AUTHORS file in the root of the source tree. */ -// This file is only needed to make ninja happy on some platforms. -// On some platforms it is not possible to link an rtc_static_library -// without any source file listed in the GN target. 
+package org.webrtc; + +import android.content.Context; + +public interface NetworkChangeDetectorFactory { + public NetworkChangeDetector create(NetworkChangeDetector.Observer observer, Context context); +} diff --git a/sdk/android/api/org/webrtc/NetworkMonitor.java b/sdk/android/api/org/webrtc/NetworkMonitor.java index 364bb4d0b5..566302b0b4 100644 --- a/sdk/android/api/org/webrtc/NetworkMonitor.java +++ b/sdk/android/api/org/webrtc/NetworkMonitor.java @@ -10,14 +10,12 @@ package org.webrtc; -import static org.webrtc.NetworkMonitorAutoDetect.INVALID_NET_ID; - import android.content.Context; import android.os.Build; import android.support.annotation.Nullable; import java.util.ArrayList; import java.util.List; -import org.webrtc.NetworkMonitorAutoDetect; +import org.webrtc.NetworkChangeDetector; /** * Borrowed from Chromium's @@ -32,7 +30,7 @@ public class NetworkMonitor { * Alerted when the connection type of the network changes. The alert is fired on the UI thread. */ public interface NetworkObserver { - public void onConnectionTypeChanged(NetworkMonitorAutoDetect.ConnectionType connectionType); + public void onConnectionTypeChanged(NetworkChangeDetector.ConnectionType connectionType); } private static final String TAG = "NetworkMonitor"; @@ -43,24 +41,43 @@ private static class InstanceHolder { static final NetworkMonitor instance = new NetworkMonitor(); } + // Factory for creating NetworkChangeDetector. + private NetworkChangeDetectorFactory networkChangeDetectorFactory = + new NetworkChangeDetectorFactory() { + @Override + public NetworkChangeDetector create( + NetworkChangeDetector.Observer observer, Context context) { + return new NetworkMonitorAutoDetect(observer, context); + } + }; + // Native observers of the connection type changes. private final ArrayList nativeNetworkObservers; // Java observers of the connection type changes. 
private final ArrayList networkObservers; - private final Object autoDetectLock = new Object(); + private final Object networkChangeDetectorLock = new Object(); // Object that detects the connection type changes and brings up mobile networks. - @Nullable private NetworkMonitorAutoDetect autoDetect; + @Nullable private NetworkChangeDetector networkChangeDetector; // Also guarded by autoDetectLock. private int numObservers; - private volatile NetworkMonitorAutoDetect.ConnectionType currentConnectionType; + private volatile NetworkChangeDetector.ConnectionType currentConnectionType; private NetworkMonitor() { nativeNetworkObservers = new ArrayList(); networkObservers = new ArrayList(); numObservers = 0; - currentConnectionType = NetworkMonitorAutoDetect.ConnectionType.CONNECTION_UNKNOWN; + currentConnectionType = NetworkChangeDetector.ConnectionType.CONNECTION_UNKNOWN; + } + + /** + * Set the factory that will be used to create the network change detector. + * Needs to be called before the monitoring is starts. + */ + public void setNetworkChangeDetectorFactory(NetworkChangeDetectorFactory factory) { + assertIsTrue(numObservers == 0); + this.networkChangeDetectorFactory = factory; } // TODO(sakal): Remove once downstream dependencies have been updated. @@ -85,13 +102,12 @@ private static void assertIsTrue(boolean condition) { * CHANGE_NETWORK_STATE permission. 
*/ public void startMonitoring(Context applicationContext) { - synchronized (autoDetectLock) { + synchronized (networkChangeDetectorLock) { ++numObservers; - if (autoDetect == null) { - autoDetect = createAutoDetect(applicationContext); + if (networkChangeDetector == null) { + networkChangeDetector = createNetworkChangeDetector(applicationContext); } - currentConnectionType = - NetworkMonitorAutoDetect.getConnectionType(autoDetect.getCurrentNetworkState()); + currentConnectionType = networkChangeDetector.getCurrentConnectionType(); } } @@ -122,12 +138,15 @@ private void startMonitoring(@Nullable Context applicationContext, long nativeOb notifyObserversOfConnectionTypeChange(currentConnectionType); } - /** Stop network monitoring. If no one is monitoring networks, destroy and reset autoDetect. */ + /** + * Stop network monitoring. If no one is monitoring networks, destroy and reset + * networkChangeDetector. + */ public void stopMonitoring() { - synchronized (autoDetectLock) { + synchronized (networkChangeDetectorLock) { if (--numObservers == 0) { - autoDetect.destroy(); - autoDetect = null; + networkChangeDetector.destroy(); + networkChangeDetector = null; } } } @@ -144,8 +163,8 @@ private void stopMonitoring(long nativeObserver) { // Returns true if network binding is supported on this platform. 
@CalledByNative private boolean networkBindingSupported() { - synchronized (autoDetectLock) { - return autoDetect != null && autoDetect.supportNetworkCallback(); + synchronized (networkChangeDetectorLock) { + return networkChangeDetector != null && networkChangeDetector.supportNetworkCallback(); } } @@ -154,27 +173,19 @@ private static int androidSdkInt() { return Build.VERSION.SDK_INT; } - private NetworkMonitorAutoDetect.ConnectionType getCurrentConnectionType() { + private NetworkChangeDetector.ConnectionType getCurrentConnectionType() { return currentConnectionType; } - private long getCurrentDefaultNetId() { - synchronized (autoDetectLock) { - return autoDetect == null ? INVALID_NET_ID : autoDetect.getDefaultNetId(); - } - } - - private NetworkMonitorAutoDetect createAutoDetect(Context appContext) { - return new NetworkMonitorAutoDetect(new NetworkMonitorAutoDetect.Observer() { - + private NetworkChangeDetector createNetworkChangeDetector(Context appContext) { + return networkChangeDetectorFactory.create(new NetworkChangeDetector.Observer() { @Override - public void onConnectionTypeChanged( - NetworkMonitorAutoDetect.ConnectionType newConnectionType) { + public void onConnectionTypeChanged(NetworkChangeDetector.ConnectionType newConnectionType) { updateCurrentConnectionType(newConnectionType); } @Override - public void onNetworkConnect(NetworkMonitorAutoDetect.NetworkInformation networkInfo) { + public void onNetworkConnect(NetworkChangeDetector.NetworkInformation networkInfo) { notifyObserversOfNetworkConnect(networkInfo); } @@ -182,18 +193,23 @@ public void onNetworkConnect(NetworkMonitorAutoDetect.NetworkInformation network public void onNetworkDisconnect(long networkHandle) { notifyObserversOfNetworkDisconnect(networkHandle); } + + @Override + public void onNetworkPreference( + List types, int preference) { + notifyObserversOfNetworkPreference(types, preference); + } }, appContext); } - private void updateCurrentConnectionType( - 
NetworkMonitorAutoDetect.ConnectionType newConnectionType) { + private void updateCurrentConnectionType(NetworkChangeDetector.ConnectionType newConnectionType) { currentConnectionType = newConnectionType; notifyObserversOfConnectionTypeChange(newConnectionType); } /** Alerts all observers of a connection change. */ private void notifyObserversOfConnectionTypeChange( - NetworkMonitorAutoDetect.ConnectionType newConnectionType) { + NetworkChangeDetector.ConnectionType newConnectionType) { List nativeObservers = getNativeNetworkObserversSync(); for (Long nativeObserver : nativeObservers) { nativeNotifyConnectionTypeChanged(nativeObserver); @@ -209,7 +225,7 @@ private void notifyObserversOfConnectionTypeChange( } private void notifyObserversOfNetworkConnect( - NetworkMonitorAutoDetect.NetworkInformation networkInfo) { + NetworkChangeDetector.NetworkInformation networkInfo) { List nativeObservers = getNativeNetworkObserversSync(); for (Long nativeObserver : nativeObservers) { nativeNotifyOfNetworkConnect(nativeObserver, networkInfo); @@ -223,17 +239,28 @@ private void notifyObserversOfNetworkDisconnect(long networkHandle) { } } + private void notifyObserversOfNetworkPreference( + List types, int preference) { + List nativeObservers = getNativeNetworkObserversSync(); + for (NetworkChangeDetector.ConnectionType type : types) { + for (Long nativeObserver : nativeObservers) { + nativeNotifyOfNetworkPreference(nativeObserver, type, preference); + } + } + } + private void updateObserverActiveNetworkList(long nativeObserver) { - List networkInfoList; - synchronized (autoDetectLock) { - networkInfoList = (autoDetect == null) ? null : autoDetect.getActiveNetworkList(); + List networkInfoList; + synchronized (networkChangeDetectorLock) { + networkInfoList = + (networkChangeDetector == null) ? 
null : networkChangeDetector.getActiveNetworkList(); } if (networkInfoList == null || networkInfoList.size() == 0) { return; } - NetworkMonitorAutoDetect.NetworkInformation[] networkInfos = - new NetworkMonitorAutoDetect.NetworkInformation[networkInfoList.size()]; + NetworkChangeDetector.NetworkInformation[] networkInfos = + new NetworkChangeDetector.NetworkInformation[networkInfoList.size()]; networkInfos = networkInfoList.toArray(networkInfos); nativeNotifyOfActiveNetworkList(nativeObserver, networkInfos); } @@ -278,30 +305,35 @@ public void removeObserver(NetworkObserver observer) { /** Checks if there currently is connectivity. */ public static boolean isOnline() { - NetworkMonitorAutoDetect.ConnectionType connectionType = - getInstance().getCurrentConnectionType(); - return connectionType != NetworkMonitorAutoDetect.ConnectionType.CONNECTION_NONE; + NetworkChangeDetector.ConnectionType connectionType = getInstance().getCurrentConnectionType(); + return connectionType != NetworkChangeDetector.ConnectionType.CONNECTION_NONE; } private native void nativeNotifyConnectionTypeChanged(long nativeAndroidNetworkMonitor); + private native void nativeNotifyOfNetworkConnect( - long nativeAndroidNetworkMonitor, NetworkMonitorAutoDetect.NetworkInformation networkInfo); + long nativeAndroidNetworkMonitor, NetworkChangeDetector.NetworkInformation networkInfo); + private native void nativeNotifyOfNetworkDisconnect( long nativeAndroidNetworkMonitor, long networkHandle); + private native void nativeNotifyOfActiveNetworkList( - long nativeAndroidNetworkMonitor, NetworkMonitorAutoDetect.NetworkInformation[] networkInfos); + long nativeAndroidNetworkMonitor, NetworkChangeDetector.NetworkInformation[] networkInfos); + + private native void nativeNotifyOfNetworkPreference( + long nativeAndroidNetworkMonitor, NetworkChangeDetector.ConnectionType type, int preference); // For testing only. 
@Nullable - NetworkMonitorAutoDetect getNetworkMonitorAutoDetect() { - synchronized (autoDetectLock) { - return autoDetect; + NetworkChangeDetector getNetworkChangeDetector() { + synchronized (networkChangeDetectorLock) { + return networkChangeDetector; } } // For testing only. int getNumObservers() { - synchronized (autoDetectLock) { + synchronized (networkChangeDetectorLock) { return numObservers; } } @@ -309,7 +341,9 @@ int getNumObservers() { // For testing only. static NetworkMonitorAutoDetect createAndSetAutoDetectForTest(Context context) { NetworkMonitor networkMonitor = getInstance(); - NetworkMonitorAutoDetect autoDetect = networkMonitor.createAutoDetect(context); - return networkMonitor.autoDetect = autoDetect; + NetworkChangeDetector networkChangeDetector = + networkMonitor.createNetworkChangeDetector(context); + networkMonitor.networkChangeDetector = networkChangeDetector; + return (NetworkMonitorAutoDetect) networkChangeDetector; } } diff --git a/sdk/android/api/org/webrtc/NetworkMonitorAutoDetect.java b/sdk/android/api/org/webrtc/NetworkMonitorAutoDetect.java index dbea840710..3d233b3423 100644 --- a/sdk/android/api/org/webrtc/NetworkMonitorAutoDetect.java +++ b/sdk/android/api/org/webrtc/NetworkMonitorAutoDetect.java @@ -41,80 +41,10 @@ * Borrowed from Chromium's * src/net/android/java/src/org/chromium/net/NetworkChangeNotifierAutoDetect.java * - * Used by the NetworkMonitor to listen to platform changes in connectivity. - * Note that use of this class requires that the app have the platform - * ACCESS_NETWORK_STATE permission. + *

Used by the NetworkMonitor to listen to platform changes in connectivity. Note that use of + * this class requires that the app have the platform ACCESS_NETWORK_STATE permission. */ -public class NetworkMonitorAutoDetect extends BroadcastReceiver { - public static enum ConnectionType { - CONNECTION_UNKNOWN, - CONNECTION_ETHERNET, - CONNECTION_WIFI, - CONNECTION_5G, - CONNECTION_4G, - CONNECTION_3G, - CONNECTION_2G, - CONNECTION_UNKNOWN_CELLULAR, - CONNECTION_BLUETOOTH, - CONNECTION_VPN, - CONNECTION_NONE - } - - public static class IPAddress { - public final byte[] address; - public IPAddress(byte[] address) { - this.address = address; - } - - @CalledByNative("IPAddress") - private byte[] getAddress() { - return address; - } - } - - /** Java version of NetworkMonitor.NetworkInformation */ - public static class NetworkInformation { - public final String name; - public final ConnectionType type; - // Used to specify the underlying network type if the type is CONNECTION_VPN. - public final ConnectionType underlyingTypeForVpn; - public final long handle; - public final IPAddress[] ipAddresses; - public NetworkInformation(String name, ConnectionType type, ConnectionType underlyingTypeForVpn, - long handle, IPAddress[] addresses) { - this.name = name; - this.type = type; - this.underlyingTypeForVpn = underlyingTypeForVpn; - this.handle = handle; - this.ipAddresses = addresses; - } - - @CalledByNative("NetworkInformation") - private IPAddress[] getIpAddresses() { - return ipAddresses; - } - - @CalledByNative("NetworkInformation") - private ConnectionType getConnectionType() { - return type; - } - - @CalledByNative("NetworkInformation") - private ConnectionType getUnderlyingConnectionTypeForVpn() { - return underlyingTypeForVpn; - } - - @CalledByNative("NetworkInformation") - private long getHandle() { - return handle; - } - - @CalledByNative("NetworkInformation") - private String getName() { - return name; - } - }; - +public class NetworkMonitorAutoDetect extends 
BroadcastReceiver implements NetworkChangeDetector { static class NetworkState { private final boolean connected; // Defined from ConnectivityManager.TYPE_XXX for non-mobile; for mobile, it is @@ -182,7 +112,10 @@ public void onCapabilitiesChanged(Network network, NetworkCapabilities networkCa public void onLinkPropertiesChanged(Network network, LinkProperties linkProperties) { // A link property change may indicate the IP address changes. // so forward the new NetworkInformation to the observer. - Logging.d(TAG, "link properties changed: " + linkProperties.toString()); + // + // linkProperties.toString() has PII that cannot be redacted + // very reliably, so do not include in log. + Logging.d(TAG, "link properties changed"); onNetworkChanged(network); } @@ -410,8 +343,8 @@ long getDefaultNetId() { } NetworkState networkState = getNetworkState(network); - ConnectionType connectionType = getConnectionType(networkState); - if (connectionType == ConnectionType.CONNECTION_NONE) { + NetworkChangeDetector.ConnectionType connectionType = getConnectionType(networkState); + if (connectionType == NetworkChangeDetector.ConnectionType.CONNECTION_NONE) { // This may not be an error. The OS may signal a network event with connection type // NONE when the network disconnects. Logging.d(TAG, "Network " + network.toString() + " is disconnected"); @@ -420,13 +353,14 @@ long getDefaultNetId() { // Some android device may return a CONNECTION_UNKNOWN_CELLULAR or CONNECTION_UNKNOWN type, // which appears to be usable. Just log them here. 
- if (connectionType == ConnectionType.CONNECTION_UNKNOWN - || connectionType == ConnectionType.CONNECTION_UNKNOWN_CELLULAR) { + if (connectionType == NetworkChangeDetector.ConnectionType.CONNECTION_UNKNOWN + || connectionType == NetworkChangeDetector.ConnectionType.CONNECTION_UNKNOWN_CELLULAR) { Logging.d(TAG, "Network " + network.toString() + " connection type is " + connectionType + " because it has type " + networkState.getNetworkType() + " and subtype " + networkState.getNetworkSubType()); } - // ConnectionType.CONNECTION_UNKNOWN if the network is not a VPN or the underlying network is + // NetworkChangeDetector.ConnectionType.CONNECTION_UNKNOWN if the network is not a VPN or the + // underlying network is // unknown. ConnectionType underlyingConnectionTypeForVpn = getUnderlyingConnectionTypeForVpn(networkState); @@ -529,12 +463,12 @@ static class WifiDirectManagerDelegate extends BroadcastReceiver { // (NETWORK_UNSPECIFIED) for these addresses. private static final int WIFI_P2P_NETWORK_HANDLE = 0; private final Context context; - private final Observer observer; + private final NetworkChangeDetector.Observer observer; // Network information about a WifiP2p (aka WiFi-Direct) network, or null if no such network is // connected. 
@Nullable private NetworkInformation wifiP2pNetworkInfo; - WifiDirectManagerDelegate(Observer observer, Context context) { + WifiDirectManagerDelegate(NetworkChangeDetector.Observer observer, Context context) { this.context = context; this.observer = observer; IntentFilter intentFilter = new IntentFilter(); @@ -599,9 +533,10 @@ private void onWifiP2pGroupChange(@Nullable WifiP2pGroup wifiP2pGroup) { ipAddresses[i] = new IPAddress(interfaceAddresses.get(i).getAddress()); } - wifiP2pNetworkInfo = - new NetworkInformation(wifiP2pGroup.getInterface(), ConnectionType.CONNECTION_WIFI, - ConnectionType.CONNECTION_NONE, WIFI_P2P_NETWORK_HANDLE, ipAddresses); + wifiP2pNetworkInfo = new NetworkInformation(wifiP2pGroup.getInterface(), + NetworkChangeDetector.ConnectionType.CONNECTION_WIFI, + NetworkChangeDetector.ConnectionType.CONNECTION_NONE, WIFI_P2P_NETWORK_HANDLE, + ipAddresses); observer.onNetworkConnect(wifiP2pNetworkInfo); } @@ -614,11 +549,11 @@ private void onWifiP2pStateChange(int state) { } } - static final long INVALID_NET_ID = -1; + private static final long INVALID_NET_ID = -1; private static final String TAG = "NetworkMonitorAutoDetect"; // Observer for the connection type change. - private final Observer observer; + private final NetworkChangeDetector.Observer observer; private final IntentFilter intentFilter; private final Context context; // Used to request mobile network. It does not do anything except for keeping @@ -632,26 +567,12 @@ private void onWifiP2pStateChange(int state) { private WifiDirectManagerDelegate wifiDirectManagerDelegate; private boolean isRegistered; - private ConnectionType connectionType; + private NetworkChangeDetector.ConnectionType connectionType; private String wifiSSID; - /** - * Observer interface by which observer is notified of network changes. - */ - public static interface Observer { - /** - * Called when default network changes. 
- */ - public void onConnectionTypeChanged(ConnectionType newConnectionType); - public void onNetworkConnect(NetworkInformation networkInfo); - public void onNetworkDisconnect(long networkHandle); - } - - /** - * Constructs a NetworkMonitorAutoDetect. Should only be called on UI thread. - */ + /** Constructs a NetworkMonitorAutoDetect. Should only be called on UI thread. */ @SuppressLint("NewApi") - public NetworkMonitorAutoDetect(Observer observer, Context context) { + public NetworkMonitorAutoDetect(NetworkChangeDetector.Observer observer, Context context) { this.observer = observer; this.context = context; connectivityManagerDelegate = new ConnectivityManagerDelegate(context); @@ -686,6 +607,7 @@ public NetworkMonitorAutoDetect(Observer observer, Context context) { } } + @Override public boolean supportNetworkCallback() { return connectivityManagerDelegate.supportNetworkCallback(); } @@ -712,8 +634,9 @@ boolean isReceiverRegisteredForTesting() { return isRegistered; } + @Override @Nullable - List getActiveNetworkList() { + public List getActiveNetworkList() { List connectivityManagerList = connectivityManagerDelegate.getActiveNetworkList(); if (connectivityManagerList == null) { @@ -727,6 +650,7 @@ List getActiveNetworkList() { return result; } + @Override public void destroy() { if (allNetworkCallback != null) { connectivityManagerDelegate.releaseCallback(allNetworkCallback); @@ -776,21 +700,21 @@ public long getDefaultNetId() { return connectivityManagerDelegate.getDefaultNetId(); } - private static ConnectionType getConnectionType( + private static NetworkChangeDetector.ConnectionType getConnectionType( boolean isConnected, int networkType, int networkSubtype) { if (!isConnected) { - return ConnectionType.CONNECTION_NONE; + return NetworkChangeDetector.ConnectionType.CONNECTION_NONE; } switch (networkType) { case ConnectivityManager.TYPE_ETHERNET: - return ConnectionType.CONNECTION_ETHERNET; + return NetworkChangeDetector.ConnectionType.CONNECTION_ETHERNET; 
case ConnectivityManager.TYPE_WIFI: - return ConnectionType.CONNECTION_WIFI; + return NetworkChangeDetector.ConnectionType.CONNECTION_WIFI; case ConnectivityManager.TYPE_WIMAX: - return ConnectionType.CONNECTION_4G; + return NetworkChangeDetector.ConnectionType.CONNECTION_4G; case ConnectivityManager.TYPE_BLUETOOTH: - return ConnectionType.CONNECTION_BLUETOOTH; + return NetworkChangeDetector.ConnectionType.CONNECTION_BLUETOOTH; case ConnectivityManager.TYPE_MOBILE: // Use information from TelephonyManager to classify the connection. switch (networkSubtype) { @@ -800,7 +724,7 @@ private static ConnectionType getConnectionType( case TelephonyManager.NETWORK_TYPE_1xRTT: case TelephonyManager.NETWORK_TYPE_IDEN: case TelephonyManager.NETWORK_TYPE_GSM: - return ConnectionType.CONNECTION_2G; + return NetworkChangeDetector.ConnectionType.CONNECTION_2G; case TelephonyManager.NETWORK_TYPE_UMTS: case TelephonyManager.NETWORK_TYPE_EVDO_0: case TelephonyManager.NETWORK_TYPE_EVDO_A: @@ -811,30 +735,36 @@ private static ConnectionType getConnectionType( case TelephonyManager.NETWORK_TYPE_EHRPD: case TelephonyManager.NETWORK_TYPE_HSPAP: case TelephonyManager.NETWORK_TYPE_TD_SCDMA: - return ConnectionType.CONNECTION_3G; + return NetworkChangeDetector.ConnectionType.CONNECTION_3G; case TelephonyManager.NETWORK_TYPE_LTE: case TelephonyManager.NETWORK_TYPE_IWLAN: - return ConnectionType.CONNECTION_4G; + return NetworkChangeDetector.ConnectionType.CONNECTION_4G; case TelephonyManager.NETWORK_TYPE_NR: - return ConnectionType.CONNECTION_5G; + return NetworkChangeDetector.ConnectionType.CONNECTION_5G; default: - return ConnectionType.CONNECTION_UNKNOWN_CELLULAR; + return NetworkChangeDetector.ConnectionType.CONNECTION_UNKNOWN_CELLULAR; } case ConnectivityManager.TYPE_VPN: - return ConnectionType.CONNECTION_VPN; + return NetworkChangeDetector.ConnectionType.CONNECTION_VPN; default: - return ConnectionType.CONNECTION_UNKNOWN; + return NetworkChangeDetector.ConnectionType.CONNECTION_UNKNOWN; 
} } - public static ConnectionType getConnectionType(NetworkState networkState) { + public static NetworkChangeDetector.ConnectionType getConnectionType(NetworkState networkState) { return getConnectionType(networkState.isConnected(), networkState.getNetworkType(), networkState.getNetworkSubType()); } - private static ConnectionType getUnderlyingConnectionTypeForVpn(NetworkState networkState) { + @Override + public NetworkChangeDetector.ConnectionType getCurrentConnectionType() { + return getConnectionType(getCurrentNetworkState()); + } + + private static NetworkChangeDetector.ConnectionType getUnderlyingConnectionTypeForVpn( + NetworkState networkState) { if (networkState.getNetworkType() != ConnectivityManager.TYPE_VPN) { - return ConnectionType.CONNECTION_NONE; + return NetworkChangeDetector.ConnectionType.CONNECTION_NONE; } return getConnectionType(networkState.isConnected(), networkState.getUnderlyingNetworkTypeForVpn(), @@ -842,7 +772,7 @@ private static ConnectionType getUnderlyingConnectionTypeForVpn(NetworkState net } private String getWifiSSID(NetworkState networkState) { - if (getConnectionType(networkState) != ConnectionType.CONNECTION_WIFI) + if (getConnectionType(networkState) != NetworkChangeDetector.ConnectionType.CONNECTION_WIFI) return ""; return wifiManagerDelegate.getWifiSSID(); } @@ -857,7 +787,7 @@ public void onReceive(Context context, Intent intent) { } private void connectionTypeChanged(NetworkState networkState) { - ConnectionType newConnectionType = getConnectionType(networkState); + NetworkChangeDetector.ConnectionType newConnectionType = getConnectionType(networkState); String newWifiSSID = getWifiSSID(networkState); if (newConnectionType == connectionType && newWifiSSID.equals(wifiSSID)) return; diff --git a/sdk/android/api/org/webrtc/OWNERS b/sdk/android/api/org/webrtc/OWNERS index e6ccc2dda2..b64df86672 100644 --- a/sdk/android/api/org/webrtc/OWNERS +++ b/sdk/android/api/org/webrtc/OWNERS @@ -1,3 +1,3 @@ -per-file 
Camera*=sakal@webrtc.org -per-file Histogram.java=sakal@webrtc.org -per-file Metrics.java=sakal@webrtc.org +per-file Camera*=xalep@webrtc.org +per-file Histogram.java=xalep@webrtc.org +per-file Metrics.java=xalep@webrtc.org diff --git a/sdk/android/api/org/webrtc/PeerConnection.java b/sdk/android/api/org/webrtc/PeerConnection.java index bf0a2e9441..67705ba3d5 100644 --- a/sdk/android/api/org/webrtc/PeerConnection.java +++ b/sdk/android/api/org/webrtc/PeerConnection.java @@ -141,7 +141,14 @@ default void onSelectedCandidatePairChanged(CandidatePairChangeEvent event) {} * Triggered when a new track is signaled by the remote peer, as a result of * setRemoteDescription. */ - @CalledByNative("Observer") void onAddTrack(RtpReceiver receiver, MediaStream[] mediaStreams); + @CalledByNative("Observer") + default void onAddTrack(RtpReceiver receiver, MediaStream[] mediaStreams){}; + + /** + * Triggered when a previously added remote track is removed by the remote + * peer, as a result of setRemoteDescription. + */ + @CalledByNative("Observer") default void onRemoveTrack(RtpReceiver receiver){}; /** * Triggered when the signaling from SetRemoteDescription indicates that a transceiver @@ -501,6 +508,9 @@ public static class RTCConfiguration { // to keep NAT bindings open. // The default value in the implementation is used if this field is null. @Nullable public Integer stunCandidateKeepaliveIntervalMs; + // The interval in milliseconds of pings sent when the connection is stable and writable. + // The default value in the implementation is used if this field is null. 
+ @Nullable public Integer stableWritableConnectionPingIntervalMs; public boolean disableIPv6OnWifi; // By default, PeerConnection will use a limited number of IPv6 network // interfaces, in order to avoid too many ICE candidate pairs being created @@ -514,7 +524,6 @@ public static class RTCConfiguration { public boolean disableIpv6; public boolean enableDscp; public boolean enableCpuOveruseDetection; - public boolean enableRtpDataChannel; public boolean suspendBelowMinBitrate; @Nullable public Integer screencastMinBitrate; @Nullable public Boolean combinedAudioVideoBwe; @@ -536,18 +545,6 @@ public static class RTCConfiguration { // Null indicates no change to currently configured value. @Nullable public Boolean allowCodecSwitching; - /* - * Experimental flag that enables a use of media transport. If this is true, the media transport - * factory MUST be provided to the PeerConnectionFactory. - */ - public boolean useMediaTransport; - - /* - * Experimental flag that enables a use of media transport for data channels. If this is true, - * the media transport factory MUST be provided to the PeerConnectionFactory. - */ - public boolean useMediaTransportForDataChannels; - /** * Defines advanced optional cryptographic settings related to SRTP and * frame encryption for native WebRTC. Setting this will overwrite any @@ -562,6 +559,19 @@ public static class RTCConfiguration { */ @Nullable public String turnLoggingId; + /** + * Allow implicit rollback of local description when remote description + * conflicts with local description. + * See: https://w3c.github.io/webrtc-pc/#dom-peerconnection-setremotedescription + */ + public boolean enableImplicitRollback; + + /** + * Control if "a=extmap-allow-mixed" is included in the offer. + * See: https://www.chromestatus.com/feature/6269234631933952 + */ + public boolean offerExtmapAllowMixed; + // TODO(deadbeef): Instead of duplicating the defaults here, we should do // something to pick up the defaults from C++. 
The Objective-C equivalent // of RTCConfiguration does that. @@ -589,12 +599,12 @@ public RTCConfiguration(List iceServers) { iceUnwritableTimeMs = null; iceUnwritableMinChecks = null; stunCandidateKeepaliveIntervalMs = null; + stableWritableConnectionPingIntervalMs = null; disableIPv6OnWifi = false; maxIPv6Networks = 5; disableIpv6 = false; enableDscp = false; enableCpuOveruseDetection = true; - enableRtpDataChannel = false; suspendBelowMinBitrate = false; screencastMinBitrate = null; combinedAudioVideoBwe = null; @@ -602,11 +612,11 @@ public RTCConfiguration(List iceServers) { networkPreference = AdapterType.UNKNOWN; sdpSemantics = SdpSemantics.PLAN_B; activeResetSrtpParams = false; - useMediaTransport = false; - useMediaTransportForDataChannels = false; cryptoOptions = null; turnLoggingId = null; allowCodecSwitching = null; + enableImplicitRollback = false; + offerExtmapAllowMixed = true; } @CalledByNative("RTCConfiguration") @@ -736,6 +746,12 @@ Integer getStunCandidateKeepaliveInterval() { return stunCandidateKeepaliveIntervalMs; } + @Nullable + @CalledByNative("RTCConfiguration") + Integer getStableWritableConnectionPingIntervalMs() { + return stableWritableConnectionPingIntervalMs; + } + @CalledByNative("RTCConfiguration") boolean getDisableIPv6OnWifi() { return disableIPv6OnWifi; @@ -767,11 +783,6 @@ boolean getEnableCpuOveruseDetection() { return enableCpuOveruseDetection; } - @CalledByNative("RTCConfiguration") - boolean getEnableRtpDataChannel() { - return enableRtpDataChannel; - } - @CalledByNative("RTCConfiguration") boolean getSuspendBelowMinBitrate() { return suspendBelowMinBitrate; @@ -816,16 +827,6 @@ Boolean getAllowCodecSwitching() { return allowCodecSwitching; } - @CalledByNative("RTCConfiguration") - boolean getUseMediaTransport() { - return useMediaTransport; - } - - @CalledByNative("RTCConfiguration") - boolean getUseMediaTransportForDataChannels() { - return useMediaTransportForDataChannels; - } - @Nullable 
@CalledByNative("RTCConfiguration") CryptoOptions getCryptoOptions() { @@ -837,6 +838,16 @@ CryptoOptions getCryptoOptions() { String getTurnLoggingId() { return turnLoggingId; } + + @CalledByNative("RTCConfiguration") + boolean getEnableImplicitRollback() { + return enableImplicitRollback; + } + + @CalledByNative("RTCConfiguration") + boolean getOfferExtmapAllowMixed() { + return offerExtmapAllowMixed; + } }; private final List localStreams = new ArrayList<>(); @@ -882,6 +893,10 @@ public void createAnswer(SdpObserver observer, MediaConstraints constraints) { nativeCreateAnswer(observer, constraints); } + public void setLocalDescription(SdpObserver observer) { + nativeSetLocalDescriptionAutomatically(observer); + } + public void setLocalDescription(SdpObserver observer, SessionDescription sdp) { nativeSetLocalDescription(observer, sdp); } @@ -890,6 +905,13 @@ public void setRemoteDescription(SdpObserver observer, SessionDescription sdp) { nativeSetRemoteDescription(observer, sdp); } + /** + * Tells the PeerConnection that ICE should be restarted. + */ + public void restartIce() { + nativeRestartIce(); + } + /** * Enables/disables playout of received audio streams. Enabled by default. 
* @@ -920,6 +942,11 @@ public boolean addIceCandidate(IceCandidate candidate) { return nativeAddIceCandidate(candidate.sdpMid, candidate.sdpMLineIndex, candidate.sdp); } + public void addIceCandidate(IceCandidate candidate, AddIceObserver observer) { + nativeAddIceCandidateWithObserver( + candidate.sdpMid, candidate.sdpMLineIndex, candidate.sdp, observer); + } + public boolean removeIceCandidates(final IceCandidate[] candidates) { return nativeRemoveIceCandidates(candidates); } @@ -1264,8 +1291,10 @@ public static long createNativePeerConnectionObserver(Observer observer) { private native DataChannel nativeCreateDataChannel(String label, DataChannel.Init init); private native void nativeCreateOffer(SdpObserver observer, MediaConstraints constraints); private native void nativeCreateAnswer(SdpObserver observer, MediaConstraints constraints); + private native void nativeSetLocalDescriptionAutomatically(SdpObserver observer); private native void nativeSetLocalDescription(SdpObserver observer, SessionDescription sdp); private native void nativeSetRemoteDescription(SdpObserver observer, SessionDescription sdp); + private native void nativeRestartIce(); private native void nativeSetAudioPlayout(boolean playout); private native void nativeSetAudioRecording(boolean recording); private native boolean nativeSetBitrate(Integer min, Integer current, Integer max); @@ -1279,6 +1308,8 @@ public static long createNativePeerConnectionObserver(Observer observer) { private native boolean nativeSetConfiguration(RTCConfiguration config); private native boolean nativeAddIceCandidate( String sdpMid, int sdpMLineIndex, String iceCandidateSdp); + private native void nativeAddIceCandidateWithObserver( + String sdpMid, int sdpMLineIndex, String iceCandidateSdp, AddIceObserver observer); private native boolean nativeRemoveIceCandidates(final IceCandidate[] candidates); private native boolean nativeAddLocalStream(long stream); private native void nativeRemoveLocalStream(long stream); diff 
--git a/sdk/android/api/org/webrtc/PeerConnectionFactory.java b/sdk/android/api/org/webrtc/PeerConnectionFactory.java index decdc0cc42..c87e639f23 100644 --- a/sdk/android/api/org/webrtc/PeerConnectionFactory.java +++ b/sdk/android/api/org/webrtc/PeerConnectionFactory.java @@ -175,7 +175,6 @@ public static class Builder { @Nullable private FecControllerFactoryFactoryInterface fecControllerFactoryFactory; @Nullable private NetworkControllerFactoryFactory networkControllerFactoryFactory; @Nullable private NetworkStatePredictorFactoryFactory networkStatePredictorFactoryFactory; - @Nullable private MediaTransportFactoryFactory mediaTransportFactoryFactory; @Nullable private NetEqFactoryFactory neteqFactoryFactory; private Builder() {} @@ -247,13 +246,6 @@ public Builder setNetworkStatePredictorFactoryFactory( return this; } - /** Sets a MediaTransportFactoryFactory for a PeerConnectionFactory. */ - public Builder setMediaTransportFactoryFactory( - MediaTransportFactoryFactory mediaTransportFactoryFactory) { - this.mediaTransportFactoryFactory = mediaTransportFactoryFactory; - return this; - } - /** * Sets a NetEqFactoryFactory for the PeerConnectionFactory. When using a * custom NetEqFactoryFactory, the AudioDecoderFactoryFactory will be set @@ -284,9 +276,6 @@ public PeerConnectionFactory createPeerConnectionFactory() { networkStatePredictorFactoryFactory == null ? 0 : networkStatePredictorFactoryFactory.createNativeNetworkStatePredictorFactory(), - mediaTransportFactoryFactory == null - ? 0 - : mediaTransportFactoryFactory.createNativeMediaTransportFactory(), neteqFactoryFactory == null ? 
0 : neteqFactoryFactory.createNativeNetEqFactory()); } } @@ -607,7 +596,7 @@ private static native PeerConnectionFactory nativeCreatePeerConnectionFactory(Co long audioDecoderFactory, VideoEncoderFactory encoderFactory, VideoDecoderFactory decoderFactory, long nativeAudioProcessor, long nativeFecControllerFactory, long nativeNetworkControllerFactory, - long nativeNetworkStatePredictorFactory, long mediaTransportFactory, long neteqFactory); + long nativeNetworkStatePredictorFactory, long neteqFactory); private static native long nativeCreatePeerConnection(long factory, PeerConnection.RTCConfiguration rtcConfig, MediaConstraints constraints, long nativeObserver, diff --git a/sdk/android/api/org/webrtc/PlatformSoftwareVideoDecoderFactory.java b/sdk/android/api/org/webrtc/PlatformSoftwareVideoDecoderFactory.java index 82417fd980..d334dfab4e 100644 --- a/sdk/android/api/org/webrtc/PlatformSoftwareVideoDecoderFactory.java +++ b/sdk/android/api/org/webrtc/PlatformSoftwareVideoDecoderFactory.java @@ -21,19 +21,9 @@ public class PlatformSoftwareVideoDecoderFactory extends MediaCodecVideoDecoderF */ private static final Predicate defaultAllowedPredicate = new Predicate() { - private String[] prefixWhitelist = - Arrays.copyOf(MediaCodecUtils.SOFTWARE_IMPLEMENTATION_PREFIXES, - MediaCodecUtils.SOFTWARE_IMPLEMENTATION_PREFIXES.length); - @Override public boolean test(MediaCodecInfo arg) { - final String name = arg.getName(); - for (String prefix : prefixWhitelist) { - if (name.startsWith(prefix)) { - return true; - } - } - return false; + return MediaCodecUtils.isSoftwareOnly(arg); } }; diff --git a/sdk/android/api/org/webrtc/RTCStats.java b/sdk/android/api/org/webrtc/RTCStats.java index 7ad7634c82..573d95300f 100644 --- a/sdk/android/api/org/webrtc/RTCStats.java +++ b/sdk/android/api/org/webrtc/RTCStats.java @@ -62,6 +62,7 @@ public String getId() { * - Double * - String * - The array form of any of the above (e.g., Integer[]) + * - Map of String keys to BigInteger / Double 
values */ public Map getMembers() { return members; diff --git a/sdk/android/api/org/webrtc/RtpParameters.java b/sdk/android/api/org/webrtc/RtpParameters.java index e4e09304e9..673ef47589 100644 --- a/sdk/android/api/org/webrtc/RtpParameters.java +++ b/sdk/android/api/org/webrtc/RtpParameters.java @@ -79,6 +79,9 @@ public static class Encoding { // SSRC to be used by this encoding. // Can't be changed between getParameters/setParameters. public Long ssrc; + // Set to true to allow dynamic frame length changes for audio: + // https://w3c.github.io/webrtc-extensions/#dom-rtcrtpencodingparameters-adaptiveptime + public boolean adaptiveAudioPacketTime; // This constructor is useful for creating simulcast layers. public Encoding(String rid, boolean active, Double scaleResolutionDownBy) { @@ -90,7 +93,8 @@ public Encoding(String rid, boolean active, Double scaleResolutionDownBy) { @CalledByNative("Encoding") Encoding(String rid, boolean active, double bitratePriority, @Priority int networkPriority, Integer maxBitrateBps, Integer minBitrateBps, Integer maxFramerate, - Integer numTemporalLayers, Double scaleResolutionDownBy, Long ssrc) { + Integer numTemporalLayers, Double scaleResolutionDownBy, Long ssrc, + boolean adaptiveAudioPacketTime) { this.rid = rid; this.active = active; this.bitratePriority = bitratePriority; @@ -101,6 +105,7 @@ public Encoding(String rid, boolean active, Double scaleResolutionDownBy) { this.numTemporalLayers = numTemporalLayers; this.scaleResolutionDownBy = scaleResolutionDownBy; this.ssrc = ssrc; + this.adaptiveAudioPacketTime = adaptiveAudioPacketTime; } @Nullable @@ -159,6 +164,11 @@ Double getScaleResolutionDownBy() { Long getSsrc() { return ssrc; } + + @CalledByNative("Encoding") + boolean getAdaptivePTime() { + return adaptiveAudioPacketTime; + } } public static class Codec { diff --git a/sdk/android/api/org/webrtc/RtpTransceiver.java b/sdk/android/api/org/webrtc/RtpTransceiver.java index 64d8eb41d1..aff1bfbde1 100644 --- 
a/sdk/android/api/org/webrtc/RtpTransceiver.java +++ b/sdk/android/api/org/webrtc/RtpTransceiver.java @@ -200,19 +200,40 @@ public RtpTransceiverDirection getCurrentDirection() { * sendrecv, sendonly, recvonly, or inactive. * https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-direction */ - public void setDirection(RtpTransceiverDirection rtpTransceiverDirection) { + public boolean setDirection(RtpTransceiverDirection rtpTransceiverDirection) { checkRtpTransceiverExists(); - nativeSetDirection(nativeRtpTransceiver, rtpTransceiverDirection); + return nativeSetDirection(nativeRtpTransceiver, rtpTransceiverDirection); } /** - * The Stop method irreversibly stops the RtpTransceiver. The sender of this - * transceiver will no longer send, the receiver will no longer receive. - * https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-stop + * The Stop method will for the time being call the StopInternal method. + * After a migration procedure, stop() will be equivalent to StopStandard. */ public void stop() { checkRtpTransceiverExists(); - nativeStop(nativeRtpTransceiver); + nativeStopInternal(nativeRtpTransceiver); + } + + /** + * The StopInternal method stops the RtpTransceiver, like Stop, but goes + * immediately to Stopped state. + */ + public void stopInternal() { + checkRtpTransceiverExists(); + nativeStopInternal(nativeRtpTransceiver); + } + + /** + * The StopStandard method irreversibly stops the RtpTransceiver. The sender + * of this transceiver will no longer send, the receiver will no longer + * receive. + * + *

The transceiver will enter Stopping state and signal NegotiationNeeded. + * https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-stop + */ + public void stopStandard() { + checkRtpTransceiverExists(); + nativeStopStandard(nativeRtpTransceiver); } @CalledByNative @@ -237,7 +258,8 @@ private void checkRtpTransceiverExists() { private static native boolean nativeStopped(long rtpTransceiver); private static native RtpTransceiverDirection nativeDirection(long rtpTransceiver); private static native RtpTransceiverDirection nativeCurrentDirection(long rtpTransceiver); - private static native void nativeStop(long rtpTransceiver); - private static native void nativeSetDirection( + private static native void nativeStopInternal(long rtpTransceiver); + private static native void nativeStopStandard(long rtpTransceiver); + private static native boolean nativeSetDirection( long rtpTransceiver, RtpTransceiverDirection rtpTransceiverDirection); } diff --git a/sdk/android/api/org/webrtc/ScreenCapturerAndroid.java b/sdk/android/api/org/webrtc/ScreenCapturerAndroid.java index bff5ad74a2..e37b34d9b2 100644 --- a/sdk/android/api/org/webrtc/ScreenCapturerAndroid.java +++ b/sdk/android/api/org/webrtc/ScreenCapturerAndroid.java @@ -75,6 +75,11 @@ private void checkNotDisposed() { } } + @Nullable + public MediaProjection getMediaProjection() { + return mediaProjection; + } + @Override // TODO(bugs.webrtc.org/8491): Remove NoSynchronizedMethodCheck suppression. 
@SuppressWarnings("NoSynchronizedMethodCheck") diff --git a/sdk/android/api/org/webrtc/SessionDescription.java b/sdk/android/api/org/webrtc/SessionDescription.java index 62601f0bf2..be89599a5f 100644 --- a/sdk/android/api/org/webrtc/SessionDescription.java +++ b/sdk/android/api/org/webrtc/SessionDescription.java @@ -22,7 +22,8 @@ public class SessionDescription { public static enum Type { OFFER, PRANSWER, - ANSWER; + ANSWER, + ROLLBACK; public String canonicalForm() { return name().toLowerCase(Locale.US); diff --git a/sdk/android/api/org/webrtc/SoftwareVideoDecoderFactory.java b/sdk/android/api/org/webrtc/SoftwareVideoDecoderFactory.java index 7abe3505a6..c59db3b47b 100644 --- a/sdk/android/api/org/webrtc/SoftwareVideoDecoderFactory.java +++ b/sdk/android/api/org/webrtc/SoftwareVideoDecoderFactory.java @@ -16,22 +16,22 @@ import java.util.List; public class SoftwareVideoDecoderFactory implements VideoDecoderFactory { - @Deprecated @Nullable @Override - public VideoDecoder createDecoder(String codecType) { - return createDecoder(new VideoCodecInfo(codecType, new HashMap<>())); - } + public VideoDecoder createDecoder(VideoCodecInfo codecInfo) { + String codecName = codecInfo.getName(); - @Nullable - @Override - public VideoDecoder createDecoder(VideoCodecInfo codecType) { - if (codecType.getName().equalsIgnoreCase("VP8")) { + if (codecName.equalsIgnoreCase(VideoCodecMimeType.VP8.toSdpCodecName())) { return new LibvpxVp8Decoder(); } - if (codecType.getName().equalsIgnoreCase("VP9") && LibvpxVp9Decoder.nativeIsSupported()) { + if (codecName.equalsIgnoreCase(VideoCodecMimeType.VP9.toSdpCodecName()) + && LibvpxVp9Decoder.nativeIsSupported()) { return new LibvpxVp9Decoder(); } + if (codecName.equalsIgnoreCase(VideoCodecMimeType.AV1.toSdpCodecName()) + && LibaomAv1Decoder.nativeIsSupported()) { + return new LibaomAv1Decoder(); + } return null; } @@ -44,9 +44,12 @@ public VideoCodecInfo[] getSupportedCodecs() { static VideoCodecInfo[] supportedCodecs() { List codecs = new 
ArrayList(); - codecs.add(new VideoCodecInfo("VP8", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.VP8.toSdpCodecName(), new HashMap<>())); if (LibvpxVp9Decoder.nativeIsSupported()) { - codecs.add(new VideoCodecInfo("VP9", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.VP9.toSdpCodecName(), new HashMap<>())); + } + if (LibaomAv1Decoder.nativeIsSupported()) { + codecs.add(new VideoCodecInfo(VideoCodecMimeType.AV1.toSdpCodecName(), new HashMap<>())); } return codecs.toArray(new VideoCodecInfo[codecs.size()]); diff --git a/sdk/android/api/org/webrtc/SoftwareVideoEncoderFactory.java b/sdk/android/api/org/webrtc/SoftwareVideoEncoderFactory.java index ed70d228ef..4de39dcdba 100644 --- a/sdk/android/api/org/webrtc/SoftwareVideoEncoderFactory.java +++ b/sdk/android/api/org/webrtc/SoftwareVideoEncoderFactory.java @@ -18,13 +18,20 @@ public class SoftwareVideoEncoderFactory implements VideoEncoderFactory { @Nullable @Override - public VideoEncoder createEncoder(VideoCodecInfo info) { - if (info.name.equalsIgnoreCase("VP8")) { + public VideoEncoder createEncoder(VideoCodecInfo codecInfo) { + String codecName = codecInfo.getName(); + + if (codecName.equalsIgnoreCase(VideoCodecMimeType.VP8.toSdpCodecName())) { return new LibvpxVp8Encoder(); } - if (info.name.equalsIgnoreCase("VP9") && LibvpxVp9Encoder.nativeIsSupported()) { + if (codecName.equalsIgnoreCase(VideoCodecMimeType.VP9.toSdpCodecName()) + && LibvpxVp9Encoder.nativeIsSupported()) { return new LibvpxVp9Encoder(); } + if (codecName.equalsIgnoreCase(VideoCodecMimeType.AV1.toSdpCodecName()) + && LibaomAv1Encoder.nativeIsSupported()) { + return new LibaomAv1Encoder(); + } return null; } @@ -37,9 +44,12 @@ public VideoCodecInfo[] getSupportedCodecs() { static VideoCodecInfo[] supportedCodecs() { List codecs = new ArrayList(); - codecs.add(new VideoCodecInfo("VP8", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.VP8.toSdpCodecName(), new HashMap<>())); if 
(LibvpxVp9Encoder.nativeIsSupported()) { - codecs.add(new VideoCodecInfo("VP9", new HashMap<>())); + codecs.add(new VideoCodecInfo(VideoCodecMimeType.VP9.toSdpCodecName(), new HashMap<>())); + } + if (LibaomAv1Encoder.nativeIsSupported()) { + codecs.add(new VideoCodecInfo(VideoCodecMimeType.AV1.toSdpCodecName(), new HashMap<>())); } return codecs.toArray(new VideoCodecInfo[codecs.size()]); diff --git a/sdk/android/api/org/webrtc/SurfaceTextureHelper.java b/sdk/android/api/org/webrtc/SurfaceTextureHelper.java index 3522a87487..0dd45cfc93 100644 --- a/sdk/android/api/org/webrtc/SurfaceTextureHelper.java +++ b/sdk/android/api/org/webrtc/SurfaceTextureHelper.java @@ -199,6 +199,10 @@ private SurfaceTextureHelper(Context sharedContext, Handler handler, boolean ali oesTextureId = GlUtil.generateTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES); surfaceTexture = new SurfaceTexture(oesTextureId); setOnFrameAvailableListener(surfaceTexture, (SurfaceTexture st) -> { + if (hasPendingTexture) { + Logging.d(TAG, "A frame is already pending, dropping frame."); + } + hasPendingTexture = true; tryDeliverTextureFrame(); }, handler); @@ -263,6 +267,17 @@ public void setTextureSize(int textureWidth, int textureHeight) { }); } + /** + * Forces a frame to be produced. If no new frame is available, the last frame is sent to the + * listener again. + */ + public void forceFrame() { + handler.post(() -> { + hasPendingTexture = true; + tryDeliverTextureFrame(); + }); + } + /** Set the rotation of the delivered frames. 
*/ public void setFrameRotation(int rotation) { handler.post(() -> this.frameRotation = rotation); diff --git a/sdk/android/api/org/webrtc/VideoCodecInfo.java b/sdk/android/api/org/webrtc/VideoCodecInfo.java index 8dd9295fd7..e11782dedd 100644 --- a/sdk/android/api/org/webrtc/VideoCodecInfo.java +++ b/sdk/android/api/org/webrtc/VideoCodecInfo.java @@ -69,6 +69,11 @@ public int hashCode() { return Arrays.hashCode(values); } + @Override + public String toString() { + return "VideoCodec{" + name + " " + params + "}"; + } + @CalledByNative String getName() { return name; diff --git a/sdk/android/api/org/webrtc/VideoDecoder.java b/sdk/android/api/org/webrtc/VideoDecoder.java index 879b694b28..a80fa4fef2 100644 --- a/sdk/android/api/org/webrtc/VideoDecoder.java +++ b/sdk/android/api/org/webrtc/VideoDecoder.java @@ -86,11 +86,6 @@ default long createNativeVideoDecoder() { * Request the decoder to decode a frame. */ @CalledByNative VideoCodecStatus decode(EncodedImage frame, DecodeInfo info); - /** - * The decoder should return true if it prefers late decoding. That is, it can not decode - * infinite number of frames before the decoded frame is consumed. - */ - @CalledByNative boolean getPrefersLateDecoding(); /** * Should return a descriptive name for the implementation. Gets called once and cached. May be * called from arbitrary thread. diff --git a/sdk/android/api/org/webrtc/VideoDecoderFactory.java b/sdk/android/api/org/webrtc/VideoDecoderFactory.java index 2dd09670bd..3f0168f23e 100644 --- a/sdk/android/api/org/webrtc/VideoDecoderFactory.java +++ b/sdk/android/api/org/webrtc/VideoDecoderFactory.java @@ -18,18 +18,7 @@ public interface VideoDecoderFactory { * Creates a VideoDecoder for the given codec. Supports the same codecs supported by * VideoEncoderFactory. 
*/ - @Deprecated - @Nullable - default VideoDecoder createDecoder(String codecType) { - throw new UnsupportedOperationException("Deprecated and not implemented."); - } - - /** Creates a decoder for the given video codec. */ - @Nullable - @CalledByNative - default VideoDecoder createDecoder(VideoCodecInfo info) { - return createDecoder(info.getName()); - } + @Nullable @CalledByNative VideoDecoder createDecoder(VideoCodecInfo info); /** * Enumerates the list of supported video codecs. diff --git a/sdk/android/api/org/webrtc/VideoEncoder.java b/sdk/android/api/org/webrtc/VideoEncoder.java index cb8eb81767..460428192d 100644 --- a/sdk/android/api/org/webrtc/VideoEncoder.java +++ b/sdk/android/api/org/webrtc/VideoEncoder.java @@ -86,6 +86,8 @@ public class CodecSpecificInfoVP9 extends CodecSpecificInfo {} public class CodecSpecificInfoH264 extends CodecSpecificInfo {} + public class CodecSpecificInfoAV1 extends CodecSpecificInfo {} + /** * Represents bitrate allocated for an encoder to produce frames. Bitrate can be divided between * spatial and temporal layers. 
diff --git a/sdk/android/api/org/webrtc/WrappedNativeVideoDecoder.java b/sdk/android/api/org/webrtc/WrappedNativeVideoDecoder.java index b70c664915..027120e48e 100644 --- a/sdk/android/api/org/webrtc/WrappedNativeVideoDecoder.java +++ b/sdk/android/api/org/webrtc/WrappedNativeVideoDecoder.java @@ -31,11 +31,6 @@ public final VideoCodecStatus decode(EncodedImage frame, DecodeInfo info) { throw new UnsupportedOperationException("Not implemented."); } - @Override - public final boolean getPrefersLateDecoding() { - throw new UnsupportedOperationException("Not implemented."); - } - @Override public final String getImplementationName() { throw new UnsupportedOperationException("Not implemented."); diff --git a/sdk/android/api/org/webrtc/YuvConverter.java b/sdk/android/api/org/webrtc/YuvConverter.java index 0e2d5055f7..9c00678900 100644 --- a/sdk/android/api/org/webrtc/YuvConverter.java +++ b/sdk/android/api/org/webrtc/YuvConverter.java @@ -12,6 +12,8 @@ import android.graphics.Matrix; import android.opengl.GLES20; +import android.opengl.GLException; +import android.support.annotation.Nullable; import java.nio.ByteBuffer; import org.webrtc.VideoFrame.I420Buffer; import org.webrtc.VideoFrame.TextureBuffer; @@ -20,7 +22,9 @@ * Class for converting OES textures to a YUV ByteBuffer. It can be constructed on any thread, but * should only be operated from a single thread with an active EGL context. */ -public class YuvConverter { +public final class YuvConverter { + private static final String TAG = "YuvConverter"; + private static final String FRAGMENT_SHADER = // Difference in texture coordinate corresponding to one // sub-pixel in the x direction. @@ -122,9 +126,17 @@ public YuvConverter(VideoFrameDrawer videoFrameDrawer) { } /** Converts the texture buffer to I420. 
*/ + @Nullable public I420Buffer convert(TextureBuffer inputTextureBuffer) { - threadChecker.checkIsOnValidThread(); + try { + return convertInternal(inputTextureBuffer); + } catch (GLException e) { + Logging.w(TAG, "Failed to convert TextureBuffer", e); + } + return null; + } + private I420Buffer convertInternal(TextureBuffer inputTextureBuffer) { TextureBuffer preparedBuffer = (TextureBuffer) videoFrameDrawer.prepareBufferForViewportSize( inputTextureBuffer, inputTextureBuffer.getWidth(), inputTextureBuffer.getHeight()); diff --git a/sdk/android/api/org/webrtc/audio/JavaAudioDeviceModule.java b/sdk/android/api/org/webrtc/audio/JavaAudioDeviceModule.java index 9ae00c51b7..4ca6466622 100644 --- a/sdk/android/api/org/webrtc/audio/JavaAudioDeviceModule.java +++ b/sdk/android/api/org/webrtc/audio/JavaAudioDeviceModule.java @@ -11,10 +11,12 @@ package org.webrtc.audio; import android.content.Context; +import android.media.AudioAttributes; import android.media.AudioDeviceInfo; import android.media.AudioManager; import android.os.Build; import android.support.annotation.RequiresApi; +import java.util.concurrent.ScheduledExecutorService; import org.webrtc.JniCommon; import org.webrtc.Logging; @@ -31,6 +33,7 @@ public static Builder builder(Context context) { public static class Builder { private final Context context; + private ScheduledExecutorService scheduler; private final AudioManager audioManager; private int inputSampleRate; private int outputSampleRate; @@ -45,12 +48,20 @@ public static class Builder { private boolean useHardwareNoiseSuppressor = isBuiltInNoiseSuppressorSupported(); private boolean useStereoInput; private boolean useStereoOutput; + private AudioAttributes audioAttributes; + private boolean useLowLatency; private Builder(Context context) { this.context = context; this.audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE); this.inputSampleRate = WebRtcAudioManager.getSampleRate(audioManager); this.outputSampleRate = 
WebRtcAudioManager.getSampleRate(audioManager); + this.useLowLatency = false; + } + + public Builder setScheduler(ScheduledExecutorService scheduler) { + this.scheduler = scheduler; + return this; } /** @@ -186,11 +197,27 @@ public Builder setUseStereoOutput(boolean useStereoOutput) { return this; } + /** + * Control if the low-latency mode should be used. The default is disabled. + */ + public Builder setUseLowLatency(boolean useLowLatency) { + this.useLowLatency = useLowLatency; + return this; + } + + /** + * Set custom {@link AudioAttributes} to use. + */ + public Builder setAudioAttributes(AudioAttributes audioAttributes) { + this.audioAttributes = audioAttributes; + return this; + } + /** * Construct an AudioDeviceModule based on the supplied arguments. The caller takes ownership * and is responsible for calling release(). */ - public AudioDeviceModule createAudioDeviceModule() { + public JavaAudioDeviceModule createAudioDeviceModule() { Logging.d(TAG, "createAudioDeviceModule"); if (useHardwareNoiseSuppressor) { Logging.d(TAG, "HW NS will be used."); @@ -208,11 +235,21 @@ public AudioDeviceModule createAudioDeviceModule() { } Logging.d(TAG, "HW AEC will not be used."); } - final WebRtcAudioRecord audioInput = new WebRtcAudioRecord(context, audioManager, audioSource, - audioFormat, audioRecordErrorCallback, audioRecordStateCallback, samplesReadyCallback, - useHardwareAcousticEchoCanceler, useHardwareNoiseSuppressor); - final WebRtcAudioTrack audioOutput = new WebRtcAudioTrack( - context, audioManager, audioTrackErrorCallback, audioTrackStateCallback); + // Low-latency mode was introduced in API version 26, see + // https://developer.android.com/reference/android/media/AudioTrack#PERFORMANCE_MODE_LOW_LATENCY + final int MIN_LOW_LATENCY_SDK_VERSION = 26; + if (useLowLatency && Build.VERSION.SDK_INT >= MIN_LOW_LATENCY_SDK_VERSION) { + Logging.d(TAG, "Low latency mode will be used."); + } + ScheduledExecutorService executor = this.scheduler; + if (executor == 
null) { + executor = WebRtcAudioRecord.newDefaultScheduler(); + } + final WebRtcAudioRecord audioInput = new WebRtcAudioRecord(context, executor, audioManager, + audioSource, audioFormat, audioRecordErrorCallback, audioRecordStateCallback, + samplesReadyCallback, useHardwareAcousticEchoCanceler, useHardwareNoiseSuppressor); + final WebRtcAudioTrack audioOutput = new WebRtcAudioTrack(context, audioManager, + audioAttributes, audioTrackErrorCallback, audioTrackStateCallback, useLowLatency); return new JavaAudioDeviceModule(context, audioManager, audioInput, audioOutput, inputSampleRate, outputSampleRate, useStereoInput, useStereoOutput); } diff --git a/sdk/android/instrumentationtests/AndroidManifest.xml b/sdk/android/instrumentationtests/AndroidManifest.xml index 75df968f13..55028da703 100644 --- a/sdk/android/instrumentationtests/AndroidManifest.xml +++ b/sdk/android/instrumentationtests/AndroidManifest.xml @@ -16,7 +16,7 @@ - + diff --git a/sdk/android/instrumentationtests/src/org/webrtc/DefaultVideoEncoderFactoryTest.java b/sdk/android/instrumentationtests/src/org/webrtc/DefaultVideoEncoderFactoryTest.java index 69b0129c36..8135e80eaf 100644 --- a/sdk/android/instrumentationtests/src/org/webrtc/DefaultVideoEncoderFactoryTest.java +++ b/sdk/android/instrumentationtests/src/org/webrtc/DefaultVideoEncoderFactoryTest.java @@ -70,13 +70,14 @@ public void testGetSupportedCodecsWithHardwareH264HighProfile() { VideoEncoderFactory hwFactory = new CustomHardwareVideoEncoderFactory(true, true); DefaultVideoEncoderFactory dvef = new DefaultVideoEncoderFactory(hwFactory); VideoCodecInfo[] videoCodecs = dvef.getSupportedCodecs(); - assertEquals(4, videoCodecs.length); + assertEquals(5, videoCodecs.length); assertEquals("VP8", videoCodecs[0].name); assertEquals("VP9", videoCodecs[1].name); - assertEquals("H264", videoCodecs[2].name); - assertEquals("42e01f", videoCodecs[2].params.get("profile-level-id")); + assertEquals("AV1X", videoCodecs[2].name); assertEquals("H264", 
videoCodecs[3].name); - assertEquals("640c1f", videoCodecs[3].params.get("profile-level-id")); + assertEquals("42e01f", videoCodecs[3].params.get("profile-level-id")); + assertEquals("H264", videoCodecs[4].name); + assertEquals("640c1f", videoCodecs[4].params.get("profile-level-id")); } @SmallTest @@ -85,11 +86,12 @@ public void testGetSupportedCodecsWithoutHardwareH264HighProfile() { VideoEncoderFactory hwFactory = new CustomHardwareVideoEncoderFactory(true, false); DefaultVideoEncoderFactory dvef = new DefaultVideoEncoderFactory(hwFactory); VideoCodecInfo[] videoCodecs = dvef.getSupportedCodecs(); - assertEquals(3, videoCodecs.length); + assertEquals(4, videoCodecs.length); assertEquals("VP8", videoCodecs[0].name); assertEquals("VP9", videoCodecs[1].name); - assertEquals("H264", videoCodecs[2].name); - assertEquals("42e01f", videoCodecs[2].params.get("profile-level-id")); + assertEquals("AV1X", videoCodecs[2].name); + assertEquals("H264", videoCodecs[3].name); + assertEquals("42e01f", videoCodecs[3].params.get("profile-level-id")); } @SmallTest @@ -98,12 +100,13 @@ public void testGetSupportedCodecsWithoutHardwareVP8() { VideoEncoderFactory hwFactory = new CustomHardwareVideoEncoderFactory(false, true); DefaultVideoEncoderFactory dvef = new DefaultVideoEncoderFactory(hwFactory); VideoCodecInfo[] videoCodecs = dvef.getSupportedCodecs(); - assertEquals(4, videoCodecs.length); + assertEquals(5, videoCodecs.length); assertEquals("VP8", videoCodecs[0].name); assertEquals("VP9", videoCodecs[1].name); - assertEquals("H264", videoCodecs[2].name); - assertEquals("42e01f", videoCodecs[2].params.get("profile-level-id")); + assertEquals("AV1X", videoCodecs[2].name); assertEquals("H264", videoCodecs[3].name); - assertEquals("640c1f", videoCodecs[3].params.get("profile-level-id")); + assertEquals("42e01f", videoCodecs[3].params.get("profile-level-id")); + assertEquals("H264", videoCodecs[4].name); + assertEquals("640c1f", videoCodecs[4].params.get("profile-level-id")); } } 
diff --git a/sdk/android/instrumentationtests/src/org/webrtc/HardwareVideoEncoderTest.java b/sdk/android/instrumentationtests/src/org/webrtc/HardwareVideoEncoderTest.java index 1591cae0e6..4eb033b210 100644 --- a/sdk/android/instrumentationtests/src/org/webrtc/HardwareVideoEncoderTest.java +++ b/sdk/android/instrumentationtests/src/org/webrtc/HardwareVideoEncoderTest.java @@ -101,7 +101,6 @@ public void onEncodedFrame(EncodedImage frame, VideoEncoder.CodecSpecificInfo in .setCaptureTimeNs(frame.captureTimeNs) .setFrameType(frame.frameType) .setRotation(frame.rotation) - .setCompleteFrame(frame.completeFrame) .setQp(frame.qp) .createEncodedImage()); } diff --git a/sdk/android/instrumentationtests/src/org/webrtc/NetworkMonitorTest.java b/sdk/android/instrumentationtests/src/org/webrtc/NetworkMonitorTest.java index 36136ca933..5f7e07df55 100644 --- a/sdk/android/instrumentationtests/src/org/webrtc/NetworkMonitorTest.java +++ b/sdk/android/instrumentationtests/src/org/webrtc/NetworkMonitorTest.java @@ -15,7 +15,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import static org.webrtc.NetworkMonitorAutoDetect.INVALID_NET_ID; import android.annotation.SuppressLint; import android.content.Context; @@ -31,14 +30,15 @@ import android.support.test.filters.MediumTest; import android.support.test.filters.SmallTest; import android.support.test.rule.UiThreadTestRule; +import java.util.List; import org.chromium.base.test.BaseJUnit4ClassRunner; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; -import org.webrtc.NetworkMonitorAutoDetect.ConnectionType; +import org.webrtc.NetworkChangeDetector.ConnectionType; +import org.webrtc.NetworkChangeDetector.NetworkInformation; import org.webrtc.NetworkMonitorAutoDetect.ConnectivityManagerDelegate; -import org.webrtc.NetworkMonitorAutoDetect.NetworkInformation; import 
org.webrtc.NetworkMonitorAutoDetect.NetworkState; /** @@ -53,6 +53,9 @@ public class NetworkMonitorTest { @Rule public UiThreadTestRule uiThreadTestRule = new UiThreadTestRule(); + private static final long INVALID_NET_ID = -1; + private NetworkChangeDetector detector; + /** * Listens for alerts fired by the NetworkMonitor when network status changes. */ @@ -155,6 +158,10 @@ public void onNetworkConnect(NetworkInformation networkInfo) {} @Override public void onNetworkDisconnect(long networkHandle) {} + + @Override + public void onNetworkPreference(List types, @NetworkPreference int preference) { + } } private static final Object lock = new Object(); @@ -179,6 +186,17 @@ private static Handler getUiThreadHandler() { */ private void createTestMonitor() { Context context = InstrumentationRegistry.getTargetContext(); + + NetworkMonitor.getInstance().setNetworkChangeDetectorFactory( + new NetworkChangeDetectorFactory() { + @Override + public NetworkChangeDetector create( + NetworkChangeDetector.Observer observer, Context context) { + detector = new NetworkMonitorAutoDetect(observer, context); + return detector; + } + }); + receiver = NetworkMonitor.createAndSetAutoDetectForTest(context); assertNotNull(receiver); @@ -311,9 +329,9 @@ public void testStartStopMonitoring() { Context context = ContextUtils.getApplicationContext(); networkMonitor.startMonitoring(context); assertEquals(1, networkMonitor.getNumObservers()); - assertNotNull(networkMonitor.getNetworkMonitorAutoDetect()); + assertEquals(detector, networkMonitor.getNetworkChangeDetector()); networkMonitor.stopMonitoring(); assertEquals(0, networkMonitor.getNumObservers()); - assertNull(networkMonitor.getNetworkMonitorAutoDetect()); + assertNull(networkMonitor.getNetworkChangeDetector()); } } diff --git a/sdk/android/instrumentationtests/src/org/webrtc/PeerConnectionEndToEndTest.java b/sdk/android/instrumentationtests/src/org/webrtc/PeerConnectionEndToEndTest.java index 88be833504..c380310b83 100644 --- 
a/sdk/android/instrumentationtests/src/org/webrtc/PeerConnectionEndToEndTest.java +++ b/sdk/android/instrumentationtests/src/org/webrtc/PeerConnectionEndToEndTest.java @@ -1488,6 +1488,38 @@ public void testRemoteStreamUpdatedWhenTracksAddedOrRemoved() throws Exception { factory.dispose(); } + @Test + @SmallTest + public void testRollback() throws Exception { + PeerConnectionFactory factory = PeerConnectionFactory.builder().createPeerConnectionFactory(); + PeerConnection.RTCConfiguration config = new PeerConnection.RTCConfiguration(Arrays.asList()); + config.sdpSemantics = PeerConnection.SdpSemantics.UNIFIED_PLAN; + + ObserverExpectations offeringExpectations = new ObserverExpectations("PCTest:offerer"); + PeerConnection pc = factory.createPeerConnection(config, offeringExpectations); + + SdpObserverLatch sdpLatch = new SdpObserverLatch(); + pc.createOffer(sdpLatch, new MediaConstraints()); + assertTrue(sdpLatch.await()); + SessionDescription offer = sdpLatch.getSdp(); + + sdpLatch = new SdpObserverLatch(); + offeringExpectations.expectSignalingChange(SignalingState.HAVE_LOCAL_OFFER); + pc.setLocalDescription(sdpLatch, offer); + assertTrue(sdpLatch.await()); + + SessionDescription rollback = new SessionDescription(SessionDescription.Type.ROLLBACK, ""); + sdpLatch = new SdpObserverLatch(); + offeringExpectations.expectSignalingChange(SignalingState.STABLE); + // TODO(bugs.webrtc.org/11970): determine if triggering ONN (twice even) is correct. 
+ offeringExpectations.expectRenegotiationNeeded(); + offeringExpectations.expectRenegotiationNeeded(); + pc.setLocalDescription(sdpLatch, rollback); + assertTrue(sdpLatch.await()); + + assertTrue(offeringExpectations.waitForAllExpectationsToBeSatisfied(DEFAULT_TIMEOUT_SECONDS)); + } + private static void negotiate(PeerConnection offeringPC, ObserverExpectations offeringExpectations, PeerConnection answeringPC, ObserverExpectations answeringExpectations) { diff --git a/sdk/android/native_api/audio_device_module/audio_device_android.cc b/sdk/android/native_api/audio_device_module/audio_device_android.cc index 16a3643ae0..8a57e4af91 100644 --- a/sdk/android/native_api/audio_device_module/audio_device_android.cc +++ b/sdk/android/native_api/audio_device_module/audio_device_android.cc @@ -57,7 +57,7 @@ void GetDefaultAudioParameters(JNIEnv* env, rtc::scoped_refptr CreateAAudioAudioDeviceModule( JNIEnv* env, jobject application_context) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; // Get default audio input/output parameters. AudioParameters input_parameters; AudioParameters output_parameters; @@ -76,7 +76,7 @@ rtc::scoped_refptr CreateAAudioAudioDeviceModule( rtc::scoped_refptr CreateJavaAudioDeviceModule( JNIEnv* env, jobject application_context) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; // Get default audio input/output parameters. const JavaParamRef j_context(application_context); const ScopedJavaLocalRef j_audio_manager = @@ -104,7 +104,7 @@ rtc::scoped_refptr CreateJavaAudioDeviceModule( rtc::scoped_refptr CreateOpenSLESAudioDeviceModule( JNIEnv* env, jobject application_context) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; // Get default audio input/output parameters. 
AudioParameters input_parameters; AudioParameters output_parameters; @@ -127,7 +127,7 @@ rtc::scoped_refptr CreateOpenSLESAudioDeviceModule( rtc::scoped_refptr CreateJavaInputAndOpenSLESOutputAudioDeviceModule(JNIEnv* env, jobject application_context) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; // Get default audio input/output parameters. const JavaParamRef j_context(application_context); const ScopedJavaLocalRef j_audio_manager = diff --git a/sdk/android/native_api/jni/java_types.cc b/sdk/android/native_api/jni/java_types.cc index a97c81f1f2..af02c10f4c 100644 --- a/sdk/android/native_api/jni/java_types.cc +++ b/sdk/android/native_api/jni/java_types.cc @@ -10,6 +10,7 @@ #include "sdk/android/native_api/jni/java_types.h" +#include #include #include @@ -51,14 +52,15 @@ Iterable::Iterator::Iterator(JNIEnv* jni, const JavaRef& iterable) Iterable::Iterator::Iterator(Iterator&& other) : jni_(std::move(other.jni_)), iterator_(std::move(other.iterator_)), - value_(std::move(other.value_)), - thread_checker_(std::move(other.thread_checker_)) {} + value_(std::move(other.value_)) { + RTC_DCHECK_RUN_ON(&thread_checker_); +} Iterable::Iterator::~Iterator() = default; // Advances the iterator one step. Iterable::Iterator& Iterable::Iterator::operator++() { - RTC_CHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); if (AtEnd()) { // Can't move past the end. 
return *this; @@ -93,7 +95,7 @@ ScopedJavaLocalRef& Iterable::Iterator::operator*() { } bool Iterable::Iterator::AtEnd() const { - RTC_CHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(&thread_checker_); return jni_ == nullptr || IsNull(jni_, iterator_); } diff --git a/sdk/android/native_api/jni/java_types.h b/sdk/android/native_api/jni/java_types.h index 955911c186..a1639d6478 100644 --- a/sdk/android/native_api/jni/java_types.h +++ b/sdk/android/native_api/jni/java_types.h @@ -18,14 +18,16 @@ #define SDK_ANDROID_NATIVE_API_JNI_JAVA_TYPES_H_ #include + #include +#include #include #include #include "absl/types/optional.h" #include "api/array_view.h" +#include "api/sequence_checker.h" #include "rtc_base/checks.h" -#include "rtc_base/thread_checker.h" #include "sdk/android/native_api/jni/scoped_java_ref.h" // Abort the process if |jni| has a Java exception pending. @@ -93,7 +95,7 @@ class Iterable { JNIEnv* jni_ = nullptr; ScopedJavaLocalRef iterator_; ScopedJavaLocalRef value_; - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; RTC_DISALLOW_COPY_AND_ASSIGN(Iterator); }; diff --git a/sdk/android/native_api/base/network_monitor.cc b/sdk/android/native_api/network_monitor/network_monitor.cc similarity index 93% rename from sdk/android/native_api/base/network_monitor.cc rename to sdk/android/native_api/network_monitor/network_monitor.cc index 515e9f21fb..38be7fdef7 100644 --- a/sdk/android/native_api/base/network_monitor.cc +++ b/sdk/android/native_api/network_monitor/network_monitor.cc @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "sdk/android/native_api/base/network_monitor.h" +#include "sdk/android/native_api/network_monitor/network_monitor.h" #include diff --git a/sdk/android/native_api/base/network_monitor.h b/sdk/android/native_api/network_monitor/network_monitor.h similarity index 80% rename from sdk/android/native_api/base/network_monitor.h rename to sdk/android/native_api/network_monitor/network_monitor.h index 135ebb1e86..45ecd75543 100644 --- a/sdk/android/native_api/base/network_monitor.h +++ b/sdk/android/native_api/network_monitor/network_monitor.h @@ -8,14 +8,14 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef SDK_ANDROID_NATIVE_API_BASE_NETWORK_MONITOR_H_ -#define SDK_ANDROID_NATIVE_API_BASE_NETWORK_MONITOR_H_ +#ifndef SDK_ANDROID_NATIVE_API_NETWORK_MONITOR_NETWORK_MONITOR_H_ +#define SDK_ANDROID_NATIVE_API_NETWORK_MONITOR_NETWORK_MONITOR_H_ #include #include -#include "rtc_base/network_monitor.h" +#include "rtc_base/network_monitor_factory.h" namespace webrtc { @@ -33,4 +33,4 @@ CreateAndroidNetworkMonitorFactory(); } // namespace webrtc -#endif // SDK_ANDROID_NATIVE_API_BASE_NETWORK_MONITOR_H_ +#endif // SDK_ANDROID_NATIVE_API_NETWORK_MONITOR_NETWORK_MONITOR_H_ diff --git a/sdk/android/native_api/peerconnection/peer_connection_factory.cc b/sdk/android/native_api/peerconnection/peer_connection_factory.cc index e6839754ac..4e742d1b7a 100644 --- a/sdk/android/native_api/peerconnection/peer_connection_factory.cc +++ b/sdk/android/native_api/peerconnection/peer_connection_factory.cc @@ -23,11 +23,10 @@ jobject NativeToJavaPeerConnectionFactory( rtc::scoped_refptr pcf, std::unique_ptr network_thread, std::unique_ptr worker_thread, - std::unique_ptr signaling_thread, - rtc::NetworkMonitorFactory* network_monitor_factory) { + std::unique_ptr signaling_thread) { return webrtc::jni::NativeToJavaPeerConnectionFactory( jni, pcf, std::move(network_thread), std::move(worker_thread), - std::move(signaling_thread), network_monitor_factory); + 
std::move(signaling_thread)); } } // namespace webrtc diff --git a/sdk/android/native_api/peerconnection/peer_connection_factory.h b/sdk/android/native_api/peerconnection/peer_connection_factory.h index 889d6092e7..00550a9b12 100644 --- a/sdk/android/native_api/peerconnection/peer_connection_factory.h +++ b/sdk/android/native_api/peerconnection/peer_connection_factory.h @@ -26,8 +26,7 @@ jobject NativeToJavaPeerConnectionFactory( rtc::scoped_refptr pcf, std::unique_ptr network_thread, std::unique_ptr worker_thread, - std::unique_ptr signaling_thread, - rtc::NetworkMonitorFactory* network_monitor_factory = nullptr); + std::unique_ptr signaling_thread); } // namespace webrtc diff --git a/sdk/android/native_api/stacktrace/stacktrace.cc b/sdk/android/native_api/stacktrace/stacktrace.cc index 6350acaacf..cea3490091 100644 --- a/sdk/android/native_api/stacktrace/stacktrace.cc +++ b/sdk/android/native_api/stacktrace/stacktrace.cc @@ -27,9 +27,9 @@ #endif #include "absl/base/attributes.h" -#include "rtc_base/critical_section.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -92,7 +92,7 @@ struct SignalHandlerOutputState { }; // Global lock to ensure only one thread gets interrupted at a time. -ABSL_CONST_INIT rtc::GlobalLock g_signal_handler_lock; +ABSL_CONST_INIT GlobalMutex g_signal_handler_lock(absl::kConstInit); // Argument passed to the ThreadSignalHandler() from the sampling thread to the // sampled (stopped) thread. This value is set just before sending signal to the // thread and reset when handler is done. 
@@ -153,7 +153,7 @@ const char* CaptureRawStacktrace(int pid, act.sa_flags = SA_RESTART | SA_SIGINFO; sigemptyset(&act.sa_mask); - rtc::GlobalLockScope ls(&g_signal_handler_lock); + GlobalMutexLock ls(&g_signal_handler_lock); g_signal_handler_output_state = params; if (sigaction(kSignal, &act, &old_act) != 0) diff --git a/sdk/android/native_api/video/video_source.cc b/sdk/android/native_api/video/video_source.cc index 1f4bc4dead..4f1409ef7b 100644 --- a/sdk/android/native_api/video/video_source.cc +++ b/sdk/android/native_api/video/video_source.cc @@ -10,6 +10,7 @@ #include "sdk/android/native_api/video/video_source.h" +#include "rtc_base/ref_counted_object.h" #include "sdk/android/src/jni/android_video_track_source.h" #include "sdk/android/src/jni/native_capturer_observer.h" @@ -28,7 +29,7 @@ class JavaVideoTrackSourceImpl : public JavaVideoTrackSourceInterface { bool is_screencast, bool align_timestamps) : android_video_track_source_( - new rtc::RefCountedObject( + rtc::make_ref_counted( signaling_thread, env, is_screencast, @@ -108,7 +109,7 @@ rtc::scoped_refptr CreateJavaVideoSource( rtc::Thread* signaling_thread, bool is_screencast, bool align_timestamps) { - return new rtc::RefCountedObject( + return rtc::make_ref_counted( jni, signaling_thread, is_screencast, align_timestamps); } diff --git a/sdk/android/native_unittests/android_network_monitor_unittest.cc b/sdk/android/native_unittests/android_network_monitor_unittest.cc index 5c17d44fb2..c342ce692e 100644 --- a/sdk/android/native_unittests/android_network_monitor_unittest.cc +++ b/sdk/android/native_unittests/android_network_monitor_unittest.cc @@ -51,11 +51,16 @@ class AndroidNetworkMonitorTest : public ::testing::Test { std::make_unique(env, context); } - void SetUp() { + void SetUp() override { // Reset network monitor states. network_monitor_->Stop(); } + void TearDown() override { + // The network monitor must be stopped, before it is destructed. 
+ network_monitor_->Stop(); + } + protected: std::unique_ptr network_monitor_; }; @@ -69,7 +74,7 @@ TEST_F(AndroidNetworkMonitorTest, TestFindNetworkHandleUsingIpv4Address) { network_monitor_->SetNetworkInfos(net_infos); auto network_handle = - network_monitor_->FindNetworkHandleFromAddress(ipv4_address); + network_monitor_->FindNetworkHandleFromAddressOrName(ipv4_address, ""); ASSERT_TRUE(network_handle.has_value()); EXPECT_EQ(ipv4_handle, *network_handle); @@ -86,9 +91,9 @@ TEST_F(AndroidNetworkMonitorTest, TestFindNetworkHandleUsingFullIpv6Address) { network_monitor_->SetNetworkInfos(net_infos); auto network_handle1 = - network_monitor_->FindNetworkHandleFromAddress(ipv6_address1); + network_monitor_->FindNetworkHandleFromAddressOrName(ipv6_address1, ""); auto network_handle2 = - network_monitor_->FindNetworkHandleFromAddress(ipv6_address2); + network_monitor_->FindNetworkHandleFromAddressOrName(ipv6_address2, ""); ASSERT_TRUE(network_handle1.has_value()); EXPECT_EQ(ipv6_handle, *network_handle1); @@ -111,9 +116,9 @@ TEST_F(AndroidNetworkMonitorTest, network_monitor_->SetNetworkInfos(net_infos); auto network_handle1 = - network_monitor_->FindNetworkHandleFromAddress(ipv6_address1); + network_monitor_->FindNetworkHandleFromAddressOrName(ipv6_address1, ""); auto network_handle2 = - network_monitor_->FindNetworkHandleFromAddress(ipv6_address2); + network_monitor_->FindNetworkHandleFromAddressOrName(ipv6_address2, ""); ASSERT_TRUE(network_handle1.has_value()); EXPECT_EQ(ipv6_handle, *network_handle1); @@ -121,5 +126,46 @@ TEST_F(AndroidNetworkMonitorTest, EXPECT_EQ(ipv6_handle, *network_handle2); } +TEST_F(AndroidNetworkMonitorTest, TestFindNetworkHandleUsingIfName) { + // Start() updates the states introduced by the field trial. + network_monitor_->Start(); + jni::NetworkHandle ipv6_handle = 200; + rtc::IPAddress ipv6_address1 = GetIpAddressFromIpv6String(kTestIpv6Address1); + + // Set up an IPv6 network. 
+ jni::NetworkInformation net_info = + CreateNetworkInformation("wlan0", ipv6_handle, ipv6_address1); + std::vector net_infos(1, net_info); + network_monitor_->SetNetworkInfos(net_infos); + + rtc::IPAddress ipv4_address(kTestIpv4Address); + + // Search using ip address only... + auto network_handle1 = + network_monitor_->FindNetworkHandleFromAddressOrName(ipv4_address, ""); + + // Search using ip address AND if_name (for typical ipv4 over ipv6 tunnel). + auto network_handle2 = network_monitor_->FindNetworkHandleFromAddressOrName( + ipv4_address, "v4-wlan0"); + + ASSERT_FALSE(network_handle1.has_value()); + ASSERT_TRUE(network_handle2.has_value()); + EXPECT_EQ(ipv6_handle, *network_handle2); +} + +TEST_F(AndroidNetworkMonitorTest, TestUnderlyingVpnType) { + ScopedFieldTrials field_trials("WebRTC-BindUsingInterfaceName/Enabled/"); + jni::NetworkHandle ipv4_handle = 100; + rtc::IPAddress ipv4_address(kTestIpv4Address); + jni::NetworkInformation net_info = + CreateNetworkInformation("wlan0", ipv4_handle, ipv4_address); + net_info.type = jni::NETWORK_VPN; + net_info.underlying_type_for_vpn = jni::NETWORK_WIFI; + network_monitor_->SetNetworkInfos({net_info}); + + EXPECT_EQ(rtc::ADAPTER_TYPE_WIFI, + network_monitor_->GetVpnUnderlyingAdapterType("v4-wlan0")); +} + } // namespace test } // namespace webrtc diff --git a/sdk/android/native_unittests/audio_device/audio_device_unittest.cc b/sdk/android/native_unittests/audio_device/audio_device_unittest.cc index 88dfad4a88..31da60cbc3 100644 --- a/sdk/android/native_unittests/audio_device/audio_device_unittest.cc +++ b/sdk/android/native_unittests/audio_device/audio_device_unittest.cc @@ -16,9 +16,9 @@ #include "modules/audio_device/include/audio_device.h" #include "modules/audio_device/include/mock_audio_transport.h" #include "rtc_base/arraysize.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "rtc_base/format_macros.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" 
#include "sdk/android/generated_native_unittests_jni/BuildInfo_jni.h" #include "sdk/android/native_api/audio_device_module/audio_device_android.h" @@ -179,7 +179,7 @@ class FifoAudioStream : public AudioStreamInterface { } int16_t* memory = new int16_t[frames_per_buffer_]; memcpy(static_cast(&memory[0]), source, bytes_per_buffer_); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); fifo_->push_back(memory); const size_t size = fifo_->size(); if (size > largest_size_) { @@ -195,7 +195,7 @@ class FifoAudioStream : public AudioStreamInterface { void Read(void* destination, size_t num_frames) override { ASSERT_EQ(num_frames, frames_per_buffer_); PRINTD("-"); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); if (fifo_->empty()) { memset(destination, 0, bytes_per_buffer_); } else { @@ -226,7 +226,7 @@ class FifoAudioStream : public AudioStreamInterface { } using AudioBufferList = std::list; - rtc::CriticalSection lock_; + Mutex lock_; const size_t frames_per_buffer_; const size_t bytes_per_buffer_; std::unique_ptr fifo_; diff --git a/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc b/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc index 54613f9f57..75535d052b 100644 --- a/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc +++ b/sdk/android/native_unittests/peerconnection/peer_connection_factory_unittest.cc @@ -100,7 +100,7 @@ TEST(PeerConnectionFactoryTest, NativeToJavaPeerConnectionFactory) { jobject java_factory = NativeToJavaPeerConnectionFactory( jni, factory, std::move(network_thread), std::move(worker_thread), - std::move(signaling_thread), nullptr /* network_monitor_factory */); + std::move(signaling_thread)); RTC_LOG(INFO) << java_factory; diff --git a/sdk/android/native_unittests/stacktrace/stacktrace_unittest.cc b/sdk/android/native_unittests/stacktrace/stacktrace_unittest.cc index e3b5e78cdd..b77d86719f 100644 --- 
a/sdk/android/native_unittests/stacktrace/stacktrace_unittest.cc +++ b/sdk/android/native_unittests/stacktrace/stacktrace_unittest.cc @@ -16,12 +16,12 @@ #include #include -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "rtc_base/logging.h" #include "rtc_base/platform_thread.h" #include "rtc_base/string_utils.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/system/inline.h" #include "system_wrappers/include/sleep.h" #include "test/gtest.h" @@ -118,15 +118,15 @@ class RtcEventDeadlock : public DeadlockInterface { class RtcCriticalSectionDeadlock : public DeadlockInterface { public: RtcCriticalSectionDeadlock() - : critscope_(std::make_unique(&crit_)) {} + : mutex_lock_(std::make_unique(&mutex_)) {} private: - void Deadlock() override { rtc::CritScope lock(&crit_); } + void Deadlock() override { MutexLock lock(&mutex_); } - void Release() override { critscope_.reset(); } + void Release() override { mutex_lock_.reset(); } - rtc::CriticalSection crit_; - std::unique_ptr critscope_; + Mutex mutex_; + std::unique_ptr mutex_lock_; }; class SpinDeadlock : public DeadlockInterface { @@ -153,28 +153,24 @@ class SleepDeadlock : public DeadlockInterface { } }; -// This is the function that is exectued by the thread that will deadlock and -// have its stacktrace captured. -void ThreadFunction(void* void_params) { - ThreadParams* params = static_cast(void_params); - params->tid = gettid(); - - params->deadlock_region_start_address = GetCurrentRelativeExecutionAddress(); - params->deadlock_start_event.Set(); - params->deadlock_impl->Deadlock(); - params->deadlock_region_end_address = GetCurrentRelativeExecutionAddress(); - - params->deadlock_done_event.Set(); -} - void TestStacktrace(std::unique_ptr deadlock_impl) { // Set params that will be sent to other thread. ThreadParams params; params.deadlock_impl = deadlock_impl.get(); // Spawn thread. 
- rtc::PlatformThread thread(&ThreadFunction, ¶ms, "StacktraceTest"); - thread.Start(); + auto thread = rtc::PlatformThread::SpawnJoinable( + [¶ms] { + params.tid = gettid(); + params.deadlock_region_start_address = + GetCurrentRelativeExecutionAddress(); + params.deadlock_start_event.Set(); + params.deadlock_impl->Deadlock(); + params.deadlock_region_end_address = + GetCurrentRelativeExecutionAddress(); + params.deadlock_done_event.Set(); + }, + "StacktraceTest"); // Wait until the thread has entered the deadlock region, and take a very // brief nap to give it time to reach the actual deadlock. @@ -198,8 +194,6 @@ void TestStacktrace(std::unique_ptr deadlock_impl) { << rtc::ToHex(params.deadlock_region_start_address) << ", " << rtc::ToHex(params.deadlock_region_end_address) << "] not contained in: " << StackTraceToString(stack_trace); - - thread.Stop(); } class LookoutLogSink final : public rtc::LogSink { @@ -259,13 +253,9 @@ TEST(Stacktrace, TestRtcEventDeadlockDetection) { // Start a thread that waits for an event. rtc::Event ev; - rtc::PlatformThread thread( - [](void* arg) { - auto* ev = static_cast(arg); - ev->Wait(rtc::Event::kForever); - }, - &ev, "TestRtcEventDeadlockDetection"); - thread.Start(); + auto thread = rtc::PlatformThread::SpawnJoinable( + [&ev] { ev.Wait(rtc::Event::kForever); }, + "TestRtcEventDeadlockDetection"); // The message should appear after 3 sec. We'll wait up to 10 sec in an // attempt to not be flaky. @@ -273,7 +263,7 @@ TEST(Stacktrace, TestRtcEventDeadlockDetection) { // Unblock the thread and shut it down. 
ev.Set(); - thread.Stop(); + thread.Finalize(); rtc::LogMessage::RemoveLogToStream(&sink); } diff --git a/sdk/android/src/java/org/webrtc/AndroidVideoDecoder.java b/sdk/android/src/java/org/webrtc/AndroidVideoDecoder.java index b797e2521e..aa68e9d832 100644 --- a/sdk/android/src/java/org/webrtc/AndroidVideoDecoder.java +++ b/sdk/android/src/java/org/webrtc/AndroidVideoDecoder.java @@ -180,7 +180,7 @@ private VideoCodecStatus initDecodeInternal(int width, int height) { try { codec = mediaCodecWrapperFactory.createByCodecName(codecName); - } catch (IOException | IllegalArgumentException e) { + } catch (IOException | IllegalArgumentException | IllegalStateException e) { Logging.e(TAG, "Cannot create media decoder " + codecName); return VideoCodecStatus.FALLBACK_SOFTWARE; } @@ -191,7 +191,7 @@ private VideoCodecStatus initDecodeInternal(int width, int height) { } codec.configure(format, surface, null, 0); codec.start(); - } catch (IllegalStateException e) { + } catch (IllegalStateException | IllegalArgumentException e) { Logging.e(TAG, "initDecode failed", e); release(); return VideoCodecStatus.FALLBACK_SOFTWARE; @@ -246,10 +246,6 @@ public VideoCodecStatus decode(EncodedImage frame, DecodeInfo info) { Logging.e(TAG, "decode() - key frame required first"); return VideoCodecStatus.NO_OUTPUT; } - if (!frame.completeFrame) { - Logging.e(TAG, "decode() - complete frame required first"); - return VideoCodecStatus.NO_OUTPUT; - } } int index; @@ -295,11 +291,6 @@ public VideoCodecStatus decode(EncodedImage frame, DecodeInfo info) { return VideoCodecStatus.OK; } - @Override - public boolean getPrefersLateDecoding() { - return true; - } - @Override public String getImplementationName() { return codecName; @@ -594,13 +585,21 @@ private void reformat(MediaFormat format) { } // Compare to existing width, height, and save values under the dimension lock. 
synchronized (dimensionLock) { - if (hasDecodedFirstFrame && (width != newWidth || height != newHeight)) { - stopOnOutputThread(new RuntimeException("Unexpected size change. Configured " + width + "*" - + height + ". New " + newWidth + "*" + newHeight)); - return; + if (newWidth != width || newHeight != height) { + if (hasDecodedFirstFrame) { + stopOnOutputThread(new RuntimeException("Unexpected size change. " + + "Configured " + width + "*" + height + ". " + + "New " + newWidth + "*" + newHeight)); + return; + } else if (newWidth <= 0 || newHeight <= 0) { + Logging.w(TAG, + "Unexpected format dimensions. Configured " + width + "*" + height + ". " + + "New " + newWidth + "*" + newHeight + ". Skip it"); + return; + } + width = newWidth; + height = newHeight; } - width = newWidth; - height = newHeight; } // Note: texture mode ignores colorFormat. Hence, if the texture helper is non-null, skip diff --git a/sdk/android/src/java/org/webrtc/EglBase10Impl.java b/sdk/android/src/java/org/webrtc/EglBase10Impl.java index 3ae38f0e78..1affbd9de6 100644 --- a/sdk/android/src/java/org/webrtc/EglBase10Impl.java +++ b/sdk/android/src/java/org/webrtc/EglBase10Impl.java @@ -39,7 +39,9 @@ class EglBase10Impl implements EglBase10 { // EGL wrapper for an actual EGLContext. private static class Context implements EglBase10.Context { + private final EGL10 egl; private final EGLContext eglContext; + private final EGLConfig eglContextConfig; @Override public EGLContext getRawContext() { @@ -48,15 +50,41 @@ public EGLContext getRawContext() { @Override public long getNativeEglContext() { - // TODO(magjed): Implement. There is no easy way of getting the native context for EGL 1.0. We - // need to make sure to have an EglSurface, then make the context current using that surface, - // and then call into JNI and call the native version of eglGetCurrentContext. Then we need to - // restore the state and return the native context. 
- return 0 /* EGL_NO_CONTEXT */; + EGLContext previousContext = egl.eglGetCurrentContext(); + EGLDisplay currentDisplay = egl.eglGetCurrentDisplay(); + EGLSurface previousDrawSurface = egl.eglGetCurrentSurface(EGL10.EGL_DRAW); + EGLSurface previousReadSurface = egl.eglGetCurrentSurface(EGL10.EGL_READ); + EGLSurface tempEglSurface = null; + + if (currentDisplay == EGL10.EGL_NO_DISPLAY) { + currentDisplay = egl.eglGetDisplay(EGL10.EGL_DEFAULT_DISPLAY); + } + + try { + if (previousContext != eglContext) { + int[] surfaceAttribs = {EGL10.EGL_WIDTH, 1, EGL10.EGL_HEIGHT, 1, EGL10.EGL_NONE}; + tempEglSurface = + egl.eglCreatePbufferSurface(currentDisplay, eglContextConfig, surfaceAttribs); + if (!egl.eglMakeCurrent(currentDisplay, tempEglSurface, tempEglSurface, eglContext)) { + throw new RuntimeException( + "Failed to make temporary EGL surface active: " + egl.eglGetError()); + } + } + + return nativeGetCurrentNativeEGLContext(); + } finally { + if (tempEglSurface != null) { + egl.eglMakeCurrent( + currentDisplay, previousDrawSurface, previousReadSurface, previousContext); + egl.eglDestroySurface(currentDisplay, tempEglSurface); + } + } } - public Context(EGLContext eglContext) { + public Context(EGL10 egl, EGLContext eglContext, EGLConfig eglContextConfig) { + this.egl = egl; this.eglContext = eglContext; + this.eglContextConfig = eglContextConfig; } } @@ -64,7 +92,7 @@ public Context(EGLContext eglContext) { public EglBase10Impl(EGLContext sharedContext, int[] configAttributes) { this.egl = (EGL10) EGLContext.getEGL(); eglDisplay = getEglDisplay(); - eglConfig = getEglConfig(eglDisplay, configAttributes); + eglConfig = getEglConfig(egl, eglDisplay, configAttributes); final int openGlesVersion = EglBase.getOpenGlesVersionFromConfig(configAttributes); Logging.d(TAG, "Using OpenGL ES version " + openGlesVersion); eglContext = createEglContext(sharedContext, eglDisplay, eglConfig, openGlesVersion); @@ -186,7 +214,7 @@ public void createPbufferSurface(int width, int height) 
{ @Override public org.webrtc.EglBase.Context getEglBaseContext() { - return new Context(eglContext); + return new Context(egl, eglContext, eglConfig); } @Override @@ -294,7 +322,7 @@ private EGLDisplay getEglDisplay() { } // Return an EGLConfig, or die trying. - private EGLConfig getEglConfig(EGLDisplay eglDisplay, int[] configAttributes) { + private static EGLConfig getEglConfig(EGL10 egl, EGLDisplay eglDisplay, int[] configAttributes) { EGLConfig[] configs = new EGLConfig[1]; int[] numConfigs = new int[1]; if (!egl.eglChooseConfig(eglDisplay, configAttributes, configs, configs.length, numConfigs)) { @@ -329,4 +357,6 @@ private EGLContext createEglContext(@Nullable EGLContext sharedContext, EGLDispl } return eglContext; } + + private static native long nativeGetCurrentNativeEGLContext(); } diff --git a/sdk/android/src/java/org/webrtc/GlGenericDrawer.java b/sdk/android/src/java/org/webrtc/GlGenericDrawer.java index 92b4245499..0e9718ea0b 100644 --- a/sdk/android/src/java/org/webrtc/GlGenericDrawer.java +++ b/sdk/android/src/java/org/webrtc/GlGenericDrawer.java @@ -219,11 +219,14 @@ private void prepareShader(ShaderType shaderType, float[] texMatrix, int frameWi shader = currentShader; } else { // Allocate new shader. - currentShaderType = shaderType; + currentShaderType = null; if (currentShader != null) { currentShader.release(); + currentShader = null; } + shader = createShader(shaderType); + currentShaderType = shaderType; currentShader = shader; shader.useProgram(); diff --git a/sdk/android/src/java/org/webrtc/HardwareVideoEncoder.java b/sdk/android/src/java/org/webrtc/HardwareVideoEncoder.java index 42800aef10..f116fefc83 100644 --- a/sdk/android/src/java/org/webrtc/HardwareVideoEncoder.java +++ b/sdk/android/src/java/org/webrtc/HardwareVideoEncoder.java @@ -169,7 +169,7 @@ public void waitForZero() { * intervals, and bitrateAdjuster. * * @param codecName the hardware codec implementation to use - * @param codecType the type of the given video codec (eg. 
VP8, VP9, or H264) + * @param codecType the type of the given video codec (eg. VP8, VP9, H264 or AV1) * @param surfaceColorFormat color format for surface mode or null if not available * @param yuvColorFormat color format for bytebuffer mode * @param keyFrameIntervalSec interval in seconds between key frames; used to initialize the codec @@ -370,7 +370,6 @@ public VideoCodecStatus encode(VideoFrame videoFrame, EncodeInfo encodeInfo) { int bufferSize = videoFrameBuffer.getHeight() * videoFrameBuffer.getWidth() * 3 / 2; EncodedImage.Builder builder = EncodedImage.builder() .setCaptureTimeNs(videoFrame.getTimestampNs()) - .setCompleteFrame(true) .setEncodedWidth(videoFrame.getBuffer().getWidth()) .setEncodedHeight(videoFrame.getBuffer().getHeight()) .setRotation(videoFrame.getRotation()); diff --git a/sdk/android/src/java/org/webrtc/MediaCodecUtils.java b/sdk/android/src/java/org/webrtc/MediaCodecUtils.java index 9028cc3ae4..b634fb34f5 100644 --- a/sdk/android/src/java/org/webrtc/MediaCodecUtils.java +++ b/sdk/android/src/java/org/webrtc/MediaCodecUtils.java @@ -10,6 +10,7 @@ package org.webrtc; +import android.annotation.TargetApi; import android.media.MediaCodecInfo; import android.media.MediaCodecInfo.CodecCapabilities; import android.os.Build; @@ -28,7 +29,8 @@ class MediaCodecUtils { static final String INTEL_PREFIX = "OMX.Intel."; static final String NVIDIA_PREFIX = "OMX.Nvidia."; static final String QCOM_PREFIX = "OMX.qcom."; - static final String[] SOFTWARE_IMPLEMENTATION_PREFIXES = {"OMX.google.", "OMX.SEC."}; + static final String[] SOFTWARE_IMPLEMENTATION_PREFIXES = { + "OMX.google.", "OMX.SEC.", "c2.android"}; // NV12 color format supported by QCOM codec, but not declared in MediaCodec - // see /hardware/qcom/media/mm-core/inc/OMX_QCOMExtns.h @@ -89,6 +91,7 @@ static Map getCodecProperties(VideoCodecMimeType type, boolean h switch (type) { case VP8: case VP9: + case AV1: return new HashMap(); case H264: return H264Utils.getDefaultH264Params(highProfile); 
@@ -97,6 +100,36 @@ static Map getCodecProperties(VideoCodecMimeType type, boolean h } } + static boolean isHardwareAccelerated(MediaCodecInfo info) { + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) { + return isHardwareAcceleratedQOrHigher(info); + } + return !isSoftwareOnly(info); + } + + @TargetApi(29) + private static boolean isHardwareAcceleratedQOrHigher(android.media.MediaCodecInfo codecInfo) { + return codecInfo.isHardwareAccelerated(); + } + + static boolean isSoftwareOnly(android.media.MediaCodecInfo codecInfo) { + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) { + return isSoftwareOnlyQOrHigher(codecInfo); + } + String name = codecInfo.getName(); + for (String prefix : SOFTWARE_IMPLEMENTATION_PREFIXES) { + if (name.startsWith(prefix)) { + return true; + } + } + return false; + } + + @TargetApi(29) + private static boolean isSoftwareOnlyQOrHigher(android.media.MediaCodecInfo codecInfo) { + return codecInfo.isSoftwareOnly(); + } + private MediaCodecUtils() { // This class should not be instantiated. 
} diff --git a/sdk/android/src/java/org/webrtc/MediaCodecVideoDecoderFactory.java b/sdk/android/src/java/org/webrtc/MediaCodecVideoDecoderFactory.java index 036aca5822..5a1d63e1c5 100644 --- a/sdk/android/src/java/org/webrtc/MediaCodecVideoDecoderFactory.java +++ b/sdk/android/src/java/org/webrtc/MediaCodecVideoDecoderFactory.java @@ -46,7 +46,7 @@ public MediaCodecVideoDecoderFactory(@Nullable EglBase.Context sharedContext, @Nullable @Override public VideoDecoder createDecoder(VideoCodecInfo codecType) { - VideoCodecMimeType type = VideoCodecMimeType.valueOf(codecType.getName()); + VideoCodecMimeType type = VideoCodecMimeType.fromSdpCodecName(codecType.getName()); MediaCodecInfo info = findCodecForType(type); if (info == null) { @@ -64,11 +64,11 @@ public VideoCodecInfo[] getSupportedCodecs() { List supportedCodecInfos = new ArrayList(); // Generate a list of supported codecs in order of preference: // VP8, VP9, H264 (high profile), and H264 (baseline profile). - for (VideoCodecMimeType type : new VideoCodecMimeType[] { - VideoCodecMimeType.VP8, VideoCodecMimeType.VP9, VideoCodecMimeType.H264}) { + for (VideoCodecMimeType type : new VideoCodecMimeType[] {VideoCodecMimeType.VP8, + VideoCodecMimeType.VP9, VideoCodecMimeType.H264, VideoCodecMimeType.AV1}) { MediaCodecInfo codec = findCodecForType(type); if (codec != null) { - String name = type.name(); + String name = type.toSdpCodecName(); if (type == VideoCodecMimeType.H264 && isH264HighProfileSupported(codec)) { supportedCodecInfos.add(new VideoCodecInfo( name, MediaCodecUtils.getCodecProperties(type, /* highProfile= */ true))); diff --git a/sdk/android/src/java/org/webrtc/VideoCodecMimeType.java b/sdk/android/src/java/org/webrtc/VideoCodecMimeType.java index f27a9176cf..93a9286165 100644 --- a/sdk/android/src/java/org/webrtc/VideoCodecMimeType.java +++ b/sdk/android/src/java/org/webrtc/VideoCodecMimeType.java @@ -14,7 +14,8 @@ enum VideoCodecMimeType { VP8("video/x-vnd.on2.vp8"), VP9("video/x-vnd.on2.vp9"), - 
H264("video/avc"); + H264("video/avc"), + AV1("video/av01"); private final String mimeType; @@ -25,4 +26,12 @@ private VideoCodecMimeType(String mimeType) { String mimeType() { return mimeType; } + + static VideoCodecMimeType fromSdpCodecName(String codecName) { + return codecName.equals("AV1X") ? AV1 : valueOf(codecName); + } + + String toSdpCodecName() { + return this == AV1 ? "AV1X" : name(); + } } diff --git a/sdk/android/src/java/org/webrtc/audio/LowLatencyAudioBufferManager.java b/sdk/android/src/java/org/webrtc/audio/LowLatencyAudioBufferManager.java new file mode 100644 index 0000000000..70c625ab4f --- /dev/null +++ b/sdk/android/src/java/org/webrtc/audio/LowLatencyAudioBufferManager.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +package org.webrtc.audio; + +import android.media.AudioTrack; +import android.os.Build; +import org.webrtc.Logging; + +// Lowers the buffer size if no underruns are detected for 100 ms. Once an +// underrun is detected, the buffer size is increased by 10 ms and it will not +// be lowered further. The buffer size will never be increased more than +// 5 times, to avoid the possibility of the buffer size increasing without +// bounds. +class LowLatencyAudioBufferManager { + private static final String TAG = "LowLatencyAudioBufferManager"; + // The underrun count that was valid during the previous call to maybeAdjustBufferSize(). Used to + // detect increases in the value. + private int prevUnderrunCount; + // The number of ticks to wait without an underrun before decreasing the buffer size. 
+ private int ticksUntilNextDecrease; + // Indicate if we should continue to decrease the buffer size. + private boolean keepLoweringBufferSize; + // How often the buffer size was increased. + private int bufferIncreaseCounter; + + public LowLatencyAudioBufferManager() { + this.prevUnderrunCount = 0; + this.ticksUntilNextDecrease = 10; + this.keepLoweringBufferSize = true; + this.bufferIncreaseCounter = 0; + } + + public void maybeAdjustBufferSize(AudioTrack audioTrack) { + if (Build.VERSION.SDK_INT >= 26) { + final int underrunCount = audioTrack.getUnderrunCount(); + if (underrunCount > prevUnderrunCount) { + // Don't increase buffer more than 5 times. Continuing to increase the buffer size + // could be harmful on low-power devices that regularly experience underruns under + // normal conditions. + if (bufferIncreaseCounter < 5) { + // Underrun detected, increase buffer size by 10ms. + final int currentBufferSize = audioTrack.getBufferSizeInFrames(); + final int newBufferSize = currentBufferSize + audioTrack.getPlaybackRate() / 100; + Logging.d(TAG, + "Underrun detected! Increasing AudioTrack buffer size from " + currentBufferSize + + " to " + newBufferSize); + audioTrack.setBufferSizeInFrames(newBufferSize); + bufferIncreaseCounter++; + } + // Stop trying to lower the buffer size. + keepLoweringBufferSize = false; + prevUnderrunCount = underrunCount; + ticksUntilNextDecrease = 10; + } else if (keepLoweringBufferSize) { + ticksUntilNextDecrease--; + if (ticksUntilNextDecrease <= 0) { + // No underrun seen for 100 ms, try to lower the buffer size by 10ms. + final int bufferSize10ms = audioTrack.getPlaybackRate() / 100; + // Never go below a buffer size of 10ms. 
+ final int currentBufferSize = audioTrack.getBufferSizeInFrames(); + final int newBufferSize = Math.max(bufferSize10ms, currentBufferSize - bufferSize10ms); + if (newBufferSize != currentBufferSize) { + Logging.d(TAG, + "Lowering AudioTrack buffer size from " + currentBufferSize + " to " + + newBufferSize); + audioTrack.setBufferSizeInFrames(newBufferSize); + } + ticksUntilNextDecrease = 10; + } + } + } + } +} diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java index 6f575be2ce..6b69b264ea 100644 --- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java +++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioEffects.java @@ -219,14 +219,14 @@ private static void assertTrue(boolean condition) { // Returns true if an effect of the specified type is available. Functionally // equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but // faster as it avoids the expensive OS call to enumerate effects. 
- private static boolean isEffectTypeAvailable(UUID effectType, UUID blackListedUuid) { + private static boolean isEffectTypeAvailable(UUID effectType, UUID blockListedUuid) { Descriptor[] effects = getAvailableEffects(); if (effects == null) { return false; } for (Descriptor d : effects) { if (d.type.equals(effectType)) { - return !d.uuid.equals(blackListedUuid); + return !d.uuid.equals(blockListedUuid); } } return false; diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java index 018196b784..734695937a 100644 --- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java +++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioRecord.java @@ -31,7 +31,10 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import org.webrtc.CalledByNative; import org.webrtc.Logging; import org.webrtc.ThreadUtils; @@ -90,12 +93,12 @@ class WebRtcAudioRecord { private @Nullable AudioRecordThread audioThread; private @Nullable AudioDeviceInfo preferredDevice; - private @Nullable ScheduledExecutorService executor; + private final ScheduledExecutorService executor; private @Nullable ScheduledFuture future; private volatile boolean microphoneMute; - private boolean audioSourceMatchesRecordingSession; - private boolean isAudioConfigVerified; + private final AtomicReference audioSourceMatchesRecordingSessionRef = + new AtomicReference<>(); private byte[] emptyBytes; private final @Nullable AudioRecordErrorCallback errorCallback; @@ -179,14 +182,15 @@ public void stopThread() { @CalledByNative WebRtcAudioRecord(Context context, AudioManager audioManager) { - this(context, audioManager, DEFAULT_AUDIO_SOURCE, DEFAULT_AUDIO_FORMAT, - null 
/* errorCallback */, null /* stateCallback */, null /* audioSamplesReadyCallback */, - WebRtcAudioEffects.isAcousticEchoCancelerSupported(), + this(context, newDefaultScheduler() /* scheduler */, audioManager, DEFAULT_AUDIO_SOURCE, + DEFAULT_AUDIO_FORMAT, null /* errorCallback */, null /* stateCallback */, + null /* audioSamplesReadyCallback */, WebRtcAudioEffects.isAcousticEchoCancelerSupported(), WebRtcAudioEffects.isNoiseSuppressorSupported()); } - public WebRtcAudioRecord(Context context, AudioManager audioManager, int audioSource, - int audioFormat, @Nullable AudioRecordErrorCallback errorCallback, + public WebRtcAudioRecord(Context context, ScheduledExecutorService scheduler, + AudioManager audioManager, int audioSource, int audioFormat, + @Nullable AudioRecordErrorCallback errorCallback, @Nullable AudioRecordStateCallback stateCallback, @Nullable SamplesReadyCallback audioSamplesReadyCallback, boolean isAcousticEchoCancelerSupported, boolean isNoiseSuppressorSupported) { @@ -197,6 +201,7 @@ public WebRtcAudioRecord(Context context, AudioManager audioManager, int audioSo throw new IllegalArgumentException("HW NS not supported"); } this.context = context; + this.executor = scheduler; this.audioManager = audioManager; this.audioSource = audioSource; this.audioFormat = audioFormat; @@ -227,7 +232,7 @@ boolean isNoiseSuppressorSupported() { // checked before using the returned value of isAudioSourceMatchingRecordingSession(). @CalledByNative boolean isAudioConfigVerified() { - return isAudioConfigVerified; + return audioSourceMatchesRecordingSessionRef.get() != null; } // Returns true if verifyAudioConfig() succeeds. This value is set after a specific delay when @@ -236,7 +241,8 @@ boolean isAudioConfigVerified() { // enabled in WebRtcAudioRecord to ensure that the returned value is valid. 
@CalledByNative boolean isAudioSourceMatchingRecordingSession() { - if (!isAudioConfigVerified) { + Boolean audioSourceMatchesRecordingSession = audioSourceMatchesRecordingSessionRef.get(); + if (audioSourceMatchesRecordingSession == null) { Logging.w(TAG, "Audio configuration has not yet been verified"); return false; } @@ -298,6 +304,7 @@ private int initRecording(int sampleRate, int channels) { // Throws IllegalArgumentException. audioRecord = createAudioRecordOnMOrHigher( audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes); + audioSourceMatchesRecordingSessionRef.set(null); if (preferredDevice != null) { setPreferredDevice(preferredDevice); } @@ -306,6 +313,7 @@ private int initRecording(int sampleRate, int channels) { // Throws UnsupportedOperationException. audioRecord = createAudioRecordOnLowerThanM( audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes); + audioSourceMatchesRecordingSessionRef.set(null); } } catch (IllegalArgumentException | UnsupportedOperationException e) { // Report of exception message is sufficient. Example: "Cannot create AudioRecord". @@ -324,7 +332,7 @@ private int initRecording(int sampleRate, int channels) { // Check number of active recording sessions. Should be zero but we have seen conflict cases // and adding a log for it can help us figure out details about conflicting sessions. final int numActiveRecordingSessions = - logRecordingConfigurations(false /* verifyAudioConfig */); + logRecordingConfigurations(audioRecord, false /* verifyAudioConfig */); if (numActiveRecordingSessions != 0) { // Log the conflict as a warning since initialization did in fact succeed. Most likely, the // upcoming call to startRecording() will fail under these conditions. 
@@ -371,7 +379,7 @@ private boolean startRecording() { } audioThread = new AudioRecordThread("AudioRecordJavaThread"); audioThread.start(); - scheduleLogRecordingConfigurationsTask(); + scheduleLogRecordingConfigurationsTask(audioRecord); return true; } @@ -386,10 +394,6 @@ private boolean stopRecording() { } future = null; } - if (executor != null) { - executor.shutdownNow(); - executor = null; - } audioThread.stopThread(); if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) { Logging.e(TAG, "Join of AudioRecordJavaThread timed out"); @@ -442,8 +446,8 @@ private void logMainParametersExtended() { @TargetApi(Build.VERSION_CODES.N) // Checks the number of active recording sessions and logs the states of all active sessions. - // Returns number of active sessions. - private int logRecordingConfigurations(boolean verifyAudioConfig) { + // Returns number of active sessions. Note that this could occur on arbituary thread. + private int logRecordingConfigurations(AudioRecord audioRecord, boolean verifyAudioConfig) { if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) { Logging.w(TAG, "AudioManager#getActiveRecordingConfigurations() requires N or higher"); return 0; @@ -451,6 +455,7 @@ private int logRecordingConfigurations(boolean verifyAudioConfig) { if (audioRecord == null) { return 0; } + // Get a list of the currently active audio recording configurations of the device (can be more // than one). An empty list indicates there is no recording active when queried. List configs = audioManager.getActiveRecordingConfigurations(); @@ -463,10 +468,9 @@ private int logRecordingConfigurations(boolean verifyAudioConfig) { // to the AudioRecord instance) is matching what the audio recording configuration lists // as its client parameters. If these do not match, recording might work but under invalid // conditions. 
- audioSourceMatchesRecordingSession = + audioSourceMatchesRecordingSessionRef.set( verifyAudioConfig(audioRecord.getAudioSource(), audioRecord.getAudioSessionId(), - audioRecord.getFormat(), audioRecord.getRoutedDevice(), configs); - isAudioConfigVerified = true; + audioRecord.getFormat(), audioRecord.getRoutedDevice(), configs)); } } return numActiveRecordingSessions; @@ -501,12 +505,13 @@ private void releaseAudioResources() { audioRecord.release(); audioRecord = null; } + audioSourceMatchesRecordingSessionRef.set(null); } private void reportWebRtcAudioRecordInitError(String errorMessage) { Logging.e(TAG, "Init recording error: " + errorMessage); WebRtcAudioUtils.logAudioState(TAG, context, audioManager); - logRecordingConfigurations(false /* verifyAudioConfig */); + logRecordingConfigurations(audioRecord, false /* verifyAudioConfig */); if (errorCallback != null) { errorCallback.onWebRtcAudioRecordInitError(errorMessage); } @@ -516,7 +521,7 @@ private void reportWebRtcAudioRecordStartError( AudioRecordStartErrorCode errorCode, String errorMessage) { Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage); WebRtcAudioUtils.logAudioState(TAG, context, audioManager); - logRecordingConfigurations(false /* verifyAudioConfig */); + logRecordingConfigurations(audioRecord, false /* verifyAudioConfig */); if (errorCallback != null) { errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage); } @@ -564,18 +569,18 @@ private static int getBytesPerSample(int audioFormat) { // Use an ExecutorService to schedule a task after a given delay where the task consists of // checking (by logging) the current status of active recording sessions. 
- private void scheduleLogRecordingConfigurationsTask() { + private void scheduleLogRecordingConfigurationsTask(AudioRecord audioRecord) { Logging.d(TAG, "scheduleLogRecordingConfigurationsTask"); if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) { return; } - if (executor != null) { - executor.shutdownNow(); - } - executor = Executors.newSingleThreadScheduledExecutor(); Callable callable = () -> { - logRecordingConfigurations(true /* verifyAudioConfig */); + if (this.audioRecord == audioRecord) { + logRecordingConfigurations(audioRecord, true /* verifyAudioConfig */); + } else { + Logging.d(TAG, "audio record has changed"); + } return "Scheduled task is done"; }; @@ -704,4 +709,22 @@ private static String audioStateToString(int state) { return "INVALID"; } } + + private static final AtomicInteger nextSchedulerId = new AtomicInteger(0); + + static ScheduledExecutorService newDefaultScheduler() { + AtomicInteger nextThreadId = new AtomicInteger(0); + return Executors.newScheduledThreadPool(0, new ThreadFactory() { + /** + * Constructs a new {@code Thread} + */ + @Override + public Thread newThread(Runnable r) { + Thread thread = Executors.defaultThreadFactory().newThread(r); + thread.setName(String.format("WebRtcAudioRecordScheduler-%s-%s", + nextSchedulerId.getAndIncrement(), nextThreadId.getAndIncrement())); + return thread; + } + }); + } } diff --git a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java index edc9dd179d..5e1201d5ca 100644 --- a/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java +++ b/sdk/android/src/java/org/webrtc/audio/WebRtcAudioTrack.java @@ -19,7 +19,6 @@ import android.os.Build; import android.os.Process; import android.support.annotation.Nullable; -import java.lang.Thread; import java.nio.ByteBuffer; import org.webrtc.CalledByNative; import org.webrtc.Logging; @@ -27,6 +26,7 @@ import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackErrorCallback; import 
org.webrtc.audio.JavaAudioDeviceModule.AudioTrackStartErrorCode; import org.webrtc.audio.JavaAudioDeviceModule.AudioTrackStateCallback; +import org.webrtc.audio.LowLatencyAudioBufferManager; class WebRtcAudioTrack { private static final String TAG = "WebRtcAudioTrackExternal"; @@ -71,6 +71,7 @@ private static int getDefaultUsageAttribute() { private ByteBuffer byteBuffer; + private @Nullable final AudioAttributes audioAttributes; private @Nullable AudioTrack audioTrack; private @Nullable AudioTrackThread audioThread; private final VolumeLogger volumeLogger; @@ -79,6 +80,8 @@ private static int getDefaultUsageAttribute() { // Can be used to ensure that the speaker is fully muted. private volatile boolean speakerMute; private byte[] emptyBytes; + private boolean useLowLatency; + private int initialBufferSizeInFrames; private final @Nullable AudioTrackErrorCallback errorCallback; private final @Nullable AudioTrackStateCallback stateCallback; @@ -91,9 +94,11 @@ private static int getDefaultUsageAttribute() { */ private class AudioTrackThread extends Thread { private volatile boolean keepAlive = true; + private LowLatencyAudioBufferManager bufferManager; public AudioTrackThread(String name) { super(name); + bufferManager = new LowLatencyAudioBufferManager(); } @Override @@ -133,6 +138,9 @@ public void run() { reportWebRtcAudioTrackError("AudioTrack.write failed: " + bytesWritten); } } + if (useLowLatency) { + bufferManager.maybeAdjustBufferSize(audioTrack); + } // The byte buffer must be rewinded since byteBuffer.position() is // increased at each call to AudioTrack.write(). If we don't do this, // next call to AudioTrack.write() will fail. 
@@ -162,18 +170,21 @@ public void stopThread() { @CalledByNative WebRtcAudioTrack(Context context, AudioManager audioManager) { - this(context, audioManager, null /* errorCallback */, null /* stateCallback */); + this(context, audioManager, null /* audioAttributes */, null /* errorCallback */, + null /* stateCallback */, false /* useLowLatency */); } WebRtcAudioTrack(Context context, AudioManager audioManager, - @Nullable AudioTrackErrorCallback errorCallback, - @Nullable AudioTrackStateCallback stateCallback) { + @Nullable AudioAttributes audioAttributes, @Nullable AudioTrackErrorCallback errorCallback, + @Nullable AudioTrackStateCallback stateCallback, boolean useLowLatency) { threadChecker.detachThread(); this.context = context; this.audioManager = audioManager; + this.audioAttributes = audioAttributes; this.errorCallback = errorCallback; this.stateCallback = stateCallback; this.volumeLogger = new VolumeLogger(audioManager); + this.useLowLatency = useLowLatency; Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo()); } @@ -183,7 +194,7 @@ public void setNativeAudioTrack(long nativeAudioTrack) { } @CalledByNative - private boolean initPlayout(int sampleRate, int channels, double bufferSizeFactor) { + private int initPlayout(int sampleRate, int channels, double bufferSizeFactor) { threadChecker.checkIsOnValidThread(); Logging.d(TAG, "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels @@ -212,27 +223,38 @@ private boolean initPlayout(int sampleRate, int channels, double bufferSizeFacto // can happen that |minBufferSizeInBytes| contains an invalid value. if (minBufferSizeInBytes < byteBuffer.capacity()) { reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value."); - return false; + return -1; + } + + // Don't use low-latency mode when a bufferSizeFactor > 1 is used. When bufferSizeFactor > 1 + // we want to use a larger buffer to prevent underruns. 
However, low-latency mode would + // decrease the buffer size, which makes the bufferSizeFactor have no effect. + if (bufferSizeFactor > 1.0) { + useLowLatency = false; } // Ensure that prevision audio session was stopped correctly before trying // to create a new AudioTrack. if (audioTrack != null) { reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack."); - return false; + return -1; } try { // Create an AudioTrack object and initialize its associated audio buffer. // The size of this buffer determines how long an AudioTrack can play // before running out of data. - if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { + if (useLowLatency && Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) { + // On API level 26 or higher, we can use a low latency mode. + audioTrack = createAudioTrackOnOreoOrHigher( + sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes); + } else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) { // If we are on API level 21 or higher, it is possible to use a special AudioTrack // constructor that uses AudioAttributes and AudioFormat as input. It allows us to // supersede the notion of stream types for defining the behavior of audio playback, // and to allow certain platforms or routing policies to use this information for more // refined volume or routing decisions. - audioTrack = - createAudioTrackOnLollipopOrHigher(sampleRate, channelConfig, minBufferSizeInBytes); + audioTrack = createAudioTrackOnLollipopOrHigher( + sampleRate, channelConfig, minBufferSizeInBytes, audioAttributes); } else { // Use default constructor for API levels below 21. 
audioTrack = @@ -241,7 +263,7 @@ private boolean initPlayout(int sampleRate, int channels, double bufferSizeFacto } catch (IllegalArgumentException e) { reportWebRtcAudioTrackInitError(e.getMessage()); releaseAudioResources(); - return false; + return -1; } // It can happen that an AudioTrack is created but it was not successfully @@ -250,11 +272,16 @@ private boolean initPlayout(int sampleRate, int channels, double bufferSizeFacto if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) { reportWebRtcAudioTrackInitError("Initialization of audio track failed."); releaseAudioResources(); - return false; + return -1; + } + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { + initialBufferSizeInFrames = audioTrack.getBufferSizeInFrames(); + } else { + initialBufferSizeInFrames = -1; } logMainParameters(); logMainParametersExtended(); - return true; + return minBufferSizeInBytes; } @CalledByNative @@ -379,26 +406,49 @@ private void logMainParameters() { + "max gain: " + AudioTrack.getMaxVolume()); } - // Creates and AudioTrack instance using AudioAttributes and AudioFormat as input. - // It allows certain platforms or routing policies to use this information for more - // refined volume or routing decisions. - @TargetApi(Build.VERSION_CODES.LOLLIPOP) - private static AudioTrack createAudioTrackOnLollipopOrHigher( - int sampleRateInHz, int channelConfig, int bufferSizeInBytes) { - Logging.d(TAG, "createAudioTrackOnLollipopOrHigher"); - // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control - // performance when Android O is supported. Add some logging in the mean time. 
+ private static void logNativeOutputSampleRate(int requestedSampleRateInHz) { final int nativeOutputSampleRate = AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL); Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate); - if (sampleRateInHz != nativeOutputSampleRate) { + if (requestedSampleRateInHz != nativeOutputSampleRate) { Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native"); } + } + + private static AudioAttributes getAudioAttributes(@Nullable AudioAttributes overrideAttributes) { + AudioAttributes.Builder attributesBuilder = + new AudioAttributes.Builder() + .setUsage(DEFAULT_USAGE) + .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH); + + if (overrideAttributes != null) { + if (overrideAttributes.getUsage() != AudioAttributes.USAGE_UNKNOWN) { + attributesBuilder.setUsage(overrideAttributes.getUsage()); + } + if (overrideAttributes.getContentType() != AudioAttributes.CONTENT_TYPE_UNKNOWN) { + attributesBuilder.setContentType(overrideAttributes.getContentType()); + } + + attributesBuilder.setFlags(overrideAttributes.getFlags()); + + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) { + attributesBuilder = applyAttributesOnQOrHigher(attributesBuilder, overrideAttributes); + } + } + return attributesBuilder.build(); + } + + // Creates and AudioTrack instance using AudioAttributes and AudioFormat as input. + // It allows certain platforms or routing policies to use this information for more + // refined volume or routing decisions. + @TargetApi(Build.VERSION_CODES.LOLLIPOP) + private static AudioTrack createAudioTrackOnLollipopOrHigher(int sampleRateInHz, + int channelConfig, int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) { + Logging.d(TAG, "createAudioTrackOnLollipopOrHigher"); + logNativeOutputSampleRate(sampleRateInHz); + // Create an audio track where the audio usage is for VoIP and the content type is speech. 
- return new AudioTrack(new AudioAttributes.Builder() - .setUsage(DEFAULT_USAGE) - .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH) - .build(), + return new AudioTrack(getAudioAttributes(overrideAttributes), new AudioFormat.Builder() .setEncoding(AudioFormat.ENCODING_PCM_16BIT) .setSampleRate(sampleRateInHz) @@ -407,6 +457,38 @@ private static AudioTrack createAudioTrackOnLollipopOrHigher( bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE); } + // Creates and AudioTrack instance using AudioAttributes and AudioFormat as input. + // Use the low-latency mode to improve audio latency. Note that the low-latency mode may + // prevent effects (such as AEC) from working. Assuming AEC is working, the delay changes + // that happen in low-latency mode during the call will cause the AEC to perform worse. + // The behavior of the low-latency mode may be device dependent, use at your own risk. + @TargetApi(Build.VERSION_CODES.O) + private static AudioTrack createAudioTrackOnOreoOrHigher(int sampleRateInHz, int channelConfig, + int bufferSizeInBytes, @Nullable AudioAttributes overrideAttributes) { + Logging.d(TAG, "createAudioTrackOnOreoOrHigher"); + logNativeOutputSampleRate(sampleRateInHz); + + // Create an audio track where the audio usage is for VoIP and the content type is speech. 
+ return new AudioTrack.Builder() + .setAudioAttributes(getAudioAttributes(overrideAttributes)) + .setAudioFormat(new AudioFormat.Builder() + .setEncoding(AudioFormat.ENCODING_PCM_16BIT) + .setSampleRate(sampleRateInHz) + .setChannelMask(channelConfig) + .build()) + .setBufferSizeInBytes(bufferSizeInBytes) + .setPerformanceMode(AudioTrack.PERFORMANCE_MODE_LOW_LATENCY) + .setTransferMode(AudioTrack.MODE_STREAM) + .setSessionId(AudioManager.AUDIO_SESSION_ID_GENERATE) + .build(); + } + + @TargetApi(Build.VERSION_CODES.Q) + private static AudioAttributes.Builder applyAttributesOnQOrHigher( + AudioAttributes.Builder builder, AudioAttributes overrideAttributes) { + return builder.setAllowedCapturePolicy(overrideAttributes.getAllowedCapturePolicy()); + } + @SuppressWarnings("deprecation") // Deprecated in API level 25. private static AudioTrack createAudioTrackOnLowerThanLollipop( int sampleRateInHz, int channelConfig, int bufferSizeInBytes) { @@ -423,6 +505,19 @@ private void logBufferSizeInFrames() { } } + @CalledByNative + private int getBufferSizeInFrames() { + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { + return audioTrack.getBufferSizeInFrames(); + } + return -1; + } + + @CalledByNative + private int getInitialBufferSizeInFrames() { + return initialBufferSizeInFrames; + } + private void logBufferCapacityInFrames() { if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) { Logging.d(TAG, diff --git a/sdk/android/src/jni/OWNERS b/sdk/android/src/jni/OWNERS index 4f2f242102..557373424b 100644 --- a/sdk/android/src/jni/OWNERS +++ b/sdk/android/src/jni/OWNERS @@ -1,4 +1,4 @@ -per-file androidhistogram.cc=sakal@webrtc.org -per-file androidmetrics.cc=sakal@webrtc.org -per-file androidvideotracksource.*=sakal@webrtc.org -per-file androidvideotracksource.cc=sakal@webrtc.org +per-file androidhistogram.cc=xalep@webrtc.org +per-file androidmetrics.cc=xalep@webrtc.org +per-file androidvideotracksource.*=xalep@webrtc.org +per-file 
androidvideotracksource.cc=xalep@webrtc.org diff --git a/sdk/android/src/jni/android_media_codec_common.h b/sdk/android/src/jni/android_media_codec_common.h deleted file mode 100644 index be2eb19ba6..0000000000 --- a/sdk/android/src/jni/android_media_codec_common.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef SDK_ANDROID_SRC_JNI_ANDROID_MEDIA_CODEC_COMMON_H_ -#define SDK_ANDROID_SRC_JNI_ANDROID_MEDIA_CODEC_COMMON_H_ - -#include - -#include "rtc_base/logging.h" -#include "rtc_base/thread.h" -#include "sdk/android/src/jni/jni_helpers.h" - -namespace webrtc { -namespace jni { - -// Uncomment this define to enable verbose logging for every encoded/decoded -// video frame. -//#define TRACK_BUFFER_TIMING - -#define TAG_COMMON "MediaCodecVideo" - -// Color formats supported by encoder or decoder - should include all -// colors from supportedColorList in MediaCodecVideoEncoder.java and -// MediaCodecVideoDecoder.java. Supported color format set in encoder -// and decoder could be different. -enum COLOR_FORMATTYPE { - COLOR_FormatYUV420Planar = 0x13, - COLOR_FormatYUV420SemiPlanar = 0x15, - COLOR_QCOM_FormatYUV420SemiPlanar = 0x7FA30C00, - // NV12 color format supported by QCOM codec, but not declared in MediaCodec - - // see /hardware/qcom/media/mm-core/inc/OMX_QCOMExtns.h - // This format is presumably similar to COLOR_FormatYUV420SemiPlanar, - // but requires some (16, 32?) byte alignment. 
- COLOR_QCOM_FORMATYVU420PackedSemiPlanar32m4ka = 0x7FA30C01, - COLOR_QCOM_FORMATYVU420PackedSemiPlanar16m4ka = 0x7FA30C02, - COLOR_QCOM_FORMATYVU420PackedSemiPlanar64x32Tile2m8ka = 0x7FA30C03, - COLOR_QCOM_FORMATYUV420PackedSemiPlanar32m = 0x7FA30C04 -}; - -// Arbitrary interval to poll the codec for new outputs. -enum { kMediaCodecPollMs = 10 }; -// Arbitrary interval to poll at when there should be no more frames. -enum { kMediaCodecPollNoFramesMs = 100 }; -// Media codec maximum output buffer ready timeout. -enum { kMediaCodecTimeoutMs = 1000 }; -// Interval to print codec statistics (bitrate, fps, encoding/decoding time). -enum { kMediaCodecStatisticsIntervalMs = 3000 }; -// Maximum amount of pending frames for VP8 decoder. -enum { kMaxPendingFramesVp8 = 1 }; -// Maximum amount of pending frames for VP9 decoder. -enum { kMaxPendingFramesVp9 = 1 }; -// Maximum amount of pending frames for H.264 decoder. -enum { kMaxPendingFramesH264 = 4 }; -// Maximum amount of decoded frames for which per-frame logging is enabled. -enum { kMaxDecodedLogFrames = 10 }; -// Maximum amount of encoded frames for which per-frame logging is enabled. -enum { kMaxEncodedLogFrames = 10 }; - -static inline void AllowBlockingCalls() { - rtc::Thread* current_thread = rtc::Thread::Current(); - if (current_thread != NULL) - current_thread->DEPRECATED_AllowBlockingCalls(); -} - -// Checks for any Java exception, prints stack backtrace and clears -// currently thrown exception. 
-static inline bool CheckException(JNIEnv* jni) { - if (jni->ExceptionCheck()) { - RTC_LOG_TAG(rtc::LS_ERROR, TAG_COMMON) << "Java JNI exception."; - jni->ExceptionDescribe(); - jni->ExceptionClear(); - return true; - } - return false; -} - -} // namespace jni -} // namespace webrtc - -#endif // SDK_ANDROID_SRC_JNI_ANDROID_MEDIA_CODEC_COMMON_H_ diff --git a/sdk/android/src/jni/android_network_monitor.cc b/sdk/android/src/jni/android_network_monitor.cc index 69e89564e3..686f94e1e6 100644 --- a/sdk/android/src/jni/android_network_monitor.cc +++ b/sdk/android/src/jni/android_network_monitor.cc @@ -16,12 +16,13 @@ #define RTLD_NOLOAD 4 #endif -#include "rtc_base/bind.h" +#include "api/sequence_checker.h" #include "rtc_base/checks.h" #include "rtc_base/ip_address.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" -#include "sdk/android/generated_base_jni/NetworkMonitorAutoDetect_jni.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "sdk/android/generated_base_jni/NetworkChangeDetector_jni.h" #include "sdk/android/generated_base_jni/NetworkMonitor_jni.h" #include "sdk/android/native_api/jni/java_types.h" #include "sdk/android/src/jni/jni_helpers.h" @@ -30,6 +31,37 @@ namespace webrtc { namespace jni { +namespace { + +const char* NetworkTypeToString(NetworkType type) { + switch (type) { + case NETWORK_UNKNOWN: + return "UNKNOWN"; + case NETWORK_ETHERNET: + return "ETHERNET"; + case NETWORK_WIFI: + return "WIFI"; + case NETWORK_5G: + return "5G"; + case NETWORK_4G: + return "4G"; + case NETWORK_3G: + return "3G"; + case NETWORK_2G: + return "2G"; + case NETWORK_UNKNOWN_CELLULAR: + return "UNKNOWN_CELLULAR"; + case NETWORK_BLUETOOTH: + return "BLUETOOTH"; + case NETWORK_VPN: + return "VPN"; + case NETWORK_NONE: + return "NONE"; + } +} + +} // namespace + enum AndroidSdkVersion { SDK_VERSION_LOLLIPOP = 21, SDK_VERSION_MARSHMALLOW = 23 @@ -196,12 +228,15 @@ AndroidNetworkMonitor::AndroidNetworkMonitor( const JavaRef& 
j_application_context) : android_sdk_int_(Java_NetworkMonitor_androidSdkInt(env)), j_application_context_(env, j_application_context), - j_network_monitor_(env, Java_NetworkMonitor_getInstance(env)) {} + j_network_monitor_(env, Java_NetworkMonitor_getInstance(env)), + network_thread_(rtc::Thread::Current()) {} -AndroidNetworkMonitor::~AndroidNetworkMonitor() = default; +AndroidNetworkMonitor::~AndroidNetworkMonitor() { + RTC_DCHECK(!started_); +} void AndroidNetworkMonitor::Start() { - RTC_CHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); if (started_) { return; } @@ -211,11 +246,13 @@ void AndroidNetworkMonitor::Start() { find_network_handle_without_ipv6_temporary_part_ = webrtc::field_trial::IsEnabled( "WebRTC-FindNetworkHandleWithoutIpv6TemporaryPart"); + bind_using_ifname_ = + !webrtc::field_trial::IsDisabled("WebRTC-BindUsingInterfaceName"); - // This is kind of magic behavior, but doing this allows the SocketServer to - // use this as a NetworkBinder to bind sockets on a particular network when - // it creates sockets. - worker_thread()->socketserver()->set_network_binder(this); + // This pointer is also accessed by the methods called from java threads. + // Assigning it here is safe, because the java monitor is in a stopped state, + // and will not make any callbacks. + safety_flag_ = PendingTaskSafetyFlag::Create(); JNIEnv* env = AttachCurrentThreadIfNeeded(); Java_NetworkMonitor_startMonitoring( @@ -223,18 +260,16 @@ void AndroidNetworkMonitor::Start() { } void AndroidNetworkMonitor::Stop() { - RTC_CHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); if (!started_) { return; } started_ = false; find_network_handle_without_ipv6_temporary_part_ = false; - // Once the network monitor stops, it will clear all network information and - // it won't find the network handle to bind anyway. 
- if (worker_thread()->socketserver()->network_binder() == this) { - worker_thread()->socketserver()->set_network_binder(nullptr); - } + // Cancel any pending tasks. We should not call SignalNetworksChanged when the + // monitor is stopped. + safety_flag_->SetNotAlive(); JNIEnv* env = AttachCurrentThreadIfNeeded(); Java_NetworkMonitor_stopMonitoring(env, j_network_monitor_, @@ -248,8 +283,9 @@ void AndroidNetworkMonitor::Stop() { // https://cs.chromium.org/chromium/src/net/udp/udp_socket_posix.cc rtc::NetworkBindingResult AndroidNetworkMonitor::BindSocketToNetwork( int socket_fd, - const rtc::IPAddress& address) { - RTC_CHECK(thread_checker_.IsCurrent()); + const rtc::IPAddress& address, + const std::string& if_name) { + RTC_DCHECK_RUN_ON(network_thread_); // Android prior to Lollipop didn't have support for binding sockets to // networks. This may also occur if there is no connectivity manager @@ -266,12 +302,18 @@ rtc::NetworkBindingResult AndroidNetworkMonitor::BindSocketToNetwork( } absl::optional network_handle = - FindNetworkHandleFromAddress(address); + FindNetworkHandleFromAddressOrName(address, if_name); if (!network_handle) { + RTC_LOG(LS_WARNING) + << "BindSocketToNetwork unable to find network handle for" + << " addr: " << address.ToSensitiveString() << " ifname: " << if_name; return rtc::NetworkBindingResult::ADDRESS_NOT_FOUND; } if (*network_handle == 0 /* NETWORK_UNSPECIFIED */) { + RTC_LOG(LS_WARNING) << "BindSocketToNetwork 0 network handle for" + << " addr: " << address.ToSensitiveString() + << " ifname: " << if_name; return rtc::NetworkBindingResult::NOT_IMPLEMENTED; } @@ -338,25 +380,25 @@ rtc::NetworkBindingResult AndroidNetworkMonitor::BindSocketToNetwork( // ERR_NETWORK_CHANGED, rather than MapSystemError(ENONET) which gives back // the less descriptive ERR_FAILED. 
if (rv == 0) { + RTC_LOG(LS_VERBOSE) << "BindSocketToNetwork bound network handle for" + << " addr: " << address.ToSensitiveString() + << " ifname: " << if_name; return rtc::NetworkBindingResult::SUCCESS; } + + RTC_LOG(LS_WARNING) << "BindSocketToNetwork got error: " << rv + << " addr: " << address.ToSensitiveString() + << " ifname: " << if_name; if (rv == ENONET) { return rtc::NetworkBindingResult::NETWORK_CHANGED; } - return rtc::NetworkBindingResult::FAILURE; -} -void AndroidNetworkMonitor::OnNetworkConnected( - const NetworkInformation& network_info) { - worker_thread()->Invoke( - RTC_FROM_HERE, rtc::Bind(&AndroidNetworkMonitor::OnNetworkConnected_w, - this, network_info)); - // Fire SignalNetworksChanged to update the list of networks. - OnNetworksChanged(); + return rtc::NetworkBindingResult::FAILURE; } -void AndroidNetworkMonitor::OnNetworkConnected_w( +void AndroidNetworkMonitor::OnNetworkConnected_n( const NetworkInformation& network_info) { + RTC_DCHECK_RUN_ON(network_thread_); RTC_LOG(LS_INFO) << "Network connected: " << network_info.ToString(); adapter_type_by_name_[network_info.interface_name] = AdapterTypeFromNetworkType(network_info.type, surface_cellular_types_); @@ -369,11 +411,14 @@ void AndroidNetworkMonitor::OnNetworkConnected_w( for (const rtc::IPAddress& address : network_info.ip_addresses) { network_handle_by_address_[address] = network_info.handle; } + SignalNetworksChanged(); } absl::optional -AndroidNetworkMonitor::FindNetworkHandleFromAddress( - const rtc::IPAddress& ip_address) const { +AndroidNetworkMonitor::FindNetworkHandleFromAddressOrName( + const rtc::IPAddress& ip_address, + const std::string& if_name) const { + RTC_DCHECK_RUN_ON(network_thread_); RTC_LOG(LS_INFO) << "Find network handle."; if (find_network_handle_without_ipv6_temporary_part_) { for (auto const& iter : network_info_by_handle_) { @@ -386,24 +431,36 @@ AndroidNetworkMonitor::FindNetworkHandleFromAddress( return absl::make_optional(iter.first); } } - return 
absl::nullopt; } else { auto iter = network_handle_by_address_.find(ip_address); - if (iter == network_handle_by_address_.end()) { - return absl::nullopt; + if (iter != network_handle_by_address_.end()) { + return absl::make_optional(iter->second); } - return absl::make_optional(iter->second); } + + return FindNetworkHandleFromIfname(if_name); } -void AndroidNetworkMonitor::OnNetworkDisconnected(NetworkHandle handle) { - RTC_LOG(LS_INFO) << "Network disconnected for handle " << handle; - worker_thread()->Invoke( - RTC_FROM_HERE, - rtc::Bind(&AndroidNetworkMonitor::OnNetworkDisconnected_w, this, handle)); +absl::optional +AndroidNetworkMonitor::FindNetworkHandleFromIfname( + const std::string& if_name) const { + RTC_DCHECK_RUN_ON(network_thread_); + if (bind_using_ifname_) { + for (auto const& iter : network_info_by_handle_) { + if (if_name.find(iter.second.interface_name) != std::string::npos) { + // Use partial match so that e.g if_name="v4-wlan0" is matched + // agains iter.first="wlan0" + return absl::make_optional(iter.first); + } + } + } + + return absl::nullopt; } -void AndroidNetworkMonitor::OnNetworkDisconnected_w(NetworkHandle handle) { +void AndroidNetworkMonitor::OnNetworkDisconnected_n(NetworkHandle handle) { + RTC_DCHECK_RUN_ON(network_thread_); + RTC_LOG(LS_INFO) << "Network disconnected for handle " << handle; auto iter = network_info_by_handle_.find(handle); if (iter != network_info_by_handle_.end()) { for (const rtc::IPAddress& address : iter->second.ip_addresses) { @@ -413,24 +470,49 @@ void AndroidNetworkMonitor::OnNetworkDisconnected_w(NetworkHandle handle) { } } +void AndroidNetworkMonitor::OnNetworkPreference_n( + NetworkType type, + rtc::NetworkPreference preference) { + RTC_DCHECK_RUN_ON(network_thread_); + RTC_LOG(LS_INFO) << "Android network monitor preference for " + << NetworkTypeToString(type) << " changed to " + << rtc::NetworkPreferenceToString(preference); + auto adapter_type = AdapterTypeFromNetworkType(type, 
surface_cellular_types_); + network_preference_by_adapter_type_[adapter_type] = preference; + SignalNetworksChanged(); +} + void AndroidNetworkMonitor::SetNetworkInfos( const std::vector& network_infos) { - RTC_CHECK(thread_checker_.IsCurrent()); + RTC_DCHECK_RUN_ON(network_thread_); network_handle_by_address_.clear(); network_info_by_handle_.clear(); RTC_LOG(LS_INFO) << "Android network monitor found " << network_infos.size() << " networks"; for (const NetworkInformation& network : network_infos) { - OnNetworkConnected_w(network); + OnNetworkConnected_n(network); } } rtc::AdapterType AndroidNetworkMonitor::GetAdapterType( const std::string& if_name) { + RTC_DCHECK_RUN_ON(network_thread_); auto iter = adapter_type_by_name_.find(if_name); rtc::AdapterType type = (iter == adapter_type_by_name_.end()) ? rtc::ADAPTER_TYPE_UNKNOWN : iter->second; + + if (type == rtc::ADAPTER_TYPE_UNKNOWN && bind_using_ifname_) { + for (auto const& iter : adapter_type_by_name_) { + // Use partial match so that e.g if_name="v4-wlan0" is matched + // agains iter.first="wlan0" + if (if_name.find(iter.first) != std::string::npos) { + type = iter.second; + break; + } + } + } + if (type == rtc::ADAPTER_TYPE_UNKNOWN) { RTC_LOG(LS_WARNING) << "Get an unknown type for the interface " << if_name; } @@ -439,13 +521,49 @@ rtc::AdapterType AndroidNetworkMonitor::GetAdapterType( rtc::AdapterType AndroidNetworkMonitor::GetVpnUnderlyingAdapterType( const std::string& if_name) { + RTC_DCHECK_RUN_ON(network_thread_); auto iter = vpn_underlying_adapter_type_by_name_.find(if_name); rtc::AdapterType type = (iter == vpn_underlying_adapter_type_by_name_.end()) ? 
rtc::ADAPTER_TYPE_UNKNOWN : iter->second; + if (type == rtc::ADAPTER_TYPE_UNKNOWN && bind_using_ifname_) { + // Use partial match so that e.g if_name="v4-wlan0" is matched + // agains iter.first="wlan0" + for (auto const& iter : vpn_underlying_adapter_type_by_name_) { + if (if_name.find(iter.first) != std::string::npos) { + type = iter.second; + break; + } + } + } + return type; } +rtc::NetworkPreference AndroidNetworkMonitor::GetNetworkPreference( + const std::string& if_name) { + RTC_DCHECK_RUN_ON(network_thread_); + auto iter = adapter_type_by_name_.find(if_name); + if (iter == adapter_type_by_name_.end()) { + return rtc::NetworkPreference::NEUTRAL; + } + + rtc::AdapterType adapter_type = iter->second; + if (adapter_type == rtc::ADAPTER_TYPE_VPN) { + auto iter2 = vpn_underlying_adapter_type_by_name_.find(if_name); + if (iter2 != vpn_underlying_adapter_type_by_name_.end()) { + adapter_type = iter2->second; + } + } + + auto preference_iter = network_preference_by_adapter_type_.find(adapter_type); + if (preference_iter == network_preference_by_adapter_type_.end()) { + return rtc::NetworkPreference::NEUTRAL; + } + + return preference_iter->second; +} + AndroidNetworkMonitorFactory::AndroidNetworkMonitorFactory() : j_application_context_(nullptr) {} @@ -465,7 +583,11 @@ AndroidNetworkMonitorFactory::CreateNetworkMonitor() { void AndroidNetworkMonitor::NotifyConnectionTypeChanged( JNIEnv* env, const JavaRef& j_caller) { - OnNetworksChanged(); + network_thread_->PostTask(ToQueuedTask(safety_flag_, [this] { + RTC_LOG(LS_INFO) + << "Android network monitor detected connection type change."; + SignalNetworksChanged(); + })); } void AndroidNetworkMonitor::NotifyOfActiveNetworkList( @@ -484,14 +606,33 @@ void AndroidNetworkMonitor::NotifyOfNetworkConnect( const JavaRef& j_network_info) { NetworkInformation network_info = GetNetworkInformationFromJava(env, j_network_info); - OnNetworkConnected(network_info); + network_thread_->PostTask(ToQueuedTask( + safety_flag_, [this, 
network_info = std::move(network_info)] { + OnNetworkConnected_n(network_info); + })); } void AndroidNetworkMonitor::NotifyOfNetworkDisconnect( JNIEnv* env, const JavaRef& j_caller, jlong network_handle) { - OnNetworkDisconnected(static_cast(network_handle)); + network_thread_->PostTask(ToQueuedTask(safety_flag_, [this, network_handle] { + OnNetworkDisconnected_n(static_cast(network_handle)); + })); +} + +void AndroidNetworkMonitor::NotifyOfNetworkPreference( + JNIEnv* env, + const JavaRef& j_caller, + const JavaRef& j_connection_type, + jint jpreference) { + NetworkType type = GetNetworkTypeFromJava(env, j_connection_type); + rtc::NetworkPreference preference = + static_cast(jpreference); + + network_thread_->PostTask(ToQueuedTask( + safety_flag_, + [this, type, preference] { OnNetworkPreference_n(type, preference); })); } } // namespace jni diff --git a/sdk/android/src/jni/android_network_monitor.h b/sdk/android/src/jni/android_network_monitor.h index 1d795df991..423ae3a66c 100644 --- a/sdk/android/src/jni/android_network_monitor.h +++ b/sdk/android/src/jni/android_network_monitor.h @@ -12,13 +12,17 @@ #define SDK_ANDROID_SRC_JNI_ANDROID_NETWORK_MONITOR_H_ #include + #include #include #include #include "absl/types/optional.h" #include "rtc_base/network_monitor.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/network_monitor_factory.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" #include "sdk/android/src/jni/jni_helpers.h" namespace webrtc { @@ -26,7 +30,7 @@ namespace jni { typedef int64_t NetworkHandle; -// c++ equivalent of java NetworkMonitorAutoDetect.ConnectionType. +// c++ equivalent of java NetworkChangeDetector.ConnectionType. 
enum NetworkType { NETWORK_UNKNOWN, NETWORK_ETHERNET, @@ -60,8 +64,7 @@ struct NetworkInformation { std::string ToString() const; }; -class AndroidNetworkMonitor : public rtc::NetworkMonitorBase, - public rtc::NetworkBinderInterface { +class AndroidNetworkMonitor : public rtc::NetworkMonitorInterface { public: AndroidNetworkMonitor(JNIEnv* env, const JavaRef& j_application_context); @@ -73,14 +76,20 @@ class AndroidNetworkMonitor : public rtc::NetworkMonitorBase, void Start() override; void Stop() override; + // Does |this| NetworkMonitorInterface implement BindSocketToNetwork? + // Only Android returns true. + virtual bool SupportsBindSocketToNetwork() const override { return true; } + rtc::NetworkBindingResult BindSocketToNetwork( int socket_fd, - const rtc::IPAddress& address) override; + const rtc::IPAddress& address, + const std::string& if_name) override; rtc::AdapterType GetAdapterType(const std::string& if_name) override; rtc::AdapterType GetVpnUnderlyingAdapterType( const std::string& if_name) override; - void OnNetworkConnected(const NetworkInformation& network_info); - void OnNetworkDisconnected(NetworkHandle network_handle); + rtc::NetworkPreference GetNetworkPreference( + const std::string& if_name) override; + // Always expected to be called on the network thread. void SetNetworkInfos(const std::vector& network_infos); @@ -95,26 +104,53 @@ class AndroidNetworkMonitor : public rtc::NetworkMonitorBase, void NotifyOfActiveNetworkList(JNIEnv* env, const JavaRef& j_caller, const JavaRef& j_network_infos); + void NotifyOfNetworkPreference(JNIEnv* env, + const JavaRef& j_caller, + const JavaRef& j_connection_type, + jint preference); // Visible for testing. 
- absl::optional FindNetworkHandleFromAddress( - const rtc::IPAddress& address) const; + absl::optional FindNetworkHandleFromAddressOrName( + const rtc::IPAddress& address, + const std::string& ifname) const; private: - void OnNetworkConnected_w(const NetworkInformation& network_info); - void OnNetworkDisconnected_w(NetworkHandle network_handle); + void OnNetworkConnected_n(const NetworkInformation& network_info); + void OnNetworkDisconnected_n(NetworkHandle network_handle); + void OnNetworkPreference_n(NetworkType type, + rtc::NetworkPreference preference); + + absl::optional FindNetworkHandleFromIfname( + const std::string& ifname) const; const int android_sdk_int_; ScopedJavaGlobalRef j_application_context_; ScopedJavaGlobalRef j_network_monitor_; - rtc::ThreadChecker thread_checker_; - bool started_ = false; - std::map adapter_type_by_name_; - std::map vpn_underlying_adapter_type_by_name_; - std::map network_handle_by_address_; - std::map network_info_by_handle_; - bool find_network_handle_without_ipv6_temporary_part_; - bool surface_cellular_types_; + rtc::Thread* const network_thread_; + bool started_ RTC_GUARDED_BY(network_thread_) = false; + std::map adapter_type_by_name_ + RTC_GUARDED_BY(network_thread_); + std::map vpn_underlying_adapter_type_by_name_ + RTC_GUARDED_BY(network_thread_); + std::map network_handle_by_address_ + RTC_GUARDED_BY(network_thread_); + std::map network_info_by_handle_ + RTC_GUARDED_BY(network_thread_); + std::map + network_preference_by_adapter_type_ RTC_GUARDED_BY(network_thread_); + bool find_network_handle_without_ipv6_temporary_part_ + RTC_GUARDED_BY(network_thread_) = false; + bool surface_cellular_types_ RTC_GUARDED_BY(network_thread_) = false; + + // NOTE: if bind_using_ifname_ is TRUE + // then the adapter name is used with substring matching as follows: + // An adapater name repored by android as 'wlan0' + // will be matched with 'v4-wlan0' ("v4-wlan0".find("wlan0") != npos). 
+ // This applies to adapter_type_by_name_, vpn_underlying_adapter_type_by_name_ + // and FindNetworkHandleFromIfname. + bool bind_using_ifname_ RTC_GUARDED_BY(network_thread_) = true; + rtc::scoped_refptr safety_flag_ + RTC_PT_GUARDED_BY(network_thread_) = nullptr; }; class AndroidNetworkMonitorFactory : public rtc::NetworkMonitorFactory { diff --git a/sdk/android/src/jni/android_video_track_source.cc b/sdk/android/src/jni/android_video_track_source.cc index f8455c91fa..72cf3955f0 100644 --- a/sdk/android/src/jni/android_video_track_source.cc +++ b/sdk/android/src/jni/android_video_track_source.cc @@ -14,7 +14,6 @@ #include -#include "rtc_base/bind.h" #include "rtc_base/logging.h" namespace webrtc { @@ -68,12 +67,7 @@ void AndroidVideoTrackSource::SetState(JNIEnv* env, } else { // TODO(sakal): Is this even necessary, does FireOnChanged have to be // called from signaling thread? - signaling_thread_->PostTask( - RTC_FROM_HERE, - rtc::Bind( - &AndroidVideoTrackSource::FireOnChanged, - static_cast*>( - this))); + signaling_thread_->PostTask(RTC_FROM_HERE, [this] { FireOnChanged(); }); } } } diff --git a/sdk/android/src/jni/android_video_track_source.h b/sdk/android/src/jni/android_video_track_source.h index 378d380a11..eeac48f1e8 100644 --- a/sdk/android/src/jni/android_video_track_source.h +++ b/sdk/android/src/jni/android_video_track_source.h @@ -13,10 +13,8 @@ #include -#include "common_video/include/i420_buffer_pool.h" #include "common_video/libyuv/include/webrtc_libyuv.h" #include "media/base/adapted_video_track_source.h" -#include "rtc_base/async_invoker.h" #include "rtc_base/checks.h" #include "rtc_base/thread.h" #include "rtc_base/timestamp_aligner.h" diff --git a/sdk/android/src/jni/audio_device/aaudio_player.h b/sdk/android/src/jni/audio_device/aaudio_player.h index e6146d0d47..5f9a9eace9 100644 --- a/sdk/android/src/jni/audio_device/aaudio_player.h +++ b/sdk/android/src/jni/audio_device/aaudio_player.h @@ -12,15 +12,16 @@ #define 
SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_PLAYER_H_ #include + #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "modules/audio_device/audio_device_buffer.h" #include "modules/audio_device/include/audio_device_defines.h" #include "rtc_base/message_handler.h" #include "rtc_base/thread.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "sdk/android/src/jni/audio_device/aaudio_wrapper.h" #include "sdk/android/src/jni/audio_device/audio_device_module.h" @@ -99,12 +100,12 @@ class AAudioPlayer final : public AudioOutput, // Ensures that methods are called from the same thread as this object is // created on. - rtc::ThreadChecker main_thread_checker_; + SequenceChecker main_thread_checker_; // Stores thread ID in first call to AAudioPlayer::OnDataCallback from a // real-time thread owned by AAudio. Detached during construction of this // object. - rtc::ThreadChecker thread_checker_aaudio_; + SequenceChecker thread_checker_aaudio_; // The thread on which this object is created on. 
rtc::Thread* main_thread_; diff --git a/sdk/android/src/jni/audio_device/aaudio_recorder.cc b/sdk/android/src/jni/audio_device/aaudio_recorder.cc index 65bef4b1ba..34b14f4509 100644 --- a/sdk/android/src/jni/audio_device/aaudio_recorder.cc +++ b/sdk/android/src/jni/audio_device/aaudio_recorder.cc @@ -18,8 +18,6 @@ #include "rtc_base/logging.h" #include "rtc_base/time_utils.h" -#include "system_wrappers/include/sleep.h" - namespace webrtc { namespace jni { diff --git a/sdk/android/src/jni/audio_device/aaudio_recorder.h b/sdk/android/src/jni/audio_device/aaudio_recorder.h index 0ed0fa2d5c..2b6aa03127 100644 --- a/sdk/android/src/jni/audio_device/aaudio_recorder.h +++ b/sdk/android/src/jni/audio_device/aaudio_recorder.h @@ -12,13 +12,14 @@ #define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AAUDIO_RECORDER_H_ #include + #include +#include "api/sequence_checker.h" #include "modules/audio_device/audio_device_buffer.h" #include "modules/audio_device/include/audio_device_defines.h" #include "rtc_base/message_handler.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" #include "sdk/android/src/jni/audio_device/aaudio_wrapper.h" #include "sdk/android/src/jni/audio_device/audio_device_module.h" @@ -90,12 +91,12 @@ class AAudioRecorder : public AudioInput, // Ensures that methods are called from the same thread as this object is // created on. - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; // Stores thread ID in first call to AAudioPlayer::OnDataCallback from a // real-time thread owned by AAudio. Detached during construction of this // object. - rtc::ThreadChecker thread_checker_aaudio_; + SequenceChecker thread_checker_aaudio_; // The thread on which this object is created on. 
rtc::Thread* main_thread_; diff --git a/sdk/android/src/jni/audio_device/aaudio_wrapper.h b/sdk/android/src/jni/audio_device/aaudio_wrapper.h index 1900ab988c..cbc78a0a25 100644 --- a/sdk/android/src/jni/audio_device/aaudio_wrapper.h +++ b/sdk/android/src/jni/audio_device/aaudio_wrapper.h @@ -13,8 +13,8 @@ #include +#include "api/sequence_checker.h" #include "modules/audio_device/include/audio_device_defines.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -113,8 +113,8 @@ class AAudioWrapper { bool VerifyStreamConfiguration(); bool OptimizeBuffers(); - rtc::ThreadChecker thread_checker_; - rtc::ThreadChecker aaudio_thread_checker_; + SequenceChecker thread_checker_; + SequenceChecker aaudio_thread_checker_; const AudioParameters audio_parameters_; const aaudio_direction_t direction_; AAudioObserverInterface* observer_ = nullptr; diff --git a/sdk/android/src/jni/audio_device/audio_device_module.cc b/sdk/android/src/jni/audio_device/audio_device_module.cc index b4cb184177..4c9c36b7ac 100644 --- a/sdk/android/src/jni/audio_device/audio_device_module.cc +++ b/sdk/android/src/jni/audio_device/audio_device_module.cc @@ -13,13 +13,13 @@ #include #include +#include "api/sequence_checker.h" #include "api/task_queue/default_task_queue_factory.h" #include "api/task_queue/task_queue_factory.h" #include "modules/audio_device/audio_device_buffer.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/ref_counted_object.h" -#include "rtc_base/thread_checker.h" #include "sdk/android/generated_audio_device_module_base_jni/WebRtcAudioManager_jni.h" #include "system_wrappers/include/metrics.h" @@ -70,26 +70,26 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { initialized_(false) { RTC_CHECK(input_); RTC_CHECK(output_); - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; thread_checker_.Detach(); } - ~AndroidAudioDeviceModule() override { RTC_LOG(INFO) << __FUNCTION__; } + ~AndroidAudioDeviceModule() override { 
RTC_DLOG(INFO) << __FUNCTION__; } int32_t ActiveAudioLayer( AudioDeviceModule::AudioLayer* audioLayer) const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; *audioLayer = audio_layer_; return 0; } int32_t RegisterAudioCallback(AudioTransport* audioCallback) override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return audio_device_buffer_->RegisterAudioCallback(audioCallback); } int32_t Init() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; RTC_DCHECK(thread_checker_.IsCurrent()); audio_device_buffer_ = std::make_unique(task_queue_factory_.get()); @@ -118,7 +118,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t Terminate() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return 0; RTC_DCHECK(thread_checker_.IsCurrent()); @@ -132,123 +132,119 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } bool Initialized() const override { - RTC_LOG(INFO) << __FUNCTION__ << ":" << initialized_; + RTC_DLOG(INFO) << __FUNCTION__ << ":" << initialized_; return initialized_; } int16_t PlayoutDevices() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; RTC_LOG(INFO) << "output: " << 1; return 1; } int16_t RecordingDevices() override { - RTC_LOG(INFO) << __FUNCTION__; - RTC_LOG(INFO) << "output: " << 1; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << "output: " << 1; return 1; } int32_t PlayoutDeviceName(uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) override { - FATAL() << "Should never be called"; - return -1; + RTC_CHECK_NOTREACHED(); } int32_t RecordingDeviceName(uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) override { - FATAL() << "Should never be called"; - return -1; + RTC_CHECK_NOTREACHED(); } int32_t SetPlayoutDevice(uint16_t index) override { // OK to use but it has no effect currently since device selection is 
// done using Andoid APIs instead. - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")"; return 0; } int32_t SetPlayoutDevice( AudioDeviceModule::WindowsDeviceType device) override { - FATAL() << "Should never be called"; - return -1; + RTC_CHECK_NOTREACHED(); } int32_t SetRecordingDevice(uint16_t index) override { // OK to use but it has no effect currently since device selection is // done using Andoid APIs instead. - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")"; return 0; } int32_t SetRecordingDevice( AudioDeviceModule::WindowsDeviceType device) override { - FATAL() << "Should never be called"; - return -1; + RTC_CHECK_NOTREACHED(); } int32_t PlayoutIsAvailable(bool* available) override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; *available = true; - RTC_LOG(INFO) << "output: " << *available; + RTC_DLOG(INFO) << "output: " << *available; return 0; } int32_t InitPlayout() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; if (PlayoutIsInitialized()) { return 0; } int32_t result = output_->InitPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess", static_cast(result == 0)); return result; } bool PlayoutIsInitialized() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return output_->PlayoutIsInitialized(); } int32_t RecordingIsAvailable(bool* available) override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; *available = true; - RTC_LOG(INFO) << "output: " << *available; + RTC_DLOG(INFO) << "output: " << *available; return 0; } int32_t InitRecording() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; if (RecordingIsInitialized()) { return 0; } int32_t 
result = input_->InitRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess", static_cast(result == 0)); return result; } bool RecordingIsInitialized() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return input_->RecordingIsInitialized(); } int32_t StartPlayout() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; if (Playing()) { return 0; } int32_t result = output_->StartPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess", static_cast(result == 0)); if (result == 0) { @@ -260,7 +256,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t StopPlayout() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; if (!Playing()) @@ -268,26 +264,26 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { RTC_LOG(INFO) << __FUNCTION__; audio_device_buffer_->StopPlayout(); int32_t result = output_->StopPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess", static_cast(result == 0)); return result; } bool Playing() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return output_->Playing(); } int32_t StartRecording() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; if (Recording()) { return 0; } int32_t result = input_->StartRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess", static_cast(result == 0)); if (result == 0) { @@ -299,74 +295,74 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t 
StopRecording() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; if (!Recording()) return 0; audio_device_buffer_->StopRecording(); int32_t result = input_->StopRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess", static_cast(result == 0)); return result; } bool Recording() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return input_->Recording(); } int32_t InitSpeaker() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return initialized_ ? 0 : -1; } bool SpeakerIsInitialized() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return initialized_; } int32_t InitMicrophone() override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return initialized_ ? 0 : -1; } bool MicrophoneIsInitialized() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return initialized_; } int32_t SpeakerVolumeIsAvailable(bool* available) override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; *available = output_->SpeakerVolumeIsAvailable(); - RTC_LOG(INFO) << "output: " << *available; + RTC_DLOG(INFO) << "output: " << *available; return 0; } int32_t SetSpeakerVolume(uint32_t volume) override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; return output_->SetSpeakerVolume(volume); } int32_t SpeakerVolume(uint32_t* output_volume) const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; absl::optional volume = output_->SpeakerVolume(); if (!volume) return -1; *output_volume = *volume; - RTC_LOG(INFO) << "output: " << *volume; + RTC_DLOG(INFO) << "output: " << *volume; return 0; } int32_t MaxSpeakerVolume(uint32_t* 
output_max_volume) const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; absl::optional max_volume = output_->MaxSpeakerVolume(); @@ -377,7 +373,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t MinSpeakerVolume(uint32_t* output_min_volume) const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return -1; absl::optional min_volume = output_->MinSpeakerVolume(); @@ -388,81 +384,71 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t MicrophoneVolumeIsAvailable(bool* available) override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; *available = false; - RTC_LOG(INFO) << "output: " << *available; + RTC_DLOG(INFO) << "output: " << *available; return -1; } int32_t SetMicrophoneVolume(uint32_t volume) override { - RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")"; - FATAL() << "Should never be called"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_CHECK_NOTREACHED(); } int32_t MicrophoneVolume(uint32_t* volume) const override { - RTC_LOG(INFO) << __FUNCTION__; - FATAL() << "Should never be called"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_CHECK_NOTREACHED(); } int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { - RTC_LOG(INFO) << __FUNCTION__; - FATAL() << "Should never be called"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_CHECK_NOTREACHED(); } int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { - RTC_LOG(INFO) << __FUNCTION__; - FATAL() << "Should never be called"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_CHECK_NOTREACHED(); } int32_t SpeakerMuteIsAvailable(bool* available) override { - RTC_LOG(INFO) << __FUNCTION__; - FATAL() << "Should never be called"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_CHECK_NOTREACHED(); } int32_t SetSpeakerMute(bool enable) override { - RTC_LOG(INFO) << 
__FUNCTION__ << "(" << enable << ")"; - FATAL() << "Should never be called"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_CHECK_NOTREACHED(); } int32_t SpeakerMute(bool* enabled) const override { - RTC_LOG(INFO) << __FUNCTION__; - FATAL() << "Should never be called"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_CHECK_NOTREACHED(); } int32_t MicrophoneMuteIsAvailable(bool* available) override { - RTC_LOG(INFO) << __FUNCTION__; - FATAL() << "Not implemented"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_CHECK_NOTREACHED(); } int32_t SetMicrophoneMute(bool enable) override { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; - FATAL() << "Not implemented"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_CHECK_NOTREACHED(); } int32_t MicrophoneMute(bool* enabled) const override { - RTC_LOG(INFO) << __FUNCTION__; - FATAL() << "Not implemented"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_CHECK_NOTREACHED(); } int32_t StereoPlayoutIsAvailable(bool* available) const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; *available = is_stereo_playout_supported_; - RTC_LOG(INFO) << "output: " << *available; + RTC_DLOG(INFO) << "output: " << *available; return 0; } int32_t SetStereoPlayout(bool enable) override { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; // Android does not support changes between mono and stero on the fly. The // use of stereo or mono is determined by the audio layer. It is allowed // to call this method if that same state is not modified. 
@@ -475,21 +461,21 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t StereoPlayout(bool* enabled) const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; *enabled = is_stereo_playout_supported_; - RTC_LOG(INFO) << "output: " << *enabled; + RTC_DLOG(INFO) << "output: " << *enabled; return 0; } int32_t StereoRecordingIsAvailable(bool* available) const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; *available = is_stereo_record_supported_; - RTC_LOG(INFO) << "output: " << *available; + RTC_DLOG(INFO) << "output: " << *available; return 0; } int32_t SetStereoRecording(bool enable) override { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; // Android does not support changes between mono and stero on the fly. The // use of stereo or mono is determined by the audio layer. It is allowed // to call this method if that same state is not modified. @@ -502,9 +488,9 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t StereoRecording(bool* enabled) const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; *enabled = is_stereo_record_supported_; - RTC_LOG(INFO) << "output: " << *enabled; + RTC_DLOG(INFO) << "output: " << *enabled; return 0; } @@ -516,7 +502,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } // Returns true if the device both supports built in AEC and the device - // is not blacklisted. + // is not blocklisted. // Currently, if OpenSL ES is used in both directions, this method will still // report the correct value and it has the correct effect. As an example: // a device supports built in AEC and this method returns true. Libjingle @@ -528,59 +514,58 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { // a "Not Implemented" log will be filed. 
This non-perfect state will remain // until I have added full support for audio effects based on OpenSL ES APIs. bool BuiltInAECIsAvailable() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return false; bool isAvailable = input_->IsAcousticEchoCancelerSupported(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return isAvailable; } // Not implemented for any input device on Android. bool BuiltInAGCIsAvailable() const override { - RTC_LOG(INFO) << __FUNCTION__; - RTC_LOG(INFO) << "output: " << false; + RTC_DLOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << "output: " << false; return false; } // Returns true if the device both supports built in NS and the device - // is not blacklisted. + // is not blocklisted. // TODO(henrika): add implementation for OpenSL ES based audio as well. // In addition, see comments for BuiltInAECIsAvailable(). bool BuiltInNSIsAvailable() const override { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return false; bool isAvailable = input_->IsNoiseSuppressorSupported(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return isAvailable; } // TODO(henrika): add implementation for OpenSL ES based audio as well. 
int32_t EnableBuiltInAEC(bool enable) override { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; if (!initialized_) return -1; RTC_CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available"; int32_t result = input_->EnableBuiltInAEC(enable); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; return result; } int32_t EnableBuiltInAGC(bool enable) override { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; - FATAL() << "HW AGC is not available"; - return -1; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_CHECK_NOTREACHED(); } // TODO(henrika): add implementation for OpenSL ES based audio as well. int32_t EnableBuiltInNS(bool enable) override { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; if (!initialized_) return -1; RTC_CHECK(BuiltInNSIsAvailable()) << "HW NS is not available"; int32_t result = input_->EnableBuiltInNS(enable); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; return result; } @@ -591,14 +576,14 @@ class AndroidAudioDeviceModule : public AudioDeviceModule { } int32_t AttachAudioBuffer() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; output_->AttachAudioBuffer(audio_device_buffer_.get()); input_->AttachAudioBuffer(audio_device_buffer_.get()); return 0; } private: - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; const AudioDeviceModule::AudioLayer audio_layer_; const bool is_stereo_playout_supported_; @@ -655,8 +640,8 @@ rtc::scoped_refptr CreateAudioDeviceModuleFromInputAndOutput( uint16_t playout_delay_ms, std::unique_ptr audio_input, std::unique_ptr audio_output) { - RTC_LOG(INFO) << __FUNCTION__; - return new rtc::RefCountedObject( + RTC_DLOG(INFO) << __FUNCTION__; + return rtc::make_ref_counted( audio_layer, is_stereo_playout_supported, is_stereo_record_supported, 
playout_delay_ms, std::move(audio_input), std::move(audio_output)); } diff --git a/sdk/android/src/jni/audio_device/audio_record_jni.h b/sdk/android/src/jni/audio_device/audio_record_jni.h index 7578f83948..800d235432 100644 --- a/sdk/android/src/jni/audio_device/audio_record_jni.h +++ b/sdk/android/src/jni/audio_device/audio_record_jni.h @@ -12,11 +12,12 @@ #define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_RECORD_JNI_H_ #include + #include +#include "api/sequence_checker.h" #include "modules/audio_device/audio_device_buffer.h" #include "modules/audio_device/include/audio_device_defines.h" -#include "rtc_base/thread_checker.h" #include "sdk/android/src/jni/audio_device/audio_device_module.h" namespace webrtc { @@ -93,11 +94,11 @@ class AudioRecordJni : public AudioInput { private: // Stores thread ID in constructor. - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; // Stores thread ID in first call to OnDataIsRecorded() from high-priority // thread in Java. Detached during construction of this object. - rtc::ThreadChecker thread_checker_java_; + SequenceChecker thread_checker_java_; // Wraps the Java specific parts of the AudioRecordJni class. 
JNIEnv* env_ = nullptr; diff --git a/sdk/android/src/jni/audio_device/audio_track_jni.cc b/sdk/android/src/jni/audio_device/audio_track_jni.cc index 8f0a041711..85adee2861 100644 --- a/sdk/android/src/jni/audio_device/audio_track_jni.cc +++ b/sdk/android/src/jni/audio_device/audio_track_jni.cc @@ -20,6 +20,7 @@ #include "sdk/android/generated_java_audio_device_module_native_jni/WebRtcAudioTrack_jni.h" #include "sdk/android/src/jni/jni_helpers.h" #include "system_wrappers/include/field_trial.h" +#include "system_wrappers/include/metrics.h" namespace webrtc { @@ -89,12 +90,33 @@ int32_t AudioTrackJni::InitPlayout() { nullptr); if (buffer_size_factor == 0) buffer_size_factor = 1.0; - if (!Java_WebRtcAudioTrack_initPlayout( - env_, j_audio_track_, audio_parameters_.sample_rate(), - static_cast(audio_parameters_.channels()), buffer_size_factor)) { + int requested_buffer_size_bytes = Java_WebRtcAudioTrack_initPlayout( + env_, j_audio_track_, audio_parameters_.sample_rate(), + static_cast(audio_parameters_.channels()), buffer_size_factor); + if (requested_buffer_size_bytes < 0) { RTC_LOG(LS_ERROR) << "InitPlayout failed"; return -1; } + // Update UMA histograms for both the requested and actual buffer size. + // To avoid division by zero, we assume the sample rate is 48k if an invalid + // value is found. + const int sample_rate = audio_parameters_.sample_rate() <= 0 + ? 48000 + : audio_parameters_.sample_rate(); + // This calculation assumes that audio is mono. 
+ const int requested_buffer_size_ms = + (requested_buffer_size_bytes * 1000) / (2 * sample_rate); + RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AndroidNativeRequestedAudioBufferSizeMs", + requested_buffer_size_ms, 0, 1000, 100); + int actual_buffer_size_frames = + Java_WebRtcAudioTrack_getBufferSizeInFrames(env_, j_audio_track_); + if (actual_buffer_size_frames >= 0) { + const int actual_buffer_size_ms = + actual_buffer_size_frames * 1000 / sample_rate; + RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AndroidNativeAudioBufferSizeMs", + actual_buffer_size_ms, 0, 1000, 100); + } + initialized_ = true; return 0; } @@ -129,6 +151,18 @@ int32_t AudioTrackJni::StopPlayout() { if (!initialized_ || !playing_) { return 0; } + // Log the difference in initial and current buffer level. + const int current_buffer_size_frames = + Java_WebRtcAudioTrack_getBufferSizeInFrames(env_, j_audio_track_); + const int initial_buffer_size_frames = + Java_WebRtcAudioTrack_getInitialBufferSizeInFrames(env_, j_audio_track_); + const int sample_rate_hz = audio_parameters_.sample_rate(); + RTC_HISTOGRAM_COUNTS( + "WebRTC.Audio.AndroidNativeAudioBufferSizeDifferenceFromInitialMs", + (current_buffer_size_frames - initial_buffer_size_frames) * 1000 / + sample_rate_hz, + -500, 100, 100); + if (!Java_WebRtcAudioTrack_stopPlayout(env_, j_audio_track_)) { RTC_LOG(LS_ERROR) << "StopPlayout failed"; return -1; diff --git a/sdk/android/src/jni/audio_device/audio_track_jni.h b/sdk/android/src/jni/audio_device/audio_track_jni.h index c7d060033f..cc4d8f53a0 100644 --- a/sdk/android/src/jni/audio_device/audio_track_jni.h +++ b/sdk/android/src/jni/audio_device/audio_track_jni.h @@ -12,12 +12,13 @@ #define SDK_ANDROID_SRC_JNI_AUDIO_DEVICE_AUDIO_TRACK_JNI_H_ #include + #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "modules/audio_device/audio_device_buffer.h" #include "modules/audio_device/include/audio_device_defines.h" -#include "rtc_base/thread_checker.h" #include 
"sdk/android/src/jni/audio_device/audio_common.h" #include "sdk/android/src/jni/audio_device/audio_device_module.h" @@ -84,11 +85,11 @@ class AudioTrackJni : public AudioOutput { private: // Stores thread ID in constructor. - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; // Stores thread ID in first call to OnGetPlayoutData() from high-priority // thread in Java. Detached during construction of this object. - rtc::ThreadChecker thread_checker_java_; + SequenceChecker thread_checker_java_; // Wraps the Java specific parts of the AudioTrackJni class. JNIEnv* env_ = nullptr; diff --git a/sdk/android/src/jni/audio_device/opensles_common.cc b/sdk/android/src/jni/audio_device/opensles_common.cc index 04c3ae9f7a..0f35b2712a 100644 --- a/sdk/android/src/jni/audio_device/opensles_common.cc +++ b/sdk/android/src/jni/audio_device/opensles_common.cc @@ -106,8 +106,6 @@ OpenSLEngineManager::OpenSLEngineManager() { thread_checker_.Detach(); } -OpenSLEngineManager::~OpenSLEngineManager() = default; - SLObjectItf OpenSLEngineManager::GetOpenSLEngine() { RTC_LOG(INFO) << "GetOpenSLEngine"; RTC_DCHECK(thread_checker_.IsCurrent()); diff --git a/sdk/android/src/jni/audio_device/opensles_common.h b/sdk/android/src/jni/audio_device/opensles_common.h index 605ddfc0eb..9dd1e0f7d7 100644 --- a/sdk/android/src/jni/audio_device/opensles_common.h +++ b/sdk/android/src/jni/audio_device/opensles_common.h @@ -15,9 +15,9 @@ #include #include "api/ref_counted_base.h" +#include "api/sequence_checker.h" #include "rtc_base/checks.h" #include "rtc_base/logging.h" -#include "rtc_base/thread_checker.h" namespace webrtc { @@ -68,14 +68,15 @@ typedef ScopedSLObject ScopedSLObjectItf; // Subsequent calls returns the already created engine. // Note: This class must be used single threaded and this is enforced by a // thread checker. 
-class OpenSLEngineManager : public rtc::RefCountedBase { +class OpenSLEngineManager + : public rtc::RefCountedNonVirtual { public: OpenSLEngineManager(); - ~OpenSLEngineManager() override; + ~OpenSLEngineManager() = default; SLObjectItf GetOpenSLEngine(); private: - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; // This object is the global entry point of the OpenSL ES API. // After creating the engine object, the application can obtain this object‘s // SLEngineItf interface. This interface contains creation methods for all diff --git a/sdk/android/src/jni/audio_device/opensles_player.h b/sdk/android/src/jni/audio_device/opensles_player.h index a2a49f986f..7388a9370c 100644 --- a/sdk/android/src/jni/audio_device/opensles_player.h +++ b/sdk/android/src/jni/audio_device/opensles_player.h @@ -16,12 +16,13 @@ #include #include + #include "absl/types/optional.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "modules/audio_device/audio_device_buffer.h" #include "modules/audio_device/fine_audio_buffer.h" #include "modules/audio_device/include/audio_device_defines.h" -#include "rtc_base/thread_checker.h" #include "sdk/android/src/jni/audio_device/audio_common.h" #include "sdk/android/src/jni/audio_device/audio_device_module.h" #include "sdk/android/src/jni/audio_device/opensles_common.h" @@ -121,12 +122,12 @@ class OpenSLESPlayer : public AudioOutput { // Ensures that methods are called from the same thread as this object is // created on. - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; // Stores thread ID in first call to SimpleBufferQueueCallback() from internal // non-application thread which is not attached to the Dalvik JVM. // Detached during construction of this object. 
- rtc::ThreadChecker thread_checker_opensles_; + SequenceChecker thread_checker_opensles_; const AudioParameters audio_parameters_; diff --git a/sdk/android/src/jni/audio_device/opensles_recorder.h b/sdk/android/src/jni/audio_device/opensles_recorder.h index 4856fd0155..ff324f3135 100644 --- a/sdk/android/src/jni/audio_device/opensles_recorder.h +++ b/sdk/android/src/jni/audio_device/opensles_recorder.h @@ -18,10 +18,10 @@ #include #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "modules/audio_device/audio_device_buffer.h" #include "modules/audio_device/fine_audio_buffer.h" #include "modules/audio_device/include/audio_device_defines.h" -#include "rtc_base/thread_checker.h" #include "sdk/android/src/jni/audio_device/audio_common.h" #include "sdk/android/src/jni/audio_device/audio_device_module.h" #include "sdk/android/src/jni/audio_device/opensles_common.h" @@ -128,12 +128,12 @@ class OpenSLESRecorder : public AudioInput { // Ensures that methods are called from the same thread as this object is // created on. - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; // Stores thread ID in first call to SimpleBufferQueueCallback() from internal // non-application thread which is not attached to the Dalvik JVM. // Detached during construction of this object. - rtc::ThreadChecker thread_checker_opensles_; + SequenceChecker thread_checker_opensles_; const AudioParameters audio_parameters_; diff --git a/sdk/android/src/jni/av1_codec.cc b/sdk/android/src/jni/av1_codec.cc new file mode 100644 index 0000000000..02070f7901 --- /dev/null +++ b/sdk/android/src/jni/av1_codec.cc @@ -0,0 +1,39 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "modules/video_coding/codecs/av1/libaom_av1_decoder.h" +#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h" +#include "sdk/android/generated_libaom_av1_jni/LibaomAv1Decoder_jni.h" +#include "sdk/android/generated_libaom_av1_jni/LibaomAv1Encoder_jni.h" +#include "sdk/android/src/jni/jni_helpers.h" + +namespace webrtc { +namespace jni { + +static jlong JNI_LibaomAv1Encoder_CreateEncoder(JNIEnv* jni) { + return jlongFromPointer(webrtc::CreateLibaomAv1Encoder().release()); +} + +static jboolean JNI_LibaomAv1Encoder_IsSupported(JNIEnv* jni) { + return webrtc::kIsLibaomAv1EncoderSupported; +} + +static jlong JNI_LibaomAv1Decoder_CreateDecoder(JNIEnv* jni) { + return jlongFromPointer(webrtc::CreateLibaomAv1Decoder().release()); +} + +static jboolean JNI_LibaomAv1Decoder_IsSupported(JNIEnv* jni) { + return webrtc::kIsLibaomAv1DecoderSupported; +} + +} // namespace jni +} // namespace webrtc diff --git a/test/pc/e2e/analyzer/video/id_generator.cc b/sdk/android/src/jni/egl_base_10_impl.cc similarity index 52% rename from test/pc/e2e/analyzer/video/id_generator.cc rename to sdk/android/src/jni/egl_base_10_impl.cc index f1ead37e2f..1bbc7031a0 100644 --- a/test/pc/e2e/analyzer/video/id_generator.cc +++ b/sdk/android/src/jni/egl_base_10_impl.cc @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * Copyright 2021 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,17 +8,16 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "test/pc/e2e/analyzer/video/id_generator.h" +#include -namespace webrtc { -namespace webrtc_pc_e2e { +#include "sdk/android/generated_video_egl_jni/EglBase10Impl_jni.h" -IntIdGenerator::IntIdGenerator(int start_value) : next_id_(start_value) {} -IntIdGenerator::~IntIdGenerator() = default; +namespace webrtc { +namespace jni { -int IntIdGenerator::GetNextId() { - return next_id_++; +static jlong JNI_EglBase10Impl_GetCurrentNativeEGLContext(JNIEnv* jni) { + return reinterpret_cast(eglGetCurrentContext()); } -} // namespace webrtc_pc_e2e +} // namespace jni } // namespace webrtc diff --git a/sdk/android/src/jni/encoded_image.cc b/sdk/android/src/jni/encoded_image.cc index e13653ca34..189d7e95e4 100644 --- a/sdk/android/src/jni/encoded_image.cc +++ b/sdk/android/src/jni/encoded_image.cc @@ -70,7 +70,7 @@ ScopedJavaLocalRef NativeToJavaEncodedImage( static_cast(image._encodedWidth), static_cast(image._encodedHeight), image.capture_time_ms_ * rtc::kNumNanosecsPerMillisec, frame_type, - static_cast(image.rotation_), image._completeFrame, qp); + static_cast(image.rotation_), qp); } ScopedJavaLocalRef NativeToJavaFrameTypeArray( @@ -90,7 +90,7 @@ EncodedImage JavaToNativeEncodedImage(JNIEnv* env, const size_t buffer_size = env->GetDirectBufferCapacity(j_buffer.obj()); EncodedImage frame; - frame.SetEncodedData(new rtc::RefCountedObject( + frame.SetEncodedData(rtc::make_ref_counted( env, j_encoded_image, buffer, buffer_size)); frame._encodedWidth = Java_EncodedImage_getEncodedWidth(env, j_encoded_image); @@ -98,8 +98,6 @@ EncodedImage JavaToNativeEncodedImage(JNIEnv* env, Java_EncodedImage_getEncodedHeight(env, j_encoded_image); frame.rotation_ = (VideoRotation)Java_EncodedImage_getRotation(env, j_encoded_image); - frame._completeFrame = - Java_EncodedImage_getCompleteFrame(env, j_encoded_image); frame.qp_ = JavaToNativeOptionalInt( env, Java_EncodedImage_getQp(env, j_encoded_image)) diff --git a/sdk/android/src/jni/h264_utils.cc 
b/sdk/android/src/jni/h264_utils.cc index 02e3ae110d..882df95b82 100644 --- a/sdk/android/src/jni/h264_utils.cc +++ b/sdk/android/src/jni/h264_utils.cc @@ -8,10 +8,9 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include "sdk/android/src/jni/video_codec_info.h" - -#include "common_video/h264/profile_level_id.h" +#include "api/video_codecs/h264_profile_level_id.h" #include "sdk/android/generated_video_jni/H264Utils_jni.h" +#include "sdk/android/src/jni/video_codec_info.h" namespace webrtc { namespace jni { @@ -20,8 +19,8 @@ static jboolean JNI_H264Utils_IsSameH264Profile( JNIEnv* env, const JavaParamRef& params1, const JavaParamRef& params2) { - return H264::IsSameH264Profile(JavaToNativeStringMap(env, params1), - JavaToNativeStringMap(env, params2)); + return H264IsSameProfile(JavaToNativeStringMap(env, params1), + JavaToNativeStringMap(env, params2)); } } // namespace jni diff --git a/sdk/android/src/jni/pc/add_ice_candidate_observer.cc b/sdk/android/src/jni/pc/add_ice_candidate_observer.cc new file mode 100644 index 0000000000..7f3dddbb28 --- /dev/null +++ b/sdk/android/src/jni/pc/add_ice_candidate_observer.cc @@ -0,0 +1,39 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "sdk/android/src/jni/pc/add_ice_candidate_observer.h" + +#include + +#include "sdk/android/generated_peerconnection_jni/AddIceObserver_jni.h" +#include "sdk/android/native_api/jni/java_types.h" +#include "sdk/android/src/jni/jni_helpers.h" +#include "sdk/media_constraints.h" + +namespace webrtc { +namespace jni { + +AddIceCandidateObserverJni::AddIceCandidateObserverJni( + JNIEnv* env, + const JavaRef& j_observer) + : j_observer_global_(env, j_observer) {} + +void AddIceCandidateObserverJni::OnComplete(webrtc::RTCError error) { + JNIEnv* env = AttachCurrentThreadIfNeeded(); + if (error.ok()) { + Java_AddIceObserver_onAddSuccess(env, j_observer_global_); + } else { + Java_AddIceObserver_onAddFailure(env, j_observer_global_, + NativeToJavaString(env, error.message())); + } +} + +} // namespace jni +} // namespace webrtc diff --git a/sdk/android/src/jni/pc/add_ice_candidate_observer.h b/sdk/android/src/jni/pc/add_ice_candidate_observer.h new file mode 100644 index 0000000000..1128385389 --- /dev/null +++ b/sdk/android/src/jni/pc/add_ice_candidate_observer.h @@ -0,0 +1,38 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef SDK_ANDROID_SRC_JNI_PC_ADD_ICE_CANDIDATE_OBSERVER_H_ +#define SDK_ANDROID_SRC_JNI_PC_ADD_ICE_CANDIDATE_OBSERVER_H_ + +#include +#include + +#include "api/peer_connection_interface.h" +#include "sdk/android/src/jni/jni_helpers.h" + +namespace webrtc { +namespace jni { + +class AddIceCandidateObserverJni final + : public rtc::RefCountedNonVirtual { + public: + AddIceCandidateObserverJni(JNIEnv* env, const JavaRef& j_observer); + ~AddIceCandidateObserverJni() = default; + + void OnComplete(RTCError error); + + private: + const ScopedJavaGlobalRef j_observer_global_; +}; + +} // namespace jni +} // namespace webrtc + +#endif // SDK_ANDROID_SRC_JNI_PC_ADD_ICE_CANDIDATE_OBSERVER_H_ diff --git a/sdk/android/src/jni/pc/audio.cc b/sdk/android/src/jni/pc/audio.cc index 376c42178a..74c8b5547a 100644 --- a/sdk/android/src/jni/pc/audio.cc +++ b/sdk/android/src/jni/pc/audio.cc @@ -10,21 +10,11 @@ #include "sdk/android/src/jni/pc/audio.h" -#include "api/audio_codecs/builtin_audio_decoder_factory.h" -#include "api/audio_codecs/builtin_audio_encoder_factory.h" #include "modules/audio_processing/include/audio_processing.h" namespace webrtc { namespace jni { -rtc::scoped_refptr CreateAudioDecoderFactory() { - return CreateBuiltinAudioDecoderFactory(); -} - -rtc::scoped_refptr CreateAudioEncoderFactory() { - return CreateBuiltinAudioEncoderFactory(); -} - rtc::scoped_refptr CreateAudioProcessing() { return AudioProcessingBuilder().Create(); } diff --git a/sdk/android/src/jni/pc/audio.h b/sdk/android/src/jni/pc/audio.h index 1e8b3accc2..7a79bed986 100644 --- a/sdk/android/src/jni/pc/audio.h +++ b/sdk/android/src/jni/pc/audio.h @@ -11,20 +11,14 @@ #ifndef SDK_ANDROID_SRC_JNI_PC_AUDIO_H_ #define SDK_ANDROID_SRC_JNI_PC_AUDIO_H_ +#include "api/scoped_refptr.h" // Adding 'nogncheck' to disable the gn include headers check. 
// We don't want this target depend on audio related targets -#include "api/audio_codecs/audio_decoder_factory.h" // nogncheck -#include "api/audio_codecs/audio_encoder_factory.h" // nogncheck -#include "api/scoped_refptr.h" #include "modules/audio_processing/include/audio_processing.h" // nogncheck namespace webrtc { namespace jni { -rtc::scoped_refptr CreateAudioDecoderFactory(); - -rtc::scoped_refptr CreateAudioEncoderFactory(); - rtc::scoped_refptr CreateAudioProcessing(); } // namespace jni diff --git a/sdk/android/src/jni/pc/owned_factory_and_threads.cc b/sdk/android/src/jni/pc/owned_factory_and_threads.cc index e42b117e57..5e00ece8ce 100644 --- a/sdk/android/src/jni/pc/owned_factory_and_threads.cc +++ b/sdk/android/src/jni/pc/owned_factory_and_threads.cc @@ -19,19 +19,11 @@ OwnedFactoryAndThreads::OwnedFactoryAndThreads( std::unique_ptr network_thread, std::unique_ptr worker_thread, std::unique_ptr signaling_thread, - rtc::NetworkMonitorFactory* network_monitor_factory, const rtc::scoped_refptr& factory) : network_thread_(std::move(network_thread)), worker_thread_(std::move(worker_thread)), signaling_thread_(std::move(signaling_thread)), - network_monitor_factory_(network_monitor_factory), factory_(factory) {} -OwnedFactoryAndThreads::~OwnedFactoryAndThreads() { - if (network_monitor_factory_ != nullptr) { - rtc::NetworkMonitorFactory::ReleaseFactory(network_monitor_factory_); - } -} - } // namespace jni } // namespace webrtc diff --git a/sdk/android/src/jni/pc/owned_factory_and_threads.h b/sdk/android/src/jni/pc/owned_factory_and_threads.h index 845d4dbd70..e87879c13f 100644 --- a/sdk/android/src/jni/pc/owned_factory_and_threads.h +++ b/sdk/android/src/jni/pc/owned_factory_and_threads.h @@ -33,25 +33,19 @@ class OwnedFactoryAndThreads { std::unique_ptr network_thread, std::unique_ptr worker_thread, std::unique_ptr signaling_thread, - rtc::NetworkMonitorFactory* network_monitor_factory, const rtc::scoped_refptr& factory); - ~OwnedFactoryAndThreads(); + 
~OwnedFactoryAndThreads() = default; PeerConnectionFactoryInterface* factory() { return factory_.get(); } rtc::Thread* network_thread() { return network_thread_.get(); } rtc::Thread* signaling_thread() { return signaling_thread_.get(); } rtc::Thread* worker_thread() { return worker_thread_.get(); } - rtc::NetworkMonitorFactory* network_monitor_factory() { - return network_monitor_factory_; - } - void clear_network_monitor_factory() { network_monitor_factory_ = nullptr; } private: const std::unique_ptr network_thread_; const std::unique_ptr worker_thread_; const std::unique_ptr signaling_thread_; - rtc::NetworkMonitorFactory* network_monitor_factory_; const rtc::scoped_refptr factory_; }; diff --git a/sdk/android/src/jni/pc/peer_connection.cc b/sdk/android/src/jni/pc/peer_connection.cc index 0ae39fbf66..09b8f33edb 100644 --- a/sdk/android/src/jni/pc/peer_connection.cc +++ b/sdk/android/src/jni/pc/peer_connection.cc @@ -44,6 +44,7 @@ #include "sdk/android/generated_peerconnection_jni/PeerConnection_jni.h" #include "sdk/android/native_api/jni/java_types.h" #include "sdk/android/src/jni/jni_helpers.h" +#include "sdk/android/src/jni/pc/add_ice_candidate_observer.h" #include "sdk/android/src/jni/pc/crypto_options.h" #include "sdk/android/src/jni/pc/data_channel.h" #include "sdk/android/src/jni/pc/ice_candidate.h" @@ -128,7 +129,8 @@ ScopedJavaLocalRef NativeToJavaCandidatePairChange( env, NativeToJavaCandidate(env, selected_pair.local_candidate()), NativeToJavaCandidate(env, selected_pair.remote_candidate()), static_cast(event.last_data_received_ms), - NativeToJavaString(env, event.reason)); + NativeToJavaString(env, event.reason), + static_cast(event.estimated_disconnected_time_ms)); } } // namespace @@ -236,6 +238,12 @@ void JavaToNativeRTCConfiguration( j_rtc_config); rtc_config->stun_candidate_keepalive_interval = JavaToNativeOptionalInt(jni, j_stun_candidate_keepalive_interval); + ScopedJavaLocalRef j_stable_writable_connection_ping_interval_ms = + 
Java_RTCConfiguration_getStableWritableConnectionPingIntervalMs( + jni, j_rtc_config); + rtc_config->stable_writable_connection_ping_interval_ms = + JavaToNativeOptionalInt(jni, + j_stable_writable_connection_ping_interval_ms); rtc_config->disable_ipv6_on_wifi = Java_RTCConfiguration_getDisableIPv6OnWifi(jni, j_rtc_config); rtc_config->max_ipv6_networks = @@ -249,8 +257,6 @@ void JavaToNativeRTCConfiguration( Java_RTCConfiguration_getEnableDscp(jni, j_rtc_config); rtc_config->media_config.video.enable_cpu_adaptation = Java_RTCConfiguration_getEnableCpuOveruseDetection(jni, j_rtc_config); - rtc_config->enable_rtp_data_channel = - Java_RTCConfiguration_getEnableRtpDataChannel(jni, j_rtc_config); rtc_config->media_config.video.suspend_below_min_bitrate = Java_RTCConfiguration_getSuspendBelowMinBitrate(jni, j_rtc_config); rtc_config->screencast_min_bitrate = JavaToNativeOptionalInt( @@ -264,17 +270,17 @@ void JavaToNativeRTCConfiguration( rtc_config->sdp_semantics = JavaToNativeSdpSemantics(jni, j_sdp_semantics); rtc_config->active_reset_srtp_params = Java_RTCConfiguration_getActiveResetSrtpParams(jni, j_rtc_config); - rtc_config->use_media_transport = - Java_RTCConfiguration_getUseMediaTransport(jni, j_rtc_config); - rtc_config->use_media_transport_for_data_channels = - Java_RTCConfiguration_getUseMediaTransportForDataChannels(jni, - j_rtc_config); rtc_config->crypto_options = JavaToNativeOptionalCryptoOptions(jni, j_crypto_options); rtc_config->allow_codec_switching = JavaToNativeOptionalBool( jni, Java_RTCConfiguration_getAllowCodecSwitching(jni, j_rtc_config)); + rtc_config->offer_extmap_allow_mixed = + Java_RTCConfiguration_getOfferExtmapAllowMixed(jni, j_rtc_config); + rtc_config->enable_implicit_rollback = + Java_RTCConfiguration_getEnableImplicitRollback(jni, j_rtc_config); + ScopedJavaLocalRef j_turn_logging_id = Java_RTCConfiguration_getTurnLoggingId(jni, j_rtc_config); if (!IsNull(jni, j_turn_logging_id)) { @@ -404,6 +410,16 @@ void 
PeerConnectionObserverJni::OnAddTrack( NativeToJavaMediaStreamArray(env, streams)); } +void PeerConnectionObserverJni::OnRemoveTrack( + rtc::scoped_refptr receiver) { + JNIEnv* env = AttachCurrentThreadIfNeeded(); + ScopedJavaLocalRef j_rtp_receiver = + NativeToJavaRtpReceiver(env, receiver); + rtp_receivers_.emplace_back(env, j_rtp_receiver); + + Java_Observer_onRemoveTrack(env, j_observer_global_, j_rtp_receiver); +} + void PeerConnectionObserverJni::OnTrack( rtc::scoped_refptr transceiver) { JNIEnv* env = AttachCurrentThreadIfNeeded(); @@ -468,9 +484,7 @@ static jlong JNI_PeerConnection_CreatePeerConnectionObserver( return jlongFromPointer(new PeerConnectionObserverJni(jni, j_observer)); } -static void JNI_PeerConnection_FreeOwnedPeerConnection( - JNIEnv*, - jlong j_p) { +static void JNI_PeerConnection_FreeOwnedPeerConnection(JNIEnv*, jlong j_p) { delete reinterpret_cast(j_p); } @@ -483,17 +497,39 @@ static jlong JNI_PeerConnection_GetNativePeerConnection( static ScopedJavaLocalRef JNI_PeerConnection_GetLocalDescription( JNIEnv* jni, const JavaParamRef& j_pc) { - const SessionDescriptionInterface* sdp = - ExtractNativePC(jni, j_pc)->local_description(); - return sdp ? NativeToJavaSessionDescription(jni, sdp) : nullptr; + PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc); + // It's only safe to operate on SessionDescriptionInterface on the + // signaling thread, but |jni| may only be used on the current thread, so we + // must do this odd dance. + std::string sdp; + std::string type; + pc->signaling_thread()->Invoke(RTC_FROM_HERE, [pc, &sdp, &type] { + const SessionDescriptionInterface* desc = pc->local_description(); + if (desc) { + RTC_CHECK(desc->ToString(&sdp)) << "got so far: " << sdp; + type = desc->type(); + } + }); + return sdp.empty() ? 
nullptr : NativeToJavaSessionDescription(jni, sdp, type); } static ScopedJavaLocalRef JNI_PeerConnection_GetRemoteDescription( JNIEnv* jni, const JavaParamRef& j_pc) { - const SessionDescriptionInterface* sdp = - ExtractNativePC(jni, j_pc)->remote_description(); - return sdp ? NativeToJavaSessionDescription(jni, sdp) : nullptr; + PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc); + // It's only safe to operate on SessionDescriptionInterface on the + // signaling thread, but |jni| may only be used on the current thread, so we + // must do this odd dance. + std::string sdp; + std::string type; + pc->signaling_thread()->Invoke(RTC_FROM_HERE, [pc, &sdp, &type] { + const SessionDescriptionInterface* desc = pc->remote_description(); + if (desc) { + RTC_CHECK(desc->ToString(&sdp)) << "got so far: " << sdp; + type = desc->type(); + } + }); + return sdp.empty() ? nullptr : NativeToJavaSessionDescription(jni, sdp, type); } static ScopedJavaLocalRef JNI_PeerConnection_GetCertificate( @@ -512,10 +548,12 @@ static ScopedJavaLocalRef JNI_PeerConnection_CreateDataChannel( const JavaParamRef& j_label, const JavaParamRef& j_init) { DataChannelInit init = JavaToNativeDataChannelInit(jni, j_init); - rtc::scoped_refptr channel( - ExtractNativePC(jni, j_pc)->CreateDataChannel( - JavaToNativeString(jni, j_label), &init)); - return WrapNativeDataChannel(jni, channel); + auto result = ExtractNativePC(jni, j_pc)->CreateDataChannelOrError( + JavaToNativeString(jni, j_label), &init); + if (!result.ok()) { + return WrapNativeDataChannel(jni, nullptr); + } + return WrapNativeDataChannel(jni, result.MoveValue()); } static void JNI_PeerConnection_CreateOffer( @@ -525,9 +563,8 @@ static void JNI_PeerConnection_CreateOffer( const JavaParamRef& j_constraints) { std::unique_ptr constraints = JavaToNativeMediaConstraints(jni, j_constraints); - rtc::scoped_refptr observer( - new rtc::RefCountedObject(jni, j_observer, - std::move(constraints))); + auto observer = rtc::make_ref_counted( + jni, 
j_observer, std::move(constraints)); PeerConnectionInterface::RTCOfferAnswerOptions options; CopyConstraintsIntoOfferAnswerOptions(observer->constraints(), &options); ExtractNativePC(jni, j_pc)->CreateOffer(observer, options); @@ -540,23 +577,31 @@ static void JNI_PeerConnection_CreateAnswer( const JavaParamRef& j_constraints) { std::unique_ptr constraints = JavaToNativeMediaConstraints(jni, j_constraints); - rtc::scoped_refptr observer( - new rtc::RefCountedObject(jni, j_observer, - std::move(constraints))); + auto observer = rtc::make_ref_counted( + jni, j_observer, std::move(constraints)); PeerConnectionInterface::RTCOfferAnswerOptions options; CopyConstraintsIntoOfferAnswerOptions(observer->constraints(), &options); ExtractNativePC(jni, j_pc)->CreateAnswer(observer, options); } +static void JNI_PeerConnection_SetLocalDescriptionAutomatically( + JNIEnv* jni, + const JavaParamRef& j_pc, + const JavaParamRef& j_observer) { + auto observer = + rtc::make_ref_counted(jni, j_observer); + ExtractNativePC(jni, j_pc)->SetLocalDescription(observer); +} + static void JNI_PeerConnection_SetLocalDescription( JNIEnv* jni, const JavaParamRef& j_pc, const JavaParamRef& j_observer, const JavaParamRef& j_sdp) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject(jni, j_observer, nullptr)); + auto observer = + rtc::make_ref_counted(jni, j_observer); ExtractNativePC(jni, j_pc)->SetLocalDescription( - observer, JavaToNativeSessionDescription(jni, j_sdp).release()); + JavaToNativeSessionDescription(jni, j_sdp), observer); } static void JNI_PeerConnection_SetRemoteDescription( @@ -564,10 +609,15 @@ static void JNI_PeerConnection_SetRemoteDescription( const JavaParamRef& j_pc, const JavaParamRef& j_observer, const JavaParamRef& j_sdp) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject(jni, j_observer, nullptr)); + auto observer = + rtc::make_ref_counted(jni, j_observer); ExtractNativePC(jni, j_pc)->SetRemoteDescription( - observer, JavaToNativeSessionDescription(jni, 
j_sdp).release()); + JavaToNativeSessionDescription(jni, j_sdp), observer); +} + +static void JNI_PeerConnection_RestartIce(JNIEnv* jni, + const JavaParamRef& j_pc) { + ExtractNativePC(jni, j_pc)->RestartIce(); } static void JNI_PeerConnection_SetAudioPlayout( @@ -614,6 +664,25 @@ static jboolean JNI_PeerConnection_AddIceCandidate( return ExtractNativePC(jni, j_pc)->AddIceCandidate(candidate.get()); } +static void JNI_PeerConnection_AddIceCandidateWithObserver( + JNIEnv* jni, + const JavaParamRef& j_pc, + const JavaParamRef& j_sdp_mid, + jint j_sdp_mline_index, + const JavaParamRef& j_candidate_sdp, + const JavaParamRef& j_observer) { + std::string sdp_mid = JavaToNativeString(jni, j_sdp_mid); + std::string sdp = JavaToNativeString(jni, j_candidate_sdp); + std::unique_ptr candidate( + CreateIceCandidate(sdp_mid, j_sdp_mline_index, sdp, nullptr)); + + rtc::scoped_refptr observer( + new AddIceCandidateObserverJni(jni, j_observer)); + ExtractNativePC(jni, j_pc)->AddIceCandidate( + std::move(candidate), + [observer](RTCError error) { observer->OnComplete(error); }); +} + static jboolean JNI_PeerConnection_RemoveIceCandidates( JNIEnv* jni, const JavaParamRef& j_pc, @@ -740,8 +809,7 @@ static jboolean JNI_PeerConnection_OldGetStats( const JavaParamRef& j_pc, const JavaParamRef& j_observer, jlong native_track) { - rtc::scoped_refptr observer( - new rtc::RefCountedObject(jni, j_observer)); + auto observer = rtc::make_ref_counted(jni, j_observer); return ExtractNativePC(jni, j_pc)->GetStats( observer, reinterpret_cast(native_track), PeerConnectionInterface::kStatsOutputLevelStandard); @@ -751,9 +819,8 @@ static void JNI_PeerConnection_NewGetStats( JNIEnv* jni, const JavaParamRef& j_pc, const JavaParamRef& j_callback) { - rtc::scoped_refptr callback( - new rtc::RefCountedObject(jni, - j_callback)); + auto callback = + rtc::make_ref_counted(jni, j_callback); ExtractNativePC(jni, j_pc)->GetStats(callback); } @@ -763,9 +830,9 @@ static jboolean JNI_PeerConnection_SetBitrate( 
const JavaParamRef& j_min, const JavaParamRef& j_current, const JavaParamRef& j_max) { - PeerConnectionInterface::BitrateParameters params; + BitrateSettings params; params.min_bitrate_bps = JavaToNativeOptionalInt(jni, j_min); - params.current_bitrate_bps = JavaToNativeOptionalInt(jni, j_current); + params.start_bitrate_bps = JavaToNativeOptionalInt(jni, j_current); params.max_bitrate_bps = JavaToNativeOptionalInt(jni, j_max); return ExtractNativePC(jni, j_pc)->SetBitrate(params).ok(); } diff --git a/sdk/android/src/jni/pc/peer_connection.h b/sdk/android/src/jni/pc/peer_connection.h index a9e2af2a47..86d99f31c4 100644 --- a/sdk/android/src/jni/pc/peer_connection.h +++ b/sdk/android/src/jni/pc/peer_connection.h @@ -72,6 +72,8 @@ class PeerConnectionObserverJni : public PeerConnectionObserver { streams) override; void OnTrack( rtc::scoped_refptr transceiver) override; + void OnRemoveTrack( + rtc::scoped_refptr receiver) override; private: typedef std::map diff --git a/sdk/android/src/jni/pc/peer_connection_factory.cc b/sdk/android/src/jni/pc/peer_connection_factory.cc index 48dd6e41d8..53e715bd08 100644 --- a/sdk/android/src/jni/pc/peer_connection_factory.cc +++ b/sdk/android/src/jni/pc/peer_connection_factory.cc @@ -138,11 +138,10 @@ ScopedJavaLocalRef NativeToScopedJavaPeerConnectionFactory( rtc::scoped_refptr pcf, std::unique_ptr network_thread, std::unique_ptr worker_thread, - std::unique_ptr signaling_thread, - rtc::NetworkMonitorFactory* network_monitor_factory) { + std::unique_ptr signaling_thread) { OwnedFactoryAndThreads* owned_factory = new OwnedFactoryAndThreads( std::move(network_thread), std::move(worker_thread), - std::move(signaling_thread), network_monitor_factory, pcf); + std::move(signaling_thread), pcf); ScopedJavaLocalRef j_pcf = Java_PeerConnectionFactory_Constructor( env, NativeToJavaPointer(owned_factory)); @@ -172,17 +171,15 @@ PeerConnectionFactoryInterface* PeerConnectionFactoryFromJava(jlong j_p) { // Set in 
PeerConnectionFactory_initializeAndroidGlobals(). static bool factory_static_initialized = false; - jobject NativeToJavaPeerConnectionFactory( JNIEnv* jni, rtc::scoped_refptr pcf, std::unique_ptr network_thread, std::unique_ptr worker_thread, - std::unique_ptr signaling_thread, - rtc::NetworkMonitorFactory* network_monitor_factory) { + std::unique_ptr signaling_thread) { return NativeToScopedJavaPeerConnectionFactory( jni, pcf, std::move(network_thread), std::move(worker_thread), - std::move(signaling_thread), network_monitor_factory) + std::move(signaling_thread)) .Release(); } @@ -246,7 +243,7 @@ static void JNI_PeerConnectionFactory_ShutdownInternalTracer(JNIEnv* jni) { // Following parameters are optional: // |audio_device_module|, |jencoder_factory|, |jdecoder_factory|, -// |audio_processor|, |media_transport_factory|, |fec_controller_factory|, +// |audio_processor|, |fec_controller_factory|, // |network_state_predictor_factory|, |neteq_factory|. ScopedJavaLocalRef CreatePeerConnectionFactoryForJava( JNIEnv* jni, @@ -263,7 +260,6 @@ ScopedJavaLocalRef CreatePeerConnectionFactoryForJava( network_controller_factory, std::unique_ptr network_state_predictor_factory, - std::unique_ptr media_transport_factory, std::unique_ptr neteq_factory) { // talk/ assumes pretty widely that the current Thread is ThreadManager'd, but // ThreadManager only WrapCurrentThread()s the thread where it is first @@ -285,18 +281,9 @@ ScopedJavaLocalRef CreatePeerConnectionFactoryForJava( signaling_thread->SetName("signaling_thread", NULL); RTC_CHECK(signaling_thread->Start()) << "Failed to start thread"; - rtc::NetworkMonitorFactory* network_monitor_factory = nullptr; - const absl::optional options = JavaToNativePeerConnectionFactoryOptions(jni, joptions); - // Do not create network_monitor_factory only if the options are - // provided and disable_network_monitor therein is set to true. 
- if (!(options && options->disable_network_monitor)) { - network_monitor_factory = new AndroidNetworkMonitorFactory(); - rtc::NetworkMonitorFactory::SetFactory(network_monitor_factory); - } - PeerConnectionFactoryDependencies dependencies; dependencies.network_thread = network_thread.get(); dependencies.worker_thread = worker_thread.get(); @@ -310,8 +297,11 @@ ScopedJavaLocalRef CreatePeerConnectionFactoryForJava( std::move(network_controller_factory); dependencies.network_state_predictor_factory = std::move(network_state_predictor_factory); - dependencies.media_transport_factory = std::move(media_transport_factory); dependencies.neteq_factory = std::move(neteq_factory); + if (!(options && options->disable_network_monitor)) { + dependencies.network_monitor_factory = + std::make_unique(); + } cricket::MediaEngineDependencies media_dependencies; media_dependencies.task_queue_factory = dependencies.task_queue_factory.get(); @@ -338,7 +328,7 @@ ScopedJavaLocalRef CreatePeerConnectionFactoryForJava( return NativeToScopedJavaPeerConnectionFactory( jni, factory, std::move(network_thread), std::move(worker_thread), - std::move(signaling_thread), network_monitor_factory); + std::move(signaling_thread)); } static ScopedJavaLocalRef @@ -355,7 +345,6 @@ JNI_PeerConnectionFactory_CreatePeerConnectionFactory( jlong native_fec_controller_factory, jlong native_network_controller_factory, jlong native_network_state_predictor_factory, - jlong native_media_transport_factory, jlong native_neteq_factory) { rtc::scoped_refptr audio_processor = reinterpret_cast(native_audio_processor); @@ -372,8 +361,6 @@ JNI_PeerConnectionFactory_CreatePeerConnectionFactory( native_network_controller_factory), TakeOwnershipOfUniquePtr( native_network_state_predictor_factory), - TakeOwnershipOfUniquePtr( - native_media_transport_factory), TakeOwnershipOfUniquePtr(native_neteq_factory)); } @@ -484,14 +471,14 @@ static jlong JNI_PeerConnectionFactory_CreatePeerConnection( jni, j_sslCertificateVerifier); } 
- rtc::scoped_refptr pc = - PeerConnectionFactoryFromJava(factory)->CreatePeerConnection( + auto result = + PeerConnectionFactoryFromJava(factory)->CreatePeerConnectionOrError( rtc_config, std::move(peer_connection_dependencies)); - if (!pc) + if (!result.ok()) return 0; - return jlongFromPointer( - new OwnedPeerConnection(pc, std::move(observer), std::move(constraints))); + return jlongFromPointer(new OwnedPeerConnection( + result.MoveValue(), std::move(observer), std::move(constraints))); } static jlong JNI_PeerConnectionFactory_CreateVideoSource( diff --git a/sdk/android/src/jni/pc/peer_connection_factory.h b/sdk/android/src/jni/pc/peer_connection_factory.h index 904352f425..5bfdb7a808 100644 --- a/sdk/android/src/jni/pc/peer_connection_factory.h +++ b/sdk/android/src/jni/pc/peer_connection_factory.h @@ -24,8 +24,7 @@ jobject NativeToJavaPeerConnectionFactory( rtc::scoped_refptr pcf, std::unique_ptr network_thread, std::unique_ptr worker_thread, - std::unique_ptr signaling_thread, - rtc::NetworkMonitorFactory* network_monitor_factory = nullptr); + std::unique_ptr signaling_thread); } // namespace jni } // namespace webrtc diff --git a/sdk/android/src/jni/pc/rtc_stats_collector_callback_wrapper.cc b/sdk/android/src/jni/pc/rtc_stats_collector_callback_wrapper.cc index b334bb4a72..baa3f276e7 100644 --- a/sdk/android/src/jni/pc/rtc_stats_collector_callback_wrapper.cc +++ b/sdk/android/src/jni/pc/rtc_stats_collector_callback_wrapper.cc @@ -94,6 +94,23 @@ ScopedJavaLocalRef MemberToJava( case RTCStatsMemberInterface::kSequenceString: return NativeToJavaStringArray( env, *member.cast_to>>()); + + case RTCStatsMemberInterface::kMapStringUint64: + return NativeToJavaMap( + env, + *member.cast_to>>(), + [](JNIEnv* env, const auto& entry) { + return std::make_pair(NativeToJavaString(env, entry.first), + NativeToJavaBigInteger(env, entry.second)); + }); + + case RTCStatsMemberInterface::kMapStringDouble: + return NativeToJavaMap( + env, *member.cast_to>>(), + [](JNIEnv* 
env, const auto& entry) { + return std::make_pair(NativeToJavaString(env, entry.first), + NativeToJavaDouble(env, entry.second)); + }); } RTC_NOTREACHED(); return nullptr; diff --git a/sdk/android/src/jni/pc/rtp_parameters.cc b/sdk/android/src/jni/pc/rtp_parameters.cc index a65fa6eaa9..4bd9ee0e1d 100644 --- a/sdk/android/src/jni/pc/rtp_parameters.cc +++ b/sdk/android/src/jni/pc/rtp_parameters.cc @@ -53,7 +53,8 @@ ScopedJavaLocalRef NativeToJavaRtpEncodingParameter( NativeToJavaInteger(env, encoding.max_framerate), NativeToJavaInteger(env, encoding.num_temporal_layers), NativeToJavaDouble(env, encoding.scale_resolution_down_by), - encoding.ssrc ? NativeToJavaLong(env, *encoding.ssrc) : nullptr); + encoding.ssrc ? NativeToJavaLong(env, *encoding.ssrc) : nullptr, + encoding.adaptive_ptime); } ScopedJavaLocalRef NativeToJavaRtpCodecParameter( @@ -115,6 +116,8 @@ RtpEncodingParameters JavaToNativeRtpEncodingParameters( Java_Encoding_getScaleResolutionDownBy(jni, j_encoding_parameters); encoding.scale_resolution_down_by = JavaToNativeOptionalDouble(jni, j_scale_resolution_down_by); + encoding.adaptive_ptime = + Java_Encoding_getAdaptivePTime(jni, j_encoding_parameters); ScopedJavaLocalRef j_ssrc = Java_Encoding_getSsrc(jni, j_encoding_parameters); if (!IsNull(jni, j_ssrc)) diff --git a/sdk/android/src/jni/pc/rtp_transceiver.cc b/sdk/android/src/jni/pc/rtp_transceiver.cc index 7d8cfdef49..1d468461f1 100644 --- a/sdk/android/src/jni/pc/rtp_transceiver.cc +++ b/sdk/android/src/jni/pc/rtp_transceiver.cc @@ -139,23 +139,37 @@ ScopedJavaLocalRef JNI_RtpTransceiver_CurrentDirection( : nullptr; } -void JNI_RtpTransceiver_Stop(JNIEnv* jni, - jlong j_rtp_transceiver_pointer) { - reinterpret_cast(j_rtp_transceiver_pointer)->Stop(); +void JNI_RtpTransceiver_StopInternal(JNIEnv* jni, + jlong j_rtp_transceiver_pointer) { + reinterpret_cast(j_rtp_transceiver_pointer) + ->StopInternal(); +} + +void JNI_RtpTransceiver_StopStandard(JNIEnv* jni, + jlong j_rtp_transceiver_pointer) { + 
reinterpret_cast(j_rtp_transceiver_pointer) + ->StopStandard(); } -void JNI_RtpTransceiver_SetDirection( +jboolean JNI_RtpTransceiver_SetDirection( JNIEnv* jni, jlong j_rtp_transceiver_pointer, const base::android::JavaParamRef& j_rtp_transceiver_direction) { if (IsNull(jni, j_rtp_transceiver_direction)) { - return; + return false; } RtpTransceiverDirection direction = static_cast( Java_RtpTransceiverDirection_getNativeIndex(jni, j_rtp_transceiver_direction)); - reinterpret_cast(j_rtp_transceiver_pointer) - ->SetDirection(direction); + webrtc::RTCError error = + reinterpret_cast(j_rtp_transceiver_pointer) + ->SetDirectionWithError(direction); + if (!error.ok()) { + RTC_LOG(LS_WARNING) << "SetDirection failed, code " + << ToString(error.type()) << ", message " + << error.message(); + } + return error.ok(); } } // namespace jni diff --git a/sdk/android/src/jni/pc/sdp_observer.cc b/sdk/android/src/jni/pc/sdp_observer.cc index fc59d1749a..c8b4345af4 100644 --- a/sdk/android/src/jni/pc/sdp_observer.cc +++ b/sdk/android/src/jni/pc/sdp_observer.cc @@ -31,8 +31,11 @@ CreateSdpObserverJni::~CreateSdpObserverJni() = default; void CreateSdpObserverJni::OnSuccess(SessionDescriptionInterface* desc) { JNIEnv* env = AttachCurrentThreadIfNeeded(); - Java_SdpObserver_onCreateSuccess(env, j_observer_global_, - NativeToJavaSessionDescription(env, desc)); + std::string sdp; + RTC_CHECK(desc->ToString(&sdp)) << "got so far: " << sdp; + Java_SdpObserver_onCreateSuccess( + env, j_observer_global_, + NativeToJavaSessionDescription(env, sdp, desc->type())); // OnSuccess transfers ownership of the description (there's a TODO to make // it use unique_ptr...). 
delete desc; @@ -44,24 +47,34 @@ void CreateSdpObserverJni::OnFailure(webrtc::RTCError error) { NativeToJavaString(env, error.message())); } -SetSdpObserverJni::SetSdpObserverJni( +SetLocalSdpObserverJni::SetLocalSdpObserverJni( JNIEnv* env, - const JavaRef& j_observer, - std::unique_ptr constraints) - : j_observer_global_(env, j_observer), - constraints_(std::move(constraints)) {} + const JavaRef& j_observer) + : j_observer_global_(env, j_observer) {} -SetSdpObserverJni::~SetSdpObserverJni() = default; - -void SetSdpObserverJni::OnSuccess() { +void SetLocalSdpObserverJni::OnSetLocalDescriptionComplete(RTCError error) { JNIEnv* env = AttachCurrentThreadIfNeeded(); - Java_SdpObserver_onSetSuccess(env, j_observer_global_); + if (error.ok()) { + Java_SdpObserver_onSetSuccess(env, j_observer_global_); + } else { + Java_SdpObserver_onSetFailure(env, j_observer_global_, + NativeToJavaString(env, error.message())); + } } -void SetSdpObserverJni::OnFailure(webrtc::RTCError error) { +SetRemoteSdpObserverJni::SetRemoteSdpObserverJni( + JNIEnv* env, + const JavaRef& j_observer) + : j_observer_global_(env, j_observer) {} + +void SetRemoteSdpObserverJni::OnSetRemoteDescriptionComplete(RTCError error) { JNIEnv* env = AttachCurrentThreadIfNeeded(); - Java_SdpObserver_onSetFailure(env, j_observer_global_, - NativeToJavaString(env, error.message())); + if (error.ok()) { + Java_SdpObserver_onSetSuccess(env, j_observer_global_); + } else { + Java_SdpObserver_onSetFailure(env, j_observer_global_, + NativeToJavaString(env, error.message())); + } } } // namespace jni diff --git a/sdk/android/src/jni/pc/sdp_observer.h b/sdk/android/src/jni/pc/sdp_observer.h index 68ded76e7d..b33a3018c8 100644 --- a/sdk/android/src/jni/pc/sdp_observer.h +++ b/sdk/android/src/jni/pc/sdp_observer.h @@ -39,21 +39,28 @@ class CreateSdpObserverJni : public CreateSessionDescriptionObserver { std::unique_ptr constraints_; }; -class SetSdpObserverJni : public SetSessionDescriptionObserver { +class 
SetLocalSdpObserverJni : public SetLocalDescriptionObserverInterface { public: - SetSdpObserverJni(JNIEnv* env, - const JavaRef& j_observer, - std::unique_ptr constraints); - ~SetSdpObserverJni() override; + SetLocalSdpObserverJni(JNIEnv* env, const JavaRef& j_observer); - MediaConstraints* constraints() { return constraints_.get(); } + ~SetLocalSdpObserverJni() override = default; - void OnSuccess() override; - void OnFailure(RTCError error) override; + virtual void OnSetLocalDescriptionComplete(RTCError error) override; + + private: + const ScopedJavaGlobalRef j_observer_global_; +}; + +class SetRemoteSdpObserverJni : public SetRemoteDescriptionObserverInterface { + public: + SetRemoteSdpObserverJni(JNIEnv* env, const JavaRef& j_observer); + + ~SetRemoteSdpObserverJni() override = default; + + virtual void OnSetRemoteDescriptionComplete(RTCError error) override; private: const ScopedJavaGlobalRef j_observer_global_; - std::unique_ptr constraints_; }; } // namespace jni diff --git a/sdk/android/src/jni/pc/session_description.cc b/sdk/android/src/jni/pc/session_description.cc index 1b335215dc..bbac721e51 100644 --- a/sdk/android/src/jni/pc/session_description.cc +++ b/sdk/android/src/jni/pc/session_description.cc @@ -37,12 +37,10 @@ std::unique_ptr JavaToNativeSessionDescription( ScopedJavaLocalRef NativeToJavaSessionDescription( JNIEnv* jni, - const SessionDescriptionInterface* desc) { - std::string sdp; - RTC_CHECK(desc->ToString(&sdp)) << "got so far: " << sdp; + const std::string& sdp, + const std::string& type) { return Java_SessionDescription_Constructor( - jni, - Java_Type_fromCanonicalForm(jni, NativeToJavaString(jni, desc->type())), + jni, Java_Type_fromCanonicalForm(jni, NativeToJavaString(jni, type)), NativeToJavaString(jni, sdp)); } diff --git a/sdk/android/src/jni/pc/session_description.h b/sdk/android/src/jni/pc/session_description.h index fe308474a7..f0f49cb2ee 100644 --- a/sdk/android/src/jni/pc/session_description.h +++ 
b/sdk/android/src/jni/pc/session_description.h @@ -13,6 +13,7 @@ #include #include +#include #include "api/jsep.h" #include "sdk/android/native_api/jni/scoped_java_ref.h" @@ -26,7 +27,8 @@ std::unique_ptr JavaToNativeSessionDescription( ScopedJavaLocalRef NativeToJavaSessionDescription( JNIEnv* jni, - const SessionDescriptionInterface* desc); + const std::string& sdp, + const std::string& type); } // namespace jni } // namespace webrtc diff --git a/sdk/android/src/jni/pc/video.cc b/sdk/android/src/jni/pc/video.cc index 605258436e..ee5ecbea6f 100644 --- a/sdk/android/src/jni/pc/video.cc +++ b/sdk/android/src/jni/pc/video.cc @@ -16,6 +16,7 @@ #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_factory.h" #include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" #include "sdk/android/native_api/jni/java_types.h" #include "sdk/android/src/jni/android_video_track_source.h" #include "sdk/android/src/jni/video_decoder_factory_wrapper.h" @@ -45,9 +46,8 @@ void* CreateVideoSource(JNIEnv* env, rtc::Thread* worker_thread, jboolean is_screencast, jboolean align_timestamps) { - rtc::scoped_refptr source( - new rtc::RefCountedObject( - signaling_thread, env, is_screencast, align_timestamps)); + auto source = rtc::make_ref_counted( + signaling_thread, env, is_screencast, align_timestamps); return source.release(); } diff --git a/sdk/android/src/jni/video_decoder_wrapper.cc b/sdk/android/src/jni/video_decoder_wrapper.cc index 54c6e1574c..01fb84fc05 100644 --- a/sdk/android/src/jni/video_decoder_wrapper.cc +++ b/sdk/android/src/jni/video_decoder_wrapper.cc @@ -109,7 +109,7 @@ int32_t VideoDecoderWrapper::Decode( frame_extra_info.qp = qp_parsing_enabled_ ? 
ParseQP(input_image) : absl::nullopt; { - rtc::CritScope cs(&frame_extra_infos_lock_); + MutexLock lock(&frame_extra_infos_lock_); frame_extra_infos_.push_back(frame_extra_info); } @@ -135,7 +135,7 @@ int32_t VideoDecoderWrapper::Release() { jni, Java_VideoDecoder_release(jni, decoder_)); RTC_LOG(LS_INFO) << "release: " << status; { - rtc::CritScope cs(&frame_extra_infos_lock_); + MutexLock lock(&frame_extra_infos_lock_); frame_extra_infos_.clear(); } initialized_ = false; @@ -144,11 +144,6 @@ int32_t VideoDecoderWrapper::Release() { return status; } -bool VideoDecoderWrapper::PrefersLateDecoding() const { - JNIEnv* jni = AttachCurrentThreadIfNeeded(); - return Java_VideoDecoder_getPrefersLateDecoding(jni, decoder_); -} - const char* VideoDecoderWrapper::ImplementationName() const { return implementation_name_.c_str(); } @@ -163,7 +158,7 @@ void VideoDecoderWrapper::OnDecodedFrame( FrameExtraInfo frame_extra_info; { - rtc::CritScope cs(&frame_extra_infos_lock_); + MutexLock lock(&frame_extra_infos_lock_); do { if (frame_extra_infos_.empty()) { @@ -249,12 +244,8 @@ absl::optional VideoDecoderWrapper::ParseQP( break; } case kVideoCodecH264: { - h264_bitstream_parser_.ParseBitstream(input_image.data(), - input_image.size()); - int qp_int; - if (h264_bitstream_parser_.GetLastSliceQp(&qp_int)) { - qp = qp_int; - } + h264_bitstream_parser_.ParseBitstream(input_image); + qp = h264_bitstream_parser_.GetLastSliceQp(); break; } default: diff --git a/sdk/android/src/jni/video_decoder_wrapper.h b/sdk/android/src/jni/video_decoder_wrapper.h index a7f686872c..15f7ab9bf5 100644 --- a/sdk/android/src/jni/video_decoder_wrapper.h +++ b/sdk/android/src/jni/video_decoder_wrapper.h @@ -12,13 +12,15 @@ #define SDK_ANDROID_SRC_JNI_VIDEO_DECODER_WRAPPER_H_ #include + #include #include +#include "api/sequence_checker.h" #include "api/video_codecs/video_decoder.h" #include "common_video/h264/h264_bitstream_parser.h" #include "rtc_base/race_checker.h" -#include "rtc_base/thread_checker.h" 
+#include "rtc_base/synchronization/mutex.h" #include "sdk/android/src/jni/jni_helpers.h" namespace webrtc { @@ -45,11 +47,6 @@ class VideoDecoderWrapper : public VideoDecoder { // still safe and synchronous. int32_t Release() override RTC_NO_THREAD_SAFETY_ANALYSIS; - // Returns true if the decoder prefer to decode frames late. - // That is, it can not decode infinite number of frames before the decoded - // frame is consumed. - bool PrefersLateDecoding() const override; - const char* ImplementationName() const override; // Wraps the frame to a AndroidVideoBuffer and passes it to the callback. @@ -86,7 +83,7 @@ class VideoDecoderWrapper : public VideoDecoder { const ScopedJavaGlobalRef decoder_; const std::string implementation_name_; - rtc::ThreadChecker decoder_thread_checker_; + SequenceChecker decoder_thread_checker_; // Callbacks must be executed sequentially on an arbitrary thread. We do not // own this thread so a thread checker cannot be used. rtc::RaceChecker callback_race_checker_; @@ -103,7 +100,7 @@ class VideoDecoderWrapper : public VideoDecoder { // Accessed both on the decoder thread and the callback thread. 
std::atomic qp_parsing_enabled_; - rtc::CriticalSection frame_extra_infos_lock_; + Mutex frame_extra_infos_lock_; std::deque frame_extra_infos_ RTC_GUARDED_BY(frame_extra_infos_lock_); }; diff --git a/sdk/android/src/jni/video_encoder_factory_wrapper.cc b/sdk/android/src/jni/video_encoder_factory_wrapper.cc index d6a6cfaf2d..8ab4191db2 100644 --- a/sdk/android/src/jni/video_encoder_factory_wrapper.cc +++ b/sdk/android/src/jni/video_encoder_factory_wrapper.cc @@ -101,21 +101,6 @@ std::vector VideoEncoderFactoryWrapper::GetImplementations() return implementations_; } -VideoEncoderFactory::CodecInfo VideoEncoderFactoryWrapper::QueryVideoEncoder( - const SdpVideoFormat& format) const { - JNIEnv* jni = AttachCurrentThreadIfNeeded(); - ScopedJavaLocalRef j_codec_info = - SdpVideoFormatToVideoCodecInfo(jni, format); - ScopedJavaLocalRef encoder = Java_VideoEncoderFactory_createEncoder( - jni, encoder_factory_, j_codec_info); - - CodecInfo codec_info; - // Check if this is a wrapped native software encoder implementation. 
- codec_info.is_hardware_accelerated = IsHardwareVideoEncoder(jni, encoder); - codec_info.has_internal_source = false; - return codec_info; -} - std::unique_ptr VideoEncoderFactoryWrapper::GetEncoderSelector() const { JNIEnv* jni = AttachCurrentThreadIfNeeded(); diff --git a/sdk/android/src/jni/video_encoder_factory_wrapper.h b/sdk/android/src/jni/video_encoder_factory_wrapper.h index 799ae0f2bc..2be6b1b33f 100644 --- a/sdk/android/src/jni/video_encoder_factory_wrapper.h +++ b/sdk/android/src/jni/video_encoder_factory_wrapper.h @@ -37,8 +37,6 @@ class VideoEncoderFactoryWrapper : public VideoEncoderFactory { std::vector GetImplementations() const override; - CodecInfo QueryVideoEncoder(const SdpVideoFormat& format) const override; - std::unique_ptr GetEncoderSelector() const override; private: diff --git a/sdk/android/src/jni/video_encoder_wrapper.cc b/sdk/android/src/jni/video_encoder_wrapper.cc index 1b34e99dce..4e6d764457 100644 --- a/sdk/android/src/jni/video_encoder_wrapper.cc +++ b/sdk/android/src/jni/video_encoder_wrapper.cc @@ -15,6 +15,7 @@ #include "common_video/h264/h264_common.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_error_codes.h" +#include "modules/video_coding/svc/scalable_video_controller_no_layering.h" #include "modules/video_coding/utility/vp8_header_parser.h" #include "modules/video_coding/utility/vp9_uncompressed_header_parser.h" #include "rtc_base/logging.h" @@ -113,7 +114,10 @@ int32_t VideoEncoderWrapper::Release() { int32_t status = JavaToNativeVideoCodecStatus( jni, Java_VideoEncoder_release(jni, encoder_)); RTC_LOG(LS_INFO) << "release: " << status; - frame_extra_infos_.clear(); + { + MutexLock lock(&frame_extra_infos_lock_); + frame_extra_infos_.clear(); + } initialized_ = false; return status; @@ -138,7 +142,10 @@ int32_t VideoEncoderWrapper::Encode( FrameExtraInfo info; info.capture_time_ns = frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec; info.timestamp_rtp = 
frame.timestamp(); - frame_extra_infos_.push_back(info); + { + MutexLock lock(&frame_extra_infos_lock_); + frame_extra_infos_.push_back(info); + } ScopedJavaLocalRef j_frame = NativeToJavaVideoFrame(jni, frame); ScopedJavaLocalRef ret = @@ -229,19 +236,23 @@ void VideoEncoderWrapper::OnEncodedFrame( // entries that don't belong to us, and we need to be careful not to // remove them. Removing only those entries older than the current frame // provides this guarantee. - while (!frame_extra_infos_.empty() && - frame_extra_infos_.front().capture_time_ns < capture_time_ns) { + FrameExtraInfo frame_extra_info; + { + MutexLock lock(&frame_extra_infos_lock_); + while (!frame_extra_infos_.empty() && + frame_extra_infos_.front().capture_time_ns < capture_time_ns) { + frame_extra_infos_.pop_front(); + } + if (frame_extra_infos_.empty() || + frame_extra_infos_.front().capture_time_ns != capture_time_ns) { + RTC_LOG(LS_WARNING) + << "Java encoder produced an unexpected frame with timestamp: " + << capture_time_ns; + return; + } + frame_extra_info = frame_extra_infos_.front(); frame_extra_infos_.pop_front(); } - if (frame_extra_infos_.empty() || - frame_extra_infos_.front().capture_time_ns != capture_time_ns) { - RTC_LOG(LS_WARNING) - << "Java encoder produced an unexpected frame with timestamp: " - << capture_time_ns; - return; - } - FrameExtraInfo frame_extra_info = std::move(frame_extra_infos_.front()); - frame_extra_infos_.pop_front(); // This is a bit subtle. The |frame| variable from the lambda capture is // const. 
Which implies that (i) we need to make a copy to be able to @@ -254,13 +265,12 @@ void VideoEncoderWrapper::OnEncodedFrame( frame_copy.SetTimestamp(frame_extra_info.timestamp_rtp); frame_copy.capture_time_ms_ = capture_time_ns / rtc::kNumNanosecsPerMillisec; - RTPFragmentationHeader header = ParseFragmentationHeader(frame); if (frame_copy.qp_ < 0) frame_copy.qp_ = ParseQp(frame); CodecSpecificInfo info(ParseCodecSpecificInfo(frame)); - callback_->OnEncodedImage(frame_copy, &info, &header); + callback_->OnEncodedImage(frame_copy, &info); } int32_t VideoEncoderWrapper::HandleReturnCode(JNIEnv* jni, @@ -289,35 +299,6 @@ int32_t VideoEncoderWrapper::HandleReturnCode(JNIEnv* jni, return WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE; } -RTPFragmentationHeader VideoEncoderWrapper::ParseFragmentationHeader( - rtc::ArrayView buffer) { - RTPFragmentationHeader header; - if (codec_settings_.codecType == kVideoCodecH264) { - h264_bitstream_parser_.ParseBitstream(buffer.data(), buffer.size()); - - // For H.264 search for start codes. - const std::vector nalu_idxs = - H264::FindNaluIndices(buffer.data(), buffer.size()); - if (nalu_idxs.empty()) { - RTC_LOG(LS_ERROR) << "Start code is not found!"; - RTC_LOG(LS_ERROR) << "Data:" << buffer[0] << " " << buffer[1] << " " - << buffer[2] << " " << buffer[3] << " " << buffer[4] - << " " << buffer[5]; - } - header.VerifyAndAllocateFragmentationHeader(nalu_idxs.size()); - for (size_t i = 0; i < nalu_idxs.size(); i++) { - header.fragmentationOffset[i] = nalu_idxs[i].payload_start_offset; - header.fragmentationLength[i] = nalu_idxs[i].payload_size; - } - } else { - // Generate a header describing a single fragment. 
- header.VerifyAndAllocateFragmentationHeader(1); - header.fragmentationOffset[0] = 0; - header.fragmentationLength[0] = buffer.size(); - } - return header; -} - int VideoEncoderWrapper::ParseQp(rtc::ArrayView buffer) { int qp; bool success; @@ -329,7 +310,9 @@ int VideoEncoderWrapper::ParseQp(rtc::ArrayView buffer) { success = vp9::GetQp(buffer.data(), buffer.size(), &qp); break; case kVideoCodecH264: - success = h264_bitstream_parser_.GetLastSliceQp(&qp); + h264_bitstream_parser_.ParseBitstream(buffer); + qp = h264_bitstream_parser_.GetLastSliceQp().value_or(-1); + success = (qp >= 0); break; default: // Default is to not provide QP. success = false; @@ -343,6 +326,19 @@ CodecSpecificInfo VideoEncoderWrapper::ParseCodecSpecificInfo( const bool key_frame = frame._frameType == VideoFrameType::kVideoFrameKey; CodecSpecificInfo info; + // For stream with scalability, NextFrameConfig should be called before + // encoding and used to configure encoder, then passed here e.g. via + // FrameExtraInfo structure. But while this encoder wrapper uses only trivial + // scalability, NextFrameConfig can be called here. 
+ auto layer_frames = svc_controller_.NextFrameConfig(/*reset=*/key_frame); + RTC_DCHECK_EQ(layer_frames.size(), 1); + info.generic_frame_info = svc_controller_.OnEncodeDone(layer_frames[0]); + if (key_frame) { + info.template_structure = svc_controller_.DependencyStructure(); + info.template_structure->resolutions = { + RenderResolution(frame._encodedWidth, frame._encodedHeight)}; + } + info.codecType = codec_settings_.codecType; switch (codec_settings_.codecType) { @@ -366,7 +362,6 @@ CodecSpecificInfo VideoEncoderWrapper::ParseCodecSpecificInfo( static_cast(gof_idx_++ % gof_.num_frames_in_gof); info.codecSpecific.VP9.num_spatial_layers = 1; info.codecSpecific.VP9.first_frame_in_picture = true; - info.codecSpecific.VP9.end_of_picture = true; info.codecSpecific.VP9.spatial_layer_resolution_present = false; if (info.codecSpecific.VP9.ss_data_available) { info.codecSpecific.VP9.spatial_layer_resolution_present = true; diff --git a/sdk/android/src/jni/video_encoder_wrapper.h b/sdk/android/src/jni/video_encoder_wrapper.h index 0e9d37bf23..1a42b05bc6 100644 --- a/sdk/android/src/jni/video_encoder_wrapper.h +++ b/sdk/android/src/jni/video_encoder_wrapper.h @@ -12,6 +12,7 @@ #define SDK_ANDROID_SRC_JNI_VIDEO_ENCODER_WRAPPER_H_ #include + #include #include #include @@ -21,6 +22,8 @@ #include "api/video_codecs/video_encoder.h" #include "common_video/h264/h264_bitstream_parser.h" #include "modules/video_coding/codecs/vp9/include/vp9_globals.h" +#include "modules/video_coding/svc/scalable_video_controller_no_layering.h" +#include "rtc_base/synchronization/mutex.h" #include "sdk/android/src/jni/jni_helpers.h" #include "sdk/android/src/jni/video_frame.h" @@ -67,8 +70,6 @@ class VideoEncoderWrapper : public VideoEncoder { const JavaRef& j_value, const char* method_name); - RTPFragmentationHeader ParseFragmentationHeader( - rtc::ArrayView buffer); int ParseQp(rtc::ArrayView buffer); CodecSpecificInfo ParseCodecSpecificInfo(const EncodedImage& frame); ScopedJavaLocalRef 
ToJavaBitrateAllocation( @@ -84,7 +85,10 @@ class VideoEncoderWrapper : public VideoEncoder { const ScopedJavaGlobalRef encoder_; const ScopedJavaGlobalRef int_array_class_; - std::deque frame_extra_infos_; + // Modified both on the encoder thread and the callback thread. + Mutex frame_extra_infos_lock_; + std::deque frame_extra_infos_ + RTC_GUARDED_BY(frame_extra_infos_lock_); EncodedImageCallback* callback_; bool initialized_; int num_resets_; @@ -94,6 +98,8 @@ class VideoEncoderWrapper : public VideoEncoder { EncoderInfo encoder_info_; H264BitstreamParser h264_bitstream_parser_; + // Fills frame dependencies in codec-agnostic format. + ScalableVideoControllerNoLayering svc_controller_; // VP9 variables to populate codec specific structure. GofInfoVP9 gof_; // Contains each frame's temporal information for // non-flexible VP9 mode. diff --git a/sdk/android/src/jni/video_frame.cc b/sdk/android/src/jni/video_frame.cc index d57fe8f9b7..98728032e8 100644 --- a/sdk/android/src/jni/video_frame.cc +++ b/sdk/android/src/jni/video_frame.cc @@ -14,10 +14,9 @@ #include "api/scoped_refptr.h" #include "common_video/include/video_frame_buffer.h" -#include "rtc_base/bind.h" #include "rtc_base/checks.h" -#include "rtc_base/keep_ref_until_done.h" #include "rtc_base/logging.h" +#include "rtc_base/ref_counted_object.h" #include "rtc_base/time_utils.h" #include "sdk/android/generated_video_jni/VideoFrame_jni.h" #include "sdk/android/src/jni/jni_helpers.h" @@ -78,8 +77,8 @@ rtc::scoped_refptr AndroidVideoI420Buffer::Adopt( int width, int height, const JavaRef& j_video_frame_buffer) { - return new rtc::RefCountedObject( - jni, width, height, j_video_frame_buffer); + return rtc::make_ref_counted(jni, width, height, + j_video_frame_buffer); } AndroidVideoI420Buffer::AndroidVideoI420Buffer( @@ -124,8 +123,7 @@ int64_t GetJavaVideoFrameTimestampNs(JNIEnv* jni, rtc::scoped_refptr AndroidVideoBuffer::Adopt( JNIEnv* jni, const JavaRef& j_video_frame_buffer) { - return new 
rtc::RefCountedObject(jni, - j_video_frame_buffer); + return rtc::make_ref_counted(jni, j_video_frame_buffer); } rtc::scoped_refptr AndroidVideoBuffer::Create( @@ -152,14 +150,14 @@ const ScopedJavaGlobalRef& AndroidVideoBuffer::video_frame_buffer() return j_video_frame_buffer_; } -rtc::scoped_refptr AndroidVideoBuffer::CropAndScale( - JNIEnv* jni, +rtc::scoped_refptr AndroidVideoBuffer::CropAndScale( int crop_x, int crop_y, int crop_width, int crop_height, int scale_width, int scale_height) { + JNIEnv* jni = AttachCurrentThreadIfNeeded(); return Adopt(jni, Java_Buffer_cropAndScale(jni, j_video_frame_buffer_, crop_x, crop_y, crop_width, crop_height, scale_width, scale_height)); @@ -181,6 +179,10 @@ rtc::scoped_refptr AndroidVideoBuffer::ToI420() { JNIEnv* jni = AttachCurrentThreadIfNeeded(); ScopedJavaLocalRef j_i420_buffer = Java_Buffer_toI420(jni, j_video_frame_buffer_); + // In case I420 conversion fails, we propagate the nullptr. + if (j_i420_buffer.is_null()) { + return nullptr; + } // We don't need to retain the buffer because toI420 returns a new object that // we are assumed to take the ownership of. diff --git a/sdk/android/src/jni/video_frame.h b/sdk/android/src/jni/video_frame.h index f6b569a3e5..d1e463bba2 100644 --- a/sdk/android/src/jni/video_frame.h +++ b/sdk/android/src/jni/video_frame.h @@ -16,7 +16,6 @@ #include "api/video/video_frame.h" #include "api/video/video_frame_buffer.h" #include "api/video/video_rotation.h" -#include "rtc_base/callback.h" #include "sdk/android/src/jni/jni_helpers.h" namespace webrtc { @@ -42,13 +41,12 @@ class AndroidVideoBuffer : public VideoFrameBuffer { // Crops a region defined by |crop_x|, |crop_y|, |crop_width| and // |crop_height|. Scales it to size |scale_width| x |scale_height|. 
- rtc::scoped_refptr CropAndScale(JNIEnv* jni, - int crop_x, - int crop_y, - int crop_width, - int crop_height, - int scale_width, - int scale_height); + rtc::scoped_refptr CropAndScale(int crop_x, + int crop_y, + int crop_width, + int crop_height, + int scale_width, + int scale_height) override; protected: // Should not be called directly. Adopts the Java VideoFrame.Buffer. Use diff --git a/sdk/android/tests/resources/robolectric.properties b/sdk/android/tests/resources/robolectric.properties index 3acb7e5d59..a9bc625b18 100644 --- a/sdk/android/tests/resources/robolectric.properties +++ b/sdk/android/tests/resources/robolectric.properties @@ -1 +1 @@ -sdk=19,21,25,26 +sdk=21,25,26 diff --git a/sdk/android/tests/src/org/webrtc/AndroidVideoDecoderTest.java b/sdk/android/tests/src/org/webrtc/AndroidVideoDecoderTest.java index c1e8f6c3b3..644b24b1b3 100644 --- a/sdk/android/tests/src/org/webrtc/AndroidVideoDecoderTest.java +++ b/sdk/android/tests/src/org/webrtc/AndroidVideoDecoderTest.java @@ -187,7 +187,6 @@ private EncodedImage createTestEncodedImage() { return EncodedImage.builder() .setBuffer(ByteBuffer.wrap(ENCODED_TEST_DATA), null) .setFrameType(FrameType.VideoFrameKey) - .setCompleteFrame(true) .createEncodedImage(); } diff --git a/sdk/android/tests/src/org/webrtc/HardwareVideoEncoderTest.java b/sdk/android/tests/src/org/webrtc/HardwareVideoEncoderTest.java index 2eca89f575..728e401495 100644 --- a/sdk/android/tests/src/org/webrtc/HardwareVideoEncoderTest.java +++ b/sdk/android/tests/src/org/webrtc/HardwareVideoEncoderTest.java @@ -233,7 +233,6 @@ public void testDeliversOutputData() throws InterruptedException { assertThat(videoFrame.encodedHeight).isEqualTo(TEST_ENCODER_SETTINGS.height); assertThat(videoFrame.rotation).isEqualTo(0); assertThat(videoFrame.captureTimeNs).isEqualTo(42); - assertThat(videoFrame.completeFrame).isTrue(); assertThat(videoFrame.frameType).isEqualTo(FrameType.VideoFrameKey); CodecTestHelper.assertEqualContents( outputData, 
videoFrame.buffer, /* offset= */ 0, videoFrame.buffer.capacity()); diff --git a/sdk/android/tests/src/org/webrtc/audio/LowLatencyAudioBufferManagerTest.java b/sdk/android/tests/src/org/webrtc/audio/LowLatencyAudioBufferManagerTest.java new file mode 100644 index 0000000000..c76ee8dab8 --- /dev/null +++ b/sdk/android/tests/src/org/webrtc/audio/LowLatencyAudioBufferManagerTest.java @@ -0,0 +1,104 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +package org.webrtc.audio; + +import static org.junit.Assert.assertTrue; +import static org.mockito.AdditionalMatchers.gt; +import static org.mockito.AdditionalMatchers.lt; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import android.media.AudioTrack; +import android.os.Build; +import org.chromium.testing.local.LocalRobolectricTestRunner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.robolectric.annotation.Config; +import org.webrtc.audio.LowLatencyAudioBufferManager; + +/** + * Tests for LowLatencyAudioBufferManager. 
+ */ +@RunWith(LocalRobolectricTestRunner.class) +@Config(manifest = Config.NONE, sdk = Build.VERSION_CODES.O) +public class LowLatencyAudioBufferManagerTest { + @Mock private AudioTrack mockAudioTrack; + private LowLatencyAudioBufferManager bufferManager; + + @Before + public void setUp() { + MockitoAnnotations.initMocks(this); + bufferManager = new LowLatencyAudioBufferManager(); + } + + @Test + public void testBufferSizeDecrease() { + when(mockAudioTrack.getUnderrunCount()).thenReturn(0); + when(mockAudioTrack.getBufferSizeInFrames()).thenReturn(100); + when(mockAudioTrack.getPlaybackRate()).thenReturn(1000); + for (int i = 0; i < 9; i++) { + bufferManager.maybeAdjustBufferSize(mockAudioTrack); + } + // Check that the buffer size was not changed yet. + verify(mockAudioTrack, times(0)).setBufferSizeInFrames(anyInt()); + // After the 10th call without underruns, we expect the buffer size to decrease. + bufferManager.maybeAdjustBufferSize(mockAudioTrack); + // The expected size is 10ms below the existing size, which works out to 100 - (1000 / 100) + // = 90. + verify(mockAudioTrack, times(1)).setBufferSizeInFrames(90); + } + + @Test + public void testBufferSizeNeverBelow10ms() { + when(mockAudioTrack.getUnderrunCount()).thenReturn(0); + when(mockAudioTrack.getBufferSizeInFrames()).thenReturn(11); + when(mockAudioTrack.getPlaybackRate()).thenReturn(1000); + for (int i = 0; i < 10; i++) { + bufferManager.maybeAdjustBufferSize(mockAudioTrack); + } + // Check that the buffer size was not set to a value below 10 ms. + verify(mockAudioTrack, times(0)).setBufferSizeInFrames(lt(10)); + } + + @Test + public void testUnderrunBehavior() { + when(mockAudioTrack.getUnderrunCount()).thenReturn(1); + when(mockAudioTrack.getBufferSizeInFrames()).thenReturn(100); + when(mockAudioTrack.getPlaybackRate()).thenReturn(1000); + bufferManager.maybeAdjustBufferSize(mockAudioTrack); + // Check that the buffer size was increased after the underrrun. 
+ verify(mockAudioTrack, times(1)).setBufferSizeInFrames(gt(100)); + when(mockAudioTrack.getUnderrunCount()).thenReturn(0); + for (int i = 0; i < 10; i++) { + bufferManager.maybeAdjustBufferSize(mockAudioTrack); + } + // Check that the buffer size was not changed again, even though there were no underruns for + // 10 calls. + verify(mockAudioTrack, times(1)).setBufferSizeInFrames(anyInt()); + } + + @Test + public void testBufferIncrease() { + when(mockAudioTrack.getBufferSizeInFrames()).thenReturn(100); + when(mockAudioTrack.getPlaybackRate()).thenReturn(1000); + for (int i = 1; i < 30; i++) { + when(mockAudioTrack.getUnderrunCount()).thenReturn(i); + bufferManager.maybeAdjustBufferSize(mockAudioTrack); + } + // Check that the buffer size was not increased more than 5 times. + verify(mockAudioTrack, times(5)).setBufferSizeInFrames(gt(100)); + } +} diff --git a/sdk/media_constraints.cc b/sdk/media_constraints.cc index faf393bf34..6f4901c97e 100644 --- a/sdk/media_constraints.cc +++ b/sdk/media_constraints.cc @@ -118,7 +118,6 @@ const char MediaConstraints::kUseRtpMux[] = "googUseRtpMUX"; // Below constraints should be used during PeerConnection construction. const char MediaConstraints::kEnableDtlsSrtp[] = "DtlsSrtpKeyAgreement"; -const char MediaConstraints::kEnableRtpDataChannels[] = "RtpDataChannels"; // Google-specific constraint keys. const char MediaConstraints::kEnableDscp[] = "googDscp"; const char MediaConstraints::kEnableIPv6[] = "googIPv6"; @@ -167,8 +166,6 @@ void CopyConstraintsIntoRtcConfiguration( FindConstraint(constraints, MediaConstraints::kCpuOveruseDetection, &configuration->media_config.video.enable_cpu_adaptation, nullptr); - FindConstraint(constraints, MediaConstraints::kEnableRtpDataChannels, - &configuration->enable_rtp_data_channel, nullptr); // Find Suspend Below Min Bitrate constraint. 
FindConstraint( constraints, MediaConstraints::kEnableVideoSuspendBelowMinBitrate, diff --git a/sdk/media_constraints.h b/sdk/media_constraints.h index b85dc472e0..15cb363f7c 100644 --- a/sdk/media_constraints.h +++ b/sdk/media_constraints.h @@ -85,8 +85,6 @@ class MediaConstraints { // PeerConnection constraint keys. // Temporary pseudo-constraints used to enable DTLS-SRTP static const char kEnableDtlsSrtp[]; // Enable DTLS-SRTP - // Temporary pseudo-constraints used to enable DataChannels - static const char kEnableRtpDataChannels[]; // Enable RTP DataChannels // Google-specific constraint keys. // Temporary pseudo-constraint for enabling DSCP through JS. static const char kEnableDscp[]; // googDscp diff --git a/sdk/media_constraints_unittest.cc b/sdk/media_constraints_unittest.cc index 7fd7f67dc8..dab85eb971 100644 --- a/sdk/media_constraints_unittest.cc +++ b/sdk/media_constraints_unittest.cc @@ -23,7 +23,6 @@ bool Matches(const PeerConnectionInterface::RTCConfiguration& a, return a.disable_ipv6 == b.disable_ipv6 && a.audio_jitter_buffer_max_packets == b.audio_jitter_buffer_max_packets && - a.enable_rtp_data_channel == b.enable_rtp_data_channel && a.screencast_min_bitrate == b.screencast_min_bitrate && a.combined_audio_video_bwe == b.combined_audio_video_bwe && a.enable_dtls_srtp == b.enable_dtls_srtp && diff --git a/sdk/objc/Framework/Classes/PeerConnection/RTCVideoCodec+Private.h b/sdk/objc/Framework/Classes/PeerConnection/RTCVideoCodec+Private.h deleted file mode 100644 index 3233e4e9f2..0000000000 --- a/sdk/objc/Framework/Classes/PeerConnection/RTCVideoCodec+Private.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCEncodedImage+Private.h" -#import "api/peerconnection/RTCRtpFragmentationHeader+Private.h" -#import "api/peerconnection/RTCVideoCodecInfo+Private.h" -#import "api/peerconnection/RTCVideoEncoderSettings+Private.h" -#import "components/video_codec/RTCCodecSpecificInfoH264+Private.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h b/sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h deleted file mode 100644 index 6ee1bc5435..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "components/audio/RTCAudioSessionConfiguration.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCCVPixelBuffer.h b/sdk/objc/Framework/Headers/WebRTC/RTCCVPixelBuffer.h deleted file mode 100644 index 7a4f847be5..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCCVPixelBuffer.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "components/video_frame_buffer/RTCCVPixelBuffer.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCCallbackLogger.h b/sdk/objc/Framework/Headers/WebRTC/RTCCallbackLogger.h deleted file mode 100644 index c4585228ed..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCCallbackLogger.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/logging/RTCCallbackLogger.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCCameraPreviewView.h b/sdk/objc/Framework/Headers/WebRTC/RTCCameraPreviewView.h deleted file mode 100644 index 388e72fce0..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCCameraPreviewView.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "helpers/RTCCameraPreviewView.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCCameraVideoCapturer.h b/sdk/objc/Framework/Headers/WebRTC/RTCCameraVideoCapturer.h deleted file mode 100644 index aac6773b6c..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCCameraVideoCapturer.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "components/capturer/RTCCameraVideoCapturer.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCCertificate.h b/sdk/objc/Framework/Headers/WebRTC/RTCCertificate.h deleted file mode 100644 index 9e3b7dcf1a..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCCertificate.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCCertificate.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCConfiguration.h b/sdk/objc/Framework/Headers/WebRTC/RTCConfiguration.h deleted file mode 100644 index fe2f0cdfdb..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCConfiguration.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "api/peerconnection/RTCConfiguration.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCDataChannel.h b/sdk/objc/Framework/Headers/WebRTC/RTCDataChannel.h deleted file mode 100644 index 4f35079471..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCDataChannel.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCDataChannel.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCDataChannelConfiguration.h b/sdk/objc/Framework/Headers/WebRTC/RTCDataChannelConfiguration.h deleted file mode 100644 index 20cb4e1c51..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCDataChannelConfiguration.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCDataChannelConfiguration.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCDispatcher.h b/sdk/objc/Framework/Headers/WebRTC/RTCDispatcher.h deleted file mode 100644 index 2ae9cd89c8..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCDispatcher.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "helpers/RTCDispatcher.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCDtmfSender.h b/sdk/objc/Framework/Headers/WebRTC/RTCDtmfSender.h deleted file mode 100644 index 20407102c6..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCDtmfSender.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCDtmfSender.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCEAGLVideoView.h b/sdk/objc/Framework/Headers/WebRTC/RTCEAGLVideoView.h deleted file mode 100644 index ec5b1d7087..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCEAGLVideoView.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "components/renderer/opengl/RTCEAGLVideoView.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCFieldTrials.h b/sdk/objc/Framework/Headers/WebRTC/RTCFieldTrials.h deleted file mode 100644 index 386989abf0..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCFieldTrials.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCFieldTrials.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCFileLogger.h b/sdk/objc/Framework/Headers/WebRTC/RTCFileLogger.h deleted file mode 100644 index ae7d9ef95f..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCFileLogger.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCFileLogger.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCFileVideoCapturer.h b/sdk/objc/Framework/Headers/WebRTC/RTCFileVideoCapturer.h deleted file mode 100644 index 344eca9b00..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCFileVideoCapturer.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "components/capturer/RTCFileVideoCapturer.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCH264ProfileLevelId.h b/sdk/objc/Framework/Headers/WebRTC/RTCH264ProfileLevelId.h deleted file mode 100644 index 57798148a0..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCH264ProfileLevelId.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "components/video_codec/RTCH264ProfileLevelId.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCIceCandidate.h b/sdk/objc/Framework/Headers/WebRTC/RTCIceCandidate.h deleted file mode 100644 index dbeea22000..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCIceCandidate.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "api/peerconnection/RTCIceCandidate.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCIceServer.h b/sdk/objc/Framework/Headers/WebRTC/RTCIceServer.h deleted file mode 100644 index 967b19c066..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCIceServer.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCIceServer.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCLegacyStatsReport.h b/sdk/objc/Framework/Headers/WebRTC/RTCLegacyStatsReport.h deleted file mode 100644 index a727a495fc..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCLegacyStatsReport.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCLegacyStatsReport.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCLogging.h b/sdk/objc/Framework/Headers/WebRTC/RTCLogging.h deleted file mode 100644 index bb5e25dbde..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCLogging.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "base/RTCLogging.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMTLNSVideoView.h b/sdk/objc/Framework/Headers/WebRTC/RTCMTLNSVideoView.h deleted file mode 100644 index 4368a8a95b..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMTLNSVideoView.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "components/renderer/metal/RTCMTLNSVideoView.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMTLVideoView.h b/sdk/objc/Framework/Headers/WebRTC/RTCMTLVideoView.h deleted file mode 100644 index 9f43dc5613..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMTLVideoView.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "components/renderer/metal/RTCMTLVideoView.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMacros.h b/sdk/objc/Framework/Headers/WebRTC/RTCMacros.h deleted file mode 100644 index 8582a33600..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMacros.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC Project Authors. 
All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "base/RTCMacros.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMediaConstraints.h b/sdk/objc/Framework/Headers/WebRTC/RTCMediaConstraints.h deleted file mode 100644 index 1059725c05..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMediaConstraints.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCMediaConstraints.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMediaSource.h b/sdk/objc/Framework/Headers/WebRTC/RTCMediaSource.h deleted file mode 100644 index f642524d2a..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMediaSource.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "api/peerconnection/RTCMediaSource.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMediaStream.h b/sdk/objc/Framework/Headers/WebRTC/RTCMediaStream.h deleted file mode 100644 index 9b4d03213c..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMediaStream.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCMediaStream.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMediaStreamTrack.h b/sdk/objc/Framework/Headers/WebRTC/RTCMediaStreamTrack.h deleted file mode 100644 index 9de0edbfbd..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMediaStreamTrack.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCMediaStreamTrack.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMetrics.h b/sdk/objc/Framework/Headers/WebRTC/RTCMetrics.h deleted file mode 100644 index b44289ea34..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMetrics.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCMetrics.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCMetricsSampleInfo.h b/sdk/objc/Framework/Headers/WebRTC/RTCMetricsSampleInfo.h deleted file mode 100644 index 5ee84c1457..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCMetricsSampleInfo.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCMetricsSampleInfo.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCNSGLVideoView.h b/sdk/objc/Framework/Headers/WebRTC/RTCNSGLVideoView.h deleted file mode 100644 index 3fa89aa30e..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCNSGLVideoView.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "components/renderer/opengl/RTCNSGLVideoView.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnection.h b/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnection.h deleted file mode 100644 index df9bc85cdd..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnection.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCPeerConnection.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnectionFactory.h b/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnectionFactory.h deleted file mode 100644 index ff544a00c8..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnectionFactory.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCPeerConnectionFactory.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnectionFactoryOptions.h b/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnectionFactoryOptions.h deleted file mode 100644 index c4699c963e..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCPeerConnectionFactoryOptions.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCPeerConnectionFactoryOptions.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCRtcpParameters.h b/sdk/objc/Framework/Headers/WebRTC/RTCRtcpParameters.h deleted file mode 100644 index 28f4f5fcfc..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCRtcpParameters.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCRtcpParameters.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCRtpCodecParameters.h b/sdk/objc/Framework/Headers/WebRTC/RTCRtpCodecParameters.h deleted file mode 100644 index d4b76012f9..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCRtpCodecParameters.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "api/peerconnection/RTCRtpCodecParameters.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCRtpEncodingParameters.h b/sdk/objc/Framework/Headers/WebRTC/RTCRtpEncodingParameters.h deleted file mode 100644 index a1510ba98f..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCRtpEncodingParameters.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCRtpEncodingParameters.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCRtpHeaderExtension.h b/sdk/objc/Framework/Headers/WebRTC/RTCRtpHeaderExtension.h deleted file mode 100644 index 3bc6b2ba54..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCRtpHeaderExtension.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCRtpHeaderExtension.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCRtpParameters.h b/sdk/objc/Framework/Headers/WebRTC/RTCRtpParameters.h deleted file mode 100644 index 0e5b7e2178..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCRtpParameters.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCRtpParameters.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCRtpReceiver.h b/sdk/objc/Framework/Headers/WebRTC/RTCRtpReceiver.h deleted file mode 100644 index ff61f824a0..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCRtpReceiver.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCRtpReceiver.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCRtpSender.h b/sdk/objc/Framework/Headers/WebRTC/RTCRtpSender.h deleted file mode 100644 index d5a4e65d09..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCRtpSender.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "api/peerconnection/RTCRtpSender.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCRtpTransceiver.h b/sdk/objc/Framework/Headers/WebRTC/RTCRtpTransceiver.h deleted file mode 100644 index ff07e29fb7..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCRtpTransceiver.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCRtpTransceiver.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCSSLAdapter.h b/sdk/objc/Framework/Headers/WebRTC/RTCSSLAdapter.h deleted file mode 100644 index c721875e70..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCSSLAdapter.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCSSLAdapter.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCSessionDescription.h b/sdk/objc/Framework/Headers/WebRTC/RTCSessionDescription.h deleted file mode 100644 index b5d55f7729..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCSessionDescription.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCSessionDescription.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCTracing.h b/sdk/objc/Framework/Headers/WebRTC/RTCTracing.h deleted file mode 100644 index fd4a6ffecb..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCTracing.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC Project Authors. All rights reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCTracing.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCapturer.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoCapturer.h deleted file mode 100644 index 93586fc0d5..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCapturer.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "base/RTCVideoCapturer.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodec.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodec.h deleted file mode 100644 index b4511a9985..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodec.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/video_codec/RTCVideoCodecConstants.h" -#import "base/RTCCodecSpecificInfo.h" -#import "base/RTCEncodedImage.h" -#import "base/RTCRtpFragmentationHeader.h" -#import "base/RTCVideoCodecInfo.h" -#import "base/RTCVideoDecoder.h" -#import "base/RTCVideoEncoder.h" -#import "base/RTCVideoEncoderQpThresholds.h" -#import "base/RTCVideoEncoderSettings.h" -#import "components/video_codec/RTCH264ProfileLevelId.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecH264.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecH264.h deleted file mode 100644 index d1ff92e27a..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecH264.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "components/video_codec/RTCCodecSpecificInfoH264.h" -#import "components/video_codec/RTCH264ProfileLevelId.h" -#import "components/video_codec/RTCVideoDecoderFactoryH264.h" -#import "components/video_codec/RTCVideoDecoderH264.h" -#import "components/video_codec/RTCVideoEncoderFactoryH264.h" -#import "components/video_codec/RTCVideoEncoderH264.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecInfo.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecInfo.h deleted file mode 100644 index 10c5fea465..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoCodecInfo.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "base/RTCVideoCodecInfo.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoDecoderVP8.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoDecoderVP8.h deleted file mode 100644 index 9c9e840e6d..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoDecoderVP8.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "api/video_codec/RTCVideoDecoderVP8.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoDecoderVP9.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoDecoderVP9.h deleted file mode 100644 index 1a7c38267b..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoDecoderVP9.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/video_codec/RTCVideoDecoderVP9.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoEncoderVP8.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoEncoderVP8.h deleted file mode 100644 index 65a7850884..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoEncoderVP8.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/video_codec/RTCVideoEncoderVP8.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoEncoderVP9.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoEncoderVP9.h deleted file mode 100644 index 5d01835805..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoEncoderVP9.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/video_codec/RTCVideoEncoderVP9.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoFrame.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoFrame.h deleted file mode 100644 index ec1543d8e3..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoFrame.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "base/RTCVideoFrame.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoFrameBuffer.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoFrameBuffer.h deleted file mode 100644 index 8d4be50868..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoFrameBuffer.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "api/video_frame_buffer/RTCNativeI420Buffer.h" -#import "api/video_frame_buffer/RTCNativeMutableI420Buffer.h" -#import "base/RTCI420Buffer.h" -#import "base/RTCMutableI420Buffer.h" -#import "base/RTCMutableYUVPlanarBuffer.h" -#import "base/RTCVideoFrameBuffer.h" -#import "base/RTCYUVPlanarBuffer.h" -#import "components/video_frame_buffer/RTCCVPixelBuffer.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoRenderer.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoRenderer.h deleted file mode 100644 index 0e32c02c9a..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoRenderer.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "base/RTCVideoRenderer.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoSource.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoSource.h deleted file mode 100644 index 91e635de6c..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoSource.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "api/peerconnection/RTCVideoSource.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoTrack.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoTrack.h deleted file mode 100644 index b5dd1ddf59..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoTrack.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "api/peerconnection/RTCVideoTrack.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCVideoViewShading.h b/sdk/objc/Framework/Headers/WebRTC/RTCVideoViewShading.h deleted file mode 100644 index 7c6cd7a2e5..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/RTCVideoViewShading.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "components/renderer/opengl/RTCVideoViewShading.h" diff --git a/sdk/objc/Framework/Headers/WebRTC/UIDevice+RTCDevice.h b/sdk/objc/Framework/Headers/WebRTC/UIDevice+RTCDevice.h deleted file mode 100644 index 724dc8dca4..0000000000 --- a/sdk/objc/Framework/Headers/WebRTC/UIDevice+RTCDevice.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "helpers/UIDevice+RTCDevice.h" diff --git a/sdk/objc/Info.plist b/sdk/objc/Info.plist index 38c437e7fe..2e243bdcd5 100644 --- a/sdk/objc/Info.plist +++ b/sdk/objc/Info.plist @@ -7,7 +7,7 @@ CFBundleExecutable WebRTC CFBundleIdentifier - org.webrtc.WebRTC + com.sendbird.calls.webrtc CFBundleInfoDictionaryVersion 6.0 CFBundleName @@ -15,12 +15,10 @@ CFBundlePackageType FMWK CFBundleShortVersionString - 1.0 + 1.4.0 CFBundleSignature ???? CFBundleVersion - 1.0 - NSPrincipalClass - + 1.4.0 diff --git a/sdk/objc/api/peerconnection/RTCConfiguration.h b/sdk/objc/api/peerconnection/RTCConfiguration.h index 4e9c674ef8..02461b084c 100644 --- a/sdk/objc/api/peerconnection/RTCConfiguration.h +++ b/sdk/objc/api/peerconnection/RTCConfiguration.h @@ -197,18 +197,6 @@ RTC_OBJC_EXPORT @property(nonatomic, assign) BOOL allowCodecSwitching; -/** - * If MediaTransportFactory is provided in PeerConnectionFactory, this flag informs PeerConnection - * that it should use the MediaTransportInterface. - */ -@property(nonatomic, assign) BOOL useMediaTransport; - -/** - * If MediaTransportFactory is provided in PeerConnectionFactory, this flag informs PeerConnection - * that it should use the MediaTransportInterface for data channels. - */ -@property(nonatomic, assign) BOOL useMediaTransportForDataChannels; - /** * Defines advanced optional cryptographic settings related to SRTP and * frame encryption for native WebRTC. 
Setting this will overwrite any @@ -216,6 +204,12 @@ RTC_OBJC_EXPORT */ @property(nonatomic, nullable) RTC_OBJC_TYPE(RTCCryptoOptions) * cryptoOptions; +/** + * An optional string that will be attached to the TURN_ALLOCATE_REQUEST which + * which can be used to correlate client logs with backend logs. + */ +@property(nonatomic, nullable, copy) NSString *turnLoggingId; + /** * Time interval between audio RTCP reports. */ @@ -226,6 +220,54 @@ RTC_OBJC_EXPORT */ @property(nonatomic, assign) int rtcpVideoReportIntervalMs; +/** + * Allow implicit rollback of local description when remote description + * conflicts with local description. + * See: https://w3c.github.io/webrtc-pc/#dom-peerconnection-setremotedescription + */ +@property(nonatomic, assign) BOOL enableImplicitRollback; + +/** + * Control if "a=extmap-allow-mixed" is included in the offer. + * See: https://www.chromestatus.com/feature/6269234631933952 + */ +@property(nonatomic, assign) BOOL offerExtmapAllowMixed; + +/** + * Defines the interval applied to ALL candidate pairs + * when ICE is strongly connected, and it overrides the + * default value of this interval in the ICE implementation; + */ +@property(nonatomic, copy, nullable) NSNumber *iceCheckIntervalStrongConnectivity; + +/** + * Defines the counterpart for ALL pairs when ICE is + * weakly connected, and it overrides the default value of + * this interval in the ICE implementation + */ +@property(nonatomic, copy, nullable) NSNumber *iceCheckIntervalWeakConnectivity; + +/** + * The min time period for which a candidate pair must wait for response to + * connectivity checks before it becomes unwritable. This parameter + * overrides the default value in the ICE implementation if set. + */ +@property(nonatomic, copy, nullable) NSNumber *iceUnwritableTimeout; + +/** + * The min number of connectivity checks that a candidate pair must sent + * without receiving response before it becomes unwritable. 
This parameter + * overrides the default value in the ICE implementation if set. + */ +@property(nonatomic, copy, nullable) NSNumber *iceUnwritableMinChecks; + +/** + * The min time period for which a candidate pair must wait for response to + * connectivity checks it becomes inactive. This parameter overrides the + * default value in the ICE implementation if set. + */ +@property(nonatomic, copy, nullable) NSNumber *iceInactiveTimeout; + - (instancetype)init; @end diff --git a/sdk/objc/api/peerconnection/RTCConfiguration.mm b/sdk/objc/api/peerconnection/RTCConfiguration.mm index 52c1450505..0f0239f93d 100644 --- a/sdk/objc/api/peerconnection/RTCConfiguration.mm +++ b/sdk/objc/api/peerconnection/RTCConfiguration.mm @@ -52,11 +52,17 @@ @implementation RTC_OBJC_TYPE (RTCConfiguration) @synthesize turnCustomizer = _turnCustomizer; @synthesize activeResetSrtpParams = _activeResetSrtpParams; @synthesize allowCodecSwitching = _allowCodecSwitching; -@synthesize useMediaTransport = _useMediaTransport; -@synthesize useMediaTransportForDataChannels = _useMediaTransportForDataChannels; @synthesize cryptoOptions = _cryptoOptions; +@synthesize turnLoggingId = _turnLoggingId; @synthesize rtcpAudioReportIntervalMs = _rtcpAudioReportIntervalMs; @synthesize rtcpVideoReportIntervalMs = _rtcpVideoReportIntervalMs; +@synthesize enableImplicitRollback = _enableImplicitRollback; +@synthesize offerExtmapAllowMixed = _offerExtmapAllowMixed; +@synthesize iceCheckIntervalStrongConnectivity = _iceCheckIntervalStrongConnectivity; +@synthesize iceCheckIntervalWeakConnectivity = _iceCheckIntervalWeakConnectivity; +@synthesize iceUnwritableTimeout = _iceUnwritableTimeout; +@synthesize iceUnwritableMinChecks = _iceUnwritableMinChecks; +@synthesize iceInactiveTimeout = _iceInactiveTimeout; - (instancetype)init { // Copy defaults. 
@@ -106,8 +112,6 @@ - (instancetype)initWithNativeConfiguration: _iceConnectionReceivingTimeout = config.ice_connection_receiving_timeout; _iceBackupCandidatePairPingInterval = config.ice_backup_candidate_pair_ping_interval; - _useMediaTransport = config.use_media_transport; - _useMediaTransportForDataChannels = config.use_media_transport_for_data_channels; _keyType = RTCEncryptionKeyTypeECDSA; _iceCandidatePoolSize = config.ice_candidate_pool_size; _shouldPruneTurnPorts = config.prune_turn_ports; @@ -133,9 +137,28 @@ - (instancetype)initWithNativeConfiguration: sframeRequireFrameEncryption:config.crypto_options->sframe .require_frame_encryption]; } + _turnLoggingId = [NSString stringWithUTF8String:config.turn_logging_id.c_str()]; _rtcpAudioReportIntervalMs = config.audio_rtcp_report_interval_ms(); _rtcpVideoReportIntervalMs = config.video_rtcp_report_interval_ms(); _allowCodecSwitching = config.allow_codec_switching.value_or(false); + _enableImplicitRollback = config.enable_implicit_rollback; + _offerExtmapAllowMixed = config.offer_extmap_allow_mixed; + _iceCheckIntervalStrongConnectivity = + config.ice_check_interval_strong_connectivity.has_value() ? + [NSNumber numberWithInt:*config.ice_check_interval_strong_connectivity] : + nil; + _iceCheckIntervalWeakConnectivity = config.ice_check_interval_weak_connectivity.has_value() ? + [NSNumber numberWithInt:*config.ice_check_interval_weak_connectivity] : + nil; + _iceUnwritableTimeout = config.ice_unwritable_timeout.has_value() ? + [NSNumber numberWithInt:*config.ice_unwritable_timeout] : + nil; + _iceUnwritableMinChecks = config.ice_unwritable_min_checks.has_value() ? + [NSNumber numberWithInt:*config.ice_unwritable_min_checks] : + nil; + _iceInactiveTimeout = config.ice_inactive_timeout.has_value() ? 
+ [NSNumber numberWithInt:*config.ice_inactive_timeout] : + nil; } return self; } @@ -143,7 +166,7 @@ - (instancetype)initWithNativeConfiguration: - (NSString *)description { static NSString *formatString = @"RTC_OBJC_TYPE(RTCConfiguration): " @"{\n%@\n%@\n%@\n%@\n%@\n%@\n%@\n%@\n%d\n%d\n%d\n%d\n%d\n%d\n" - @"%d\n%@\n%d\n%d\n%d\n%d\n%d\n%@\n%d\n}\n"; + @"%d\n%@\n%d\n%d\n%d\n%d\n%d\n%d\n%d\n}\n"; return [NSString stringWithFormat:formatString, @@ -169,8 +192,8 @@ - (NSString *)description { _disableIPV6OnWiFi, _maxIPv6Networks, _activeResetSrtpParams, - _useMediaTransport, - _enableDscp]; + _enableDscp, + _enableImplicitRollback]; } #pragma mark - Private @@ -208,8 +231,6 @@ - (NSString *)description { _iceConnectionReceivingTimeout; nativeConfig->ice_backup_candidate_pair_ping_interval = _iceBackupCandidatePairPingInterval; - nativeConfig->use_media_transport = _useMediaTransport; - nativeConfig->use_media_transport_for_data_channels = _useMediaTransportForDataChannels; rtc::KeyType keyType = [[self class] nativeEncryptionKeyTypeForKeyType:_keyType]; if (_certificate != nullptr) { @@ -265,9 +286,29 @@ - (NSString *)description { _cryptoOptions.sframeRequireFrameEncryption ? 
true : false; nativeConfig->crypto_options = absl::optional(nativeCryptoOptions); } + nativeConfig->turn_logging_id = [_turnLoggingId UTF8String]; nativeConfig->set_audio_rtcp_report_interval_ms(_rtcpAudioReportIntervalMs); nativeConfig->set_video_rtcp_report_interval_ms(_rtcpVideoReportIntervalMs); nativeConfig->allow_codec_switching = _allowCodecSwitching; + nativeConfig->enable_implicit_rollback = _enableImplicitRollback; + nativeConfig->offer_extmap_allow_mixed = _offerExtmapAllowMixed; + if (_iceCheckIntervalStrongConnectivity != nil) { + nativeConfig->ice_check_interval_strong_connectivity = + absl::optional(_iceCheckIntervalStrongConnectivity.intValue); + } + if (_iceCheckIntervalWeakConnectivity != nil) { + nativeConfig->ice_check_interval_weak_connectivity = + absl::optional(_iceCheckIntervalWeakConnectivity.intValue); + } + if (_iceUnwritableTimeout != nil) { + nativeConfig->ice_unwritable_timeout = absl::optional(_iceUnwritableTimeout.intValue); + } + if (_iceUnwritableMinChecks != nil) { + nativeConfig->ice_unwritable_min_checks = absl::optional(_iceUnwritableMinChecks.intValue); + } + if (_iceInactiveTimeout != nil) { + nativeConfig->ice_inactive_timeout = absl::optional(_iceInactiveTimeout.intValue); + } return nativeConfig.release(); } diff --git a/sdk/objc/api/peerconnection/RTCEncodedImage+Private.mm b/sdk/objc/api/peerconnection/RTCEncodedImage+Private.mm index f9e4346350..b3e0a7bb67 100644 --- a/sdk/objc/api/peerconnection/RTCEncodedImage+Private.mm +++ b/sdk/objc/api/peerconnection/RTCEncodedImage+Private.mm @@ -92,7 +92,6 @@ - (instancetype)initWithNativeEncodedImage:(const webrtc::EncodedImage &)encoded self.encodeFinishMs = encodedImage.timing_.encode_finish_ms; self.frameType = static_cast(encodedImage._frameType); self.rotation = static_cast(encodedImage.rotation_); - self.completeFrame = encodedImage._completeFrame; self.qp = @(encodedImage.qp_); self.contentType = (encodedImage.content_type_ == webrtc::VideoContentType::SCREENSHARE) ? 
RTCVideoContentTypeScreenshare : @@ -121,7 +120,6 @@ - (instancetype)initWithNativeEncodedImage:(const webrtc::EncodedImage &)encoded encodedImage.timing_.encode_finish_ms = self.encodeFinishMs; encodedImage._frameType = webrtc::VideoFrameType(self.frameType); encodedImage.rotation_ = webrtc::VideoRotation(self.rotation); - encodedImage._completeFrame = self.completeFrame; encodedImage.qp_ = self.qp ? self.qp.intValue : -1; encodedImage.content_type_ = (self.contentType == RTCVideoContentTypeScreenshare) ? webrtc::VideoContentType::SCREENSHARE : diff --git a/sdk/objc/api/peerconnection/RTCFieldTrials.h b/sdk/objc/api/peerconnection/RTCFieldTrials.h index 61443e8bb2..7477ad020f 100644 --- a/sdk/objc/api/peerconnection/RTCFieldTrials.h +++ b/sdk/objc/api/peerconnection/RTCFieldTrials.h @@ -21,6 +21,7 @@ RTC_EXTERN NSString * const kRTCFieldTrialFlexFec03AdvertisedKey; RTC_EXTERN NSString * const kRTCFieldTrialFlexFec03Key; RTC_EXTERN NSString * const kRTCFieldTrialH264HighProfileKey; RTC_EXTERN NSString * const kRTCFieldTrialMinimizeResamplingOnMobileKey; +RTC_EXTERN NSString *const kRTCFieldTrialUseNWPathMonitor; /** The valid value for field trials above. 
*/ RTC_EXTERN NSString * const kRTCFieldTrialEnabledValue; diff --git a/sdk/objc/api/peerconnection/RTCFieldTrials.mm b/sdk/objc/api/peerconnection/RTCFieldTrials.mm index 4a30db2f70..c52dfe4e45 100644 --- a/sdk/objc/api/peerconnection/RTCFieldTrials.mm +++ b/sdk/objc/api/peerconnection/RTCFieldTrials.mm @@ -25,6 +25,7 @@ NSString * const kRTCFieldTrialH264HighProfileKey = @"WebRTC-H264HighProfile"; NSString * const kRTCFieldTrialMinimizeResamplingOnMobileKey = @"WebRTC-Audio-MinimizeResamplingOnMobile"; +NSString *const kRTCFieldTrialUseNWPathMonitor = @"WebRTC-Network-UseNWPathMonitor"; NSString * const kRTCFieldTrialEnabledValue = @"Enabled"; static std::unique_ptr gFieldTrialInitString; diff --git a/sdk/objc/api/peerconnection/RTCPeerConnection+DataChannel.mm b/sdk/objc/api/peerconnection/RTCPeerConnection+DataChannel.mm index 1ded45d670..cb75f061d8 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnection+DataChannel.mm +++ b/sdk/objc/api/peerconnection/RTCPeerConnection+DataChannel.mm @@ -23,14 +23,12 @@ @implementation RTC_OBJC_TYPE (RTCPeerConnection) std::string labelString = [NSString stdStringForString:label]; const webrtc::DataChannelInit nativeInit = configuration.nativeDataChannelInit; - rtc::scoped_refptr dataChannel = - self.nativePeerConnection->CreateDataChannel(labelString, - &nativeInit); - if (!dataChannel) { + auto result = self.nativePeerConnection->CreateDataChannelOrError(labelString, &nativeInit); + if (!result.ok()) { return nil; } return [[RTC_OBJC_TYPE(RTCDataChannel) alloc] initWithFactory:self.factory - nativeDataChannel:dataChannel]; + nativeDataChannel:result.MoveValue()]; } @end diff --git a/sdk/objc/api/peerconnection/RTCPeerConnection+Private.h b/sdk/objc/api/peerconnection/RTCPeerConnection+Private.h index 735881025a..558cf07dce 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnection+Private.h +++ b/sdk/objc/api/peerconnection/RTCPeerConnection+Private.h @@ -77,20 +77,21 @@ class PeerConnectionDelegateAdapter : public 
PeerConnectionObserver { /** Initialize an RTCPeerConnection with a configuration, constraints, and * delegate. */ -- (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory - configuration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration - constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints - delegate:(nullable id)delegate; +- (nullable instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory + configuration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration + constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints + delegate: + (nullable id)delegate; /** Initialize an RTCPeerConnection with a configuration, constraints, * delegate and PeerConnectionDependencies. */ -- (instancetype)initWithDependencies:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory - configuration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration - constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints - dependencies: - (std::unique_ptr)dependencies - delegate:(nullable id)delegate +- (nullable instancetype) + initWithDependencies:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory + configuration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration + constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints + dependencies:(std::unique_ptr)dependencies + delegate:(nullable id)delegate NS_DESIGNATED_INITIALIZER; + (webrtc::PeerConnectionInterface::SignalingState)nativeSignalingStateForState: diff --git a/sdk/objc/api/peerconnection/RTCPeerConnection.h b/sdk/objc/api/peerconnection/RTCPeerConnection.h index 38a5dc2a4d..b9605819f1 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnection.h +++ b/sdk/objc/api/peerconnection/RTCPeerConnection.h @@ -81,6 +81,12 @@ typedef NS_ENUM(NSInteger, RTCStatsOutputLevel) { RTCStatsOutputLevelDebug, }; +typedef void (^RTCCreateSessionDescriptionCompletionHandler)(RTC_OBJC_TYPE(RTCSessionDescription) * + _Nullable sdp, + NSError *_Nullable error); + +typedef void 
(^RTCSetSessionDescriptionCompletionHandler)(NSError *_Nullable error); + @class RTC_OBJC_TYPE(RTCPeerConnection); RTC_OBJC_EXPORT @@ -213,7 +219,12 @@ RTC_OBJC_EXPORT - (void)close; /** Provide a remote candidate to the ICE Agent. */ -- (void)addIceCandidate:(RTC_OBJC_TYPE(RTCIceCandidate) *)candidate; +- (void)addIceCandidate:(RTC_OBJC_TYPE(RTCIceCandidate) *)candidate + DEPRECATED_MSG_ATTRIBUTE("Please use addIceCandidate:completionHandler: instead"); + +/** Provide a remote candidate to the ICE Agent. */ +- (void)addIceCandidate:(RTC_OBJC_TYPE(RTCIceCandidate) *)candidate + completionHandler:(void (^)(NSError *_Nullable error))completionHandler; /** Remove a group of remote candidates from the ICE Agent. */ - (void)removeIceCandidates:(NSArray *)candidates; @@ -238,8 +249,8 @@ RTC_OBJC_EXPORT * - A sender already exists for the track. * - The peer connection is closed. */ -- (RTC_OBJC_TYPE(RTCRtpSender) *)addTrack:(RTC_OBJC_TYPE(RTCMediaStreamTrack) *)track - streamIds:(NSArray *)streamIds; +- (nullable RTC_OBJC_TYPE(RTCRtpSender) *)addTrack:(RTC_OBJC_TYPE(RTCMediaStreamTrack) *)track + streamIds:(NSArray *)streamIds; /** With PlanB semantics, removes an RTCRtpSender from this peer connection. * @@ -271,38 +282,46 @@ RTC_OBJC_EXPORT * of the transceiver (and sender/receiver) will be derived from the kind of * the track. */ -- (RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverWithTrack: +- (nullable RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverWithTrack: (RTC_OBJC_TYPE(RTCMediaStreamTrack) *)track; -- (RTC_OBJC_TYPE(RTCRtpTransceiver) *) +- (nullable RTC_OBJC_TYPE(RTCRtpTransceiver) *) addTransceiverWithTrack:(RTC_OBJC_TYPE(RTCMediaStreamTrack) *)track init:(RTC_OBJC_TYPE(RTCRtpTransceiverInit) *)init; /** Adds a transceiver with the given kind. Can either be RTCRtpMediaTypeAudio * or RTCRtpMediaTypeVideo. 
*/ -- (RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverOfType:(RTCRtpMediaType)mediaType; -- (RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverOfType:(RTCRtpMediaType)mediaType - init:(RTC_OBJC_TYPE(RTCRtpTransceiverInit) *) - init; +- (nullable RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverOfType:(RTCRtpMediaType)mediaType; +- (nullable RTC_OBJC_TYPE(RTCRtpTransceiver) *) + addTransceiverOfType:(RTCRtpMediaType)mediaType + init:(RTC_OBJC_TYPE(RTCRtpTransceiverInit) *)init; + +/** Tells the PeerConnection that ICE should be restarted. This triggers a need + * for negotiation and subsequent offerForConstraints:completionHandler call will act as if + * RTCOfferAnswerOptions::ice_restart is true. + */ +- (void)restartIce; /** Generate an SDP offer. */ - (void)offerForConstraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints - completionHandler:(nullable void (^)(RTC_OBJC_TYPE(RTCSessionDescription) * _Nullable sdp, - NSError *_Nullable error))completionHandler; + completionHandler:(RTCCreateSessionDescriptionCompletionHandler)completionHandler; /** Generate an SDP answer. */ - (void)answerForConstraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints - completionHandler: - (nullable void (^)(RTC_OBJC_TYPE(RTCSessionDescription) * _Nullable sdp, - NSError *_Nullable error))completionHandler; + completionHandler:(RTCCreateSessionDescriptionCompletionHandler)completionHandler; /** Apply the supplied RTCSessionDescription as the local description. */ - (void)setLocalDescription:(RTC_OBJC_TYPE(RTCSessionDescription) *)sdp - completionHandler:(nullable void (^)(NSError *_Nullable error))completionHandler; + completionHandler:(RTCSetSessionDescriptionCompletionHandler)completionHandler; + +/** Creates an offer or answer (depending on current signaling state) and sets + * it as the local session description. 
*/ +- (void)setLocalDescriptionWithCompletionHandler: + (RTCSetSessionDescriptionCompletionHandler)completionHandler; /** Apply the supplied RTCSessionDescription as the remote description. */ - (void)setRemoteDescription:(RTC_OBJC_TYPE(RTCSessionDescription) *)sdp - completionHandler:(nullable void (^)(NSError *_Nullable error))completionHandler; + completionHandler:(RTCSetSessionDescriptionCompletionHandler)completionHandler; /** Limits the bandwidth allocated for all RTP streams sent by this * PeerConnection. Nil parameters will be unchanged. Setting diff --git a/sdk/objc/api/peerconnection/RTCPeerConnection.mm b/sdk/objc/api/peerconnection/RTCPeerConnection.mm index fa68d08e74..67d0ff0cd6 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnection.mm +++ b/sdk/objc/api/peerconnection/RTCPeerConnection.mm @@ -29,13 +29,51 @@ #include "api/jsep_ice_candidate.h" #include "api/rtc_event_log_output_file.h" -#include "api/transport/media/media_transport_interface.h" +#include "api/set_local_description_observer_interface.h" +#include "api/set_remote_description_observer_interface.h" #include "rtc_base/checks.h" #include "rtc_base/numerics/safe_conversions.h" NSString *const kRTCPeerConnectionErrorDomain = @"org.webrtc.RTC_OBJC_TYPE(RTCPeerConnection)"; int const kRTCPeerConnnectionSessionDescriptionError = -1; +namespace { + +class SetSessionDescriptionObserver : public webrtc::SetLocalDescriptionObserverInterface, + public webrtc::SetRemoteDescriptionObserverInterface { + public: + SetSessionDescriptionObserver(RTCSetSessionDescriptionCompletionHandler completionHandler) { + completion_handler_ = completionHandler; + } + + virtual void OnSetLocalDescriptionComplete(webrtc::RTCError error) override { + OnCompelete(error); + } + + virtual void OnSetRemoteDescriptionComplete(webrtc::RTCError error) override { + OnCompelete(error); + } + + private: + void OnCompelete(webrtc::RTCError error) { + RTC_DCHECK(completion_handler_ != nil); + if (error.ok()) { + 
completion_handler_(nil); + } else { + // TODO(hta): Add handling of error.type() + NSString *str = [NSString stringForStdString:error.message()]; + NSError *err = [NSError errorWithDomain:kRTCPeerConnectionErrorDomain + code:kRTCPeerConnnectionSessionDescriptionError + userInfo:@{NSLocalizedDescriptionKey : str}]; + completion_handler_(err); + } + completion_handler_ = nil; + } + RTCSetSessionDescriptionCompletionHandler completion_handler_; +}; + +} // anonymous namespace + namespace webrtc { class CreateSessionDescriptionObserverAdapter @@ -75,38 +113,6 @@ void OnFailure(RTCError error) override { NSError *error); }; -class SetSessionDescriptionObserverAdapter : - public SetSessionDescriptionObserver { - public: - SetSessionDescriptionObserverAdapter(void (^completionHandler) - (NSError *error)) { - completion_handler_ = completionHandler; - } - - ~SetSessionDescriptionObserverAdapter() override { completion_handler_ = nil; } - - void OnSuccess() override { - RTC_DCHECK(completion_handler_); - completion_handler_(nil); - completion_handler_ = nil; - } - - void OnFailure(RTCError error) override { - RTC_DCHECK(completion_handler_); - // TODO(hta): Add handling of error.type() - NSString *str = [NSString stringForStdString:error.message()]; - NSError* err = - [NSError errorWithDomain:kRTCPeerConnectionErrorDomain - code:kRTCPeerConnnectionSessionDescriptionError - userInfo:@{ NSLocalizedDescriptionKey : str }]; - completion_handler_(err); - completion_handler_ = nil; - } - - private: - void (^completion_handler_)(NSError *error); -}; - PeerConnectionDelegateAdapter::PeerConnectionDelegateAdapter(RTC_OBJC_TYPE(RTCPeerConnection) * peerConnection) { peer_connection_ = peerConnection; @@ -308,10 +314,10 @@ @implementation RTC_OBJC_TYPE (RTCPeerConnection) { @synthesize delegate = _delegate; @synthesize factory = _factory; -- (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory - configuration:(RTC_OBJC_TYPE(RTCConfiguration) 
*)configuration - constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints - delegate:(id)delegate { +- (nullable instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory + configuration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration + constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints + delegate:(id)delegate { NSParameterAssert(factory); std::unique_ptr dependencies = std::make_unique(nullptr); @@ -322,12 +328,12 @@ - (instancetype)initWithFactory:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)facto delegate:delegate]; } -- (instancetype)initWithDependencies:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory - configuration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration - constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints - dependencies: - (std::unique_ptr)dependencies - delegate:(id)delegate { +- (nullable instancetype) + initWithDependencies:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *)factory + configuration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration + constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints + dependencies:(std::unique_ptr)dependencies + delegate:(id)delegate { NSParameterAssert(factory); NSParameterAssert(dependencies.get()); std::unique_ptr config( @@ -342,11 +348,12 @@ - (instancetype)initWithDependencies:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *) webrtc::PeerConnectionDependencies deps = std::move(*dependencies.release()); deps.observer = _observer.get(); - _peerConnection = factory.nativeFactory->CreatePeerConnection(*config, std::move(deps)); + auto result = factory.nativeFactory->CreatePeerConnectionOrError(*config, std::move(deps)); - if (!_peerConnection) { + if (!result.ok()) { return nil; } + _peerConnection = result.MoveValue(); _factory = factory; _localStreams = [[NSMutableArray alloc] init]; _delegate = delegate; @@ -359,19 +366,27 @@ - (instancetype)initWithDependencies:(RTC_OBJC_TYPE(RTCPeerConnectionFactory) *) } - (RTC_OBJC_TYPE(RTCSessionDescription) 
*)localDescription { - const webrtc::SessionDescriptionInterface *description = - _peerConnection->local_description(); - return description ? - [[RTC_OBJC_TYPE(RTCSessionDescription) alloc] initWithNativeDescription:description] : - nil; + // It's only safe to operate on SessionDescriptionInterface on the signaling thread. + return _peerConnection->signaling_thread()->Invoke( + RTC_FROM_HERE, [self] { + const webrtc::SessionDescriptionInterface *description = + _peerConnection->local_description(); + return description ? + [[RTC_OBJC_TYPE(RTCSessionDescription) alloc] initWithNativeDescription:description] : + nil; + }); } - (RTC_OBJC_TYPE(RTCSessionDescription) *)remoteDescription { - const webrtc::SessionDescriptionInterface *description = - _peerConnection->remote_description(); - return description ? - [[RTC_OBJC_TYPE(RTCSessionDescription) alloc] initWithNativeDescription:description] : - nil; + // It's only safe to operate on SessionDescriptionInterface on the signaling thread. + return _peerConnection->signaling_thread()->Invoke( + RTC_FROM_HERE, [self] { + const webrtc::SessionDescriptionInterface *description = + _peerConnection->remote_description(); + return description ? 
+ [[RTC_OBJC_TYPE(RTCSessionDescription) alloc] initWithNativeDescription:description] : + nil; + }); } - (RTCSignalingState)signalingState { @@ -419,7 +434,22 @@ - (void)addIceCandidate:(RTC_OBJC_TYPE(RTCIceCandidate) *)candidate { candidate.nativeCandidate); _peerConnection->AddIceCandidate(iceCandidate.get()); } - +- (void)addIceCandidate:(RTC_OBJC_TYPE(RTCIceCandidate) *)candidate + completionHandler:(void (^)(NSError *_Nullable error))completionHandler { + RTC_DCHECK(completionHandler != nil); + _peerConnection->AddIceCandidate( + candidate.nativeCandidate, [completionHandler](const auto &error) { + if (error.ok()) { + completionHandler(nil); + } else { + NSString *str = [NSString stringForStdString:error.message()]; + NSError *err = [NSError errorWithDomain:kRTCPeerConnectionErrorDomain + code:static_cast(error.type()) + userInfo:@{NSLocalizedDescriptionKey : str}]; + completionHandler(err); + } + }); +} - (void)removeIceCandidates:(NSArray *)iceCandidates { std::vector candidates; for (RTC_OBJC_TYPE(RTCIceCandidate) * iceCandidate in iceCandidates) { @@ -449,8 +479,8 @@ - (void)removeStream:(RTC_OBJC_TYPE(RTCMediaStream) *)stream { [_localStreams removeObject:stream]; } -- (RTC_OBJC_TYPE(RTCRtpSender) *)addTrack:(RTC_OBJC_TYPE(RTCMediaStreamTrack) *)track - streamIds:(NSArray *)streamIds { +- (nullable RTC_OBJC_TYPE(RTCRtpSender) *)addTrack:(RTC_OBJC_TYPE(RTCMediaStreamTrack) *)track + streamIds:(NSArray *)streamIds { std::vector nativeStreamIds; for (NSString *streamId in streamIds) { nativeStreamIds.push_back([streamId UTF8String]); @@ -473,13 +503,13 @@ - (BOOL)removeTrack:(RTC_OBJC_TYPE(RTCRtpSender) *)sender { return result; } -- (RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverWithTrack: +- (nullable RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverWithTrack: (RTC_OBJC_TYPE(RTCMediaStreamTrack) *)track { return [self addTransceiverWithTrack:track init:[[RTC_OBJC_TYPE(RTCRtpTransceiverInit) alloc] init]]; } -- (RTC_OBJC_TYPE(RTCRtpTransceiver) *) +- 
(nullable RTC_OBJC_TYPE(RTCRtpTransceiver) *) addTransceiverWithTrack:(RTC_OBJC_TYPE(RTCMediaStreamTrack) *)track init:(RTC_OBJC_TYPE(RTCRtpTransceiverInit) *)init { webrtc::RTCErrorOr> nativeTransceiverOrError = @@ -494,14 +524,14 @@ - (BOOL)removeTrack:(RTC_OBJC_TYPE(RTCRtpSender) *)sender { nativeRtpTransceiver:nativeTransceiverOrError.MoveValue()]; } -- (RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverOfType:(RTCRtpMediaType)mediaType { +- (nullable RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverOfType:(RTCRtpMediaType)mediaType { return [self addTransceiverOfType:mediaType init:[[RTC_OBJC_TYPE(RTCRtpTransceiverInit) alloc] init]]; } -- (RTC_OBJC_TYPE(RTCRtpTransceiver) *)addTransceiverOfType:(RTCRtpMediaType)mediaType - init:(RTC_OBJC_TYPE(RTCRtpTransceiverInit) *) - init { +- (nullable RTC_OBJC_TYPE(RTCRtpTransceiver) *) + addTransceiverOfType:(RTCRtpMediaType)mediaType + init:(RTC_OBJC_TYPE(RTCRtpTransceiverInit) *)init { webrtc::RTCErrorOr> nativeTransceiverOrError = _peerConnection->AddTransceiver( [RTC_OBJC_TYPE(RTCRtpReceiver) nativeMediaTypeForMediaType:mediaType], init.nativeInit); @@ -516,9 +546,13 @@ - (BOOL)removeTrack:(RTC_OBJC_TYPE(RTCRtpSender) *)sender { nativeRtpTransceiver:nativeTransceiverOrError.MoveValue()]; } +- (void)restartIce { + _peerConnection->RestartIce(); +} + - (void)offerForConstraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints - completionHandler:(void (^)(RTC_OBJC_TYPE(RTCSessionDescription) * sessionDescription, - NSError *error))completionHandler { + completionHandler:(RTCCreateSessionDescriptionCompletionHandler)completionHandler { + RTC_DCHECK(completionHandler != nil); rtc::scoped_refptr observer(new rtc::RefCountedObject (completionHandler)); @@ -529,8 +563,8 @@ - (void)offerForConstraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints } - (void)answerForConstraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints - completionHandler:(void (^)(RTC_OBJC_TYPE(RTCSessionDescription) * sessionDescription, - 
NSError *error))completionHandler { + completionHandler:(RTCCreateSessionDescriptionCompletionHandler)completionHandler { + RTC_DCHECK(completionHandler != nil); rtc::scoped_refptr observer(new rtc::RefCountedObject (completionHandler)); @@ -541,30 +575,38 @@ - (void)answerForConstraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints } - (void)setLocalDescription:(RTC_OBJC_TYPE(RTCSessionDescription) *)sdp - completionHandler:(void (^)(NSError *error))completionHandler { - rtc::scoped_refptr observer( - new rtc::RefCountedObject( - completionHandler)); - _peerConnection->SetLocalDescription(observer, sdp.nativeDescription); + completionHandler:(RTCSetSessionDescriptionCompletionHandler)completionHandler { + RTC_DCHECK(completionHandler != nil); + rtc::scoped_refptr observer( + new rtc::RefCountedObject<::SetSessionDescriptionObserver>(completionHandler)); + _peerConnection->SetLocalDescription(sdp.nativeDescription, observer); +} + +- (void)setLocalDescriptionWithCompletionHandler: + (RTCSetSessionDescriptionCompletionHandler)completionHandler { + RTC_DCHECK(completionHandler != nil); + rtc::scoped_refptr observer( + new rtc::RefCountedObject<::SetSessionDescriptionObserver>(completionHandler)); + _peerConnection->SetLocalDescription(observer); } - (void)setRemoteDescription:(RTC_OBJC_TYPE(RTCSessionDescription) *)sdp - completionHandler:(void (^)(NSError *error))completionHandler { - rtc::scoped_refptr observer( - new rtc::RefCountedObject( - completionHandler)); - _peerConnection->SetRemoteDescription(observer, sdp.nativeDescription); + completionHandler:(RTCSetSessionDescriptionCompletionHandler)completionHandler { + RTC_DCHECK(completionHandler != nil); + rtc::scoped_refptr observer( + new rtc::RefCountedObject<::SetSessionDescriptionObserver>(completionHandler)); + _peerConnection->SetRemoteDescription(sdp.nativeDescription, observer); } - (BOOL)setBweMinBitrateBps:(nullable NSNumber *)minBitrateBps currentBitrateBps:(nullable NSNumber *)currentBitrateBps 
maxBitrateBps:(nullable NSNumber *)maxBitrateBps { - webrtc::PeerConnectionInterface::BitrateParameters params; + webrtc::BitrateSettings params; if (minBitrateBps != nil) { params.min_bitrate_bps = absl::optional(minBitrateBps.intValue); } if (currentBitrateBps != nil) { - params.current_bitrate_bps = absl::optional(currentBitrateBps.intValue); + params.start_bitrate_bps = absl::optional(currentBitrateBps.intValue); } if (maxBitrateBps != nil) { params.max_bitrate_bps = absl::optional(maxBitrateBps.intValue); diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory+Native.h b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory+Native.h index 46bf1baf69..b39bb437b5 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory+Native.h +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory+Native.h @@ -17,7 +17,6 @@ namespace webrtc { class AudioDeviceModule; class AudioEncoderFactory; class AudioDecoderFactory; -class MediaTransportFactory; class NetworkControllerFactoryInterface; class VideoEncoderFactory; class VideoDecoderFactory; @@ -53,21 +52,6 @@ NS_ASSUME_NONNULL_BEGIN audioProcessingModule: (rtc::scoped_refptr)audioProcessingModule; -- (instancetype) - initWithNativeAudioEncoderFactory: - (rtc::scoped_refptr)audioEncoderFactory - nativeAudioDecoderFactory: - (rtc::scoped_refptr)audioDecoderFactory - nativeVideoEncoderFactory: - (std::unique_ptr)videoEncoderFactory - nativeVideoDecoderFactory: - (std::unique_ptr)videoDecoderFactory - audioDeviceModule:(nullable webrtc::AudioDeviceModule *)audioDeviceModule - audioProcessingModule: - (rtc::scoped_refptr)audioProcessingModule - mediaTransportFactory: - (std::unique_ptr)mediaTransportFactory; - - (instancetype) initWithNativeAudioEncoderFactory: (rtc::scoped_refptr)audioEncoderFactory @@ -81,20 +65,17 @@ NS_ASSUME_NONNULL_BEGIN audioProcessingModule: (rtc::scoped_refptr)audioProcessingModule networkControllerFactory:(std::unique_ptr) - networkControllerFactory - mediaTransportFactory: - 
(std::unique_ptr)mediaTransportFactory; + networkControllerFactory; - (instancetype) initWithEncoderFactory:(nullable id)encoderFactory decoderFactory:(nullable id)decoderFactory - audioSink:(nullable RTC_OBJC_TYPE(RTCAudioSink) *)audioSink - mediaTransportFactory:(std::unique_ptr)mediaTransportFactory; + audioSink:(nullable RTC_OBJC_TYPE(RTCAudioSink) *)audioSink; /** Initialize an RTCPeerConnection with a configuration, constraints, and * dependencies. */ -- (RTC_OBJC_TYPE(RTCPeerConnection) *) +- (nullable RTC_OBJC_TYPE(RTCPeerConnection) *) peerConnectionWithDependencies:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints dependencies:(std::unique_ptr)dependencies diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h index a234d3dc09..7e5a6669df 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.h @@ -77,7 +77,7 @@ RTC_OBJC_EXPORT /** Initialize an RTCPeerConnection with a configuration, constraints, and * delegate. 
*/ -- (RTC_OBJC_TYPE(RTCPeerConnection) *) +- (nullable RTC_OBJC_TYPE(RTCPeerConnection) *) peerConnectionWithConfiguration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints delegate:(nullable id)delegate; diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm index 8869f87c12..0698205d04 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactory.mm @@ -25,6 +25,9 @@ #import "base/RTCVideoDecoderFactory.h" #import "base/RTCVideoEncoderFactory.h" #import "helpers/NSString+StdString.h" +#include "sdk/objc/native/api/network_monitor_factory.h" +#include "system_wrappers/include/field_trial.h" + #ifndef HAVE_NO_MEDIA #import "components/video_codec/RTCVideoDecoderFactoryH264.h" #import "components/video_codec/RTCVideoEncoderFactoryH264.h" @@ -34,6 +37,7 @@ #include "api/audio_codecs/builtin_audio_encoder_factory.h" // nogncheck #include "api/rtc_event_log/rtc_event_log_factory.h" #include "api/task_queue/default_task_queue_factory.h" +#include "api/transport/field_trial_based_config.h" #include "modules/audio_device/include/audio_device.h" // nogncheck #include "modules/audio_processing/include/audio_processing.h" // nogncheck @@ -55,7 +59,6 @@ // C++ target. // TODO(zhihuang): Remove nogncheck once MediaEngineInterface is moved to C++ // API layer. 
-#include "api/transport/media/media_transport_interface.h" #include "media/engine/webrtc_media_engine.h" // nogncheck @implementation RTC_OBJC_TYPE (RTCPeerConnectionFactory) { @@ -80,7 +83,7 @@ @implementation RTC_OBJC_TYPE (RTCPeerConnectionFactory) { #if defined(WEBRTC_IOS) RTCLogInfo(@"Creating AudioDeviceModule with AudioSourceSink"); webrtc::AudioSourceSink *sink = new webrtc::AudioSourceSink(audioSink); - return webrtc::CreateAudioDeviceModule(sink); + return webrtc::CreateAudioDeviceModule(false, sink); #else return nullptr; #endif @@ -98,16 +101,14 @@ - (instancetype)init { nativeVideoDecoderFactory:webrtc::ObjCToNativeVideoDecoderFactory([[RTC_OBJC_TYPE( RTCVideoDecoderFactoryH264) alloc] init]) audioDeviceModule:[self audioDeviceModule] - audioProcessingModule:nullptr - mediaTransportFactory:nullptr]; + audioProcessingModule:nullptr]; #endif } - (instancetype) initWithEncoderFactory:(nullable id)encoderFactory decoderFactory:(nullable id)decoderFactory - audioSink:(nullable RTC_OBJC_TYPE(RTCAudioSink) *)audioSink - mediaTransportFactory:(std::unique_ptr)mediaTransportFactory { + audioSink:(nullable RTC_OBJC_TYPE(RTCAudioSink) *)audioSink { #ifdef HAVE_NO_MEDIA return [self initWithNoMedia]; #else @@ -124,8 +125,7 @@ - (instancetype)init { nativeVideoEncoderFactory:std::move(native_encoder_factory) nativeVideoDecoderFactory:std::move(native_decoder_factory) audioDeviceModule:[self audioDeviceModuleWithAudioSink:audioSink] - audioProcessingModule:nullptr - mediaTransportFactory:std::move(mediaTransportFactory)]; + audioProcessingModule:nullptr]; #endif } @@ -134,18 +134,7 @@ - (instancetype)init { decoderFactory:(nullable id)decoderFactory { return [self initWithEncoderFactory:encoderFactory decoderFactory:decoderFactory - audioSink:nullptr - mediaTransportFactory:nullptr]; -} - -- (instancetype) - initWithEncoderFactory:(nullable id)encoderFactory - decoderFactory:(nullable id)decoderFactory - audioSink:(nullable RTC_OBJC_TYPE(RTCAudioSink) *)audioSink { 
- return [self initWithEncoderFactory:encoderFactory - decoderFactory:decoderFactory - audioSink:audioSink - mediaTransportFactory:nullptr]; + audioSink:nullptr]; } - (instancetype)initNative { @@ -174,6 +163,9 @@ - (instancetype)initWithNoMedia { dependencies.network_thread = _networkThread.get(); dependencies.worker_thread = _workerThread.get(); dependencies.signaling_thread = _signalingThread.get(); + if (webrtc::field_trial::IsEnabled("WebRTC-Network-UseNWPathMonitor")) { + dependencies.network_monitor_factory = webrtc::CreateNetworkMonitorFactory(); + } _nativeFactory = webrtc::CreateModularPeerConnectionFactory(std::move(dependencies)); NSAssert(_nativeFactory, @"Failed to initialize PeerConnectionFactory!"); } @@ -197,30 +189,7 @@ - (instancetype)initWithNativeAudioEncoderFactory: nativeVideoDecoderFactory:std::move(videoDecoderFactory) audioDeviceModule:audioDeviceModule audioProcessingModule:audioProcessingModule - mediaTransportFactory:nullptr]; -} - -- (instancetype)initWithNativeAudioEncoderFactory: - (rtc::scoped_refptr)audioEncoderFactory - nativeAudioDecoderFactory: - (rtc::scoped_refptr)audioDecoderFactory - nativeVideoEncoderFactory: - (std::unique_ptr)videoEncoderFactory - nativeVideoDecoderFactory: - (std::unique_ptr)videoDecoderFactory - audioDeviceModule:(webrtc::AudioDeviceModule *)audioDeviceModule - audioProcessingModule: - (rtc::scoped_refptr)audioProcessingModule - mediaTransportFactory:(std::unique_ptr) - mediaTransportFactory { - return [self initWithNativeAudioEncoderFactory:audioEncoderFactory - nativeAudioDecoderFactory:audioDecoderFactory - nativeVideoEncoderFactory:std::move(videoEncoderFactory) - nativeVideoDecoderFactory:std::move(videoDecoderFactory) - audioDeviceModule:audioDeviceModule - audioProcessingModule:audioProcessingModule - networkControllerFactory:nullptr - mediaTransportFactory:std::move(mediaTransportFactory)]; + networkControllerFactory:nullptr]; } - (instancetype)initWithNativeAudioEncoderFactory: 
(rtc::scoped_refptr)audioEncoderFactory @@ -235,16 +204,18 @@ - (instancetype)initWithNativeAudioEncoderFactory: (rtc::scoped_refptr)audioProcessingModule networkControllerFactory: (std::unique_ptr) - networkControllerFactory - mediaTransportFactory:(std::unique_ptr) - mediaTransportFactory { + networkControllerFactory { if (self = [self initNative]) { webrtc::PeerConnectionFactoryDependencies dependencies; dependencies.network_thread = _networkThread.get(); dependencies.worker_thread = _workerThread.get(); dependencies.signaling_thread = _signalingThread.get(); + if (webrtc::field_trial::IsEnabled("WebRTC-Network-UseNWPathMonitor")) { + dependencies.network_monitor_factory = webrtc::CreateNetworkMonitorFactory(); + } #ifndef HAVE_NO_MEDIA dependencies.task_queue_factory = webrtc::CreateDefaultTaskQueueFactory(); + dependencies.trials = std::make_unique(); cricket::MediaEngineDependencies media_deps; media_deps.adm = std::move(audioDeviceModule); media_deps.task_queue_factory = dependencies.task_queue_factory.get(); @@ -257,12 +228,12 @@ - (instancetype)initWithNativeAudioEncoderFactory: } else { media_deps.audio_processing = webrtc::AudioProcessingBuilder().Create(); } + media_deps.trials = dependencies.trials.get(); dependencies.media_engine = cricket::CreateMediaEngine(std::move(media_deps)); dependencies.call_factory = webrtc::CreateCallFactory(); dependencies.event_log_factory = std::make_unique(dependencies.task_queue_factory.get()); dependencies.network_controller_factory = std::move(networkControllerFactory); - dependencies.media_transport_factory = std::move(mediaTransportFactory); #endif _nativeFactory = webrtc::CreateModularPeerConnectionFactory(std::move(dependencies)); NSAssert(_nativeFactory, @"Failed to initialize PeerConnectionFactory!"); @@ -309,7 +280,7 @@ - (instancetype)initWithNativeAudioEncoderFactory: return [[RTC_OBJC_TYPE(RTCMediaStream) alloc] initWithFactory:self streamId:streamId]; } -- (RTC_OBJC_TYPE(RTCPeerConnection) *) +- (nullable 
RTC_OBJC_TYPE(RTCPeerConnection) *) peerConnectionWithConfiguration:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints delegate: @@ -320,7 +291,7 @@ - (instancetype)initWithNativeAudioEncoderFactory: delegate:delegate]; } -- (RTC_OBJC_TYPE(RTCPeerConnection) *) +- (nullable RTC_OBJC_TYPE(RTCPeerConnection) *) peerConnectionWithDependencies:(RTC_OBJC_TYPE(RTCConfiguration) *)configuration constraints:(RTC_OBJC_TYPE(RTCMediaConstraints) *)constraints dependencies:(std::unique_ptr)dependencies diff --git a/sdk/objc/api/peerconnection/RTCPeerConnectionFactoryBuilder.mm b/sdk/objc/api/peerconnection/RTCPeerConnectionFactoryBuilder.mm index 8f52bea8e3..991ec5a41c 100644 --- a/sdk/objc/api/peerconnection/RTCPeerConnectionFactoryBuilder.mm +++ b/sdk/objc/api/peerconnection/RTCPeerConnectionFactoryBuilder.mm @@ -13,7 +13,6 @@ #include "api/audio_codecs/audio_decoder_factory.h" #include "api/audio_codecs/audio_encoder_factory.h" -#include "api/transport/media/media_transport_interface.h" #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_factory.h" #include "modules/audio_device/include/audio_device.h" @@ -26,7 +25,6 @@ @implementation RTCPeerConnectionFactoryBuilder { rtc::scoped_refptr _audioDecoderFactory; rtc::scoped_refptr _audioDeviceModule; rtc::scoped_refptr _audioProcessingModule; - std::unique_ptr _mediaTransportFactory; } + (RTCPeerConnectionFactoryBuilder *)builder { @@ -41,8 +39,7 @@ + (RTCPeerConnectionFactoryBuilder *)builder { nativeVideoEncoderFactory:std::move(_videoEncoderFactory) nativeVideoDecoderFactory:std::move(_videoDecoderFactory) audioDeviceModule:_audioDeviceModule - audioProcessingModule:_audioProcessingModule - mediaTransportFactory:std::move(_mediaTransportFactory)]; + audioProcessingModule:_audioProcessingModule]; } - (void)setVideoEncoderFactory:(std::unique_ptr)videoEncoderFactory { diff --git 
a/sdk/objc/api/peerconnection/RTCRtcpParameters+Private.h b/sdk/objc/api/peerconnection/RTCRtcpParameters+Private.h index 94c1f92956..c4d196cf79 100644 --- a/sdk/objc/api/peerconnection/RTCRtcpParameters+Private.h +++ b/sdk/objc/api/peerconnection/RTCRtcpParameters+Private.h @@ -21,7 +21,8 @@ NS_ASSUME_NONNULL_BEGIN @property(nonatomic, readonly) webrtc::RtcpParameters nativeParameters; /** Initialize the object with a native RtcpParameters structure. */ -- (instancetype)initWithNativeParameters:(const webrtc::RtcpParameters &)nativeParameters; +- (instancetype)initWithNativeParameters:(const webrtc::RtcpParameters &)nativeParameters + NS_DESIGNATED_INITIALIZER; @end diff --git a/sdk/objc/api/peerconnection/RTCRtcpParameters.h b/sdk/objc/api/peerconnection/RTCRtcpParameters.h index 1bbaedcf7e..2f7aad3aef 100644 --- a/sdk/objc/api/peerconnection/RTCRtcpParameters.h +++ b/sdk/objc/api/peerconnection/RTCRtcpParameters.h @@ -23,7 +23,7 @@ RTC_OBJC_EXPORT /** Whether reduced size RTCP is configured or compound RTCP. 
*/ @property(nonatomic, assign) BOOL isReducedSize; -- (instancetype)init NS_DESIGNATED_INITIALIZER; +- (instancetype)init; @end diff --git a/sdk/objc/api/peerconnection/RTCRtcpParameters.mm b/sdk/objc/api/peerconnection/RTCRtcpParameters.mm index 4d6084b90d..e92ee4b3e7 100644 --- a/sdk/objc/api/peerconnection/RTCRtcpParameters.mm +++ b/sdk/objc/api/peerconnection/RTCRtcpParameters.mm @@ -18,11 +18,12 @@ @implementation RTC_OBJC_TYPE (RTCRtcpParameters) @synthesize isReducedSize = _isReducedSize; - (instancetype)init { - return [super init]; + webrtc::RtcpParameters nativeParameters; + return [self initWithNativeParameters:nativeParameters]; } - (instancetype)initWithNativeParameters:(const webrtc::RtcpParameters &)nativeParameters { - if (self = [self init]) { + if (self = [super init]) { _cname = [NSString stringForStdString:nativeParameters.cname]; _isReducedSize = nativeParameters.reduced_size; } diff --git a/sdk/objc/api/peerconnection/RTCRtpCodecParameters+Private.h b/sdk/objc/api/peerconnection/RTCRtpCodecParameters+Private.h index 7833068837..ff23cfd642 100644 --- a/sdk/objc/api/peerconnection/RTCRtpCodecParameters+Private.h +++ b/sdk/objc/api/peerconnection/RTCRtpCodecParameters+Private.h @@ -21,7 +21,8 @@ NS_ASSUME_NONNULL_BEGIN @property(nonatomic, readonly) webrtc::RtpCodecParameters nativeParameters; /** Initialize the object with a native RtpCodecParameters structure. 
*/ -- (instancetype)initWithNativeParameters:(const webrtc::RtpCodecParameters &)nativeParameters; +- (instancetype)initWithNativeParameters:(const webrtc::RtpCodecParameters &)nativeParameters + NS_DESIGNATED_INITIALIZER; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpCodecParameters.h b/sdk/objc/api/peerconnection/RTCRtpCodecParameters.h index a68d9eb873..6135223720 100644 --- a/sdk/objc/api/peerconnection/RTCRtpCodecParameters.h +++ b/sdk/objc/api/peerconnection/RTCRtpCodecParameters.h @@ -31,7 +31,7 @@ RTC_EXTERN const NSString *const kRTCVp8CodecName; RTC_EXTERN const NSString *const kRTCVp9CodecName; RTC_EXTERN const NSString *const kRTCH264CodecName; -/** Defined in http://w3c.github.io/webrtc-pc/#idl-def-RTC_OBJC_TYPE(RTCRtpCodecParameters) */ +/** Defined in https://www.w3.org/TR/webrtc/#idl-def-rtcrtpcodecparameters */ RTC_OBJC_EXPORT @interface RTC_OBJC_TYPE (RTCRtpCodecParameters) : NSObject @@ -66,7 +66,7 @@ RTC_OBJC_EXPORT /** The "format specific parameters" field from the "a=fmtp" line in the SDP */ @property(nonatomic, readonly, nonnull) NSDictionary *parameters; -- (instancetype)init NS_DESIGNATED_INITIALIZER; +- (instancetype)init; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpCodecParameters.mm b/sdk/objc/api/peerconnection/RTCRtpCodecParameters.mm index f61b93cce2..b48d51f9ca 100644 --- a/sdk/objc/api/peerconnection/RTCRtpCodecParameters.mm +++ b/sdk/objc/api/peerconnection/RTCRtpCodecParameters.mm @@ -44,12 +44,13 @@ @implementation RTC_OBJC_TYPE (RTCRtpCodecParameters) @synthesize parameters = _parameters; - (instancetype)init { - return [super init]; + webrtc::RtpCodecParameters nativeParameters; + return [self initWithNativeParameters:nativeParameters]; } - (instancetype)initWithNativeParameters: (const webrtc::RtpCodecParameters &)nativeParameters { - if (self = [self init]) { + if (self = [super init]) { _payloadType = nativeParameters.payload_type; _name = [NSString stringForStdString:nativeParameters.name]; switch 
(nativeParameters.kind) { @@ -62,6 +63,9 @@ - (instancetype)initWithNativeParameters: case cricket::MEDIA_TYPE_DATA: RTC_NOTREACHED(); break; + case cricket::MEDIA_TYPE_UNSUPPORTED: + RTC_NOTREACHED(); + break; } if (nativeParameters.clock_rate) { _clockRate = [NSNumber numberWithInt:*nativeParameters.clock_rate]; diff --git a/sdk/objc/api/peerconnection/RTCRtpEncodingParameters+Private.h b/sdk/objc/api/peerconnection/RTCRtpEncodingParameters+Private.h index 074c9b175b..d12ca624e3 100644 --- a/sdk/objc/api/peerconnection/RTCRtpEncodingParameters+Private.h +++ b/sdk/objc/api/peerconnection/RTCRtpEncodingParameters+Private.h @@ -21,7 +21,8 @@ NS_ASSUME_NONNULL_BEGIN @property(nonatomic, readonly) webrtc::RtpEncodingParameters nativeParameters; /** Initialize the object with a native RtpEncodingParameters structure. */ -- (instancetype)initWithNativeParameters:(const webrtc::RtpEncodingParameters &)nativeParameters; +- (instancetype)initWithNativeParameters:(const webrtc::RtpEncodingParameters &)nativeParameters + NS_DESIGNATED_INITIALIZER; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpEncodingParameters.h b/sdk/objc/api/peerconnection/RTCRtpEncodingParameters.h index facd7e5129..07f6b7a39c 100644 --- a/sdk/objc/api/peerconnection/RTCRtpEncodingParameters.h +++ b/sdk/objc/api/peerconnection/RTCRtpEncodingParameters.h @@ -65,7 +65,11 @@ RTC_OBJC_EXPORT /** The relative DiffServ Code Point priority. 
*/ @property(nonatomic, assign) RTCPriority networkPriority; -- (instancetype)init NS_DESIGNATED_INITIALIZER; +/** Allow dynamic frame length changes for audio: + https://w3c.github.io/webrtc-extensions/#dom-rtcrtpencodingparameters-adaptiveptime */ +@property(nonatomic, assign) BOOL adaptiveAudioPacketTime; + +- (instancetype)init; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpEncodingParameters.mm b/sdk/objc/api/peerconnection/RTCRtpEncodingParameters.mm index eec6ce4015..d6087dafb0 100644 --- a/sdk/objc/api/peerconnection/RTCRtpEncodingParameters.mm +++ b/sdk/objc/api/peerconnection/RTCRtpEncodingParameters.mm @@ -24,14 +24,16 @@ @implementation RTC_OBJC_TYPE (RTCRtpEncodingParameters) @synthesize ssrc = _ssrc; @synthesize bitratePriority = _bitratePriority; @synthesize networkPriority = _networkPriority; +@synthesize adaptiveAudioPacketTime = _adaptiveAudioPacketTime; - (instancetype)init { - return [super init]; + webrtc::RtpEncodingParameters nativeParameters; + return [self initWithNativeParameters:nativeParameters]; } - (instancetype)initWithNativeParameters: (const webrtc::RtpEncodingParameters &)nativeParameters { - if (self = [self init]) { + if (self = [super init]) { if (!nativeParameters.rid.empty()) { _rid = [NSString stringForStdString:nativeParameters.rid]; } @@ -60,6 +62,7 @@ - (instancetype)initWithNativeParameters: _bitratePriority = nativeParameters.bitrate_priority; _networkPriority = [RTC_OBJC_TYPE(RTCRtpEncodingParameters) priorityFromNativePriority:nativeParameters.network_priority]; + _adaptiveAudioPacketTime = nativeParameters.adaptive_ptime; } return self; } @@ -92,6 +95,7 @@ - (instancetype)initWithNativeParameters: parameters.bitrate_priority = _bitratePriority; parameters.network_priority = [RTC_OBJC_TYPE(RTCRtpEncodingParameters) nativePriorityFromPriority:_networkPriority]; + parameters.adaptive_ptime = _adaptiveAudioPacketTime; return parameters; } diff --git a/sdk/objc/api/peerconnection/RTCRtpFragmentationHeader+Private.h 
b/sdk/objc/api/peerconnection/RTCRtpFragmentationHeader+Private.h deleted file mode 100644 index 0b0bce556f..0000000000 --- a/sdk/objc/api/peerconnection/RTCRtpFragmentationHeader+Private.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import "base/RTCRtpFragmentationHeader.h" - -#include "modules/include/module_common_types.h" - -NS_ASSUME_NONNULL_BEGIN - -/* Interfaces for converting to/from internal C++ formats. */ -@interface RTC_OBJC_TYPE (RTCRtpFragmentationHeader) -(Private) - - - (instancetype)initWithNativeFragmentationHeader - : (const webrtc::RTPFragmentationHeader *__nullable)fragmentationHeader; -- (std::unique_ptr)createNativeFragmentationHeader; - -@end - -NS_ASSUME_NONNULL_END diff --git a/sdk/objc/api/peerconnection/RTCRtpFragmentationHeader+Private.mm b/sdk/objc/api/peerconnection/RTCRtpFragmentationHeader+Private.mm deleted file mode 100644 index e514cf69c6..0000000000 --- a/sdk/objc/api/peerconnection/RTCRtpFragmentationHeader+Private.mm +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "RTCRtpFragmentationHeader+Private.h" - -#include "modules/include/module_common_types.h" - -@implementation RTC_OBJC_TYPE (RTCRtpFragmentationHeader) -(Private) - - - (instancetype)initWithNativeFragmentationHeader - : (const webrtc::RTPFragmentationHeader *)fragmentationHeader { - if (self = [super init]) { - if (fragmentationHeader) { - int count = fragmentationHeader->fragmentationVectorSize; - NSMutableArray *offsets = [NSMutableArray array]; - NSMutableArray *lengths = [NSMutableArray array]; - NSMutableArray *timeDiffs = [NSMutableArray array]; - NSMutableArray *plTypes = [NSMutableArray array]; - for (int i = 0; i < count; ++i) { - [offsets addObject:@(fragmentationHeader->fragmentationOffset[i])]; - [lengths addObject:@(fragmentationHeader->fragmentationLength[i])]; - [timeDiffs addObject:@(0)]; - [plTypes addObject:@(0)]; - } - self.fragmentationOffset = [offsets copy]; - self.fragmentationLength = [lengths copy]; - self.fragmentationTimeDiff = [timeDiffs copy]; - self.fragmentationPlType = [plTypes copy]; - } - } - - return self; -} - -- (std::unique_ptr)createNativeFragmentationHeader { - auto fragmentationHeader = - std::unique_ptr(new webrtc::RTPFragmentationHeader); - fragmentationHeader->VerifyAndAllocateFragmentationHeader(self.fragmentationOffset.count); - for (NSUInteger i = 0; i < self.fragmentationOffset.count; ++i) { - fragmentationHeader->fragmentationOffset[i] = (size_t)self.fragmentationOffset[i].unsignedIntValue; - fragmentationHeader->fragmentationLength[i] = (size_t)self.fragmentationLength[i].unsignedIntValue; - } - - return fragmentationHeader; -} - -@end diff --git a/sdk/objc/api/peerconnection/RTCRtpHeaderExtension+Private.h b/sdk/objc/api/peerconnection/RTCRtpHeaderExtension+Private.h index 6255847fb9..0e0fbba5ac 100644 --- a/sdk/objc/api/peerconnection/RTCRtpHeaderExtension+Private.h +++ b/sdk/objc/api/peerconnection/RTCRtpHeaderExtension+Private.h @@ -21,7 +21,8 @@ NS_ASSUME_NONNULL_BEGIN @property(nonatomic, 
readonly) webrtc::RtpExtension nativeParameters; /** Initialize the object with a native RtpExtension structure. */ -- (instancetype)initWithNativeParameters:(const webrtc::RtpExtension &)nativeParameters; +- (instancetype)initWithNativeParameters:(const webrtc::RtpExtension &)nativeParameters + NS_DESIGNATED_INITIALIZER; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpHeaderExtension.h b/sdk/objc/api/peerconnection/RTCRtpHeaderExtension.h index 15be5af56c..4000bf5372 100644 --- a/sdk/objc/api/peerconnection/RTCRtpHeaderExtension.h +++ b/sdk/objc/api/peerconnection/RTCRtpHeaderExtension.h @@ -26,7 +26,7 @@ RTC_OBJC_EXPORT /** Whether the header extension is encrypted or not. */ @property(nonatomic, readonly, getter=isEncrypted) BOOL encrypted; -- (instancetype)init NS_DESIGNATED_INITIALIZER; +- (instancetype)init; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpHeaderExtension.mm b/sdk/objc/api/peerconnection/RTCRtpHeaderExtension.mm index a19228e629..68093e92ea 100644 --- a/sdk/objc/api/peerconnection/RTCRtpHeaderExtension.mm +++ b/sdk/objc/api/peerconnection/RTCRtpHeaderExtension.mm @@ -19,11 +19,12 @@ @implementation RTC_OBJC_TYPE (RTCRtpHeaderExtension) @synthesize encrypted = _encrypted; - (instancetype)init { - return [super init]; + webrtc::RtpExtension nativeExtension; + return [self initWithNativeParameters:nativeExtension]; } - (instancetype)initWithNativeParameters:(const webrtc::RtpExtension &)nativeParameters { - if (self = [self init]) { + if (self = [super init]) { _uri = [NSString stringForStdString:nativeParameters.uri]; _id = nativeParameters.id; _encrypted = nativeParameters.encrypt; diff --git a/sdk/objc/api/peerconnection/RTCRtpParameters+Private.h b/sdk/objc/api/peerconnection/RTCRtpParameters+Private.h index 369475a81d..139617f727 100644 --- a/sdk/objc/api/peerconnection/RTCRtpParameters+Private.h +++ b/sdk/objc/api/peerconnection/RTCRtpParameters+Private.h @@ -21,7 +21,8 @@ NS_ASSUME_NONNULL_BEGIN @property(nonatomic, readonly) 
webrtc::RtpParameters nativeParameters; /** Initialize the object with a native RtpParameters structure. */ -- (instancetype)initWithNativeParameters:(const webrtc::RtpParameters &)nativeParameters; +- (instancetype)initWithNativeParameters:(const webrtc::RtpParameters &)nativeParameters + NS_DESIGNATED_INITIALIZER; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpParameters.h b/sdk/objc/api/peerconnection/RTCRtpParameters.h index fff6a85886..3d71c55ab9 100644 --- a/sdk/objc/api/peerconnection/RTCRtpParameters.h +++ b/sdk/objc/api/peerconnection/RTCRtpParameters.h @@ -51,7 +51,7 @@ RTC_OBJC_EXPORT */ @property(nonatomic, copy, nullable) NSNumber *degradationPreference; -- (instancetype)init NS_DESIGNATED_INITIALIZER; +- (instancetype)init; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpParameters.mm b/sdk/objc/api/peerconnection/RTCRtpParameters.mm index 2236b9aa36..2baf0ecd80 100644 --- a/sdk/objc/api/peerconnection/RTCRtpParameters.mm +++ b/sdk/objc/api/peerconnection/RTCRtpParameters.mm @@ -26,12 +26,13 @@ @implementation RTC_OBJC_TYPE (RTCRtpParameters) @synthesize degradationPreference = _degradationPreference; - (instancetype)init { - return [super init]; + webrtc::RtpParameters nativeParameters; + return [self initWithNativeParameters:nativeParameters]; } - (instancetype)initWithNativeParameters: (const webrtc::RtpParameters &)nativeParameters { - if (self = [self init]) { + if (self = [super init]) { _transactionId = [NSString stringForStdString:nativeParameters.transaction_id]; _rtcp = [[RTC_OBJC_TYPE(RTCRtcpParameters) alloc] initWithNativeParameters:nativeParameters.rtcp]; diff --git a/sdk/objc/api/peerconnection/RTCRtpReceiver.h b/sdk/objc/api/peerconnection/RTCRtpReceiver.h index 7ab2cfae72..1e407fd71b 100644 --- a/sdk/objc/api/peerconnection/RTCRtpReceiver.h +++ b/sdk/objc/api/peerconnection/RTCRtpReceiver.h @@ -21,6 +21,7 @@ typedef NS_ENUM(NSInteger, RTCRtpMediaType) { RTCRtpMediaTypeAudio, RTCRtpMediaTypeVideo, RTCRtpMediaTypeData, + 
RTCRtpMediaTypeUnsupported, }; @class RTC_OBJC_TYPE(RTCRtpReceiver); diff --git a/sdk/objc/api/peerconnection/RTCRtpReceiver.mm b/sdk/objc/api/peerconnection/RTCRtpReceiver.mm index 3e00935694..60af86ac1b 100644 --- a/sdk/objc/api/peerconnection/RTCRtpReceiver.mm +++ b/sdk/objc/api/peerconnection/RTCRtpReceiver.mm @@ -125,6 +125,8 @@ + (RTCRtpMediaType)mediaTypeForNativeMediaType: return RTCRtpMediaTypeVideo; case cricket::MEDIA_TYPE_DATA: return RTCRtpMediaTypeData; + case cricket::MEDIA_TYPE_UNSUPPORTED: + return RTCRtpMediaTypeUnsupported; } } @@ -136,6 +138,8 @@ + (RTCRtpMediaType)mediaTypeForNativeMediaType: return cricket::MEDIA_TYPE_VIDEO; case RTCRtpMediaTypeData: return cricket::MEDIA_TYPE_DATA; + case RTCRtpMediaTypeUnsupported: + return cricket::MEDIA_TYPE_UNSUPPORTED; } } @@ -147,6 +151,8 @@ + (NSString *)stringForMediaType:(RTCRtpMediaType)mediaType { return @"VIDEO"; case RTCRtpMediaTypeData: return @"DATA"; + case RTCRtpMediaTypeUnsupported: + return @"UNSUPPORTED"; } } diff --git a/sdk/objc/api/peerconnection/RTCRtpSender.h b/sdk/objc/api/peerconnection/RTCRtpSender.h index 41bb083d2e..fcdf199869 100644 --- a/sdk/objc/api/peerconnection/RTCRtpSender.h +++ b/sdk/objc/api/peerconnection/RTCRtpSender.h @@ -21,8 +21,8 @@ RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE (RTCRtpSender) - /** A unique identifier for this sender. */ - @property(nonatomic, readonly) NSString *senderId; +/** A unique identifier for this sender. */ +@property(nonatomic, readonly) NSString *senderId; /** The currently active RTCRtpParameters, as defined in * https://www.w3.org/TR/webrtc/#idl-def-RTCRtpParameters. 
diff --git a/sdk/objc/api/peerconnection/RTCRtpTransceiver.h b/sdk/objc/api/peerconnection/RTCRtpTransceiver.h index f8996ccafb..fd59013639 100644 --- a/sdk/objc/api/peerconnection/RTCRtpTransceiver.h +++ b/sdk/objc/api/peerconnection/RTCRtpTransceiver.h @@ -16,6 +16,8 @@ NS_ASSUME_NONNULL_BEGIN +extern NSString *const kRTCRtpTransceiverErrorDomain; + /** https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiverdirection */ typedef NS_ENUM(NSInteger, RTCRtpTransceiverDirection) { RTCRtpTransceiverDirectionSendRecv, @@ -98,12 +100,9 @@ RTC_OBJC_EXPORT /** The direction attribute indicates the preferred direction of this * transceiver, which will be used in calls to createOffer and createAnswer. - * An update of directionality does not take effect immediately. Instead, - * future calls to createOffer and createAnswer mark the corresponding media - * descriptions as sendrecv, sendonly, recvonly, or inactive. * https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-direction */ -@property(nonatomic) RTCRtpTransceiverDirection direction; +@property(nonatomic, readonly) RTCRtpTransceiverDirection direction; /** The currentDirection attribute indicates the current direction negotiated * for this transceiver. If this transceiver has never been represented in an @@ -117,7 +116,14 @@ RTC_OBJC_EXPORT * this transceiver will no longer send, the receiver will no longer receive. * https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-stop */ -- (void)stop; +- (void)stopInternal; + +/** An update of directionality does not take effect immediately. Instead, + * future calls to createOffer and createAnswer mark the corresponding media + * descriptions as sendrecv, sendonly, recvonly, or inactive. 
+ * https://w3c.github.io/webrtc-pc/#dom-rtcrtptransceiver-direction + */ +- (void)setDirection:(RTCRtpTransceiverDirection)direction error:(NSError **)error; @end diff --git a/sdk/objc/api/peerconnection/RTCRtpTransceiver.mm b/sdk/objc/api/peerconnection/RTCRtpTransceiver.mm index 2995e5fceb..ae1cf79864 100644 --- a/sdk/objc/api/peerconnection/RTCRtpTransceiver.mm +++ b/sdk/objc/api/peerconnection/RTCRtpTransceiver.mm @@ -17,6 +17,8 @@ #import "base/RTCLogging.h" #import "helpers/NSString+StdString.h" +NSString *const kRTCRtpTransceiverErrorDomain = @"org.webrtc.RTCRtpTranceiver"; + @implementation RTC_OBJC_TYPE (RTCRtpTransceiverInit) @synthesize direction = _direction; @@ -75,9 +77,18 @@ - (RTCRtpTransceiverDirection)direction { rtpTransceiverDirectionFromNativeDirection:_nativeRtpTransceiver->direction()]; } -- (void)setDirection:(RTCRtpTransceiverDirection)direction { - _nativeRtpTransceiver->SetDirection( +- (void)setDirection:(RTCRtpTransceiverDirection)direction error:(NSError **)error { + webrtc::RTCError nativeError = _nativeRtpTransceiver->SetDirectionWithError( [RTC_OBJC_TYPE(RTCRtpTransceiver) nativeRtpTransceiverDirectionFromDirection:direction]); + + if (!nativeError.ok() && error) { + *error = [NSError errorWithDomain:kRTCRtpTransceiverErrorDomain + code:static_cast(nativeError.type()) + userInfo:@{ + @"message" : [NSString stringWithCString:nativeError.message() + encoding:NSUTF8StringEncoding] + }]; + } } - (BOOL)currentDirection:(RTCRtpTransceiverDirection *)currentDirectionOut { @@ -90,8 +101,8 @@ - (BOOL)currentDirection:(RTCRtpTransceiverDirection *)currentDirectionOut { } } -- (void)stop { - _nativeRtpTransceiver->Stop(); +- (void)stopInternal { + _nativeRtpTransceiver->StopInternal(); } - (NSString *)description { diff --git a/sdk/objc/api/peerconnection/RTCSessionDescription+Private.h b/sdk/objc/api/peerconnection/RTCSessionDescription+Private.h index 0f0a06a887..aa087e557f 100644 --- 
a/sdk/objc/api/peerconnection/RTCSessionDescription+Private.h +++ b/sdk/objc/api/peerconnection/RTCSessionDescription+Private.h @@ -22,7 +22,8 @@ NS_ASSUME_NONNULL_BEGIN * RTCSessionDescription object. This is needed to pass to the underlying C++ * APIs. */ - @property(nonatomic, readonly, nullable) webrtc::SessionDescriptionInterface *nativeDescription; + @property(nonatomic, + readonly) std::unique_ptr nativeDescription; /** * Initialize an RTCSessionDescription from a native diff --git a/sdk/objc/api/peerconnection/RTCSessionDescription.h b/sdk/objc/api/peerconnection/RTCSessionDescription.h index 6bd118db13..8a9479d5cf 100644 --- a/sdk/objc/api/peerconnection/RTCSessionDescription.h +++ b/sdk/objc/api/peerconnection/RTCSessionDescription.h @@ -20,6 +20,7 @@ typedef NS_ENUM(NSInteger, RTCSdpType) { RTCSdpTypeOffer, RTCSdpTypePrAnswer, RTCSdpTypeAnswer, + RTCSdpTypeRollback, }; NS_ASSUME_NONNULL_BEGIN diff --git a/sdk/objc/api/peerconnection/RTCSessionDescription.mm b/sdk/objc/api/peerconnection/RTCSessionDescription.mm index a62870e088..4ff02e8411 100644 --- a/sdk/objc/api/peerconnection/RTCSessionDescription.mm +++ b/sdk/objc/api/peerconnection/RTCSessionDescription.mm @@ -31,7 +31,6 @@ + (RTCSdpType)typeForString:(NSString *)string { } - (instancetype)initWithType:(RTCSdpType)type sdp:(NSString *)sdp { - NSParameterAssert(sdp.length); if (self = [super init]) { _type = type; _sdp = [sdp copy]; @@ -47,13 +46,11 @@ - (NSString *)description { #pragma mark - Private -- (webrtc::SessionDescriptionInterface *)nativeDescription { +- (std::unique_ptr)nativeDescription { webrtc::SdpParseError error; - webrtc::SessionDescriptionInterface *description = - webrtc::CreateSessionDescription([[self class] stdStringForType:_type], - _sdp.stdString, - &error); + std::unique_ptr description(webrtc::CreateSessionDescription( + [[self class] stdStringForType:_type], _sdp.stdString, &error)); if (!description) { RTCLogError(@"Failed to create session description: %s\nline: %s", 
@@ -83,6 +80,8 @@ - (instancetype)initWithNativeDescription: return webrtc::SessionDescriptionInterface::kPrAnswer; case RTCSdpTypeAnswer: return webrtc::SessionDescriptionInterface::kAnswer; + case RTCSdpTypeRollback: + return webrtc::SessionDescriptionInterface::kRollback; } } @@ -93,6 +92,8 @@ + (RTCSdpType)typeForStdString:(const std::string &)string { return RTCSdpTypePrAnswer; } else if (string == webrtc::SessionDescriptionInterface::kAnswer) { return RTCSdpTypeAnswer; + } else if (string == webrtc::SessionDescriptionInterface::kRollback) { + return RTCSdpTypeRollback; } else { RTC_NOTREACHED(); return RTCSdpTypeOffer; diff --git a/sdk/objc/api/peerconnection/RTCStatisticsReport.h b/sdk/objc/api/peerconnection/RTCStatisticsReport.h index 38d93e8771..06dbf48d88 100644 --- a/sdk/objc/api/peerconnection/RTCStatisticsReport.h +++ b/sdk/objc/api/peerconnection/RTCStatisticsReport.h @@ -44,8 +44,8 @@ RTC_OBJC_EXPORT @property(nonatomic, readonly) NSString *type; /** The keys and values of the subreport, e.g. "totalFramesDuration = 5.551". - The values are either NSNumbers or NSStrings, or NSArrays encapsulating NSNumbers - or NSStrings. */ + The values are either NSNumbers or NSStrings or NSArrays encapsulating NSNumbers + or NSStrings, or NSDictionary of NSString keys to NSNumber values. 
*/ @property(nonatomic, readonly) NSDictionary *values; - (instancetype)init NS_UNAVAILABLE; diff --git a/sdk/objc/api/peerconnection/RTCStatisticsReport.mm b/sdk/objc/api/peerconnection/RTCStatisticsReport.mm index ab8006d9bd..967683fc91 100644 --- a/sdk/objc/api/peerconnection/RTCStatisticsReport.mm +++ b/sdk/objc/api/peerconnection/RTCStatisticsReport.mm @@ -16,7 +16,7 @@ namespace webrtc { /** Converts a single value to a suitable NSNumber, NSString or NSArray containing NSNumbers - or NSStrings.*/ + or NSStrings, or NSDictionary of NSString keys to NSNumber values.*/ NSObject *ValueFromStatsMember(const RTCStatsMemberInterface *member) { if (member->is_defined()) { switch (member->type()) { @@ -37,7 +37,7 @@ case RTCStatsMemberInterface::kSequenceBool: { std::vector sequence = *member->cast_to>>(); NSMutableArray *array = [NSMutableArray arrayWithCapacity:sequence.size()]; - for (const auto &item : sequence) { + for (auto item : sequence) { [array addObject:[NSNumber numberWithBool:item]]; } return [array copy]; @@ -91,6 +91,26 @@ } return [array copy]; } + case RTCStatsMemberInterface::kMapStringUint64: { + std::map map = + *member->cast_to>>(); + NSMutableDictionary *dictionary = + [NSMutableDictionary dictionaryWithCapacity:map.size()]; + for (const auto &item : map) { + dictionary[[NSString stringForStdString:item.first]] = @(item.second); + } + return [dictionary copy]; + } + case RTCStatsMemberInterface::kMapStringDouble: { + std::map map = + *member->cast_to>>(); + NSMutableDictionary *dictionary = + [NSMutableDictionary dictionaryWithCapacity:map.size()]; + for (const auto &item : map) { + dictionary[[NSString stringForStdString:item.first]] = @(item.second); + } + return [dictionary copy]; + } default: RTC_NOTREACHED(); } diff --git a/sdk/objc/api/peerconnection/RTCVideoSource.mm b/sdk/objc/api/peerconnection/RTCVideoSource.mm index 15b0d6f1be..3a1ea6a322 100644 --- a/sdk/objc/api/peerconnection/RTCVideoSource.mm +++ 
b/sdk/objc/api/peerconnection/RTCVideoSource.mm @@ -10,7 +10,7 @@ #import "RTCVideoSource+Private.h" -#include "api/video_track_source_proxy.h" +#include "pc/video_track_source_proxy.h" #include "rtc_base/checks.h" #include "sdk/objc/native/src/objc_video_track_source.h" diff --git a/sdk/objc/api/video_codec/RTCVideoCodecConstants.h b/sdk/objc/api/video_codec/RTCVideoCodecConstants.h index 03f36e22ca..8b17a75aef 100644 --- a/sdk/objc/api/video_codec/RTCVideoCodecConstants.h +++ b/sdk/objc/api/video_codec/RTCVideoCodecConstants.h @@ -12,5 +12,6 @@ #import "RTCMacros.h" -RTC_OBJC_EXPORT extern NSString* const kRTCVideoCodecVp8Name; -RTC_OBJC_EXPORT extern NSString* const kRTCVideoCodecVp9Name; +RTC_EXTERN NSString* const kRTCVideoCodecVp8Name; +RTC_EXTERN NSString* const kRTCVideoCodecVp9Name; +RTC_EXTERN NSString* const kRTCVideoCodecAv1Name; diff --git a/sdk/objc/api/video_codec/RTCVideoCodecConstants.mm b/sdk/objc/api/video_codec/RTCVideoCodecConstants.mm index 3de6e22ef7..1ab236a2c2 100644 --- a/sdk/objc/api/video_codec/RTCVideoCodecConstants.mm +++ b/sdk/objc/api/video_codec/RTCVideoCodecConstants.mm @@ -15,3 +15,4 @@ NSString *const kRTCVideoCodecVp8Name = @(cricket::kVp8CodecName); NSString *const kRTCVideoCodecVp9Name = @(cricket::kVp9CodecName); +NSString *const kRTCVideoCodecAv1Name = @(cricket::kAv1CodecName); diff --git a/sdk/objc/api/video_codec/RTCVideoDecoderAV1.h b/sdk/objc/api/video_codec/RTCVideoDecoderAV1.h new file mode 100644 index 0000000000..d618237970 --- /dev/null +++ b/sdk/objc/api/video_codec/RTCVideoDecoderAV1.h @@ -0,0 +1,27 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#import + +#import "RTCMacros.h" +#import "RTCVideoDecoder.h" + +RTC_OBJC_EXPORT +@interface RTC_OBJC_TYPE (RTCVideoDecoderAV1) : NSObject + +/* This returns a AV1 decoder that can be returned from a RTCVideoDecoderFactory injected into + * RTCPeerConnectionFactory. Even though it implements the RTCVideoDecoder protocol, it can not be + * used independently from the RTCPeerConnectionFactory. + */ ++ (id)av1Decoder; + ++ (bool)isSupported; + +@end diff --git a/sdk/objc/api/video_codec/RTCVideoDecoderAV1.mm b/sdk/objc/api/video_codec/RTCVideoDecoderAV1.mm new file mode 100644 index 0000000000..cc40f5af85 --- /dev/null +++ b/sdk/objc/api/video_codec/RTCVideoDecoderAV1.mm @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ * + */ + +#import + +#import "RTCMacros.h" +#import "RTCVideoDecoderAV1.h" +#import "RTCWrappedNativeVideoDecoder.h" + +#include "modules/video_coding/codecs/av1/libaom_av1_decoder.h" + +@implementation RTC_OBJC_TYPE (RTCVideoDecoderAV1) + ++ (id)av1Decoder { + std::unique_ptr nativeDecoder(webrtc::CreateLibaomAv1Decoder()); + if (nativeDecoder == nullptr) { + return nil; + } + return [[RTC_OBJC_TYPE(RTCWrappedNativeVideoDecoder) alloc] + initWithNativeDecoder:std::move(nativeDecoder)]; +} + ++ (bool)isSupported { + return webrtc::kIsLibaomAv1DecoderSupported; +} + +@end diff --git a/sdk/objc/api/video_codec/RTCVideoDecoderVP8.mm b/sdk/objc/api/video_codec/RTCVideoDecoderVP8.mm index 91ca3b7aec..c150cf6d3a 100644 --- a/sdk/objc/api/video_codec/RTCVideoDecoderVP8.mm +++ b/sdk/objc/api/video_codec/RTCVideoDecoderVP8.mm @@ -11,6 +11,7 @@ #import +#import "RTCMacros.h" #import "RTCVideoDecoderVP8.h" #import "RTCWrappedNativeVideoDecoder.h" @@ -19,7 +20,7 @@ @implementation RTC_OBJC_TYPE (RTCVideoDecoderVP8) + (id)vp8Decoder { - return [[RTCWrappedNativeVideoDecoder alloc] + return [[RTC_OBJC_TYPE(RTCWrappedNativeVideoDecoder) alloc] initWithNativeDecoder:std::unique_ptr(webrtc::VP8Decoder::Create())]; } diff --git a/sdk/objc/api/video_codec/RTCVideoDecoderVP9.h b/sdk/objc/api/video_codec/RTCVideoDecoderVP9.h index b3a1743057..de7e62012b 100644 --- a/sdk/objc/api/video_codec/RTCVideoDecoderVP9.h +++ b/sdk/objc/api/video_codec/RTCVideoDecoderVP9.h @@ -22,4 +22,6 @@ RTC_OBJC_EXPORT */ + (id)vp9Decoder; ++ (bool)isSupported; + @end diff --git a/sdk/objc/api/video_codec/RTCVideoDecoderVP9.mm b/sdk/objc/api/video_codec/RTCVideoDecoderVP9.mm index 56041a27eb..05446d436d 100644 --- a/sdk/objc/api/video_codec/RTCVideoDecoderVP9.mm +++ b/sdk/objc/api/video_codec/RTCVideoDecoderVP9.mm @@ -11,6 +11,7 @@ #import +#import "RTCMacros.h" #import "RTCVideoDecoderVP9.h" #import "RTCWrappedNativeVideoDecoder.h" @@ -19,8 +20,20 @@ @implementation RTC_OBJC_TYPE (RTCVideoDecoderVP9) + 
(id)vp9Decoder { - return [[RTCWrappedNativeVideoDecoder alloc] - initWithNativeDecoder:std::unique_ptr(webrtc::VP9Decoder::Create())]; + std::unique_ptr nativeDecoder(webrtc::VP9Decoder::Create()); + if (nativeDecoder == nullptr) { + return nil; + } + return [[RTC_OBJC_TYPE(RTCWrappedNativeVideoDecoder) alloc] + initWithNativeDecoder:std::move(nativeDecoder)]; +} + ++ (bool)isSupported { +#if defined(RTC_ENABLE_VP9) + return true; +#else + return false; +#endif } @end diff --git a/sdk/objc/api/video_codec/RTCVideoEncoderAV1.h b/sdk/objc/api/video_codec/RTCVideoEncoderAV1.h new file mode 100644 index 0000000000..8aa55e4bfa --- /dev/null +++ b/sdk/objc/api/video_codec/RTCVideoEncoderAV1.h @@ -0,0 +1,27 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#import + +#import "RTCMacros.h" +#import "RTCVideoEncoder.h" + +RTC_OBJC_EXPORT +@interface RTC_OBJC_TYPE (RTCVideoEncoderAV1) : NSObject + +/* This returns a AV1 encoder that can be returned from a RTCVideoEncoderFactory injected into + * RTCPeerConnectionFactory. Even though it implements the RTCVideoEncoder protocol, it can not be + * used independently from the RTCPeerConnectionFactory. + */ ++ (id)av1Encoder; + ++ (bool)isSupported; + +@end diff --git a/sdk/objc/api/video_codec/RTCVideoEncoderAV1.mm b/sdk/objc/api/video_codec/RTCVideoEncoderAV1.mm new file mode 100644 index 0000000000..92e924a475 --- /dev/null +++ b/sdk/objc/api/video_codec/RTCVideoEncoderAV1.mm @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + * + */ + +#import + +#import "RTCMacros.h" +#import "RTCVideoEncoderAV1.h" +#import "RTCWrappedNativeVideoEncoder.h" + +#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h" + +@implementation RTC_OBJC_TYPE (RTCVideoEncoderAV1) + ++ (id)av1Encoder { + std::unique_ptr nativeEncoder(webrtc::CreateLibaomAv1Encoder()); + if (nativeEncoder == nullptr) { + return nil; + } + return [[RTC_OBJC_TYPE(RTCWrappedNativeVideoEncoder) alloc] + initWithNativeEncoder:std::move(nativeEncoder)]; +} + ++ (bool)isSupported { + return webrtc::kIsLibaomAv1EncoderSupported; +} + +@end diff --git a/sdk/objc/api/video_codec/RTCVideoEncoderVP8.mm b/sdk/objc/api/video_codec/RTCVideoEncoderVP8.mm index 135512723e..d72f705813 100644 --- a/sdk/objc/api/video_codec/RTCVideoEncoderVP8.mm +++ b/sdk/objc/api/video_codec/RTCVideoEncoderVP8.mm @@ -11,6 +11,7 @@ #import +#import "RTCMacros.h" #import "RTCVideoEncoderVP8.h" #import "RTCWrappedNativeVideoEncoder.h" @@ -19,7 +20,7 @@ @implementation RTC_OBJC_TYPE (RTCVideoEncoderVP8) + (id)vp8Encoder { - return [[RTCWrappedNativeVideoEncoder alloc] + return [[RTC_OBJC_TYPE(RTCWrappedNativeVideoEncoder) alloc] initWithNativeEncoder:std::unique_ptr(webrtc::VP8Encoder::Create())]; } diff --git a/sdk/objc/api/video_codec/RTCVideoEncoderVP9.h b/sdk/objc/api/video_codec/RTCVideoEncoderVP9.h index 8f961ef337..f7dac6117d 100644 --- a/sdk/objc/api/video_codec/RTCVideoEncoderVP9.h +++ b/sdk/objc/api/video_codec/RTCVideoEncoderVP9.h @@ -22,4 +22,6 @@ RTC_OBJC_EXPORT */ + (id)vp9Encoder; ++ (bool)isSupported; + @end diff --git a/sdk/objc/api/video_codec/RTCVideoEncoderVP9.mm b/sdk/objc/api/video_codec/RTCVideoEncoderVP9.mm 
index ec9e75a5ed..18a9353f7e 100644 --- a/sdk/objc/api/video_codec/RTCVideoEncoderVP9.mm +++ b/sdk/objc/api/video_codec/RTCVideoEncoderVP9.mm @@ -11,6 +11,7 @@ #import +#import "RTCMacros.h" #import "RTCVideoEncoderVP9.h" #import "RTCWrappedNativeVideoEncoder.h" @@ -19,8 +20,20 @@ @implementation RTC_OBJC_TYPE (RTCVideoEncoderVP9) + (id)vp9Encoder { - return [[RTCWrappedNativeVideoEncoder alloc] - initWithNativeEncoder:std::unique_ptr(webrtc::VP9Encoder::Create())]; + std::unique_ptr nativeEncoder(webrtc::VP9Encoder::Create()); + if (nativeEncoder == nullptr) { + return nil; + } + return [[RTC_OBJC_TYPE(RTCWrappedNativeVideoEncoder) alloc] + initWithNativeEncoder:std::move(nativeEncoder)]; +} + ++ (bool)isSupported { +#if defined(RTC_ENABLE_VP9) + return true; +#else + return false; +#endif } @end diff --git a/sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.h b/sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.h index 2241c0c056..3a9b39e959 100644 --- a/sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.h +++ b/sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.h @@ -10,12 +10,13 @@ #import +#import "base/RTCMacros.h" #import "base/RTCVideoDecoder.h" #include "api/video_codecs/video_decoder.h" #include "media/base/codec.h" -@interface RTCWrappedNativeVideoDecoder : NSObject +@interface RTC_OBJC_TYPE (RTCWrappedNativeVideoDecoder) : NSObject - (instancetype)initWithNativeDecoder:(std::unique_ptr)decoder; diff --git a/sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.mm b/sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.mm index e4d8dc357d..48d09cf396 100644 --- a/sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.mm +++ b/sdk/objc/api/video_codec/RTCWrappedNativeVideoDecoder.mm @@ -11,9 +11,10 @@ #import #import "RTCWrappedNativeVideoDecoder.h" +#import "base/RTCMacros.h" #import "helpers/NSString+StdString.h" -@implementation RTCWrappedNativeVideoDecoder { +@implementation RTC_OBJC_TYPE (RTCWrappedNativeVideoDecoder) { std::unique_ptr 
_wrappedDecoder; } diff --git a/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.h b/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.h index ec16793f8c..8df9ceec35 100644 --- a/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.h +++ b/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.h @@ -10,13 +10,14 @@ #import +#import "base/RTCMacros.h" #import "base/RTCVideoEncoder.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_encoder.h" #include "media/base/codec.h" -@interface RTCWrappedNativeVideoEncoder : NSObject +@interface RTC_OBJC_TYPE (RTCWrappedNativeVideoEncoder) : NSObject - (instancetype)initWithNativeEncoder:(std::unique_ptr)encoder; diff --git a/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.mm b/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.mm index 6feecabd07..ea2a459360 100644 --- a/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.mm +++ b/sdk/objc/api/video_codec/RTCWrappedNativeVideoEncoder.mm @@ -11,9 +11,10 @@ #import #import "RTCWrappedNativeVideoEncoder.h" +#import "base/RTCMacros.h" #import "helpers/NSString+StdString.h" -@implementation RTCWrappedNativeVideoEncoder { +@implementation RTC_OBJC_TYPE (RTCWrappedNativeVideoEncoder) { std::unique_ptr _wrappedEncoder; } @@ -68,4 +69,14 @@ - (NSString *)implementationName { return nil; } +- (NSInteger)resolutionAlignment { + RTC_NOTREACHED(); + return 1; +} + +- (BOOL)applyAlignmentToAllSimulcastLayers { + RTC_NOTREACHED(); + return NO; +} + @end diff --git a/sdk/objc/base/RTCEncodedImage.h b/sdk/objc/base/RTCEncodedImage.h index 5fec8a220a..28529e5906 100644 --- a/sdk/objc/base/RTCEncodedImage.h +++ b/sdk/objc/base/RTCEncodedImage.h @@ -44,7 +44,6 @@ RTC_OBJC_EXPORT @property(nonatomic, assign) int64_t encodeFinishMs; @property(nonatomic, assign) RTCFrameType frameType; @property(nonatomic, assign) RTCVideoRotation rotation; -@property(nonatomic, assign) BOOL completeFrame; @property(nonatomic, strong) NSNumber *qp; 
@property(nonatomic, assign) RTCVideoContentType contentType; diff --git a/sdk/objc/base/RTCEncodedImage.m b/sdk/objc/base/RTCEncodedImage.m index dec9630539..ad8441aabd 100644 --- a/sdk/objc/base/RTCEncodedImage.m +++ b/sdk/objc/base/RTCEncodedImage.m @@ -23,7 +23,6 @@ @implementation RTC_OBJC_TYPE (RTCEncodedImage) @synthesize encodeFinishMs = _encodeFinishMs; @synthesize frameType = _frameType; @synthesize rotation = _rotation; -@synthesize completeFrame = _completeFrame; @synthesize qp = _qp; @synthesize contentType = _contentType; diff --git a/sdk/objc/base/RTCMacros.h b/sdk/objc/base/RTCMacros.h index e527ff6bc4..b5a79113b8 100644 --- a/sdk/objc/base/RTCMacros.h +++ b/sdk/objc/base/RTCMacros.h @@ -11,6 +11,18 @@ #ifndef SDK_OBJC_BASE_RTCMACROS_H_ #define SDK_OBJC_BASE_RTCMACROS_H_ +#ifdef WEBRTC_ENABLE_SYMBOL_EXPORT + +#if defined(WEBRTC_LIBRARY_IMPL) +#define RTC_OBJC_EXPORT __attribute__((visibility("default"))) +#endif + +#endif // WEBRTC_ENABLE_SYMBOL_EXPORT + +#ifndef RTC_OBJC_EXPORT +#define RTC_OBJC_EXPORT +#endif + // Internal macros used to correctly concatenate symbols. #define RTC_SYMBOL_CONCAT_HELPER(a, b) a##b #define RTC_SYMBOL_CONCAT(a, b) RTC_SYMBOL_CONCAT_HELPER(a, b) @@ -35,8 +47,6 @@ // that will be affected by the configurable RTC_OBJC_TYPE_PREFIX. #define RTC_OBJC_TYPE(type_name) RTC_SYMBOL_CONCAT(RTC_OBJC_TYPE_PREFIX, type_name) -#define RTC_OBJC_EXPORT __attribute__((visibility("default"))) - #if defined(__cplusplus) #define RTC_EXTERN extern "C" RTC_OBJC_EXPORT #else diff --git a/sdk/objc/base/RTCRtpFragmentationHeader.h b/sdk/objc/base/RTCRtpFragmentationHeader.h deleted file mode 100644 index 001b4e9deb..0000000000 --- a/sdk/objc/base/RTCRtpFragmentationHeader.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#import - -#import "RTCMacros.h" - -NS_ASSUME_NONNULL_BEGIN - -/** Information for header. Corresponds to webrtc::RTPFragmentationHeader. */ -RTC_OBJC_EXPORT -@interface RTC_OBJC_TYPE (RTCRtpFragmentationHeader) : NSObject - -@property(nonatomic, strong) NSArray *fragmentationOffset; -@property(nonatomic, strong) NSArray *fragmentationLength; -@property(nonatomic, strong) NSArray *fragmentationTimeDiff; -@property(nonatomic, strong) NSArray *fragmentationPlType; - -@end - -NS_ASSUME_NONNULL_END diff --git a/sdk/objc/base/RTCRtpFragmentationHeader.m b/sdk/objc/base/RTCRtpFragmentationHeader.m deleted file mode 100644 index 60e2f5d1e6..0000000000 --- a/sdk/objc/base/RTCRtpFragmentationHeader.m +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2017 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#import "RTCRtpFragmentationHeader.h" - -@implementation RTC_OBJC_TYPE (RTCRtpFragmentationHeader) - -@synthesize fragmentationOffset = _fragmentationOffset; -@synthesize fragmentationLength = _fragmentationLength; -@synthesize fragmentationTimeDiff = _fragmentationTimeDiff; -@synthesize fragmentationPlType = _fragmentationPlType; - -@end diff --git a/sdk/objc/base/RTCVideoEncoder.h b/sdk/objc/base/RTCVideoEncoder.h index 7d1a7afd7f..26cf4ec03f 100644 --- a/sdk/objc/base/RTCVideoEncoder.h +++ b/sdk/objc/base/RTCVideoEncoder.h @@ -13,7 +13,6 @@ #import "RTCCodecSpecificInfo.h" #import "RTCEncodedImage.h" #import "RTCMacros.h" -#import "RTCRtpFragmentationHeader.h" #import "RTCVideoEncoderQpThresholds.h" #import "RTCVideoEncoderSettings.h" #import "RTCVideoFrame.h" @@ -22,15 +21,14 @@ NS_ASSUME_NONNULL_BEGIN /** Callback block for encoder. */ typedef BOOL (^RTCVideoEncoderCallback)(RTC_OBJC_TYPE(RTCEncodedImage) * frame, - id info, - RTC_OBJC_TYPE(RTCRtpFragmentationHeader) * header); + id info); /** Protocol for encoder implementations. */ RTC_OBJC_EXPORT @protocol RTC_OBJC_TYPE (RTCVideoEncoder) - - (void)setCallback : (RTCVideoEncoderCallback)callback; +- (void)setCallback:(nullable RTCVideoEncoderCallback)callback; - (NSInteger)startEncodeWithSettings:(RTC_OBJC_TYPE(RTCVideoEncoderSettings) *)settings numberOfCores:(int)numberOfCores; - (NSInteger)releaseEncoder; @@ -45,6 +43,13 @@ RTC_OBJC_EXPORT * disables quality scaling. */ - (nullable RTC_OBJC_TYPE(RTCVideoEncoderQpThresholds) *)scalingSettings; +/** Resolutions should be aligned to this value. */ +@property(nonatomic, readonly) NSInteger resolutionAlignment; + +/** If enabled, resolution alignment is applied to all simulcast layers simultaneously so that when + scaled, all resolutions comply with 'resolutionAlignment'. 
*/ +@property(nonatomic, readonly) BOOL applyAlignmentToAllSimulcastLayers; + @end NS_ASSUME_NONNULL_END diff --git a/sdk/objc/components/audio/RTCAudioSession+Configuration.mm b/sdk/objc/components/audio/RTCAudioSession+Configuration.mm index b2753f282e..449f31e9dd 100644 --- a/sdk/objc/components/audio/RTCAudioSession+Configuration.mm +++ b/sdk/objc/components/audio/RTCAudioSession+Configuration.mm @@ -43,9 +43,6 @@ - (BOOL)setConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configur if (outError) { *outError = nil; } - if (![self checkLock:outError]) { - return NO; - } // Provide an error even if there isn't one so we can log it. We will not // return immediately on error in this function and instead try to set diff --git a/sdk/objc/components/audio/RTCAudioSession+Private.h b/sdk/objc/components/audio/RTCAudioSession+Private.h index 4c1eb1c44a..8496ca6bbc 100644 --- a/sdk/objc/components/audio/RTCAudioSession+Private.h +++ b/sdk/objc/components/audio/RTCAudioSession+Private.h @@ -35,8 +35,6 @@ NS_ASSUME_NONNULL_BEGIN */ @property(nonatomic, assign) BOOL isInterrupted; -- (BOOL)checkLock:(NSError **)outError; - /** Adds the delegate to the list of delegates, and places it at the front of * the list. This delegate will be notified before other delegates of * audio events. diff --git a/sdk/objc/components/audio/RTCAudioSession.h b/sdk/objc/components/audio/RTCAudioSession.h index f917e327a4..79658e3c81 100644 --- a/sdk/objc/components/audio/RTCAudioSession.h +++ b/sdk/objc/components/audio/RTCAudioSession.h @@ -137,8 +137,6 @@ RTC_OBJC_EXPORT * AVAudioSession. */ @property(nonatomic, readonly) BOOL isActive; -/** Whether RTCAudioSession is currently locked for configuration. */ -@property(nonatomic, readonly) BOOL isLocked; /** If YES, WebRTC will not initialize the audio unit automatically when an * audio track is ready for playout or recording. 
Instead, applications should diff --git a/sdk/objc/components/audio/RTCAudioSession.mm b/sdk/objc/components/audio/RTCAudioSession.mm index 74b57acd61..057f62cf27 100644 --- a/sdk/objc/components/audio/RTCAudioSession.mm +++ b/sdk/objc/components/audio/RTCAudioSession.mm @@ -16,7 +16,7 @@ #include "rtc_base/atomic_ops.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #import "RTCAudioSessionConfiguration.h" #import "base/RTCLogging.h" @@ -35,10 +35,9 @@ @interface RTC_OBJC_TYPE (RTCAudioSession) // TODO(tkchin): Consider more granular locking. We're not expecting a lot of // lock contention so coarse locks should be fine for now. @implementation RTC_OBJC_TYPE (RTCAudioSession) { - rtc::CriticalSection _crit; + webrtc::Mutex _mutex; AVAudioSession *_session; volatile int _activationCount; - volatile int _lockRecursionCount; volatile int _webRTCSessionCount; BOOL _isActive; BOOL _useManualAudio; @@ -151,10 +150,6 @@ - (BOOL)isActive { } } -- (BOOL)isLocked { - return _lockRecursionCount > 0; -} - - (void)setUseManualAudio:(BOOL)useManualAudio { @synchronized(self) { if (_useManualAudio == useManualAudio) { @@ -234,20 +229,11 @@ - (void)removeDelegate:(id)delegate { #pragma clang diagnostic ignored "-Wthread-safety-analysis" - (void)lockForConfiguration { - _crit.Enter(); - rtc::AtomicOps::Increment(&_lockRecursionCount); + _mutex.Lock(); } - (void)unlockForConfiguration { - // Don't let threads other than the one that called lockForConfiguration - // unlock. - if (_crit.TryEnter()) { - rtc::AtomicOps::Decrement(&_lockRecursionCount); - // One unlock for the tryLock, and another one to actually unlock. If this - // was called without anyone calling lock, we will hit an assertion. 
- _crit.Leave(); - _crit.Leave(); - } + _mutex.Unlock(); } #pragma clang diagnostic pop @@ -346,13 +332,8 @@ - (NSTimeInterval)preferredIOBufferDuration { return self.session.preferredIOBufferDuration; } -// TODO(tkchin): Simplify the amount of locking happening here. Likely that we -// can just do atomic increments / decrements. - (BOOL)setActive:(BOOL)active error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } int activationCount = _activationCount; if (!active && activationCount == 0) { RTCLogWarning(@"Attempting to deactivate without prior activation."); @@ -382,24 +363,27 @@ - (BOOL)setActive:(BOOL)active } } if (success) { - if (shouldSetActive) { - self.isActive = active; - if (active && self.isInterrupted) { - self.isInterrupted = NO; - [self notifyDidEndInterruptionWithShouldResumeSession:YES]; - } - } if (active) { + if (shouldSetActive) { + self.isActive = active; + if (self.isInterrupted) { + self.isInterrupted = NO; + [self notifyDidEndInterruptionWithShouldResumeSession:YES]; + } + } [self incrementActivationCount]; + [self notifyDidSetActive:active]; } - [self notifyDidSetActive:active]; } else { RTCLogError(@"Failed to setActive:%d. Error: %@", active, error.localizedDescription); [self notifyFailedToSetActive:active error:error]; } - // Decrement activation count on deactivation whether or not it succeeded. + // Set isActive and decrement activation count on deactivation + // whether or not it succeeded. 
if (!active) { + self.isActive = active; + [self notifyDidSetActive:active]; [self decrementActivationCount]; } RTCLog(@"Number of current activations: %d", _activationCount); @@ -409,85 +393,52 @@ - (BOOL)setActive:(BOOL)active - (BOOL)setCategory:(NSString *)category withOptions:(AVAudioSessionCategoryOptions)options error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setCategory:category withOptions:options error:outError]; } - (BOOL)setMode:(NSString *)mode error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setMode:mode error:outError]; } - (BOOL)setInputGain:(float)gain error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setInputGain:gain error:outError]; } - (BOOL)setPreferredSampleRate:(double)sampleRate error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setPreferredSampleRate:sampleRate error:outError]; } - (BOOL)setPreferredIOBufferDuration:(NSTimeInterval)duration error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setPreferredIOBufferDuration:duration error:outError]; } - (BOOL)setPreferredInputNumberOfChannels:(NSInteger)count error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setPreferredInputNumberOfChannels:count error:outError]; } - (BOOL)setPreferredOutputNumberOfChannels:(NSInteger)count error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setPreferredOutputNumberOfChannels:count error:outError]; } - (BOOL)overrideOutputAudioPort:(AVAudioSessionPortOverride)portOverride error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session overrideOutputAudioPort:portOverride error:outError]; } - (BOOL)setPreferredInput:(AVAudioSessionPortDescription *)inPort 
error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setPreferredInput:inPort error:outError]; } - (BOOL)setInputDataSource:(AVAudioSessionDataSourceDescription *)dataSource error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setInputDataSource:dataSource error:outError]; } - (BOOL)setOutputDataSource:(AVAudioSessionDataSourceDescription *)dataSource error:(NSError **)outError { - if (![self checkLock:outError]) { - return NO; - } return [self.session setOutputDataSource:dataSource error:outError]; } @@ -608,18 +559,6 @@ - (void)handleApplicationDidBecomeActive:(NSNotification *)notification { #pragma mark - Private -+ (NSError *)lockError { - NSDictionary *userInfo = @{ - NSLocalizedDescriptionKey: - @"Must call lockForConfiguration before calling this method." - }; - NSError *error = - [[NSError alloc] initWithDomain:kRTCAudioSessionErrorDomain - code:kRTCAudioSessionErrorLockRequired - userInfo:userInfo]; - return error; -} - - (std::vector<__weak id >)delegates { @synchronized(self) { // Note: this returns a copy. @@ -681,25 +620,10 @@ - (void)setIsInterrupted:(BOOL)isInterrupted { } } -- (BOOL)checkLock:(NSError **)outError { - // Check ivar instead of trying to acquire lock so that we won't accidentally - // acquire lock if it hasn't already been called. 
- if (!self.isLocked) { - if (outError) { - *outError = [RTC_OBJC_TYPE(RTCAudioSession) lockError]; - } - return NO; - } - return YES; -} - - (BOOL)beginWebRTCSession:(NSError **)outError { if (outError) { *outError = nil; } - if (![self checkLock:outError]) { - return NO; - } rtc::AtomicOps::Increment(&_webRTCSessionCount); [self notifyDidStartPlayOrRecord]; return YES; @@ -709,9 +633,6 @@ - (BOOL)endWebRTCSession:(NSError **)outError { if (outError) { *outError = nil; } - if (![self checkLock:outError]) { - return NO; - } rtc::AtomicOps::Decrement(&_webRTCSessionCount); [self notifyDidStopPlayOrRecord]; return YES; @@ -721,9 +642,6 @@ - (BOOL)configureWebRTCSession:(NSError **)outError { if (outError) { *outError = nil; } - if (![self checkLock:outError]) { - return NO; - } RTCLog(@"Configuring audio session for WebRTC."); // Configure the AVAudioSession and activate it. @@ -784,9 +702,6 @@ - (BOOL)unconfigureWebRTCSession:(NSError **)outError { if (outError) { *outError = nil; } - if (![self checkLock:outError]) { - return NO; - } RTCLog(@"Unconfiguring audio session for WebRTC."); [self setActive:NO error:outError]; diff --git a/sdk/objc/components/capturer/RTCCameraVideoCapturer.h b/sdk/objc/components/capturer/RTCCameraVideoCapturer.h index fed5a37827..370bfa70f0 100644 --- a/sdk/objc/components/capturer/RTCCameraVideoCapturer.h +++ b/sdk/objc/components/capturer/RTCCameraVideoCapturer.h @@ -40,7 +40,7 @@ NS_EXTENSION_UNAVAILABLE_IOS("Camera not available in app extensions.") - (void)startCaptureWithDevice:(AVCaptureDevice *)device format:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps - completionHandler:(nullable void (^)(NSError *))completionHandler; + completionHandler:(nullable void (^)(NSError *_Nullable))completionHandler; // Stops the capture session asynchronously and notifies callback on completion. 
- (void)stopCaptureWithCompletionHandler:(nullable void (^)(void))completionHandler; diff --git a/sdk/objc/components/capturer/RTCCameraVideoCapturer.m b/sdk/objc/components/capturer/RTCCameraVideoCapturer.m index 6edcec88e9..e0e9e41254 100644 --- a/sdk/objc/components/capturer/RTCCameraVideoCapturer.m +++ b/sdk/objc/components/capturer/RTCCameraVideoCapturer.m @@ -153,7 +153,7 @@ - (void)stopCapture { - (void)startCaptureWithDevice:(AVCaptureDevice *)device format:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps - completionHandler:(nullable void (^)(NSError *))completionHandler { + completionHandler:(nullable void (^)(NSError *_Nullable error))completionHandler { _willBeRunning = YES; [RTC_OBJC_TYPE(RTCDispatcher) dispatchAsyncOnType:RTCDispatcherTypeCaptureSession diff --git a/sdk/objc/components/network/RTCNetworkMonitor+Private.h b/sdk/objc/components/network/RTCNetworkMonitor+Private.h new file mode 100644 index 0000000000..efb37bb63b --- /dev/null +++ b/sdk/objc/components/network/RTCNetworkMonitor+Private.h @@ -0,0 +1,23 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#import "RTCNetworkMonitor.h" + +#include "sdk/objc/native/src/network_monitor_observer.h" + +@interface RTCNetworkMonitor () + +/** |observer| is a raw pointer and should be kept alive + * for this object's lifetime. 
+ */ +- (instancetype)initWithObserver:(webrtc::NetworkMonitorObserver *)observer + NS_DESIGNATED_INITIALIZER; + +@end diff --git a/common_video/include/video_frame.h b/sdk/objc/components/network/RTCNetworkMonitor.h similarity index 52% rename from common_video/include/video_frame.h rename to sdk/objc/components/network/RTCNetworkMonitor.h index ba280f2a8c..21d22f5463 100644 --- a/common_video/include/video_frame.h +++ b/sdk/objc/components/network/RTCNetworkMonitor.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. + * Copyright 2020 The WebRTC Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,10 +8,17 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef COMMON_VIDEO_INCLUDE_VIDEO_FRAME_H_ -#define COMMON_VIDEO_INCLUDE_VIDEO_FRAME_H_ +#import -// TODO(nisse): Delete this file, after downstream code is updated. -#include "api/video/encoded_image.h" +NS_ASSUME_NONNULL_BEGIN -#endif // COMMON_VIDEO_INCLUDE_VIDEO_FRAME_H_ +/** Listens for NWPathMonitor updates and forwards the results to a C++ + * observer. + */ +@interface RTCNetworkMonitor : NSObject + +- (instancetype)init NS_UNAVAILABLE; + +@end + +NS_ASSUME_NONNULL_END diff --git a/sdk/objc/components/network/RTCNetworkMonitor.mm b/sdk/objc/components/network/RTCNetworkMonitor.mm new file mode 100644 index 0000000000..8ac7d3a0d2 --- /dev/null +++ b/sdk/objc/components/network/RTCNetworkMonitor.mm @@ -0,0 +1,109 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#import "RTCNetworkMonitor+Private.h" + +#import + +#import "base/RTCLogging.h" +#import "helpers/RTCDispatcher+Private.h" + +namespace { + +rtc::AdapterType AdapterTypeFromInterfaceType(nw_interface_type_t interfaceType) { + rtc::AdapterType adapterType = rtc::ADAPTER_TYPE_UNKNOWN; + switch (interfaceType) { + case nw_interface_type_other: + adapterType = rtc::ADAPTER_TYPE_UNKNOWN; + break; + case nw_interface_type_wifi: + adapterType = rtc::ADAPTER_TYPE_WIFI; + break; + case nw_interface_type_cellular: + adapterType = rtc::ADAPTER_TYPE_CELLULAR; + break; + case nw_interface_type_wired: + adapterType = rtc::ADAPTER_TYPE_ETHERNET; + break; + case nw_interface_type_loopback: + adapterType = rtc::ADAPTER_TYPE_LOOPBACK; + break; + default: + adapterType = rtc::ADAPTER_TYPE_UNKNOWN; + break; + } + return adapterType; +} + +} // namespace + +@implementation RTCNetworkMonitor { + webrtc::NetworkMonitorObserver *_observer; + nw_path_monitor_t _pathMonitor; + dispatch_queue_t _monitorQueue; +} + +- (instancetype)initWithObserver:(webrtc::NetworkMonitorObserver *)observer { + RTC_DCHECK(observer); + if (self = [super init]) { + _observer = observer; + if (@available(iOS 12, *)) { + _pathMonitor = nw_path_monitor_create(); + if (_pathMonitor == nil) { + RTCLog(@"nw_path_monitor_create failed."); + return nil; + } + RTCLog(@"NW path monitor created."); + __weak RTCNetworkMonitor *weakSelf = self; + nw_path_monitor_set_update_handler(_pathMonitor, ^(nw_path_t path) { + if (weakSelf == nil) { + return; + } + RTCNetworkMonitor *strongSelf = weakSelf; + RTCLog(@"NW path monitor: updated."); + nw_path_status_t status = nw_path_get_status(path); + if (status == nw_path_status_invalid) { + RTCLog(@"NW path monitor status: invalid."); + } else if (status == nw_path_status_unsatisfied) { + RTCLog(@"NW path monitor status: unsatisfied."); + } else if (status == nw_path_status_satisfied) { + RTCLog(@"NW path monitor status: satisfied."); + } else if (status == 
nw_path_status_satisfiable) { + RTCLog(@"NW path monitor status: satisfiable."); + } + std::map *map = + new std::map(); + nw_path_enumerate_interfaces( + path, (nw_path_enumerate_interfaces_block_t) ^ (nw_interface_t interface) { + const char *name = nw_interface_get_name(interface); + nw_interface_type_t interfaceType = nw_interface_get_type(interface); + RTCLog(@"NW path monitor available interface: %s", name); + rtc::AdapterType adapterType = AdapterTypeFromInterfaceType(interfaceType); + map->insert(std::pair(name, adapterType)); + }); + strongSelf->_observer->OnPathUpdate(std::move(*map)); + delete map; + }); + nw_path_monitor_set_queue( + _pathMonitor, + [RTC_OBJC_TYPE(RTCDispatcher) dispatchQueueForType:RTCDispatcherTypeNetworkMonitor]); + nw_path_monitor_start(_pathMonitor); + } + } + return self; +} + +- (void)dealloc { + if (@available(iOS 12, *)) { + nw_path_monitor_cancel(_pathMonitor); + } +} + +@end diff --git a/sdk/objc/components/renderer/metal/RTCMTLVideoView.h b/sdk/objc/components/renderer/metal/RTCMTLVideoView.h index 5678112ade..3320d12076 100644 --- a/sdk/objc/components/renderer/metal/RTCMTLVideoView.h +++ b/sdk/objc/components/renderer/metal/RTCMTLVideoView.h @@ -21,8 +21,6 @@ NS_ASSUME_NONNULL_BEGIN * * It has id property that renders video frames in the view's * bounds using Metal. - * NOTE: always check if metal is available on the running device via - * RTC_SUPPORTS_METAL macro before initializing this class. 
*/ NS_CLASS_AVAILABLE_IOS(9) diff --git a/sdk/objc/components/renderer/metal/RTCMTLVideoView.m b/sdk/objc/components/renderer/metal/RTCMTLVideoView.m index f5be7c061c..4c50bcf9c1 100644 --- a/sdk/objc/components/renderer/metal/RTCMTLVideoView.m +++ b/sdk/objc/components/renderer/metal/RTCMTLVideoView.m @@ -86,11 +86,7 @@ - (void)setVideoContentMode:(UIViewContentMode)mode { #pragma mark - Private + (BOOL)isMetalAvailable { -#if defined(RTC_SUPPORTS_METAL) return MTLCreateSystemDefaultDevice() != nil; -#else - return NO; -#endif } + (MTKView *)createMetalView:(CGRect)frame { diff --git a/sdk/objc/components/renderer/opengl/RTCDefaultShader.h b/sdk/objc/components/renderer/opengl/RTCDefaultShader.h index 71a073ab21..bba04283ee 100644 --- a/sdk/objc/components/renderer/opengl/RTCDefaultShader.h +++ b/sdk/objc/components/renderer/opengl/RTCDefaultShader.h @@ -9,6 +9,7 @@ */ #import "RTCVideoViewShading.h" +#import "RTCMacros.h" NS_ASSUME_NONNULL_BEGIN @@ -16,6 +17,7 @@ NS_ASSUME_NONNULL_BEGIN * and RTCEAGLVideoView if no external shader is specified. This shader will render * the video in a rectangle without any color or geometric transformations. 
*/ +RTC_OBJC_EXPORT @interface RTCDefaultShader : NSObject @end diff --git a/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m b/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m index 4046cfedbe..f4a97a8659 100644 --- a/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m +++ b/sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.m @@ -13,11 +13,10 @@ #import "RTCH264ProfileLevelId.h" #import "RTCVideoDecoderH264.h" #import "api/video_codec/RTCVideoCodecConstants.h" +#import "api/video_codec/RTCVideoDecoderAV1.h" #import "api/video_codec/RTCVideoDecoderVP8.h" -#import "base/RTCVideoCodecInfo.h" -#if defined(RTC_ENABLE_VP9) #import "api/video_codec/RTCVideoDecoderVP9.h" -#endif +#import "base/RTCVideoCodecInfo.h" @implementation RTC_OBJC_TYPE (RTCDefaultVideoDecoderFactory) @@ -43,19 +42,23 @@ @implementation RTC_OBJC_TYPE (RTCDefaultVideoDecoderFactory) RTC_OBJC_TYPE(RTCVideoCodecInfo) *vp8Info = [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp8Name]; -#if defined(RTC_ENABLE_VP9) - RTC_OBJC_TYPE(RTCVideoCodecInfo) *vp9Info = - [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name]; -#endif - - return @[ + NSMutableArray *result = [@[ constrainedHighInfo, constrainedBaselineInfo, vp8Info, -#if defined(RTC_ENABLE_VP9) - vp9Info, -#endif - ]; + ] mutableCopy]; + + if ([RTC_OBJC_TYPE(RTCVideoDecoderVP9) isSupported]) { + [result + addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name]]; + } + + if ([RTC_OBJC_TYPE(RTCVideoDecoderAV1) isSupported]) { + [result + addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecAv1Name]]; + } + + return result; } - (id)createDecoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info { @@ -63,10 +66,12 @@ @implementation RTC_OBJC_TYPE (RTCDefaultVideoDecoderFactory) return [[RTC_OBJC_TYPE(RTCVideoDecoderH264) alloc] init]; } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) { return 
[RTC_OBJC_TYPE(RTCVideoDecoderVP8) vp8Decoder]; -#if defined(RTC_ENABLE_VP9) - } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name]) { + } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name] && + [RTC_OBJC_TYPE(RTCVideoDecoderVP9) isSupported]) { return [RTC_OBJC_TYPE(RTCVideoDecoderVP9) vp9Decoder]; -#endif + } else if ([info.name isEqualToString:kRTCVideoCodecAv1Name] && + [RTC_OBJC_TYPE(RTCVideoDecoderAV1) isSupported]) { + return [RTC_OBJC_TYPE(RTCVideoDecoderAV1) av1Decoder]; } return nil; diff --git a/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m b/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m index 35a1407f38..06c4e8c22f 100644 --- a/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m +++ b/sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.m @@ -13,11 +13,10 @@ #import "RTCH264ProfileLevelId.h" #import "RTCVideoEncoderH264.h" #import "api/video_codec/RTCVideoCodecConstants.h" +#import "api/video_codec/RTCVideoEncoderAV1.h" #import "api/video_codec/RTCVideoEncoderVP8.h" -#import "base/RTCVideoCodecInfo.h" -#if defined(RTC_ENABLE_VP9) #import "api/video_codec/RTCVideoEncoderVP9.h" -#endif +#import "base/RTCVideoCodecInfo.h" @implementation RTC_OBJC_TYPE (RTCDefaultVideoEncoderFactory) @@ -45,19 +44,23 @@ @implementation RTC_OBJC_TYPE (RTCDefaultVideoEncoderFactory) RTC_OBJC_TYPE(RTCVideoCodecInfo) *vp8Info = [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp8Name]; -#if defined(RTC_ENABLE_VP9) - RTC_OBJC_TYPE(RTCVideoCodecInfo) *vp9Info = - [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name]; -#endif - - return @[ + NSMutableArray *result = [@[ constrainedHighInfo, constrainedBaselineInfo, vp8Info, -#if defined(RTC_ENABLE_VP9) - vp9Info, -#endif - ]; + ] mutableCopy]; + + if ([RTC_OBJC_TYPE(RTCVideoEncoderVP9) isSupported]) { + [result + addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecVp9Name]]; + } + + if 
([RTC_OBJC_TYPE(RTCVideoEncoderAV1) isSupported]) { + [result + addObject:[[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithName:kRTCVideoCodecAv1Name]]; + } + + return result; } - (id)createEncoder:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)info { @@ -65,10 +68,12 @@ @implementation RTC_OBJC_TYPE (RTCDefaultVideoEncoderFactory) return [[RTC_OBJC_TYPE(RTCVideoEncoderH264) alloc] initWithCodecInfo:info]; } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) { return [RTC_OBJC_TYPE(RTCVideoEncoderVP8) vp8Encoder]; -#if defined(RTC_ENABLE_VP9) - } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name]) { + } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name] && + [RTC_OBJC_TYPE(RTCVideoEncoderVP9) isSupported]) { return [RTC_OBJC_TYPE(RTCVideoEncoderVP9) vp9Encoder]; -#endif + } else if ([info.name isEqualToString:kRTCVideoCodecAv1Name] && + [RTC_OBJC_TYPE(RTCVideoEncoderAV1) isSupported]) { + return [RTC_OBJC_TYPE(RTCVideoEncoderAV1) av1Encoder]; } return nil; diff --git a/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm b/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm index b985d9df02..f0ef3ec232 100644 --- a/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm +++ b/sdk/objc/components/video_codec/RTCH264ProfileLevelId.mm @@ -16,7 +16,7 @@ #import "UIDevice+H264Profile.h" #endif -#include "media/base/h264_profile_level_id.h" +#include "api/video_codecs/h264_profile_level_id.h" #include "media/base/media_constants.h" namespace { @@ -38,13 +38,12 @@ #if defined(WEBRTC_IOS) -using namespace webrtc::H264; - -NSString *MaxSupportedLevelForProfile(Profile profile) { - const absl::optional profileLevelId = [UIDevice maxSupportedH264Profile]; +NSString *MaxSupportedLevelForProfile(webrtc::H264Profile profile) { + const absl::optional profileLevelId = + [UIDevice maxSupportedH264Profile]; if (profileLevelId && profileLevelId->profile >= profile) { const absl::optional profileString = - ProfileLevelIdToString(ProfileLevelId(profile, 
profileLevelId->level)); + H264ProfileLevelIdToString(webrtc::H264ProfileLevelId(profile, profileLevelId->level)); if (profileString) { return [NSString stringForStdString:*profileString]; } @@ -55,7 +54,7 @@ NSString *MaxSupportedProfileLevelConstrainedBaseline() { #if defined(WEBRTC_IOS) - NSString *profile = MaxSupportedLevelForProfile(webrtc::H264::kProfileConstrainedBaseline); + NSString *profile = MaxSupportedLevelForProfile(webrtc::H264Profile::kProfileConstrainedBaseline); if (profile != nil) { return profile; } @@ -65,7 +64,7 @@ NSString *MaxSupportedProfileLevelConstrainedHigh() { #if defined(WEBRTC_IOS) - NSString *profile = MaxSupportedLevelForProfile(webrtc::H264::kProfileConstrainedHigh); + NSString *profile = MaxSupportedLevelForProfile(webrtc::H264Profile::kProfileConstrainedHigh); if (profile != nil) { return profile; } @@ -94,8 +93,8 @@ - (instancetype)initWithHexString:(NSString *)hexString { if (self = [super init]) { self.hexString = hexString; - absl::optional profile_level_id = - webrtc::H264::ParseProfileLevelId([hexString cStringUsingEncoding:NSUTF8StringEncoding]); + absl::optional profile_level_id = + webrtc::ParseH264ProfileLevelId([hexString cStringUsingEncoding:NSUTF8StringEncoding]); if (profile_level_id.has_value()) { self.profile = static_cast(profile_level_id->profile); self.level = static_cast(profile_level_id->level); @@ -110,8 +109,8 @@ - (instancetype)initWithProfile:(RTCH264Profile)profile level:(RTCH264Level)leve self.level = level; absl::optional hex_string = - webrtc::H264::ProfileLevelIdToString(webrtc::H264::ProfileLevelId( - static_cast(profile), static_cast(level))); + webrtc::H264ProfileLevelIdToString(webrtc::H264ProfileLevelId( + static_cast(profile), static_cast(level))); self.hexString = [NSString stringWithCString:hex_string.value_or("").c_str() encoding:NSUTF8StringEncoding]; } diff --git a/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm b/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm index 
52edefe053..06cfb741d8 100644 --- a/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm +++ b/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm @@ -202,20 +202,33 @@ - (int)resetDecompressionSession { // CVPixelBuffers directly to the renderer. // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that that // we can pass CVPixelBuffers as native handles in decoder output. +#if TARGET_OS_SIMULATOR + static size_t const attributesSize = 2; +#else static size_t const attributesSize = 3; +#endif + CFTypeRef keys[attributesSize] = { -#if defined(WEBRTC_IOS) - kCVPixelBufferOpenGLESCompatibilityKey, +#if defined(WEBRTC_IOS) && TARGET_OS_MACCATALYST + kCVPixelBufferMetalCompatibilityKey, +#elif defined(WEBRTC_IOS) + kCVPixelBufferOpenGLESCompatibilityKey, #elif defined(WEBRTC_MAC) - kCVPixelBufferOpenGLCompatibilityKey, + kCVPixelBufferOpenGLCompatibilityKey, #endif - kCVPixelBufferIOSurfacePropertiesKey, - kCVPixelBufferPixelFormatTypeKey - }; +#if !(TARGET_OS_SIMULATOR) + kCVPixelBufferIOSurfacePropertiesKey, +#endif + kCVPixelBufferPixelFormatTypeKey}; CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0); int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange; CFNumberRef pixelFormat = CFNumberCreate(nullptr, kCFNumberLongType, &nv12type); +#if TARGET_OS_SIMULATOR + CFTypeRef values[attributesSize] = {kCFBooleanTrue, pixelFormat}; +#else CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue, pixelFormat}; +#endif + CFDictionaryRef attributes = CreateCFTypeDictionary(keys, values, attributesSize); if (ioSurfaceValue) { CFRelease(ioSurfaceValue); diff --git a/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm b/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm index 113806489c..7c0d029ae7 100644 --- a/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm +++ b/sdk/objc/components/video_codec/RTCVideoEncoderH264.mm @@ -19,7 +19,6 @@ #endif #import "RTCCodecSpecificInfoH264.h" #import 
"RTCH264ProfileLevelId.h" -#import "api/peerconnection/RTCRtpFragmentationHeader+Private.h" #import "api/peerconnection/RTCVideoCodecInfo+Private.h" #import "base/RTCCodecSpecificInfo.h" #import "base/RTCI420Buffer.h" @@ -29,10 +28,9 @@ #import "components/video_frame_buffer/RTCCVPixelBuffer.h" #import "helpers.h" +#include "api/video_codecs/h264_profile_level_id.h" #include "common_video/h264/h264_bitstream_parser.h" -#include "common_video/h264/profile_level_id.h" #include "common_video/include/bitrate_adjuster.h" -#include "modules/include/module_common_types.h" #include "modules/video_coding/include/video_error_codes.h" #include "rtc_base/buffer.h" #include "rtc_base/logging.h" @@ -175,100 +173,100 @@ void compressionOutputCallback(void *encoder, // no specific VideoToolbox profile for the specified level, AutoLevel will be // returned. The user must initialize the encoder with a resolution and // framerate conforming to the selected H264 level regardless. -CFStringRef ExtractProfile(const webrtc::H264::ProfileLevelId &profile_level_id) { +CFStringRef ExtractProfile(const webrtc::H264ProfileLevelId &profile_level_id) { switch (profile_level_id.profile) { - case webrtc::H264::kProfileConstrainedBaseline: - case webrtc::H264::kProfileBaseline: + case webrtc::H264Profile::kProfileConstrainedBaseline: + case webrtc::H264Profile::kProfileBaseline: switch (profile_level_id.level) { - case webrtc::H264::kLevel3: + case webrtc::H264Level::kLevel3: return kVTProfileLevel_H264_Baseline_3_0; - case webrtc::H264::kLevel3_1: + case webrtc::H264Level::kLevel3_1: return kVTProfileLevel_H264_Baseline_3_1; - case webrtc::H264::kLevel3_2: + case webrtc::H264Level::kLevel3_2: return kVTProfileLevel_H264_Baseline_3_2; - case webrtc::H264::kLevel4: + case webrtc::H264Level::kLevel4: return kVTProfileLevel_H264_Baseline_4_0; - case webrtc::H264::kLevel4_1: + case webrtc::H264Level::kLevel4_1: return kVTProfileLevel_H264_Baseline_4_1; - case webrtc::H264::kLevel4_2: + case 
webrtc::H264Level::kLevel4_2: return kVTProfileLevel_H264_Baseline_4_2; - case webrtc::H264::kLevel5: + case webrtc::H264Level::kLevel5: return kVTProfileLevel_H264_Baseline_5_0; - case webrtc::H264::kLevel5_1: + case webrtc::H264Level::kLevel5_1: return kVTProfileLevel_H264_Baseline_5_1; - case webrtc::H264::kLevel5_2: + case webrtc::H264Level::kLevel5_2: return kVTProfileLevel_H264_Baseline_5_2; - case webrtc::H264::kLevel1: - case webrtc::H264::kLevel1_b: - case webrtc::H264::kLevel1_1: - case webrtc::H264::kLevel1_2: - case webrtc::H264::kLevel1_3: - case webrtc::H264::kLevel2: - case webrtc::H264::kLevel2_1: - case webrtc::H264::kLevel2_2: + case webrtc::H264Level::kLevel1: + case webrtc::H264Level::kLevel1_b: + case webrtc::H264Level::kLevel1_1: + case webrtc::H264Level::kLevel1_2: + case webrtc::H264Level::kLevel1_3: + case webrtc::H264Level::kLevel2: + case webrtc::H264Level::kLevel2_1: + case webrtc::H264Level::kLevel2_2: return kVTProfileLevel_H264_Baseline_AutoLevel; } - case webrtc::H264::kProfileMain: + case webrtc::H264Profile::kProfileMain: switch (profile_level_id.level) { - case webrtc::H264::kLevel3: + case webrtc::H264Level::kLevel3: return kVTProfileLevel_H264_Main_3_0; - case webrtc::H264::kLevel3_1: + case webrtc::H264Level::kLevel3_1: return kVTProfileLevel_H264_Main_3_1; - case webrtc::H264::kLevel3_2: + case webrtc::H264Level::kLevel3_2: return kVTProfileLevel_H264_Main_3_2; - case webrtc::H264::kLevel4: + case webrtc::H264Level::kLevel4: return kVTProfileLevel_H264_Main_4_0; - case webrtc::H264::kLevel4_1: + case webrtc::H264Level::kLevel4_1: return kVTProfileLevel_H264_Main_4_1; - case webrtc::H264::kLevel4_2: + case webrtc::H264Level::kLevel4_2: return kVTProfileLevel_H264_Main_4_2; - case webrtc::H264::kLevel5: + case webrtc::H264Level::kLevel5: return kVTProfileLevel_H264_Main_5_0; - case webrtc::H264::kLevel5_1: + case webrtc::H264Level::kLevel5_1: return kVTProfileLevel_H264_Main_5_1; - case webrtc::H264::kLevel5_2: + case 
webrtc::H264Level::kLevel5_2: return kVTProfileLevel_H264_Main_5_2; - case webrtc::H264::kLevel1: - case webrtc::H264::kLevel1_b: - case webrtc::H264::kLevel1_1: - case webrtc::H264::kLevel1_2: - case webrtc::H264::kLevel1_3: - case webrtc::H264::kLevel2: - case webrtc::H264::kLevel2_1: - case webrtc::H264::kLevel2_2: + case webrtc::H264Level::kLevel1: + case webrtc::H264Level::kLevel1_b: + case webrtc::H264Level::kLevel1_1: + case webrtc::H264Level::kLevel1_2: + case webrtc::H264Level::kLevel1_3: + case webrtc::H264Level::kLevel2: + case webrtc::H264Level::kLevel2_1: + case webrtc::H264Level::kLevel2_2: return kVTProfileLevel_H264_Main_AutoLevel; } - case webrtc::H264::kProfileConstrainedHigh: - case webrtc::H264::kProfileHigh: + case webrtc::H264Profile::kProfileConstrainedHigh: + case webrtc::H264Profile::kProfileHigh: switch (profile_level_id.level) { - case webrtc::H264::kLevel3: + case webrtc::H264Level::kLevel3: return kVTProfileLevel_H264_High_3_0; - case webrtc::H264::kLevel3_1: + case webrtc::H264Level::kLevel3_1: return kVTProfileLevel_H264_High_3_1; - case webrtc::H264::kLevel3_2: + case webrtc::H264Level::kLevel3_2: return kVTProfileLevel_H264_High_3_2; - case webrtc::H264::kLevel4: + case webrtc::H264Level::kLevel4: return kVTProfileLevel_H264_High_4_0; - case webrtc::H264::kLevel4_1: + case webrtc::H264Level::kLevel4_1: return kVTProfileLevel_H264_High_4_1; - case webrtc::H264::kLevel4_2: + case webrtc::H264Level::kLevel4_2: return kVTProfileLevel_H264_High_4_2; - case webrtc::H264::kLevel5: + case webrtc::H264Level::kLevel5: return kVTProfileLevel_H264_High_5_0; - case webrtc::H264::kLevel5_1: + case webrtc::H264Level::kLevel5_1: return kVTProfileLevel_H264_High_5_1; - case webrtc::H264::kLevel5_2: + case webrtc::H264Level::kLevel5_2: return kVTProfileLevel_H264_High_5_2; - case webrtc::H264::kLevel1: - case webrtc::H264::kLevel1_b: - case webrtc::H264::kLevel1_1: - case webrtc::H264::kLevel1_2: - case webrtc::H264::kLevel1_3: - case 
webrtc::H264::kLevel2: - case webrtc::H264::kLevel2_1: - case webrtc::H264::kLevel2_2: + case webrtc::H264Level::kLevel1: + case webrtc::H264Level::kLevel1_b: + case webrtc::H264Level::kLevel1_1: + case webrtc::H264Level::kLevel1_2: + case webrtc::H264Level::kLevel1_3: + case webrtc::H264Level::kLevel2: + case webrtc::H264Level::kLevel2_1: + case webrtc::H264Level::kLevel2_2: return kVTProfileLevel_H264_High_AutoLevel; } } @@ -278,33 +276,33 @@ CFStringRef ExtractProfile(const webrtc::H264::ProfileLevelId &profile_level_id) // can be processed by given encoder with |profile_level_id|. // See https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.264-201610-S!!PDF-E&type=items // for details. -NSUInteger GetMaxSampleRate(const webrtc::H264::ProfileLevelId &profile_level_id) { +NSUInteger GetMaxSampleRate(const webrtc::H264ProfileLevelId &profile_level_id) { switch (profile_level_id.level) { - case webrtc::H264::kLevel3: + case webrtc::H264Level::kLevel3: return 10368000; - case webrtc::H264::kLevel3_1: + case webrtc::H264Level::kLevel3_1: return 27648000; - case webrtc::H264::kLevel3_2: + case webrtc::H264Level::kLevel3_2: return 55296000; - case webrtc::H264::kLevel4: - case webrtc::H264::kLevel4_1: + case webrtc::H264Level::kLevel4: + case webrtc::H264Level::kLevel4_1: return 62914560; - case webrtc::H264::kLevel4_2: + case webrtc::H264Level::kLevel4_2: return 133693440; - case webrtc::H264::kLevel5: + case webrtc::H264Level::kLevel5: return 150994944; - case webrtc::H264::kLevel5_1: + case webrtc::H264Level::kLevel5_1: return 251658240; - case webrtc::H264::kLevel5_2: + case webrtc::H264Level::kLevel5_2: return 530841600; - case webrtc::H264::kLevel1: - case webrtc::H264::kLevel1_b: - case webrtc::H264::kLevel1_1: - case webrtc::H264::kLevel1_2: - case webrtc::H264::kLevel1_3: - case webrtc::H264::kLevel2: - case webrtc::H264::kLevel2_1: - case webrtc::H264::kLevel2_2: + case webrtc::H264Level::kLevel1: + case webrtc::H264Level::kLevel1_b: + case 
webrtc::H264Level::kLevel1_1: + case webrtc::H264Level::kLevel1_2: + case webrtc::H264Level::kLevel1_3: + case webrtc::H264Level::kLevel2: + case webrtc::H264Level::kLevel2_1: + case webrtc::H264Level::kLevel2_2: // Zero means auto rate setting. return 0; } @@ -319,7 +317,7 @@ @implementation RTC_OBJC_TYPE (RTCVideoEncoderH264) { uint32_t _encoderFrameRate; uint32_t _maxAllowedFrameRate; RTCH264PacketizationMode _packetizationMode; - absl::optional _profile_level_id; + absl::optional _profile_level_id; RTCVideoEncoderCallback _callback; int32_t _width; int32_t _height; @@ -344,7 +342,7 @@ - (instancetype)initWithCodecInfo:(RTC_OBJC_TYPE(RTCVideoCodecInfo) *)codecInfo _bitrateAdjuster.reset(new webrtc::BitrateAdjuster(.5, .95)); _packetizationMode = RTCH264PacketizationModeNonInterleaved; _profile_level_id = - webrtc::H264::ParseSdpProfileLevelId([codecInfo nativeSdpVideoFormat].parameters); + webrtc::ParseSdpForH264ProfileLevelId([codecInfo nativeSdpVideoFormat].parameters); RTC_DCHECK(_profile_level_id); RTC_LOG(LS_INFO) << "Using profile " << CFStringToString(ExtractProfile(*_profile_level_id)); RTC_CHECK([codecInfo.name isEqualToString:kRTCVideoCodecH264Name]); @@ -531,6 +529,14 @@ - (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate { return WEBRTC_VIDEO_CODEC_OK; } +- (NSInteger)resolutionAlignment { + return 1; +} + +- (BOOL)applyAlignmentToAllSimulcastLayers { + return NO; +} + #pragma mark - Private - (NSInteger)releaseEncoder { @@ -596,14 +602,15 @@ - (int)resetCompressionSessionWithPixelFormat:(OSType)framePixelFormat { // buffers retrieved from the encoder's pixel buffer pool. 
const size_t attributesSize = 3; CFTypeRef keys[attributesSize] = { -#if defined(WEBRTC_IOS) - kCVPixelBufferOpenGLESCompatibilityKey, +#if defined(WEBRTC_IOS) && TARGET_OS_MACCATALYST + kCVPixelBufferMetalCompatibilityKey, +#elif defined(WEBRTC_IOS) + kCVPixelBufferOpenGLESCompatibilityKey, #elif defined(WEBRTC_MAC) - kCVPixelBufferOpenGLCompatibilityKey, + kCVPixelBufferOpenGLCompatibilityKey, #endif - kCVPixelBufferIOSurfacePropertiesKey, - kCVPixelBufferPixelFormatTypeKey - }; + kCVPixelBufferIOSurfacePropertiesKey, + kCVPixelBufferPixelFormatTypeKey}; CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0); int64_t pixelFormatType = framePixelFormat; CFNumberRef pixelFormat = CFNumberCreate(nullptr, kCFNumberLongType, &pixelFormatType); @@ -761,6 +768,10 @@ - (void)frameWasEncoded:(OSStatus)status renderTimeMs:(int64_t)renderTimeMs timestamp:(uint32_t)timestamp rotation:(RTCVideoRotation)rotation { + RTCVideoEncoderCallback callback = _callback; + if (!callback) { + return; + } if (status != noErr) { RTC_LOG(LS_ERROR) << "H264 encode failed with code: " << status; return; @@ -783,16 +794,8 @@ - (void)frameWasEncoded:(OSStatus)status } __block std::unique_ptr buffer = std::make_unique(); - RTC_OBJC_TYPE(RTCRtpFragmentationHeader) * header; - { - std::unique_ptr header_cpp; - bool result = - H264CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, buffer.get(), &header_cpp); - header = [[RTC_OBJC_TYPE(RTCRtpFragmentationHeader) alloc] - initWithNativeFragmentationHeader:header_cpp.get()]; - if (!result) { - return; - } + if (!webrtc::H264CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, buffer.get())) { + return; } RTC_OBJC_TYPE(RTCEncodedImage) *frame = [[RTC_OBJC_TYPE(RTCEncodedImage) alloc] init]; @@ -804,7 +807,6 @@ - (void)frameWasEncoded:(OSStatus)status }]; frame.encodedWidth = width; frame.encodedHeight = height; - frame.completeFrame = YES; frame.frameType = isKeyframe ? 
RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta; frame.captureTimeMs = renderTimeMs; frame.timeStamp = timestamp; @@ -813,12 +815,10 @@ - (void)frameWasEncoded:(OSStatus)status RTCVideoContentTypeUnspecified; frame.flags = webrtc::VideoSendTiming::kInvalid; - int qp; - _h264BitstreamParser.ParseBitstream(buffer->data(), buffer->size()); - _h264BitstreamParser.GetLastSliceQp(&qp); - frame.qp = @(qp); + _h264BitstreamParser.ParseBitstream(*buffer); + frame.qp = @(_h264BitstreamParser.GetLastSliceQp().value_or(-1)); - BOOL res = _callback(frame, codecSpecificInfo, header); + BOOL res = callback(frame, codecSpecificInfo); if (!res) { RTC_LOG(LS_ERROR) << "Encode callback failed"; return; diff --git a/sdk/objc/components/video_codec/UIDevice+H264Profile.h b/sdk/objc/components/video_codec/UIDevice+H264Profile.h index bb6f6ce520..a51debb9fa 100644 --- a/sdk/objc/components/video_codec/UIDevice+H264Profile.h +++ b/sdk/objc/components/video_codec/UIDevice+H264Profile.h @@ -10,10 +10,10 @@ #import -#include "media/base/h264_profile_level_id.h" +#include "api/video_codecs/h264_profile_level_id.h" @interface UIDevice (H264Profile) -+ (absl::optional)maxSupportedH264Profile; ++ (absl::optional)maxSupportedH264Profile; @end diff --git a/sdk/objc/components/video_codec/UIDevice+H264Profile.mm b/sdk/objc/components/video_codec/UIDevice+H264Profile.mm index cbae79fad8..42ebadf01e 100644 --- a/sdk/objc/components/video_codec/UIDevice+H264Profile.mm +++ b/sdk/objc/components/video_codec/UIDevice+H264Profile.mm @@ -15,91 +15,156 @@ namespace { -using namespace webrtc::H264; +using namespace webrtc; struct SupportedH264Profile { const RTCDeviceType deviceType; - const ProfileLevelId profile; + const H264ProfileLevelId profile; }; constexpr SupportedH264Profile kH264MaxSupportedProfiles[] = { // iPhones with at least iOS 9 - {RTCDeviceTypeIPhone11ProMax, {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP806 - {RTCDeviceTypeIPhone11Pro, {kProfileHigh, kLevel5_2}}, // 
https://support.apple.com/kb/SP805 - {RTCDeviceTypeIPhone11, {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP804 - {RTCDeviceTypeIPhoneXS, {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP779 - {RTCDeviceTypeIPhoneXSMax, {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP780 - {RTCDeviceTypeIPhoneXR, {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP781 - {RTCDeviceTypeIPhoneX, {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP770 - {RTCDeviceTypeIPhone8, {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP767 - {RTCDeviceTypeIPhone8Plus, {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP768 - {RTCDeviceTypeIPhone7, {kProfileHigh, kLevel5_1}}, // https://support.apple.com/kb/SP743 - {RTCDeviceTypeIPhone7Plus, {kProfileHigh, kLevel5_1}}, // https://support.apple.com/kb/SP744 - {RTCDeviceTypeIPhoneSE, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP738 - {RTCDeviceTypeIPhone6S, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP726 - {RTCDeviceTypeIPhone6SPlus, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP727 - {RTCDeviceTypeIPhone6, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP705 - {RTCDeviceTypeIPhone6Plus, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP706 - {RTCDeviceTypeIPhone5SGSM, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP685 + {RTCDeviceTypeIPhone12ProMax, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP832 + {RTCDeviceTypeIPhone12Pro, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP831 + {RTCDeviceTypeIPhone12, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP830 + {RTCDeviceTypeIPhone12Mini, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP829 + {RTCDeviceTypeIPhone11ProMax, + {H264Profile::kProfileHigh, 
H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP806 + {RTCDeviceTypeIPhone11Pro, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP805 + {RTCDeviceTypeIPhone11, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP804 + {RTCDeviceTypeIPhoneXS, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP779 + {RTCDeviceTypeIPhoneXSMax, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP780 + {RTCDeviceTypeIPhoneXR, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP781 + {RTCDeviceTypeIPhoneX, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP770 + {RTCDeviceTypeIPhone8, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP767 + {RTCDeviceTypeIPhone8Plus, + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP768 + {RTCDeviceTypeIPhone7, + {H264Profile::kProfileHigh, H264Level::kLevel5_1}}, // https://support.apple.com/kb/SP743 + {RTCDeviceTypeIPhone7Plus, + {H264Profile::kProfileHigh, H264Level::kLevel5_1}}, // https://support.apple.com/kb/SP744 + {RTCDeviceTypeIPhoneSE, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP738 + {RTCDeviceTypeIPhone6S, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP726 + {RTCDeviceTypeIPhone6SPlus, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP727 + {RTCDeviceTypeIPhone6, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP705 + {RTCDeviceTypeIPhone6Plus, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP706 + {RTCDeviceTypeIPhone5SGSM, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP685 {RTCDeviceTypeIPhone5SGSM_CDMA, 
- {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP685 - {RTCDeviceTypeIPhone5GSM, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP655 + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP685 + {RTCDeviceTypeIPhone5GSM, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP655 {RTCDeviceTypeIPhone5GSM_CDMA, - {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP655 - {RTCDeviceTypeIPhone5CGSM, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP684 + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP655 + {RTCDeviceTypeIPhone5CGSM, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP684 {RTCDeviceTypeIPhone5CGSM_CDMA, - {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP684 - {RTCDeviceTypeIPhone4S, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP643 + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP684 + {RTCDeviceTypeIPhone4S, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP643 // iPods with at least iOS 9 - {RTCDeviceTypeIPodTouch7G, {kProfileMain, kLevel4_1}}, // https://support.apple.com/kb/SP796 - {RTCDeviceTypeIPodTouch6G, {kProfileMain, kLevel4_1}}, // https://support.apple.com/kb/SP720 - {RTCDeviceTypeIPodTouch5G, {kProfileMain, kLevel3_1}}, // https://support.apple.com/kb/SP657 + {RTCDeviceTypeIPodTouch7G, + {H264Profile::kProfileMain, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP796 + {RTCDeviceTypeIPodTouch6G, + {H264Profile::kProfileMain, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP720 + {RTCDeviceTypeIPodTouch5G, + {H264Profile::kProfileMain, H264Level::kLevel3_1}}, // https://support.apple.com/kb/SP657 // iPads with at least iOS 9 - {RTCDeviceTypeIPadAir3Gen, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP787 - 
{RTCDeviceTypeIPadMini5Gen, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP788 + {RTCDeviceTypeIPadAir4Gen, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP828 + {RTCDeviceTypeIPad8, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP822 + {RTCDeviceTypeIPadPro4Gen12Inch, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP815 + {RTCDeviceTypeIPadPro4Gen11Inch, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP814 + {RTCDeviceTypeIPadAir3Gen, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP787 + {RTCDeviceTypeIPadMini5Gen, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP788 {RTCDeviceTypeIPadPro3Gen12Inch, - {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP785 + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP785 {RTCDeviceTypeIPadPro3Gen11Inch, - {kProfileHigh, kLevel5_2}}, // https://support.apple.com/kb/SP784 - {RTCDeviceTypeIPad7Gen10Inch, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP807 - {RTCDeviceTypeIPad2Wifi, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP622 - {RTCDeviceTypeIPad2GSM, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP622 - {RTCDeviceTypeIPad2CDMA, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP622 - {RTCDeviceTypeIPad2Wifi2, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP622 - {RTCDeviceTypeIPadMiniWifi, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP661 - {RTCDeviceTypeIPadMiniGSM, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP661 + {H264Profile::kProfileHigh, H264Level::kLevel5_2}}, // https://support.apple.com/kb/SP784 + {RTCDeviceTypeIPad7Gen10Inch, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP807 + 
{RTCDeviceTypeIPad2Wifi, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622 + {RTCDeviceTypeIPad2GSM, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622 + {RTCDeviceTypeIPad2CDMA, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622 + {RTCDeviceTypeIPad2Wifi2, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP622 + {RTCDeviceTypeIPadMiniWifi, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661 + {RTCDeviceTypeIPadMiniGSM, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661 {RTCDeviceTypeIPadMiniGSM_CDMA, - {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP661 - {RTCDeviceTypeIPad3Wifi, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP647 - {RTCDeviceTypeIPad3GSM_CDMA, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP647 - {RTCDeviceTypeIPad3GSM, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP647 - {RTCDeviceTypeIPad4Wifi, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP662 - {RTCDeviceTypeIPad4GSM, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP662 - {RTCDeviceTypeIPad4GSM_CDMA, {kProfileHigh, kLevel4_1}}, // https://support.apple.com/kb/SP662 - {RTCDeviceTypeIPad5, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP751 - {RTCDeviceTypeIPad6, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP774 - {RTCDeviceTypeIPadAirWifi, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP692 + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP661 + {RTCDeviceTypeIPad3Wifi, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647 + {RTCDeviceTypeIPad3GSM_CDMA, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647 + 
{RTCDeviceTypeIPad3GSM, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP647 + {RTCDeviceTypeIPad4Wifi, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662 + {RTCDeviceTypeIPad4GSM, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662 + {RTCDeviceTypeIPad4GSM_CDMA, + {H264Profile::kProfileHigh, H264Level::kLevel4_1}}, // https://support.apple.com/kb/SP662 + {RTCDeviceTypeIPad5, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP751 + {RTCDeviceTypeIPad6, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP774 + {RTCDeviceTypeIPadAirWifi, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692 {RTCDeviceTypeIPadAirCellular, - {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP692 + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692 {RTCDeviceTypeIPadAirWifiCellular, - {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP692 - {RTCDeviceTypeIPadAir2, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP708 - {RTCDeviceTypeIPadMini2GWifi, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP693 + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP692 + {RTCDeviceTypeIPadAir2, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP708 + {RTCDeviceTypeIPadMini2GWifi, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693 {RTCDeviceTypeIPadMini2GCellular, - {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP693 + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693 {RTCDeviceTypeIPadMini2GWifiCellular, - {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP693 - {RTCDeviceTypeIPadMini3, {kProfileHigh, kLevel4_2}}, 
// https://support.apple.com/kb/SP709 - {RTCDeviceTypeIPadMini4, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP725 - {RTCDeviceTypeIPadPro9Inch, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP739 - {RTCDeviceTypeIPadPro12Inch, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/sp723 - {RTCDeviceTypeIPadPro12Inch2, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP761 - {RTCDeviceTypeIPadPro10Inch, {kProfileHigh, kLevel4_2}}, // https://support.apple.com/kb/SP762 + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP693 + {RTCDeviceTypeIPadMini3, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP709 + {RTCDeviceTypeIPadMini4, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP725 + {RTCDeviceTypeIPadPro9Inch, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP739 + {RTCDeviceTypeIPadPro12Inch, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/sp723 + {RTCDeviceTypeIPadPro12Inch2, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP761 + {RTCDeviceTypeIPadPro10Inch, + {H264Profile::kProfileHigh, H264Level::kLevel4_2}}, // https://support.apple.com/kb/SP762 }; -absl::optional FindMaxSupportedProfileForDevice(RTCDeviceType deviceType) { +absl::optional FindMaxSupportedProfileForDevice(RTCDeviceType deviceType) { const auto* result = std::find_if(std::begin(kH264MaxSupportedProfiles), std::end(kH264MaxSupportedProfiles), [deviceType](const SupportedH264Profile& supportedProfile) { @@ -115,7 +180,7 @@ @implementation UIDevice (H264Profile) -+ (absl::optional)maxSupportedH264Profile { ++ (absl::optional)maxSupportedH264Profile { return FindMaxSupportedProfileForDevice([self deviceType]); } diff --git a/sdk/objc/components/video_codec/nalu_rewriter.cc b/sdk/objc/components/video_codec/nalu_rewriter.cc index 
dc258d6064..60382d2b2d 100644 --- a/sdk/objc/components/video_codec/nalu_rewriter.cc +++ b/sdk/objc/components/video_codec/nalu_rewriter.cc @@ -29,14 +29,10 @@ using H264::ParseNaluType; const char kAnnexBHeaderBytes[4] = {0, 0, 0, 1}; const size_t kAvccHeaderByteSize = sizeof(uint32_t); -bool H264CMSampleBufferToAnnexBBuffer( - CMSampleBufferRef avcc_sample_buffer, - bool is_keyframe, - rtc::Buffer* annexb_buffer, - std::unique_ptr* out_header) { +bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer, + bool is_keyframe, + rtc::Buffer* annexb_buffer) { RTC_DCHECK(avcc_sample_buffer); - RTC_DCHECK(out_header); - out_header->reset(nullptr); // Get format description from the sample buffer. CMVideoFormatDescriptionRef description = @@ -61,10 +57,6 @@ bool H264CMSampleBufferToAnnexBBuffer( // Truncate any previous data in the buffer without changing its capacity. annexb_buffer->SetSize(0); - size_t nalu_offset = 0; - std::vector frag_offsets; - std::vector frag_lengths; - // Place all parameter sets at the front of buffer. if (is_keyframe) { size_t param_set_size = 0; @@ -80,10 +72,6 @@ bool H264CMSampleBufferToAnnexBBuffer( annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes)); annexb_buffer->AppendData(reinterpret_cast(param_set), param_set_size); - // Update fragmentation. - frag_offsets.push_back(nalu_offset + sizeof(kAnnexBHeaderBytes)); - frag_lengths.push_back(param_set_size); - nalu_offset += sizeof(kAnnexBHeaderBytes) + param_set_size; } } @@ -132,10 +120,6 @@ bool H264CMSampleBufferToAnnexBBuffer( // Update buffer. annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes)); annexb_buffer->AppendData(data_ptr + nalu_header_size, packet_size); - // Update fragmentation. 
- frag_offsets.push_back(nalu_offset + sizeof(kAnnexBHeaderBytes)); - frag_lengths.push_back(packet_size); - nalu_offset += sizeof(kAnnexBHeaderBytes) + packet_size; size_t bytes_written = packet_size + sizeof(kAnnexBHeaderBytes); bytes_remaining -= bytes_written; @@ -143,14 +127,6 @@ bool H264CMSampleBufferToAnnexBBuffer( } RTC_DCHECK_EQ(bytes_remaining, (size_t)0); - std::unique_ptr header(new RTPFragmentationHeader()); - header->VerifyAndAllocateFragmentationHeader(frag_offsets.size()); - RTC_DCHECK_EQ(frag_lengths.size(), frag_offsets.size()); - for (size_t i = 0; i < frag_offsets.size(); ++i) { - header->fragmentationOffset[i] = frag_offsets[i]; - header->fragmentationLength[i] = frag_lengths[i]; - } - *out_header = std::move(header); CFRelease(contiguous_buffer); return true; } diff --git a/sdk/objc/components/video_codec/nalu_rewriter.h b/sdk/objc/components/video_codec/nalu_rewriter.h index a0c1aa90af..d94ce7b7aa 100644 --- a/sdk/objc/components/video_codec/nalu_rewriter.h +++ b/sdk/objc/components/video_codec/nalu_rewriter.h @@ -18,7 +18,6 @@ #include #include "common_video/h264/h264_common.h" -#include "modules/include/module_common_types.h" #include "rtc_base/buffer.h" using webrtc::H264::NaluIndex; @@ -27,13 +26,10 @@ namespace webrtc { // Converts a sample buffer emitted from the VideoToolbox encoder into a buffer // suitable for RTP. The sample buffer is in avcc format whereas the rtp buffer -// needs to be in Annex B format. Data is written directly to |annexb_buffer| -// and a new RTPFragmentationHeader is returned in |out_header|. -bool H264CMSampleBufferToAnnexBBuffer( - CMSampleBufferRef avcc_sample_buffer, - bool is_keyframe, - rtc::Buffer* annexb_buffer, - std::unique_ptr* out_header); +// needs to be in Annex B format. Data is written directly to |annexb_buffer|. 
+bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer, + bool is_keyframe, + rtc::Buffer* annexb_buffer); // Converts a buffer received from RTP into a sample buffer suitable for the // VideoToolbox decoder. The RTP buffer is in annex b format whereas the sample diff --git a/sdk/objc/helpers/RTCDispatcher.h b/sdk/objc/helpers/RTCDispatcher.h index f8580f95fa..e148af6dea 100644 --- a/sdk/objc/helpers/RTCDispatcher.h +++ b/sdk/objc/helpers/RTCDispatcher.h @@ -20,6 +20,8 @@ typedef NS_ENUM(NSInteger, RTCDispatcherQueueType) { RTCDispatcherTypeCaptureSession, // Used for operations on AVAudioSession. RTCDispatcherTypeAudioSession, + // Used for operations on NWPathMonitor. + RTCDispatcherTypeNetworkMonitor, }; /** Dispatcher that asynchronously dispatches blocks to a specific diff --git a/sdk/objc/helpers/RTCDispatcher.m b/sdk/objc/helpers/RTCDispatcher.m index 2e83573adc..4df19bc297 100644 --- a/sdk/objc/helpers/RTCDispatcher.m +++ b/sdk/objc/helpers/RTCDispatcher.m @@ -12,6 +12,7 @@ static dispatch_queue_t kAudioSessionQueue = nil; static dispatch_queue_t kCaptureSessionQueue = nil; +static dispatch_queue_t kNetworkMonitorQueue = nil; @implementation RTC_OBJC_TYPE (RTCDispatcher) @@ -24,6 +25,8 @@ + (void)initialize { kCaptureSessionQueue = dispatch_queue_create( "org.webrtc.RTCDispatcherCaptureSession", DISPATCH_QUEUE_SERIAL); + kNetworkMonitorQueue = + dispatch_queue_create("org.webrtc.RTCDispatcherNetworkMonitor", DISPATCH_QUEUE_SERIAL); }); } @@ -54,6 +57,8 @@ + (dispatch_queue_t)dispatchQueueForType:(RTCDispatcherQueueType)dispatchType { return kCaptureSessionQueue; case RTCDispatcherTypeAudioSession: return kAudioSessionQueue; + case RTCDispatcherTypeNetworkMonitor: + return kNetworkMonitorQueue; } } diff --git a/sdk/objc/helpers/UIDevice+RTCDevice.h b/sdk/objc/helpers/UIDevice+RTCDevice.h index a7f655ba7b..eb9a076647 100644 --- a/sdk/objc/helpers/UIDevice+RTCDevice.h +++ b/sdk/objc/helpers/UIDevice+RTCDevice.h @@ -40,6 +40,10 @@ typedef 
NS_ENUM(NSInteger, RTCDeviceType) { RTCDeviceTypeIPhone11, RTCDeviceTypeIPhone11Pro, RTCDeviceTypeIPhone11ProMax, + RTCDeviceTypeIPhone12Mini, + RTCDeviceTypeIPhone12, + RTCDeviceTypeIPhone12Pro, + RTCDeviceTypeIPhone12ProMax, RTCDeviceTypeIPodTouch1G, RTCDeviceTypeIPodTouch2G, RTCDeviceTypeIPodTouch3G, @@ -79,8 +83,12 @@ typedef NS_ENUM(NSInteger, RTCDeviceType) { RTCDeviceTypeIPad7Gen10Inch, RTCDeviceTypeIPadPro3Gen11Inch, RTCDeviceTypeIPadPro3Gen12Inch, + RTCDeviceTypeIPadPro4Gen11Inch, + RTCDeviceTypeIPadPro4Gen12Inch, RTCDeviceTypeIPadMini5Gen, RTCDeviceTypeIPadAir3Gen, + RTCDeviceTypeIPad8, + RTCDeviceTypeIPadAir4Gen, RTCDeviceTypeSimulatori386, RTCDeviceTypeSimulatorx86_64, }; diff --git a/sdk/objc/helpers/UIDevice+RTCDevice.mm b/sdk/objc/helpers/UIDevice+RTCDevice.mm index 3210bdeee5..9b39b99b5c 100644 --- a/sdk/objc/helpers/UIDevice+RTCDevice.mm +++ b/sdk/objc/helpers/UIDevice+RTCDevice.mm @@ -52,6 +52,10 @@ + (RTCDeviceType)deviceType { @"iPhone12,1" : @(RTCDeviceTypeIPhone11), @"iPhone12,3" : @(RTCDeviceTypeIPhone11Pro), @"iPhone12,5" : @(RTCDeviceTypeIPhone11ProMax), + @"iPhone13,1" : @(RTCDeviceTypeIPhone12Mini), + @"iPhone13,2" : @(RTCDeviceTypeIPhone12), + @"iPhone13,3" : @(RTCDeviceTypeIPhone12Pro), + @"iPhone13,4" : @(RTCDeviceTypeIPhone12ProMax), @"iPod1,1" : @(RTCDeviceTypeIPodTouch1G), @"iPod2,1" : @(RTCDeviceTypeIPodTouch2G), @"iPod3,1" : @(RTCDeviceTypeIPodTouch3G), @@ -108,10 +112,18 @@ + (RTCDeviceType)deviceType { @"iPad8,6" : @(RTCDeviceTypeIPadPro3Gen12Inch), @"iPad8,7" : @(RTCDeviceTypeIPadPro3Gen12Inch), @"iPad8,8" : @(RTCDeviceTypeIPadPro3Gen12Inch), + @"iPad8,9" : @(RTCDeviceTypeIPadPro4Gen11Inch), + @"iPad8,10" : @(RTCDeviceTypeIPadPro4Gen11Inch), + @"iPad8,11" : @(RTCDeviceTypeIPadPro4Gen12Inch), + @"iPad8,12" : @(RTCDeviceTypeIPadPro4Gen12Inch), @"iPad11,1" : @(RTCDeviceTypeIPadMini5Gen), @"iPad11,2" : @(RTCDeviceTypeIPadMini5Gen), @"iPad11,3" : @(RTCDeviceTypeIPadAir3Gen), @"iPad11,4" : @(RTCDeviceTypeIPadAir3Gen), + @"iPad11,6" : 
@(RTCDeviceTypeIPad8), + @"iPad11,7" : @(RTCDeviceTypeIPad8), + @"iPad13,1" : @(RTCDeviceTypeIPadAir4Gen), + @"iPad12,2" : @(RTCDeviceTypeIPadAir4Gen), @"i386" : @(RTCDeviceTypeSimulatori386), @"x86_64" : @(RTCDeviceTypeSimulatorx86_64), }; diff --git a/sdk/objc/native/api/audio_device_module.h b/sdk/objc/native/api/audio_device_module.h index 6a61620ad5..0b12a33424 100644 --- a/sdk/objc/native/api/audio_device_module.h +++ b/sdk/objc/native/api/audio_device_module.h @@ -19,8 +19,14 @@ namespace webrtc { class AudioSourceSink; -rtc::scoped_refptr CreateAudioDeviceModule(); -rtc::scoped_refptr CreateAudioDeviceModule(AudioSourceSink* audioSink); +// If |bypass_voice_processing| is true, WebRTC will attempt to disable hardware +// audio processing on iOS. +// Warning: Setting |bypass_voice_processing| will have unpredictable +// consequences for the audio path in the device. It is not advisable to use in +// most scenarios. +rtc::scoped_refptr CreateAudioDeviceModule( + bool bypass_voice_processing = false, + AudioSourceSink* audioSink = nullptr); } // namespace webrtc diff --git a/sdk/objc/native/api/audio_device_module.mm b/sdk/objc/native/api/audio_device_module.mm index 7c5adf9d90..cfe3c55e13 100644 --- a/sdk/objc/native/api/audio_device_module.mm +++ b/sdk/objc/native/api/audio_device_module.mm @@ -18,21 +18,10 @@ namespace webrtc { -rtc::scoped_refptr CreateAudioDeviceModule() { - RTC_LOG(INFO) << __FUNCTION__; +rtc::scoped_refptr CreateAudioDeviceModule(bool bypass_voice_processing, webrtc::AudioSourceSink* audioSink) { + RTC_DLOG(INFO) << __FUNCTION__; #if defined(WEBRTC_IOS) - return new rtc::RefCountedObject(); -#else - RTC_LOG(LERROR) - << "current platform is not supported => this module will self destruct!"; - return nullptr; -#endif -} - -rtc::scoped_refptr CreateAudioDeviceModule(webrtc::AudioSourceSink* audioSink) { - RTC_LOG(INFO) << __FUNCTION__; -#if defined(WEBRTC_IOS) - return new rtc::RefCountedObject(audioSink); + return new 
rtc::RefCountedObject(bypass_voice_processing, audioSink); #else RTC_LOG(LERROR) << "current platform is not supported => this module will self destruct!"; diff --git a/test/fuzzers/mdns_parser_fuzzer.cc b/sdk/objc/native/api/network_monitor_factory.h similarity index 51% rename from test/fuzzers/mdns_parser_fuzzer.cc rename to sdk/objc/native/api/network_monitor_factory.h index 451742327f..903c66893d 100644 --- a/test/fuzzers/mdns_parser_fuzzer.cc +++ b/sdk/objc/native/api/network_monitor_factory.h @@ -1,5 +1,5 @@ /* - * Copyright 2018 The WebRTC Project Authors. All rights reserved. + * Copyright 2020 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,20 +8,17 @@ * be found in the AUTHORS file in the root of the source tree. */ -#include -#include +#ifndef SDK_OBJC_NATIVE_API_NETWORK_MONITOR_FACTORY_H_ +#define SDK_OBJC_NATIVE_API_NETWORK_MONITOR_FACTORY_H_ #include -#include "p2p/base/mdns_message.h" -#include "rtc_base/message_buffer_reader.h" +#include "rtc_base/network_monitor_factory.h" namespace webrtc { -void FuzzOneInput(const uint8_t* data, size_t size) { - MessageBufferReader buf(reinterpret_cast(data), size); - auto mdns_msg = std::make_unique(); - mdns_msg->Read(&buf); -} +std::unique_ptr CreateNetworkMonitorFactory(); } // namespace webrtc + +#endif // SDK_OBJC_NATIVE_API_NETWORK_MONITOR_FACTORY_H_ diff --git a/sdk/objc/native/api/network_monitor_factory.mm b/sdk/objc/native/api/network_monitor_factory.mm new file mode 100644 index 0000000000..acde634b1d --- /dev/null +++ b/sdk/objc/native/api/network_monitor_factory.mm @@ -0,0 +1,30 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "network_monitor_factory.h" + +#if defined(WEBRTC_IOS) +#include "sdk/objc/native/src/objc_network_monitor.h" +#endif + +#include "rtc_base/logging.h" + +namespace webrtc { + +std::unique_ptr CreateNetworkMonitorFactory() { + RTC_DLOG(LS_INFO) << __FUNCTION__; +#if defined(WEBRTC_IOS) + return std::make_unique(); +#else + return nullptr; +#endif +} + +} diff --git a/sdk/objc/native/api/video_capturer.mm b/sdk/objc/native/api/video_capturer.mm index 6dd0edbcd9..cae7a50318 100644 --- a/sdk/objc/native/api/video_capturer.mm +++ b/sdk/objc/native/api/video_capturer.mm @@ -11,7 +11,8 @@ #include "sdk/objc/native/api/video_capturer.h" #include "absl/memory/memory.h" -#include "api/video_track_source_proxy.h" +#include "api/video_track_source_proxy_factory.h" +#include "rtc_base/ref_counted_object.h" #include "sdk/objc/native/src/objc_video_track_source.h" namespace webrtc { @@ -24,8 +25,7 @@ rtc::scoped_refptr objc_video_track_source( new rtc::RefCountedObject(adapter)); rtc::scoped_refptr video_source = - webrtc::VideoTrackSourceProxy::Create( - signaling_thread, worker_thread, objc_video_track_source); + webrtc::CreateVideoTrackSourceProxy(signaling_thread, worker_thread, objc_video_track_source); objc_video_capturer.delegate = adapter; diff --git a/sdk/objc/native/src/audio/audio_device_ios.h b/sdk/objc/native/src/audio/audio_device_ios.h index a1be8a121a..81df6530ae 100644 --- a/sdk/objc/native/src/audio/audio_device_ios.h +++ b/sdk/objc/native/src/audio/audio_device_ios.h @@ -13,12 +13,12 @@ #include +#include "api/sequence_checker.h" #include "audio_session_observer.h" #include "modules/audio_device/audio_device_generic.h" #include "rtc_base/buffer.h" #include "rtc_base/thread.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" 
#include "sdk/objc/base/RTCMacros.h" #include "voice_processing_audio_unit.h" @@ -49,7 +49,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric, public VoiceProcessingAudioUnitObserver, public rtc::MessageHandler { public: - AudioDeviceIOS(); + explicit AudioDeviceIOS(bool bypass_voice_processing); ~AudioDeviceIOS() override; void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; @@ -195,6 +195,10 @@ class AudioDeviceIOS : public AudioDeviceGeneric, // Configures the audio session for WebRTC. bool ConfigureAudioSession(); + + // Like above, but requires caller to already hold session lock. + bool ConfigureAudioSessionLocked(); + // Unconfigures the audio session. void UnconfigureAudioSession(); @@ -208,12 +212,15 @@ class AudioDeviceIOS : public AudioDeviceGeneric, // Resets thread-checkers before a call is restarted. void PrepareForNewStart(); + // Determines whether voice processing should be enabled or disabled. + const bool bypass_voice_processing_; + // Ensures that methods are called from the same thread as this object is // created on. - rtc::ThreadChecker thread_checker_; + SequenceChecker thread_checker_; // Native I/O audio thread checker. - rtc::ThreadChecker io_thread_checker_; + SequenceChecker io_thread_checker_; // Thread that this object is created on. 
rtc::Thread* thread_; diff --git a/sdk/objc/native/src/audio/audio_device_ios.mm b/sdk/objc/native/src/audio/audio_device_ios.mm index a67705998a..8089a528f2 100644 --- a/sdk/objc/native/src/audio/audio_device_ios.mm +++ b/sdk/objc/native/src/audio/audio_device_ios.mm @@ -19,9 +19,7 @@ #include "helpers.h" #include "modules/audio_device/fine_audio_buffer.h" #include "rtc_base/atomic_ops.h" -#include "rtc_base/bind.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/logging.h" #include "rtc_base/thread.h" #include "rtc_base/thread_annotations.h" @@ -103,8 +101,9 @@ static void LogDeviceInfo() { } #endif // !defined(NDEBUG) -AudioDeviceIOS::AudioDeviceIOS() - : audio_device_buffer_(nullptr), +AudioDeviceIOS::AudioDeviceIOS(bool bypass_voice_processing) + : bypass_voice_processing_(bypass_voice_processing), + audio_device_buffer_(nullptr), audio_unit_(nullptr), recording_(0), playing_(0), @@ -116,7 +115,8 @@ static void LogDeviceInfo() { last_playout_time_(0), num_playout_callbacks_(0), last_output_volume_change_time_(0) { - LOGI() << "ctor" << ios::GetCurrentThreadDescription(); + LOGI() << "ctor" << ios::GetCurrentThreadDescription() + << ",bypass_voice_processing=" << bypass_voice_processing_; io_thread_checker_.Detach(); thread_checker_.Detach(); thread_ = rtc::Thread::Current(); @@ -127,6 +127,7 @@ static void LogDeviceInfo() { AudioDeviceIOS::~AudioDeviceIOS() { RTC_DCHECK(thread_checker_.IsCurrent()); LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); + thread_->Clear(this); Terminate(); audio_session_observer_ = nil; } @@ -512,9 +513,8 @@ static void LogDeviceInfo() { RTCLog(@"Stopping the audio unit due to interruption begin."); if (!audio_unit_->Stop()) { RTCLogError(@"Failed to stop the audio unit for interruption begin."); - } else { - PrepareForNewStart(); } + PrepareForNewStart(); } is_interrupted_ = true; } @@ -737,7 +737,7 @@ static void LogDeviceInfo() { bool AudioDeviceIOS::CreateAudioUnit() { 
RTC_DCHECK(!audio_unit_); - audio_unit_.reset(new VoiceProcessingAudioUnit(this)); + audio_unit_.reset(new VoiceProcessingAudioUnit(bypass_voice_processing_, this)); if (!audio_unit_->Init()) { audio_unit_.reset(); return false; @@ -817,8 +817,10 @@ static void LogDeviceInfo() { RTCLog(@"Stopping audio unit for UpdateAudioUnit"); if (!audio_unit_->Stop()) { RTCLogError(@"Failed to stop audio unit."); + PrepareForNewStart(); return; } + PrepareForNewStart(); } if (should_uninitialize_audio_unit) { @@ -848,6 +850,24 @@ static void LogDeviceInfo() { return success; } +bool AudioDeviceIOS::ConfigureAudioSessionLocked() { + RTC_DCHECK_RUN_ON(&thread_checker_); + RTCLog(@"Configuring audio session."); + if (has_configured_session_) { + RTCLogWarning(@"Audio session already configured."); + return false; + } + RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; + bool success = [session configureWebRTCSession:nil]; + if (success) { + has_configured_session_ = true; + RTCLog(@"Configured audio session."); + } else { + RTCLog(@"Failed to configure audio session."); + } + return success; +} + void AudioDeviceIOS::UnconfigureAudioSession() { RTC_DCHECK_RUN_ON(&thread_checker_); RTCLog(@"Unconfiguring audio session."); @@ -891,7 +911,7 @@ static void LogDeviceInfo() { // If we are ready to play or record, and if the audio session can be // configured, then initialize the audio unit. if (session.canPlayOrRecord) { - if (!ConfigureAudioSession()) { + if (!ConfigureAudioSessionLocked()) { // One possible reason for failure is if an attempt was made to use the // audio session during or after a Media Services failure. // See AVAudioSessionErrorCodeMediaServicesFailed for details. 
diff --git a/sdk/objc/native/src/audio/audio_device_module_ios.h b/sdk/objc/native/src/audio/audio_device_module_ios.h index c817a9893c..15d8ae83c9 100644 --- a/sdk/objc/native/src/audio/audio_device_module_ios.h +++ b/sdk/objc/native/src/audio/audio_device_module_ios.h @@ -19,7 +19,6 @@ #include "modules/audio_device/audio_device_buffer.h" #include "modules/audio_device/include/audio_device.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" namespace webrtc { @@ -32,8 +31,8 @@ class AudioDeviceModuleIOS : public AudioDeviceModule { public: int32_t AttachAudioBuffer(); - AudioDeviceModuleIOS(); - AudioDeviceModuleIOS(AudioSourceSink* audioSink); + explicit AudioDeviceModuleIOS(bool bypass_voice_processing); + explicit AudioDeviceModuleIOS(bool bypass_voice_processing, AudioSourceSink* audioSink); ~AudioDeviceModuleIOS() override; // Retrieve the currently utilized audio layer @@ -133,6 +132,7 @@ class AudioDeviceModuleIOS : public AudioDeviceModule { int GetRecordAudioParameters(AudioParameters* params) const override; #endif // WEBRTC_IOS private: + const bool bypass_voice_processing_; bool initialized_ = false; const std::unique_ptr task_queue_factory_; std::unique_ptr audio_device_; diff --git a/sdk/objc/native/src/audio/audio_device_module_ios.mm b/sdk/objc/native/src/audio/audio_device_module_ios.mm index 475265b693..9b7ba857f5 100644 --- a/sdk/objc/native/src/audio/audio_device_module_ios.mm +++ b/sdk/objc/native/src/audio/audio_device_module_ios.mm @@ -41,31 +41,26 @@ namespace webrtc { namespace ios_adm { -AudioDeviceModuleIOS::AudioDeviceModuleIOS() - : task_queue_factory_(CreateDefaultTaskQueueFactory()) { - RTC_LOG(INFO) << "current platform is IOS"; - RTC_LOG(INFO) << "iPhone Audio APIs will be utilized."; -} - -AudioDeviceModuleIOS::AudioDeviceModuleIOS(AudioSourceSink* audioSink) - : task_queue_factory_(CreateDefaultTaskQueueFactory()) { +AudioDeviceModuleIOS::AudioDeviceModuleIOS(bool bypass_voice_processing, AudioSourceSink* 
audioSink) + : bypass_voice_processing_(bypass_voice_processing), + task_queue_factory_(CreateDefaultTaskQueueFactory()) { RTC_LOG(INFO) << "current platform is IOS"; RTC_LOG(INFO) << "iPhone Audio APIs will be utilized."; audio_sink_ = audioSink; } int32_t AudioDeviceModuleIOS::AttachAudioBuffer() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; audio_device_->AttachAudioBuffer(audio_device_buffer_.get()); return 0; } AudioDeviceModuleIOS::~AudioDeviceModuleIOS() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; } int32_t AudioDeviceModuleIOS::ActiveAudioLayer(AudioLayer* audioLayer) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; AudioLayer activeAudio; if (audio_device_->ActiveAudioLayer(activeAudio) == -1) { return -1; @@ -75,12 +70,12 @@ } int32_t AudioDeviceModuleIOS::Init() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (initialized_) return 0; audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer(task_queue_factory_.get())); - audio_device_.reset(new ios_adm::AudioDeviceIOS()); + audio_device_.reset(new ios_adm::AudioDeviceIOS(bypass_voice_processing_)); RTC_CHECK(audio_device_); audio_device_->AddAudioSourceSink(audio_sink_); @@ -99,7 +94,7 @@ } int32_t AudioDeviceModuleIOS::Terminate() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; if (!initialized_) return 0; if (audio_device_->Terminate() == -1) { @@ -110,65 +105,65 @@ } bool AudioDeviceModuleIOS::Initialized() const { - RTC_LOG(INFO) << __FUNCTION__ << ": " << initialized_; + RTC_DLOG(INFO) << __FUNCTION__ << ": " << initialized_; return initialized_; } int32_t AudioDeviceModuleIOS::InitSpeaker() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->InitSpeaker(); } int32_t AudioDeviceModuleIOS::InitMicrophone() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); return 
audio_device_->InitMicrophone(); } int32_t AudioDeviceModuleIOS::SpeakerVolumeIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetSpeakerVolume(uint32_t volume) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << volume << ")"; CHECKinitialized_(); return audio_device_->SetSpeakerVolume(volume); } int32_t AudioDeviceModuleIOS::SpeakerVolume(uint32_t* volume) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); uint32_t level = 0; if (audio_device_->SpeakerVolume(level) == -1) { return -1; } *volume = level; - RTC_LOG(INFO) << "output: " << *volume; + RTC_DLOG(INFO) << "output: " << *volume; return 0; } bool AudioDeviceModuleIOS::SpeakerIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isInitialized = audio_device_->SpeakerIsInitialized(); - RTC_LOG(INFO) << "output: " << isInitialized; + RTC_DLOG(INFO) << "output: " << isInitialized; return isInitialized; } bool AudioDeviceModuleIOS::MicrophoneIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isInitialized = audio_device_->MicrophoneIsInitialized(); - RTC_LOG(INFO) << "output: " << isInitialized; + RTC_DLOG(INFO) << "output: " << isInitialized; return isInitialized; } @@ -193,110 +188,110 @@ } int32_t AudioDeviceModuleIOS::SpeakerMuteIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) { return -1; } 
*available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetSpeakerMute(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); return audio_device_->SetSpeakerMute(enable); } int32_t AudioDeviceModuleIOS::SpeakerMute(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool muted = false; if (audio_device_->SpeakerMute(muted) == -1) { return -1; } *enabled = muted; - RTC_LOG(INFO) << "output: " << muted; + RTC_DLOG(INFO) << "output: " << muted; return 0; } int32_t AudioDeviceModuleIOS::MicrophoneMuteIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetMicrophoneMute(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); return (audio_device_->SetMicrophoneMute(enable)); } int32_t AudioDeviceModuleIOS::MicrophoneMute(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool muted = false; if (audio_device_->MicrophoneMute(muted) == -1) { return -1; } *enabled = muted; - RTC_LOG(INFO) << "output: " << muted; + RTC_DLOG(INFO) << "output: " << muted; return 0; } int32_t AudioDeviceModuleIOS::MicrophoneVolumeIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) { return -1; } 
*available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetMicrophoneVolume(uint32_t volume) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << volume << ")"; CHECKinitialized_(); return (audio_device_->SetMicrophoneVolume(volume)); } int32_t AudioDeviceModuleIOS::MicrophoneVolume(uint32_t* volume) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); uint32_t level = 0; if (audio_device_->MicrophoneVolume(level) == -1) { return -1; } *volume = level; - RTC_LOG(INFO) << "output: " << *volume; + RTC_DLOG(INFO) << "output: " << *volume; return 0; } int32_t AudioDeviceModuleIOS::StereoRecordingIsAvailable( bool* available) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetStereoRecording(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); if (enable) { RTC_LOG(WARNING) << "recording in stereo is not supported"; @@ -305,31 +300,31 @@ } int32_t AudioDeviceModuleIOS::StereoRecording(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool stereo = false; if (audio_device_->StereoRecording(stereo) == -1) { return -1; } *enabled = stereo; - RTC_LOG(INFO) << "output: " << stereo; + RTC_DLOG(INFO) << "output: " << stereo; return 0; } int32_t AudioDeviceModuleIOS::StereoPlayoutIsAvailable(bool* available) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable 
= false; if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::SetStereoPlayout(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); if (audio_device_->PlayoutIsInitialized()) { RTC_LOG(LERROR) @@ -349,38 +344,38 @@ } int32_t AudioDeviceModuleIOS::StereoPlayout(bool* enabled) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool stereo = false; if (audio_device_->StereoPlayout(stereo) == -1) { return -1; } *enabled = stereo; - RTC_LOG(INFO) << "output: " << stereo; + RTC_DLOG(INFO) << "output: " << stereo; return 0; } int32_t AudioDeviceModuleIOS::PlayoutIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return 0; } int32_t AudioDeviceModuleIOS::RecordingIsAvailable(bool* available) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); bool isAvailable = false; if (audio_device_->RecordingIsAvailable(isAvailable) == -1) { return -1; } *available = isAvailable; - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return 0; } @@ -405,21 +400,21 @@ } int16_t AudioDeviceModuleIOS::PlayoutDevices() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); uint16_t nPlayoutDevices = audio_device_->PlayoutDevices(); - RTC_LOG(INFO) << "output: " << nPlayoutDevices; + RTC_DLOG(INFO) << "output: " << nPlayoutDevices; return (int16_t)(nPlayoutDevices); } int32_t 
AudioDeviceModuleIOS::SetPlayoutDevice(uint16_t index) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")"; CHECKinitialized_(); return audio_device_->SetPlayoutDevice(index); } int32_t AudioDeviceModuleIOS::SetPlayoutDevice(WindowsDeviceType device) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->SetPlayoutDevice(device); } @@ -428,7 +423,7 @@ uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; CHECKinitialized_(); if (name == NULL) { return -1; @@ -437,10 +432,10 @@ return -1; } if (name != NULL) { - RTC_LOG(INFO) << "output: name = " << name; + RTC_DLOG(INFO) << "output: name = " << name; } if (guid != NULL) { - RTC_LOG(INFO) << "output: guid = " << guid; + RTC_DLOG(INFO) << "output: guid = " << guid; } return 0; } @@ -449,7 +444,7 @@ uint16_t index, char name[kAdmMaxDeviceNameSize], char guid[kAdmMaxGuidSize]) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ", ...)"; CHECKinitialized_(); if (name == NULL) { return -1; @@ -458,138 +453,138 @@ return -1; } if (name != NULL) { - RTC_LOG(INFO) << "output: name = " << name; + RTC_DLOG(INFO) << "output: name = " << name; } if (guid != NULL) { - RTC_LOG(INFO) << "output: guid = " << guid; + RTC_DLOG(INFO) << "output: guid = " << guid; } return 0; } int16_t AudioDeviceModuleIOS::RecordingDevices() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); uint16_t nRecordingDevices = audio_device_->RecordingDevices(); - RTC_LOG(INFO) << "output: " << nRecordingDevices; + RTC_DLOG(INFO) << "output: " << nRecordingDevices; return (int16_t)nRecordingDevices; } int32_t AudioDeviceModuleIOS::SetRecordingDevice(uint16_t index) { - RTC_LOG(INFO) << 
__FUNCTION__ << "(" << index << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << index << ")"; CHECKinitialized_(); return audio_device_->SetRecordingDevice(index); } int32_t AudioDeviceModuleIOS::SetRecordingDevice(WindowsDeviceType device) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); return audio_device_->SetRecordingDevice(device); } int32_t AudioDeviceModuleIOS::InitPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); if (PlayoutIsInitialized()) { return 0; } int32_t result = audio_device_->InitPlayout(); audio_device_->AddAudioSourceSink(audio_sink_); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleIOS::InitRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); if (RecordingIsInitialized()) { return 0; } int32_t result = audio_device_->InitRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleIOS::PlayoutIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->PlayoutIsInitialized(); } bool AudioDeviceModuleIOS::RecordingIsInitialized() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->RecordingIsInitialized(); } int32_t AudioDeviceModuleIOS::StartPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); if (Playing()) { return 0; } audio_device_buffer_.get()->StartPlayout(); int32_t result = audio_device_->StartPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; 
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleIOS::StopPlayout() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); int32_t result = audio_device_->StopPlayout(); audio_device_buffer_.get()->StopPlayout(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleIOS::Playing() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->Playing(); } int32_t AudioDeviceModuleIOS::StartRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); if (Recording()) { return 0; } audio_device_buffer_.get()->StartRecording(); int32_t result = audio_device_->StartRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess", static_cast(result == 0)); return result; } int32_t AudioDeviceModuleIOS::StopRecording() { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized_(); int32_t result = audio_device_->StopRecording(); audio_device_buffer_.get()->StopRecording(); - RTC_LOG(INFO) << "output: " << result; + RTC_DLOG(INFO) << "output: " << result; RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess", static_cast(result == 0)); return result; } bool AudioDeviceModuleIOS::Recording() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); return audio_device_->Recording(); } int32_t AudioDeviceModuleIOS::RegisterAudioCallback( AudioTransport* audioCallback) { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; return audio_device_buffer_.get()->RegisterAudioCallback(audioCallback); } @@ -605,50 +600,50 @@ } bool 
AudioDeviceModuleIOS::BuiltInAECIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInAECIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleIOS::EnableBuiltInAEC(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInAEC(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_DLOG(INFO) << "output: " << ok; return ok; } bool AudioDeviceModuleIOS::BuiltInAGCIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInAGCIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleIOS::EnableBuiltInAGC(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInAGC(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_DLOG(INFO) << "output: " << ok; return ok; } bool AudioDeviceModuleIOS::BuiltInNSIsAvailable() const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; CHECKinitialized__BOOL(); bool isAvailable = audio_device_->BuiltInNSIsAvailable(); - RTC_LOG(INFO) << "output: " << isAvailable; + RTC_DLOG(INFO) << "output: " << isAvailable; return isAvailable; } int32_t AudioDeviceModuleIOS::EnableBuiltInNS(bool enable) { - RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")"; + RTC_DLOG(INFO) << __FUNCTION__ << "(" << enable << ")"; CHECKinitialized_(); int32_t ok = audio_device_->EnableBuiltInNS(enable); - RTC_LOG(INFO) << "output: " << ok; + RTC_DLOG(INFO) << "output: " << ok; return ok; } @@ -662,17 
+657,17 @@ #if defined(WEBRTC_IOS) int AudioDeviceModuleIOS::GetPlayoutAudioParameters( AudioParameters* params) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; int r = audio_device_->GetPlayoutAudioParameters(params); - RTC_LOG(INFO) << "output: " << r; + RTC_DLOG(INFO) << "output: " << r; return r; } int AudioDeviceModuleIOS::GetRecordAudioParameters( AudioParameters* params) const { - RTC_LOG(INFO) << __FUNCTION__; + RTC_DLOG(INFO) << __FUNCTION__; int r = audio_device_->GetRecordAudioParameters(params); - RTC_LOG(INFO) << "output: " << r; + RTC_DLOG(INFO) << "output: " << r; return r; } #endif // WEBRTC_IOS diff --git a/sdk/objc/native/src/audio/voice_processing_audio_unit.h b/sdk/objc/native/src/audio/voice_processing_audio_unit.h index 7293032f6f..72e29c0d67 100644 --- a/sdk/objc/native/src/audio/voice_processing_audio_unit.h +++ b/sdk/objc/native/src/audio/voice_processing_audio_unit.h @@ -46,7 +46,8 @@ class VoiceProcessingAudioUnitObserver { // VoIP applications. class VoiceProcessingAudioUnit { public: - explicit VoiceProcessingAudioUnit(VoiceProcessingAudioUnitObserver* observer); + VoiceProcessingAudioUnit(bool bypass_voice_processing, + VoiceProcessingAudioUnitObserver* observer); ~VoiceProcessingAudioUnit(); // TODO(tkchin): enum for state and state checking. @@ -129,6 +130,7 @@ class VoiceProcessingAudioUnit { // Deletes the underlying audio unit. 
void DisposeAudioUnit(); + const bool bypass_voice_processing_; VoiceProcessingAudioUnitObserver* observer_; AudioUnit vpio_unit_; VoiceProcessingAudioUnit::State state_; diff --git a/sdk/objc/native/src/audio/voice_processing_audio_unit.mm b/sdk/objc/native/src/audio/voice_processing_audio_unit.mm index a2aa7f323b..2325b2ed2e 100644 --- a/sdk/objc/native/src/audio/voice_processing_audio_unit.mm +++ b/sdk/objc/native/src/audio/voice_processing_audio_unit.mm @@ -72,9 +72,12 @@ static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) { return result; } -VoiceProcessingAudioUnit::VoiceProcessingAudioUnit( - VoiceProcessingAudioUnitObserver* observer) - : observer_(observer), vpio_unit_(nullptr), state_(kInitRequired) { +VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(bool bypass_voice_processing, + VoiceProcessingAudioUnitObserver* observer) + : bypass_voice_processing_(bypass_voice_processing), + observer_(observer), + vpio_unit_(nullptr), + state_(kInitRequired) { RTC_DCHECK(observer); } @@ -250,6 +253,24 @@ static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) { RTCLog(@"Voice Processing I/O unit is now initialized."); } + if (bypass_voice_processing_) { + // Attempt to disable builtin voice processing. + UInt32 toggle = 1; + result = AudioUnitSetProperty(vpio_unit_, + kAUVoiceIOProperty_BypassVoiceProcessing, + kAudioUnitScope_Global, + kInputBus, + &toggle, + sizeof(toggle)); + if (result == noErr) { + RTCLog(@"Successfully bypassed voice processing."); + } else { + RTCLogError(@"Failed to bypass voice processing. Error=%ld.", (long)result); + } + state_ = kInitialized; + return true; + } + // AGC should be enabled by default for Voice Processing I/O units but it is // checked below and enabled explicitly if needed. 
This scheme is used // to be absolutely sure that the AGC is enabled since we have seen cases diff --git a/sdk/objc/native/src/network_monitor_observer.h b/sdk/objc/native/src/network_monitor_observer.h new file mode 100644 index 0000000000..85fd3b992f --- /dev/null +++ b/sdk/objc/native/src/network_monitor_observer.h @@ -0,0 +1,39 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef SDK_OBJC_NATIVE_SRC_NETWORK_MONITOR_OBSERVER_H_ +#define SDK_OBJC_NATIVE_SRC_NETWORK_MONITOR_OBSERVER_H_ + +#include +#include + +#include "rtc_base/network_constants.h" +#include "rtc_base/thread.h" + +namespace webrtc { + +// Observer interface for listening to NWPathMonitor updates. +class NetworkMonitorObserver { + public: + // Called when a path update occurs, on network monitor dispatch queue. + // + // |adapter_type_by_name| is a map from interface name (i.e. "pdp_ip0") to + // adapter type, for all available interfaces on the current path. If an + // interface name isn't present it can be assumed to be unavailable. + virtual void OnPathUpdate( + std::map adapter_type_by_name) = 0; + + protected: + virtual ~NetworkMonitorObserver() {} +}; + +} // namespace webrtc + +#endif // SDK_OBJC_NATIVE_SRC_AUDIO_AUDIO_SESSION_OBSERVER_H_ diff --git a/sdk/objc/native/src/objc_network_monitor.h b/sdk/objc/native/src/objc_network_monitor.h new file mode 100644 index 0000000000..df4774efe2 --- /dev/null +++ b/sdk/objc/native/src/objc_network_monitor.h @@ -0,0 +1,67 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef SDK_OBJC_NATIVE_SRC_OBJC_NETWORK_MONITOR_H_ +#define SDK_OBJC_NATIVE_SRC_OBJC_NETWORK_MONITOR_H_ + +#include + +#include "api/sequence_checker.h" +#include "rtc_base/network_monitor.h" +#include "rtc_base/network_monitor_factory.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" +#include "sdk/objc/components/network/RTCNetworkMonitor+Private.h" +#include "sdk/objc/native/src/network_monitor_observer.h" + +namespace webrtc { + +class ObjCNetworkMonitorFactory : public rtc::NetworkMonitorFactory { + public: + ObjCNetworkMonitorFactory() = default; + ~ObjCNetworkMonitorFactory() override = default; + + rtc::NetworkMonitorInterface* CreateNetworkMonitor() override; +}; + +class ObjCNetworkMonitor : public rtc::NetworkMonitorInterface, + public NetworkMonitorObserver { + public: + ObjCNetworkMonitor(); + ~ObjCNetworkMonitor() override; + + void Start() override; + void Stop() override; + + rtc::AdapterType GetAdapterType(const std::string& interface_name) override; + rtc::AdapterType GetVpnUnderlyingAdapterType( + const std::string& interface_name) override; + rtc::NetworkPreference GetNetworkPreference( + const std::string& interface_name) override; + bool IsAdapterAvailable(const std::string& interface_name) override; + + // NetworkMonitorObserver override. + // Fans out updates to observers on the correct thread. 
+ void OnPathUpdate( + std::map adapter_type_by_name) override; + + private: + rtc::Thread* thread_ = nullptr; + bool started_ = false; + std::map adapter_type_by_name_ + RTC_GUARDED_BY(thread_); + rtc::scoped_refptr safety_flag_; + RTCNetworkMonitor* network_monitor_ = nil; +}; + +} // namespace webrtc + +#endif // SDK_OBJC_NATIVE_SRC_OBJC_NETWORK_MONITOR_H_ diff --git a/sdk/objc/native/src/objc_network_monitor.mm b/sdk/objc/native/src/objc_network_monitor.mm new file mode 100644 index 0000000000..e85bb8b6a4 --- /dev/null +++ b/sdk/objc/native/src/objc_network_monitor.mm @@ -0,0 +1,94 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "sdk/objc/native/src/objc_network_monitor.h" + +#include "rtc_base/task_utils/to_queued_task.h" + +#include + +#include "rtc_base/logging.h" + +namespace webrtc { + +rtc::NetworkMonitorInterface* ObjCNetworkMonitorFactory::CreateNetworkMonitor() { + return new ObjCNetworkMonitor(); +} + +ObjCNetworkMonitor::ObjCNetworkMonitor() { + safety_flag_ = PendingTaskSafetyFlag::Create(); +} + +ObjCNetworkMonitor::~ObjCNetworkMonitor() { + network_monitor_ = nil; +} + +void ObjCNetworkMonitor::Start() { + if (started_) { + return; + } + thread_ = rtc::Thread::Current(); + RTC_DCHECK_RUN_ON(thread_); + safety_flag_->SetAlive(); + network_monitor_ = [[RTCNetworkMonitor alloc] initWithObserver:this]; + if (network_monitor_ == nil) { + RTC_LOG(LS_WARNING) << "Failed to create RTCNetworkMonitor; not available on this OS?"; + } + started_ = true; +} + +void ObjCNetworkMonitor::Stop() { + RTC_DCHECK_RUN_ON(thread_); + if (!started_) { + return; + } + safety_flag_->SetNotAlive(); + network_monitor_ = nil; + started_ = false; +} + +rtc::AdapterType ObjCNetworkMonitor::GetAdapterType(const std::string& interface_name) { + RTC_DCHECK_RUN_ON(thread_); + if (adapter_type_by_name_.find(interface_name) == adapter_type_by_name_.end()) { + return rtc::ADAPTER_TYPE_UNKNOWN; + } + return adapter_type_by_name_.at(interface_name); +} + +rtc::AdapterType ObjCNetworkMonitor::GetVpnUnderlyingAdapterType( + const std::string& interface_name) { + return rtc::ADAPTER_TYPE_UNKNOWN; +} + +rtc::NetworkPreference ObjCNetworkMonitor::GetNetworkPreference(const std::string& interface_name) { + return rtc::NetworkPreference::NEUTRAL; +} + +bool ObjCNetworkMonitor::IsAdapterAvailable(const std::string& interface_name) { + RTC_DCHECK_RUN_ON(thread_); + if (adapter_type_by_name_.empty()) { + // If we have no path update, assume everything's available, because it's + // preferable for WebRTC to try all interfaces rather than none at all. 
+ return true; + } + return adapter_type_by_name_.find(interface_name) != adapter_type_by_name_.end(); +} + +void ObjCNetworkMonitor::OnPathUpdate( + std::map adapter_type_by_name) { + RTC_DCHECK(network_monitor_ != nil); + thread_->PostTask(ToQueuedTask(safety_flag_, [this, adapter_type_by_name] { + RTC_DCHECK_RUN_ON(thread_); + adapter_type_by_name_ = adapter_type_by_name; + SignalNetworksChanged(); + })); +} + +} // namespace webrtc diff --git a/sdk/objc/native/src/objc_video_decoder_factory.mm b/sdk/objc/native/src/objc_video_decoder_factory.mm index 09060548de..ccaed74d5a 100644 --- a/sdk/objc/native/src/objc_video_decoder_factory.mm +++ b/sdk/objc/native/src/objc_video_decoder_factory.mm @@ -10,6 +10,7 @@ #include "sdk/objc/native/src/objc_video_decoder_factory.h" +#import "base/RTCMacros.h" #import "base/RTCVideoDecoder.h" #import "base/RTCVideoDecoderFactory.h" #import "base/RTCVideoFrame.h" @@ -98,8 +99,8 @@ int32_t RegisterDecodeCompleteCallback(DecodedImageCallback *callback) override if ([codecName isEqualToString:codecInfo.name]) { id decoder = [decoder_factory_ createDecoder:codecInfo]; - if ([decoder isKindOfClass:[RTCWrappedNativeVideoDecoder class]]) { - return [(RTCWrappedNativeVideoDecoder *)decoder releaseWrappedDecoder]; + if ([decoder isKindOfClass:[RTC_OBJC_TYPE(RTCWrappedNativeVideoDecoder) class]]) { + return [(RTC_OBJC_TYPE(RTCWrappedNativeVideoDecoder) *)decoder releaseWrappedDecoder]; } else { return std::unique_ptr(new ObjCVideoDecoder(decoder)); } diff --git a/sdk/objc/native/src/objc_video_encoder_factory.h b/sdk/objc/native/src/objc_video_encoder_factory.h index 7e474c976a..38db5e6ae7 100644 --- a/sdk/objc/native/src/objc_video_encoder_factory.h +++ b/sdk/objc/native/src/objc_video_encoder_factory.h @@ -33,7 +33,6 @@ class ObjCVideoEncoderFactory : public VideoEncoderFactory { std::vector GetImplementations() const override; std::unique_ptr CreateVideoEncoder( const SdpVideoFormat& format) override; - CodecInfo 
QueryVideoEncoder(const SdpVideoFormat& format) const override; std::unique_ptr GetEncoderSelector() const override; private: diff --git a/sdk/objc/native/src/objc_video_encoder_factory.mm b/sdk/objc/native/src/objc_video_encoder_factory.mm index b54945f49e..b66554b1a4 100644 --- a/sdk/objc/native/src/objc_video_encoder_factory.mm +++ b/sdk/objc/native/src/objc_video_encoder_factory.mm @@ -17,7 +17,6 @@ #import "base/RTCVideoEncoderFactory.h" #import "components/video_codec/RTCCodecSpecificInfoH264+Private.h" #import "sdk/objc/api/peerconnection/RTCEncodedImage+Private.h" -#import "sdk/objc/api/peerconnection/RTCRtpFragmentationHeader+Private.h" #import "sdk/objc/api/peerconnection/RTCVideoCodecInfo+Private.h" #import "sdk/objc/api/peerconnection/RTCVideoEncoderSettings+Private.h" #import "sdk/objc/api/video_codec/RTCVideoCodecConstants.h" @@ -27,7 +26,6 @@ #include "api/video/video_frame.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_encoder.h" -#include "modules/include/module_common_types.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_error_codes.h" #include "rtc_base/logging.h" @@ -50,25 +48,24 @@ int32_t InitEncode(const VideoCodec *codec_settings, const Settings &encoder_set } int32_t RegisterEncodeCompleteCallback(EncodedImageCallback *callback) override { - [encoder_ setCallback:^BOOL(RTC_OBJC_TYPE(RTCEncodedImage) * _Nonnull frame, - id _Nonnull info, - RTC_OBJC_TYPE(RTCRtpFragmentationHeader) * _Nonnull header) { - EncodedImage encodedImage = [frame nativeEncodedImage]; - - // Handle types that can be converted into one of CodecSpecificInfo's hard coded cases. 
- CodecSpecificInfo codecSpecificInfo; - if ([info isKindOfClass:[RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) class]]) { - codecSpecificInfo = - [(RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) *)info nativeCodecSpecificInfo]; - } - - std::unique_ptr fragmentationHeader = - [header createNativeFragmentationHeader]; - EncodedImageCallback::Result res = - callback->OnEncodedImage(encodedImage, &codecSpecificInfo, fragmentationHeader.get()); - return res.error == EncodedImageCallback::Result::OK; - }]; - + if (callback) { + [encoder_ setCallback:^BOOL(RTC_OBJC_TYPE(RTCEncodedImage) * _Nonnull frame, + id _Nonnull info) { + EncodedImage encodedImage = [frame nativeEncodedImage]; + + // Handle types that can be converted into one of CodecSpecificInfo's hard coded cases. + CodecSpecificInfo codecSpecificInfo; + if ([info isKindOfClass:[RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) class]]) { + codecSpecificInfo = + [(RTC_OBJC_TYPE(RTCCodecSpecificInfoH264) *)info nativeCodecSpecificInfo]; + } + + EncodedImageCallback::Result res = callback->OnEncodedImage(encodedImage, &codecSpecificInfo); + return res.error == EncodedImageCallback::Result::OK; + }]; + } else { + [encoder_ setCallback:nil]; + } return WEBRTC_VIDEO_CODEC_OK; } @@ -101,6 +98,8 @@ void SetRates(const RateControlParameters ¶meters) override { info.scaling_settings = qp_thresholds ? 
ScalingSettings(qp_thresholds.low, qp_thresholds.high) : ScalingSettings::kOff; + info.requested_resolution_alignment = encoder_.resolutionAlignment > 0 ?: 1; + info.apply_alignment_to_all_simulcast_layers = encoder_.applyAlignmentToAllSimulcastLayers; info.is_hardware_accelerated = true; info.has_internal_source = false; return info; @@ -174,26 +173,13 @@ void OnCurrentEncoder(const SdpVideoFormat &format) override { return GetSupportedFormats(); } -VideoEncoderFactory::CodecInfo ObjCVideoEncoderFactory::QueryVideoEncoder( - const SdpVideoFormat &format) const { - // TODO(andersc): This is a hack until we figure out how this should be done properly. - NSString *formatName = [NSString stringForStdString:format.name]; - NSSet *wrappedSoftwareFormats = - [NSSet setWithObjects:kRTCVideoCodecVp8Name, kRTCVideoCodecVp9Name, nil]; - - CodecInfo codec_info; - codec_info.is_hardware_accelerated = ![wrappedSoftwareFormats containsObject:formatName]; - codec_info.has_internal_source = false; - return codec_info; -} - std::unique_ptr ObjCVideoEncoderFactory::CreateVideoEncoder( const SdpVideoFormat &format) { RTC_OBJC_TYPE(RTCVideoCodecInfo) *info = [[RTC_OBJC_TYPE(RTCVideoCodecInfo) alloc] initWithNativeSdpVideoFormat:format]; id encoder = [encoder_factory_ createEncoder:info]; - if ([encoder isKindOfClass:[RTCWrappedNativeVideoEncoder class]]) { - return [(RTCWrappedNativeVideoEncoder *)encoder releaseWrappedEncoder]; + if ([encoder isKindOfClass:[RTC_OBJC_TYPE(RTCWrappedNativeVideoEncoder) class]]) { + return [(RTC_OBJC_TYPE(RTCWrappedNativeVideoEncoder) *)encoder releaseWrappedEncoder]; } else { return std::unique_ptr(new ObjCVideoEncoder(encoder)); } diff --git a/sdk/objc/unittests/RTCAudioDevice_xctest.mm b/sdk/objc/unittests/RTCAudioDevice_xctest.mm index c936399f34..e01fdbd6e3 100644 --- a/sdk/objc/unittests/RTCAudioDevice_xctest.mm +++ b/sdk/objc/unittests/RTCAudioDevice_xctest.mm @@ -33,7 +33,7 @@ - (void)setUp { [super setUp]; _audioDeviceModule = 
webrtc::CreateAudioDeviceModule(); - _audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS()); + _audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS(/*bypass_voice_processing=*/false)); self.audioSession = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; NSError *error = nil; diff --git a/sdk/objc/unittests/RTCAudioSessionTest.mm b/sdk/objc/unittests/RTCAudioSessionTest.mm index 4e309ca2fa..e2c26634b0 100644 --- a/sdk/objc/unittests/RTCAudioSessionTest.mm +++ b/sdk/objc/unittests/RTCAudioSessionTest.mm @@ -113,26 +113,10 @@ - (void)dealloc { @interface RTCAudioSessionTest : NSObject -- (void)testLockForConfiguration; - @end @implementation RTCAudioSessionTest -- (void)testLockForConfiguration { - RTC_OBJC_TYPE(RTCAudioSession) *session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; - - for (size_t i = 0; i < 2; i++) { - [session lockForConfiguration]; - EXPECT_TRUE(session.isLocked); - } - for (size_t i = 0; i < 2; i++) { - EXPECT_TRUE(session.isLocked); - [session unlockForConfiguration]; - } - EXPECT_FALSE(session.isLocked); -} - - (void)testAddAndRemoveDelegates { RTC_OBJC_TYPE(RTCAudioSession) *session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; NSMutableArray *delegates = [NSMutableArray array]; @@ -246,16 +230,17 @@ - (void)testConfigureWebRTCSession { __autoreleasing NSError **retError; [invocation getArgument:&retError atIndex:4]; *retError = [NSError errorWithDomain:@"AVAudioSession" - code:AVAudioSessionErrorInsufficientPriority + code:AVAudioSessionErrorCodeCannotInterruptOthers userInfo:nil]; BOOL failure = NO; [invocation setReturnValue:&failure]; }; id mockAVAudioSession = OCMPartialMock([AVAudioSession sharedInstance]); - OCMStub([[mockAVAudioSession ignoringNonObjectArgs] - setActive:YES withOptions:0 error:((NSError __autoreleasing **)[OCMArg anyPointer])]). 
- andDo(setActiveBlock); + OCMStub([[mockAVAudioSession ignoringNonObjectArgs] setActive:YES + withOptions:0 + error:([OCMArg anyObjectRef])]) + .andDo(setActiveBlock); id mockAudioSession = OCMPartialMock([RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]); OCMStub([mockAudioSession session]).andReturn(mockAVAudioSession); @@ -263,12 +248,12 @@ - (void)testConfigureWebRTCSession { RTC_OBJC_TYPE(RTCAudioSession) *audioSession = mockAudioSession; EXPECT_EQ(0, audioSession.activationCount); [audioSession lockForConfiguration]; - EXPECT_TRUE([audioSession checkLock:nil]); // configureWebRTCSession is forced to fail in the above mock interface, // so activationCount should remain 0 - OCMExpect([[mockAVAudioSession ignoringNonObjectArgs] - setActive:YES withOptions:0 error:((NSError __autoreleasing **)[OCMArg anyPointer])]). - andDo(setActiveBlock); + OCMExpect([[mockAVAudioSession ignoringNonObjectArgs] setActive:YES + withOptions:0 + error:([OCMArg anyObjectRef])]) + .andDo(setActiveBlock); OCMExpect([mockAudioSession session]).andReturn(mockAVAudioSession); EXPECT_FALSE([audioSession configureWebRTCSession:&error]); EXPECT_EQ(0, audioSession.activationCount); @@ -314,11 +299,6 @@ void TearDown() override { } }; -TEST_F(AudioSessionTest, LockForConfiguration) { - RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init]; - [test testLockForConfiguration]; -} - TEST_F(AudioSessionTest, AddAndRemoveDelegates) { RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init]; [test testAddAndRemoveDelegates]; diff --git a/sdk/objc/unittests/RTCPeerConnectionFactoryBuilderTest.mm b/sdk/objc/unittests/RTCPeerConnectionFactoryBuilderTest.mm index 7d19d4095d..14131dc38d 100644 --- a/sdk/objc/unittests/RTCPeerConnectionFactoryBuilderTest.mm +++ b/sdk/objc/unittests/RTCPeerConnectionFactoryBuilderTest.mm @@ -22,7 +22,6 @@ #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" -#include 
"api/transport/media/media_transport_interface.h" #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_factory.h" #include "modules/audio_device/include/audio_device.h" @@ -50,8 +49,7 @@ - (void)testBuilder { nativeVideoEncoderFactory:nullptr nativeVideoDecoderFactory:nullptr audioDeviceModule:nullptr - audioProcessingModule:nullptr - mediaTransportFactory:nullptr]); + audioProcessingModule:nullptr]); #endif RTCPeerConnectionFactoryBuilder* builder = [[RTCPeerConnectionFactoryBuilder alloc] init]; RTC_OBJC_TYPE(RTCPeerConnectionFactory)* peerConnectionFactory = @@ -72,8 +70,7 @@ - (void)testDefaultComponentsBuilder { nativeVideoEncoderFactory:nullptr nativeVideoDecoderFactory:nullptr audioDeviceModule:nullptr - audioProcessingModule:nullptr - mediaTransportFactory:nullptr]); + audioProcessingModule:nullptr]); #endif RTCPeerConnectionFactoryBuilder* builder = [RTCPeerConnectionFactoryBuilder defaultBuilder]; RTC_OBJC_TYPE(RTCPeerConnectionFactory)* peerConnectionFactory = diff --git a/sdk/objc/unittests/RTCPeerConnectionFactory_xctest.m b/sdk/objc/unittests/RTCPeerConnectionFactory_xctest.m index 2737bb6eee..629095b81d 100644 --- a/sdk/objc/unittests/RTCPeerConnectionFactory_xctest.m +++ b/sdk/objc/unittests/RTCPeerConnectionFactory_xctest.m @@ -19,6 +19,7 @@ #import "api/peerconnection/RTCRtpReceiver.h" #import "api/peerconnection/RTCRtpSender.h" #import "api/peerconnection/RTCRtpTransceiver.h" +#import "api/peerconnection/RTCSessionDescription.h" #import "api/peerconnection/RTCVideoSource.h" #import @@ -270,6 +271,56 @@ - (void)testVideoTrackLifetime { XCTAssertTrue(true, "Expect test does not crash"); } +- (void)testRollback { + @autoreleasepool { + RTC_OBJC_TYPE(RTCConfiguration) *config = [[RTC_OBJC_TYPE(RTCConfiguration) alloc] init]; + config.sdpSemantics = RTCSdpSemanticsUnifiedPlan; + RTC_OBJC_TYPE(RTCMediaConstraints) *constraints = + [[RTC_OBJC_TYPE(RTCMediaConstraints) alloc] initWithMandatoryConstraints:@{ + 
kRTCMediaConstraintsOfferToReceiveAudio : kRTCMediaConstraintsValueTrue + } + optionalConstraints:nil]; + + __block RTC_OBJC_TYPE(RTCPeerConnectionFactory) * factory; + __block RTC_OBJC_TYPE(RTCPeerConnection) * pc1; + RTCSessionDescription *rollback = [[RTCSessionDescription alloc] initWithType:RTCSdpTypeRollback + sdp:@""]; + + @autoreleasepool { + factory = [[RTC_OBJC_TYPE(RTCPeerConnectionFactory) alloc] init]; + pc1 = [factory peerConnectionWithConfiguration:config constraints:constraints delegate:nil]; + dispatch_semaphore_t negotiatedSem = dispatch_semaphore_create(0); + [pc1 offerForConstraints:constraints + completionHandler:^(RTC_OBJC_TYPE(RTCSessionDescription) * offer, NSError * error) { + XCTAssertNil(error); + XCTAssertNotNil(offer); + + __weak RTC_OBJC_TYPE(RTCPeerConnection) *weakPC1 = pc1; + [pc1 setLocalDescription:offer + completionHandler:^(NSError *error) { + XCTAssertNil(error); + [weakPC1 setLocalDescription:rollback + completionHandler:^(NSError *error) { + XCTAssertNil(error); + }]; + }]; + NSTimeInterval negotiationTimeout = 15; + dispatch_semaphore_wait( + negotiatedSem, + dispatch_time(DISPATCH_TIME_NOW, (int64_t)(negotiationTimeout * NSEC_PER_SEC))); + + XCTAssertEqual(pc1.signalingState, RTCSignalingStateStable); + + [pc1 close]; + pc1 = nil; + factory = nil; + }]; + } + + XCTAssertTrue(true, "Expect test does not crash"); + } +} + - (bool)negotiatePeerConnection:(RTC_OBJC_TYPE(RTCPeerConnection) *)pc1 withPeerConnection:(RTC_OBJC_TYPE(RTCPeerConnection) *)pc2 negotiationTimeout:(NSTimeInterval)timeout { diff --git a/sdk/objc/unittests/RTCPeerConnectionTest.mm b/sdk/objc/unittests/RTCPeerConnectionTest.mm index e45ca93a6c..1d0ae2679e 100644 --- a/sdk/objc/unittests/RTCPeerConnectionTest.mm +++ b/sdk/objc/unittests/RTCPeerConnectionTest.mm @@ -18,16 +18,20 @@ #import "api/peerconnection/RTCConfiguration+Private.h" #import "api/peerconnection/RTCConfiguration.h" #import "api/peerconnection/RTCCryptoOptions.h" +#import 
"api/peerconnection/RTCIceCandidate.h" #import "api/peerconnection/RTCIceServer.h" #import "api/peerconnection/RTCMediaConstraints.h" #import "api/peerconnection/RTCPeerConnection.h" #import "api/peerconnection/RTCPeerConnectionFactory+Native.h" #import "api/peerconnection/RTCPeerConnectionFactory.h" +#import "api/peerconnection/RTCSessionDescription.h" #import "helpers/NSString+StdString.h" @interface RTCPeerConnectionTest : NSObject - (void)testConfigurationGetter; - (void)testWithDependencies; +- (void)testWithInvalidSDP; +- (void)testWithInvalidIceCandidate; @end @implementation RTCPeerConnectionTest @@ -137,6 +141,66 @@ - (void)testWithDependencies { } } +- (void)testWithInvalidSDP { + RTC_OBJC_TYPE(RTCPeerConnectionFactory) *factory = + [[RTC_OBJC_TYPE(RTCPeerConnectionFactory) alloc] init]; + + RTC_OBJC_TYPE(RTCConfiguration) *config = [[RTC_OBJC_TYPE(RTCConfiguration) alloc] init]; + RTC_OBJC_TYPE(RTCMediaConstraints) *contraints = + [[RTC_OBJC_TYPE(RTCMediaConstraints) alloc] initWithMandatoryConstraints:@{} + optionalConstraints:nil]; + RTC_OBJC_TYPE(RTCPeerConnection) *peerConnection = + [factory peerConnectionWithConfiguration:config constraints:contraints delegate:nil]; + + dispatch_semaphore_t negotiatedSem = dispatch_semaphore_create(0); + [peerConnection setRemoteDescription:[[RTC_OBJC_TYPE(RTCSessionDescription) alloc] + initWithType:RTCSdpTypeOffer + sdp:@"invalid"] + completionHandler:^(NSError *error) { + ASSERT_NE(error, nil); + if (error != nil) { + dispatch_semaphore_signal(negotiatedSem); + } + }]; + + NSTimeInterval timeout = 5; + ASSERT_EQ( + 0, + dispatch_semaphore_wait(negotiatedSem, + dispatch_time(DISPATCH_TIME_NOW, (int64_t)(timeout * NSEC_PER_SEC)))); + [peerConnection close]; +} + +- (void)testWithInvalidIceCandidate { + RTC_OBJC_TYPE(RTCPeerConnectionFactory) *factory = + [[RTC_OBJC_TYPE(RTCPeerConnectionFactory) alloc] init]; + + RTC_OBJC_TYPE(RTCConfiguration) *config = [[RTC_OBJC_TYPE(RTCConfiguration) alloc] init]; + 
RTC_OBJC_TYPE(RTCMediaConstraints) *contraints = + [[RTC_OBJC_TYPE(RTCMediaConstraints) alloc] initWithMandatoryConstraints:@{} + optionalConstraints:nil]; + RTC_OBJC_TYPE(RTCPeerConnection) *peerConnection = + [factory peerConnectionWithConfiguration:config constraints:contraints delegate:nil]; + + dispatch_semaphore_t negotiatedSem = dispatch_semaphore_create(0); + [peerConnection addIceCandidate:[[RTC_OBJC_TYPE(RTCIceCandidate) alloc] initWithSdp:@"invalid" + sdpMLineIndex:-1 + sdpMid:nil] + completionHandler:^(NSError *error) { + ASSERT_NE(error, nil); + if (error != nil) { + dispatch_semaphore_signal(negotiatedSem); + } + }]; + + NSTimeInterval timeout = 5; + ASSERT_EQ( + 0, + dispatch_semaphore_wait(negotiatedSem, + dispatch_time(DISPATCH_TIME_NOW, (int64_t)(timeout * NSEC_PER_SEC)))); + [peerConnection close]; +} + @end TEST(RTCPeerConnectionTest, ConfigurationGetterTest) { @@ -152,3 +216,17 @@ - (void)testWithDependencies { [test testWithDependencies]; } } + +TEST(RTCPeerConnectionTest, TestWithInvalidSDP) { + @autoreleasepool { + RTCPeerConnectionTest *test = [[RTCPeerConnectionTest alloc] init]; + [test testWithInvalidSDP]; + } +} + +TEST(RTCPeerConnectionTest, TestWithInvalidIceCandidate) { + @autoreleasepool { + RTCPeerConnectionTest *test = [[RTCPeerConnectionTest alloc] init]; + [test testWithInvalidIceCandidate]; + } +} diff --git a/sdk/objc/unittests/RTCSessionDescriptionTest.mm b/sdk/objc/unittests/RTCSessionDescriptionTest.mm index ee65649cbc..25d7ffe67d 100644 --- a/sdk/objc/unittests/RTCSessionDescriptionTest.mm +++ b/sdk/objc/unittests/RTCSessionDescriptionTest.mm @@ -31,7 +31,7 @@ - (void)testSessionDescriptionConversion { RTC_OBJC_TYPE(RTCSessionDescription) *description = [[RTC_OBJC_TYPE(RTCSessionDescription) alloc] initWithType:RTCSdpTypeAnswer sdp:[self sdp]]; - webrtc::SessionDescriptionInterface *nativeDescription = + std::unique_ptr nativeDescription = description.nativeDescription; EXPECT_EQ(RTCSdpTypeAnswer, diff --git 
a/sdk/objc/unittests/nalu_rewriter_xctest.mm b/sdk/objc/unittests/nalu_rewriter_xctest.mm index 490d228573..995cc80da4 100644 --- a/sdk/objc/unittests/nalu_rewriter_xctest.mm +++ b/sdk/objc/unittests/nalu_rewriter_xctest.mm @@ -276,14 +276,12 @@ - (void)testH264CMSampleBufferToAnnexBBuffer { // clang-format on rtc::Buffer annexb_buffer(arraysize(cmsample_data)); - std::unique_ptr out_header_ptr; CMSampleBufferRef sample_buffer = [self createCMSampleBufferRef:(void*)cmsample_data cmsampleSize:arraysize(cmsample_data)]; Boolean result = webrtc::H264CMSampleBufferToAnnexBBuffer(sample_buffer, /* is_keyframe */ false, - &annexb_buffer, - &out_header_ptr); + &annexb_buffer); XCTAssertTrue(result); @@ -293,16 +291,6 @@ - (void)testH264CMSampleBufferToAnnexBBuffer { memcmp(expected_annex_b_data, annexb_buffer.data(), arraysize(expected_annex_b_data)); XCTAssertEqual(0, data_comparison_result); - - webrtc::RTPFragmentationHeader* out_header = out_header_ptr.get(); - - XCTAssertEqual(2, (int)out_header->Size()); - - XCTAssertEqual(4, (int)out_header->Offset(0)); - XCTAssertEqual(4, (int)out_header->Length(0)); - - XCTAssertEqual(12, (int)out_header->Offset(1)); - XCTAssertEqual(2, (int)out_header->Length(1)); } - (void)testH264CMSampleBufferToAnnexBBufferWithKeyframe { @@ -321,14 +309,12 @@ - (void)testH264CMSampleBufferToAnnexBBufferWithKeyframe { // clang-format on rtc::Buffer annexb_buffer(arraysize(cmsample_data)); - std::unique_ptr out_header_ptr; CMSampleBufferRef sample_buffer = [self createCMSampleBufferRef:(void*)cmsample_data cmsampleSize:arraysize(cmsample_data)]; Boolean result = webrtc::H264CMSampleBufferToAnnexBBuffer(sample_buffer, /* is_keyframe */ true, - &annexb_buffer, - &out_header_ptr); + &annexb_buffer); XCTAssertTrue(result); @@ -341,22 +327,6 @@ - (void)testH264CMSampleBufferToAnnexBBufferWithKeyframe { memcmp(expected_annex_b_data, annexb_buffer.data() + arraysize(SPS_PPS_BUFFER), arraysize(expected_annex_b_data))); - - 
webrtc::RTPFragmentationHeader* out_header = out_header_ptr.get(); - - XCTAssertEqual(4, (int)out_header->Size()); - - XCTAssertEqual(4, (int)out_header->Offset(0)); - XCTAssertEqual(14, (int)out_header->Length(0)); - - XCTAssertEqual(22, (int)out_header->Offset(1)); - XCTAssertEqual(4, (int)out_header->Length(1)); - - XCTAssertEqual(30, (int)out_header->Offset(2)); - XCTAssertEqual(4, (int)out_header->Length(2)); - - XCTAssertEqual(38, (int)out_header->Offset(3)); - XCTAssertEqual(2, (int)out_header->Length(3)); } - (CMVideoFormatDescriptionRef)createDescription { diff --git a/sdk/objc/unittests/objc_video_encoder_factory_tests.mm b/sdk/objc/unittests/objc_video_encoder_factory_tests.mm index 728dc018e2..7c1594a109 100644 --- a/sdk/objc/unittests/objc_video_encoder_factory_tests.mm +++ b/sdk/objc/unittests/objc_video_encoder_factory_tests.mm @@ -19,7 +19,6 @@ #import "base/RTCVideoEncoderFactory.h" #import "base/RTCVideoFrameBuffer.h" #import "components/video_frame_buffer/RTCCVPixelBuffer.h" -#include "modules/include/module_common_types.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_error_codes.h" #include "rtc_base/gunit.h" diff --git a/stats/BUILD.gn b/stats/BUILD.gn index 37224cd618..d947c50cc9 100644 --- a/stats/BUILD.gn +++ b/stats/BUILD.gn @@ -44,7 +44,7 @@ rtc_library("rtc_stats_test_utils") { ] } -if (rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { rtc_test("rtc_stats_unittests") { testonly = true sources = [ diff --git a/stats/rtc_stats.cc b/stats/rtc_stats.cc index b8e9633f46..4895edc738 100644 --- a/stats/rtc_stats.cc +++ b/stats/rtc_stats.cc @@ -35,6 +35,20 @@ std::string VectorToString(const std::vector& vector) { return sb.Release(); } +// This overload is required because std::vector range loops don't +// return references but objects, causing -Wrange-loop-analysis diagnostics. 
+std::string VectorToString(const std::vector& vector) { + rtc::StringBuilder sb; + sb << "["; + const char* separator = ""; + for (bool element : vector) { + sb << separator << rtc::ToString(element); + separator = ","; + } + sb << "]"; + return sb.Release(); +} + // Produces "[\"a\",\"b\",\"c\"]". Works for vectors of both const char* and // std::string element types. template @@ -50,6 +64,20 @@ std::string VectorOfStringsToString(const std::vector& strings) { return sb.Release(); } +template +std::string MapToString(const std::map& map) { + rtc::StringBuilder sb; + sb << "{"; + const char* separator = ""; + for (const auto& element : map) { + sb << separator << rtc::ToString(element.first) << ":" + << rtc::ToString(element.second); + separator = ","; + } + sb << "}"; + return sb.Release(); +} + template std::string ToStringAsDouble(const T value) { // JSON represents numbers as floating point numbers with about 15 decimal @@ -74,6 +102,20 @@ std::string VectorToStringAsDouble(const std::vector& vector) { return sb.Release(); } +template +std::string MapToStringAsDouble(const std::map& map) { + rtc::StringBuilder sb; + sb << "{"; + const char* separator = ""; + for (const auto& element : map) { + sb << separator << "\"" << rtc::ToString(element.first) + << "\":" << ToStringAsDouble(element.second); + separator = ","; + } + sb << "}"; + return sb.Release(); +} + } // namespace bool RTCStats::operator==(const RTCStats& other) const { @@ -234,6 +276,18 @@ WEBRTC_DEFINE_RTCSTATSMEMBER(std::vector, false, VectorOfStringsToString(value_), VectorOfStringsToString(value_)); +WEBRTC_DEFINE_RTCSTATSMEMBER(rtc_stats_internal::MapStringUint64, + kMapStringUint64, + false, + false, + MapToString(value_), + MapToStringAsDouble(value_)); +WEBRTC_DEFINE_RTCSTATSMEMBER(rtc_stats_internal::MapStringDouble, + kMapStringDouble, + false, + false, + MapToString(value_), + MapToStringAsDouble(value_)); template class RTC_EXPORT_TEMPLATE_DEFINE(RTC_EXPORT) RTCNonStandardStatsMember; 
diff --git a/stats/rtc_stats_report.cc b/stats/rtc_stats_report.cc index a56d30d3c3..4fbd82508e 100644 --- a/stats/rtc_stats_report.cc +++ b/stats/rtc_stats_report.cc @@ -56,15 +56,12 @@ bool RTCStatsReport::ConstIterator::operator!=( rtc::scoped_refptr RTCStatsReport::Create( int64_t timestamp_us) { - return rtc::scoped_refptr( - new rtc::RefCountedObject(timestamp_us)); + return rtc::scoped_refptr(new RTCStatsReport(timestamp_us)); } RTCStatsReport::RTCStatsReport(int64_t timestamp_us) : timestamp_us_(timestamp_us) {} -RTCStatsReport::~RTCStatsReport() {} - rtc::scoped_refptr RTCStatsReport::Copy() const { rtc::scoped_refptr copy = Create(timestamp_us_); for (auto it = stats_.begin(); it != stats_.end(); ++it) { @@ -98,13 +95,12 @@ std::unique_ptr RTCStatsReport::Take(const std::string& id) { return stats; } -void RTCStatsReport::TakeMembersFrom( - rtc::scoped_refptr victim) { - for (StatsMap::iterator it = victim->stats_.begin(); - it != victim->stats_.end(); ++it) { +void RTCStatsReport::TakeMembersFrom(rtc::scoped_refptr other) { + for (StatsMap::iterator it = other->stats_.begin(); it != other->stats_.end(); + ++it) { AddStats(std::unique_ptr(it->second.release())); } - victim->stats_.clear(); + other->stats_.clear(); } RTCStatsReport::ConstIterator RTCStatsReport::begin() const { diff --git a/stats/rtc_stats_unittest.cc b/stats/rtc_stats_unittest.cc index b159977858..2cad90d02b 100644 --- a/stats/rtc_stats_unittest.cc +++ b/stats/rtc_stats_unittest.cc @@ -71,7 +71,7 @@ TEST(RTCStatsTest, RTCStatsAndMembers) { EXPECT_EQ(stats.id(), "testId"); EXPECT_EQ(stats.timestamp_us(), static_cast(42)); std::vector members = stats.Members(); - EXPECT_EQ(members.size(), static_cast(14)); + EXPECT_EQ(members.size(), static_cast(16)); for (const RTCStatsMemberInterface* member : members) { EXPECT_FALSE(member->is_defined()); } @@ -98,6 +98,9 @@ TEST(RTCStatsTest, RTCStatsAndMembers) { std::vector sequence_string; sequence_string.push_back(std::string("six")); + std::map 
map_string_uint64{{"seven", 8}}; + std::map map_string_double{{"nine", 10.0}}; + stats.m_sequence_bool = sequence_bool; stats.m_sequence_int32 = sequence_int32; stats.m_sequence_uint32 = sequence_uint32; @@ -106,6 +109,8 @@ TEST(RTCStatsTest, RTCStatsAndMembers) { stats.m_sequence_uint64 = sequence_uint64; stats.m_sequence_double = sequence_double; stats.m_sequence_string = sequence_string; + stats.m_map_string_uint64 = map_string_uint64; + stats.m_map_string_double = map_string_double; for (const RTCStatsMemberInterface* member : members) { EXPECT_TRUE(member->is_defined()); } @@ -123,6 +128,8 @@ TEST(RTCStatsTest, RTCStatsAndMembers) { EXPECT_EQ(*stats.m_sequence_uint64, sequence_uint64); EXPECT_EQ(*stats.m_sequence_double, sequence_double); EXPECT_EQ(*stats.m_sequence_string, sequence_string); + EXPECT_EQ(*stats.m_map_string_uint64, map_string_uint64); + EXPECT_EQ(*stats.m_map_string_double, map_string_double); int32_t numbers[] = {4, 8, 15, 16, 23, 42}; std::vector numbers_sequence(&numbers[0], &numbers[6]); @@ -152,6 +159,8 @@ TEST(RTCStatsTest, EqualityOperator) { stats_with_all_values.m_sequence_uint64 = std::vector(); stats_with_all_values.m_sequence_double = std::vector(); stats_with_all_values.m_sequence_string = std::vector(); + stats_with_all_values.m_map_string_uint64 = std::map(); + stats_with_all_values.m_map_string_double = std::map(); EXPECT_NE(stats_with_all_values, empty_stats); EXPECT_EQ(stats_with_all_values, stats_with_all_values); EXPECT_NE(stats_with_all_values.m_int32, stats_with_all_values.m_uint32); @@ -180,6 +189,8 @@ TEST(RTCStatsTest, EqualityOperator) { one_member_different[11].m_sequence_uint64->push_back(321); one_member_different[12].m_sequence_double->push_back(321.0); one_member_different[13].m_sequence_string->push_back("321"); + (*one_member_different[13].m_map_string_uint64)["321"] = 321; + (*one_member_different[13].m_map_string_double)["321"] = 321.0; for (size_t i = 0; i < 14; ++i) { EXPECT_NE(stats_with_all_values, 
one_member_different[i]); } @@ -238,6 +249,11 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { std::vector sequence_string; sequence_string.push_back(std::string("four")); + std::map map_string_uint64{ + {"long", static_cast(1234567890123456499L)}}; + std::map map_string_double{ + {"three", 123.4567890123456499}, {"thirteen", 123.4567890123456499}}; + RTCTestStats stats(id, timestamp); stats.m_bool = m_bool; stats.m_int32 = m_int32; @@ -249,9 +265,16 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { stats.m_sequence_int64 = sequence_int64; stats.m_sequence_double = sequence_double; stats.m_sequence_string = sequence_string; + stats.m_map_string_uint64 = map_string_uint64; + stats.m_map_string_double = map_string_double; + std::string json_stats = stats.ToJson(); + Json::CharReaderBuilder builder; Json::Value json_output; - EXPECT_TRUE(Json::Reader().parse(stats.ToJson(), json_output)); + std::unique_ptr json_reader(builder.newCharReader()); + EXPECT_TRUE(json_reader->parse(json_stats.c_str(), + json_stats.c_str() + json_stats.size(), + &json_output, nullptr)); EXPECT_TRUE(rtc::GetStringFromJsonObject(json_output, "id", &id)); EXPECT_TRUE(rtc::GetIntFromJsonObject(json_output, "timestamp", ×tamp)); @@ -278,6 +301,16 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { rtc::GetValueFromJsonObject(json_output, "mSequenceString", &json_array)); EXPECT_TRUE(rtc::JsonArrayToStringVector(json_array, &sequence_string)); + Json::Value json_map; + EXPECT_TRUE( + rtc::GetValueFromJsonObject(json_output, "mMapStringDouble", &json_map)); + for (const auto& entry : map_string_double) { + double double_output = 0.0; + EXPECT_TRUE( + rtc::GetDoubleFromJsonObject(json_map, entry.first, &double_output)); + EXPECT_NEAR(double_output, entry.second, GetExpectedError(entry.second)); + } + EXPECT_EQ(id, stats.id()); EXPECT_EQ(timestamp, stats.timestamp_us()); EXPECT_EQ(m_bool, *stats.m_bool); @@ -286,6 +319,7 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { EXPECT_EQ(sequence_bool, 
*stats.m_sequence_bool); EXPECT_EQ(sequence_int32, *stats.m_sequence_int32); EXPECT_EQ(sequence_string, *stats.m_sequence_string); + EXPECT_EQ(map_string_double, *stats.m_map_string_double); EXPECT_NEAR(m_double, *stats.m_double, GetExpectedError(*stats.m_double)); @@ -295,6 +329,13 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { GetExpectedError(stats.m_sequence_double->at(i))); } + EXPECT_EQ(map_string_double.size(), stats.m_map_string_double->size()); + for (const auto& entry : map_string_double) { + auto it = stats.m_map_string_double->find(entry.first); + EXPECT_NE(it, stats.m_map_string_double->end()); + EXPECT_NEAR(entry.second, it->second, GetExpectedError(it->second)); + } + // We read mInt64 as double since JSON stores all numbers as doubles, so there // is not enough precision to represent large numbers. double m_int64_as_double; @@ -320,6 +361,19 @@ TEST(RTCStatsTest, RTCStatsPrintsValidJson) { GetExpectedError(stats_value_as_double)); } + // Similarly, read Uint64 as double + EXPECT_TRUE( + rtc::GetValueFromJsonObject(json_output, "mMapStringUint64", &json_map)); + for (const auto& entry : map_string_uint64) { + const double stats_value_as_double = + static_cast((*stats.m_map_string_uint64)[entry.first]); + double double_output = 0.0; + EXPECT_TRUE( + rtc::GetDoubleFromJsonObject(json_map, entry.first, &double_output)); + EXPECT_NEAR(double_output, stats_value_as_double, + GetExpectedError(stats_value_as_double)); + } + // Neither stats.m_uint32 nor stats.m_uint64 are defined, so "mUint64" and // "mUint32" should not be part of the generated JSON object. 
int m_uint32; diff --git a/stats/rtcstats_objects.cc b/stats/rtcstats_objects.cc index 453acce925..a2d7aa0b07 100644 --- a/stats/rtcstats_objects.cc +++ b/stats/rtcstats_objects.cc @@ -92,6 +92,7 @@ RTCCertificateStats::~RTCCertificateStats() {} // clang-format off WEBRTC_RTCSTATS_IMPL(RTCCodecStats, RTCStats, "codec", + &transport_id, &payload_type, &mime_type, &clock_rate, @@ -104,6 +105,7 @@ RTCCodecStats::RTCCodecStats(const std::string& id, int64_t timestamp_us) RTCCodecStats::RTCCodecStats(std::string&& id, int64_t timestamp_us) : RTCStats(std::move(id), timestamp_us), + transport_id("transportId"), payload_type("payloadType"), mime_type("mimeType"), clock_rate("clockRate"), @@ -112,6 +114,7 @@ RTCCodecStats::RTCCodecStats(std::string&& id, int64_t timestamp_us) RTCCodecStats::RTCCodecStats(const RTCCodecStats& other) : RTCStats(other.id(), other.timestamp_us()), + transport_id(other.transport_id), payload_type(other.payload_type), mime_type(other.mime_type), clock_rate(other.clock_rate), @@ -124,7 +127,7 @@ RTCCodecStats::~RTCCodecStats() {} WEBRTC_RTCSTATS_IMPL(RTCDataChannelStats, RTCStats, "data-channel", &label, &protocol, - &datachannelid, + &data_channel_identifier, &state, &messages_sent, &bytes_sent, @@ -140,7 +143,7 @@ RTCDataChannelStats::RTCDataChannelStats(std::string&& id, int64_t timestamp_us) : RTCStats(std::move(id), timestamp_us), label("label"), protocol("protocol"), - datachannelid("datachannelid"), + data_channel_identifier("dataChannelIdentifier"), state("state"), messages_sent("messagesSent"), bytes_sent("bytesSent"), @@ -151,7 +154,7 @@ RTCDataChannelStats::RTCDataChannelStats(const RTCDataChannelStats& other) : RTCStats(other.id(), other.timestamp_us()), label(other.label), protocol(other.protocol), - datachannelid(other.datachannelid), + data_channel_identifier(other.data_channel_identifier), state(other.state), messages_sent(other.messages_sent), bytes_sent(other.bytes_sent), @@ -256,13 +259,13 @@ 
WEBRTC_RTCSTATS_IMPL(RTCIceCandidateStats, RTCStats, "abstract-ice-candidate", &is_remote, &network_type, &ip, + &address, &port, &protocol, &relay_protocol, &candidate_type, &priority, - &url, - &deleted) + &url) // clang-format on RTCIceCandidateStats::RTCIceCandidateStats(const std::string& id, @@ -278,13 +281,13 @@ RTCIceCandidateStats::RTCIceCandidateStats(std::string&& id, is_remote("isRemote", is_remote), network_type("networkType"), ip("ip"), + address("address"), port("port"), protocol("protocol"), relay_protocol("relayProtocol"), candidate_type("candidateType"), priority("priority"), - url("url"), - deleted("deleted", false) {} + url("url") {} RTCIceCandidateStats::RTCIceCandidateStats(const RTCIceCandidateStats& other) : RTCStats(other.id(), other.timestamp_us()), @@ -292,13 +295,13 @@ RTCIceCandidateStats::RTCIceCandidateStats(const RTCIceCandidateStats& other) is_remote(other.is_remote), network_type(other.network_type), ip(other.ip), + address(other.address), port(other.port), protocol(other.protocol), relay_protocol(other.relay_protocol), candidate_type(other.candidate_type), priority(other.priority), - url(other.url), - deleted(other.deleted) {} + url(other.url) {} RTCIceCandidateStats::~RTCIceCandidateStats() {} @@ -544,17 +547,11 @@ RTCPeerConnectionStats::~RTCPeerConnectionStats() {} // clang-format off WEBRTC_RTCSTATS_IMPL(RTCRTPStreamStats, RTCStats, "rtp", &ssrc, - &is_remote, - &media_type, &kind, &track_id, &transport_id, &codec_id, - &fir_count, - &pli_count, - &nack_count, - &sli_count, - &qp_sum) + &media_type) // clang-format on RTCRTPStreamStats::RTCRTPStreamStats(const std::string& id, @@ -564,46 +561,94 @@ RTCRTPStreamStats::RTCRTPStreamStats(const std::string& id, RTCRTPStreamStats::RTCRTPStreamStats(std::string&& id, int64_t timestamp_us) : RTCStats(std::move(id), timestamp_us), ssrc("ssrc"), - is_remote("isRemote", false), - media_type("mediaType"), kind("kind"), track_id("trackId"), transport_id("transportId"), 
codec_id("codecId"), - fir_count("firCount"), - pli_count("pliCount"), - nack_count("nackCount"), - sli_count("sliCount"), - qp_sum("qpSum") {} + media_type("mediaType") {} RTCRTPStreamStats::RTCRTPStreamStats(const RTCRTPStreamStats& other) : RTCStats(other.id(), other.timestamp_us()), ssrc(other.ssrc), - is_remote(other.is_remote), - media_type(other.media_type), kind(other.kind), track_id(other.track_id), transport_id(other.transport_id), codec_id(other.codec_id), - fir_count(other.fir_count), - pli_count(other.pli_count), - nack_count(other.nack_count), - sli_count(other.sli_count), - qp_sum(other.qp_sum) {} + media_type(other.media_type) {} RTCRTPStreamStats::~RTCRTPStreamStats() {} // clang-format off WEBRTC_RTCSTATS_IMPL( - RTCInboundRTPStreamStats, RTCRTPStreamStats, "inbound-rtp", + RTCReceivedRtpStreamStats, RTCRTPStreamStats, "received-rtp", + &jitter, + &packets_lost) +// clang-format on + +RTCReceivedRtpStreamStats::RTCReceivedRtpStreamStats(const std::string&& id, + int64_t timestamp_us) + : RTCReceivedRtpStreamStats(std::string(id), timestamp_us) {} + +RTCReceivedRtpStreamStats::RTCReceivedRtpStreamStats(std::string&& id, + int64_t timestamp_us) + : RTCRTPStreamStats(std::move(id), timestamp_us), + jitter("jitter"), + packets_lost("packetsLost") {} + +RTCReceivedRtpStreamStats::RTCReceivedRtpStreamStats( + const RTCReceivedRtpStreamStats& other) + : RTCRTPStreamStats(other), + jitter(other.jitter), + packets_lost(other.packets_lost) {} + +RTCReceivedRtpStreamStats::~RTCReceivedRtpStreamStats() {} + +// clang-format off +WEBRTC_RTCSTATS_IMPL( + RTCSentRtpStreamStats, RTCRTPStreamStats, "sent-rtp", + &packets_sent, + &bytes_sent) +// clang-format on + +RTCSentRtpStreamStats::RTCSentRtpStreamStats(const std::string&& id, + int64_t timestamp_us) + : RTCSentRtpStreamStats(std::string(id), timestamp_us) {} + +RTCSentRtpStreamStats::RTCSentRtpStreamStats(std::string&& id, + int64_t timestamp_us) + : RTCRTPStreamStats(std::move(id), timestamp_us), + 
packets_sent("packetsSent"), + bytes_sent("bytesSent") {} + +RTCSentRtpStreamStats::RTCSentRtpStreamStats(const RTCSentRtpStreamStats& other) + : RTCRTPStreamStats(other), + packets_sent(other.packets_sent), + bytes_sent(other.bytes_sent) {} + +RTCSentRtpStreamStats::~RTCSentRtpStreamStats() {} + +// clang-format off +WEBRTC_RTCSTATS_IMPL( + RTCInboundRTPStreamStats, RTCReceivedRtpStreamStats, "inbound-rtp", + &remote_id, &packets_received, &fec_packets_received, &fec_packets_discarded, &bytes_received, &header_bytes_received, - &packets_lost, &last_packet_received_timestamp, - &jitter, + &jitter_buffer_delay, + &jitter_buffer_emitted_count, + &total_samples_received, + &concealed_samples, + &silent_concealed_samples, + &concealment_events, + &inserted_samples_for_deceleration, + &removed_samples_for_acceleration, + &audio_level, + &total_audio_energy, + &total_samples_duration, + &frames_received, &round_trip_time, &packets_discarded, &packets_repaired, @@ -615,14 +660,23 @@ WEBRTC_RTCSTATS_IMPL( &burst_discard_rate, &gap_loss_rate, &gap_discard_rate, + &frame_width, + &frame_height, + &frame_bit_depth, + &frames_per_second, &frames_decoded, &key_frames_decoded, + &frames_dropped, &total_decode_time, &total_inter_frame_delay, &total_squared_inter_frame_delay, &content_type, &estimated_playout_timestamp, - &decoder_implementation) + &decoder_implementation, + &fir_count, + &pli_count, + &nack_count, + &qp_sum) // clang-format on RTCInboundRTPStreamStats::RTCInboundRTPStreamStats(const std::string& id, @@ -631,15 +685,26 @@ RTCInboundRTPStreamStats::RTCInboundRTPStreamStats(const std::string& id, RTCInboundRTPStreamStats::RTCInboundRTPStreamStats(std::string&& id, int64_t timestamp_us) - : RTCRTPStreamStats(std::move(id), timestamp_us), + : RTCReceivedRtpStreamStats(std::move(id), timestamp_us), + remote_id("remoteId"), packets_received("packetsReceived"), fec_packets_received("fecPacketsReceived"), fec_packets_discarded("fecPacketsDiscarded"), 
bytes_received("bytesReceived"), header_bytes_received("headerBytesReceived"), - packets_lost("packetsLost"), last_packet_received_timestamp("lastPacketReceivedTimestamp"), - jitter("jitter"), + jitter_buffer_delay("jitterBufferDelay"), + jitter_buffer_emitted_count("jitterBufferEmittedCount"), + total_samples_received("totalSamplesReceived"), + concealed_samples("concealedSamples"), + silent_concealed_samples("silentConcealedSamples"), + concealment_events("concealmentEvents"), + inserted_samples_for_deceleration("insertedSamplesForDeceleration"), + removed_samples_for_acceleration("removedSamplesForAcceleration"), + audio_level("audioLevel"), + total_audio_energy("totalAudioEnergy"), + total_samples_duration("totalSamplesDuration"), + frames_received("framesReceived"), round_trip_time("roundTripTime"), packets_discarded("packetsDiscarded"), packets_repaired("packetsRepaired"), @@ -651,26 +716,47 @@ RTCInboundRTPStreamStats::RTCInboundRTPStreamStats(std::string&& id, burst_discard_rate("burstDiscardRate"), gap_loss_rate("gapLossRate"), gap_discard_rate("gapDiscardRate"), + frame_width("frameWidth"), + frame_height("frameHeight"), + frame_bit_depth("frameBitDepth"), + frames_per_second("framesPerSecond"), frames_decoded("framesDecoded"), key_frames_decoded("keyFramesDecoded"), + frames_dropped("framesDropped"), total_decode_time("totalDecodeTime"), total_inter_frame_delay("totalInterFrameDelay"), total_squared_inter_frame_delay("totalSquaredInterFrameDelay"), content_type("contentType"), estimated_playout_timestamp("estimatedPlayoutTimestamp"), - decoder_implementation("decoderImplementation") {} + decoder_implementation("decoderImplementation"), + fir_count("firCount"), + pli_count("pliCount"), + nack_count("nackCount"), + qp_sum("qpSum") {} RTCInboundRTPStreamStats::RTCInboundRTPStreamStats( const RTCInboundRTPStreamStats& other) - : RTCRTPStreamStats(other), + : RTCReceivedRtpStreamStats(other), + remote_id(other.remote_id), 
packets_received(other.packets_received), fec_packets_received(other.fec_packets_received), fec_packets_discarded(other.fec_packets_discarded), bytes_received(other.bytes_received), header_bytes_received(other.header_bytes_received), - packets_lost(other.packets_lost), last_packet_received_timestamp(other.last_packet_received_timestamp), - jitter(other.jitter), + jitter_buffer_delay(other.jitter_buffer_delay), + jitter_buffer_emitted_count(other.jitter_buffer_emitted_count), + total_samples_received(other.total_samples_received), + concealed_samples(other.concealed_samples), + silent_concealed_samples(other.silent_concealed_samples), + concealment_events(other.concealment_events), + inserted_samples_for_deceleration( + other.inserted_samples_for_deceleration), + removed_samples_for_acceleration(other.removed_samples_for_acceleration), + audio_level(other.audio_level), + total_audio_energy(other.total_audio_energy), + total_samples_duration(other.total_samples_duration), + frames_received(other.frames_received), round_trip_time(other.round_trip_time), packets_discarded(other.packets_discarded), packets_repaired(other.packets_repaired), @@ -682,14 +768,23 @@ RTCInboundRTPStreamStats::RTCInboundRTPStreamStats( burst_discard_rate(other.burst_discard_rate), gap_loss_rate(other.gap_loss_rate), gap_discard_rate(other.gap_discard_rate), + frame_width(other.frame_width), + frame_height(other.frame_height), + frame_bit_depth(other.frame_bit_depth), + frames_per_second(other.frames_per_second), frames_decoded(other.frames_decoded), key_frames_decoded(other.key_frames_decoded), + frames_dropped(other.frames_dropped), total_decode_time(other.total_decode_time), total_inter_frame_delay(other.total_inter_frame_delay), total_squared_inter_frame_delay(other.total_squared_inter_frame_delay), content_type(other.content_type), estimated_playout_timestamp(other.estimated_playout_timestamp), - decoder_implementation(other.decoder_implementation) {} + 
decoder_implementation(other.decoder_implementation), + fir_count(other.fir_count), + pli_count(other.pli_count), + nack_count(other.nack_count), + qp_sum(other.qp_sum) {} RTCInboundRTPStreamStats::~RTCInboundRTPStreamStats() {} @@ -716,9 +811,14 @@ WEBRTC_RTCSTATS_IMPL( &huge_frames_sent, &total_packet_send_delay, &quality_limitation_reason, + &quality_limitation_durations, &quality_limitation_resolution_changes, &content_type, - &encoder_implementation) + &encoder_implementation, + &fir_count, + &pli_count, + &nack_count, + &qp_sum) // clang-format on RTCOutboundRTPStreamStats::RTCOutboundRTPStreamStats(const std::string& id, @@ -748,10 +848,15 @@ RTCOutboundRTPStreamStats::RTCOutboundRTPStreamStats(std::string&& id, huge_frames_sent("hugeFramesSent"), total_packet_send_delay("totalPacketSendDelay"), quality_limitation_reason("qualityLimitationReason"), + quality_limitation_durations("qualityLimitationDurations"), quality_limitation_resolution_changes( "qualityLimitationResolutionChanges"), content_type("contentType"), - encoder_implementation("encoderImplementation") {} + encoder_implementation("encoderImplementation"), + fir_count("firCount"), + pli_count("pliCount"), + nack_count("nackCount"), + qp_sum("qpSum") {} RTCOutboundRTPStreamStats::RTCOutboundRTPStreamStats( const RTCOutboundRTPStreamStats& other) @@ -776,24 +881,27 @@ RTCOutboundRTPStreamStats::RTCOutboundRTPStreamStats( huge_frames_sent(other.huge_frames_sent), total_packet_send_delay(other.total_packet_send_delay), quality_limitation_reason(other.quality_limitation_reason), + quality_limitation_durations(other.quality_limitation_durations), quality_limitation_resolution_changes( other.quality_limitation_resolution_changes), content_type(other.content_type), - encoder_implementation(other.encoder_implementation) {} + encoder_implementation(other.encoder_implementation), + fir_count(other.fir_count), + pli_count(other.pli_count), + nack_count(other.nack_count), + qp_sum(other.qp_sum) {} 
RTCOutboundRTPStreamStats::~RTCOutboundRTPStreamStats() {} // clang-format off WEBRTC_RTCSTATS_IMPL( - RTCRemoteInboundRtpStreamStats, RTCStats, "remote-inbound-rtp", - &ssrc, - &kind, - &transport_id, - &codec_id, - &packets_lost, - &jitter, + RTCRemoteInboundRtpStreamStats, RTCReceivedRtpStreamStats, + "remote-inbound-rtp", &local_id, - &round_trip_time) + &round_trip_time, + &fraction_lost, + &total_round_trip_time, + &round_trip_time_measurements) // clang-format on RTCRemoteInboundRtpStreamStats::RTCRemoteInboundRtpStreamStats( @@ -804,30 +912,55 @@ RTCRemoteInboundRtpStreamStats::RTCRemoteInboundRtpStreamStats( RTCRemoteInboundRtpStreamStats::RTCRemoteInboundRtpStreamStats( std::string&& id, int64_t timestamp_us) - : RTCStats(std::move(id), timestamp_us), - ssrc("ssrc"), - kind("kind"), - transport_id("transportId"), - codec_id("codecId"), - packets_lost("packetsLost"), - jitter("jitter"), + : RTCReceivedRtpStreamStats(std::move(id), timestamp_us), local_id("localId"), - round_trip_time("roundTripTime") {} + round_trip_time("roundTripTime"), + fraction_lost("fractionLost"), + total_round_trip_time("totalRoundTripTime"), + round_trip_time_measurements("roundTripTimeMeasurements") {} RTCRemoteInboundRtpStreamStats::RTCRemoteInboundRtpStreamStats( const RTCRemoteInboundRtpStreamStats& other) - : RTCStats(other), - ssrc(other.ssrc), - kind(other.kind), - transport_id(other.transport_id), - codec_id(other.codec_id), - packets_lost(other.packets_lost), - jitter(other.jitter), + : RTCReceivedRtpStreamStats(other), local_id(other.local_id), - round_trip_time(other.round_trip_time) {} + round_trip_time(other.round_trip_time), + fraction_lost(other.fraction_lost), + total_round_trip_time(other.total_round_trip_time), + round_trip_time_measurements(other.round_trip_time_measurements) {} RTCRemoteInboundRtpStreamStats::~RTCRemoteInboundRtpStreamStats() {} +// clang-format off +WEBRTC_RTCSTATS_IMPL( + RTCRemoteOutboundRtpStreamStats, RTCSentRtpStreamStats, + 
"remote-outbound-rtp", + &local_id, + &remote_timestamp, + &reports_sent) +// clang-format on + +RTCRemoteOutboundRtpStreamStats::RTCRemoteOutboundRtpStreamStats( + const std::string& id, + int64_t timestamp_us) + : RTCRemoteOutboundRtpStreamStats(std::string(id), timestamp_us) {} + +RTCRemoteOutboundRtpStreamStats::RTCRemoteOutboundRtpStreamStats( + std::string&& id, + int64_t timestamp_us) + : RTCSentRtpStreamStats(std::move(id), timestamp_us), + local_id("localId"), + remote_timestamp("remoteTimestamp"), + reports_sent("reportsSent") {} + +RTCRemoteOutboundRtpStreamStats::RTCRemoteOutboundRtpStreamStats( + const RTCRemoteOutboundRtpStreamStats& other) + : RTCSentRtpStreamStats(other), + local_id(other.local_id), + remote_timestamp(other.remote_timestamp), + reports_sent(other.reports_sent) {} + +RTCRemoteOutboundRtpStreamStats::~RTCRemoteOutboundRtpStreamStats() {} + // clang-format off WEBRTC_RTCSTATS_IMPL(RTCMediaSourceStats, RTCStats, "parent-media-source", &track_identifier, @@ -854,7 +987,9 @@ RTCMediaSourceStats::~RTCMediaSourceStats() {} WEBRTC_RTCSTATS_IMPL(RTCAudioSourceStats, RTCMediaSourceStats, "media-source", &audio_level, &total_audio_energy, - &total_samples_duration) + &total_samples_duration, + &echo_return_loss, + &echo_return_loss_enhancement) // clang-format on RTCAudioSourceStats::RTCAudioSourceStats(const std::string& id, @@ -865,13 +1000,17 @@ RTCAudioSourceStats::RTCAudioSourceStats(std::string&& id, int64_t timestamp_us) : RTCMediaSourceStats(std::move(id), timestamp_us), audio_level("audioLevel"), total_audio_energy("totalAudioEnergy"), - total_samples_duration("totalSamplesDuration") {} + total_samples_duration("totalSamplesDuration"), + echo_return_loss("echoReturnLoss"), + echo_return_loss_enhancement("echoReturnLossEnhancement") {} RTCAudioSourceStats::RTCAudioSourceStats(const RTCAudioSourceStats& other) : RTCMediaSourceStats(other), audio_level(other.audio_level), total_audio_energy(other.total_audio_energy), - 
total_samples_duration(other.total_samples_duration) {} + total_samples_duration(other.total_samples_duration), + echo_return_loss(other.echo_return_loss), + echo_return_loss_enhancement(other.echo_return_loss_enhancement) {} RTCAudioSourceStats::~RTCAudioSourceStats() {} @@ -906,7 +1045,9 @@ RTCVideoSourceStats::~RTCVideoSourceStats() {} // clang-format off WEBRTC_RTCSTATS_IMPL(RTCTransportStats, RTCStats, "transport", &bytes_sent, + &packets_sent, &bytes_received, + &packets_received, &rtcp_transport_stats_id, &dtls_state, &selected_candidate_pair_id, @@ -925,7 +1066,9 @@ RTCTransportStats::RTCTransportStats(const std::string& id, RTCTransportStats::RTCTransportStats(std::string&& id, int64_t timestamp_us) : RTCStats(std::move(id), timestamp_us), bytes_sent("bytesSent"), + packets_sent("packetsSent"), bytes_received("bytesReceived"), + packets_received("packetsReceived"), rtcp_transport_stats_id("rtcpTransportStatsId"), dtls_state("dtlsState"), selected_candidate_pair_id("selectedCandidatePairId"), @@ -939,7 +1082,9 @@ RTCTransportStats::RTCTransportStats(std::string&& id, int64_t timestamp_us) RTCTransportStats::RTCTransportStats(const RTCTransportStats& other) : RTCStats(other.id(), other.timestamp_us()), bytes_sent(other.bytes_sent), + packets_sent(other.packets_sent), bytes_received(other.bytes_received), + packets_received(other.packets_received), rtcp_transport_stats_id(other.rtcp_transport_stats_id), dtls_state(other.dtls_state), selected_candidate_pair_id(other.selected_candidate_pair_id), diff --git a/stats/test/rtc_test_stats.cc b/stats/test/rtc_test_stats.cc index d8bcbb19eb..e73da76fa9 100644 --- a/stats/test/rtc_test_stats.cc +++ b/stats/test/rtc_test_stats.cc @@ -30,7 +30,9 @@ WEBRTC_RTCSTATS_IMPL(RTCTestStats, &m_sequence_int64, &m_sequence_uint64, &m_sequence_double, - &m_sequence_string) + &m_sequence_string, + &m_map_string_uint64, + &m_map_string_double) RTCTestStats::RTCTestStats(const std::string& id, int64_t timestamp_us) : RTCStats(id, 
timestamp_us), @@ -47,7 +49,9 @@ RTCTestStats::RTCTestStats(const std::string& id, int64_t timestamp_us) m_sequence_int64("mSequenceInt64"), m_sequence_uint64("mSequenceUint64"), m_sequence_double("mSequenceDouble"), - m_sequence_string("mSequenceString") {} + m_sequence_string("mSequenceString"), + m_map_string_uint64("mMapStringUint64"), + m_map_string_double("mMapStringDouble") {} RTCTestStats::RTCTestStats(const RTCTestStats& other) : RTCStats(other.id(), other.timestamp_us()), @@ -64,7 +68,9 @@ RTCTestStats::RTCTestStats(const RTCTestStats& other) m_sequence_int64(other.m_sequence_int64), m_sequence_uint64(other.m_sequence_uint64), m_sequence_double(other.m_sequence_double), - m_sequence_string(other.m_sequence_string) {} + m_sequence_string(other.m_sequence_string), + m_map_string_uint64(other.m_map_string_uint64), + m_map_string_double(other.m_map_string_double) {} RTCTestStats::~RTCTestStats() {} diff --git a/stats/test/rtc_test_stats.h b/stats/test/rtc_test_stats.h index 1db32c25c1..0feb07e78e 100644 --- a/stats/test/rtc_test_stats.h +++ b/stats/test/rtc_test_stats.h @@ -12,6 +12,7 @@ #define STATS_TEST_RTC_TEST_STATS_H_ #include +#include #include #include @@ -42,6 +43,8 @@ class RTC_EXPORT RTCTestStats : public RTCStats { RTCStatsMember> m_sequence_uint64; RTCStatsMember> m_sequence_double; RTCStatsMember> m_sequence_string; + RTCStatsMember> m_map_string_uint64; + RTCStatsMember> m_map_string_double; }; } // namespace webrtc diff --git a/style-guide.md b/style-guide.md deleted file mode 100644 index 901217a86d..0000000000 --- a/style-guide.md +++ /dev/null @@ -1,251 +0,0 @@ -# WebRTC coding style guide - -## **General advice** - -Some older parts of the code violate the style guide in various ways. - -* If making small changes to such code, follow the style guide when - it’s reasonable to do so, but in matters of formatting etc., it is - often better to be consistent with the surrounding code. 
-* If making large changes to such code, consider first cleaning it up - in a separate CL. - -## **C++** - -WebRTC follows the [Chromium][chr-style] and [Google][goog-style] C++ -style guides. In cases where they conflict, the Chromium style guide -trumps the Google style guide, and the rules in this file trump them -both. - -[chr-style]: https://chromium.googlesource.com/chromium/src/+/HEAD/styleguide/c++/c++.md -[goog-style]: https://google.github.io/styleguide/cppguide.html - -### C++ version - -WebRTC is written in C++14, but with some restrictions: - -* We only allow the subset of C++14 (language and library) that is not - banned by Chromium; see [this page][chromium-cpp]. -* We only allow the subset of C++14 that is also valid C++17; - otherwise, users would not be able to compile WebRTC in C++17 mode. - -[chromium-cpp]: https://chromium-cpp.appspot.com/ - -### Abseil - -You may use a subset of the utilities provided by the [Abseil][abseil] -library when writing WebRTC C++ code. [Details](abseil-in-webrtc.md). - -[abseil]: https://abseil.io/about/ - -### `.h` and `.cc` files come in pairs - -`.h` and `.cc` files should come in pairs, with the same name (except -for the file type suffix), in the same directory, in the same build -target. - -* If a declaration in `path/to/foo.h` has a definition in some `.cc` - file, it should be in `path/to/foo.cc`. -* If a definition in `path/to/foo.cc` file has a declaration in some - `.h` file, it should be in `path/to/foo.h`. -* Omit the `.cc` file if it would have been empty, but still list the - `.h` file in a build target. -* Omit the `.h` file if it would have been empty. (This can happen - with unit test `.cc` files, and with `.cc` files that define - `main`.) - -This makes the source code easier to navigate and organize, and -precludes some questionable build system practices such as having -build targets that don’t pull in definitions for everything they -declare. 
- -[Examples and exceptions](style-guide/h-cc-pairs.md). - -### TODO comments - -Follow the [Google style][goog-style-todo]. When referencing a WebRTC bug, -prefer the url form, e.g. -``` -// TODO(bugs.webrtc.org/12345): Delete the hack when blocking bugs are resolved. -``` - -[goog-style-todo]: https://google.github.io/styleguide/cppguide.html#TODO_Comments - -### ArrayView - -When passing an array of values to a function, use `rtc::ArrayView` -whenever possible—that is, whenever you’re not passing ownership of -the array, and don’t allow the callee to change the array size. - -For example, - -instead of | use -------------------------------------|--------------------- -`const std::vector&` | `ArrayView` -`const T* ptr, size_t num_elements` | `ArrayView` -`T* ptr, size_t num_elements` | `ArrayView` - -See [the source](api/array_view.h) for more detailed docs. - -### sigslot - -sigslot is a lightweight library that adds a signal/slot language -construct to C++, making it easy to implement the observer pattern -with minimal boilerplate code. - -When adding a signal to a pure interface, **prefer to add a pure -virtual method that returns a reference to a signal**: - -``` -sigslot::signal& SignalFoo() = 0; -``` - -As opposed to making it a public member variable, as a lot of legacy -code does: - -``` -sigslot::signal SignalFoo; -``` - -The virtual method approach has the advantage that it keeps the -interface stateless, and gives the subclass more flexibility in how it -implements the signal. It may: - -* Have its own signal as a member variable. -* Use a `sigslot::repeater`, to repeat a signal of another object: - - ``` - sigslot::repeater foo_; - /* ... */ - foo_.repeat(bar_.SignalFoo()); - ``` -* Just return another object's signal directly, if the other object's - lifetime is the same as its own. 
- - ``` - sigslot::signal& SignalFoo() { return bar_.SignalFoo(); } - ``` - -### std::bind - -Don’t use `std::bind`—there are pitfalls, and lambdas are almost as -succinct and already familiar to modern C++ programmers. - -### std::function - -`std::function` is allowed, but remember that it’s not the right tool -for every occasion. Prefer to use interfaces when that makes sense, -and consider `rtc::FunctionView` for cases where the callee will not -save the function object. - -### Forward declarations - -WebRTC follows the [Google][goog-forward-declarations] C++ style guide -with respect to forward declarations. In summary: avoid using forward -declarations where possible; just `#include` the headers you need. - -[goog-forward-declarations]: https://google.github.io/styleguide/cppguide.html#Forward_Declarations - -## **C** - -There’s a substantial chunk of legacy C code in WebRTC, and a lot of -it is old enough that it violates the parts of the C++ style guide -that also applies to C (naming etc.) for the simple reason that it -pre-dates the use of the current C++ style guide for this code base. - -* If making small changes to C code, mimic the style of the - surrounding code. -* If making large changes to C code, consider converting the whole - thing to C++ first. - -## **Java** - -WebRTC follows the [Google Java style guide][goog-java-style]. - -[goog-java-style]: https://google.github.io/styleguide/javaguide.html - -## **Objective-C and Objective-C++** - -WebRTC follows the -[Chromium Objective-C and Objective-C++ style guide][chr-objc-style]. - -[chr-objc-style]: https://chromium.googlesource.com/chromium/src/+/HEAD/styleguide/objective-c/objective-c.md - -## **Python** - -WebRTC follows [Chromium’s Python style][chr-py-style]. 
- -[chr-py-style]: https://chromium.googlesource.com/chromium/src/+/HEAD/styleguide/styleguide.md#python - -## **Build files** - -The WebRTC build files are written in [GN][gn], and we follow -the [Chromium GN style guide][chr-gn-style]. Additionally, there are -some WebRTC-specific rules below; in case of conflict, they trump the -Chromium style guide. - -[gn]: https://chromium.googlesource.com/chromium/src/tools/gn/ -[chr-gn-style]: https://chromium.googlesource.com/chromium/src/tools/gn/+/HEAD/docs/style_guide.md - -### WebRTC-specific GN templates - -Use the following [GN templates][gn-templ] to ensure that all -our [targets][gn-target] are built with the same configuration: - -instead of | use ------------------|--------------------- -`executable` | `rtc_executable` -`shared_library` | `rtc_shared_library` -`source_set` | `rtc_source_set` -`static_library` | `rtc_static_library` -`test` | `rtc_test` - -[gn-templ]: https://chromium.googlesource.com/chromium/src/tools/gn/+/HEAD/docs/language.md#Templates -[gn-target]: https://chromium.googlesource.com/chromium/src/tools/gn/+/HEAD/docs/language.md#Targets - -### Target visibility and the native API - -The [WebRTC-specific GN templates](#webrtc-gn-templates) declare build -targets whose default `visibility` allows all other targets in the -WebRTC tree (and no targets outside the tree) to depend on them. - -Prefer to restrict the visibility if possible: - -* If a target is used by only one or a tiny number of other targets, - prefer to list them explicitly: `visibility = [ ":foo", ":bar" ]` -* If a target is used only by targets in the same `BUILD.gn` file: - `visibility = [ ":*" ]`. - -Setting `visibility = [ "*" ]` means that targets outside the WebRTC -tree can depend on this target; use this only for build targets whose -headers are part of the [native API](native-api.md). - -### Conditional compilation with the C preprocessor - -Avoid using the C preprocessor to conditionally enable or disable -pieces of code. 
But if you can’t avoid it, introduce a GN variable, -and then set a preprocessor constant to either 0 or 1 in the build -targets that need it: - -``` -if (apm_debug_dump) { - defines = [ "WEBRTC_APM_DEBUG_DUMP=1" ] -} else { - defines = [ "WEBRTC_APM_DEBUG_DUMP=0" ] -} -``` - -In the C, C++, or Objective-C files, use `#if` when testing the flag, -not `#ifdef` or `#if defined()`: - -``` -#if WEBRTC_APM_DEBUG_DUMP -// One way. -#else -// Or another. -#endif -``` - -When combined with the `-Wundef` compiler option, this produces -compile time warnings if preprocessor symbols are misspelled, or used -without corresponding build rules to set them. diff --git a/system_wrappers/BUILD.gn b/system_wrappers/BUILD.gn index 1ff2ddd4fd..80088e0d01 100644 --- a/system_wrappers/BUILD.gn +++ b/system_wrappers/BUILD.gn @@ -16,6 +16,7 @@ rtc_library("system_wrappers") { visibility = [ "*" ] sources = [ "include/clock.h", + "include/cpu_features_wrapper.h", "include/cpu_info.h", "include/ntp_time.h", "include/rtp_to_ntp_estimator.h", @@ -30,16 +31,16 @@ rtc_library("system_wrappers") { defines = [] libs = [] deps = [ - ":cpu_features_api", + ":field_trial", "../api:array_view", "../api/units:timestamp", "../modules:module_api_public", "../rtc_base:checks", - "../rtc_base/synchronization:rw_lock_wrapper", + "../rtc_base/synchronization:mutex", "../rtc_base/system:arch", "../rtc_base/system:rtc_export", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] if (is_android) { if (build_with_mozilla) { @@ -49,15 +50,16 @@ rtc_library("system_wrappers") { "/nsprpub/pr/include", ] } else { - deps += [ ":cpu_features_android" ] + sources += [ "source/cpu_features_android.cc" ] + deps += [ "//third_party/android_sdk:cpu_features" ] } libs += [ "log" ] } - if (is_linux) { + if (is_linux || is_chromeos) { if (!build_with_chromium) { - deps += [ ":cpu_features_linux" ] + sources += [ "source/cpu_features_linux.cc" ] } libs += [ "rt" 
] @@ -68,7 +70,7 @@ rtc_library("system_wrappers") { # Windows needs ../rtc_base due to include of # webrtc/rtc_base/win32.h in source/clock.cc. - deps += [ "../rtc_base" ] + deps += [ "../rtc_base:win32" ] } deps += [ @@ -77,10 +79,6 @@ rtc_library("system_wrappers") { ] } -rtc_source_set("cpu_features_api") { - sources = [ "include/cpu_features_wrapper.h" ] -} - rtc_library("field_trial") { visibility = [ "*" ] public = [ "include/field_trial.h" ] @@ -92,8 +90,8 @@ rtc_library("field_trial") { "../rtc_base:checks", "../rtc_base:logging", "../rtc_base:stringutils", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } rtc_library("metrics") { @@ -106,28 +104,11 @@ rtc_library("metrics") { deps = [ "../rtc_base:checks", "../rtc_base:rtc_base_approved", + "../rtc_base/synchronization:mutex", ] } -if (is_android && !build_with_mozilla) { - rtc_library("cpu_features_android") { - sources = [ "source/cpu_features_android.c" ] - - deps = [ "//third_party/android_sdk:cpu_features" ] - } -} - -if (is_linux) { - rtc_library("cpu_features_linux") { - sources = [ "source/cpu_features_linux.c" ] - deps = [ - ":cpu_features_api", - "../rtc_base/system:arch", - ] - } -} - -if (rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { rtc_test("system_wrappers_unittests") { testonly = true sources = [ @@ -149,9 +130,10 @@ if (rtc_include_tests) { "../test:test_main", "../test:test_support", "//testing/gtest", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] + if (is_android) { deps += [ "//testing/android/native_test:native_test_support" ] diff --git a/system_wrappers/include/clock.h b/system_wrappers/include/clock.h index 8eac3d18b3..271291c214 100644 --- a/system_wrappers/include/clock.h +++ b/system_wrappers/include/clock.h @@ -13,10 +13,10 @@ #include +#include #include #include "api/units/timestamp.h" -#include "rtc_base/synchronization/rw_lock_wrapper.h" 
#include "rtc_base/system/rtc_export.h" #include "system_wrappers/include/ntp_time.h" @@ -32,22 +32,24 @@ const double kMagicNtpFractionalUnit = 4.294967296E+9; class RTC_EXPORT Clock { public: virtual ~Clock() {} + // Return a timestamp relative to an unspecified epoch. - virtual Timestamp CurrentTime() { - return Timestamp::Micros(TimeInMicroseconds()); + virtual Timestamp CurrentTime() = 0; + int64_t TimeInMilliseconds() { return CurrentTime().ms(); } + int64_t TimeInMicroseconds() { return CurrentTime().us(); } + + // Retrieve an NTP absolute timestamp (with an epoch of Jan 1, 1900). + // TODO(bugs.webrtc.org/11327): Make this non-virtual once + // "WebRTC-SystemIndependentNtpTimeKillSwitch" is removed. + virtual NtpTime CurrentNtpTime() { + return ConvertTimestampToNtpTime(CurrentTime()); } - virtual int64_t TimeInMilliseconds() { return CurrentTime().ms(); } - virtual int64_t TimeInMicroseconds() { return CurrentTime().us(); } - - // Retrieve an NTP absolute timestamp. - virtual NtpTime CurrentNtpTime() = 0; - - // Retrieve an NTP absolute timestamp in milliseconds. - virtual int64_t CurrentNtpInMilliseconds() = 0; + int64_t CurrentNtpInMilliseconds() { return CurrentNtpTime().ToMs(); } - // Converts an NTP timestamp to a millisecond timestamp. - static int64_t NtpToMs(uint32_t seconds, uint32_t fractions) { - return NtpTime(seconds, fractions).ToMs(); + // Converts between a relative timestamp returned by this clock, to NTP time. + virtual NtpTime ConvertTimestampToNtpTime(Timestamp timestamp) = 0; + int64_t ConvertTimestampToNtpTimeInMilliseconds(int64_t timestamp_ms) { + return ConvertTimestampToNtpTime(Timestamp::Millis(timestamp_ms)).ToMs(); } // Returns an instance of the real-time system clock implementation. @@ -56,20 +58,15 @@ class RTC_EXPORT Clock { class SimulatedClock : public Clock { public: + // The constructors assume an epoch of Jan 1, 1970. 
explicit SimulatedClock(int64_t initial_time_us); explicit SimulatedClock(Timestamp initial_time); - ~SimulatedClock() override; - // Return a timestamp relative to some arbitrary source; the source is fixed - // for this clock. + // Return a timestamp with an epoch of Jan 1, 1970. Timestamp CurrentTime() override; - // Retrieve an NTP absolute timestamp. - NtpTime CurrentNtpTime() override; - - // Converts an NTP timestamp to a millisecond timestamp. - int64_t CurrentNtpInMilliseconds() override; + NtpTime ConvertTimestampToNtpTime(Timestamp timestamp) override; // Advance the simulated clock with a given number of milliseconds or // microseconds. @@ -78,8 +75,12 @@ class SimulatedClock : public Clock { void AdvanceTime(TimeDelta delta); private: - Timestamp time_; - std::unique_ptr lock_; + // The time is read and incremented with relaxed order. Each thread will see + // monotonically increasing time, and when threads post tasks or messages to + // one another, the synchronization done as part of the message passing should + // ensure that any causal chain of events on multiple threads also + // corresponds to monotonically increasing time. + std::atomic time_us_; }; } // namespace webrtc diff --git a/system_wrappers/include/cpu_features_wrapper.h b/system_wrappers/include/cpu_features_wrapper.h index 739161afca..612b4a5d6b 100644 --- a/system_wrappers/include/cpu_features_wrapper.h +++ b/system_wrappers/include/cpu_features_wrapper.h @@ -13,12 +13,10 @@ #include -#if defined(__cplusplus) || defined(c_plusplus) -extern "C" { -#endif +namespace webrtc { // List of features in x86. -typedef enum { kSSE2, kSSE3 } CPUFeature; +typedef enum { kSSE2, kSSE3, kAVX2 } CPUFeature; // List of features in ARM. enum { @@ -28,21 +26,17 @@ enum { kCPUFeatureLDREXSTREX = (1 << 3) }; -typedef int (*WebRtc_CPUInfo)(CPUFeature feature); - // Returns true if the CPU supports the feature.
-extern WebRtc_CPUInfo WebRtc_GetCPUInfo; +int GetCPUInfo(CPUFeature feature); // No CPU feature is available => straight C path. -extern WebRtc_CPUInfo WebRtc_GetCPUInfoNoASM; +int GetCPUInfoNoASM(CPUFeature feature); // Return the features in an ARM device. // It detects the features in the hardware platform, and returns supported // values in the above enum definition as a bitmask. -extern uint64_t WebRtc_GetCPUFeaturesARM(void); +uint64_t GetCPUFeaturesARM(void); -#if defined(__cplusplus) || defined(c_plusplus) -} // extern "C" -#endif +} // namespace webrtc #endif // SYSTEM_WRAPPERS_INCLUDE_CPU_FEATURES_WRAPPER_H_ diff --git a/system_wrappers/source/clock.cc b/system_wrappers/source/clock.cc index b0356fc40c..77c1d36327 100644 --- a/system_wrappers/source/clock.cc +++ b/system_wrappers/source/clock.cc @@ -10,6 +10,8 @@ #include "system_wrappers/include/clock.h" +#include "system_wrappers/include/field_trial.h" + #if defined(WEBRTC_WIN) // Windows needs to be included before mmsystem.h @@ -17,7 +19,6 @@ #include -#include "rtc_base/critical_section.h" #elif defined(WEBRTC_POSIX) @@ -26,61 +27,92 @@ #endif // defined(WEBRTC_POSIX) -#include "rtc_base/synchronization/rw_lock_wrapper.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" namespace webrtc { +namespace { + +int64_t NtpOffsetUsCalledOnce() { + constexpr int64_t kNtpJan1970Sec = 2208988800; + int64_t clock_time = rtc::TimeMicros(); + int64_t utc_time = rtc::TimeUTCMicros(); + return utc_time - clock_time + kNtpJan1970Sec * rtc::kNumMicrosecsPerSec; +} + +NtpTime TimeMicrosToNtp(int64_t time_us) { + static int64_t ntp_offset_us = NtpOffsetUsCalledOnce(); + + int64_t time_ntp_us = time_us + ntp_offset_us; + RTC_DCHECK_GE(time_ntp_us, 0); // Time before year 1900 is unsupported. + + // Convert seconds to uint32 through uint64 for a well-defined cast. + // A wrap around, which will happen in 2036, is expected for NTP time. 
+ uint32_t ntp_seconds = + static_cast(time_ntp_us / rtc::kNumMicrosecsPerSec); + + // Scale fractions of the second to NTP resolution. + constexpr int64_t kNtpFractionsInSecond = 1LL << 32; + int64_t us_fractions = time_ntp_us % rtc::kNumMicrosecsPerSec; + uint32_t ntp_fractions = + us_fractions * kNtpFractionsInSecond / rtc::kNumMicrosecsPerSec; + + return NtpTime(ntp_seconds, ntp_fractions); +} + +void GetSecondsAndFraction(const timeval& time, + uint32_t* seconds, + double* fraction) { + *seconds = time.tv_sec + kNtpJan1970; + *fraction = time.tv_usec / 1e6; + + while (*fraction >= 1) { + --*fraction; + ++*seconds; + } + while (*fraction < 0) { + ++*fraction; + --*seconds; + } +} + +} // namespace class RealTimeClock : public Clock { + public: + RealTimeClock() + : use_system_independent_ntp_time_(!field_trial::IsEnabled( + "WebRTC-SystemIndependentNtpTimeKillSwitch")) {} + Timestamp CurrentTime() override { return Timestamp::Micros(rtc::TimeMicros()); } - // Return a timestamp in milliseconds relative to some arbitrary source; the - // source is fixed for this clock. - int64_t TimeInMilliseconds() override { return rtc::TimeMillis(); } - // Return a timestamp in microseconds relative to some arbitrary source; the - // source is fixed for this clock. - int64_t TimeInMicroseconds() override { return rtc::TimeMicros(); } - - // Retrieve an NTP absolute timestamp. NtpTime CurrentNtpTime() override { - timeval tv = CurrentTimeVal(); - double microseconds_in_seconds; - uint32_t seconds; - Adjust(tv, &seconds, µseconds_in_seconds); - uint32_t fractions = static_cast( - microseconds_in_seconds * kMagicNtpFractionalUnit + 0.5); - return NtpTime(seconds, fractions); + return use_system_independent_ntp_time_ ? TimeMicrosToNtp(rtc::TimeMicros()) + : SystemDependentNtpTime(); } - // Retrieve an NTP absolute timestamp in milliseconds. 
- int64_t CurrentNtpInMilliseconds() override { - timeval tv = CurrentTimeVal(); - uint32_t seconds; - double microseconds_in_seconds; - Adjust(tv, &seconds, µseconds_in_seconds); - return 1000 * static_cast(seconds) + - static_cast(1000.0 * microseconds_in_seconds + 0.5); + NtpTime ConvertTimestampToNtpTime(Timestamp timestamp) override { + // This method does not check |use_system_independent_ntp_time_| because + // all callers never used the old behavior of |CurrentNtpTime|. + return TimeMicrosToNtp(timestamp.us()); } protected: virtual timeval CurrentTimeVal() = 0; - static void Adjust(const timeval& tv, - uint32_t* adjusted_s, - double* adjusted_us_in_s) { - *adjusted_s = tv.tv_sec + kNtpJan1970; - *adjusted_us_in_s = tv.tv_usec / 1e6; - - if (*adjusted_us_in_s >= 1) { - *adjusted_us_in_s -= 1; - ++*adjusted_s; - } else if (*adjusted_us_in_s < -1) { - *adjusted_us_in_s += 1; - --*adjusted_s; - } + private: + NtpTime SystemDependentNtpTime() { + uint32_t seconds; + double fraction; + GetSecondsAndFraction(CurrentTimeVal(), &seconds, &fraction); + + return NtpTime(seconds, static_cast( + fraction * kMagicNtpFractionalUnit + 0.5)); } + + bool use_system_independent_ntp_time_; }; #if defined(WINUWP) @@ -91,10 +123,10 @@ class WinUwpRealTimeClock final : public RealTimeClock { protected: timeval CurrentTimeVal() override { - // The rtc::SystemTimeNanos() method is already time offset from a base - // epoch value and might as be synchronized against an NTP time server as - // an added bonus. - auto nanos = rtc::SystemTimeNanos(); + // The rtc::WinUwpSystemTimeNanos() method is already time offset from a + // base epoch value and might as well be synchronized against an NTP time + // server as an added bonus.
+ auto nanos = rtc::WinUwpSystemTimeNanos(); struct timeval tv; @@ -150,7 +182,7 @@ class WindowsRealTimeClock : public RealTimeClock { DWORD t; LARGE_INTEGER elapsed_ms; { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // time MUST be fetched inside the critical section to avoid non-monotonic // last_time_ms_ values that'll register as incorrect wraparounds due to // concurrent calls to GetTime. @@ -200,7 +232,7 @@ class WindowsRealTimeClock : public RealTimeClock { return ref; } - rtc::CriticalSection crit_; + Mutex mutex_; DWORD last_time_ms_; LONG num_timer_wraps_; const ReferencePoint ref_point_; @@ -239,30 +271,25 @@ Clock* Clock::GetRealTimeClock() { } SimulatedClock::SimulatedClock(int64_t initial_time_us) - : SimulatedClock(Timestamp::Micros(initial_time_us)) {} + : time_us_(initial_time_us) {} SimulatedClock::SimulatedClock(Timestamp initial_time) - : time_(initial_time), lock_(RWLockWrapper::CreateRWLock()) {} + : SimulatedClock(initial_time.us()) {} SimulatedClock::~SimulatedClock() {} Timestamp SimulatedClock::CurrentTime() { - ReadLockScoped synchronize(*lock_); - return time_; + return Timestamp::Micros(time_us_.load(std::memory_order_relaxed)); } -NtpTime SimulatedClock::CurrentNtpTime() { - int64_t now_ms = TimeInMilliseconds(); - uint32_t seconds = (now_ms / 1000) + kNtpJan1970; - uint32_t fractions = - static_cast((now_ms % 1000) * kMagicNtpFractionalUnit / 1000); +NtpTime SimulatedClock::ConvertTimestampToNtpTime(Timestamp timestamp) { + int64_t now_us = timestamp.us(); + uint32_t seconds = (now_us / 1'000'000) + kNtpJan1970; + uint32_t fractions = static_cast( + (now_us % 1'000'000) * kMagicNtpFractionalUnit / 1'000'000); return NtpTime(seconds, fractions); } -int64_t SimulatedClock::CurrentNtpInMilliseconds() { - return TimeInMilliseconds() + 1000 * static_cast(kNtpJan1970); -} - void SimulatedClock::AdvanceTimeMilliseconds(int64_t milliseconds) { AdvanceTime(TimeDelta::Millis(milliseconds)); } @@ -271,9 +298,13 @@ void 
SimulatedClock::AdvanceTimeMicroseconds(int64_t microseconds) { AdvanceTime(TimeDelta::Micros(microseconds)); } +// TODO(bugs.webrtc.org/12102): It's desirable to let a single thread own +// advancement of the clock. We could then replace this read-modify-write +// operation with just a thread checker. But currently, that breaks a couple of +// tests, in particular, RepeatingTaskTest.ClockIntegration and +// CallStatsTest.LastProcessedRtt. void SimulatedClock::AdvanceTime(TimeDelta delta) { - WriteLockScoped synchronize(*lock_); - time_ += delta; + time_us_.fetch_add(delta.us(), std::memory_order_relaxed); } } // namespace webrtc diff --git a/system_wrappers/source/cpu_features.cc b/system_wrappers/source/cpu_features.cc index ebcb48c15f..0f81212894 100644 --- a/system_wrappers/source/cpu_features.cc +++ b/system_wrappers/source/cpu_features.cc @@ -12,11 +12,14 @@ #include "rtc_base/system/arch.h" #include "system_wrappers/include/cpu_features_wrapper.h" +#include "system_wrappers/include/field_trial.h" #if defined(WEBRTC_ARCH_X86_FAMILY) && defined(_MSC_VER) #include #endif +namespace webrtc { + // No CPU feature is available => straight C path. int GetCPUInfoNoASM(CPUFeature feature) { (void)feature; @@ -24,6 +27,22 @@ int GetCPUInfoNoASM(CPUFeature feature) { } #if defined(WEBRTC_ARCH_X86_FAMILY) + +#if defined(WEBRTC_ENABLE_AVX2) +// xgetbv returns the value of an Intel Extended Control Register (XCR). +// Currently only XCR0 is defined by Intel so |xcr| should always be zero. +static uint64_t xgetbv(uint32_t xcr) { +#if defined(_MSC_VER) + return _xgetbv(xcr); +#else + uint32_t eax, edx; + + __asm__ volatile("xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr)); + return (static_cast(edx) << 32) | eax; +#endif // _MSC_VER +} +#endif // WEBRTC_ENABLE_AVX2 + #ifndef _MSC_VER // Intrinsic for "cpuid".
#if defined(__pic__) && defined(__i386__) @@ -41,7 +60,7 @@ static inline void __cpuid(int cpu_info[4], int info_type) { __asm__ volatile("cpuid\n" : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) - : "a"(info_type)); + : "a"(info_type), "c"(0)); } #endif #endif // _MSC_VER @@ -49,7 +68,7 @@ static inline void __cpuid(int cpu_info[4], int info_type) { #if defined(WEBRTC_ARCH_X86_FAMILY) // Actual feature detection for x86. -static int GetCPUInfo(CPUFeature feature) { +int GetCPUInfo(CPUFeature feature) { int cpu_info[4]; __cpuid(cpu_info, 1); if (feature == kSSE2) { @@ -58,15 +77,39 @@ static int GetCPUInfo(CPUFeature feature) { if (feature == kSSE3) { return 0 != (cpu_info[2] & 0x00000001); } +#if defined(WEBRTC_ENABLE_AVX2) + if (feature == kAVX2 && + !webrtc::field_trial::IsEnabled("WebRTC-Avx2SupportKillSwitch")) { + int cpu_info7[4]; + __cpuid(cpu_info7, 0); + int num_ids = cpu_info7[0]; + if (num_ids < 7) { + return 0; + } + // Interpret CPU feature information. + __cpuid(cpu_info7, 7); + + // AVX instructions can be used when + // a) AVX are supported by the CPU, + // b) XSAVE is supported by the CPU, + // c) XSAVE is enabled by the kernel. + // See http://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled + // AVX2 support needs (avx_support && (cpu_info7[1] & 0x00000020) != 0;). + return (cpu_info[2] & 0x10000000) != 0 && + (cpu_info[2] & 0x04000000) != 0 /* XSAVE */ && + (cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ && + (xgetbv(0) & 0x00000006) == 6 /* XSAVE enabled by kernel */ && + (cpu_info7[1] & 0x00000020) != 0; + } +#endif // WEBRTC_ENABLE_AVX2 return 0; } #else // Default to straight C for other platforms. 
-static int GetCPUInfo(CPUFeature feature) { +int GetCPUInfo(CPUFeature feature) { (void)feature; return 0; } #endif -WebRtc_CPUInfo WebRtc_GetCPUInfo = GetCPUInfo; -WebRtc_CPUInfo WebRtc_GetCPUInfoNoASM = GetCPUInfoNoASM; +} // namespace webrtc diff --git a/system_wrappers/source/cpu_features_android.c b/system_wrappers/source/cpu_features_android.cc similarity index 85% rename from system_wrappers/source/cpu_features_android.c rename to system_wrappers/source/cpu_features_android.cc index 0cb3a6c5ee..95cc609b09 100644 --- a/system_wrappers/source/cpu_features_android.c +++ b/system_wrappers/source/cpu_features_android.cc @@ -10,6 +10,10 @@ #include -uint64_t WebRtc_GetCPUFeaturesARM(void) { +namespace webrtc { + +uint64_t GetCPUFeaturesARM(void) { return android_getCpuFeatures(); } + +} // namespace webrtc diff --git a/system_wrappers/source/cpu_features_linux.c b/system_wrappers/source/cpu_features_linux.cc similarity index 87% rename from system_wrappers/source/cpu_features_linux.c rename to system_wrappers/source/cpu_features_linux.cc index 004de5a6a9..335bed4da3 100644 --- a/system_wrappers/source/cpu_features_linux.c +++ b/system_wrappers/source/cpu_features_linux.cc @@ -8,32 +8,39 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include #include #include -#include -#ifndef __GLIBC_PREREQ -#define __GLIBC_PREREQ(a, b) 0 + +#ifdef __GLIBC_PREREQ +#define WEBRTC_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b) +#else +#define WEBRTC_GLIBC_PREREQ(a, b) 0 #endif -#if __GLIBC_PREREQ(2, 16) + +#if WEBRTC_GLIBC_PREREQ(2, 16) #include #else -#include -#include #include +#include #include +#include #endif + #include "rtc_base/system/arch.h" #include "system_wrappers/include/cpu_features_wrapper.h" #if defined(WEBRTC_ARCH_ARM_FAMILY) #include -uint64_t WebRtc_GetCPUFeaturesARM(void) { +namespace webrtc { + +uint64_t GetCPUFeaturesARM(void) { uint64_t result = 0; int architecture = 0; - unsigned long hwcap = 0; + uint64_t hwcap = 0; const char* platform = NULL; -#if __GLIBC_PREREQ(2, 16) +#if WEBRTC_GLIBC_PREREQ(2, 16) hwcap = getauxval(AT_HWCAP); platform = (const char*)getauxval(AT_PLATFORM); #else @@ -57,7 +64,7 @@ uint64_t WebRtc_GetCPUFeaturesARM(void) { } close(fd); } -#endif // __GLIBC_PREREQ(2,16) +#endif // WEBRTC_GLIBC_PREREQ(2, 16) #if defined(__aarch64__) architecture = 8; if ((hwcap & HWCAP_FP) != 0) @@ -84,4 +91,6 @@ uint64_t WebRtc_GetCPUFeaturesARM(void) { result |= kCPUFeatureLDREXSTREX; return result; } + +} // namespace webrtc #endif // WEBRTC_ARCH_ARM_FAMILY diff --git a/system_wrappers/source/field_trial.cc b/system_wrappers/source/field_trial.cc index f1dccc987b..d10b5cff3f 100644 --- a/system_wrappers/source/field_trial.cc +++ b/system_wrappers/source/field_trial.cc @@ -85,7 +85,7 @@ void InsertOrReplaceFieldTrialStringsInMap( (*fieldtrial_map)[tokens[idx]] = tokens[idx + 1]; } } else { - RTC_DCHECK(false) << "Invalid field trials string:" << trials_string; + RTC_NOTREACHED() << "Invalid field trials string:" << trials_string; } } diff --git a/system_wrappers/source/field_trial_unittest.cc b/system_wrappers/source/field_trial_unittest.cc index fdabe1b7e6..ada6313e67 100644 --- a/system_wrappers/source/field_trial_unittest.cc +++ b/system_wrappers/source/field_trial_unittest.cc @@ 
-32,7 +32,7 @@ TEST(FieldTrialValidationTest, AcceptsValidInputs) { EXPECT_TRUE(FieldTrialsStringIsValid("Audio/Enabled/B/C/Audio/Enabled/")); } -TEST(FieldTrialValidationTest, RejectsBadInputs) { +TEST(FieldTrialValidationDeathTest, RejectsBadInputs) { // Bad delimiters RTC_EXPECT_DEATH(InitFieldTrialsFromString("Audio/EnabledVideo/Disabled/"), "Invalid field trials string:"); @@ -90,7 +90,7 @@ TEST(FieldTrialMergingTest, MergesValidInput) { "Audio/Enabled/Video/Enabled/"); } -TEST(FieldTrialMergingTest, DchecksBadInput) { +TEST(FieldTrialMergingDeathTest, DchecksBadInput) { RTC_EXPECT_DEATH(MergeFieldTrialsStrings("Audio/Enabled/", "garbage"), "Invalid field trials string:"); } diff --git a/system_wrappers/source/metrics.cc b/system_wrappers/source/metrics.cc index 2383272887..d42833643d 100644 --- a/system_wrappers/source/metrics.cc +++ b/system_wrappers/source/metrics.cc @@ -11,7 +11,8 @@ #include -#include "rtc_base/critical_section.h" +#include "rtc_base/constructor_magic.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" // Default implementation of histogram methods for WebRTC clients that do not @@ -38,7 +39,7 @@ class RtcHistogram { sample = std::min(sample, max_); sample = std::max(sample, min_ - 1); // Underflow bucket. - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); if (info_.samples.size() == kMaxSampleMapSize && info_.samples.find(sample) == info_.samples.end()) { return; @@ -48,7 +49,7 @@ class RtcHistogram { // Returns a copy (or nullptr if there are no samples) and clears samples. std::unique_ptr GetAndReset() { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); if (info_.samples.empty()) return nullptr; @@ -64,19 +65,19 @@ class RtcHistogram { // Functions only for testing. 
void Reset() { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); info_.samples.clear(); } int NumEvents(int sample) const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); const auto it = info_.samples.find(sample); return (it == info_.samples.end()) ? 0 : it->second; } int NumSamples() const { int num_samples = 0; - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); for (const auto& sample : info_.samples) { num_samples += sample.second; } @@ -84,20 +85,20 @@ class RtcHistogram { } int MinSample() const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return (info_.samples.empty()) ? -1 : info_.samples.begin()->first; } std::map Samples() const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return info_.samples; } private: - rtc::CriticalSection crit_; + mutable Mutex mutex_; const int min_; const int max_; - SampleInfo info_ RTC_GUARDED_BY(crit_); + SampleInfo info_ RTC_GUARDED_BY(mutex_); RTC_DISALLOW_COPY_AND_ASSIGN(RtcHistogram); }; @@ -111,7 +112,7 @@ class RtcHistogramMap { int min, int max, int bucket_count) { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); const auto& it = map_.find(name); if (it != map_.end()) return reinterpret_cast(it->second.get()); @@ -122,7 +123,7 @@ class RtcHistogramMap { } Histogram* GetEnumerationHistogram(const std::string& name, int boundary) { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); const auto& it = map_.find(name); if (it != map_.end()) return reinterpret_cast(it->second.get()); @@ -134,7 +135,7 @@ class RtcHistogramMap { void GetAndReset( std::map>* histograms) { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); for (const auto& kv : map_) { std::unique_ptr info = kv.second->GetAndReset(); if (info) @@ -144,39 +145,39 @@ class RtcHistogramMap { // Functions only for testing. 
void Reset() { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); for (const auto& kv : map_) kv.second->Reset(); } int NumEvents(const std::string& name, int sample) const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); const auto& it = map_.find(name); return (it == map_.end()) ? 0 : it->second->NumEvents(sample); } int NumSamples(const std::string& name) const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); const auto& it = map_.find(name); return (it == map_.end()) ? 0 : it->second->NumSamples(); } int MinSample(const std::string& name) const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); const auto& it = map_.find(name); return (it == map_.end()) ? -1 : it->second->MinSample(); } std::map Samples(const std::string& name) const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); const auto& it = map_.find(name); return (it == map_.end()) ? std::map() : it->second->Samples(); } private: - rtc::CriticalSection crit_; + mutable Mutex mutex_; std::map> map_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); RTC_DISALLOW_COPY_AND_ASSIGN(RtcHistogramMap); }; diff --git a/system_wrappers/source/metrics_unittest.cc b/system_wrappers/source/metrics_unittest.cc index 9e5bc86ba9..7532b2ad83 100644 --- a/system_wrappers/source/metrics_unittest.cc +++ b/system_wrappers/source/metrics_unittest.cc @@ -114,7 +114,8 @@ TEST_F(MetricsTest, RtcHistogramsCounts_AddSample) { } #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -TEST_F(MetricsTest, RtcHistogramsCounts_InvalidIndex) { +using MetricsDeathTest = MetricsTest; +TEST_F(MetricsDeathTest, RtcHistogramsCounts_InvalidIndex) { EXPECT_DEATH(RTC_HISTOGRAMS_COUNTS_1000(-1, "Name", kSample), ""); EXPECT_DEATH(RTC_HISTOGRAMS_COUNTS_1000(3, "Name", kSample), ""); EXPECT_DEATH(RTC_HISTOGRAMS_COUNTS_1000(3u, "Name", kSample), ""); diff --git a/system_wrappers/source/ntp_time_unittest.cc b/system_wrappers/source/ntp_time_unittest.cc index cdaca67fbe..0705531e37 100644 --- 
a/system_wrappers/source/ntp_time_unittest.cc +++ b/system_wrappers/source/ntp_time_unittest.cc @@ -56,7 +56,6 @@ TEST(NtpTimeTest, ToMsMeansToNtpMilliseconds) { SimulatedClock clock(0x123456789abc); NtpTime ntp = clock.CurrentNtpTime(); - EXPECT_EQ(ntp.ToMs(), Clock::NtpToMs(ntp.seconds(), ntp.fractions())); EXPECT_EQ(ntp.ToMs(), clock.CurrentNtpInMilliseconds()); } diff --git a/test/BUILD.gn b/test/BUILD.gn index 3f91f4a5b2..82d0b9ea28 100644 --- a/test/BUILD.gn +++ b/test/BUILD.gn @@ -7,36 +7,39 @@ # be found in the AUTHORS file in the root of the source tree. import("//build/config/ui.gni") +import("//third_party/google_benchmark/buildconfig.gni") import("../webrtc.gni") if (is_android) { import("//build/config/android/rules.gni") } -group("test") { - testonly = true - - deps = [ - ":copy_to_file_audio_capturer", - ":rtp_test_utils", - ":test_common", - ":test_renderer", - ":test_support", - ":video_test_common", - "pc/e2e", - ] +if (!build_with_chromium) { + group("test") { + testonly = true - if (rtc_include_tests) { - deps += [ - ":test_main", - ":test_support_unittests", + deps = [ + ":copy_to_file_audio_capturer", + ":rtp_test_utils", + ":test_common", + ":test_renderer", + ":test_support", + ":video_test_common", ] + + if (rtc_include_tests) { + deps += [ + ":test_main", + ":test_support_unittests", + "pc/e2e", + ] + } } } rtc_library("frame_generator_impl") { visibility = [ - "../api:create_frame_generator", ":*", + "../api:create_frame_generator", ] testonly = true sources = [ @@ -49,10 +52,10 @@ rtc_library("frame_generator_impl") { ":frame_utils", "../api:frame_generator_api", "../api:scoped_refptr", + "../api:sequence_checker", "../api/video:encoded_image", "../api/video:video_frame", "../api/video:video_frame_i010", - "../api/video:video_frame_i420", "../api/video:video_rtp_headers", "../api/video_codecs:video_codecs_api", "../common_video", @@ -67,11 +70,11 @@ rtc_library("frame_generator_impl") { "../rtc_base:criticalsection", 
"../rtc_base:rtc_base_approved", "../rtc_base:rtc_event", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/synchronization:mutex", "../rtc_base/system:file_wrapper", "../system_wrappers", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("frame_utils") { @@ -84,7 +87,6 @@ rtc_library("frame_utils") { deps = [ "../api:scoped_refptr", "../api/video:video_frame", - "../api/video:video_frame_i420", ] } @@ -98,6 +100,8 @@ rtc_library("video_test_common") { "frame_forwarder.h", "frame_generator_capturer.cc", "frame_generator_capturer.h", + "mappable_native_buffer.cc", + "mappable_native_buffer.h", "test_video_capturer.cc", "test_video_capturer.h", "video_codec_settings.h", @@ -106,13 +110,12 @@ rtc_library("video_test_common") { deps = [ ":fileutils", ":frame_utils", + "../api:array_view", "../api:create_frame_generator", "../api:frame_generator_api", "../api:scoped_refptr", "../api/task_queue", "../api/video:video_frame", - "../api/video:video_frame_i010", - "../api/video:video_frame_i420", "../api/video:video_rtp_headers", "../api/video_codecs:video_codecs_api", "../common_video", @@ -125,8 +128,12 @@ rtc_library("video_test_common") { "../rtc_base:rtc_base_approved", "../rtc_base:rtc_task_queue", "../rtc_base:timeutils", + "../rtc_base/synchronization:mutex", "../rtc_base/task_utils:repeating_task", "../system_wrappers", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/strings", ] } @@ -146,6 +153,7 @@ if (!build_with_chromium) { "../api:scoped_refptr", "../modules/video_capture:video_capture_module", "../rtc_base", + "../rtc_base:threading", "../sdk:base_objc", "../sdk:native_api", "../sdk:native_video", @@ -160,10 +168,8 @@ if (!build_with_chromium) { "platform_video_capturer.cc", "platform_video_capturer.h", ] - deps = [ - ":video_test_common", - "//third_party/abseil-cpp/absl/memory", - ] + deps = [ 
":video_test_common" ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] if (is_mac || is_ios) { deps += [ ":video_test_mac" ] } else { @@ -203,8 +209,10 @@ rtc_library("rtp_test_utils") { "../rtc_base:criticalsection", "../rtc_base:logging", "../rtc_base:macromagic", + "../rtc_base/synchronization:mutex", "../rtc_base/system:arch", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("field_trial") { @@ -218,6 +226,20 @@ rtc_library("field_trial") { deps = [ "../system_wrappers:field_trial" ] } +rtc_library("explicit_key_value_config") { + sources = [ + "explicit_key_value_config.cc", + "explicit_key_value_config.h", + ] + + deps = [ + "../api/transport:webrtc_key_value_config", + "../rtc_base:checks", + "../system_wrappers:field_trial", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings:strings" ] +} + rtc_library("perf_test") { visibility = [ "*" ] testonly = true @@ -231,11 +253,17 @@ rtc_library("perf_test") { ] deps = [ "../api:array_view", + "../api/numerics", "../rtc_base:checks", "../rtc_base:criticalsection", "../rtc_base:logging", "../rtc_base:rtc_numerics", - "//third_party/abseil-cpp/absl/flags:flag", + "../rtc_base:stringutils", + "../rtc_base/synchronization:mutex", + "../test:fileutils", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", ] if (rtc_enable_protobuf) { @@ -262,8 +290,8 @@ if (is_ios) { deps = [ ":perf_test", "../sdk:helpers_objc", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] configs += [ ":test_support_objc_config" ] } @@ -289,7 +317,7 @@ config("suppress_warning_4373") { config("test_main_direct_config") { visibility = [ ":*" ] - defines = [ "UNIT_TEST" ] + defines = [ "WEBRTC_UNIT_TEST" ] } rtc_source_set("test_support") { visibility = [ "*" ] @@ -341,9 +369,9 @@ rtc_library("video_test_support") { ":test_support", ":video_test_common", "../api:scoped_refptr", + 
"../api:sequence_checker", "../api/video:encoded_image", "../api/video:video_frame", - "../api/video:video_frame_i420", "../api/video_codecs:video_codecs_api", "../common_video", "../media:rtc_media_base", @@ -357,10 +385,9 @@ rtc_library("video_test_support") { "../rtc_base:logging", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_event", - "../rtc_base/synchronization:sequence_checker", "../rtc_base/system:file_wrapper", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] if (!is_ios) { deps += [ "//third_party:jpeg" ] @@ -374,7 +401,26 @@ rtc_library("video_test_support") { } } -if (rtc_include_tests) { +if (rtc_include_tests && enable_google_benchmarks) { + rtc_library("benchmark_main") { + testonly = true + sources = [ "benchmark_main.cc" ] + + deps = [ "//third_party/google_benchmark" ] + } +} + +if (rtc_include_tests && !build_with_chromium) { + rtc_library("resources_dir_flag") { + testonly = true + visibility = [ "*" ] + sources = [ + "testsupport/resources_dir_flag.cc", + "testsupport/resources_dir_flag.h", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag" ] + } + rtc_library("test_main_lib") { visibility = [ "*" ] testonly = true @@ -392,10 +438,12 @@ if (rtc_include_tests) { "../rtc_base:checks", "../rtc_base:logging", "../rtc_base:rtc_base_approved", + "../rtc_base:threading", "../system_wrappers:field_trial", "../system_wrappers:metrics", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/strings:strings", "//third_party/abseil-cpp/absl/types:optional", @@ -409,8 +457,13 @@ if (rtc_include_tests) { deps = [ ":test_main_lib", + ":test_support", + ] + + absl_deps = [ "//third_party/abseil-cpp/absl/debugging:failure_signal_handler", "//third_party/abseil-cpp/absl/debugging:symbolize", + "//third_party/abseil-cpp/absl/flags:parse", ] } @@ -424,6 
+477,8 @@ if (rtc_include_tests) { ":fileutils", "../rtc_base:logging", "../rtc_base/system:file_wrapper", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/flags:parse", ] @@ -474,7 +529,6 @@ if (rtc_include_tests) { "../api/test/video:function_video_factory", "../api/video:encoded_image", "../api/video:video_frame", - "../api/video:video_frame_i420", "../api/video_codecs:video_codecs_api", "../call:video_stream_api", "../common_video", @@ -490,6 +544,7 @@ if (rtc_include_tests) { "../rtc_base:criticalsection", "../rtc_base:rtc_event", "../rtc_base:rtc_task_queue", + "../rtc_base/synchronization:mutex", "../rtc_base/system:file_wrapper", "../rtc_base/task_utils:to_queued_task", "pc/e2e:e2e_unittests", @@ -497,6 +552,8 @@ if (rtc_include_tests) { "scenario:scenario_unittests", "time_controller:time_controller", "time_controller:time_controller_unittests", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/strings", "//third_party/abseil-cpp/absl/types:optional", @@ -535,7 +592,7 @@ if (rtc_include_tests) { deps += [ ":test_support_unittests_bundle_data" ] } - if (!is_android && !build_with_chromium) { + if (!is_android) { # This is needed in order to avoid: # undefined symbol: webrtc::videocapturemodule::VideoCaptureImpl::Create deps += [ "../modules/video_capture:video_capture_internal_impl" ] @@ -587,8 +644,8 @@ rtc_library("fileutils") { ":fileutils_override_impl", "../rtc_base:checks", "../rtc_base:stringutils", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] if (is_ios) { deps += [ ":fileutils_ios_objc" ] } @@ -596,20 +653,10 @@ rtc_library("fileutils") { deps += [ ":fileutils_mac_objc" ] } if (is_win) { - deps += [ "../rtc_base" ] + deps += [ "../rtc_base:win32" ] } } -rtc_library("resources_dir_flag") { - testonly = true - visibility = [ "*" ] - sources = [ - "testsupport/resources_dir_flag.cc", - 
"testsupport/resources_dir_flag.h", - ] - deps = [ "//third_party/abseil-cpp/absl/flags:flag" ] -} - # We separate header into own target to make it possible for downstream # projects to override implementation. rtc_source_set("fileutils_override_api") { @@ -626,8 +673,8 @@ rtc_library("fileutils_override_impl") { "../rtc_base:checks", "../rtc_base:macromagic", "../rtc_base:stringutils", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] if (is_ios) { deps += [ ":fileutils_ios_objc" ] } @@ -635,7 +682,7 @@ rtc_library("fileutils_override_impl") { deps += [ ":fileutils_mac_objc" ] } if (is_win) { - deps += [ "../rtc_base" ] + deps += [ "../rtc_base:win32" ] } } @@ -677,8 +724,8 @@ rtc_library("fileutils_unittests") { ":fileutils", ":test_support", "../rtc_base:checks", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("direct_transport") { @@ -689,19 +736,20 @@ rtc_library("direct_transport") { "direct_transport.h", ] deps = [ - ":rtp_test_utils", + "../api:sequence_checker", "../api:simulated_network_api", "../api:transport_api", "../api/task_queue", "../api/units:time_delta", "../call:call_interfaces", "../call:simulated_packet_receiver", + "../modules/rtp_rtcp:rtp_rtcp_format", "../rtc_base:macromagic", "../rtc_base:timeutils", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/synchronization:mutex", "../rtc_base/task_utils:repeating_task", - "//third_party/abseil-cpp/absl/memory", ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] public_deps = # no-presubmit-check TODO(webrtc:8603) [ "../call:fake_network" ] } @@ -724,15 +772,14 @@ rtc_library("fake_video_codecs") { deps = [ "../api:fec_controller_api", "../api:scoped_refptr", + "../api:sequence_checker", "../api/task_queue", "../api/video:encoded_image", "../api/video:video_bitrate_allocation", "../api/video:video_frame", - 
"../api/video:video_frame_i420", "../api/video:video_rtp_headers", "../api/video_codecs:video_codecs_api", "../api/video_codecs:vp8_temporal_layers_factory", - "../modules:module_api", "../modules/video_coding:codec_globals_headers", "../modules/video_coding:video_codec_interface", "../modules/video_coding:video_coding_utility", @@ -741,10 +788,10 @@ rtc_library("fake_video_codecs") { "../rtc_base:macromagic", "../rtc_base:rtc_task_queue", "../rtc_base:timeutils", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/synchronization:mutex", "../system_wrappers", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("null_transport") { @@ -801,9 +848,9 @@ rtc_library("test_common") { ":fake_video_codecs", ":fileutils", ":mock_transport", - ":rtp_test_utils", ":test_support", ":video_test_common", + "../api:array_view", "../api:create_frame_generator", "../api:frame_generator_api", "../api:rtp_headers", @@ -837,11 +884,12 @@ rtc_library("test_common") { "../rtc_base:rtc_base", "../rtc_base:rtc_event", "../rtc_base:task_queue_for_test", + "../rtc_base:threading", "../rtc_base/task_utils:to_queued_task", "../system_wrappers", "../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] if (!is_android && !build_with_chromium) { deps += [ "../modules/video_capture:video_capture_internal_impl" ] } @@ -903,7 +951,7 @@ if (is_mac) { ":test_renderer_generic", "../rtc_base:rtc_base_approved", ] - libs = [ + frameworks = [ "Cocoa.framework", "OpenGL.framework", "CoreVideo.framework", @@ -934,17 +982,17 @@ rtc_library("test_renderer_generic") { ] deps += [ "../api:scoped_refptr" ] } - if (!(is_linux && rtc_use_x11) && !is_mac && !is_win) { + if (!((is_linux || is_chromeos) && rtc_use_x11) && !is_mac && !is_win) { sources += [ "null_platform_renderer.cc" ] } - if ((is_linux && rtc_use_x11) || is_mac) { 
+ if (((is_linux || is_chromeos) && rtc_use_x11) || is_mac) { sources += [ "gl/gl_renderer.cc", "gl/gl_renderer.h", ] } - if (is_linux && rtc_use_x11) { + if ((is_linux || is_chromeos) && rtc_use_x11) { sources += [ "linux/glx_renderer.cc", "linux/glx_renderer.h", @@ -987,8 +1035,8 @@ rtc_library("audio_codec_mocks") { "../api/audio_codecs:builtin_audio_decoder_factory", "../rtc_base:checks", "../rtc_base:refcount", - "//third_party/abseil-cpp/absl/memory", ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] } rtc_library("copy_to_file_audio_capturer") { @@ -1002,8 +1050,8 @@ rtc_library("copy_to_file_audio_capturer") { "../common_audio", "../modules/audio_device:audio_device_impl", "../rtc_base:rtc_base_approved", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("copy_to_file_audio_capturer_unittest") { diff --git a/test/DEPS b/test/DEPS index 62fd6d3ff7..0e51f003ab 100644 --- a/test/DEPS +++ b/test/DEPS @@ -6,6 +6,7 @@ include_rules = [ "+common_video", "+logging/rtc_event_log", "+media/base", + "+media/sctp", "+media/engine", "+modules/audio_coding", "+modules/congestion_controller", @@ -72,5 +73,11 @@ specific_include_rules = { ], ".*test_video_capturer_video_track_source.h": [ "+pc", + ], + "benchmark_main\.cc": [ + "+benchmark", + ], + "emulated_turn_server\.h": [ + "+p2p/base/turn_server.h", ] } diff --git a/test/OWNERS b/test/OWNERS index 6f8099845b..8439a9d5d4 100644 --- a/test/OWNERS +++ b/test/OWNERS @@ -3,4 +3,4 @@ sprang@webrtc.org srte@webrtc.org stefan@webrtc.org titovartem@webrtc.org - +landrey@webrtc.org diff --git a/test/android/AndroidManifest.xml b/test/android/AndroidManifest.xml index c1ddfd4a02..ad3f434b4f 100644 --- a/test/android/AndroidManifest.xml +++ b/test/android/AndroidManifest.xml @@ -14,7 +14,7 @@ be found in the AUTHORS file in the root of the source tree. 
android:versionCode="1" android:versionName="1.0"> - + @@ -39,7 +39,7 @@ be found in the AUTHORS file in the root of the source tree. - diff --git a/sdk/objc/Framework/Headers/WebRTC/RTCAudioSource.h b/test/benchmark_main.cc similarity index 59% rename from sdk/objc/Framework/Headers/WebRTC/RTCAudioSource.h rename to test/benchmark_main.cc index a7dc191319..1a79c24913 100644 --- a/sdk/objc/Framework/Headers/WebRTC/RTCAudioSource.h +++ b/test/benchmark_main.cc @@ -1,5 +1,5 @@ /* - * Copyright 2016 The WebRTC project authors. All Rights Reserved. + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,4 +8,10 @@ * be found in the AUTHORS file in the root of the source tree. */ -#import "api/peerconnection/RTCAudioSource.h" +#include "benchmark/benchmark.h" + +int main(int argc, char* argv[]) { + benchmark::Initialize(&argc, argv); + benchmark::RunSpecifiedBenchmarks(); + return 0; +} diff --git a/test/call_test.cc b/test/call_test.cc index 8d63b64bea..11230dae2f 100644 --- a/test/call_test.cc +++ b/test/call_test.cc @@ -388,9 +388,9 @@ void CallTest::AddMatchingVideoReceiveConfigs( decoder.video_format = SdpVideoFormat(video_send_config.rtp.payload_name); // Force fake decoders on non-selected simulcast streams. 
if (!decode_sub_stream || i == *decode_sub_stream) { - decoder.decoder_factory = decoder_factory; + video_recv_config.decoder_factory = decoder_factory; } else { - decoder.decoder_factory = &fake_decoder_factory_; + video_recv_config.decoder_factory = &fake_decoder_factory_; } video_recv_config.decoders.push_back(decoder); receive_configs->emplace_back(std::move(video_recv_config)); @@ -409,7 +409,7 @@ void CallTest::CreateMatchingAudioAndFecConfigs( if (num_flexfec_streams_ == 1) { CreateMatchingFecConfig(rtcp_send_transport, *GetVideoSendConfig()); for (const RtpExtension& extension : GetVideoSendConfig()->rtp.extensions) - GetFlexFecConfig()->rtp_header_extensions.push_back(extension); + GetFlexFecConfig()->rtp.extensions.push_back(extension); } } @@ -444,11 +444,13 @@ void CallTest::CreateMatchingFecConfig( const VideoSendStream::Config& send_config) { FlexfecReceiveStream::Config config(transport); config.payload_type = send_config.rtp.flexfec.payload_type; - config.remote_ssrc = send_config.rtp.flexfec.ssrc; + config.rtp.remote_ssrc = send_config.rtp.flexfec.ssrc; config.protected_media_ssrcs = send_config.rtp.flexfec.protected_media_ssrcs; - config.local_ssrc = kReceiverLocalVideoSsrc; - if (!video_receive_configs_.empty()) + config.rtp.local_ssrc = kReceiverLocalVideoSsrc; + if (!video_receive_configs_.empty()) { video_receive_configs_[0].rtp.protected_by_flexfec = true; + video_receive_configs_[0].rtp.packet_sink_ = this; + } flexfec_receive_configs_.push_back(config); } @@ -510,8 +512,6 @@ void CallTest::CreateVideoStreams() { video_receive_streams_.push_back(receiver_call_->CreateVideoReceiveStream( video_receive_configs_[i].Copy())); } - - AssociateFlexfecStreamsWithVideoStreams(); } void CallTest::CreateVideoSendStreams() { @@ -572,8 +572,6 @@ void CallTest::CreateFlexfecStreams() { receiver_call_->CreateFlexfecReceiveStream( flexfec_receive_configs_[i])); } - - AssociateFlexfecStreamsWithVideoStreams(); } void CallTest::ConnectVideoSourcesToStreams() 
{ @@ -582,23 +580,6 @@ void CallTest::ConnectVideoSourcesToStreams() { degradation_preference_); } -void CallTest::AssociateFlexfecStreamsWithVideoStreams() { - // All FlexFEC streams protect all of the video streams. - for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_) { - for (VideoReceiveStream* video_recv_stream : video_receive_streams_) { - video_recv_stream->AddSecondarySink(flexfec_recv_stream); - } - } -} - -void CallTest::DissociateFlexfecStreamsFromVideoStreams() { - for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_) { - for (VideoReceiveStream* video_recv_stream : video_receive_streams_) { - video_recv_stream->RemoveSecondarySink(flexfec_recv_stream); - } - } -} - void CallTest::Start() { StartVideoStreams(); if (audio_send_stream_) { @@ -632,8 +613,6 @@ void CallTest::StopVideoStreams() { } void CallTest::DestroyStreams() { - DissociateFlexfecStreamsFromVideoStreams(); - if (audio_send_stream_) sender_call_->DestroyAudioSendStream(audio_send_stream_); audio_send_stream_ = nullptr; @@ -691,6 +670,12 @@ FlexfecReceiveStream::Config* CallTest::GetFlexFecConfig() { return &flexfec_receive_configs_[0]; } +void CallTest::OnRtpPacket(const RtpPacketReceived& packet) { + // All FlexFEC streams protect all of the video streams. 
+ for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_) + flexfec_recv_stream->OnRtpPacket(packet); +} + absl::optional CallTest::GetRtpExtensionByUri( const std::string& uri) const { for (const auto& extension : rtp_extensions_) { diff --git a/test/call_test.h b/test/call_test.h index 4b26097b6c..adb21dd7f0 100644 --- a/test/call_test.h +++ b/test/call_test.h @@ -38,7 +38,7 @@ namespace test { class BaseTest; -class CallTest : public ::testing::Test { +class CallTest : public ::testing::Test, public RtpPacketSinkInterface { public: CallTest(); virtual ~CallTest(); @@ -156,9 +156,6 @@ class CallTest : public ::testing::Test { void ConnectVideoSourcesToStreams(); - void AssociateFlexfecStreamsWithVideoStreams(); - void DissociateFlexfecStreamsFromVideoStreams(); - void Start(); void StartVideoStreams(); void Stop(); @@ -177,6 +174,9 @@ class CallTest : public ::testing::Test { FlexfecReceiveStream::Config* GetFlexFecConfig(); TaskQueueBase* task_queue() { return task_queue_.get(); } + // RtpPacketSinkInterface implementation. 
+ void OnRtpPacket(const RtpPacketReceived& packet) override; + test::RunLoop loop_; Clock* const clock_; diff --git a/test/configurable_frame_size_encoder.cc b/test/configurable_frame_size_encoder.cc index dd259456fd..e3965ef770 100644 --- a/test/configurable_frame_size_encoder.cc +++ b/test/configurable_frame_size_encoder.cc @@ -17,7 +17,6 @@ #include #include "api/video/encoded_image.h" -#include "modules/include/module_common_types.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_error_codes.h" #include "rtc_base/checks.h" @@ -28,12 +27,8 @@ namespace test { ConfigurableFrameSizeEncoder::ConfigurableFrameSizeEncoder( size_t max_frame_size) : callback_(NULL), - max_frame_size_(max_frame_size), current_frame_size_(max_frame_size), - buffer_(new uint8_t[max_frame_size]), - codec_type_(kVideoCodecGeneric) { - memset(buffer_.get(), 0, max_frame_size); -} + codec_type_(kVideoCodecGeneric) {} ConfigurableFrameSizeEncoder::~ConfigurableFrameSizeEncoder() {} @@ -51,18 +46,18 @@ int32_t ConfigurableFrameSizeEncoder::InitEncode( int32_t ConfigurableFrameSizeEncoder::Encode( const VideoFrame& inputImage, const std::vector* frame_types) { - EncodedImage encodedImage(buffer_.get(), current_frame_size_, - max_frame_size_); - encodedImage._completeFrame = true; + EncodedImage encodedImage; + auto buffer = EncodedImageBuffer::Create(current_frame_size_); + memset(buffer->data(), 0, current_frame_size_); + encodedImage.SetEncodedData(buffer); encodedImage._encodedHeight = inputImage.height(); encodedImage._encodedWidth = inputImage.width(); encodedImage._frameType = VideoFrameType::kVideoFrameKey; encodedImage.SetTimestamp(inputImage.timestamp()); encodedImage.capture_time_ms_ = inputImage.render_time_ms(); - RTPFragmentationHeader* fragmentation = NULL; CodecSpecificInfo specific{}; specific.codecType = codec_type_; - callback_->OnEncodedImage(encodedImage, &specific, fragmentation); + 
callback_->OnEncodedImage(encodedImage, &specific); if (post_encode_callback_) { (*post_encode_callback_)(); } @@ -83,7 +78,6 @@ void ConfigurableFrameSizeEncoder::SetRates( const RateControlParameters& parameters) {} int32_t ConfigurableFrameSizeEncoder::SetFrameSize(size_t size) { - RTC_DCHECK_LE(size, max_frame_size_); current_frame_size_ = size; return WEBRTC_VIDEO_CODEC_OK; } diff --git a/test/configurable_frame_size_encoder.h b/test/configurable_frame_size_encoder.h index 0ffe3b22cd..8dd5157b5b 100644 --- a/test/configurable_frame_size_encoder.h +++ b/test/configurable_frame_size_encoder.h @@ -60,9 +60,7 @@ class ConfigurableFrameSizeEncoder : public VideoEncoder { EncodedImageCallback* callback_; absl::optional> post_encode_callback_; - const size_t max_frame_size_; size_t current_frame_size_; - std::unique_ptr buffer_; VideoCodecType codec_type_; }; diff --git a/test/direct_transport.cc b/test/direct_transport.cc index f4ae047870..7e9c5aefeb 100644 --- a/test/direct_transport.cc +++ b/test/direct_transport.cc @@ -14,9 +14,9 @@ #include "api/units/time_delta.h" #include "call/call.h" #include "call/fake_network_pipe.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/time_utils.h" -#include "test/rtp_header_parser.h" namespace webrtc { namespace test { @@ -26,7 +26,7 @@ Demuxer::Demuxer(const std::map& payload_type_map) MediaType Demuxer::GetMediaType(const uint8_t* packet_data, const size_t packet_length) const { - if (!RtpHeaderParser::IsRtcp(packet_data, packet_length)) { + if (IsRtpPacket(rtc::MakeArrayView(packet_data, packet_length))) { RTC_CHECK_GE(packet_length, 2); const uint8_t payload_type = packet_data[1] & 0x7f; std::map::const_iterator it = @@ -83,7 +83,7 @@ void DirectTransport::SendPacket(const uint8_t* data, size_t length) { int64_t send_time_us = rtc::TimeMicros(); fake_network_->DeliverPacket(media_type, rtc::CopyOnWriteBuffer(data, length), send_time_us); - rtc::CritScope 
cs(&process_lock_); + MutexLock lock(&process_lock_); if (!next_process_task_.Running()) ProcessPackets(); } @@ -112,7 +112,7 @@ void DirectTransport::ProcessPackets() { if (auto delay_ms = fake_network_->TimeUntilNextProcess()) return TimeDelta::Millis(*delay_ms); // Otherwise stop the task. - rtc::CritScope cs(&process_lock_); + MutexLock lock(&process_lock_); next_process_task_.Stop(); // Since this task is stopped, return value doesn't matter. return TimeDelta::Zero(); diff --git a/test/direct_transport.h b/test/direct_transport.h index e0b2251eea..34b68555d5 100644 --- a/test/direct_transport.h +++ b/test/direct_transport.h @@ -13,11 +13,12 @@ #include #include "api/call/transport.h" +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_base.h" #include "api/test/simulated_network.h" #include "call/call.h" #include "call/simulated_packet_receiver.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/thread_annotations.h" @@ -66,7 +67,7 @@ class DirectTransport : public Transport { TaskQueueBase* const task_queue_; - rtc::CriticalSection process_lock_; + Mutex process_lock_; RepeatingTaskHandle next_process_task_ RTC_GUARDED_BY(&process_lock_); const Demuxer demuxer_; diff --git a/test/direct_transport_unittest.cc b/test/direct_transport_unittest.cc index 66ab5bcac1..ab00971089 100644 --- a/test/direct_transport_unittest.cc +++ b/test/direct_transport_unittest.cc @@ -18,12 +18,13 @@ namespace test { TEST(DemuxerTest, Demuxing) { constexpr uint8_t kVideoPayloadType = 100; constexpr uint8_t kAudioPayloadType = 101; - constexpr size_t kPacketSize = 10; + constexpr size_t kPacketSize = 12; Demuxer demuxer({{kVideoPayloadType, MediaType::VIDEO}, {kAudioPayloadType, MediaType::AUDIO}}); uint8_t data[kPacketSize]; memset(data, 0, kPacketSize); + data[0] = 0x80; data[1] = kVideoPayloadType; EXPECT_EQ(demuxer.GetMediaType(data, 
kPacketSize), MediaType::VIDEO); data[1] = kAudioPayloadType; diff --git a/test/drifting_clock.cc b/test/drifting_clock.cc index 1a5154557e..47c8e56916 100644 --- a/test/drifting_clock.cc +++ b/test/drifting_clock.cc @@ -28,22 +28,18 @@ TimeDelta DriftingClock::Drift() const { return (now - start_time_) * drift_; } -Timestamp DriftingClock::CurrentTime() { - return clock_->CurrentTime() + Drift() / 1000.; +Timestamp DriftingClock::Drift(Timestamp timestamp) const { + return timestamp + Drift() / 1000.; } -NtpTime DriftingClock::CurrentNtpTime() { +NtpTime DriftingClock::Drift(NtpTime ntp_time) const { // NTP precision is 1/2^32 seconds, i.e. 2^32 ntp fractions = 1 second. const double kNtpFracPerMicroSecond = 4294.967296; // = 2^32 / 10^6 - NtpTime ntp = clock_->CurrentNtpTime(); - uint64_t total_fractions = static_cast(ntp); + uint64_t total_fractions = static_cast(ntp_time); total_fractions += Drift().us() * kNtpFracPerMicroSecond; return NtpTime(total_fractions); } -int64_t DriftingClock::CurrentNtpInMilliseconds() { - return clock_->CurrentNtpInMilliseconds() + Drift().ms(); -} } // namespace test } // namespace webrtc diff --git a/test/drifting_clock.h b/test/drifting_clock.h index 2539b61786..3471c008a1 100644 --- a/test/drifting_clock.h +++ b/test/drifting_clock.h @@ -30,12 +30,16 @@ class DriftingClock : public Clock { return 1.0f - percent / 100.0f; } - Timestamp CurrentTime() override; - NtpTime CurrentNtpTime() override; - int64_t CurrentNtpInMilliseconds() override; + Timestamp CurrentTime() override { return Drift(clock_->CurrentTime()); } + NtpTime CurrentNtpTime() override { return Drift(clock_->CurrentNtpTime()); } + NtpTime ConvertTimestampToNtpTime(Timestamp timestamp) override { + return Drift(clock_->ConvertTimestampToNtpTime(timestamp)); + } private: TimeDelta Drift() const; + Timestamp Drift(Timestamp timestamp) const; + NtpTime Drift(NtpTime ntp_time) const; Clock* const clock_; const float drift_; diff --git a/test/encoder_settings.cc 
b/test/encoder_settings.cc index ff3b096ef6..c8251883fd 100644 --- a/test/encoder_settings.cc +++ b/test/encoder_settings.cc @@ -10,7 +10,6 @@ #include "test/encoder_settings.h" #include -#include #include "api/scoped_refptr.h" #include "api/video_codecs/sdp_video_format.h" @@ -54,62 +53,46 @@ std::vector CreateVideoStreams( stream_settings[i].height = (i + 1) * height / encoder_config.number_of_streams; stream_settings[i].max_framerate = 30; + stream_settings[i].max_qp = 56; stream_settings[i].min_bitrate_bps = DefaultVideoStreamFactory::kDefaultMinBitratePerStream[i]; - int target_bitrate_bps = -1; - int max_bitrate_bps = -1; - // Use configured values instead of default values if values has been - // configured. - if (i < encoder_config.simulcast_layers.size()) { - const VideoStream& stream = encoder_config.simulcast_layers[i]; - - max_bitrate_bps = - stream.max_bitrate_bps > 0 - ? stream.max_bitrate_bps - : DefaultVideoStreamFactory::kMaxBitratePerStream[i]; - max_bitrate_bps = std::min(bitrate_left_bps, max_bitrate_bps); - - target_bitrate_bps = - stream.target_bitrate_bps > 0 - ? 
stream.target_bitrate_bps - : DefaultVideoStreamFactory::kMaxBitratePerStream[i]; - target_bitrate_bps = std::min(max_bitrate_bps, target_bitrate_bps); - - if (stream.min_bitrate_bps > 0) { - RTC_DCHECK_LE(stream.min_bitrate_bps, target_bitrate_bps); - stream_settings[i].min_bitrate_bps = stream.min_bitrate_bps; - } - if (stream.max_framerate > 0) { - stream_settings[i].max_framerate = stream.max_framerate; - } - if (stream.num_temporal_layers) { - RTC_DCHECK_GE(*stream.num_temporal_layers, 1); - stream_settings[i].num_temporal_layers = stream.num_temporal_layers; - } - if (stream.scale_resolution_down_by >= 1.0) { - stream_settings[i].width = width / stream.scale_resolution_down_by; - stream_settings[i].height = height / stream.scale_resolution_down_by; - } - } else { - max_bitrate_bps = std::min( - bitrate_left_bps, DefaultVideoStreamFactory::kMaxBitratePerStream[i]); - target_bitrate_bps = max_bitrate_bps; + // Use configured values instead of default values if set. + const VideoStream stream = (i < encoder_config.simulcast_layers.size()) + ? encoder_config.simulcast_layers[i] + : VideoStream(); + + int max_bitrate_bps = + stream.max_bitrate_bps > 0 + ? stream.max_bitrate_bps + : DefaultVideoStreamFactory::kMaxBitratePerStream[i]; + max_bitrate_bps = std::min(bitrate_left_bps, max_bitrate_bps); + + int target_bitrate_bps = stream.target_bitrate_bps > 0 + ? 
stream.target_bitrate_bps + : max_bitrate_bps; + target_bitrate_bps = std::min(max_bitrate_bps, target_bitrate_bps); + + if (stream.min_bitrate_bps > 0) { + RTC_DCHECK_LE(stream.min_bitrate_bps, target_bitrate_bps); + stream_settings[i].min_bitrate_bps = stream.min_bitrate_bps; + } + if (stream.max_framerate > 0) { + stream_settings[i].max_framerate = stream.max_framerate; + } + if (stream.num_temporal_layers) { + RTC_DCHECK_GE(*stream.num_temporal_layers, 1); + stream_settings[i].num_temporal_layers = stream.num_temporal_layers; + } + if (stream.scale_resolution_down_by >= 1.0) { + stream_settings[i].width = width / stream.scale_resolution_down_by; + stream_settings[i].height = height / stream.scale_resolution_down_by; } - - RTC_DCHECK_NE(target_bitrate_bps, -1); - RTC_DCHECK_NE(max_bitrate_bps, -1); stream_settings[i].target_bitrate_bps = target_bitrate_bps; stream_settings[i].max_bitrate_bps = max_bitrate_bps; - stream_settings[i].max_qp = 56; + stream_settings[i].active = + encoder_config.number_of_streams == 1 || stream.active; - if (i < encoder_config.simulcast_layers.size()) { - // Higher level controls are setting the active configuration for the - // VideoStream. 
- stream_settings[i].active = encoder_config.simulcast_layers[i].active; - } else { - stream_settings[i].active = true; - } bitrate_left_bps -= stream_settings[i].target_bitrate_bps; } @@ -137,7 +120,7 @@ void FillEncoderConfiguration(VideoCodecType codec_type, configuration->codec_type = codec_type; configuration->number_of_streams = num_streams; configuration->video_stream_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); configuration->max_bitrate_bps = 0; configuration->simulcast_layers = std::vector(num_streams); for (size_t i = 0; i < num_streams; ++i) { diff --git a/test/explicit_key_value_config.cc b/test/explicit_key_value_config.cc new file mode 100644 index 0000000000..69f725a9e2 --- /dev/null +++ b/test/explicit_key_value_config.cc @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "test/explicit_key_value_config.h" + +#include "api/transport/webrtc_key_value_config.h" +#include "rtc_base/checks.h" +#include "system_wrappers/include/field_trial.h" + +namespace webrtc { +namespace test { + +ExplicitKeyValueConfig::ExplicitKeyValueConfig(const std::string& s) { + std::string::size_type field_start = 0; + while (field_start < s.size()) { + std::string::size_type separator_pos = s.find('/', field_start); + RTC_CHECK_NE(separator_pos, std::string::npos) + << "Missing separator '/' after field trial key."; + RTC_CHECK_GT(separator_pos, field_start) + << "Field trial key cannot be empty."; + std::string key = s.substr(field_start, separator_pos - field_start); + field_start = separator_pos + 1; + + RTC_CHECK_LT(field_start, s.size()) + << "Missing value after field trial key. String ended."; + separator_pos = s.find('/', field_start); + RTC_CHECK_NE(separator_pos, std::string::npos) + << "Missing terminating '/' in field trial string."; + RTC_CHECK_GT(separator_pos, field_start) + << "Field trial value cannot be empty."; + std::string value = s.substr(field_start, separator_pos - field_start); + field_start = separator_pos + 1; + + key_value_map_[key] = value; + } + // This check is technically redundant due to earlier checks. + // We nevertheless keep the check to make it clear that the entire + // string has been processed, and without indexing past the end. + RTC_CHECK_EQ(field_start, s.size()); +} + +std::string ExplicitKeyValueConfig::Lookup(absl::string_view key) const { + auto it = key_value_map_.find(std::string(key)); + if (it != key_value_map_.end()) + return it->second; + return ""; +} + +} // namespace test +} // namespace webrtc diff --git a/test/explicit_key_value_config.h b/test/explicit_key_value_config.h new file mode 100644 index 0000000000..9a3bc84f60 --- /dev/null +++ b/test/explicit_key_value_config.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef TEST_EXPLICIT_KEY_VALUE_CONFIG_H_ +#define TEST_EXPLICIT_KEY_VALUE_CONFIG_H_ + +#include +#include + +#include "absl/strings/string_view.h" +#include "api/transport/webrtc_key_value_config.h" + +namespace webrtc { +namespace test { + +class ExplicitKeyValueConfig : public WebRtcKeyValueConfig { + public: + explicit ExplicitKeyValueConfig(const std::string& s); + std::string Lookup(absl::string_view key) const override; + + private: + std::map key_value_map_; +}; + +} // namespace test +} // namespace webrtc + +#endif // TEST_EXPLICIT_KEY_VALUE_CONFIG_H_ diff --git a/test/fake_decoder.cc b/test/fake_decoder.cc index e80c31cc40..f164bfbe03 100644 --- a/test/fake_decoder.cc +++ b/test/fake_decoder.cc @@ -27,11 +27,6 @@ namespace webrtc { namespace test { -namespace { -const int kDefaultWidth = 320; -const int kDefaultHeight = 180; -} // namespace - FakeDecoder::FakeDecoder() : FakeDecoder(nullptr) {} FakeDecoder::FakeDecoder(TaskQueueFactory* task_queue_factory) @@ -99,6 +94,12 @@ int32_t FakeDecoder::Release() { } const char* FakeDecoder::kImplementationName = "fake_decoder"; +VideoDecoder::DecoderInfo FakeDecoder::GetDecoderInfo() const { + DecoderInfo info; + info.implementation_name = kImplementationName; + info.is_hardware_accelerated = false; + return info; +} const char* FakeDecoder::ImplementationName() const { return kImplementationName; } diff --git a/test/fake_decoder.h b/test/fake_decoder.h index 055c55beca..6a5d6cb419 100644 --- a/test/fake_decoder.h +++ b/test/fake_decoder.h @@ -25,6 +25,8 @@ namespace test { class FakeDecoder : public VideoDecoder { public: + enum { kDefaultWidth = 320, kDefaultHeight = 180 }; 
+ FakeDecoder(); explicit FakeDecoder(TaskQueueFactory* task_queue_factory); virtual ~FakeDecoder() {} @@ -41,6 +43,7 @@ class FakeDecoder : public VideoDecoder { int32_t Release() override; + DecoderInfo GetDecoderInfo() const override; const char* ImplementationName() const override; static const char* kImplementationName; diff --git a/test/fake_encoder.cc b/test/fake_encoder.cc index 2959559910..814be280ec 100644 --- a/test/fake_encoder.cc +++ b/test/fake_encoder.cc @@ -67,19 +67,19 @@ void FakeEncoder::SetFecControllerOverride( void FakeEncoder::SetMaxBitrate(int max_kbps) { RTC_DCHECK_GE(max_kbps, -1); // max_kbps == -1 disables it. - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); max_target_bitrate_kbps_ = max_kbps; SetRatesLocked(current_rate_settings_); } void FakeEncoder::SetQp(int qp) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); qp_ = qp; } int32_t FakeEncoder::InitEncode(const VideoCodec* config, const Settings& settings) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); config_ = *config; current_rate_settings_.bitrate.SetBitrate(0, 0, config_.startBitrate * 1000); current_rate_settings_.framerate_fps = config_.maxFramerate; @@ -92,7 +92,7 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image, const std::vector* frame_types) { unsigned char max_framerate; unsigned char num_simulcast_streams; - SimulcastStream simulcast_streams[kMaxSimulcastStreams]; + SpatialLayer simulcast_streams[kMaxSimulcastStreams]; EncodedImageCallback* callback; RateControlParameters rates; VideoCodecMode mode; @@ -100,7 +100,7 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image, uint32_t counter; absl::optional qp; { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); max_framerate = config_.maxFramerate; num_simulcast_streams = config_.numberOfSimulcastStreams; for (int i = 0; i < num_simulcast_streams; ++i) { @@ -128,14 +128,15 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image, continue; } - 
EncodedImage encoded; - encoded.SetEncodedData( - EncodedImageBuffer::Create(frame_info.layers[i].size)); - + auto buffer = EncodedImageBuffer::Create(frame_info.layers[i].size); // Fill the buffer with arbitrary data. Write someting to make Asan happy. - memset(encoded.data(), 9, frame_info.layers[i].size); + memset(buffer->data(), 9, frame_info.layers[i].size); // Write a counter to the image to make each frame unique. - WriteCounter(encoded.data() + frame_info.layers[i].size - 4, counter); + WriteCounter(buffer->data() + frame_info.layers[i].size - 4, counter); + + EncodedImage encoded; + encoded.SetEncodedData(buffer); + encoded.SetTimestamp(input_image.timestamp()); encoded._frameType = frame_info.keyframe ? VideoFrameType::kVideoFrameKey : VideoFrameType::kVideoFrameDelta; @@ -144,23 +145,22 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image, if (qp) encoded.qp_ = *qp; encoded.SetSpatialIndex(i); - CodecSpecificInfo codec_specific; - std::unique_ptr fragmentation = - EncodeHook(&encoded, &codec_specific); + CodecSpecificInfo codec_specific = EncodeHook(encoded, buffer); - if (callback->OnEncodedImage(encoded, &codec_specific, fragmentation.get()) - .error != EncodedImageCallback::Result::OK) { + if (callback->OnEncodedImage(encoded, &codec_specific).error != + EncodedImageCallback::Result::OK) { return -1; } } return 0; } -std::unique_ptr FakeEncoder::EncodeHook( - EncodedImage* encoded_image, - CodecSpecificInfo* codec_specific) { - codec_specific->codecType = kVideoCodecGeneric; - return nullptr; +CodecSpecificInfo FakeEncoder::EncodeHook( + EncodedImage& encoded_image, + rtc::scoped_refptr buffer) { + CodecSpecificInfo codec_specific; + codec_specific.codecType = kVideoCodecGeneric; + return codec_specific; } FakeEncoder::FrameInfo FakeEncoder::NextFrame( @@ -168,7 +168,7 @@ FakeEncoder::FrameInfo FakeEncoder::NextFrame( bool keyframe, uint8_t num_simulcast_streams, const VideoBitrateAllocation& target_bitrate, - SimulcastStream 
simulcast_streams[kMaxSimulcastStreams], + SpatialLayer simulcast_streams[kMaxSimulcastStreams], int framerate) { FrameInfo frame_info; frame_info.keyframe = keyframe; @@ -182,7 +182,7 @@ FakeEncoder::FrameInfo FakeEncoder::NextFrame( } } - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); for (uint8_t i = 0; i < num_simulcast_streams; ++i) { if (target_bitrate.GetBitrate(i, 0) > 0) { int temporal_id = last_frame_info_.layers.size() > i @@ -232,7 +232,7 @@ FakeEncoder::FrameInfo FakeEncoder::NextFrame( int32_t FakeEncoder::RegisterEncodeCompleteCallback( EncodedImageCallback* callback) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); callback_ = callback; return 0; } @@ -242,7 +242,7 @@ int32_t FakeEncoder::Release() { } void FakeEncoder::SetRates(const RateControlParameters& parameters) { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); SetRatesLocked(parameters); } @@ -276,74 +276,75 @@ const char* FakeEncoder::kImplementationName = "fake_encoder"; VideoEncoder::EncoderInfo FakeEncoder::GetEncoderInfo() const { EncoderInfo info; info.implementation_name = kImplementationName; + MutexLock lock(&mutex_); + for (int sid = 0; sid < config_.numberOfSimulcastStreams; ++sid) { + int number_of_temporal_layers = + config_.simulcastStream[sid].numberOfTemporalLayers; + info.fps_allocation[sid].clear(); + for (int tid = 0; tid < number_of_temporal_layers; ++tid) { + // {1/4, 1/2, 1} allocation for num layers = 3. 
+ info.fps_allocation[sid].push_back(255 / + (number_of_temporal_layers - tid)); + } + } return info; } int FakeEncoder::GetConfiguredInputFramerate() const { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); return static_cast(current_rate_settings_.framerate_fps + 0.5); } FakeH264Encoder::FakeH264Encoder(Clock* clock) : FakeEncoder(clock), idr_counter_(0) {} -std::unique_ptr FakeH264Encoder::EncodeHook( - EncodedImage* encoded_image, - CodecSpecificInfo* codec_specific) { +CodecSpecificInfo FakeH264Encoder::EncodeHook( + EncodedImage& encoded_image, + rtc::scoped_refptr buffer) { + static constexpr std::array kStartCode = {0, 0, 1}; const size_t kSpsSize = 8; const size_t kPpsSize = 11; const int kIdrFrequency = 10; int current_idr_counter; { - rtc::CritScope cs(&local_crit_sect_); + MutexLock lock(&local_mutex_); current_idr_counter = idr_counter_; ++idr_counter_; } - auto fragmentation = std::make_unique(); + for (size_t i = 0; i < encoded_image.size(); ++i) { + buffer->data()[i] = static_cast(i); + } if (current_idr_counter % kIdrFrequency == 0 && - encoded_image->size() > kSpsSize + kPpsSize + 1) { - const size_t kNumSlices = 3; - fragmentation->VerifyAndAllocateFragmentationHeader(kNumSlices); - fragmentation->fragmentationOffset[0] = 0; - fragmentation->fragmentationLength[0] = kSpsSize; - fragmentation->fragmentationOffset[1] = kSpsSize; - fragmentation->fragmentationLength[1] = kPpsSize; - fragmentation->fragmentationOffset[2] = kSpsSize + kPpsSize; - fragmentation->fragmentationLength[2] = - encoded_image->size() - (kSpsSize + kPpsSize); + encoded_image.size() > kSpsSize + kPpsSize + 1 + 3 * kStartCode.size()) { const size_t kSpsNalHeader = 0x67; const size_t kPpsNalHeader = 0x68; const size_t kIdrNalHeader = 0x65; - encoded_image->data()[fragmentation->fragmentationOffset[0]] = - kSpsNalHeader; - encoded_image->data()[fragmentation->fragmentationOffset[1]] = - kPpsNalHeader; - encoded_image->data()[fragmentation->fragmentationOffset[2]] = - 
kIdrNalHeader; + uint8_t* data = buffer->data(); + memcpy(data, kStartCode.data(), kStartCode.size()); + data += kStartCode.size(); + data[0] = kSpsNalHeader; + data += kSpsSize; + + memcpy(data, kStartCode.data(), kStartCode.size()); + data += kStartCode.size(); + data[0] = kPpsNalHeader; + data += kPpsSize; + + memcpy(data, kStartCode.data(), kStartCode.size()); + data += kStartCode.size(); + data[0] = kIdrNalHeader; } else { - const size_t kNumSlices = 1; - fragmentation->VerifyAndAllocateFragmentationHeader(kNumSlices); - fragmentation->fragmentationOffset[0] = 0; - fragmentation->fragmentationLength[0] = encoded_image->size(); + memcpy(buffer->data(), kStartCode.data(), kStartCode.size()); const size_t kNalHeader = 0x41; - encoded_image->data()[fragmentation->fragmentationOffset[0]] = kNalHeader; - } - uint8_t value = 0; - int fragment_counter = 0; - for (size_t i = 0; i < encoded_image->size(); ++i) { - if (fragment_counter == fragmentation->fragmentationVectorSize || - i != fragmentation->fragmentationOffset[fragment_counter]) { - encoded_image->data()[i] = value++; - } else { - ++fragment_counter; - } + buffer->data()[kStartCode.size()] = kNalHeader; } - codec_specific->codecType = kVideoCodecH264; - codec_specific->codecSpecific.H264.packetization_mode = - H264PacketizationMode::NonInterleaved; - return fragmentation; + CodecSpecificInfo codec_specific; + codec_specific.codecType = kVideoCodecH264; + codec_specific.codecSpecific.H264.packetization_mode = + H264PacketizationMode::NonInterleaved; + return codec_specific; } DelayedEncoder::DelayedEncoder(Clock* clock, int delay_ms) diff --git a/test/fake_encoder.h b/test/fake_encoder.h index ade0e35560..9feed1455f 100644 --- a/test/fake_encoder.h +++ b/test/fake_encoder.h @@ -18,16 +18,15 @@ #include #include "api/fec_controller_override.h" +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_factory.h" #include "api/video/encoded_image.h" #include "api/video/video_bitrate_allocation.h" 
#include "api/video/video_frame.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" -#include "modules/include/module_common_types.h" #include "modules/video_coding/include/video_codec_interface.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" @@ -40,23 +39,23 @@ class FakeEncoder : public VideoEncoder { virtual ~FakeEncoder() = default; // Sets max bitrate. Not thread-safe, call before registering the encoder. - void SetMaxBitrate(int max_kbps) RTC_LOCKS_EXCLUDED(crit_sect_); - void SetQp(int qp) RTC_LOCKS_EXCLUDED(crit_sect_); + void SetMaxBitrate(int max_kbps) RTC_LOCKS_EXCLUDED(mutex_); + void SetQp(int qp) RTC_LOCKS_EXCLUDED(mutex_); void SetFecControllerOverride( FecControllerOverride* fec_controller_override) override; int32_t InitEncode(const VideoCodec* config, const Settings& settings) - RTC_LOCKS_EXCLUDED(crit_sect_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; int32_t Encode(const VideoFrame& input_image, const std::vector* frame_types) - RTC_LOCKS_EXCLUDED(crit_sect_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; int32_t RegisterEncodeCompleteCallback(EncodedImageCallback* callback) - RTC_LOCKS_EXCLUDED(crit_sect_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; int32_t Release() override; void SetRates(const RateControlParameters& parameters) - RTC_LOCKS_EXCLUDED(crit_sect_) override; - int GetConfiguredInputFramerate() const RTC_LOCKS_EXCLUDED(crit_sect_); + RTC_LOCKS_EXCLUDED(mutex_) override; + int GetConfiguredInputFramerate() const RTC_LOCKS_EXCLUDED(mutex_); EncoderInfo GetEncoderInfo() const override; static const char* kImplementationName; @@ -80,31 +79,31 @@ class FakeEncoder : public VideoEncoder { bool keyframe, uint8_t num_simulcast_streams, const VideoBitrateAllocation& target_bitrate, - SimulcastStream 
simulcast_streams[kMaxSimulcastStreams], - int framerate) RTC_LOCKS_EXCLUDED(crit_sect_); + SpatialLayer simulcast_streams[kMaxSimulcastStreams], + int framerate) RTC_LOCKS_EXCLUDED(mutex_); // Called before the frame is passed to callback_->OnEncodedImage, to let - // subclasses fill out codec_specific, possibly modify encodedImage. - // Returns an RTPFragmentationHeader, if needed by the codec. - virtual std::unique_ptr EncodeHook( - EncodedImage* encoded_image, - CodecSpecificInfo* codec_specific); + // subclasses fill out CodecSpecificInfo, possibly modify |encoded_image| or + // |buffer|. + virtual CodecSpecificInfo EncodeHook( + EncodedImage& encoded_image, + rtc::scoped_refptr buffer); void SetRatesLocked(const RateControlParameters& parameters) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_sect_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - FrameInfo last_frame_info_ RTC_GUARDED_BY(crit_sect_); + FrameInfo last_frame_info_ RTC_GUARDED_BY(mutex_); Clock* const clock_; - VideoCodec config_ RTC_GUARDED_BY(crit_sect_); - EncodedImageCallback* callback_ RTC_GUARDED_BY(crit_sect_); - RateControlParameters current_rate_settings_ RTC_GUARDED_BY(crit_sect_); - int max_target_bitrate_kbps_ RTC_GUARDED_BY(crit_sect_); - bool pending_keyframe_ RTC_GUARDED_BY(crit_sect_); - uint32_t counter_ RTC_GUARDED_BY(crit_sect_); - rtc::CriticalSection crit_sect_; + VideoCodec config_ RTC_GUARDED_BY(mutex_); + EncodedImageCallback* callback_ RTC_GUARDED_BY(mutex_); + RateControlParameters current_rate_settings_ RTC_GUARDED_BY(mutex_); + int max_target_bitrate_kbps_ RTC_GUARDED_BY(mutex_); + bool pending_keyframe_ RTC_GUARDED_BY(mutex_); + uint32_t counter_ RTC_GUARDED_BY(mutex_); + mutable Mutex mutex_; bool used_layers_[kMaxSimulcastStreams]; - absl::optional qp_ RTC_GUARDED_BY(crit_sect_); + absl::optional qp_ RTC_GUARDED_BY(mutex_); // Current byte debt to be payed over a number of frames. // The debt is acquired by keyframes overshooting the bitrate target. 
@@ -117,12 +116,12 @@ class FakeH264Encoder : public FakeEncoder { virtual ~FakeH264Encoder() = default; private: - std::unique_ptr EncodeHook( - EncodedImage* encoded_image, - CodecSpecificInfo* codec_specific) override; + CodecSpecificInfo EncodeHook( + EncodedImage& encoded_image, + rtc::scoped_refptr buffer) override; - int idr_counter_ RTC_GUARDED_BY(local_crit_sect_); - rtc::CriticalSection local_crit_sect_; + int idr_counter_ RTC_GUARDED_BY(local_mutex_); + Mutex local_mutex_; }; class DelayedEncoder : public test::FakeEncoder { diff --git a/test/fake_texture_frame.cc b/test/fake_texture_frame.cc index 4fa5e9d242..3f155184ab 100644 --- a/test/fake_texture_frame.cc +++ b/test/fake_texture_frame.cc @@ -23,7 +23,7 @@ VideoFrame FakeNativeBuffer::CreateFrame(int width, VideoRotation rotation) { return VideoFrame::Builder() .set_video_frame_buffer( - new rtc::RefCountedObject(width, height)) + rtc::make_ref_counted(width, height)) .set_timestamp_rtp(timestamp) .set_timestamp_ms(render_time_ms) .set_rotation(rotation) diff --git a/test/fake_vp8_decoder.cc b/test/fake_vp8_decoder.cc index faaa554259..ec636dca11 100644 --- a/test/fake_vp8_decoder.cc +++ b/test/fake_vp8_decoder.cc @@ -79,6 +79,13 @@ int32_t FakeVp8Decoder::Release() { } const char* FakeVp8Decoder::kImplementationName = "fake_vp8_decoder"; +VideoDecoder::DecoderInfo FakeVp8Decoder::GetDecoderInfo() const { + DecoderInfo info; + info.implementation_name = kImplementationName; + info.is_hardware_accelerated = false; + return info; +} + const char* FakeVp8Decoder::ImplementationName() const { return kImplementationName; } diff --git a/test/fake_vp8_decoder.h b/test/fake_vp8_decoder.h index 4f0fa3d8a9..2e469a17f3 100644 --- a/test/fake_vp8_decoder.h +++ b/test/fake_vp8_decoder.h @@ -38,8 +38,8 @@ class FakeVp8Decoder : public VideoDecoder { int32_t Release() override; + DecoderInfo GetDecoderInfo() const override; const char* ImplementationName() const override; - static const char* kImplementationName; 
private: diff --git a/test/fake_vp8_encoder.cc b/test/fake_vp8_encoder.cc index 8397e5f544..a24fab81bb 100644 --- a/test/fake_vp8_encoder.cc +++ b/test/fake_vp8_encoder.cc @@ -70,46 +70,59 @@ int32_t FakeVp8Encoder::Release() { return result; } -void FakeVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific, - size_t size_bytes, - VideoFrameType frame_type, - int stream_idx, - uint32_t timestamp) { +CodecSpecificInfo FakeVp8Encoder::PopulateCodecSpecific( + size_t size_bytes, + VideoFrameType frame_type, + int stream_idx, + uint32_t timestamp) { RTC_DCHECK_RUN_ON(&sequence_checker_); - codec_specific->codecType = kVideoCodecVP8; - codec_specific->codecSpecific.VP8.keyIdx = kNoKeyIdx; - codec_specific->codecSpecific.VP8.nonReference = false; + CodecSpecificInfo codec_specific; + codec_specific.codecType = kVideoCodecVP8; + codec_specific.codecSpecific.VP8.keyIdx = kNoKeyIdx; + codec_specific.codecSpecific.VP8.nonReference = false; if (size_bytes > 0) { frame_buffer_controller_->OnEncodeDone( stream_idx, timestamp, size_bytes, - frame_type == VideoFrameType::kVideoFrameKey, -1, codec_specific); + frame_type == VideoFrameType::kVideoFrameKey, -1, &codec_specific); } else { frame_buffer_controller_->OnFrameDropped(stream_idx, timestamp); } + return codec_specific; } -std::unique_ptr FakeVp8Encoder::EncodeHook( - EncodedImage* encoded_image, - CodecSpecificInfo* codec_specific) { +CodecSpecificInfo FakeVp8Encoder::EncodeHook( + EncodedImage& encoded_image, + rtc::scoped_refptr buffer) { RTC_DCHECK_RUN_ON(&sequence_checker_); - uint8_t stream_idx = encoded_image->SpatialIndex().value_or(0); + uint8_t stream_idx = encoded_image.SpatialIndex().value_or(0); frame_buffer_controller_->NextFrameConfig(stream_idx, - encoded_image->Timestamp()); - PopulateCodecSpecific(codec_specific, encoded_image->size(), - encoded_image->_frameType, stream_idx, - encoded_image->Timestamp()); + encoded_image.Timestamp()); + CodecSpecificInfo codec_specific = + 
PopulateCodecSpecific(encoded_image.size(), encoded_image._frameType, + stream_idx, encoded_image.Timestamp()); // Write width and height to the payload the same way as the real encoder // does. - WriteFakeVp8(encoded_image->data(), encoded_image->_encodedWidth, - encoded_image->_encodedHeight, - encoded_image->_frameType == VideoFrameType::kVideoFrameKey); - return nullptr; + WriteFakeVp8(buffer->data(), encoded_image._encodedWidth, + encoded_image._encodedHeight, + encoded_image._frameType == VideoFrameType::kVideoFrameKey); + return codec_specific; } VideoEncoder::EncoderInfo FakeVp8Encoder::GetEncoderInfo() const { EncoderInfo info; info.implementation_name = "FakeVp8Encoder"; + MutexLock lock(&mutex_); + for (int sid = 0; sid < config_.numberOfSimulcastStreams; ++sid) { + int number_of_temporal_layers = + config_.simulcastStream[sid].numberOfTemporalLayers; + info.fps_allocation[sid].clear(); + for (int tid = 0; tid < number_of_temporal_layers; ++tid) { + // {1/4, 1/2, 1} allocation for num layers = 3. 
+ info.fps_allocation[sid].push_back(255 / + (number_of_temporal_layers - tid)); + } + } return info; } diff --git a/test/fake_vp8_encoder.h b/test/fake_vp8_encoder.h index f1be8d4599..6aaf547379 100644 --- a/test/fake_vp8_encoder.h +++ b/test/fake_vp8_encoder.h @@ -17,14 +17,13 @@ #include #include "api/fec_controller_override.h" +#include "api/sequence_checker.h" #include "api/video/encoded_image.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/vp8_frame_buffer_controller.h" #include "api/video_codecs/vp8_temporal_layers.h" -#include "modules/include/module_common_types.h" #include "modules/video_coding/include/video_codec_interface.h" -#include "rtc_base/synchronization/sequence_checker.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" #include "test/fake_encoder.h" @@ -45,15 +44,14 @@ class FakeVp8Encoder : public FakeEncoder { EncoderInfo GetEncoderInfo() const override; private: - void PopulateCodecSpecific(CodecSpecificInfo* codec_specific, - size_t size_bytes, - VideoFrameType frame_type, - int stream_idx, - uint32_t timestamp); - - std::unique_ptr EncodeHook( - EncodedImage* encoded_image, - CodecSpecificInfo* codec_specific) override; + CodecSpecificInfo PopulateCodecSpecific(size_t size_bytes, + VideoFrameType frame_type, + int stream_idx, + uint32_t timestamp); + + CodecSpecificInfo EncodeHook( + EncodedImage& encoded_image, + rtc::scoped_refptr buffer) override; SequenceChecker sequence_checker_; diff --git a/test/fake_vp8_encoder_unittest.cc b/test/fake_vp8_encoder_unittest.cc index d35083f0d8..e79e8e421b 100644 --- a/test/fake_vp8_encoder_unittest.cc +++ b/test/fake_vp8_encoder_unittest.cc @@ -104,5 +104,11 @@ TEST(TestFakeVp8Codec, TestDecodeWidthHeightSet) { fixture->TestDecodeWidthHeightSet(); } +TEST(TestFakeVp8Codec, + TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation) { + auto fixture = CreateSpecificSimulcastTestFixture(); + 
fixture->TestEncoderInfoForDefaultTemporalLayerProfileHasFpsAllocation(); +} + } // namespace test } // namespace webrtc diff --git a/test/frame_forwarder.cc b/test/frame_forwarder.cc index d1a2ddb1c2..e89f753bd3 100644 --- a/test/frame_forwarder.cc +++ b/test/frame_forwarder.cc @@ -18,32 +18,42 @@ FrameForwarder::FrameForwarder() : sink_(nullptr) {} FrameForwarder::~FrameForwarder() {} void FrameForwarder::IncomingCapturedFrame(const VideoFrame& video_frame) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (sink_) sink_->OnFrame(video_frame); } void FrameForwarder::AddOrUpdateSink(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); + AddOrUpdateSinkLocked(sink, wants); +} + +void FrameForwarder::AddOrUpdateSinkLocked( + rtc::VideoSinkInterface* sink, + const rtc::VideoSinkWants& wants) { RTC_DCHECK(!sink_ || sink_ == sink); sink_ = sink; sink_wants_ = wants; } void FrameForwarder::RemoveSink(rtc::VideoSinkInterface* sink) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RTC_DCHECK_EQ(sink, sink_); sink_ = nullptr; } rtc::VideoSinkWants FrameForwarder::sink_wants() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); + return sink_wants_; +} + +rtc::VideoSinkWants FrameForwarder::sink_wants_locked() const { return sink_wants_; } bool FrameForwarder::has_sinks() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return sink_ != nullptr; } diff --git a/test/frame_forwarder.h b/test/frame_forwarder.h index cf29f5f074..bbf11cc939 100644 --- a/test/frame_forwarder.h +++ b/test/frame_forwarder.h @@ -12,7 +12,7 @@ #include "api/video/video_frame.h" #include "api/video/video_source_interface.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { namespace test { @@ -26,18 +26,26 @@ class FrameForwarder : public rtc::VideoSourceInterface { FrameForwarder(); ~FrameForwarder() override; // Forwards 
|video_frame| to the registered |sink_|. - virtual void IncomingCapturedFrame(const VideoFrame& video_frame); - rtc::VideoSinkWants sink_wants() const; - bool has_sinks() const; + virtual void IncomingCapturedFrame(const VideoFrame& video_frame) + RTC_LOCKS_EXCLUDED(mutex_); + rtc::VideoSinkWants sink_wants() const RTC_LOCKS_EXCLUDED(mutex_); + bool has_sinks() const RTC_LOCKS_EXCLUDED(mutex_); protected: + rtc::VideoSinkWants sink_wants_locked() const + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void AddOrUpdateSink(rtc::VideoSinkInterface* sink, - const rtc::VideoSinkWants& wants) override; - void RemoveSink(rtc::VideoSinkInterface* sink) override; + const rtc::VideoSinkWants& wants) + RTC_LOCKS_EXCLUDED(mutex_) override; + void AddOrUpdateSinkLocked(rtc::VideoSinkInterface* sink, + const rtc::VideoSinkWants& wants) + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void RemoveSink(rtc::VideoSinkInterface* sink) + RTC_LOCKS_EXCLUDED(mutex_) override; - rtc::CriticalSection crit_; - rtc::VideoSinkInterface* sink_ RTC_GUARDED_BY(crit_); - rtc::VideoSinkWants sink_wants_ RTC_GUARDED_BY(crit_); + mutable Mutex mutex_; + rtc::VideoSinkInterface* sink_ RTC_GUARDED_BY(mutex_); + rtc::VideoSinkWants sink_wants_ RTC_GUARDED_BY(mutex_); }; } // namespace test diff --git a/test/frame_generator.cc b/test/frame_generator.cc index 1f998427ac..913a4fb589 100644 --- a/test/frame_generator.cc +++ b/test/frame_generator.cc @@ -16,23 +16,15 @@ #include #include "api/video/i010_buffer.h" +#include "api/video/nv12_buffer.h" #include "api/video/video_rotation.h" #include "common_video/include/video_frame_buffer.h" #include "common_video/libyuv/include/webrtc_libyuv.h" -#include "rtc_base/bind.h" #include "rtc_base/checks.h" -#include "rtc_base/keep_ref_until_done.h" #include "test/frame_utils.h" namespace webrtc { namespace test { -namespace { - -// Helper method for keeping a reference to passed pointers. 
-void KeepBufferRefs(rtc::scoped_refptr, - rtc::scoped_refptr) {} - -} // namespace SquareGenerator::SquareGenerator(int width, int height, @@ -46,7 +38,7 @@ SquareGenerator::SquareGenerator(int width, } void SquareGenerator::ChangeResolution(size_t width, size_t height) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); width_ = static_cast(width); height_ = static_cast(height); RTC_CHECK(width_ > 0); @@ -65,12 +57,13 @@ rtc::scoped_refptr SquareGenerator::CreateI420Buffer(int width, } FrameGeneratorInterface::VideoFrameData SquareGenerator::NextFrame() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); rtc::scoped_refptr buffer = nullptr; switch (type_) { case OutputType::kI420: - case OutputType::kI010: { + case OutputType::kI010: + case OutputType::kNV12: { buffer = CreateI420Buffer(width_, height_); break; } @@ -79,12 +72,13 @@ FrameGeneratorInterface::VideoFrameData SquareGenerator::NextFrame() { CreateI420Buffer(width_, height_); rtc::scoped_refptr axx_buffer = CreateI420Buffer(width_, height_); - buffer = WrapI420ABuffer( - yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(), - yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(), - yuv_buffer->DataV(), yuv_buffer->StrideV(), axx_buffer->DataY(), - axx_buffer->StrideY(), - rtc::Bind(&KeepBufferRefs, yuv_buffer, axx_buffer)); + buffer = WrapI420ABuffer(yuv_buffer->width(), yuv_buffer->height(), + yuv_buffer->DataY(), yuv_buffer->StrideY(), + yuv_buffer->DataU(), yuv_buffer->StrideU(), + yuv_buffer->DataV(), yuv_buffer->StrideV(), + axx_buffer->DataY(), axx_buffer->StrideY(), + // To keep references alive. 
+ [yuv_buffer, axx_buffer] {}); break; } default: @@ -96,6 +90,8 @@ FrameGeneratorInterface::VideoFrameData SquareGenerator::NextFrame() { if (type_ == OutputType::kI010) { buffer = I010Buffer::Copy(*buffer->ToI420()); + } else if (type_ == OutputType::kNV12) { + buffer = NV12Buffer::Copy(*buffer->ToI420()); } return VideoFrameData(buffer, absl::nullopt); @@ -371,7 +367,8 @@ void ScrollingImageFrameGenerator::CropSourceToScrolledImage( &i420_buffer->DataY()[offset_y], i420_buffer->StrideY(), &i420_buffer->DataU()[offset_u], i420_buffer->StrideU(), &i420_buffer->DataV()[offset_v], i420_buffer->StrideV(), - KeepRefUntilDone(i420_buffer)), + // To keep reference alive. + [i420_buffer] {}), update_rect); } diff --git a/test/frame_generator.h b/test/frame_generator.h index 6f59c1ed0b..94e15cb0de 100644 --- a/test/frame_generator.h +++ b/test/frame_generator.h @@ -20,8 +20,8 @@ #include "api/video/video_frame.h" #include "api/video/video_frame_buffer.h" #include "api/video/video_source_interface.h" -#include "rtc_base/critical_section.h" #include "rtc_base/random.h" +#include "rtc_base/synchronization/mutex.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -57,11 +57,11 @@ class SquareGenerator : public FrameGeneratorInterface { const uint8_t yuv_a_; }; - rtc::CriticalSection crit_; + Mutex mutex_; const OutputType type_; - int width_ RTC_GUARDED_BY(&crit_); - int height_ RTC_GUARDED_BY(&crit_); - std::vector> squares_ RTC_GUARDED_BY(&crit_); + int width_ RTC_GUARDED_BY(&mutex_); + int height_ RTC_GUARDED_BY(&mutex_); + std::vector> squares_ RTC_GUARDED_BY(&mutex_); }; class YuvFileGenerator : public FrameGeneratorInterface { diff --git a/test/frame_generator_capturer.cc b/test/frame_generator_capturer.cc index 9806c83d83..a4c528e0f7 100644 --- a/test/frame_generator_capturer.cc +++ b/test/frame_generator_capturer.cc @@ -20,7 +20,6 @@ #include "absl/strings/match.h" #include "api/test/create_frame_generator.h" #include "rtc_base/checks.h" -#include 
"rtc_base/critical_section.h" #include "rtc_base/logging.h" #include "rtc_base/task_queue.h" #include "rtc_base/time_utils.h" @@ -32,7 +31,7 @@ namespace test { namespace { std::string TransformFilePath(std::string path) { static const std::string resource_prefix = "res://"; - int ext_pos = path.rfind("."); + int ext_pos = path.rfind('.'); if (ext_pos < 0) { return test::ResourcePath(path, "yuv"); } else if (absl::StartsWith(path, resource_prefix)) { @@ -150,13 +149,13 @@ std::unique_ptr FrameGeneratorCapturer::Create( } void FrameGeneratorCapturer::SetFakeRotation(VideoRotation rotation) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); fake_rotation_ = rotation; } void FrameGeneratorCapturer::SetFakeColorSpace( absl::optional color_space) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); fake_color_space_ = color_space; } @@ -176,7 +175,7 @@ bool FrameGeneratorCapturer::Init() { } void FrameGeneratorCapturer::InsertFrame() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (sending_) { FrameGeneratorInterface::VideoFrameData frame_data = frame_generator_->NextFrame(); @@ -205,7 +204,7 @@ void FrameGeneratorCapturer::InsertFrame() { void FrameGeneratorCapturer::Start() { { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); sending_ = true; } if (!frame_task_.Running()) { @@ -217,17 +216,17 @@ void FrameGeneratorCapturer::Start() { } void FrameGeneratorCapturer::Stop() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); sending_ = false; } void FrameGeneratorCapturer::ChangeResolution(size_t width, size_t height) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); frame_generator_->ChangeResolution(width, height); } void FrameGeneratorCapturer::ChangeFramerate(int target_framerate) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); RTC_CHECK(target_capture_fps_ > 0); if (target_framerate > source_fps_) RTC_LOG(LS_WARNING) << "Target framerate clamped from " << target_framerate @@ -245,7 +244,7 @@ void 
FrameGeneratorCapturer::ChangeFramerate(int target_framerate) { } void FrameGeneratorCapturer::SetSinkWantsObserver(SinkWantsObserver* observer) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); RTC_DCHECK(!sink_wants_observer_); sink_wants_observer_ = observer; } @@ -254,7 +253,7 @@ void FrameGeneratorCapturer::AddOrUpdateSink( rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) { TestVideoCapturer::AddOrUpdateSink(sink, wants); - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (sink_wants_observer_) { // Tests need to observe unmodified sink wants. sink_wants_observer_->OnSinkWantsChanged(sink, wants); @@ -266,7 +265,7 @@ void FrameGeneratorCapturer::RemoveSink( rtc::VideoSinkInterface* sink) { TestVideoCapturer::RemoveSink(sink); - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); UpdateFps(GetSinkWants().max_framerate_fps); } @@ -284,7 +283,7 @@ void FrameGeneratorCapturer::ForceFrame() { } int FrameGeneratorCapturer::GetCurrentConfiguredFramerate() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (wanted_fps_ && *wanted_fps_ < target_capture_fps_) return *wanted_fps_; return target_capture_fps_; diff --git a/test/frame_generator_capturer.h b/test/frame_generator_capturer.h index fd376e2d6f..1e915fca21 100644 --- a/test/frame_generator_capturer.h +++ b/test/frame_generator_capturer.h @@ -16,7 +16,7 @@ #include "api/task_queue/task_queue_factory.h" #include "api/test/frame_generator_interface.h" #include "api/video/video_frame.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" #include "rtc_base/task_utils/repeating_task.h" #include "system_wrappers/include/clock.h" @@ -157,7 +157,7 @@ class FrameGeneratorCapturer : public TestVideoCapturer { bool sending_; SinkWantsObserver* sink_wants_observer_ RTC_GUARDED_BY(&lock_); - rtc::CriticalSection lock_; + Mutex lock_; std::unique_ptr frame_generator_; int source_fps_ RTC_GUARDED_BY(&lock_); diff --git 
a/test/frame_generator_unittest.cc b/test/frame_generator_unittest.cc index 12d5111bff..8e5cde8c5f 100644 --- a/test/frame_generator_unittest.cc +++ b/test/frame_generator_unittest.cc @@ -54,7 +54,7 @@ class FrameGeneratorTest : public ::testing::Test { protected: void WriteYuvFile(FILE* file, uint8_t y, uint8_t u, uint8_t v) { - assert(file); + RTC_DCHECK(file); std::unique_ptr plane_buffer(new uint8_t[y_size]); memset(plane_buffer.get(), y, y_size); fwrite(plane_buffer.get(), 1, y_size, file); diff --git a/test/fuzzers/BUILD.gn b/test/fuzzers/BUILD.gn index 96376a2e83..9824bebb5f 100644 --- a/test/fuzzers/BUILD.gn +++ b/test/fuzzers/BUILD.gn @@ -18,11 +18,12 @@ rtc_library("webrtc_fuzzer_main") { ] # When WebRTC fuzzer tests are built on Chromium bots they need to link - # with Chromium's implementation of metrics and field trial. + # with Chromium's implementation of metrics, field trial, and system time. if (build_with_chromium) { deps += [ "../../../webrtc_overrides:field_trial", "../../../webrtc_overrides:metrics", + "../../../webrtc_overrides:system_time", ] } } @@ -40,6 +41,10 @@ rtc_library("fuzz_data_helper") { visibility = [ ":*" ] # Only targets in this file can depend on this. } +set_defaults("webrtc_fuzzer_test") { + absl_deps = [] +} + template("webrtc_fuzzer_test") { fuzzer_test(target_name) { forward_variables_from(invoker, "*") @@ -47,6 +52,21 @@ template("webrtc_fuzzer_test") { ":fuzz_data_helper", ":webrtc_fuzzer_main", ] + + # If absl_deps is [], no action is needed. If not [], then it needs to be + # converted to //third_party/abseil-cpp:absl when build_with_chromium=true + # otherwise it just needs to be added to deps. 
+ if (absl_deps != []) { + if (!defined(deps)) { + deps = [] + } + if (build_with_chromium) { + deps += [ "//third_party/abseil-cpp:absl" ] + } else { + deps += absl_deps + } + } + if (!build_with_chromium && is_clang) { suppressed_configs = [ "//build/config/clang:find_bad_constructs" ] } @@ -194,10 +214,8 @@ webrtc_fuzzer_test("rtcp_receiver_fuzzer") { webrtc_fuzzer_test("rtp_packet_fuzzer") { sources = [ "rtp_packet_fuzzer.cc" ] - deps = [ - "../../modules/rtp_rtcp:rtp_rtcp_format", - "//third_party/abseil-cpp/absl/types:optional", - ] + deps = [ "../../modules/rtp_rtcp:rtp_rtcp_format" ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] seed_corpus = "corpora/rtp-corpus" } @@ -227,6 +245,7 @@ webrtc_fuzzer_test("congestion_controller_feedback_fuzzer") { "../../modules/remote_bitrate_estimator", "../../modules/rtp_rtcp:rtp_rtcp_format", ] + absl_deps = [ "//third_party/abseil-cpp/absl/functional:bind_front" ] } rtc_library("audio_decoder_fuzzer") { @@ -240,8 +259,8 @@ rtc_library("audio_decoder_fuzzer") { "../../modules/rtp_rtcp:rtp_rtcp_format", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } webrtc_fuzzer_test("audio_decoder_ilbc_fuzzer") { @@ -318,7 +337,7 @@ webrtc_fuzzer_test("audio_encoder_opus_fuzzer") { } webrtc_fuzzer_test("audio_encoder_isac_fixed_fuzzer") { - sources = [ "audio_encoder_isax_fixed_fuzzer.cc" ] + sources = [ "audio_encoder_isac_fixed_fuzzer.cc" ] deps = [ ":audio_encoder_fuzzer", "../../api/audio_codecs/isac:audio_encoder_isac_fix", @@ -327,7 +346,7 @@ webrtc_fuzzer_test("audio_encoder_isac_fixed_fuzzer") { } webrtc_fuzzer_test("audio_encoder_isac_float_fuzzer") { - sources = [ "audio_encoder_isax_float_fuzzer.cc" ] + sources = [ "audio_encoder_isac_float_fuzzer.cc" ] deps = [ ":audio_encoder_fuzzer", "../../api/audio_codecs/isac:audio_encoder_isac_float", @@ -388,6 +407,23 @@ 
webrtc_fuzzer_test("sdp_parser_fuzzer") { seed_corpus = "corpora/sdp-corpus" } +if (!build_with_chromium) { + # This target depends on test infrastructure that can't be built + # with Chromium at the moment. + # TODO(bugs.chromium.org/12534): Make this fuzzer build in Chromium. + + webrtc_fuzzer_test("sdp_integration_fuzzer") { + sources = [ "sdp_integration_fuzzer.cc" ] + deps = [ + "../../api:libjingle_peerconnection_api", + "../../pc:integration_test_helpers", + "../../pc:libjingle_peerconnection", + "../../test:test_support", + ] + seed_corpus = "corpora/sdp-corpus" + } +} + webrtc_fuzzer_test("stun_parser_fuzzer") { sources = [ "stun_parser_fuzzer.cc" ] deps = [ @@ -408,20 +444,12 @@ webrtc_fuzzer_test("stun_validator_fuzzer") { dict = "corpora/stun.tokens" } -webrtc_fuzzer_test("mdns_parser_fuzzer") { - sources = [ "mdns_parser_fuzzer.cc" ] - deps = [ - "../../p2p:rtc_p2p", - "../../rtc_base:rtc_base_approved", - ] - seed_corpus = "corpora/mdns-corpus" -} - webrtc_fuzzer_test("pseudotcp_parser_fuzzer") { sources = [ "pseudotcp_parser_fuzzer.cc" ] deps = [ "../../p2p:rtc_p2p", "../../rtc_base", + "../../rtc_base:threading", ] } @@ -439,8 +467,8 @@ rtc_library("audio_processing_fuzzer_helper") { "../../modules/audio_processing:audio_frame_proxies", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } webrtc_fuzzer_test("audio_processing_fuzzer") { @@ -461,8 +489,8 @@ webrtc_fuzzer_test("audio_processing_fuzzer") { "../../rtc_base:rtc_task_queue", "../../rtc_base:safe_minmax", "../../system_wrappers:field_trial", - "//third_party/abseil-cpp/absl/memory", ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] seed_corpus = "corpora/audio_processing-corpus" } @@ -567,15 +595,6 @@ webrtc_fuzzer_test("string_to_number_fuzzer") { seed_corpus = "corpora/string_to_number-corpus" } -webrtc_fuzzer_test("rtp_rtcp_demuxer_helper_fuzzer") { - 
sources = [ "rtp_rtcp_demuxer_helper_fuzzer.cc" ] - deps = [ - "../../api:array_view", - "../../call:rtp_receiver", - ] - seed_corpus = "corpora/rtcp-corpus" -} - webrtc_fuzzer_test("sctp_utils_fuzzer") { sources = [ "sctp_utils_fuzzer.cc" ] deps = [ @@ -586,14 +605,29 @@ webrtc_fuzzer_test("sctp_utils_fuzzer") { ] } +webrtc_fuzzer_test("dcsctp_socket_fuzzer") { + sources = [ "dcsctp_socket_fuzzer.cc" ] + deps = [ + "../../net/dcsctp/fuzzers:dcsctp_fuzzers", + "../../net/dcsctp/public:socket", + "../../net/dcsctp/public:types", + "../../net/dcsctp/socket:dcsctp_socket", + "../../rtc_base:rtc_base_approved", + ] +} + webrtc_fuzzer_test("rtp_header_parser_fuzzer") { sources = [ "rtp_header_parser_fuzzer.cc" ] deps = [ "../:rtp_test_utils" ] } webrtc_fuzzer_test("ssl_certificate_fuzzer") { - sources = [ "rtp_header_parser_fuzzer.cc" ] - deps = [ "../:rtp_test_utils" ] + sources = [ "ssl_certificate_fuzzer.cc" ] + deps = [ + "../:rtp_test_utils", + "../../rtc_base", + "../../rtc_base:stringutils", + ] } webrtc_fuzzer_test("vp8_replay_fuzzer") { @@ -605,6 +639,30 @@ webrtc_fuzzer_test("vp8_replay_fuzzer") { seed_corpus = "corpora/rtpdump-corpus/vp8" } +if (rtc_build_libvpx) { + webrtc_fuzzer_test("vp9_encoder_references_fuzzer") { + sources = [ "vp9_encoder_references_fuzzer.cc" ] + deps = [ + "..:test_support", + "../../api:array_view", + "../../api/transport:webrtc_key_value_config", + "../../api/video:video_frame", + "../../api/video_codecs:video_codecs_api", + "../../modules/video_coding:frame_dependencies_calculator", + "../../modules/video_coding:mock_libvpx_interface", + "../../modules/video_coding:webrtc_vp9", + "../../rtc_base:safe_compare", + rtc_libvpx_dir, + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/base:core_headers", + "//third_party/abseil-cpp/absl/container:inlined_vector", + ] + defines = [ "RTC_ENABLE_VP9" ] + } +} + webrtc_fuzzer_test("vp9_replay_fuzzer") { sources = [ 
"vp9_replay_fuzzer.cc" ] deps = [ diff --git a/test/fuzzers/DEPS b/test/fuzzers/DEPS index 82631c4a1b..50b1c8adce 100644 --- a/test/fuzzers/DEPS +++ b/test/fuzzers/DEPS @@ -1,4 +1,5 @@ include_rules = [ "+audio", "+pc", + "+net/dcsctp", ] diff --git a/test/fuzzers/audio_encoder_isax_fixed_fuzzer.cc b/test/fuzzers/audio_encoder_isac_fixed_fuzzer.cc similarity index 100% rename from test/fuzzers/audio_encoder_isax_fixed_fuzzer.cc rename to test/fuzzers/audio_encoder_isac_fixed_fuzzer.cc diff --git a/test/fuzzers/audio_encoder_isax_float_fuzzer.cc b/test/fuzzers/audio_encoder_isac_float_fuzzer.cc similarity index 100% rename from test/fuzzers/audio_encoder_isax_float_fuzzer.cc rename to test/fuzzers/audio_encoder_isac_float_fuzzer.cc diff --git a/test/fuzzers/congestion_controller_feedback_fuzzer.cc b/test/fuzzers/congestion_controller_feedback_fuzzer.cc index 084c8c300a..06a73b0434 100644 --- a/test/fuzzers/congestion_controller_feedback_fuzzer.cc +++ b/test/fuzzers/congestion_controller_feedback_fuzzer.cc @@ -8,6 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include "absl/functional/bind_front.h" #include "modules/congestion_controller/include/receive_side_congestion_controller.h" #include "modules/pacing/packet_router.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" @@ -21,7 +22,10 @@ void FuzzOneInput(const uint8_t* data, size_t size) { return; SimulatedClock clock(data[i++]); PacketRouter packet_router; - ReceiveSideCongestionController cc(&clock, &packet_router); + ReceiveSideCongestionController cc( + &clock, + absl::bind_front(&PacketRouter::SendCombinedRtcpPacket, &packet_router), + absl::bind_front(&PacketRouter::SendRemb, &packet_router), nullptr); RemoteBitrateEstimator* rbe = cc.GetRemoteBitrateEstimator(true); RTPHeader header; header.ssrc = ByteReader::ReadBigEndian(&data[i]); diff --git a/test/fuzzers/corpora/README b/test/fuzzers/corpora/README index d29e169417..cc87025ff6 100644 --- a/test/fuzzers/corpora/README +++ b/test/fuzzers/corpora/README @@ -31,4 +31,7 @@ which header extensions to enable, and the first byte of the fuzz data is used for this. ### PseudoTCP ### -Very small corpus minimised from the unit tests. \ No newline at end of file +Very small corpus minimised from the unit tests. + +### SCTP ### +This corpus was extracted from a few manually recorded Wireshark dumps. 
diff --git a/test/fuzzers/corpora/sctp-packet-corpus/cookie-ack-sack.bin b/test/fuzzers/corpora/sctp-packet-corpus/cookie-ack-sack.bin new file mode 100644 index 0000000000..4374f5aad5 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/cookie-ack-sack.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data-data-data.bin b/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data-data-data.bin new file mode 100644 index 0000000000..1f1d0be301 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data-data-data.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data-data.bin b/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data-data.bin new file mode 100644 index 0000000000..21a0c22837 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data-data.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data.bin b/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data.bin new file mode 100644 index 0000000000..fc8600106e Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/cookie-echo-data.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/data-fragment1.bin b/test/fuzzers/corpora/sctp-packet-corpus/data-fragment1.bin new file mode 100644 index 0000000000..bec7b289e7 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/data-fragment1.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/forward-tsn.bin b/test/fuzzers/corpora/sctp-packet-corpus/forward-tsn.bin new file mode 100644 index 0000000000..ab98a0a4a7 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/forward-tsn.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/heartbeat-ack.bin b/test/fuzzers/corpora/sctp-packet-corpus/heartbeat-ack.bin new file mode 100644 index 0000000000..59200abe5e Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/heartbeat-ack.bin differ 
diff --git a/test/fuzzers/corpora/sctp-packet-corpus/heartbeat.bin b/test/fuzzers/corpora/sctp-packet-corpus/heartbeat.bin new file mode 100644 index 0000000000..cef8cfe929 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/heartbeat.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/init-ack.bin b/test/fuzzers/corpora/sctp-packet-corpus/init-ack.bin new file mode 100644 index 0000000000..80438434d0 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/init-ack.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/init.bin b/test/fuzzers/corpora/sctp-packet-corpus/init.bin new file mode 100644 index 0000000000..3fb4977d58 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/init.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/re-config.bin b/test/fuzzers/corpora/sctp-packet-corpus/re-config.bin new file mode 100644 index 0000000000..74c74f3377 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/re-config.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/sack-data.bin b/test/fuzzers/corpora/sctp-packet-corpus/sack-data.bin new file mode 100644 index 0000000000..fe4de63863 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/sack-data.bin differ diff --git a/test/fuzzers/corpora/sctp-packet-corpus/sack-gap-ack-1.bin b/test/fuzzers/corpora/sctp-packet-corpus/sack-gap-ack-1.bin new file mode 100644 index 0000000000..08494c1515 Binary files /dev/null and b/test/fuzzers/corpora/sctp-packet-corpus/sack-gap-ack-1.bin differ diff --git a/test/fuzzers/dcsctp_packet_fuzzer.cc b/test/fuzzers/dcsctp_packet_fuzzer.cc new file mode 100644 index 0000000000..2fc3fe10f1 --- /dev/null +++ b/test/fuzzers/dcsctp_packet_fuzzer.cc @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "net/dcsctp/packet/chunk/chunk.h" +#include "net/dcsctp/packet/sctp_packet.h" + +namespace webrtc { +using dcsctp::SctpPacket; + +void FuzzOneInput(const uint8_t* data, size_t size) { + absl::optional c = + SctpPacket::Parse(rtc::ArrayView(data, size), + /*disable_checksum_verification=*/true); + + if (!c.has_value()) { + return; + } + + for (const SctpPacket::ChunkDescriptor& desc : c->descriptors()) { + dcsctp::DebugConvertChunkToString(desc.data); + } +} +} // namespace webrtc diff --git a/test/fuzzers/dcsctp_socket_fuzzer.cc b/test/fuzzers/dcsctp_socket_fuzzer.cc new file mode 100644 index 0000000000..390cbb7f6c --- /dev/null +++ b/test/fuzzers/dcsctp_socket_fuzzer.cc @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "net/dcsctp/fuzzers/dcsctp_fuzzers.h" +#include "net/dcsctp/public/dcsctp_message.h" +#include "net/dcsctp/public/dcsctp_options.h" +#include "net/dcsctp/public/dcsctp_socket.h" +#include "net/dcsctp/socket/dcsctp_socket.h" +#include "rtc_base/logging.h" + +namespace webrtc { + +void FuzzOneInput(const uint8_t* data, size_t size) { + dcsctp::dcsctp_fuzzers::FuzzerCallbacks cb; + dcsctp::DcSctpOptions options; + options.disable_checksum_verification = true; + dcsctp::DcSctpSocket socket("A", cb, nullptr, options); + + dcsctp::dcsctp_fuzzers::FuzzSocket(socket, cb, + rtc::ArrayView(data, size)); +} +} // namespace webrtc diff --git a/test/fuzzers/flexfec_header_reader_fuzzer.cc b/test/fuzzers/flexfec_header_reader_fuzzer.cc index 7d710d972f..854cc8b811 100644 --- a/test/fuzzers/flexfec_header_reader_fuzzer.cc +++ b/test/fuzzers/flexfec_header_reader_fuzzer.cc @@ -27,7 +27,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { std::min(size, static_cast(IP_PACKET_SIZE)); packet.pkt->data.SetSize(packet_size); packet.pkt->data.EnsureCapacity(IP_PACKET_SIZE); - memcpy(packet.pkt->data.data(), data, packet_size); + memcpy(packet.pkt->data.MutableData(), data, packet_size); FlexfecHeaderReader flexfec_reader; flexfec_reader.ReadFecHeader(&packet); diff --git a/test/fuzzers/forward_error_correction_fuzzer.cc b/test/fuzzers/forward_error_correction_fuzzer.cc index 09009e1649..04a459bc71 100644 --- a/test/fuzzers/forward_error_correction_fuzzer.cc +++ b/test/fuzzers/forward_error_correction_fuzzer.cc @@ -57,7 +57,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { recovered_packet->pkt = rtc::scoped_refptr( new ForwardErrorCorrection::Packet()); recovered_packet->pkt->data.SetSize(kPacketSize); - memset(recovered_packet->pkt->data.data(), 0, kPacketSize); + memset(recovered_packet->pkt->data.MutableData(), 0, kPacketSize); recovered_packet->ssrc = kMediaSsrc; recovered_packet->seq_num = media_seqnum++; 
recovered_packets.emplace_back(recovered_packet); @@ -69,7 +69,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { new ForwardErrorCorrection::Packet()); received_packet.pkt->data.SetSize(kPacketSize); received_packet.pkt->data.EnsureCapacity(IP_PACKET_SIZE); - uint8_t* packet_buffer = received_packet.pkt->data.data(); + uint8_t* packet_buffer = received_packet.pkt->data.MutableData(); uint8_t reordering; uint16_t seq_num_diff; uint8_t packet_type; diff --git a/test/fuzzers/frame_buffer2_fuzzer.cc b/test/fuzzers/frame_buffer2_fuzzer.cc index 7ec7da5eca..0572675f71 100644 --- a/test/fuzzers/frame_buffer2_fuzzer.cc +++ b/test/fuzzers/frame_buffer2_fuzzer.cc @@ -49,7 +49,7 @@ struct DataReader { size_t offset_ = 0; }; -class FuzzyFrameObject : public video_coding::EncodedFrame { +class FuzzyFrameObject : public EncodedFrame { public: FuzzyFrameObject() {} ~FuzzyFrameObject() {} @@ -77,11 +77,11 @@ void FuzzOneInput(const uint8_t* data, size_t size) { while (reader.MoreToRead()) { if (reader.GetNum() % 2) { std::unique_ptr frame(new FuzzyFrameObject()); - frame->id.picture_id = reader.GetNum(); - frame->id.spatial_layer = reader.GetNum() % 5; + frame->SetId(reader.GetNum()); + frame->SetSpatialIndex(reader.GetNum() % 5); frame->SetTimestamp(reader.GetNum()); - frame->num_references = reader.GetNum() % - video_coding::EncodedFrame::kMaxFrameReferences; + frame->num_references = + reader.GetNum() % EncodedFrame::kMaxFrameReferences; for (size_t r = 0; r < frame->num_references; ++r) frame->references[r] = reader.GetNum(); @@ -98,7 +98,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { frame_buffer.NextFrame( max_wait_time_ms, keyframe_required, &task_queue, [&next_frame_task_running]( - std::unique_ptr frame, + std::unique_ptr frame, video_coding::FrameBuffer::ReturnReason res) { next_frame_task_running = false; }); diff --git a/test/fuzzers/h264_bitstream_parser_fuzzer.cc b/test/fuzzers/h264_bitstream_parser_fuzzer.cc index a9384d73f3..cd1128c0b4 100644 
--- a/test/fuzzers/h264_bitstream_parser_fuzzer.cc +++ b/test/fuzzers/h264_bitstream_parser_fuzzer.cc @@ -14,8 +14,8 @@ namespace webrtc { void FuzzOneInput(const uint8_t* data, size_t size) { H264BitstreamParser h264_bitstream_parser; - h264_bitstream_parser.ParseBitstream(data, size); - int qp; - h264_bitstream_parser.GetLastSliceQp(&qp); + h264_bitstream_parser.ParseBitstream( + rtc::ArrayView(data, size)); + h264_bitstream_parser.GetLastSliceQp(); } } // namespace webrtc diff --git a/test/fuzzers/neteq_rtp_fuzzer.cc b/test/fuzzers/neteq_rtp_fuzzer.cc index d978199448..348c84f040 100644 --- a/test/fuzzers/neteq_rtp_fuzzer.cc +++ b/test/fuzzers/neteq_rtp_fuzzer.cc @@ -8,7 +8,9 @@ * be found in the AUTHORS file in the root of the source tree. */ +#include #include +#include #include #include @@ -64,6 +66,7 @@ class FuzzRtpInput : public NetEqInput { std::numeric_limits::max())); packet_ = input_->PopPacket(); FuzzHeader(); + MaybeFuzzPayload(); } absl::optional NextPacketTime() const override { @@ -79,6 +82,7 @@ class FuzzRtpInput : public NetEqInput { std::unique_ptr packet_to_return = std::move(packet_); packet_ = input_->PopPacket(); FuzzHeader(); + MaybeFuzzPayload(); return packet_to_return; } @@ -116,6 +120,30 @@ class FuzzRtpInput : public NetEqInput { RTC_CHECK_EQ(data_ix_ - start_ix, kNumBytesToFuzz); } + void MaybeFuzzPayload() { + // Read one byte of fuzz data to determine how many payload bytes to fuzz. + if (data_ix_ + 1 > data_.size()) { + ended_ = true; + return; + } + size_t bytes_to_fuzz = data_[data_ix_++]; + + // Restrict number of bytes to fuzz to 16; a reasonably low number enough to + // cover a few RED headers. Also don't write outside the payload length. 
+ bytes_to_fuzz = std::min(bytes_to_fuzz % 16, packet_->payload.size()); + + if (bytes_to_fuzz == 0) + return; + + if (data_ix_ + bytes_to_fuzz > data_.size()) { + ended_ = true; + return; + } + + std::memcpy(packet_->payload.data(), &data_[data_ix_], bytes_to_fuzz); + data_ix_ += bytes_to_fuzz; + } + bool ended_ = false; rtc::ArrayView data_; size_t data_ix_ = 0; diff --git a/test/fuzzers/packet_buffer_fuzzer.cc b/test/fuzzers/packet_buffer_fuzzer.cc index 30f452c9b7..ea9d4896f1 100644 --- a/test/fuzzers/packet_buffer_fuzzer.cc +++ b/test/fuzzers/packet_buffer_fuzzer.cc @@ -13,7 +13,6 @@ #include "modules/video_coding/frame_object.h" #include "modules/video_coding/packet_buffer.h" -#include "system_wrappers/include/clock.h" #include "test/fuzzers/fuzz_data_helper.h" namespace webrtc { @@ -24,8 +23,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { if (size > 200000) { return; } - SimulatedClock clock(0); - video_coding::PacketBuffer packet_buffer(&clock, 8, 1024); + video_coding::PacketBuffer packet_buffer(8, 1024); test::FuzzDataHelper helper(rtc::ArrayView(data, size)); while (helper.BytesLeft()) { @@ -35,7 +33,6 @@ void FuzzOneInput(const uint8_t* data, size_t size) { helper.CopyTo(&packet->payload_type); helper.CopyTo(&packet->seq_num); helper.CopyTo(&packet->timestamp); - helper.CopyTo(&packet->ntp_time_ms); helper.CopyTo(&packet->times_nacked); // Fuzz non-POD member of the packet. diff --git a/test/fuzzers/rtcp_receiver_fuzzer.cc b/test/fuzzers/rtcp_receiver_fuzzer.cc index 38213c3a6e..8bad9e456a 100644 --- a/test/fuzzers/rtcp_receiver_fuzzer.cc +++ b/test/fuzzers/rtcp_receiver_fuzzer.cc @@ -7,9 +7,9 @@ * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. 
*/ -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/source/rtcp_packet/tmmb_item.h" #include "modules/rtp_rtcp/source/rtcp_receiver.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "rtc_base/checks.h" #include "system_wrappers/include/clock.h" @@ -40,7 +40,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { NullModuleRtpRtcp rtp_rtcp_module; SimulatedClock clock(1234); - RtpRtcp::Configuration config; + RtpRtcpInterface::Configuration config; config.clock = &clock; config.rtcp_report_interval_ms = kRtcpIntervalMs; config.local_media_ssrc = 1; diff --git a/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc b/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc index a9f33feed6..fdb4aa5f3c 100644 --- a/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc +++ b/test/fuzzers/rtp_frame_reference_finder_fuzzer.cc @@ -12,9 +12,7 @@ #include "api/rtp_packet_infos.h" #include "modules/video_coding/frame_object.h" -#include "modules/video_coding/packet_buffer.h" #include "modules/video_coding/rtp_frame_reference_finder.h" -#include "system_wrappers/include/clock.h" namespace webrtc { @@ -58,11 +56,6 @@ class DataReader { size_t offset_ = 0; }; -class NullCallback : public video_coding::OnCompleteFrameCallback { - void OnCompleteFrame( - std::unique_ptr frame) override {} -}; - absl::optional GenerateGenericFrameDependencies(DataReader* reader) { absl::optional result; @@ -92,8 +85,7 @@ GenerateGenericFrameDependencies(DataReader* reader) { void FuzzOneInput(const uint8_t* data, size_t size) { DataReader reader(data, size); - NullCallback cb; - video_coding::RtpFrameReferenceFinder reference_finder(&cb); + RtpFrameReferenceFinder reference_finder; auto codec = static_cast(reader.GetNum() % 5); @@ -132,11 +124,10 @@ void FuzzOneInput(const uint8_t* data, size_t size) { break; } - reader.CopyTo(&video_header.frame_marking); video_header.generic = GenerateGenericFrameDependencies(&reader); // clang-format off - auto frame = 
std::make_unique( + auto frame = std::make_unique( first_seq_num, last_seq_num, marker_bit, diff --git a/test/fuzzers/rtp_header_parser_fuzzer.cc b/test/fuzzers/rtp_header_parser_fuzzer.cc index d6af5ca3ce..435c64bbb4 100644 --- a/test/fuzzers/rtp_header_parser_fuzzer.cc +++ b/test/fuzzers/rtp_header_parser_fuzzer.cc @@ -20,29 +20,7 @@ namespace webrtc { void FuzzOneInput(const uint8_t* data, size_t size) { - RtpHeaderParser::IsRtcp(data, size); RtpHeaderParser::GetSsrc(data, size); - RTPHeader rtp_header; - - std::unique_ptr rtp_header_parser( - RtpHeaderParser::CreateForTest()); - - rtp_header_parser->Parse(data, size, &rtp_header); - for (int i = 1; i < kRtpExtensionNumberOfExtensions; ++i) { - if (size > 0 && i >= data[size - 1]) { - RTPExtensionType add_extension = static_cast(i); - rtp_header_parser->RegisterRtpHeaderExtension(add_extension, i); - } - } - rtp_header_parser->Parse(data, size, &rtp_header); - - for (int i = 1; i < kRtpExtensionNumberOfExtensions; ++i) { - if (size > 1 && i >= data[size - 2]) { - RTPExtensionType remove_extension = static_cast(i); - rtp_header_parser->DeregisterRtpHeaderExtension(remove_extension); - } - } - rtp_header_parser->Parse(data, size, &rtp_header); } } // namespace webrtc diff --git a/test/fuzzers/rtp_packet_fuzzer.cc b/test/fuzzers/rtp_packet_fuzzer.cc index 6a4f5e7893..3f2fc5e668 100644 --- a/test/fuzzers/rtp_packet_fuzzer.cc +++ b/test/fuzzers/rtp_packet_fuzzer.cc @@ -9,12 +9,14 @@ */ #include +#include #include "absl/types/optional.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" +#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h" namespace webrtc { // We decide which header extensions to register by reading four bytes @@ -75,6 +77,11 @@ void FuzzOneInput(const uint8_t* data, size_t 
size) { uint8_t audio_level; packet.GetExtension(&voice_activity, &audio_level); break; + case kRtpExtensionCsrcAudioLevel: { + std::vector audio_levels; + packet.GetExtension(&audio_levels); + break; + } case kRtpExtensionAbsoluteSendTime: uint32_t sendtime; packet.GetExtension(&sendtime); @@ -100,7 +107,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { break; } case kRtpExtensionPlayoutDelay: { - PlayoutDelay playout = PlayoutDelay::Noop(); + VideoPlayoutDelay playout; packet.GetExtension(&playout); break; } @@ -108,14 +115,11 @@ void FuzzOneInput(const uint8_t* data, size_t size) { VideoContentType content_type; packet.GetExtension(&content_type); break; - case kRtpExtensionVideoTiming: + case kRtpExtensionVideoTiming: { VideoSendTiming timing; packet.GetExtension(&timing); break; - case kRtpExtensionFrameMarking: - FrameMarking frame_marking; - packet.GetExtension(&frame_marking); - break; + } case kRtpExtensionRtpStreamId: { std::string rsid; packet.GetExtension(&rsid); @@ -146,6 +150,16 @@ void FuzzOneInput(const uint8_t* data, size_t size) { packet.GetExtension(&noise_level); break; } + case kRtpExtensionVideoLayersAllocation: { + VideoLayersAllocation allocation; + packet.GetExtension(&allocation); + break; + } + case kRtpExtensionVideoFrameTrackingId: { + uint16_t tracking_id; + packet.GetExtension(&tracking_id); + break; + } case kRtpExtensionGenericFrameDescriptor02: // This extension requires state to read and so complicated that // deserves own fuzzer. diff --git a/test/fuzzers/rtp_packetizer_av1_fuzzer.cc b/test/fuzzers/rtp_packetizer_av1_fuzzer.cc index 5277c10f4b..e5550c1279 100644 --- a/test/fuzzers/rtp_packetizer_av1_fuzzer.cc +++ b/test/fuzzers/rtp_packetizer_av1_fuzzer.cc @@ -35,7 +35,8 @@ void FuzzOneInput(const uint8_t* data, size_t size) { // Main function under test: RtpPacketizerAv1's constructor. 
RtpPacketizerAv1 packetizer(fuzz_input.ReadByteArray(fuzz_input.BytesLeft()), - limits, frame_type); + limits, frame_type, + /*is_last_frame_in_picture=*/true); size_t num_packets = packetizer.NumPackets(); if (num_packets == 0) { diff --git a/test/fuzzers/sdp_integration_fuzzer.cc b/test/fuzzers/sdp_integration_fuzzer.cc new file mode 100644 index 0000000000..bc181f0573 --- /dev/null +++ b/test/fuzzers/sdp_integration_fuzzer.cc @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include + +#include "pc/test/integration_test_helpers.h" + +namespace webrtc { + +class FuzzerTest : public PeerConnectionIntegrationBaseTest { + public: + FuzzerTest() + : PeerConnectionIntegrationBaseTest(SdpSemantics::kUnifiedPlan) {} + + void TestBody() override {} +}; + +void FuzzOneInput(const uint8_t* data, size_t size) { + if (size > 16384) { + return; + } + std::string message(reinterpret_cast(data), size); + + FuzzerTest test; + test.CreatePeerConnectionWrappers(); + // Note - we do not do test.ConnectFakeSignaling(); all signals + // generated are discarded. + + auto srd_observer = + rtc::make_ref_counted(); + + webrtc::SdpParseError error; + std::unique_ptr sdp( + CreateSessionDescription("offer", message, &error)); + // Note: This form of SRD takes ownership of the description. + test.caller()->pc()->SetRemoteDescription(srd_observer, sdp.release()); + // Wait a short time for observer to be called. Timeout is short + // because the fuzzer should be trying many branches. + EXPECT_TRUE_WAIT(srd_observer->called(), 100); + + // If set-remote-description was successful, try to answer. 
+ auto sld_observer = + rtc::make_ref_counted(); + if (srd_observer->result()) { + test.caller()->pc()->SetLocalDescription(sld_observer.get()); + EXPECT_TRUE_WAIT(sld_observer->called(), 100); + } +} + +} // namespace webrtc diff --git a/test/fuzzers/ssl_certificate_fuzzer.cc b/test/fuzzers/ssl_certificate_fuzzer.cc index 7ab59b51dd..4bab5c8f02 100644 --- a/test/fuzzers/ssl_certificate_fuzzer.cc +++ b/test/fuzzers/ssl_certificate_fuzzer.cc @@ -13,6 +13,7 @@ #include +#include "rtc_base/message_digest.h" #include "rtc_base/ssl_certificate.h" #include "rtc_base/string_encode.h" @@ -34,7 +35,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { cert->CertificateExpirationTime(); std::string algorithm; - cert->GetSignatureDigestAlgorithm(algorithm); + cert->GetSignatureDigestAlgorithm(&algorithm); unsigned char digest[rtc::MessageDigest::kMaxSize]; size_t digest_len; diff --git a/test/fuzzers/stun_parser_fuzzer.cc b/test/fuzzers/stun_parser_fuzzer.cc index 720a699662..6ca9eac8b2 100644 --- a/test/fuzzers/stun_parser_fuzzer.cc +++ b/test/fuzzers/stun_parser_fuzzer.cc @@ -24,5 +24,6 @@ void FuzzOneInput(const uint8_t* data, size_t size) { std::unique_ptr stun_msg(new cricket::IceMessage()); rtc::ByteBufferReader buf(message, size); stun_msg->Read(&buf); + stun_msg->ValidateMessageIntegrity(""); } } // namespace webrtc diff --git a/test/fuzzers/stun_validator_fuzzer.cc b/test/fuzzers/stun_validator_fuzzer.cc index 44252fafbc..421638db1b 100644 --- a/test/fuzzers/stun_validator_fuzzer.cc +++ b/test/fuzzers/stun_validator_fuzzer.cc @@ -18,6 +18,6 @@ void FuzzOneInput(const uint8_t* data, size_t size) { const char* message = reinterpret_cast(data); cricket::StunMessage::ValidateFingerprint(message, size); - cricket::StunMessage::ValidateMessageIntegrity(message, size, ""); + cricket::StunMessage::ValidateMessageIntegrityForTesting(message, size, ""); } } // namespace webrtc diff --git a/test/fuzzers/ulpfec_generator_fuzzer.cc b/test/fuzzers/ulpfec_generator_fuzzer.cc 
index 9426ef0ad3..43d9450918 100644 --- a/test/fuzzers/ulpfec_generator_fuzzer.cc +++ b/test/fuzzers/ulpfec_generator_fuzzer.cc @@ -45,9 +45,9 @@ void FuzzOneInput(const uint8_t* data, size_t size) { packet.EnsureCapacity(IP_PACKET_SIZE); // Write a valid parsable header (version = 2, no padding, no extensions, // no CSRCs). - ByteWriter::WriteBigEndian(&packet[0], 2 << 6); + ByteWriter::WriteBigEndian(packet.MutableData(), 2 << 6); // Make sure sequence numbers are increasing. - ByteWriter::WriteBigEndian(&packet[2], seq_num++); + ByteWriter::WriteBigEndian(packet.MutableData() + 2, seq_num++); i += payload_size + rtp_header_length; const bool protect = data[i++] % 2 == 1; diff --git a/test/fuzzers/ulpfec_header_reader_fuzzer.cc b/test/fuzzers/ulpfec_header_reader_fuzzer.cc index 570fa321ac..243cb4ed70 100644 --- a/test/fuzzers/ulpfec_header_reader_fuzzer.cc +++ b/test/fuzzers/ulpfec_header_reader_fuzzer.cc @@ -27,7 +27,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) { std::min(size, static_cast(IP_PACKET_SIZE)); packet.pkt->data.SetSize(packet_size); packet.pkt->data.EnsureCapacity(IP_PACKET_SIZE); - memcpy(packet.pkt->data.data(), data, packet_size); + memcpy(packet.pkt->data.MutableData(), data, packet_size); UlpfecHeaderReader ulpfec_reader; ulpfec_reader.ReadFecHeader(&packet); diff --git a/test/fuzzers/utils/BUILD.gn b/test/fuzzers/utils/BUILD.gn index 6249156058..3e0782f39d 100644 --- a/test/fuzzers/utils/BUILD.gn +++ b/test/fuzzers/utils/BUILD.gn @@ -24,6 +24,7 @@ rtc_library("rtp_replayer") { "../../../call:call_interfaces", "../../../common_video", "../../../media:rtc_internal_video_codecs", + "../../../modules/rtp_rtcp:rtp_rtcp_format", "../../../rtc_base:checks", "../../../rtc_base:rtc_base_approved", "../../../rtc_base:rtc_base_tests_utils", diff --git a/test/fuzzers/utils/rtp_replayer.cc b/test/fuzzers/utils/rtp_replayer.cc index b7d51e3988..43b1fc2ea4 100644 --- a/test/fuzzers/utils/rtp_replayer.cc +++ b/test/fuzzers/utils/rtp_replayer.cc 
@@ -17,13 +17,13 @@ #include "api/task_queue/default_task_queue_factory.h" #include "api/transport/field_trial_based_config.h" +#include "modules/rtp_rtcp/source/rtp_packet.h" #include "rtc_base/strings/json.h" #include "system_wrappers/include/clock.h" #include "test/call_config_utils.h" #include "test/encoder_settings.h" #include "test/fake_decoder.h" #include "test/rtp_file_reader.h" -#include "test/rtp_header_parser.h" #include "test/run_loop.h" namespace webrtc { @@ -113,7 +113,6 @@ void RtpReplayer::SetupVideoStreams( for (auto& decoder : receive_config.decoders) { decoder = test::CreateMatchingDecoder(decoder.payload_type, decoder.video_format.name); - decoder.decoder_factory = stream_state->decoder_factory.get(); } // Create the window to display the rendered video. @@ -121,6 +120,7 @@ void RtpReplayer::SetupVideoStreams( test::VideoRenderer::Create("Fuzzing WebRTC Video Config", 640, 480)); // Create a receive stream for this config. receive_config.renderer = stream_state->sinks.back().get(); + receive_config.decoder_factory = stream_state->decoder_factory.get(); stream_state->receive_streams.emplace_back( call->CreateVideoReceiveStream(std::move(receive_config))); } @@ -164,37 +164,32 @@ void RtpReplayer::ReplayPackets(rtc::FakeClock* clock, std::min(deliver_in_ms, static_cast(100)))); } + rtc::CopyOnWriteBuffer packet_buffer(packet.data, packet.length); ++num_packets; - switch (call->Receiver()->DeliverPacket( - webrtc::MediaType::VIDEO, - rtc::CopyOnWriteBuffer(packet.data, packet.length), - /* packet_time_us */ -1)) { + switch (call->Receiver()->DeliverPacket(webrtc::MediaType::VIDEO, + packet_buffer, + /* packet_time_us */ -1)) { case PacketReceiver::DELIVERY_OK: break; case PacketReceiver::DELIVERY_UNKNOWN_SSRC: { - RTPHeader header; - std::unique_ptr parser( - RtpHeaderParser::CreateForTest()); - - parser->Parse(packet.data, packet.length, &header); - if (unknown_packets[header.ssrc] == 0) { - RTC_LOG(LS_ERROR) << "Unknown SSRC: " << header.ssrc; + 
webrtc::RtpPacket header; + header.Parse(packet_buffer); + if (unknown_packets[header.Ssrc()] == 0) { + RTC_LOG(LS_ERROR) << "Unknown SSRC: " << header.Ssrc(); } - ++unknown_packets[header.ssrc]; + ++unknown_packets[header.Ssrc()]; break; } case PacketReceiver::DELIVERY_PACKET_ERROR: { RTC_LOG(LS_ERROR) << "Packet error, corrupt packets or incorrect setup?"; - RTPHeader header; - std::unique_ptr parser( - RtpHeaderParser::CreateForTest()); - parser->Parse(packet.data, packet.length, &header); + webrtc::RtpPacket header; + header.Parse(packet_buffer); RTC_LOG(LS_ERROR) << "Packet packet_length=" << packet.length - << " payload_type=" << header.payloadType - << " sequence_number=" << header.sequenceNumber - << " time_stamp=" << header.timestamp - << " ssrc=" << header.ssrc; + << " payload_type=" << header.PayloadType() + << " sequence_number=" << header.SequenceNumber() + << " time_stamp=" << header.Timestamp() + << " ssrc=" << header.Ssrc(); break; } } diff --git a/test/fuzzers/vp9_encoder_references_fuzzer.cc b/test/fuzzers/vp9_encoder_references_fuzzer.cc new file mode 100644 index 0000000000..9c793ae9aa --- /dev/null +++ b/test/fuzzers/vp9_encoder_references_fuzzer.cc @@ -0,0 +1,498 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include + +#include "absl/algorithm/container.h" +#include "absl/base/macros.h" +#include "absl/container/inlined_vector.h" +#include "api/array_view.h" +#include "api/transport/webrtc_key_value_config.h" +#include "api/video/video_frame.h" +#include "api/video_codecs/video_codec.h" +#include "api/video_codecs/video_encoder.h" +#include "modules/video_coding/codecs/interface/mock_libvpx_interface.h" +#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h" +#include "modules/video_coding/frame_dependencies_calculator.h" +#include "rtc_base/numerics/safe_compare.h" +#include "test/fuzzers/fuzz_data_helper.h" +#include "test/gmock.h" + +// Fuzzer simulates various svc configurations and libvpx encoder dropping +// layer frames. +// Validates vp9 encoder wrapper produces consistent frame references. +namespace webrtc { +namespace { + +using test::FuzzDataHelper; +using ::testing::NiceMock; + +class FrameValidator : public EncodedImageCallback { + public: + ~FrameValidator() override = default; + + Result OnEncodedImage(const EncodedImage& encoded_image, + const CodecSpecificInfo* codec_specific_info) override { + RTC_CHECK(codec_specific_info); + RTC_CHECK_EQ(codec_specific_info->codecType, kVideoCodecVP9); + if (codec_specific_info->codecSpecific.VP9.first_frame_in_picture) { + ++picture_id_; + } + int64_t frame_id = frame_id_++; + LayerFrame& layer_frame = frames_[frame_id % kMaxFrameHistorySize]; + layer_frame.picture_id = picture_id_; + layer_frame.spatial_id = encoded_image.SpatialIndex().value_or(0); + layer_frame.frame_id = frame_id; + layer_frame.temporal_id = + codec_specific_info->codecSpecific.VP9.temporal_idx; + if (layer_frame.temporal_id == kNoTemporalIdx) { + layer_frame.temporal_id = 0; + } + layer_frame.vp9_non_ref_for_inter_layer_pred = + codec_specific_info->codecSpecific.VP9.non_ref_for_inter_layer_pred; + CheckVp9References(layer_frame, codec_specific_info->codecSpecific.VP9); + + if 
(codec_specific_info->generic_frame_info.has_value()) { + absl::InlinedVector frame_dependencies = + dependencies_calculator_.FromBuffersUsage( + frame_id, + codec_specific_info->generic_frame_info->encoder_buffers); + + CheckGenericReferences(frame_dependencies, + *codec_specific_info->generic_frame_info); + CheckGenericAndCodecSpecificReferencesAreConsistent( + frame_dependencies, *codec_specific_info, layer_frame); + } + + return Result(Result::OK); + } + + private: + // With 4 spatial layers and patterns up to 8 pictures, it should be enought + // to keep 32 last frames to validate dependencies. + static constexpr size_t kMaxFrameHistorySize = 32; + struct LayerFrame { + int64_t frame_id; + int64_t picture_id; + int spatial_id; + int temporal_id; + bool vp9_non_ref_for_inter_layer_pred; + }; + + void CheckVp9References(const LayerFrame& layer_frame, + const CodecSpecificInfoVP9& vp9_info) { + if (layer_frame.frame_id == 0) { + RTC_CHECK(!vp9_info.inter_layer_predicted); + } else { + const LayerFrame& previous_frame = Frame(layer_frame.frame_id - 1); + if (vp9_info.inter_layer_predicted) { + RTC_CHECK(!previous_frame.vp9_non_ref_for_inter_layer_pred); + RTC_CHECK_EQ(layer_frame.picture_id, previous_frame.picture_id); + } + if (previous_frame.picture_id == layer_frame.picture_id) { + RTC_CHECK_GT(layer_frame.spatial_id, previous_frame.spatial_id); + // The check below would fail for temporal shift structures. Remove it + // or move it to !flexible_mode section when vp9 encoder starts + // supporting such structures. 
+ RTC_CHECK_EQ(layer_frame.temporal_id, previous_frame.temporal_id); + } + } + if (!vp9_info.flexible_mode) { + if (vp9_info.gof.num_frames_in_gof > 0) { + gof_.CopyGofInfoVP9(vp9_info.gof); + } + RTC_CHECK_EQ(gof_.temporal_idx[vp9_info.gof_idx], + layer_frame.temporal_id); + } + } + + void CheckGenericReferences(rtc::ArrayView frame_dependencies, + const GenericFrameInfo& generic_info) const { + for (int64_t dependency_frame_id : frame_dependencies) { + RTC_CHECK_GE(dependency_frame_id, 0); + const LayerFrame& dependency = Frame(dependency_frame_id); + RTC_CHECK_GE(generic_info.spatial_id, dependency.spatial_id); + RTC_CHECK_GE(generic_info.temporal_id, dependency.temporal_id); + } + } + + void CheckGenericAndCodecSpecificReferencesAreConsistent( + rtc::ArrayView frame_dependencies, + const CodecSpecificInfo& info, + const LayerFrame& layer_frame) const { + const CodecSpecificInfoVP9& vp9_info = info.codecSpecific.VP9; + const GenericFrameInfo& generic_info = *info.generic_frame_info; + + RTC_CHECK_EQ(generic_info.spatial_id, layer_frame.spatial_id); + RTC_CHECK_EQ(generic_info.temporal_id, layer_frame.temporal_id); + auto picture_id_diffs = + rtc::MakeArrayView(vp9_info.p_diff, vp9_info.num_ref_pics); + RTC_CHECK_EQ( + frame_dependencies.size(), + picture_id_diffs.size() + (vp9_info.inter_layer_predicted ? 
1 : 0)); + for (int64_t dependency_frame_id : frame_dependencies) { + RTC_CHECK_GE(dependency_frame_id, 0); + const LayerFrame& dependency = Frame(dependency_frame_id); + if (dependency.spatial_id != layer_frame.spatial_id) { + RTC_CHECK(vp9_info.inter_layer_predicted); + RTC_CHECK_EQ(layer_frame.picture_id, dependency.picture_id); + RTC_CHECK_GT(layer_frame.spatial_id, dependency.spatial_id); + } else { + RTC_CHECK(vp9_info.inter_pic_predicted); + RTC_CHECK_EQ(layer_frame.spatial_id, dependency.spatial_id); + RTC_CHECK(absl::c_linear_search( + picture_id_diffs, layer_frame.picture_id - dependency.picture_id)); + } + } + } + + const LayerFrame& Frame(int64_t frame_id) const { + auto& frame = frames_[frame_id % kMaxFrameHistorySize]; + RTC_CHECK_EQ(frame.frame_id, frame_id); + return frame; + } + + GofInfoVP9 gof_; + int64_t frame_id_ = 0; + int64_t picture_id_ = 1; + FrameDependenciesCalculator dependencies_calculator_; + LayerFrame frames_[kMaxFrameHistorySize]; +}; + +class FieldTrials : public WebRtcKeyValueConfig { + public: + explicit FieldTrials(FuzzDataHelper& config) + : flags_(config.ReadOrDefaultValue(0)) {} + + ~FieldTrials() override = default; + std::string Lookup(absl::string_view key) const override { + static constexpr absl::string_view kBinaryFieldTrials[] = { + "WebRTC-Vp9DependencyDescriptor", + "WebRTC-Vp9ExternalRefCtrl", + "WebRTC-Vp9IssueKeyFrameOnLayerDeactivation", + }; + for (size_t i = 0; i < ABSL_ARRAYSIZE(kBinaryFieldTrials); ++i) { + if (key == kBinaryFieldTrials[i]) { + return (flags_ & (1u << i)) ? "Enabled" : "Disabled"; + } + } + + // Ignore following field trials. 
+ if (key == "WebRTC-CongestionWindow" || + key == "WebRTC-UseBaseHeavyVP8TL3RateAllocation" || + key == "WebRTC-SimulcastUpswitchHysteresisPercent" || + key == "WebRTC-SimulcastScreenshareUpswitchHysteresisPercent" || + key == "WebRTC-VideoRateControl" || + key == "WebRTC-VP9-PerformanceFlags" || + key == "WebRTC-VP9VariableFramerateScreenshare" || + key == "WebRTC-VP9QualityScaler") { + return ""; + } + // Crash when using unexpected field trial to decide if it should be fuzzed + // or have a constant value. + RTC_CHECK(false) << "Unfuzzed field trial " << key << "\n"; + } + + private: + const uint8_t flags_; +}; + +VideoCodec CodecSettings(FuzzDataHelper& rng) { + uint16_t config = rng.ReadOrDefaultValue(0); + // Test up to to 4 spatial and 4 temporal layers. + int num_spatial_layers = 1 + (config & 0b11); + int num_temporal_layers = 1 + ((config >> 2) & 0b11); + + VideoCodec codec_settings = {}; + codec_settings.codecType = kVideoCodecVP9; + codec_settings.maxFramerate = 30; + codec_settings.width = 320 << (num_spatial_layers - 1); + codec_settings.height = 180 << (num_spatial_layers - 1); + if (num_spatial_layers > 1) { + for (int sid = 0; sid < num_spatial_layers; ++sid) { + SpatialLayer& spatial_layer = codec_settings.spatialLayers[sid]; + codec_settings.width = 320 << sid; + codec_settings.height = 180 << sid; + spatial_layer.maxFramerate = codec_settings.maxFramerate; + spatial_layer.numberOfTemporalLayers = num_temporal_layers; + } + } + codec_settings.VP9()->numberOfSpatialLayers = num_spatial_layers; + codec_settings.VP9()->numberOfTemporalLayers = num_temporal_layers; + int inter_layer_pred = (config >> 4) & 0b11; + // There are only 3 valid values. + codec_settings.VP9()->interLayerPred = static_cast( + inter_layer_pred < 3 ? 
inter_layer_pred : 0); + codec_settings.VP9()->flexibleMode = (config & (1u << 6)) != 0; + codec_settings.VP9()->frameDroppingOn = (config & (1u << 7)) != 0; + codec_settings.mode = VideoCodecMode::kRealtimeVideo; + return codec_settings; +} + +VideoEncoder::Settings EncoderSettings() { + return VideoEncoder::Settings(VideoEncoder::Capabilities(false), + /*number_of_cores=*/1, + /*max_payload_size=*/0); +} + +struct LibvpxState { + LibvpxState() { + pkt.kind = VPX_CODEC_CX_FRAME_PKT; + pkt.data.frame.buf = pkt_buffer; + pkt.data.frame.sz = ABSL_ARRAYSIZE(pkt_buffer); + layer_id.spatial_layer_id = -1; + } + + uint8_t pkt_buffer[1000] = {}; + vpx_codec_enc_cfg_t config = {}; + vpx_codec_priv_output_cx_pkt_cb_pair_t callback = {}; + vpx_image_t img = {}; + vpx_svc_ref_frame_config_t ref_config = {}; + vpx_svc_layer_id_t layer_id = {}; + vpx_svc_frame_drop_t frame_drop = {}; + vpx_codec_cx_pkt pkt = {}; +}; + +class StubLibvpx : public NiceMock { + public: + explicit StubLibvpx(LibvpxState* state) : state_(state) { RTC_CHECK(state_); } + + vpx_codec_err_t codec_enc_config_default(vpx_codec_iface_t* iface, + vpx_codec_enc_cfg_t* cfg, + unsigned int usage) const override { + state_->config = *cfg; + return VPX_CODEC_OK; + } + + vpx_codec_err_t codec_enc_init(vpx_codec_ctx_t* ctx, + vpx_codec_iface_t* iface, + const vpx_codec_enc_cfg_t* cfg, + vpx_codec_flags_t flags) const override { + RTC_CHECK(ctx); + ctx->err = VPX_CODEC_OK; + return VPX_CODEC_OK; + } + + vpx_image_t* img_wrap(vpx_image_t* img, + vpx_img_fmt_t fmt, + unsigned int d_w, + unsigned int d_h, + unsigned int stride_align, + unsigned char* img_data) const override { + state_->img.fmt = fmt; + state_->img.d_w = d_w; + state_->img.d_h = d_h; + return &state_->img; + } + + vpx_codec_err_t codec_encode(vpx_codec_ctx_t* ctx, + const vpx_image_t* img, + vpx_codec_pts_t pts, + uint64_t duration, + vpx_enc_frame_flags_t flags, + uint64_t deadline) const override { + if (flags & VPX_EFLAG_FORCE_KF) { + 
state_->pkt.data.frame.flags = VPX_FRAME_IS_KEY; + } else { + state_->pkt.data.frame.flags = 0; + } + state_->pkt.data.frame.duration = duration; + return VPX_CODEC_OK; + } + + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + void* param) const override { + if (ctrl_id == VP9E_REGISTER_CX_CALLBACK) { + state_->callback = + *reinterpret_cast(param); + } + return VPX_CODEC_OK; + } + + vpx_codec_err_t codec_control( + vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_ref_frame_config_t* param) const override { + switch (ctrl_id) { + case VP9E_SET_SVC_REF_FRAME_CONFIG: + state_->ref_config = *param; + break; + case VP9E_GET_SVC_REF_FRAME_CONFIG: + *param = state_->ref_config; + break; + default: + break; + } + return VPX_CODEC_OK; + } + + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_layer_id_t* param) const override { + switch (ctrl_id) { + case VP9E_SET_SVC_LAYER_ID: + state_->layer_id = *param; + break; + case VP9E_GET_SVC_LAYER_ID: + *param = state_->layer_id; + break; + default: + break; + } + return VPX_CODEC_OK; + } + + vpx_codec_err_t codec_control(vpx_codec_ctx_t* ctx, + vp8e_enc_control_id ctrl_id, + vpx_svc_frame_drop_t* param) const override { + if (ctrl_id == VP9E_SET_SVC_FRAME_DROP_LAYER) { + state_->frame_drop = *param; + } + return VPX_CODEC_OK; + } + + vpx_codec_err_t codec_enc_config_set( + vpx_codec_ctx_t* ctx, + const vpx_codec_enc_cfg_t* cfg) const override { + state_->config = *cfg; + return VPX_CODEC_OK; + } + + private: + LibvpxState* const state_; +}; + +enum Actions { + kEncode, + kSetRates, +}; + +// When a layer frame is marked for drop, drops all layer frames from that +// pictures with larger spatial ids. 
+constexpr bool DropAbove(uint8_t layers_mask, int sid) { + uint8_t full_mask = (uint8_t{1} << (sid + 1)) - 1; + return (layers_mask & full_mask) != full_mask; +} +// inline unittests +static_assert(DropAbove(0b1011, /*sid=*/0) == false, ""); +static_assert(DropAbove(0b1011, /*sid=*/1) == false, ""); +static_assert(DropAbove(0b1011, /*sid=*/2) == true, ""); +static_assert(DropAbove(0b1011, /*sid=*/3) == true, ""); + +// When a layer frame is marked for drop, drops all layer frames from that +// pictures with smaller spatial ids. +constexpr bool DropBelow(uint8_t layers_mask, int sid, int num_layers) { + return (layers_mask >> sid) != (1 << (num_layers - sid)) - 1; +} +// inline unittests +static_assert(DropBelow(0b1101, /*sid=*/0, 4) == true, ""); +static_assert(DropBelow(0b1101, /*sid=*/1, 4) == true, ""); +static_assert(DropBelow(0b1101, /*sid=*/2, 4) == false, ""); +static_assert(DropBelow(0b1101, /*sid=*/3, 4) == false, ""); + +} // namespace + +void FuzzOneInput(const uint8_t* data, size_t size) { + FuzzDataHelper helper(rtc::MakeArrayView(data, size)); + + FrameValidator validator; + FieldTrials field_trials(helper); + // Setup call callbacks for the fake + LibvpxState state; + + // Initialize encoder + LibvpxVp9Encoder encoder(cricket::VideoCodec(), + std::make_unique(&state), field_trials); + VideoCodec codec = CodecSettings(helper); + if (encoder.InitEncode(&codec, EncoderSettings()) != WEBRTC_VIDEO_CODEC_OK) { + return; + } + RTC_CHECK_EQ(encoder.RegisterEncodeCompleteCallback(&validator), + WEBRTC_VIDEO_CODEC_OK); + { + // Enable all the layers initially. Encoder doesn't support producing + // frames when no layers are enabled. 
+ LibvpxVp9Encoder::RateControlParameters parameters; + parameters.framerate_fps = 30.0; + for (int sid = 0; sid < codec.VP9()->numberOfSpatialLayers; ++sid) { + for (int tid = 0; tid < codec.VP9()->numberOfTemporalLayers; ++tid) { + parameters.bitrate.SetBitrate(sid, tid, 100'000); + } + } + encoder.SetRates(parameters); + } + + std::vector frame_types(1); + VideoFrame fake_image = VideoFrame::Builder() + .set_video_frame_buffer(I420Buffer::Create( + int{codec.width}, int{codec.height})) + .build(); + + // Start producing frames at random. + while (helper.CanReadBytes(1)) { + uint8_t action = helper.Read(); + switch (action & 0b11) { + case kEncode: { + // bitmask of the action: SSSS-K00, where + // four S bit indicate which spatial layers should be produced, + // K bit indicates if frame should be a key frame. + frame_types[0] = (action & 0b100) ? VideoFrameType::kVideoFrameKey + : VideoFrameType::kVideoFrameDelta; + encoder.Encode(fake_image, &frame_types); + uint8_t encode_spatial_layers = (action >> 4); + for (size_t sid = 0; sid < state.config.ss_number_layers; ++sid) { + bool drop = true; + switch (state.frame_drop.framedrop_mode) { + case FULL_SUPERFRAME_DROP: + drop = encode_spatial_layers == 0; + break; + case LAYER_DROP: + drop = (encode_spatial_layers & (1 << sid)) == 0; + break; + case CONSTRAINED_LAYER_DROP: + drop = DropBelow(encode_spatial_layers, sid, + state.config.ss_number_layers); + break; + case CONSTRAINED_FROM_ABOVE_DROP: + drop = DropAbove(encode_spatial_layers, sid); + break; + } + if (!drop) { + state.layer_id.spatial_layer_id = sid; + state.callback.output_cx_pkt(&state.pkt, state.callback.user_priv); + } + } + } break; + case kSetRates: { + // bitmask of the action: (S3)(S1)(S0)01, + // where Sx is number of temporal layers to enable for spatial layer x + // In pariculat Sx = 0 indicates spatial layer x should be disabled. 
+ LibvpxVp9Encoder::RateControlParameters parameters; + parameters.framerate_fps = 30.0; + for (int sid = 0; sid < codec.VP9()->numberOfSpatialLayers; ++sid) { + int temporal_layers = (action >> ((1 + sid) * 2)) & 0b11; + for (int tid = 0; tid < temporal_layers; ++tid) { + parameters.bitrate.SetBitrate(sid, tid, 100'000); + } + } + // Ignore allocation that turns off all the layers. in such case + // it is up to upper-layer code not to call Encode. + if (parameters.bitrate.get_sum_bps() > 0) { + encoder.SetRates(parameters); + } + } break; + default: + // Unspecificed values are noop. + break; + } + } +} +} // namespace webrtc diff --git a/test/linux/glx_renderer.cc b/test/linux/glx_renderer.cc index 50f2a06a8e..04d482c88b 100644 --- a/test/linux/glx_renderer.cc +++ b/test/linux/glx_renderer.cc @@ -20,8 +20,8 @@ namespace test { GlxRenderer::GlxRenderer(size_t width, size_t height) : width_(width), height_(height), display_(NULL), context_(NULL) { - assert(width > 0); - assert(height > 0); + RTC_DCHECK_GT(width, 0); + RTC_DCHECK_GT(height, 0); } GlxRenderer::~GlxRenderer() { diff --git a/test/logging/BUILD.gn b/test/logging/BUILD.gn index db2a5447ac..1af2ecfdac 100644 --- a/test/logging/BUILD.gn +++ b/test/logging/BUILD.gn @@ -27,6 +27,6 @@ rtc_library("log_writer") { "../../rtc_base:rtc_base_tests_utils", "../../rtc_base:stringutils", "../../test:fileutils", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } diff --git a/test/logging/memory_log_writer.cc b/test/logging/memory_log_writer.cc index 2eb1cffb48..f57f0317a9 100644 --- a/test/logging/memory_log_writer.cc +++ b/test/logging/memory_log_writer.cc @@ -21,25 +21,18 @@ class MemoryLogWriter final : public RtcEventLogOutput { explicit MemoryLogWriter(std::map* target, std::string filename) : target_(target), filename_(filename) {} - ~MemoryLogWriter() final { - size_t size; - buffer_.GetSize(&size); - target_->insert({filename_, 
std::string(buffer_.GetBuffer(), size)}); - } + ~MemoryLogWriter() final { target_->insert({filename_, std::move(buffer_)}); } bool IsActive() const override { return true; } bool Write(const std::string& value) override { - size_t written; - int error; - return buffer_.WriteAll(value.data(), value.size(), &written, &error) == - rtc::SR_SUCCESS; - RTC_DCHECK_EQ(value.size(), written); + buffer_.append(value); + return true; } void Flush() override {} private: std::map* const target_; const std::string filename_; - rtc::MemoryStream buffer_; + std::string buffer_; }; class MemoryLogWriterFactory : public LogWriterFactoryInterface { diff --git a/test/logging/memory_log_writer.h b/test/logging/memory_log_writer.h index daef297b88..e795b2fd10 100644 --- a/test/logging/memory_log_writer.h +++ b/test/logging/memory_log_writer.h @@ -15,7 +15,6 @@ #include #include -#include "rtc_base/memory_stream.h" #include "test/logging/log_writer.h" namespace webrtc { diff --git a/test/mappable_native_buffer.cc b/test/mappable_native_buffer.cc new file mode 100644 index 0000000000..bd0b304545 --- /dev/null +++ b/test/mappable_native_buffer.cc @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "test/mappable_native_buffer.h" + +#include "absl/algorithm/container.h" +#include "api/video/i420_buffer.h" +#include "api/video/nv12_buffer.h" +#include "api/video/video_frame.h" +#include "api/video/video_rotation.h" +#include "common_video/include/video_frame_buffer.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace test { + +namespace { + +class NV12BufferWithDidConvertToI420 : public NV12Buffer { + public: + NV12BufferWithDidConvertToI420(int width, int height) + : NV12Buffer(width, height), did_convert_to_i420_(false) {} + + bool did_convert_to_i420() const { return did_convert_to_i420_; } + + rtc::scoped_refptr ToI420() override { + did_convert_to_i420_ = true; + return NV12Buffer::ToI420(); + } + + private: + bool did_convert_to_i420_; +}; + +} // namespace + +VideoFrame CreateMappableNativeFrame(int64_t ntp_time_ms, + VideoFrameBuffer::Type mappable_type, + int width, + int height) { + VideoFrame frame = + VideoFrame::Builder() + .set_video_frame_buffer(rtc::make_ref_counted( + mappable_type, width, height)) + .set_timestamp_rtp(99) + .set_timestamp_ms(99) + .set_rotation(kVideoRotation_0) + .build(); + frame.set_ntp_time_ms(ntp_time_ms); + return frame; +} + +rtc::scoped_refptr GetMappableNativeBufferFromVideoFrame( + const VideoFrame& frame) { + return static_cast(frame.video_frame_buffer().get()); +} + +MappableNativeBuffer::ScaledBuffer::ScaledBuffer( + rtc::scoped_refptr parent, + int width, + int height) + : parent_(std::move(parent)), width_(width), height_(height) {} + +MappableNativeBuffer::ScaledBuffer::~ScaledBuffer() {} + +rtc::scoped_refptr +MappableNativeBuffer::ScaledBuffer::CropAndScale(int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) { + return rtc::make_ref_counted(parent_, scaled_width, + scaled_height); +} + +rtc::scoped_refptr +MappableNativeBuffer::ScaledBuffer::ToI420() { + return parent_->GetOrCreateMappedBuffer(width_, height_)->ToI420(); +} 
+ +rtc::scoped_refptr +MappableNativeBuffer::ScaledBuffer::GetMappedFrameBuffer( + rtc::ArrayView types) { + if (absl::c_find(types, parent_->mappable_type_) == types.end()) + return nullptr; + return parent_->GetOrCreateMappedBuffer(width_, height_); +} + +MappableNativeBuffer::MappableNativeBuffer(VideoFrameBuffer::Type mappable_type, + int width, + int height) + : mappable_type_(mappable_type), width_(width), height_(height) { + RTC_DCHECK(mappable_type_ == VideoFrameBuffer::Type::kI420 || + mappable_type_ == VideoFrameBuffer::Type::kNV12); +} + +MappableNativeBuffer::~MappableNativeBuffer() {} + +rtc::scoped_refptr MappableNativeBuffer::CropAndScale( + int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) { + return FullSizeBuffer()->CropAndScale( + offset_x, offset_y, crop_width, crop_height, scaled_width, scaled_height); +} + +rtc::scoped_refptr MappableNativeBuffer::ToI420() { + return FullSizeBuffer()->ToI420(); +} + +rtc::scoped_refptr MappableNativeBuffer::GetMappedFrameBuffer( + rtc::ArrayView types) { + return FullSizeBuffer()->GetMappedFrameBuffer(types); +} + +std::vector> +MappableNativeBuffer::GetMappedFramedBuffers() const { + MutexLock lock(&lock_); + return mapped_buffers_; +} + +bool MappableNativeBuffer::DidConvertToI420() const { + if (mappable_type_ != VideoFrameBuffer::Type::kNV12) + return false; + MutexLock lock(&lock_); + for (auto& mapped_buffer : mapped_buffers_) { + if (static_cast(mapped_buffer.get()) + ->did_convert_to_i420()) { + return true; + } + } + return false; +} + +rtc::scoped_refptr +MappableNativeBuffer::FullSizeBuffer() { + return rtc::make_ref_counted(this, width_, height_); +} + +rtc::scoped_refptr +MappableNativeBuffer::GetOrCreateMappedBuffer(int width, int height) { + MutexLock lock(&lock_); + for (auto& mapped_buffer : mapped_buffers_) { + if (mapped_buffer->width() == width && mapped_buffer->height() == height) { + return mapped_buffer; + } + } + 
rtc::scoped_refptr mapped_buffer; + switch (mappable_type_) { + case VideoFrameBuffer::Type::kI420: { + rtc::scoped_refptr i420_buffer = + I420Buffer::Create(width, height); + I420Buffer::SetBlack(i420_buffer); + mapped_buffer = i420_buffer; + break; + } + case VideoFrameBuffer::Type::kNV12: { + auto nv12_buffer = + rtc::make_ref_counted(width, height); + nv12_buffer->InitializeData(); + mapped_buffer = std::move(nv12_buffer); + break; + } + default: + RTC_NOTREACHED(); + } + mapped_buffers_.push_back(mapped_buffer); + return mapped_buffer; +} + +} // namespace test +} // namespace webrtc diff --git a/test/mappable_native_buffer.h b/test/mappable_native_buffer.h new file mode 100644 index 0000000000..add22029c7 --- /dev/null +++ b/test/mappable_native_buffer.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef TEST_MAPPABLE_NATIVE_BUFFER_H_ +#define TEST_MAPPABLE_NATIVE_BUFFER_H_ + +#include +#include + +#include "api/array_view.h" +#include "api/video/video_frame.h" +#include "common_video/include/video_frame_buffer.h" +#include "rtc_base/ref_counted_object.h" +#include "rtc_base/synchronization/mutex.h" + +namespace webrtc { +namespace test { + +class MappableNativeBuffer; + +VideoFrame CreateMappableNativeFrame(int64_t ntp_time_ms, + VideoFrameBuffer::Type mappable_type, + int width, + int height); + +rtc::scoped_refptr GetMappableNativeBufferFromVideoFrame( + const VideoFrame& frame); + +// A for-testing native buffer that is scalable and mappable. The contents of +// the buffer is black and the pixels are created upon mapping. 
Mapped buffers +// are stored inside MappableNativeBuffer, allowing tests to verify which +// resolutions were mapped, e.g. when passing them in to an encoder or other +// modules. +class MappableNativeBuffer : public VideoFrameBuffer { + public: + // If |allow_i420_conversion| is false, calling ToI420() on a non-I420 buffer + // will DCHECK-crash. Used to ensure zero-copy in tests. + MappableNativeBuffer(VideoFrameBuffer::Type mappable_type, + int width, + int height); + ~MappableNativeBuffer() override; + + VideoFrameBuffer::Type mappable_type() const { return mappable_type_; } + + VideoFrameBuffer::Type type() const override { return Type::kNative; } + int width() const override { return width_; } + int height() const override { return height_; } + + rtc::scoped_refptr CropAndScale(int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) override; + + rtc::scoped_refptr ToI420() override; + rtc::scoped_refptr GetMappedFrameBuffer( + rtc::ArrayView types) override; + + // Gets all the buffers that have been mapped so far, including mappings of + // cropped and scaled buffers. 
+ std::vector> GetMappedFramedBuffers() + const; + bool DidConvertToI420() const; + + private: + friend class rtc::RefCountedObject; + + class ScaledBuffer : public VideoFrameBuffer { + public: + ScaledBuffer(rtc::scoped_refptr parent, + int width, + int height); + ~ScaledBuffer() override; + + VideoFrameBuffer::Type type() const override { return Type::kNative; } + int width() const override { return width_; } + int height() const override { return height_; } + + rtc::scoped_refptr CropAndScale( + int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) override; + + rtc::scoped_refptr ToI420() override; + rtc::scoped_refptr GetMappedFrameBuffer( + rtc::ArrayView types) override; + + private: + friend class rtc::RefCountedObject; + + const rtc::scoped_refptr parent_; + const int width_; + const int height_; + }; + + rtc::scoped_refptr FullSizeBuffer(); + rtc::scoped_refptr GetOrCreateMappedBuffer(int width, + int height); + + const VideoFrameBuffer::Type mappable_type_; + const int width_; + const int height_; + mutable Mutex lock_; + std::vector> mapped_buffers_ + RTC_GUARDED_BY(&lock_); +}; + +} // namespace test +} // namespace webrtc + +#endif // TEST_MAPPABLE_NATIVE_BUFFER_H_ diff --git a/test/mock_audio_decoder_factory.h b/test/mock_audio_decoder_factory.h index cdb03d3f38..4d3eed212c 100644 --- a/test/mock_audio_decoder_factory.h +++ b/test/mock_audio_decoder_factory.h @@ -52,7 +52,7 @@ class MockAudioDecoderFactory : public AudioDecoderFactory { using ::testing::Return; rtc::scoped_refptr factory = - new rtc::RefCountedObject; + rtc::make_ref_counted(); ON_CALL(*factory.get(), GetSupportedDecoders()) .WillByDefault(Return(std::vector())); EXPECT_CALL(*factory.get(), GetSupportedDecoders()).Times(AnyNumber()); @@ -73,7 +73,7 @@ class MockAudioDecoderFactory : public AudioDecoderFactory { using ::testing::SetArgPointee; rtc::scoped_refptr factory = - new rtc::RefCountedObject; + rtc::make_ref_counted(); 
ON_CALL(*factory.get(), GetSupportedDecoders()) .WillByDefault(Return(std::vector())); EXPECT_CALL(*factory.get(), GetSupportedDecoders()).Times(AnyNumber()); diff --git a/test/mock_audio_encoder.h b/test/mock_audio_encoder.h index 9d9db0d66c..87b8cc8c8e 100644 --- a/test/mock_audio_encoder.h +++ b/test/mock_audio_encoder.h @@ -21,10 +21,6 @@ namespace webrtc { class MockAudioEncoder : public AudioEncoder { public: - // TODO(nisse): Valid overrides commented out, because the gmock - // methods don't use any override declarations, and we want to avoid - // warnings from -Winconsistent-missing-override. See - // http://crbug.com/428099. MockAudioEncoder(); ~MockAudioEncoder(); MOCK_METHOD(int, SampleRateHz, (), (const, override)); @@ -52,6 +48,10 @@ class MockAudioEncoder : public AudioEncoder { OnReceivedUplinkPacketLossFraction, (float uplink_packet_loss_fraction), (override)); + MOCK_METHOD(void, + OnReceivedOverhead, + (size_t overhead_bytes_per_packet), + (override)); MOCK_METHOD(bool, EnableAudioNetworkAdaptor, diff --git a/test/network/BUILD.gn b/test/network/BUILD.gn index 4b01479c9b..1e39a3f89b 100644 --- a/test/network/BUILD.gn +++ b/test/network/BUILD.gn @@ -10,13 +10,14 @@ import("../../webrtc.gni") rtc_library("emulated_network") { visibility = [ - "../../api:create_network_emulation_manager", ":*", + "../../api:create_network_emulation_manager", + "../../api/test/network_emulation:create_cross_traffic", ] if (rtc_include_tests) { visibility += [ - "../scenario:*", "../peer_scenario:*", + "../scenario:*", ] } testonly = true @@ -25,6 +26,8 @@ rtc_library("emulated_network") { "cross_traffic.h", "emulated_network_manager.cc", "emulated_network_manager.h", + "emulated_turn_server.cc", + "emulated_turn_server.h", "fake_network_socket_server.cc", "fake_network_socket_server.h", "network_emulation.cc", @@ -35,25 +38,43 @@ rtc_library("emulated_network") { "traffic_route.h", ] deps = [ + "../../api:array_view", "../../api:network_emulation_manager_api", + 
"../../api:packet_socket_factory", + "../../api:scoped_refptr", + "../../api:sequence_checker", "../../api:simulated_network_api", "../../api:time_controller", + "../../api/numerics", + "../../api/test/network_emulation", + "../../api/transport:stun_types", "../../api/units:data_rate", "../../api/units:data_size", "../../api/units:time_delta", "../../api/units:timestamp", "../../call:simulated_network", + "../../p2p:p2p_server_utils", "../../rtc_base", + "../../rtc_base:async_socket", + "../../rtc_base:ip_address", + "../../rtc_base:network_constants", "../../rtc_base:rtc_base_tests_utils", "../../rtc_base:rtc_task_queue", "../../rtc_base:safe_minmax", + "../../rtc_base:socket_address", + "../../rtc_base:socket_server", + "../../rtc_base:stringutils", "../../rtc_base:task_queue_for_test", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base:threading", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/task_utils:pending_task_safety_flag", "../../rtc_base/task_utils:repeating_task", - "../../rtc_base/third_party/sigslot", + "../../rtc_base/task_utils:to_queued_task", "../../system_wrappers", "../scenario:column_printer", "../time_controller", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/types:optional", @@ -72,34 +93,37 @@ rtc_library("network_emulation_unittest") { "../../rtc_base:gunit_helpers", "../../rtc_base:logging", "../../rtc_base:rtc_event", - "../../system_wrappers:system_wrappers", + "../../rtc_base/synchronization:mutex", ] } -rtc_library("network_emulation_pc_unittest") { - testonly = true - sources = [ "network_emulation_pc_unittest.cc" ] - deps = [ - ":emulated_network", - "../:test_support", - "../../api:callfactory_api", - "../../api:libjingle_peerconnection_api", - "../../api:scoped_refptr", - "../../api:simulated_network_api", - "../../api/rtc_event_log:rtc_event_log_factory", - 
"../../api/task_queue:default_task_queue_factory", - "../../call:simulated_network", - "../../media:rtc_audio_video", - "../../media:rtc_media_engine_defaults", - "../../modules/audio_device:audio_device_impl", - "../../p2p:rtc_p2p", - "../../pc:pc_test_utils", - "../../pc:peerconnection_wrapper", - "../../rtc_base", - "../../rtc_base:gunit_helpers", - "../../rtc_base:logging", - "../../rtc_base:rtc_event", - ] +if (rtc_include_tests && !build_with_chromium) { + rtc_library("network_emulation_pc_unittest") { + testonly = true + sources = [ "network_emulation_pc_unittest.cc" ] + deps = [ + ":emulated_network", + "../:test_support", + "../../api:callfactory_api", + "../../api:libjingle_peerconnection_api", + "../../api:scoped_refptr", + "../../api:simulated_network_api", + "../../api/rtc_event_log:rtc_event_log_factory", + "../../api/task_queue:default_task_queue_factory", + "../../api/transport:field_trial_based_config", + "../../call:simulated_network", + "../../media:rtc_audio_video", + "../../media:rtc_media_engine_defaults", + "../../modules/audio_device:audio_device_impl", + "../../p2p:rtc_p2p", + "../../pc:pc_test_utils", + "../../pc:peerconnection_wrapper", + "../../rtc_base", + "../../rtc_base:gunit_helpers", + "../../rtc_base:logging", + "../../rtc_base:rtc_event", + ] + } } rtc_library("cross_traffic_unittest") { @@ -108,47 +132,56 @@ rtc_library("cross_traffic_unittest") { deps = [ ":emulated_network", "../:test_support", + "../../api:network_emulation_manager_api", "../../api:simulated_network_api", "../../call:simulated_network", "../../rtc_base", "../../rtc_base:logging", + "../../rtc_base:network_constants", "../../rtc_base:rtc_event", - "//test/time_controller:time_controller", - "//third_party/abseil-cpp/absl/memory", - ] -} - -rtc_library("feedback_generator") { - testonly = true - sources = [ - "feedback_generator.cc", - "feedback_generator.h", - ] - deps = [ - ":emulated_network", - "../../api/transport:test_feedback_generator_interface", - 
"../../call:simulated_network", - "../../rtc_base:checks", "../time_controller", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/types:optional", ] } -rtc_library("feedback_generator_unittest") { - testonly = true - sources = [ "feedback_generator_unittest.cc" ] - deps = [ - "../:test_support", - "../../api/transport:test_feedback_generator", - ] -} +if (rtc_include_tests) { + rtc_library("feedback_generator") { + testonly = true + sources = [ + "feedback_generator.cc", + "feedback_generator.h", + ] + deps = [ + ":emulated_network", + "../../api/transport:test_feedback_generator_interface", + "../../call:simulated_network", + "../../rtc_base:checks", + "../time_controller", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + } -rtc_library("network_emulation_unittests") { - testonly = true - deps = [ - ":cross_traffic_unittest", - ":feedback_generator_unittest", - ":network_emulation_pc_unittest", - ":network_emulation_unittest", - ] + rtc_library("feedback_generator_unittest") { + testonly = true + sources = [ "feedback_generator_unittest.cc" ] + deps = [ + "../:test_support", + "../../api/transport:test_feedback_generator", + ] + } + + if (!build_with_chromium) { + rtc_library("network_emulation_unittests") { + testonly = true + deps = [ + ":cross_traffic_unittest", + ":feedback_generator_unittest", + ":network_emulation_pc_unittest", + ":network_emulation_unittest", + ] + } + } } diff --git a/test/network/cross_traffic.cc b/test/network/cross_traffic.cc index be0c3d3286..ae5b156376 100644 --- a/test/network/cross_traffic.cc +++ b/test/network/cross_traffic.cc @@ -24,7 +24,7 @@ namespace webrtc { namespace test { RandomWalkCrossTraffic::RandomWalkCrossTraffic(RandomWalkConfig config, - TrafficRoute* traffic_route) + CrossTrafficRoute* traffic_route) : config_(config), traffic_route_(traffic_route), random_(config_.random_seed) { @@ -56,6 +56,10 @@ void RandomWalkCrossTraffic::Process(Timestamp at_time) { } } 
+TimeDelta RandomWalkCrossTraffic::GetProcessInterval() const { + return config_.min_packet_interval; +} + DataRate RandomWalkCrossTraffic::TrafficRate() const { RTC_DCHECK_RUN_ON(&sequence_checker_); return config_.peak_rate * intensity_; @@ -70,8 +74,9 @@ ColumnPrinter RandomWalkCrossTraffic::StatsPrinter() { 32); } -PulsedPeaksCrossTraffic::PulsedPeaksCrossTraffic(PulsedPeaksConfig config, - TrafficRoute* traffic_route) +PulsedPeaksCrossTraffic::PulsedPeaksCrossTraffic( + PulsedPeaksConfig config, + CrossTrafficRoute* traffic_route) : config_(config), traffic_route_(traffic_route) { sequence_checker_.Detach(); } @@ -102,6 +107,10 @@ void PulsedPeaksCrossTraffic::Process(Timestamp at_time) { } } +TimeDelta PulsedPeaksCrossTraffic::GetProcessInterval() const { + return config_.min_packet_interval; +} + DataRate PulsedPeaksCrossTraffic::TrafficRate() const { RTC_DCHECK_RUN_ON(&sequence_checker_); return sending_ ? config_.peak_rate : DataRate::Zero(); @@ -144,15 +153,16 @@ void TcpMessageRouteImpl::SendMessage(size_t size, cwnd_ = 10; ssthresh_ = INFINITY; } - size_t data_left = size; - size_t kMaxPacketSize = 1200; - size_t kMinPacketSize = 4; + int64_t data_left = static_cast(size); + int64_t kMaxPacketSize = 1200; + int64_t kMinPacketSize = 4; Message message{std::move(handler)}; while (data_left > 0) { - size_t packet_size = - std::max(kMinPacketSize, std::min(data_left, kMaxPacketSize)); + int64_t packet_size = std::min(data_left, kMaxPacketSize); int fragment_id = next_fragment_id_++; - pending_.push_back(MessageFragment{fragment_id, packet_size}); + pending_.push_back(MessageFragment{ + fragment_id, + static_cast(std::max(kMinPacketSize, packet_size))}); message.pending_fragment_ids.insert(fragment_id); data_left -= packet_size; } @@ -239,21 +249,13 @@ void TcpMessageRouteImpl::HandlePacketTimeout(int seq_num, Timestamp at_time) { } } -FakeTcpCrossTraffic::FakeTcpCrossTraffic(Clock* clock, - FakeTcpConfig config, 
+FakeTcpCrossTraffic::FakeTcpCrossTraffic(FakeTcpConfig config, EmulatedRoute* send_route, EmulatedRoute* ret_route) - : clock_(clock), conf_(config), route_(this, send_route, ret_route) {} - -void FakeTcpCrossTraffic::Start(TaskQueueBase* task_queue) { - repeating_task_handle_ = RepeatingTaskHandle::Start(task_queue, [this] { - Process(clock_->CurrentTime()); - return conf_.process_interval; - }); -} + : conf_(config), route_(this, send_route, ret_route) {} -void FakeTcpCrossTraffic::Stop() { - repeating_task_handle_.Stop(); +TimeDelta FakeTcpCrossTraffic::GetProcessInterval() const { + return conf_.process_interval; } void FakeTcpCrossTraffic::Process(Timestamp at_time) { diff --git a/test/network/cross_traffic.h b/test/network/cross_traffic.h index 942b863bbf..487622d4d4 100644 --- a/test/network/cross_traffic.h +++ b/test/network/cross_traffic.h @@ -15,41 +15,34 @@ #include #include +#include "api/sequence_checker.h" +#include "api/test/network_emulation_manager.h" #include "api/units/data_rate.h" #include "api/units/data_size.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" #include "rtc_base/random.h" -#include "rtc_base/synchronization/sequence_checker.h" -#include "test/network/traffic_route.h" +#include "test/network/network_emulation.h" #include "test/scenario/column_printer.h" namespace webrtc { namespace test { -struct RandomWalkConfig { - int random_seed = 1; - DataRate peak_rate = DataRate::KilobitsPerSec(100); - DataSize min_packet_size = DataSize::Bytes(200); - TimeDelta min_packet_interval = TimeDelta::Millis(1); - TimeDelta update_interval = TimeDelta::Millis(200); - double variance = 0.6; - double bias = -0.1; -}; - -class RandomWalkCrossTraffic { +class RandomWalkCrossTraffic final : public CrossTrafficGenerator { public: - RandomWalkCrossTraffic(RandomWalkConfig config, TrafficRoute* traffic_route); + RandomWalkCrossTraffic(RandomWalkConfig config, + CrossTrafficRoute* traffic_route); ~RandomWalkCrossTraffic(); - void 
Process(Timestamp at_time); + void Process(Timestamp at_time) override; + TimeDelta GetProcessInterval() const override; DataRate TrafficRate() const; ColumnPrinter StatsPrinter(); private: SequenceChecker sequence_checker_; const RandomWalkConfig config_; - TrafficRoute* const traffic_route_ RTC_PT_GUARDED_BY(sequence_checker_); + CrossTrafficRoute* const traffic_route_ RTC_PT_GUARDED_BY(sequence_checker_); webrtc::Random random_ RTC_GUARDED_BY(sequence_checker_); Timestamp last_process_time_ RTC_GUARDED_BY(sequence_checker_) = @@ -62,28 +55,21 @@ class RandomWalkCrossTraffic { DataSize pending_size_ RTC_GUARDED_BY(sequence_checker_) = DataSize::Zero(); }; -struct PulsedPeaksConfig { - DataRate peak_rate = DataRate::KilobitsPerSec(100); - DataSize min_packet_size = DataSize::Bytes(200); - TimeDelta min_packet_interval = TimeDelta::Millis(1); - TimeDelta send_duration = TimeDelta::Millis(100); - TimeDelta hold_duration = TimeDelta::Millis(2000); -}; - -class PulsedPeaksCrossTraffic { +class PulsedPeaksCrossTraffic final : public CrossTrafficGenerator { public: PulsedPeaksCrossTraffic(PulsedPeaksConfig config, - TrafficRoute* traffic_route); + CrossTrafficRoute* traffic_route); ~PulsedPeaksCrossTraffic(); - void Process(Timestamp at_time); + void Process(Timestamp at_time) override; + TimeDelta GetProcessInterval() const override; DataRate TrafficRate() const; ColumnPrinter StatsPrinter(); private: SequenceChecker sequence_checker_; const PulsedPeaksConfig config_; - TrafficRoute* const traffic_route_ RTC_PT_GUARDED_BY(sequence_checker_); + CrossTrafficRoute* const traffic_route_ RTC_PT_GUARDED_BY(sequence_checker_); Timestamp last_update_time_ RTC_GUARDED_BY(sequence_checker_) = Timestamp::MinusInfinity(); @@ -149,23 +135,17 @@ class TcpMessageRouteImpl final : public TcpMessageRoute { TimeDelta last_rtt_ = TimeDelta::Zero(); }; -struct FakeTcpConfig { - DataSize packet_size = DataSize::Bytes(1200); - DataSize send_limit = DataSize::PlusInfinity(); - TimeDelta 
process_interval = TimeDelta::Millis(200); - TimeDelta packet_timeout = TimeDelta::Seconds(1); -}; - class FakeTcpCrossTraffic - : public TwoWayFakeTrafficRoute::TrafficHandlerInterface { + : public TwoWayFakeTrafficRoute::TrafficHandlerInterface, + public CrossTrafficGenerator { public: - FakeTcpCrossTraffic(Clock* clock, - FakeTcpConfig config, + FakeTcpCrossTraffic(FakeTcpConfig config, EmulatedRoute* send_route, EmulatedRoute* ret_route); - void Start(TaskQueueBase* task_queue); - void Stop(); - void Process(Timestamp at_time); + + TimeDelta GetProcessInterval() const override; + void Process(Timestamp at_time) override; + void OnRequest(int sequence_number, Timestamp at_time) override; void OnResponse(int sequence_number, Timestamp at_time) override; @@ -174,7 +154,6 @@ class FakeTcpCrossTraffic void SendPackets(Timestamp at_time); private: - Clock* const clock_; const FakeTcpConfig conf_; TwoWayFakeTrafficRoute route_; @@ -187,7 +166,6 @@ class FakeTcpCrossTraffic Timestamp last_reduction_time_ = Timestamp::MinusInfinity(); TimeDelta last_rtt_ = TimeDelta::Zero(); DataSize total_sent_ = DataSize::Zero(); - RepeatingTaskHandle repeating_task_handle_; }; } // namespace test diff --git a/test/network/cross_traffic_unittest.cc b/test/network/cross_traffic_unittest.cc index a3c7b42311..2744a90ce3 100644 --- a/test/network/cross_traffic_unittest.cc +++ b/test/network/cross_traffic_unittest.cc @@ -16,6 +16,8 @@ #include #include "absl/memory/memory.h" +#include "absl/types/optional.h" +#include "api/test/network_emulation_manager.h" #include "api/test/simulated_network.h" #include "call/simulated_network.h" #include "rtc_base/event.h" @@ -24,6 +26,7 @@ #include "test/gmock.h" #include "test/gtest.h" #include "test/network/network_emulation_manager.h" +#include "test/network/traffic_route.h" #include "test/time_controller/simulated_time_controller.h" namespace webrtc { @@ -46,19 +49,20 @@ struct TrafficCounterFixture { SimulatedClock clock{0}; CountingReceiver 
counter; TaskQueueForTest task_queue_; - EmulatedEndpointImpl endpoint{/*id=*/1, - rtc::IPAddress(kTestIpAddress), - /*is_enabled=*/true, - /*type=*/rtc::AdapterType::ADAPTER_TYPE_UNKNOWN, - &task_queue_, - &clock}; + EmulatedEndpointImpl endpoint{EmulatedEndpointImpl::Options{ + /*id=*/1, + rtc::IPAddress(kTestIpAddress), + EmulatedEndpointConfig(), + }, + /*is_enabled=*/true, &task_queue_, &clock}; }; } // namespace TEST(CrossTrafficTest, TriggerPacketBurst) { TrafficCounterFixture fixture; - TrafficRoute traffic(&fixture.clock, &fixture.counter, &fixture.endpoint); + CrossTrafficRouteImpl traffic(&fixture.clock, &fixture.counter, + &fixture.endpoint); traffic.TriggerPacketBurst(100, 1000); EXPECT_EQ(fixture.counter.packets_count_, 100); @@ -67,7 +71,8 @@ TEST(CrossTrafficTest, TriggerPacketBurst) { TEST(CrossTrafficTest, PulsedPeaksCrossTraffic) { TrafficCounterFixture fixture; - TrafficRoute traffic(&fixture.clock, &fixture.counter, &fixture.endpoint); + CrossTrafficRouteImpl traffic(&fixture.clock, &fixture.counter, + &fixture.endpoint); PulsedPeaksConfig config; config.peak_rate = DataRate::KilobitsPerSec(1000); @@ -92,7 +97,8 @@ TEST(CrossTrafficTest, PulsedPeaksCrossTraffic) { TEST(CrossTrafficTest, RandomWalkCrossTraffic) { TrafficCounterFixture fixture; - TrafficRoute traffic(&fixture.clock, &fixture.counter, &fixture.endpoint); + CrossTrafficRouteImpl traffic(&fixture.clock, &fixture.counter, + &fixture.endpoint); RandomWalkConfig config; config.peak_rate = DataRate::KilobitsPerSec(1000); diff --git a/test/network/emulated_network_manager.cc b/test/network/emulated_network_manager.cc index 2dc2fad5b0..ec8b2b3554 100644 --- a/test/network/emulated_network_manager.cc +++ b/test/network/emulated_network_manager.cc @@ -80,7 +80,8 @@ void EmulatedNetworkManager::StopUpdating() { } void EmulatedNetworkManager::GetStats( - std::function stats_callback) const { + std::function)> stats_callback) + const { task_queue_->PostTask([stats_callback, this]() { 
stats_callback(endpoints_container_->GetStats()); }); diff --git a/test/network/emulated_network_manager.h b/test/network/emulated_network_manager.h index 92555eee23..fd2bb5b665 100644 --- a/test/network/emulated_network_manager.h +++ b/test/network/emulated_network_manager.h @@ -11,17 +11,17 @@ #ifndef TEST_NETWORK_EMULATED_NETWORK_MANAGER_H_ #define TEST_NETWORK_EMULATED_NETWORK_MANAGER_H_ +#include #include #include +#include "api/sequence_checker.h" #include "api/test/network_emulation_manager.h" #include "api/test/time_controller.h" -#include "rtc_base/critical_section.h" #include "rtc_base/ip_address.h" #include "rtc_base/network.h" #include "rtc_base/socket_server.h" #include "rtc_base/thread.h" -#include "rtc_base/thread_checker.h" #include "test/network/network_emulation.h" namespace webrtc { @@ -50,15 +50,18 @@ class EmulatedNetworkManager : public rtc::NetworkManagerBase, // EmulatedNetworkManagerInterface API rtc::Thread* network_thread() override { return network_thread_.get(); } rtc::NetworkManager* network_manager() override { return this; } - void GetStats( - std::function stats_callback) const override; + std::vector endpoints() const override { + return endpoints_container_->GetEndpoints(); + } + void GetStats(std::function)> + stats_callback) const override; private: void UpdateNetworksOnce(); void MaybeSignalNetworksChanged(); TaskQueueForTest* const task_queue_; - EndpointsContainer* const endpoints_container_; + const EndpointsContainer* const endpoints_container_; std::unique_ptr network_thread_; bool sent_first_update_ RTC_GUARDED_BY(network_thread_); diff --git a/test/network/emulated_turn_server.cc b/test/network/emulated_turn_server.cc new file mode 100644 index 0000000000..d67e4e337a --- /dev/null +++ b/test/network/emulated_turn_server.cc @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "test/network/emulated_turn_server.h" + +#include +#include + +#include "api/packet_socket_factory.h" +#include "rtc_base/strings/string_builder.h" + +namespace { + +static const char kTestRealm[] = "example.org"; +static const char kTestSoftware[] = "TestTurnServer"; + +// A wrapper class for copying data between an AsyncPacketSocket and a +// EmulatedEndpoint. This is used by the cricket::TurnServer when +// sending data back into the emulated network. +class AsyncPacketSocketWrapper : public rtc::AsyncPacketSocket { + public: + AsyncPacketSocketWrapper(webrtc::test::EmulatedTURNServer* turn_server, + webrtc::EmulatedEndpoint* endpoint, + uint16_t port) + : turn_server_(turn_server), + endpoint_(endpoint), + local_address_( + rtc::SocketAddress(endpoint_->GetPeerLocalAddress(), port)) {} + ~AsyncPacketSocketWrapper() { turn_server_->Unbind(local_address_); } + + rtc::SocketAddress GetLocalAddress() const override { return local_address_; } + rtc::SocketAddress GetRemoteAddress() const override { + return rtc::SocketAddress(); + } + int Send(const void* pv, + size_t cb, + const rtc::PacketOptions& options) override { + RTC_CHECK(false) << "TCP not implemented"; + return -1; + } + int SendTo(const void* pv, + size_t cb, + const rtc::SocketAddress& addr, + const rtc::PacketOptions& options) override { + // Copy from rtc::AsyncPacketSocket to EmulatedEndpoint. 
+ rtc::CopyOnWriteBuffer buf(reinterpret_cast(pv), cb); + endpoint_->SendPacket(local_address_, addr, buf); + return cb; + } + int Close() override { return 0; } + + rtc::AsyncPacketSocket::State GetState() const override { + return rtc::AsyncPacketSocket::STATE_BOUND; + } + int GetOption(rtc::Socket::Option opt, int* value) override { return 0; } + int SetOption(rtc::Socket::Option opt, int value) override { return 0; } + int GetError() const override { return 0; } + void SetError(int error) override {} + + private: + webrtc::test::EmulatedTURNServer* const turn_server_; + webrtc::EmulatedEndpoint* const endpoint_; + const rtc::SocketAddress local_address_; +}; + +// A wrapper class for cricket::TurnServer to allocate sockets. +class PacketSocketFactoryWrapper : public rtc::PacketSocketFactory { + public: + explicit PacketSocketFactoryWrapper( + webrtc::test::EmulatedTURNServer* turn_server) + : turn_server_(turn_server) {} + ~PacketSocketFactoryWrapper() override {} + + // This method is called from TurnServer when making a TURN ALLOCATION. + // It will create a socket on the |peer_| endpoint. 
+ rtc::AsyncPacketSocket* CreateUdpSocket(const rtc::SocketAddress& address, + uint16_t min_port, + uint16_t max_port) override { + return turn_server_->CreatePeerSocket(); + } + + rtc::AsyncPacketSocket* CreateServerTcpSocket( + const rtc::SocketAddress& local_address, + uint16_t min_port, + uint16_t max_port, + int opts) override { + return nullptr; + } + rtc::AsyncPacketSocket* CreateClientTcpSocket( + const rtc::SocketAddress& local_address, + const rtc::SocketAddress& remote_address, + const rtc::ProxyInfo& proxy_info, + const std::string& user_agent, + const rtc::PacketSocketTcpOptions& tcp_options) override { + return nullptr; + } + rtc::AsyncResolverInterface* CreateAsyncResolver() override { + return nullptr; + } + + private: + webrtc::test::EmulatedTURNServer* turn_server_; +}; + +} // namespace + +namespace webrtc { +namespace test { + +EmulatedTURNServer::EmulatedTURNServer(std::unique_ptr thread, + EmulatedEndpoint* client, + EmulatedEndpoint* peer) + : thread_(std::move(thread)), client_(client), peer_(peer) { + ice_config_.username = "keso"; + ice_config_.password = "keso"; + thread_->Invoke(RTC_FROM_HERE, [=]() { + RTC_DCHECK_RUN_ON(thread_.get()); + turn_server_ = std::make_unique(thread_.get()); + turn_server_->set_realm(kTestRealm); + turn_server_->set_realm(kTestSoftware); + turn_server_->set_auth_hook(this); + + auto client_socket = Wrap(client_); + turn_server_->AddInternalSocket(client_socket, cricket::PROTO_UDP); + turn_server_->SetExternalSocketFactory(new PacketSocketFactoryWrapper(this), + rtc::SocketAddress()); + client_address_ = client_socket->GetLocalAddress(); + char buf[256]; + rtc::SimpleStringBuilder str(buf); + str.AppendFormat("turn:%s?transport=udp", + client_address_.ToString().c_str()); + ice_config_.url = str.str(); + }); +} + +void EmulatedTURNServer::Stop() { + thread_->Invoke(RTC_FROM_HERE, [=]() { + RTC_DCHECK_RUN_ON(thread_.get()); + sockets_.clear(); + }); +} + +EmulatedTURNServer::~EmulatedTURNServer() { + 
thread_->Invoke(RTC_FROM_HERE, [=]() { + RTC_DCHECK_RUN_ON(thread_.get()); + turn_server_.reset(nullptr); + }); +} + +rtc::AsyncPacketSocket* EmulatedTURNServer::Wrap(EmulatedEndpoint* endpoint) { + RTC_DCHECK_RUN_ON(thread_.get()); + auto port = endpoint->BindReceiver(0, this).value(); + auto socket = new AsyncPacketSocketWrapper(this, endpoint, port); + sockets_[rtc::SocketAddress(endpoint->GetPeerLocalAddress(), port)] = socket; + return socket; +} + +void EmulatedTURNServer::OnPacketReceived(webrtc::EmulatedIpPacket packet) { + // Copy from EmulatedEndpoint to rtc::AsyncPacketSocket. + thread_->PostTask(RTC_FROM_HERE, [this, packet(std::move(packet))]() { + RTC_DCHECK_RUN_ON(thread_.get()); + auto it = sockets_.find(packet.to); + if (it != sockets_.end()) { + it->second->SignalReadPacket( + it->second, reinterpret_cast(packet.cdata()), + packet.size(), packet.from, packet.arrival_time.ms()); + } + }); +} + +void EmulatedTURNServer::Unbind(rtc::SocketAddress address) { + RTC_DCHECK_RUN_ON(thread_.get()); + if (GetClientEndpoint()->GetPeerLocalAddress() == address.ipaddr()) { + GetClientEndpoint()->UnbindReceiver(address.port()); + } else { + GetPeerEndpoint()->UnbindReceiver(address.port()); + } + sockets_.erase(address); +} + +} // namespace test +} // namespace webrtc diff --git a/test/network/emulated_turn_server.h b/test/network/emulated_turn_server.h new file mode 100644 index 0000000000..f91124d4d6 --- /dev/null +++ b/test/network/emulated_turn_server.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef TEST_NETWORK_EMULATED_TURN_SERVER_H_ +#define TEST_NETWORK_EMULATED_TURN_SERVER_H_ + +#include +#include +#include + +#include "api/test/network_emulation_manager.h" +#include "api/transport/stun.h" +#include "p2p/base/turn_server.h" +#include "rtc_base/async_packet_socket.h" + +namespace webrtc { +namespace test { + +// EmulatedTURNServer wraps cricket::TurnServer to be used inside +// a emulated network. +// +// Packets from EmulatedEndpoint (client or peer) are received in +// EmulatedTURNServer::OnPacketReceived which performs a map lookup +// and delivers them into cricket::TurnServer using +// AsyncPacketSocket::SignalReadPacket +// +// Packets from cricket::TurnServer to EmulatedEndpoint are sent into +// using a wrapper around AsyncPacketSocket (no lookup required as the +// wrapper around AsyncPacketSocket keep a pointer to the EmulatedEndpoint). +class EmulatedTURNServer : public EmulatedTURNServerInterface, + public cricket::TurnAuthInterface, + public webrtc::EmulatedNetworkReceiverInterface { + public: + // Create an EmulatedTURNServer. + // |thread| is a thread that will be used to run cricket::TurnServer + // that expects all calls to be made from a single thread. 
+ EmulatedTURNServer(std::unique_ptr thread, + EmulatedEndpoint* client, + EmulatedEndpoint* peer); + ~EmulatedTURNServer() override; + + IceServerConfig GetIceServerConfig() const override { return ice_config_; } + + EmulatedEndpoint* GetClientEndpoint() const override { return client_; } + + rtc::SocketAddress GetClientEndpointAddress() const override { + return client_address_; + } + + EmulatedEndpoint* GetPeerEndpoint() const override { return peer_; } + + // cricket::TurnAuthInterface + bool GetKey(const std::string& username, + const std::string& realm, + std::string* key) override { + return cricket::ComputeStunCredentialHash(username, realm, username, key); + } + + rtc::AsyncPacketSocket* CreatePeerSocket() { return Wrap(peer_); } + + // This method is called by network emulation when a packet + // comes from an emulated link. + void OnPacketReceived(webrtc::EmulatedIpPacket packet) override; + + // This is called when the TURN server deletes a socket. + void Unbind(rtc::SocketAddress address); + + // Unbind all sockets. + void Stop(); + + private: + std::unique_ptr thread_; + rtc::SocketAddress client_address_; + IceServerConfig ice_config_; + EmulatedEndpoint* const client_; + EmulatedEndpoint* const peer_; + std::unique_ptr turn_server_ RTC_GUARDED_BY(&thread_); + std::map sockets_ + RTC_GUARDED_BY(&thread_); + + // Wraps a EmulatedEndpoint in a AsyncPacketSocket to bridge interaction + // with TurnServer. cricket::TurnServer gets ownership of the socket. 
+ rtc::AsyncPacketSocket* Wrap(EmulatedEndpoint* endpoint); +}; + +} // namespace test +} // namespace webrtc + +#endif // TEST_NETWORK_EMULATED_TURN_SERVER_H_ diff --git a/test/network/fake_network_socket_server.cc b/test/network/fake_network_socket_server.cc index 60dfbe33d5..bf6ef5f12d 100644 --- a/test/network/fake_network_socket_server.cc +++ b/test/network/fake_network_socket_server.cc @@ -16,8 +16,10 @@ #include #include "absl/algorithm/container.h" -#include "rtc_base/async_invoker.h" +#include "api/scoped_refptr.h" #include "rtc_base/logging.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/thread.h" namespace webrtc { @@ -74,7 +76,7 @@ class FakeNetworkSocket : public rtc::AsyncSocket, std::map options_map_ RTC_GUARDED_BY(&thread_); absl::optional pending_ RTC_GUARDED_BY(thread_); - rtc::AsyncInvoker invoker_; + rtc::scoped_refptr alive_; }; FakeNetworkSocket::FakeNetworkSocket(FakeNetworkSocketServer* socket_server, @@ -82,9 +84,13 @@ FakeNetworkSocket::FakeNetworkSocket(FakeNetworkSocketServer* socket_server, : socket_server_(socket_server), thread_(thread), state_(CS_CLOSED), - error_(0) {} + error_(0), + alive_(PendingTaskSafetyFlag::Create()) {} FakeNetworkSocket::~FakeNetworkSocket() { + // Abandon all pending packets. 
+ alive_->SetNotAlive(); + Close(); socket_server_->Unregister(this); } @@ -103,7 +109,7 @@ void FakeNetworkSocket::OnPacketReceived(EmulatedIpPacket packet) { SignalReadEvent(this); RTC_DCHECK(!pending_); }; - invoker_.AsyncInvoke(RTC_FROM_HERE, thread_, std::move(task)); + thread_->PostTask(ToQueuedTask(alive_, std::move(task))); socket_server_->WakeUp(); } @@ -270,17 +276,13 @@ FakeNetworkSocketServer::FakeNetworkSocketServer( wakeup_(/*manual_reset=*/false, /*initially_signaled=*/false) {} FakeNetworkSocketServer::~FakeNetworkSocketServer() = default; -void FakeNetworkSocketServer::OnMessageQueueDestroyed() { - thread_ = nullptr; -} - EmulatedEndpointImpl* FakeNetworkSocketServer::GetEndpointNode( const rtc::IPAddress& ip) { return endpoints_container_->LookupByLocalAddress(ip); } void FakeNetworkSocketServer::Unregister(FakeNetworkSocket* socket) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); sockets_.erase(absl::c_find(sockets_, socket)); } @@ -297,7 +299,7 @@ rtc::AsyncSocket* FakeNetworkSocketServer::CreateAsyncSocket(int family, RTC_DCHECK(thread_) << "must be attached to thread before creating sockets"; FakeNetworkSocket* out = new FakeNetworkSocket(this, thread_); { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); sockets_.push_back(out); } return out; @@ -305,10 +307,6 @@ rtc::AsyncSocket* FakeNetworkSocketServer::CreateAsyncSocket(int family, void FakeNetworkSocketServer::SetMessageQueue(rtc::Thread* thread) { thread_ = thread; - if (thread_) { - thread_->SignalQueueDestroyed.connect( - this, &FakeNetworkSocketServer::OnMessageQueueDestroyed); - } } // Always returns true (if return false, it won't be invoked again...) 
diff --git a/test/network/fake_network_socket_server.h b/test/network/fake_network_socket_server.h
index 3a007588e3..d8be2e24b8 100644
--- a/test/network/fake_network_socket_server.h
+++ b/test/network/fake_network_socket_server.h
@@ -16,10 +16,9 @@
 
 #include "api/units/timestamp.h"
 #include "rtc_base/async_socket.h"
-#include "rtc_base/critical_section.h"
 #include "rtc_base/event.h"
 #include "rtc_base/socket_server.h"
-#include "rtc_base/third_party/sigslot/sigslot.h"
+#include "rtc_base/synchronization/mutex.h"
 #include "system_wrappers/include/clock.h"
 #include "test/network/network_emulation.h"
 
@@ -28,8 +27,7 @@ namespace test {
 class FakeNetworkSocket;
 
 // FakeNetworkSocketServer must outlive any sockets it creates.
-class FakeNetworkSocketServer : public rtc::SocketServer,
-                                public sigslot::has_slots<> {
+class FakeNetworkSocketServer : public rtc::SocketServer {
  public:
   explicit FakeNetworkSocketServer(EndpointsContainer* endpoints_controller);
   ~FakeNetworkSocketServer() override;
@@ -52,13 +50,11 @@ class FakeNetworkSocketServer : public rtc::SocketServer,
   void Unregister(FakeNetworkSocket* socket);
 
  private:
-  void OnMessageQueueDestroyed();
-
   const EndpointsContainer* endpoints_container_;
   rtc::Event wakeup_;
   rtc::Thread* thread_ = nullptr;
 
-  rtc::CriticalSection lock_;
+  Mutex lock_;
   std::vector<FakeNetworkSocket*> sockets_ RTC_GUARDED_BY(lock_);
 };
diff --git a/test/network/g3doc/g3doc.lua b/test/network/g3doc/g3doc.lua
new file mode 100644
index 0000000000..981393c826
--- /dev/null
+++ b/test/network/g3doc/g3doc.lua
@@ -0,0 +1,5 @@
+config = super()
+
+config.freshness.owner = 'titovartem'
+
+return config
diff --git a/test/network/g3doc/index.md b/test/network/g3doc/index.md
new file mode 100644
index 0000000000..5d511916c1
--- /dev/null
+++ b/test/network/g3doc/index.md
@@ -0,0 +1,136 @@
+# Network Emulation Framework
+
+
+
+[TOC]
+
+## Disclaimer
+
+This documentation explains the implementation details of Network Emulation
+Framework. Framework's public APIs are located in:
+
+* [`/api/test/network_emulation_manager.h`](https://source.chromium.org/search?q=%2Fapi%2Ftest%2Fnetwork_emulation_manager.h)
+* [`/api/test/create_network_emulation_manager.h`](https://source.chromium.org/search?q=%2Fapi%2Ftest%2Fcreate_network_emulation_manager.h)
+* [`/api/test/network_emulation/network_emulation_interfaces.h`](https://source.chromium.org/search?q=%2Fapi%2Ftest%2Fnetwork_emulation%2Fnetwork_emulation_interfaces.h)
+* [`/api/test/simulated_network.h`](https://source.chromium.org/search?q=%2Fapi%2Ftest%2Fsimulated_network.h)
+
+## Overview
+
+Network Emulation Framework provides the ability to emulate network behavior
+between different clients, including a WebRTC PeerConnection client. To
+configure network behavior, the user can choose different options:
+
+* Use a predefined implementation that can be configured with parameters such
+  as packet loss, bandwidth, delay, etc.
+* Custom implementation
+
+Conceptually the framework provides the ability to define multiple endpoints
+and routes used to connect them. All network related entities are created and
+managed by a single factory class `webrtc::NetworkEmulationManager`, which is
+implemented by `webrtc::test::NetworkEmulationManagerImpl` and can work in two
+modes:
+
+* Real time
+* Simulated time
+
+The manager has a dedicated task queue which pipes all packets through all
+network routes from senders to receivers. This task queue behaviour is
+determined by `webrtc::TimeController`, which runs either in real time or in
+simulated time mode.
+
+The network operates on the IP level and supports only UDP for now.
+
+## Abstractions
+
+The framework contains the following public abstractions:
+
+* `webrtc::NetworkBehaviorInterface` - defines how the emulated network should
+  behave. It operates on the packet metadata level and is responsible for
+  telling which packet has to be delivered to the next receiver and at which
+  time.
+
+* `webrtc::EmulatedIpPacket` - represents a single packet that can be sent or
+  received via the emulated network. It has source and destination address and
+  payload to transfer.
+
+* `webrtc::EmulatedNetworkReceiverInterface` - generic packet receiver
+  interface.
+
+* `webrtc::EmulatedEndpoint` - primary user-facing abstraction of the
+  framework. It represents a network interface on the client's machine. It has
+  its own unique IP address and can be used to send and receive packets.
+
+  `EmulatedEndpoint` implements `EmulatedNetworkReceiverInterface` to receive
+  packets from the network and provides an API to send packets to the network
+  and an API to bind other `EmulatedNetworkReceiverInterface` which will be
+  able to receive packets from the endpoint. The `EmulatedEndpoint` interface
+  has only one implementation: `webrtc::test::EmulatedEndpointImpl`.
+
+* `webrtc::EmulatedNetworkNode` - represents a single network in the real
+  world, like a 3G network between peers, or Wi-Fi for one peer and LTE for
+  another. Each `EmulatedNetworkNode` is a single-direction connection, and to
+  form a bidirectional connection between endpoints two nodes should be used.
+  Multiple nodes can be joined into a chain emulating a network path from one
+  peer to another.
+
+  In the public API this class is forward declared and fully accessible only
+  by the framework implementation.
+
+  Internally it consists of two parts: `LinkEmulation`, which is responsible
+  for the behavior of the current `EmulatedNetworkNode`, and
+  `NetworkRouterNode`, which is responsible for routing packets to the next
+  node or to the endpoint.
+
+* `webrtc::EmulatedRoute` - represents a single route from one network
+  interface on one device to another network interface on another device.
+
+  In the public API this class is forward declared and fully accessible only
+  by the framework implementation.
+ + It contains start and end endpoint and ordered list of `EmulatedNetworkNode` + which forms the single directional route between those endpoints. + +The framework has also the following private abstractions: + +* `webrtc::test::NetworkRouterNode` - an `EmulatedNetworkReceiverInterface` + that can route incoming packets to the next receiver based on internal IP + routing table. + +* `webrtc::test::LinkEmulation` - an `EmulatedNetworkReceiverInterface` that + can emulate network leg behavior via `webrtc::NetworkBehaviorInterface` + interface. + +For integrating with `webrtc::PeerConnection` there are helper abstractions: + +* `webrtc::EmulatedNetworkManagerInterface` which is implemented by + `webrtc::test::EmulatedNetworkManager` and provides `rtc::Thread` and + `rtc::NetworkManager` for WebRTC to use as network thread for + `PeerConnection` and for `cricket::BasicPortAllocator`. + + Implementation represent framework endpoints as `rtc::Network` to WebRTC. + +## Architecture + +Let's take a look on how framework's abstractions are connected to each other. + +When the user wants to setup emulated network, first of all, they should create +an instance of `NetworkEmulationManager` using +`webrtc::CreateNetworkEmulationManager(...)` API. Then user should use a manager +to create at least one `EmulatedEndpoint` for each client. After endpoints, the +user should create required `EmulatedNetworkNode`s and with help of manager +chain them into `EmulatedRoute`s conecting desired endpoints. + +Here is a visual overview of the emulated network architecture: + +![Architecture](network_emulation_framework.png "Architecture") + +When network is hooked into `PeerConnection` it is done through network thread +and `NetworkManager`. In the network thread the custom `rtc::SocketServer` is +provided: `webrtc::test::FakeNetworkSocketServer`. 
This custom socket server +will construct custom sockets (`webrtc::test::FakeNetworkSocket`), which +internally bind themselves to the required endpoint. All packets processing +inside socket have to be done on the `PeerConnection`'s network thread. When +packet is going from `PeerConnection` to the network it's already comming from +the network thread and when it's comming from the emulated network switch from +the Network Emulation Framework internal task queue and `PeerConnection`'s +network thread is done inside socket's `OnPacketReceived(...)` method. + +![Network Injection](network_injection_into_peer_connection.png "Network Injection") diff --git a/test/network/g3doc/network_emulation_framework.png b/test/network/g3doc/network_emulation_framework.png new file mode 100644 index 0000000000..afec47773f Binary files /dev/null and b/test/network/g3doc/network_emulation_framework.png differ diff --git a/test/network/g3doc/network_injection_into_peer_connection.png b/test/network/g3doc/network_injection_into_peer_connection.png new file mode 100644 index 0000000000..c9e3bf8baf Binary files /dev/null and b/test/network/g3doc/network_injection_into_peer_connection.png differ diff --git a/test/network/network_emulation.cc b/test/network/network_emulation.cc index 3cb3def2f2..ada9ab542a 100644 --- a/test/network/network_emulation.cc +++ b/test/network/network_emulation.cc @@ -13,13 +13,264 @@ #include #include #include +#include +#include "absl/types/optional.h" +#include "api/numerics/samples_stats_counter.h" +#include "api/test/network_emulation/network_emulation_interfaces.h" #include "api/units/data_size.h" -#include "rtc_base/bind.h" #include "rtc_base/logging.h" namespace webrtc { +DataRate EmulatedNetworkOutgoingStatsImpl::AverageSendRate() const { + RTC_DCHECK_GE(packets_sent_, 2); + RTC_DCHECK(first_packet_sent_time_.IsFinite()); + RTC_DCHECK(last_packet_sent_time_.IsFinite()); + return (bytes_sent_ - first_sent_packet_size_) / + (last_packet_sent_time_ - 
first_packet_sent_time_); +} + +DataRate EmulatedNetworkIncomingStatsImpl::AverageReceiveRate() const { + RTC_DCHECK_GE(packets_received_, 2); + RTC_DCHECK(first_packet_received_time_.IsFinite()); + RTC_DCHECK(last_packet_received_time_.IsFinite()); + return (bytes_received_ - first_received_packet_size_) / + (last_packet_received_time_ - first_packet_received_time_); +} + +std::map> +EmulatedNetworkStatsImpl::OutgoingStatsPerDestination() const { + std::map> out; + for (const auto& entry : outgoing_stats_per_destination_) { + out.emplace(entry.first, std::make_unique( + *entry.second)); + } + return out; +} + +std::map> +EmulatedNetworkStatsImpl::IncomingStatsPerSource() const { + std::map> out; + for (const auto& entry : incoming_stats_per_source_) { + out.emplace(entry.first, std::make_unique( + *entry.second)); + } + return out; +} + +std::unique_ptr +EmulatedNetworkStatsImpl::GetOverallOutgoingStats() const { + EmulatedNetworkOutgoingStatsBuilder builder; + for (const auto& entry : outgoing_stats_per_destination_) { + builder.AddOutgoingStats(*entry.second); + } + return builder.Build(); +} + +std::unique_ptr +EmulatedNetworkStatsImpl::GetOverallIncomingStats() const { + EmulatedNetworkIncomingStatsBuilder builder; + for (const auto& entry : incoming_stats_per_source_) { + builder.AddIncomingStats(*entry.second); + } + return builder.Build(); +} + +EmulatedNetworkOutgoingStatsBuilder::EmulatedNetworkOutgoingStatsBuilder() { + sequence_checker_.Detach(); +} + +void EmulatedNetworkOutgoingStatsBuilder::OnPacketSent( + Timestamp sent_time, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_CHECK_GE(packet_size, DataSize::Zero()); + if (first_packet_sent_time_.IsInfinite()) { + first_packet_sent_time_ = sent_time; + first_sent_packet_size_ = packet_size; + } + last_packet_sent_time_ = sent_time; + packets_sent_++; + bytes_sent_ += packet_size; + if (mode == 
EmulatedEndpointConfig::StatsGatheringMode::kDebug) { + sent_packets_size_counter_.AddSample(packet_size.bytes()); + } +} + +void EmulatedNetworkOutgoingStatsBuilder::AddOutgoingStats( + const EmulatedNetworkOutgoingStats& stats) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + packets_sent_ += stats.PacketsSent(); + bytes_sent_ += stats.BytesSent(); + sent_packets_size_counter_.AddSamples(stats.SentPacketsSizeCounter()); + if (first_packet_sent_time_ > stats.FirstPacketSentTime()) { + first_packet_sent_time_ = stats.FirstPacketSentTime(); + first_sent_packet_size_ = stats.FirstSentPacketSize(); + } + if (last_packet_sent_time_ < stats.LastPacketSentTime()) { + last_packet_sent_time_ = stats.LastPacketSentTime(); + } +} + +std::unique_ptr +EmulatedNetworkOutgoingStatsBuilder::Build() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return std::make_unique( + packets_sent_, bytes_sent_, sent_packets_size_counter_, + first_sent_packet_size_, first_packet_sent_time_, last_packet_sent_time_); +} + +EmulatedNetworkIncomingStatsBuilder::EmulatedNetworkIncomingStatsBuilder() { + sequence_checker_.Detach(); +} + +void EmulatedNetworkIncomingStatsBuilder::OnPacketDropped( + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + packets_dropped_++; + bytes_dropped_ += packet_size; + if (mode == EmulatedEndpointConfig::StatsGatheringMode::kDebug) { + dropped_packets_size_counter_.AddSample(packet_size.bytes()); + } +} + +void EmulatedNetworkIncomingStatsBuilder::OnPacketReceived( + Timestamp received_time, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + RTC_CHECK_GE(packet_size, DataSize::Zero()); + if (first_packet_received_time_.IsInfinite()) { + first_packet_received_time_ = received_time; + first_received_packet_size_ = packet_size; + } + last_packet_received_time_ = received_time; + packets_received_++; + bytes_received_ += packet_size; + 
if (mode == EmulatedEndpointConfig::StatsGatheringMode::kDebug) { + received_packets_size_counter_.AddSample(packet_size.bytes()); + } +} + +void EmulatedNetworkIncomingStatsBuilder::AddIncomingStats( + const EmulatedNetworkIncomingStats& stats) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + packets_received_ += stats.PacketsReceived(); + bytes_received_ += stats.BytesReceived(); + received_packets_size_counter_.AddSamples(stats.ReceivedPacketsSizeCounter()); + packets_dropped_ += stats.PacketsDropped(); + bytes_dropped_ += stats.BytesDropped(); + dropped_packets_size_counter_.AddSamples(stats.DroppedPacketsSizeCounter()); + if (first_packet_received_time_ > stats.FirstPacketReceivedTime()) { + first_packet_received_time_ = stats.FirstPacketReceivedTime(); + first_received_packet_size_ = stats.FirstReceivedPacketSize(); + } + if (last_packet_received_time_ < stats.LastPacketReceivedTime()) { + last_packet_received_time_ = stats.LastPacketReceivedTime(); + } +} + +std::unique_ptr +EmulatedNetworkIncomingStatsBuilder::Build() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return std::make_unique( + packets_received_, bytes_received_, received_packets_size_counter_, + packets_dropped_, bytes_dropped_, dropped_packets_size_counter_, + first_received_packet_size_, first_packet_received_time_, + last_packet_received_time_); +} + +EmulatedNetworkStatsBuilder::EmulatedNetworkStatsBuilder() { + sequence_checker_.Detach(); +} + +EmulatedNetworkStatsBuilder::EmulatedNetworkStatsBuilder( + rtc::IPAddress local_ip) { + local_addresses_.push_back(local_ip); + sequence_checker_.Detach(); +} + +void EmulatedNetworkStatsBuilder::OnPacketSent( + Timestamp queued_time, + Timestamp sent_time, + rtc::IPAddress destination_ip, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + if (mode == EmulatedEndpointConfig::StatsGatheringMode::kDebug) { + sent_packets_queue_wait_time_us_.AddSample((sent_time - 
queued_time).us()); + } + outgoing_stats_per_destination_[destination_ip].OnPacketSent( + sent_time, packet_size, mode); +} + +void EmulatedNetworkStatsBuilder::OnPacketDropped( + rtc::IPAddress source_ip, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + incoming_stats_per_source_[source_ip].OnPacketDropped(packet_size, mode); +} + +void EmulatedNetworkStatsBuilder::OnPacketReceived( + Timestamp received_time, + rtc::IPAddress source_ip, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + incoming_stats_per_source_[source_ip].OnPacketReceived(received_time, + packet_size, mode); +} + +void EmulatedNetworkStatsBuilder::AddEmulatedNetworkStats( + const EmulatedNetworkStats& stats) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + + // Append IPs from other endpoints stats to the builder. + for (const rtc::IPAddress& addr : stats.LocalAddresses()) { + local_addresses_.push_back(addr); + } + + sent_packets_queue_wait_time_us_.AddSamples( + stats.SentPacketsQueueWaitTimeUs()); + + // Add outgoing stats from other endpoints to the builder. + const std::map> + outgoing_stats_per_destination = stats.OutgoingStatsPerDestination(); + for (const auto& entry : outgoing_stats_per_destination) { + outgoing_stats_per_destination_[entry.first].AddOutgoingStats( + *entry.second); + } + + // Add incoming stats from other endpoints to the builder. 
+ const std::map> + incoming_stats_per_source = stats.IncomingStatsPerSource(); + for (const auto& entry : incoming_stats_per_source) { + incoming_stats_per_source_[entry.first].AddIncomingStats(*entry.second); + } +} + +std::unique_ptr EmulatedNetworkStatsBuilder::Build() + const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + std::map> + outgoing_stats; + for (const auto& entry : outgoing_stats_per_destination_) { + outgoing_stats.emplace(entry.first, entry.second.Build()); + } + std::map> + incoming_stats; + for (const auto& entry : incoming_stats_per_source_) { + incoming_stats.emplace(entry.first, entry.second.Build()); + } + return std::make_unique( + local_addresses_, sent_packets_queue_wait_time_us_, + std::move(outgoing_stats), std::move(incoming_stats)); +} + void LinkEmulation::OnPacketReceived(EmulatedIpPacket packet) { task_queue_->PostTask([this, packet = std::move(packet)]() mutable { RTC_DCHECK_RUN_ON(task_queue_); @@ -97,6 +348,9 @@ void NetworkRouterNode::OnPacketReceived(EmulatedIpPacket packet) { } auto receiver_it = routing_.find(packet.to.ipaddr()); if (receiver_it == routing_.end()) { + if (default_receiver_.has_value()) { + (*default_receiver_)->OnPacketReceived(std::move(packet)); + } return; } RTC_CHECK(receiver_it != routing_.end()); @@ -121,6 +375,23 @@ void NetworkRouterNode::RemoveReceiver(const rtc::IPAddress& dest_ip) { routing_.erase(dest_ip); } +void NetworkRouterNode::SetDefaultReceiver( + EmulatedNetworkReceiverInterface* receiver) { + task_queue_->PostTask([=] { + RTC_DCHECK_RUN_ON(task_queue_); + if (default_receiver_.has_value()) { + RTC_CHECK_EQ(*default_receiver_, receiver) + << "Router already default receiver"; + } + default_receiver_ = receiver; + }); +} + +void NetworkRouterNode::RemoveDefaultReceiver() { + RTC_DCHECK_RUN_ON(task_queue_); + default_receiver_ = absl::nullopt; +} + void NetworkRouterNode::SetWatcher( std::function watcher) { task_queue_->PostTask([=] { @@ -166,69 +437,96 @@ void 
EmulatedNetworkNode::ClearRoute(const rtc::IPAddress& receiver_ip, EmulatedNetworkNode::~EmulatedNetworkNode() = default; -EmulatedEndpointImpl::EmulatedEndpointImpl(uint64_t id, - const rtc::IPAddress& ip, +EmulatedEndpointImpl::Options::Options(uint64_t id, + const rtc::IPAddress& ip, + const EmulatedEndpointConfig& config) + : id(id), + ip(ip), + stats_gathering_mode(config.stats_gathering_mode), + type(config.type), + allow_send_packet_with_different_source_ip( + config.allow_send_packet_with_different_source_ip), + allow_receive_packets_with_different_dest_ip( + config.allow_receive_packets_with_different_dest_ip), + log_name(ip.ToString() + " (" + config.name.value_or("") + ")") {} + +EmulatedEndpointImpl::EmulatedEndpointImpl(const Options& options, bool is_enabled, - rtc::AdapterType type, rtc::TaskQueue* task_queue, Clock* clock) - : id_(id), - peer_local_addr_(ip), + : options_(options), is_enabled_(is_enabled), - type_(type), clock_(clock), task_queue_(task_queue), router_(task_queue_), - next_port_(kFirstEphemeralPort) { + next_port_(kFirstEphemeralPort), + stats_builder_(options_.ip) { constexpr int kIPv4NetworkPrefixLength = 24; constexpr int kIPv6NetworkPrefixLength = 64; int prefix_length = 0; - if (ip.family() == AF_INET) { + if (options_.ip.family() == AF_INET) { prefix_length = kIPv4NetworkPrefixLength; - } else if (ip.family() == AF_INET6) { + } else if (options_.ip.family() == AF_INET6) { prefix_length = kIPv6NetworkPrefixLength; } - rtc::IPAddress prefix = TruncateIP(ip, prefix_length); + rtc::IPAddress prefix = TruncateIP(options_.ip, prefix_length); network_ = std::make_unique( - ip.ToString(), "Endpoint id=" + std::to_string(id_), prefix, - prefix_length, type_); - network_->AddIP(ip); + options_.ip.ToString(), "Endpoint id=" + std::to_string(options_.id), + prefix, prefix_length, options_.type); + network_->AddIP(options_.ip); enabled_state_checker_.Detach(); + RTC_LOG(INFO) << "Created emulated endpoint " << options_.log_name + << "; id=" 
<< options_.id; } EmulatedEndpointImpl::~EmulatedEndpointImpl() = default; uint64_t EmulatedEndpointImpl::GetId() const { - return id_; + return options_.id; } void EmulatedEndpointImpl::SendPacket(const rtc::SocketAddress& from, const rtc::SocketAddress& to, rtc::CopyOnWriteBuffer packet_data, uint16_t application_overhead) { - RTC_CHECK(from.ipaddr() == peer_local_addr_); + if (!options_.allow_send_packet_with_different_source_ip) { + RTC_CHECK(from.ipaddr() == options_.ip); + } EmulatedIpPacket packet(from, to, std::move(packet_data), clock_->CurrentTime(), application_overhead); task_queue_->PostTask([this, packet = std::move(packet)]() mutable { RTC_DCHECK_RUN_ON(task_queue_); - Timestamp current_time = clock_->CurrentTime(); - if (stats_.first_packet_sent_time.IsInfinite()) { - stats_.first_packet_sent_time = current_time; - stats_.first_sent_packet_size = DataSize::Bytes(packet.ip_packet_size()); + stats_builder_.OnPacketSent(packet.arrival_time, clock_->CurrentTime(), + packet.to.ipaddr(), + DataSize::Bytes(packet.ip_packet_size()), + options_.stats_gathering_mode); + + if (packet.to.ipaddr() == options_.ip) { + OnPacketReceived(std::move(packet)); + } else { + router_.OnPacketReceived(std::move(packet)); } - stats_.last_packet_sent_time = current_time; - stats_.packets_sent++; - stats_.bytes_sent += DataSize::Bytes(packet.ip_packet_size()); - - router_.OnPacketReceived(std::move(packet)); }); } absl::optional EmulatedEndpointImpl::BindReceiver( uint16_t desired_port, EmulatedNetworkReceiverInterface* receiver) { - rtc::CritScope crit(&receiver_lock_); + return BindReceiverInternal(desired_port, receiver, /*is_one_shot=*/false); +} + +absl::optional EmulatedEndpointImpl::BindOneShotReceiver( + uint16_t desired_port, + EmulatedNetworkReceiverInterface* receiver) { + return BindReceiverInternal(desired_port, receiver, /*is_one_shot=*/true); +} + +absl::optional EmulatedEndpointImpl::BindReceiverInternal( + uint16_t desired_port, + 
EmulatedNetworkReceiverInterface* receiver, + bool is_one_shot) { + MutexLock lock(&receiver_lock_); uint16_t port = desired_port; if (port == 0) { // Because client can specify its own port, next_port_ can be already in @@ -244,15 +542,17 @@ absl::optional EmulatedEndpointImpl::BindReceiver( } } RTC_CHECK(port != 0) << "Can't find free port for receiver in endpoint " - << id_; - bool result = port_to_receiver_.insert({port, receiver}).second; + << options_.log_name << "; id=" << options_.id; + bool result = + port_to_receiver_.insert({port, {receiver, is_one_shot}}).second; if (!result) { RTC_LOG(INFO) << "Can't bind receiver to used port " << desired_port - << " in endpoint " << id_; + << " in endpoint " << options_.log_name + << "; id=" << options_.id; return absl::nullopt; } - RTC_LOG(INFO) << "New receiver is binded to endpoint " << id_ << " on port " - << port; + RTC_LOG(INFO) << "New receiver is binded to endpoint " << options_.log_name + << "; id=" << options_.id << " on port " << port; return port; } @@ -267,37 +567,71 @@ uint16_t EmulatedEndpointImpl::NextPort() { } void EmulatedEndpointImpl::UnbindReceiver(uint16_t port) { - rtc::CritScope crit(&receiver_lock_); + MutexLock lock(&receiver_lock_); + RTC_LOG(INFO) << "Receiver is removed on port " << port << " from endpoint " + << options_.log_name << "; id=" << options_.id; port_to_receiver_.erase(port); } +void EmulatedEndpointImpl::BindDefaultReceiver( + EmulatedNetworkReceiverInterface* receiver) { + MutexLock lock(&receiver_lock_); + RTC_CHECK(!default_receiver_.has_value()) + << "Endpoint " << options_.log_name << "; id=" << options_.id + << " already has default receiver"; + RTC_LOG(INFO) << "Default receiver is binded to endpoint " + << options_.log_name << "; id=" << options_.id; + default_receiver_ = receiver; +} + +void EmulatedEndpointImpl::UnbindDefaultReceiver() { + MutexLock lock(&receiver_lock_); + RTC_LOG(INFO) << "Default receiver is removed from endpoint " + << options_.log_name << "; 
id=" << options_.id; + default_receiver_ = absl::nullopt; +} + rtc::IPAddress EmulatedEndpointImpl::GetPeerLocalAddress() const { - return peer_local_addr_; + return options_.ip; } void EmulatedEndpointImpl::OnPacketReceived(EmulatedIpPacket packet) { RTC_DCHECK_RUN_ON(task_queue_); - RTC_CHECK(packet.to.ipaddr() == peer_local_addr_) - << "Routing error: wrong destination endpoint. Packet.to.ipaddr()=: " - << packet.to.ipaddr().ToString() - << "; Receiver peer_local_addr_=" << peer_local_addr_.ToString(); - rtc::CritScope crit(&receiver_lock_); - UpdateReceiveStats(packet); + if (!options_.allow_receive_packets_with_different_dest_ip) { + RTC_CHECK(packet.to.ipaddr() == options_.ip) + << "Routing error: wrong destination endpoint. Packet.to.ipaddr()=: " + << packet.to.ipaddr().ToString() + << "; Receiver options_.ip=" << options_.ip.ToString(); + } + MutexLock lock(&receiver_lock_); + stats_builder_.OnPacketReceived(clock_->CurrentTime(), packet.from.ipaddr(), + DataSize::Bytes(packet.ip_packet_size()), + options_.stats_gathering_mode); auto it = port_to_receiver_.find(packet.to.port()); if (it == port_to_receiver_.end()) { + if (default_receiver_.has_value()) { + (*default_receiver_)->OnPacketReceived(std::move(packet)); + return; + } // It can happen, that remote peer closed connection, but there still some // packets, that are going to it. It can happen during peer connection close // process: one peer closed connection, second still sending data. 
- RTC_LOG(INFO) << "Drop packet: no receiver registered in " << id_ - << " on port " << packet.to.port(); - stats_.packets_dropped++; - stats_.bytes_dropped += DataSize::Bytes(packet.ip_packet_size()); + RTC_LOG(INFO) << "Drop packet: no receiver registered in " + << options_.log_name << "; id=" << options_.id << " on port " + << packet.to.port(); + stats_builder_.OnPacketDropped(packet.from.ipaddr(), + DataSize::Bytes(packet.ip_packet_size()), + options_.stats_gathering_mode); return; } - // Endpoint assumes frequent calls to bind and unbind methods, so it holds - // lock during packet processing to ensure that receiver won't be deleted - // before call to OnPacketReceived. - it->second->OnPacketReceived(std::move(packet)); + // Endpoint holds lock during packet processing to ensure that a call to + // UnbindReceiver followed by a delete of the receiver cannot race with this + // call to OnPacketReceived. + it->second.receiver->OnPacketReceived(std::move(packet)); + + if (it->second.is_one_shot) { + port_to_receiver_.erase(it); + } } void EmulatedEndpointImpl::Enable() { @@ -317,22 +651,9 @@ bool EmulatedEndpointImpl::Enabled() const { return is_enabled_; } -EmulatedNetworkStats EmulatedEndpointImpl::stats() { +std::unique_ptr EmulatedEndpointImpl::stats() const { RTC_DCHECK_RUN_ON(task_queue_); - return stats_; -} - -void EmulatedEndpointImpl::UpdateReceiveStats(const EmulatedIpPacket& packet) { - RTC_DCHECK_RUN_ON(task_queue_); - Timestamp current_time = clock_->CurrentTime(); - if (stats_.first_packet_received_time.IsInfinite()) { - stats_.first_packet_received_time = current_time; - stats_.first_received_packet_size = - DataSize::Bytes(packet.ip_packet_size()); - } - stats_.last_packet_received_time = current_time; - stats_.packets_received++; - stats_.bytes_received += DataSize::Bytes(packet.ip_packet_size()); + return stats_builder_.Build(); } EndpointsContainer::EndpointsContainer( @@ -371,39 +692,16 @@ EndpointsContainer::GetEnabledNetworks() const { 
return networks; } -EmulatedNetworkStats EndpointsContainer::GetStats() const { - EmulatedNetworkStats stats; +std::vector EndpointsContainer::GetEndpoints() const { + return std::vector(endpoints_.begin(), endpoints_.end()); +} + +std::unique_ptr EndpointsContainer::GetStats() const { + EmulatedNetworkStatsBuilder stats_builder; for (auto* endpoint : endpoints_) { - EmulatedNetworkStats endpoint_stats = endpoint->stats(); - stats.packets_sent += endpoint_stats.packets_sent; - stats.bytes_sent += endpoint_stats.bytes_sent; - stats.packets_received += endpoint_stats.packets_received; - stats.bytes_received += endpoint_stats.bytes_received; - stats.packets_dropped += endpoint_stats.packets_dropped; - stats.bytes_dropped += endpoint_stats.bytes_dropped; - if (stats.first_packet_received_time > - endpoint_stats.first_packet_received_time) { - stats.first_packet_received_time = - endpoint_stats.first_packet_received_time; - stats.first_received_packet_size = - endpoint_stats.first_received_packet_size; - } - if (stats.first_packet_sent_time > endpoint_stats.first_packet_sent_time) { - stats.first_packet_sent_time = endpoint_stats.first_packet_sent_time; - stats.first_sent_packet_size = endpoint_stats.first_sent_packet_size; - } - if (stats.last_packet_received_time.IsInfinite() || - stats.last_packet_received_time < - endpoint_stats.last_packet_received_time) { - stats.last_packet_received_time = - endpoint_stats.last_packet_received_time; - } - if (stats.last_packet_sent_time.IsInfinite() || - stats.last_packet_sent_time < endpoint_stats.last_packet_sent_time) { - stats.last_packet_sent_time = endpoint_stats.last_packet_sent_time; - } + stats_builder.AddEmulatedNetworkStats(*endpoint->stats()); } - return stats; + return stats_builder.Build(); } } // namespace webrtc diff --git a/test/network/network_emulation.h b/test/network/network_emulation.h index 75e9c2c78a..f700beffcd 100644 --- a/test/network/network_emulation.h +++ b/test/network/network_emulation.h @@ -20,6 
+20,9 @@ #include #include "absl/types/optional.h" +#include "api/array_view.h" +#include "api/numerics/samples_stats_counter.h" +#include "api/sequence_checker.h" #include "api/test/network_emulation_manager.h" #include "api/test/simulated_network.h" #include "api/units/timestamp.h" @@ -27,13 +30,364 @@ #include "rtc_base/network.h" #include "rtc_base/network_constants.h" #include "rtc_base/socket_address.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/task_utils/repeating_task.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" namespace webrtc { +// This class is immutable and so thread safe. +class EmulatedNetworkOutgoingStatsImpl final + : public EmulatedNetworkOutgoingStats { + public: + EmulatedNetworkOutgoingStatsImpl( + int64_t packets_sent, + DataSize bytes_sent, + SamplesStatsCounter sent_packets_size_counter, + DataSize first_sent_packet_size, + Timestamp first_packet_sent_time, + Timestamp last_packet_sent_time) + : packets_sent_(packets_sent), + bytes_sent_(bytes_sent), + sent_packets_size_counter_(std::move(sent_packets_size_counter)), + first_sent_packet_size_(first_sent_packet_size), + first_packet_sent_time_(first_packet_sent_time), + last_packet_sent_time_(last_packet_sent_time) {} + explicit EmulatedNetworkOutgoingStatsImpl( + const EmulatedNetworkOutgoingStats& stats) + : packets_sent_(stats.PacketsSent()), + bytes_sent_(stats.BytesSent()), + sent_packets_size_counter_(stats.SentPacketsSizeCounter()), + first_sent_packet_size_(stats.FirstSentPacketSize()), + first_packet_sent_time_(stats.FirstPacketSentTime()), + last_packet_sent_time_(stats.LastPacketSentTime()) {} + ~EmulatedNetworkOutgoingStatsImpl() override = default; + + int64_t PacketsSent() const override { return packets_sent_; } + + DataSize BytesSent() const override { return bytes_sent_; } + + const SamplesStatsCounter& SentPacketsSizeCounter() const 
override { + return sent_packets_size_counter_; + } + + DataSize FirstSentPacketSize() const override { + return first_sent_packet_size_; + } + + Timestamp FirstPacketSentTime() const override { + return first_packet_sent_time_; + } + + Timestamp LastPacketSentTime() const override { + return last_packet_sent_time_; + } + + DataRate AverageSendRate() const override; + + private: + const int64_t packets_sent_; + const DataSize bytes_sent_; + const SamplesStatsCounter sent_packets_size_counter_; + const DataSize first_sent_packet_size_; + const Timestamp first_packet_sent_time_; + const Timestamp last_packet_sent_time_; +}; + +// This class is immutable and so thread safe. +class EmulatedNetworkIncomingStatsImpl final + : public EmulatedNetworkIncomingStats { + public: + EmulatedNetworkIncomingStatsImpl( + int64_t packets_received, + DataSize bytes_received, + SamplesStatsCounter received_packets_size_counter, + int64_t packets_dropped, + DataSize bytes_dropped, + SamplesStatsCounter dropped_packets_size_counter, + DataSize first_received_packet_size, + Timestamp first_packet_received_time, + Timestamp last_packet_received_time) + : packets_received_(packets_received), + bytes_received_(bytes_received), + received_packets_size_counter_(received_packets_size_counter), + packets_dropped_(packets_dropped), + bytes_dropped_(bytes_dropped), + dropped_packets_size_counter_(dropped_packets_size_counter), + first_received_packet_size_(first_received_packet_size), + first_packet_received_time_(first_packet_received_time), + last_packet_received_time_(last_packet_received_time) {} + explicit EmulatedNetworkIncomingStatsImpl( + const EmulatedNetworkIncomingStats& stats) + : packets_received_(stats.PacketsReceived()), + bytes_received_(stats.BytesReceived()), + received_packets_size_counter_(stats.ReceivedPacketsSizeCounter()), + packets_dropped_(stats.PacketsDropped()), + bytes_dropped_(stats.BytesDropped()), + dropped_packets_size_counter_(stats.DroppedPacketsSizeCounter()), + 
first_received_packet_size_(stats.FirstReceivedPacketSize()), + first_packet_received_time_(stats.FirstPacketReceivedTime()), + last_packet_received_time_(stats.LastPacketReceivedTime()) {} + ~EmulatedNetworkIncomingStatsImpl() override = default; + + int64_t PacketsReceived() const override { return packets_received_; } + + DataSize BytesReceived() const override { return bytes_received_; } + + const SamplesStatsCounter& ReceivedPacketsSizeCounter() const override { + return received_packets_size_counter_; + } + + int64_t PacketsDropped() const override { return packets_dropped_; } + + DataSize BytesDropped() const override { return bytes_dropped_; } + + const SamplesStatsCounter& DroppedPacketsSizeCounter() const override { + return dropped_packets_size_counter_; + } + + DataSize FirstReceivedPacketSize() const override { + return first_received_packet_size_; + } + + Timestamp FirstPacketReceivedTime() const override { + return first_packet_received_time_; + } + + Timestamp LastPacketReceivedTime() const override { + return last_packet_received_time_; + } + + DataRate AverageReceiveRate() const override; + + private: + const int64_t packets_received_; + const DataSize bytes_received_; + const SamplesStatsCounter received_packets_size_counter_; + const int64_t packets_dropped_; + const DataSize bytes_dropped_; + const SamplesStatsCounter dropped_packets_size_counter_; + const DataSize first_received_packet_size_; + const Timestamp first_packet_received_time_; + const Timestamp last_packet_received_time_; +}; + +// This class is immutable and so is thread safe. 
+class EmulatedNetworkStatsImpl final : public EmulatedNetworkStats { + public: + EmulatedNetworkStatsImpl( + std::vector local_addresses, + SamplesStatsCounter sent_packets_queue_wait_time_us, + std::map> + outgoing_stats_per_destination, + std::map> + incoming_stats_per_source) + : local_addresses_(std::move(local_addresses)), + sent_packets_queue_wait_time_us_(sent_packets_queue_wait_time_us), + outgoing_stats_per_destination_( + std::move(outgoing_stats_per_destination)), + incoming_stats_per_source_(std::move(incoming_stats_per_source)), + overall_outgoing_stats_(GetOverallOutgoingStats()), + overall_incoming_stats_(GetOverallIncomingStats()) {} + ~EmulatedNetworkStatsImpl() override = default; + + std::vector LocalAddresses() const override { + return local_addresses_; + } + + int64_t PacketsSent() const override { + return overall_outgoing_stats_->PacketsSent(); + } + + DataSize BytesSent() const override { + return overall_outgoing_stats_->BytesSent(); + } + + const SamplesStatsCounter& SentPacketsSizeCounter() const override { + return overall_outgoing_stats_->SentPacketsSizeCounter(); + } + + const SamplesStatsCounter& SentPacketsQueueWaitTimeUs() const override { + return sent_packets_queue_wait_time_us_; + } + + DataSize FirstSentPacketSize() const override { + return overall_outgoing_stats_->FirstSentPacketSize(); + } + + Timestamp FirstPacketSentTime() const override { + return overall_outgoing_stats_->FirstPacketSentTime(); + } + + Timestamp LastPacketSentTime() const override { + return overall_outgoing_stats_->LastPacketSentTime(); + } + + DataRate AverageSendRate() const override { + return overall_outgoing_stats_->AverageSendRate(); + } + + int64_t PacketsReceived() const override { + return overall_incoming_stats_->PacketsReceived(); + } + + DataSize BytesReceived() const override { + return overall_incoming_stats_->BytesReceived(); + } + + const SamplesStatsCounter& ReceivedPacketsSizeCounter() const override { + return 
overall_incoming_stats_->ReceivedPacketsSizeCounter(); + } + + int64_t PacketsDropped() const override { + return overall_incoming_stats_->PacketsDropped(); + } + + DataSize BytesDropped() const override { + return overall_incoming_stats_->BytesDropped(); + } + + const SamplesStatsCounter& DroppedPacketsSizeCounter() const override { + return overall_incoming_stats_->DroppedPacketsSizeCounter(); + } + + DataSize FirstReceivedPacketSize() const override { + return overall_incoming_stats_->FirstReceivedPacketSize(); + } + + Timestamp FirstPacketReceivedTime() const override { + return overall_incoming_stats_->FirstPacketReceivedTime(); + } + + Timestamp LastPacketReceivedTime() const override { + return overall_incoming_stats_->LastPacketReceivedTime(); + } + + DataRate AverageReceiveRate() const override { + return overall_incoming_stats_->AverageReceiveRate(); + } + + std::map> + OutgoingStatsPerDestination() const override; + + std::map> + IncomingStatsPerSource() const override; + + private: + std::unique_ptr GetOverallOutgoingStats() const; + std::unique_ptr GetOverallIncomingStats() const; + + const std::vector local_addresses_; + const SamplesStatsCounter sent_packets_queue_wait_time_us_; + const std::map> + outgoing_stats_per_destination_; + const std::map> + incoming_stats_per_source_; + const std::unique_ptr overall_outgoing_stats_; + const std::unique_ptr overall_incoming_stats_; +}; + +class EmulatedNetworkOutgoingStatsBuilder { + public: + EmulatedNetworkOutgoingStatsBuilder(); + + void OnPacketSent(Timestamp sent_time, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode); + + void AddOutgoingStats(const EmulatedNetworkOutgoingStats& stats); + + std::unique_ptr Build() const; + + private: + SequenceChecker sequence_checker_; + + int64_t packets_sent_ RTC_GUARDED_BY(sequence_checker_) = 0; + DataSize bytes_sent_ RTC_GUARDED_BY(sequence_checker_) = DataSize::Zero(); + SamplesStatsCounter sent_packets_size_counter_ + 
RTC_GUARDED_BY(sequence_checker_); + DataSize first_sent_packet_size_ RTC_GUARDED_BY(sequence_checker_) = + DataSize::Zero(); + Timestamp first_packet_sent_time_ RTC_GUARDED_BY(sequence_checker_) = + Timestamp::PlusInfinity(); + Timestamp last_packet_sent_time_ RTC_GUARDED_BY(sequence_checker_) = + Timestamp::MinusInfinity(); +}; + +class EmulatedNetworkIncomingStatsBuilder { + public: + EmulatedNetworkIncomingStatsBuilder(); + + void OnPacketDropped(DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode); + + void OnPacketReceived(Timestamp received_time, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode); + + // Adds stats collected from other endpoints to the builder. + void AddIncomingStats(const EmulatedNetworkIncomingStats& stats); + + std::unique_ptr Build() const; + + private: + SequenceChecker sequence_checker_; + + int64_t packets_received_ RTC_GUARDED_BY(sequence_checker_) = 0; + DataSize bytes_received_ RTC_GUARDED_BY(sequence_checker_) = DataSize::Zero(); + SamplesStatsCounter received_packets_size_counter_ + RTC_GUARDED_BY(sequence_checker_); + int64_t packets_dropped_ RTC_GUARDED_BY(sequence_checker_) = 0; + DataSize bytes_dropped_ RTC_GUARDED_BY(sequence_checker_) = DataSize::Zero(); + SamplesStatsCounter dropped_packets_size_counter_ + RTC_GUARDED_BY(sequence_checker_); + DataSize first_received_packet_size_ RTC_GUARDED_BY(sequence_checker_) = + DataSize::Zero(); + Timestamp first_packet_received_time_ RTC_GUARDED_BY(sequence_checker_) = + Timestamp::PlusInfinity(); + Timestamp last_packet_received_time_ RTC_GUARDED_BY(sequence_checker_) = + Timestamp::MinusInfinity(); +}; + +// All methods of EmulatedNetworkStatsBuilder have to be used on a single +// thread. It may be created on another thread. 
+class EmulatedNetworkStatsBuilder { + public: + EmulatedNetworkStatsBuilder(); + explicit EmulatedNetworkStatsBuilder(rtc::IPAddress local_ip); + + void OnPacketSent(Timestamp queued_time, + Timestamp sent_time, + rtc::IPAddress destination_ip, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode); + + void OnPacketDropped(rtc::IPAddress source_ip, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode); + + void OnPacketReceived(Timestamp received_time, + rtc::IPAddress source_ip, + DataSize packet_size, + EmulatedEndpointConfig::StatsGatheringMode mode); + + void AddEmulatedNetworkStats(const EmulatedNetworkStats& stats); + + std::unique_ptr Build() const; + + private: + SequenceChecker sequence_checker_; + + std::vector local_addresses_ + RTC_GUARDED_BY(sequence_checker_); + SamplesStatsCounter sent_packets_queue_wait_time_us_; + std::map + outgoing_stats_per_destination_ RTC_GUARDED_BY(sequence_checker_); + std::map + incoming_stats_per_source_ RTC_GUARDED_BY(sequence_checker_); +}; class LinkEmulation : public EmulatedNetworkReceiverInterface { public: @@ -65,6 +419,10 @@ class LinkEmulation : public EmulatedNetworkReceiverInterface { uint64_t next_packet_id_ RTC_GUARDED_BY(task_queue_) = 1; }; +// Represents a component responsible for routing packets based on their IP +// address. All possible routes have to be set explicitly before a packet for +// the desired destination is seen for the first time. If the route is unknown, +// the packet will be silently dropped. 
class NetworkRouterNode : public EmulatedNetworkReceiverInterface { public: explicit NetworkRouterNode(rtc::TaskQueue* task_queue); @@ -73,11 +431,17 @@ class NetworkRouterNode : public EmulatedNetworkReceiverInterface { void SetReceiver(const rtc::IPAddress& dest_ip, EmulatedNetworkReceiverInterface* receiver); void RemoveReceiver(const rtc::IPAddress& dest_ip); + // Sets a default receiver that will be used for all incoming packets for + // which there is no specific receiver bound to their destination port. + void SetDefaultReceiver(EmulatedNetworkReceiverInterface* receiver); + void RemoveDefaultReceiver(); void SetWatcher(std::function watcher); void SetFilter(std::function filter); private: rtc::TaskQueue* const task_queue_; + absl::optional default_receiver_ + RTC_GUARDED_BY(task_queue_); std::map routing_ RTC_GUARDED_BY(task_queue_); std::function watcher_ @@ -128,10 +492,31 @@ class EmulatedNetworkNode : public EmulatedNetworkReceiverInterface { // from other EmulatedNetworkNodes. class EmulatedEndpointImpl : public EmulatedEndpoint { public: - EmulatedEndpointImpl(uint64_t id, - const rtc::IPAddress& ip, + struct Options { + Options(uint64_t id, + const rtc::IPAddress& ip, + const EmulatedEndpointConfig& config); + + // TODO(titovartem) check if we can remove id. + uint64_t id; + // Endpoint local IP address. + rtc::IPAddress ip; + EmulatedEndpointConfig::StatsGatheringMode stats_gathering_mode; + rtc::AdapterType type; + // Allow endpoint to send packets specifying a source IP address different + // from the current endpoint IP address. If false, the endpoint will crash + // if an attempt is made to send such a packet. + bool allow_send_packet_with_different_source_ip; + // Allow endpoint to receive packets with a destination IP address different + // from the current endpoint IP address. If false, the endpoint will crash + // if such a packet arrives. + bool allow_receive_packets_with_different_dest_ip; + // Name of the endpoint used for logging purposes. 
+ std::string log_name; + }; + + EmulatedEndpointImpl(const Options& options, bool is_enabled, - rtc::AdapterType type, rtc::TaskQueue* task_queue, Clock* clock); ~EmulatedEndpointImpl() override; @@ -148,7 +533,14 @@ class EmulatedEndpointImpl : public EmulatedEndpoint { absl::optional BindReceiver( uint16_t desired_port, EmulatedNetworkReceiverInterface* receiver) override; + // Binds a receiver, and automatically removes the binding after first call to + // OnPacketReceived. + absl::optional BindOneShotReceiver( + uint16_t desired_port, + EmulatedNetworkReceiverInterface* receiver); void UnbindReceiver(uint16_t port) override; + void BindDefaultReceiver(EmulatedNetworkReceiverInterface* receiver) override; + void UnbindDefaultReceiver() override; rtc::IPAddress GetPeerLocalAddress() const override; @@ -161,45 +553,61 @@ class EmulatedEndpointImpl : public EmulatedEndpoint { const rtc::Network& network() const { return *network_.get(); } - EmulatedNetworkStats stats() override; + std::unique_ptr stats() const; private: + struct ReceiverBinding { + EmulatedNetworkReceiverInterface* receiver; + bool is_one_shot; + }; + + absl::optional BindReceiverInternal( + uint16_t desired_port, + EmulatedNetworkReceiverInterface* receiver, + bool is_one_shot); + static constexpr uint16_t kFirstEphemeralPort = 49152; uint16_t NextPort() RTC_EXCLUSIVE_LOCKS_REQUIRED(receiver_lock_); - void UpdateReceiveStats(const EmulatedIpPacket& packet); - rtc::CriticalSection receiver_lock_; - rtc::ThreadChecker enabled_state_checker_; + Mutex receiver_lock_; + SequenceChecker enabled_state_checker_; - uint64_t id_; - // Peer's local IP address for this endpoint network interface. 
- const rtc::IPAddress peer_local_addr_; + const Options options_; bool is_enabled_ RTC_GUARDED_BY(enabled_state_checker_); - const rtc::AdapterType type_; Clock* const clock_; rtc::TaskQueue* const task_queue_; std::unique_ptr network_; NetworkRouterNode router_; uint16_t next_port_ RTC_GUARDED_BY(receiver_lock_); - std::map port_to_receiver_ + absl::optional default_receiver_ + RTC_GUARDED_BY(receiver_lock_); + std::map port_to_receiver_ RTC_GUARDED_BY(receiver_lock_); - EmulatedNetworkStats stats_ RTC_GUARDED_BY(task_queue_); + EmulatedNetworkStatsBuilder stats_builder_ RTC_GUARDED_BY(task_queue_); }; class EmulatedRoute { public: EmulatedRoute(EmulatedEndpointImpl* from, std::vector via_nodes, - EmulatedEndpointImpl* to) - : from(from), via_nodes(std::move(via_nodes)), to(to), active(true) {} + EmulatedEndpointImpl* to, + bool is_default) + : from(from), + via_nodes(std::move(via_nodes)), + to(to), + active(true), + is_default(is_default) {} EmulatedEndpointImpl* from; std::vector via_nodes; EmulatedEndpointImpl* to; bool active; + bool is_default; }; + +// This object is immutable and so thread safe. class EndpointsContainer { public: explicit EndpointsContainer( @@ -211,7 +619,8 @@ class EndpointsContainer { // Returns list of networks for enabled endpoints. Caller takes ownership of // returned rtc::Network objects. 
std::vector> GetEnabledNetworks() const; - EmulatedNetworkStats GetStats() const; + std::vector GetEndpoints() const; + std::unique_ptr GetStats() const; private: const std::vector endpoints_; @@ -234,12 +643,12 @@ class FakePacketRoute : public EmulatedNetworkReceiverInterface { RTC_CHECK_GE(size, sizeof(int)); sent_.emplace(next_packet_id_, packet); rtc::CopyOnWriteBuffer buf(size); - reinterpret_cast(buf.data())[0] = next_packet_id_++; + reinterpret_cast(buf.MutableData())[0] = next_packet_id_++; route_->from->SendPacket(send_addr_, recv_addr_, buf); } void OnPacketReceived(EmulatedIpPacket packet) override { - int packet_id = reinterpret_cast(packet.data.data())[0]; + int packet_id = reinterpret_cast(packet.data.data())[0]; action_(std::move(sent_[packet_id]), packet.arrival_time); sent_.erase(packet_id); } diff --git a/test/network/network_emulation_manager.cc b/test/network/network_emulation_manager.cc index adc4bb646c..2c96191200 100644 --- a/test/network/network_emulation_manager.cc +++ b/test/network/network_emulation_manager.cc @@ -17,6 +17,8 @@ #include "api/units/timestamp.h" #include "call/simulated_network.h" #include "rtc_base/fake_network.h" +#include "test/network/emulated_turn_server.h" +#include "test/network/traffic_route.h" #include "test/time_controller/real_time_controller.h" #include "test/time_controller/simulated_time_controller.h" @@ -44,7 +46,8 @@ std::unique_ptr CreateTimeController(TimeMode mode) { } // namespace NetworkEmulationManagerImpl::NetworkEmulationManagerImpl(TimeMode mode) - : time_controller_(CreateTimeController(mode)), + : time_mode_(mode), + time_controller_(CreateTimeController(mode)), clock_(time_controller_->GetClock()), next_node_id_(1), next_ip4_address_(kMinIPv4Address), @@ -55,11 +58,17 @@ NetworkEmulationManagerImpl::NetworkEmulationManagerImpl(TimeMode mode) // TODO(srte): Ensure that any pending task that must be run for consistency // (such as stats collection tasks) are not cancelled when the task queue is // 
destroyed. -NetworkEmulationManagerImpl::~NetworkEmulationManagerImpl() = default; +NetworkEmulationManagerImpl::~NetworkEmulationManagerImpl() { + for (auto& turn_server : turn_servers_) { + turn_server->Stop(); + } +} EmulatedNetworkNode* NetworkEmulationManagerImpl::CreateEmulatedNode( - BuiltInNetworkBehaviorConfig config) { - return CreateEmulatedNode(std::make_unique(config)); + BuiltInNetworkBehaviorConfig config, + uint64_t random_seed) { + return CreateEmulatedNode( + std::make_unique(config, random_seed)); } EmulatedNetworkNode* NetworkEmulationManagerImpl::CreateEmulatedNode( @@ -78,7 +87,7 @@ NetworkEmulationManagerImpl::NodeBuilder() { return SimulatedNetworkNode::Builder(this); } -EmulatedEndpoint* NetworkEmulationManagerImpl::CreateEndpoint( +EmulatedEndpointImpl* NetworkEmulationManagerImpl::CreateEndpoint( EmulatedEndpointConfig config) { absl::optional ip = config.ip; if (!ip) { @@ -98,9 +107,9 @@ EmulatedEndpoint* NetworkEmulationManagerImpl::CreateEndpoint( bool res = used_ip_addresses_.insert(*ip).second; RTC_CHECK(res) << "IP=" << ip->ToString() << " already in use"; auto node = std::make_unique( - next_node_id_++, *ip, config.start_as_enabled, config.type, &task_queue_, - clock_); - EmulatedEndpoint* out = node.get(); + EmulatedEndpointImpl::Options(next_node_id_++, *ip, config), + config.start_as_enabled, &task_queue_, clock_); + EmulatedEndpointImpl* out = node.get(); endpoints_.push_back(std::move(node)); return out; } @@ -139,7 +148,7 @@ EmulatedRoute* NetworkEmulationManagerImpl::CreateRoute( std::unique_ptr route = std::make_unique( static_cast(from), std::move(via_nodes), - static_cast(to)); + static_cast(to), /*is_default=*/false); EmulatedRoute* out = route.get(); routes_.push_back(std::move(route)); return out; @@ -152,26 +161,72 @@ EmulatedRoute* NetworkEmulationManagerImpl::CreateRoute( return CreateRoute(from, via_nodes, to); } +EmulatedRoute* NetworkEmulationManagerImpl::CreateDefaultRoute( + EmulatedEndpoint* from, + const 
std::vector& via_nodes, + EmulatedEndpoint* to) { + // Because endpoint has no send node by default at least one should be + // provided here. + RTC_CHECK(!via_nodes.empty()); + + static_cast(from)->router()->SetDefaultReceiver( + via_nodes[0]); + EmulatedNetworkNode* cur_node = via_nodes[0]; + for (size_t i = 1; i < via_nodes.size(); ++i) { + cur_node->router()->SetDefaultReceiver(via_nodes[i]); + cur_node = via_nodes[i]; + } + cur_node->router()->SetDefaultReceiver(to); + + std::unique_ptr route = std::make_unique( + static_cast(from), std::move(via_nodes), + static_cast(to), /*is_default=*/true); + EmulatedRoute* out = route.get(); + routes_.push_back(std::move(route)); + return out; +} + void NetworkEmulationManagerImpl::ClearRoute(EmulatedRoute* route) { RTC_CHECK(route->active) << "Route already cleared"; task_queue_.SendTask( [route]() { // Remove receiver from intermediate nodes. for (auto* node : route->via_nodes) { - node->router()->RemoveReceiver(route->to->GetPeerLocalAddress()); + if (route->is_default) { + node->router()->RemoveDefaultReceiver(); + } else { + node->router()->RemoveReceiver(route->to->GetPeerLocalAddress()); + } } // Remove destination endpoint from source endpoint's router. 
- route->from->router()->RemoveReceiver(route->to->GetPeerLocalAddress()); + if (route->is_default) { + route->from->router()->RemoveDefaultReceiver(); + } else { + route->from->router()->RemoveReceiver( + route->to->GetPeerLocalAddress()); + } route->active = false; }, RTC_FROM_HERE); } -TrafficRoute* NetworkEmulationManagerImpl::CreateTrafficRoute( +TcpMessageRoute* NetworkEmulationManagerImpl::CreateTcpRoute( + EmulatedRoute* send_route, + EmulatedRoute* ret_route) { + auto tcp_route = std::make_unique( + clock_, task_queue_.Get(), send_route, ret_route); + auto* route_ptr = tcp_route.get(); + task_queue_.PostTask([this, tcp_route = std::move(tcp_route)]() mutable { + tcp_message_routes_.push_back(std::move(tcp_route)); + }); + return route_ptr; +} + +CrossTrafficRoute* NetworkEmulationManagerImpl::CreateCrossTrafficRoute( const std::vector& via_nodes) { RTC_CHECK(!via_nodes.empty()); - EmulatedEndpoint* endpoint = CreateEndpoint(EmulatedEndpointConfig()); + EmulatedEndpointImpl* endpoint = CreateEndpoint(EmulatedEndpointConfig()); // Setup a route via specified nodes. 
EmulatedNetworkNode* cur_node = via_nodes[0]; @@ -182,88 +237,40 @@ TrafficRoute* NetworkEmulationManagerImpl::CreateTrafficRoute( } cur_node->router()->SetReceiver(endpoint->GetPeerLocalAddress(), endpoint); - std::unique_ptr traffic_route = - std::make_unique(clock_, via_nodes[0], endpoint); - TrafficRoute* out = traffic_route.get(); + std::unique_ptr traffic_route = + std::make_unique(clock_, via_nodes[0], endpoint); + CrossTrafficRoute* out = traffic_route.get(); traffic_routes_.push_back(std::move(traffic_route)); return out; } -RandomWalkCrossTraffic* -NetworkEmulationManagerImpl::CreateRandomWalkCrossTraffic( - TrafficRoute* traffic_route, - RandomWalkConfig config) { - auto traffic = - std::make_unique(config, traffic_route); - RandomWalkCrossTraffic* out = traffic.get(); - - task_queue_.PostTask( - [this, config, traffic = std::move(traffic)]() mutable { - auto* traffic_ptr = traffic.get(); - random_cross_traffics_.push_back(std::move(traffic)); - RepeatingTaskHandle::Start(task_queue_.Get(), - [this, config, traffic_ptr] { - traffic_ptr->Process(Now()); - return config.min_packet_interval; - }); - }); - return out; -} +CrossTrafficGenerator* NetworkEmulationManagerImpl::StartCrossTraffic( + std::unique_ptr generator) { + CrossTrafficGenerator* out = generator.get(); + task_queue_.PostTask([this, generator = std::move(generator)]() mutable { + auto* generator_ptr = generator.get(); -PulsedPeaksCrossTraffic* -NetworkEmulationManagerImpl::CreatePulsedPeaksCrossTraffic( - TrafficRoute* traffic_route, - PulsedPeaksConfig config) { - auto traffic = - std::make_unique(config, traffic_route); - PulsedPeaksCrossTraffic* out = traffic.get(); - task_queue_.PostTask( - [this, config, traffic = std::move(traffic)]() mutable { - auto* traffic_ptr = traffic.get(); - pulsed_cross_traffics_.push_back(std::move(traffic)); - RepeatingTaskHandle::Start(task_queue_.Get(), - [this, config, traffic_ptr] { - traffic_ptr->Process(Now()); - return config.min_packet_interval; - }); 
- }); - return out; -} - -FakeTcpCrossTraffic* NetworkEmulationManagerImpl::StartFakeTcpCrossTraffic( - std::vector send_link, - std::vector ret_link, - FakeTcpConfig config) { - auto traffic = std::make_unique( - clock_, config, CreateRoute(send_link), CreateRoute(ret_link)); - auto* traffic_ptr = traffic.get(); - task_queue_.PostTask([this, traffic = std::move(traffic)]() mutable { - traffic->Start(task_queue_.Get()); - tcp_cross_traffics_.push_back(std::move(traffic)); - }); - return traffic_ptr; -} + auto repeating_task_handle = + RepeatingTaskHandle::Start(task_queue_.Get(), [this, generator_ptr] { + generator_ptr->Process(Now()); + return generator_ptr->GetProcessInterval(); + }); -TcpMessageRoute* NetworkEmulationManagerImpl::CreateTcpRoute( - EmulatedRoute* send_route, - EmulatedRoute* ret_route) { - auto tcp_route = std::make_unique( - clock_, task_queue_.Get(), send_route, ret_route); - auto* route_ptr = tcp_route.get(); - task_queue_.PostTask([this, tcp_route = std::move(tcp_route)]() mutable { - tcp_message_routes_.push_back(std::move(tcp_route)); + cross_traffics_.push_back(CrossTrafficSource( + std::move(generator), std::move(repeating_task_handle))); }); - return route_ptr; + return out; } void NetworkEmulationManagerImpl::StopCrossTraffic( - FakeTcpCrossTraffic* traffic) { + CrossTrafficGenerator* generator) { task_queue_.PostTask([=]() { - traffic->Stop(); - tcp_cross_traffics_.remove_if( - [=](const std::unique_ptr& ptr) { - return ptr.get() == traffic; - }); + auto it = std::find_if(cross_traffics_.begin(), cross_traffics_.end(), + [=](const CrossTrafficSource& el) { + return el.first.get() == generator; + }); + it->second.Stop(); + cross_traffics_.erase(it); }); } @@ -271,6 +278,7 @@ EmulatedNetworkManagerInterface* NetworkEmulationManagerImpl::CreateEmulatedNetworkManagerInterface( const std::vector& endpoints) { std::vector endpoint_impls; + endpoint_impls.reserve(endpoints.size()); for (EmulatedEndpoint* endpoint : endpoints) { 
endpoint_impls.push_back(static_cast(endpoint)); } @@ -295,6 +303,22 @@ NetworkEmulationManagerImpl::CreateEmulatedNetworkManagerInterface( return out; } +void NetworkEmulationManagerImpl::GetStats( + rtc::ArrayView endpoints, + std::function)> stats_callback) { + task_queue_.PostTask([endpoints, stats_callback]() { + EmulatedNetworkStatsBuilder stats_builder; + for (auto* endpoint : endpoints) { + // It's safe to cast here because EmulatedEndpointImpl can be the only + // implementation of EmulatedEndpoint, because only it has access to + // EmulatedEndpoint constructor. + auto endpoint_impl = static_cast(endpoint); + stats_builder.AddEmulatedNetworkStats(*endpoint_impl->stats()); + } + stats_callback(stats_builder.Build()); + }); +} + absl::optional NetworkEmulationManagerImpl::GetNextIPv4Address() { uint32_t addresses_count = kMaxIPv4Address - kMinIPv4Address; @@ -316,5 +340,20 @@ Timestamp NetworkEmulationManagerImpl::Now() const { return clock_->CurrentTime(); } +EmulatedTURNServerInterface* NetworkEmulationManagerImpl::CreateTURNServer( + EmulatedTURNServerConfig config) { + auto* client = CreateEndpoint(config.client_config); + auto* peer = CreateEndpoint(config.client_config); + char buf[128]; + rtc::SimpleStringBuilder str(buf); + str.AppendFormat("turn_server_%u", + static_cast(turn_servers_.size())); + auto turn = std::make_unique( + time_controller_->CreateThread(str.str()), client, peer); + auto out = turn.get(); + turn_servers_.push_back(std::move(turn)); + return out; +} + } // namespace test } // namespace webrtc diff --git a/test/network/network_emulation_manager.h b/test/network/network_emulation_manager.h index 2b33fa1575..449441a3c1 100644 --- a/test/network/network_emulation_manager.h +++ b/test/network/network_emulation_manager.h @@ -17,6 +17,7 @@ #include #include +#include "api/array_view.h" #include "api/test/network_emulation_manager.h" #include "api/test/simulated_network.h" #include "api/test/time_controller.h" @@ -30,9 +31,9 @@ #include 
"system_wrappers/include/clock.h" #include "test/network/cross_traffic.h" #include "test/network/emulated_network_manager.h" +#include "test/network/emulated_turn_server.h" #include "test/network/fake_network_socket_server.h" #include "test/network/network_emulation.h" -#include "test/network/traffic_route.h" namespace webrtc { namespace test { @@ -42,14 +43,14 @@ class NetworkEmulationManagerImpl : public NetworkEmulationManager { explicit NetworkEmulationManagerImpl(TimeMode mode); ~NetworkEmulationManagerImpl(); - EmulatedNetworkNode* CreateEmulatedNode( - BuiltInNetworkBehaviorConfig config) override; + EmulatedNetworkNode* CreateEmulatedNode(BuiltInNetworkBehaviorConfig config, + uint64_t random_seed = 1) override; EmulatedNetworkNode* CreateEmulatedNode( std::unique_ptr network_behavior) override; SimulatedNetworkNode::Builder NodeBuilder() override; - EmulatedEndpoint* CreateEndpoint(EmulatedEndpointConfig config) override; + EmulatedEndpointImpl* CreateEndpoint(EmulatedEndpointConfig config) override; void EnableEndpoint(EmulatedEndpoint* endpoint) override; void DisableEndpoint(EmulatedEndpoint* endpoint) override; @@ -60,35 +61,46 @@ class NetworkEmulationManagerImpl : public NetworkEmulationManager { EmulatedRoute* CreateRoute( const std::vector& via_nodes) override; - void ClearRoute(EmulatedRoute* route) override; + EmulatedRoute* CreateDefaultRoute( + EmulatedEndpoint* from, + const std::vector& via_nodes, + EmulatedEndpoint* to) override; - TrafficRoute* CreateTrafficRoute( - const std::vector& via_nodes); - RandomWalkCrossTraffic* CreateRandomWalkCrossTraffic( - TrafficRoute* traffic_route, - RandomWalkConfig config); - PulsedPeaksCrossTraffic* CreatePulsedPeaksCrossTraffic( - TrafficRoute* traffic_route, - PulsedPeaksConfig config); - FakeTcpCrossTraffic* StartFakeTcpCrossTraffic( - std::vector send_link, - std::vector ret_link, - FakeTcpConfig config); + void ClearRoute(EmulatedRoute* route) override; TcpMessageRoute* CreateTcpRoute(EmulatedRoute* 
send_route, EmulatedRoute* ret_route) override; - void StopCrossTraffic(FakeTcpCrossTraffic* traffic); + CrossTrafficRoute* CreateCrossTrafficRoute( + const std::vector& via_nodes) override; + + CrossTrafficGenerator* StartCrossTraffic( + std::unique_ptr generator) override; + void StopCrossTraffic(CrossTrafficGenerator* generator) override; EmulatedNetworkManagerInterface* CreateEmulatedNetworkManagerInterface( const std::vector& endpoints) override; + void GetStats(rtc::ArrayView endpoints, + std::function)> + stats_callback) override; + TimeController* time_controller() override { return time_controller_.get(); } + TimeMode time_mode() const override { return time_mode_; } + Timestamp Now() const; + EmulatedTURNServerInterface* CreateTURNServer( + EmulatedTURNServerConfig config) override; + private: + using CrossTrafficSource = + std::pair, RepeatingTaskHandle>; + absl::optional GetNextIPv4Address(); + + const TimeMode time_mode_; const std::unique_ptr time_controller_; Clock* const clock_; int next_node_id_; @@ -102,13 +114,12 @@ class NetworkEmulationManagerImpl : public NetworkEmulationManager { std::vector> endpoints_; std::vector> network_nodes_; std::vector> routes_; - std::vector> traffic_routes_; - std::vector> random_cross_traffics_; - std::vector> pulsed_cross_traffics_; - std::list> tcp_cross_traffics_; + std::vector> traffic_routes_; + std::vector cross_traffics_; std::list> tcp_message_routes_; std::vector> endpoints_containers_; std::vector> network_managers_; + std::vector> turn_servers_; std::map endpoint_to_network_manager_; diff --git a/test/network/network_emulation_pc_unittest.cc b/test/network/network_emulation_pc_unittest.cc index e04da34076..bd15b5ad38 100644 --- a/test/network/network_emulation_pc_unittest.cc +++ b/test/network/network_emulation_pc_unittest.cc @@ -16,6 +16,7 @@ #include "api/rtc_event_log/rtc_event_log_factory.h" #include "api/scoped_refptr.h" #include "api/task_queue/default_task_queue_factory.h" +#include 
"api/transport/field_trial_based_config.h" #include "call/simulated_network.h" #include "media/engine/webrtc_media_engine.h" #include "media/engine/webrtc_media_engine_defaults.h" @@ -59,6 +60,7 @@ rtc::scoped_refptr CreatePeerConnectionFactory( std::make_unique(pcf_deps.task_queue_factory.get()); pcf_deps.network_thread = network_thread; pcf_deps.signaling_thread = signaling_thread; + pcf_deps.trials = std::make_unique(); cricket::MediaEngineDependencies media_deps; media_deps.task_queue_factory = pcf_deps.task_queue_factory.get(); media_deps.adm = TestAudioDeviceModule::Create( @@ -67,6 +69,7 @@ rtc::scoped_refptr CreatePeerConnectionFactory( kSamplingFrequency), TestAudioDeviceModule::CreateDiscardRenderer(kSamplingFrequency), /*speed=*/1.f); + media_deps.trials = pcf_deps.trials.get(); SetMediaEngineDefaults(&media_deps); pcf_deps.media_engine = cricket::CreateMediaEngine(std::move(media_deps)); return CreateModularPeerConnectionFactory(std::move(pcf_deps)); @@ -75,7 +78,8 @@ rtc::scoped_refptr CreatePeerConnectionFactory( rtc::scoped_refptr CreatePeerConnection( const rtc::scoped_refptr& pcf, PeerConnectionObserver* observer, - rtc::NetworkManager* network_manager) { + rtc::NetworkManager* network_manager, + EmulatedTURNServerInterface* turn_server = nullptr) { PeerConnectionDependencies pc_deps(observer); auto port_allocator = std::make_unique(network_manager); @@ -87,8 +91,20 @@ rtc::scoped_refptr CreatePeerConnection( pc_deps.allocator = std::move(port_allocator); PeerConnectionInterface::RTCConfiguration rtc_configuration; rtc_configuration.sdp_semantics = SdpSemantics::kUnifiedPlan; + if (turn_server != nullptr) { + webrtc::PeerConnectionInterface::IceServer server; + server.username = turn_server->GetIceServerConfig().username; + server.password = turn_server->GetIceServerConfig().username; + server.urls.push_back(turn_server->GetIceServerConfig().url); + rtc_configuration.servers.push_back(server); + } - return 
pcf->CreatePeerConnection(rtc_configuration, std::move(pc_deps)); + auto result = + pcf->CreatePeerConnectionOrError(rtc_configuration, std::move(pc_deps)); + if (!result.ok()) { + return nullptr; + } + return result.MoveValue(); } } // namespace @@ -182,5 +198,114 @@ TEST(NetworkEmulationManagerPCTest, Run) { }); } +TEST(NetworkEmulationManagerPCTest, RunTURN) { + std::unique_ptr signaling_thread = rtc::Thread::Create(); + signaling_thread->SetName(kSignalThreadName, nullptr); + signaling_thread->Start(); + + // Setup emulated network + NetworkEmulationManagerImpl emulation(TimeMode::kRealTime); + + EmulatedNetworkNode* alice_node = emulation.CreateEmulatedNode( + std::make_unique(BuiltInNetworkBehaviorConfig())); + EmulatedNetworkNode* bob_node = emulation.CreateEmulatedNode( + std::make_unique(BuiltInNetworkBehaviorConfig())); + EmulatedNetworkNode* turn_node = emulation.CreateEmulatedNode( + std::make_unique(BuiltInNetworkBehaviorConfig())); + EmulatedEndpoint* alice_endpoint = + emulation.CreateEndpoint(EmulatedEndpointConfig()); + EmulatedEndpoint* bob_endpoint = + emulation.CreateEndpoint(EmulatedEndpointConfig()); + EmulatedTURNServerInterface* alice_turn = + emulation.CreateTURNServer(EmulatedTURNServerConfig()); + EmulatedTURNServerInterface* bob_turn = + emulation.CreateTURNServer(EmulatedTURNServerConfig()); + + emulation.CreateRoute(alice_endpoint, {alice_node}, + alice_turn->GetClientEndpoint()); + emulation.CreateRoute(alice_turn->GetClientEndpoint(), {alice_node}, + alice_endpoint); + + emulation.CreateRoute(bob_endpoint, {bob_node}, + bob_turn->GetClientEndpoint()); + emulation.CreateRoute(bob_turn->GetClientEndpoint(), {bob_node}, + bob_endpoint); + + emulation.CreateRoute(alice_turn->GetPeerEndpoint(), {turn_node}, + bob_turn->GetPeerEndpoint()); + emulation.CreateRoute(bob_turn->GetPeerEndpoint(), {turn_node}, + alice_turn->GetPeerEndpoint()); + + EmulatedNetworkManagerInterface* alice_network = + 
emulation.CreateEmulatedNetworkManagerInterface({alice_endpoint}); + EmulatedNetworkManagerInterface* bob_network = + emulation.CreateEmulatedNetworkManagerInterface({bob_endpoint}); + + // Setup peer connections. + rtc::scoped_refptr alice_pcf; + rtc::scoped_refptr alice_pc; + std::unique_ptr alice_observer = + std::make_unique(); + + rtc::scoped_refptr bob_pcf; + rtc::scoped_refptr bob_pc; + std::unique_ptr bob_observer = + std::make_unique(); + + signaling_thread->Invoke(RTC_FROM_HERE, [&]() { + alice_pcf = CreatePeerConnectionFactory(signaling_thread.get(), + alice_network->network_thread()); + alice_pc = + CreatePeerConnection(alice_pcf, alice_observer.get(), + alice_network->network_manager(), alice_turn); + + bob_pcf = CreatePeerConnectionFactory(signaling_thread.get(), + bob_network->network_thread()); + bob_pc = CreatePeerConnection(bob_pcf, bob_observer.get(), + bob_network->network_manager(), bob_turn); + }); + + std::unique_ptr alice = + std::make_unique(alice_pcf, alice_pc, + std::move(alice_observer)); + std::unique_ptr bob = + std::make_unique(bob_pcf, bob_pc, + std::move(bob_observer)); + + signaling_thread->Invoke(RTC_FROM_HERE, [&]() { + rtc::scoped_refptr source = + alice_pcf->CreateAudioSource(cricket::AudioOptions()); + rtc::scoped_refptr track = + alice_pcf->CreateAudioTrack("audio", source); + alice->AddTransceiver(track); + + // Connect peers. + ASSERT_TRUE(alice->ExchangeOfferAnswerWith(bob.get())); + // Do the SDP negotiation, and also exchange ice candidates. + ASSERT_TRUE_WAIT( + alice->signaling_state() == PeerConnectionInterface::kStable, + kDefaultTimeoutMs); + ASSERT_TRUE_WAIT(alice->IsIceGatheringDone(), kDefaultTimeoutMs); + ASSERT_TRUE_WAIT(bob->IsIceGatheringDone(), kDefaultTimeoutMs); + + // Connect an ICE candidate pairs. 
+ ASSERT_TRUE( + AddIceCandidates(bob.get(), alice->observer()->GetAllCandidates())); + ASSERT_TRUE( + AddIceCandidates(alice.get(), bob->observer()->GetAllCandidates())); + // This means that ICE and DTLS are connected. + ASSERT_TRUE_WAIT(bob->IsIceConnected(), kDefaultTimeoutMs); + ASSERT_TRUE_WAIT(alice->IsIceConnected(), kDefaultTimeoutMs); + + // Close peer connections + alice->pc()->Close(); + bob->pc()->Close(); + + // Delete peers. + alice.reset(); + bob.reset(); + }); +} + } // namespace test } // namespace webrtc diff --git a/test/network/network_emulation_unittest.cc b/test/network/network_emulation_unittest.cc index 9e630de9cb..fca10c40b7 100644 --- a/test/network/network_emulation_unittest.cc +++ b/test/network/network_emulation_unittest.cc @@ -19,7 +19,7 @@ #include "call/simulated_network.h" #include "rtc_base/event.h" #include "rtc_base/gunit.h" -#include "system_wrappers/include/sleep.h" +#include "rtc_base/synchronization/mutex.h" #include "test/gmock.h" #include "test/gtest.h" #include "test/network/network_emulation_manager.h" @@ -28,6 +28,8 @@ namespace webrtc { namespace test { namespace { +using ::testing::ElementsAreArray; + constexpr TimeDelta kNetworkPacketWaitTimeout = TimeDelta::Millis(100); constexpr TimeDelta kStatsWaitTimeout = TimeDelta::Seconds(1); constexpr int kOverheadIpv4Udp = 20 + 8; @@ -48,12 +50,12 @@ class SocketReader : public sigslot::has_slots<> { int64_t timestamp; len_ = socket_->Recv(buf_, size_, ×tamp); - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); received_count_++; } int ReceivedCount() { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return received_count_; } @@ -64,7 +66,7 @@ class SocketReader : public sigslot::has_slots<> { size_t size_; int len_; - rtc::CriticalSection lock_; + Mutex lock_; int received_count_ RTC_GUARDED_BY(lock_) = 0; }; @@ -205,8 +207,14 @@ TEST(NetworkEmulationManagerTest, Run) { rtc::CopyOnWriteBuffer data("Hello"); for (uint64_t j = 0; j < 2; j++) { - auto* s1 = 
t1->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); - auto* s2 = t2->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); + rtc::AsyncSocket* s1 = nullptr; + rtc::AsyncSocket* s2 = nullptr; + t1->Invoke(RTC_FROM_HERE, [&] { + s1 = t1->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); + }); + t2->Invoke(RTC_FROM_HERE, [&] { + s2 = t2->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); + }); SocketReader r1(s1, t1); SocketReader r2(s2, t2); @@ -244,22 +252,83 @@ TEST(NetworkEmulationManagerTest, Run) { const int64_t single_packet_size = data.size() + kOverheadIpv4Udp; std::atomic received_stats_count{0}; - nt1->GetStats([&](EmulatedNetworkStats st) { - EXPECT_EQ(st.packets_sent, 2000l); - EXPECT_EQ(st.bytes_sent.bytes(), single_packet_size * 2000l); - EXPECT_EQ(st.packets_received, 2000l); - EXPECT_EQ(st.bytes_received.bytes(), single_packet_size * 2000l); - EXPECT_EQ(st.packets_dropped, 0l); - EXPECT_EQ(st.bytes_dropped.bytes(), 0l); + nt1->GetStats([&](std::unique_ptr st) { + EXPECT_EQ(st->PacketsSent(), 2000l); + EXPECT_EQ(st->BytesSent().bytes(), single_packet_size * 2000l); + EXPECT_THAT(st->LocalAddresses(), + ElementsAreArray({alice_endpoint->GetPeerLocalAddress()})); + EXPECT_EQ(st->PacketsReceived(), 2000l); + EXPECT_EQ(st->BytesReceived().bytes(), single_packet_size * 2000l); + EXPECT_EQ(st->PacketsDropped(), 0l); + EXPECT_EQ(st->BytesDropped().bytes(), 0l); + + rtc::IPAddress bob_ip = bob_endpoint->GetPeerLocalAddress(); + std::map> + source_st = st->IncomingStatsPerSource(); + ASSERT_EQ(source_st.size(), 1lu); + EXPECT_EQ(source_st.at(bob_ip)->PacketsReceived(), 2000l); + EXPECT_EQ(source_st.at(bob_ip)->BytesReceived().bytes(), + single_packet_size * 2000l); + EXPECT_EQ(source_st.at(bob_ip)->PacketsDropped(), 0l); + EXPECT_EQ(source_st.at(bob_ip)->BytesDropped().bytes(), 0l); + + std::map> + dest_st = st->OutgoingStatsPerDestination(); + ASSERT_EQ(dest_st.size(), 1lu); + EXPECT_EQ(dest_st.at(bob_ip)->PacketsSent(), 2000l); + 
EXPECT_EQ(dest_st.at(bob_ip)->BytesSent().bytes(), + single_packet_size * 2000l); + + // No debug stats are collected by default. + EXPECT_TRUE(st->SentPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(st->SentPacketsQueueWaitTimeUs().IsEmpty()); + EXPECT_TRUE(st->ReceivedPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(st->DroppedPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(dest_st.at(bob_ip)->SentPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(source_st.at(bob_ip)->ReceivedPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(source_st.at(bob_ip)->DroppedPacketsSizeCounter().IsEmpty()); + received_stats_count++; }); - nt2->GetStats([&](EmulatedNetworkStats st) { - EXPECT_EQ(st.packets_sent, 2000l); - EXPECT_EQ(st.bytes_sent.bytes(), single_packet_size * 2000l); - EXPECT_EQ(st.packets_received, 2000l); - EXPECT_EQ(st.bytes_received.bytes(), single_packet_size * 2000l); - EXPECT_EQ(st.packets_dropped, 0l); - EXPECT_EQ(st.bytes_dropped.bytes(), 0l); + nt2->GetStats([&](std::unique_ptr st) { + EXPECT_EQ(st->PacketsSent(), 2000l); + EXPECT_EQ(st->BytesSent().bytes(), single_packet_size * 2000l); + EXPECT_THAT(st->LocalAddresses(), + ElementsAreArray({bob_endpoint->GetPeerLocalAddress()})); + EXPECT_EQ(st->PacketsReceived(), 2000l); + EXPECT_EQ(st->BytesReceived().bytes(), single_packet_size * 2000l); + EXPECT_EQ(st->PacketsDropped(), 0l); + EXPECT_EQ(st->BytesDropped().bytes(), 0l); + EXPECT_GT(st->FirstReceivedPacketSize(), DataSize::Zero()); + EXPECT_TRUE(st->FirstPacketReceivedTime().IsFinite()); + EXPECT_TRUE(st->LastPacketReceivedTime().IsFinite()); + + rtc::IPAddress alice_ip = alice_endpoint->GetPeerLocalAddress(); + std::map> + source_st = st->IncomingStatsPerSource(); + ASSERT_EQ(source_st.size(), 1lu); + EXPECT_EQ(source_st.at(alice_ip)->PacketsReceived(), 2000l); + EXPECT_EQ(source_st.at(alice_ip)->BytesReceived().bytes(), + single_packet_size * 2000l); + EXPECT_EQ(source_st.at(alice_ip)->PacketsDropped(), 0l); + 
EXPECT_EQ(source_st.at(alice_ip)->BytesDropped().bytes(), 0l); + + std::map> + dest_st = st->OutgoingStatsPerDestination(); + ASSERT_EQ(dest_st.size(), 1lu); + EXPECT_EQ(dest_st.at(alice_ip)->PacketsSent(), 2000l); + EXPECT_EQ(dest_st.at(alice_ip)->BytesSent().bytes(), + single_packet_size * 2000l); + + // No debug stats are collected by default. + EXPECT_TRUE(st->SentPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(st->SentPacketsQueueWaitTimeUs().IsEmpty()); + EXPECT_TRUE(st->ReceivedPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(st->DroppedPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(dest_st.at(alice_ip)->SentPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(source_st.at(alice_ip)->ReceivedPacketsSizeCounter().IsEmpty()); + EXPECT_TRUE(source_st.at(alice_ip)->DroppedPacketsSizeCounter().IsEmpty()); + received_stats_count++; }); ASSERT_EQ_SIMULATED_WAIT(received_stats_count.load(), 2, @@ -267,6 +336,111 @@ TEST(NetworkEmulationManagerTest, Run) { *network_manager.time_controller()); } +TEST(NetworkEmulationManagerTest, DebugStatsCollectedInDebugMode) { + NetworkEmulationManagerImpl network_manager(TimeMode::kSimulated); + + EmulatedNetworkNode* alice_node = network_manager.CreateEmulatedNode( + std::make_unique(BuiltInNetworkBehaviorConfig())); + EmulatedNetworkNode* bob_node = network_manager.CreateEmulatedNode( + std::make_unique(BuiltInNetworkBehaviorConfig())); + EmulatedEndpointConfig debug_config; + debug_config.stats_gathering_mode = + EmulatedEndpointConfig::StatsGatheringMode::kDebug; + EmulatedEndpoint* alice_endpoint = + network_manager.CreateEndpoint(debug_config); + EmulatedEndpoint* bob_endpoint = + network_manager.CreateEndpoint(EmulatedEndpointConfig()); + network_manager.CreateRoute(alice_endpoint, {alice_node}, bob_endpoint); + network_manager.CreateRoute(bob_endpoint, {bob_node}, alice_endpoint); + + EmulatedNetworkManagerInterface* nt1 = + network_manager.CreateEmulatedNetworkManagerInterface({alice_endpoint}); + EmulatedNetworkManagerInterface* 
nt2 = + network_manager.CreateEmulatedNetworkManagerInterface({bob_endpoint}); + + rtc::Thread* t1 = nt1->network_thread(); + rtc::Thread* t2 = nt2->network_thread(); + + rtc::CopyOnWriteBuffer data("Hello"); + for (uint64_t j = 0; j < 2; j++) { + rtc::AsyncSocket* s1 = nullptr; + rtc::AsyncSocket* s2 = nullptr; + t1->Invoke(RTC_FROM_HERE, [&] { + s1 = t1->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); + }); + t2->Invoke(RTC_FROM_HERE, [&] { + s2 = t2->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); + }); + + SocketReader r1(s1, t1); + SocketReader r2(s2, t2); + + rtc::SocketAddress a1(alice_endpoint->GetPeerLocalAddress(), 0); + rtc::SocketAddress a2(bob_endpoint->GetPeerLocalAddress(), 0); + + t1->Invoke(RTC_FROM_HERE, [&] { + s1->Bind(a1); + a1 = s1->GetLocalAddress(); + }); + t2->Invoke(RTC_FROM_HERE, [&] { + s2->Bind(a2); + a2 = s2->GetLocalAddress(); + }); + + t1->Invoke(RTC_FROM_HERE, [&] { s1->Connect(a2); }); + t2->Invoke(RTC_FROM_HERE, [&] { s2->Connect(a1); }); + + for (uint64_t i = 0; i < 1000; i++) { + t1->PostTask(RTC_FROM_HERE, + [&]() { s1->Send(data.data(), data.size()); }); + t2->PostTask(RTC_FROM_HERE, + [&]() { s2->Send(data.data(), data.size()); }); + } + + network_manager.time_controller()->AdvanceTime(TimeDelta::Seconds(1)); + + EXPECT_EQ(r1.ReceivedCount(), 1000); + EXPECT_EQ(r2.ReceivedCount(), 1000); + + t1->Invoke(RTC_FROM_HERE, [&] { delete s1; }); + t2->Invoke(RTC_FROM_HERE, [&] { delete s2; }); + } + + const int64_t single_packet_size = data.size() + kOverheadIpv4Udp; + std::atomic received_stats_count{0}; + nt1->GetStats([&](std::unique_ptr st) { + rtc::IPAddress bob_ip = bob_endpoint->GetPeerLocalAddress(); + std::map> + source_st = st->IncomingStatsPerSource(); + ASSERT_EQ(source_st.size(), 1lu); + + std::map> + dest_st = st->OutgoingStatsPerDestination(); + ASSERT_EQ(dest_st.size(), 1lu); + + // No debug stats are collected by default. 
+ EXPECT_EQ(st->SentPacketsSizeCounter().NumSamples(), 2000l); + EXPECT_EQ(st->ReceivedPacketsSizeCounter().GetAverage(), + single_packet_size); + EXPECT_EQ(st->SentPacketsQueueWaitTimeUs().NumSamples(), 2000l); + EXPECT_LT(st->SentPacketsQueueWaitTimeUs().GetMax(), 1); + EXPECT_TRUE(st->DroppedPacketsSizeCounter().IsEmpty()); + EXPECT_EQ(dest_st.at(bob_ip)->SentPacketsSizeCounter().NumSamples(), 2000l); + EXPECT_EQ(dest_st.at(bob_ip)->SentPacketsSizeCounter().GetAverage(), + single_packet_size); + EXPECT_EQ(source_st.at(bob_ip)->ReceivedPacketsSizeCounter().NumSamples(), + 2000l); + EXPECT_EQ(source_st.at(bob_ip)->ReceivedPacketsSizeCounter().GetAverage(), + single_packet_size); + EXPECT_TRUE(source_st.at(bob_ip)->DroppedPacketsSizeCounter().IsEmpty()); + + received_stats_count++; + }); + ASSERT_EQ_SIMULATED_WAIT(received_stats_count.load(), 1, + kStatsWaitTimeout.ms(), + *network_manager.time_controller()); +} + TEST(NetworkEmulationManagerTest, ThroughputStats) { NetworkEmulationManagerImpl network_manager(TimeMode::kRealTime); @@ -292,8 +466,15 @@ TEST(NetworkEmulationManagerTest, ThroughputStats) { constexpr int64_t kUdpPayloadSize = 100; constexpr int64_t kSinglePacketSize = kUdpPayloadSize + kOverheadIpv4Udp; rtc::CopyOnWriteBuffer data(kUdpPayloadSize); - auto* s1 = t1->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); - auto* s2 = t2->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); + + rtc::AsyncSocket* s1 = nullptr; + rtc::AsyncSocket* s2 = nullptr; + t1->Invoke(RTC_FROM_HERE, [&] { + s1 = t1->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); + }); + t2->Invoke(RTC_FROM_HERE, [&] { + s2 = t2->socketserver()->CreateAsyncSocket(AF_INET, SOCK_DGRAM); + }); SocketReader r1(s1, t1); SocketReader r2(s2, t2); @@ -323,14 +504,14 @@ TEST(NetworkEmulationManagerTest, ThroughputStats) { } std::atomic received_stats_count{0}; - nt1->GetStats([&](EmulatedNetworkStats st) { - EXPECT_EQ(st.packets_sent, kNumPacketsSent); - 
EXPECT_EQ(st.bytes_sent.bytes(), kSinglePacketSize * kNumPacketsSent); + nt1->GetStats([&](std::unique_ptr st) { + EXPECT_EQ(st->PacketsSent(), kNumPacketsSent); + EXPECT_EQ(st->BytesSent().bytes(), kSinglePacketSize * kNumPacketsSent); const double tolerance = 0.95; // Accept 5% tolerance for timing. - EXPECT_GE(st.last_packet_sent_time - st.first_packet_sent_time, + EXPECT_GE(st->LastPacketSentTime() - st->FirstPacketSentTime(), (kNumPacketsSent - 1) * kDelay * tolerance); - EXPECT_GT(st.AverageSendRate().bps(), 0); + EXPECT_GT(st->AverageSendRate().bps(), 0); received_stats_count++; }); @@ -392,5 +573,101 @@ TEST_F(NetworkEmulationManagerThreeNodesRoutingTest, SendPacketsAndValidateDelivery(); } +TEST(NetworkEmulationManagerTest, EndpointLoopback) { + NetworkEmulationManagerImpl network_manager(TimeMode::kSimulated); + auto endpoint = network_manager.CreateEndpoint(EmulatedEndpointConfig()); + + MockReceiver receiver; + EXPECT_CALL(receiver, OnPacketReceived(::testing::_)).Times(1); + ASSERT_EQ(endpoint->BindReceiver(80, &receiver), 80); + + endpoint->SendPacket(rtc::SocketAddress(endpoint->GetPeerLocalAddress(), 80), + rtc::SocketAddress(endpoint->GetPeerLocalAddress(), 80), + "Hello"); + network_manager.time_controller()->AdvanceTime(TimeDelta::Seconds(1)); +} + +TEST(NetworkEmulationManagerTest, EndpointCanSendWithDifferentSourceIp) { + constexpr uint32_t kEndpointIp = 0xC0A80011; // 192.168.0.17 + constexpr uint32_t kSourceIp = 0xC0A80012; // 192.168.0.18 + NetworkEmulationManagerImpl network_manager(TimeMode::kSimulated); + EmulatedEndpointConfig endpoint_config; + endpoint_config.ip = rtc::IPAddress(kEndpointIp); + endpoint_config.allow_send_packet_with_different_source_ip = true; + auto endpoint = network_manager.CreateEndpoint(endpoint_config); + + MockReceiver receiver; + EXPECT_CALL(receiver, OnPacketReceived(::testing::_)).Times(1); + ASSERT_EQ(endpoint->BindReceiver(80, &receiver), 80); + + endpoint->SendPacket(rtc::SocketAddress(kSourceIp, 80), + 
rtc::SocketAddress(endpoint->GetPeerLocalAddress(), 80), + "Hello"); + network_manager.time_controller()->AdvanceTime(TimeDelta::Seconds(1)); +} + +TEST(NetworkEmulationManagerTest, + EndpointCanReceiveWithDifferentDestIpThroughDefaultRoute) { + constexpr uint32_t kDestEndpointIp = 0xC0A80011; // 192.168.0.17 + constexpr uint32_t kDestIp = 0xC0A80012; // 192.168.0.18 + NetworkEmulationManagerImpl network_manager(TimeMode::kSimulated); + auto sender_endpoint = + network_manager.CreateEndpoint(EmulatedEndpointConfig()); + EmulatedEndpointConfig endpoint_config; + endpoint_config.ip = rtc::IPAddress(kDestEndpointIp); + endpoint_config.allow_receive_packets_with_different_dest_ip = true; + auto receiver_endpoint = network_manager.CreateEndpoint(endpoint_config); + + MockReceiver receiver; + EXPECT_CALL(receiver, OnPacketReceived(::testing::_)).Times(1); + ASSERT_EQ(receiver_endpoint->BindReceiver(80, &receiver), 80); + + network_manager.CreateDefaultRoute( + sender_endpoint, {network_manager.NodeBuilder().Build().node}, + receiver_endpoint); + + sender_endpoint->SendPacket( + rtc::SocketAddress(sender_endpoint->GetPeerLocalAddress(), 80), + rtc::SocketAddress(kDestIp, 80), "Hello"); + network_manager.time_controller()->AdvanceTime(TimeDelta::Seconds(1)); +} + +TEST(NetworkEmulationManagerTURNTest, GetIceServerConfig) { + NetworkEmulationManagerImpl network_manager(TimeMode::kRealTime); + auto turn = network_manager.CreateTURNServer(EmulatedTURNServerConfig()); + + EXPECT_GT(turn->GetIceServerConfig().username.size(), 0u); + EXPECT_GT(turn->GetIceServerConfig().password.size(), 0u); + EXPECT_NE(turn->GetIceServerConfig().url.find( + turn->GetClientEndpoint()->GetPeerLocalAddress().ToString()), + std::string::npos); +} + +TEST(NetworkEmulationManagerTURNTest, ClientTraffic) { + NetworkEmulationManagerImpl emulation(TimeMode::kSimulated); + auto* ep = emulation.CreateEndpoint(EmulatedEndpointConfig()); + auto* turn = emulation.CreateTURNServer(EmulatedTURNServerConfig()); 
+ auto* node = CreateEmulatedNodeWithDefaultBuiltInConfig(&emulation); + emulation.CreateRoute(ep, {node}, turn->GetClientEndpoint()); + emulation.CreateRoute(turn->GetClientEndpoint(), {node}, ep); + + MockReceiver recv; + int port = ep->BindReceiver(0, &recv).value(); + + // Construct a STUN BINDING. + cricket::StunMessage ping; + ping.SetType(cricket::STUN_BINDING_REQUEST); + rtc::ByteBufferWriter buf; + ping.Write(&buf); + rtc::CopyOnWriteBuffer packet(buf.Data(), buf.Length()); + + // We expect to get a ping reply. + EXPECT_CALL(recv, OnPacketReceived(::testing::_)).Times(1); + + ep->SendPacket(rtc::SocketAddress(ep->GetPeerLocalAddress(), port), + turn->GetClientEndpointAddress(), packet); + emulation.time_controller()->AdvanceTime(TimeDelta::Seconds(1)); +} + } // namespace test } // namespace webrtc diff --git a/test/network/traffic_route.cc b/test/network/traffic_route.cc index 2baf5a4662..81bb8ca514 100644 --- a/test/network/traffic_route.cc +++ b/test/network/traffic_route.cc @@ -29,33 +29,23 @@ class NullReceiver : public EmulatedNetworkReceiverInterface { class ActionReceiver : public EmulatedNetworkReceiverInterface { public: - ActionReceiver(std::function action, EmulatedEndpoint* endpoint) - : action_(action), endpoint_(endpoint) {} + explicit ActionReceiver(std::function action) : action_(action) {} ~ActionReceiver() override = default; void OnPacketReceived(EmulatedIpPacket packet) override { - RTC_DCHECK(port_); action_(); - endpoint_->UnbindReceiver(port_.value()); } - // We can't set port in constructor, because port will be provided by - // endpoint, when this receiver will be binded to that endpoint. - void SetPort(uint16_t port) { port_ = port; } - private: std::function action_; - // Endpoint and port will be used to free port in the endpoint after action - // will be done. 
- EmulatedEndpoint* endpoint_; - absl::optional port_ = absl::nullopt; }; } // namespace -TrafficRoute::TrafficRoute(Clock* clock, - EmulatedNetworkReceiverInterface* receiver, - EmulatedEndpoint* endpoint) +CrossTrafficRouteImpl::CrossTrafficRouteImpl( + Clock* clock, + EmulatedNetworkReceiverInterface* receiver, + EmulatedEndpointImpl* endpoint) : clock_(clock), receiver_(receiver), endpoint_(endpoint) { null_receiver_ = std::make_unique(); absl::optional port = @@ -63,32 +53,34 @@ TrafficRoute::TrafficRoute(Clock* clock, RTC_DCHECK(port); null_receiver_port_ = port.value(); } -TrafficRoute::~TrafficRoute() = default; +CrossTrafficRouteImpl::~CrossTrafficRouteImpl() = default; -void TrafficRoute::TriggerPacketBurst(size_t num_packets, size_t packet_size) { +void CrossTrafficRouteImpl::TriggerPacketBurst(size_t num_packets, + size_t packet_size) { for (size_t i = 0; i < num_packets; ++i) { SendPacket(packet_size); } } -void TrafficRoute::NetworkDelayedAction(size_t packet_size, - std::function action) { - auto action_receiver = std::make_unique(action, endpoint_); +void CrossTrafficRouteImpl::NetworkDelayedAction(size_t packet_size, + std::function action) { + auto action_receiver = std::make_unique(action); + // BindOneShotReceiver arranges to free the port in the endpoint after the + // action is done. 
absl::optional port = - endpoint_->BindReceiver(0, action_receiver.get()); + endpoint_->BindOneShotReceiver(0, action_receiver.get()); RTC_DCHECK(port); - action_receiver->SetPort(port.value()); actions_.push_back(std::move(action_receiver)); SendPacket(packet_size, port.value()); } -void TrafficRoute::SendPacket(size_t packet_size) { +void CrossTrafficRouteImpl::SendPacket(size_t packet_size) { SendPacket(packet_size, null_receiver_port_); } -void TrafficRoute::SendPacket(size_t packet_size, uint16_t dest_port) { +void CrossTrafficRouteImpl::SendPacket(size_t packet_size, uint16_t dest_port) { rtc::CopyOnWriteBuffer data(packet_size); - std::fill_n(data.data(), data.size(), 0); + std::fill_n(data.MutableData(), data.size(), 0); receiver_->OnPacketReceived(EmulatedIpPacket( /*from=*/rtc::SocketAddress(), rtc::SocketAddress(endpoint_->GetPeerLocalAddress(), dest_port), data, diff --git a/test/network/traffic_route.h b/test/network/traffic_route.h index 1bb34c6b6c..2c2fadc427 100644 --- a/test/network/traffic_route.h +++ b/test/network/traffic_route.h @@ -14,6 +14,7 @@ #include #include +#include "api/test/network_emulation_manager.h" #include "rtc_base/copy_on_write_buffer.h" #include "system_wrappers/include/clock.h" #include "test/network/network_emulation.h" @@ -23,26 +24,27 @@ namespace test { // Represents the endpoint for cross traffic that is going through the network. // It can be used to emulate unexpected network load. -class TrafficRoute { +class CrossTrafficRouteImpl final : public CrossTrafficRoute { public: - TrafficRoute(Clock* clock, - EmulatedNetworkReceiverInterface* receiver, - EmulatedEndpoint* endpoint); - ~TrafficRoute(); + CrossTrafficRouteImpl(Clock* clock, + EmulatedNetworkReceiverInterface* receiver, + EmulatedEndpointImpl* endpoint); + ~CrossTrafficRouteImpl(); // Triggers sending of dummy packets with size |packet_size| bytes. 
- void TriggerPacketBurst(size_t num_packets, size_t packet_size); + void TriggerPacketBurst(size_t num_packets, size_t packet_size) override; // Sends a packet over the nodes and runs |action| when it has been delivered. - void NetworkDelayedAction(size_t packet_size, std::function action); + void NetworkDelayedAction(size_t packet_size, + std::function action) override; - void SendPacket(size_t packet_size); + void SendPacket(size_t packet_size) override; private: void SendPacket(size_t packet_size, uint16_t dest_port); Clock* const clock_; EmulatedNetworkReceiverInterface* const receiver_; - EmulatedEndpoint* const endpoint_; + EmulatedEndpointImpl* const endpoint_; uint16_t null_receiver_port_; std::unique_ptr null_receiver_; diff --git a/test/pc/e2e/BUILD.gn b/test/pc/e2e/BUILD.gn index d340f1a00c..9e9d5c2db5 100644 --- a/test/pc/e2e/BUILD.gn +++ b/test/pc/e2e/BUILD.gn @@ -8,632 +8,752 @@ import("../../../webrtc.gni") -group("e2e") { - testonly = true - - deps = [ - ":default_encoded_image_data_injector", - ":encoded_image_data_injector_api", - ":example_video_quality_analyzer", - ":id_generator", - ":quality_analyzing_video_decoder", - ":quality_analyzing_video_encoder", - ":single_process_encoded_image_data_injector", - ] - if (rtc_include_tests) { - deps += [ - ":peerconnection_quality_test", - ":test_peer", - ":video_quality_analyzer_injection_helper", +if (!build_with_chromium) { + group("e2e") { + testonly = true + + deps = [ + ":encoded_image_data_injector_api", + ":example_video_quality_analyzer", + ":quality_analyzing_video_decoder", + ":quality_analyzing_video_encoder", + ":single_process_encoded_image_data_injector", + ":video_frame_tracking_id_injector", ] + if (rtc_include_tests) { + deps += [ + ":peerconnection_quality_test", + ":test_peer", + ":video_quality_analyzer_injection_helper", + ] + } } -} -if (rtc_include_tests) { - group("e2e_unittests") { + if (rtc_include_tests) { + group("e2e_unittests") { + testonly = true + + deps = [ + 
":default_video_quality_analyzer_test", + ":multi_head_queue_test", + ":peer_connection_e2e_smoke_test", + ":single_process_encoded_image_data_injector_unittest", + ":video_frame_tracking_id_injector_unittest", + ] + } + } + + rtc_library("peer_connection_quality_test_params") { + visibility = [ "*" ] testonly = true + sources = [ "peer_connection_quality_test_params.h" ] deps = [ - ":default_encoded_image_data_injector_unittest", - ":default_video_quality_analyzer_test", - ":peer_connection_e2e_smoke_test", - ":single_process_encoded_image_data_injector_unittest", + "../../../api:callfactory_api", + "../../../api:fec_controller_api", + "../../../api:libjingle_peerconnection_api", + "../../../api:packet_socket_factory", + "../../../api:peer_connection_quality_test_fixture_api", + "../../../api/rtc_event_log", + "../../../api/task_queue", + "../../../api/transport:network_control", + "../../../api/transport:webrtc_key_value_config", + "../../../api/video_codecs:video_codecs_api", + "../../../rtc_base", + "../../../rtc_base:threading", ] } -} - -rtc_library("peer_connection_quality_test_params") { - visibility = [ "*" ] - testonly = true - sources = [ "peer_connection_quality_test_params.h" ] - - deps = [ - "../../../api:callfactory_api", - "../../../api:fec_controller_api", - "../../../api:libjingle_peerconnection_api", - "../../../api:packet_socket_factory", - "../../../api:peer_connection_quality_test_fixture_api", - "../../../api/rtc_event_log", - "../../../api/task_queue", - "../../../api/transport:network_control", - "../../../api/transport/media:media_transport_interface", - "../../../api/video_codecs:video_codecs_api", - "../../../rtc_base", - ] -} - -rtc_library("encoded_image_data_injector_api") { - visibility = [ "*" ] - testonly = true - sources = [ "analyzer/video/encoded_image_data_injector.h" ] - deps = [ "../../../api/video:encoded_image" ] -} + rtc_library("encoded_image_data_injector_api") { + visibility = [ "*" ] + testonly = true + sources = [ 
"analyzer/video/encoded_image_data_injector.h" ] -rtc_library("default_encoded_image_data_injector") { - visibility = [ "*" ] - testonly = true - sources = [ - "analyzer/video/default_encoded_image_data_injector.cc", - "analyzer/video/default_encoded_image_data_injector.h", - ] - - deps = [ - ":encoded_image_data_injector_api", - "../../../api/video:encoded_image", - "../../../rtc_base:checks", - "../../../rtc_base:criticalsection", - "//third_party/abseil-cpp/absl/memory", - ] -} + deps = [ "../../../api/video:encoded_image" ] + } -rtc_library("single_process_encoded_image_data_injector") { - visibility = [ "*" ] - testonly = true - sources = [ - "analyzer/video/single_process_encoded_image_data_injector.cc", - "analyzer/video/single_process_encoded_image_data_injector.h", - ] - - deps = [ - ":encoded_image_data_injector_api", - "../../../api/video:encoded_image", - "../../../rtc_base:checks", - "../../../rtc_base:criticalsection", - "//third_party/abseil-cpp/absl/memory", - ] -} + rtc_library("single_process_encoded_image_data_injector") { + visibility = [ "*" ] + testonly = true + sources = [ + "analyzer/video/single_process_encoded_image_data_injector.cc", + "analyzer/video/single_process_encoded_image_data_injector.h", + ] -rtc_library("id_generator") { - visibility = [ "*" ] - testonly = true - sources = [ - "analyzer/video/id_generator.cc", - "analyzer/video/id_generator.h", - ] - deps = [] -} + deps = [ + ":encoded_image_data_injector_api", + "../../../api/video:encoded_image", + "../../../rtc_base:checks", + "../../../rtc_base:criticalsection", + "../../../rtc_base/synchronization:mutex", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + } -rtc_library("simulcast_dummy_buffer_helper") { - visibility = [ "*" ] - testonly = true - sources = [ - "analyzer/video/simulcast_dummy_buffer_helper.cc", - "analyzer/video/simulcast_dummy_buffer_helper.h", - ] - deps = [ - "../../../api/video:video_frame", - "../../../api/video:video_frame_i420", - ] -} + 
rtc_library("video_frame_tracking_id_injector") { + visibility = [ "*" ] + testonly = true + sources = [ + "analyzer/video/video_frame_tracking_id_injector.cc", + "analyzer/video/video_frame_tracking_id_injector.h", + ] -rtc_library("quality_analyzing_video_decoder") { - visibility = [ "*" ] - testonly = true - sources = [ - "analyzer/video/quality_analyzing_video_decoder.cc", - "analyzer/video/quality_analyzing_video_decoder.h", - ] - deps = [ - ":encoded_image_data_injector_api", - ":id_generator", - ":simulcast_dummy_buffer_helper", - "../../../api:video_quality_analyzer_api", - "../../../api/video:encoded_image", - "../../../api/video:video_frame", - "../../../api/video:video_frame_i420", - "../../../api/video:video_rtp_headers", - "../../../api/video_codecs:video_codecs_api", - "../../../modules/video_coding:video_codec_interface", - "../../../rtc_base:criticalsection", - "../../../rtc_base:logging", - "//third_party/abseil-cpp/absl/types:optional", - ] -} + deps = [ + ":encoded_image_data_injector_api", + "../../../api/video:encoded_image", + "../../../rtc_base:checks", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/memory" ] + } -rtc_library("quality_analyzing_video_encoder") { - visibility = [ "*" ] - testonly = true - sources = [ - "analyzer/video/quality_analyzing_video_encoder.cc", - "analyzer/video/quality_analyzing_video_encoder.h", - ] - deps = [ - ":encoded_image_data_injector_api", - ":id_generator", - "../../../api:video_quality_analyzer_api", - "../../../api/video:encoded_image", - "../../../api/video:video_frame", - "../../../api/video:video_rtp_headers", - "../../../api/video_codecs:video_codecs_api", - "../../../modules/video_coding:video_codec_interface", - "../../../rtc_base:criticalsection", - "../../../rtc_base:logging", - ] -} + rtc_library("simulcast_dummy_buffer_helper") { + visibility = [ "*" ] + testonly = true + sources = [ + "analyzer/video/simulcast_dummy_buffer_helper.cc", + "analyzer/video/simulcast_dummy_buffer_helper.h", + ] 
+ deps = [ "../../../api/video:video_frame" ] + } -if (rtc_include_tests) { - rtc_library("video_quality_analyzer_injection_helper") { + rtc_library("quality_analyzing_video_decoder") { visibility = [ "*" ] testonly = true sources = [ - "analyzer/video/video_quality_analyzer_injection_helper.cc", - "analyzer/video/video_quality_analyzer_injection_helper.h", + "analyzer/video/quality_analyzing_video_decoder.cc", + "analyzer/video/quality_analyzing_video_decoder.h", ] deps = [ ":encoded_image_data_injector_api", - ":id_generator", - ":quality_analyzing_video_decoder", - ":quality_analyzing_video_encoder", ":simulcast_dummy_buffer_helper", - "../..:test_renderer", - "../../../api:peer_connection_quality_test_fixture_api", - "../../../api:stats_observer_interface", "../../../api:video_quality_analyzer_api", + "../../../api/video:encoded_image", "../../../api/video:video_frame", "../../../api/video:video_rtp_headers", "../../../api/video_codecs:video_codecs_api", + "../../../modules/video_coding:video_codec_interface", "../../../rtc_base:criticalsection", - "../../../test:video_test_common", - "../../../test:video_test_support", - "//third_party/abseil-cpp/absl/memory", + "../../../rtc_base:logging", + "../../../rtc_base/synchronization:mutex", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", ] } - rtc_library("echo_emulation") { + rtc_library("quality_analyzing_video_encoder") { visibility = [ "*" ] testonly = true sources = [ - "echo/echo_emulation.cc", - "echo/echo_emulation.h", + "analyzer/video/quality_analyzing_video_encoder.cc", + "analyzer/video/quality_analyzing_video_encoder.h", ] deps = [ - "../../../api:peer_connection_quality_test_fixture_api", - "../../../modules/audio_device:audio_device_impl", - "../../../rtc_base:rtc_base_approved", + ":encoded_image_data_injector_api", + "../../../api:video_quality_analyzer_api", + "../../../api/video:encoded_image", + "../../../api/video:video_frame", + 
"../../../api/video:video_rtp_headers", + "../../../api/video_codecs:video_codecs_api", + "../../../modules/video_coding:video_codec_interface", + "../../../rtc_base:criticalsection", + "../../../rtc_base:logging", + "../../../rtc_base/synchronization:mutex", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] + } + + if (rtc_include_tests) { + rtc_library("video_quality_analyzer_injection_helper") { + visibility = [ "*" ] + testonly = true + sources = [ + "analyzer/video/video_quality_analyzer_injection_helper.cc", + "analyzer/video/video_quality_analyzer_injection_helper.h", + ] + deps = [ + ":encoded_image_data_injector_api", + ":quality_analyzing_video_decoder", + ":quality_analyzing_video_encoder", + ":simulcast_dummy_buffer_helper", + "../..:test_renderer", + "../../../api:array_view", + "../../../api:peer_connection_quality_test_fixture_api", + "../../../api:stats_observer_interface", + "../../../api:video_quality_analyzer_api", + "../../../api/video:video_frame", + "../../../api/video:video_rtp_headers", + "../../../api/video_codecs:video_codecs_api", + "../../../rtc_base:criticalsection", + "../../../rtc_base/synchronization:mutex", + "../../../test:video_test_common", + "../../../test:video_test_support", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + ] + } + + rtc_library("echo_emulation") { + visibility = [ "*" ] + testonly = true + sources = [ + "echo/echo_emulation.cc", + "echo/echo_emulation.h", + ] + deps = [ + "../../../api:peer_connection_quality_test_fixture_api", + "../../../modules/audio_device:audio_device_impl", + "../../../rtc_base:rtc_base_approved", + ] + } + + rtc_library("test_peer") { + visibility = [ "*" ] + testonly = true + sources = [ + "test_peer.cc", + "test_peer.h", + ] + deps = [ + ":peer_configurer", + ":peer_connection_quality_test_params", + "../../../api:frame_generator_api", + "../../../api:peer_connection_quality_test_fixture_api", + 
"../../../api:scoped_refptr", + "../../../modules/audio_processing:api", + "../../../pc:peerconnection_wrapper", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/types:variant", + ] + } + + rtc_library("test_peer_factory") { + visibility = [ "*" ] + testonly = true + sources = [ + "test_peer_factory.cc", + "test_peer_factory.h", + ] + deps = [ + ":echo_emulation", + ":peer_configurer", + ":peer_connection_quality_test_params", + ":quality_analyzing_video_encoder", + ":test_peer", + ":video_quality_analyzer_injection_helper", + "../..:copy_to_file_audio_capturer", + "../../../api:create_time_controller", + "../../../api:peer_connection_quality_test_fixture_api", + "../../../api:time_controller", + "../../../api/rtc_event_log:rtc_event_log_factory", + "../../../api/task_queue:default_task_queue_factory", + "../../../api/transport:field_trial_based_config", + "../../../api/video_codecs:builtin_video_decoder_factory", + "../../../api/video_codecs:builtin_video_encoder_factory", + "../../../media:rtc_audio_video", + "../../../media:rtc_media_engine_defaults", + "../../../modules/audio_device:audio_device_impl", + "../../../modules/audio_processing/aec_dump", + "../../../p2p:rtc_p2p", + "../../../rtc_base:rtc_task_queue", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings", + ] + } + + rtc_library("media_helper") { + visibility = [ "*" ] + testonly = true + sources = [ + "media/media_helper.cc", + "media/media_helper.h", + "media/test_video_capturer_video_track_source.h", + ] + deps = [ + ":peer_configurer", + ":test_peer", + ":video_quality_analyzer_injection_helper", + "../..:fileutils", + "../..:platform_video_capturer", + "../..:video_test_common", + "../../../api:create_frame_generator", + "../../../api:frame_generator_api", + "../../../api:media_stream_interface", + "../../../api:peer_connection_quality_test_fixture_api", + "../../../api/video:video_frame", + 
"../../../pc:peerconnection", + "../../../pc:session_description", + "../../../pc:video_track_source", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:variant" ] + } + + rtc_library("peer_configurer") { + visibility = [ "*" ] + testonly = true + sources = [ + "peer_configurer.cc", + "peer_configurer.h", + ] + deps = [ + ":peer_connection_quality_test_params", + "../..:fileutils", + "../../../api:callfactory_api", + "../../../api:create_peer_connection_quality_test_frame_generator", + "../../../api:fec_controller_api", + "../../../api:packet_socket_factory", + "../../../api:peer_connection_quality_test_fixture_api", + "../../../api/rtc_event_log", + "../../../api/task_queue", + "../../../api/transport:network_control", + "../../../api/video_codecs:video_codecs_api", + "../../../rtc_base", + "../../../rtc_base:threading", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] + } + + rtc_library("test_activities_executor") { + visibility = [ "*" ] + testonly = true + sources = [ + "test_activities_executor.cc", + "test_activities_executor.h", + ] + deps = [ + "../../../api/units:time_delta", + "../../../api/units:timestamp", + "../../../rtc_base:checks", + "../../../rtc_base:criticalsection", + "../../../rtc_base:logging", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base:task_queue_for_test", + "../../../rtc_base/synchronization:mutex", + "../../../rtc_base/task_utils:repeating_task", + "../../../system_wrappers", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/types:optional", + ] + } + + rtc_library("peerconnection_quality_test") { + visibility = [ "*" ] + testonly = true + + sources = [ + "peer_connection_quality_test.cc", + "peer_connection_quality_test.h", + ] + deps = [ + ":analyzer_helper", + ":cross_media_metrics_reporter", + ":default_audio_quality_analyzer", + ":default_video_quality_analyzer", + ":media_helper", + ":peer_configurer", + ":peer_connection_quality_test_params", + 
":sdp_changer", + ":single_process_encoded_image_data_injector", + ":stats_poller", + ":test_activities_executor", + ":test_peer", + ":test_peer_factory", + ":video_quality_analyzer_injection_helper", + ":video_quality_metrics_reporter", + "../..:field_trial", + "../..:fileutils", + "../..:perf_test", + "../../../api:audio_quality_analyzer_api", + "../../../api:libjingle_peerconnection_api", + "../../../api:media_stream_interface", + "../../../api:peer_connection_quality_test_fixture_api", + "../../../api:rtc_event_log_output_file", + "../../../api:scoped_refptr", + "../../../api:time_controller", + "../../../api:video_quality_analyzer_api", + "../../../api/rtc_event_log", + "../../../api/task_queue", + "../../../api/units:time_delta", + "../../../api/units:timestamp", + "../../../pc:pc_test_utils", + "../../../pc:peerconnection", + "../../../rtc_base", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:macromagic", + "../../../rtc_base:rtc_base_approved", + "../../../rtc_base:safe_conversions", + "../../../rtc_base:task_queue_for_test", + "../../../rtc_base:threading", + "../../../rtc_base/synchronization:mutex", + "../../../system_wrappers", + "../../../system_wrappers:field_trial", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] + } + + rtc_library("single_process_encoded_image_data_injector_unittest") { + testonly = true + sources = [ + "analyzer/video/single_process_encoded_image_data_injector_unittest.cc", + ] + deps = [ + ":single_process_encoded_image_data_injector", + "../../../api/video:encoded_image", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + ] + } + + rtc_library("video_frame_tracking_id_injector_unittest") { + testonly = true + sources = + [ "analyzer/video/video_frame_tracking_id_injector_unittest.cc" ] + deps = [ + ":video_frame_tracking_id_injector", + "../../../api/video:encoded_image", + "../../../rtc_base:rtc_base_approved", + "../../../test:test_support", + ] + } + + 
peer_connection_e2e_smoke_test_resources = [ + "../../../resources/pc_quality_smoke_test_alice_source.wav", + "../../../resources/pc_quality_smoke_test_bob_source.wav", ] + if (is_ios) { + bundle_data("peer_connection_e2e_smoke_test_resources_bundle_data") { + testonly = true + sources = peer_connection_e2e_smoke_test_resources + outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ] + } + } + + rtc_library("peer_connection_e2e_smoke_test") { + testonly = true + + sources = [ "peer_connection_e2e_smoke_test.cc" ] + deps = [ + ":default_audio_quality_analyzer", + ":default_video_quality_analyzer", + ":network_quality_metrics_reporter", + ":stats_based_network_quality_metrics_reporter", + "../../../api:callfactory_api", + "../../../api:create_network_emulation_manager", + "../../../api:create_peer_connection_quality_test_frame_generator", + "../../../api:create_peerconnection_quality_test_fixture", + "../../../api:libjingle_peerconnection_api", + "../../../api:media_stream_interface", + "../../../api:network_emulation_manager_api", + "../../../api:peer_connection_quality_test_fixture_api", + "../../../api:scoped_refptr", + "../../../api:simulated_network_api", + "../../../api/audio_codecs:builtin_audio_decoder_factory", + "../../../api/audio_codecs:builtin_audio_encoder_factory", + "../../../api/video_codecs:builtin_video_decoder_factory", + "../../../api/video_codecs:builtin_video_encoder_factory", + "../../../call:simulated_network", + "../../../media:rtc_audio_video", + "../../../modules/audio_device:audio_device_impl", + "../../../p2p:rtc_p2p", + "../../../pc:pc_test_utils", + "../../../pc:peerconnection_wrapper", + "../../../rtc_base", + "../../../rtc_base:gunit_helpers", + "../../../rtc_base:logging", + "../../../rtc_base:rtc_event", + "../../../system_wrappers:field_trial", + "../../../test:field_trial", + "../../../test:fileutils", + "../../../test:test_support", + ] + data = peer_connection_e2e_smoke_test_resources + if (is_ios) { + deps += [ 
":peer_connection_e2e_smoke_test_resources_bundle_data" ] + } + } + + rtc_library("stats_poller") { + visibility = [ "*" ] + testonly = true + sources = [ + "stats_poller.cc", + "stats_poller.h", + ] + deps = [ + ":test_peer", + "../../../api:libjingle_peerconnection_api", + "../../../api:rtc_stats_api", + "../../../api:stats_observer_interface", + "../../../rtc_base:logging", + ] + } + + rtc_library("default_video_quality_analyzer_test") { + testonly = true + sources = [ "analyzer/video/default_video_quality_analyzer_test.cc" ] + deps = [ + ":default_video_quality_analyzer", + "../..:test_support", + "../../../api:create_frame_generator", + "../../../api:rtp_packet_info", + "../../../api/video:encoded_image", + "../../../api/video:video_frame", + "../../../common_video", + "../../../modules/rtp_rtcp:rtp_rtcp_format", + "../../../rtc_base:stringutils", + "../../../rtc_tools:video_quality_analysis", + "../../../system_wrappers", + ] + } + + rtc_library("multi_head_queue_test") { + testonly = true + sources = [ "analyzer/video/multi_head_queue_test.cc" ] + deps = [ + ":multi_head_queue", + "../../../test:test_support", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] + } } - rtc_library("test_peer") { + rtc_library("analyzer_helper") { visibility = [ "*" ] - testonly = true sources = [ - "test_peer.cc", - "test_peer.h", + "analyzer_helper.cc", + "analyzer_helper.h", ] deps = [ - ":peer_configurer", - ":peer_connection_quality_test_params", - "../../../api:frame_generator_api", - "../../../api:peer_connection_quality_test_fixture_api", - "../../../api:scoped_refptr", - "../../../modules/audio_processing:api", - "../../../pc:peerconnection_wrapper", - "//third_party/abseil-cpp/absl/memory", - "//third_party/abseil-cpp/absl/types:variant", + "../../../api:sequence_checker", + "../../../api:track_id_stream_info_map", + "../../../rtc_base:macromagic", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } - rtc_library("test_peer_factory") { + 
rtc_library("default_audio_quality_analyzer") { visibility = [ "*" ] testonly = true sources = [ - "test_peer_factory.cc", - "test_peer_factory.h", + "analyzer/audio/default_audio_quality_analyzer.cc", + "analyzer/audio/default_audio_quality_analyzer.h", ] + deps = [ - ":echo_emulation", - ":peer_configurer", - ":peer_connection_quality_test_params", - ":quality_analyzing_video_encoder", - ":test_peer", - ":video_quality_analyzer_injection_helper", - "../..:copy_to_file_audio_capturer", - "../../../api:peer_connection_quality_test_fixture_api", - "../../../api/rtc_event_log:rtc_event_log_factory", - "../../../api/task_queue:default_task_queue_factory", - "../../../api/video_codecs:builtin_video_decoder_factory", - "../../../api/video_codecs:builtin_video_encoder_factory", - "../../../media:rtc_audio_video", - "../../../media:rtc_media_engine_defaults", - "../../../modules/audio_device:audio_device_impl", - "../../../modules/audio_processing/aec_dump", - "../../../p2p:rtc_p2p", - "../../../rtc_base:rtc_task_queue", - "//third_party/abseil-cpp/absl/memory", - "//third_party/abseil-cpp/absl/strings", + "../..:perf_test", + "../../../api:audio_quality_analyzer_api", + "../../../api:rtc_stats_api", + "../../../api:stats_observer_interface", + "../../../api:track_id_stream_info_map", + "../../../api/numerics", + "../../../api/units:time_delta", + "../../../api/units:timestamp", + "../../../rtc_base:criticalsection", + "../../../rtc_base:logging", + "../../../rtc_base:rtc_numerics", + "../../../rtc_base/synchronization:mutex", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } - rtc_library("media_helper") { + rtc_library("example_video_quality_analyzer") { visibility = [ "*" ] testonly = true sources = [ - "media/media_helper.cc", - "media/media_helper.h", - "media/test_video_capturer_video_track_source.h", + "analyzer/video/example_video_quality_analyzer.cc", + "analyzer/video/example_video_quality_analyzer.h", ] + deps = [ - ":peer_configurer", - 
":test_peer", - ":video_quality_analyzer_injection_helper", - "../..:fileutils", - "../..:platform_video_capturer", - "../..:video_test_common", - "../../../api:create_frame_generator", - "../../../api:frame_generator_api", - "../../../api:media_stream_interface", - "../../../api:peer_connection_quality_test_fixture_api", + "../../../api:array_view", + "../../../api:video_quality_analyzer_api", + "../../../api/video:encoded_image", "../../../api/video:video_frame", - "../../../pc:peerconnection", - "//third_party/abseil-cpp/absl/types:variant", + "../../../api/video:video_rtp_headers", + "../../../rtc_base:criticalsection", + "../../../rtc_base:logging", + "../../../rtc_base/synchronization:mutex", ] } - rtc_library("peer_configurer") { + rtc_library("video_quality_metrics_reporter") { visibility = [ "*" ] + testonly = true sources = [ - "peer_configurer.cc", - "peer_configurer.h", + "analyzer/video/video_quality_metrics_reporter.cc", + "analyzer/video/video_quality_metrics_reporter.h", ] deps = [ - ":peer_connection_quality_test_params", - "../..:fileutils", - "../../../api:callfactory_api", - "../../../api:create_peer_connection_quality_test_frame_generator", - "../../../api:fec_controller_api", - "../../../api:packet_socket_factory", + "../..:perf_test", "../../../api:peer_connection_quality_test_fixture_api", - "../../../api/rtc_event_log", - "../../../api/task_queue", - "../../../api/transport:network_control", - "../../../api/transport/media:media_transport_interface", - "../../../api/video_codecs:video_codecs_api", - "../../../rtc_base", - "//third_party/abseil-cpp/absl/strings", + "../../../api:rtc_stats_api", + "../../../api:track_id_stream_info_map", + "../../../api/numerics", + "../../../api/units:data_rate", + "../../../api/units:data_size", + "../../../api/units:time_delta", + "../../../api/units:timestamp", + "../../../rtc_base:criticalsection", + "../../../rtc_base:rtc_numerics", + "../../../rtc_base/synchronization:mutex", ] + absl_deps = [ 
"//third_party/abseil-cpp/absl/strings" ] } - rtc_library("test_activities_executor") { + rtc_library("default_video_quality_analyzer") { visibility = [ "*" ] + testonly = true sources = [ - "test_activities_executor.cc", - "test_activities_executor.h", + "analyzer/video/default_video_quality_analyzer.cc", + "analyzer/video/default_video_quality_analyzer.h", ] + deps = [ + ":multi_head_queue", + "../..:perf_test", + "../../../api:array_view", + "../../../api:video_quality_analyzer_api", + "../../../api/numerics", "../../../api/units:time_delta", "../../../api/units:timestamp", - "../../../rtc_base:checks", + "../../../api/video:encoded_image", + "../../../api/video:video_frame", + "../../../api/video:video_rtp_headers", + "../../../common_video", "../../../rtc_base:criticalsection", "../../../rtc_base:logging", "../../../rtc_base:rtc_base_approved", - "../../../rtc_base:task_queue_for_test", - "../../../rtc_base/task_utils:repeating_task", + "../../../rtc_base:rtc_base_tests_utils", + "../../../rtc_base:rtc_event", + "../../../rtc_base:rtc_numerics", + "../../../rtc_base:timeutils", + "../../../rtc_base/synchronization:mutex", + "../../../rtc_tools:video_quality_analysis", "../../../system_wrappers", - "//third_party/abseil-cpp/absl/memory", - "//third_party/abseil-cpp/absl/types:optional", ] } - rtc_library("peerconnection_quality_test") { + rtc_library("network_quality_metrics_reporter") { visibility = [ "*" ] testonly = true - sources = [ - "peer_connection_quality_test.cc", - "peer_connection_quality_test.h", + "network_quality_metrics_reporter.cc", + "network_quality_metrics_reporter.h", ] deps = [ - ":analyzer_helper", - ":default_audio_quality_analyzer", - ":default_video_quality_analyzer", - ":media_helper", - ":peer_configurer", - ":peer_connection_quality_test_params", - ":sdp_changer", - ":single_process_encoded_image_data_injector", - ":stats_poller", - ":test_activities_executor", - ":test_peer", - ":test_peer_factory", - 
":video_quality_analyzer_injection_helper", - ":video_quality_metrics_reporter", - "../..:field_trial", - "../..:fileutils", "../..:perf_test", - "../../../api:audio_quality_analyzer_api", - "../../../api:libjingle_peerconnection_api", - "../../../api:media_stream_interface", + "../../../api:network_emulation_manager_api", "../../../api:peer_connection_quality_test_fixture_api", - "../../../api:rtc_event_log_output_file", - "../../../api:scoped_refptr", - "../../../api:video_quality_analyzer_api", - "../../../api/rtc_event_log", - "../../../api/task_queue", - "../../../api/task_queue:default_task_queue_factory", - "../../../api/units:time_delta", - "../../../api/units:timestamp", - "../../../pc:pc_test_utils", - "../../../pc:peerconnection", - "../../../rtc_base", - "../../../rtc_base:gunit_helpers", - "../../../rtc_base:macromagic", - "../../../rtc_base:rtc_base_approved", - "../../../rtc_base:safe_conversions", - "../../../rtc_base:task_queue_for_test", - "../../../system_wrappers", + "../../../api:rtc_stats_api", + "../../../api:track_id_stream_info_map", + "../../../api/units:data_size", + "../../../rtc_base:criticalsection", + "../../../rtc_base:rtc_event", + "../../../rtc_base/synchronization:mutex", "../../../system_wrappers:field_trial", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } - rtc_library("single_process_encoded_image_data_injector_unittest") { + rtc_library("stats_based_network_quality_metrics_reporter") { + visibility = [ "*" ] testonly = true sources = [ - "analyzer/video/single_process_encoded_image_data_injector_unittest.cc", + "stats_based_network_quality_metrics_reporter.cc", + "stats_based_network_quality_metrics_reporter.h", ] deps = [ - ":single_process_encoded_image_data_injector", - "../../../api/video:encoded_image", - "../../../rtc_base:rtc_base_approved", - "../../../test:test_support", + "../..:perf_test", + "../../../api:array_view", + "../../../api:network_emulation_manager_api", + 
"../../../api:peer_connection_quality_test_fixture_api", + "../../../api:rtc_stats_api", + "../../../api:scoped_refptr", + "../../../api/numerics", + "../../../api/test/network_emulation", + "../../../api/units:data_rate", + "../../../api/units:data_size", + "../../../api/units:timestamp", + "../../../rtc_base", + "../../../rtc_base:ip_address", + "../../../rtc_base:rtc_event", + "../../../rtc_base:stringutils", + "../../../rtc_base/synchronization:mutex", + "../../../system_wrappers:field_trial", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } - rtc_library("default_encoded_image_data_injector_unittest") { + rtc_library("cross_media_metrics_reporter") { + visibility = [ "*" ] testonly = true - sources = - [ "analyzer/video/default_encoded_image_data_injector_unittest.cc" ] - deps = [ - ":default_encoded_image_data_injector", - "../../../api/video:encoded_image", - "../../../rtc_base:rtc_base_approved", - "../../../test:test_support", + sources = [ + "cross_media_metrics_reporter.cc", + "cross_media_metrics_reporter.h", ] - } - - peer_connection_e2e_smoke_test_resources = [ - "../../../resources/pc_quality_smoke_test_alice_source.wav", - "../../../resources/pc_quality_smoke_test_bob_source.wav", - ] - if (is_ios) { - bundle_data("peer_connection_e2e_smoke_test_resources_bundle_data") { - testonly = true - sources = peer_connection_e2e_smoke_test_resources - outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ] - } - } - - rtc_library("peer_connection_e2e_smoke_test") { - testonly = true - - sources = [ "peer_connection_e2e_smoke_test.cc" ] deps = [ - ":default_audio_quality_analyzer", - ":default_video_quality_analyzer", - ":network_quality_metrics_reporter", - "../../../api:callfactory_api", - "../../../api:create_network_emulation_manager", - "../../../api:create_peer_connection_quality_test_frame_generator", - "../../../api:create_peerconnection_quality_test_fixture", - "../../../api:libjingle_peerconnection_api", - 
"../../../api:media_stream_interface", + "../..:perf_test", "../../../api:network_emulation_manager_api", "../../../api:peer_connection_quality_test_fixture_api", - "../../../api:scoped_refptr", - "../../../api:simulated_network_api", - "../../../api/audio_codecs:builtin_audio_decoder_factory", - "../../../api/audio_codecs:builtin_audio_encoder_factory", - "../../../api/video_codecs:builtin_video_decoder_factory", - "../../../api/video_codecs:builtin_video_encoder_factory", - "../../../call:simulated_network", - "../../../media:rtc_audio_video", - "../../../modules/audio_device:audio_device_impl", - "../../../p2p:rtc_p2p", - "../../../pc:pc_test_utils", - "../../../pc:peerconnection_wrapper", - "../../../rtc_base", - "../../../rtc_base:gunit_helpers", - "../../../rtc_base:logging", + "../../../api:rtc_stats_api", + "../../../api:track_id_stream_info_map", + "../../../api/numerics", + "../../../api/units:timestamp", + "../../../rtc_base:criticalsection", "../../../rtc_base:rtc_event", + "../../../rtc_base:rtc_numerics", + "../../../rtc_base/synchronization:mutex", "../../../system_wrappers:field_trial", - "../../../test:field_trial", - "../../../test:fileutils", - "../../../test:test_support", ] - data = peer_connection_e2e_smoke_test_resources - if (is_ios) { - deps += [ ":peer_connection_e2e_smoke_test_resources_bundle_data" ] - } + absl_deps = [ + "//third_party/abseil-cpp/absl/strings", + "//third_party/abseil-cpp/absl/types:optional", + ] } - rtc_library("stats_poller") { + rtc_library("sdp_changer") { visibility = [ "*" ] testonly = true sources = [ - "stats_poller.cc", - "stats_poller.h", + "sdp/sdp_changer.cc", + "sdp/sdp_changer.h", ] deps = [ - ":test_peer", + "../../../api:array_view", "../../../api:libjingle_peerconnection_api", - "../../../api:stats_observer_interface", - "../../../rtc_base:logging", + "../../../api:peer_connection_quality_test_fixture_api", + "../../../api:rtp_parameters", + "../../../media:rtc_media_base", + "../../../p2p:rtc_p2p", + 
"../../../pc:peerconnection", + "../../../pc:rtc_pc_base", + "../../../pc:session_description", + "../../../pc:simulcast_description", + "../../../rtc_base:stringutils", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/memory", + "//third_party/abseil-cpp/absl/strings:strings", + "//third_party/abseil-cpp/absl/types:optional", ] } - rtc_library("default_video_quality_analyzer_test") { + rtc_library("multi_head_queue") { + visibility = [ "*" ] testonly = true - sources = [ "analyzer/video/default_video_quality_analyzer_test.cc" ] - deps = [ - ":default_video_quality_analyzer", - "../..:test_support", - "../../../api:create_frame_generator", - "../../../api:rtp_packet_info", - "../../../api/video:encoded_image", - "../../../api/video:video_frame", - "../../../api/video:video_frame_i420", - "../../../modules/rtp_rtcp:rtp_rtcp_format", - "../../../system_wrappers", - ] + sources = [ "analyzer/video/multi_head_queue.h" ] + deps = [ "../../../rtc_base:checks" ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } } - -rtc_library("analyzer_helper") { - visibility = [ "*" ] - sources = [ - "analyzer_helper.cc", - "analyzer_helper.h", - ] - deps = [ - "../../../api:track_id_stream_label_map", - "../../../rtc_base:macromagic", - "../../../rtc_base/synchronization:sequence_checker", - ] -} - -rtc_library("default_audio_quality_analyzer") { - visibility = [ "*" ] - testonly = true - sources = [ - "analyzer/audio/default_audio_quality_analyzer.cc", - "analyzer/audio/default_audio_quality_analyzer.h", - ] - - deps = [ - "../..:perf_test", - "../../../api:audio_quality_analyzer_api", - "../../../api:libjingle_peerconnection_api", - "../../../api:stats_observer_interface", - "../../../api:track_id_stream_label_map", - "../../../rtc_base:criticalsection", - "../../../rtc_base:logging", - "../../../rtc_base:rtc_numerics", - ] -} - -rtc_library("example_video_quality_analyzer") { - visibility = [ "*" ] - testonly = true - sources = [ - 
"analyzer/video/example_video_quality_analyzer.cc", - "analyzer/video/example_video_quality_analyzer.h", - ] - - deps = [ - "../../../api:video_quality_analyzer_api", - "../../../api/video:encoded_image", - "../../../api/video:video_frame", - "../../../api/video:video_rtp_headers", - "../../../rtc_base:criticalsection", - "../../../rtc_base:logging", - ] -} - -rtc_library("video_quality_metrics_reporter") { - visibility = [ "*" ] - - testonly = true - sources = [ - "analyzer/video/video_quality_metrics_reporter.cc", - "analyzer/video/video_quality_metrics_reporter.h", - ] - deps = [ - "../..:perf_test", - "../../../api:peer_connection_quality_test_fixture_api", - "../../../rtc_base:criticalsection", - "../../../rtc_base:rtc_numerics", - ] -} - -rtc_library("default_video_quality_analyzer") { - visibility = [ "*" ] - - testonly = true - sources = [ - "analyzer/video/default_video_quality_analyzer.cc", - "analyzer/video/default_video_quality_analyzer.h", - ] - - deps = [ - "../..:perf_test", - "../../../api:video_quality_analyzer_api", - "../../../api/units:time_delta", - "../../../api/units:timestamp", - "../../../api/video:encoded_image", - "../../../api/video:video_frame", - "../../../api/video:video_frame_i420", - "../../../api/video:video_rtp_headers", - "../../../common_video", - "../../../rtc_base:criticalsection", - "../../../rtc_base:logging", - "../../../rtc_base:rtc_base_approved", - "../../../rtc_base:rtc_base_tests_utils", - "../../../rtc_base:rtc_event", - "../../../rtc_base:rtc_numerics", - "../../../rtc_base:timeutils", - "../../../system_wrappers", - ] -} - -rtc_library("network_quality_metrics_reporter") { - visibility = [ "*" ] - testonly = true - sources = [ - "network_quality_metrics_reporter.cc", - "network_quality_metrics_reporter.h", - ] - deps = [ - "../..:perf_test", - "../../../api:libjingle_peerconnection_api", - "../../../api:network_emulation_manager_api", - "../../../api:peer_connection_quality_test_fixture_api", - 
"../../../rtc_base:criticalsection", - "../../../rtc_base:rtc_event", - "../../../system_wrappers:field_trial", - ] -} - -rtc_library("sdp_changer") { - visibility = [ "*" ] - testonly = true - sources = [ - "sdp/sdp_changer.cc", - "sdp/sdp_changer.h", - ] - deps = [ - "../../../api:array_view", - "../../../api:libjingle_peerconnection_api", - "../../../api:peer_connection_quality_test_fixture_api", - "../../../api:rtp_parameters", - "../../../media:rtc_media_base", - "../../../p2p:rtc_p2p", - "../../../pc:peerconnection", - "../../../pc:rtc_pc_base", - "../../../rtc_base:stringutils", - "//third_party/abseil-cpp/absl/memory", - "//third_party/abseil-cpp/absl/strings:strings", - "//third_party/abseil-cpp/absl/types:optional", - ] -} diff --git a/test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.cc b/test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.cc index b8f1740e46..30c17c1ca9 100644 --- a/test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.cc +++ b/test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.cc @@ -10,87 +10,103 @@ #include "test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.h" -#include "api/stats_types.h" +#include "api/stats/rtc_stats.h" +#include "api/stats/rtcstats_objects.h" #include "rtc_base/logging.h" namespace webrtc { namespace webrtc_pc_e2e { -namespace { -static const char kStatsAudioMediaType[] = "audio"; - -} // namespace - -void DefaultAudioQualityAnalyzer::Start( - std::string test_case_name, - TrackIdStreamLabelMap* analyzer_helper) { +void DefaultAudioQualityAnalyzer::Start(std::string test_case_name, + TrackIdStreamInfoMap* analyzer_helper) { test_case_name_ = std::move(test_case_name); analyzer_helper_ = analyzer_helper; } void DefaultAudioQualityAnalyzer::OnStatsReports( - const std::string& pc_label, - const StatsReports& stats_reports) { - for (const StatsReport* stats_report : stats_reports) { - // NetEq stats are only present in kStatsReportTypeSsrc reports, so all - // other reports are just 
ignored. - if (stats_report->type() != StatsReport::StatsType::kStatsReportTypeSsrc) { - continue; - } - // Ignoring stats reports of "video" SSRC. - const webrtc::StatsReport::Value* media_type = stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNameMediaType); - RTC_CHECK(media_type); - if (strcmp(media_type->static_string_val(), kStatsAudioMediaType) != 0) { + absl::string_view pc_label, + const rtc::scoped_refptr& report) { + // TODO(https://crbug.com/webrtc/11789): use "inbound-rtp" instead of "track" + // stats when required audio metrics moved there + auto stats = report->GetStatsOfType(); + + for (auto& stat : stats) { + if (!stat->kind.is_defined() || + !(*stat->kind == RTCMediaStreamTrackKind::kAudio) || + !*stat->remote_source) { continue; } - if (stats_report->FindValue( - webrtc::StatsReport::kStatsValueNameBytesSent)) { - // If kStatsValueNameBytesSent is present, it means it's a send stream, - // but we need audio metrics for receive stream, so skip it. - continue; + + StatsSample sample; + sample.total_samples_received = + stat->total_samples_received.ValueOrDefault(0ul); + sample.concealed_samples = stat->concealed_samples.ValueOrDefault(0ul); + sample.removed_samples_for_acceleration = + stat->removed_samples_for_acceleration.ValueOrDefault(0ul); + sample.inserted_samples_for_deceleration = + stat->inserted_samples_for_deceleration.ValueOrDefault(0ul); + sample.silent_concealed_samples = + stat->silent_concealed_samples.ValueOrDefault(0ul); + sample.jitter_buffer_delay = + TimeDelta::Seconds(stat->jitter_buffer_delay.ValueOrDefault(0.)); + sample.jitter_buffer_target_delay = + TimeDelta::Seconds(stat->jitter_buffer_target_delay.ValueOrDefault(0.)); + sample.jitter_buffer_emitted_count = + stat->jitter_buffer_emitted_count.ValueOrDefault(0ul); + + const std::string stream_label = std::string( + analyzer_helper_->GetStreamLabelFromTrackId(*stat->track_identifier)); + + MutexLock lock(&lock_); + StatsSample prev_sample = 
last_stats_sample_[stream_label]; + RTC_CHECK_GE(sample.total_samples_received, + prev_sample.total_samples_received); + double total_samples_diff = static_cast( + sample.total_samples_received - prev_sample.total_samples_received); + if (total_samples_diff == 0) { + return; } - const webrtc::StatsReport::Value* expand_rate = stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNameExpandRate); - const webrtc::StatsReport::Value* accelerate_rate = stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNameAccelerateRate); - const webrtc::StatsReport::Value* preemptive_rate = stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNamePreemptiveExpandRate); - const webrtc::StatsReport::Value* speech_expand_rate = - stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNameSpeechExpandRate); - const webrtc::StatsReport::Value* preferred_buffer_size_ms = - stats_report->FindValue(StatsReport::StatsValueName:: - kStatsValueNamePreferredJitterBufferMs); - RTC_CHECK(expand_rate); - RTC_CHECK(accelerate_rate); - RTC_CHECK(preemptive_rate); - RTC_CHECK(speech_expand_rate); - RTC_CHECK(preferred_buffer_size_ms); - - const std::string& stream_label = - GetStreamLabelFromStatsReport(stats_report); - - rtc::CritScope crit(&lock_); AudioStreamStats& audio_stream_stats = streams_stats_[stream_label]; - audio_stream_stats.expand_rate.AddSample(expand_rate->float_val()); - audio_stream_stats.accelerate_rate.AddSample(accelerate_rate->float_val()); - audio_stream_stats.preemptive_rate.AddSample(preemptive_rate->float_val()); + audio_stream_stats.expand_rate.AddSample( + (sample.concealed_samples - prev_sample.concealed_samples) / + total_samples_diff); + audio_stream_stats.accelerate_rate.AddSample( + (sample.removed_samples_for_acceleration - + prev_sample.removed_samples_for_acceleration) / + total_samples_diff); + audio_stream_stats.preemptive_rate.AddSample( + (sample.inserted_samples_for_deceleration - + 
prev_sample.inserted_samples_for_deceleration) / + total_samples_diff); + + int64_t speech_concealed_samples = + sample.concealed_samples - sample.silent_concealed_samples; + int64_t prev_speech_concealed_samples = + prev_sample.concealed_samples - prev_sample.silent_concealed_samples; audio_stream_stats.speech_expand_rate.AddSample( - speech_expand_rate->float_val()); - audio_stream_stats.preferred_buffer_size_ms.AddSample( - preferred_buffer_size_ms->int_val()); - } -} + (speech_concealed_samples - prev_speech_concealed_samples) / + total_samples_diff); + + int64_t jitter_buffer_emitted_count_diff = + sample.jitter_buffer_emitted_count - + prev_sample.jitter_buffer_emitted_count; + if (jitter_buffer_emitted_count_diff > 0) { + TimeDelta jitter_buffer_delay_diff = + sample.jitter_buffer_delay - prev_sample.jitter_buffer_delay; + TimeDelta jitter_buffer_target_delay_diff = + sample.jitter_buffer_target_delay - + prev_sample.jitter_buffer_target_delay; + audio_stream_stats.average_jitter_buffer_delay_ms.AddSample( + jitter_buffer_delay_diff.ms() / + jitter_buffer_emitted_count_diff); + audio_stream_stats.preferred_buffer_size_ms.AddSample( + jitter_buffer_target_delay_diff.ms() / + jitter_buffer_emitted_count_diff); + } -const std::string& DefaultAudioQualityAnalyzer::GetStreamLabelFromStatsReport( - const StatsReport* stats_report) const { - const webrtc::StatsReport::Value* report_track_id = stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNameTrackId); - RTC_CHECK(report_track_id); - return analyzer_helper_->GetStreamLabelFromTrackId( - report_track_id->string_val()); + last_stats_sample_[stream_label] = sample; + } } std::string DefaultAudioQualityAnalyzer::GetTestCaseName( @@ -100,7 +116,7 @@ std::string DefaultAudioQualityAnalyzer::GetTestCaseName( void DefaultAudioQualityAnalyzer::Stop() { using ::webrtc::test::ImproveDirection; - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); for (auto& item : streams_stats_) { 
ReportResult("expand_rate", item.first, item.second.expand_rate, "unitless", ImproveDirection::kSmallerIsBetter); @@ -111,6 +127,9 @@ void DefaultAudioQualityAnalyzer::Stop() { ReportResult("speech_expand_rate", item.first, item.second.speech_expand_rate, "unitless", ImproveDirection::kSmallerIsBetter); + ReportResult("average_jitter_buffer_delay_ms", item.first, + item.second.average_jitter_buffer_delay_ms, "ms", + ImproveDirection::kNone); ReportResult("preferred_buffer_size_ms", item.first, item.second.preferred_buffer_size_ms, "ms", ImproveDirection::kNone); @@ -119,7 +138,7 @@ void DefaultAudioQualityAnalyzer::Stop() { std::map DefaultAudioQualityAnalyzer::GetAudioStreamsStats() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return streams_stats_; } diff --git a/test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.h b/test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.h index 33aaefd4c3..4ad0dd3da2 100644 --- a/test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.h +++ b/test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.h @@ -14,11 +14,12 @@ #include #include -#include "api/stats_types.h" +#include "absl/strings/string_view.h" +#include "api/numerics/samples_stats_counter.h" #include "api/test/audio_quality_analyzer_interface.h" -#include "api/test/track_id_stream_label_map.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/numerics/samples_stats_counter.h" +#include "api/test/track_id_stream_info_map.h" +#include "api/units/time_delta.h" +#include "rtc_base/synchronization/mutex.h" #include "test/testsupport/perf_test.h" namespace webrtc { @@ -29,25 +30,34 @@ struct AudioStreamStats { SamplesStatsCounter accelerate_rate; SamplesStatsCounter preemptive_rate; SamplesStatsCounter speech_expand_rate; + SamplesStatsCounter average_jitter_buffer_delay_ms; SamplesStatsCounter preferred_buffer_size_ms; }; -// TODO(bugs.webrtc.org/10430): Migrate to the new GetStats as soon as -// bugs.webrtc.org/10428 is fixed. 
class DefaultAudioQualityAnalyzer : public AudioQualityAnalyzerInterface { public: void Start(std::string test_case_name, - TrackIdStreamLabelMap* analyzer_helper) override; - void OnStatsReports(const std::string& pc_label, - const StatsReports& stats_reports) override; + TrackIdStreamInfoMap* analyzer_helper) override; + void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) override; void Stop() override; // Returns audio quality stats per stream label. std::map GetAudioStreamsStats() const; private: - const std::string& GetStreamLabelFromStatsReport( - const StatsReport* stats_report) const; + struct StatsSample { + uint64_t total_samples_received = 0; + uint64_t concealed_samples = 0; + uint64_t removed_samples_for_acceleration = 0; + uint64_t inserted_samples_for_deceleration = 0; + uint64_t silent_concealed_samples = 0; + TimeDelta jitter_buffer_delay = TimeDelta::Zero(); + TimeDelta jitter_buffer_target_delay = TimeDelta::Zero(); + uint64_t jitter_buffer_emitted_count = 0; + }; + std::string GetTestCaseName(const std::string& stream_label) const; void ReportResult(const std::string& metric_name, const std::string& stream_label, @@ -56,10 +66,11 @@ class DefaultAudioQualityAnalyzer : public AudioQualityAnalyzerInterface { webrtc::test::ImproveDirection improve_direction) const; std::string test_case_name_; - TrackIdStreamLabelMap* analyzer_helper_; + TrackIdStreamInfoMap* analyzer_helper_; - rtc::CriticalSection lock_; + mutable Mutex lock_; std::map streams_stats_ RTC_GUARDED_BY(lock_); + std::map last_stats_sample_ RTC_GUARDED_BY(lock_); }; } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/analyzer/video/default_encoded_image_data_injector.cc b/test/pc/e2e/analyzer/video/default_encoded_image_data_injector.cc deleted file mode 100644 index 2634e6eea4..0000000000 --- a/test/pc/e2e/analyzer/video/default_encoded_image_data_injector.cc +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. 
All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "test/pc/e2e/analyzer/video/default_encoded_image_data_injector.h" - -#include -#include - -#include "absl/memory/memory.h" -#include "api/video/encoded_image.h" -#include "rtc_base/checks.h" - -namespace webrtc { -namespace webrtc_pc_e2e { -namespace { - -// The amount on which encoded image buffer will be expanded to inject frame id. -// This is 2 bytes for uint16_t frame id itself and 4 bytes for original length -// of the buffer. -constexpr int kEncodedImageBufferExpansion = 6; - -struct ExtractionInfo { - size_t length; - bool discard; -}; - -} // namespace - -DefaultEncodedImageDataInjector::DefaultEncodedImageDataInjector() = default; -DefaultEncodedImageDataInjector::~DefaultEncodedImageDataInjector() = default; - -EncodedImage DefaultEncodedImageDataInjector::InjectData( - uint16_t id, - bool discard, - const EncodedImage& source, - int /*coding_entity_id*/) { - EncodedImage out = source; - out.SetEncodedData( - EncodedImageBuffer::Create(source.size() + kEncodedImageBufferExpansion)); - memcpy(out.data(), source.data(), source.size()); - size_t insertion_pos = source.size(); - out.data()[insertion_pos] = id & 0x00ff; - out.data()[insertion_pos + 1] = (id & 0xff00) >> 8; - out.data()[insertion_pos + 2] = source.size() & 0x000000ff; - out.data()[insertion_pos + 3] = (source.size() & 0x0000ff00) >> 8; - out.data()[insertion_pos + 4] = (source.size() & 0x00ff0000) >> 16; - out.data()[insertion_pos + 5] = (source.size() & 0xff000000) >> 24; - - // We will store discard flag in the high bit of high byte of the size. 
- RTC_CHECK_LT(source.size(), 1U << 31) << "High bit is already in use"; - out.data()[insertion_pos + 5] = - out.data()[insertion_pos + 5] | ((discard ? 1 : 0) << 7); - return out; -} - -EncodedImageExtractionResult DefaultEncodedImageDataInjector::ExtractData( - const EncodedImage& source, - int /*coding_entity_id*/) { - EncodedImage out = source; - out.SetEncodedData(EncodedImageBuffer::Create(source.size())); - - size_t source_pos = source.size() - 1; - absl::optional id = absl::nullopt; - bool discard = true; - std::vector extraction_infos; - // First make a reverse pass through whole buffer to populate frame id, - // discard flags and concatenated encoded images length. - while (true) { - size_t insertion_pos = source_pos - kEncodedImageBufferExpansion + 1; - RTC_CHECK_GE(insertion_pos, 0); - RTC_CHECK_LE(insertion_pos + kEncodedImageBufferExpansion, source.size()); - uint16_t next_id = - source.data()[insertion_pos] + (source.data()[insertion_pos + 1] << 8); - RTC_CHECK(!id || id.value() == next_id) - << "Different frames encoded into single encoded image: " << id.value() - << " vs " << next_id; - id = next_id; - uint32_t length = source.data()[insertion_pos + 2] + - (source.data()[insertion_pos + 3] << 8) + - (source.data()[insertion_pos + 4] << 16) + - ((source.data()[insertion_pos + 5] << 24) & 0b01111111); - bool current_discard = (source.data()[insertion_pos + 5] & 0b10000000) != 0; - extraction_infos.push_back({length, current_discard}); - // Extraction result is discarded only if all encoded partitions are - // discarded. 
- discard = discard && current_discard; - if (source_pos < length + kEncodedImageBufferExpansion) { - break; - } - source_pos -= length + kEncodedImageBufferExpansion; - } - RTC_CHECK(id); - std::reverse(extraction_infos.begin(), extraction_infos.end()); - if (discard) { - out.set_size(0); - return EncodedImageExtractionResult{*id, out, true}; - } - - // Now basing on populated data make a forward pass to copy required pieces - // of data to the output buffer. - source_pos = 0; - size_t out_pos = 0; - auto extraction_infos_it = extraction_infos.begin(); - while (source_pos < source.size()) { - const ExtractionInfo& info = *extraction_infos_it; - RTC_CHECK_LE(source_pos + kEncodedImageBufferExpansion + info.length, - source.size()); - if (!info.discard) { - // Copy next encoded image payload from concatenated buffer only if it is - // not discarded. - memcpy(&out.data()[out_pos], &source.data()[source_pos], info.length); - out_pos += info.length; - } - source_pos += info.length + kEncodedImageBufferExpansion; - ++extraction_infos_it; - } - out.set_size(out_pos); - - return EncodedImageExtractionResult{id.value(), out, discard}; -} - -} // namespace webrtc_pc_e2e -} // namespace webrtc diff --git a/test/pc/e2e/analyzer/video/default_encoded_image_data_injector.h b/test/pc/e2e/analyzer/video/default_encoded_image_data_injector.h deleted file mode 100644 index f4bd81ce90..0000000000 --- a/test/pc/e2e/analyzer/video/default_encoded_image_data_injector.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef TEST_PC_E2E_ANALYZER_VIDEO_DEFAULT_ENCODED_IMAGE_DATA_INJECTOR_H_ -#define TEST_PC_E2E_ANALYZER_VIDEO_DEFAULT_ENCODED_IMAGE_DATA_INJECTOR_H_ - -#include -#include -#include -#include -#include -#include - -#include "api/video/encoded_image.h" -#include "rtc_base/critical_section.h" -#include "test/pc/e2e/analyzer/video/encoded_image_data_injector.h" - -namespace webrtc { -namespace webrtc_pc_e2e { - -// Injects frame id and discard flag into EncodedImage payload buffer. The -// payload buffer will be appended in the injector with 2 bytes frame id and 4 -// bytes original buffer length. Discarded flag will be put into the highest bit -// of the length. It is assumed, that frame's data can't be more then 2^31 -// bytes. In the decoder, frame id and discard flag will be extracted and the -// length will be used to restore original buffer. We can't put this data in the -// beginning of the payload, because first bytes are used in different parts of -// WebRTC pipeline. -// -// The data in the EncodedImage on encoder side after injection will look like -// this: -// 4 bytes frame length + discard flag -// _________________ _ _ _↓_ _ _ -// | original buffer | | | -// ¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯ ¯↑¯ ¯ ¯ ¯ ¯ -// 2 bytes frame id -// -// But on decoder side multiple payloads can be concatenated into single -// EncodedImage in jitter buffer and its payload will look like this: -// _________ _ _ _ _ _ _ _________ _ _ _ _ _ _ _________ _ _ _ _ _ _ -// buf: | payload | | | payload | | | payload | | | -// ¯¯¯¯¯¯¯¯¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯¯¯¯¯¯¯¯¯ ¯ ¯ ¯ ¯ ¯ ¯ ¯¯¯¯¯¯¯¯¯ ¯ ¯ ¯ ¯ ¯ ¯ -// To correctly restore such images we will extract id by this algorithm: -// 1. Make a pass from end to begin of the buffer to restore origin lengths, -// frame ids and discard flags from length high bit. -// 2. If all discard flags are true - discard this encoded image -// 3. 
Make a pass from begin to end copying data to the output basing on -// previously extracted length -// Also it will check, that all extracted ids are equals. -class DefaultEncodedImageDataInjector : public EncodedImageDataInjector, - public EncodedImageDataExtractor { - public: - DefaultEncodedImageDataInjector(); - ~DefaultEncodedImageDataInjector() override; - - EncodedImage InjectData(uint16_t id, - bool discard, - const EncodedImage& source, - int /*coding_entity_id*/) override; - EncodedImageExtractionResult ExtractData(const EncodedImage& source, - int coding_entity_id) override; -}; - -} // namespace webrtc_pc_e2e -} // namespace webrtc - -#endif // TEST_PC_E2E_ANALYZER_VIDEO_DEFAULT_ENCODED_IMAGE_DATA_INJECTOR_H_ diff --git a/test/pc/e2e/analyzer/video/default_encoded_image_data_injector_unittest.cc b/test/pc/e2e/analyzer/video/default_encoded_image_data_injector_unittest.cc deleted file mode 100644 index 3ad978f66a..0000000000 --- a/test/pc/e2e/analyzer/video/default_encoded_image_data_injector_unittest.cc +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "test/pc/e2e/analyzer/video/default_encoded_image_data_injector.h" - -#include - -#include "api/video/encoded_image.h" -#include "rtc_base/buffer.h" -#include "test/gtest.h" - -namespace webrtc { -namespace webrtc_pc_e2e { -namespace { - -rtc::Buffer CreateBufferOfSizeNFilledWithValuesFromX(size_t n, uint8_t x) { - rtc::Buffer buffer(n); - for (size_t i = 0; i < n; ++i) { - buffer[i] = static_cast(x + i); - } - return buffer; -} - -} // namespace - -TEST(DefaultEncodedImageDataInjector, InjectExtractDiscardFalse) { - DefaultEncodedImageDataInjector injector; - - rtc::Buffer buffer = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - - EncodedImage source(buffer.data(), 10, 10); - source.SetTimestamp(123456789); - - EncodedImageExtractionResult out = - injector.ExtractData(injector.InjectData(512, false, source, 1), 2); - EXPECT_EQ(out.id, 512); - EXPECT_FALSE(out.discard); - EXPECT_EQ(out.image.size(), 10ul); - for (int i = 0; i < 10; ++i) { - EXPECT_EQ(out.image.data()[i], i + 1); - } -} - -TEST(DefaultEncodedImageDataInjector, InjectExtractDiscardTrue) { - DefaultEncodedImageDataInjector injector; - - rtc::Buffer buffer = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - - EncodedImage source(buffer.data(), 10, 10); - source.SetTimestamp(123456789); - - EncodedImageExtractionResult out = - injector.ExtractData(injector.InjectData(512, true, source, 1), 2); - EXPECT_EQ(out.id, 512); - EXPECT_TRUE(out.discard); - EXPECT_EQ(out.image.size(), 0ul); -} - -TEST(DefaultEncodedImageDataInjector, Inject3Extract3) { - DefaultEncodedImageDataInjector injector; - - rtc::Buffer buffer1 = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - rtc::Buffer buffer2 = CreateBufferOfSizeNFilledWithValuesFromX(10, 11); - rtc::Buffer buffer3 = CreateBufferOfSizeNFilledWithValuesFromX(10, 21); - - // 1st frame - EncodedImage source1(buffer1.data(), 10, 10); - source1.SetTimestamp(123456710); - // 2nd frame 1st spatial layer - EncodedImage source2(buffer2.data(), 10, 10); 
- source2.SetTimestamp(123456720); - // 2nd frame 2nd spatial layer - EncodedImage source3(buffer3.data(), 10, 10); - source3.SetTimestamp(123456720); - - EncodedImage intermediate1 = injector.InjectData(510, false, source1, 1); - EncodedImage intermediate2 = injector.InjectData(520, true, source2, 1); - EncodedImage intermediate3 = injector.InjectData(520, false, source3, 1); - - // Extract ids in different order. - EncodedImageExtractionResult out3 = injector.ExtractData(intermediate3, 2); - EncodedImageExtractionResult out1 = injector.ExtractData(intermediate1, 2); - EncodedImageExtractionResult out2 = injector.ExtractData(intermediate2, 2); - - EXPECT_EQ(out1.id, 510); - EXPECT_FALSE(out1.discard); - EXPECT_EQ(out1.image.size(), 10ul); - for (int i = 0; i < 10; ++i) { - EXPECT_EQ(out1.image.data()[i], i + 1); - } - EXPECT_EQ(out2.id, 520); - EXPECT_TRUE(out2.discard); - EXPECT_EQ(out2.image.size(), 0ul); - EXPECT_EQ(out3.id, 520); - EXPECT_FALSE(out3.discard); - EXPECT_EQ(out3.image.size(), 10ul); - for (int i = 0; i < 10; ++i) { - EXPECT_EQ(out3.image.data()[i], i + 21); - } -} - -TEST(DefaultEncodedImageDataInjector, InjectExtractFromConcatenated) { - DefaultEncodedImageDataInjector injector; - - rtc::Buffer buffer1 = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - rtc::Buffer buffer2 = CreateBufferOfSizeNFilledWithValuesFromX(10, 11); - rtc::Buffer buffer3 = CreateBufferOfSizeNFilledWithValuesFromX(10, 21); - - EncodedImage source1(buffer1.data(), 10, 10); - source1.SetTimestamp(123456710); - EncodedImage source2(buffer2.data(), 10, 10); - source2.SetTimestamp(123456710); - EncodedImage source3(buffer3.data(), 10, 10); - source3.SetTimestamp(123456710); - - // Inject id into 3 images with same frame id. 
- EncodedImage intermediate1 = injector.InjectData(512, false, source1, 1); - EncodedImage intermediate2 = injector.InjectData(512, true, source2, 1); - EncodedImage intermediate3 = injector.InjectData(512, false, source3, 1); - - // Concatenate them into single encoded image, like it can be done in jitter - // buffer. - size_t concatenated_length = - intermediate1.size() + intermediate2.size() + intermediate3.size(); - rtc::Buffer concatenated_buffer; - concatenated_buffer.AppendData(intermediate1.data(), intermediate1.size()); - concatenated_buffer.AppendData(intermediate2.data(), intermediate2.size()); - concatenated_buffer.AppendData(intermediate3.data(), intermediate3.size()); - EncodedImage concatenated(concatenated_buffer.data(), concatenated_length, - concatenated_length); - - // Extract frame id from concatenated image - EncodedImageExtractionResult out = injector.ExtractData(concatenated, 2); - - EXPECT_EQ(out.id, 512); - EXPECT_FALSE(out.discard); - EXPECT_EQ(out.image.size(), 2 * 10ul); - for (int i = 0; i < 10; ++i) { - EXPECT_EQ(out.image.data()[i], i + 1); - EXPECT_EQ(out.image.data()[i + 10], i + 21); - } -} - -TEST(DefaultEncodedImageDataInjector, - InjectExtractFromConcatenatedAllDiscarded) { - DefaultEncodedImageDataInjector injector; - - rtc::Buffer buffer1 = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - rtc::Buffer buffer2 = CreateBufferOfSizeNFilledWithValuesFromX(10, 11); - rtc::Buffer buffer3 = CreateBufferOfSizeNFilledWithValuesFromX(10, 21); - - EncodedImage source1(buffer1.data(), 10, 10); - source1.SetTimestamp(123456710); - EncodedImage source2(buffer2.data(), 10, 10); - source2.SetTimestamp(123456710); - EncodedImage source3(buffer3.data(), 10, 10); - source3.SetTimestamp(123456710); - - // Inject id into 3 images with same frame id. 
- EncodedImage intermediate1 = injector.InjectData(512, true, source1, 1); - EncodedImage intermediate2 = injector.InjectData(512, true, source2, 1); - EncodedImage intermediate3 = injector.InjectData(512, true, source3, 1); - - // Concatenate them into single encoded image, like it can be done in jitter - // buffer. - size_t concatenated_length = - intermediate1.size() + intermediate2.size() + intermediate3.size(); - rtc::Buffer concatenated_buffer; - concatenated_buffer.AppendData(intermediate1.data(), intermediate1.size()); - concatenated_buffer.AppendData(intermediate2.data(), intermediate2.size()); - concatenated_buffer.AppendData(intermediate3.data(), intermediate3.size()); - EncodedImage concatenated(concatenated_buffer.data(), concatenated_length, - concatenated_length); - - // Extract frame id from concatenated image - EncodedImageExtractionResult out = injector.ExtractData(concatenated, 2); - - EXPECT_EQ(out.id, 512); - EXPECT_TRUE(out.discard); - EXPECT_EQ(out.image.size(), 0ul); -} - -} // namespace webrtc_pc_e2e -} // namespace webrtc diff --git a/test/pc/e2e/analyzer/video/default_video_quality_analyzer.cc b/test/pc/e2e/analyzer/video/default_video_quality_analyzer.cc index 786509ddb7..53fb14e606 100644 --- a/test/pc/e2e/analyzer/video/default_video_quality_analyzer.cc +++ b/test/pc/e2e/analyzer/video/default_video_quality_analyzer.cc @@ -14,12 +14,17 @@ #include #include +#include "api/array_view.h" +#include "api/numerics/samples_stats_counter.h" #include "api/units/time_delta.h" #include "api/video/i420_buffer.h" #include "common_video/libyuv/include/webrtc_libyuv.h" #include "rtc_base/cpu_time.h" #include "rtc_base/logging.h" +#include "rtc_base/platform_thread.h" +#include "rtc_base/strings/string_builder.h" #include "rtc_base/time_utils.h" +#include "rtc_tools/frame_analyzer/video_geometry_aligner.h" namespace webrtc { namespace webrtc_pc_e2e { @@ -35,6 +40,7 @@ void LogFrameCounters(const std::string& name, const FrameCounters& counters) { 
RTC_LOG(INFO) << "[" << name << "] Pre encoded : " << counters.pre_encoded; RTC_LOG(INFO) << "[" << name << "] Encoded : " << counters.encoded; RTC_LOG(INFO) << "[" << name << "] Received : " << counters.received; + RTC_LOG(INFO) << "[" << name << "] Decoded : " << counters.decoded; RTC_LOG(INFO) << "[" << name << "] Rendered : " << counters.rendered; RTC_LOG(INFO) << "[" << name << "] Dropped : " << counters.dropped; } @@ -46,6 +52,20 @@ void LogStreamInternalStats(const std::string& name, const StreamStats& stats) { << stats.dropped_before_encoder; } +template +absl::optional MaybeGetValue(const std::map& map, size_t key) { + auto it = map.find(key); + if (it == map.end()) { + return absl::nullopt; + } + return it->second; +} + +SamplesStatsCounter::StatsSample StatsSample(double value, + Timestamp sampling_time) { + return SamplesStatsCounter::StatsSample{value, sampling_time}; +} + } // namespace void RateCounter::AddEvent(Timestamp event_time) { @@ -65,30 +85,70 @@ double RateCounter::GetEventsPerSecond() const { (event_last_time_ - event_first_time_).us() * kMicrosPerSecond; } +std::string StatsKey::ToString() const { + rtc::StringBuilder out; + out << stream_label << "_" << sender << "_" << receiver; + return out.str(); +} + +bool operator<(const StatsKey& a, const StatsKey& b) { + if (a.stream_label != b.stream_label) { + return a.stream_label < b.stream_label; + } + if (a.sender != b.sender) { + return a.sender < b.sender; + } + return a.receiver < b.receiver; +} + +bool operator==(const StatsKey& a, const StatsKey& b) { + return a.stream_label == b.stream_label && a.sender == b.sender && + a.receiver == b.receiver; +} + +std::string InternalStatsKey::ToString() const { + rtc::StringBuilder out; + out << "stream=" << stream << "_sender=" << sender + << "_receiver=" << receiver; + return out.str(); +} + +bool operator<(const InternalStatsKey& a, const InternalStatsKey& b) { + if (a.stream != b.stream) { + return a.stream < b.stream; + } + if (a.sender != 
b.sender) { + return a.sender < b.sender; + } + return a.receiver < b.receiver; +} + +bool operator==(const InternalStatsKey& a, const InternalStatsKey& b) { + return a.stream == b.stream && a.sender == b.sender && + a.receiver == b.receiver; +} + DefaultVideoQualityAnalyzer::DefaultVideoQualityAnalyzer( - bool heavy_metrics_computation_enabled, - int max_frames_in_flight_per_stream_count) - : heavy_metrics_computation_enabled_(heavy_metrics_computation_enabled), - max_frames_in_flight_per_stream_count_( - max_frames_in_flight_per_stream_count), - clock_(Clock::GetRealTimeClock()) {} + webrtc::Clock* clock, + DefaultVideoQualityAnalyzerOptions options) + : options_(options), clock_(clock) {} DefaultVideoQualityAnalyzer::~DefaultVideoQualityAnalyzer() { Stop(); } -void DefaultVideoQualityAnalyzer::Start(std::string test_case_name, - int max_threads_count) { +void DefaultVideoQualityAnalyzer::Start( + std::string test_case_name, + rtc::ArrayView peer_names, + int max_threads_count) { test_label_ = std::move(test_case_name); for (int i = 0; i < max_threads_count; i++) { - auto thread = std::make_unique( - &DefaultVideoQualityAnalyzer::ProcessComparisonsThread, this, - ("DefaultVideoQualityAnalyzerWorker-" + std::to_string(i)).data(), - rtc::ThreadPriority::kNormalPriority); - thread->Start(); - thread_pool_.push_back(std::move(thread)); + thread_pool_.push_back(rtc::PlatformThread::SpawnJoinable( + [this] { ProcessComparisons(); }, + "DefaultVideoQualityAnalyzerWorker-" + std::to_string(i))); } { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); + peers_ = std::make_unique(peer_names); RTC_CHECK(start_time_.IsMinusInfinity()); state_ = State::kActive; @@ -98,73 +158,119 @@ void DefaultVideoQualityAnalyzer::Start(std::string test_case_name, } uint16_t DefaultVideoQualityAnalyzer::OnFrameCaptured( + absl::string_view peer_name, const std::string& stream_label, const webrtc::VideoFrame& frame) { // |next_frame_id| is atomic, so we needn't lock here. 
uint16_t frame_id = next_frame_id_++; Timestamp start_time = Timestamp::MinusInfinity(); + size_t peer_index = -1; + size_t peers_count = -1; + size_t stream_index; { - rtc::CritScope crit(&lock_); - // Create a local copy of start_time_ to access it under |comparison_lock_| - // without holding a |lock_| + MutexLock lock(&lock_); + // Create a local copy of |start_time_|, peer's index and total peers count + // to access it under |comparison_lock_| without holding a |lock_| start_time = start_time_; + peer_index = peers_->index(peer_name); + peers_count = peers_->size(); + stream_index = streams_.AddIfAbsent(stream_label); } { // Ensure stats for this stream exists. - rtc::CritScope crit(&comparison_lock_); - if (stream_stats_.find(stream_label) == stream_stats_.end()) { - stream_stats_.insert({stream_label, StreamStats()}); - // Assume that the first freeze was before first stream frame captured. - // This way time before the first freeze would be counted as time between - // freezes. - stream_last_freeze_end_time_.insert({stream_label, start_time}); + MutexLock lock(&comparison_lock_); + for (size_t i = 0; i < peers_count; ++i) { + if (i == peer_index) { + continue; + } + InternalStatsKey stats_key(stream_index, peer_index, i); + if (stream_stats_.find(stats_key) == stream_stats_.end()) { + stream_stats_.insert({stats_key, StreamStats()}); + // Assume that the first freeze was before first stream frame captured. + // This way time before the first freeze would be counted as time + // between freezes. + stream_last_freeze_end_time_.insert({stats_key, start_time}); + } else { + // When we see some |stream_label| for the first time we need to create + // stream stats object for it and set up some states, but we need to do + // it only once and for all receivers, so on the next frame on the same + // |stream_label| we can be sure, that it's already done and we needn't + // to scan though all peers again. 
+ break; + } } } { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); + stream_to_sender_[stream_index] = peer_index; frame_counters_.captured++; - stream_frame_counters_[stream_label].captured++; + for (size_t i = 0; i < peers_->size(); ++i) { + if (i != peer_index) { + InternalStatsKey key(stream_index, peer_index, i); + stream_frame_counters_[key].captured++; + } + } - StreamState* state = &stream_states_[stream_label]; + auto state_it = stream_states_.find(stream_index); + if (state_it == stream_states_.end()) { + stream_states_.emplace(stream_index, + StreamState(peer_index, peers_->size())); + } + StreamState* state = &stream_states_.at(stream_index); state->PushBack(frame_id); // Update frames in flight info. auto it = captured_frames_in_flight_.find(frame_id); if (it != captured_frames_in_flight_.end()) { - // We overflow uint16_t and hit previous frame id and this frame is still - // in flight. It means that this stream wasn't rendered for long time and - // we need to process existing frame as dropped. - auto stats_it = frame_stats_.find(frame_id); - RTC_DCHECK(stats_it != frame_stats_.end()); - - uint16_t oldest_frame_id = state->PopFront(); - RTC_DCHECK_EQ(frame_id, oldest_frame_id); - frame_counters_.dropped++; - stream_frame_counters_[stream_label].dropped++; - AddComparison(it->second, absl::nullopt, true, stats_it->second); + // If we overflow uint16_t and hit previous frame id and this frame is + // still in flight, it means that this stream wasn't rendered for long + // time and we need to process existing frame as dropped. 
+ for (size_t i = 0; i < peers_->size(); ++i) { + if (i == peer_index) { + continue; + } + + uint16_t oldest_frame_id = state->PopFront(i); + RTC_DCHECK_EQ(frame_id, oldest_frame_id); + frame_counters_.dropped++; + InternalStatsKey key(stream_index, peer_index, i); + stream_frame_counters_.at(key).dropped++; + + MutexLock lock1(&comparison_lock_); + analyzer_stats_.frames_in_flight_left_count.AddSample( + StatsSample(captured_frames_in_flight_.size(), Now())); + AddComparison(InternalStatsKey(stream_index, peer_index, i), + it->second.frame(), absl::nullopt, true, + it->second.GetStatsForPeer(i)); + } captured_frames_in_flight_.erase(it); - frame_stats_.erase(stats_it); } - captured_frames_in_flight_.insert( - std::pair(frame_id, frame)); + captured_frames_in_flight_.emplace( + frame_id, + FrameInFlight(stream_index, frame, + /*captured_time=*/Now(), peer_index, peers_->size())); // Set frame id on local copy of the frame - captured_frames_in_flight_.at(frame_id).set_id(frame_id); - frame_stats_.insert(std::pair( - frame_id, FrameStats(stream_label, /*captured_time=*/Now()))); + captured_frames_in_flight_.at(frame_id).SetFrameId(frame_id); // Update history stream<->frame mapping for (auto it = stream_to_frame_id_history_.begin(); it != stream_to_frame_id_history_.end(); ++it) { it->second.erase(frame_id); } - stream_to_frame_id_history_[stream_label].insert(frame_id); + stream_to_frame_id_history_[stream_index].insert(frame_id); // If state has too many frames that are in flight => remove the oldest // queued frame in order to avoid to use too much memory. 
- if (state->GetAliveFramesCount() > max_frames_in_flight_per_stream_count_) { + if (state->GetAliveFramesCount() > + options_.max_frames_in_flight_per_stream_count) { uint16_t frame_id_to_remove = state->MarkNextAliveFrameAsDead(); - auto removed_count = captured_frames_in_flight_.erase(frame_id_to_remove); - RTC_DCHECK_EQ(removed_count, 1) + auto it = captured_frames_in_flight_.find(frame_id_to_remove); + RTC_CHECK(it != captured_frames_in_flight_.end()) + << "Frame with ID " << frame_id_to_remove + << " is expected to be in flight, but hasn't been found in " + << "|captured_frames_in_flight_|"; + bool is_removed = it->second.RemoveFrame(); + RTC_DCHECK(is_removed) << "Invalid stream state: alive frame is removed already"; } } @@ -172,186 +278,272 @@ uint16_t DefaultVideoQualityAnalyzer::OnFrameCaptured( } void DefaultVideoQualityAnalyzer::OnFramePreEncode( + absl::string_view peer_name, const webrtc::VideoFrame& frame) { - rtc::CritScope crit(&lock_); - auto it = frame_stats_.find(frame.id()); - RTC_DCHECK(it != frame_stats_.end()) + MutexLock lock(&lock_); + auto it = captured_frames_in_flight_.find(frame.id()); + RTC_DCHECK(it != captured_frames_in_flight_.end()) << "Frame id=" << frame.id() << " not found"; frame_counters_.pre_encoded++; - stream_frame_counters_[it->second.stream_label].pre_encoded++; - it->second.pre_encode_time = Now(); + size_t peer_index = peers_->index(peer_name); + for (size_t i = 0; i < peers_->size(); ++i) { + if (i != peer_index) { + InternalStatsKey key(it->second.stream(), peer_index, i); + stream_frame_counters_.at(key).pre_encoded++; + } + } + it->second.SetPreEncodeTime(Now()); } void DefaultVideoQualityAnalyzer::OnFrameEncoded( + absl::string_view peer_name, uint16_t frame_id, const webrtc::EncodedImage& encoded_image, const EncoderStats& stats) { - rtc::CritScope crit(&lock_); - auto it = frame_stats_.find(frame_id); - RTC_DCHECK(it != frame_stats_.end()); + MutexLock lock(&lock_); + auto it = 
captured_frames_in_flight_.find(frame_id); + RTC_DCHECK(it != captured_frames_in_flight_.end()); // For SVC we can receive multiple encoded images for one frame, so to cover // all cases we have to pick the last encode time. - if (it->second.encoded_time.IsInfinite()) { + if (!it->second.HasEncodedTime()) { // Increase counters only when we meet this frame first time. frame_counters_.encoded++; - stream_frame_counters_[it->second.stream_label].encoded++; + size_t peer_index = peers_->index(peer_name); + for (size_t i = 0; i < peers_->size(); ++i) { + if (i != peer_index) { + InternalStatsKey key(it->second.stream(), peer_index, i); + stream_frame_counters_.at(key).encoded++; + } + } } - it->second.encoded_time = Now(); - it->second.encoded_image_size = encoded_image.size(); - it->second.target_encode_bitrate += stats.target_encode_bitrate; + it->second.OnFrameEncoded(Now(), encoded_image.size(), + stats.target_encode_bitrate); } void DefaultVideoQualityAnalyzer::OnFrameDropped( + absl::string_view peer_name, webrtc::EncodedImageCallback::DropReason reason) { // Here we do nothing, because we will see this drop on renderer side. } void DefaultVideoQualityAnalyzer::OnFramePreDecode( + absl::string_view peer_name, uint16_t frame_id, const webrtc::EncodedImage& input_image) { - rtc::CritScope crit(&lock_); - auto it = frame_stats_.find(frame_id); - RTC_DCHECK(it != frame_stats_.end()); - RTC_DCHECK(it->second.received_time.IsInfinite()) - << "Received multiple spatial layers for stream_label=" - << it->second.stream_label; + MutexLock lock(&lock_); + size_t peer_index = peers_->index(peer_name); + + auto it = captured_frames_in_flight_.find(frame_id); + if (it == captured_frames_in_flight_.end() || + it->second.HasReceivedTime(peer_index)) { + // It means this frame was predecoded before, so we can skip it. 
It may + // happen when we have multiple simulcast streams in one track and received + // the same picture from two different streams because SFU can't reliably + // correlate two simulcast streams and started relaying the second stream + // from the same frame it has relayed right before for the first stream. + return; + } + frame_counters_.received++; - stream_frame_counters_[it->second.stream_label].received++; - it->second.decode_start_time = Now(); + InternalStatsKey key(it->second.stream(), + stream_to_sender_.at(it->second.stream()), peer_index); + stream_frame_counters_.at(key).received++; // Determine the time of the last received packet of this video frame. RTC_DCHECK(!input_image.PacketInfos().empty()); - int64_t last_receive_time = + Timestamp last_receive_time = std::max_element(input_image.PacketInfos().cbegin(), input_image.PacketInfos().cend(), [](const RtpPacketInfo& a, const RtpPacketInfo& b) { - return a.receive_time_ms() < b.receive_time_ms(); + return a.receive_time() < b.receive_time(); }) - ->receive_time_ms(); - it->second.received_time = Timestamp::Millis(last_receive_time); + ->receive_time(); + it->second.OnFramePreDecode(peer_index, + /*received_time=*/last_receive_time, + /*decode_start_time=*/Now()); } void DefaultVideoQualityAnalyzer::OnFrameDecoded( + absl::string_view peer_name, const webrtc::VideoFrame& frame, const DecoderStats& stats) { - rtc::CritScope crit(&lock_); - auto it = frame_stats_.find(frame.id()); - RTC_DCHECK(it != frame_stats_.end()); + MutexLock lock(&lock_); + size_t peer_index = peers_->index(peer_name); + + auto it = captured_frames_in_flight_.find(frame.id()); + if (it == captured_frames_in_flight_.end() || + it->second.HasDecodeEndTime(peer_index)) { + // It means this frame was decoded before, so we can skip it. 
It may happen + // when we have multiple simulcast streams in one track and received + // the same picture from two different streams because SFU can't reliably + // correlate two simulcast streams and started relaying the second stream + // from the same frame it has relayed right before for the first stream. + return; + } frame_counters_.decoded++; - stream_frame_counters_[it->second.stream_label].decoded++; - it->second.decode_end_time = Now(); + InternalStatsKey key(it->second.stream(), + stream_to_sender_.at(it->second.stream()), peer_index); + stream_frame_counters_.at(key).decoded++; + it->second.SetDecodeEndTime(peer_index, Now()); } void DefaultVideoQualityAnalyzer::OnFrameRendered( - const webrtc::VideoFrame& raw_frame) { - // Copy entire video frame including video buffer to ensure that analyzer - // won't hold any WebRTC internal buffers. - VideoFrame frame = raw_frame; - frame.set_video_frame_buffer( - I420Buffer::Copy(*raw_frame.video_frame_buffer()->ToI420())); - - rtc::CritScope crit(&lock_); - auto stats_it = frame_stats_.find(frame.id()); - RTC_DCHECK(stats_it != frame_stats_.end()); - FrameStats* frame_stats = &stats_it->second; + absl::string_view peer_name, + const webrtc::VideoFrame& frame) { + MutexLock lock(&lock_); + size_t peer_index = peers_->index(peer_name); + + auto frame_it = captured_frames_in_flight_.find(frame.id()); + if (frame_it == captured_frames_in_flight_.end() || + frame_it->second.HasRenderedTime(peer_index)) { + // It means this frame was rendered before, so we can skip it. It may happen + // when we have multiple simulcast streams in one track and received + // the same picture from two different streams because SFU can't reliably + // correlate two simulcast streams and started relaying the second stream + // from the same frame it has relayed right before for the first stream. + return; + } + + // Find corresponding captured frame. 
+ FrameInFlight* frame_in_flight = &frame_it->second; + absl::optional captured_frame = frame_in_flight->frame(); + + const size_t stream_index = frame_in_flight->stream(); + StreamState* state = &stream_states_.at(stream_index); + const InternalStatsKey stats_key(stream_index, state->owner(), peer_index); + // Update frames counters. frame_counters_.rendered++; - stream_frame_counters_[frame_stats->stream_label].rendered++; + stream_frame_counters_.at(stats_key).rendered++; // Update current frame stats. - frame_stats->rendered_time = Now(); - frame_stats->rendered_frame_width = frame.width(); - frame_stats->rendered_frame_height = frame.height(); - - // Find corresponding captured frame. - auto frame_it = captured_frames_in_flight_.find(frame.id()); - absl::optional captured_frame = - frame_it != captured_frames_in_flight_.end() - ? absl::optional(frame_it->second) - : absl::nullopt; + frame_in_flight->OnFrameRendered(peer_index, Now(), frame.width(), + frame.height()); // After we received frame here we need to check if there are any dropped // frames between this one and last one, that was rendered for this video // stream. - - const std::string& stream_label = frame_stats->stream_label; - StreamState* state = &stream_states_[stream_label]; int dropped_count = 0; - while (!state->Empty() && state->Front() != frame.id()) { + while (!state->IsEmpty(peer_index) && + state->Front(peer_index) != frame.id()) { dropped_count++; - uint16_t dropped_frame_id = state->PopFront(); + uint16_t dropped_frame_id = state->PopFront(peer_index); // Frame with id |dropped_frame_id| was dropped. We need: // 1. Update global and stream frame counters // 2. Extract corresponding frame from |captured_frames_in_flight_| - // 3. Extract corresponding frame stats from |frame_stats_| - // 4. Send extracted frame to comparison with dropped=true - // 5. Cleanup dropped frame + // 3. Send extracted frame to comparison with dropped=true + // 4. 
Cleanup dropped frame frame_counters_.dropped++; - stream_frame_counters_[stream_label].dropped++; + stream_frame_counters_.at(stats_key).dropped++; - auto dropped_frame_stats_it = frame_stats_.find(dropped_frame_id); - RTC_DCHECK(dropped_frame_stats_it != frame_stats_.end()); auto dropped_frame_it = captured_frames_in_flight_.find(dropped_frame_id); - absl::optional dropped_frame = - dropped_frame_it != captured_frames_in_flight_.end() - ? absl::optional(dropped_frame_it->second) - : absl::nullopt; + RTC_DCHECK(dropped_frame_it != captured_frames_in_flight_.end()); + absl::optional dropped_frame = dropped_frame_it->second.frame(); + dropped_frame_it->second.MarkDropped(peer_index); - AddComparison(dropped_frame, absl::nullopt, true, - dropped_frame_stats_it->second); + { + MutexLock lock1(&comparison_lock_); + analyzer_stats_.frames_in_flight_left_count.AddSample( + StatsSample(captured_frames_in_flight_.size(), Now())); + AddComparison(stats_key, dropped_frame, absl::nullopt, true, + dropped_frame_it->second.GetStatsForPeer(peer_index)); + } - frame_stats_.erase(dropped_frame_stats_it); - if (dropped_frame_it != captured_frames_in_flight_.end()) { + if (dropped_frame_it->second.HaveAllPeersReceived()) { captured_frames_in_flight_.erase(dropped_frame_it); } } - RTC_DCHECK(!state->Empty()); - state->PopFront(); + RTC_DCHECK(!state->IsEmpty(peer_index)); + state->PopFront(peer_index); - if (state->last_rendered_frame_time()) { - frame_stats->prev_frame_rendered_time = - state->last_rendered_frame_time().value(); + if (state->last_rendered_frame_time(peer_index)) { + frame_in_flight->SetPrevFrameRenderedTime( + peer_index, state->last_rendered_frame_time(peer_index).value()); + } + state->SetLastRenderedFrameTime(peer_index, + frame_in_flight->rendered_time(peer_index)); + { + MutexLock cr(&comparison_lock_); + stream_stats_.at(stats_key).skipped_between_rendered.AddSample( + StatsSample(dropped_count, Now())); } - 
state->set_last_rendered_frame_time(frame_stats->rendered_time); + { - rtc::CritScope cr(&comparison_lock_); - stream_stats_[stream_label].skipped_between_rendered.AddSample( - dropped_count); + MutexLock lock(&comparison_lock_); + analyzer_stats_.frames_in_flight_left_count.AddSample( + StatsSample(captured_frames_in_flight_.size(), Now())); + AddComparison(stats_key, captured_frame, frame, false, + frame_in_flight->GetStatsForPeer(peer_index)); } - AddComparison(captured_frame, frame, false, *frame_stats); - if (frame_it != captured_frames_in_flight_.end()) { + if (frame_it->second.HaveAllPeersReceived()) { captured_frames_in_flight_.erase(frame_it); } - frame_stats_.erase(stats_it); } void DefaultVideoQualityAnalyzer::OnEncoderError( + absl::string_view peer_name, const webrtc::VideoFrame& frame, int32_t error_code) { RTC_LOG(LS_ERROR) << "Encoder error for frame.id=" << frame.id() << ", code=" << error_code; } -void DefaultVideoQualityAnalyzer::OnDecoderError(uint16_t frame_id, +void DefaultVideoQualityAnalyzer::OnDecoderError(absl::string_view peer_name, + uint16_t frame_id, int32_t error_code) { RTC_LOG(LS_ERROR) << "Decoder error for frame_id=" << frame_id << ", code=" << error_code; } +void DefaultVideoQualityAnalyzer::RegisterParticipantInCall( + absl::string_view peer_name) { + MutexLock lock1(&lock_); + MutexLock lock2(&comparison_lock_); + RTC_CHECK(!peers_->HasName(peer_name)); + peers_->AddIfAbsent(peer_name); + + // Ensure stats for receiving (for frames from other peers to this one) + // streams exists. Since in flight frames will be sent to the new peer + // as well. Sending stats (from this peer to others) will be added by + // DefaultVideoQualityAnalyzer::OnFrameCaptured. 
+ for (auto& key_val : stream_to_sender_) { + InternalStatsKey key(key_val.first, key_val.second, + peers_->index(peer_name)); + const int64_t frames_count = captured_frames_in_flight_.size(); + FrameCounters counters; + counters.captured = frames_count; + counters.pre_encoded = frames_count; + counters.encoded = frames_count; + stream_frame_counters_.insert({key, std::move(counters)}); + + stream_stats_.insert({key, StreamStats()}); + stream_last_freeze_end_time_.insert({key, start_time_}); + } + // Ensure, that frames states are handled correctly + // (e.g. dropped frames tracking). + for (auto& key_val : stream_states_) { + key_val.second.AddPeer(); + } + // Register new peer for every frame in flight. + // It is guaranteed, that no garbadge FrameInFlight objects will stay in + // memory because of adding new peer. Even if the new peer won't receive the + // frame, the frame will be removed by OnFrameRendered after next frame comes + // for the new peer. It is important because FrameInFlight is a large object. + for (auto& key_val : captured_frames_in_flight_) { + key_val.second.AddPeer(); + } +} + void DefaultVideoQualityAnalyzer::Stop() { - StopMeasuringCpuProcessTime(); { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); if (state_ == State::kStopped) { return; } state_ = State::kStopped; } + StopMeasuringCpuProcessTime(); comparison_available_event_.Set(); - for (auto& thread : thread_pool_) { - thread->Stop(); - } - // PlatformThread have to be deleted on the same thread, where it was created thread_pool_.clear(); // Perform final Metrics update. On this place analyzer is stopped and no one @@ -360,109 +552,130 @@ void DefaultVideoQualityAnalyzer::Stop() { // Time between freezes. // Count time since the last freeze to the end of the call as time // between freezes. 
- rtc::CritScope crit1(&lock_); - rtc::CritScope crit2(&comparison_lock_); - for (auto& item : stream_stats_) { - const StreamState& state = stream_states_[item.first]; - // If there are no freezes in the call we have to report - // time_between_freezes_ms as call duration and in such case - // |stream_last_freeze_end_time_| for this stream will be |start_time_|. - // If there is freeze, then we need add time from last rendered frame - // to last freeze end as time between freezes. - if (state.last_rendered_frame_time()) { - item.second.time_between_freezes_ms.AddSample( - (state.last_rendered_frame_time().value() - - stream_last_freeze_end_time_.at(item.first)) - .ms()); + MutexLock lock1(&lock_); + MutexLock lock2(&comparison_lock_); + for (auto& state_entry : stream_states_) { + const size_t stream_index = state_entry.first; + const StreamState& stream_state = state_entry.second; + for (size_t i = 0; i < peers_->size(); ++i) { + if (i == static_cast(stream_state.owner())) { + continue; + } + + InternalStatsKey stats_key(stream_index, stream_state.owner(), i); + + // If there are no freezes in the call we have to report + // time_between_freezes_ms as call duration and in such case + // |stream_last_freeze_end_time_| for this stream will be |start_time_|. + // If there is freeze, then we need add time from last rendered frame + // to last freeze end as time between freezes. 
+ if (stream_state.last_rendered_frame_time(i)) { + stream_stats_[stats_key].time_between_freezes_ms.AddSample( + StatsSample( + stream_state.last_rendered_frame_time(i).value().ms() - + stream_last_freeze_end_time_.at(stats_key).ms(), + Now())); + } } } + analyzer_stats_.frames_in_flight_left_count.AddSample( + StatsSample(captured_frames_in_flight_.size(), Now())); } ReportResults(); } std::string DefaultVideoQualityAnalyzer::GetStreamLabel(uint16_t frame_id) { - rtc::CritScope crit1(&lock_); - auto it = frame_stats_.find(frame_id); - if (it != frame_stats_.end()) { - return it->second.stream_label; + MutexLock lock1(&lock_); + auto it = captured_frames_in_flight_.find(frame_id); + if (it != captured_frames_in_flight_.end()) { + return streams_.name(it->second.stream()); } for (auto hist_it = stream_to_frame_id_history_.begin(); hist_it != stream_to_frame_id_history_.end(); ++hist_it) { auto hist_set_it = hist_it->second.find(frame_id); if (hist_set_it != hist_it->second.end()) { - return hist_it->first; + return streams_.name(hist_it->first); } } RTC_CHECK(false) << "Unknown frame_id=" << frame_id; } -std::set DefaultVideoQualityAnalyzer::GetKnownVideoStreams() - const { - rtc::CritScope crit2(&comparison_lock_); - std::set out; +std::set DefaultVideoQualityAnalyzer::GetKnownVideoStreams() const { + MutexLock lock1(&lock_); + MutexLock lock2(&comparison_lock_); + std::set out; for (auto& item : stream_stats_) { - out.insert(item.first); + RTC_LOG(INFO) << item.first.ToString() << " ==> " + << ToStatsKey(item.first).ToString(); + out.insert(ToStatsKey(item.first)); } return out; } const FrameCounters& DefaultVideoQualityAnalyzer::GetGlobalCounters() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return frame_counters_; } -const std::map& +std::map DefaultVideoQualityAnalyzer::GetPerStreamCounters() const { - rtc::CritScope crit(&lock_); - return stream_frame_counters_; + MutexLock lock(&lock_); + std::map out; + for (auto& item : 
stream_frame_counters_) { + out.emplace(ToStatsKey(item.first), item.second); + } + return out; } -std::map DefaultVideoQualityAnalyzer::GetStats() - const { - rtc::CritScope cri(&comparison_lock_); - return stream_stats_; +std::map DefaultVideoQualityAnalyzer::GetStats() const { + MutexLock lock1(&lock_); + MutexLock lock2(&comparison_lock_); + std::map out; + for (auto& item : stream_stats_) { + out.emplace(ToStatsKey(item.first), item.second); + } + return out; } AnalyzerStats DefaultVideoQualityAnalyzer::GetAnalyzerStats() const { - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); return analyzer_stats_; } void DefaultVideoQualityAnalyzer::AddComparison( + InternalStatsKey stats_key, absl::optional captured, absl::optional rendered, bool dropped, FrameStats frame_stats) { StartExcludingCpuThreadTime(); - rtc::CritScope crit(&comparison_lock_); - analyzer_stats_.comparisons_queue_size.AddSample(comparisons_.size()); + analyzer_stats_.comparisons_queue_size.AddSample( + StatsSample(comparisons_.size(), Now())); // If there too many computations waiting in the queue, we won't provide // frames itself to make future computations lighter. 
if (comparisons_.size() >= kMaxActiveComparisons) { - comparisons_.emplace_back(absl::nullopt, absl::nullopt, dropped, - frame_stats, OverloadReason::kCpu); + comparisons_.emplace_back(std::move(stats_key), absl::nullopt, + absl::nullopt, dropped, std::move(frame_stats), + OverloadReason::kCpu); } else { OverloadReason overload_reason = OverloadReason::kNone; if (!captured && !dropped) { overload_reason = OverloadReason::kMemory; } - comparisons_.emplace_back(std::move(captured), std::move(rendered), dropped, - frame_stats, overload_reason); + comparisons_.emplace_back(std::move(stats_key), std::move(captured), + std::move(rendered), dropped, + std::move(frame_stats), overload_reason); } comparison_available_event_.Set(); StopExcludingCpuThreadTime(); } -void DefaultVideoQualityAnalyzer::ProcessComparisonsThread(void* obj) { - static_cast(obj)->ProcessComparisons(); -} - void DefaultVideoQualityAnalyzer::ProcessComparisons() { while (true) { // Try to pick next comparison to perform from the queue. absl::optional comparison = absl::nullopt; { - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); if (!comparisons_.empty()) { comparison = comparisons_.front(); comparisons_.pop_front(); @@ -476,7 +689,7 @@ void DefaultVideoQualityAnalyzer::ProcessComparisons() { { // If there are no comparisons and state is stopped => // no more frames expected. - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); more_frames_expected = state_ != State::kStopped; } if (!more_frames_expected) { @@ -498,17 +711,27 @@ void DefaultVideoQualityAnalyzer::ProcessComparison( // Perform expensive psnr and ssim calculations while not holding lock. 
double psnr = -1.0; double ssim = -1.0; - if (heavy_metrics_computation_enabled_ && comparison.captured && + if (options_.heavy_metrics_computation_enabled && comparison.captured && !comparison.dropped) { - psnr = I420PSNR(&*comparison.captured, &*comparison.rendered); - ssim = I420SSIM(&*comparison.captured, &*comparison.rendered); + rtc::scoped_refptr reference_buffer = + comparison.captured->video_frame_buffer()->ToI420(); + rtc::scoped_refptr test_buffer = + comparison.rendered->video_frame_buffer()->ToI420(); + if (options_.adjust_cropping_before_comparing_frames) { + test_buffer = + ScaleVideoFrameBuffer(*test_buffer.get(), reference_buffer->width(), + reference_buffer->height()); + reference_buffer = test::AdjustCropping(reference_buffer, test_buffer); + } + psnr = I420PSNR(*reference_buffer.get(), *test_buffer.get()); + ssim = I420SSIM(*reference_buffer.get(), *test_buffer.get()); } const FrameStats& frame_stats = comparison.frame_stats; - rtc::CritScope crit(&comparison_lock_); - auto stats_it = stream_stats_.find(frame_stats.stream_label); - RTC_CHECK(stats_it != stream_stats_.end()); + MutexLock lock(&comparison_lock_); + auto stats_it = stream_stats_.find(comparison.stats_key); + RTC_CHECK(stats_it != stream_stats_.end()) << comparison.stats_key.ToString(); StreamStats* stats = &stats_it->second; analyzer_stats_.comparisons_done++; if (comparison.overload_reason == OverloadReason::kCpu) { @@ -517,17 +740,19 @@ void DefaultVideoQualityAnalyzer::ProcessComparison( analyzer_stats_.memory_overloaded_comparisons_done++; } if (psnr > 0) { - stats->psnr.AddSample(psnr); + stats->psnr.AddSample(StatsSample(psnr, frame_stats.rendered_time)); } if (ssim > 0) { - stats->ssim.AddSample(ssim); + stats->ssim.AddSample(StatsSample(ssim, frame_stats.received_time)); } if (frame_stats.encoded_time.IsFinite()) { - stats->encode_time_ms.AddSample( - (frame_stats.encoded_time - frame_stats.pre_encode_time).ms()); + stats->encode_time_ms.AddSample(StatsSample( + 
(frame_stats.encoded_time - frame_stats.pre_encode_time).ms(), + frame_stats.encoded_time)); stats->encode_frame_rate.AddEvent(frame_stats.encoded_time); stats->total_encoded_images_payload += frame_stats.encoded_image_size; - stats->target_encode_bitrate.AddSample(frame_stats.target_encode_bitrate); + stats->target_encode_bitrate.AddSample(StatsSample( + frame_stats.target_encode_bitrate, frame_stats.encoded_time)); } else { if (frame_stats.pre_encode_time.IsFinite()) { stats->dropped_by_encoder++; @@ -538,34 +763,40 @@ void DefaultVideoQualityAnalyzer::ProcessComparison( // Next stats can be calculated only if frame was received on remote side. if (!comparison.dropped) { stats->resolution_of_rendered_frame.AddSample( - *comparison.frame_stats.rendered_frame_width * - *comparison.frame_stats.rendered_frame_height); - stats->transport_time_ms.AddSample( - (frame_stats.decode_start_time - frame_stats.encoded_time).ms()); - stats->total_delay_incl_transport_ms.AddSample( - (frame_stats.rendered_time - frame_stats.captured_time).ms()); - stats->decode_time_ms.AddSample( - (frame_stats.decode_end_time - frame_stats.decode_start_time).ms()); - stats->receive_to_render_time_ms.AddSample( - (frame_stats.rendered_time - frame_stats.received_time).ms()); + StatsSample(*comparison.frame_stats.rendered_frame_width * + *comparison.frame_stats.rendered_frame_height, + frame_stats.rendered_time)); + stats->transport_time_ms.AddSample(StatsSample( + (frame_stats.decode_start_time - frame_stats.encoded_time).ms(), + frame_stats.received_time)); + stats->total_delay_incl_transport_ms.AddSample(StatsSample( + (frame_stats.rendered_time - frame_stats.captured_time).ms(), + frame_stats.received_time)); + stats->decode_time_ms.AddSample(StatsSample( + (frame_stats.decode_end_time - frame_stats.decode_start_time).ms(), + frame_stats.decode_end_time)); + stats->receive_to_render_time_ms.AddSample(StatsSample( + (frame_stats.rendered_time - frame_stats.received_time).ms(), + 
frame_stats.rendered_time)); if (frame_stats.prev_frame_rendered_time.IsFinite()) { TimeDelta time_between_rendered_frames = frame_stats.rendered_time - frame_stats.prev_frame_rendered_time; - stats->time_between_rendered_frames_ms.AddSample( - time_between_rendered_frames.ms()); + stats->time_between_rendered_frames_ms.AddSample(StatsSample( + time_between_rendered_frames.ms(), frame_stats.rendered_time)); double average_time_between_rendered_frames_ms = stats->time_between_rendered_frames_ms.GetAverage(); if (time_between_rendered_frames.ms() > std::max(kFreezeThresholdMs + average_time_between_rendered_frames_ms, 3 * average_time_between_rendered_frames_ms)) { - stats->freeze_time_ms.AddSample(time_between_rendered_frames.ms()); + stats->freeze_time_ms.AddSample(StatsSample( + time_between_rendered_frames.ms(), frame_stats.rendered_time)); auto freeze_end_it = - stream_last_freeze_end_time_.find(frame_stats.stream_label); + stream_last_freeze_end_time_.find(comparison.stats_key); RTC_DCHECK(freeze_end_it != stream_last_freeze_end_time_.end()); - stats->time_between_freezes_ms.AddSample( - (frame_stats.prev_frame_rendered_time - freeze_end_it->second) - .ms()); + stats->time_between_freezes_ms.AddSample(StatsSample( + (frame_stats.prev_frame_rendered_time - freeze_end_it->second).ms(), + frame_stats.rendered_time)); freeze_end_it->second = frame_stats.rendered_time; } } @@ -575,18 +806,19 @@ void DefaultVideoQualityAnalyzer::ProcessComparison( void DefaultVideoQualityAnalyzer::ReportResults() { using ::webrtc::test::ImproveDirection; - rtc::CritScope crit1(&lock_); - rtc::CritScope crit2(&comparison_lock_); + MutexLock lock1(&lock_); + MutexLock lock2(&comparison_lock_); for (auto& item : stream_stats_) { - ReportResults(GetTestCaseName(item.first), item.second, - stream_frame_counters_.at(item.first)); + ReportResults(GetTestCaseName(StatsKeyToMetricName(ToStatsKey(item.first))), + item.second, stream_frame_counters_.at(item.first)); } 
test::PrintResult("cpu_usage", "", test_label_.c_str(), GetCpuUsagePercent(), "%", false, ImproveDirection::kSmallerIsBetter); LogFrameCounters("Global", frame_counters_); for (auto& item : stream_stats_) { - LogFrameCounters(item.first, stream_frame_counters_.at(item.first)); - LogStreamInternalStats(item.first, item.second); + LogFrameCounters(ToStatsKey(item.first).ToString(), + stream_frame_counters_.at(item.first)); + LogStreamInternalStats(ToStatsKey(item.first).ToString(), item.second); } if (!analyzer_stats_.comparisons_queue_size.IsEmpty()) { RTC_LOG(INFO) << "comparisons_queue_size min=" @@ -683,6 +915,9 @@ void DefaultVideoQualityAnalyzer::ReportResults( frame_counters.dropped, "count", /*important=*/false, ImproveDirection::kSmallerIsBetter); + test::PrintResult("rendered_frames", "", test_case_name, + frame_counters.rendered, "count", /*important=*/false, + ImproveDirection::kBiggerIsBetter); ReportResult("max_skipped", test_case_name, stats.skipped_between_rendered, "count", ImproveDirection::kSmallerIsBetter); ReportResult("target_encode_bitrate", test_case_name, @@ -714,62 +949,249 @@ Timestamp DefaultVideoQualityAnalyzer::Now() { return clock_->CurrentTime(); } +StatsKey DefaultVideoQualityAnalyzer::ToStatsKey( + const InternalStatsKey& key) const { + return StatsKey(streams_.name(key.stream), peers_->name(key.sender), + peers_->name(key.receiver)); +} + +std::string DefaultVideoQualityAnalyzer::StatsKeyToMetricName( + const StatsKey& key) const { + if (peers_->size() <= 2) { + return key.stream_label; + } + return key.ToString(); +} + void DefaultVideoQualityAnalyzer::StartMeasuringCpuProcessTime() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); cpu_time_ -= rtc::GetProcessCpuTimeNanos(); wallclock_time_ -= rtc::SystemTimeNanos(); } void DefaultVideoQualityAnalyzer::StopMeasuringCpuProcessTime() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); cpu_time_ += 
rtc::GetProcessCpuTimeNanos(); wallclock_time_ += rtc::SystemTimeNanos(); } void DefaultVideoQualityAnalyzer::StartExcludingCpuThreadTime() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); cpu_time_ += rtc::GetThreadCpuTimeNanos(); } void DefaultVideoQualityAnalyzer::StopExcludingCpuThreadTime() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); cpu_time_ -= rtc::GetThreadCpuTimeNanos(); } double DefaultVideoQualityAnalyzer::GetCpuUsagePercent() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); return static_cast(cpu_time_) / wallclock_time_ * 100.0; } -DefaultVideoQualityAnalyzer::FrameStats::FrameStats(std::string stream_label, - Timestamp captured_time) - : stream_label(std::move(stream_label)), captured_time(captured_time) {} - DefaultVideoQualityAnalyzer::FrameComparison::FrameComparison( + InternalStatsKey stats_key, absl::optional captured, absl::optional rendered, bool dropped, FrameStats frame_stats, OverloadReason overload_reason) - : captured(std::move(captured)), + : stats_key(std::move(stats_key)), + captured(std::move(captured)), rendered(std::move(rendered)), dropped(dropped), frame_stats(std::move(frame_stats)), overload_reason(overload_reason) {} -uint16_t DefaultVideoQualityAnalyzer::StreamState::PopFront() { - uint16_t frame_id = frame_ids_.front(); - frame_ids_.pop_front(); - if (dead_frames_count_ > 0) { - dead_frames_count_--; +uint16_t DefaultVideoQualityAnalyzer::StreamState::PopFront(size_t peer) { + absl::optional frame_id = frame_ids_.PopFront(peer); + RTC_DCHECK(frame_id.has_value()); + + // If alive's frame queue is longer than all others, than also pop frame from + // it, because that frame is received by all receivers. 
+ size_t owner_size = frame_ids_.size(owner_); + size_t other_size = 0; + for (size_t i = 0; i < frame_ids_.readers_count(); ++i) { + size_t cur_size = frame_ids_.size(i); + if (i != owner_ && cur_size > other_size) { + other_size = cur_size; + } } - return frame_id; + if (owner_size > other_size) { + absl::optional alive_frame_id = frame_ids_.PopFront(owner_); + RTC_DCHECK(alive_frame_id.has_value()); + RTC_DCHECK_EQ(frame_id.value(), alive_frame_id.value()); + } + + return frame_id.value(); } uint16_t DefaultVideoQualityAnalyzer::StreamState::MarkNextAliveFrameAsDead() { - uint16_t frame_id = frame_ids_[dead_frames_count_]; - dead_frames_count_++; - return frame_id; + absl::optional frame_id = frame_ids_.PopFront(owner_); + RTC_DCHECK(frame_id.has_value()); + return frame_id.value(); +} + +void DefaultVideoQualityAnalyzer::StreamState::SetLastRenderedFrameTime( + size_t peer, + Timestamp time) { + auto it = last_rendered_frame_time_.find(peer); + if (it == last_rendered_frame_time_.end()) { + last_rendered_frame_time_.insert({peer, time}); + } else { + it->second = time; + } +} + +absl::optional +DefaultVideoQualityAnalyzer::StreamState::last_rendered_frame_time( + size_t peer) const { + return MaybeGetValue(last_rendered_frame_time_, peer); +} + +bool DefaultVideoQualityAnalyzer::FrameInFlight::RemoveFrame() { + if (!frame_) { + return false; + } + frame_ = absl::nullopt; + return true; +} + +void DefaultVideoQualityAnalyzer::FrameInFlight::SetFrameId(uint16_t id) { + if (frame_) { + frame_->set_id(id); + } +} + +std::vector +DefaultVideoQualityAnalyzer::FrameInFlight::GetPeersWhichDidntReceive() const { + std::vector out; + for (size_t i = 0; i < peers_count_; ++i) { + auto it = receiver_stats_.find(i); + if (i != owner_ && it != receiver_stats_.end() && + it->second.rendered_time.IsInfinite()) { + out.push_back(i); + } + } + return out; +} + +bool DefaultVideoQualityAnalyzer::FrameInFlight::HaveAllPeersReceived() const { + for (size_t i = 0; i < peers_count_; 
++i) { + if (i == owner_) { + continue; + } + + auto it = receiver_stats_.find(i); + if (it == receiver_stats_.end()) { + return false; + } + + if (!it->second.dropped && it->second.rendered_time.IsInfinite()) { + return false; + } + } + return true; +} + +void DefaultVideoQualityAnalyzer::FrameInFlight::OnFrameEncoded( + webrtc::Timestamp time, + int64_t encoded_image_size, + uint32_t target_encode_bitrate) { + encoded_time_ = time; + encoded_image_size_ = encoded_image_size; + target_encode_bitrate_ += target_encode_bitrate; +} + +void DefaultVideoQualityAnalyzer::FrameInFlight::OnFramePreDecode( + size_t peer, + webrtc::Timestamp received_time, + webrtc::Timestamp decode_start_time) { + receiver_stats_[peer].received_time = received_time; + receiver_stats_[peer].decode_start_time = decode_start_time; +} + +bool DefaultVideoQualityAnalyzer::FrameInFlight::HasReceivedTime( + size_t peer) const { + auto it = receiver_stats_.find(peer); + if (it == receiver_stats_.end()) { + return false; + } + return it->second.received_time.IsFinite(); +} + +bool DefaultVideoQualityAnalyzer::FrameInFlight::HasDecodeEndTime( + size_t peer) const { + auto it = receiver_stats_.find(peer); + if (it == receiver_stats_.end()) { + return false; + } + return it->second.decode_end_time.IsFinite(); +} + +void DefaultVideoQualityAnalyzer::FrameInFlight::OnFrameRendered( + size_t peer, + webrtc::Timestamp time, + int width, + int height) { + receiver_stats_[peer].rendered_time = time; + receiver_stats_[peer].rendered_frame_width = width; + receiver_stats_[peer].rendered_frame_height = height; +} + +bool DefaultVideoQualityAnalyzer::FrameInFlight::HasRenderedTime( + size_t peer) const { + auto it = receiver_stats_.find(peer); + if (it == receiver_stats_.end()) { + return false; + } + return it->second.rendered_time.IsFinite(); +} + +DefaultVideoQualityAnalyzer::FrameStats +DefaultVideoQualityAnalyzer::FrameInFlight::GetStatsForPeer(size_t peer) const { + FrameStats stats(captured_time_); + 
stats.pre_encode_time = pre_encode_time_; + stats.encoded_time = encoded_time_; + stats.target_encode_bitrate = target_encode_bitrate_; + stats.encoded_image_size = encoded_image_size_; + + absl::optional receiver_stats = + MaybeGetValue(receiver_stats_, peer); + if (receiver_stats.has_value()) { + stats.received_time = receiver_stats->received_time; + stats.decode_start_time = receiver_stats->decode_start_time; + stats.decode_end_time = receiver_stats->decode_end_time; + stats.rendered_time = receiver_stats->rendered_time; + stats.prev_frame_rendered_time = receiver_stats->prev_frame_rendered_time; + stats.rendered_frame_width = receiver_stats->rendered_frame_width; + stats.rendered_frame_height = receiver_stats->rendered_frame_height; + } + return stats; +} + +size_t DefaultVideoQualityAnalyzer::NamesCollection::AddIfAbsent( + absl::string_view name) { + auto it = index_.find(name); + if (it != index_.end()) { + return it->second; + } + size_t out = names_.size(); + size_t old_capacity = names_.capacity(); + names_.emplace_back(name); + size_t new_capacity = names_.capacity(); + + if (old_capacity == new_capacity) { + index_.emplace(names_[out], out); + } else { + // Reallocation happened in the vector, so we need to rebuild |index_| + index_.clear(); + for (size_t i = 0; i < names_.size(); ++i) { + index_.emplace(names_[i], i); + } + } + return out; } } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h b/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h index 6bebb0f02b..626fa246e5 100644 --- a/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h +++ b/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h @@ -19,15 +19,17 @@ #include #include +#include "api/array_view.h" +#include "api/numerics/samples_stats_counter.h" #include "api/test/video_quality_analyzer_interface.h" #include "api/units/timestamp.h" #include "api/video/encoded_image.h" #include "api/video/video_frame.h" -#include 
"rtc_base/critical_section.h" #include "rtc_base/event.h" -#include "rtc_base/numerics/samples_stats_counter.h" #include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/mutex.h" #include "system_wrappers/include/clock.h" +#include "test/pc/e2e/analyzer/video/multi_head_queue.h" #include "test/testsupport/perf_test.h" namespace webrtc { @@ -36,7 +38,7 @@ namespace webrtc_pc_e2e { // WebRTC will request a key frame after 3 seconds if no frames were received. // We assume max frame rate ~60 fps, so 270 frames will cover max freeze without // key frame request. -constexpr int kDefaultMaxFramesInFlightPerStream = 270; +constexpr size_t kDefaultMaxFramesInFlightPerStream = 270; class RateCounter { public: @@ -124,52 +126,123 @@ struct AnalyzerStats { // it is queued when its captured frame was already removed due to high memory // usage for that video stream. int64_t memory_overloaded_comparisons_done = 0; + // Count of frames in flight in analyzer measured when new comparison is added + // and after analyzer was stopped. + SamplesStatsCounter frames_in_flight_left_count; +}; + +struct StatsKey { + StatsKey(std::string stream_label, std::string sender, std::string receiver) + : stream_label(std::move(stream_label)), + sender(std::move(sender)), + receiver(std::move(receiver)) {} + + std::string ToString() const; + + // Label of video stream to which stats belongs to. + std::string stream_label; + // Name of the peer which send this stream. + std::string sender; + // Name of the peer on which stream was received. + std::string receiver; +}; + +// Required to use StatsKey as std::map key. 
+bool operator<(const StatsKey& a, const StatsKey& b); +bool operator==(const StatsKey& a, const StatsKey& b); + +struct InternalStatsKey { + InternalStatsKey(size_t stream, size_t sender, size_t receiver) + : stream(stream), sender(sender), receiver(receiver) {} + + std::string ToString() const; + + size_t stream; + size_t sender; + size_t receiver; +}; + +// Required to use InternalStatsKey as std::map key. +bool operator<(const InternalStatsKey& a, const InternalStatsKey& b); +bool operator==(const InternalStatsKey& a, const InternalStatsKey& b); + +struct DefaultVideoQualityAnalyzerOptions { + // Tells DefaultVideoQualityAnalyzer if heavy metrics like PSNR and SSIM have + // to be computed or not. + bool heavy_metrics_computation_enabled = true; + // If true DefaultVideoQualityAnalyzer will try to adjust frames before + // computing PSNR and SSIM for them. In some cases picture may be shifted by + // a few pixels after the encode/decode step. Those difference is invisible + // for a human eye, but it affects the metrics. So the adjustment is used to + // get metrics that are closer to how human persepts the video. This feature + // significantly slows down the comparison, so turn it on only when it is + // needed. + bool adjust_cropping_before_comparing_frames = false; + // Amount of frames that are queued in the DefaultVideoQualityAnalyzer from + // the point they were captured to the point they were rendered on all + // receivers per stream. 
+ size_t max_frames_in_flight_per_stream_count = + kDefaultMaxFramesInFlightPerStream; }; class DefaultVideoQualityAnalyzer : public VideoQualityAnalyzerInterface { public: explicit DefaultVideoQualityAnalyzer( - bool heavy_metrics_computation_enabled = true, - int max_frames_in_flight_per_stream_count = - kDefaultMaxFramesInFlightPerStream); + webrtc::Clock* clock, + DefaultVideoQualityAnalyzerOptions options = + DefaultVideoQualityAnalyzerOptions()); ~DefaultVideoQualityAnalyzer() override; - void Start(std::string test_case_name, int max_threads_count) override; - uint16_t OnFrameCaptured(const std::string& stream_label, + void Start(std::string test_case_name, + rtc::ArrayView peer_names, + int max_threads_count) override; + uint16_t OnFrameCaptured(absl::string_view peer_name, + const std::string& stream_label, const VideoFrame& frame) override; - void OnFramePreEncode(const VideoFrame& frame) override; - void OnFrameEncoded(uint16_t frame_id, + void OnFramePreEncode(absl::string_view peer_name, + const VideoFrame& frame) override; + void OnFrameEncoded(absl::string_view peer_name, + uint16_t frame_id, const EncodedImage& encoded_image, const EncoderStats& stats) override; - void OnFrameDropped(EncodedImageCallback::DropReason reason) override; - void OnFramePreDecode(uint16_t frame_id, + void OnFrameDropped(absl::string_view peer_name, + EncodedImageCallback::DropReason reason) override; + void OnFramePreDecode(absl::string_view peer_name, + uint16_t frame_id, const EncodedImage& input_image) override; - void OnFrameDecoded(const VideoFrame& frame, + void OnFrameDecoded(absl::string_view peer_name, + const VideoFrame& frame, const DecoderStats& stats) override; - void OnFrameRendered(const VideoFrame& frame) override; - void OnEncoderError(const VideoFrame& frame, int32_t error_code) override; - void OnDecoderError(uint16_t frame_id, int32_t error_code) override; + void OnFrameRendered(absl::string_view peer_name, + const VideoFrame& frame) override; + void 
OnEncoderError(absl::string_view peer_name, + const VideoFrame& frame, + int32_t error_code) override; + void OnDecoderError(absl::string_view peer_name, + uint16_t frame_id, + int32_t error_code) override; + void RegisterParticipantInCall(absl::string_view peer_name) override; void Stop() override; std::string GetStreamLabel(uint16_t frame_id) override; - void OnStatsReports(const std::string& pc_label, - const StatsReports& stats_reports) override {} + void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) override {} // Returns set of stream labels, that were met during test call. - std::set GetKnownVideoStreams() const; + std::set GetKnownVideoStreams() const; const FrameCounters& GetGlobalCounters() const; // Returns frame counter per stream label. Valid stream labels can be obtained // by calling GetKnownVideoStreams() - const std::map& GetPerStreamCounters() const; + std::map GetPerStreamCounters() const; // Returns video quality stats per stream label. Valid stream labels can be // obtained by calling GetKnownVideoStreams() - std::map GetStats() const; + std::map GetStats() const; AnalyzerStats GetAnalyzerStats() const; + double GetCpuUsagePercent(); private: struct FrameStats { - FrameStats(std::string stream_label, Timestamp captured_time); - - std::string stream_label; + FrameStats(Timestamp captured_time) : captured_time(captured_time) {} // Frame events timestamp. 
Timestamp captured_time; @@ -182,12 +255,11 @@ class DefaultVideoQualityAnalyzer : public VideoQualityAnalyzerInterface { Timestamp rendered_time = Timestamp::MinusInfinity(); Timestamp prev_frame_rendered_time = Timestamp::MinusInfinity(); + int64_t encoded_image_size = 0; uint32_t target_encode_bitrate = 0; absl::optional rendered_frame_width = absl::nullopt; absl::optional rendered_frame_height = absl::nullopt; - - int64_t encoded_image_size = 0; }; // Describes why comparison was done in overloaded mode (without calculating @@ -209,12 +281,14 @@ class DefaultVideoQualityAnalyzer : public VideoQualityAnalyzerInterface { // because there were too many comparisons in the queue. |dropped| can be // true or false showing was frame dropped or not. struct FrameComparison { - FrameComparison(absl::optional captured, + FrameComparison(InternalStatsKey stats_key, + absl::optional captured, absl::optional rendered, bool dropped, FrameStats frame_stats, OverloadReason overload_reason); + InternalStatsKey stats_key; // Frames can be omitted if there too many computations waiting in the // queue. absl::optional captured; @@ -230,49 +304,183 @@ class DefaultVideoQualityAnalyzer : public VideoQualityAnalyzerInterface { // Represents a current state of video stream. class StreamState { public: - void PushBack(uint16_t frame_id) { frame_ids_.emplace_back(frame_id); } - - uint16_t PopFront(); + StreamState(size_t owner, size_t peers_count) + : owner_(owner), frame_ids_(peers_count) {} - bool Empty() { return frame_ids_.empty(); } + size_t owner() const { return owner_; } - uint16_t Front() { return frame_ids_.front(); } + void PushBack(uint16_t frame_id) { frame_ids_.PushBack(frame_id); } + // Crash if state is empty. Guarantees that there can be no alive frames + // that are not in the owner queue + uint16_t PopFront(size_t peer); + bool IsEmpty(size_t peer) const { return frame_ids_.IsEmpty(peer); } + // Crash if state is empty. 
+ uint16_t Front(size_t peer) const { return frame_ids_.Front(peer).value(); } - int GetAliveFramesCount() { return frame_ids_.size() - dead_frames_count_; } + // When new peer is added - all current alive frames will be sent to it as + // well. So we need to register them as expected by copying owner_ head to + // the new head. + void AddPeer() { frame_ids_.AddHead(owner_); } + size_t GetAliveFramesCount() { return frame_ids_.size(owner_); } uint16_t MarkNextAliveFrameAsDead(); - void set_last_rendered_frame_time(Timestamp time) { - last_rendered_frame_time_ = time; - } - absl::optional last_rendered_frame_time() const { - return last_rendered_frame_time_; - } + void SetLastRenderedFrameTime(size_t peer, Timestamp time); + absl::optional last_rendered_frame_time(size_t peer) const; private: + // Index of the owner. Owner's queue in |frame_ids_| will keep alive frames. + const size_t owner_; // To correctly determine dropped frames we have to know sequence of frames // in each stream so we will keep a list of frame ids inside the stream. - // When the frame is rendered, we will pop ids from the list for until id - // will match with rendered one. All ids before matched one can be - // considered as dropped: + // This list is represented by multi head queue of frame ids with separate + // head for each receiver. When the frame is rendered, we will pop ids from + // the corresponding head until id will match with rendered one. All ids + // before matched one can be considered as dropped: // // | frame_id1 |->| frame_id2 |->| frame_id3 |->| frame_id4 | // // If we received frame with id frame_id3, then we will pop frame_id1 and // frame_id2 and consider that frames as dropped and then compare received // frame with the one from |captured_frames_in_flight_| with id frame_id3. - std::deque frame_ids_; - // Count of dead frames in the beginning of the deque. 
- int dead_frames_count_; - absl::optional last_rendered_frame_time_ = absl::nullopt; + // + // To track alive frames (frames that contains frame's payload in + // |captured_frames_in_flight_|) the head which corresponds to |owner_| will + // be used. So that head will point to the first alive frame in frames list. + MultiHeadQueue frame_ids_; + std::map last_rendered_frame_time_; }; enum State { kNew, kActive, kStopped }; - void AddComparison(absl::optional captured, + struct ReceiverFrameStats { + // Time when last packet of a frame was received. + Timestamp received_time = Timestamp::MinusInfinity(); + Timestamp decode_start_time = Timestamp::MinusInfinity(); + Timestamp decode_end_time = Timestamp::MinusInfinity(); + Timestamp rendered_time = Timestamp::MinusInfinity(); + Timestamp prev_frame_rendered_time = Timestamp::MinusInfinity(); + + absl::optional rendered_frame_width = absl::nullopt; + absl::optional rendered_frame_height = absl::nullopt; + + bool dropped = false; + }; + + class FrameInFlight { + public: + FrameInFlight(size_t stream, + VideoFrame frame, + Timestamp captured_time, + size_t owner, + size_t peers_count) + : stream_(stream), + owner_(owner), + peers_count_(peers_count), + frame_(std::move(frame)), + captured_time_(captured_time) {} + + size_t stream() const { return stream_; } + const absl::optional& frame() const { return frame_; } + // Returns was frame removed or not. 
+ bool RemoveFrame(); + void SetFrameId(uint16_t id); + + void AddPeer() { ++peers_count_; } + + std::vector GetPeersWhichDidntReceive() const; + bool HaveAllPeersReceived() const; + + void SetPreEncodeTime(webrtc::Timestamp time) { pre_encode_time_ = time; } + + void OnFrameEncoded(webrtc::Timestamp time, + int64_t encoded_image_size, + uint32_t target_encode_bitrate); + + bool HasEncodedTime() const { return encoded_time_.IsFinite(); } + + void OnFramePreDecode(size_t peer, + webrtc::Timestamp received_time, + webrtc::Timestamp decode_start_time); + + bool HasReceivedTime(size_t peer) const; + + void SetDecodeEndTime(size_t peer, webrtc::Timestamp time) { + receiver_stats_[peer].decode_end_time = time; + } + + bool HasDecodeEndTime(size_t peer) const; + + void OnFrameRendered(size_t peer, + webrtc::Timestamp time, + int width, + int height); + + bool HasRenderedTime(size_t peer) const; + + // Crash if rendered time is not set for specified |peer|. + webrtc::Timestamp rendered_time(size_t peer) const { + return receiver_stats_.at(peer).rendered_time; + } + + void MarkDropped(size_t peer) { receiver_stats_[peer].dropped = true; } + + void SetPrevFrameRenderedTime(size_t peer, webrtc::Timestamp time) { + receiver_stats_[peer].prev_frame_rendered_time = time; + } + + FrameStats GetStatsForPeer(size_t peer) const; + + private: + const size_t stream_; + const size_t owner_; + size_t peers_count_; + absl::optional frame_; + + // Frame events timestamp. 
+ Timestamp captured_time_; + Timestamp pre_encode_time_ = Timestamp::MinusInfinity(); + Timestamp encoded_time_ = Timestamp::MinusInfinity(); + int64_t encoded_image_size_ = 0; + uint32_t target_encode_bitrate_ = 0; + std::map receiver_stats_; + }; + + class NamesCollection { + public: + NamesCollection() = default; + explicit NamesCollection(rtc::ArrayView names) { + names_ = std::vector(names.begin(), names.end()); + for (size_t i = 0; i < names_.size(); ++i) { + index_.emplace(names_[i], i); + } + } + + size_t size() const { return names_.size(); } + + size_t index(absl::string_view name) const { return index_.at(name); } + + const std::string& name(size_t index) const { return names_[index]; } + + bool HasName(absl::string_view name) const { + return index_.find(name) != index_.end(); + } + + // Add specified |name| to the collection if it isn't presented. + // Returns index which corresponds to specified |name|. + size_t AddIfAbsent(absl::string_view name); + + private: + std::vector names_; + std::map index_; + }; + + void AddComparison(InternalStatsKey stats_key, + absl::optional captured, absl::optional rendered, bool dropped, - FrameStats frame_stats); + FrameStats frame_stats) + RTC_EXCLUSIVE_LOCKS_REQUIRED(comparison_lock_); static void ProcessComparisonsThread(void* obj); void ProcessComparisons(); void ProcessComparison(const FrameComparison& comparison); @@ -292,23 +500,32 @@ class DefaultVideoQualityAnalyzer : public VideoQualityAnalyzerInterface { // Returns name of current test case for reporting. std::string GetTestCaseName(const std::string& stream_label) const; Timestamp Now(); + StatsKey ToStatsKey(const InternalStatsKey& key) const + RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); + // Returns string representation of stats key for metrics naming. Used for + // backward compatibility by metrics naming for 2 peers cases. 
+ std::string StatsKeyToMetricName(const StatsKey& key) const + RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); void StartMeasuringCpuProcessTime(); void StopMeasuringCpuProcessTime(); void StartExcludingCpuThreadTime(); void StopExcludingCpuThreadTime(); - double GetCpuUsagePercent(); - const bool heavy_metrics_computation_enabled_; - const int max_frames_in_flight_per_stream_count_; + // TODO(titovartem) restore const when old constructor will be removed. + DefaultVideoQualityAnalyzerOptions options_; webrtc::Clock* const clock_; std::atomic next_frame_id_{0}; std::string test_label_; - rtc::CriticalSection lock_; + mutable Mutex lock_; + std::unique_ptr peers_ RTC_GUARDED_BY(lock_); State state_ RTC_GUARDED_BY(lock_) = State::kNew; Timestamp start_time_ RTC_GUARDED_BY(lock_) = Timestamp::MinusInfinity(); + // Mapping from stream label to unique size_t value to use in stats and avoid + // extra string copying. + NamesCollection streams_ RTC_GUARDED_BY(lock_); // Frames that were captured by all streams and still aren't rendered by any // stream or deemed dropped. Frame with id X can be removed from this map if: // 1. The frame with id X was received in OnFrameRendered @@ -316,35 +533,37 @@ class DefaultVideoQualityAnalyzer : public VideoQualityAnalyzerInterface { // 3. Next available frame id for newly captured frame is X // 4. There too many frames in flight for current video stream and X is the // oldest frame id in this stream. - std::map captured_frames_in_flight_ + std::map captured_frames_in_flight_ RTC_GUARDED_BY(lock_); // Global frames count for all video streams. FrameCounters frame_counters_ RTC_GUARDED_BY(lock_); - // Frame counters per each stream. - std::map stream_frame_counters_ + // Frame counters per each stream per each receiver. + std::map stream_frame_counters_ RTC_GUARDED_BY(lock_); - std::map frame_stats_ RTC_GUARDED_BY(lock_); - std::map stream_states_ RTC_GUARDED_BY(lock_); - - // Stores history mapping between stream labels and frame ids. 
Updated when - // frame id overlap. It required to properly return stream label after 1st - // frame from simulcast streams was already rendered and last is still - // encoding. - std::map> stream_to_frame_id_history_ + // Map from stream index in |streams_| to its StreamState. + std::map stream_states_ RTC_GUARDED_BY(lock_); + // Map from stream index in |streams_| to sender peer index in |peers_|. + std::map stream_to_sender_ RTC_GUARDED_BY(lock_); + + // Stores history mapping between stream index in |streams_| and frame ids. + // Updated when frame id overlap. It required to properly return stream label + // after 1st frame from simulcast streams was already rendered and last is + // still encoding. + std::map> stream_to_frame_id_history_ RTC_GUARDED_BY(lock_); - rtc::CriticalSection comparison_lock_; - std::map stream_stats_ + mutable Mutex comparison_lock_; + std::map stream_stats_ RTC_GUARDED_BY(comparison_lock_); - std::map stream_last_freeze_end_time_ + std::map stream_last_freeze_end_time_ RTC_GUARDED_BY(comparison_lock_); std::deque comparisons_ RTC_GUARDED_BY(comparison_lock_); AnalyzerStats analyzer_stats_ RTC_GUARDED_BY(comparison_lock_); - std::vector> thread_pool_; + std::vector thread_pool_; rtc::Event comparison_available_event_; - rtc::CriticalSection cpu_measurement_lock_; + Mutex cpu_measurement_lock_; int64_t cpu_time_ RTC_GUARDED_BY(cpu_measurement_lock_) = 0; int64_t wallclock_time_ RTC_GUARDED_BY(cpu_measurement_lock_) = 0; }; diff --git a/test/pc/e2e/analyzer/video/default_video_quality_analyzer_test.cc b/test/pc/e2e/analyzer/video/default_video_quality_analyzer_test.cc index 1bc29c5f09..8d8a1af848 100644 --- a/test/pc/e2e/analyzer/video/default_video_quality_analyzer_test.cc +++ b/test/pc/e2e/analyzer/video/default_video_quality_analyzer_test.cc @@ -8,6 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include #include #include #include @@ -18,6 +19,9 @@ #include "api/video/encoded_image.h" #include "api/video/i420_buffer.h" #include "api/video/video_frame.h" +#include "common_video/libyuv/include/webrtc_libyuv.h" +#include "rtc_base/strings/string_builder.h" +#include "rtc_tools/frame_analyzer/video_geometry_aligner.h" #include "system_wrappers/include/sleep.h" #include "test/gtest.h" #include "test/pc/e2e/analyzer/video/default_video_quality_analyzer.h" @@ -26,11 +30,24 @@ namespace webrtc { namespace webrtc_pc_e2e { namespace { +using StatsSample = ::webrtc::SamplesStatsCounter::StatsSample; + constexpr int kAnalyzerMaxThreadsCount = 1; constexpr int kMaxFramesInFlightPerStream = 10; constexpr int kFrameWidth = 320; constexpr int kFrameHeight = 240; +constexpr double kMaxSsim = 1; constexpr char kStreamLabel[] = "video-stream"; +constexpr char kSenderPeerName[] = "alice"; +constexpr char kReceiverPeerName[] = "bob"; + +DefaultVideoQualityAnalyzerOptions AnalyzerOptionsForTest() { + DefaultVideoQualityAnalyzerOptions options; + options.heavy_metrics_computation_enabled = false; + options.adjust_cropping_before_comparing_frames = false; + options.max_frames_in_flight_per_stream_count = kMaxFramesInFlightPerStream; + return options; +} VideoFrame NextFrame(test::FrameGeneratorInterface* frame_generator, int64_t timestamp_us) { @@ -46,13 +63,13 @@ VideoFrame NextFrame(test::FrameGeneratorInterface* frame_generator, EncodedImage FakeEncode(const VideoFrame& frame) { EncodedImage image; std::vector packet_infos; - packet_infos.push_back( - RtpPacketInfo(/*ssrc=*/1, - /*csrcs=*/{}, - /*rtp_timestamp=*/frame.timestamp(), - /*audio_level=*/absl::nullopt, - /*absolute_capture_time=*/absl::nullopt, - /*receive_time_ms=*/frame.timestamp_us() + 10)); + packet_infos.push_back(RtpPacketInfo( + /*ssrc=*/1, + /*csrcs=*/{}, + /*rtp_timestamp=*/frame.timestamp(), + /*audio_level=*/absl::nullopt, + /*absolute_capture_time=*/absl::nullopt, + 
/*receive_time=*/Timestamp::Micros(frame.timestamp_us() + 10000))); image.SetPacketInfos(RtpPacketInfos(packet_infos)); return image; } @@ -64,6 +81,33 @@ VideoFrame DeepCopy(const VideoFrame& frame) { return copy; } +std::vector GetSortedSamples(const SamplesStatsCounter& counter) { + rtc::ArrayView view = counter.GetTimedSamples(); + std::vector out(view.begin(), view.end()); + std::sort(out.begin(), out.end(), + [](const StatsSample& a, const StatsSample& b) { + return a.time < b.time; + }); + return out; +} + +std::string ToString(const std::vector& values) { + rtc::StringBuilder out; + for (const auto& v : values) { + out << "{ time_ms=" << v.time.ms() << "; value=" << v.value << "}, "; + } + return out.str(); +} + +void FakeCPULoad() { + std::vector temp(1000000); + for (size_t i = 0; i < temp.size(); ++i) { + temp[i] = rand(); + } + std::sort(temp.begin(), temp.end()); + ASSERT_TRUE(std::is_sorted(temp.begin(), temp.end())); +} + TEST(DefaultVideoQualityAnalyzerTest, MemoryOverloadedAndThenAllFramesReceived) { std::unique_ptr frame_generator = @@ -71,28 +115,32 @@ TEST(DefaultVideoQualityAnalyzerTest, /*type=*/absl::nullopt, /*num_squares=*/absl::nullopt); - DefaultVideoQualityAnalyzer analyzer( - /*heavy_metrics_computation_enabled=*/false, kMaxFramesInFlightPerStream); - analyzer.Start("test_case", kAnalyzerMaxThreadsCount); + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", + std::vector{kSenderPeerName, kReceiverPeerName}, + kAnalyzerMaxThreadsCount); std::map captured_frames; std::vector frames_order; for (int i = 0; i < kMaxFramesInFlightPerStream * 2; ++i) { VideoFrame frame = NextFrame(frame_generator.get(), i); - frame.set_id(analyzer.OnFrameCaptured(kStreamLabel, frame)); + frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, frame)); frames_order.push_back(frame.id()); captured_frames.insert({frame.id(), frame}); - analyzer.OnFramePreEncode(frame); - 
analyzer.OnFrameEncoded(frame.id(), FakeEncode(frame), + analyzer.OnFramePreEncode(kSenderPeerName, frame); + analyzer.OnFrameEncoded(kSenderPeerName, frame.id(), FakeEncode(frame), VideoQualityAnalyzerInterface::EncoderStats()); } for (const uint16_t& frame_id : frames_order) { VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); - analyzer.OnFramePreDecode(received_frame.id(), FakeEncode(received_frame)); - analyzer.OnFrameDecoded(received_frame, + analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, VideoQualityAnalyzerInterface::DecoderStats()); - analyzer.OnFrameRendered(received_frame); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); } // Give analyzer some time to process frames on async thread. The computations @@ -111,6 +159,87 @@ TEST(DefaultVideoQualityAnalyzerTest, EXPECT_EQ(frame_counters.dropped, 0); } +TEST(DefaultVideoQualityAnalyzerTest, + FillMaxMemoryReceiveAllMemoryOverloadedAndThenAllFramesReceived) { + std::unique_ptr frame_generator = + test::CreateSquareFrameGenerator(kFrameWidth, kFrameHeight, + /*type=*/absl::nullopt, + /*num_squares=*/absl::nullopt); + + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", + std::vector{kSenderPeerName, kReceiverPeerName}, + kAnalyzerMaxThreadsCount); + + std::map captured_frames; + std::vector frames_order; + // Feel analyzer's memory up to limit + for (int i = 0; i < kMaxFramesInFlightPerStream; ++i) { + VideoFrame frame = NextFrame(frame_generator.get(), i); + frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, frame)); + frames_order.push_back(frame.id()); + captured_frames.insert({frame.id(), frame}); + analyzer.OnFramePreEncode(kSenderPeerName, frame); + analyzer.OnFrameEncoded(kSenderPeerName, frame.id(), FakeEncode(frame), + VideoQualityAnalyzerInterface::EncoderStats()); + } 
+ + // Receive all frames. + for (const uint16_t& frame_id : frames_order) { + VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); + } + frames_order.clear(); + + // Give analyzer some time to process frames on async thread. The computations + // have to be fast (heavy metrics are disabled!), so if doesn't fit 100ms it + // means we have an issue! + SleepMs(100); + + // Overload analyzer's memory up to limit + for (int i = 0; i < 2 * kMaxFramesInFlightPerStream; ++i) { + VideoFrame frame = NextFrame(frame_generator.get(), i); + frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, frame)); + frames_order.push_back(frame.id()); + captured_frames.insert({frame.id(), frame}); + analyzer.OnFramePreEncode(kSenderPeerName, frame); + analyzer.OnFrameEncoded(kSenderPeerName, frame.id(), FakeEncode(frame), + VideoQualityAnalyzerInterface::EncoderStats()); + } + + // Receive all frames. + for (const uint16_t& frame_id : frames_order) { + VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); + } + + // Give analyzer some time to process frames on async thread. The computations + // have to be fast (heavy metrics are disabled!), so if doesn't fit 100ms it + // means we have an issue! 
+ SleepMs(100); + analyzer.Stop(); + + AnalyzerStats stats = analyzer.GetAnalyzerStats(); + EXPECT_EQ(stats.memory_overloaded_comparisons_done, + kMaxFramesInFlightPerStream); + EXPECT_EQ(stats.comparisons_done, kMaxFramesInFlightPerStream * 3); + FrameCounters frame_counters = analyzer.GetGlobalCounters(); + EXPECT_EQ(frame_counters.captured, kMaxFramesInFlightPerStream * 3); + EXPECT_EQ(frame_counters.rendered, kMaxFramesInFlightPerStream * 3); + EXPECT_EQ(frame_counters.dropped, 0); +} + TEST(DefaultVideoQualityAnalyzerTest, MemoryOverloadedHalfDroppedAndThenHalfFramesReceived) { std::unique_ptr frame_generator = @@ -118,29 +247,33 @@ TEST(DefaultVideoQualityAnalyzerTest, /*type=*/absl::nullopt, /*num_squares=*/absl::nullopt); - DefaultVideoQualityAnalyzer analyzer( - /*heavy_metrics_computation_enabled=*/false, kMaxFramesInFlightPerStream); - analyzer.Start("test_case", kAnalyzerMaxThreadsCount); + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", + std::vector{kSenderPeerName, kReceiverPeerName}, + kAnalyzerMaxThreadsCount); std::map captured_frames; std::vector frames_order; for (int i = 0; i < kMaxFramesInFlightPerStream * 2; ++i) { VideoFrame frame = NextFrame(frame_generator.get(), i); - frame.set_id(analyzer.OnFrameCaptured(kStreamLabel, frame)); + frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, frame)); frames_order.push_back(frame.id()); captured_frames.insert({frame.id(), frame}); - analyzer.OnFramePreEncode(frame); - analyzer.OnFrameEncoded(frame.id(), FakeEncode(frame), + analyzer.OnFramePreEncode(kSenderPeerName, frame); + analyzer.OnFrameEncoded(kSenderPeerName, frame.id(), FakeEncode(frame), VideoQualityAnalyzerInterface::EncoderStats()); } for (size_t i = kMaxFramesInFlightPerStream; i < frames_order.size(); ++i) { uint16_t frame_id = frames_order.at(i); VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); - 
analyzer.OnFramePreDecode(received_frame.id(), FakeEncode(received_frame)); - analyzer.OnFrameDecoded(received_frame, + analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, VideoQualityAnalyzerInterface::DecoderStats()); - analyzer.OnFrameRendered(received_frame); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); } // Give analyzer some time to process frames on async thread. The computations @@ -164,29 +297,33 @@ TEST(DefaultVideoQualityAnalyzerTest, NormalScenario) { /*type=*/absl::nullopt, /*num_squares=*/absl::nullopt); - DefaultVideoQualityAnalyzer analyzer( - /*heavy_metrics_computation_enabled=*/false, kMaxFramesInFlightPerStream); - analyzer.Start("test_case", kAnalyzerMaxThreadsCount); + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", + std::vector{kSenderPeerName, kReceiverPeerName}, + kAnalyzerMaxThreadsCount); std::map captured_frames; std::vector frames_order; for (int i = 0; i < kMaxFramesInFlightPerStream; ++i) { VideoFrame frame = NextFrame(frame_generator.get(), i); - frame.set_id(analyzer.OnFrameCaptured(kStreamLabel, frame)); + frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, frame)); frames_order.push_back(frame.id()); captured_frames.insert({frame.id(), frame}); - analyzer.OnFramePreEncode(frame); - analyzer.OnFrameEncoded(frame.id(), FakeEncode(frame), + analyzer.OnFramePreEncode(kSenderPeerName, frame); + analyzer.OnFrameEncoded(kSenderPeerName, frame.id(), FakeEncode(frame), VideoQualityAnalyzerInterface::EncoderStats()); } for (size_t i = 1; i < frames_order.size(); i += 2) { uint16_t frame_id = frames_order.at(i); VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); - analyzer.OnFramePreDecode(received_frame.id(), FakeEncode(received_frame)); - analyzer.OnFrameDecoded(received_frame, + 
analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, VideoQualityAnalyzerInterface::DecoderStats()); - analyzer.OnFrameRendered(received_frame); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); } // Give analyzer some time to process frames on async thread. The computations @@ -199,6 +336,11 @@ TEST(DefaultVideoQualityAnalyzerTest, NormalScenario) { EXPECT_EQ(stats.memory_overloaded_comparisons_done, 0); EXPECT_EQ(stats.comparisons_done, kMaxFramesInFlightPerStream); + std::vector frames_in_flight_sizes = + GetSortedSamples(stats.frames_in_flight_left_count); + EXPECT_EQ(frames_in_flight_sizes.back().value, 0) + << ToString(frames_in_flight_sizes); + FrameCounters frame_counters = analyzer.GetGlobalCounters(); EXPECT_EQ(frame_counters.captured, kMaxFramesInFlightPerStream); EXPECT_EQ(frame_counters.received, kMaxFramesInFlightPerStream / 2); @@ -207,6 +349,559 @@ TEST(DefaultVideoQualityAnalyzerTest, NormalScenario) { EXPECT_EQ(frame_counters.dropped, kMaxFramesInFlightPerStream / 2); } +TEST(DefaultVideoQualityAnalyzerTest, OneFrameReceivedTwice) { + std::unique_ptr frame_generator = + test::CreateSquareFrameGenerator(kFrameWidth, kFrameHeight, + /*type=*/absl::nullopt, + /*num_squares=*/absl::nullopt); + + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", + std::vector{kSenderPeerName, kReceiverPeerName}, + kAnalyzerMaxThreadsCount); + + VideoFrame captured_frame = NextFrame(frame_generator.get(), 0); + captured_frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, captured_frame)); + analyzer.OnFramePreEncode(kSenderPeerName, captured_frame); + analyzer.OnFrameEncoded(kSenderPeerName, captured_frame.id(), + FakeEncode(captured_frame), + VideoQualityAnalyzerInterface::EncoderStats()); + + VideoFrame received_frame = DeepCopy(captured_frame); + 
analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); + + received_frame = DeepCopy(captured_frame); + analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); + + // Give analyzer some time to process frames on async thread. The computations + // have to be fast (heavy metrics are disabled!), so if doesn't fit 100ms it + // means we have an issue! + SleepMs(100); + analyzer.Stop(); + + AnalyzerStats stats = analyzer.GetAnalyzerStats(); + EXPECT_EQ(stats.memory_overloaded_comparisons_done, 0); + EXPECT_EQ(stats.comparisons_done, 1); + + FrameCounters frame_counters = analyzer.GetGlobalCounters(); + EXPECT_EQ(frame_counters.captured, 1); + EXPECT_EQ(frame_counters.received, 1); + EXPECT_EQ(frame_counters.decoded, 1); + EXPECT_EQ(frame_counters.rendered, 1); + EXPECT_EQ(frame_counters.dropped, 0); +} + +TEST(DefaultVideoQualityAnalyzerTest, NormalScenario2Receivers) { + std::unique_ptr frame_generator = + test::CreateSquareFrameGenerator(kFrameWidth, kFrameHeight, + /*type=*/absl::nullopt, + /*num_squares=*/absl::nullopt); + + constexpr char kAlice[] = "alice"; + constexpr char kBob[] = "bob"; + constexpr char kCharlie[] = "charlie"; + + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", std::vector{kAlice, kBob, kCharlie}, + kAnalyzerMaxThreadsCount); + + std::map captured_frames; + std::vector frames_order; + for (int i = 0; i < kMaxFramesInFlightPerStream; ++i) { + VideoFrame frame = NextFrame(frame_generator.get(), i); + frame.set_id(analyzer.OnFrameCaptured(kAlice, 
kStreamLabel, frame)); + frames_order.push_back(frame.id()); + captured_frames.insert({frame.id(), frame}); + analyzer.OnFramePreEncode(kAlice, frame); + SleepMs(20); + analyzer.OnFrameEncoded(kAlice, frame.id(), FakeEncode(frame), + VideoQualityAnalyzerInterface::EncoderStats()); + } + + SleepMs(50); + + for (size_t i = 1; i < frames_order.size(); i += 2) { + uint16_t frame_id = frames_order.at(i); + VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kBob, received_frame.id(), + FakeEncode(received_frame)); + SleepMs(30); + analyzer.OnFrameDecoded(kBob, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + SleepMs(10); + analyzer.OnFrameRendered(kBob, received_frame); + } + + for (size_t i = 1; i < frames_order.size(); i += 2) { + uint16_t frame_id = frames_order.at(i); + VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kCharlie, received_frame.id(), + FakeEncode(received_frame)); + SleepMs(40); + analyzer.OnFrameDecoded(kCharlie, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + SleepMs(5); + analyzer.OnFrameRendered(kCharlie, received_frame); + } + + // Give analyzer some time to process frames on async thread. The computations + // have to be fast (heavy metrics are disabled!), so if doesn't fit 100ms it + // means we have an issue! 
+ SleepMs(100); + analyzer.Stop(); + + AnalyzerStats analyzer_stats = analyzer.GetAnalyzerStats(); + EXPECT_EQ(analyzer_stats.memory_overloaded_comparisons_done, 0); + EXPECT_EQ(analyzer_stats.comparisons_done, kMaxFramesInFlightPerStream * 2); + + FrameCounters frame_counters = analyzer.GetGlobalCounters(); + EXPECT_EQ(frame_counters.captured, kMaxFramesInFlightPerStream); + EXPECT_EQ(frame_counters.received, kMaxFramesInFlightPerStream); + EXPECT_EQ(frame_counters.decoded, kMaxFramesInFlightPerStream); + EXPECT_EQ(frame_counters.rendered, kMaxFramesInFlightPerStream); + EXPECT_EQ(frame_counters.dropped, kMaxFramesInFlightPerStream); + EXPECT_EQ(analyzer.GetKnownVideoStreams().size(), 2lu); + for (auto stream_key : analyzer.GetKnownVideoStreams()) { + FrameCounters stream_conters = + analyzer.GetPerStreamCounters().at(stream_key); + // On some devices the pipeline can be too slow, so we actually can't + // force real constraints here. Let's just check that at least 1 + // frame passed the whole pipeline. 
+ EXPECT_GE(stream_conters.captured, 10); + EXPECT_GE(stream_conters.pre_encoded, 10); + EXPECT_GE(stream_conters.encoded, 10); + EXPECT_GE(stream_conters.received, 5); + EXPECT_GE(stream_conters.decoded, 5); + EXPECT_GE(stream_conters.rendered, 5); + EXPECT_GE(stream_conters.dropped, 5); + } + + std::map stats = analyzer.GetStats(); + const StatsKey kAliceBobStats(kStreamLabel, kAlice, kBob); + const StatsKey kAliceCharlieStats(kStreamLabel, kAlice, kCharlie); + EXPECT_EQ(stats.size(), 2lu); + { + auto it = stats.find(kAliceBobStats); + EXPECT_FALSE(it == stats.end()); + ASSERT_FALSE(it->second.encode_time_ms.IsEmpty()); + EXPECT_GE(it->second.encode_time_ms.GetMin(), 20); + ASSERT_FALSE(it->second.decode_time_ms.IsEmpty()); + EXPECT_GE(it->second.decode_time_ms.GetMin(), 30); + ASSERT_FALSE(it->second.resolution_of_rendered_frame.IsEmpty()); + EXPECT_GE(it->second.resolution_of_rendered_frame.GetMin(), + kFrameWidth * kFrameHeight - 1); + EXPECT_LE(it->second.resolution_of_rendered_frame.GetMax(), + kFrameWidth * kFrameHeight + 1); + } + { + auto it = stats.find(kAliceCharlieStats); + EXPECT_FALSE(it == stats.end()); + ASSERT_FALSE(it->second.encode_time_ms.IsEmpty()); + EXPECT_GE(it->second.encode_time_ms.GetMin(), 20); + ASSERT_FALSE(it->second.decode_time_ms.IsEmpty()); + EXPECT_GE(it->second.decode_time_ms.GetMin(), 30); + ASSERT_FALSE(it->second.resolution_of_rendered_frame.IsEmpty()); + EXPECT_GE(it->second.resolution_of_rendered_frame.GetMin(), + kFrameWidth * kFrameHeight - 1); + EXPECT_LE(it->second.resolution_of_rendered_frame.GetMax(), + kFrameWidth * kFrameHeight + 1); + } +} + +TEST(DefaultVideoQualityAnalyzerTest, OneFrameReceivedTwiceWith2Receivers) { + std::unique_ptr frame_generator = + test::CreateSquareFrameGenerator(kFrameWidth, kFrameHeight, + /*type=*/absl::nullopt, + /*num_squares=*/absl::nullopt); + + constexpr char kAlice[] = "alice"; + constexpr char kBob[] = "bob"; + constexpr char kCharlie[] = "charlie"; + + DefaultVideoQualityAnalyzer 
analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", std::vector{kAlice, kBob, kCharlie}, + kAnalyzerMaxThreadsCount); + + VideoFrame captured_frame = NextFrame(frame_generator.get(), 0); + captured_frame.set_id( + analyzer.OnFrameCaptured(kAlice, kStreamLabel, captured_frame)); + analyzer.OnFramePreEncode(kAlice, captured_frame); + analyzer.OnFrameEncoded(kAlice, captured_frame.id(), + FakeEncode(captured_frame), + VideoQualityAnalyzerInterface::EncoderStats()); + + VideoFrame received_frame = DeepCopy(captured_frame); + analyzer.OnFramePreDecode(kBob, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kBob, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kBob, received_frame); + + received_frame = DeepCopy(captured_frame); + analyzer.OnFramePreDecode(kBob, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kBob, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kBob, received_frame); + + // Give analyzer some time to process frames on async thread. The computations + // have to be fast (heavy metrics are disabled!), so if doesn't fit 100ms it + // means we have an issue! 
+ SleepMs(100); + analyzer.Stop(); + + AnalyzerStats stats = analyzer.GetAnalyzerStats(); + EXPECT_EQ(stats.memory_overloaded_comparisons_done, 0); + EXPECT_EQ(stats.comparisons_done, 1); + + FrameCounters frame_counters = analyzer.GetGlobalCounters(); + EXPECT_EQ(frame_counters.captured, 1); + EXPECT_EQ(frame_counters.received, 1); + EXPECT_EQ(frame_counters.decoded, 1); + EXPECT_EQ(frame_counters.rendered, 1); + EXPECT_EQ(frame_counters.dropped, 0); +} + +TEST(DefaultVideoQualityAnalyzerTest, HeavyQualityMetricsFromEqualFrames) { + std::unique_ptr frame_generator = + test::CreateSquareFrameGenerator(kFrameWidth, kFrameHeight, + /*type=*/absl::nullopt, + /*num_squares=*/absl::nullopt); + + DefaultVideoQualityAnalyzerOptions analyzer_options; + analyzer_options.heavy_metrics_computation_enabled = true; + analyzer_options.adjust_cropping_before_comparing_frames = false; + analyzer_options.max_frames_in_flight_per_stream_count = + kMaxFramesInFlightPerStream; + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + analyzer_options); + analyzer.Start("test_case", + std::vector{kSenderPeerName, kReceiverPeerName}, + kAnalyzerMaxThreadsCount); + + for (int i = 0; i < kMaxFramesInFlightPerStream; ++i) { + VideoFrame frame = NextFrame(frame_generator.get(), i); + frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, frame)); + analyzer.OnFramePreEncode(kSenderPeerName, frame); + analyzer.OnFrameEncoded(kSenderPeerName, frame.id(), FakeEncode(frame), + VideoQualityAnalyzerInterface::EncoderStats()); + + VideoFrame received_frame = DeepCopy(frame); + analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); + } + + // Give analyzer some time to process frames on async thread. 
Heavy metrics + // computation is turned on, so giving some extra time to be sure that + // computation has ended. + SleepMs(500); + analyzer.Stop(); + + AnalyzerStats stats = analyzer.GetAnalyzerStats(); + EXPECT_EQ(stats.memory_overloaded_comparisons_done, 0); + EXPECT_EQ(stats.comparisons_done, kMaxFramesInFlightPerStream); + + std::vector frames_in_flight_sizes = + GetSortedSamples(stats.frames_in_flight_left_count); + EXPECT_EQ(frames_in_flight_sizes.back().value, 0) + << ToString(frames_in_flight_sizes); + + std::map stream_stats = analyzer.GetStats(); + const StatsKey kAliceBobStats(kStreamLabel, kSenderPeerName, + kReceiverPeerName); + EXPECT_EQ(stream_stats.size(), 1lu); + + auto it = stream_stats.find(kAliceBobStats); + EXPECT_GE(it->second.psnr.GetMin(), kPerfectPSNR); + EXPECT_GE(it->second.ssim.GetMin(), kMaxSsim); +} + +TEST(DefaultVideoQualityAnalyzerTest, + HeavyQualityMetricsFromShiftedFramesWithAdjustment) { + std::unique_ptr frame_generator = + test::CreateSquareFrameGenerator(kFrameWidth, kFrameHeight, + /*type=*/absl::nullopt, + /*num_squares=*/absl::nullopt); + + DefaultVideoQualityAnalyzerOptions analyzer_options; + analyzer_options.heavy_metrics_computation_enabled = true; + analyzer_options.adjust_cropping_before_comparing_frames = true; + analyzer_options.max_frames_in_flight_per_stream_count = + kMaxFramesInFlightPerStream; + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + analyzer_options); + analyzer.Start("test_case", + std::vector{kSenderPeerName, kReceiverPeerName}, + kAnalyzerMaxThreadsCount); + + for (int i = 0; i < kMaxFramesInFlightPerStream; ++i) { + VideoFrame frame = NextFrame(frame_generator.get(), i); + frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, frame)); + analyzer.OnFramePreEncode(kSenderPeerName, frame); + analyzer.OnFrameEncoded(kSenderPeerName, frame.id(), FakeEncode(frame), + VideoQualityAnalyzerInterface::EncoderStats()); + + VideoFrame received_frame = frame; + // Shift 
frame by a few pixels. + test::CropRegion crop_region{0, 1, 3, 0}; + rtc::scoped_refptr cropped_buffer = + CropAndZoom(crop_region, received_frame.video_frame_buffer()->ToI420()); + received_frame.set_video_frame_buffer(cropped_buffer); + + analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); + } + + // Give analyzer some time to process frames on async thread. Heavy metrics + // computation is turned on, so giving some extra time to be sure that + // computation has ended. + SleepMs(500); + analyzer.Stop(); + + AnalyzerStats stats = analyzer.GetAnalyzerStats(); + EXPECT_EQ(stats.memory_overloaded_comparisons_done, 0); + EXPECT_EQ(stats.comparisons_done, kMaxFramesInFlightPerStream); + + std::vector frames_in_flight_sizes = + GetSortedSamples(stats.frames_in_flight_left_count); + EXPECT_EQ(frames_in_flight_sizes.back().value, 0) + << ToString(frames_in_flight_sizes); + + std::map stream_stats = analyzer.GetStats(); + const StatsKey kAliceBobStats(kStreamLabel, kSenderPeerName, + kReceiverPeerName); + EXPECT_EQ(stream_stats.size(), 1lu); + + auto it = stream_stats.find(kAliceBobStats); + EXPECT_GE(it->second.psnr.GetMin(), kPerfectPSNR); + EXPECT_GE(it->second.ssim.GetMin(), kMaxSsim); +} + +TEST(DefaultVideoQualityAnalyzerTest, CpuUsage) { + std::unique_ptr frame_generator = + test::CreateSquareFrameGenerator(kFrameWidth, kFrameHeight, + /*type=*/absl::nullopt, + /*num_squares=*/absl::nullopt); + + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", + std::vector{kSenderPeerName, kReceiverPeerName}, + kAnalyzerMaxThreadsCount); + + std::map captured_frames; + std::vector frames_order; + for (int i = 0; i < kMaxFramesInFlightPerStream; ++i) { + VideoFrame frame = 
NextFrame(frame_generator.get(), i); + frame.set_id( + analyzer.OnFrameCaptured(kSenderPeerName, kStreamLabel, frame)); + frames_order.push_back(frame.id()); + captured_frames.insert({frame.id(), frame}); + analyzer.OnFramePreEncode(kSenderPeerName, frame); + analyzer.OnFrameEncoded(kSenderPeerName, frame.id(), FakeEncode(frame), + VideoQualityAnalyzerInterface::EncoderStats()); + } + + // Windows CPU clock has low accuracy. We need to fake some additional load to + // be sure that the clock ticks (https://crbug.com/webrtc/12249). + FakeCPULoad(); + + for (size_t i = 1; i < frames_order.size(); i += 2) { + uint16_t frame_id = frames_order.at(i); + VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kReceiverPeerName, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kReceiverPeerName, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kReceiverPeerName, received_frame); + } + + // Give analyzer some time to process frames on async thread. The computations + // have to be fast (heavy metrics are disabled!), so if doesn't fit 100ms it + // means we have an issue! 
+ SleepMs(100); + analyzer.Stop(); + + double cpu_usage = analyzer.GetCpuUsagePercent(); + ASSERT_GT(cpu_usage, 0); + + SleepMs(100); + analyzer.Stop(); + + EXPECT_EQ(analyzer.GetCpuUsagePercent(), cpu_usage); +} + +TEST(DefaultVideoQualityAnalyzerTest, RuntimeParticipantsAdding) { + std::unique_ptr frame_generator = + test::CreateSquareFrameGenerator(kFrameWidth, kFrameHeight, + /*type=*/absl::nullopt, + /*num_squares=*/absl::nullopt); + + constexpr char kAlice[] = "alice"; + constexpr char kBob[] = "bob"; + constexpr char kCharlie[] = "charlie"; + constexpr char kKatie[] = "katie"; + + constexpr int kFramesCount = 9; + constexpr int kOneThirdFrames = kFramesCount / 3; + constexpr int kTwoThirdFrames = 2 * kOneThirdFrames; + + DefaultVideoQualityAnalyzer analyzer(Clock::GetRealTimeClock(), + AnalyzerOptionsForTest()); + analyzer.Start("test_case", {}, kAnalyzerMaxThreadsCount); + + std::map captured_frames; + std::vector frames_order; + analyzer.RegisterParticipantInCall(kAlice); + analyzer.RegisterParticipantInCall(kBob); + + // Alice is sending frames. + for (int i = 0; i < kFramesCount; ++i) { + VideoFrame frame = NextFrame(frame_generator.get(), i); + frame.set_id(analyzer.OnFrameCaptured(kAlice, kStreamLabel, frame)); + frames_order.push_back(frame.id()); + captured_frames.insert({frame.id(), frame}); + analyzer.OnFramePreEncode(kAlice, frame); + analyzer.OnFrameEncoded(kAlice, frame.id(), FakeEncode(frame), + VideoQualityAnalyzerInterface::EncoderStats()); + } + + // Bob receives one third of the sent frames. 
+ for (int i = 0; i < kOneThirdFrames; ++i) { + uint16_t frame_id = frames_order.at(i); + VideoFrame received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kBob, received_frame.id(), + FakeEncode(received_frame)); + analyzer.OnFrameDecoded(kBob, received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kBob, received_frame); + } + + analyzer.RegisterParticipantInCall(kCharlie); + analyzer.RegisterParticipantInCall(kKatie); + + // New participants were dynamically added. Bob and Charlie receive second + // third of the sent frames. Katie drops the frames. + for (int i = kOneThirdFrames; i < kTwoThirdFrames; ++i) { + uint16_t frame_id = frames_order.at(i); + VideoFrame bob_received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kBob, bob_received_frame.id(), + FakeEncode(bob_received_frame)); + analyzer.OnFrameDecoded(kBob, bob_received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kBob, bob_received_frame); + + VideoFrame charlie_received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kCharlie, charlie_received_frame.id(), + FakeEncode(charlie_received_frame)); + analyzer.OnFrameDecoded(kCharlie, charlie_received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kCharlie, charlie_received_frame); + } + + // Bob, Charlie and Katie receive the rest of the sent frames. 
+ for (int i = kTwoThirdFrames; i < kFramesCount; ++i) { + uint16_t frame_id = frames_order.at(i); + VideoFrame bob_received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kBob, bob_received_frame.id(), + FakeEncode(bob_received_frame)); + analyzer.OnFrameDecoded(kBob, bob_received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kBob, bob_received_frame); + + VideoFrame charlie_received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kCharlie, charlie_received_frame.id(), + FakeEncode(charlie_received_frame)); + analyzer.OnFrameDecoded(kCharlie, charlie_received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kCharlie, charlie_received_frame); + + VideoFrame katie_received_frame = DeepCopy(captured_frames.at(frame_id)); + analyzer.OnFramePreDecode(kKatie, katie_received_frame.id(), + FakeEncode(katie_received_frame)); + analyzer.OnFrameDecoded(kKatie, katie_received_frame, + VideoQualityAnalyzerInterface::DecoderStats()); + analyzer.OnFrameRendered(kKatie, katie_received_frame); + } + + // Give analyzer some time to process frames on async thread. The computations + // have to be fast (heavy metrics are disabled!), so if doesn't fit 100ms it + // means we have an issue! 
+ SleepMs(100); + analyzer.Stop(); + + AnalyzerStats stats = analyzer.GetAnalyzerStats(); + EXPECT_EQ(stats.memory_overloaded_comparisons_done, 0); + EXPECT_EQ(stats.comparisons_done, kFramesCount + 2 * kTwoThirdFrames); + + std::vector frames_in_flight_sizes = + GetSortedSamples(stats.frames_in_flight_left_count); + EXPECT_EQ(frames_in_flight_sizes.back().value, 0) + << ToString(frames_in_flight_sizes); + + FrameCounters frame_counters = analyzer.GetGlobalCounters(); + EXPECT_EQ(frame_counters.captured, kFramesCount); + EXPECT_EQ(frame_counters.received, 2 * kFramesCount); + EXPECT_EQ(frame_counters.decoded, 2 * kFramesCount); + EXPECT_EQ(frame_counters.rendered, 2 * kFramesCount); + EXPECT_EQ(frame_counters.dropped, kOneThirdFrames); + + EXPECT_EQ(analyzer.GetKnownVideoStreams().size(), 3lu); + const StatsKey kAliceBobStats(kStreamLabel, kAlice, kBob); + const StatsKey kAliceCharlieStats(kStreamLabel, kAlice, kCharlie); + const StatsKey kAliceKatieStats(kStreamLabel, kAlice, kKatie); + { + FrameCounters stream_conters = + analyzer.GetPerStreamCounters().at(kAliceBobStats); + EXPECT_EQ(stream_conters.captured, kFramesCount); + EXPECT_EQ(stream_conters.pre_encoded, kFramesCount); + EXPECT_EQ(stream_conters.encoded, kFramesCount); + EXPECT_EQ(stream_conters.received, kFramesCount); + EXPECT_EQ(stream_conters.decoded, kFramesCount); + EXPECT_EQ(stream_conters.rendered, kFramesCount); + } + { + FrameCounters stream_conters = + analyzer.GetPerStreamCounters().at(kAliceCharlieStats); + EXPECT_EQ(stream_conters.captured, kTwoThirdFrames); + EXPECT_EQ(stream_conters.pre_encoded, kTwoThirdFrames); + EXPECT_EQ(stream_conters.encoded, kTwoThirdFrames); + EXPECT_EQ(stream_conters.received, kTwoThirdFrames); + EXPECT_EQ(stream_conters.decoded, kTwoThirdFrames); + EXPECT_EQ(stream_conters.rendered, kTwoThirdFrames); + } + { + FrameCounters stream_conters = + analyzer.GetPerStreamCounters().at(kAliceKatieStats); + EXPECT_EQ(stream_conters.captured, kTwoThirdFrames); + 
EXPECT_EQ(stream_conters.pre_encoded, kTwoThirdFrames); + EXPECT_EQ(stream_conters.encoded, kTwoThirdFrames); + EXPECT_EQ(stream_conters.received, kOneThirdFrames); + EXPECT_EQ(stream_conters.decoded, kOneThirdFrames); + EXPECT_EQ(stream_conters.rendered, kOneThirdFrames); + } +} + } // namespace } // namespace webrtc_pc_e2e } // namespace webrtc diff --git a/test/pc/e2e/analyzer/video/encoded_image_data_injector.h b/test/pc/e2e/analyzer/video/encoded_image_data_injector.h index 8e218629fc..154e38e43f 100644 --- a/test/pc/e2e/analyzer/video/encoded_image_data_injector.h +++ b/test/pc/e2e/analyzer/video/encoded_image_data_injector.h @@ -27,11 +27,10 @@ class EncodedImageDataInjector { // Return encoded image with specified |id| and |discard| flag injected into // its payload. |discard| flag mean does analyzing decoder should discard this // encoded image because it belongs to unnecessary simulcast stream or spatial - // layer. |coding_entity_id| is unique id of decoder or encoder. + // layer. virtual EncodedImage InjectData(uint16_t id, bool discard, - const EncodedImage& source, - int coding_entity_id) = 0; + const EncodedImage& source) = 0; }; struct EncodedImageExtractionResult { @@ -47,11 +46,20 @@ class EncodedImageDataExtractor { public: virtual ~EncodedImageDataExtractor() = default; + // Invoked by framework before any image will come to the extractor. + // |expected_receivers_count| is the expected amount of receivers for each + // encoded image. + virtual void Start(int expected_receivers_count) = 0; + + // Invoked by framework when it is required to add one more receiver for + // frames. Will be invoked before that receiver will start receive data. + virtual void AddParticipantInCall() = 0; + // Returns encoded image id, extracted from payload and also encoded image // with its original payload. For concatenated spatial layers it should be the - // same id. |coding_entity_id| is unique id of decoder or encoder. 
- virtual EncodedImageExtractionResult ExtractData(const EncodedImage& source, - int coding_entity_id) = 0; + // same id. + virtual EncodedImageExtractionResult ExtractData( + const EncodedImage& source) = 0; }; } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/analyzer/video/example_video_quality_analyzer.cc b/test/pc/e2e/analyzer/video/example_video_quality_analyzer.cc index d1d1bface7..198a6cb42f 100644 --- a/test/pc/e2e/analyzer/video/example_video_quality_analyzer.cc +++ b/test/pc/e2e/analyzer/video/example_video_quality_analyzer.cc @@ -10,6 +10,7 @@ #include "test/pc/e2e/analyzer/video/example_video_quality_analyzer.h" +#include "api/array_view.h" #include "rtc_base/logging.h" namespace webrtc { @@ -18,13 +19,16 @@ namespace webrtc_pc_e2e { ExampleVideoQualityAnalyzer::ExampleVideoQualityAnalyzer() = default; ExampleVideoQualityAnalyzer::~ExampleVideoQualityAnalyzer() = default; -void ExampleVideoQualityAnalyzer::Start(std::string test_case_name, - int max_threads_count) {} +void ExampleVideoQualityAnalyzer::Start( + std::string test_case_name, + rtc::ArrayView peer_names, + int max_threads_count) {} uint16_t ExampleVideoQualityAnalyzer::OnFrameCaptured( + absl::string_view peer_name, const std::string& stream_label, const webrtc::VideoFrame& frame) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); uint16_t frame_id = next_frame_id_++; auto it = frames_in_flight_.find(frame_id); if (it == frames_in_flight_.end()) { @@ -45,69 +49,77 @@ uint16_t ExampleVideoQualityAnalyzer::OnFrameCaptured( } void ExampleVideoQualityAnalyzer::OnFramePreEncode( + absl::string_view peer_name, const webrtc::VideoFrame& frame) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); ++frames_pre_encoded_; } void ExampleVideoQualityAnalyzer::OnFrameEncoded( + absl::string_view peer_name, uint16_t frame_id, const webrtc::EncodedImage& encoded_image, const EncoderStats& stats) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); ++frames_encoded_; } void 
ExampleVideoQualityAnalyzer::OnFrameDropped( + absl::string_view peer_name, webrtc::EncodedImageCallback::DropReason reason) { RTC_LOG(INFO) << "Frame dropped by encoder"; - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); ++frames_dropped_; } void ExampleVideoQualityAnalyzer::OnFramePreDecode( + absl::string_view peer_name, uint16_t frame_id, const webrtc::EncodedImage& encoded_image) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); ++frames_received_; } void ExampleVideoQualityAnalyzer::OnFrameDecoded( + absl::string_view peer_name, const webrtc::VideoFrame& frame, const DecoderStats& stats) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); ++frames_decoded_; } void ExampleVideoQualityAnalyzer::OnFrameRendered( + absl::string_view peer_name, const webrtc::VideoFrame& frame) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); frames_in_flight_.erase(frame.id()); ++frames_rendered_; } void ExampleVideoQualityAnalyzer::OnEncoderError( + absl::string_view peer_name, const webrtc::VideoFrame& frame, int32_t error_code) { RTC_LOG(LS_ERROR) << "Failed to encode frame " << frame.id() << ". Code: " << error_code; } -void ExampleVideoQualityAnalyzer::OnDecoderError(uint16_t frame_id, +void ExampleVideoQualityAnalyzer::OnDecoderError(absl::string_view peer_name, + uint16_t frame_id, int32_t error_code) { RTC_LOG(LS_ERROR) << "Failed to decode frame " << frame_id << ". 
Code: " << error_code; } void ExampleVideoQualityAnalyzer::Stop() { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); RTC_LOG(INFO) << "There are " << frames_in_flight_.size() << " frames in flight, assuming all of them are dropped"; frames_dropped_ += frames_in_flight_.size(); } std::string ExampleVideoQualityAnalyzer::GetStreamLabel(uint16_t frame_id) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); auto it = frames_to_stream_label_.find(frame_id); RTC_DCHECK(it != frames_to_stream_label_.end()) << "Unknown frame_id=" << frame_id; @@ -115,37 +127,37 @@ std::string ExampleVideoQualityAnalyzer::GetStreamLabel(uint16_t frame_id) { } uint64_t ExampleVideoQualityAnalyzer::frames_captured() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return frames_captured_; } uint64_t ExampleVideoQualityAnalyzer::frames_pre_encoded() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return frames_pre_encoded_; } uint64_t ExampleVideoQualityAnalyzer::frames_encoded() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return frames_encoded_; } uint64_t ExampleVideoQualityAnalyzer::frames_received() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return frames_received_; } uint64_t ExampleVideoQualityAnalyzer::frames_decoded() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return frames_decoded_; } uint64_t ExampleVideoQualityAnalyzer::frames_rendered() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return frames_rendered_; } uint64_t ExampleVideoQualityAnalyzer::frames_dropped() const { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); return frames_dropped_; } diff --git a/test/pc/e2e/analyzer/video/example_video_quality_analyzer.h b/test/pc/e2e/analyzer/video/example_video_quality_analyzer.h index 0d6169f9fa..9f004396ae 100644 --- a/test/pc/e2e/analyzer/video/example_video_quality_analyzer.h +++ b/test/pc/e2e/analyzer/video/example_video_quality_analyzer.h @@ 
-16,10 +16,11 @@ #include #include +#include "api/array_view.h" #include "api/test/video_quality_analyzer_interface.h" #include "api/video/encoded_image.h" #include "api/video/video_frame.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { namespace webrtc_pc_e2e { @@ -33,21 +34,34 @@ class ExampleVideoQualityAnalyzer : public VideoQualityAnalyzerInterface { ExampleVideoQualityAnalyzer(); ~ExampleVideoQualityAnalyzer() override; - void Start(std::string test_case_name, int max_threads_count) override; - uint16_t OnFrameCaptured(const std::string& stream_label, + void Start(std::string test_case_name, + rtc::ArrayView peer_names, + int max_threads_count) override; + uint16_t OnFrameCaptured(absl::string_view peer_name, + const std::string& stream_label, const VideoFrame& frame) override; - void OnFramePreEncode(const VideoFrame& frame) override; - void OnFrameEncoded(uint16_t frame_id, + void OnFramePreEncode(absl::string_view peer_name, + const VideoFrame& frame) override; + void OnFrameEncoded(absl::string_view peer_name, + uint16_t frame_id, const EncodedImage& encoded_image, const EncoderStats& stats) override; - void OnFrameDropped(EncodedImageCallback::DropReason reason) override; - void OnFramePreDecode(uint16_t frame_id, + void OnFrameDropped(absl::string_view peer_name, + EncodedImageCallback::DropReason reason) override; + void OnFramePreDecode(absl::string_view peer_name, + uint16_t frame_id, const EncodedImage& encoded_image) override; - void OnFrameDecoded(const VideoFrame& frame, + void OnFrameDecoded(absl::string_view peer_name, + const VideoFrame& frame, const DecoderStats& stats) override; - void OnFrameRendered(const VideoFrame& frame) override; - void OnEncoderError(const VideoFrame& frame, int32_t error_code) override; - void OnDecoderError(uint16_t frame_id, int32_t error_code) override; + void OnFrameRendered(absl::string_view peer_name, + const VideoFrame& frame) override; + void 
OnEncoderError(absl::string_view peer_name, + const VideoFrame& frame, + int32_t error_code) override; + void OnDecoderError(absl::string_view peer_name, + uint16_t frame_id, + int32_t error_code) override; void Stop() override; std::string GetStreamLabel(uint16_t frame_id) override; @@ -65,7 +79,7 @@ class ExampleVideoQualityAnalyzer : public VideoQualityAnalyzerInterface { // when it will be received in peer B, so we need to guard it with lock. // Also because analyzer will serve for all video streams it can be called // from different threads inside one peer. - rtc::CriticalSection lock_; + mutable Mutex lock_; // Stores frame ids, that are currently going from one peer to another. We // need to keep them to correctly determine dropped frames and also correctly // process frame id overlap. diff --git a/test/pc/e2e/analyzer/video/id_generator.h b/test/pc/e2e/analyzer/video/id_generator.h deleted file mode 100644 index 8c988f211a..0000000000 --- a/test/pc/e2e/analyzer/video/id_generator.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef TEST_PC_E2E_ANALYZER_VIDEO_ID_GENERATOR_H_ -#define TEST_PC_E2E_ANALYZER_VIDEO_ID_GENERATOR_H_ - -#include - -namespace webrtc { -namespace webrtc_pc_e2e { - -// IdGenerator generates ids. All provided ids have to be unique. There is no -// any order guarantees for provided ids. -template -class IdGenerator { - public: - virtual ~IdGenerator() = default; - - // Returns next unique id. There is no any order guarantees for provided ids. - virtual T GetNextId() = 0; -}; - -// Generates int ids. 
It is assumed, that no more then max int value ids will be -// requested from this generator. -class IntIdGenerator : public IdGenerator { - public: - explicit IntIdGenerator(int start_value); - ~IntIdGenerator() override; - - int GetNextId() override; - - private: - std::atomic next_id_; -}; - -} // namespace webrtc_pc_e2e -} // namespace webrtc - -#endif // TEST_PC_E2E_ANALYZER_VIDEO_ID_GENERATOR_H_ diff --git a/test/pc/e2e/analyzer/video/multi_head_queue.h b/test/pc/e2e/analyzer/video/multi_head_queue.h new file mode 100644 index 0000000000..fc606d207b --- /dev/null +++ b/test/pc/e2e/analyzer/video/multi_head_queue.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef TEST_PC_E2E_ANALYZER_VIDEO_MULTI_HEAD_QUEUE_H_ +#define TEST_PC_E2E_ANALYZER_VIDEO_MULTI_HEAD_QUEUE_H_ + +#include +#include +#include + +#include "absl/types/optional.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace webrtc_pc_e2e { + +// A queue that allows more than one reader. Readers are independent, and all +// readers will see all elements; an inserted element stays in the queue until +// all readers have extracted it. Elements are copied and copying is assumed to +// be cheap. +template +class MultiHeadQueue { + public: + // Creates queue with exactly |readers_count| readers. + explicit MultiHeadQueue(size_t readers_count) { + for (size_t i = 0; i < readers_count; ++i) { + queues_.push_back(std::deque()); + } + } + + // Creates a copy of an existing head. Complexity O(MultiHeadQueue::size()). 
+ void AddHead(size_t copy_index) { queues_.push_back(queues_[copy_index]); } + + // Add value to the end of the queue. Complexity O(readers_count). + void PushBack(T value) { + for (auto& queue : queues_) { + queue.push_back(value); + } + } + + // Extract element from specified head. Complexity O(1). + absl::optional PopFront(size_t index) { + RTC_CHECK_LT(index, queues_.size()); + if (queues_[index].empty()) { + return absl::nullopt; + } + T out = queues_[index].front(); + queues_[index].pop_front(); + return out; + } + + // Returns element at specified head. Complexity O(1). + absl::optional Front(size_t index) const { + RTC_CHECK_LT(index, queues_.size()); + if (queues_[index].empty()) { + return absl::nullopt; + } + return queues_[index].front(); + } + + // Returns true if for specified head there are no more elements in the queue + // or false otherwise. Complexity O(1). + bool IsEmpty(size_t index) const { + RTC_CHECK_LT(index, queues_.size()); + return queues_[index].empty(); + } + + // Returns size of the longest queue between all readers. + // Complexity O(readers_count). + size_t size() const { + size_t size = 0; + for (auto& queue : queues_) { + if (queue.size() > size) { + size = queue.size(); + } + } + return size; + } + + // Returns size of the specified queue. Complexity O(1). + size_t size(size_t index) const { + RTC_CHECK_LT(index, queues_.size()); + return queues_[index].size(); + } + + size_t readers_count() const { return queues_.size(); } + + private: + std::vector> queues_; +}; + +} // namespace webrtc_pc_e2e +} // namespace webrtc + +#endif // TEST_PC_E2E_ANALYZER_VIDEO_MULTI_HEAD_QUEUE_H_ diff --git a/test/pc/e2e/analyzer/video/multi_head_queue_test.cc b/test/pc/e2e/analyzer/video/multi_head_queue_test.cc new file mode 100644 index 0000000000..0025d1eace --- /dev/null +++ b/test/pc/e2e/analyzer/video/multi_head_queue_test.cc @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "test/pc/e2e/analyzer/video/multi_head_queue.h" +#include "absl/types/optional.h" +#include "test/gtest.h" + +namespace webrtc { +namespace webrtc_pc_e2e { +namespace { + +TEST(MultiHeadQueueTest, GetOnEmpty) { + MultiHeadQueue queue = MultiHeadQueue(10); + EXPECT_TRUE(queue.IsEmpty(0)); + for (int i = 0; i < 10; ++i) { + EXPECT_FALSE(queue.PopFront(i).has_value()); + EXPECT_FALSE(queue.Front(i).has_value()); + } +} + +TEST(MultiHeadQueueTest, SingleHeadOneAddOneRemove) { + MultiHeadQueue queue = MultiHeadQueue(1); + queue.PushBack(1); + EXPECT_EQ(queue.size(), 1lu); + EXPECT_TRUE(queue.Front(0).has_value()); + EXPECT_EQ(queue.Front(0).value(), 1); + absl::optional value = queue.PopFront(0); + EXPECT_TRUE(value.has_value()); + EXPECT_EQ(value.value(), 1); + EXPECT_EQ(queue.size(), 0lu); + EXPECT_TRUE(queue.IsEmpty(0)); +} + +TEST(MultiHeadQueueTest, SingleHead) { + MultiHeadQueue queue = MultiHeadQueue(1); + for (size_t i = 0; i < 10; ++i) { + queue.PushBack(i); + EXPECT_EQ(queue.size(), i + 1); + } + for (size_t i = 0; i < 10; ++i) { + absl::optional value = queue.PopFront(0); + EXPECT_EQ(queue.size(), 10 - i - 1); + ASSERT_TRUE(value.has_value()); + EXPECT_EQ(value.value(), i); + } +} + +TEST(MultiHeadQueueTest, ThreeHeadsAddAllRemoveAllPerHead) { + MultiHeadQueue queue = MultiHeadQueue(3); + for (size_t i = 0; i < 10; ++i) { + queue.PushBack(i); + EXPECT_EQ(queue.size(), i + 1); + } + for (size_t i = 0; i < 10; ++i) { + absl::optional value = queue.PopFront(0); + EXPECT_EQ(queue.size(), 10lu); + ASSERT_TRUE(value.has_value()); + EXPECT_EQ(value.value(), i); + } + for (size_t i = 0; i < 10; ++i) { + absl::optional 
value = queue.PopFront(1); + EXPECT_EQ(queue.size(), 10lu); + ASSERT_TRUE(value.has_value()); + EXPECT_EQ(value.value(), i); + } + for (size_t i = 0; i < 10; ++i) { + absl::optional value = queue.PopFront(2); + EXPECT_EQ(queue.size(), 10 - i - 1); + ASSERT_TRUE(value.has_value()); + EXPECT_EQ(value.value(), i); + } +} + +TEST(MultiHeadQueueTest, ThreeHeadsAddAllRemoveAll) { + MultiHeadQueue queue = MultiHeadQueue(3); + for (size_t i = 0; i < 10; ++i) { + queue.PushBack(i); + EXPECT_EQ(queue.size(), i + 1); + } + for (size_t i = 0; i < 10; ++i) { + absl::optional value1 = queue.PopFront(0); + absl::optional value2 = queue.PopFront(1); + absl::optional value3 = queue.PopFront(2); + EXPECT_EQ(queue.size(), 10 - i - 1); + ASSERT_TRUE(value1.has_value()); + ASSERT_TRUE(value2.has_value()); + ASSERT_TRUE(value3.has_value()); + EXPECT_EQ(value1.value(), i); + EXPECT_EQ(value2.value(), i); + EXPECT_EQ(value3.value(), i); + } +} + +TEST(MultiHeadQueueTest, HeadCopy) { + MultiHeadQueue queue = MultiHeadQueue(1); + for (size_t i = 0; i < 10; ++i) { + queue.PushBack(i); + EXPECT_EQ(queue.size(), i + 1); + } + queue.AddHead(0); + EXPECT_EQ(queue.readers_count(), 2u); + for (size_t i = 0; i < 10; ++i) { + absl::optional value1 = queue.PopFront(0); + absl::optional value2 = queue.PopFront(1); + EXPECT_EQ(queue.size(), 10 - i - 1); + ASSERT_TRUE(value1.has_value()); + ASSERT_TRUE(value2.has_value()); + EXPECT_EQ(value1.value(), i); + EXPECT_EQ(value2.value(), i); + } +} + +} // namespace +} // namespace webrtc_pc_e2e +} // namespace webrtc diff --git a/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.cc b/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.cc index 228ab8ac02..68b76cd37d 100644 --- a/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.cc +++ b/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.cc @@ -15,6 +15,7 @@ #include #include +#include "absl/strings/string_view.h" #include "absl/types/optional.h" #include 
"api/video/i420_buffer.h" #include "modules/video_coding/include/video_error_codes.h" @@ -25,11 +26,11 @@ namespace webrtc { namespace webrtc_pc_e2e { QualityAnalyzingVideoDecoder::QualityAnalyzingVideoDecoder( - int id, + absl::string_view peer_name, std::unique_ptr delegate, EncodedImageDataExtractor* extractor, VideoQualityAnalyzerInterface* analyzer) - : id_(id), + : peer_name_(peer_name), implementation_name_("AnalyzingDecoder-" + std::string(delegate->ImplementationName())), delegate_(std::move(delegate)), @@ -53,7 +54,7 @@ int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image, // owner of original buffer will be responsible for deleting it, or extractor // can create a new buffer. In such case extractor will be responsible for // deleting it. - EncodedImageExtractionResult out = extractor_->ExtractData(input_image, id_); + EncodedImageExtractionResult out = extractor_->ExtractData(input_image); if (out.discard) { // To partly emulate behavior of Selective Forwarding Unit (SFU) in the @@ -76,7 +77,7 @@ int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image, EncodedImage* origin_image; { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); // Store id to be able to retrieve it in analyzing callback. timestamp_to_frame_id_.insert({input_image.Timestamp(), out.id}); // Store encoded image to prevent its destruction while it is used in @@ -87,17 +88,17 @@ int32_t QualityAnalyzingVideoDecoder::Decode(const EncodedImage& input_image, // We can safely dereference |origin_image|, because it can be removed from // the map only after |delegate_| Decode method will be invoked. Image will be // removed inside DecodedImageCallback, which can be done on separate thread. 
- analyzer_->OnFramePreDecode(out.id, *origin_image); + analyzer_->OnFramePreDecode(peer_name_, out.id, *origin_image); int32_t result = delegate_->Decode(*origin_image, missing_frames, render_time_ms); if (result != WEBRTC_VIDEO_CODEC_OK) { // If delegate decoder failed, then cleanup data for this image. { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); timestamp_to_frame_id_.erase(input_image.Timestamp()); decoding_images_.erase(out.id); } - analyzer_->OnDecoderError(out.id, result); + analyzer_->OnDecoderError(peer_name_, out.id, result); } return result; } @@ -113,15 +114,17 @@ int32_t QualityAnalyzingVideoDecoder::Release() { // frames, so we don't take a lock to prevent deadlock. int32_t result = delegate_->Release(); - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); analyzing_callback_->SetDelegateCallback(nullptr); timestamp_to_frame_id_.clear(); decoding_images_.clear(); return result; } -bool QualityAnalyzingVideoDecoder::PrefersLateDecoding() const { - return delegate_->PrefersLateDecoding(); +VideoDecoder::DecoderInfo QualityAnalyzingVideoDecoder::GetDecoderInfo() const { + DecoderInfo info = delegate_->GetDecoderInfo(); + info.implementation_name = implementation_name_; + return info; } const char* QualityAnalyzingVideoDecoder::ImplementationName() const { @@ -135,7 +138,7 @@ QualityAnalyzingVideoDecoder::DecoderCallback::~DecoderCallback() = default; void QualityAnalyzingVideoDecoder::DecoderCallback::SetDelegateCallback( DecodedImageCallback* delegate) { - rtc::CritScope crit(&callback_lock_); + MutexLock lock(&callback_lock_); delegate_callback_ = delegate; } @@ -147,7 +150,7 @@ int32_t QualityAnalyzingVideoDecoder::DecoderCallback::Decoded( decoder_->OnFrameDecoded(&decodedImage, /*decode_time_ms=*/absl::nullopt, /*qp=*/absl::nullopt); - rtc::CritScope crit(&callback_lock_); + MutexLock lock(&callback_lock_); RTC_DCHECK(delegate_callback_); return delegate_callback_->Decoded(decodedImage); } @@ -157,7 +160,7 @@ int32_t 
QualityAnalyzingVideoDecoder::DecoderCallback::Decoded( int64_t decode_time_ms) { decoder_->OnFrameDecoded(&decodedImage, decode_time_ms, /*qp=*/absl::nullopt); - rtc::CritScope crit(&callback_lock_); + MutexLock lock(&callback_lock_); RTC_DCHECK(delegate_callback_); return delegate_callback_->Decoded(decodedImage, decode_time_ms); } @@ -168,7 +171,7 @@ void QualityAnalyzingVideoDecoder::DecoderCallback::Decoded( absl::optional qp) { decoder_->OnFrameDecoded(&decodedImage, decode_time_ms, qp); - rtc::CritScope crit(&callback_lock_); + MutexLock lock(&callback_lock_); RTC_DCHECK(delegate_callback_); delegate_callback_->Decoded(decodedImage, decode_time_ms, qp); } @@ -183,7 +186,7 @@ QualityAnalyzingVideoDecoder::DecoderCallback::IrrelevantSimulcastStreamDecoded( .set_timestamp_rtp(timestamp_ms) .set_id(frame_id) .build(); - rtc::CritScope crit(&callback_lock_); + MutexLock lock(&callback_lock_); RTC_DCHECK(delegate_callback_); delegate_callback_->Decoded(dummy_frame, absl::nullopt, absl::nullopt); return WEBRTC_VIDEO_CODEC_OK; @@ -204,7 +207,7 @@ void QualityAnalyzingVideoDecoder::OnFrameDecoded( absl::optional qp) { uint16_t frame_id; { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); auto it = timestamp_to_frame_id_.find(frame->timestamp()); if (it == timestamp_to_frame_id_.end()) { // Ensure, that we have info about this frame. 
It can happen that for some @@ -224,16 +227,16 @@ void QualityAnalyzingVideoDecoder::OnFrameDecoded( frame->set_id(frame_id); VideoQualityAnalyzerInterface::DecoderStats stats; stats.decode_time_ms = decode_time_ms; - analyzer_->OnFrameDecoded(*frame, stats); + analyzer_->OnFrameDecoded(peer_name_, *frame, stats); } QualityAnalyzingVideoDecoderFactory::QualityAnalyzingVideoDecoderFactory( + absl::string_view peer_name, std::unique_ptr delegate, - IdGenerator* id_generator, EncodedImageDataExtractor* extractor, VideoQualityAnalyzerInterface* analyzer) - : delegate_(std::move(delegate)), - id_generator_(id_generator), + : peer_name_(peer_name), + delegate_(std::move(delegate)), extractor_(extractor), analyzer_(analyzer) {} QualityAnalyzingVideoDecoderFactory::~QualityAnalyzingVideoDecoderFactory() = @@ -249,17 +252,7 @@ QualityAnalyzingVideoDecoderFactory::CreateVideoDecoder( const SdpVideoFormat& format) { std::unique_ptr decoder = delegate_->CreateVideoDecoder(format); return std::make_unique( - id_generator_->GetNextId(), std::move(decoder), extractor_, analyzer_); -} - -std::unique_ptr -QualityAnalyzingVideoDecoderFactory::LegacyCreateVideoDecoder( - const SdpVideoFormat& format, - const std::string& receive_stream_id) { - std::unique_ptr decoder = - delegate_->LegacyCreateVideoDecoder(format, receive_stream_id); - return std::make_unique( - id_generator_->GetNextId(), std::move(decoder), extractor_, analyzer_); + peer_name_, std::move(decoder), extractor_, analyzer_); } } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.h b/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.h index 5cbc882226..e150c91cb4 100644 --- a/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.h +++ b/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.h @@ -16,15 +16,15 @@ #include #include +#include "absl/strings/string_view.h" #include "api/test/video_quality_analyzer_interface.h" #include 
"api/video/encoded_image.h" #include "api/video/video_frame.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_decoder.h" #include "api/video_codecs/video_decoder_factory.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "test/pc/e2e/analyzer/video/encoded_image_data_injector.h" -#include "test/pc/e2e/analyzer/video/id_generator.h" namespace webrtc { namespace webrtc_pc_e2e { @@ -45,14 +45,11 @@ namespace webrtc_pc_e2e { // callback, where video analyzer will be called again and then decoded frame // will be passed to origin callback, provided by user. // -// Quality decoder registers its own callback in origin decoder at the same -// time, when user registers his callback in quality decoder. +// Quality decoder registers its own callback in origin decoder, at the same +// time the user registers their callback in quality decoder. class QualityAnalyzingVideoDecoder : public VideoDecoder { public: - // Creates analyzing decoder. |id| is unique coding entity id, that will - // be used to distinguish all encoders and decoders inside - // EncodedImageDataInjector and EncodedImageIdExtracor. 
- QualityAnalyzingVideoDecoder(int id, + QualityAnalyzingVideoDecoder(absl::string_view peer_name, std::unique_ptr delegate, EncodedImageDataExtractor* extractor, VideoQualityAnalyzerInterface* analyzer); @@ -67,7 +64,7 @@ class QualityAnalyzingVideoDecoder : public VideoDecoder { int32_t RegisterDecodeCompleteCallback( DecodedImageCallback* callback) override; int32_t Release() override; - bool PrefersLateDecoding() const override; + DecoderInfo GetDecoderInfo() const override; const char* ImplementationName() const override; private: @@ -95,7 +92,7 @@ class QualityAnalyzingVideoDecoder : public VideoDecoder { rtc::scoped_refptr dummy_frame_buffer_; - rtc::CriticalSection callback_lock_; + Mutex callback_lock_; DecodedImageCallback* delegate_callback_ RTC_GUARDED_BY(callback_lock_); }; @@ -103,7 +100,7 @@ class QualityAnalyzingVideoDecoder : public VideoDecoder { absl::optional decode_time_ms, absl::optional qp); - const int id_; + const std::string peer_name_; const std::string implementation_name_; std::unique_ptr delegate_; EncodedImageDataExtractor* const extractor_; @@ -113,7 +110,7 @@ class QualityAnalyzingVideoDecoder : public VideoDecoder { // VideoDecoder interface assumes async delivery of decoded video frames. // This lock is used to protect shared state, that have to be propagated // from received EncodedImage to resulted VideoFrame. - rtc::CriticalSection lock_; + Mutex lock_; std::map timestamp_to_frame_id_ RTC_GUARDED_BY(lock_); // Stores currently being decoded images by frame id. 
Because @@ -129,8 +126,8 @@ class QualityAnalyzingVideoDecoder : public VideoDecoder { class QualityAnalyzingVideoDecoderFactory : public VideoDecoderFactory { public: QualityAnalyzingVideoDecoderFactory( + absl::string_view peer_name, std::unique_ptr delegate, - IdGenerator* id_generator, EncodedImageDataExtractor* extractor, VideoQualityAnalyzerInterface* analyzer); ~QualityAnalyzingVideoDecoderFactory() override; @@ -139,13 +136,10 @@ class QualityAnalyzingVideoDecoderFactory : public VideoDecoderFactory { std::vector GetSupportedFormats() const override; std::unique_ptr CreateVideoDecoder( const SdpVideoFormat& format) override; - std::unique_ptr LegacyCreateVideoDecoder( - const SdpVideoFormat& format, - const std::string& receive_stream_id) override; private: + const std::string peer_name_; std::unique_ptr delegate_; - IdGenerator* const id_generator_; EncodedImageDataExtractor* const extractor_; VideoQualityAnalyzerInterface* const analyzer_; }; diff --git a/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.cc b/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.cc index 2e7b8f4152..5b8a571cd0 100644 --- a/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.cc +++ b/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.cc @@ -14,10 +14,10 @@ #include #include +#include "absl/strings/string_view.h" #include "api/video/video_codec_type.h" #include "api/video_codecs/video_encoder.h" #include "modules/video_coding/include/video_error_codes.h" -#include "rtc_base/critical_section.h" #include "rtc_base/logging.h" namespace webrtc { @@ -53,13 +53,13 @@ std::pair GetMinMaxBitratesBps(const VideoCodec& codec, } // namespace QualityAnalyzingVideoEncoder::QualityAnalyzingVideoEncoder( - int id, + absl::string_view peer_name, std::unique_ptr delegate, double bitrate_multiplier, std::map> stream_required_spatial_index, EncodedImageDataInjector* injector, VideoQualityAnalyzerInterface* analyzer) - : id_(id), + : peer_name_(peer_name), 
delegate_(std::move(delegate)), bitrate_multiplier_(bitrate_multiplier), stream_required_spatial_index_(std::move(stream_required_spatial_index)), @@ -77,7 +77,7 @@ void QualityAnalyzingVideoEncoder::SetFecControllerOverride( int32_t QualityAnalyzingVideoEncoder::InitEncode( const VideoCodec* codec_settings, const Settings& settings) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); codec_settings_ = *codec_settings; mode_ = SimulcastMode::kNormal; if (codec_settings->codecType == kVideoCodecVP9) { @@ -108,7 +108,7 @@ int32_t QualityAnalyzingVideoEncoder::RegisterEncodeCompleteCallback( EncodedImageCallback* callback) { // We need to get a lock here because delegate_callback can be hypothetically // accessed from different thread (encoder one) concurrently. - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); delegate_callback_ = callback; return delegate_->RegisterEncodeCompleteCallback(this); } @@ -118,7 +118,7 @@ int32_t QualityAnalyzingVideoEncoder::Release() { // frames, so we don't take a lock to prevent deadlock. int32_t result = delegate_->Release(); - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); delegate_callback_ = nullptr; return result; } @@ -127,19 +127,19 @@ int32_t QualityAnalyzingVideoEncoder::Encode( const VideoFrame& frame, const std::vector* frame_types) { { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); // Store id to be able to retrieve it in analyzing callback. timestamp_to_frame_id_list_.push_back({frame.timestamp(), frame.id()}); // If this list is growing, it means that we are not receiving new encoded // images from encoder. So it should be a bug in setup on in the encoder. RTC_DCHECK_LT(timestamp_to_frame_id_list_.size(), kMaxFrameInPipelineCount); } - analyzer_->OnFramePreEncode(frame); + analyzer_->OnFramePreEncode(peer_name_, frame); int32_t result = delegate_->Encode(frame, frame_types); if (result != WEBRTC_VIDEO_CODEC_OK) { // If origin encoder failed, then cleanup data for this frame. 
{ - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); // The timestamp-frame_id pair can be not the last one, so we need to // find it first and then remove. We will search from the end, because // usually it will be the last or close to the last one. @@ -152,7 +152,7 @@ int32_t QualityAnalyzingVideoEncoder::Encode( } } } - analyzer_->OnEncoderError(frame, result); + analyzer_->OnEncoderError(peer_name_, frame, result); } return result; } @@ -162,7 +162,7 @@ void QualityAnalyzingVideoEncoder::SetRates( RTC_DCHECK_GT(bitrate_multiplier_, 0.0); if (fabs(bitrate_multiplier_ - kNoMultiplier) < kEps) { { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); bitrate_allocation_ = parameters.bitrate; } return delegate_->SetRates(parameters); @@ -205,7 +205,7 @@ void QualityAnalyzingVideoEncoder::SetRates( RateControlParameters adjusted_params = parameters; adjusted_params.bitrate = multiplied_allocation; { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); bitrate_allocation_ = adjusted_params.bitrate; } return delegate_->SetRates(adjusted_params); @@ -230,13 +230,12 @@ VideoEncoder::EncoderInfo QualityAnalyzingVideoEncoder::GetEncoderInfo() const { // pair - remove the front pair and got to the step 1. EncodedImageCallback::Result QualityAnalyzingVideoEncoder::OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) { + const CodecSpecificInfo* codec_specific_info) { uint16_t frame_id; bool discard = false; uint32_t target_encode_bitrate = 0; { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); std::pair timestamp_frame_id; while (!timestamp_to_frame_id_list_.empty()) { timestamp_frame_id = timestamp_to_frame_id_list_.front(); @@ -277,7 +276,7 @@ EncodedImageCallback::Result QualityAnalyzingVideoEncoder::OnEncodedImage( // not discarded layers have to be passed. 
VideoQualityAnalyzerInterface::EncoderStats stats; stats.target_encode_bitrate = target_encode_bitrate; - analyzer_->OnFrameEncoded(frame_id, encoded_image, stats); + analyzer_->OnFrameEncoded(peer_name_, frame_id, encoded_image, stats); } // Image data injector injects frame id and discard flag into provided @@ -286,19 +285,18 @@ EncodedImageCallback::Result QualityAnalyzingVideoEncoder::OnEncodedImage( // it) or b) a new buffer (in such case injector will be responsible for // deleting it). const EncodedImage& image = - injector_->InjectData(frame_id, discard, encoded_image, id_); + injector_->InjectData(frame_id, discard, encoded_image); { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); RTC_DCHECK(delegate_callback_); - return delegate_callback_->OnEncodedImage(image, codec_specific_info, - fragmentation); + return delegate_callback_->OnEncodedImage(image, codec_specific_info); } } void QualityAnalyzingVideoEncoder::OnDroppedFrame( EncodedImageCallback::DropReason reason) { - rtc::CritScope crit(&lock_); - analyzer_->OnFrameDropped(reason); + MutexLock lock(&lock_); + analyzer_->OnFrameDropped(peer_name_, reason); RTC_DCHECK(delegate_callback_); delegate_callback_->OnDroppedFrame(reason); } @@ -348,16 +346,16 @@ bool QualityAnalyzingVideoEncoder::ShouldDiscard( } QualityAnalyzingVideoEncoderFactory::QualityAnalyzingVideoEncoderFactory( + absl::string_view peer_name, std::unique_ptr delegate, double bitrate_multiplier, std::map> stream_required_spatial_index, - IdGenerator* id_generator, EncodedImageDataInjector* injector, VideoQualityAnalyzerInterface* analyzer) - : delegate_(std::move(delegate)), + : peer_name_(peer_name), + delegate_(std::move(delegate)), bitrate_multiplier_(bitrate_multiplier), stream_required_spatial_index_(std::move(stream_required_spatial_index)), - id_generator_(id_generator), injector_(injector), analyzer_(analyzer) {} QualityAnalyzingVideoEncoderFactory::~QualityAnalyzingVideoEncoderFactory() = @@ -378,9 +376,8 @@ 
std::unique_ptr QualityAnalyzingVideoEncoderFactory::CreateVideoEncoder( const SdpVideoFormat& format) { return std::make_unique( - id_generator_->GetNextId(), delegate_->CreateVideoEncoder(format), - bitrate_multiplier_, stream_required_spatial_index_, injector_, - analyzer_); + peer_name_, delegate_->CreateVideoEncoder(format), bitrate_multiplier_, + stream_required_spatial_index_, injector_, analyzer_); } } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h b/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h index 3307dc7325..2ba8bdcb38 100644 --- a/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h +++ b/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h @@ -16,15 +16,15 @@ #include #include +#include "absl/strings/string_view.h" #include "api/test/video_quality_analyzer_interface.h" #include "api/video/video_frame.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/video_encoder_factory.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "test/pc/e2e/analyzer/video/encoded_image_data_injector.h" -#include "test/pc/e2e/analyzer/video/id_generator.h" namespace webrtc { namespace webrtc_pc_e2e { @@ -49,16 +49,13 @@ constexpr int kAnalyzeAnySpatialStream = -1; // injected into EncodedImage with passed EncodedImageDataInjector. Then new // EncodedImage will be passed to origin callback, provided by user. // -// Quality encoder registers its own callback in origin encoder at the same -// time, when user registers his callback in quality encoder. +// Quality encoder registers its own callback in origin encoder, at the same +// time the user registers their callback in quality encoder. class QualityAnalyzingVideoEncoder : public VideoEncoder, public EncodedImageCallback { public: - // Creates analyzing encoder. 
|id| is unique coding entity id, that will - // be used to distinguish all encoders and decoders inside - // EncodedImageDataInjector and EncodedImageIdExtracor. QualityAnalyzingVideoEncoder( - int id, + absl::string_view peer_name, std::unique_ptr delegate, double bitrate_multiplier, std::map> stream_required_spatial_index, @@ -82,8 +79,7 @@ class QualityAnalyzingVideoEncoder : public VideoEncoder, // Methods of EncodedImageCallback interface. EncodedImageCallback::Result OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override; + const CodecSpecificInfo* codec_specific_info) override; void OnDroppedFrame(DropReason reason) override; private: @@ -138,7 +134,7 @@ class QualityAnalyzingVideoEncoder : public VideoEncoder, bool ShouldDiscard(uint16_t frame_id, const EncodedImage& encoded_image) RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); - const int id_; + const std::string peer_name_; std::unique_ptr delegate_; const double bitrate_multiplier_; // Contains mapping from stream label to optional spatial index. @@ -154,7 +150,7 @@ class QualityAnalyzingVideoEncoder : public VideoEncoder, // VideoEncoder interface assumes async delivery of encoded images. // This lock is used to protect shared state, that have to be propagated // from received VideoFrame to resulted EncodedImage. 
- rtc::CriticalSection lock_; + Mutex lock_; VideoCodec codec_settings_; SimulcastMode mode_ RTC_GUARDED_BY(lock_); @@ -170,10 +166,10 @@ class QualityAnalyzingVideoEncoder : public VideoEncoder, class QualityAnalyzingVideoEncoderFactory : public VideoEncoderFactory { public: QualityAnalyzingVideoEncoderFactory( + absl::string_view peer_name, std::unique_ptr delegate, double bitrate_multiplier, std::map> stream_required_spatial_index, - IdGenerator* id_generator, EncodedImageDataInjector* injector, VideoQualityAnalyzerInterface* analyzer); ~QualityAnalyzingVideoEncoderFactory() override; @@ -186,10 +182,10 @@ class QualityAnalyzingVideoEncoderFactory : public VideoEncoderFactory { const SdpVideoFormat& format) override; private: + const std::string peer_name_; std::unique_ptr delegate_; const double bitrate_multiplier_; std::map> stream_required_spatial_index_; - IdGenerator* const id_generator_; EncodedImageDataInjector* const injector_; VideoQualityAnalyzerInterface* const analyzer_; }; diff --git a/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.cc b/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.cc index ec0d26b780..d7ee0f41b9 100644 --- a/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.cc +++ b/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.cc @@ -19,13 +19,6 @@ namespace webrtc { namespace webrtc_pc_e2e { -namespace { - -// Number of bytes from the beginning of the EncodedImage buffer that will be -// used to store frame id and sub id. 
-constexpr size_t kUsedBufferSize = 3; - -} // namespace SingleProcessEncodedImageDataInjector::SingleProcessEncodedImageDataInjector() = default; @@ -35,60 +28,77 @@ SingleProcessEncodedImageDataInjector:: EncodedImage SingleProcessEncodedImageDataInjector::InjectData( uint16_t id, bool discard, - const EncodedImage& source, - int coding_entity_id) { - RTC_CHECK(source.size() >= kUsedBufferSize); + const EncodedImage& source) { + RTC_CHECK(source.size() >= ExtractionInfo::kUsedBufferSize); ExtractionInfo info; - info.length = source.size(); info.discard = discard; - size_t insertion_pos = source.size() - kUsedBufferSize; - memcpy(info.origin_data, &source.data()[insertion_pos], kUsedBufferSize); + size_t insertion_pos = source.size() - ExtractionInfo::kUsedBufferSize; + memcpy(info.origin_data, &source.data()[insertion_pos], + ExtractionInfo::kUsedBufferSize); { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); // Will create new one if missed. ExtractionInfoVector& ev = extraction_cache_[id]; info.sub_id = ev.next_sub_id++; ev.infos[info.sub_id] = info; } + auto buffer = EncodedImageBuffer::Create(source.data(), source.size()); + buffer->data()[insertion_pos] = id & 0x00ff; + buffer->data()[insertion_pos + 1] = (id & 0xff00) >> 8; + buffer->data()[insertion_pos + 2] = info.sub_id; + EncodedImage out = source; - out.data()[insertion_pos] = id & 0x00ff; - out.data()[insertion_pos + 1] = (id & 0xff00) >> 8; - out.data()[insertion_pos + 2] = info.sub_id; + out.SetEncodedData(buffer); return out; } +void SingleProcessEncodedImageDataInjector::AddParticipantInCall() { + MutexLock crit(&lock_); + expected_receivers_count_++; +} + EncodedImageExtractionResult SingleProcessEncodedImageDataInjector::ExtractData( - const EncodedImage& source, - int coding_entity_id) { + const EncodedImage& source) { + size_t size = source.size(); + auto buffer = EncodedImageBuffer::Create(source.data(), source.size()); EncodedImage out = source; + out.SetEncodedData(buffer); + + 
std::vector frame_sizes; + std::vector frame_sl_index; + size_t max_spatial_index = out.SpatialIndex().value_or(0); + for (size_t i = 0; i <= max_spatial_index; ++i) { + auto frame_size = source.SpatialLayerFrameSize(i); + if (frame_size.value_or(0)) { + frame_sl_index.push_back(i); + frame_sizes.push_back(frame_size.value()); + } + } + if (frame_sizes.empty()) { + frame_sizes.push_back(size); + } - // Both |source| and |out| image will share the same buffer for payload or - // out will have a copy for it, so we can operate on the |out| buffer only. - uint8_t* buffer = out.data(); - size_t size = out.size(); - - // |pos| is pointing to end of current encoded image. - size_t pos = size - 1; + size_t prev_frames_size = 0; absl::optional id = absl::nullopt; bool discard = true; std::vector extraction_infos; - // Go through whole buffer and find all related extraction infos in - // order from 1st encoded image to the last. - while (true) { - size_t insertion_pos = pos - kUsedBufferSize + 1; + for (size_t frame_size : frame_sizes) { + size_t insertion_pos = + prev_frames_size + frame_size - ExtractionInfo::kUsedBufferSize; // Extract frame id from first 2 bytes starting from insertion pos. - uint16_t next_id = buffer[insertion_pos] + (buffer[insertion_pos + 1] << 8); + uint16_t next_id = buffer->data()[insertion_pos] + + (buffer->data()[insertion_pos + 1] << 8); // Extract frame sub id from second 3 byte starting from insertion pos. 
- uint8_t sub_id = buffer[insertion_pos + 2]; + uint8_t sub_id = buffer->data()[insertion_pos + 2]; RTC_CHECK(!id || *id == next_id) << "Different frames encoded into single encoded image: " << *id << " vs " << next_id; id = next_id; ExtractionInfo info; { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); auto ext_vector_it = extraction_cache_.find(next_id); RTC_CHECK(ext_vector_it != extraction_cache_.end()) << "Unknown frame_id=" << next_id; @@ -96,44 +106,53 @@ EncodedImageExtractionResult SingleProcessEncodedImageDataInjector::ExtractData( auto info_it = ext_vector_it->second.infos.find(sub_id); RTC_CHECK(info_it != ext_vector_it->second.infos.end()) << "Unknown sub_id=" << sub_id << " for frame_id=" << next_id; + info_it->second.received_count++; info = info_it->second; - ext_vector_it->second.infos.erase(info_it); + if (info.received_count == expected_receivers_count_) { + ext_vector_it->second.infos.erase(info_it); + } } - extraction_infos.push_back(info); // We need to discard encoded image only if all concatenated encoded images // have to be discarded. discard = discard && info.discard; - if (pos < info.length) { - break; - } - pos -= info.length; + + extraction_infos.push_back(info); + prev_frames_size += frame_size; } RTC_CHECK(id); - std::reverse(extraction_infos.begin(), extraction_infos.end()); + if (discard) { out.set_size(0); + for (size_t i = 0; i <= max_spatial_index; ++i) { + out.SetSpatialLayerFrameSize(i, 0); + } return EncodedImageExtractionResult{*id, out, true}; } // Make a pass from begin to end to restore origin payload and erase discarded // encoded images. 
- pos = 0; - auto extraction_infos_it = extraction_infos.begin(); - while (pos < size) { - RTC_DCHECK(extraction_infos_it != extraction_infos.end()); - const ExtractionInfo& info = *extraction_infos_it; + size_t pos = 0; + for (size_t frame_index = 0; frame_index < frame_sizes.size(); + ++frame_index) { + RTC_CHECK(pos < size); + const size_t frame_size = frame_sizes[frame_index]; + const ExtractionInfo& info = extraction_infos[frame_index]; if (info.discard) { // If this encoded image is marked to be discarded - erase it's payload // from the buffer. - memmove(&buffer[pos], &buffer[pos + info.length], - size - pos - info.length); - size -= info.length; + memmove(&buffer->data()[pos], &buffer->data()[pos + frame_size], + size - pos - frame_size); + RTC_CHECK_LT(frame_index, frame_sl_index.size()) + << "codec doesn't support discard option or the image, that was " + "supposed to be discarded, is lost"; + out.SetSpatialLayerFrameSize(frame_sl_index[frame_index], 0); + size -= frame_size; } else { - memcpy(&buffer[pos + info.length - kUsedBufferSize], info.origin_data, - kUsedBufferSize); - pos += info.length; + memcpy( + &buffer->data()[pos + frame_size - ExtractionInfo::kUsedBufferSize], + info.origin_data, ExtractionInfo::kUsedBufferSize); + pos += frame_size; } - ++extraction_infos_it; } out.set_size(pos); diff --git a/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.h b/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.h index 3787cc51aa..03feb7997c 100644 --- a/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.h +++ b/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.h @@ -18,7 +18,7 @@ #include #include "api/video/encoded_image.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "test/pc/e2e/analyzer/video/encoded_image_data_injector.h" namespace webrtc { @@ -48,24 +48,31 @@ class SingleProcessEncodedImageDataInjector : public 
EncodedImageDataInjector, // changed. EncodedImage InjectData(uint16_t id, bool discard, - const EncodedImage& source, - int coding_entity_id) override; - EncodedImageExtractionResult ExtractData(const EncodedImage& source, - int coding_entity_id) override; + const EncodedImage& source) override; + + void Start(int expected_receivers_count) override { + MutexLock crit(&lock_); + expected_receivers_count_ = expected_receivers_count; + } + void AddParticipantInCall() override; + EncodedImageExtractionResult ExtractData(const EncodedImage& source) override; private: // Contains data required to extract frame id from EncodedImage and restore // original buffer. struct ExtractionInfo { + // Number of bytes from the beginning of the EncodedImage buffer that will + // be used to store frame id and sub id. + const static size_t kUsedBufferSize = 3; // Frame sub id to distinguish encoded images for different spatial layers. uint8_t sub_id; - // Length of the origin buffer encoded image. - size_t length; // Flag to show is this encoded images should be discarded by analyzing // decoder because of not required spatial layer/simulcast stream. bool discard; // Data from first 3 bytes of origin encoded image's payload. - uint8_t origin_data[3]; + uint8_t origin_data[ExtractionInfo::kUsedBufferSize]; + // Count of how many times this frame was received. + int received_count = 0; }; struct ExtractionInfoVector { @@ -77,7 +84,8 @@ class SingleProcessEncodedImageDataInjector : public EncodedImageDataInjector, std::map infos; }; - rtc::CriticalSection lock_; + Mutex lock_; + int expected_receivers_count_ RTC_GUARDED_BY(lock_); // Stores a mapping from frame id to extraction info for spatial layers // for this frame id. 
There can be a lot of them, because if frame was // dropped we can't clean it up, because we won't receive a signal on diff --git a/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector_unittest.cc b/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector_unittest.cc index 67cafa75a6..cfeab23562 100644 --- a/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector_unittest.cc +++ b/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector_unittest.cc @@ -20,115 +20,165 @@ namespace webrtc { namespace webrtc_pc_e2e { namespace { -rtc::Buffer CreateBufferOfSizeNFilledWithValuesFromX(size_t n, uint8_t x) { - rtc::Buffer buffer(n); +rtc::scoped_refptr +CreateEncodedImageBufferOfSizeNFilledWithValuesFromX(size_t n, uint8_t x) { + auto buffer = EncodedImageBuffer::Create(n); for (size_t i = 0; i < n; ++i) { - buffer[i] = static_cast(x + i); + buffer->data()[i] = static_cast(x + i); } return buffer; } -} // namespace +EncodedImage CreateEncodedImageOfSizeNFilledWithValuesFromX(size_t n, + uint8_t x) { + EncodedImage image; + image.SetEncodedData( + CreateEncodedImageBufferOfSizeNFilledWithValuesFromX(n, x)); + return image; +} -TEST(SingleProcessEncodedImageDataInjector, InjectExtractDiscardFalse) { +TEST(SingleProcessEncodedImageDataInjectorTest, InjectExtractDiscardFalse) { SingleProcessEncodedImageDataInjector injector; + injector.Start(1); - rtc::Buffer buffer = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - - EncodedImage source(buffer.data(), 10, 10); + EncodedImage source = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); source.SetTimestamp(123456789); EncodedImageExtractionResult out = - injector.ExtractData(injector.InjectData(512, false, source, 1), 2); + injector.ExtractData(injector.InjectData(512, false, source)); EXPECT_EQ(out.id, 512); EXPECT_FALSE(out.discard); EXPECT_EQ(out.image.size(), 10ul); - EXPECT_EQ(out.image.capacity(), 10ul); + 
EXPECT_EQ(out.image.SpatialLayerFrameSize(0).value_or(0), 0ul); for (int i = 0; i < 10; ++i) { EXPECT_EQ(out.image.data()[i], i + 1); } } -TEST(SingleProcessEncodedImageDataInjector, InjectExtractDiscardTrue) { +TEST(SingleProcessEncodedImageDataInjectorTest, InjectExtractDiscardTrue) { SingleProcessEncodedImageDataInjector injector; + injector.Start(1); - rtc::Buffer buffer = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - - EncodedImage source(buffer.data(), 10, 10); + EncodedImage source = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); source.SetTimestamp(123456789); EncodedImageExtractionResult out = - injector.ExtractData(injector.InjectData(512, true, source, 1), 2); + injector.ExtractData(injector.InjectData(512, true, source)); EXPECT_EQ(out.id, 512); EXPECT_TRUE(out.discard); EXPECT_EQ(out.image.size(), 0ul); - EXPECT_EQ(out.image.capacity(), 10ul); + EXPECT_EQ(out.image.SpatialLayerFrameSize(0).value_or(0), 0ul); +} + +TEST(SingleProcessEncodedImageDataInjectorTest, + InjectWithUnsetSpatialLayerSizes) { + SingleProcessEncodedImageDataInjector injector; + injector.Start(1); + + EncodedImage source = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); + source.SetTimestamp(123456789); + + EncodedImage intermediate = injector.InjectData(512, false, source); + intermediate.SetSpatialIndex(2); + + EncodedImageExtractionResult out = injector.ExtractData(intermediate); + EXPECT_EQ(out.id, 512); + EXPECT_FALSE(out.discard); + EXPECT_EQ(out.image.size(), 10ul); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(out.image.data()[i], i + 1); + } + EXPECT_EQ(out.image.SpatialIndex().value_or(0), 2); + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(out.image.SpatialLayerFrameSize(i).value_or(0), 0ul); + } } -TEST(SingleProcessEncodedImageDataInjector, Inject3Extract3) { +TEST(SingleProcessEncodedImageDataInjectorTest, + InjectWithZeroSpatialLayerSizes) { SingleProcessEncodedImageDataInjector injector; + injector.Start(1); + + EncodedImage source = 
CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); + source.SetTimestamp(123456789); + + EncodedImage intermediate = injector.InjectData(512, false, source); + intermediate.SetSpatialIndex(2); + intermediate.SetSpatialLayerFrameSize(0, 0); + intermediate.SetSpatialLayerFrameSize(1, 0); + intermediate.SetSpatialLayerFrameSize(2, 0); + + EncodedImageExtractionResult out = injector.ExtractData(intermediate); + EXPECT_EQ(out.id, 512); + EXPECT_FALSE(out.discard); + EXPECT_EQ(out.image.size(), 10ul); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(out.image.data()[i], i + 1); + } + EXPECT_EQ(out.image.SpatialIndex().value_or(0), 2); + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(out.image.SpatialLayerFrameSize(i).value_or(0), 0ul); + } +} - rtc::Buffer buffer1 = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - rtc::Buffer buffer2 = CreateBufferOfSizeNFilledWithValuesFromX(10, 11); - rtc::Buffer buffer3 = CreateBufferOfSizeNFilledWithValuesFromX(10, 21); +TEST(SingleProcessEncodedImageDataInjectorTest, Inject3Extract3) { + SingleProcessEncodedImageDataInjector injector; + injector.Start(1); // 1st frame - EncodedImage source1(buffer1.data(), 10, 10); + EncodedImage source1 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); source1.SetTimestamp(123456710); // 2nd frame 1st spatial layer - EncodedImage source2(buffer2.data(), 10, 10); + EncodedImage source2 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 11); source2.SetTimestamp(123456720); // 2nd frame 2nd spatial layer - EncodedImage source3(buffer3.data(), 10, 10); + EncodedImage source3 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 21); source3.SetTimestamp(123456720); - EncodedImage intermediate1 = injector.InjectData(510, false, source1, 1); - EncodedImage intermediate2 = injector.InjectData(520, true, source2, 1); - EncodedImage intermediate3 = injector.InjectData(520, false, source3, 1); + EncodedImage intermediate1 = injector.InjectData(510, false, source1); + EncodedImage intermediate2 = 
injector.InjectData(520, true, source2); + EncodedImage intermediate3 = injector.InjectData(520, false, source3); // Extract ids in different order. - EncodedImageExtractionResult out3 = injector.ExtractData(intermediate3, 2); - EncodedImageExtractionResult out1 = injector.ExtractData(intermediate1, 2); - EncodedImageExtractionResult out2 = injector.ExtractData(intermediate2, 2); + EncodedImageExtractionResult out3 = injector.ExtractData(intermediate3); + EncodedImageExtractionResult out1 = injector.ExtractData(intermediate1); + EncodedImageExtractionResult out2 = injector.ExtractData(intermediate2); EXPECT_EQ(out1.id, 510); EXPECT_FALSE(out1.discard); EXPECT_EQ(out1.image.size(), 10ul); - EXPECT_EQ(out1.image.capacity(), 10ul); + EXPECT_EQ(out1.image.SpatialLayerFrameSize(0).value_or(0), 0ul); for (int i = 0; i < 10; ++i) { EXPECT_EQ(out1.image.data()[i], i + 1); } EXPECT_EQ(out2.id, 520); EXPECT_TRUE(out2.discard); EXPECT_EQ(out2.image.size(), 0ul); - EXPECT_EQ(out2.image.capacity(), 10ul); + EXPECT_EQ(out2.image.SpatialLayerFrameSize(0).value_or(0), 0ul); EXPECT_EQ(out3.id, 520); EXPECT_FALSE(out3.discard); EXPECT_EQ(out3.image.size(), 10ul); - EXPECT_EQ(out3.image.capacity(), 10ul); + EXPECT_EQ(out3.image.SpatialLayerFrameSize(0).value_or(0), 0ul); for (int i = 0; i < 10; ++i) { EXPECT_EQ(out3.image.data()[i], i + 21); } } -TEST(SingleProcessEncodedImageDataInjector, InjectExtractFromConcatenated) { +TEST(SingleProcessEncodedImageDataInjectorTest, InjectExtractFromConcatenated) { SingleProcessEncodedImageDataInjector injector; + injector.Start(1); - rtc::Buffer buffer1 = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - rtc::Buffer buffer2 = CreateBufferOfSizeNFilledWithValuesFromX(10, 11); - rtc::Buffer buffer3 = CreateBufferOfSizeNFilledWithValuesFromX(10, 21); - - EncodedImage source1(buffer1.data(), 10, 10); + EncodedImage source1 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); source1.SetTimestamp(123456710); - EncodedImage 
source2(buffer2.data(), 10, 10); + EncodedImage source2 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 11); source2.SetTimestamp(123456710); - EncodedImage source3(buffer3.data(), 10, 10); + EncodedImage source3 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 21); source3.SetTimestamp(123456710); // Inject id into 3 images with same frame id. - EncodedImage intermediate1 = injector.InjectData(512, false, source1, 1); - EncodedImage intermediate2 = injector.InjectData(512, true, source2, 1); - EncodedImage intermediate3 = injector.InjectData(512, false, source3, 1); + EncodedImage intermediate1 = injector.InjectData(512, false, source1); + EncodedImage intermediate2 = injector.InjectData(512, true, source2); + EncodedImage intermediate3 = injector.InjectData(512, false, source3); // Concatenate them into single encoded image, like it can be done in jitter // buffer. @@ -138,41 +188,46 @@ TEST(SingleProcessEncodedImageDataInjector, InjectExtractFromConcatenated) { concatenated_buffer.AppendData(intermediate1.data(), intermediate1.size()); concatenated_buffer.AppendData(intermediate2.data(), intermediate2.size()); concatenated_buffer.AppendData(intermediate3.data(), intermediate3.size()); - EncodedImage concatenated(concatenated_buffer.data(), concatenated_length, - concatenated_length); + EncodedImage concatenated; + concatenated.SetEncodedData(EncodedImageBuffer::Create( + concatenated_buffer.data(), concatenated_length)); + concatenated.SetSpatialIndex(2); + concatenated.SetSpatialLayerFrameSize(0, intermediate1.size()); + concatenated.SetSpatialLayerFrameSize(1, intermediate2.size()); + concatenated.SetSpatialLayerFrameSize(2, intermediate3.size()); // Extract frame id from concatenated image - EncodedImageExtractionResult out = injector.ExtractData(concatenated, 2); + EncodedImageExtractionResult out = injector.ExtractData(concatenated); EXPECT_EQ(out.id, 512); EXPECT_FALSE(out.discard); EXPECT_EQ(out.image.size(), 2 * 10ul); - 
EXPECT_EQ(out.image.capacity(), 3 * 10ul); for (int i = 0; i < 10; ++i) { EXPECT_EQ(out.image.data()[i], i + 1); EXPECT_EQ(out.image.data()[i + 10], i + 21); } + EXPECT_EQ(out.image.SpatialIndex().value_or(0), 2); + EXPECT_EQ(out.image.SpatialLayerFrameSize(0).value_or(0), 10ul); + EXPECT_EQ(out.image.SpatialLayerFrameSize(1).value_or(0), 0ul); + EXPECT_EQ(out.image.SpatialLayerFrameSize(2).value_or(0), 10ul); } TEST(SingleProcessEncodedImageDataInjector, InjectExtractFromConcatenatedAllDiscarded) { SingleProcessEncodedImageDataInjector injector; + injector.Start(1); - rtc::Buffer buffer1 = CreateBufferOfSizeNFilledWithValuesFromX(10, 1); - rtc::Buffer buffer2 = CreateBufferOfSizeNFilledWithValuesFromX(10, 11); - rtc::Buffer buffer3 = CreateBufferOfSizeNFilledWithValuesFromX(10, 21); - - EncodedImage source1(buffer1.data(), 10, 10); + EncodedImage source1 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); source1.SetTimestamp(123456710); - EncodedImage source2(buffer2.data(), 10, 10); + EncodedImage source2 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 11); source2.SetTimestamp(123456710); - EncodedImage source3(buffer3.data(), 10, 10); + EncodedImage source3 = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 21); source3.SetTimestamp(123456710); // Inject id into 3 images with same frame id. - EncodedImage intermediate1 = injector.InjectData(512, true, source1, 1); - EncodedImage intermediate2 = injector.InjectData(512, true, source2, 1); - EncodedImage intermediate3 = injector.InjectData(512, true, source3, 1); + EncodedImage intermediate1 = injector.InjectData(512, true, source1); + EncodedImage intermediate2 = injector.InjectData(512, true, source2); + EncodedImage intermediate3 = injector.InjectData(512, true, source3); // Concatenate them into single encoded image, like it can be done in jitter // buffer. 
@@ -182,17 +237,125 @@ TEST(SingleProcessEncodedImageDataInjector, concatenated_buffer.AppendData(intermediate1.data(), intermediate1.size()); concatenated_buffer.AppendData(intermediate2.data(), intermediate2.size()); concatenated_buffer.AppendData(intermediate3.data(), intermediate3.size()); - EncodedImage concatenated(concatenated_buffer.data(), concatenated_length, - concatenated_length); + EncodedImage concatenated; + concatenated.SetEncodedData(EncodedImageBuffer::Create( + concatenated_buffer.data(), concatenated_length)); + concatenated.SetSpatialIndex(2); + concatenated.SetSpatialLayerFrameSize(0, intermediate1.size()); + concatenated.SetSpatialLayerFrameSize(1, intermediate2.size()); + concatenated.SetSpatialLayerFrameSize(2, intermediate3.size()); // Extract frame id from concatenated image - EncodedImageExtractionResult out = injector.ExtractData(concatenated, 2); + EncodedImageExtractionResult out = injector.ExtractData(concatenated); EXPECT_EQ(out.id, 512); EXPECT_TRUE(out.discard); EXPECT_EQ(out.image.size(), 0ul); - EXPECT_EQ(out.image.capacity(), 3 * 10ul); + EXPECT_EQ(out.image.SpatialIndex().value_or(0), 2); + for (int i = 0; i < 3; ++i) { + EXPECT_EQ(out.image.SpatialLayerFrameSize(i).value_or(0), 0ul); + } +} + +TEST(SingleProcessEncodedImageDataInjectorTest, InjectOnceExtractTwice) { + SingleProcessEncodedImageDataInjector injector; + injector.Start(2); + + EncodedImage source = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); + source.SetTimestamp(123456789); + + EncodedImageExtractionResult out = injector.ExtractData( + injector.InjectData(/*id=*/512, /*discard=*/false, source)); + EXPECT_EQ(out.id, 512); + EXPECT_FALSE(out.discard); + EXPECT_EQ(out.image.size(), 10ul); + EXPECT_EQ(out.image.SpatialLayerFrameSize(0).value_or(0), 0ul); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(out.image.data()[i], i + 1); + } + out = injector.ExtractData( + injector.InjectData(/*id=*/512, /*discard=*/false, source)); + EXPECT_EQ(out.id, 512); + 
EXPECT_FALSE(out.discard); + EXPECT_EQ(out.image.size(), 10ul); + EXPECT_EQ(out.image.SpatialLayerFrameSize(0).value_or(0), 0ul); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(out.image.data()[i], i + 1); + } +} + +TEST(SingleProcessEncodedImageDataInjectorTest, Add1stReceiverAfterStart) { + SingleProcessEncodedImageDataInjector injector; + injector.Start(0); + + EncodedImage source = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); + source.SetTimestamp(123456789); + EncodedImage modified_image = injector.InjectData( + /*id=*/512, /*discard=*/false, source); + + injector.AddParticipantInCall(); + EncodedImageExtractionResult out = injector.ExtractData(modified_image); + + EXPECT_EQ(out.id, 512); + EXPECT_FALSE(out.discard); + EXPECT_EQ(out.image.size(), 10ul); + EXPECT_EQ(out.image.SpatialLayerFrameSize(0).value_or(0), 0ul); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(out.image.data()[i], i + 1); + } +} + +TEST(SingleProcessEncodedImageDataInjectorTest, Add3rdReceiverAfterStart) { + SingleProcessEncodedImageDataInjector injector; + injector.Start(2); + + EncodedImage source = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); + source.SetTimestamp(123456789); + EncodedImage modified_image = injector.InjectData( + /*id=*/512, /*discard=*/false, source); + injector.ExtractData(modified_image); + + injector.AddParticipantInCall(); + injector.ExtractData(modified_image); + EncodedImageExtractionResult out = injector.ExtractData(modified_image); + + EXPECT_EQ(out.id, 512); + EXPECT_FALSE(out.discard); + EXPECT_EQ(out.image.size(), 10ul); + EXPECT_EQ(out.image.SpatialLayerFrameSize(0).value_or(0), 0ul); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(out.image.data()[i], i + 1); + } +} + +// Death tests. +// Disabled on Android because death tests misbehave on Android, see +// base/test/gtest_util.h. 
+#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) +EncodedImage DeepCopyEncodedImage(const EncodedImage& source) { + EncodedImage copy = source; + copy.SetEncodedData(EncodedImageBuffer::Create(source.data(), source.size())); + return copy; } +TEST(SingleProcessEncodedImageDataInjectorTest, + InjectOnceExtractMoreThenExpected) { + SingleProcessEncodedImageDataInjector injector; + injector.Start(2); + + EncodedImage source = CreateEncodedImageOfSizeNFilledWithValuesFromX(10, 1); + source.SetTimestamp(123456789); + + EncodedImage modified = + injector.InjectData(/*id=*/512, /*discard=*/false, source); + + injector.ExtractData(DeepCopyEncodedImage(modified)); + injector.ExtractData(DeepCopyEncodedImage(modified)); + EXPECT_DEATH(injector.ExtractData(DeepCopyEncodedImage(modified)), + "Unknown sub_id=0 for frame_id=512"); +} +#endif // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) + +} // namespace } // namespace webrtc_pc_e2e } // namespace webrtc diff --git a/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector.cc b/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector.cc new file mode 100644 index 0000000000..e149e3f250 --- /dev/null +++ b/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector.cc @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "test/pc/e2e/analyzer/video/video_frame_tracking_id_injector.h" + +#include "absl/memory/memory.h" +#include "api/video/encoded_image.h" +#include "rtc_base/checks.h" + +namespace webrtc { +namespace webrtc_pc_e2e { + +EncodedImage VideoFrameTrackingIdInjector::InjectData( + uint16_t id, + bool unused_discard, + const EncodedImage& source) { + RTC_CHECK(!unused_discard); + EncodedImage out = source; + out.SetVideoFrameTrackingId(id); + return out; +} + +EncodedImageExtractionResult VideoFrameTrackingIdInjector::ExtractData( + const EncodedImage& source) { + return EncodedImageExtractionResult{source.VideoFrameTrackingId().value_or(0), + source, /*discard=*/false}; +} + +} // namespace webrtc_pc_e2e +} // namespace webrtc diff --git a/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector.h b/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector.h new file mode 100644 index 0000000000..aac7c3726a --- /dev/null +++ b/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef TEST_PC_E2E_ANALYZER_VIDEO_VIDEO_FRAME_TRACKING_ID_INJECTOR_H_ +#define TEST_PC_E2E_ANALYZER_VIDEO_VIDEO_FRAME_TRACKING_ID_INJECTOR_H_ + +#include + +#include "api/video/encoded_image.h" +#include "test/pc/e2e/analyzer/video/encoded_image_data_injector.h" + +namespace webrtc { +namespace webrtc_pc_e2e { + +// This injector sets and retrieves the provided id in the EncodedImage +// video_frame_tracking_id field. 
This is only possible with the RTP header +// extension VideoFrameTrackingIdExtension that will propagate the input +// tracking id to the received EncodedImage. This RTP header extension is +// enabled with the field trial WebRTC-VideoFrameTrackingIdAdvertised +// (http://www.webrtc.org/experiments/rtp-hdrext/video-frame-tracking-id). +// +// Note that this injector doesn't allow to discard frames. +class VideoFrameTrackingIdInjector : public EncodedImageDataInjector, + public EncodedImageDataExtractor { + public: + EncodedImage InjectData(uint16_t id, + bool unused_discard, + const EncodedImage& source) override; + + EncodedImageExtractionResult ExtractData(const EncodedImage& source) override; + + void Start(int) override {} + void AddParticipantInCall() override {} +}; + +} // namespace webrtc_pc_e2e +} // namespace webrtc + +#endif // TEST_PC_E2E_ANALYZER_VIDEO_VIDEO_FRAME_TRACKING_ID_INJECTOR_H_ diff --git a/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector_unittest.cc b/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector_unittest.cc new file mode 100644 index 0000000000..af85b2283f --- /dev/null +++ b/test/pc/e2e/analyzer/video/video_frame_tracking_id_injector_unittest.cc @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "test/pc/e2e/analyzer/video/video_frame_tracking_id_injector.h" + +#include "api/video/encoded_image.h" +#include "rtc_base/buffer.h" +#include "test/gtest.h" + +namespace webrtc { +namespace webrtc_pc_e2e { +namespace { + +EncodedImage CreateEncodedImageOfSizeN(size_t n) { + EncodedImage image; + rtc::scoped_refptr buffer = EncodedImageBuffer::Create(n); + for (size_t i = 0; i < n; ++i) { + buffer->data()[i] = static_cast(i); + } + image.SetEncodedData(buffer); + return image; +} + +TEST(VideoFrameTrackingIdInjectorTest, InjectExtractDiscardFalse) { + VideoFrameTrackingIdInjector injector; + EncodedImage source = CreateEncodedImageOfSizeN(10); + EncodedImageExtractionResult out = + injector.ExtractData(injector.InjectData(512, false, source)); + + EXPECT_EQ(out.id, 512); + EXPECT_FALSE(out.discard); + EXPECT_EQ(out.image.size(), 10ul); + for (int i = 0; i < 10; ++i) { + EXPECT_EQ(source.data()[i], out.image.data()[i]); + } +} + +#if GTEST_HAS_DEATH_TEST +TEST(VideoFrameTrackingIdInjectorTest, InjectExtractDiscardTrue) { + VideoFrameTrackingIdInjector injector; + EncodedImage source = CreateEncodedImageOfSizeN(10); + + EXPECT_DEATH(injector.InjectData(512, true, source), ""); +} +#endif // GTEST_HAS_DEATH_TEST + +} // namespace +} // namespace webrtc_pc_e2e +} // namespace webrtc diff --git a/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.cc b/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.cc index 074188439b..b1a22209be 100644 --- a/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.cc +++ b/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.cc @@ -14,6 +14,8 @@ #include #include "absl/memory/memory.h" +#include "absl/strings/string_view.h" +#include "api/array_view.h" #include "test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.h" #include "test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h" #include 
"test/pc/e2e/analyzer/video/simulcast_dummy_buffer_helper.h" @@ -26,27 +28,35 @@ namespace { class VideoWriter final : public rtc::VideoSinkInterface { public: - VideoWriter(test::VideoFrameWriter* video_writer) - : video_writer_(video_writer) {} + VideoWriter(test::VideoFrameWriter* video_writer, int sampling_modulo) + : video_writer_(video_writer), sampling_modulo_(sampling_modulo) {} ~VideoWriter() override = default; void OnFrame(const VideoFrame& frame) override { + if (frames_counter_++ % sampling_modulo_ != 0) { + return; + } bool result = video_writer_->WriteFrame(frame); RTC_CHECK(result) << "Failed to write frame"; } private: - test::VideoFrameWriter* video_writer_; + test::VideoFrameWriter* const video_writer_; + const int sampling_modulo_; + + int64_t frames_counter_ = 0; }; class AnalyzingFramePreprocessor : public test::TestVideoCapturer::FramePreprocessor { public: AnalyzingFramePreprocessor( - std::string stream_label, + absl::string_view peer_name, + absl::string_view stream_label, VideoQualityAnalyzerInterface* analyzer, std::vector>> sinks) - : stream_label_(std::move(stream_label)), + : peer_name_(peer_name), + stream_label_(stream_label), analyzer_(analyzer), sinks_(std::move(sinks)) {} ~AnalyzingFramePreprocessor() override = default; @@ -54,7 +64,8 @@ class AnalyzingFramePreprocessor VideoFrame Preprocess(const VideoFrame& source_frame) override { // Copy VideoFrame to be able to set id on it. 
VideoFrame frame = source_frame; - uint16_t frame_id = analyzer_->OnFrameCaptured(stream_label_, frame); + uint16_t frame_id = + analyzer_->OnFrameCaptured(peer_name_, stream_label_, frame); frame.set_id(frame_id); for (auto& sink : sinks_) { @@ -64,6 +75,7 @@ class AnalyzingFramePreprocessor } private: + const std::string peer_name_; const std::string stream_label_; VideoQualityAnalyzerInterface* const analyzer_; const std::vector>> @@ -78,8 +90,7 @@ VideoQualityAnalyzerInjectionHelper::VideoQualityAnalyzerInjectionHelper( EncodedImageDataExtractor* extractor) : analyzer_(std::move(analyzer)), injector_(injector), - extractor_(extractor), - encoding_entities_id_generator_(std::make_unique(1)) { + extractor_(extractor) { RTC_DCHECK(injector_); RTC_DCHECK(extractor_); } @@ -88,32 +99,34 @@ VideoQualityAnalyzerInjectionHelper::~VideoQualityAnalyzerInjectionHelper() = std::unique_ptr VideoQualityAnalyzerInjectionHelper::WrapVideoEncoderFactory( + absl::string_view peer_name, std::unique_ptr delegate, double bitrate_multiplier, std::map> stream_required_spatial_index) const { return std::make_unique( - std::move(delegate), bitrate_multiplier, - std::move(stream_required_spatial_index), - encoding_entities_id_generator_.get(), injector_, analyzer_.get()); + peer_name, std::move(delegate), bitrate_multiplier, + std::move(stream_required_spatial_index), injector_, analyzer_.get()); } std::unique_ptr VideoQualityAnalyzerInjectionHelper::WrapVideoDecoderFactory( + absl::string_view peer_name, std::unique_ptr delegate) const { return std::make_unique( - std::move(delegate), encoding_entities_id_generator_.get(), extractor_, - analyzer_.get()); + peer_name, std::move(delegate), extractor_, analyzer_.get()); } std::unique_ptr VideoQualityAnalyzerInjectionHelper::CreateFramePreprocessor( + absl::string_view peer_name, const VideoConfig& config) { std::vector>> sinks; test::VideoFrameWriter* writer = MaybeCreateVideoWriter(config.input_dump_file_name, config); if (writer) { - 
sinks.push_back(std::make_unique(writer)); + sinks.push_back(std::make_unique( + writer, config.input_dump_sampling_modulo)); } if (config.show_on_screen) { sinks.push_back(absl::WrapUnique( @@ -121,27 +134,32 @@ VideoQualityAnalyzerInjectionHelper::CreateFramePreprocessor( config.width, config.height))); } { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); known_video_configs_.insert({*config.stream_label, config}); } return std::make_unique( - std::move(*config.stream_label), analyzer_.get(), std::move(sinks)); + peer_name, std::move(*config.stream_label), analyzer_.get(), + std::move(sinks)); } std::unique_ptr> -VideoQualityAnalyzerInjectionHelper::CreateVideoSink() { - return std::make_unique(this); +VideoQualityAnalyzerInjectionHelper::CreateVideoSink( + absl::string_view peer_name) { + return std::make_unique(peer_name, this); } -void VideoQualityAnalyzerInjectionHelper::Start(std::string test_case_name, - int max_threads_count) { - analyzer_->Start(std::move(test_case_name), max_threads_count); +void VideoQualityAnalyzerInjectionHelper::Start( + std::string test_case_name, + rtc::ArrayView peer_names, + int max_threads_count) { + analyzer_->Start(std::move(test_case_name), peer_names, max_threads_count); + extractor_->Start(peer_names.size()); } void VideoQualityAnalyzerInjectionHelper::OnStatsReports( - const std::string& pc_label, - const StatsReports& stats_reports) { - analyzer_->OnStatsReports(pc_label, stats_reports); + absl::string_view pc_label, + const rtc::scoped_refptr& report) { + analyzer_->OnStatsReports(pc_label, report); } void VideoQualityAnalyzerInjectionHelper::Stop() { @@ -169,12 +187,20 @@ VideoQualityAnalyzerInjectionHelper::MaybeCreateVideoWriter( return out; } -void VideoQualityAnalyzerInjectionHelper::OnFrame(const VideoFrame& frame) { - if (IsDummyFrameBuffer(frame.video_frame_buffer()->ToI420())) { +void VideoQualityAnalyzerInjectionHelper::OnFrame(absl::string_view peer_name, + const VideoFrame& frame) { + 
rtc::scoped_refptr i420_buffer = + frame.video_frame_buffer()->ToI420(); + if (IsDummyFrameBuffer(i420_buffer)) { // This is dummy frame, so we don't need to process it further. return; } - analyzer_->OnFrameRendered(frame); + // Copy entire video frame including video buffer to ensure that analyzer + // won't hold any WebRTC internal buffers. + VideoFrame frame_copy = frame; + frame_copy.set_video_frame_buffer(I420Buffer::Copy(*i420_buffer)); + analyzer_->OnFrameRendered(peer_name, frame_copy); + std::string stream_label = analyzer_->GetStreamLabel(frame.id()); std::vector>>* sinks = PopulateSinks(stream_label); @@ -189,7 +215,7 @@ void VideoQualityAnalyzerInjectionHelper::OnFrame(const VideoFrame& frame) { std::vector>>* VideoQualityAnalyzerInjectionHelper::PopulateSinks( const std::string& stream_label) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); auto sinks_it = sinks_.find(stream_label); if (sinks_it != sinks_.end()) { return &sinks_it->second; @@ -203,7 +229,8 @@ VideoQualityAnalyzerInjectionHelper::PopulateSinks( test::VideoFrameWriter* writer = MaybeCreateVideoWriter(config.output_dump_file_name, config); if (writer) { - sinks.push_back(std::make_unique(writer)); + sinks.push_back(std::make_unique( + writer, config.output_dump_sampling_modulo)); } if (config.show_on_screen) { sinks.push_back(absl::WrapUnique( diff --git a/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.h b/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.h index a0daa9ff18..85874cb5bc 100644 --- a/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.h +++ b/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.h @@ -14,7 +14,10 @@ #include #include #include +#include +#include "absl/strings/string_view.h" +#include "api/array_view.h" #include "api/test/peerconnection_quality_test_fixture.h" #include "api/test/stats_observer_interface.h" #include "api/test/video_quality_analyzer_interface.h" @@ -22,9 +25,8 @@ 
#include "api/video/video_sink_interface.h" #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_factory.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "test/pc/e2e/analyzer/video/encoded_image_data_injector.h" -#include "test/pc/e2e/analyzer/video/id_generator.h" #include "test/test_video_capturer.h" #include "test/testsupport/video_frame_writer.h" @@ -43,9 +45,17 @@ class VideoQualityAnalyzerInjectionHelper : public StatsObserverInterface { EncodedImageDataExtractor* extractor); ~VideoQualityAnalyzerInjectionHelper() override; + // Registers new call participant to the underlying video quality analyzer. + // The method should be called before the participant is actually added. + void RegisterParticipantInCall(absl::string_view peer_name) { + analyzer_->RegisterParticipantInCall(peer_name); + extractor_->AddParticipantInCall(); + } + // Wraps video encoder factory to give video quality analyzer access to frames // before encoding and encoded images after. std::unique_ptr WrapVideoEncoderFactory( + absl::string_view peer_name, std::unique_ptr delegate, double bitrate_multiplier, std::map> stream_required_spatial_index) @@ -53,25 +63,32 @@ class VideoQualityAnalyzerInjectionHelper : public StatsObserverInterface { // Wraps video decoder factory to give video quality analyzer access to // received encoded images and frames, that were decoded from them. std::unique_ptr WrapVideoDecoderFactory( + absl::string_view peer_name, std::unique_ptr delegate) const; // Creates VideoFrame preprocessor, that will allow video quality analyzer to // get access to the captured frames. If provided config also specifies // |input_dump_file_name|, video will be written into that file. 
std::unique_ptr - CreateFramePreprocessor(const VideoConfig& config); + CreateFramePreprocessor(absl::string_view peer_name, + const VideoConfig& config); // Creates sink, that will allow video quality analyzer to get access to // the rendered frames. If corresponding video track has - // |output_dump_file_name| in its VideoConfig, then video also will be written + // |output_dump_file_name| in its VideoConfig, which was used for + // CreateFramePreprocessor(...), then video also will be written // into that file. - std::unique_ptr> CreateVideoSink(); + std::unique_ptr> CreateVideoSink( + absl::string_view peer_name); - void Start(std::string test_case_name, int max_threads_count); + void Start(std::string test_case_name, + rtc::ArrayView peer_names, + int max_threads_count = 1); // Forwards |stats_reports| for Peer Connection |pc_label| to // |analyzer_|. - void OnStatsReports(const std::string& pc_label, - const StatsReports& stats_reports) override; + void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) override; // Stops VideoQualityAnalyzerInterface to populate final data and metrics. // Should be invoked after analyzed video tracks are disposed. 
@@ -80,20 +97,26 @@ class VideoQualityAnalyzerInjectionHelper : public StatsObserverInterface { private: class AnalyzingVideoSink final : public rtc::VideoSinkInterface { public: - explicit AnalyzingVideoSink(VideoQualityAnalyzerInjectionHelper* helper) - : helper_(helper) {} + explicit AnalyzingVideoSink(absl::string_view peer_name, + VideoQualityAnalyzerInjectionHelper* helper) + : peer_name_(peer_name), helper_(helper) {} ~AnalyzingVideoSink() override = default; - void OnFrame(const VideoFrame& frame) override { helper_->OnFrame(frame); } + void OnFrame(const VideoFrame& frame) override { + helper_->OnFrame(peer_name_, frame); + } private: + const std::string peer_name_; VideoQualityAnalyzerInjectionHelper* const helper_; }; test::VideoFrameWriter* MaybeCreateVideoWriter( absl::optional file_name, const PeerConnectionE2EQualityTestFixture::VideoConfig& config); - void OnFrame(const VideoFrame& frame); + // Creates a deep copy of the frame and passes it to the video analyzer, while + // passing real frame to the sinks + void OnFrame(absl::string_view peer_name, const VideoFrame& frame); std::vector>>* PopulateSinks(const std::string& stream_label); @@ -103,13 +126,11 @@ class VideoQualityAnalyzerInjectionHelper : public StatsObserverInterface { std::vector> video_writers_; - rtc::CriticalSection lock_; + Mutex lock_; std::map known_video_configs_ RTC_GUARDED_BY(lock_); std::map>>> sinks_ RTC_GUARDED_BY(lock_); - - std::unique_ptr> encoding_entities_id_generator_; }; } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/analyzer/video/video_quality_metrics_reporter.cc b/test/pc/e2e/analyzer/video/video_quality_metrics_reporter.cc index 754a0a468f..cc675cc2df 100644 --- a/test/pc/e2e/analyzer/video/video_quality_metrics_reporter.cc +++ b/test/pc/e2e/analyzer/video/video_quality_metrics_reporter.cc @@ -10,55 +10,94 @@ #include "test/pc/e2e/analyzer/video/video_quality_metrics_reporter.h" +#include "api/stats/rtc_stats.h" +#include "api/stats/rtcstats_objects.h" 
+#include "api/units/data_rate.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" + namespace webrtc { namespace webrtc_pc_e2e { -namespace { - -constexpr int kBitsInByte = 8; -} // namespace - -void VideoQualityMetricsReporter::Start(absl::string_view test_case_name) { +void VideoQualityMetricsReporter::Start( + absl::string_view test_case_name, + const TrackIdStreamInfoMap* /*reporter_helper*/) { test_case_name_ = std::string(test_case_name); + start_time_ = Now(); } -// TODO(bugs.webrtc.org/10430): Migrate to the new GetStats as soon as -// bugs.webrtc.org/10428 is fixed. void VideoQualityMetricsReporter::OnStatsReports( - const std::string& pc_label, - const StatsReports& stats_reports) { - for (const StatsReport* stats_report : stats_reports) { - // The only stats collected by this analyzer are present in - // kStatsReportTypeBwe reports, so all other reports are just ignored. - if (stats_report->type() != StatsReport::StatsType::kStatsReportTypeBwe) { + absl::string_view pc_label, + const rtc::scoped_refptr& report) { + RTC_CHECK(start_time_) + << "Please invoke Start(...) method before calling OnStatsReports(...)"; + + auto transport_stats = report->GetStatsOfType(); + if (transport_stats.size() == 0u || + !transport_stats[0]->selected_candidate_pair_id.is_defined()) { + return; + } + RTC_DCHECK_EQ(transport_stats.size(), 1); + std::string selected_ice_id = + transport_stats[0]->selected_candidate_pair_id.ValueToString(); + // Use the selected ICE candidate pair ID to get the appropriate ICE stats. 
+ const RTCIceCandidatePairStats ice_candidate_pair_stats = + report->Get(selected_ice_id)->cast_to(); + + auto outbound_rtp_stats = report->GetStatsOfType(); + StatsSample sample; + for (auto& s : outbound_rtp_stats) { + if (!s->media_type.is_defined()) { + continue; + } + if (!(*s->media_type == RTCMediaStreamTrackKind::kVideo)) { continue; } - const webrtc::StatsReport::Value* available_send_bandwidth = - stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNameAvailableSendBandwidth); - const webrtc::StatsReport::Value* retransmission_bitrate = - stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNameRetransmitBitrate); - const webrtc::StatsReport::Value* transmission_bitrate = - stats_report->FindValue( - StatsReport::StatsValueName::kStatsValueNameTransmitBitrate); - RTC_CHECK(available_send_bandwidth); - RTC_CHECK(retransmission_bitrate); - RTC_CHECK(transmission_bitrate); - - rtc::CritScope crit(&video_bwe_stats_lock_); - VideoBweStats& video_bwe_stats = video_bwe_stats_[pc_label]; + if (s->timestamp_us() > sample.sample_time.us()) { + sample.sample_time = Timestamp::Micros(s->timestamp_us()); + } + sample.retransmitted_bytes_sent += + DataSize::Bytes(s->retransmitted_bytes_sent.ValueOrDefault(0ul)); + sample.bytes_sent += DataSize::Bytes(s->bytes_sent.ValueOrDefault(0ul)); + sample.header_bytes_sent += + DataSize::Bytes(s->header_bytes_sent.ValueOrDefault(0ul)); + } + + MutexLock lock(&video_bwe_stats_lock_); + VideoBweStats& video_bwe_stats = video_bwe_stats_[std::string(pc_label)]; + if (ice_candidate_pair_stats.available_outgoing_bitrate.is_defined()) { video_bwe_stats.available_send_bandwidth.AddSample( - available_send_bandwidth->int_val()); - video_bwe_stats.transmission_bitrate.AddSample( - transmission_bitrate->int_val()); - video_bwe_stats.retransmission_bitrate.AddSample( - retransmission_bitrate->int_val()); + DataRate::BitsPerSec( + *ice_candidate_pair_stats.available_outgoing_bitrate) + .bytes_per_sec()); + } + + 
StatsSample prev_sample = last_stats_sample_[std::string(pc_label)]; + if (prev_sample.sample_time.IsZero()) { + prev_sample.sample_time = start_time_.value(); + } + last_stats_sample_[std::string(pc_label)] = sample; + + TimeDelta time_between_samples = sample.sample_time - prev_sample.sample_time; + if (time_between_samples.IsZero()) { + return; } + + DataRate retransmission_bitrate = + (sample.retransmitted_bytes_sent - prev_sample.retransmitted_bytes_sent) / + time_between_samples; + video_bwe_stats.retransmission_bitrate.AddSample( + retransmission_bitrate.bytes_per_sec()); + DataRate transmission_bitrate = + (sample.bytes_sent + sample.header_bytes_sent - prev_sample.bytes_sent - + prev_sample.header_bytes_sent) / + time_between_samples; + video_bwe_stats.transmission_bitrate.AddSample( + transmission_bitrate.bytes_per_sec()); } void VideoQualityMetricsReporter::StopAndReportResults() { - rtc::CritScope video_bwe_crit(&video_bwe_stats_lock_); + MutexLock video_bwemutex_(&video_bwe_stats_lock_); for (const auto& item : video_bwe_stats_) { ReportVideoBweResults(GetTestCaseName(item.first), item.second); } @@ -73,14 +112,11 @@ void VideoQualityMetricsReporter::ReportVideoBweResults( const std::string& test_case_name, const VideoBweStats& video_bwe_stats) { ReportResult("available_send_bandwidth", test_case_name, - video_bwe_stats.available_send_bandwidth / kBitsInByte, - "bytesPerSecond"); + video_bwe_stats.available_send_bandwidth, "bytesPerSecond"); ReportResult("transmission_bitrate", test_case_name, - video_bwe_stats.transmission_bitrate / kBitsInByte, - "bytesPerSecond"); + video_bwe_stats.transmission_bitrate, "bytesPerSecond"); ReportResult("retransmission_bitrate", test_case_name, - video_bwe_stats.retransmission_bitrate / kBitsInByte, - "bytesPerSecond"); + video_bwe_stats.retransmission_bitrate, "bytesPerSecond"); } void VideoQualityMetricsReporter::ReportResult( diff --git a/test/pc/e2e/analyzer/video/video_quality_metrics_reporter.h 
b/test/pc/e2e/analyzer/video/video_quality_metrics_reporter.h index 1688a7b6fc..ff195a450e 100644 --- a/test/pc/e2e/analyzer/video/video_quality_metrics_reporter.h +++ b/test/pc/e2e/analyzer/video/video_quality_metrics_reporter.h @@ -14,9 +14,13 @@ #include #include +#include "absl/strings/string_view.h" +#include "api/numerics/samples_stats_counter.h" #include "api/test/peerconnection_quality_test_fixture.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/numerics/samples_stats_counter.h" +#include "api/test/track_id_stream_info_map.h" +#include "api/units/data_size.h" +#include "api/units/timestamp.h" +#include "rtc_base/synchronization/mutex.h" #include "test/testsupport/perf_test.h" namespace webrtc { @@ -31,15 +35,25 @@ struct VideoBweStats { class VideoQualityMetricsReporter : public PeerConnectionE2EQualityTestFixture::QualityMetricsReporter { public: - VideoQualityMetricsReporter() = default; + VideoQualityMetricsReporter(Clock* const clock) : clock_(clock) {} ~VideoQualityMetricsReporter() override = default; - void Start(absl::string_view test_case_name) override; - void OnStatsReports(const std::string& pc_label, - const StatsReports& reports) override; + void Start(absl::string_view test_case_name, + const TrackIdStreamInfoMap* reporter_helper) override; + void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) override; void StopAndReportResults() override; private: + struct StatsSample { + DataSize bytes_sent = DataSize::Zero(); + DataSize header_bytes_sent = DataSize::Zero(); + DataSize retransmitted_bytes_sent = DataSize::Zero(); + + Timestamp sample_time = Timestamp::Zero(); + }; + std::string GetTestCaseName(const std::string& stream_label) const; static void ReportVideoBweResults(const std::string& test_case_name, const VideoBweStats& video_bwe_stats); @@ -50,14 +64,20 @@ class VideoQualityMetricsReporter const std::string& unit, webrtc::test::ImproveDirection improve_direction = 
webrtc::test::ImproveDirection::kNone); + Timestamp Now() const { return clock_->CurrentTime(); } + + Clock* const clock_; std::string test_case_name_; + absl::optional start_time_; - rtc::CriticalSection video_bwe_stats_lock_; + Mutex video_bwe_stats_lock_; // Map between a peer connection label (provided by the framework) and // its video BWE stats. std::map video_bwe_stats_ RTC_GUARDED_BY(video_bwe_stats_lock_); + std::map last_stats_sample_ + RTC_GUARDED_BY(video_bwe_stats_lock_); }; } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/analyzer_helper.cc b/test/pc/e2e/analyzer_helper.cc index f11b3bb803..852f0a3435 100644 --- a/test/pc/e2e/analyzer_helper.cc +++ b/test/pc/e2e/analyzer_helper.cc @@ -22,16 +22,36 @@ AnalyzerHelper::AnalyzerHelper() { void AnalyzerHelper::AddTrackToStreamMapping(std::string track_id, std::string stream_label) { RTC_DCHECK_RUN_ON(&signaling_sequence_checker_); - track_to_stream_map_.insert({std::move(track_id), std::move(stream_label)}); + track_to_stream_map_.insert( + {std::move(track_id), StreamInfo{stream_label, stream_label}}); } -const std::string& AnalyzerHelper::GetStreamLabelFromTrackId( - const std::string& track_id) const { +void AnalyzerHelper::AddTrackToStreamMapping(std::string track_id, + std::string stream_label, + std::string sync_group) { + RTC_DCHECK_RUN_ON(&signaling_sequence_checker_); + track_to_stream_map_.insert( + {std::move(track_id), + StreamInfo{std::move(stream_label), std::move(sync_group)}}); +} + +const AnalyzerHelper::StreamInfo& AnalyzerHelper::GetStreamInfoFromTrackId( + absl::string_view track_id) const { RTC_DCHECK_RUN_ON(&signaling_sequence_checker_); - auto track_to_stream_pair = track_to_stream_map_.find(track_id); + auto track_to_stream_pair = track_to_stream_map_.find(std::string(track_id)); RTC_CHECK(track_to_stream_pair != track_to_stream_map_.end()); return track_to_stream_pair->second; } +absl::string_view AnalyzerHelper::GetStreamLabelFromTrackId( + absl::string_view track_id) const { 
+ return GetStreamInfoFromTrackId(track_id).stream_label; +} + +absl::string_view AnalyzerHelper::GetSyncGroupLabelFromTrackId( + absl::string_view track_id) const { + return GetStreamInfoFromTrackId(track_id).sync_group; +} + } // namespace webrtc_pc_e2e } // namespace webrtc diff --git a/test/pc/e2e/analyzer_helper.h b/test/pc/e2e/analyzer_helper.h index 51cfe5587d..9cebd7015e 100644 --- a/test/pc/e2e/analyzer_helper.h +++ b/test/pc/e2e/analyzer_helper.h @@ -14,33 +14,49 @@ #include #include -#include "api/test/track_id_stream_label_map.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "absl/strings/string_view.h" +#include "api/sequence_checker.h" +#include "api/test/track_id_stream_info_map.h" #include "rtc_base/thread_annotations.h" namespace webrtc { namespace webrtc_pc_e2e { // This class is a utility that provides bookkeeping capabilities that -// are useful to associate stats reports track_ids to the remote stream_id. +// are useful to associate stats reports track_ids to the remote stream info. // The framework will populate an instance of this class and it will pass // it to the Start method of Media Quality Analyzers. // An instance of AnalyzerHelper must only be accessed from a single // thread and since stats collection happens on the signaling thread, -// both AddTrackToStreamMapping and GetStreamLabelFromTrackId must be -// invoked from the signaling thread. -class AnalyzerHelper : public TrackIdStreamLabelMap { +// AddTrackToStreamMapping, GetStreamLabelFromTrackId and +// GetSyncGroupLabelFromTrackId must be invoked from the signaling thread. Get +// methods should be invoked only after all data is added. Mixing Get methods +// with adding new data may lead to undefined behaviour. 
+class AnalyzerHelper : public TrackIdStreamInfoMap { public: AnalyzerHelper(); void AddTrackToStreamMapping(std::string track_id, std::string stream_label); + void AddTrackToStreamMapping(std::string track_id, + std::string stream_label, + std::string sync_group); - const std::string& GetStreamLabelFromTrackId( - const std::string& track_id) const override; + absl::string_view GetStreamLabelFromTrackId( + absl::string_view track_id) const override; + + absl::string_view GetSyncGroupLabelFromTrackId( + absl::string_view track_id) const override; private: + struct StreamInfo { + std::string stream_label; + std::string sync_group; + }; + + const StreamInfo& GetStreamInfoFromTrackId(absl::string_view track_id) const; + SequenceChecker signaling_sequence_checker_; - std::map track_to_stream_map_ + std::map track_to_stream_map_ RTC_GUARDED_BY(signaling_sequence_checker_); }; diff --git a/test/pc/e2e/cross_media_metrics_reporter.cc b/test/pc/e2e/cross_media_metrics_reporter.cc new file mode 100644 index 0000000000..96f661fd4f --- /dev/null +++ b/test/pc/e2e/cross_media_metrics_reporter.cc @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "test/pc/e2e/cross_media_metrics_reporter.h" + +#include +#include + +#include "api/stats/rtc_stats.h" +#include "api/stats/rtcstats_objects.h" +#include "api/units/timestamp.h" +#include "rtc_base/event.h" +#include "system_wrappers/include/field_trial.h" + +namespace webrtc { +namespace webrtc_pc_e2e { + +void CrossMediaMetricsReporter::Start( + absl::string_view test_case_name, + const TrackIdStreamInfoMap* reporter_helper) { + test_case_name_ = std::string(test_case_name); + reporter_helper_ = reporter_helper; +} + +void CrossMediaMetricsReporter::OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) { + auto inbound_stats = report->GetStatsOfType(); + std::map> + sync_group_stats; + for (const auto& stat : inbound_stats) { + auto media_source_stat = + report->GetAs(*stat->track_id); + if (stat->estimated_playout_timestamp.ValueOrDefault(0.) > 0 && + media_source_stat->track_identifier.is_defined()) { + sync_group_stats[reporter_helper_->GetSyncGroupLabelFromTrackId( + *media_source_stat->track_identifier)] + .push_back(stat); + } + } + + MutexLock lock(&mutex_); + for (const auto& pair : sync_group_stats) { + // If there is less than two streams, it is not a sync group. + if (pair.second.size() < 2) { + continue; + } + auto sync_group = std::string(pair.first); + const RTCInboundRTPStreamStats* audio_stat = pair.second[0]; + const RTCInboundRTPStreamStats* video_stat = pair.second[1]; + + RTC_CHECK(pair.second.size() == 2 && audio_stat->kind.is_defined() && + video_stat->kind.is_defined() && + *audio_stat->kind != *video_stat->kind) + << "Sync group should consist of one audio and one video stream."; + + if (*audio_stat->kind == RTCMediaStreamTrackKind::kVideo) { + std::swap(audio_stat, video_stat); + } + // Stream labels of a sync group are same for all polls, so we need it add + // it only once. 
+ if (stats_info_.find(sync_group) == stats_info_.end()) { + auto audio_source_stat = + report->GetAs(*audio_stat->track_id); + auto video_source_stat = + report->GetAs(*video_stat->track_id); + // *_source_stat->track_identifier is always defined here because we + // checked it while grouping stats. + stats_info_[sync_group].audio_stream_label = + std::string(reporter_helper_->GetStreamLabelFromTrackId( + *audio_source_stat->track_identifier)); + stats_info_[sync_group].video_stream_label = + std::string(reporter_helper_->GetStreamLabelFromTrackId( + *video_source_stat->track_identifier)); + } + + double audio_video_playout_diff = *audio_stat->estimated_playout_timestamp - + *video_stat->estimated_playout_timestamp; + if (audio_video_playout_diff > 0) { + stats_info_[sync_group].audio_ahead_ms.AddSample( + audio_video_playout_diff); + stats_info_[sync_group].video_ahead_ms.AddSample(0); + } else { + stats_info_[sync_group].audio_ahead_ms.AddSample(0); + stats_info_[sync_group].video_ahead_ms.AddSample( + std::abs(audio_video_playout_diff)); + } + } +} + +void CrossMediaMetricsReporter::StopAndReportResults() { + MutexLock lock(&mutex_); + for (const auto& pair : stats_info_) { + const std::string& sync_group = pair.first; + ReportResult("audio_ahead_ms", + GetTestCaseName(pair.second.audio_stream_label, sync_group), + pair.second.audio_ahead_ms, "ms", + webrtc::test::ImproveDirection::kSmallerIsBetter); + ReportResult("video_ahead_ms", + GetTestCaseName(pair.second.video_stream_label, sync_group), + pair.second.video_ahead_ms, "ms", + webrtc::test::ImproveDirection::kSmallerIsBetter); + } +} + +void CrossMediaMetricsReporter::ReportResult( + const std::string& metric_name, + const std::string& test_case_name, + const SamplesStatsCounter& counter, + const std::string& unit, + webrtc::test::ImproveDirection improve_direction) { + test::PrintResult(metric_name, /*modifier=*/"", test_case_name, counter, unit, + /*important=*/false, improve_direction); +} + 
+std::string CrossMediaMetricsReporter::GetTestCaseName( + const std::string& stream_label, + const std::string& sync_group) const { + return test_case_name_ + "/" + sync_group + "_" + stream_label; +} + +} // namespace webrtc_pc_e2e +} // namespace webrtc diff --git a/test/pc/e2e/cross_media_metrics_reporter.h b/test/pc/e2e/cross_media_metrics_reporter.h new file mode 100644 index 0000000000..6ddc994d1f --- /dev/null +++ b/test/pc/e2e/cross_media_metrics_reporter.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef TEST_PC_E2E_CROSS_MEDIA_METRICS_REPORTER_H_ +#define TEST_PC_E2E_CROSS_MEDIA_METRICS_REPORTER_H_ + +#include +#include + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" +#include "api/numerics/samples_stats_counter.h" +#include "api/test/peerconnection_quality_test_fixture.h" +#include "api/test/track_id_stream_info_map.h" +#include "api/units/timestamp.h" +#include "rtc_base/synchronization/mutex.h" +#include "test/testsupport/perf_test.h" + +namespace webrtc { +namespace webrtc_pc_e2e { + +class CrossMediaMetricsReporter + : public PeerConnectionE2EQualityTestFixture::QualityMetricsReporter { + public: + CrossMediaMetricsReporter() = default; + ~CrossMediaMetricsReporter() override = default; + + void Start(absl::string_view test_case_name, + const TrackIdStreamInfoMap* reporter_helper) override; + void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) override; + void StopAndReportResults() override; + + private: + struct StatsInfo { + SamplesStatsCounter audio_ahead_ms; + SamplesStatsCounter video_ahead_ms; + 
+ std::string audio_stream_label; + std::string video_stream_label; + }; + + static void ReportResult(const std::string& metric_name, + const std::string& test_case_name, + const SamplesStatsCounter& counter, + const std::string& unit, + webrtc::test::ImproveDirection improve_direction = + webrtc::test::ImproveDirection::kNone); + std::string GetTestCaseName(const std::string& stream_label, + const std::string& sync_group) const; + + std::string test_case_name_; + const TrackIdStreamInfoMap* reporter_helper_; + + Mutex mutex_; + std::map stats_info_ RTC_GUARDED_BY(mutex_); +}; + +} // namespace webrtc_pc_e2e +} // namespace webrtc + +#endif // TEST_PC_E2E_CROSS_MEDIA_METRICS_REPORTER_H_ diff --git a/test/pc/e2e/echo/echo_emulation.cc b/test/pc/e2e/echo/echo_emulation.cc index 2beaa34cbd..230e8e3eca 100644 --- a/test/pc/e2e/echo/echo_emulation.cc +++ b/test/pc/e2e/echo/echo_emulation.cc @@ -57,17 +57,7 @@ void EchoEmulatingCapturer::OnAudioRendered( } queue_input_.assign(data.begin(), data.end()); if (!renderer_queue_.Insert(&queue_input_)) { - // Test audio device works too slow with sanitizers and on some platforms - // and can't properly process audio, so when capturer will be stopped - // renderer will quickly overfill the queue. - // TODO(crbug.com/webrtc/10850) remove it when test ADM will be fast enough. 
-#if defined(THREAD_SANITIZER) || defined(MEMORY_SANITIZER) || \ - defined(ADDRESS_SANITIZER) || defined(WEBRTC_ANDROID) || \ - (defined(_MSC_VER) && !defined(__clang__) && !defined(NDEBUG)) RTC_LOG(WARNING) << "Echo queue is full"; -#else - RTC_CHECK(false) << "Echo queue is full"; -#endif } } diff --git a/test/pc/e2e/g3doc/architecture.md b/test/pc/e2e/g3doc/architecture.md new file mode 100644 index 0000000000..5708054c37 --- /dev/null +++ b/test/pc/e2e/g3doc/architecture.md @@ -0,0 +1,208 @@ + + +# PeerConnection level framework fixture architecture + +## Overview + +The main implementation of +[`webrtc::webrtc_pc_e2e::PeerConnectionE2EQualityTestFixture`][1] is +[`webrtc::webrtc_pc_e2e::PeerConnectionE2EQualityTest`][2]. Internally it owns +the next main pieces: + +* [`MediaHelper`][3] - responsible for adding audio and video tracks to the + peers. +* [`VideoQualityAnalyzerInjectionHelper`][4] and + [`SingleProcessEncodedImageDataInjector`][5] - used to inject video quality + analysis and properly match captured and rendered video frames. You can read + more about it in + [DefaultVideoQualityAnalyzer](default_video_quality_analyzer.md) section. +* [`AudioQualityAnalyzerInterface`][6] - used to measure audio quality metrics +* [`TestActivitiesExecutor`][7] - used to support [`ExecuteAt(...)`][8] and + [`ExecuteEvery(...)`][9] API of `PeerConnectionE2EQualityTestFixture` to run + any arbitrary action during test execution timely synchronized with a test + call. +* A vector of [`QualityMetricsReporter`][10] added by the + `PeerConnectionE2EQualityTestFixture` user. +* Two peers: Alice and Bob represented by instances of [`TestPeer`][11] + object. + +Also it keeps a reference to [`webrtc::TimeController`][12], which is used to +create all required threads, task queues, task queue factories and time related +objects. + +## TestPeer + +Call participants are represented by instances of `TestPeer` object. +[`TestPeerFactory`][13] is used to create them. 
`TestPeer` owns all instances +related to the `webrtc::PeerConnection`, including required listeners and +callbacks. Also it provides an API to do offer/answer exchange and ICE candidate +exchange. For these purposes internally it uses an instance of +[`webrtc::PeerConnectionWrapper`][14]. + +The `TestPeer` also owns the `PeerConnection` worker thread. The signaling +thread for all `PeerConnection`'s is owned by +`PeerConnectionE2EQualityTestFixture` and shared between all participants in the +call. The network thread is owned by the network layer (it may be either emulated +network provided by [Network Emulation Framework][24] or network thread and +`rtc::NetworkManager` provided by user) and provided when peer is added to the +fixture via [`AddPeer(...)`][15] API. + +## GetStats API based metrics reporters + +`PeerConnectionE2EQualityTestFixture` gives the user ability to provide +different `QualityMetricsReporter`s which will listen for `PeerConnection` +[`GetStats`][16] API. Then such reporters will be able to report various metrics +that user wants to measure. + +`PeerConnectionE2EQualityTestFixture` itself also uses this mechanism to +measure: + +* Audio quality metrics +* Audio/Video sync metrics (with help of [`CrossMediaMetricsReporter`][17]) + +Also framework provides a [`StatsBasedNetworkQualityMetricsReporter`][18] to +measure network related WebRTC metrics and print debug raw emulated network +statistic. This reporter should be added by user via +[`AddQualityMetricsReporter(...)`][19] API if required. + +Internally stats gathering is done by [`StatsPoller`][20]. Stats are requested +once per second for each `PeerConnection` and then resulted object is provided +into each stats listener. + +## Offer/Answer exchange + +`PeerConnectionE2EQualityTest` provides ability to test Simulcast and SVC for +video. These features aren't supported by P2P call and in general requires a +Selective Forwarding Unit (SFU).
So special logic is applied to mimic SFU +behavior in P2P call. This logic is located inside [`SignalingInterceptor`][21], +[`QualityAnalyzingVideoEncoder`][22] and [`QualityAnalyzingVideoDecoder`][23] +and consist of SDP modification during offer/answer exchange and special +handling of video frames from unrelated Simulcast/SVC streams during decoding. + +### Simulcast + +In case of Simulcast we have a video track, which internally contains multiple +video streams, for example low resolution, medium resolution and high +resolution. WebRTC client doesn't support receiving an offer with multiple +streams in it, because usually SFU will keep only single stream for the client. +To bypass it framework will modify offer by converting a single track with three +video streams into three independent video tracks. Then sender will think that +it send simulcast, but receiver will think that it receives 3 independent +tracks. + +To achieve such behavior some extra tweaks are required: + +* MID RTP header extension from original offer have to be removed +* RID RTP header extension from original offer is replaced with MID RTP header + extension, so the ID that sender uses for RID on receiver will be parsed as + MID. +* Answer have to be modified in the opposite way. + +Described modifications are illustrated on the picture below. + +![VP8 Simulcast offer modification](vp8_simulcast_offer_modification.png "VP8 Simulcast offer modification") + +The exchange will look like this: + +1. Alice creates an offer +2. Alice sets offer as local description +3. Do described offer modification +4. Alice sends modified offer to Bob +5. Bob sets modified offer as remote description +6. Bob creates answer +7. Bob sets answer as local description +8. Do reverse modifications on answer +9. Bob sends modified answer to Alice +10. 
Alice sets modified answer as remote description + +Such mechanism put a constraint that RTX streams are not supported, because they +don't have RID RTP header extension in their packets. + +### SVC + +In case of SVC the framework will update the sender's offer before even setting +it as local description on the sender side. Then no changes to answer will be +required. + +`ssrc` is a 32 bit random value that is generated in RTP to denote a specific +source used to send media in an RTP connection. In original offer video track +section will look like this: + +``` +m=video 9 UDP/TLS/RTP/SAVPF 98 100 99 101 +... +a=ssrc-group:FID +a=ssrc: cname:... +.... +a=ssrc: cname:... +.... +``` + +To enable SVC for such video track framework will add extra `ssrc`s for each SVC +stream that is required like this: + +``` +a=ssrc-group:FID +a=ssrc: cname:... +.... +a=ssrc: cname:.... +... +a=ssrc-group:FID +a=ssrc: cname:... +.... +a=ssrc: cname:.... +... +a=ssrc-group:FID +a=ssrc: cname:... +.... +a=ssrc: cname:.... +... +``` + +The next line will also be added to the video track section of the offer: + +``` +a=ssrc-group:SIM +``` + +It will tell PeerConnection that this track should be configured as SVC. It +utilize WebRTC Plan B offer structure to achieve SVC behavior, also it modifies +offer before setting it as local description which violates WebRTC standard. +Also it adds limitations that on lossy networks only top resolution streams can +be analyzed, because WebRTC won't try to restore low resolution streams in case +of loss, because it still receives higher stream. + +### Handling in encoder/decoder + +In the encoder, the framework for each encoded video frame will propagate +information requried for the fake SFU to know if it belongs to an interesting +simulcast stream/spatial layer of if it should be "discarded". 
+ +On the decoder side frames that should be "discarded" by fake SFU will be auto +decoded into single pixel images and only the interesting simulcast +stream/spatial layer will go into real decoder and then will be analyzed. + +[1]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/peerconnection_quality_test_fixture.h;l=55;drc=484acf27231d931dbc99aedce85bc27e06486b96 +[2]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/peer_connection_quality_test.h;l=44;drc=6cc893ad778a0965e2b7a8e614f3c98aa81bee5b +[3]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/media/media_helper.h;l=27;drc=d46db9f1523ae45909b4a6fdc90a140443068bc6 +[4]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.h;l=38;drc=79020414fd5c71f9ec1f25445ea5f1c8001e1a49 +[5]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.h;l=40;drc=79020414fd5c71f9ec1f25445ea5f1c8001e1a49 +[6]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/audio_quality_analyzer_interface.h;l=23;drc=20f45823e37fd7272aa841831c029c21f29742c2 +[7]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/test_activities_executor.h;l=28;drc=6cc893ad778a0965e2b7a8e614f3c98aa81bee5b +[8]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/peerconnection_quality_test_fixture.h;l=439;drc=484acf27231d931dbc99aedce85bc27e06486b96 +[9]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/peerconnection_quality_test_fixture.h;l=445;drc=484acf27231d931dbc99aedce85bc27e06486b96 +[10]: 
https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/peerconnection_quality_test_fixture.h;l=413;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[11]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/test_activities_executor.h;l=28;drc=6cc893ad778a0965e2b7a8e614f3c98aa81bee5b +[12]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/test_activities_executor.h;l=28;drc=6cc893ad778a0965e2b7a8e614f3c98aa81bee5b +[13]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/test_peer_factory.h;l=46;drc=0ef4a2488a466a24ab97b31fdddde55440d451f9 +[14]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/pc/peer_connection_wrapper.h;l=47;drc=5ab79e62f691875a237ea28ca3975ea1f0ed62ec +[15]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/peerconnection_quality_test_fixture.h;l=459;drc=484acf27231d931dbc99aedce85bc27e06486b96 +[16]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/peer_connection_interface.h;l=886;drc=9438fb3fff97c803d1ead34c0e4f223db168526f +[17]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/cross_media_metrics_reporter.h;l=29;drc=9d777620236ec76754cfce19f6e82dd18e52d22c +[18]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/cross_media_metrics_reporter.h;l=29;drc=9d777620236ec76754cfce19f6e82dd18e52d22c +[19]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/peerconnection_quality_test_fixture.h;l=450;drc=484acf27231d931dbc99aedce85bc27e06486b96 +[20]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/stats_poller.h;l=52;drc=9b526180c9e9722d3fc7f8689da6ec094fc7fc0a +[21]: 
https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/sdp/sdp_changer.h;l=79;drc=ee558dcca89fd8b105114ededf9e74d948da85e8 +[22]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/quality_analyzing_video_encoder.h;l=54;drc=79020414fd5c71f9ec1f25445ea5f1c8001e1a49 +[23]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/quality_analyzing_video_decoder.h;l=50;drc=79020414fd5c71f9ec1f25445ea5f1c8001e1a49 +[24]: /test/network/g3doc/index.md diff --git a/test/pc/e2e/g3doc/default_video_quality_analyzer.md b/test/pc/e2e/g3doc/default_video_quality_analyzer.md new file mode 100644 index 0000000000..532226e350 --- /dev/null +++ b/test/pc/e2e/g3doc/default_video_quality_analyzer.md @@ -0,0 +1,196 @@ + + +# DefaultVideoQualityAnalyzer + +## Audience + +This document is for users of +[`webrtc::webrtc_pc_e2e::DefaultVideoQualityAnalyzer`][1]. + +## Overview + +`DefaultVideoQualityAnalyzer` implements +[`webrtc::webrtc_pc_e2e::VideoQualityAnalyzerInterface`][2] and is a main +implementation of video quality analyzer for WebRTC. To operate correctly it +requires to receive video frame on each step: + +1. On frame captured - analyzer will generate a unique ID for the frame, that + caller should attach to the it. +2. Immediately before frame enter the encoder. +3. Immediately after the frame was encoded. +4. After the frame was received and immediately before it entered the decoder. +5. Immediately after the frame was decoded. +6. When the frame was rendered. + +![VideoQualityAnalyzerInterface pipeline](video_quality_analyzer_pipeline.png "VideoQualityAnalyzerInterface pipeline") + +The analyzer updates its internal metrics per frame when it was rendered and +reports all of them after it was stopped through +[WebRTC perf results reporting system][10]. 
+ +To properly inject `DefaultVideoQualityAnalyzer` into pipeline the following helpers can be used: + +### VideoQualityAnalyzerInjectionHelper + +[`webrtc::webrtc_pc_e2e::VideoQualityAnalyzerInjectionHelper`][3] provides +factory methods for components, that will be used to inject +`VideoQualityAnalyzerInterface` into the `PeerConnection` pipeline: + +* Wrappers for [`webrtc::VideoEncoderFactory`][4] and + [`webrtc::VideoDecodeFactory`][5] which will properly pass + [`webrtc::VideoFrame`][6] and [`webrtc::EncodedImage`][7] into analyzer + before and after real video encode and decoder. +* [`webrtc::test::TestVideoCapturer::FramePreprocessor`][8] which is used to + pass generated frames into analyzer on capturing and then set the returned + frame ID. It also configures dumping of captured frames if required. +* [`rtc::VideoSinkInterface`][9] which is used to pass frames to + the analyzer before they will be rendered to compute per frame metrics. It + also configures dumping of rendered video if required. + +Besides factories `VideoQualityAnalyzerInjectionHelper` has method to +orchestrate `VideoQualityAnalyzerInterface` workflow: + +* `Start` - to start video analyzer, so it will be able to receive and analyze + video frames. +* `RegisterParticipantInCall` - to add new participants after analyzer was + started. +* `Stop` - to stop analyzer, compute all metrics for frames that were received + before and report them. + +Also `VideoQualityAnalyzerInjectionHelper` implements +[`webrtc::webrtc_pc_e2e::StatsObserverInterface`][11] to propagate WebRTC stats +to `VideoQualityAnalyzerInterface`. + +### EncodedImageDataInjector and EncodedImageDataExtractor + +[`webrtc::webrtc_pc_e2e::EncodedImageDataInjector`][14] and +[`webrtc::webrtc_pc_e2e::EncodedImageDataExtractor`][15] are used to inject and +extract data into `webrtc::EncodedImage` to propagate frame ID and other +required information through the network.
+ +By default [`webrtc::webrtc_pc_e2e::SingleProcessEncodedImageDataInjector`][16] +is used. It assumes `webrtc::EncodedImage` payload as black box which is +remaining unchanged from encoder to decoder and stores the information required +for its work in the last 3 bytes of the payload, replacing the original data +during injection and restoring it back during extraction. Also +`SingleProcessEncodedImageDataInjector` requires that sender and receiver were +inside single process. + +![SingleProcessEncodedImageDataInjector](single_process_encoded_image_data_injector.png "SingleProcessEncodedImageDataInjector") + +## Exported metrics + +Exported metrics are reported to WebRTC perf results reporting system. + +### General + +* *`cpu_usage`* - CPU usage excluding video analyzer + +### Video + +* *`psnr`* - peak signal-to-noise ratio: + [wikipedia](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio) +* *`ssim`* - structural similarity: + [wikipedia](https://en.wikipedia.org/wiki/Structural_similarity). +* *`min_psnr`* - minimum value of psnr across all frames of video stream. +* *`encode_time`* - time to encode a single frame. +* *`decode_time`* - time to decode a single frame. +* *`transport_time`* - time from frame encoded to frame received for decoding. +* *`receive_to_render_time`* - time from frame received for decoding to frame + rendered. +* *`total_delay_incl_transport`* - time from frame was captured on device to + time when frame was displayed on device. +* *`encode_frame_rate`* - frame rate after encoder. +* *`harmonic_framerate`* - video duration divided on squared sum of interframe + delays. Reflects render frame rate penalized by freezes. +* *`time_between_rendered_frames`* - time between frames out to renderer. +* *`dropped_frames`* - amount of frames that were sent, but weren't rendered + and are known not to be “on the way” from sender to receiver. 
+ +Freeze is a pause when no new frames from decoder arrived for 150ms + avg time +between frames or 3 * avg time between frames. + +* *`time_between_freezes`* - mean time from previous freeze end to new freeze + start. +* *`freeze_time_ms`* - total freeze time in ms. +* *`max_skipped`* - frames skipped between two nearest rendered. +* *`pixels_per_frame`* - amount of pixels on frame (width * height). +* *`target_encode_bitrate`* - target encode bitrate provided by BWE to + encoder. +* *`actual_encode_bitrate -`* - actual encode bitrate produced by encoder. +* *`available_send_bandwidth -`* - available send bandwidth estimated by BWE. +* *`transmission_bitrate`* - bitrate of media in the emulated network, not + counting retransmissions FEC, and RTCP messages +* *`retransmission_bitrate`* - bitrate of retransmission streams only. + +### Framework stability + +* *`frames_in_flight`* - amount of frames that were captured but wasn't seen + on receiver. + +## Debug metrics + +Debug metrics are not reported to WebRTC perf results reporting system, but are +available through `DefaultVideoQualityAnalyzer` API. + +### [FrameCounters][12] + +Frame counters consist of next counters: + +* *`captured`* - count of frames, that were passed into WebRTC pipeline by + video stream source +* *`pre_encoded`* - count of frames that reached video encoder. +* *`encoded`* - count of encoded images that were produced by encoder for all + requested spatial layers and simulcast streams. +* *`received`* - count of encoded images received in decoder for all requested + spatial layers and simulcast streams. +* *`decoded`* - count of frames that were produced by decoder. +* *`rendered`* - count of frames that went out from WebRTC pipeline to video + sink. +* *`dropped`* - count of frames that were dropped in any point between + capturing and rendering. 
+ +`DefaultVideoQualityAnalyzer` exports these frame counters: + +* *`GlobalCounters`* - frame counters for frames met on each stage of analysis + for all media streams. +* *`PerStreamCounters`* - frame counters for frames met on each stage of + analysis separated per individual video track (single media section in the + SDP offer). + +### [AnalyzerStats][13] + +Contains metrics about internal state of video analyzer during its work + +* *`comparisons_queue_size`* - size of analyzer internal queue used to perform + captured and rendered frames comparisons measured when new element is added + to the queue. +* *`comparisons_done`* - number of performed comparisons of 2 video frames + from captured and rendered streams. +* *`cpu_overloaded_comparisons_done`* - number of cpu overloaded comparisons. + Comparison is cpu overloaded if it is queued when there are too many not + processed comparisons in the queue. Overloaded comparison doesn't include + metrics like SSIM and PSNR that require heavy computations. +* *`memory_overloaded_comparisons_done`* - number of memory overloaded + comparisons. Comparison is memory overloaded if it is queued when its + captured frame was already removed due to high memory usage for that video + stream. +* *`frames_in_flight_left_count`* - count of frames in flight in analyzer + measured when new comparison is added and after analyzer was stopped. 
+ +[1]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h;l=188;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[2]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/video_quality_analyzer_interface.h;l=56;drc=d7808f1c464a07c8f1e2f97ec7ee92fda998d590 +[3]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.h;l=39;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[4]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video_codecs/video_encoder_factory.h;l=27;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[5]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video_codecs/video_decoder_factory.h;l=27;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[6]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video/video_frame.h;l=30;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[7]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video/encoded_image.h;l=71;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[8]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/test_video_capturer.h;l=28;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[9]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video/video_sink_interface.h;l=19;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[10]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/testsupport/perf_test.h;drc=0710b401b1e5b500b8e84946fb657656ba1b58b7 +[11]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/stats_observer_interface.h;l=21;drc=9b526180c9e9722d3fc7f8689da6ec094fc7fc0a +[12]: 
https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h;l=57;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[13]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/default_video_quality_analyzer.h;l=113;drc=08f46909a8735cf181b99ef2f7e1791c5a7531d2 +[14]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/encoded_image_data_injector.h;l=23;drc=c57089a97a3df454f4356d882cc8df173e8b3ead +[15]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/encoded_image_data_injector.h;l=46;drc=c57089a97a3df454f4356d882cc8df173e8b3ead +[16]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/analyzer/video/single_process_encoded_image_data_injector.h;l=40;drc=c57089a97a3df454f4356d882cc8df173e8b3ead diff --git a/test/pc/e2e/g3doc/g3doc.lua b/test/pc/e2e/g3doc/g3doc.lua new file mode 100644 index 0000000000..981393c826 --- /dev/null +++ b/test/pc/e2e/g3doc/g3doc.lua @@ -0,0 +1,5 @@ +config = super() + +config.freshness.owner = 'titovartem' + +return config diff --git a/test/pc/e2e/g3doc/in_test_psnr_plot.png b/test/pc/e2e/g3doc/in_test_psnr_plot.png new file mode 100644 index 0000000000..3f36725727 Binary files /dev/null and b/test/pc/e2e/g3doc/in_test_psnr_plot.png differ diff --git a/test/pc/e2e/g3doc/index.md b/test/pc/e2e/g3doc/index.md new file mode 100644 index 0000000000..d676476ddc --- /dev/null +++ b/test/pc/e2e/g3doc/index.md @@ -0,0 +1,223 @@ + + +# PeerConnection Level Framework + +## API + +* [Fixture][1] +* [Fixture factory function][2] + +## Documentation + +The PeerConnection level framework is designed for end-to-end media quality +testing through the PeerConnection level public API. The framework uses the +*Unified plan* API to generate offers/answers during the signaling phase. 
The +framework also wraps the video encoder/decoder and inject it into +*`webrtc::PeerConnection`* to measure video quality, performing 1:1 frames +matching between captured and rendered frames without any extra requirements to +input video. For audio quality evaluation the standard `GetStats()` API from +PeerConnection is used. + +The framework API is located in the namespace *`webrtc::webrtc_pc_e2e`*. + +### Supported features + +* Single or bidirectional media in the call +* RTC Event log dump per peer +* AEC dump per peer +* Compatible with *`webrtc::TimeController`* for both real and simulated time +* Media + * AV sync +* Video + * Any amount of video tracks both from caller and callee sides + * Input video from + * Video generator + * Specified file + * Any instance of *`webrtc::test::FrameGeneratorInterface`* + * Dumping of captured/rendered video into file + * Screen sharing + * Vp8 simulcast from caller side + * Vp9 SVC from caller side + * Choosing of video codec (name and parameters), having multiple codecs + negotiated to support codec-switching testing. + * FEC (ULP or Flex) + * Forced codec overshooting (for encoder overshoot emulation on some + mobile devices, when hardware encoder can overshoot target bitrate) +* Audio + * Up to 1 audio track both from caller and callee sides + * Generated audio + * Audio from specified file + * Dumping of captured/rendered audio into file + * Parameterizing of `cricket::AudioOptions` + * Echo emulation +* Injection of various WebRTC components into underlying + *`webrtc::PeerConnection`* or *`webrtc::PeerConnectionFactory`*. 
You can see + the full list [here][11] +* Scheduling of events, that can happen during the test, for example: + * Changes in network configuration + * User statistics measurements + * Custom defined actions +* User defined statistics reporting via + *`webrtc::webrtc_pc_e2e::PeerConnectionE2EQualityTestFixture::QualityMetricsReporter`* + interface + +## Exported metrics + +### General + +* *`_connected`* - peer successfully established connection to + remote side +* *`cpu_usage`* - CPU usage excluding video analyzer +* *`audio_ahead_ms`* - Used to estimate how much audio and video is out of + sync when the two tracks were from the same source. Stats are polled + periodically during a call. The metric represents how much earlier was audio + played out on average over the call. If, during a stats poll, video is + ahead, then audio_ahead_ms will be equal to 0 for this poll. +* *`video_ahead_ms`* - Used to estimate how much audio and video is out of + sync when the two tracks were from the same source. Stats are polled + periodically during a call. The metric represents how much earlier was video + played out on average over the call. If, during a stats poll, audio is + ahead, then video_ahead_ms will be equal to 0 for this poll. + +### Video + +See documentation for +[*`DefaultVideoQualityAnalyzer`*](default_video_quality_analyzer.md#exported-metrics) + +### Audio + +* *`accelerate_rate`* - when playout is sped up, this counter is increased by + the difference between the number of samples received and the number of + samples played out. If speedup is achieved by removing samples, this will be + the count of samples removed. Rate is calculated as difference between + nearby samples divided on sample interval. +* *`expand_rate`* - the total number of samples that are concealed samples + over time. A concealed sample is a sample that was replaced with synthesized + samples generated locally before being played out. 
Examples of samples that + have to be concealed are samples from lost packets or samples from packets + that arrive too late to be played out +* *`speech_expand_rate`* - the total number of samples that are concealed + samples minus the total number of concealed samples inserted that are + "silent" over time. Playing out silent samples results in silence or comfort + noise. +* *`preemptive_rate`* - when playout is slowed down, this counter is increased + by the difference between the number of samples received and the number of + samples played out. If playout is slowed down by inserting samples, this + will be the number of inserted samples. Rate is calculated as difference + between nearby samples divided on sample interval. +* *`average_jitter_buffer_delay_ms`* - average size of NetEQ jitter buffer. +* *`preferred_buffer_size_ms`* - preferred size of NetEQ jitter buffer. +* *`visqol_mos`* - proxy for audio quality itself. +* *`asdm_samples`* - measure of how much acceleration/deceleration was in the + signal. +* *`word_error_rate`* - measure of how intelligible the audio was (percent of + words that could not be recognized in output audio). + +### Network + +* *`bytes_sent`* - represents the total number of payload bytes sent on this + PeerConnection, i.e., not including headers or padding +* *`packets_sent`* - represents the total number of packets sent over this + PeerConnection’s transports. +* *`average_send_rate`* - average send rate calculated on bytes_sent divided + by test duration. +* *`payload_bytes_sent`* - total number of bytes sent for all SSRC plus total + number of RTP header and padding bytes sent for all SSRC. This does not + include the size of transport layer headers such as IP or UDP. +* *`sent_packets_loss`* - packets_sent minus corresponding packets_received. +* *`bytes_received`* - represents the total number of bytes received on this + PeerConnection, i.e., not including headers or padding. 
+* *`packets_received`* - represents the total number of packets received on + this PeerConnection’s transports. +* *`average_receive_rate`* - average receive rate calculated on bytes_received + divided by test duration. +* *`payload_bytes_received`* - total number of bytes received for all SSRC + plus total number of RTP header and padding bytes received for all SSRC. + This does not include the size of transport layer headers such as IP or UDP. + +### Framework stability + +* *`frames_in_flight`* - amount of frames that were captured but wasn't seen + on receiver in the way that also all frames after also weren't seen on + receiver. +* *`bytes_discarded_no_receiver`* - total number of bytes that were received + on network interfaces related to the peer, but destination port was closed. +* *`packets_discarded_no_receiver`* - total number of packets that were + received on network interfaces related to the peer, but destination port was + closed. + +## Examples + +Examples can be found in + +* [peer_connection_e2e_smoke_test.cc][3] +* [pc_full_stack_tests.cc][4] + +## Stats plotting + +### Description + +Stats plotting provides ability to plot statistic collected during the test. +Right now it is used in PeerConnection level framework and give ability to see +how video quality metrics changed during test execution. + +### Usage + +To make any metrics plottable you need: + +1. Collect metric data with [SamplesStatsCounter][5] which internally will + store all intermediate points and timestamps when these points were added. +2. Then you need to report collected data with + [`webrtc::test::PrintResult(...)`][6]. By using these method you will also + specify name of the plottable metric. + +After these steps it will be possible to export your metric for plotting. There +are several options how you can do this: + +1. Use [`webrtc::TestMain::Create()`][7] as `main` function implementation, for + example use [`test/test_main.cc`][8] as `main` function for your test. 
+ + In such case your binary will have flag `--plot`, where you can provide a + list of metrics, that you want to plot or specify `all` to plot all + available metrics. + + If `--plot` is specified, the binary will output metrics data into `stdout`. + Then you need to pipe this `stdout` into python plotter script + [`rtc_tools/metrics_plotter.py`][9], which will plot data. + + Examples: + + ```shell + $ ./out/Default/test_support_unittests \ + --gtest_filter=PeerConnectionE2EQualityTestSmokeTest.Svc \ + --nologs \ + --plot=all \ + | python rtc_tools/metrics_plotter.py + ``` + + ```shell + $ ./out/Default/test_support_unittests \ + --gtest_filter=PeerConnectionE2EQualityTestSmokeTest.Svc \ + --nologs \ + --plot=psnr,ssim \ + | python rtc_tools/metrics_plotter.py + ``` + + Example chart: ![PSNR changes during the test](in_test_psnr_plot.png) + +2. Use API from [`test/testsupport/perf_test.h`][10] directly by invoking + `webrtc::test::PrintPlottableResults(const std::vector& + desired_graphs)` to print plottable metrics to stdout. Then as in previous + option you need to pipe result into plotter script. 
+ +[1]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/peerconnection_quality_test_fixture.h;drc=cbe6e8a2589a925d4c91a2ac2c69201f03de9c39 +[2]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/create_peerconnection_quality_test_fixture.h;drc=cbe6e8a2589a925d4c91a2ac2c69201f03de9c39 +[3]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/pc/e2e/peer_connection_e2e_smoke_test.cc;drc=cbe6e8a2589a925d4c91a2ac2c69201f03de9c39 +[4]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/video/pc_full_stack_tests.cc;drc=cbe6e8a2589a925d4c91a2ac2c69201f03de9c39 +[5]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/numerics/samples_stats_counter.h;drc=cbe6e8a2589a925d4c91a2ac2c69201f03de9c39 +[6]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/testsupport/perf_test.h;l=86;drc=0710b401b1e5b500b8e84946fb657656ba1b58b7 +[7]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/test_main_lib.h;l=23;drc=bcb42f1e4be136c390986a40d9d5cb3ad0de260b +[8]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/test_main.cc;drc=bcb42f1e4be136c390986a40d9d5cb3ad0de260b +[9]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/rtc_tools/metrics_plotter.py;drc=8cc6695652307929edfc877cd64b75cd9ec2d615 +[10]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/test/testsupport/perf_test.h;l=105;drc=0710b401b1e5b500b8e84946fb657656ba1b58b7 +[11]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/test/peerconnection_quality_test_fixture.h;l=272;drc=484acf27231d931dbc99aedce85bc27e06486b96 diff --git a/test/pc/e2e/g3doc/single_process_encoded_image_data_injector.png b/test/pc/e2e/g3doc/single_process_encoded_image_data_injector.png new file mode 100644 
index 0000000000..73480bafbe Binary files /dev/null and b/test/pc/e2e/g3doc/single_process_encoded_image_data_injector.png differ diff --git a/test/pc/e2e/g3doc/video_quality_analyzer_pipeline.png b/test/pc/e2e/g3doc/video_quality_analyzer_pipeline.png new file mode 100644 index 0000000000..6cddb91110 Binary files /dev/null and b/test/pc/e2e/g3doc/video_quality_analyzer_pipeline.png differ diff --git a/test/pc/e2e/g3doc/vp8_simulcast_offer_modification.png b/test/pc/e2e/g3doc/vp8_simulcast_offer_modification.png new file mode 100644 index 0000000000..c7eaa04c0e Binary files /dev/null and b/test/pc/e2e/g3doc/vp8_simulcast_offer_modification.png differ diff --git a/test/pc/e2e/media/media_helper.cc b/test/pc/e2e/media/media_helper.cc index d3fa6ffe03..6b1996adaa 100644 --- a/test/pc/e2e/media/media_helper.cc +++ b/test/pc/e2e/media/media_helper.cc @@ -58,13 +58,13 @@ MediaHelper::MaybeAddVideo(TestPeer* peer) { std::unique_ptr capturer = CreateVideoCapturer( video_config, peer->ReleaseVideoSource(i), video_quality_analyzer_injection_helper_->CreateFramePreprocessor( - video_config)); + params->name.value(), video_config)); bool is_screencast = video_config.content_hint == VideoTrackInterface::ContentHint::kText || video_config.content_hint == VideoTrackInterface::ContentHint::kDetailed; rtc::scoped_refptr source = - new rtc::RefCountedObject( + rtc::make_ref_counted( std::move(capturer), is_screencast); out.push_back(source); RTC_LOG(INFO) << "Adding video with video_config.stream_label=" diff --git a/test/pc/e2e/media/media_helper.h b/test/pc/e2e/media/media_helper.h index 8b36646a0b..4e977e3002 100644 --- a/test/pc/e2e/media/media_helper.h +++ b/test/pc/e2e/media/media_helper.h @@ -28,8 +28,9 @@ class MediaHelper { public: MediaHelper(VideoQualityAnalyzerInjectionHelper* video_quality_analyzer_injection_helper, - TaskQueueFactory* task_queue_factory) - : clock_(Clock::GetRealTimeClock()), + TaskQueueFactory* task_queue_factory, + Clock* clock) + : clock_(clock), 
task_queue_factory_(task_queue_factory), video_quality_analyzer_injection_helper_( video_quality_analyzer_injection_helper) {} diff --git a/test/pc/e2e/network_quality_metrics_reporter.cc b/test/pc/e2e/network_quality_metrics_reporter.cc index 56f0337037..513bdc0a5f 100644 --- a/test/pc/e2e/network_quality_metrics_reporter.cc +++ b/test/pc/e2e/network_quality_metrics_reporter.cc @@ -11,7 +11,8 @@ #include -#include "api/stats_types.h" +#include "api/stats/rtc_stats.h" +#include "api/stats/rtcstats_objects.h" #include "rtc_base/event.h" #include "system_wrappers/include/field_trial.h" #include "test/testsupport/perf_test.h" @@ -28,67 +29,75 @@ constexpr int kStatsWaitTimeoutMs = 1000; constexpr char kUseStandardBytesStats[] = "WebRTC-UseStandardBytesStats"; } -void NetworkQualityMetricsReporter::Start(absl::string_view test_case_name) { +void NetworkQualityMetricsReporter::Start( + absl::string_view test_case_name, + const TrackIdStreamInfoMap* /*reporter_helper*/) { test_case_name_ = std::string(test_case_name); // Check that network stats are clean before test execution. 
- EmulatedNetworkStats alice_stats = PopulateStats(alice_network_); - RTC_CHECK_EQ(alice_stats.packets_sent, 0); - RTC_CHECK_EQ(alice_stats.packets_received, 0); - EmulatedNetworkStats bob_stats = PopulateStats(bob_network_); - RTC_CHECK_EQ(bob_stats.packets_sent, 0); - RTC_CHECK_EQ(bob_stats.packets_received, 0); + std::unique_ptr alice_stats = + PopulateStats(alice_network_); + RTC_CHECK_EQ(alice_stats->PacketsSent(), 0); + RTC_CHECK_EQ(alice_stats->PacketsReceived(), 0); + std::unique_ptr bob_stats = PopulateStats(bob_network_); + RTC_CHECK_EQ(bob_stats->PacketsSent(), 0); + RTC_CHECK_EQ(bob_stats->PacketsReceived(), 0); } void NetworkQualityMetricsReporter::OnStatsReports( - const std::string& pc_label, - const StatsReports& reports) { - rtc::CritScope cs(&lock_); - int64_t payload_bytes_received = 0; - int64_t payload_bytes_sent = 0; - for (const StatsReport* report : reports) { - if (report->type() == StatsReport::kStatsReportTypeSsrc) { - const auto* received = - report->FindValue(StatsReport::kStatsValueNameBytesReceived); - if (received) { - payload_bytes_received += received->int64_val(); - } - const auto* sent = - report->FindValue(StatsReport::kStatsValueNameBytesSent); - if (sent) { - payload_bytes_sent += sent->int64_val(); - } - } + absl::string_view pc_label, + const rtc::scoped_refptr& report) { + DataSize payload_received = DataSize::Zero(); + DataSize payload_sent = DataSize::Zero(); + + auto inbound_stats = report->GetStatsOfType(); + for (const auto& stat : inbound_stats) { + payload_received += + DataSize::Bytes(stat->bytes_received.ValueOrDefault(0ul) + + stat->header_bytes_received.ValueOrDefault(0ul)); } - PCStats& stats = pc_stats_[pc_label]; - stats.payload_bytes_received = payload_bytes_received; - stats.payload_bytes_sent = payload_bytes_sent; + + auto outbound_stats = report->GetStatsOfType(); + for (const auto& stat : outbound_stats) { + payload_sent += + DataSize::Bytes(stat->bytes_sent.ValueOrDefault(0ul) + + 
stat->header_bytes_sent.ValueOrDefault(0ul)); + } + + MutexLock lock(&lock_); + PCStats& stats = pc_stats_[std::string(pc_label)]; + stats.payload_received = payload_received; + stats.payload_sent = payload_sent; } void NetworkQualityMetricsReporter::StopAndReportResults() { - EmulatedNetworkStats alice_stats = PopulateStats(alice_network_); - EmulatedNetworkStats bob_stats = PopulateStats(bob_network_); - ReportStats("alice", alice_stats, - alice_stats.packets_sent - bob_stats.packets_received); - ReportStats("bob", bob_stats, - bob_stats.packets_sent - alice_stats.packets_received); + std::unique_ptr alice_stats = + PopulateStats(alice_network_); + std::unique_ptr bob_stats = PopulateStats(bob_network_); + int64_t alice_packets_loss = + alice_stats->PacketsSent() - bob_stats->PacketsReceived(); + int64_t bob_packets_loss = + bob_stats->PacketsSent() - alice_stats->PacketsReceived(); + ReportStats("alice", std::move(alice_stats), alice_packets_loss); + ReportStats("bob", std::move(bob_stats), bob_packets_loss); if (!webrtc::field_trial::IsEnabled(kUseStandardBytesStats)) { RTC_LOG(LS_ERROR) << "Non-standard GetStats; \"payload\" counts include RTP headers"; } - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); for (const auto& pair : pc_stats_) { ReportPCStats(pair.first, pair.second); } } -EmulatedNetworkStats NetworkQualityMetricsReporter::PopulateStats( +std::unique_ptr +NetworkQualityMetricsReporter::PopulateStats( EmulatedNetworkManagerInterface* network) { rtc::Event wait; - EmulatedNetworkStats stats; - network->GetStats([&](const EmulatedNetworkStats& s) { - stats = s; + std::unique_ptr stats; + network->GetStats([&](std::unique_ptr s) { + stats = std::move(s); wait.Set(); }); bool stats_received = wait.Wait(kStatsWaitTimeoutMs); @@ -98,26 +107,26 @@ EmulatedNetworkStats NetworkQualityMetricsReporter::PopulateStats( void NetworkQualityMetricsReporter::ReportStats( const std::string& network_label, - const EmulatedNetworkStats& stats, + std::unique_ptr 
stats, int64_t packet_loss) { - ReportResult("bytes_sent", network_label, stats.bytes_sent.bytes(), + ReportResult("bytes_sent", network_label, stats->BytesSent().bytes(), "sizeInBytes"); - ReportResult("packets_sent", network_label, stats.packets_sent, "unitless"); + ReportResult("packets_sent", network_label, stats->PacketsSent(), "unitless"); ReportResult( "average_send_rate", network_label, - stats.packets_sent >= 2 ? stats.AverageSendRate().bytes_per_sec() : 0, + stats->PacketsSent() >= 2 ? stats->AverageSendRate().bytes_per_sec() : 0, "bytesPerSecond"); - ReportResult("bytes_dropped", network_label, stats.bytes_dropped.bytes(), + ReportResult("bytes_discarded_no_receiver", network_label, + stats->BytesDropped().bytes(), "sizeInBytes"); + ReportResult("packets_discarded_no_receiver", network_label, + stats->PacketsDropped(), "unitless"); + ReportResult("bytes_received", network_label, stats->BytesReceived().bytes(), "sizeInBytes"); - ReportResult("packets_dropped", network_label, stats.packets_dropped, - "unitless"); - ReportResult("bytes_received", network_label, stats.bytes_received.bytes(), - "sizeInBytes"); - ReportResult("packets_received", network_label, stats.packets_received, + ReportResult("packets_received", network_label, stats->PacketsReceived(), "unitless"); ReportResult("average_receive_rate", network_label, - stats.packets_received >= 2 - ? stats.AverageReceiveRate().bytes_per_sec() + stats->PacketsReceived() >= 2 + ? 
stats->AverageReceiveRate().bytes_per_sec() : 0, "bytesPerSecond"); ReportResult("sent_packets_loss", network_label, packet_loss, "unitless"); @@ -125,9 +134,9 @@ void NetworkQualityMetricsReporter::ReportStats( void NetworkQualityMetricsReporter::ReportPCStats(const std::string& pc_label, const PCStats& stats) { - ReportResult("payload_bytes_received", pc_label, stats.payload_bytes_received, - "sizeInBytes"); - ReportResult("payload_bytes_sent", pc_label, stats.payload_bytes_sent, + ReportResult("payload_bytes_received", pc_label, + stats.payload_received.bytes(), "sizeInBytes"); + ReportResult("payload_bytes_sent", pc_label, stats.payload_sent.bytes(), "sizeInBytes"); } diff --git a/test/pc/e2e/network_quality_metrics_reporter.h b/test/pc/e2e/network_quality_metrics_reporter.h index 6454f17526..50c36234a5 100644 --- a/test/pc/e2e/network_quality_metrics_reporter.h +++ b/test/pc/e2e/network_quality_metrics_reporter.h @@ -11,11 +11,15 @@ #ifndef TEST_PC_E2E_NETWORK_QUALITY_METRICS_REPORTER_H_ #define TEST_PC_E2E_NETWORK_QUALITY_METRICS_REPORTER_H_ +#include #include +#include "absl/strings/string_view.h" #include "api/test/network_emulation_manager.h" #include "api/test/peerconnection_quality_test_fixture.h" -#include "rtc_base/critical_section.h" +#include "api/test/track_id_stream_info_map.h" +#include "api/units/data_size.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { namespace webrtc_pc_e2e { @@ -29,23 +33,25 @@ class NetworkQualityMetricsReporter ~NetworkQualityMetricsReporter() override = default; // Network stats must be empty when this method will be invoked. 
- void Start(absl::string_view test_case_name) override; - void OnStatsReports(const std::string& pc_label, - const StatsReports& reports) override; + void Start(absl::string_view test_case_name, + const TrackIdStreamInfoMap* reporter_helper) override; + void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) override; void StopAndReportResults() override; private: struct PCStats { // TODO(nisse): Separate audio and video counters. Depends on standard stat // counters, enabled by field trial "WebRTC-UseStandardBytesStats". - int64_t payload_bytes_received = 0; - int64_t payload_bytes_sent = 0; + DataSize payload_received = DataSize::Zero(); + DataSize payload_sent = DataSize::Zero(); }; - static EmulatedNetworkStats PopulateStats( + static std::unique_ptr PopulateStats( EmulatedNetworkManagerInterface* network); void ReportStats(const std::string& network_label, - const EmulatedNetworkStats& stats, + std::unique_ptr stats, int64_t packet_loss); void ReportPCStats(const std::string& pc_label, const PCStats& stats); void ReportResult(const std::string& metric_name, @@ -58,7 +64,7 @@ class NetworkQualityMetricsReporter EmulatedNetworkManagerInterface* alice_network_; EmulatedNetworkManagerInterface* bob_network_; - rtc::CriticalSection lock_; + Mutex lock_; std::map pc_stats_ RTC_GUARDED_BY(lock_); }; diff --git a/test/pc/e2e/peer_configurer.cc b/test/pc/e2e/peer_configurer.cc index d1d5b7f8d7..18570c2c6b 100644 --- a/test/pc/e2e/peer_configurer.cc +++ b/test/pc/e2e/peer_configurer.cc @@ -107,6 +107,8 @@ void ValidateParams( std::set peer_names; std::set video_labels; std::set audio_labels; + std::set video_sync_groups; + std::set audio_sync_groups; int media_streams_count = 0; for (size_t i = 0; i < peers.size(); ++i) { @@ -123,7 +125,8 @@ void ValidateParams( } media_streams_count += p->video_configs.size(); - // Validate that all video stream labels are unique. 
+ // Validate that all video stream labels are unique and sync groups are + // valid. for (const VideoConfig& video_config : p->video_configs) { RTC_CHECK(video_config.stream_label); bool inserted = @@ -131,6 +134,26 @@ video_labels.insert(video_config.stream_label.value()).second; RTC_CHECK(inserted) << "Duplicate video_config.stream_label=" << video_config.stream_label.value(); + if (video_config.input_dump_file_name.has_value()) { + RTC_CHECK_GT(video_config.input_dump_sampling_modulo, 0) + << "video_config.input_dump_sampling_modulo must be greater than 0"; + } + if (video_config.output_dump_file_name.has_value()) { + RTC_CHECK_GT(video_config.output_dump_sampling_modulo, 0) + << "video_config.output_dump_sampling_modulo must be greater than 0"; + } + + // TODO(bugs.webrtc.org/4762): remove this check after synchronization of + // more than two streams is supported. + if (video_config.sync_group.has_value()) { + bool sync_group_inserted = + video_sync_groups.insert(video_config.sync_group.value()).second; + RTC_CHECK(sync_group_inserted) + << "Sync group shouldn't consist of more than two streams (one " + "video and one audio). Duplicate video_config.sync_group=" + << video_config.sync_group.value(); + } + if (video_config.simulcast_config) { if (video_config.simulcast_config->target_spatial_index) { RTC_CHECK_GE(*video_config.simulcast_config->target_spatial_index, 0); @@ -158,6 +181,17 @@ audio_labels.insert(p->audio_config->stream_label.value()).second; RTC_CHECK(inserted) << "Duplicate audio_config.stream_label=" << p->audio_config->stream_label.value(); + // TODO(bugs.webrtc.org/4762): remove this check after synchronization of + // more than two streams is supported. + if (p->audio_config->sync_group.has_value()) { + bool sync_group_inserted = + audio_sync_groups.insert(p->audio_config->sync_group.value()) + .second; + RTC_CHECK(sync_group_inserted) + << "Sync group shouldn't consist of more than two streams (one " + "video and one audio). 
Duplicate audio_config.sync_group=" + << p->audio_config->sync_group.value(); + } // Check that if mode input file name specified only if mode is kFile. if (p->audio_config.value().mode == AudioConfig::Mode::kGenerated) { RTC_CHECK(!p->audio_config.value().input_file_name); diff --git a/test/pc/e2e/peer_configurer.h b/test/pc/e2e/peer_configurer.h index 010ddcee82..422d3d7341 100644 --- a/test/pc/e2e/peer_configurer.h +++ b/test/pc/e2e/peer_configurer.h @@ -23,7 +23,6 @@ #include "api/task_queue/task_queue_factory.h" #include "api/test/create_peer_connection_quality_test_frame_generator.h" #include "api/test/peerconnection_quality_test_fixture.h" -#include "api/transport/media/media_transport_interface.h" #include "api/transport/network_control.h" #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_factory.h" @@ -86,12 +85,6 @@ class PeerConfigurerImpl final std::move(network_controller_factory); return this; } - PeerConfigurer* SetMediaTransportFactory( - std::unique_ptr media_transport_factory) override { - components_->pcf_dependencies->media_transport_factory = - std::move(media_transport_factory); - return this; - } PeerConfigurer* SetVideoEncoderFactory( std::unique_ptr video_encoder_factory) override { components_->pcf_dependencies->video_encoder_factory = @@ -170,9 +163,9 @@ class PeerConfigurerImpl final params_->rtc_configuration = std::move(configuration); return this; } - PeerConfigurer* SetBitrateParameters( - PeerConnectionInterface::BitrateParameters bitrate_params) override { - params_->bitrate_params = bitrate_params; + PeerConfigurer* SetBitrateSettings( + BitrateSettings bitrate_settings) override { + params_->bitrate_settings = bitrate_settings; return this; } diff --git a/test/pc/e2e/peer_connection_e2e_smoke_test.cc b/test/pc/e2e/peer_connection_e2e_smoke_test.cc index 8080d4bb0a..10f62835a9 100644 --- a/test/pc/e2e/peer_connection_e2e_smoke_test.cc +++ b/test/pc/e2e/peer_connection_e2e_smoke_test.cc @@ 
-23,7 +23,7 @@ #include "test/gtest.h" #include "test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.h" #include "test/pc/e2e/analyzer/video/default_video_quality_analyzer.h" -#include "test/pc/e2e/network_quality_metrics_reporter.h" +#include "test/pc/e2e/stats_based_network_quality_metrics_reporter.h" #include "test/testsupport/file_utils.h" namespace webrtc { @@ -46,88 +46,83 @@ class PeerConnectionE2EQualityTestSmokeTest : public ::testing::Test { using EchoEmulationConfig = PeerConnectionE2EQualityTestFixture::EchoEmulationConfig; - void RunTest(const std::string& test_case_name, - const RunParams& run_params, - rtc::FunctionView alice_configurer, - rtc::FunctionView bob_configurer) { - // Setup emulated network - std::unique_ptr network_emulation_manager = - CreateNetworkEmulationManager(); - - auto alice_network_behavior = - std::make_unique(BuiltInNetworkBehaviorConfig()); - SimulatedNetwork* alice_network_behavior_ptr = alice_network_behavior.get(); - EmulatedNetworkNode* alice_node = - network_emulation_manager->CreateEmulatedNode( - std::move(alice_network_behavior)); - EmulatedNetworkNode* bob_node = - network_emulation_manager->CreateEmulatedNode( - std::make_unique(BuiltInNetworkBehaviorConfig())); - auto* alice_endpoint = - network_emulation_manager->CreateEndpoint(EmulatedEndpointConfig()); + void SetUp() override { + network_emulation_ = CreateNetworkEmulationManager(); + auto video_quality_analyzer = std::make_unique( + network_emulation_->time_controller()->GetClock()); + video_quality_analyzer_ = video_quality_analyzer.get(); + fixture_ = CreatePeerConnectionE2EQualityTestFixture( + testing::UnitTest::GetInstance()->current_test_info()->name(), + *network_emulation_->time_controller(), + /*audio_quality_analyzer=*/nullptr, std::move(video_quality_analyzer)); + test::ScopedFieldTrials field_trials( + std::string(field_trial::GetFieldTrialString()) + + "WebRTC-UseStandardBytesStats/Enabled/"); + } + + std::pair + CreateNetwork() { + 
EmulatedNetworkNode* alice_node = network_emulation_->CreateEmulatedNode( + std::make_unique(BuiltInNetworkBehaviorConfig())); + EmulatedNetworkNode* bob_node = network_emulation_->CreateEmulatedNode( + std::make_unique(BuiltInNetworkBehaviorConfig())); + + EmulatedEndpoint* alice_endpoint = + network_emulation_->CreateEndpoint(EmulatedEndpointConfig()); EmulatedEndpoint* bob_endpoint = - network_emulation_manager->CreateEndpoint(EmulatedEndpointConfig()); - network_emulation_manager->CreateRoute(alice_endpoint, {alice_node}, - bob_endpoint); - network_emulation_manager->CreateRoute(bob_endpoint, {bob_node}, - alice_endpoint); - - // Create analyzers. - std::unique_ptr video_quality_analyzer = - std::make_unique(); - // This is only done for the sake of smoke testing. In general there should - // be no need to explicitly pull data from analyzers after the run. - auto* video_analyzer_ptr = - static_cast(video_quality_analyzer.get()); - - auto fixture = CreatePeerConnectionE2EQualityTestFixture( - test_case_name, /*audio_quality_analyzer=*/nullptr, - std::move(video_quality_analyzer)); - fixture->ExecuteAt(TimeDelta::Seconds(2), - [alice_network_behavior_ptr](TimeDelta) { - BuiltInNetworkBehaviorConfig config; - config.loss_percent = 5; - alice_network_behavior_ptr->SetConfig(config); - }); - - // Setup components. We need to provide rtc::NetworkManager compatible with - // emulated network layer. 
+ network_emulation_->CreateEndpoint(EmulatedEndpointConfig()); + + network_emulation_->CreateRoute(alice_endpoint, {alice_node}, bob_endpoint); + network_emulation_->CreateRoute(bob_endpoint, {bob_node}, alice_endpoint); + EmulatedNetworkManagerInterface* alice_network = - network_emulation_manager->CreateEmulatedNetworkManagerInterface( + network_emulation_->CreateEmulatedNetworkManagerInterface( {alice_endpoint}); EmulatedNetworkManagerInterface* bob_network = - network_emulation_manager->CreateEmulatedNetworkManagerInterface( + network_emulation_->CreateEmulatedNetworkManagerInterface( {bob_endpoint}); - fixture->AddPeer(alice_network->network_thread(), - alice_network->network_manager(), alice_configurer); - fixture->AddPeer(bob_network->network_thread(), - bob_network->network_manager(), bob_configurer); - fixture->AddQualityMetricsReporter( - std::make_unique(alice_network, - bob_network)); + return std::make_pair(alice_network, bob_network); + } - fixture->Run(run_params); + void AddPeer(EmulatedNetworkManagerInterface* network, + rtc::FunctionView configurer) { + fixture_->AddPeer(network->network_thread(), network->network_manager(), + configurer); + } - EXPECT_GE(fixture->GetRealTestDuration(), run_params.run_duration); - for (auto stream_label : video_analyzer_ptr->GetKnownVideoStreams()) { + void RunAndCheckEachVideoStreamReceivedFrames(const RunParams& run_params) { + fixture_->Run(run_params); + + EXPECT_GE(fixture_->GetRealTestDuration(), run_params.run_duration); + for (auto stream_key : video_quality_analyzer_->GetKnownVideoStreams()) { FrameCounters stream_conters = - video_analyzer_ptr->GetPerStreamCounters().at(stream_label); + video_quality_analyzer_->GetPerStreamCounters().at(stream_key); // On some devices the pipeline can be too slow, so we actually can't // force real constraints here. Lets just check, that at least 1 // frame passed whole pipeline. 
- int64_t expected_min_fps = run_params.run_duration.seconds() * 30; - EXPECT_GE(stream_conters.captured, expected_min_fps); - EXPECT_GE(stream_conters.pre_encoded, 1); - EXPECT_GE(stream_conters.encoded, 1); - EXPECT_GE(stream_conters.received, 1); - EXPECT_GE(stream_conters.decoded, 1); - EXPECT_GE(stream_conters.rendered, 1); + int64_t expected_min_fps = run_params.run_duration.seconds() * 15; + EXPECT_GE(stream_conters.captured, expected_min_fps) + << stream_key.ToString(); + EXPECT_GE(stream_conters.pre_encoded, 1) << stream_key.ToString(); + EXPECT_GE(stream_conters.encoded, 1) << stream_key.ToString(); + EXPECT_GE(stream_conters.received, 1) << stream_key.ToString(); + EXPECT_GE(stream_conters.decoded, 1) << stream_key.ToString(); + EXPECT_GE(stream_conters.rendered, 1) << stream_key.ToString(); } } -}; -} // namespace + NetworkEmulationManager* network_emulation() { + return network_emulation_.get(); + } + + PeerConnectionE2EQualityTestFixture* fixture() { return fixture_.get(); } + + private: + std::unique_ptr network_emulation_; + DefaultVideoQualityAnalyzer* video_quality_analyzer_; + std::unique_ptr fixture_; +}; // IOS debug builds can be quite slow, disabling to avoid issues with timeouts. 
#if defined(WEBRTC_IOS) && defined(WEBRTC_ARCH_ARM64) && !defined(NDEBUG) @@ -136,41 +131,129 @@ class PeerConnectionE2EQualityTestSmokeTest : public ::testing::Test { #define MAYBE_Smoke Smoke #endif TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Smoke) { + std::pair + network_links = CreateNetwork(); + AddPeer(network_links.first, [](PeerConfigurer* alice) { + VideoConfig video(160, 120, 15); + video.stream_label = "alice-video"; + video.sync_group = "alice-media"; + alice->AddVideoConfig(std::move(video)); + + AudioConfig audio; + audio.stream_label = "alice-audio"; + audio.mode = AudioConfig::Mode::kFile; + audio.input_file_name = + test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); + audio.sampling_frequency_in_hz = 48000; + audio.sync_group = "alice-media"; + alice->SetAudioConfig(std::move(audio)); + }); + AddPeer(network_links.second, [](PeerConfigurer* charlie) { + charlie->SetName("charlie"); + VideoConfig video(160, 120, 15); + video.stream_label = "charlie-video"; + video.temporal_layers_count = 2; + charlie->AddVideoConfig(std::move(video)); + + AudioConfig audio; + audio.stream_label = "charlie-audio"; + audio.mode = AudioConfig::Mode::kFile; + audio.input_file_name = + test::ResourcePath("pc_quality_smoke_test_bob_source", "wav"); + charlie->SetAudioConfig(std::move(audio)); + }); + fixture()->AddQualityMetricsReporter( + std::make_unique( + std::map>( + {{"alice", network_links.first->endpoints()}, + {"charlie", network_links.second->endpoints()}}), + network_emulation())); RunParams run_params(TimeDelta::Seconds(2)); run_params.video_codecs = { VideoCodecConfig(cricket::kVp9CodecName, {{"profile-id", "0"}})}; run_params.use_flex_fec = true; run_params.use_ulp_fec = true; run_params.video_encoder_bitrate_multiplier = 1.1; - test::ScopedFieldTrials field_trials( - std::string(field_trial::GetFieldTrialString()) + - "WebRTC-UseStandardBytesStats/Enabled/"); - RunTest( - "smoke", run_params, - [](PeerConfigurer* alice) { - 
VideoConfig video(640, 360, 30); - video.stream_label = "alice-video"; - video.sync_group = "alice-media"; - alice->AddVideoConfig(std::move(video)); - - AudioConfig audio; - audio.stream_label = "alice-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); - audio.sampling_frequency_in_hz = 48000; - audio.sync_group = "alice-media"; - alice->SetAudioConfig(std::move(audio)); - }, - [](PeerConfigurer* charlie) { - charlie->SetName("charlie"); - VideoConfig video(640, 360, 30); - video.stream_label = "charlie-video"; - video.temporal_layers_count = 2; - charlie->AddVideoConfig(std::move(video)); - - VideoConfig screenshare(640, 360, 30); - screenshare.stream_label = "charlie-screenshare"; + RunAndCheckEachVideoStreamReceivedFrames(run_params); +} + +// IOS debug builds can be quite slow, disabling to avoid issues with timeouts. +#if defined(WEBRTC_IOS) && defined(WEBRTC_ARCH_ARM64) && !defined(NDEBUG) +#define MAYBE_ChangeNetworkConditions DISABLED_ChangeNetworkConditions +#else +#define MAYBE_ChangeNetworkConditions ChangeNetworkConditions +#endif +TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_ChangeNetworkConditions) { + NetworkEmulationManager::SimulatedNetworkNode alice_node = + network_emulation() + ->NodeBuilder() + .config(BuiltInNetworkBehaviorConfig()) + .Build(); + NetworkEmulationManager::SimulatedNetworkNode bob_node = + network_emulation() + ->NodeBuilder() + .config(BuiltInNetworkBehaviorConfig()) + .Build(); + + EmulatedEndpoint* alice_endpoint = + network_emulation()->CreateEndpoint(EmulatedEndpointConfig()); + EmulatedEndpoint* bob_endpoint = + network_emulation()->CreateEndpoint(EmulatedEndpointConfig()); + + network_emulation()->CreateRoute(alice_endpoint, {alice_node.node}, + bob_endpoint); + network_emulation()->CreateRoute(bob_endpoint, {bob_node.node}, + alice_endpoint); + + EmulatedNetworkManagerInterface* alice_network = + 
network_emulation()->CreateEmulatedNetworkManagerInterface( + {alice_endpoint}); + EmulatedNetworkManagerInterface* bob_network = + network_emulation()->CreateEmulatedNetworkManagerInterface( + {bob_endpoint}); + + AddPeer(alice_network, [](PeerConfigurer* alice) { + VideoConfig video(160, 120, 15); + video.stream_label = "alice-video"; + video.sync_group = "alice-media"; + alice->AddVideoConfig(std::move(video)); + }); + AddPeer(bob_network, [](PeerConfigurer* bob) {}); + fixture()->AddQualityMetricsReporter( + std::make_unique( + std::map>( + {{"alice", alice_network->endpoints()}, + {"bob", bob_network->endpoints()}}), + network_emulation())); + + fixture()->ExecuteAt(TimeDelta::Seconds(1), [alice_node](TimeDelta) { + BuiltInNetworkBehaviorConfig config; + config.loss_percent = 5; + alice_node.simulation->SetConfig(config); + }); + + RunParams run_params(TimeDelta::Seconds(2)); + run_params.video_codecs = { + VideoCodecConfig(cricket::kVp9CodecName, {{"profile-id", "0"}})}; + run_params.use_flex_fec = true; + run_params.use_ulp_fec = true; + run_params.video_encoder_bitrate_multiplier = 1.1; + RunAndCheckEachVideoStreamReceivedFrames(run_params); +} + +// IOS debug builds can be quite slow, disabling to avoid issues with timeouts. 
+#if defined(WEBRTC_IOS) && defined(WEBRTC_ARCH_ARM64) && !defined(NDEBUG) +#define MAYBE_Screenshare DISABLED_Screenshare +#else +#define MAYBE_Screenshare Screenshare +#endif +TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Screenshare) { + std::pair + network_links = CreateNetwork(); + AddPeer( + network_links.first, [](PeerConfigurer* alice) { + VideoConfig screenshare(320, 180, 30); + screenshare.stream_label = "alice-screenshare"; screenshare.content_hint = VideoTrackInterface::ContentHint::kText; ScreenShareConfig screen_share_config = ScreenShareConfig(TimeDelta::Seconds(2)); @@ -178,16 +261,11 @@ TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Smoke) { TimeDelta::Millis(1800), kDefaultSlidesWidth, kDefaultSlidesHeight); auto screen_share_frame_generator = CreateScreenShareFrameGenerator(screenshare, screen_share_config); - charlie->AddVideoConfig(std::move(screenshare), - std::move(screen_share_frame_generator)); - - AudioConfig audio; - audio.stream_label = "charlie-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_bob_source", "wav"); - charlie->SetAudioConfig(std::move(audio)); + alice->AddVideoConfig(std::move(screenshare), + std::move(screen_share_frame_generator)); }); + AddPeer(network_links.second, [](PeerConfigurer* bob) {}); + RunAndCheckEachVideoStreamReceivedFrames(RunParams(TimeDelta::Seconds(2))); } // IOS debug builds can be quite slow, disabling to avoid issues with timeouts. 
@@ -197,27 +275,28 @@ TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Smoke) { #define MAYBE_Echo Echo #endif TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Echo) { + std::pair + network_links = CreateNetwork(); + AddPeer(network_links.first, [](PeerConfigurer* alice) { + AudioConfig audio; + audio.stream_label = "alice-audio"; + audio.mode = AudioConfig::Mode::kFile; + audio.input_file_name = + test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); + audio.sampling_frequency_in_hz = 48000; + alice->SetAudioConfig(std::move(audio)); + }); + AddPeer(network_links.second, [](PeerConfigurer* bob) { + AudioConfig audio; + audio.stream_label = "bob-audio"; + audio.mode = AudioConfig::Mode::kFile; + audio.input_file_name = + test::ResourcePath("pc_quality_smoke_test_bob_source", "wav"); + bob->SetAudioConfig(std::move(audio)); + }); RunParams run_params(TimeDelta::Seconds(2)); run_params.echo_emulation_config = EchoEmulationConfig(); - RunTest( - "smoke", run_params, - [](PeerConfigurer* alice) { - AudioConfig audio; - audio.stream_label = "alice-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); - audio.sampling_frequency_in_hz = 48000; - alice->SetAudioConfig(std::move(audio)); - }, - [](PeerConfigurer* bob) { - AudioConfig audio; - audio.stream_label = "bob-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_bob_source", "wav"); - bob->SetAudioConfig(std::move(audio)); - }); + RunAndCheckEachVideoStreamReceivedFrames(run_params); } // IOS debug builds can be quite slow, disabling to avoid issues with timeouts. 
@@ -227,35 +306,25 @@ TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Echo) { #define MAYBE_Simulcast Simulcast #endif TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Simulcast) { + std::pair + network_links = CreateNetwork(); + AddPeer(network_links.first, [](PeerConfigurer* alice) { + VideoConfig simulcast(1280, 720, 15); + simulcast.stream_label = "alice-simulcast"; + simulcast.simulcast_config = VideoSimulcastConfig(2, 0); + alice->AddVideoConfig(std::move(simulcast)); + + AudioConfig audio; + audio.stream_label = "alice-audio"; + audio.mode = AudioConfig::Mode::kFile; + audio.input_file_name = + test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); + alice->SetAudioConfig(std::move(audio)); + }); + AddPeer(network_links.second, [](PeerConfigurer* bob) {}); RunParams run_params(TimeDelta::Seconds(2)); run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)}; - RunTest( - "simulcast", run_params, - [](PeerConfigurer* alice) { - VideoConfig simulcast(1280, 720, 30); - simulcast.stream_label = "alice-simulcast"; - simulcast.simulcast_config = VideoSimulcastConfig(3, 0); - alice->AddVideoConfig(std::move(simulcast)); - - AudioConfig audio; - audio.stream_label = "alice-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); - alice->SetAudioConfig(std::move(audio)); - }, - [](PeerConfigurer* bob) { - VideoConfig video(640, 360, 30); - video.stream_label = "bob-video"; - bob->AddVideoConfig(std::move(video)); - - AudioConfig audio; - audio.stream_label = "bob-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_bob_source", "wav"); - bob->SetAudioConfig(std::move(audio)); - }); + RunAndCheckEachVideoStreamReceivedFrames(run_params); } // IOS debug builds can be quite slow, disabling to avoid issues with timeouts. 
@@ -265,37 +334,27 @@ TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Simulcast) { #define MAYBE_Svc Svc #endif TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Svc) { + std::pair + network_links = CreateNetwork(); + AddPeer(network_links.first, [](PeerConfigurer* alice) { + VideoConfig simulcast(1280, 720, 15); + simulcast.stream_label = "alice-svc"; + // Because we have network with packets loss we can analyze only the + // highest spatial layer in SVC mode. + simulcast.simulcast_config = VideoSimulcastConfig(2, 1); + alice->AddVideoConfig(std::move(simulcast)); + + AudioConfig audio; + audio.stream_label = "alice-audio"; + audio.mode = AudioConfig::Mode::kFile; + audio.input_file_name = + test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); + alice->SetAudioConfig(std::move(audio)); + }); + AddPeer(network_links.second, [](PeerConfigurer* bob) {}); RunParams run_params(TimeDelta::Seconds(2)); run_params.video_codecs = {VideoCodecConfig(cricket::kVp9CodecName)}; - RunTest( - "simulcast", run_params, - [](PeerConfigurer* alice) { - VideoConfig simulcast(1280, 720, 30); - simulcast.stream_label = "alice-svc"; - // Because we have network with packets loss we can analyze only the - // highest spatial layer in SVC mode. 
- simulcast.simulcast_config = VideoSimulcastConfig(3, 2); - alice->AddVideoConfig(std::move(simulcast)); - - AudioConfig audio; - audio.stream_label = "alice-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); - alice->SetAudioConfig(std::move(audio)); - }, - [](PeerConfigurer* bob) { - VideoConfig video(640, 360, 30); - video.stream_label = "bob-video"; - bob->AddVideoConfig(std::move(video)); - - AudioConfig audio; - audio.stream_label = "bob-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_bob_source", "wav"); - bob->SetAudioConfig(std::move(audio)); - }); + RunAndCheckEachVideoStreamReceivedFrames(run_params); } // IOS debug builds can be quite slow, disabling to avoid issues with timeouts. @@ -305,50 +364,34 @@ TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_Svc) { #define MAYBE_HighBitrate HighBitrate #endif TEST_F(PeerConnectionE2EQualityTestSmokeTest, MAYBE_HighBitrate) { + std::pair + network_links = CreateNetwork(); + AddPeer(network_links.first, [](PeerConfigurer* alice) { + BitrateSettings bitrate_settings; + bitrate_settings.start_bitrate_bps = 3'000'000; + bitrate_settings.max_bitrate_bps = 3'000'000; + alice->SetBitrateSettings(bitrate_settings); + VideoConfig video(800, 600, 15); + video.stream_label = "alice-video"; + video.min_encode_bitrate_bps = 500'000; + video.max_encode_bitrate_bps = 3'000'000; + alice->AddVideoConfig(std::move(video)); + + AudioConfig audio; + audio.stream_label = "alice-audio"; + audio.mode = AudioConfig::Mode::kFile; + audio.input_file_name = + test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); + audio.sampling_frequency_in_hz = 48000; + alice->SetAudioConfig(std::move(audio)); + }); + AddPeer(network_links.second, [](PeerConfigurer* bob) {}); RunParams run_params(TimeDelta::Seconds(2)); run_params.video_codecs = { 
VideoCodecConfig(cricket::kVp9CodecName, {{"profile-id", "0"}})}; - - RunTest( - "smoke", run_params, - [](PeerConfigurer* alice) { - PeerConnectionInterface::BitrateParameters bitrate_params; - bitrate_params.current_bitrate_bps = 3'000'000; - bitrate_params.max_bitrate_bps = 3'000'000; - alice->SetBitrateParameters(bitrate_params); - VideoConfig video(800, 600, 30); - video.stream_label = "alice-video"; - video.min_encode_bitrate_bps = 500'000; - video.max_encode_bitrate_bps = 3'000'000; - alice->AddVideoConfig(std::move(video)); - - AudioConfig audio; - audio.stream_label = "alice-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_alice_source", "wav"); - audio.sampling_frequency_in_hz = 48000; - alice->SetAudioConfig(std::move(audio)); - }, - [](PeerConfigurer* bob) { - PeerConnectionInterface::BitrateParameters bitrate_params; - bitrate_params.current_bitrate_bps = 3'000'000; - bitrate_params.max_bitrate_bps = 3'000'000; - bob->SetBitrateParameters(bitrate_params); - VideoConfig video(800, 600, 30); - video.stream_label = "bob-video"; - video.min_encode_bitrate_bps = 500'000; - video.max_encode_bitrate_bps = 3'000'000; - bob->AddVideoConfig(std::move(video)); - - AudioConfig audio; - audio.stream_label = "bob-audio"; - audio.mode = AudioConfig::Mode::kFile; - audio.input_file_name = - test::ResourcePath("pc_quality_smoke_test_bob_source", "wav"); - bob->SetAudioConfig(std::move(audio)); - }); + RunAndCheckEachVideoStreamReceivedFrames(run_params); } +} // namespace } // namespace webrtc_pc_e2e } // namespace webrtc diff --git a/test/pc/e2e/peer_connection_quality_test.cc b/test/pc/e2e/peer_connection_quality_test.cc index 17104a90aa..38a9ebf801 100644 --- a/test/pc/e2e/peer_connection_quality_test.cc +++ b/test/pc/e2e/peer_connection_quality_test.cc @@ -14,17 +14,17 @@ #include #include +#include "absl/strings/string_view.h" #include "api/jsep.h" #include "api/media_stream_interface.h" 
#include "api/peer_connection_interface.h" #include "api/rtc_event_log/rtc_event_log.h" #include "api/rtc_event_log_output_file.h" #include "api/scoped_refptr.h" -#include "api/task_queue/default_task_queue_factory.h" +#include "api/test/time_controller.h" #include "api/test/video_quality_analyzer_interface.h" #include "pc/sdp_utils.h" #include "pc/test/mock_peer_connection_observers.h" -#include "rtc_base/bind.h" #include "rtc_base/gunit.h" #include "rtc_base/numerics/safe_conversions.h" #include "system_wrappers/include/cpu_info.h" @@ -32,6 +32,7 @@ #include "test/pc/e2e/analyzer/audio/default_audio_quality_analyzer.h" #include "test/pc/e2e/analyzer/video/default_video_quality_analyzer.h" #include "test/pc/e2e/analyzer/video/video_quality_metrics_reporter.h" +#include "test/pc/e2e/cross_media_metrics_reporter.h" #include "test/pc/e2e/stats_poller.h" #include "test/pc/e2e/test_peer_factory.h" #include "test/testsupport/file_utils.h" @@ -44,7 +45,7 @@ namespace { using VideoConfig = PeerConnectionE2EQualityTestFixture::VideoConfig; using VideoCodecConfig = PeerConnectionE2EQualityTestFixture::VideoCodecConfig; -constexpr int kDefaultTimeoutMs = 10000; +constexpr TimeDelta kDefaultTimeout = TimeDelta::Seconds(10); constexpr char kSignalThreadName[] = "signaling_thread"; // 1 signaling, 2 network, 2 worker and 2 extra for codecs etc. constexpr int kPeerConnectionUsedThreads = 7; @@ -57,7 +58,7 @@ constexpr TimeDelta kStatsUpdateInterval = TimeDelta::Seconds(1); constexpr TimeDelta kAliveMessageLogInterval = TimeDelta::Seconds(30); -constexpr int kQuickTestModeRunDurationMs = 100; +constexpr TimeDelta kQuickTestModeRunDuration = TimeDelta::Millis(100); // Field trials to enable Flex FEC advertising and receiving. 
constexpr char kFlexFecEnabledFieldTrials[] = @@ -102,17 +103,20 @@ class FixturePeerConnectionObserver : public MockPeerConnectionObserver { PeerConnectionE2EQualityTest::PeerConnectionE2EQualityTest( std::string test_case_name, + TimeController& time_controller, std::unique_ptr audio_quality_analyzer, std::unique_ptr video_quality_analyzer) - : clock_(Clock::GetRealTimeClock()), - task_queue_factory_(CreateDefaultTaskQueueFactory()), + : time_controller_(time_controller), + task_queue_factory_(time_controller_.CreateTaskQueueFactory()), test_case_name_(std::move(test_case_name)), - executor_(std::make_unique(clock_)) { + executor_(std::make_unique( + time_controller_.GetClock())) { // Create default video quality analyzer. We will always create an analyzer, // even if there are no video streams, because it will be installed into video // encoder/decoder factories. if (video_quality_analyzer == nullptr) { - video_quality_analyzer = std::make_unique(); + video_quality_analyzer = std::make_unique( + time_controller_.GetClock()); } encoded_image_id_controller_ = std::make_unique(); @@ -187,15 +191,16 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { << "; audio=" << bob_configurer->params()->audio_config.has_value(); - const std::unique_ptr signaling_thread = rtc::Thread::Create(); - signaling_thread->SetName(kSignalThreadName, nullptr); - signaling_thread->Start(); + const std::unique_ptr signaling_thread = + time_controller_.CreateThread(kSignalThreadName); media_helper_ = std::make_unique( - video_quality_analyzer_injection_helper_.get(), - task_queue_factory_.get()); + video_quality_analyzer_injection_helper_.get(), task_queue_factory_.get(), + time_controller_.GetClock()); // Create a |task_queue_|. 
- task_queue_ = std::make_unique("pc_e2e_quality_test"); + task_queue_ = std::make_unique( + time_controller_.GetTaskQueueFactory()->CreateTaskQueue( + "pc_e2e_quality_test", webrtc::TaskQueueFactory::Priority::NORMAL)); // Create call participants: Alice and Bob. // Audio streams are intercepted in AudioDeviceModule, so if it is required to @@ -205,34 +210,38 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { RemotePeerAudioConfig::Create(bob_configurer->params()->audio_config); absl::optional bob_remote_audio_config = RemotePeerAudioConfig::Create(alice_configurer->params()->audio_config); - // Copy Alice and Bob video configs to correctly pass them into lambdas. + // Copy Alice and Bob video configs and names to correctly pass them into + // lambdas. std::vector alice_video_configs = alice_configurer->params()->video_configs; + std::string alice_name = alice_configurer->params()->name.value(); std::vector bob_video_configs = bob_configurer->params()->video_configs; + std::string bob_name = bob_configurer->params()->name.value(); - alice_ = TestPeerFactory::CreateTestPeer( + TestPeerFactory test_peer_factory( + signaling_thread.get(), time_controller_, + video_quality_analyzer_injection_helper_.get(), task_queue_.get()); + alice_ = test_peer_factory.CreateTestPeer( std::move(alice_configurer), std::make_unique( - [this, bob_video_configs]( + [this, bob_video_configs, alice_name]( rtc::scoped_refptr transceiver) { - OnTrackCallback(transceiver, bob_video_configs); + OnTrackCallback(alice_name, transceiver, bob_video_configs); }, [this]() { StartVideo(alice_video_sources_); }), - video_quality_analyzer_injection_helper_.get(), signaling_thread.get(), alice_remote_audio_config, run_params.video_encoder_bitrate_multiplier, - run_params.echo_emulation_config, task_queue_.get()); - bob_ = TestPeerFactory::CreateTestPeer( + run_params.echo_emulation_config); + bob_ = test_peer_factory.CreateTestPeer( std::move(bob_configurer), std::make_unique( - [this, 
alice_video_configs]( - rtc::scoped_refptr transceiver) { - OnTrackCallback(transceiver, alice_video_configs); + [this, alice_video_configs, + bob_name](rtc::scoped_refptr transceiver) { + OnTrackCallback(bob_name, transceiver, alice_video_configs); }, [this]() { StartVideo(bob_video_sources_); }), - video_quality_analyzer_injection_helper_.get(), signaling_thread.get(), bob_remote_audio_config, run_params.video_encoder_bitrate_multiplier, - run_params.echo_emulation_config, task_queue_.get()); + run_params.echo_emulation_config); int num_cores = CpuInfo::DetectNumberOfCores(); RTC_DCHECK_GE(num_cores, 1); @@ -246,13 +255,19 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { std::min(video_analyzer_threads, kMaxVideoAnalyzerThreads); RTC_LOG(INFO) << "video_analyzer_threads=" << video_analyzer_threads; quality_metrics_reporters_.push_back( - std::make_unique()); + std::make_unique( + time_controller_.GetClock())); + quality_metrics_reporters_.push_back( + std::make_unique()); - video_quality_analyzer_injection_helper_->Start(test_case_name_, - video_analyzer_threads); + video_quality_analyzer_injection_helper_->Start( + test_case_name_, + std::vector{alice_->params()->name.value(), + bob_->params()->name.value()}, + video_analyzer_threads); audio_quality_analyzer_->Start(test_case_name_, &analyzer_helper_); for (auto& reporter : quality_metrics_reporters_) { - reporter->Start(test_case_name_); + reporter->Start(test_case_name_, &analyzer_helper_); } // Start RTCEventLog recording if requested. @@ -295,19 +310,30 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { }); // Setup call. - signaling_thread->Invoke( - RTC_FROM_HERE, - rtc::Bind(&PeerConnectionE2EQualityTest::SetupCallOnSignalingThread, this, - run_params)); + signaling_thread->Invoke(RTC_FROM_HERE, [this, &run_params] { + SetupCallOnSignalingThread(run_params); + }); + std::unique_ptr signaling_interceptor = + CreateSignalingInterceptor(run_params); + // Connect peers. 
+ signaling_thread->Invoke(RTC_FROM_HERE, [this, &signaling_interceptor] { + ExchangeOfferAnswer(signaling_interceptor.get()); + }); + WaitUntilIceCandidatesGathered(signaling_thread.get()); + + signaling_thread->Invoke(RTC_FROM_HERE, [this, &signaling_interceptor] { + ExchangeIceCandidates(signaling_interceptor.get()); + }); + WaitUntilPeersAreConnected(signaling_thread.get()); + executor_->Start(task_queue_.get()); Timestamp start_time = Now(); - rtc::Event done; bool is_quick_test_enabled = field_trial::IsEnabled("WebRTC-QuickPerfTest"); if (is_quick_test_enabled) { - done.Wait(kQuickTestModeRunDurationMs); + time_controller_.AdvanceTime(kQuickTestModeRunDuration); } else { - done.Wait(run_params.run_duration.ms()); + time_controller_.AdvanceTime(run_params.run_duration); } RTC_LOG(INFO) << "Test is done, initiating disconnect sequence."; @@ -329,14 +355,13 @@ void PeerConnectionE2EQualityTest::Run(RunParams run_params) { alice_->DetachAecDump(); bob_->DetachAecDump(); // Tear down the call. 
- signaling_thread->Invoke( - RTC_FROM_HERE, - rtc::Bind(&PeerConnectionE2EQualityTest::TearDownCallOnSignalingThread, - this)); + signaling_thread->Invoke(RTC_FROM_HERE, + [this] { TearDownCallOnSignalingThread(); }); + Timestamp end_time = Now(); RTC_LOG(INFO) << "All peers are disconnected."; { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); real_test_duration_ = end_time - start_time; } @@ -371,14 +396,17 @@ void PeerConnectionE2EQualityTest::SetupRequiredFieldTrials( } void PeerConnectionE2EQualityTest::OnTrackCallback( + absl::string_view peer_name, rtc::scoped_refptr transceiver, std::vector remote_video_configs) { const rtc::scoped_refptr& track = transceiver->receiver()->track(); RTC_CHECK_EQ(transceiver->receiver()->stream_ids().size(), 2) << "Expected 2 stream ids: 1st - sync group, 2nd - unique stream label"; + std::string sync_group = transceiver->receiver()->stream_ids()[0]; std::string stream_label = transceiver->receiver()->stream_ids()[1]; - analyzer_helper_.AddTrackToStreamMapping(track->id(), stream_label); + analyzer_helper_.AddTrackToStreamMapping(track->id(), stream_label, + sync_group); if (track->kind() != MediaStreamTrackInterface::kVideoKind) { return; } @@ -387,7 +415,7 @@ void PeerConnectionE2EQualityTest::OnTrackCallback( // track->kind() is kVideoKind. 
auto* video_track = static_cast(track.get()); std::unique_ptr> video_sink = - video_quality_analyzer_injection_helper_->CreateVideoSink(); + video_quality_analyzer_injection_helper_->CreateVideoSink(peer_name); video_track->AddOrUpdateSink(video_sink.get(), rtc::VideoSinkWants()); output_video_sinks_.push_back(std::move(video_sink)); } @@ -469,8 +497,6 @@ void PeerConnectionE2EQualityTest::SetupCallOnSignalingThread( SetPeerCodecPreferences(alice_.get(), run_params); SetPeerCodecPreferences(bob_.get(), run_params); - - SetupCall(run_params); } void PeerConnectionE2EQualityTest::TearDownCallOnSignalingThread() { @@ -513,7 +539,9 @@ void PeerConnectionE2EQualityTest::SetPeerCodecPreferences( } } -void PeerConnectionE2EQualityTest::SetupCall(const RunParams& run_params) { +std::unique_ptr +PeerConnectionE2EQualityTest::CreateSignalingInterceptor( + const RunParams& run_params) { std::map stream_label_to_simulcast_streams_count; // We add only Alice here, because simulcast/svc is supported only from the // first peer. @@ -527,21 +555,35 @@ void PeerConnectionE2EQualityTest::SetupCall(const RunParams& run_params) { PatchingParams patching_params(run_params.video_codecs, run_params.use_conference_mode, stream_label_to_simulcast_streams_count); - SignalingInterceptor signaling_interceptor(patching_params); - // Connect peers. - ExchangeOfferAnswer(&signaling_interceptor); - // Do the SDP negotiation, and also exchange ice candidates. 
- ASSERT_EQ_WAIT(alice_->signaling_state(), PeerConnectionInterface::kStable, - kDefaultTimeoutMs); - ASSERT_TRUE_WAIT(alice_->IsIceGatheringDone(), kDefaultTimeoutMs); - ASSERT_TRUE_WAIT(bob_->IsIceGatheringDone(), kDefaultTimeoutMs); - - ExchangeIceCandidates(&signaling_interceptor); + return std::make_unique(patching_params); +} + +void PeerConnectionE2EQualityTest::WaitUntilIceCandidatesGathered( + rtc::Thread* signaling_thread) { + ASSERT_TRUE(time_controller_.Wait( + [&]() { + return signaling_thread->Invoke(RTC_FROM_HERE, [&]() { + return alice_->IsIceGatheringDone() && bob_->IsIceGatheringDone(); + }); + }, + 2 * kDefaultTimeout)); +} + +void PeerConnectionE2EQualityTest::WaitUntilPeersAreConnected( + rtc::Thread* signaling_thread) { // This means that ICE and DTLS are connected. - WAIT(bob_->IsIceConnected(), kDefaultTimeoutMs); - bob_connected_ = bob_->IsIceConnected(); - WAIT(alice_->IsIceConnected(), kDefaultTimeoutMs); - alice_connected_ = alice_->IsIceConnected(); + alice_connected_ = time_controller_.Wait( + [&]() { + return signaling_thread->Invoke( + RTC_FROM_HERE, [&]() { return alice_->IsIceConnected(); }); + }, + kDefaultTimeout); + bob_connected_ = time_controller_.Wait( + [&]() { + return signaling_thread->Invoke( + RTC_FROM_HERE, [&]() { return bob_->IsIceConnected(); }); + }, + kDefaultTimeout); } void PeerConnectionE2EQualityTest::ExchangeOfferAnswer( @@ -628,12 +670,12 @@ void PeerConnectionE2EQualityTest::TearDownCall() { video_source->Stop(); } - alice_->pc()->Close(); - bob_->pc()->Close(); - alice_video_sources_.clear(); bob_video_sources_.clear(); + alice_->Close(); + bob_->Close(); + media_helper_ = nullptr; } @@ -649,7 +691,7 @@ void PeerConnectionE2EQualityTest::ReportGeneralTestResults() { } Timestamp PeerConnectionE2EQualityTest::Now() const { - return clock_->CurrentTime(); + return time_controller_.GetClock()->CurrentTime(); } } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/peer_connection_quality_test.h 
b/test/pc/e2e/peer_connection_quality_test.h index 2eb7e708c6..9ce19a80e4 100644 --- a/test/pc/e2e/peer_connection_quality_test.h +++ b/test/pc/e2e/peer_connection_quality_test.h @@ -15,11 +15,14 @@ #include #include +#include "absl/strings/string_view.h" #include "api/task_queue/task_queue_factory.h" #include "api/test/audio_quality_analyzer_interface.h" #include "api/test/peerconnection_quality_test_fixture.h" +#include "api/test/time_controller.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/thread.h" #include "rtc_base/thread_annotations.h" @@ -51,6 +54,7 @@ class PeerConnectionE2EQualityTest PeerConnectionE2EQualityTest( std::string test_case_name, + TimeController& time_controller, std::unique_ptr audio_quality_analyzer, std::unique_ptr video_quality_analyzer); @@ -71,7 +75,7 @@ class PeerConnectionE2EQualityTest void Run(RunParams run_params) override; TimeDelta GetRealTestDuration() const override { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); RTC_CHECK_NE(real_test_duration_, TimeDelta::Zero()); return real_test_duration_; } @@ -80,13 +84,17 @@ class PeerConnectionE2EQualityTest // For some functionality some field trials have to be enabled, so we will // enable them here. void SetupRequiredFieldTrials(const RunParams& run_params); - void OnTrackCallback(rtc::scoped_refptr transceiver, + void OnTrackCallback(absl::string_view peer_name, + rtc::scoped_refptr transceiver, std::vector remote_video_configs); // Have to be run on the signaling thread. 
void SetupCallOnSignalingThread(const RunParams& run_params); void TearDownCallOnSignalingThread(); void SetPeerCodecPreferences(TestPeer* peer, const RunParams& run_params); - void SetupCall(const RunParams& run_params); + std::unique_ptr CreateSignalingInterceptor( + const RunParams& run_params); + void WaitUntilIceCandidatesGathered(rtc::Thread* signaling_thread); + void WaitUntilPeersAreConnected(rtc::Thread* signaling_thread); void ExchangeOfferAnswer(SignalingInterceptor* signaling_interceptor); void ExchangeIceCandidates(SignalingInterceptor* signaling_interceptor); void StartVideo( @@ -96,7 +104,7 @@ class PeerConnectionE2EQualityTest void ReportGeneralTestResults(); Timestamp Now() const; - Clock* const clock_; + TimeController& time_controller_; const std::unique_ptr task_queue_factory_; std::string test_case_name_; std::unique_ptr @@ -124,7 +132,7 @@ class PeerConnectionE2EQualityTest output_video_sinks_; AnalyzerHelper analyzer_helper_; - rtc::CriticalSection lock_; + mutable Mutex lock_; TimeDelta real_test_duration_ RTC_GUARDED_BY(lock_) = TimeDelta::Zero(); // Task queue, that is used for running activities during test call. 
diff --git a/test/pc/e2e/peer_connection_quality_test_params.h b/test/pc/e2e/peer_connection_quality_test_params.h index ccb53492c3..e1c0232cb2 100644 --- a/test/pc/e2e/peer_connection_quality_test_params.h +++ b/test/pc/e2e/peer_connection_quality_test_params.h @@ -20,8 +20,8 @@ #include "api/rtc_event_log/rtc_event_log_factory_interface.h" #include "api/task_queue/task_queue_factory.h" #include "api/test/peerconnection_quality_test_fixture.h" -#include "api/transport/media/media_transport_interface.h" #include "api/transport/network_control.h" +#include "api/transport/webrtc_key_value_config.h" #include "api/video_codecs/video_decoder_factory.h" #include "api/video_codecs/video_encoder_factory.h" #include "rtc_base/network.h" @@ -47,13 +47,14 @@ struct PeerConnectionFactoryComponents { std::unique_ptr event_log_factory; std::unique_ptr fec_controller_factory; std::unique_ptr network_controller_factory; - std::unique_ptr media_transport_factory; std::unique_ptr neteq_factory; // Will be passed to MediaEngineInterface, that will be used in // PeerConnectionFactory. std::unique_ptr video_encoder_factory; std::unique_ptr video_decoder_factory; + + std::unique_ptr trials; }; // Contains most parts from PeerConnectionDependencies. 
Also all fields are @@ -114,7 +115,7 @@ struct Params { absl::optional aec_dump_path; PeerConnectionInterface::RTCConfiguration rtc_configuration; - PeerConnectionInterface::BitrateParameters bitrate_params; + BitrateSettings bitrate_settings; }; } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/sdp/sdp_changer.cc b/test/pc/e2e/sdp/sdp_changer.cc index f2aeb1b92d..b46aea1c5f 100644 --- a/test/pc/e2e/sdp/sdp_changer.cc +++ b/test/pc/e2e/sdp/sdp_changer.cc @@ -34,6 +34,23 @@ std::string CodecRequiredParamsToString( return out.str(); } +std::string SupportedCodecsToString( + rtc::ArrayView supported_codecs) { + rtc::StringBuilder out; + for (const auto& codec : supported_codecs) { + out << codec.name; + if (!codec.parameters.empty()) { + out << "("; + for (const auto& param : codec.parameters) { + out << param.first << "=" << param.second << ";"; + } + out << ")"; + } + out << "; "; + } + return out.str(); +} + } // namespace std::vector FilterVideoCodecCapabilities( @@ -42,16 +59,6 @@ std::vector FilterVideoCodecCapabilities( bool use_ulpfec, bool use_flexfec, rtc::ArrayView supported_codecs) { - RTC_LOG(INFO) << "Peer connection support these codecs:"; - for (const auto& codec : supported_codecs) { - RTC_LOG(INFO) << "Codec: " << codec.name; - if (!codec.parameters.empty()) { - RTC_LOG(INFO) << "Params:"; - for (const auto& param : codec.parameters) { - RTC_LOG(INFO) << " " << param.first << "=" << param.second; - } - } - } std::vector output_codecs; // Find requested codecs among supported and add them to output in the order // they were requested. @@ -80,7 +87,8 @@ std::vector FilterVideoCodecCapabilities( RTC_CHECK_GT(output_codecs.size(), size_before) << "Codec with name=" << codec_request.name << " and params {" << CodecRequiredParamsToString(codec_request.required_params) - << "} is unsupported for this peer connection"; + << "} is unsupported for this peer connection. 
Supported codecs are: " + << SupportedCodecsToString(supported_codecs); } // Add required FEC and RTX codecs to output. @@ -524,9 +532,11 @@ SignalingInterceptor::PatchOffererIceCandidates( context_.simulcast_infos_by_mid.find(candidate->sdp_mid()); if (simulcast_info_it != context_.simulcast_infos_by_mid.end()) { // This is candidate for simulcast section, so it should be transformed - // into candidates for replicated sections - out.push_back(CreateIceCandidate(simulcast_info_it->second->rids[0], 0, - candidate->candidate())); + // into candidates for replicated sections. The sdpMLineIndex is set to + // -1 and ignored if the rid is present. + for (auto rid : simulcast_info_it->second->rids) { + out.push_back(CreateIceCandidate(rid, -1, candidate->candidate())); + } } else { out.push_back(CreateIceCandidate(candidate->sdp_mid(), candidate->sdp_mline_index(), @@ -550,6 +560,9 @@ SignalingInterceptor::PatchAnswererIceCandidates( // section. out.push_back(CreateIceCandidate(simulcast_info_it->second->mid, 0, candidate->candidate())); + } else if (context_.simulcast_infos_by_rid.size()) { + // When using simulcast and bundle, put everything on the first m-line. + out.push_back(CreateIceCandidate("", 0, candidate->candidate())); } else { out.push_back(CreateIceCandidate(candidate->sdp_mid(), candidate->sdp_mline_index(), diff --git a/test/pc/e2e/stats_based_network_quality_metrics_reporter.cc b/test/pc/e2e/stats_based_network_quality_metrics_reporter.cc new file mode 100644 index 0000000000..eb676a92bd --- /dev/null +++ b/test/pc/e2e/stats_based_network_quality_metrics_reporter.cc @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "test/pc/e2e/stats_based_network_quality_metrics_reporter.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/array_view.h" +#include "api/scoped_refptr.h" +#include "api/stats/rtc_stats.h" +#include "api/stats/rtcstats_objects.h" +#include "api/test/network_emulation/network_emulation_interfaces.h" +#include "api/test/network_emulation_manager.h" +#include "api/units/data_rate.h" +#include "api/units/timestamp.h" +#include "rtc_base/event.h" +#include "rtc_base/ip_address.h" +#include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" +#include "system_wrappers/include/field_trial.h" +#include "test/testsupport/perf_test.h" + +namespace webrtc { +namespace webrtc_pc_e2e { +namespace { + +constexpr int kStatsWaitTimeoutMs = 1000; + +// Field trial which controls whether to report standard-compliant bytes +// sent/received per stream. If enabled, padding and headers are not included +// in bytes sent or received. 
+constexpr char kUseStandardBytesStats[] = "WebRTC-UseStandardBytesStats"; + +std::unique_ptr PopulateStats( + std::vector endpoints, + NetworkEmulationManager* network_emulation) { + rtc::Event stats_loaded; + std::unique_ptr stats; + network_emulation->GetStats(endpoints, + [&](std::unique_ptr s) { + stats = std::move(s); + stats_loaded.Set(); + }); + bool stats_received = stats_loaded.Wait(kStatsWaitTimeoutMs); + RTC_CHECK(stats_received); + return stats; +} + +std::map PopulateIpToPeer( + const std::map>& + peer_endpoints) { + std::map out; + for (const auto& entry : peer_endpoints) { + for (const EmulatedEndpoint* const endpoint : entry.second) { + out.emplace(endpoint->GetPeerLocalAddress(), entry.first); + } + } + return out; +} + +} // namespace + +StatsBasedNetworkQualityMetricsReporter::NetworkLayerStatsCollector:: + NetworkLayerStatsCollector( + std::map> peer_endpoints, + NetworkEmulationManager* network_emulation) + : peer_endpoints_(std::move(peer_endpoints)), + ip_to_peer_(PopulateIpToPeer(peer_endpoints_)), + network_emulation_(network_emulation) {} + +void StatsBasedNetworkQualityMetricsReporter::NetworkLayerStatsCollector:: + Start() { + // Check that network stats are clean before test execution. 
+ for (const auto& entry : peer_endpoints_) { + std::unique_ptr stats = + PopulateStats(entry.second, network_emulation_); + RTC_CHECK_EQ(stats->PacketsSent(), 0); + RTC_CHECK_EQ(stats->PacketsReceived(), 0); + } +} + +std::map +StatsBasedNetworkQualityMetricsReporter::NetworkLayerStatsCollector:: + GetStats() { + std::map peer_to_stats; + std::map> sender_to_receivers; + for (const auto& entry : peer_endpoints_) { + NetworkLayerStats stats; + stats.stats = PopulateStats(entry.second, network_emulation_); + const std::string& peer_name = entry.first; + for (const auto& income_stats_entry : + stats.stats->IncomingStatsPerSource()) { + const rtc::IPAddress& source_ip = income_stats_entry.first; + auto it = ip_to_peer_.find(source_ip); + if (it == ip_to_peer_.end()) { + // Source IP is unknown for this collector, so will be skipped. + continue; + } + sender_to_receivers[it->second].push_back(peer_name); + } + peer_to_stats.emplace(peer_name, std::move(stats)); + } + for (auto& entry : peer_to_stats) { + const std::vector& receivers = + sender_to_receivers[entry.first]; + entry.second.receivers = + std::set(receivers.begin(), receivers.end()); + } + return peer_to_stats; +} + +void StatsBasedNetworkQualityMetricsReporter::Start( + absl::string_view test_case_name, + const TrackIdStreamInfoMap* reporter_helper) { + test_case_name_ = std::string(test_case_name); + collector_.Start(); + start_time_ = clock_->CurrentTime(); +} + +void StatsBasedNetworkQualityMetricsReporter::OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) { + PCStats cur_stats; + + auto inbound_stats = report->GetStatsOfType(); + for (const auto& stat : inbound_stats) { + cur_stats.payload_received += + DataSize::Bytes(stat->bytes_received.ValueOrDefault(0ul) + + stat->header_bytes_received.ValueOrDefault(0ul)); + } + + auto outbound_stats = report->GetStatsOfType(); + for (const auto& stat : outbound_stats) { + cur_stats.payload_sent += + 
DataSize::Bytes(stat->bytes_sent.ValueOrDefault(0ul) + + stat->header_bytes_sent.ValueOrDefault(0ul)); + } + + auto candidate_pairs_stats = report->GetStatsOfType(); + for (const auto& stat : candidate_pairs_stats) { + cur_stats.total_received += + DataSize::Bytes(stat->bytes_received.ValueOrDefault(0ul)); + cur_stats.total_sent += + DataSize::Bytes(stat->bytes_sent.ValueOrDefault(0ul)); + cur_stats.packets_received += stat->packets_received.ValueOrDefault(0ul); + cur_stats.packets_sent += stat->packets_sent.ValueOrDefault(0ul); + } + + MutexLock lock(&mutex_); + pc_stats_[std::string(pc_label)] = cur_stats; +} + +void StatsBasedNetworkQualityMetricsReporter::StopAndReportResults() { + Timestamp end_time = clock_->CurrentTime(); + + if (!webrtc::field_trial::IsEnabled(kUseStandardBytesStats)) { + RTC_LOG(LS_ERROR) + << "Non-standard GetStats; \"payload\" counts include RTP headers"; + } + + std::map stats = collector_.GetStats(); + for (const auto& entry : stats) { + LogNetworkLayerStats(entry.first, entry.second); + } + MutexLock lock(&mutex_); + for (const auto& pair : pc_stats_) { + auto it = stats.find(pair.first); + RTC_CHECK(it != stats.end()) + << "Peer name used for PeerConnection stats collection and peer name " + "used for endpoints naming doesn't match. 
No endpoints found for " + "peer " + << pair.first; + const NetworkLayerStats& network_layer_stats = it->second; + int64_t total_packets_received = 0; + bool found = false; + for (const auto& dest_peer : network_layer_stats.receivers) { + auto pc_stats_it = pc_stats_.find(dest_peer); + if (pc_stats_it == pc_stats_.end()) { + continue; + } + found = true; + total_packets_received += pc_stats_it->second.packets_received; + } + int64_t packet_loss = -1; + if (found) { + packet_loss = pair.second.packets_sent - total_packets_received; + } + ReportStats(pair.first, pair.second, network_layer_stats, packet_loss, + end_time); + } +} + +void StatsBasedNetworkQualityMetricsReporter::ReportStats( + const std::string& pc_label, + const PCStats& pc_stats, + const NetworkLayerStats& network_layer_stats, + int64_t packet_loss, + const Timestamp& end_time) { + ReportResult("bytes_discarded_no_receiver", pc_label, + network_layer_stats.stats->BytesDropped().bytes(), + "sizeInBytes"); + ReportResult("packets_discarded_no_receiver", pc_label, + network_layer_stats.stats->PacketsDropped(), "unitless"); + + ReportResult("payload_bytes_received", pc_label, + pc_stats.payload_received.bytes(), "sizeInBytes"); + ReportResult("payload_bytes_sent", pc_label, pc_stats.payload_sent.bytes(), + "sizeInBytes"); + + ReportResult("bytes_sent", pc_label, pc_stats.total_sent.bytes(), + "sizeInBytes"); + ReportResult("packets_sent", pc_label, pc_stats.packets_sent, "unitless"); + ReportResult("average_send_rate", pc_label, + (pc_stats.total_sent / (end_time - start_time_)).bytes_per_sec(), + "bytesPerSecond"); + ReportResult("bytes_received", pc_label, pc_stats.total_received.bytes(), + "sizeInBytes"); + ReportResult("packets_received", pc_label, pc_stats.packets_received, + "unitless"); + ReportResult( + "average_receive_rate", pc_label, + (pc_stats.total_received / (end_time - start_time_)).bytes_per_sec(), + "bytesPerSecond"); + ReportResult("sent_packets_loss", pc_label, packet_loss, 
"unitless"); +} + +void StatsBasedNetworkQualityMetricsReporter::ReportResult( + const std::string& metric_name, + const std::string& network_label, + const double value, + const std::string& unit) const { + test::PrintResult(metric_name, /*modifier=*/"", + GetTestCaseName(network_label), value, unit, + /*important=*/false); +} + +void StatsBasedNetworkQualityMetricsReporter::ReportResult( + const std::string& metric_name, + const std::string& network_label, + const SamplesStatsCounter& value, + const std::string& unit) const { + test::PrintResult(metric_name, /*modifier=*/"", + GetTestCaseName(network_label), value, unit, + /*important=*/false); +} + +std::string StatsBasedNetworkQualityMetricsReporter::GetTestCaseName( + absl::string_view network_label) const { + rtc::StringBuilder builder; + builder << test_case_name_ << "/" << network_label.data(); + return builder.str(); +} + +void StatsBasedNetworkQualityMetricsReporter::LogNetworkLayerStats( + const std::string& peer_name, + const NetworkLayerStats& stats) const { + DataRate average_send_rate = stats.stats->PacketsSent() >= 2 + ? stats.stats->AverageSendRate() + : DataRate::Zero(); + DataRate average_receive_rate = stats.stats->PacketsReceived() >= 2 + ? 
stats.stats->AverageReceiveRate() + : DataRate::Zero(); + rtc::StringBuilder log; + log << "Raw network layer statistic for [" << peer_name << "]:\n" + << "Local IPs:\n"; + std::vector local_ips = stats.stats->LocalAddresses(); + for (size_t i = 0; i < local_ips.size(); ++i) { + log << " " << local_ips[i].ToString() << "\n"; + } + if (!stats.stats->SentPacketsSizeCounter().IsEmpty()) { + ReportResult("sent_packets_size", peer_name, + stats.stats->SentPacketsSizeCounter(), "sizeInBytes"); + } + if (!stats.stats->ReceivedPacketsSizeCounter().IsEmpty()) { + ReportResult("received_packets_size", peer_name, + stats.stats->ReceivedPacketsSizeCounter(), "sizeInBytes"); + } + if (!stats.stats->DroppedPacketsSizeCounter().IsEmpty()) { + ReportResult("dropped_packets_size", peer_name, + stats.stats->DroppedPacketsSizeCounter(), "sizeInBytes"); + } + if (!stats.stats->SentPacketsQueueWaitTimeUs().IsEmpty()) { + ReportResult("sent_packets_queue_wait_time_us", peer_name, + stats.stats->SentPacketsQueueWaitTimeUs(), "unitless"); + } + + log << "Send statistic:\n" + << " packets: " << stats.stats->PacketsSent() + << " bytes: " << stats.stats->BytesSent().bytes() + << " avg_rate (bytes/sec): " << average_send_rate.bytes_per_sec() + << " avg_rate (bps): " << average_send_rate.bps() << "\n" + << "Send statistic per destination:\n"; + + for (const auto& entry : stats.stats->OutgoingStatsPerDestination()) { + DataRate source_average_send_rate = entry.second->PacketsSent() >= 2 + ? 
entry.second->AverageSendRate() + : DataRate::Zero(); + log << "(" << entry.first.ToString() << "):\n" + << " packets: " << entry.second->PacketsSent() + << " bytes: " << entry.second->BytesSent().bytes() + << " avg_rate (bytes/sec): " << source_average_send_rate.bytes_per_sec() + << " avg_rate (bps): " << source_average_send_rate.bps() << "\n"; + if (!entry.second->SentPacketsSizeCounter().IsEmpty()) { + ReportResult("sent_packets_size", + peer_name + "/" + entry.first.ToString(), + stats.stats->SentPacketsSizeCounter(), "sizeInBytes"); + } + } + + log << "Receive statistic:\n" + << " packets: " << stats.stats->PacketsReceived() + << " bytes: " << stats.stats->BytesReceived().bytes() + << " avg_rate (bytes/sec): " << average_receive_rate.bytes_per_sec() + << " avg_rate (bps): " << average_receive_rate.bps() << "\n" + << "Receive statistic per source:\n"; + + for (const auto& entry : stats.stats->IncomingStatsPerSource()) { + DataRate source_average_receive_rate = + entry.second->PacketsReceived() >= 2 + ? 
entry.second->AverageReceiveRate() + : DataRate::Zero(); + log << "(" << entry.first.ToString() << "):\n" + << " packets: " << entry.second->PacketsReceived() + << " bytes: " << entry.second->BytesReceived().bytes() + << " avg_rate (bytes/sec): " + << source_average_receive_rate.bytes_per_sec() + << " avg_rate (bps): " << source_average_receive_rate.bps() << "\n"; + if (!entry.second->ReceivedPacketsSizeCounter().IsEmpty()) { + ReportResult("received_packets_size", + peer_name + "/" + entry.first.ToString(), + stats.stats->ReceivedPacketsSizeCounter(), "sizeInBytes"); + } + if (!entry.second->DroppedPacketsSizeCounter().IsEmpty()) { + ReportResult("dropped_packets_size", + peer_name + "/" + entry.first.ToString(), + stats.stats->DroppedPacketsSizeCounter(), "sizeInBytes"); + } + } + + RTC_LOG(INFO) << log.str(); +} + +} // namespace webrtc_pc_e2e +} // namespace webrtc diff --git a/test/pc/e2e/stats_based_network_quality_metrics_reporter.h b/test/pc/e2e/stats_based_network_quality_metrics_reporter.h new file mode 100644 index 0000000000..d14bb43e1b --- /dev/null +++ b/test/pc/e2e/stats_based_network_quality_metrics_reporter.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef TEST_PC_E2E_STATS_BASED_NETWORK_QUALITY_METRICS_REPORTER_H_ +#define TEST_PC_E2E_STATS_BASED_NETWORK_QUALITY_METRICS_REPORTER_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/strings/string_view.h" +#include "api/numerics/samples_stats_counter.h" +#include "api/test/network_emulation/network_emulation_interfaces.h" +#include "api/test/network_emulation_manager.h" +#include "api/test/peerconnection_quality_test_fixture.h" +#include "api/units/data_size.h" +#include "api/units/timestamp.h" +#include "rtc_base/ip_address.h" +#include "rtc_base/synchronization/mutex.h" + +namespace webrtc { +namespace webrtc_pc_e2e { + +class StatsBasedNetworkQualityMetricsReporter + : public PeerConnectionE2EQualityTestFixture::QualityMetricsReporter { + public: + // |networks| map peer name to network to report network layer stability stats + // and to log network layer metrics. + StatsBasedNetworkQualityMetricsReporter( + std::map> peer_endpoints, + NetworkEmulationManager* network_emulation) + : collector_(std::move(peer_endpoints), network_emulation), + clock_(network_emulation->time_controller()->GetClock()) {} + ~StatsBasedNetworkQualityMetricsReporter() override = default; + + // Network stats must be empty when this method will be invoked. + void Start(absl::string_view test_case_name, + const TrackIdStreamInfoMap* reporter_helper) override; + void OnStatsReports( + absl::string_view pc_label, + const rtc::scoped_refptr& report) override; + void StopAndReportResults() override; + + private: + struct PCStats { + // TODO(nisse): Separate audio and video counters. Depends on standard stat + // counters, enabled by field trial "WebRTC-UseStandardBytesStats". + DataSize payload_received = DataSize::Zero(); + DataSize payload_sent = DataSize::Zero(); + + // Total bytes/packets sent/received in all RTCTransport's. 
+ DataSize total_received = DataSize::Zero(); + DataSize total_sent = DataSize::Zero(); + int64_t packets_received = 0; + int64_t packets_sent = 0; + }; + + struct NetworkLayerStats { + std::unique_ptr stats; + std::set receivers; + }; + + class NetworkLayerStatsCollector { + public: + NetworkLayerStatsCollector( + std::map> peer_endpoints, + NetworkEmulationManager* network_emulation); + + void Start(); + + std::map GetStats(); + + private: + const std::map> peer_endpoints_; + const std::map ip_to_peer_; + NetworkEmulationManager* const network_emulation_; + }; + + void ReportStats(const std::string& pc_label, + const PCStats& pc_stats, + const NetworkLayerStats& network_layer_stats, + int64_t packet_loss, + const Timestamp& end_time); + void ReportResult(const std::string& metric_name, + const std::string& network_label, + const double value, + const std::string& unit) const; + void ReportResult(const std::string& metric_name, + const std::string& network_label, + const SamplesStatsCounter& value, + const std::string& unit) const; + std::string GetTestCaseName(absl::string_view network_label) const; + void LogNetworkLayerStats(const std::string& peer_name, + const NetworkLayerStats& stats) const; + + NetworkLayerStatsCollector collector_; + Clock* const clock_; + + std::string test_case_name_; + Timestamp start_time_ = Timestamp::MinusInfinity(); + + Mutex mutex_; + std::map pc_stats_ RTC_GUARDED_BY(mutex_); +}; + +} // namespace webrtc_pc_e2e +} // namespace webrtc + +#endif // TEST_PC_E2E_STATS_BASED_NETWORK_QUALITY_METRICS_REPORTER_H_ diff --git a/test/pc/e2e/stats_poller.cc b/test/pc/e2e/stats_poller.cc index 987f26e7e8..5f1424cd29 100644 --- a/test/pc/e2e/stats_poller.cc +++ b/test/pc/e2e/stats_poller.cc @@ -18,21 +18,20 @@ namespace webrtc { namespace webrtc_pc_e2e { void InternalStatsObserver::PollStats() { - peer_->pc()->GetStats(this, nullptr, - webrtc::PeerConnectionInterface::StatsOutputLevel:: - kStatsOutputLevelStandard); + 
peer_->pc()->GetStats(this); } -void InternalStatsObserver::OnComplete(const StatsReports& reports) { +void InternalStatsObserver::OnStatsDelivered( + const rtc::scoped_refptr& report) { for (auto* observer : observers_) { - observer->OnStatsReports(pc_label_, reports); + observer->OnStatsReports(pc_label_, report); } } StatsPoller::StatsPoller(std::vector observers, std::map peers) { for (auto& peer : peers) { - pollers_.push_back(new rtc::RefCountedObject( + pollers_.push_back(rtc::make_ref_counted( peer.first, peer.second, observers)); } } diff --git a/test/pc/e2e/stats_poller.h b/test/pc/e2e/stats_poller.h index 3d0c2d6801..157a147834 100644 --- a/test/pc/e2e/stats_poller.h +++ b/test/pc/e2e/stats_poller.h @@ -17,6 +17,7 @@ #include #include "api/peer_connection_interface.h" +#include "api/stats/rtc_stats_collector_callback.h" #include "api/test/stats_observer_interface.h" #include "test/pc/e2e/test_peer.h" @@ -25,7 +26,7 @@ namespace webrtc_pc_e2e { // Helper class that will notify all the webrtc::test::StatsObserverInterface // objects subscribed. 
-class InternalStatsObserver : public StatsObserver { +class InternalStatsObserver : public RTCStatsCollectorCallback { public: InternalStatsObserver(std::string pc_label, TestPeer* peer, @@ -36,7 +37,8 @@ class InternalStatsObserver : public StatsObserver { void PollStats(); - void OnComplete(const StatsReports& reports) override; + void OnStatsDelivered( + const rtc::scoped_refptr& report) override; private: std::string pc_label_; diff --git a/test/pc/e2e/test_activities_executor.cc b/test/pc/e2e/test_activities_executor.cc index 4ace6ae7d8..ded39920f2 100644 --- a/test/pc/e2e/test_activities_executor.cc +++ b/test/pc/e2e/test_activities_executor.cc @@ -24,7 +24,7 @@ namespace webrtc_pc_e2e { void TestActivitiesExecutor::Start(TaskQueueForTest* task_queue) { RTC_DCHECK(task_queue); task_queue_ = task_queue; - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); start_time_ = Now(); while (!scheduled_activities_.empty()) { PostActivity(std::move(scheduled_activities_.front())); @@ -39,7 +39,7 @@ void TestActivitiesExecutor::Stop() { } task_queue_->SendTask( [this]() { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); for (auto& handle : repeating_task_handles_) { handle.Stop(); } @@ -56,7 +56,7 @@ void TestActivitiesExecutor::ScheduleActivity( initial_delay_since_start >= TimeDelta::Zero()); RTC_CHECK(!interval || (interval->IsFinite() && *interval > TimeDelta::Zero())); - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); ScheduledActivity activity(initial_delay_since_start, interval, func); if (start_time_.IsInfinite()) { scheduled_activities_.push(std::move(activity)); diff --git a/test/pc/e2e/test_activities_executor.h b/test/pc/e2e/test_activities_executor.h index 09bfe4167f..94e73d1e5f 100644 --- a/test/pc/e2e/test_activities_executor.h +++ b/test/pc/e2e/test_activities_executor.h @@ -17,7 +17,7 @@ #include "absl/types/optional.h" #include "api/units/time_delta.h" #include "api/units/timestamp.h" -#include "rtc_base/critical_section.h" +#include 
"rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/task_utils/repeating_task.h" #include "system_wrappers/include/clock.h" @@ -63,7 +63,7 @@ class TestActivitiesExecutor { TaskQueueForTest* task_queue_; - rtc::CriticalSection lock_; + Mutex lock_; // Time when test was started. Minus infinity means that it wasn't started // yet. Timestamp start_time_ RTC_GUARDED_BY(lock_) = Timestamp::MinusInfinity(); diff --git a/test/pc/e2e/test_peer.cc b/test/pc/e2e/test_peer.cc index 8c9c6d9c8f..942bedfba3 100644 --- a/test/pc/e2e/test_peer.cc +++ b/test/pc/e2e/test_peer.cc @@ -21,6 +21,7 @@ namespace webrtc_pc_e2e { bool TestPeer::AddIceCandidates( std::vector> candidates) { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; bool success = true; for (auto& candidate : candidates) { if (!pc()->AddIceCandidate(candidate.get())) { @@ -37,16 +38,27 @@ bool TestPeer::AddIceCandidates( return success; } +void TestPeer::Close() { + wrapper_->pc()->Close(); + remote_ice_candidates_.clear(); + audio_processing_ = nullptr; + video_sources_.clear(); + wrapper_ = nullptr; + worker_thread_ = nullptr; +} + TestPeer::TestPeer( rtc::scoped_refptr pc_factory, rtc::scoped_refptr pc, std::unique_ptr observer, std::unique_ptr params, std::vector video_sources, - rtc::scoped_refptr audio_processing) - : PeerConnectionWrapper::PeerConnectionWrapper(std::move(pc_factory), - std::move(pc), - std::move(observer)), + rtc::scoped_refptr audio_processing, + std::unique_ptr worker_thread) + : worker_thread_(std::move(worker_thread)), + wrapper_(std::make_unique(std::move(pc_factory), + std::move(pc), + std::move(observer))), params_(std::move(params)), video_sources_(std::move(video_sources)), audio_processing_(audio_processing) {} diff --git a/test/pc/e2e/test_peer.h b/test/pc/e2e/test_peer.h index c506127488..d8d5b2d1bb 100644 --- a/test/pc/e2e/test_peer.h +++ b/test/pc/e2e/test_peer.h @@ -26,16 +26,91 @@ namespace webrtc { namespace webrtc_pc_e2e { // 
Describes a single participant in the call. -class TestPeer final : public PeerConnectionWrapper { +class TestPeer final { public: - using PeerConnectionWrapper::PeerConnectionWrapper; - Params* params() const { return params_.get(); } PeerConfigurerImpl::VideoSource ReleaseVideoSource(size_t i) { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; return std::move(video_sources_[i]); } + PeerConnectionFactoryInterface* pc_factory() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->pc_factory(); + } + PeerConnectionInterface* pc() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->pc(); + } + MockPeerConnectionObserver* observer() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->observer(); + } + + std::unique_ptr CreateOffer() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->CreateOffer(); + } + + std::unique_ptr CreateAnswer() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->CreateAnswer(); + } + + bool SetLocalDescription(std::unique_ptr desc, + std::string* error_out = nullptr) { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->SetLocalDescription(std::move(desc), error_out); + } + + bool SetRemoteDescription(std::unique_ptr desc, + std::string* error_out = nullptr) { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->SetRemoteDescription(std::move(desc), error_out); + } + + rtc::scoped_refptr AddTransceiver( + cricket::MediaType media_type, + const RtpTransceiverInit& init) { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->AddTransceiver(media_type, init); + } + + rtc::scoped_refptr AddTrack( + rtc::scoped_refptr track, + const std::vector& stream_ids = {}) { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->AddTrack(track, stream_ids); + } + + rtc::scoped_refptr CreateDataChannel( + const std::string& label) { + 
RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->CreateDataChannel(label); + } + + PeerConnectionInterface::SignalingState signaling_state() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->signaling_state(); + } + + bool IsIceGatheringDone() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->IsIceGatheringDone(); + } + + bool IsIceConnected() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->IsIceConnected(); + } + + rtc::scoped_refptr GetStats() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; + return wrapper_->GetStats(); + } + void DetachAecDump() { + RTC_CHECK(wrapper_) << "TestPeer is already closed"; if (audio_processing_) { audio_processing_->DetachAecDump(); } @@ -45,6 +120,10 @@ class TestPeer final : public PeerConnectionWrapper { bool AddIceCandidates( std::vector> candidates); + // Closes underlying peer connection and destroys all related objects freeing + // up related resources. + void Close(); + protected: friend class TestPeerFactory; TestPeer(rtc::scoped_refptr pc_factory, @@ -52,9 +131,13 @@ class TestPeer final : public PeerConnectionWrapper { std::unique_ptr observer, std::unique_ptr params, std::vector video_sources, - rtc::scoped_refptr audio_processing); + rtc::scoped_refptr audio_processing, + std::unique_ptr worker_thread); private: + // Keeps ownership of worker thread. It has to be destroyed after |wrapper_|. 
+ std::unique_ptr worker_thread_; + std::unique_ptr wrapper_; std::unique_ptr params_; std::vector video_sources_; rtc::scoped_refptr audio_processing_; diff --git a/test/pc/e2e/test_peer_factory.cc b/test/pc/e2e/test_peer_factory.cc index 455337ef3a..869b40f703 100644 --- a/test/pc/e2e/test_peer_factory.cc +++ b/test/pc/e2e/test_peer_factory.cc @@ -12,7 +12,11 @@ #include #include "absl/memory/memory.h" +#include "absl/strings/string_view.h" #include "api/task_queue/default_task_queue_factory.h" +#include "api/test/create_time_controller.h" +#include "api/test/time_controller.h" +#include "api/transport/field_trial_based_config.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include "api/video_codecs/builtin_video_encoder_factory.h" #include "media/engine/webrtc_media_engine.h" @@ -42,23 +46,29 @@ constexpr int kDefaultSamplingFrequencyInHz = 48000; // and |pc_dependencies| if they are omitted. Also setup required // dependencies, that won't be specially provided by factory and will be just // transferred to peer connection creation code. -void SetMandatoryEntities(InjectableComponents* components) { +void SetMandatoryEntities(InjectableComponents* components, + TimeController& time_controller) { RTC_DCHECK(components->pcf_dependencies); RTC_DCHECK(components->pc_dependencies); // Setup required peer connection factory dependencies. 
if (components->pcf_dependencies->task_queue_factory == nullptr) { components->pcf_dependencies->task_queue_factory = - CreateDefaultTaskQueueFactory(); + time_controller.CreateTaskQueueFactory(); } if (components->pcf_dependencies->call_factory == nullptr) { - components->pcf_dependencies->call_factory = webrtc::CreateCallFactory(); + components->pcf_dependencies->call_factory = + CreateTimeControllerBasedCallFactory(&time_controller); } if (components->pcf_dependencies->event_log_factory == nullptr) { components->pcf_dependencies->event_log_factory = std::make_unique( components->pcf_dependencies->task_queue_factory.get()); } + if (!components->pcf_dependencies->trials) { + components->pcf_dependencies->trials = + std::make_unique(); + } } // Returns mapping from stream label to optional spatial index. @@ -168,10 +178,14 @@ std::unique_ptr CreateMediaEngine( media_deps.video_decoder_factory = std::move(pcf_dependencies->video_decoder_factory); webrtc::SetMediaEngineDefaults(&media_deps); + RTC_DCHECK(pcf_dependencies->trials); + media_deps.trials = pcf_dependencies->trials.get(); + return cricket::CreateMediaEngine(std::move(media_deps)); } void WrapVideoEncoderFactory( + absl::string_view peer_name, double bitrate_multiplier, std::map> stream_required_spatial_index, PeerConnectionFactoryComponents* pcf_dependencies, @@ -184,11 +198,12 @@ void WrapVideoEncoderFactory( } pcf_dependencies->video_encoder_factory = video_analyzer_helper->WrapVideoEncoderFactory( - std::move(video_encoder_factory), bitrate_multiplier, + peer_name, std::move(video_encoder_factory), bitrate_multiplier, std::move(stream_required_spatial_index)); } void WrapVideoDecoderFactory( + absl::string_view peer_name, PeerConnectionFactoryComponents* pcf_dependencies, VideoQualityAnalyzerInjectionHelper* video_analyzer_helper) { std::unique_ptr video_decoder_factory; @@ -199,7 +214,7 @@ void WrapVideoDecoderFactory( } pcf_dependencies->video_decoder_factory = 
video_analyzer_helper->WrapVideoDecoderFactory( - std::move(video_decoder_factory)); + peer_name, std::move(video_decoder_factory)); } // Creates PeerConnectionFactoryDependencies objects, providing entities @@ -208,10 +223,12 @@ PeerConnectionFactoryDependencies CreatePCFDependencies( std::unique_ptr pcf_dependencies, std::unique_ptr media_engine, rtc::Thread* signaling_thread, + rtc::Thread* worker_thread, rtc::Thread* network_thread) { PeerConnectionFactoryDependencies pcf_deps; - pcf_deps.network_thread = network_thread; pcf_deps.signaling_thread = signaling_thread; + pcf_deps.worker_thread = worker_thread; + pcf_deps.network_thread = network_thread; pcf_deps.media_engine = std::move(media_engine); pcf_deps.call_factory = std::move(pcf_dependencies->call_factory); @@ -226,13 +243,12 @@ PeerConnectionFactoryDependencies CreatePCFDependencies( pcf_deps.network_controller_factory = std::move(pcf_dependencies->network_controller_factory); } - if (pcf_dependencies->media_transport_factory != nullptr) { - pcf_deps.media_transport_factory = - std::move(pcf_dependencies->media_transport_factory); - } if (pcf_dependencies->neteq_factory != nullptr) { pcf_deps.neteq_factory = std::move(pcf_dependencies->neteq_factory); } + if (pcf_dependencies->trials != nullptr) { + pcf_deps.trials = std::move(pcf_dependencies->trials); + } return pcf_deps; } @@ -281,20 +297,21 @@ absl::optional RemotePeerAudioConfig::Create( } std::unique_ptr TestPeerFactory::CreateTestPeer( - std::unique_ptr components, - std::unique_ptr params, - std::vector video_sources, + std::unique_ptr configurer, std::unique_ptr observer, - VideoQualityAnalyzerInjectionHelper* video_analyzer_helper, - rtc::Thread* signaling_thread, absl::optional remote_audio_config, double bitrate_multiplier, - absl::optional echo_emulation_config, - rtc::TaskQueue* task_queue) { + absl::optional + echo_emulation_config) { + std::unique_ptr components = + configurer->ReleaseComponents(); + std::unique_ptr params = 
configurer->ReleaseParams(); + std::vector video_sources = + configurer->ReleaseVideoSources(); RTC_DCHECK(components); RTC_DCHECK(params); RTC_DCHECK_EQ(params->video_configs.size(), video_sources.size()); - SetMandatoryEntities(components.get()); + SetMandatoryEntities(components.get(), time_controller_); params->rtc_configuration.sdp_semantics = SdpSemantics::kUnifiedPlan; // Create peer connection factory. @@ -302,24 +319,28 @@ std::unique_ptr TestPeerFactory::CreateTestPeer( webrtc::AudioProcessingBuilder().Create(); if (params->aec_dump_path && audio_processing) { audio_processing->CreateAndAttachAecDump(*params->aec_dump_path, -1, - task_queue); + task_queue_); } rtc::scoped_refptr audio_device_module = CreateAudioDeviceModule( params->audio_config, remote_audio_config, echo_emulation_config, components->pcf_dependencies->task_queue_factory.get()); WrapVideoEncoderFactory( - bitrate_multiplier, + params->name.value(), bitrate_multiplier, CalculateRequiredSpatialIndexPerStream(params->video_configs), - components->pcf_dependencies.get(), video_analyzer_helper); - WrapVideoDecoderFactory(components->pcf_dependencies.get(), - video_analyzer_helper); + components->pcf_dependencies.get(), video_analyzer_helper_); + WrapVideoDecoderFactory(params->name.value(), + components->pcf_dependencies.get(), + video_analyzer_helper_); std::unique_ptr media_engine = CreateMediaEngine(components->pcf_dependencies.get(), audio_device_module, audio_processing); + + std::unique_ptr worker_thread = + time_controller_.CreateThread("worker_thread"); PeerConnectionFactoryDependencies pcf_deps = CreatePCFDependencies( std::move(components->pcf_dependencies), std::move(media_engine), - signaling_thread, components->network_thread); + signaling_thread_, worker_thread.get(), components->network_thread); rtc::scoped_refptr peer_connection_factory = CreateModularPeerConnectionFactory(std::move(pcf_deps)); @@ -327,30 +348,16 @@ std::unique_ptr TestPeerFactory::CreateTestPeer( 
PeerConnectionDependencies pc_deps = CreatePCDependencies( observer.get(), std::move(components->pc_dependencies)); rtc::scoped_refptr peer_connection = - peer_connection_factory->CreatePeerConnection(params->rtc_configuration, - std::move(pc_deps)); - peer_connection->SetBitrate(params->bitrate_params); + peer_connection_factory + ->CreatePeerConnectionOrError(params->rtc_configuration, + std::move(pc_deps)) + .MoveValue(); + peer_connection->SetBitrate(params->bitrate_settings); return absl::WrapUnique(new TestPeer( peer_connection_factory, peer_connection, std::move(observer), - std::move(params), std::move(video_sources), audio_processing)); -} - -std::unique_ptr TestPeerFactory::CreateTestPeer( - std::unique_ptr configurer, - std::unique_ptr observer, - VideoQualityAnalyzerInjectionHelper* video_analyzer_helper, - rtc::Thread* signaling_thread, - absl::optional remote_audio_config, - double bitrate_multiplier, - absl::optional - echo_emulation_config, - rtc::TaskQueue* task_queue) { - return CreateTestPeer( - configurer->ReleaseComponents(), configurer->ReleaseParams(), - configurer->ReleaseVideoSources(), std::move(observer), - video_analyzer_helper, signaling_thread, remote_audio_config, - bitrate_multiplier, echo_emulation_config, task_queue); + std::move(params), std::move(video_sources), audio_processing, + std::move(worker_thread))); } } // namespace webrtc_pc_e2e diff --git a/test/pc/e2e/test_peer_factory.h b/test/pc/e2e/test_peer_factory.h index 3233bb65f7..df33406270 100644 --- a/test/pc/e2e/test_peer_factory.h +++ b/test/pc/e2e/test_peer_factory.h @@ -19,6 +19,7 @@ #include "absl/strings/string_view.h" #include "api/rtc_event_log/rtc_event_log_factory.h" #include "api/test/peerconnection_quality_test_fixture.h" +#include "api/test/time_controller.h" #include "modules/audio_device/include/test_audio_device.h" #include "rtc_base/task_queue.h" #include "test/pc/e2e/analyzer/video/video_quality_analyzer_injection_helper.h" @@ -44,42 +45,40 @@ struct 
RemotePeerAudioConfig { class TestPeerFactory { public: + // Creates a test peer factory. + // |signaling_thread| will be used as a signaling thread for all peers created + // by this factory. + // |time_controller| will be used to create required threads, task queue + // factories and call factory. + // |video_analyzer_helper| will be used to setup video quality analysis for + // created peers. + // |task_queue| will be used for AEC dump if it is requested. + TestPeerFactory(rtc::Thread* signaling_thread, + TimeController& time_controller, + VideoQualityAnalyzerInjectionHelper* video_analyzer_helper, + rtc::TaskQueue* task_queue) + : signaling_thread_(signaling_thread), + time_controller_(time_controller), + video_analyzer_helper_(video_analyzer_helper), + task_queue_(task_queue) {} + // Setups all components, that should be provided to WebRTC // PeerConnectionFactory and PeerConnection creation methods, // also will setup dependencies, that are required for media analyzers // injection. - // - // |signaling_thread| will be provided by test fixture implementation. - // |params| - describes current peer parameters, like current peer video - // streams and audio streams - static std::unique_ptr CreateTestPeer( - std::unique_ptr components, - std::unique_ptr params, - std::vector video_sources, - std::unique_ptr observer, - VideoQualityAnalyzerInjectionHelper* video_analyzer_helper, - rtc::Thread* signaling_thread, - absl::optional remote_audio_config, - double bitrate_multiplier, - absl::optional - echo_emulation_config, - rtc::TaskQueue* task_queue); - // Setups all components, that should be provided to WebRTC - // PeerConnectionFactory and PeerConnection creation methods, - // also will setup dependencies, that are required for media analyzers - // injection. - // - // |signaling_thread| will be provided by test fixture implementation. 
- static std::unique_ptr CreateTestPeer( + std::unique_ptr CreateTestPeer( std::unique_ptr configurer, std::unique_ptr observer, - VideoQualityAnalyzerInjectionHelper* video_analyzer_helper, - rtc::Thread* signaling_thread, absl::optional remote_audio_config, double bitrate_multiplier, absl::optional - echo_emulation_config, - rtc::TaskQueue* task_queue); + echo_emulation_config); + + private: + rtc::Thread* signaling_thread_; + TimeController& time_controller_; + VideoQualityAnalyzerInjectionHelper* video_analyzer_helper_; + rtc::TaskQueue* task_queue_; }; } // namespace webrtc_pc_e2e diff --git a/test/pc/sctp/BUILD.gn b/test/pc/sctp/BUILD.gn new file mode 100644 index 0000000000..b47cff2c0f --- /dev/null +++ b/test/pc/sctp/BUILD.gn @@ -0,0 +1,15 @@ +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import("../../../webrtc.gni") + +rtc_source_set("fake_sctp_transport") { + visibility = [ "*" ] + sources = [ "fake_sctp_transport.h" ] + deps = [ "../../../media:rtc_data_sctp_transport_internal" ] +} diff --git a/pc/test/fake_sctp_transport.h b/test/pc/sctp/fake_sctp_transport.h similarity index 82% rename from pc/test/fake_sctp_transport.h rename to test/pc/sctp/fake_sctp_transport.h index 50e59f1fc2..42b978a900 100644 --- a/pc/test/fake_sctp_transport.h +++ b/test/pc/sctp/fake_sctp_transport.h @@ -8,8 +8,8 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#ifndef PC_TEST_FAKE_SCTP_TRANSPORT_H_ -#define PC_TEST_FAKE_SCTP_TRANSPORT_H_ +#ifndef TEST_PC_SCTP_FAKE_SCTP_TRANSPORT_H_ +#define TEST_PC_SCTP_FAKE_SCTP_TRANSPORT_H_ #include @@ -29,7 +29,8 @@ class FakeSctpTransport : public cricket::SctpTransportInternal { } bool OpenStream(int sid) override { return true; } bool ResetStream(int sid) override { return true; } - bool SendData(const cricket::SendDataParams& params, + bool SendData(int sid, + const webrtc::SendDataParams& params, const rtc::CopyOnWriteBuffer& payload, cricket::SendDataResult* result = nullptr) override { return true; @@ -40,8 +41,14 @@ class FakeSctpTransport : public cricket::SctpTransportInternal { int max_message_size() const { return max_message_size_; } absl::optional max_outbound_streams() const { return absl::nullopt; } absl::optional max_inbound_streams() const { return absl::nullopt; } - int local_port() const { return *local_port_; } - int remote_port() const { return *remote_port_; } + int local_port() const { + RTC_DCHECK(local_port_); + return *local_port_; + } + int remote_port() const { + RTC_DCHECK(remote_port_); + return *remote_port_; + } private: absl::optional local_port_; @@ -49,7 +56,7 @@ class FakeSctpTransport : public cricket::SctpTransportInternal { int max_message_size_; }; -class FakeSctpTransportFactory : public cricket::SctpTransportInternalFactory { +class FakeSctpTransportFactory : public webrtc::SctpTransportFactoryInterface { public: std::unique_ptr CreateSctpTransport( rtc::PacketTransportInternal*) override { @@ -66,4 +73,4 @@ class FakeSctpTransportFactory : public cricket::SctpTransportInternalFactory { FakeSctpTransport* last_fake_sctp_transport_ = nullptr; }; -#endif // PC_TEST_FAKE_SCTP_TRANSPORT_H_ +#endif // TEST_PC_SCTP_FAKE_SCTP_TRANSPORT_H_ diff --git a/test/peer_scenario/BUILD.gn b/test/peer_scenario/BUILD.gn index d702cf539f..033ef4115a 100644 --- a/test/peer_scenario/BUILD.gn +++ b/test/peer_scenario/BUILD.gn @@ -37,6 +37,7 @@ if 
(rtc_include_tests) { "../../api/audio_codecs:builtin_audio_encoder_factory", "../../api/rtc_event_log:rtc_event_log_factory", "../../api/task_queue:default_task_queue_factory", + "../../api/transport:field_trial_based_config", "../../api/video_codecs:builtin_video_decoder_factory", "../../api/video_codecs:builtin_video_encoder_factory", "../../media:rtc_audio_video", @@ -46,12 +47,16 @@ if (rtc_include_tests) { "../../p2p:rtc_p2p", "../../pc:pc_test_utils", "../../pc:rtc_pc_base", + "../../pc:session_description", "../../rtc_base", + "../../rtc_base:null_socket_server", "../../rtc_base:stringutils", "../logging:log_writer", "../network:emulated_network", "../scenario", "../time_controller", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/memory", ] diff --git a/test/peer_scenario/peer_scenario.cc b/test/peer_scenario/peer_scenario.cc index c3443aa185..ea959c943a 100644 --- a/test/peer_scenario/peer_scenario.cc +++ b/test/peer_scenario/peer_scenario.cc @@ -77,8 +77,8 @@ SignalingRoute PeerScenario::ConnectSignaling( PeerScenarioClient* callee, std::vector send_link, std::vector ret_link) { - return SignalingRoute(caller, callee, net_.CreateTrafficRoute(send_link), - net_.CreateTrafficRoute(ret_link)); + return SignalingRoute(caller, callee, net_.CreateCrossTrafficRoute(send_link), + net_.CreateCrossTrafficRoute(ret_link)); } void PeerScenario::SimpleConnection( diff --git a/test/peer_scenario/peer_scenario_client.cc b/test/peer_scenario/peer_scenario_client.cc index 1ae53ee86b..7f3e126287 100644 --- a/test/peer_scenario/peer_scenario_client.cc +++ b/test/peer_scenario/peer_scenario_client.cc @@ -18,6 +18,7 @@ #include "api/rtc_event_log/rtc_event_log_factory.h" #include "api/task_queue/default_task_queue_factory.h" #include "api/test/create_time_controller.h" +#include "api/transport/field_trial_based_config.h" #include "api/video_codecs/builtin_video_decoder_factory.h" #include 
"api/video_codecs/builtin_video_encoder_factory.h" #include "media/engine/webrtc_media_engine.h" @@ -125,8 +126,6 @@ class FakeVideoEncoderFactory : public VideoEncoderFactory { CodecInfo QueryVideoEncoder(const SdpVideoFormat& format) const override { RTC_CHECK_EQ(format.name, "VP8"); CodecInfo info; - info.has_internal_source = false; - info.is_hardware_accelerated = false; return info; } std::unique_ptr CreateVideoEncoder( @@ -199,6 +198,7 @@ PeerScenarioClient::PeerScenarioClient( net->time_controller()->CreateTaskQueueFactory(); pcf_deps.event_log_factory = std::make_unique(task_queue_factory_); + pcf_deps.trials = std::make_unique(); cricket::MediaEngineDependencies media_deps; media_deps.task_queue_factory = task_queue_factory_; @@ -223,6 +223,7 @@ PeerScenarioClient::PeerScenarioClient( } media_deps.audio_encoder_factory = CreateBuiltinAudioEncoderFactory(); media_deps.audio_decoder_factory = CreateBuiltinAudioDecoderFactory(); + media_deps.trials = pcf_deps.trials.get(); pcf_deps.media_engine = cricket::CreateMediaEngine(std::move(media_deps)); pcf_deps.fec_controller_factory = nullptr; @@ -230,6 +231,9 @@ PeerScenarioClient::PeerScenarioClient( pcf_deps.network_state_predictor_factory = nullptr; pc_factory_ = CreateModularPeerConnectionFactory(std::move(pcf_deps)); + PeerConnectionFactoryInterface::Options pc_options; + pc_options.disable_encryption = config.disable_encryption; + pc_factory_->SetOptions(pc_options); PeerConnectionDependencies pc_deps(observer_.get()); pc_deps.allocator = @@ -237,7 +241,9 @@ PeerScenarioClient::PeerScenarioClient( pc_deps.allocator->set_flags(pc_deps.allocator->flags() | cricket::PORTALLOCATOR_DISABLE_TCP); peer_connection_ = - pc_factory_->CreatePeerConnection(config.rtc_config, std::move(pc_deps)); + pc_factory_ + ->CreatePeerConnectionOrError(config.rtc_config, std::move(pc_deps)) + .MoveValue(); if (log_writer_factory_) { peer_connection_->StartRtcEventLog(log_writer_factory_->Create(".rtc.dat"), 
/*output_period_ms=*/1000); @@ -287,14 +293,17 @@ void PeerScenarioClient::AddVideoReceiveSink( } void PeerScenarioClient::CreateAndSetSdp( + std::function munge_offer, std::function offer_handler) { RTC_DCHECK_RUN_ON(signaling_thread_); peer_connection_->CreateOffer( SdpCreateObserver([=](SessionDescriptionInterface* offer) { RTC_DCHECK_RUN_ON(signaling_thread_); + if (munge_offer) { + munge_offer(offer); + } std::string sdp_offer; - offer->ToString(&sdp_offer); - RTC_LOG(LS_INFO) << sdp_offer; + RTC_CHECK(offer->ToString(&sdp_offer)); peer_connection_->SetLocalDescription( SdpSetObserver( [sdp_offer, offer_handler]() { offer_handler(sdp_offer); }), diff --git a/test/peer_scenario/peer_scenario_client.h b/test/peer_scenario/peer_scenario_client.h index d939d7f3a7..65ad0734db 100644 --- a/test/peer_scenario/peer_scenario_client.h +++ b/test/peer_scenario/peer_scenario_client.h @@ -89,6 +89,7 @@ class PeerScenarioClient { {0, EmulatedEndpointConfig()}}; CallbackHandlers handlers; PeerConnectionInterface::RTCConfiguration rtc_config; + bool disable_encryption = false; Config() { rtc_config.sdp_semantics = SdpSemantics::kUnifiedPlan; } }; @@ -136,9 +137,13 @@ class PeerScenarioClient { CallbackHandlers* handlers() { return &handlers_; } - // Note that there's no provision for munging SDP as that is deprecated - // behavior. - void CreateAndSetSdp(std::function offer_handler); + // The |munge_offer| function can be used to munge the SDP, i.e. modify a + // local description afer creating it but before setting it. Note that this is + // legacy behavior. It's added here only to be able to have test coverage for + // scenarios even if they are not spec compliant. 
+ void CreateAndSetSdp( + std::function munge_offer, + std::function offer_handler); void SetSdpOfferAndGetAnswer(std::string remote_offer, std::function answer_handler); void SetSdpAnswer( diff --git a/test/peer_scenario/scenario_connection.cc b/test/peer_scenario/scenario_connection.cc index 92082f5097..fefaa00c72 100644 --- a/test/peer_scenario/scenario_connection.cc +++ b/test/peer_scenario/scenario_connection.cc @@ -97,8 +97,7 @@ ScenarioIceConnectionImpl::ScenarioIceConnectionImpl( port_allocator_( new cricket::BasicPortAllocator(manager_->network_manager())), jsep_controller_( - new JsepTransportController(signaling_thread_, - network_thread_, + new JsepTransportController(network_thread_, port_allocator_.get(), /*async_resolver_factory*/ nullptr, CreateJsepConfig())) { @@ -165,8 +164,12 @@ void ScenarioIceConnectionImpl::SetRemoteSdp(SdpType type, const std::string& remote_sdp) { RTC_DCHECK_RUN_ON(signaling_thread_); remote_description_ = webrtc::CreateSessionDescription(type, remote_sdp); - jsep_controller_->SignalIceCandidatesGathered.connect( - this, &ScenarioIceConnectionImpl::OnCandidates); + jsep_controller_->SubscribeIceCandidateGathered( + [this](const std::string& transport, + const std::vector& candidate) { + ScenarioIceConnectionImpl::OnCandidates(transport, candidate); + }); + auto res = jsep_controller_->SetRemoteDescription( remote_description_->GetType(), remote_description_->description()); RTC_CHECK(res.ok()) << res.message(); diff --git a/test/peer_scenario/signaling_route.cc b/test/peer_scenario/signaling_route.cc index 1e5b9aad9a..908d405461 100644 --- a/test/peer_scenario/signaling_route.cc +++ b/test/peer_scenario/signaling_route.cc @@ -41,7 +41,7 @@ struct IceMessage { void StartIceSignalingForRoute(PeerScenarioClient* caller, PeerScenarioClient* callee, - TrafficRoute* send_route) { + CrossTrafficRoute* send_route) { caller->handlers()->on_ice_candidate.push_back( [=](const IceCandidateInterface* candidate) { IceMessage 
msg(candidate); @@ -56,11 +56,12 @@ void StartIceSignalingForRoute(PeerScenarioClient* caller, void StartSdpNegotiation( PeerScenarioClient* caller, PeerScenarioClient* callee, - TrafficRoute* send_route, - TrafficRoute* ret_route, + CrossTrafficRoute* send_route, + CrossTrafficRoute* ret_route, + std::function munge_offer, std::function modify_offer, std::function exchange_finished) { - caller->CreateAndSetSdp([=](std::string sdp_offer) { + caller->CreateAndSetSdp(munge_offer, [=](std::string sdp_offer) { if (modify_offer) { auto offer = CreateSessionDescription(SdpType::kOffer, sdp_offer); modify_offer(offer.get()); @@ -79,8 +80,8 @@ void StartSdpNegotiation( SignalingRoute::SignalingRoute(PeerScenarioClient* caller, PeerScenarioClient* callee, - TrafficRoute* send_route, - TrafficRoute* ret_route) + CrossTrafficRoute* send_route, + CrossTrafficRoute* ret_route) : caller_(caller), callee_(callee), send_route_(send_route), @@ -92,15 +93,22 @@ void SignalingRoute::StartIceSignaling() { } void SignalingRoute::NegotiateSdp( + std::function munge_offer, std::function modify_offer, std::function exchange_finished) { - StartSdpNegotiation(caller_, callee_, send_route_, ret_route_, modify_offer, - exchange_finished); + StartSdpNegotiation(caller_, callee_, send_route_, ret_route_, munge_offer, + modify_offer, exchange_finished); +} + +void SignalingRoute::NegotiateSdp( + std::function modify_offer, + std::function exchange_finished) { + NegotiateSdp({}, modify_offer, exchange_finished); } void SignalingRoute::NegotiateSdp( std::function exchange_finished) { - NegotiateSdp({}, exchange_finished); + NegotiateSdp({}, {}, exchange_finished); } } // namespace test diff --git a/test/peer_scenario/signaling_route.h b/test/peer_scenario/signaling_route.h index 189c4b6f3f..021fc4989b 100644 --- a/test/peer_scenario/signaling_route.h +++ b/test/peer_scenario/signaling_route.h @@ -25,12 +25,24 @@ class SignalingRoute { public: SignalingRoute(PeerScenarioClient* caller, 
PeerScenarioClient* callee, - TrafficRoute* send_route, - TrafficRoute* ret_route); + CrossTrafficRoute* send_route, + CrossTrafficRoute* ret_route); void StartIceSignaling(); + // The |modify_offer| callback is used to modify an offer after the local + // description has been set. This is legal (but odd) behavior. + // The |munge_offer| callback is used to modify an offer between its creation + // and set local description. This behavior is forbidden according to the spec + // but available here in order to allow test coverage on corner cases. + // The |exchange_finished| callback is called with the answer produced after + // SDP negotations has completed. // TODO(srte): Handle lossy links. + void NegotiateSdp( + std::function munge_offer, + std::function modify_offer, + std::function + exchange_finished); void NegotiateSdp( std::function modify_offer, std::function @@ -45,8 +57,8 @@ class SignalingRoute { private: PeerScenarioClient* const caller_; PeerScenarioClient* const callee_; - TrafficRoute* const send_route_; - TrafficRoute* const ret_route_; + CrossTrafficRoute* const send_route_; + CrossTrafficRoute* const ret_route_; }; } // namespace test diff --git a/test/peer_scenario/tests/BUILD.gn b/test/peer_scenario/tests/BUILD.gn index 35528626f8..a8b9c2563e 100644 --- a/test/peer_scenario/tests/BUILD.gn +++ b/test/peer_scenario/tests/BUILD.gn @@ -14,13 +14,18 @@ if (rtc_include_tests) { sources = [ "peer_scenario_quality_test.cc", "remote_estimate_test.cc", + "unsignaled_stream_test.cc", ] deps = [ "..:peer_scenario", "../../:field_trial", + "../../:rtp_test_utils", "../../:test_support", + "../../../media:rtc_media_base", "../../../modules/rtp_rtcp:rtp_rtcp", + "../../../modules/rtp_rtcp:rtp_rtcp_format", "../../../pc:rtc_pc_base", + "../../../pc:session_description", ] } } diff --git a/test/peer_scenario/tests/remote_estimate_test.cc b/test/peer_scenario/tests/remote_estimate_test.cc index b882ad9dc2..f1d8345fde 100644 --- 
a/test/peer_scenario/tests/remote_estimate_test.cc +++ b/test/peer_scenario/tests/remote_estimate_test.cc @@ -8,6 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ +#include "modules/rtp_rtcp/source/rtp_util.h" #include "modules/rtp_rtcp/source/rtp_utility.h" #include "pc/media_session.h" #include "pc/session_description.h" @@ -29,7 +30,7 @@ absl::optional GetRtpPacketExtensions( const rtc::ArrayView packet, const RtpHeaderExtensionMap& extension_map) { RtpUtility::RtpHeaderParser rtp_parser(packet.data(), packet.size()); - if (!rtp_parser.RTCP()) { + if (IsRtpPacket(packet)) { RTPHeader header; if (rtp_parser.Parse(&header, &extension_map, true)) { return header.extension; diff --git a/test/peer_scenario/tests/unsignaled_stream_test.cc b/test/peer_scenario/tests/unsignaled_stream_test.cc new file mode 100644 index 0000000000..e0fe02edcf --- /dev/null +++ b/test/peer_scenario/tests/unsignaled_stream_test.cc @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "media/base/stream_params.h" +#include "modules/rtp_rtcp/source/byte_io.h" +#include "modules/rtp_rtcp/source/rtp_util.h" +#include "pc/media_session.h" +#include "pc/session_description.h" +#include "test/field_trial.h" +#include "test/gmock.h" +#include "test/gtest.h" +#include "test/peer_scenario/peer_scenario.h" +#include "test/rtp_header_parser.h" + +namespace webrtc { +namespace test { +namespace { + +enum class MidTestConfiguration { + // Legacy endpoint setup where PT demuxing is used. + kMidNotNegotiated, + // MID is negotiated but missing from packets. 
PT demuxing is disabled, so + // SSRCs have to be added to the SDP for WebRTC to forward packets correctly. + // Happens when client is spec compliant but the SFU isn't. Popular legacy. + kMidNegotiatedButMissingFromPackets, + // Fully spec-compliant: MID is present so we can safely drop packets with + // unknown MIDs. + kMidNegotiatedAndPresentInPackets, +}; + +// Gives the parameterized test a readable suffix. +std::string TestParametersMidTestConfigurationToString( + testing::TestParamInfo info) { + switch (info.param) { + case MidTestConfiguration::kMidNotNegotiated: + return "MidNotNegotiated"; + case MidTestConfiguration::kMidNegotiatedButMissingFromPackets: + return "MidNegotiatedButMissingFromPackets"; + case MidTestConfiguration::kMidNegotiatedAndPresentInPackets: + return "MidNegotiatedAndPresentInPackets"; + } +} + +class FrameObserver : public rtc::VideoSinkInterface { + public: + FrameObserver() : frame_observed_(false) {} + void OnFrame(const VideoFrame&) override { frame_observed_ = true; } + + std::atomic frame_observed_; +}; + +uint32_t get_ssrc(SessionDescriptionInterface* offer, size_t track_index) { + EXPECT_LT(track_index, offer->description()->contents().size()); + return offer->description() + ->contents()[track_index] + .media_description() + ->streams()[0] + .ssrcs[0]; +} + +void set_ssrc(SessionDescriptionInterface* offer, size_t index, uint32_t ssrc) { + EXPECT_LT(index, offer->description()->contents().size()); + cricket::StreamParams& new_stream_params = offer->description() + ->contents()[index] + .media_description() + ->mutable_streams()[0]; + new_stream_params.ssrcs[0] = ssrc; + new_stream_params.ssrc_groups[0].ssrcs[0] = ssrc; +} + +} // namespace + +class UnsignaledStreamTest + : public ::testing::Test, + public ::testing::WithParamInterface {}; + +TEST_P(UnsignaledStreamTest, ReplacesUnsignaledStreamOnCompletedSignaling) { + // This test covers a scenario that might occur if a remote client starts + // sending media packets 
before negotiation has completed. Depending on setup, + // these packets either get dropped or trigger an unsignalled default stream + // to be created, and connects that to a default video sink. + // In some edge cases using Unified Plan and PT demuxing, the default stream + // is create in a different transceiver to where the media SSRC will actually + // be used. This test verifies that the default stream is removed properly, + // and that packets are demuxed and video frames reach the desired sink. + const MidTestConfiguration kMidTestConfiguration = GetParam(); + + // Defined before PeerScenario so it gets destructed after, to avoid use after + // free. + PeerScenario s(*::testing::UnitTest::GetInstance()->current_test_info()); + + PeerScenarioClient::Config config = PeerScenarioClient::Config(); + // Disable encryption so that we can inject a fake early media packet without + // triggering srtp failures. + config.disable_encryption = true; + auto* caller = s.CreateClient(config); + auto* callee = s.CreateClient(config); + + auto send_node = s.net()->NodeBuilder().Build().node; + auto ret_node = s.net()->NodeBuilder().Build().node; + + s.net()->CreateRoute(caller->endpoint(), {send_node}, callee->endpoint()); + s.net()->CreateRoute(callee->endpoint(), {ret_node}, caller->endpoint()); + + auto signaling = s.ConnectSignaling(caller, callee, {send_node}, {ret_node}); + PeerScenarioClient::VideoSendTrackConfig video_conf; + video_conf.generator.squares_video->framerate = 15; + + auto first_track = caller->CreateVideo("VIDEO", video_conf); + FrameObserver first_sink; + callee->AddVideoReceiveSink(first_track.track->id(), &first_sink); + + signaling.StartIceSignaling(); + std::atomic offer_exchange_done(false); + std::atomic got_unsignaled_packet(false); + + // We will capture the media ssrc of the first added stream, and preemptively + // inject a new media packet using a different ssrc. What happens depends on + // the test configuration. 
+ // + // MidTestConfiguration::kMidNotNegotiated: + // - MID is not negotiated which means PT-based demuxing is enabled. Because + // the packets have no MID, the second ssrc packet gets forwarded to the + // first m= section. This will create a "default stream" for the second ssrc + // and connect it to the default video sink (not set in this test). The test + // verifies we can recover from this when we later get packets for the first + // ssrc. + // + // MidTestConfiguration::kMidNegotiatedButMissingFromPackets: + // - MID is negotiated wich means PT-based demuxing is disabled. Because we + // modify the packets not to contain the MID anyway (simulating a legacy SFU + // that does not negotiate properly) unknown SSRCs are dropped but do not + // otherwise cause any issues. + // + // MidTestConfiguration::kMidNegotiatedAndPresentInPackets: + // - MID is negotiated which means PT-based demuxing is enabled. In this case + // the packets have the MID so they either get forwarded or dropped + // depending on if the MID is known. The spec-compliant way is also the most + // straight-forward one. + + uint32_t first_ssrc = 0; + uint32_t second_ssrc = 0; + absl::optional mid_header_extension_id = absl::nullopt; + + signaling.NegotiateSdp( + /* munge_sdp = */ + [&](SessionDescriptionInterface* offer) { + // Obtain the MID header extension ID and if we want the + // MidTestConfiguration::kMidNotNegotiated setup then we remove the MID + // header extension through SDP munging (otherwise SDP is not modified). + for (cricket::ContentInfo& content_info : + offer->description()->contents()) { + std::vector header_extensions = + content_info.media_description()->rtp_header_extensions(); + for (auto it = header_extensions.begin(); + it != header_extensions.end(); ++it) { + if (it->uri == RtpExtension::kMidUri) { + // MID header extension found! 
+ mid_header_extension_id = it->id; + if (kMidTestConfiguration == + MidTestConfiguration::kMidNotNegotiated) { + // Munge away the extension. + header_extensions.erase(it); + } + break; + } + } + content_info.media_description()->set_rtp_header_extensions( + std::move(header_extensions)); + } + ASSERT_TRUE(mid_header_extension_id.has_value()); + }, + /* modify_sdp = */ + [&](SessionDescriptionInterface* offer) { + first_ssrc = get_ssrc(offer, 0); + second_ssrc = first_ssrc + 1; + + send_node->router()->SetWatcher([&](const EmulatedIpPacket& packet) { + if (IsRtpPacket(packet.data) && + ByteReader::ReadBigEndian(&(packet.cdata()[8])) == + first_ssrc && + !got_unsignaled_packet) { + // Parse packet and modify the SSRC to simulate a second m= + // section that has not been negotiated yet. + std::vector extensions; + extensions.emplace_back(RtpExtension::kMidUri, + mid_header_extension_id.value()); + RtpHeaderExtensionMap extensions_map(extensions); + RtpPacket parsed_packet; + parsed_packet.IdentifyExtensions(extensions_map); + ASSERT_TRUE(parsed_packet.Parse(packet.data)); + parsed_packet.SetSsrc(second_ssrc); + // The MID extension is present if and only if it was negotiated. + // If present, we either want to remove it or modify it depending + // on setup. + switch (kMidTestConfiguration) { + case MidTestConfiguration::kMidNotNegotiated: + EXPECT_FALSE(parsed_packet.HasExtension()); + break; + case MidTestConfiguration::kMidNegotiatedButMissingFromPackets: + EXPECT_TRUE(parsed_packet.HasExtension()); + ASSERT_TRUE(parsed_packet.RemoveExtension(RtpMid::kId)); + break; + case MidTestConfiguration::kMidNegotiatedAndPresentInPackets: + EXPECT_TRUE(parsed_packet.HasExtension()); + // The simulated second m= section would have a different MID. + // If we don't modify it here then |second_ssrc| would end up + // being mapped to the first m= section which would cause SSRC + // conflicts if we later add the same SSRC to a second m= + // section. 
Hidden assumption: first m= section does not use + // MID:1. + ASSERT_TRUE(parsed_packet.SetExtension("1")); + break; + } + // Inject the modified packet. + rtc::CopyOnWriteBuffer updated_buffer = parsed_packet.Buffer(); + EmulatedIpPacket updated_packet( + packet.from, packet.to, updated_buffer, packet.arrival_time); + send_node->OnPacketReceived(std::move(updated_packet)); + got_unsignaled_packet = true; + } + }); + }, + [&](const SessionDescriptionInterface& answer) { + EXPECT_EQ(answer.description()->contents().size(), 1u); + offer_exchange_done = true; + }); + EXPECT_TRUE(s.WaitAndProcess(&offer_exchange_done)); + EXPECT_TRUE(s.WaitAndProcess(&got_unsignaled_packet)); + EXPECT_TRUE(s.WaitAndProcess(&first_sink.frame_observed_)); + + auto second_track = caller->CreateVideo("VIDEO2", video_conf); + FrameObserver second_sink; + callee->AddVideoReceiveSink(second_track.track->id(), &second_sink); + + // Create a second video stream, munge the sdp to force it to use our fake + // early media ssrc. 
+ offer_exchange_done = false; + signaling.NegotiateSdp( + /* munge_sdp = */ + [&](SessionDescriptionInterface* offer) { + set_ssrc(offer, 1, second_ssrc); + }, + /* modify_sdp = */ {}, + [&](const SessionDescriptionInterface& answer) { + EXPECT_EQ(answer.description()->contents().size(), 2u); + offer_exchange_done = true; + }); + EXPECT_TRUE(s.WaitAndProcess(&offer_exchange_done)); + EXPECT_TRUE(s.WaitAndProcess(&second_sink.frame_observed_)); +} + +INSTANTIATE_TEST_SUITE_P( + All, + UnsignaledStreamTest, + ::testing::Values(MidTestConfiguration::kMidNotNegotiated, + MidTestConfiguration::kMidNegotiatedButMissingFromPackets, + MidTestConfiguration::kMidNegotiatedAndPresentInPackets), + TestParametersMidTestConfigurationToString); + +} // namespace test +} // namespace webrtc diff --git a/test/rtp_file_reader.cc b/test/rtp_file_reader.cc index 336beff1e7..a09d5a66e4 100644 --- a/test/rtp_file_reader.cc +++ b/test/rtp_file_reader.cc @@ -17,6 +17,7 @@ #include #include +#include "modules/rtp_rtcp/source/rtp_util.h" #include "modules/rtp_rtcp/source/rtp_utility.h" #include "rtc_base/checks.h" #include "rtc_base/constructor_magic.h" @@ -82,15 +83,15 @@ class InterleavedRtpFileReader : public RtpFileReaderImpl { } bool NextPacket(RtpPacket* packet) override { - assert(file_ != nullptr); + RTC_DCHECK(file_); packet->length = RtpPacket::kMaxPacketBufferSize; uint32_t len = 0; TRY(ReadUint32(&len, file_)); if (packet->length < len) { - FATAL() << "Packet is too large to fit: " << len << " bytes vs " - << packet->length - << " bytes allocated. Consider increasing the buffer " - "size"; + RTC_FATAL() << "Packet is too large to fit: " << len << " bytes vs " + << packet->length + << " bytes allocated. 
Consider increasing the buffer " + << "size"; } if (fread(packet->data, 1, len, file_) != len) return false; @@ -275,7 +276,7 @@ class PcapReader : public RtpFileReaderImpl { if (result == kResultFail) { break; } else if (result == kResultSuccess && packets_.size() == 1) { - assert(stream_start_ms == 0); + RTC_DCHECK_EQ(stream_start_ms, 0); PacketIterator it = packets_.begin(); stream_start_ms = it->time_offset_ms; it->time_offset_ms = 0; @@ -329,9 +330,9 @@ class PcapReader : public RtpFileReaderImpl { } virtual int NextPcap(uint8_t* data, uint32_t* length, uint32_t* time_ms) { - assert(data); - assert(length); - assert(time_ms); + RTC_DCHECK(data); + RTC_DCHECK(length); + RTC_DCHECK(time_ms); if (next_packet_it_ == packets_.end()) { return -1; @@ -408,7 +409,7 @@ class PcapReader : public RtpFileReaderImpl { uint32_t stream_start_ms, uint32_t number, const std::set& ssrc_filter) { - assert(next_packet_pos); + RTC_DCHECK(next_packet_pos); uint32_t ts_sec; // Timestamp seconds. uint32_t ts_usec; // Timestamp microseconds. @@ -434,7 +435,7 @@ class PcapReader : public RtpFileReaderImpl { TRY_PCAP(Read(read_buffer_, marker.payload_length)); RtpUtility::RtpHeaderParser rtp_parser(read_buffer_, marker.payload_length); - if (rtp_parser.RTCP()) { + if (IsRtcpPacket(rtc::MakeArrayView(read_buffer_, marker.payload_length))) { rtp_parser.ParseRtcp(&marker.rtp_header); packets_.push_back(marker); } else { @@ -503,7 +504,7 @@ class PcapReader : public RtpFileReaderImpl { } int ReadXxpIpHeader(RtpPacketMarker* marker) { - assert(marker); + RTC_DCHECK(marker); uint16_t version; uint16_t length; @@ -533,7 +534,7 @@ class PcapReader : public RtpFileReaderImpl { // Skip remaining fields of IP header. 
uint16_t header_length = (version & 0x0f00) >> (8 - 2); - assert(header_length >= kMinIpHeaderLength); + RTC_DCHECK_GE(header_length, kMinIpHeaderLength); TRY_PCAP(Skip(header_length - kMinIpHeaderLength)); protocol = protocol & 0x00ff; diff --git a/test/rtp_header_parser.cc b/test/rtp_header_parser.cc index 713e64d83c..48e493ddeb 100644 --- a/test/rtp_header_parser.cc +++ b/test/rtp_header_parser.cc @@ -9,47 +9,10 @@ */ #include "test/rtp_header_parser.h" -#include - -#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/source/rtp_utility.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/thread_annotations.h" namespace webrtc { -class RtpHeaderParserImpl : public RtpHeaderParser { - public: - RtpHeaderParserImpl(); - ~RtpHeaderParserImpl() override = default; - - bool Parse(const uint8_t* packet, - size_t length, - RTPHeader* header) const override; - - bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id) override; - bool RegisterRtpHeaderExtension(RtpExtension extension) override; - - bool DeregisterRtpHeaderExtension(RTPExtensionType type) override; - bool DeregisterRtpHeaderExtension(RtpExtension extension) override; - - private: - rtc::CriticalSection critical_section_; - RtpHeaderExtensionMap rtp_header_extension_map_ - RTC_GUARDED_BY(critical_section_); -}; - -std::unique_ptr RtpHeaderParser::CreateForTest() { - return std::make_unique(); -} - -RtpHeaderParserImpl::RtpHeaderParserImpl() {} - -bool RtpHeaderParser::IsRtcp(const uint8_t* packet, size_t length) { - RtpUtility::RtpHeaderParser rtp_parser(packet, length); - return rtp_parser.RTCP(); -} - absl::optional RtpHeaderParser::GetSsrc(const uint8_t* packet, size_t length) { RtpUtility::RtpHeaderParser rtp_parser(packet, length); @@ -60,43 +23,4 @@ absl::optional RtpHeaderParser::GetSsrc(const uint8_t* packet, return absl::nullopt; } -bool RtpHeaderParserImpl::Parse(const uint8_t* packet, - size_t length, - RTPHeader* header) const { - 
RtpUtility::RtpHeaderParser rtp_parser(packet, length); - *header = RTPHeader(); - - RtpHeaderExtensionMap map; - { - rtc::CritScope cs(&critical_section_); - map = rtp_header_extension_map_; - } - - const bool valid_rtpheader = rtp_parser.Parse(header, &map); - if (!valid_rtpheader) { - return false; - } - return true; -} -bool RtpHeaderParserImpl::RegisterRtpHeaderExtension(RtpExtension extension) { - rtc::CritScope cs(&critical_section_); - return rtp_header_extension_map_.RegisterByUri(extension.id, extension.uri); -} - -bool RtpHeaderParserImpl::RegisterRtpHeaderExtension(RTPExtensionType type, - uint8_t id) { - rtc::CritScope cs(&critical_section_); - return rtp_header_extension_map_.RegisterByType(id, type); -} - -bool RtpHeaderParserImpl::DeregisterRtpHeaderExtension(RtpExtension extension) { - rtc::CritScope cs(&critical_section_); - return rtp_header_extension_map_.Deregister( - rtp_header_extension_map_.GetType(extension.id)); -} - -bool RtpHeaderParserImpl::DeregisterRtpHeaderExtension(RTPExtensionType type) { - rtc::CritScope cs(&critical_section_); - return rtp_header_extension_map_.Deregister(type) == 0; -} } // namespace webrtc diff --git a/test/rtp_header_parser.h b/test/rtp_header_parser.h index 851ccf3bc2..f6ed74c043 100644 --- a/test/rtp_header_parser.h +++ b/test/rtp_header_parser.h @@ -10,44 +10,16 @@ #ifndef TEST_RTP_HEADER_PARSER_H_ #define TEST_RTP_HEADER_PARSER_H_ -#include +#include +#include -#include "api/rtp_parameters.h" -#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" +#include "absl/types/optional.h" namespace webrtc { -struct RTPHeader; - class RtpHeaderParser { public: - static std::unique_ptr CreateForTest(); - virtual ~RtpHeaderParser() {} - - // Returns true if the packet is an RTCP packet, false otherwise. - static bool IsRtcp(const uint8_t* packet, size_t length); static absl::optional GetSsrc(const uint8_t* packet, size_t length); - - // Parses the packet and stores the parsed packet in |header|. 
Returns true on - // success, false otherwise. - // This method is thread-safe in the sense that it can parse multiple packets - // at once. - virtual bool Parse(const uint8_t* packet, - size_t length, - RTPHeader* header) const = 0; - - // Registers an RTP header extension and binds it to |id|. - virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, - uint8_t id) = 0; - - // Registers an RTP header extension. - virtual bool RegisterRtpHeaderExtension(RtpExtension extension) = 0; - - // De-registers an RTP header extension. - virtual bool DeregisterRtpHeaderExtension(RTPExtensionType type) = 0; - - // De-registers an RTP header extension. - virtual bool DeregisterRtpHeaderExtension(RtpExtension extension) = 0; }; } // namespace webrtc #endif // TEST_RTP_HEADER_PARSER_H_ diff --git a/test/rtp_rtcp_observer.h b/test/rtp_rtcp_observer.h index 3bfa475f73..f17560f021 100644 --- a/test/rtp_rtcp_observer.h +++ b/test/rtp_rtcp_observer.h @@ -15,15 +15,15 @@ #include #include +#include "api/array_view.h" #include "api/test/simulated_network.h" #include "call/simulated_packet_receiver.h" #include "call/video_send_stream.h" -#include "rtc_base/critical_section.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/event.h" #include "system_wrappers/include/field_trial.h" #include "test/direct_transport.h" #include "test/gtest.h" -#include "test/rtp_header_parser.h" namespace { const int kShortTimeoutMs = 500; @@ -99,7 +99,7 @@ class PacketTransport : public test::DirectTransport { bool SendRtp(const uint8_t* packet, size_t length, const PacketOptions& options) override { - EXPECT_FALSE(RtpHeaderParser::IsRtcp(packet, length)); + EXPECT_TRUE(IsRtpPacket(rtc::MakeArrayView(packet, length))); RtpRtcpObserver::Action action; { if (transport_type_ == kSender) { @@ -119,7 +119,7 @@ class PacketTransport : public test::DirectTransport { } bool SendRtcp(const uint8_t* packet, size_t length) override { - EXPECT_TRUE(RtpHeaderParser::IsRtcp(packet, length)); + 
EXPECT_TRUE(IsRtcpPacket(rtc::MakeArrayView(packet, length))); RtpRtcpObserver::Action action; { if (transport_type_ == kSender) { diff --git a/test/scenario/BUILD.gn b/test/scenario/BUILD.gn index e2e5f8cef2..a64f8317a0 100644 --- a/test/scenario/BUILD.gn +++ b/test/scenario/BUILD.gn @@ -21,7 +21,7 @@ rtc_library("column_printer") { ] } -if (is_ios || rtc_include_tests) { +if (rtc_include_tests && !build_with_chromium) { scenario_resources = [ "../../resources/difficult_photo_1850_1110.yuv", "../../resources/photo_1850_1110.yuv", @@ -29,21 +29,20 @@ if (is_ios || rtc_include_tests) { "../../resources/web_screenshot_1850_1110.yuv", ] scenario_unittest_resources = [ "../../resources/foreman_cif.yuv" ] -} -if (is_ios) { - bundle_data("scenario_resources_bundle_data") { - testonly = true - sources = scenario_resources - outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ] - } - bundle_data("scenario_unittest_resources_bundle_data") { - testonly = true - sources = scenario_unittest_resources - outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ] + if (is_ios) { + bundle_data("scenario_resources_bundle_data") { + testonly = true + sources = scenario_resources + outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ] + } + bundle_data("scenario_unittest_resources_bundle_data") { + testonly = true + sources = scenario_unittest_resources + outputs = [ "{{bundle_resources_dir}}/{{source_file_part}}" ] + } } -} -if (rtc_include_tests) { + rtc_library("scenario") { testonly = true sources = [ @@ -82,6 +81,7 @@ if (rtc_include_tests) { "../../api:libjingle_peerconnection_api", "../../api:rtc_event_log_output_file", "../../api:rtp_parameters", + "../../api:sequence_checker", "../../api:time_controller", "../../api:time_controller", "../../api:transport_api", @@ -97,7 +97,6 @@ if (rtc_include_tests) { "../../api/units:timestamp", "../../api/video:builtin_video_bitrate_allocator_factory", "../../api/video:video_frame", - "../../api/video:video_frame_i420", 
"../../api/video:video_rtp_headers", "../../api/video_codecs:video_codecs_api", "../../audio", @@ -130,10 +129,13 @@ if (rtc_include_tests) { "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_base_tests_utils", "../../rtc_base:rtc_numerics", + "../../rtc_base:rtc_stats_counters", "../../rtc_base:rtc_task_queue", "../../rtc_base:safe_minmax", + "../../rtc_base:socket_address", "../../rtc_base:task_queue_for_test", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base:threading", + "../../rtc_base/synchronization:mutex", "../../rtc_base/task_utils:repeating_task", "../../system_wrappers", "../../system_wrappers:field_trial", @@ -141,6 +143,8 @@ if (rtc_include_tests) { "../logging:log_writer", "../network:emulated_network", "../time_controller", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/flags:flag", "//third_party/abseil-cpp/absl/flags:parse", "//third_party/abseil-cpp/absl/memory", @@ -164,12 +168,15 @@ if (rtc_include_tests) { testonly = true sources = [ "performance_stats_unittest.cc", + "probing_test.cc", "scenario_unittest.cc", "stats_collection_unittest.cc", "video_stream_unittest.cc", ] deps = [ ":scenario", + "../../api/test/network_emulation", + "../../api/test/network_emulation:create_cross_traffic", "../../logging:mocks", "../../rtc_base:checks", "../../rtc_base:rtc_base_approved", diff --git a/test/scenario/audio_stream.cc b/test/scenario/audio_stream.cc index 47e7f768d1..63f78c8f71 100644 --- a/test/scenario/audio_stream.cc +++ b/test/scenario/audio_stream.cc @@ -185,7 +185,6 @@ ReceiveAudioStream::ReceiveAudioStream( recv_config.rtp.extensions = {{RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberExtensionId}}; } - receiver_->AddExtensions(recv_config.rtp.extensions); recv_config.decoder_factory = decoder_factory; recv_config.decoder_map = { {CallTest::kAudioSendPayloadType, {"opus", 48000, 2}}}; @@ -212,7 +211,9 @@ void ReceiveAudioStream::Stop() { AudioReceiveStream::Stats ReceiveAudioStream::GetStats() 
const { AudioReceiveStream::Stats result; - receiver_->SendTask([&] { result = receive_stream_->GetStats(); }); + receiver_->SendTask([&] { + result = receive_stream_->GetStats(/*get_and_clear_legacy_stats=*/true); + }); return result; } diff --git a/test/scenario/call_client.cc b/test/scenario/call_client.cc index fb888df694..be8d39f2a5 100644 --- a/test/scenario/call_client.cc +++ b/test/scenario/call_client.cc @@ -9,12 +9,16 @@ */ #include "test/scenario/call_client.h" +#include +#include #include -#include #include "api/rtc_event_log/rtc_event_log.h" #include "api/rtc_event_log/rtc_event_log_factory.h" +#include "api/transport/network_types.h" #include "modules/audio_mixer/audio_mixer_impl.h" +#include "modules/rtp_rtcp/source/rtp_util.h" +#include "test/rtp_header_parser.h" namespace webrtc { namespace test { @@ -54,7 +58,8 @@ Call* CreateCall(TimeController* time_controller, RtcEventLog* event_log, CallClientConfig config, LoggingNetworkControllerFactory* network_controller_factory, - rtc::scoped_refptr audio_state) { + rtc::scoped_refptr audio_state, + rtc::scoped_refptr call_thread) { CallConfig call_config(event_log); call_config.bitrate_config.max_bitrate_bps = config.transport.rates.max_rate.bps_or(-1); @@ -67,7 +72,7 @@ Call* CreateCall(TimeController* time_controller, call_config.audio_state = audio_state; call_config.trials = config.field_trials; return Call::Create(call_config, time_controller->GetClock(), - time_controller->CreateProcessThread("CallModules"), + std::move(call_thread), time_controller->CreateProcessThread("Pacer")); } @@ -196,6 +201,12 @@ TimeDelta LoggingNetworkControllerFactory::GetProcessInterval() const { return cc_factory_->GetProcessInterval(); } +void LoggingNetworkControllerFactory::SetRemoteBitrateEstimate( + RemoteBitrateReport msg) { + if (last_controller_) + last_controller_->OnRemoteBitrateReport(msg); +} + CallClient::CallClient( TimeController* time_controller, std::unique_ptr log_writer_factory, @@ -204,7 +215,6 @@ 
CallClient::CallClient( clock_(time_controller->GetClock()), log_writer_factory_(std::move(log_writer_factory)), network_controller_factory_(log_writer_factory_.get(), config.transport), - header_parser_(RtpHeaderParser::CreateForTest()), task_queue_(time_controller->GetTaskQueueFactory()->CreateTaskQueue( "CallClient", TaskQueueFactory::Priority::NORMAL)) { @@ -213,9 +223,14 @@ CallClient::CallClient( event_log_ = CreateEventLog(time_controller_->GetTaskQueueFactory(), log_writer_factory_.get()); fake_audio_setup_ = InitAudio(time_controller_); + RTC_DCHECK(!module_thread_); + module_thread_ = SharedModuleThread::Create( + time_controller_->CreateProcessThread("CallThread"), + [this]() { module_thread_ = nullptr; }); + call_.reset(CreateCall(time_controller_, event_log_.get(), config, &network_controller_factory_, - fake_audio_setup_.audio_state)); + fake_audio_setup_.audio_state, module_thread_)); transport_ = std::make_unique(clock_, call_.get()); }); } @@ -223,6 +238,7 @@ CallClient::CallClient( CallClient::~CallClient() { SendTask([&] { call_.reset(); + RTC_DCHECK(!module_thread_); // Should be set to null in the lambda above. 
fake_audio_setup_ = {}; rtc::Event done; event_log_->StopLogging([&done] { done.Set(); }); @@ -262,9 +278,23 @@ DataRate CallClient::padding_rate() const { return network_controller_factory_.GetUpdate().pacer_config->pad_rate(); } +void CallClient::SetRemoteBitrate(DataRate bitrate) { + RemoteBitrateReport msg; + msg.bandwidth = bitrate; + msg.receive_time = clock_->CurrentTime(); + network_controller_factory_.SetRemoteBitrateEstimate(msg); +} + +void CallClient::UpdateBitrateConstraints( + const BitrateConstraints& constraints) { + SendTask([this, &constraints]() { + call_->GetTransportControllerSend()->SetSdpBitrateParameters(constraints); + }); +} + void CallClient::OnPacketReceived(EmulatedIpPacket packet) { MediaType media_type = MediaType::ANY; - if (!RtpHeaderParser::IsRtcp(packet.cdata(), packet.data.size())) { + if (IsRtpPacket(packet.data)) { auto ssrc = RtpHeaderParser::GetSsrc(packet.cdata(), packet.data.size()); RTC_CHECK(ssrc.has_value()); media_type = ssrc_media_types_[*ssrc]; @@ -309,11 +339,6 @@ uint32_t CallClient::GetNextRtxSsrc() { return kSendRtxSsrcs[next_rtx_ssrc_index_++]; } -void CallClient::AddExtensions(std::vector extensions) { - for (const auto& extension : extensions) - header_parser_->RegisterRtpHeaderExtension(extension); -} - void CallClient::SendTask(std::function task) { task_queue_.SendTask(std::move(task), RTC_FROM_HERE); } diff --git a/test/scenario/call_client.h b/test/scenario/call_client.h index 33fa2765cb..08b0131350 100644 --- a/test/scenario/call_client.h +++ b/test/scenario/call_client.h @@ -18,6 +18,7 @@ #include "api/rtc_event_log/rtc_event_log.h" #include "api/test/time_controller.h" +#include "api/units/data_rate.h" #include "call/call.h" #include "modules/audio_device/include/test_audio_device.h" #include "modules/congestion_controller/goog_cc/test/goog_cc_printer.h" @@ -25,7 +26,6 @@ #include "rtc_base/task_queue_for_test.h" #include "test/logging/log_writer.h" #include "test/network/network_emulation.h" -#include 
"test/rtp_header_parser.h" #include "test/scenario/column_printer.h" #include "test/scenario/network_node.h" #include "test/scenario/scenario_config.h" @@ -75,6 +75,7 @@ class LoggingNetworkControllerFactory TimeDelta GetProcessInterval() const override; // TODO(srte): Consider using the Columnprinter interface for this. void LogCongestionControllerStats(Timestamp at_time); + void SetRemoteBitrateEstimate(RemoteBitrateReport msg); NetworkControlUpdate GetUpdate() const; @@ -109,6 +110,8 @@ class CallClient : public EmulatedNetworkReceiverInterface { DataRate target_rate() const; DataRate stable_target_rate() const; DataRate padding_rate() const; + void UpdateBitrateConstraints(const BitrateConstraints& constraints); + void SetRemoteBitrate(DataRate bitrate); void OnPacketReceived(EmulatedIpPacket packet) override; std::unique_ptr GetLogWriter(std::string name); @@ -133,7 +136,6 @@ class CallClient : public EmulatedNetworkReceiverInterface { uint32_t GetNextAudioSsrc(); uint32_t GetNextAudioLocalSsrc(); uint32_t GetNextRtxSsrc(); - void AddExtensions(std::vector extensions); int16_t Bind(EmulatedEndpoint* endpoint); void UnBind(); @@ -145,7 +147,6 @@ class CallClient : public EmulatedNetworkReceiverInterface { CallClientFakeAudio fake_audio_setup_; std::unique_ptr call_; std::unique_ptr transport_; - std::unique_ptr const header_parser_; std::vector> endpoints_; int next_video_ssrc_index_ = 0; @@ -157,6 +158,8 @@ class CallClient : public EmulatedNetworkReceiverInterface { // Defined last so it's destroyed first. 
TaskQueueForTest task_queue_; + rtc::scoped_refptr module_thread_; + const FieldTrialBasedConfig field_trials_; }; diff --git a/test/scenario/network_node.cc b/test/scenario/network_node.cc index aa576dcf53..702789fe73 100644 --- a/test/scenario/network_node.cc +++ b/test/scenario/network_node.cc @@ -86,7 +86,7 @@ bool NetworkNodeTransport::SendRtp(const uint8_t* packet, sent_packet.info.packet_type = rtc::PacketType::kData; sender_call_->OnSentPacket(sent_packet); - rtc::CritScope crit(&crit_sect_); + MutexLock lock(&mutex_); if (!endpoint_) return false; rtc::CopyOnWriteBuffer buffer(packet, length); @@ -97,7 +97,7 @@ bool NetworkNodeTransport::SendRtp(const uint8_t* packet, bool NetworkNodeTransport::SendRtcp(const uint8_t* packet, size_t length) { rtc::CopyOnWriteBuffer buffer(packet, length); - rtc::CritScope crit(&crit_sect_); + MutexLock lock(&mutex_); if (!endpoint_) return false; endpoint_->SendPacket(local_address_, remote_address_, buffer, @@ -121,7 +121,7 @@ void NetworkNodeTransport::Connect(EmulatedEndpoint* endpoint, { // Only IPv4 address is supported. 
RTC_CHECK_EQ(receiver_address.family(), AF_INET); - rtc::CritScope crit(&crit_sect_); + MutexLock lock(&mutex_); endpoint_ = endpoint; local_address_ = rtc::SocketAddress(endpoint_->GetPeerLocalAddress(), 0); remote_address_ = receiver_address; @@ -134,7 +134,7 @@ void NetworkNodeTransport::Connect(EmulatedEndpoint* endpoint, } void NetworkNodeTransport::Disconnect() { - rtc::CritScope crit(&crit_sect_); + MutexLock lock(&mutex_); current_network_route_.connected = false; sender_call_->GetTransportControllerSend()->OnNetworkRouteChanged( kDummyTransportName, current_network_route_); diff --git a/test/scenario/network_node.h b/test/scenario/network_node.h index b3d093b84e..ea8eb35daf 100644 --- a/test/scenario/network_node.h +++ b/test/scenario/network_node.h @@ -22,6 +22,7 @@ #include "call/simulated_network.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" #include "test/network/network_emulation.h" #include "test/scenario/column_printer.h" @@ -65,19 +66,19 @@ class NetworkNodeTransport : public Transport { void Disconnect(); DataSize packet_overhead() { - rtc::CritScope crit(&crit_sect_); + MutexLock lock(&mutex_); return packet_overhead_; } private: - rtc::CriticalSection crit_sect_; + Mutex mutex_; Clock* const sender_clock_; Call* const sender_call_; - EmulatedEndpoint* endpoint_ RTC_GUARDED_BY(crit_sect_) = nullptr; - rtc::SocketAddress local_address_ RTC_GUARDED_BY(crit_sect_); - rtc::SocketAddress remote_address_ RTC_GUARDED_BY(crit_sect_); - DataSize packet_overhead_ RTC_GUARDED_BY(crit_sect_) = DataSize::Zero(); - rtc::NetworkRoute current_network_route_ RTC_GUARDED_BY(crit_sect_); + EmulatedEndpoint* endpoint_ RTC_GUARDED_BY(mutex_) = nullptr; + rtc::SocketAddress local_address_ RTC_GUARDED_BY(mutex_); + rtc::SocketAddress remote_address_ RTC_GUARDED_BY(mutex_); + DataSize packet_overhead_ RTC_GUARDED_BY(mutex_) = DataSize::Zero(); + 
rtc::NetworkRoute current_network_route_ RTC_GUARDED_BY(mutex_); }; } // namespace test } // namespace webrtc diff --git a/test/scenario/probing_test.cc b/test/scenario/probing_test.cc new file mode 100644 index 0000000000..f08a003d5c --- /dev/null +++ b/test/scenario/probing_test.cc @@ -0,0 +1,133 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "test/gtest.h" +#include "test/scenario/scenario.h" + +namespace webrtc { +namespace test { + +TEST(ProbingTest, InitialProbingRampsUpTargetRateWhenNetworkIsGood) { + Scenario s; + NetworkSimulationConfig good_network; + good_network.bandwidth = DataRate::KilobitsPerSec(2000); + + VideoStreamConfig video_config; + video_config.encoder.codec = + VideoStreamConfig::Encoder::Codec::kVideoCodecVP8; + CallClientConfig send_config; + auto* caller = s.CreateClient("caller", send_config); + auto* callee = s.CreateClient("callee", CallClientConfig()); + auto route = + s.CreateRoutes(caller, {s.CreateSimulationNode(good_network)}, callee, + {s.CreateSimulationNode(NetworkSimulationConfig())}); + s.CreateVideoStream(route->forward(), video_config); + + s.RunFor(TimeDelta::Seconds(1)); + EXPECT_GE(DataRate::BitsPerSec(caller->GetStats().send_bandwidth_bps), + 3 * send_config.transport.rates.start_rate); +} + +TEST(ProbingTest, MidCallProbingRampupTriggeredByUpdatedBitrateConstraints) { + Scenario s; + + const DataRate kStartRate = DataRate::KilobitsPerSec(300); + const DataRate kConstrainedRate = DataRate::KilobitsPerSec(100); + const DataRate kHighRate = DataRate::KilobitsPerSec(2500); + + VideoStreamConfig video_config; + video_config.encoder.codec = + 
VideoStreamConfig::Encoder::Codec::kVideoCodecVP8; + CallClientConfig send_call_config; + send_call_config.transport.rates.start_rate = kStartRate; + send_call_config.transport.rates.max_rate = kHighRate * 2; + auto* caller = s.CreateClient("caller", send_call_config); + auto* callee = s.CreateClient("callee", CallClientConfig()); + auto route = s.CreateRoutes( + caller, {s.CreateSimulationNode(NetworkSimulationConfig())}, callee, + {s.CreateSimulationNode(NetworkSimulationConfig())}); + s.CreateVideoStream(route->forward(), video_config); + + // Wait until initial probing rampup is done and then set a low max bitrate. + s.RunFor(TimeDelta::Seconds(1)); + EXPECT_GE(DataRate::BitsPerSec(caller->GetStats().send_bandwidth_bps), + 5 * send_call_config.transport.rates.start_rate); + BitrateConstraints bitrate_config; + bitrate_config.max_bitrate_bps = kConstrainedRate.bps(); + caller->UpdateBitrateConstraints(bitrate_config); + + // Wait until the low send bitrate has taken effect, and then set a much + // higher max bitrate. + s.RunFor(TimeDelta::Seconds(2)); + EXPECT_LT(DataRate::BitsPerSec(caller->GetStats().send_bandwidth_bps), + kConstrainedRate * 1.1); + bitrate_config.max_bitrate_bps = 2 * kHighRate.bps(); + caller->UpdateBitrateConstraints(bitrate_config); + + // Check that the max send bitrate is reached quicker than would be possible + // with simple AIMD rate control. + s.RunFor(TimeDelta::Seconds(1)); + EXPECT_GE(DataRate::BitsPerSec(caller->GetStats().send_bandwidth_bps), + kHighRate); +} + +TEST(ProbingTest, ProbesRampsUpWhenVideoEncoderConfigChanges) { + Scenario s; + const DataRate kStartRate = DataRate::KilobitsPerSec(50); + const DataRate kHdRate = DataRate::KilobitsPerSec(3250); + + // Set up 3-layer simulcast. 
+ VideoStreamConfig video_config; + video_config.encoder.codec = + VideoStreamConfig::Encoder::Codec::kVideoCodecVP8; + video_config.encoder.layers.spatial = 3; + video_config.source.generator.width = 1280; + video_config.source.generator.height = 720; + + CallClientConfig send_call_config; + send_call_config.transport.rates.start_rate = kStartRate; + send_call_config.transport.rates.max_rate = kHdRate * 2; + auto* caller = s.CreateClient("caller", send_call_config); + auto* callee = s.CreateClient("callee", CallClientConfig()); + auto send_net = + s.CreateMutableSimulationNode([&](NetworkSimulationConfig* c) { + c->bandwidth = DataRate::KilobitsPerSec(200); + }); + auto route = + s.CreateRoutes(caller, {send_net->node()}, callee, + {s.CreateSimulationNode(NetworkSimulationConfig())}); + auto* video_stream = s.CreateVideoStream(route->forward(), video_config); + + // Only QVGA enabled initially. Run until initial probing is done and BWE + // has settled. + video_stream->send()->UpdateActiveLayers({true, false, false}); + s.RunFor(TimeDelta::Seconds(2)); + + // Remove network constraints and run for a while more, BWE should be much + // less than required HD rate. + send_net->UpdateConfig([&](NetworkSimulationConfig* c) { + c->bandwidth = DataRate::PlusInfinity(); + }); + s.RunFor(TimeDelta::Seconds(2)); + + DataRate bandwidth = + DataRate::BitsPerSec(caller->GetStats().send_bandwidth_bps); + EXPECT_LT(bandwidth, kHdRate / 4); + + // Enable all layers, triggering a probe. + video_stream->send()->UpdateActiveLayers({true, true, true}); + + // Run for a short while and verify BWE has ramped up fast. 
+ s.RunFor(TimeDelta::Seconds(2)); + EXPECT_GT(DataRate::BitsPerSec(caller->GetStats().send_bandwidth_bps), + kHdRate); +} + +} // namespace test +} // namespace webrtc diff --git a/test/scenario/scenario.cc b/test/scenario/scenario.cc index c1c664a754..239aad9dfe 100644 --- a/test/scenario/scenario.cc +++ b/test/scenario/scenario.cc @@ -198,7 +198,7 @@ SimulationNode* Scenario::CreateMutableSimulationNode( void Scenario::TriggerPacketBurst(std::vector over_nodes, size_t num_packets, size_t packet_size) { - network_manager_.CreateTrafficRoute(over_nodes) + network_manager_.CreateCrossTrafficRoute(over_nodes) ->TriggerPacketBurst(num_packets, packet_size); } @@ -206,7 +206,7 @@ void Scenario::NetworkDelayedAction( std::vector over_nodes, size_t packet_size, std::function action) { - network_manager_.CreateTrafficRoute(over_nodes) + network_manager_.CreateCrossTrafficRoute(over_nodes) ->NetworkDelayedAction(packet_size, action); } diff --git a/test/scenario/scenario_config.h b/test/scenario/scenario_config.h index c9d636a67f..c7320e9dc3 100644 --- a/test/scenario/scenario_config.h +++ b/test/scenario/scenario_config.h @@ -129,6 +129,7 @@ struct VideoStreamConfig { using Codec = VideoCodecType; Codec codec = Codec::kVideoCodecGeneric; absl::optional max_data_rate; + absl::optional min_data_rate; absl::optional max_framerate; // Counted in frame count. 
absl::optional key_frame_interval = 3000; @@ -149,6 +150,7 @@ struct VideoStreamConfig { DegradationPreference degradation_preference = DegradationPreference::MAINTAIN_FRAMERATE; + bool suspend_below_min_bitrate = false; } encoder; struct Stream { Stream(); diff --git a/test/scenario/scenario_unittest.cc b/test/scenario/scenario_unittest.cc index 839e6a375e..6861151a2d 100644 --- a/test/scenario/scenario_unittest.cc +++ b/test/scenario/scenario_unittest.cc @@ -11,6 +11,9 @@ #include +#include "api/test/network_emulation/create_cross_traffic.h" +#include "api/test/network_emulation/cross_traffic.h" +#include "test/field_trial.h" #include "test/gtest.h" #include "test/logging/memory_log_writer.h" #include "test/scenario/stats_collection.h" @@ -43,8 +46,8 @@ TEST(ScenarioTest, StartsAndStopsWithoutErrors) { s.CreateAudioStream(route->reverse(), audio_stream_config); RandomWalkConfig cross_traffic_config; - s.net()->CreateRandomWalkCrossTraffic( - s.net()->CreateTrafficRoute({alice_net}), cross_traffic_config); + s.net()->StartCrossTraffic(CreateRandomWalkCrossTraffic( + s.net()->CreateCrossTrafficRoute({alice_net}), cross_traffic_config)); s.NetworkDelayedAction({alice_net, bob_net}, 100, [&packet_received] { packet_received = true; }); @@ -119,7 +122,8 @@ TEST(ScenarioTest, MAYBE_RealTimeEncoding) { } // Regression tests based on previous runs. EXPECT_LT(analyzer.stats().lost_count, 2); - EXPECT_NEAR(analyzer.stats().psnr_with_freeze.Mean(), 38, 10); + // This far below expected but ensures that we get something. + EXPECT_GT(analyzer.stats().psnr_with_freeze.Mean(), 10); } TEST(ScenarioTest, SimTimeFakeing) { @@ -140,5 +144,53 @@ TEST(ScenarioTest, WritesToRtcEventLog) { EXPECT_GE(storage.logs().at("alice.rtc.dat").size(), 1u); } +TEST(ScenarioTest, + RetransmitsVideoPacketsInAudioAndVideoCallWithSendSideBweAndLoss) { + // Make sure audio packets are included in transport feedback. 
+ test::ScopedFieldTrials override_field_trials( + "WebRTC-Audio-ABWENoTWCC/Disabled/"); + + Scenario s; + CallClientConfig call_client_config; + call_client_config.transport.rates.start_rate = DataRate::KilobitsPerSec(300); + auto* alice = s.CreateClient("alice", call_client_config); + auto* bob = s.CreateClient("bob", call_client_config); + NetworkSimulationConfig network_config; + // Add some loss and delay. + network_config.delay = TimeDelta::Millis(200); + network_config.loss_rate = 0.05; + auto alice_net = s.CreateSimulationNode(network_config); + auto bob_net = s.CreateSimulationNode(network_config); + auto route = s.CreateRoutes(alice, {alice_net}, bob, {bob_net}); + + // First add an audio stream, then a video stream. + // Needed to make sure audio RTP module is selected first when sending + // transport feedback message. + AudioStreamConfig audio_stream_config; + audio_stream_config.encoder.min_rate = DataRate::KilobitsPerSec(6); + audio_stream_config.encoder.max_rate = DataRate::KilobitsPerSec(64); + audio_stream_config.encoder.allocate_bitrate = true; + audio_stream_config.stream.in_bandwidth_estimation = true; + s.CreateAudioStream(route->forward(), audio_stream_config); + s.CreateAudioStream(route->reverse(), audio_stream_config); + + VideoStreamConfig video_stream_config; + auto video = s.CreateVideoStream(route->forward(), video_stream_config); + s.CreateVideoStream(route->reverse(), video_stream_config); + + // Run for 10 seconds. + s.RunFor(TimeDelta::Seconds(10)); + // Make sure retransmissions have happened. 
+ int retransmit_packets = 0; + + VideoSendStream::Stats stats; + alice->SendTask([&]() { stats = video->send()->GetStats(); }); + + for (const auto& substream : stats.substreams) { + retransmit_packets += substream.second.rtp_stats.retransmitted.packets; + } + EXPECT_GT(retransmit_packets, 0); +} + } // namespace test } // namespace webrtc diff --git a/test/scenario/stats_collection_unittest.cc b/test/scenario/stats_collection_unittest.cc index 7f27eaeaf8..96b2830c76 100644 --- a/test/scenario/stats_collection_unittest.cc +++ b/test/scenario/stats_collection_unittest.cc @@ -33,8 +33,14 @@ void CreateAnalyzedStream(Scenario* s, auto* audio = s->CreateAudioStream(route->forward(), AudioStreamConfig()); s->Every(TimeDelta::Seconds(1), [=] { collectors->call.AddStats(caller->GetStats()); - collectors->video_send.AddStats(video->send()->GetStats(), s->Now()); - collectors->audio_receive.AddStats(audio->receive()->GetStats()); + + VideoSendStream::Stats send_stats; + caller->SendTask([&]() { send_stats = video->send()->GetStats(); }); + collectors->video_send.AddStats(send_stats, s->Now()); + + AudioReceiveStream::Stats receive_stats; + caller->SendTask([&]() { receive_stats = audio->receive()->GetStats(); }); + collectors->audio_receive.AddStats(receive_stats); // Querying the video stats from within the expected runtime environment // (i.e. 
the TQ that belongs to the CallClient, not the Scenario TQ that @@ -87,7 +93,7 @@ TEST(ScenarioAnalyzerTest, PsnrIsLowWhenNetworkIsBad) { EXPECT_NEAR(stats.call.stats().target_rate.Mean().kbps(), 75, 50); EXPECT_NEAR(stats.video_send.stats().media_bitrate.Mean().kbps(), 100, 50); EXPECT_NEAR(stats.video_receive.stats().resolution.Mean(), 180, 10); - EXPECT_NEAR(stats.audio_receive.stats().jitter_buffer.Mean().ms(), 200, 150); + EXPECT_NEAR(stats.audio_receive.stats().jitter_buffer.Mean().ms(), 250, 200); } TEST(ScenarioAnalyzerTest, CountsCapturedButNotRendered) { diff --git a/test/scenario/video_frame_matcher.h b/test/scenario/video_frame_matcher.h index f7f62436ac..a3aa85447d 100644 --- a/test/scenario/video_frame_matcher.h +++ b/test/scenario/video_frame_matcher.h @@ -52,7 +52,7 @@ class VideoFrameMatcher { rtc::scoped_refptr thumb; int repeat_count = 0; }; - using DecodedFrame = rtc::RefCountedObject; + using DecodedFrame = rtc::FinalRefCountedObject; struct CapturedFrame { int id; Timestamp capture_time = Timestamp::PlusInfinity(); diff --git a/test/scenario/video_stream.cc b/test/scenario/video_stream.cc index 4bea740074..96f6f5bc59 100644 --- a/test/scenario/video_stream.cc +++ b/test/scenario/video_stream.cc @@ -110,7 +110,7 @@ std::vector GetVideoRtpExtensions( std::string TransformFilePath(std::string path) { static const std::string resource_prefix = "res://"; - int ext_pos = path.rfind("."); + int ext_pos = path.rfind('.'); if (ext_pos < 0) { return test::ResourcePath(path, "yuv"); } else if (absl::StartsWith(path, resource_prefix)) { @@ -175,8 +175,8 @@ CreateVp9SpecificSettings(VideoStreamConfig video_config) { vp9.automaticResizeOn = conf.single.automatic_scaling; vp9.denoisingOn = conf.single.denoising; } - return new rtc::RefCountedObject< - VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9); + return rtc::make_ref_counted( + vp9); } rtc::scoped_refptr @@ -192,8 +192,8 @@ CreateVp8SpecificSettings(VideoStreamConfig config) { 
vp8_settings.automaticResizeOn = config.encoder.single.automatic_scaling; vp8_settings.denoisingOn = config.encoder.single.denoising; } - return new rtc::RefCountedObject< - VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings); + return rtc::make_ref_counted( + vp8_settings); } rtc::scoped_refptr @@ -205,8 +205,8 @@ CreateH264SpecificSettings(VideoStreamConfig config) { h264_settings.frameDroppingOn = config.encoder.frame_dropping; h264_settings.keyFrameInterval = config.encoder.key_frame_interval.value_or(0); - return new rtc::RefCountedObject< - VideoEncoderConfig::H264EncoderSpecificSettings>(h264_settings); + return rtc::make_ref_counted( + h264_settings); } rtc::scoped_refptr @@ -248,11 +248,11 @@ VideoEncoderConfig CreateVideoEncoderConfig(VideoStreamConfig config) { bool screenshare = config.encoder.content_type == VideoStreamConfig::Encoder::ContentType::kScreen; encoder_config.video_stream_factory = - new rtc::RefCountedObject( + rtc::make_ref_counted( cricket_codec, kDefaultMaxQp, screenshare, screenshare); } else { encoder_config.video_stream_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); } // TODO(srte): Base this on encoder capabilities. 
@@ -265,6 +265,7 @@ VideoEncoderConfig CreateVideoEncoderConfig(VideoStreamConfig config) { if (config.encoder.max_framerate) { for (auto& layer : encoder_config.simulcast_layers) { layer.max_framerate = *config.encoder.max_framerate; + layer.min_bitrate_bps = config.encoder.min_data_rate->bps_or(-1); } } @@ -323,6 +324,7 @@ std::unique_ptr CreateFrameGenerator( VideoReceiveStream::Config CreateVideoReceiveStreamConfig( VideoStreamConfig config, Transport* feedback_transport, + VideoDecoderFactory* decoder_factory, VideoReceiveStream::Decoder decoder, rtc::VideoSinkInterface* renderer, uint32_t local_ssrc, @@ -338,6 +340,7 @@ VideoReceiveStream::Config CreateVideoReceiveStreamConfig( recv.rtp.nack.rtp_history_ms = config.stream.nack_history_time.ms(); recv.rtp.protected_by_flexfec = config.stream.use_flexfec; recv.rtp.remote_ssrc = ssrc; + recv.decoder_factory = decoder_factory; recv.decoders.push_back(decoder); recv.renderer = renderer; if (config.stream.use_rtx) { @@ -373,7 +376,7 @@ SendVideoStream::SendVideoStream(CallClient* sender, case Encoder::Implementation::kFake: encoder_factory_ = std::make_unique([this]() { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); std::unique_ptr encoder; if (config_.encoder.codec == Codec::kVideoCodecVP8) { encoder = std::make_unique(sender_->clock_); @@ -410,6 +413,8 @@ SendVideoStream::SendVideoStream(CallClient* sender, send_config.encoder_settings.encoder_factory = encoder_factory_.get(); send_config.encoder_settings.bitrate_allocator_factory = bitrate_allocator_factory_.get(); + send_config.suspend_below_min_bitrate = + config.encoder.suspend_below_min_bitrate; sender_->SendTask([&] { if (config.stream.fec_controller_factory) { @@ -452,7 +457,7 @@ void SendVideoStream::Stop() { void SendVideoStream::UpdateConfig( std::function modifier) { sender_->SendTask([&] { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); VideoStreamConfig prior_config = config_; modifier(&config_); if 
(prior_config.encoder.fake.max_rate != config_.encoder.fake.max_rate) { @@ -461,7 +466,8 @@ void SendVideoStream::UpdateConfig( } } // TODO(srte): Add more conditions that should cause reconfiguration. - if (prior_config.encoder.max_framerate != config_.encoder.max_framerate) { + if (prior_config.encoder.max_framerate != config_.encoder.max_framerate || + prior_config.encoder.max_data_rate != config_.encoder.max_data_rate) { VideoEncoderConfig encoder_config = CreateVideoEncoderConfig(config_); send_stream_->ReconfigureVideoEncoder(std::move(encoder_config)); } @@ -473,18 +479,16 @@ void SendVideoStream::UpdateConfig( void SendVideoStream::UpdateActiveLayers(std::vector active_layers) { sender_->task_queue_.PostTask([=] { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); if (config_.encoder.codec == VideoStreamConfig::Encoder::Codec::kVideoCodecVP8) { send_stream_->UpdateActiveSimulcastLayers(active_layers); - } else { - VideoEncoderConfig encoder_config = CreateVideoEncoderConfig(config_); - RTC_CHECK_EQ(encoder_config.simulcast_layers.size(), - active_layers.size()); - for (size_t i = 0; i < encoder_config.simulcast_layers.size(); ++i) - encoder_config.simulcast_layers[i].active = active_layers[i]; - send_stream_->ReconfigureVideoEncoder(std::move(encoder_config)); } + VideoEncoderConfig encoder_config = CreateVideoEncoderConfig(config_); + RTC_CHECK_EQ(encoder_config.simulcast_layers.size(), active_layers.size()); + for (size_t i = 0; i < encoder_config.simulcast_layers.size(); ++i) + encoder_config.simulcast_layers[i].active = active_layers[i]; + send_stream_->ReconfigureVideoEncoder(std::move(encoder_config)); }); } @@ -549,7 +553,6 @@ ReceiveVideoStream::ReceiveVideoStream(CallClient* receiver, VideoReceiveStream::Decoder decoder = CreateMatchingDecoder(CodecTypeToPayloadType(config.encoder.codec), CodecTypeToPayloadString(config.encoder.codec)); - decoder.decoder_factory = decoder_factory_.get(); size_t num_streams = 1; if (config.encoder.codec == 
VideoStreamConfig::Encoder::Codec::kVideoCodecVP8) num_streams = config.encoder.layers.spatial; @@ -561,17 +564,17 @@ ReceiveVideoStream::ReceiveVideoStream(CallClient* receiver, renderer = render_taps_.back().get(); } auto recv_config = CreateVideoReceiveStreamConfig( - config, feedback_transport, decoder, renderer, + config, feedback_transport, decoder_factory_.get(), decoder, renderer, receiver_->GetNextVideoLocalSsrc(), send_stream->ssrcs_[i], send_stream->rtx_ssrcs_[i]); if (config.stream.use_flexfec) { RTC_DCHECK(num_streams == 1); FlexfecReceiveStream::Config flexfec(feedback_transport); flexfec.payload_type = CallTest::kFlexfecPayloadType; - flexfec.remote_ssrc = CallTest::kFlexfecSendSsrc; + flexfec.rtp.remote_ssrc = CallTest::kFlexfecSendSsrc; flexfec.protected_media_ssrcs = send_stream->rtx_ssrcs_; - flexfec.local_ssrc = recv_config.rtp.local_ssrc; - receiver_->ssrc_media_types_[flexfec.remote_ssrc] = MediaType::VIDEO; + flexfec.rtp.local_ssrc = recv_config.rtp.local_ssrc; + receiver_->ssrc_media_types_[flexfec.rtp.remote_ssrc] = MediaType::VIDEO; receiver_->SendTask([this, &flexfec] { flecfec_stream_ = receiver_->call_->CreateFlexfecReceiveStream(flexfec); diff --git a/test/scenario/video_stream.h b/test/scenario/video_stream.h index f0b99db57a..96b6d49f63 100644 --- a/test/scenario/video_stream.h +++ b/test/scenario/video_stream.h @@ -14,6 +14,7 @@ #include #include "rtc_base/constructor_magic.h" +#include "rtc_base/synchronization/mutex.h" #include "test/fake_encoder.h" #include "test/fake_videorenderer.h" #include "test/frame_generator_capturer.h" @@ -53,14 +54,14 @@ class SendVideoStream { Transport* send_transport, VideoFrameMatcher* matcher); - rtc::CriticalSection crit_; + Mutex mutex_; std::vector ssrcs_; std::vector rtx_ssrcs_; VideoSendStream* send_stream_ = nullptr; CallClient* const sender_; - VideoStreamConfig config_ RTC_GUARDED_BY(crit_); + VideoStreamConfig config_ RTC_GUARDED_BY(mutex_); std::unique_ptr encoder_factory_; - std::vector 
fake_encoders_ RTC_GUARDED_BY(crit_); + std::vector fake_encoders_ RTC_GUARDED_BY(mutex_); std::unique_ptr bitrate_allocator_factory_; std::unique_ptr video_capturer_; std::unique_ptr frame_tap_; diff --git a/test/scenario/video_stream_unittest.cc b/test/scenario/video_stream_unittest.cc index 1f2cad7e8c..c1649a39b3 100644 --- a/test/scenario/video_stream_unittest.cc +++ b/test/scenario/video_stream_unittest.cc @@ -9,6 +9,9 @@ */ #include +#include "api/test/network_emulation/create_cross_traffic.h" +#include "api/test/network_emulation/cross_traffic.h" +#include "test/field_trial.h" #include "test/gtest.h" #include "test/scenario/scenario.h" @@ -127,7 +130,9 @@ TEST(VideoStreamTest, SendsNacksOnLoss) { auto video = s.CreateVideoStream(route->forward(), VideoStreamConfig()); s.RunFor(TimeDelta::Seconds(1)); int retransmit_packets = 0; - for (const auto& substream : video->send()->GetStats().substreams) { + VideoSendStream::Stats stats; + route->first()->SendTask([&]() { stats = video->send()->GetStats(); }); + for (const auto& substream : stats.substreams) { retransmit_packets += substream.second.rtp_stats.retransmitted.packets; } EXPECT_GT(retransmit_packets, 0); @@ -149,7 +154,8 @@ TEST(VideoStreamTest, SendsFecWithUlpFec) { c->stream.use_ulpfec = true; }); s.RunFor(TimeDelta::Seconds(5)); - VideoSendStream::Stats video_stats = video->send()->GetStats(); + VideoSendStream::Stats video_stats; + route->first()->SendTask([&]() { video_stats = video->send()->GetStats(); }); EXPECT_GT(video_stats.substreams.begin()->second.rtp_stats.fec.packets, 0u); } TEST(VideoStreamTest, SendsFecWithFlexFec) { @@ -166,8 +172,148 @@ TEST(VideoStreamTest, SendsFecWithFlexFec) { c->stream.use_flexfec = true; }); s.RunFor(TimeDelta::Seconds(5)); - VideoSendStream::Stats video_stats = video->send()->GetStats(); + VideoSendStream::Stats video_stats; + route->first()->SendTask([&]() { video_stats = video->send()->GetStats(); }); 
EXPECT_GT(video_stats.substreams.begin()->second.rtp_stats.fec.packets, 0u); } + +TEST(VideoStreamTest, ResolutionAdaptsToAvailableBandwidth) { + // Declared before scenario to avoid use after free. + std::atomic num_qvga_frames_(0); + std::atomic num_vga_frames_(0); + + Scenario s; + // Link has enough capacity for VGA. + NetworkSimulationConfig net_conf; + net_conf.bandwidth = DataRate::KilobitsPerSec(800); + net_conf.delay = TimeDelta::Millis(50); + auto* client = s.CreateClient("send", [&](CallClientConfig* c) { + c->transport.rates.start_rate = DataRate::KilobitsPerSec(800); + }); + auto send_net = {s.CreateSimulationNode(net_conf)}; + auto ret_net = {s.CreateSimulationNode(net_conf)}; + auto* route = s.CreateRoutes( + client, send_net, s.CreateClient("return", CallClientConfig()), ret_net); + + s.CreateVideoStream(route->forward(), [&](VideoStreamConfig* c) { + c->hooks.frame_pair_handlers = {[&](const VideoFramePair& info) { + if (info.decoded->width() == 640) { + ++num_vga_frames_; + } else if (info.decoded->width() == 320) { + ++num_qvga_frames_; + } else { + ADD_FAILURE() << "Unexpected resolution: " << info.decoded->width(); + } + }}; + c->source.framerate = 30; + // The resolution must be high enough to allow smaller layers to be + // created. + c->source.generator.width = 640; + c->source.generator.height = 480; + c->encoder.implementation = CodecImpl::kSoftware; + c->encoder.codec = Codec::kVideoCodecVP9; + // Enable SVC. + c->encoder.layers.spatial = 2; + }); + + // Run for a few seconds, until streams have stabilized, + // check that we are sending VGA. + s.RunFor(TimeDelta::Seconds(5)); + EXPECT_GT(num_vga_frames_, 0u); + + // Trigger cross traffic, run until we have seen 3 consecutive + // seconds with no VGA frames due to reduced available bandwidth. 
+ auto cross_traffic = s.net()->StartCrossTraffic(CreateFakeTcpCrossTraffic( + s.net()->CreateRoute(send_net), s.net()->CreateRoute(ret_net), + FakeTcpConfig())); + + int num_seconds_without_vga = 0; + int num_iterations = 0; + do { + ASSERT_LE(++num_iterations, 100); + num_qvga_frames_ = 0; + num_vga_frames_ = 0; + s.RunFor(TimeDelta::Seconds(1)); + if (num_qvga_frames_ > 0 && num_vga_frames_ == 0) { + ++num_seconds_without_vga; + } else { + num_seconds_without_vga = 0; + } + } while (num_seconds_without_vga < 3); + + // Stop cross traffic, make sure we recover and get VGA frames again. + s.net()->StopCrossTraffic(cross_traffic); + num_qvga_frames_ = 0; + num_vga_frames_ = 0; + + s.RunFor(TimeDelta::Seconds(40)); + EXPECT_GT(num_qvga_frames_, 0u); + EXPECT_GT(num_vga_frames_, 0u); +} + +TEST(VideoStreamTest, SuspendsBelowMinBitrate) { + const DataRate kMinVideoBitrate = DataRate::KilobitsPerSec(30); + + // Declared before scenario to avoid use after free. + std::atomic last_frame_timestamp(Timestamp::MinusInfinity()); + + Scenario s; + NetworkSimulationConfig net_config; + net_config.bandwidth = kMinVideoBitrate * 4; + net_config.delay = TimeDelta::Millis(10); + auto* client = s.CreateClient("send", [&](CallClientConfig* c) { + // Min transmit rate needs to be lower than kMinVideoBitrate for this test + // to make sense. 
+ c->transport.rates.min_rate = kMinVideoBitrate / 2; + c->transport.rates.start_rate = kMinVideoBitrate; + c->transport.rates.max_rate = kMinVideoBitrate * 2; + }); + auto send_net = s.CreateMutableSimulationNode( + [&](NetworkSimulationConfig* c) { *c = net_config; }); + auto ret_net = {s.CreateSimulationNode(net_config)}; + auto* route = + s.CreateRoutes(client, {send_net->node()}, + s.CreateClient("return", CallClientConfig()), ret_net); + + s.CreateVideoStream(route->forward(), [&](VideoStreamConfig* c) { + c->hooks.frame_pair_handlers = {[&](const VideoFramePair& pair) { + if (pair.repeated == 0) { + last_frame_timestamp = pair.capture_time; + } + }}; + c->source.framerate = 30; + c->source.generator.width = 320; + c->source.generator.height = 180; + c->encoder.implementation = CodecImpl::kFake; + c->encoder.codec = Codec::kVideoCodecVP8; + c->encoder.min_data_rate = kMinVideoBitrate; + c->encoder.suspend_below_min_bitrate = true; + c->stream.pad_to_rate = kMinVideoBitrate; + }); + + // Run for a few seconds, check we have received at least one frame. + s.RunFor(TimeDelta::Seconds(2)); + EXPECT_TRUE(last_frame_timestamp.load().IsFinite()); + + // Degrade network to below min bitrate. + send_net->UpdateConfig([&](NetworkSimulationConfig* c) { + c->bandwidth = kMinVideoBitrate * 0.9; + }); + + // Run for 20s, verify that no frames arrive that were captured after the + // first five seconds, allowing some margin for BWE backoff to trigger and + // packets already in the pipeline to potentially arrive. + s.RunFor(TimeDelta::Seconds(20)); + EXPECT_GT(s.Now() - last_frame_timestamp, TimeDelta::Seconds(15)); + + // Relax the network constraints and run for a while more, verify that we + // start receiving frames again. 
+ send_net->UpdateConfig( + [&](NetworkSimulationConfig* c) { c->bandwidth = kMinVideoBitrate * 4; }); + last_frame_timestamp = Timestamp::MinusInfinity(); + s.RunFor(TimeDelta::Seconds(15)); + EXPECT_TRUE(last_frame_timestamp.load().IsFinite()); +} + } // namespace test } // namespace webrtc diff --git a/test/test_main.cc b/test/test_main.cc index 5046979548..f919c4bba7 100644 --- a/test/test_main.cc +++ b/test/test_main.cc @@ -12,17 +12,21 @@ #include "absl/debugging/failure_signal_handler.h" #include "absl/debugging/symbolize.h" +#include "absl/flags/parse.h" +#include "test/gmock.h" #include "test/test_main_lib.h" int main(int argc, char* argv[]) { // Initialize the symbolizer to get a human-readable stack trace absl::InitializeSymbolizer(argv[0]); + testing::InitGoogleMock(&argc, argv); + absl::ParseCommandLine(argc, argv); absl::FailureSignalHandlerOptions options; absl::InstallFailureSignalHandler(options); std::unique_ptr main = webrtc::TestMain::Create(); - int err_code = main->Init(&argc, argv); + int err_code = main->Init(); if (err_code != 0) { return err_code; } diff --git a/test/test_main_lib.cc b/test/test_main_lib.cc index f5e02341f3..7170163346 100644 --- a/test/test_main_lib.cc +++ b/test/test_main_lib.cc @@ -15,7 +15,6 @@ #include #include "absl/flags/flag.h" -#include "absl/flags/parse.h" #include "absl/memory/memory.h" #include "absl/strings/match.h" #include "absl/types/optional.h" @@ -28,7 +27,6 @@ #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" #include "test/field_trial.h" -#include "test/gmock.h" #include "test/gtest.h" #include "test/testsupport/perf_test.h" #include "test/testsupport/resources_dir_flag.h" @@ -157,10 +155,9 @@ class TestMainImpl : public TestMain { std::unique_ptr thread_; }; - int Init(int* argc, char* argv[]) override { - ::testing::InitGoogleMock(argc, argv); - absl::ParseCommandLine(*argc, argv); + int Init(int* argc, char* argv[]) override { return Init(); } + int Init() 
override { // Make sure we always pull in the --resources_dir flag, even if the test // binary doesn't link with fileutils (downstream expects all test mains to // have this flag). diff --git a/test/test_main_lib.h b/test/test_main_lib.h index bdb0afb6eb..2233171c60 100644 --- a/test/test_main_lib.h +++ b/test/test_main_lib.h @@ -25,6 +25,8 @@ class TestMain { // Initializes test environment. Clients can add their own initialization // steps after call to this method and before running tests. // Returns 0 if initialization was successful and non 0 otherwise. + virtual int Init() = 0; + // Temporary for backward compatibility virtual int Init(int* argc, char* argv[]) = 0; // Runs test end return result error code. 0 - no errors. diff --git a/test/test_video_capturer.cc b/test/test_video_capturer.cc index c0d575dc5e..9ce4aa0637 100644 --- a/test/test_video_capturer.cc +++ b/test/test_video_capturer.cc @@ -84,7 +84,7 @@ void TestVideoCapturer::UpdateVideoAdapter() { } VideoFrame TestVideoCapturer::MaybePreprocess(const VideoFrame& frame) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); if (preprocessor_ != nullptr) { return preprocessor_->Preprocess(frame); } else { diff --git a/test/test_video_capturer.h b/test/test_video_capturer.h index 114767a43e..dff529cb15 100644 --- a/test/test_video_capturer.h +++ b/test/test_video_capturer.h @@ -18,7 +18,7 @@ #include "api/video/video_source_interface.h" #include "media/base/video_adapter.h" #include "media/base/video_broadcaster.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { namespace test { @@ -38,7 +38,7 @@ class TestVideoCapturer : public rtc::VideoSourceInterface { const rtc::VideoSinkWants& wants) override; void RemoveSink(rtc::VideoSinkInterface* sink) override; void SetFramePreprocessor(std::unique_ptr preprocessor) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); preprocessor_ = std::move(preprocessor); } @@ -50,7 +50,7 @@ class 
TestVideoCapturer : public rtc::VideoSourceInterface { void UpdateVideoAdapter(); VideoFrame MaybePreprocess(const VideoFrame& frame); - rtc::CriticalSection lock_; + Mutex lock_; std::unique_ptr preprocessor_ RTC_GUARDED_BY(lock_); rtc::VideoBroadcaster broadcaster_; cricket::VideoAdapter video_adapter_; diff --git a/test/testsupport/file_utils.cc b/test/testsupport/file_utils.cc index 0b4ffa446c..1f829d320b 100644 --- a/test/testsupport/file_utils.cc +++ b/test/testsupport/file_utils.cc @@ -107,7 +107,7 @@ std::string TempFilename(const std::string& dir, const std::string& prefix) { if (::GetTempFileNameW(rtc::ToUtf16(dir).c_str(), rtc::ToUtf16(prefix).c_str(), 0, filename) != 0) return rtc::ToUtf8(filename); - assert(false); + RTC_NOTREACHED(); return ""; #else int len = dir.size() + prefix.size() + 2 + 6; @@ -116,7 +116,7 @@ std::string TempFilename(const std::string& dir, const std::string& prefix) { snprintf(tempname.get(), len, "%s/%sXXXXXX", dir.c_str(), prefix.c_str()); int fd = ::mkstemp(tempname.get()); if (fd == -1) { - assert(false); + RTC_NOTREACHED(); return ""; } else { ::close(fd); diff --git a/test/testsupport/frame_reader.h b/test/testsupport/frame_reader.h index 7f313e8b82..ac399655df 100644 --- a/test/testsupport/frame_reader.h +++ b/test/testsupport/frame_reader.h @@ -15,6 +15,7 @@ #include +#include "absl/types/optional.h" #include "api/scoped_refptr.h" namespace webrtc { @@ -47,11 +48,32 @@ class FrameReader { class YuvFrameReaderImpl : public FrameReader { public: + enum class RepeatMode { kSingle, kRepeat, kPingPong }; + class DropperUtil { + public: + DropperUtil(int source_fps, int target_fps); + + enum class DropDecision { kDropframe, kKeepFrame }; + DropDecision UpdateLevel(); + + private: + const double frame_size_buckets_; + double bucket_level_; + }; + // Creates a file handler. The input file is assumed to exist and be readable. // Parameters: // input_filename The file to read from. // width, height Size of each frame to read. 
YuvFrameReaderImpl(std::string input_filename, int width, int height); + YuvFrameReaderImpl(std::string input_filename, + int input_width, + int input_height, + int desired_width, + int desired_height, + RepeatMode repeat_mode, + absl::optional clip_fps, + int target_fps); ~YuvFrameReaderImpl() override; bool Init() override; rtc::scoped_refptr ReadFrame() override; @@ -63,9 +85,15 @@ class YuvFrameReaderImpl : public FrameReader { const std::string input_filename_; // It is not const, so subclasses will be able to add frame header size. size_t frame_length_in_bytes_; - const int width_; - const int height_; + const int input_width_; + const int input_height_; + const int desired_width_; + const int desired_height_; + const size_t frame_size_bytes_; + const RepeatMode repeat_mode_; int number_of_frames_; + int current_frame_index_; + std::unique_ptr dropper_; FILE* input_file_; }; diff --git a/test/testsupport/frame_writer.h b/test/testsupport/frame_writer.h index b91e57c963..5f85d8bcd4 100644 --- a/test/testsupport/frame_writer.h +++ b/test/testsupport/frame_writer.h @@ -32,7 +32,7 @@ class FrameWriter { // Writes a frame of the configured frame length to the output file. // Returns true if the write was successful, false otherwise. - virtual bool WriteFrame(uint8_t* frame_buffer) = 0; + virtual bool WriteFrame(const uint8_t* frame_buffer) = 0; // Closes the output file if open. Essentially makes this class impossible // to use anymore. Will also be invoked by the destructor. 
@@ -54,7 +54,7 @@ class YuvFrameWriterImpl : public FrameWriter { YuvFrameWriterImpl(std::string output_filename, int width, int height); ~YuvFrameWriterImpl() override; bool Init() override; - bool WriteFrame(uint8_t* frame_buffer) override; + bool WriteFrame(const uint8_t* frame_buffer) override; void Close() override; size_t FrameLength() override; @@ -76,7 +76,7 @@ class Y4mFrameWriterImpl : public YuvFrameWriterImpl { int frame_rate); ~Y4mFrameWriterImpl() override; bool Init() override; - bool WriteFrame(uint8_t* frame_buffer) override; + bool WriteFrame(const uint8_t* frame_buffer) override; private: const int frame_rate_; diff --git a/test/testsupport/ivf_video_frame_generator.cc b/test/testsupport/ivf_video_frame_generator.cc index 81155f80ff..fe836763fa 100644 --- a/test/testsupport/ivf_video_frame_generator.cc +++ b/test/testsupport/ivf_video_frame_generator.cc @@ -53,7 +53,7 @@ IvfVideoFrameGenerator::IvfVideoFrameGenerator(const std::string& file_name) WEBRTC_VIDEO_CODEC_OK); } IvfVideoFrameGenerator::~IvfVideoFrameGenerator() { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); if (!file_reader_) { return; } @@ -62,7 +62,7 @@ IvfVideoFrameGenerator::~IvfVideoFrameGenerator() { // Reset decoder to prevent it from async access to |this|. video_decoder_.reset(); { - rtc::CritScope frame_crit(&frame_decode_lock_); + MutexLock frame_lock(&frame_decode_lock_); next_frame_ = absl::nullopt; // Set event in case another thread is waiting on it. next_frame_decoded_.Set(); @@ -70,7 +70,7 @@ IvfVideoFrameGenerator::~IvfVideoFrameGenerator() { } FrameGeneratorInterface::VideoFrameData IvfVideoFrameGenerator::NextFrame() { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); next_frame_decoded_.Reset(); RTC_CHECK(file_reader_); if (!file_reader_->HasMoreFrames()) { @@ -86,7 +86,7 @@ FrameGeneratorInterface::VideoFrameData IvfVideoFrameGenerator::NextFrame() { RTC_CHECK(decoded) << "Failed to decode next frame in " << kMaxNextFrameWaitTemeoutMs << "ms. 
Can't continue"; - rtc::CritScope frame_crit(&frame_decode_lock_); + MutexLock frame_lock(&frame_decode_lock_); rtc::scoped_refptr buffer = next_frame_->video_frame_buffer(); if (width_ != static_cast(buffer->width()) || @@ -102,7 +102,7 @@ FrameGeneratorInterface::VideoFrameData IvfVideoFrameGenerator::NextFrame() { } void IvfVideoFrameGenerator::ChangeResolution(size_t width, size_t height) { - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); width_ = width; height_ = height; } @@ -126,7 +126,7 @@ void IvfVideoFrameGenerator::DecodedCallback::Decoded( } void IvfVideoFrameGenerator::OnFrameDecoded(const VideoFrame& decoded_frame) { - rtc::CritScope crit(&frame_decode_lock_); + MutexLock lock(&frame_decode_lock_); next_frame_ = decoded_frame; next_frame_decoded_.Set(); } diff --git a/test/testsupport/ivf_video_frame_generator.h b/test/testsupport/ivf_video_frame_generator.h index 913d882766..8ee9c03417 100644 --- a/test/testsupport/ivf_video_frame_generator.h +++ b/test/testsupport/ivf_video_frame_generator.h @@ -15,14 +15,14 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/test/frame_generator_interface.h" #include "api/video/video_codec_type.h" #include "api/video/video_frame.h" #include "api/video_codecs/video_decoder.h" #include "modules/video_coding/utility/ivf_file_reader.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { namespace test { @@ -71,11 +71,11 @@ class IvfVideoFrameGenerator : public FrameGeneratorInterface { // FrameGenerator is injected into PeerConnection via some scoped_ref object // and it can happen that the last pointer will be destroyed on the different // thread comparing to the one from which frames were read. - rtc::CriticalSection lock_; + Mutex lock_; // This lock is used to sync between sending and receiving frame from decoder. 
// We can't reuse |lock_| because then generator can be destroyed between // frame was sent to decoder and decoder callback was invoked. - rtc::CriticalSection frame_decode_lock_; + Mutex frame_decode_lock_; rtc::Event next_frame_decoded_; absl::optional next_frame_ RTC_GUARDED_BY(frame_decode_lock_); diff --git a/test/testsupport/ivf_video_frame_generator_unittest.cc b/test/testsupport/ivf_video_frame_generator_unittest.cc index 0c364dbb1d..126f7203b8 100644 --- a/test/testsupport/ivf_video_frame_generator_unittest.cc +++ b/test/testsupport/ivf_video_frame_generator_unittest.cc @@ -25,7 +25,6 @@ #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "modules/video_coding/include/video_error_codes.h" #include "modules/video_coding/utility/ivf_file_writer.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "test/gtest.h" #include "test/testsupport/file_utils.h" @@ -34,6 +33,8 @@ #if defined(WEBRTC_USE_H264) #include "modules/video_coding/codecs/h264/include/h264.h" +#include "rtc_base/synchronization/mutex.h" + #endif namespace webrtc { @@ -47,7 +48,7 @@ constexpr int kMaxFramerate = 30; constexpr int kMaxFrameEncodeWaitTimeoutMs = 2000; static const VideoEncoder::Capabilities kCapabilities(false); -#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) +#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_ARCH_ARM64) constexpr double kExpectedMinPsnr = 35; #else constexpr double kExpectedMinPsnr = 39; @@ -67,11 +68,10 @@ class IvfFileWriterEncodedCallback : public EncodedImageCallback { ~IvfFileWriterEncodedCallback() { EXPECT_TRUE(file_writer_->Close()); } Result OnEncodedImage(const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override { + const CodecSpecificInfo* codec_specific_info) override { EXPECT_TRUE(file_writer_->WriteFrame(encoded_image, video_codec_type_)); - rtc::CritScope crit(&lock_); + MutexLock lock(&lock_); 
received_frames_count_++; RTC_CHECK_LE(received_frames_count_, expected_frames_count_); if (received_frames_count_ == expected_frames_count_) { @@ -89,7 +89,7 @@ class IvfFileWriterEncodedCallback : public EncodedImageCallback { const VideoCodecType video_codec_type_; const int expected_frames_count_; - rtc::CriticalSection lock_; + Mutex lock_; int received_frames_count_ RTC_GUARDED_BY(lock_) = 0; rtc::Event expected_frames_count_received_; }; diff --git a/test/testsupport/perf_result_reporter.cc b/test/testsupport/perf_result_reporter.cc index e4c98e7446..158f1cd768 100644 --- a/test/testsupport/perf_result_reporter.cc +++ b/test/testsupport/perf_result_reporter.cc @@ -12,6 +12,8 @@ #include +#include "absl/strings/string_view.h" + namespace { // These characters mess with either the stdout parsing or the dashboard itself. @@ -21,7 +23,7 @@ const std::vector& InvalidCharacters() { return kInvalidCharacters; } -void CheckForInvalidCharacters(const std::string& str) { +void CheckForInvalidCharacters(absl::string_view str) { for (const auto& invalid : InvalidCharacters()) { RTC_CHECK(str.find(invalid) == std::string::npos) << "Given invalid character for perf names '" << invalid << "'"; @@ -76,8 +78,8 @@ std::string UnitToString(Unit unit) { } // namespace -PerfResultReporter::PerfResultReporter(const std::string& metric_basename, - const std::string& story_name) +PerfResultReporter::PerfResultReporter(absl::string_view metric_basename, + absl::string_view story_name) : metric_basename_(metric_basename), story_name_(story_name) { CheckForInvalidCharacters(metric_basename_); CheckForInvalidCharacters(story_name_); @@ -85,19 +87,20 @@ PerfResultReporter::PerfResultReporter(const std::string& metric_basename, PerfResultReporter::~PerfResultReporter() = default; -void PerfResultReporter::RegisterMetric(const std::string& metric_suffix, +void PerfResultReporter::RegisterMetric(absl::string_view metric_suffix, Unit unit) { RegisterMetric(metric_suffix, unit, 
ImproveDirection::kNone); } -void PerfResultReporter::RegisterMetric(const std::string& metric_suffix, +void PerfResultReporter::RegisterMetric(absl::string_view metric_suffix, Unit unit, ImproveDirection improve_direction) { CheckForInvalidCharacters(metric_suffix); - RTC_CHECK(metric_map_.count(metric_suffix) == 0); - metric_map_.insert({metric_suffix, {unit, improve_direction}}); + std::string metric(metric_suffix); + RTC_CHECK(metric_map_.count(metric) == 0); + metric_map_.insert({std::move(metric), {unit, improve_direction}}); } -void PerfResultReporter::AddResult(const std::string& metric_suffix, +void PerfResultReporter::AddResult(absl::string_view metric_suffix, size_t value) const { auto info = GetMetricInfoOrFail(metric_suffix); @@ -105,7 +108,7 @@ void PerfResultReporter::AddResult(const std::string& metric_suffix, UnitToString(info.unit), kNotImportant, info.improve_direction); } -void PerfResultReporter::AddResult(const std::string& metric_suffix, +void PerfResultReporter::AddResult(absl::string_view metric_suffix, double value) const { auto info = GetMetricInfoOrFail(metric_suffix); @@ -114,7 +117,7 @@ void PerfResultReporter::AddResult(const std::string& metric_suffix, } void PerfResultReporter::AddResultList( - const std::string& metric_suffix, + absl::string_view metric_suffix, rtc::ArrayView values) const { auto info = GetMetricInfoOrFail(metric_suffix); @@ -123,7 +126,7 @@ void PerfResultReporter::AddResultList( info.improve_direction); } -void PerfResultReporter::AddResultMeanAndError(const std::string& metric_suffix, +void PerfResultReporter::AddResultMeanAndError(absl::string_view metric_suffix, const double mean, const double error) { auto info = GetMetricInfoOrFail(metric_suffix); @@ -134,8 +137,8 @@ void PerfResultReporter::AddResultMeanAndError(const std::string& metric_suffix, } absl::optional PerfResultReporter::GetMetricInfo( - const std::string& metric_suffix) const { - auto iter = metric_map_.find(metric_suffix); + absl::string_view 
metric_suffix) const { + auto iter = metric_map_.find(std::string(metric_suffix)); if (iter == metric_map_.end()) { return absl::optional(); } @@ -144,7 +147,7 @@ absl::optional PerfResultReporter::GetMetricInfo( } MetricInfo PerfResultReporter::GetMetricInfoOrFail( - const std::string& metric_suffix) const { + absl::string_view metric_suffix) const { absl::optional info = GetMetricInfo(metric_suffix); RTC_CHECK(info.has_value()) << "Attempted to use unregistered metric " << metric_suffix; diff --git a/test/testsupport/perf_result_reporter.h b/test/testsupport/perf_result_reporter.h index c8028574aa..aeb1786824 100644 --- a/test/testsupport/perf_result_reporter.h +++ b/test/testsupport/perf_result_reporter.h @@ -14,6 +14,7 @@ #include #include +#include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "api/array_view.h" #include "test/testsupport/perf_test.h" @@ -61,34 +62,34 @@ struct MetricInfo { // as separate subtests (e.g. next to bwe_15s). class PerfResultReporter { public: - PerfResultReporter(const std::string& metric_basename, - const std::string& story_name); + PerfResultReporter(absl::string_view metric_basename, + absl::string_view story_name); ~PerfResultReporter(); - void RegisterMetric(const std::string& metric_suffix, Unit unit); - void RegisterMetric(const std::string& metric_suffix, + void RegisterMetric(absl::string_view metric_suffix, Unit unit); + void RegisterMetric(absl::string_view metric_suffix, Unit unit, ImproveDirection improve_direction); - void AddResult(const std::string& metric_suffix, size_t value) const; - void AddResult(const std::string& metric_suffix, double value) const; + void AddResult(absl::string_view metric_suffix, size_t value) const; + void AddResult(absl::string_view metric_suffix, double value) const; - void AddResultList(const std::string& metric_suffix, + void AddResultList(absl::string_view metric_suffix, rtc::ArrayView values) const; // Users should prefer AddResultList if possible, as 
otherwise the min/max // values reported on the perf dashboard aren't useful. // |mean_and_error| should be a comma-separated string of mean then // error/stddev, e.g. "2.4,0.5". - void AddResultMeanAndError(const std::string& metric_suffix, + void AddResultMeanAndError(absl::string_view metric_suffix, const double mean, const double error); // Returns the metric info if it has been registered. absl::optional GetMetricInfo( - const std::string& metric_suffix) const; + absl::string_view metric_suffix) const; private: - MetricInfo GetMetricInfoOrFail(const std::string& metric_suffix) const; + MetricInfo GetMetricInfoOrFail(absl::string_view metric_suffix) const; std::string metric_basename_; std::string story_name_; diff --git a/test/testsupport/perf_test.cc b/test/testsupport/perf_test.cc index 310c7e36a5..d282bf23a1 100644 --- a/test/testsupport/perf_test.cc +++ b/test/testsupport/perf_test.cc @@ -17,8 +17,12 @@ #include #include +#include "absl/strings/string_view.h" +#include "api/numerics/samples_stats_counter.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" +#include "test/testsupport/file_utils.h" #include "test/testsupport/perf_test_histogram_writer.h" namespace webrtc { @@ -27,18 +31,31 @@ namespace test { namespace { std::string UnitWithDirection( - const std::string& units, + absl::string_view units, webrtc::test::ImproveDirection improve_direction) { switch (improve_direction) { case webrtc::test::ImproveDirection::kNone: - return units; + return std::string(units); case webrtc::test::ImproveDirection::kSmallerIsBetter: - return units + "_smallerIsBetter"; + return std::string(units) + "_smallerIsBetter"; case webrtc::test::ImproveDirection::kBiggerIsBetter: - return units + "_biggerIsBetter"; + return std::string(units) + "_biggerIsBetter"; } } +std::vector GetSortedSamples( + const SamplesStatsCounter& counter) { + rtc::ArrayView view = + 
counter.GetTimedSamples(); + std::vector out(view.begin(), view.end()); + std::sort(out.begin(), out.end(), + [](const SamplesStatsCounter::StatsSample& a, + const SamplesStatsCounter::StatsSample& b) { + return a.time < b.time; + }); + return out; +} + template void OutputListToStream(std::ostream* ostream, const Container& values) { const char* sep = ""; @@ -60,22 +77,24 @@ class PlottableCounterPrinter { PlottableCounterPrinter() : output_(stdout) {} void SetOutput(FILE* output) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); output_ = output; } - void AddCounter(const std::string& graph_name, - const std::string& trace_name, + void AddCounter(absl::string_view graph_name, + absl::string_view trace_name, const webrtc::SamplesStatsCounter& counter, - const std::string& units) { - rtc::CritScope lock(&crit_); - plottable_counters_.push_back({graph_name, trace_name, counter, units}); + absl::string_view units) { + MutexLock lock(&mutex_); + plottable_counters_.push_back({std::string(graph_name), + std::string(trace_name), counter, + std::string(units)}); } void Print(const std::vector& desired_graphs_raw) const { std::set desired_graphs(desired_graphs_raw.begin(), desired_graphs_raw.end()); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); for (auto& counter : plottable_counters_) { if (!desired_graphs.empty()) { auto it = desired_graphs.find(counter.graph_name); @@ -108,9 +127,9 @@ class PlottableCounterPrinter { } private: - rtc::CriticalSection crit_; - std::vector plottable_counters_ RTC_GUARDED_BY(&crit_); - FILE* output_ RTC_GUARDED_BY(&crit_); + mutable Mutex mutex_; + std::vector plottable_counters_ RTC_GUARDED_BY(&mutex_); + FILE* output_ RTC_GUARDED_BY(&mutex_); }; PlottableCounterPrinter& GetPlottableCounterPrinter() { @@ -123,14 +142,14 @@ class ResultsLinePrinter { ResultsLinePrinter() : output_(stdout) {} void SetOutput(FILE* output) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); output_ = output; } - void 
PrintResult(const std::string& graph_name, - const std::string& trace_name, + void PrintResult(absl::string_view graph_name, + absl::string_view trace_name, const double value, - const std::string& units, + absl::string_view units, bool important, ImproveDirection improve_direction) { std::ostringstream value_stream; @@ -142,11 +161,11 @@ class ResultsLinePrinter { important); } - void PrintResultMeanAndError(const std::string& graph_name, - const std::string& trace_name, + void PrintResultMeanAndError(absl::string_view graph_name, + absl::string_view trace_name, const double mean, const double error, - const std::string& units, + absl::string_view units, bool important, ImproveDirection improve_direction) { std::ostringstream value_stream; @@ -156,10 +175,10 @@ class ResultsLinePrinter { UnitWithDirection(units, improve_direction), important); } - void PrintResultList(const std::string& graph_name, - const std::string& trace_name, + void PrintResultList(absl::string_view graph_name, + absl::string_view trace_name, const rtc::ArrayView values, - const std::string& units, + absl::string_view units, const bool important, webrtc::test::ImproveDirection improve_direction) { std::ostringstream value_stream; @@ -170,24 +189,25 @@ class ResultsLinePrinter { } private: - void PrintResultImpl(const std::string& graph_name, - const std::string& trace_name, - const std::string& values, - const std::string& prefix, - const std::string& suffix, - const std::string& units, + void PrintResultImpl(absl::string_view graph_name, + absl::string_view trace_name, + absl::string_view values, + absl::string_view prefix, + absl::string_view suffix, + absl::string_view units, bool important) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); + rtc::StringBuilder message; + message << (important ? 
"*" : "") << "RESULT " << graph_name << ": " + << trace_name << "= " << prefix << values << suffix << " " << units; // <*>RESULT : = // <*>RESULT : = {, } // <*>RESULT : = [,value,value,...,] - fprintf(output_, "%sRESULT %s: %s= %s%s%s %s\n", important ? "*" : "", - graph_name.c_str(), trace_name.c_str(), prefix.c_str(), - values.c_str(), suffix.c_str(), units.c_str()); + fprintf(output_, "%s\n", message.str().c_str()); } - rtc::CriticalSection crit_; - FILE* output_ RTC_GUARDED_BY(&crit_); + Mutex mutex_; + FILE* output_ RTC_GUARDED_BY(&mutex_); }; ResultsLinePrinter& GetResultsLinePrinter() { @@ -221,6 +241,7 @@ void PrintPlottableResults(const std::vector& desired_graphs) { bool WritePerfResults(const std::string& output_path) { std::string results = GetPerfResults(); + CreateDir(DirName(output_path)); FILE* output = fopen(output_path.c_str(), "wb"); if (output == NULL) { printf("Failed to write to %s.\n", output_path.c_str()); @@ -239,73 +260,94 @@ bool WritePerfResults(const std::string& output_path) { return true; } -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& trace, +void PrintResult(absl::string_view measurement, + absl::string_view modifier, + absl::string_view trace, const double value, - const std::string& units, + absl::string_view units, bool important, ImproveDirection improve_direction) { - std::string graph_name = measurement + modifier; + rtc::StringBuilder graph_name; + graph_name << measurement << modifier; RTC_CHECK(std::isfinite(value)) - << "Expected finite value for graph " << graph_name << ", trace name " - << trace << ", units " << units << ", got " << value; - GetPerfWriter().LogResult(graph_name, trace, value, units, important, + << "Expected finite value for graph " << graph_name.str() + << ", trace name " << trace << ", units " << units << ", got " << value; + GetPerfWriter().LogResult(graph_name.str(), trace, value, units, important, improve_direction); - 
GetResultsLinePrinter().PrintResult(graph_name, trace, value, units, + GetResultsLinePrinter().PrintResult(graph_name.str(), trace, value, units, important, improve_direction); } -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& trace, +void PrintResult(absl::string_view measurement, + absl::string_view modifier, + absl::string_view trace, const SamplesStatsCounter& counter, - const std::string& units, + absl::string_view units, const bool important, ImproveDirection improve_direction) { - std::string graph_name = measurement + modifier; - GetPlottableCounterPrinter().AddCounter(graph_name, trace, counter, units); + rtc::StringBuilder graph_name; + graph_name << measurement << modifier; + GetPlottableCounterPrinter().AddCounter(graph_name.str(), trace, counter, + units); double mean = counter.IsEmpty() ? 0 : counter.GetAverage(); double error = counter.IsEmpty() ? 0 : counter.GetStandardDeviation(); - PrintResultMeanAndError(measurement, modifier, trace, mean, error, units, - important, improve_direction); + + std::vector timed_samples = + GetSortedSamples(counter); + std::vector samples(timed_samples.size()); + for (size_t i = 0; i < timed_samples.size(); ++i) { + samples[i] = timed_samples[i].value; + } + // If we have an empty counter, default it to 0. 
+ if (samples.empty()) { + samples.push_back(0); + } + + GetPerfWriter().LogResultList(graph_name.str(), trace, samples, units, + important, improve_direction); + GetResultsLinePrinter().PrintResultMeanAndError(graph_name.str(), trace, mean, + error, units, important, + improve_direction); } -void PrintResultMeanAndError(const std::string& measurement, - const std::string& modifier, - const std::string& trace, +void PrintResultMeanAndError(absl::string_view measurement, + absl::string_view modifier, + absl::string_view trace, const double mean, const double error, - const std::string& units, + absl::string_view units, bool important, ImproveDirection improve_direction) { RTC_CHECK(std::isfinite(mean)); RTC_CHECK(std::isfinite(error)); - std::string graph_name = measurement + modifier; - GetPerfWriter().LogResultMeanAndError(graph_name, trace, mean, error, units, - important, improve_direction); - GetResultsLinePrinter().PrintResultMeanAndError( - graph_name, trace, mean, error, units, important, improve_direction); + rtc::StringBuilder graph_name; + graph_name << measurement << modifier; + GetPerfWriter().LogResultMeanAndError(graph_name.str(), trace, mean, error, + units, important, improve_direction); + GetResultsLinePrinter().PrintResultMeanAndError(graph_name.str(), trace, mean, + error, units, important, + improve_direction); } -void PrintResultList(const std::string& measurement, - const std::string& modifier, - const std::string& trace, +void PrintResultList(absl::string_view measurement, + absl::string_view modifier, + absl::string_view trace, const rtc::ArrayView values, - const std::string& units, + absl::string_view units, bool important, ImproveDirection improve_direction) { for (double v : values) { RTC_CHECK(std::isfinite(v)); } - std::string graph_name = measurement + modifier; - GetPerfWriter().LogResultList(graph_name, trace, values, units, important, - improve_direction); - GetResultsLinePrinter().PrintResultList(graph_name, trace, values, units, 
- important, improve_direction); + rtc::StringBuilder graph_name; + graph_name << measurement << modifier; + GetPerfWriter().LogResultList(graph_name.str(), trace, values, units, + important, improve_direction); + GetResultsLinePrinter().PrintResultList(graph_name.str(), trace, values, + units, important, improve_direction); } } // namespace test diff --git a/test/testsupport/perf_test.h b/test/testsupport/perf_test.h index b0a5607d20..41380241c3 100644 --- a/test/testsupport/perf_test.h +++ b/test/testsupport/perf_test.h @@ -15,9 +15,9 @@ #include #include -#include "absl/flags/flag.h" +#include "absl/strings/string_view.h" #include "api/array_view.h" -#include "rtc_base/numerics/samples_stats_counter.h" +#include "api/numerics/samples_stats_counter.h" namespace webrtc { namespace test { @@ -46,11 +46,11 @@ enum class ImproveDirection { // // The binary this runs in must be hooked up as a perf test in the WebRTC // recipes for this to actually be uploaded to chromeperf.appspot.com. -void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& user_story, +void PrintResult(absl::string_view measurement, + absl::string_view modifier, + absl::string_view user_story, const double value, - const std::string& units, + absl::string_view units, bool important, ImproveDirection improve_direction = ImproveDirection::kNone); @@ -59,12 +59,12 @@ void PrintResult(const std::string& measurement, // standard deviation (or other error metric) of the measurement. // DEPRECATED: soon unsupported. 
void PrintResultMeanAndError( - const std::string& measurement, - const std::string& modifier, - const std::string& user_story, + absl::string_view measurement, + absl::string_view modifier, + absl::string_view user_story, const double mean, const double error, - const std::string& units, + absl::string_view units, bool important, ImproveDirection improve_direction = ImproveDirection::kNone); @@ -73,21 +73,21 @@ void PrintResultMeanAndError( // post-processing step might produce plots of their mean and standard // deviation. void PrintResultList( - const std::string& measurement, - const std::string& modifier, - const std::string& user_story, + absl::string_view measurement, + absl::string_view modifier, + absl::string_view user_story, rtc::ArrayView values, - const std::string& units, + absl::string_view units, bool important, ImproveDirection improve_direction = ImproveDirection::kNone); // Like PrintResult(), but prints a (mean, standard deviation) from stats // counter. Also add specified metric to the plotable metrics output. 
-void PrintResult(const std::string& measurement, - const std::string& modifier, - const std::string& user_story, +void PrintResult(absl::string_view measurement, + absl::string_view modifier, + absl::string_view user_story, const SamplesStatsCounter& counter, - const std::string& units, + absl::string_view units, const bool important, ImproveDirection improve_direction = ImproveDirection::kNone); diff --git a/test/testsupport/perf_test_histogram_writer.cc b/test/testsupport/perf_test_histogram_writer.cc index ad70d6801c..096ca44571 100644 --- a/test/testsupport/perf_test_histogram_writer.cc +++ b/test/testsupport/perf_test_histogram_writer.cc @@ -15,8 +15,11 @@ #include #include -#include "rtc_base/critical_section.h" +#include "absl/strings/string_view.h" +#include "api/numerics/samples_stats_counter.h" #include "rtc_base/logging.h" +#include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" #include "third_party/catapult/tracing/tracing/value/diagnostics/reserved_infos.h" #include "third_party/catapult/tracing/tracing/value/histogram.h" @@ -33,26 +36,26 @@ std::string AsJsonString(const std::string string) { class PerfTestHistogramWriter : public PerfTestResultWriter { public: - PerfTestHistogramWriter() : crit_() {} + PerfTestHistogramWriter() : mutex_() {} void ClearResults() override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); histograms_.clear(); } - void LogResult(const std::string& graph_name, - const std::string& trace_name, + void LogResult(absl::string_view graph_name, + absl::string_view trace_name, const double value, - const std::string& units, + absl::string_view units, const bool important, ImproveDirection improve_direction) override { (void)important; AddSample(graph_name, trace_name, value, units, improve_direction); } - void LogResultMeanAndError(const std::string& graph_name, - const std::string& trace_name, + void LogResultMeanAndError(absl::string_view graph_name, + absl::string_view trace_name, 
const double mean, const double error, - const std::string& units, + absl::string_view units, const bool important, ImproveDirection improve_direction) override { RTC_LOG(LS_WARNING) << "Discarding stddev, not supported by histograms"; @@ -61,10 +64,10 @@ class PerfTestHistogramWriter : public PerfTestResultWriter { AddSample(graph_name, trace_name, mean, units, improve_direction); } - void LogResultList(const std::string& graph_name, - const std::string& trace_name, + void LogResultList(absl::string_view graph_name, + absl::string_view trace_name, const rtc::ArrayView values, - const std::string& units, + absl::string_view units, const bool important, ImproveDirection improve_direction) override { (void)important; @@ -75,7 +78,7 @@ class PerfTestHistogramWriter : public PerfTestResultWriter { std::string Serialize() const override { proto::HistogramSet histogram_set; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); for (const auto& histogram : histograms_) { std::unique_ptr proto = histogram.second->toProto(); histogram_set.mutable_histograms()->AddAllocated(proto.release()); @@ -88,14 +91,14 @@ class PerfTestHistogramWriter : public PerfTestResultWriter { } private: - void AddSample(const std::string& original_graph_name, - const std::string& trace_name, + void AddSample(absl::string_view original_graph_name, + absl::string_view trace_name, const double value, - const std::string& units, + absl::string_view units, ImproveDirection improve_direction) { // WebRTC annotates the units into the metric name when they are not // supported by the Histogram API. - std::string graph_name = original_graph_name; + std::string graph_name(original_graph_name); if (units == "dB") { graph_name += "_dB"; } else if (units == "fps") { @@ -107,9 +110,10 @@ class PerfTestHistogramWriter : public PerfTestResultWriter { // Lookup on graph name + trace name (or measurement + story in catapult // parlance). 
There should be several histograms with the same measurement // if they're for different stories. - std::string measurement_and_story = graph_name + trace_name; - rtc::CritScope lock(&crit_); - if (histograms_.count(measurement_and_story) == 0) { + rtc::StringBuilder measurement_and_story; + measurement_and_story << graph_name << trace_name; + MutexLock lock(&mutex_); + if (histograms_.count(measurement_and_story.str()) == 0) { proto::UnitAndDirection unit = ParseUnit(units, improve_direction); std::unique_ptr builder = std::make_unique(graph_name, unit); @@ -117,24 +121,24 @@ class PerfTestHistogramWriter : public PerfTestResultWriter { // Set all summary options as false - we don't want to generate // metric_std, metric_count, and so on for all metrics. builder->SetSummaryOptions(proto::SummaryOptions()); - histograms_[measurement_and_story] = std::move(builder); + histograms_[measurement_and_story.str()] = std::move(builder); proto::Diagnostic stories; proto::GenericSet* generic_set = stories.mutable_generic_set(); - generic_set->add_values(AsJsonString(trace_name)); - histograms_[measurement_and_story]->AddDiagnostic( + generic_set->add_values(AsJsonString(std::string(trace_name))); + histograms_[measurement_and_story.str()]->AddDiagnostic( catapult::kStoriesDiagnostic, stories); } if (units == "bps") { // Bps has been interpreted as bits per second in WebRTC tests. 
- histograms_[measurement_and_story]->AddSample(value / 8); + histograms_[measurement_and_story.str()]->AddSample(value / 8); } else { - histograms_[measurement_and_story]->AddSample(value); + histograms_[measurement_and_story.str()]->AddSample(value); } } - proto::UnitAndDirection ParseUnit(const std::string& units, + proto::UnitAndDirection ParseUnit(absl::string_view units, ImproveDirection improve_direction) { RTC_DCHECK(units.find('_') == std::string::npos) << "The unit_bigger|smallerIsBetter syntax isn't supported in WebRTC, " @@ -155,7 +159,7 @@ class PerfTestHistogramWriter : public PerfTestResultWriter { } else if (units == "%") { result.set_unit(proto::UNITLESS); } else { - proto::Unit unit = catapult::UnitFromJsonUnit(units); + proto::Unit unit = catapult::UnitFromJsonUnit(std::string(units)); // UnitFromJsonUnit returns UNITLESS if it doesn't recognize the unit. if (unit == proto::UNITLESS && units != "unitless") { @@ -182,9 +186,9 @@ class PerfTestHistogramWriter : public PerfTestResultWriter { } private: - rtc::CriticalSection crit_; + mutable Mutex mutex_; std::map> histograms_ - RTC_GUARDED_BY(&crit_); + RTC_GUARDED_BY(&mutex_); }; } // namespace diff --git a/test/testsupport/perf_test_histogram_writer_no_protobuf.cc b/test/testsupport/perf_test_histogram_writer_no_protobuf.cc index 17685ccb0d..14deb37c66 100644 --- a/test/testsupport/perf_test_histogram_writer_no_protobuf.cc +++ b/test/testsupport/perf_test_histogram_writer_no_protobuf.cc @@ -16,6 +16,7 @@ namespace test { PerfTestResultWriter* CreateHistogramWriter() { RTC_NOTREACHED() << "Cannot run perf tests with rtc_enable_protobuf = false. 
" "Perf write results as protobufs."; + return nullptr; } } // namespace test diff --git a/test/testsupport/perf_test_histogram_writer_unittest.cc b/test/testsupport/perf_test_histogram_writer_unittest.cc index 6b083d6543..83025a7447 100644 --- a/test/testsupport/perf_test_histogram_writer_unittest.cc +++ b/test/testsupport/perf_test_histogram_writer_unittest.cc @@ -34,6 +34,25 @@ TEST(PerfHistogramWriterUnittest, TestSimpleHistogram) { ASSERT_EQ(histogram_set.histograms_size(), 1); } +TEST(PerfHistogramWriterUnittest, TestListOfValuesHistogram) { + std::unique_ptr writer = + std::unique_ptr(CreateHistogramWriter()); + + std::vector samples{0, 1, 2}; + writer->LogResultList("-", "-", samples, "ms", false, + ImproveDirection::kNone); + + proto::HistogramSet histogram_set; + EXPECT_TRUE(histogram_set.ParseFromString(writer->Serialize())) + << "Expected valid histogram set"; + + ASSERT_EQ(histogram_set.histograms_size(), 1); + ASSERT_EQ(histogram_set.histograms(0).sample_values_size(), 3); + EXPECT_EQ(histogram_set.histograms(0).sample_values(0), 0); + EXPECT_EQ(histogram_set.histograms(0).sample_values(1), 1); + EXPECT_EQ(histogram_set.histograms(0).sample_values(2), 2); +} + TEST(PerfHistogramWriterUnittest, WritesSamplesAndUserStory) { std::unique_ptr writer = std::unique_ptr(CreateHistogramWriter()); diff --git a/test/testsupport/perf_test_result_writer.h b/test/testsupport/perf_test_result_writer.h index d5d7011749..e7342c137f 100644 --- a/test/testsupport/perf_test_result_writer.h +++ b/test/testsupport/perf_test_result_writer.h @@ -12,8 +12,10 @@ #define TEST_TESTSUPPORT_PERF_TEST_RESULT_WRITER_H_ #include + #include +#include "absl/strings/string_view.h" #include "test/testsupport/perf_test.h" namespace webrtc { @@ -25,25 +27,25 @@ class PerfTestResultWriter { virtual ~PerfTestResultWriter() = default; virtual void ClearResults() = 0; - virtual void LogResult(const std::string& graph_name, - const std::string& trace_name, + virtual void 
LogResult(absl::string_view graph_name, + absl::string_view trace_name, const double value, - const std::string& units, + absl::string_view units, const bool important, webrtc::test::ImproveDirection improve_direction) = 0; virtual void LogResultMeanAndError( - const std::string& graph_name, - const std::string& trace_name, + absl::string_view graph_name, + absl::string_view trace_name, const double mean, const double error, - const std::string& units, + absl::string_view units, const bool important, webrtc::test::ImproveDirection improve_direction) = 0; virtual void LogResultList( - const std::string& graph_name, - const std::string& trace_name, + absl::string_view graph_name, + absl::string_view trace_name, const rtc::ArrayView values, - const std::string& units, + absl::string_view units, const bool important, webrtc::test::ImproveDirection improve_direction) = 0; diff --git a/test/testsupport/perf_test_unittest.cc b/test/testsupport/perf_test_unittest.cc index b779f4c6ec..4cd925d8fb 100644 --- a/test/testsupport/perf_test_unittest.cc +++ b/test/testsupport/perf_test_unittest.cc @@ -64,6 +64,7 @@ TEST_F(PerfTest, TestClearPerfResults) { #if WEBRTC_ENABLE_PROTOBUF TEST_F(PerfTest, TestGetPerfResultsHistograms) { + ClearPerfResults(); PrintResult("measurement", "_modifier", "story_1", 42, "ms", false); PrintResult("foo", "bar", "story_1", 7, "sigma", true); // Note: the error will be ignored, not supported by histograms. 
@@ -102,6 +103,83 @@ TEST_F(PerfTest, TestGetPerfResultsHistograms) { EXPECT_EQ(hist2.unit().unit(), proto::MS_BEST_FIT_FORMAT); } +TEST_F(PerfTest, TestGetPerfResultsHistogramsWithEmptyCounter) { + ClearPerfResults(); + ::testing::internal::CaptureStdout(); + + SamplesStatsCounter empty_counter; + PrintResult("measurement", "_modifier", "story", empty_counter, "ms", false); + + proto::HistogramSet histogram_set; + EXPECT_TRUE(histogram_set.ParseFromString(GetPerfResults())) + << "Expected valid histogram set"; + + ASSERT_EQ(histogram_set.histograms_size(), 1) + << "Should be one histogram: measurement_modifier"; + const proto::Histogram& hist = histogram_set.histograms(0); + + EXPECT_EQ(hist.name(), "measurement_modifier"); + + // Spot check some things in here (there's a more thorough test on the + // histogram writer itself). + EXPECT_EQ(hist.unit().unit(), proto::MS_BEST_FIT_FORMAT); + EXPECT_EQ(hist.sample_values_size(), 1); + EXPECT_EQ(hist.sample_values(0), 0); + + EXPECT_EQ(hist.diagnostics().diagnostic_map().count("stories"), 1u); + const proto::Diagnostic& stories = + hist.diagnostics().diagnostic_map().at("stories"); + ASSERT_EQ(stories.generic_set().values_size(), 1); + EXPECT_EQ(stories.generic_set().values(0), "\"story\""); + + std::string expected = "RESULT measurement_modifier: story= {0,0} ms\n"; + EXPECT_EQ(expected, ::testing::internal::GetCapturedStdout()); +} + +TEST_F(PerfTest, TestGetPerfResultsHistogramsWithStatsCounter) { + ClearPerfResults(); + ::testing::internal::CaptureStdout(); + + SamplesStatsCounter counter; + counter.AddSample(1); + counter.AddSample(2); + counter.AddSample(3); + counter.AddSample(4); + counter.AddSample(5); + PrintResult("measurement", "_modifier", "story", counter, "ms", false); + + proto::HistogramSet histogram_set; + EXPECT_TRUE(histogram_set.ParseFromString(GetPerfResults())) + << "Expected valid histogram set"; + + ASSERT_EQ(histogram_set.histograms_size(), 1) + << "Should be one histogram: 
measurement_modifier"; + const proto::Histogram& hist = histogram_set.histograms(0); + + EXPECT_EQ(hist.name(), "measurement_modifier"); + + // Spot check some things in here (there's a more thorough test on the + // histogram writer itself). + EXPECT_EQ(hist.unit().unit(), proto::MS_BEST_FIT_FORMAT); + EXPECT_EQ(hist.sample_values_size(), 5); + EXPECT_EQ(hist.sample_values(0), 1); + EXPECT_EQ(hist.sample_values(1), 2); + EXPECT_EQ(hist.sample_values(2), 3); + EXPECT_EQ(hist.sample_values(3), 4); + EXPECT_EQ(hist.sample_values(4), 5); + + EXPECT_EQ(hist.diagnostics().diagnostic_map().count("stories"), 1u); + const proto::Diagnostic& stories = + hist.diagnostics().diagnostic_map().at("stories"); + ASSERT_EQ(stories.generic_set().values_size(), 1); + EXPECT_EQ(stories.generic_set().values(0), "\"story\""); + + // mean = 3; std = sqrt(2) + std::string expected = + "RESULT measurement_modifier: story= {3,1.4142136} ms\n"; + EXPECT_EQ(expected, ::testing::internal::GetCapturedStdout()); +} + #endif // WEBRTC_ENABLE_PROTOBUF #if GTEST_HAS_DEATH_TEST diff --git a/test/testsupport/resources_dir_flag.cc b/test/testsupport/resources_dir_flag.cc index a6ab3b537b..87a449a401 100644 --- a/test/testsupport/resources_dir_flag.cc +++ b/test/testsupport/resources_dir_flag.cc @@ -10,6 +10,8 @@ #include "test/testsupport/resources_dir_flag.h" +#include "absl/flags/flag.h" + ABSL_FLAG(std::string, resources_dir, "", diff --git a/test/testsupport/resources_dir_flag.h b/test/testsupport/resources_dir_flag.h index 055cc82546..7d6f192d9b 100644 --- a/test/testsupport/resources_dir_flag.h +++ b/test/testsupport/resources_dir_flag.h @@ -13,7 +13,7 @@ #ifndef TEST_TESTSUPPORT_RESOURCES_DIR_FLAG_H__ #define TEST_TESTSUPPORT_RESOURCES_DIR_FLAG_H__ -#include "absl/flags/flag.h" +#include "absl/flags/declare.h" ABSL_DECLARE_FLAG(std::string, resources_dir); diff --git a/test/testsupport/test_artifacts_unittest.cc b/test/testsupport/test_artifacts_unittest.cc index 98de9e4bb8..fb577610fb 100644 
--- a/test/testsupport/test_artifacts_unittest.cc +++ b/test/testsupport/test_artifacts_unittest.cc @@ -14,6 +14,7 @@ #include +#include "absl/flags/declare.h" #include "absl/flags/flag.h" #include "rtc_base/system/file_wrapper.h" #include "test/gtest.h" diff --git a/test/testsupport/video_frame_writer.h b/test/testsupport/video_frame_writer.h index db1d453775..f4af378b12 100644 --- a/test/testsupport/video_frame_writer.h +++ b/test/testsupport/video_frame_writer.h @@ -16,7 +16,6 @@ #include "api/video/video_frame.h" #include "rtc_base/buffer.h" -#include "rtc_base/critical_section.h" #include "test/testsupport/frame_writer.h" namespace webrtc { diff --git a/test/testsupport/y4m_frame_reader.cc b/test/testsupport/y4m_frame_reader.cc index 6008d1ef16..3f037a3b4b 100644 --- a/test/testsupport/y4m_frame_reader.cc +++ b/test/testsupport/y4m_frame_reader.cc @@ -40,9 +40,9 @@ Y4mFrameReaderImpl::~Y4mFrameReaderImpl() { } bool Y4mFrameReaderImpl::Init() { - if (width_ <= 0 || height_ <= 0) { - fprintf(stderr, "Frame width and height must be >0, was %d x %d\n", width_, - height_); + if (input_width_ <= 0 || input_height_ <= 0) { + fprintf(stderr, "Frame width and height must be >0, was %d x %d\n", + input_width_, input_height_); return false; } input_file_ = fopen(input_filename_.c_str(), "rb"); diff --git a/test/testsupport/y4m_frame_writer.cc b/test/testsupport/y4m_frame_writer.cc index 25106d886c..896524fa0c 100644 --- a/test/testsupport/y4m_frame_writer.cc +++ b/test/testsupport/y4m_frame_writer.cc @@ -41,7 +41,7 @@ bool Y4mFrameWriterImpl::Init() { return true; } -bool Y4mFrameWriterImpl::WriteFrame(uint8_t* frame_buffer) { +bool Y4mFrameWriterImpl::WriteFrame(const uint8_t* frame_buffer) { if (output_file_ == nullptr) { fprintf(stderr, "Y4mFrameWriterImpl is not initialized (output file is NULL)\n"); diff --git a/test/testsupport/yuv_frame_reader.cc b/test/testsupport/yuv_frame_reader.cc index 91b31a6e72..fca982bf34 100644 --- a/test/testsupport/yuv_frame_reader.cc 
+++ b/test/testsupport/yuv_frame_reader.cc @@ -20,16 +20,64 @@ namespace webrtc { namespace test { +size_t FrameSizeBytes(int width, int height) { + int half_width = (width + 1) / 2; + size_t size_y = static_cast(width) * height; + size_t size_uv = static_cast(half_width) * ((height + 1) / 2); + return size_y + 2 * size_uv; +} + +YuvFrameReaderImpl::DropperUtil::DropperUtil(int source_fps, int target_fps) + : frame_size_buckets_( + std::max(1.0, static_cast(source_fps) / target_fps)), + bucket_level_(0.0) {} + +YuvFrameReaderImpl::DropperUtil::DropDecision +YuvFrameReaderImpl::DropperUtil::UpdateLevel() { + DropDecision decision; + if (bucket_level_ <= 0.0) { + decision = DropDecision::kKeepFrame; + bucket_level_ += frame_size_buckets_; + } else { + decision = DropDecision::kDropframe; + } + bucket_level_ -= 1.0; + return decision; +} YuvFrameReaderImpl::YuvFrameReaderImpl(std::string input_filename, int width, int height) + : YuvFrameReaderImpl(input_filename, + width, + height, + width, + height, + RepeatMode::kSingle, + 30, + 30) {} +YuvFrameReaderImpl::YuvFrameReaderImpl(std::string input_filename, + int input_width, + int input_height, + int desired_width, + int desired_height, + RepeatMode repeat_mode, + absl::optional clip_fps, + int target_fps) : input_filename_(input_filename), - frame_length_in_bytes_(width * height + - 2 * ((width + 1) / 2) * ((height + 1) / 2)), - width_(width), - height_(height), + frame_length_in_bytes_(input_width * input_height + + 2 * ((input_width + 1) / 2) * + ((input_height + 1) / 2)), + input_width_(input_width), + input_height_(input_height), + desired_width_(desired_width), + desired_height_(desired_height), + frame_size_bytes_(FrameSizeBytes(input_width, input_height)), + repeat_mode_(repeat_mode), number_of_frames_(-1), + current_frame_index_(-1), + dropper_(clip_fps.has_value() ? 
new DropperUtil(*clip_fps, target_fps) + : nullptr), input_file_(nullptr) {} YuvFrameReaderImpl::~YuvFrameReaderImpl() { @@ -37,9 +85,9 @@ YuvFrameReaderImpl::~YuvFrameReaderImpl() { } bool YuvFrameReaderImpl::Init() { - if (width_ <= 0 || height_ <= 0) { - fprintf(stderr, "Frame width and height must be >0, was %d x %d\n", width_, - height_); + if (input_width_ <= 0 || input_height_ <= 0) { + fprintf(stderr, "Frame width and height must be >0, was %d x %d\n", + input_width_, input_height_); return false; } input_file_ = fopen(input_filename_.c_str(), "rb"); @@ -56,6 +104,7 @@ bool YuvFrameReaderImpl::Init() { } number_of_frames_ = static_cast(source_file_size / frame_length_in_bytes_); + current_frame_index_ = 0; return true; } @@ -65,13 +114,49 @@ rtc::scoped_refptr YuvFrameReaderImpl::ReadFrame() { "YuvFrameReaderImpl is not initialized (input file is NULL)\n"); return nullptr; } - rtc::scoped_refptr buffer( - ReadI420Buffer(width_, height_, input_file_)); - if (!buffer && ferror(input_file_)) { - fprintf(stderr, "Error reading from input file: %s\n", - input_filename_.c_str()); + + rtc::scoped_refptr buffer; + + do { + if (current_frame_index_ >= number_of_frames_) { + switch (repeat_mode_) { + case RepeatMode::kSingle: + return nullptr; + case RepeatMode::kRepeat: + fseek(input_file_, 0, SEEK_SET); + current_frame_index_ = 0; + break; + case RepeatMode::kPingPong: + if (current_frame_index_ == number_of_frames_ * 2) { + fseek(input_file_, 0, SEEK_SET); + current_frame_index_ = 0; + } else { + int reverse_frame_index = current_frame_index_ - number_of_frames_; + int seek_frame_pos = (number_of_frames_ - reverse_frame_index - 1); + fseek(input_file_, seek_frame_pos * frame_size_bytes_, SEEK_SET); + } + break; + } + } + ++current_frame_index_; + + buffer = ReadI420Buffer(input_width_, input_height_, input_file_); + if (!buffer && ferror(input_file_)) { + fprintf(stderr, "Error reading from input file: %s\n", + input_filename_.c_str()); + } + } while (dropper_ && 
+ dropper_->UpdateLevel() == DropperUtil::DropDecision::kDropframe); + + if (input_width_ == desired_width_ && input_height_ == desired_height_) { + return buffer; } - return buffer; + + rtc::scoped_refptr rescaled_buffer( + I420Buffer::Create(desired_width_, desired_height_)); + rescaled_buffer->ScaleFrom(*buffer.get()); + + return rescaled_buffer; } void YuvFrameReaderImpl::Close() { diff --git a/test/testsupport/yuv_frame_writer.cc b/test/testsupport/yuv_frame_writer.cc index 24f4f53133..59cb74ebeb 100644 --- a/test/testsupport/yuv_frame_writer.cc +++ b/test/testsupport/yuv_frame_writer.cc @@ -50,7 +50,7 @@ bool YuvFrameWriterImpl::Init() { return true; } -bool YuvFrameWriterImpl::WriteFrame(uint8_t* frame_buffer) { +bool YuvFrameWriterImpl::WriteFrame(const uint8_t* frame_buffer) { RTC_DCHECK(frame_buffer); if (output_file_ == nullptr) { fprintf(stderr, diff --git a/test/time_controller/BUILD.gn b/test/time_controller/BUILD.gn index 7f77f0afec..6c13a99648 100644 --- a/test/time_controller/BUILD.gn +++ b/test/time_controller/BUILD.gn @@ -26,6 +26,7 @@ rtc_library("time_controller") { ] deps = [ + "../../api:sequence_checker", "../../api:time_controller", "../../api/task_queue", "../../api/task_queue:default_task_queue_factory", @@ -35,14 +36,15 @@ rtc_library("time_controller") { "../../modules/utility:utility", "../../rtc_base", "../../rtc_base:checks", + "../../rtc_base:null_socket_server", "../../rtc_base:rtc_base_tests_utils", "../../rtc_base:rtc_event", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", "../../rtc_base/synchronization:yield_policy", "../../rtc_base/task_utils:to_queued_task", "../../system_wrappers", - "//third_party/abseil-cpp/absl/strings", ] + absl_deps = [ "//third_party/abseil-cpp/absl/strings" ] } if (rtc_include_tests) { @@ -51,13 +53,20 @@ if (rtc_include_tests) { sources = [ "external_time_controller_unittest.cc", "simulated_time_controller_unittest.cc", + 
"time_controller_conformance_test.cc", ] deps = [ ":time_controller", "../:test_support", + "../../api:time_controller", + "../../api/units:time_delta", + "../../rtc_base", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_task_queue", + "../../rtc_base:threading", + "../../rtc_base/synchronization:mutex", "../../rtc_base/task_utils:repeating_task", + "../../rtc_base/task_utils:to_queued_task", ] } } diff --git a/test/time_controller/real_time_controller.cc b/test/time_controller/real_time_controller.cc index 73772b968f..2e741cf20c 100644 --- a/test/time_controller/real_time_controller.cc +++ b/test/time_controller/real_time_controller.cc @@ -11,7 +11,6 @@ #include "api/task_queue/default_task_queue_factory.h" #include "rtc_base/null_socket_server.h" -#include "system_wrappers/include/sleep.h" namespace webrtc { namespace { diff --git a/test/time_controller/simulated_process_thread.cc b/test/time_controller/simulated_process_thread.cc index df90f54ed6..e001841ac0 100644 --- a/test/time_controller/simulated_process_thread.cc +++ b/test/time_controller/simulated_process_thread.cc @@ -39,7 +39,7 @@ SimulatedProcessThread::~SimulatedProcessThread() { void SimulatedProcessThread::RunReady(Timestamp at_time) { CurrentTaskQueueSetter set_current(this); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); std::vector ready_modules; for (auto it = delayed_modules_.begin(); it != delayed_modules_.end() && it->first <= at_time; @@ -63,10 +63,10 @@ void SimulatedProcessThread::RunReady(Timestamp at_time) { while (!queue_.empty()) { std::unique_ptr task = std::move(queue_.front()); queue_.pop_front(); - lock_.Leave(); + lock_.Unlock(); bool should_delete = task->Run(); RTC_CHECK(should_delete); - lock_.Enter(); + lock_.Lock(); } RTC_DCHECK(queue_.empty()); if (!delayed_modules_.empty()) { @@ -81,7 +81,7 @@ void SimulatedProcessThread::RunReady(Timestamp at_time) { void SimulatedProcessThread::Start() { std::vector starting; { - rtc::CritScope lock(&lock_); + MutexLock 
lock(&lock_); if (process_thread_running_) return; process_thread_running_ = true; @@ -91,7 +91,7 @@ void SimulatedProcessThread::Start() { module->ProcessThreadAttached(this); Timestamp at_time = handler_->CurrentTime(); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); for (auto& module : starting) delayed_modules_[GetNextTime(module, at_time)].push_back(module); @@ -107,7 +107,7 @@ void SimulatedProcessThread::Start() { void SimulatedProcessThread::Stop() { std::vector stopping; { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); process_thread_running_ = false; for (auto& delayed : delayed_modules_) { @@ -123,7 +123,7 @@ void SimulatedProcessThread::Stop() { } void SimulatedProcessThread::WakeUp(Module* module) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); for (auto it = delayed_modules_.begin(); it != delayed_modules_.end(); ++it) { if (RemoveByValue(&it->second, module)) break; @@ -136,7 +136,7 @@ void SimulatedProcessThread::WakeUp(Module* module) { void SimulatedProcessThread::RegisterModule(Module* module, const rtc::Location& from) { module->ProcessThreadAttached(this); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); if (!process_thread_running_) { stopped_modules_.push_back(module); } else { @@ -149,7 +149,7 @@ void SimulatedProcessThread::RegisterModule(Module* module, void SimulatedProcessThread::DeRegisterModule(Module* module) { bool modules_running; { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); if (!process_thread_running_) { RemoveByValue(&stopped_modules_, module); } else { @@ -165,14 +165,14 @@ void SimulatedProcessThread::DeRegisterModule(Module* module) { } void SimulatedProcessThread::PostTask(std::unique_ptr task) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); queue_.emplace_back(std::move(task)); next_run_time_ = Timestamp::MinusInfinity(); } void SimulatedProcessThread::PostDelayedTask(std::unique_ptr task, uint32_t milliseconds) { - rtc::CritScope lock(&lock_); + MutexLock 
lock(&lock_); Timestamp target_time = handler_->CurrentTime() + TimeDelta::Millis(milliseconds); delayed_tasks_[target_time].push_back(std::move(task)); diff --git a/test/time_controller/simulated_process_thread.h b/test/time_controller/simulated_process_thread.h index 6026826d2f..54d5db7df8 100644 --- a/test/time_controller/simulated_process_thread.h +++ b/test/time_controller/simulated_process_thread.h @@ -16,6 +16,7 @@ #include #include +#include "rtc_base/synchronization/mutex.h" #include "test/time_controller/simulated_time_controller.h" namespace webrtc { @@ -29,7 +30,7 @@ class SimulatedProcessThread : public ProcessThread, void RunReady(Timestamp at_time) override; Timestamp GetNextRunTime() const override { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); return next_run_time_; } @@ -55,7 +56,7 @@ class SimulatedProcessThread : public ProcessThread, sim_time_impl::SimulatedTimeControllerImpl* const handler_; // Using char* to be debugger friendly. char* name_; - rtc::CriticalSection lock_; + mutable Mutex lock_; Timestamp next_run_time_ RTC_GUARDED_BY(lock_) = Timestamp::PlusInfinity(); std::deque> queue_; diff --git a/test/time_controller/simulated_task_queue.cc b/test/time_controller/simulated_task_queue.cc index 6bc96c73b9..da675af81e 100644 --- a/test/time_controller/simulated_task_queue.cc +++ b/test/time_controller/simulated_task_queue.cc @@ -27,16 +27,22 @@ SimulatedTaskQueue::~SimulatedTaskQueue() { } void SimulatedTaskQueue::Delete() { + // Need to destroy the tasks outside of the lock because task destruction + // can lead to re-entry in SimulatedTaskQueue via custom destructors. 
+ std::deque> ready_tasks; + std::map>> delayed_tasks; { - rtc::CritScope lock(&lock_); - ready_tasks_.clear(); - delayed_tasks_.clear(); + MutexLock lock(&lock_); + ready_tasks_.swap(ready_tasks); + delayed_tasks_.swap(delayed_tasks); } + ready_tasks.clear(); + delayed_tasks.clear(); delete this; } void SimulatedTaskQueue::RunReady(Timestamp at_time) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); for (auto it = delayed_tasks_.begin(); it != delayed_tasks_.end() && it->first <= at_time; it = delayed_tasks_.erase(it)) { @@ -48,14 +54,14 @@ void SimulatedTaskQueue::RunReady(Timestamp at_time) { while (!ready_tasks_.empty()) { std::unique_ptr ready = std::move(ready_tasks_.front()); ready_tasks_.pop_front(); - lock_.Leave(); + lock_.Unlock(); bool delete_task = ready->Run(); if (delete_task) { ready.reset(); } else { ready.release(); } - lock_.Enter(); + lock_.Lock(); } if (!delayed_tasks_.empty()) { next_run_time_ = delayed_tasks_.begin()->first; @@ -65,14 +71,14 @@ void SimulatedTaskQueue::RunReady(Timestamp at_time) { } void SimulatedTaskQueue::PostTask(std::unique_ptr task) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); ready_tasks_.emplace_back(std::move(task)); next_run_time_ = Timestamp::MinusInfinity(); } void SimulatedTaskQueue::PostDelayedTask(std::unique_ptr task, uint32_t milliseconds) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); Timestamp target_time = handler_->CurrentTime() + TimeDelta::Millis(milliseconds); delayed_tasks_[target_time].push_back(std::move(task)); diff --git a/test/time_controller/simulated_task_queue.h b/test/time_controller/simulated_task_queue.h index 940117c85b..5035f799fc 100644 --- a/test/time_controller/simulated_task_queue.h +++ b/test/time_controller/simulated_task_queue.h @@ -15,6 +15,7 @@ #include #include +#include "rtc_base/synchronization/mutex.h" #include "test/time_controller/simulated_time_controller.h" namespace webrtc { @@ -30,7 +31,7 @@ class SimulatedTaskQueue : public 
TaskQueueBase, void RunReady(Timestamp at_time) override; Timestamp GetNextRunTime() const override { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); return next_run_time_; } TaskQueueBase* GetAsTaskQueue() override { return this; } @@ -46,7 +47,7 @@ class SimulatedTaskQueue : public TaskQueueBase, // Using char* to be debugger friendly. char* name_; - rtc::CriticalSection lock_; + mutable Mutex lock_; std::deque> ready_tasks_ RTC_GUARDED_BY(lock_); std::map>> delayed_tasks_ diff --git a/test/time_controller/simulated_thread.cc b/test/time_controller/simulated_thread.cc index 8d1637c352..aa8b9ac90d 100644 --- a/test/time_controller/simulated_thread.cc +++ b/test/time_controller/simulated_thread.cc @@ -59,7 +59,7 @@ void SimulatedThread::RunReady(Timestamp at_time) { CurrentThreadSetter set_current(this); ProcessMessages(0); int delay_ms = GetDelay(); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); if (delay_ms == kForever) { next_run_time_ = Timestamp::PlusInfinity(); } else { @@ -83,6 +83,7 @@ void SimulatedThread::Send(const rtc::Location& posted_from, } else { TaskQueueBase* yielding_from = TaskQueueBase::Current(); handler_->StartYield(yielding_from); + RunReady(Timestamp::MinusInfinity()); CurrentThreadSetter set_current(this); msg.phandler->OnMessage(&msg); handler_->StopYield(yielding_from); @@ -95,7 +96,7 @@ void SimulatedThread::Post(const rtc::Location& posted_from, rtc::MessageData* pdata, bool time_sensitive) { rtc::Thread::Post(posted_from, phandler, id, pdata, time_sensitive); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); next_run_time_ = Timestamp::MinusInfinity(); } @@ -105,7 +106,7 @@ void SimulatedThread::PostDelayed(const rtc::Location& posted_from, uint32_t id, rtc::MessageData* pdata) { rtc::Thread::PostDelayed(posted_from, delay_ms, phandler, id, pdata); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); next_run_time_ = std::min(next_run_time_, Timestamp::Millis(rtc::TimeMillis() + delay_ms)); } @@ -116,7 
+117,7 @@ void SimulatedThread::PostAt(const rtc::Location& posted_from, uint32_t id, rtc::MessageData* pdata) { rtc::Thread::PostAt(posted_from, target_time_ms, phandler, id, pdata); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); next_run_time_ = std::min(next_run_time_, Timestamp::Millis(target_time_ms)); } diff --git a/test/time_controller/simulated_thread.h b/test/time_controller/simulated_thread.h index fd3969670a..b6c1e6e265 100644 --- a/test/time_controller/simulated_thread.h +++ b/test/time_controller/simulated_thread.h @@ -12,6 +12,7 @@ #include +#include "rtc_base/synchronization/mutex.h" #include "test/time_controller/simulated_time_controller.h" namespace webrtc { @@ -28,7 +29,7 @@ class SimulatedThread : public rtc::Thread, void RunReady(Timestamp at_time) override; Timestamp GetNextRunTime() const override { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); return next_run_time_; } @@ -61,7 +62,7 @@ class SimulatedThread : public rtc::Thread, sim_time_impl::SimulatedTimeControllerImpl* const handler_; // Using char* to be debugger friendly. char* name_; - rtc::CriticalSection lock_; + mutable Mutex lock_; Timestamp next_run_time_ RTC_GUARDED_BY(lock_) = Timestamp::PlusInfinity(); }; diff --git a/test/time_controller/simulated_time_controller.cc b/test/time_controller/simulated_time_controller.cc index 769be3ff78..a34abe8ced 100644 --- a/test/time_controller/simulated_time_controller.cc +++ b/test/time_controller/simulated_time_controller.cc @@ -95,7 +95,7 @@ void SimulatedTimeControllerImpl::RunReadyRunners() { // Using a dummy thread rather than nullptr to avoid implicit thread creation // by Thread::Current(). 
SimulatedThread::CurrentThreadSetter set_current(dummy_thread_.get()); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); RTC_DCHECK_EQ(rtc::CurrentThreadId(), thread_id_); Timestamp current_time = CurrentTime(); // Clearing |ready_runners_| in case this is a recursive call: @@ -116,25 +116,25 @@ void SimulatedTimeControllerImpl::RunReadyRunners() { while (!ready_runners_.empty()) { auto* runner = ready_runners_.front(); ready_runners_.pop_front(); - lock_.Leave(); + lock_.Unlock(); // Note that the RunReady function might indirectly cause a call to // Unregister() which will grab |lock_| again to remove items from // |ready_runners_|. runner->RunReady(current_time); - lock_.Enter(); + lock_.Lock(); } } } Timestamp SimulatedTimeControllerImpl::CurrentTime() const { - rtc::CritScope lock(&time_lock_); + MutexLock lock(&time_lock_); return current_time_; } Timestamp SimulatedTimeControllerImpl::NextRunTime() const { Timestamp current_time = CurrentTime(); Timestamp next_time = Timestamp::PlusInfinity(); - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); for (auto* runner : runners_) { Timestamp next_run_time = runner->GetNextRunTime(); if (next_run_time <= current_time) @@ -145,18 +145,18 @@ Timestamp SimulatedTimeControllerImpl::NextRunTime() const { } void SimulatedTimeControllerImpl::AdvanceTime(Timestamp target_time) { - rtc::CritScope time_lock(&time_lock_); + MutexLock time_lock(&time_lock_); RTC_DCHECK_GE(target_time, current_time_); current_time_ = target_time; } void SimulatedTimeControllerImpl::Register(SimulatedSequenceRunner* runner) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); runners_.push_back(runner); } void SimulatedTimeControllerImpl::Unregister(SimulatedSequenceRunner* runner) { - rtc::CritScope lock(&lock_); + MutexLock lock(&lock_); bool removed = RemoveByValue(&runners_, runner); RTC_CHECK(removed); RemoveByValue(&ready_runners_, runner); @@ -226,4 +226,14 @@ void GlobalSimulatedTimeController::AdvanceTime(TimeDelta 
duration) { impl_.RunReadyRunners(); } +void GlobalSimulatedTimeController::Register( + sim_time_impl::SimulatedSequenceRunner* runner) { + impl_.Register(runner); +} + +void GlobalSimulatedTimeController::Unregister( + sim_time_impl::SimulatedSequenceRunner* runner) { + impl_.Unregister(runner); +} + } // namespace webrtc diff --git a/test/time_controller/simulated_time_controller.h b/test/time_controller/simulated_time_controller.h index 48112b3a31..9ded4689de 100644 --- a/test/time_controller/simulated_time_controller.h +++ b/test/time_controller/simulated_time_controller.h @@ -17,15 +17,15 @@ #include #include "absl/strings/string_view.h" +#include "api/sequence_checker.h" #include "api/test/time_controller.h" #include "api/units/timestamp.h" #include "modules/include/module.h" #include "modules/utility/include/process_thread.h" -#include "rtc_base/critical_section.h" #include "rtc_base/fake_clock.h" #include "rtc_base/platform_thread_types.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/synchronization/yield_policy.h" -#include "rtc_base/thread_checker.h" namespace webrtc { namespace sim_time_impl { @@ -89,9 +89,9 @@ class SimulatedTimeControllerImpl : public TaskQueueFactory, private: const rtc::PlatformThreadId thread_id_; const std::unique_ptr dummy_thread_ = rtc::Thread::Create(); - rtc::CriticalSection time_lock_; + mutable Mutex time_lock_; Timestamp current_time_ RTC_GUARDED_BY(time_lock_); - rtc::CriticalSection lock_; + mutable Mutex lock_; std::vector runners_ RTC_GUARDED_BY(lock_); // Used in RunReadyRunners() to keep track of ready runners that are to be // processed in a round robin fashion. the reason it's a member is so that @@ -140,6 +140,17 @@ class GlobalSimulatedTimeController : public TimeController { void AdvanceTime(TimeDelta duration) override; + // Makes the simulated time controller aware of a custom + // SimulatedSequenceRunner. 
+ // TODO(bugs.webrtc.org/11581): remove method once the ModuleRtpRtcpImpl2 unit + // test stops using it. + void Register(sim_time_impl::SimulatedSequenceRunner* runner); + // Removes a previously installed custom SimulatedSequenceRunner from the + // simulated time controller. + // TODO(bugs.webrtc.org/11581): remove method once the ModuleRtpRtcpImpl2 unit + // test stops using it. + void Unregister(sim_time_impl::SimulatedSequenceRunner* runner); + private: rtc::ScopedBaseFakeClock global_clock_; // Provides simulated CurrentNtpInMilliseconds() diff --git a/test/time_controller/time_controller_conformance_test.cc b/test/time_controller/time_controller_conformance_test.cc new file mode 100644 index 0000000000..3d582cad8e --- /dev/null +++ b/test/time_controller/time_controller_conformance_test.cc @@ -0,0 +1,184 @@ +/* + * Copyright 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include + +#include "api/test/time_controller.h" +#include "api/units/time_delta.h" +#include "rtc_base/event.h" +#include "rtc_base/location.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "rtc_base/thread.h" +#include "rtc_base/thread_annotations.h" +#include "test/gmock.h" +#include "test/gtest.h" +#include "test/time_controller/real_time_controller.h" +#include "test/time_controller/simulated_time_controller.h" + +namespace webrtc { +namespace { + +using ::testing::ElementsAreArray; +using ::testing::TestParamInfo; +using ::testing::TestWithParam; +using ::testing::Values; + +enum class TimeMode { kRealTime, kSimulated }; + +std::unique_ptr CreateTimeController(TimeMode mode) { + switch (mode) { + case TimeMode::kRealTime: + return std::make_unique(); + case TimeMode::kSimulated: + // Using an offset of 100000 to get nice fixed width and readable + // timestamps in typical test scenarios. + constexpr Timestamp kSimulatedStartTime = Timestamp::Seconds(100000); + return std::make_unique( + kSimulatedStartTime); + } +} + +std::string ParamsToString(const TestParamInfo& param) { + switch (param.param) { + case webrtc::TimeMode::kRealTime: + return "RealTime"; + case webrtc::TimeMode::kSimulated: + return "SimulatedTime"; + default: + RTC_NOTREACHED() << "Time mode not supported"; + } +} + +// Keeps order of executions. May be called from different threads. +class ExecutionOrderKeeper { + public: + void Executed(int execution_id) { + MutexLock lock(&mutex_); + order_.push_back(execution_id); + } + + std::vector order() const { + MutexLock lock(&mutex_); + return order_; + } + + private: + mutable Mutex mutex_; + std::vector order_ RTC_GUARDED_BY(mutex_); +}; + +// Tests conformance between real time and simulated time time controller. 
+class SimulatedRealTimeControllerConformanceTest + : public TestWithParam {}; + +TEST_P(SimulatedRealTimeControllerConformanceTest, ThreadPostOrderTest) { + std::unique_ptr time_controller = + CreateTimeController(GetParam()); + std::unique_ptr thread = time_controller->CreateThread("thread"); + + // Tasks on thread have to be executed in order in which they were + // posted. + ExecutionOrderKeeper execution_order; + thread->PostTask(RTC_FROM_HERE, [&]() { execution_order.Executed(1); }); + thread->PostTask(RTC_FROM_HERE, [&]() { execution_order.Executed(2); }); + time_controller->AdvanceTime(TimeDelta::Millis(100)); + EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `thread` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. + thread = nullptr; +} + +TEST_P(SimulatedRealTimeControllerConformanceTest, ThreadPostDelayedOrderTest) { + std::unique_ptr time_controller = + CreateTimeController(GetParam()); + std::unique_ptr thread = time_controller->CreateThread("thread"); + + ExecutionOrderKeeper execution_order; + thread->PostDelayedTask(ToQueuedTask([&]() { execution_order.Executed(2); }), + /*milliseconds=*/500); + thread->PostTask(ToQueuedTask([&]() { execution_order.Executed(1); })); + time_controller->AdvanceTime(TimeDelta::Millis(600)); + EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `thread` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. + thread = nullptr; +} + +TEST_P(SimulatedRealTimeControllerConformanceTest, ThreadPostInvokeOrderTest) { + std::unique_ptr time_controller = + CreateTimeController(GetParam()); + std::unique_ptr thread = time_controller->CreateThread("thread"); + + // Tasks on thread have to be executed in order in which they were + // posted/invoked. 
+ ExecutionOrderKeeper execution_order; + thread->PostTask(RTC_FROM_HERE, [&]() { execution_order.Executed(1); }); + thread->Invoke(RTC_FROM_HERE, [&]() { execution_order.Executed(2); }); + time_controller->AdvanceTime(TimeDelta::Millis(100)); + EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `thread` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. + thread = nullptr; +} + +TEST_P(SimulatedRealTimeControllerConformanceTest, + ThreadPostInvokeFromThreadOrderTest) { + std::unique_ptr time_controller = + CreateTimeController(GetParam()); + std::unique_ptr thread = time_controller->CreateThread("thread"); + + // If task is invoked from thread X on thread X it has to be executed + // immediately. + ExecutionOrderKeeper execution_order; + thread->PostTask(RTC_FROM_HERE, [&]() { + thread->PostTask(RTC_FROM_HERE, [&]() { execution_order.Executed(2); }); + thread->Invoke(RTC_FROM_HERE, [&]() { execution_order.Executed(1); }); + }); + time_controller->AdvanceTime(TimeDelta::Millis(100)); + EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `thread` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. + thread = nullptr; +} + +TEST_P(SimulatedRealTimeControllerConformanceTest, + TaskQueuePostEventWaitOrderTest) { + std::unique_ptr time_controller = + CreateTimeController(GetParam()); + auto task_queue = time_controller->GetTaskQueueFactory()->CreateTaskQueue( + "task_queue", webrtc::TaskQueueFactory::Priority::NORMAL); + + // Tasks on thread have to be executed in order in which they were + // posted/invoked. 
+ ExecutionOrderKeeper execution_order; + rtc::Event event; + task_queue->PostTask(ToQueuedTask([&]() { execution_order.Executed(1); })); + task_queue->PostTask(ToQueuedTask([&]() { + execution_order.Executed(2); + event.Set(); + })); + EXPECT_TRUE(event.Wait(/*give_up_after_ms=*/100, + /*warn_after_ms=*/10'000)); + time_controller->AdvanceTime(TimeDelta::Millis(100)); + EXPECT_THAT(execution_order.order(), ElementsAreArray({1, 2})); + // Destroy `task_queue` before `execution_order` to be sure `execution_order` + // is not accessed on the posted task after it is destroyed. + task_queue = nullptr; +} + +INSTANTIATE_TEST_SUITE_P(ConformanceTest, + SimulatedRealTimeControllerConformanceTest, + Values(TimeMode::kRealTime, TimeMode::kSimulated), + ParamsToString); + +} // namespace +} // namespace webrtc diff --git a/test/video_codec_settings.h b/test/video_codec_settings.h index b5250486d7..82c82cd7e1 100644 --- a/test/video_codec_settings.h +++ b/test/video_codec_settings.h @@ -25,9 +25,7 @@ const int64_t kTestTimingFramesDelayMs = 200; const uint16_t kTestOutlierFrameSizePercent = 250; static void CodecSettings(VideoCodecType codec_type, VideoCodec* settings) { - memset(settings, 0, sizeof(VideoCodec)); - - settings->plType = kTestPayloadType; + *settings = {}; settings->width = kTestWidth; settings->height = kTestHeight; diff --git a/test/video_decoder_proxy_factory.h b/test/video_decoder_proxy_factory.h index 84552e39a4..303d209dbd 100644 --- a/test/video_decoder_proxy_factory.h +++ b/test/video_decoder_proxy_factory.h @@ -61,7 +61,9 @@ class VideoDecoderProxyFactory final : public VideoDecoderFactory { return decoder_->RegisterDecodeCompleteCallback(callback); } int32_t Release() override { return decoder_->Release(); } - bool PrefersLateDecoding() const { return decoder_->PrefersLateDecoding(); } + DecoderInfo GetDecoderInfo() const override { + return decoder_->GetDecoderInfo(); + } const char* ImplementationName() const override { return 
decoder_->ImplementationName(); } diff --git a/test/video_encoder_proxy_factory.h b/test/video_encoder_proxy_factory.h index 70e2c8aaf2..7c412bacfa 100644 --- a/test/video_encoder_proxy_factory.h +++ b/test/video_encoder_proxy_factory.h @@ -38,7 +38,6 @@ class VideoEncoderProxyFactory final : public VideoEncoderFactory { encoder_selector_(encoder_selector), num_simultaneous_encoder_instances_(0), max_num_simultaneous_encoder_instances_(0) { - codec_info_.is_hardware_accelerated = false; codec_info_.has_internal_source = false; } @@ -70,9 +69,6 @@ class VideoEncoderProxyFactory final : public VideoEncoderFactory { return nullptr; } - void SetIsHardwareAccelerated(bool is_hardware_accelerated) { - codec_info_.is_hardware_accelerated = is_hardware_accelerated; - } void SetHasInternalSource(bool has_internal_source) { codec_info_.has_internal_source = has_internal_source; } diff --git a/tools_webrtc/BUILD.gn b/tools_webrtc/BUILD.gn new file mode 100644 index 0000000000..ee9a734107 --- /dev/null +++ b/tools_webrtc/BUILD.gn @@ -0,0 +1,18 @@ +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +if (target_os == "android") { + action("binary_version_check") { + testonly = true + script = "binary_version_check.py" + deps = [ "../sdk/android:libjingle_peerconnection_so" ] + inputs = [ "$root_out_dir/libjingle_peerconnection_so.so" ] + outputs = [ "$root_out_dir/webrtc_binary_version_check" ] + args = [ "libjingle_peerconnection_so.so" ] + } +} diff --git a/tools_webrtc/PRESUBMIT.py b/tools_webrtc/PRESUBMIT.py index 80e20a348a..27f8bb10d2 100644 --- a/tools_webrtc/PRESUBMIT.py +++ b/tools_webrtc/PRESUBMIT.py @@ -8,39 +8,43 @@ def _LicenseHeader(input_api): - """Returns the license header regexp.""" - # Accept any year number from 2003 to the current year - current_year = int(input_api.time.strftime('%Y')) - allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1))) - years_re = '(' + '|'.join(allowed_years) + ')' - license_header = ( - r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' + """Returns the license header regexp.""" + # Accept any year number from 2003 to the current year + current_year = int(input_api.time.strftime('%Y')) + allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1))) + years_re = '(' + '|'.join(allowed_years) + ')' + license_header = ( + r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. ' r'All [Rr]ights [Rr]eserved\.\n' - r'.*?\n' - r'.*? Use of this source code is governed by a BSD-style license\n' - r'.*? that can be found in the LICENSE file in the root of the source\n' - r'.*? tree\. An additional intellectual property rights grant can be ' + r'.*?\n' + r'.*? Use of this source code is governed by a BSD-style license\n' + r'.*? that can be found in the LICENSE file in the root of the source\n' + r'.*? tree\. An additional intellectual property rights grant can be ' r'found\n' - r'.*? in the file PATENTS\. All contributing project authors may\n' - r'.*? 
be found in the AUTHORS file in the root of the source tree\.\n' - ) % { - 'year': years_re, - } - return license_header + r'.*? in the file PATENTS\. All contributing project authors may\n' + r'.*? be found in the AUTHORS file in the root of the source tree\.\n' + ) % { + 'year': years_re, + } + return license_header + def _CommonChecks(input_api, output_api): - """Checks common to both upload and commit.""" - results = [] - results.extend(input_api.canned_checks.CheckLicense( - input_api, output_api, _LicenseHeader(input_api))) - return results + """Checks common to both upload and commit.""" + results = [] + results.extend( + input_api.canned_checks.CheckLicense(input_api, output_api, + _LicenseHeader(input_api))) + return results + def CheckChangeOnUpload(input_api, output_api): - results = [] - results.extend(_CommonChecks(input_api, output_api)) - return results + results = [] + results.extend(_CommonChecks(input_api, output_api)) + return results + def CheckChangeOnCommit(input_api, output_api): - results = [] - results.extend(_CommonChecks(input_api, output_api)) - return results + results = [] + results.extend(_CommonChecks(input_api, output_api)) + return results diff --git a/tools_webrtc/android/OWNERS b/tools_webrtc/android/OWNERS index 3c4e54174e..cf092a316a 100644 --- a/tools_webrtc/android/OWNERS +++ b/tools_webrtc/android/OWNERS @@ -1 +1 @@ -sakal@webrtc.org +xalep@webrtc.org diff --git a/tools_webrtc/android/build_aar.py b/tools_webrtc/android/build_aar.py index 81e545d11c..9fc4bb0f39 100755 --- a/tools_webrtc/android/build_aar.py +++ b/tools_webrtc/android/build_aar.py @@ -7,7 +7,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Script to generate libwebrtc.aar for distribution. The script has to be run from the root src folder. 
@@ -33,7 +32,6 @@ import tempfile import zipfile - SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0])) SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir)) DEFAULT_ARCHS = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64'] @@ -41,8 +39,8 @@ JAR_FILE = 'lib.java/sdk/android/libwebrtc.jar' MANIFEST_FILE = 'sdk/android/AndroidManifest.xml' TARGETS = [ - 'sdk/android:libwebrtc', - 'sdk/android:libjingle_peerconnection_so', + 'sdk/android:libwebrtc', + 'sdk/android:libjingle_peerconnection_so', ] sys.path.append(os.path.join(SCRIPT_DIR, '..', 'libs')) @@ -52,183 +50,211 @@ import find_depot_tools - def _ParseArgs(): - parser = argparse.ArgumentParser(description='libwebrtc.aar generator.') - parser.add_argument('--build-dir', - help='Build dir. By default will create and use temporary dir.') - parser.add_argument('--output', default='libwebrtc.aar', - help='Output file of the script.') - parser.add_argument('--arch', default=DEFAULT_ARCHS, nargs='*', - help='Architectures to build. Defaults to %(default)s.') - parser.add_argument('--use-goma', action='store_true', default=False, - help='Use goma.') - parser.add_argument('--verbose', action='store_true', default=False, - help='Debug logging.') - parser.add_argument('--extra-gn-args', default=[], nargs='*', - help="""Additional GN arguments to be used during Ninja generation. + parser = argparse.ArgumentParser(description='libwebrtc.aar generator.') + parser.add_argument( + '--build-dir', + type=os.path.abspath, + help='Build dir. By default will create and use temporary dir.') + parser.add_argument('--output', + default='libwebrtc.aar', + type=os.path.abspath, + help='Output file of the script.') + parser.add_argument( + '--arch', + default=DEFAULT_ARCHS, + nargs='*', + help='Architectures to build. 
Defaults to %(default)s.') + parser.add_argument('--use-goma', + action='store_true', + default=False, + help='Use goma.') + parser.add_argument('--verbose', + action='store_true', + default=False, + help='Debug logging.') + parser.add_argument( + '--extra-gn-args', + default=[], + nargs='*', + help="""Additional GN arguments to be used during Ninja generation. These are passed to gn inside `--args` switch and applied after any other arguments and will override any values defined by the script. Example of building debug aar file: build_aar.py --extra-gn-args='is_debug=true'""") - parser.add_argument('--extra-ninja-switches', default=[], nargs='*', - help="""Additional Ninja switches to be used during compilation. + parser.add_argument( + '--extra-ninja-switches', + default=[], + nargs='*', + help="""Additional Ninja switches to be used during compilation. These are applied after any other Ninja switches. Example of enabling verbose Ninja output: build_aar.py --extra-ninja-switches='-v'""") - parser.add_argument('--extra-gn-switches', default=[], nargs='*', - help="""Additional GN switches to be used during compilation. + parser.add_argument( + '--extra-gn-switches', + default=[], + nargs='*', + help="""Additional GN switches to be used during compilation. These are applied after any other GN switches. 
Example of enabling verbose GN output: build_aar.py --extra-gn-switches='-v'""") - return parser.parse_args() + return parser.parse_args() def _RunGN(args): - cmd = [sys.executable, - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py')] - cmd.extend(args) - logging.debug('Running: %r', cmd) - subprocess.check_call(cmd) + cmd = [ + sys.executable, + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py') + ] + cmd.extend(args) + logging.debug('Running: %r', cmd) + subprocess.check_call(cmd) def _RunNinja(output_directory, args): - cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), - '-C', output_directory] - cmd.extend(args) - logging.debug('Running: %r', cmd) - subprocess.check_call(cmd) + cmd = [ + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), '-C', + output_directory + ] + cmd.extend(args) + logging.debug('Running: %r', cmd) + subprocess.check_call(cmd) def _EncodeForGN(value): - """Encodes value as a GN literal.""" - if isinstance(value, str): - return '"' + value + '"' - elif isinstance(value, bool): - return repr(value).lower() - else: - return repr(value) + """Encodes value as a GN literal.""" + if isinstance(value, str): + return '"' + value + '"' + elif isinstance(value, bool): + return repr(value).lower() + else: + return repr(value) def _GetOutputDirectory(build_dir, arch): - """Returns the GN output directory for the target architecture.""" - return os.path.join(build_dir, arch) + """Returns the GN output directory for the target architecture.""" + return os.path.join(build_dir, arch) def _GetTargetCpu(arch): - """Returns target_cpu for the GN build with the given architecture.""" - if arch in ['armeabi', 'armeabi-v7a']: - return 'arm' - elif arch == 'arm64-v8a': - return 'arm64' - elif arch == 'x86': - return 'x86' - elif arch == 'x86_64': - return 'x64' - else: - raise Exception('Unknown arch: ' + arch) + """Returns target_cpu for the GN build with the given architecture.""" + if arch in ['armeabi', 'armeabi-v7a']: + 
return 'arm' + elif arch == 'arm64-v8a': + return 'arm64' + elif arch == 'x86': + return 'x86' + elif arch == 'x86_64': + return 'x64' + else: + raise Exception('Unknown arch: ' + arch) def _GetArmVersion(arch): - """Returns arm_version for the GN build with the given architecture.""" - if arch == 'armeabi': - return 6 - elif arch == 'armeabi-v7a': - return 7 - elif arch in ['arm64-v8a', 'x86', 'x86_64']: - return None - else: - raise Exception('Unknown arch: ' + arch) + """Returns arm_version for the GN build with the given architecture.""" + if arch == 'armeabi': + return 6 + elif arch == 'armeabi-v7a': + return 7 + elif arch in ['arm64-v8a', 'x86', 'x86_64']: + return None + else: + raise Exception('Unknown arch: ' + arch) def Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches, extra_ninja_switches): - """Generates target architecture using GN and builds it using ninja.""" - logging.info('Building: %s', arch) - output_directory = _GetOutputDirectory(build_dir, arch) - gn_args = { - 'target_os': 'android', - 'is_debug': False, - 'is_component_build': False, - 'rtc_include_tests': False, - 'target_cpu': _GetTargetCpu(arch), - 'use_goma': use_goma - } - arm_version = _GetArmVersion(arch) - if arm_version: - gn_args['arm_version'] = arm_version - gn_args_str = '--args=' + ' '.join([ - k + '=' + _EncodeForGN(v) for k, v in gn_args.items()] + extra_gn_args) - - gn_args_list = ['gen', output_directory, gn_args_str] - gn_args_list.extend(extra_gn_switches) - _RunGN(gn_args_list) - - ninja_args = TARGETS[:] - if use_goma: - ninja_args.extend(['-j', '200']) - ninja_args.extend(extra_ninja_switches) - _RunNinja(output_directory, ninja_args) + """Generates target architecture using GN and builds it using ninja.""" + logging.info('Building: %s', arch) + output_directory = _GetOutputDirectory(build_dir, arch) + gn_args = { + 'target_os': 'android', + 'is_debug': False, + 'is_component_build': False, + 'rtc_include_tests': False, + 'target_cpu': 
_GetTargetCpu(arch), + 'use_goma': use_goma + } + arm_version = _GetArmVersion(arch) + if arm_version: + gn_args['arm_version'] = arm_version + gn_args_str = '--args=' + ' '.join( + [k + '=' + _EncodeForGN(v) + for k, v in gn_args.items()] + extra_gn_args) + + gn_args_list = ['gen', output_directory, gn_args_str] + gn_args_list.extend(extra_gn_switches) + _RunGN(gn_args_list) + + ninja_args = TARGETS[:] + if use_goma: + ninja_args.extend(['-j', '200']) + ninja_args.extend(extra_ninja_switches) + _RunNinja(output_directory, ninja_args) def CollectCommon(aar_file, build_dir, arch): - """Collects architecture independent files into the .aar-archive.""" - logging.info('Collecting common files.') - output_directory = _GetOutputDirectory(build_dir, arch) - aar_file.write(MANIFEST_FILE, 'AndroidManifest.xml') - aar_file.write(os.path.join(output_directory, JAR_FILE), 'classes.jar') + """Collects architecture independent files into the .aar-archive.""" + logging.info('Collecting common files.') + output_directory = _GetOutputDirectory(build_dir, arch) + aar_file.write(MANIFEST_FILE, 'AndroidManifest.xml') + aar_file.write(os.path.join(output_directory, JAR_FILE), 'classes.jar') def Collect(aar_file, build_dir, arch): - """Collects architecture specific files into the .aar-archive.""" - logging.info('Collecting: %s', arch) - output_directory = _GetOutputDirectory(build_dir, arch) + """Collects architecture specific files into the .aar-archive.""" + logging.info('Collecting: %s', arch) + output_directory = _GetOutputDirectory(build_dir, arch) - abi_dir = os.path.join('jni', arch) - for so_file in NEEDED_SO_FILES: - aar_file.write(os.path.join(output_directory, so_file), - os.path.join(abi_dir, so_file)) + abi_dir = os.path.join('jni', arch) + for so_file in NEEDED_SO_FILES: + aar_file.write(os.path.join(output_directory, so_file), + os.path.join(abi_dir, so_file)) def GenerateLicenses(output_dir, build_dir, archs): - builder = LicenseBuilder( - 
[_GetOutputDirectory(build_dir, arch) for arch in archs], TARGETS) - builder.GenerateLicenseText(output_dir) + builder = LicenseBuilder( + [_GetOutputDirectory(build_dir, arch) for arch in archs], TARGETS) + builder.GenerateLicenseText(output_dir) -def BuildAar(archs, output_file, use_goma=False, extra_gn_args=None, - ext_build_dir=None, extra_gn_switches=None, +def BuildAar(archs, + output_file, + use_goma=False, + extra_gn_args=None, + ext_build_dir=None, + extra_gn_switches=None, extra_ninja_switches=None): - extra_gn_args = extra_gn_args or [] - extra_gn_switches = extra_gn_switches or [] - extra_ninja_switches = extra_ninja_switches or [] - build_dir = ext_build_dir if ext_build_dir else tempfile.mkdtemp() - - for arch in archs: - Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches, - extra_ninja_switches) - - with zipfile.ZipFile(output_file, 'w') as aar_file: - # Architecture doesn't matter here, arbitrarily using the first one. - CollectCommon(aar_file, build_dir, archs[0]) + extra_gn_args = extra_gn_args or [] + extra_gn_switches = extra_gn_switches or [] + extra_ninja_switches = extra_ninja_switches or [] + build_dir = ext_build_dir if ext_build_dir else tempfile.mkdtemp() + for arch in archs: - Collect(aar_file, build_dir, arch) + Build(build_dir, arch, use_goma, extra_gn_args, extra_gn_switches, + extra_ninja_switches) + + with zipfile.ZipFile(output_file, 'w') as aar_file: + # Architecture doesn't matter here, arbitrarily using the first one. 
+ CollectCommon(aar_file, build_dir, archs[0]) + for arch in archs: + Collect(aar_file, build_dir, arch) - license_dir = os.path.dirname(os.path.realpath(output_file)) - GenerateLicenses(license_dir, build_dir, archs) + license_dir = os.path.dirname(os.path.realpath(output_file)) + GenerateLicenses(license_dir, build_dir, archs) - if not ext_build_dir: - shutil.rmtree(build_dir, True) + if not ext_build_dir: + shutil.rmtree(build_dir, True) def main(): - args = _ParseArgs() - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) + args = _ParseArgs() + logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - BuildAar(args.arch, args.output, args.use_goma, args.extra_gn_args, - args.build_dir, args.extra_gn_switches, args.extra_ninja_switches) + BuildAar(args.arch, args.output, args.use_goma, args.extra_gn_args, + args.build_dir, args.extra_gn_switches, args.extra_ninja_switches) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/android/release_aar.py b/tools_webrtc/android/release_aar.py index 1ac21b4280..bc7f471ae3 100644 --- a/tools_webrtc/android/release_aar.py +++ b/tools_webrtc/android/release_aar.py @@ -7,7 +7,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Script for publishing WebRTC AAR on Bintray. 
Set BINTRAY_USER and BINTRAY_API_KEY environment variables before running @@ -25,7 +24,6 @@ import tempfile import time - SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0])) CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir)) @@ -36,7 +34,6 @@ sys.path.append(os.path.join(CHECKOUT_ROOT, 'tools_webrtc')) from android.build_aar import BuildAar - ARCHS = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64'] MAVEN_REPOSITORY = 'https://google.bintray.com/webrtc' API = 'https://api.bintray.com' @@ -62,230 +59,249 @@ def _ParseArgs(): - parser = argparse.ArgumentParser(description='Releases WebRTC on Bintray.') - parser.add_argument('--use-goma', action='store_true', default=False, - help='Use goma.') - parser.add_argument('--skip-tests', action='store_true', default=False, - help='Skips running the tests.') - parser.add_argument('--publish', action='store_true', default=False, - help='Automatically publishes the library if the tests pass.') - parser.add_argument('--build-dir', default=None, - help='Temporary directory to store the build files. If not specified, ' - 'a new directory will be created.') - parser.add_argument('--verbose', action='store_true', default=False, - help='Debug logging.') - return parser.parse_args() + parser = argparse.ArgumentParser(description='Releases WebRTC on Bintray.') + parser.add_argument('--use-goma', + action='store_true', + default=False, + help='Use goma.') + parser.add_argument('--skip-tests', + action='store_true', + default=False, + help='Skips running the tests.') + parser.add_argument( + '--publish', + action='store_true', + default=False, + help='Automatically publishes the library if the tests pass.') + parser.add_argument( + '--build-dir', + default=None, + help='Temporary directory to store the build files. 
If not specified, ' + 'a new directory will be created.') + parser.add_argument('--verbose', + action='store_true', + default=False, + help='Debug logging.') + return parser.parse_args() def _GetCommitHash(): - commit_hash = subprocess.check_output( - ['git', 'rev-parse', 'HEAD'], cwd=CHECKOUT_ROOT).strip() - return commit_hash + commit_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'], + cwd=CHECKOUT_ROOT).strip() + return commit_hash def _GetCommitPos(): - commit_message = subprocess.check_output( - ['git', 'rev-list', '--format=%B', '--max-count=1', 'HEAD'], - cwd=CHECKOUT_ROOT) - commit_pos_match = re.search( - COMMIT_POSITION_REGEX, commit_message, re.MULTILINE) - if not commit_pos_match: - raise Exception('Commit position not found in the commit message: %s' - % commit_message) - return commit_pos_match.group(1) + commit_message = subprocess.check_output( + ['git', 'rev-list', '--format=%B', '--max-count=1', 'HEAD'], + cwd=CHECKOUT_ROOT) + commit_pos_match = re.search(COMMIT_POSITION_REGEX, commit_message, + re.MULTILINE) + if not commit_pos_match: + raise Exception('Commit position not found in the commit message: %s' % + commit_message) + return commit_pos_match.group(1) def _UploadFile(user, password, filename, version, target_file): -# URL is of format: - # ///// - # Example: - # https://api.bintray.com/content/google/webrtc/google-webrtc/1.0.19742/org/webrtc/google-webrtc/1.0.19742/google-webrtc-1.0.19742.aar + # URL is of format: + # ///// + # Example: + # https://api.bintray.com/content/google/webrtc/google-webrtc/1.0.19742/org/webrtc/google-webrtc/1.0.19742/google-webrtc-1.0.19742.aar + + target_dir = version + '/' + GROUP_ID + '/' + ARTIFACT_ID + '/' + version + target_path = target_dir + '/' + target_file + url = CONTENT_API + '/' + target_path + + logging.info('Uploading %s to %s', filename, url) + with open(filename) as fh: + file_data = fh.read() + + for attempt in xrange(UPLOAD_TRIES): + try: + response = requests.put(url, + 
data=file_data, + auth=(user, password), + timeout=API_TIMEOUT_SECONDS) + break + except requests.exceptions.Timeout as e: + logging.warning('Timeout while uploading: %s', e) + time.sleep(UPLOAD_RETRY_BASE_SLEEP_SECONDS**attempt) + else: + raise Exception('Failed to upload %s' % filename) - target_dir = version + '/' + GROUP_ID + '/' + ARTIFACT_ID + '/' + version - target_path = target_dir + '/' + target_file - url = CONTENT_API + '/' + target_path + if not response.ok: + raise Exception('Failed to upload %s. Response: %s' % + (filename, response)) + logging.info('Uploaded %s: %s', filename, response) - logging.info('Uploading %s to %s', filename, url) - with open(filename) as fh: - file_data = fh.read() - for attempt in xrange(UPLOAD_TRIES): - try: - response = requests.put(url, data=file_data, auth=(user, password), - timeout=API_TIMEOUT_SECONDS) - break - except requests.exceptions.Timeout as e: - logging.warning('Timeout while uploading: %s', e) - time.sleep(UPLOAD_RETRY_BASE_SLEEP_SECONDS ** attempt) - else: - raise Exception('Failed to upload %s' % filename) +def _GeneratePom(target_file, version, commit): + env = jinja2.Environment(loader=jinja2.PackageLoader('release_aar'), ) + template = env.get_template('pom.jinja') + pom = template.render(version=version, commit=commit) + with open(target_file, 'w') as fh: + fh.write(pom) - if not response.ok: - raise Exception('Failed to upload %s. Response: %s' % (filename, response)) - logging.info('Uploaded %s: %s', filename, response) +def _TestAAR(tmp_dir, username, password, version): + """Runs AppRTCMobile tests using the AAR. 
Returns true if the tests pass.""" + logging.info('Testing library.') + env = jinja2.Environment(loader=jinja2.PackageLoader('release_aar'), ) -def _GeneratePom(target_file, version, commit): - env = jinja2.Environment( - loader=jinja2.PackageLoader('release_aar'), - ) - template = env.get_template('pom.jinja') - pom = template.render(version=version, commit=commit) - with open(target_file, 'w') as fh: - fh.write(pom) + gradle_backup = os.path.join(tmp_dir, 'build.gradle.backup') + app_gradle_backup = os.path.join(tmp_dir, 'app-build.gradle.backup') + # Make backup copies of the project files before modifying them. + shutil.copy2(AAR_PROJECT_GRADLE, gradle_backup) + shutil.copy2(AAR_PROJECT_APP_GRADLE, app_gradle_backup) -def _TestAAR(tmp_dir, username, password, version): - """Runs AppRTCMobile tests using the AAR. Returns true if the tests pass.""" - logging.info('Testing library.') - env = jinja2.Environment( - loader=jinja2.PackageLoader('release_aar'), - ) - - gradle_backup = os.path.join(tmp_dir, 'build.gradle.backup') - app_gradle_backup = os.path.join(tmp_dir, 'app-build.gradle.backup') - - # Make backup copies of the project files before modifying them. - shutil.copy2(AAR_PROJECT_GRADLE, gradle_backup) - shutil.copy2(AAR_PROJECT_APP_GRADLE, app_gradle_backup) - - try: - maven_repository_template = env.get_template('maven-repository.jinja') - maven_repository = maven_repository_template.render( - url=MAVEN_REPOSITORY, username=username, password=password) - - # Append Maven repository to build file to download unpublished files. - with open(AAR_PROJECT_GRADLE, 'a') as gradle_file: - gradle_file.write(maven_repository) - - # Read app build file. - with open(AAR_PROJECT_APP_GRADLE, 'r') as gradle_app_file: - gradle_app = gradle_app_file.read() - - if AAR_PROJECT_DEPENDENCY not in gradle_app: - raise Exception( - '%s not found in the build file.' % AAR_PROJECT_DEPENDENCY) - # Set version to the version to be tested. 
- target_dependency = AAR_PROJECT_VERSION_DEPENDENCY % version - gradle_app = gradle_app.replace(AAR_PROJECT_DEPENDENCY, target_dependency) - - # Write back. - with open(AAR_PROJECT_APP_GRADLE, 'w') as gradle_app_file: - gradle_app_file.write(gradle_app) - - # Uninstall any existing version of AppRTCMobile. - logging.info('Uninstalling previous AppRTCMobile versions. It is okay for ' - 'these commands to fail if AppRTCMobile is not installed.') - subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc']) - subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc.test']) - - # Run tests. try: - # First clean the project. - subprocess.check_call([GRADLEW_BIN, 'clean'], cwd=AAR_PROJECT_DIR) - # Then run the tests. - subprocess.check_call([GRADLEW_BIN, 'connectedDebugAndroidTest'], - cwd=AAR_PROJECT_DIR) - except subprocess.CalledProcessError: - logging.exception('Test failure.') - return False # Clean or tests failed - - return True # Tests pass - finally: - # Restore backups. - shutil.copy2(gradle_backup, AAR_PROJECT_GRADLE) - shutil.copy2(app_gradle_backup, AAR_PROJECT_APP_GRADLE) + maven_repository_template = env.get_template('maven-repository.jinja') + maven_repository = maven_repository_template.render( + url=MAVEN_REPOSITORY, username=username, password=password) + + # Append Maven repository to build file to download unpublished files. + with open(AAR_PROJECT_GRADLE, 'a') as gradle_file: + gradle_file.write(maven_repository) + + # Read app build file. + with open(AAR_PROJECT_APP_GRADLE, 'r') as gradle_app_file: + gradle_app = gradle_app_file.read() + + if AAR_PROJECT_DEPENDENCY not in gradle_app: + raise Exception('%s not found in the build file.' % + AAR_PROJECT_DEPENDENCY) + # Set version to the version to be tested. + target_dependency = AAR_PROJECT_VERSION_DEPENDENCY % version + gradle_app = gradle_app.replace(AAR_PROJECT_DEPENDENCY, + target_dependency) + + # Write back. 
+ with open(AAR_PROJECT_APP_GRADLE, 'w') as gradle_app_file: + gradle_app_file.write(gradle_app) + + # Uninstall any existing version of AppRTCMobile. + logging.info( + 'Uninstalling previous AppRTCMobile versions. It is okay for ' + 'these commands to fail if AppRTCMobile is not installed.') + subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc']) + subprocess.call([ADB_BIN, 'uninstall', 'org.appspot.apprtc.test']) + + # Run tests. + try: + # First clean the project. + subprocess.check_call([GRADLEW_BIN, 'clean'], cwd=AAR_PROJECT_DIR) + # Then run the tests. + subprocess.check_call([GRADLEW_BIN, 'connectedDebugAndroidTest'], + cwd=AAR_PROJECT_DIR) + except subprocess.CalledProcessError: + logging.exception('Test failure.') + return False # Clean or tests failed + + return True # Tests pass + finally: + # Restore backups. + shutil.copy2(gradle_backup, AAR_PROJECT_GRADLE) + shutil.copy2(app_gradle_backup, AAR_PROJECT_APP_GRADLE) def _PublishAAR(user, password, version, additional_args): - args = { - 'publish_wait_for_secs': 0 # Publish asynchronously. - } - args.update(additional_args) - - url = CONTENT_API + '/' + version + '/publish' - response = requests.post(url, data=json.dumps(args), auth=(user, password), - timeout=API_TIMEOUT_SECONDS) + args = { + 'publish_wait_for_secs': 0 # Publish asynchronously. + } + args.update(additional_args) + + url = CONTENT_API + '/' + version + '/publish' + response = requests.post(url, + data=json.dumps(args), + auth=(user, password), + timeout=API_TIMEOUT_SECONDS) - if not response.ok: - raise Exception('Failed to publish. Response: %s' % response) + if not response.ok: + raise Exception('Failed to publish. Response: %s' % response) def _DeleteUnpublishedVersion(user, password, version): - url = PACKAGES_API + '/versions/' + version - response = requests.get(url, auth=(user, password), - timeout=API_TIMEOUT_SECONDS) - if not response.ok: - raise Exception('Failed to get version info. 
Response: %s' % response) - - version_info = json.loads(response.content) - if version_info['published']: - logging.info('Version has already been published, not deleting.') - return - - logging.info('Deleting unpublished version.') - response = requests.delete(url, auth=(user, password), - timeout=API_TIMEOUT_SECONDS) - if not response.ok: - raise Exception('Failed to delete version. Response: %s' % response) + url = PACKAGES_API + '/versions/' + version + response = requests.get(url, + auth=(user, password), + timeout=API_TIMEOUT_SECONDS) + if not response.ok: + raise Exception('Failed to get version info. Response: %s' % response) + + version_info = json.loads(response.content) + if version_info['published']: + logging.info('Version has already been published, not deleting.') + return + + logging.info('Deleting unpublished version.') + response = requests.delete(url, + auth=(user, password), + timeout=API_TIMEOUT_SECONDS) + if not response.ok: + raise Exception('Failed to delete version. Response: %s' % response) def ReleaseAar(use_goma, skip_tests, publish, build_dir): - version = '1.0.' + _GetCommitPos() - commit = _GetCommitHash() - logging.info('Releasing AAR version %s with hash %s', version, commit) - - user = os.environ.get('BINTRAY_USER', None) - api_key = os.environ.get('BINTRAY_API_KEY', None) - if not user or not api_key: - raise Exception('Environment variables BINTRAY_USER and BINTRAY_API_KEY ' - 'must be defined.') - - # If build directory is not specified, create a temporary directory. 
- use_tmp_dir = not build_dir - if use_tmp_dir: - build_dir = tempfile.mkdtemp() - - try: - base_name = ARTIFACT_ID + '-' + version - aar_file = os.path.join(build_dir, base_name + '.aar') - third_party_licenses_file = os.path.join(build_dir, 'LICENSE.md') - pom_file = os.path.join(build_dir, base_name + '.pom') - - logging.info('Building at %s', build_dir) - BuildAar(ARCHS, aar_file, - use_goma=use_goma, - ext_build_dir=os.path.join(build_dir, 'aar-build')) - _GeneratePom(pom_file, version, commit) - - _UploadFile(user, api_key, aar_file, version, base_name + '.aar') - _UploadFile(user, api_key, third_party_licenses_file, version, - 'THIRD_PARTY_LICENSES.md') - _UploadFile(user, api_key, pom_file, version, base_name + '.pom') - - tests_pass = skip_tests or _TestAAR(build_dir, user, api_key, version) - if not tests_pass: - logging.info('Discarding library.') - _PublishAAR(user, api_key, version, {'discard': True}) - _DeleteUnpublishedVersion(user, api_key, version) - raise Exception('Test failure. Discarded library.') - - if publish: - logging.info('Publishing library.') - _PublishAAR(user, api_key, version, {}) - else: - logging.info('Note: The library has not not been published automatically.' - ' Please do so manually if desired.') - finally: + version = '1.0.' + _GetCommitPos() + commit = _GetCommitHash() + logging.info('Releasing AAR version %s with hash %s', version, commit) + + user = os.environ.get('BINTRAY_USER', None) + api_key = os.environ.get('BINTRAY_API_KEY', None) + if not user or not api_key: + raise Exception( + 'Environment variables BINTRAY_USER and BINTRAY_API_KEY ' + 'must be defined.') + + # If build directory is not specified, create a temporary directory. 
+ use_tmp_dir = not build_dir if use_tmp_dir: - shutil.rmtree(build_dir, True) + build_dir = tempfile.mkdtemp() + + try: + base_name = ARTIFACT_ID + '-' + version + aar_file = os.path.join(build_dir, base_name + '.aar') + third_party_licenses_file = os.path.join(build_dir, 'LICENSE.md') + pom_file = os.path.join(build_dir, base_name + '.pom') + + logging.info('Building at %s', build_dir) + BuildAar(ARCHS, + aar_file, + use_goma=use_goma, + ext_build_dir=os.path.join(build_dir, 'aar-build')) + _GeneratePom(pom_file, version, commit) + + _UploadFile(user, api_key, aar_file, version, base_name + '.aar') + _UploadFile(user, api_key, third_party_licenses_file, version, + 'THIRD_PARTY_LICENSES.md') + _UploadFile(user, api_key, pom_file, version, base_name + '.pom') + + tests_pass = skip_tests or _TestAAR(build_dir, user, api_key, version) + if not tests_pass: + logging.info('Discarding library.') + _PublishAAR(user, api_key, version, {'discard': True}) + _DeleteUnpublishedVersion(user, api_key, version) + raise Exception('Test failure. Discarded library.') + + if publish: + logging.info('Publishing library.') + _PublishAAR(user, api_key, version, {}) + else: + logging.info( + 'Note: The library has not not been published automatically.' 
+ ' Please do so manually if desired.') + finally: + if use_tmp_dir: + shutil.rmtree(build_dir, True) def main(): - args = _ParseArgs() - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - ReleaseAar(args.use_goma, args.skip_tests, args.publish, args.build_dir) + args = _ParseArgs() + logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) + ReleaseAar(args.use_goma, args.skip_tests, args.publish, args.build_dir) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/android/suppressions.xml b/tools_webrtc/android/suppressions.xml deleted file mode 100644 index 87b9387c3b..0000000000 --- a/tools_webrtc/android/suppressions.xml +++ /dev/null @@ -1,61 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/tools_webrtc/apple/copy_framework_header.py b/tools_webrtc/apple/copy_framework_header.py new file mode 100755 index 0000000000..d194650934 --- /dev/null +++ b/tools_webrtc/apple/copy_framework_header.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +import argparse +import re +import sys + + +def replace_double_quote(line): + re_rtc_import = re.compile( + r'(\s*)#import\s+"(\S+/|)(\w+\+|)RTC(\w+)\.h"(.*)', re.DOTALL) + match = re_rtc_import.match(line) + if not match: + return line + + return '%s#import %s' % (match.group(1), match.group(3), + match.group(4), match.group(5)) + + +def process(input_file, output_file): + with open(input_file, 'rb') as fb, open(output_file, 'wb') as fw: + for line in fb.read().decode('UTF-8').splitlines(): + fw.write(replace_double_quote(line).encode('UTF-8')) + fw.write(b"\n") + + +def main(): + parser = argparse.ArgumentParser( + description= + "Copy headers of framework and replace double-quoted includes to" + + " angle-bracketed respectively.") + parser.add_argument('--input', + help='Input header files to copy.', + type=str) + parser.add_argument('--output', help='Output file.', type=str) + parsed_args = parser.parse_args() + return process(parsed_args.input, parsed_args.output) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/tools_webrtc/apple/copy_framework_header_test.py b/tools_webrtc/apple/copy_framework_header_test.py new file mode 100644 index 0000000000..24bab3eb0b --- /dev/null +++ b/tools_webrtc/apple/copy_framework_header_test.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. 
+ +import unittest +from copy_framework_header import replace_double_quote + + +class TestCopyFramework(unittest.TestCase): + def testReplaceDoubleQuote(self): + self.assertEqual(replace_double_quote("""#import "RTCMacros.h\""""), + """#import """) + self.assertEqual(replace_double_quote("""#import "RTCMacros.h\"\n"""), + """#import \n""") + self.assertEqual( + replace_double_quote("""#import "UIDevice+RTCDevice.h\"\n"""), + """#import \n""") + self.assertEqual( + replace_double_quote("#import \"components/video_codec/" + + "RTCVideoDecoderFactoryH264.h\"\n"), + """#import \n""") + self.assertEqual( + replace_double_quote( + """@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *\n""" + ), + """@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *\n""") + + +if __name__ == '__main__': + unittest.main() diff --git a/tools_webrtc/autoroller/roll_deps.py b/tools_webrtc/autoroller/roll_deps.py index 77dd7d8d73..286c3c4cda 100755 --- a/tools_webrtc/autoroller/roll_deps.py +++ b/tools_webrtc/autoroller/roll_deps.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Script to automatically roll dependencies in the WebRTC DEPS file.""" import argparse @@ -19,16 +18,18 @@ import sys import urllib2 + def FindSrcDirPath(): - """Returns the abs path to the src/ dir of the project.""" - src_dir = os.path.dirname(os.path.abspath(__file__)) - while os.path.basename(src_dir) != 'src': - src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) - return src_dir + """Returns the abs path to the src/ dir of the project.""" + src_dir = os.path.dirname(os.path.abspath(__file__)) + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir + # Skip these dependencies (list without solution name prefix). 
DONT_AUTOROLL_THESE = [ - 'src/examples/androidtests/third_party/gradle', + 'src/examples/androidtests/third_party/gradle', ] # These dependencies are missing in chromium/src/DEPS, either unused or already @@ -36,19 +37,18 @@ def FindSrcDirPath(): # but we pull it through a subtree mirror, so therefore it isn't listed in # Chromium's deps but it is in ours. WEBRTC_ONLY_DEPS = [ - 'src/base', - 'src/build', - 'src/buildtools', - 'src/ios', - 'src/testing', - 'src/third_party', - 'src/third_party/findbugs', - 'src/third_party/gtest-parallel', - 'src/third_party/yasm/binaries', - 'src/tools', + 'src/base', + 'src/build', + 'src/buildtools', + 'src/ios', + 'src/testing', + 'src/third_party', + 'src/third_party/findbugs', + 'src/third_party/gtest-parallel', + 'src/third_party/yasm/binaries', + 'src/tools', ] - WEBRTC_URL = 'https://webrtc.googlesource.com/src' CHROMIUM_SRC_URL = 'https://chromium.googlesource.com/chromium/src' CHROMIUM_COMMIT_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s' @@ -56,7 +56,7 @@ def FindSrcDirPath(): CHROMIUM_FILE_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s/%s' COMMIT_POSITION_RE = re.compile('^Cr-Commit-Position: .*#([0-9]+).*$') -CLANG_REVISION_RE = re.compile(r'^CLANG_REVISION = \'([0-9a-z]+)\'$') +CLANG_REVISION_RE = re.compile(r'^CLANG_REVISION = \'([-0-9a-z]+)\'$') ROLL_BRANCH_NAME = 'roll_chromium_revision' SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -71,7 +71,6 @@ def FindSrcDirPath(): NOTIFY_EMAIL = 'webrtc-trooper@grotations.appspotmail.com' - sys.path.append(os.path.join(CHECKOUT_SRC_DIR, 'build')) import find_depot_tools @@ -82,8 +81,8 @@ def FindSrcDirPath(): 'clang', 'scripts', 'update.py') DepsEntry = collections.namedtuple('DepsEntry', 'path url revision') -ChangedDep = collections.namedtuple( - 'ChangedDep', 'path url current_rev new_rev') +ChangedDep = collections.namedtuple('ChangedDep', + 'path url current_rev new_rev') CipdDepsEntry = collections.namedtuple('CipdDepsEntry', 'path packages') ChangedCipdPackage = 
collections.namedtuple( 'ChangedCipdPackage', 'path package current_version new_version') @@ -94,124 +93,135 @@ def FindSrcDirPath(): class RollError(Exception): - pass + pass + + +def StrExpansion(): + return lambda str_value: str_value def VarLookup(local_scope): - return lambda var_name: local_scope['vars'][var_name] + return lambda var_name: local_scope['vars'][var_name] def ParseDepsDict(deps_content): - local_scope = {} - global_scope = { - 'Var': VarLookup(local_scope), - 'deps_os': {}, - } - exec (deps_content, global_scope, local_scope) - return local_scope + local_scope = {} + global_scope = { + 'Str': StrExpansion(), + 'Var': VarLookup(local_scope), + 'deps_os': {}, + } + exec (deps_content, global_scope, local_scope) + return local_scope def ParseLocalDepsFile(filename): - with open(filename, 'rb') as f: - deps_content = f.read() - return ParseDepsDict(deps_content) + with open(filename, 'rb') as f: + deps_content = f.read() + return ParseDepsDict(deps_content) def ParseCommitPosition(commit_message): - for line in reversed(commit_message.splitlines()): - m = COMMIT_POSITION_RE.match(line.strip()) - if m: - return int(m.group(1)) - logging.error('Failed to parse commit position id from:\n%s\n', - commit_message) - sys.exit(-1) - - -def _RunCommand(command, working_dir=None, ignore_exit_code=False, - extra_env=None, input_data=None): - """Runs a command and returns the output from that command. + for line in reversed(commit_message.splitlines()): + m = COMMIT_POSITION_RE.match(line.strip()) + if m: + return int(m.group(1)) + logging.error('Failed to parse commit position id from:\n%s\n', + commit_message) + sys.exit(-1) + + +def _RunCommand(command, + working_dir=None, + ignore_exit_code=False, + extra_env=None, + input_data=None): + """Runs a command and returns the output from that command. If the command fails (exit code != 0), the function will exit the process. Returns: A tuple containing the stdout and stderr outputs as strings. 
""" - working_dir = working_dir or CHECKOUT_SRC_DIR - logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir) - env = os.environ.copy() - if extra_env: - assert all(isinstance(value, str) for value in extra_env.values()) - logging.debug('extra env: %s', extra_env) - env.update(extra_env) - p = subprocess.Popen(command, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, env=env, - cwd=working_dir, universal_newlines=True) - std_output, err_output = p.communicate(input_data) - p.stdout.close() - p.stderr.close() - if not ignore_exit_code and p.returncode != 0: - logging.error('Command failed: %s\n' - 'stdout:\n%s\n' - 'stderr:\n%s\n', ' '.join(command), std_output, err_output) - sys.exit(p.returncode) - return std_output, err_output + working_dir = working_dir or CHECKOUT_SRC_DIR + logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir) + env = os.environ.copy() + if extra_env: + assert all(isinstance(value, str) for value in extra_env.values()) + logging.debug('extra env: %s', extra_env) + env.update(extra_env) + p = subprocess.Popen(command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env, + cwd=working_dir, + universal_newlines=True) + std_output, err_output = p.communicate(input_data) + p.stdout.close() + p.stderr.close() + if not ignore_exit_code and p.returncode != 0: + logging.error('Command failed: %s\n' + 'stdout:\n%s\n' + 'stderr:\n%s\n', ' '.join(command), std_output, + err_output) + sys.exit(p.returncode) + return std_output, err_output def _GetBranches(): - """Returns a tuple of active,branches. + """Returns a tuple of active,branches. The 'active' is the name of the currently active branch and 'branches' is a list of all branches. """ - lines = _RunCommand(['git', 'branch'])[0].split('\n') - branches = [] - active = '' - for line in lines: - if '*' in line: - # The assumption is that the first char will always be the '*'. 
- active = line[1:].strip() - branches.append(active) - else: - branch = line.strip() - if branch: - branches.append(branch) - return active, branches + lines = _RunCommand(['git', 'branch'])[0].split('\n') + branches = [] + active = '' + for line in lines: + if '*' in line: + # The assumption is that the first char will always be the '*'. + active = line[1:].strip() + branches.append(active) + else: + branch = line.strip() + if branch: + branches.append(branch) + return active, branches def _ReadGitilesContent(url): - # Download and decode BASE64 content until - # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed. - base64_content = ReadUrlContent(url + '?format=TEXT') - return base64.b64decode(base64_content[0]) + # Download and decode BASE64 content until + # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed. + base64_content = ReadUrlContent(url + '?format=TEXT') + return base64.b64decode(base64_content[0]) def ReadRemoteCrFile(path_below_src, revision): - """Reads a remote Chromium file of a specific revision. Returns a string.""" - return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE % (revision, - path_below_src)) + """Reads a remote Chromium file of a specific revision. Returns a string.""" + return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE % + (revision, path_below_src)) def ReadRemoteCrCommit(revision): - """Reads a remote Chromium commit message. Returns a string.""" - return _ReadGitilesContent(CHROMIUM_COMMIT_TEMPLATE % revision) + """Reads a remote Chromium commit message. Returns a string.""" + return _ReadGitilesContent(CHROMIUM_COMMIT_TEMPLATE % revision) def ReadUrlContent(url): - """Connect to a remote host and read the contents. Returns a list of lines.""" - conn = urllib2.urlopen(url) - try: - return conn.readlines() - except IOError as e: - logging.exception('Error connecting to %s. Error: %s', url, e) - raise - finally: - conn.close() + """Connect to a remote host and read the contents. 
Returns a list of lines.""" + conn = urllib2.urlopen(url) + try: + return conn.readlines() + except IOError as e: + logging.exception('Error connecting to %s. Error: %s', url, e) + raise + finally: + conn.close() def GetMatchingDepsEntries(depsentry_dict, dir_path): - """Gets all deps entries matching the provided path. + """Gets all deps entries matching the provided path. This list may contain more than one DepsEntry object. Example: dir_path='src/testing' would give results containing both @@ -222,69 +232,72 @@ def GetMatchingDepsEntries(depsentry_dict, dir_path): Returns: A list of DepsEntry objects. """ - result = [] - for path, depsentry in depsentry_dict.iteritems(): - if path == dir_path: - result.append(depsentry) - else: - parts = path.split('/') - if all(part == parts[i] - for i, part in enumerate(dir_path.split('/'))): - result.append(depsentry) - return result + result = [] + for path, depsentry in depsentry_dict.iteritems(): + if path == dir_path: + result.append(depsentry) + else: + parts = path.split('/') + if all(part == parts[i] + for i, part in enumerate(dir_path.split('/'))): + result.append(depsentry) + return result def BuildDepsentryDict(deps_dict): - """Builds a dict of paths to DepsEntry objects from a raw parsed deps dict.""" - result = {} - - def AddDepsEntries(deps_subdict): - for path, dep in deps_subdict.iteritems(): - if path in result: - continue - if not isinstance(dep, dict): - dep = {'url': dep} - if dep.get('dep_type') == 'cipd': - result[path] = CipdDepsEntry(path, dep['packages']) - else: - if '@' not in dep['url']: - continue - url, revision = dep['url'].split('@') - result[path] = DepsEntry(path, url, revision) - - AddDepsEntries(deps_dict['deps']) - for deps_os in ['win', 'mac', 'unix', 'android', 'ios', 'unix']: - AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {})) - return result + """Builds a dict of paths to DepsEntry objects from a raw parsed deps dict.""" + result = {} + + def AddDepsEntries(deps_subdict): + 
for path, dep in deps_subdict.iteritems(): + if path in result: + continue + if not isinstance(dep, dict): + dep = {'url': dep} + if dep.get('dep_type') == 'cipd': + result[path] = CipdDepsEntry(path, dep['packages']) + else: + if '@' not in dep['url']: + continue + url, revision = dep['url'].split('@') + result[path] = DepsEntry(path, url, revision) + + AddDepsEntries(deps_dict['deps']) + for deps_os in ['win', 'mac', 'unix', 'android', 'ios', 'unix']: + AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {})) + return result def _FindChangedCipdPackages(path, old_pkgs, new_pkgs): - pkgs_equal = ({p['package'] for p in old_pkgs} == - {p['package'] for p in new_pkgs}) - assert pkgs_equal, ('Old: %s\n New: %s.\nYou need to do a manual roll ' - 'and remove/add entries in DEPS so the old and new ' - 'list match.' % (old_pkgs, new_pkgs)) - for old_pkg in old_pkgs: - for new_pkg in new_pkgs: - old_version = old_pkg['version'] - new_version = new_pkg['version'] - if (old_pkg['package'] == new_pkg['package'] and - old_version != new_version): - logging.debug('Roll dependency %s to %s', path, new_version) - yield ChangedCipdPackage(path, old_pkg['package'], - old_version, new_version) + pkgs_equal = ({p['package'] + for p in old_pkgs} == {p['package'] + for p in new_pkgs}) + assert pkgs_equal, ('Old: %s\n New: %s.\nYou need to do a manual roll ' + 'and remove/add entries in DEPS so the old and new ' + 'list match.' % (old_pkgs, new_pkgs)) + for old_pkg in old_pkgs: + for new_pkg in new_pkgs: + old_version = old_pkg['version'] + new_version = new_pkg['version'] + if (old_pkg['package'] == new_pkg['package'] + and old_version != new_version): + logging.debug('Roll dependency %s to %s', path, new_version) + yield ChangedCipdPackage(path, old_pkg['package'], old_version, + new_version) def _FindNewDeps(old, new): - """ Gather dependencies only in |new| and return corresponding paths. 
""" - old_entries = set(BuildDepsentryDict(old)) - new_entries = set(BuildDepsentryDict(new)) - return [path for path in new_entries - old_entries - if path not in DONT_AUTOROLL_THESE] + """ Gather dependencies only in |new| and return corresponding paths. """ + old_entries = set(BuildDepsentryDict(old)) + new_entries = set(BuildDepsentryDict(new)) + return [ + path for path in new_entries - old_entries + if path not in DONT_AUTOROLL_THESE + ] def FindAddedDeps(webrtc_deps, new_cr_deps): - """ + """ Calculate new deps entries of interest. Ideally, that would mean: only appearing in chromium DEPS @@ -305,16 +318,18 @@ def FindAddedDeps(webrtc_deps, new_cr_deps): A list of paths added dependencies sitting in |ANDROID_DEPS_PATH|. A list of paths for other added dependencies. """ - all_added_deps = _FindNewDeps(webrtc_deps, new_cr_deps) - generated_android_deps = [path for path in all_added_deps - if path.startswith(ANDROID_DEPS_PATH)] - other_deps = [path for path in all_added_deps - if path not in generated_android_deps] - return generated_android_deps, other_deps + all_added_deps = _FindNewDeps(webrtc_deps, new_cr_deps) + generated_android_deps = [ + path for path in all_added_deps if path.startswith(ANDROID_DEPS_PATH) + ] + other_deps = [ + path for path in all_added_deps if path not in generated_android_deps + ] + return generated_android_deps, other_deps def FindRemovedDeps(webrtc_deps, new_cr_deps): - """ + """ Calculate obsolete deps entries. Ideally, that would mean: no more appearing in chromium DEPS @@ -337,18 +352,20 @@ def FindRemovedDeps(webrtc_deps, new_cr_deps): A list of paths of dependencies removed from |ANDROID_DEPS_PATH|. A list of paths of unexpected disappearing dependencies. """ - all_removed_deps = _FindNewDeps(new_cr_deps, webrtc_deps) - generated_android_deps = [path for path in all_removed_deps - if path.startswith(ANDROID_DEPS_PATH)] - # Webrtc-only dependencies are handled in CalculateChangedDeps. 
- other_deps = [path for path in all_removed_deps - if path not in generated_android_deps and - path not in WEBRTC_ONLY_DEPS] - return generated_android_deps, other_deps + all_removed_deps = _FindNewDeps(new_cr_deps, webrtc_deps) + generated_android_deps = [ + path for path in all_removed_deps if path.startswith(ANDROID_DEPS_PATH) + ] + # Webrtc-only dependencies are handled in CalculateChangedDeps. + other_deps = [ + path for path in all_removed_deps + if path not in generated_android_deps and path not in WEBRTC_ONLY_DEPS + ] + return generated_android_deps, other_deps def CalculateChangedDeps(webrtc_deps, new_cr_deps): - """ + """ Calculate changed deps entries based on entries defined in the WebRTC DEPS file: - If a shared dependency with the Chromium DEPS file: roll it to the same @@ -362,354 +379,381 @@ def CalculateChangedDeps(webrtc_deps, new_cr_deps): Returns: A list of ChangedDep objects representing the changed deps. """ - result = [] - webrtc_entries = BuildDepsentryDict(webrtc_deps) - new_cr_entries = BuildDepsentryDict(new_cr_deps) - for path, webrtc_deps_entry in webrtc_entries.iteritems(): - if path in DONT_AUTOROLL_THESE: - continue - cr_deps_entry = new_cr_entries.get(path) - if cr_deps_entry: - assert type(cr_deps_entry) is type(webrtc_deps_entry) - - if isinstance(cr_deps_entry, CipdDepsEntry): - result.extend(_FindChangedCipdPackages(path, webrtc_deps_entry.packages, - cr_deps_entry.packages)) - continue - - # Use the revision from Chromium's DEPS file. - new_rev = cr_deps_entry.revision - assert webrtc_deps_entry.url == cr_deps_entry.url, ( - 'WebRTC DEPS entry %s has a different URL (%s) than Chromium (%s).' % - (path, webrtc_deps_entry.url, cr_deps_entry.url)) - else: - if isinstance(webrtc_deps_entry, DepsEntry): - # Use the HEAD of the deps repo. - stdout, _ = _RunCommand(['git', 'ls-remote', webrtc_deps_entry.url, - 'HEAD']) - new_rev = stdout.strip().split('\t')[0] - else: - # The dependency has been removed from chromium. 
- # This is handled by FindRemovedDeps. - continue - - # Check if an update is necessary. - if webrtc_deps_entry.revision != new_rev: - logging.debug('Roll dependency %s to %s', path, new_rev) - result.append(ChangedDep(path, webrtc_deps_entry.url, - webrtc_deps_entry.revision, new_rev)) - return sorted(result) + result = [] + webrtc_entries = BuildDepsentryDict(webrtc_deps) + new_cr_entries = BuildDepsentryDict(new_cr_deps) + for path, webrtc_deps_entry in webrtc_entries.iteritems(): + if path in DONT_AUTOROLL_THESE: + continue + cr_deps_entry = new_cr_entries.get(path) + if cr_deps_entry: + assert type(cr_deps_entry) is type(webrtc_deps_entry) + + if isinstance(cr_deps_entry, CipdDepsEntry): + result.extend( + _FindChangedCipdPackages(path, webrtc_deps_entry.packages, + cr_deps_entry.packages)) + continue + + # Use the revision from Chromium's DEPS file. + new_rev = cr_deps_entry.revision + assert webrtc_deps_entry.url == cr_deps_entry.url, ( + 'WebRTC DEPS entry %s has a different URL (%s) than Chromium (%s).' + % (path, webrtc_deps_entry.url, cr_deps_entry.url)) + else: + if isinstance(webrtc_deps_entry, DepsEntry): + # Use the HEAD of the deps repo. + stdout, _ = _RunCommand( + ['git', 'ls-remote', webrtc_deps_entry.url, 'HEAD']) + new_rev = stdout.strip().split('\t')[0] + else: + # The dependency has been removed from chromium. + # This is handled by FindRemovedDeps. + continue + + # Check if an update is necessary. 
+ if webrtc_deps_entry.revision != new_rev: + logging.debug('Roll dependency %s to %s', path, new_rev) + result.append( + ChangedDep(path, webrtc_deps_entry.url, + webrtc_deps_entry.revision, new_rev)) + return sorted(result) def CalculateChangedClang(new_cr_rev): - def GetClangRev(lines): - for line in lines: - match = CLANG_REVISION_RE.match(line) - if match: - return match.group(1) - raise RollError('Could not parse Clang revision!') - - with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f: - current_lines = f.readlines() - current_rev = GetClangRev(current_lines) - - new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH, - new_cr_rev).splitlines() - new_rev = GetClangRev(new_clang_update_py) - return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, new_rev) - - -def GenerateCommitMessage(rev_update, current_commit_pos, new_commit_pos, - changed_deps_list, - added_deps_paths=None, - removed_deps_paths=None, - clang_change=None, - ): - current_cr_rev = rev_update.current_chromium_rev[0:10] - new_cr_rev = rev_update.new_chromium_rev[0:10] - rev_interval = '%s..%s' % (current_cr_rev, new_cr_rev) - git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos) - - commit_msg = ['Roll chromium_revision %s (%s)\n' % (rev_interval, - git_number_interval), - 'Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval), - 'Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE % - rev_interval)] - - def Section(adjective, deps): - noun = 'dependency' if len(deps) == 1 else 'dependencies' - commit_msg.append('%s %s' % (adjective, noun)) - - tbr_authors = '' - if changed_deps_list: - Section('Changed', changed_deps_list) - - for c in changed_deps_list: - if isinstance(c, ChangedCipdPackage): - commit_msg.append('* %s: %s..%s' % (c.path, c.current_version, - c.new_version)) - else: - commit_msg.append('* %s: %s/+log/%s..%s' % (c.path, c.url, - c.current_rev[0:10], - c.new_rev[0:10])) - if 'libvpx' in c.path: - tbr_authors += 'marpan@webrtc.org, 
jianj@chromium.org, ' - - if added_deps_paths: - Section('Added', added_deps_paths) - commit_msg.extend('* %s' % p for p in added_deps_paths) - - if removed_deps_paths: - Section('Removed', removed_deps_paths) - commit_msg.extend('* %s' % p for p in removed_deps_paths) - - if any([changed_deps_list, - added_deps_paths, - removed_deps_paths]): - change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, 'DEPS') - commit_msg.append('DEPS diff: %s\n' % change_url) - else: - commit_msg.append('No dependencies changed.') - - if clang_change and clang_change.current_rev != clang_change.new_rev: - commit_msg.append('Clang version changed %s:%s' % - (clang_change.current_rev, clang_change.new_rev)) - change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, - CLANG_UPDATE_SCRIPT_URL_PATH) - commit_msg.append('Details: %s\n' % change_url) - else: - commit_msg.append('No update to Clang.\n') - - # TBR needs to be non-empty for Gerrit to process it. - git_author = _RunCommand(['git', 'config', 'user.email'], - working_dir=CHECKOUT_SRC_DIR)[0].splitlines()[0] - tbr_authors = git_author + ',' + tbr_authors - - commit_msg.append('TBR=%s' % tbr_authors) - commit_msg.append('BUG=None') - return '\n'.join(commit_msg) + def GetClangRev(lines): + for line in lines: + match = CLANG_REVISION_RE.match(line) + if match: + return match.group(1) + raise RollError('Could not parse Clang revision!') + + with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f: + current_lines = f.readlines() + current_rev = GetClangRev(current_lines) + + new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH, + new_cr_rev).splitlines() + new_rev = GetClangRev(new_clang_update_py) + return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, + new_rev) + + +def GenerateCommitMessage( + rev_update, + current_commit_pos, + new_commit_pos, + changed_deps_list, + added_deps_paths=None, + removed_deps_paths=None, + clang_change=None, +): + current_cr_rev = rev_update.current_chromium_rev[0:10] + new_cr_rev 
= rev_update.new_chromium_rev[0:10] + rev_interval = '%s..%s' % (current_cr_rev, new_cr_rev) + git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos) + + commit_msg = [ + 'Roll chromium_revision %s (%s)\n' % + (rev_interval, git_number_interval), + 'Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval), + 'Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE % rev_interval) + ] + + def Section(adjective, deps): + noun = 'dependency' if len(deps) == 1 else 'dependencies' + commit_msg.append('%s %s' % (adjective, noun)) + + tbr_authors = '' + if changed_deps_list: + Section('Changed', changed_deps_list) + + for c in changed_deps_list: + if isinstance(c, ChangedCipdPackage): + commit_msg.append('* %s: %s..%s' % + (c.path, c.current_version, c.new_version)) + else: + commit_msg.append( + '* %s: %s/+log/%s..%s' % + (c.path, c.url, c.current_rev[0:10], c.new_rev[0:10])) + if 'libvpx' in c.path: + tbr_authors += 'marpan@webrtc.org, jianj@chromium.org, ' + + if added_deps_paths: + Section('Added', added_deps_paths) + commit_msg.extend('* %s' % p for p in added_deps_paths) + + if removed_deps_paths: + Section('Removed', removed_deps_paths) + commit_msg.extend('* %s' % p for p in removed_deps_paths) + + if any([changed_deps_list, added_deps_paths, removed_deps_paths]): + change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, 'DEPS') + commit_msg.append('DEPS diff: %s\n' % change_url) + else: + commit_msg.append('No dependencies changed.') + + if clang_change and clang_change.current_rev != clang_change.new_rev: + commit_msg.append('Clang version changed %s:%s' % + (clang_change.current_rev, clang_change.new_rev)) + change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, + CLANG_UPDATE_SCRIPT_URL_PATH) + commit_msg.append('Details: %s\n' % change_url) + else: + commit_msg.append('No update to Clang.\n') + + # TBR needs to be non-empty for Gerrit to process it. 
+ git_author = _RunCommand(['git', 'config', 'user.email'], + working_dir=CHECKOUT_SRC_DIR)[0].splitlines()[0] + tbr_authors = git_author + ',' + tbr_authors + + commit_msg.append('TBR=%s' % tbr_authors) + commit_msg.append('BUG=None') + return '\n'.join(commit_msg) def UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content): - """Update the DEPS file with the new revision.""" - - with open(deps_filename, 'rb') as deps_file: - deps_content = deps_file.read() - - # Update the chromium_revision variable. - deps_content = deps_content.replace(rev_update.current_chromium_rev, - rev_update.new_chromium_rev) - - # Add and remove dependencies. For now: only generated android deps. - # Since gclient cannot add or remove deps, we on the fact that - # these android deps are located in one place we can copy/paste. - deps_re = re.compile(ANDROID_DEPS_START + '.*' + ANDROID_DEPS_END, - re.DOTALL) - new_deps = deps_re.search(new_cr_content) - old_deps = deps_re.search(deps_content) - if not new_deps or not old_deps: - faulty = 'Chromium' if not new_deps else 'WebRTC' - raise RollError('Was expecting to find "%s" and "%s"\n' - 'in %s DEPS' - % (ANDROID_DEPS_START, ANDROID_DEPS_END, faulty)) - deps_content = deps_re.sub(new_deps.group(0), deps_content) - - with open(deps_filename, 'wb') as deps_file: - deps_file.write(deps_content) - - # Update each individual DEPS entry. - for dep in changed_deps: - local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path) - if not os.path.isdir(local_dep_dir): - raise RollError( - 'Cannot find local directory %s. Either run\n' - 'gclient sync --deps=all\n' - 'or make sure the .gclient file for your solution contains all ' - 'platforms in the target_os list, i.e.\n' - 'target_os = ["android", "unix", "mac", "ios", "win"];\n' - 'Then run "gclient sync" again.' 
% local_dep_dir) - if isinstance(dep, ChangedCipdPackage): - package = dep.package.format() # Eliminate double curly brackets - update = '%s:%s@%s' % (dep.path, package, dep.new_version) - else: - update = '%s@%s' % (dep.path, dep.new_rev) - _RunCommand(['gclient', 'setdep', '--revision', update], - working_dir=CHECKOUT_SRC_DIR) + """Update the DEPS file with the new revision.""" + + with open(deps_filename, 'rb') as deps_file: + deps_content = deps_file.read() + + # Update the chromium_revision variable. + deps_content = deps_content.replace(rev_update.current_chromium_rev, + rev_update.new_chromium_rev) + + # Add and remove dependencies. For now: only generated android deps. + # Since gclient cannot add or remove deps, we on the fact that + # these android deps are located in one place we can copy/paste. + deps_re = re.compile(ANDROID_DEPS_START + '.*' + ANDROID_DEPS_END, + re.DOTALL) + new_deps = deps_re.search(new_cr_content) + old_deps = deps_re.search(deps_content) + if not new_deps or not old_deps: + faulty = 'Chromium' if not new_deps else 'WebRTC' + raise RollError('Was expecting to find "%s" and "%s"\n' + 'in %s DEPS' % + (ANDROID_DEPS_START, ANDROID_DEPS_END, faulty)) + deps_content = deps_re.sub(new_deps.group(0), deps_content) + + with open(deps_filename, 'wb') as deps_file: + deps_file.write(deps_content) + + # Update each individual DEPS entry. + for dep in changed_deps: + local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path) + if not os.path.isdir(local_dep_dir): + raise RollError( + 'Cannot find local directory %s. Either run\n' + 'gclient sync --deps=all\n' + 'or make sure the .gclient file for your solution contains all ' + 'platforms in the target_os list, i.e.\n' + 'target_os = ["android", "unix", "mac", "ios", "win"];\n' + 'Then run "gclient sync" again.' 
% local_dep_dir) + if isinstance(dep, ChangedCipdPackage): + package = dep.package.format() # Eliminate double curly brackets + update = '%s:%s@%s' % (dep.path, package, dep.new_version) + else: + update = '%s@%s' % (dep.path, dep.new_rev) + _RunCommand(['gclient', 'setdep', '--revision', update], + working_dir=CHECKOUT_SRC_DIR) def _IsTreeClean(): - stdout, _ = _RunCommand(['git', 'status', '--porcelain']) - if len(stdout) == 0: - return True + stdout, _ = _RunCommand(['git', 'status', '--porcelain']) + if len(stdout) == 0: + return True - logging.error('Dirty/unversioned files:\n%s', stdout) - return False + logging.error('Dirty/unversioned files:\n%s', stdout) + return False -def _EnsureUpdatedMasterBranch(dry_run): - current_branch = _RunCommand( - ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[0].splitlines()[0] - if current_branch != 'master': - logging.error('Please checkout the master branch and re-run this script.') - if not dry_run: - sys.exit(-1) +def _EnsureUpdatedMainBranch(dry_run): + current_branch = _RunCommand(['git', 'rev-parse', '--abbrev-ref', + 'HEAD'])[0].splitlines()[0] + if current_branch != 'main': + logging.error( + 'Please checkout the main branch and re-run this script.') + if not dry_run: + sys.exit(-1) - logging.info('Updating master branch...') - _RunCommand(['git', 'pull']) + logging.info('Updating main branch...') + _RunCommand(['git', 'pull']) def _CreateRollBranch(dry_run): - logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME) - if not dry_run: - _RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME]) + logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME) + if not dry_run: + _RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME]) def _RemovePreviousRollBranch(dry_run): - active_branch, branches = _GetBranches() - if active_branch == ROLL_BRANCH_NAME: - active_branch = 'master' - if ROLL_BRANCH_NAME in branches: - logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME) - if not dry_run: - 
_RunCommand(['git', 'checkout', active_branch]) - _RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME]) + active_branch, branches = _GetBranches() + if active_branch == ROLL_BRANCH_NAME: + active_branch = 'main' + if ROLL_BRANCH_NAME in branches: + logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME) + if not dry_run: + _RunCommand(['git', 'checkout', active_branch]) + _RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME]) def _LocalCommit(commit_msg, dry_run): - logging.info('Committing changes locally.') - if not dry_run: - _RunCommand(['git', 'add', '--update', '.']) - _RunCommand(['git', 'commit', '-m', commit_msg]) + logging.info('Committing changes locally.') + if not dry_run: + _RunCommand(['git', 'add', '--update', '.']) + _RunCommand(['git', 'commit', '-m', commit_msg]) def ChooseCQMode(skip_cq, cq_over, current_commit_pos, new_commit_pos): - if skip_cq: - return 0 - if (new_commit_pos - current_commit_pos) < cq_over: - return 1 - return 2 + if skip_cq: + return 0 + if (new_commit_pos - current_commit_pos) < cq_over: + return 1 + return 2 def _UploadCL(commit_queue_mode): - """Upload the committed changes as a changelist to Gerrit. + """Upload the committed changes as a changelist to Gerrit. commit_queue_mode: - 2: Submit to commit queue. - 1: Run trybots but do not submit to CQ. - 0: Skip CQ, upload only. 
""" - cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks', '--send-mail'] - cmd.extend(['--cc', NOTIFY_EMAIL]) - if commit_queue_mode >= 2: - logging.info('Sending the CL to the CQ...') - cmd.extend(['--use-commit-queue']) - elif commit_queue_mode >= 1: - logging.info('Starting CQ dry run...') - cmd.extend(['--cq-dry-run']) - extra_env = { - 'EDITOR': 'true', - 'SKIP_GCE_AUTH_FOR_GIT': '1', - } - stdout, stderr = _RunCommand(cmd, extra_env=extra_env) - logging.debug('Output from "git cl upload":\nstdout:\n%s\n\nstderr:\n%s', - stdout, stderr) + cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks'] + if commit_queue_mode >= 2: + logging.info('Sending the CL to the CQ...') + cmd.extend(['--use-commit-queue']) + cmd.extend(['--send-mail', '--cc', NOTIFY_EMAIL]) + elif commit_queue_mode >= 1: + logging.info('Starting CQ dry run...') + cmd.extend(['--cq-dry-run']) + extra_env = { + 'EDITOR': 'true', + 'SKIP_GCE_AUTH_FOR_GIT': '1', + } + stdout, stderr = _RunCommand(cmd, extra_env=extra_env) + logging.debug('Output from "git cl upload":\nstdout:\n%s\n\nstderr:\n%s', + stdout, stderr) def GetRollRevisionRanges(opts, webrtc_deps): - current_cr_rev = webrtc_deps['vars']['chromium_revision'] - new_cr_rev = opts.revision - if not new_cr_rev: - stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD']) - head_rev = stdout.strip().split('\t')[0] - logging.info('No revision specified. Using HEAD: %s', head_rev) - new_cr_rev = head_rev + current_cr_rev = webrtc_deps['vars']['chromium_revision'] + new_cr_rev = opts.revision + if not new_cr_rev: + stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD']) + head_rev = stdout.strip().split('\t')[0] + logging.info('No revision specified. 
Using HEAD: %s', head_rev) + new_cr_rev = head_rev - return ChromiumRevisionUpdate(current_cr_rev, new_cr_rev) + return ChromiumRevisionUpdate(current_cr_rev, new_cr_rev) def main(): - p = argparse.ArgumentParser() - p.add_argument('--clean', action='store_true', default=False, - help='Removes any previous local roll branch.') - p.add_argument('-r', '--revision', - help=('Chromium Git revision to roll to. Defaults to the ' - 'Chromium HEAD revision if omitted.')) - p.add_argument('--dry-run', action='store_true', default=False, - help=('Calculate changes and modify DEPS, but don\'t create ' - 'any local branch, commit, upload CL or send any ' - 'tryjobs.')) - p.add_argument('-i', '--ignore-unclean-workdir', action='store_true', - default=False, - help=('Ignore if the current branch is not master or if there ' - 'are uncommitted changes (default: %(default)s).')) - grp = p.add_mutually_exclusive_group() - grp.add_argument('--skip-cq', action='store_true', default=False, - help='Skip sending the CL to the CQ (default: %(default)s)') - grp.add_argument('--cq-over', type=int, default=1, - help=('Commit queue dry run if the revision difference ' - 'is below this number (default: %(default)s)')) - p.add_argument('-v', '--verbose', action='store_true', default=False, - help='Be extra verbose in printing of log messages.') - opts = p.parse_args() - - if opts.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) - - if not opts.ignore_unclean_workdir and not _IsTreeClean(): - logging.error('Please clean your local checkout first.') - return 1 - - if opts.clean: - _RemovePreviousRollBranch(opts.dry_run) - - if not opts.ignore_unclean_workdir: - _EnsureUpdatedMasterBranch(opts.dry_run) - - deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS') - webrtc_deps = ParseLocalDepsFile(deps_filename) - - rev_update = GetRollRevisionRanges(opts, webrtc_deps) - - current_commit_pos = ParseCommitPosition( - 
ReadRemoteCrCommit(rev_update.current_chromium_rev)) - new_commit_pos = ParseCommitPosition( - ReadRemoteCrCommit(rev_update.new_chromium_rev)) - - new_cr_content = ReadRemoteCrFile('DEPS', rev_update.new_chromium_rev) - new_cr_deps = ParseDepsDict(new_cr_content) - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - # Discard other deps, assumed to be chromium-only dependencies. - new_generated_android_deps, _ = FindAddedDeps(webrtc_deps, new_cr_deps) - removed_generated_android_deps, other_deps = FindRemovedDeps(webrtc_deps, - new_cr_deps) - if other_deps: - raise RollError('WebRTC DEPS entries are missing from Chromium: %s.\n' - 'Remove them or add them to either ' - 'WEBRTC_ONLY_DEPS or DONT_AUTOROLL_THESE.' % other_deps) - clang_change = CalculateChangedClang(rev_update.new_chromium_rev) - commit_msg = GenerateCommitMessage( - rev_update, current_commit_pos, new_commit_pos, changed_deps, - added_deps_paths=new_generated_android_deps, - removed_deps_paths=removed_generated_android_deps, - clang_change=clang_change) - logging.debug('Commit message:\n%s', commit_msg) - - _CreateRollBranch(opts.dry_run) - if not opts.dry_run: - UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content) - if _IsTreeClean(): - logging.info("No DEPS changes detected, skipping CL creation.") - else: - _LocalCommit(commit_msg, opts.dry_run) - commit_queue_mode = ChooseCQMode(opts.skip_cq, opts.cq_over, - current_commit_pos, new_commit_pos) - logging.info('Uploading CL...') + p = argparse.ArgumentParser() + p.add_argument('--clean', + action='store_true', + default=False, + help='Removes any previous local roll branch.') + p.add_argument('-r', + '--revision', + help=('Chromium Git revision to roll to. 
Defaults to the ' + 'Chromium HEAD revision if omitted.')) + p.add_argument( + '--dry-run', + action='store_true', + default=False, + help=('Calculate changes and modify DEPS, but don\'t create ' + 'any local branch, commit, upload CL or send any ' + 'tryjobs.')) + p.add_argument( + '-i', + '--ignore-unclean-workdir', + action='store_true', + default=False, + help=('Ignore if the current branch is not main or if there ' + 'are uncommitted changes (default: %(default)s).')) + grp = p.add_mutually_exclusive_group() + grp.add_argument( + '--skip-cq', + action='store_true', + default=False, + help='Skip sending the CL to the CQ (default: %(default)s)') + grp.add_argument('--cq-over', + type=int, + default=1, + help=('Commit queue dry run if the revision difference ' + 'is below this number (default: %(default)s)')) + p.add_argument('-v', + '--verbose', + action='store_true', + default=False, + help='Be extra verbose in printing of log messages.') + opts = p.parse_args() + + if opts.verbose: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + if not opts.ignore_unclean_workdir and not _IsTreeClean(): + logging.error('Please clean your local checkout first.') + return 1 + + if opts.clean: + _RemovePreviousRollBranch(opts.dry_run) + + if not opts.ignore_unclean_workdir: + _EnsureUpdatedMainBranch(opts.dry_run) + + deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS') + webrtc_deps = ParseLocalDepsFile(deps_filename) + + rev_update = GetRollRevisionRanges(opts, webrtc_deps) + + current_commit_pos = ParseCommitPosition( + ReadRemoteCrCommit(rev_update.current_chromium_rev)) + new_commit_pos = ParseCommitPosition( + ReadRemoteCrCommit(rev_update.new_chromium_rev)) + + new_cr_content = ReadRemoteCrFile('DEPS', rev_update.new_chromium_rev) + new_cr_deps = ParseDepsDict(new_cr_content) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + # Discard other deps, assumed to be chromium-only dependencies. 
+ new_generated_android_deps, _ = FindAddedDeps(webrtc_deps, new_cr_deps) + removed_generated_android_deps, other_deps = FindRemovedDeps( + webrtc_deps, new_cr_deps) + if other_deps: + raise RollError('WebRTC DEPS entries are missing from Chromium: %s.\n' + 'Remove them or add them to either ' + 'WEBRTC_ONLY_DEPS or DONT_AUTOROLL_THESE.' % + other_deps) + clang_change = CalculateChangedClang(rev_update.new_chromium_rev) + commit_msg = GenerateCommitMessage( + rev_update, + current_commit_pos, + new_commit_pos, + changed_deps, + added_deps_paths=new_generated_android_deps, + removed_deps_paths=removed_generated_android_deps, + clang_change=clang_change) + logging.debug('Commit message:\n%s', commit_msg) + + _CreateRollBranch(opts.dry_run) if not opts.dry_run: - _UploadCL(commit_queue_mode) - return 0 + UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content) + if _IsTreeClean(): + logging.info("No DEPS changes detected, skipping CL creation.") + else: + _LocalCommit(commit_msg, opts.dry_run) + commit_queue_mode = ChooseCQMode(opts.skip_cq, opts.cq_over, + current_commit_pos, new_commit_pos) + logging.info('Uploading CL...') + if not opts.dry_run: + _UploadCL(commit_queue_mode) + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/autoroller/unittests/roll_deps_test.py b/tools_webrtc/autoroller/unittests/roll_deps_test.py index 8d68bddc35..8f1e732ca4 100755 --- a/tools_webrtc/autoroller/unittests/roll_deps_test.py +++ b/tools_webrtc/autoroller/unittests/roll_deps_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython # Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. 
# # Use of this source code is governed by a BSD-style license @@ -14,7 +14,6 @@ import tempfile import unittest - SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) PARENT_DIR = os.path.join(SCRIPT_DIR, os.pardir) sys.path.append(PARENT_DIR) @@ -24,20 +23,18 @@ GetMatchingDepsEntries, ParseDepsDict, ParseLocalDepsFile, UpdateDepsFile, \ ChromiumRevisionUpdate -SRC_DIR = os.path.join(PARENT_DIR, os.pardir, os.pardir) -sys.path.append(os.path.join(SRC_DIR, 'third_party', 'pymock')) import mock TEST_DATA_VARS = { - 'chromium_git': 'https://chromium.googlesource.com', - 'chromium_revision': '1b9c098a08e40114e44b6c1ec33ddf95c40b901d', + 'chromium_git': 'https://chromium.googlesource.com', + 'chromium_revision': '1b9c098a08e40114e44b6c1ec33ddf95c40b901d', } DEPS_ENTRIES = { - 'src/build': 'https://build.com', - 'src/third_party/depot_tools': 'https://depottools.com', - 'src/testing/gtest': 'https://gtest.com', - 'src/testing/gmock': 'https://gmock.com', + 'src/build': 'https://build.com', + 'src/third_party/depot_tools': 'https://depottools.com', + 'src/testing/gtest': 'https://gtest.com', + 'src/testing/gmock': 'https://gmock.com', } BUILD_OLD_REV = '52f7afeca991d96d68cf0507e20dbdd5b845691f' @@ -49,291 +46,298 @@ class TestError(Exception): - pass + pass class FakeCmd(object): - def __init__(self): - self.expectations = [] - - def AddExpectation(self, *args, **kwargs): - returns = kwargs.pop('_returns', None) - ignores = kwargs.pop('_ignores', []) - self.expectations.append((args, kwargs, returns, ignores)) - - def __call__(self, *args, **kwargs): - if not self.expectations: - raise TestError('Got unexpected\n%s\n%s' % (args, kwargs)) - exp_args, exp_kwargs, exp_returns, ignores = self.expectations.pop(0) - for item in ignores: - kwargs.pop(item, None) - if args != exp_args or kwargs != exp_kwargs: - message = 'Expected:\n args: %s\n kwargs: %s\n' % (exp_args, exp_kwargs) - message += 'Got:\n args: %s\n kwargs: %s\n' % (args, kwargs) - raise TestError(message) - 
return exp_returns + def __init__(self): + self.expectations = [] + + def AddExpectation(self, *args, **kwargs): + returns = kwargs.pop('_returns', None) + ignores = kwargs.pop('_ignores', []) + self.expectations.append((args, kwargs, returns, ignores)) + + def __call__(self, *args, **kwargs): + if not self.expectations: + raise TestError('Got unexpected\n%s\n%s' % (args, kwargs)) + exp_args, exp_kwargs, exp_returns, ignores = self.expectations.pop(0) + for item in ignores: + kwargs.pop(item, None) + if args != exp_args or kwargs != exp_kwargs: + message = 'Expected:\n args: %s\n kwargs: %s\n' % (exp_args, + exp_kwargs) + message += 'Got:\n args: %s\n kwargs: %s\n' % (args, kwargs) + raise TestError(message) + return exp_returns class NullCmd(object): - """No-op mock when calls mustn't be checked. """ + """No-op mock when calls mustn't be checked. """ - def __call__(self, *args, **kwargs): - # Empty stdout and stderr. - return None, None + def __call__(self, *args, **kwargs): + # Empty stdout and stderr. 
+ return None, None class TestRollChromiumRevision(unittest.TestCase): - def setUp(self): - self._output_dir = tempfile.mkdtemp() - test_data_dir = os.path.join(SCRIPT_DIR, 'testdata', 'roll_deps') - for test_file in glob.glob(os.path.join(test_data_dir, '*')): - shutil.copy(test_file, self._output_dir) - join = lambda f: os.path.join(self._output_dir, f) - self._webrtc_depsfile = join('DEPS') - self._new_cr_depsfile = join('DEPS.chromium.new') - self._webrtc_depsfile_android = join('DEPS.with_android_deps') - self._new_cr_depsfile_android = join('DEPS.chromium.with_android_deps') - self.fake = FakeCmd() - - def tearDown(self): - shutil.rmtree(self._output_dir, ignore_errors=True) - self.assertEqual(self.fake.expectations, []) - - def testVarLookup(self): - local_scope = {'foo': 'wrong', 'vars': {'foo': 'bar'}} - lookup = roll_deps.VarLookup(local_scope) - self.assertEquals(lookup('foo'), 'bar') - - def testUpdateDepsFile(self): - new_rev = 'aaaaabbbbbcccccdddddeeeeefffff0000011111' - current_rev = TEST_DATA_VARS['chromium_revision'] - - with open(self._new_cr_depsfile_android) as deps_file: - new_cr_contents = deps_file.read() - - UpdateDepsFile(self._webrtc_depsfile, - ChromiumRevisionUpdate(current_rev, new_rev), - [], - new_cr_contents) - with open(self._webrtc_depsfile) as deps_file: - deps_contents = deps_file.read() - self.assertTrue(new_rev in deps_contents, - 'Failed to find %s in\n%s' % (new_rev, deps_contents)) - - def _UpdateDepsSetup(self): - with open(self._webrtc_depsfile_android) as deps_file: - webrtc_contents = deps_file.read() - with open(self._new_cr_depsfile_android) as deps_file: - new_cr_contents = deps_file.read() - webrtc_deps = ParseDepsDict(webrtc_contents) - new_cr_deps = ParseDepsDict(new_cr_contents) - - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - with mock.patch('roll_deps._RunCommand', NullCmd()): - UpdateDepsFile(self._webrtc_depsfile_android, - NO_CHROMIUM_REVISION_UPDATE, - changed_deps, - new_cr_contents) - - 
with open(self._webrtc_depsfile_android) as deps_file: - updated_contents = deps_file.read() - - return webrtc_contents, updated_contents - - def testUpdateAndroidGeneratedDeps(self): - _, updated_contents = self._UpdateDepsSetup() - - changed = 'third_party/android_deps/libs/android_arch_core_common' - changed_version = '1.0.0-cr0' - self.assertTrue(changed in updated_contents) - self.assertTrue(changed_version in updated_contents) - - def testAddAndroidGeneratedDeps(self): - webrtc_contents, updated_contents = self._UpdateDepsSetup() - - added = 'third_party/android_deps/libs/android_arch_lifecycle_common' - self.assertFalse(added in webrtc_contents) - self.assertTrue(added in updated_contents) - - def testRemoveAndroidGeneratedDeps(self): - webrtc_contents, updated_contents = self._UpdateDepsSetup() - - removed = 'third_party/android_deps/libs/android_arch_lifecycle_runtime' - self.assertTrue(removed in webrtc_contents) - self.assertFalse(removed in updated_contents) - - def testParseDepsDict(self): - with open(self._webrtc_depsfile) as deps_file: - deps_contents = deps_file.read() - local_scope = ParseDepsDict(deps_contents) - vars_dict = local_scope['vars'] - - def AssertVar(variable_name): - self.assertEquals(vars_dict[variable_name], TEST_DATA_VARS[variable_name]) - AssertVar('chromium_git') - AssertVar('chromium_revision') - self.assertEquals(len(local_scope['deps']), 3) - self.assertEquals(len(local_scope['deps_os']), 1) - - def testGetMatchingDepsEntriesReturnsPathInSimpleCase(self): - entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing/gtest') - self.assertEquals(len(entries), 1) - self.assertEquals(entries[0], DEPS_ENTRIES['src/testing/gtest']) - - def testGetMatchingDepsEntriesHandlesSimilarStartingPaths(self): - entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing') - self.assertEquals(len(entries), 2) - - def testGetMatchingDepsEntriesHandlesTwoPathsWithIdenticalFirstParts(self): - entries = GetMatchingDepsEntries(DEPS_ENTRIES, 
'src/build') - self.assertEquals(len(entries), 1) - - - def testCalculateChangedDeps(self): - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile) - with mock.patch('roll_deps._RunCommand', self.fake): - _SetupGitLsRemoteCall( - self.fake, 'https://chromium.googlesource.com/chromium/src/build', - BUILD_NEW_REV) - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - - self.assertEquals(len(changed_deps), 3) - self.assertEquals(changed_deps[0].path, 'src/build') - self.assertEquals(changed_deps[0].current_rev, BUILD_OLD_REV) - self.assertEquals(changed_deps[0].new_rev, BUILD_NEW_REV) - - self.assertEquals(changed_deps[1].path, 'src/third_party/depot_tools') - self.assertEquals(changed_deps[1].current_rev, DEPOTTOOLS_OLD_REV) - self.assertEquals(changed_deps[1].new_rev, DEPOTTOOLS_NEW_REV) - - self.assertEquals(changed_deps[2].path, 'src/third_party/xstream') - self.assertEquals(changed_deps[2].package, 'chromium/third_party/xstream') - self.assertEquals(changed_deps[2].current_version, 'version:1.4.8-cr0') - self.assertEquals(changed_deps[2].new_version, 'version:1.10.0-cr0') - - def testWithDistinctDeps(self): - """Check CalculateChangedDeps still works when deps are added/removed. 
""" - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - self.assertEquals(len(changed_deps), 1) - self.assertEquals( - changed_deps[0].path, - 'src/third_party/android_deps/libs/android_arch_core_common') - self.assertEquals( - changed_deps[0].package, - 'chromium/third_party/android_deps/libs/android_arch_core_common') - self.assertEquals(changed_deps[0].current_version, 'version:0.9.0') - self.assertEquals(changed_deps[0].new_version, 'version:1.0.0-cr0') - - def testFindAddedDeps(self): - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - added_android_paths, other_paths = FindAddedDeps(webrtc_deps, new_cr_deps) - self.assertEquals( - added_android_paths, - ['src/third_party/android_deps/libs/android_arch_lifecycle_common']) - self.assertEquals(other_paths, []) - - def testFindRemovedDeps(self): - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - removed_android_paths, other_paths = FindRemovedDeps(webrtc_deps, - new_cr_deps) - self.assertEquals(removed_android_paths, - ['src/third_party/android_deps/libs/android_arch_lifecycle_runtime']) - self.assertEquals(other_paths, []) - - def testMissingDepsIsDetected(self): - """Check an error is reported when deps cannot be automatically removed.""" - # The situation at test is the following: - # * A WebRTC DEPS entry is missing from Chromium. - # * The dependency isn't an android_deps (those are supported). 
- webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - _, other_paths = FindRemovedDeps(webrtc_deps, new_cr_deps) - self.assertEquals(other_paths, ['src/third_party/xstream', - 'src/third_party/depot_tools']) - - def testExpectedDepsIsNotReportedMissing(self): - """Some deps musn't be seen as missing, even if absent from Chromium.""" - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - removed_android_paths, other_paths = FindRemovedDeps(webrtc_deps, - new_cr_deps) - self.assertTrue('src/build' not in removed_android_paths) - self.assertTrue('src/build' not in other_paths) - - def _CommitMessageSetup(self): - webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) - new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) - - changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) - added_paths, _ = FindAddedDeps(webrtc_deps, new_cr_deps) - removed_paths, _ = FindRemovedDeps(webrtc_deps, new_cr_deps) - - current_commit_pos = 'cafe' - new_commit_pos = 'f00d' - - with mock.patch('roll_deps._RunCommand', self.fake): - # We don't really care, but it's needed to construct the message. - self.fake.AddExpectation(['git', 'config', 'user.email'], - _returns=('nobody@nowhere.no', None), - _ignores=['working_dir']) - - commit_msg = GenerateCommitMessage( - NO_CHROMIUM_REVISION_UPDATE, current_commit_pos, new_commit_pos, - changed_deps, added_paths, removed_paths) - - return [l.strip() for l in commit_msg.split('\n')] - - def testChangedDepsInCommitMessage(self): - commit_lines = self._CommitMessageSetup() - - changed = '* src/third_party/android_deps/libs/' \ - 'android_arch_core_common: version:0.9.0..version:1.0.0-cr0' - self.assertTrue(changed in commit_lines) - # Check it is in adequate section. 
- changed_line = commit_lines.index(changed) - self.assertTrue('Changed' in commit_lines[changed_line-1]) - - def testAddedDepsInCommitMessage(self): - commit_lines = self._CommitMessageSetup() - - added = '* src/third_party/android_deps/libs/' \ - 'android_arch_lifecycle_common' - self.assertTrue(added in commit_lines) - # Check it is in adequate section. - added_line = commit_lines.index(added) - self.assertTrue('Added' in commit_lines[added_line-1]) - - def testRemovedDepsInCommitMessage(self): - commit_lines = self._CommitMessageSetup() - - removed = '* src/third_party/android_deps/libs/' \ - 'android_arch_lifecycle_runtime' - self.assertTrue(removed in commit_lines) - # Check it is in adequate section. - removed_line = commit_lines.index(removed) - self.assertTrue('Removed' in commit_lines[removed_line-1]) + def setUp(self): + self._output_dir = tempfile.mkdtemp() + test_data_dir = os.path.join(SCRIPT_DIR, 'testdata', 'roll_deps') + for test_file in glob.glob(os.path.join(test_data_dir, '*')): + shutil.copy(test_file, self._output_dir) + join = lambda f: os.path.join(self._output_dir, f) + self._webrtc_depsfile = join('DEPS') + self._new_cr_depsfile = join('DEPS.chromium.new') + self._webrtc_depsfile_android = join('DEPS.with_android_deps') + self._new_cr_depsfile_android = join('DEPS.chromium.with_android_deps') + self.fake = FakeCmd() + + def tearDown(self): + shutil.rmtree(self._output_dir, ignore_errors=True) + self.assertEqual(self.fake.expectations, []) + + def testVarLookup(self): + local_scope = {'foo': 'wrong', 'vars': {'foo': 'bar'}} + lookup = roll_deps.VarLookup(local_scope) + self.assertEquals(lookup('foo'), 'bar') + + def testUpdateDepsFile(self): + new_rev = 'aaaaabbbbbcccccdddddeeeeefffff0000011111' + current_rev = TEST_DATA_VARS['chromium_revision'] + + with open(self._new_cr_depsfile_android) as deps_file: + new_cr_contents = deps_file.read() + + UpdateDepsFile(self._webrtc_depsfile, + ChromiumRevisionUpdate(current_rev, new_rev), [], + 
new_cr_contents) + with open(self._webrtc_depsfile) as deps_file: + deps_contents = deps_file.read() + self.assertTrue( + new_rev in deps_contents, + 'Failed to find %s in\n%s' % (new_rev, deps_contents)) + + def _UpdateDepsSetup(self): + with open(self._webrtc_depsfile_android) as deps_file: + webrtc_contents = deps_file.read() + with open(self._new_cr_depsfile_android) as deps_file: + new_cr_contents = deps_file.read() + webrtc_deps = ParseDepsDict(webrtc_contents) + new_cr_deps = ParseDepsDict(new_cr_contents) + + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + with mock.patch('roll_deps._RunCommand', NullCmd()): + UpdateDepsFile(self._webrtc_depsfile_android, + NO_CHROMIUM_REVISION_UPDATE, changed_deps, + new_cr_contents) + + with open(self._webrtc_depsfile_android) as deps_file: + updated_contents = deps_file.read() + + return webrtc_contents, updated_contents + + def testUpdateAndroidGeneratedDeps(self): + _, updated_contents = self._UpdateDepsSetup() + + changed = 'third_party/android_deps/libs/android_arch_core_common' + changed_version = '1.0.0-cr0' + self.assertTrue(changed in updated_contents) + self.assertTrue(changed_version in updated_contents) + + def testAddAndroidGeneratedDeps(self): + webrtc_contents, updated_contents = self._UpdateDepsSetup() + + added = 'third_party/android_deps/libs/android_arch_lifecycle_common' + self.assertFalse(added in webrtc_contents) + self.assertTrue(added in updated_contents) + + def testRemoveAndroidGeneratedDeps(self): + webrtc_contents, updated_contents = self._UpdateDepsSetup() + + removed = 'third_party/android_deps/libs/android_arch_lifecycle_runtime' + self.assertTrue(removed in webrtc_contents) + self.assertFalse(removed in updated_contents) + + def testParseDepsDict(self): + with open(self._webrtc_depsfile) as deps_file: + deps_contents = deps_file.read() + local_scope = ParseDepsDict(deps_contents) + vars_dict = local_scope['vars'] + + def AssertVar(variable_name): + 
self.assertEquals(vars_dict[variable_name], + TEST_DATA_VARS[variable_name]) + + AssertVar('chromium_git') + AssertVar('chromium_revision') + self.assertEquals(len(local_scope['deps']), 3) + self.assertEquals(len(local_scope['deps_os']), 1) + + def testGetMatchingDepsEntriesReturnsPathInSimpleCase(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing/gtest') + self.assertEquals(len(entries), 1) + self.assertEquals(entries[0], DEPS_ENTRIES['src/testing/gtest']) + + def testGetMatchingDepsEntriesHandlesSimilarStartingPaths(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/testing') + self.assertEquals(len(entries), 2) + + def testGetMatchingDepsEntriesHandlesTwoPathsWithIdenticalFirstParts(self): + entries = GetMatchingDepsEntries(DEPS_ENTRIES, 'src/build') + self.assertEquals(len(entries), 1) + + def testCalculateChangedDeps(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile) + with mock.patch('roll_deps._RunCommand', self.fake): + _SetupGitLsRemoteCall( + self.fake, + 'https://chromium.googlesource.com/chromium/src/build', + BUILD_NEW_REV) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + + self.assertEquals(len(changed_deps), 3) + self.assertEquals(changed_deps[0].path, 'src/build') + self.assertEquals(changed_deps[0].current_rev, BUILD_OLD_REV) + self.assertEquals(changed_deps[0].new_rev, BUILD_NEW_REV) + + self.assertEquals(changed_deps[1].path, 'src/third_party/depot_tools') + self.assertEquals(changed_deps[1].current_rev, DEPOTTOOLS_OLD_REV) + self.assertEquals(changed_deps[1].new_rev, DEPOTTOOLS_NEW_REV) + + self.assertEquals(changed_deps[2].path, 'src/third_party/xstream') + self.assertEquals(changed_deps[2].package, + 'chromium/third_party/xstream') + self.assertEquals(changed_deps[2].current_version, 'version:1.4.8-cr0') + self.assertEquals(changed_deps[2].new_version, 'version:1.10.0-cr0') + + def testWithDistinctDeps(self): + """Check 
CalculateChangedDeps still works when deps are added/removed. """ + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + self.assertEquals(len(changed_deps), 1) + self.assertEquals( + changed_deps[0].path, + 'src/third_party/android_deps/libs/android_arch_core_common') + self.assertEquals( + changed_deps[0].package, + 'chromium/third_party/android_deps/libs/android_arch_core_common') + self.assertEquals(changed_deps[0].current_version, 'version:0.9.0') + self.assertEquals(changed_deps[0].new_version, 'version:1.0.0-cr0') + + def testFindAddedDeps(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + added_android_paths, other_paths = FindAddedDeps( + webrtc_deps, new_cr_deps) + self.assertEquals(added_android_paths, [ + 'src/third_party/android_deps/libs/android_arch_lifecycle_common' + ]) + self.assertEquals(other_paths, []) + + def testFindRemovedDeps(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + removed_android_paths, other_paths = FindRemovedDeps( + webrtc_deps, new_cr_deps) + self.assertEquals(removed_android_paths, [ + 'src/third_party/android_deps/libs/android_arch_lifecycle_runtime' + ]) + self.assertEquals(other_paths, []) + + def testMissingDepsIsDetected(self): + """Check an error is reported when deps cannot be automatically removed.""" + # The situation at test is the following: + # * A WebRTC DEPS entry is missing from Chromium. + # * The dependency isn't an android_deps (those are supported). 
+ webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + _, other_paths = FindRemovedDeps(webrtc_deps, new_cr_deps) + self.assertEquals( + other_paths, + ['src/third_party/xstream', 'src/third_party/depot_tools']) + + def testExpectedDepsIsNotReportedMissing(self): + """Some deps musn't be seen as missing, even if absent from Chromium.""" + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + removed_android_paths, other_paths = FindRemovedDeps( + webrtc_deps, new_cr_deps) + self.assertTrue('src/build' not in removed_android_paths) + self.assertTrue('src/build' not in other_paths) + + def _CommitMessageSetup(self): + webrtc_deps = ParseLocalDepsFile(self._webrtc_depsfile_android) + new_cr_deps = ParseLocalDepsFile(self._new_cr_depsfile_android) + + changed_deps = CalculateChangedDeps(webrtc_deps, new_cr_deps) + added_paths, _ = FindAddedDeps(webrtc_deps, new_cr_deps) + removed_paths, _ = FindRemovedDeps(webrtc_deps, new_cr_deps) + + current_commit_pos = 'cafe' + new_commit_pos = 'f00d' + + with mock.patch('roll_deps._RunCommand', self.fake): + # We don't really care, but it's needed to construct the message. + self.fake.AddExpectation(['git', 'config', 'user.email'], + _returns=('nobody@nowhere.no', None), + _ignores=['working_dir']) + + commit_msg = GenerateCommitMessage(NO_CHROMIUM_REVISION_UPDATE, + current_commit_pos, + new_commit_pos, changed_deps, + added_paths, removed_paths) + + return [l.strip() for l in commit_msg.split('\n')] + + def testChangedDepsInCommitMessage(self): + commit_lines = self._CommitMessageSetup() + + changed = '* src/third_party/android_deps/libs/' \ + 'android_arch_core_common: version:0.9.0..version:1.0.0-cr0' + self.assertTrue(changed in commit_lines) + # Check it is in adequate section. 
+ changed_line = commit_lines.index(changed) + self.assertTrue('Changed' in commit_lines[changed_line - 1]) + + def testAddedDepsInCommitMessage(self): + commit_lines = self._CommitMessageSetup() + + added = '* src/third_party/android_deps/libs/' \ + 'android_arch_lifecycle_common' + self.assertTrue(added in commit_lines) + # Check it is in adequate section. + added_line = commit_lines.index(added) + self.assertTrue('Added' in commit_lines[added_line - 1]) + + def testRemovedDepsInCommitMessage(self): + commit_lines = self._CommitMessageSetup() + + removed = '* src/third_party/android_deps/libs/' \ + 'android_arch_lifecycle_runtime' + self.assertTrue(removed in commit_lines) + # Check it is in adequate section. + removed_line = commit_lines.index(removed) + self.assertTrue('Removed' in commit_lines[removed_line - 1]) class TestChooseCQMode(unittest.TestCase): - def testSkip(self): - self.assertEquals(ChooseCQMode(True, 99, 500000, 500100), 0) + def testSkip(self): + self.assertEquals(ChooseCQMode(True, 99, 500000, 500100), 0) - def testDryRun(self): - self.assertEquals(ChooseCQMode(False, 101, 500000, 500100), 1) + def testDryRun(self): + self.assertEquals(ChooseCQMode(False, 101, 500000, 500100), 1) - def testSubmit(self): - self.assertEquals(ChooseCQMode(False, 100, 500000, 500100), 2) + def testSubmit(self): + self.assertEquals(ChooseCQMode(False, 100, 500000, 500100), 2) def _SetupGitLsRemoteCall(cmd_fake, url, revision): - cmd = ['git', 'ls-remote', url, revision] - cmd_fake.AddExpectation(cmd, _returns=(revision, None)) + cmd = ['git', 'ls-remote', url, revision] + cmd_fake.AddExpectation(cmd, _returns=(revision, None)) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/binary_version_check.py b/tools_webrtc/binary_version_check.py new file mode 100644 index 0000000000..cc5d96172c --- /dev/null +++ b/tools_webrtc/binary_version_check.py @@ -0,0 +1,34 @@ +# Copyright (c) 2020 The WebRTC project authors. 
All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +import re +import subprocess +import sys + +WEBRTC_VERSION_RE = re.compile( + r'WebRTC source stamp [0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}' +) + + +if __name__ == '__main__': + args = sys.argv + if len(args) != 2: + print('Usage: binary_version_test.py ') + exit(1) + filename = sys.argv[1] + output = subprocess.check_output(['strings', filename]) + strings_in_binary = output.decode('utf-8').splitlines() + for symbol in strings_in_binary: + if WEBRTC_VERSION_RE.match(symbol): + with open('webrtc_binary_version_check', 'w') as f: + f.write(symbol) + exit(0) + print('WebRTC source timestamp not found in "%s"' % filename) + print('Check why "kSourceTimestamp" from call/version.cc is not linked ' + '(or why it has been optimized away by the compiler/linker)') + exit(1) diff --git a/tools_webrtc/clang_tidy.py b/tools_webrtc/clang_tidy.py index 49a9427f09..bce2549aed 100755 --- a/tools_webrtc/clang_tidy.py +++ b/tools_webrtc/clang_tidy.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Invoke clang-tidy tool. Usage: clang_tidy.py file.cc [clang-tidy-args...] @@ -25,7 +24,6 @@ from presubmit_checks_lib.build_helpers import GetClangTidyPath, \ GetCompilationCommand - # We enable all checkers by default for investigation purpose. # This includes clang-analyzer-* checks. # Individual checkers can be disabled via command line options. @@ -34,63 +32,66 @@ def Process(filepath, args): - # Build directory is needed to gather compilation flags. 
- # Create a temporary one (instead of reusing an existing one) - # to keep the CLI simple and unencumbered. - out_dir = tempfile.mkdtemp('clang_tidy') - - try: - gn_args = [] # Use default build. - command = GetCompilationCommand(filepath, gn_args, out_dir) - - # Remove warning flags. They aren't needed and they cause trouble - # when clang-tidy doesn't match most recent clang. - # Same battle for -f (e.g. -fcomplete-member-pointers). - command = [arg for arg in command if not (arg.startswith('-W') or - arg.startswith('-f'))] - - # Path from build dir. - rel_path = os.path.relpath(os.path.abspath(filepath), out_dir) - - # Replace clang++ by clang-tidy - command[0:1] = [GetClangTidyPath(), - CHECKER_OPTION, - rel_path] + args + ['--'] # Separator for clang flags. - print "Running: %s" % ' '.join(command) - # Run from build dir so that relative paths are correct. - p = subprocess.Popen(command, cwd=out_dir, - stdout=sys.stdout, stderr=sys.stderr) - p.communicate() - return p.returncode - finally: - shutil.rmtree(out_dir, ignore_errors=True) + # Build directory is needed to gather compilation flags. + # Create a temporary one (instead of reusing an existing one) + # to keep the CLI simple and unencumbered. + out_dir = tempfile.mkdtemp('clang_tidy') + + try: + gn_args = [] # Use default build. + command = GetCompilationCommand(filepath, gn_args, out_dir) + + # Remove warning flags. They aren't needed and they cause trouble + # when clang-tidy doesn't match most recent clang. + # Same battle for -f (e.g. -fcomplete-member-pointers). + command = [ + arg for arg in command + if not (arg.startswith('-W') or arg.startswith('-f')) + ] + + # Path from build dir. + rel_path = os.path.relpath(os.path.abspath(filepath), out_dir) + + # Replace clang++ by clang-tidy + command[0:1] = [GetClangTidyPath(), CHECKER_OPTION, rel_path + ] + args + ['--'] # Separator for clang flags. + print "Running: %s" % ' '.join(command) + # Run from build dir so that relative paths are correct. 
+ p = subprocess.Popen(command, + cwd=out_dir, + stdout=sys.stdout, + stderr=sys.stderr) + p.communicate() + return p.returncode + finally: + shutil.rmtree(out_dir, ignore_errors=True) def ValidateCC(filepath): - """We can only analyze .cc files. Provide explicit message about that.""" - if filepath.endswith('.cc'): - return filepath - msg = ('%s not supported.\n' - 'For now, we can only analyze translation units (.cc files).' % - filepath) - raise argparse.ArgumentTypeError(msg) + """We can only analyze .cc files. Provide explicit message about that.""" + if filepath.endswith('.cc'): + return filepath + msg = ('%s not supported.\n' + 'For now, we can only analyze translation units (.cc files).' % + filepath) + raise argparse.ArgumentTypeError(msg) def Main(): - description = ( - "Run clang-tidy on single cc file.\n" - "Use flags, defines and include paths as in default debug build.\n" - "WARNING, this is a POC version with rough edges.") - parser = argparse.ArgumentParser(description=description) - parser.add_argument('filepath', - help='Specifies the path of the .cc file to analyze.', - type=ValidateCC) - parser.add_argument('args', - nargs=argparse.REMAINDER, - help='Arguments passed to clang-tidy') - parsed_args = parser.parse_args() - return Process(parsed_args.filepath, parsed_args.args) + description = ( + "Run clang-tidy on single cc file.\n" + "Use flags, defines and include paths as in default debug build.\n" + "WARNING, this is a POC version with rough edges.") + parser = argparse.ArgumentParser(description=description) + parser.add_argument('filepath', + help='Specifies the path of the .cc file to analyze.', + type=ValidateCC) + parser.add_argument('args', + nargs=argparse.REMAINDER, + help='Arguments passed to clang-tidy') + parsed_args = parser.parse_args() + return Process(parsed_args.filepath, parsed_args.args) if __name__ == '__main__': - sys.exit(Main()) + sys.exit(Main()) diff --git a/tools_webrtc/coverage/generate_coverage_command.py 
b/tools_webrtc/coverage/generate_coverage_command.py index 856666816d..894731b8b7 100644 --- a/tools_webrtc/coverage/generate_coverage_command.py +++ b/tools_webrtc/coverage/generate_coverage_command.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Generates a command-line for coverage.py. Useful for manual coverage runs. Before running the generated command line, do this: @@ -17,39 +16,32 @@ import sys TESTS = [ - 'video_capture_tests', - 'webrtc_nonparallel_tests', - 'video_engine_tests', - 'tools_unittests', - 'test_support_unittests', - 'slow_tests', - 'system_wrappers_unittests', - 'rtc_unittests', - 'rtc_stats_unittests', - 'rtc_pc_unittests', - 'rtc_media_unittests', - 'peerconnection_unittests', - 'modules_unittests', - 'modules_tests', - 'low_bandwidth_audio_test', - 'common_video_unittests', - 'common_audio_unittests', - 'audio_decoder_unittests' + 'video_capture_tests', 'webrtc_nonparallel_tests', 'video_engine_tests', + 'tools_unittests', 'test_support_unittests', 'slow_tests', + 'system_wrappers_unittests', 'rtc_unittests', 'rtc_stats_unittests', + 'rtc_pc_unittests', 'rtc_media_unittests', 'peerconnection_unittests', + 'modules_unittests', 'modules_tests', 'low_bandwidth_audio_test', + 'common_video_unittests', 'common_audio_unittests', + 'audio_decoder_unittests' ] + def main(): - cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS + - ['-b out/coverage', '-o out/report'] + - ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + - ['-c \'out/coverage/%s\'' % t for t in TESTS]) + cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + TESTS + + ['-b out/coverage', '-o out/report'] + + ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + + ['-c \'out/coverage/%s\'' % t for t in TESTS]) + + def WithXvfb(binary): + return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, 
binary) + + modules_unittests = 'out/coverage/modules_unittests' + cmd[cmd.index('-c \'%s\'' % + modules_unittests)] = WithXvfb(modules_unittests) - def WithXvfb(binary): - return '-c \'%s testing/xvfb.py %s\'' % (sys.executable, binary) - modules_unittests = 'out/coverage/modules_unittests' - cmd[cmd.index('-c \'%s\'' % modules_unittests)] = WithXvfb(modules_unittests) + print ' '.join(cmd) + return 0 - print ' '.join(cmd) - return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/coverage/generate_ios_coverage_command.py b/tools_webrtc/coverage/generate_ios_coverage_command.py index f81ee2c62b..e01a75c049 100644 --- a/tools_webrtc/coverage/generate_ios_coverage_command.py +++ b/tools_webrtc/coverage/generate_ios_coverage_command.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Generates command-line instructions to produce one-time iOS coverage using coverage.py. 
@@ -53,122 +52,115 @@ def _GetBinaryPath(command): DIRECTORY = 'out/coverage' TESTS = [ - 'audio_decoder_unittests', - 'common_audio_unittests', - 'common_video_unittests', - 'modules_tests', - 'modules_unittests', - 'rtc_media_unittests', - 'rtc_pc_unittests', - 'rtc_stats_unittests', - 'rtc_unittests', - 'slow_tests', - 'system_wrappers_unittests', - 'test_support_unittests', - 'tools_unittests', - 'video_capture_tests', - 'video_engine_tests', - 'webrtc_nonparallel_tests', + 'audio_decoder_unittests', + 'common_audio_unittests', + 'common_video_unittests', + 'modules_tests', + 'modules_unittests', + 'rtc_media_unittests', + 'rtc_pc_unittests', + 'rtc_stats_unittests', + 'rtc_unittests', + 'slow_tests', + 'system_wrappers_unittests', + 'test_support_unittests', + 'tools_unittests', + 'video_capture_tests', + 'video_engine_tests', + 'webrtc_nonparallel_tests', ] XC_TESTS = [ - 'apprtcmobile_tests', - 'sdk_framework_unittests', - 'sdk_unittests', + 'apprtcmobile_tests', + 'sdk_framework_unittests', + 'sdk_unittests', ] def FormatIossimTest(test_name, is_xctest=False): - args = ['%s/%s.app' % (DIRECTORY, test_name)] - if is_xctest: - args += ['%s/%s_module.xctest' % (DIRECTORY, test_name)] + args = ['%s/%s.app' % (DIRECTORY, test_name)] + if is_xctest: + args += ['%s/%s_module.xctest' % (DIRECTORY, test_name)] - return '-c \'%s/iossim %s\'' % (DIRECTORY, ' '.join(args)) + return '-c \'%s/iossim %s\'' % (DIRECTORY, ' '.join(args)) def GetGNArgs(is_simulator): - target_cpu = 'x64' if is_simulator else 'arm64' - return ([] + - ['target_os="ios"'] + - ['target_cpu="%s"' % target_cpu] + - ['use_clang_coverage=true'] + - ['is_component_build=false'] + - ['dcheck_always_on=true']) + target_cpu = 'x64' if is_simulator else 'arm64' + return ([] + ['target_os="ios"'] + ['target_cpu="%s"' % target_cpu] + + ['use_clang_coverage=true'] + ['is_component_build=false'] + + ['dcheck_always_on=true']) def GenerateIOSSimulatorCommand(): - gn_args_string = ' 
'.join(GetGNArgs(is_simulator=True)) - gn_cmd = ['gn', 'gen', DIRECTORY, '--args=\'%s\'' % gn_args_string] - - coverage_cmd = ( - [sys.executable, 'tools/code_coverage/coverage.py'] + - ["%s.app" % t for t in XC_TESTS + TESTS] + - ['-b %s' % DIRECTORY, '-o out/report'] + - ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + - [FormatIossimTest(t, is_xctest=True) for t in XC_TESTS] + - [FormatIossimTest(t, is_xctest=False) for t in TESTS] - ) - - print 'To get code coverage using iOS simulator just run following commands:' - print '' - print ' '.join(gn_cmd) - print '' - print ' '.join(coverage_cmd) - return 0 + gn_args_string = ' '.join(GetGNArgs(is_simulator=True)) + gn_cmd = ['gn', 'gen', DIRECTORY, '--args=\'%s\'' % gn_args_string] + + coverage_cmd = ([sys.executable, 'tools/code_coverage/coverage.py'] + + ["%s.app" % t for t in XC_TESTS + TESTS] + + ['-b %s' % DIRECTORY, '-o out/report'] + + ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] + + [FormatIossimTest(t, is_xctest=True) for t in XC_TESTS] + + [FormatIossimTest(t, is_xctest=False) for t in TESTS]) + + print 'To get code coverage using iOS simulator just run following commands:' + print '' + print ' '.join(gn_cmd) + print '' + print ' '.join(coverage_cmd) + return 0 def GenerateIOSDeviceCommand(): - gn_args_string = ' '.join(GetGNArgs(is_simulator=False)) - - coverage_report_cmd = ( - [sys.executable, 'tools/code_coverage/coverage.py'] + - ['%s.app' % t for t in TESTS] + - ['-b %s' % DIRECTORY] + - ['-o out/report'] + - ['-p %s/merged.profdata' % DIRECTORY] + - ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\''] - ) - - print 'Computing code coverage for real iOS device is a little bit tedious.' - print '' - print 'You will need:' - print '' - print '1. Generate xcode project and open it with Xcode 10+:' - print ' gn gen %s --ide=xcode --args=\'%s\'' % (DIRECTORY, gn_args_string) - print ' open %s/all.xcworkspace' % DIRECTORY - print '' - print '2. 
Execute these Run targets manually with Xcode Run button and ' - print 'manually save generated coverage.profraw file to %s:' % DIRECTORY - print '\n'.join('- %s' % t for t in TESTS) - print '' - print '3. Execute these Test targets manually with Xcode Test button and ' - print 'manually save generated coverage.profraw file to %s:' % DIRECTORY - print '\n'.join('- %s' % t for t in XC_TESTS) - print '' - print '4. Merge *.profraw files to *.profdata using llvm-profdata tool:' - print (' build/mac_files/Xcode.app/Contents/Developer/Toolchains/' + - 'XcodeDefault.xctoolchain/usr/bin/llvm-profdata merge ' + - '-o %s/merged.profdata ' % DIRECTORY + - '-sparse=true %s/*.profraw' % DIRECTORY) - print '' - print '5. Generate coverage report:' - print ' ' + ' '.join(coverage_report_cmd) - return 0 + gn_args_string = ' '.join(GetGNArgs(is_simulator=False)) + + coverage_report_cmd = ( + [sys.executable, 'tools/code_coverage/coverage.py'] + + ['%s.app' % t for t in TESTS] + ['-b %s' % DIRECTORY] + + ['-o out/report'] + ['-p %s/merged.profdata' % DIRECTORY] + + ['-i=\'.*/out/.*|.*/third_party/.*|.*test.*\'']) + + print 'Computing code coverage for real iOS device is a little bit tedious.' + print '' + print 'You will need:' + print '' + print '1. Generate xcode project and open it with Xcode 10+:' + print ' gn gen %s --ide=xcode --args=\'%s\'' % (DIRECTORY, gn_args_string) + print ' open %s/all.xcworkspace' % DIRECTORY + print '' + print '2. Execute these Run targets manually with Xcode Run button and ' + print 'manually save generated coverage.profraw file to %s:' % DIRECTORY + print '\n'.join('- %s' % t for t in TESTS) + print '' + print '3. Execute these Test targets manually with Xcode Test button and ' + print 'manually save generated coverage.profraw file to %s:' % DIRECTORY + print '\n'.join('- %s' % t for t in XC_TESTS) + print '' + print '4. 
Merge *.profraw files to *.profdata using llvm-profdata tool:' + print(' build/mac_files/Xcode.app/Contents/Developer/Toolchains/' + + 'XcodeDefault.xctoolchain/usr/bin/llvm-profdata merge ' + + '-o %s/merged.profdata ' % DIRECTORY + + '-sparse=true %s/*.profraw' % DIRECTORY) + print '' + print '5. Generate coverage report:' + print ' ' + ' '.join(coverage_report_cmd) + return 0 def Main(): - if len(sys.argv) < 2: - print 'Please specify type of coverage:' - print ' %s simulator' % sys.argv[0] - print ' %s device' % sys.argv[0] - elif sys.argv[1] == 'simulator': - GenerateIOSSimulatorCommand() - elif sys.argv[1] == 'device': - GenerateIOSDeviceCommand() - else: - print 'Unsupported type of coverage' - - return 0 + if len(sys.argv) < 2: + print 'Please specify type of coverage:' + print ' %s simulator' % sys.argv[0] + print ' %s device' % sys.argv[0] + elif sys.argv[1] == 'simulator': + GenerateIOSSimulatorCommand() + elif sys.argv[1] == 'device': + GenerateIOSDeviceCommand() + else: + print 'Unsupported type of coverage' + + return 0 + if __name__ == '__main__': - sys.exit(Main()) + sys.exit(Main()) diff --git a/tools_webrtc/cpu/cpu_mon.py b/tools_webrtc/cpu/cpu_mon.py index 128a5c0bf6..d89935aeab 100644 --- a/tools_webrtc/cpu/cpu_mon.py +++ b/tools_webrtc/cpu/cpu_mon.py @@ -8,7 +8,6 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - import psutil import sys @@ -17,67 +16,68 @@ class CpuSnapshot(object): - def __init__(self, label): - self.label = label - self.samples = [] + def __init__(self, label): + self.label = label + self.samples = [] - def Capture(self, sample_count): - print ('Capturing %d CPU samples for %s...' % - ((sample_count - len(self.samples)), self.label)) - while len(self.samples) < sample_count: - self.samples.append(psutil.cpu_percent(1.0, False)) + def Capture(self, sample_count): + print('Capturing %d CPU samples for %s...' 
% + ((sample_count - len(self.samples)), self.label)) + while len(self.samples) < sample_count: + self.samples.append(psutil.cpu_percent(1.0, False)) - def Text(self): - return ('%s: avg=%s, median=%s, min=%s, max=%s' % - (self.label, numpy.average(self.samples), - numpy.median(self.samples), - numpy.min(self.samples), numpy.max(self.samples))) + def Text(self): + return ('%s: avg=%s, median=%s, min=%s, max=%s' % + (self.label, numpy.average(self.samples), + numpy.median(self.samples), numpy.min( + self.samples), numpy.max(self.samples))) - def Max(self): - return numpy.max(self.samples) + def Max(self): + return numpy.max(self.samples) def GrabCpuSamples(sample_count): - print 'Label for snapshot (enter to quit): ' - label = raw_input().strip() - if len(label) == 0: - return None + print 'Label for snapshot (enter to quit): ' + label = raw_input().strip() + if len(label) == 0: + return None - snapshot = CpuSnapshot(label) - snapshot.Capture(sample_count) + snapshot = CpuSnapshot(label) + snapshot.Capture(sample_count) - return snapshot + return snapshot def main(): - print 'How many seconds to capture per snapshot (enter for 60)?' - sample_count = raw_input().strip() - if len(sample_count) > 0 and int(sample_count) > 0: - sample_count = int(sample_count) - else: - print 'Defaulting to 60 samples.' - sample_count = 60 + print 'How many seconds to capture per snapshot (enter for 60)?' + sample_count = raw_input().strip() + if len(sample_count) > 0 and int(sample_count) > 0: + sample_count = int(sample_count) + else: + print 'Defaulting to 60 samples.' 
+ sample_count = 60 + + snapshots = [] + while True: + snapshot = GrabCpuSamples(sample_count) + if snapshot is None: + break + snapshots.append(snapshot) - snapshots = [] - while True: - snapshot = GrabCpuSamples(sample_count) - if snapshot is None: - break - snapshots.append(snapshot) + if len(snapshots) == 0: + print 'no samples captured' + return -1 - if len(snapshots) == 0: - print 'no samples captured' - return -1 + pyplot.title('CPU usage') - pyplot.title('CPU usage') + for s in snapshots: + pyplot.plot(s.samples, label=s.Text(), linewidth=2) - for s in snapshots: - pyplot.plot(s.samples, label=s.Text(), linewidth=2) + pyplot.legend() - pyplot.legend() + pyplot.show() + return 0 - pyplot.show() - return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/download_tools.py b/tools_webrtc/download_tools.py index 9554c8cd7a..62602dea59 100755 --- a/tools_webrtc/download_tools.py +++ b/tools_webrtc/download_tools.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Downloads precompiled tools. 
These are checked into the repository as SHA-1 hashes (see *.sha1 files in @@ -17,12 +16,10 @@ import os import sys - SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir)) sys.path.append(os.path.join(SRC_DIR, 'build')) - import find_depot_tools find_depot_tools.add_depot_tools_to_path() import gclient_utils @@ -30,32 +27,34 @@ def main(directories): - if not directories: - directories = [SCRIPT_DIR] - - for path in directories: - cmd = [ - sys.executable, - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, - 'download_from_google_storage.py'), - '--directory', - '--num_threads=10', - '--bucket', 'chrome-webrtc-resources', - '--auto_platform', - '--recursive', - path, - ] - print 'Downloading precompiled tools...' - - # Perform download similar to how gclient hooks execute. - try: - gclient_utils.CheckCallAndFilter( - cmd, cwd=SRC_DIR, always_show_header=True) - except (gclient_utils.Error, subprocess2.CalledProcessError) as e: - print 'Error: %s' % str(e) - return 2 - return 0 + if not directories: + directories = [SCRIPT_DIR] + + for path in directories: + cmd = [ + sys.executable, + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, + 'download_from_google_storage.py'), + '--directory', + '--num_threads=10', + '--bucket', + 'chrome-webrtc-resources', + '--auto_platform', + '--recursive', + path, + ] + print 'Downloading precompiled tools...' + + # Perform download similar to how gclient hooks execute. 
+ try: + gclient_utils.CheckCallAndFilter(cmd, + cwd=SRC_DIR, + always_show_header=True) + except (gclient_utils.Error, subprocess2.CalledProcessError) as e: + print 'Error: %s' % str(e) + return 2 + return 0 if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + sys.exit(main(sys.argv[1:])) diff --git a/tools_webrtc/ensure_webcam_is_running.py b/tools_webrtc/ensure_webcam_is_running.py index 20cf49e137..b68a484ea0 100755 --- a/tools_webrtc/ensure_webcam_is_running.py +++ b/tools_webrtc/ensure_webcam_is_running.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Checks if a virtual webcam is running and starts it if not. Returns a non-zero return code if the webcam could not be started. @@ -32,74 +31,73 @@ import subprocess import sys - WEBCAM_WIN = ('schtasks', '/run', '/tn', 'ManyCam') WEBCAM_MAC = ('open', '/Applications/ManyCam/ManyCam.app') def IsWebCamRunning(): - if sys.platform == 'win32': - process_name = 'ManyCam.exe' - elif sys.platform.startswith('darwin'): - process_name = 'ManyCam' - elif sys.platform.startswith('linux'): - # TODO(bugs.webrtc.org/9636): Currently a no-op on Linux: sw webcams no - # longer in use. - print 'Virtual webcam: no-op on Linux' - return True - else: - raise Exception('Unsupported platform: %s' % sys.platform) - for p in psutil.process_iter(): - try: - if process_name == p.name: - print 'Found a running virtual webcam (%s with PID %s)' % (p.name, - p.pid) - return True - except psutil.AccessDenied: - pass # This is normal if we query sys processes, etc. - return False - - -def StartWebCam(): - try: if sys.platform == 'win32': - subprocess.check_call(WEBCAM_WIN) - print 'Successfully launched virtual webcam.' + process_name = 'ManyCam.exe' elif sys.platform.startswith('darwin'): - subprocess.check_call(WEBCAM_MAC) - print 'Successfully launched virtual webcam.' 
+ process_name = 'ManyCam' elif sys.platform.startswith('linux'): - # TODO(bugs.webrtc.org/9636): Currently a no-op on Linux: sw webcams no - # longer in use. - print 'Not implemented on Linux' - - except Exception as e: - print 'Failed to launch virtual webcam: %s' % e + # TODO(bugs.webrtc.org/9636): Currently a no-op on Linux: sw webcams no + # longer in use. + print 'Virtual webcam: no-op on Linux' + return True + else: + raise Exception('Unsupported platform: %s' % sys.platform) + for p in psutil.process_iter(): + try: + if process_name == p.name: + print 'Found a running virtual webcam (%s with PID %s)' % ( + p.name, p.pid) + return True + except psutil.AccessDenied: + pass # This is normal if we query sys processes, etc. return False - return True + +def StartWebCam(): + try: + if sys.platform == 'win32': + subprocess.check_call(WEBCAM_WIN) + print 'Successfully launched virtual webcam.' + elif sys.platform.startswith('darwin'): + subprocess.check_call(WEBCAM_MAC) + print 'Successfully launched virtual webcam.' + elif sys.platform.startswith('linux'): + # TODO(bugs.webrtc.org/9636): Currently a no-op on Linux: sw webcams no + # longer in use. 
+ print 'Not implemented on Linux' + + except Exception as e: + print 'Failed to launch virtual webcam: %s' % e + return False + + return True def _ForcePythonInterpreter(cmd): - """Returns the fixed command line to call the right python executable.""" - out = cmd[:] - if out[0] == 'python': - out[0] = sys.executable - elif out[0].endswith('.py'): - out.insert(0, sys.executable) - return out + """Returns the fixed command line to call the right python executable.""" + out = cmd[:] + if out[0] == 'python': + out[0] = sys.executable + elif out[0].endswith('.py'): + out.insert(0, sys.executable) + return out def Main(argv): - if not IsWebCamRunning(): - if not StartWebCam(): - return 1 + if not IsWebCamRunning(): + if not StartWebCam(): + return 1 - if argv: - return subprocess.call(_ForcePythonInterpreter(argv)) - else: - return 0 + if argv: + return subprocess.call(_ForcePythonInterpreter(argv)) + else: + return 0 if __name__ == '__main__': - sys.exit(Main(sys.argv[1:])) + sys.exit(Main(sys.argv[1:])) diff --git a/tools_webrtc/executable_host_build.py b/tools_webrtc/executable_host_build.py index cc1e7ee59e..aac4be0b4b 100644 --- a/tools_webrtc/executable_host_build.py +++ b/tools_webrtc/executable_host_build.py @@ -55,7 +55,6 @@ import sys import tempfile - SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) SRC_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir)) sys.path.append(os.path.join(SRC_DIR, 'build')) @@ -63,39 +62,40 @@ def _ParseArgs(): - desc = 'Generates a GN executable targeting the host machine.' - parser = argparse.ArgumentParser(description=desc) - parser.add_argument('--executable_name', - required=True, - help='Name of the executable to build') - args = parser.parse_args() - return args + desc = 'Generates a GN executable targeting the host machine.' 
+ parser = argparse.ArgumentParser(description=desc) + parser.add_argument('--executable_name', + required=True, + help='Name of the executable to build') + args = parser.parse_args() + return args @contextmanager def HostBuildDir(): - temp_dir = tempfile.mkdtemp() - try: - yield temp_dir - finally: - shutil.rmtree(temp_dir) + temp_dir = tempfile.mkdtemp() + try: + yield temp_dir + finally: + shutil.rmtree(temp_dir) def _RunCommand(argv, cwd=SRC_DIR, **kwargs): - with open(os.devnull, 'w') as devnull: - subprocess.check_call(argv, cwd=cwd, stdout=devnull, **kwargs) + with open(os.devnull, 'w') as devnull: + subprocess.check_call(argv, cwd=cwd, stdout=devnull, **kwargs) def DepotToolPath(*args): - return os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, *args) + return os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, *args) if __name__ == '__main__': - ARGS = _ParseArgs() - EXECUTABLE_TO_BUILD = ARGS.executable_name - EXECUTABLE_FINAL_NAME = ARGS.executable_name + '_host' - with HostBuildDir() as build_dir: - _RunCommand([sys.executable, DepotToolPath('gn.py'), 'gen', build_dir]) - _RunCommand([DepotToolPath('ninja'), '-C', build_dir, EXECUTABLE_TO_BUILD]) - shutil.copy(os.path.join(build_dir, EXECUTABLE_TO_BUILD), - EXECUTABLE_FINAL_NAME) + ARGS = _ParseArgs() + EXECUTABLE_TO_BUILD = ARGS.executable_name + EXECUTABLE_FINAL_NAME = ARGS.executable_name + '_host' + with HostBuildDir() as build_dir: + _RunCommand([sys.executable, DepotToolPath('gn.py'), 'gen', build_dir]) + _RunCommand( + [DepotToolPath('ninja'), '-C', build_dir, EXECUTABLE_TO_BUILD]) + shutil.copy(os.path.join(build_dir, EXECUTABLE_TO_BUILD), + EXECUTABLE_FINAL_NAME) diff --git a/tools_webrtc/flags_compatibility.py b/tools_webrtc/flags_compatibility.py old mode 100644 new mode 100755 index d3570a5254..c716574477 --- a/tools_webrtc/flags_compatibility.py +++ b/tools_webrtc/flags_compatibility.py @@ -15,30 +15,32 @@ def main(): - parser = argparse.ArgumentParser() - 
parser.add_argument('--isolated-script-test-perf-output') - args, unrecognized_args = parser.parse_known_args() + parser = argparse.ArgumentParser() + parser.add_argument('--isolated-script-test-perf-output') + args, unrecognized_args = parser.parse_known_args() - test_command = _ForcePythonInterpreter(unrecognized_args) - if args.isolated_script_test_perf_output: - test_command += ['--isolated_script_test_perf_output=' + - args.isolated_script_test_perf_output] - logging.info('Running %r', test_command) + test_command = _ForcePythonInterpreter(unrecognized_args) + if args.isolated_script_test_perf_output: + test_command += [ + '--isolated_script_test_perf_output=' + + args.isolated_script_test_perf_output + ] + logging.info('Running %r', test_command) - return subprocess.call(test_command) + return subprocess.call(test_command) def _ForcePythonInterpreter(cmd): - """Returns the fixed command line to call the right python executable.""" - out = cmd[:] - if out[0] == 'python': - out[0] = sys.executable - elif out[0].endswith('.py'): - out.insert(0, sys.executable) - return out + """Returns the fixed command line to call the right python executable.""" + out = cmd[:] + if out[0] == 'python': + out[0] = sys.executable + elif out[0].endswith('.py'): + out.insert(0, sys.executable) + return out if __name__ == '__main__': - # pylint: disable=W0101 - logging.basicConfig(level=logging.INFO) - sys.exit(main()) + # pylint: disable=W0101 + logging.basicConfig(level=logging.INFO) + sys.exit(main()) diff --git a/tools_webrtc/get_landmines.py b/tools_webrtc/get_landmines.py index ba8ac9c1bf..764f053f2a 100755 --- a/tools_webrtc/get_landmines.py +++ b/tools_webrtc/get_landmines.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
- """ This file emits the list of reasons why a particular build needs to be clobbered (or a list of 'landmines'). @@ -20,46 +19,49 @@ sys.path.insert(0, os.path.join(CHECKOUT_ROOT, 'build')) import landmine_utils - host_os = landmine_utils.host_os # pylint: disable=invalid-name def print_landmines(): # pylint: disable=invalid-name - """ + """ ALL LANDMINES ARE EMITTED FROM HERE. """ - # DO NOT add landmines as part of a regular CL. Landmines are a last-effort - # bandaid fix if a CL that got landed has a build dependency bug and all bots - # need to be cleaned up. If you're writing a new CL that causes build - # dependency problems, fix the dependency problems instead of adding a - # landmine. - # See the Chromium version in src/build/get_landmines.py for usage examples. - print 'Clobber to remove out/{Debug,Release}/args.gn (webrtc:5070)' - if host_os() == 'win': - print 'Clobber to resolve some issues with corrupt .pdb files on bots.' - print 'Clobber due to corrupt .pdb files (after #14623)' - print 'Clobber due to Win 64-bit Debug linking error (crbug.com/668961)' - print ('Clobber due to Win Clang Debug linking errors in ' - 'https://codereview.webrtc.org/2786603002') - print ('Clobber due to Win Debug linking errors in ' - 'https://codereview.webrtc.org/2832063003/') - if host_os() == 'mac': - print 'Clobber due to iOS compile errors (crbug.com/694721)' - print 'Clobber to unblock https://codereview.webrtc.org/2709573003' - print ('Clobber to fix https://codereview.webrtc.org/2709573003 after ' - 'landing') - print ('Clobber to fix https://codereview.webrtc.org/2767383005 before' - 'landing (changing rtc_executable -> rtc_test on iOS)') - print ('Clobber to fix https://codereview.webrtc.org/2767383005 before' - 'landing (changing rtc_executable -> rtc_test on iOS)') - print 'Another landmine for low_bandwidth_audio_test (webrtc:7430)' - print 'Clobber to change neteq_rtpplay type to executable' + # DO NOT add landmines as part of a regular CL. 
Landmines are a last-effort + # bandaid fix if a CL that got landed has a build dependency bug and all bots + # need to be cleaned up. If you're writing a new CL that causes build + # dependency problems, fix the dependency problems instead of adding a + # landmine. + # See the Chromium version in src/build/get_landmines.py for usage examples. + print 'Clobber to remove out/{Debug,Release}/args.gn (webrtc:5070)' + if host_os() == 'win': + print 'Clobber to resolve some issues with corrupt .pdb files on bots.' + print 'Clobber due to corrupt .pdb files (after #14623)' + print 'Clobber due to Win 64-bit Debug linking error (crbug.com/668961)' + print('Clobber due to Win Clang Debug linking errors in ' + 'https://codereview.webrtc.org/2786603002') + print('Clobber due to Win Debug linking errors in ' + 'https://codereview.webrtc.org/2832063003/') + print 'Clobber win x86 bots (issues with isolated files).' + if host_os() == 'mac': + print 'Clobber due to iOS compile errors (crbug.com/694721)' + print 'Clobber to unblock https://codereview.webrtc.org/2709573003' + print('Clobber to fix https://codereview.webrtc.org/2709573003 after ' + 'landing') + print('Clobber to fix https://codereview.webrtc.org/2767383005 before' + 'landing (changing rtc_executable -> rtc_test on iOS)') + print('Clobber to fix https://codereview.webrtc.org/2767383005 before' + 'landing (changing rtc_executable -> rtc_test on iOS)') + print 'Another landmine for low_bandwidth_audio_test (webrtc:7430)' + print 'Clobber to change neteq_rtpplay type to executable' + print 'Clobber to remove .xctest files.' + print 'Clobber to remove .xctest files (take 2).' 
+ print 'Switching rtc_executable to rtc_test' def main(): - print_landmines() - return 0 + print_landmines() + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/gn_check_autofix.py b/tools_webrtc/gn_check_autofix.py index 57ea0f6d7b..282dc4fc0f 100644 --- a/tools_webrtc/gn_check_autofix.py +++ b/tools_webrtc/gn_check_autofix.py @@ -7,7 +7,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """ This tool tries to fix (some) errors reported by `gn gen --check` or `gn check`. @@ -31,72 +30,78 @@ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) -CHROMIUM_DIRS = ['base', 'build', 'buildtools', - 'testing', 'third_party', 'tools'] +CHROMIUM_DIRS = [ + 'base', 'build', 'buildtools', 'testing', 'third_party', 'tools' +] TARGET_RE = re.compile( r'(?P\s*)\w*\("(?P\w*)"\) {$') + class TemporaryDirectory(object): - def __init__(self): - self._closed = False - self._name = None - self._name = tempfile.mkdtemp() + def __init__(self): + self._closed = False + self._name = None + self._name = tempfile.mkdtemp() - def __enter__(self): - return self._name + def __enter__(self): + return self._name - def __exit__(self, exc, value, _tb): - if self._name and not self._closed: - shutil.rmtree(self._name) - self._closed = True + def __exit__(self, exc, value, _tb): + if self._name and not self._closed: + shutil.rmtree(self._name) + self._closed = True def Run(cmd): - print 'Running:', ' '.join(cmd) - sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - return sub.communicate() + print 'Running:', ' '.join(cmd) + sub = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return sub.communicate() + def FixErrors(filename, missing_deps, deleted_sources): - with open(filename) as f: - lines = f.readlines() - - fixed_file = '' - indentation_level 
= None - for line in lines: - match = TARGET_RE.match(line) - if match: - target = match.group('target_name') - if target in missing_deps: - indentation_level = match.group('indentation_level') - elif indentation_level is not None: - match = re.match(indentation_level + '}$', line) - if match: - line = ('deps = [\n' + - ''.join(' "' + dep + '",\n' for dep in missing_deps[target]) + - ']\n') + line - indentation_level = None - elif line.strip().startswith('deps'): - is_empty_deps = line.strip() == 'deps = []' - line = 'deps = [\n' if is_empty_deps else line - line += ''.join(' "' + dep + '",\n' for dep in missing_deps[target]) - line += ']\n' if is_empty_deps else '' - indentation_level = None - - if line.strip() not in deleted_sources: - fixed_file += line - - with open(filename, 'w') as f: - f.write(fixed_file) - - Run(['gn', 'format', filename]) + with open(filename) as f: + lines = f.readlines() + + fixed_file = '' + indentation_level = None + for line in lines: + match = TARGET_RE.match(line) + if match: + target = match.group('target_name') + if target in missing_deps: + indentation_level = match.group('indentation_level') + elif indentation_level is not None: + match = re.match(indentation_level + '}$', line) + if match: + line = ('deps = [\n' + ''.join(' "' + dep + '",\n' + for dep in missing_deps[target]) + + ']\n') + line + indentation_level = None + elif line.strip().startswith('deps'): + is_empty_deps = line.strip() == 'deps = []' + line = 'deps = [\n' if is_empty_deps else line + line += ''.join(' "' + dep + '",\n' + for dep in missing_deps[target]) + line += ']\n' if is_empty_deps else '' + indentation_level = None + + if line.strip() not in deleted_sources: + fixed_file += line + + with open(filename, 'w') as f: + f.write(fixed_file) + + Run(['gn', 'format', filename]) + def FirstNonEmpty(iterable): - """Return first item which evaluates to True, or fallback to None.""" - return next((x for x in iterable if x), None) + """Return first item which 
evaluates to True, or fallback to None.""" + return next((x for x in iterable if x), None) + def Rebase(base_path, dependency_path, dependency): - """Adapt paths so they work both in stand-alone WebRTC and Chromium tree. + """Adapt paths so they work both in stand-alone WebRTC and Chromium tree. To cope with varying top-level directory (WebRTC VS Chromium), we use: * relative paths for WebRTC modules. @@ -113,77 +118,82 @@ def Rebase(base_path, dependency_path, dependency): Full target path (E.g. '../rtc_base/time:timestamp_extrapolator'). """ - root = FirstNonEmpty(dependency_path.split('/')) - if root in CHROMIUM_DIRS: - # Chromium paths must remain absolute. E.g. //third_party//abseil-cpp... - rebased = dependency_path - else: - base_path = base_path.split(os.path.sep) - dependency_path = dependency_path.split(os.path.sep) - - first_difference = None - shortest_length = min(len(dependency_path), len(base_path)) - for i in range(shortest_length): - if dependency_path[i] != base_path[i]: - first_difference = i - break - - first_difference = first_difference or shortest_length - base_path = base_path[first_difference:] - dependency_path = dependency_path[first_difference:] - rebased = os.path.sep.join((['..'] * len(base_path)) + dependency_path) - return rebased + ':' + dependency - -def main(): - deleted_sources = set() - errors_by_file = defaultdict(lambda: defaultdict(set)) - - with TemporaryDirectory() as tmp_dir: - mb_script_path = os.path.join(SCRIPT_DIR, 'mb', 'mb.py') - mb_config_file_path = os.path.join(SCRIPT_DIR, 'mb', 'mb_config.pyl') - mb_gen_command = ([ - mb_script_path, 'gen', - tmp_dir, - '--config-file', mb_config_file_path, - ] + sys.argv[1:]) - - mb_output = Run(mb_gen_command) - errors = mb_output[0].split('ERROR')[1:] - - if mb_output[1]: - print mb_output[1] - return 1 - - for error in errors: - error = error.splitlines() - target_msg = 'The target:' - if target_msg not in error: - target_msg = 'It is not in any dependency of' - if target_msg 
not in error: - print '\n'.join(error) - continue - index = error.index(target_msg) + 1 - path, target = error[index].strip().split(':') - if error[index+1] in ('is including a file from the target:', - 'The include file is in the target(s):'): - dep = error[index+2].strip() - dep_path, dep = dep.split(':') - dep = Rebase(path, dep_path, dep) - # Replacing /target:target with /target - dep = re.sub(r'/(\w+):(\1)$', r'/\1', dep) - path = os.path.join(path[2:], 'BUILD.gn') - errors_by_file[path][target].add(dep) - elif error[index+1] == 'has a source file:': - deleted_file = '"' + os.path.basename(error[index+2].strip()) + '",' - deleted_sources.add(deleted_file) + root = FirstNonEmpty(dependency_path.split('/')) + if root in CHROMIUM_DIRS: + # Chromium paths must remain absolute. E.g. //third_party//abseil-cpp... + rebased = dependency_path else: - print '\n'.join(error) - continue + base_path = base_path.split(os.path.sep) + dependency_path = dependency_path.split(os.path.sep) + + first_difference = None + shortest_length = min(len(dependency_path), len(base_path)) + for i in range(shortest_length): + if dependency_path[i] != base_path[i]: + first_difference = i + break + + first_difference = first_difference or shortest_length + base_path = base_path[first_difference:] + dependency_path = dependency_path[first_difference:] + rebased = os.path.sep.join((['..'] * len(base_path)) + dependency_path) + return rebased + ':' + dependency - for path, missing_deps in errors_by_file.items(): - FixErrors(path, missing_deps, deleted_sources) - return 0 +def main(): + deleted_sources = set() + errors_by_file = defaultdict(lambda: defaultdict(set)) + + with TemporaryDirectory() as tmp_dir: + mb_script_path = os.path.join(SCRIPT_DIR, 'mb', 'mb.py') + mb_config_file_path = os.path.join(SCRIPT_DIR, 'mb', 'mb_config.pyl') + mb_gen_command = ([ + mb_script_path, + 'gen', + tmp_dir, + '--config-file', + mb_config_file_path, + ] + sys.argv[1:]) + + mb_output = Run(mb_gen_command) + 
errors = mb_output[0].split('ERROR')[1:] + + if mb_output[1]: + print mb_output[1] + return 1 + + for error in errors: + error = error.splitlines() + target_msg = 'The target:' + if target_msg not in error: + target_msg = 'It is not in any dependency of' + if target_msg not in error: + print '\n'.join(error) + continue + index = error.index(target_msg) + 1 + path, target = error[index].strip().split(':') + if error[index + 1] in ('is including a file from the target:', + 'The include file is in the target(s):'): + dep = error[index + 2].strip() + dep_path, dep = dep.split(':') + dep = Rebase(path, dep_path, dep) + # Replacing /target:target with /target + dep = re.sub(r'/(\w+):(\1)$', r'/\1', dep) + path = os.path.join(path[2:], 'BUILD.gn') + errors_by_file[path][target].add(dep) + elif error[index + 1] == 'has a source file:': + deleted_file = '"' + os.path.basename( + error[index + 2].strip()) + '",' + deleted_sources.add(deleted_file) + else: + print '\n'.join(error) + continue + + for path, missing_deps in errors_by_file.items(): + FixErrors(path, missing_deps, deleted_sources) + + return 0 + if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/gtest-parallel-wrapper.py b/tools_webrtc/gtest-parallel-wrapper.py index fa37c5dd54..26b9afa0ad 100755 --- a/tools_webrtc/gtest-parallel-wrapper.py +++ b/tools_webrtc/gtest-parallel-wrapper.py @@ -75,165 +75,174 @@ import subprocess import sys - -Args = collections.namedtuple('Args', - ['gtest_parallel_args', 'test_env', 'output_dir', - 'test_artifacts_dir']) +Args = collections.namedtuple( + 'Args', + ['gtest_parallel_args', 'test_env', 'output_dir', 'test_artifacts_dir']) def _CatFiles(file_list, output_file): - with open(output_file, 'w') as output_file: - for filename in file_list: - with open(filename) as input_file: - output_file.write(input_file.read()) - os.remove(filename) + with open(output_file, 'w') as output_file: + for filename in file_list: + with open(filename) as 
input_file: + output_file.write(input_file.read()) + os.remove(filename) + def _ParseWorkersOption(workers): - """Interpret Nx syntax as N * cpu_count. Int value is left as is.""" - base = float(workers.rstrip('x')) - if workers.endswith('x'): - result = int(base * multiprocessing.cpu_count()) - else: - result = int(base) - return max(result, 1) # Sanitize when using e.g. '0.5x'. + """Interpret Nx syntax as N * cpu_count. Int value is left as is.""" + base = float(workers.rstrip('x')) + if workers.endswith('x'): + result = int(base * multiprocessing.cpu_count()) + else: + result = int(base) + return max(result, 1) # Sanitize when using e.g. '0.5x'. class ReconstructibleArgumentGroup(object): - """An argument group that can be converted back into a command line. + """An argument group that can be converted back into a command line. This acts like ArgumentParser.add_argument_group, but names of arguments added to it are also kept in a list, so that parsed options from ArgumentParser.parse_args can be reconstructed back into a command line (list of args) based on the list of wanted keys.""" - def __init__(self, parser, *args, **kwargs): - self._group = parser.add_argument_group(*args, **kwargs) - self._keys = [] - def AddArgument(self, *args, **kwargs): - arg = self._group.add_argument(*args, **kwargs) - self._keys.append(arg.dest) + def __init__(self, parser, *args, **kwargs): + self._group = parser.add_argument_group(*args, **kwargs) + self._keys = [] + + def AddArgument(self, *args, **kwargs): + arg = self._group.add_argument(*args, **kwargs) + self._keys.append(arg.dest) - def RemakeCommandLine(self, options): - result = [] - for key in self._keys: - value = getattr(options, key) - if value is True: - result.append('--%s' % key) - elif value is not None: - result.append('--%s=%s' % (key, value)) - return result + def RemakeCommandLine(self, options): + result = [] + for key in self._keys: + value = getattr(options, key) + if value is True: + result.append('--%s' % 
key) + elif value is not None: + result.append('--%s=%s' % (key, value)) + return result def ParseArgs(argv=None): - parser = argparse.ArgumentParser(argv) - - gtest_group = ReconstructibleArgumentGroup(parser, - 'Arguments to gtest-parallel') - # These options will be passed unchanged to gtest-parallel. - gtest_group.AddArgument('-d', '--output_dir') - gtest_group.AddArgument('-r', '--repeat') - gtest_group.AddArgument('--retry_failed') - gtest_group.AddArgument('--gtest_color') - gtest_group.AddArgument('--gtest_filter') - gtest_group.AddArgument('--gtest_also_run_disabled_tests', - action='store_true', default=None) - gtest_group.AddArgument('--timeout') - - # Syntax 'Nx' will be interpreted as N * number of cpu cores. - gtest_group.AddArgument('-w', '--workers', type=_ParseWorkersOption) - - # Needed when the test wants to store test artifacts, because it doesn't know - # what will be the swarming output dir. - parser.add_argument('--store-test-artifacts', action='store_true') - - # No-sandbox is a Chromium-specific flag, ignore it. - # TODO(oprypin): Remove (bugs.webrtc.org/8115) - parser.add_argument('--no-sandbox', action='store_true', - help=argparse.SUPPRESS) - - parser.add_argument('executable') - parser.add_argument('executable_args', nargs='*') - - options, unrecognized_args = parser.parse_known_args(argv) - - args_to_pass = [] - for arg in unrecognized_args: - if arg.startswith('--isolated-script-test-perf-output'): - arg_split = arg.split('=') - assert len(arg_split) == 2, 'You must use the = syntax for this flag.' - args_to_pass.append('--isolated_script_test_perf_output=' + arg_split[1]) + parser = argparse.ArgumentParser(argv) + + gtest_group = ReconstructibleArgumentGroup(parser, + 'Arguments to gtest-parallel') + # These options will be passed unchanged to gtest-parallel. 
+ gtest_group.AddArgument('-d', '--output_dir') + gtest_group.AddArgument('-r', '--repeat') + gtest_group.AddArgument('--retry_failed') + gtest_group.AddArgument('--gtest_color') + gtest_group.AddArgument('--gtest_filter') + gtest_group.AddArgument('--gtest_also_run_disabled_tests', + action='store_true', + default=None) + gtest_group.AddArgument('--timeout') + + # Syntax 'Nx' will be interpreted as N * number of cpu cores. + gtest_group.AddArgument('-w', '--workers', type=_ParseWorkersOption) + + # Needed when the test wants to store test artifacts, because it doesn't know + # what will be the swarming output dir. + parser.add_argument('--store-test-artifacts', action='store_true') + + # No-sandbox is a Chromium-specific flag, ignore it. + # TODO(oprypin): Remove (bugs.webrtc.org/8115) + parser.add_argument('--no-sandbox', + action='store_true', + help=argparse.SUPPRESS) + + parser.add_argument('executable') + parser.add_argument('executable_args', nargs='*') + + options, unrecognized_args = parser.parse_known_args(argv) + + args_to_pass = [] + for arg in unrecognized_args: + if arg.startswith('--isolated-script-test-perf-output'): + arg_split = arg.split('=') + assert len( + arg_split) == 2, 'You must use the = syntax for this flag.' 
+ args_to_pass.append('--isolated_script_test_perf_output=' + + arg_split[1]) + else: + args_to_pass.append(arg) + + executable_args = options.executable_args + args_to_pass + + if options.store_test_artifacts: + assert options.output_dir, ( + '--output_dir must be specified for storing test artifacts.') + test_artifacts_dir = os.path.join(options.output_dir, 'test_artifacts') + + executable_args.insert(0, + '--test_artifacts_dir=%s' % test_artifacts_dir) else: - args_to_pass.append(arg) - - executable_args = options.executable_args + args_to_pass - - if options.store_test_artifacts: - assert options.output_dir, ( - '--output_dir must be specified for storing test artifacts.') - test_artifacts_dir = os.path.join(options.output_dir, 'test_artifacts') - - executable_args.insert(0, '--test_artifacts_dir=%s' % test_artifacts_dir) - else: - test_artifacts_dir = None + test_artifacts_dir = None - gtest_parallel_args = gtest_group.RemakeCommandLine(options) + gtest_parallel_args = gtest_group.RemakeCommandLine(options) - # GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS must be removed from the - # environment. Otherwise it will be picked up by the binary, causing a bug - # where only tests in the first shard are executed. - test_env = os.environ.copy() - gtest_shard_index = test_env.pop('GTEST_SHARD_INDEX', '0') - gtest_total_shards = test_env.pop('GTEST_TOTAL_SHARDS', '1') + # GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS must be removed from the + # environment. Otherwise it will be picked up by the binary, causing a bug + # where only tests in the first shard are executed. 
+ test_env = os.environ.copy() + gtest_shard_index = test_env.pop('GTEST_SHARD_INDEX', '0') + gtest_total_shards = test_env.pop('GTEST_TOTAL_SHARDS', '1') - gtest_parallel_args.insert(0, '--shard_index=%s' % gtest_shard_index) - gtest_parallel_args.insert(1, '--shard_count=%s' % gtest_total_shards) + gtest_parallel_args.insert(0, '--shard_index=%s' % gtest_shard_index) + gtest_parallel_args.insert(1, '--shard_count=%s' % gtest_total_shards) - gtest_parallel_args.append(options.executable) - if executable_args: - gtest_parallel_args += ['--'] + executable_args + gtest_parallel_args.append(options.executable) + if executable_args: + gtest_parallel_args += ['--'] + executable_args - return Args(gtest_parallel_args, test_env, options.output_dir, - test_artifacts_dir) + return Args(gtest_parallel_args, test_env, options.output_dir, + test_artifacts_dir) def main(): - webrtc_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - gtest_parallel_path = os.path.join( - webrtc_root, 'third_party', 'gtest-parallel', 'gtest-parallel') + webrtc_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + gtest_parallel_path = os.path.join(webrtc_root, 'third_party', + 'gtest-parallel', 'gtest-parallel') - gtest_parallel_args, test_env, output_dir, test_artifacts_dir = ParseArgs() + gtest_parallel_args, test_env, output_dir, test_artifacts_dir = ParseArgs() - command = [ - sys.executable, - gtest_parallel_path, - ] + gtest_parallel_args + command = [ + sys.executable, + gtest_parallel_path, + ] + gtest_parallel_args - if output_dir and not os.path.isdir(output_dir): - os.makedirs(output_dir) - if test_artifacts_dir and not os.path.isdir(test_artifacts_dir): - os.makedirs(test_artifacts_dir) + if output_dir and not os.path.isdir(output_dir): + os.makedirs(output_dir) + if test_artifacts_dir and not os.path.isdir(test_artifacts_dir): + os.makedirs(test_artifacts_dir) - print 'gtest-parallel-wrapper: Executing command %s' % ' '.join(command) - 
sys.stdout.flush() + print 'gtest-parallel-wrapper: Executing command %s' % ' '.join(command) + sys.stdout.flush() - exit_code = subprocess.call(command, env=test_env, cwd=os.getcwd()) + exit_code = subprocess.call(command, env=test_env, cwd=os.getcwd()) - if output_dir: - for test_status in 'passed', 'failed', 'interrupted': - logs_dir = os.path.join(output_dir, 'gtest-parallel-logs', test_status) - if not os.path.isdir(logs_dir): - continue - logs = [os.path.join(logs_dir, log) for log in os.listdir(logs_dir)] - log_file = os.path.join(output_dir, '%s-tests.log' % test_status) - _CatFiles(logs, log_file) - os.rmdir(logs_dir) + if output_dir: + for test_status in 'passed', 'failed', 'interrupted': + logs_dir = os.path.join(output_dir, 'gtest-parallel-logs', + test_status) + if not os.path.isdir(logs_dir): + continue + logs = [ + os.path.join(logs_dir, log) for log in os.listdir(logs_dir) + ] + log_file = os.path.join(output_dir, '%s-tests.log' % test_status) + _CatFiles(logs, log_file) + os.rmdir(logs_dir) - if test_artifacts_dir: - shutil.make_archive(test_artifacts_dir, 'zip', test_artifacts_dir) - shutil.rmtree(test_artifacts_dir) + if test_artifacts_dir: + shutil.make_archive(test_artifacts_dir, 'zip', test_artifacts_dir) + shutil.rmtree(test_artifacts_dir) - return exit_code + return exit_code if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/gtest_parallel_wrapper_test.py b/tools_webrtc/gtest_parallel_wrapper_test.py index 26135e1abb..82cb75bc6a 100755 --- a/tools_webrtc/gtest_parallel_wrapper_test.py +++ b/tools_webrtc/gtest_parallel_wrapper_test.py @@ -21,149 +21,152 @@ @contextmanager def TemporaryDirectory(): - tmp_dir = tempfile.mkdtemp() - yield tmp_dir - os.rmdir(tmp_dir) + tmp_dir = tempfile.mkdtemp() + yield tmp_dir + os.rmdir(tmp_dir) class GtestParallelWrapperHelpersTest(unittest.TestCase): + def testGetWorkersAsIs(self): + # pylint: disable=protected-access + 
self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('12'), 12) - def testGetWorkersAsIs(self): - # pylint: disable=protected-access - self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('12'), 12) + def testGetTwiceWorkers(self): + expected = 2 * multiprocessing.cpu_count() + # pylint: disable=protected-access + self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('2x'), + expected) - def testGetTwiceWorkers(self): - expected = 2 * multiprocessing.cpu_count() - # pylint: disable=protected-access - self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('2x'), expected) - - def testGetHalfWorkers(self): - expected = max(multiprocessing.cpu_count() // 2, 1) - # pylint: disable=protected-access - self.assertEqual( - gtest_parallel_wrapper._ParseWorkersOption('0.5x'), expected) + def testGetHalfWorkers(self): + expected = max(multiprocessing.cpu_count() // 2, 1) + # pylint: disable=protected-access + self.assertEqual(gtest_parallel_wrapper._ParseWorkersOption('0.5x'), + expected) class GtestParallelWrapperTest(unittest.TestCase): - - @classmethod - def _Expected(cls, gtest_parallel_args): - return ['--shard_index=0', '--shard_count=1'] + gtest_parallel_args - - def testOverwrite(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--timeout=123', 'exec', '--timeout', '124']) - expected = self._Expected(['--timeout=124', 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) - - def testMixing(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--timeout=123', '--param1', 'exec', '--param2', '--timeout', '124']) - expected = self._Expected( - ['--timeout=124', 'exec', '--', '--param1', '--param2']) - self.assertEqual(result.gtest_parallel_args, expected) - - def testMixingPositional(self): - result = gtest_parallel_wrapper.ParseArgs([ - '--timeout=123', 'exec', '--foo1', 'bar1', '--timeout', '124', '--foo2', - 'bar2' - ]) - expected = self._Expected( - ['--timeout=124', 'exec', '--', '--foo1', 'bar1', '--foo2', 'bar2']) - 
self.assertEqual(result.gtest_parallel_args, expected) - - def testDoubleDash1(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--timeout', '123', 'exec', '--', '--timeout', '124']) - expected = self._Expected( - ['--timeout=123', 'exec', '--', '--timeout', '124']) - self.assertEqual(result.gtest_parallel_args, expected) - - def testDoubleDash2(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--timeout=123', '--', 'exec', '--timeout=124']) - expected = self._Expected(['--timeout=123', 'exec', '--', '--timeout=124']) - self.assertEqual(result.gtest_parallel_args, expected) - - def testArtifacts(self): - with TemporaryDirectory() as tmp_dir: - output_dir = os.path.join(tmp_dir, 'foo') - result = gtest_parallel_wrapper.ParseArgs( - ['exec', '--store-test-artifacts', '--output_dir', output_dir]) - exp_artifacts_dir = os.path.join(output_dir, 'test_artifacts') - exp = self._Expected([ - '--output_dir=' + output_dir, 'exec', '--', - '--test_artifacts_dir=' + exp_artifacts_dir - ]) - self.assertEqual(result.gtest_parallel_args, exp) - self.assertEqual(result.output_dir, output_dir) - self.assertEqual(result.test_artifacts_dir, exp_artifacts_dir) - - def testNoDirsSpecified(self): - result = gtest_parallel_wrapper.ParseArgs(['exec']) - self.assertEqual(result.output_dir, None) - self.assertEqual(result.test_artifacts_dir, None) - - def testOutputDirSpecified(self): - result = gtest_parallel_wrapper.ParseArgs( - ['exec', '--output_dir', '/tmp/foo']) - self.assertEqual(result.output_dir, '/tmp/foo') - self.assertEqual(result.test_artifacts_dir, None) - - def testShortArg(self): - result = gtest_parallel_wrapper.ParseArgs(['-d', '/tmp/foo', 'exec']) - expected = self._Expected(['--output_dir=/tmp/foo', 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) - self.assertEqual(result.output_dir, '/tmp/foo') - - def testBoolArg(self): - result = gtest_parallel_wrapper.ParseArgs( - ['--gtest_also_run_disabled_tests', 'exec']) - expected = 
self._Expected(['--gtest_also_run_disabled_tests', 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) - - def testNoArgs(self): - result = gtest_parallel_wrapper.ParseArgs(['exec']) - expected = self._Expected(['exec']) - self.assertEqual(result.gtest_parallel_args, expected) - - def testDocExample(self): - with TemporaryDirectory() as tmp_dir: - output_dir = os.path.join(tmp_dir, 'foo') - result = gtest_parallel_wrapper.ParseArgs([ - 'some_test', '--some_flag=some_value', '--another_flag', - '--output_dir=' + output_dir, '--store-test-artifacts', - '--isolated-script-test-perf-output=SOME_OTHER_DIR', '--foo=bar', - '--baz' - ]) - expected_artifacts_dir = os.path.join(output_dir, 'test_artifacts') - expected = self._Expected([ - '--output_dir=' + output_dir, - 'some_test', '--', '--test_artifacts_dir=' + expected_artifacts_dir, - '--some_flag=some_value', '--another_flag', - '--isolated_script_test_perf_output=SOME_OTHER_DIR', '--foo=bar', - '--baz' - ]) - self.assertEqual(result.gtest_parallel_args, expected) - - def testStandardWorkers(self): - """Check integer value is passed as-is.""" - result = gtest_parallel_wrapper.ParseArgs(['--workers', '17', 'exec']) - expected = self._Expected(['--workers=17', 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) - - def testTwoWorkersPerCpuCore(self): - result = gtest_parallel_wrapper.ParseArgs(['--workers', '2x', 'exec']) - workers = 2 * multiprocessing.cpu_count() - expected = self._Expected(['--workers=%s' % workers, 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) - - def testUseHalfTheCpuCores(self): - result = gtest_parallel_wrapper.ParseArgs(['--workers', '0.5x', 'exec']) - workers = max(multiprocessing.cpu_count() // 2, 1) - expected = self._Expected(['--workers=%s' % workers, 'exec']) - self.assertEqual(result.gtest_parallel_args, expected) + @classmethod + def _Expected(cls, gtest_parallel_args): + return ['--shard_index=0', '--shard_count=1'] + gtest_parallel_args + 
+ def testOverwrite(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--timeout=123', 'exec', '--timeout', '124']) + expected = self._Expected(['--timeout=124', 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) + + def testMixing(self): + result = gtest_parallel_wrapper.ParseArgs([ + '--timeout=123', '--param1', 'exec', '--param2', '--timeout', '124' + ]) + expected = self._Expected( + ['--timeout=124', 'exec', '--', '--param1', '--param2']) + self.assertEqual(result.gtest_parallel_args, expected) + + def testMixingPositional(self): + result = gtest_parallel_wrapper.ParseArgs([ + '--timeout=123', 'exec', '--foo1', 'bar1', '--timeout', '124', + '--foo2', 'bar2' + ]) + expected = self._Expected([ + '--timeout=124', 'exec', '--', '--foo1', 'bar1', '--foo2', 'bar2' + ]) + self.assertEqual(result.gtest_parallel_args, expected) + + def testDoubleDash1(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--timeout', '123', 'exec', '--', '--timeout', '124']) + expected = self._Expected( + ['--timeout=123', 'exec', '--', '--timeout', '124']) + self.assertEqual(result.gtest_parallel_args, expected) + + def testDoubleDash2(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--timeout=123', '--', 'exec', '--timeout=124']) + expected = self._Expected( + ['--timeout=123', 'exec', '--', '--timeout=124']) + self.assertEqual(result.gtest_parallel_args, expected) + + def testArtifacts(self): + with TemporaryDirectory() as tmp_dir: + output_dir = os.path.join(tmp_dir, 'foo') + result = gtest_parallel_wrapper.ParseArgs( + ['exec', '--store-test-artifacts', '--output_dir', output_dir]) + exp_artifacts_dir = os.path.join(output_dir, 'test_artifacts') + exp = self._Expected([ + '--output_dir=' + output_dir, 'exec', '--', + '--test_artifacts_dir=' + exp_artifacts_dir + ]) + self.assertEqual(result.gtest_parallel_args, exp) + self.assertEqual(result.output_dir, output_dir) + self.assertEqual(result.test_artifacts_dir, exp_artifacts_dir) + + def 
testNoDirsSpecified(self): + result = gtest_parallel_wrapper.ParseArgs(['exec']) + self.assertEqual(result.output_dir, None) + self.assertEqual(result.test_artifacts_dir, None) + + def testOutputDirSpecified(self): + result = gtest_parallel_wrapper.ParseArgs( + ['exec', '--output_dir', '/tmp/foo']) + self.assertEqual(result.output_dir, '/tmp/foo') + self.assertEqual(result.test_artifacts_dir, None) + + def testShortArg(self): + result = gtest_parallel_wrapper.ParseArgs(['-d', '/tmp/foo', 'exec']) + expected = self._Expected(['--output_dir=/tmp/foo', 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) + self.assertEqual(result.output_dir, '/tmp/foo') + + def testBoolArg(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--gtest_also_run_disabled_tests', 'exec']) + expected = self._Expected(['--gtest_also_run_disabled_tests', 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) + + def testNoArgs(self): + result = gtest_parallel_wrapper.ParseArgs(['exec']) + expected = self._Expected(['exec']) + self.assertEqual(result.gtest_parallel_args, expected) + + def testDocExample(self): + with TemporaryDirectory() as tmp_dir: + output_dir = os.path.join(tmp_dir, 'foo') + result = gtest_parallel_wrapper.ParseArgs([ + 'some_test', '--some_flag=some_value', '--another_flag', + '--output_dir=' + output_dir, '--store-test-artifacts', + '--isolated-script-test-perf-output=SOME_OTHER_DIR', + '--foo=bar', '--baz' + ]) + expected_artifacts_dir = os.path.join(output_dir, 'test_artifacts') + expected = self._Expected([ + '--output_dir=' + output_dir, 'some_test', '--', + '--test_artifacts_dir=' + expected_artifacts_dir, + '--some_flag=some_value', '--another_flag', + '--isolated_script_test_perf_output=SOME_OTHER_DIR', + '--foo=bar', '--baz' + ]) + self.assertEqual(result.gtest_parallel_args, expected) + + def testStandardWorkers(self): + """Check integer value is passed as-is.""" + result = gtest_parallel_wrapper.ParseArgs(['--workers', '17', 'exec']) + 
expected = self._Expected(['--workers=17', 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) + + def testTwoWorkersPerCpuCore(self): + result = gtest_parallel_wrapper.ParseArgs(['--workers', '2x', 'exec']) + workers = 2 * multiprocessing.cpu_count() + expected = self._Expected(['--workers=%s' % workers, 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) + + def testUseHalfTheCpuCores(self): + result = gtest_parallel_wrapper.ParseArgs( + ['--workers', '0.5x', 'exec']) + workers = max(multiprocessing.cpu_count() // 2, 1) + expected = self._Expected(['--workers=%s' % workers, 'exec']) + self.assertEqual(result.gtest_parallel_args, expected) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/ios/build_ios_libs.py b/tools_webrtc/ios/build_ios_libs.py index 538bbad2e8..937fadf716 100755 --- a/tools_webrtc/ios/build_ios_libs.py +++ b/tools_webrtc/ios/build_ios_libs.py @@ -7,8 +7,7 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - -"""WebRTC iOS FAT libraries build script. +"""WebRTC iOS XCFramework build script. Each architecture is compiled separately before being merged together. By default, the library is created in out_ios_libs/. (Change with -o.) 
""" @@ -21,7 +20,6 @@ import subprocess import sys - os.environ['PATH'] = '/usr/libexec' + os.pathsep + os.environ['PATH'] SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -32,7 +30,16 @@ SDK_OUTPUT_DIR = os.path.join(SRC_DIR, 'out_ios_libs') SDK_FRAMEWORK_NAME = 'WebRTC.framework' -DEFAULT_ARCHS = ENABLED_ARCHS = ['arm64', 'arm', 'x64', 'x86'] +SDK_DSYM_NAME = 'WebRTC.dSYM' +SDK_XCFRAMEWORK_NAME = 'WebRTC.xcframework' + +ENABLED_ARCHS = [ + 'device:arm64', 'device:arm', 'simulator:arm64', 'simulator:x64', 'simulator:x86', + 'arm64', 'x64' +] +DEFAULT_ARCHS = [ + 'device:arm64', 'device:arm', 'simulator:arm64', 'simulator:x64', 'simulator:x86' +] IOS_DEPLOYMENT_TARGET = '9.0' LIBVPX_BUILD_VP9 = True @@ -41,198 +48,292 @@ def _ParseArgs(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('--build_config', default='release', - choices=['debug', 'release'], - help='The build config. Can be "debug" or "release". ' - 'Defaults to "release".') - parser.add_argument('--arch', nargs='+', default=DEFAULT_ARCHS, - choices=ENABLED_ARCHS, - help='Architectures to build. Defaults to %(default)s.') - parser.add_argument('-c', '--clean', action='store_true', default=False, - help='Removes the previously generated build output, if any.') - parser.add_argument('-p', '--purify', action='store_true', default=False, - help='Purifies the previously generated build output by ' - 'removing the temporary results used when (re)building.') - parser.add_argument('-o', '--output-dir', default=SDK_OUTPUT_DIR, - help='Specifies a directory to output the build artifacts to. 
' - 'If specified together with -c, deletes the dir.') - parser.add_argument('-r', '--revision', type=int, default=0, - help='Specifies a revision number to embed if building the framework.') - parser.add_argument('-e', '--bitcode', action='store_true', default=True, - help='Compile with bitcode.') - parser.add_argument('--verbose', action='store_true', default=False, - help='Debug logging.') - parser.add_argument('--use-goma', action='store_true', default=False, - help='Use goma to build.') - parser.add_argument('--extra-gn-args', default=[], nargs='*', - help='Additional GN args to be used during Ninja generation.') - - return parser.parse_args() + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument('--build_config', + default='release', + choices=['debug', 'release'], + help='The build config. Can be "debug" or "release". ' + 'Defaults to "release".') + parser.add_argument( + '--arch', + nargs='+', + default=DEFAULT_ARCHS, + choices=ENABLED_ARCHS, + help='Architectures to build. Defaults to %(default)s.') + parser.add_argument( + '-c', + '--clean', + action='store_true', + default=False, + help='Removes the previously generated build output, if any.') + parser.add_argument( + '-p', + '--purify', + action='store_true', + default=False, + help='Purifies the previously generated build output by ' + 'removing the temporary results used when (re)building.') + parser.add_argument( + '-o', + '--output-dir', + type=os.path.abspath, + default=SDK_OUTPUT_DIR, + help='Specifies a directory to output the build artifacts to. 
' + 'If specified together with -c, deletes the dir.') + parser.add_argument( + '-r', + '--revision', + type=int, + default=0, + help='Specifies a revision number to embed if building the framework.') + parser.add_argument('-e', + '--bitcode', + action='store_true', + default=True, + help='Compile with bitcode.') + parser.add_argument('--verbose', + action='store_true', + default=False, + help='Debug logging.') + parser.add_argument('--use-goma', + action='store_true', + default=False, + help='Use goma to build.') + parser.add_argument( + '--extra-gn-args', + default=[], + nargs='*', + help='Additional GN args to be used during Ninja generation.') + + return parser.parse_args() def _RunCommand(cmd): - logging.debug('Running: %r', cmd) - subprocess.check_call(cmd, cwd=SRC_DIR) + logging.debug('Running: %r', cmd) + subprocess.check_call(cmd, cwd=SRC_DIR) def _CleanArtifacts(output_dir): - if os.path.isdir(output_dir): - logging.info('Deleting %s', output_dir) - shutil.rmtree(output_dir) + if os.path.isdir(output_dir): + logging.info('Deleting %s', output_dir) + shutil.rmtree(output_dir) def _CleanTemporary(output_dir, architectures): - if os.path.isdir(output_dir): - logging.info('Removing temporary build files.') - for arch in architectures: - arch_lib_path = os.path.join(output_dir, arch + '_libs') - if os.path.isdir(arch_lib_path): - shutil.rmtree(arch_lib_path) - - -def BuildWebRTC(output_dir, target_arch, flavor, gn_target_name, - ios_deployment_target, libvpx_build_vp9, use_bitcode, - use_goma, extra_gn_args): - output_dir = os.path.join(output_dir, target_arch + '_libs') - gn_args = ['target_os="ios"', 'ios_enable_code_signing=false', - 'use_xcode_clang=true', 'is_component_build=false'] - - # Add flavor option. 
- if flavor == 'debug': - gn_args.append('is_debug=true') - elif flavor == 'release': - gn_args.append('is_debug=false') - else: - raise ValueError('Unexpected flavor type: %s' % flavor) - - gn_args.append('target_cpu="%s"' % target_arch) - - gn_args.append('ios_deployment_target="%s"' % ios_deployment_target) - - gn_args.append('rtc_libvpx_build_vp9=' + - ('true' if libvpx_build_vp9 else 'false')) - - gn_args.append('enable_ios_bitcode=' + - ('true' if use_bitcode else 'false')) - gn_args.append('use_goma=' + ('true' if use_goma else 'false')) - - args_string = ' '.join(gn_args + extra_gn_args) - logging.info('Building WebRTC with args: %s', args_string) - - cmd = [ - sys.executable, - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), - 'gen', - output_dir, - '--args=' + args_string, - ] - _RunCommand(cmd) - logging.info('Building target: %s', gn_target_name) - - cmd = [ - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), - '-C', - output_dir, - gn_target_name, - ] - if use_goma: - cmd.extend(['-j', '200']) - _RunCommand(cmd) + if os.path.isdir(output_dir): + logging.info('Removing temporary build files.') + for arch in architectures: + arch_lib_path = os.path.join(output_dir, arch) + if os.path.isdir(arch_lib_path): + shutil.rmtree(arch_lib_path) -def main(): - args = _ParseArgs() - - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - if args.clean: - _CleanArtifacts(args.output_dir) - return 0 +def _ParseArchitecture(architectures): + result = dict() + for arch in architectures: + if ":" in arch: + target_environment, target_cpu = arch.split(":") + else: + logging.warning('The environment for build is not specified.') + logging.warning('It is assumed based on cpu type.') + logging.warning('See crbug.com/1138425 for more details.') + if arch == "x64": + target_environment = "simulator" + else: + target_environment = "device" + target_cpu = arch + archs = result.get(target_environment) + if archs is None: + 
result[target_environment] = {target_cpu} + else: + archs.add(target_cpu) + + return result + + +def BuildWebRTC(output_dir, target_environment, target_arch, flavor, + gn_target_name, ios_deployment_target, libvpx_build_vp9, + use_bitcode, use_goma, extra_gn_args): + gn_args = [ + 'target_os="ios"', 'ios_enable_code_signing=false', + 'use_xcode_clang=true', 'is_component_build=false', + 'rtc_include_tests=false', + ] + + # Add flavor option. + if flavor == 'debug': + gn_args.append('is_debug=true') + elif flavor == 'release': + gn_args.append('is_debug=false') + else: + raise ValueError('Unexpected flavor type: %s' % flavor) + + gn_args.append('target_environment="%s"' % target_environment) + + gn_args.append('target_cpu="%s"' % target_arch) + + gn_args.append('ios_deployment_target="%s"' % ios_deployment_target) + + gn_args.append('rtc_libvpx_build_vp9=' + + ('true' if libvpx_build_vp9 else 'false')) + + gn_args.append('enable_ios_bitcode=' + + ('true' if use_bitcode else 'false')) + gn_args.append('use_goma=' + ('true' if use_goma else 'false')) + gn_args.append('rtc_enable_symbol_export=true') + + args_string = ' '.join(gn_args + extra_gn_args) + logging.info('Building WebRTC with args: %s', args_string) + + cmd = [ + sys.executable, + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), + 'gen', + output_dir, + '--args=' + args_string, + ] + _RunCommand(cmd) + logging.info('Building target: %s', gn_target_name) + + cmd = [ + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja'), + '-C', + output_dir, + gn_target_name, + ] + if use_goma: + cmd.extend(['-j', '200']) + _RunCommand(cmd) - architectures = list(args.arch) - gn_args = args.extra_gn_args - if args.purify: - _CleanTemporary(args.output_dir, architectures) - return 0 +def main(): + args = _ParseArgs() + + logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) + + if args.clean: + _CleanArtifacts(args.output_dir) + return 0 + + # architectures is typed as Dict[str, Set[str]], + 
# where key is for the environment (device or simulator) + # and value is for the cpu type. + architectures = _ParseArchitecture(args.arch) + gn_args = args.extra_gn_args + + if args.purify: + _CleanTemporary(args.output_dir, architectures.keys()) + return 0 + + gn_target_name = 'framework_objc' + if not args.bitcode: + gn_args.append('enable_dsyms=true') + gn_args.append('enable_stripping=true') + + # Build all architectures. + framework_paths = [] + all_lib_paths = [] + for (environment, archs) in architectures.items(): + framework_path = os.path.join(args.output_dir, environment) + framework_paths.append(framework_path) + lib_paths = [] + for arch in archs: + lib_path = os.path.join(framework_path, arch + '_libs') + lib_paths.append(lib_path) + BuildWebRTC(lib_path, environment, arch, args.build_config, + gn_target_name, IOS_DEPLOYMENT_TARGET, + LIBVPX_BUILD_VP9, args.bitcode, args.use_goma, gn_args) + all_lib_paths.extend(lib_paths) + + # Combine the slices. + dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC') + # Dylibs will be combined, all other files are the same across archs. + # Use distutils instead of shutil to support merging folders. + distutils.dir_util.copy_tree( + os.path.join(lib_paths[0], SDK_FRAMEWORK_NAME), + os.path.join(framework_path, SDK_FRAMEWORK_NAME)) + logging.info('Merging framework slices for %s.', environment) + dylib_paths = [os.path.join(path, dylib_path) for path in lib_paths] + out_dylib_path = os.path.join(framework_path, dylib_path) + try: + os.remove(out_dylib_path) + except OSError: + pass + cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path] + _RunCommand(cmd) + + # Merge the dSYM slices. 
+ lib_dsym_dir_path = os.path.join(lib_paths[0], SDK_DSYM_NAME) + if os.path.isdir(lib_dsym_dir_path): + distutils.dir_util.copy_tree( + lib_dsym_dir_path, os.path.join(framework_path, SDK_DSYM_NAME)) + logging.info('Merging dSYM slices.') + dsym_path = os.path.join(SDK_DSYM_NAME, 'Contents', 'Resources', + 'DWARF', 'WebRTC') + lib_dsym_paths = [ + os.path.join(path, dsym_path) for path in lib_paths + ] + out_dsym_path = os.path.join(framework_path, dsym_path) + try: + os.remove(out_dsym_path) + except OSError: + pass + cmd = ['lipo' + ] + lib_dsym_paths + ['-create', '-output', out_dsym_path] + _RunCommand(cmd) + + # Modify the version number. + # Format should be ... + # e.g. 55.0.14986 means + # branch cut 55, no hotfixes, and revision 14986. + infoplist_path = os.path.join(framework_path, SDK_FRAMEWORK_NAME, + 'Info.plist') + cmd = [ + 'PlistBuddy', '-c', 'Print :CFBundleShortVersionString', + infoplist_path + ] + major_minor = subprocess.check_output(cmd).strip() + version_number = '%s.%s' % (major_minor, args.revision) + logging.info('Substituting revision number: %s', version_number) + cmd = [ + 'PlistBuddy', '-c', 'Set :CFBundleVersion ' + version_number, + infoplist_path + ] + _RunCommand(cmd) + _RunCommand(['plutil', '-convert', 'binary1', infoplist_path]) + + xcframework_dir = os.path.join(args.output_dir, SDK_XCFRAMEWORK_NAME) + if os.path.isdir(xcframework_dir): + shutil.rmtree(xcframework_dir) + + logging.info('Creating xcframework.') + cmd = ['xcodebuild', '-create-xcframework', '-output', xcframework_dir] + + # Apparently, xcodebuild needs absolute paths for input arguments + for framework_path in framework_paths: + cmd += [ + '-framework', + os.path.abspath(os.path.join(framework_path, SDK_FRAMEWORK_NAME)) + ] + + if os.path.exists(os.path.join(framework_path, SDK_DSYM_NAME)): + cmd += [ + '-debug-symbols', + os.path.abspath(os.path.join(framework_path, SDK_DSYM_NAME)) + ] - gn_target_name = 'framework_objc' - if not args.bitcode: - 
gn_args.append('enable_dsyms=true') - gn_args.append('enable_stripping=true') - - - # Build all architectures. - for arch in architectures: - BuildWebRTC(args.output_dir, arch, args.build_config, gn_target_name, - IOS_DEPLOYMENT_TARGET, LIBVPX_BUILD_VP9, args.bitcode, - args.use_goma, gn_args) - - # Create FAT archive. - lib_paths = [os.path.join(args.output_dir, arch + '_libs') - for arch in architectures] - - # Combine the slices. - dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC') - # Dylibs will be combined, all other files are the same across archs. - # Use distutils instead of shutil to support merging folders. - distutils.dir_util.copy_tree( - os.path.join(lib_paths[0], SDK_FRAMEWORK_NAME), - os.path.join(args.output_dir, SDK_FRAMEWORK_NAME)) - logging.info('Merging framework slices.') - dylib_paths = [os.path.join(path, dylib_path) for path in lib_paths] - out_dylib_path = os.path.join(args.output_dir, dylib_path) - try: - os.remove(out_dylib_path) - except OSError: - pass - cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path] - _RunCommand(cmd) - - # Merge the dSYM slices. - lib_dsym_dir_path = os.path.join(lib_paths[0], 'WebRTC.dSYM') - if os.path.isdir(lib_dsym_dir_path): - distutils.dir_util.copy_tree(lib_dsym_dir_path, - os.path.join(args.output_dir, 'WebRTC.dSYM')) - logging.info('Merging dSYM slices.') - dsym_path = os.path.join('WebRTC.dSYM', 'Contents', 'Resources', 'DWARF', - 'WebRTC') - lib_dsym_paths = [os.path.join(path, dsym_path) for path in lib_paths] - out_dsym_path = os.path.join(args.output_dir, dsym_path) - try: - os.remove(out_dsym_path) - except OSError: - pass - cmd = ['lipo'] + lib_dsym_paths + ['-create', '-output', out_dsym_path] _RunCommand(cmd) # Generate the license file. 
- ninja_dirs = [os.path.join(args.output_dir, arch + '_libs') - for arch in architectures] + logging.info('Generate license file.') gn_target_full_name = '//sdk:' + gn_target_name - builder = LicenseBuilder(ninja_dirs, [gn_target_full_name]) + builder = LicenseBuilder(all_lib_paths, [gn_target_full_name]) builder.GenerateLicenseText( - os.path.join(args.output_dir, SDK_FRAMEWORK_NAME)) - - - # Modify the version number. - # Format should be ... - # e.g. 55.0.14986 means branch cut 55, no hotfixes, and revision 14986. - infoplist_path = os.path.join(args.output_dir, SDK_FRAMEWORK_NAME, - 'Info.plist') - cmd = ['PlistBuddy', '-c', - 'Print :CFBundleShortVersionString', infoplist_path] - major_minor = subprocess.check_output(cmd).strip() - version_number = '%s.%s' % (major_minor, args.revision) - logging.info('Substituting revision number: %s', version_number) - cmd = ['PlistBuddy', '-c', - 'Set :CFBundleVersion ' + version_number, infoplist_path] - _RunCommand(cmd) - _RunCommand(['plutil', '-convert', 'binary1', infoplist_path]) + os.path.join(args.output_dir, SDK_XCFRAMEWORK_NAME)) - logging.info('Done.') - return 0 + logging.info('Done.') + return 0 if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/ios/generate_modulemap.py b/tools_webrtc/ios/generate_modulemap.py index 45bd3d875e..4609385c38 100644 --- a/tools_webrtc/ios/generate_modulemap.py +++ b/tools_webrtc/ios/generate_modulemap.py @@ -9,24 +9,24 @@ import argparse import sys + def GenerateModulemap(): - parser = argparse.ArgumentParser(description='Generate modulemap') - parser.add_argument("-o", "--out", type=str, help="Output file.") - parser.add_argument("-n", "--name", type=str, help="Name of binary.") + parser = argparse.ArgumentParser(description='Generate modulemap') + parser.add_argument("-o", "--out", type=str, help="Output file.") + parser.add_argument("-n", "--name", type=str, help="Name of binary.") - args = parser.parse_args() + args = parser.parse_args() 
- with open(args.out, "w") as outfile: - module_template = 'framework module %s {\n' \ - ' umbrella header "%s.h"\n' \ - '\n' \ - ' export *\n' \ - ' module * { export * }\n' \ - '}\n' % (args.name, args.name) - outfile.write(module_template) - return 0 + with open(args.out, "w") as outfile: + module_template = 'framework module %s {\n' \ + ' umbrella header "%s.h"\n' \ + '\n' \ + ' export *\n' \ + ' module * { export * }\n' \ + '}\n' % (args.name, args.name) + outfile.write(module_template) + return 0 if __name__ == '__main__': - sys.exit(GenerateModulemap()) - + sys.exit(GenerateModulemap()) diff --git a/tools_webrtc/ios/generate_umbrella_header.py b/tools_webrtc/ios/generate_umbrella_header.py index 3549735eb6..4c700a1c31 100644 --- a/tools_webrtc/ios/generate_umbrella_header.py +++ b/tools_webrtc/ios/generate_umbrella_header.py @@ -14,15 +14,20 @@ def GenerateUmbrellaHeader(): - parser = argparse.ArgumentParser(description='Generate umbrella header') - parser.add_argument("-o", "--out", type=str, help="Output file.") - parser.add_argument("-s", "--sources", default=[], type=str, nargs='+', - help="Headers to include.") - - args = parser.parse_args() - - with open(args.out, "w") as outfile: - outfile.write(textwrap.dedent("""\ + parser = argparse.ArgumentParser(description='Generate umbrella header') + parser.add_argument("-o", "--out", type=str, help="Output file.") + parser.add_argument("-s", + "--sources", + default=[], + type=str, + nargs='+', + help="Headers to include.") + + args = parser.parse_args() + + with open(args.out, "w") as outfile: + outfile.write( + textwrap.dedent("""\ /* * Copyright %d The WebRTC project authors. All Rights Reserved. * @@ -33,11 +38,11 @@ def GenerateUmbrellaHeader(): * be found in the AUTHORS file in the root of the source tree. 
*/\n\n""" % datetime.datetime.now().year)) - for s in args.sources: - outfile.write("#import \n".format(os.path.basename(s))) + for s in args.sources: + outfile.write("#import \n".format(os.path.basename(s))) - return 0 + return 0 if __name__ == '__main__': - sys.exit(GenerateUmbrellaHeader()) + sys.exit(GenerateUmbrellaHeader()) diff --git a/tools_webrtc/ios/merge_ios_libs.py b/tools_webrtc/ios/merge_ios_libs.py index 651024eb6a..31ffc1ddd5 100755 --- a/tools_webrtc/ios/merge_ios_libs.py +++ b/tools_webrtc/ios/merge_ios_libs.py @@ -7,7 +7,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Script for merging generated iOS libraries.""" import sys @@ -22,7 +21,7 @@ def MergeLibs(lib_base_dir): - """Merges generated iOS libraries for different archs. + """Merges generated iOS libraries for different archs. Uses libtool to generate FAT archive files for each generated library. @@ -33,92 +32,96 @@ def MergeLibs(lib_base_dir): Returns: Exit code of libtool. """ - output_dir_name = 'fat_libs' - archs = [arch for arch in os.listdir(lib_base_dir) - if arch in VALID_ARCHS] - # For each arch, find (library name, libary path) for arch. We will merge - # all libraries with the same name. 
- libs = {} - for lib_dir in [os.path.join(lib_base_dir, arch) for arch in VALID_ARCHS]: - if not os.path.exists(lib_dir): - continue - for dirpath, _, filenames in os.walk(lib_dir): - for filename in filenames: - if not filename.endswith('.a'): - continue - entry = libs.get(filename, []) - entry.append(os.path.join(dirpath, filename)) - libs[filename] = entry - orphaned_libs = {} - valid_libs = {} - for library, paths in libs.items(): - if len(paths) < len(archs): - orphaned_libs[library] = paths - else: - valid_libs[library] = paths - for library, paths in orphaned_libs.items(): - components = library[:-2].split('_')[:-1] - found = False - # Find directly matching parent libs by stripping suffix. - while components and not found: - parent_library = '_'.join(components) + '.a' - if parent_library in valid_libs: - valid_libs[parent_library].extend(paths) - found = True - break - components = components[:-1] - # Find next best match by finding parent libs with the same prefix. - if not found: - base_prefix = library[:-2].split('_')[0] - for valid_lib, valid_paths in valid_libs.items(): - if valid_lib[:len(base_prefix)] == base_prefix: - valid_paths.extend(paths) - found = True - break - assert found - - # Create output directory. - output_dir_path = os.path.join(lib_base_dir, output_dir_name) - if not os.path.exists(output_dir_path): - os.mkdir(output_dir_path) - - # Use this so libtool merged binaries are always the same. - env = os.environ.copy() - env['ZERO_AR_DATE'] = '1' - - # Ignore certain errors. - libtool_re = re.compile(r'^.*libtool:.*file: .* has no symbols$') - - # Merge libraries using libtool. 
- libtool_returncode = 0 - for library, paths in valid_libs.items(): - cmd_list = ['libtool', '-static', '-v', '-o', - os.path.join(output_dir_path, library)] + paths - libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env) - _, err = libtoolout.communicate() - for line in err.splitlines(): - if not libtool_re.match(line): - print >>sys.stderr, line - # Unconditionally touch the output .a file on the command line if present - # and the command succeeded. A bit hacky. - libtool_returncode = libtoolout.returncode - if not libtool_returncode: - for i in range(len(cmd_list) - 1): - if cmd_list[i] == '-o' and cmd_list[i+1].endswith('.a'): - os.utime(cmd_list[i+1], None) - break - return libtool_returncode + output_dir_name = 'fat_libs' + archs = [arch for arch in os.listdir(lib_base_dir) if arch in VALID_ARCHS] + # For each arch, find (library name, libary path) for arch. We will merge + # all libraries with the same name. + libs = {} + for lib_dir in [os.path.join(lib_base_dir, arch) for arch in VALID_ARCHS]: + if not os.path.exists(lib_dir): + continue + for dirpath, _, filenames in os.walk(lib_dir): + for filename in filenames: + if not filename.endswith('.a'): + continue + entry = libs.get(filename, []) + entry.append(os.path.join(dirpath, filename)) + libs[filename] = entry + orphaned_libs = {} + valid_libs = {} + for library, paths in libs.items(): + if len(paths) < len(archs): + orphaned_libs[library] = paths + else: + valid_libs[library] = paths + for library, paths in orphaned_libs.items(): + components = library[:-2].split('_')[:-1] + found = False + # Find directly matching parent libs by stripping suffix. + while components and not found: + parent_library = '_'.join(components) + '.a' + if parent_library in valid_libs: + valid_libs[parent_library].extend(paths) + found = True + break + components = components[:-1] + # Find next best match by finding parent libs with the same prefix. 
+ if not found: + base_prefix = library[:-2].split('_')[0] + for valid_lib, valid_paths in valid_libs.items(): + if valid_lib[:len(base_prefix)] == base_prefix: + valid_paths.extend(paths) + found = True + break + assert found + + # Create output directory. + output_dir_path = os.path.join(lib_base_dir, output_dir_name) + if not os.path.exists(output_dir_path): + os.mkdir(output_dir_path) + + # Use this so libtool merged binaries are always the same. + env = os.environ.copy() + env['ZERO_AR_DATE'] = '1' + + # Ignore certain errors. + libtool_re = re.compile(r'^.*libtool:.*file: .* has no symbols$') + + # Merge libraries using libtool. + libtool_returncode = 0 + for library, paths in valid_libs.items(): + cmd_list = [ + 'libtool', '-static', '-v', '-o', + os.path.join(output_dir_path, library) + ] + paths + libtoolout = subprocess.Popen(cmd_list, + stderr=subprocess.PIPE, + env=env) + _, err = libtoolout.communicate() + for line in err.splitlines(): + if not libtool_re.match(line): + print >> sys.stderr, line + # Unconditionally touch the output .a file on the command line if present + # and the command succeeded. A bit hacky. + libtool_returncode = libtoolout.returncode + if not libtool_returncode: + for i in range(len(cmd_list) - 1): + if cmd_list[i] == '-o' and cmd_list[i + 1].endswith('.a'): + os.utime(cmd_list[i + 1], None) + break + return libtool_returncode def Main(): - parser_description = 'Merge WebRTC libraries.' - parser = argparse.ArgumentParser(description=parser_description) - parser.add_argument('lib_base_dir', - help='Directory with built libraries. ', - type=str) - args = parser.parse_args() - lib_base_dir = args.lib_base_dir - MergeLibs(lib_base_dir) + parser_description = 'Merge WebRTC libraries.' + parser = argparse.ArgumentParser(description=parser_description) + parser.add_argument('lib_base_dir', + help='Directory with built libraries. 
', + type=str) + args = parser.parse_args() + lib_base_dir = args.lib_base_dir + MergeLibs(lib_base_dir) + if __name__ == '__main__': - sys.exit(Main()) + sys.exit(Main()) diff --git a/tools_webrtc/iwyu/apply-iwyu b/tools_webrtc/iwyu/apply-iwyu new file mode 100755 index 0000000000..a26f46b933 --- /dev/null +++ b/tools_webrtc/iwyu/apply-iwyu @@ -0,0 +1,74 @@ +#!/bin/sh +# +# Run the include-what-you-use tool (iwyu) on a file in the webrtc source +# directory. +# +# The script uses a subsequent grep pass to remove #include files from .cc +# that are also in the .h file, or are problematic to include. +# +# To get iwyu on Debian/glinux, do "sudo apt-get install iwyu". + +set -e +set -x +FILE=$1 +# If you want to exclude files that are in $FILE.h from $FILE.cc, set +# the following variable to "yes". This is a style guide violation. +REMOVE_CC_INCLUDES=no + +if [ ! -f $FILE ]; then + # See if we have the root name of a .cc/.h pair + if [ ! -f $FILE.h ]; then + echo "$FILE.h not found" + exit 1 + fi + FILE_H=$FILE.h + if [ ! -f $FILE.cc ]; then + echo "$FILE.cc not found" + exit 1 + fi + FILE_CC=$FILE.cc +else + # Exact file, no .h file + FILE_CC=$FILE + FILE_H="" +fi + +# IWYU has a confusing set of exit codes. Discard it. +iwyu -Xiwyu --no_fwd_decls -D__X86_64__ -DWEBRTC_POSIX -I . \ + -I third_party/abseil-cpp \ + -I third_party/googletest/src/googlemock/include \ + -I third_party/googletest/src/googletest/include \ + $FILE_CC >& /tmp/includefixes$$ || echo "IWYU done, code $?" 
+ +if grep 'fatal error' /tmp/includefixes$$; then + echo "iwyu run failed" + cat /tmp/includefixes$$ + rm /tmp/includefixes$$ + exit 1 +else + fix_include < /tmp/includefixes$$ || echo "Some files modified" + rm /tmp/includefixes$$ +fi + +if [ $REMOVE_CC_INCLUDES == "yes" ]; then + if [ -n "$FILE_H" ]; then + # Don't include in .cc what's already included in .h + grep ^#include $FILE_H | grep -v -f - $FILE_CC > $FILE_CC.new + else + cp $FILE_CC $FILE_CC.new + fi + # Don't include stuff on the banlist + grep -v -f tools_webrtc/iwyu/iwyu-filter-list $FILE_CC.new > $FILE_CC + rm $FILE.ccnew +else + grep -v -f tools_webrtc/iwyu/iwyu-filter-list $FILE_CC > $FILE_CC.new + mv $FILE_CC.new $FILE_CC +fi +if [ -n "$FILE_H" ]; then + grep -v -f tools_webrtc/iwyu/iwyu-filter-list $FILE_H > $FILE_H.new + mv $FILE_H.new $FILE_H +fi + +echo "Finished. Check diff, compile and git cl format before uploading." + + diff --git a/tools_webrtc/iwyu/iwyu b/tools_webrtc/iwyu/iwyu new file mode 100755 index 0000000000..7bbc69d496 --- /dev/null +++ b/tools_webrtc/iwyu/iwyu @@ -0,0 +1,11 @@ +#!/bin/bash +# +# Run IWYU against a single webrtc source file. +# +# To get iwyu on Debian/glinux, do "sudo apt-get install iwyu". +# +# To apply the changes suggested blindly, do tools/iwyu |& fix_include +# +# Doing "tools/iwyu filename.cc" will check both the .cc and .h file. +# +iwyu -Xiwyu --no_fwd_decls -D__X86_64__ -DWEBRTC_POSIX -I . -I third_party/abseil-cpp $@ diff --git a/tools_webrtc/iwyu/iwyu-filter-list b/tools_webrtc/iwyu/iwyu-filter-list new file mode 100644 index 0000000000..0c0c69558b --- /dev/null +++ b/tools_webrtc/iwyu/iwyu-filter-list @@ -0,0 +1,6 @@ +# These are lines that apply-iwyu will prevent from being added to a +# file. They are lines that refer to files that are conditionally included +# in certain configurations. 
+#include +#include +#include diff --git a/tools_webrtc/libs/generate_licenses.py b/tools_webrtc/libs/generate_licenses.py index 04d655cba0..cbb1514d3c 100755 --- a/tools_webrtc/libs/generate_licenses.py +++ b/tools_webrtc/libs/generate_licenses.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright 2016 The WebRTC project authors. All Rights Reserved. # @@ -23,12 +23,16 @@ import sys import argparse -import cgi import json import logging import os import re import subprocess +try: + # python 3.2+ + from html import escape +except ImportError: + from cgi import escape # Third_party library to licences mapping. Keys are names of the libraries # (right after the `third_party/` prefix) @@ -36,14 +40,19 @@ 'abseil-cpp': ['third_party/abseil-cpp/LICENSE'], 'android_ndk': ['third_party/android_ndk/NOTICE'], 'android_sdk': ['third_party/android_sdk/LICENSE'], - 'auto': ['third_party/android_deps/libs/' - 'com_google_auto_service_auto_service/LICENSE'], + 'auto': [ + 'third_party/android_deps/libs/' + 'com_google_auto_service_auto_service/LICENSE' + ], 'bazel': ['third_party/bazel/LICENSE'], 'boringssl': ['third_party/boringssl/src/LICENSE'], - 'errorprone': ['third_party/android_deps/libs/' - 'com_google_errorprone_error_prone_core/LICENSE'], + 'crc32c': ['third_party/crc32c/src/LICENSE'], + 'errorprone': [ + 'third_party/android_deps/libs/' + 'com_google_errorprone_error_prone_core/LICENSE' + ], 'fiat': ['third_party/boringssl/src/third_party/fiat/LICENSE'], - 'guava': ['third_party/guava/LICENSE'], + 'guava': ['third_party/android_deps/libs/com_google_guava_guava/LICENSE'], 'ijar': ['third_party/ijar/LICENSE'], 'jsoncpp': ['third_party/jsoncpp/LICENSE'], 'libaom': ['third_party/libaom/source/libaom/LICENSE'], @@ -74,10 +83,13 @@ # TODO(bugs.webrtc.org/1110): Remove this hack. This is not a lib. # For some reason it is listed as so in _GetThirdPartyLibraries. 'android_deps': [], + # This is not a library but a collection of libraries. 
+ 'androidx': [], # Compile time dependencies, no license needed: 'yasm': [], 'ow2_asm': [], + 'jdk': [], } # Third_party library _regex_ to licences mapping. Keys are regular expression @@ -94,11 +106,11 @@ def FindSrcDirPath(): - """Returns the abs path to the src/ dir of the project.""" - src_dir = os.path.dirname(os.path.abspath(__file__)) - while os.path.basename(src_dir) != 'src': - src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) - return src_dir + """Returns the abs path to the src/ dir of the project.""" + src_dir = os.path.dirname(os.path.abspath(__file__)) + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0])) @@ -112,29 +124,28 @@ def FindSrcDirPath(): class LicenseBuilder(object): + def __init__(self, + buildfile_dirs, + targets, + lib_to_licenses_dict=None, + lib_regex_to_licenses_dict=None): + if lib_to_licenses_dict is None: + lib_to_licenses_dict = LIB_TO_LICENSES_DICT - def __init__(self, - buildfile_dirs, - targets, - lib_to_licenses_dict=None, - lib_regex_to_licenses_dict=None): - if lib_to_licenses_dict is None: - lib_to_licenses_dict = LIB_TO_LICENSES_DICT - - if lib_regex_to_licenses_dict is None: - lib_regex_to_licenses_dict = LIB_REGEX_TO_LICENSES_DICT + if lib_regex_to_licenses_dict is None: + lib_regex_to_licenses_dict = LIB_REGEX_TO_LICENSES_DICT - self.buildfile_dirs = buildfile_dirs - self.targets = targets - self.lib_to_licenses_dict = lib_to_licenses_dict - self.lib_regex_to_licenses_dict = lib_regex_to_licenses_dict + self.buildfile_dirs = buildfile_dirs + self.targets = targets + self.lib_to_licenses_dict = lib_to_licenses_dict + self.lib_regex_to_licenses_dict = lib_regex_to_licenses_dict - self.common_licenses_dict = self.lib_to_licenses_dict.copy() - self.common_licenses_dict.update(self.lib_regex_to_licenses_dict) + self.common_licenses_dict = self.lib_to_licenses_dict.copy() + 
self.common_licenses_dict.update(self.lib_regex_to_licenses_dict) - @staticmethod - def _ParseLibraryName(dep): - """Returns library name after third_party + @staticmethod + def _ParseLibraryName(dep): + """Returns library name after third_party Input one of: //a/b/third_party/libname:c @@ -143,11 +154,11 @@ def _ParseLibraryName(dep): Outputs libname or None if this is not a third_party dependency. """ - groups = re.match(THIRD_PARTY_LIB_SIMPLE_NAME_REGEX, dep) - return groups.group(1) if groups else None + groups = re.match(THIRD_PARTY_LIB_SIMPLE_NAME_REGEX, dep) + return groups.group(1) if groups else None - def _ParseLibrary(self, dep): - """Returns library simple or regex name that matches `dep` after third_party + def _ParseLibrary(self, dep): + """Returns library simple or regex name that matches `dep` after third_party This method matches `dep` dependency against simple names in LIB_TO_LICENSES_DICT and regular expression names in @@ -155,104 +166,109 @@ def _ParseLibrary(self, dep): Outputs matched dict key or None if this is not a third_party dependency. 
""" - libname = LicenseBuilder._ParseLibraryName(dep) - - for lib_regex in self.lib_regex_to_licenses_dict: - if re.match(THIRD_PARTY_LIB_REGEX_TEMPLATE % lib_regex, dep): - return lib_regex - - return libname - - @staticmethod - def _RunGN(buildfile_dir, target): - cmd = [ - sys.executable, - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), - 'desc', - '--all', - '--format=json', - os.path.abspath(buildfile_dir), - target, - ] - logging.debug('Running: %r', cmd) - output_json = subprocess.check_output(cmd, cwd=WEBRTC_ROOT) - logging.debug('Output: %s', output_json) - return output_json - - def _GetThirdPartyLibraries(self, buildfile_dir, target): - output = json.loads(LicenseBuilder._RunGN(buildfile_dir, target)) - libraries = set() - for described_target in output.values(): - third_party_libs = ( - self._ParseLibrary(dep) for dep in described_target['deps']) - libraries |= set(lib for lib in third_party_libs if lib) - return libraries - - def GenerateLicenseText(self, output_dir): - # Get a list of third_party libs from gn. For fat libraries we must consider - # all architectures, hence the multiple buildfile directories. - third_party_libs = set() - for buildfile in self.buildfile_dirs: - for target in self.targets: - third_party_libs |= self._GetThirdPartyLibraries(buildfile, target) - assert len(third_party_libs) > 0 - - missing_licenses = third_party_libs - set(self.common_licenses_dict.keys()) - if missing_licenses: - error_msg = 'Missing licenses for following third_party targets: %s' % \ - ', '.join(missing_licenses) - logging.error(error_msg) - raise Exception(error_msg) - - # Put webrtc at the front of the list. - license_libs = sorted(third_party_libs) - license_libs.insert(0, 'webrtc') - - logging.info('List of licenses: %s', ', '.join(license_libs)) - - # Generate markdown. 
- output_license_file = open(os.path.join(output_dir, 'LICENSE.md'), 'w+') - for license_lib in license_libs: - if len(self.common_licenses_dict[license_lib]) == 0: - logging.info('Skipping compile time or internal dependency: %s', - license_lib) - continue # Compile time dependency - - output_license_file.write('# %s\n' % license_lib) - output_license_file.write('```\n') - for path in self.common_licenses_dict[license_lib]: - license_path = os.path.join(WEBRTC_ROOT, path) - with open(license_path, 'r') as license_file: - license_text = cgi.escape(license_file.read(), quote=True) - output_license_file.write(license_text) - output_license_file.write('\n') - output_license_file.write('```\n\n') - - output_license_file.close() + libname = LicenseBuilder._ParseLibraryName(dep) + + for lib_regex in self.lib_regex_to_licenses_dict: + if re.match(THIRD_PARTY_LIB_REGEX_TEMPLATE % lib_regex, dep): + return lib_regex + + return libname + + @staticmethod + def _RunGN(buildfile_dir, target): + cmd = [ + sys.executable, + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py'), + 'desc', + '--all', + '--format=json', + os.path.abspath(buildfile_dir), + target, + ] + logging.debug('Running: %r', cmd) + output_json = subprocess.check_output(cmd, cwd=WEBRTC_ROOT).decode('UTF-8') + logging.debug('Output: %s', output_json) + return output_json + + def _GetThirdPartyLibraries(self, buildfile_dir, target): + output = json.loads(LicenseBuilder._RunGN(buildfile_dir, target)) + libraries = set() + for described_target in output.values(): + third_party_libs = (self._ParseLibrary(dep) + for dep in described_target['deps']) + libraries |= set(lib for lib in third_party_libs if lib) + return libraries + + def GenerateLicenseText(self, output_dir): + # Get a list of third_party libs from gn. For fat libraries we must consider + # all architectures, hence the multiple buildfile directories. 
+ third_party_libs = set() + for buildfile in self.buildfile_dirs: + for target in self.targets: + third_party_libs |= self._GetThirdPartyLibraries( + buildfile, target) + assert len(third_party_libs) > 0 + + missing_licenses = third_party_libs - set( + self.common_licenses_dict.keys()) + if missing_licenses: + error_msg = 'Missing licenses for following third_party targets: %s' % \ + ', '.join(sorted(missing_licenses)) + logging.error(error_msg) + raise Exception(error_msg) + + # Put webrtc at the front of the list. + license_libs = sorted(third_party_libs) + license_libs.insert(0, 'webrtc') + + logging.info('List of licenses: %s', ', '.join(license_libs)) + + # Generate markdown. + output_license_file = open(os.path.join(output_dir, 'LICENSE.md'), + 'w+') + for license_lib in license_libs: + if len(self.common_licenses_dict[license_lib]) == 0: + logging.info( + 'Skipping compile time or internal dependency: %s', + license_lib) + continue # Compile time dependency + + output_license_file.write('# %s\n' % license_lib) + output_license_file.write('```\n') + for path in self.common_licenses_dict[license_lib]: + license_path = os.path.join(WEBRTC_ROOT, path) + with open(license_path, 'r') as license_file: + license_text = escape(license_file.read(), quote=True) + output_license_file.write(license_text) + output_license_file.write('\n') + output_license_file.write('```\n\n') + + output_license_file.close() def main(): - parser = argparse.ArgumentParser(description='Generate WebRTC LICENSE.md') - parser.add_argument( - '--verbose', action='store_true', default=False, help='Debug logging.') - parser.add_argument( - '--target', - required=True, - action='append', - default=[], - help='Name of the GN target to generate a license for') - parser.add_argument('output_dir', help='Directory to output LICENSE.md to.') - parser.add_argument( - 'buildfile_dirs', - nargs='+', - help='Directories containing gn generated ninja files') - args = parser.parse_args() - - 
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - - builder = LicenseBuilder(args.buildfile_dirs, args.target) - builder.GenerateLicenseText(args.output_dir) + parser = argparse.ArgumentParser(description='Generate WebRTC LICENSE.md') + parser.add_argument('--verbose', + action='store_true', + default=False, + help='Debug logging.') + parser.add_argument('--target', + required=True, + action='append', + default=[], + help='Name of the GN target to generate a license for') + parser.add_argument('output_dir', + help='Directory to output LICENSE.md to.') + parser.add_argument('buildfile_dirs', + nargs='+', + help='Directories containing gn generated ninja files') + args = parser.parse_args() + + logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) + + builder = LicenseBuilder(args.buildfile_dirs, args.target) + builder.GenerateLicenseText(args.output_dir) if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/libs/generate_licenses_test.py b/tools_webrtc/libs/generate_licenses_test.py index 03f14459c2..ebef78e132 100755 --- a/tools_webrtc/libs/generate_licenses_test.py +++ b/tools_webrtc/libs/generate_licenses_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython # pylint: disable=relative-import,protected-access,unused-argument # Copyright 2017 The WebRTC project authors. All Rights Reserved. @@ -9,24 +9,21 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
-import os -import sys - -SRC = os.path.abspath( - os.path.join(os.path.dirname((__file__)), os.pardir, os.pardir)) -sys.path.append(os.path.join(SRC, 'third_party', 'pymock')) - import unittest -import mock +try: + # python 3.3+ + from unittest.mock import patch +except ImportError: + # From site-package + from mock import patch from generate_licenses import LicenseBuilder class TestLicenseBuilder(unittest.TestCase): - - @staticmethod - def _FakeRunGN(buildfile_dir, target): - return """ + @staticmethod + def _FakeRunGN(buildfile_dir, target): + return """ { "target1": { "deps": [ @@ -39,91 +36,93 @@ def _FakeRunGN(buildfile_dir, target): } """ - def testParseLibraryName(self): - self.assertEquals( - LicenseBuilder._ParseLibraryName('//a/b/third_party/libname1:c'), - 'libname1') - self.assertEquals( - LicenseBuilder._ParseLibraryName('//a/b/third_party/libname2:c(d)'), - 'libname2') - self.assertEquals( - LicenseBuilder._ParseLibraryName('//a/b/third_party/libname3/c:d(e)'), - 'libname3') - self.assertEquals( - LicenseBuilder._ParseLibraryName('//a/b/not_third_party/c'), None) - - def testParseLibrarySimpleMatch(self): - builder = LicenseBuilder([], [], {}, {}) - self.assertEquals( - builder._ParseLibrary('//a/b/third_party/libname:c'), 'libname') - - def testParseLibraryRegExNoMatchFallbacksToDefaultLibname(self): - lib_dict = { - 'libname:foo.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], lib_dict, {}) - self.assertEquals( - builder._ParseLibrary('//a/b/third_party/libname:bar_java'), 'libname') - - def testParseLibraryRegExMatch(self): - lib_regex_dict = { - 'libname:foo.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEquals( - builder._ParseLibrary('//a/b/third_party/libname:foo_bar_java'), - 'libname:foo.*') - - def testParseLibraryRegExMatchWithSubDirectory(self): - lib_regex_dict = { - 'libname/foo:bar.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], {}, lib_regex_dict) - 
self.assertEquals( - builder._ParseLibrary('//a/b/third_party/libname/foo:bar_java'), - 'libname/foo:bar.*') - - def testParseLibraryRegExMatchWithStarInside(self): - lib_regex_dict = { - 'libname/foo.*bar.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEquals( - builder._ParseLibrary('//a/b/third_party/libname/fooHAHA:bar_java'), - 'libname/foo.*bar.*') - - @mock.patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) - def testGetThirdPartyLibrariesWithoutRegex(self): - builder = LicenseBuilder([], [], {}, {}) - self.assertEquals( - builder._GetThirdPartyLibraries('out/arm', 'target1'), - set(['libname1', 'libname2', 'libname3'])) - - @mock.patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) - def testGetThirdPartyLibrariesWithRegex(self): - lib_regex_dict = { - 'libname2:c.*': ['path/to/LICENSE'], - } - builder = LicenseBuilder([], [], {}, lib_regex_dict) - self.assertEquals( - builder._GetThirdPartyLibraries('out/arm', 'target1'), - set(['libname1', 'libname2:c.*', 'libname3'])) - - @mock.patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) - def testGenerateLicenseTextFailIfUnknownLibrary(self): - lib_dict = { - 'simple_library': ['path/to/LICENSE'], - } - builder = LicenseBuilder(['dummy_dir'], ['dummy_target'], lib_dict, {}) - - with self.assertRaises(Exception) as context: - builder.GenerateLicenseText('dummy/dir') - - self.assertEquals( - context.exception.message, - 'Missing licenses for following third_party targets: ' - 'libname1, libname2, libname3') + def testParseLibraryName(self): + self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/third_party/libname1:c'), + 'libname1') + self.assertEqual( + LicenseBuilder._ParseLibraryName( + '//a/b/third_party/libname2:c(d)'), 'libname2') + self.assertEqual( + LicenseBuilder._ParseLibraryName( + '//a/b/third_party/libname3/c:d(e)'), 'libname3') + self.assertEqual( + LicenseBuilder._ParseLibraryName('//a/b/not_third_party/c'), None) + 
+ def testParseLibrarySimpleMatch(self): + builder = LicenseBuilder([], [], {}, {}) + self.assertEqual(builder._ParseLibrary('//a/b/third_party/libname:c'), + 'libname') + + def testParseLibraryRegExNoMatchFallbacksToDefaultLibname(self): + lib_dict = { + 'libname:foo.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], lib_dict, {}) + self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname:bar_java'), + 'libname') + + def testParseLibraryRegExMatch(self): + lib_regex_dict = { + 'libname:foo.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) + self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname:foo_bar_java'), + 'libname:foo.*') + + def testParseLibraryRegExMatchWithSubDirectory(self): + lib_regex_dict = { + 'libname/foo:bar.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) + self.assertEqual( + builder._ParseLibrary('//a/b/third_party/libname/foo:bar_java'), + 'libname/foo:bar.*') + + def testParseLibraryRegExMatchWithStarInside(self): + lib_regex_dict = { + 'libname/foo.*bar.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) + self.assertEqual( + builder._ParseLibrary( + '//a/b/third_party/libname/fooHAHA:bar_java'), + 'libname/foo.*bar.*') + + @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + def testGetThirdPartyLibrariesWithoutRegex(self): + builder = LicenseBuilder([], [], {}, {}) + self.assertEqual( + builder._GetThirdPartyLibraries('out/arm', 'target1'), + set(['libname1', 'libname2', 'libname3'])) + + @patch('generate_licenses.LicenseBuilder._RunGN', _FakeRunGN) + def testGetThirdPartyLibrariesWithRegex(self): + lib_regex_dict = { + 'libname2:c.*': ['path/to/LICENSE'], + } + builder = LicenseBuilder([], [], {}, lib_regex_dict) + self.assertEqual( + builder._GetThirdPartyLibraries('out/arm', 'target1'), + set(['libname1', 'libname2:c.*', 'libname3'])) + + @patch('generate_licenses.LicenseBuilder._RunGN', 
_FakeRunGN) + def testGenerateLicenseTextFailIfUnknownLibrary(self): + lib_dict = { + 'simple_library': ['path/to/LICENSE'], + } + builder = LicenseBuilder(['dummy_dir'], ['dummy_target'], lib_dict, {}) + + with self.assertRaises(Exception) as context: + builder.GenerateLicenseText('dummy/dir') + + self.assertEqual( + context.exception.args[0], + 'Missing licenses for following third_party targets: ' + 'libname1, libname2, libname3') if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/mb/docs/design_spec.md b/tools_webrtc/mb/docs/design_spec.md index 33fda806e8..0aaaf89f9b 100644 --- a/tools_webrtc/mb/docs/design_spec.md +++ b/tools_webrtc/mb/docs/design_spec.md @@ -52,8 +52,8 @@ for a particular build directory, and what set of flags (`GYP_DEFINES` or `gn args`) to use. A config can either be specified directly (useful for testing) or by specifying -the master name and builder name (useful on the bots so that they do not need -to specify a config directly and can be hidden from the details). +the builder group name and builder name (useful on the bots so that they do not +need to specify a config directly and can be hidden from the details). See the [user guide](user_guide.md#mb_config.pyl) for details. diff --git a/tools_webrtc/mb/docs/user_guide.md b/tools_webrtc/mb/docs/user_guide.md index 9817553bf6..8c66cd328c 100644 --- a/tools_webrtc/mb/docs/user_guide.md +++ b/tools_webrtc/mb/docs/user_guide.md @@ -27,8 +27,8 @@ a list of files (e.g., the list of files in a patch on a trybot): mb analyze -c chromium_linux_rel //out/Release input.json output.json ``` -Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags -must be specified so that `mb` can figure out which config to use. +Either the `-c/--config` flag or the `-m/--builder-group` and `-b/--builder` +flags must be specified so that `mb` can figure out which config to use. 
The first positional argument must be a GN-style "source-absolute" path to the build directory. @@ -90,15 +90,16 @@ differences can be subtle. We won't even go into how the `targets` and `build_targets` differ from each other or from `compile_targets` and `test_targets`. -The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`, +The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--builder-group`, `-q/--quiet`, and `-v/--verbose` flags work as documented for `mb gen`. ### `mb audit` `mb audit` is used to track the progress of the GYP->GN migration. You can -use it to check a single master, or all the masters we care about. See -`mb help audit` for more details (most people are not expected to care about -this). +use it to check a single builder group, or all the builder groups we care +about. +See `mb help audit` for more details (most people are not expected to care +about this). ### `mb gen` @@ -111,8 +112,8 @@ a directory, then runs GYP or GN as appropriate: % mb gen -c linux_rel_trybot //out/Release ``` -Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags -must be specified so that `mb` can figure out which config to use. The +Either the `-c/--config` flag or the `-m/--builder-group` and `-b/--builder` +flags must be specified so that `mb` can figure out which config to use. The `--phase` flag must also be used with builders that have multiple build/compile steps (and only with those builders). @@ -149,7 +150,7 @@ Produces help output on the other subcommands Prints what command will be run by `mb gen` (like `mb gen -n` but does not require you to specify a path). -The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`, +The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--builder-group`, `--phase`, `-q/--quiet`, and `-v/--verbose` flags work as documented for `mb gen`. 
@@ -198,11 +199,11 @@ listed here, and so by using the configs in this file you can avoid having to juggle long lists of GYP_DEFINES and gn args by hand. `mb_config.pyl` is structured as a file containing a single PYthon Literal -expression: a dictionary with three main keys, `masters`, `configs` and +expression: a dictionary with three main keys, `builder_groups`, `configs` and `mixins`. -The `masters` key contains a nested series of dicts containing mappings -of master -> builder -> config . This allows us to isolate the buildbot +The `builder_groups` key contains a nested series of dicts containing mappings +of builder group -> builder -> config . This allows us to isolate the buildbot recipes from the actual details of the configs. The config should either be a single string value representing a key in the `configs` dictionary, or a list of strings, each of which is a key in the `configs` dictionary; diff --git a/tools_webrtc/mb/gn_isolate_map.pyl b/tools_webrtc/mb/gn_isolate_map.pyl index dba0d97571..01993a8fcb 100644 --- a/tools_webrtc/mb/gn_isolate_map.pyl +++ b/tools_webrtc/mb/gn_isolate_map.pyl @@ -51,6 +51,10 @@ "label": "//common_video:common_video_unittests", "type": "console_test_launcher", }, + "dcsctp_unittests": { + "label": "//net/dcsctp:dcsctp_unittests", + "type": "console_test_launcher", + }, "isac_fix_test": { "label": "//modules/audio_coding:isac_fix_test", "type": "console_test_launcher", diff --git a/tools_webrtc/mb/mb.py b/tools_webrtc/mb/mb.py index 6287ca2366..4aff74621f 100755 --- a/tools_webrtc/mb/mb.py +++ b/tools_webrtc/mb/mb.py @@ -54,7 +54,7 @@ def __init__(self): self.sep = os.sep self.args = argparse.Namespace() self.configs = {} - self.masters = {} + self.builder_groups = {} self.mixins = {} self.isolate_exe = 'isolate.exe' if self.platform.startswith( 'win') else 'isolate' @@ -80,8 +80,8 @@ def ParseArgs(self, argv): def AddCommonOptions(subp): subp.add_argument('-b', '--builder', help='builder name to look up config from') - 
subp.add_argument('-m', '--master', - help='master name to look up config from') + subp.add_argument('-m', '--builder-group', + help='builder group name to look up config from') subp.add_argument('-c', '--config', help='configuration to analyze') subp.add_argument('--phase', @@ -252,10 +252,10 @@ def CmdAnalyze(self): def CmdExport(self): self.ReadConfigFile() obj = {} - for master, builders in self.masters.items(): - obj[master] = {} + for builder_group, builders in self.builder_groups.items(): + obj[builder_group] = {} for builder in builders: - config = self.masters[master][builder] + config = self.builder_groups[builder_group][builder] if not config: continue @@ -269,7 +269,7 @@ def CmdExport(self): if 'error' in args: continue - obj[master][builder] = args + obj[builder_group][builder] = args # Dump object and trim trailing whitespace. s = '\n'.join(l.rstrip() for l in @@ -323,11 +323,14 @@ def CmdRun(self): return ret if self.args.swarmed: - return self._RunUnderSwarming(build_dir, target) + cmd, _ = self.GetSwarmingCommand(self.args.target[0], vals) + return self._RunUnderSwarming(build_dir, target, cmd) else: return self._RunLocallyIsolated(build_dir, target) - def _RunUnderSwarming(self, build_dir, target): + def _RunUnderSwarming(self, build_dir, target, isolate_cmd): + cas_instance = 'chromium-swarm' + swarming_server = 'chromium-swarm.appspot.com' # TODO(dpranke): Look up the information for the target in # the //testing/buildbot.json file, if possible, so that we # can determine the isolate target, command line, and additional @@ -336,7 +339,7 @@ def _RunUnderSwarming(self, build_dir, target): # TODO(dpranke): Also, add support for sharding and merging results. 
dimensions = [] for k, v in self.args.dimensions: - dimensions += ['-d', k, v] + dimensions += ['-d', '%s=%s' % (k, v)] archive_json_path = self.ToSrcRelPath( '%s/%s.archive.json' % (build_dir, target)) @@ -345,13 +348,29 @@ def _RunUnderSwarming(self, build_dir, target): 'archive', '-i', self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)), - '-s', - self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)), - '-I', 'isolateserver.appspot.com', - '-dump-json', archive_json_path, - ] - ret, _, _ = self.Run(cmd, force_verbose=False) + '-cas-instance', + cas_instance, + '-dump-json', + archive_json_path, + ] + + # Talking to the isolateserver may fail because we're not logged in. + # We trap the command explicitly and rewrite the error output so that + # the error message is actually correct for a Chromium check out. + self.PrintCmd(cmd, env=None) + ret, out, err = self.Run(cmd, force_verbose=False) if ret: + self.Print(' -> returned %d' % ret) + if out: + self.Print(out, end='') + if err: + # The swarming client will return an exit code of 2 (via + # argparse.ArgumentParser.error()) and print a message to indicate + # that auth failed, so we have to parse the message to check. 
+ if (ret == 2 and 'Please login to' in err): + err = err.replace(' auth.py', ' tools/swarming_client/auth.py') + self.Print(err, end='', file=sys.stderr) + return ret try: @@ -361,7 +380,7 @@ def _RunUnderSwarming(self, build_dir, target): 'Failed to read JSON file "%s"' % archive_json_path, file=sys.stderr) return 1 try: - isolated_hash = archive_hashes[target] + cas_digest = archive_hashes[target] except Exception: self.Print( 'Cannot find hash for "%s" in "%s", file content: %s' % @@ -369,16 +388,44 @@ def _RunUnderSwarming(self, build_dir, target): file=sys.stderr) return 1 + try: + json_dir = self.TempDir() + json_file = self.PathJoin(json_dir, 'task.json') + + cmd = [ + self.PathJoin('tools', 'luci-go', 'swarming'), + 'trigger', + '-digest', + cas_digest, + '-server', + swarming_server, + '-tag=purpose:user-debug-mb', + '-relative-cwd', + self.ToSrcRelPath(build_dir), + '-dump-json', + json_file, + ] + dimensions + ['--'] + list(isolate_cmd) + + if self.args.extra_args: + cmd += ['--'] + self.args.extra_args + self.Print('') + ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False) + if ret: + return ret + task_json = self.ReadFile(json_file) + task_id = json.loads(task_json)["tasks"][0]['task_id'] + finally: + if json_dir: + self.RemoveDirectory(json_dir) + cmd = [ - self.executable, - self.PathJoin('tools', 'swarming_client', 'swarming.py'), - 'run', - '-s', isolated_hash, - '-I', 'isolateserver.appspot.com', - '-S', 'chromium-swarm.appspot.com', - ] + dimensions - if self.args.extra_args: - cmd += ['--'] + self.args.extra_args + self.PathJoin('tools', 'luci-go', 'swarming'), + 'collect', + '-server', + swarming_server, + '-task-output-stdout=console', + task_id, + ] ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False) return ret @@ -402,13 +449,13 @@ def CmdValidate(self, print_ok=True): # Build a list of all of the configs referenced by builders. 
all_configs = {} - for master in self.masters: - for config in self.masters[master].values(): + for builder_group in self.builder_groups: + for config in self.builder_groups[builder_group].values(): if isinstance(config, dict): for c in config.values(): - all_configs[c] = master + all_configs[c] = builder_group else: - all_configs[config] = master + all_configs[config] = builder_group # Check that every referenced args file or config actually exists. for config, loc in all_configs.items(): @@ -459,7 +506,7 @@ def GetConfig(self): build_dir = self.args.path[0] vals = self.DefaultVals() - if self.args.builder or self.args.master or self.args.config: + if self.args.builder or self.args.builder_group or self.args.config: vals = self.Lookup() # Re-run gn gen in order to ensure the config is consistent with the # build dir. @@ -517,7 +564,7 @@ def ReadConfigFile(self): (self.args.config_file, e)) self.configs = contents['configs'] - self.masters = contents['masters'] + self.builder_groups = contents['builder_groups'] self.mixins = contents['mixins'] def ReadIsolateMap(self): @@ -532,38 +579,39 @@ def ReadIsolateMap(self): def ConfigFromArgs(self): if self.args.config: - if self.args.master or self.args.builder: - raise MBErr('Can not specific both -c/--config and -m/--master or ' - '-b/--builder') + if self.args.builder_group or self.args.builder: + raise MBErr('Can not specific both -c/--config and -m/--builder-group ' + 'or -b/--builder') return self.args.config - if not self.args.master or not self.args.builder: + if not self.args.builder_group or not self.args.builder: raise MBErr('Must specify either -c/--config or ' - '(-m/--master and -b/--builder)') + '(-m/--builder-group and -b/--builder)') - if not self.args.master in self.masters: + if not self.args.builder_group in self.builder_groups: raise MBErr('Master name "%s" not found in "%s"' % - (self.args.master, self.args.config_file)) + (self.args.builder_group, self.args.config_file)) - if not self.args.builder 
in self.masters[self.args.master]: - raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' % - (self.args.builder, self.args.master, self.args.config_file)) + if not self.args.builder in self.builder_groups[self.args.builder_group]: + raise MBErr( + 'Builder name "%s" not found under builder_groups[%s] in "%s"' % + (self.args.builder, self.args.builder_group, self.args.config_file)) - config = self.masters[self.args.master][self.args.builder] + config = self.builder_groups[self.args.builder_group][self.args.builder] if isinstance(config, dict): if self.args.phase is None: raise MBErr('Must specify a build --phase for %s on %s' % - (self.args.builder, self.args.master)) + (self.args.builder, self.args.builder_group)) phase = str(self.args.phase) if phase not in config: raise MBErr('Phase %s doesn\'t exist for %s on %s' % - (phase, self.args.builder, self.args.master)) + (phase, self.args.builder, self.args.builder_group)) return config[phase] if self.args.phase is not None: raise MBErr('Must not specify a build --phase for %s on %s' % - (self.args.builder, self.args.master)) + (self.args.builder, self.args.builder_group)) return config def FlattenConfig(self, config): @@ -682,7 +730,7 @@ def RunGNGen(self, vals): raise MBErr('did not generate any of %s' % ', '.join(runtime_deps_targets)) - command, extra_files = self.GetIsolateCommand(target, vals) + command, extra_files = self.GetSwarmingCommand(target, vals) runtime_deps = self.ReadFile(runtime_deps_path).splitlines() @@ -700,7 +748,7 @@ def RunGNIsolate(self, vals): label = labels[0] build_dir = self.args.path[0] - command, extra_files = self.GetIsolateCommand(target, vals) + command, extra_files = self.GetSwarmingCommand(target, vals) cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps') ret, out, _ = self.Call(cmd) @@ -823,7 +871,7 @@ def GNArgs(self, vals): gn_args = ('import("%s")\n' % vals['args_file']) + gn_args return gn_args - def GetIsolateCommand(self, target, vals): + def 
GetSwarmingCommand(self, target, vals): isolate_map = self.ReadIsolateMap() test_type = isolate_map[target]['type'] @@ -1069,27 +1117,6 @@ def WriteJSON(self, obj, path, force_verbose=False): raise MBErr('Error %s writing to the output path "%s"' % (e, path)) - def CheckCompile(self, master, builder): - url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1' - url = urllib2.quote(url_template.format(master=master, builder=builder), - safe=':/()?=') - try: - builds = json.loads(self.Fetch(url)) - except Exception as e: - return str(e) - successes = sorted( - [int(x) for x in builds.keys() if "text" in builds[x] and - cmp(builds[x]["text"][:2], ["build", "successful"]) == 0], - reverse=True) - if not successes: - return "no successful builds" - build = builds[str(successes[0])] - step_names = set([step["name"] for step in build["steps"]]) - compile_indicators = set(["compile", "compile (with patch)", "analyze"]) - if compile_indicators & step_names: - return "compiles" - return "does not compile" - def PrintCmd(self, cmd, env): if self.platform == 'win32': env_prefix = 'set ' @@ -1208,6 +1235,10 @@ def RemoveDirectory(self, abs_path): else: shutil.rmtree(abs_path, ignore_errors=True) + def TempDir(self): + # This function largely exists so it can be overriden for testing. + return tempfile.mkdtemp(prefix='mb_') + def TempFile(self, mode='w'): # This function largely exists so it can be overriden for testing. return tempfile.NamedTemporaryFile(mode=mode, delete=False) diff --git a/tools_webrtc/mb/mb_config.pyl b/tools_webrtc/mb/mb_config.pyl index 9f9fdd2d41..253a57acc5 100644 --- a/tools_webrtc/mb/mb_config.pyl +++ b/tools_webrtc/mb/mb_config.pyl @@ -12,27 +12,26 @@ # easy to try different configurations of GN args in tryjob patches. 
{ - # This is a map of buildbot master names -> buildbot builder names -> + # This is a map of buildbot builder group names -> buildbot builder names -> # config names (where each config name is a key in the 'configs' dict, # above). mb uses this dict to look up which config to use for a given bot. # The builders should be sorted by the order they appear in the /builders # page on the buildbots, *not* alphabetically. - 'masters': { + 'builder_groups': { 'client.webrtc': { # iOS - 'iOS32 Debug': 'ios_debug_bot_arm', - 'iOS32 Release': 'ios_release_bot_arm', 'iOS64 Debug': 'ios_debug_bot_arm64', 'iOS64 Release': 'ios_release_bot_arm64', - 'iOS64 Sim Debug (iOS 10.0)': 'ios_debug_bot_x64', - 'iOS64 Sim Debug (iOS 11)': 'ios_debug_bot_x64', 'iOS64 Sim Debug (iOS 12)': 'ios_debug_bot_x64', + 'iOS64 Sim Debug (iOS 13)': 'ios_debug_bot_x64', + 'iOS64 Sim Debug (iOS 14.0)': 'ios_debug_bot_x64', # Mac 'Mac64 Debug': 'debug_bot_x64', 'Mac64 Release': 'release_bot_x64', 'Mac64 Builder': 'pure_release_bot_x64', 'Mac Asan': 'mac_asan_clang_release_bot_x64', + 'MacARM64 M1 Release': 'release_bot_arm64', # Linux 'Linux32 Debug': 'no_h264_debug_bot_x86', @@ -44,7 +43,6 @@ 'Linux64 Builder': 'pure_release_bot_x64', 'Linux64 Debug (ARM)': 'debug_bot_arm64', 'Linux64 Release (ARM)': 'release_bot_arm64', - 'Linux64 Release (GCC)': 'gcc_release_bot_x64', 'Linux Asan': 'asan_lsan_clang_release_bot_x64', 'Linux MSan': 'msan_clang_release_bot_x64', 'Linux Tsan v2': 'tsan_clang_release_bot_x64', @@ -92,7 +90,6 @@ 'Win64 Debug (Clang)': 'win_clang_debug_bot_x64', 'Win64 Release (Clang)': 'win_clang_release_bot_x64', 'Win64 ASan': 'win_asan_clang_release_bot_x64', - 'Win64 UWP': 'win_uwp_release_bot_x64', 'Win (more configs)': { 'bwe_test_logging': 'bwe_test_logging_x86', @@ -108,11 +105,12 @@ # build anything). # TODO(http://crbug.com/1029452): Nuke these and isolate on builder # instead? 
- 'Perf Android32 (K Nexus5)': 'release_bot_x64', + 'Perf Android32 (M Nexus5)': 'release_bot_x64', 'Perf Android32 (M AOSP Nexus6)': 'release_bot_x64', 'Perf Android64 (M Nexus5X)': 'release_bot_x64', 'Perf Android64 (O Pixel2)': 'release_bot_x64', 'Perf Linux Trusty': 'release_bot_x64', + 'Perf Linux Bionic': 'release_bot_x64', 'Perf Mac 10.11': 'release_bot_x64', 'Perf Win7': 'release_bot_x64', }, @@ -149,13 +147,11 @@ }, 'tryserver.webrtc': { # iOS - 'ios_compile_arm_dbg': 'ios_debug_bot_arm', - 'ios_compile_arm_rel': 'ios_release_bot_arm', 'ios_compile_arm64_dbg': 'ios_debug_bot_arm64', 'ios_compile_arm64_rel': 'ios_release_bot_arm64', - 'ios_sim_x64_dbg_ios10': 'ios_debug_bot_x64', - 'ios_sim_x64_dbg_ios11': 'ios_debug_bot_x64', 'ios_sim_x64_dbg_ios12': 'ios_debug_bot_x64', + 'ios_sim_x64_dbg_ios13': 'ios_debug_bot_x64', + 'ios_sim_x64_dbg_ios14': 'ios_debug_bot_x64', # Mac 'mac_compile_dbg': 'debug_bot_x64', @@ -173,7 +169,6 @@ 'linux_compile_arm_rel': 'release_bot_arm', 'linux_compile_arm64_dbg': 'debug_bot_arm64', 'linux_compile_arm64_rel': 'release_bot_arm64', - 'linux_compile_gcc_rel': 'gcc_release_bot_x64', 'linux_dbg': 'debug_bot_x64', 'linux_rel': 'release_bot_x64', 'linux_x86_rel': 'release_bot_x86', @@ -235,7 +230,6 @@ 'win_asan': 'win_asan_clang_release_bot_x64', 'win_x64_clang_dbg_win8': 'win_clang_debug_bot_x64', 'win_x64_clang_dbg_win10': 'win_clang_debug_bot_x64', - 'win_x64_uwp': 'win_uwp_release_bot_x64', 'win_x86_more_configs': { 'bwe_test_logging': 'bwe_test_logging_x86', @@ -253,9 +247,6 @@ # we might have mac, win, and linux bots all using the 'release_bot' config). 'configs': { # Linux, Mac and Windows - 'gcc_release_bot_x64': [ - 'gcc', 'release_bot_no_goma', 'x64', 'no_rtc_tests' - ], # TODO(kjellander): Restore Goma for this when crbug.com/726706 is fixed. 
'debug_bot_arm': [ 'openh264', 'debug', 'arm' @@ -310,7 +301,7 @@ ], 'libfuzzer_asan_release_bot_x64': [ 'libfuzzer', 'asan', 'optimize_for_fuzzing', 'openh264', 'release_bot', - 'x64', 'no_rtc_tests' + 'x64' ], # Windows @@ -345,10 +336,6 @@ 'asan', 'clang', 'full_symbols', 'openh264', 'release_bot', 'x64', 'win_fastlink', ], - 'win_uwp_release_bot_x64': [ - # UWP passes compiler flags that are not supported by goma. - 'no_clang', 'openh264', 'x64', 'winuwp', 'release_bot_no_goma' - ], # Mac 'mac_asan_clang_release_bot_x64': [ @@ -391,32 +378,28 @@ ], # iOS - 'ios_debug_bot_arm': [ - 'ios', 'debug_bot', 'arm', 'no_ios_code_signing', 'ios_use_goma_rbe' - ], - 'ios_release_bot_arm': [ - 'ios', 'release_bot', 'arm', 'no_ios_code_signing', 'ios_use_goma_rbe' - ], 'ios_debug_bot_arm64': [ - 'ios', 'debug_bot', 'arm64', 'no_ios_code_signing', 'ios_use_goma_rbe' + 'ios', 'debug_bot', 'arm64', 'no_ios_code_signing', 'ios_use_goma_rbe', + 'xctest', ], 'ios_release_bot_arm64': [ - 'ios', 'release_bot', 'arm64', 'no_ios_code_signing', 'ios_use_goma_rbe' + 'ios', 'release_bot', 'arm64', 'no_ios_code_signing', 'ios_use_goma_rbe', + 'xctest', ], 'ios_internal_debug_bot_arm64': [ 'ios', 'debug_bot', 'arm64', 'ios_use_goma_rbe', - 'ios_code_signing_identity_description', + 'ios_code_signing_identity_description', 'xctest', ], 'ios_internal_release_bot_arm64': [ 'ios', 'release_bot', 'arm64', 'ios_use_goma_rbe', - 'ios_code_signing_identity_description', + 'ios_code_signing_identity_description', 'xctest', ], 'ios_internal_pure_release_bot_arm64': [ 'ios', 'pure_release_bot', 'arm64', 'ios_use_goma_rbe', - 'ios_code_signing_identity_description', + 'ios_code_signing_identity_description', 'xctest', ], 'ios_debug_bot_x64': [ - 'ios', 'debug_bot', 'x64', 'ios_use_goma_rbe' + 'ios', 'debug_bot', 'x64', 'ios_use_goma_rbe', 'xctest', ], # More configs @@ -500,11 +483,6 @@ 'gn_args': 'symbol_level=2', }, - 'gcc': { - 'gn_args': ('is_clang=false use_sysroot=false ' - 
'treat_warnings_as_errors=false'), - }, - 'goma': { 'gn_args': 'use_goma=true', }, @@ -553,10 +531,6 @@ 'gn_args': 'use_lld=false', }, - 'no_rtc_tests': { - 'gn_args': 'rtc_include_tests=false', - }, - 'openh264': { 'gn_args': 'ffmpeg_branding="Chrome" rtc_use_h264=true', }, @@ -625,12 +599,11 @@ 'gn_args': 'rtc_enable_sctp=false', }, - 'winuwp': { - 'gn_args': 'target_os="winuwp"', - }, - 'win_undef_unicode': { 'gn_args': 'rtc_win_undef_unicode=true', }, + 'xctest': { + 'gn_args': 'enable_run_ios_unittests_with_xctest=true', + }, }, } diff --git a/tools_webrtc/mb/mb_unittest.py b/tools_webrtc/mb/mb_unittest.py index c1e477c104..fc359d9995 100755 --- a/tools_webrtc/mb/mb_unittest.py +++ b/tools_webrtc/mb/mb_unittest.py @@ -13,7 +13,9 @@ import json import StringIO import os +import re import sys +import tempfile import unittest import mb @@ -32,6 +34,7 @@ def __init__(self, win32=False): self.platform = 'win32' self.executable = 'c:\\python\\python.exe' self.sep = '\\' + self.cwd = 'c:\\fake_src\\out\\Default' else: self.src_dir = '/fake_src' self.default_config = '/fake_src/tools_webrtc/mb/mb_config.pyl' @@ -39,8 +42,10 @@ def __init__(self, win32=False): self.executable = '/usr/bin/python' self.platform = 'linux2' self.sep = '/' + self.cwd = '/fake_src/out/Default' self.files = {} + self.dirs = set() self.calls = [] self.cmds = [] self.cross_compile = None @@ -52,21 +57,24 @@ def ExpandUser(self, path): return '$HOME/%s' % path def Exists(self, path): - return self.files.get(path) is not None + abs_path = self._AbsPath(path) + return (self.files.get(abs_path) is not None or abs_path in self.dirs) def MaybeMakeDirectory(self, path): - self.files[path] = True + abpath = self._AbsPath(path) + self.dirs.add(abpath) def PathJoin(self, *comps): return self.sep.join(comps) def ReadFile(self, path): - return self.files[path] + return self.files[self._AbsPath(path)] def WriteFile(self, path, contents, force_verbose=False): if self.args.dryrun or self.args.verbose or 
force_verbose: self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path)) - self.files[path] = contents + abpath = self._AbsPath(path) + self.files[abpath] = contents def Call(self, cmd, env=None, buffer_output=True): self.calls.append(cmd) @@ -83,18 +91,34 @@ def Print(self, *args, **kwargs): else: self.out += sep.join(args) + end + def TempDir(self): + tmp_dir = os.path.join(tempfile.gettempdir(), 'mb_test') + self.dirs.add(tmp_dir) + return tmp_dir + def TempFile(self, mode='w'): return FakeFile(self.files) def RemoveFile(self, path): - del self.files[path] + abpath = self._AbsPath(path) + self.files[abpath] = None def RemoveDirectory(self, path): - self.rmdirs.append(path) - files_to_delete = [f for f in self.files if f.startswith(path)] + abpath = self._AbsPath(path) + self.rmdirs.append(abpath) + files_to_delete = [f for f in self.files if f.startswith(abpath)] for f in files_to_delete: self.files[f] = None + def _AbsPath(self, path): + if not ((self.platform == 'win32' and path.startswith('c:')) or + (self.platform != 'win32' and path.startswith('/'))): + path = self.PathJoin(self.cwd, path) + if self.sep == '\\': + return re.sub(r'\\+', r'\\', path) + else: + return re.sub('/+', '/', path) + class FakeFile(object): def __init__(self, files): @@ -111,12 +135,12 @@ def close(self): TEST_CONFIG = """\ { - 'masters': { + 'builder_groups': { 'chromium': {}, - 'fake_master': { + 'fake_group': { 'fake_builder': 'rel_bot', 'fake_debug_builder': 'debug_goma', - 'fake_args_bot': '//build/args/bots/fake_master/fake_args_bot.gn', + 'fake_args_bot': '//build/args/bots/fake_group/fake_args_bot.gn', 'fake_multi_phase': { 'phase_1': 'phase_1', 'phase_2': 'phase_2'}, 'fake_android_bot': 'android_bot', }, @@ -169,20 +193,27 @@ def fake_mbw(self, files=None, win32=False): }, }''') mbw.files.setdefault( - mbw.ToAbsPath('//build/args/bots/fake_master/fake_args_bot.gn'), + mbw.ToAbsPath('//build/args/bots/fake_group/fake_args_bot.gn'), 'is_debug = false\n') if files: for 
path, contents in files.items(): mbw.files[path] = contents return mbw - def check(self, args, mbw=None, files=None, out=None, err=None, ret=None): + def check(self, args, mbw=None, files=None, out=None, err=None, ret=None, + env=None): if not mbw: mbw = self.fake_mbw(files) - actual_ret = mbw.Main(args) - - self.assertEqual(actual_ret, ret) + try: + prev_env = os.environ.copy() + os.environ = env if env else prev_env + actual_ret = mbw.Main(args) + finally: + os.environ = prev_env + self.assertEqual( + actual_ret, ret, + "ret: %s, out: %s, err: %s" % (actual_ret, mbw.out, mbw.err)) if out is not None: self.assertEqual(mbw.out, out) if err is not None: @@ -238,12 +269,12 @@ def test_gen(self): '--check\n', mbw.out) mbw = self.fake_mbw() - self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_bot', + self.check(['gen', '-m', 'fake_group', '-b', 'fake_args_bot', '//out/Debug'], mbw=mbw, ret=0) self.assertEqual( mbw.files['/fake_src/out/Debug/args.gn'], - 'import("//build/args/bots/fake_master/fake_args_bot.gn")\n') + 'import("//build/args/bots/fake_group/fake_args_bot.gn")\n\n') def test_gen_fails(self): @@ -564,8 +595,8 @@ def test_isolate_windowed_test_launcher_linux(self): def test_gen_windowed_test_launcher_win(self): files = { - '/tmp/swarming_targets': 'unittests\n', - '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( + 'c:\\fake_src\\out\\Default\\tmp\\swarming_targets': 'unittests\n', + 'c:\\fake_src\\testing\\buildbot\\gn_isolate_map.pyl': ( "{'unittests': {" " 'label': '//somewhere:unittests'," " 'type': 'windowed_test_launcher'," @@ -579,9 +610,10 @@ def test_gen_windowed_test_launcher_win(self): mbw = self.fake_mbw(files=files, win32=True) self.check(['gen', '-c', 'debug_goma', - '--swarming-targets-file', '/tmp/swarming_targets', + '--swarming-targets-file', + 'c:\\fake_src\\out\\Default\\tmp\\swarming_targets', '--isolate-map-file', - '/fake_src/testing/buildbot/gn_isolate_map.pyl', + 'c:\\fake_src\\testing\\buildbot\\gn_isolate_map.pyl', 
'//out/Default'], mbw=mbw, ret=0) isolate_file = mbw.files['c:\\fake_src\\out\\Default\\unittests.isolate'] @@ -750,23 +782,40 @@ def test_run(self): def test_run_swarmed(self): files = { - '/fake_src/testing/buildbot/gn_isolate_map.pyl': ( - "{'base_unittests': {" - " 'label': '//base:base_unittests'," - " 'type': 'raw'," - " 'args': []," - "}}\n" - ), - '/fake_src/out/Default/base_unittests.runtime_deps': ( - "base_unittests\n" - ), - 'out/Default/base_unittests.archive.json': ( - "{\"base_unittests\":\"fake_hash\"}"), + '/fake_src/testing/buildbot/gn_isolate_map.pyl': + ("{'base_unittests': {" + " 'label': '//base:base_unittests'," + " 'type': 'console_test_launcher'," + "}}\n"), + '/fake_src/out/Default/base_unittests.runtime_deps': + ("base_unittests\n"), + '/fake_src/out/Default/base_unittests.archive.json': + ("{\"base_unittests\":\"fake_hash\"}"), + '/fake_src/third_party/depot_tools/cipd_manifest.txt': + ("# vpython\n" + "/some/vpython/pkg git_revision:deadbeef\n"), } + task_json = json.dumps({'tasks': [{'task_id': '00000'}]}) + collect_json = json.dumps({'00000': {'results': {}}}) mbw = self.fake_mbw(files=files) + mbw.files[mbw.PathJoin(mbw.TempDir(), 'task.json')] = task_json + mbw.files[mbw.PathJoin(mbw.TempDir(), 'collect_output.json')] = collect_json + original_impl = mbw.ToSrcRelPath + + def to_src_rel_path_stub(path): + if path.endswith('base_unittests.archive.json'): + return 'base_unittests.archive.json' + return original_impl(path) + + mbw.ToSrcRelPath = to_src_rel_path_stub + self.check(['run', '-s', '-c', 'debug_goma', '//out/Default', 'base_unittests'], mbw=mbw, ret=0) + mbw = self.fake_mbw(files=files) + mbw.files[mbw.PathJoin(mbw.TempDir(), 'task.json')] = task_json + mbw.files[mbw.PathJoin(mbw.TempDir(), 'collect_output.json')] = collect_json + mbw.ToSrcRelPath = to_src_rel_path_stub self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7', '//out/Default', 'base_unittests'], mbw=mbw, ret=0) @@ -801,26 +850,26 @@ def 
test_help(self): def test_multiple_phases(self): # Check that not passing a --phase to a multi-phase builder fails. - mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'], + mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_multi_phase'], ret=1) self.assertIn('Must specify a build --phase', mbw.out) # Check that passing a --phase to a single-phase builder fails. - mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_builder', + mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_builder', '--phase', 'phase_1'], ret=1) self.assertIn('Must not specify a build --phase', mbw.out) # Check that passing a wrong phase key to a multi-phase builder fails. - mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', + mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_multi_phase', '--phase', 'wrong_phase'], ret=1) self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out) # Check that passing a correct phase key to a multi-phase builder passes. - mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', + mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_multi_phase', '--phase', 'phase_1'], ret=0) self.assertIn('phase = 1', mbw.out) - mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase', + mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_multi_phase', '--phase', 'phase_2'], ret=0) self.assertIn('phase = 2', mbw.out) diff --git a/tools_webrtc/msan/blacklist.txt b/tools_webrtc/msan/suppressions.txt similarity index 57% rename from tools_webrtc/msan/blacklist.txt rename to tools_webrtc/msan/suppressions.txt index 3c9c9b202b..47a0dff16f 100644 --- a/tools_webrtc/msan/blacklist.txt +++ b/tools_webrtc/msan/suppressions.txt @@ -4,9 +4,12 @@ # # Please think twice before you add or remove these rules. -# This is a stripped down copy of Chromium's blacklist.txt, to enable -# adding WebRTC-specific blacklist entries. 
+# This is a stripped down copy of Chromium's ignorelist.txt, to enable +# adding WebRTC-specific ignorelist entries. # Uninit in zlib. http://crbug.com/116277 fun:*MOZ_Z_deflate* +# Uninit in H264. http://crbug.com/webrtc/11702 +src:*/third_party/openh264/src/codec/processing/src/vaacalc/vaacalcfuncs.cpp + diff --git a/tools_webrtc/network_emulator/config.py b/tools_webrtc/network_emulator/config.py index 60fa485db4..c1d3eaf3d1 100644 --- a/tools_webrtc/network_emulator/config.py +++ b/tools_webrtc/network_emulator/config.py @@ -6,31 +6,31 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Configuration class for network emulation.""" class ConnectionConfig(object): - """Configuration containing the characteristics of a network connection.""" + """Configuration containing the characteristics of a network connection.""" - def __init__(self, num, name, receive_bw_kbps, send_bw_kbps, delay_ms, - packet_loss_percent, queue_slots): - self.num = num - self.name = name - self.receive_bw_kbps = receive_bw_kbps - self.send_bw_kbps = send_bw_kbps - self.delay_ms = delay_ms - self.packet_loss_percent = packet_loss_percent - self.queue_slots = queue_slots + def __init__(self, num, name, receive_bw_kbps, send_bw_kbps, delay_ms, + packet_loss_percent, queue_slots): + self.num = num + self.name = name + self.receive_bw_kbps = receive_bw_kbps + self.send_bw_kbps = send_bw_kbps + self.delay_ms = delay_ms + self.packet_loss_percent = packet_loss_percent + self.queue_slots = queue_slots - def __str__(self): - """String representing the configuration. + def __str__(self): + """String representing the configuration. 
Returns: A string formatted and padded like this example: 12 Name 375 kbps 375 kbps 10 145 ms 0.1 % """ - left_aligned_name = self.name.ljust(24, ' ') - return '%2s %24s %5s kbps %5s kbps %4s %5s ms %3s %%' % ( - self.num, left_aligned_name, self.receive_bw_kbps, self.send_bw_kbps, - self.queue_slots, self.delay_ms, self.packet_loss_percent) + left_aligned_name = self.name.ljust(24, ' ') + return '%2s %24s %5s kbps %5s kbps %4s %5s ms %3s %%' % ( + self.num, left_aligned_name, self.receive_bw_kbps, + self.send_bw_kbps, self.queue_slots, self.delay_ms, + self.packet_loss_percent) diff --git a/tools_webrtc/network_emulator/emulate.py b/tools_webrtc/network_emulator/emulate.py index 08049a5424..51224c80b1 100755 --- a/tools_webrtc/network_emulator/emulate.py +++ b/tools_webrtc/network_emulator/emulate.py @@ -6,10 +6,8 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Script for constraining traffic on the local machine.""" - import logging import optparse import socket @@ -18,7 +16,6 @@ import config import network_emulator - _DEFAULT_LOG_LEVEL = logging.INFO # Default port range to apply network constraints on. 
@@ -41,7 +38,7 @@ config.ConnectionConfig(12, 'Wifi, Average Case', 40000, 33000, 1, 0, 100), config.ConnectionConfig(13, 'Wifi, Good', 45000, 40000, 1, 0, 100), config.ConnectionConfig(14, 'Wifi, Lossy', 40000, 33000, 1, 0, 100), - ] +] _PRESETS_DICT = dict((p.num, p) for p in _PRESETS) _DEFAULT_PRESET_ID = 2 @@ -49,147 +46,170 @@ class NonStrippingEpilogOptionParser(optparse.OptionParser): - """Custom parser to let us show the epilog without weird line breaking.""" + """Custom parser to let us show the epilog without weird line breaking.""" - def format_epilog(self, formatter): - return self.epilog + def format_epilog(self, formatter): + return self.epilog def _GetExternalIp(): - """Finds out the machine's external IP by connecting to google.com.""" - external_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - external_socket.connect(('google.com', 80)) - return external_socket.getsockname()[0] + """Finds out the machine's external IP by connecting to google.com.""" + external_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + external_socket.connect(('google.com', 80)) + return external_socket.getsockname()[0] def _ParseArgs(): - """Define and parse the command-line arguments.""" - presets_string = '\n'.join(str(p) for p in _PRESETS) - parser = NonStrippingEpilogOptionParser(epilog=( - '\nAvailable presets:\n' - ' Bandwidth (kbps) Packet\n' - 'ID Name Receive Send Queue Delay loss \n' - '-- ---- --------- -------- ----- ------- ------\n' - '%s\n' % presets_string)) - parser.add_option('-p', '--preset', type='int', default=_DEFAULT_PRESET_ID, - help=('ConnectionConfig configuration, specified by ID. ' - 'Default: %default')) - parser.add_option('-r', '--receive-bw', type='int', - default=_DEFAULT_PRESET.receive_bw_kbps, - help=('Receive bandwidth in kilobit/s. Default: %default')) - parser.add_option('-s', '--send-bw', type='int', - default=_DEFAULT_PRESET.send_bw_kbps, - help=('Send bandwidth in kilobit/s. 
Default: %default')) - parser.add_option('-d', '--delay', type='int', - default=_DEFAULT_PRESET.delay_ms, - help=('Delay in ms. Default: %default')) - parser.add_option('-l', '--packet-loss', type='float', - default=_DEFAULT_PRESET.packet_loss_percent, - help=('Packet loss in %. Default: %default')) - parser.add_option('-q', '--queue', type='int', - default=_DEFAULT_PRESET.queue_slots, - help=('Queue size as number of slots. Default: %default')) - parser.add_option('--port-range', default='%s,%s' % _DEFAULT_PORT_RANGE, - help=('Range of ports for constrained network. Specify as ' - 'two comma separated integers. Default: %default')) - parser.add_option('--target-ip', default=None, - help=('The interface IP address to apply the rules for. ' - 'Default: the external facing interface IP address.')) - parser.add_option('-v', '--verbose', action='store_true', default=False, - help=('Turn on verbose output. Will print all \'ipfw\' ' - 'commands that are executed.')) - - options = parser.parse_args()[0] - - # Find preset by ID, if specified. - if options.preset and not _PRESETS_DICT.has_key(options.preset): - parser.error('Invalid preset: %s' % options.preset) - - # Simple validation of the IP address, if supplied. - if options.target_ip: + """Define and parse the command-line arguments.""" + presets_string = '\n'.join(str(p) for p in _PRESETS) + parser = NonStrippingEpilogOptionParser(epilog=( + '\nAvailable presets:\n' + ' Bandwidth (kbps) Packet\n' + 'ID Name Receive Send Queue Delay loss \n' + '-- ---- --------- -------- ----- ------- ------\n' + '%s\n' % presets_string)) + parser.add_option('-p', + '--preset', + type='int', + default=_DEFAULT_PRESET_ID, + help=('ConnectionConfig configuration, specified by ID. ' + 'Default: %default')) + parser.add_option( + '-r', + '--receive-bw', + type='int', + default=_DEFAULT_PRESET.receive_bw_kbps, + help=('Receive bandwidth in kilobit/s. 
Default: %default')) + parser.add_option('-s', + '--send-bw', + type='int', + default=_DEFAULT_PRESET.send_bw_kbps, + help=('Send bandwidth in kilobit/s. Default: %default')) + parser.add_option('-d', + '--delay', + type='int', + default=_DEFAULT_PRESET.delay_ms, + help=('Delay in ms. Default: %default')) + parser.add_option('-l', + '--packet-loss', + type='float', + default=_DEFAULT_PRESET.packet_loss_percent, + help=('Packet loss in %. Default: %default')) + parser.add_option( + '-q', + '--queue', + type='int', + default=_DEFAULT_PRESET.queue_slots, + help=('Queue size as number of slots. Default: %default')) + parser.add_option( + '--port-range', + default='%s,%s' % _DEFAULT_PORT_RANGE, + help=('Range of ports for constrained network. Specify as ' + 'two comma separated integers. Default: %default')) + parser.add_option( + '--target-ip', + default=None, + help=('The interface IP address to apply the rules for. ' + 'Default: the external facing interface IP address.')) + parser.add_option('-v', + '--verbose', + action='store_true', + default=False, + help=('Turn on verbose output. Will print all \'ipfw\' ' + 'commands that are executed.')) + + options = parser.parse_args()[0] + + # Find preset by ID, if specified. + if options.preset and not _PRESETS_DICT.has_key(options.preset): + parser.error('Invalid preset: %s' % options.preset) + + # Simple validation of the IP address, if supplied. + if options.target_ip: + try: + socket.inet_aton(options.target_ip) + except socket.error: + parser.error('Invalid IP address specified: %s' % + options.target_ip) + + # Convert port range into the desired tuple format. 
try: - socket.inet_aton(options.target_ip) - except socket.error: - parser.error('Invalid IP address specified: %s' % options.target_ip) + if isinstance(options.port_range, str): + options.port_range = tuple( + int(port) for port in options.port_range.split(',')) + if len(options.port_range) != 2: + parser.error( + 'Invalid port range specified, please specify two ' + 'integers separated by a comma.') + except ValueError: + parser.error('Invalid port range specified.') - # Convert port range into the desired tuple format. - try: - if isinstance(options.port_range, str): - options.port_range = tuple(int(port) for port in - options.port_range.split(',')) - if len(options.port_range) != 2: - parser.error('Invalid port range specified, please specify two ' - 'integers separated by a comma.') - except ValueError: - parser.error('Invalid port range specified.') - - _InitLogging(options.verbose) - return options + _InitLogging(options.verbose) + return options def _InitLogging(verbose): - """Setup logging.""" - log_level = _DEFAULT_LOG_LEVEL - if verbose: - log_level = logging.DEBUG - logging.basicConfig(level=log_level, format='%(message)s') + """Setup logging.""" + log_level = _DEFAULT_LOG_LEVEL + if verbose: + log_level = logging.DEBUG + logging.basicConfig(level=log_level, format='%(message)s') def main(): - options = _ParseArgs() - - # Build a configuration object. Override any preset configuration settings if - # a value of a setting was also given as a flag. 
- connection_config = _PRESETS_DICT[options.preset] - if options.receive_bw is not _DEFAULT_PRESET.receive_bw_kbps: - connection_config.receive_bw_kbps = options.receive_bw - if options.send_bw is not _DEFAULT_PRESET.send_bw_kbps: - connection_config.send_bw_kbps = options.send_bw - if options.delay is not _DEFAULT_PRESET.delay_ms: - connection_config.delay_ms = options.delay - if options.packet_loss is not _DEFAULT_PRESET.packet_loss_percent: - connection_config.packet_loss_percent = options.packet_loss - if options.queue is not _DEFAULT_PRESET.queue_slots: - connection_config.queue_slots = options.queue - emulator = network_emulator.NetworkEmulator(connection_config, - options.port_range) - try: - emulator.CheckPermissions() - except network_emulator.NetworkEmulatorError as e: - logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error) - return -1 - - if not options.target_ip: - external_ip = _GetExternalIp() - else: - external_ip = options.target_ip - - logging.info('Constraining traffic to/from IP: %s', external_ip) - try: - emulator.Emulate(external_ip) - logging.info('Started network emulation with the following configuration:\n' - ' Receive bandwidth: %s kbps (%s kB/s)\n' - ' Send bandwidth : %s kbps (%s kB/s)\n' - ' Delay : %s ms\n' - ' Packet loss : %s %%\n' - ' Queue slots : %s', - connection_config.receive_bw_kbps, - connection_config.receive_bw_kbps/8, - connection_config.send_bw_kbps, - connection_config.send_bw_kbps/8, - connection_config.delay_ms, - connection_config.packet_loss_percent, - connection_config.queue_slots) - logging.info('Affected traffic: IP traffic on ports %s-%s', - options.port_range[0], options.port_range[1]) - raw_input('Press Enter to abort Network Emulation...') - logging.info('Flushing all Dummynet rules...') - network_emulator.Cleanup() - logging.info('Completed Network Emulation.') - return 0 - except network_emulator.NetworkEmulatorError as e: - logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error) - return -2 + 
options = _ParseArgs() + + # Build a configuration object. Override any preset configuration settings if + # a value of a setting was also given as a flag. + connection_config = _PRESETS_DICT[options.preset] + if options.receive_bw is not _DEFAULT_PRESET.receive_bw_kbps: + connection_config.receive_bw_kbps = options.receive_bw + if options.send_bw is not _DEFAULT_PRESET.send_bw_kbps: + connection_config.send_bw_kbps = options.send_bw + if options.delay is not _DEFAULT_PRESET.delay_ms: + connection_config.delay_ms = options.delay + if options.packet_loss is not _DEFAULT_PRESET.packet_loss_percent: + connection_config.packet_loss_percent = options.packet_loss + if options.queue is not _DEFAULT_PRESET.queue_slots: + connection_config.queue_slots = options.queue + emulator = network_emulator.NetworkEmulator(connection_config, + options.port_range) + try: + emulator.CheckPermissions() + except network_emulator.NetworkEmulatorError as e: + logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error) + return -1 + + if not options.target_ip: + external_ip = _GetExternalIp() + else: + external_ip = options.target_ip + + logging.info('Constraining traffic to/from IP: %s', external_ip) + try: + emulator.Emulate(external_ip) + logging.info( + 'Started network emulation with the following configuration:\n' + ' Receive bandwidth: %s kbps (%s kB/s)\n' + ' Send bandwidth : %s kbps (%s kB/s)\n' + ' Delay : %s ms\n' + ' Packet loss : %s %%\n' + ' Queue slots : %s', connection_config.receive_bw_kbps, + connection_config.receive_bw_kbps / 8, + connection_config.send_bw_kbps, connection_config.send_bw_kbps / 8, + connection_config.delay_ms, connection_config.packet_loss_percent, + connection_config.queue_slots) + logging.info('Affected traffic: IP traffic on ports %s-%s', + options.port_range[0], options.port_range[1]) + raw_input('Press Enter to abort Network Emulation...') + logging.info('Flushing all Dummynet rules...') + network_emulator.Cleanup() + logging.info('Completed Network 
Emulation.') + return 0 + except network_emulator.NetworkEmulatorError as e: + logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error) + return -2 + if __name__ == '__main__': - sys.exit(main()) + sys.exit(main()) diff --git a/tools_webrtc/network_emulator/network_emulator.py b/tools_webrtc/network_emulator/network_emulator.py index aa3ebda4c0..f77753b0f6 100644 --- a/tools_webrtc/network_emulator/network_emulator.py +++ b/tools_webrtc/network_emulator/network_emulator.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Script for constraining traffic on the local machine.""" import ctypes @@ -17,7 +16,7 @@ class NetworkEmulatorError(BaseException): - """Exception raised for errors in the network emulator. + """Exception raised for errors in the network emulator. Attributes: fail_msg: User defined error message. @@ -27,81 +26,88 @@ class NetworkEmulatorError(BaseException): stderr: Error output of running the command. """ - def __init__(self, fail_msg, cmd=None, returncode=None, output=None, - error=None): - BaseException.__init__(self, fail_msg) - self.fail_msg = fail_msg - self.cmd = cmd - self.returncode = returncode - self.output = output - self.error = error + def __init__(self, + fail_msg, + cmd=None, + returncode=None, + output=None, + error=None): + BaseException.__init__(self, fail_msg) + self.fail_msg = fail_msg + self.cmd = cmd + self.returncode = returncode + self.output = output + self.error = error class NetworkEmulator(object): - """A network emulator that can constrain the network using Dummynet.""" + """A network emulator that can constrain the network using Dummynet.""" - def __init__(self, connection_config, port_range): - """Constructor. + def __init__(self, connection_config, port_range): + """Constructor. 
Args: connection_config: A config.ConnectionConfig object containing the characteristics for the connection to be emulation. port_range: Tuple containing two integers defining the port range. """ - self._pipe_counter = 0 - self._rule_counter = 0 - self._port_range = port_range - self._connection_config = connection_config + self._pipe_counter = 0 + self._rule_counter = 0 + self._port_range = port_range + self._connection_config = connection_config - def Emulate(self, target_ip): - """Starts a network emulation by setting up Dummynet rules. + def Emulate(self, target_ip): + """Starts a network emulation by setting up Dummynet rules. Args: target_ip: The IP address of the interface that shall be that have the network constraints applied to it. """ - receive_pipe_id = self._CreateDummynetPipe( - self._connection_config.receive_bw_kbps, - self._connection_config.delay_ms, - self._connection_config.packet_loss_percent, - self._connection_config.queue_slots) - logging.debug('Created receive pipe: %s', receive_pipe_id) - send_pipe_id = self._CreateDummynetPipe( - self._connection_config.send_bw_kbps, - self._connection_config.delay_ms, - self._connection_config.packet_loss_percent, - self._connection_config.queue_slots) - logging.debug('Created send pipe: %s', send_pipe_id) - - # Adding the rules will start the emulation. - incoming_rule_id = self._CreateDummynetRule(receive_pipe_id, 'any', - target_ip, self._port_range) - logging.debug('Created incoming rule: %s', incoming_rule_id) - outgoing_rule_id = self._CreateDummynetRule(send_pipe_id, target_ip, - 'any', self._port_range) - logging.debug('Created outgoing rule: %s', outgoing_rule_id) - - @staticmethod - def CheckPermissions(): - """Checks if permissions are available to run Dummynet commands. 
+ receive_pipe_id = self._CreateDummynetPipe( + self._connection_config.receive_bw_kbps, + self._connection_config.delay_ms, + self._connection_config.packet_loss_percent, + self._connection_config.queue_slots) + logging.debug('Created receive pipe: %s', receive_pipe_id) + send_pipe_id = self._CreateDummynetPipe( + self._connection_config.send_bw_kbps, + self._connection_config.delay_ms, + self._connection_config.packet_loss_percent, + self._connection_config.queue_slots) + logging.debug('Created send pipe: %s', send_pipe_id) + + # Adding the rules will start the emulation. + incoming_rule_id = self._CreateDummynetRule(receive_pipe_id, 'any', + target_ip, + self._port_range) + logging.debug('Created incoming rule: %s', incoming_rule_id) + outgoing_rule_id = self._CreateDummynetRule(send_pipe_id, target_ip, + 'any', self._port_range) + logging.debug('Created outgoing rule: %s', outgoing_rule_id) + + @staticmethod + def CheckPermissions(): + """Checks if permissions are available to run Dummynet commands. Raises: NetworkEmulatorError: If permissions to run Dummynet commands are not available. """ - try: - if os.getuid() != 0: - raise NetworkEmulatorError('You must run this script with sudo.') - except AttributeError: - - # AttributeError will be raised on Windows. - if ctypes.windll.shell32.IsUserAnAdmin() == 0: - raise NetworkEmulatorError('You must run this script with administrator' - ' privileges.') - - def _CreateDummynetRule(self, pipe_id, from_address, to_address, - port_range): - """Creates a network emulation rule and returns its ID. + try: + if os.getuid() != 0: + raise NetworkEmulatorError( + 'You must run this script with sudo.') + except AttributeError: + + # AttributeError will be raised on Windows. 
+ if ctypes.windll.shell32.IsUserAnAdmin() == 0: + raise NetworkEmulatorError( + 'You must run this script with administrator' + ' privileges.') + + def _CreateDummynetRule(self, pipe_id, from_address, to_address, + port_range): + """Creates a network emulation rule and returns its ID. Args: pipe_id: integer ID of the pipe. @@ -115,18 +121,22 @@ def _CreateDummynetRule(self, pipe_id, from_address, to_address, The ID of the rule, starting at 100. The rule ID increments with 100 for each rule being added. """ - self._rule_counter += 100 - add_part = ['add', self._rule_counter, 'pipe', pipe_id, - 'ip', 'from', from_address, 'to', to_address] - _RunIpfwCommand(add_part + ['src-port', '%s-%s' % port_range], - 'Failed to add Dummynet src-port rule.') - _RunIpfwCommand(add_part + ['dst-port', '%s-%s' % port_range], - 'Failed to add Dummynet dst-port rule.') - return self._rule_counter - - def _CreateDummynetPipe(self, bandwidth_kbps, delay_ms, packet_loss_percent, - queue_slots): - """Creates a Dummynet pipe and return its ID. + self._rule_counter += 100 + add_part = [ + 'add', self._rule_counter, 'pipe', pipe_id, 'ip', 'from', + from_address, 'to', to_address + ] + _RunIpfwCommand(add_part + + ['src-port', '%s-%s' % port_range], + 'Failed to add Dummynet src-port rule.') + _RunIpfwCommand(add_part + + ['dst-port', '%s-%s' % port_range], + 'Failed to add Dummynet dst-port rule.') + return self._rule_counter + + def _CreateDummynetPipe(self, bandwidth_kbps, delay_ms, + packet_loss_percent, queue_slots): + """Creates a Dummynet pipe and return its ID. Args: bandwidth_kbps: Bandwidth. @@ -136,32 +146,34 @@ def _CreateDummynetPipe(self, bandwidth_kbps, delay_ms, packet_loss_percent, Returns: The ID of the pipe, starting at 1. 
""" - self._pipe_counter += 1 - cmd = ['pipe', self._pipe_counter, 'config', - 'bw', str(bandwidth_kbps/8) + 'KByte/s', - 'delay', '%sms' % delay_ms, - 'plr', (packet_loss_percent/100.0), - 'queue', queue_slots] - error_message = 'Failed to create Dummynet pipe. ' - if sys.platform.startswith('linux'): - error_message += ('Make sure you have loaded the ipfw_mod.ko module to ' - 'your kernel (sudo insmod /path/to/ipfw_mod.ko).') - _RunIpfwCommand(cmd, error_message) - return self._pipe_counter + self._pipe_counter += 1 + cmd = [ + 'pipe', self._pipe_counter, 'config', 'bw', + str(bandwidth_kbps / 8) + 'KByte/s', 'delay', + '%sms' % delay_ms, 'plr', (packet_loss_percent / 100.0), 'queue', + queue_slots + ] + error_message = 'Failed to create Dummynet pipe. ' + if sys.platform.startswith('linux'): + error_message += ( + 'Make sure you have loaded the ipfw_mod.ko module to ' + 'your kernel (sudo insmod /path/to/ipfw_mod.ko).') + _RunIpfwCommand(cmd, error_message) + return self._pipe_counter + def Cleanup(): - """Stops the network emulation by flushing all Dummynet rules. + """Stops the network emulation by flushing all Dummynet rules. Notice that this will flush any rules that may have been created previously before starting the emulation. """ - _RunIpfwCommand(['-f', 'flush'], - 'Failed to flush Dummynet rules!') - _RunIpfwCommand(['-f', 'pipe', 'flush'], - 'Failed to flush Dummynet pipes!') + _RunIpfwCommand(['-f', 'flush'], 'Failed to flush Dummynet rules!') + _RunIpfwCommand(['-f', 'pipe', 'flush'], 'Failed to flush Dummynet pipes!') + def _RunIpfwCommand(command, fail_msg=None): - """Executes a command and prefixes the appropriate command for + """Executes a command and prefixes the appropriate command for Windows or Linux/UNIX. Args: @@ -172,18 +184,19 @@ def _RunIpfwCommand(command, fail_msg=None): NetworkEmulatorError: If command fails a message is set by the fail_msg parameter. 
""" - if sys.platform == 'win32': - ipfw_command = ['ipfw.exe'] - else: - ipfw_command = ['sudo', '-n', 'ipfw'] - - cmd_list = ipfw_command[:] + [str(x) for x in command] - cmd_string = ' '.join(cmd_list) - logging.debug('Running command: %s', cmd_string) - process = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - output, error = process.communicate() - if process.returncode != 0: - raise NetworkEmulatorError(fail_msg, cmd_string, process.returncode, output, - error) - return output.strip() + if sys.platform == 'win32': + ipfw_command = ['ipfw.exe'] + else: + ipfw_command = ['sudo', '-n', 'ipfw'] + + cmd_list = ipfw_command[:] + [str(x) for x in command] + cmd_string = ' '.join(cmd_list) + logging.debug('Running command: %s', cmd_string) + process = subprocess.Popen(cmd_list, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + output, error = process.communicate() + if process.returncode != 0: + raise NetworkEmulatorError(fail_msg, cmd_string, process.returncode, + output, error) + return output.strip() diff --git a/tools_webrtc/perf/catapult_uploader.py b/tools_webrtc/perf/catapult_uploader.py index f3ec2ce7ca..a10dd84cb5 100644 --- a/tools_webrtc/perf/catapult_uploader.py +++ b/tools_webrtc/perf/catapult_uploader.py @@ -7,10 +7,11 @@ # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
- +import datetime import httplib2 import json import subprocess +import time import zlib from tracing.value import histogram @@ -20,103 +21,241 @@ def _GenerateOauthToken(): - args = ['luci-auth', 'token'] - p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if p.wait() == 0: - output = p.stdout.read() - return output.strip() - else: - raise RuntimeError( - 'Error generating authentication token.\nStdout: %s\nStderr:%s' % - (p.stdout.read(), p.stderr.read())) + args = ['luci-auth', 'token'] + p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + if p.wait() == 0: + output = p.stdout.read() + return output.strip() + else: + raise RuntimeError( + 'Error generating authentication token.\nStdout: %s\nStderr:%s' % + (p.stdout.read(), p.stderr.read())) + + +def _CreateHeaders(oauth_token): + return {'Authorization': 'Bearer %s' % oauth_token} + + +def _SendHistogramSet(url, histograms): + """Make a HTTP POST with the given JSON to the Performance Dashboard. + + Args: + url: URL of Performance Dashboard instance, e.g. + "https://chromeperf.appspot.com". + histograms: a histogram set object that contains the data to be sent. + """ + headers = _CreateHeaders(_GenerateOauthToken()) + + serialized = json.dumps(_ApplyHacks(histograms.AsDicts()), indent=4) + + if url.startswith('http://localhost'): + # The catapult server turns off compression in developer mode. + data = serialized + else: + data = zlib.compress(serialized) + + print 'Sending %d bytes to %s.' % (len(data), url + '/add_histograms') + + http = httplib2.Http() + response, content = http.request(url + '/add_histograms', + method='POST', + body=data, + headers=headers) + return response, content + + +def _WaitForUploadConfirmation(url, upload_token, wait_timeout, + wait_polling_period): + """Make a HTTP GET requests to the Performance Dashboard untill upload + status is known or the time is out. + + Args: + url: URL of Performance Dashboard instance, e.g. 
+ "https://chromeperf.appspot.com". + upload_token: String that identifies Performance Dashboard and can be used + for the status check. + wait_timeout: (datetime.timedelta) Maximum time to wait for the + confirmation. + wait_polling_period: (datetime.timedelta) Performance Dashboard will be + polled every wait_polling_period amount of time. + """ + assert wait_polling_period <= wait_timeout + + headers = _CreateHeaders(_GenerateOauthToken()) + http = httplib2.Http() + + oauth_refreshed = False + response = None + resp_json = None + current_time = datetime.datetime.now() + end_time = current_time + wait_timeout + next_poll_time = current_time + wait_polling_period + while datetime.datetime.now() < end_time: + current_time = datetime.datetime.now() + if next_poll_time > current_time: + time.sleep((next_poll_time - current_time).total_seconds()) + next_poll_time = datetime.datetime.now() + wait_polling_period + + response, content = http.request(url + '/uploads/' + upload_token, + method='GET', headers=headers) + + print 'Upload state polled. Response: %r.' % content + if not oauth_refreshed and response.status == 403: + print 'Oauth token refreshed. Continue polling.' + headers = _CreateHeaders(_GenerateOauthToken()) + oauth_refreshed = True + continue -def _SendHistogramSet(url, histograms, oauth_token): - """Make a HTTP POST with the given JSON to the Performance Dashboard. + if response.status != 200: + break - Args: - url: URL of Performance Dashboard instance, e.g. - "https://chromeperf.appspot.com". - histograms: a histogram set object that contains the data to be sent. - oauth_token: An oauth token to use for authorization. 
- """ - headers = {'Authorization': 'Bearer %s' % oauth_token} + resp_json = json.loads(content) + if resp_json['state'] == 'COMPLETED' or resp_json['state'] == 'FAILED': + break - serialized = json.dumps(_ApplyHacks(histograms.AsDicts()), indent=4) + return response, resp_json - if url.startswith('http://localhost'): - # The catapult server turns off compression in developer mode. - data = serialized - else: - data = zlib.compress(serialized) - print 'Sending %d bytes to %s.' % (len(data), url + '/add_histograms') +# Because of an issues on the Dashboard side few measurements over a large set +# can fail to upload. That would lead to the whole upload to be marked as +# failed. Check it, so it doesn't increase flakiness of our tests. +# TODO(crbug.com/1145904): Remove check after fixed. +def _CheckFullUploadInfo(url, upload_token, + min_measurements_amount=100, + max_failed_measurements_amount=1): + """Make a HTTP GET requests to the Performance Dashboard to get full info + about upload (including measurements). Checks if upload is correct despite + not having status "COMPLETED". - http = httplib2.Http() - response, content = http.request(url + '/add_histograms', method='POST', - body=data, headers=headers) - return response, content + Args: + url: URL of Performance Dashboard instance, e.g. + "https://chromeperf.appspot.com". + upload_token: String that identifies Performance Dashboard and can be used + for the status check. + min_measurements_amount: minimal amount of measurements that the upload + should have to start tolerating failures in particular measurements. + max_failed_measurements_amount: maximal amount of failured measurements to + tolerate. 
+ """ + headers = _CreateHeaders(_GenerateOauthToken()) + http = httplib2.Http() + + response, content = http.request(url + '/uploads/' + upload_token + + '?additional_info=measurements', + method='GET', headers=headers) + + if response.status != 200: + print 'Failed to reach the dashboard to get full upload info.' + return False + + resp_json = json.loads(content) + print 'Full upload info: %s.' % json.dumps(resp_json, indent=4) + + if 'measurements' in resp_json: + measurements_cnt = len(resp_json['measurements']) + not_completed_state_cnt = len([ + m for m in resp_json['measurements'] + if m['state'] != 'COMPLETED' + ]) + + if (measurements_cnt >= min_measurements_amount and + not_completed_state_cnt <= max_failed_measurements_amount): + print('Not all measurements were uploaded. Measurements count: %d, ' + 'failed to upload: %d' % + (measurements_cnt, not_completed_state_cnt)) + return True + + return False # TODO(https://crbug.com/1029452): HACKHACK # Remove once we have doubles in the proto and handle -infinity correctly. 
def _ApplyHacks(dicts): - for d in dicts: - if 'running' in d: - def _NoInf(value): + def _NoInf(value): if value == float('inf'): - return histogram.JS_MAX_VALUE + return histogram.JS_MAX_VALUE if value == float('-inf'): - return -histogram.JS_MAX_VALUE + return -histogram.JS_MAX_VALUE return value - d['running'] = [_NoInf(value) for value in d['running']] - return dicts + for d in dicts: + if 'running' in d: + d['running'] = [_NoInf(value) for value in d['running']] + if 'sampleValues' in d: + d['sampleValues'] = [_NoInf(value) for value in d['sampleValues']] + + return dicts def _LoadHistogramSetFromProto(options): - hs = histogram_set.HistogramSet() - with options.input_results_file as f: - hs.ImportProto(f.read()) + hs = histogram_set.HistogramSet() + with options.input_results_file as f: + hs.ImportProto(f.read()) - return hs + return hs def _AddBuildInfo(histograms, options): - common_diagnostics = { - reserved_infos.MASTERS: options.perf_dashboard_machine_group, - reserved_infos.BOTS: options.bot, - reserved_infos.POINT_ID: options.commit_position, - reserved_infos.BENCHMARKS: options.test_suite, - reserved_infos.WEBRTC_REVISIONS: str(options.webrtc_git_hash), - reserved_infos.BUILD_URLS: options.build_page_url, - } + common_diagnostics = { + reserved_infos.MASTERS: options.perf_dashboard_machine_group, + reserved_infos.BOTS: options.bot, + reserved_infos.POINT_ID: options.commit_position, + reserved_infos.BENCHMARKS: options.test_suite, + reserved_infos.WEBRTC_REVISIONS: str(options.webrtc_git_hash), + reserved_infos.BUILD_URLS: options.build_page_url, + } - for k, v in common_diagnostics.items(): - histograms.AddSharedDiagnosticToAllHistograms( - k.name, generic_set.GenericSet([v])) + for k, v in common_diagnostics.items(): + histograms.AddSharedDiagnosticToAllHistograms( + k.name, generic_set.GenericSet([v])) def _DumpOutput(histograms, output_file): - with output_file: - json.dump(_ApplyHacks(histograms.AsDicts()), output_file, indent=4) + with 
output_file: + json.dump(_ApplyHacks(histograms.AsDicts()), output_file, indent=4) def UploadToDashboard(options): - histograms = _LoadHistogramSetFromProto(options) - _AddBuildInfo(histograms, options) - - if options.output_json_file: - _DumpOutput(histograms, options.output_json_file) - - oauth_token = _GenerateOauthToken() - response, content = _SendHistogramSet( - options.dashboard_url, histograms, oauth_token) - - if response.status == 200: - print 'Received 200 from dashboard.' - return 0 - else: - print('Upload failed with %d: %s\n\n%s' % (response.status, response.reason, - content)) + histograms = _LoadHistogramSetFromProto(options) + _AddBuildInfo(histograms, options) + + if options.output_json_file: + _DumpOutput(histograms, options.output_json_file) + + response, content = _SendHistogramSet(options.dashboard_url, histograms) + + if response.status != 200: + print('Upload failed with %d: %s\n\n%s' % (response.status, + response.reason, content)) + return 1 + + upload_token = json.loads(content).get('token') + if not options.wait_for_upload or not upload_token: + print('Received 200 from dashboard. ', + 'Not waiting for the upload status confirmation.') + return 0 + + response, resp_json = _WaitForUploadConfirmation( + options.dashboard_url, + upload_token, + datetime.timedelta(seconds=options.wait_timeout_sec), + datetime.timedelta(seconds=options.wait_polling_period_sec)) + + if ((resp_json and resp_json['state'] == 'COMPLETED') or + _CheckFullUploadInfo(options.dashboard_url, upload_token)): + print 'Upload completed.' + return 0 + + if response.status != 200: + print('Upload status poll failed with %d: %s' % (response.status, + response.reason)) + return 1 + + if resp_json['state'] == 'FAILED': + print 'Upload failed.' + return 1 + + print('Upload wasn\'t completed in a given time: %d seconds.' 
% + options.wait_timeout_sec) return 1 diff --git a/tools_webrtc/perf/webrtc_dashboard_upload.py b/tools_webrtc/perf/webrtc_dashboard_upload.py index ed1b35e038..19db0250cf 100644 --- a/tools_webrtc/perf/webrtc_dashboard_upload.py +++ b/tools_webrtc/perf/webrtc_dashboard_upload.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env vpython # Copyright (c) 2019 The WebRTC project authors. All Rights Reserved. # # Use of this source code is governed by a BSD-style license @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Adds build info to perf results and uploads them. The tests don't know which bot executed the tests or at what revision, so we @@ -22,80 +21,102 @@ import os import sys +# Even if protobuf is not used directly, this allows transitive imports +# of the protobuf library to use the vpython wheel specified in the root +# level .vpython (see bugs.webrtc.org/12211 for context). +import google.protobuf # pylint: disable=unused-import + def _CreateParser(): - parser = argparse.ArgumentParser() - parser.add_argument('--perf-dashboard-machine-group', required=True, - help='The "master" the bots are grouped under. This ' - 'string is the group in the the perf dashboard path ' - 'group/bot/perf_id/metric/subtest.') - parser.add_argument('--bot', required=True, - help='The bot running the test (e.g. ' - 'webrtc-win-large-tests).') - parser.add_argument('--test-suite', required=True, - help='The key for the test in the dashboard (i.e. 
what ' - 'you select in the top-level test suite selector in the ' - 'dashboard') - parser.add_argument('--webrtc-git-hash', required=True, - help='webrtc.googlesource.com commit hash.') - parser.add_argument('--commit-position', type=int, required=True, - help='Commit pos corresponding to the git hash.') - parser.add_argument('--build-page-url', required=True, - help='URL to the build page for this build.') - parser.add_argument('--dashboard-url', required=True, - help='Which dashboard to use.') - parser.add_argument('--input-results-file', type=argparse.FileType(), - required=True, - help='A JSON file with output from WebRTC tests.') - parser.add_argument('--output-json-file', type=argparse.FileType('w'), - help='Where to write the output (for debugging).') - parser.add_argument('--outdir', required=True, - help='Path to the local out/ dir (usually out/Default)') - return parser + parser = argparse.ArgumentParser() + parser.add_argument('--perf-dashboard-machine-group', required=True, + help='The "master" the bots are grouped under. This ' + 'string is the group in the the perf dashboard path ' + 'group/bot/perf_id/metric/subtest.') + parser.add_argument('--bot', required=True, + help='The bot running the test (e.g. ' + 'webrtc-win-large-tests).') + parser.add_argument('--test-suite', required=True, + help='The key for the test in the dashboard (i.e. 
what ' + 'you select in the top-level test suite selector in ' + 'the dashboard') + parser.add_argument('--webrtc-git-hash', required=True, + help='webrtc.googlesource.com commit hash.') + parser.add_argument('--commit-position', type=int, required=True, + help='Commit pos corresponding to the git hash.') + parser.add_argument('--build-page-url', required=True, + help='URL to the build page for this build.') + parser.add_argument('--dashboard-url', required=True, + help='Which dashboard to use.') + parser.add_argument('--input-results-file', type=argparse.FileType(), + required=True, + help='A HistogramSet proto file with output from ' + 'WebRTC tests.') + parser.add_argument('--output-json-file', type=argparse.FileType('w'), + help='Where to write the output (for debugging).') + parser.add_argument('--outdir', required=True, + help='Path to the local out/ dir (usually out/Default)') + parser.add_argument('--wait-for-upload', action='store_true', + help='If specified, script will wait untill Chrome ' + 'perf dashboard confirms that the data was succesfully ' + 'proccessed and uploaded') + parser.add_argument('--wait-timeout-sec', type=int, default=1200, + help='Used only if wait-for-upload is True. Maximum ' + 'amount of time in seconds that the script will wait ' + 'for the confirmation.') + parser.add_argument('--wait-polling-period-sec', type=int, default=120, + help='Used only if wait-for-upload is True. Status ' + 'will be requested from the Dashboard every ' + 'wait-polling-period-sec seconds.') + return parser def _ConfigurePythonPath(options): - # We just yank the python scripts we require into the PYTHONPATH. You could - # also imagine a solution where we use for instance protobuf:py_proto_runtime - # to copy catapult and protobuf code to out/. This is the convention in - # Chromium and WebRTC python scripts. We do need to build histogram_pb2 - # however, so that's why we add out/ to sys.path below. 
- # - # It would be better if there was an equivalent to py_binary in GN, but - # there's not. - script_dir = os.path.dirname(os.path.realpath(__file__)) - checkout_root = os.path.abspath( - os.path.join(script_dir, os.pardir, os.pardir)) - - sys.path.insert(0, os.path.join(checkout_root, 'third_party', 'catapult', - 'tracing')) - sys.path.insert(0, os.path.join(checkout_root, 'third_party', 'protobuf', - 'python')) - - # The webrtc_dashboard_upload gn rule will build the protobuf stub for python, - # so put it in the path for this script before we attempt to import it. - histogram_proto_path = os.path.join( - options.outdir, 'pyproto', 'tracing', 'tracing', 'proto') - sys.path.insert(0, histogram_proto_path) - - # Fail early in case the proto hasn't been built. - from tracing.proto import histogram_proto - if not histogram_proto.HAS_PROTO: - raise ImportError('Could not find histogram_pb2. You need to build the ' - 'webrtc_dashboard_upload target before invoking this ' - 'script. Expected to find ' - 'histogram_pb2.py in %s.' % histogram_proto_path) + # We just yank the python scripts we require into the PYTHONPATH. You could + # also imagine a solution where we use for instance + # protobuf:py_proto_runtime to copy catapult and protobuf code to out/. + # This is the convention in Chromium and WebRTC python scripts. We do need + # to build histogram_pb2 however, so that's why we add out/ to sys.path + # below. + # + # It would be better if there was an equivalent to py_binary in GN, but + # there's not. 
+ script_dir = os.path.dirname(os.path.realpath(__file__)) + checkout_root = os.path.abspath( + os.path.join(script_dir, os.pardir, os.pardir)) + + sys.path.insert( + 0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing')) + sys.path.insert( + 0, os.path.join(checkout_root, 'third_party', 'protobuf', 'python')) + + # The webrtc_dashboard_upload gn rule will build the protobuf stub for + # python, so put it in the path for this script before we attempt to import + # it. + histogram_proto_path = os.path.join(options.outdir, 'pyproto', 'tracing', + 'tracing', 'proto') + sys.path.insert(0, histogram_proto_path) + + # Fail early in case the proto hasn't been built. + from tracing.proto import histogram_proto + if not histogram_proto.HAS_PROTO: + raise ImportError( + 'Could not find histogram_pb2. You need to build the ' + 'webrtc_dashboard_upload target before invoking this ' + 'script. Expected to find ' + 'histogram_pb2.py in %s.' % histogram_proto_path) def main(args): - parser = _CreateParser() - options = parser.parse_args(args) + parser = _CreateParser() + options = parser.parse_args(args) + + _ConfigurePythonPath(options) - _ConfigurePythonPath(options) + import catapult_uploader - import catapult_uploader + return catapult_uploader.UploadToDashboard(options) - return catapult_uploader.UploadToDashboard(options) if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + sys.exit(main(sys.argv[1:])) diff --git a/tools_webrtc/presubmit_checks_lib/build_helpers.py b/tools_webrtc/presubmit_checks_lib/build_helpers.py index 1ad59bfd49..e276631ed4 100644 --- a/tools_webrtc/presubmit_checks_lib/build_helpers.py +++ b/tools_webrtc/presubmit_checks_lib/build_helpers.py @@ -5,7 +5,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. 
- """This script helps to invoke gn and ninja which lie in depot_tools repository.""" @@ -19,11 +18,11 @@ def FindSrcDirPath(): - """Returns the abs path to the src/ dir of the project.""" - src_dir = os.path.dirname(os.path.abspath(__file__)) - while os.path.basename(src_dir) != 'src': - src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) - return src_dir + """Returns the abs path to the src/ dir of the project.""" + src_dir = os.path.dirname(os.path.abspath(__file__)) + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir SRC_DIR = FindSrcDirPath() @@ -32,16 +31,16 @@ def FindSrcDirPath(): def RunGnCommand(args, root_dir=None): - """Runs `gn` with provided args and return error if any.""" - try: - command = [ - sys.executable, - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py') - ] + args - subprocess.check_output(command, cwd=root_dir) - except subprocess.CalledProcessError as err: - return err.output - return None + """Runs `gn` with provided args and return error if any.""" + try: + command = [ + sys.executable, + os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gn.py') + ] + args + subprocess.check_output(command, cwd=root_dir) + except subprocess.CalledProcessError as err: + return err.output + return None # GN_ERROR_RE matches the summary of an error output by `gn check`. @@ -51,49 +50,49 @@ def RunGnCommand(args, root_dir=None): def RunGnCheck(root_dir=None): - """Runs `gn gen --check` with default args to detect mismatches between + """Runs `gn gen --check` with default args to detect mismatches between #includes and dependencies in the BUILD.gn files, as well as general build errors. Returns a list of error summary strings. 
""" - out_dir = tempfile.mkdtemp('gn') - try: - error = RunGnCommand(['gen', '--check', out_dir], root_dir) - finally: - shutil.rmtree(out_dir, ignore_errors=True) - return GN_ERROR_RE.findall(error) if error else [] + out_dir = tempfile.mkdtemp('gn') + try: + error = RunGnCommand(['gen', '--check', out_dir], root_dir) + finally: + shutil.rmtree(out_dir, ignore_errors=True) + return GN_ERROR_RE.findall(error) if error else [] def RunNinjaCommand(args, root_dir=None): - """Runs ninja quietly. Any failure (e.g. clang not found) is + """Runs ninja quietly. Any failure (e.g. clang not found) is silently discarded, since this is unlikely an error in submitted CL.""" - command = [ - os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja') - ] + args - p = subprocess.Popen(command, cwd=root_dir, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, _ = p.communicate() - return out + command = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'ninja')] + args + p = subprocess.Popen(command, + cwd=root_dir, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, _ = p.communicate() + return out def GetClangTidyPath(): - """POC/WIP! Use the one we have, even it doesn't match clang's version.""" - tidy = ('third_party/android_ndk/toolchains/' - 'llvm/prebuilt/linux-x86_64/bin/clang-tidy') - return os.path.join(SRC_DIR, tidy) + """POC/WIP! Use the one we have, even it doesn't match clang's version.""" + tidy = ('third_party/android_ndk/toolchains/' + 'llvm/prebuilt/linux-x86_64/bin/clang-tidy') + return os.path.join(SRC_DIR, tidy) def GetCompilationDb(root_dir=None): - """Run ninja compdb tool to get proper flags, defines and include paths.""" - # The compdb tool expect a rule. - commands = json.loads(RunNinjaCommand(['-t', 'compdb', 'cxx'], root_dir)) - # Turns 'file' field into a key. - return {v['file']: v for v in commands} + """Run ninja compdb tool to get proper flags, defines and include paths.""" + # The compdb tool expect a rule. 
+ commands = json.loads(RunNinjaCommand(['-t', 'compdb', 'cxx'], root_dir)) + # Turns 'file' field into a key. + return {v['file']: v for v in commands} def GetCompilationCommand(filepath, gn_args, work_dir): - """Get the whole command used to compile one cc file. + """Get the whole command used to compile one cc file. Typically, clang++ with flags, defines and include paths. Args: @@ -104,31 +103,30 @@ def GetCompilationCommand(filepath, gn_args, work_dir): Returns: Command as a list, ready to be consumed by subprocess.Popen. """ - gn_errors = RunGnCommand(['gen'] + gn_args + [work_dir]) - if gn_errors: - raise(RuntimeError( - 'FYI, cannot complete check due to gn error:\n%s\n' - 'Please open a bug.' % gn_errors)) - - # Needed for single file compilation. - commands = GetCompilationDb(work_dir) - - # Path as referenced by ninja. - rel_path = os.path.relpath(os.path.abspath(filepath), work_dir) - - # Gather defines, include path and flags (such as -std=c++11). - try: - compilation_entry = commands[rel_path] - except KeyError: - raise ValueError('%s: Not found in compilation database.\n' - 'Please check the path.' % filepath) - command = compilation_entry['command'].split() - - # Remove troublesome flags. May trigger an error otherwise. - if '-MMD' in command: - command.remove('-MMD') - if '-MF' in command: - index = command.index('-MF') - del command[index:index+2] # Remove filename as well. - - return command + gn_errors = RunGnCommand(['gen'] + gn_args + [work_dir]) + if gn_errors: + raise (RuntimeError('FYI, cannot complete check due to gn error:\n%s\n' + 'Please open a bug.' % gn_errors)) + + # Needed for single file compilation. + commands = GetCompilationDb(work_dir) + + # Path as referenced by ninja. + rel_path = os.path.relpath(os.path.abspath(filepath), work_dir) + + # Gather defines, include path and flags (such as -std=c++11). 
+ try: + compilation_entry = commands[rel_path] + except KeyError: + raise ValueError('%s: Not found in compilation database.\n' + 'Please check the path.' % filepath) + command = compilation_entry['command'].split() + + # Remove troublesome flags. May trigger an error otherwise. + if '-MMD' in command: + command.remove('-MMD') + if '-MF' in command: + index = command.index('-MF') + del command[index:index + 2] # Remove filename as well. + + return command diff --git a/tools_webrtc/presubmit_checks_lib/build_helpers_test.py b/tools_webrtc/presubmit_checks_lib/build_helpers_test.py index 78973282f9..5eab10556c 100755 --- a/tools_webrtc/presubmit_checks_lib/build_helpers_test.py +++ b/tools_webrtc/presubmit_checks_lib/build_helpers_test.py @@ -14,19 +14,20 @@ #pylint: disable=relative-import import build_helpers - TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testdata') class GnCheckTest(unittest.TestCase): - def testCircularDependencyError(self): - test_dir = os.path.join(TESTDATA_DIR, 'circular_dependency') - expected_errors = ['ERROR Dependency cycle:\n' - ' //:bar ->\n //:foo ->\n //:bar'] - self.assertListEqual(expected_errors, - build_helpers.RunGnCheck(test_dir)) + def testCircularDependencyError(self): + test_dir = os.path.join(TESTDATA_DIR, 'circular_dependency') + expected_errors = [ + 'ERROR Dependency cycle:\n' + ' //:bar ->\n //:foo ->\n //:bar' + ] + self.assertListEqual(expected_errors, + build_helpers.RunGnCheck(test_dir)) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/presubmit_checks_lib/check_orphan_headers.py b/tools_webrtc/presubmit_checks_lib/check_orphan_headers.py index deb445cd8c..29509972e5 100644 --- a/tools_webrtc/presubmit_checks_lib/check_orphan_headers.py +++ b/tools_webrtc/presubmit_checks_lib/check_orphan_headers.py @@ -11,12 +11,11 @@ import re import string - # TARGET_RE matches a GN target, and extracts the target name and the contents. 
-TARGET_RE = re.compile(r'(?P\s*)\w+\("(?P\w+)"\) {' - r'(?P.*?)' - r'(?P=indent)}', - re.MULTILINE | re.DOTALL) +TARGET_RE = re.compile( + r'(?P\s*)\w+\("(?P\w+)"\) {' + r'(?P.*?)' + r'(?P=indent)}', re.MULTILINE | re.DOTALL) # SOURCES_RE matches a block of sources inside a GN target. SOURCES_RE = re.compile( @@ -27,27 +26,27 @@ class NoBuildGnFoundError(Exception): - pass + pass class WrongFileTypeError(Exception): - pass + pass def _ReadFile(file_path): - """Returns the content of file_path in a string. + """Returns the content of file_path in a string. Args: file_path: the path of the file to read. Returns: A string with the content of the file. """ - with open(file_path) as f: - return f.read() + with open(file_path) as f: + return f.read() def GetBuildGnPathFromFilePath(file_path, file_exists_check, root_dir_path): - """Returns the BUILD.gn file responsible for file_path. + """Returns the BUILD.gn file responsible for file_path. Args: file_path: the absolute path to the .h file to check. @@ -59,23 +58,23 @@ def GetBuildGnPathFromFilePath(file_path, file_exists_check, root_dir_path): A string with the absolute path to the BUILD.gn file responsible to include file_path in a target. 
""" - if not file_path.endswith('.h'): - raise WrongFileTypeError( - 'File {} is not an header file (.h)'.format(file_path)) - candidate_dir = os.path.dirname(file_path) - while candidate_dir.startswith(root_dir_path): - candidate_build_gn_path = os.path.join(candidate_dir, 'BUILD.gn') - if file_exists_check(candidate_build_gn_path): - return candidate_build_gn_path - else: - candidate_dir = os.path.abspath(os.path.join(candidate_dir, - os.pardir)) - raise NoBuildGnFoundError( - 'No BUILD.gn file found for file: `{}`'.format(file_path)) + if not file_path.endswith('.h'): + raise WrongFileTypeError( + 'File {} is not an header file (.h)'.format(file_path)) + candidate_dir = os.path.dirname(file_path) + while candidate_dir.startswith(root_dir_path): + candidate_build_gn_path = os.path.join(candidate_dir, 'BUILD.gn') + if file_exists_check(candidate_build_gn_path): + return candidate_build_gn_path + else: + candidate_dir = os.path.abspath( + os.path.join(candidate_dir, os.pardir)) + raise NoBuildGnFoundError( + 'No BUILD.gn file found for file: `{}`'.format(file_path)) def IsHeaderInBuildGn(header_path, build_gn_path): - """Returns True if the header is listed in the BUILD.gn file. + """Returns True if the header is listed in the BUILD.gn file. Args: header_path: the absolute path to the header to check. @@ -86,15 +85,15 @@ def IsHeaderInBuildGn(header_path, build_gn_path): at least one GN target in the BUILD.gn file specified by the argument build_gn_path. 
""" - target_abs_path = os.path.dirname(build_gn_path) - build_gn_content = _ReadFile(build_gn_path) - headers_in_build_gn = GetHeadersInBuildGnFileSources(build_gn_content, - target_abs_path) - return header_path in headers_in_build_gn + target_abs_path = os.path.dirname(build_gn_path) + build_gn_content = _ReadFile(build_gn_path) + headers_in_build_gn = GetHeadersInBuildGnFileSources( + build_gn_content, target_abs_path) + return header_path in headers_in_build_gn def GetHeadersInBuildGnFileSources(file_content, target_abs_path): - """Returns a set with all the .h files in the file_content. + """Returns a set with all the .h files in the file_content. Args: file_content: a string with the content of the BUILD.gn file. @@ -105,15 +104,15 @@ def GetHeadersInBuildGnFileSources(file_content, target_abs_path): A set with all the headers (.h file) in the file_content. The set contains absolute paths. """ - headers_in_sources = set([]) - for target_match in TARGET_RE.finditer(file_content): - target_contents = target_match.group('target_contents') - for sources_match in SOURCES_RE.finditer(target_contents): - sources = sources_match.group('sources') - for source_file_match in SOURCE_FILE_RE.finditer(sources): - source_file = source_file_match.group('source_file') - if source_file.endswith('.h'): - source_file_tokens = string.split(source_file, '/') - headers_in_sources.add(os.path.join(target_abs_path, - *source_file_tokens)) - return headers_in_sources + headers_in_sources = set([]) + for target_match in TARGET_RE.finditer(file_content): + target_contents = target_match.group('target_contents') + for sources_match in SOURCES_RE.finditer(target_contents): + sources = sources_match.group('sources') + for source_file_match in SOURCE_FILE_RE.finditer(sources): + source_file = source_file_match.group('source_file') + if source_file.endswith('.h'): + source_file_tokens = string.split(source_file, '/') + headers_in_sources.add( + os.path.join(target_abs_path, 
*source_file_tokens)) + return headers_in_sources diff --git a/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py b/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py index 2dfc18999d..79ac6a4b49 100755 --- a/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py +++ b/tools_webrtc/presubmit_checks_lib/check_orphan_headers_test.py @@ -16,73 +16,67 @@ def _GetRootBasedOnPlatform(): - if sys.platform.startswith('win'): - return 'C:\\' - else: - return '/' + if sys.platform.startswith('win'): + return 'C:\\' + else: + return '/' def _GetPath(*path_chunks): - return os.path.join(_GetRootBasedOnPlatform(), - *path_chunks) + return os.path.join(_GetRootBasedOnPlatform(), *path_chunks) class GetBuildGnPathFromFilePathTest(unittest.TestCase): - - def testGetBuildGnFromSameDirectory(self): - file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') - expected_build_path = _GetPath('home', 'projects', 'webrtc', 'base', - 'BUILD.gn') - file_exists = lambda p: p == _GetPath('home', 'projects', 'webrtc', - 'base', 'BUILD.gn') - src_dir_path = _GetPath('home', 'projects', 'webrtc') - self.assertEqual( - expected_build_path, - check_orphan_headers.GetBuildGnPathFromFilePath(file_path, - file_exists, - src_dir_path)) - - def testGetBuildPathFromParentDirectory(self): - file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') - expected_build_path = _GetPath('home', 'projects', 'webrtc', - 'BUILD.gn') - file_exists = lambda p: p == _GetPath('home', 'projects', 'webrtc', - 'BUILD.gn') - src_dir_path = _GetPath('home', 'projects', 'webrtc') - self.assertEqual( - expected_build_path, - check_orphan_headers.GetBuildGnPathFromFilePath(file_path, - file_exists, - src_dir_path)) - - def testExceptionIfNoBuildGnFilesAreFound(self): - with self.assertRaises(check_orphan_headers.NoBuildGnFoundError): - file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') - file_exists = lambda p: False - src_dir_path = _GetPath('home', 
'projects', 'webrtc') - check_orphan_headers.GetBuildGnPathFromFilePath(file_path, - file_exists, - src_dir_path) - - def testExceptionIfFilePathIsNotAnHeader(self): - with self.assertRaises(check_orphan_headers.WrongFileTypeError): - file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.cc') - file_exists = lambda p: False - src_dir_path = _GetPath('home', 'projects', 'webrtc') - check_orphan_headers.GetBuildGnPathFromFilePath(file_path, - file_exists, - src_dir_path) + def testGetBuildGnFromSameDirectory(self): + file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') + expected_build_path = _GetPath('home', 'projects', 'webrtc', 'base', + 'BUILD.gn') + file_exists = lambda p: p == _GetPath('home', 'projects', 'webrtc', + 'base', 'BUILD.gn') + src_dir_path = _GetPath('home', 'projects', 'webrtc') + self.assertEqual( + expected_build_path, + check_orphan_headers.GetBuildGnPathFromFilePath( + file_path, file_exists, src_dir_path)) + + def testGetBuildPathFromParentDirectory(self): + file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') + expected_build_path = _GetPath('home', 'projects', 'webrtc', + 'BUILD.gn') + file_exists = lambda p: p == _GetPath('home', 'projects', 'webrtc', + 'BUILD.gn') + src_dir_path = _GetPath('home', 'projects', 'webrtc') + self.assertEqual( + expected_build_path, + check_orphan_headers.GetBuildGnPathFromFilePath( + file_path, file_exists, src_dir_path)) + + def testExceptionIfNoBuildGnFilesAreFound(self): + with self.assertRaises(check_orphan_headers.NoBuildGnFoundError): + file_path = _GetPath('home', 'projects', 'webrtc', 'base', 'foo.h') + file_exists = lambda p: False + src_dir_path = _GetPath('home', 'projects', 'webrtc') + check_orphan_headers.GetBuildGnPathFromFilePath( + file_path, file_exists, src_dir_path) + + def testExceptionIfFilePathIsNotAnHeader(self): + with self.assertRaises(check_orphan_headers.WrongFileTypeError): + file_path = _GetPath('home', 'projects', 'webrtc', 'base', + 
'foo.cc') + file_exists = lambda p: False + src_dir_path = _GetPath('home', 'projects', 'webrtc') + check_orphan_headers.GetBuildGnPathFromFilePath( + file_path, file_exists, src_dir_path) class GetHeadersInBuildGnFileSourcesTest(unittest.TestCase): + def testEmptyFileReturnsEmptySet(self): + self.assertEqual( + set([]), + check_orphan_headers.GetHeadersInBuildGnFileSources('', '/a/b')) - def testEmptyFileReturnsEmptySet(self): - self.assertEqual( - set([]), - check_orphan_headers.GetHeadersInBuildGnFileSources('', '/a/b')) - - def testReturnsSetOfHeadersFromFileContent(self): - file_content = """ + def testReturnsSetOfHeadersFromFileContent(self): + file_content = """ # Some comments if (is_android) { import("//a/b/c.gni") @@ -107,17 +101,17 @@ def testReturnsSetOfHeadersFromFileContent(self): sources = ["baz/foo.h"] } """ - target_abs_path = _GetPath('a', 'b') - self.assertEqual( - set([ - _GetPath('a', 'b', 'foo.h'), - _GetPath('a', 'b', 'bar.h'), - _GetPath('a', 'b', 'public_foo.h'), - _GetPath('a', 'b', 'baz', 'foo.h'), - ]), - check_orphan_headers.GetHeadersInBuildGnFileSources(file_content, - target_abs_path)) + target_abs_path = _GetPath('a', 'b') + self.assertEqual( + set([ + _GetPath('a', 'b', 'foo.h'), + _GetPath('a', 'b', 'bar.h'), + _GetPath('a', 'b', 'public_foo.h'), + _GetPath('a', 'b', 'baz', 'foo.h'), + ]), + check_orphan_headers.GetHeadersInBuildGnFileSources( + file_content, target_abs_path)) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py b/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py index 1b3c1f8e43..7d81bae16e 100644 --- a/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py +++ b/tools_webrtc/presubmit_checks_lib/check_package_boundaries.py @@ -14,12 +14,11 @@ import re import sys - # TARGET_RE matches a GN target, and extracts the target name and the contents. 
-TARGET_RE = re.compile(r'(?P\s*)\w+\("(?P\w+)"\) {' - r'(?P.*?)' - r'(?P=indent)}', - re.MULTILINE | re.DOTALL) +TARGET_RE = re.compile( + r'(?P\s*)\w+\("(?P\w+)"\) {' + r'(?P.*?)' + r'(?P=indent)}', re.MULTILINE | re.DOTALL) # SOURCES_RE matches a block of sources inside a GN target. SOURCES_RE = re.compile(r'sources \+?= \[(?P.*?)\]', @@ -31,96 +30,107 @@ class PackageBoundaryViolation( - collections.namedtuple('PackageBoundaryViolation', - 'build_file_path target_name source_file subpackage')): - def __str__(self): - return ERROR_MESSAGE.format(**self._asdict()) + collections.namedtuple( + 'PackageBoundaryViolation', + 'build_file_path target_name source_file subpackage')): + def __str__(self): + return ERROR_MESSAGE.format(**self._asdict()) def _BuildSubpackagesPattern(packages, query): - """Returns a regular expression that matches source files inside subpackages + """Returns a regular expression that matches source files inside subpackages of the given query.""" - query += os.path.sep - length = len(query) - pattern = r'\s*"(?P(?P' - pattern += '|'.join(re.escape(package[length:].replace(os.path.sep, '/')) - for package in packages if package.startswith(query)) - pattern += r')/[\w\./]*)"' - return re.compile(pattern) + query += os.path.sep + length = len(query) + pattern = r'\s*"(?P(?P' + pattern += '|'.join( + re.escape(package[length:].replace(os.path.sep, '/')) + for package in packages if package.startswith(query)) + pattern += r')/[\w\./]*)"' + return re.compile(pattern) def _ReadFileAndPrependLines(file_path): - """Reads the contents of a file.""" - with open(file_path) as f: - return "".join(f.readlines()) + """Reads the contents of a file.""" + with open(file_path) as f: + return "".join(f.readlines()) def _CheckBuildFile(build_file_path, packages): - """Iterates over all the targets of the given BUILD.gn file, and verifies that + """Iterates over all the targets of the given BUILD.gn file, and verifies that the source files referenced by it don't 
belong to any of it's subpackages. Returns an iterator over PackageBoundaryViolations for this package. """ - package = os.path.dirname(build_file_path) - subpackages_re = _BuildSubpackagesPattern(packages, package) - - build_file_contents = _ReadFileAndPrependLines(build_file_path) - for target_match in TARGET_RE.finditer(build_file_contents): - target_name = target_match.group('target_name') - target_contents = target_match.group('target_contents') - for sources_match in SOURCES_RE.finditer(target_contents): - sources = sources_match.group('sources') - for subpackages_match in subpackages_re.finditer(sources): - subpackage = subpackages_match.group('subpackage') - source_file = subpackages_match.group('source_file') - if subpackage: - yield PackageBoundaryViolation(build_file_path, - target_name, source_file, subpackage) + package = os.path.dirname(build_file_path) + subpackages_re = _BuildSubpackagesPattern(packages, package) + + build_file_contents = _ReadFileAndPrependLines(build_file_path) + for target_match in TARGET_RE.finditer(build_file_contents): + target_name = target_match.group('target_name') + target_contents = target_match.group('target_contents') + for sources_match in SOURCES_RE.finditer(target_contents): + sources = sources_match.group('sources') + for subpackages_match in subpackages_re.finditer(sources): + subpackage = subpackages_match.group('subpackage') + source_file = subpackages_match.group('source_file') + if subpackage: + yield PackageBoundaryViolation(build_file_path, + target_name, source_file, + subpackage) def CheckPackageBoundaries(root_dir, build_files=None): - packages = [root for root, _, files in os.walk(root_dir) - if 'BUILD.gn' in files] - - if build_files is not None: + packages = [ + root for root, _, files in os.walk(root_dir) if 'BUILD.gn' in files + ] + + if build_files is not None: + for build_file_path in build_files: + assert build_file_path.startswith(root_dir) + else: + build_files = [ + os.path.join(package, 
'BUILD.gn') for package in packages + ] + + messages = [] for build_file_path in build_files: - assert build_file_path.startswith(root_dir) - else: - build_files = [os.path.join(package, 'BUILD.gn') for package in packages] - - messages = [] - for build_file_path in build_files: - messages.extend(_CheckBuildFile(build_file_path, packages)) - return messages + messages.extend(_CheckBuildFile(build_file_path, packages)) + return messages def main(argv): - parser = argparse.ArgumentParser( - description='Script that checks package boundary violations in GN ' - 'build files.') - - parser.add_argument('root_dir', metavar='ROOT_DIR', - help='The root directory that contains all BUILD.gn ' - 'files to be processed.') - parser.add_argument('build_files', metavar='BUILD_FILE', nargs='*', - help='A list of BUILD.gn files to be processed. If no ' - 'files are given, all BUILD.gn files under ROOT_DIR ' - 'will be processed.') - parser.add_argument('--max_messages', type=int, default=None, - help='If set, the maximum number of violations to be ' - 'displayed.') - - args = parser.parse_args(argv) - - messages = CheckPackageBoundaries(args.root_dir, args.build_files) - messages = messages[:args.max_messages] - - for i, message in enumerate(messages): - if i > 0: - print - print message - - return bool(messages) + parser = argparse.ArgumentParser( + description='Script that checks package boundary violations in GN ' + 'build files.') + + parser.add_argument('root_dir', + metavar='ROOT_DIR', + help='The root directory that contains all BUILD.gn ' + 'files to be processed.') + parser.add_argument('build_files', + metavar='BUILD_FILE', + nargs='*', + help='A list of BUILD.gn files to be processed. 
If no ' + 'files are given, all BUILD.gn files under ROOT_DIR ' + 'will be processed.') + parser.add_argument('--max_messages', + type=int, + default=None, + help='If set, the maximum number of violations to be ' + 'displayed.') + + args = parser.parse_args(argv) + + messages = CheckPackageBoundaries(args.root_dir, args.build_files) + messages = messages[:args.max_messages] + + for i, message in enumerate(messages): + if i > 0: + print + print message + + return bool(messages) if __name__ == '__main__': - sys.exit(main(sys.argv[1:])) + sys.exit(main(sys.argv[1:])) diff --git a/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py b/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py index abf232e678..8d173372c1 100755 --- a/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py +++ b/tools_webrtc/presubmit_checks_lib/check_package_boundaries_test.py @@ -15,58 +15,60 @@ #pylint: disable=relative-import from check_package_boundaries import CheckPackageBoundaries - MSG_FORMAT = 'ERROR:check_package_boundaries.py: Unexpected %s.' 
TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testdata') def ReadPylFile(file_path): - with open(file_path) as f: - return ast.literal_eval(f.read()) + with open(file_path) as f: + return ast.literal_eval(f.read()) class UnitTest(unittest.TestCase): - def _RunTest(self, test_dir, check_all_build_files=False): - build_files = [os.path.join(test_dir, 'BUILD.gn')] - if check_all_build_files: - build_files = None + def _RunTest(self, test_dir, check_all_build_files=False): + build_files = [os.path.join(test_dir, 'BUILD.gn')] + if check_all_build_files: + build_files = None - messages = [] - for violation in CheckPackageBoundaries(test_dir, build_files): - build_file_path = os.path.relpath(violation.build_file_path, test_dir) - build_file_path = build_file_path.replace(os.path.sep, '/') - messages.append(violation._replace(build_file_path=build_file_path)) + messages = [] + for violation in CheckPackageBoundaries(test_dir, build_files): + build_file_path = os.path.relpath(violation.build_file_path, + test_dir) + build_file_path = build_file_path.replace(os.path.sep, '/') + messages.append( + violation._replace(build_file_path=build_file_path)) - expected_messages = ReadPylFile(os.path.join(test_dir, 'expected.pyl')) - self.assertListEqual(sorted(expected_messages), sorted(messages)) + expected_messages = ReadPylFile(os.path.join(test_dir, 'expected.pyl')) + self.assertListEqual(sorted(expected_messages), sorted(messages)) - def testNoErrors(self): - self._RunTest(os.path.join(TESTDATA_DIR, 'no_errors')) + def testNoErrors(self): + self._RunTest(os.path.join(TESTDATA_DIR, 'no_errors')) - def testMultipleErrorsSingleTarget(self): - self._RunTest(os.path.join(TESTDATA_DIR, 'multiple_errors_single_target')) + def testMultipleErrorsSingleTarget(self): + self._RunTest( + os.path.join(TESTDATA_DIR, 'multiple_errors_single_target')) - def testMultipleErrorsMultipleTargets(self): - self._RunTest(os.path.join(TESTDATA_DIR, - 
'multiple_errors_multiple_targets')) + def testMultipleErrorsMultipleTargets(self): + self._RunTest( + os.path.join(TESTDATA_DIR, 'multiple_errors_multiple_targets')) - def testCommonPrefix(self): - self._RunTest(os.path.join(TESTDATA_DIR, 'common_prefix')) + def testCommonPrefix(self): + self._RunTest(os.path.join(TESTDATA_DIR, 'common_prefix')) - def testAllBuildFiles(self): - self._RunTest(os.path.join(TESTDATA_DIR, 'all_build_files'), True) + def testAllBuildFiles(self): + self._RunTest(os.path.join(TESTDATA_DIR, 'all_build_files'), True) - def testSanitizeFilename(self): - # The `dangerous_filename` test case contains a directory with '++' in its - # name. If it's not properly escaped, a regex error would be raised. - self._RunTest(os.path.join(TESTDATA_DIR, 'dangerous_filename'), True) + def testSanitizeFilename(self): + # The `dangerous_filename` test case contains a directory with '++' in its + # name. If it's not properly escaped, a regex error would be raised. + self._RunTest(os.path.join(TESTDATA_DIR, 'dangerous_filename'), True) - def testRelativeFilename(self): - test_dir = os.path.join(TESTDATA_DIR, 'all_build_files') - with self.assertRaises(AssertionError): - CheckPackageBoundaries(test_dir, ["BUILD.gn"]) + def testRelativeFilename(self): + test_dir = os.path.join(TESTDATA_DIR, 'all_build_files') + with self.assertRaises(AssertionError): + CheckPackageBoundaries(test_dir, ["BUILD.gn"]) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tools_webrtc/sancov/README b/tools_webrtc/sancov/README deleted file mode 100644 index c9b43e7ae0..0000000000 --- a/tools_webrtc/sancov/README +++ /dev/null @@ -1,9 +0,0 @@ -Blacklist for llvm's sancov - -See http://clang.llvm.org/docs/SanitizerCoverage.html . 
- -Example usage: -> cd out/Debug -> UBSAN_OPTIONS="coverage=1" ./peerconnection_unittests -> sancov -html-report -blacklist ../../tools/sancov/blacklist.txt \ -> peerconnection_unittests peerconnection_unittests.123.sancov diff --git a/tools_webrtc/sancov/blacklist.txt b/tools_webrtc/sancov/blacklist.txt deleted file mode 100644 index 7043a18ef2..0000000000 --- a/tools_webrtc/sancov/blacklist.txt +++ /dev/null @@ -1,21 +0,0 @@ -############################################################################# -# sancov blacklist. -# Please think twice before you add or remove these rules. - -############################################################################# -# no coverage report for third party -src:*/chromium/* -src:*/third_party/* - -# OpenH264 triggers some errors that are out of our control. -src:*/third_party/ffmpeg/libavcodec/* -src:*/third_party/openh264/* - -############################################################################# -# Ignore system libraries. -src:*/usr/* - -############################################################################# -# Ignore test source. 
-src:*/test/* -src:*_unittest.cc diff --git a/tools_webrtc/sanitizers/tsan_suppressions_webrtc.cc b/tools_webrtc/sanitizers/tsan_suppressions_webrtc.cc index 3177fbc74a..3eb85e9fb5 100644 --- a/tools_webrtc/sanitizers/tsan_suppressions_webrtc.cc +++ b/tools_webrtc/sanitizers/tsan_suppressions_webrtc.cc @@ -31,8 +31,6 @@ char kTSanDefaultSuppressions[] = // rtc_unittests // https://code.google.com/p/webrtc/issues/detail?id=2080 "race:rtc_base/logging.cc\n" - "race:rtc_base/shared_exclusive_lock_unittest.cc\n" - "race:rtc_base/signal_thread_unittest.cc\n" // rtc_pc_unittests // https://code.google.com/p/webrtc/issues/detail?id=2079 diff --git a/tools_webrtc/sslroots/generate_sslroots.py b/tools_webrtc/sslroots/generate_sslroots.py index 7d8bf14471..ff0052e3c7 100644 --- a/tools_webrtc/sslroots/generate_sslroots.py +++ b/tools_webrtc/sslroots/generate_sslroots.py @@ -6,8 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - - """This is a tool to transform a crt file into a C/C++ header. 
Usage: @@ -36,155 +34,185 @@ _CERTIFICATE_VARIABLE = 'Certificate' _CERTIFICATE_SIZE_VARIABLE = 'CertificateSize' _INT_TYPE = 'size_t' -_CHAR_TYPE = 'const unsigned char*' +_CHAR_TYPE = 'unsigned char* const' _VERBOSE = 'verbose' def main(): - """The main entrypoint.""" - parser = OptionParser('usage %prog FILE') - parser.add_option('-v', '--verbose', dest='verbose', action='store_true') - parser.add_option('-f', '--full_cert', dest='full_cert', action='store_true') - options, args = parser.parse_args() - if len(args) < 1: - parser.error('No crt file specified.') - return - root_dir = _SplitCrt(args[0], options) - _GenCFiles(root_dir, options) - _Cleanup(root_dir) + """The main entrypoint.""" + parser = OptionParser('usage %prog FILE') + parser.add_option('-v', '--verbose', dest='verbose', action='store_true') + parser.add_option('-f', + '--full_cert', + dest='full_cert', + action='store_true') + options, args = parser.parse_args() + if len(args) < 1: + parser.error('No crt file specified.') + return + root_dir = _SplitCrt(args[0], options) + _GenCFiles(root_dir, options) + _Cleanup(root_dir) def _SplitCrt(source_file, options): - sub_file_blocks = [] - label_name = '' - root_dir = os.path.dirname(os.path.abspath(source_file)) + '/' - _PrintOutput(root_dir, options) - f = open(source_file) - for line in f: - if line.startswith('# Label: '): - sub_file_blocks.append(line) - label = re.search(r'\".*\"', line) - temp_label = label.group(0) - end = len(temp_label)-1 - label_name = _SafeName(temp_label[1:end]) - elif line.startswith('-----END CERTIFICATE-----'): - sub_file_blocks.append(line) - new_file_name = root_dir + _PREFIX + label_name + _EXTENSION - _PrintOutput('Generating: ' + new_file_name, options) - new_file = open(new_file_name, 'w') - for out_line in sub_file_blocks: - new_file.write(out_line) - new_file.close() - sub_file_blocks = [] - else: - sub_file_blocks.append(line) - f.close() - return root_dir + sub_file_blocks = [] + label_name = '' + root_dir = 
os.path.dirname(os.path.abspath(source_file)) + '/' + _PrintOutput(root_dir, options) + f = open(source_file) + for line in f: + if line.startswith('# Label: '): + sub_file_blocks.append(line) + label = re.search(r'\".*\"', line) + temp_label = label.group(0) + end = len(temp_label) - 1 + label_name = _SafeName(temp_label[1:end]) + elif line.startswith('-----END CERTIFICATE-----'): + sub_file_blocks.append(line) + new_file_name = root_dir + _PREFIX + label_name + _EXTENSION + _PrintOutput('Generating: ' + new_file_name, options) + new_file = open(new_file_name, 'w') + for out_line in sub_file_blocks: + new_file.write(out_line) + new_file.close() + sub_file_blocks = [] + else: + sub_file_blocks.append(line) + f.close() + return root_dir def _GenCFiles(root_dir, options): - output_header_file = open(root_dir + _GENERATED_FILE, 'w') - output_header_file.write(_CreateOutputHeader()) - if options.full_cert: - subject_name_list = _CreateArraySectionHeader(_SUBJECT_NAME_VARIABLE, - _CHAR_TYPE, options) - public_key_list = _CreateArraySectionHeader(_PUBLIC_KEY_VARIABLE, - _CHAR_TYPE, options) - certificate_list = _CreateArraySectionHeader(_CERTIFICATE_VARIABLE, - _CHAR_TYPE, options) - certificate_size_list = _CreateArraySectionHeader(_CERTIFICATE_SIZE_VARIABLE, - _INT_TYPE, options) - - for _, _, files in os.walk(root_dir): - for current_file in files: - if current_file.startswith(_PREFIX): - prefix_length = len(_PREFIX) - length = len(current_file) - len(_EXTENSION) - label = current_file[prefix_length:length] - filtered_output, cert_size = _CreateCertSection(root_dir, current_file, - label, options) - output_header_file.write(filtered_output + '\n\n\n') - if options.full_cert: - subject_name_list += _AddLabelToArray(label, _SUBJECT_NAME_ARRAY) - public_key_list += _AddLabelToArray(label, _PUBLIC_KEY_ARRAY) - certificate_list += _AddLabelToArray(label, _CERTIFICATE_ARRAY) - certificate_size_list += (' %s,\n') %(cert_size) - - if options.full_cert: - subject_name_list += 
_CreateArraySectionFooter() - output_header_file.write(subject_name_list) - public_key_list += _CreateArraySectionFooter() - output_header_file.write(public_key_list) - certificate_list += _CreateArraySectionFooter() - output_header_file.write(certificate_list) - certificate_size_list += _CreateArraySectionFooter() - output_header_file.write(certificate_size_list) - output_header_file.close() + output_header_file = open(root_dir + _GENERATED_FILE, 'w') + output_header_file.write(_CreateOutputHeader()) + if options.full_cert: + subject_name_list = _CreateArraySectionHeader(_SUBJECT_NAME_VARIABLE, + _CHAR_TYPE, options) + public_key_list = _CreateArraySectionHeader(_PUBLIC_KEY_VARIABLE, + _CHAR_TYPE, options) + certificate_list = _CreateArraySectionHeader(_CERTIFICATE_VARIABLE, + _CHAR_TYPE, options) + certificate_size_list = _CreateArraySectionHeader( + _CERTIFICATE_SIZE_VARIABLE, _INT_TYPE, options) + + for _, _, files in os.walk(root_dir): + for current_file in files: + if current_file.startswith(_PREFIX): + prefix_length = len(_PREFIX) + length = len(current_file) - len(_EXTENSION) + label = current_file[prefix_length:length] + filtered_output, cert_size = _CreateCertSection( + root_dir, current_file, label, options) + output_header_file.write(filtered_output + '\n\n\n') + if options.full_cert: + subject_name_list += _AddLabelToArray( + label, _SUBJECT_NAME_ARRAY) + public_key_list += _AddLabelToArray( + label, _PUBLIC_KEY_ARRAY) + certificate_list += _AddLabelToArray(label, _CERTIFICATE_ARRAY) + certificate_size_list += (' %s,\n') % (cert_size) + + if options.full_cert: + subject_name_list += _CreateArraySectionFooter() + output_header_file.write(subject_name_list) + public_key_list += _CreateArraySectionFooter() + output_header_file.write(public_key_list) + certificate_list += _CreateArraySectionFooter() + output_header_file.write(certificate_list) + certificate_size_list += _CreateArraySectionFooter() + output_header_file.write(certificate_size_list) + 
output_header_file.write(_CreateOutputFooter()) + output_header_file.close() def _Cleanup(root_dir): - for f in os.listdir(root_dir): - if f.startswith(_PREFIX): - os.remove(root_dir + f) + for f in os.listdir(root_dir): + if f.startswith(_PREFIX): + os.remove(root_dir + f) def _CreateCertSection(root_dir, source_file, label, options): - command = 'openssl x509 -in %s%s -noout -C' %(root_dir, source_file) - _PrintOutput(command, options) - output = commands.getstatusoutput(command)[1] - renamed_output = output.replace('unsigned char XXX_', - 'const unsigned char ' + label + '_') - filtered_output = '' - cert_block = '^const unsigned char.*?};$' - prog = re.compile(cert_block, re.IGNORECASE | re.MULTILINE | re.DOTALL) - if not options.full_cert: - filtered_output = prog.sub('', renamed_output, count=2) - else: - filtered_output = renamed_output - - cert_size_block = r'\d\d\d+' - prog2 = re.compile(cert_size_block, re.MULTILINE | re.VERBOSE) - result = prog2.findall(renamed_output) - cert_size = result[len(result) - 1] - - return filtered_output, cert_size + command = 'openssl x509 -in %s%s -noout -C' % (root_dir, source_file) + _PrintOutput(command, options) + output = commands.getstatusoutput(command)[1] + renamed_output = output.replace('unsigned char XXX_', + 'const unsigned char ' + label + '_') + filtered_output = '' + cert_block = '^const unsigned char.*?};$' + prog = re.compile(cert_block, re.IGNORECASE | re.MULTILINE | re.DOTALL) + if not options.full_cert: + filtered_output = prog.sub('', renamed_output, count=2) + else: + filtered_output = renamed_output + + cert_size_block = r'\d\d\d+' + prog2 = re.compile(cert_size_block, re.MULTILINE | re.VERBOSE) + result = prog2.findall(renamed_output) + cert_size = result[len(result) - 1] + + return filtered_output, cert_size def _CreateOutputHeader(): - output = ('// This file is the root certificates in C form that are needed to' - ' connect to\n// Google.\n\n' - '// It was generated with the following command 
line:\n' - '// > python tools/certs/generate_sslroots.py' - '\n// https://pki.google.com/roots.pem\n\n') - return output + output = ( + '/*\n' + ' * Copyright 2004 The WebRTC Project Authors. All rights ' + 'reserved.\n' + ' *\n' + ' * Use of this source code is governed by a BSD-style license\n' + ' * that can be found in the LICENSE file in the root of the ' + 'source\n' + ' * tree. An additional intellectual property rights grant can be ' + 'found\n' + ' * in the file PATENTS. All contributing project authors may\n' + ' * be found in the AUTHORS file in the root of the source tree.\n' + ' */\n\n' + '#ifndef RTC_BASE_SSL_ROOTS_H_\n' + '#define RTC_BASE_SSL_ROOTS_H_\n\n' + '// This file is the root certificates in C form that are needed to' + ' connect to\n// Google.\n\n' + '// It was generated with the following command line:\n' + '// > python tools_webrtc/sslroots/generate_sslroots.py' + '\n// https://pki.goog/roots.pem\n\n' + '// clang-format off\n' + '// Don\'t bother formatting generated code,\n' + '// also it would breaks subject/issuer lines.\n\n') + return output + + +def _CreateOutputFooter(): + output = ('// clang-format on\n\n' '#endif // RTC_BASE_SSL_ROOTS_H_\n') + return output def _CreateArraySectionHeader(type_name, type_type, options): - output = ('const %s kSSLCert%sList[] = {\n') %(type_type, type_name) - _PrintOutput(output, options) - return output + output = ('const %s kSSLCert%sList[] = {\n') % (type_type, type_name) + _PrintOutput(output, options) + return output def _AddLabelToArray(label, type_name): - return ' %s_%s,\n' %(label, type_name) + return ' %s_%s,\n' % (label, type_name) def _CreateArraySectionFooter(): - return '};\n\n' + return '};\n\n' def _SafeName(original_file_name): - bad_chars = ' -./\\()áéíőú' - replacement_chars = '' - for _ in bad_chars: - replacement_chars += '_' - translation_table = string.maketrans(bad_chars, replacement_chars) - return original_file_name.translate(translation_table) + bad_chars = ' -./\\()áéíőú' 
+ replacement_chars = '' + for _ in bad_chars: + replacement_chars += '_' + translation_table = string.maketrans(bad_chars, replacement_chars) + return original_file_name.translate(translation_table) def _PrintOutput(output, options): - if options.verbose: - print output + if options.verbose: + print output + if __name__ == '__main__': - main() + main() diff --git a/tools_webrtc/ubsan/blacklist.txt b/tools_webrtc/ubsan/suppressions.txt similarity index 90% rename from tools_webrtc/ubsan/blacklist.txt rename to tools_webrtc/ubsan/suppressions.txt index 50b66e915a..dc76f38c20 100644 --- a/tools_webrtc/ubsan/blacklist.txt +++ b/tools_webrtc/ubsan/suppressions.txt @@ -1,7 +1,7 @@ ############################################################################# -# UBSan blacklist. +# UBSan ignorelist. # -# This is a WebRTC-specific replacement of Chromium's blacklist.txt. +# This is a WebRTC-specific replacement of Chromium's ignorelist.txt. # Only exceptions for third party libraries go here. WebRTC's code should use # the RTC_NO_SANITIZE macro. Please think twice before adding new exceptions. diff --git a/tools_webrtc/ubsan/vptr_blacklist.txt b/tools_webrtc/ubsan/vptr_suppressions.txt similarity index 94% rename from tools_webrtc/ubsan/vptr_blacklist.txt rename to tools_webrtc/ubsan/vptr_suppressions.txt index 739de36659..617ba88f98 100644 --- a/tools_webrtc/ubsan/vptr_blacklist.txt +++ b/tools_webrtc/ubsan/vptr_suppressions.txt @@ -1,5 +1,5 @@ ############################################################################# -# UBSan vptr blacklist. +# UBSan vptr ignorelist. # Function and type based blacklisting use a mangled name, and it is especially # tricky to represent C++ types. For now, any possible changes by name manglings # are simply represented as wildcard expressions of regexp, and thus it might be @@ -8,7 +8,7 @@ # Please think twice before you add or remove these rules. 
# # This is a stripped down copy of Chromium's vptr_blacklist.txt, to enable -# adding WebRTC-specific blacklist entries. +# adding WebRTC-specific ignorelist entries. ############################################################################# # Using raw pointer values. diff --git a/tools_webrtc/version_updater/update_version.py b/tools_webrtc/version_updater/update_version.py new file mode 100644 index 0000000000..3c2be3fe75 --- /dev/null +++ b/tools_webrtc/version_updater/update_version.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python +# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. +# +# Use of this source code is governed by a BSD-style license +# that can be found in the LICENSE file in the root of the source +# tree. An additional intellectual property rights grant can be found +# in the file PATENTS. All contributing project authors may +# be found in the AUTHORS file in the root of the source tree. + +"""Script to auto-update the WebRTC source version in call/version.cc""" + +import argparse +import datetime +import logging +import os +import re +import subprocess +import sys + + +def FindSrcDirPath(): + """Returns the abs path to the src/ dir of the project.""" + src_dir = os.path.dirname(os.path.abspath(__file__)) + while os.path.basename(src_dir) != 'src': + src_dir = os.path.normpath(os.path.join(src_dir, os.pardir)) + return src_dir + + +UPDATE_BRANCH_NAME = 'webrtc_version_update' +CHECKOUT_SRC_DIR = FindSrcDirPath() + + +def _RemovePreviousUpdateBranch(): + active_branch, branches = _GetBranches() + if active_branch == UPDATE_BRANCH_NAME: + active_branch = 'master' + if UPDATE_BRANCH_NAME in branches: + logging.info('Removing previous update branch (%s)', + UPDATE_BRANCH_NAME) + subprocess.check_call(['git', 'checkout', active_branch]) + subprocess.check_call(['git', 'branch', '-D', UPDATE_BRANCH_NAME]) + logging.info('No branch to remove') + + +def _GetLastAuthor(): + """Returns a string with the author of the last commit.""" + 
author = subprocess.check_output(['git', 'log', + '-1', + '--pretty=format:"%an"']).splitlines() + return author + + +def _GetBranches(): + """Returns a tuple (active, branches). + + 'active' is a string with name of the currently active branch, while + 'branches' is the list of all branches. + """ + lines = subprocess.check_output(['git', 'branch']).splitlines() + branches = [] + active = '' + for line in lines: + if '*' in line: + # The assumption is that the first char will always be the '*'. + active = line[1:].strip() + branches.append(active) + else: + branch = line.strip() + if branch: + branches.append(branch) + return active, branches + + +def _CreateUpdateBranch(): + logging.info('Creating update branch: %s', UPDATE_BRANCH_NAME) + subprocess.check_call(['git', 'checkout', '-b', UPDATE_BRANCH_NAME]) + + +def _UpdateWebRTCVersion(filename): + with open(filename) as f: + content = f.read() + d = datetime.datetime.utcnow() + # pylint: disable=line-too-long + new_content = re.sub( + r'WebRTC source stamp [0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}', + r'WebRTC source stamp %02d-%02d-%02dT%02d:%02d:%02d' % (d.year, + d.month, + d.day, + d.hour, + d.minute, + d.second), + content, + flags=re.MULTILINE) + # pylint: enable=line-too-long + with open(filename, 'w') as f: + f.write(new_content) + + +def _IsTreeClean(): + stdout = subprocess.check_output(['git', 'status', '--porcelain']) + if len(stdout) == 0: + return True + return False + + +def _LocalCommit(): + logging.info('Committing changes locally.') + d = datetime.datetime.utcnow() + + git_author = subprocess.check_output(['git', 'config', + 'user.email']).strip() + tbr_authors = git_author + ',' + 'mbonadei@webrtc.org' + tbr = 'TBR=%s' % tbr_authors + commit_msg = ('Update WebRTC code version (%02d-%02d-%02dT%02d:%02d:%02d).' 
+ '\n\nTBR=%s\nBug: None') + commit_msg = commit_msg % (d.year, d.month, d.day, d.hour, d.minute, + d.second, tbr_authors) + subprocess.check_call(['git', 'add', '--update', '.']) + subprocess.check_call(['git', 'commit', '-m', commit_msg]) + + +def _UploadCL(commit_queue_mode): + """Upload the committed changes as a changelist to Gerrit. + + commit_queue_mode: + - 2: Submit to commit queue. + - 1: Run trybots but do not submit to CQ. + - 0: Skip CQ, upload only. + """ + cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks', + '--cc=""', '--bypass-watchlist'] + if commit_queue_mode >= 2: + logging.info('Sending the CL to the CQ...') + cmd.extend(['--use-commit-queue']) + elif commit_queue_mode >= 1: + logging.info('Starting CQ dry run...') + cmd.extend(['--cq-dry-run']) + subprocess.check_call(cmd) + + +def main(): + logging.basicConfig(level=logging.INFO) + p = argparse.ArgumentParser() + p.add_argument('--clean', + action='store_true', + default=False, + help='Removes any previous local update branch.') + opts = p.parse_args() + + if opts.clean: + _RemovePreviousUpdateBranch() + + if _GetLastAuthor() == 'webrtc-version-updater': + logging.info('Last commit is a version change, skipping CL.') + return 0 + + version_filename = os.path.join(CHECKOUT_SRC_DIR, 'call', 'version.cc') + _CreateUpdateBranch() + _UpdateWebRTCVersion(version_filename) + if _IsTreeClean(): + logging.info('No WebRTC version change detected, skipping CL.') + else: + _LocalCommit() + logging.info('Uploading CL...') + _UploadCL(2) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/tools_webrtc/vim/webrtc.ycm_extra_conf.py b/tools_webrtc/vim/webrtc.ycm_extra_conf.py index fcc38fec01..b8727d9633 100644 --- a/tools_webrtc/vim/webrtc.ycm_extra_conf.py +++ b/tools_webrtc/vim/webrtc.ycm_extra_conf.py @@ -53,7 +53,6 @@ # # * This has only been tested on gPrecise. - import os import os.path import shlex @@ -62,25 +61,26 @@ # Flags from YCM's default config. 
_DEFAULT_FLAGS = [ - '-DUSE_CLANG_COMPLETER', - '-std=c++11', - '-x', - 'c++', + '-DUSE_CLANG_COMPLETER', + '-std=c++11', + '-x', + 'c++', ] _HEADER_ALTERNATES = ('.cc', '.cpp', '.c', '.mm', '.m') _EXTENSION_FLAGS = { - '.m': ['-x', 'objective-c'], - '.mm': ['-x', 'objective-c++'], + '.m': ['-x', 'objective-c'], + '.mm': ['-x', 'objective-c++'], } + def PathExists(*args): - return os.path.exists(os.path.join(*args)) + return os.path.exists(os.path.join(*args)) def FindWebrtcSrcFromFilename(filename): - """Searches for the root of the WebRTC checkout. + """Searches for the root of the WebRTC checkout. Simply checks parent directories until it finds .gclient and src/. @@ -90,20 +90,20 @@ def FindWebrtcSrcFromFilename(filename): Returns: (String) Path of 'src/', or None if unable to find. """ - curdir = os.path.normpath(os.path.dirname(filename)) - while not (os.path.basename(curdir) == 'src' - and PathExists(curdir, 'DEPS') - and (PathExists(curdir, '..', '.gclient') - or PathExists(curdir, '.git'))): - nextdir = os.path.normpath(os.path.join(curdir, '..')) - if nextdir == curdir: - return None - curdir = nextdir - return curdir + curdir = os.path.normpath(os.path.dirname(filename)) + while not (os.path.basename(curdir) == 'src' + and PathExists(curdir, 'DEPS') and + (PathExists(curdir, '..', '.gclient') + or PathExists(curdir, '.git'))): + nextdir = os.path.normpath(os.path.join(curdir, '..')) + if nextdir == curdir: + return None + curdir = nextdir + return curdir def GetDefaultSourceFile(webrtc_root, filename): - """Returns the default source file to use as an alternative to |filename|. + """Returns the default source file to use as an alternative to |filename|. Compile flags used to build the default source file is assumed to be a close-enough approximation for building |filename|. @@ -115,13 +115,13 @@ def GetDefaultSourceFile(webrtc_root, filename): Returns: (String) Absolute path to substitute source file. """ - if 'test.' 
in filename: - return os.path.join(webrtc_root, 'base', 'logging_unittest.cc') - return os.path.join(webrtc_root, 'base', 'logging.cc') + if 'test.' in filename: + return os.path.join(webrtc_root, 'base', 'logging_unittest.cc') + return os.path.join(webrtc_root, 'base', 'logging.cc') def GetNinjaBuildOutputsForSourceFile(out_dir, filename): - """Returns a list of build outputs for filename. + """Returns a list of build outputs for filename. The list is generated by invoking 'ninja -t query' tool to retrieve a list of inputs and outputs of |filename|. This list is then filtered to only include @@ -135,32 +135,35 @@ def GetNinjaBuildOutputsForSourceFile(out_dir, filename): (List of Strings) List of target names. Will return [] if |filename| doesn't yield any .o or .obj outputs. """ - # Ninja needs the path to the source file relative to the output build - # directory. - rel_filename = os.path.relpath(filename, out_dir) - - p = subprocess.Popen(['ninja', '-C', out_dir, '-t', 'query', rel_filename], - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - universal_newlines=True) - stdout, _ = p.communicate() - if p.returncode != 0: - return [] - - # The output looks like: - # ../../relative/path/to/source.cc: - # outputs: - # obj/reative/path/to/target.source.o - # obj/some/other/target2.source.o - # another/target.txt - # - outputs_text = stdout.partition('\n outputs:\n')[2] - output_lines = [line.strip() for line in outputs_text.split('\n')] - return [target for target in output_lines - if target and (target.endswith('.o') or target.endswith('.obj'))] + # Ninja needs the path to the source file relative to the output build + # directory. 
+ rel_filename = os.path.relpath(filename, out_dir) + + p = subprocess.Popen(['ninja', '-C', out_dir, '-t', 'query', rel_filename], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True) + stdout, _ = p.communicate() + if p.returncode != 0: + return [] + + # The output looks like: + # ../../relative/path/to/source.cc: + # outputs: + # obj/reative/path/to/target.source.o + # obj/some/other/target2.source.o + # another/target.txt + # + outputs_text = stdout.partition('\n outputs:\n')[2] + output_lines = [line.strip() for line in outputs_text.split('\n')] + return [ + target for target in output_lines + if target and (target.endswith('.o') or target.endswith('.obj')) + ] def GetClangCommandLineForNinjaOutput(out_dir, build_target): - """Returns the Clang command line for building |build_target| + """Returns the Clang command line for building |build_target| Asks ninja for the list of commands used to build |filename| and returns the final Clang invocation. @@ -173,24 +176,25 @@ def GetClangCommandLineForNinjaOutput(out_dir, build_target): (String or None) Clang command line or None if a Clang command line couldn't be determined. """ - p = subprocess.Popen(['ninja', '-v', '-C', out_dir, - '-t', 'commands', build_target], - stdout=subprocess.PIPE, universal_newlines=True) - stdout, _ = p.communicate() - if p.returncode != 0: + p = subprocess.Popen( + ['ninja', '-v', '-C', out_dir, '-t', 'commands', build_target], + stdout=subprocess.PIPE, + universal_newlines=True) + stdout, _ = p.communicate() + if p.returncode != 0: + return None + + # Ninja will return multiple build steps for all dependencies up to + # |build_target|. The build step we want is the last Clang invocation, which + # is expected to be the one that outputs |build_target|. + for line in reversed(stdout.split('\n')): + if 'clang' in line: + return line return None - # Ninja will return multiple build steps for all dependencies up to - # |build_target|. 
The build step we want is the last Clang invocation, which - # is expected to be the one that outputs |build_target|. - for line in reversed(stdout.split('\n')): - if 'clang' in line: - return line - return None - def GetClangCommandLineFromNinjaForSource(out_dir, filename): - """Returns a Clang command line used to build |filename|. + """Returns a Clang command line used to build |filename|. The same source file could be built multiple times using different tool chains. In such cases, this command returns the first Clang invocation. We @@ -206,17 +210,17 @@ def GetClangCommandLineFromNinjaForSource(out_dir, filename): (String or None): Command line for Clang invocation using |filename| as a source. Returns None if no such command line could be found. """ - build_targets = GetNinjaBuildOutputsForSourceFile(out_dir, filename) - for build_target in build_targets: - command_line = GetClangCommandLineForNinjaOutput(out_dir, build_target) - if command_line: - return command_line - return None + build_targets = GetNinjaBuildOutputsForSourceFile(out_dir, filename) + for build_target in build_targets: + command_line = GetClangCommandLineForNinjaOutput(out_dir, build_target) + if command_line: + return command_line + return None def GetClangOptionsFromCommandLine(clang_commandline, out_dir, additional_flags): - """Extracts relevant command line options from |clang_commandline| + """Extracts relevant command line options from |clang_commandline| Args: clang_commandline: (String) Full Clang invocation. @@ -228,46 +232,47 @@ def GetClangOptionsFromCommandLine(clang_commandline, out_dir, (List of Strings) The list of command line flags for this source file. Can be empty. """ - clang_flags = [] + additional_flags - - # Parse flags that are important for YCM's purposes. 
- clang_tokens = shlex.split(clang_commandline) - for flag_index, flag in enumerate(clang_tokens): - if flag.startswith('-I'): - # Relative paths need to be resolved, because they're relative to the - # output dir, not the source. - if flag[2] == '/': - clang_flags.append(flag) - else: - abs_path = os.path.normpath(os.path.join(out_dir, flag[2:])) - clang_flags.append('-I' + abs_path) - elif flag.startswith('-std'): - clang_flags.append(flag) - elif flag.startswith('-') and flag[1] in 'DWFfmO': - if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard': - # These flags causes libclang (3.3) to crash. Remove it until things - # are fixed. - continue - clang_flags.append(flag) - elif flag == '-isysroot': - # On Mac -isysroot is used to find the system headers. - # Copy over both flags. - if flag_index + 1 < len(clang_tokens): - clang_flags.append(flag) - clang_flags.append(clang_tokens[flag_index + 1]) - elif flag.startswith('--sysroot='): - # On Linux we use a sysroot image. - sysroot_path = flag.lstrip('--sysroot=') - if sysroot_path.startswith('/'): - clang_flags.append(flag) - else: - abs_path = os.path.normpath(os.path.join(out_dir, sysroot_path)) - clang_flags.append('--sysroot=' + abs_path) - return clang_flags + clang_flags = [] + additional_flags + + # Parse flags that are important for YCM's purposes. + clang_tokens = shlex.split(clang_commandline) + for flag_index, flag in enumerate(clang_tokens): + if flag.startswith('-I'): + # Relative paths need to be resolved, because they're relative to the + # output dir, not the source. + if flag[2] == '/': + clang_flags.append(flag) + else: + abs_path = os.path.normpath(os.path.join(out_dir, flag[2:])) + clang_flags.append('-I' + abs_path) + elif flag.startswith('-std'): + clang_flags.append(flag) + elif flag.startswith('-') and flag[1] in 'DWFfmO': + if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard': + # These flags causes libclang (3.3) to crash. 
Remove it until things + # are fixed. + continue + clang_flags.append(flag) + elif flag == '-isysroot': + # On Mac -isysroot is used to find the system headers. + # Copy over both flags. + if flag_index + 1 < len(clang_tokens): + clang_flags.append(flag) + clang_flags.append(clang_tokens[flag_index + 1]) + elif flag.startswith('--sysroot='): + # On Linux we use a sysroot image. + sysroot_path = flag.lstrip('--sysroot=') + if sysroot_path.startswith('/'): + clang_flags.append(flag) + else: + abs_path = os.path.normpath(os.path.join( + out_dir, sysroot_path)) + clang_flags.append('--sysroot=' + abs_path) + return clang_flags def GetClangOptionsFromNinjaForFilename(webrtc_root, filename): - """Returns the Clang command line options needed for building |filename|. + """Returns the Clang command line options needed for building |filename|. Command line options are based on the command used by ninja for building |filename|. If |filename| is a .h file, uses its companion .cc or .cpp file. @@ -283,54 +288,55 @@ def GetClangOptionsFromNinjaForFilename(webrtc_root, filename): (List of Strings) The list of command line flags for this source file. Can be empty. """ - if not webrtc_root: - return [] + if not webrtc_root: + return [] - # Generally, everyone benefits from including WebRTC's src/, because all of - # WebRTC's includes are relative to that. - additional_flags = ['-I' + os.path.join(webrtc_root)] + # Generally, everyone benefits from including WebRTC's src/, because all of + # WebRTC's includes are relative to that. + additional_flags = ['-I' + os.path.join(webrtc_root)] - # Version of Clang used to compile WebRTC can be newer then version of - # libclang that YCM uses for completion. 
So it's possible that YCM's libclang - # doesn't know about some used warning options, which causes compilation - # warnings (and errors, because of '-Werror'); - additional_flags.append('-Wno-unknown-warning-option') + # Version of Clang used to compile WebRTC can be newer then version of + # libclang that YCM uses for completion. So it's possible that YCM's libclang + # doesn't know about some used warning options, which causes compilation + # warnings (and errors, because of '-Werror'); + additional_flags.append('-Wno-unknown-warning-option') - sys.path.append(os.path.join(webrtc_root, 'tools', 'vim')) - from ninja_output import GetNinjaOutputDirectory - out_dir = GetNinjaOutputDirectory(webrtc_root) + sys.path.append(os.path.join(webrtc_root, 'tools', 'vim')) + from ninja_output import GetNinjaOutputDirectory + out_dir = GetNinjaOutputDirectory(webrtc_root) - basename, extension = os.path.splitext(filename) - if extension == '.h': - candidates = [basename + ext for ext in _HEADER_ALTERNATES] - else: - candidates = [filename] + basename, extension = os.path.splitext(filename) + if extension == '.h': + candidates = [basename + ext for ext in _HEADER_ALTERNATES] + else: + candidates = [filename] - clang_line = None - buildable_extension = extension - for candidate in candidates: - clang_line = GetClangCommandLineFromNinjaForSource(out_dir, candidate) - if clang_line: - buildable_extension = os.path.splitext(candidate)[1] - break + clang_line = None + buildable_extension = extension + for candidate in candidates: + clang_line = GetClangCommandLineFromNinjaForSource(out_dir, candidate) + if clang_line: + buildable_extension = os.path.splitext(candidate)[1] + break - additional_flags += _EXTENSION_FLAGS.get(buildable_extension, []) + additional_flags += _EXTENSION_FLAGS.get(buildable_extension, []) - if not clang_line: - # If ninja didn't know about filename or it's companion files, then try a - # default build target. 
It is possible that the file is new, or build.ninja - # is stale. - clang_line = GetClangCommandLineFromNinjaForSource( - out_dir, GetDefaultSourceFile(webrtc_root, filename)) + if not clang_line: + # If ninja didn't know about filename or it's companion files, then try a + # default build target. It is possible that the file is new, or build.ninja + # is stale. + clang_line = GetClangCommandLineFromNinjaForSource( + out_dir, GetDefaultSourceFile(webrtc_root, filename)) - if not clang_line: - return additional_flags + if not clang_line: + return additional_flags - return GetClangOptionsFromCommandLine(clang_line, out_dir, additional_flags) + return GetClangOptionsFromCommandLine(clang_line, out_dir, + additional_flags) def FlagsForFile(filename): - """This is the main entry point for YCM. Its interface is fixed. + """This is the main entry point for YCM. Its interface is fixed. Args: filename: (String) Path to source file being edited. @@ -340,18 +346,16 @@ def FlagsForFile(filename): 'flags': (List of Strings) Command line flags. 'do_cache': (Boolean) True if the result should be cached. """ - abs_filename = os.path.abspath(filename) - webrtc_root = FindWebrtcSrcFromFilename(abs_filename) - clang_flags = GetClangOptionsFromNinjaForFilename(webrtc_root, abs_filename) + abs_filename = os.path.abspath(filename) + webrtc_root = FindWebrtcSrcFromFilename(abs_filename) + clang_flags = GetClangOptionsFromNinjaForFilename(webrtc_root, + abs_filename) - # If clang_flags could not be determined, then assume that was due to a - # transient failure. Preventing YCM from caching the flags allows us to try to - # determine the flags again. - should_cache_flags_for_file = bool(clang_flags) + # If clang_flags could not be determined, then assume that was due to a + # transient failure. Preventing YCM from caching the flags allows us to try to + # determine the flags again. 
+ should_cache_flags_for_file = bool(clang_flags) - final_flags = _DEFAULT_FLAGS + clang_flags + final_flags = _DEFAULT_FLAGS + clang_flags - return { - 'flags': final_flags, - 'do_cache': should_cache_flags_for_file - } + return {'flags': final_flags, 'do_cache': should_cache_flags_for_file} diff --git a/tools_webrtc/whitespace.txt b/tools_webrtc/whitespace.txt index f85a7d2cf8..b1cfabb590 100644 --- a/tools_webrtc/whitespace.txt +++ b/tools_webrtc/whitespace.txt @@ -14,3 +14,4 @@ Foo Bar Baz Bur Alios ego vidi ventos; alias prospexi animo procellas - Cicero +Lahiru modifiying the line numbber 17Lahiru modifiying the line numbber 17 diff --git a/video/BUILD.gn b/video/BUILD.gn index 07b03eebc1..7743aba944 100644 --- a/video/BUILD.gn +++ b/video/BUILD.gn @@ -12,8 +12,6 @@ rtc_library("video") { sources = [ "buffered_frame_decryptor.cc", "buffered_frame_decryptor.h", - "call_stats.cc", - "call_stats.h", "call_stats2.cc", "call_stats2.h", "encoder_rtcp_feedback.cc", @@ -22,18 +20,12 @@ rtc_library("video") { "quality_limitation_reason_tracker.h", "quality_threshold.cc", "quality_threshold.h", - "receive_statistics_proxy.cc", - "receive_statistics_proxy.h", "receive_statistics_proxy2.cc", "receive_statistics_proxy2.h", "report_block_stats.cc", "report_block_stats.h", - "rtp_streams_synchronizer.cc", - "rtp_streams_synchronizer.h", "rtp_streams_synchronizer2.cc", "rtp_streams_synchronizer2.h", - "rtp_video_stream_receiver.cc", - "rtp_video_stream_receiver.h", "rtp_video_stream_receiver2.cc", "rtp_video_stream_receiver2.h", "rtp_video_stream_receiver_frame_transformer_delegate.cc", @@ -48,38 +40,34 @@ rtc_library("video") { "stream_synchronization.h", "transport_adapter.cc", "transport_adapter.h", - "video_quality_observer.cc", - "video_quality_observer.h", "video_quality_observer2.cc", "video_quality_observer2.h", - "video_receive_stream.cc", - "video_receive_stream.h", "video_receive_stream2.cc", "video_receive_stream2.h", "video_send_stream.cc", 
"video_send_stream.h", "video_send_stream_impl.cc", "video_send_stream_impl.h", - "video_stream_decoder.cc", - "video_stream_decoder.h", "video_stream_decoder2.cc", "video_stream_decoder2.h", ] deps = [ ":frame_dumping_decoder", + ":video_stream_encoder_impl", "../api:array_view", "../api:fec_controller_api", "../api:frame_transformer_interface", "../api:libjingle_peerconnection_api", "../api:rtp_parameters", "../api:scoped_refptr", + "../api:sequence_checker", "../api:transport_api", "../api/crypto:frame_decryptor_interface", "../api/crypto:options", "../api/rtc_event_log", "../api/task_queue", - "../api/transport/media:media_transport_interface", + "../api/units:time_delta", "../api/units:timestamp", "../api/video:encoded_image", "../api/video:recordable_encoded_frame", @@ -87,10 +75,8 @@ rtc_library("video") { "../api/video:video_bitrate_allocator", "../api/video:video_codec_constants", "../api/video:video_frame", - "../api/video:video_frame_i420", "../api/video:video_rtp_headers", "../api/video:video_stream_encoder", - "../api/video:video_stream_encoder_create", "../api/video_codecs:video_codecs_api", "../call:bitrate_allocator", "../call:call_interfaces", @@ -99,7 +85,6 @@ rtc_library("video") { "../call:rtp_sender", "../call:video_stream_api", "../common_video", - "../media:rtc_h264_profile_id", "../modules:module_api", "../modules:module_api_public", "../modules/pacing", @@ -121,6 +106,7 @@ rtc_library("video") { "../rtc_base:rtc_numerics", "../rtc_base:rtc_task_queue", "../rtc_base:stringutils", + "../rtc_base:threading", "../rtc_base:weak_ptr", "../rtc_base/experiments:alr_experiment", "../rtc_base/experiments:field_trial_parser", @@ -128,7 +114,8 @@ rtc_library("video") { "../rtc_base/experiments:min_video_bitrate_experiment", "../rtc_base/experiments:quality_scaling_experiment", "../rtc_base/experiments:rate_control_settings", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/synchronization:mutex", + 
"../rtc_base/system:no_unique_address", "../rtc_base/system:thread_registry", "../rtc_base/task_utils:pending_task_safety_flag", "../rtc_base/task_utils:repeating_task", @@ -137,6 +124,9 @@ rtc_library("video") { "../system_wrappers", "../system_wrappers:field_trial", "../system_wrappers:metrics", + "./adaptation:video_adaptation", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/memory", @@ -149,6 +139,77 @@ rtc_library("video") { } } +rtc_source_set("video_legacy") { + sources = [ + "call_stats.cc", + "call_stats.h", + "receive_statistics_proxy.cc", + "receive_statistics_proxy.h", + "rtp_streams_synchronizer.cc", + "rtp_streams_synchronizer.h", + "rtp_video_stream_receiver.cc", + "rtp_video_stream_receiver.h", + "video_quality_observer.cc", + "video_quality_observer.h", + "video_receive_stream.cc", + "video_receive_stream.h", + "video_stream_decoder.cc", + "video_stream_decoder.h", + ] + deps = [ + ":frame_dumping_decoder", + ":video", + "../api:array_view", + "../api:scoped_refptr", + "../api:sequence_checker", + "../api/crypto:frame_decryptor_interface", + "../api/task_queue", + "../api/units:timestamp", + "../api/video:encoded_image", + "../api/video:recordable_encoded_frame", + "../api/video:video_frame", + "../api/video:video_rtp_headers", + "../api/video_codecs:video_codecs_api", + "../call:call_interfaces", + "../call:rtp_interfaces", + "../call:rtp_receiver", # For RtxReceiveStream. 
+ "../call:video_stream_api", + "../common_video", + "../modules:module_api", + "../modules/pacing", + "../modules/remote_bitrate_estimator", + "../modules/rtp_rtcp", + "../modules/rtp_rtcp:rtp_rtcp_format", + "../modules/rtp_rtcp:rtp_rtcp_legacy", + "../modules/rtp_rtcp:rtp_video_header", + "../modules/utility", + "../modules/video_coding", + "../modules/video_coding:video_codec_interface", + "../modules/video_coding:video_coding_utility", + "../modules/video_coding/deprecated:nack_module", + "../rtc_base:checks", + "../rtc_base:rtc_base_approved", + "../rtc_base:rtc_numerics", + "../rtc_base:rtc_task_queue", + "../rtc_base/experiments:field_trial_parser", + "../rtc_base/experiments:keyframe_interval_settings_experiment", + "../rtc_base/synchronization:mutex", + "../rtc_base/system:no_unique_address", + "../rtc_base/system:thread_registry", + "../rtc_base/task_utils:to_queued_task", + "../system_wrappers", + "../system_wrappers:field_trial", + "../system_wrappers:metrics", + ] + if (!build_with_mozilla) { + deps += [ "../media:rtc_media_base" ] + } + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/types:optional", + ] +} + rtc_library("video_stream_decoder_impl") { visibility = [ "*" ] @@ -158,6 +219,7 @@ rtc_library("video_stream_decoder_impl") { ] deps = [ + "../api:sequence_checker", "../api/task_queue", "../api/video:encoded_frame", "../api/video:video_frame", @@ -167,9 +229,10 @@ rtc_library("video_stream_decoder_impl") { "../modules/video_coding", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_task_queue", + "../rtc_base/synchronization:mutex", "../system_wrappers", - "//third_party/abseil-cpp/absl/types:optional", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } rtc_library("frame_dumping_decoder") { @@ -197,6 +260,8 @@ rtc_library("video_stream_encoder_impl") { # visibility = [ "../api/video:video_stream_encoder_create" ] sources = [ + "alignment_adjuster.cc", + 
"alignment_adjuster.h", "encoder_bitrate_adjuster.cc", "encoder_bitrate_adjuster.h", "encoder_overshoot_detector.cc", @@ -211,6 +276,8 @@ rtc_library("video_stream_encoder_impl") { deps = [ "../api:rtp_parameters", + "../api:sequence_checker", + "../api/adaptation:resource_adaptation_api", "../api/task_queue:task_queue", "../api/units:data_rate", "../api/video:encoded_image", @@ -220,7 +287,7 @@ rtc_library("video_stream_encoder_impl") { "../api/video:video_bitrate_allocator_factory", "../api/video:video_codec_constants", "../api/video:video_frame", - "../api/video:video_frame_i420", + "../api/video:video_layers_allocation", "../api/video:video_rtp_headers", "../api/video:video_stream_encoder", "../api/video_codecs:video_codecs_api", @@ -234,7 +301,6 @@ rtc_library("video_stream_encoder_impl") { "../rtc_base:checks", "../rtc_base:criticalsection", "../rtc_base:logging", - "../rtc_base:macromagic", "../rtc_base:rtc_base_approved", "../rtc_base:rtc_event", "../rtc_base:rtc_numerics", @@ -242,16 +308,21 @@ rtc_library("video_stream_encoder_impl") { "../rtc_base:timeutils", "../rtc_base/experiments:alr_experiment", "../rtc_base/experiments:balanced_degradation_settings", + "../rtc_base/experiments:encoder_info_settings", "../rtc_base/experiments:field_trial_parser", "../rtc_base/experiments:quality_rampup_experiment", "../rtc_base/experiments:quality_scaler_settings", "../rtc_base/experiments:quality_scaling_experiment", "../rtc_base/experiments:rate_control_settings", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/synchronization:mutex", + "../rtc_base/system:no_unique_address", + "../rtc_base/task_utils:pending_task_safety_flag", "../rtc_base/task_utils:repeating_task", "../system_wrappers", "../system_wrappers:field_trial", "adaptation:video_adaptation", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/types:optional", @@ -267,227 +338,239 @@ 
if (rtc_include_tests) { "../test:test_support", ] } - rtc_library("video_quality_test") { - testonly = true + if (!build_with_chromium) { + rtc_library("video_quality_test") { + testonly = true - # Only targets in this file and api/ can depend on this. - visibility = [ - ":*", - "../api:create_video_quality_test_fixture_api", - ] - sources = [ - "video_analyzer.cc", - "video_analyzer.h", - "video_quality_test.cc", - "video_quality_test.h", - ] - deps = [ - ":frame_dumping_decoder", - "../api:create_frame_generator", - "../api:fec_controller_api", - "../api:frame_generator_api", - "../api:libjingle_peerconnection_api", - "../api:rtc_event_log_output_file", - "../api:test_dependency_factory", - "../api:video_quality_test_fixture_api", - "../api/rtc_event_log:rtc_event_log_factory", - "../api/task_queue", - "../api/task_queue:default_task_queue_factory", - "../api/transport/media:media_transport_interface", - "../api/video:builtin_video_bitrate_allocator_factory", - "../api/video:video_bitrate_allocator_factory", - "../api/video:video_frame", - "../api/video:video_rtp_headers", - "../api/video_codecs:video_codecs_api", - "../call:fake_network", - "../call:simulated_network", - "../common_video", - "../media:rtc_audio_video", - "../media:rtc_encoder_simulcast_proxy", - "../media:rtc_internal_video_codecs", - "../media:rtc_media_base", - "../modules/audio_device:audio_device_api", - "../modules/audio_device:audio_device_module_from_input_and_output", - "../modules/audio_device:windows_core_audio_utility", - "../modules/audio_mixer:audio_mixer_impl", - "../modules/rtp_rtcp", - "../modules/rtp_rtcp:rtp_rtcp_format", - "../modules/video_coding", - "../modules/video_coding:video_coding_utility", - "../modules/video_coding:webrtc_h264", - "../modules/video_coding:webrtc_multiplex", - "../modules/video_coding:webrtc_vp8", - "../modules/video_coding:webrtc_vp9", - "../rtc_base:rtc_base_approved", - "../rtc_base:rtc_base_tests_utils", - "../rtc_base:rtc_numerics", - 
"../rtc_base:task_queue_for_test", - "../rtc_base/task_utils:repeating_task", - "../system_wrappers", - "../test:fake_video_codecs", - "../test:fileutils", - "../test:perf_test", - "../test:platform_video_capturer", - "../test:rtp_test_utils", - "../test:test_common", - "../test:test_renderer", - "../test:test_support", - "../test:test_support_test_artifacts", - "../test:video_test_common", - "../test:video_test_support", - "//third_party/abseil-cpp/absl/algorithm:container", - "//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", - ] + # Only targets in this file and api/ can depend on this. + visibility = [ + ":*", + "../api:create_video_quality_test_fixture_api", + ] + sources = [ + "video_analyzer.cc", + "video_analyzer.h", + "video_quality_test.cc", + "video_quality_test.h", + ] + deps = [ + ":frame_dumping_decoder", + "../api:create_frame_generator", + "../api:fec_controller_api", + "../api:frame_generator_api", + "../api:libjingle_peerconnection_api", + "../api:rtc_event_log_output_file", + "../api:test_dependency_factory", + "../api:video_quality_test_fixture_api", + "../api/numerics", + "../api/rtc_event_log:rtc_event_log_factory", + "../api/task_queue", + "../api/task_queue:default_task_queue_factory", + "../api/video:builtin_video_bitrate_allocator_factory", + "../api/video:video_bitrate_allocator_factory", + "../api/video:video_frame", + "../api/video:video_rtp_headers", + "../api/video_codecs:video_codecs_api", + "../call:fake_network", + "../call:simulated_network", + "../common_video", + "../media:rtc_audio_video", + "../media:rtc_encoder_simulcast_proxy", + "../media:rtc_internal_video_codecs", + "../media:rtc_media_base", + "../modules/audio_device:audio_device_api", + "../modules/audio_device:audio_device_module_from_input_and_output", + "../modules/audio_device:windows_core_audio_utility", + "../modules/audio_mixer:audio_mixer_impl", + "../modules/rtp_rtcp", + "../modules/rtp_rtcp:rtp_rtcp_format", + 
"../modules/video_coding", + "../modules/video_coding:video_coding_utility", + "../modules/video_coding:webrtc_h264", + "../modules/video_coding:webrtc_multiplex", + "../modules/video_coding:webrtc_vp8", + "../modules/video_coding:webrtc_vp9", + "../rtc_base:rtc_base_approved", + "../rtc_base:rtc_base_tests_utils", + "../rtc_base:rtc_numerics", + "../rtc_base:task_queue_for_test", + "../rtc_base/synchronization:mutex", + "../rtc_base/task_utils:repeating_task", + "../system_wrappers", + "../test:fake_video_codecs", + "../test:fileutils", + "../test:perf_test", + "../test:platform_video_capturer", + "../test:rtp_test_utils", + "../test:test_common", + "../test:test_renderer", + "../test:test_support", + "../test:test_support_test_artifacts", + "../test:video_test_common", + "../test:video_test_support", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/algorithm:container", + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/flags:parse", + ] - if (is_mac || is_ios) { - deps += [ "../test:video_test_mac" ] + if (is_mac || is_ios) { + deps += [ "../test:video_test_mac" ] + } } - } - - rtc_library("video_full_stack_tests") { - testonly = true - sources = [ "full_stack_tests.cc" ] - deps = [ - ":video_quality_test", - "../api:simulated_network_api", - "../api:test_dependency_factory", - "../api:video_quality_test_fixture_api", - "../api/video_codecs:video_codecs_api", - "../media:rtc_vp9_profile", - "../modules/pacing", - "../modules/video_coding:webrtc_vp9", - "../rtc_base/experiments:alr_experiment", - "../system_wrappers:field_trial", - "../test:field_trial", - "../test:fileutils", - "../test:test_common", - "../test:test_support", - "//testing/gtest", - "//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", - ] - } + rtc_library("video_full_stack_tests") { + testonly = true - rtc_library("video_pc_full_stack_tests") { - testonly = true + sources = [ "full_stack_tests.cc" ] + deps = [ + 
":video_quality_test", + "../api:simulated_network_api", + "../api:test_dependency_factory", + "../api:video_quality_test_fixture_api", + "../api/video_codecs:video_codecs_api", + "../modules/pacing", + "../modules/video_coding:webrtc_vp9", + "../rtc_base/experiments:alr_experiment", + "../system_wrappers:field_trial", + "../test:field_trial", + "../test:fileutils", + "../test:test_common", + "../test:test_support", + "//testing/gtest", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/flags:parse", + "//third_party/abseil-cpp/absl/types:optional", + ] + } - sources = [ "pc_full_stack_tests.cc" ] - deps = [ - "../api:create_network_emulation_manager", - "../api:create_peer_connection_quality_test_frame_generator", - "../api:create_peerconnection_quality_test_fixture", - "../api:frame_generator_api", - "../api:media_stream_interface", - "../api:network_emulation_manager_api", - "../api:peer_connection_quality_test_fixture_api", - "../api:simulated_network_api", - "../call:simulated_network", - "../media:rtc_vp9_profile", - "../modules/video_coding:webrtc_vp9", - "../system_wrappers:field_trial", - "../test:field_trial", - "../test:fileutils", - "../test:test_support", - "../test/pc/e2e:network_quality_metrics_reporter", - ] - } + rtc_library("video_pc_full_stack_tests") { + testonly = true - rtc_library("video_loopback_lib") { - testonly = true - sources = [ - "video_loopback.cc", - "video_loopback.h", - ] - deps = [ - ":video_quality_test", - "../api:libjingle_peerconnection_api", - "../api:simulated_network_api", - "../api:video_quality_test_fixture_api", - "../api/transport:bitrate_settings", - "../api/video_codecs:video_codecs_api", - "../rtc_base:checks", - "../rtc_base:logging", - "../system_wrappers:field_trial", - "../test:field_trial", - "../test:run_test", - "../test:run_test_interface", - "../test:test_common", - "../test:test_renderer", - "../test:test_support", - "//testing/gtest", - 
"//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", - ] - } + sources = [ "pc_full_stack_tests.cc" ] + deps = [ + "../api:create_network_emulation_manager", + "../api:create_peer_connection_quality_test_frame_generator", + "../api:create_peerconnection_quality_test_fixture", + "../api:frame_generator_api", + "../api:media_stream_interface", + "../api:network_emulation_manager_api", + "../api:peer_connection_quality_test_fixture_api", + "../api:simulated_network_api", + "../api:time_controller", + "../api/video_codecs:video_codecs_api", + "../call:simulated_network", + "../modules/video_coding:webrtc_vp9", + "../system_wrappers:field_trial", + "../test:field_trial", + "../test:fileutils", + "../test:test_support", + "../test/pc/e2e:network_quality_metrics_reporter", + ] + } - if (is_mac) { - mac_app_bundle("video_loopback") { + rtc_library("video_loopback_lib") { testonly = true - sources = [ "video_loopback_main.mm" ] - info_plist = "../test/mac/Info.plist" - deps = [ ":video_loopback_lib" ] + sources = [ + "video_loopback.cc", + "video_loopback.h", + ] + deps = [ + ":video_quality_test", + "../api:libjingle_peerconnection_api", + "../api:simulated_network_api", + "../api:video_quality_test_fixture_api", + "../api/transport:bitrate_settings", + "../api/video_codecs:video_codecs_api", + "../rtc_base:checks", + "../rtc_base:logging", + "../system_wrappers:field_trial", + "../test:field_trial", + "../test:run_test", + "../test:run_test_interface", + "../test:test_common", + "../test:test_renderer", + "../test:test_support", + "//testing/gtest", + ] + absl_deps = [ + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/flags:parse", + "//third_party/abseil-cpp/absl/types:optional", + ] } - } else { - rtc_executable("video_loopback") { - testonly = true - sources = [ "video_loopback_main.cc" ] - deps = [ ":video_loopback_lib" ] + + if (is_mac) { + mac_app_bundle("video_loopback") { + testonly = true + 
sources = [ "video_loopback_main.mm" ] + info_plist = "../test/mac/Info.plist" + deps = [ ":video_loopback_lib" ] + } + } else { + rtc_executable("video_loopback") { + testonly = true + sources = [ "video_loopback_main.cc" ] + deps = [ ":video_loopback_lib" ] + } } - } - rtc_executable("screenshare_loopback") { - testonly = true - sources = [ "screenshare_loopback.cc" ] + rtc_executable("screenshare_loopback") { + testonly = true + sources = [ "screenshare_loopback.cc" ] - deps = [ - ":video_quality_test", - "../api:libjingle_peerconnection_api", - "../api:simulated_network_api", - "../api:video_quality_test_fixture_api", - "../api/transport:bitrate_settings", - "../api/video_codecs:video_codecs_api", - "../rtc_base:checks", - "../rtc_base:logging", - "../rtc_base:stringutils", - "../system_wrappers:field_trial", - "../test:field_trial", - "../test:run_test", - "../test:run_test_interface", - "../test:test_common", - "../test:test_renderer", - "../test:test_support", - "//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", - "//third_party/abseil-cpp/absl/types:optional", - ] - } + deps = [ + ":video_quality_test", + "../api:libjingle_peerconnection_api", + "../api:simulated_network_api", + "../api:video_quality_test_fixture_api", + "../api/transport:bitrate_settings", + "../api/video_codecs:video_codecs_api", + "../rtc_base:checks", + "../rtc_base:logging", + "../rtc_base:stringutils", + "../system_wrappers:field_trial", + "../test:field_trial", + "../test:run_test", + "../test:run_test_interface", + "../test:test_common", + "../test:test_renderer", + "../test:test_support", + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/flags:parse", + "//third_party/abseil-cpp/absl/types:optional", + ] + } - rtc_executable("sv_loopback") { - testonly = true - sources = [ "sv_loopback.cc" ] - deps = [ - ":video_quality_test", - "../api:libjingle_peerconnection_api", - "../api:simulated_network_api", - 
"../api:video_quality_test_fixture_api", - "../api/transport:bitrate_settings", - "../api/video_codecs:video_codecs_api", - "../rtc_base:checks", - "../rtc_base:logging", - "../rtc_base:stringutils", - "../system_wrappers:field_trial", - "../test:field_trial", - "../test:run_test", - "../test:run_test_interface", - "../test:test_common", - "../test:test_renderer", - "../test:test_support", - "//testing/gtest", - "//third_party/abseil-cpp/absl/flags:flag", - "//third_party/abseil-cpp/absl/flags:parse", - ] + rtc_executable("sv_loopback") { + testonly = true + sources = [ "sv_loopback.cc" ] + deps = [ + ":video_quality_test", + "../api:libjingle_peerconnection_api", + "../api:simulated_network_api", + "../api:video_quality_test_fixture_api", + "../api/transport:bitrate_settings", + "../api/video_codecs:video_codecs_api", + "../rtc_base:checks", + "../rtc_base:logging", + "../rtc_base:stringutils", + "../system_wrappers:field_trial", + "../test:field_trial", + "../test:run_test", + "../test:run_test_interface", + "../test:test_common", + "../test:test_renderer", + "../test:test_support", + "//testing/gtest", + "//third_party/abseil-cpp/absl/flags:flag", + "//third_party/abseil-cpp/absl/flags:parse", + "//third_party/abseil-cpp/absl/types:optional", + ] + } } # TODO(pbos): Rename test suite. 
@@ -496,6 +579,7 @@ if (rtc_include_tests) { defines = [] sources = [ + "alignment_adjuster_unittest.cc", "buffered_frame_decryptor_unittest.cc", "call_stats2_unittest.cc", "call_stats_unittest.cc", @@ -516,7 +600,7 @@ if (rtc_include_tests) { "end_to_end_tests/multi_stream_tester.h", "end_to_end_tests/multi_stream_tests.cc", "end_to_end_tests/network_state_tests.cc", - "end_to_end_tests/probing_tests.cc", + "end_to_end_tests/resolution_bitrate_limits_tests.cc", "end_to_end_tests/retransmission_tests.cc", "end_to_end_tests/rtp_rtcp_tests.cc", "end_to_end_tests/ssrc_tests.cc", @@ -547,6 +631,7 @@ if (rtc_include_tests) { ] deps = [ ":video", + ":video_legacy", ":video_mocks", ":video_stream_decoder_impl", ":video_stream_encoder_impl", @@ -557,12 +642,15 @@ if (rtc_include_tests) { "../api:libjingle_peerconnection_api", "../api:mock_fec_controller_override", "../api:mock_frame_decryptor", + "../api:mock_video_codec_factory", "../api:mock_video_encoder", "../api:rtp_headers", "../api:rtp_parameters", "../api:scoped_refptr", + "../api:sequence_checker", "../api:simulated_network_api", "../api:transport_api", + "../api/adaptation:resource_adaptation_api", "../api/crypto:options", "../api/rtc_event_log", "../api/task_queue", @@ -575,7 +663,6 @@ if (rtc_include_tests) { "../api/video:video_adaptation", "../api/video:video_bitrate_allocation", "../api/video:video_frame", - "../api/video:video_frame_i420", "../api/video:video_frame_type", "../api/video:video_rtp_headers", "../api/video_codecs:video_codecs_api", @@ -600,7 +687,6 @@ if (rtc_include_tests) { "../media:rtc_media_base", "../media:rtc_media_tests_utils", "../media:rtc_simulcast_encoder_adapter", - "../modules:module_api", "../modules:module_api_public", "../modules/pacing", "../modules/rtp_rtcp", @@ -616,6 +702,8 @@ if (rtc_include_tests) { "../modules/video_coding:webrtc_multiplex", "../modules/video_coding:webrtc_vp8", "../modules/video_coding:webrtc_vp9", + "../modules/video_coding:webrtc_vp9_helpers", + 
"../modules/video_coding/codecs/av1:libaom_av1_encoder", "../rtc_base", "../rtc_base:checks", "../rtc_base:gunit_helpers", @@ -625,8 +713,10 @@ if (rtc_include_tests) { "../rtc_base:rtc_numerics", "../rtc_base:rtc_task_queue", "../rtc_base:task_queue_for_test", + "../rtc_base:threading", "../rtc_base/experiments:alr_experiment", - "../rtc_base/synchronization:sequence_checker", + "../rtc_base/experiments:encoder_info_settings", + "../rtc_base/synchronization:mutex", "../rtc_base/task_utils:to_queued_task", "../system_wrappers", "../system_wrappers:field_trial", @@ -648,6 +738,8 @@ if (rtc_include_tests) { "../test/time_controller", "adaptation:video_adaptation", "//testing/gtest", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/memory", "//third_party/abseil-cpp/absl/types:optional", diff --git a/video/adaptation/BUILD.gn b/video/adaptation/BUILD.gn index 51e6a2d84e..20a2370b57 100644 --- a/video/adaptation/BUILD.gn +++ b/video/adaptation/BUILD.gn @@ -10,12 +10,22 @@ import("../../webrtc.gni") rtc_library("video_adaptation") { sources = [ + "balanced_constraint.cc", + "balanced_constraint.h", + "bitrate_constraint.cc", + "bitrate_constraint.h", "encode_usage_resource.cc", "encode_usage_resource.h", "overuse_frame_detector.cc", "overuse_frame_detector.h", + "pixel_limit_resource.cc", + "pixel_limit_resource.h", + "quality_rampup_experiment_helper.cc", + "quality_rampup_experiment_helper.h", "quality_scaler_resource.cc", "quality_scaler_resource.h", + "video_stream_encoder_resource.cc", + "video_stream_encoder_resource.h", "video_stream_encoder_resource_manager.cc", "video_stream_encoder_resource_manager.h", ] @@ -23,7 +33,11 @@ rtc_library("video_adaptation") { deps = [ "../../api:rtp_parameters", "../../api:scoped_refptr", + "../../api:sequence_checker", + "../../api/adaptation:resource_adaptation_api", "../../api/task_queue:task_queue", + "../../api/units:data_rate", + "../../api/units:time_delta", 
"../../api/video:video_adaptation", "../../api/video:video_frame", "../../api/video:video_stream_encoder", @@ -32,7 +46,6 @@ rtc_library("video_adaptation") { "../../modules/video_coding:video_coding_utility", "../../rtc_base:checks", "../../rtc_base:logging", - "../../rtc_base:macromagic", "../../rtc_base:rtc_base_approved", "../../rtc_base:rtc_event", "../../rtc_base:rtc_numerics", @@ -42,10 +55,14 @@ rtc_library("video_adaptation") { "../../rtc_base/experiments:field_trial_parser", "../../rtc_base/experiments:quality_rampup_experiment", "../../rtc_base/experiments:quality_scaler_settings", - "../../rtc_base/synchronization:sequence_checker", + "../../rtc_base/synchronization:mutex", + "../../rtc_base/system:no_unique_address", "../../rtc_base/task_utils:repeating_task", + "../../rtc_base/task_utils:to_queued_task", "../../system_wrappers:field_trial", "../../system_wrappers:system_wrappers", + ] + absl_deps = [ "//third_party/abseil-cpp/absl/algorithm:container", "//third_party/abseil-cpp/absl/base:core_headers", "//third_party/abseil-cpp/absl/types:optional", @@ -58,20 +75,23 @@ if (rtc_include_tests) { defines = [] sources = [ + "bitrate_constraint_unittest.cc", "overuse_frame_detector_unittest.cc", + "pixel_limit_resource_unittest.cc", "quality_scaler_resource_unittest.cc", - "video_stream_encoder_resource_manager_unittest.cc", ] deps = [ ":video_adaptation", "../../api:scoped_refptr", - "../../api/task_queue:default_task_queue_factory", "../../api/task_queue:task_queue", + "../../api/units:time_delta", + "../../api/units:timestamp", "../../api/video:encoded_image", "../../api/video:video_adaptation", - "../../api/video:video_frame_i420", + "../../api/video:video_frame", "../../api/video_codecs:video_codecs_api", "../../call/adaptation:resource_adaptation", + "../../call/adaptation:resource_adaptation_test_utilities", "../../modules/video_coding:video_coding_utility", "../../rtc_base:checks", "../../rtc_base:logging", @@ -81,11 +101,12 @@ if 
(rtc_include_tests) { "../../rtc_base:rtc_numerics", "../../rtc_base:rtc_task_queue", "../../rtc_base:task_queue_for_test", + "../../rtc_base/task_utils:to_queued_task", "../../test:field_trial", - "//test:rtc_expect_death", - "//test:test_support", - "//testing/gtest", - "//third_party/abseil-cpp/absl/types:optional", + "../../test:rtc_expect_death", + "../../test:test_support", + "../../test/time_controller:time_controller", ] + absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ] } } diff --git a/video/adaptation/OWNERS b/video/adaptation/OWNERS index e4bec4aebb..b65c763efc 100644 --- a/video/adaptation/OWNERS +++ b/video/adaptation/OWNERS @@ -1,2 +1,3 @@ eshr@google.com hbos@webrtc.org +ilnik@webrtc.org diff --git a/video/adaptation/balanced_constraint.cc b/video/adaptation/balanced_constraint.cc new file mode 100644 index 0000000000..ec0b8e41d5 --- /dev/null +++ b/video/adaptation/balanced_constraint.cc @@ -0,0 +1,61 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "video/adaptation/balanced_constraint.h" + +#include +#include + +#include "api/sequence_checker.h" +#include "rtc_base/task_utils/to_queued_task.h" + +namespace webrtc { + +BalancedConstraint::BalancedConstraint( + DegradationPreferenceProvider* degradation_preference_provider) + : encoder_target_bitrate_bps_(absl::nullopt), + degradation_preference_provider_(degradation_preference_provider) { + RTC_DCHECK(degradation_preference_provider_); + sequence_checker_.Detach(); +} + +void BalancedConstraint::OnEncoderTargetBitrateUpdated( + absl::optional encoder_target_bitrate_bps) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + encoder_target_bitrate_bps_ = std::move(encoder_target_bitrate_bps); +} + +bool BalancedConstraint::IsAdaptationUpAllowed( + const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions_before, + const VideoSourceRestrictions& restrictions_after) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + // Don't adapt if BalancedDegradationSettings applies and determines this will + // exceed bitrate constraints. 
+ if (degradation_preference_provider_->degradation_preference() == + DegradationPreference::BALANCED) { + int frame_size_pixels = input_state.single_active_stream_pixels().value_or( + input_state.frame_size_pixels().value()); + if (!balanced_settings_.CanAdaptUp( + input_state.video_codec_type(), frame_size_pixels, + encoder_target_bitrate_bps_.value_or(0))) { + return false; + } + if (DidIncreaseResolution(restrictions_before, restrictions_after) && + !balanced_settings_.CanAdaptUpResolution( + input_state.video_codec_type(), frame_size_pixels, + encoder_target_bitrate_bps_.value_or(0))) { + return false; + } + } + return true; +} + +} // namespace webrtc diff --git a/video/adaptation/balanced_constraint.h b/video/adaptation/balanced_constraint.h new file mode 100644 index 0000000000..0bbd670408 --- /dev/null +++ b/video/adaptation/balanced_constraint.h @@ -0,0 +1,51 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_ +#define VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_ + +#include + +#include "absl/types/optional.h" +#include "api/sequence_checker.h" +#include "call/adaptation/adaptation_constraint.h" +#include "call/adaptation/degradation_preference_provider.h" +#include "rtc_base/experiments/balanced_degradation_settings.h" +#include "rtc_base/system/no_unique_address.h" + +namespace webrtc { + +class BalancedConstraint : public AdaptationConstraint { + public: + explicit BalancedConstraint( + DegradationPreferenceProvider* degradation_preference_provider); + ~BalancedConstraint() override = default; + + void OnEncoderTargetBitrateUpdated( + absl::optional encoder_target_bitrate_bps); + + // AdaptationConstraint implementation. + std::string Name() const override { return "BalancedConstraint"; } + bool IsAdaptationUpAllowed( + const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions_before, + const VideoSourceRestrictions& restrictions_after) const override; + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + absl::optional encoder_target_bitrate_bps_ + RTC_GUARDED_BY(&sequence_checker_); + const BalancedDegradationSettings balanced_settings_; + const DegradationPreferenceProvider* degradation_preference_provider_; +}; + +} // namespace webrtc + +#endif // VIDEO_ADAPTATION_BALANCED_CONSTRAINT_H_ diff --git a/video/adaptation/bitrate_constraint.cc b/video/adaptation/bitrate_constraint.cc new file mode 100644 index 0000000000..cd61e555cd --- /dev/null +++ b/video/adaptation/bitrate_constraint.cc @@ -0,0 +1,83 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "video/adaptation/bitrate_constraint.h" + +#include +#include + +#include "api/sequence_checker.h" +#include "call/adaptation/video_stream_adapter.h" +#include "video/adaptation/video_stream_encoder_resource_manager.h" + +namespace webrtc { + +BitrateConstraint::BitrateConstraint() + : encoder_settings_(absl::nullopt), + encoder_target_bitrate_bps_(absl::nullopt) { + sequence_checker_.Detach(); +} + +void BitrateConstraint::OnEncoderSettingsUpdated( + absl::optional encoder_settings) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + encoder_settings_ = std::move(encoder_settings); +} + +void BitrateConstraint::OnEncoderTargetBitrateUpdated( + absl::optional encoder_target_bitrate_bps) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + encoder_target_bitrate_bps_ = std::move(encoder_target_bitrate_bps); +} + +bool BitrateConstraint::IsAdaptationUpAllowed( + const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions_before, + const VideoSourceRestrictions& restrictions_after) const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + // Make sure bitrate limits are not violated. + if (DidIncreaseResolution(restrictions_before, restrictions_after)) { + if (!encoder_settings_.has_value()) { + return true; + } + + uint32_t bitrate_bps = encoder_target_bitrate_bps_.value_or(0); + if (bitrate_bps == 0) { + return true; + } + + if (VideoStreamEncoderResourceManager::IsSimulcast( + encoder_settings_->encoder_config())) { + // Resolution bitrate limits usage is restricted to singlecast. 
+ return true; + } + + absl::optional current_frame_size_px = + input_state.single_active_stream_pixels(); + if (!current_frame_size_px.has_value()) { + return true; + } + + absl::optional bitrate_limits = + encoder_settings_->encoder_info().GetEncoderBitrateLimitsForResolution( + // Need some sort of expected resulting pixels to be used + // instead of unrestricted. + GetHigherResolutionThan(*current_frame_size_px)); + + if (bitrate_limits.has_value()) { + RTC_DCHECK_GE(bitrate_limits->frame_size_pixels, *current_frame_size_px); + return bitrate_bps >= + static_cast(bitrate_limits->min_start_bitrate_bps); + } + } + return true; +} + +} // namespace webrtc diff --git a/video/adaptation/bitrate_constraint.h b/video/adaptation/bitrate_constraint.h new file mode 100644 index 0000000000..a608e5db5d --- /dev/null +++ b/video/adaptation/bitrate_constraint.h @@ -0,0 +1,53 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_ +#define VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_ + +#include + +#include "absl/types/optional.h" +#include "api/sequence_checker.h" +#include "call/adaptation/adaptation_constraint.h" +#include "call/adaptation/encoder_settings.h" +#include "call/adaptation/video_source_restrictions.h" +#include "call/adaptation/video_stream_input_state.h" +#include "rtc_base/system/no_unique_address.h" + +namespace webrtc { + +class BitrateConstraint : public AdaptationConstraint { + public: + BitrateConstraint(); + ~BitrateConstraint() override = default; + + void OnEncoderSettingsUpdated( + absl::optional encoder_settings); + void OnEncoderTargetBitrateUpdated( + absl::optional encoder_target_bitrate_bps); + + // AdaptationConstraint implementation. + std::string Name() const override { return "BitrateConstraint"; } + bool IsAdaptationUpAllowed( + const VideoStreamInputState& input_state, + const VideoSourceRestrictions& restrictions_before, + const VideoSourceRestrictions& restrictions_after) const override; + + private: + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + absl::optional encoder_settings_ + RTC_GUARDED_BY(&sequence_checker_); + absl::optional encoder_target_bitrate_bps_ + RTC_GUARDED_BY(&sequence_checker_); +}; + +} // namespace webrtc + +#endif // VIDEO_ADAPTATION_BITRATE_CONSTRAINT_H_ diff --git a/video/adaptation/bitrate_constraint_unittest.cc b/video/adaptation/bitrate_constraint_unittest.cc new file mode 100644 index 0000000000..d7865a12ed --- /dev/null +++ b/video/adaptation/bitrate_constraint_unittest.cc @@ -0,0 +1,191 @@ +/* + * Copyright 2021 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "video/adaptation/bitrate_constraint.h" + +#include +#include + +#include "api/video_codecs/video_encoder.h" +#include "call/adaptation/encoder_settings.h" +#include "call/adaptation/test/fake_frame_rate_provider.h" +#include "call/adaptation/video_source_restrictions.h" +#include "call/adaptation/video_stream_input_state_provider.h" +#include "test/gtest.h" + +namespace webrtc { + +namespace { +const VideoSourceRestrictions k360p{/*max_pixels_per_frame=*/640 * 360, + /*target_pixels_per_frame=*/640 * 360, + /*max_frame_rate=*/30}; +const VideoSourceRestrictions k720p{/*max_pixels_per_frame=*/1280 * 720, + /*target_pixels_per_frame=*/1280 * 720, + /*max_frame_rate=*/30}; + +void FillCodecConfig(VideoCodec* video_codec, + VideoEncoderConfig* encoder_config, + int width_px, + int height_px, + std::vector active_flags) { + size_t num_layers = active_flags.size(); + video_codec->codecType = kVideoCodecVP8; + video_codec->numberOfSimulcastStreams = num_layers; + + encoder_config->number_of_streams = num_layers; + encoder_config->simulcast_layers.resize(num_layers); + + for (size_t layer_idx = 0; layer_idx < num_layers; ++layer_idx) { + int layer_width_px = width_px >> (num_layers - 1 - layer_idx); + int layer_height_px = height_px >> (num_layers - 1 - layer_idx); + + video_codec->simulcastStream[layer_idx].active = active_flags[layer_idx]; + video_codec->simulcastStream[layer_idx].width = layer_width_px; + video_codec->simulcastStream[layer_idx].height = layer_height_px; + + encoder_config->simulcast_layers[layer_idx].active = + active_flags[layer_idx]; + encoder_config->simulcast_layers[layer_idx].width = layer_width_px; + encoder_config->simulcast_layers[layer_idx].height = layer_height_px; + } +} + +constexpr int kStartBitrateBps720p = 1000000; + +VideoEncoder::EncoderInfo MakeEncoderInfo() { + VideoEncoder::EncoderInfo encoder_info; + 
encoder_info.resolution_bitrate_limits = { + {640 * 360, 500000, 0, 5000000}, + {1280 * 720, kStartBitrateBps720p, 0, 5000000}, + {1920 * 1080, 2000000, 0, 5000000}}; + return encoder_info; +} + +} // namespace + +class BitrateConstraintTest : public ::testing::Test { + public: + BitrateConstraintTest() + : frame_rate_provider_(), input_state_provider_(&frame_rate_provider_) {} + + protected: + void OnEncoderSettingsUpdated(int width_px, + int height_px, + std::vector active_flags) { + VideoCodec video_codec; + VideoEncoderConfig encoder_config; + FillCodecConfig(&video_codec, &encoder_config, width_px, height_px, + active_flags); + + EncoderSettings encoder_settings(MakeEncoderInfo(), + std::move(encoder_config), video_codec); + bitrate_constraint_.OnEncoderSettingsUpdated(encoder_settings); + input_state_provider_.OnEncoderSettingsChanged(encoder_settings); + } + + FakeFrameRateProvider frame_rate_provider_; + VideoStreamInputStateProvider input_state_provider_; + BitrateConstraint bitrate_constraint_; +}; + +TEST_F(BitrateConstraintTest, AdaptUpAllowedAtSinglecastIfBitrateIsEnough) { + OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360, + /*active_flags=*/{true}); + + bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p); + + EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed( + input_state_provider_.InputState(), + /*restrictions_before=*/k360p, + /*restrictions_after=*/k720p)); +} + +TEST_F(BitrateConstraintTest, + AdaptUpDisallowedAtSinglecastIfBitrateIsNotEnough) { + OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360, + /*active_flags=*/{true}); + + // 1 bps less than needed for 720p. 
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1); + + EXPECT_FALSE(bitrate_constraint_.IsAdaptationUpAllowed( + input_state_provider_.InputState(), + /*restrictions_before=*/k360p, + /*restrictions_after=*/k720p)); +} + +TEST_F(BitrateConstraintTest, + AdaptUpAllowedAtSinglecastUpperLayerActiveIfBitrateIsEnough) { + OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360, + /*active_flags=*/{false, true}); + + bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p); + + EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed( + input_state_provider_.InputState(), + /*restrictions_before=*/k360p, + /*restrictions_after=*/k720p)); +} + +TEST_F(BitrateConstraintTest, + AdaptUpDisallowedAtSinglecastUpperLayerActiveIfBitrateIsNotEnough) { + OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360, + /*active_flags=*/{false, true}); + + // 1 bps less than needed for 720p. + bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1); + + EXPECT_FALSE(bitrate_constraint_.IsAdaptationUpAllowed( + input_state_provider_.InputState(), + /*restrictions_before=*/k360p, + /*restrictions_after=*/k720p)); +} + +TEST_F(BitrateConstraintTest, + AdaptUpAllowedAtSinglecastLowestLayerActiveIfBitrateIsNotEnough) { + OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360, + /*active_flags=*/{true, false}); + + // 1 bps less than needed for 720p. + bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1); + + EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed( + input_state_provider_.InputState(), + /*restrictions_before=*/k360p, + /*restrictions_after=*/k720p)); +} + +TEST_F(BitrateConstraintTest, AdaptUpAllowedAtSimulcastIfBitrateIsNotEnough) { + OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360, + /*active_flags=*/{true, true}); + + // 1 bps less than needed for 720p. 
+ bitrate_constraint_.OnEncoderTargetBitrateUpdated(kStartBitrateBps720p - 1); + + EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed( + input_state_provider_.InputState(), + /*restrictions_before=*/k360p, + /*restrictions_after=*/k720p)); +} + +TEST_F(BitrateConstraintTest, + AdaptUpInFpsAllowedAtNoResolutionIncreaseIfBitrateIsNotEnough) { + OnEncoderSettingsUpdated(/*width_px=*/640, /*height_px=*/360, + /*active_flags=*/{true}); + + bitrate_constraint_.OnEncoderTargetBitrateUpdated(1); + + EXPECT_TRUE(bitrate_constraint_.IsAdaptationUpAllowed( + input_state_provider_.InputState(), + /*restrictions_before=*/k360p, + /*restrictions_after=*/k360p)); +} + +} // namespace webrtc diff --git a/video/adaptation/encode_usage_resource.cc b/video/adaptation/encode_usage_resource.cc index 49531a3aa4..c42c63f4b7 100644 --- a/video/adaptation/encode_usage_resource.cc +++ b/video/adaptation/encode_usage_resource.cc @@ -13,20 +13,29 @@ #include #include -#include "api/video/video_adaptation_reason.h" #include "rtc_base/checks.h" +#include "rtc_base/ref_counted_object.h" namespace webrtc { +// static +rtc::scoped_refptr EncodeUsageResource::Create( + std::unique_ptr overuse_detector) { + return rtc::make_ref_counted( + std::move(overuse_detector)); +} + EncodeUsageResource::EncodeUsageResource( std::unique_ptr overuse_detector) - : rtc::RefCountedObject(), + : VideoStreamEncoderResource("EncoderUsageResource"), overuse_detector_(std::move(overuse_detector)), is_started_(false), target_frame_rate_(absl::nullopt) { RTC_DCHECK(overuse_detector_); } +EncodeUsageResource::~EncodeUsageResource() {} + bool EncodeUsageResource::is_started() const { RTC_DCHECK_RUN_ON(encoder_queue()); return is_started_; @@ -79,24 +88,12 @@ void EncodeUsageResource::OnEncodeCompleted( void EncodeUsageResource::AdaptUp() { RTC_DCHECK_RUN_ON(encoder_queue()); - // Reference counting guarantees that this object is still alive by the time - // the task is executed. 
- resource_adaptation_queue()->PostTask( - [this_ref = rtc::scoped_refptr(this)] { - RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue()); - this_ref->OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse); - }); + OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse); } void EncodeUsageResource::AdaptDown() { RTC_DCHECK_RUN_ON(encoder_queue()); - // Reference counting guarantees that this object is still alive by the time - // the task is executed. - resource_adaptation_queue()->PostTask( - [this_ref = rtc::scoped_refptr(this)] { - RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue()); - this_ref->OnResourceUsageStateMeasured(ResourceUsageState::kOveruse); - }); + OnResourceUsageStateMeasured(ResourceUsageState::kOveruse); } int EncodeUsageResource::TargetFrameRateAsInt() { diff --git a/video/adaptation/encode_usage_resource.h b/video/adaptation/encode_usage_resource.h index 3c6f02b243..257988fa12 100644 --- a/video/adaptation/encode_usage_resource.h +++ b/video/adaptation/encode_usage_resource.h @@ -15,11 +15,12 @@ #include #include "absl/types/optional.h" +#include "api/scoped_refptr.h" #include "api/video/video_adaptation_reason.h" -#include "call/adaptation/resource.h" #include "rtc_base/ref_counted_object.h" #include "rtc_base/task_queue.h" #include "video/adaptation/overuse_frame_detector.h" +#include "video/adaptation/video_stream_encoder_resource.h" namespace webrtc { @@ -28,11 +29,15 @@ namespace webrtc { // indirectly by usage in the ResourceAdaptationProcessor (which is only tested // because of its usage in VideoStreamEncoder); all tests are currently in // video_stream_encoder_unittest.cc. 
-class EncodeUsageResource : public rtc::RefCountedObject, +class EncodeUsageResource : public VideoStreamEncoderResource, public OveruseFrameDetectorObserverInterface { public: + static rtc::scoped_refptr Create( + std::unique_ptr overuse_detector); + explicit EncodeUsageResource( std::unique_ptr overuse_detector); + ~EncodeUsageResource() override; bool is_started() const; @@ -51,8 +56,6 @@ class EncodeUsageResource : public rtc::RefCountedObject, void AdaptUp() override; void AdaptDown() override; - std::string name() const override { return "EncoderUsageResource"; } - private: int TargetFrameRateAsInt(); diff --git a/video/adaptation/overuse_frame_detector.h b/video/adaptation/overuse_frame_detector.h index 16217fff84..2b4dd61d21 100644 --- a/video/adaptation/overuse_frame_detector.h +++ b/video/adaptation/overuse_frame_detector.h @@ -15,12 +15,13 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_base.h" #include "api/video/video_stream_encoder_observer.h" #include "rtc_base/constructor_magic.h" #include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/numerics/exp_filter.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/thread_annotations.h" @@ -134,7 +135,7 @@ class OveruseFrameDetector { static std::unique_ptr CreateProcessingUsage( const CpuOveruseOptions& options); - SequenceChecker task_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker task_checker_; // Owned by the task queue from where StartCheckForOveruse is called. 
RepeatingTaskHandle check_overuse_task_ RTC_GUARDED_BY(task_checker_); diff --git a/video/adaptation/overuse_frame_detector_unittest.cc b/video/adaptation/overuse_frame_detector_unittest.cc index d4bf910faa..37ad974a4c 100644 --- a/video/adaptation/overuse_frame_detector_unittest.cc +++ b/video/adaptation/overuse_frame_detector_unittest.cc @@ -455,6 +455,8 @@ TEST_F(OveruseFrameDetectorTest, RunOnTqNormalUsage) { EXPECT_TRUE(event.Wait(10000)); } +// TODO(crbug.com/webrtc/12846): investigate why the test fails on MAC bots. +#if !defined(WEBRTC_MAC) TEST_F(OveruseFrameDetectorTest, MaxIntervalScalesWithFramerate) { const int kCapturerMaxFrameRate = 30; const int kEncodeMaxFrameRate = 20; // Maximum fps the encoder can sustain. @@ -490,6 +492,7 @@ TEST_F(OveruseFrameDetectorTest, MaxIntervalScalesWithFramerate) { processing_time_us); overuse_detector_->CheckForOveruse(observer_); } +#endif TEST_F(OveruseFrameDetectorTest, RespectsMinFramerate) { const int kMinFrameRate = 7; // Minimum fps allowed by current detector impl. @@ -835,7 +838,7 @@ TEST_F(OveruseFrameDetectorTest2, ConvergesSlowly) { // Should have started to approach correct load of 15%, but not very far. EXPECT_LT(UsagePercent(), InitialUsage()); - EXPECT_GT(UsagePercent(), (InitialUsage() * 3 + 15) / 4); + EXPECT_GT(UsagePercent(), (InitialUsage() * 3 + 8) / 4); // Run for roughly 10s more, should now be closer. InsertAndSendFramesWithInterval(300, kFrameIntervalUs, kWidth, kHeight, diff --git a/video/adaptation/pixel_limit_resource.cc b/video/adaptation/pixel_limit_resource.cc new file mode 100644 index 0000000000..789dac2c0a --- /dev/null +++ b/video/adaptation/pixel_limit_resource.cc @@ -0,0 +1,102 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "video/adaptation/pixel_limit_resource.h" + +#include "api/sequence_checker.h" +#include "api/units/time_delta.h" +#include "call/adaptation/video_stream_adapter.h" +#include "rtc_base/checks.h" +#include "rtc_base/ref_counted_object.h" + +namespace webrtc { + +namespace { + +constexpr TimeDelta kResourceUsageCheckIntervalMs = TimeDelta::Seconds(5); + +} // namespace + +// static +rtc::scoped_refptr PixelLimitResource::Create( + TaskQueueBase* task_queue, + VideoStreamInputStateProvider* input_state_provider) { + return rtc::make_ref_counted(task_queue, + input_state_provider); +} + +PixelLimitResource::PixelLimitResource( + TaskQueueBase* task_queue, + VideoStreamInputStateProvider* input_state_provider) + : task_queue_(task_queue), + input_state_provider_(input_state_provider), + max_pixels_(absl::nullopt) { + RTC_DCHECK(task_queue_); + RTC_DCHECK(input_state_provider_); +} + +PixelLimitResource::~PixelLimitResource() { + RTC_DCHECK(!listener_); + RTC_DCHECK(!repeating_task_.Running()); +} + +void PixelLimitResource::SetMaxPixels(int max_pixels) { + RTC_DCHECK_RUN_ON(task_queue_); + max_pixels_ = max_pixels; +} + +void PixelLimitResource::SetResourceListener(ResourceListener* listener) { + RTC_DCHECK_RUN_ON(task_queue_); + listener_ = listener; + if (listener_) { + repeating_task_.Stop(); + repeating_task_ = RepeatingTaskHandle::Start(task_queue_, [&] { + RTC_DCHECK_RUN_ON(task_queue_); + if (!listener_) { + // We don't have a listener so resource adaptation must not be running, + // try again later. + return kResourceUsageCheckIntervalMs; + } + if (!max_pixels_.has_value()) { + // No pixel limit configured yet, try again later. 
+ return kResourceUsageCheckIntervalMs;
+ }
+ absl::optional frame_size_pixels =
+ input_state_provider_->InputState().frame_size_pixels();
+ if (!frame_size_pixels.has_value()) {
+ // We haven't observed a frame yet so we don't know if it's going to be
+ // too big or too small, try again later.
+ return kResourceUsageCheckIntervalMs;
+ }
+ int current_pixels = frame_size_pixels.value();
+ int target_pixel_upper_bounds = max_pixels_.value();
+ // To avoid toggling, we allow any resolutions between
+ // |target_pixel_upper_bounds| and video_stream_adapter.h's
+ // GetLowerResolutionThan(). These are the pixels we end up with if we adapt
+ // down from |target_pixel_upper_bounds|.
+ int target_pixels_lower_bounds =
+ GetLowerResolutionThan(target_pixel_upper_bounds);
+ if (current_pixels > target_pixel_upper_bounds) {
+ listener_->OnResourceUsageStateMeasured(this,
+ ResourceUsageState::kOveruse);
+ } else if (current_pixels < target_pixels_lower_bounds) {
+ listener_->OnResourceUsageStateMeasured(this,
+ ResourceUsageState::kUnderuse);
+ }
+ return kResourceUsageCheckIntervalMs;
+ });
+ } else {
+ repeating_task_.Stop();
+ }
+ // The task must be running if we have a listener.
+ RTC_DCHECK(repeating_task_.Running() || !listener_);
+}
+
+} // namespace webrtc
diff --git a/video/adaptation/pixel_limit_resource.h b/video/adaptation/pixel_limit_resource.h
new file mode 100644
index 0000000000..b42f92434f
--- /dev/null
+++ b/video/adaptation/pixel_limit_resource.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */ + +#ifndef VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_ +#define VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_ + +#include + +#include "absl/types/optional.h" +#include "api/adaptation/resource.h" +#include "api/scoped_refptr.h" +#include "call/adaptation/video_stream_input_state_provider.h" +#include "rtc_base/task_utils/repeating_task.h" +#include "rtc_base/thread_annotations.h" + +namespace webrtc { + +// An adaptation resource designed to be used in the TestBed. Used to simulate +// being CPU limited. +// +// Periodically reports "overuse" or "underuse" (every 5 seconds) until the +// stream is within the bounds specified in terms of a maximum resolution and +// one resolution step lower than that (this avoids toggling when this is the +// only resource in play). When multiple resources come in to play some amount +// of toggling is still possible in edge cases but that is OK for testing +// purposes. +class PixelLimitResource : public Resource { + public: + static rtc::scoped_refptr Create( + TaskQueueBase* task_queue, + VideoStreamInputStateProvider* input_state_provider); + + PixelLimitResource(TaskQueueBase* task_queue, + VideoStreamInputStateProvider* input_state_provider); + ~PixelLimitResource() override; + + void SetMaxPixels(int max_pixels); + + // Resource implementation. 
+ std::string Name() const override { return "PixelLimitResource"; } + void SetResourceListener(ResourceListener* listener) override; + + private: + TaskQueueBase* const task_queue_; + VideoStreamInputStateProvider* const input_state_provider_; + absl::optional max_pixels_ RTC_GUARDED_BY(task_queue_); + webrtc::ResourceListener* listener_ RTC_GUARDED_BY(task_queue_); + RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(task_queue_); +}; + +} // namespace webrtc + +#endif // VIDEO_ADAPTATION_PIXEL_LIMIT_RESOURCE_H_ diff --git a/video/adaptation/pixel_limit_resource_unittest.cc b/video/adaptation/pixel_limit_resource_unittest.cc new file mode 100644 index 0000000000..7b633b3f68 --- /dev/null +++ b/video/adaptation/pixel_limit_resource_unittest.cc @@ -0,0 +1,147 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+#include "video/adaptation/pixel_limit_resource.h"
+
+#include 
+#include 
+
+#include "api/units/timestamp.h"
+#include "call/adaptation/test/fake_video_stream_input_state_provider.h"
+#include "call/adaptation/test/mock_resource_listener.h"
+#include "call/adaptation/video_stream_adapter.h"
+#include "rtc_base/task_queue_for_test.h"
+#include "rtc_base/task_utils/to_queued_task.h"
+#include "test/gmock.h"
+#include "test/gtest.h"
+#include "test/time_controller/simulated_time_controller.h"
+
+using testing::_;
+
+namespace webrtc {
+
+namespace {
+
+constexpr TimeDelta kResourceUsageCheckIntervalMs = TimeDelta::Seconds(5);
+
+} // namespace
+
+class PixelLimitResourceTest : public ::testing::Test {
+ public:
+ PixelLimitResourceTest()
+ : time_controller_(Timestamp::Micros(1234)),
+ task_queue_(time_controller_.GetTaskQueueFactory()->CreateTaskQueue(
+ "TestQueue",
+ TaskQueueFactory::Priority::NORMAL)),
+ input_state_provider_() {}
+
+ void SetCurrentPixels(int current_pixels) {
+ input_state_provider_.SetInputState(current_pixels, 30, current_pixels);
+ }
+
+ void RunTaskOnTaskQueue(std::unique_ptr task) {
+ task_queue_->PostTask(std::move(task));
+ time_controller_.AdvanceTime(TimeDelta::Millis(0));
+ }
+
+ protected:
+ // Posted tasks, including repeated tasks, are executed when simulated time is
+ // advanced by time_controller_.AdvanceTime().
+ GlobalSimulatedTimeController time_controller_;
+ std::unique_ptr task_queue_;
+ FakeVideoStreamInputStateProvider input_state_provider_;
+};
+
+TEST_F(PixelLimitResourceTest, ResourceIsSilentByDefault) {
+ // Because our mock is strict, the test would fail if
+ // OnResourceUsageStateMeasured() is invoked.
+ testing::StrictMock resource_listener;
+ RunTaskOnTaskQueue(ToQueuedTask([&]() {
+ rtc::scoped_refptr pixel_limit_resource =
+ PixelLimitResource::Create(task_queue_.get(), &input_state_provider_);
+ pixel_limit_resource->SetResourceListener(&resource_listener);
+ // Set a current pixel count.
+ SetCurrentPixels(1280 * 720); + // Advance a significant amount of time. + time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 10); + pixel_limit_resource->SetResourceListener(nullptr); + })); +} + +TEST_F(PixelLimitResourceTest, + OveruseIsReportedWhileCurrentPixelsIsGreaterThanMaxPixels) { + constexpr int kMaxPixels = 640 * 480; + testing::StrictMock resource_listener; + RunTaskOnTaskQueue(ToQueuedTask([&]() { + rtc::scoped_refptr pixel_limit_resource = + PixelLimitResource::Create(task_queue_.get(), &input_state_provider_); + pixel_limit_resource->SetResourceListener(&resource_listener); + time_controller_.AdvanceTime(TimeDelta::Millis(0)); + + pixel_limit_resource->SetMaxPixels(kMaxPixels); + SetCurrentPixels(kMaxPixels + 1); + EXPECT_CALL(resource_listener, + OnResourceUsageStateMeasured(_, ResourceUsageState::kOveruse)) + .Times(1); + time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs); + + // As long as the current pixels has not updated, the overuse signal is + // repeated at a fixed interval. + EXPECT_CALL(resource_listener, + OnResourceUsageStateMeasured(_, ResourceUsageState::kOveruse)) + .Times(3); + time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 3); + + // When the overuse signal has resulted in a lower resolution, the overuse + // signals stops. 
+ SetCurrentPixels(kMaxPixels); + EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_, _)).Times(0); + time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 3); + + pixel_limit_resource->SetResourceListener(nullptr); + })); +} + +TEST_F(PixelLimitResourceTest, + UnderuseIsReportedWhileCurrentPixelsIsLessThanMinPixels) { + constexpr int kMaxPixels = 640 * 480; + const int kMinPixels = GetLowerResolutionThan(kMaxPixels); + testing::StrictMock resource_listener; + RunTaskOnTaskQueue(ToQueuedTask([&]() { + rtc::scoped_refptr pixel_limit_resource = + PixelLimitResource::Create(task_queue_.get(), &input_state_provider_); + pixel_limit_resource->SetResourceListener(&resource_listener); + time_controller_.AdvanceTime(TimeDelta::Millis(0)); + + pixel_limit_resource->SetMaxPixels(kMaxPixels); + SetCurrentPixels(kMinPixels - 1); + EXPECT_CALL(resource_listener, + OnResourceUsageStateMeasured(_, ResourceUsageState::kUnderuse)) + .Times(1); + time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs); + + // As long as the current pixels has not updated, the underuse signal is + // repeated at a fixed interval. + EXPECT_CALL(resource_listener, + OnResourceUsageStateMeasured(_, ResourceUsageState::kUnderuse)) + .Times(3); + time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 3); + + // When the underuse signal has resulted in a higher resolution, the + // underuse signals stops. + SetCurrentPixels(kMinPixels); + EXPECT_CALL(resource_listener, OnResourceUsageStateMeasured(_, _)).Times(0); + time_controller_.AdvanceTime(kResourceUsageCheckIntervalMs * 3); + + pixel_limit_resource->SetResourceListener(nullptr); + })); +} + +} // namespace webrtc diff --git a/video/adaptation/quality_rampup_experiment_helper.cc b/video/adaptation/quality_rampup_experiment_helper.cc new file mode 100644 index 0000000000..6d82503fc6 --- /dev/null +++ b/video/adaptation/quality_rampup_experiment_helper.cc @@ -0,0 +1,80 @@ +/* + * Copyright 2020 The WebRTC Project Authors. 
All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "video/adaptation/quality_rampup_experiment_helper.h" + +#include +#include + +#include "rtc_base/logging.h" + +namespace webrtc { + +QualityRampUpExperimentHelper::QualityRampUpExperimentHelper( + QualityRampUpExperimentListener* experiment_listener, + Clock* clock, + QualityRampupExperiment experiment) + : experiment_listener_(experiment_listener), + clock_(clock), + quality_rampup_experiment_(std::move(experiment)), + cpu_adapted_(false), + qp_resolution_adaptations_(0) { + RTC_DCHECK(experiment_listener_); + RTC_DCHECK(clock_); +} + +std::unique_ptr +QualityRampUpExperimentHelper::CreateIfEnabled( + QualityRampUpExperimentListener* experiment_listener, + Clock* clock) { + QualityRampupExperiment experiment = QualityRampupExperiment::ParseSettings(); + if (experiment.Enabled()) { + return std::unique_ptr( + new QualityRampUpExperimentHelper(experiment_listener, clock, + experiment)); + } + return nullptr; +} + +void QualityRampUpExperimentHelper::PerformQualityRampupExperiment( + rtc::scoped_refptr quality_scaler_resource, + DataRate bandwidth, + DataRate encoder_target_bitrate, + DataRate max_bitrate, + int pixels) { + if (!quality_scaler_resource->is_started()) + return; + + int64_t now_ms = clock_->TimeInMilliseconds(); + quality_rampup_experiment_.SetMaxBitrate(pixels, max_bitrate.kbps()); + + bool try_quality_rampup = false; + if (quality_rampup_experiment_.BwHigh(now_ms, bandwidth.kbps())) { + // Verify that encoder is at max bitrate and the QP is low. 
+ if (encoder_target_bitrate == max_bitrate && + quality_scaler_resource->QpFastFilterLow()) { + try_quality_rampup = true; + } + } + if (try_quality_rampup && qp_resolution_adaptations_ > 0 && !cpu_adapted_) { + experiment_listener_->OnQualityRampUp(); + } +} + +void QualityRampUpExperimentHelper::cpu_adapted(bool cpu_adapted) { + cpu_adapted_ = cpu_adapted; +} + +void QualityRampUpExperimentHelper::qp_resolution_adaptations( + int qp_resolution_adaptations) { + qp_resolution_adaptations_ = qp_resolution_adaptations; +} + +} // namespace webrtc diff --git a/video/adaptation/quality_rampup_experiment_helper.h b/video/adaptation/quality_rampup_experiment_helper.h new file mode 100644 index 0000000000..81be982e7c --- /dev/null +++ b/video/adaptation/quality_rampup_experiment_helper.h @@ -0,0 +1,68 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_ +#define VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_ + +#include + +#include "api/scoped_refptr.h" +#include "api/units/data_rate.h" +#include "rtc_base/experiments/quality_rampup_experiment.h" +#include "system_wrappers/include/clock.h" +#include "video/adaptation/quality_scaler_resource.h" + +namespace webrtc { + +class QualityRampUpExperimentListener { + public: + virtual ~QualityRampUpExperimentListener() = default; + virtual void OnQualityRampUp() = 0; +}; + +// Helper class for orchestrating the WebRTC-Video-QualityRampupSettings +// experiment. 
+class QualityRampUpExperimentHelper {
+ public:
+ // Returns a QualityRampUpExperimentHelper if the experiment is enabled,
+ // a nullptr otherwise.
+ static std::unique_ptr CreateIfEnabled(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock);
+
+ QualityRampUpExperimentHelper(const QualityRampUpExperimentHelper&) = delete;
+ QualityRampUpExperimentHelper& operator=(
+ const QualityRampUpExperimentHelper&) = delete;
+
+ void cpu_adapted(bool cpu_adapted);
+ void qp_resolution_adaptations(int qp_adaptations);
+
+ void PerformQualityRampupExperiment(
+ rtc::scoped_refptr quality_scaler_resource,
+ DataRate bandwidth,
+ DataRate encoder_target_bitrate,
+ DataRate max_bitrate,
+ int pixels);
+
+ private:
+ QualityRampUpExperimentHelper(
+ QualityRampUpExperimentListener* experiment_listener,
+ Clock* clock,
+ QualityRampupExperiment experiment);
+ QualityRampUpExperimentListener* const experiment_listener_;
+ Clock* clock_;
+ QualityRampupExperiment quality_rampup_experiment_;
+ bool cpu_adapted_;
+ int qp_resolution_adaptations_;
+};
+
+} // namespace webrtc
+
+#endif // VIDEO_ADAPTATION_QUALITY_RAMPUP_EXPERIMENT_HELPER_H_
diff --git a/video/adaptation/quality_scaler_resource.cc b/video/adaptation/quality_scaler_resource.cc
index 403f6080ca..c455252d45 100644
--- a/video/adaptation/quality_scaler_resource.cc
+++ b/video/adaptation/quality_scaler_resource.cc
@@ -12,27 +12,25 @@

#include 

+#include "rtc_base/checks.h"
#include "rtc_base/experiments/balanced_degradation_settings.h"
+#include "rtc_base/ref_counted_object.h"
+#include "rtc_base/task_utils/to_queued_task.h"
+#include "rtc_base/time_utils.h"

namespace webrtc {

+// static
+rtc::scoped_refptr QualityScalerResource::Create() {
+ return rtc::make_ref_counted();
+}
+
QualityScalerResource::QualityScalerResource()
- : rtc::RefCountedObject(),
- quality_scaler_(nullptr),
- num_handled_callbacks_(0),
- pending_callbacks_(),
- adaptation_processor_(nullptr),
- clear_qp_samples_(false) {}
+ :
VideoStreamEncoderResource("QualityScalerResource"), + quality_scaler_(nullptr) {} QualityScalerResource::~QualityScalerResource() { RTC_DCHECK(!quality_scaler_); - RTC_DCHECK(pending_callbacks_.empty()); -} - -void QualityScalerResource::SetAdaptationProcessor( - ResourceAdaptationProcessorInterface* adaptation_processor) { - RTC_DCHECK_RUN_ON(resource_adaptation_queue()); - adaptation_processor_ = adaptation_processor; } bool QualityScalerResource::is_started() const { @@ -50,9 +48,9 @@ void QualityScalerResource::StartCheckForOveruse( void QualityScalerResource::StopCheckForOveruse() { RTC_DCHECK_RUN_ON(encoder_queue()); + RTC_DCHECK(is_started()); // Ensure we have no pending callbacks. This makes it safe to destroy the // QualityScaler and even task queues with tasks in-flight. - AbortPendingCallbacks(); quality_scaler_.reset(); } @@ -74,19 +72,6 @@ void QualityScalerResource::OnEncodeCompleted(const EncodedImage& encoded_image, RTC_DCHECK_RUN_ON(encoder_queue()); if (quality_scaler_ && encoded_image.qp_ >= 0) { quality_scaler_->ReportQp(encoded_image.qp_, time_sent_in_us); - } else if (!quality_scaler_) { - // Reference counting guarantees that this object is still alive by the time - // the task is executed. - // TODO(webrtc:11553): this is a workaround to ensure that all quality - // scaler imposed limitations are removed once qualty scaler is disabled - // mid call. - // Instead it should be done at a higher layer in the same way for all - // resources. 
- resource_adaptation_queue()->PostTask( - [this_ref = rtc::scoped_refptr(this)] { - RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue()); - this_ref->OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse); - }); } } @@ -105,116 +90,12 @@ void QualityScalerResource::OnFrameDropped( } } -void QualityScalerResource::OnReportQpUsageHigh( - rtc::scoped_refptr callback) { - RTC_DCHECK_RUN_ON(encoder_queue()); - size_t callback_id = QueuePendingCallback(callback); - // Reference counting guarantees that this object is still alive by the time - // the task is executed. - resource_adaptation_queue()->PostTask( - [this_ref = rtc::scoped_refptr(this), - callback_id] { - RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue()); - this_ref->clear_qp_samples_ = false; - // If this OnResourceUsageStateMeasured() triggers an adaptation, - // OnAdaptationApplied() will occur between this line and the next. This - // allows modifying |clear_qp_samples_| based on the adaptation. - this_ref->OnResourceUsageStateMeasured(ResourceUsageState::kOveruse); - this_ref->HandlePendingCallback(callback_id, - this_ref->clear_qp_samples_); - }); -} - -void QualityScalerResource::OnReportQpUsageLow( - rtc::scoped_refptr callback) { - RTC_DCHECK_RUN_ON(encoder_queue()); - size_t callback_id = QueuePendingCallback(callback); - // Reference counting guarantees that this object is still alive by the time - // the task is executed. 
- resource_adaptation_queue()->PostTask( - [this_ref = rtc::scoped_refptr(this), - callback_id] { - RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue()); - this_ref->OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse); - this_ref->HandlePendingCallback(callback_id, true); - }); -} - -void QualityScalerResource::OnAdaptationApplied( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) { - RTC_DCHECK_RUN_ON(resource_adaptation_queue()); - // We only clear QP samples on adaptations triggered by the QualityScaler. - if (reason_resource != this) - return; - clear_qp_samples_ = true; - // If we're in "balanced" and the frame rate before and after adaptation did - // not differ that much, don't clear the QP samples and instead check for QP - // again in a short amount of time. This may trigger adapting down again soon. - // TODO(hbos): Can this be simplified by getting rid of special casing logic? - // For example, we could decide whether or not to clear QP samples based on - // how big the adaptation step was alone (regardless of degradation preference - // or what resource triggered the adaptation) and the QualityScaler could - // check for QP when it had enough QP samples rather than at a variable - // interval whose delay is calculated based on events such as these. Now there - // is much dependency on a specific OnReportQpUsageHigh() event and "balanced" - // but adaptations happening might not align with QualityScaler's CheckQpTask. 
- if (adaptation_processor_ && - adaptation_processor_->effective_degradation_preference() == - DegradationPreference::BALANCED && - DidDecreaseFrameRate(restrictions_before, restrictions_after)) { - absl::optional min_diff = BalancedDegradationSettings().MinFpsDiff( - input_state.frame_size_pixels().value()); - if (min_diff && input_state.frames_per_second() > 0) { - int fps_diff = input_state.frames_per_second() - - restrictions_after.max_frame_rate().value(); - if (fps_diff < min_diff.value()) { - clear_qp_samples_ = false; - } - } - } -} - -size_t QualityScalerResource::QueuePendingCallback( - rtc::scoped_refptr callback) { - RTC_DCHECK_RUN_ON(encoder_queue()); - pending_callbacks_.push(callback); - // The ID of a callback is its sequence number (1, 2, 3...). - return num_handled_callbacks_ + pending_callbacks_.size(); -} - -void QualityScalerResource::HandlePendingCallback(size_t callback_id, - bool clear_qp_samples) { - RTC_DCHECK_RUN_ON(resource_adaptation_queue()); - // Reference counting guarantees that this object is still alive by the time - // the task is executed. - encoder_queue()->PostTask( - [this_ref = rtc::scoped_refptr(this), callback_id, - clear_qp_samples] { - RTC_DCHECK_RUN_ON(this_ref->encoder_queue()); - if (this_ref->num_handled_callbacks_ >= callback_id) { - // The callback with this ID has already been handled. - // This happens if AbortPendingCallbacks() is called while the task is - // in flight. 
- return; - } - RTC_DCHECK(!this_ref->pending_callbacks_.empty()); - this_ref->pending_callbacks_.front()->OnQpUsageHandled( - clear_qp_samples); - ++this_ref->num_handled_callbacks_; - this_ref->pending_callbacks_.pop(); - }); +void QualityScalerResource::OnReportQpUsageHigh() { + OnResourceUsageStateMeasured(ResourceUsageState::kOveruse); } -void QualityScalerResource::AbortPendingCallbacks() { - RTC_DCHECK_RUN_ON(encoder_queue()); - while (!pending_callbacks_.empty()) { - pending_callbacks_.front()->OnQpUsageHandled(false); - ++num_handled_callbacks_; - pending_callbacks_.pop(); - } +void QualityScalerResource::OnReportQpUsageLow() { + OnResourceUsageStateMeasured(ResourceUsageState::kUnderuse); } } // namespace webrtc diff --git a/video/adaptation/quality_scaler_resource.h b/video/adaptation/quality_scaler_resource.h index 78685823c3..06c22ca3c6 100644 --- a/video/adaptation/quality_scaler_resource.h +++ b/video/adaptation/quality_scaler_resource.h @@ -15,32 +15,32 @@ #include #include +#include "absl/types/optional.h" +#include "api/scoped_refptr.h" #include "api/video/video_adaptation_reason.h" #include "api/video_codecs/video_encoder.h" -#include "call/adaptation/resource.h" +#include "call/adaptation/degradation_preference_provider.h" #include "call/adaptation/resource_adaptation_processor_interface.h" #include "modules/video_coding/utility/quality_scaler.h" -#include "rtc_base/critical_section.h" #include "rtc_base/ref_counted_object.h" #include "rtc_base/task_queue.h" +#include "video/adaptation/video_stream_encoder_resource.h" namespace webrtc { // Handles interaction with the QualityScaler. 
-class QualityScalerResource : public rtc::RefCountedObject, +class QualityScalerResource : public VideoStreamEncoderResource, public QualityScalerQpUsageHandlerInterface { public: + static rtc::scoped_refptr Create(); + QualityScalerResource(); ~QualityScalerResource() override; - void SetAdaptationProcessor( - ResourceAdaptationProcessorInterface* adaptation_processor); - bool is_started() const; void StartCheckForOveruse(VideoEncoder::QpThresholds qp_thresholds); void StopCheckForOveruse(); - void SetQpThresholds(VideoEncoder::QpThresholds qp_thresholds); bool QpFastFilterLow(); void OnEncodeCompleted(const EncodedImage& encoded_image, @@ -48,45 +48,12 @@ class QualityScalerResource : public rtc::RefCountedObject, void OnFrameDropped(EncodedImageCallback::DropReason reason); // QualityScalerQpUsageHandlerInterface implementation. - void OnReportQpUsageHigh( - rtc::scoped_refptr callback) - override; - void OnReportQpUsageLow( - rtc::scoped_refptr callback) - override; - - std::string name() const override { return "QualityScalerResource"; } - - // Resource implementation. - void OnAdaptationApplied( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) override; + void OnReportQpUsageHigh() override; + void OnReportQpUsageLow() override; private: - size_t QueuePendingCallback( - rtc::scoped_refptr - callback); - void HandlePendingCallback(size_t callback_id, bool clear_qp_samples); - void AbortPendingCallbacks(); - - // Members accessed on the encoder queue. std::unique_ptr quality_scaler_ RTC_GUARDED_BY(encoder_queue()); - // Every OnReportQpUsageHigh/Low() operation has a callback that MUST be - // invoked on the |encoder_queue_|. 
Because usage measurements are reported on - // the |encoder_queue_| but handled by the processor on the the - // |resource_adaptation_queue_|, handling a measurement entails a task queue - // "ping" round-trip. Multiple callbacks in-flight is thus possible. - size_t num_handled_callbacks_ RTC_GUARDED_BY(encoder_queue()); - std::queue> - pending_callbacks_ RTC_GUARDED_BY(encoder_queue()); - - // Members accessed on the adaptation queue. - ResourceAdaptationProcessorInterface* adaptation_processor_ - RTC_GUARDED_BY(resource_adaptation_queue()); - bool clear_qp_samples_ RTC_GUARDED_BY(resource_adaptation_queue()); }; } // namespace webrtc diff --git a/video/adaptation/quality_scaler_resource_unittest.cc b/video/adaptation/quality_scaler_resource_unittest.cc index 66f4e13870..1a3175af00 100644 --- a/video/adaptation/quality_scaler_resource_unittest.cc +++ b/video/adaptation/quality_scaler_resource_unittest.cc @@ -13,53 +13,27 @@ #include #include "absl/types/optional.h" -#include "api/task_queue/default_task_queue_factory.h" -#include "api/task_queue/task_queue_factory.h" +#include "api/task_queue/task_queue_base.h" #include "api/video_codecs/video_encoder.h" -#include "rtc_base/event.h" -#include "rtc_base/task_queue.h" +#include "call/adaptation/test/mock_resource_listener.h" +#include "test/gmock.h" #include "test/gtest.h" namespace webrtc { -namespace { +using testing::_; +using testing::Eq; +using testing::StrictMock; -const int kDefaultTimeout = 5000; +namespace { -class FakeQualityScalerQpUsageHandlerCallback - : public QualityScalerQpUsageHandlerCallbackInterface { +class FakeDegradationPreferenceProvider : public DegradationPreferenceProvider { public: - explicit FakeQualityScalerQpUsageHandlerCallback( - rtc::TaskQueue* encoder_queue) - : QualityScalerQpUsageHandlerCallbackInterface(), - encoder_queue_(encoder_queue), - is_handled_(false), - qp_usage_handled_event_(true /* manual_reset */, false), - clear_qp_samples_result_(absl::nullopt) {} - 
~FakeQualityScalerQpUsageHandlerCallback() override { - RTC_DCHECK(is_handled_) - << "The callback was destroyed without being invoked."; - } - - void OnQpUsageHandled(bool clear_qp_samples) override { - ASSERT_TRUE(encoder_queue_->IsCurrent()); - RTC_DCHECK(!is_handled_); - clear_qp_samples_result_ = clear_qp_samples; - is_handled_ = true; - qp_usage_handled_event_.Set(); - } + ~FakeDegradationPreferenceProvider() override = default; - bool is_handled() const { return is_handled_; } - rtc::Event* qp_usage_handled_event() { return &qp_usage_handled_event_; } - absl::optional clear_qp_samples_result() const { - return clear_qp_samples_result_; + DegradationPreference degradation_preference() const override { + return DegradationPreference::MAINTAIN_FRAMERATE; } - - private: - rtc::TaskQueue* const encoder_queue_; - bool is_handled_; - rtc::Event qp_usage_handled_event_; - absl::optional clear_qp_samples_result_; }; } // namespace @@ -67,91 +41,34 @@ class FakeQualityScalerQpUsageHandlerCallback class QualityScalerResourceTest : public ::testing::Test { public: QualityScalerResourceTest() - : task_queue_factory_(CreateDefaultTaskQueueFactory()), - resource_adaptation_queue_(task_queue_factory_->CreateTaskQueue( - "ResourceAdaptationQueue", - TaskQueueFactory::Priority::NORMAL)), - encoder_queue_(task_queue_factory_->CreateTaskQueue( - "EncoderQueue", - TaskQueueFactory::Priority::NORMAL)), - quality_scaler_resource_(new QualityScalerResource()) { - quality_scaler_resource_->Initialize(&encoder_queue_, - &resource_adaptation_queue_); - rtc::Event event; - encoder_queue_.PostTask([this, &event] { - quality_scaler_resource_->StartCheckForOveruse( - VideoEncoder::QpThresholds()); - event.Set(); - }); - event.Wait(kDefaultTimeout); + : quality_scaler_resource_(QualityScalerResource::Create()) { + quality_scaler_resource_->RegisterEncoderTaskQueue( + TaskQueueBase::Current()); + quality_scaler_resource_->SetResourceListener(&fake_resource_listener_); } - 
~QualityScalerResourceTest() { - rtc::Event event; - encoder_queue_.PostTask([this, &event] { - quality_scaler_resource_->StopCheckForOveruse(); - event.Set(); - }); - event.Wait(kDefaultTimeout); + ~QualityScalerResourceTest() override { + quality_scaler_resource_->SetResourceListener(nullptr); } protected: - const std::unique_ptr task_queue_factory_; - rtc::TaskQueue resource_adaptation_queue_; - rtc::TaskQueue encoder_queue_; + StrictMock fake_resource_listener_; + FakeDegradationPreferenceProvider degradation_preference_provider_; rtc::scoped_refptr quality_scaler_resource_; }; TEST_F(QualityScalerResourceTest, ReportQpHigh) { - rtc::scoped_refptr callback = - new FakeQualityScalerQpUsageHandlerCallback(&encoder_queue_); - encoder_queue_.PostTask([this, callback] { - quality_scaler_resource_->OnReportQpUsageHigh(callback); - }); - callback->qp_usage_handled_event()->Wait(kDefaultTimeout); + EXPECT_CALL(fake_resource_listener_, + OnResourceUsageStateMeasured(Eq(quality_scaler_resource_), + Eq(ResourceUsageState::kOveruse))); + quality_scaler_resource_->OnReportQpUsageHigh(); } TEST_F(QualityScalerResourceTest, ReportQpLow) { - rtc::scoped_refptr callback = - new FakeQualityScalerQpUsageHandlerCallback(&encoder_queue_); - encoder_queue_.PostTask([this, callback] { - quality_scaler_resource_->OnReportQpUsageLow(callback); - }); - callback->qp_usage_handled_event()->Wait(kDefaultTimeout); -} - -TEST_F(QualityScalerResourceTest, MultipleCallbacksInFlight) { - rtc::scoped_refptr callback1 = - new FakeQualityScalerQpUsageHandlerCallback(&encoder_queue_); - rtc::scoped_refptr callback2 = - new FakeQualityScalerQpUsageHandlerCallback(&encoder_queue_); - rtc::scoped_refptr callback3 = - new FakeQualityScalerQpUsageHandlerCallback(&encoder_queue_); - encoder_queue_.PostTask([this, callback1, callback2, callback3] { - quality_scaler_resource_->OnReportQpUsageHigh(callback1); - quality_scaler_resource_->OnReportQpUsageLow(callback2); - 
quality_scaler_resource_->OnReportQpUsageHigh(callback3); - }); - callback1->qp_usage_handled_event()->Wait(kDefaultTimeout); - callback2->qp_usage_handled_event()->Wait(kDefaultTimeout); - callback3->qp_usage_handled_event()->Wait(kDefaultTimeout); -} - -TEST_F(QualityScalerResourceTest, AbortPendingCallbacksAndStartAgain) { - rtc::scoped_refptr callback1 = - new FakeQualityScalerQpUsageHandlerCallback(&encoder_queue_); - rtc::scoped_refptr callback2 = - new FakeQualityScalerQpUsageHandlerCallback(&encoder_queue_); - encoder_queue_.PostTask([this, callback1, callback2] { - quality_scaler_resource_->OnReportQpUsageHigh(callback1); - quality_scaler_resource_->StopCheckForOveruse(); - EXPECT_TRUE(callback1->qp_usage_handled_event()->Wait(0)); - quality_scaler_resource_->StartCheckForOveruse( - VideoEncoder::QpThresholds()); - quality_scaler_resource_->OnReportQpUsageHigh(callback2); - }); - callback1->qp_usage_handled_event()->Wait(kDefaultTimeout); - callback2->qp_usage_handled_event()->Wait(kDefaultTimeout); + EXPECT_CALL(fake_resource_listener_, + OnResourceUsageStateMeasured(Eq(quality_scaler_resource_), + Eq(ResourceUsageState::kUnderuse))); + quality_scaler_resource_->OnReportQpUsageLow(); } } // namespace webrtc diff --git a/video/adaptation/video_stream_encoder_resource.cc b/video/adaptation/video_stream_encoder_resource.cc new file mode 100644 index 0000000000..d26da708b6 --- /dev/null +++ b/video/adaptation/video_stream_encoder_resource.cc @@ -0,0 +1,62 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "video/adaptation/video_stream_encoder_resource.h" + +#include +#include + +namespace webrtc { + +VideoStreamEncoderResource::VideoStreamEncoderResource(std::string name) + : lock_(), + name_(std::move(name)), + encoder_queue_(nullptr), + listener_(nullptr) {} + +VideoStreamEncoderResource::~VideoStreamEncoderResource() { + RTC_DCHECK(!listener_) + << "There is a listener depending on a VideoStreamEncoderResource being " + << "destroyed."; +} + +void VideoStreamEncoderResource::RegisterEncoderTaskQueue( + TaskQueueBase* encoder_queue) { + RTC_DCHECK(!encoder_queue_); + RTC_DCHECK(encoder_queue); + encoder_queue_ = encoder_queue; +} + +void VideoStreamEncoderResource::SetResourceListener( + ResourceListener* listener) { + // If you want to change listener you need to unregister the old listener by + // setting it to null first. + MutexLock crit(&lock_); + RTC_DCHECK(!listener_ || !listener) << "A listener is already set"; + listener_ = listener; +} + +std::string VideoStreamEncoderResource::Name() const { + return name_; +} + +void VideoStreamEncoderResource::OnResourceUsageStateMeasured( + ResourceUsageState usage_state) { + MutexLock crit(&lock_); + if (listener_) { + listener_->OnResourceUsageStateMeasured(this, usage_state); + } +} + +TaskQueueBase* VideoStreamEncoderResource::encoder_queue() const { + return encoder_queue_; +} + +} // namespace webrtc diff --git a/video/adaptation/video_stream_encoder_resource.h b/video/adaptation/video_stream_encoder_resource.h new file mode 100644 index 0000000000..e10f595757 --- /dev/null +++ b/video/adaptation/video_stream_encoder_resource.h @@ -0,0 +1,55 @@ +/* + * Copyright 2020 The WebRTC Project Authors. All rights reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_ +#define VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_ + +#include +#include + +#include "absl/types/optional.h" +#include "api/adaptation/resource.h" +#include "api/sequence_checker.h" +#include "api/task_queue/task_queue_base.h" +#include "call/adaptation/adaptation_constraint.h" +#include "rtc_base/synchronization/mutex.h" + +namespace webrtc { + +class VideoStreamEncoderResource : public Resource { + public: + ~VideoStreamEncoderResource() override; + + // Registering task queues must be performed as part of initialization. + void RegisterEncoderTaskQueue(TaskQueueBase* encoder_queue); + + // Resource implementation. + std::string Name() const override; + void SetResourceListener(ResourceListener* listener) override; + + protected: + explicit VideoStreamEncoderResource(std::string name); + + void OnResourceUsageStateMeasured(ResourceUsageState usage_state); + + // The caller is responsible for ensuring the task queue is still valid. + TaskQueueBase* encoder_queue() const; + + private: + mutable Mutex lock_; + const std::string name_; + // Treated as const after initialization. 
+ TaskQueueBase* encoder_queue_; + ResourceListener* listener_ RTC_GUARDED_BY(lock_); +}; + +} // namespace webrtc + +#endif // VIDEO_ADAPTATION_VIDEO_STREAM_ENCODER_RESOURCE_H_ diff --git a/video/adaptation/video_stream_encoder_resource_manager.cc b/video/adaptation/video_stream_encoder_resource_manager.cc index b309dd3455..2705bf9af7 100644 --- a/video/adaptation/video_stream_encoder_resource_manager.cc +++ b/video/adaptation/video_stream_encoder_resource_manager.cc @@ -10,24 +10,31 @@ #include "video/adaptation/video_stream_encoder_resource_manager.h" +#include + #include #include #include #include -#include #include #include "absl/algorithm/container.h" #include "absl/base/macros.h" +#include "api/adaptation/resource.h" +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_base.h" #include "api/video/video_adaptation_reason.h" #include "api/video/video_source_interface.h" -#include "call/adaptation/resource.h" #include "call/adaptation/video_source_restrictions.h" +#include "rtc_base/checks.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" +#include "rtc_base/ref_counted_object.h" #include "rtc_base/strings/string_builder.h" #include "rtc_base/time_utils.h" +#include "rtc_base/trace_event.h" +#include "system_wrappers/include/field_trial.h" +#include "video/adaptation/quality_scaler_resource.h" namespace webrtc { @@ -36,6 +43,9 @@ const int kDefaultInputPixelsHeight = 144; namespace { +constexpr const char* kPixelLimitResourceFieldTrialName = + "WebRTC-PixelLimitResource"; + bool IsResolutionScalingEnabled(DegradationPreference degradation_preference) { return degradation_preference == DegradationPreference::MAINTAIN_FRAMERATE || degradation_preference == DegradationPreference::BALANCED; @@ -53,15 +63,29 @@ std::string ToString(VideoAdaptationReason reason) { case VideoAdaptationReason::kCpu: return "cpu"; } + RTC_CHECK_NOTREACHED(); } -VideoAdaptationReason OtherReason(VideoAdaptationReason reason) { - switch 
(reason) { - case VideoAdaptationReason::kQuality: - return VideoAdaptationReason::kCpu; - case VideoAdaptationReason::kCpu: - return VideoAdaptationReason::kQuality; +std::vector GetActiveLayersFlags(const VideoCodec& codec) { + std::vector flags; + if (codec.codecType == VideoCodecType::kVideoCodecVP9) { + flags.resize(codec.VP9().numberOfSpatialLayers); + for (size_t i = 0; i < flags.size(); ++i) { + flags[i] = codec.spatialLayers[i].active; + } + } else { + flags.resize(codec.numberOfSimulcastStreams); + for (size_t i = 0; i < flags.size(); ++i) { + flags[i] = codec.simulcastStream[i].active; + } } + return flags; +} + +bool EqualFlags(const std::vector& a, const std::vector& b) { + if (a.size() != b.size()) + return false; + return std::equal(a.begin(), a.end(), b.begin()); } } // namespace @@ -75,7 +99,11 @@ class VideoStreamEncoderResourceManager::InitialFrameDropper { has_seen_first_bwe_drop_(false), set_start_bitrate_(DataRate::Zero()), set_start_bitrate_time_ms_(0), - initial_framedrop_(0) { + initial_framedrop_(0), + use_bandwidth_allocation_(false), + bandwidth_allocation_(DataRate::Zero()), + last_input_width_(0), + last_input_height_(0) { RTC_DCHECK(quality_scaler_resource_); } @@ -84,12 +112,27 @@ class VideoStreamEncoderResourceManager::InitialFrameDropper { return initial_framedrop_ < kMaxInitialFramedrop; } + absl::optional single_active_stream_pixels() const { + return single_active_stream_pixels_; + } + + absl::optional UseBandwidthAllocationBps() const { + return (use_bandwidth_allocation_ && + bandwidth_allocation_ > DataRate::Zero()) + ? absl::optional(bandwidth_allocation_.bps()) + : absl::nullopt; + } + // Input signals. 
void SetStartBitrate(DataRate start_bitrate, int64_t now_ms) { set_start_bitrate_ = start_bitrate; set_start_bitrate_time_ms_ = now_ms; } + void SetBandwidthAllocation(DataRate bandwidth_allocation) { + bandwidth_allocation_ = bandwidth_allocation; + } + void SetTargetBitrate(DataRate target_bitrate, int64_t now_ms) { if (set_start_bitrate_ > DataRate::Zero() && !has_seen_first_bwe_drop_ && quality_scaler_resource_->is_started() && @@ -110,9 +153,48 @@ class VideoStreamEncoderResourceManager::InitialFrameDropper { } } + void OnEncoderSettingsUpdated( + const VideoCodec& codec, + const VideoAdaptationCounters& adaptation_counters) { + std::vector active_flags = GetActiveLayersFlags(codec); + // Check if the source resolution has changed for the external reasons, + // i.e. without any adaptation from WebRTC. + const bool source_resolution_changed = + (last_input_width_ != codec.width || + last_input_height_ != codec.height) && + adaptation_counters.resolution_adaptations == + last_adaptation_counters_.resolution_adaptations; + if (!EqualFlags(active_flags, last_active_flags_) || + source_resolution_changed) { + // Streams configuration has changed. + // Initial frame drop must be enabled because BWE might be way too low + // for the selected resolution. + if (quality_scaler_resource_->is_started()) { + RTC_LOG(LS_INFO) << "Resetting initial_framedrop_ due to changed " + "stream parameters"; + initial_framedrop_ = 0; + if (single_active_stream_pixels_ && + VideoStreamAdapter::GetSingleActiveLayerPixels(codec) > + *single_active_stream_pixels_) { + // Resolution increased. 
+ use_bandwidth_allocation_ = true; + } + } + } + last_adaptation_counters_ = adaptation_counters; + last_active_flags_ = active_flags; + last_input_width_ = codec.width; + last_input_height_ = codec.height; + single_active_stream_pixels_ = + VideoStreamAdapter::GetSingleActiveLayerPixels(codec); + } + void OnFrameDroppedDueToSize() { ++initial_framedrop_; } - void OnMaybeEncodeFrame() { initial_framedrop_ = kMaxInitialFramedrop; } + void Disable() { + initial_framedrop_ = kMaxInitialFramedrop; + use_bandwidth_allocation_ = false; + } void OnQualityScalerSettingsUpdated() { if (quality_scaler_resource_->is_started()) { @@ -120,7 +202,7 @@ class VideoStreamEncoderResourceManager::InitialFrameDropper { initial_framedrop_ = 0; } else { // Quality scaling disabled so we shouldn't drop initial frames. - initial_framedrop_ = kMaxInitialFramedrop; + Disable(); } } @@ -136,202 +218,32 @@ class VideoStreamEncoderResourceManager::InitialFrameDropper { int64_t set_start_bitrate_time_ms_; // Counts how many frames we've dropped in the initial framedrop phase. 
int initial_framedrop_; + absl::optional single_active_stream_pixels_; + bool use_bandwidth_allocation_; + DataRate bandwidth_allocation_; + + std::vector last_active_flags_; + VideoAdaptationCounters last_adaptation_counters_; + int last_input_width_; + int last_input_height_; }; -VideoStreamEncoderResourceManager::PreventAdaptUpDueToActiveCounts:: - PreventAdaptUpDueToActiveCounts(VideoStreamEncoderResourceManager* manager) - : rtc::RefCountedObject(), - manager_(manager), - adaptation_processor_(nullptr) {} - -void VideoStreamEncoderResourceManager::PreventAdaptUpDueToActiveCounts:: - SetAdaptationProcessor( - ResourceAdaptationProcessorInterface* adaptation_processor) { - RTC_DCHECK_RUN_ON(resource_adaptation_queue()); - adaptation_processor_ = adaptation_processor; -} - -bool VideoStreamEncoderResourceManager::PreventAdaptUpDueToActiveCounts:: - IsAdaptationUpAllowed(const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const { - RTC_DCHECK_RUN_ON(resource_adaptation_queue()); - RTC_DCHECK(adaptation_processor_); - VideoAdaptationReason reason = - manager_->GetReasonFromResource(reason_resource); - { - // This is the same as |resource_adaptation_queue_|, but need to - // RTC_DCHECK_RUN_ON() both to avoid compiler error when accessing - // |manager_->active_counts_|. - RTC_DCHECK_RUN_ON(manager_->resource_adaptation_queue_); - // We can't adapt up if we're already at the highest setting. - // Note that this only includes counts relevant to the current degradation - // preference. e.g. we previously adapted resolution, now prefer adpating - // fps, only count the fps adaptations and not the previous resolution - // adaptations. - // TODO(hbos): Why would the reason matter? 
If a particular resource doesn't - // want us to go up it should prevent us from doing so itself rather than to - // have this catch-all reason- and stats-based approach. - int num_downgrades = - FilterVideoAdaptationCountersByDegradationPreference( - manager_->active_counts_[reason], - adaptation_processor_->effective_degradation_preference()) - .Total(); - RTC_DCHECK_GE(num_downgrades, 0); - return num_downgrades > 0; - } -} - -VideoStreamEncoderResourceManager:: - PreventIncreaseResolutionDueToBitrateResource:: - PreventIncreaseResolutionDueToBitrateResource( - VideoStreamEncoderResourceManager* manager) - : rtc::RefCountedObject(), - manager_(manager), - encoder_settings_(absl::nullopt), - encoder_target_bitrate_bps_(absl::nullopt) {} - -void VideoStreamEncoderResourceManager:: - PreventIncreaseResolutionDueToBitrateResource::OnEncoderSettingsUpdated( - absl::optional encoder_settings) { - RTC_DCHECK_RUN_ON(encoder_queue()); - resource_adaptation_queue()->PostTask( - [this_ref = - rtc::scoped_refptr( - this), - encoder_settings] { - RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue()); - this_ref->encoder_settings_ = std::move(encoder_settings); - }); -} - -void VideoStreamEncoderResourceManager:: - PreventIncreaseResolutionDueToBitrateResource:: - OnEncoderTargetBitrateUpdated( - absl::optional encoder_target_bitrate_bps) { - RTC_DCHECK_RUN_ON(encoder_queue()); - resource_adaptation_queue()->PostTask( - [this_ref = - rtc::scoped_refptr( - this), - encoder_target_bitrate_bps] { - RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue()); - this_ref->encoder_target_bitrate_bps_ = encoder_target_bitrate_bps; - }); -} - -bool VideoStreamEncoderResourceManager:: - PreventIncreaseResolutionDueToBitrateResource::IsAdaptationUpAllowed( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const { - 
RTC_DCHECK_RUN_ON(resource_adaptation_queue()); - VideoAdaptationReason reason = - manager_->GetReasonFromResource(reason_resource); - // If increasing resolution due to kQuality, make sure bitrate limits are not - // violated. - // TODO(hbos): Why are we allowing violating bitrate constraints if adapting - // due to CPU? Shouldn't this condition be checked regardless of reason? - if (reason == VideoAdaptationReason::kQuality && - DidIncreaseResolution(restrictions_before, restrictions_after)) { - uint32_t bitrate_bps = encoder_target_bitrate_bps_.value_or(0); - absl::optional bitrate_limits = - encoder_settings_.has_value() - ? encoder_settings_->encoder_info() - .GetEncoderBitrateLimitsForResolution( - // Need some sort of expected resulting pixels to be used - // instead of unrestricted. - GetHigherResolutionThan( - input_state.frame_size_pixels().value())) - : absl::nullopt; - if (bitrate_limits.has_value() && bitrate_bps != 0) { - RTC_DCHECK_GE(bitrate_limits->frame_size_pixels, - input_state.frame_size_pixels().value()); - return bitrate_bps >= - static_cast(bitrate_limits->min_start_bitrate_bps); - } - } - return true; -} - -VideoStreamEncoderResourceManager::PreventAdaptUpInBalancedResource:: - PreventAdaptUpInBalancedResource(VideoStreamEncoderResourceManager* manager) - : rtc::RefCountedObject(), - manager_(manager), - adaptation_processor_(nullptr), - encoder_target_bitrate_bps_(absl::nullopt) {} - -void VideoStreamEncoderResourceManager::PreventAdaptUpInBalancedResource:: - SetAdaptationProcessor( - ResourceAdaptationProcessorInterface* adaptation_processor) { - RTC_DCHECK_RUN_ON(resource_adaptation_queue()); - adaptation_processor_ = adaptation_processor; -} - -void VideoStreamEncoderResourceManager::PreventAdaptUpInBalancedResource:: - OnEncoderTargetBitrateUpdated( - absl::optional encoder_target_bitrate_bps) { - RTC_DCHECK_RUN_ON(encoder_queue()); - resource_adaptation_queue()->PostTask( - [this_ref = rtc::scoped_refptr(this), - 
encoder_target_bitrate_bps] { - RTC_DCHECK_RUN_ON(this_ref->resource_adaptation_queue()); - this_ref->encoder_target_bitrate_bps_ = encoder_target_bitrate_bps; - }); -} - -bool VideoStreamEncoderResourceManager::PreventAdaptUpInBalancedResource:: - IsAdaptationUpAllowed(const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const { - RTC_DCHECK_RUN_ON(resource_adaptation_queue()); - RTC_DCHECK(adaptation_processor_); - VideoAdaptationReason reason = - manager_->GetReasonFromResource(reason_resource); - // Don't adapt if BalancedDegradationSettings applies and determines this will - // exceed bitrate constraints. - // TODO(hbos): Why are we allowing violating balanced settings if adapting due - // CPU? Shouldn't this condition be checked regardless of reason? - if (reason == VideoAdaptationReason::kQuality && - adaptation_processor_->effective_degradation_preference() == - DegradationPreference::BALANCED && - !manager_->balanced_settings_.CanAdaptUp( - input_state.video_codec_type(), - input_state.frame_size_pixels().value(), - encoder_target_bitrate_bps_.value_or(0))) { - return false; - } - if (reason == VideoAdaptationReason::kQuality && - DidIncreaseResolution(restrictions_before, restrictions_after) && - !manager_->balanced_settings_.CanAdaptUpResolution( - input_state.video_codec_type(), - input_state.frame_size_pixels().value(), - encoder_target_bitrate_bps_.value_or(0))) { - return false; - } - return true; -} - VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager( VideoStreamInputStateProvider* input_state_provider, VideoStreamEncoderObserver* encoder_stats_observer, Clock* clock, bool experiment_cpu_load_estimator, - std::unique_ptr overuse_detector) - : prevent_adapt_up_due_to_active_counts_( - new PreventAdaptUpDueToActiveCounts(this)), - prevent_increase_resolution_due_to_bitrate_resource_( - new 
PreventIncreaseResolutionDueToBitrateResource(this)), - prevent_adapt_up_in_balanced_resource_( - new PreventAdaptUpInBalancedResource(this)), + std::unique_ptr overuse_detector, + DegradationPreferenceProvider* degradation_preference_provider) + : degradation_preference_provider_(degradation_preference_provider), + bitrate_constraint_(std::make_unique()), + balanced_constraint_(std::make_unique( + degradation_preference_provider_)), encode_usage_resource_( - new EncodeUsageResource(std::move(overuse_detector))), - quality_scaler_resource_(new QualityScalerResource()), + EncodeUsageResource::Create(std::move(overuse_detector))), + quality_scaler_resource_(QualityScalerResource::Create()), + pixel_limit_resource_(nullptr), encoder_queue_(nullptr), - resource_adaptation_queue_(nullptr), input_state_provider_(input_state_provider), adaptation_processor_(nullptr), encoder_stats_observer_(encoder_stats_observer), @@ -343,54 +255,34 @@ VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager( std::make_unique(quality_scaler_resource_)), quality_scaling_experiment_enabled_(QualityScalingExperiment::Enabled()), encoder_target_bitrate_bps_(absl::nullopt), - quality_rampup_done_(false), - quality_rampup_experiment_(QualityRampupExperiment::ParseSettings()), - encoder_settings_(absl::nullopt), - active_counts_() { - RTC_DCHECK(encoder_stats_observer_); - MapResourceToReason(prevent_adapt_up_due_to_active_counts_, - VideoAdaptationReason::kQuality); - MapResourceToReason(prevent_increase_resolution_due_to_bitrate_resource_, - VideoAdaptationReason::kQuality); - MapResourceToReason(prevent_adapt_up_in_balanced_resource_, - VideoAdaptationReason::kQuality); - MapResourceToReason(encode_usage_resource_, VideoAdaptationReason::kCpu); - MapResourceToReason(quality_scaler_resource_, - VideoAdaptationReason::kQuality); -} - -VideoStreamEncoderResourceManager::~VideoStreamEncoderResourceManager() {} + quality_rampup_experiment_( + 
QualityRampUpExperimentHelper::CreateIfEnabled(this, clock_)), + encoder_settings_(absl::nullopt) { + TRACE_EVENT0( + "webrtc", + "VideoStreamEncoderResourceManager::VideoStreamEncoderResourceManager"); + RTC_CHECK(degradation_preference_provider_); + RTC_CHECK(encoder_stats_observer_); +} + +VideoStreamEncoderResourceManager::~VideoStreamEncoderResourceManager() = + default; void VideoStreamEncoderResourceManager::Initialize( - rtc::TaskQueue* encoder_queue, - rtc::TaskQueue* resource_adaptation_queue) { + rtc::TaskQueue* encoder_queue) { RTC_DCHECK(!encoder_queue_); RTC_DCHECK(encoder_queue); - RTC_DCHECK(!resource_adaptation_queue_); - RTC_DCHECK(resource_adaptation_queue); encoder_queue_ = encoder_queue; - resource_adaptation_queue_ = resource_adaptation_queue; - prevent_adapt_up_due_to_active_counts_->Initialize( - encoder_queue_, resource_adaptation_queue_); - prevent_increase_resolution_due_to_bitrate_resource_->Initialize( - encoder_queue_, resource_adaptation_queue_); - prevent_adapt_up_in_balanced_resource_->Initialize( - encoder_queue_, resource_adaptation_queue_); - encode_usage_resource_->Initialize(encoder_queue_, - resource_adaptation_queue_); - quality_scaler_resource_->Initialize(encoder_queue_, - resource_adaptation_queue_); + encode_usage_resource_->RegisterEncoderTaskQueue(encoder_queue_->Get()); + quality_scaler_resource_->RegisterEncoderTaskQueue(encoder_queue_->Get()); } void VideoStreamEncoderResourceManager::SetAdaptationProcessor( - ResourceAdaptationProcessorInterface* adaptation_processor) { - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); + ResourceAdaptationProcessorInterface* adaptation_processor, + VideoStreamAdapter* stream_adapter) { + RTC_DCHECK_RUN_ON(encoder_queue_); adaptation_processor_ = adaptation_processor; - prevent_adapt_up_due_to_active_counts_->SetAdaptationProcessor( - adaptation_processor); - prevent_adapt_up_in_balanced_resource_->SetAdaptationProcessor( - adaptation_processor); - 
quality_scaler_resource_->SetAdaptationProcessor(adaptation_processor); + stream_adapter_ = stream_adapter; } void VideoStreamEncoderResourceManager::SetDegradationPreferences( @@ -406,58 +298,101 @@ VideoStreamEncoderResourceManager::degradation_preference() const { return degradation_preference_; } -void VideoStreamEncoderResourceManager::StartEncodeUsageResource() { +void VideoStreamEncoderResourceManager::ConfigureEncodeUsageResource() { RTC_DCHECK_RUN_ON(encoder_queue_); - RTC_DCHECK(!encode_usage_resource_->is_started()); RTC_DCHECK(encoder_settings_.has_value()); + if (encode_usage_resource_->is_started()) { + encode_usage_resource_->StopCheckForOveruse(); + } else { + // If the resource has not yet started then it needs to be added. + AddResource(encode_usage_resource_, VideoAdaptationReason::kCpu); + } encode_usage_resource_->StartCheckForOveruse(GetCpuOveruseOptions()); } +void VideoStreamEncoderResourceManager::MaybeInitializePixelLimitResource() { + RTC_DCHECK_RUN_ON(encoder_queue_); + RTC_DCHECK(adaptation_processor_); + RTC_DCHECK(!pixel_limit_resource_); + if (!field_trial::IsEnabled(kPixelLimitResourceFieldTrialName)) { + // The field trial is not running. + return; + } + int max_pixels = 0; + std::string pixel_limit_field_trial = + field_trial::FindFullName(kPixelLimitResourceFieldTrialName); + if (sscanf(pixel_limit_field_trial.c_str(), "Enabled-%d", &max_pixels) != 1) { + RTC_LOG(LS_ERROR) << "Couldn't parse " << kPixelLimitResourceFieldTrialName + << " trial config: " << pixel_limit_field_trial; + return; + } + RTC_LOG(LS_INFO) << "Running field trial " + << kPixelLimitResourceFieldTrialName << " configured to " + << max_pixels << " max pixels"; + // Configure the specified max pixels from the field trial. The pixel limit + // resource is active for the lifetme of the stream (until + // StopManagedResources() is called). 
+ pixel_limit_resource_ = + PixelLimitResource::Create(encoder_queue_->Get(), input_state_provider_); + pixel_limit_resource_->SetMaxPixels(max_pixels); + AddResource(pixel_limit_resource_, VideoAdaptationReason::kCpu); +} + void VideoStreamEncoderResourceManager::StopManagedResources() { RTC_DCHECK_RUN_ON(encoder_queue_); - encode_usage_resource_->StopCheckForOveruse(); - quality_scaler_resource_->StopCheckForOveruse(); + RTC_DCHECK(adaptation_processor_); + if (encode_usage_resource_->is_started()) { + encode_usage_resource_->StopCheckForOveruse(); + RemoveResource(encode_usage_resource_); + } + if (quality_scaler_resource_->is_started()) { + quality_scaler_resource_->StopCheckForOveruse(); + RemoveResource(quality_scaler_resource_); + } + if (pixel_limit_resource_) { + RemoveResource(pixel_limit_resource_); + pixel_limit_resource_ = nullptr; + } } -void VideoStreamEncoderResourceManager::MapResourceToReason( +void VideoStreamEncoderResourceManager::AddResource( rtc::scoped_refptr resource, VideoAdaptationReason reason) { - rtc::CritScope crit(&resource_lock_); + RTC_DCHECK_RUN_ON(encoder_queue_); RTC_DCHECK(resource); - RTC_DCHECK(absl::c_find_if(resources_, - [resource](const ResourceAndReason& r) { - return r.resource == resource; - }) == resources_.end()) - << "Resource " << resource->name() << " already was inserted"; - resources_.emplace_back(resource, reason); + bool inserted; + std::tie(std::ignore, inserted) = resources_.emplace(resource, reason); + RTC_DCHECK(inserted) << "Resource " << resource->Name() + << " already was inserted"; + adaptation_processor_->AddResource(resource); } -std::vector> -VideoStreamEncoderResourceManager::MappedResources() const { - rtc::CritScope crit(&resource_lock_); - std::vector> resources; - for (auto const& resource_and_reason : resources_) { - resources.push_back(resource_and_reason.resource); +void VideoStreamEncoderResourceManager::RemoveResource( + rtc::scoped_refptr resource) { + { + 
RTC_DCHECK_RUN_ON(encoder_queue_); + RTC_DCHECK(resource); + const auto& it = resources_.find(resource); + RTC_DCHECK(it != resources_.end()) + << "Resource \"" << resource->Name() << "\" not found."; + resources_.erase(it); } - return resources; + adaptation_processor_->RemoveResource(resource); } -rtc::scoped_refptr -VideoStreamEncoderResourceManager::quality_scaler_resource_for_testing() { - rtc::CritScope crit(&resource_lock_); - return quality_scaler_resource_; +std::vector +VideoStreamEncoderResourceManager::AdaptationConstraints() const { + RTC_DCHECK_RUN_ON(encoder_queue_); + return {bitrate_constraint_.get(), balanced_constraint_.get()}; } void VideoStreamEncoderResourceManager::SetEncoderSettings( EncoderSettings encoder_settings) { RTC_DCHECK_RUN_ON(encoder_queue_); encoder_settings_ = std::move(encoder_settings); - prevent_increase_resolution_due_to_bitrate_resource_ - ->OnEncoderSettingsUpdated(encoder_settings_); - - quality_rampup_experiment_.SetMaxBitrate( - LastInputFrameSizeOrDefault(), - encoder_settings_->video_codec().maxBitrate); + bitrate_constraint_->OnEncoderSettingsUpdated(encoder_settings_); + initial_frame_dropper_->OnEncoderSettingsUpdated( + encoder_settings_->video_codec(), current_adaptation_counters_); MaybeUpdateTargetFrameRate(); } @@ -466,9 +401,9 @@ void VideoStreamEncoderResourceManager::SetStartBitrate( RTC_DCHECK_RUN_ON(encoder_queue_); if (!start_bitrate.IsZero()) { encoder_target_bitrate_bps_ = start_bitrate.bps(); - prevent_increase_resolution_due_to_bitrate_resource_ - ->OnEncoderTargetBitrateUpdated(encoder_target_bitrate_bps_); - prevent_adapt_up_in_balanced_resource_->OnEncoderTargetBitrateUpdated( + bitrate_constraint_->OnEncoderTargetBitrateUpdated( + encoder_target_bitrate_bps_); + balanced_constraint_->OnEncoderTargetBitrateUpdated( encoder_target_bitrate_bps_); } initial_frame_dropper_->SetStartBitrate(start_bitrate, @@ -480,9 +415,9 @@ void VideoStreamEncoderResourceManager::SetTargetBitrate( 
RTC_DCHECK_RUN_ON(encoder_queue_); if (!target_bitrate.IsZero()) { encoder_target_bitrate_bps_ = target_bitrate.bps(); - prevent_increase_resolution_due_to_bitrate_resource_ - ->OnEncoderTargetBitrateUpdated(encoder_target_bitrate_bps_); - prevent_adapt_up_in_balanced_resource_->OnEncoderTargetBitrateUpdated( + bitrate_constraint_->OnEncoderTargetBitrateUpdated( + encoder_target_bitrate_bps_); + balanced_constraint_->OnEncoderTargetBitrateUpdated( encoder_target_bitrate_bps_); } initial_frame_dropper_->SetTargetBitrate(target_bitrate, @@ -493,25 +428,18 @@ void VideoStreamEncoderResourceManager::SetEncoderRates( const VideoEncoder::RateControlParameters& encoder_rates) { RTC_DCHECK_RUN_ON(encoder_queue_); encoder_rates_ = encoder_rates; + initial_frame_dropper_->SetBandwidthAllocation( + encoder_rates.bandwidth_allocation); } void VideoStreamEncoderResourceManager::OnFrameDroppedDueToSize() { RTC_DCHECK_RUN_ON(encoder_queue_); - // The VideoStreamEncoder makes the manager outlive the adaptation queue. This - // means that if the task gets executed, |this| has not been freed yet. - // TODO(https://crbug.com/webrtc/11565): When the manager no longer outlives - // the adaptation queue, add logic to prevent use-after-free on |this|. - resource_adaptation_queue_->PostTask([this] { - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - if (!adaptation_processor_) { - // The processor nulled before this task had a chance to execute. This - // happens if the processor is destroyed. No action needed. 
- return; - } - adaptation_processor_->TriggerAdaptationDueToFrameDroppedDueToSize( - quality_scaler_resource_); - }); initial_frame_dropper_->OnFrameDroppedDueToSize(); + Adaptation reduce_resolution = stream_adapter_->GetAdaptDownResolution(); + if (reduce_resolution.status() == Adaptation::Status::kValid) { + stream_adapter_->ApplyAdaptation(reduce_resolution, + quality_scaler_resource_); + } } void VideoStreamEncoderResourceManager::OnEncodeStarted( @@ -533,7 +461,6 @@ void VideoStreamEncoderResourceManager::OnEncodeCompleted( encoded_image.capture_time_ms_ * rtc::kNumMicrosecsPerMillisec; encode_usage_resource_->OnEncodeCompleted( timestamp, time_sent_in_us, capture_time_us, encode_duration_us); - // Inform |quality_scaler_resource_| of the encode completed event. quality_scaler_resource_->OnEncodeCompleted(encoded_image, time_sent_in_us); } @@ -548,20 +475,46 @@ bool VideoStreamEncoderResourceManager::DropInitialFrames() const { return initial_frame_dropper_->DropInitialFrames(); } +absl::optional +VideoStreamEncoderResourceManager::SingleActiveStreamPixels() const { + RTC_DCHECK_RUN_ON(encoder_queue_); + return initial_frame_dropper_->single_active_stream_pixels(); +} + +absl::optional +VideoStreamEncoderResourceManager::UseBandwidthAllocationBps() const { + RTC_DCHECK_RUN_ON(encoder_queue_); + return initial_frame_dropper_->UseBandwidthAllocationBps(); +} + void VideoStreamEncoderResourceManager::OnMaybeEncodeFrame() { RTC_DCHECK_RUN_ON(encoder_queue_); - initial_frame_dropper_->OnMaybeEncodeFrame(); - MaybePerformQualityRampupExperiment(); + initial_frame_dropper_->Disable(); + if (quality_rampup_experiment_ && quality_scaler_resource_->is_started()) { + DataRate bandwidth = encoder_rates_.has_value() + ? 
encoder_rates_->bandwidth_allocation + : DataRate::Zero(); + quality_rampup_experiment_->PerformQualityRampupExperiment( + quality_scaler_resource_, bandwidth, + DataRate::BitsPerSec(encoder_target_bitrate_bps_.value_or(0)), + DataRate::KilobitsPerSec(encoder_settings_->video_codec().maxBitrate), + LastFrameSizeOrDefault()); + } } void VideoStreamEncoderResourceManager::UpdateQualityScalerSettings( absl::optional qp_thresholds) { RTC_DCHECK_RUN_ON(encoder_queue_); if (qp_thresholds.has_value()) { + if (quality_scaler_resource_->is_started()) { + quality_scaler_resource_->SetQpThresholds(qp_thresholds.value()); + } else { + quality_scaler_resource_->StartCheckForOveruse(qp_thresholds.value()); + AddResource(quality_scaler_resource_, VideoAdaptationReason::kQuality); + } + } else if (quality_scaler_resource_->is_started()) { quality_scaler_resource_->StopCheckForOveruse(); - quality_scaler_resource_->StartCheckForOveruse(qp_thresholds.value()); - } else { - quality_scaler_resource_->StopCheckForOveruse(); + RemoveResource(quality_scaler_resource_); } initial_frame_dropper_->OnQualityScalerSettingsUpdated(); } @@ -572,7 +525,9 @@ void VideoStreamEncoderResourceManager::ConfigureQualityScaler( const auto scaling_settings = encoder_info.scaling_settings; const bool quality_scaling_allowed = IsResolutionScalingEnabled(degradation_preference_) && - scaling_settings.thresholds; + (scaling_settings.thresholds.has_value() || + (encoder_settings_.has_value() && + encoder_settings_->encoder_config().is_quality_scaling_allowed)); // TODO(https://crbug.com/webrtc/11222): Should this move to // QualityScalerResource? @@ -586,9 +541,9 @@ void VideoStreamEncoderResourceManager::ConfigureQualityScaler( experimental_thresholds = QualityScalingExperiment::GetQpThresholds( GetVideoCodecTypeOrGeneric(encoder_settings_)); } - UpdateQualityScalerSettings(experimental_thresholds - ? 
*experimental_thresholds - : *(scaling_settings.thresholds)); + UpdateQualityScalerSettings(experimental_thresholds.has_value() + ? experimental_thresholds + : scaling_settings.thresholds); } } else { UpdateQualityScalerSettings(absl::nullopt); @@ -600,7 +555,7 @@ void VideoStreamEncoderResourceManager::ConfigureQualityScaler( absl::optional thresholds = balanced_settings_.GetQpThresholds( GetVideoCodecTypeOrGeneric(encoder_settings_), - LastInputFrameSizeOrDefault()); + LastFrameSizeOrDefault()); if (thresholds) { quality_scaler_resource_->SetQpThresholds(*thresholds); } @@ -610,14 +565,11 @@ void VideoStreamEncoderResourceManager::ConfigureQualityScaler( VideoAdaptationReason VideoStreamEncoderResourceManager::GetReasonFromResource( rtc::scoped_refptr resource) const { - rtc::CritScope crit(&resource_lock_); - const auto& registered_resource = - absl::c_find_if(resources_, [&resource](const ResourceAndReason& r) { - return r.resource == resource; - }); + RTC_DCHECK_RUN_ON(encoder_queue_); + const auto& registered_resource = resources_.find(resource); RTC_DCHECK(registered_resource != resources_.end()) - << resource->name() << " not found."; - return registered_resource->reason; + << resource->Name() << " not found."; + return registered_resource->second; } // TODO(pbos): Lower these thresholds (to closer to 100%) when we handle @@ -643,48 +595,70 @@ CpuOveruseOptions VideoStreamEncoderResourceManager::GetCpuOveruseOptions() return options; } -int VideoStreamEncoderResourceManager::LastInputFrameSizeOrDefault() const { +int VideoStreamEncoderResourceManager::LastFrameSizeOrDefault() const { RTC_DCHECK_RUN_ON(encoder_queue_); - return input_state_provider_->InputState().frame_size_pixels().value_or( - kDefaultInputPixelsWidth * kDefaultInputPixelsHeight); + return input_state_provider_->InputState() + .single_active_stream_pixels() + .value_or( + input_state_provider_->InputState().frame_size_pixels().value_or( + kDefaultInputPixelsWidth * 
kDefaultInputPixelsHeight)); } void VideoStreamEncoderResourceManager::OnVideoSourceRestrictionsUpdated( VideoSourceRestrictions restrictions, const VideoAdaptationCounters& adaptation_counters, - rtc::scoped_refptr reason) { - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - VideoAdaptationCounters previous_adaptation_counters = - active_counts_[VideoAdaptationReason::kQuality] + - active_counts_[VideoAdaptationReason::kCpu]; - int adaptation_counters_total_abs_diff = std::abs( - adaptation_counters.Total() - previous_adaptation_counters.Total()); - if (reason) { - // A resource signal triggered this adaptation. The adaptation counters have - // to be updated every time the adaptation counter is incremented or - // decremented due to a resource. - RTC_DCHECK_EQ(adaptation_counters_total_abs_diff, 1); - VideoAdaptationReason reason_type = GetReasonFromResource(reason); - UpdateAdaptationStats(adaptation_counters, reason_type); - } else if (adaptation_counters.Total() == 0) { + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) { + RTC_DCHECK_RUN_ON(encoder_queue_); + current_adaptation_counters_ = adaptation_counters; + + // TODO(bugs.webrtc.org/11553) Remove reason parameter and add reset callback. + if (!reason && adaptation_counters.Total() == 0) { // Adaptation was manually reset - clear the per-reason counters too. - ResetActiveCounts(); encoder_stats_observer_->ClearAdaptationStats(); - } else { - // If a reason did not increase or decrease the Total() by 1 and the - // restrictions were not just reset, the adaptation counters MUST not have - // been modified and there is nothing to do stats-wise. - RTC_DCHECK_EQ(adaptation_counters_total_abs_diff, 0); } - RTC_LOG(LS_INFO) << ActiveCountsToString(); - // The VideoStreamEncoder makes the manager outlive the encoder queue. This - // means that if the task gets executed, |this| has not been freed yet. 
- encoder_queue_->PostTask([this, restrictions] { - RTC_DCHECK_RUN_ON(encoder_queue_); - video_source_restrictions_ = restrictions; - MaybeUpdateTargetFrameRate(); - }); + video_source_restrictions_ = FilterRestrictionsByDegradationPreference( + restrictions, degradation_preference_); + MaybeUpdateTargetFrameRate(); +} + +void VideoStreamEncoderResourceManager::OnResourceLimitationChanged( + rtc::scoped_refptr resource, + const std::map, VideoAdaptationCounters>& + resource_limitations) { + RTC_DCHECK_RUN_ON(encoder_queue_); + if (!resource) { + encoder_stats_observer_->ClearAdaptationStats(); + return; + } + + std::map limitations; + for (auto& resource_counter : resource_limitations) { + std::map::iterator it; + bool inserted; + std::tie(it, inserted) = limitations.emplace( + GetReasonFromResource(resource_counter.first), resource_counter.second); + if (!inserted && it->second.Total() < resource_counter.second.Total()) { + it->second = resource_counter.second; + } + } + + VideoAdaptationReason adaptation_reason = GetReasonFromResource(resource); + encoder_stats_observer_->OnAdaptationChanged( + adaptation_reason, limitations[VideoAdaptationReason::kCpu], + limitations[VideoAdaptationReason::kQuality]); + + if (quality_rampup_experiment_) { + bool cpu_limited = limitations.at(VideoAdaptationReason::kCpu).Total() > 0; + auto qp_resolution_adaptations = + limitations.at(VideoAdaptationReason::kQuality).resolution_adaptations; + quality_rampup_experiment_->cpu_adapted(cpu_limited); + quality_rampup_experiment_->qp_resolution_adaptations( + qp_resolution_adaptations); + } + + RTC_LOG(LS_INFO) << ActiveCountsToString(limitations); } void VideoStreamEncoderResourceManager::MaybeUpdateTargetFrameRate() { @@ -708,84 +682,6 @@ void VideoStreamEncoderResourceManager::MaybeUpdateTargetFrameRate() { encode_usage_resource_->SetTargetFrameRate(target_frame_rate); } -void VideoStreamEncoderResourceManager::OnAdaptationCountChanged( - const VideoAdaptationCounters& 
adaptation_count, - VideoAdaptationCounters* active_count, - VideoAdaptationCounters* other_active) { - RTC_DCHECK(active_count); - RTC_DCHECK(other_active); - const int active_total = active_count->Total(); - const int other_total = other_active->Total(); - const VideoAdaptationCounters prev_total = *active_count + *other_active; - const int delta_resolution_adaptations = - adaptation_count.resolution_adaptations - - prev_total.resolution_adaptations; - const int delta_fps_adaptations = - adaptation_count.fps_adaptations - prev_total.fps_adaptations; - - RTC_DCHECK_EQ( - std::abs(delta_resolution_adaptations) + std::abs(delta_fps_adaptations), - 1) - << "Adaptation took more than one step!"; - - if (delta_resolution_adaptations > 0) { - ++active_count->resolution_adaptations; - } else if (delta_resolution_adaptations < 0) { - if (active_count->resolution_adaptations == 0) { - RTC_DCHECK_GT(active_count->fps_adaptations, 0) << "No downgrades left"; - RTC_DCHECK_GT(other_active->resolution_adaptations, 0) - << "No resolution adaptation to borrow from"; - // Lend an fps adaptation to other and take one resolution adaptation. - --active_count->fps_adaptations; - ++other_active->fps_adaptations; - --other_active->resolution_adaptations; - } else { - --active_count->resolution_adaptations; - } - } - if (delta_fps_adaptations > 0) { - ++active_count->fps_adaptations; - } else if (delta_fps_adaptations < 0) { - if (active_count->fps_adaptations == 0) { - RTC_DCHECK_GT(active_count->resolution_adaptations, 0) - << "No downgrades left"; - RTC_DCHECK_GT(other_active->fps_adaptations, 0) - << "No fps adaptation to borrow from"; - // Lend a resolution adaptation to other and take one fps adaptation. 
- --active_count->resolution_adaptations; - ++other_active->resolution_adaptations; - --other_active->fps_adaptations; - } else { - --active_count->fps_adaptations; - } - } - - RTC_DCHECK(*active_count + *other_active == adaptation_count); - RTC_DCHECK_EQ(other_active->Total(), other_total); - RTC_DCHECK_EQ( - active_count->Total(), - active_total + delta_resolution_adaptations + delta_fps_adaptations); - RTC_DCHECK_GE(active_count->resolution_adaptations, 0); - RTC_DCHECK_GE(active_count->fps_adaptations, 0); - RTC_DCHECK_GE(other_active->resolution_adaptations, 0); - RTC_DCHECK_GE(other_active->fps_adaptations, 0); -} - -void VideoStreamEncoderResourceManager::UpdateAdaptationStats( - const VideoAdaptationCounters& total_counts, - VideoAdaptationReason reason) { - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - // Update active counts - VideoAdaptationCounters& active_count = active_counts_[reason]; - VideoAdaptationCounters& other_active = active_counts_[OtherReason(reason)]; - - OnAdaptationCountChanged(total_counts, &active_count, &other_active); - - encoder_stats_observer_->OnAdaptationChanged( - reason, active_counts_[VideoAdaptationReason::kCpu], - active_counts_[VideoAdaptationReason::kQuality]); -} - void VideoStreamEncoderResourceManager::UpdateStatsAdaptationSettings() const { RTC_DCHECK_RUN_ON(encoder_queue_); VideoStreamEncoderObserver::AdaptationSettings cpu_settings( @@ -800,76 +696,19 @@ void VideoStreamEncoderResourceManager::UpdateStatsAdaptationSettings() const { quality_settings); } -void VideoStreamEncoderResourceManager::MaybePerformQualityRampupExperiment() { - RTC_DCHECK_RUN_ON(encoder_queue_); - if (!quality_scaler_resource_->is_started()) - return; - - if (quality_rampup_done_) - return; - - int64_t now_ms = clock_->TimeInMilliseconds(); - uint32_t bw_kbps = encoder_rates_.has_value() - ? 
encoder_rates_.value().bandwidth_allocation.kbps() - : 0; - - bool try_quality_rampup = false; - if (quality_rampup_experiment_.BwHigh(now_ms, bw_kbps)) { - // Verify that encoder is at max bitrate and the QP is low. - if (encoder_settings_ && - encoder_target_bitrate_bps_.value_or(0) == - encoder_settings_->video_codec().maxBitrate * 1000 && - quality_scaler_resource_->QpFastFilterLow()) { - try_quality_rampup = true; - } - } - if (try_quality_rampup) { - // The VideoStreamEncoder makes the manager outlive the adaptation queue. - // This means that if the task gets executed, |this| has not been freed yet. - // TODO(https://crbug.com/webrtc/11565): When the manager no longer outlives - // the adaptation queue, add logic to prevent use-after-free on |this|. - resource_adaptation_queue_->PostTask([this] { - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - if (!adaptation_processor_) { - // The processor nulled before this task had a chance to execute. This - // happens if the processor is destroyed. No action needed. - return; - } - // TODO(https://crbug.com/webrtc/11392): See if we can rely on the total - // counts or the stats, and not the active counts. 
- const VideoAdaptationCounters& qp_counts = - active_counts_[VideoAdaptationReason::kQuality]; - const VideoAdaptationCounters& cpu_counts = - active_counts_[VideoAdaptationReason::kCpu]; - if (!quality_rampup_done_ && qp_counts.resolution_adaptations > 0 && - cpu_counts.Total() == 0) { - RTC_LOG(LS_INFO) << "Reset quality limitations."; - adaptation_processor_->ResetVideoSourceRestrictions(); - quality_rampup_done_ = true; - } - }); - } -} - -void VideoStreamEncoderResourceManager::ResetActiveCounts() { - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - active_counts_.clear(); - active_counts_[VideoAdaptationReason::kCpu] = VideoAdaptationCounters(); - active_counts_[VideoAdaptationReason::kQuality] = VideoAdaptationCounters(); -} - -std::string VideoStreamEncoderResourceManager::ActiveCountsToString() const { - RTC_DCHECK_RUN_ON(resource_adaptation_queue_); - RTC_DCHECK_EQ(2, active_counts_.size()); +// static +std::string VideoStreamEncoderResourceManager::ActiveCountsToString( + const std::map& + active_counts) { rtc::StringBuilder ss; ss << "Downgrade counts: fps: {"; - for (auto& reason_count : active_counts_) { + for (auto& reason_count : active_counts) { ss << ToString(reason_count.first) << ":"; ss << reason_count.second.fps_adaptations; } ss << "}, resolution {"; - for (auto& reason_count : active_counts_) { + for (auto& reason_count : active_counts) { ss << ToString(reason_count.first) << ":"; ss << reason_count.second.resolution_adaptations; } @@ -877,4 +716,31 @@ std::string VideoStreamEncoderResourceManager::ActiveCountsToString() const { return ss.Release(); } + +void VideoStreamEncoderResourceManager::OnQualityRampUp() { + RTC_DCHECK_RUN_ON(encoder_queue_); + stream_adapter_->ClearRestrictions(); + quality_rampup_experiment_.reset(); +} + +bool VideoStreamEncoderResourceManager::IsSimulcast( + const VideoEncoderConfig& encoder_config) { + const std::vector& simulcast_layers = + encoder_config.simulcast_layers; + if (simulcast_layers.size() <= 1) { 
+ return false; + } + + if (simulcast_layers[0].active) { + // We can't distinguish between simulcast and singlecast when only the + // lowest spatial layer is active. Treat this case as simulcast. + return true; + } + + int num_active_layers = + std::count_if(simulcast_layers.begin(), simulcast_layers.end(), + [](const VideoStream& layer) { return layer.active; }); + return num_active_layers > 1; +} + } // namespace webrtc diff --git a/video/adaptation/video_stream_encoder_resource_manager.h b/video/adaptation/video_stream_encoder_resource_manager.h index d028e5049a..e7174d2344 100644 --- a/video/adaptation/video_stream_encoder_resource_manager.h +++ b/video/adaptation/video_stream_encoder_resource_manager.h @@ -20,8 +20,10 @@ #include #include "absl/types/optional.h" +#include "api/adaptation/resource.h" #include "api/rtp_parameters.h" #include "api/scoped_refptr.h" +#include "api/task_queue/task_queue_base.h" #include "api/video/video_adaptation_counters.h" #include "api/video/video_adaptation_reason.h" #include "api/video/video_frame.h" @@ -30,19 +32,24 @@ #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/video_encoder_config.h" -#include "call/adaptation/resource.h" #include "call/adaptation/resource_adaptation_processor_interface.h" #include "call/adaptation/video_stream_adapter.h" #include "call/adaptation/video_stream_input_state_provider.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/experiments/quality_rampup_experiment.h" #include "rtc_base/experiments/quality_scaler_settings.h" +#include "rtc_base/ref_count.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" +#include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" +#include "video/adaptation/balanced_constraint.h" +#include "video/adaptation/bitrate_constraint.h" #include "video/adaptation/encode_usage_resource.h" #include 
"video/adaptation/overuse_frame_detector.h" +#include "video/adaptation/pixel_limit_resource.h" +#include "video/adaptation/quality_rampup_experiment_helper.h" #include "video/adaptation/quality_scaler_resource.h" +#include "video/adaptation/video_stream_encoder_resource.h" namespace webrtc { @@ -59,22 +66,25 @@ extern const int kDefaultInputPixelsHeight; // resources. // // The manager is also involved with various mitigations not part of the -// ResourceAdaptationProcessor code such as the inital frame dropping. +// ResourceAdaptationProcessor code such as the initial frame dropping. class VideoStreamEncoderResourceManager - : public ResourceAdaptationProcessorListener { + : public VideoSourceRestrictionsListener, + public ResourceLimitationsListener, + public QualityRampUpExperimentListener { public: VideoStreamEncoderResourceManager( VideoStreamInputStateProvider* input_state_provider, VideoStreamEncoderObserver* encoder_stats_observer, Clock* clock, bool experiment_cpu_load_estimator, - std::unique_ptr overuse_detector); + std::unique_ptr overuse_detector, + DegradationPreferenceProvider* degradation_preference_provider); ~VideoStreamEncoderResourceManager() override; - void Initialize(rtc::TaskQueue* encoder_queue, - rtc::TaskQueue* resource_adaptation_queue); + void Initialize(rtc::TaskQueue* encoder_queue); void SetAdaptationProcessor( - ResourceAdaptationProcessorInterface* adaptation_processor); + ResourceAdaptationProcessorInterface* adaptation_processor, + VideoStreamAdapter* stream_adapter); // TODO(https://crbug.com/webrtc/11563): The degradation preference is a // setting of the Processor, it does not belong to the Manager - can we get @@ -82,10 +92,12 @@ class VideoStreamEncoderResourceManager void SetDegradationPreferences(DegradationPreference degradation_preference); DegradationPreference degradation_preference() const; - // Starts the encode usage resource. The quality scaler resource is - // automatically started on being configured. 
- void StartEncodeUsageResource(); + void ConfigureEncodeUsageResource(); + // Initializes the pixel limit resource if the "WebRTC-PixelLimitResource" + // field trial is enabled. This can be used for testing. + void MaybeInitializePixelLimitResource(); // Stops the encode usage and quality scaler resources if not already stopped. + // If the pixel limit resource was created it is also stopped and nulled. void StopManagedResources(); // Settings that affect the VideoStreamEncoder-specific resources. @@ -109,37 +121,33 @@ class VideoStreamEncoderResourceManager void OnFrameDropped(EncodedImageCallback::DropReason reason); // Resources need to be mapped to an AdaptReason (kCpu or kQuality) in order - // to be able to update |active_counts_|, which is used... - // - Legacy getStats() purposes. - // - Preventing adapting up in some circumstances (which may be questionable). - // TODO(hbos): Can we get rid of this? - void MapResourceToReason(rtc::scoped_refptr resource, - VideoAdaptationReason reason); - std::vector> MappedResources() const; - rtc::scoped_refptr - quality_scaler_resource_for_testing(); - // If true, the VideoStreamEncoder should eexecute its logic to maybe drop - // frames baseed on size and bitrate. + // to update legacy getStats(). + void AddResource(rtc::scoped_refptr resource, + VideoAdaptationReason reason); + void RemoveResource(rtc::scoped_refptr resource); + std::vector AdaptationConstraints() const; + // If true, the VideoStreamEncoder should execute its logic to maybe drop + // frames based on size and bitrate. bool DropInitialFrames() const; + absl::optional SingleActiveStreamPixels() const; + absl::optional UseBandwidthAllocationBps() const; - // ResourceAdaptationProcessorListener implementation. - // Updates |video_source_restrictions_| and |active_counts_|. + // VideoSourceRestrictionsListener implementation. + // Updates |video_source_restrictions_|. 
void OnVideoSourceRestrictionsUpdated( VideoSourceRestrictions restrictions, const VideoAdaptationCounters& adaptation_counters, - rtc::scoped_refptr reason) override; + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) override; + void OnResourceLimitationChanged( + rtc::scoped_refptr resource, + const std::map, VideoAdaptationCounters>& + resource_limitations) override; - // For reasons of adaptation and statistics, we not only count the total - // number of adaptations, but we also count the number of adaptations per - // reason. - // This method takes the new total number of adaptations and allocates that to - // the "active" count - number of adaptations for the current reason. - // The "other" count is the number of adaptations for the other reason. - // This must be called for each adaptation step made. - static void OnAdaptationCountChanged( - const VideoAdaptationCounters& adaptation_count, - VideoAdaptationCounters* active_count, - VideoAdaptationCounters* other_active); + // QualityRampUpExperimentListener implementation. + void OnQualityRampUp() override; + + static bool IsSimulcast(const VideoEncoderConfig& encoder_config); private: class InitialFrameDropper; @@ -148,7 +156,7 @@ class VideoStreamEncoderResourceManager rtc::scoped_refptr resource) const; CpuOveruseOptions GetCpuOveruseOptions() const; - int LastInputFrameSizeOrDefault() const; + int LastFrameSizeOrDefault() const; // Calculates an up-to-date value of the target frame rate and informs the // |encode_usage_resource_| of the new value. @@ -158,134 +166,26 @@ class VideoStreamEncoderResourceManager void UpdateQualityScalerSettings( absl::optional qp_thresholds); - void UpdateAdaptationStats(const VideoAdaptationCounters& total_counts, - VideoAdaptationReason reason); void UpdateStatsAdaptationSettings() const; - // Checks to see if we should execute the quality rampup experiment. 
The - // experiment resets all video restrictions at the start of the call in the - // case the bandwidth estimate is high enough. - // TODO(https://crbug.com/webrtc/11222) Move experiment details into an inner - // class. - void MaybePerformQualityRampupExperiment(); - - void ResetActiveCounts(); - std::string ActiveCountsToString() const; - - // TODO(hbos): Consider moving all of the manager's resources into separate - // files for testability. - - // Does not trigger adaptations, only prevents adapting up based on - // |active_counts_|. - class PreventAdaptUpDueToActiveCounts final - : public rtc::RefCountedObject { - public: - explicit PreventAdaptUpDueToActiveCounts( - VideoStreamEncoderResourceManager* manager); - ~PreventAdaptUpDueToActiveCounts() override = default; - - void SetAdaptationProcessor( - ResourceAdaptationProcessorInterface* adaptation_processor); - - // Resource overrides. - std::string name() const override { - return "PreventAdaptUpDueToActiveCounts"; - } - bool IsAdaptationUpAllowed( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const override; - - private: - // The |manager_| must be alive as long as this resource is added to the - // ResourceAdaptationProcessor, i.e. when IsAdaptationUpAllowed() is called. - VideoStreamEncoderResourceManager* const manager_; - ResourceAdaptationProcessorInterface* adaptation_processor_ - RTC_GUARDED_BY(resource_adaptation_queue()); - }; - - // Does not trigger adaptations, only prevents adapting up resolution. 
- class PreventIncreaseResolutionDueToBitrateResource final - : public rtc::RefCountedObject { - public: - explicit PreventIncreaseResolutionDueToBitrateResource( - VideoStreamEncoderResourceManager* manager); - ~PreventIncreaseResolutionDueToBitrateResource() override = default; - - void OnEncoderSettingsUpdated( - absl::optional encoder_settings); - void OnEncoderTargetBitrateUpdated( - absl::optional encoder_target_bitrate_bps); - - // Resource overrides. - std::string name() const override { - return "PreventIncreaseResolutionDueToBitrateResource"; - } - bool IsAdaptationUpAllowed( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const override; - - private: - // The |manager_| must be alive as long as this resource is added to the - // ResourceAdaptationProcessor, i.e. when IsAdaptationUpAllowed() is called. - VideoStreamEncoderResourceManager* const manager_; - absl::optional encoder_settings_ - RTC_GUARDED_BY(resource_adaptation_queue()); - absl::optional encoder_target_bitrate_bps_ - RTC_GUARDED_BY(resource_adaptation_queue()); - }; + static std::string ActiveCountsToString( + const std::map& + active_counts); - // Does not trigger adaptations, only prevents adapting up in BALANCED. - class PreventAdaptUpInBalancedResource final - : public rtc::RefCountedObject { - public: - explicit PreventAdaptUpInBalancedResource( - VideoStreamEncoderResourceManager* manager); - ~PreventAdaptUpInBalancedResource() override = default; - - void SetAdaptationProcessor( - ResourceAdaptationProcessorInterface* adaptation_processor); - void OnEncoderTargetBitrateUpdated( - absl::optional encoder_target_bitrate_bps); - - // Resource overrides. 
- std::string name() const override { - return "PreventAdaptUpInBalancedResource"; - } - bool IsAdaptationUpAllowed( - const VideoStreamInputState& input_state, - const VideoSourceRestrictions& restrictions_before, - const VideoSourceRestrictions& restrictions_after, - rtc::scoped_refptr reason_resource) const override; - - private: - // The |manager_| must be alive as long as this resource is added to the - // ResourceAdaptationProcessor, i.e. when IsAdaptationUpAllowed() is called. - VideoStreamEncoderResourceManager* const manager_; - ResourceAdaptationProcessorInterface* adaptation_processor_ - RTC_GUARDED_BY(resource_adaptation_queue()); - absl::optional encoder_target_bitrate_bps_ - RTC_GUARDED_BY(resource_adaptation_queue()); - }; - - const rtc::scoped_refptr - prevent_adapt_up_due_to_active_counts_; - const rtc::scoped_refptr - prevent_increase_resolution_due_to_bitrate_resource_; - const rtc::scoped_refptr - prevent_adapt_up_in_balanced_resource_; + DegradationPreferenceProvider* const degradation_preference_provider_; + std::unique_ptr bitrate_constraint_ + RTC_GUARDED_BY(encoder_queue_); + const std::unique_ptr balanced_constraint_ + RTC_GUARDED_BY(encoder_queue_); const rtc::scoped_refptr encode_usage_resource_; const rtc::scoped_refptr quality_scaler_resource_; + rtc::scoped_refptr pixel_limit_resource_; rtc::TaskQueue* encoder_queue_; - rtc::TaskQueue* resource_adaptation_queue_; VideoStreamInputStateProvider* const input_state_provider_ RTC_GUARDED_BY(encoder_queue_); - ResourceAdaptationProcessorInterface* adaptation_processor_ - RTC_GUARDED_BY(resource_adaptation_queue_); + ResourceAdaptationProcessorInterface* adaptation_processor_; + VideoStreamAdapter* stream_adapter_ RTC_GUARDED_BY(encoder_queue_); // Thread-safe. 
VideoStreamEncoderObserver* const encoder_stats_observer_; @@ -293,6 +193,9 @@ class VideoStreamEncoderResourceManager VideoSourceRestrictions video_source_restrictions_ RTC_GUARDED_BY(encoder_queue_); + VideoAdaptationCounters current_adaptation_counters_ + RTC_GUARDED_BY(encoder_queue_); + const BalancedDegradationSettings balanced_settings_; Clock* clock_ RTC_GUARDED_BY(encoder_queue_); const bool experiment_cpu_load_estimator_ RTC_GUARDED_BY(encoder_queue_); @@ -303,35 +206,15 @@ class VideoStreamEncoderResourceManager RTC_GUARDED_BY(encoder_queue_); absl::optional encoder_rates_ RTC_GUARDED_BY(encoder_queue_); - // Used on both the encoder queue and resource adaptation queue. - std::atomic quality_rampup_done_; - QualityRampupExperiment quality_rampup_experiment_ + std::unique_ptr quality_rampup_experiment_ RTC_GUARDED_BY(encoder_queue_); absl::optional encoder_settings_ RTC_GUARDED_BY(encoder_queue_); // Ties a resource to a reason for statistical reporting. This AdaptReason is // also used by this module to make decisions about how to adapt up/down. - struct ResourceAndReason { - ResourceAndReason(rtc::scoped_refptr resource, - VideoAdaptationReason reason) - : resource(resource), reason(reason) {} - virtual ~ResourceAndReason() = default; - - const rtc::scoped_refptr resource; - const VideoAdaptationReason reason; - }; - rtc::CriticalSection resource_lock_; - std::vector resources_ RTC_GUARDED_BY(&resource_lock_); - // One AdaptationCounter for each reason, tracking the number of times we have - // adapted for each reason. The sum of active_counts_ MUST always equal the - // total adaptation provided by the VideoSourceRestrictions. - // TODO(https://crbug.com/webrtc/11542): When we have an adaptation queue, - // guard the activec counts by it instead. The |encoder_stats_observer_| is - // thread-safe anyway, and active counts are used by - // PreventAdaptUpDueToActiveCounts to make decisions. 
- std::unordered_map - active_counts_ RTC_GUARDED_BY(resource_adaptation_queue_); + std::map, VideoAdaptationReason> resources_ + RTC_GUARDED_BY(encoder_queue_); }; } // namespace webrtc diff --git a/video/adaptation/video_stream_encoder_resource_manager_unittest.cc b/video/adaptation/video_stream_encoder_resource_manager_unittest.cc deleted file mode 100644 index 38ebba6334..0000000000 --- a/video/adaptation/video_stream_encoder_resource_manager_unittest.cc +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "video/adaptation/video_stream_encoder_resource_manager.h" - -#include "api/video/video_adaptation_counters.h" -#include "test/gmock.h" -#include "test/gtest.h" - -namespace webrtc { - -TEST(VideoStreamEncoderResourceManagerTest, FirstAdaptationDown_Fps) { - VideoAdaptationCounters cpu; - VideoAdaptationCounters qp; - VideoAdaptationCounters total(0, 1); - - VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp); - VideoAdaptationCounters expected_cpu(0, 1); - VideoAdaptationCounters expected_qp; - EXPECT_EQ(expected_cpu, cpu); - EXPECT_EQ(expected_qp, qp); -} - -TEST(VideoStreamEncoderResourceManagerTest, FirstAdaptationDown_Resolution) { - VideoAdaptationCounters cpu; - VideoAdaptationCounters qp; - VideoAdaptationCounters total(1, 0); - - VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp); - VideoAdaptationCounters expected_cpu(1, 0); - VideoAdaptationCounters expected_qp; - EXPECT_EQ(expected_cpu, cpu); - EXPECT_EQ(expected_qp, qp); -} - -TEST(VideoStreamEncoderResourceManagerTest, LastAdaptUp_Fps) { - 
VideoAdaptationCounters cpu(0, 1); - VideoAdaptationCounters qp; - VideoAdaptationCounters total; - - VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp); - VideoAdaptationCounters expected_cpu; - VideoAdaptationCounters expected_qp; - EXPECT_EQ(expected_cpu, cpu); - EXPECT_EQ(expected_qp, qp); -} - -TEST(VideoStreamEncoderResourceManagerTest, LastAdaptUp_Resolution) { - VideoAdaptationCounters cpu(1, 0); - VideoAdaptationCounters qp; - VideoAdaptationCounters total; - - VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp); - VideoAdaptationCounters expected_cpu; - VideoAdaptationCounters expected_qp; - EXPECT_EQ(expected_cpu, cpu); - EXPECT_EQ(expected_qp, qp); -} - -TEST(VideoStreamEncoderResourceManagerTest, AdaptUpWithBorrow_Resolution) { - VideoAdaptationCounters cpu(0, 1); - VideoAdaptationCounters qp(1, 0); - VideoAdaptationCounters total(0, 1); - - // CPU adaptation for resolution, but no resolution adaptation left from CPU. - // We then borrow the resolution adaptation from qp, and give qp the fps - // adaptation from CPU. - VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp); - - VideoAdaptationCounters expected_cpu(0, 0); - VideoAdaptationCounters expected_qp(0, 1); - EXPECT_EQ(expected_cpu, cpu); - EXPECT_EQ(expected_qp, qp); -} - -TEST(VideoStreamEncoderResourceManagerTest, AdaptUpWithBorrow_Fps) { - VideoAdaptationCounters cpu(1, 0); - VideoAdaptationCounters qp(0, 1); - VideoAdaptationCounters total(1, 0); - - // CPU adaptation for fps, but no fps adaptation left from CPU. We then borrow - // the fps adaptation from qp, and give qp the resolution adaptation from CPU. 
- VideoStreamEncoderResourceManager::OnAdaptationCountChanged(total, &cpu, &qp); - - VideoAdaptationCounters expected_cpu(0, 0); - VideoAdaptationCounters expected_qp(1, 0); - EXPECT_EQ(expected_cpu, cpu); - EXPECT_EQ(expected_qp, qp); -} - -} // namespace webrtc diff --git a/video/alignment_adjuster.cc b/video/alignment_adjuster.cc new file mode 100644 index 0000000000..6b1db9238b --- /dev/null +++ b/video/alignment_adjuster.cc @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "video/alignment_adjuster.h" + +#include +#include + +#include "absl/algorithm/container.h" +#include "rtc_base/logging.h" + +namespace webrtc { +namespace { +// Round each scale factor to the closest rational in form alignment/i where i +// is a multiple of |requested_alignment|. Each resolution divisible by +// |alignment| will be divisible by |requested_alignment| after the scale factor +// is applied. 
+double RoundToMultiple(int alignment, + int requested_alignment, + VideoEncoderConfig* config, + bool update_config) { + double diff = 0.0; + for (auto& layer : config->simulcast_layers) { + double min_dist = std::numeric_limits::max(); + double new_scale = 1.0; + for (int i = requested_alignment; i <= alignment; + i += requested_alignment) { + double dist = std::abs(layer.scale_resolution_down_by - + alignment / static_cast(i)); + if (dist <= min_dist) { + min_dist = dist; + new_scale = alignment / static_cast(i); + } + } + diff += std::abs(layer.scale_resolution_down_by - new_scale); + if (update_config) { + RTC_LOG(LS_INFO) << "scale_resolution_down_by " + << layer.scale_resolution_down_by << " -> " << new_scale; + layer.scale_resolution_down_by = new_scale; + } + } + return diff; +} +} // namespace + +// Input: encoder_info.requested_resolution_alignment (K) +// Input: encoder_info.apply_alignment_to_all_simulcast_layers (B) +// Input: vector config->simulcast_layers.scale_resolution_down_by (S[i]) +// Output: +// If B is false, returns K and does not adjust scaling factors. +// Otherwise, returns adjusted alignment (A), adjusted scaling factors (S'[i]) +// are written in |config| such that: +// +// A / S'[i] are integers divisible by K +// sum abs(S'[i] - S[i]) -> min +// A integer <= 16 +// +// Solution chooses closest S'[i] in a form A / j where j is a multiple of K. + +int AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors( + const VideoEncoder::EncoderInfo& encoder_info, + VideoEncoderConfig* config, + absl::optional max_layers) { + const int requested_alignment = encoder_info.requested_resolution_alignment; + if (!encoder_info.apply_alignment_to_all_simulcast_layers) { + return requested_alignment; + } + + if (requested_alignment < 1 || config->number_of_streams <= 1 || + config->simulcast_layers.size() <= 1) { + return requested_alignment; + } + + // Update alignment to also apply to simulcast layers. 
+ const bool has_scale_resolution_down_by = absl::c_any_of( + config->simulcast_layers, [](const webrtc::VideoStream& layer) { + return layer.scale_resolution_down_by >= 1.0; + }); + + if (!has_scale_resolution_down_by) { + // Default resolution downscaling used (scale factors: 1, 2, 4, ...). + size_t size = config->simulcast_layers.size(); + if (max_layers && *max_layers > 0 && *max_layers < size) { + size = *max_layers; + } + return requested_alignment * (1 << (size - 1)); + } + + // Get alignment for downscaled layers. + // Adjust |scale_resolution_down_by| to a common multiple to limit the + // alignment value (to avoid largely cropped frames and possibly with an + // aspect ratio far from the original). + const int kMaxAlignment = 16; + + for (auto& layer : config->simulcast_layers) { + layer.scale_resolution_down_by = + std::max(layer.scale_resolution_down_by, 1.0); + layer.scale_resolution_down_by = + std::min(layer.scale_resolution_down_by, 10000.0); + } + + // Decide on common multiple to use. + double min_diff = std::numeric_limits::max(); + int best_alignment = 1; + for (int alignment = requested_alignment; alignment <= kMaxAlignment; + ++alignment) { + double diff = RoundToMultiple(alignment, requested_alignment, config, + /*update_config=*/false); + if (diff < min_diff) { + min_diff = diff; + best_alignment = alignment; + } + } + RoundToMultiple(best_alignment, requested_alignment, config, + /*update_config=*/true); + + return std::max(best_alignment, requested_alignment); +} +} // namespace webrtc diff --git a/video/alignment_adjuster.h b/video/alignment_adjuster.h new file mode 100644 index 0000000000..4b72623a19 --- /dev/null +++ b/video/alignment_adjuster.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VIDEO_ALIGNMENT_ADJUSTER_H_ +#define VIDEO_ALIGNMENT_ADJUSTER_H_ + +#include "api/video_codecs/video_encoder.h" +#include "api/video_codecs/video_encoder_config.h" + +namespace webrtc { + +class AlignmentAdjuster { + public: + // Returns the resolution alignment requested by the encoder (i.e + // |EncoderInfo::requested_resolution_alignment| which ensures that delivered + // frames to the encoder are divisible by this alignment). + // + // If |EncoderInfo::apply_alignment_to_all_simulcast_layers| is enabled, the + // alignment will be adjusted to ensure that each simulcast layer also is + // divisible by |requested_resolution_alignment|. The configured scale factors + // |scale_resolution_down_by| may be adjusted to a common multiple to limit + // the alignment value to avoid largely cropped frames and possibly with an + // aspect ratio far from the original. + + // Note: |max_layers| currently only taken into account when using default + // scale factors. + static int GetAlignmentAndMaybeAdjustScaleFactors( + const VideoEncoder::EncoderInfo& info, + VideoEncoderConfig* config, + absl::optional max_layers); +}; + +} // namespace webrtc + +#endif // VIDEO_ALIGNMENT_ADJUSTER_H_ diff --git a/video/alignment_adjuster_unittest.cc b/video/alignment_adjuster_unittest.cc new file mode 100644 index 0000000000..28e4bc0550 --- /dev/null +++ b/video/alignment_adjuster_unittest.cc @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2020 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "video/alignment_adjuster.h" + +#include +#include +#include + +#include "rtc_base/numerics/safe_conversions.h" +#include "test/encoder_settings.h" +#include "test/gtest.h" + +namespace webrtc { +namespace test { +namespace { +VideoEncoder::EncoderInfo GetEncoderInfo(int alignment, bool apply) { + VideoEncoder::EncoderInfo info; + info.requested_resolution_alignment = alignment; + info.apply_alignment_to_all_simulcast_layers = apply; + return info; +} +} // namespace + +class AlignmentAdjusterTest + : public ::testing::TestWithParam<::testing::tuple< + int, + std::tuple, std::vector, int>>> { + protected: + AlignmentAdjusterTest() + : kRequestedAlignment(std::get<0>(GetParam())), + kScaleFactors(std::get<0>(std::get<1>(GetParam()))), + kAdjustedScaleFactors(std::get<1>(std::get<1>(GetParam()))), + kAdjustedAlignment(std::get<2>(std::get<1>(GetParam()))) {} + + const int kRequestedAlignment; + const std::vector kScaleFactors; + const std::vector kAdjustedScaleFactors; + const int kAdjustedAlignment; +}; + +INSTANTIATE_TEST_SUITE_P( + ScaleFactorsAndAlignment, + AlignmentAdjusterTest, + ::testing::Combine( + ::testing::Values(2), // kRequestedAlignment + ::testing::Values( + std::make_tuple(std::vector{-1.0}, // kScaleFactors + std::vector{-1.0}, // kAdjustedScaleFactors + 2), // default: {1.0} // kAdjustedAlignment + std::make_tuple(std::vector{-1.0, -1.0}, + std::vector{-1.0, -1.0}, + 4), // default: {1.0, 2.0} + std::make_tuple(std::vector{-1.0, -1.0, -1.0}, + std::vector{-1.0, -1.0, -1.0}, + 8), // default: {1.0, 2.0, 4.0} + std::make_tuple(std::vector{1.0, 2.0, 4.0}, + std::vector{1.0, 2.0, 4.0}, + 8), + std::make_tuple(std::vector{9999.0, -1.0, 1.0}, + std::vector{8.0, 1.0, 1.0}, + 16), // kMaxAlignment + std::make_tuple(std::vector{3.99, 2.01, 1.0}, + std::vector{4.0, 2.0, 1.0}, + 8), + std::make_tuple(std::vector{2.9, 2.1}, + std::vector{6.0 
/ 2.0, 6.0 / 3.0}, + 12), + std::make_tuple(std::vector{4.9, 1.7, 1.2}, + std::vector{5.0, 5.0 / 3.0, 5.0 / 4.0}, + 10), + std::make_tuple(std::vector{1.0, 1.3}, + std::vector{4.0 / 4.0, 4.0 / 3.0}, + 8), + std::make_tuple(std::vector{1.75, 3.5}, + std::vector{7.0 / 4.0, 7.0 / 2.0}, + 7), + std::make_tuple(std::vector{1.5, 2.5}, + std::vector{1.5, 2.5}, + 15)))); + +class AlignmentAdjusterTestTwoLayers : public AlignmentAdjusterTest { + protected: + const int kMaxLayers = 2; +}; + +INSTANTIATE_TEST_SUITE_P( + ScaleFactorsAndAlignmentWithMaxLayers, + AlignmentAdjusterTestTwoLayers, + ::testing::Combine( + ::testing::Values(2), // kRequestedAlignment + ::testing::Values( + std::make_tuple(std::vector{-1.0}, // kScaleFactors + std::vector{-1.0}, // kAdjustedScaleFactors + 2), // default: {1.0} // kAdjustedAlignment + std::make_tuple(std::vector{-1.0, -1.0}, + std::vector{-1.0, -1.0}, + 4), // default: {1.0, 2.0} + std::make_tuple(std::vector{-1.0, -1.0, -1.0}, + std::vector{-1.0, -1.0, -1.0}, + 4), // default: {1.0, 2.0, 4.0} + std::make_tuple(std::vector{1.0, 2.0, 4.0}, + std::vector{1.0, 2.0, 4.0}, + 8)))); + +TEST_P(AlignmentAdjusterTest, AlignmentAppliedToAllLayers) { + const bool kApplyAlignmentToAllLayers = true; + + // Fill config with the scaling factor by which to reduce encoding size. + const int num_streams = kScaleFactors.size(); + VideoEncoderConfig config; + test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config); + for (int i = 0; i < num_streams; ++i) { + config.simulcast_layers[i].scale_resolution_down_by = kScaleFactors[i]; + } + + // Verify requested alignment from sink. + VideoEncoder::EncoderInfo info = + GetEncoderInfo(kRequestedAlignment, kApplyAlignmentToAllLayers); + int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors( + info, &config, absl::nullopt); + EXPECT_EQ(alignment, kAdjustedAlignment); + + // Verify adjusted scale factors. 
+ for (int i = 0; i < num_streams; ++i) { + EXPECT_EQ(config.simulcast_layers[i].scale_resolution_down_by, + kAdjustedScaleFactors[i]); + } +} + +TEST_P(AlignmentAdjusterTest, AlignmentNotAppliedToAllLayers) { + const bool kApplyAlignmentToAllLayers = false; + + // Fill config with the scaling factor by which to reduce encoding size. + const int num_streams = kScaleFactors.size(); + VideoEncoderConfig config; + test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config); + for (int i = 0; i < num_streams; ++i) { + config.simulcast_layers[i].scale_resolution_down_by = kScaleFactors[i]; + } + + // Verify requested alignment from sink, alignment is not adjusted. + VideoEncoder::EncoderInfo info = + GetEncoderInfo(kRequestedAlignment, kApplyAlignmentToAllLayers); + int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors( + info, &config, absl::nullopt); + EXPECT_EQ(alignment, kRequestedAlignment); + + // Verify that scale factors are not adjusted. + for (int i = 0; i < num_streams; ++i) { + EXPECT_EQ(config.simulcast_layers[i].scale_resolution_down_by, + kScaleFactors[i]); + } +} + +TEST_P(AlignmentAdjusterTestTwoLayers, AlignmentAppliedToAllLayers) { + const bool kApplyAlignmentToAllLayers = true; + + // Fill config with the scaling factor by which to reduce encoding size. + const int num_streams = kScaleFactors.size(); + VideoEncoderConfig config; + test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config); + for (int i = 0; i < num_streams; ++i) { + config.simulcast_layers[i].scale_resolution_down_by = kScaleFactors[i]; + } + + // Verify requested alignment from sink, alignment is not adjusted. + VideoEncoder::EncoderInfo info = + GetEncoderInfo(kRequestedAlignment, kApplyAlignmentToAllLayers); + int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors( + info, &config, absl::optional(kMaxLayers)); + EXPECT_EQ(alignment, kAdjustedAlignment); + + // Verify adjusted scale factors. 
+ for (int i = 0; i < num_streams; ++i) { + EXPECT_EQ(config.simulcast_layers[i].scale_resolution_down_by, + kAdjustedScaleFactors[i]); + } +} + +} // namespace test +} // namespace webrtc diff --git a/video/buffered_frame_decryptor.cc b/video/buffered_frame_decryptor.cc index fc9dff5b02..436fff83f8 100644 --- a/video/buffered_frame_decryptor.cc +++ b/video/buffered_frame_decryptor.cc @@ -36,7 +36,7 @@ void BufferedFrameDecryptor::SetFrameDecryptor( } void BufferedFrameDecryptor::ManageEncryptedFrame( - std::unique_ptr encrypted_frame) { + std::unique_ptr encrypted_frame) { switch (DecryptFrame(encrypted_frame.get())) { case FrameDecision::kStash: if (stashed_frames_.size() >= kMaxStashedFrames) { @@ -55,7 +55,7 @@ void BufferedFrameDecryptor::ManageEncryptedFrame( } BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame( - video_coding::RtpFrameObject* frame) { + RtpFrameObject* frame) { // Optionally attempt to decrypt the raw video frame if it was provided. if (frame_decryptor_ == nullptr) { RTC_LOG(LS_INFO) << "Frame decryption required but not attached to this " @@ -73,7 +73,7 @@ BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame( frame->size()); RTC_CHECK_LE(max_plaintext_byte_size, frame->size()); // Place the decrypted frame inline into the existing frame. - rtc::ArrayView inline_decrypted_bitstream(frame->data(), + rtc::ArrayView inline_decrypted_bitstream(frame->mutable_data(), max_plaintext_byte_size); // Enable authenticating the header if the field trial isn't disabled. diff --git a/video/buffered_frame_decryptor.h b/video/buffered_frame_decryptor.h index ff04837bc0..f6dd8d8c2a 100644 --- a/video/buffered_frame_decryptor.h +++ b/video/buffered_frame_decryptor.h @@ -27,8 +27,7 @@ class OnDecryptedFrameCallback { public: virtual ~OnDecryptedFrameCallback() = default; // Called each time a decrypted frame is returned. 
- virtual void OnDecryptedFrame( - std::unique_ptr frame) = 0; + virtual void OnDecryptedFrame(std::unique_ptr frame) = 0; }; // This callback is called each time there is a status change in the decryption @@ -72,8 +71,7 @@ class BufferedFrameDecryptor final { // Determines whether the frame should be stashed, dropped or handed off to // the OnDecryptedFrameCallback. - void ManageEncryptedFrame( - std::unique_ptr encrypted_frame); + void ManageEncryptedFrame(std::unique_ptr encrypted_frame); private: // Represents what should be done with a given frame. @@ -82,7 +80,7 @@ class BufferedFrameDecryptor final { // Attempts to decrypt the frame, if it fails and no prior frames have been // decrypted it will return kStash. Otherwise fail to decrypts will return // kDrop. Successful decryptions will always return kDecrypted. - FrameDecision DecryptFrame(video_coding::RtpFrameObject* frame); + FrameDecision DecryptFrame(RtpFrameObject* frame); // Retries all the stashed frames this is triggered each time a kDecrypted // event occurs. 
void RetryStashedFrames(); @@ -96,7 +94,7 @@ class BufferedFrameDecryptor final { rtc::scoped_refptr frame_decryptor_; OnDecryptedFrameCallback* const decrypted_frame_callback_; OnDecryptionStatusChangeCallback* const decryption_status_change_callback_; - std::deque> stashed_frames_; + std::deque> stashed_frames_; }; } // namespace webrtc diff --git a/video/buffered_frame_decryptor_unittest.cc b/video/buffered_frame_decryptor_unittest.cc index bbc08b0da3..2f8a183ba1 100644 --- a/video/buffered_frame_decryptor_unittest.cc +++ b/video/buffered_frame_decryptor_unittest.cc @@ -43,8 +43,7 @@ class BufferedFrameDecryptorTest : public ::testing::Test, public OnDecryptionStatusChangeCallback { public: // Implements the OnDecryptedFrameCallbackInterface - void OnDecryptedFrame( - std::unique_ptr frame) override { + void OnDecryptedFrame(std::unique_ptr frame) override { decrypted_frame_call_count_++; } @@ -54,14 +53,13 @@ class BufferedFrameDecryptorTest : public ::testing::Test, // Returns a new fake RtpFrameObject it abstracts the difficult construction // of the RtpFrameObject to simplify testing. 
- std::unique_ptr CreateRtpFrameObject( - bool key_frame) { + std::unique_ptr CreateRtpFrameObject(bool key_frame) { seq_num_++; RTPVideoHeader rtp_video_header; rtp_video_header.generic.emplace(); // clang-format off - return std::make_unique( + return std::make_unique( seq_num_, seq_num_, /*markerBit=*/true, @@ -88,7 +86,7 @@ class BufferedFrameDecryptorTest : public ::testing::Test, decrypted_frame_call_count_ = 0; decryption_status_change_count_ = 0; seq_num_ = 0; - mock_frame_decryptor_ = new rtc::RefCountedObject(); + mock_frame_decryptor_ = rtc::make_ref_counted(); buffered_frame_decryptor_ = std::make_unique(this, this); buffered_frame_decryptor_->SetFrameDecryptor(mock_frame_decryptor_.get()); diff --git a/video/call_stats.cc b/video/call_stats.cc index 27e00ee7ca..d575e114d8 100644 --- a/video/call_stats.cc +++ b/video/call_stats.cc @@ -129,7 +129,7 @@ void CallStats::Process() { max_rtt_ms_ = GetMaxRttMs(reports_); avg_rtt_ms = GetNewAvgRttMs(reports_, avg_rtt_ms); { - rtc::CritScope lock(&avg_rtt_ms_lock_); + MutexLock lock(&avg_rtt_ms_lock_); avg_rtt_ms_ = avg_rtt_ms; } @@ -178,7 +178,7 @@ int64_t CallStats::LastProcessedRtt() const { // allow only reading this from the process thread (or TQ once we get there) // so that the lock isn't necessary. 
- rtc::CritScope cs(&avg_rtt_ms_lock_); + MutexLock lock(&avg_rtt_ms_lock_); return avg_rtt_ms_; } diff --git a/video/call_stats.h b/video/call_stats.h index 80030012b6..5dc8fa0cbb 100644 --- a/video/call_stats.h +++ b/video/call_stats.h @@ -14,12 +14,12 @@ #include #include +#include "api/sequence_checker.h" #include "modules/include/module.h" #include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -90,7 +90,7 @@ class CallStats : public Module, public RtcpRttStats { int64_t avg_rtt_ms_; // Protects |avg_rtt_ms_|. - rtc::CriticalSection avg_rtt_ms_lock_; + mutable Mutex avg_rtt_ms_lock_; // |sum_avg_rtt_ms_|, |num_avg_rtt_| and |time_of_first_rtt_ms_| are only used // on the ProcessThread when running. When the Process Thread is not running, @@ -110,8 +110,8 @@ class CallStats : public Module, public RtcpRttStats { // for the observers_ list, which makes the most common case lock free. 
std::list observers_; - rtc::ThreadChecker construction_thread_checker_; - rtc::ThreadChecker process_thread_checker_; + SequenceChecker construction_thread_checker_; + SequenceChecker process_thread_checker_; ProcessThread* const process_thread_; bool process_thread_running_ RTC_GUARDED_BY(construction_thread_checker_); diff --git a/video/call_stats2.cc b/video/call_stats2.cc index d190294c7f..2b7c61e0f8 100644 --- a/video/call_stats2.cc +++ b/video/call_stats2.cc @@ -12,6 +12,7 @@ #include #include +#include #include "absl/algorithm/container.h" #include "modules/utility/include/process_thread.h" @@ -75,16 +76,11 @@ CallStats::CallStats(Clock* clock, TaskQueueBase* task_queue) time_of_first_rtt_ms_(-1), task_queue_(task_queue) { RTC_DCHECK(task_queue_); - process_thread_checker_.Detach(); - repeating_task_ = - RepeatingTaskHandle::DelayedStart(task_queue_, kUpdateInterval, [this]() { - UpdateAndReport(); - return kUpdateInterval; - }); + RTC_DCHECK_RUN_ON(task_queue_); } CallStats::~CallStats() { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(task_queue_); RTC_DCHECK(observers_.empty()); repeating_task_.Stop(); @@ -92,69 +88,72 @@ CallStats::~CallStats() { UpdateHistograms(); } +void CallStats::EnsureStarted() { + RTC_DCHECK_RUN_ON(task_queue_); + repeating_task_ = + RepeatingTaskHandle::DelayedStart(task_queue_, kUpdateInterval, [this]() { + UpdateAndReport(); + return kUpdateInterval; + }); +} + void CallStats::UpdateAndReport() { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(task_queue_); - // |avg_rtt_ms_| is allowed to be read on the construction thread since that's - // the only thread that modifies the value. 
- int64_t avg_rtt_ms = avg_rtt_ms_; RemoveOldReports(clock_->CurrentTime().ms(), &reports_); max_rtt_ms_ = GetMaxRttMs(reports_); - avg_rtt_ms = GetNewAvgRttMs(reports_, avg_rtt_ms); - { - rtc::CritScope lock(&avg_rtt_ms_lock_); - avg_rtt_ms_ = avg_rtt_ms; - } + avg_rtt_ms_ = GetNewAvgRttMs(reports_, avg_rtt_ms_); // If there is a valid rtt, update all observers with the max rtt. if (max_rtt_ms_ >= 0) { - RTC_DCHECK_GE(avg_rtt_ms, 0); + RTC_DCHECK_GE(avg_rtt_ms_, 0); for (CallStatsObserver* observer : observers_) - observer->OnRttUpdate(avg_rtt_ms, max_rtt_ms_); + observer->OnRttUpdate(avg_rtt_ms_, max_rtt_ms_); // Sum for Histogram of average RTT reported over the entire call. - sum_avg_rtt_ms_ += avg_rtt_ms; + sum_avg_rtt_ms_ += avg_rtt_ms_; ++num_avg_rtt_; } } void CallStats::RegisterStatsObserver(CallStatsObserver* observer) { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(task_queue_); if (!absl::c_linear_search(observers_, observer)) observers_.push_back(observer); } void CallStats::DeregisterStatsObserver(CallStatsObserver* observer) { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(task_queue_); observers_.remove(observer); } int64_t CallStats::LastProcessedRtt() const { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(task_queue_); // No need for locking since we're on the construction thread. return avg_rtt_ms_; } -int64_t CallStats::LastProcessedRttFromProcessThread() const { - RTC_DCHECK_RUN_ON(&process_thread_checker_); - rtc::CritScope lock(&avg_rtt_ms_lock_); - return avg_rtt_ms_; -} - void CallStats::OnRttUpdate(int64_t rtt) { - RTC_DCHECK_RUN_ON(&process_thread_checker_); - + // This callback may for some RtpRtcp module instances (video send stream) be + // invoked from a separate task queue, in other cases, we should already be + // on the correct TQ. 
int64_t now_ms = clock_->TimeInMilliseconds(); - task_queue_->PostTask(ToQueuedTask(task_safety_, [this, rtt, now_ms]() { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + auto update = [this, rtt, now_ms]() { + RTC_DCHECK_RUN_ON(task_queue_); reports_.push_back(RttTime(rtt, now_ms)); if (time_of_first_rtt_ms_ == -1) time_of_first_rtt_ms_ = now_ms; UpdateAndReport(); - })); + }; + + if (task_queue_->IsCurrent()) { + update(); + } else { + task_queue_->PostTask(ToQueuedTask(task_safety_, std::move(update))); + } } void CallStats::UpdateHistograms() { - RTC_DCHECK_RUN_ON(&construction_thread_checker_); + RTC_DCHECK_RUN_ON(task_queue_); if (time_of_first_rtt_ms_ == -1 || num_avg_rtt_ < 1) return; diff --git a/video/call_stats2.h b/video/call_stats2.h index 8f53358685..35a7935581 100644 --- a/video/call_stats2.h +++ b/video/call_stats2.h @@ -18,8 +18,6 @@ #include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/synchronization/sequence_checker.h" #include "rtc_base/task_queue.h" #include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/task_utils/repeating_task.h" @@ -33,9 +31,13 @@ class CallStats { // Time interval for updating the observers. static constexpr TimeDelta kUpdateInterval = TimeDelta::Millis(1000); + // Must be created and destroyed on the same task_queue. CallStats(Clock* clock, TaskQueueBase* task_queue); ~CallStats(); + // Ensure that necessary repeating tasks are started. + void EnsureStarted(); + // Expose an RtcpRttStats implementation without inheriting from RtcpRttStats. 
// That allows us to separate the threading model of how RtcpRttStats is // used (mostly on a process thread) and how CallStats is used (mostly on @@ -50,11 +52,6 @@ class CallStats { // Expose |LastProcessedRtt()| from RtcpRttStats to the public interface, as // it is the part of the API that is needed by direct users of CallStats. - // TODO(tommi): Threading or lifetime guarantees are not explicit in how - // CallStats is used as RtcpRttStats or how pointers are cached in a - // few different places (distributed via Call). It would be good to clarify - // from what thread/TQ calls to OnRttUpdate and LastProcessedRtt need to be - // allowed. int64_t LastProcessedRtt() const; // Exposed for tests to test histogram support. @@ -70,7 +67,6 @@ class CallStats { private: // Part of the RtcpRttStats implementation. Called by RtcpRttStatsImpl. void OnRttUpdate(int64_t rtt); - int64_t LastProcessedRttFromProcessThread() const; void UpdateAndReport(); @@ -80,64 +76,51 @@ class CallStats { class RtcpRttStatsImpl : public RtcpRttStats { public: - explicit RtcpRttStatsImpl(CallStats* owner) : owner_(owner) { - process_thread_checker_.Detach(); - } + explicit RtcpRttStatsImpl(CallStats* owner) : owner_(owner) {} ~RtcpRttStatsImpl() override = default; private: void OnRttUpdate(int64_t rtt) override { - RTC_DCHECK_RUN_ON(&process_thread_checker_); + // For video send streams (video/video_send_stream.cc), the RtpRtcp module + // is currently created on a transport worker TaskQueue and not the worker + // thread - which is what happens in other cases. We should probably fix + // that so that the call consistently comes in on the right thread. owner_->OnRttUpdate(rtt); } int64_t LastProcessedRtt() const override { - RTC_DCHECK_RUN_ON(&process_thread_checker_); - return owner_->LastProcessedRttFromProcessThread(); + // This call path shouldn't be used anymore. This impl is only for + // propagating the rtt from the RtpRtcp module, which does not call + // LastProcessedRtt(). 
Down the line we should consider removing + // LastProcessedRtt() and use the interface for event notifications only. + RTC_NOTREACHED() << "Legacy call path"; + return 0; } CallStats* const owner_; - SequenceChecker process_thread_checker_; } rtcp_rtt_stats_impl_{this}; Clock* const clock_; // Used to regularly call UpdateAndReport(). - RepeatingTaskHandle repeating_task_ - RTC_GUARDED_BY(construction_thread_checker_); + RepeatingTaskHandle repeating_task_ RTC_GUARDED_BY(task_queue_); // The last RTT in the statistics update (zero if there is no valid estimate). - int64_t max_rtt_ms_ RTC_GUARDED_BY(construction_thread_checker_); + int64_t max_rtt_ms_ RTC_GUARDED_BY(task_queue_); - // Accessed from two separate threads. - // |avg_rtt_ms_| may be read on the construction thread without a lock. - // |avg_rtt_ms_lock_| must be held elsewhere for reading. - // |avg_rtt_ms_lock_| must be held on the construction thread for writing. - int64_t avg_rtt_ms_; + // Last reported average RTT value. + int64_t avg_rtt_ms_ RTC_GUARDED_BY(task_queue_); - // Protects |avg_rtt_ms_|. - rtc::CriticalSection avg_rtt_ms_lock_; - - // |sum_avg_rtt_ms_|, |num_avg_rtt_| and |time_of_first_rtt_ms_| are only used - // on the ProcessThread when running. When the Process Thread is not running, - // (and only then) they can be used in UpdateHistograms(), usually called from - // the dtor. - int64_t sum_avg_rtt_ms_ RTC_GUARDED_BY(construction_thread_checker_); - int64_t num_avg_rtt_ RTC_GUARDED_BY(construction_thread_checker_); - int64_t time_of_first_rtt_ms_ RTC_GUARDED_BY(construction_thread_checker_); + int64_t sum_avg_rtt_ms_ RTC_GUARDED_BY(task_queue_); + int64_t num_avg_rtt_ RTC_GUARDED_BY(task_queue_); + int64_t time_of_first_rtt_ms_ RTC_GUARDED_BY(task_queue_); // All Rtt reports within valid time interval, oldest first. - std::list reports_ RTC_GUARDED_BY(construction_thread_checker_); + std::list reports_ RTC_GUARDED_BY(task_queue_); // Observers getting stats reports. 
- // When attached to ProcessThread, this is read-only. In order to allow - // modification, we detach from the process thread while the observer - // list is updated, to avoid races. This allows us to not require a lock - // for the observers_ list, which makes the most common case lock free. - std::list observers_; - - SequenceChecker construction_thread_checker_; - SequenceChecker process_thread_checker_; + std::list observers_ RTC_GUARDED_BY(task_queue_); + TaskQueueBase* const task_queue_; // Used to signal destruction to potentially pending tasks. diff --git a/video/call_stats2_unittest.cc b/video/call_stats2_unittest.cc index b3d43cb92a..33235faeaa 100644 --- a/video/call_stats2_unittest.cc +++ b/video/call_stats2_unittest.cc @@ -38,7 +38,10 @@ class MockStatsObserver : public CallStatsObserver { class CallStats2Test : public ::testing::Test { public: - CallStats2Test() { process_thread_->Start(); } + CallStats2Test() { + call_stats_.EnsureStarted(); + process_thread_->Start(); + } ~CallStats2Test() override { process_thread_->Stop(); } diff --git a/video/encoder_bitrate_adjuster.cc b/video/encoder_bitrate_adjuster.cc index 45d88875e3..6a2c99ffe3 100644 --- a/video/encoder_bitrate_adjuster.cc +++ b/video/encoder_bitrate_adjuster.cc @@ -314,15 +314,14 @@ void EncoderBitrateAdjuster::OnEncoderInfo( AdjustRateAllocation(current_rate_control_parameters_); } -void EncoderBitrateAdjuster::OnEncodedFrame(const EncodedImage& encoded_image, +void EncoderBitrateAdjuster::OnEncodedFrame(DataSize size, + int spatial_index, int temporal_index) { ++frames_since_layout_change_; // Detectors may not exist, for instance if ScreenshareLayers is used. 
- auto& detector = - overshoot_detectors_[encoded_image.SpatialIndex().value_or(0)] - [temporal_index]; + auto& detector = overshoot_detectors_[spatial_index][temporal_index]; if (detector) { - detector->OnEncodedFrame(encoded_image.size(), rtc::TimeMillis()); + detector->OnEncodedFrame(size.bytes(), rtc::TimeMillis()); } } diff --git a/video/encoder_bitrate_adjuster.h b/video/encoder_bitrate_adjuster.h index b142519b4e..74d0289ad0 100644 --- a/video/encoder_bitrate_adjuster.h +++ b/video/encoder_bitrate_adjuster.h @@ -47,7 +47,7 @@ class EncoderBitrateAdjuster { void OnEncoderInfo(const VideoEncoder::EncoderInfo& encoder_info); // Updates the overuse detectors according to the encoded image size. - void OnEncodedFrame(const EncodedImage& encoded_image, int temporal_index); + void OnEncodedFrame(DataSize size, int spatial_index, int temporal_index); void Reset(); diff --git a/video/encoder_bitrate_adjuster_unittest.cc b/video/encoder_bitrate_adjuster_unittest.cc index a521f1de78..c249a5cb79 100644 --- a/video/encoder_bitrate_adjuster_unittest.cc +++ b/video/encoder_bitrate_adjuster_unittest.cc @@ -100,9 +100,6 @@ class EncoderBitrateAdjusterTest : public ::testing::Test { RTC_DCHECK_EQ(media_utilization_factors.size(), network_utilization_factors.size()); - constexpr size_t kMaxFrameSize = 100000; - uint8_t buffer[kMaxFrameSize]; - const int64_t start_us = rtc::TimeMicros(); while (rtc::TimeMicros() < start_us + (duration_ms * rtc::kNumMicrosecsPerMillisec)) { @@ -163,15 +160,12 @@ class EncoderBitrateAdjusterTest : public ::testing::Test { int sequence_idx = sequence_idx_[si][ti]; sequence_idx_[si][ti] = (sequence_idx_[si][ti] + 1) % kSequenceLength; - const size_t frame_size_bytes = + const DataSize frame_size = DataSize::Bytes( (sequence_idx < kSequenceLength / 2) ? 
media_frame_size - network_frame_size_diff_bytes - : media_frame_size + network_frame_size_diff_bytes; + : media_frame_size + network_frame_size_diff_bytes); - EncodedImage image(buffer, 0, kMaxFrameSize); - image.set_size(frame_size_bytes); - image.SetSpatialIndex(si); - adjuster_->OnEncodedFrame(image, ti); + adjuster_->OnEncodedFrame(frame_size, si, ti); sequence_idx = ++sequence_idx % kSequenceLength; } } diff --git a/video/encoder_rtcp_feedback.cc b/video/encoder_rtcp_feedback.cc index a736d83b82..17095a0a0c 100644 --- a/video/encoder_rtcp_feedback.cc +++ b/video/encoder_rtcp_feedback.cc @@ -10,6 +10,9 @@ #include "video/encoder_rtcp_feedback.h" +#include +#include + #include "absl/types/optional.h" #include "api/video_codecs/video_encoder.h" #include "rtc_base/checks.h" @@ -21,47 +24,36 @@ namespace { constexpr int kMinKeyframeSendIntervalMs = 300; } // namespace -EncoderRtcpFeedback::EncoderRtcpFeedback(Clock* clock, - const std::vector& ssrcs, - VideoStreamEncoderInterface* encoder) +EncoderRtcpFeedback::EncoderRtcpFeedback( + Clock* clock, + const std::vector& ssrcs, + VideoStreamEncoderInterface* encoder, + std::function( + uint32_t ssrc, + const std::vector& seq_nums)> get_packet_infos) : clock_(clock), ssrcs_(ssrcs), - rtp_video_sender_(nullptr), + get_packet_infos_(std::move(get_packet_infos)), video_stream_encoder_(encoder), - time_last_intra_request_ms_(-1), - min_keyframe_send_interval_ms_( - KeyframeIntervalSettings::ParseFromFieldTrials() - .MinKeyframeSendIntervalMs() - .value_or(kMinKeyframeSendIntervalMs)) { + time_last_packet_delivery_queue_(Timestamp::Millis(0)), + min_keyframe_send_interval_( + TimeDelta::Millis(KeyframeIntervalSettings::ParseFromFieldTrials() + .MinKeyframeSendIntervalMs() + .value_or(kMinKeyframeSendIntervalMs))) { RTC_DCHECK(!ssrcs.empty()); + packet_delivery_queue_.Detach(); } -void EncoderRtcpFeedback::SetRtpVideoSender( - const RtpVideoSenderInterface* rtp_video_sender) { - RTC_DCHECK(rtp_video_sender); - 
RTC_DCHECK(!rtp_video_sender_); - rtp_video_sender_ = rtp_video_sender; -} +// Called via Call::DeliverRtcp. +void EncoderRtcpFeedback::OnReceivedIntraFrameRequest(uint32_t ssrc) { + RTC_DCHECK_RUN_ON(&packet_delivery_queue_); + RTC_DCHECK(std::find(ssrcs_.begin(), ssrcs_.end(), ssrc) != ssrcs_.end()); -bool EncoderRtcpFeedback::HasSsrc(uint32_t ssrc) { - for (uint32_t registered_ssrc : ssrcs_) { - if (registered_ssrc == ssrc) { - return true; - } - } - return false; -} + const Timestamp now = clock_->CurrentTime(); + if (time_last_packet_delivery_queue_ + min_keyframe_send_interval_ > now) + return; -void EncoderRtcpFeedback::OnReceivedIntraFrameRequest(uint32_t ssrc) { - RTC_DCHECK(HasSsrc(ssrc)); - { - int64_t now_ms = clock_->TimeInMilliseconds(); - rtc::CritScope lock(&crit_); - if (time_last_intra_request_ms_ + min_keyframe_send_interval_ms_ > now_ms) { - return; - } - time_last_intra_request_ms_ = now_ms; - } + time_last_packet_delivery_queue_ = now; // Always produce key frame for all streams. 
video_stream_encoder_->SendKeyFrame(); @@ -72,12 +64,12 @@ void EncoderRtcpFeedback::OnReceivedLossNotification( uint16_t seq_num_of_last_decodable, uint16_t seq_num_of_last_received, bool decodability_flag) { - RTC_DCHECK(rtp_video_sender_) << "Object initialization incomplete."; + RTC_DCHECK(get_packet_infos_) << "Object initialization incomplete."; const std::vector seq_nums = {seq_num_of_last_decodable, seq_num_of_last_received}; const std::vector infos = - rtp_video_sender_->GetSentRtpPacketInfos(ssrc, seq_nums); + get_packet_infos_(ssrc, seq_nums); if (infos.empty()) { return; } diff --git a/video/encoder_rtcp_feedback.h b/video/encoder_rtcp_feedback.h index b5dd0288f3..2aadcc34e7 100644 --- a/video/encoder_rtcp_feedback.h +++ b/video/encoder_rtcp_feedback.h @@ -10,12 +10,16 @@ #ifndef VIDEO_ENCODER_RTCP_FEEDBACK_H_ #define VIDEO_ENCODER_RTCP_FEEDBACK_H_ +#include #include +#include "api/sequence_checker.h" +#include "api/units/time_delta.h" +#include "api/units/timestamp.h" #include "api/video/video_stream_encoder_interface.h" #include "call/rtp_video_sender_interface.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/system/no_unique_address.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -27,13 +31,15 @@ class VideoStreamEncoderInterface; class EncoderRtcpFeedback : public RtcpIntraFrameObserver, public RtcpLossNotificationObserver { public: - EncoderRtcpFeedback(Clock* clock, - const std::vector& ssrcs, - VideoStreamEncoderInterface* encoder); + EncoderRtcpFeedback( + Clock* clock, + const std::vector& ssrcs, + VideoStreamEncoderInterface* encoder, + std::function( + uint32_t ssrc, + const std::vector& seq_nums)> get_packet_infos); ~EncoderRtcpFeedback() override = default; - void SetRtpVideoSender(const RtpVideoSenderInterface* rtp_video_sender); - void OnReceivedIntraFrameRequest(uint32_t ssrc) override; // Implements RtcpLossNotificationObserver. 
@@ -43,17 +49,19 @@ class EncoderRtcpFeedback : public RtcpIntraFrameObserver, bool decodability_flag) override; private: - bool HasSsrc(uint32_t ssrc); - Clock* const clock_; const std::vector ssrcs_; - const RtpVideoSenderInterface* rtp_video_sender_; + const std::function( + uint32_t ssrc, + const std::vector& seq_nums)> + get_packet_infos_; VideoStreamEncoderInterface* const video_stream_encoder_; - rtc::CriticalSection crit_; - int64_t time_last_intra_request_ms_ RTC_GUARDED_BY(crit_); + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_delivery_queue_; + Timestamp time_last_packet_delivery_queue_ + RTC_GUARDED_BY(packet_delivery_queue_); - const int min_keyframe_send_interval_ms_; + const TimeDelta min_keyframe_send_interval_; }; } // namespace webrtc diff --git a/video/encoder_rtcp_feedback_unittest.cc b/video/encoder_rtcp_feedback_unittest.cc index 81ac22b6c6..4cbb747e51 100644 --- a/video/encoder_rtcp_feedback_unittest.cc +++ b/video/encoder_rtcp_feedback_unittest.cc @@ -26,7 +26,8 @@ class VieKeyRequestTest : public ::testing::Test { encoder_rtcp_feedback_( &simulated_clock_, std::vector(1, VieKeyRequestTest::kSsrc), - &encoder_) {} + &encoder_, + nullptr) {} protected: const uint32_t kSsrc = 1234; diff --git a/video/end_to_end_tests/bandwidth_tests.cc b/video/end_to_end_tests/bandwidth_tests.cc index 6e8e11d76f..721738393b 100644 --- a/video/end_to_end_tests/bandwidth_tests.cc +++ b/video/end_to_end_tests/bandwidth_tests.cc @@ -16,8 +16,9 @@ #include "api/video/video_bitrate_allocation.h" #include "call/fake_network_pipe.h" #include "call/simulated_network.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "rtc_base/rate_limiter.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/task_utils/to_queued_task.h" #include "system_wrappers/include/sleep.h" @@ -238,13 +239,13 @@ TEST_F(BandwidthEndToEndTest, RembWithSendSideBwe) { 
encoder_config->max_bitrate_bps = 2000000; ASSERT_EQ(1u, receive_configs->size()); - RtpRtcp::Configuration config; + RtpRtcpInterface::Configuration config; config.receiver_only = true; config.clock = clock_; config.outgoing_transport = receive_transport_; config.retransmission_rate_limiter = &retransmission_rate_limiter_; config.local_media_ssrc = (*receive_configs)[0].rtp.local_ssrc; - rtp_rtcp_ = RtpRtcp::Create(config); + rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(config); rtp_rtcp_->SetRemoteSSRC((*receive_configs)[0].rtp.remote_ssrc); rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize); } @@ -303,7 +304,7 @@ TEST_F(BandwidthEndToEndTest, RembWithSendSideBwe) { Clock* const clock_; uint32_t sender_ssrc_; int remb_bitrate_bps_; - std::unique_ptr rtp_rtcp_; + std::unique_ptr rtp_rtcp_; test::PacketTransport* receive_transport_; TestState state_; RateLimiter retransmission_rate_limiter_; @@ -318,7 +319,6 @@ TEST_F(BandwidthEndToEndTest, ReportsSetEncoderRates) { // test, due to the packetization overhead and encoder pushback. webrtc::test::ScopedFieldTrials field_trials( std::string(field_trial::GetFieldTrialString()) + - "WebRTC-SubtractPacketizationOverhead/Disabled/" "WebRTC-VideoRateControl/bitrate_adjuster:false/"); class EncoderRateStatsTest : public test::EndToEndTest, public test::FakeEncoder { @@ -353,7 +353,7 @@ TEST_F(BandwidthEndToEndTest, ReportsSetEncoderRates) { // Make sure not to trigger on any default zero bitrates. 
if (parameters.bitrate.get_sum_bps() == 0) return; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); bitrate_kbps_ = parameters.bitrate.get_sum_kbps(); observation_complete_.Set(); } @@ -375,7 +375,7 @@ TEST_F(BandwidthEndToEndTest, ReportsSetEncoderRates) { for (int i = 0; i < kDefaultTimeoutMs; ++i) { VideoSendStream::Stats stats = send_stream_->GetStats(); { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if ((stats.target_media_bitrate_bps + 500) / 1000 == static_cast(bitrate_kbps_)) { return; @@ -399,11 +399,11 @@ TEST_F(BandwidthEndToEndTest, ReportsSetEncoderRates) { private: TaskQueueBase* const task_queue_; - rtc::CriticalSection crit_; + Mutex mutex_; VideoSendStream* send_stream_; test::VideoEncoderProxyFactory encoder_factory_; std::unique_ptr bitrate_allocator_factory_; - uint32_t bitrate_kbps_ RTC_GUARDED_BY(crit_); + uint32_t bitrate_kbps_ RTC_GUARDED_BY(mutex_); } test(task_queue()); RunBaseTest(&test); diff --git a/video/end_to_end_tests/codec_tests.cc b/video/end_to_end_tests/codec_tests.cc index b73b289ec8..e4eabcf73d 100644 --- a/video/end_to_end_tests/codec_tests.cc +++ b/video/end_to_end_tests/codec_tests.cc @@ -34,18 +34,14 @@ enum : int { // The first valid value is 1. 
}; } // namespace -class CodecEndToEndTest : public test::CallTest, - public ::testing::WithParamInterface { +class CodecEndToEndTest : public test::CallTest { public: - CodecEndToEndTest() : field_trial_(GetParam()) { + CodecEndToEndTest() { RegisterRtpExtension( RtpExtension(RtpExtension::kColorSpaceUri, kColorSpaceExtensionId)); RegisterRtpExtension(RtpExtension(RtpExtension::kVideoRotationUri, kVideoRotationExtensionId)); } - - private: - test::ScopedFieldTrials field_trial_; }; class CodecObserver : public test::EndToEndTest, @@ -88,7 +84,7 @@ class CodecObserver : public test::EndToEndTest, send_config->rtp.payload_type; (*receive_configs)[0].decoders[0].video_format = SdpVideoFormat(send_config->rtp.payload_name); - (*receive_configs)[0].decoders[0].decoder_factory = decoder_factory_; + (*receive_configs)[0].decoder_factory = decoder_factory_; } void OnFrame(const VideoFrame& video_frame) override { @@ -121,13 +117,7 @@ class CodecObserver : public test::EndToEndTest, int frame_counter_; }; -INSTANTIATE_TEST_SUITE_P( - GenericDescriptor, - CodecEndToEndTest, - ::testing::Values("WebRTC-GenericDescriptor/Disabled/", - "WebRTC-GenericDescriptor/Enabled/")); - -TEST_P(CodecEndToEndTest, SendsAndReceivesVP8) { +TEST_F(CodecEndToEndTest, SendsAndReceivesVP8) { test::FunctionVideoEncoderFactory encoder_factory( []() { return VP8Encoder::Create(); }); test::FunctionVideoDecoderFactory decoder_factory( @@ -137,7 +127,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP8) { RunBaseTest(&test); } -TEST_P(CodecEndToEndTest, SendsAndReceivesVP8Rotation90) { +TEST_F(CodecEndToEndTest, SendsAndReceivesVP8Rotation90) { test::FunctionVideoEncoderFactory encoder_factory( []() { return VP8Encoder::Create(); }); test::FunctionVideoDecoderFactory decoder_factory( @@ -148,7 +138,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP8Rotation90) { } #if defined(RTC_ENABLE_VP9) -TEST_P(CodecEndToEndTest, SendsAndReceivesVP9) { +TEST_F(CodecEndToEndTest, SendsAndReceivesVP9) { 
test::FunctionVideoEncoderFactory encoder_factory( []() { return VP9Encoder::Create(); }); test::FunctionVideoDecoderFactory decoder_factory( @@ -158,7 +148,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP9) { RunBaseTest(&test); } -TEST_P(CodecEndToEndTest, SendsAndReceivesVP9VideoRotation90) { +TEST_F(CodecEndToEndTest, SendsAndReceivesVP9VideoRotation90) { test::FunctionVideoEncoderFactory encoder_factory( []() { return VP9Encoder::Create(); }); test::FunctionVideoDecoderFactory decoder_factory( @@ -168,7 +158,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP9VideoRotation90) { RunBaseTest(&test); } -TEST_P(CodecEndToEndTest, SendsAndReceivesVP9ExplicitColorSpace) { +TEST_F(CodecEndToEndTest, SendsAndReceivesVP9ExplicitColorSpace) { test::FunctionVideoEncoderFactory encoder_factory( []() { return VP9Encoder::Create(); }); test::FunctionVideoDecoderFactory decoder_factory( @@ -179,7 +169,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesVP9ExplicitColorSpace) { RunBaseTest(&test); } -TEST_P(CodecEndToEndTest, +TEST_F(CodecEndToEndTest, SendsAndReceivesVP9ExplicitColorSpaceWithHdrMetadata) { test::FunctionVideoEncoderFactory encoder_factory( []() { return VP9Encoder::Create(); }); @@ -192,7 +182,7 @@ TEST_P(CodecEndToEndTest, } // Mutiplex tests are using VP9 as the underlying implementation. 
-TEST_P(CodecEndToEndTest, SendsAndReceivesMultiplex) { +TEST_F(CodecEndToEndTest, SendsAndReceivesMultiplex) { InternalEncoderFactory internal_encoder_factory; InternalDecoderFactory internal_decoder_factory; test::FunctionVideoEncoderFactory encoder_factory( @@ -211,7 +201,7 @@ TEST_P(CodecEndToEndTest, SendsAndReceivesMultiplex) { RunBaseTest(&test); } -TEST_P(CodecEndToEndTest, SendsAndReceivesMultiplexVideoRotation90) { +TEST_F(CodecEndToEndTest, SendsAndReceivesMultiplexVideoRotation90) { InternalEncoderFactory internal_encoder_factory; InternalDecoderFactory internal_decoder_factory; test::FunctionVideoEncoderFactory encoder_factory( diff --git a/video/end_to_end_tests/config_tests.cc b/video/end_to_end_tests/config_tests.cc index bf63e2a51f..1bd897cb34 100644 --- a/video/end_to_end_tests/config_tests.cc +++ b/video/end_to_end_tests/config_tests.cc @@ -104,7 +104,7 @@ TEST_F(ConfigEndToEndTest, VerifyDefaultFlexfecReceiveConfigParameters) { FlexfecReceiveStream::Config default_receive_config(&rtcp_send_transport); EXPECT_EQ(-1, default_receive_config.payload_type) << "Enabling FlexFEC requires rtpmap: flexfec negotiation."; - EXPECT_EQ(0U, default_receive_config.remote_ssrc) + EXPECT_EQ(0U, default_receive_config.rtp.remote_ssrc) << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation."; EXPECT_TRUE(default_receive_config.protected_media_ssrcs.empty()) << "Enabling FlexFEC requires ssrc-group: FEC-FR negotiation."; diff --git a/video/end_to_end_tests/extended_reports_tests.cc b/video/end_to_end_tests/extended_reports_tests.cc index f4938c943b..959042dac6 100644 --- a/video/end_to_end_tests/extended_reports_tests.cc +++ b/video/end_to_end_tests/extended_reports_tests.cc @@ -31,8 +31,8 @@ #include "call/video_send_stream.h" #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h" #include "modules/rtp_rtcp/source/rtcp_packet/target_bitrate.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" +#include "rtc_base/synchronization/mutex.h" 
#include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" #include "test/call_test.h" @@ -83,7 +83,7 @@ class RtcpXrObserver : public test::EndToEndTest { private: // Receive stream should send RR packets (and RRTR packets if enabled). Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); test::RtcpPacketParser parser; EXPECT_TRUE(parser.Parse(packet, length)); @@ -100,7 +100,7 @@ class RtcpXrObserver : public test::EndToEndTest { } // Send stream should send SR packets (and DLRR packets if enabled). Action OnSendRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); test::RtcpPacketParser parser; EXPECT_TRUE(parser.Parse(packet, length)); @@ -198,16 +198,16 @@ class RtcpXrObserver : public test::EndToEndTest { static const int kNumRtcpReportPacketsToObserve = 5; - rtc::CriticalSection crit_; + Mutex mutex_; const bool enable_rrtr_; const bool expect_target_bitrate_; const bool enable_zero_target_bitrate_; const VideoEncoderConfig::ContentType content_type_; int sent_rtcp_sr_; - int sent_rtcp_rr_ RTC_GUARDED_BY(&crit_); - int sent_rtcp_rrtr_ RTC_GUARDED_BY(&crit_); - bool sent_rtcp_target_bitrate_ RTC_GUARDED_BY(&crit_); - bool sent_zero_rtcp_target_bitrate_ RTC_GUARDED_BY(&crit_); + int sent_rtcp_rr_ RTC_GUARDED_BY(&mutex_); + int sent_rtcp_rrtr_ RTC_GUARDED_BY(&mutex_); + bool sent_rtcp_target_bitrate_ RTC_GUARDED_BY(&mutex_); + bool sent_zero_rtcp_target_bitrate_ RTC_GUARDED_BY(&mutex_); int sent_rtcp_dlrr_; BuiltInNetworkBehaviorConfig forward_transport_config_; SimulatedNetwork* send_simulated_network_; @@ -246,7 +246,7 @@ TEST_F(ExtendedReportsEndToEndTest, } TEST_F(ExtendedReportsEndToEndTest, - TestExtendedReportsWithoutRrtrWithTargetBitrateFromFieldTrial) { + TestExtendedReportsWithoutRrtrWithTargetBitrateExplicitlySet) { test::ScopedFieldTrials field_trials("WebRTC-Target-Bitrate-Rtcp/Enabled/"); 
RtcpXrObserver test(/*enable_rrtr=*/false, /*expect_target_bitrate=*/true, /*enable_zero_target_bitrate=*/false, diff --git a/video/end_to_end_tests/fec_tests.cc b/video/end_to_end_tests/fec_tests.cc index c55c4dbee1..77ad9eb666 100644 --- a/video/end_to_end_tests/fec_tests.cc +++ b/video/end_to_end_tests/fec_tests.cc @@ -20,6 +20,7 @@ #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_packet.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" +#include "rtc_base/synchronization/mutex.h" #include "test/call_test.h" #include "test/field_trial.h" #include "test/gmock.h" @@ -59,7 +60,7 @@ TEST_F(FecEndToEndTest, ReceivesUlpfec) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -98,7 +99,7 @@ TEST_F(FecEndToEndTest, ReceivesUlpfec) { } void OnFrame(const VideoFrame& video_frame) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // Rendering frame with timestamp of packet that was dropped -> FEC // protection worked. 
auto it = dropped_timestamps_.find(video_frame.timestamp()); @@ -119,7 +120,7 @@ TEST_F(FecEndToEndTest, ReceivesUlpfec) { encoder_config->codec_type = kVideoCodecVP8; VideoReceiveStream::Decoder decoder = test::CreateMatchingDecoder(*send_config); - decoder.decoder_factory = &decoder_factory_; + (*receive_configs)[0].decoder_factory = &decoder_factory_; (*receive_configs)[0].decoders.clear(); (*receive_configs)[0].decoders.push_back(decoder); @@ -137,15 +138,15 @@ TEST_F(FecEndToEndTest, ReceivesUlpfec) { << "Timed out waiting for dropped frames to be rendered."; } - rtc::CriticalSection crit_; + Mutex mutex_; std::unique_ptr encoder_; test::FunctionVideoEncoderFactory encoder_factory_; InternalDecoderFactory decoder_factory_; - std::set dropped_sequence_numbers_ RTC_GUARDED_BY(crit_); + std::set dropped_sequence_numbers_ RTC_GUARDED_BY(mutex_); // Several packets can have the same timestamp. - std::multiset dropped_timestamps_ RTC_GUARDED_BY(crit_); + std::multiset dropped_timestamps_ RTC_GUARDED_BY(mutex_); Random random_; - int num_packets_sent_ RTC_GUARDED_BY(crit_); + int num_packets_sent_ RTC_GUARDED_BY(mutex_); } test; RunBaseTest(&test); @@ -169,7 +170,7 @@ class FlexfecRenderObserver : public test::EndToEndTest, private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -247,7 +248,7 @@ class FlexfecRenderObserver : public test::EndToEndTest, EXPECT_EQ(1U, report_blocks.size()); EXPECT_EQ(test::CallTest::kFlexfecSendSsrc, report_blocks[0].source_ssrc()); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); received_flexfec_rtcp_ = true; } } @@ -273,7 +274,7 @@ class FlexfecRenderObserver : public test::EndToEndTest, void OnFrame(const VideoFrame& video_frame) override { EXPECT_EQ(kVideoRotation_90, video_frame.rotation()); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // Rendering frame with 
timestamp of packet that was dropped -> FEC // protection worked. auto it = dropped_timestamps_.find(video_frame.timestamp()); @@ -313,7 +314,7 @@ class FlexfecRenderObserver : public test::EndToEndTest, void ModifyFlexfecConfigs( std::vector* receive_configs) override { - (*receive_configs)[0].local_ssrc = kFlexfecLocalSsrc; + (*receive_configs)[0].rtp.local_ssrc = kFlexfecLocalSsrc; } void PerformTest() override { @@ -321,13 +322,13 @@ class FlexfecRenderObserver : public test::EndToEndTest, << "Timed out waiting for dropped frames to be rendered."; } - rtc::CriticalSection crit_; - std::set dropped_sequence_numbers_ RTC_GUARDED_BY(crit_); + Mutex mutex_; + std::set dropped_sequence_numbers_ RTC_GUARDED_BY(mutex_); // Several packets can have the same timestamp. - std::multiset dropped_timestamps_ RTC_GUARDED_BY(crit_); + std::multiset dropped_timestamps_ RTC_GUARDED_BY(mutex_); const bool enable_nack_; const bool expect_flexfec_rtcp_; - bool received_flexfec_rtcp_ RTC_GUARDED_BY(crit_); + bool received_flexfec_rtcp_ RTC_GUARDED_BY(mutex_); Random random_; int num_packets_sent_; }; @@ -360,7 +361,7 @@ TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock_(&crit_); + MutexLock lock_(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -424,7 +425,7 @@ TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) { } Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock_(&crit_); + MutexLock lock_(&mutex_); if (state_ == kVerifyUlpfecPacketNotInNackList) { test::RtcpPacketParser rtcp_parser; rtcp_parser.Parse(packet, length); @@ -486,7 +487,7 @@ TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) { send_config->rtp.payload_type; (*receive_configs)[0].decoders[0].video_format = SdpVideoFormat(send_config->rtp.payload_name); - (*receive_configs)[0].decoders[0].decoder_factory = &decoder_factory_; + 
(*receive_configs)[0].decoder_factory = &decoder_factory_; } void PerformTest() override { @@ -503,8 +504,8 @@ TEST_F(FecEndToEndTest, ReceivedUlpfecPacketsNotNacked) { kVerifyUlpfecPacketNotInNackList, } state_; - rtc::CriticalSection crit_; - uint16_t ulpfec_sequence_number_ RTC_GUARDED_BY(&crit_); + Mutex mutex_; + uint16_t ulpfec_sequence_number_ RTC_GUARDED_BY(&mutex_); bool has_last_sequence_number_; uint16_t last_sequence_number_; test::FunctionVideoEncoderFactory encoder_factory_; diff --git a/video/end_to_end_tests/frame_encryption_tests.cc b/video/end_to_end_tests/frame_encryption_tests.cc index 44ac3ecfdf..df54337be5 100644 --- a/video/end_to_end_tests/frame_encryption_tests.cc +++ b/video/end_to_end_tests/frame_encryption_tests.cc @@ -46,8 +46,8 @@ class DecryptedFrameObserver : public test::EndToEndTest, encoder_config->codec_type = kVideoCodecVP8; VideoReceiveStream::Decoder decoder = test::CreateMatchingDecoder(*send_config); - decoder.decoder_factory = &decoder_factory_; for (auto& recv_config : *receive_configs) { + recv_config.decoder_factory = &decoder_factory_; recv_config.decoders.clear(); recv_config.decoders.push_back(decoder); recv_config.renderer = this; diff --git a/video/end_to_end_tests/histogram_tests.cc b/video/end_to_end_tests/histogram_tests.cc index dd6de2543d..fa71c15e98 100644 --- a/video/end_to_end_tests/histogram_tests.cc +++ b/video/end_to_end_tests/histogram_tests.cc @@ -11,6 +11,7 @@ #include "absl/types/optional.h" #include "api/test/video/function_video_encoder_factory.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" +#include "rtc_base/synchronization/mutex.h" #include "system_wrappers/include/metrics.h" #include "test/call_test.h" #include "test/gtest.h" @@ -59,7 +60,7 @@ void HistogramTest::VerifyHistogramStats(bool use_rtx, if (video_frame.ntp_time_ms() > 0 && Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() >= video_frame.ntp_time_ms()) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); 
++num_frames_received_; } } @@ -82,7 +83,7 @@ void HistogramTest::VerifyHistogramStats(bool use_rtx, bool MinNumberOfFramesReceived() const { const int kMinRequiredHistogramSamples = 200; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return num_frames_received_ > kMinRequiredHistogramSamples; } @@ -131,13 +132,13 @@ void HistogramTest::VerifyHistogramStats(bool use_rtx, EXPECT_TRUE(Wait()) << "Timed out waiting for min frames to be received."; } - rtc::CriticalSection crit_; + mutable Mutex mutex_; const bool use_rtx_; const bool use_fec_; const bool screenshare_; test::FunctionVideoEncoderFactory encoder_factory_; absl::optional start_runtime_ms_; - int num_frames_received_ RTC_GUARDED_BY(&crit_); + int num_frames_received_ RTC_GUARDED_BY(&mutex_); } test(use_rtx, use_fec, screenshare); metrics::Reset(); diff --git a/video/end_to_end_tests/multi_codec_receive_tests.cc b/video/end_to_end_tests/multi_codec_receive_tests.cc index 354ee44072..e529ae4201 100644 --- a/video/end_to_end_tests/multi_codec_receive_tests.cc +++ b/video/end_to_end_tests/multi_codec_receive_tests.cc @@ -19,6 +19,7 @@ #include "modules/video_coding/codecs/h264/include/h264.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "test/call_test.h" #include "test/gmock.h" @@ -65,7 +66,7 @@ class FrameObserver : public test::RtpRtcpObserver, FrameObserver() : test::RtpRtcpObserver(test::CallTest::kDefaultTimeoutMs) {} void Reset(uint8_t expected_payload_type) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); num_sent_frames_ = 0; num_rendered_frames_ = 0; expected_payload_type_ = expected_payload_type; @@ -74,7 +75,7 @@ class FrameObserver : public test::RtpRtcpObserver, private: // Sends kFramesToObserve. 
Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -103,7 +104,7 @@ class FrameObserver : public test::RtpRtcpObserver, // Verifies that all sent frames are decoded and rendered. void OnFrame(const VideoFrame& rendered_frame) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); EXPECT_THAT(sent_timestamps_, Contains(rendered_frame.timestamp())); // Remove old timestamps too, only the newest decoded frame is rendered. @@ -116,12 +117,12 @@ class FrameObserver : public test::RtpRtcpObserver, } } - rtc::CriticalSection crit_; + Mutex mutex_; absl::optional last_timestamp_; // Only accessed from pacer thread. - absl::optional expected_payload_type_ RTC_GUARDED_BY(crit_); - int num_sent_frames_ RTC_GUARDED_BY(crit_) = 0; - int num_rendered_frames_ RTC_GUARDED_BY(crit_) = 0; - std::vector sent_timestamps_ RTC_GUARDED_BY(crit_); + absl::optional expected_payload_type_ RTC_GUARDED_BY(mutex_); + int num_sent_frames_ RTC_GUARDED_BY(mutex_) = 0; + int num_rendered_frames_ RTC_GUARDED_BY(mutex_) = 0; + std::vector sent_timestamps_ RTC_GUARDED_BY(mutex_); }; } // namespace @@ -159,13 +160,13 @@ class MultiCodecReceiveTest : public test::CallTest { struct CodecConfig { std::string payload_name; - VideoEncoderFactory* encoder_factory; - VideoDecoderFactory* decoder_factory; size_t num_temporal_layers; }; - void ConfigureEncoder(const CodecConfig& config); - void ConfigureDecoders(const std::vector& configs); + void ConfigureEncoder(const CodecConfig& config, + VideoEncoderFactory* encoder_factory); + void ConfigureDecoders(const std::vector& configs, + VideoDecoderFactory* decoder_factory); void RunTestWithCodecs(const std::vector& configs); private: @@ -177,23 +178,25 @@ class MultiCodecReceiveTest : public test::CallTest { }; void MultiCodecReceiveTest::ConfigureDecoders( - const std::vector& configs) { + const 
std::vector& configs, + VideoDecoderFactory* decoder_factory) { video_receive_configs_[0].decoders.clear(); // Placing the payload names in a std::set retains the unique names only. + video_receive_configs_[0].decoder_factory = decoder_factory; std::set unique_payload_names; for (const auto& config : configs) if (unique_payload_names.insert(config.payload_name).second) { VideoReceiveStream::Decoder decoder = test::CreateMatchingDecoder( PayloadNameToPayloadType(config.payload_name), config.payload_name); - decoder.decoder_factory = config.decoder_factory; video_receive_configs_[0].decoders.push_back(decoder); } } -void MultiCodecReceiveTest::ConfigureEncoder(const CodecConfig& config) { - GetVideoSendConfig()->encoder_settings.encoder_factory = - config.encoder_factory; +void MultiCodecReceiveTest::ConfigureEncoder( + const CodecConfig& config, + VideoEncoderFactory* encoder_factory) { + GetVideoSendConfig()->encoder_settings.encoder_factory = encoder_factory; GetVideoSendConfig()->rtp.payload_name = config.payload_name; GetVideoSendConfig()->rtp.payload_type = PayloadNameToPayloadType(config.payload_name); @@ -202,39 +205,71 @@ void MultiCodecReceiveTest::ConfigureEncoder(const CodecConfig& config) { EXPECT_EQ(1u, GetVideoEncoderConfig()->simulcast_layers.size()); GetVideoEncoderConfig()->simulcast_layers[0].num_temporal_layers = config.num_temporal_layers; + GetVideoEncoderConfig()->video_format.name = config.payload_name; } void MultiCodecReceiveTest::RunTestWithCodecs( const std::vector& configs) { EXPECT_TRUE(!configs.empty()); + test::FunctionVideoEncoderFactory encoder_factory( + [](const SdpVideoFormat& format) -> std::unique_ptr { + if (format.name == "VP8") { + return VP8Encoder::Create(); + } + if (format.name == "VP9") { + return VP9Encoder::Create(); + } + if (format.name == "H264") { + return H264Encoder::Create(cricket::VideoCodec("H264")); + } + RTC_NOTREACHED() << format.name; + return nullptr; + }); + test::FunctionVideoDecoderFactory 
decoder_factory( + [](const SdpVideoFormat& format) -> std::unique_ptr { + if (format.name == "VP8") { + return VP8Decoder::Create(); + } + if (format.name == "VP9") { + return VP9Decoder::Create(); + } + if (format.name == "H264") { + return H264Decoder::Create(); + } + RTC_NOTREACHED() << format.name; + return nullptr; + }); // Create and start call. - SendTask(RTC_FROM_HERE, task_queue(), [this, &configs]() { - CreateSendConfig(1, 0, 0, send_transport_.get()); - ConfigureEncoder(configs[0]); - CreateMatchingReceiveConfigs(receive_transport_.get()); - video_receive_configs_[0].renderer = &observer_; - // Disable to avoid post-decode frame dropping in VideoRenderFrames. - video_receive_configs_[0].enable_prerenderer_smoothing = false; - ConfigureDecoders(configs); - CreateVideoStreams(); - CreateFrameGeneratorCapturer(kFps, kWidth, kHeight); - Start(); - }); + SendTask(RTC_FROM_HERE, task_queue(), + [this, &configs, &encoder_factory, &decoder_factory]() { + CreateSendConfig(1, 0, 0, send_transport_.get()); + ConfigureEncoder(configs[0], &encoder_factory); + CreateMatchingReceiveConfigs(receive_transport_.get()); + video_receive_configs_[0].renderer = &observer_; + // Disable to avoid post-decode frame dropping in + // VideoRenderFrames. + video_receive_configs_[0].enable_prerenderer_smoothing = false; + ConfigureDecoders(configs, &decoder_factory); + CreateVideoStreams(); + CreateFrameGeneratorCapturer(kFps, kWidth, kHeight); + Start(); + }); EXPECT_TRUE(observer_.Wait()) << "Timed out waiting for frames."; for (size_t i = 1; i < configs.size(); ++i) { // Recreate VideoSendStream with new config (codec, temporal layers). 
- SendTask(RTC_FROM_HERE, task_queue(), [this, i, &configs]() { - DestroyVideoSendStreams(); - observer_.Reset(PayloadNameToPayloadType(configs[i].payload_name)); - - ConfigureEncoder(configs[i]); - CreateVideoSendStreams(); - GetVideoSendStream()->Start(); - CreateFrameGeneratorCapturer(kFps, kWidth / 2, kHeight / 2); - ConnectVideoSourcesToStreams(); - }); + SendTask( + RTC_FROM_HERE, task_queue(), [this, i, &configs, &encoder_factory]() { + DestroyVideoSendStreams(); + observer_.Reset(PayloadNameToPayloadType(configs[i].payload_name)); + + ConfigureEncoder(configs[i], &encoder_factory); + CreateVideoSendStreams(); + GetVideoSendStream()->Start(); + CreateFrameGeneratorCapturer(kFps, kWidth / 2, kHeight / 2); + ConnectVideoSourcesToStreams(); + }); EXPECT_TRUE(observer_.Wait()) << "Timed out waiting for frames."; } @@ -245,98 +280,28 @@ void MultiCodecReceiveTest::RunTestWithCodecs( } TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8Vp9) { - test::FunctionVideoEncoderFactory vp8_encoder_factory( - []() { return VP8Encoder::Create(); }); - test::FunctionVideoEncoderFactory vp9_encoder_factory( - []() { return VP9Encoder::Create(); }); - test::FunctionVideoDecoderFactory vp8_decoder_factory( - []() { return VP8Decoder::Create(); }); - test::FunctionVideoDecoderFactory vp9_decoder_factory( - []() { return VP9Decoder::Create(); }); - RunTestWithCodecs({{"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 1}, - {"VP9", &vp9_encoder_factory, &vp9_decoder_factory, 1}, - {"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 1}}); + RunTestWithCodecs({{"VP8", 1}, {"VP9", 1}, {"VP8", 1}}); } TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8Vp9WithTl) { - test::FunctionVideoEncoderFactory vp8_encoder_factory( - []() { return VP8Encoder::Create(); }); - test::FunctionVideoEncoderFactory vp9_encoder_factory( - []() { return VP9Encoder::Create(); }); - test::FunctionVideoDecoderFactory vp8_decoder_factory( - []() { return VP8Decoder::Create(); }); - 
test::FunctionVideoDecoderFactory vp9_decoder_factory( - []() { return VP9Decoder::Create(); }); - RunTestWithCodecs({{"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 2}, - {"VP9", &vp9_encoder_factory, &vp9_decoder_factory, 2}, - {"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 2}}); + RunTestWithCodecs({{"VP8", 2}, {"VP9", 2}, {"VP8", 2}}); } #if defined(WEBRTC_USE_H264) TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8H264) { - test::FunctionVideoEncoderFactory vp8_encoder_factory( - []() { return VP8Encoder::Create(); }); - test::FunctionVideoEncoderFactory h264_encoder_factory( - []() { return H264Encoder::Create(cricket::VideoCodec("H264")); }); - test::FunctionVideoDecoderFactory vp8_decoder_factory( - []() { return VP8Decoder::Create(); }); - test::FunctionVideoDecoderFactory h264_decoder_factory( - []() { return H264Decoder::Create(); }); - RunTestWithCodecs({{"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 1}, - {"H264", &h264_encoder_factory, &h264_decoder_factory, 1}, - {"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 1}}); + RunTestWithCodecs({{"VP8", 1}, {"H264", 1}, {"VP8", 1}}); } TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8H264WithTl) { - test::FunctionVideoEncoderFactory vp8_encoder_factory( - []() { return VP8Encoder::Create(); }); - test::FunctionVideoEncoderFactory h264_encoder_factory( - []() { return H264Encoder::Create(cricket::VideoCodec("H264")); }); - test::FunctionVideoDecoderFactory vp8_decoder_factory( - []() { return VP8Decoder::Create(); }); - test::FunctionVideoDecoderFactory h264_decoder_factory( - []() { return H264Decoder::Create(); }); - RunTestWithCodecs({{"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 3}, - {"H264", &h264_encoder_factory, &h264_decoder_factory, 1}, - {"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 3}}); + RunTestWithCodecs({{"VP8", 3}, {"H264", 1}, {"VP8", 3}}); } TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8Vp9H264) { - test::FunctionVideoEncoderFactory 
vp8_encoder_factory( - []() { return VP8Encoder::Create(); }); - test::FunctionVideoEncoderFactory vp9_encoder_factory( - []() { return VP9Encoder::Create(); }); - test::FunctionVideoEncoderFactory h264_encoder_factory( - []() { return H264Encoder::Create(cricket::VideoCodec("H264")); }); - test::FunctionVideoDecoderFactory vp8_decoder_factory( - []() { return VP8Decoder::Create(); }); - test::FunctionVideoDecoderFactory vp9_decoder_factory( - []() { return VP9Decoder::Create(); }); - test::FunctionVideoDecoderFactory h264_decoder_factory( - []() { return H264Decoder::Create(); }); - RunTestWithCodecs({{"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 1}, - {"VP9", &vp9_encoder_factory, &vp9_decoder_factory, 1}, - {"H264", &h264_encoder_factory, &h264_decoder_factory, 1}, - {"VP9", &vp9_encoder_factory, &vp9_decoder_factory, 1}}); + RunTestWithCodecs({{"VP8", 1}, {"VP9", 1}, {"H264", 1}, {"VP9", 1}}); } TEST_F(MultiCodecReceiveTest, SingleStreamReceivesVp8Vp9H264WithTl) { - test::FunctionVideoEncoderFactory vp8_encoder_factory( - []() { return VP8Encoder::Create(); }); - test::FunctionVideoEncoderFactory vp9_encoder_factory( - []() { return VP9Encoder::Create(); }); - test::FunctionVideoEncoderFactory h264_encoder_factory( - []() { return H264Encoder::Create(cricket::VideoCodec("H264")); }); - test::FunctionVideoDecoderFactory vp8_decoder_factory( - []() { return VP8Decoder::Create(); }); - test::FunctionVideoDecoderFactory vp9_decoder_factory( - []() { return VP9Decoder::Create(); }); - test::FunctionVideoDecoderFactory h264_decoder_factory( - []() { return H264Decoder::Create(); }); - RunTestWithCodecs({{"VP8", &vp8_encoder_factory, &vp8_decoder_factory, 3}, - {"VP9", &vp9_encoder_factory, &vp9_decoder_factory, 2}, - {"H264", &h264_encoder_factory, &h264_decoder_factory, 1}, - {"VP9", &vp9_encoder_factory, &vp9_decoder_factory, 3}}); + RunTestWithCodecs({{"VP8", 3}, {"VP9", 2}, {"H264", 1}, {"VP9", 3}}); } #endif // defined(WEBRTC_USE_H264) diff --git 
a/video/end_to_end_tests/multi_stream_tester.cc b/video/end_to_end_tests/multi_stream_tester.cc index c3d41c37fa..20e128c2ad 100644 --- a/video/end_to_end_tests/multi_stream_tester.cc +++ b/video/end_to_end_tests/multi_stream_tester.cc @@ -102,9 +102,9 @@ void MultiStreamTester::RunTest() { VideoReceiveStream::Config receive_config(receiver_transport.get()); receive_config.rtp.remote_ssrc = ssrc; receive_config.rtp.local_ssrc = test::CallTest::kReceiverLocalVideoSsrc; + receive_config.decoder_factory = &decoder_factory; VideoReceiveStream::Decoder decoder = test::CreateMatchingDecoder(send_config); - decoder.decoder_factory = &decoder_factory; receive_config.decoders.push_back(decoder); UpdateReceiveConfig(i, &receive_config); diff --git a/video/end_to_end_tests/network_state_tests.cc b/video/end_to_end_tests/network_state_tests.cc index a0977ac773..4e0e86f987 100644 --- a/video/end_to_end_tests/network_state_tests.cc +++ b/video/end_to_end_tests/network_state_tests.cc @@ -10,12 +10,19 @@ #include +#include "api/media_types.h" +#include "api/task_queue/default_task_queue_factory.h" +#include "api/task_queue/task_queue_base.h" +#include "api/task_queue/task_queue_factory.h" #include "api/test/simulated_network.h" #include "api/video_codecs/video_encoder.h" #include "call/fake_network_pipe.h" #include "call/simulated_network.h" #include "modules/rtp_rtcp/source/rtp_packet.h" +#include "rtc_base/location.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "system_wrappers/include/sleep.h" #include "test/call_test.h" #include "test/fake_encoder.h" @@ -60,19 +67,19 @@ class NetworkStateEndToEndTest : public test::CallTest { bool SendRtp(const uint8_t* packet, size_t length, const PacketOptions& options) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); need_rtp_ = false; return true; } bool SendRtcp(const uint8_t* packet, size_t length) override { - 
rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); need_rtcp_ = false; return true; } bool need_rtp_; bool need_rtcp_; - rtc::CriticalSection crit_; + Mutex mutex_; }; void VerifyNewVideoSendStreamsRespectNetworkState( MediaType network_to_bring_up, @@ -165,7 +172,10 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { explicit NetworkStateTest(TaskQueueBase* task_queue) : EndToEndTest(kDefaultTimeoutMs), FakeEncoder(Clock::GetRealTimeClock()), - task_queue_(task_queue), + e2e_test_task_queue_(task_queue), + task_queue_(CreateDefaultTaskQueueFactory()->CreateTaskQueue( + "NetworkStateTest", + TaskQueueFactory::Priority::NORMAL)), sender_call_(nullptr), receiver_call_(nullptr), encoder_factory_(this), @@ -177,7 +187,7 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { down_frames_(0) {} Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&test_crit_); + MutexLock lock(&test_mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); if (rtp_packet.payload_size() == 0) @@ -188,7 +198,7 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { } Action OnSendRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&test_crit_); + MutexLock lock(&test_mutex_); ++sender_rtcp_; packet_event_.Set(); return SEND_PACKET; @@ -200,7 +210,7 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { } Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&test_crit_); + MutexLock lock(&test_mutex_); ++receiver_rtcp_; packet_event_.Set(); return SEND_PACKET; @@ -218,28 +228,38 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { send_config->encoder_settings.encoder_factory = &encoder_factory_; } + void SignalChannelNetworkState(Call* call, + MediaType media_type, + NetworkState network_state) { + SendTask(RTC_FROM_HERE, e2e_test_task_queue_, + [call, media_type, network_state] { + call->SignalChannelNetworkState(media_type, 
network_state); + }); + } + void PerformTest() override { EXPECT_TRUE(encoded_frames_.Wait(kDefaultTimeoutMs)) << "No frames received by the encoder."; - SendTask(RTC_FROM_HERE, task_queue_, [this]() { + SendTask(RTC_FROM_HERE, task_queue_.get(), [this]() { // Wait for packets from both sender/receiver. WaitForPacketsOrSilence(false, false); // Sender-side network down for audio; there should be no effect on // video - sender_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkDown); + SignalChannelNetworkState(sender_call_, MediaType::AUDIO, kNetworkDown); + WaitForPacketsOrSilence(false, false); // Receiver-side network down for audio; no change expected - receiver_call_->SignalChannelNetworkState(MediaType::AUDIO, - kNetworkDown); + SignalChannelNetworkState(receiver_call_, MediaType::AUDIO, + kNetworkDown); WaitForPacketsOrSilence(false, false); // Sender-side network down. - sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkDown); + SignalChannelNetworkState(sender_call_, MediaType::VIDEO, kNetworkDown); { - rtc::CritScope lock(&test_crit_); + MutexLock lock(&test_mutex_); // After network goes down we shouldn't be encoding more frames. sender_state_ = kNetworkDown; } @@ -247,25 +267,25 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { WaitForPacketsOrSilence(true, false); // Receiver-side network down. 
- receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, - kNetworkDown); + SignalChannelNetworkState(receiver_call_, MediaType::VIDEO, + kNetworkDown); WaitForPacketsOrSilence(true, true); // Network up for audio for both sides; video is still not expected to // start - sender_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp); - receiver_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp); + SignalChannelNetworkState(sender_call_, MediaType::AUDIO, kNetworkUp); + SignalChannelNetworkState(receiver_call_, MediaType::AUDIO, kNetworkUp); WaitForPacketsOrSilence(true, true); // Network back up again for both. { - rtc::CritScope lock(&test_crit_); + MutexLock lock(&test_mutex_); // It's OK to encode frames again, as we're about to bring up the // network. sender_state_ = kNetworkUp; } - sender_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp); - receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp); + SignalChannelNetworkState(sender_call_, MediaType::VIDEO, kNetworkUp); + SignalChannelNetworkState(receiver_call_, MediaType::VIDEO, kNetworkUp); WaitForPacketsOrSilence(false, false); // TODO(skvlad): add tests to verify that the audio streams are stopped @@ -277,7 +297,7 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { int32_t Encode(const VideoFrame& input_image, const std::vector* frame_types) override { { - rtc::CritScope lock(&test_crit_); + MutexLock lock(&test_mutex_); if (sender_state_ == kNetworkDown) { ++down_frames_; EXPECT_LE(down_frames_, 1) @@ -298,7 +318,7 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { int initial_sender_rtcp; int initial_receiver_rtcp; { - rtc::CritScope lock(&test_crit_); + MutexLock lock(&test_mutex_); initial_sender_rtp = sender_rtp_; initial_sender_rtcp = sender_rtcp_; initial_receiver_rtcp = receiver_rtcp_; @@ -308,7 +328,7 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { while (!sender_done || !receiver_done) { 
packet_event_.Wait(kSilenceTimeoutMs); int64_t time_now_ms = clock_->TimeInMilliseconds(); - rtc::CritScope lock(&test_crit_); + MutexLock lock(&test_mutex_); if (sender_down) { ASSERT_LE(sender_rtp_ - initial_sender_rtp - sender_padding_, kNumAcceptedDowntimeRtp) @@ -339,19 +359,20 @@ TEST_F(NetworkStateEndToEndTest, RespectsNetworkState) { } } - TaskQueueBase* const task_queue_; - rtc::CriticalSection test_crit_; + TaskQueueBase* const e2e_test_task_queue_; + std::unique_ptr task_queue_; + Mutex test_mutex_; rtc::Event encoded_frames_; rtc::Event packet_event_; Call* sender_call_; Call* receiver_call_; test::VideoEncoderProxyFactory encoder_factory_; - NetworkState sender_state_ RTC_GUARDED_BY(test_crit_); - int sender_rtp_ RTC_GUARDED_BY(test_crit_); - int sender_padding_ RTC_GUARDED_BY(test_crit_); - int sender_rtcp_ RTC_GUARDED_BY(test_crit_); - int receiver_rtcp_ RTC_GUARDED_BY(test_crit_); - int down_frames_ RTC_GUARDED_BY(test_crit_); + NetworkState sender_state_ RTC_GUARDED_BY(test_mutex_); + int sender_rtp_ RTC_GUARDED_BY(test_mutex_); + int sender_padding_ RTC_GUARDED_BY(test_mutex_); + int sender_rtcp_ RTC_GUARDED_BY(test_mutex_); + int receiver_rtcp_ RTC_GUARDED_BY(test_mutex_); + int down_frames_ RTC_GUARDED_BY(test_mutex_); } test(task_queue()); RunBaseTest(&test); diff --git a/video/end_to_end_tests/probing_tests.cc b/video/end_to_end_tests/probing_tests.cc deleted file mode 100644 index 34f2a7953e..0000000000 --- a/video/end_to_end_tests/probing_tests.cc +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Copyright 2018 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "api/task_queue/task_queue_base.h" -#include "api/test/simulated_network.h" -#include "call/fake_network_pipe.h" -#include "call/simulated_network.h" -#include "rtc_base/task_queue_for_test.h" -#include "test/call_test.h" -#include "test/field_trial.h" -#include "test/gtest.h" - -namespace webrtc { -namespace { -enum : int { // The first valid value is 1. - kTransportSequenceNumberExtensionId = 1, -}; -} // namespace - -class ProbingEndToEndTest : public test::CallTest { - public: - ProbingEndToEndTest() { - RegisterRtpExtension(RtpExtension(RtpExtension::kTransportSequenceNumberUri, - kTransportSequenceNumberExtensionId)); - } -}; - -class ProbingTest : public test::EndToEndTest { - public: - explicit ProbingTest(int start_bitrate_bps) - : clock_(Clock::GetRealTimeClock()), - start_bitrate_bps_(start_bitrate_bps), - state_(0), - sender_call_(nullptr) {} - - void ModifySenderBitrateConfig(BitrateConstraints* bitrate_config) override { - bitrate_config->start_bitrate_bps = start_bitrate_bps_; - } - - void OnCallsCreated(Call* sender_call, Call* receiver_call) override { - sender_call_ = sender_call; - } - - protected: - Clock* const clock_; - const int start_bitrate_bps_; - int state_; - Call* sender_call_; -}; - -// Flaky under MemorySanitizer: bugs.webrtc.org/7419 -// Flaky on iOS bots: bugs.webrtc.org/7851 -#if defined(MEMORY_SANITIZER) -TEST_F(ProbingEndToEndTest, DISABLED_InitialProbing) { -#elif defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR -TEST_F(ProbingEndToEndTest, DISABLED_InitialProbing) { -#else -TEST_F(ProbingEndToEndTest, InitialProbing) { -#endif - - class InitialProbingTest : public ProbingTest { - public: - explicit InitialProbingTest(bool* success, TaskQueueBase* task_queue) - : ProbingTest(300000), success_(success), task_queue_(task_queue) { - *success_ = false; - } - - void PerformTest() override { - int64_t start_time_ms = clock_->TimeInMilliseconds(); - do { - if (clock_->TimeInMilliseconds() - 
start_time_ms > kTimeoutMs) - break; - - Call::Stats stats; - SendTask(RTC_FROM_HERE, task_queue_, - [this, &stats]() { stats = sender_call_->GetStats(); }); - // Initial probing is done with a x3 and x6 multiplier of the start - // bitrate, so a x4 multiplier is a high enough threshold. - if (stats.send_bandwidth_bps > 4 * 300000) { - *success_ = true; - break; - } - } while (!observation_complete_.Wait(20)); - } - - private: - const int kTimeoutMs = 1000; - bool* const success_; - TaskQueueBase* const task_queue_; - }; - - bool success = false; - const int kMaxAttempts = 3; - for (int i = 0; i < kMaxAttempts; ++i) { - InitialProbingTest test(&success, task_queue()); - RunBaseTest(&test); - if (success) - return; - } - EXPECT_TRUE(success) << "Failed to perform mid initial probing (" - << kMaxAttempts << " attempts)."; -} - -// Fails on Linux MSan: bugs.webrtc.org/7428 -#if defined(MEMORY_SANITIZER) -TEST_F(ProbingEndToEndTest, DISABLED_TriggerMidCallProbing) { -// Fails on iOS bots: bugs.webrtc.org/7851 -#elif defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR -TEST_F(ProbingEndToEndTest, DISABLED_TriggerMidCallProbing) { -#else -TEST_F(ProbingEndToEndTest, TriggerMidCallProbing) { -#endif - - class TriggerMidCallProbingTest : public ProbingTest { - public: - TriggerMidCallProbingTest(TaskQueueBase* task_queue, bool* success) - : ProbingTest(300000), success_(success), task_queue_(task_queue) {} - - void PerformTest() override { - *success_ = false; - int64_t start_time_ms = clock_->TimeInMilliseconds(); - do { - if (clock_->TimeInMilliseconds() - start_time_ms > kTimeoutMs) - break; - - Call::Stats stats; - SendTask(RTC_FROM_HERE, task_queue_, - [this, &stats]() { stats = sender_call_->GetStats(); }); - - switch (state_) { - case 0: - if (stats.send_bandwidth_bps > 5 * 300000) { - BitrateConstraints bitrate_config; - bitrate_config.max_bitrate_bps = 100000; - SendTask(RTC_FROM_HERE, task_queue_, [this, &bitrate_config]() { - 
sender_call_->GetTransportControllerSend() - ->SetSdpBitrateParameters(bitrate_config); - }); - ++state_; - } - break; - case 1: - if (stats.send_bandwidth_bps < 110000) { - BitrateConstraints bitrate_config; - bitrate_config.max_bitrate_bps = 2500000; - SendTask(RTC_FROM_HERE, task_queue_, [this, &bitrate_config]() { - sender_call_->GetTransportControllerSend() - ->SetSdpBitrateParameters(bitrate_config); - }); - ++state_; - } - break; - case 2: - // During high cpu load the pacer will not be able to pace packets - // at the correct speed, but if we go from 110 to 1250 kbps - // in 5 seconds then it is due to probing. - if (stats.send_bandwidth_bps > 1250000) { - *success_ = true; - observation_complete_.Set(); - } - break; - } - } while (!observation_complete_.Wait(20)); - } - - private: - const int kTimeoutMs = 5000; - bool* const success_; - TaskQueueBase* const task_queue_; - }; - - bool success = false; - const int kMaxAttempts = 3; - for (int i = 0; i < kMaxAttempts; ++i) { - TriggerMidCallProbingTest test(task_queue(), &success); - RunBaseTest(&test); - if (success) - return; - } - EXPECT_TRUE(success) << "Failed to perform mid call probing (" << kMaxAttempts - << " attempts)."; -} - -#if defined(MEMORY_SANITIZER) -TEST_F(ProbingEndToEndTest, DISABLED_ProbeOnVideoEncoderReconfiguration) { -#elif defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR -TEST_F(ProbingEndToEndTest, DISABLED_ProbeOnVideoEncoderReconfiguration) { -#else -TEST_F(ProbingEndToEndTest, ProbeOnVideoEncoderReconfiguration) { -#endif - - class ReconfigureTest : public ProbingTest { - public: - ReconfigureTest(TaskQueueBase* task_queue, bool* success) - : ProbingTest(50000), task_queue_(task_queue), success_(success) {} - - void ModifyVideoConfigs( - VideoSendStream::Config* send_config, - std::vector* receive_configs, - VideoEncoderConfig* encoder_config) override { - encoder_config_ = encoder_config; - } - - void OnVideoStreamsCreated( - VideoSendStream* send_stream, - const 
std::vector& receive_streams) override { - send_stream_ = send_stream; - } - - std::unique_ptr CreateSendTransport( - TaskQueueBase* task_queue, - Call* sender_call) override { - auto network = - std::make_unique(BuiltInNetworkBehaviorConfig()); - send_simulated_network_ = network.get(); - return std::make_unique( - task_queue, sender_call, this, test::PacketTransport::kSender, - CallTest::payload_type_map_, - std::make_unique(Clock::GetRealTimeClock(), - std::move(network))); - } - - void PerformTest() override { - *success_ = false; - int64_t start_time_ms = clock_->TimeInMilliseconds(); - int64_t max_allocation_change_time_ms = -1; - do { - if (clock_->TimeInMilliseconds() - start_time_ms > kTimeoutMs) - break; - - Call::Stats stats; - SendTask(RTC_FROM_HERE, task_queue_, - [this, &stats]() { stats = sender_call_->GetStats(); }); - - switch (state_) { - case 0: - // Wait until initial probing has been completed (6 times start - // bitrate). - if (stats.send_bandwidth_bps >= 250000 && - stats.send_bandwidth_bps <= 350000) { - BuiltInNetworkBehaviorConfig config; - config.link_capacity_kbps = 200; - send_simulated_network_->SetConfig(config); - - // In order to speed up the test we can interrupt exponential - // probing by toggling the network availability. The alternative - // is to wait for it to time out (1000 ms). - sender_call_->GetTransportControllerSend()->OnNetworkAvailability( - false); - sender_call_->GetTransportControllerSend()->OnNetworkAvailability( - true); - - ++state_; - } - break; - case 1: - if (stats.send_bandwidth_bps <= 200000) { - // Initial probing finished. Increase link capacity and wait - // until BWE ramped up enough to be in ALR. This takes a few - // seconds. - BuiltInNetworkBehaviorConfig config; - config.link_capacity_kbps = 5000; - send_simulated_network_->SetConfig(config); - ++state_; - } - break; - case 2: - if (stats.send_bandwidth_bps > 240000) { - // BWE ramped up enough to be in ALR. 
Setting higher max_bitrate - // should trigger an allocation probe and fast ramp-up. - encoder_config_->max_bitrate_bps = 2000000; - encoder_config_->simulcast_layers[0].max_bitrate_bps = 1200000; - SendTask(RTC_FROM_HERE, task_queue_, [this]() { - send_stream_->ReconfigureVideoEncoder(encoder_config_->Copy()); - }); - max_allocation_change_time_ms = clock_->TimeInMilliseconds(); - ++state_; - } - break; - case 3: - if (stats.send_bandwidth_bps >= 1000000) { - EXPECT_LT( - clock_->TimeInMilliseconds() - max_allocation_change_time_ms, - kRampUpMaxDurationMs); - *success_ = true; - observation_complete_.Set(); - } - break; - } - } while (!observation_complete_.Wait(20)); - } - - private: - const int kTimeoutMs = 10000; - const int kRampUpMaxDurationMs = 500; - - TaskQueueBase* const task_queue_; - bool* const success_; - SimulatedNetwork* send_simulated_network_; - VideoSendStream* send_stream_; - VideoEncoderConfig* encoder_config_; - }; - - bool success = false; - const int kMaxAttempts = 3; - for (int i = 0; i < kMaxAttempts; ++i) { - ReconfigureTest test(task_queue(), &success); - RunBaseTest(&test); - if (success) { - return; - } - } - EXPECT_TRUE(success) << "Failed to perform mid call probing (" << kMaxAttempts - << " attempts)."; -} - -} // namespace webrtc diff --git a/video/end_to_end_tests/resolution_bitrate_limits_tests.cc b/video/end_to_end_tests/resolution_bitrate_limits_tests.cc new file mode 100644 index 0000000000..d46c40cd1e --- /dev/null +++ b/video/end_to_end_tests/resolution_bitrate_limits_tests.cc @@ -0,0 +1,375 @@ +/* + * Copyright 2021 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include + +#include "media/engine/webrtc_video_engine.h" +#include "rtc_base/experiments/encoder_info_settings.h" +#include "test/call_test.h" +#include "test/fake_encoder.h" +#include "test/field_trial.h" +#include "test/gtest.h" +#include "test/video_encoder_proxy_factory.h" + +namespace webrtc { +namespace test { +namespace { +void SetEncoderSpecific(VideoEncoderConfig* encoder_config, + VideoCodecType type, + size_t num_spatial_layers) { + if (type == kVideoCodecVP9) { + VideoCodecVP9 vp9 = VideoEncoder::GetDefaultVp9Settings(); + vp9.numberOfSpatialLayers = num_spatial_layers; + encoder_config->encoder_specific_settings = + rtc::make_ref_counted( + vp9); + } +} + +SpatialLayer GetLayer(int pixels, const VideoCodec& codec) { + if (codec.codecType == VideoCodecType::kVideoCodecVP9) { + for (size_t i = 0; i < codec.VP9().numberOfSpatialLayers; ++i) { + if (codec.spatialLayers[i].width * codec.spatialLayers[i].height == + pixels) { + return codec.spatialLayers[i]; + } + } + } else { + for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) { + if (codec.simulcastStream[i].width * codec.simulcastStream[i].height == + pixels) { + return codec.simulcastStream[i]; + } + } + } + ADD_FAILURE(); + return SpatialLayer(); +} + +} // namespace + +class ResolutionBitrateLimitsTest + : public test::CallTest, + public ::testing::WithParamInterface { + public: + ResolutionBitrateLimitsTest() : payload_name_(GetParam()) {} + + const std::string payload_name_; +}; + +INSTANTIATE_TEST_SUITE_P(PayloadName, + ResolutionBitrateLimitsTest, + ::testing::Values("VP8", "VP9")); + +class InitEncodeTest : public test::EndToEndTest, + public test::FrameGeneratorCapturer::SinkWantsObserver, + public test::FakeEncoder { + public: + struct Bitrate { + const absl::optional min; + const absl::optional max; + }; + struct TestConfig { + const bool active; + const Bitrate bitrate_bps; + }; + struct Expectation { + const uint32_t pixels = 0; + const Bitrate eq_bitrate_bps; + const Bitrate 
ne_bitrate_bps; + }; + + InitEncodeTest(const std::string& payload_name, + const std::vector& configs, + const std::vector& expectations) + : EndToEndTest(test::CallTest::kDefaultTimeoutMs), + FakeEncoder(Clock::GetRealTimeClock()), + encoder_factory_(this), + payload_name_(payload_name), + configs_(configs), + expectations_(expectations) {} + + void OnFrameGeneratorCapturerCreated( + test::FrameGeneratorCapturer* frame_generator_capturer) override { + frame_generator_capturer->SetSinkWantsObserver(this); + // Set initial resolution. + frame_generator_capturer->ChangeResolution(1280, 720); + } + + void OnSinkWantsChanged(rtc::VideoSinkInterface* sink, + const rtc::VideoSinkWants& wants) override {} + + size_t GetNumVideoStreams() const override { + return (payload_name_ == "VP9") ? 1 : configs_.size(); + } + + void ModifyVideoConfigs( + VideoSendStream::Config* send_config, + std::vector* receive_configs, + VideoEncoderConfig* encoder_config) override { + send_config->encoder_settings.encoder_factory = &encoder_factory_; + send_config->rtp.payload_name = payload_name_; + send_config->rtp.payload_type = test::CallTest::kVideoSendPayloadType; + const VideoCodecType codec_type = PayloadStringToCodecType(payload_name_); + encoder_config->codec_type = codec_type; + encoder_config->video_stream_factory = + rtc::make_ref_counted( + payload_name_, /*max qp*/ 0, /*screencast*/ false, + /*screenshare enabled*/ false); + encoder_config->max_bitrate_bps = -1; + if (configs_.size() == 1 && configs_[0].bitrate_bps.max) + encoder_config->max_bitrate_bps = *configs_[0].bitrate_bps.max; + if (payload_name_ == "VP9") { + // Simulcast layers indicates which spatial layers are active. 
+ encoder_config->simulcast_layers.resize(configs_.size()); + } + double scale_factor = 1.0; + for (int i = configs_.size() - 1; i >= 0; --i) { + VideoStream& stream = encoder_config->simulcast_layers[i]; + stream.active = configs_[i].active; + if (configs_[i].bitrate_bps.min) + stream.min_bitrate_bps = *configs_[i].bitrate_bps.min; + if (configs_[i].bitrate_bps.max) + stream.max_bitrate_bps = *configs_[i].bitrate_bps.max; + stream.scale_resolution_down_by = scale_factor; + scale_factor *= (payload_name_ == "VP9") ? 1.0 : 2.0; + } + SetEncoderSpecific(encoder_config, codec_type, configs_.size()); + } + + int32_t InitEncode(const VideoCodec* codec, + const Settings& settings) override { + for (const auto& expected : expectations_) { + SpatialLayer layer = GetLayer(expected.pixels, *codec); + if (expected.eq_bitrate_bps.min) + EXPECT_EQ(*expected.eq_bitrate_bps.min, layer.minBitrate * 1000); + if (expected.eq_bitrate_bps.max) + EXPECT_EQ(*expected.eq_bitrate_bps.max, layer.maxBitrate * 1000); + EXPECT_NE(expected.ne_bitrate_bps.min, layer.minBitrate * 1000); + EXPECT_NE(expected.ne_bitrate_bps.max, layer.maxBitrate * 1000); + } + observation_complete_.Set(); + return 0; + } + + VideoEncoder::EncoderInfo GetEncoderInfo() const override { + EncoderInfo info = FakeEncoder::GetEncoderInfo(); + if (!encoder_info_override_.resolution_bitrate_limits().empty()) { + info.resolution_bitrate_limits = + encoder_info_override_.resolution_bitrate_limits(); + } + return info; + } + + void PerformTest() override { + ASSERT_TRUE(Wait()) << "Timed out while waiting for InitEncode() call."; + } + + private: + test::VideoEncoderProxyFactory encoder_factory_; + const std::string payload_name_; + const std::vector configs_; + const std::vector expectations_; + const LibvpxVp8EncoderInfoSettings encoder_info_override_; +}; + +TEST_P(ResolutionBitrateLimitsTest, LimitsApplied) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-GetEncoderInfoOverride/" + "frame_size_pixels:921600," 
+ "min_start_bitrate_bps:0," + "min_bitrate_bps:32000," + "max_bitrate_bps:3333000/"); + + InitEncodeTest test( + payload_name_, + {{/*active=*/true, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}}, + // Expectations: + {{1280 * 720, + /*eq_bitrate_bps=*/{32000, 3333000}, + /*ne_bitrate_bps=*/{absl::nullopt, absl::nullopt}}}); + RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, EncodingsApplied) { + InitEncodeTest test(payload_name_, + {{/*active=*/true, /*bitrate_bps=*/{22000, 3555000}}}, + // Expectations: + {{1280 * 720, + /*eq_bitrate_bps=*/{22000, 3555000}, + /*ne_bitrate_bps=*/{absl::nullopt, absl::nullopt}}}); + RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, IntersectionApplied) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-GetEncoderInfoOverride/" + "frame_size_pixels:921600," + "min_start_bitrate_bps:0," + "min_bitrate_bps:32000," + "max_bitrate_bps:3333000/"); + + InitEncodeTest test(payload_name_, + {{/*active=*/true, /*bitrate_bps=*/{22000, 1555000}}}, + // Expectations: + {{1280 * 720, + /*eq_bitrate_bps=*/{32000, 1555000}, + /*ne_bitrate_bps=*/{absl::nullopt, absl::nullopt}}}); + RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, LimitsAppliedMiddleActive) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-GetEncoderInfoOverride/" + "frame_size_pixels:230400|921600," + "min_start_bitrate_bps:0|0," + "min_bitrate_bps:21000|32000," + "max_bitrate_bps:2222000|3333000/"); + + InitEncodeTest test( + payload_name_, + {{/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/true, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}}, + // Expectations: + {{640 * 360, + /*eq_bitrate_bps=*/{21000, 2222000}, + /*ne_bitrate_bps=*/{absl::nullopt, absl::nullopt}}}); + + RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, IntersectionAppliedMiddleActive) { + webrtc::test::ScopedFieldTrials field_trials( + 
"WebRTC-GetEncoderInfoOverride/" + "frame_size_pixels:230400|921600," + "min_start_bitrate_bps:0|0," + "min_bitrate_bps:31000|32000," + "max_bitrate_bps:2222000|3333000/"); + + InitEncodeTest test( + payload_name_, + {{/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/true, /*bitrate_bps=*/{30000, 1555000}}, + {/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}}, + // Expectations: + {{640 * 360, + /*eq_bitrate_bps=*/{31000, 1555000}, + /*ne_bitrate_bps=*/{absl::nullopt, absl::nullopt}}}); + RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, DefaultLimitsAppliedMiddleActive) { + const absl::optional + kDefaultSinglecastLimits360p = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + PayloadStringToCodecType(payload_name_), 640 * 360); + + InitEncodeTest test( + payload_name_, + {{/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/true, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}}, + // Expectations: + {{640 * 360, + /*eq_bitrate_bps=*/ + {kDefaultSinglecastLimits360p->min_bitrate_bps, + kDefaultSinglecastLimits360p->max_bitrate_bps}, + /*ne_bitrate_bps=*/{absl::nullopt, absl::nullopt}}}); + + RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, LimitsAppliedHighestActive) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-GetEncoderInfoOverride/" + "frame_size_pixels:230400|921600," + "min_start_bitrate_bps:0|0," + "min_bitrate_bps:31000|32000," + "max_bitrate_bps:2222000|3333000/"); + + InitEncodeTest test( + payload_name_, + {{/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/true, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}}, + // Expectations: + {{1280 * 720, + /*eq_bitrate_bps=*/{32000, 3333000}, + /*ne_bitrate_bps=*/{absl::nullopt, absl::nullopt}}}); + 
RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, IntersectionAppliedHighestActive) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-GetEncoderInfoOverride/" + "frame_size_pixels:230400|921600," + "min_start_bitrate_bps:0|0," + "min_bitrate_bps:31000|32000," + "max_bitrate_bps:2222000|3333000/"); + + InitEncodeTest test( + payload_name_, + {{/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/true, /*bitrate_bps=*/{30000, 1555000}}}, + // Expectations: + {{1280 * 720, + /*eq_bitrate_bps=*/{32000, 1555000}, + /*ne_bitrate_bps=*/{absl::nullopt, absl::nullopt}}}); + RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, LimitsNotAppliedLowestActive) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-GetEncoderInfoOverride/" + "frame_size_pixels:230400|921600," + "min_start_bitrate_bps:0|0," + "min_bitrate_bps:31000|32000," + "max_bitrate_bps:2222000|3333000/"); + + InitEncodeTest test( + payload_name_, + {{/*active=*/true, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/false, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}}, + // Expectations: + {{640 * 360, + /*eq_bitrate_bps=*/{absl::nullopt, absl::nullopt}, + /*ne_bitrate_bps=*/{31000, 2222000}}, + {1280 * 720, + /*eq_bitrate_bps=*/{absl::nullopt, absl::nullopt}, + /*ne_bitrate_bps=*/{32000, 3333000}}}); + RunBaseTest(&test); +} + +TEST_P(ResolutionBitrateLimitsTest, LimitsNotAppliedSimulcast) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-GetEncoderInfoOverride/" + "frame_size_pixels:230400|921600," + "min_start_bitrate_bps:0|0," + "min_bitrate_bps:31000|32000," + "max_bitrate_bps:2222000|3333000/"); + + InitEncodeTest test( + payload_name_, + {{/*active=*/true, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}, + {/*active=*/true, /*bitrate_bps=*/{absl::nullopt, absl::nullopt}}}, + // Expectations: + {{640 * 360, + /*eq_bitrate_bps=*/{absl::nullopt, 
absl::nullopt}, + /*ne_bitrate_bps=*/{31000, 2222000}}, + {1280 * 720, + /*eq_bitrate_bps=*/{absl::nullopt, absl::nullopt}, + /*ne_bitrate_bps=*/{32000, 3333000}}}); + RunBaseTest(&test); +} + +} // namespace test +} // namespace webrtc diff --git a/video/end_to_end_tests/retransmission_tests.cc b/video/end_to_end_tests/retransmission_tests.cc index c28b12960f..19eb38d388 100644 --- a/video/end_to_end_tests/retransmission_tests.cc +++ b/video/end_to_end_tests/retransmission_tests.cc @@ -19,6 +19,7 @@ #include "modules/rtp_rtcp/source/rtp_packet.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "rtc_base/event.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "test/call_test.h" #include "test/field_trial.h" @@ -58,7 +59,7 @@ TEST_F(RetransmissionEndToEndTest, ReceivesAndRetransmitsNack) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -95,7 +96,7 @@ TEST_F(RetransmissionEndToEndTest, ReceivesAndRetransmitsNack) { } Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); test::RtcpPacketParser parser; EXPECT_TRUE(parser.Parse(packet, length)); nacks_left_ -= parser.nack()->num_packets(); @@ -116,12 +117,12 @@ TEST_F(RetransmissionEndToEndTest, ReceivesAndRetransmitsNack) { "rendered."; } - rtc::CriticalSection crit_; + Mutex mutex_; std::set dropped_packets_; std::set retransmitted_packets_; uint64_t sent_rtp_packets_; int packets_left_to_drop_; - int nacks_left_ RTC_GUARDED_BY(&crit_); + int nacks_left_ RTC_GUARDED_BY(&mutex_); } test; RunBaseTest(&test); @@ -290,7 +291,7 @@ void RetransmissionEndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock 
lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -308,7 +309,7 @@ void RetransmissionEndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) { } Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); test::RtcpPacketParser parser; EXPECT_TRUE(parser.Parse(packet, length)); if (!nack_enabled_) @@ -319,7 +320,7 @@ void RetransmissionEndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) { } void OnFrame(const VideoFrame& video_frame) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (received_pli_ && video_frame.timestamp() > highest_dropped_timestamp_) { observation_complete_.Set(); @@ -343,12 +344,12 @@ void RetransmissionEndToEndTest::ReceivesPliAndRecovers(int rtp_history_ms) { "rendered afterwards."; } - rtc::CriticalSection crit_; + Mutex mutex_; int rtp_history_ms_; bool nack_enabled_; - uint32_t highest_dropped_timestamp_ RTC_GUARDED_BY(&crit_); - int frames_to_drop_ RTC_GUARDED_BY(&crit_); - bool received_pli_ RTC_GUARDED_BY(&crit_); + uint32_t highest_dropped_timestamp_ RTC_GUARDED_BY(&mutex_); + int frames_to_drop_ RTC_GUARDED_BY(&mutex_); + bool received_pli_ RTC_GUARDED_BY(&mutex_); } test(rtp_history_ms); RunBaseTest(&test); @@ -382,7 +383,7 @@ void RetransmissionEndToEndTest::DecodesRetransmittedFrame(bool enable_rtx, private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -427,7 +428,7 @@ void RetransmissionEndToEndTest::DecodesRetransmittedFrame(bool enable_rtx, void OnFrame(const VideoFrame& frame) override { EXPECT_EQ(kVideoRotation_90, frame.rotation()); { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (frame.timestamp() == retransmitted_timestamp_) observation_complete_.Set(); rendered_timestamps_.push_back(frame.timestamp()); @@ -502,7 +503,7 @@ void 
RetransmissionEndToEndTest::DecodesRetransmittedFrame(bool enable_rtx, return kFakeVideoSendPayloadType; } - rtc::CriticalSection crit_; + Mutex mutex_; rtc::VideoSinkInterface* orig_renderer_ = nullptr; const int payload_type_; const uint32_t retransmission_ssrc_; @@ -510,8 +511,8 @@ void RetransmissionEndToEndTest::DecodesRetransmittedFrame(bool enable_rtx, test::FunctionVideoEncoderFactory encoder_factory_; const std::string payload_name_; int marker_bits_observed_; - uint32_t retransmitted_timestamp_ RTC_GUARDED_BY(&crit_); - std::vector rendered_timestamps_ RTC_GUARDED_BY(&crit_); + uint32_t retransmitted_timestamp_ RTC_GUARDED_BY(&mutex_); + std::vector rendered_timestamps_ RTC_GUARDED_BY(&mutex_); } test(enable_rtx, enable_red); RunBaseTest(&test); diff --git a/video/end_to_end_tests/rtp_rtcp_tests.cc b/video/end_to_end_tests/rtp_rtcp_tests.cc index 71783febfe..a698328dad 100644 --- a/video/end_to_end_tests/rtp_rtcp_tests.cc +++ b/video/end_to_end_tests/rtp_rtcp_tests.cc @@ -16,6 +16,7 @@ #include "modules/include/module_common_types_public.h" #include "modules/rtp_rtcp/source/rtp_packet.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "test/call_test.h" #include "test/gtest.h" @@ -46,7 +47,7 @@ void RtpRtcpEndToEndTest::RespectsRtcpMode(RtcpMode rtcp_mode) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (++sent_rtp_ % 3 == 0) return DROP_PACKET; @@ -54,7 +55,7 @@ void RtpRtcpEndToEndTest::RespectsRtcpMode(RtcpMode rtcp_mode) { } Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ++sent_rtcp_; test::RtcpPacketParser parser; EXPECT_TRUE(parser.Parse(packet, length)); @@ -105,11 +106,11 @@ void RtpRtcpEndToEndTest::RespectsRtcpMode(RtcpMode rtcp_mode) { } RtcpMode rtcp_mode_; - 
rtc::CriticalSection crit_; + Mutex mutex_; // Must be protected since RTCP can be sent by both the process thread // and the pacer thread. - int sent_rtp_ RTC_GUARDED_BY(&crit_); - int sent_rtcp_ RTC_GUARDED_BY(&crit_); + int sent_rtp_ RTC_GUARDED_BY(&mutex_); + int sent_rtcp_ RTC_GUARDED_BY(&mutex_); } test(rtcp_mode); RunBaseTest(&test); @@ -176,7 +177,7 @@ void RtpRtcpEndToEndTest::TestRtpStatePreservation( } void ResetExpectedSsrcs(size_t num_expected_ssrcs) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ssrc_observed_.clear(); ssrcs_to_observe_ = num_expected_ssrcs; } @@ -185,7 +186,7 @@ void RtpRtcpEndToEndTest::TestRtpStatePreservation( void ValidateTimestampGap(uint32_t ssrc, uint32_t timestamp, bool only_padding) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_) { + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { static const int32_t kMaxTimestampGap = kDefaultTimeoutMs * 90; auto timestamp_it = last_observed_timestamp_.find(ssrc); if (timestamp_it == last_observed_timestamp_.end()) { @@ -240,7 +241,7 @@ void RtpRtcpEndToEndTest::TestRtpStatePreservation( } if (!ssrc_is_rtx_[ssrc]) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ValidateTimestampGap(ssrc, timestamp, only_padding); // Wait for media packets on all ssrcs. 
@@ -261,7 +262,7 @@ void RtpRtcpEndToEndTest::TestRtpStatePreservation( uint32_t ssrc = rtcp_parser.sender_report()->sender_ssrc(); uint32_t rtcp_timestamp = rtcp_parser.sender_report()->rtp_timestamp(); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ValidateTimestampGap(ssrc, rtcp_timestamp, false); } return SEND_PACKET; @@ -272,9 +273,9 @@ void RtpRtcpEndToEndTest::TestRtpStatePreservation( std::map last_observed_timestamp_; std::map ssrc_is_rtx_; - rtc::CriticalSection crit_; - size_t ssrcs_to_observe_ RTC_GUARDED_BY(crit_); - std::map ssrc_observed_ RTC_GUARDED_BY(crit_); + Mutex mutex_; + size_t ssrcs_to_observe_ RTC_GUARDED_BY(mutex_); + std::map ssrc_observed_ RTC_GUARDED_BY(mutex_); } observer(use_rtx); std::unique_ptr send_transport; @@ -315,7 +316,7 @@ void RtpRtcpEndToEndTest::TestRtpStatePreservation( } GetVideoEncoderConfig()->video_stream_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); // Use the same total bitrates when sending a single stream to avoid // lowering the bitrate estimate and requiring a subsequent rampup. 
one_stream = GetVideoEncoderConfig()->Copy(); @@ -414,13 +415,13 @@ TEST_F(RtpRtcpEndToEndTest, DISABLED_TestFlexfecRtpStatePreservation) { num_flexfec_packets_sent_(0) {} void ResetPacketCount() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); num_flexfec_packets_sent_ = 0; } private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); @@ -468,10 +469,10 @@ TEST_F(RtpRtcpEndToEndTest, DISABLED_TestFlexfecRtpStatePreservation) { } absl::optional last_observed_sequence_number_ - RTC_GUARDED_BY(crit_); - absl::optional last_observed_timestamp_ RTC_GUARDED_BY(crit_); - size_t num_flexfec_packets_sent_ RTC_GUARDED_BY(crit_); - rtc::CriticalSection crit_; + RTC_GUARDED_BY(mutex_); + absl::optional last_observed_timestamp_ RTC_GUARDED_BY(mutex_); + size_t num_flexfec_packets_sent_ RTC_GUARDED_BY(mutex_); + Mutex mutex_; } observer; static constexpr int kFrameMaxWidth = 320; @@ -536,12 +537,13 @@ TEST_F(RtpRtcpEndToEndTest, DISABLED_TestFlexfecRtpStatePreservation) { receive_transport.get()); flexfec_receive_config.payload_type = GetVideoSendConfig()->rtp.flexfec.payload_type; - flexfec_receive_config.remote_ssrc = GetVideoSendConfig()->rtp.flexfec.ssrc; + flexfec_receive_config.rtp.remote_ssrc = + GetVideoSendConfig()->rtp.flexfec.ssrc; flexfec_receive_config.protected_media_ssrcs = GetVideoSendConfig()->rtp.flexfec.protected_media_ssrcs; - flexfec_receive_config.local_ssrc = kReceiverLocalVideoSsrc; - flexfec_receive_config.transport_cc = true; - flexfec_receive_config.rtp_header_extensions.emplace_back( + flexfec_receive_config.rtp.local_ssrc = kReceiverLocalVideoSsrc; + flexfec_receive_config.rtp.transport_cc = true; + flexfec_receive_config.rtp.extensions.emplace_back( RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberExtensionId); flexfec_receive_configs_.push_back(flexfec_receive_config); diff --git 
a/video/end_to_end_tests/ssrc_tests.cc b/video/end_to_end_tests/ssrc_tests.cc index 3c7db803c0..bdca05d647 100644 --- a/video/end_to_end_tests/ssrc_tests.cc +++ b/video/end_to_end_tests/ssrc_tests.cc @@ -14,6 +14,7 @@ #include "call/fake_network_pipe.h" #include "call/simulated_network.h" #include "modules/rtp_rtcp/source/rtp_packet.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/task_queue_for_test.h" #include "test/call_test.h" #include "test/gtest.h" @@ -60,7 +61,7 @@ TEST_F(SsrcEndToEndTest, UnknownRtpPacketGivesUnknownSsrcReturnCode) { DeliveryStatus DeliverPacket(MediaType media_type, rtc::CopyOnWriteBuffer packet, int64_t packet_time_us) override { - if (RtpHeaderParser::IsRtcp(packet.cdata(), packet.size())) { + if (IsRtcpPacket(packet)) { return receiver_->DeliverPacket(media_type, std::move(packet), packet_time_us); } @@ -132,13 +133,15 @@ void SsrcEndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs, public: SendsSetSsrcs(const uint32_t* ssrcs, size_t num_ssrcs, - bool send_single_ssrc_first) + bool send_single_ssrc_first, + TaskQueueBase* task_queue) : EndToEndTest(kDefaultTimeoutMs), num_ssrcs_(num_ssrcs), send_single_ssrc_first_(send_single_ssrc_first), ssrcs_to_observe_(num_ssrcs), expect_single_ssrc_(send_single_ssrc_first), - send_stream_(nullptr) { + send_stream_(nullptr), + task_queue_(task_queue) { for (size_t i = 0; i < num_ssrcs; ++i) valid_ssrcs_[ssrcs[i]] = true; } @@ -171,38 +174,17 @@ void SsrcEndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs, size_t GetNumVideoStreams() const override { return num_ssrcs_; } - // This test use other VideoStream settings than the the default settings - // implemented in DefaultVideoStreamFactory. Therefore this test implement - // its own VideoEncoderConfig::VideoStreamFactoryInterface which is created - // in ModifyVideoConfigs. 
- class VideoStreamFactory - : public VideoEncoderConfig::VideoStreamFactoryInterface { - public: - VideoStreamFactory() {} - - private: - std::vector CreateEncoderStreams( - int width, - int height, - const VideoEncoderConfig& encoder_config) override { - std::vector streams = - test::CreateVideoStreams(width, height, encoder_config); - // Set low simulcast bitrates to not have to wait for bandwidth ramp-up. - for (size_t i = 0; i < encoder_config.number_of_streams; ++i) { - streams[i].min_bitrate_bps = 10000; - streams[i].target_bitrate_bps = 15000; - streams[i].max_bitrate_bps = 20000; - } - return streams; - } - }; - void ModifyVideoConfigs( VideoSendStream::Config* send_config, std::vector* receive_configs, VideoEncoderConfig* encoder_config) override { - encoder_config->video_stream_factory = - new rtc::RefCountedObject(); + // Set low simulcast bitrates to not have to wait for bandwidth ramp-up. + encoder_config->max_bitrate_bps = 50000; + for (auto& layer : encoder_config->simulcast_layers) { + layer.min_bitrate_bps = 10000; + layer.target_bitrate_bps = 15000; + layer.max_bitrate_bps = 20000; + } video_encoder_config_all_streams_ = encoder_config->Copy(); if (send_single_ssrc_first_) encoder_config->number_of_streams = 1; @@ -221,8 +203,10 @@ void SsrcEndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs, if (send_single_ssrc_first_) { // Set full simulcast and continue with the rest of the SSRCs. 
- send_stream_->ReconfigureVideoEncoder( - std::move(video_encoder_config_all_streams_)); + SendTask(RTC_FROM_HERE, task_queue_, [&]() { + send_stream_->ReconfigureVideoEncoder( + std::move(video_encoder_config_all_streams_)); + }); EXPECT_TRUE(Wait()) << "Timed out while waiting on additional SSRCs."; } } @@ -239,7 +223,8 @@ void SsrcEndToEndTest::TestSendsSetSsrcs(size_t num_ssrcs, VideoSendStream* send_stream_; VideoEncoderConfig video_encoder_config_all_streams_; - } test(kVideoSendSsrcs, num_ssrcs, send_single_ssrc_first); + TaskQueueBase* task_queue_; + } test(kVideoSendSsrcs, num_ssrcs, send_single_ssrc_first, task_queue()); RunBaseTest(&test); } diff --git a/video/end_to_end_tests/stats_tests.cc b/video/end_to_end_tests/stats_tests.cc index 32bcedb9c8..54e7bcff1c 100644 --- a/video/end_to_end_tests/stats_tests.cc +++ b/video/end_to_end_tests/stats_tests.cc @@ -17,9 +17,10 @@ #include "api/test/video/function_video_encoder_factory.h" #include "call/fake_network_pipe.h" #include "call/simulated_network.h" -#include "modules/rtp_rtcp/source/rtp_utility.h" +#include "modules/rtp_rtcp/source/rtp_packet.h" #include "modules/video_coding/include/video_coding_defines.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "system_wrappers/include/metrics.h" #include "system_wrappers/include/sleep.h" @@ -70,12 +71,11 @@ TEST_F(StatsEndToEndTest, GetStats) { Action OnSendRtp(const uint8_t* packet, size_t length) override { // Drop every 25th packet => 4% loss. 
static const int kPacketLossFrac = 25; - RTPHeader header; - RtpUtility::RtpHeaderParser parser(packet, length); - if (parser.Parse(&header) && - expected_send_ssrcs_.find(header.ssrc) != + RtpPacket header; + if (header.Parse(packet, length) && + expected_send_ssrcs_.find(header.Ssrc()) != expected_send_ssrcs_.end() && - header.sequenceNumber % kPacketLossFrac == 0) { + header.SequenceNumber() % kPacketLossFrac == 0) { return DROP_PACKET; } check_stats_event_.Set(); @@ -142,8 +142,8 @@ TEST_F(StatsEndToEndTest, GetStats) { stats.rtcp_packet_type_counts.nack_requests != 0 || stats.rtcp_packet_type_counts.unique_nack_requests != 0; - assert(stats.current_payload_type == -1 || - stats.current_payload_type == kFakeVideoSendPayloadType); + RTC_DCHECK(stats.current_payload_type == -1 || + stats.current_payload_type == kFakeVideoSendPayloadType); receive_stats_filled_["IncomingPayloadType"] |= stats.current_payload_type == kFakeVideoSendPayloadType; } @@ -153,7 +153,10 @@ TEST_F(StatsEndToEndTest, GetStats) { bool CheckSendStats() { RTC_DCHECK(send_stream_); - VideoSendStream::Stats stats = send_stream_->GetStats(); + + VideoSendStream::Stats stats; + SendTask(RTC_FROM_HERE, task_queue_, + [&]() { stats = send_stream_->GetStats(); }); size_t expected_num_streams = kNumSimulcastStreams + expected_send_ssrcs_.size(); @@ -178,9 +181,7 @@ TEST_F(StatsEndToEndTest, GetStats) { const VideoSendStream::StreamStats& stream_stats = kv.second; send_stats_filled_[CompoundKey("StatisticsUpdated", kv.first)] |= - stream_stats.rtcp_stats.packets_lost != 0 || - stream_stats.rtcp_stats.extended_highest_sequence_number != 0 || - stream_stats.rtcp_stats.fraction_lost != 0; + stream_stats.report_block_data.has_value(); send_stats_filled_[CompoundKey("DataCountersUpdated", kv.first)] |= stream_stats.rtp_stats.fec.packets != 0 || @@ -479,7 +480,7 @@ TEST_F(StatsEndToEndTest, MAYBE_ContentTypeSwitches) { if (video_frame.ntp_time_ms() > 0 && Clock::GetRealTimeClock()->CurrentNtpInMilliseconds() 
>= video_frame.ntp_time_ms()) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ++num_frames_received_; } } @@ -493,7 +494,7 @@ TEST_F(StatsEndToEndTest, MAYBE_ContentTypeSwitches) { bool MinNumberOfFramesReceived() const { // Have some room for frames with wrong content type during switch. const int kMinRequiredHistogramSamples = 200 + 50; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return num_frames_received_ > kMinRequiredHistogramSamples; } @@ -502,13 +503,13 @@ TEST_F(StatsEndToEndTest, MAYBE_ContentTypeSwitches) { EXPECT_TRUE(Wait()) << "Timed out waiting for enough packets."; // Reset frame counter so next PerformTest() call will do something. { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); num_frames_received_ = 0; } } - rtc::CriticalSection crit_; - int num_frames_received_ RTC_GUARDED_BY(&crit_); + mutable Mutex mutex_; + int num_frames_received_ RTC_GUARDED_BY(&mutex_); } test; metrics::Reset(); @@ -609,13 +610,11 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (++sent_rtp_packets_ == kPacketNumberToDrop) { - std::unique_ptr parser( - RtpHeaderParser::CreateForTest()); - RTPHeader header; - EXPECT_TRUE(parser->Parse(packet, length, &header)); - dropped_rtp_packet_ = header.sequenceNumber; + RtpPacket header; + EXPECT_TRUE(header.Parse(packet, length)); + dropped_rtp_packet_ = header.SequenceNumber(); return DROP_PACKET; } task_queue_->PostTask(std::unique_ptr(this)); @@ -623,7 +622,7 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) { } Action OnReceiveRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); test::RtcpPacketParser rtcp_parser; rtcp_parser.Parse(packet, length); const std::vector& nacks = rtcp_parser.nack()->packet_ids(); @@ -633,7 +632,7 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) { return SEND_PACKET; } - void 
VerifyStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_) { + void VerifyStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_) { if (!dropped_rtp_packet_requested_) return; int send_stream_nack_packets = 0; @@ -684,7 +683,7 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) { } bool Run() override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); VerifyStats(); return false; } @@ -694,10 +693,10 @@ TEST_F(StatsEndToEndTest, VerifyNackStats) { } test::FakeVideoRenderer fake_renderer_; - rtc::CriticalSection crit_; + Mutex mutex_; uint64_t sent_rtp_packets_; - uint16_t dropped_rtp_packet_ RTC_GUARDED_BY(&crit_); - bool dropped_rtp_packet_requested_ RTC_GUARDED_BY(&crit_); + uint16_t dropped_rtp_packet_ RTC_GUARDED_BY(&mutex_); + bool dropped_rtp_packet_requested_ RTC_GUARDED_BY(&mutex_); std::vector receive_streams_; VideoSendStream* send_stream_; absl::optional start_runtime_ms_; diff --git a/video/end_to_end_tests/transport_feedback_tests.cc b/video/end_to_end_tests/transport_feedback_tests.cc index 4291bc4812..a675d784bc 100644 --- a/video/end_to_end_tests/transport_feedback_tests.cc +++ b/video/end_to_end_tests/transport_feedback_tests.cc @@ -18,6 +18,7 @@ #include "modules/rtp_rtcp/source/byte_io.h" #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet.h" +#include "rtc_base/synchronization/mutex.h" #include "test/call_test.h" #include "test/field_trial.h" #include "test/gtest.h" @@ -65,7 +66,7 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) { size_t length, const PacketOptions& options) override { { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (IsDone()) return false; @@ -141,14 +142,14 @@ TEST(TransportFeedbackMultiStreamTest, AssignsTransportSequenceNumbers) { { // Can't be sure until this point that rtx_to_media_ssrcs_ etc have // been initialized and are OK to read. 
- rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); started_ = true; } return done_.Wait(kDefaultTimeoutMs); } private: - rtc::CriticalSection lock_; + Mutex lock_; rtc::Event done_; RtpHeaderExtensionMap extensions_; SequenceNumberUnwrapper unwrapper_; @@ -326,7 +327,6 @@ TEST_F(TransportFeedbackEndToEndTest, VideoTransportFeedbackNotConfigured) { } TEST_F(TransportFeedbackEndToEndTest, AudioReceivesTransportFeedback) { - test::ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/"); TransportFeedbackTester test(true, 0, 1); RunBaseTest(&test); } @@ -366,7 +366,7 @@ TEST_F(TransportFeedbackEndToEndTest, RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); const bool only_padding = rtp_packet.payload_size() == 0; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // Padding is expected in congested state to probe for connectivity when // packets has been dropped. if (only_padding) { @@ -386,7 +386,7 @@ TEST_F(TransportFeedbackEndToEndTest, } Action OnReceiveRtcp(const uint8_t* data, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // To fill up the congestion window we drop feedback on packets after 20 // packets have been sent. 
This means that any packets that has not yet // received feedback after that will be considered as oustanding data and @@ -425,16 +425,15 @@ TEST_F(TransportFeedbackEndToEndTest, private: const size_t num_video_streams_; const size_t num_audio_streams_; - rtc::CriticalSection crit_; - int media_sent_ RTC_GUARDED_BY(crit_); - int media_sent_before_ RTC_GUARDED_BY(crit_); - int padding_sent_ RTC_GUARDED_BY(crit_); + Mutex mutex_; + int media_sent_ RTC_GUARDED_BY(mutex_); + int media_sent_before_ RTC_GUARDED_BY(mutex_); + int padding_sent_ RTC_GUARDED_BY(mutex_); } test(1, 0); RunBaseTest(&test); } TEST_F(TransportFeedbackEndToEndTest, TransportSeqNumOnAudioAndVideo) { - test::ScopedFieldTrials field_trials("WebRTC-Audio-SendSideBwe/Enabled/"); static constexpr size_t kMinPacketsToWaitFor = 50; class TransportSequenceNumberTest : public test::EndToEndTest { public: diff --git a/video/frame_dumping_decoder.cc b/video/frame_dumping_decoder.cc index 4ccb333081..59202dd03c 100644 --- a/video/frame_dumping_decoder.cc +++ b/video/frame_dumping_decoder.cc @@ -32,7 +32,7 @@ class FrameDumpingDecoder : public VideoDecoder { int32_t RegisterDecodeCompleteCallback( DecodedImageCallback* callback) override; int32_t Release() override; - bool PrefersLateDecoding() const override; + DecoderInfo GetDecoderInfo() const override; const char* ImplementationName() const override; private: @@ -73,8 +73,8 @@ int32_t FrameDumpingDecoder::Release() { return decoder_->Release(); } -bool FrameDumpingDecoder::PrefersLateDecoding() const { - return decoder_->PrefersLateDecoding(); +VideoDecoder::DecoderInfo FrameDumpingDecoder::GetDecoderInfo() const { + return decoder_->GetDecoderInfo(); } const char* FrameDumpingDecoder::ImplementationName() const { diff --git a/video/frame_encode_metadata_writer.cc b/video/frame_encode_metadata_writer.cc index e5f55575ec..8a0f3b3867 100644 --- a/video/frame_encode_metadata_writer.cc +++ b/video/frame_encode_metadata_writer.cc @@ -11,7 +11,6 @@ #include 
"video/frame_encode_metadata_writer.h" #include -#include #include #include "common_video/h264/sps_vui_rewriter.h" @@ -60,7 +59,7 @@ FrameEncodeMetadataWriter::~FrameEncodeMetadataWriter() {} void FrameEncodeMetadataWriter::OnEncoderInit(const VideoCodec& codec, bool internal_source) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); codec_settings_ = codec; internal_source_ = internal_source; } @@ -68,7 +67,7 @@ void FrameEncodeMetadataWriter::OnEncoderInit(const VideoCodec& codec, void FrameEncodeMetadataWriter::OnSetRates( const VideoBitrateAllocation& bitrate_allocation, uint32_t framerate_fps) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); framerate_fps_ = framerate_fps; const size_t num_spatial_layers = NumSpatialLayers(); if (timing_frames_info_.size() < num_spatial_layers) { @@ -81,7 +80,7 @@ void FrameEncodeMetadataWriter::OnSetRates( } void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (internal_source_) { return; } @@ -128,7 +127,7 @@ void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) { void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx, EncodedImage* encoded_image) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); absl::optional outlier_frame_size; absl::optional encode_start_ms; uint8_t timing_flags = VideoSendTiming::kNotTriggered; @@ -202,40 +201,28 @@ void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx, } } -std::unique_ptr -FrameEncodeMetadataWriter::UpdateBitstream( +void FrameEncodeMetadataWriter::UpdateBitstream( const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation, EncodedImage* encoded_image) { if (!codec_specific_info || - codec_specific_info->codecType != kVideoCodecH264 || !fragmentation || + codec_specific_info->codecType != kVideoCodecH264 || encoded_image->_frameType != VideoFrameType::kVideoFrameKey) { - return nullptr; + return; } - 
rtc::Buffer modified_buffer; - std::unique_ptr modified_fragmentation = - std::make_unique(); - modified_fragmentation->CopyFrom(*fragmentation); - // Make sure that the data is not copied if owned by EncodedImage. const EncodedImage& buffer = *encoded_image; - SpsVuiRewriter::ParseOutgoingBitstreamAndRewriteSps( - buffer, fragmentation->fragmentationVectorSize, - fragmentation->fragmentationOffset, fragmentation->fragmentationLength, - encoded_image->ColorSpace(), &modified_buffer, - modified_fragmentation->fragmentationOffset, - modified_fragmentation->fragmentationLength); + rtc::Buffer modified_buffer = + SpsVuiRewriter::ParseOutgoingBitstreamAndRewrite( + buffer, encoded_image->ColorSpace()); encoded_image->SetEncodedData( - new rtc::RefCountedObject( + rtc::make_ref_counted( std::move(modified_buffer))); - - return modified_fragmentation; } void FrameEncodeMetadataWriter::Reset() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); for (auto& info : timing_frames_info_) { info.frames.clear(); } diff --git a/video/frame_encode_metadata_writer.h b/video/frame_encode_metadata_writer.h index 4ee2d7eec7..88471459c0 100644 --- a/video/frame_encode_metadata_writer.h +++ b/video/frame_encode_metadata_writer.h @@ -12,7 +12,6 @@ #define VIDEO_FRAME_ENCODE_METADATA_WRITER_H_ #include -#include #include #include "absl/types/optional.h" @@ -20,7 +19,7 @@ #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" #include "modules/video_coding/include/video_codec_interface.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -37,10 +36,8 @@ class FrameEncodeMetadataWriter { void FillTimingInfo(size_t simulcast_svc_idx, EncodedImage* encoded_image); - std::unique_ptr UpdateBitstream( - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation, - EncodedImage* encoded_image); + void UpdateBitstream(const CodecSpecificInfo* codec_specific_info, + 
EncodedImage* encoded_image); void Reset(); @@ -69,7 +66,7 @@ class FrameEncodeMetadataWriter { std::list frames; }; - rtc::CriticalSection lock_; + Mutex lock_; EncodedImageCallback* const frame_drop_callback_; VideoCodec codec_settings_ RTC_GUARDED_BY(&lock_); bool internal_source_ RTC_GUARDED_BY(&lock_); diff --git a/video/frame_encode_metadata_writer_unittest.cc b/video/frame_encode_metadata_writer_unittest.cc index 2f7459943f..da54c3307b 100644 --- a/video/frame_encode_metadata_writer_unittest.cc +++ b/video/frame_encode_metadata_writer_unittest.cc @@ -40,8 +40,7 @@ class FakeEncodedImageCallback : public EncodedImageCallback { public: FakeEncodedImageCallback() : num_frames_dropped_(0) {} Result OnEncodedImage(const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override { + const CodecSpecificInfo* codec_specific_info) override { return Result(Result::OK); } void OnDroppedFrame(DropReason reason) override { ++num_frames_dropped_; } @@ -462,83 +461,55 @@ TEST(FrameEncodeMetadataWriterTest, CopiesPacketInfos) { TEST(FrameEncodeMetadataWriterTest, DoesNotRewriteBitstreamWithoutCodecInfo) { uint8_t buffer[] = {1, 2, 3}; - EncodedImage image(buffer, sizeof(buffer), sizeof(buffer)); - const RTPFragmentationHeader fragmentation; + auto image_buffer = EncodedImageBuffer::Create(buffer, sizeof(buffer)); + EncodedImage image; + image.SetEncodedData(image_buffer); FakeEncodedImageCallback sink; FrameEncodeMetadataWriter encode_metadata_writer(&sink); - EXPECT_EQ( - encode_metadata_writer.UpdateBitstream(nullptr, &fragmentation, &image), - nullptr); - EXPECT_EQ(image.data(), buffer); + encode_metadata_writer.UpdateBitstream(nullptr, &image); + EXPECT_EQ(image.GetEncodedData(), image_buffer); EXPECT_EQ(image.size(), sizeof(buffer)); } TEST(FrameEncodeMetadataWriterTest, DoesNotRewriteVp8Bitstream) { uint8_t buffer[] = {1, 2, 3}; - EncodedImage image(buffer, sizeof(buffer), sizeof(buffer)); + 
auto image_buffer = EncodedImageBuffer::Create(buffer, sizeof(buffer)); + EncodedImage image; + image.SetEncodedData(image_buffer); CodecSpecificInfo codec_specific_info; codec_specific_info.codecType = kVideoCodecVP8; - const RTPFragmentationHeader fragmentation; - - FakeEncodedImageCallback sink; - FrameEncodeMetadataWriter encode_metadata_writer(&sink); - EXPECT_EQ(encode_metadata_writer.UpdateBitstream(&codec_specific_info, - &fragmentation, &image), - nullptr); - EXPECT_EQ(image.data(), buffer); - EXPECT_EQ(image.size(), sizeof(buffer)); -} - -TEST(FrameEncodeMetadataWriterTest, - DoesNotRewriteH264BitstreamWithoutFragmentation) { - uint8_t buffer[] = {1, 2, 3}; - EncodedImage image(buffer, sizeof(buffer), sizeof(buffer)); - CodecSpecificInfo codec_specific_info; - codec_specific_info.codecType = kVideoCodecH264; FakeEncodedImageCallback sink; FrameEncodeMetadataWriter encode_metadata_writer(&sink); - EXPECT_EQ(encode_metadata_writer.UpdateBitstream(&codec_specific_info, - nullptr, &image), - nullptr); - EXPECT_EQ(image.data(), buffer); + encode_metadata_writer.UpdateBitstream(&codec_specific_info, &image); + EXPECT_EQ(image.GetEncodedData(), image_buffer); EXPECT_EQ(image.size(), sizeof(buffer)); } TEST(FrameEncodeMetadataWriterTest, RewritesH264BitstreamWithNonOptimalSps) { - uint8_t original_sps[] = {0, 0, 0, 1, H264::NaluType::kSps, - 0x00, 0x00, 0x03, 0x03, 0xF4, - 0x05, 0x03, 0xC7, 0xC0}; + const uint8_t kOriginalSps[] = {0, 0, 0, 1, H264::NaluType::kSps, + 0x00, 0x00, 0x03, 0x03, 0xF4, + 0x05, 0x03, 0xC7, 0xC0}; const uint8_t kRewrittenSps[] = {0, 0, 0, 1, H264::NaluType::kSps, 0x00, 0x00, 0x03, 0x03, 0xF4, 0x05, 0x03, 0xC7, 0xE0, 0x1B, 0x41, 0x10, 0x8D, 0x00}; - EncodedImage image(original_sps, sizeof(original_sps), sizeof(original_sps)); + EncodedImage image; + image.SetEncodedData( + EncodedImageBuffer::Create(kOriginalSps, sizeof(kOriginalSps))); image._frameType = VideoFrameType::kVideoFrameKey; CodecSpecificInfo codec_specific_info; 
codec_specific_info.codecType = kVideoCodecH264; - RTPFragmentationHeader fragmentation; - fragmentation.VerifyAndAllocateFragmentationHeader(1); - fragmentation.fragmentationOffset[0] = 4; - fragmentation.fragmentationLength[0] = sizeof(original_sps) - 4; - FakeEncodedImageCallback sink; FrameEncodeMetadataWriter encode_metadata_writer(&sink); - std::unique_ptr modified_fragmentation = - encode_metadata_writer.UpdateBitstream(&codec_specific_info, - &fragmentation, &image); + encode_metadata_writer.UpdateBitstream(&codec_specific_info, &image); - ASSERT_NE(modified_fragmentation, nullptr); EXPECT_THAT(std::vector(image.data(), image.data() + image.size()), testing::ElementsAreArray(kRewrittenSps)); - ASSERT_THAT(modified_fragmentation->fragmentationVectorSize, 1U); - EXPECT_EQ(modified_fragmentation->fragmentationOffset[0], 4U); - EXPECT_EQ(modified_fragmentation->fragmentationLength[0], - sizeof(kRewrittenSps) - 4); } } // namespace test diff --git a/video/full_stack_tests.cc b/video/full_stack_tests.cc index 7307b462b7..3831fdfcef 100644 --- a/video/full_stack_tests.cc +++ b/video/full_stack_tests.cc @@ -21,7 +21,7 @@ #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder_config.h" -#include "media/base/vp9_profile.h" +#include "api/video_codecs/vp9_profile.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "system_wrappers/include/field_trial.h" #include "test/field_trial.h" @@ -48,8 +48,6 @@ namespace webrtc { namespace { static const int kFullStackTestDurationSecs = 45; -const char kVp8TrustedRateControllerFieldTrial[] = - "WebRTC-LibvpxVp8TrustedRateController/Enabled/"; struct ParamsWithLogging : public VideoQualityTest::Params { public: @@ -90,28 +88,8 @@ std::string ClipNameToClipPath(const char* clip_name) { // logs // bool // }; -class GenericDescriptorTest : public ::testing::TestWithParam { - public: - GenericDescriptorTest() - : 
field_trial_(AppendFieldTrials(GetParam())), - generic_descriptor_enabled_( - field_trial::IsEnabled("WebRTC-GenericDescriptor")) {} - - std::string GetTestName(std::string base) { - if (generic_descriptor_enabled_) - base += "_generic_descriptor"; - return base; - } - - bool GenericDescriptorEnabled() const { return generic_descriptor_enabled_; } - - private: - test::ScopedFieldTrials field_trial_; - bool generic_descriptor_enabled_; -}; - #if defined(RTC_ENABLE_VP9) -TEST(FullStackTest, ForemanCifWithoutPacketLossVp9) { +TEST(FullStackTest, Foreman_Cif_Net_Delay_0_0_Plr_0_VP9) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -125,7 +103,8 @@ TEST(FullStackTest, ForemanCifWithoutPacketLossVp9) { fixture->RunWithAnalyzer(foreman_cif); } -TEST_P(GenericDescriptorTest, ForemanCifPlr5Vp9) { +TEST(GenericDescriptorTest, + Foreman_Cif_Delay_50_0_Plr_5_VP9_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -134,15 +113,15 @@ TEST_P(GenericDescriptorTest, ForemanCifPlr5Vp9) { 30000, 500000, 2000000, false, "VP9", 1, 0, 0, false, false, true, ClipNameToClipPath("foreman_cif")}; - foreman_cif.analyzer = {GetTestName("foreman_cif_delay_50_0_plr_5_VP9"), 0.0, - 0.0, kFullStackTestDurationSecs}; + foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_VP9_generic_descriptor", + 0.0, 0.0, kFullStackTestDurationSecs}; foreman_cif.config->loss_percent = 5; foreman_cif.config->queue_delay_ms = 50; - foreman_cif.call.generic_descriptor = GenericDescriptorEnabled(); + foreman_cif.call.generic_descriptor = true; fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, GeneratorWithoutPacketLossVp9Profile2) { +TEST(FullStackTest, Generator_Net_Delay_0_0_Plr_0_VP9Profile2) { // Profile 2 might not be available on some platforms until // https://bugs.chromium.org/p/webm/issues/detail?id=1544 is solved. 
bool profile_2_is_supported = false; @@ -168,7 +147,7 @@ TEST(FullStackTest, GeneratorWithoutPacketLossVp9Profile2) { fixture->RunWithAnalyzer(generator); } -TEST(FullStackTest, ForemanCifWithoutPacketLossMultiplexI420Frame) { +TEST(FullStackTest, Foreman_Cif_Net_Delay_0_0_Plr_0_Multiplex) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -182,7 +161,7 @@ TEST(FullStackTest, ForemanCifWithoutPacketLossMultiplexI420Frame) { fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, GeneratorWithoutPacketLossMultiplexI420AFrame) { +TEST(FullStackTest, Generator_Net_Delay_0_0_Plr_0_Multiplex) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging generator; @@ -199,11 +178,11 @@ TEST(FullStackTest, GeneratorWithoutPacketLossMultiplexI420AFrame) { #if defined(WEBRTC_LINUX) // Crashes on the linux trusty perf bot: bugs.webrtc.org/9129. -#define MAYBE_ParisQcifWithoutPacketLoss DISABLED_ParisQcifWithoutPacketLoss +#define MAYBE_Net_Delay_0_0_Plr_0 DISABLED_Net_Delay_0_0_Plr_0 #else -#define MAYBE_ParisQcifWithoutPacketLoss ParisQcifWithoutPacketLoss +#define MAYBE_Net_Delay_0_0_Plr_0 Net_Delay_0_0_Plr_0 #endif -TEST(FullStackTest, MAYBE_ParisQcifWithoutPacketLoss) { +TEST(FullStackTest, MAYBE_Net_Delay_0_0_Plr_0) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging paris_qcif; paris_qcif.call.send_side_bwe = true; @@ -217,7 +196,8 @@ TEST(FullStackTest, MAYBE_ParisQcifWithoutPacketLoss) { fixture->RunWithAnalyzer(paris_qcif); } -TEST_P(GenericDescriptorTest, ForemanCifWithoutPacketLoss) { +TEST(GenericDescriptorTest, + Foreman_Cif_Net_Delay_0_0_Plr_0_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); // TODO(pbos): Decide on psnr/ssim thresholds for foreman_cif. 
ParamsWithLogging foreman_cif; @@ -227,34 +207,15 @@ TEST_P(GenericDescriptorTest, ForemanCifWithoutPacketLoss) { 700000, 700000, 700000, false, "VP8", 1, 0, 0, false, false, true, ClipNameToClipPath("foreman_cif")}; - foreman_cif.analyzer = {GetTestName("foreman_cif_net_delay_0_0_plr_0"), 0.0, - 0.0, kFullStackTestDurationSecs}; - foreman_cif.call.generic_descriptor = GenericDescriptorEnabled(); - fixture->RunWithAnalyzer(foreman_cif); -} - -TEST_P(GenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) { - auto fixture = CreateVideoQualityTestFixture(); - ParamsWithLogging foreman_cif; - foreman_cif.call.send_side_bwe = true; - foreman_cif.video[0] = { - true, 352, 288, 10, - 30000, 30000, 30000, false, - "VP8", 1, 0, 0, - false, false, true, ClipNameToClipPath("foreman_cif")}; - foreman_cif.analyzer = {GetTestName("foreman_cif_30kbps_net_delay_0_0_plr_0"), + foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_generic_descriptor", 0.0, 0.0, kFullStackTestDurationSecs}; - foreman_cif.call.generic_descriptor = GenericDescriptorEnabled(); + foreman_cif.call.generic_descriptor = true; fixture->RunWithAnalyzer(foreman_cif); } -// TODO(webrtc:9722): Remove when experiment is cleaned up. 
-TEST_P(GenericDescriptorTest, - ForemanCif30kbpsWithoutPacketLossTrustedRateControl) { - test::ScopedFieldTrials override_field_trials( - AppendFieldTrials(kVp8TrustedRateControllerFieldTrial)); +TEST(GenericDescriptorTest, + Foreman_Cif_30kbps_Net_Delay_0_0_Plr_0_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); - ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; foreman_cif.video[0] = { @@ -263,14 +224,14 @@ TEST_P(GenericDescriptorTest, "VP8", 1, 0, 0, false, false, true, ClipNameToClipPath("foreman_cif")}; foreman_cif.analyzer = { - GetTestName("foreman_cif_30kbps_net_delay_0_0_plr_0_trusted_rate_ctrl"), - 0.0, 0.0, kFullStackTestDurationSecs}; - foreman_cif.call.generic_descriptor = GenericDescriptorEnabled(); + "foreman_cif_30kbps_net_delay_0_0_plr_0_generic_descriptor", 0.0, 0.0, + kFullStackTestDurationSecs}; + foreman_cif.call.generic_descriptor = true; fixture->RunWithAnalyzer(foreman_cif); } // Link capacity below default start rate. -TEST(FullStackTest, ForemanCifLink150kbpsWithoutPacketLoss) { +TEST(FullStackTest, Foreman_Cif_Link_150kbps_Net_Delay_0_0_Plr_0) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -286,7 +247,8 @@ TEST(FullStackTest, ForemanCifLink150kbpsWithoutPacketLoss) { } // Restricted network and encoder overproducing by 30%. -TEST(FullStackTest, ForemanCifLink150kbpsBadRateController) { +TEST(FullStackTest, + Foreman_Cif_Link_150kbps_Delay100ms_30pkts_Queue_Overshoot30) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -309,7 +271,7 @@ TEST(FullStackTest, ForemanCifLink150kbpsBadRateController) { // Packet rate and loss are low enough that loss will happen with ~3s interval. // This triggers protection overhead to toggle between zero and non-zero. // Link queue is restrictive enough to trigger loss on probes. 
-TEST(FullStackTest, ForemanCifMediaCapacitySmallLossAndQueue) { +TEST(FullStackTest, Foreman_Cif_Link_250kbps_Delay100ms_10pkts_Loss1) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -328,7 +290,7 @@ TEST(FullStackTest, ForemanCifMediaCapacitySmallLossAndQueue) { fixture->RunWithAnalyzer(foreman_cif); } -TEST_P(GenericDescriptorTest, ForemanCifPlr5) { +TEST(GenericDescriptorTest, Foreman_Cif_Delay_50_0_Plr_5_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -337,15 +299,16 @@ TEST_P(GenericDescriptorTest, ForemanCifPlr5) { 30000, 500000, 2000000, false, "VP8", 1, 0, 0, false, false, true, ClipNameToClipPath("foreman_cif")}; - foreman_cif.analyzer = {GetTestName("foreman_cif_delay_50_0_plr_5"), 0.0, 0.0, - kFullStackTestDurationSecs}; + foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_generic_descriptor", + 0.0, 0.0, kFullStackTestDurationSecs}; foreman_cif.config->loss_percent = 5; foreman_cif.config->queue_delay_ms = 50; - foreman_cif.call.generic_descriptor = GenericDescriptorEnabled(); + foreman_cif.call.generic_descriptor = true; fixture->RunWithAnalyzer(foreman_cif); } -TEST_P(GenericDescriptorTest, ForemanCifPlr5Ulpfec) { +TEST(GenericDescriptorTest, + Foreman_Cif_Delay_50_0_Plr_5_Ulpfec_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -354,15 +317,16 @@ TEST_P(GenericDescriptorTest, ForemanCifPlr5Ulpfec) { 30000, 500000, 2000000, false, "VP8", 1, 0, 0, true, false, true, ClipNameToClipPath("foreman_cif")}; - foreman_cif.analyzer = {GetTestName("foreman_cif_delay_50_0_plr_5_ulpfec"), - 0.0, 0.0, kFullStackTestDurationSecs}; + foreman_cif.analyzer = { + "foreman_cif_delay_50_0_plr_5_ulpfec_generic_descriptor", 0.0, 0.0, + kFullStackTestDurationSecs}; foreman_cif.config->loss_percent = 5; 
foreman_cif.config->queue_delay_ms = 50; - foreman_cif.call.generic_descriptor = GenericDescriptorEnabled(); + foreman_cif.call.generic_descriptor = true; fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCifPlr5Flexfec) { +TEST(FullStackTest, Foreman_Cif_Delay_50_0_Plr_5_Flexfec) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -378,7 +342,7 @@ TEST(FullStackTest, ForemanCifPlr5Flexfec) { fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCif500kbpsPlr3Flexfec) { +TEST(FullStackTest, Foreman_Cif_500kbps_Delay_50_0_Plr_3_Flexfec) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -395,7 +359,7 @@ TEST(FullStackTest, ForemanCif500kbpsPlr3Flexfec) { fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCif500kbpsPlr3Ulpfec) { +TEST(FullStackTest, Foreman_Cif_500kbps_Delay_50_0_Plr_3_Ulpfec) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -413,7 +377,7 @@ TEST(FullStackTest, ForemanCif500kbpsPlr3Ulpfec) { } #if defined(WEBRTC_USE_H264) -TEST(FullStackTest, ForemanCifWithoutPacketlossH264) { +TEST(FullStackTest, Foreman_Cif_Net_Delay_0_0_Plr_0_H264) { auto fixture = CreateVideoQualityTestFixture(); // TODO(pbos): Decide on psnr/ssim thresholds for foreman_cif. 
ParamsWithLogging foreman_cif; @@ -428,7 +392,7 @@ TEST(FullStackTest, ForemanCifWithoutPacketlossH264) { fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCif30kbpsWithoutPacketlossH264) { +TEST(FullStackTest, Foreman_Cif_30kbps_Net_Delay_0_0_Plr_0_H264) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -442,7 +406,8 @@ TEST(FullStackTest, ForemanCif30kbpsWithoutPacketlossH264) { fixture->RunWithAnalyzer(foreman_cif); } -TEST_P(GenericDescriptorTest, ForemanCifPlr5H264) { +TEST(GenericDescriptorTest, + Foreman_Cif_Delay_50_0_Plr_5_H264_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -451,15 +416,16 @@ TEST_P(GenericDescriptorTest, ForemanCifPlr5H264) { 30000, 500000, 2000000, false, "H264", 1, 0, 0, false, false, true, ClipNameToClipPath("foreman_cif")}; - foreman_cif.analyzer = {GetTestName("foreman_cif_delay_50_0_plr_5_H264"), 0.0, - 0.0, kFullStackTestDurationSecs}; + foreman_cif.analyzer = { + "foreman_cif_delay_50_0_plr_5_H264_generic_descriptor", 0.0, 0.0, + kFullStackTestDurationSecs}; foreman_cif.config->loss_percent = 5; foreman_cif.config->queue_delay_ms = 50; - foreman_cif.call.generic_descriptor = GenericDescriptorEnabled(); + foreman_cif.call.generic_descriptor = true; fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) { +TEST(FullStackTest, Foreman_Cif_Delay_50_0_Plr_5_H264_Sps_Pps_Idr) { test::ScopedFieldTrials override_field_trials( AppendFieldTrials("WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/")); auto fixture = CreateVideoQualityTestFixture(); @@ -479,7 +445,7 @@ TEST(FullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) { } // Verify that this is worth the bot time, before enabling. 
-TEST(FullStackTest, ForemanCifPlr5H264Flexfec) { +TEST(FullStackTest, Foreman_Cif_Delay_50_0_Plr_5_H264_Flexfec) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -497,7 +463,7 @@ TEST(FullStackTest, ForemanCifPlr5H264Flexfec) { // Ulpfec with H264 is an unsupported combination, so this test is only useful // for debugging. It is therefore disabled by default. -TEST(FullStackTest, DISABLED_ForemanCifPlr5H264Ulpfec) { +TEST(FullStackTest, DISABLED_Foreman_Cif_Delay_50_0_Plr_5_H264_Ulpfec) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -514,7 +480,7 @@ TEST(FullStackTest, DISABLED_ForemanCifPlr5H264Ulpfec) { } #endif // defined(WEBRTC_USE_H264) -TEST(FullStackTest, ForemanCif500kbps) { +TEST(FullStackTest, Foreman_Cif_500kbps) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -531,7 +497,7 @@ TEST(FullStackTest, ForemanCif500kbps) { fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCif500kbpsLimitedQueue) { +TEST(FullStackTest, Foreman_Cif_500kbps_32pkts_Queue) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -548,7 +514,7 @@ TEST(FullStackTest, ForemanCif500kbpsLimitedQueue) { fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCif500kbps100ms) { +TEST(FullStackTest, Foreman_Cif_500kbps_100ms) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -565,7 +531,8 @@ TEST(FullStackTest, ForemanCif500kbps100ms) { fixture->RunWithAnalyzer(foreman_cif); } -TEST_P(GenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) { +TEST(GenericDescriptorTest, + Foreman_Cif_500kbps_100ms_32pkts_Queue_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging 
foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -574,16 +541,17 @@ TEST_P(GenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) { 30000, 500000, 2000000, false, "VP8", 1, 0, 0, false, false, true, ClipNameToClipPath("foreman_cif")}; - foreman_cif.analyzer = {GetTestName("foreman_cif_500kbps_100ms_32pkts_queue"), - 0.0, 0.0, kFullStackTestDurationSecs}; + foreman_cif.analyzer = { + "foreman_cif_500kbps_100ms_32pkts_queue_generic_descriptor", 0.0, 0.0, + kFullStackTestDurationSecs}; foreman_cif.config->queue_length_packets = 32; foreman_cif.config->queue_delay_ms = 100; foreman_cif.config->link_capacity_kbps = 500; - foreman_cif.call.generic_descriptor = GenericDescriptorEnabled(); + foreman_cif.call.generic_descriptor = true; fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCif500kbps100msLimitedQueueRecvBwe) { +TEST(FullStackTest, Foreman_Cif_500kbps_100ms_32pkts_Queue_Recv_Bwe) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = false; @@ -600,7 +568,7 @@ TEST(FullStackTest, ForemanCif500kbps100msLimitedQueueRecvBwe) { fixture->RunWithAnalyzer(foreman_cif); } -TEST(FullStackTest, ForemanCif1000kbps100msLimitedQueue) { +TEST(FullStackTest, Foreman_Cif_1000kbps_100ms_32pkts_Queue) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging foreman_cif; foreman_cif.call.send_side_bwe = true; @@ -618,7 +586,7 @@ TEST(FullStackTest, ForemanCif1000kbps100msLimitedQueue) { } // TODO(sprang): Remove this if we have the similar ModerateLimits below? 
-TEST(FullStackTest, ConferenceMotionHd2000kbps100msLimitedQueue) { +TEST(FullStackTest, Conference_Motion_Hd_2000kbps_100ms_32pkts_Queue) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging conf_motion_hd; conf_motion_hd.call.send_side_bwe = true; @@ -639,34 +607,8 @@ TEST(FullStackTest, ConferenceMotionHd2000kbps100msLimitedQueue) { fixture->RunWithAnalyzer(conf_motion_hd); } -// TODO(webrtc:9722): Remove when experiment is cleaned up. -TEST(FullStackTest, ConferenceMotionHd1TLModerateLimitsWhitelistVp8) { - test::ScopedFieldTrials override_field_trials( - AppendFieldTrials(kVp8TrustedRateControllerFieldTrial)); - auto fixture = CreateVideoQualityTestFixture(); - - ParamsWithLogging conf_motion_hd; - conf_motion_hd.call.send_side_bwe = true; - conf_motion_hd.video[0] = { - true, 1280, - 720, 50, - 30000, 3000000, - 3000000, false, - "VP8", 1, - -1, 0, - false, false, - false, ClipNameToClipPath("ConferenceMotion_1280_720_50")}; - conf_motion_hd.analyzer = { - "conference_motion_hd_1tl_moderate_limits_trusted_rate_ctrl", 0.0, 0.0, - kFullStackTestDurationSecs}; - conf_motion_hd.config->queue_length_packets = 50; - conf_motion_hd.config->loss_percent = 3; - conf_motion_hd.config->queue_delay_ms = 100; - conf_motion_hd.config->link_capacity_kbps = 2000; - fixture->RunWithAnalyzer(conf_motion_hd); -} - -TEST_P(GenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) { +TEST(GenericDescriptorTest, + Conference_Motion_Hd_2tl_Moderate_Limits_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging conf_motion_hd; conf_motion_hd.call.send_side_bwe = true; @@ -680,17 +622,17 @@ TEST_P(GenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) { false, false, false, ClipNameToClipPath("ConferenceMotion_1280_720_50")}; conf_motion_hd.analyzer = { - GetTestName("conference_motion_hd_2tl_moderate_limits"), 0.0, 0.0, + "conference_motion_hd_2tl_moderate_limits_generic_descriptor", 0.0, 0.0, kFullStackTestDurationSecs}; 
conf_motion_hd.config->queue_length_packets = 50; conf_motion_hd.config->loss_percent = 3; conf_motion_hd.config->queue_delay_ms = 100; conf_motion_hd.config->link_capacity_kbps = 2000; - conf_motion_hd.call.generic_descriptor = GenericDescriptorEnabled(); + conf_motion_hd.call.generic_descriptor = true; fixture->RunWithAnalyzer(conf_motion_hd); } -TEST(FullStackTest, ConferenceMotionHd3TLModerateLimits) { +TEST(FullStackTest, Conference_Motion_Hd_3tl_Moderate_Limits) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging conf_motion_hd; conf_motion_hd.call.send_side_bwe = true; @@ -712,7 +654,7 @@ TEST(FullStackTest, ConferenceMotionHd3TLModerateLimits) { fixture->RunWithAnalyzer(conf_motion_hd); } -TEST(FullStackTest, ConferenceMotionHd4TLModerateLimits) { +TEST(FullStackTest, Conference_Motion_Hd_4tl_Moderate_Limits) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging conf_motion_hd; conf_motion_hd.call.send_side_bwe = true; @@ -734,7 +676,7 @@ TEST(FullStackTest, ConferenceMotionHd4TLModerateLimits) { fixture->RunWithAnalyzer(conf_motion_hd); } -TEST(FullStackTest, ConferenceMotionHd3TLModerateLimitsAltTLPattern) { +TEST(FullStackTest, Conference_Motion_Hd_3tl_Alt_Moderate_Limits) { test::ScopedFieldTrials field_trial( AppendFieldTrials("WebRTC-UseShortVP8TL3Pattern/Enabled/")); auto fixture = CreateVideoQualityTestFixture(); @@ -758,8 +700,7 @@ TEST(FullStackTest, ConferenceMotionHd3TLModerateLimitsAltTLPattern) { fixture->RunWithAnalyzer(conf_motion_hd); } -TEST(FullStackTest, - ConferenceMotionHd3TLModerateLimitsAltTLPatternAndBaseHeavyTLAllocation) { +TEST(FullStackTest, Conference_Motion_Hd_3tl_Alt_Heavy_Moderate_Limits) { auto fixture = CreateVideoQualityTestFixture(); test::ScopedFieldTrials field_trial( AppendFieldTrials("WebRTC-UseShortVP8TL3Pattern/Enabled/" @@ -786,7 +727,7 @@ TEST(FullStackTest, } #if defined(RTC_ENABLE_VP9) -TEST(FullStackTest, ConferenceMotionHd2000kbps100msLimitedQueueVP9) { +TEST(FullStackTest, 
Conference_Motion_Hd_2000kbps_100ms_32pkts_Queue_Vp9) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging conf_motion_hd; conf_motion_hd.call.send_side_bwe = true; @@ -809,7 +750,7 @@ TEST(FullStackTest, ConferenceMotionHd2000kbps100msLimitedQueueVP9) { } #endif -TEST(FullStackTest, ScreenshareSlidesVP8_2TL) { +TEST(FullStackTest, Screenshare_Slides) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging screenshare; screenshare.call.send_side_bwe = true; @@ -824,7 +765,7 @@ TEST(FullStackTest, ScreenshareSlidesVP8_2TL) { #if !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN) // TODO(bugs.webrtc.org/9840): Investigate why is this test flaky on Win/Mac. -TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Simulcast) { +TEST(FullStackTest, Screenshare_Slides_Simulcast) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging screenshare; screenshare.call.send_side_bwe = true; @@ -854,7 +795,7 @@ TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Simulcast) { #endif // !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN) -TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Scroll) { +TEST(FullStackTest, Screenshare_Slides_Scrolling) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging config; config.call.send_side_bwe = true; @@ -867,7 +808,7 @@ TEST(FullStackTest, ScreenshareSlidesVP8_2TL_Scroll) { fixture->RunWithAnalyzer(config); } -TEST_P(GenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) { +TEST(GenericDescriptorTest, Screenshare_Slides_Lossy_Net_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging screenshare; screenshare.call.send_side_bwe = true; @@ -875,16 +816,16 @@ TEST_P(GenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) { 1000000, false, "VP8", 2, 1, 400000, false, false, false, ""}; screenshare.screenshare[0] = {true, false, 10}; - screenshare.analyzer = {GetTestName("screenshare_slides_lossy_net"), 0.0, 0.0, - kFullStackTestDurationSecs}; + screenshare.analyzer = 
{"screenshare_slides_lossy_net_generic_descriptor", + 0.0, 0.0, kFullStackTestDurationSecs}; screenshare.config->loss_percent = 5; screenshare.config->queue_delay_ms = 200; screenshare.config->link_capacity_kbps = 500; - screenshare.call.generic_descriptor = GenericDescriptorEnabled(); + screenshare.call.generic_descriptor = true; fixture->RunWithAnalyzer(screenshare); } -TEST(FullStackTest, ScreenshareSlidesVP8_2TL_VeryLossyNet) { +TEST(FullStackTest, Screenshare_Slides_Very_Lossy) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging screenshare; screenshare.call.send_side_bwe = true; @@ -900,7 +841,7 @@ TEST(FullStackTest, ScreenshareSlidesVP8_2TL_VeryLossyNet) { fixture->RunWithAnalyzer(screenshare); } -TEST(FullStackTest, ScreenshareSlidesVP8_2TL_LossyNetRestrictedQueue) { +TEST(FullStackTest, Screenshare_Slides_Lossy_Limited) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging screenshare; screenshare.call.send_side_bwe = true; @@ -917,7 +858,7 @@ TEST(FullStackTest, ScreenshareSlidesVP8_2TL_LossyNetRestrictedQueue) { fixture->RunWithAnalyzer(screenshare); } -TEST(FullStackTest, ScreenshareSlidesVP8_2TL_ModeratelyRestricted) { +TEST(FullStackTest, Screenshare_Slides_Moderately_Restricted) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging screenshare; screenshare.call.send_side_bwe = true; @@ -986,7 +927,7 @@ ParamsWithLogging::Video SimulcastVp8VideoLow() { #if defined(RTC_ENABLE_VP9) -TEST(FullStackTest, ScreenshareSlidesVP9_3SL_High_Fps) { +TEST(FullStackTest, Screenshare_Slides_Vp9_3sl_High_Fps) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging screenshare; screenshare.call.send_side_bwe = true; @@ -1005,7 +946,7 @@ TEST(FullStackTest, ScreenshareSlidesVP9_3SL_High_Fps) { // TODO(http://bugs.webrtc.org/9506): investigate. 
#if !defined(WEBRTC_MAC) -TEST(FullStackTest, VP9KSVC_3SL_High) { +TEST(FullStackTest, Vp9ksvc_3sl_High) { webrtc::test::ScopedFieldTrials override_trials( AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/")); auto fixture = CreateVideoQualityTestFixture(); @@ -1020,7 +961,7 @@ TEST(FullStackTest, VP9KSVC_3SL_High) { fixture->RunWithAnalyzer(simulcast); } -TEST(FullStackTest, VP9KSVC_3SL_Low) { +TEST(FullStackTest, Vp9ksvc_3sl_Low) { webrtc::test::ScopedFieldTrials override_trials( AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/")); auto fixture = CreateVideoQualityTestFixture(); @@ -1035,7 +976,7 @@ TEST(FullStackTest, VP9KSVC_3SL_Low) { fixture->RunWithAnalyzer(simulcast); } -TEST(FullStackTest, VP9KSVC_3SL_Low_Bw_Limited) { +TEST(FullStackTest, Vp9ksvc_3sl_Low_Bw_Limited) { webrtc::test::ScopedFieldTrials override_trials( AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/" "WebRTC-Vp9ExternalRefCtrl/Enabled/")); @@ -1052,7 +993,7 @@ TEST(FullStackTest, VP9KSVC_3SL_Low_Bw_Limited) { fixture->RunWithAnalyzer(simulcast); } -TEST(FullStackTest, VP9KSVC_3SL_Medium_Network_Restricted) { +TEST(FullStackTest, Vp9ksvc_3sl_Medium_Network_Restricted) { webrtc::test::ScopedFieldTrials override_trials( AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/")); auto fixture = CreateVideoQualityTestFixture(); @@ -1070,10 +1011,9 @@ TEST(FullStackTest, VP9KSVC_3SL_Medium_Network_Restricted) { } // TODO(webrtc:9722): Remove when experiment is cleaned up. 
-TEST(FullStackTest, VP9KSVC_3SL_Medium_Network_Restricted_Trusted_Rate) { +TEST(FullStackTest, Vp9ksvc_3sl_Medium_Network_Restricted_Trusted_Rate) { webrtc::test::ScopedFieldTrials override_trials( - AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/" - "WebRTC-LibvpxVp9TrustedRateController/Enabled/")); + AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/")); auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging simulcast; simulcast.call.send_side_bwe = true; @@ -1094,12 +1034,12 @@ TEST(FullStackTest, VP9KSVC_3SL_Medium_Network_Restricted_Trusted_Rate) { // Android bots can't handle FullHD, so disable the test. // TODO(bugs.webrtc.org/9220): Investigate source of flakiness on Mac. #if defined(WEBRTC_ANDROID) || defined(WEBRTC_MAC) -#define MAYBE_SimulcastFullHdOveruse DISABLED_SimulcastFullHdOveruse +#define MAYBE_Simulcast_HD_High DISABLED_Simulcast_HD_High #else -#define MAYBE_SimulcastFullHdOveruse SimulcastFullHdOveruse +#define MAYBE_Simulcast_HD_High Simulcast_HD_High #endif -TEST(FullStackTest, MAYBE_SimulcastFullHdOveruse) { +TEST(FullStackTest, MAYBE_Simulcast_HD_High) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging simulcast; simulcast.call.send_side_bwe = true; @@ -1122,7 +1062,7 @@ TEST(FullStackTest, MAYBE_SimulcastFullHdOveruse) { fixture->RunWithAnalyzer(simulcast); } -TEST(FullStackTest, SimulcastVP8_3SL_High) { +TEST(FullStackTest, Simulcast_Vp8_3sl_High) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging simulcast; simulcast.call.send_side_bwe = true; @@ -1148,7 +1088,7 @@ TEST(FullStackTest, SimulcastVP8_3SL_High) { fixture->RunWithAnalyzer(simulcast); } -TEST(FullStackTest, SimulcastVP8_3SL_Low) { +TEST(FullStackTest, Simulcast_Vp8_3sl_Low) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging simulcast; simulcast.call.send_side_bwe = true; @@ -1178,11 +1118,11 @@ TEST(FullStackTest, SimulcastVP8_3SL_Low) { // available and exercises WebRTC 
calls with a high target bitrate(100 Mbps). // Android32 bots can't handle this high bitrate, so disable test for those. #if defined(WEBRTC_ANDROID) -#define MAYBE_HighBitrateWithFakeCodec DISABLED_HighBitrateWithFakeCodec +#define MAYBE_High_Bitrate_With_Fake_Codec DISABLED_High_Bitrate_With_Fake_Codec #else -#define MAYBE_HighBitrateWithFakeCodec HighBitrateWithFakeCodec +#define MAYBE_High_Bitrate_With_Fake_Codec High_Bitrate_With_Fake_Codec #endif // defined(WEBRTC_ANDROID) -TEST(FullStackTest, MAYBE_HighBitrateWithFakeCodec) { +TEST(FullStackTest, MAYBE_High_Bitrate_With_Fake_Codec) { auto fixture = CreateVideoQualityTestFixture(); const int target_bitrate = 100000000; ParamsWithLogging generator; @@ -1214,12 +1154,12 @@ TEST(FullStackTest, MAYBE_HighBitrateWithFakeCodec) { #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) // Fails on mobile devices: // https://bugs.chromium.org/p/webrtc/issues/detail?id=7301 -#define MAYBE_LargeRoomVP8_50thumb DISABLED_LargeRoomVP8_50thumb +#define MAYBE_Largeroom_50thumb DISABLED_Largeroom_50thumb #else -#define MAYBE_LargeRoomVP8_50thumb LargeRoomVP8_50thumb +#define MAYBE_Largeroom_50thumb Largeroom_50thumb #endif -TEST(FullStackTest, MAYBE_LargeRoomVP8_50thumb) { +TEST(FullStackTest, MAYBE_Largeroom_50thumb) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging large_room; large_room.call.send_side_bwe = true; @@ -1246,10 +1186,4 @@ TEST(FullStackTest, MAYBE_LargeRoomVP8_50thumb) { fixture->RunWithAnalyzer(large_room); } -INSTANTIATE_TEST_SUITE_P( - FullStackTest, - GenericDescriptorTest, - ::testing::Values("WebRTC-GenericDescriptor/Disabled/", - "WebRTC-GenericDescriptor/Enabled/")); - } // namespace webrtc diff --git a/video/full_stack_tests_plot.py b/video/full_stack_tests_plot.py index f50c297b17..c195b72a54 100755 --- a/video/full_stack_tests_plot.py +++ b/video/full_stack_tests_plot.py @@ -6,7 +6,6 @@ # tree. An additional intellectual property rights grant can be found # in the file PATENTS. 
All contributing project authors may # be found in the AUTHORS file in the root of the source tree. - """Generate graphs for data generated by loopback tests. Usage examples: @@ -34,14 +33,14 @@ # Fields DROPPED = 0 -INPUT_TIME = 1 # ms (timestamp) -SEND_TIME = 2 # ms (timestamp) -RECV_TIME = 3 # ms (timestamp) -RENDER_TIME = 4 # ms (timestamp) -ENCODED_FRAME_SIZE = 5 # bytes +INPUT_TIME = 1 # ms (timestamp) +SEND_TIME = 2 # ms (timestamp) +RECV_TIME = 3 # ms (timestamp) +RENDER_TIME = 4 # ms (timestamp) +ENCODED_FRAME_SIZE = 5 # bytes PSNR = 6 SSIM = 7 -ENCODE_TIME = 8 # ms (time interval) +ENCODE_TIME = 8 # ms (time interval) TOTAL_RAW_FIELDS = 9 @@ -78,111 +77,116 @@ NAME_TO_ID = {field[1]: field[0] for field in _FIELDS} ID_TO_TITLE = {field[0]: field[2] for field in _FIELDS} + def FieldArgToId(arg): - if arg == "none": - return None - if arg in NAME_TO_ID: - return NAME_TO_ID[arg] - if arg + "_ms" in NAME_TO_ID: - return NAME_TO_ID[arg + "_ms"] - raise Exception("Unrecognized field name \"{}\"".format(arg)) + if arg == "none": + return None + if arg in NAME_TO_ID: + return NAME_TO_ID[arg] + if arg + "_ms" in NAME_TO_ID: + return NAME_TO_ID[arg + "_ms"] + raise Exception("Unrecognized field name \"{}\"".format(arg)) class PlotLine(object): - """Data for a single graph line.""" + """Data for a single graph line.""" - def __init__(self, label, values, flags): - self.label = label - self.values = values - self.flags = flags + def __init__(self, label, values, flags): + self.label = label + self.values = values + self.flags = flags class Data(object): - """Object representing one full stack test.""" - - def __init__(self, filename): - self.title = "" - self.length = 0 - self.samples = defaultdict(list) - - self._ReadSamples(filename) - - def _ReadSamples(self, filename): - """Reads graph data from the given file.""" - f = open(filename) - it = iter(f) - - self.title = it.next().strip() - self.length = int(it.next()) - field_names = [name.strip() for name in 
it.next().split()] - field_ids = [NAME_TO_ID[name] for name in field_names] - - for field_id in field_ids: - self.samples[field_id] = [0.0] * self.length - - for sample_id in xrange(self.length): - for col, value in enumerate(it.next().split()): - self.samples[field_ids[col]][sample_id] = float(value) - - self._SubtractFirstInputTime() - self._GenerateAdditionalData() - - f.close() - - def _SubtractFirstInputTime(self): - offset = self.samples[INPUT_TIME][0] - for field in [INPUT_TIME, SEND_TIME, RECV_TIME, RENDER_TIME]: - if field in self.samples: - self.samples[field] = [x - offset for x in self.samples[field]] - - def _GenerateAdditionalData(self): - """Calculates sender time, receiver time etc. from the raw data.""" - s = self.samples - last_render_time = 0 - for field_id in [SENDER_TIME, RECEIVER_TIME, END_TO_END, RENDERED_DELTA]: - s[field_id] = [0] * self.length - - for k in range(self.length): - s[SENDER_TIME][k] = s[SEND_TIME][k] - s[INPUT_TIME][k] - - decoded_time = s[RENDER_TIME][k] - s[RECEIVER_TIME][k] = decoded_time - s[RECV_TIME][k] - s[END_TO_END][k] = decoded_time - s[INPUT_TIME][k] - if not s[DROPPED][k]: - if k > 0: - s[RENDERED_DELTA][k] = decoded_time - last_render_time - last_render_time = decoded_time - - def _Hide(self, values): - """ + """Object representing one full stack test.""" + + def __init__(self, filename): + self.title = "" + self.length = 0 + self.samples = defaultdict(list) + + self._ReadSamples(filename) + + def _ReadSamples(self, filename): + """Reads graph data from the given file.""" + f = open(filename) + it = iter(f) + + self.title = it.next().strip() + self.length = int(it.next()) + field_names = [name.strip() for name in it.next().split()] + field_ids = [NAME_TO_ID[name] for name in field_names] + + for field_id in field_ids: + self.samples[field_id] = [0.0] * self.length + + for sample_id in xrange(self.length): + for col, value in enumerate(it.next().split()): + self.samples[field_ids[col]][sample_id] = float(value) + + 
self._SubtractFirstInputTime() + self._GenerateAdditionalData() + + f.close() + + def _SubtractFirstInputTime(self): + offset = self.samples[INPUT_TIME][0] + for field in [INPUT_TIME, SEND_TIME, RECV_TIME, RENDER_TIME]: + if field in self.samples: + self.samples[field] = [x - offset for x in self.samples[field]] + + def _GenerateAdditionalData(self): + """Calculates sender time, receiver time etc. from the raw data.""" + s = self.samples + last_render_time = 0 + for field_id in [ + SENDER_TIME, RECEIVER_TIME, END_TO_END, RENDERED_DELTA + ]: + s[field_id] = [0] * self.length + + for k in range(self.length): + s[SENDER_TIME][k] = s[SEND_TIME][k] - s[INPUT_TIME][k] + + decoded_time = s[RENDER_TIME][k] + s[RECEIVER_TIME][k] = decoded_time - s[RECV_TIME][k] + s[END_TO_END][k] = decoded_time - s[INPUT_TIME][k] + if not s[DROPPED][k]: + if k > 0: + s[RENDERED_DELTA][k] = decoded_time - last_render_time + last_render_time = decoded_time + + def _Hide(self, values): + """ Replaces values for dropped frames with None. These values are then skipped by the Plot() method. """ - return [None if self.samples[DROPPED][k] else values[k] - for k in range(len(values))] + return [ + None if self.samples[DROPPED][k] else values[k] + for k in range(len(values)) + ] - def AddSamples(self, config, target_lines_list): - """Creates graph lines from the current data set with given config.""" - for field in config.fields: - # field is None means the user wants just to skip the color. - if field is None: - target_lines_list.append(None) - continue + def AddSamples(self, config, target_lines_list): + """Creates graph lines from the current data set with given config.""" + for field in config.fields: + # field is None means the user wants just to skip the color. 
+ if field is None: + target_lines_list.append(None) + continue - field_id = field & FIELD_MASK - values = self.samples[field_id] + field_id = field & FIELD_MASK + values = self.samples[field_id] - if field & HIDE_DROPPED: - values = self._Hide(values) + if field & HIDE_DROPPED: + values = self._Hide(values) - target_lines_list.append(PlotLine( - self.title + " " + ID_TO_TITLE[field_id], - values, field & ~FIELD_MASK)) + target_lines_list.append( + PlotLine(self.title + " " + ID_TO_TITLE[field_id], values, + field & ~FIELD_MASK)) def AverageOverCycle(values, length): - """ + """ Returns the list: [ avg(values[0], values[length], ...), @@ -194,221 +198,272 @@ def AverageOverCycle(values, length): Skips None values when calculating the average value. """ - total = [0.0] * length - count = [0] * length - for k, val in enumerate(values): - if val is not None: - total[k % length] += val - count[k % length] += 1 + total = [0.0] * length + count = [0] * length + for k, val in enumerate(values): + if val is not None: + total[k % length] += val + count[k % length] += 1 - result = [0.0] * length - for k in range(length): - result[k] = total[k] / count[k] if count[k] else None - return result + result = [0.0] * length + for k in range(length): + result[k] = total[k] / count[k] if count[k] else None + return result class PlotConfig(object): - """Object representing a single graph.""" - - def __init__(self, fields, data_list, cycle_length=None, frames=None, - offset=0, output_filename=None, title="Graph"): - self.fields = fields - self.data_list = data_list - self.cycle_length = cycle_length - self.frames = frames - self.offset = offset - self.output_filename = output_filename - self.title = title - - def Plot(self, ax1): - lines = [] - for data in self.data_list: - if not data: - # Add None lines to skip the colors. 
- lines.extend([None] * len(self.fields)) - else: - data.AddSamples(self, lines) - - def _SliceValues(values): - if self.offset: - values = values[self.offset:] - if self.frames: - values = values[:self.frames] - return values - - length = None - for line in lines: - if line is None: - continue - - line.values = _SliceValues(line.values) - if self.cycle_length: - line.values = AverageOverCycle(line.values, self.cycle_length) - - if length is None: - length = len(line.values) - elif length != len(line.values): - raise Exception("All arrays should have the same length!") - - ax1.set_xlabel("Frame", fontsize="large") - if any(line.flags & RIGHT_Y_AXIS for line in lines if line): - ax2 = ax1.twinx() - ax2.set_xlabel("Frame", fontsize="large") - else: - ax2 = None - - # Have to implement color_cycle manually, due to two scales in a graph. - color_cycle = ["b", "r", "g", "c", "m", "y", "k"] - color_iter = itertools.cycle(color_cycle) - - for line in lines: - if not line: - color_iter.next() - continue - - if self.cycle_length: - x = numpy.array(range(self.cycle_length)) - else: - x = numpy.array(range(self.offset, self.offset + len(line.values))) - y = numpy.array(line.values) - ax = ax2 if line.flags & RIGHT_Y_AXIS else ax1 - ax.Plot(x, y, "o-", label=line.label, markersize=3.0, linewidth=1.0, - color=color_iter.next()) - - ax1.grid(True) - if ax2: - ax1.legend(loc="upper left", shadow=True, fontsize="large") - ax2.legend(loc="upper right", shadow=True, fontsize="large") - else: - ax1.legend(loc="best", shadow=True, fontsize="large") + """Object representing a single graph.""" + + def __init__(self, + fields, + data_list, + cycle_length=None, + frames=None, + offset=0, + output_filename=None, + title="Graph"): + self.fields = fields + self.data_list = data_list + self.cycle_length = cycle_length + self.frames = frames + self.offset = offset + self.output_filename = output_filename + self.title = title + + def Plot(self, ax1): + lines = [] + for data in self.data_list: + 
if not data: + # Add None lines to skip the colors. + lines.extend([None] * len(self.fields)) + else: + data.AddSamples(self, lines) + + def _SliceValues(values): + if self.offset: + values = values[self.offset:] + if self.frames: + values = values[:self.frames] + return values + + length = None + for line in lines: + if line is None: + continue + + line.values = _SliceValues(line.values) + if self.cycle_length: + line.values = AverageOverCycle(line.values, self.cycle_length) + + if length is None: + length = len(line.values) + elif length != len(line.values): + raise Exception("All arrays should have the same length!") + + ax1.set_xlabel("Frame", fontsize="large") + if any(line.flags & RIGHT_Y_AXIS for line in lines if line): + ax2 = ax1.twinx() + ax2.set_xlabel("Frame", fontsize="large") + else: + ax2 = None + + # Have to implement color_cycle manually, due to two scales in a graph. + color_cycle = ["b", "r", "g", "c", "m", "y", "k"] + color_iter = itertools.cycle(color_cycle) + + for line in lines: + if not line: + color_iter.next() + continue + + if self.cycle_length: + x = numpy.array(range(self.cycle_length)) + else: + x = numpy.array( + range(self.offset, self.offset + len(line.values))) + y = numpy.array(line.values) + ax = ax2 if line.flags & RIGHT_Y_AXIS else ax1 + ax.Plot(x, + y, + "o-", + label=line.label, + markersize=3.0, + linewidth=1.0, + color=color_iter.next()) + + ax1.grid(True) + if ax2: + ax1.legend(loc="upper left", shadow=True, fontsize="large") + ax2.legend(loc="upper right", shadow=True, fontsize="large") + else: + ax1.legend(loc="best", shadow=True, fontsize="large") def LoadFiles(filenames): - result = [] - for filename in filenames: - if filename in LoadFiles.cache: - result.append(LoadFiles.cache[filename]) - else: - data = Data(filename) - LoadFiles.cache[filename] = data - result.append(data) - return result + result = [] + for filename in filenames: + if filename in LoadFiles.cache: + result.append(LoadFiles.cache[filename]) + else: 
+ data = Data(filename) + LoadFiles.cache[filename] = data + result.append(data) + return result + + LoadFiles.cache = {} def GetParser(): - class CustomAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - if "ordered_args" not in namespace: - namespace.ordered_args = [] - namespace.ordered_args.append((self.dest, values)) - - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - - parser.add_argument( - "-c", "--cycle_length", nargs=1, action=CustomAction, - type=int, help="Cycle length over which to average the values.") - parser.add_argument( - "-f", "--field", nargs=1, action=CustomAction, - help="Name of the field to show. Use 'none' to skip a color.") - parser.add_argument("-r", "--right", nargs=0, action=CustomAction, - help="Use right Y axis for given field.") - parser.add_argument("-d", "--drop", nargs=0, action=CustomAction, - help="Hide values for dropped frames.") - parser.add_argument("-o", "--offset", nargs=1, action=CustomAction, type=int, - help="Frame offset.") - parser.add_argument("-n", "--next", nargs=0, action=CustomAction, - help="Separator for multiple graphs.") - parser.add_argument( - "--frames", nargs=1, action=CustomAction, type=int, - help="Frame count to show or take into account while averaging.") - parser.add_argument("-t", "--title", nargs=1, action=CustomAction, - help="Title of the graph.") - parser.add_argument( - "-O", "--output_filename", nargs=1, action=CustomAction, - help="Use to save the graph into a file. 
" - "Otherwise, a window will be shown.") - parser.add_argument( - "files", nargs="+", action=CustomAction, - help="List of text-based files generated by loopback tests.") - return parser + class CustomAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + if "ordered_args" not in namespace: + namespace.ordered_args = [] + namespace.ordered_args.append((self.dest, values)) + + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + parser.add_argument("-c", + "--cycle_length", + nargs=1, + action=CustomAction, + type=int, + help="Cycle length over which to average the values.") + parser.add_argument( + "-f", + "--field", + nargs=1, + action=CustomAction, + help="Name of the field to show. Use 'none' to skip a color.") + parser.add_argument("-r", + "--right", + nargs=0, + action=CustomAction, + help="Use right Y axis for given field.") + parser.add_argument("-d", + "--drop", + nargs=0, + action=CustomAction, + help="Hide values for dropped frames.") + parser.add_argument("-o", + "--offset", + nargs=1, + action=CustomAction, + type=int, + help="Frame offset.") + parser.add_argument("-n", + "--next", + nargs=0, + action=CustomAction, + help="Separator for multiple graphs.") + parser.add_argument( + "--frames", + nargs=1, + action=CustomAction, + type=int, + help="Frame count to show or take into account while averaging.") + parser.add_argument("-t", + "--title", + nargs=1, + action=CustomAction, + help="Title of the graph.") + parser.add_argument("-O", + "--output_filename", + nargs=1, + action=CustomAction, + help="Use to save the graph into a file. " + "Otherwise, a window will be shown.") + parser.add_argument( + "files", + nargs="+", + action=CustomAction, + help="List of text-based files generated by loopback tests.") + return parser def _PlotConfigFromArgs(args, graph_num): - # Pylint complains about using kwargs, so have to do it this way. 
- cycle_length = None - frames = None - offset = 0 - output_filename = None - title = "Graph" - - fields = [] - files = [] - mask = 0 - for key, values in args: - if key == "cycle_length": - cycle_length = values[0] - elif key == "frames": - frames = values[0] - elif key == "offset": - offset = values[0] - elif key == "output_filename": - output_filename = values[0] - elif key == "title": - title = values[0] - elif key == "drop": - mask |= HIDE_DROPPED - elif key == "right": - mask |= RIGHT_Y_AXIS - elif key == "field": - field_id = FieldArgToId(values[0]) - fields.append(field_id | mask if field_id is not None else None) - mask = 0 # Reset mask after the field argument. - elif key == "files": - files.extend(values) - - if not files: - raise Exception("Missing file argument(s) for graph #{}".format(graph_num)) - if not fields: - raise Exception("Missing field argument(s) for graph #{}".format(graph_num)) - - return PlotConfig(fields, LoadFiles(files), cycle_length=cycle_length, - frames=frames, offset=offset, output_filename=output_filename, - title=title) + # Pylint complains about using kwargs, so have to do it this way. + cycle_length = None + frames = None + offset = 0 + output_filename = None + title = "Graph" + + fields = [] + files = [] + mask = 0 + for key, values in args: + if key == "cycle_length": + cycle_length = values[0] + elif key == "frames": + frames = values[0] + elif key == "offset": + offset = values[0] + elif key == "output_filename": + output_filename = values[0] + elif key == "title": + title = values[0] + elif key == "drop": + mask |= HIDE_DROPPED + elif key == "right": + mask |= RIGHT_Y_AXIS + elif key == "field": + field_id = FieldArgToId(values[0]) + fields.append(field_id | mask if field_id is not None else None) + mask = 0 # Reset mask after the field argument. 
+ elif key == "files": + files.extend(values) + + if not files: + raise Exception( + "Missing file argument(s) for graph #{}".format(graph_num)) + if not fields: + raise Exception( + "Missing field argument(s) for graph #{}".format(graph_num)) + + return PlotConfig(fields, + LoadFiles(files), + cycle_length=cycle_length, + frames=frames, + offset=offset, + output_filename=output_filename, + title=title) def PlotConfigsFromArgs(args): - """Generates plot configs for given command line arguments.""" - # The way it works: - # First we detect separators -n/--next and split arguments into groups, one - # for each plot. For each group, we partially parse it with - # argparse.ArgumentParser, modified to remember the order of arguments. - # Then we traverse the argument list and fill the PlotConfig. - args = itertools.groupby(args, lambda x: x in ["-n", "--next"]) - prep_args = list(list(group) for match, group in args if not match) - - parser = GetParser() - plot_configs = [] - for index, raw_args in enumerate(prep_args): - graph_args = parser.parse_args(raw_args).ordered_args - plot_configs.append(_PlotConfigFromArgs(graph_args, index)) - return plot_configs + """Generates plot configs for given command line arguments.""" + # The way it works: + # First we detect separators -n/--next and split arguments into groups, one + # for each plot. For each group, we partially parse it with + # argparse.ArgumentParser, modified to remember the order of arguments. + # Then we traverse the argument list and fill the PlotConfig. 
+ args = itertools.groupby(args, lambda x: x in ["-n", "--next"]) + prep_args = list(list(group) for match, group in args if not match) + + parser = GetParser() + plot_configs = [] + for index, raw_args in enumerate(prep_args): + graph_args = parser.parse_args(raw_args).ordered_args + plot_configs.append(_PlotConfigFromArgs(graph_args, index)) + return plot_configs def ShowOrSavePlots(plot_configs): - for config in plot_configs: - fig = plt.figure(figsize=(14.0, 10.0)) - ax = fig.add_subPlot(1, 1, 1) + for config in plot_configs: + fig = plt.figure(figsize=(14.0, 10.0)) + ax = fig.add_subPlot(1, 1, 1) + + plt.title(config.title) + config.Plot(ax) + if config.output_filename: + print "Saving to", config.output_filename + fig.savefig(config.output_filename) + plt.close(fig) - plt.title(config.title) - config.Plot(ax) - if config.output_filename: - print "Saving to", config.output_filename - fig.savefig(config.output_filename) - plt.close(fig) + plt.show() - plt.show() if __name__ == "__main__": - ShowOrSavePlots(PlotConfigsFromArgs(sys.argv[1:])) + ShowOrSavePlots(PlotConfigsFromArgs(sys.argv[1:])) diff --git a/video/g3doc/adaptation.md b/video/g3doc/adaptation.md new file mode 100644 index 0000000000..084a0fd3aa --- /dev/null +++ b/video/g3doc/adaptation.md @@ -0,0 +1,114 @@ + + + +# Video Adaptation + +Video adaptation is a mechanism which reduces the bandwidth or CPU consumption +by reducing encoded video quality. + +## Overview + +Adaptation occurs when a _Resource_ signals that it is currently underused or +overused. When overused, the video quality is decreased and when underused, the +video quality is increased. There are currently two dimensions in which the +quality can be adapted: frame-rate and resolution. The dimension that is adapted +is based on the degradation preference for the video track. + +## Resources + +_Resources_ monitor metrics from the system or the video stream. 
For example, a +resource could monitor system temperature or the bandwidth usage of the video +stream. A resource implements the [Resource][resource.h] interface. When a +resource detects that it is overused, it calls `SetUsageState(kOveruse)`. When +the resource is no longer overused, it can signal this using +`SetUsageState(kUnderuse)`. + +There are two resources that are used by default on all video tracks: Quality +scaler resource and encode overuse resource. + +### QP Scaler Resource + +The quality scaler resource monitors the quantization parameter (QP) of the +encoded video frames for video send stream and ensures that the quality of the +stream is acceptable for the current resolution. After each frame is encoded the +[QualityScaler][quality_scaler.h] is given the QP of the encoded frame. Overuse +or underuse is signalled when the average QP is outside of the +[QP thresholds][VideoEncoder::QpThresholds]. If the average QP is above the +_high_ threshold, the QP scaler signals _overuse_, and when below the _low_ +threshold the QP scaler signals _underuse_. + +The thresholds are set by the video encoder in the `scaling_settings` property +of the [EncoderInfo][EncoderInfo]. + +*Note:* that the QP scaler is only enabled when the degradation preference is +`MAINTAIN_FRAMERATE` or `BALANCED`. + +### Encode Usage Resource + +The [encoder usage resource][encode_usage_resource.h] monitors how long it takes +to encode a video frame. This works as a good proxy measurement for CPU usage as +contention increases when CPU usage is high, increasing the encode times of the +video frames. + +The time is tracked from when frame encoding starts to when it is completed. If +the average encoder usage exceeds the thresholds set, *overuse* is triggered. + +### Injecting other Resources + +A custom resource can be injected into the call using the +[Call::AddAdaptationResource][Call::AddAdaptationResource] method. 
+ +## Adaptation + +When a a *resource* signals the it is over or underused, this signal reaches the +`ResourceAdaptationProcessor` who requests an `Adaptation` proposal from the +[VideoStreamAdapter][VideoStreamAdapter]. This proposal is based on the +degradation preference of the video stream. `ResourceAdaptationProcessor` will +determine if the `Adaptation` should be applied based on the current adaptation +status and the `Adaptation` proposal. + +### Degradation Preference + +There are 3 degradation preferences, described in the +[RtpParameters][RtpParameters] header. These are + +* `MAINTIAIN_FRAMERATE`: Adapt video resolution +* `MAINTIAIN_RESOLUTION`: Adapt video frame-rate. +* `BALANCED`: Adapt video frame-rate or resolution. + +The degradation preference is set for a video track using the +`degradation_preference` property in the [RtpParameters][RtpParameters]. + +## VideoSinkWants and video stream adaptation + +Once an adaptation is applied it notifies the video stream. The video stream +converts this adaptation to a [VideoSinkWants][VideoSinkWants]. These sink wants +indicate to the video stream that some restrictions should be applied to the +stream before it is sent to encoding. It has a few properties, but for +adaptation the properties that might be set are: + +* `target_pixel_count`: The desired number of pixels for each video frame. The + actual pixel count should be close to this but does not have to be exact so + that aspect ratio can be maintained. +* `max_pixel_count`: The maximum number of pixels in each video frame. This + value can not be exceeded if set. +* `max_framerate_fps`: The maximum frame-rate for the video source. The source + is expected to drop frames that cause this threshold to be exceeded. + +The `VideoSinkWants` can be applied by any video source, or one may use the +[AdaptedVideoTraceSource][adapted_video_track_source.h] which is a base class +for sources that need video adaptation. 
+ +[RtpParameters]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/rtp_parameters.h?q=%22RTC_EXPORT%20RtpParameters%22 +[resource.h]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/adaptation/resource.h +[Call::AddAdaptationResource]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/call/call.h?q=Call::AddAdaptationResource +[quality_scaler.h]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/modules/video_coding/utility/quality_scaler.h +[VideoEncoder::QpThresholds]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video_codecs/video_encoder.h?q=VideoEncoder::QpThresholds +[EncoderInfo]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video_codecs/video_encoder.h?q=VideoEncoder::EncoderInfo +[encode_usage_resource.h]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/video/adaptation/encode_usage_resource.h +[VideoStreamAdapter]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/call/adaptation/video_stream_adapter.h +[adaptation_constraint.h]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/call/adaptation/adaptation_constraint.h +[bitrate_constraint.h]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/video/adaptation/bitrate_constraint.h +[AddOrUpdateSink]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video/video_source_interface.h?q=AddOrUpdateSink +[VideoSinkWants]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/api/video/video_source_interface.h?q=%22RTC_EXPORT%20VideoSinkWants%22 +[adapted_video_track_source.h]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/media/base/adapted_video_track_source.h diff --git a/video/g3doc/stats.md b/video/g3doc/stats.md 
new file mode 100644 index 0000000000..a5d15fe2fa --- /dev/null +++ b/video/g3doc/stats.md @@ -0,0 +1,217 @@ + + + +# Video stats + +Overview of collected statistics for [VideoSendStream] and [VideoReceiveStream]. + +## VideoSendStream + +[VideoSendStream::Stats] for a sending stream can be gathered via `VideoSendStream::GetStats()`. + +Some statistics are collected per RTP stream (see [StreamStats]) and can be of `StreamType`: `kMedia`, `kRtx`, `kFlexfec`. + +Multiple `StreamStats` objects are for example present if simulcast is used (multiple `kMedia` objects) or if RTX or FlexFEC is negotiated. + +### SendStatisticsProxy +`VideoSendStream` owns a [SendStatisticsProxy] which implements +`VideoStreamEncoderObserver`, +`RtcpStatisticsCallback`, +`ReportBlockDataObserver`, +`RtcpPacketTypeCounterObserver`, +`StreamDataCountersCallback`, +`BitrateStatisticsObserver`, +`FrameCountObserver`, +`SendSideDelayObserver` +and holds a `VideoSendStream::Stats` object. + +`SendStatisticsProxy` is called via these interfaces by different components (e.g. `RtpRtcp` module) to update stats. + +#### StreamStats +* `type` - kMedia, kRtx or kFlexfec. +* `referenced_media_ssrc` - only present for type kRtx/kFlexfec. The SSRC for the kMedia stream that retransmissions or FEC is performed for. + +Updated when a frame has been encoded, `VideoStreamEncoder::OnEncodedImage`. +* `frames_encoded `- total number of encoded frames. +* `encode_frame_rate` - number of encoded frames during the last second. +* `width` - width of last encoded frame [[rtcoutboundrtpstreamstats-framewidth]]. +* `height` - height of last encoded frame [[rtcoutboundrtpstreamstats-frameheight]]. +* `total_encode_time_ms` - total encode time for encoded frames. +* `qp_sum` - sum of quantizer values of encoded frames [[rtcoutboundrtpstreamstats-qpsum]]. +* `frame_counts` - total number of encoded key/delta frames [[rtcoutboundrtpstreamstats-keyframesencoded]]. 
+ +Updated when a RTP packet is transmitted to the network, `RtpSenderEgress::SendPacket`. +* `rtp_stats` - total number of sent bytes/packets. +* `total_bitrate_bps` - total bitrate sent in bits per second (over a one second window). +* `retransmit_bitrate_bps` - total retransmit bitrate sent in bits per second (over a one second window). +* `avg_delay_ms` - average capture-to-send delay for sent packets (over a one second window). +* `max_delay_ms` - maximum capture-to-send delay for sent packets (over a one second window). +* `total_packet_send_delay_ms` - total capture-to-send delay for sent packets [[rtcoutboundrtpstreamstats-totalpacketsenddelay]]. + +Updated when an incoming RTCP packet is parsed, `RTCPReceiver::ParseCompoundPacket`. +* `rtcp_packet_type_counts` - total number of received NACK/FIR/PLI packets [rtcoutboundrtpstreamstats-[nackcount], [fircount], [plicount]]. + +Updated when a RTCP report block packet is received, `RTCPReceiver::TriggerCallbacksFromRtcpPacket`. +* `rtcp_stats` - RTCP report block data. +* `report_block_data` - RTCP report block data. + +#### Stats +* `std::map<uint32_t, StreamStats> substreams` - StreamStats mapped per SSRC. + +Updated when a frame is received from the source, `VideoStreamEncoder::OnFrame`. +* `frames` - total number of frames fed to VideoStreamEncoder. +* `input_frame_rate` - number of frames fed to VideoStreamEncoder during the last second. +* `frames_dropped_by_congestion_window` - total number of dropped frames due to congestion window pushback. +* `frames_dropped_by_encoder_queue` - total number of dropped frames because the encoder is blocked. + +Updated if a frame from the source is dropped, `VideoStreamEncoder::OnDiscardedFrame`. +* `frames_dropped_by_capturer` - total number of dropped frames by the source. + +Updated if a frame is dropped by `FrameDropper`, `VideoStreamEncoder::MaybeEncodeVideoFrame`. +* `frames_dropped_by_rate_limiter` - total number of dropped frames to avoid bitrate overuse. 
+ +Updated (if changed) before a frame is passed to the encoder, `VideoStreamEncoder::EncodeVideoFrame`. +* `encoder_implementation_name` - name of encoder implementation [[rtcoutboundrtpstreamstats-encoderimplementation]]. + +Updated after a frame has been encoded, `VideoStreamEncoder::OnEncodedImage`. +* `frames_encoded` - total number of encoded frames [[rtcoutboundrtpstreamstats-framesencoded]]. +* `encode_frame_rate` - number of encoded frames during the last second [[rtcoutboundrtpstreamstats-framespersecond]]. +* `total_encoded_bytes_target` - total target frame size in bytes [[rtcoutboundrtpstreamstats-totalencodedbytestarget]]. +* `huge_frames_sent` - total number of huge frames sent [[rtcoutboundrtpstreamstats-hugeframessent]]. +* `media_bitrate_bps` - the actual bitrate the encoder is producing. +* `avg_encode_time_ms` - average encode time for encoded frames. +* `total_encode_time_ms` - total encode time for encoded frames [[rtcoutboundrtpstreamstats-totalencodetime]]. +* `frames_dropped_by_encoder` - total number of dropped frames by the encoder. + +Adaptation stats. +* `bw_limited_resolution` - shows if resolution is limited due to restricted bandwidth. +* `cpu_limited_resolution` - shows if resolution is limited due to cpu. +* `bw_limited_framerate` - shows if framerate is limited due to restricted bandwidth. +* `cpu_limited_framerate` - shows if framerate is limited due to cpu. +* `quality_limitation_reason` - current reason for limiting resolution and/or framerate [[rtcoutboundrtpstreamstats-qualitylimitationreason]]. +* `quality_limitation_durations_ms` - total time spent in quality limitation state [[rtcoutboundrtpstreamstats-qualitylimitationdurations]]. +* `quality_limitation_resolution_changes` - total number of times that resolution has changed due to quality limitation [[rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges]]. 
+* `number_of_cpu_adapt_changes` - total number of times resolution/framerate has changed due to cpu limitation. +* `number_of_quality_adapt_changes` - total number of times resolution/framerate has changed due to quality limitation. + +Updated when the encoder is configured, `VideoStreamEncoder::ReconfigureEncoder`. +* `content_type` - configured content type (UNSPECIFIED/SCREENSHARE). + +Updated when the available bitrate changes, `VideoSendStreamImpl::OnBitrateUpdated`. +* `target_media_bitrate_bps` - the bitrate the encoder is configured to use. +* `suspended` - shows if video is suspended due to zero target bitrate. + +## VideoReceiveStream +[VideoReceiveStream::Stats] for a receiving stream can be gathered via `VideoReceiveStream::GetStats()`. + +### ReceiveStatisticsProxy +`VideoReceiveStream` owns a [ReceiveStatisticsProxy] which implements +`VCMReceiveStatisticsCallback`, +`RtcpCnameCallback`, +`RtcpPacketTypeCounterObserver`, +`CallStatsObserver` +and holds a `VideoReceiveStream::Stats` object. + +`ReceiveStatisticsProxy` is called via these interfaces by different components (e.g. `RtpRtcp` module) to update stats. + +#### Stats +* `current_payload_type` - current payload type. +* `ssrc` - configured SSRC for the received stream. + +Updated when a complete frame is received, `FrameBuffer::InsertFrame`. +* `frame_counts` - total number of key/delta frames received [[rtcinboundrtpstreamstats-keyframesdecoded]]. +* `network_frame_rate` - number of frames received during the last second. + +Updated when a frame is ready for decoding, `FrameBuffer::GetNextFrame`. From `VCMTiming`: +* `jitter_buffer_ms` - jitter buffer delay in ms. +* `max_decode_ms` - the 95th percentile observed decode time within a time window (10 sec). +* `render_delay_ms` - render delay in ms. +* `min_playout_delay_ms` - minimum playout delay in ms. +* `target_delay_ms` - target playout delay in ms. Max(`min_playout_delay_ms`, `jitter_delay_ms` + `max_decode_ms` + `render_delay_ms`). 
+* `current_delay_ms` - actual playout delay in ms. +* `jitter_buffer_delay_seconds` - total jitter buffer delay in seconds [[rtcinboundrtpstreamstats-jitterbufferdelay]]. +* `jitter_buffer_emitted_count` - total number of frames that have come out from the jitter buffer [[rtcinboundrtpstreamstats-jitterbufferemittedcount]]. + +Updated (if changed) after a frame is passed to the decoder, `VCMGenericDecoder::Decode`. +* `decoder_implementation_name` - name of decoder implementation [[rtcinboundrtpstreamstats-decoderimplementation]]. + +Updated when a frame is ready for decoding, `FrameBuffer::GetNextFrame`. +* `timing_frame_info` - timestamps for a full lifetime of a frame. +* `first_frame_received_to_decoded_ms` - initial decoding latency between the first arrived frame and the first decoded frame. +* `frames_dropped` - total number of dropped frames prior to decoding or if the system is too slow [[rtcreceivedrtpstreamstats-framesdropped]]. + +Updated after a frame has been decoded, `VCMDecodedFrameCallback::Decoded`. +* `frames_decoded` - total number of decoded frames [[rtcinboundrtpstreamstats-framesdecoded]]. +* `decode_frame_rate` - number of decoded frames during the last second [[rtcinboundrtpstreamstats-framespersecond]]. +* `decode_ms` - time to decode last frame in ms. +* `total_decode_time_ms` - total decode time for decoded frames [[rtcinboundrtpstreamstats-totaldecodetime]]. +* `qp_sum` - sum of quantizer values of decoded frames [[rtcinboundrtpstreamstats-qpsum]]. +* `content_type` - content type (UNSPECIFIED/SCREENSHARE). +* `interframe_delay_max_ms` - max inter-frame delay within a time window between decoded frames. +* `total_inter_frame_delay` - sum of inter-frame delay in seconds between decoded frames [[rtcinboundrtpstreamstats-totalinterframedelay]]. +* `total_squared_inter_frame_delay` - sum of squared inter-frame delays in seconds between decoded frames [[rtcinboundrtpstreamstats-totalsquaredinterframedelay]]. 
+ +Updated before a frame is sent to the renderer, `VideoReceiveStream2::OnFrame`. +* `frames_rendered` - total number of rendered frames. +* `render_frame_rate` - number of rendered frames during the last second. +* `width` - width of last frame fed to renderer [[rtcinboundrtpstreamstats-framewidth]]. +* `height` - height of last frame fed to renderer [[rtcinboundrtpstreamstats-frameheight]]. +* `estimated_playout_ntp_timestamp_ms` - estimated playout NTP timestamp [[rtcinboundrtpstreamstats-estimatedplayouttimestamp]]. +* `sync_offset_ms` - NTP timestamp difference between the last played out audio and video frame. +* `freeze_count` - total number of detected freezes. +* `pause_count` - total number of detected pauses. +* `total_freezes_duration_ms` - total duration of freezes in ms. +* `total_pauses_duration_ms` - total duration of pauses in ms. +* `total_frames_duration_ms` - time in ms between the last rendered frame and the first rendered frame. +* `sum_squared_frame_durations` - sum of squared inter-frame delays in seconds between rendered frames. + +`ReceiveStatisticsImpl::OnRtpPacket` is updated for received RTP packets. From `ReceiveStatistics`: +* `total_bitrate_bps` - incoming bitrate in bps. +* `rtp_stats` - RTP statistics for the received stream. + +Updated when a RTCP packet is sent, `RTCPSender::ComputeCompoundRTCPPacket`. +* `rtcp_packet_type_counts` - total number of sent NACK/FIR/PLI packets [rtcinboundrtpstreamstats-[nackcount], [fircount], [plicount]]. 
+ + +[VideoSendStream]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/call/video_send_stream.h +[VideoSendStream::Stats]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/call/video_send_stream.h?q=VideoSendStream::Stats +[StreamStats]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/call/video_send_stream.h?q=VideoSendStream::StreamStats +[SendStatisticsProxy]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/video/send_statistics_proxy.h +[rtcoutboundrtpstreamstats-framewidth]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-framewidth +[rtcoutboundrtpstreamstats-frameheight]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-frameheight +[rtcoutboundrtpstreamstats-qpsum]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qpsum +[rtcoutboundrtpstreamstats-keyframesencoded]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-keyframesencoded +[rtcoutboundrtpstreamstats-totalpacketsenddelay]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalpacketsenddelay +[nackcount]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-nackcount +[fircount]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-fircount +[plicount]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-plicount +[rtcoutboundrtpstreamstats-encoderimplementation]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-encoderimplementation +[rtcoutboundrtpstreamstats-framesencoded]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-framesencoded +[rtcoutboundrtpstreamstats-framespersecond]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-framespersecond +[rtcoutboundrtpstreamstats-totalencodedbytestarget]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodedbytestarget 
+[rtcoutboundrtpstreamstats-hugeframessent]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-hugeframessent +[rtcoutboundrtpstreamstats-totalencodetime]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-totalencodetime +[rtcoutboundrtpstreamstats-qualitylimitationreason]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationreason +[rtcoutboundrtpstreamstats-qualitylimitationdurations]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationdurations +[rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges]: https://w3c.github.io/webrtc-stats/#dom-rtcoutboundrtpstreamstats-qualitylimitationresolutionchanges + +[VideoReceiveStream]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/call/video_receive_stream.h +[VideoReceiveStream::Stats]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/call/video_receive_stream.h?q=VideoReceiveStream::Stats +[ReceiveStatisticsProxy]: https://source.chromium.org/chromium/chromium/src/+/master:third_party/webrtc/video/receive_statistics_proxy2.h +[rtcinboundrtpstreamstats-keyframesdecoded]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-keyframesdecoded +[rtcinboundrtpstreamstats-jitterbufferdelay]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferdelay +[rtcinboundrtpstreamstats-jitterbufferemittedcount]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-jitterbufferemittedcount +[rtcinboundrtpstreamstats-decoderimplementation]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-decoderimplementation +[rtcreceivedrtpstreamstats-framesdropped]: https://www.w3.org/TR/webrtc-stats/#dom-rtcreceivedrtpstreamstats-framesdropped +[rtcinboundrtpstreamstats-framesdecoded]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-framesdecoded +[rtcinboundrtpstreamstats-framespersecond]: 
https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-framespersecond +[rtcinboundrtpstreamstats-totaldecodetime]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totaldecodetime +[rtcinboundrtpstreamstats-qpsum]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-qpsum +[rtcinboundrtpstreamstats-totalinterframedelay]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalinterframedelay +[rtcinboundrtpstreamstats-totalsquaredinterframedelay]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-totalsquaredinterframedelay +[rtcinboundrtpstreamstats-estimatedplayouttimestamp]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-estimatedplayouttimestamp +[rtcinboundrtpstreamstats-framewidth]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-framewidth +[rtcinboundrtpstreamstats-frameheight]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-frameheight +[nackcount]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-nackcount +[fircount]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-fircount +[plicount]: https://w3c.github.io/webrtc-stats/#dom-rtcinboundrtpstreamstats-plicount diff --git a/video/pc_full_stack_tests.cc b/video/pc_full_stack_tests.cc index 4ec382ef54..5cebf41e91 100644 --- a/video/pc_full_stack_tests.cc +++ b/video/pc_full_stack_tests.cc @@ -20,8 +20,9 @@ #include "api/test/network_emulation_manager.h" #include "api/test/peerconnection_quality_test_fixture.h" #include "api/test/simulated_network.h" +#include "api/test/time_controller.h" +#include "api/video_codecs/vp9_profile.h" #include "call/simulated_network.h" -#include "media/base/vp9_profile.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "system_wrappers/include/field_trial.h" #include "test/field_trial.h" @@ -48,8 +49,6 @@ using VideoCodecConfig = namespace { constexpr int kTestDurationSec = 45; -constexpr char 
kVp8TrustedRateControllerFieldTrial[] = - "WebRTC-LibvpxVp8TrustedRateController/Enabled/"; EmulatedNetworkNode* CreateEmulatedNodeWithConfig( NetworkEmulationManager* emulation, @@ -78,12 +77,13 @@ CreateTwoNetworkLinks(NetworkEmulationManager* emulation, std::unique_ptr CreateTestFixture(const std::string& test_case_name, + TimeController& time_controller, std::pair network_links, rtc::FunctionView alice_configurer, rtc::FunctionView bob_configurer) { auto fixture = webrtc_pc_e2e::CreatePeerConnectionE2EQualityTestFixture( - test_case_name, /*audio_quality_analyzer=*/nullptr, + test_case_name, time_controller, /*audio_quality_analyzer=*/nullptr, /*video_quality_analyzer=*/nullptr); fixture->AddPeer(network_links.first->network_thread(), network_links.first->network_manager(), alice_configurer); @@ -106,30 +106,13 @@ std::string ClipNameToClipPath(const char* clip_name) { } // namespace -class PCGenericDescriptorTest : public ::testing::TestWithParam { - public: - PCGenericDescriptorTest() - : field_trial_(AppendFieldTrials(GetParam())), - generic_descriptor_enabled_( - field_trial::IsEnabled("WebRTC-GenericDescriptor")) {} - - std::string GetTestName(std::string base) { - if (generic_descriptor_enabled_) - base += "_generic_descriptor"; - return base; - } - - private: - test::ScopedFieldTrials field_trial_; - bool generic_descriptor_enabled_; -}; - #if defined(RTC_ENABLE_VP9) -TEST(PCFullStackTest, ForemanCifWithoutPacketLossVp9) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Net_Delay_0_0_Plr_0_VP9) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( "pc_foreman_cif_net_delay_0_0_plr_0_VP9", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -149,14 +132,16 @@ TEST(PCFullStackTest, ForemanCifWithoutPacketLossVp9) { fixture->Run(std::move(run_params)); } -TEST_P(PCGenericDescriptorTest, 
ForemanCifPlr5Vp9) { +TEST(PCGenericDescriptorTest, + Pc_Foreman_Cif_Delay_50_0_Plr_5_VP9_Generic_Descriptor) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; config.loss_percent = 5; config.queue_delay_ms = 50; auto fixture = CreateTestFixture( - GetTestName("pc_foreman_cif_delay_50_0_plr_5_VP9"), + "pc_foreman_cif_delay_50_0_plr_5_VP9_generic_descriptor", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -179,17 +164,18 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5Vp9) { #if (defined(WEBRTC_ANDROID) && \ (defined(WEBRTC_ARCH_ARM64) || defined(WEBRTC_ARCH_ARM))) || \ (defined(WEBRTC_IOS) && defined(WEBRTC_ARCH_ARM64)) -#define MAYBE_GeneratorWithoutPacketLossVp9Profile2 \ - DISABLED_GeneratorWithoutPacketLossVp9Profile2 +#define MAYBE_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2 \ + DISABLED_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2 #else -#define MAYBE_GeneratorWithoutPacketLossVp9Profile2 \ - GeneratorWithoutPacketLossVp9Profile2 +#define MAYBE_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2 \ + Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2 #endif -TEST(PCFullStackTest, MAYBE_GeneratorWithoutPacketLossVp9Profile2) { +TEST(PCFullStackTest, MAYBE_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( "pc_generator_net_delay_0_0_plr_0_VP9Profile2", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -241,11 +227,11 @@ TEST(PCFullStackTest, GeneratorWithoutPacketLossMultiplexI420AFrame) { */ #endif // defined(RTC_ENABLE_VP9) -TEST(PCFullStackTest, ParisQcifWithoutPacketLoss) { +TEST(PCFullStackTest, Pc_Net_Delay_0_0_Plr_0) { std::unique_ptr 
network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( - "pc_net_delay_0_0_plr_0", + "pc_net_delay_0_0_plr_0", *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -263,11 +249,13 @@ TEST(PCFullStackTest, ParisQcifWithoutPacketLoss) { fixture->Run(std::move(run_params)); } -TEST_P(PCGenericDescriptorTest, ForemanCifWithoutPacketLoss) { +TEST(PCGenericDescriptorTest, + Pc_Foreman_Cif_Net_Delay_0_0_Plr_0_Generic_Descriptor) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( - GetTestName("pc_foreman_cif_net_delay_0_0_plr_0"), + "pc_foreman_cif_net_delay_0_0_plr_0_generic_descriptor", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -285,45 +273,14 @@ TEST_P(PCGenericDescriptorTest, ForemanCifWithoutPacketLoss) { fixture->Run(std::move(run_params)); } -TEST_P(PCGenericDescriptorTest, ForemanCif30kbpsWithoutPacketLoss) { - std::unique_ptr network_emulation_manager = - CreateNetworkEmulationManager(); - BuiltInNetworkBehaviorConfig config; - auto fixture = CreateTestFixture( - GetTestName("pc_foreman_cif_30kbps_net_delay_0_0_plr_0"), - CreateTwoNetworkLinks(network_emulation_manager.get(), config), - [](PeerConfigurer* alice) { - VideoConfig video(352, 288, 10); - video.stream_label = "alice-video"; - auto frame_generator = CreateFromYuvFileFrameGenerator( - video, ClipNameToClipPath("foreman_cif")); - alice->AddVideoConfig(std::move(video), std::move(frame_generator)); - - PeerConnectionInterface::BitrateParameters bitrate_params; - bitrate_params.min_bitrate_bps = 30000; - bitrate_params.current_bitrate_bps = 30000; - bitrate_params.max_bitrate_bps = 30000; - alice->SetBitrateParameters(bitrate_params); - }, - [](PeerConfigurer* bob) {}); - 
RunParams run_params(TimeDelta::Seconds(kTestDurationSec)); - run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)}; - run_params.use_flex_fec = false; - run_params.use_ulp_fec = false; - fixture->Run(std::move(run_params)); -} - -// TODO(webrtc:9722): Remove when experiment is cleaned up. -TEST_P(PCGenericDescriptorTest, - ForemanCif30kbpsWithoutPacketLossTrustedRateControl) { - test::ScopedFieldTrials override_field_trials( - AppendFieldTrials(kVp8TrustedRateControllerFieldTrial)); +TEST(PCGenericDescriptorTest, + Pc_Foreman_Cif_30kbps_Net_Delay_0_0_Plr_0_Generic_Descriptor) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; auto fixture = CreateTestFixture( - GetTestName( - "pc_foreman_cif_30kbps_net_delay_0_0_plr_0_trusted_rate_ctrl"), + "pc_foreman_cif_30kbps_net_delay_0_0_plr_0_generic_descriptor", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 10); @@ -332,11 +289,11 @@ TEST_P(PCGenericDescriptorTest, video, ClipNameToClipPath("foreman_cif")); alice->AddVideoConfig(std::move(video), std::move(frame_generator)); - PeerConnectionInterface::BitrateParameters bitrate_params; - bitrate_params.min_bitrate_bps = 30000; - bitrate_params.current_bitrate_bps = 30000; - bitrate_params.max_bitrate_bps = 30000; - alice->SetBitrateParameters(bitrate_params); + BitrateSettings bitrate_settings; + bitrate_settings.min_bitrate_bps = 30000; + bitrate_settings.start_bitrate_bps = 30000; + bitrate_settings.max_bitrate_bps = 30000; + alice->SetBitrateSettings(bitrate_settings); }, [](PeerConfigurer* bob) {}); RunParams run_params(TimeDelta::Seconds(kTestDurationSec)); @@ -347,13 +304,14 @@ TEST_P(PCGenericDescriptorTest, } // Link capacity below default start rate. 
-TEST(PCFullStackTest, ForemanCifLink150kbpsWithoutPacketLoss) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Link_150kbps_Net_Delay_0_0_Plr_0) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; config.link_capacity_kbps = 150; auto fixture = CreateTestFixture( "pc_foreman_cif_link_150kbps_net_delay_0_0_plr_0", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -370,7 +328,7 @@ TEST(PCFullStackTest, ForemanCifLink150kbpsWithoutPacketLoss) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCifLink130kbps100msDelay1PercentPacketLossUlpfec) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Link_130kbps_Delay100ms_Loss1_Ulpfec) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -379,6 +337,7 @@ TEST(PCFullStackTest, ForemanCifLink130kbps100msDelay1PercentPacketLossUlpfec) { config.loss_percent = 1; auto fixture = CreateTestFixture( "pc_foreman_cif_link_130kbps_delay100ms_loss1_ulpfec", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -395,7 +354,7 @@ TEST(PCFullStackTest, ForemanCifLink130kbps100msDelay1PercentPacketLossUlpfec) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCifLink50kbps100msDelay1PercentPacketLossUlpfec) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Link_50kbps_Delay100ms_Loss1_Ulpfec) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -404,6 +363,7 @@ TEST(PCFullStackTest, ForemanCifLink50kbps100msDelay1PercentPacketLossUlpfec) { config.loss_percent = 1; auto fixture = CreateTestFixture( "pc_foreman_cif_link_50kbps_delay100ms_loss1_ulpfec", + 
*network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -421,7 +381,8 @@ TEST(PCFullStackTest, ForemanCifLink50kbps100msDelay1PercentPacketLossUlpfec) { } // Restricted network and encoder overproducing by 30%. -TEST(PCFullStackTest, ForemanCifLink150kbpsBadRateController) { +TEST(PCFullStackTest, + Pc_Foreman_Cif_Link_150kbps_Delay100ms_30pkts_Queue_Overshoot30) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -430,6 +391,7 @@ TEST(PCFullStackTest, ForemanCifLink150kbpsBadRateController) { config.queue_delay_ms = 100; auto fixture = CreateTestFixture( "pc_foreman_cif_link_150kbps_delay100ms_30pkts_queue_overshoot30", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -451,7 +413,7 @@ TEST(PCFullStackTest, ForemanCifLink150kbpsBadRateController) { // Packet rate and loss are low enough that loss will happen with ~3s interval. // This triggers protection overhead to toggle between zero and non-zero. // Link queue is restrictive enough to trigger loss on probes. 
-TEST(PCFullStackTest, ForemanCifMediaCapacitySmallLossAndQueue) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Link_250kbps_Delay100ms_10pkts_Loss1) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -461,6 +423,7 @@ TEST(PCFullStackTest, ForemanCifMediaCapacitySmallLossAndQueue) { config.loss_percent = 1; auto fixture = CreateTestFixture( "pc_foreman_cif_link_250kbps_delay100ms_10pkts_loss1", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -478,14 +441,16 @@ TEST(PCFullStackTest, ForemanCifMediaCapacitySmallLossAndQueue) { fixture->Run(std::move(run_params)); } -TEST_P(PCGenericDescriptorTest, ForemanCifPlr5) { +TEST(PCGenericDescriptorTest, + Pc_Foreman_Cif_Delay_50_0_Plr_5_Generic_Descriptor) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; config.loss_percent = 5; config.queue_delay_ms = 50; auto fixture = CreateTestFixture( - GetTestName("pc_foreman_cif_delay_50_0_plr_5"), + "pc_foreman_cif_delay_50_0_plr_5_generic_descriptor", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -502,14 +467,16 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5) { fixture->Run(std::move(run_params)); } -TEST_P(PCGenericDescriptorTest, ForemanCifPlr5Ulpfec) { +TEST(PCGenericDescriptorTest, + Pc_Foreman_Cif_Delay_50_0_Plr_5_Ulpfec_Generic_Descriptor) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; config.loss_percent = 5; config.queue_delay_ms = 50; auto fixture = CreateTestFixture( - GetTestName("pc_foreman_cif_delay_50_0_plr_5_ulpfec"), + "pc_foreman_cif_delay_50_0_plr_5_ulpfec_generic_descriptor", + 
*network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -526,7 +493,7 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5Ulpfec) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCifPlr5Flexfec) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Delay_50_0_Plr_5_Flexfec) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -534,6 +501,7 @@ TEST(PCFullStackTest, ForemanCifPlr5Flexfec) { config.queue_delay_ms = 50; auto fixture = CreateTestFixture( "pc_foreman_cif_delay_50_0_plr_5_flexfec", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -550,7 +518,7 @@ TEST(PCFullStackTest, ForemanCifPlr5Flexfec) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCif500kbpsPlr3Flexfec) { +TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps_Delay_50_0_Plr_3_Flexfec) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -559,6 +527,7 @@ TEST(PCFullStackTest, ForemanCif500kbpsPlr3Flexfec) { config.queue_delay_ms = 50; auto fixture = CreateTestFixture( "pc_foreman_cif_500kbps_delay_50_0_plr_3_flexfec", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -575,7 +544,7 @@ TEST(PCFullStackTest, ForemanCif500kbpsPlr3Flexfec) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCif500kbpsPlr3Ulpfec) { +TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps_Delay_50_0_Plr_3_Ulpfec) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -584,6 +553,7 @@ TEST(PCFullStackTest, ForemanCif500kbpsPlr3Ulpfec) { 
config.queue_delay_ms = 50; auto fixture = CreateTestFixture( "pc_foreman_cif_500kbps_delay_50_0_plr_3_ulpfec", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -601,11 +571,12 @@ TEST(PCFullStackTest, ForemanCif500kbpsPlr3Ulpfec) { } #if defined(WEBRTC_USE_H264) -TEST(PCFullStackTest, ForemanCifWithoutPacketlossH264) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Net_Delay_0_0_Plr_0_H264) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( "pc_foreman_cif_net_delay_0_0_plr_0_H264", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -623,12 +594,13 @@ TEST(PCFullStackTest, ForemanCifWithoutPacketlossH264) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCif30kbpsWithoutPacketlossH264) { +TEST(PCFullStackTest, Pc_Foreman_Cif_30kbps_Net_Delay_0_0_Plr_0_H264) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; auto fixture = CreateTestFixture( "pc_foreman_cif_30kbps_net_delay_0_0_plr_0_H264", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 10); @@ -637,11 +609,11 @@ TEST(PCFullStackTest, ForemanCif30kbpsWithoutPacketlossH264) { video, ClipNameToClipPath("foreman_cif")); alice->AddVideoConfig(std::move(video), std::move(frame_generator)); - PeerConnectionInterface::BitrateParameters bitrate_params; - bitrate_params.min_bitrate_bps = 30000; - bitrate_params.current_bitrate_bps = 30000; - bitrate_params.max_bitrate_bps = 30000; - alice->SetBitrateParameters(bitrate_params); + BitrateSettings bitrate_settings; + bitrate_settings.min_bitrate_bps = 30000; + 
bitrate_settings.start_bitrate_bps = 30000; + bitrate_settings.max_bitrate_bps = 30000; + alice->SetBitrateSettings(bitrate_settings); }, [](PeerConfigurer* bob) {}); RunParams run_params(TimeDelta::Seconds(kTestDurationSec)); @@ -651,14 +623,16 @@ TEST(PCFullStackTest, ForemanCif30kbpsWithoutPacketlossH264) { fixture->Run(std::move(run_params)); } -TEST_P(PCGenericDescriptorTest, ForemanCifPlr5H264) { +TEST(PCGenericDescriptorTest, + Pc_Foreman_Cif_Delay_50_0_Plr_5_H264_Generic_Descriptor) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; config.loss_percent = 5; config.queue_delay_ms = 50; auto fixture = CreateTestFixture( - GetTestName("pc_foreman_cif_delay_50_0_plr_5_H264"), + "pc_foreman_cif_delay_50_0_plr_5_H264_generic_descriptor", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -675,7 +649,7 @@ TEST_P(PCGenericDescriptorTest, ForemanCifPlr5H264) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Delay_50_0_Plr_5_H264_Sps_Pps_Idr) { test::ScopedFieldTrials override_field_trials( AppendFieldTrials("WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/")); @@ -686,6 +660,7 @@ TEST(PCFullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) { config.queue_delay_ms = 50; auto fixture = CreateTestFixture( "pc_foreman_cif_delay_50_0_plr_5_H264_sps_pps_idr", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -702,7 +677,7 @@ TEST(PCFullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCifPlr5H264Flexfec) { +TEST(PCFullStackTest, Pc_Foreman_Cif_Delay_50_0_Plr_5_H264_Flexfec) { std::unique_ptr 
network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -710,6 +685,7 @@ TEST(PCFullStackTest, ForemanCifPlr5H264Flexfec) { config.queue_delay_ms = 50; auto fixture = CreateTestFixture( "pc_foreman_cif_delay_50_0_plr_5_H264_flexfec", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -728,7 +704,7 @@ TEST(PCFullStackTest, ForemanCifPlr5H264Flexfec) { // Ulpfec with H264 is an unsupported combination, so this test is only useful // for debugging. It is therefore disabled by default. -TEST(PCFullStackTest, DISABLED_ForemanCifPlr5H264Ulpfec) { +TEST(PCFullStackTest, DISABLED_Pc_Foreman_Cif_Delay_50_0_Plr_5_H264_Ulpfec) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -736,6 +712,7 @@ TEST(PCFullStackTest, DISABLED_ForemanCifPlr5H264Ulpfec) { config.queue_delay_ms = 50; auto fixture = CreateTestFixture( "pc_foreman_cif_delay_50_0_plr_5_H264_ulpfec", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -753,7 +730,7 @@ TEST(PCFullStackTest, DISABLED_ForemanCifPlr5H264Ulpfec) { } #endif // defined(WEBRTC_USE_H264) -TEST(PCFullStackTest, ForemanCif500kbps) { +TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -761,7 +738,7 @@ TEST(PCFullStackTest, ForemanCif500kbps) { config.queue_delay_ms = 0; config.link_capacity_kbps = 500; auto fixture = CreateTestFixture( - "pc_foreman_cif_500kbps", + "pc_foreman_cif_500kbps", *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -778,7 +755,7 
@@ TEST(PCFullStackTest, ForemanCif500kbps) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCif500kbpsLimitedQueue) { +TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps_32pkts_Queue) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -787,6 +764,7 @@ TEST(PCFullStackTest, ForemanCif500kbpsLimitedQueue) { config.link_capacity_kbps = 500; auto fixture = CreateTestFixture( "pc_foreman_cif_500kbps_32pkts_queue", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -803,7 +781,7 @@ TEST(PCFullStackTest, ForemanCif500kbpsLimitedQueue) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ForemanCif500kbps100ms) { +TEST(PCFullStackTest, Pc_Foreman_Cif_500kbps_100ms) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -812,6 +790,7 @@ TEST(PCFullStackTest, ForemanCif500kbps100ms) { config.link_capacity_kbps = 500; auto fixture = CreateTestFixture( "pc_foreman_cif_500kbps_100ms", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -828,7 +807,8 @@ TEST(PCFullStackTest, ForemanCif500kbps100ms) { fixture->Run(std::move(run_params)); } -TEST_P(PCGenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) { +TEST(PCGenericDescriptorTest, + Pc_Foreman_Cif_500kbps_100ms_32pkts_Queue_Generic_Descriptor) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -836,7 +816,8 @@ TEST_P(PCGenericDescriptorTest, ForemanCif500kbps100msLimitedQueue) { config.queue_delay_ms = 100; config.link_capacity_kbps = 500; auto fixture = CreateTestFixture( - GetTestName("pc_foreman_cif_500kbps_100ms_32pkts_queue"), + 
"pc_foreman_cif_500kbps_100ms_32pkts_queue_generic_descriptor", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -874,7 +855,7 @@ TEST(PCFullStackTest, ForemanCif500kbps100msLimitedQueueRecvBwe) { } */ -TEST(PCFullStackTest, ForemanCif1000kbps100msLimitedQueue) { +TEST(PCFullStackTest, Pc_Foreman_Cif_1000kbps_100ms_32pkts_Queue) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -883,6 +864,7 @@ TEST(PCFullStackTest, ForemanCif1000kbps100msLimitedQueue) { config.link_capacity_kbps = 1000; auto fixture = CreateTestFixture( "pc_foreman_cif_1000kbps_100ms_32pkts_queue", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(352, 288, 30); @@ -900,7 +882,7 @@ TEST(PCFullStackTest, ForemanCif1000kbps100msLimitedQueue) { } // TODO(sprang): Remove this if we have the similar ModerateLimits below? 
-TEST(PCFullStackTest, ConferenceMotionHd2000kbps100msLimitedQueue) { +TEST(PCFullStackTest, Pc_Conference_Motion_Hd_2000kbps_100ms_32pkts_Queue) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -909,35 +891,7 @@ TEST(PCFullStackTest, ConferenceMotionHd2000kbps100msLimitedQueue) { config.link_capacity_kbps = 2000; auto fixture = CreateTestFixture( "pc_conference_motion_hd_2000kbps_100ms_32pkts_queue", - CreateTwoNetworkLinks(network_emulation_manager.get(), config), - [](PeerConfigurer* alice) { - VideoConfig video(1280, 720, 50); - video.stream_label = "alice-video"; - auto frame_generator = CreateFromYuvFileFrameGenerator( - video, ClipNameToClipPath("ConferenceMotion_1280_720_50")); - alice->AddVideoConfig(std::move(video), std::move(frame_generator)); - }, - [](PeerConfigurer* bob) {}); - RunParams run_params(TimeDelta::Seconds(kTestDurationSec)); - run_params.video_codecs = {VideoCodecConfig(cricket::kVp8CodecName)}; - run_params.use_flex_fec = false; - run_params.use_ulp_fec = false; - fixture->Run(std::move(run_params)); -} - -// TODO(webrtc:9722): Remove when experiment is cleaned up. 
-TEST(PCFullStackTest, ConferenceMotionHd1TLModerateLimitsWhitelistVp8) { - test::ScopedFieldTrials override_field_trials( - AppendFieldTrials(kVp8TrustedRateControllerFieldTrial)); - std::unique_ptr network_emulation_manager = - CreateNetworkEmulationManager(); - BuiltInNetworkBehaviorConfig config; - config.queue_length_packets = 50; - config.loss_percent = 3; - config.queue_delay_ms = 100; - config.link_capacity_kbps = 2000; - auto fixture = CreateTestFixture( - "pc_conference_motion_hd_1tl_moderate_limits_trusted_rate_ctrl", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(1280, 720, 50); @@ -956,7 +910,7 @@ TEST(PCFullStackTest, ConferenceMotionHd1TLModerateLimitsWhitelistVp8) { /* // TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework -TEST_P(PCGenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) { +TEST(PCGenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging conf_motion_hd; conf_motion_hd.call.send_side_bwe = true; @@ -970,7 +924,7 @@ TEST_P(PCGenericDescriptorTest, ConferenceMotionHd2TLModerateLimits) { false, false, false, ClipNameToClipPath("ConferenceMotion_1280_720_50")}; conf_motion_hd.analyzer = { - GetTestName("conference_motion_hd_2tl_moderate_limits"), 0.0, 0.0, + "conference_motion_hd_2tl_moderate_limits_generic_descriptor", 0.0, 0.0, kTestDurationSec}; conf_motion_hd.config->queue_length_packets = 50; conf_motion_hd.config->loss_percent = 3; @@ -1081,7 +1035,7 @@ TEST(PCFullStackTest, */ #if defined(RTC_ENABLE_VP9) -TEST(PCFullStackTest, ConferenceMotionHd2000kbps100msLimitedQueueVP9) { +TEST(PCFullStackTest, Pc_Conference_Motion_Hd_2000kbps_100ms_32pkts_Queue_Vp9) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -1090,6 +1044,7 @@ TEST(PCFullStackTest, 
ConferenceMotionHd2000kbps100msLimitedQueueVP9) { config.link_capacity_kbps = 2000; auto fixture = CreateTestFixture( "pc_conference_motion_hd_2000kbps_100ms_32pkts_queue_vp9", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(1280, 720, 50); @@ -1109,11 +1064,12 @@ TEST(PCFullStackTest, ConferenceMotionHd2000kbps100msLimitedQueueVP9) { } #endif -TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_NoConferenceMode) { +TEST(PCFullStackTest, Pc_Screenshare_Slides_No_Conference_Mode) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( "pc_screenshare_slides_no_conference_mode", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -1132,11 +1088,11 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_NoConferenceMode) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL) { +TEST(PCFullStackTest, Pc_Screenshare_Slides) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( - "pc_screenshare_slides", + "pc_screenshare_slides", *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -1158,11 +1114,12 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL) { // TODO(bugs.webrtc.org/9840): Investigate why is this test flaky on Win/Mac. 
#if !defined(WEBRTC_MAC) && !defined(WEBRTC_WIN) -TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast_NoConferenceMode) { +TEST(PCFullStackTest, Pc_Screenshare_Slides_Simulcast_No_Conference_Mode) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( "pc_screenshare_slides_simulcast_no_conference_mode", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -1183,11 +1140,12 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast_NoConferenceMode) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast) { +TEST(PCFullStackTest, Pc_Screenshare_Slides_Simulcast) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( "pc_screenshare_slides_simulcast", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -1214,40 +1172,6 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast) { #if !defined(WEBRTC_MAC) // TODO(bugs.webrtc.org/9840): Investigate why is this test flaky on Win/Mac. 
#if !defined(WEBRTC_WIN) -const char kScreenshareSimulcastVariableFramerateExperiment[] = - "WebRTC-VP8VariableFramerateScreenshare/" - "Enabled,min_fps:5.0,min_qp:15,undershoot:30/"; -// TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework -TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast_Variable_Framerate) { - test::ScopedFieldTrials field_trial( - AppendFieldTrials(kScreenshareSimulcastVariableFramerateExperiment)); - auto fixture = CreateVideoQualityTestFixture(); - ParamsWithLogging screenshare; - screenshare.call.send_side_bwe = true; - screenshare.screenshare[0] = {true, false, 10}; - screenshare.video[0] = {true, 1850, 1110, 30, 800000, 2500000, - 2500000, false, "VP8", 2, 1, 400000, - false, false, false, ""}; - screenshare.analyzer = {"screenshare_slides_simulcast_variable_framerate", - 0.0, 0.0, kTestDurationSec}; - ParamsWithLogging screenshare_params_high; - screenshare_params_high.video[0] = { - true, 1850, 1110, 60, 600000, 1250000, 1250000, false, - "VP8", 2, 0, 400000, false, false, false, ""}; - VideoQualityTest::Params screenshare_params_low; - screenshare_params_low.video[0] = {true, 1850, 1110, 5, 30000, 200000, - 1000000, false, "VP8", 2, 0, 400000, - false, false, false, ""}; - - std::vector streams = { - VideoQualityTest::DefaultVideoStream(screenshare_params_low, 0), - VideoQualityTest::DefaultVideoStream(screenshare_params_high, 0)}; - screenshare.ss[0] = { - streams, 1, 1, 0, InterLayerPredMode::kOn, std::vector(), - false}; - fixture->RunWithAnalyzer(screenshare); -} - // TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Simulcast_low) { auto fixture = CreateVideoQualityTestFixture(); @@ -1295,7 +1219,7 @@ TEST(PCFullStackTest, ScreenshareSlidesVP8_2TL_Scroll) { } // TODO(bugs.webrtc.org/10639) requires simulcast/SVC support in PC framework -TEST_P(PCGenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) { 
+TEST(PCGenericDescriptorTest, Screenshare_Slides_Lossy_Net_Generic_Descriptor) { auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging screenshare; screenshare.call.send_side_bwe = true; @@ -1303,12 +1227,12 @@ TEST_P(PCGenericDescriptorTest, ScreenshareSlidesVP8_2TL_LossyNet) { 1000000, false, "VP8", 2, 1, 400000, false, false, false, ""}; screenshare.screenshare[0] = {true, false, 10}; - screenshare.analyzer = {GetTestName("screenshare_slides_lossy_net"), 0.0, 0.0, - kTestDurationSec}; + screenshare.analyzer = {"screenshare_slides_lossy_net_generic_descriptor", + 0.0, 0.0, kTestDurationSec}; screenshare.config->loss_percent = 5; screenshare.config->queue_delay_ms = 200; screenshare.config->link_capacity_kbps = 500; - screenshare.call.generic_descriptor = GenericDescriptorEnabled(); + screenshare.call.generic_descriptor = true; fixture->RunWithAnalyzer(screenshare); } @@ -1421,7 +1345,7 @@ ParamsWithLogging::Video SimulcastVp8VideoLow() { #if defined(RTC_ENABLE_VP9) -TEST(PCFullStackTest, ScreenshareSlidesVP9_3SL_High_Fps) { +TEST(PCFullStackTest, Pc_Screenshare_Slides_Vp9_3sl_High_Fps) { webrtc::test::ScopedFieldTrials override_trials( AppendFieldTrials("WebRTC-Vp9InterLayerPred/" "Enabled,inter_layer_pred_mode:on/")); @@ -1429,6 +1353,7 @@ TEST(PCFullStackTest, ScreenshareSlidesVP9_3SL_High_Fps) { CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( "pc_screenshare_slides_vp9_3sl_high_fps", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -1450,14 +1375,14 @@ TEST(PCFullStackTest, ScreenshareSlidesVP9_3SL_High_Fps) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, VP9SVC_3SL_High) { +TEST(PCFullStackTest, Pc_Vp9svc_3sl_High) { webrtc::test::ScopedFieldTrials override_trials( AppendFieldTrials("WebRTC-Vp9InterLayerPred/" "Enabled,inter_layer_pred_mode:on/")); std::unique_ptr network_emulation_manager = 
CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( - "pc_vp9svc_3sl_high", + "pc_vp9svc_3sl_high", *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -1479,14 +1404,14 @@ TEST(PCFullStackTest, VP9SVC_3SL_High) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, VP9SVC_3SL_Low) { +TEST(PCFullStackTest, Pc_Vp9svc_3sl_Low) { webrtc::test::ScopedFieldTrials override_trials( AppendFieldTrials("WebRTC-Vp9InterLayerPred/" "Enabled,inter_layer_pred_mode:on/")); std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); auto fixture = CreateTestFixture( - "pc_vp9svc_3sl_low", + "pc_vp9svc_3sl_low", *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), BuiltInNetworkBehaviorConfig()), [](PeerConfigurer* alice) { @@ -1581,8 +1506,7 @@ TEST(PCFullStackTest, VP9KSVC_3SL_Medium_Network_Restricted) { // TODO(webrtc:9722): Remove when experiment is cleaned up. TEST(PCFullStackTest, VP9KSVC_3SL_Medium_Network_Restricted_Trusted_Rate) { webrtc::test::ScopedFieldTrials override_trials( - AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/" - "WebRTC-LibvpxVp9TrustedRateController/Enabled/")); + AppendFieldTrials("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation/Enabled/")); auto fixture = CreateVideoQualityTestFixture(); ParamsWithLogging simulcast; simulcast.call.send_side_bwe = true; @@ -1604,11 +1528,11 @@ TEST(PCFullStackTest, VP9KSVC_3SL_Medium_Network_Restricted_Trusted_Rate) { // Android bots can't handle FullHD, so disable the test. // TODO(bugs.webrtc.org/9220): Investigate source of flakiness on Mac. 
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_MAC) -#define MAYBE_SimulcastFullHdOveruse DISABLED_SimulcastFullHdOveruse +#define MAYBE_Pc_Simulcast_HD_High DISABLED_Pc_Simulcast_HD_High #else -#define MAYBE_SimulcastFullHdOveruse SimulcastFullHdOveruse +#define MAYBE_Pc_Simulcast_HD_High Pc_Simulcast_HD_High #endif -TEST(PCFullStackTest, MAYBE_SimulcastFullHdOveruse) { +TEST(PCFullStackTest, MAYBE_Pc_Simulcast_HD_High) { webrtc::test::ScopedFieldTrials override_trials(AppendFieldTrials( "WebRTC-ForceSimulatedOveruseIntervalMs/1000-50000-300/")); std::unique_ptr network_emulation_manager = @@ -1617,7 +1541,7 @@ TEST(PCFullStackTest, MAYBE_SimulcastFullHdOveruse) { config.loss_percent = 0; config.queue_delay_ms = 100; auto fixture = CreateTestFixture( - "pc_simulcast_HD_high", + "pc_simulcast_HD_high", *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(1920, 1080, 30); @@ -1634,7 +1558,7 @@ TEST(PCFullStackTest, MAYBE_SimulcastFullHdOveruse) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, SimulcastVP8_3SL_High) { +TEST(PCFullStackTest, Pc_Simulcast_Vp8_3sl_High) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; @@ -1642,6 +1566,7 @@ TEST(PCFullStackTest, SimulcastVP8_3SL_High) { config.queue_delay_ms = 100; auto fixture = CreateTestFixture( "pc_simulcast_vp8_3sl_high", + *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(1280, 720, 30); @@ -1659,14 +1584,14 @@ TEST(PCFullStackTest, SimulcastVP8_3SL_High) { fixture->Run(std::move(run_params)); } -TEST(PCFullStackTest, SimulcastVP8_3SL_Low) { +TEST(PCFullStackTest, Pc_Simulcast_Vp8_3sl_Low) { std::unique_ptr network_emulation_manager = CreateNetworkEmulationManager(); BuiltInNetworkBehaviorConfig config; config.loss_percent = 0; 
config.queue_delay_ms = 100; auto fixture = CreateTestFixture( - "pc_simulcast_vp8_3sl_low", + "pc_simulcast_vp8_3sl_low", *network_emulation_manager->time_controller(), CreateTwoNetworkLinks(network_emulation_manager.get(), config), [](PeerConfigurer* alice) { VideoConfig video(1280, 720, 30); @@ -1813,15 +1738,9 @@ TEST(PCFullStackTest, MAYBE_LargeRoomVP8_50thumb) { } */ -INSTANTIATE_TEST_SUITE_P( - PCFullStackTest, - PCGenericDescriptorTest, - ::testing::Values("WebRTC-GenericDescriptor/Disabled/", - "WebRTC-GenericDescriptor/Enabled/")); - +/* class PCDualStreamsTest : public ::testing::TestWithParam {}; -/* // Disable dual video test on mobile device becuase it's too heavy. // TODO(bugs.webrtc.org/9840): Investigate why is this test flaky on MAC. #if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS) && !defined(WEBRTC_MAC) @@ -1923,10 +1842,10 @@ TEST_P(PCDualStreamsTest, Conference_Restricted) { auto fixture = CreateVideoQualityTestFixture(); fixture->RunWithAnalyzer(dual_streams); } -*/ INSTANTIATE_TEST_SUITE_P(PCFullStackTest, PCDualStreamsTest, ::testing::Values(0, 1)); +*/ } // namespace webrtc diff --git a/video/picture_id_tests.cc b/video/picture_id_tests.cc index 19c1141b0a..298919c096 100644 --- a/video/picture_id_tests.cc +++ b/video/picture_id_tests.cc @@ -22,6 +22,7 @@ #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "rtc_base/numerics/safe_conversions.h" #include "rtc_base/numerics/sequence_number_util.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "test/call_test.h" @@ -49,12 +50,12 @@ class PictureIdObserver : public test::RtpRtcpObserver { num_ssrcs_to_observe_(1) {} void SetExpectedSsrcs(size_t num_expected_ssrcs) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); num_ssrcs_to_observe_ = num_expected_ssrcs; } void ResetObservedSsrcs() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // Do not clear the timestamp and picture_id, to ensure that we check // 
consistency between reinits and recreations. num_packets_sent_.clear(); @@ -62,7 +63,7 @@ class PictureIdObserver : public test::RtpRtcpObserver { } void SetMaxExpectedPictureIdGap(int max_expected_picture_id_gap) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); max_expected_picture_id_gap_ = max_expected_picture_id_gap; // Expect smaller gap for |tl0_pic_idx| (running index for temporal_idx 0). max_expected_tl0_idx_gap_ = max_expected_picture_id_gap_ / 2; @@ -120,7 +121,7 @@ class PictureIdObserver : public test::RtpRtcpObserver { // Verify continuity and monotonicity of picture_id sequence. void VerifyPictureId(const ParsedPacket& current, const ParsedPacket& last) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_) { + RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_) { if (current.timestamp == last.timestamp) { EXPECT_EQ(last.picture_id, current.picture_id); return; // Same frame. @@ -143,7 +144,7 @@ class PictureIdObserver : public test::RtpRtcpObserver { } void VerifyTl0Idx(const ParsedPacket& current, const ParsedPacket& last) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(&crit_) { + RTC_EXCLUSIVE_LOCKS_REQUIRED(&mutex_) { if (current.tl0_pic_idx == kNoTl0PicIdx || current.temporal_idx == kNoTemporalIdx) { return; // No temporal layers. 
@@ -169,7 +170,7 @@ class PictureIdObserver : public test::RtpRtcpObserver { } Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ParsedPacket parsed; if (!ParsePayload(packet, length, &parsed)) @@ -196,14 +197,14 @@ class PictureIdObserver : public test::RtpRtcpObserver { return SEND_PACKET; } - rtc::CriticalSection crit_; + Mutex mutex_; const std::unique_ptr depacketizer_; - std::map last_observed_packet_ RTC_GUARDED_BY(crit_); - std::map num_packets_sent_ RTC_GUARDED_BY(crit_); - int max_expected_picture_id_gap_ RTC_GUARDED_BY(crit_); - int max_expected_tl0_idx_gap_ RTC_GUARDED_BY(crit_); - size_t num_ssrcs_to_observe_ RTC_GUARDED_BY(crit_); - std::set observed_ssrcs_ RTC_GUARDED_BY(crit_); + std::map last_observed_packet_ RTC_GUARDED_BY(mutex_); + std::map num_packets_sent_ RTC_GUARDED_BY(mutex_); + int max_expected_picture_id_gap_ RTC_GUARDED_BY(mutex_); + int max_expected_tl0_idx_gap_ RTC_GUARDED_BY(mutex_); + size_t num_ssrcs_to_observe_ RTC_GUARDED_BY(mutex_); + std::set observed_ssrcs_ RTC_GUARDED_BY(mutex_); }; class PictureIdTest : public test::CallTest, diff --git a/video/quality_scaling_tests.cc b/video/quality_scaling_tests.cc index 19b9e8c36c..9837517b78 100644 --- a/video/quality_scaling_tests.cc +++ b/video/quality_scaling_tests.cc @@ -15,251 +15,513 @@ #include "modules/video_coding/codecs/h264/include/h264.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" +#include "rtc_base/experiments/encoder_info_settings.h" #include "test/call_test.h" #include "test/field_trial.h" #include "test/frame_generator_capturer.h" namespace webrtc { namespace { -constexpr int kWidth = 1280; -constexpr int kHeight = 720; +constexpr int kInitialWidth = 1280; +constexpr int kInitialHeight = 720; constexpr int kLowStartBps = 100000; -constexpr int kHighStartBps = 600000; -constexpr size_t kTimeoutMs = 10000; // Some tests are 
expected to time out. +constexpr int kHighStartBps = 1000000; +constexpr int kDefaultVgaMinStartBps = 500000; // From video_stream_encoder.cc +constexpr int kTimeoutMs = 10000; // Some tests are expected to time out. void SetEncoderSpecific(VideoEncoderConfig* encoder_config, VideoCodecType type, bool automatic_resize, - bool frame_dropping) { + size_t num_spatial_layers) { if (type == kVideoCodecVP8) { VideoCodecVP8 vp8 = VideoEncoder::GetDefaultVp8Settings(); vp8.automaticResizeOn = automatic_resize; - vp8.frameDroppingOn = frame_dropping; - encoder_config->encoder_specific_settings = new rtc::RefCountedObject< - VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8); + encoder_config->encoder_specific_settings = + rtc::make_ref_counted( + vp8); } else if (type == kVideoCodecVP9) { VideoCodecVP9 vp9 = VideoEncoder::GetDefaultVp9Settings(); vp9.automaticResizeOn = automatic_resize; - vp9.frameDroppingOn = frame_dropping; - encoder_config->encoder_specific_settings = new rtc::RefCountedObject< - VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9); - } else if (type == kVideoCodecH264) { - VideoCodecH264 h264 = VideoEncoder::GetDefaultH264Settings(); - h264.frameDroppingOn = frame_dropping; - encoder_config->encoder_specific_settings = new rtc::RefCountedObject< - VideoEncoderConfig::H264EncoderSpecificSettings>(h264); + vp9.numberOfSpatialLayers = num_spatial_layers; + encoder_config->encoder_specific_settings = + rtc::make_ref_counted( + vp9); } } } // namespace class QualityScalingTest : public test::CallTest { protected: - void RunTest(VideoEncoderFactory* encoder_factory, - const std::string& payload_name, - int start_bps, - bool automatic_resize, - bool frame_dropping, - bool expect_adaptation); - const std::string kPrefix = "WebRTC-Video-QualityScaling/Enabled-"; const std::string kEnd = ",0,0,0.9995,0.9999,1/"; + const absl::optional + kSinglecastLimits720pVp8 = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP8, + 1280 * 
720); + const absl::optional + kSinglecastLimits360pVp9 = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP9, + 640 * 360); }; -void QualityScalingTest::RunTest(VideoEncoderFactory* encoder_factory, - const std::string& payload_name, - int start_bps, - bool automatic_resize, - bool frame_dropping, - bool expect_adaptation) { - class ScalingObserver - : public test::SendTest, - public test::FrameGeneratorCapturer::SinkWantsObserver { - public: - ScalingObserver(VideoEncoderFactory* encoder_factory, - const std::string& payload_name, - int start_bps, - bool automatic_resize, - bool frame_dropping, - bool expect_adaptation) - : SendTest(expect_adaptation ? kDefaultTimeoutMs : kTimeoutMs), - encoder_factory_(encoder_factory), - payload_name_(payload_name), - start_bps_(start_bps), - automatic_resize_(automatic_resize), - frame_dropping_(frame_dropping), - expect_adaptation_(expect_adaptation) {} - - private: - void OnFrameGeneratorCapturerCreated( - test::FrameGeneratorCapturer* frame_generator_capturer) override { - frame_generator_capturer->SetSinkWantsObserver(this); - // Set initial resolution. - frame_generator_capturer->ChangeResolution(kWidth, kHeight); - } +class ScalingObserver : public test::SendTest { + protected: + ScalingObserver(const std::string& payload_name, + const std::vector& streams_active, + int start_bps, + bool automatic_resize, + bool expect_scaling) + : SendTest(expect_scaling ? 
kTimeoutMs * 4 : kTimeoutMs), + encoder_factory_( + [](const SdpVideoFormat& format) -> std::unique_ptr { + if (format.name == "VP8") + return VP8Encoder::Create(); + if (format.name == "VP9") + return VP9Encoder::Create(); + if (format.name == "H264") + return H264Encoder::Create(cricket::VideoCodec("H264")); + RTC_NOTREACHED() << format.name; + return nullptr; + }), + payload_name_(payload_name), + streams_active_(streams_active), + start_bps_(start_bps), + automatic_resize_(automatic_resize), + expect_scaling_(expect_scaling) {} + + DegradationPreference degradation_preference_ = + DegradationPreference::MAINTAIN_FRAMERATE; + + private: + void ModifySenderBitrateConfig(BitrateConstraints* bitrate_config) override { + bitrate_config->start_bitrate_bps = start_bps_; + } - // Called when FrameGeneratorCapturer::AddOrUpdateSink is called. - void OnSinkWantsChanged(rtc::VideoSinkInterface* sink, - const rtc::VideoSinkWants& wants) override { - if (wants.max_pixel_count < kWidth * kHeight) - observation_complete_.Set(); + void ModifyVideoDegradationPreference( + DegradationPreference* degradation_preference) override { + *degradation_preference = degradation_preference_; + } + + size_t GetNumVideoStreams() const override { + return (payload_name_ == "VP9") ? 
1 : streams_active_.size(); + } + + void ModifyVideoConfigs( + VideoSendStream::Config* send_config, + std::vector* receive_configs, + VideoEncoderConfig* encoder_config) override { + send_config->encoder_settings.encoder_factory = &encoder_factory_; + send_config->rtp.payload_name = payload_name_; + send_config->rtp.payload_type = test::CallTest::kVideoSendPayloadType; + encoder_config->video_format.name = payload_name_; + const VideoCodecType codec_type = PayloadStringToCodecType(payload_name_); + encoder_config->codec_type = codec_type; + encoder_config->max_bitrate_bps = + std::max(start_bps_, encoder_config->max_bitrate_bps); + if (payload_name_ == "VP9") { + // Simulcast layers indicates which spatial layers are active. + encoder_config->simulcast_layers.resize(streams_active_.size()); + encoder_config->simulcast_layers[0].max_bitrate_bps = + encoder_config->max_bitrate_bps; } - void ModifySenderBitrateConfig( - BitrateConstraints* bitrate_config) override { - bitrate_config->start_bitrate_bps = start_bps_; + double scale_factor = 1.0; + for (int i = streams_active_.size() - 1; i >= 0; --i) { + VideoStream& stream = encoder_config->simulcast_layers[i]; + stream.active = streams_active_[i]; + stream.scale_resolution_down_by = scale_factor; + scale_factor *= (payload_name_ == "VP9") ? 
1.0 : 2.0; } + SetEncoderSpecific(encoder_config, codec_type, automatic_resize_, + streams_active_.size()); + } - void ModifyVideoConfigs( - VideoSendStream::Config* send_config, - std::vector* receive_configs, - VideoEncoderConfig* encoder_config) override { - send_config->encoder_settings.encoder_factory = encoder_factory_; - send_config->rtp.payload_name = payload_name_; - send_config->rtp.payload_type = kVideoSendPayloadType; - const VideoCodecType codec_type = PayloadStringToCodecType(payload_name_); - encoder_config->codec_type = codec_type; - encoder_config->max_bitrate_bps = start_bps_; - SetEncoderSpecific(encoder_config, codec_type, automatic_resize_, - frame_dropping_); - } + void PerformTest() override { EXPECT_EQ(expect_scaling_, Wait()); } + + test::FunctionVideoEncoderFactory encoder_factory_; + const std::string payload_name_; + const std::vector streams_active_; + const int start_bps_; + const bool automatic_resize_; + const bool expect_scaling_; +}; - void PerformTest() override { - EXPECT_EQ(expect_adaptation_, Wait()) - << "Timed out while waiting for a scale down."; +class DownscalingObserver + : public ScalingObserver, + public test::FrameGeneratorCapturer::SinkWantsObserver { + public: + DownscalingObserver(const std::string& payload_name, + const std::vector& streams_active, + int start_bps, + bool automatic_resize, + bool expect_downscale) + : ScalingObserver(payload_name, + streams_active, + start_bps, + automatic_resize, + expect_downscale) {} + + private: + void OnFrameGeneratorCapturerCreated( + test::FrameGeneratorCapturer* frame_generator_capturer) override { + frame_generator_capturer->SetSinkWantsObserver(this); + frame_generator_capturer->ChangeResolution(kInitialWidth, kInitialHeight); + } + + void OnSinkWantsChanged(rtc::VideoSinkInterface* sink, + const rtc::VideoSinkWants& wants) override { + if (wants.max_pixel_count < kInitialWidth * kInitialHeight) + observation_complete_.Set(); + } +}; + +class UpscalingObserver + : public 
ScalingObserver, + public test::FrameGeneratorCapturer::SinkWantsObserver { + public: + UpscalingObserver(const std::string& payload_name, + const std::vector& streams_active, + int start_bps, + bool automatic_resize, + bool expect_upscale) + : ScalingObserver(payload_name, + streams_active, + start_bps, + automatic_resize, + expect_upscale) {} + + void SetDegradationPreference(DegradationPreference preference) { + degradation_preference_ = preference; + } + + private: + void OnFrameGeneratorCapturerCreated( + test::FrameGeneratorCapturer* frame_generator_capturer) override { + frame_generator_capturer->SetSinkWantsObserver(this); + frame_generator_capturer->ChangeResolution(kInitialWidth, kInitialHeight); + } + + void OnSinkWantsChanged(rtc::VideoSinkInterface* sink, + const rtc::VideoSinkWants& wants) override { + if (wants.max_pixel_count > last_wants_.max_pixel_count) { + if (wants.max_pixel_count == std::numeric_limits::max()) + observation_complete_.Set(); } + last_wants_ = wants; + } + + rtc::VideoSinkWants last_wants_; +}; - VideoEncoderFactory* const encoder_factory_; - const std::string payload_name_; - const int start_bps_; - const bool automatic_resize_; - const bool frame_dropping_; - const bool expect_adaptation_; - } test(encoder_factory, payload_name, start_bps, automatic_resize, - frame_dropping, expect_adaptation); +TEST_F(QualityScalingTest, AdaptsDownForHighQp_Vp8) { + // qp_low:1, qp_high:1 -> kHighQp + test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); + DownscalingObserver test("VP8", /*streams_active=*/{true}, kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/true); RunBaseTest(&test); } -TEST_F(QualityScalingTest, AdaptsDownForHighQp_Vp8) { - // VP8 QP thresholds, low:1, high:1 -> high QP. +TEST_F(QualityScalingTest, NoAdaptDownForHighQpIfScalingOff_Vp8) { + // qp_low:1, qp_high:1 -> kHighQp test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); - // QualityScaler enabled. 
- const bool kAutomaticResize = true; - const bool kFrameDropping = true; - const bool kExpectAdapt = true; + DownscalingObserver test("VP8", /*streams_active=*/{true}, kHighStartBps, + /*automatic_resize=*/false, + /*expect_downscale=*/false); + RunBaseTest(&test); +} - test::FunctionVideoEncoderFactory encoder_factory( - []() { return VP8Encoder::Create(); }); - RunTest(&encoder_factory, "VP8", kHighStartBps, kAutomaticResize, - kFrameDropping, kExpectAdapt); +TEST_F(QualityScalingTest, NoAdaptDownForNormalQp_Vp8) { + // qp_low:1, qp_high:127 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); + + DownscalingObserver test("VP8", /*streams_active=*/{true}, kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); } -TEST_F(QualityScalingTest, NoAdaptDownForHighQpWithResizeOff_Vp8) { - // VP8 QP thresholds, low:1, high:1 -> high QP. - test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); +TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_Vp8) { + // qp_low:1, qp_high:127 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); + + DownscalingObserver test("VP8", /*streams_active=*/{true}, kLowStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); +} - // QualityScaler disabled. 
- const bool kAutomaticResize = false; - const bool kFrameDropping = true; - const bool kExpectAdapt = false; +TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrateAndThenUp) { + // qp_low:127, qp_high:127 -> kLowQp + test::ScopedFieldTrials field_trials( + kPrefix + "127,127,0,0,0,0" + kEnd + + "WebRTC-Video-BalancedDegradationSettings/" + "pixels:230400|921600,fps:20|30,kbps:300|500/"); // should not affect - test::FunctionVideoEncoderFactory encoder_factory( - []() { return VP8Encoder::Create(); }); - RunTest(&encoder_factory, "VP8", kHighStartBps, kAutomaticResize, - kFrameDropping, kExpectAdapt); + UpscalingObserver test("VP8", /*streams_active=*/{true}, + kDefaultVgaMinStartBps - 1, + /*automatic_resize=*/true, /*expect_upscale=*/true); + RunBaseTest(&test); } -// TODO(bugs.webrtc.org/10388): Fix and re-enable. -TEST_F(QualityScalingTest, - DISABLED_NoAdaptDownForHighQpWithFrameDroppingOff_Vp8) { - // VP8 QP thresholds, low:1, high:1 -> high QP. +TEST_F(QualityScalingTest, AdaptsDownAndThenUpWithBalanced) { + // qp_low:127, qp_high:127 -> kLowQp + test::ScopedFieldTrials field_trials( + kPrefix + "127,127,0,0,0,0" + kEnd + + "WebRTC-Video-BalancedDegradationSettings/" + "pixels:230400|921600,fps:20|30,kbps:300|499/"); + + UpscalingObserver test("VP8", /*streams_active=*/{true}, + kDefaultVgaMinStartBps - 1, + /*automatic_resize=*/true, /*expect_upscale=*/true); + test.SetDegradationPreference(DegradationPreference::BALANCED); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, AdaptsDownButNotUpWithBalancedIfBitrateNotEnough) { + // qp_low:127, qp_high:127 -> kLowQp + test::ScopedFieldTrials field_trials( + kPrefix + "127,127,0,0,0,0" + kEnd + + "WebRTC-Video-BalancedDegradationSettings/" + "pixels:230400|921600,fps:20|30,kbps:300|500/"); + + UpscalingObserver test("VP8", /*streams_active=*/{true}, + kDefaultVgaMinStartBps - 1, + /*automatic_resize=*/true, /*expect_upscale=*/false); + test.SetDegradationPreference(DegradationPreference::BALANCED); + 
RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrate_Simulcast) { + // qp_low:1, qp_high:127 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); + + DownscalingObserver test("VP8", /*streams_active=*/{true, true}, kLowStartBps, + /*automatic_resize=*/false, + /*expect_downscale=*/false); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, AdaptsDownForHighQp_HighestStreamActive_Vp8) { + // qp_low:1, qp_high:1 -> kHighQp test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); - // QualityScaler disabled. - const bool kAutomaticResize = true; - const bool kFrameDropping = false; - const bool kExpectAdapt = false; + DownscalingObserver test("VP8", /*streams_active=*/{false, false, true}, + kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, + AdaptsDownForLowStartBitrate_HighestStreamActive_Vp8) { + // qp_low:1, qp_high:127 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - test::FunctionVideoEncoderFactory encoder_factory( - []() { return VP8Encoder::Create(); }); - RunTest(&encoder_factory, "VP8", kHighStartBps, kAutomaticResize, - kFrameDropping, kExpectAdapt); + DownscalingObserver test("VP8", /*streams_active=*/{false, false, true}, + kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); } -TEST_F(QualityScalingTest, NoAdaptDownForNormalQp_Vp8) { - // VP8 QP thresholds, low:1, high:127 -> normal QP. 
+TEST_F(QualityScalingTest, AdaptsDownButNotUpWithMinStartBitrateLimit) { + // qp_low:127, qp_high:127 -> kLowQp + test::ScopedFieldTrials field_trials(kPrefix + "127,127,0,0,0,0" + kEnd); + + UpscalingObserver test("VP8", /*streams_active=*/{false, true}, + kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, + /*automatic_resize=*/true, /*expect_upscale=*/false); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfBitrateEnough_Vp8) { + // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - // QualityScaler enabled. - const bool kAutomaticResize = true; - const bool kFrameDropping = true; - const bool kExpectAdapt = false; + DownscalingObserver test("VP8", /*streams_active=*/{false, false, true}, + kSinglecastLimits720pVp8->min_start_bitrate_bps, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); +} - test::FunctionVideoEncoderFactory encoder_factory( - []() { return VP8Encoder::Create(); }); - RunTest(&encoder_factory, "VP8", kHighStartBps, kAutomaticResize, - kFrameDropping, kExpectAdapt); +TEST_F(QualityScalingTest, + NoAdaptDownForLowStartBitrateIfDefaultLimitsDisabled_Vp8) { + // qp_low:1, qp_high:127 -> kNormalQp + test::ScopedFieldTrials field_trials( + kPrefix + "1,127,0,0,0,0" + kEnd + + "WebRTC-DefaultBitrateLimitsKillSwitch/Enabled/"); + + DownscalingObserver test("VP8", /*streams_active=*/{false, false, true}, + kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); } -TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate) { - // VP8 QP thresholds, low:1, high:127 -> normal QP. +TEST_F(QualityScalingTest, + NoAdaptDownForLowStartBitrate_OneStreamSinglecastLimitsNotUsed_Vp8) { + // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - // QualityScaler enabled. 
- const bool kAutomaticResize = true; - const bool kFrameDropping = true; - const bool kExpectAdapt = true; + DownscalingObserver test("VP8", /*streams_active=*/{true}, + kSinglecastLimits720pVp8->min_start_bitrate_bps - 1, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, NoAdaptDownForHighQp_LowestStreamActive_Vp8) { + // qp_low:1, qp_high:1 -> kHighQp + test::ScopedFieldTrials field_trials(kPrefix + "1,1,0,0,0,0" + kEnd); + + DownscalingObserver test("VP8", /*streams_active=*/{true, false, false}, + kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); +} - test::FunctionVideoEncoderFactory encoder_factory( - []() { return VP8Encoder::Create(); }); - RunTest(&encoder_factory, "VP8", kLowStartBps, kAutomaticResize, - kFrameDropping, kExpectAdapt); +TEST_F(QualityScalingTest, + NoAdaptDownForLowStartBitrate_LowestStreamActive_Vp8) { + // qp_low:1, qp_high:127 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); + + DownscalingObserver test("VP8", /*streams_active=*/{true, false, false}, + kLowStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); } -TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateWithScalingOff) { - // VP8 QP thresholds, low:1, high:127 -> normal QP. +TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfScalingOff_Vp8) { + // qp_low:1, qp_high:127 -> kNormalQp test::ScopedFieldTrials field_trials(kPrefix + "1,127,0,0,0,0" + kEnd); - // QualityScaler disabled. 
- const bool kAutomaticResize = false; - const bool kFrameDropping = true; - const bool kExpectAdapt = false; + DownscalingObserver test("VP8", /*streams_active=*/{true}, kLowStartBps, + /*automatic_resize=*/false, + /*expect_downscale=*/false); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, AdaptsDownForHighQp_Vp9) { + // qp_low:1, qp_high:1 -> kHighQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd + + "WebRTC-VP9QualityScaler/Enabled/"); - test::FunctionVideoEncoderFactory encoder_factory( - []() { return VP8Encoder::Create(); }); - RunTest(&encoder_factory, "VP8", kLowStartBps, kAutomaticResize, - kFrameDropping, kExpectAdapt); + DownscalingObserver test("VP9", /*streams_active=*/{true}, kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); } -TEST_F(QualityScalingTest, NoAdaptDownForHighQp_Vp9) { - // VP9 QP thresholds, low:1, high:1 -> high QP. - test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd); +TEST_F(QualityScalingTest, NoAdaptDownForHighQpIfScalingOff_Vp9) { + // qp_low:1, qp_high:1 -> kHighQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd + + "WebRTC-VP9QualityScaler/Disabled/"); + + DownscalingObserver test("VP9", /*streams_active=*/{true}, kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); +} - // QualityScaler always disabled. 
- const bool kAutomaticResize = true; - const bool kFrameDropping = true; - const bool kExpectAdapt = false; +TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_Vp9) { + // qp_low:1, qp_high:255 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,255,0,0" + kEnd + + "WebRTC-VP9QualityScaler/Enabled/"); - test::FunctionVideoEncoderFactory encoder_factory( - []() { return VP9Encoder::Create(); }); - RunTest(&encoder_factory, "VP9", kHighStartBps, kAutomaticResize, - kFrameDropping, kExpectAdapt); + DownscalingObserver test("VP9", /*streams_active=*/{true}, kLowStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, NoAdaptDownForHighQp_LowestStreamActive_Vp9) { + // qp_low:1, qp_high:1 -> kHighQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd + + "WebRTC-VP9QualityScaler/Enabled/"); + + DownscalingObserver test("VP9", /*streams_active=*/{true, false, false}, + kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, + NoAdaptDownForLowStartBitrate_LowestStreamActive_Vp9) { + // qp_low:1, qp_high:255 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,255,0,0" + kEnd + + "WebRTC-VP9QualityScaler/Enabled/"); + + DownscalingObserver test("VP9", /*streams_active=*/{true, false, false}, + kLowStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, AdaptsDownForHighQp_MiddleStreamActive_Vp9) { + // qp_low:1, qp_high:1 -> kHighQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,1,0,0" + kEnd + + "WebRTC-VP9QualityScaler/Enabled/"); + + DownscalingObserver test("VP9", /*streams_active=*/{false, true, false}, + kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, + AdaptsDownForLowStartBitrate_MiddleStreamActive_Vp9) { + // 
qp_low:1, qp_high:255 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,255,0,0" + kEnd + + "WebRTC-VP9QualityScaler/Enabled/"); + + DownscalingObserver test("VP9", /*streams_active=*/{false, true, false}, + kSinglecastLimits360pVp9->min_start_bitrate_bps - 1, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, NoAdaptDownForLowStartBitrateIfBitrateEnough_Vp9) { + // qp_low:1, qp_high:255 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,1,255,0,0" + kEnd + + "WebRTC-VP9QualityScaler/Enabled/"); + + DownscalingObserver test("VP9", /*streams_active=*/{false, true, false}, + kSinglecastLimits360pVp9->min_start_bitrate_bps, + /*automatic_resize=*/true, + /*expect_downscale=*/false); + RunBaseTest(&test); } #if defined(WEBRTC_USE_H264) TEST_F(QualityScalingTest, AdaptsDownForHighQp_H264) { - // H264 QP thresholds, low:1, high:1 -> high QP. + // qp_low:1, qp_high:1 -> kHighQp test::ScopedFieldTrials field_trials(kPrefix + "0,0,0,0,1,1" + kEnd); - // QualityScaler always enabled. 
- const bool kAutomaticResize = false; - const bool kFrameDropping = false; - const bool kExpectAdapt = true; + DownscalingObserver test("H264", /*streams_active=*/{true}, kHighStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); +} + +TEST_F(QualityScalingTest, AdaptsDownForLowStartBitrate_H264) { + // qp_low:1, qp_high:51 -> kNormalQp + test::ScopedFieldTrials field_trials(kPrefix + "0,0,0,0,1,51" + kEnd); - test::FunctionVideoEncoderFactory encoder_factory( - []() { return H264Encoder::Create(cricket::VideoCodec("H264")); }); - RunTest(&encoder_factory, "H264", kHighStartBps, kAutomaticResize, - kFrameDropping, kExpectAdapt); + DownscalingObserver test("H264", /*streams_active=*/{true}, kLowStartBps, + /*automatic_resize=*/true, + /*expect_downscale=*/true); + RunBaseTest(&test); } #endif // defined(WEBRTC_USE_H264) diff --git a/video/receive_statistics_proxy.cc b/video/receive_statistics_proxy.cc index 82951c8a50..7aec685a1c 100644 --- a/video/receive_statistics_proxy.cc +++ b/video/receive_statistics_proxy.cc @@ -133,7 +133,7 @@ void ReceiveStatisticsProxy::UpdateHistograms( // earlier. RTC_DCHECK_RUN_ON(&decode_thread_); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); char log_stream_buf[8 * 1024]; rtc::SimpleStringBuilder log_stream(log_stream_buf); @@ -623,7 +623,7 @@ ReceiveStatisticsProxy::GetCurrentEstimatedPlayoutNtpTimestampMs( } VideoReceiveStream::Stats ReceiveStatisticsProxy::GetStats() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // Get current frame rates here, as only updating them on new frames prevents // us from ever correctly displaying frame rate of 0. 
int64_t now_ms = clock_->TimeInMilliseconds(); @@ -654,13 +654,13 @@ VideoReceiveStream::Stats ReceiveStatisticsProxy::GetStats() const { } void ReceiveStatisticsProxy::OnIncomingPayloadType(int payload_type) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); stats_.current_payload_type = payload_type; } void ReceiveStatisticsProxy::OnDecoderImplementationName( const char* implementation_name) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); stats_.decoder_implementation_name = implementation_name; } @@ -671,7 +671,7 @@ void ReceiveStatisticsProxy::OnFrameBufferTimingsUpdated( int jitter_buffer_ms, int min_playout_delay_ms, int render_delay_ms) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); stats_.max_decode_ms = max_decode_ms; stats_.current_delay_ms = current_delay_ms; stats_.target_delay_ms = target_delay_ms; @@ -687,13 +687,13 @@ void ReceiveStatisticsProxy::OnFrameBufferTimingsUpdated( } void ReceiveStatisticsProxy::OnUniqueFramesCounted(int num_unique_frames) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); num_unique_frames_.emplace(num_unique_frames); } void ReceiveStatisticsProxy::OnTimingFrameInfoUpdated( const TimingFrameInfo& info) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (info.flags != VideoSendTiming::kInvalid) { int64_t now_ms = clock_->TimeInMilliseconds(); timing_frame_info_counter_.Add(info, now_ms); @@ -714,14 +714,14 @@ void ReceiveStatisticsProxy::OnTimingFrameInfoUpdated( void ReceiveStatisticsProxy::RtcpPacketTypesCounterUpdated( uint32_t ssrc, const RtcpPacketTypeCounter& packet_counter) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (stats_.ssrc != ssrc) return; stats_.rtcp_packet_type_counts = packet_counter; } void ReceiveStatisticsProxy::OnCname(uint32_t ssrc, absl::string_view cname) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // TODO(pbos): Handle both local and remote ssrcs here and RTC_DCHECK that we // receive stats from one of them. 
if (stats_.ssrc != ssrc) @@ -733,7 +733,7 @@ void ReceiveStatisticsProxy::OnDecodedFrame(const VideoFrame& frame, absl::optional qp, int32_t decode_time_ms, VideoContentType content_type) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); uint64_t now_ms = clock_->TimeInMilliseconds(); @@ -799,7 +799,7 @@ void ReceiveStatisticsProxy::OnRenderedFrame(const VideoFrame& frame) { RTC_DCHECK_GT(width, 0); RTC_DCHECK_GT(height, 0); int64_t now_ms = clock_->TimeInMilliseconds(); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); video_quality_observer_->OnRenderedFrame(frame, now_ms); @@ -833,7 +833,7 @@ void ReceiveStatisticsProxy::OnRenderedFrame(const VideoFrame& frame) { void ReceiveStatisticsProxy::OnSyncOffsetUpdated(int64_t video_playout_ntp_ms, int64_t sync_offset_ms, double estimated_freq_khz) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); sync_offset_counter_.Add(std::abs(sync_offset_ms)); stats_.sync_offset_ms = sync_offset_ms; last_estimated_playout_ntp_timestamp_ms_ = video_playout_ntp_ms; @@ -851,7 +851,7 @@ void ReceiveStatisticsProxy::OnSyncOffsetUpdated(int64_t video_playout_ntp_ms, void ReceiveStatisticsProxy::OnCompleteFrame(bool is_keyframe, size_t size_bytes, VideoContentType content_type) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (is_keyframe) { ++stats_.frame_counts.key_frames; } else { @@ -881,13 +881,13 @@ void ReceiveStatisticsProxy::OnCompleteFrame(bool is_keyframe, } void ReceiveStatisticsProxy::OnDroppedFrames(uint32_t frames_dropped) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); stats_.frames_dropped += frames_dropped; } void ReceiveStatisticsProxy::OnPreDecode(VideoCodecType codec_type, int qp) { RTC_DCHECK_RUN_ON(&decode_thread_); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); last_codec_type_ = codec_type; if (last_codec_type_ == kVideoCodecVP8 && qp != -1) { qp_counters_.vp8.Add(qp); @@ -898,7 +898,7 @@ void ReceiveStatisticsProxy::OnPreDecode(VideoCodecType 
codec_type, int qp) { void ReceiveStatisticsProxy::OnStreamInactive() { // TODO(sprang): Figure out any other state that should be reset. - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // Don't report inter-frame delay if stream was paused. last_decoded_frame_time_ms_.reset(); video_quality_observer_->OnStreamInactive(); @@ -906,7 +906,7 @@ void ReceiveStatisticsProxy::OnStreamInactive() { void ReceiveStatisticsProxy::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); avg_rtt_ms_ = avg_rtt_ms; } diff --git a/video/receive_statistics_proxy.h b/video/receive_statistics_proxy.h index 02043d6944..57738f29cf 100644 --- a/video/receive_statistics_proxy.h +++ b/video/receive_statistics_proxy.h @@ -17,17 +17,17 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "call/video_receive_stream.h" #include "modules/include/module_common_types.h" #include "modules/video_coding/include/video_coding_defines.h" -#include "rtc_base/critical_section.h" #include "rtc_base/numerics/histogram_percentile_counter.h" #include "rtc_base/numerics/moving_max_counter.h" #include "rtc_base/numerics/sample_counter.h" #include "rtc_base/rate_statistics.h" #include "rtc_base/rate_tracker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "video/quality_threshold.h" #include "video/stats_counter.h" #include "video/video_quality_observer.h" @@ -124,19 +124,19 @@ class ReceiveStatisticsProxy : public VCMReceiveStatisticsCallback, rtc::HistogramPercentileCounter interframe_delay_percentiles; }; - void QualitySample() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + void QualitySample() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Removes info about old frames and then updates the framerate. 
void UpdateFramerate(int64_t now_ms) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void UpdateDecodeTimeHistograms(int width, int height, int decode_time_ms) const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); absl::optional GetCurrentEstimatedPlayoutNtpTimestampMs( - int64_t now_ms) const RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + int64_t now_ms) const RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); Clock* const clock_; // Ownership of this object lies with the owner of the ReceiveStatisticsProxy @@ -150,55 +150,55 @@ class ReceiveStatisticsProxy : public VCMReceiveStatisticsCallback, const int64_t start_ms_; const bool enable_decode_time_histograms_; - rtc::CriticalSection crit_; - int64_t last_sample_time_ RTC_GUARDED_BY(crit_); - QualityThreshold fps_threshold_ RTC_GUARDED_BY(crit_); - QualityThreshold qp_threshold_ RTC_GUARDED_BY(crit_); - QualityThreshold variance_threshold_ RTC_GUARDED_BY(crit_); - rtc::SampleCounter qp_sample_ RTC_GUARDED_BY(crit_); - int num_bad_states_ RTC_GUARDED_BY(crit_); - int num_certain_states_ RTC_GUARDED_BY(crit_); + mutable Mutex mutex_; + int64_t last_sample_time_ RTC_GUARDED_BY(mutex_); + QualityThreshold fps_threshold_ RTC_GUARDED_BY(mutex_); + QualityThreshold qp_threshold_ RTC_GUARDED_BY(mutex_); + QualityThreshold variance_threshold_ RTC_GUARDED_BY(mutex_); + rtc::SampleCounter qp_sample_ RTC_GUARDED_BY(mutex_); + int num_bad_states_ RTC_GUARDED_BY(mutex_); + int num_certain_states_ RTC_GUARDED_BY(mutex_); // Note: The |stats_.rtp_stats| member is not used or populated by this class. 
- mutable VideoReceiveStream::Stats stats_ RTC_GUARDED_BY(crit_); - RateStatistics decode_fps_estimator_ RTC_GUARDED_BY(crit_); - RateStatistics renders_fps_estimator_ RTC_GUARDED_BY(crit_); - rtc::RateTracker render_fps_tracker_ RTC_GUARDED_BY(crit_); - rtc::RateTracker render_pixel_tracker_ RTC_GUARDED_BY(crit_); - rtc::SampleCounter sync_offset_counter_ RTC_GUARDED_BY(crit_); - rtc::SampleCounter decode_time_counter_ RTC_GUARDED_BY(crit_); - rtc::SampleCounter jitter_buffer_delay_counter_ RTC_GUARDED_BY(crit_); - rtc::SampleCounter target_delay_counter_ RTC_GUARDED_BY(crit_); - rtc::SampleCounter current_delay_counter_ RTC_GUARDED_BY(crit_); - rtc::SampleCounter delay_counter_ RTC_GUARDED_BY(crit_); + mutable VideoReceiveStream::Stats stats_ RTC_GUARDED_BY(mutex_); + RateStatistics decode_fps_estimator_ RTC_GUARDED_BY(mutex_); + RateStatistics renders_fps_estimator_ RTC_GUARDED_BY(mutex_); + rtc::RateTracker render_fps_tracker_ RTC_GUARDED_BY(mutex_); + rtc::RateTracker render_pixel_tracker_ RTC_GUARDED_BY(mutex_); + rtc::SampleCounter sync_offset_counter_ RTC_GUARDED_BY(mutex_); + rtc::SampleCounter decode_time_counter_ RTC_GUARDED_BY(mutex_); + rtc::SampleCounter jitter_buffer_delay_counter_ RTC_GUARDED_BY(mutex_); + rtc::SampleCounter target_delay_counter_ RTC_GUARDED_BY(mutex_); + rtc::SampleCounter current_delay_counter_ RTC_GUARDED_BY(mutex_); + rtc::SampleCounter delay_counter_ RTC_GUARDED_BY(mutex_); std::unique_ptr video_quality_observer_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); mutable rtc::MovingMaxCounter interframe_delay_max_moving_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); std::map content_specific_stats_ - RTC_GUARDED_BY(crit_); - MaxCounter freq_offset_counter_ RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); + MaxCounter freq_offset_counter_ RTC_GUARDED_BY(mutex_); QpCounters qp_counters_ RTC_GUARDED_BY(decode_thread_); - int64_t avg_rtt_ms_ RTC_GUARDED_BY(crit_); - mutable std::map frame_window_ RTC_GUARDED_BY(&crit_); - 
VideoContentType last_content_type_ RTC_GUARDED_BY(&crit_); - VideoCodecType last_codec_type_ RTC_GUARDED_BY(&crit_); - absl::optional first_frame_received_time_ms_ RTC_GUARDED_BY(&crit_); - absl::optional first_decoded_frame_time_ms_ RTC_GUARDED_BY(&crit_); - absl::optional last_decoded_frame_time_ms_ RTC_GUARDED_BY(&crit_); - size_t num_delayed_frames_rendered_ RTC_GUARDED_BY(&crit_); - int64_t sum_missed_render_deadline_ms_ RTC_GUARDED_BY(&crit_); + int64_t avg_rtt_ms_ RTC_GUARDED_BY(mutex_); + mutable std::map frame_window_ RTC_GUARDED_BY(&mutex_); + VideoContentType last_content_type_ RTC_GUARDED_BY(&mutex_); + VideoCodecType last_codec_type_ RTC_GUARDED_BY(&mutex_); + absl::optional first_frame_received_time_ms_ RTC_GUARDED_BY(&mutex_); + absl::optional first_decoded_frame_time_ms_ RTC_GUARDED_BY(&mutex_); + absl::optional last_decoded_frame_time_ms_ RTC_GUARDED_BY(&mutex_); + size_t num_delayed_frames_rendered_ RTC_GUARDED_BY(&mutex_); + int64_t sum_missed_render_deadline_ms_ RTC_GUARDED_BY(&mutex_); // Mutable because calling Max() on MovingMaxCounter is not const. Yet it is // called from const GetStats(). 
mutable rtc::MovingMaxCounter timing_frame_info_counter_ - RTC_GUARDED_BY(&crit_); - absl::optional num_unique_frames_ RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(&mutex_); + absl::optional num_unique_frames_ RTC_GUARDED_BY(mutex_); absl::optional last_estimated_playout_ntp_timestamp_ms_ - RTC_GUARDED_BY(&crit_); + RTC_GUARDED_BY(&mutex_); absl::optional last_estimated_playout_time_ms_ - RTC_GUARDED_BY(&crit_); - rtc::ThreadChecker decode_thread_; - rtc::ThreadChecker network_thread_; - rtc::ThreadChecker main_thread_; + RTC_GUARDED_BY(&mutex_); + SequenceChecker decode_thread_; + SequenceChecker network_thread_; + SequenceChecker main_thread_; }; } // namespace webrtc diff --git a/video/receive_statistics_proxy2.cc b/video/receive_statistics_proxy2.cc index 15d08c4d26..af3cd221e7 100644 --- a/video/receive_statistics_proxy2.cc +++ b/video/receive_statistics_proxy2.cc @@ -782,10 +782,10 @@ void ReceiveStatisticsProxy::RtcpPacketTypesCounterUpdated( return; if (!IsCurrentTaskQueueOrThread(worker_thread_)) { - // RtpRtcp::Configuration has a single RtcpPacketTypeCounterObserver and - // that same configuration may be used for both receiver and sender - // (see ModuleRtpRtcpImpl::ModuleRtpRtcpImpl). - // The RTCPSender implementation currently makes calls to this function on a + // RtpRtcpInterface::Configuration has a single + // RtcpPacketTypeCounterObserver and that same configuration may be used for + // both receiver and sender (see ModuleRtpRtcpImpl::ModuleRtpRtcpImpl). The + // RTCPSender implementation currently makes calls to this function on a // process thread whereas the RTCPReceiver implementation calls back on the // [main] worker thread. 
// So until the sender implementation has been updated, we work around this @@ -946,26 +946,21 @@ void ReceiveStatisticsProxy::OnRenderedFrame( void ReceiveStatisticsProxy::OnSyncOffsetUpdated(int64_t video_playout_ntp_ms, int64_t sync_offset_ms, double estimated_freq_khz) { - RTC_DCHECK_RUN_ON(&incoming_render_queue_); - int64_t now_ms = clock_->TimeInMilliseconds(); - worker_thread_->PostTask( - ToQueuedTask(task_safety_, [video_playout_ntp_ms, sync_offset_ms, - estimated_freq_khz, now_ms, this]() { - RTC_DCHECK_RUN_ON(&main_thread_); - sync_offset_counter_.Add(std::abs(sync_offset_ms)); - stats_.sync_offset_ms = sync_offset_ms; - last_estimated_playout_ntp_timestamp_ms_ = video_playout_ntp_ms; - last_estimated_playout_time_ms_ = now_ms; - - const double kMaxFreqKhz = 10000.0; - int offset_khz = kMaxFreqKhz; - // Should not be zero or negative. If so, report max. - if (estimated_freq_khz < kMaxFreqKhz && estimated_freq_khz > 0.0) - offset_khz = - static_cast(std::fabs(estimated_freq_khz - 90.0) + 0.5); - - freq_offset_counter_.Add(offset_khz); - })); + RTC_DCHECK_RUN_ON(&main_thread_); + + const int64_t now_ms = clock_->TimeInMilliseconds(); + sync_offset_counter_.Add(std::abs(sync_offset_ms)); + stats_.sync_offset_ms = sync_offset_ms; + last_estimated_playout_ntp_timestamp_ms_ = video_playout_ntp_ms; + last_estimated_playout_time_ms_ = now_ms; + + const double kMaxFreqKhz = 10000.0; + int offset_khz = kMaxFreqKhz; + // Should not be zero or negative. If so, report max. 
+ if (estimated_freq_khz < kMaxFreqKhz && estimated_freq_khz > 0.0) + offset_khz = static_cast(std::fabs(estimated_freq_khz - 90.0) + 0.5); + + freq_offset_counter_.Add(offset_khz); } void ReceiveStatisticsProxy::OnCompleteFrame(bool is_keyframe, diff --git a/video/receive_statistics_proxy2.h b/video/receive_statistics_proxy2.h index 1357c407ad..7797d93217 100644 --- a/video/receive_statistics_proxy2.h +++ b/video/receive_statistics_proxy2.h @@ -17,6 +17,7 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_base.h" #include "api/units/timestamp.h" #include "call/video_receive_stream.h" @@ -27,10 +28,9 @@ #include "rtc_base/numerics/sample_counter.h" #include "rtc_base/rate_statistics.h" #include "rtc_base/rate_tracker.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "video/quality_threshold.h" #include "video/stats_counter.h" #include "video/video_quality_observer2.h" @@ -213,9 +213,9 @@ class ReceiveStatisticsProxy : public VCMReceiveStatisticsCallback, ScopedTaskSafety task_safety_; - SequenceChecker decode_queue_; - rtc::ThreadChecker main_thread_; - SequenceChecker incoming_render_queue_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker decode_queue_; + SequenceChecker main_thread_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker incoming_render_queue_; }; } // namespace internal diff --git a/video/report_block_stats.cc b/video/report_block_stats.cc index e3e95f9aed..bf60364682 100644 --- a/video/report_block_stats.cc +++ b/video/report_block_stats.cc @@ -31,16 +31,13 @@ ReportBlockStats::ReportBlockStats() ReportBlockStats::~ReportBlockStats() {} -void ReportBlockStats::Store(uint32_t ssrc, const RtcpStatistics& rtcp_stats) { +void ReportBlockStats::Store(uint32_t ssrc, + int packets_lost, + uint32_t 
extended_highest_sequence_number) { Report report; - report.packets_lost = rtcp_stats.packets_lost; - report.extended_highest_sequence_number = - rtcp_stats.extended_highest_sequence_number; - StoreAndAddPacketIncrement(ssrc, report); -} + report.packets_lost = packets_lost; + report.extended_highest_sequence_number = extended_highest_sequence_number; -void ReportBlockStats::StoreAndAddPacketIncrement(uint32_t ssrc, - const Report& report) { // Get diff with previous report block. const auto prev_report = prev_reports_.find(ssrc); if (prev_report != prev_reports_.end()) { diff --git a/video/report_block_stats.h b/video/report_block_stats.h index de4a079032..1d1140295c 100644 --- a/video/report_block_stats.h +++ b/video/report_block_stats.h @@ -15,8 +15,6 @@ #include -#include "modules/rtp_rtcp/include/rtcp_statistics.h" - namespace webrtc { // TODO(nisse): Usefulness of this class is somewhat unclear. The inputs are @@ -32,7 +30,9 @@ class ReportBlockStats { ~ReportBlockStats(); // Updates stats and stores report block. - void Store(uint32_t ssrc, const RtcpStatistics& rtcp_stats); + void Store(uint32_t ssrc, + int packets_lost, + uint32_t extended_highest_sequence_number); // Returns the total fraction of lost packets (or -1 if less than two report // blocks have been stored). @@ -45,10 +45,6 @@ class ReportBlockStats { int32_t packets_lost; }; - // Updates the total number of packets/lost packets. - // Stores the report. - void StoreAndAddPacketIncrement(uint32_t ssrc, const Report& report); - // The total number of packets/lost packets. 
uint32_t num_sequence_numbers_; uint32_t num_lost_sequence_numbers_; diff --git a/video/report_block_stats_unittest.cc b/video/report_block_stats_unittest.cc index 0b0230941f..bd66e571a0 100644 --- a/video/report_block_stats_unittest.cc +++ b/video/report_block_stats_unittest.cc @@ -13,65 +13,51 @@ #include "test/gtest.h" namespace webrtc { +namespace { -class ReportBlockStatsTest : public ::testing::Test { - protected: - ReportBlockStatsTest() { - // kSsrc1: report 1-3. - stats1_1_.packets_lost = 10; - stats1_1_.extended_highest_sequence_number = 24000; - stats1_2_.packets_lost = 15; - stats1_2_.extended_highest_sequence_number = 24100; - stats1_3_.packets_lost = 50; - stats1_3_.extended_highest_sequence_number = 24200; - // kSsrc2: report 1,2. - stats2_1_.packets_lost = 111; - stats2_1_.extended_highest_sequence_number = 8500; - stats2_2_.packets_lost = 136; - stats2_2_.extended_highest_sequence_number = 8800; - } +constexpr uint32_t kSsrc1 = 123; +constexpr uint32_t kSsrc2 = 234; - const uint32_t kSsrc1 = 123; - const uint32_t kSsrc2 = 234; - RtcpStatistics stats1_1_; - RtcpStatistics stats1_2_; - RtcpStatistics stats1_3_; - RtcpStatistics stats2_1_; - RtcpStatistics stats2_2_; -}; - -TEST_F(ReportBlockStatsTest, StoreAndGetFractionLost) { +TEST(ReportBlockStatsTest, StoreAndGetFractionLost) { ReportBlockStats stats; EXPECT_EQ(-1, stats.FractionLostInPercent()); // First report. 
- stats.Store(kSsrc1, stats1_1_); + stats.Store(kSsrc1, /*packets_lost=*/10, + /*extended_highest_sequence_number=*/24'000); EXPECT_EQ(-1, stats.FractionLostInPercent()); // fl: 100 * (15-10) / (24100-24000) = 5% - stats.Store(kSsrc1, stats1_2_); + stats.Store(kSsrc1, /*packets_lost=*/15, + /*extended_highest_sequence_number=*/24'100); EXPECT_EQ(5, stats.FractionLostInPercent()); // fl: 100 * (50-10) / (24200-24000) = 20% - stats.Store(kSsrc1, stats1_3_); + stats.Store(kSsrc1, /*packets_lost=*/50, + /*extended_highest_sequence_number=*/24'200); EXPECT_EQ(20, stats.FractionLostInPercent()); } -TEST_F(ReportBlockStatsTest, StoreAndGetFractionLost_TwoSsrcs) { +TEST(ReportBlockStatsTest, StoreAndGetFractionLost_TwoSsrcs) { ReportBlockStats stats; EXPECT_EQ(-1, stats.FractionLostInPercent()); // First report. - stats.Store(kSsrc1, stats1_1_); + stats.Store(kSsrc1, /*packets_lost=*/10, + /*extended_highest_sequence_number=*/24'000); EXPECT_EQ(-1, stats.FractionLostInPercent()); // fl: 100 * (15-10) / (24100-24000) = 5% - stats.Store(kSsrc1, stats1_2_); + stats.Store(kSsrc1, /*packets_lost=*/15, + /*extended_highest_sequence_number=*/24'100); EXPECT_EQ(5, stats.FractionLostInPercent()); // First report, kSsrc2. 
- stats.Store(kSsrc2, stats2_1_); + stats.Store(kSsrc2, /*packets_lost=*/111, + /*extended_highest_sequence_number=*/8'500); EXPECT_EQ(5, stats.FractionLostInPercent()); // fl: 100 * ((15-10) + (136-111)) / ((24100-24000) + (8800-8500)) = 7% - stats.Store(kSsrc2, stats2_2_); + stats.Store(kSsrc2, /*packets_lost=*/136, + /*extended_highest_sequence_number=*/8'800); EXPECT_EQ(7, stats.FractionLostInPercent()); } +} // namespace } // namespace webrtc diff --git a/video/rtp_streams_synchronizer.cc b/video/rtp_streams_synchronizer.cc index 3dedc43eaa..29ace90431 100644 --- a/video/rtp_streams_synchronizer.cc +++ b/video/rtp_streams_synchronizer.cc @@ -51,7 +51,7 @@ RtpStreamsSynchronizer::RtpStreamsSynchronizer(Syncable* syncable_video) RtpStreamsSynchronizer::~RtpStreamsSynchronizer() = default; void RtpStreamsSynchronizer::ConfigureSync(Syncable* syncable_audio) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (syncable_audio == syncable_audio_) { // This prevents expensive no-ops. return; @@ -76,7 +76,7 @@ void RtpStreamsSynchronizer::Process() { RTC_DCHECK_RUN_ON(&process_thread_checker_); last_sync_time_ = rtc::TimeNanos(); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (!syncable_audio_) { return; } @@ -89,11 +89,18 @@ void RtpStreamsSynchronizer::Process() { log_stats = true; } + int64_t last_audio_receive_time_ms = + audio_measurement_.latest_receive_time_ms; absl::optional audio_info = syncable_audio_->GetInfo(); if (!audio_info || !UpdateMeasurements(&audio_measurement_, *audio_info)) { return; } + if (last_audio_receive_time_ms == audio_measurement_.latest_receive_time_ms) { + // No new audio packet has been received since last update. 
+ return; + } + int64_t last_video_receive_ms = video_measurement_.latest_receive_time_ms; absl::optional video_info = syncable_video_->GetInfo(); if (!video_info || !UpdateMeasurements(&video_measurement_, *video_info)) { @@ -157,7 +164,7 @@ bool RtpStreamsSynchronizer::GetStreamSyncOffsetInMs( int64_t* video_playout_ntp_ms, int64_t* stream_offset_ms, double* estimated_freq_khz) const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (!syncable_audio_) { return false; } diff --git a/video/rtp_streams_synchronizer.h b/video/rtp_streams_synchronizer.h index 6abf5bbe0e..574ccba70b 100644 --- a/video/rtp_streams_synchronizer.h +++ b/video/rtp_streams_synchronizer.h @@ -16,9 +16,9 @@ #include +#include "api/sequence_checker.h" #include "modules/include/module.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "video/stream_synchronization.h" namespace webrtc { @@ -51,13 +51,13 @@ class RtpStreamsSynchronizer : public Module { private: Syncable* syncable_video_; - rtc::CriticalSection crit_; - Syncable* syncable_audio_ RTC_GUARDED_BY(crit_); - std::unique_ptr sync_ RTC_GUARDED_BY(crit_); - StreamSynchronization::Measurements audio_measurement_ RTC_GUARDED_BY(crit_); - StreamSynchronization::Measurements video_measurement_ RTC_GUARDED_BY(crit_); + mutable Mutex mutex_; + Syncable* syncable_audio_ RTC_GUARDED_BY(mutex_); + std::unique_ptr sync_ RTC_GUARDED_BY(mutex_); + StreamSynchronization::Measurements audio_measurement_ RTC_GUARDED_BY(mutex_); + StreamSynchronization::Measurements video_measurement_ RTC_GUARDED_BY(mutex_); - rtc::ThreadChecker process_thread_checker_; + SequenceChecker process_thread_checker_; int64_t last_sync_time_ RTC_GUARDED_BY(&process_thread_checker_); int64_t last_stats_log_ms_ RTC_GUARDED_BY(&process_thread_checker_); }; diff --git a/video/rtp_streams_synchronizer2.cc b/video/rtp_streams_synchronizer2.cc index 49be355a38..4096fceb99 100644 --- 
a/video/rtp_streams_synchronizer2.cc +++ b/video/rtp_streams_synchronizer2.cc @@ -92,11 +92,18 @@ void RtpStreamsSynchronizer::UpdateDelay() { log_stats = true; } + int64_t last_audio_receive_time_ms = + audio_measurement_.latest_receive_time_ms; absl::optional audio_info = syncable_audio_->GetInfo(); if (!audio_info || !UpdateMeasurements(&audio_measurement_, *audio_info)) { return; } + if (last_audio_receive_time_ms == audio_measurement_.latest_receive_time_ms) { + // No new audio packet has been received since last update. + return; + } + int64_t last_video_receive_ms = video_measurement_.latest_receive_time_ms; absl::optional video_info = syncable_video_->GetInfo(); if (!video_info || !UpdateMeasurements(&video_measurement_, *video_info)) { @@ -147,8 +154,12 @@ void RtpStreamsSynchronizer::UpdateDelay() { << "target_delay_ms: " << target_video_delay_ms << "} "; } - syncable_audio_->SetMinimumPlayoutDelay(target_audio_delay_ms); - syncable_video_->SetMinimumPlayoutDelay(target_video_delay_ms); + if (!syncable_audio_->SetMinimumPlayoutDelay(target_audio_delay_ms)) { + sync_->ReduceAudioDelay(); + } + if (!syncable_video_->SetMinimumPlayoutDelay(target_video_delay_ms)) { + sync_->ReduceVideoDelay(); + } } // TODO(https://bugs.webrtc.org/7065): Move RtpToNtpEstimator out of diff --git a/video/rtp_streams_synchronizer2.h b/video/rtp_streams_synchronizer2.h index 6a522e801d..192378aba7 100644 --- a/video/rtp_streams_synchronizer2.h +++ b/video/rtp_streams_synchronizer2.h @@ -13,7 +13,8 @@ #include -#include "rtc_base/synchronization/sequence_checker.h" +#include "api/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" #include "rtc_base/task_utils/repeating_task.h" #include "video/stream_synchronization.h" @@ -54,7 +55,7 @@ class RtpStreamsSynchronizer { // we might be running on an rtc::Thread implementation of TaskQueue, which // does not consistently set itself as the active TaskQueue. 
// Instead, we rely on a SequenceChecker for now. - SequenceChecker main_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker main_checker_; Syncable* const syncable_video_; diff --git a/video/rtp_video_stream_receiver.cc b/video/rtp_video_stream_receiver.cc index 53fd637119..a0520cd350 100644 --- a/video/rtp_video_stream_receiver.cc +++ b/video/rtp_video_stream_receiver.cc @@ -25,7 +25,6 @@ #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/rtp_rtcp/include/receive_statistics.h" #include "modules/rtp_rtcp/include/rtp_cvo.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/ulpfec_receiver.h" #include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h" #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" @@ -35,13 +34,14 @@ #include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h" #include "modules/utility/include/process_thread.h" +#include "modules/video_coding/deprecated/nack_module.h" #include "modules/video_coding/frame_object.h" #include "modules/video_coding/h264_sprop_parameter_sets.h" #include "modules/video_coding/h264_sps_pps_tracker.h" -#include "modules/video_coding/nack_module.h" #include "modules/video_coding/packet_buffer.h" #include "rtc_base/checks.h" #include "rtc_base/location.h" @@ -84,8 +84,9 @@ std::unique_ptr CreateRtpRtcpModule( RtcpRttStats* rtt_stats, RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer, RtcpCnameCallback* rtcp_cname_callback, + bool non_sender_rtt_measurement, uint32_t local_ssrc) { - RtpRtcp::Configuration configuration; + RtpRtcpInterface::Configuration configuration; configuration.clock = clock; configuration.audio = false; 
configuration.receiver_only = true; @@ -96,8 +97,9 @@ std::unique_ptr CreateRtpRtcpModule( rtcp_packet_type_counter_observer; configuration.rtcp_cname_callback = rtcp_cname_callback; configuration.local_media_ssrc = local_ssrc; + configuration.non_sender_rtt_measurement = non_sender_rtt_measurement; - std::unique_ptr rtp_rtcp = RtpRtcp::Create(configuration); + std::unique_ptr rtp_rtcp = RtpRtcp::DEPRECATED_Create(configuration); rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound); return rtp_rtcp; @@ -121,7 +123,7 @@ RtpVideoStreamReceiver::RtcpFeedbackBuffer::RtcpFeedbackBuffer( } void RtpVideoStreamReceiver::RtcpFeedbackBuffer::RequestKeyFrame() { - rtc::CritScope lock(&cs_); + MutexLock lock(&mutex_); request_key_frame_ = true; } @@ -129,7 +131,7 @@ void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendNack( const std::vector& sequence_numbers, bool buffering_allowed) { RTC_DCHECK(!sequence_numbers.empty()); - rtc::CritScope lock(&cs_); + MutexLock lock(&mutex_); nack_sequence_numbers_.insert(nack_sequence_numbers_.end(), sequence_numbers.cbegin(), sequence_numbers.cend()); @@ -146,7 +148,7 @@ void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendLossNotification( bool decodability_flag, bool buffering_allowed) { RTC_DCHECK(buffering_allowed); - rtc::CritScope lock(&cs_); + MutexLock lock(&mutex_); RTC_DCHECK(!lntf_state_) << "SendLossNotification() called twice in a row with no call to " "SendBufferedRtcpFeedback() in between."; @@ -160,7 +162,7 @@ void RtpVideoStreamReceiver::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() { RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumedRtcpFeedback RtpVideoStreamReceiver::RtcpFeedbackBuffer::ConsumeRtcpFeedback() { - rtc::CritScope lock(&cs_); + MutexLock lock(&mutex_); return ConsumeRtcpFeedbackLocked(); } @@ -208,7 +210,7 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver( ProcessThread* process_thread, NackSender* nack_sender, KeyFrameRequestSender* keyframe_request_sender, - video_coding::OnCompleteFrameCallback* 
complete_frame_callback, + OnCompleteFrameCallback* complete_frame_callback, rtc::scoped_refptr frame_decryptor, rtc::scoped_refptr frame_transformer) : RtpVideoStreamReceiver(clock, @@ -238,7 +240,7 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver( ProcessThread* process_thread, NackSender* nack_sender, KeyFrameRequestSender* keyframe_request_sender, - video_coding::OnCompleteFrameCallback* complete_frame_callback, + OnCompleteFrameCallback* complete_frame_callback, rtc::scoped_refptr frame_decryptor, rtc::scoped_refptr frame_transformer) : clock_(clock), @@ -255,22 +257,25 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver( config->rtp.extensions)), receiving_(false), last_packet_log_ms_(-1), - rtp_rtcp_(CreateRtpRtcpModule(clock, - rtp_receive_statistics_, - transport, - rtt_stats, - rtcp_packet_type_counter_observer, - rtcp_cname_callback, - config_.rtp.local_ssrc)), + rtp_rtcp_(CreateRtpRtcpModule( + clock, + rtp_receive_statistics_, + transport, + rtt_stats, + rtcp_packet_type_counter_observer, + rtcp_cname_callback, + config_.rtp.rtcp_xr.receiver_reference_time_report, + config_.rtp.local_ssrc)), complete_frame_callback_(complete_frame_callback), keyframe_request_sender_(keyframe_request_sender), // TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate // directly with |rtp_rtcp_|. 
rtcp_feedback_buffer_(this, nack_sender, this), - packet_buffer_(clock_, kPacketBufferStartSize, PacketBufferMaxSize()), + packet_buffer_(kPacketBufferStartSize, PacketBufferMaxSize()), + reference_finder_(std::make_unique()), has_received_frame_(false), frames_decryptable_(false), - absolute_capture_time_receiver_(clock) { + absolute_capture_time_interpolator_(clock) { constexpr bool remb_candidate = true; if (packet_router_) packet_router_->AddReceiveRtpModule(rtp_rtcp_.get(), remb_candidate); @@ -299,9 +304,6 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver( rtp_receive_statistics_->SetMaxReorderingThreshold( config_.rtp.rtx_ssrc, max_reordering_threshold); } - if (config_.rtp.rtcp_xr.receiver_reference_time_report) - rtp_rtcp_->SetRtcpXrRrtrStatus(true); - ParseFieldTrial( {&forced_playout_delay_max_ms_, &forced_playout_delay_min_ms_}, field_trial::FindFullName("WebRTC-ForcePlayoutDelay")); @@ -315,14 +317,11 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver( } if (config_.rtp.nack.rtp_history_ms != 0) { - nack_module_ = std::make_unique(clock_, &rtcp_feedback_buffer_, - &rtcp_feedback_buffer_); + nack_module_ = std::make_unique( + clock_, &rtcp_feedback_buffer_, &rtcp_feedback_buffer_); process_thread_->RegisterModule(nack_module_.get(), RTC_FROM_HERE); } - reference_finder_ = - std::make_unique(this); - // Only construct the encrypted receiver if frame encryption is enabled. 
if (config_.crypto_options.sframe.require_frame_encryption) { buffered_frame_decryptor_ = @@ -333,10 +332,10 @@ RtpVideoStreamReceiver::RtpVideoStreamReceiver( } if (frame_transformer) { - frame_transformer_delegate_ = new rtc::RefCountedObject< - RtpVideoStreamReceiverFrameTransformerDelegate>( - this, std::move(frame_transformer), rtc::Thread::Current(), - config_.rtp.remote_ssrc); + frame_transformer_delegate_ = + rtc::make_ref_counted( + this, std::move(frame_transformer), rtc::Thread::Current(), + config_.rtp.remote_ssrc); frame_transformer_delegate_->Init(); } } @@ -358,30 +357,38 @@ RtpVideoStreamReceiver::~RtpVideoStreamReceiver() { } void RtpVideoStreamReceiver::AddReceiveCodec( + uint8_t payload_type, const VideoCodec& video_codec, const std::map& codec_params, bool raw_payload) { + if (codec_params.count(cricket::kH264FmtpSpsPpsIdrInKeyframe) || + field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) { + MutexLock lock(&packet_buffer_lock_); + packet_buffer_.ForceSpsPpsIdrIsH264Keyframe(); + } payload_type_map_.emplace( - video_codec.plType, - raw_payload ? std::make_unique() - : CreateVideoRtpDepacketizer(video_codec.codecType)); - pt_codec_params_.emplace(video_codec.plType, codec_params); + payload_type, raw_payload + ? 
std::make_unique() + : CreateVideoRtpDepacketizer(video_codec.codecType)); + pt_codec_params_.emplace(payload_type, codec_params); } absl::optional RtpVideoStreamReceiver::GetSyncInfo() const { Syncable::Info info; if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs, - &info.capture_time_ntp_frac, nullptr, nullptr, + &info.capture_time_ntp_frac, + /*rtcp_arrival_time_secs=*/nullptr, + /*rtcp_arrival_time_frac=*/nullptr, &info.capture_time_source_clock) != 0) { return absl::nullopt; } { - rtc::CritScope lock(&sync_info_lock_); - if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) { + MutexLock lock(&sync_info_lock_); + if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_) { return absl::nullopt; } info.latest_received_capture_timestamp = *last_received_rtp_timestamp_; - info.latest_receive_time_ms = *last_received_rtp_system_time_ms_; + info.latest_receive_time_ms = last_received_rtp_system_time_->ms(); } // Leaves info.current_delay_ms uninitialized. @@ -498,26 +505,15 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData( const RtpPacketReceived& rtp_packet, const RTPVideoHeader& video) { RTC_DCHECK_RUN_ON(&worker_task_checker_); - auto packet = std::make_unique( - rtp_packet, video, ntp_estimator_.Estimate(rtp_packet.Timestamp()), - clock_->TimeInMilliseconds()); - - // Try to extrapolate absolute capture time if it is missing. - packet->packet_info.set_absolute_capture_time( - absolute_capture_time_receiver_.OnReceivePacket( - AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(), - packet->packet_info.csrcs()), - packet->packet_info.rtp_timestamp(), - // Assume frequency is the same one for all video frames. 
- kVideoPayloadTypeFrequency, - packet->packet_info.absolute_capture_time())); + + auto packet = + std::make_unique(rtp_packet, video); RTPVideoHeader& video_header = packet->video_header; video_header.rotation = kVideoRotation_0; video_header.content_type = VideoContentType::UNSPECIFIED; video_header.video_timing.flags = VideoSendTiming::kInvalid; video_header.is_last_packet_in_frame |= rtp_packet.Marker(); - video_header.frame_marking.temporal_id = kNoTemporalIdx; if (const auto* vp9_header = absl::get_if(&video_header.video_type_header)) { @@ -535,10 +531,15 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData( } else { rtp_packet.GetExtension(&video_header.playout_delay); } - rtp_packet.GetExtension(&video_header.frame_marking); ParseGenericDependenciesResult generic_descriptor_state = ParseGenericDependenciesExtension(rtp_packet, &video_header); + + if (!rtp_packet.recovered()) { + UpdatePacketReceiveTimestamps( + rtp_packet, video_header.frame_type == VideoFrameType::kVideoFrameKey); + } + if (generic_descriptor_state == kDropPacket) return; @@ -557,6 +558,8 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData( video_header.color_space = last_color_space_; } } + video_header.video_frame_tracking_id = + rtp_packet.GetExtension(); if (loss_notification_controller_) { if (rtp_packet.recovered()) { @@ -632,7 +635,35 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData( rtcp_feedback_buffer_.SendBufferedRtcpFeedback(); frame_counter_.Add(packet->timestamp); - OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet))); + video_coding::PacketBuffer::InsertResult insert_result; + { + MutexLock lock(&packet_buffer_lock_); + int64_t unwrapped_rtp_seq_num = + rtp_seq_num_unwrapper_.Unwrap(rtp_packet.SequenceNumber()); + auto& packet_info = + packet_infos_ + .emplace( + unwrapped_rtp_seq_num, + RtpPacketInfo( + rtp_packet.Ssrc(), rtp_packet.Csrcs(), + rtp_packet.Timestamp(), + /*audio_level=*/absl::nullopt, + rtp_packet.GetExtension(), + 
/*receive_time_ms=*/clock_->TimeInMilliseconds())) + .first->second; + + // Try to extrapolate absolute capture time if it is missing. + packet_info.set_absolute_capture_time( + absolute_capture_time_interpolator_.OnReceivePacket( + AbsoluteCaptureTimeInterpolator::GetSource(packet_info.ssrc(), + packet_info.csrcs()), + packet_info.rtp_timestamp(), + // Assume frequency is the same one for all video frames. + kVideoPayloadTypeFrequency, packet_info.absolute_capture_time())); + + insert_result = packet_buffer_.InsertPacket(std::move(packet)); + } + OnInsertedPacket(std::move(insert_result)); } void RtpVideoStreamReceiver::OnRecoveredPacket(const uint8_t* rtp_packet, @@ -665,35 +696,6 @@ void RtpVideoStreamReceiver::OnRtpPacket(const RtpPacketReceived& packet) { return; } - if (!packet.recovered()) { - // TODO(nisse): Exclude out-of-order packets? - int64_t now_ms = clock_->TimeInMilliseconds(); - { - rtc::CritScope cs(&sync_info_lock_); - last_received_rtp_timestamp_ = packet.Timestamp(); - last_received_rtp_system_time_ms_ = now_ms; - } - // Periodically log the RTP header of incoming packets. - if (now_ms - last_packet_log_ms_ > kPacketLogIntervalMs) { - rtc::StringBuilder ss; - ss << "Packet received on SSRC: " << packet.Ssrc() - << " with payload type: " << static_cast(packet.PayloadType()) - << ", timestamp: " << packet.Timestamp() - << ", sequence number: " << packet.SequenceNumber() - << ", arrival time: " << packet.arrival_time_ms(); - int32_t time_offset; - if (packet.GetExtension(&time_offset)) { - ss << ", toffset: " << time_offset; - } - uint32_t send_time; - if (packet.GetExtension(&send_time)) { - ss << ", abs send time: " << send_time; - } - RTC_LOG(LS_INFO) << ss.str(); - last_packet_log_ms_ = now_ms; - } - } - ReceivePacket(packet); // Update receive statistics after ReceivePacket. 
@@ -748,76 +750,100 @@ bool RtpVideoStreamReceiver::IsDecryptable() const { void RtpVideoStreamReceiver::OnInsertedPacket( video_coding::PacketBuffer::InsertResult result) { - video_coding::PacketBuffer::Packet* first_packet = nullptr; - int max_nack_count; - int64_t min_recv_time; - int64_t max_recv_time; - std::vector> payloads; - RtpPacketInfos::vector_type packet_infos; - - bool frame_boundary = true; - for (auto& packet : result.packets) { - // PacketBuffer promisses frame boundaries are correctly set on each - // packet. Document that assumption with the DCHECKs. - RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame()); - if (packet->is_first_packet_in_frame()) { - first_packet = packet.get(); - max_nack_count = packet->times_nacked; - min_recv_time = packet->packet_info.receive_time_ms(); - max_recv_time = packet->packet_info.receive_time_ms(); - payloads.clear(); - packet_infos.clear(); - } else { - max_nack_count = std::max(max_nack_count, packet->times_nacked); - min_recv_time = - std::min(min_recv_time, packet->packet_info.receive_time_ms()); - max_recv_time = - std::max(max_recv_time, packet->packet_info.receive_time_ms()); - } - payloads.emplace_back(packet->video_payload); - packet_infos.push_back(packet->packet_info); - - frame_boundary = packet->is_last_packet_in_frame(); - if (packet->is_last_packet_in_frame()) { - auto depacketizer_it = payload_type_map_.find(first_packet->payload_type); - RTC_CHECK(depacketizer_it != payload_type_map_.end()); - - rtc::scoped_refptr bitstream = - depacketizer_it->second->AssembleFrame(payloads); - if (!bitstream) { - // Failed to assemble a frame. Discard and continue. 
- continue; + std::vector> assembled_frames; + { + MutexLock lock(&packet_buffer_lock_); + video_coding::PacketBuffer::Packet* first_packet = nullptr; + int max_nack_count; + int64_t min_recv_time; + int64_t max_recv_time; + std::vector> payloads; + RtpPacketInfos::vector_type packet_infos; + + bool frame_boundary = true; + for (auto& packet : result.packets) { + // PacketBuffer promisses frame boundaries are correctly set on each + // packet. Document that assumption with the DCHECKs. + RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame()); + int64_t unwrapped_rtp_seq_num = + rtp_seq_num_unwrapper_.Unwrap(packet->seq_num); + RTC_DCHECK(packet_infos_.count(unwrapped_rtp_seq_num) > 0); + RtpPacketInfo& packet_info = packet_infos_[unwrapped_rtp_seq_num]; + if (packet->is_first_packet_in_frame()) { + first_packet = packet.get(); + max_nack_count = packet->times_nacked; + min_recv_time = packet_info.receive_time().ms(); + max_recv_time = packet_info.receive_time().ms(); + payloads.clear(); + packet_infos.clear(); + } else { + max_nack_count = std::max(max_nack_count, packet->times_nacked); + min_recv_time = + std::min(min_recv_time, packet_info.receive_time().ms()); + max_recv_time = + std::max(max_recv_time, packet_info.receive_time().ms()); } + payloads.emplace_back(packet->video_payload); + packet_infos.push_back(packet_info); + + frame_boundary = packet->is_last_packet_in_frame(); + if (packet->is_last_packet_in_frame()) { + auto depacketizer_it = + payload_type_map_.find(first_packet->payload_type); + RTC_CHECK(depacketizer_it != payload_type_map_.end()); + + rtc::scoped_refptr bitstream = + depacketizer_it->second->AssembleFrame(payloads); + if (!bitstream) { + // Failed to assemble a frame. Discard and continue. 
+ continue; + } + + const video_coding::PacketBuffer::Packet& last_packet = *packet; + assembled_frames.push_back(std::make_unique( + first_packet->seq_num, // + last_packet.seq_num, // + last_packet.marker_bit, // + max_nack_count, // + min_recv_time, // + max_recv_time, // + first_packet->timestamp, // + ntp_estimator_.Estimate(first_packet->timestamp), // + last_packet.video_header.video_timing, // + first_packet->payload_type, // + first_packet->codec(), // + last_packet.video_header.rotation, // + last_packet.video_header.content_type, // + first_packet->video_header, // + last_packet.video_header.color_space, // + RtpPacketInfos(std::move(packet_infos)), // + std::move(bitstream))); + } + } + RTC_DCHECK(frame_boundary); - const video_coding::PacketBuffer::Packet& last_packet = *packet; - OnAssembledFrame(std::make_unique( - first_packet->seq_num, // - last_packet.seq_num, // - last_packet.marker_bit, // - max_nack_count, // - min_recv_time, // - max_recv_time, // - first_packet->timestamp, // - first_packet->ntp_time_ms, // - last_packet.video_header.video_timing, // - first_packet->payload_type, // - first_packet->codec(), // - last_packet.video_header.rotation, // - last_packet.video_header.content_type, // - first_packet->video_header, // - last_packet.video_header.color_space, // - RtpPacketInfos(std::move(packet_infos)), // - std::move(bitstream))); + if (result.buffer_cleared) { + packet_infos_.clear(); } - } - RTC_DCHECK(frame_boundary); + } // packet_buffer_lock_ + if (result.buffer_cleared) { + { + MutexLock lock(&sync_info_lock_); + last_received_rtp_system_time_.reset(); + last_received_keyframe_rtp_system_time_.reset(); + last_received_keyframe_rtp_timestamp_.reset(); + } RequestKeyFrame(); } + + for (auto& frame : assembled_frames) { + OnAssembledFrame(std::move(frame)); + } } void RtpVideoStreamReceiver::OnAssembledFrame( - std::unique_ptr frame) { + std::unique_ptr frame) { RTC_DCHECK_RUN_ON(&network_tc_); RTC_DCHECK(frame); @@ -846,7 +872,7 @@ 
void RtpVideoStreamReceiver::OnAssembledFrame( has_received_frame_ = true; } - rtc::CritScope lock(&reference_finder_lock_); + MutexLock lock(&reference_finder_lock_); // Reset |reference_finder_| if |frame| is new and the codec have changed. if (current_codec_) { bool frame_is_newer = @@ -856,12 +882,10 @@ void RtpVideoStreamReceiver::OnAssembledFrame( if (frame_is_newer) { // When we reset the |reference_finder_| we don't want new picture ids // to overlap with old picture ids. To ensure that doesn't happen we - // start from the |last_completed_picture_id_| and add an offset in case - // of reordering. - reference_finder_ = - std::make_unique( - this, last_completed_picture_id_ + - std::numeric_limits::max()); + // start from the |last_completed_picture_id_| and add an offset in + // case of reordering. + reference_finder_ = std::make_unique( + last_completed_picture_id_ + std::numeric_limits::max()); current_codec_ = frame->codec_type(); } else { // Old frame from before the codec switch, discard it. 
@@ -882,28 +906,30 @@ void RtpVideoStreamReceiver::OnAssembledFrame( } else if (frame_transformer_delegate_) { frame_transformer_delegate_->TransformFrame(std::move(frame)); } else { - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } } -void RtpVideoStreamReceiver::OnCompleteFrame( - std::unique_ptr frame) { +void RtpVideoStreamReceiver::OnCompleteFrames( + RtpFrameReferenceFinder::ReturnVector frames) { { - rtc::CritScope lock(&last_seq_num_cs_); - video_coding::RtpFrameObject* rtp_frame = - static_cast(frame.get()); - last_seq_num_for_pic_id_[rtp_frame->id.picture_id] = - rtp_frame->last_seq_num(); + MutexLock lock(&last_seq_num_mutex_); + for (const auto& frame : frames) { + RtpFrameObject* rtp_frame = static_cast(frame.get()); + last_seq_num_for_pic_id_[rtp_frame->Id()] = rtp_frame->last_seq_num(); + } + } + for (auto& frame : frames) { + last_completed_picture_id_ = + std::max(last_completed_picture_id_, frame->Id()); + complete_frame_callback_->OnCompleteFrame(std::move(frame)); } - last_completed_picture_id_ = - std::max(last_completed_picture_id_, frame->id.picture_id); - complete_frame_callback_->OnCompleteFrame(std::move(frame)); } void RtpVideoStreamReceiver::OnDecryptedFrame( - std::unique_ptr frame) { - rtc::CritScope lock(&reference_finder_lock_); - reference_finder_->ManageFrame(std::move(frame)); + std::unique_ptr frame) { + MutexLock lock(&reference_finder_lock_); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } void RtpVideoStreamReceiver::OnDecryptionStatusChange( @@ -927,7 +953,7 @@ void RtpVideoStreamReceiver::SetDepacketizerToDecoderFrameTransformer( rtc::scoped_refptr frame_transformer) { RTC_DCHECK_RUN_ON(&network_tc_); frame_transformer_delegate_ = - new rtc::RefCountedObject( + rtc::make_ref_counted( this, std::move(frame_transformer), rtc::Thread::Current(), config_.rtp.remote_ssrc); frame_transformer_delegate_->Init(); @@ -939,12 +965,21 @@ 
void RtpVideoStreamReceiver::UpdateRtt(int64_t max_rtt_ms) { } absl::optional RtpVideoStreamReceiver::LastReceivedPacketMs() const { - return packet_buffer_.LastReceivedPacketMs(); + MutexLock lock(&sync_info_lock_); + if (last_received_rtp_system_time_) { + return absl::optional(last_received_rtp_system_time_->ms()); + } + return absl::nullopt; } absl::optional RtpVideoStreamReceiver::LastReceivedKeyframePacketMs() const { - return packet_buffer_.LastReceivedKeyframePacketMs(); + MutexLock lock(&sync_info_lock_); + if (last_received_keyframe_rtp_system_time_) { + return absl::optional( + last_received_keyframe_rtp_system_time_->ms()); + } + return absl::nullopt; } void RtpVideoStreamReceiver::AddSecondarySink(RtpPacketSinkInterface* sink) { @@ -968,9 +1003,9 @@ void RtpVideoStreamReceiver::RemoveSecondarySink( } void RtpVideoStreamReceiver::ManageFrame( - std::unique_ptr frame) { - rtc::CritScope lock(&reference_finder_lock_); - reference_finder_->ManageFrame(std::move(frame)); + std::unique_ptr frame) { + MutexLock lock(&reference_finder_lock_); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) { @@ -1024,10 +1059,17 @@ void RtpVideoStreamReceiver::ParseAndHandleEncapsulatingHeader( // correctly calculate frame references. 
void RtpVideoStreamReceiver::NotifyReceiverOfEmptyPacket(uint16_t seq_num) { { - rtc::CritScope lock(&reference_finder_lock_); - reference_finder_->PaddingReceived(seq_num); + MutexLock lock(&reference_finder_lock_); + OnCompleteFrames(reference_finder_->PaddingReceived(seq_num)); + } + + video_coding::PacketBuffer::InsertResult insert_result; + { + MutexLock lock(&packet_buffer_lock_); + insert_result = packet_buffer_.InsertPadding(seq_num); } - OnInsertedPacket(packet_buffer_.InsertPadding(seq_num)); + OnInsertedPacket(std::move(insert_result)); + if (nack_module_) { nack_module_->OnReceivedPacket(seq_num, /* is_keyframe = */ false, /* is _recovered = */ false); @@ -1074,7 +1116,7 @@ bool RtpVideoStreamReceiver::DeliverRtcp(const uint8_t* rtcp_packet, absl::optional remote_to_local_clock_offset_ms = ntp_estimator_.EstimateRemoteToLocalClockOffsetMs(); if (remote_to_local_clock_offset_ms.has_value()) { - absolute_capture_time_receiver_.SetRemoteToLocalClockOffset( + capture_clock_offset_updater_.SetRemoteToLocalClockOffset( Int64MsToQ32x32(*remote_to_local_clock_offset_ms)); } } @@ -1088,7 +1130,7 @@ void RtpVideoStreamReceiver::FrameContinuous(int64_t picture_id) { int seq_num = -1; { - rtc::CritScope lock(&last_seq_num_cs_); + MutexLock lock(&last_seq_num_mutex_); auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id); if (seq_num_it != last_seq_num_for_pic_id_.end()) seq_num = seq_num_it->second; @@ -1100,7 +1142,7 @@ void RtpVideoStreamReceiver::FrameContinuous(int64_t picture_id) { void RtpVideoStreamReceiver::FrameDecoded(int64_t picture_id) { int seq_num = -1; { - rtc::CritScope lock(&last_seq_num_cs_); + MutexLock lock(&last_seq_num_mutex_); auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id); if (seq_num_it != last_seq_num_for_pic_id_.end()) { seq_num = seq_num_it->second; @@ -1109,8 +1151,14 @@ void RtpVideoStreamReceiver::FrameDecoded(int64_t picture_id) { } } if (seq_num != -1) { - packet_buffer_.ClearTo(seq_num); - rtc::CritScope 
lock(&reference_finder_lock_); + { + MutexLock lock(&packet_buffer_lock_); + packet_buffer_.ClearTo(seq_num); + int64_t unwrapped_rtp_seq_num = rtp_seq_num_unwrapper_.Unwrap(seq_num); + packet_infos_.erase(packet_infos_.begin(), + packet_infos_.upper_bound(unwrapped_rtp_seq_num)); + } + MutexLock lock(&reference_finder_lock_); reference_finder_->ClearTo(seq_num); } } @@ -1180,4 +1228,40 @@ void RtpVideoStreamReceiver::InsertSpsPpsIntoTracker(uint8_t payload_type) { sprop_decoder.pps_nalu()); } +void RtpVideoStreamReceiver::UpdatePacketReceiveTimestamps( + const RtpPacketReceived& packet, + bool is_keyframe) { + Timestamp now = clock_->CurrentTime(); + { + MutexLock lock(&sync_info_lock_); + if (is_keyframe || + last_received_keyframe_rtp_timestamp_ == packet.Timestamp()) { + last_received_keyframe_rtp_timestamp_ = packet.Timestamp(); + last_received_keyframe_rtp_system_time_ = now; + } + last_received_rtp_system_time_ = now; + last_received_rtp_timestamp_ = packet.Timestamp(); + } + + // Periodically log the RTP header of incoming packets. 
+ if (now.ms() - last_packet_log_ms_ > kPacketLogIntervalMs) { + rtc::StringBuilder ss; + ss << "Packet received on SSRC: " << packet.Ssrc() + << " with payload type: " << static_cast(packet.PayloadType()) + << ", timestamp: " << packet.Timestamp() + << ", sequence number: " << packet.SequenceNumber() + << ", arrival time: " << ToString(packet.arrival_time()); + int32_t time_offset; + if (packet.GetExtension(&time_offset)) { + ss << ", toffset: " << time_offset; + } + uint32_t send_time; + if (packet.GetExtension(&send_time)) { + ss << ", abs send time: " << send_time; + } + RTC_LOG(LS_INFO) << ss.str(); + last_packet_log_ms_ = now.ms(); + } +} + } // namespace webrtc diff --git a/video/rtp_video_stream_receiver.h b/video/rtp_video_stream_receiver.h index 902c118b8c..b3d62f34a4 100644 --- a/video/rtp_video_stream_receiver.h +++ b/video/rtp_video_stream_receiver.h @@ -21,6 +21,8 @@ #include "absl/types/optional.h" #include "api/array_view.h" #include "api/crypto/frame_decryptor_interface.h" +#include "api/sequence_checker.h" +#include "api/units/timestamp.h" #include "api/video/color_space.h" #include "api/video_codecs/video_codec.h" #include "call/rtp_packet_sink_interface.h" @@ -31,7 +33,8 @@ #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h" +#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h" +#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h" #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" #include "modules/rtp_rtcp/source/rtp_video_header.h" @@ -42,18 +45,17 @@ #include "modules/video_coding/rtp_frame_reference_finder.h" #include "modules/video_coding/unique_timestamp_counter.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include 
"rtc_base/experiments/field_trial_parser.h" #include "rtc_base/numerics/sequence_number_util.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "video/buffered_frame_decryptor.h" #include "video/rtp_video_stream_receiver_frame_transformer_delegate.h" namespace webrtc { -class NackModule; +class DEPRECATED_NackModule; class PacketRouter; class ProcessThread; class ReceiveStatistics; @@ -67,11 +69,18 @@ class RtpVideoStreamReceiver : public LossNotificationSender, public RecoveredPacketReceiver, public RtpPacketSinkInterface, public KeyFrameRequestSender, - public video_coding::OnCompleteFrameCallback, public OnDecryptedFrameCallback, public OnDecryptionStatusChangeCallback, public RtpVideoFrameReceiver { public: + // A complete frame is a frame which has received all its packets and all its + // references are known. + class OnCompleteFrameCallback { + public: + virtual ~OnCompleteFrameCallback() {} + virtual void OnCompleteFrame(std::unique_ptr frame) = 0; + }; + // DEPRECATED due to dependency on ReceiveStatisticsProxy. RtpVideoStreamReceiver( Clock* clock, @@ -89,7 +98,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender, // The KeyFrameRequestSender is optional; if not provided, key frame // requests are sent via the internal RtpRtcp module. KeyFrameRequestSender* keyframe_request_sender, - video_coding::OnCompleteFrameCallback* complete_frame_callback, + OnCompleteFrameCallback* complete_frame_callback, rtc::scoped_refptr frame_decryptor, rtc::scoped_refptr frame_transformer); @@ -110,12 +119,13 @@ class RtpVideoStreamReceiver : public LossNotificationSender, // The KeyFrameRequestSender is optional; if not provided, key frame // requests are sent via the internal RtpRtcp module. 
KeyFrameRequestSender* keyframe_request_sender, - video_coding::OnCompleteFrameCallback* complete_frame_callback, + OnCompleteFrameCallback* complete_frame_callback, rtc::scoped_refptr frame_decryptor, rtc::scoped_refptr frame_transformer); ~RtpVideoStreamReceiver() override; - void AddReceiveCodec(const VideoCodec& video_codec, + void AddReceiveCodec(uint8_t payload_type, + const VideoCodec& video_codec, const std::map& codec_params, bool raw_payload); @@ -142,8 +152,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender, // Implements RtpPacketSinkInterface. void OnRtpPacket(const RtpPacketReceived& packet) override; - // TODO(philipel): Stop using VCMPacket in the new jitter buffer and then - // remove this function. Public only for tests. + // Public only for tests. void OnReceivedPayloadData(rtc::CopyOnWriteBuffer codec_payload, const RtpPacketReceived& rtp_packet, const RTPVideoHeader& video); @@ -171,13 +180,10 @@ class RtpVideoStreamReceiver : public LossNotificationSender, // Don't use, still experimental. void RequestPacketRetransmit(const std::vector& sequence_numbers); - // Implements OnCompleteFrameCallback. - void OnCompleteFrame( - std::unique_ptr frame) override; + void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frames); // Implements OnDecryptedFrameCallback. - void OnDecryptedFrame( - std::unique_ptr frame) override; + void OnDecryptedFrame(std::unique_ptr frame) override; // Implements OnDecryptionStatusChangeCallback. void OnDecryptionStatusChange( @@ -208,8 +214,7 @@ class RtpVideoStreamReceiver : public LossNotificationSender, private: // Implements RtpVideoFrameReceiver. - void ManageFrame( - std::unique_ptr frame) override; + void ManageFrame(std::unique_ptr frame) override; // Used for buffering RTCP feedback messages and sending them all together. 
// Note: @@ -228,21 +233,21 @@ class RtpVideoStreamReceiver : public LossNotificationSender, ~RtcpFeedbackBuffer() override = default; // KeyFrameRequestSender implementation. - void RequestKeyFrame() RTC_LOCKS_EXCLUDED(cs_) override; + void RequestKeyFrame() RTC_LOCKS_EXCLUDED(mutex_) override; // NackSender implementation. void SendNack(const std::vector& sequence_numbers, - bool buffering_allowed) RTC_LOCKS_EXCLUDED(cs_) override; + bool buffering_allowed) RTC_LOCKS_EXCLUDED(mutex_) override; // LossNotificationSender implementation. void SendLossNotification(uint16_t last_decoded_seq_num, uint16_t last_received_seq_num, bool decodability_flag, bool buffering_allowed) - RTC_LOCKS_EXCLUDED(cs_) override; + RTC_LOCKS_EXCLUDED(mutex_) override; // Send all RTCP feedback messages buffered thus far. - void SendBufferedRtcpFeedback() RTC_LOCKS_EXCLUDED(cs_); + void SendBufferedRtcpFeedback() RTC_LOCKS_EXCLUDED(mutex_); private: // LNTF-related state. @@ -264,10 +269,10 @@ class RtpVideoStreamReceiver : public LossNotificationSender, absl::optional lntf_state; }; - ConsumedRtcpFeedback ConsumeRtcpFeedback() RTC_LOCKS_EXCLUDED(cs_); + ConsumedRtcpFeedback ConsumeRtcpFeedback() RTC_LOCKS_EXCLUDED(mutex_); ConsumedRtcpFeedback ConsumeRtcpFeedbackLocked() - RTC_EXCLUSIVE_LOCKS_REQUIRED(cs_); - // This method is called both with and without cs_ held. + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + // This method is called both with and without mutex_ held. void SendRtcpFeedback(ConsumedRtcpFeedback feedback); KeyFrameRequestSender* const key_frame_request_sender_; @@ -275,15 +280,15 @@ class RtpVideoStreamReceiver : public LossNotificationSender, LossNotificationSender* const loss_notification_sender_; // NACKs are accessible from two threads due to nack_module_ being a module. - rtc::CriticalSection cs_; + Mutex mutex_; // Key-frame-request-related state. - bool request_key_frame_ RTC_GUARDED_BY(cs_); + bool request_key_frame_ RTC_GUARDED_BY(mutex_); // NACK-related state. 
- std::vector nack_sequence_numbers_ RTC_GUARDED_BY(cs_); + std::vector nack_sequence_numbers_ RTC_GUARDED_BY(mutex_); - absl::optional lntf_state_ RTC_GUARDED_BY(cs_); + absl::optional lntf_state_ RTC_GUARDED_BY(mutex_); }; enum ParseGenericDependenciesResult { kDropPacket, @@ -305,7 +310,11 @@ class RtpVideoStreamReceiver : public LossNotificationSender, ParseGenericDependenciesResult ParseGenericDependenciesExtension( const RtpPacketReceived& rtp_packet, RTPVideoHeader* video_header) RTC_RUN_ON(worker_task_checker_); - void OnAssembledFrame(std::unique_ptr frame); + void OnAssembledFrame(std::unique_ptr frame) + RTC_LOCKS_EXCLUDED(packet_buffer_lock_); + void UpdatePacketReceiveTimestamps(const RtpPacketReceived& packet, + bool is_keyframe) + RTC_RUN_ON(worker_task_checker_); Clock* const clock_; // Ownership of this object lies with VideoReceiveStream, which owns |this|. @@ -323,20 +332,21 @@ class RtpVideoStreamReceiver : public LossNotificationSender, ReceiveStatistics* const rtp_receive_statistics_; std::unique_ptr ulpfec_receiver_; - SequenceChecker worker_task_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_task_checker_; bool receiving_ RTC_GUARDED_BY(worker_task_checker_); int64_t last_packet_log_ms_ RTC_GUARDED_BY(worker_task_checker_); const std::unique_ptr rtp_rtcp_; - video_coding::OnCompleteFrameCallback* complete_frame_callback_; + OnCompleteFrameCallback* complete_frame_callback_; KeyFrameRequestSender* const keyframe_request_sender_; RtcpFeedbackBuffer rtcp_feedback_buffer_; - std::unique_ptr nack_module_; + std::unique_ptr nack_module_; std::unique_ptr loss_notification_controller_; - video_coding::PacketBuffer packet_buffer_; + mutable Mutex packet_buffer_lock_; + video_coding::PacketBuffer packet_buffer_ RTC_GUARDED_BY(packet_buffer_lock_); UniqueTimestampCounter frame_counter_ RTC_GUARDED_BY(worker_task_checker_); SeqNumUnwrapper frame_id_unwrapper_ RTC_GUARDED_BY(worker_task_checker_); @@ -351,15 +361,15 @@ class 
RtpVideoStreamReceiver : public LossNotificationSender, absl::optional video_structure_frame_id_ RTC_GUARDED_BY(worker_task_checker_); - rtc::CriticalSection reference_finder_lock_; - std::unique_ptr reference_finder_ + Mutex reference_finder_lock_; + std::unique_ptr reference_finder_ RTC_GUARDED_BY(reference_finder_lock_); absl::optional current_codec_; uint32_t last_assembled_frame_rtp_timestamp_; - rtc::CriticalSection last_seq_num_cs_; + Mutex last_seq_num_mutex_; std::map last_seq_num_for_pic_id_ - RTC_GUARDED_BY(last_seq_num_cs_); + RTC_GUARDED_BY(last_seq_num_mutex_); video_coding::H264SpsPpsTracker tracker_; // Maps payload id to the depacketizer. @@ -378,15 +388,19 @@ class RtpVideoStreamReceiver : public LossNotificationSender, // Info for GetSyncInfo is updated on network or worker thread, and queried on // the worker thread. - rtc::CriticalSection sync_info_lock_; + mutable Mutex sync_info_lock_; absl::optional last_received_rtp_timestamp_ RTC_GUARDED_BY(sync_info_lock_); - absl::optional last_received_rtp_system_time_ms_ + absl::optional last_received_keyframe_rtp_timestamp_ + RTC_GUARDED_BY(sync_info_lock_); + absl::optional last_received_rtp_system_time_ + RTC_GUARDED_BY(sync_info_lock_); + absl::optional last_received_keyframe_rtp_system_time_ RTC_GUARDED_BY(sync_info_lock_); // Used to validate the buffered frame decryptor is always run on the correct // thread. - rtc::ThreadChecker network_tc_; + SequenceChecker network_tc_; // Handles incoming encrypted frames and forwards them to the // rtp_reference_finder if they are decryptable. 
std::unique_ptr buffered_frame_decryptor_ @@ -394,13 +408,21 @@ class RtpVideoStreamReceiver : public LossNotificationSender, std::atomic frames_decryptable_; absl::optional last_color_space_; - AbsoluteCaptureTimeReceiver absolute_capture_time_receiver_ + AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_ + RTC_GUARDED_BY(worker_task_checker_); + + CaptureClockOffsetUpdater capture_clock_offset_updater_ RTC_GUARDED_BY(worker_task_checker_); int64_t last_completed_picture_id_ = 0; rtc::scoped_refptr frame_transformer_delegate_; + + SeqNumUnwrapper rtp_seq_num_unwrapper_ + RTC_GUARDED_BY(packet_buffer_lock_); + std::map packet_infos_ + RTC_GUARDED_BY(packet_buffer_lock_); }; } // namespace webrtc diff --git a/video/rtp_video_stream_receiver2.cc b/video/rtp_video_stream_receiver2.cc index 98351c49cf..4b43247b18 100644 --- a/video/rtp_video_stream_receiver2.cc +++ b/video/rtp_video_stream_receiver2.cc @@ -25,7 +25,6 @@ #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/rtp_rtcp/include/receive_statistics.h" #include "modules/rtp_rtcp/include/rtp_cvo.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/ulpfec_receiver.h" #include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h" #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" @@ -37,7 +36,6 @@ #include "modules/rtp_rtcp/source/rtp_rtcp_config.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer_raw.h" -#include "modules/utility/include/process_thread.h" #include "modules/video_coding/frame_object.h" #include "modules/video_coding/h264_sprop_parameter_sets.h" #include "modules/video_coding/h264_sps_pps_tracker.h" @@ -50,7 +48,6 @@ #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" #include "system_wrappers/include/ntp_time.h" -#include "video/receive_statistics_proxy2.h" namespace 
webrtc { @@ -77,15 +74,16 @@ int PacketBufferMaxSize() { return packet_buffer_max_size; } -std::unique_ptr CreateRtpRtcpModule( +std::unique_ptr CreateRtpRtcpModule( Clock* clock, ReceiveStatistics* receive_statistics, Transport* outgoing_transport, RtcpRttStats* rtt_stats, RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer, RtcpCnameCallback* rtcp_cname_callback, + bool non_sender_rtt_measurement, uint32_t local_ssrc) { - RtpRtcp::Configuration configuration; + RtpRtcpInterface::Configuration configuration; configuration.clock = clock; configuration.audio = false; configuration.receiver_only = true; @@ -96,14 +94,31 @@ std::unique_ptr CreateRtpRtcpModule( rtcp_packet_type_counter_observer; configuration.rtcp_cname_callback = rtcp_cname_callback; configuration.local_media_ssrc = local_ssrc; + configuration.non_sender_rtt_measurement = non_sender_rtt_measurement; - std::unique_ptr rtp_rtcp = RtpRtcp::Create(configuration); + std::unique_ptr rtp_rtcp = + ModuleRtpRtcpImpl2::Create(configuration); rtp_rtcp->SetRTCPStatus(RtcpMode::kCompound); return rtp_rtcp; } +std::unique_ptr MaybeConstructNackModule( + TaskQueueBase* current_queue, + const VideoReceiveStream::Config& config, + Clock* clock, + NackSender* nack_sender, + KeyFrameRequestSender* keyframe_request_sender) { + if (config.rtp.nack.rtp_history_ms == 0) + return nullptr; + + // TODO(bugs.webrtc.org/12420): pass rtp_history_ms to the nack module. 
+ return std::make_unique(current_queue, clock, nack_sender, + keyframe_request_sender); +} + static const int kPacketLogIntervalMs = 10000; + } // namespace RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RtcpFeedbackBuffer( @@ -117,25 +132,26 @@ RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RtcpFeedbackBuffer( RTC_DCHECK(key_frame_request_sender_); RTC_DCHECK(nack_sender_); RTC_DCHECK(loss_notification_sender_); + packet_sequence_checker_.Detach(); } void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::RequestKeyFrame() { - rtc::CritScope lock(&cs_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); request_key_frame_ = true; } void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendNack( const std::vector& sequence_numbers, bool buffering_allowed) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); RTC_DCHECK(!sequence_numbers.empty()); - rtc::CritScope lock(&cs_); nack_sequence_numbers_.insert(nack_sequence_numbers_.end(), sequence_numbers.cbegin(), sequence_numbers.cend()); if (!buffering_allowed) { // Note that while *buffering* is not allowed, *batching* is, meaning that // previously buffered messages may be sent along with the current message. 
- SendRtcpFeedback(ConsumeRtcpFeedbackLocked()); + SendBufferedRtcpFeedback(); } } @@ -144,8 +160,8 @@ void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendLossNotification( uint16_t last_received_seq_num, bool decodability_flag, bool buffering_allowed) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); RTC_DCHECK(buffering_allowed); - rtc::CritScope lock(&cs_); RTC_DCHECK(!lntf_state_) << "SendLossNotification() called twice in a row with no call to " "SendBufferedRtcpFeedback() in between."; @@ -154,48 +170,38 @@ void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendLossNotification( } void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendBufferedRtcpFeedback() { - SendRtcpFeedback(ConsumeRtcpFeedback()); -} + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); -RtpVideoStreamReceiver2::RtcpFeedbackBuffer::ConsumedRtcpFeedback -RtpVideoStreamReceiver2::RtcpFeedbackBuffer::ConsumeRtcpFeedback() { - rtc::CritScope lock(&cs_); - return ConsumeRtcpFeedbackLocked(); -} + bool request_key_frame = false; + std::vector nack_sequence_numbers; + absl::optional lntf_state; -RtpVideoStreamReceiver2::RtcpFeedbackBuffer::ConsumedRtcpFeedback -RtpVideoStreamReceiver2::RtcpFeedbackBuffer::ConsumeRtcpFeedbackLocked() { - ConsumedRtcpFeedback feedback; - std::swap(feedback.request_key_frame, request_key_frame_); - std::swap(feedback.nack_sequence_numbers, nack_sequence_numbers_); - std::swap(feedback.lntf_state, lntf_state_); - return feedback; -} + std::swap(request_key_frame, request_key_frame_); + std::swap(nack_sequence_numbers, nack_sequence_numbers_); + std::swap(lntf_state, lntf_state_); -void RtpVideoStreamReceiver2::RtcpFeedbackBuffer::SendRtcpFeedback( - ConsumedRtcpFeedback feedback) { - if (feedback.lntf_state) { + if (lntf_state) { // If either a NACK or a key frame request is sent, we should buffer // the LNTF and wait for them (NACK or key frame request) to trigger // the compound feedback message. // Otherwise, the LNTF should be sent out immediately. 
const bool buffering_allowed = - feedback.request_key_frame || !feedback.nack_sequence_numbers.empty(); + request_key_frame || !nack_sequence_numbers.empty(); loss_notification_sender_->SendLossNotification( - feedback.lntf_state->last_decoded_seq_num, - feedback.lntf_state->last_received_seq_num, - feedback.lntf_state->decodability_flag, buffering_allowed); + lntf_state->last_decoded_seq_num, lntf_state->last_received_seq_num, + lntf_state->decodability_flag, buffering_allowed); } - if (feedback.request_key_frame) { + if (request_key_frame) { key_frame_request_sender_->RequestKeyFrame(); - } else if (!feedback.nack_sequence_numbers.empty()) { - nack_sender_->SendNack(feedback.nack_sequence_numbers, true); + } else if (!nack_sequence_numbers.empty()) { + nack_sender_->SendNack(nack_sequence_numbers, true); } } RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( + TaskQueueBase* current_queue, Clock* clock, Transport* transport, RtcpRttStats* rtt_stats, @@ -204,16 +210,14 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( ReceiveStatistics* rtp_receive_statistics, RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer, RtcpCnameCallback* rtcp_cname_callback, - ProcessThread* process_thread, NackSender* nack_sender, KeyFrameRequestSender* keyframe_request_sender, - video_coding::OnCompleteFrameCallback* complete_frame_callback, + OnCompleteFrameCallback* complete_frame_callback, rtc::scoped_refptr frame_decryptor, rtc::scoped_refptr frame_transformer) : clock_(clock), config_(*config), packet_router_(packet_router), - process_thread_(process_thread), ntp_estimator_(clock), rtp_header_extensions_(config_.rtp.extensions), forced_playout_delay_max_ms_("max_ms", absl::nullopt), @@ -224,22 +228,31 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( config->rtp.extensions)), receiving_(false), last_packet_log_ms_(-1), - rtp_rtcp_(CreateRtpRtcpModule(clock, - rtp_receive_statistics_, - transport, - rtt_stats, - rtcp_packet_type_counter_observer, - 
rtcp_cname_callback, - config_.rtp.local_ssrc)), + rtp_rtcp_(CreateRtpRtcpModule( + clock, + rtp_receive_statistics_, + transport, + rtt_stats, + rtcp_packet_type_counter_observer, + rtcp_cname_callback, + config_.rtp.rtcp_xr.receiver_reference_time_report, + config_.rtp.local_ssrc)), complete_frame_callback_(complete_frame_callback), keyframe_request_sender_(keyframe_request_sender), // TODO(bugs.webrtc.org/10336): Let |rtcp_feedback_buffer_| communicate // directly with |rtp_rtcp_|. rtcp_feedback_buffer_(this, nack_sender, this), - packet_buffer_(clock_, kPacketBufferStartSize, PacketBufferMaxSize()), + nack_module_(MaybeConstructNackModule(current_queue, + config_, + clock_, + &rtcp_feedback_buffer_, + &rtcp_feedback_buffer_)), + packet_buffer_(kPacketBufferStartSize, PacketBufferMaxSize()), + reference_finder_(std::make_unique()), has_received_frame_(false), frames_decryptable_(false), - absolute_capture_time_receiver_(clock) { + absolute_capture_time_interpolator_(clock) { + packet_sequence_checker_.Detach(); constexpr bool remb_candidate = true; if (packet_router_) packet_router_->AddReceiveRtpModule(rtp_rtcp_.get(), remb_candidate); @@ -268,30 +281,17 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( rtp_receive_statistics_->SetMaxReorderingThreshold( config_.rtp.rtx_ssrc, max_reordering_threshold); } - if (config_.rtp.rtcp_xr.receiver_reference_time_report) - rtp_rtcp_->SetRtcpXrRrtrStatus(true); ParseFieldTrial( {&forced_playout_delay_max_ms_, &forced_playout_delay_min_ms_}, field_trial::FindFullName("WebRTC-ForcePlayoutDelay")); - process_thread_->RegisterModule(rtp_rtcp_.get(), RTC_FROM_HERE); - if (config_.rtp.lntf.enabled) { loss_notification_controller_ = std::make_unique(&rtcp_feedback_buffer_, &rtcp_feedback_buffer_); } - if (config_.rtp.nack.rtp_history_ms != 0) { - nack_module_ = std::make_unique(clock_, &rtcp_feedback_buffer_, - &rtcp_feedback_buffer_); - process_thread_->RegisterModule(nack_module_.get(), RTC_FROM_HERE); - } - - 
reference_finder_ = - std::make_unique(this); - // Only construct the encrypted receiver if frame encryption is enabled. if (config_.crypto_options.sframe.require_frame_encryption) { buffered_frame_decryptor_ = @@ -302,23 +302,15 @@ RtpVideoStreamReceiver2::RtpVideoStreamReceiver2( } if (frame_transformer) { - frame_transformer_delegate_ = new rtc::RefCountedObject< - RtpVideoStreamReceiverFrameTransformerDelegate>( - this, std::move(frame_transformer), rtc::Thread::Current(), - config_.rtp.remote_ssrc); + frame_transformer_delegate_ = + rtc::make_ref_counted( + this, std::move(frame_transformer), rtc::Thread::Current(), + config_.rtp.remote_ssrc); frame_transformer_delegate_->Init(); } } RtpVideoStreamReceiver2::~RtpVideoStreamReceiver2() { - RTC_DCHECK(secondary_sinks_.empty()); - - if (nack_module_) { - process_thread_->DeRegisterModule(nack_module_.get()); - } - - process_thread_->DeRegisterModule(rtp_rtcp_.get()); - if (packet_router_) packet_router_->RemoveReceiveRtpModule(rtp_rtcp_.get()); UpdateHistograms(); @@ -327,36 +319,44 @@ RtpVideoStreamReceiver2::~RtpVideoStreamReceiver2() { } void RtpVideoStreamReceiver2::AddReceiveCodec( + uint8_t payload_type, const VideoCodec& video_codec, const std::map& codec_params, bool raw_payload) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + if (codec_params.count(cricket::kH264FmtpSpsPpsIdrInKeyframe) || + field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) { + packet_buffer_.ForceSpsPpsIdrIsH264Keyframe(); + } payload_type_map_.emplace( - video_codec.plType, - raw_payload ? std::make_unique() - : CreateVideoRtpDepacketizer(video_codec.codecType)); - pt_codec_params_.emplace(video_codec.plType, codec_params); + payload_type, raw_payload + ? 
std::make_unique() + : CreateVideoRtpDepacketizer(video_codec.codecType)); + pt_codec_params_.emplace(payload_type, codec_params); } absl::optional RtpVideoStreamReceiver2::GetSyncInfo() const { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); Syncable::Info info; if (rtp_rtcp_->RemoteNTP(&info.capture_time_ntp_secs, - &info.capture_time_ntp_frac, nullptr, nullptr, + &info.capture_time_ntp_frac, + /*rtcp_arrival_time_secs=*/nullptr, + /*rtcp_arrival_time_frac=*/nullptr, &info.capture_time_source_clock) != 0) { return absl::nullopt; } - { - rtc::CritScope lock(&sync_info_lock_); - if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_ms_) { - return absl::nullopt; - } - info.latest_received_capture_timestamp = *last_received_rtp_timestamp_; - info.latest_receive_time_ms = *last_received_rtp_system_time_ms_; + + if (!last_received_rtp_timestamp_ || !last_received_rtp_system_time_) { + return absl::nullopt; } + info.latest_received_capture_timestamp = *last_received_rtp_timestamp_; + info.latest_receive_time_ms = last_received_rtp_system_time_->ms(); // Leaves info.current_delay_ms uninitialized. 
return info; } +// RTC_RUN_ON(packet_sequence_checker_) RtpVideoStreamReceiver2::ParseGenericDependenciesResult RtpVideoStreamReceiver2::ParseGenericDependenciesExtension( const RtpPacketReceived& rtp_packet, @@ -466,27 +466,38 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData( rtc::CopyOnWriteBuffer codec_payload, const RtpPacketReceived& rtp_packet, const RTPVideoHeader& video) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); - auto packet = std::make_unique( - rtp_packet, video, ntp_estimator_.Estimate(rtp_packet.Timestamp()), - clock_->TimeInMilliseconds()); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + + auto packet = + std::make_unique(rtp_packet, video); + + int64_t unwrapped_rtp_seq_num = + rtp_seq_num_unwrapper_.Unwrap(rtp_packet.SequenceNumber()); + auto& packet_info = + packet_infos_ + .emplace( + unwrapped_rtp_seq_num, + RtpPacketInfo( + rtp_packet.Ssrc(), rtp_packet.Csrcs(), rtp_packet.Timestamp(), + /*audio_level=*/absl::nullopt, + rtp_packet.GetExtension(), + /*receive_time_ms=*/clock_->CurrentTime())) + .first->second; // Try to extrapolate absolute capture time if it is missing. - packet->packet_info.set_absolute_capture_time( - absolute_capture_time_receiver_.OnReceivePacket( - AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(), - packet->packet_info.csrcs()), - packet->packet_info.rtp_timestamp(), + packet_info.set_absolute_capture_time( + absolute_capture_time_interpolator_.OnReceivePacket( + AbsoluteCaptureTimeInterpolator::GetSource(packet_info.ssrc(), + packet_info.csrcs()), + packet_info.rtp_timestamp(), // Assume frequency is the same one for all video frames. 
- kVideoPayloadTypeFrequency, - packet->packet_info.absolute_capture_time())); + kVideoPayloadTypeFrequency, packet_info.absolute_capture_time())); RTPVideoHeader& video_header = packet->video_header; video_header.rotation = kVideoRotation_0; video_header.content_type = VideoContentType::UNSPECIFIED; video_header.video_timing.flags = VideoSendTiming::kInvalid; video_header.is_last_packet_in_frame |= rtp_packet.Marker(); - video_header.frame_marking.temporal_id = kNoTemporalIdx; if (const auto* vp9_header = absl::get_if(&video_header.video_type_header)) { @@ -504,10 +515,15 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData( } else { rtp_packet.GetExtension(&video_header.playout_delay); } - rtp_packet.GetExtension(&video_header.frame_marking); ParseGenericDependenciesResult generic_descriptor_state = ParseGenericDependenciesExtension(rtp_packet, &video_header); + + if (!rtp_packet.recovered()) { + UpdatePacketReceiveTimestamps( + rtp_packet, video_header.frame_type == VideoFrameType::kVideoFrameKey); + } + if (generic_descriptor_state == kDropPacket) return; @@ -526,6 +542,8 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData( video_header.color_space = last_color_space_; } } + video_header.video_frame_tracking_id = + rtp_packet.GetExtension(); if (loss_notification_controller_) { if (rtp_packet.recovered()) { @@ -606,6 +624,8 @@ void RtpVideoStreamReceiver2::OnReceivedPayloadData( void RtpVideoStreamReceiver2::OnRecoveredPacket(const uint8_t* rtp_packet, size_t rtp_packet_length) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RtpPacketReceived packet; if (!packet.Parse(rtp_packet, rtp_packet_length)) return; @@ -628,40 +648,10 @@ void RtpVideoStreamReceiver2::OnRecoveredPacket(const uint8_t* rtp_packet, // This method handles both regular RTP packets and packets recovered // via FlexFEC. 
void RtpVideoStreamReceiver2::OnRtpPacket(const RtpPacketReceived& packet) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); - if (!receiving_) { + if (!receiving_) return; - } - - if (!packet.recovered()) { - // TODO(nisse): Exclude out-of-order packets? - int64_t now_ms = clock_->TimeInMilliseconds(); - { - rtc::CritScope cs(&sync_info_lock_); - last_received_rtp_timestamp_ = packet.Timestamp(); - last_received_rtp_system_time_ms_ = now_ms; - } - // Periodically log the RTP header of incoming packets. - if (now_ms - last_packet_log_ms_ > kPacketLogIntervalMs) { - rtc::StringBuilder ss; - ss << "Packet received on SSRC: " << packet.Ssrc() - << " with payload type: " << static_cast(packet.PayloadType()) - << ", timestamp: " << packet.Timestamp() - << ", sequence number: " << packet.SequenceNumber() - << ", arrival time: " << packet.arrival_time_ms(); - int32_t time_offset; - if (packet.GetExtension(&time_offset)) { - ss << ", toffset: " << time_offset; - } - uint32_t send_time; - if (packet.GetExtension(&send_time)) { - ss << ", abs send time: " << send_time; - } - RTC_LOG(LS_INFO) << ss.str(); - last_packet_log_ms_ = now_ms; - } - } ReceivePacket(packet); @@ -672,12 +662,13 @@ void RtpVideoStreamReceiver2::OnRtpPacket(const RtpPacketReceived& packet) { rtp_receive_statistics_->OnRtpPacket(packet); } - for (RtpPacketSinkInterface* secondary_sink : secondary_sinks_) { - secondary_sink->OnRtpPacket(packet); + if (config_.rtp.packet_sink_) { + config_.rtp.packet_sink_->OnRtpPacket(packet); } } void RtpVideoStreamReceiver2::RequestKeyFrame() { + RTC_DCHECK_RUN_ON(&worker_task_checker_); // TODO(bugs.webrtc.org/10336): Allow the sender to ignore key frame requests // issued by anything other than the LossNotificationController if it (the // sender) is relying on LNTF alone. 
@@ -708,15 +699,19 @@ bool RtpVideoStreamReceiver2::IsRetransmissionsEnabled() const { void RtpVideoStreamReceiver2::RequestPacketRetransmit( const std::vector& sequence_numbers) { + RTC_DCHECK_RUN_ON(&worker_task_checker_); rtp_rtcp_->SendNack(sequence_numbers); } bool RtpVideoStreamReceiver2::IsDecryptable() const { - return frames_decryptable_.load(); + RTC_DCHECK_RUN_ON(&worker_task_checker_); + return frames_decryptable_; } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::OnInsertedPacket( video_coding::PacketBuffer::InsertResult result) { + RTC_DCHECK_RUN_ON(&worker_task_checker_); video_coding::PacketBuffer::Packet* first_packet = nullptr; int max_nack_count; int64_t min_recv_time; @@ -729,22 +724,24 @@ void RtpVideoStreamReceiver2::OnInsertedPacket( // PacketBuffer promisses frame boundaries are correctly set on each // packet. Document that assumption with the DCHECKs. RTC_DCHECK_EQ(frame_boundary, packet->is_first_packet_in_frame()); + int64_t unwrapped_rtp_seq_num = + rtp_seq_num_unwrapper_.Unwrap(packet->seq_num); + RTC_DCHECK(packet_infos_.count(unwrapped_rtp_seq_num) > 0); + RtpPacketInfo& packet_info = packet_infos_[unwrapped_rtp_seq_num]; if (packet->is_first_packet_in_frame()) { first_packet = packet.get(); max_nack_count = packet->times_nacked; - min_recv_time = packet->packet_info.receive_time_ms(); - max_recv_time = packet->packet_info.receive_time_ms(); + min_recv_time = packet_info.receive_time().ms(); + max_recv_time = packet_info.receive_time().ms(); payloads.clear(); packet_infos.clear(); } else { max_nack_count = std::max(max_nack_count, packet->times_nacked); - min_recv_time = - std::min(min_recv_time, packet->packet_info.receive_time_ms()); - max_recv_time = - std::max(max_recv_time, packet->packet_info.receive_time_ms()); + min_recv_time = std::min(min_recv_time, packet_info.receive_time().ms()); + max_recv_time = std::max(max_recv_time, packet_info.receive_time().ms()); } 
payloads.emplace_back(packet->video_payload); - packet_infos.push_back(packet->packet_info); + packet_infos.push_back(packet_info); frame_boundary = packet->is_last_packet_in_frame(); if (packet->is_last_packet_in_frame()) { @@ -759,35 +756,39 @@ void RtpVideoStreamReceiver2::OnInsertedPacket( } const video_coding::PacketBuffer::Packet& last_packet = *packet; - OnAssembledFrame(std::make_unique( - first_packet->seq_num, // - last_packet.seq_num, // - last_packet.marker_bit, // - max_nack_count, // - min_recv_time, // - max_recv_time, // - first_packet->timestamp, // - first_packet->ntp_time_ms, // - last_packet.video_header.video_timing, // - first_packet->payload_type, // - first_packet->codec(), // - last_packet.video_header.rotation, // - last_packet.video_header.content_type, // - first_packet->video_header, // - last_packet.video_header.color_space, // - RtpPacketInfos(std::move(packet_infos)), // + OnAssembledFrame(std::make_unique( + first_packet->seq_num, // + last_packet.seq_num, // + last_packet.marker_bit, // + max_nack_count, // + min_recv_time, // + max_recv_time, // + first_packet->timestamp, // + ntp_estimator_.Estimate(first_packet->timestamp), // + last_packet.video_header.video_timing, // + first_packet->payload_type, // + first_packet->codec(), // + last_packet.video_header.rotation, // + last_packet.video_header.content_type, // + first_packet->video_header, // + last_packet.video_header.color_space, // + RtpPacketInfos(std::move(packet_infos)), // std::move(bitstream))); } } RTC_DCHECK(frame_boundary); if (result.buffer_cleared) { + last_received_rtp_system_time_.reset(); + last_received_keyframe_rtp_system_time_.reset(); + last_received_keyframe_rtp_timestamp_.reset(); + packet_infos_.clear(); RequestKeyFrame(); } } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::OnAssembledFrame( - std::unique_ptr frame) { - RTC_DCHECK_RUN_ON(&network_tc_); + std::unique_ptr frame) { RTC_DCHECK(frame); const absl::optional& descriptor = 
@@ -815,7 +816,6 @@ void RtpVideoStreamReceiver2::OnAssembledFrame( has_received_frame_ = true; } - rtc::CritScope lock(&reference_finder_lock_); // Reset |reference_finder_| if |frame| is new and the codec have changed. if (current_codec_) { bool frame_is_newer = @@ -827,10 +827,8 @@ void RtpVideoStreamReceiver2::OnAssembledFrame( // to overlap with old picture ids. To ensure that doesn't happen we // start from the |last_completed_picture_id_| and add an offset in case // of reordering. - reference_finder_ = - std::make_unique( - this, last_completed_picture_id_ + - std::numeric_limits::max()); + reference_finder_ = std::make_unique( + last_completed_picture_id_ + std::numeric_limits::max()); current_codec_ = frame->codec_type(); } else { // Old frame from before the codec switch, discard it. @@ -851,40 +849,43 @@ void RtpVideoStreamReceiver2::OnAssembledFrame( } else if (frame_transformer_delegate_) { frame_transformer_delegate_->TransformFrame(std::move(frame)); } else { - reference_finder_->ManageFrame(std::move(frame)); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } } -void RtpVideoStreamReceiver2::OnCompleteFrame( - std::unique_ptr frame) { - { - rtc::CritScope lock(&last_seq_num_cs_); - video_coding::RtpFrameObject* rtp_frame = - static_cast(frame.get()); - last_seq_num_for_pic_id_[rtp_frame->id.picture_id] = - rtp_frame->last_seq_num(); +// RTC_RUN_ON(packet_sequence_checker_) +void RtpVideoStreamReceiver2::OnCompleteFrames( + RtpFrameReferenceFinder::ReturnVector frames) { + for (auto& frame : frames) { + RtpFrameObject* rtp_frame = static_cast(frame.get()); + last_seq_num_for_pic_id_[rtp_frame->Id()] = rtp_frame->last_seq_num(); + + last_completed_picture_id_ = + std::max(last_completed_picture_id_, frame->Id()); + complete_frame_callback_->OnCompleteFrame(std::move(frame)); } - last_completed_picture_id_ = - std::max(last_completed_picture_id_, frame->id.picture_id); - complete_frame_callback_->OnCompleteFrame(std::move(frame)); 
} void RtpVideoStreamReceiver2::OnDecryptedFrame( - std::unique_ptr frame) { - rtc::CritScope lock(&reference_finder_lock_); - reference_finder_->ManageFrame(std::move(frame)); + std::unique_ptr frame) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } void RtpVideoStreamReceiver2::OnDecryptionStatusChange( FrameDecryptorInterface::Status status) { - frames_decryptable_.store( + RTC_DCHECK_RUN_ON(&worker_task_checker_); + // Called from BufferedFrameDecryptor::DecryptFrame. + frames_decryptable_ = (status == FrameDecryptorInterface::Status::kOk) || - (status == FrameDecryptorInterface::Status::kRecoverable)); + (status == FrameDecryptorInterface::Status::kRecoverable); } void RtpVideoStreamReceiver2::SetFrameDecryptor( rtc::scoped_refptr frame_decryptor) { - RTC_DCHECK_RUN_ON(&network_tc_); + // TODO(bugs.webrtc.org/11993): Update callers or post the operation over to + // the network thread. + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (buffered_frame_decryptor_ == nullptr) { buffered_frame_decryptor_ = std::make_unique(this, this); @@ -894,55 +895,47 @@ void RtpVideoStreamReceiver2::SetFrameDecryptor( void RtpVideoStreamReceiver2::SetDepacketizerToDecoderFrameTransformer( rtc::scoped_refptr frame_transformer) { - RTC_DCHECK_RUN_ON(&network_tc_); + RTC_DCHECK_RUN_ON(&worker_task_checker_); frame_transformer_delegate_ = - new rtc::RefCountedObject( + rtc::make_ref_counted( this, std::move(frame_transformer), rtc::Thread::Current(), config_.rtp.remote_ssrc); frame_transformer_delegate_->Init(); } void RtpVideoStreamReceiver2::UpdateRtt(int64_t max_rtt_ms) { + RTC_DCHECK_RUN_ON(&worker_task_checker_); if (nack_module_) nack_module_->UpdateRtt(max_rtt_ms); } absl::optional RtpVideoStreamReceiver2::LastReceivedPacketMs() const { - return packet_buffer_.LastReceivedPacketMs(); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + if (last_received_rtp_system_time_) { + return 
absl::optional(last_received_rtp_system_time_->ms()); + } + return absl::nullopt; } absl::optional RtpVideoStreamReceiver2::LastReceivedKeyframePacketMs() const { - return packet_buffer_.LastReceivedKeyframePacketMs(); -} - -void RtpVideoStreamReceiver2::AddSecondarySink(RtpPacketSinkInterface* sink) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); - RTC_DCHECK(!absl::c_linear_search(secondary_sinks_, sink)); - secondary_sinks_.push_back(sink); -} - -void RtpVideoStreamReceiver2::RemoveSecondarySink( - const RtpPacketSinkInterface* sink) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); - auto it = absl::c_find(secondary_sinks_, sink); - if (it == secondary_sinks_.end()) { - // We might be rolling-back a call whose setup failed mid-way. In such a - // case, it's simpler to remove "everything" rather than remember what - // has already been added. - RTC_LOG(LS_WARNING) << "Removal of unknown sink."; - return; + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + if (last_received_keyframe_rtp_system_time_) { + return absl::optional( + last_received_keyframe_rtp_system_time_->ms()); } - secondary_sinks_.erase(it); + return absl::nullopt; } void RtpVideoStreamReceiver2::ManageFrame( - std::unique_ptr frame) { - rtc::CritScope lock(&reference_finder_lock_); - reference_finder_->ManageFrame(std::move(frame)); + std::unique_ptr frame) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + OnCompleteFrames(reference_finder_->ManageFrame(std::move(frame))); } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::ReceivePacket(const RtpPacketReceived& packet) { + RTC_DCHECK_RUN_ON(&worker_task_checker_); if (packet.payload_size() == 0) { // Padding or keep-alive packet. 
// TODO(nisse): Could drop empty packets earlier, but need to figure out how @@ -970,9 +963,9 @@ void RtpVideoStreamReceiver2::ReceivePacket(const RtpPacketReceived& packet) { parsed_payload->video_header); } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::ParseAndHandleEncapsulatingHeader( const RtpPacketReceived& packet) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); if (packet.PayloadType() == config_.rtp.red_payload_type && packet.payload_size() > 0) { if (packet.payload()[0] == config_.rtp.ulpfec_payload_type) { @@ -991,11 +984,12 @@ void RtpVideoStreamReceiver2::ParseAndHandleEncapsulatingHeader( // In the case of a video stream without picture ids and no rtx the // RtpFrameReferenceFinder will need to know about padding to // correctly calculate frame references. +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::NotifyReceiverOfEmptyPacket(uint16_t seq_num) { - { - rtc::CritScope lock(&reference_finder_lock_); - reference_finder_->PaddingReceived(seq_num); - } + RTC_DCHECK_RUN_ON(&worker_task_checker_); + + OnCompleteFrames(reference_finder_->PaddingReceived(seq_num)); + OnInsertedPacket(packet_buffer_.InsertPadding(seq_num)); if (nack_module_) { nack_module_->OnReceivedPacket(seq_num, /* is_keyframe = */ false, @@ -1010,7 +1004,7 @@ void RtpVideoStreamReceiver2::NotifyReceiverOfEmptyPacket(uint16_t seq_num) { bool RtpVideoStreamReceiver2::DeliverRtcp(const uint8_t* rtcp_packet, size_t rtcp_packet_length) { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (!receiving_) { return false; @@ -1043,7 +1037,7 @@ bool RtpVideoStreamReceiver2::DeliverRtcp(const uint8_t* rtcp_packet, absl::optional remote_to_local_clock_offset_ms = ntp_estimator_.EstimateRemoteToLocalClockOffsetMs(); if (remote_to_local_clock_offset_ms.has_value()) { - absolute_capture_time_receiver_.SetRemoteToLocalClockOffset( + capture_clock_offset_updater_.SetRemoteToLocalClockOffset( 
Int64MsToQ32x32(*remote_to_local_clock_offset_ms)); } } @@ -1052,50 +1046,50 @@ bool RtpVideoStreamReceiver2::DeliverRtcp(const uint8_t* rtcp_packet, } void RtpVideoStreamReceiver2::FrameContinuous(int64_t picture_id) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); if (!nack_module_) return; int seq_num = -1; - { - rtc::CritScope lock(&last_seq_num_cs_); - auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id); - if (seq_num_it != last_seq_num_for_pic_id_.end()) - seq_num = seq_num_it->second; - } + auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id); + if (seq_num_it != last_seq_num_for_pic_id_.end()) + seq_num = seq_num_it->second; if (seq_num != -1) nack_module_->ClearUpTo(seq_num); } void RtpVideoStreamReceiver2::FrameDecoded(int64_t picture_id) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); int seq_num = -1; - { - rtc::CritScope lock(&last_seq_num_cs_); - auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id); - if (seq_num_it != last_seq_num_for_pic_id_.end()) { - seq_num = seq_num_it->second; - last_seq_num_for_pic_id_.erase(last_seq_num_for_pic_id_.begin(), - ++seq_num_it); - } + auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id); + if (seq_num_it != last_seq_num_for_pic_id_.end()) { + seq_num = seq_num_it->second; + last_seq_num_for_pic_id_.erase(last_seq_num_for_pic_id_.begin(), + ++seq_num_it); } + if (seq_num != -1) { + int64_t unwrapped_rtp_seq_num = rtp_seq_num_unwrapper_.Unwrap(seq_num); + packet_infos_.erase(packet_infos_.begin(), + packet_infos_.upper_bound(unwrapped_rtp_seq_num)); packet_buffer_.ClearTo(seq_num); - rtc::CritScope lock(&reference_finder_lock_); reference_finder_->ClearTo(seq_num); } } void RtpVideoStreamReceiver2::SignalNetworkState(NetworkState state) { + RTC_DCHECK_RUN_ON(&worker_task_checker_); rtp_rtcp_->SetRTCPStatus(state == kNetworkUp ? 
config_.rtp.rtcp_mode : RtcpMode::kOff); } void RtpVideoStreamReceiver2::StartReceive() { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); receiving_ = true; } void RtpVideoStreamReceiver2::StopReceive() { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); receiving_ = false; } @@ -1126,7 +1120,10 @@ void RtpVideoStreamReceiver2::UpdateHistograms() { } } +// RTC_RUN_ON(packet_sequence_checker_) void RtpVideoStreamReceiver2::InsertSpsPpsIntoTracker(uint8_t payload_type) { + RTC_DCHECK_RUN_ON(&worker_task_checker_); + auto codec_params_it = pt_codec_params_.find(payload_type); if (codec_params_it == pt_codec_params_.end()) return; @@ -1149,4 +1146,37 @@ void RtpVideoStreamReceiver2::InsertSpsPpsIntoTracker(uint8_t payload_type) { sprop_decoder.pps_nalu()); } +void RtpVideoStreamReceiver2::UpdatePacketReceiveTimestamps( + const RtpPacketReceived& packet, + bool is_keyframe) { + Timestamp now = clock_->CurrentTime(); + if (is_keyframe || + last_received_keyframe_rtp_timestamp_ == packet.Timestamp()) { + last_received_keyframe_rtp_timestamp_ = packet.Timestamp(); + last_received_keyframe_rtp_system_time_ = now; + } + last_received_rtp_system_time_ = now; + last_received_rtp_timestamp_ = packet.Timestamp(); + + // Periodically log the RTP header of incoming packets. 
+ if (now.ms() - last_packet_log_ms_ > kPacketLogIntervalMs) { + rtc::StringBuilder ss; + ss << "Packet received on SSRC: " << packet.Ssrc() + << " with payload type: " << static_cast(packet.PayloadType()) + << ", timestamp: " << packet.Timestamp() + << ", sequence number: " << packet.SequenceNumber() + << ", arrival time: " << ToString(packet.arrival_time()); + int32_t time_offset; + if (packet.GetExtension(&time_offset)) { + ss << ", toffset: " << time_offset; + } + uint32_t send_time; + if (packet.GetExtension(&send_time)) { + ss << ", abs send time: " << send_time; + } + RTC_LOG(LS_INFO) << ss.str(); + last_packet_log_ms_ = now.ms(); + } +} + } // namespace webrtc diff --git a/video/rtp_video_stream_receiver2.h b/video/rtp_video_stream_receiver2.h index 3026e1dac3..ddff26b3bd 100644 --- a/video/rtp_video_stream_receiver2.h +++ b/video/rtp_video_stream_receiver2.h @@ -11,16 +11,15 @@ #ifndef VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_ #define VIDEO_RTP_VIDEO_STREAM_RECEIVER2_H_ -#include -#include #include #include #include #include #include "absl/types/optional.h" -#include "api/array_view.h" #include "api/crypto/frame_decryptor_interface.h" +#include "api/sequence_checker.h" +#include "api/units/timestamp.h" #include "api/video/color_space.h" #include "api/video_codecs/video_codec.h" #include "call/rtp_packet_sink_interface.h" @@ -29,11 +28,13 @@ #include "modules/rtp_rtcp/include/receive_statistics.h" #include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "modules/rtp_rtcp/source/absolute_capture_time_receiver.h" +#include "modules/rtp_rtcp/source/absolute_capture_time_interpolator.h" +#include "modules/rtp_rtcp/source/capture_clock_offset_updater.h" #include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h" #include "modules/rtp_rtcp/source/rtp_packet_received.h" 
+#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_interface.h" #include "modules/rtp_rtcp/source/rtp_video_header.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer.h" #include "modules/video_coding/h264_sps_pps_tracker.h" @@ -42,12 +43,10 @@ #include "modules/video_coding/rtp_frame_reference_finder.h" #include "modules/video_coding/unique_timestamp_counter.h" #include "rtc_base/constructor_magic.h" -#include "rtc_base/critical_section.h" #include "rtc_base/experiments/field_trial_parser.h" #include "rtc_base/numerics/sequence_number_util.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/thread_checker.h" #include "video/buffered_frame_decryptor.h" #include "video/rtp_video_stream_receiver_frame_transformer_delegate.h" @@ -55,7 +54,6 @@ namespace webrtc { class NackModule2; class PacketRouter; -class ProcessThread; class ReceiveStatistics; class RtcpRttStats; class RtpPacketReceived; @@ -66,12 +64,20 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, public RecoveredPacketReceiver, public RtpPacketSinkInterface, public KeyFrameRequestSender, - public video_coding::OnCompleteFrameCallback, public OnDecryptedFrameCallback, public OnDecryptionStatusChangeCallback, public RtpVideoFrameReceiver { public: + // A complete frame is a frame which has received all its packets and all its + // references are known. 
+ class OnCompleteFrameCallback { + public: + virtual ~OnCompleteFrameCallback() {} + virtual void OnCompleteFrame(std::unique_ptr frame) = 0; + }; + RtpVideoStreamReceiver2( + TaskQueueBase* current_queue, Clock* clock, Transport* transport, RtcpRttStats* rtt_stats, @@ -83,17 +89,17 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, ReceiveStatistics* rtp_receive_statistics, RtcpPacketTypeCounterObserver* rtcp_packet_type_counter_observer, RtcpCnameCallback* rtcp_cname_callback, - ProcessThread* process_thread, NackSender* nack_sender, // The KeyFrameRequestSender is optional; if not provided, key frame // requests are sent via the internal RtpRtcp module. KeyFrameRequestSender* keyframe_request_sender, - video_coding::OnCompleteFrameCallback* complete_frame_callback, + OnCompleteFrameCallback* complete_frame_callback, rtc::scoped_refptr frame_decryptor, rtc::scoped_refptr frame_transformer); ~RtpVideoStreamReceiver2() override; - void AddReceiveCodec(const VideoCodec& video_codec, + void AddReceiveCodec(uint8_t payload_type, + const VideoCodec& video_codec, const std::map& codec_params, bool raw_payload); @@ -113,15 +119,14 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, // Returns number of different frames seen. int GetUniqueFramesSeen() const { - RTC_DCHECK_RUN_ON(&worker_task_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); return frame_counter_.GetUniqueSeen(); } // Implements RtpPacketSinkInterface. void OnRtpPacket(const RtpPacketReceived& packet) override; - // TODO(philipel): Stop using VCMPacket in the new jitter buffer and then - // remove this function. Public only for tests. + // Public only for tests. void OnReceivedPayloadData(rtc::CopyOnWriteBuffer codec_payload, const RtpPacketReceived& rtp_packet, const RTPVideoHeader& video); @@ -146,16 +151,14 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, // Decryption not SRTP. bool IsDecryptable() const; - // Don't use, still experimental. 
+ // Request packet retransmits via NACK. Called via + // VideoReceiveStream2::SendNack, which gets called when + // RtpVideoStreamReceiver2::RtcpFeedbackBuffer's SendNack and + // SendBufferedRtcpFeedback methods (see `rtcp_feedback_buffer_` below). void RequestPacketRetransmit(const std::vector& sequence_numbers); - // Implements OnCompleteFrameCallback. - void OnCompleteFrame( - std::unique_ptr frame) override; - // Implements OnDecryptedFrameCallback. - void OnDecryptedFrame( - std::unique_ptr frame) override; + void OnDecryptedFrame(std::unique_ptr frame) override; // Implements OnDecryptionStatusChangeCallback. void OnDecryptionStatusChange( @@ -177,17 +180,12 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, absl::optional LastReceivedPacketMs() const; absl::optional LastReceivedKeyframePacketMs() const; - // RtpDemuxer only forwards a given RTP packet to one sink. However, some - // sinks, such as FlexFEC, might wish to be informed of all of the packets - // a given sink receives (or any set of sinks). They may do so by registering - // themselves as secondary sinks. - void AddSecondarySink(RtpPacketSinkInterface* sink); - void RemoveSecondarySink(const RtpPacketSinkInterface* sink); - private: // Implements RtpVideoFrameReceiver. - void ManageFrame( - std::unique_ptr frame) override; + void ManageFrame(std::unique_ptr frame) override; + + void OnCompleteFrames(RtpFrameReferenceFinder::ReturnVector frame) + RTC_RUN_ON(packet_sequence_checker_); // Used for buffering RTCP feedback messages and sending them all together. // Note: @@ -206,21 +204,20 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, ~RtcpFeedbackBuffer() override = default; // KeyFrameRequestSender implementation. - void RequestKeyFrame() RTC_LOCKS_EXCLUDED(cs_) override; + void RequestKeyFrame() override; // NackSender implementation. 
void SendNack(const std::vector& sequence_numbers, - bool buffering_allowed) RTC_LOCKS_EXCLUDED(cs_) override; + bool buffering_allowed) override; // LossNotificationSender implementation. void SendLossNotification(uint16_t last_decoded_seq_num, uint16_t last_received_seq_num, bool decodability_flag, - bool buffering_allowed) - RTC_LOCKS_EXCLUDED(cs_) override; + bool buffering_allowed) override; // Send all RTCP feedback messages buffered thus far. - void SendBufferedRtcpFeedback() RTC_LOCKS_EXCLUDED(cs_); + void SendBufferedRtcpFeedback(); private: // LNTF-related state. @@ -236,32 +233,21 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, uint16_t last_received_seq_num; bool decodability_flag; }; - struct ConsumedRtcpFeedback { - bool request_key_frame = false; - std::vector nack_sequence_numbers; - absl::optional lntf_state; - }; - - ConsumedRtcpFeedback ConsumeRtcpFeedback() RTC_LOCKS_EXCLUDED(cs_); - ConsumedRtcpFeedback ConsumeRtcpFeedbackLocked() - RTC_EXCLUSIVE_LOCKS_REQUIRED(cs_); - // This method is called both with and without cs_ held. - void SendRtcpFeedback(ConsumedRtcpFeedback feedback); + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; KeyFrameRequestSender* const key_frame_request_sender_; NackSender* const nack_sender_; LossNotificationSender* const loss_notification_sender_; - // NACKs are accessible from two threads due to nack_module_ being a module. - rtc::CriticalSection cs_; - // Key-frame-request-related state. - bool request_key_frame_ RTC_GUARDED_BY(cs_); + bool request_key_frame_ RTC_GUARDED_BY(packet_sequence_checker_); // NACK-related state. 
- std::vector nack_sequence_numbers_ RTC_GUARDED_BY(cs_); + std::vector nack_sequence_numbers_ + RTC_GUARDED_BY(packet_sequence_checker_); - absl::optional lntf_state_ RTC_GUARDED_BY(cs_); + absl::optional lntf_state_ + RTC_GUARDED_BY(packet_sequence_checker_); }; enum ParseGenericDependenciesResult { kDropPacket, @@ -271,25 +257,34 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, // Entry point doing non-stats work for a received packet. Called // for the same packet both before and after RED decapsulation. - void ReceivePacket(const RtpPacketReceived& packet); + void ReceivePacket(const RtpPacketReceived& packet) + RTC_RUN_ON(packet_sequence_checker_); + // Parses and handles RED headers. // This function assumes that it's being called from only one thread. - void ParseAndHandleEncapsulatingHeader(const RtpPacketReceived& packet); - void NotifyReceiverOfEmptyPacket(uint16_t seq_num); + void ParseAndHandleEncapsulatingHeader(const RtpPacketReceived& packet) + RTC_RUN_ON(packet_sequence_checker_); + void NotifyReceiverOfEmptyPacket(uint16_t seq_num) + RTC_RUN_ON(packet_sequence_checker_); void UpdateHistograms(); bool IsRedEnabled() const; - void InsertSpsPpsIntoTracker(uint8_t payload_type); - void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result); + void InsertSpsPpsIntoTracker(uint8_t payload_type) + RTC_RUN_ON(packet_sequence_checker_); + void OnInsertedPacket(video_coding::PacketBuffer::InsertResult result) + RTC_RUN_ON(packet_sequence_checker_); ParseGenericDependenciesResult ParseGenericDependenciesExtension( const RtpPacketReceived& rtp_packet, - RTPVideoHeader* video_header) RTC_RUN_ON(worker_task_checker_); - void OnAssembledFrame(std::unique_ptr frame); + RTPVideoHeader* video_header) RTC_RUN_ON(packet_sequence_checker_); + void OnAssembledFrame(std::unique_ptr frame) + RTC_RUN_ON(packet_sequence_checker_); + void UpdatePacketReceiveTimestamps(const RtpPacketReceived& packet, + bool is_keyframe) + 
RTC_RUN_ON(packet_sequence_checker_); Clock* const clock_; // Ownership of this object lies with VideoReceiveStream, which owns |this|. const VideoReceiveStream::Config& config_; PacketRouter* const packet_router_; - ProcessThread* const process_thread_; RemoteNtpTimeEstimator ntp_estimator_; @@ -301,84 +296,100 @@ class RtpVideoStreamReceiver2 : public LossNotificationSender, ReceiveStatistics* const rtp_receive_statistics_; std::unique_ptr ulpfec_receiver_; - SequenceChecker worker_task_checker_; - bool receiving_ RTC_GUARDED_BY(worker_task_checker_); - int64_t last_packet_log_ms_ RTC_GUARDED_BY(worker_task_checker_); - - const std::unique_ptr rtp_rtcp_; - - video_coding::OnCompleteFrameCallback* complete_frame_callback_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_task_checker_; + // TODO(bugs.webrtc.org/11993): This checker conceptually represents + // operations that belong to the network thread. The Call class is currently + // moving towards handling network packets on the network thread and while + // that work is ongoing, this checker may in practice represent the worker + // thread, but still serves as a mechanism of grouping together concepts + // that belong to the network thread. Once the packets are fully delivered + // on the network thread, this comment will be deleted. 
+ RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; + bool receiving_ RTC_GUARDED_BY(packet_sequence_checker_); + int64_t last_packet_log_ms_ RTC_GUARDED_BY(packet_sequence_checker_); + + const std::unique_ptr rtp_rtcp_; + + OnCompleteFrameCallback* complete_frame_callback_; KeyFrameRequestSender* const keyframe_request_sender_; RtcpFeedbackBuffer rtcp_feedback_buffer_; - std::unique_ptr nack_module_; + const std::unique_ptr nack_module_; std::unique_ptr loss_notification_controller_; - video_coding::PacketBuffer packet_buffer_; - UniqueTimestampCounter frame_counter_ RTC_GUARDED_BY(worker_task_checker_); + video_coding::PacketBuffer packet_buffer_ + RTC_GUARDED_BY(packet_sequence_checker_); + UniqueTimestampCounter frame_counter_ + RTC_GUARDED_BY(packet_sequence_checker_); SeqNumUnwrapper frame_id_unwrapper_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); // Video structure provided in the dependency descriptor in a first packet // of a key frame. It is required to parse dependency descriptor in the // following delta packets. std::unique_ptr video_structure_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); // Frame id of the last frame with the attached video structure. 
// absl::nullopt when `video_structure_ == nullptr`; absl::optional video_structure_frame_id_ - RTC_GUARDED_BY(worker_task_checker_); + RTC_GUARDED_BY(packet_sequence_checker_); - rtc::CriticalSection reference_finder_lock_; - std::unique_ptr reference_finder_ - RTC_GUARDED_BY(reference_finder_lock_); - absl::optional current_codec_; - uint32_t last_assembled_frame_rtp_timestamp_; + std::unique_ptr reference_finder_ + RTC_GUARDED_BY(packet_sequence_checker_); + absl::optional current_codec_ + RTC_GUARDED_BY(packet_sequence_checker_); + uint32_t last_assembled_frame_rtp_timestamp_ + RTC_GUARDED_BY(packet_sequence_checker_); - rtc::CriticalSection last_seq_num_cs_; std::map last_seq_num_for_pic_id_ - RTC_GUARDED_BY(last_seq_num_cs_); - video_coding::H264SpsPpsTracker tracker_; + RTC_GUARDED_BY(packet_sequence_checker_); + video_coding::H264SpsPpsTracker tracker_ + RTC_GUARDED_BY(packet_sequence_checker_); // Maps payload id to the depacketizer. - std::map> payload_type_map_; + std::map> payload_type_map_ + RTC_GUARDED_BY(packet_sequence_checker_); // TODO(johan): Remove pt_codec_params_ once // https://bugs.chromium.org/p/webrtc/issues/detail?id=6883 is resolved. // Maps a payload type to a map of out-of-band supplied codec parameters. - std::map> pt_codec_params_; - int16_t last_payload_type_ = -1; + std::map> pt_codec_params_ + RTC_GUARDED_BY(packet_sequence_checker_); + int16_t last_payload_type_ RTC_GUARDED_BY(packet_sequence_checker_) = -1; - bool has_received_frame_; + bool has_received_frame_ RTC_GUARDED_BY(packet_sequence_checker_); - std::vector secondary_sinks_ - RTC_GUARDED_BY(worker_task_checker_); - - // Info for GetSyncInfo is updated on network or worker thread, and queried on - // the worker thread. 
- rtc::CriticalSection sync_info_lock_; absl::optional last_received_rtp_timestamp_ - RTC_GUARDED_BY(sync_info_lock_); - absl::optional last_received_rtp_system_time_ms_ - RTC_GUARDED_BY(sync_info_lock_); + RTC_GUARDED_BY(packet_sequence_checker_); + absl::optional last_received_keyframe_rtp_timestamp_ + RTC_GUARDED_BY(packet_sequence_checker_); + absl::optional last_received_rtp_system_time_ + RTC_GUARDED_BY(packet_sequence_checker_); + absl::optional last_received_keyframe_rtp_system_time_ + RTC_GUARDED_BY(packet_sequence_checker_); - // Used to validate the buffered frame decryptor is always run on the correct - // thread. - rtc::ThreadChecker network_tc_; // Handles incoming encrypted frames and forwards them to the // rtp_reference_finder if they are decryptable. std::unique_ptr buffered_frame_decryptor_ - RTC_PT_GUARDED_BY(network_tc_); - std::atomic frames_decryptable_; + RTC_PT_GUARDED_BY(packet_sequence_checker_); + bool frames_decryptable_ RTC_GUARDED_BY(worker_task_checker_); absl::optional last_color_space_; - AbsoluteCaptureTimeReceiver absolute_capture_time_receiver_ - RTC_GUARDED_BY(worker_task_checker_); + AbsoluteCaptureTimeInterpolator absolute_capture_time_interpolator_ + RTC_GUARDED_BY(packet_sequence_checker_); + + CaptureClockOffsetUpdater capture_clock_offset_updater_ + RTC_GUARDED_BY(packet_sequence_checker_); int64_t last_completed_picture_id_ = 0; rtc::scoped_refptr frame_transformer_delegate_; + + SeqNumUnwrapper rtp_seq_num_unwrapper_ + RTC_GUARDED_BY(packet_sequence_checker_); + std::map packet_infos_ + RTC_GUARDED_BY(packet_sequence_checker_); }; } // namespace webrtc diff --git a/video/rtp_video_stream_receiver2_unittest.cc b/video/rtp_video_stream_receiver2_unittest.cc index c8584fcd55..7ccf0a5faa 100644 --- a/video/rtp_video_stream_receiver2_unittest.cc +++ b/video/rtp_video_stream_receiver2_unittest.cc @@ -13,6 +13,7 @@ #include #include +#include "api/task_queue/task_queue_base.h" #include "api/video/video_codec_type.h" #include 
"api/video/video_frame_type.h" #include "common_video/h264/h264_common.h" @@ -37,6 +38,8 @@ #include "test/gmock.h" #include "test/gtest.h" #include "test/mock_frame_transformer.h" +#include "test/time_controller/simulated_task_queue.h" +#include "test/time_controller/simulated_time_controller.h" using ::testing::_; using ::testing::ElementsAre; @@ -50,8 +53,7 @@ namespace { const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01}; -std::vector GetAbsoluteCaptureTimestamps( - const video_coding::EncodedFrame* frame) { +std::vector GetAbsoluteCaptureTimestamps(const EncodedFrame* frame) { std::vector result; for (const auto& packet_info : frame->PacketInfos()) { if (packet_info.absolute_capture_time()) { @@ -95,23 +97,13 @@ class MockKeyFrameRequestSender : public KeyFrameRequestSender { }; class MockOnCompleteFrameCallback - : public video_coding::OnCompleteFrameCallback { + : public RtpVideoStreamReceiver2::OnCompleteFrameCallback { public: - MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ()); - MOCK_METHOD(void, - DoOnCompleteFrameFailNullptr, - (video_coding::EncodedFrame*), - ()); - MOCK_METHOD(void, - DoOnCompleteFrameFailLength, - (video_coding::EncodedFrame*), - ()); - MOCK_METHOD(void, - DoOnCompleteFrameFailBitstream, - (video_coding::EncodedFrame*), - ()); - void OnCompleteFrame( - std::unique_ptr frame) override { + MOCK_METHOD(void, DoOnCompleteFrame, (EncodedFrame*), ()); + MOCK_METHOD(void, DoOnCompleteFrameFailNullptr, (EncodedFrame*), ()); + MOCK_METHOD(void, DoOnCompleteFrameFailLength, (EncodedFrame*), ()); + MOCK_METHOD(void, DoOnCompleteFrameFailBitstream, (EncodedFrame*), ()); + void OnCompleteFrame(std::unique_ptr frame) override { if (!frame) { DoOnCompleteFrameFailNullptr(nullptr); return; @@ -144,11 +136,11 @@ class MockRtpPacketSink : public RtpPacketSinkInterface { }; constexpr uint32_t kSsrc = 111; -constexpr uint16_t kSequenceNumber = 222; constexpr int kPayloadType = 100; constexpr int kRedPayloadType = 125; 
std::unique_ptr CreateRtpPacketReceived() { + constexpr uint16_t kSequenceNumber = 222; auto packet = std::make_unique(); packet->SetSsrc(kSsrc); packet->SetSequenceNumber(kSequenceNumber); @@ -163,24 +155,28 @@ MATCHER_P(SamePacketAs, other, "") { } // namespace -class RtpVideoStreamReceiver2Test : public ::testing::Test { +class RtpVideoStreamReceiver2Test : public ::testing::Test, + public RtpPacketSinkInterface { public: RtpVideoStreamReceiver2Test() : RtpVideoStreamReceiver2Test("") {} explicit RtpVideoStreamReceiver2Test(std::string field_trials) - : override_field_trials_(field_trials), - config_(CreateConfig()), - process_thread_(ProcessThread::Create("TestThread")) { + : time_controller_(Timestamp::Millis(100)), + task_queue_(time_controller_.GetTaskQueueFactory()->CreateTaskQueue( + "RtpVideoStreamReceiver2Test", + TaskQueueFactory::Priority::NORMAL)), + task_queue_setter_(task_queue_.get()), + override_field_trials_(field_trials), + config_(CreateConfig()) { rtp_receive_statistics_ = ReceiveStatistics::Create(Clock::GetRealTimeClock()); rtp_video_stream_receiver_ = std::make_unique( - Clock::GetRealTimeClock(), &mock_transport_, nullptr, nullptr, &config_, - rtp_receive_statistics_.get(), nullptr, nullptr, process_thread_.get(), - &mock_nack_sender_, &mock_key_frame_request_sender_, + TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_, + nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr, + nullptr, &mock_nack_sender_, &mock_key_frame_request_sender_, &mock_on_complete_frame_callback_, nullptr, nullptr); VideoCodec codec; - codec.plType = kPayloadType; codec.codecType = kVideoCodecGeneric; - rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, + rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, {}, /*raw_payload=*/false); } @@ -200,7 +196,7 @@ class RtpVideoStreamReceiver2Test : public ::testing::Test { info.type = H264::NaluType::kSps; info.sps_id = sps_id; info.pps_id = -1; - 
data->AppendData({H264::NaluType::kSps, sps_id}); + data->AppendData({H264::NaluType::kSps, sps_id}); auto& h264 = absl::get(video_header->video_type_header); h264.nalus[h264.nalus_length++] = info; } @@ -213,7 +209,7 @@ class RtpVideoStreamReceiver2Test : public ::testing::Test { info.type = H264::NaluType::kPps; info.sps_id = sps_id; info.pps_id = pps_id; - data->AppendData({H264::NaluType::kPps, pps_id}); + data->AppendData({H264::NaluType::kPps, pps_id}); auto& h264 = absl::get(video_header->video_type_header); h264.nalus[h264.nalus_length++] = info; } @@ -227,24 +223,34 @@ class RtpVideoStreamReceiver2Test : public ::testing::Test { h264.nalus[h264.nalus_length++] = info; } + void OnRtpPacket(const RtpPacketReceived& packet) override { + if (test_packet_sink_) + test_packet_sink_->OnRtpPacket(packet); + } + protected: - static VideoReceiveStream::Config CreateConfig() { + VideoReceiveStream::Config CreateConfig() { VideoReceiveStream::Config config(nullptr); config.rtp.remote_ssrc = 1111; config.rtp.local_ssrc = 2222; config.rtp.red_payload_type = kRedPayloadType; + config.rtp.packet_sink_ = this; return config; } + GlobalSimulatedTimeController time_controller_; + std::unique_ptr task_queue_; + TokenTaskQueue::CurrentTaskQueueSetter task_queue_setter_; + const webrtc::test::ScopedFieldTrials override_field_trials_; VideoReceiveStream::Config config_; MockNackSender mock_nack_sender_; MockKeyFrameRequestSender mock_key_frame_request_sender_; MockTransport mock_transport_; MockOnCompleteFrameCallback mock_on_complete_frame_callback_; - std::unique_ptr process_thread_; std::unique_ptr rtp_receive_statistics_; std::unique_ptr rtp_video_stream_receiver_; + RtpPacketSinkInterface* test_packet_sink_ = nullptr; }; TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) { @@ -312,10 +318,10 @@ TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) { // Prepare the receiver for VP9. 
VideoCodec codec; - codec.plType = kVp9PayloadType; codec.codecType = kVideoCodecVP9; std::map codec_params; - rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params, + rtp_video_stream_receiver_->AddReceiveCodec(kVp9PayloadType, codec, + codec_params, /*raw_payload=*/false); // Generate key frame packets. @@ -340,7 +346,7 @@ TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) { EXPECT_TRUE(key_frame_packet2.GetExtension()); rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([kColorSpace](EncodedFrame* frame) { ASSERT_TRUE(frame->EncodedImage().ColorSpace()); EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace); })); @@ -356,7 +362,7 @@ TEST_F(RtpVideoStreamReceiver2Test, CacheColorSpaceFromLastPacketOfKeyframe) { // included in the RTP packet. EXPECT_FALSE(delta_frame_packet.GetExtension()); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([kColorSpace](EncodedFrame* frame) { ASSERT_TRUE(frame->EncodedImage().ColorSpace()); EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace); })); @@ -398,11 +404,10 @@ TEST_F(RtpVideoStreamReceiver2Test, PacketInfoIsPropagatedIntoVideoFrames) { mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(), data.size()); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke( - [kAbsoluteCaptureTimestamp](video_coding::EncodedFrame* frame) { - EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), - ElementsAre(kAbsoluteCaptureTimestamp)); - })); + .WillOnce(Invoke([kAbsoluteCaptureTimestamp](EncodedFrame* frame) { + EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), + ElementsAre(kAbsoluteCaptureTimestamp)); + })); rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet, 
video_header); } @@ -446,7 +451,7 @@ TEST_F(RtpVideoStreamReceiver2Test, // Expect rtp video stream receiver to extrapolate it for the resulting video // frame using absolute capture time from the previous packet. EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke([](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([](EncodedFrame* frame) { EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1)); })); rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet, @@ -519,13 +524,7 @@ INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe, RtpVideoStreamReceiver2TestH264, Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/")); -// Fails on MSAN: https://bugs.chromium.org/p/webrtc/issues/detail?id=11376. -#if defined(MEMORY_SANITIZER) -#define MAYBE_InBandSpsPps DISABLED_InBandSpsPps -#else -#define MAYBE_InBandSpsPps InBandSpsPps -#endif -TEST_P(RtpVideoStreamReceiver2TestH264, MAYBE_InBandSpsPps) { +TEST_P(RtpVideoStreamReceiver2TestH264, InBandSpsPps) { rtc::CopyOnWriteBuffer sps_data; RtpPacketReceived rtp_packet; RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader(); @@ -575,13 +574,12 @@ TEST_P(RtpVideoStreamReceiver2TestH264, MAYBE_InBandSpsPps) { TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) { constexpr int kPayloadType = 99; VideoCodec codec; - codec.plType = kPayloadType; std::map codec_params; // Example parameter sets from https://tools.ietf.org/html/rfc3984#section-8.2 // . 
codec_params.insert( {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="}); - rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params, + rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, codec_params, /*raw_payload=*/false); const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96, 0x53, 0x05, 0x89, 0x88}; @@ -614,6 +612,75 @@ TEST_P(RtpVideoStreamReceiver2TestH264, OutOfBandFmtpSpsPps) { video_header); } +TEST_P(RtpVideoStreamReceiver2TestH264, ForceSpsPpsIdrIsKeyframe) { + constexpr int kPayloadType = 99; + VideoCodec codec; + std::map codec_params; + if (GetParam() == + "") { // Forcing can be done either with field trial or codec_params. + codec_params.insert({cricket::kH264FmtpSpsPpsIdrInKeyframe, ""}); + } + rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, codec_params, + /*raw_payload=*/false); + rtc::CopyOnWriteBuffer sps_data; + RtpPacketReceived rtp_packet; + RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader(); + AddSps(&sps_video_header, 0, &sps_data); + rtp_packet.SetSequenceNumber(0); + rtp_packet.SetPayloadType(kPayloadType); + sps_video_header.is_first_packet_in_frame = true; + sps_video_header.frame_type = VideoFrameType::kEmptyFrame; + mock_on_complete_frame_callback_.AppendExpectedBitstream( + kH264StartCode, sizeof(kH264StartCode)); + mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(), + sps_data.size()); + rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet, + sps_video_header); + + rtc::CopyOnWriteBuffer pps_data; + RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader(); + AddPps(&pps_video_header, 0, 1, &pps_data); + rtp_packet.SetSequenceNumber(1); + pps_video_header.is_first_packet_in_frame = true; + pps_video_header.frame_type = VideoFrameType::kEmptyFrame; + mock_on_complete_frame_callback_.AppendExpectedBitstream( + kH264StartCode, sizeof(kH264StartCode)); + mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(), 
+ pps_data.size()); + rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet, + pps_video_header); + + rtc::CopyOnWriteBuffer idr_data; + RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader(); + AddIdr(&idr_video_header, 1); + rtp_packet.SetSequenceNumber(2); + idr_video_header.is_first_packet_in_frame = true; + idr_video_header.is_last_packet_in_frame = true; + idr_video_header.frame_type = VideoFrameType::kVideoFrameKey; + const uint8_t idr[] = {0x65, 1, 2, 3}; + idr_data.AppendData(idr); + mock_on_complete_frame_callback_.AppendExpectedBitstream( + kH264StartCode, sizeof(kH264StartCode)); + mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(), + idr_data.size()); + EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) + .WillOnce( + [&](EncodedFrame* frame) { EXPECT_TRUE(frame->is_keyframe()); }); + rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet, + idr_video_header); + mock_on_complete_frame_callback_.ClearExpectedBitstream(); + mock_on_complete_frame_callback_.AppendExpectedBitstream( + kH264StartCode, sizeof(kH264StartCode)); + mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(), + idr_data.size()); + rtp_packet.SetSequenceNumber(3); + EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) + .WillOnce( + [&](EncodedFrame* frame) { EXPECT_FALSE(frame->is_keyframe()); }); + rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet, + idr_video_header); +} + TEST_F(RtpVideoStreamReceiver2Test, PaddingInMediaStream) { RtpPacketReceived rtp_packet; RTPVideoHeader video_header = GetDefaultH264VideoHeader(); @@ -687,83 +754,36 @@ TEST_F(RtpVideoStreamReceiver2Test, RequestKeyframeWhenPacketBufferGetsFull) { video_header); } -TEST_F(RtpVideoStreamReceiver2Test, SecondarySinksGetRtpNotifications) { +TEST_F(RtpVideoStreamReceiver2Test, SinkGetsRtpNotifications) { rtp_video_stream_receiver_->StartReceive(); - MockRtpPacketSink secondary_sink_1; - 
MockRtpPacketSink secondary_sink_2; - - rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink_1); - rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink_2); + MockRtpPacketSink test_sink; + test_packet_sink_ = &test_sink; auto rtp_packet = CreateRtpPacketReceived(); - EXPECT_CALL(secondary_sink_1, OnRtpPacket(SamePacketAs(*rtp_packet))); - EXPECT_CALL(secondary_sink_2, OnRtpPacket(SamePacketAs(*rtp_packet))); + EXPECT_CALL(test_sink, OnRtpPacket(SamePacketAs(*rtp_packet))); rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet); // Test tear-down. rtp_video_stream_receiver_->StopReceive(); - rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink_1); - rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink_2); + test_packet_sink_ = nullptr; } -TEST_F(RtpVideoStreamReceiver2Test, - RemovedSecondarySinksGetNoRtpNotifications) { - rtp_video_stream_receiver_->StartReceive(); - - MockRtpPacketSink secondary_sink; - - rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink); - rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink); - - auto rtp_packet = CreateRtpPacketReceived(); - - EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0); - - rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet); - - // Test tear-down. - rtp_video_stream_receiver_->StopReceive(); -} - -TEST_F(RtpVideoStreamReceiver2Test, - OnlyRemovedSecondarySinksExcludedFromNotifications) { - rtp_video_stream_receiver_->StartReceive(); - - MockRtpPacketSink kept_secondary_sink; - MockRtpPacketSink removed_secondary_sink; - - rtp_video_stream_receiver_->AddSecondarySink(&kept_secondary_sink); - rtp_video_stream_receiver_->AddSecondarySink(&removed_secondary_sink); - rtp_video_stream_receiver_->RemoveSecondarySink(&removed_secondary_sink); - - auto rtp_packet = CreateRtpPacketReceived(); - EXPECT_CALL(kept_secondary_sink, OnRtpPacket(SamePacketAs(*rtp_packet))); - - rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet); - - // Test tear-down. 
- rtp_video_stream_receiver_->StopReceive(); - rtp_video_stream_receiver_->RemoveSecondarySink(&kept_secondary_sink); -} - -TEST_F(RtpVideoStreamReceiver2Test, - SecondariesOfNonStartedStreamGetNoNotifications) { +TEST_F(RtpVideoStreamReceiver2Test, NonStartedStreamGetsNoRtpCallbacks) { // Explicitly showing that the stream is not in the |started| state, // regardless of whether streams start out |started| or |stopped|. rtp_video_stream_receiver_->StopReceive(); - MockRtpPacketSink secondary_sink; - rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink); + MockRtpPacketSink test_sink; + test_packet_sink_ = &test_sink; auto rtp_packet = CreateRtpPacketReceived(); - EXPECT_CALL(secondary_sink, OnRtpPacket(_)).Times(0); + EXPECT_CALL(test_sink, OnRtpPacket(_)).Times(0); rtp_video_stream_receiver_->OnRtpPacket(*rtp_packet); - // Test tear-down. - rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink); + test_packet_sink_ = nullptr; } TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorOnePacket) { @@ -798,11 +818,11 @@ TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorOnePacket) { rtp_packet.SetSequenceNumber(1); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([kSpatialIndex](EncodedFrame* frame) { EXPECT_EQ(frame->num_references, 2U); - EXPECT_EQ(frame->references[0], frame->id.picture_id - 90); - EXPECT_EQ(frame->references[1], frame->id.picture_id - 80); - EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex); + EXPECT_EQ(frame->references[0], frame->Id() - 90); + EXPECT_EQ(frame->references[1], frame->Id() - 80); + EXPECT_EQ(frame->SpatialIndex(), kSpatialIndex); EXPECT_THAT(frame->PacketInfos(), SizeIs(1)); })); @@ -856,9 +876,9 @@ TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorTwoPackets) { data.size() - 1); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - 
.WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([kSpatialIndex](EncodedFrame* frame) { EXPECT_EQ(frame->num_references, 0U); - EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex); + EXPECT_EQ(frame->SpatialIndex(), kSpatialIndex); EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u); EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u); EXPECT_THAT(frame->PacketInfos(), SizeIs(2)); @@ -872,8 +892,8 @@ TEST_F(RtpVideoStreamReceiver2Test, ParseGenericDescriptorRawPayload) { const int kRawPayloadType = 123; VideoCodec codec; - codec.plType = kRawPayloadType; - rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true); + rtp_video_stream_receiver_->AddReceiveCodec(kRawPayloadType, codec, {}, + /*raw_payload=*/true); rtp_video_stream_receiver_->StartReceive(); RtpHeaderExtensionMap extension_map; @@ -904,8 +924,8 @@ TEST_F(RtpVideoStreamReceiver2Test, UnwrapsFrameId) { const int kPayloadType = 123; VideoCodec codec; - codec.plType = kPayloadType; - rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true); + rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, {}, + /*raw_payload=*/true); rtp_video_stream_receiver_->StartReceive(); RtpHeaderExtensionMap extension_map; extension_map.Register(5); @@ -935,14 +955,12 @@ TEST_F(RtpVideoStreamReceiver2Test, UnwrapsFrameId) { int64_t first_picture_id; EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - first_picture_id = frame->id.picture_id; - }); + .WillOnce([&](EncodedFrame* frame) { first_picture_id = frame->Id(); }); inject_packet(/*wrapped_frame_id=*/0xffff); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - EXPECT_EQ(frame->id.picture_id - first_picture_id, 3); + .WillOnce([&](EncodedFrame* frame) { + EXPECT_EQ(frame->Id() - first_picture_id, 3); }); inject_packet(/*wrapped_frame_id=*/0x0002); } 
@@ -952,8 +970,7 @@ class RtpVideoStreamReceiver2DependencyDescriptorTest public: RtpVideoStreamReceiver2DependencyDescriptorTest() { VideoCodec codec; - codec.plType = payload_type_; - rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, + rtp_video_stream_receiver_->AddReceiveCodec(payload_type_, codec, {}, /*raw_payload=*/true); extension_map_.Register(7); rtp_video_stream_receiver_->StartReceive(); @@ -965,8 +982,8 @@ class RtpVideoStreamReceiver2DependencyDescriptorTest FrameDependencyStructure stream_structure; stream_structure.num_decode_targets = 1; stream_structure.templates = { - GenericFrameInfo::Builder().Dtis("S").Build(), - GenericFrameInfo::Builder().Dtis("S").Fdiffs({1}).Build(), + FrameDependencyTemplate().Dtis("S"), + FrameDependencyTemplate().Dtis("S").FrameDiffs({1}), }; return stream_structure; } @@ -1007,9 +1024,7 @@ TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, UnwrapsFrameId) { // keyframe. Thus feed a key frame first, then test reodered delta frames. int64_t first_picture_id; EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - first_picture_id = frame->id.picture_id; - }); + .WillOnce([&](EncodedFrame* frame) { first_picture_id = frame->Id(); }); InjectPacketWith(stream_structure, keyframe_descriptor); DependencyDescriptor deltaframe1_descriptor; @@ -1023,13 +1038,13 @@ TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, UnwrapsFrameId) { // Parser should unwrap frame ids correctly even if packets were reordered by // the network. 
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { + .WillOnce([&](EncodedFrame* frame) { // 0x0002 - 0xfff0 - EXPECT_EQ(frame->id.picture_id - first_picture_id, 18); + EXPECT_EQ(frame->Id() - first_picture_id, 18); }) - .WillOnce([&](video_coding::EncodedFrame* frame) { + .WillOnce([&](EncodedFrame* frame) { // 0xfffe - 0xfff0 - EXPECT_EQ(frame->id.picture_id - first_picture_id, 14); + EXPECT_EQ(frame->Id() - first_picture_id, 14); }); InjectPacketWith(stream_structure, deltaframe2_descriptor); InjectPacketWith(stream_structure, deltaframe1_descriptor); @@ -1093,9 +1108,8 @@ TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, keyframe2_descriptor.frame_number = 3; EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - EXPECT_EQ(frame->id.picture_id & 0xFFFF, 3); - }); + .WillOnce( + [&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 3); }); InjectPacketWith(stream_structure2, keyframe2_descriptor); InjectPacketWith(stream_structure1, keyframe1_descriptor); @@ -1105,39 +1119,25 @@ TEST_F(RtpVideoStreamReceiver2DependencyDescriptorTest, deltaframe_descriptor.frame_dependencies = stream_structure2.templates[0]; deltaframe_descriptor.frame_number = 4; EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - EXPECT_EQ(frame->id.picture_id & 0xFFFF, 4); - }); + .WillOnce( + [&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 4); }); InjectPacketWith(stream_structure2, deltaframe_descriptor); } -#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -TEST_F(RtpVideoStreamReceiver2Test, RepeatedSecondarySinkDisallowed) { - MockRtpPacketSink secondary_sink; - - rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink); - EXPECT_DEATH(rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink), - ""); - - // Test tear-down. 
- rtp_video_stream_receiver_->RemoveSecondarySink(&secondary_sink); -} -#endif - TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) { rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject>(); + rtc::make_ref_counted>(); EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc)); auto receiver = std::make_unique( - Clock::GetRealTimeClock(), &mock_transport_, nullptr, nullptr, &config_, - rtp_receive_statistics_.get(), nullptr, nullptr, process_thread_.get(), - &mock_nack_sender_, nullptr, &mock_on_complete_frame_callback_, nullptr, - mock_frame_transformer); + TaskQueueBase::Current(), Clock::GetRealTimeClock(), &mock_transport_, + nullptr, nullptr, &config_, rtp_receive_statistics_.get(), nullptr, + nullptr, &mock_nack_sender_, nullptr, &mock_on_complete_frame_callback_, + nullptr, mock_frame_transformer); VideoCodec video_codec; - video_codec.plType = kPayloadType; video_codec.codecType = kVideoCodecGeneric; - receiver->AddReceiveCodec(video_codec, {}, /*raw_payload=*/false); + receiver->AddReceiveCodec(kPayloadType, video_codec, {}, + /*raw_payload=*/false); RtpPacketReceived rtp_packet; rtp_packet.SetPayloadType(kPayloadType); @@ -1156,11 +1156,11 @@ TEST_F(RtpVideoStreamReceiver2Test, TransformFrame) { } // Test default behavior and when playout delay is overridden by field trial. 
-const PlayoutDelay kTransmittedPlayoutDelay = {100, 200}; -const PlayoutDelay kForcedPlayoutDelay = {70, 90}; +const VideoPlayoutDelay kTransmittedPlayoutDelay = {100, 200}; +const VideoPlayoutDelay kForcedPlayoutDelay = {70, 90}; struct PlayoutDelayOptions { std::string field_trial; - PlayoutDelay expected_delay; + VideoPlayoutDelay expected_delay; }; const PlayoutDelayOptions kDefaultBehavior = { /*field_trial=*/"", /*expected_delay=*/kTransmittedPlayoutDelay}; @@ -1204,8 +1204,8 @@ TEST_P(RtpVideoStreamReceiver2TestPlayoutDelay, PlayoutDelay) { // Expect the playout delay of encoded frame to be the same as the transmitted // playout delay unless it was overridden by a field trial. EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke([expected_playout_delay = GetParam().expected_delay]( - video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([expected_playout_delay = + GetParam().expected_delay](EncodedFrame* frame) { EXPECT_EQ(frame->EncodedImage().playout_delay_, expected_playout_delay); })); rtp_video_stream_receiver_->OnReceivedPayloadData( diff --git a/video/rtp_video_stream_receiver_frame_transformer_delegate.cc b/video/rtp_video_stream_receiver_frame_transformer_delegate.cc index 31eb344d5b..f2f81df3ee 100644 --- a/video/rtp_video_stream_receiver_frame_transformer_delegate.cc +++ b/video/rtp_video_stream_receiver_frame_transformer_delegate.cc @@ -24,9 +24,8 @@ namespace { class TransformableVideoReceiverFrame : public TransformableVideoFrameInterface { public: - TransformableVideoReceiverFrame( - std::unique_ptr frame, - uint32_t ssrc) + TransformableVideoReceiverFrame(std::unique_ptr frame, + uint32_t ssrc) : frame_(std::move(frame)), metadata_(frame_->GetRtpVideoHeader()), ssrc_(ssrc) {} @@ -55,12 +54,12 @@ class TransformableVideoReceiverFrame const VideoFrameMetadata& GetMetadata() const override { return metadata_; } - std::unique_ptr ExtractFrame() && { + std::unique_ptr ExtractFrame() && { return 
std::move(frame_); } private: - std::unique_ptr frame_; + std::unique_ptr frame_; const VideoFrameMetadata metadata_; const uint32_t ssrc_; }; @@ -91,7 +90,7 @@ void RtpVideoStreamReceiverFrameTransformerDelegate::Reset() { } void RtpVideoStreamReceiverFrameTransformerDelegate::TransformFrame( - std::unique_ptr frame) { + std::unique_ptr frame) { RTC_DCHECK_RUN_ON(&network_sequence_checker_); frame_transformer_->Transform( std::make_unique(std::move(frame), diff --git a/video/rtp_video_stream_receiver_frame_transformer_delegate.h b/video/rtp_video_stream_receiver_frame_transformer_delegate.h index e687e7f47b..ef05d91fd3 100644 --- a/video/rtp_video_stream_receiver_frame_transformer_delegate.h +++ b/video/rtp_video_stream_receiver_frame_transformer_delegate.h @@ -14,8 +14,9 @@ #include #include "api/frame_transformer_interface.h" +#include "api/sequence_checker.h" #include "modules/video_coding/frame_object.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/thread.h" namespace webrtc { @@ -24,8 +25,7 @@ namespace webrtc { // thread after transformation. class RtpVideoFrameReceiver { public: - virtual void ManageFrame( - std::unique_ptr frame) = 0; + virtual void ManageFrame(std::unique_ptr frame) = 0; protected: virtual ~RtpVideoFrameReceiver() = default; @@ -46,7 +46,7 @@ class RtpVideoStreamReceiverFrameTransformerDelegate void Reset(); // Delegates the call to FrameTransformerInterface::TransformFrame. - void TransformFrame(std::unique_ptr frame); + void TransformFrame(std::unique_ptr frame); // Implements TransformedFrameCallback. Can be called on any thread. Posts // the transformed frame to be managed on the |network_thread_|. 
@@ -61,7 +61,7 @@ class RtpVideoStreamReceiverFrameTransformerDelegate ~RtpVideoStreamReceiverFrameTransformerDelegate() override = default; private: - SequenceChecker network_sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker network_sequence_checker_; RtpVideoFrameReceiver* receiver_ RTC_GUARDED_BY(network_sequence_checker_); rtc::scoped_refptr frame_transformer_ RTC_GUARDED_BY(network_sequence_checker_); diff --git a/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc b/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc index a411ca6e9a..0d85cc08e2 100644 --- a/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc +++ b/video/rtp_video_stream_receiver_frame_transformer_delegate_unittest.cc @@ -35,15 +35,15 @@ using ::testing::ElementsAre; using ::testing::NiceMock; using ::testing::SaveArg; -std::unique_ptr CreateRtpFrameObject( +std::unique_ptr CreateRtpFrameObject( const RTPVideoHeader& video_header) { - return std::make_unique( + return std::make_unique( 0, 0, true, 0, 0, 0, 0, 0, VideoSendTiming(), 0, video_header.codec, kVideoRotation_0, VideoContentType::UNSPECIFIED, video_header, absl::nullopt, RtpPacketInfos(), EncodedImageBuffer::Create(0)); } -std::unique_ptr CreateRtpFrameObject() { +std::unique_ptr CreateRtpFrameObject() { return CreateRtpFrameObject(RTPVideoHeader()); } @@ -54,17 +54,16 @@ class TestRtpVideoFrameReceiver : public RtpVideoFrameReceiver { MOCK_METHOD(void, ManageFrame, - (std::unique_ptr frame), + (std::unique_ptr frame), (override)); }; TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, RegisterTransformedFrameCallbackSinkOnInit) { TestRtpVideoFrameReceiver receiver; - rtc::scoped_refptr frame_transformer( - new rtc::RefCountedObject()); - rtc::scoped_refptr delegate( - new rtc::RefCountedObject( + auto frame_transformer(rtc::make_ref_counted()); + auto delegate( + rtc::make_ref_counted( &receiver, frame_transformer, rtc::Thread::Current(), /*remote_ssrc*/ 1111)); 
EXPECT_CALL(*frame_transformer, @@ -75,10 +74,9 @@ TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, UnregisterTransformedFrameSinkCallbackOnReset) { TestRtpVideoFrameReceiver receiver; - rtc::scoped_refptr frame_transformer( - new rtc::RefCountedObject()); - rtc::scoped_refptr delegate( - new rtc::RefCountedObject( + auto frame_transformer(rtc::make_ref_counted()); + auto delegate( + rtc::make_ref_counted( &receiver, frame_transformer, rtc::Thread::Current(), /*remote_ssrc*/ 1111)); EXPECT_CALL(*frame_transformer, UnregisterTransformedFrameSinkCallback(1111)); @@ -87,10 +85,10 @@ TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TransformFrame) { TestRtpVideoFrameReceiver receiver; - rtc::scoped_refptr frame_transformer( - new rtc::RefCountedObject>()); - rtc::scoped_refptr delegate( - new rtc::RefCountedObject( + auto frame_transformer( + rtc::make_ref_counted>()); + auto delegate( + rtc::make_ref_counted( &receiver, frame_transformer, rtc::Thread::Current(), /*remote_ssrc*/ 1111)); auto frame = CreateRtpFrameObject(); @@ -101,10 +99,10 @@ TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TransformFrame) { TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, ManageFrameOnTransformedFrame) { TestRtpVideoFrameReceiver receiver; - rtc::scoped_refptr mock_frame_transformer( - new rtc::RefCountedObject>()); - rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + auto mock_frame_transformer( + rtc::make_ref_counted>()); + auto delegate = + rtc::make_ref_counted( &receiver, mock_frame_transformer, rtc::Thread::Current(), /*remote_ssrc*/ 1111); @@ -127,10 +125,10 @@ TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TEST(RtpVideoStreamReceiverFrameTransformerDelegateTest, TransformableFrameMetadataHasCorrectValue) { TestRtpVideoFrameReceiver receiver; - rtc::scoped_refptr mock_frame_transformer = - new 
rtc::RefCountedObject>(); - rtc::scoped_refptr delegate = - new rtc::RefCountedObject( + auto mock_frame_transformer = + rtc::make_ref_counted>(); + auto delegate = + rtc::make_ref_counted( &receiver, mock_frame_transformer, rtc::Thread::Current(), 1111); delegate->Init(); RTPVideoHeader video_header; diff --git a/video/rtp_video_stream_receiver_unittest.cc b/video/rtp_video_stream_receiver_unittest.cc index 510cad37c1..765e1e1716 100644 --- a/video/rtp_video_stream_receiver_unittest.cc +++ b/video/rtp_video_stream_receiver_unittest.cc @@ -50,8 +50,7 @@ namespace { const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01}; -std::vector GetAbsoluteCaptureTimestamps( - const video_coding::EncodedFrame* frame) { +std::vector GetAbsoluteCaptureTimestamps(const EncodedFrame* frame) { std::vector result; for (const auto& packet_info : frame->PacketInfos()) { if (packet_info.absolute_capture_time()) { @@ -95,23 +94,13 @@ class MockKeyFrameRequestSender : public KeyFrameRequestSender { }; class MockOnCompleteFrameCallback - : public video_coding::OnCompleteFrameCallback { + : public RtpVideoStreamReceiver::OnCompleteFrameCallback { public: - MOCK_METHOD(void, DoOnCompleteFrame, (video_coding::EncodedFrame*), ()); - MOCK_METHOD(void, - DoOnCompleteFrameFailNullptr, - (video_coding::EncodedFrame*), - ()); - MOCK_METHOD(void, - DoOnCompleteFrameFailLength, - (video_coding::EncodedFrame*), - ()); - MOCK_METHOD(void, - DoOnCompleteFrameFailBitstream, - (video_coding::EncodedFrame*), - ()); - void OnCompleteFrame( - std::unique_ptr frame) override { + MOCK_METHOD(void, DoOnCompleteFrame, (EncodedFrame*), ()); + MOCK_METHOD(void, DoOnCompleteFrameFailNullptr, (EncodedFrame*), ()); + MOCK_METHOD(void, DoOnCompleteFrameFailLength, (EncodedFrame*), ()); + MOCK_METHOD(void, DoOnCompleteFrameFailBitstream, (EncodedFrame*), ()); + void OnCompleteFrame(std::unique_ptr frame) override { if (!frame) { DoOnCompleteFrameFailNullptr(nullptr); return; @@ -178,9 +167,8 @@ class 
RtpVideoStreamReceiverTest : public ::testing::Test { &mock_nack_sender_, &mock_key_frame_request_sender_, &mock_on_complete_frame_callback_, nullptr, nullptr); VideoCodec codec; - codec.plType = kPayloadType; codec.codecType = kVideoCodecGeneric; - rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, + rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, {}, /*raw_payload=*/false); } @@ -200,7 +188,7 @@ class RtpVideoStreamReceiverTest : public ::testing::Test { info.type = H264::NaluType::kSps; info.sps_id = sps_id; info.pps_id = -1; - data->AppendData({H264::NaluType::kSps, sps_id}); + data->AppendData({H264::NaluType::kSps, sps_id}); auto& h264 = absl::get(video_header->video_type_header); h264.nalus[h264.nalus_length++] = info; } @@ -213,7 +201,7 @@ class RtpVideoStreamReceiverTest : public ::testing::Test { info.type = H264::NaluType::kPps; info.sps_id = sps_id; info.pps_id = pps_id; - data->AppendData({H264::NaluType::kPps, pps_id}); + data->AppendData({H264::NaluType::kPps, pps_id}); auto& h264 = absl::get(video_header->video_type_header); h264.nalus[h264.nalus_length++] = info; } @@ -312,10 +300,10 @@ TEST_F(RtpVideoStreamReceiverTest, CacheColorSpaceFromLastPacketOfKeyframe) { // Prepare the receiver for VP9. VideoCodec codec; - codec.plType = kVp9PayloadType; codec.codecType = kVideoCodecVP9; std::map codec_params; - rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params, + rtp_video_stream_receiver_->AddReceiveCodec(kVp9PayloadType, codec, + codec_params, /*raw_payload=*/false); // Generate key frame packets. 
@@ -340,7 +328,7 @@ TEST_F(RtpVideoStreamReceiverTest, CacheColorSpaceFromLastPacketOfKeyframe) { EXPECT_TRUE(key_frame_packet2.GetExtension()); rtp_video_stream_receiver_->OnRtpPacket(key_frame_packet1); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([kColorSpace](EncodedFrame* frame) { ASSERT_TRUE(frame->EncodedImage().ColorSpace()); EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace); })); @@ -356,7 +344,7 @@ TEST_F(RtpVideoStreamReceiverTest, CacheColorSpaceFromLastPacketOfKeyframe) { // included in the RTP packet. EXPECT_FALSE(delta_frame_packet.GetExtension()); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke([kColorSpace](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([kColorSpace](EncodedFrame* frame) { ASSERT_TRUE(frame->EncodedImage().ColorSpace()); EXPECT_EQ(*frame->EncodedImage().ColorSpace(), kColorSpace); })); @@ -398,11 +386,10 @@ TEST_F(RtpVideoStreamReceiverTest, PacketInfoIsPropagatedIntoVideoFrames) { mock_on_complete_frame_callback_.AppendExpectedBitstream(data.data(), data.size()); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke( - [kAbsoluteCaptureTimestamp](video_coding::EncodedFrame* frame) { - EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), - ElementsAre(kAbsoluteCaptureTimestamp)); - })); + .WillOnce(Invoke([kAbsoluteCaptureTimestamp](EncodedFrame* frame) { + EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), + ElementsAre(kAbsoluteCaptureTimestamp)); + })); rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet, video_header); } @@ -446,7 +433,7 @@ TEST_F(RtpVideoStreamReceiverTest, // Expect rtp video stream receiver to extrapolate it for the resulting video // frame using absolute capture time from the previous packet. 
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke([](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([](EncodedFrame* frame) { EXPECT_THAT(GetAbsoluteCaptureTimestamps(frame), SizeIs(1)); })); rtp_video_stream_receiver_->OnReceivedPayloadData(data, rtp_packet, @@ -518,13 +505,7 @@ INSTANTIATE_TEST_SUITE_P(SpsPpsIdrIsKeyframe, RtpVideoStreamReceiverTestH264, Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/")); -// Fails on MSAN: https://bugs.chromium.org/p/webrtc/issues/detail?id=11376. -#if defined(MEMORY_SANITIZER) -#define MAYBE_InBandSpsPps DISABLED_InBandSpsPps -#else -#define MAYBE_InBandSpsPps InBandSpsPps -#endif -TEST_P(RtpVideoStreamReceiverTestH264, MAYBE_InBandSpsPps) { +TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) { rtc::CopyOnWriteBuffer sps_data; RtpPacketReceived rtp_packet; RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader(); @@ -574,13 +555,12 @@ TEST_P(RtpVideoStreamReceiverTestH264, MAYBE_InBandSpsPps) { TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) { constexpr int kPayloadType = 99; VideoCodec codec; - codec.plType = kPayloadType; std::map codec_params; // Example parameter sets from https://tools.ietf.org/html/rfc3984#section-8.2 // . codec_params.insert( {cricket::kH264FmtpSpropParameterSets, "Z0IACpZTBYmI,aMljiA=="}); - rtp_video_stream_receiver_->AddReceiveCodec(codec, codec_params, + rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, codec_params, /*raw_payload=*/false); const uint8_t binary_sps[] = {0x67, 0x42, 0x00, 0x0a, 0x96, 0x53, 0x05, 0x89, 0x88}; @@ -613,6 +593,75 @@ TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) { video_header); } +TEST_P(RtpVideoStreamReceiverTestH264, ForceSpsPpsIdrIsKeyframe) { + constexpr int kPayloadType = 99; + VideoCodec codec; + std::map codec_params; + if (GetParam() == + "") { // Forcing can be done either with field trial or codec_params. 
+ codec_params.insert({cricket::kH264FmtpSpsPpsIdrInKeyframe, ""}); + } + rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, codec_params, + /*raw_payload=*/false); + rtc::CopyOnWriteBuffer sps_data; + RtpPacketReceived rtp_packet; + RTPVideoHeader sps_video_header = GetDefaultH264VideoHeader(); + AddSps(&sps_video_header, 0, &sps_data); + rtp_packet.SetSequenceNumber(0); + rtp_packet.SetPayloadType(kPayloadType); + sps_video_header.is_first_packet_in_frame = true; + sps_video_header.frame_type = VideoFrameType::kEmptyFrame; + mock_on_complete_frame_callback_.AppendExpectedBitstream( + kH264StartCode, sizeof(kH264StartCode)); + mock_on_complete_frame_callback_.AppendExpectedBitstream(sps_data.data(), + sps_data.size()); + rtp_video_stream_receiver_->OnReceivedPayloadData(sps_data, rtp_packet, + sps_video_header); + + rtc::CopyOnWriteBuffer pps_data; + RTPVideoHeader pps_video_header = GetDefaultH264VideoHeader(); + AddPps(&pps_video_header, 0, 1, &pps_data); + rtp_packet.SetSequenceNumber(1); + pps_video_header.is_first_packet_in_frame = true; + pps_video_header.frame_type = VideoFrameType::kEmptyFrame; + mock_on_complete_frame_callback_.AppendExpectedBitstream( + kH264StartCode, sizeof(kH264StartCode)); + mock_on_complete_frame_callback_.AppendExpectedBitstream(pps_data.data(), + pps_data.size()); + rtp_video_stream_receiver_->OnReceivedPayloadData(pps_data, rtp_packet, + pps_video_header); + + rtc::CopyOnWriteBuffer idr_data; + RTPVideoHeader idr_video_header = GetDefaultH264VideoHeader(); + AddIdr(&idr_video_header, 1); + rtp_packet.SetSequenceNumber(2); + idr_video_header.is_first_packet_in_frame = true; + idr_video_header.is_last_packet_in_frame = true; + idr_video_header.frame_type = VideoFrameType::kVideoFrameKey; + const uint8_t idr[] = {0x65, 1, 2, 3}; + idr_data.AppendData(idr); + mock_on_complete_frame_callback_.AppendExpectedBitstream( + kH264StartCode, sizeof(kH264StartCode)); + 
mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(), + idr_data.size()); + EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) + .WillOnce( + [&](EncodedFrame* frame) { EXPECT_TRUE(frame->is_keyframe()); }); + rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet, + idr_video_header); + mock_on_complete_frame_callback_.ClearExpectedBitstream(); + mock_on_complete_frame_callback_.AppendExpectedBitstream( + kH264StartCode, sizeof(kH264StartCode)); + mock_on_complete_frame_callback_.AppendExpectedBitstream(idr_data.data(), + idr_data.size()); + rtp_packet.SetSequenceNumber(3); + EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) + .WillOnce( + [&](EncodedFrame* frame) { EXPECT_FALSE(frame->is_keyframe()); }); + rtp_video_stream_receiver_->OnReceivedPayloadData(idr_data, rtp_packet, + idr_video_header); +} + TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) { RtpPacketReceived rtp_packet; RTPVideoHeader video_header = GetDefaultH264VideoHeader(); @@ -796,11 +845,11 @@ TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorOnePacket) { rtp_packet.SetSequenceNumber(1); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([kSpatialIndex](EncodedFrame* frame) { EXPECT_EQ(frame->num_references, 2U); - EXPECT_EQ(frame->references[0], frame->id.picture_id - 90); - EXPECT_EQ(frame->references[1], frame->id.picture_id - 80); - EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex); + EXPECT_EQ(frame->references[0], frame->Id() - 90); + EXPECT_EQ(frame->references[1], frame->Id() - 80); + EXPECT_EQ(frame->SpatialIndex(), kSpatialIndex); EXPECT_THAT(frame->PacketInfos(), SizeIs(1)); })); @@ -854,9 +903,9 @@ TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorTwoPackets) { data.size() - 1); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - 
.WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([kSpatialIndex](EncodedFrame* frame) { EXPECT_EQ(frame->num_references, 0U); - EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex); + EXPECT_EQ(frame->SpatialIndex(), kSpatialIndex); EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u); EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u); EXPECT_THAT(frame->PacketInfos(), SizeIs(2)); @@ -870,8 +919,8 @@ TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorRawPayload) { const int kRawPayloadType = 123; VideoCodec codec; - codec.plType = kRawPayloadType; - rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true); + rtp_video_stream_receiver_->AddReceiveCodec(kRawPayloadType, codec, {}, + /*raw_payload=*/true); rtp_video_stream_receiver_->StartReceive(); RtpHeaderExtensionMap extension_map; @@ -902,8 +951,8 @@ TEST_F(RtpVideoStreamReceiverTest, UnwrapsFrameId) { const int kPayloadType = 123; VideoCodec codec; - codec.plType = kPayloadType; - rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, /*raw_payload=*/true); + rtp_video_stream_receiver_->AddReceiveCodec(kPayloadType, codec, {}, + /*raw_payload=*/true); rtp_video_stream_receiver_->StartReceive(); RtpHeaderExtensionMap extension_map; extension_map.Register(5); @@ -933,14 +982,12 @@ TEST_F(RtpVideoStreamReceiverTest, UnwrapsFrameId) { int64_t first_picture_id; EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - first_picture_id = frame->id.picture_id; - }); + .WillOnce([&](EncodedFrame* frame) { first_picture_id = frame->Id(); }); inject_packet(/*wrapped_frame_id=*/0xffff); EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - EXPECT_EQ(frame->id.picture_id - first_picture_id, 3); + .WillOnce([&](EncodedFrame* frame) { + EXPECT_EQ(frame->Id() - first_picture_id, 3); }); inject_packet(/*wrapped_frame_id=*/0x0002); } @@ 
-950,8 +997,7 @@ class RtpVideoStreamReceiverDependencyDescriptorTest public: RtpVideoStreamReceiverDependencyDescriptorTest() { VideoCodec codec; - codec.plType = payload_type_; - rtp_video_stream_receiver_->AddReceiveCodec(codec, {}, + rtp_video_stream_receiver_->AddReceiveCodec(payload_type_, codec, {}, /*raw_payload=*/true); extension_map_.Register(7); rtp_video_stream_receiver_->StartReceive(); @@ -963,8 +1009,8 @@ class RtpVideoStreamReceiverDependencyDescriptorTest FrameDependencyStructure stream_structure; stream_structure.num_decode_targets = 1; stream_structure.templates = { - GenericFrameInfo::Builder().Dtis("S").Build(), - GenericFrameInfo::Builder().Dtis("S").Fdiffs({1}).Build(), + FrameDependencyTemplate().Dtis("S"), + FrameDependencyTemplate().Dtis("S").FrameDiffs({1}), }; return stream_structure; } @@ -1005,9 +1051,7 @@ TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest, UnwrapsFrameId) { // keyframe. Thus feed a key frame first, then test reodered delta frames. int64_t first_picture_id; EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - first_picture_id = frame->id.picture_id; - }); + .WillOnce([&](EncodedFrame* frame) { first_picture_id = frame->Id(); }); InjectPacketWith(stream_structure, keyframe_descriptor); DependencyDescriptor deltaframe1_descriptor; @@ -1021,13 +1065,13 @@ TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest, UnwrapsFrameId) { // Parser should unwrap frame ids correctly even if packets were reordered by // the network. 
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { + .WillOnce([&](EncodedFrame* frame) { // 0x0002 - 0xfff0 - EXPECT_EQ(frame->id.picture_id - first_picture_id, 18); + EXPECT_EQ(frame->Id() - first_picture_id, 18); }) - .WillOnce([&](video_coding::EncodedFrame* frame) { + .WillOnce([&](EncodedFrame* frame) { // 0xfffe - 0xfff0 - EXPECT_EQ(frame->id.picture_id - first_picture_id, 14); + EXPECT_EQ(frame->Id() - first_picture_id, 14); }); InjectPacketWith(stream_structure, deltaframe2_descriptor); InjectPacketWith(stream_structure, deltaframe1_descriptor); @@ -1091,9 +1135,8 @@ TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest, keyframe2_descriptor.frame_number = 3; EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - EXPECT_EQ(frame->id.picture_id & 0xFFFF, 3); - }); + .WillOnce( + [&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 3); }); InjectPacketWith(stream_structure2, keyframe2_descriptor); InjectPacketWith(stream_structure1, keyframe1_descriptor); @@ -1103,14 +1146,14 @@ TEST_F(RtpVideoStreamReceiverDependencyDescriptorTest, deltaframe_descriptor.frame_dependencies = stream_structure2.templates[0]; deltaframe_descriptor.frame_number = 4; EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame) - .WillOnce([&](video_coding::EncodedFrame* frame) { - EXPECT_EQ(frame->id.picture_id & 0xFFFF, 4); - }); + .WillOnce( + [&](EncodedFrame* frame) { EXPECT_EQ(frame->Id() & 0xFFFF, 4); }); InjectPacketWith(stream_structure2, deltaframe_descriptor); } #if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID) -TEST_F(RtpVideoStreamReceiverTest, RepeatedSecondarySinkDisallowed) { +using RtpVideoStreamReceiverDeathTest = RtpVideoStreamReceiverTest; +TEST_F(RtpVideoStreamReceiverDeathTest, RepeatedSecondarySinkDisallowed) { MockRtpPacketSink secondary_sink; 
rtp_video_stream_receiver_->AddSecondarySink(&secondary_sink); @@ -1123,8 +1166,8 @@ TEST_F(RtpVideoStreamReceiverTest, RepeatedSecondarySinkDisallowed) { #endif TEST_F(RtpVideoStreamReceiverTest, TransformFrame) { - rtc::scoped_refptr mock_frame_transformer = - new rtc::RefCountedObject>(); + auto mock_frame_transformer = + rtc::make_ref_counted>(); EXPECT_CALL(*mock_frame_transformer, RegisterTransformedFrameSinkCallback(_, config_.rtp.remote_ssrc)); auto receiver = std::make_unique( @@ -1133,9 +1176,9 @@ TEST_F(RtpVideoStreamReceiverTest, TransformFrame) { &mock_nack_sender_, nullptr, &mock_on_complete_frame_callback_, nullptr, mock_frame_transformer); VideoCodec video_codec; - video_codec.plType = kPayloadType; video_codec.codecType = kVideoCodecGeneric; - receiver->AddReceiveCodec(video_codec, {}, /*raw_payload=*/false); + receiver->AddReceiveCodec(kPayloadType, video_codec, {}, + /*raw_payload=*/false); RtpPacketReceived rtp_packet; rtp_packet.SetPayloadType(kPayloadType); @@ -1154,11 +1197,11 @@ TEST_F(RtpVideoStreamReceiverTest, TransformFrame) { } // Test default behavior and when playout delay is overridden by field trial. -const PlayoutDelay kTransmittedPlayoutDelay = {100, 200}; -const PlayoutDelay kForcedPlayoutDelay = {70, 90}; +const VideoPlayoutDelay kTransmittedPlayoutDelay = {100, 200}; +const VideoPlayoutDelay kForcedPlayoutDelay = {70, 90}; struct PlayoutDelayOptions { std::string field_trial; - PlayoutDelay expected_delay; + VideoPlayoutDelay expected_delay; }; const PlayoutDelayOptions kDefaultBehavior = { /*field_trial=*/"", /*expected_delay=*/kTransmittedPlayoutDelay}; @@ -1202,8 +1245,8 @@ TEST_P(RtpVideoStreamReceiverTestPlayoutDelay, PlayoutDelay) { // Expect the playout delay of encoded frame to be the same as the transmitted // playout delay unless it was overridden by a field trial. 
EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame(_)) - .WillOnce(Invoke([expected_playout_delay = GetParam().expected_delay]( - video_coding::EncodedFrame* frame) { + .WillOnce(Invoke([expected_playout_delay = + GetParam().expected_delay](EncodedFrame* frame) { EXPECT_EQ(frame->EncodedImage().playout_delay_, expected_playout_delay); })); rtp_video_stream_receiver_->OnReceivedPayloadData( diff --git a/video/screenshare_loopback.cc b/video/screenshare_loopback.cc index b8121dc85c..239e472f6e 100644 --- a/video/screenshare_loopback.cc +++ b/video/screenshare_loopback.cc @@ -325,32 +325,29 @@ void Loopback() { call_bitrate_config.max_bitrate_bps = -1; // Don't cap bandwidth estimate. VideoQualityTest::Params params; - params.call = {absl::GetFlag(FLAGS_send_side_bwe), - absl::GetFlag(FLAGS_generic_descriptor), call_bitrate_config}; - params.video[0] = {true, - Width(), - Height(), - Fps(), - MinBitrateKbps() * 1000, - TargetBitrateKbps() * 1000, - MaxBitrateKbps() * 1000, - false, - Codec(), - NumTemporalLayers(), - SelectedTL(), - MinTransmitBitrateKbps() * 1000, - false, // ULPFEC disabled. - false, // FlexFEC disabled. - false, // Automatic scaling disabled. - "", - 0, // capture_device_index. 
- SdpVideoFormat::Parameters()}; - params.screenshare[0] = {true, GenerateSlides(), SlideChangeInterval(), - ScrollDuration(), Slides()}; - params.analyzer = {"screenshare", 0.0, 0.0, DurationSecs(), - OutputFilename(), GraphTitle()}; + params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe); + params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor); + params.call.call_bitrate_config = call_bitrate_config; + params.video[0].enabled = true; + params.video[0].width = Width(); + params.video[0].height = Height(); + params.video[0].fps = Fps(); + params.video[0].min_bitrate_bps = MinBitrateKbps() * 1000; + params.video[0].target_bitrate_bps = TargetBitrateKbps() * 1000; + params.video[0].max_bitrate_bps = MaxBitrateKbps() * 1000; + params.video[0].codec = Codec(); + params.video[0].num_temporal_layers = NumTemporalLayers(); + params.video[0].selected_tl = SelectedTL(); + params.video[0].min_transmit_bps = MinTransmitBitrateKbps() * 1000; + params.screenshare[0].enabled = true; + params.screenshare[0].generate_slides = GenerateSlides(); + params.screenshare[0].slide_change_interval = SlideChangeInterval(); + params.screenshare[0].scroll_duration = ScrollDuration(); + params.screenshare[0].slides = Slides(); params.config = pipe_config; - params.logging = {RtcEventLogName(), RtpDumpName(), EncodedFramePath()}; + params.logging.rtc_event_log_name = RtcEventLogName(); + params.logging.rtp_dump_name = RtpDumpName(); + params.logging.encoded_frame_base_path = EncodedFramePath(); if (NumStreams() > 1 && Stream0().empty() && Stream1().empty()) { params.ss[0].infer_streams = true; diff --git a/video/send_delay_stats.cc b/video/send_delay_stats.cc index a243eda292..56c4164424 100644 --- a/video/send_delay_stats.cc +++ b/video/send_delay_stats.cc @@ -41,7 +41,7 @@ SendDelayStats::~SendDelayStats() { } void SendDelayStats::UpdateHistograms() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); for (const auto& it : send_delay_counters_) { 
AggregatedStats stats = it.second->GetStats(); if (stats.num_samples >= kMinRequiredPeriodicSamples) { @@ -52,7 +52,7 @@ void SendDelayStats::UpdateHistograms() { } void SendDelayStats::AddSsrcs(const VideoSendStream::Config& config) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (ssrcs_.size() > kMaxSsrcMapSize) return; for (const auto& ssrc : config.rtp.ssrcs) @@ -73,7 +73,7 @@ void SendDelayStats::OnSendPacket(uint16_t packet_id, int64_t capture_time_ms, uint32_t ssrc) { // Packet sent to transport. - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (ssrcs_.find(ssrc) == ssrcs_.end()) return; @@ -93,7 +93,7 @@ bool SendDelayStats::OnSentPacket(int packet_id, int64_t time_ms) { if (packet_id == -1) return false; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); auto it = packets_.find(packet_id); if (it == packets_.end()) return false; diff --git a/video/send_delay_stats.h b/video/send_delay_stats.h index d9fa16a126..fa76a1e39c 100644 --- a/video/send_delay_stats.h +++ b/video/send_delay_stats.h @@ -20,13 +20,19 @@ #include "call/video_send_stream.h" #include "modules/include/module_common_types_public.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" #include "video/stats_counter.h" namespace webrtc { +// Used to collect delay stats for video streams. The class gets callbacks +// from more than one threads and internally uses a mutex for data access +// synchronization. +// TODO(bugs.webrtc.org/11993): OnSendPacket and OnSentPacket will eventually +// be called consistently on the same thread. Once we're there, we should be +// able to avoid locking (at least for the fast path). 
class SendDelayStats : public SendPacketObserver { public: explicit SendDelayStats(Clock* clock); @@ -66,22 +72,22 @@ class SendDelayStats : public SendPacketObserver { void UpdateHistograms(); void RemoveOld(int64_t now, PacketMap* packets) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); AvgCounter* GetSendDelayCounter(uint32_t ssrc) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); Clock* const clock_; - rtc::CriticalSection crit_; + Mutex mutex_; - PacketMap packets_ RTC_GUARDED_BY(crit_); - size_t num_old_packets_ RTC_GUARDED_BY(crit_); - size_t num_skipped_packets_ RTC_GUARDED_BY(crit_); + PacketMap packets_ RTC_GUARDED_BY(mutex_); + size_t num_old_packets_ RTC_GUARDED_BY(mutex_); + size_t num_skipped_packets_ RTC_GUARDED_BY(mutex_); - std::set ssrcs_ RTC_GUARDED_BY(crit_); + std::set ssrcs_ RTC_GUARDED_BY(mutex_); // Mapped by SSRC. std::map> send_delay_counters_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); }; } // namespace webrtc diff --git a/video/send_statistics_proxy.cc b/video/send_statistics_proxy.cc index f8d768f9d2..1b968ef8f7 100644 --- a/video/send_statistics_proxy.cc +++ b/video/send_statistics_proxy.cc @@ -154,7 +154,7 @@ SendStatisticsProxy::SendStatisticsProxy( } SendStatisticsProxy::~SendStatisticsProxy() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); uma_container_->UpdateHistograms(rtp_config_, stats_); int64_t elapsed_sec = (clock_->TimeInMilliseconds() - start_ms_) / 1000; @@ -670,7 +670,8 @@ void SendStatisticsProxy::UmaSamplesContainer::UpdateHistograms( void SendStatisticsProxy::OnEncoderReconfigured( const VideoEncoderConfig& config, const std::vector& streams) { - rtc::CritScope lock(&crit_); + // Called on VideoStreamEncoder's encoder_queue_. 
+ MutexLock lock(&mutex_); if (content_type_ != config.content_type) { uma_container_->UpdateHistograms(rtp_config_, stats_); @@ -687,7 +688,7 @@ void SendStatisticsProxy::OnEncoderReconfigured( void SendStatisticsProxy::OnEncodedFrameTimeMeasured(int encode_time_ms, int encode_usage_percent) { RTC_DCHECK_GE(encode_time_ms, 0); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); uma_container_->encode_time_counter_.Add(encode_time_ms); encode_time_.Apply(1.0f, encode_time_ms); stats_.avg_encode_time_ms = std::round(encode_time_.filtered()); @@ -697,7 +698,7 @@ void SendStatisticsProxy::OnEncodedFrameTimeMeasured(int encode_time_ms, void SendStatisticsProxy::OnSuspendChange(bool is_suspended) { int64_t now_ms = clock_->TimeInMilliseconds(); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); stats_.suspended = is_suspended; if (is_suspended) { // Pause framerate (add min pause time since there may be frames/packets @@ -717,9 +718,11 @@ void SendStatisticsProxy::OnSuspendChange(bool is_suspended) { uma_container_->quality_adapt_timer_.Stop(now_ms); } else { // Start adaptation stats if scaling is enabled. - if (adaptations_.MaskedCpuCounts().resolution_adaptations.has_value()) + if (adaptation_limitations_.MaskedCpuCounts() + .resolution_adaptations.has_value()) uma_container_->cpu_adapt_timer_.Start(now_ms); - if (adaptations_.MaskedQualityCounts().resolution_adaptations.has_value()) + if (adaptation_limitations_.MaskedQualityCounts() + .resolution_adaptations.has_value()) uma_container_->quality_adapt_timer_.Start(now_ms); // Stop pause explicitly for stats that may be zero/not updated for some // time. 
@@ -731,10 +734,12 @@ void SendStatisticsProxy::OnSuspendChange(bool is_suspended) { } VideoSendStream::Stats SendStatisticsProxy::GetStats() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); PurgeOldStats(); stats_.input_frame_rate = round(uma_container_->input_frame_rate_tracker_.ComputeRate()); + stats_.frames = + uma_container_->input_frame_rate_tracker_.TotalSampleCount(); stats_.content_type = content_type_ == VideoEncoderConfig::ContentType::kRealtimeVideo ? VideoContentType::UNSPECIFIED @@ -801,7 +806,7 @@ VideoSendStream::StreamStats* SendStatisticsProxy::GetStatsEntry( } void SendStatisticsProxy::OnInactiveSsrc(uint32_t ssrc) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc); if (!stats) return; @@ -813,7 +818,7 @@ void SendStatisticsProxy::OnInactiveSsrc(uint32_t ssrc) { } void SendStatisticsProxy::OnSetEncoderTargetRate(uint32_t bitrate_bps) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (uma_container_->target_rate_updates_.last_ms == -1 && bitrate_bps == 0) return; // Start on first non-zero bitrate, may initially be zero. @@ -912,7 +917,7 @@ void SendStatisticsProxy::UpdateFallbackDisabledStats( } void SendStatisticsProxy::OnMinPixelLimitReached() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); uma_container_->fallback_info_disabled_.min_pixel_limit_reached = true; } @@ -927,7 +932,7 @@ void SendStatisticsProxy::OnSendEncodedImage( ? encoded_image.SpatialIndex().value_or(0) : 0; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ++stats_.frames_encoded; // The current encode frame rate is based on previously encoded frames. 
double encode_frame_rate = encoded_frame_rate_tracker_.ComputeRate(); @@ -967,13 +972,11 @@ void SendStatisticsProxy::OnSendEncodedImage( stats->frames_encoded++; stats->total_encode_time_ms += encoded_image.timing_.encode_finish_ms - encoded_image.timing_.encode_start_ms; - // Report resolution of top spatial layer in case of VP9 SVC. - bool is_svc_low_spatial_layer = - (codec_info && codec_info->codecType == kVideoCodecVP9) - ? !codec_info->codecSpecific.VP9.end_of_picture - : false; + // Report resolution of the top spatial layer. + bool is_top_spatial_layer = + codec_info == nullptr || codec_info->end_of_picture; - if (!stats->width || !stats->height || !is_svc_low_spatial_layer) { + if (!stats->width || !stats->height || is_top_spatial_layer) { stats->width = encoded_image._encodedWidth; stats->height = encoded_image._encodedHeight; update_times_[ssrc].resolution_update_ms = clock_->TimeInMilliseconds(); @@ -1021,7 +1024,7 @@ void SendStatisticsProxy::OnSendEncodedImage( } absl::optional downscales = - adaptations_.MaskedQualityCounts().resolution_adaptations; + adaptation_limitations_.MaskedQualityCounts().resolution_adaptations; stats_.bw_limited_resolution |= (downscales.has_value() && downscales.value() > 0); @@ -1034,29 +1037,30 @@ void SendStatisticsProxy::OnSendEncodedImage( void SendStatisticsProxy::OnEncoderImplementationChanged( const std::string& implementation_name) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); encoder_changed_ = EncoderChangeEvent{stats_.encoder_implementation_name, implementation_name}; stats_.encoder_implementation_name = implementation_name; } int SendStatisticsProxy::GetInputFrameRate() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return round(uma_container_->input_frame_rate_tracker_.ComputeRate()); } int SendStatisticsProxy::GetSendFrameRate() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return round(encoded_frame_rate_tracker_.ComputeRate()); } void 
SendStatisticsProxy::OnIncomingFrame(int width, int height) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); uma_container_->input_frame_rate_tracker_.AddSamples(1); uma_container_->input_fps_counter_.Add(1); uma_container_->input_width_counter_.Add(width); uma_container_->input_height_counter_.Add(height); - if (adaptations_.MaskedCpuCounts().resolution_adaptations.has_value()) { + if (adaptation_limitations_.MaskedCpuCounts() + .resolution_adaptations.has_value()) { uma_container_->cpu_limited_frame_counter_.Add( stats_.cpu_limited_resolution); } @@ -1068,7 +1072,7 @@ void SendStatisticsProxy::OnIncomingFrame(int width, int height) { } void SendStatisticsProxy::OnFrameDropped(DropReason reason) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); switch (reason) { case DropReason::kSource: ++stats_.frames_dropped_by_capturer; @@ -1089,20 +1093,20 @@ void SendStatisticsProxy::OnFrameDropped(DropReason reason) { } void SendStatisticsProxy::ClearAdaptationStats() { - rtc::CritScope lock(&crit_); - adaptations_.set_cpu_counts(VideoAdaptationCounters()); - adaptations_.set_quality_counts(VideoAdaptationCounters()); + MutexLock lock(&mutex_); + adaptation_limitations_.set_cpu_counts(VideoAdaptationCounters()); + adaptation_limitations_.set_quality_counts(VideoAdaptationCounters()); UpdateAdaptationStats(); } void SendStatisticsProxy::UpdateAdaptationSettings( VideoStreamEncoderObserver::AdaptationSettings cpu_settings, VideoStreamEncoderObserver::AdaptationSettings quality_settings) { - rtc::CritScope lock(&crit_); - adaptations_.UpdateMaskingSettings(cpu_settings, quality_settings); - SetAdaptTimer(adaptations_.MaskedCpuCounts(), + MutexLock lock(&mutex_); + adaptation_limitations_.UpdateMaskingSettings(cpu_settings, quality_settings); + SetAdaptTimer(adaptation_limitations_.MaskedCpuCounts(), &uma_container_->cpu_adapt_timer_); - SetAdaptTimer(adaptations_.MaskedQualityCounts(), + SetAdaptTimer(adaptation_limitations_.MaskedQualityCounts(), 
&uma_container_->quality_adapt_timer_); UpdateAdaptationStats(); } @@ -1111,11 +1115,12 @@ void SendStatisticsProxy::OnAdaptationChanged( VideoAdaptationReason reason, const VideoAdaptationCounters& cpu_counters, const VideoAdaptationCounters& quality_counters) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); - MaskedAdaptationCounts receiver = adaptations_.MaskedQualityCounts(); - adaptations_.set_cpu_counts(cpu_counters); - adaptations_.set_quality_counts(quality_counters); + MaskedAdaptationCounts receiver = + adaptation_limitations_.MaskedQualityCounts(); + adaptation_limitations_.set_cpu_counts(cpu_counters); + adaptation_limitations_.set_quality_counts(quality_counters); switch (reason) { case VideoAdaptationReason::kCpu: ++stats_.number_of_cpu_adapt_changes; @@ -1123,7 +1128,7 @@ void SendStatisticsProxy::OnAdaptationChanged( case VideoAdaptationReason::kQuality: TryUpdateInitialQualityResolutionAdaptUp( receiver.resolution_adaptations, - adaptations_.MaskedQualityCounts().resolution_adaptations); + adaptation_limitations_.MaskedQualityCounts().resolution_adaptations); ++stats_.number_of_quality_adapt_changes; break; } @@ -1131,8 +1136,8 @@ void SendStatisticsProxy::OnAdaptationChanged( } void SendStatisticsProxy::UpdateAdaptationStats() { - auto cpu_counts = adaptations_.MaskedCpuCounts(); - auto quality_counts = adaptations_.MaskedQualityCounts(); + auto cpu_counts = adaptation_limitations_.MaskedCpuCounts(); + auto quality_counts = adaptation_limitations_.MaskedQualityCounts(); bool is_cpu_limited = cpu_counts.resolution_adaptations > 0 || cpu_counts.num_framerate_reductions > 0; @@ -1204,7 +1209,7 @@ void SendStatisticsProxy::OnBitrateAllocationUpdated( spatial_layers[i] = (allocation.GetSpatialLayerSum(i) > 0); } - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); bw_limited_layers_ = allocation.is_bw_limited(); UpdateAdaptationStats(); @@ -1227,14 +1232,14 @@ void SendStatisticsProxy::OnBitrateAllocationUpdated( // resolution or not. 
|is_scaled| is a flag indicating if the video is scaled // down. void SendStatisticsProxy::OnEncoderInternalScalerUpdate(bool is_scaled) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); internal_encoder_scaler_ = is_scaled; UpdateAdaptationStats(); } // TODO(asapersson): Include fps changes. void SendStatisticsProxy::OnInitialQualityResolutionAdaptDown() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ++uma_container_->initial_quality_changes_.down; } @@ -1270,7 +1275,7 @@ void SendStatisticsProxy::SetAdaptTimer(const MaskedAdaptationCounts& counts, void SendStatisticsProxy::RtcpPacketTypesCounterUpdated( uint32_t ssrc, const RtcpPacketTypeCounter& packet_counter) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc); if (!stats) return; @@ -1280,31 +1285,27 @@ void SendStatisticsProxy::RtcpPacketTypesCounterUpdated( uma_container_->first_rtcp_stats_time_ms_ = clock_->TimeInMilliseconds(); } -void SendStatisticsProxy::StatisticsUpdated(const RtcpStatistics& statistics, - uint32_t ssrc) { - rtc::CritScope lock(&crit_); - VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc); - if (!stats) - return; - - stats->rtcp_stats = statistics; - uma_container_->report_block_stats_.Store(ssrc, statistics); -} - void SendStatisticsProxy::OnReportBlockDataUpdated( ReportBlockData report_block_data) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); VideoSendStream::StreamStats* stats = GetStatsEntry(report_block_data.report_block().source_ssrc); if (!stats) return; + const RTCPReportBlock& report_block = report_block_data.report_block(); + uma_container_->report_block_stats_.Store( + /*ssrc=*/report_block.source_ssrc, + /*packets_lost=*/report_block.packets_lost, + /*extended_highest_sequence_number=*/ + report_block.extended_highest_sequence_number); + stats->report_block_data = std::move(report_block_data); } void SendStatisticsProxy::DataCountersUpdated( const 
StreamDataCounters& counters, uint32_t ssrc) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc); RTC_DCHECK(stats) << "DataCountersUpdated reported for unknown ssrc " << ssrc; @@ -1346,7 +1347,7 @@ void SendStatisticsProxy::DataCountersUpdated( void SendStatisticsProxy::Notify(uint32_t total_bitrate_bps, uint32_t retransmit_bitrate_bps, uint32_t ssrc) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc); if (!stats) return; @@ -1357,7 +1358,7 @@ void SendStatisticsProxy::Notify(uint32_t total_bitrate_bps, void SendStatisticsProxy::FrameCountUpdated(const FrameCounts& frame_counts, uint32_t ssrc) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc); if (!stats) return; @@ -1369,7 +1370,7 @@ void SendStatisticsProxy::SendSideDelayUpdated(int avg_delay_ms, int max_delay_ms, uint64_t total_delay_ms, uint32_t ssrc) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); VideoSendStream::StreamStats* stats = GetStatsEntry(ssrc); if (!stats) return; @@ -1459,6 +1460,16 @@ void SendStatisticsProxy::Adaptations::set_quality_counts( const VideoAdaptationCounters& quality_counts) { quality_counts_ = quality_counts; } + +VideoAdaptationCounters SendStatisticsProxy::Adaptations::cpu_counts() const { + return cpu_counts_; +} + +VideoAdaptationCounters SendStatisticsProxy::Adaptations::quality_counts() + const { + return quality_counts_; +} + void SendStatisticsProxy::Adaptations::UpdateMaskingSettings( VideoStreamEncoderObserver::AdaptationSettings cpu_settings, VideoStreamEncoderObserver::AdaptationSettings quality_settings) { diff --git a/video/send_statistics_proxy.h b/video/send_statistics_proxy.h index 1d2fd21cfa..bfb221f65c 100644 --- a/video/send_statistics_proxy.h +++ b/video/send_statistics_proxy.h @@ -25,9 +25,9 @@ #include "modules/rtp_rtcp/include/report_block_data.h" 
#include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_coding_defines.h" -#include "rtc_base/critical_section.h" #include "rtc_base/numerics/exp_filter.h" #include "rtc_base/rate_tracker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" #include "video/quality_limitation_reason_tracker.h" @@ -37,7 +37,6 @@ namespace webrtc { class SendStatisticsProxy : public VideoStreamEncoderObserver, - public RtcpStatisticsCallback, public ReportBlockDataObserver, public RtcpPacketTypeCounterObserver, public StreamDataCountersCallback, @@ -106,9 +105,6 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver, int GetSendFrameRate() const; protected: - // From RtcpStatisticsCallback. - void StatisticsUpdated(const RtcpStatistics& statistics, - uint32_t ssrc) override; // From ReportBlockDataObserver. void OnReportBlockDataUpdated(ReportBlockData report_block_data) override; // From RtcpPacketTypeCounterObserver. 
@@ -223,9 +219,9 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver, }; typedef std::map EncodedFrameMap; - void PurgeOldStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + void PurgeOldStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); VideoSendStream::StreamStats* GetStatsEntry(uint32_t ssrc) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); struct MaskedAdaptationCounts { absl::optional resolution_adaptations = absl::nullopt; @@ -240,6 +236,9 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver, void set_cpu_counts(const VideoAdaptationCounters& cpu_counts); void set_quality_counts(const VideoAdaptationCounters& quality_counts); + VideoAdaptationCounters cpu_counts() const; + VideoAdaptationCounters quality_counts() const; + void UpdateMaskingSettings(AdaptationSettings cpu_settings, AdaptationSettings quality_settings); @@ -254,52 +253,52 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver, }; void SetAdaptTimer(const MaskedAdaptationCounts& counts, StatsTimer* timer) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); - void UpdateAdaptationStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void UpdateAdaptationStats() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void TryUpdateInitialQualityResolutionAdaptUp( absl::optional old_quality_downscales, absl::optional updated_quality_downscales) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void UpdateEncoderFallbackStats(const CodecSpecificInfo* codec_info, int pixels, int simulcast_index) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); void UpdateFallbackDisabledStats(const CodecSpecificInfo* codec_info, int pixels, int simulcast_index) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_); Clock* const clock_; const std::string payload_name_; const RtpConfig rtp_config_; const absl::optional fallback_max_pixels_; const absl::optional 
fallback_max_pixels_disabled_; - rtc::CriticalSection crit_; - VideoEncoderConfig::ContentType content_type_ RTC_GUARDED_BY(crit_); + mutable Mutex mutex_; + VideoEncoderConfig::ContentType content_type_ RTC_GUARDED_BY(mutex_); const int64_t start_ms_; - VideoSendStream::Stats stats_ RTC_GUARDED_BY(crit_); - std::map update_times_ RTC_GUARDED_BY(crit_); - rtc::ExpFilter encode_time_ RTC_GUARDED_BY(crit_); + VideoSendStream::Stats stats_ RTC_GUARDED_BY(mutex_); + std::map update_times_ RTC_GUARDED_BY(mutex_); + rtc::ExpFilter encode_time_ RTC_GUARDED_BY(mutex_); QualityLimitationReasonTracker quality_limitation_reason_tracker_ - RTC_GUARDED_BY(crit_); - rtc::RateTracker media_byte_rate_tracker_ RTC_GUARDED_BY(crit_); - rtc::RateTracker encoded_frame_rate_tracker_ RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); + rtc::RateTracker media_byte_rate_tracker_ RTC_GUARDED_BY(mutex_); + rtc::RateTracker encoded_frame_rate_tracker_ RTC_GUARDED_BY(mutex_); std::map> - encoded_frame_rate_trackers_ RTC_GUARDED_BY(crit_); + encoded_frame_rate_trackers_ RTC_GUARDED_BY(mutex_); - absl::optional last_outlier_timestamp_ RTC_GUARDED_BY(crit_); + absl::optional last_outlier_timestamp_ RTC_GUARDED_BY(mutex_); - int last_num_spatial_layers_ RTC_GUARDED_BY(crit_); - int last_num_simulcast_streams_ RTC_GUARDED_BY(crit_); + int last_num_spatial_layers_ RTC_GUARDED_BY(mutex_); + int last_num_simulcast_streams_ RTC_GUARDED_BY(mutex_); std::array last_spatial_layer_use_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(mutex_); // Indicates if the latest bitrate allocation had layers disabled by low // available bandwidth. - bool bw_limited_layers_ RTC_GUARDED_BY(crit_); + bool bw_limited_layers_ RTC_GUARDED_BY(mutex_); // Indicastes if the encoder internally downscales input image. 
- bool internal_encoder_scaler_ RTC_GUARDED_BY(crit_); - Adaptations adaptations_ RTC_GUARDED_BY(crit_); + bool internal_encoder_scaler_ RTC_GUARDED_BY(mutex_); + Adaptations adaptation_limitations_ RTC_GUARDED_BY(mutex_); struct EncoderChangeEvent { std::string previous_encoder_implementation; @@ -371,7 +370,7 @@ class SendStatisticsProxy : public VideoStreamEncoderObserver, qp_counters_; // QP counters mapped by spatial idx. }; - std::unique_ptr uma_container_ RTC_GUARDED_BY(crit_); + std::unique_ptr uma_container_ RTC_GUARDED_BY(mutex_); }; } // namespace webrtc diff --git a/video/send_statistics_proxy_unittest.cc b/video/send_statistics_proxy_unittest.cc index ab5b491069..d4a7a49e39 100644 --- a/video/send_statistics_proxy_unittest.cc +++ b/video/send_statistics_proxy_unittest.cc @@ -64,9 +64,7 @@ class SendStatisticsProxyTest : public ::testing::Test { explicit SendStatisticsProxyTest(const std::string& field_trials) : override_field_trials_(field_trials), fake_clock_(1234), - config_(GetTestConfig()), - avg_delay_ms_(0), - max_delay_ms_(0) {} + config_(GetTestConfig()) {} virtual ~SendStatisticsProxyTest() {} protected: @@ -126,6 +124,7 @@ class SendStatisticsProxyTest : public ::testing::Test { } void ExpectEqual(VideoSendStream::Stats one, VideoSendStream::Stats other) { + EXPECT_EQ(one.frames, other.frames); EXPECT_EQ(one.input_frame_rate, other.input_frame_rate); EXPECT_EQ(one.encode_frame_rate, other.encode_frame_rate); EXPECT_EQ(one.media_bitrate_bps, other.media_bitrate_bps); @@ -160,11 +159,19 @@ class SendStatisticsProxyTest : public ::testing::Test { b.rtp_stats.retransmitted.packets); EXPECT_EQ(a.rtp_stats.fec.packets, b.rtp_stats.fec.packets); - EXPECT_EQ(a.rtcp_stats.fraction_lost, b.rtcp_stats.fraction_lost); - EXPECT_EQ(a.rtcp_stats.packets_lost, b.rtcp_stats.packets_lost); - EXPECT_EQ(a.rtcp_stats.extended_highest_sequence_number, - b.rtcp_stats.extended_highest_sequence_number); - EXPECT_EQ(a.rtcp_stats.jitter, b.rtcp_stats.jitter); + 
EXPECT_EQ(a.report_block_data.has_value(), + b.report_block_data.has_value()); + if (a.report_block_data.has_value()) { + const RTCPReportBlock& a_rtcp_stats = + a.report_block_data->report_block(); + const RTCPReportBlock& b_rtcp_stats = + b.report_block_data->report_block(); + EXPECT_EQ(a_rtcp_stats.fraction_lost, b_rtcp_stats.fraction_lost); + EXPECT_EQ(a_rtcp_stats.packets_lost, b_rtcp_stats.packets_lost); + EXPECT_EQ(a_rtcp_stats.extended_highest_sequence_number, + b_rtcp_stats.extended_highest_sequence_number); + EXPECT_EQ(a_rtcp_stats.jitter, b_rtcp_stats.jitter); + } } } @@ -172,36 +179,40 @@ class SendStatisticsProxyTest : public ::testing::Test { SimulatedClock fake_clock_; std::unique_ptr statistics_proxy_; VideoSendStream::Config config_; - int avg_delay_ms_; - int max_delay_ms_; VideoSendStream::Stats expected_; - typedef std::map::const_iterator - StreamIterator; }; -TEST_F(SendStatisticsProxyTest, RtcpStatistics) { - RtcpStatisticsCallback* callback = statistics_proxy_.get(); - for (const auto& ssrc : config_.rtp.ssrcs) { - VideoSendStream::StreamStats& ssrc_stats = expected_.substreams[ssrc]; - +TEST_F(SendStatisticsProxyTest, ReportBlockDataObserver) { + ReportBlockDataObserver* callback = statistics_proxy_.get(); + for (uint32_t ssrc : config_.rtp.ssrcs) { // Add statistics with some arbitrary, but unique, numbers. 
- uint32_t offset = ssrc * sizeof(RtcpStatistics); - ssrc_stats.rtcp_stats.packets_lost = offset; - ssrc_stats.rtcp_stats.extended_highest_sequence_number = offset + 1; - ssrc_stats.rtcp_stats.fraction_lost = offset + 2; - ssrc_stats.rtcp_stats.jitter = offset + 3; - callback->StatisticsUpdated(ssrc_stats.rtcp_stats, ssrc); + uint32_t offset = ssrc * 4; + RTCPReportBlock report_block; + report_block.source_ssrc = ssrc; + report_block.packets_lost = offset; + report_block.extended_highest_sequence_number = offset + 1; + report_block.fraction_lost = offset + 2; + report_block.jitter = offset + 3; + ReportBlockData data; + data.SetReportBlock(report_block, 0); + expected_.substreams[ssrc].report_block_data = data; + + callback->OnReportBlockDataUpdated(data); } - for (const auto& ssrc : config_.rtp.rtx.ssrcs) { - VideoSendStream::StreamStats& ssrc_stats = expected_.substreams[ssrc]; - + for (uint32_t ssrc : config_.rtp.rtx.ssrcs) { // Add statistics with some arbitrary, but unique, numbers. 
- uint32_t offset = ssrc * sizeof(RtcpStatistics); - ssrc_stats.rtcp_stats.packets_lost = offset; - ssrc_stats.rtcp_stats.extended_highest_sequence_number = offset + 1; - ssrc_stats.rtcp_stats.fraction_lost = offset + 2; - ssrc_stats.rtcp_stats.jitter = offset + 3; - callback->StatisticsUpdated(ssrc_stats.rtcp_stats, ssrc); + uint32_t offset = ssrc * 4; + RTCPReportBlock report_block; + report_block.source_ssrc = ssrc; + report_block.packets_lost = offset; + report_block.extended_highest_sequence_number = offset + 1; + report_block.fraction_lost = offset + 2; + report_block.jitter = offset + 3; + ReportBlockData data; + data.SetReportBlock(report_block, 0); + expected_.substreams[ssrc].report_block_data = data; + + callback->OnReportBlockDataUpdated(data); } VideoSendStream::Stats stats = statistics_proxy_->GetStats(); ExpectEqual(expected_, stats); @@ -283,21 +294,17 @@ TEST_F(SendStatisticsProxyTest, DataCounters) { TEST_F(SendStatisticsProxyTest, Bitrate) { BitrateStatisticsObserver* observer = statistics_proxy_.get(); for (const auto& ssrc : config_.rtp.ssrcs) { - uint32_t total; - uint32_t retransmit; // Use ssrc as bitrate_bps to get a unique value for each stream. - total = ssrc; - retransmit = ssrc + 1; + uint32_t total = ssrc; + uint32_t retransmit = ssrc + 1; observer->Notify(total, retransmit, ssrc); expected_.substreams[ssrc].total_bitrate_bps = total; expected_.substreams[ssrc].retransmit_bitrate_bps = retransmit; } for (const auto& ssrc : config_.rtp.rtx.ssrcs) { - uint32_t total; - uint32_t retransmit; // Use ssrc as bitrate_bps to get a unique value for each stream. 
- total = ssrc; - retransmit = ssrc + 1; + uint32_t total = ssrc; + uint32_t retransmit = ssrc + 1; observer->Notify(total, retransmit, ssrc); expected_.substreams[ssrc].total_bitrate_bps = total; expected_.substreams[ssrc].retransmit_bitrate_bps = retransmit; @@ -2180,10 +2187,13 @@ TEST_F(SendStatisticsProxyTest, NoSubstreams) { std::max(*absl::c_max_element(config_.rtp.ssrcs), *absl::c_max_element(config_.rtp.rtx.ssrcs)) + 1; - // From RtcpStatisticsCallback. - RtcpStatistics rtcp_stats; - RtcpStatisticsCallback* rtcp_callback = statistics_proxy_.get(); - rtcp_callback->StatisticsUpdated(rtcp_stats, excluded_ssrc); + // From ReportBlockDataObserver. + ReportBlockDataObserver* rtcp_callback = statistics_proxy_.get(); + RTCPReportBlock report_block; + report_block.source_ssrc = excluded_ssrc; + ReportBlockData data; + data.SetReportBlock(report_block, 0); + rtcp_callback->OnReportBlockDataUpdated(data); // From BitrateStatisticsObserver. uint32_t total = 0; @@ -2230,9 +2240,12 @@ TEST_F(SendStatisticsProxyTest, EncodedResolutionTimesOut) { // Update the first SSRC with bogus RTCP stats to make sure that encoded // resolution still times out (no global timeout for all stats). - RtcpStatistics rtcp_statistics; - RtcpStatisticsCallback* rtcp_stats = statistics_proxy_.get(); - rtcp_stats->StatisticsUpdated(rtcp_statistics, config_.rtp.ssrcs[0]); + ReportBlockDataObserver* rtcp_callback = statistics_proxy_.get(); + RTCPReportBlock report_block; + report_block.source_ssrc = config_.rtp.ssrcs[0]; + ReportBlockData data; + data.SetReportBlock(report_block, 0); + rtcp_callback->OnReportBlockDataUpdated(data); // Report stats for second SSRC to make sure it's not outdated along with the // first SSRC. @@ -2721,7 +2734,7 @@ TEST_F(SendStatisticsProxyTest, Vp9SvcLowSpatialLayerDoesNotUpdateResolution) { codec_info.codecType = kVideoCodecVP9; // For first picture, it is expected that low layer updates resolution. 
- codec_info.codecSpecific.VP9.end_of_picture = false; + codec_info.end_of_picture = false; statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info); VideoSendStream::Stats stats = statistics_proxy_->GetStats(); EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width); @@ -2730,7 +2743,7 @@ TEST_F(SendStatisticsProxyTest, Vp9SvcLowSpatialLayerDoesNotUpdateResolution) { // Top layer updates resolution. encoded_image._encodedWidth = kEncodedWidth * 2; encoded_image._encodedHeight = kEncodedHeight * 2; - codec_info.codecSpecific.VP9.end_of_picture = true; + codec_info.end_of_picture = true; statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info); stats = statistics_proxy_->GetStats(); EXPECT_EQ(kEncodedWidth * 2, stats.substreams[config_.rtp.ssrcs[0]].width); @@ -2739,7 +2752,7 @@ TEST_F(SendStatisticsProxyTest, Vp9SvcLowSpatialLayerDoesNotUpdateResolution) { // Low layer of next frame doesn't update resolution. encoded_image._encodedWidth = kEncodedWidth; encoded_image._encodedHeight = kEncodedHeight; - codec_info.codecSpecific.VP9.end_of_picture = false; + codec_info.end_of_picture = false; statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info); stats = statistics_proxy_->GetStats(); EXPECT_EQ(kEncodedWidth * 2, stats.substreams[config_.rtp.ssrcs[0]].width); diff --git a/video/stream_synchronization.cc b/video/stream_synchronization.cc index 8c808f13c6..d5c77c1eca 100644 --- a/video/stream_synchronization.cc +++ b/video/stream_synchronization.cc @@ -184,4 +184,12 @@ void StreamSynchronization::SetTargetBufferingDelay(int target_delay_ms) { base_target_delay_ms_ = target_delay_ms; } +void StreamSynchronization::ReduceAudioDelay() { + audio_delay_.extra_ms *= 0.9f; +} + +void StreamSynchronization::ReduceVideoDelay() { + video_delay_.extra_ms *= 0.9f; +} + } // namespace webrtc diff --git a/video/stream_synchronization.h b/video/stream_synchronization.h index 1aba62d1e7..2da6a49a14 100644 --- a/video/stream_synchronization.h 
+++ b/video/stream_synchronization.h @@ -44,6 +44,12 @@ class StreamSynchronization { // |target_delay_ms|. void SetTargetBufferingDelay(int target_delay_ms); + // Lowers the audio delay by 10%. Can be used to recover from errors. + void ReduceAudioDelay(); + + // Lowers the video delay by 10%. Can be used to recover from errors. + void ReduceVideoDelay(); + uint32_t audio_stream_id() const { return audio_stream_id_; } uint32_t video_stream_id() const { return video_stream_id_; } diff --git a/video/stream_synchronization_unittest.cc b/video/stream_synchronization_unittest.cc index 04a43c21f9..3d6fdd82a7 100644 --- a/video/stream_synchronization_unittest.cc +++ b/video/stream_synchronization_unittest.cc @@ -383,6 +383,63 @@ TEST_F(StreamSynchronizationTest, AudioDelayed) { total_audio_delay_ms); } +TEST_F(StreamSynchronizationTest, NoAudioIncomingUnboundedIncrease) { + // Test how audio delay can grow unbounded when audio stops coming in. + // This is handled in caller of RtpStreamsSynchronizer, for example in + // RtpStreamsSynchronizer by not updating delays when audio samples stop + // coming in. + const int kVideoDelayMs = 300; + const int kAudioDelayMs = 100; + int current_audio_delay_ms = kAudioDelayMs; + int total_audio_delay_ms = 0; + int total_video_delay_ms = 0; + + EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs, + current_audio_delay_ms, &total_audio_delay_ms, + &total_video_delay_ms)); + EXPECT_EQ(0, total_video_delay_ms); + // The delay is not allowed to change more than this. + EXPECT_EQ((kVideoDelayMs - kAudioDelayMs) / kSmoothingFilter, + total_audio_delay_ms); + int last_total_audio_delay_ms = total_audio_delay_ms; + + // Set new current audio delay: simulate audio samples are flowing in. 
+ current_audio_delay_ms = total_audio_delay_ms; + + clock_sender_.AdvanceTimeMilliseconds(1000); + clock_receiver_.AdvanceTimeMilliseconds(1000); + EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs, + current_audio_delay_ms, &total_audio_delay_ms, + &total_video_delay_ms)); + EXPECT_EQ(0, total_video_delay_ms); + EXPECT_EQ(last_total_audio_delay_ms + + MaxAudioDelayChangeMs(current_audio_delay_ms, kVideoDelayMs), + total_audio_delay_ms); + last_total_audio_delay_ms = total_audio_delay_ms; + + // Simulate no incoming audio by not update audio delay. + const int kSimulationSecs = 300; // 5min + const int kMaxDeltaDelayMs = 10000; // max delay for audio in webrtc + for (auto time_secs = 0; time_secs < kSimulationSecs; time_secs++) { + clock_sender_.AdvanceTimeMilliseconds(1000); + clock_receiver_.AdvanceTimeMilliseconds(1000); + EXPECT_TRUE(DelayedStreams(/*audio_delay_ms=*/0, kVideoDelayMs, + current_audio_delay_ms, &total_audio_delay_ms, + &total_video_delay_ms)); + EXPECT_EQ(0, total_video_delay_ms); + + // Audio delay does not go above kMaxDeltaDelayMs. + EXPECT_EQ(std::min(kMaxDeltaDelayMs, + last_total_audio_delay_ms + + MaxAudioDelayChangeMs(current_audio_delay_ms, + kVideoDelayMs)), + total_audio_delay_ms); + last_total_audio_delay_ms = total_audio_delay_ms; + } + // By now the audio delay has grown unbounded to kMaxDeltaDelayMs. 
+ EXPECT_EQ(kMaxDeltaDelayMs, last_total_audio_delay_ms); +} + TEST_F(StreamSynchronizationTest, BothDelayedVideoLater) { BothDelayedVideoLaterTest(0); } diff --git a/video/sv_loopback.cc b/video/sv_loopback.cc index ec236d6e62..af475ae4eb 100644 --- a/video/sv_loopback.cc +++ b/video/sv_loopback.cc @@ -605,56 +605,62 @@ void Loopback() { call_bitrate_config.max_bitrate_bps = (ScreenshareMaxBitrateKbps() + VideoMaxBitrateKbps()) * 1000; - VideoQualityTest::Params params, camera_params, screenshare_params; - params.call = {absl::GetFlag(FLAGS_send_side_bwe), - absl::GetFlag(FLAGS_generic_descriptor), call_bitrate_config, - 0}; + VideoQualityTest::Params params; + params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe); + params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor); + params.call.call_bitrate_config = call_bitrate_config; params.call.dual_video = true; - params.video[screenshare_idx] = {true, - ScreenshareWidth(), - ScreenshareHeight(), - ScreenshareFps(), - ScreenshareMinBitrateKbps() * 1000, - ScreenshareTargetBitrateKbps() * 1000, - ScreenshareMaxBitrateKbps() * 1000, - false, - Codec(), - ScreenshareNumTemporalLayers(), - ScreenshareSelectedTL(), - ScreenshareMinTransmitBitrateKbps() * 1000, - false, // ULPFEC disabled. - false, // FlexFEC disabled. - false, // Automatic scaling disabled - ""}; - params.video[camera_idx] = {absl::GetFlag(FLAGS_video), - VideoWidth(), - VideoHeight(), - VideoFps(), - VideoMinBitrateKbps() * 1000, - VideoTargetBitrateKbps() * 1000, - VideoMaxBitrateKbps() * 1000, - absl::GetFlag(FLAGS_suspend_below_min_bitrate), - Codec(), - VideoNumTemporalLayers(), - VideoSelectedTL(), - 0, // No min transmit bitrate. 
- absl::GetFlag(FLAGS_use_ulpfec), - absl::GetFlag(FLAGS_use_flexfec), - false, - VideoClip(), - GetCaptureDevice()}; - params.audio = {absl::GetFlag(FLAGS_audio), - absl::GetFlag(FLAGS_audio_video_sync), - absl::GetFlag(FLAGS_audio_dtx)}; - params.logging = {RtcEventLogName(), RtpDumpName(), EncodedFramePath()}; - params.analyzer = {"dual_streams", 0.0, 0.0, DurationSecs(), - OutputFilename(), GraphTitle()}; + params.video[screenshare_idx].enabled = true; + params.video[screenshare_idx].width = ScreenshareWidth(); + params.video[screenshare_idx].height = ScreenshareHeight(); + params.video[screenshare_idx].fps = ScreenshareFps(); + params.video[screenshare_idx].min_bitrate_bps = + ScreenshareMinBitrateKbps() * 1000; + params.video[screenshare_idx].target_bitrate_bps = + ScreenshareTargetBitrateKbps() * 1000; + params.video[screenshare_idx].max_bitrate_bps = + ScreenshareMaxBitrateKbps() * 1000; + params.video[screenshare_idx].codec = Codec(); + params.video[screenshare_idx].num_temporal_layers = + ScreenshareNumTemporalLayers(); + params.video[screenshare_idx].selected_tl = ScreenshareSelectedTL(); + params.video[screenshare_idx].min_transmit_bps = + ScreenshareMinTransmitBitrateKbps() * 1000; + params.video[camera_idx].enabled = absl::GetFlag(FLAGS_video); + params.video[camera_idx].width = VideoWidth(); + params.video[camera_idx].height = VideoHeight(); + params.video[camera_idx].fps = VideoFps(); + params.video[camera_idx].min_bitrate_bps = VideoMinBitrateKbps() * 1000; + params.video[camera_idx].target_bitrate_bps = VideoTargetBitrateKbps() * 1000; + params.video[camera_idx].max_bitrate_bps = VideoMaxBitrateKbps() * 1000; + params.video[camera_idx].suspend_below_min_bitrate = + absl::GetFlag(FLAGS_suspend_below_min_bitrate); + params.video[camera_idx].codec = Codec(); + params.video[camera_idx].num_temporal_layers = VideoNumTemporalLayers(); + params.video[camera_idx].selected_tl = VideoSelectedTL(); + params.video[camera_idx].ulpfec = 
absl::GetFlag(FLAGS_use_ulpfec); + params.video[camera_idx].flexfec = absl::GetFlag(FLAGS_use_flexfec); + params.video[camera_idx].clip_path = VideoClip(); + params.video[camera_idx].capture_device_index = GetCaptureDevice(); + params.audio.enabled = absl::GetFlag(FLAGS_audio); + params.audio.sync_video = absl::GetFlag(FLAGS_audio_video_sync); + params.audio.dtx = absl::GetFlag(FLAGS_audio_dtx); + params.logging.rtc_event_log_name = RtcEventLogName(); + params.logging.rtp_dump_name = RtpDumpName(); + params.logging.encoded_frame_base_path = EncodedFramePath(); + params.analyzer.test_label = "dual_streams"; + params.analyzer.test_durations_secs = DurationSecs(); + params.analyzer.graph_data_output_filename = OutputFilename(); + params.analyzer.graph_title = GraphTitle(); params.config = pipe_config; params.screenshare[camera_idx].enabled = false; - params.screenshare[screenshare_idx] = {true, GenerateSlides(), - SlideChangeInterval(), - ScrollDuration(), Slides()}; + params.screenshare[screenshare_idx].enabled = true; + params.screenshare[screenshare_idx].generate_slides = GenerateSlides(); + params.screenshare[screenshare_idx].slide_change_interval = + SlideChangeInterval(); + params.screenshare[screenshare_idx].scroll_duration = ScrollDuration(); + params.screenshare[screenshare_idx].slides = Slides(); if (VideoNumStreams() > 1 && VideoStream0().empty() && VideoStream1().empty()) { diff --git a/video/test/mock_video_stream_encoder.h b/video/test/mock_video_stream_encoder.h index 4c076eac89..2af613e3ad 100644 --- a/video/test/mock_video_stream_encoder.h +++ b/video/test/mock_video_stream_encoder.h @@ -10,6 +10,8 @@ #ifndef VIDEO_TEST_MOCK_VIDEO_STREAM_ENCODER_H_ #define VIDEO_TEST_MOCK_VIDEO_STREAM_ENCODER_H_ +#include + #include "api/video/video_stream_encoder_interface.h" #include "test/gmock.h" @@ -17,6 +19,14 @@ namespace webrtc { class MockVideoStreamEncoder : public VideoStreamEncoderInterface { public: + MOCK_METHOD(void, + AddAdaptationResource, + 
(rtc::scoped_refptr), + (override)); + MOCK_METHOD(std::vector>, + GetAdaptationResources, + (), + (override)); MOCK_METHOD(void, SetSource, (rtc::VideoSourceInterface*, @@ -34,10 +44,6 @@ class MockVideoStreamEncoder : public VideoStreamEncoderInterface { (DataRate, DataRate, DataRate, uint8_t, int64_t, double), (override)); MOCK_METHOD(void, OnFrame, (const VideoFrame&), (override)); - MOCK_METHOD(void, - SetBitrateAllocationObserver, - (VideoBitrateAllocationObserver*), - (override)); MOCK_METHOD(void, SetFecControllerOverride, (FecControllerOverride*), diff --git a/video/video_analyzer.cc b/video/video_analyzer.cc index f4a1c96d74..b90ba2973a 100644 --- a/video/video_analyzer.cc +++ b/video/video_analyzer.cc @@ -18,11 +18,13 @@ #include "common_video/libyuv/include/webrtc_libyuv.h" #include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h" #include "modules/rtp_rtcp/source/rtp_packet.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "rtc_base/cpu_time.h" #include "rtc_base/format_macros.h" #include "rtc_base/memory_usage.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/task_utils/repeating_task.h" +#include "rtc_base/time_utils.h" #include "system_wrappers/include/cpu_info.h" #include "test/call_test.h" #include "test/testsupport/file_utils.h" @@ -136,10 +138,12 @@ VideoAnalyzer::VideoAnalyzer(test::LayerFilteringTransport* transport, } for (uint32_t i = 0; i < num_cores; ++i) { - rtc::PlatformThread* thread = - new rtc::PlatformThread(&FrameComparisonThread, this, "Analyzer"); - thread->Start(); - comparison_thread_pool_.push_back(thread); + comparison_thread_pool_.push_back(rtc::PlatformThread::SpawnJoinable( + [this] { + while (CompareFrames()) { + } + }, + "Analyzer")); } if (!rtp_dump_name.empty()) { @@ -151,13 +155,11 @@ VideoAnalyzer::VideoAnalyzer(test::LayerFilteringTransport* transport, VideoAnalyzer::~VideoAnalyzer() { { - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); quit_ = true; } - 
for (rtc::PlatformThread* thread : comparison_thread_pool_) { - thread->Stop(); - delete thread; - } + // Joins all threads. + comparison_thread_pool_.clear(); } void VideoAnalyzer::SetReceiver(PacketReceiver* receiver) { @@ -174,25 +176,25 @@ void VideoAnalyzer::SetSource( } void VideoAnalyzer::SetCall(Call* call) { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); RTC_DCHECK(!call_); call_ = call; } void VideoAnalyzer::SetSendStream(VideoSendStream* stream) { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); RTC_DCHECK(!send_stream_); send_stream_ = stream; } void VideoAnalyzer::SetReceiveStream(VideoReceiveStream* stream) { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); RTC_DCHECK(!receive_stream_); receive_stream_ = stream; } void VideoAnalyzer::SetAudioReceiveStream(AudioReceiveStream* recv_stream) { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); RTC_CHECK(!audio_receive_stream_); audio_receive_stream_ = recv_stream; } @@ -211,7 +213,7 @@ PacketReceiver::DeliveryStatus VideoAnalyzer::DeliverPacket( int64_t packet_time_us) { // Ignore timestamps of RTCP packets. They're not synchronized with // RTP packet timestamps and so they would confuse wrap_handler_. - if (RtpHeaderParser::IsRtcp(packet.cdata(), packet.size())) { + if (IsRtcpPacket(packet)) { return receiver_->DeliverPacket(media_type, std::move(packet), packet_time_us); } @@ -234,7 +236,7 @@ PacketReceiver::DeliveryStatus VideoAnalyzer::DeliverPacket( // (FlexFEC and media are sent on different SSRCs, which have different // timestamps spaces.) // Also ignore packets from wrong SSRC, but include retransmits. 
- rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); int64_t timestamp = wrap_handler_.Unwrap(rtp_packet.Timestamp() - rtp_timestamp_delta_); recv_times_[timestamp] = clock_->CurrentNtpInMilliseconds(); @@ -245,7 +247,7 @@ PacketReceiver::DeliveryStatus VideoAnalyzer::DeliverPacket( } void VideoAnalyzer::PreEncodeOnFrame(const VideoFrame& video_frame) { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); if (!first_encoded_timestamp_) { while (frames_.front().timestamp() != video_frame.timestamp()) { ++dropped_frames_before_first_encode_; @@ -257,7 +259,7 @@ void VideoAnalyzer::PreEncodeOnFrame(const VideoFrame& video_frame) { } void VideoAnalyzer::PostEncodeOnFrame(size_t stream_id, uint32_t timestamp) { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); if (!first_sent_timestamp_ && stream_id == selected_stream_) { first_sent_timestamp_ = timestamp; } @@ -273,7 +275,7 @@ bool VideoAnalyzer::SendRtp(const uint8_t* packet, bool result = transport_->SendRtp(packet, length, options); { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); if (rtp_timestamp_delta_ == 0 && rtp_packet.Ssrc() == ssrc_to_analyze_) { RTC_CHECK(static_cast(first_sent_timestamp_)); rtp_timestamp_delta_ = rtp_packet.Timestamp() - *first_sent_timestamp_; @@ -304,7 +306,7 @@ bool VideoAnalyzer::SendRtcp(const uint8_t* packet, size_t length) { void VideoAnalyzer::OnFrame(const VideoFrame& video_frame) { int64_t render_time_ms = clock_->CurrentNtpInMilliseconds(); - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); StartExcludingCpuThreadTime(); @@ -361,7 +363,7 @@ void VideoAnalyzer::Wait() { int frames_processed; int frames_captured; { - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); frames_processed = frames_processed_; frames_captured = captured_frames_; } @@ -401,29 +403,29 @@ void VideoAnalyzer::Wait() { } void VideoAnalyzer::StartMeasuringCpuProcessTime() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock 
lock(&cpu_measurement_lock_); cpu_time_ -= rtc::GetProcessCpuTimeNanos(); wallclock_time_ -= rtc::SystemTimeNanos(); } void VideoAnalyzer::StopMeasuringCpuProcessTime() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); cpu_time_ += rtc::GetProcessCpuTimeNanos(); wallclock_time_ += rtc::SystemTimeNanos(); } void VideoAnalyzer::StartExcludingCpuThreadTime() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); cpu_time_ += rtc::GetThreadCpuTimeNanos(); } void VideoAnalyzer::StopExcludingCpuThreadTime() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); cpu_time_ -= rtc::GetThreadCpuTimeNanos(); } double VideoAnalyzer::GetCpuUsagePercent() { - rtc::CritScope lock(&cpu_measurement_lock_); + MutexLock lock(&cpu_measurement_lock_); return static_cast(cpu_time_) / wallclock_time_ * 100.0; } @@ -456,9 +458,15 @@ bool VideoAnalyzer::IsInSelectedSpatialAndTemporalLayer( } void VideoAnalyzer::PollStats() { - rtc::CritScope crit(&comparison_lock_); - + // Do not grab |comparison_lock_|, before |GetStats()| completes. + // Otherwise a deadlock may occur: + // 1) |comparison_lock_| is acquired after |lock_| + // 2) |lock_| is acquired after internal pacer lock in SendRtp() + // 3) internal pacer lock is acquired by GetStats(). 
Call::Stats call_stats = call_->GetStats(); + + MutexLock lock(&comparison_lock_); + send_bandwidth_bps_.AddSample(call_stats.send_bandwidth_bps); VideoSendStream::Stats send_stats = send_stream_->GetStats(); @@ -516,7 +524,8 @@ void VideoAnalyzer::PollStats() { } if (audio_receive_stream_ != nullptr) { - AudioReceiveStream::Stats receive_stats = audio_receive_stream_->GetStats(); + AudioReceiveStream::Stats receive_stats = + audio_receive_stream_->GetStats(/*get_and_clear_legacy_stats=*/true); audio_expand_rate_.AddSample(receive_stats.expand_rate); audio_accelerate_rate_.AddSample(receive_stats.accelerate_rate); audio_jitter_buffer_ms_.AddSample(receive_stats.jitter_buffer_ms); @@ -525,12 +534,6 @@ void VideoAnalyzer::PollStats() { memory_usage_.AddSample(rtc::GetProcessResidentSizeBytes()); } -void VideoAnalyzer::FrameComparisonThread(void* obj) { - VideoAnalyzer* analyzer = static_cast(obj); - while (analyzer->CompareFrames()) { - } -} - bool VideoAnalyzer::CompareFrames() { if (AllFramesRecorded()) return false; @@ -564,13 +567,13 @@ bool VideoAnalyzer::CompareFrames() { } bool VideoAnalyzer::PopComparison(VideoAnalyzer::FrameComparison* comparison) { - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); // If AllFramesRecorded() is true, it means we have already popped // frames_to_process_ frames from comparisons_, so there is no more work // for this thread to be done. frames_processed_ might still be lower if // all comparisons are not done, but those frames are currently being // worked on by other threads. 
- if (comparisons_.empty() || AllFramesRecorded()) + if (comparisons_.empty() || AllFramesRecordedLocked()) return false; *comparison = comparisons_.front(); @@ -581,21 +584,24 @@ bool VideoAnalyzer::PopComparison(VideoAnalyzer::FrameComparison* comparison) { } void VideoAnalyzer::FrameRecorded() { - rtc::CritScope crit(&comparison_lock_); ++frames_recorded_; } bool VideoAnalyzer::AllFramesRecorded() { - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); + return AllFramesRecordedLocked(); +} + +bool VideoAnalyzer::AllFramesRecordedLocked() { RTC_DCHECK(frames_recorded_ <= frames_to_process_); return frames_recorded_ == frames_to_process_ || (clock_->CurrentTime() > test_end_ && comparisons_.empty()) || quit_; } bool VideoAnalyzer::FrameProcessed() { - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); ++frames_processed_; - assert(frames_processed_ <= frames_to_process_); + RTC_DCHECK_LE(frames_processed_, frames_to_process_); return frames_processed_ == frames_to_process_ || (clock_->CurrentTime() > test_end_ && comparisons_.empty()); } @@ -606,11 +612,11 @@ void VideoAnalyzer::PrintResults() { StopMeasuringCpuProcessTime(); int dropped_frames_diff; { - rtc::CritScope crit(&crit_); + MutexLock lock(&lock_); dropped_frames_diff = dropped_frames_before_first_encode_ + dropped_frames_before_rendering_ + frames_.size(); } - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); PrintResult("psnr", psnr_, "dB", ImproveDirection::kBiggerIsBetter); PrintResult("ssim", ssim_, "unitless", ImproveDirection::kBiggerIsBetter); PrintResult("sender_time", sender_time_, "ms", @@ -753,7 +759,7 @@ void VideoAnalyzer::PerformFrameComparison( ssim = I420SSIM(&*comparison.reference, &*comparison.render); } - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); if (psnr >= 0.0 && (!worst_frame_ || worst_frame_->psnr > psnr)) { worst_frame_.emplace(FrameWithPsnr{psnr, 
*comparison.render}); @@ -842,7 +848,7 @@ void VideoAnalyzer::PrintResultWithExternalMean( void VideoAnalyzer::PrintSamplesToFile() { FILE* out = graph_data_output_file_; - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); absl::c_sort(samples_, [](const Sample& A, const Sample& B) -> bool { return A.input_time_ms < B.input_time_ms; }); @@ -873,14 +879,14 @@ void VideoAnalyzer::AddCapturedFrameForComparison( const VideoFrame& video_frame) { bool must_capture = false; { - rtc::CritScope lock(&comparison_lock_); + MutexLock lock(&comparison_lock_); must_capture = captured_frames_ < frames_to_process_; if (must_capture) { ++captured_frames_; } } if (must_capture) { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); frames_.push_back(video_frame); } } @@ -903,7 +909,7 @@ void VideoAnalyzer::AddFrameComparison(const VideoFrame& reference, if (it != encoded_frame_sizes_.end()) encoded_frame_sizes_.erase(it); - rtc::CritScope crit(&comparison_lock_); + MutexLock lock(&comparison_lock_); if (comparisons_.size() < kMaxComparisons) { comparisons_.push_back(FrameComparison( reference, render, dropped, reference.ntp_time_ms(), send_time_ms, @@ -999,7 +1005,7 @@ void VideoAnalyzer::CapturedFrameForwarder::OnFrame( copy.set_ntp_time_ms(clock_->CurrentNtpInMilliseconds()); copy.set_timestamp(copy.ntp_time_ms() * 90); analyzer_->AddCapturedFrameForComparison(copy); - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); ++captured_frames_; if (send_stream_input_ && clock_->CurrentTime() <= test_end_ && captured_frames_ <= frames_to_capture_) { @@ -1011,7 +1017,7 @@ void VideoAnalyzer::CapturedFrameForwarder::AddOrUpdateSink( rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) { { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); RTC_DCHECK(!send_stream_input_ || send_stream_input_ == sink); send_stream_input_ = sink; } @@ -1022,7 +1028,7 @@ void VideoAnalyzer::CapturedFrameForwarder::AddOrUpdateSink( void 
VideoAnalyzer::CapturedFrameForwarder::RemoveSink( rtc::VideoSinkInterface* sink) { - rtc::CritScope lock(&crit_); + MutexLock lock(&lock_); RTC_DCHECK(sink == send_stream_input_); send_stream_input_ = nullptr; } diff --git a/video/video_analyzer.h b/video/video_analyzer.h index 14f77ac53c..68861d1b5f 100644 --- a/video/video_analyzer.h +++ b/video/video_analyzer.h @@ -23,6 +23,7 @@ #include "rtc_base/event.h" #include "rtc_base/numerics/running_statistics.h" #include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/time_utils.h" #include "test/layer_filtering_transport.h" #include "test/rtp_file_writer.h" @@ -34,7 +35,7 @@ class VideoAnalyzer : public PacketReceiver, public Transport, public rtc::VideoSinkInterface { public: - using Statistics = RunningStatistics; + using Statistics = webrtc_impl::RunningStatistics; VideoAnalyzer(test::LayerFilteringTransport* transport, const std::string& test_label, @@ -83,9 +84,9 @@ class VideoAnalyzer : public PacketReceiver, void StartMeasuringCpuProcessTime(); void StopMeasuringCpuProcessTime(); - void StartExcludingCpuThreadTime(); - void StopExcludingCpuThreadTime(); - double GetCpuUsagePercent(); + void StartExcludingCpuThreadTime() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_); + void StopExcludingCpuThreadTime() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_); + double GetCpuUsagePercent() RTC_LOCKS_EXCLUDED(cpu_measurement_lock_); test::LayerFilteringTransport* const transport_; PacketReceiver* receiver_; @@ -153,22 +154,25 @@ class VideoAnalyzer : public PacketReceiver, void SetSource(rtc::VideoSourceInterface* video_source); private: - void OnFrame(const VideoFrame& video_frame) override; + void OnFrame(const VideoFrame& video_frame) + RTC_LOCKS_EXCLUDED(lock_) override; // Called when |send_stream_.SetSource()| is called. 
void AddOrUpdateSink(rtc::VideoSinkInterface* sink, - const rtc::VideoSinkWants& wants) override; + const rtc::VideoSinkWants& wants) + RTC_LOCKS_EXCLUDED(lock_) override; // Called by |send_stream_| when |send_stream_.SetSource()| is called. - void RemoveSink(rtc::VideoSinkInterface* sink) override; + void RemoveSink(rtc::VideoSinkInterface* sink) + RTC_LOCKS_EXCLUDED(lock_) override; VideoAnalyzer* const analyzer_; - rtc::CriticalSection crit_; + Mutex lock_; rtc::VideoSinkInterface* send_stream_input_ - RTC_GUARDED_BY(crit_); + RTC_GUARDED_BY(lock_); VideoSourceInterface* video_source_; Clock* clock_; - int captured_frames_ RTC_GUARDED_BY(crit_); + int captured_frames_ RTC_GUARDED_BY(lock_); const int frames_to_capture_; const Timestamp test_end_; }; @@ -184,21 +188,23 @@ class VideoAnalyzer : public PacketReceiver, const VideoFrame& render, bool dropped, int64_t render_time_ms) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_); - void PollStats(); + void PollStats() RTC_LOCKS_EXCLUDED(comparison_lock_); static void FrameComparisonThread(void* obj); bool CompareFrames(); bool PopComparison(FrameComparison* comparison); // Increment counter for number of frames received for comparison. - void FrameRecorded(); + void FrameRecorded() RTC_EXCLUSIVE_LOCKS_REQUIRED(comparison_lock_); // Returns true if all frames to be compared have been taken from the queue. - bool AllFramesRecorded(); + bool AllFramesRecorded() RTC_LOCKS_EXCLUDED(comparison_lock_); + bool AllFramesRecordedLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(comparison_lock_); // Increase count of number of frames processed. Returns true if this was the // last frame to be processed. 
- bool FrameProcessed(); - void PrintResults(); - void PerformFrameComparison(const FrameComparison& comparison); + bool FrameProcessed() RTC_LOCKS_EXCLUDED(comparison_lock_); + void PrintResults() RTC_LOCKS_EXCLUDED(lock_, comparison_lock_); + void PerformFrameComparison(const FrameComparison& comparison) + RTC_LOCKS_EXCLUDED(comparison_lock_); void PrintResult(const char* result_type, Statistics stats, const char* unit, @@ -209,8 +215,9 @@ class VideoAnalyzer : public PacketReceiver, Statistics stats, const char* unit, webrtc::test::ImproveDirection improve_direction); - void PrintSamplesToFile(void); - void AddCapturedFrameForComparison(const VideoFrame& video_frame); + void PrintSamplesToFile(void) RTC_LOCKS_EXCLUDED(comparison_lock_); + void AddCapturedFrameForComparison(const VideoFrame& video_frame) + RTC_LOCKS_EXCLUDED(lock_, comparison_lock_); Call* call_; VideoSendStream* send_stream_; @@ -226,7 +233,7 @@ class VideoAnalyzer : public PacketReceiver, const int selected_sl_; const int selected_tl_; - rtc::CriticalSection comparison_lock_; + Mutex comparison_lock_; std::vector samples_ RTC_GUARDED_BY(comparison_lock_); Statistics sender_time_ RTC_GUARDED_BY(comparison_lock_); Statistics receiver_time_ RTC_GUARDED_BY(comparison_lock_); @@ -264,37 +271,38 @@ class VideoAnalyzer : public PacketReceiver, size_t last_fec_bytes_; - rtc::CriticalSection crit_; + Mutex lock_ RTC_ACQUIRED_BEFORE(comparison_lock_) + RTC_ACQUIRED_BEFORE(cpu_measurement_lock_); const int frames_to_process_; const Timestamp test_end_; int frames_recorded_ RTC_GUARDED_BY(comparison_lock_); int frames_processed_ RTC_GUARDED_BY(comparison_lock_); int captured_frames_ RTC_GUARDED_BY(comparison_lock_); int dropped_frames_ RTC_GUARDED_BY(comparison_lock_); - int dropped_frames_before_first_encode_ RTC_GUARDED_BY(crit_); - int dropped_frames_before_rendering_ RTC_GUARDED_BY(crit_); + int dropped_frames_before_first_encode_ RTC_GUARDED_BY(lock_); + int dropped_frames_before_rendering_ 
RTC_GUARDED_BY(lock_); int64_t last_render_time_ RTC_GUARDED_BY(comparison_lock_); int64_t last_render_delta_ms_ RTC_GUARDED_BY(comparison_lock_); int64_t last_unfreeze_time_ms_ RTC_GUARDED_BY(comparison_lock_); - uint32_t rtp_timestamp_delta_ RTC_GUARDED_BY(crit_); + uint32_t rtp_timestamp_delta_ RTC_GUARDED_BY(lock_); - rtc::CriticalSection cpu_measurement_lock_; + Mutex cpu_measurement_lock_; int64_t cpu_time_ RTC_GUARDED_BY(cpu_measurement_lock_); int64_t wallclock_time_ RTC_GUARDED_BY(cpu_measurement_lock_); - std::deque frames_ RTC_GUARDED_BY(crit_); - absl::optional last_rendered_frame_ RTC_GUARDED_BY(crit_); - rtc::TimestampWrapAroundHandler wrap_handler_ RTC_GUARDED_BY(crit_); - std::map send_times_ RTC_GUARDED_BY(crit_); - std::map recv_times_ RTC_GUARDED_BY(crit_); - std::map encoded_frame_sizes_ RTC_GUARDED_BY(crit_); - absl::optional first_encoded_timestamp_ RTC_GUARDED_BY(crit_); - absl::optional first_sent_timestamp_ RTC_GUARDED_BY(crit_); + std::deque frames_ RTC_GUARDED_BY(lock_); + absl::optional last_rendered_frame_ RTC_GUARDED_BY(lock_); + rtc::TimestampWrapAroundHandler wrap_handler_ RTC_GUARDED_BY(lock_); + std::map send_times_ RTC_GUARDED_BY(lock_); + std::map recv_times_ RTC_GUARDED_BY(lock_); + std::map encoded_frame_sizes_ RTC_GUARDED_BY(lock_); + absl::optional first_encoded_timestamp_ RTC_GUARDED_BY(lock_); + absl::optional first_sent_timestamp_ RTC_GUARDED_BY(lock_); const double avg_psnr_threshold_; const double avg_ssim_threshold_; bool is_quick_test_enabled_; - std::vector comparison_thread_pool_; + std::vector comparison_thread_pool_; rtc::Event comparison_available_event_; std::deque comparisons_ RTC_GUARDED_BY(comparison_lock_); bool quit_ RTC_GUARDED_BY(comparison_lock_); diff --git a/video/video_loopback.cc b/video/video_loopback.cc index 99ff4493bd..7762d9653d 100644 --- a/video/video_loopback.cc +++ b/video/video_loopback.cc @@ -376,33 +376,40 @@ void Loopback() { call_bitrate_config.max_bitrate_bps = -1; // Don't cap 
bandwidth estimate. VideoQualityTest::Params params; - params.call = {absl::GetFlag(FLAGS_send_side_bwe), - absl::GetFlag(FLAGS_generic_descriptor), call_bitrate_config, - 0}; - params.video[0] = {absl::GetFlag(FLAGS_video), - Width(), - Height(), - Fps(), - MinBitrateKbps() * 1000, - TargetBitrateKbps() * 1000, - MaxBitrateKbps() * 1000, - absl::GetFlag(FLAGS_suspend_below_min_bitrate), - Codec(), - NumTemporalLayers(), - SelectedTL(), - 0, // No min transmit bitrate. - absl::GetFlag(FLAGS_use_ulpfec), - absl::GetFlag(FLAGS_use_flexfec), - NumStreams() < 2, // Automatic quality scaling. - Clip(), - GetCaptureDevice()}; - params.audio = { - absl::GetFlag(FLAGS_audio), absl::GetFlag(FLAGS_audio_video_sync), - absl::GetFlag(FLAGS_audio_dtx), absl::GetFlag(FLAGS_use_real_adm)}; - params.logging = {RtcEventLogName(), RtpDumpName(), EncodedFramePath()}; + params.call.send_side_bwe = absl::GetFlag(FLAGS_send_side_bwe); + params.call.generic_descriptor = absl::GetFlag(FLAGS_generic_descriptor); + params.call.call_bitrate_config = call_bitrate_config; + + params.video[0].enabled = absl::GetFlag(FLAGS_video); + params.video[0].width = Width(); + params.video[0].height = Height(); + params.video[0].fps = Fps(); + params.video[0].min_bitrate_bps = MinBitrateKbps() * 1000; + params.video[0].target_bitrate_bps = TargetBitrateKbps() * 1000; + params.video[0].max_bitrate_bps = MaxBitrateKbps() * 1000; + params.video[0].suspend_below_min_bitrate = + absl::GetFlag(FLAGS_suspend_below_min_bitrate); + params.video[0].codec = Codec(); + params.video[0].num_temporal_layers = NumTemporalLayers(); + params.video[0].selected_tl = SelectedTL(); + params.video[0].min_transmit_bps = 0; + params.video[0].ulpfec = absl::GetFlag(FLAGS_use_ulpfec); + params.video[0].flexfec = absl::GetFlag(FLAGS_use_flexfec); + params.video[0].automatic_scaling = NumStreams() < 2; + params.video[0].clip_path = Clip(); + params.video[0].capture_device_index = GetCaptureDevice(); + params.audio.enabled = 
absl::GetFlag(FLAGS_audio); + params.audio.sync_video = absl::GetFlag(FLAGS_audio_video_sync); + params.audio.dtx = absl::GetFlag(FLAGS_audio_dtx); + params.audio.use_real_adm = absl::GetFlag(FLAGS_use_real_adm); + params.logging.rtc_event_log_name = RtcEventLogName(); + params.logging.rtp_dump_name = RtpDumpName(); + params.logging.encoded_frame_base_path = EncodedFramePath(); params.screenshare[0].enabled = false; - params.analyzer = {"video", 0.0, 0.0, DurationSecs(), - OutputFilename(), GraphTitle()}; + params.analyzer.test_label = "video"; + params.analyzer.test_durations_secs = DurationSecs(); + params.analyzer.graph_data_output_filename = OutputFilename(); + params.analyzer.graph_title = GraphTitle(); params.config = pipe_config; if (NumStreams() > 1 && Stream0().empty() && Stream1().empty()) { diff --git a/video/video_quality_test.cc b/video/video_quality_test.cc index 94ce268fa9..b77a4759a2 100644 --- a/video/video_quality_test.cc +++ b/video/video_quality_test.cc @@ -238,8 +238,7 @@ class QualityTestVideoEncoder : public VideoEncoder, private: // Implement EncodedImageCallback Result OnEncodedImage(const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override { + const CodecSpecificInfo* codec_specific_info) override { if (codec_specific_info) { int simulcast_index; if (codec_specific_info->codecType == kVideoCodecVP9) { @@ -258,8 +257,7 @@ class QualityTestVideoEncoder : public VideoEncoder, } } - return callback_->OnEncodedImage(encoded_image, codec_specific_info, - fragmentation); + return callback_->OnEncodedImage(encoded_image, codec_specific_info); } void OnDroppedFrame(DropReason reason) override { @@ -434,58 +432,6 @@ VideoQualityTest::VideoQualityTest( std::move(injection_components_->network_controller_factory); } -VideoQualityTest::Params::Params() - : call({false, false, BitrateConstraints(), 0}), - video{{false, - 640, - 480, - 30, - 50, - 800, - 800, - false, - 
"VP8", - 1, - -1, - 0, - false, - false, - false, - "", - 0, - {}, - 0.0}, - {false, - 640, - 480, - 30, - 50, - 800, - 800, - false, - "VP8", - 1, - -1, - 0, - false, - false, - false, - "", - 0, - {}, - 0.0}}, - audio({false, false, false, false}), - screenshare{{false, false, 10, 0}, {false, false, 10, 0}}, - analyzer({"", 0.0, 0.0, 0, "", ""}), - config(absl::nullopt), - ss{{std::vector(), 0, 0, -1, InterLayerPredMode::kOn, - std::vector()}, - {std::vector(), 0, 0, -1, InterLayerPredMode::kOn, - std::vector()}}, - logging({"", "", ""}) {} - -VideoQualityTest::Params::~Params() = default; - VideoQualityTest::InjectionComponents::InjectionComponents() = default; VideoQualityTest::InjectionComponents::~InjectionComponents() = default; @@ -680,7 +626,7 @@ void VideoQualityTest::FillScalabilitySettings( encoder_config.spatial_layers = params->ss[video_idx].spatial_layers; encoder_config.simulcast_layers = std::vector(num_streams); encoder_config.video_stream_factory = - new rtc::RefCountedObject( + rtc::make_ref_counted( params->video[video_idx].codec, kDefaultMaxQp, params->screenshare[video_idx].enabled, true); params->ss[video_idx].streams = @@ -815,11 +761,6 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, } if (params_.call.generic_descriptor) { - // The generic descriptor is currently behind a field trial, so it needs - // to be set for this flag to have any effect. - // TODO(philipel): Remove this check when the experiment is removed. 
- RTC_CHECK(field_trial::IsEnabled("WebRTC-GenericDescriptor")); - video_send_configs_[video_idx].rtp.extensions.emplace_back( RtpExtension::kGenericFrameDescriptorUri00, kGenericFrameDescriptorExtensionId00); @@ -859,7 +800,7 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, params_.ss[video_idx].streams; } video_encoder_configs_[video_idx].video_stream_factory = - new rtc::RefCountedObject( + rtc::make_ref_counted( params_.video[video_idx].codec, params_.ss[video_idx].streams[0].max_qp, params_.screenshare[video_idx].enabled, true); @@ -888,7 +829,7 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, vp8_settings.numberOfTemporalLayers = static_cast( params_.video[video_idx].num_temporal_layers); video_encoder_configs_[video_idx].encoder_specific_settings = - new rtc::RefCountedObject< + rtc::make_ref_counted< VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings); } else if (params_.video[video_idx].codec == "VP9") { VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); @@ -905,7 +846,7 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, vp9_settings.flexibleMode = true; } video_encoder_configs_[video_idx].encoder_specific_settings = - new rtc::RefCountedObject< + rtc::make_ref_counted< VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings); } } else if (params_.ss[video_idx].num_spatial_layers > 1) { @@ -919,14 +860,18 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, vp9_settings.interLayerPred = params_.ss[video_idx].inter_layer_pred; vp9_settings.automaticResizeOn = false; video_encoder_configs_[video_idx].encoder_specific_settings = - new rtc::RefCountedObject< - VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings); + rtc::make_ref_counted( + vp9_settings); + RTC_DCHECK_EQ(video_encoder_configs_[video_idx].simulcast_layers.size(), + 1); + // Min bitrate will be enforced by spatial layer config instead. 
+ video_encoder_configs_[video_idx].simulcast_layers[0].min_bitrate_bps = 0; } else if (params_.video[video_idx].automatic_scaling) { if (params_.video[video_idx].codec == "VP8") { VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings(); vp8_settings.automaticResizeOn = true; video_encoder_configs_[video_idx].encoder_specific_settings = - new rtc::RefCountedObject< + rtc::make_ref_counted< VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings); } else if (params_.video[video_idx].codec == "VP9") { VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); @@ -934,7 +879,7 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, vp9_settings.automaticResizeOn = params_.ss[video_idx].num_spatial_layers == 1; video_encoder_configs_[video_idx].encoder_specific_settings = - new rtc::RefCountedObject< + rtc::make_ref_counted< VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings); } else if (params_.video[video_idx].codec == "H264") { // Quality scaling is always on for H.264. 
@@ -953,18 +898,18 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, VideoCodecVP8 vp8_settings = VideoEncoder::GetDefaultVp8Settings(); vp8_settings.automaticResizeOn = false; video_encoder_configs_[video_idx].encoder_specific_settings = - new rtc::RefCountedObject< + rtc::make_ref_counted< VideoEncoderConfig::Vp8EncoderSpecificSettings>(vp8_settings); } else if (params_.video[video_idx].codec == "VP9") { VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); vp9_settings.automaticResizeOn = false; video_encoder_configs_[video_idx].encoder_specific_settings = - new rtc::RefCountedObject< + rtc::make_ref_counted< VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings); } else if (params_.video[video_idx].codec == "H264") { VideoCodecH264 h264_settings = VideoEncoder::GetDefaultH264Settings(); video_encoder_configs_[video_idx].encoder_specific_settings = - new rtc::RefCountedObject< + rtc::make_ref_counted< VideoEncoderConfig::H264EncoderSpecificSettings>(h264_settings); } } @@ -980,13 +925,13 @@ void VideoQualityTest::SetupVideo(Transport* send_transport, } CreateMatchingFecConfig(recv_transport, *GetVideoSendConfig()); - GetFlexFecConfig()->transport_cc = params_.call.send_side_bwe; + GetFlexFecConfig()->rtp.transport_cc = params_.call.send_side_bwe; if (params_.call.send_side_bwe) { - GetFlexFecConfig()->rtp_header_extensions.push_back( + GetFlexFecConfig()->rtp.extensions.push_back( RtpExtension(RtpExtension::kTransportSequenceNumberUri, kTransportSequenceNumberExtensionId)); } else { - GetFlexFecConfig()->rtp_header_extensions.push_back( + GetFlexFecConfig()->rtp.extensions.push_back( RtpExtension(RtpExtension::kAbsSendTimeUri, kAbsSendTimeExtensionId)); } } @@ -1041,7 +986,7 @@ void VideoQualityTest::SetupThumbnails(Transport* send_transport, thumbnail_encoder_config.max_bitrate_bps = 50000; std::vector streams{params_.ss[0].streams[0]}; thumbnail_encoder_config.video_stream_factory = - new rtc::RefCountedObject(streams); + 
rtc::make_ref_counted(streams); thumbnail_encoder_config.spatial_layers = params_.ss[0].spatial_layers; thumbnail_encoder_configs_.push_back(thumbnail_encoder_config.Copy()); @@ -1163,6 +1108,11 @@ void VideoQualityTest::CreateCapturers() { static_cast(params_.video[video_idx].width), static_cast(params_.video[video_idx].height), test::FrameGeneratorInterface::OutputType::kI010, absl::nullopt); + } else if (params_.video[video_idx].clip_path == "GeneratorNV12") { + frame_generator = test::CreateSquareFrameGenerator( + static_cast(params_.video[video_idx].width), + static_cast(params_.video[video_idx].height), + test::FrameGeneratorInterface::OutputType::kNV12, absl::nullopt); } else if (params_.video[video_idx].clip_path.empty()) { video_sources_[video_idx] = test::CreateVideoCapturer( params_.video[video_idx].width, params_.video[video_idx].height, @@ -1398,8 +1348,8 @@ rtc::scoped_refptr VideoQualityTest::CreateAudioDevice() { // CO_E_NOTINITIALIZED otherwise. The legacy ADM for Windows used internal // COM initialization but the new ADM requires COM to be initialized // externally. - com_initializer_ = std::make_unique( - webrtc_win::ScopedCOMInitializer::kMTA); + com_initializer_ = + std::make_unique(ScopedCOMInitializer::kMTA); RTC_CHECK(com_initializer_->Succeeded()); RTC_CHECK(webrtc_win::core_audio_utility::IsSupported()); RTC_CHECK(webrtc_win::core_audio_utility::IsMMCSSSupported()); diff --git a/video/video_quality_test.h b/video/video_quality_test.h index 2177830794..f49ce385b6 100644 --- a/video/video_quality_test.h +++ b/video/video_quality_test.h @@ -30,6 +30,7 @@ #include "video/video_analyzer.h" #ifdef WEBRTC_WIN #include "modules/audio_device/win/core_audio_utility_win.h" +#include "rtc_base/win/scoped_com_initializer.h" #endif namespace webrtc { @@ -137,7 +138,7 @@ class VideoQualityTest : public test::CallTest, #ifdef WEBRTC_WIN // Windows Core Audio based ADM needs to run on a COM initialized thread. 
// Only referenced in combination with --audio --use_real_adm flags. - std::unique_ptr com_initializer_; + std::unique_ptr com_initializer_; #endif }; diff --git a/video/video_receive_stream.cc b/video/video_receive_stream.cc index f1b3fc7b5b..da8eb7de60 100644 --- a/video/video_receive_stream.cc +++ b/video/video_receive_stream.cc @@ -24,6 +24,7 @@ #include "api/array_view.h" #include "api/crypto/frame_decryptor_interface.h" #include "api/video/encoded_image.h" +#include "api/video_codecs/h264_profile_level_id.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_decoder_factory.h" @@ -31,7 +32,6 @@ #include "call/rtp_stream_receiver_controller_interface.h" #include "call/rtx_receive_stream.h" #include "common_video/include/incoming_video_stream.h" -#include "media/base/h264_profile_level_id.h" #include "modules/utility/include/process_thread.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_coding_defines.h" @@ -39,7 +39,6 @@ #include "modules/video_coding/timing.h" #include "modules/video_coding/utility/vp8_header_parser.h" #include "rtc_base/checks.h" -#include "rtc_base/experiments/keyframe_interval_settings.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" @@ -60,7 +59,6 @@ constexpr int VideoReceiveStream::kMaxWaitForKeyFrameMs; namespace { -using video_coding::EncodedFrame; using ReturnReason = video_coding::FrameBuffer::ReturnReason; constexpr int kMinBaseMinimumDelayMs = 0; @@ -69,7 +67,7 @@ constexpr int kMaxBaseMinimumDelayMs = 10000; constexpr int kMaxWaitForFrameMs = 3000; // Concrete instance of RecordableEncodedFrame wrapping needed content -// from video_coding::EncodedFrame. +// from EncodedFrame. 
class WebRtcRecordableEncodedFrame : public RecordableEncodedFrame { public: explicit WebRtcRecordableEncodedFrame(const EncodedFrame& frame) @@ -115,9 +113,6 @@ class WebRtcRecordableEncodedFrame : public RecordableEncodedFrame { VideoCodec CreateDecoderVideoCodec(const VideoReceiveStream::Decoder& decoder) { VideoCodec codec; - memset(&codec, 0, sizeof(codec)); - - codec.plType = decoder.payload_type; codec.codecType = PayloadStringToCodecType(decoder.video_format.name); if (codec.codecType == kVideoCodecVP8) { @@ -170,6 +165,11 @@ class NullVideoDecoder : public webrtc::VideoDecoder { int32_t Release() override { return WEBRTC_VIDEO_CODEC_OK; } + DecoderInfo GetDecoderInfo() const override { + DecoderInfo info; + info.implementation_name = "NullVideoDecoder"; + return info; + } const char* ImplementationName() const override { return "NullVideoDecoder"; } }; @@ -219,12 +219,8 @@ VideoReceiveStream::VideoReceiveStream( config_.frame_decryptor, config_.frame_transformer), rtp_stream_sync_(this), - max_wait_for_keyframe_ms_(KeyframeIntervalSettings::ParseFromFieldTrials() - .MaxWaitForKeyframeMs() - .value_or(kMaxWaitForKeyFrameMs)), - max_wait_for_frame_ms_(KeyframeIntervalSettings::ParseFromFieldTrials() - .MaxWaitForFrameMs() - .value_or(kMaxWaitForFrameMs)), + max_wait_for_keyframe_ms_(kMaxWaitForKeyFrameMs), + max_wait_for_frame_ms_(kMaxWaitForFrameMs), decode_queue_(task_queue_factory_->CreateTaskQueue( "DecodingQueue", TaskQueueFactory::Priority::HIGH)) { @@ -238,9 +234,9 @@ VideoReceiveStream::VideoReceiveStream( network_sequence_checker_.Detach(); RTC_DCHECK(!config_.decoders.empty()); + RTC_CHECK(config_.decoder_factory); std::set decoder_payload_types; for (const Decoder& decoder : config_.decoders) { - RTC_CHECK(decoder.decoder_factory); RTC_CHECK(decoder_payload_types.find(decoder.payload_type) == decoder_payload_types.end()) << "Duplicate payload type (" << decoder.payload_type @@ -319,8 +315,6 @@ void VideoReceiveStream::Start() { const bool 
protected_by_fec = config_.rtp.protected_by_flexfec || rtp_video_stream_receiver_.IsUlpfecEnabled(); - frame_buffer_->Start(); - if (rtp_video_stream_receiver_.IsRetransmissionsEnabled() && protected_by_fec) { frame_buffer_->SetProtectionMode(kProtectionNackFEC); @@ -338,8 +332,7 @@ void VideoReceiveStream::Start() { for (const Decoder& decoder : config_.decoders) { std::unique_ptr video_decoder = - decoder.decoder_factory->LegacyCreateVideoDecoder(decoder.video_format, - config_.stream_id); + config_.decoder_factory->CreateVideoDecoder(decoder.video_format); // If we still have no valid decoder, we have to create a "Null" decoder // that ignores all calls. The reason we can get into this state is that the // old decoder factory interface doesn't have a way to query supported @@ -374,11 +367,12 @@ void VideoReceiveStream::Start() { VideoCodec codec = CreateDecoderVideoCodec(decoder); const bool raw_payload = - config_.rtp.raw_payload_types.count(codec.plType) > 0; - rtp_video_stream_receiver_.AddReceiveCodec( - codec, decoder.video_format.parameters, raw_payload); + config_.rtp.raw_payload_types.count(decoder.payload_type) > 0; + rtp_video_stream_receiver_.AddReceiveCodec(decoder.payload_type, codec, + decoder.video_format.parameters, + raw_payload); RTC_CHECK_EQ(VCM_OK, video_receiver_.RegisterReceiveCodec( - &codec, num_cpu_cores_, false)); + decoder.payload_type, &codec, num_cpu_cores_)); } RTC_DCHECK(renderer != nullptr); @@ -494,7 +488,7 @@ bool VideoReceiveStream::SetBaseMinimumPlayoutDelayMs(int delay_ms) { return false; } - rtc::CritScope cs(&playout_delay_lock_); + MutexLock lock(&playout_delay_lock_); base_minimum_playout_delay_ms_ = delay_ms; UpdatePlayoutDelays(); return true; @@ -503,7 +497,7 @@ bool VideoReceiveStream::SetBaseMinimumPlayoutDelayMs(int delay_ms) { int VideoReceiveStream::GetBaseMinimumPlayoutDelayMs() const { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); - rtc::CritScope cs(&playout_delay_lock_); + MutexLock lock(&playout_delay_lock_); 
return base_minimum_playout_delay_ms_; } @@ -512,6 +506,10 @@ void VideoReceiveStream::OnFrame(const VideoFrame& video_frame) { int64_t video_playout_ntp_ms; int64_t sync_offset_ms; double estimated_freq_khz; + + // TODO(bugs.webrtc.org/10739): we should set local capture clock offset for + // |video_frame.packet_infos|. But VideoFrame is const qualified here. + // TODO(tommi): GetStreamSyncOffsetInMs grabs three locks. One inside the // function itself, another in GetChannel() and a third in // GetPlayoutTimestamp. Seems excessive. Anyhow, I'm assuming the function @@ -553,8 +551,7 @@ void VideoReceiveStream::RequestKeyFrame(int64_t timestamp_ms) { last_keyframe_request_ms_ = timestamp_ms; } -void VideoReceiveStream::OnCompleteFrame( - std::unique_ptr frame) { +void VideoReceiveStream::OnCompleteFrame(std::unique_ptr frame) { RTC_DCHECK_RUN_ON(&network_sequence_checker_); // TODO(https://bugs.webrtc.org/9974): Consider removing this workaround. int64_t time_now_ms = clock_->TimeInMilliseconds(); @@ -564,15 +561,15 @@ void VideoReceiveStream::OnCompleteFrame( } last_complete_frame_time_ms_ = time_now_ms; - const PlayoutDelay& playout_delay = frame->EncodedImage().playout_delay_; + const VideoPlayoutDelay& playout_delay = frame->EncodedImage().playout_delay_; if (playout_delay.min_ms >= 0) { - rtc::CritScope cs(&playout_delay_lock_); + MutexLock lock(&playout_delay_lock_); frame_minimum_playout_delay_ms_ = playout_delay.min_ms; UpdatePlayoutDelays(); } if (playout_delay.max_ms >= 0) { - rtc::CritScope cs(&playout_delay_lock_); + MutexLock lock(&playout_delay_lock_); frame_maximum_playout_delay_ms_ = playout_delay.max_ms; UpdatePlayoutDelays(); } @@ -617,11 +614,12 @@ void VideoReceiveStream::SetEstimatedPlayoutNtpTimestampMs( RTC_NOTREACHED(); } -void VideoReceiveStream::SetMinimumPlayoutDelay(int delay_ms) { +bool VideoReceiveStream::SetMinimumPlayoutDelay(int delay_ms) { RTC_DCHECK_RUN_ON(&module_process_sequence_checker_); - rtc::CritScope 
cs(&playout_delay_lock_); + MutexLock lock(&playout_delay_lock_); syncable_minimum_playout_delay_ms_ = delay_ms; UpdatePlayoutDelays(); + return true; } int64_t VideoReceiveStream::GetWaitMs() const { @@ -637,17 +635,15 @@ void VideoReceiveStream::StartNextDecode() { [this](std::unique_ptr frame, ReturnReason res) { RTC_DCHECK_EQ(frame == nullptr, res == ReturnReason::kTimeout); RTC_DCHECK_EQ(frame != nullptr, res == ReturnReason::kFrameFound); - decode_queue_.PostTask([this, frame = std::move(frame)]() mutable { - RTC_DCHECK_RUN_ON(&decode_queue_); - if (decoder_stopped_) - return; - if (frame) { - HandleEncodedFrame(std::move(frame)); - } else { - HandleFrameBufferTimeout(); - } - StartNextDecode(); - }); + RTC_DCHECK_RUN_ON(&decode_queue_); + if (decoder_stopped_) + return; + if (frame) { + HandleEncodedFrame(std::move(frame)); + } else { + HandleFrameBufferTimeout(); + } + StartNextDecode(); }); } @@ -670,7 +666,7 @@ void VideoReceiveStream::HandleEncodedFrame( decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) { keyframe_required_ = false; frame_decoded_ = true; - rtp_video_stream_receiver_.FrameDecoded(frame->id.picture_id); + rtp_video_stream_receiver_.FrameDecoded(frame->Id()); if (decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) RequestKeyFrame(now_ms); @@ -683,7 +679,6 @@ void VideoReceiveStream::HandleEncodedFrame( } if (encoded_frame_buffer_function_) { - frame->Retain(); encoded_frame_buffer_function_(WebRtcRecordableEncodedFrame(*frame)); } } @@ -767,7 +762,6 @@ VideoReceiveStream::RecordingState VideoReceiveStream::SetAndGetRecordingState( RTC_DCHECK_RUN_ON(&decode_queue_); // Save old state. old_state.callback = std::move(encoded_frame_buffer_function_); - old_state.keyframe_needed = keyframe_generation_requested_; old_state.last_keyframe_request_ms = last_keyframe_request_ms_; // Set new state. 
@@ -776,7 +770,7 @@ VideoReceiveStream::RecordingState VideoReceiveStream::SetAndGetRecordingState( RequestKeyFrame(clock_->TimeInMilliseconds()); keyframe_generation_requested_ = true; } else { - keyframe_generation_requested_ = state.keyframe_needed; + keyframe_generation_requested_ = false; last_keyframe_request_ms_ = state.last_keyframe_request_ms.value_or(0); } event.Set(); diff --git a/video/video_receive_stream.h b/video/video_receive_stream.h index c1ebf2b600..c778d74558 100644 --- a/video/video_receive_stream.h +++ b/video/video_receive_stream.h @@ -14,8 +14,8 @@ #include #include +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_factory.h" -#include "api/transport/media/media_transport_interface.h" #include "api/video/recordable_encoded_frame.h" #include "call/rtp_packet_sink_interface.h" #include "call/syncable.h" @@ -24,7 +24,8 @@ #include "modules/rtp_rtcp/source/source_tracker.h" #include "modules/video_coding/frame_buffer2.h" #include "modules/video_coding/video_receiver2.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" #include "system_wrappers/include/clock.h" #include "video/receive_statistics_proxy.h" @@ -37,7 +38,6 @@ namespace webrtc { class CallStats; class ProcessThread; -class RTPFragmentationHeader; class RtpStreamReceiverInterface; class RtpStreamReceiverControllerInterface; class RtxReceiveStream; @@ -45,12 +45,13 @@ class VCMTiming; namespace internal { -class VideoReceiveStream : public webrtc::VideoReceiveStream, - public rtc::VideoSinkInterface, - public NackSender, - public video_coding::OnCompleteFrameCallback, - public Syncable, - public CallStatsObserver { +class VideoReceiveStream + : public webrtc::DEPRECATED_VideoReceiveStream, + public rtc::VideoSinkInterface, + public NackSender, + public RtpVideoStreamReceiver::OnCompleteFrameCallback, + public Syncable, + public 
CallStatsObserver { public: // The default number of milliseconds to pass before re-requesting a key frame // to be sent. @@ -86,6 +87,8 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream, void Start() override; void Stop() override; + const RtpConfig& rtp_config() const override { return config_.rtp; } + webrtc::VideoReceiveStream::Stats GetStats() const override; void AddSecondarySink(RtpPacketSinkInterface* sink) override; @@ -111,9 +114,8 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream, void SendNack(const std::vector& sequence_numbers, bool buffering_allowed) override; - // Implements video_coding::OnCompleteFrameCallback. - void OnCompleteFrame( - std::unique_ptr frame) override; + // Implements RtpVideoStreamReceiver::OnCompleteFrameCallback. + void OnCompleteFrame(std::unique_ptr frame) override; // Implements CallStatsObserver::OnRttUpdate void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override; @@ -127,7 +129,7 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream, int64_t time_ms) override; // SetMinimumPlayoutDelay is only called by A/V sync. 
- void SetMinimumPlayoutDelay(int delay_ms) override; + bool SetMinimumPlayoutDelay(int delay_ms) override; std::vector GetSources() const override; @@ -138,7 +140,7 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream, private: int64_t GetWaitMs() const; void StartNextDecode() RTC_RUN_ON(decode_queue_); - void HandleEncodedFrame(std::unique_ptr frame) + void HandleEncodedFrame(std::unique_ptr frame) RTC_RUN_ON(decode_queue_); void HandleFrameBufferTimeout() RTC_RUN_ON(decode_queue_); void UpdatePlayoutDelays() const @@ -151,9 +153,9 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream, void UpdateHistograms(); - SequenceChecker worker_sequence_checker_; - SequenceChecker module_process_sequence_checker_; - SequenceChecker network_sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker module_process_sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker network_sequence_checker_; TaskQueueFactory* const task_queue_factory_; @@ -206,7 +208,7 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream, const int max_wait_for_keyframe_ms_; const int max_wait_for_frame_ms_; - rtc::CriticalSection playout_delay_lock_; + mutable Mutex playout_delay_lock_; // All of them tries to change current min_playout_delay on |timing_| but // source of the change request is different in each case. 
Among them the diff --git a/video/video_receive_stream2.cc b/video/video_receive_stream2.cc index 1470123cd5..72257f01cc 100644 --- a/video/video_receive_stream2.cc +++ b/video/video_receive_stream2.cc @@ -24,6 +24,7 @@ #include "api/array_view.h" #include "api/crypto/frame_decryptor_interface.h" #include "api/video/encoded_image.h" +#include "api/video_codecs/h264_profile_level_id.h" #include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_decoder_factory.h" @@ -31,17 +32,16 @@ #include "call/rtp_stream_receiver_controller_interface.h" #include "call/rtx_receive_stream.h" #include "common_video/include/incoming_video_stream.h" -#include "media/base/h264_profile_level_id.h" #include "modules/video_coding/include/video_codec_interface.h" #include "modules/video_coding/include/video_coding_defines.h" #include "modules/video_coding/include/video_error_codes.h" #include "modules/video_coding/timing.h" #include "modules/video_coding/utility/vp8_header_parser.h" #include "rtc_base/checks.h" -#include "rtc_base/experiments/keyframe_interval_settings.h" #include "rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/system/thread_registry.h" #include "rtc_base/time_utils.h" #include "rtc_base/trace_event.h" @@ -58,7 +58,6 @@ constexpr int VideoReceiveStream2::kMaxWaitForKeyFrameMs; namespace { -using video_coding::EncodedFrame; using ReturnReason = video_coding::FrameBuffer::ReturnReason; constexpr int kMinBaseMinimumDelayMs = 0; @@ -66,17 +65,20 @@ constexpr int kMaxBaseMinimumDelayMs = 10000; constexpr int kMaxWaitForFrameMs = 3000; +constexpr int kDefaultMaximumPreStreamDecoders = 100; + // Concrete instance of RecordableEncodedFrame wrapping needed content -// from video_coding::EncodedFrame. +// from EncodedFrame. 
class WebRtcRecordableEncodedFrame : public RecordableEncodedFrame { public: - explicit WebRtcRecordableEncodedFrame(const EncodedFrame& frame) + explicit WebRtcRecordableEncodedFrame( + const EncodedFrame& frame, + RecordableEncodedFrame::EncodedResolution resolution) : buffer_(frame.GetEncodedData()), render_time_ms_(frame.RenderTime()), codec_(frame.CodecSpecific()->codecType), is_key_frame_(frame.FrameType() == VideoFrameType::kVideoFrameKey), - resolution_{frame.EncodedImage()._encodedWidth, - frame.EncodedImage()._encodedHeight} { + resolution_(resolution) { if (frame.ColorSpace()) { color_space_ = *frame.ColorSpace(); } @@ -113,9 +115,6 @@ class WebRtcRecordableEncodedFrame : public RecordableEncodedFrame { VideoCodec CreateDecoderVideoCodec(const VideoReceiveStream::Decoder& decoder) { VideoCodec codec; - memset(&codec, 0, sizeof(codec)); - - codec.plType = decoder.payload_type; codec.codecType = PayloadStringToCodecType(decoder.video_format.name); if (codec.codecType == kVideoCodecVP8) { @@ -133,8 +132,19 @@ VideoCodec CreateDecoderVideoCodec(const VideoReceiveStream::Decoder& decoder) { return associated_codec; } - codec.width = 320; - codec.height = 180; + FieldTrialOptional width("w"); + FieldTrialOptional height("h"); + ParseFieldTrial( + {&width, &height}, + field_trial::FindFullName("WebRTC-Video-InitialDecoderResolution")); + if (width && height) { + codec.width = width.Value(); + codec.height = height.Value(); + } else { + codec.width = 320; + codec.height = 180; + } + const int kDefaultStartBitrate = 300; codec.startBitrate = codec.minBitrate = codec.maxBitrate = kDefaultStartBitrate; @@ -171,6 +181,12 @@ class NullVideoDecoder : public webrtc::VideoDecoder { const char* ImplementationName() const override { return "NullVideoDecoder"; } }; +bool IsKeyFrameAndUnspecifiedResolution(const EncodedFrame& frame) { + return frame.FrameType() == VideoFrameType::kVideoFrameKey && + frame.EncodedImage()._encodedWidth == 0 && + 
frame.EncodedImage()._encodedHeight == 0; +} + // TODO(https://bugs.webrtc.org/9974): Consider removing this workaround. // Maximum time between frames before resetting the FrameBuffer to avoid RTP // timestamps wraparound to affect FrameBuffer. @@ -178,30 +194,45 @@ constexpr int kInactiveStreamThresholdMs = 600000; // 10 minutes. } // namespace -VideoReceiveStream2::VideoReceiveStream2( - TaskQueueFactory* task_queue_factory, - TaskQueueBase* current_queue, - RtpStreamReceiverControllerInterface* receiver_controller, - int num_cpu_cores, - PacketRouter* packet_router, - VideoReceiveStream::Config config, - ProcessThread* process_thread, - CallStats* call_stats, - Clock* clock, - VCMTiming* timing) +int DetermineMaxWaitForFrame(const VideoReceiveStream::Config& config, + bool is_keyframe) { + // A (arbitrary) conversion factor between the remotely signalled NACK buffer + // time (if not present defaults to 1000ms) and the maximum time we wait for a + // remote frame. Chosen to not change existing defaults when using not + // rtx-time. + const int conversion_factor = 3; + + if (config.rtp.nack.rtp_history_ms > 0 && + conversion_factor * config.rtp.nack.rtp_history_ms < kMaxWaitForFrameMs) { + return is_keyframe ? config.rtp.nack.rtp_history_ms + : conversion_factor * config.rtp.nack.rtp_history_ms; + } + return is_keyframe ? 
VideoReceiveStream2::kMaxWaitForKeyFrameMs + : kMaxWaitForFrameMs; +} + +VideoReceiveStream2::VideoReceiveStream2(TaskQueueFactory* task_queue_factory, + Call* call, + int num_cpu_cores, + PacketRouter* packet_router, + VideoReceiveStream::Config config, + CallStats* call_stats, + Clock* clock, + VCMTiming* timing) : task_queue_factory_(task_queue_factory), transport_adapter_(config.rtcp_send_transport), config_(std::move(config)), num_cpu_cores_(num_cpu_cores), - worker_thread_(current_queue), + call_(call), clock_(clock), call_stats_(call_stats), source_tracker_(clock_), - stats_proxy_(&config_, clock_, worker_thread_), + stats_proxy_(&config_, clock_, call->worker_thread()), rtp_receive_statistics_(ReceiveStatistics::Create(clock_)), timing_(timing), video_receiver_(clock_, timing_.get()), - rtp_video_stream_receiver_(clock_, + rtp_video_stream_receiver_(call->worker_thread(), + clock_, &transport_adapter_, call_stats->AsRtcpRttStats(), packet_router, @@ -209,35 +240,32 @@ VideoReceiveStream2::VideoReceiveStream2( rtp_receive_statistics_.get(), &stats_proxy_, &stats_proxy_, - process_thread, this, // NackSender nullptr, // Use default KeyFrameRequestSender this, // OnCompleteFrameCallback config_.frame_decryptor, config_.frame_transformer), - rtp_stream_sync_(current_queue, this), - max_wait_for_keyframe_ms_(KeyframeIntervalSettings::ParseFromFieldTrials() - .MaxWaitForKeyframeMs() - .value_or(kMaxWaitForKeyFrameMs)), - max_wait_for_frame_ms_(KeyframeIntervalSettings::ParseFromFieldTrials() - .MaxWaitForFrameMs() - .value_or(kMaxWaitForFrameMs)), + rtp_stream_sync_(call->worker_thread(), this), + max_wait_for_keyframe_ms_(DetermineMaxWaitForFrame(config, true)), + max_wait_for_frame_ms_(DetermineMaxWaitForFrame(config, false)), + low_latency_renderer_enabled_("enabled", true), + low_latency_renderer_include_predecode_buffer_("include_predecode_buffer", + true), + maximum_pre_stream_decoders_("max", kDefaultMaximumPreStreamDecoders), 
decode_queue_(task_queue_factory_->CreateTaskQueue( "DecodingQueue", TaskQueueFactory::Priority::HIGH)) { RTC_LOG(LS_INFO) << "VideoReceiveStream2: " << config_.ToString(); - RTC_DCHECK(worker_thread_); + RTC_DCHECK(call_->worker_thread()); RTC_DCHECK(config_.renderer); RTC_DCHECK(call_stats_); - - module_process_sequence_checker_.Detach(); - network_sequence_checker_.Detach(); + packet_sequence_checker_.Detach(); RTC_DCHECK(!config_.decoders.empty()); + RTC_CHECK(config_.decoder_factory); std::set decoder_payload_types; for (const Decoder& decoder : config_.decoders) { - RTC_CHECK(decoder.decoder_factory); RTC_CHECK(decoder_payload_types.find(decoder.payload_type) == decoder_payload_types.end()) << "Duplicate payload type (" << decoder.payload_type @@ -250,38 +278,67 @@ VideoReceiveStream2::VideoReceiveStream2( frame_buffer_.reset( new video_coding::FrameBuffer(clock_, timing_.get(), &stats_proxy_)); - // Register with RtpStreamReceiverController. - media_receiver_ = receiver_controller->CreateReceiver( - config_.rtp.remote_ssrc, &rtp_video_stream_receiver_); if (config_.rtp.rtx_ssrc) { rtx_receive_stream_ = std::make_unique( &rtp_video_stream_receiver_, config.rtp.rtx_associated_payload_types, config_.rtp.remote_ssrc, rtp_receive_statistics_.get()); - rtx_receiver_ = receiver_controller->CreateReceiver( - config_.rtp.rtx_ssrc, rtx_receive_stream_.get()); } else { rtp_receive_statistics_->EnableRetransmitDetection(config.rtp.remote_ssrc, true); } + + ParseFieldTrial({&low_latency_renderer_enabled_, + &low_latency_renderer_include_predecode_buffer_}, + field_trial::FindFullName("WebRTC-LowLatencyRenderer")); + ParseFieldTrial( + { + &maximum_pre_stream_decoders_, + }, + field_trial::FindFullName("WebRTC-PreStreamDecoders")); } VideoReceiveStream2::~VideoReceiveStream2() { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); RTC_LOG(LS_INFO) << "~VideoReceiveStream2: " << config_.ToString(); + RTC_DCHECK(!media_receiver_); + RTC_DCHECK(!rtx_receiver_); Stop(); } +void 
VideoReceiveStream2::RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RTC_DCHECK(!media_receiver_); + RTC_DCHECK(!rtx_receiver_); + + // Register with RtpStreamReceiverController. + media_receiver_ = receiver_controller->CreateReceiver( + config_.rtp.remote_ssrc, &rtp_video_stream_receiver_); + if (config_.rtp.rtx_ssrc) { + RTC_DCHECK(rtx_receive_stream_); + rtx_receiver_ = receiver_controller->CreateReceiver( + config_.rtp.rtx_ssrc, rtx_receive_stream_.get()); + } +} + +void VideoReceiveStream2::UnregisterFromTransport() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + media_receiver_.reset(); + rtx_receiver_.reset(); +} + void VideoReceiveStream2::SignalNetworkState(NetworkState state) { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); rtp_video_stream_receiver_.SignalNetworkState(state); } bool VideoReceiveStream2::DeliverRtcp(const uint8_t* packet, size_t length) { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); return rtp_video_stream_receiver_.DeliverRtcp(packet, length); } void VideoReceiveStream2::SetSync(Syncable* audio_syncable) { - RTC_DCHECK_RUN_ON(&worker_sequence_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); rtp_stream_sync_.ConfigureSync(audio_syncable); } @@ -295,8 +352,6 @@ void VideoReceiveStream2::Start() { const bool protected_by_fec = config_.rtp.protected_by_flexfec || rtp_video_stream_receiver_.IsUlpfecEnabled(); - frame_buffer_->Start(); - if (rtp_video_stream_receiver_.IsRetransmissionsEnabled() && protected_by_fec) { frame_buffer_->SetProtectionMode(kProtectionNackFEC); @@ -312,49 +367,29 @@ void VideoReceiveStream2::Start() { renderer = this; } + int decoders_count = 0; for (const Decoder& decoder : config_.decoders) { - std::unique_ptr video_decoder = - decoder.decoder_factory->LegacyCreateVideoDecoder(decoder.video_format, - config_.stream_id); - // If we still have no valid decoder, we have to create a "Null" decoder - // that ignores 
all calls. The reason we can get into this state is that the - // old decoder factory interface doesn't have a way to query supported - // codecs. - if (!video_decoder) { - video_decoder = std::make_unique(); + // Create up to maximum_pre_stream_decoders_ up front, wait the the other + // decoders until they are requested (i.e., we receive the corresponding + // payload). + if (decoders_count < maximum_pre_stream_decoders_) { + CreateAndRegisterExternalDecoder(decoder); + ++decoders_count; } - std::string decoded_output_file = - field_trial::FindFullName("WebRTC-DecoderDataDumpDirectory"); - // Because '/' can't be used inside a field trial parameter, we use ';' - // instead. - // This is only relevant to WebRTC-DecoderDataDumpDirectory - // field trial. ';' is chosen arbitrary. Even though it's a legal character - // in some file systems, we can sacrifice ability to use it in the path to - // dumped video, since it's developers-only feature for debugging. - absl::c_replace(decoded_output_file, ';', '/'); - if (!decoded_output_file.empty()) { - char filename_buffer[256]; - rtc::SimpleStringBuilder ssb(filename_buffer); - ssb << decoded_output_file << "/webrtc_receive_stream_" - << this->config_.rtp.remote_ssrc << "-" << rtc::TimeMicros() - << ".ivf"; - video_decoder = CreateFrameDumpingDecoderWrapper( - std::move(video_decoder), FileWrapper::OpenWriteOnly(ssb.str())); - } - - video_decoders_.push_back(std::move(video_decoder)); - - video_receiver_.RegisterExternalDecoder(video_decoders_.back().get(), - decoder.payload_type); VideoCodec codec = CreateDecoderVideoCodec(decoder); const bool raw_payload = - config_.rtp.raw_payload_types.count(codec.plType) > 0; - rtp_video_stream_receiver_.AddReceiveCodec( - codec, decoder.video_format.parameters, raw_payload); + config_.rtp.raw_payload_types.count(decoder.payload_type) > 0; + { + // TODO(bugs.webrtc.org/11993): Make this call on the network thread. 
+ RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_video_stream_receiver_.AddReceiveCodec( + decoder.payload_type, codec, decoder.video_format.parameters, + raw_payload); + } RTC_CHECK_EQ(VCM_OK, video_receiver_.RegisterReceiveCodec( - &codec, num_cpu_cores_, false)); + decoder.payload_type, &codec, num_cpu_cores_)); } RTC_DCHECK(renderer != nullptr); @@ -374,12 +409,23 @@ void VideoReceiveStream2::Start() { StartNextDecode(); }); decoder_running_ = true; - rtp_video_stream_receiver_.StartReceive(); + + { + // TODO(bugs.webrtc.org/11993): Make this call on the network thread. + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_video_stream_receiver_.StartReceive(); + } } void VideoReceiveStream2::Stop() { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); - rtp_video_stream_receiver_.StopReceive(); + { + // TODO(bugs.webrtc.org/11993): Make this call on the network thread. + // Also call `GetUniqueFramesSeen()` at the same time (since it's a counter + // that's updated on the network thread). + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_video_stream_receiver_.StopReceive(); + } stats_proxy_.OnUniqueFramesCounted( rtp_video_stream_receiver_.GetUniqueFramesSeen()); @@ -415,6 +461,43 @@ void VideoReceiveStream2::Stop() { transport_adapter_.Disable(); } +void VideoReceiveStream2::CreateAndRegisterExternalDecoder( + const Decoder& decoder) { + TRACE_EVENT0("webrtc", + "VideoReceiveStream2::CreateAndRegisterExternalDecoder"); + std::unique_ptr video_decoder = + config_.decoder_factory->CreateVideoDecoder(decoder.video_format); + // If we still have no valid decoder, we have to create a "Null" decoder + // that ignores all calls. The reason we can get into this state is that the + // old decoder factory interface doesn't have a way to query supported + // codecs. 
+ if (!video_decoder) { + video_decoder = std::make_unique(); + } + + std::string decoded_output_file = + field_trial::FindFullName("WebRTC-DecoderDataDumpDirectory"); + // Because '/' can't be used inside a field trial parameter, we use ';' + // instead. + // This is only relevant to WebRTC-DecoderDataDumpDirectory + // field trial. ';' is chosen arbitrary. Even though it's a legal character + // in some file systems, we can sacrifice ability to use it in the path to + // dumped video, since it's developers-only feature for debugging. + absl::c_replace(decoded_output_file, ';', '/'); + if (!decoded_output_file.empty()) { + char filename_buffer[256]; + rtc::SimpleStringBuilder ssb(filename_buffer); + ssb << decoded_output_file << "/webrtc_receive_stream_" + << this->config_.rtp.remote_ssrc << "-" << rtc::TimeMicros() << ".ivf"; + video_decoder = CreateFrameDumpingDecoderWrapper( + std::move(video_decoder), FileWrapper::OpenWriteOnly(ssb.str())); + } + + video_decoders_.push_back(std::move(video_decoder)); + video_receiver_.RegisterExternalDecoder(video_decoders_.back().get(), + decoder.payload_type); +} + VideoReceiveStream::Stats VideoReceiveStream2::GetStats() const { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); VideoReceiveStream2::Stats stats = stats_proxy_.GetStats(); @@ -457,23 +540,12 @@ void VideoReceiveStream2::UpdateHistograms() { stats_proxy_.UpdateHistograms(fraction_lost, rtp_stats, nullptr); } -void VideoReceiveStream2::AddSecondarySink(RtpPacketSinkInterface* sink) { - rtp_video_stream_receiver_.AddSecondarySink(sink); -} - -void VideoReceiveStream2::RemoveSecondarySink( - const RtpPacketSinkInterface* sink) { - rtp_video_stream_receiver_.RemoveSecondarySink(sink); -} - bool VideoReceiveStream2::SetBaseMinimumPlayoutDelayMs(int delay_ms) { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); if (delay_ms < kMinBaseMinimumDelayMs || delay_ms > kMaxBaseMinimumDelayMs) { return false; } - // TODO(bugs.webrtc.org/11489): Consider posting to worker. 
- rtc::CritScope cs(&playout_delay_lock_); base_minimum_playout_delay_ms_ = delay_ms; UpdatePlayoutDelays(); return true; @@ -481,15 +553,16 @@ bool VideoReceiveStream2::SetBaseMinimumPlayoutDelayMs(int delay_ms) { int VideoReceiveStream2::GetBaseMinimumPlayoutDelayMs() const { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); - - rtc::CritScope cs(&playout_delay_lock_); return base_minimum_playout_delay_ms_; } void VideoReceiveStream2::OnFrame(const VideoFrame& video_frame) { VideoFrameMetaData frame_meta(video_frame, clock_->CurrentTime()); - worker_thread_->PostTask( + // TODO(bugs.webrtc.org/10739): we should set local capture clock offset for + // |video_frame.packet_infos|. But VideoFrame is const qualified here. + + call_->worker_thread()->PostTask( ToQueuedTask(task_safety_, [frame_meta, this]() { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); int64_t video_playout_ntp_ms; @@ -506,6 +579,22 @@ void VideoReceiveStream2::OnFrame(const VideoFrame& video_frame) { source_tracker_.OnFrameDelivered(video_frame.packet_infos()); config_.renderer->OnFrame(video_frame); + webrtc::MutexLock lock(&pending_resolution_mutex_); + if (pending_resolution_.has_value()) { + if (!pending_resolution_->empty() && + (video_frame.width() != static_cast(pending_resolution_->width) || + video_frame.height() != + static_cast(pending_resolution_->height))) { + RTC_LOG(LS_WARNING) + << "Recordable encoded frame stream resolution was reported as " + << pending_resolution_->width << "x" << pending_resolution_->height + << " but the stream is now " << video_frame.width() + << video_frame.height(); + } + pending_resolution_ = RecordableEncodedFrame::EncodedResolution{ + static_cast(video_frame.width()), + static_cast(video_frame.height())}; + } } void VideoReceiveStream2::SetFrameDecryptor( @@ -522,18 +611,25 @@ void VideoReceiveStream2::SetDepacketizerToDecoderFrameTransformer( void VideoReceiveStream2::SendNack( const std::vector& sequence_numbers, bool buffering_allowed) { + 
RTC_DCHECK_RUN_ON(&worker_sequence_checker_); RTC_DCHECK(buffering_allowed); rtp_video_stream_receiver_.RequestPacketRetransmit(sequence_numbers); } void VideoReceiveStream2::RequestKeyFrame(int64_t timestamp_ms) { + // Running on worker_sequence_checker_. + // Called from RtpVideoStreamReceiver (rtp_video_stream_receiver_ is + // ultimately responsible). rtp_video_stream_receiver_.RequestKeyFrame(); - last_keyframe_request_ms_ = timestamp_ms; + decode_queue_.PostTask([this, timestamp_ms]() { + RTC_DCHECK_RUN_ON(&decode_queue_); + last_keyframe_request_ms_ = timestamp_ms; + }); } -void VideoReceiveStream2::OnCompleteFrame( - std::unique_ptr frame) { - RTC_DCHECK_RUN_ON(&network_sequence_checker_); +void VideoReceiveStream2::OnCompleteFrame(std::unique_ptr frame) { + RTC_DCHECK_RUN_ON(&worker_sequence_checker_); + // TODO(https://bugs.webrtc.org/9974): Consider removing this workaround. int64_t time_now_ms = clock_->TimeInMilliseconds(); if (last_complete_frame_time_ms_ > 0 && @@ -542,26 +638,25 @@ void VideoReceiveStream2::OnCompleteFrame( } last_complete_frame_time_ms_ = time_now_ms; - // TODO(bugs.webrtc.org/11489): We grab the playout_delay_lock_ lock - // potentially twice. Consider checking both min/max and posting to worker if - // there's a change. If we always update playout delays on the worker, we - // don't need a lock. 
- const PlayoutDelay& playout_delay = frame->EncodedImage().playout_delay_; + const VideoPlayoutDelay& playout_delay = frame->EncodedImage().playout_delay_; if (playout_delay.min_ms >= 0) { - rtc::CritScope cs(&playout_delay_lock_); frame_minimum_playout_delay_ms_ = playout_delay.min_ms; UpdatePlayoutDelays(); } if (playout_delay.max_ms >= 0) { - rtc::CritScope cs(&playout_delay_lock_); frame_maximum_playout_delay_ms_ = playout_delay.max_ms; UpdatePlayoutDelays(); } int64_t last_continuous_pid = frame_buffer_->InsertFrame(std::move(frame)); - if (last_continuous_pid != -1) - rtp_video_stream_receiver_.FrameContinuous(last_continuous_pid); + if (last_continuous_pid != -1) { + { + // TODO(bugs.webrtc.org/11993): Call on the network thread. + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + rtp_video_stream_receiver_.FrameContinuous(last_continuous_pid); + } + } } void VideoReceiveStream2::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) { @@ -577,7 +672,7 @@ uint32_t VideoReceiveStream2::id() const { } absl::optional VideoReceiveStream2::GetInfo() const { - RTC_DCHECK_RUN_ON(&worker_sequence_checker_); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); absl::optional info = rtp_video_stream_receiver_.GetSyncInfo(); @@ -600,39 +695,42 @@ void VideoReceiveStream2::SetEstimatedPlayoutNtpTimestampMs( RTC_NOTREACHED(); } -void VideoReceiveStream2::SetMinimumPlayoutDelay(int delay_ms) { +bool VideoReceiveStream2::SetMinimumPlayoutDelay(int delay_ms) { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); - // TODO(bugs.webrtc.org/11489): See if we can't get rid of the - // |playout_delay_lock_| - rtc::CritScope cs(&playout_delay_lock_); syncable_minimum_playout_delay_ms_ = delay_ms; UpdatePlayoutDelays(); + return true; } -int64_t VideoReceiveStream2::GetWaitMs() const { +int64_t VideoReceiveStream2::GetMaxWaitMs() const { return keyframe_required_ ? 
max_wait_for_keyframe_ms_ : max_wait_for_frame_ms_; } void VideoReceiveStream2::StartNextDecode() { + // Running on the decode thread. TRACE_EVENT0("webrtc", "VideoReceiveStream2::StartNextDecode"); frame_buffer_->NextFrame( - GetWaitMs(), keyframe_required_, &decode_queue_, + GetMaxWaitMs(), keyframe_required_, &decode_queue_, /* encoded frame handler */ [this](std::unique_ptr frame, ReturnReason res) { RTC_DCHECK_EQ(frame == nullptr, res == ReturnReason::kTimeout); RTC_DCHECK_EQ(frame != nullptr, res == ReturnReason::kFrameFound); - decode_queue_.PostTask([this, frame = std::move(frame)]() mutable { - RTC_DCHECK_RUN_ON(&decode_queue_); - if (decoder_stopped_) - return; - if (frame) { - HandleEncodedFrame(std::move(frame)); - } else { - HandleFrameBufferTimeout(); - } - StartNextDecode(); - }); + RTC_DCHECK_RUN_ON(&decode_queue_); + if (decoder_stopped_) + return; + if (frame) { + HandleEncodedFrame(std::move(frame)); + } else { + int64_t now_ms = clock_->TimeInMilliseconds(); + // TODO(bugs.webrtc.org/11993): PostTask to the network thread. + call_->worker_thread()->PostTask(ToQueuedTask( + task_safety_, [this, now_ms, wait_ms = GetMaxWaitMs()]() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + HandleFrameBufferTimeout(now_ms, wait_ms); + })); + } + StartNextDecode(); }); } @@ -649,74 +747,174 @@ void VideoReceiveStream2::HandleEncodedFrame( } } stats_proxy_.OnPreDecode(frame->CodecSpecific()->codecType, qp); - HandleKeyFrameGeneration(frame->FrameType() == VideoFrameType::kVideoFrameKey, - now_ms); - int decode_result = video_receiver_.Decode(frame.get()); + + bool force_request_key_frame = false; + int64_t decoded_frame_picture_id = -1; + + const bool keyframe_request_is_due = + now_ms >= (last_keyframe_request_ms_ + max_wait_for_keyframe_ms_); + + if (!video_receiver_.IsExternalDecoderRegistered(frame->PayloadType())) { + // Look for the decoder with this payload type. 
+ for (const Decoder& decoder : config_.decoders) { + if (decoder.payload_type == frame->PayloadType()) { + CreateAndRegisterExternalDecoder(decoder); + break; + } + } + } + + int64_t frame_id = frame->Id(); + bool received_frame_is_keyframe = + frame->FrameType() == VideoFrameType::kVideoFrameKey; + int decode_result = DecodeAndMaybeDispatchEncodedFrame(std::move(frame)); if (decode_result == WEBRTC_VIDEO_CODEC_OK || decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) { keyframe_required_ = false; frame_decoded_ = true; - rtp_video_stream_receiver_.FrameDecoded(frame->id.picture_id); + + decoded_frame_picture_id = frame_id; if (decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) - RequestKeyFrame(now_ms); + force_request_key_frame = true; } else if (!frame_decoded_ || !keyframe_required_ || - (last_keyframe_request_ms_ + max_wait_for_keyframe_ms_ < now_ms)) { + keyframe_request_is_due) { keyframe_required_ = true; // TODO(philipel): Remove this keyframe request when downstream project // has been fixed. - RequestKeyFrame(now_ms); + force_request_key_frame = true; + } + + { + // TODO(bugs.webrtc.org/11993): Make this PostTask to the network thread. + call_->worker_thread()->PostTask(ToQueuedTask( + task_safety_, + [this, now_ms, received_frame_is_keyframe, force_request_key_frame, + decoded_frame_picture_id, keyframe_request_is_due]() { + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + + if (decoded_frame_picture_id != -1) + rtp_video_stream_receiver_.FrameDecoded(decoded_frame_picture_id); + + HandleKeyFrameGeneration(received_frame_is_keyframe, now_ms, + force_request_key_frame, + keyframe_request_is_due); + })); } +} - if (encoded_frame_buffer_function_) { - frame->Retain(); - encoded_frame_buffer_function_(WebRtcRecordableEncodedFrame(*frame)); +int VideoReceiveStream2::DecodeAndMaybeDispatchEncodedFrame( + std::unique_ptr frame) { + // Running on decode_queue_. 
+ + // If |buffered_encoded_frames_| grows out of control (=60 queued frames), + // maybe due to a stuck decoder, we just halt the process here and log the + // error. + const bool encoded_frame_output_enabled = + encoded_frame_buffer_function_ != nullptr && + buffered_encoded_frames_.size() < kBufferedEncodedFramesMaxSize; + EncodedFrame* frame_ptr = frame.get(); + if (encoded_frame_output_enabled) { + // If we receive a key frame with unset resolution, hold on dispatching the + // frame and following ones until we know a resolution of the stream. + // NOTE: The code below has a race where it can report the wrong + // resolution for keyframes after an initial keyframe of other resolution. + // However, the only known consumer of this information is the W3C + // MediaRecorder and it will only use the resolution in the first encoded + // keyframe from WebRTC, so misreporting is fine. + buffered_encoded_frames_.push_back(std::move(frame)); + if (buffered_encoded_frames_.size() == kBufferedEncodedFramesMaxSize) + RTC_LOG(LS_ERROR) << "About to halt recordable encoded frame output due " + "to too many buffered frames."; + + webrtc::MutexLock lock(&pending_resolution_mutex_); + if (IsKeyFrameAndUnspecifiedResolution(*frame_ptr) && + !pending_resolution_.has_value()) + pending_resolution_.emplace(); + } + + int decode_result = video_receiver_.Decode(frame_ptr); + if (encoded_frame_output_enabled) { + absl::optional + pending_resolution; + { + // Fish out |pending_resolution_| to avoid taking the mutex on every lap + // or dispatching under the mutex in the flush loop. + webrtc::MutexLock lock(&pending_resolution_mutex_); + if (pending_resolution_.has_value()) + pending_resolution = *pending_resolution_; + } + if (!pending_resolution.has_value() || !pending_resolution->empty()) { + // Flush the buffered frames. 
+ for (const auto& frame : buffered_encoded_frames_) { + RecordableEncodedFrame::EncodedResolution resolution{ + frame->EncodedImage()._encodedWidth, + frame->EncodedImage()._encodedHeight}; + if (IsKeyFrameAndUnspecifiedResolution(*frame)) { + RTC_DCHECK(!pending_resolution->empty()); + resolution = *pending_resolution; + } + encoded_frame_buffer_function_( + WebRtcRecordableEncodedFrame(*frame, resolution)); + } + buffered_encoded_frames_.clear(); + } } + return decode_result; } +// RTC_RUN_ON(packet_sequence_checker_) void VideoReceiveStream2::HandleKeyFrameGeneration( bool received_frame_is_keyframe, - int64_t now_ms) { + int64_t now_ms, + bool always_request_key_frame, + bool keyframe_request_is_due) { + bool request_key_frame = always_request_key_frame; + // Repeat sending keyframe requests if we've requested a keyframe. - if (!keyframe_generation_requested_) { - return; - } - if (received_frame_is_keyframe) { - keyframe_generation_requested_ = false; - } else if (last_keyframe_request_ms_ + max_wait_for_keyframe_ms_ <= now_ms) { - if (!IsReceivingKeyFrame(now_ms)) { - RequestKeyFrame(now_ms); + if (keyframe_generation_requested_) { + if (received_frame_is_keyframe) { + keyframe_generation_requested_ = false; + } else if (keyframe_request_is_due) { + if (!IsReceivingKeyFrame(now_ms)) { + request_key_frame = true; + } + } else { + // It hasn't been long enough since the last keyframe request, do nothing. } - } else { - // It hasn't been long enough since the last keyframe request, do nothing. + } + + if (request_key_frame) { + // HandleKeyFrameGeneration is initated from the decode thread - + // RequestKeyFrame() triggers a call back to the decode thread. + // Perhaps there's a way to avoid that. + RequestKeyFrame(now_ms); } } -void VideoReceiveStream2::HandleFrameBufferTimeout() { - // Running on |decode_queue_|. 
- int64_t now_ms = clock_->TimeInMilliseconds(); +// RTC_RUN_ON(packet_sequence_checker_) +void VideoReceiveStream2::HandleFrameBufferTimeout(int64_t now_ms, + int64_t wait_ms) { absl::optional last_packet_ms = rtp_video_stream_receiver_.LastReceivedPacketMs(); // To avoid spamming keyframe requests for a stream that is not active we // check if we have received a packet within the last 5 seconds. - bool stream_is_active = last_packet_ms && now_ms - *last_packet_ms < 5000; - if (!stream_is_active) { - worker_thread_->PostTask(ToQueuedTask(task_safety_, [this]() { - RTC_DCHECK_RUN_ON(&worker_sequence_checker_); - stats_proxy_.OnStreamInactive(); - })); - } + const bool stream_is_active = + last_packet_ms && now_ms - *last_packet_ms < 5000; + if (!stream_is_active) + stats_proxy_.OnStreamInactive(); if (stream_is_active && !IsReceivingKeyFrame(now_ms) && (!config_.crypto_options.sframe.require_frame_encryption || rtp_video_stream_receiver_.IsDecryptable())) { - RTC_LOG(LS_WARNING) << "No decodable frame in " << GetWaitMs() + RTC_LOG(LS_WARNING) << "No decodable frame in " << wait_ms << " ms, requesting keyframe."; RequestKeyFrame(now_ms); } } +// RTC_RUN_ON(packet_sequence_checker_) bool VideoReceiveStream2::IsReceivingKeyFrame(int64_t timestamp_ms) const { absl::optional last_keyframe_packet_ms = rtp_video_stream_receiver_.LastReceivedKeyframePacketMs(); @@ -730,11 +928,28 @@ bool VideoReceiveStream2::IsReceivingKeyFrame(int64_t timestamp_ms) const { } void VideoReceiveStream2::UpdatePlayoutDelays() const { + // Running on worker_sequence_checker_. const int minimum_delay_ms = std::max({frame_minimum_playout_delay_ms_, base_minimum_playout_delay_ms_, syncable_minimum_playout_delay_ms_}); if (minimum_delay_ms >= 0) { timing_->set_min_playout_delay(minimum_delay_ms); + if (frame_minimum_playout_delay_ms_ == 0 && + frame_maximum_playout_delay_ms_ > 0 && low_latency_renderer_enabled_) { + // TODO(kron): Estimate frame rate from video stream. 
+ constexpr double kFrameRate = 60.0; + // Convert playout delay in ms to number of frames. + int max_composition_delay_in_frames = std::lrint( + static_cast(frame_maximum_playout_delay_ms_ * kFrameRate) / + rtc::kNumMillisecsPerSec); + if (low_latency_renderer_include_predecode_buffer_) { + // Subtract frames in buffer. + max_composition_delay_in_frames = std::max( + max_composition_delay_in_frames - frame_buffer_->Size(), 0); + } + timing_->SetMaxCompositionDelayInFrames( + absl::make_optional(max_composition_delay_in_frames)); + } } const int maximum_delay_ms = frame_maximum_playout_delay_ms_; @@ -752,36 +967,43 @@ VideoReceiveStream2::SetAndGetRecordingState(RecordingState state, bool generate_key_frame) { RTC_DCHECK_RUN_ON(&worker_sequence_checker_); rtc::Event event; + + // Save old state, set the new state. RecordingState old_state; - decode_queue_.PostTask([this, &event, &old_state, generate_key_frame, - state = std::move(state)] { - RTC_DCHECK_RUN_ON(&decode_queue_); - // Save old state. - old_state.callback = std::move(encoded_frame_buffer_function_); - old_state.keyframe_needed = keyframe_generation_requested_; - old_state.last_keyframe_request_ms = last_keyframe_request_ms_; - - // Set new state. - encoded_frame_buffer_function_ = std::move(state.callback); - if (generate_key_frame) { - RequestKeyFrame(clock_->TimeInMilliseconds()); + + decode_queue_.PostTask( + [this, &event, &old_state, callback = std::move(state.callback), + generate_key_frame, + last_keyframe_request = state.last_keyframe_request_ms.value_or(0)] { + RTC_DCHECK_RUN_ON(&decode_queue_); + old_state.callback = std::move(encoded_frame_buffer_function_); + encoded_frame_buffer_function_ = std::move(callback); + + old_state.last_keyframe_request_ms = last_keyframe_request_ms_; + last_keyframe_request_ms_ = generate_key_frame + ? 
clock_->TimeInMilliseconds() + : last_keyframe_request; + + event.Set(); + }); + + if (generate_key_frame) { + rtp_video_stream_receiver_.RequestKeyFrame(); + { + // TODO(bugs.webrtc.org/11993): Post this to the network thread. + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); keyframe_generation_requested_ = true; - } else { - keyframe_generation_requested_ = state.keyframe_needed; - last_keyframe_request_ms_ = state.last_keyframe_request_ms.value_or(0); } - event.Set(); - }); + } + event.Wait(rtc::Event::kForever); return old_state; } void VideoReceiveStream2::GenerateKeyFrame() { - decode_queue_.PostTask([this]() { - RTC_DCHECK_RUN_ON(&decode_queue_); - RequestKeyFrame(clock_->TimeInMilliseconds()); - keyframe_generation_requested_ = true; - }); + RTC_DCHECK_RUN_ON(&packet_sequence_checker_); + RequestKeyFrame(clock_->TimeInMilliseconds()); + keyframe_generation_requested_ = true; } } // namespace internal diff --git a/video/video_receive_stream2.h b/video/video_receive_stream2.h index 0eab5dd293..9557044277 100644 --- a/video/video_receive_stream2.h +++ b/video/video_receive_stream2.h @@ -14,10 +14,11 @@ #include #include +#include "api/sequence_checker.h" #include "api/task_queue/task_queue_factory.h" -#include "api/transport/media/media_transport_interface.h" #include "api/units/timestamp.h" #include "api/video/recordable_encoded_frame.h" +#include "call/call.h" #include "call/rtp_packet_sink_interface.h" #include "call/syncable.h" #include "call/video_receive_stream.h" @@ -25,8 +26,10 @@ #include "modules/rtp_rtcp/source/source_tracker.h" #include "modules/video_coding/frame_buffer2.h" #include "modules/video_coding/video_receiver2.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" #include "video/receive_statistics_proxy2.h" 
#include "video/rtp_streams_synchronizer2.h" @@ -36,8 +39,6 @@ namespace webrtc { -class ProcessThread; -class RTPFragmentationHeader; class RtpStreamReceiverInterface; class RtpStreamReceiverControllerInterface; class RtxReceiveStream; @@ -74,29 +75,45 @@ struct VideoFrameMetaData { const Timestamp decode_timestamp; }; -class VideoReceiveStream2 : public webrtc::VideoReceiveStream, - public rtc::VideoSinkInterface, - public NackSender, - public video_coding::OnCompleteFrameCallback, - public Syncable, - public CallStatsObserver { +class VideoReceiveStream2 + : public webrtc::VideoReceiveStream, + public rtc::VideoSinkInterface, + public NackSender, + public RtpVideoStreamReceiver2::OnCompleteFrameCallback, + public Syncable, + public CallStatsObserver { public: // The default number of milliseconds to pass before re-requesting a key frame // to be sent. static constexpr int kMaxWaitForKeyFrameMs = 200; + // The maximum number of buffered encoded frames when encoded output is + // configured. + static constexpr size_t kBufferedEncodedFramesMaxSize = 60; VideoReceiveStream2(TaskQueueFactory* task_queue_factory, - TaskQueueBase* current_queue, - RtpStreamReceiverControllerInterface* receiver_controller, + Call* call, int num_cpu_cores, PacketRouter* packet_router, VideoReceiveStream::Config config, - ProcessThread* process_thread, CallStats* call_stats, Clock* clock, VCMTiming* timing); + // Destruction happens on the worker thread. Prior to destruction the caller + // must ensure that a registration with the transport has been cleared. See + // `RegisterWithTransport` for details. + // TODO(tommi): As a further improvement to this, performing the full + // destruction on the network thread could be made the default. ~VideoReceiveStream2() override; + // Called on `packet_sequence_checker_` to register/unregister with the + // network transport. 
+ void RegisterWithTransport( + RtpStreamReceiverControllerInterface* receiver_controller); + // If registration has previously been done (via `RegisterWithTransport`) then + // `UnregisterFromTransport` must be called prior to destruction, on the + // network thread. + void UnregisterFromTransport(); + const Config& config() const { return config_; } void SignalNetworkState(NetworkState state); @@ -108,10 +125,9 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, void Start() override; void Stop() override; - webrtc::VideoReceiveStream::Stats GetStats() const override; + const RtpConfig& rtp_config() const override { return config_.rtp; } - void AddSecondarySink(RtpPacketSinkInterface* sink) override; - void RemoveSecondarySink(const RtpPacketSinkInterface* sink) override; + webrtc::VideoReceiveStream::Stats GetStats() const override; // SetBaseMinimumPlayoutDelayMs and GetBaseMinimumPlayoutDelayMs are called // from webrtc/api level and requested by user code. For e.g. blink/js layer @@ -133,9 +149,8 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, void SendNack(const std::vector& sequence_numbers, bool buffering_allowed) override; - // Implements video_coding::OnCompleteFrameCallback. - void OnCompleteFrame( - std::unique_ptr frame) override; + // Implements RtpVideoStreamReceiver2::OnCompleteFrameCallback. + void OnCompleteFrame(std::unique_ptr frame) override; // Implements CallStatsObserver::OnRttUpdate void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override; @@ -149,7 +164,7 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, int64_t time_ms) override; // SetMinimumPlayoutDelay is only called by A/V sync. 
- void SetMinimumPlayoutDelay(int delay_ms) override; + bool SetMinimumPlayoutDelay(int delay_ms) override; std::vector GetSources() const override; @@ -158,31 +173,45 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, void GenerateKeyFrame() override; private: - int64_t GetWaitMs() const; + void CreateAndRegisterExternalDecoder(const Decoder& decoder); + int64_t GetMaxWaitMs() const RTC_RUN_ON(decode_queue_); void StartNextDecode() RTC_RUN_ON(decode_queue_); - void HandleEncodedFrame(std::unique_ptr frame) + void HandleEncodedFrame(std::unique_ptr frame) RTC_RUN_ON(decode_queue_); - void HandleFrameBufferTimeout() RTC_RUN_ON(decode_queue_); + void HandleFrameBufferTimeout(int64_t now_ms, int64_t wait_ms) + RTC_RUN_ON(packet_sequence_checker_); void UpdatePlayoutDelays() const - RTC_EXCLUSIVE_LOCKS_REQUIRED(playout_delay_lock_); - void RequestKeyFrame(int64_t timestamp_ms) RTC_RUN_ON(decode_queue_); - void HandleKeyFrameGeneration(bool received_frame_is_keyframe, int64_t now_ms) - RTC_RUN_ON(decode_queue_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(worker_sequence_checker_); + void RequestKeyFrame(int64_t timestamp_ms) + RTC_RUN_ON(packet_sequence_checker_); + void HandleKeyFrameGeneration(bool received_frame_is_keyframe, + int64_t now_ms, + bool always_request_key_frame, + bool keyframe_request_is_due) + RTC_RUN_ON(packet_sequence_checker_); bool IsReceivingKeyFrame(int64_t timestamp_ms) const + RTC_RUN_ON(packet_sequence_checker_); + int DecodeAndMaybeDispatchEncodedFrame(std::unique_ptr frame) RTC_RUN_ON(decode_queue_); void UpdateHistograms(); - SequenceChecker worker_sequence_checker_; - SequenceChecker module_process_sequence_checker_; - SequenceChecker network_sequence_checker_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_sequence_checker_; + // TODO(bugs.webrtc.org/11993): This checker conceptually represents + // operations that belong to the network thread. 
The Call class is currently + // moving towards handling network packets on the network thread and while + // that work is ongoing, this checker may in practice represent the worker + // thread, but still serves as a mechanism of grouping together concepts + // that belong to the network thread. Once the packets are fully delivered + // on the network thread, this comment will be deleted. + RTC_NO_UNIQUE_ADDRESS SequenceChecker packet_sequence_checker_; TaskQueueFactory* const task_queue_factory_; TransportAdapter transport_adapter_; const VideoReceiveStream::Config config_; const int num_cpu_cores_; - TaskQueueBase* const worker_thread_; + Call* const call_; Clock* const clock_; CallStats* const call_stats_; @@ -210,46 +239,77 @@ class VideoReceiveStream2 : public webrtc::VideoReceiveStream, // Members for the new jitter buffer experiment. std::unique_ptr frame_buffer_; - std::unique_ptr media_receiver_; - std::unique_ptr rtx_receive_stream_; - std::unique_ptr rtx_receiver_; + std::unique_ptr media_receiver_ + RTC_GUARDED_BY(packet_sequence_checker_); + std::unique_ptr rtx_receive_stream_ + RTC_GUARDED_BY(packet_sequence_checker_); + std::unique_ptr rtx_receiver_ + RTC_GUARDED_BY(packet_sequence_checker_); // Whenever we are in an undecodable state (stream has just started or due to // a decoding error) we require a keyframe to restart the stream. - bool keyframe_required_ = true; + bool keyframe_required_ RTC_GUARDED_BY(decode_queue_) = true; // If we have successfully decoded any frame. - bool frame_decoded_ = false; + bool frame_decoded_ RTC_GUARDED_BY(decode_queue_) = false; - int64_t last_keyframe_request_ms_ = 0; - int64_t last_complete_frame_time_ms_ = 0; + int64_t last_keyframe_request_ms_ RTC_GUARDED_BY(decode_queue_) = 0; + int64_t last_complete_frame_time_ms_ + RTC_GUARDED_BY(worker_sequence_checker_) = 0; // Keyframe request intervals are configurable through field trials. 
const int max_wait_for_keyframe_ms_; const int max_wait_for_frame_ms_; - rtc::CriticalSection playout_delay_lock_; - // All of them tries to change current min_playout_delay on |timing_| but // source of the change request is different in each case. Among them the // biggest delay is used. -1 means use default value from the |timing_|. // // Minimum delay as decided by the RTP playout delay extension. - int frame_minimum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = -1; + int frame_minimum_playout_delay_ms_ RTC_GUARDED_BY(worker_sequence_checker_) = + -1; // Minimum delay as decided by the setLatency function in "webrtc/api". - int base_minimum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = -1; - // Minimum delay as decided by the A/V synchronization feature. - int syncable_minimum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = + int base_minimum_playout_delay_ms_ RTC_GUARDED_BY(worker_sequence_checker_) = -1; + // Minimum delay as decided by the A/V synchronization feature. + int syncable_minimum_playout_delay_ms_ + RTC_GUARDED_BY(worker_sequence_checker_) = -1; // Maximum delay as decided by the RTP playout delay extension. - int frame_maximum_playout_delay_ms_ RTC_GUARDED_BY(playout_delay_lock_) = -1; + int frame_maximum_playout_delay_ms_ RTC_GUARDED_BY(worker_sequence_checker_) = + -1; // Function that is triggered with encoded frames, if not empty. std::function encoded_frame_buffer_function_ RTC_GUARDED_BY(decode_queue_); // Set to true while we're requesting keyframes but not yet received one. - bool keyframe_generation_requested_ RTC_GUARDED_BY(decode_queue_) = false; + bool keyframe_generation_requested_ RTC_GUARDED_BY(packet_sequence_checker_) = + false; + // Lock to avoid unnecessary per-frame idle wakeups in the code. + webrtc::Mutex pending_resolution_mutex_; + // Signal from decode queue to OnFrame callback to fill pending_resolution_. + // absl::nullopt - no resolution needed. 
0x0 - next OnFrame to fill with + // received resolution. Not 0x0 - OnFrame has filled a resolution. + absl::optional pending_resolution_ + RTC_GUARDED_BY(pending_resolution_mutex_); + // Buffered encoded frames held while waiting for decoded resolution. + std::vector> buffered_encoded_frames_ + RTC_GUARDED_BY(decode_queue_); + + // Set by the field trial WebRTC-LowLatencyRenderer. The parameter |enabled| + // determines if the low-latency renderer algorithm should be used for the + // case min playout delay=0 and max playout delay>0. + FieldTrialParameter low_latency_renderer_enabled_; + // Set by the field trial WebRTC-LowLatencyRenderer. The parameter + // |include_predecode_buffer| determines if the predecode buffer should be + // taken into account when calculating maximum number of frames in composition + // queue. + FieldTrialParameter low_latency_renderer_include_predecode_buffer_; + + // Set by the field trial WebRTC-PreStreamDecoders. The parameter |max| + // determines the maximum number of decoders that are created up front before + // any video frame has been received. + FieldTrialParameter maximum_pre_stream_decoders_; // Defined last so they are destroyed before all other members. 
rtc::TaskQueue decode_queue_; diff --git a/video/video_receive_stream2_unittest.cc b/video/video_receive_stream2_unittest.cc index a411cec740..850fd0dbb5 100644 --- a/video/video_receive_stream2_unittest.cc +++ b/video/video_receive_stream2_unittest.cc @@ -11,21 +11,23 @@ #include "video/video_receive_stream2.h" #include +#include #include #include #include #include "api/task_queue/default_task_queue_factory.h" #include "api/test/video/function_video_decoder_factory.h" +#include "api/video/video_frame.h" #include "api/video_codecs/video_decoder.h" #include "call/rtp_stream_receiver_controller.h" #include "common_video/test/utilities.h" #include "media/base/fake_video_renderer.h" +#include "media/engine/fake_webrtc_call.h" #include "modules/pacing/packet_router.h" #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "modules/utility/include/process_thread.h" #include "modules/video_coding/encoded_frame.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "system_wrappers/include/clock.h" #include "test/fake_decoder.h" @@ -41,9 +43,13 @@ namespace webrtc { namespace { using ::testing::_; +using ::testing::AllOf; using ::testing::ElementsAreArray; +using ::testing::Field; +using ::testing::InSequence; using ::testing::Invoke; using ::testing::IsEmpty; +using ::testing::Property; using ::testing::SizeIs; constexpr int kDefaultTimeOutMs = 50; @@ -77,7 +83,15 @@ class MockVideoDecoder : public VideoDecoder { const char* ImplementationName() const { return "MockVideoDecoder"; } }; -class FrameObjectFake : public video_coding::EncodedFrame { +class MockVideoDecoderFactory : public VideoDecoderFactory { + public: + MOCK_CONST_METHOD0(GetSupportedFormats, std::vector()); + + MOCK_METHOD1(CreateVideoDecoder, + std::unique_ptr(const SdpVideoFormat& format)); +}; + +class FrameObjectFake : public EncodedFrame { public: void SetPayloadType(uint8_t payload_type) { _payloadType = payload_type; } @@ -95,14 +109,16 @@ class FrameObjectFake : 
public video_coding::EncodedFrame { class VideoReceiveStream2Test : public ::testing::Test { public: VideoReceiveStream2Test() - : process_thread_(ProcessThread::Create("TestThread")), - task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), - call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()), + : task_queue_factory_(CreateDefaultTaskQueueFactory()), h264_decoder_factory_(&mock_h264_video_decoder_), - null_decoder_factory_(&mock_null_video_decoder_) {} + config_(&mock_transport_, &h264_decoder_factory_), + call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()) {} + ~VideoReceiveStream2Test() override { + if (video_receive_stream_) + video_receive_stream_->UnregisterFromTransport(); + } - void SetUp() { + void SetUp() override { constexpr int kDefaultNumCpuCores = 2; config_.rtp.remote_ssrc = 1111; config_.rtp.local_ssrc = 2222; @@ -112,36 +128,29 @@ class VideoReceiveStream2Test : public ::testing::Test { h264_decoder.video_format = SdpVideoFormat("H264"); h264_decoder.video_format.parameters.insert( {"sprop-parameter-sets", "Z0IACpZTBYmI,aMljiA=="}); - h264_decoder.decoder_factory = &h264_decoder_factory_; + config_.decoders.clear(); config_.decoders.push_back(h264_decoder); - VideoReceiveStream::Decoder null_decoder; - null_decoder.payload_type = 98; - null_decoder.video_format = SdpVideoFormat("null"); - null_decoder.decoder_factory = &null_decoder_factory_; - config_.decoders.push_back(null_decoder); clock_ = Clock::GetRealTimeClock(); timing_ = new VCMTiming(clock_); video_receive_stream_ = std::make_unique( - task_queue_factory_.get(), loop_.task_queue(), - &rtp_stream_receiver_controller_, kDefaultNumCpuCores, - &packet_router_, config_.Copy(), process_thread_.get(), - &call_stats_, clock_, timing_); + task_queue_factory_.get(), &fake_call_, kDefaultNumCpuCores, + &packet_router_, config_.Copy(), &call_stats_, clock_, timing_); + video_receive_stream_->RegisterWithTransport( + &rtp_stream_receiver_controller_); } 
protected: test::RunLoop loop_; - std::unique_ptr process_thread_; const std::unique_ptr task_queue_factory_; + test::VideoDecoderProxyFactory h264_decoder_factory_; VideoReceiveStream::Config config_; internal::CallStats call_stats_; MockVideoDecoder mock_h264_video_decoder_; - MockVideoDecoder mock_null_video_decoder_; - test::VideoDecoderProxyFactory h264_decoder_factory_; - test::VideoDecoderProxyFactory null_decoder_factory_; cricket::FakeVideoRenderer fake_renderer_; + cricket::FakeCall fake_call_; MockTransport mock_transport_; PacketRouter packet_router_; RtpStreamReceiverController rtp_stream_receiver_controller_; @@ -179,9 +188,9 @@ TEST_F(VideoReceiveStream2Test, CreateFrameFromH264FmtpSpropAndIdr) { } TEST_F(VideoReceiveStream2Test, PlayoutDelay) { - const PlayoutDelay kPlayoutDelayMs = {123, 321}; + const VideoPlayoutDelay kPlayoutDelayMs = {123, 321}; std::unique_ptr test_frame(new FrameObjectFake()); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetPlayoutDelay(kPlayoutDelayMs); video_receive_stream_->OnCompleteFrame(std::move(test_frame)); @@ -209,10 +218,10 @@ TEST_F(VideoReceiveStream2Test, PlayoutDelay) { TEST_F(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMaxValue) { const int default_max_playout_latency = timing_->max_playout_delay(); - const PlayoutDelay kPlayoutDelayMs = {123, -1}; + const VideoPlayoutDelay kPlayoutDelayMs = {123, -1}; std::unique_ptr test_frame(new FrameObjectFake()); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetPlayoutDelay(kPlayoutDelayMs); video_receive_stream_->OnCompleteFrame(std::move(test_frame)); @@ -225,10 +234,10 @@ TEST_F(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMaxValue) { TEST_F(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMinValue) { const int default_min_playout_latency = timing_->min_playout_delay(); - const PlayoutDelay kPlayoutDelayMs = {-1, 321}; + const VideoPlayoutDelay kPlayoutDelayMs = {-1, 321}; std::unique_ptr 
test_frame(new FrameObjectFake()); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetPlayoutDelay(kPlayoutDelayMs); video_receive_stream_->OnCompleteFrame(std::move(test_frame)); @@ -239,24 +248,60 @@ TEST_F(VideoReceiveStream2Test, PlayoutDelayPreservesDefaultMinValue) { EXPECT_EQ(default_min_playout_latency, timing_->min_playout_delay()); } +TEST_F(VideoReceiveStream2Test, MaxCompositionDelayNotSetByDefault) { + // Default with no playout delay set. + std::unique_ptr test_frame0(new FrameObjectFake()); + test_frame0->SetId(0); + video_receive_stream_->OnCompleteFrame(std::move(test_frame0)); + EXPECT_FALSE(timing_->MaxCompositionDelayInFrames()); + + // Max composition delay not set for playout delay 0,0. + std::unique_ptr test_frame1(new FrameObjectFake()); + test_frame1->SetId(1); + test_frame1->SetPlayoutDelay({0, 0}); + video_receive_stream_->OnCompleteFrame(std::move(test_frame1)); + EXPECT_FALSE(timing_->MaxCompositionDelayInFrames()); + + // Max composition delay not set for playout delay X,Y, where X,Y>0. + std::unique_ptr test_frame2(new FrameObjectFake()); + test_frame2->SetId(2); + test_frame2->SetPlayoutDelay({10, 30}); + video_receive_stream_->OnCompleteFrame(std::move(test_frame2)); + EXPECT_FALSE(timing_->MaxCompositionDelayInFrames()); +} + +TEST_F(VideoReceiveStream2Test, MaxCompositionDelaySetFromMaxPlayoutDelay) { + // Max composition delay set if playout delay X,Y, where X=0,Y>0. + const VideoPlayoutDelay kPlayoutDelayMs = {0, 50}; + const int kExpectedMaxCompositionDelayInFrames = 3; // ~50 ms at 60 fps. 
+ std::unique_ptr test_frame(new FrameObjectFake()); + test_frame->SetId(0); + test_frame->SetPlayoutDelay(kPlayoutDelayMs); + video_receive_stream_->OnCompleteFrame(std::move(test_frame)); + EXPECT_EQ(kExpectedMaxCompositionDelayInFrames, + timing_->MaxCompositionDelayInFrames()); +} + class VideoReceiveStream2TestWithFakeDecoder : public ::testing::Test { public: VideoReceiveStream2TestWithFakeDecoder() : fake_decoder_factory_( []() { return std::make_unique(); }), - process_thread_(ProcessThread::Create("TestThread")), task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), + config_(&mock_transport_, &fake_decoder_factory_), call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()) {} + ~VideoReceiveStream2TestWithFakeDecoder() override { + if (video_receive_stream_) + video_receive_stream_->UnregisterFromTransport(); + } - void SetUp() { + void SetUp() override { config_.rtp.remote_ssrc = 1111; config_.rtp.local_ssrc = 2222; config_.renderer = &fake_renderer_; VideoReceiveStream::Decoder fake_decoder; fake_decoder.payload_type = 99; fake_decoder.video_format = SdpVideoFormat("VP8"); - fake_decoder.decoder_factory = &fake_decoder_factory_; config_.decoders.push_back(fake_decoder); clock_ = Clock::GetRealTimeClock(); ReCreateReceiveStream(VideoReceiveStream::RecordingState()); @@ -264,19 +309,22 @@ class VideoReceiveStream2TestWithFakeDecoder : public ::testing::Test { void ReCreateReceiveStream(VideoReceiveStream::RecordingState state) { constexpr int kDefaultNumCpuCores = 2; - video_receive_stream_ = nullptr; + if (video_receive_stream_) { + video_receive_stream_->UnregisterFromTransport(); + video_receive_stream_ = nullptr; + } timing_ = new VCMTiming(clock_); video_receive_stream_.reset(new webrtc::internal::VideoReceiveStream2( - task_queue_factory_.get(), loop_.task_queue(), - &rtp_stream_receiver_controller_, kDefaultNumCpuCores, &packet_router_, - config_.Copy(), process_thread_.get(), &call_stats_, clock_, timing_)); + 
task_queue_factory_.get(), &fake_call_, kDefaultNumCpuCores, + &packet_router_, config_.Copy(), &call_stats_, clock_, timing_)); + video_receive_stream_->RegisterWithTransport( + &rtp_stream_receiver_controller_); video_receive_stream_->SetAndGetRecordingState(std::move(state), false); } protected: test::RunLoop loop_; test::FunctionVideoDecoderFactory fake_decoder_factory_; - std::unique_ptr process_thread_; const std::unique_ptr task_queue_factory_; VideoReceiveStream::Config config_; internal::CallStats call_stats_; @@ -284,6 +332,7 @@ class VideoReceiveStream2TestWithFakeDecoder : public ::testing::Test { MockTransport mock_transport_; PacketRouter packet_router_; RtpStreamReceiverController rtp_stream_receiver_controller_; + cricket::FakeCall fake_call_; std::unique_ptr video_receive_stream_; Clock* clock_; VCMTiming* timing_; @@ -293,7 +342,7 @@ TEST_F(VideoReceiveStream2TestWithFakeDecoder, PassesNtpTime) { const int64_t kNtpTimestamp = 12345; auto test_frame = std::make_unique(); test_frame->SetPayloadType(99); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetNtpTime(kNtpTimestamp); video_receive_stream_->Start(); @@ -306,7 +355,7 @@ TEST_F(VideoReceiveStream2TestWithFakeDecoder, PassesRotation) { const webrtc::VideoRotation kRotation = webrtc::kVideoRotation_180; auto test_frame = std::make_unique(); test_frame->SetPayloadType(99); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetRotation(kRotation); video_receive_stream_->Start(); @@ -319,7 +368,7 @@ TEST_F(VideoReceiveStream2TestWithFakeDecoder, PassesRotation) { TEST_F(VideoReceiveStream2TestWithFakeDecoder, PassesPacketInfos) { auto test_frame = std::make_unique(); test_frame->SetPayloadType(99); - test_frame->id.picture_id = 0; + test_frame->SetId(0); RtpPacketInfos packet_infos = CreatePacketInfos(3); test_frame->SetPacketInfos(packet_infos); @@ -338,7 +387,7 @@ TEST_F(VideoReceiveStream2TestWithFakeDecoder, RenderedFrameUpdatesGetSources) { // Prepare 
one video frame with per-packet information. auto test_frame = std::make_unique(); test_frame->SetPayloadType(99); - test_frame->id.picture_id = 0; + test_frame->SetId(0); RtpPacketInfos packet_infos; { RtpPacketInfos::vector_type infos; @@ -348,16 +397,16 @@ TEST_F(VideoReceiveStream2TestWithFakeDecoder, RenderedFrameUpdatesGetSources) { info.set_csrcs({kCsrc}); info.set_rtp_timestamp(kRtpTimestamp); - info.set_receive_time_ms(clock_->TimeInMilliseconds() - 5000); + info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(5000)); infos.push_back(info); - info.set_receive_time_ms(clock_->TimeInMilliseconds() - 3000); + info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(3000)); infos.push_back(info); - info.set_receive_time_ms(clock_->TimeInMilliseconds() - 2000); + info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(2000)); infos.push_back(info); - info.set_receive_time_ms(clock_->TimeInMilliseconds() - 4000); + info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(1000)); infos.push_back(info); packet_infos = RtpPacketInfos(std::move(infos)); @@ -408,15 +457,25 @@ TEST_F(VideoReceiveStream2TestWithFakeDecoder, RenderedFrameUpdatesGetSources) { } } -std::unique_ptr MakeFrame(VideoFrameType frame_type, - int picture_id) { +std::unique_ptr MakeFrameWithResolution( + VideoFrameType frame_type, + int picture_id, + int width, + int height) { auto frame = std::make_unique(); frame->SetPayloadType(99); - frame->id.picture_id = picture_id; + frame->SetId(picture_id); frame->SetFrameType(frame_type); + frame->_encodedWidth = width; + frame->_encodedHeight = height; return frame; } +std::unique_ptr MakeFrame(VideoFrameType frame_type, + int picture_id) { + return MakeFrameWithResolution(frame_type, picture_id, 320, 240); +} + TEST_F(VideoReceiveStream2TestWithFakeDecoder, PassesFrameWhenEncodedFramesCallbackSet) { testing::MockFunction callback; @@ -448,8 +507,30 @@ TEST_F(VideoReceiveStream2TestWithFakeDecoder, 
video_receive_stream_->Stop(); } -class VideoReceiveStream2TestWithSimulatedClock : public ::testing::Test { +class VideoReceiveStream2TestWithSimulatedClock + : public ::testing::TestWithParam { public: + class FakeRenderer : public rtc::VideoSinkInterface { + public: + void SignalDoneAfterFrames(int num_frames_received) { + signal_after_frame_count_ = num_frames_received; + if (frame_count_ == signal_after_frame_count_) + event_.Set(); + } + + void OnFrame(const webrtc::VideoFrame& frame) override { + if (++frame_count_ == signal_after_frame_count_) + event_.Set(); + } + + void WaitUntilDone() { event_.Wait(rtc::Event::kForever); } + + private: + int signal_after_frame_count_ = std::numeric_limits::max(); + int frame_count_ = 0; + rtc::Event event_; + }; + class FakeDecoder2 : public test::FakeDecoder { public: explicit FakeDecoder2(std::function decode_callback) @@ -472,14 +553,14 @@ class VideoReceiveStream2TestWithSimulatedClock : public ::testing::Test { Transport* transport, VideoDecoderFactory* decoder_factory, rtc::VideoSinkInterface* renderer) { - VideoReceiveStream::Config config(transport); + VideoReceiveStream::Config config(transport, decoder_factory); config.rtp.remote_ssrc = 1111; config.rtp.local_ssrc = 2222; + config.rtp.nack.rtp_history_ms = GetParam(); // rtx-time. 
config.renderer = renderer; VideoReceiveStream::Decoder fake_decoder; fake_decoder.payload_type = 99; fake_decoder.video_format = SdpVideoFormat("VP8"); - fake_decoder.decoder_factory = decoder_factory; config.decoders.push_back(fake_decoder); return config; } @@ -489,28 +570,30 @@ class VideoReceiveStream2TestWithSimulatedClock : public ::testing::Test { fake_decoder_factory_([this] { return std::make_unique([this] { OnFrameDecoded(); }); }), - process_thread_(time_controller_.CreateProcessThread("ProcessThread")), config_(GetConfig(&mock_transport_, &fake_decoder_factory_, &fake_renderer_)), call_stats_(time_controller_.GetClock(), loop_.task_queue()), video_receive_stream_(time_controller_.GetTaskQueueFactory(), - loop_.task_queue(), - &rtp_stream_receiver_controller_, + &fake_call_, /*num_cores=*/2, &packet_router_, config_.Copy(), - process_thread_.get(), &call_stats_, time_controller_.GetClock(), new VCMTiming(time_controller_.GetClock())) { + video_receive_stream_.RegisterWithTransport( + &rtp_stream_receiver_controller_); video_receive_stream_.Start(); } + ~VideoReceiveStream2TestWithSimulatedClock() override { + video_receive_stream_.UnregisterFromTransport(); + } + void OnFrameDecoded() { event_->Set(); } - void PassEncodedFrameAndWait( - std::unique_ptr frame) { + void PassEncodedFrameAndWait(std::unique_ptr frame) { event_ = std::make_unique(); // This call will eventually end up in the Decoded method where the // event is set. 
@@ -522,9 +605,9 @@ class VideoReceiveStream2TestWithSimulatedClock : public ::testing::Test { GlobalSimulatedTimeController time_controller_; test::RunLoop loop_; test::FunctionVideoDecoderFactory fake_decoder_factory_; - std::unique_ptr process_thread_; MockTransport mock_transport_; - cricket::FakeVideoRenderer fake_renderer_; + FakeRenderer fake_renderer_; + cricket::FakeCall fake_call_; VideoReceiveStream::Config config_; internal::CallStats call_stats_; PacketRouter packet_router_; @@ -533,10 +616,9 @@ class VideoReceiveStream2TestWithSimulatedClock : public ::testing::Test { std::unique_ptr event_; }; -TEST_F(VideoReceiveStream2TestWithSimulatedClock, +TEST_P(VideoReceiveStream2TestWithSimulatedClock, RequestsKeyFramesUntilKeyFrameReceived) { - auto tick = TimeDelta::Millis( - internal::VideoReceiveStream2::kMaxWaitForKeyFrameMs / 2); + auto tick = TimeDelta::Millis(GetParam() / 2); EXPECT_CALL(mock_transport_, SendRtcp).Times(1).WillOnce(Invoke([this]() { loop_.Quit(); return 0; @@ -548,7 +630,8 @@ TEST_F(VideoReceiveStream2TestWithSimulatedClock, loop_.Run(); testing::Mock::VerifyAndClearExpectations(&mock_transport_); - // T+200ms: still no key frame received, expect key frame request sent again. + // T+keyframetimeout: still no key frame received, expect key frame request + // sent again. EXPECT_CALL(mock_transport_, SendRtcp).Times(1).WillOnce(Invoke([this]() { loop_.Quit(); return 0; @@ -558,8 +641,8 @@ TEST_F(VideoReceiveStream2TestWithSimulatedClock, loop_.Run(); testing::Mock::VerifyAndClearExpectations(&mock_transport_); - // T+200ms: now send a key frame - we should not observe new key frame - // requests after this. + // T+keyframetimeout: now send a key frame - we should not observe new key + // frame requests after this. 
EXPECT_CALL(mock_transport_, SendRtcp).Times(0); PassEncodedFrameAndWait(MakeFrame(VideoFrameType::kVideoFrameKey, 3)); time_controller_.AdvanceTime(2 * tick); @@ -568,4 +651,171 @@ TEST_F(VideoReceiveStream2TestWithSimulatedClock, loop_.Run(); } +TEST_P(VideoReceiveStream2TestWithSimulatedClock, + DispatchesEncodedFrameSequenceStartingWithKeyframeWithoutResolution) { + video_receive_stream_.Start(); + testing::MockFunction callback; + video_receive_stream_.SetAndGetRecordingState( + VideoReceiveStream::RecordingState(callback.AsStdFunction()), + /*generate_key_frame=*/false); + + InSequence s; + EXPECT_CALL( + callback, + Call(AllOf( + Property(&RecordableEncodedFrame::resolution, + Field(&RecordableEncodedFrame::EncodedResolution::width, + test::FakeDecoder::kDefaultWidth)), + Property(&RecordableEncodedFrame::resolution, + Field(&RecordableEncodedFrame::EncodedResolution::height, + test::FakeDecoder::kDefaultHeight))))); + EXPECT_CALL(callback, Call); + + fake_renderer_.SignalDoneAfterFrames(2); + PassEncodedFrameAndWait( + MakeFrameWithResolution(VideoFrameType::kVideoFrameKey, 0, 0, 0)); + PassEncodedFrameAndWait( + MakeFrameWithResolution(VideoFrameType::kVideoFrameDelta, 1, 0, 0)); + fake_renderer_.WaitUntilDone(); + + video_receive_stream_.Stop(); +} + +TEST_P(VideoReceiveStream2TestWithSimulatedClock, + DispatchesEncodedFrameSequenceStartingWithKeyframeWithResolution) { + video_receive_stream_.Start(); + testing::MockFunction callback; + video_receive_stream_.SetAndGetRecordingState( + VideoReceiveStream::RecordingState(callback.AsStdFunction()), + /*generate_key_frame=*/false); + + InSequence s; + EXPECT_CALL( + callback, + Call(AllOf( + Property( + &RecordableEncodedFrame::resolution, + Field(&RecordableEncodedFrame::EncodedResolution::width, 1080)), + Property(&RecordableEncodedFrame::resolution, + Field(&RecordableEncodedFrame::EncodedResolution::height, + 720))))); + EXPECT_CALL(callback, Call); + + fake_renderer_.SignalDoneAfterFrames(2); + 
PassEncodedFrameAndWait( + MakeFrameWithResolution(VideoFrameType::kVideoFrameKey, 0, 1080, 720)); + PassEncodedFrameAndWait( + MakeFrameWithResolution(VideoFrameType::kVideoFrameDelta, 1, 0, 0)); + fake_renderer_.WaitUntilDone(); + + video_receive_stream_.Stop(); +} + +INSTANTIATE_TEST_SUITE_P( + RtxTime, + VideoReceiveStream2TestWithSimulatedClock, + ::testing::Values(internal::VideoReceiveStream2::kMaxWaitForKeyFrameMs, + 50 /*ms*/)); + +class VideoReceiveStream2TestWithLazyDecoderCreation : public ::testing::Test { + public: + VideoReceiveStream2TestWithLazyDecoderCreation() + : task_queue_factory_(CreateDefaultTaskQueueFactory()), + config_(&mock_transport_, &mock_h264_decoder_factory_), + call_stats_(Clock::GetRealTimeClock(), loop_.task_queue()) {} + + ~VideoReceiveStream2TestWithLazyDecoderCreation() override { + video_receive_stream_->UnregisterFromTransport(); + } + + void SetUp() override { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-PreStreamDecoders/max:0/"); + constexpr int kDefaultNumCpuCores = 2; + config_.rtp.remote_ssrc = 1111; + config_.rtp.local_ssrc = 2222; + config_.renderer = &fake_renderer_; + VideoReceiveStream::Decoder h264_decoder; + h264_decoder.payload_type = 99; + h264_decoder.video_format = SdpVideoFormat("H264"); + h264_decoder.video_format.parameters.insert( + {"sprop-parameter-sets", "Z0IACpZTBYmI,aMljiA=="}); + config_.decoders.clear(); + config_.decoders.push_back(h264_decoder); + + clock_ = Clock::GetRealTimeClock(); + timing_ = new VCMTiming(clock_); + + video_receive_stream_ = + std::make_unique( + task_queue_factory_.get(), &fake_call_, kDefaultNumCpuCores, + &packet_router_, config_.Copy(), &call_stats_, clock_, timing_); + video_receive_stream_->RegisterWithTransport( + &rtp_stream_receiver_controller_); + } + + protected: + test::RunLoop loop_; + const std::unique_ptr task_queue_factory_; + MockVideoDecoderFactory mock_h264_decoder_factory_; + VideoReceiveStream::Config config_; + internal::CallStats 
call_stats_; + MockVideoDecoder mock_h264_video_decoder_; + cricket::FakeVideoRenderer fake_renderer_; + cricket::FakeCall fake_call_; + MockTransport mock_transport_; + PacketRouter packet_router_; + RtpStreamReceiverController rtp_stream_receiver_controller_; + std::unique_ptr video_receive_stream_; + Clock* clock_; + VCMTiming* timing_; +}; + +TEST_F(VideoReceiveStream2TestWithLazyDecoderCreation, LazyDecoderCreation) { + constexpr uint8_t idr_nalu[] = {0x05, 0xFF, 0xFF, 0xFF}; + RtpPacketToSend rtppacket(nullptr); + uint8_t* payload = rtppacket.AllocatePayload(sizeof(idr_nalu)); + memcpy(payload, idr_nalu, sizeof(idr_nalu)); + rtppacket.SetMarker(true); + rtppacket.SetSsrc(1111); + rtppacket.SetPayloadType(99); + rtppacket.SetSequenceNumber(1); + rtppacket.SetTimestamp(0); + + // No decoder is created here. + EXPECT_CALL(mock_h264_decoder_factory_, CreateVideoDecoder(_)).Times(0); + video_receive_stream_->Start(); + + EXPECT_CALL(mock_h264_decoder_factory_, CreateVideoDecoder(_)) + .WillOnce(Invoke([this](const SdpVideoFormat& format) { + test::VideoDecoderProxyFactory h264_decoder_factory( + &mock_h264_video_decoder_); + return h264_decoder_factory.CreateVideoDecoder(format); + })); + rtc::Event init_decode_event_; + EXPECT_CALL(mock_h264_video_decoder_, InitDecode(_, _)) + .WillOnce(Invoke([&init_decode_event_](const VideoCodec* config, + int32_t number_of_cores) { + init_decode_event_.Set(); + return 0; + })); + EXPECT_CALL(mock_h264_video_decoder_, RegisterDecodeCompleteCallback(_)); + EXPECT_CALL(mock_h264_video_decoder_, Decode(_, false, _)); + RtpPacketReceived parsed_packet; + ASSERT_TRUE(parsed_packet.Parse(rtppacket.data(), rtppacket.size())); + rtp_stream_receiver_controller_.OnRtpPacket(parsed_packet); + EXPECT_CALL(mock_h264_video_decoder_, Release()); + + // Make sure the decoder thread had a chance to run. 
+ init_decode_event_.Wait(kDefaultTimeOutMs); +} + +TEST_F(VideoReceiveStream2TestWithLazyDecoderCreation, + DeregisterDecoderThatsNotCreated) { + // No decoder is created here. + EXPECT_CALL(mock_h264_decoder_factory_, CreateVideoDecoder(_)).Times(0); + video_receive_stream_->Start(); + video_receive_stream_->Stop(); +} + } // namespace webrtc diff --git a/video/video_receive_stream_unittest.cc b/video/video_receive_stream_unittest.cc index 07032fe468..cb14f7dc06 100644 --- a/video/video_receive_stream_unittest.cc +++ b/video/video_receive_stream_unittest.cc @@ -25,7 +25,6 @@ #include "modules/rtp_rtcp/source/rtp_packet_to_send.h" #include "modules/utility/include/process_thread.h" #include "modules/video_coding/encoded_frame.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "system_wrappers/include/clock.h" #include "test/fake_decoder.h" @@ -73,10 +72,11 @@ class MockVideoDecoder : public VideoDecoder { (DecodedImageCallback*), (override)); MOCK_METHOD(int32_t, Release, (), (override)); + MOCK_METHOD(VideoDecoder::DecoderInfo, GetDetcoderInfo, (), (const override)); const char* ImplementationName() const { return "MockVideoDecoder"; } }; -class FrameObjectFake : public video_coding::EncodedFrame { +class FrameObjectFake : public EncodedFrame { public: void SetPayloadType(uint8_t payload_type) { _payloadType = payload_type; } @@ -96,10 +96,9 @@ class VideoReceiveStreamTest : public ::testing::Test { VideoReceiveStreamTest() : process_thread_(ProcessThread::Create("TestThread")), task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), - call_stats_(Clock::GetRealTimeClock(), process_thread_.get()), h264_decoder_factory_(&mock_h264_video_decoder_), - null_decoder_factory_(&mock_null_video_decoder_) {} + config_(&mock_transport_, &h264_decoder_factory_), + call_stats_(Clock::GetRealTimeClock(), process_thread_.get()) {} void SetUp() { constexpr int kDefaultNumCpuCores = 2; @@ -111,13 +110,7 @@ class 
VideoReceiveStreamTest : public ::testing::Test { h264_decoder.video_format = SdpVideoFormat("H264"); h264_decoder.video_format.parameters.insert( {"sprop-parameter-sets", "Z0IACpZTBYmI,aMljiA=="}); - h264_decoder.decoder_factory = &h264_decoder_factory_; config_.decoders.push_back(h264_decoder); - VideoReceiveStream::Decoder null_decoder; - null_decoder.payload_type = 98; - null_decoder.video_format = SdpVideoFormat("null"); - null_decoder.decoder_factory = &null_decoder_factory_; - config_.decoders.push_back(null_decoder); clock_ = Clock::GetRealTimeClock(); timing_ = new VCMTiming(clock_); @@ -132,12 +125,10 @@ class VideoReceiveStreamTest : public ::testing::Test { protected: std::unique_ptr process_thread_; const std::unique_ptr task_queue_factory_; + test::VideoDecoderProxyFactory h264_decoder_factory_; VideoReceiveStream::Config config_; CallStats call_stats_; MockVideoDecoder mock_h264_video_decoder_; - MockVideoDecoder mock_null_video_decoder_; - test::VideoDecoderProxyFactory h264_decoder_factory_; - test::VideoDecoderProxyFactory null_decoder_factory_; cricket::FakeVideoRenderer fake_renderer_; MockTransport mock_transport_; PacketRouter packet_router_; @@ -176,9 +167,9 @@ TEST_F(VideoReceiveStreamTest, CreateFrameFromH264FmtpSpropAndIdr) { } TEST_F(VideoReceiveStreamTest, PlayoutDelay) { - const PlayoutDelay kPlayoutDelayMs = {123, 321}; + const VideoPlayoutDelay kPlayoutDelayMs = {123, 321}; std::unique_ptr test_frame(new FrameObjectFake()); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetPlayoutDelay(kPlayoutDelayMs); video_receive_stream_->OnCompleteFrame(std::move(test_frame)); @@ -206,10 +197,10 @@ TEST_F(VideoReceiveStreamTest, PlayoutDelay) { TEST_F(VideoReceiveStreamTest, PlayoutDelayPreservesDefaultMaxValue) { const int default_max_playout_latency = timing_->max_playout_delay(); - const PlayoutDelay kPlayoutDelayMs = {123, -1}; + const VideoPlayoutDelay kPlayoutDelayMs = {123, -1}; std::unique_ptr test_frame(new 
FrameObjectFake()); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetPlayoutDelay(kPlayoutDelayMs); video_receive_stream_->OnCompleteFrame(std::move(test_frame)); @@ -222,10 +213,10 @@ TEST_F(VideoReceiveStreamTest, PlayoutDelayPreservesDefaultMaxValue) { TEST_F(VideoReceiveStreamTest, PlayoutDelayPreservesDefaultMinValue) { const int default_min_playout_latency = timing_->min_playout_delay(); - const PlayoutDelay kPlayoutDelayMs = {-1, 321}; + const VideoPlayoutDelay kPlayoutDelayMs = {-1, 321}; std::unique_ptr test_frame(new FrameObjectFake()); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetPlayoutDelay(kPlayoutDelayMs); video_receive_stream_->OnCompleteFrame(std::move(test_frame)); @@ -243,7 +234,7 @@ class VideoReceiveStreamTestWithFakeDecoder : public ::testing::Test { []() { return std::make_unique(); }), process_thread_(ProcessThread::Create("TestThread")), task_queue_factory_(CreateDefaultTaskQueueFactory()), - config_(&mock_transport_), + config_(&mock_transport_, &fake_decoder_factory_), call_stats_(Clock::GetRealTimeClock(), process_thread_.get()) {} void SetUp() { @@ -253,7 +244,6 @@ class VideoReceiveStreamTestWithFakeDecoder : public ::testing::Test { VideoReceiveStream::Decoder fake_decoder; fake_decoder.payload_type = 99; fake_decoder.video_format = SdpVideoFormat("VP8"); - fake_decoder.decoder_factory = &fake_decoder_factory_; config_.decoders.push_back(fake_decoder); clock_ = Clock::GetRealTimeClock(); ReCreateReceiveStream(VideoReceiveStream::RecordingState()); @@ -289,7 +279,7 @@ TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesNtpTime) { const int64_t kNtpTimestamp = 12345; auto test_frame = std::make_unique(); test_frame->SetPayloadType(99); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetNtpTime(kNtpTimestamp); video_receive_stream_->Start(); @@ -302,7 +292,7 @@ TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesRotation) { const webrtc::VideoRotation kRotation = 
webrtc::kVideoRotation_180; auto test_frame = std::make_unique(); test_frame->SetPayloadType(99); - test_frame->id.picture_id = 0; + test_frame->SetId(0); test_frame->SetRotation(kRotation); video_receive_stream_->Start(); @@ -315,7 +305,7 @@ TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesRotation) { TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesPacketInfos) { auto test_frame = std::make_unique(); test_frame->SetPayloadType(99); - test_frame->id.picture_id = 0; + test_frame->SetId(0); RtpPacketInfos packet_infos = CreatePacketInfos(3); test_frame->SetPacketInfos(packet_infos); @@ -334,7 +324,7 @@ TEST_F(VideoReceiveStreamTestWithFakeDecoder, RenderedFrameUpdatesGetSources) { // Prepare one video frame with per-packet information. auto test_frame = std::make_unique(); test_frame->SetPayloadType(99); - test_frame->id.picture_id = 0; + test_frame->SetId(0); RtpPacketInfos packet_infos; { RtpPacketInfos::vector_type infos; @@ -344,16 +334,16 @@ TEST_F(VideoReceiveStreamTestWithFakeDecoder, RenderedFrameUpdatesGetSources) { info.set_csrcs({kCsrc}); info.set_rtp_timestamp(kRtpTimestamp); - info.set_receive_time_ms(clock_->TimeInMilliseconds() - 5000); + info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(5000)); infos.push_back(info); - info.set_receive_time_ms(clock_->TimeInMilliseconds() - 3000); + info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(3000)); infos.push_back(info); - info.set_receive_time_ms(clock_->TimeInMilliseconds() - 2000); + info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(2000)); infos.push_back(info); - info.set_receive_time_ms(clock_->TimeInMilliseconds() - 4000); + info.set_receive_time(clock_->CurrentTime() - TimeDelta::Millis(4000)); infos.push_back(info); packet_infos = RtpPacketInfos(std::move(infos)); @@ -408,7 +398,7 @@ std::unique_ptr MakeFrame(VideoFrameType frame_type, int picture_id) { auto frame = std::make_unique(); frame->SetPayloadType(99); - frame->id.picture_id = picture_id; + 
frame->SetId(picture_id); frame->SetFrameType(frame_type); return frame; } @@ -472,10 +462,10 @@ class VideoReceiveStreamTestWithSimulatedClock : public ::testing::Test { config.rtp.remote_ssrc = 1111; config.rtp.local_ssrc = 2222; config.renderer = renderer; + config.decoder_factory = decoder_factory; VideoReceiveStream::Decoder fake_decoder; fake_decoder.payload_type = 99; fake_decoder.video_format = SdpVideoFormat("VP8"); - fake_decoder.decoder_factory = decoder_factory; config.decoders.push_back(fake_decoder); return config; } @@ -504,13 +494,12 @@ class VideoReceiveStreamTestWithSimulatedClock : public ::testing::Test { void OnFrameDecoded() { event_->Set(); } - void PassEncodedFrameAndWait( - std::unique_ptr frame) { - event_ = std::make_unique(); - // This call will eventually end up in the Decoded method where the - // event is set. - video_receive_stream_.OnCompleteFrame(std::move(frame)); - event_->Wait(rtc::Event::kForever); + void PassEncodedFrameAndWait(std::unique_ptr frame) { + event_ = std::make_unique(); + // This call will eventually end up in the Decoded method where the + // event is set. 
+ video_receive_stream_.OnCompleteFrame(std::move(frame)); + event_->Wait(rtc::Event::kForever); } protected: diff --git a/video/video_send_stream.cc b/video/video_send_stream.cc index bc9a0cd5f3..8c0f8f6f72 100644 --- a/video/video_send_stream.cc +++ b/video/video_send_stream.cc @@ -12,7 +12,6 @@ #include #include "api/array_view.h" -#include "api/video/video_stream_encoder_create.h" #include "api/video/video_stream_encoder_settings.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" #include "modules/rtp_rtcp/source/rtp_header_extension_size.h" @@ -23,14 +22,13 @@ #include "rtc_base/task_utils/to_queued_task.h" #include "system_wrappers/include/clock.h" #include "system_wrappers/include/field_trial.h" -#include "video/video_send_stream_impl.h" +#include "video/adaptation/overuse_frame_detector.h" +#include "video/video_stream_encoder.h" namespace webrtc { namespace { -constexpr char kTargetBitrateRtcpFieldTrial[] = "WebRTC-Target-Bitrate-Rtcp"; - size_t CalculateMaxHeaderSize(const RtpConfig& config) { size_t header_size = kRtpHeaderSize; size_t extensions_size = 0; @@ -62,6 +60,51 @@ size_t CalculateMaxHeaderSize(const RtpConfig& config) { return header_size; } +VideoStreamEncoder::BitrateAllocationCallbackType +GetBitrateAllocationCallbackType(const VideoSendStream::Config& config) { + if (webrtc::RtpExtension::FindHeaderExtensionByUri( + config.rtp.extensions, + webrtc::RtpExtension::kVideoLayersAllocationUri, + config.crypto_options.srtp.enable_encrypted_rtp_header_extensions + ? 
RtpExtension::Filter::kPreferEncryptedExtension + : RtpExtension::Filter::kDiscardEncryptedExtension)) { + return VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation; + } + if (field_trial::IsEnabled("WebRTC-Target-Bitrate-Rtcp")) { + return VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocation; + } + return VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocationWhenScreenSharing; +} + +RtpSenderFrameEncryptionConfig CreateFrameEncryptionConfig( + const VideoSendStream::Config* config) { + RtpSenderFrameEncryptionConfig frame_encryption_config; + frame_encryption_config.frame_encryptor = config->frame_encryptor; + frame_encryption_config.crypto_options = config->crypto_options; + return frame_encryption_config; +} + +RtpSenderObservers CreateObservers(RtcpRttStats* call_stats, + EncoderRtcpFeedback* encoder_feedback, + SendStatisticsProxy* stats_proxy, + SendDelayStats* send_delay_stats) { + RtpSenderObservers observers; + observers.rtcp_rtt_stats = call_stats; + observers.intra_frame_callback = encoder_feedback; + observers.rtcp_loss_notification_observer = encoder_feedback; + observers.report_block_data_observer = stats_proxy; + observers.rtp_stats = stats_proxy; + observers.bitrate_observer = stats_proxy; + observers.frame_count_observer = stats_proxy; + observers.rtcp_type_observer = stats_proxy; + observers.send_delay_observer = stats_proxy; + observers.send_packet_observer = send_delay_stats; + return observers; +} + } // namespace namespace internal { @@ -69,7 +112,6 @@ namespace internal { VideoSendStream::VideoSendStream( Clock* clock, int num_cpu_cores, - ProcessThread* module_process_thread, TaskQueueFactory* task_queue_factory, RtcpRttStats* call_stats, RtpTransportControllerSendInterface* transport, @@ -81,61 +123,79 @@ VideoSendStream::VideoSendStream( const std::map& suspended_ssrcs, const std::map& suspended_payload_states, std::unique_ptr fec_controller) - : 
worker_queue_(transport->GetWorkerQueue()), + : rtp_transport_queue_(transport->GetWorkerQueue()), + transport_(transport), stats_proxy_(clock, config, encoder_config.content_type), config_(std::move(config)), - content_type_(encoder_config.content_type) { + content_type_(encoder_config.content_type), + video_stream_encoder_(std::make_unique( + clock, + num_cpu_cores, + &stats_proxy_, + config_.encoder_settings, + std::make_unique(&stats_proxy_), + task_queue_factory, + GetBitrateAllocationCallbackType(config_))), + encoder_feedback_( + clock, + config_.rtp.ssrcs, + video_stream_encoder_.get(), + [this](uint32_t ssrc, const std::vector& seq_nums) { + return rtp_video_sender_->GetSentRtpPacketInfos(ssrc, seq_nums); + }), + rtp_video_sender_( + transport->CreateRtpVideoSender(suspended_ssrcs, + suspended_payload_states, + config_.rtp, + config_.rtcp_report_interval_ms, + config_.send_transport, + CreateObservers(call_stats, + &encoder_feedback_, + &stats_proxy_, + send_delay_stats), + event_log, + std::move(fec_controller), + CreateFrameEncryptionConfig(&config_), + config_.frame_transformer)), + send_stream_(clock, + &stats_proxy_, + rtp_transport_queue_, + transport, + bitrate_allocator, + video_stream_encoder_.get(), + &config_, + encoder_config.max_bitrate_bps, + encoder_config.bitrate_priority, + encoder_config.content_type, + rtp_video_sender_) { RTC_DCHECK(config_.encoder_settings.encoder_factory); RTC_DCHECK(config_.encoder_settings.bitrate_allocator_factory); - video_stream_encoder_ = - CreateVideoStreamEncoder(clock, task_queue_factory, num_cpu_cores, - &stats_proxy_, config_.encoder_settings); - // TODO(srte): Initialization should not be done posted on a task queue. - // Note that the posted task must not outlive this scope since the closure - // references local variables. 
- worker_queue_->PostTask(ToQueuedTask( - [this, clock, call_stats, transport, bitrate_allocator, send_delay_stats, - event_log, &suspended_ssrcs, &encoder_config, &suspended_payload_states, - &fec_controller]() { - send_stream_.reset(new VideoSendStreamImpl( - clock, &stats_proxy_, worker_queue_, call_stats, transport, - bitrate_allocator, send_delay_stats, video_stream_encoder_.get(), - event_log, &config_, encoder_config.max_bitrate_bps, - encoder_config.bitrate_priority, suspended_ssrcs, - suspended_payload_states, encoder_config.content_type, - std::move(fec_controller))); - }, - [this]() { thread_sync_event_.Set(); })); - - // Wait for ConstructionTask to complete so that |send_stream_| can be used. - // |module_process_thread| must be registered and deregistered on the thread - // it was created on. - thread_sync_event_.Wait(rtc::Event::kForever); - send_stream_->RegisterProcessThread(module_process_thread); - // TODO(sprang): Enable this also for regular video calls by default, if it - // works well. - if (encoder_config.content_type == VideoEncoderConfig::ContentType::kScreen || - field_trial::IsEnabled(kTargetBitrateRtcpFieldTrial)) { - video_stream_encoder_->SetBitrateAllocationObserver(send_stream_.get()); - } + video_stream_encoder_->SetFecControllerOverride(rtp_video_sender_); ReconfigureVideoEncoder(std::move(encoder_config)); } VideoSendStream::~VideoSendStream() { RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_DCHECK(!send_stream_); + RTC_DCHECK(!running_); + transport_->DestroyRtpVideoSender(rtp_video_sender_); } void VideoSendStream::UpdateActiveSimulcastLayers( const std::vector active_layers) { RTC_DCHECK_RUN_ON(&thread_checker_); + // Keep our `running_` flag expected state in sync with active layers since + // the `send_stream_` will be implicitly stopped/started depending on the + // state of the layers. 
+ bool running = false; + rtc::StringBuilder active_layers_string; active_layers_string << "{"; for (size_t i = 0; i < active_layers.size(); ++i) { if (active_layers[i]) { + running = true; active_layers_string << "1"; } else { active_layers_string << "0"; @@ -148,35 +208,65 @@ void VideoSendStream::UpdateActiveSimulcastLayers( RTC_LOG(LS_INFO) << "UpdateActiveSimulcastLayers: " << active_layers_string.str(); - VideoSendStreamImpl* send_stream = send_stream_.get(); - worker_queue_->PostTask([this, send_stream, active_layers] { - send_stream->UpdateActiveSimulcastLayers(active_layers); - thread_sync_event_.Set(); - }); + rtp_transport_queue_->PostTask( + ToQueuedTask(transport_queue_safety_, [this, active_layers] { + send_stream_.UpdateActiveSimulcastLayers(active_layers); + })); - thread_sync_event_.Wait(rtc::Event::kForever); + running_ = running; } void VideoSendStream::Start() { RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_LOG(LS_INFO) << "VideoSendStream::Start"; - VideoSendStreamImpl* send_stream = send_stream_.get(); - worker_queue_->PostTask([this, send_stream] { - send_stream->Start(); + RTC_DLOG(LS_INFO) << "VideoSendStream::Start"; + if (running_) + return; + + running_ = true; + + rtp_transport_queue_->PostTask(ToQueuedTask([this] { + transport_queue_safety_->SetAlive(); + send_stream_.Start(); thread_sync_event_.Set(); - }); + })); // It is expected that after VideoSendStream::Start has been called, incoming // frames are not dropped in VideoStreamEncoder. To ensure this, Start has to // be synchronized. + // TODO(tommi): ^^^ Validate if this still holds. 
thread_sync_event_.Wait(rtc::Event::kForever); } void VideoSendStream::Stop() { RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_LOG(LS_INFO) << "VideoSendStream::Stop"; - VideoSendStreamImpl* send_stream = send_stream_.get(); - worker_queue_->PostTask([send_stream] { send_stream->Stop(); }); + if (!running_) + return; + RTC_DLOG(LS_INFO) << "VideoSendStream::Stop"; + running_ = false; + rtp_transport_queue_->PostTask(ToQueuedTask(transport_queue_safety_, [this] { + // As the stream can get re-used and implicitly restarted via changing + // the state of the active layers, we do not mark the + // `transport_queue_safety_` flag with `SetNotAlive()` here. That's only + // done when we stop permanently via `StopPermanentlyAndGetRtpStates()`. + send_stream_.Stop(); + })); +} + +bool VideoSendStream::started() { + RTC_DCHECK_RUN_ON(&thread_checker_); + return running_; +} + +void VideoSendStream::AddAdaptationResource( + rtc::scoped_refptr resource) { + RTC_DCHECK_RUN_ON(&thread_checker_); + video_stream_encoder_->AddAdaptationResource(resource); +} + +std::vector> +VideoSendStream::GetAdaptationResources() { + RTC_DCHECK_RUN_ON(&thread_checker_); + return video_stream_encoder_->GetAdaptationResources(); } void VideoSendStream::SetSource( @@ -187,10 +277,8 @@ void VideoSendStream::SetSource( } void VideoSendStream::ReconfigureVideoEncoder(VideoEncoderConfig config) { - // TODO(perkj): Some test cases in VideoSendStreamTest call - // ReconfigureVideoEncoder from the network thread. 
- // RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_DCHECK(content_type_ == config.content_type); + RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_DCHECK_EQ(content_type_, config.content_type); video_stream_encoder_->ConfigureEncoder( std::move(config), config_.rtp.max_packet_size - CalculateMaxHeaderSize(config_.rtp)); @@ -204,7 +292,7 @@ VideoSendStream::Stats VideoSendStream::GetStats() { } absl::optional VideoSendStream::GetPacingFactorOverride() const { - return send_stream_->configured_pacing_factor_; + return send_stream_.configured_pacing_factor(); } void VideoSendStream::StopPermanentlyAndGetRtpStates( @@ -212,12 +300,16 @@ void VideoSendStream::StopPermanentlyAndGetRtpStates( VideoSendStream::RtpPayloadStateMap* payload_state_map) { RTC_DCHECK_RUN_ON(&thread_checker_); video_stream_encoder_->Stop(); - send_stream_->DeRegisterProcessThread(); - worker_queue_->PostTask([this, rtp_state_map, payload_state_map]() { - send_stream_->Stop(); - *rtp_state_map = send_stream_->GetRtpStates(); - *payload_state_map = send_stream_->GetRtpPayloadStates(); - send_stream_.reset(); + + running_ = false; + // Always run these cleanup steps regardless of whether running_ was set + // or not. This will unregister callbacks before destruction. + // See `VideoSendStreamImpl::StopVideoSendStream` for more. + rtp_transport_queue_->PostTask([this, rtp_state_map, payload_state_map]() { + transport_queue_safety_->SetNotAlive(); + send_stream_.Stop(); + *rtp_state_map = send_stream_.GetRtpStates(); + *payload_state_map = send_stream_.GetRtpPayloadStates(); thread_sync_event_.Set(); }); thread_sync_event_.Wait(rtc::Event::kForever); @@ -225,7 +317,7 @@ void VideoSendStream::StopPermanentlyAndGetRtpStates( void VideoSendStream::DeliverRtcp(const uint8_t* packet, size_t length) { // Called on a network thread. 
- send_stream_->DeliverRtcp(packet, length); + send_stream_.DeliverRtcp(packet, length); } } // namespace internal diff --git a/video/video_send_stream.h b/video/video_send_stream.h index addaee49c2..0d132dd666 100644 --- a/video/video_send_stream.h +++ b/video/video_send_stream.h @@ -16,16 +16,19 @@ #include #include "api/fec_controller.h" +#include "api/sequence_checker.h" #include "api/video/video_stream_encoder_interface.h" #include "call/bitrate_allocator.h" #include "call/video_receive_stream.h" #include "call/video_send_stream.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" +#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "video/encoder_rtcp_feedback.h" #include "video/send_delay_stats.h" #include "video/send_statistics_proxy.h" +#include "video/video_send_stream_impl.h" namespace webrtc { namespace test { @@ -34,7 +37,6 @@ class VideoSendStreamPeer; class CallStats; class IvfFileWriter; -class ProcessThread; class RateLimiter; class RtpRtcp; class RtpTransportControllerSendInterface; @@ -46,8 +48,7 @@ class VideoSendStreamImpl; // VideoSendStream implements webrtc::VideoSendStream. // Internally, it delegates all public methods to VideoSendStreamImpl and / or -// VideoStreamEncoder. VideoSendStreamInternal is created and deleted on -// |worker_queue|. +// VideoStreamEncoder. 
class VideoSendStream : public webrtc::VideoSendStream { public: using RtpStateMap = std::map; @@ -56,7 +57,6 @@ class VideoSendStream : public webrtc::VideoSendStream { VideoSendStream( Clock* clock, int num_cpu_cores, - ProcessThread* module_process_thread, TaskQueueFactory* task_queue_factory, RtcpRttStats* call_stats, RtpTransportControllerSendInterface* transport, @@ -78,6 +78,10 @@ class VideoSendStream : public webrtc::VideoSendStream { const std::vector active_layers) override; void Start() override; void Stop() override; + bool started() override; + + void AddAdaptationResource(rtc::scoped_refptr resource) override; + std::vector> GetAdaptationResources() override; void SetSource(rtc::VideoSourceInterface* source, const DegradationPreference& degradation_preference) override; @@ -91,19 +95,23 @@ class VideoSendStream : public webrtc::VideoSendStream { private: friend class test::VideoSendStreamPeer; - class ConstructionTask; - absl::optional GetPacingFactorOverride() const; - rtc::ThreadChecker thread_checker_; - rtc::TaskQueue* const worker_queue_; + RTC_NO_UNIQUE_ADDRESS SequenceChecker thread_checker_; + rtc::TaskQueue* const rtp_transport_queue_; + RtpTransportControllerSendInterface* const transport_; rtc::Event thread_sync_event_; + rtc::scoped_refptr transport_queue_safety_ = + PendingTaskSafetyFlag::CreateDetached(); SendStatisticsProxy stats_proxy_; const VideoSendStream::Config config_; const VideoEncoderConfig::ContentType content_type_; - std::unique_ptr send_stream_; std::unique_ptr video_stream_encoder_; + EncoderRtcpFeedback encoder_feedback_; + RtpVideoSenderInterface* const rtp_video_sender_; + VideoSendStreamImpl send_stream_; + bool running_ RTC_GUARDED_BY(thread_checker_) = false; }; } // namespace internal diff --git a/video/video_send_stream_impl.cc b/video/video_send_stream_impl.cc index 03c9613ab4..3fc6b676dc 100644 --- a/video/video_send_stream_impl.cc +++ b/video/video_send_stream_impl.cc @@ -20,6 +20,7 @@ #include 
"api/crypto/crypto_options.h" #include "api/rtp_parameters.h" #include "api/scoped_refptr.h" +#include "api/sequence_checker.h" #include "api/video_codecs/video_codec.h" #include "call/rtp_transport_controller_send_interface.h" #include "call/video_send_stream.h" @@ -32,8 +33,7 @@ #include "rtc_base/experiments/rate_control_settings.h" #include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" -#include "rtc_base/synchronization/sequence_checker.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/trace_event.h" #include "system_wrappers/include/clock.h" #include "system_wrappers/include/field_trial.h" @@ -49,6 +49,13 @@ static constexpr int64_t kMaxVbaThrottleTimeMs = 500; constexpr TimeDelta kEncoderTimeOut = TimeDelta::Seconds(2); +// When send-side BWE is used a stricter 1.1x pacing factor is used, rather than +// the 2.5x which is used with receive-side BWE. Provides a more careful +// bandwidth rampup with less risk of overshoots causing adverse effects like +// packet loss. Not used for receive side BWE, since there we lack the probing +// feature and so may result in too slow initial rampup. +static constexpr double kStrictPacingMultiplier = 1.1; + bool TransportSeqNumExtensionConfigured(const VideoSendStream::Config& config) { const std::vector& extensions = config.rtp.extensions; return absl::c_any_of(extensions, [](const RtpExtension& ext) { @@ -92,17 +99,26 @@ int CalculateMaxPadBitrateBps(const std::vector& streams, const double hysteresis_factor = RateControlSettings::ParseFromFieldTrials() .GetSimulcastHysteresisFactor(content_type); - const size_t top_active_stream_idx = active_streams.size() - 1; - pad_up_to_bitrate_bps = std::min( - static_cast( - hysteresis_factor * - active_streams[top_active_stream_idx].min_bitrate_bps + - 0.5), - active_streams[top_active_stream_idx].target_bitrate_bps); - - // Add target_bitrate_bps of the lower active streams. 
- for (size_t i = 0; i < top_active_stream_idx; ++i) { - pad_up_to_bitrate_bps += active_streams[i].target_bitrate_bps; + if (is_svc) { + // For SVC, since there is only one "stream", the padding bitrate + // needed to enable the top spatial layer is stored in the + // |target_bitrate_bps| field. + // TODO(sprang): This behavior needs to die. + pad_up_to_bitrate_bps = static_cast( + hysteresis_factor * active_streams[0].target_bitrate_bps + 0.5); + } else { + const size_t top_active_stream_idx = active_streams.size() - 1; + pad_up_to_bitrate_bps = std::min( + static_cast( + hysteresis_factor * + active_streams[top_active_stream_idx].min_bitrate_bps + + 0.5), + active_streams[top_active_stream_idx].target_bitrate_bps); + + // Add target_bitrate_bps of the lower active streams. + for (size_t i = 0; i < top_active_stream_idx; ++i) { + pad_up_to_bitrate_bps += active_streams[i].target_bitrate_bps; + } } } } else if (!active_streams.empty() && pad_to_min_bitrate) { @@ -115,33 +131,6 @@ int CalculateMaxPadBitrateBps(const std::vector& streams, return pad_up_to_bitrate_bps; } -RtpSenderFrameEncryptionConfig CreateFrameEncryptionConfig( - const VideoSendStream::Config* config) { - RtpSenderFrameEncryptionConfig frame_encryption_config; - frame_encryption_config.frame_encryptor = config->frame_encryptor; - frame_encryption_config.crypto_options = config->crypto_options; - return frame_encryption_config; -} - -RtpSenderObservers CreateObservers(RtcpRttStats* call_stats, - EncoderRtcpFeedback* encoder_feedback, - SendStatisticsProxy* stats_proxy, - SendDelayStats* send_delay_stats) { - RtpSenderObservers observers; - observers.rtcp_rtt_stats = call_stats; - observers.intra_frame_callback = encoder_feedback; - observers.rtcp_loss_notification_observer = encoder_feedback; - observers.rtcp_stats = stats_proxy; - observers.report_block_data_observer = stats_proxy; - observers.rtp_stats = stats_proxy; - observers.bitrate_observer = stats_proxy; - observers.frame_count_observer = 
stats_proxy; - observers.rtcp_type_observer = stats_proxy; - observers.send_delay_observer = stats_proxy; - observers.send_packet_observer = send_delay_stats; - return observers; -} - absl::optional GetAlrSettings( VideoEncoderConfig::ContentType content_type) { if (content_type == VideoEncoderConfig::ContentType::kScreen) { @@ -163,10 +152,48 @@ bool SameStreamsEnabled(const VideoBitrateAllocation& lhs, } return true; } + +// Returns an optional that has value iff TransportSeqNumExtensionConfigured +// is `true` for the given video send stream config. +absl::optional GetConfiguredPacingFactor( + const VideoSendStream::Config& config, + VideoEncoderConfig::ContentType content_type, + const PacingConfig& default_pacing_config) { + if (!TransportSeqNumExtensionConfigured(config)) + return absl::nullopt; + + absl::optional alr_settings = + GetAlrSettings(content_type); + if (alr_settings) + return alr_settings->pacing_factor; + + RateControlSettings rate_control_settings = + RateControlSettings::ParseFromFieldTrials(); + return rate_control_settings.GetPacingFactor().value_or( + default_pacing_config.pacing_factor); +} + +uint32_t GetInitialEncoderMaxBitrate(int initial_encoder_max_bitrate) { + if (initial_encoder_max_bitrate > 0) + return rtc::dchecked_cast(initial_encoder_max_bitrate); + + // TODO(srte): Make sure max bitrate is not set to negative values. We don't + // have any way to handle unset values in downstream code, such as the + // bitrate allocator. Previously -1 was implicitly casted to UINT32_MAX, a + // behaviour that is not safe. Converting to 10 Mbps should be safe for + // reasonable use cases as it allows adding the max of multiple streams + // without wrappping around. 
+ const int kFallbackMaxBitrateBps = 10000000; + RTC_DLOG(LS_ERROR) << "ERROR: Initial encoder max bitrate = " + << initial_encoder_max_bitrate << " which is <= 0!"; + RTC_DLOG(LS_INFO) << "Using default encoder max bitrate = 10 Mbps"; + return kFallbackMaxBitrateBps; +} + } // namespace PacingConfig::PacingConfig() - : pacing_factor("factor", PacedSender::kDefaultPaceMultiplier), + : pacing_factor("factor", kStrictPacingMultiplier), max_pacing_delay("max_delay", TimeDelta::Millis(PacedSender::kMaxQueueLengthMs)) { ParseFieldTrial({&pacing_factor, &max_pacing_delay}, @@ -178,158 +205,109 @@ PacingConfig::~PacingConfig() = default; VideoSendStreamImpl::VideoSendStreamImpl( Clock* clock, SendStatisticsProxy* stats_proxy, - rtc::TaskQueue* worker_queue, - RtcpRttStats* call_stats, + rtc::TaskQueue* rtp_transport_queue, RtpTransportControllerSendInterface* transport, BitrateAllocatorInterface* bitrate_allocator, - SendDelayStats* send_delay_stats, VideoStreamEncoderInterface* video_stream_encoder, - RtcEventLog* event_log, const VideoSendStream::Config* config, int initial_encoder_max_bitrate, double initial_encoder_bitrate_priority, - std::map suspended_ssrcs, - std::map suspended_payload_states, VideoEncoderConfig::ContentType content_type, - std::unique_ptr fec_controller) + RtpVideoSenderInterface* rtp_video_sender) : clock_(clock), has_alr_probing_(config->periodic_alr_bandwidth_probing || GetAlrSettings(content_type)), pacing_config_(PacingConfig()), stats_proxy_(stats_proxy), config_(config), - worker_queue_(worker_queue), + rtp_transport_queue_(rtp_transport_queue), timed_out_(false), transport_(transport), bitrate_allocator_(bitrate_allocator), disable_padding_(true), max_padding_bitrate_(0), encoder_min_bitrate_bps_(0), + encoder_max_bitrate_bps_( + GetInitialEncoderMaxBitrate(initial_encoder_max_bitrate)), encoder_target_rate_bps_(0), encoder_bitrate_priority_(initial_encoder_bitrate_priority), - has_packet_feedback_(false), 
video_stream_encoder_(video_stream_encoder), - encoder_feedback_(clock, config_->rtp.ssrcs, video_stream_encoder), bandwidth_observer_(transport->GetBandwidthObserver()), - rtp_video_sender_( - transport_->CreateRtpVideoSender(suspended_ssrcs, - suspended_payload_states, - config_->rtp, - config_->rtcp_report_interval_ms, - config_->send_transport, - CreateObservers(call_stats, - &encoder_feedback_, - stats_proxy_, - send_delay_stats), - event_log, - std::move(fec_controller), - CreateFrameEncryptionConfig(config_), - config->frame_transformer)), - weak_ptr_factory_(this) { - video_stream_encoder->SetFecControllerOverride(rtp_video_sender_); - RTC_DCHECK_RUN_ON(worker_queue_); - RTC_LOG(LS_INFO) << "VideoSendStreamInternal: " << config_->ToString(); - weak_ptr_ = weak_ptr_factory_.GetWeakPtr(); - - encoder_feedback_.SetRtpVideoSender(rtp_video_sender_); - + rtp_video_sender_(rtp_video_sender), + configured_pacing_factor_( + GetConfiguredPacingFactor(*config_, content_type, pacing_config_)) { + RTC_DCHECK_GE(config_->rtp.payload_type, 0); + RTC_DCHECK_LE(config_->rtp.payload_type, 127); RTC_DCHECK(!config_->rtp.ssrcs.empty()); RTC_DCHECK(transport_); RTC_DCHECK_NE(initial_encoder_max_bitrate, 0); - - if (initial_encoder_max_bitrate > 0) { - encoder_max_bitrate_bps_ = - rtc::dchecked_cast(initial_encoder_max_bitrate); - } else { - // TODO(srte): Make sure max bitrate is not set to negative values. We don't - // have any way to handle unset values in downstream code, such as the - // bitrate allocator. Previously -1 was implicitly casted to UINT32_MAX, a - // behaviour that is not safe. Converting to 10 Mbps should be safe for - // reasonable use cases as it allows adding the max of multiple streams - // without wrappping around. 
- const int kFallbackMaxBitrateBps = 10000000; - RTC_DLOG(LS_ERROR) << "ERROR: Initial encoder max bitrate = " - << initial_encoder_max_bitrate << " which is <= 0!"; - RTC_DLOG(LS_INFO) << "Using default encoder max bitrate = 10 Mbps"; - encoder_max_bitrate_bps_ = kFallbackMaxBitrateBps; - } + RTC_LOG(LS_INFO) << "VideoSendStreamImpl: " << config_->ToString(); RTC_CHECK(AlrExperimentSettings::MaxOneFieldTrialEnabled()); + + // Only request rotation at the source when we positively know that the remote + // side doesn't support the rotation extension. This allows us to prepare the + // encoder in the expectation that rotation is supported - which is the common + // case. + bool rotation_applied = absl::c_none_of( + config_->rtp.extensions, [](const RtpExtension& extension) { + return extension.uri == RtpExtension::kVideoRotationUri; + }); + + video_stream_encoder_->SetSink(this, rotation_applied); + + absl::optional enable_alr_bw_probing; + // If send-side BWE is enabled, check if we should apply updated probing and // pacing settings. 
- if (TransportSeqNumExtensionConfigured(*config_)) { - has_packet_feedback_ = true; - + if (configured_pacing_factor_) { absl::optional alr_settings = GetAlrSettings(content_type); + int queue_time_limit_ms; if (alr_settings) { - transport->EnablePeriodicAlrProbing(true); - transport->SetPacingFactor(alr_settings->pacing_factor); - configured_pacing_factor_ = alr_settings->pacing_factor; - transport->SetQueueTimeLimit(alr_settings->max_paced_queue_time); + enable_alr_bw_probing = true; + queue_time_limit_ms = alr_settings->max_paced_queue_time; } else { RateControlSettings rate_control_settings = RateControlSettings::ParseFromFieldTrials(); - - transport->EnablePeriodicAlrProbing( - rate_control_settings.UseAlrProbing()); - const double pacing_factor = - rate_control_settings.GetPacingFactor().value_or( - pacing_config_.pacing_factor); - transport->SetPacingFactor(pacing_factor); - configured_pacing_factor_ = pacing_factor; - transport->SetQueueTimeLimit(pacing_config_.max_pacing_delay.Get().ms()); + enable_alr_bw_probing = rate_control_settings.UseAlrProbing(); + queue_time_limit_ms = pacing_config_.max_pacing_delay.Get().ms(); } + + transport->SetQueueTimeLimit(queue_time_limit_ms); } if (config_->periodic_alr_bandwidth_probing) { - transport->EnablePeriodicAlrProbing(true); + enable_alr_bw_probing = config_->periodic_alr_bandwidth_probing; } - RTC_DCHECK_GE(config_->rtp.payload_type, 0); - RTC_DCHECK_LE(config_->rtp.payload_type, 127); - - video_stream_encoder_->SetStartBitrate( - bitrate_allocator_->GetStartBitrate(this)); + if (enable_alr_bw_probing) { + transport->EnablePeriodicAlrProbing(*enable_alr_bw_probing); + } - // Only request rotation at the source when we positively know that the remote - // side doesn't support the rotation extension. This allows us to prepare the - // encoder in the expectation that rotation is supported - which is the common - // case. 
- bool rotation_applied = absl::c_none_of( - config_->rtp.extensions, [](const RtpExtension& extension) { - return extension.uri == RtpExtension::kVideoRotationUri; - }); + rtp_transport_queue_->PostTask(ToQueuedTask(transport_queue_safety_, [this] { + if (configured_pacing_factor_) + transport_->SetPacingFactor(*configured_pacing_factor_); - video_stream_encoder_->SetSink(this, rotation_applied); + video_stream_encoder_->SetStartBitrate( + bitrate_allocator_->GetStartBitrate(this)); + })); } VideoSendStreamImpl::~VideoSendStreamImpl() { - RTC_DCHECK_RUN_ON(worker_queue_); - RTC_DCHECK(!rtp_video_sender_->IsActive()) - << "VideoSendStreamImpl::Stop not called"; - RTC_LOG(LS_INFO) << "~VideoSendStreamInternal: " << config_->ToString(); - transport_->DestroyRtpVideoSender(rtp_video_sender_); -} - -void VideoSendStreamImpl::RegisterProcessThread( - ProcessThread* module_process_thread) { - rtp_video_sender_->RegisterProcessThread(module_process_thread); -} - -void VideoSendStreamImpl::DeRegisterProcessThread() { - rtp_video_sender_->DeRegisterProcessThread(); + RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_LOG(LS_INFO) << "~VideoSendStreamImpl: " << config_->ToString(); } void VideoSendStreamImpl::DeliverRtcp(const uint8_t* packet, size_t length) { // Runs on a network thread. 
- RTC_DCHECK(!worker_queue_->IsCurrent()); + RTC_DCHECK(!rtp_transport_queue_->IsCurrent()); rtp_video_sender_->DeliverRtcp(packet, length); } void VideoSendStreamImpl::UpdateActiveSimulcastLayers( const std::vector active_layers) { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); bool previously_active = rtp_video_sender_->IsActive(); rtp_video_sender_->SetActiveModules(active_layers); if (!rtp_video_sender_->IsActive() && previously_active) { @@ -342,17 +320,21 @@ void VideoSendStreamImpl::UpdateActiveSimulcastLayers( } void VideoSendStreamImpl::Start() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); RTC_LOG(LS_INFO) << "VideoSendStream::Start"; if (rtp_video_sender_->IsActive()) return; + TRACE_EVENT_INSTANT0("webrtc", "VideoSendStream::Start"); rtp_video_sender_->SetActive(true); StartupVideoSendStream(); } void VideoSendStreamImpl::StartupVideoSendStream() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); + + transport_queue_safety_->SetAlive(); + bitrate_allocator_->AddObserver(this, GetAllocationConfig()); // Start monitoring encoder activity. 
{ @@ -361,8 +343,8 @@ void VideoSendStreamImpl::StartupVideoSendStream() { activity_ = false; timed_out_ = false; check_encoder_activity_task_ = RepeatingTaskHandle::DelayedStart( - worker_queue_->Get(), kEncoderTimeOut, [this] { - RTC_DCHECK_RUN_ON(worker_queue_); + rtp_transport_queue_->Get(), kEncoderTimeOut, [this] { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); if (!activity_) { if (!timed_out_) { SignalEncoderTimedOut(); @@ -382,25 +364,29 @@ void VideoSendStreamImpl::StartupVideoSendStream() { } void VideoSendStreamImpl::Stop() { - RTC_DCHECK_RUN_ON(worker_queue_); - RTC_LOG(LS_INFO) << "VideoSendStream::Stop"; + RTC_DCHECK_RUN_ON(rtp_transport_queue_); + RTC_LOG(LS_INFO) << "VideoSendStreamImpl::Stop"; if (!rtp_video_sender_->IsActive()) return; + + RTC_DCHECK(transport_queue_safety_->alive()); TRACE_EVENT_INSTANT0("webrtc", "VideoSendStream::Stop"); rtp_video_sender_->SetActive(false); StopVideoSendStream(); } +// RTC_RUN_ON(rtp_transport_queue_) void VideoSendStreamImpl::StopVideoSendStream() { bitrate_allocator_->RemoveObserver(this); check_encoder_activity_task_.Stop(); video_stream_encoder_->OnBitrateUpdated(DataRate::Zero(), DataRate::Zero(), DataRate::Zero(), 0, 0, 0); stats_proxy_->OnSetEncoderTargetRate(0); + transport_queue_safety_->SetNotAlive(); } void VideoSendStreamImpl::SignalEncoderTimedOut() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); // If the encoder has not produced anything the last kEncoderTimeOut and it // is supposed to, deregister as BitrateAllocatorObserver. This can happen // if a camera stops producing frames. 
@@ -412,17 +398,14 @@ void VideoSendStreamImpl::SignalEncoderTimedOut() { void VideoSendStreamImpl::OnBitrateAllocationUpdated( const VideoBitrateAllocation& allocation) { - if (!worker_queue_->IsCurrent()) { - auto ptr = weak_ptr_; - worker_queue_->PostTask([=] { - if (!ptr.get()) - return; - ptr->OnBitrateAllocationUpdated(allocation); - }); + if (!rtp_transport_queue_->IsCurrent()) { + rtp_transport_queue_->PostTask(ToQueuedTask(transport_queue_safety_, [=] { + OnBitrateAllocationUpdated(allocation); + })); return; } - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); int64_t now_ms = clock_->TimeInMilliseconds(); if (encoder_target_rate_bps_ != 0) { @@ -459,8 +442,15 @@ void VideoSendStreamImpl::OnBitrateAllocationUpdated( } } +void VideoSendStreamImpl::OnVideoLayersAllocationUpdated( + VideoLayersAllocation allocation) { + // OnVideoLayersAllocationUpdated is handled on the encoder task queue in + // order to not race with OnEncodedImage callbacks. + rtp_video_sender_->OnVideoLayersAllocationUpdated(allocation); +} + void VideoSendStreamImpl::SignalEncoderActive() { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); if (rtp_video_sender_->IsActive()) { RTC_LOG(LS_INFO) << "SignalEncoderActive, Encoder is active."; bitrate_allocator_->AddObserver(this, GetAllocationConfig()); @@ -482,21 +472,20 @@ void VideoSendStreamImpl::OnEncoderConfigurationChanged( bool is_svc, VideoEncoderConfig::ContentType content_type, int min_transmit_bitrate_bps) { - if (!worker_queue_->IsCurrent()) { - rtc::WeakPtr send_stream = weak_ptr_; - worker_queue_->PostTask([send_stream, streams, is_svc, content_type, - min_transmit_bitrate_bps]() mutable { - if (send_stream) { - send_stream->OnEncoderConfigurationChanged( - std::move(streams), is_svc, content_type, min_transmit_bitrate_bps); - } - }); + if (!rtp_transport_queue_->IsCurrent()) { + rtp_transport_queue_->PostTask(ToQueuedTask( + transport_queue_safety_, + [this, streams = 
std::move(streams), is_svc, content_type, + min_transmit_bitrate_bps]() mutable { + OnEncoderConfigurationChanged(std::move(streams), is_svc, + content_type, min_transmit_bitrate_bps); + })); return; } RTC_DCHECK_GE(config_->rtp.ssrcs.size(), streams.size()); TRACE_EVENT0("webrtc", "VideoSendStream::OnEncoderConfigurationChanged"); - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); const VideoCodecType codec_type = PayloadStringToCodecType(config_->rtp.payload_name); @@ -549,8 +538,7 @@ void VideoSendStreamImpl::OnEncoderConfigurationChanged( EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) { + const CodecSpecificInfo* codec_specific_info) { // Encoded is called on whatever thread the real encoder implementation run // on. In the case of hardware encoders, there might be several encoders // running in parallel on different threads. @@ -560,35 +548,34 @@ EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage( auto enable_padding_task = [this]() { if (disable_padding_) { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); disable_padding_ = false; // To ensure that padding bitrate is propagated to the bitrate allocator. SignalEncoderActive(); } }; - if (!worker_queue_->IsCurrent()) { - worker_queue_->PostTask(enable_padding_task); + if (!rtp_transport_queue_->IsCurrent()) { + rtp_transport_queue_->PostTask( + ToQueuedTask(transport_queue_safety_, std::move(enable_padding_task))); } else { enable_padding_task(); } EncodedImageCallback::Result result(EncodedImageCallback::Result::OK); - result = rtp_video_sender_->OnEncodedImage(encoded_image, codec_specific_info, - fragmentation); + result = + rtp_video_sender_->OnEncodedImage(encoded_image, codec_specific_info); // Check if there's a throttled VideoBitrateAllocation that we should try // sending. 
- rtc::WeakPtr send_stream = weak_ptr_; - auto update_task = [send_stream]() { - if (send_stream) { - RTC_DCHECK_RUN_ON(send_stream->worker_queue_); - auto& context = send_stream->video_bitrate_allocation_context_; - if (context && context->throttled_allocation) { - send_stream->OnBitrateAllocationUpdated(*context->throttled_allocation); - } + auto update_task = [this]() { + RTC_DCHECK_RUN_ON(rtp_transport_queue_); + auto& context = video_bitrate_allocation_context_; + if (context && context->throttled_allocation) { + OnBitrateAllocationUpdated(*context->throttled_allocation); } }; - if (!worker_queue_->IsCurrent()) { - worker_queue_->PostTask(update_task); + if (!rtp_transport_queue_->IsCurrent()) { + rtp_transport_queue_->PostTask( + ToQueuedTask(transport_queue_safety_, std::move(update_task))); } else { update_task(); } @@ -611,7 +598,7 @@ std::map VideoSendStreamImpl::GetRtpPayloadStates() } uint32_t VideoSendStreamImpl::OnBitrateUpdated(BitrateAllocationUpdate update) { - RTC_DCHECK_RUN_ON(worker_queue_); + RTC_DCHECK_RUN_ON(rtp_transport_queue_); RTC_DCHECK(rtp_video_sender_->IsActive()) << "VideoSendStream::Start has not been called."; diff --git a/video/video_send_stream_impl.h b/video/video_send_stream_impl.h index 8f30b630be..babf1dcfe5 100644 --- a/video/video_send_stream_impl.h +++ b/video/video_send_stream_impl.h @@ -19,8 +19,6 @@ #include #include "absl/types/optional.h" -#include "api/fec_controller.h" -#include "api/rtc_event_log/rtc_event_log.h" #include "api/video/encoded_image.h" #include "api/video/video_bitrate_allocation.h" #include "api/video/video_bitrate_allocator.h" @@ -33,18 +31,14 @@ #include "call/rtp_video_sender_interface.h" #include "modules/include/module_common_types.h" #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h" -#include "modules/utility/include/process_thread.h" #include "modules/video_coding/include/video_codec_interface.h" -#include "rtc_base/critical_section.h" #include "rtc_base/experiments/field_trial_parser.h" 
+#include "rtc_base/system/no_unique_address.h" #include "rtc_base/task_queue.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" #include "rtc_base/task_utils/repeating_task.h" #include "rtc_base/thread_annotations.h" -#include "rtc_base/weak_ptr.h" -#include "video/encoder_rtcp_feedback.h" -#include "video/send_delay_stats.h" #include "video/send_statistics_proxy.h" -#include "video/video_send_stream.h" namespace webrtc { namespace internal { @@ -60,43 +54,28 @@ struct PacingConfig { }; // VideoSendStreamImpl implements internal::VideoSendStream. -// It is created and destroyed on |worker_queue|. The intent is to decrease the -// need for locking and to ensure methods are called in sequence. -// Public methods except |DeliverRtcp| must be called on |worker_queue|. +// It is created and destroyed on `rtp_transport_queue`. The intent is to +// decrease the need for locking and to ensure methods are called in sequence. +// Public methods except `DeliverRtcp` must be called on `rtp_transport_queue`. // DeliverRtcp is called on the libjingle worker thread or a network thread. // An encoder may deliver frames through the EncodedImageCallback on an // arbitrary thread. 
class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, - public VideoStreamEncoderInterface::EncoderSink, - public VideoBitrateAllocationObserver { + public VideoStreamEncoderInterface::EncoderSink { public: - VideoSendStreamImpl( - Clock* clock, - SendStatisticsProxy* stats_proxy, - rtc::TaskQueue* worker_queue, - RtcpRttStats* call_stats, - RtpTransportControllerSendInterface* transport, - BitrateAllocatorInterface* bitrate_allocator, - SendDelayStats* send_delay_stats, - VideoStreamEncoderInterface* video_stream_encoder, - RtcEventLog* event_log, - const VideoSendStream::Config* config, - int initial_encoder_max_bitrate, - double initial_encoder_bitrate_priority, - std::map suspended_ssrcs, - std::map suspended_payload_states, - VideoEncoderConfig::ContentType content_type, - std::unique_ptr fec_controller); + VideoSendStreamImpl(Clock* clock, + SendStatisticsProxy* stats_proxy, + rtc::TaskQueue* rtp_transport_queue, + RtpTransportControllerSendInterface* transport, + BitrateAllocatorInterface* bitrate_allocator, + VideoStreamEncoderInterface* video_stream_encoder, + const VideoSendStream::Config* config, + int initial_encoder_max_bitrate, + double initial_encoder_bitrate_priority, + VideoEncoderConfig::ContentType content_type, + RtpVideoSenderInterface* rtp_video_sender); ~VideoSendStreamImpl() override; - // RegisterProcessThread register |module_process_thread| with those objects - // that use it. Registration has to happen on the thread were - // |module_process_thread| was created (libjingle's worker thread). - // TODO(perkj): Replace the use of |module_process_thread| with a TaskQueue, - // maybe |worker_queue|. 
- void RegisterProcessThread(ProcessThread* module_process_thread); - void DeRegisterProcessThread(); - void DeliverRtcp(const uint8_t* packet, size_t length); void UpdateActiveSimulcastLayers(const std::vector active_layers); void Start(); @@ -107,45 +86,50 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, std::map GetRtpPayloadStates() const; - absl::optional configured_pacing_factor_; + const absl::optional& configured_pacing_factor() const { + return configured_pacing_factor_; + } private: // Implements BitrateAllocatorObserver. uint32_t OnBitrateUpdated(BitrateAllocationUpdate update) override; + // Implements VideoStreamEncoderInterface::EncoderSink void OnEncoderConfigurationChanged( std::vector streams, bool is_svc, VideoEncoderConfig::ContentType content_type, int min_transmit_bitrate_bps) override; + void OnBitrateAllocationUpdated( + const VideoBitrateAllocation& allocation) override; + void OnVideoLayersAllocationUpdated( + VideoLayersAllocation allocation) override; + // Implements EncodedImageCallback. The implementation routes encoded frames // to the |payload_router_| and |config.pre_encode_callback| if set. // Called on an arbitrary encoder callback thread. EncodedImageCallback::Result OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override; + const CodecSpecificInfo* codec_specific_info) override; // Implements EncodedImageCallback. void OnDroppedFrame(EncodedImageCallback::DropReason reason) override; - // Implements VideoBitrateAllocationObserver. - void OnBitrateAllocationUpdated( - const VideoBitrateAllocation& allocation) override; - // Starts monitoring and sends a keyframe. void StartupVideoSendStream(); // Removes the bitrate observer, stops monitoring and notifies the video // encoder of the bitrate update. 
- void StopVideoSendStream() RTC_RUN_ON(worker_queue_); + void StopVideoSendStream() RTC_RUN_ON(rtp_transport_queue_); void ConfigureProtection(); void ConfigureSsrcs(); void SignalEncoderTimedOut(); void SignalEncoderActive(); MediaStreamAllocationConfig GetAllocationConfig() const - RTC_RUN_ON(worker_queue_); + RTC_RUN_ON(rtp_transport_queue_); + + RTC_NO_UNIQUE_ADDRESS SequenceChecker thread_checker_; Clock* const clock_; const bool has_alr_probing_; const PacingConfig pacing_config_; @@ -153,40 +137,31 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, SendStatisticsProxy* const stats_proxy_; const VideoSendStream::Config* const config_; - rtc::TaskQueue* const worker_queue_; + rtc::TaskQueue* const rtp_transport_queue_; RepeatingTaskHandle check_encoder_activity_task_ - RTC_GUARDED_BY(worker_queue_); + RTC_GUARDED_BY(rtp_transport_queue_); std::atomic_bool activity_; - bool timed_out_ RTC_GUARDED_BY(worker_queue_); + bool timed_out_ RTC_GUARDED_BY(rtp_transport_queue_); RtpTransportControllerSendInterface* const transport_; BitrateAllocatorInterface* const bitrate_allocator_; - rtc::CriticalSection ivf_writers_crit_; - bool disable_padding_; int max_padding_bitrate_; int encoder_min_bitrate_bps_; uint32_t encoder_max_bitrate_bps_; uint32_t encoder_target_rate_bps_; double encoder_bitrate_priority_; - bool has_packet_feedback_; VideoStreamEncoderInterface* const video_stream_encoder_; - EncoderRtcpFeedback encoder_feedback_; RtcpBandwidthObserver* const bandwidth_observer_; RtpVideoSenderInterface* const rtp_video_sender_; - // |weak_ptr_| to our self. This is used since we can not call - // |weak_ptr_factory_.GetWeakPtr| from multiple sequences but it is ok to copy - // an existing WeakPtr. - rtc::WeakPtr weak_ptr_; - // |weak_ptr_factory_| must be declared last to make sure all WeakPtr's are - // invalidated before any other members are destroyed. 
- rtc::WeakPtrFactory weak_ptr_factory_; + rtc::scoped_refptr transport_queue_safety_ = + PendingTaskSafetyFlag::CreateDetached(); // Context for the most recent and last sent video bitrate allocation. Used to // throttle sending of similar bitrate allocations. @@ -196,7 +171,8 @@ class VideoSendStreamImpl : public webrtc::BitrateAllocatorObserver, int64_t last_send_time_ms; }; absl::optional video_bitrate_allocation_context_ - RTC_GUARDED_BY(worker_queue_); + RTC_GUARDED_BY(rtp_transport_queue_); + const absl::optional configured_pacing_factor_; }; } // namespace internal } // namespace webrtc diff --git a/video/video_send_stream_impl_unittest.cc b/video/video_send_stream_impl_unittest.cc index 178d3865b7..30a4aacd92 100644 --- a/video/video_send_stream_impl_unittest.cc +++ b/video/video_send_stream_impl_unittest.cc @@ -10,6 +10,7 @@ #include "video/video_send_stream_impl.h" +#include #include #include @@ -30,6 +31,7 @@ #include "test/mock_transport.h" #include "video/call_stats.h" #include "video/test/mock_video_stream_encoder.h" +#include "video/video_send_stream.h" namespace webrtc { @@ -43,6 +45,8 @@ bool operator==(const BitrateAllocationUpdate& a, namespace internal { namespace { using ::testing::_; +using ::testing::AllOf; +using ::testing::Field; using ::testing::Invoke; using ::testing::NiceMock; using ::testing::Return; @@ -58,8 +62,6 @@ std::string GetAlrProbingExperimentString() { } class MockRtpVideoSender : public RtpVideoSenderInterface { public: - MOCK_METHOD(void, RegisterProcessThread, (ProcessThread*), (override)); - MOCK_METHOD(void, DeRegisterProcessThread, (), (override)); MOCK_METHOD(void, SetActive, (bool), (override)); MOCK_METHOD(void, SetActiveModules, (const std::vector), (override)); MOCK_METHOD(bool, IsActive, (), (override)); @@ -77,11 +79,13 @@ class MockRtpVideoSender : public RtpVideoSenderInterface { OnBitrateAllocationUpdated, (const VideoBitrateAllocation&), (override)); + MOCK_METHOD(void, + OnVideoLayersAllocationUpdated, + 
(const VideoLayersAllocation&), + (override)); MOCK_METHOD(EncodedImageCallback::Result, OnEncodedImage, - (const EncodedImage&, - const CodecSpecificInfo*, - const RTPFragmentationHeader*), + (const EncodedImage&, const CodecSpecificInfo*), (override)); MOCK_METHOD(void, OnTransportOverheadChanged, (size_t), (override)); MOCK_METHOD(void, @@ -140,17 +144,24 @@ class VideoSendStreamImplTest : public ::testing::Test { int initial_encoder_max_bitrate, double initial_encoder_bitrate_priority, VideoEncoderConfig::ContentType content_type) { + RTC_DCHECK(!test_queue_.IsCurrent()); + EXPECT_CALL(bitrate_allocator_, GetStartBitrate(_)) .WillOnce(Return(123000)); + std::map suspended_ssrcs; std::map suspended_payload_states; - return std::make_unique( - &clock_, &stats_proxy_, &test_queue_, &call_stats_, - &transport_controller_, &bitrate_allocator_, &send_delay_stats_, - &video_stream_encoder_, &event_log_, &config_, + auto ret = std::make_unique( + &clock_, &stats_proxy_, &test_queue_, &transport_controller_, + &bitrate_allocator_, &video_stream_encoder_, &config_, initial_encoder_max_bitrate, initial_encoder_bitrate_priority, - suspended_ssrcs, suspended_payload_states, content_type, - std::make_unique(&clock_)); + content_type, &rtp_video_sender_); + + // The call to GetStartBitrate() executes asynchronously on the tq. 
+ test_queue_.WaitForPreviouslyPostedTasks(); + testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_); + + return ret; } protected: @@ -174,22 +185,22 @@ class VideoSendStreamImplTest : public ::testing::Test { }; TEST_F(VideoSendStreamImplTest, RegistersAsBitrateObserverOnStart) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _)) + .WillOnce(Invoke( + [&](BitrateAllocatorObserver*, MediaStreamAllocationConfig config) { + EXPECT_EQ(config.min_bitrate_bps, 0u); + EXPECT_EQ(config.max_bitrate_bps, kDefaultInitialBitrateBps); + EXPECT_EQ(config.pad_up_bitrate_bps, 0u); + EXPECT_EQ(config.enforce_min_bitrate, !kSuspend); + EXPECT_EQ(config.bitrate_priority, kDefaultBitratePriority); + })); test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); - EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _)) - .WillOnce(Invoke([&](BitrateAllocatorObserver*, - MediaStreamAllocationConfig config) { - EXPECT_EQ(config.min_bitrate_bps, 0u); - EXPECT_EQ(config.max_bitrate_bps, kDefaultInitialBitrateBps); - EXPECT_EQ(config.pad_up_bitrate_bps, 0u); - EXPECT_EQ(config.enforce_min_bitrate, !kSuspend); - EXPECT_EQ(config.bitrate_priority, kDefaultBitratePriority); - })); + [&] { vss_impl->Start(); EXPECT_CALL(bitrate_allocator_, RemoveObserver(vss_impl.get())) .Times(1); @@ -199,15 +210,16 @@ TEST_F(VideoSendStreamImplTest, RegistersAsBitrateObserverOnStart) { } TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChange) { + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + 
config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri, + 1); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); + test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); + [&] { vss_impl->Start(); // QVGA + VGA configuration matching defaults in @@ -264,16 +276,16 @@ TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChange) { } TEST_F(VideoSendStreamImplTest, UpdatesObserverOnConfigurationChangeWithAlr) { + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri, + 1); + config_.periodic_alr_bandwidth_probing = true; + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - config_.periodic_alr_bandwidth_probing = true; - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); // Simulcast screenshare. 
@@ -336,11 +348,12 @@ TEST_F(VideoSendStreamImplTest, test::ScopedFieldTrials hysteresis_experiment( "WebRTC-VideoRateControl/video_hysteresis:1.25/"); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); + test_queue_.SendTask( - [this] { - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); + [&] { vss_impl->Start(); // 2-layer video simulcast. @@ -396,17 +409,17 @@ TEST_F(VideoSendStreamImplTest, TEST_F(VideoSendStreamImplTest, SetsScreensharePacingFactorWithFeedback) { test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString()); + constexpr int kId = 1; + config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri, + kId); + EXPECT_CALL(transport_controller_, + SetPacingFactor(kAlrProbingExperimentPaceMultiplier)) + .Times(1); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - constexpr int kId = 1; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, kId); - EXPECT_CALL(transport_controller_, - SetPacingFactor(kAlrProbingExperimentPaceMultiplier)) - .Times(1); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); vss_impl->Stop(); }, @@ -415,12 +428,12 @@ TEST_F(VideoSendStreamImplTest, SetsScreensharePacingFactorWithFeedback) { TEST_F(VideoSendStreamImplTest, DoesNotSetPacingFactorWithoutFeedback) { test::ScopedFieldTrials alr_experiment(GetAlrProbingExperimentString()); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { + [&] { 
EXPECT_CALL(transport_controller_, SetPacingFactor(_)).Times(0); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); vss_impl->Start(); vss_impl->Stop(); }, @@ -428,15 +441,16 @@ TEST_F(VideoSendStreamImplTest, DoesNotSetPacingFactorWithoutFeedback) { } TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationWhenEnabled) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { + [&] { EXPECT_CALL(transport_controller_, SetPacingFactor(_)).Times(0); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + VideoStreamEncoderInterface::EncoderSink* const sink = + static_cast( + vss_impl.get()); vss_impl->Start(); - VideoBitrateAllocationObserver* const observer = - static_cast(vss_impl.get()); // Populate a test instance of video bitrate allocation. VideoBitrateAllocation alloc; @@ -448,7 +462,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationWhenEnabled) { // Encoder starts out paused, don't forward allocation. EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(0); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); // Unpause encoder, allocation should be passed through. const uint32_t kBitrateBps = 100000; @@ -459,7 +473,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationWhenEnabled) { ->OnBitrateUpdated(CreateAllocation(kBitrateBps)); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(1); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); // Pause encoder again, and block allocations. 
EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps()) @@ -469,7 +483,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationWhenEnabled) { ->OnBitrateUpdated(CreateAllocation(0)); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(0); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); vss_impl->Stop(); }, @@ -477,11 +491,11 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationWhenEnabled) { } TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); // Unpause encoder, to allows allocations to be passed through. const uint32_t kBitrateBps = 100000; @@ -490,8 +504,9 @@ TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) { .WillOnce(Return(kBitrateBps)); static_cast(vss_impl.get()) ->OnBitrateUpdated(CreateAllocation(kBitrateBps)); - VideoBitrateAllocationObserver* const observer = - static_cast(vss_impl.get()); + VideoStreamEncoderInterface::EncoderSink* const sink = + static_cast( + vss_impl.get()); // Populate a test instance of video bitrate allocation. VideoBitrateAllocation alloc; @@ -503,7 +518,7 @@ TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) { // Initial value. EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(1); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); VideoBitrateAllocation updated_alloc = alloc; // Needs 10% increase in bitrate to trigger immediate forward. 
@@ -513,22 +528,22 @@ TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) { // Too small increase, don't forward. updated_alloc.SetBitrate(0, 0, base_layer_min_update_bitrate_bps - 1); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(_)).Times(0); - observer->OnBitrateAllocationUpdated(updated_alloc); + sink->OnBitrateAllocationUpdated(updated_alloc); // Large enough increase, do forward. updated_alloc.SetBitrate(0, 0, base_layer_min_update_bitrate_bps); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(updated_alloc)) .Times(1); - observer->OnBitrateAllocationUpdated(updated_alloc); + sink->OnBitrateAllocationUpdated(updated_alloc); - // This is now a decrease compared to last forward allocation, forward - // immediately. + // This is now a decrease compared to last forward allocation, + // forward immediately. updated_alloc.SetBitrate(0, 0, base_layer_min_update_bitrate_bps - 1); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(updated_alloc)) .Times(1); - observer->OnBitrateAllocationUpdated(updated_alloc); + sink->OnBitrateAllocationUpdated(updated_alloc); vss_impl->Stop(); }, @@ -536,11 +551,11 @@ TEST_F(VideoSendStreamImplTest, ThrottlesVideoBitrateAllocationWhenTooSimilar) { } TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); // Unpause encoder, to allows allocations to be passed through. 
const uint32_t kBitrateBps = 100000; @@ -549,8 +564,9 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) { .WillOnce(Return(kBitrateBps)); static_cast(vss_impl.get()) ->OnBitrateUpdated(CreateAllocation(kBitrateBps)); - VideoBitrateAllocationObserver* const observer = - static_cast(vss_impl.get()); + VideoStreamEncoderInterface::EncoderSink* const sink = + static_cast( + vss_impl.get()); // Populate a test instance of video bitrate allocation. VideoBitrateAllocation alloc; @@ -562,10 +578,10 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) { // Initial value. EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(1); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); - // Move some bitrate from one layer to a new one, but keep sum the same. - // Since layout has changed, immediately trigger forward. + // Move some bitrate from one layer to a new one, but keep sum the + // same. Since layout has changed, immediately trigger forward. 
VideoBitrateAllocation updated_alloc = alloc; updated_alloc.SetBitrate(2, 0, 10000); updated_alloc.SetBitrate(1, 1, alloc.GetBitrate(1, 1) - 10000); @@ -573,7 +589,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) { EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(updated_alloc)) .Times(1); - observer->OnBitrateAllocationUpdated(updated_alloc); + sink->OnBitrateAllocationUpdated(updated_alloc); vss_impl->Stop(); }, @@ -581,11 +597,11 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationOnLayerChange) { } TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kScreen); test_queue_.SendTask( - [this] { - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); + [&] { vss_impl->Start(); const uint32_t kBitrateBps = 100000; // Unpause encoder, to allows allocations to be passed through. @@ -594,8 +610,9 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { .WillRepeatedly(Return(kBitrateBps)); static_cast(vss_impl.get()) ->OnBitrateUpdated(CreateAllocation(kBitrateBps)); - VideoBitrateAllocationObserver* const observer = - static_cast(vss_impl.get()); + VideoStreamEncoderInterface::EncoderSink* const sink = + static_cast( + vss_impl.get()); // Populate a test instance of video bitrate allocation. 
VideoBitrateAllocation alloc; @@ -606,7 +623,7 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { EncodedImage encoded_image; CodecSpecificInfo codec_specific; - EXPECT_CALL(rtp_video_sender_, OnEncodedImage(_, _, _)) + EXPECT_CALL(rtp_video_sender_, OnEncodedImage) .WillRepeatedly(Return(EncodedImageCallback::Result( EncodedImageCallback::Result::OK))); @@ -617,30 +634,31 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { // Initial value. EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(1); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); } { // Sending same allocation again, this one should be throttled. EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(0); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); } clock_.AdvanceTimeMicroseconds(kMaxVbaThrottleTimeMs * 1000); { - // Sending similar allocation again after timeout, should forward. + // Sending similar allocation again after timeout, should + // forward. EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(1); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); } { // Sending similar allocation again without timeout, throttle. EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(0); - observer->OnBitrateAllocationUpdated(alloc); + sink->OnBitrateAllocationUpdated(alloc); } { @@ -648,27 +666,27 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(0); static_cast(vss_impl.get()) - ->OnEncodedImage(encoded_image, &codec_specific, nullptr); + ->OnEncodedImage(encoded_image, &codec_specific); } { - // Advance time and send encoded image, this should wake up and send - // cached bitrate allocation. 
+ // Advance time and send encoded image, this should wake up and + // send cached bitrate allocation. clock_.AdvanceTimeMicroseconds(kMaxVbaThrottleTimeMs * 1000); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(1); static_cast(vss_impl.get()) - ->OnEncodedImage(encoded_image, &codec_specific, nullptr); + ->OnEncodedImage(encoded_image, &codec_specific); } { - // Advance time and send encoded image, there should be no cached - // allocation to send. + // Advance time and send encoded image, there should be no + // cached allocation to send. clock_.AdvanceTimeMicroseconds(kMaxVbaThrottleTimeMs * 1000); EXPECT_CALL(rtp_video_sender_, OnBitrateAllocationUpdated(alloc)) .Times(0); static_cast(vss_impl.get()) - ->OnEncodedImage(encoded_image, &codec_specific, nullptr); + ->OnEncodedImage(encoded_image, &codec_specific); } vss_impl->Stop(); @@ -677,15 +695,15 @@ TEST_F(VideoSendStreamImplTest, ForwardsVideoBitrateAllocationAfterTimeout) { } TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) { + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + config_.rtp.extensions.emplace_back(RtpExtension::kTransportSequenceNumberUri, + 1); + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); + [&] { vss_impl->Start(); VideoStream qvga_stream; @@ -724,8 +742,8 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) { static_cast(vss_impl.get()) ->OnBitrateUpdated(update); - // Test allocation where the link allocation is larger than the target, - // 
meaning we have some headroom on the link. + // Test allocation where the link allocation is larger than the + // target, meaning we have some headroom on the link. const DataRate qvga_max_bitrate = DataRate::BitsPerSec(qvga_stream.max_bitrate_bps); const DataRate headroom = DataRate::BitsPerSec(50000); @@ -741,8 +759,8 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) { static_cast(vss_impl.get()) ->OnBitrateUpdated(update); - // Add protection bitrate to the mix, this should be subtracted from the - // headroom. + // Add protection bitrate to the mix, this should be subtracted + // from the headroom. const uint32_t protection_bitrate_bps = 10000; EXPECT_CALL(rtp_video_sender_, GetProtectionBitrateBps()) .WillOnce(Return(protection_bitrate_bps)); @@ -782,14 +800,11 @@ TEST_F(VideoSendStreamImplTest, CallsVideoStreamEncoderOnBitrateUpdate) { TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) { int padding_bitrate = 0; - std::unique_ptr vss_impl; - + std::unique_ptr vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); test_queue_.SendTask( [&] { - vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); - // Capture padding bitrate for testing. 
EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _)) .WillRepeatedly(Invoke([&](BitrateAllocatorObserver*, @@ -801,7 +816,7 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) { .WillRepeatedly(Invoke( [&](BitrateAllocatorObserver*) { padding_bitrate = 0; })); - EXPECT_CALL(rtp_video_sender_, OnEncodedImage(_, _, _)) + EXPECT_CALL(rtp_video_sender_, OnEncodedImage) .WillRepeatedly(Return(EncodedImageCallback::Result( EncodedImageCallback::Result::OK))); const bool kSuspend = false; @@ -849,7 +864,7 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) { EncodedImage encoded_image; CodecSpecificInfo codec_specific; static_cast(vss_impl.get()) - ->OnEncodedImage(encoded_image, &codec_specific, nullptr); + ->OnEncodedImage(encoded_image, &codec_specific); // Only after actual frame is encoded are we enabling the padding. EXPECT_GT(padding_bitrate, 0); }, @@ -862,7 +877,6 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) { EXPECT_EQ(0, padding_bitrate); testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_); vss_impl->Stop(); - vss_impl.reset(); done.Set(); }, 5000); @@ -872,12 +886,11 @@ TEST_F(VideoSendStreamImplTest, DisablesPaddingOnPausedEncoder) { } TEST_F(VideoSendStreamImplTest, KeepAliveOnDroppedFrame) { - std::unique_ptr vss_impl; + std::unique_ptr vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + VideoEncoderConfig::ContentType::kRealtimeVideo); test_queue_.SendTask( [&] { - vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kRealtimeVideo); vss_impl->Start(); const uint32_t kBitrateBps = 100000; EXPECT_CALL(rtp_video_sender_, GetPayloadBitrateBps()) @@ -900,119 +913,128 @@ TEST_F(VideoSendStreamImplTest, KeepAliveOnDroppedFrame) { [&] { testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_); vss_impl->Stop(); - vss_impl.reset(); done.Set(); }, 2000); 
ASSERT_TRUE(done.Wait(5000)); } -TEST_F(VideoSendStreamImplTest, ConfiguresBitratesForSvcWithAlr) { - test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - config_.periodic_alr_bandwidth_probing = true; - auto vss_impl = CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); - vss_impl->Start(); - - // Svc - VideoStream stream; - stream.width = 1920; - stream.height = 1080; - stream.max_framerate = 30; - stream.min_bitrate_bps = 60000; - stream.target_bitrate_bps = 6000000; - stream.max_bitrate_bps = 1250000; - stream.num_temporal_layers = 2; - stream.max_qp = 56; - stream.bitrate_priority = 1; - - int min_transmit_bitrate_bps = 400000; - - config_.rtp.ssrcs.emplace_back(1); - config_.rtp.ssrcs.emplace_back(2); - - EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _)) - .WillRepeatedly(Invoke([&](BitrateAllocatorObserver*, - MediaStreamAllocationConfig config) { - EXPECT_EQ(config.min_bitrate_bps, - static_cast(stream.min_bitrate_bps)); - EXPECT_EQ(config.max_bitrate_bps, - static_cast(stream.max_bitrate_bps)); - if (config.pad_up_bitrate_bps != 0) { - EXPECT_EQ(config.pad_up_bitrate_bps, - static_cast(min_transmit_bitrate_bps)); - } - EXPECT_EQ(config.enforce_min_bitrate, !kSuspend); - })); - - static_cast(vss_impl.get()) - ->OnEncoderConfigurationChanged( - std::vector{stream}, true, - VideoEncoderConfig::ContentType::kScreen, - min_transmit_bitrate_bps); - vss_impl->Stop(); - }, - RTC_FROM_HERE); -} - -TEST_F(VideoSendStreamImplTest, ConfiguresBitratesForSvcNoAlr) { - test_queue_.SendTask( - [this] { - const bool kSuspend = false; - config_.suspend_below_min_bitrate = kSuspend; - config_.rtp.extensions.emplace_back( - RtpExtension::kTransportSequenceNumberUri, 1); - config_.periodic_alr_bandwidth_probing = false; - auto vss_impl = 
CreateVideoSendStreamImpl( - kDefaultInitialBitrateBps, kDefaultBitratePriority, - VideoEncoderConfig::ContentType::kScreen); - vss_impl->Start(); - - // Svc - VideoStream stream; - stream.width = 1920; - stream.height = 1080; - stream.max_framerate = 30; - stream.min_bitrate_bps = 60000; - stream.target_bitrate_bps = 6000000; - stream.max_bitrate_bps = 1250000; - stream.num_temporal_layers = 2; - stream.max_qp = 56; - stream.bitrate_priority = 1; - - int min_transmit_bitrate_bps = 400000; - - config_.rtp.ssrcs.emplace_back(1); - config_.rtp.ssrcs.emplace_back(2); +TEST_F(VideoSendStreamImplTest, ConfiguresBitratesForSvc) { + struct TestConfig { + bool screenshare = false; + bool alr = false; + int min_padding_bitrate_bps = 0; + }; + + std::vector test_variants; + for (bool screenshare : {false, true}) { + for (bool alr : {false, true}) { + for (int min_padding : {0, 400000}) { + test_variants.push_back({screenshare, alr, min_padding}); + } + } + } - EXPECT_CALL(bitrate_allocator_, AddObserver(vss_impl.get(), _)) - .WillRepeatedly(Invoke([&](BitrateAllocatorObserver*, - MediaStreamAllocationConfig config) { - EXPECT_EQ(config.min_bitrate_bps, - static_cast(stream.min_bitrate_bps)); - EXPECT_EQ(config.max_bitrate_bps, - static_cast(stream.max_bitrate_bps)); - if (config.pad_up_bitrate_bps != 0) { - EXPECT_EQ(config.pad_up_bitrate_bps, - static_cast(stream.target_bitrate_bps)); - } - EXPECT_EQ(config.enforce_min_bitrate, !kSuspend); - })); + for (const TestConfig& test_config : test_variants) { + const bool kSuspend = false; + config_.suspend_below_min_bitrate = kSuspend; + config_.rtp.extensions.emplace_back( + RtpExtension::kTransportSequenceNumberUri, 1); + config_.periodic_alr_bandwidth_probing = test_config.alr; + auto vss_impl = CreateVideoSendStreamImpl( + kDefaultInitialBitrateBps, kDefaultBitratePriority, + test_config.screenshare + ? 
VideoEncoderConfig::ContentType::kScreen + : VideoEncoderConfig::ContentType::kRealtimeVideo); + test_queue_.SendTask( + [&] { + vss_impl->Start(); + + // Svc + VideoStream stream; + stream.width = 1920; + stream.height = 1080; + stream.max_framerate = 30; + stream.min_bitrate_bps = 60000; + stream.target_bitrate_bps = 6000000; + stream.max_bitrate_bps = 1250000; + stream.num_temporal_layers = 2; + stream.max_qp = 56; + stream.bitrate_priority = 1; + + config_.rtp.ssrcs.emplace_back(1); + config_.rtp.ssrcs.emplace_back(2); + + EXPECT_CALL( + bitrate_allocator_, + AddObserver( + vss_impl.get(), + AllOf(Field(&MediaStreamAllocationConfig::min_bitrate_bps, + static_cast(stream.min_bitrate_bps)), + Field(&MediaStreamAllocationConfig::max_bitrate_bps, + static_cast(stream.max_bitrate_bps)), + // Stream not yet active - no padding. + Field(&MediaStreamAllocationConfig::pad_up_bitrate_bps, + 0u), + Field(&MediaStreamAllocationConfig::enforce_min_bitrate, + !kSuspend)))); + + static_cast(vss_impl.get()) + ->OnEncoderConfigurationChanged( + std::vector{stream}, true, + test_config.screenshare + ? VideoEncoderConfig::ContentType::kScreen + : VideoEncoderConfig::ContentType::kRealtimeVideo, + test_config.min_padding_bitrate_bps); + ::testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_); + + // Simulate an encoded image, this will turn the stream active and + // enable padding. + EncodedImage encoded_image; + CodecSpecificInfo codec_specific; + EXPECT_CALL(rtp_video_sender_, OnEncodedImage) + .WillRepeatedly(Return(EncodedImageCallback::Result( + EncodedImageCallback::Result::OK))); + + // Screensharing implicitly forces ALR. + const bool using_alr = test_config.alr || test_config.screenshare; + // If ALR is used, pads only to min bitrate as rampup is handled by + // probing. Otherwise target_bitrate contains the padding target. + const RateControlSettings trials = + RateControlSettings::ParseFromFieldTrials(); + int expected_padding = + using_alr + ? 
stream.min_bitrate_bps + : static_cast(stream.target_bitrate_bps * + trials.GetSimulcastHysteresisFactor( + test_config.screenshare + ? VideoCodecMode::kScreensharing + : VideoCodecMode::kRealtimeVideo)); + // Min padding bitrate may override padding target. + expected_padding = + std::max(expected_padding, test_config.min_padding_bitrate_bps); + EXPECT_CALL( + bitrate_allocator_, + AddObserver( + vss_impl.get(), + AllOf(Field(&MediaStreamAllocationConfig::min_bitrate_bps, + static_cast(stream.min_bitrate_bps)), + Field(&MediaStreamAllocationConfig::max_bitrate_bps, + static_cast(stream.max_bitrate_bps)), + // Stream now active - min bitrate use as padding target + // when ALR is active. + Field(&MediaStreamAllocationConfig::pad_up_bitrate_bps, + expected_padding), + Field(&MediaStreamAllocationConfig::enforce_min_bitrate, + !kSuspend)))); + static_cast(vss_impl.get()) + ->OnEncodedImage(encoded_image, &codec_specific); + ::testing::Mock::VerifyAndClearExpectations(&bitrate_allocator_); - static_cast(vss_impl.get()) - ->OnEncoderConfigurationChanged( - std::vector{stream}, true, - VideoEncoderConfig::ContentType::kScreen, - min_transmit_bitrate_bps); - vss_impl->Stop(); - }, - RTC_FROM_HERE); + vss_impl->Stop(); + }, + RTC_FROM_HERE); + } } } // namespace internal } // namespace webrtc diff --git a/video/video_send_stream_tests.cc b/video/video_send_stream_tests.cc index 63acb80b8d..42963cb8ee 100644 --- a/video/video_send_stream_tests.cc +++ b/video/video_send_stream_tests.cc @@ -12,6 +12,7 @@ #include #include "absl/algorithm/container.h" +#include "api/sequence_checker.h" #include "api/task_queue/default_task_queue_factory.h" #include "api/task_queue/task_queue_base.h" #include "api/test/simulated_network.h" @@ -25,21 +26,21 @@ #include "call/simulated_network.h" #include "call/video_send_stream.h" #include "modules/rtp_rtcp/include/rtp_header_extension_map.h" -#include "modules/rtp_rtcp/include/rtp_rtcp.h" #include "modules/rtp_rtcp/source/rtcp_sender.h" 
#include "modules/rtp_rtcp/source/rtp_header_extensions.h" #include "modules/rtp_rtcp/source/rtp_packet.h" +#include "modules/rtp_rtcp/source/rtp_rtcp_impl2.h" +#include "modules/rtp_rtcp/source/rtp_util.h" #include "modules/rtp_rtcp/source/video_rtp_depacketizer_vp9.h" #include "modules/video_coding/codecs/vp8/include/vp8.h" #include "modules/video_coding/codecs/vp9/include/vp9.h" #include "rtc_base/checks.h" -#include "rtc_base/critical_section.h" #include "rtc_base/event.h" #include "rtc_base/experiments/alr_experiment.h" #include "rtc_base/logging.h" #include "rtc_base/platform_thread.h" #include "rtc_base/rate_limiter.h" -#include "rtc_base/synchronization/sequence_checker.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue_for_test.h" #include "rtc_base/task_utils/to_queued_task.h" #include "rtc_base/time_utils.h" @@ -57,7 +58,6 @@ #include "test/gtest.h" #include "test/null_transport.h" #include "test/rtcp_packet_parser.h" -#include "test/rtp_header_parser.h" #include "test/testsupport/perf_test.h" #include "test/video_encoder_proxy_factory.h" #include "video/send_statistics_proxy.h" @@ -90,6 +90,9 @@ enum : int { // The first valid value is 1. kVideoTimingExtensionId, }; +// Readability convenience enum for `WaitBitrateChanged()`. 
+enum class WaitUntil : bool { kZero = false, kNonZero = true }; + constexpr int64_t kRtcpIntervalMs = 1000; enum VideoFormat { @@ -948,10 +951,10 @@ void VideoSendStreamTest::TestNackRetransmission( non_padding_sequence_numbers_.end() - kNackedPacketsAtOnceCount, non_padding_sequence_numbers_.end()); - RtpRtcp::Configuration config; + RTCPSender::Configuration config; config.clock = Clock::GetRealTimeClock(); config.outgoing_transport = transport_adapter_.get(); - config.rtcp_report_interval_ms = kRtcpIntervalMs; + config.rtcp_report_interval = TimeDelta::Millis(kRtcpIntervalMs); config.local_media_ssrc = kReceiverLocalVideoSsrc; RTCPSender rtcp_sender(config); @@ -1140,7 +1143,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format, fec_packet_received_ = false; ++current_size_rtp_; - rtc::CritScope lock(&mutex_); + MutexLock lock(&mutex_); ++current_size_frame_; } } @@ -1164,11 +1167,11 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format, kVideoSendSsrcs[0], rtp_packet.SequenceNumber(), packets_lost_, // Cumulative lost. loss_ratio); // Loss percent. - RtpRtcp::Configuration config; + RTCPSender::Configuration config; config.clock = Clock::GetRealTimeClock(); config.receive_statistics = &lossy_receive_stats; config.outgoing_transport = transport_adapter_.get(); - config.rtcp_report_interval_ms = kRtcpIntervalMs; + config.rtcp_report_interval = TimeDelta::Millis(kRtcpIntervalMs); config.local_media_ssrc = kVideoSendSsrcs[0]; RTCPSender rtcp_sender(config); @@ -1182,7 +1185,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format, } void UpdateConfiguration() { - rtc::CritScope lock(&mutex_); + MutexLock lock(&mutex_); // Increase frame size for next encoded frame, in the context of the // encoder thread. 
if (!use_fec_ && current_size_frame_ < static_cast(stop_size_)) { @@ -1247,7 +1250,7 @@ void VideoSendStreamTest::TestPacketFragmentationSize(VideoFormat format, bool fec_packet_received_; size_t current_size_rtp_; - rtc::CriticalSection mutex_; + Mutex mutex_; int current_size_frame_ RTC_GUARDED_BY(mutex_); }; @@ -1276,180 +1279,6 @@ TEST_F(VideoSendStreamTest, FragmentsVp8AccordingToMaxPacketSizeWithFec) { TestPacketFragmentationSize(kVP8, true); } -// The test will go through a number of phases. -// 1. Start sending packets. -// 2. As soon as the RTP stream has been detected, signal a low REMB value to -// suspend the stream. -// 3. Wait until |kSuspendTimeFrames| have been captured without seeing any RTP -// packets. -// 4. Signal a high REMB and then wait for the RTP stream to start again. -// When the stream is detected again, and the stats show that the stream -// is no longer suspended, the test ends. -TEST_F(VideoSendStreamTest, SuspendBelowMinBitrate) { - static const int kSuspendTimeFrames = 60; // Suspend for 2 seconds @ 30 fps. 
- - class RembObserver : public test::SendTest { - public: - class CaptureObserver : public rtc::VideoSinkInterface { - public: - explicit CaptureObserver(RembObserver* remb_observer) - : remb_observer_(remb_observer) {} - - void OnFrame(const VideoFrame&) { - rtc::CritScope lock(&remb_observer_->crit_); - if (remb_observer_->test_state_ == kDuringSuspend && - ++remb_observer_->suspended_frame_count_ > kSuspendTimeFrames) { - VideoSendStream::Stats stats = remb_observer_->stream_->GetStats(); - EXPECT_TRUE(stats.suspended); - remb_observer_->SendRtcpFeedback(remb_observer_->high_remb_bps_); - remb_observer_->test_state_ = kWaitingForPacket; - } - } - - private: - RembObserver* const remb_observer_; - }; - - RembObserver() - : SendTest(kDefaultTimeoutMs), - clock_(Clock::GetRealTimeClock()), - capture_observer_(this), - stream_(nullptr), - test_state_(kBeforeSuspend), - rtp_count_(0), - last_sequence_number_(0), - suspended_frame_count_(0), - low_remb_bps_(0), - high_remb_bps_(0) {} - - private: - Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); - ++rtp_count_; - RtpPacket rtp_packet; - EXPECT_TRUE(rtp_packet.Parse(packet, length)); - last_sequence_number_ = rtp_packet.SequenceNumber(); - - if (test_state_ == kBeforeSuspend) { - // The stream has started. Try to suspend it. - SendRtcpFeedback(low_remb_bps_); - test_state_ = kDuringSuspend; - } else if (test_state_ == kDuringSuspend) { - if (rtp_packet.padding_size() == 0) { - // Received non-padding packet during suspension period. Reset the - // counter. - suspended_frame_count_ = 0; - } - SendRtcpFeedback(0); // REMB is only sent if value is > 0. - } else if (test_state_ == kWaitingForPacket) { - if (rtp_packet.padding_size() == 0) { - // Non-padding packet observed. Test is almost complete. Will just - // have to wait for the stats to change. - test_state_ = kWaitingForStats; - } - SendRtcpFeedback(0); // REMB is only sent if value is > 0. 
- } else if (test_state_ == kWaitingForStats) { - VideoSendStream::Stats stats = stream_->GetStats(); - if (stats.suspended == false) { - // Stats flipped to false. Test is complete. - observation_complete_.Set(); - } - SendRtcpFeedback(0); // REMB is only sent if value is > 0. - } - - return SEND_PACKET; - } - - void set_low_remb_bps(int value) { - rtc::CritScope lock(&crit_); - low_remb_bps_ = value; - } - - void set_high_remb_bps(int value) { - rtc::CritScope lock(&crit_); - high_remb_bps_ = value; - } - - void OnVideoStreamsCreated( - VideoSendStream* send_stream, - const std::vector& receive_streams) override { - stream_ = send_stream; - } - - void OnFrameGeneratorCapturerCreated( - test::FrameGeneratorCapturer* frame_generator_capturer) override { - frame_generator_capturer->AddOrUpdateSink(&capture_observer_, - rtc::VideoSinkWants()); - } - - void ModifyVideoConfigs( - VideoSendStream::Config* send_config, - std::vector* receive_configs, - VideoEncoderConfig* encoder_config) override { - RTC_DCHECK_EQ(1, encoder_config->number_of_streams); - transport_adapter_.reset( - new internal::TransportAdapter(send_config->send_transport)); - transport_adapter_->Enable(); - send_config->rtp.nack.rtp_history_ms = kNackRtpHistoryMs; - send_config->suspend_below_min_bitrate = true; - int min_bitrate_bps = - test::DefaultVideoStreamFactory::kDefaultMinBitratePerStream[0]; - set_low_remb_bps(min_bitrate_bps - 10000); - int threshold_window = std::max(min_bitrate_bps / 10, 20000); - ASSERT_GT(encoder_config->max_bitrate_bps, - min_bitrate_bps + threshold_window + 5000); - set_high_remb_bps(min_bitrate_bps + threshold_window + 5000); - } - - void PerformTest() override { - EXPECT_TRUE(Wait()) << "Timed out during suspend-below-min-bitrate test."; - } - - enum TestState { - kBeforeSuspend, - kDuringSuspend, - kWaitingForPacket, - kWaitingForStats - }; - - virtual void SendRtcpFeedback(int remb_value) - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_) { - FakeReceiveStatistics 
receive_stats(kVideoSendSsrcs[0], - last_sequence_number_, rtp_count_, 0); - RtpRtcp::Configuration config; - config.clock = clock_; - config.receive_statistics = &receive_stats; - config.outgoing_transport = transport_adapter_.get(); - config.rtcp_report_interval_ms = kRtcpIntervalMs; - config.local_media_ssrc = kVideoSendSsrcs[0]; - RTCPSender rtcp_sender(config); - - rtcp_sender.SetRTCPStatus(RtcpMode::kReducedSize); - rtcp_sender.SetRemoteSSRC(kVideoSendSsrcs[0]); - if (remb_value > 0) { - rtcp_sender.SetRemb(remb_value, std::vector()); - } - RTCPSender::FeedbackState feedback_state; - EXPECT_EQ(0, rtcp_sender.SendRTCP(feedback_state, kRtcpRr)); - } - - std::unique_ptr transport_adapter_; - Clock* const clock_; - CaptureObserver capture_observer_; - VideoSendStream* stream_; - - rtc::CriticalSection crit_; - TestState test_state_ RTC_GUARDED_BY(crit_); - int rtp_count_ RTC_GUARDED_BY(crit_); - int last_sequence_number_ RTC_GUARDED_BY(crit_); - int suspended_frame_count_ RTC_GUARDED_BY(crit_); - int low_remb_bps_ RTC_GUARDED_BY(crit_); - int high_remb_bps_ RTC_GUARDED_BY(crit_); - } test; - - RunBaseTest(&test); -} - // This test that padding stops being send after a while if the Camera stops // producing video frames and that padding resumes if the camera restarts. 
TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) { @@ -1462,7 +1291,7 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); last_packet_time_ms_ = clock_->TimeInMilliseconds(); RtpPacket rtp_packet; @@ -1490,7 +1319,7 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) { } Action OnSendRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); const int kNoPacketsThresholdMs = 2000; if (test_state_ == kWaitingForNoPackets && (last_packet_time_ms_ && @@ -1513,7 +1342,7 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) { void OnFrameGeneratorCapturerCreated( test::FrameGeneratorCapturer* frame_generator_capturer) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); capturer_ = frame_generator_capturer; } @@ -1532,9 +1361,9 @@ TEST_F(VideoSendStreamTest, NoPaddingWhenVideoIsMuted) { TestState test_state_ = kBeforeStopCapture; Clock* const clock_; - rtc::CriticalSection crit_; - absl::optional last_packet_time_ms_ RTC_GUARDED_BY(crit_); - test::FrameGeneratorCapturer* capturer_ RTC_GUARDED_BY(crit_); + Mutex mutex_; + absl::optional last_packet_time_ms_ RTC_GUARDED_BY(mutex_); + test::FrameGeneratorCapturer* capturer_ RTC_GUARDED_BY(mutex_); } test; RunBaseTest(&test); @@ -1557,7 +1386,7 @@ TEST_F(VideoSendStreamTest, PaddingIsPrimarilyRetransmissions) { } Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; rtp_packet.Parse(packet, length); @@ -1597,16 +1426,16 @@ TEST_F(VideoSendStreamTest, PaddingIsPrimarilyRetransmissions) { // rid of this. SleepMs(5000); { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // Expect padding to be a small percentage of total bytes sent. 
EXPECT_LT(padding_length_, .1 * total_length_); } } - rtc::CriticalSection crit_; + Mutex mutex_; Clock* const clock_; - size_t padding_length_ RTC_GUARDED_BY(crit_); - size_t total_length_ RTC_GUARDED_BY(crit_); + size_t padding_length_ RTC_GUARDED_BY(mutex_); + size_t total_length_ RTC_GUARDED_BY(mutex_); Call* call_; } test; @@ -1641,14 +1470,16 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - if (RtpHeaderParser::IsRtcp(packet, length)) + if (IsRtcpPacket(rtc::MakeArrayView(packet, length))) return DROP_PACKET; RtpPacket rtp_packet; if (!rtp_packet.Parse(packet, length)) return DROP_PACKET; RTC_DCHECK(stream_); - VideoSendStream::Stats stats = stream_->GetStats(); + VideoSendStream::Stats stats; + SendTask(RTC_FROM_HERE, task_queue_, + [&]() { stats = stream_->GetStats(); }); if (!stats.substreams.empty()) { EXPECT_EQ(1u, stats.substreams.size()); int total_bitrate_bps = @@ -1658,7 +1489,6 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) { "bps", false); if (total_bitrate_bps > kHighBitrateBps) { rtp_rtcp_->SetRemb(kRembBitrateBps, {rtp_packet.Ssrc()}); - rtp_rtcp_->Process(); bitrate_capped_ = true; } else if (bitrate_capped_ && total_bitrate_bps < kRembRespectedBitrateBps) { @@ -1673,11 +1503,11 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) { VideoSendStream* send_stream, const std::vector& receive_streams) override { stream_ = send_stream; - RtpRtcp::Configuration config; + RtpRtcpInterface::Configuration config; config.clock = Clock::GetRealTimeClock(); config.outgoing_transport = feedback_transport_.get(); config.retransmission_rate_limiter = &retranmission_rate_limiter_; - rtp_rtcp_ = RtpRtcp::Create(config); + rtp_rtcp_ = ModuleRtpRtcpImpl2::Create(config); rtp_rtcp_->SetRTCPStatus(RtcpMode::kReducedSize); } @@ -1697,7 +1527,7 @@ TEST_F(VideoSendStreamTest, MinTransmitBitrateRespectsRemb) { } TaskQueueBase* const task_queue_; - 
std::unique_ptr rtp_rtcp_; + std::unique_ptr rtp_rtcp_; std::unique_ptr feedback_transport_; RateLimiter retranmission_rate_limiter_; VideoSendStream* stream_; @@ -1946,7 +1776,7 @@ TEST_F(VideoSendStreamTest, ChangingTransportOverhead) { Action OnSendRtp(const uint8_t* packet, size_t length) override { EXPECT_LE(length, kMaxRtpPacketSize); - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (++packets_sent_ < 100) return SEND_PACKET; observation_complete_.Set(); @@ -1970,7 +1800,7 @@ TEST_F(VideoSendStreamTest, ChangingTransportOverhead) { EXPECT_TRUE(Wait()); { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); packets_sent_ = 0; } @@ -1986,7 +1816,7 @@ TEST_F(VideoSendStreamTest, ChangingTransportOverhead) { private: TaskQueueBase* const task_queue_; Call* call_; - rtc::CriticalSection lock_; + Mutex lock_; int packets_sent_ RTC_GUARDED_BY(lock_); int transport_overhead_; const size_t kMaxRtpPacketSize = 1000; @@ -2156,13 +1986,12 @@ TEST_F(VideoSendStreamTest, public: EncoderObserver() : FakeEncoder(Clock::GetRealTimeClock()), - number_of_initializations_(0), last_initialized_frame_width_(0), last_initialized_frame_height_(0) {} void WaitForResolution(int width, int height) { { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (last_initialized_frame_width_ == width && last_initialized_frame_height_ == height) { return; @@ -2171,7 +2000,7 @@ TEST_F(VideoSendStreamTest, EXPECT_TRUE( init_encode_called_.Wait(VideoSendStreamTest::kDefaultTimeoutMs)); { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); EXPECT_EQ(width, last_initialized_frame_width_); EXPECT_EQ(height, last_initialized_frame_height_); } @@ -2180,10 +2009,9 @@ TEST_F(VideoSendStreamTest, private: int32_t InitEncode(const VideoCodec* config, const Settings& settings) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); last_initialized_frame_width_ = config->width; last_initialized_frame_height_ = config->height; - ++number_of_initializations_; 
init_encode_called_.Set(); return FakeEncoder::InitEncode(config, settings); } @@ -2195,11 +2023,10 @@ TEST_F(VideoSendStreamTest, return 0; } - rtc::CriticalSection crit_; + Mutex mutex_; rtc::Event init_encode_called_; - size_t number_of_initializations_ RTC_GUARDED_BY(&crit_); - int last_initialized_frame_width_ RTC_GUARDED_BY(&crit_); - int last_initialized_frame_height_ RTC_GUARDED_BY(&crit_); + int last_initialized_frame_width_ RTC_GUARDED_BY(&mutex_); + int last_initialized_frame_height_ RTC_GUARDED_BY(&mutex_); }; test::NullTransport transport; @@ -2238,21 +2065,21 @@ TEST_F(VideoSendStreamTest, CanReconfigureToUseStartBitrateAbovePreviousMax) { : FakeEncoder(Clock::GetRealTimeClock()), start_bitrate_kbps_(0) {} int32_t InitEncode(const VideoCodec* config, const Settings& settings) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); start_bitrate_kbps_ = config->startBitrate; start_bitrate_changed_.Set(); return FakeEncoder::InitEncode(config, settings); } void SetRates(const RateControlParameters& parameters) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); start_bitrate_kbps_ = parameters.bitrate.get_sum_kbps(); start_bitrate_changed_.Set(); FakeEncoder::SetRates(parameters); } int GetStartBitrateKbps() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return start_bitrate_kbps_; } @@ -2262,9 +2089,9 @@ TEST_F(VideoSendStreamTest, CanReconfigureToUseStartBitrateAbovePreviousMax) { } private: - rtc::CriticalSection crit_; + mutable Mutex mutex_; rtc::Event start_bitrate_changed_; - int start_bitrate_kbps_ RTC_GUARDED_BY(crit_); + int start_bitrate_kbps_ RTC_GUARDED_BY(mutex_); }; CreateSenderCall(); @@ -2280,13 +2107,15 @@ TEST_F(VideoSendStreamTest, CanReconfigureToUseStartBitrateAbovePreviousMax) { StartBitrateObserver encoder; test::VideoEncoderProxyFactory encoder_factory(&encoder); - // Since this test does not use a capturer, set |internal_source| = true. 
- // Encoder configuration is otherwise updated on the next video frame. - encoder_factory.SetHasInternalSource(true); GetVideoSendConfig()->encoder_settings.encoder_factory = &encoder_factory; CreateVideoStreams(); + // Start capturing and encoding frames to force encoder reconfiguration. + CreateFrameGeneratorCapturer(kDefaultFramerate, kDefaultWidth, + kDefaultHeight); + frame_generator_capturer_->Start(); + EXPECT_TRUE(encoder.WaitForStartBitrate()); EXPECT_EQ(GetVideoEncoderConfig()->max_bitrate_bps / 1000, encoder.GetStartBitrateKbps()); @@ -2311,13 +2140,13 @@ class StartStopBitrateObserver : public test::FakeEncoder { StartStopBitrateObserver() : FakeEncoder(Clock::GetRealTimeClock()) {} int32_t InitEncode(const VideoCodec* config, const Settings& settings) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); encoder_init_.Set(); return FakeEncoder::InitEncode(config, settings); } void SetRates(const RateControlParameters& parameters) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); bitrate_kbps_ = parameters.bitrate.get_sum_kbps(); bitrate_changed_.Set(); FakeEncoder::SetRates(parameters); @@ -2327,18 +2156,18 @@ class StartStopBitrateObserver : public test::FakeEncoder { return encoder_init_.Wait(VideoSendStreamTest::kDefaultTimeoutMs); } - bool WaitBitrateChanged(bool non_zero) { + bool WaitBitrateChanged(WaitUntil until) { do { absl::optional bitrate_kbps; { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); bitrate_kbps = bitrate_kbps_; } if (!bitrate_kbps) continue; - if ((non_zero && *bitrate_kbps > 0) || - (!non_zero && *bitrate_kbps == 0)) { + if ((until == WaitUntil::kNonZero && *bitrate_kbps > 0) || + (until == WaitUntil::kZero && *bitrate_kbps == 0)) { return true; } } while (bitrate_changed_.Wait(VideoSendStreamTest::kDefaultTimeoutMs)); @@ -2346,10 +2175,10 @@ class StartStopBitrateObserver : public test::FakeEncoder { } private: - rtc::CriticalSection crit_; + Mutex mutex_; rtc::Event encoder_init_; 
rtc::Event bitrate_changed_; - absl::optional bitrate_kbps_ RTC_GUARDED_BY(crit_); + absl::optional bitrate_kbps_ RTC_GUARDED_BY(mutex_); }; // This test that if the encoder use an internal source, VideoEncoder::SetRates @@ -2385,15 +2214,15 @@ TEST_F(VideoSendStreamTest, VideoSendStreamStopSetEncoderRateToZero) { SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->Start(); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(true)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->Stop(); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(false)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kZero)); SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->Start(); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(true)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); SendTask(RTC_FROM_HERE, task_queue(), [this]() { DestroyStreams(); @@ -2425,6 +2254,8 @@ TEST_F(VideoSendStreamTest, VideoSendStreamUpdateActiveSimulcastLayers) { CreateVideoStreams(); + EXPECT_FALSE(GetVideoSendStream()->started()); + // Inject a frame, to force encoder creation. GetVideoSendStream()->Start(); GetVideoSendStream()->SetSource(&forwarder, @@ -2438,8 +2269,9 @@ TEST_F(VideoSendStreamTest, VideoSendStreamUpdateActiveSimulcastLayers) { // which in turn updates the VideoEncoder's bitrate. 
SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->UpdateActiveSimulcastLayers({true, true}); + EXPECT_TRUE(GetVideoSendStream()->started()); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(true)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); GetVideoEncoderConfig()->simulcast_layers[0].active = true; GetVideoEncoderConfig()->simulcast_layers[1].active = false; @@ -2447,22 +2279,40 @@ TEST_F(VideoSendStreamTest, VideoSendStreamUpdateActiveSimulcastLayers) { GetVideoSendStream()->ReconfigureVideoEncoder( GetVideoEncoderConfig()->Copy()); }); - // TODO(bugs.webrtc.org/8807): Currently we require a hard reconfiguration to - // update the VideoBitrateAllocator and BitrateAllocator of which layers are - // active. Once the change is made for a "soft" reconfiguration we can remove - // the expecation for an encoder init. We can also test that bitrate changes - // when just updating individual active layers, which should change the - // bitrate set to the video encoder. - EXPECT_TRUE(encoder.WaitForEncoderInit()); - EXPECT_TRUE(encoder.WaitBitrateChanged(true)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); // Turning off both simulcast layers should trigger a bitrate change of 0. GetVideoEncoderConfig()->simulcast_layers[0].active = false; GetVideoEncoderConfig()->simulcast_layers[1].active = false; SendTask(RTC_FROM_HERE, task_queue(), [this]() { GetVideoSendStream()->UpdateActiveSimulcastLayers({false, false}); + EXPECT_FALSE(GetVideoSendStream()->started()); + }); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kZero)); + + // Re-activating a layer should resume sending and trigger a bitrate change. 
+ GetVideoEncoderConfig()->simulcast_layers[0].active = true; + SendTask(RTC_FROM_HERE, task_queue(), [this]() { + GetVideoSendStream()->UpdateActiveSimulcastLayers({true, false}); + EXPECT_TRUE(GetVideoSendStream()->started()); + }); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); + + // Stop the stream and make sure the bit rate goes to zero again. + SendTask(RTC_FROM_HERE, task_queue(), [this]() { + GetVideoSendStream()->Stop(); + EXPECT_FALSE(GetVideoSendStream()->started()); + }); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kZero)); + + // One last test to verify that after `Stop()` we can still implicitly start + // the stream if needed. This is what will happen when a send stream gets + // re-used. See crbug.com/1241213. + SendTask(RTC_FROM_HERE, task_queue(), [this]() { + GetVideoSendStream()->UpdateActiveSimulcastLayers({true, true}); + EXPECT_TRUE(GetVideoSendStream()->started()); }); - EXPECT_TRUE(encoder.WaitBitrateChanged(false)); + EXPECT_TRUE(encoder.WaitBitrateChanged(WaitUntil::kNonZero)); SendTask(RTC_FROM_HERE, task_queue(), [this]() { DestroyStreams(); @@ -2483,30 +2333,35 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) { released_(false), encoder_factory_(this) {} - bool IsReleased() { - rtc::CritScope lock(&crit_); + bool IsReleased() RTC_LOCKS_EXCLUDED(mutex_) { + MutexLock lock(&mutex_); return released_; } - bool IsReadyForEncode() { - rtc::CritScope lock(&crit_); - return initialized_ && callback_registered_; + bool IsReadyForEncode() RTC_LOCKS_EXCLUDED(mutex_) { + MutexLock lock(&mutex_); + return IsReadyForEncodeLocked(); } - size_t num_releases() { - rtc::CritScope lock(&crit_); + size_t num_releases() RTC_LOCKS_EXCLUDED(mutex_) { + MutexLock lock(&mutex_); return num_releases_; } private: + bool IsReadyForEncodeLocked() RTC_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { + return initialized_ && callback_registered_; + } + void SetFecControllerOverride( FecControllerOverride* fec_controller_override) 
override { // Ignored. } int32_t InitEncode(const VideoCodec* codecSettings, - const Settings& settings) override { - rtc::CritScope lock(&crit_); + const Settings& settings) override + RTC_LOCKS_EXCLUDED(mutex_) { + MutexLock lock(&mutex_); EXPECT_FALSE(initialized_); initialized_ = true; released_ = false; @@ -2522,16 +2377,16 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) { } int32_t RegisterEncodeCompleteCallback( - EncodedImageCallback* callback) override { - rtc::CritScope lock(&crit_); + EncodedImageCallback* callback) override RTC_LOCKS_EXCLUDED(mutex_) { + MutexLock lock(&mutex_); EXPECT_TRUE(initialized_); callback_registered_ = true; return 0; } - int32_t Release() override { - rtc::CritScope lock(&crit_); - EXPECT_TRUE(IsReadyForEncode()); + int32_t Release() override RTC_LOCKS_EXCLUDED(mutex_) { + MutexLock lock(&mutex_); + EXPECT_TRUE(IsReadyForEncodeLocked()); EXPECT_FALSE(released_); initialized_ = false; callback_registered_ = false; @@ -2577,12 +2432,12 @@ TEST_F(VideoSendStreamTest, EncoderIsProperlyInitializedAndDestroyed) { } TaskQueueBase* const task_queue_; - rtc::CriticalSection crit_; + Mutex mutex_; VideoSendStream* stream_; - bool initialized_ RTC_GUARDED_BY(crit_); - bool callback_registered_ RTC_GUARDED_BY(crit_); - size_t num_releases_ RTC_GUARDED_BY(crit_); - bool released_ RTC_GUARDED_BY(crit_); + bool initialized_ RTC_GUARDED_BY(mutex_); + bool callback_registered_ RTC_GUARDED_BY(mutex_); + size_t num_releases_ RTC_GUARDED_BY(mutex_); + bool released_ RTC_GUARDED_BY(mutex_); test::VideoEncoderProxyFactory encoder_factory_; VideoEncoderConfig encoder_config_; } test_encoder(task_queue()); @@ -2599,14 +2454,16 @@ class VideoCodecConfigObserver : public test::SendTest, public test::FakeEncoder { public: VideoCodecConfigObserver(VideoCodecType video_codec_type, - const char* codec_name) + const char* codec_name, + TaskQueueBase* task_queue) : SendTest(VideoSendStreamTest::kDefaultTimeoutMs), 
FakeEncoder(Clock::GetRealTimeClock()), video_codec_type_(video_codec_type), codec_name_(codec_name), num_initializations_(0), stream_(nullptr), - encoder_factory_(this) { + encoder_factory_(this), + task_queue_(task_queue) { InitCodecSpecifics(); } @@ -2654,7 +2511,9 @@ class VideoCodecConfigObserver : public test::SendTest, // Change encoder settings to actually trigger reconfiguration. encoder_settings_.frameDroppingOn = !encoder_settings_.frameDroppingOn; encoder_config_.encoder_specific_settings = GetEncoderSpecificSettings(); - stream_->ReconfigureVideoEncoder(std::move(encoder_config_)); + SendTask(RTC_FROM_HERE, task_queue_, [&]() { + stream_->ReconfigureVideoEncoder(std::move(encoder_config_)); + }); ASSERT_TRUE( init_encode_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs)); EXPECT_EQ(2u, num_initializations_) @@ -2676,6 +2535,7 @@ class VideoCodecConfigObserver : public test::SendTest, VideoSendStream* stream_; test::VideoEncoderProxyFactory encoder_factory_; VideoEncoderConfig encoder_config_; + TaskQueueBase* task_queue_; }; template <> @@ -2708,8 +2568,8 @@ void VideoCodecConfigObserver::VerifyCodecSpecifics( template <> rtc::scoped_refptr VideoCodecConfigObserver::GetEncoderSpecificSettings() const { - return new rtc::RefCountedObject< - VideoEncoderConfig::H264EncoderSpecificSettings>(encoder_settings_); + return rtc::make_ref_counted( + encoder_settings_); } template <> @@ -2742,8 +2602,8 @@ void VideoCodecConfigObserver::VerifyCodecSpecifics( template <> rtc::scoped_refptr VideoCodecConfigObserver::GetEncoderSpecificSettings() const { - return new rtc::RefCountedObject< - VideoEncoderConfig::Vp8EncoderSpecificSettings>(encoder_settings_); + return rtc::make_ref_counted( + encoder_settings_); } template <> @@ -2776,17 +2636,19 @@ void VideoCodecConfigObserver::VerifyCodecSpecifics( template <> rtc::scoped_refptr VideoCodecConfigObserver::GetEncoderSpecificSettings() const { - return new rtc::RefCountedObject< - 
VideoEncoderConfig::Vp9EncoderSpecificSettings>(encoder_settings_); + return rtc::make_ref_counted( + encoder_settings_); } TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp8Config) { - VideoCodecConfigObserver test(kVideoCodecVP8, "VP8"); + VideoCodecConfigObserver test(kVideoCodecVP8, "VP8", + task_queue()); RunBaseTest(&test); } TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp9Config) { - VideoCodecConfigObserver test(kVideoCodecVP9, "VP9"); + VideoCodecConfigObserver test(kVideoCodecVP9, "VP9", + task_queue()); RunBaseTest(&test); } @@ -2798,7 +2660,8 @@ TEST_F(VideoSendStreamTest, EncoderSetupPropagatesVp9Config) { #define MAYBE_EncoderSetupPropagatesH264Config EncoderSetupPropagatesH264Config #endif TEST_F(VideoSendStreamTest, MAYBE_EncoderSetupPropagatesH264Config) { - VideoCodecConfigObserver test(kVideoCodecH264, "H264"); + VideoCodecConfigObserver test(kVideoCodecH264, "H264", + task_queue()); RunBaseTest(&test); } @@ -2812,7 +2675,7 @@ TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) { private: Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); RtpPacket rtp_packet; EXPECT_TRUE(rtp_packet.Parse(packet, length)); ++rtp_packets_sent_; @@ -2821,7 +2684,7 @@ TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) { } Action OnSendRtcp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); test::RtcpPacketParser parser; EXPECT_TRUE(parser.Parse(packet, length)); @@ -2845,9 +2708,9 @@ TEST_F(VideoSendStreamTest, RtcpSenderReportContainsMediaBytesSent) { EXPECT_TRUE(Wait()) << "Timed out while waiting for RTCP sender report."; } - rtc::CriticalSection crit_; - size_t rtp_packets_sent_ RTC_GUARDED_BY(&crit_); - size_t media_bytes_sent_ RTC_GUARDED_BY(&crit_); + Mutex mutex_; + size_t rtp_packets_sent_ RTC_GUARDED_BY(&mutex_); + size_t media_bytes_sent_ RTC_GUARDED_BY(&mutex_); } test; 
RunBaseTest(&test); @@ -2903,7 +2766,7 @@ TEST_F(VideoSendStreamTest, TranslatesTwoLayerScreencastToTargetBitrate) { send_config->encoder_settings.encoder_factory = &encoder_factory_; EXPECT_EQ(1u, encoder_config->number_of_streams); encoder_config->video_stream_factory = - new rtc::RefCountedObject(); + rtc::make_ref_counted(); EXPECT_EQ(1u, encoder_config->simulcast_layers.size()); encoder_config->simulcast_layers[0].num_temporal_layers = 2; encoder_config->content_type = VideoEncoderConfig::ContentType::kScreen; @@ -2928,12 +2791,13 @@ TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) { static const int kMaxBitrateKbps = 413; static const int kIncreasedStartBitrateKbps = 451; static const int kIncreasedMaxBitrateKbps = 597; - // If these fields trial are on, we get lower bitrates than expected by this - // test, due to the packetization overhead and encoder pushback. + // TODO(bugs.webrtc.org/12058): If these fields trial are on, we get lower + // bitrates than expected by this test, due to encoder pushback and subtracted + // overhead. webrtc::test::ScopedFieldTrials field_trials( std::string(field_trial::GetFieldTrialString()) + - "WebRTC-SubtractPacketizationOverhead/Disabled/" - "WebRTC-VideoRateControl/bitrate_adjuster:false/"); + "WebRTC-VideoRateControl/bitrate_adjuster:false/" + "WebRTC-SendSideBwe-WithOverhead/Disabled/"); class EncoderBitrateThresholdObserver : public test::SendTest, public VideoBitrateAllocatorFactory, @@ -3001,7 +2865,7 @@ TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) { void SetRates(const RateControlParameters& parameters) override { { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (target_bitrate_ == parameters.bitrate.get_sum_kbps()) { FakeEncoder::SetRates(parameters); return; @@ -3018,14 +2882,14 @@ TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) { // until the correct value has been observed. 
const int64_t start_time = rtc::TimeMillis(); do { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); if (target_bitrate_ == expected_bitrate) { return; } } while (bitrate_changed_event_.Wait( std::max(int64_t{1}, VideoSendStreamTest::kDefaultTimeoutMs - (rtc::TimeMillis() - start_time)))); - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); EXPECT_EQ(target_bitrate_, expected_bitrate) << "Timed out while waiting encoder rate to be set."; } @@ -3080,7 +2944,9 @@ TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) { // Encoder rate is capped by EncoderConfig max_bitrate_bps. WaitForSetRates(kMaxBitrateKbps); encoder_config_.max_bitrate_bps = kLowerMaxBitrateKbps * 1000; - send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy()); + SendTask(RTC_FROM_HERE, task_queue_, [&]() { + send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy()); + }); ASSERT_TRUE(create_rate_allocator_event_.Wait( VideoSendStreamTest::kDefaultTimeoutMs)); EXPECT_EQ(2, num_rate_allocator_creations_) @@ -3090,7 +2956,9 @@ TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) { EXPECT_EQ(1, num_encoder_initializations_); encoder_config_.max_bitrate_bps = kIncreasedMaxBitrateKbps * 1000; - send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy()); + SendTask(RTC_FROM_HERE, task_queue_, [&]() { + send_stream_->ReconfigureVideoEncoder(encoder_config_.Copy()); + }); ASSERT_TRUE(create_rate_allocator_event_.Wait( VideoSendStreamTest::kDefaultTimeoutMs)); EXPECT_EQ(3, num_rate_allocator_creations_) @@ -3106,8 +2974,8 @@ TEST_F(VideoSendStreamTest, ReconfigureBitratesSetsEncoderBitratesCorrectly) { rtc::Event create_rate_allocator_event_; rtc::Event init_encode_event_; rtc::Event bitrate_changed_event_; - rtc::CriticalSection crit_; - uint32_t target_bitrate_ RTC_GUARDED_BY(&crit_); + Mutex mutex_; + uint32_t target_bitrate_ RTC_GUARDED_BY(&mutex_); int num_rate_allocator_creations_; int num_encoder_initializations_; @@ -3131,11 
+2999,12 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) { class ScreencastTargetBitrateTest : public test::SendTest, public test::FakeEncoder { public: - ScreencastTargetBitrateTest() + explicit ScreencastTargetBitrateTest(TaskQueueBase* task_queue) : SendTest(kDefaultTimeoutMs), test::FakeEncoder(Clock::GetRealTimeClock()), send_stream_(nullptr), - encoder_factory_(this) {} + encoder_factory_(this), + task_queue_(task_queue) {} private: int32_t Encode(const VideoFrame& input_image, @@ -3143,8 +3012,10 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) { CodecSpecificInfo specifics; specifics.codecType = kVideoCodecGeneric; - uint8_t buffer[16] = {0}; - EncodedImage encoded(buffer, sizeof(buffer), sizeof(buffer)); + EncodedImage encoded; + auto buffer = EncodedImageBuffer::Create(16); + memset(buffer->data(), 0, 16); + encoded.SetEncodedData(buffer); encoded.SetTimestamp(input_image.timestamp()); encoded.capture_time_ms_ = input_image.render_time_ms(); @@ -3155,11 +3026,11 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) { encoded.SetSpatialIndex(i); EncodedImageCallback* callback; { - rtc::CritScope cs(&crit_sect_); + MutexLock lock(&mutex_); callback = callback_; } RTC_DCHECK(callback); - if (callback->OnEncodedImage(encoded, &specifics, nullptr).error != + if (callback->OnEncodedImage(encoded, &specifics).error != EncodedImageCallback::Result::OK) { return -1; } @@ -3181,7 +3052,9 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) { void PerformTest() override { EXPECT_TRUE(Wait()) << "Timed out while waiting for the encoder to send one frame."; - VideoSendStream::Stats stats = send_stream_->GetStats(); + VideoSendStream::Stats stats; + SendTask(RTC_FROM_HERE, task_queue_, + [&]() { stats = send_stream_->GetStats(); }); for (size_t i = 0; i < kNumStreams; ++i) { ASSERT_TRUE(stats.substreams.find(kVideoSendSsrcs[i]) != @@ -3203,7 +3076,8 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) { VideoSendStream* send_stream_; 
test::VideoEncoderProxyFactory encoder_factory_; - } test; + TaskQueueBase* const task_queue_; + } test(task_queue()); RunBaseTest(&test); } @@ -3238,8 +3112,9 @@ class Vp9HeaderObserver : public test::SendTest { send_config->rtp.payload_name = "VP9"; send_config->rtp.payload_type = kVp9PayloadType; ModifyVideoConfigsHook(send_config, receive_configs, encoder_config); - encoder_config->encoder_specific_settings = new rtc::RefCountedObject< - VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings_); + encoder_config->encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings_); EXPECT_EQ(1u, encoder_config->number_of_streams); EXPECT_EQ(1u, encoder_config->simulcast_layers.size()); encoder_config->simulcast_layers[0].num_temporal_layers = @@ -3258,7 +3133,7 @@ class Vp9HeaderObserver : public test::SendTest { bool wait = Wait(); { // In case of time out, OnSendRtp might still access frames_sent_; - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); EXPECT_TRUE(wait) << "Test timed out waiting for VP9 packet, num frames " << frames_sent_; } @@ -3290,7 +3165,7 @@ class Vp9HeaderObserver : public test::SendTest { ++packets_sent_; if (rtp_packet.Marker()) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ++frames_sent_; } last_packet_marker_ = rtp_packet.Marker(); @@ -3517,7 +3392,7 @@ class Vp9HeaderObserver : public test::SendTest { uint32_t last_packet_timestamp_ = 0; RTPVideoHeaderVP9 last_vp9_; size_t packets_sent_; - rtc::CriticalSection crit_; + Mutex mutex_; size_t frames_sent_; int expected_width_; int expected_height_; @@ -3808,7 +3683,7 @@ TEST_F(VideoSendStreamTest, RemoveOverheadFromBandwidth) { first_packet_sent_(false) {} void SetRates(const RateControlParameters& parameters) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); // Wait for the first sent packet so that videosendstream knows // rtp_overhead. 
if (first_packet_sent_) { @@ -3832,7 +3707,7 @@ TEST_F(VideoSendStreamTest, RemoveOverheadFromBandwidth) { } Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); first_packet_sent_ = true; return SEND_PACKET; } @@ -3857,7 +3732,7 @@ TEST_F(VideoSendStreamTest, RemoveOverheadFromBandwidth) { EXPECT_TRUE( bitrate_changed_event_.Wait(VideoSendStreamTest::kDefaultTimeoutMs)); { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); EXPECT_LE(max_bitrate_bps_, 57760u); } } @@ -3866,9 +3741,9 @@ TEST_F(VideoSendStreamTest, RemoveOverheadFromBandwidth) { TaskQueueBase* const task_queue_; test::VideoEncoderProxyFactory encoder_factory_; Call* call_; - rtc::CriticalSection crit_; - uint32_t max_bitrate_bps_ RTC_GUARDED_BY(&crit_); - bool first_packet_sent_ RTC_GUARDED_BY(&crit_); + Mutex mutex_; + uint32_t max_bitrate_bps_ RTC_GUARDED_BY(&mutex_); + bool first_packet_sent_ RTC_GUARDED_BY(&mutex_); rtc::Event bitrate_changed_event_; } test(task_queue()); RunBaseTest(&test); @@ -3973,21 +3848,22 @@ class ContentSwitchTest : public test::SendTest { }; static const uint32_t kMinPacketsToSend = 50; - explicit ContentSwitchTest(T* stream_reset_fun) + explicit ContentSwitchTest(T* stream_reset_fun, TaskQueueBase* task_queue) : SendTest(test::CallTest::kDefaultTimeoutMs), call_(nullptr), state_(StreamState::kBeforeSwitch), send_stream_(nullptr), send_stream_config_(nullptr), packets_sent_(0), - stream_resetter_(stream_reset_fun) { + stream_resetter_(stream_reset_fun), + task_queue_(task_queue) { RTC_DCHECK(stream_resetter_); } void OnVideoStreamsCreated( VideoSendStream* send_stream, const std::vector& receive_streams) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); send_stream_ = send_stream; } @@ -4008,14 +3884,16 @@ class ContentSwitchTest : public test::SendTest { } Action OnSendRtp(const uint8_t* packet, size_t length) override { - rtc::CritScope lock(&crit_); + MutexLock 
lock(&mutex_); auto internal_send_peer = test::VideoSendStreamPeer(send_stream_); float pacing_factor = internal_send_peer.GetPacingFactorOverride().value_or(0.0f); - float expected_pacing_factor = PacedSender::kDefaultPaceMultiplier; - if (send_stream_->GetStats().content_type == - webrtc::VideoContentType::SCREENSHARE) { + float expected_pacing_factor = 1.1; // Strict pacing factor. + VideoSendStream::Stats stats; + SendTask(RTC_FROM_HERE, task_queue_, + [&stats, stream = send_stream_]() { stats = stream->GetStats(); }); + if (stats.content_type == webrtc::VideoContentType::SCREENSHARE) { expected_pacing_factor = 1.0f; // Currently used pacing factor in ALR. } @@ -4070,19 +3948,20 @@ class ContentSwitchTest : public test::SendTest { private: StreamState GetStreamState() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return state_; } - rtc::CriticalSection crit_; + Mutex mutex_; rtc::Event content_switch_event_; Call* call_; - StreamState state_ RTC_GUARDED_BY(crit_); - VideoSendStream* send_stream_ RTC_GUARDED_BY(crit_); + StreamState state_ RTC_GUARDED_BY(mutex_); + VideoSendStream* send_stream_ RTC_GUARDED_BY(mutex_); VideoSendStream::Config send_stream_config_; VideoEncoderConfig encoder_config_; - uint32_t packets_sent_ RTC_GUARDED_BY(crit_); + uint32_t packets_sent_ RTC_GUARDED_BY(mutex_); T* stream_resetter_; + TaskQueueBase* task_queue_; }; TEST_F(VideoSendStreamTest, SwitchesToScreenshareAndBack) { @@ -4102,7 +3981,7 @@ TEST_F(VideoSendStreamTest, SwitchesToScreenshareAndBack) { Start(); }); }; - ContentSwitchTest test(&reset_fun); + ContentSwitchTest test(&reset_fun, task_queue()); RunBaseTest(&test); } diff --git a/video/video_source_sink_controller.cc b/video/video_source_sink_controller.cc index a649adc68c..4cd12d8a27 100644 --- a/video/video_source_sink_controller.cc +++ b/video/video_source_sink_controller.cc @@ -14,10 +14,35 @@ #include #include +#include "rtc_base/logging.h" #include "rtc_base/numerics/safe_conversions.h" +#include 
"rtc_base/strings/string_builder.h" namespace webrtc { +namespace { + +std::string WantsToString(const rtc::VideoSinkWants& wants) { + rtc::StringBuilder ss; + + ss << "max_fps=" << wants.max_framerate_fps + << " max_pixel_count=" << wants.max_pixel_count << " target_pixel_count=" + << (wants.target_pixel_count.has_value() + ? std::to_string(wants.target_pixel_count.value()) + : "null") + << " resolutions={"; + for (size_t i = 0; i < wants.resolutions.size(); ++i) { + if (i != 0) + ss << ","; + ss << wants.resolutions[i].width << "x" << wants.resolutions[i].height; + } + ss << "}"; + + return ss.Release(); +} + +} // namespace + VideoSourceSinkController::VideoSourceSinkController( rtc::VideoSinkInterface* sink, rtc::VideoSourceInterface* source) @@ -25,87 +50,109 @@ VideoSourceSinkController::VideoSourceSinkController( RTC_DCHECK(sink_); } +VideoSourceSinkController::~VideoSourceSinkController() { + RTC_DCHECK_RUN_ON(&sequence_checker_); +} + void VideoSourceSinkController::SetSource( rtc::VideoSourceInterface* source) { - rtc::VideoSourceInterface* old_source; - rtc::VideoSinkWants wants; - { - rtc::CritScope lock(&crit_); - old_source = source_; - source_ = source; - wants = CurrentSettingsToSinkWants(); - } + RTC_DCHECK_RUN_ON(&sequence_checker_); + + rtc::VideoSourceInterface* old_source = source_; + source_ = source; + if (old_source != source && old_source) old_source->RemoveSink(sink_); + if (!source) return; - source->AddOrUpdateSink(sink_, wants); + + source->AddOrUpdateSink(sink_, CurrentSettingsToSinkWants()); +} + +bool VideoSourceSinkController::HasSource() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return source_ != nullptr; } void VideoSourceSinkController::PushSourceSinkSettings() { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); if (!source_) return; - source_->AddOrUpdateSink(sink_, CurrentSettingsToSinkWants()); + rtc::VideoSinkWants wants = CurrentSettingsToSinkWants(); + RTC_LOG(INFO) << "Pushing SourceSink 
restrictions: " << WantsToString(wants); + source_->AddOrUpdateSink(sink_, wants); } VideoSourceRestrictions VideoSourceSinkController::restrictions() const { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); return restrictions_; } absl::optional VideoSourceSinkController::pixels_per_frame_upper_limit() const { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); return pixels_per_frame_upper_limit_; } absl::optional VideoSourceSinkController::frame_rate_upper_limit() const { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); return frame_rate_upper_limit_; } bool VideoSourceSinkController::rotation_applied() const { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); return rotation_applied_; } int VideoSourceSinkController::resolution_alignment() const { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); return resolution_alignment_; } +const std::vector& +VideoSourceSinkController::resolutions() const { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return resolutions_; +} + void VideoSourceSinkController::SetRestrictions( VideoSourceRestrictions restrictions) { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); restrictions_ = std::move(restrictions); } void VideoSourceSinkController::SetPixelsPerFrameUpperLimit( absl::optional pixels_per_frame_upper_limit) { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); pixels_per_frame_upper_limit_ = std::move(pixels_per_frame_upper_limit); } void VideoSourceSinkController::SetFrameRateUpperLimit( absl::optional frame_rate_upper_limit) { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); frame_rate_upper_limit_ = std::move(frame_rate_upper_limit); } void VideoSourceSinkController::SetRotationApplied(bool rotation_applied) { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); rotation_applied_ = rotation_applied; } void 
VideoSourceSinkController::SetResolutionAlignment( int resolution_alignment) { - rtc::CritScope lock(&crit_); + RTC_DCHECK_RUN_ON(&sequence_checker_); resolution_alignment_ = resolution_alignment; } -// RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_) +void VideoSourceSinkController::SetResolutions( + std::vector resolutions) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + resolutions_ = std::move(resolutions); +} + +// RTC_EXCLUSIVE_LOCKS_REQUIRED(sequence_checker_) rtc::VideoSinkWants VideoSourceSinkController::CurrentSettingsToSinkWants() const { rtc::VideoSinkWants wants; @@ -133,6 +180,7 @@ rtc::VideoSinkWants VideoSourceSinkController::CurrentSettingsToSinkWants() frame_rate_upper_limit_.has_value() ? static_cast(frame_rate_upper_limit_.value()) : std::numeric_limits::max()); + wants.resolutions = resolutions_; return wants; } diff --git a/video/video_source_sink_controller.h b/video/video_source_sink_controller.h index 68fef3f071..c61084f99a 100644 --- a/video/video_source_sink_controller.h +++ b/video/video_source_sink_controller.h @@ -11,12 +11,16 @@ #ifndef VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_ #define VIDEO_VIDEO_SOURCE_SINK_CONTROLLER_H_ +#include +#include + #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/video/video_frame.h" #include "api/video/video_sink_interface.h" #include "api/video/video_source_interface.h" #include "call/adaptation/video_source_restrictions.h" -#include "rtc_base/critical_section.h" +#include "rtc_base/system/no_unique_address.h" namespace webrtc { @@ -29,7 +33,11 @@ class VideoSourceSinkController { VideoSourceSinkController(rtc::VideoSinkInterface* sink, rtc::VideoSourceInterface* source); + ~VideoSourceSinkController(); + void SetSource(rtc::VideoSourceInterface* source); + bool HasSource() const; + // Must be called in order for changes to settings to have an effect. This // allows you to modify multiple properties in a single push to the sink. 
void PushSourceSinkSettings(); @@ -39,6 +47,7 @@ class VideoSourceSinkController { absl::optional frame_rate_upper_limit() const; bool rotation_applied() const; int resolution_alignment() const; + const std::vector& resolutions() const; // Updates the settings stored internally. In order for these settings to be // applied to the sink, PushSourceSinkSettings() must subsequently be called. @@ -48,23 +57,33 @@ class VideoSourceSinkController { void SetFrameRateUpperLimit(absl::optional frame_rate_upper_limit); void SetRotationApplied(bool rotation_applied); void SetResolutionAlignment(int resolution_alignment); + void SetResolutions(std::vector resolutions); private: rtc::VideoSinkWants CurrentSettingsToSinkWants() const - RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_); + RTC_EXCLUSIVE_LOCKS_REQUIRED(sequence_checker_); + + // Used to ensure that this class is called on threads/sequences that it and + // downstream implementations were designed for. + // In practice, this represent's libjingle's worker thread. + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; - mutable rtc::CriticalSection crit_; rtc::VideoSinkInterface* const sink_; - rtc::VideoSourceInterface* source_ RTC_GUARDED_BY(&crit_); + rtc::VideoSourceInterface* source_ + RTC_GUARDED_BY(&sequence_checker_); // Pixel and frame rate restrictions. - VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&crit_); + VideoSourceRestrictions restrictions_ RTC_GUARDED_BY(&sequence_checker_); // Ensures that even if we are not restricted, the sink is never configured // above this limit. Example: We are not CPU limited (no |restrictions_|) but // our encoder is capped at 30 fps (= |frame_rate_upper_limit_|). 
- absl::optional pixels_per_frame_upper_limit_ RTC_GUARDED_BY(&crit_); - absl::optional frame_rate_upper_limit_ RTC_GUARDED_BY(&crit_); - bool rotation_applied_ RTC_GUARDED_BY(&crit_) = false; - int resolution_alignment_ RTC_GUARDED_BY(&crit_) = 1; + absl::optional pixels_per_frame_upper_limit_ + RTC_GUARDED_BY(&sequence_checker_); + absl::optional frame_rate_upper_limit_ + RTC_GUARDED_BY(&sequence_checker_); + bool rotation_applied_ RTC_GUARDED_BY(&sequence_checker_) = false; + int resolution_alignment_ RTC_GUARDED_BY(&sequence_checker_) = 1; + std::vector resolutions_ + RTC_GUARDED_BY(&sequence_checker_); }; } // namespace webrtc diff --git a/video/video_stream_decoder.h b/video/video_stream_decoder.h index 6b040c6a6f..bfe9252976 100644 --- a/video/video_stream_decoder.h +++ b/video/video_stream_decoder.h @@ -20,8 +20,8 @@ #include "api/video/video_sink_interface.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/video_coding/include/video_coding_defines.h" -#include "rtc_base/critical_section.h" #include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/mutex.h" namespace webrtc { @@ -50,7 +50,7 @@ class VideoStreamDecoder : public VCMReceiveCallback { private: // Used for all registered callbacks except rendering. 
- rtc::CriticalSection crit_; + Mutex mutex_; VideoReceiver2* const video_receiver_; diff --git a/video/video_stream_decoder2.h b/video/video_stream_decoder2.h index 04f98bc044..a301d32107 100644 --- a/video/video_stream_decoder2.h +++ b/video/video_stream_decoder2.h @@ -20,7 +20,6 @@ #include "api/video/video_sink_interface.h" #include "modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h" #include "modules/video_coding/include/video_coding_defines.h" -#include "rtc_base/critical_section.h" #include "rtc_base/platform_thread.h" namespace webrtc { diff --git a/video/video_stream_decoder_impl.cc b/video/video_stream_decoder_impl.cc index 1e11d38050..b6d754e8be 100644 --- a/video/video_stream_decoder_impl.cc +++ b/video/video_stream_decoder_impl.cc @@ -26,7 +26,7 @@ VideoStreamDecoderImpl::VideoStreamDecoderImpl( std::map> decoder_settings) : timing_(Clock::GetRealTimeClock()), decode_callbacks_(this), - next_frame_timestamps_index_(0), + next_frame_info_index_(0), callbacks_(callbacks), keyframe_required_(true), decoder_factory_(decoder_factory), @@ -39,7 +39,6 @@ VideoStreamDecoderImpl::VideoStreamDecoderImpl( decode_queue_(task_queue_factory->CreateTaskQueue( "video_stream_decoder_decode_queue", TaskQueueFactory::Priority::NORMAL)) { - frame_timestamps_.fill({-1, -1, -1}); bookkeeping_queue_.PostTask([this]() { RTC_DCHECK_RUN_ON(&bookkeeping_queue_); StartNextDecode(); @@ -47,12 +46,11 @@ VideoStreamDecoderImpl::VideoStreamDecoderImpl( } VideoStreamDecoderImpl::~VideoStreamDecoderImpl() { - rtc::CritScope lock(&shut_down_crit_); + MutexLock lock(&shut_down_mutex_); shut_down_ = true; } -void VideoStreamDecoderImpl::OnFrame( - std::unique_ptr frame) { +void VideoStreamDecoderImpl::OnFrame(std::unique_ptr frame) { if (!bookkeeping_queue_.IsCurrent()) { bookkeeping_queue_.PostTask([this, frame = std::move(frame)]() mutable { OnFrame(std::move(frame)); @@ -64,11 +62,10 @@ void VideoStreamDecoderImpl::OnFrame( RTC_DCHECK_RUN_ON(&bookkeeping_queue_); - 
uint64_t continuous_pid = frame_buffer_.InsertFrame(std::move(frame)); - video_coding::VideoLayerFrameId continuous_id(continuous_pid, 0); - if (last_continuous_id_ < continuous_id) { - last_continuous_id_ = continuous_id; - callbacks_->OnContinuousUntil(last_continuous_id_); + int64_t continuous_frame_id = frame_buffer_.InsertFrame(std::move(frame)); + if (last_continuous_frame_id_ < continuous_frame_id) { + last_continuous_frame_id_ = continuous_frame_id; + callbacks_->OnContinuousUntil(last_continuous_frame_id_); } } @@ -125,16 +122,14 @@ VideoDecoder* VideoStreamDecoderImpl::GetDecoder(int payload_type) { return decoder_.get(); } -void VideoStreamDecoderImpl::SaveFrameTimestamps( - const video_coding::EncodedFrame& frame) { - FrameTimestamps* frame_timestamps = - &frame_timestamps_[next_frame_timestamps_index_]; - frame_timestamps->timestamp = frame.Timestamp(); - frame_timestamps->decode_start_time_ms = rtc::TimeMillis(); - frame_timestamps->render_time_us = frame.RenderTimeMs() * 1000; +void VideoStreamDecoderImpl::SaveFrameInfo(const EncodedFrame& frame) { + FrameInfo* frame_info = &frame_info_[next_frame_info_index_]; + frame_info->timestamp = frame.Timestamp(); + frame_info->decode_start_time_ms = rtc::TimeMillis(); + frame_info->render_time_us = frame.RenderTimeMs() * 1000; + frame_info->content_type = frame.EncodedImage().content_type_; - next_frame_timestamps_index_ = - Add(next_frame_timestamps_index_, 1); + next_frame_info_index_ = Add(next_frame_info_index_, 1); } void VideoStreamDecoderImpl::StartNextDecode() { @@ -142,7 +137,7 @@ void VideoStreamDecoderImpl::StartNextDecode() { frame_buffer_.NextFrame( max_wait_time, keyframe_required_, &bookkeeping_queue_, - [this](std::unique_ptr frame, + [this](std::unique_ptr frame, video_coding::FrameBuffer::ReturnReason res) mutable { RTC_DCHECK_RUN_ON(&bookkeeping_queue_); OnNextFrameCallback(std::move(frame), res); @@ -150,14 +145,14 @@ void VideoStreamDecoderImpl::StartNextDecode() { } void 
VideoStreamDecoderImpl::OnNextFrameCallback( - std::unique_ptr frame, + std::unique_ptr frame, video_coding::FrameBuffer::ReturnReason result) { switch (result) { case video_coding::FrameBuffer::kFrameFound: { RTC_DCHECK(frame); - SaveFrameTimestamps(*frame); + SaveFrameInfo(*frame); - rtc::CritScope lock(&shut_down_crit_); + MutexLock lock(&shut_down_mutex_); if (shut_down_) { return; } @@ -207,7 +202,7 @@ void VideoStreamDecoderImpl::OnNextFrameCallback( } VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeFrame( - std::unique_ptr frame) { + std::unique_ptr frame) { RTC_DCHECK(frame); VideoDecoder* decoder = GetDecoder(frame->PayloadType()); @@ -230,14 +225,14 @@ VideoStreamDecoderImpl::DecodeResult VideoStreamDecoderImpl::DecodeFrame( } } -VideoStreamDecoderImpl::FrameTimestamps* -VideoStreamDecoderImpl::GetFrameTimestamps(int64_t timestamp) { - int start_time_index = next_frame_timestamps_index_; - for (int i = 0; i < kFrameTimestampsMemory; ++i) { - start_time_index = Subtract(start_time_index, 1); +VideoStreamDecoderImpl::FrameInfo* VideoStreamDecoderImpl::GetFrameInfo( + int64_t timestamp) { + int start_time_index = next_frame_info_index_; + for (int i = 0; i < kFrameInfoMemory; ++i) { + start_time_index = Subtract(start_time_index, 1); - if (frame_timestamps_[start_time_index].timestamp == timestamp) - return &frame_timestamps_[start_time_index]; + if (frame_info_[start_time_index].timestamp == timestamp) + return &frame_info_[start_time_index]; } return nullptr; @@ -250,29 +245,33 @@ void VideoStreamDecoderImpl::OnDecodedFrameCallback( int64_t decode_stop_time_ms = rtc::TimeMillis(); bookkeeping_queue_.PostTask([this, decode_stop_time_ms, decoded_image, - decode_time_ms, qp]() { + decode_time_ms, qp]() mutable { RTC_DCHECK_RUN_ON(&bookkeeping_queue_); - FrameTimestamps* frame_timestamps = - GetFrameTimestamps(decoded_image.timestamp()); - if (!frame_timestamps) { + FrameInfo* frame_info = GetFrameInfo(decoded_image.timestamp()); + if 
(!frame_info) { RTC_LOG(LS_ERROR) << "No frame information found for frame with timestamp" << decoded_image.timestamp(); return; } - absl::optional casted_qp; - if (qp) - casted_qp.emplace(*qp); - - absl::optional casted_decode_time_ms(decode_time_ms.value_or( - decode_stop_time_ms - frame_timestamps->decode_start_time_ms)); + Callbacks::FrameInfo callback_info; + callback_info.content_type = frame_info->content_type; - timing_.StopDecodeTimer(*casted_decode_time_ms, decode_stop_time_ms); + if (qp) + callback_info.qp.emplace(*qp); - VideoFrame copy = decoded_image; - copy.set_timestamp_us(frame_timestamps->render_time_us); - callbacks_->OnDecodedFrame(copy, casted_decode_time_ms, casted_qp); + if (!decode_time_ms) { + decode_time_ms = decode_stop_time_ms - frame_info->decode_start_time_ms; + } + decoded_image.set_processing_time( + {Timestamp::Millis(frame_info->decode_start_time_ms), + Timestamp::Millis(frame_info->decode_start_time_ms + + *decode_time_ms)}); + decoded_image.set_timestamp_us(frame_info->render_time_us); + timing_.StopDecodeTimer(*decode_time_ms, decode_stop_time_ms); + + callbacks_->OnDecodedFrame(decoded_image, callback_info); }); } diff --git a/video/video_stream_decoder_impl.h b/video/video_stream_decoder_impl.h index f3f09e4a79..106f38340a 100644 --- a/video/video_stream_decoder_impl.h +++ b/video/video_stream_decoder_impl.h @@ -16,12 +16,13 @@ #include #include "absl/types/optional.h" +#include "api/sequence_checker.h" #include "api/video/video_stream_decoder.h" #include "modules/video_coding/frame_buffer2.h" #include "modules/video_coding/timing.h" #include "rtc_base/platform_thread.h" +#include "rtc_base/synchronization/mutex.h" #include "rtc_base/task_queue.h" -#include "rtc_base/thread_checker.h" #include "system_wrappers/include/clock.h" namespace webrtc { @@ -36,7 +37,7 @@ class VideoStreamDecoderImpl : public VideoStreamDecoderInterface { ~VideoStreamDecoderImpl() override; - void OnFrame(std::unique_ptr frame) override; + void 
OnFrame(std::unique_ptr frame) override; void SetMinPlayoutDelay(TimeDelta min_delay) override; void SetMaxPlayoutDelay(TimeDelta max_delay) override; @@ -61,18 +62,17 @@ class VideoStreamDecoderImpl : public VideoStreamDecoderInterface { kDecodeFailure, }; - struct FrameTimestamps { - int64_t timestamp; + struct FrameInfo { + int64_t timestamp = -1; int64_t decode_start_time_ms; int64_t render_time_us; + VideoContentType content_type; }; - void SaveFrameTimestamps(const video_coding::EncodedFrame& frame) - RTC_RUN_ON(bookkeeping_queue_); - FrameTimestamps* GetFrameTimestamps(int64_t timestamp) - RTC_RUN_ON(bookkeeping_queue_); + void SaveFrameInfo(const EncodedFrame& frame) RTC_RUN_ON(bookkeeping_queue_); + FrameInfo* GetFrameInfo(int64_t timestamp) RTC_RUN_ON(bookkeeping_queue_); void StartNextDecode() RTC_RUN_ON(bookkeeping_queue_); - void OnNextFrameCallback(std::unique_ptr frame, + void OnNextFrameCallback(std::unique_ptr frame, video_coding::FrameBuffer::ReturnReason res) RTC_RUN_ON(bookkeeping_queue_); void OnDecodedFrameCallback(VideoFrame& decodedImage, // NOLINT @@ -81,22 +81,20 @@ class VideoStreamDecoderImpl : public VideoStreamDecoderInterface { VideoDecoder* GetDecoder(int payload_type) RTC_RUN_ON(decode_queue_); VideoStreamDecoderImpl::DecodeResult DecodeFrame( - std::unique_ptr frame) - RTC_RUN_ON(decode_queue_); + std::unique_ptr frame) RTC_RUN_ON(decode_queue_); VCMTiming timing_; DecodeCallbacks decode_callbacks_; // Some decoders are pipelined so it is not sufficient to save frame info // for the last frame only. 
- static constexpr int kFrameTimestampsMemory = 8; - std::array frame_timestamps_ + static constexpr int kFrameInfoMemory = 8; + std::array frame_info_ RTC_GUARDED_BY(bookkeeping_queue_); - int next_frame_timestamps_index_ RTC_GUARDED_BY(bookkeeping_queue_); + int next_frame_info_index_ RTC_GUARDED_BY(bookkeeping_queue_); VideoStreamDecoderInterface::Callbacks* const callbacks_ RTC_PT_GUARDED_BY(bookkeeping_queue_); - video_coding::VideoLayerFrameId last_continuous_id_ - RTC_GUARDED_BY(bookkeeping_queue_); + int64_t last_continuous_frame_id_ RTC_GUARDED_BY(bookkeeping_queue_) = -1; bool keyframe_required_ RTC_GUARDED_BY(bookkeeping_queue_); absl::optional current_payload_type_ RTC_GUARDED_BY(decode_queue_); @@ -112,8 +110,8 @@ class VideoStreamDecoderImpl : public VideoStreamDecoderInterface { // safe for the |decode_queue_| to be destructed. After that the |decoder_| // can be destructed, and then the |bookkeeping_queue_|. Finally the // |frame_buffer_| can be destructed. - rtc::CriticalSection shut_down_crit_; - bool shut_down_ RTC_GUARDED_BY(shut_down_crit_); + Mutex shut_down_mutex_; + bool shut_down_ RTC_GUARDED_BY(shut_down_mutex_); video_coding::FrameBuffer frame_buffer_ RTC_GUARDED_BY(bookkeeping_queue_); rtc::TaskQueue bookkeeping_queue_; std::unique_ptr decoder_ RTC_GUARDED_BY(decode_queue_); diff --git a/video/video_stream_decoder_impl_unittest.cc b/video/video_stream_decoder_impl_unittest.cc index a45a12ccae..a3e258976a 100644 --- a/video/video_stream_decoder_impl_unittest.cc +++ b/video/video_stream_decoder_impl_unittest.cc @@ -28,16 +28,13 @@ class MockVideoStreamDecoderCallbacks : public VideoStreamDecoderInterface::Callbacks { public: MOCK_METHOD(void, OnNonDecodableState, (), (override)); - MOCK_METHOD(void, - OnContinuousUntil, - (const video_coding::VideoLayerFrameId& key), - (override)); - MOCK_METHOD(void, - OnDecodedFrame, - (VideoFrame decodedImage, - absl::optional decode_time_ms, - absl::optional qp), - (override)); + MOCK_METHOD(void, 
OnContinuousUntil, (int64_t frame_id), (override)); + MOCK_METHOD( + void, + OnDecodedFrame, + (VideoFrame frame, + const VideoStreamDecoderInterface::Callbacks::FrameInfo& frame_info), + (override)); }; class StubVideoDecoder : public VideoDecoder { @@ -130,7 +127,7 @@ class FakeVideoDecoderFactory : public VideoDecoderFactory { NiceMock av1_decoder_; }; -class FakeEncodedFrame : public video_coding::EncodedFrame { +class FakeEncodedFrame : public EncodedFrame { public: int64_t ReceivedTime() const override { return 0; } int64_t RenderTime() const override { return 0; } @@ -149,7 +146,7 @@ class FrameBuilder { } FrameBuilder& WithPictureId(int picture_id) { - frame_->id.picture_id = picture_id; + frame_->SetId(picture_id); return *this; } diff --git a/video/video_stream_encoder.cc b/video/video_stream_encoder.cc index 1dd8d825d9..107110987b 100644 --- a/video/video_stream_encoder.cc +++ b/video/video_stream_encoder.cc @@ -19,26 +19,36 @@ #include "absl/algorithm/container.h" #include "absl/types/optional.h" +#include "api/sequence_checker.h" +#include "api/task_queue/queued_task.h" +#include "api/task_queue/task_queue_base.h" #include "api/video/encoded_image.h" #include "api/video/i420_buffer.h" #include "api/video/video_adaptation_reason.h" #include "api/video/video_bitrate_allocator_factory.h" #include "api/video/video_codec_constants.h" +#include "api/video/video_layers_allocation.h" #include "api/video_codecs/video_encoder.h" #include "call/adaptation/resource_adaptation_processor.h" +#include "call/adaptation/video_stream_adapter.h" #include "modules/video_coding/codecs/vp9/svc_rate_allocator.h" #include "modules/video_coding/include/video_codec_initializer.h" #include "rtc_base/arraysize.h" #include "rtc_base/checks.h" +#include "rtc_base/constructor_magic.h" +#include "rtc_base/event.h" #include "rtc_base/experiments/alr_experiment.h" +#include "rtc_base/experiments/encoder_info_settings.h" #include "rtc_base/experiments/rate_control_settings.h" #include 
"rtc_base/location.h" #include "rtc_base/logging.h" #include "rtc_base/strings/string_builder.h" -#include "rtc_base/time_utils.h" +#include "rtc_base/system/no_unique_address.h" +#include "rtc_base/thread_annotations.h" #include "rtc_base/trace_event.h" #include "system_wrappers/include/field_trial.h" #include "video/adaptation/video_stream_encoder_resource_manager.h" +#include "video/alignment_adjuster.h" namespace webrtc { @@ -108,21 +118,44 @@ bool RequiresEncoderReset(const VideoCodec& prev_send_codec, } for (unsigned char i = 0; i < new_send_codec.numberOfSimulcastStreams; ++i) { - if (new_send_codec.simulcastStream[i].width != + if (!new_send_codec.simulcastStream[i].active) { + // No need to reset when stream is inactive. + continue; + } + + if (!prev_send_codec.simulcastStream[i].active || + new_send_codec.simulcastStream[i].width != prev_send_codec.simulcastStream[i].width || new_send_codec.simulcastStream[i].height != prev_send_codec.simulcastStream[i].height || - new_send_codec.simulcastStream[i].maxFramerate != - prev_send_codec.simulcastStream[i].maxFramerate || new_send_codec.simulcastStream[i].numberOfTemporalLayers != prev_send_codec.simulcastStream[i].numberOfTemporalLayers || new_send_codec.simulcastStream[i].qpMax != - prev_send_codec.simulcastStream[i].qpMax || - new_send_codec.simulcastStream[i].active != - prev_send_codec.simulcastStream[i].active) { + prev_send_codec.simulcastStream[i].qpMax) { return true; } } + + if (new_send_codec.codecType == kVideoCodecVP9) { + size_t num_spatial_layers = new_send_codec.VP9().numberOfSpatialLayers; + for (unsigned char i = 0; i < num_spatial_layers; ++i) { + if (new_send_codec.spatialLayers[i].width != + prev_send_codec.spatialLayers[i].width || + new_send_codec.spatialLayers[i].height != + prev_send_codec.spatialLayers[i].height || + new_send_codec.spatialLayers[i].numberOfTemporalLayers != + prev_send_codec.spatialLayers[i].numberOfTemporalLayers || + new_send_codec.spatialLayers[i].qpMax != + 
prev_send_codec.spatialLayers[i].qpMax) { + return true; + } + } + } + + if (new_send_codec.ScalabilityMode() != prev_send_codec.ScalabilityMode()) { + return true; + } + return false; } @@ -172,6 +205,297 @@ VideoBitrateAllocation UpdateAllocationFromEncoderInfo( return new_allocation; } +// Converts a VideoBitrateAllocation that contains allocated bitrate per layer, +// and an EncoderInfo that contains information about the actual encoder +// structure used by a codec. Stream structures can be Ksvc, Full SVC, Simulcast +// etc. +VideoLayersAllocation CreateVideoLayersAllocation( + const VideoCodec& encoder_config, + const VideoEncoder::RateControlParameters& current_rate, + const VideoEncoder::EncoderInfo& encoder_info) { + const VideoBitrateAllocation& target_bitrate = current_rate.target_bitrate; + VideoLayersAllocation layers_allocation; + if (target_bitrate.get_sum_bps() == 0) { + return layers_allocation; + } + + if (encoder_config.numberOfSimulcastStreams > 1) { + layers_allocation.resolution_and_frame_rate_is_valid = true; + for (int si = 0; si < encoder_config.numberOfSimulcastStreams; ++si) { + if (!target_bitrate.IsSpatialLayerUsed(si) || + target_bitrate.GetSpatialLayerSum(si) == 0) { + continue; + } + layers_allocation.active_spatial_layers.emplace_back(); + VideoLayersAllocation::SpatialLayer& spatial_layer = + layers_allocation.active_spatial_layers.back(); + spatial_layer.width = encoder_config.simulcastStream[si].width; + spatial_layer.height = encoder_config.simulcastStream[si].height; + spatial_layer.rtp_stream_index = si; + spatial_layer.spatial_id = 0; + auto frame_rate_fraction = + VideoEncoder::EncoderInfo::kMaxFramerateFraction; + if (encoder_info.fps_allocation[si].size() == 1) { + // One TL is signalled to be used by the encoder. Do not distribute + // bitrate allocation across TLs (use sum at tl:0). 
+ spatial_layer.target_bitrate_per_temporal_layer.push_back( + DataRate::BitsPerSec(target_bitrate.GetSpatialLayerSum(si))); + frame_rate_fraction = encoder_info.fps_allocation[si][0]; + } else { // Temporal layers are supported. + uint32_t temporal_layer_bitrate_bps = 0; + for (size_t ti = 0; + ti < encoder_config.simulcastStream[si].numberOfTemporalLayers; + ++ti) { + if (!target_bitrate.HasBitrate(si, ti)) { + break; + } + if (ti < encoder_info.fps_allocation[si].size()) { + // Use frame rate of the top used temporal layer. + frame_rate_fraction = encoder_info.fps_allocation[si][ti]; + } + temporal_layer_bitrate_bps += target_bitrate.GetBitrate(si, ti); + spatial_layer.target_bitrate_per_temporal_layer.push_back( + DataRate::BitsPerSec(temporal_layer_bitrate_bps)); + } + } + // Encoder may drop frames internally if `maxFramerate` is set. + spatial_layer.frame_rate_fps = std::min( + encoder_config.simulcastStream[si].maxFramerate, + rtc::saturated_cast( + (current_rate.framerate_fps * frame_rate_fraction) / + VideoEncoder::EncoderInfo::kMaxFramerateFraction)); + } + } else if (encoder_config.numberOfSimulcastStreams == 1) { + // TODO(bugs.webrtc.org/12000): Implement support for AV1 with + // scalability. 
+ const bool higher_spatial_depend_on_lower = + encoder_config.codecType == kVideoCodecVP9 && + encoder_config.VP9().interLayerPred == InterLayerPredMode::kOn; + layers_allocation.resolution_and_frame_rate_is_valid = true; + + std::vector aggregated_spatial_bitrate( + webrtc::kMaxTemporalStreams, DataRate::Zero()); + for (int si = 0; si < webrtc::kMaxSpatialLayers; ++si) { + layers_allocation.resolution_and_frame_rate_is_valid = true; + if (!target_bitrate.IsSpatialLayerUsed(si) || + target_bitrate.GetSpatialLayerSum(si) == 0) { + break; + } + layers_allocation.active_spatial_layers.emplace_back(); + VideoLayersAllocation::SpatialLayer& spatial_layer = + layers_allocation.active_spatial_layers.back(); + spatial_layer.width = encoder_config.spatialLayers[si].width; + spatial_layer.height = encoder_config.spatialLayers[si].height; + spatial_layer.rtp_stream_index = 0; + spatial_layer.spatial_id = si; + auto frame_rate_fraction = + VideoEncoder::EncoderInfo::kMaxFramerateFraction; + if (encoder_info.fps_allocation[si].size() == 1) { + // One TL is signalled to be used by the encoder. Do not distribute + // bitrate allocation across TLs (use sum at tl:0). + DataRate aggregated_temporal_bitrate = + DataRate::BitsPerSec(target_bitrate.GetSpatialLayerSum(si)); + aggregated_spatial_bitrate[0] += aggregated_temporal_bitrate; + if (higher_spatial_depend_on_lower) { + spatial_layer.target_bitrate_per_temporal_layer.push_back( + aggregated_spatial_bitrate[0]); + } else { + spatial_layer.target_bitrate_per_temporal_layer.push_back( + aggregated_temporal_bitrate); + } + frame_rate_fraction = encoder_info.fps_allocation[si][0]; + } else { // Temporal layers are supported. 
+ DataRate aggregated_temporal_bitrate = DataRate::Zero(); + for (size_t ti = 0; + ti < encoder_config.spatialLayers[si].numberOfTemporalLayers; + ++ti) { + if (!target_bitrate.HasBitrate(si, ti)) { + break; + } + if (ti < encoder_info.fps_allocation[si].size()) { + // Use frame rate of the top used temporal layer. + frame_rate_fraction = encoder_info.fps_allocation[si][ti]; + } + aggregated_temporal_bitrate += + DataRate::BitsPerSec(target_bitrate.GetBitrate(si, ti)); + if (higher_spatial_depend_on_lower) { + spatial_layer.target_bitrate_per_temporal_layer.push_back( + aggregated_temporal_bitrate + aggregated_spatial_bitrate[ti]); + aggregated_spatial_bitrate[ti] += aggregated_temporal_bitrate; + } else { + spatial_layer.target_bitrate_per_temporal_layer.push_back( + aggregated_temporal_bitrate); + } + } + } + // Encoder may drop frames internally if `maxFramerate` is set. + spatial_layer.frame_rate_fps = std::min( + encoder_config.spatialLayers[si].maxFramerate, + rtc::saturated_cast( + (current_rate.framerate_fps * frame_rate_fraction) / + VideoEncoder::EncoderInfo::kMaxFramerateFraction)); + } + } + + return layers_allocation; +} + +VideoEncoder::EncoderInfo GetEncoderInfoWithBitrateLimitUpdate( + const VideoEncoder::EncoderInfo& info, + const VideoEncoderConfig& encoder_config, + bool default_limits_allowed) { + if (!default_limits_allowed || !info.resolution_bitrate_limits.empty() || + encoder_config.simulcast_layers.size() <= 1) { + return info; + } + // Bitrate limits are not configured and more than one layer is used, use + // the default limits (bitrate limits are not used for simulcast). 
+ VideoEncoder::EncoderInfo new_info = info; + new_info.resolution_bitrate_limits = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimits( + encoder_config.codec_type); + return new_info; +} + +int NumActiveStreams(const std::vector& streams) { + int num_active = 0; + for (const auto& stream : streams) { + if (stream.active) + ++num_active; + } + return num_active; +} + +void ApplyVp9BitrateLimits(const VideoEncoder::EncoderInfo& encoder_info, + const VideoEncoderConfig& encoder_config, + VideoCodec* codec) { + if (codec->codecType != VideoCodecType::kVideoCodecVP9 || + encoder_config.simulcast_layers.size() <= 1 || + VideoStreamEncoderResourceManager::IsSimulcast(encoder_config)) { + // Resolution bitrate limits usage is restricted to singlecast. + return; + } + + // Get bitrate limits for active stream. + absl::optional pixels = + VideoStreamAdapter::GetSingleActiveLayerPixels(*codec); + if (!pixels.has_value()) { + return; + } + absl::optional bitrate_limits = + encoder_info.GetEncoderBitrateLimitsForResolution(*pixels); + if (!bitrate_limits.has_value()) { + return; + } + + // Index for the active stream. 
+ absl::optional index; + for (size_t i = 0; i < encoder_config.simulcast_layers.size(); ++i) { + if (encoder_config.simulcast_layers[i].active) + index = i; + } + if (!index.has_value()) { + return; + } + + int min_bitrate_bps; + if (encoder_config.simulcast_layers[*index].min_bitrate_bps <= 0) { + min_bitrate_bps = bitrate_limits->min_bitrate_bps; + } else { + min_bitrate_bps = + std::max(bitrate_limits->min_bitrate_bps, + encoder_config.simulcast_layers[*index].min_bitrate_bps); + } + int max_bitrate_bps; + if (encoder_config.simulcast_layers[*index].max_bitrate_bps <= 0) { + max_bitrate_bps = bitrate_limits->max_bitrate_bps; + } else { + max_bitrate_bps = + std::min(bitrate_limits->max_bitrate_bps, + encoder_config.simulcast_layers[*index].max_bitrate_bps); + } + if (min_bitrate_bps >= max_bitrate_bps) { + RTC_LOG(LS_WARNING) << "Bitrate limits not used, min_bitrate_bps " + << min_bitrate_bps << " >= max_bitrate_bps " + << max_bitrate_bps; + return; + } + + for (int i = 0; i < codec->VP9()->numberOfSpatialLayers; ++i) { + if (codec->spatialLayers[i].active) { + codec->spatialLayers[i].minBitrate = min_bitrate_bps / 1000; + codec->spatialLayers[i].maxBitrate = max_bitrate_bps / 1000; + codec->spatialLayers[i].targetBitrate = + std::min(codec->spatialLayers[i].targetBitrate, + codec->spatialLayers[i].maxBitrate); + break; + } + } +} + +void ApplyEncoderBitrateLimitsIfSingleActiveStream( + const VideoEncoder::EncoderInfo& encoder_info, + const std::vector& encoder_config_layers, + std::vector* streams) { + // Apply limits if simulcast with one active stream (expect lowest). + bool single_active_stream = + streams->size() > 1 && NumActiveStreams(*streams) == 1 && + !streams->front().active && NumActiveStreams(encoder_config_layers) == 1; + if (!single_active_stream) { + return; + } + + // Index for the active stream. 
+ size_t index = 0; + for (size_t i = 0; i < encoder_config_layers.size(); ++i) { + if (encoder_config_layers[i].active) + index = i; + } + if (streams->size() < (index + 1) || !(*streams)[index].active) { + return; + } + + // Get bitrate limits for active stream. + absl::optional encoder_bitrate_limits = + encoder_info.GetEncoderBitrateLimitsForResolution( + (*streams)[index].width * (*streams)[index].height); + if (!encoder_bitrate_limits) { + return; + } + + // If bitrate limits are set by RtpEncodingParameters, use intersection. + int min_bitrate_bps; + if (encoder_config_layers[index].min_bitrate_bps <= 0) { + min_bitrate_bps = encoder_bitrate_limits->min_bitrate_bps; + } else { + min_bitrate_bps = std::max(encoder_bitrate_limits->min_bitrate_bps, + (*streams)[index].min_bitrate_bps); + } + int max_bitrate_bps; + if (encoder_config_layers[index].max_bitrate_bps <= 0) { + max_bitrate_bps = encoder_bitrate_limits->max_bitrate_bps; + } else { + max_bitrate_bps = std::min(encoder_bitrate_limits->max_bitrate_bps, + (*streams)[index].max_bitrate_bps); + } + if (min_bitrate_bps >= max_bitrate_bps) { + RTC_LOG(LS_WARNING) << "Encoder bitrate limits" + << " (min=" << encoder_bitrate_limits->min_bitrate_bps + << ", max=" << encoder_bitrate_limits->max_bitrate_bps + << ") do not intersect with stream limits" + << " (min=" << (*streams)[index].min_bitrate_bps + << ", max=" << (*streams)[index].max_bitrate_bps + << "). 
Encoder bitrate limits not used."; + return; + } + + (*streams)[index].min_bitrate_bps = min_bitrate_bps; + (*streams)[index].max_bitrate_bps = max_bitrate_bps; + (*streams)[index].target_bitrate_bps = + std::min((*streams)[index].target_bitrate_bps, + encoder_bitrate_limits->max_bitrate_bps); +} + } // namespace VideoStreamEncoder::EncoderRateSettings::EncoderRateSettings() @@ -201,18 +525,76 @@ bool VideoStreamEncoder::EncoderRateSettings::operator!=( return !(*this == rhs); } +class VideoStreamEncoder::DegradationPreferenceManager + : public DegradationPreferenceProvider { + public: + explicit DegradationPreferenceManager( + VideoStreamAdapter* video_stream_adapter) + : degradation_preference_(DegradationPreference::DISABLED), + is_screenshare_(false), + effective_degradation_preference_(DegradationPreference::DISABLED), + video_stream_adapter_(video_stream_adapter) { + RTC_DCHECK(video_stream_adapter_); + sequence_checker_.Detach(); + } + + ~DegradationPreferenceManager() override = default; + + DegradationPreference degradation_preference() const override { + RTC_DCHECK_RUN_ON(&sequence_checker_); + return effective_degradation_preference_; + } + + void SetDegradationPreference(DegradationPreference degradation_preference) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + degradation_preference_ = degradation_preference; + MaybeUpdateEffectiveDegradationPreference(); + } + + void SetIsScreenshare(bool is_screenshare) { + RTC_DCHECK_RUN_ON(&sequence_checker_); + is_screenshare_ = is_screenshare; + MaybeUpdateEffectiveDegradationPreference(); + } + + private: + void MaybeUpdateEffectiveDegradationPreference() + RTC_RUN_ON(&sequence_checker_) { + DegradationPreference effective_degradation_preference = + (is_screenshare_ && + degradation_preference_ == DegradationPreference::BALANCED) + ? 
DegradationPreference::MAINTAIN_RESOLUTION + : degradation_preference_; + + if (effective_degradation_preference != effective_degradation_preference_) { + effective_degradation_preference_ = effective_degradation_preference; + video_stream_adapter_->SetDegradationPreference( + effective_degradation_preference); + } + } + + RTC_NO_UNIQUE_ADDRESS SequenceChecker sequence_checker_; + DegradationPreference degradation_preference_ + RTC_GUARDED_BY(&sequence_checker_); + bool is_screenshare_ RTC_GUARDED_BY(&sequence_checker_); + DegradationPreference effective_degradation_preference_ + RTC_GUARDED_BY(&sequence_checker_); + VideoStreamAdapter* video_stream_adapter_ RTC_GUARDED_BY(&sequence_checker_); +}; + VideoStreamEncoder::VideoStreamEncoder( Clock* clock, uint32_t number_of_cores, VideoStreamEncoderObserver* encoder_stats_observer, const VideoStreamEncoderSettings& settings, std::unique_ptr overuse_detector, - TaskQueueFactory* task_queue_factory) - : shutdown_event_(true /* manual_reset */, false), + TaskQueueFactory* task_queue_factory, + BitrateAllocationCallbackType allocation_cb_type) + : main_queue_(TaskQueueBase::Current()), number_of_cores_(number_of_cores), - quality_scaling_experiment_enabled_(QualityScalingExperiment::Enabled()), sink_(nullptr), settings_(settings), + allocation_cb_type_(allocation_cb_type), rate_control_settings_(RateControlSettings::ParseFromFieldTrials()), encoder_selector_(settings.encoder_factory->GetEncoderSelector()), encoder_stats_observer_(encoder_stats_observer), @@ -242,7 +624,6 @@ VideoStreamEncoder::VideoStreamEncoder( animation_start_time_(Timestamp::PlusInfinity()), cap_resolution_due_to_video_content_(false), expect_resize_state_(ExpectResizeState::kNoResize), - bitrate_observer_(nullptr), fec_controller_override_(nullptr), force_disable_frame_dropper_(false), input_framerate_(kFrameRateAvergingWindowSizeMs, 1000), @@ -251,98 +632,100 @@ VideoStreamEncoder::VideoStreamEncoder( next_frame_types_(1, 
VideoFrameType::kVideoFrameDelta), frame_encode_metadata_writer_(this), experiment_groups_(GetExperimentGroups()), - encoder_switch_experiment_(ParseEncoderSwitchFieldTrial()), automatic_animation_detection_experiment_( ParseAutomatincAnimationDetectionFieldTrial()), - encoder_switch_requested_(false), input_state_provider_(encoder_stats_observer), + video_stream_adapter_( + std::make_unique(&input_state_provider_, + encoder_stats_observer)), resource_adaptation_processor_( std::make_unique( - &input_state_provider_, - encoder_stats_observer)), + video_stream_adapter_.get())), + degradation_preference_manager_( + std::make_unique( + video_stream_adapter_.get())), + adaptation_constraints_(), stream_resource_manager_(&input_state_provider_, encoder_stats_observer, clock_, settings_.experiment_cpu_load_estimator, - std::move(overuse_detector)), + std::move(overuse_detector), + degradation_preference_manager_.get()), video_source_sink_controller_(/*sink=*/this, /*source=*/nullptr), - resource_adaptation_queue_(task_queue_factory->CreateTaskQueue( - "ResourceAdaptationQueue", - TaskQueueFactory::Priority::NORMAL)), + default_limits_allowed_( + !field_trial::IsEnabled("WebRTC-DefaultBitrateLimitsKillSwitch")), + qp_parsing_allowed_( + !field_trial::IsEnabled("WebRTC-QpParsingKillSwitch")), encoder_queue_(task_queue_factory->CreateTaskQueue( "EncoderQueue", TaskQueueFactory::Priority::NORMAL)) { + TRACE_EVENT0("webrtc", "VideoStreamEncoder::VideoStreamEncoder"); + RTC_DCHECK(main_queue_); RTC_DCHECK(encoder_stats_observer); RTC_DCHECK_GE(number_of_cores, 1); - stream_resource_manager_.Initialize(&encoder_queue_, - &resource_adaptation_queue_); + stream_resource_manager_.Initialize(&encoder_queue_); rtc::Event initialize_processor_event; - resource_adaptation_queue_.PostTask([this, &initialize_processor_event] { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); - resource_adaptation_processor_->InitializeOnResourceAdaptationQueue(); + encoder_queue_.PostTask([this, 
&initialize_processor_event] { + RTC_DCHECK_RUN_ON(&encoder_queue_); + resource_adaptation_processor_->SetTaskQueue(encoder_queue_.Get()); stream_resource_manager_.SetAdaptationProcessor( - resource_adaptation_processor_.get()); - resource_adaptation_processor_->AddAdaptationListener( + resource_adaptation_processor_.get(), video_stream_adapter_.get()); + resource_adaptation_processor_->AddResourceLimitationsListener( &stream_resource_manager_); - resource_adaptation_processor_->AddAdaptationListener(this); + video_stream_adapter_->AddRestrictionsListener(&stream_resource_manager_); + video_stream_adapter_->AddRestrictionsListener(this); + stream_resource_manager_.MaybeInitializePixelLimitResource(); + // Add the stream resource manager's resources to the processor. - for (Resource* resource : stream_resource_manager_.MappedResources()) - resource_adaptation_processor_->AddResource(resource); + adaptation_constraints_ = stream_resource_manager_.AdaptationConstraints(); + for (auto* constraint : adaptation_constraints_) { + video_stream_adapter_->AddAdaptationConstraint(constraint); + } initialize_processor_event.Set(); }); initialize_processor_event.Wait(rtc::Event::kForever); } VideoStreamEncoder::~VideoStreamEncoder() { - RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_DCHECK(shutdown_event_.Wait(0)) + RTC_DCHECK_RUN_ON(main_queue_); + RTC_DCHECK(!video_source_sink_controller_.HasSource()) << "Must call ::Stop() before destruction."; } void VideoStreamEncoder::Stop() { - RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_DCHECK_RUN_ON(main_queue_); video_source_sink_controller_.SetSource(nullptr); - rtc::Event shutdown_adaptation_processor_event; - resource_adaptation_queue_.PostTask([this, - &shutdown_adaptation_processor_event] { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); + rtc::Event shutdown_event; + + encoder_queue_.PostTask([this, &shutdown_event] { + RTC_DCHECK_RUN_ON(&encoder_queue_); if (resource_adaptation_processor_) { - 
resource_adaptation_processor_->StopResourceAdaptation(); - for (Resource* resource : stream_resource_manager_.MappedResources()) { - resource_adaptation_processor_->RemoveResource(resource); + stream_resource_manager_.StopManagedResources(); + for (auto* constraint : adaptation_constraints_) { + video_stream_adapter_->RemoveAdaptationConstraint(constraint); + } + for (auto& resource : additional_resources_) { + stream_resource_manager_.RemoveResource(resource); } - resource_adaptation_processor_->RemoveAdaptationListener(this); - resource_adaptation_processor_->RemoveAdaptationListener( + additional_resources_.clear(); + video_stream_adapter_->RemoveRestrictionsListener(this); + video_stream_adapter_->RemoveRestrictionsListener( + &stream_resource_manager_); + resource_adaptation_processor_->RemoveResourceLimitationsListener( &stream_resource_manager_); - stream_resource_manager_.SetAdaptationProcessor(nullptr); + stream_resource_manager_.SetAdaptationProcessor(nullptr, nullptr); resource_adaptation_processor_.reset(); } - shutdown_adaptation_processor_event.Set(); - }); - shutdown_adaptation_processor_event.Wait(rtc::Event::kForever); - encoder_queue_.PostTask([this] { - RTC_DCHECK_RUN_ON(&encoder_queue_); - stream_resource_manager_.StopManagedResources(); rate_allocator_ = nullptr; - bitrate_observer_ = nullptr; ReleaseEncoder(); - shutdown_event_.Set(); - }); - shutdown_event_.Wait(rtc::Event::kForever); -} - -void VideoStreamEncoder::SetBitrateAllocationObserver( - VideoBitrateAllocationObserver* bitrate_observer) { - RTC_DCHECK_RUN_ON(&thread_checker_); - encoder_queue_.PostTask([this, bitrate_observer] { - RTC_DCHECK_RUN_ON(&encoder_queue_); - RTC_DCHECK(!bitrate_observer_); - bitrate_observer_ = bitrate_observer; + encoder_ = nullptr; + shutdown_event.Set(); }); + shutdown_event.Wait(rtc::Event::kForever); } void VideoStreamEncoder::SetFecControllerOverride( @@ -357,27 +740,45 @@ void VideoStreamEncoder::SetFecControllerOverride( }); } +void 
VideoStreamEncoder::AddAdaptationResource( + rtc::scoped_refptr resource) { + RTC_DCHECK_RUN_ON(main_queue_); + TRACE_EVENT0("webrtc", "VideoStreamEncoder::AddAdaptationResource"); + // Map any externally added resources as kCpu for the sake of stats reporting. + // TODO(hbos): Make the manager map any unknown resources to kCpu and get rid + // of this MapResourceToReason() call. + TRACE_EVENT_ASYNC_BEGIN0( + "webrtc", "VideoStreamEncoder::AddAdaptationResource(latency)", this); + rtc::Event map_resource_event; + encoder_queue_.PostTask([this, resource, &map_resource_event] { + TRACE_EVENT_ASYNC_END0( + "webrtc", "VideoStreamEncoder::AddAdaptationResource(latency)", this); + RTC_DCHECK_RUN_ON(&encoder_queue_); + additional_resources_.push_back(resource); + stream_resource_manager_.AddResource(resource, VideoAdaptationReason::kCpu); + map_resource_event.Set(); + }); + map_resource_event.Wait(rtc::Event::kForever); +} + +std::vector> +VideoStreamEncoder::GetAdaptationResources() { + RTC_DCHECK_RUN_ON(main_queue_); + return resource_adaptation_processor_->GetResources(); +} + void VideoStreamEncoder::SetSource( rtc::VideoSourceInterface* source, const DegradationPreference& degradation_preference) { - RTC_DCHECK_RUN_ON(&thread_checker_); + RTC_DCHECK_RUN_ON(main_queue_); video_source_sink_controller_.SetSource(source); input_state_provider_.OnHasInputChanged(source); - // Set the degradation preference on the adaptation queue. - resource_adaptation_queue_.PostTask([this, degradation_preference] { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); - if (!resource_adaptation_processor_) { - // The VideoStreamEncoder was stopped and the processor destroyed before - // this task had a chance to execute. No action needed. - return; - } - resource_adaptation_processor_->SetDegradationPreference( - degradation_preference); - }); // This may trigger reconfiguring the QualityScaler on the encoder queue. 
encoder_queue_.PostTask([this, degradation_preference] { RTC_DCHECK_RUN_ON(&encoder_queue_); + degradation_preference_manager_->SetDegradationPreference( + degradation_preference); stream_resource_manager_.SetDegradationPreferences(degradation_preference); if (encoder_) { stream_resource_manager_.ConfigureQualityScaler( @@ -387,8 +788,10 @@ void VideoStreamEncoder::SetSource( } void VideoStreamEncoder::SetSink(EncoderSink* sink, bool rotation_applied) { + RTC_DCHECK_RUN_ON(main_queue_); video_source_sink_controller_.SetRotationApplied(rotation_applied); video_source_sink_controller_.PushSourceSinkSettings(); + encoder_queue_.PostTask([this, sink] { RTC_DCHECK_RUN_ON(&encoder_queue_); sink_ = sink; @@ -398,6 +801,7 @@ void VideoStreamEncoder::SetSink(EncoderSink* sink, bool rotation_applied) { void VideoStreamEncoder::SetStartBitrate(int start_bitrate_bps) { encoder_queue_.PostTask([this, start_bitrate_bps] { RTC_DCHECK_RUN_ON(&encoder_queue_); + RTC_LOG(LS_INFO) << "SetStartBitrate " << start_bitrate_bps; encoder_target_bitrate_bps_ = start_bitrate_bps != 0 ? absl::optional(start_bitrate_bps) : absl::nullopt; @@ -445,25 +849,47 @@ void VideoStreamEncoder::ConfigureEncoder(VideoEncoderConfig config, // the VideoBitrateAllocator and call OnEncoderConfigurationChanged with a // "soft" reconfiguration. void VideoStreamEncoder::ReconfigureEncoder() { + // Running on the encoder queue. 
RTC_DCHECK(pending_encoder_reconfiguration_); - if (!encoder_selector_ && - encoder_switch_experiment_.IsPixelCountBelowThreshold( - last_frame_info_->width * last_frame_info_->height) && - !encoder_switch_requested_ && settings_.encoder_switch_request_callback) { - EncoderSwitchRequestCallback::Config conf; - conf.codec_name = encoder_switch_experiment_.to_codec; - conf.param = encoder_switch_experiment_.to_param; - conf.value = encoder_switch_experiment_.to_value; - settings_.encoder_switch_request_callback->RequestEncoderSwitch(conf); + bool encoder_reset_required = false; + if (pending_encoder_creation_) { + // Destroy existing encoder instance before creating a new one. Otherwise + // attempt to create another instance will fail if encoder factory + // supports only single instance of encoder of given type. + encoder_.reset(); + + encoder_ = settings_.encoder_factory->CreateVideoEncoder( + encoder_config_.video_format); + // TODO(nisse): What to do if creating the encoder fails? Crash, + // or just discard incoming frames? + RTC_CHECK(encoder_); + + if (encoder_selector_) { + encoder_selector_->OnCurrentEncoder(encoder_config_.video_format); + } - encoder_switch_requested_ = true; + encoder_->SetFecControllerOverride(fec_controller_override_); + + codec_info_ = settings_.encoder_factory->QueryVideoEncoder( + encoder_config_.video_format); + + encoder_reset_required = true; } + // Possibly adjusts scale_resolution_down_by in |encoder_config_| to limit the + // alignment value. + AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors( + encoder_->GetEncoderInfo(), &encoder_config_, absl::nullopt); + std::vector streams = encoder_config_.video_stream_factory->CreateEncoderStreams( last_frame_info_->width, last_frame_info_->height, encoder_config_); + // Get alignment when actual number of layers are known. 
+ int alignment = AlignmentAdjuster::GetAlignmentAndMaybeAdjustScaleFactors( + encoder_->GetEncoderInfo(), &encoder_config_, streams.size()); + // Check that the higher layers do not try to set number of temporal layers // to less than 1. // TODO(brandtr): Get rid of the wrapping optional as it serves no purpose @@ -492,83 +918,75 @@ void VideoStreamEncoder::ReconfigureEncoder() { crop_width_ = last_frame_info_->width - highest_stream_width; crop_height_ = last_frame_info_->height - highest_stream_height; - bool encoder_reset_required = false; - if (pending_encoder_creation_) { - // Destroy existing encoder instance before creating a new one. Otherwise - // attempt to create another instance will fail if encoder factory - // supports only single instance of encoder of given type. - encoder_.reset(); - - encoder_ = settings_.encoder_factory->CreateVideoEncoder( - encoder_config_.video_format); - // TODO(nisse): What to do if creating the encoder fails? Crash, - // or just discard incoming frames? - RTC_CHECK(encoder_); - - if (encoder_selector_) { - encoder_selector_->OnCurrentEncoder(encoder_config_.video_format); - } - - encoder_->SetFecControllerOverride(fec_controller_override_); - - codec_info_ = settings_.encoder_factory->QueryVideoEncoder( - encoder_config_.video_format); - - encoder_reset_required = true; - } - - encoder_bitrate_limits_ = + absl::optional encoder_bitrate_limits = encoder_->GetEncoderInfo().GetEncoderBitrateLimitsForResolution( last_frame_info_->width * last_frame_info_->height); - if (streams.size() == 1 && encoder_bitrate_limits_) { - // Bitrate limits can be set by app (in SDP or RtpEncodingParameters) or/and - // can be provided by encoder. In presence of both set of limits, the final - // set is derived as their intersection. 
- int min_bitrate_bps; - if (encoder_config_.simulcast_layers.empty() || - encoder_config_.simulcast_layers[0].min_bitrate_bps <= 0) { - min_bitrate_bps = encoder_bitrate_limits_->min_bitrate_bps; - } else { - min_bitrate_bps = std::max(encoder_bitrate_limits_->min_bitrate_bps, - streams.back().min_bitrate_bps); - } + if (encoder_bitrate_limits) { + if (streams.size() == 1 && encoder_config_.simulcast_layers.size() == 1) { + // Bitrate limits can be set by app (in SDP or RtpEncodingParameters) + // or/and can be provided by encoder. In presence of both set of limits, + // the final set is derived as their intersection. + int min_bitrate_bps; + if (encoder_config_.simulcast_layers.empty() || + encoder_config_.simulcast_layers[0].min_bitrate_bps <= 0) { + min_bitrate_bps = encoder_bitrate_limits->min_bitrate_bps; + } else { + min_bitrate_bps = std::max(encoder_bitrate_limits->min_bitrate_bps, + streams.back().min_bitrate_bps); + } - int max_bitrate_bps; - // We don't check encoder_config_.simulcast_layers[0].max_bitrate_bps - // here since encoder_config_.max_bitrate_bps is derived from it (as - // well as from other inputs). - if (encoder_config_.max_bitrate_bps <= 0) { - max_bitrate_bps = encoder_bitrate_limits_->max_bitrate_bps; - } else { - max_bitrate_bps = std::min(encoder_bitrate_limits_->max_bitrate_bps, - streams.back().max_bitrate_bps); - } + int max_bitrate_bps; + // We don't check encoder_config_.simulcast_layers[0].max_bitrate_bps + // here since encoder_config_.max_bitrate_bps is derived from it (as + // well as from other inputs). 
+ if (encoder_config_.max_bitrate_bps <= 0) { + max_bitrate_bps = encoder_bitrate_limits->max_bitrate_bps; + } else { + max_bitrate_bps = std::min(encoder_bitrate_limits->max_bitrate_bps, + streams.back().max_bitrate_bps); + } - if (min_bitrate_bps < max_bitrate_bps) { - streams.back().min_bitrate_bps = min_bitrate_bps; - streams.back().max_bitrate_bps = max_bitrate_bps; - streams.back().target_bitrate_bps = - std::min(streams.back().target_bitrate_bps, - encoder_bitrate_limits_->max_bitrate_bps); - } else { - RTC_LOG(LS_WARNING) << "Bitrate limits provided by encoder" - << " (min=" - << encoder_bitrate_limits_->min_bitrate_bps - << ", max=" - << encoder_bitrate_limits_->min_bitrate_bps - << ") do not intersect with limits set by app" - << " (min=" << streams.back().min_bitrate_bps - << ", max=" << encoder_config_.max_bitrate_bps - << "). The app bitrate limits will be used."; + if (min_bitrate_bps < max_bitrate_bps) { + streams.back().min_bitrate_bps = min_bitrate_bps; + streams.back().max_bitrate_bps = max_bitrate_bps; + streams.back().target_bitrate_bps = + std::min(streams.back().target_bitrate_bps, + encoder_bitrate_limits->max_bitrate_bps); + } else { + RTC_LOG(LS_WARNING) + << "Bitrate limits provided by encoder" + << " (min=" << encoder_bitrate_limits->min_bitrate_bps + << ", max=" << encoder_bitrate_limits->max_bitrate_bps + << ") do not intersect with limits set by app" + << " (min=" << streams.back().min_bitrate_bps + << ", max=" << encoder_config_.max_bitrate_bps + << "). 
The app bitrate limits will be used."; + } } } + ApplyEncoderBitrateLimitsIfSingleActiveStream( + GetEncoderInfoWithBitrateLimitUpdate( + encoder_->GetEncoderInfo(), encoder_config_, default_limits_allowed_), + encoder_config_.simulcast_layers, &streams); + VideoCodec codec; if (!VideoCodecInitializer::SetupCodec(encoder_config_, streams, &codec)) { RTC_LOG(LS_ERROR) << "Failed to create encoder configuration."; } + if (encoder_config_.codec_type == kVideoCodecVP9) { + // Spatial layers configuration might impose some parity restrictions, + // thus some cropping might be needed. + crop_width_ = last_frame_info_->width - codec.width; + crop_height_ = last_frame_info_->height - codec.height; + ApplyVp9BitrateLimits(GetEncoderInfoWithBitrateLimitUpdate( + encoder_->GetEncoderInfo(), encoder_config_, + default_limits_allowed_), + encoder_config_, &codec); + } + char log_stream_buf[4 * 1024]; rtc::SimpleStringBuilder log_stream(log_stream_buf); log_stream << "ReconfigureEncoder:\n"; @@ -617,13 +1035,33 @@ void VideoStreamEncoder::ReconfigureEncoder() { for (const auto& stream : streams) { max_framerate = std::max(stream.max_framerate, max_framerate); } - int alignment = encoder_->GetEncoderInfo().requested_resolution_alignment; - if (max_framerate != video_source_sink_controller_.frame_rate_upper_limit() || - alignment != video_source_sink_controller_.resolution_alignment()) { - video_source_sink_controller_.SetFrameRateUpperLimit(max_framerate); - video_source_sink_controller_.SetResolutionAlignment(alignment); - video_source_sink_controller_.PushSourceSinkSettings(); - } + + // The resolutions that we're actually encoding with. + std::vector encoder_resolutions; + // TODO(hbos): For the case of SVC, also make use of |codec.spatialLayers|. + // For now, SVC layers are handled by the VP9 encoder. 
+ for (const auto& simulcastStream : codec.simulcastStream) { + if (!simulcastStream.active) + continue; + encoder_resolutions.emplace_back(simulcastStream.width, + simulcastStream.height); + } + main_queue_->PostTask(ToQueuedTask( + task_safety_, [this, max_framerate, alignment, + encoder_resolutions = std::move(encoder_resolutions)]() { + RTC_DCHECK_RUN_ON(main_queue_); + if (max_framerate != + video_source_sink_controller_.frame_rate_upper_limit() || + alignment != video_source_sink_controller_.resolution_alignment() || + encoder_resolutions != + video_source_sink_controller_.resolutions()) { + video_source_sink_controller_.SetFrameRateUpperLimit(max_framerate); + video_source_sink_controller_.SetResolutionAlignment(alignment); + video_source_sink_controller_.SetResolutions( + std::move(encoder_resolutions)); + video_source_sink_controller_.PushSourceSinkSettings(); + } + })); if (codec.maxBitrate == 0) { // max is one bit per pixel @@ -644,17 +1082,17 @@ void VideoStreamEncoder::ReconfigureEncoder() { rate_allocator_ = settings_.bitrate_allocator_factory->CreateVideoBitrateAllocator(codec); + rate_allocator_->SetLegacyConferenceMode( + encoder_config_.legacy_conference_mode); // Reset (release existing encoder) if one exists and anything except // start bitrate or max framerate has changed. if (!encoder_reset_required) { encoder_reset_required = RequiresEncoderReset( - codec, send_codec_, was_encode_called_since_last_initialization_); + send_codec_, codec, was_encode_called_since_last_initialization_); } send_codec_ = codec; - encoder_switch_experiment_.SetCodec(send_codec_.codecType); - // Keep the same encoder, as long as the video_format is unchanged. 
// Encoder creation block is split in two since EncoderInfo needed to start // CPU adaptation with the correct settings should be polled after @@ -680,6 +1118,10 @@ void VideoStreamEncoder::ReconfigureEncoder() { encoder_->RegisterEncodeCompleteCallback(this); frame_encode_metadata_writer_.OnEncoderInit(send_codec_, HasInternalSource()); + next_frame_types_.clear(); + next_frame_types_.resize( + std::max(static_cast(codec.numberOfSimulcastStreams), 1), + VideoFrameType::kVideoFrameKey); } frame_encode_metadata_writer_.Reset(); @@ -691,10 +1133,6 @@ void VideoStreamEncoder::ReconfigureEncoder() { OnEncoderSettingsChanged(); if (success) { - next_frame_types_.clear(); - next_frame_types_.resize( - std::max(static_cast(codec.numberOfSimulcastStreams), 1), - VideoFrameType::kVideoFrameKey); RTC_LOG(LS_VERBOSE) << " max bitrate " << codec.maxBitrate << " start bitrate " << codec.startBitrate << " max frame rate " << codec.maxFramerate @@ -705,22 +1143,7 @@ void VideoStreamEncoder::ReconfigureEncoder() { } if (pending_encoder_creation_) { - // TODO(hbos): Stopping and restarting for backwards compatibility reasons. - // We may be able to change this to "EnsureStarted()" if it took care of - // reconfiguring the QualityScaler as well. (ConfigureQualityScaler() is - // invoked later in this method.) - stream_resource_manager_.StopManagedResources(); - stream_resource_manager_.StartEncodeUsageResource(); - resource_adaptation_queue_.PostTask([this] { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); - if (!resource_adaptation_processor_) { - // The VideoStreamEncoder was stopped and the processor destroyed before - // this task had a chance to execute. No action needed. - return; - } - // Ensures started. If already started this is a NO-OP. 
- resource_adaptation_processor_->StartResourceAdaptation(); - }); + stream_resource_manager_.ConfigureEncodeUsageResource(); pending_encoder_creation_ = false; } @@ -764,7 +1187,7 @@ void VideoStreamEncoder::ReconfigureEncoder() { last_encoder_rate_settings_.reset(); rate_settings.rate_control.framerate_fps = GetInputFramerateFps(); - SetEncoderRates(UpdateBitrateAllocationAndNotifyObserver(rate_settings)); + SetEncoderRates(UpdateBitrateAllocation(rate_settings)); } encoder_stats_observer_->OnEncoderReconfigured(encoder_config_, streams); @@ -797,21 +1220,15 @@ void VideoStreamEncoder::ReconfigureEncoder() { } void VideoStreamEncoder::OnEncoderSettingsChanged() { - EncoderSettings encoder_settings(encoder_->GetEncoderInfo(), - encoder_config_.Copy(), send_codec_); + EncoderSettings encoder_settings( + GetEncoderInfoWithBitrateLimitUpdate( + encoder_->GetEncoderInfo(), encoder_config_, default_limits_allowed_), + encoder_config_.Copy(), send_codec_); stream_resource_manager_.SetEncoderSettings(encoder_settings); input_state_provider_.OnEncoderSettingsChanged(encoder_settings); bool is_screenshare = encoder_settings.encoder_config().content_type == VideoEncoderConfig::ContentType::kScreen; - resource_adaptation_queue_.PostTask([this, is_screenshare] { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); - if (!resource_adaptation_processor_) { - // The VideoStreamEncoder was stopped and the processor destroyed before - // this task had a chance to execute. No action needed. - return; - } - resource_adaptation_processor_->SetIsScreenshare(is_screenshare); - }); + degradation_preference_manager_->SetIsScreenshare(is_screenshare); } void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) { @@ -819,14 +1236,14 @@ void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) { VideoFrame incoming_frame = video_frame; // Local time in webrtc time base. 
- int64_t current_time_us = clock_->TimeInMicroseconds(); - int64_t current_time_ms = current_time_us / rtc::kNumMicrosecsPerMillisec; + Timestamp now = clock_->CurrentTime(); + // In some cases, e.g., when the frame from decoder is fed to encoder, // the timestamp may be set to the future. As the encoding pipeline assumes // capture time to be less than present time, we should reset the capture // timestamps here. Otherwise there may be issues with RTP send stream. - if (incoming_frame.timestamp_us() > current_time_us) - incoming_frame.set_timestamp_us(current_time_us); + if (incoming_frame.timestamp_us() > now.us()) + incoming_frame.set_timestamp_us(now.us()); // Capture time may come from clock with an offset and drift from clock_. int64_t capture_ntp_time_ms; @@ -835,7 +1252,7 @@ void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) { } else if (video_frame.render_time_ms() != 0) { capture_ntp_time_ms = video_frame.render_time_ms() + delta_ntp_internal_ms_; } else { - capture_ntp_time_ms = current_time_ms + delta_ntp_internal_ms_; + capture_ntp_time_ms = now.ms() + delta_ntp_internal_ms_; } incoming_frame.set_ntp_time_ms(capture_ntp_time_ms); @@ -859,14 +1276,14 @@ void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) { } bool log_stats = false; - if (current_time_ms - last_frame_log_ms_ > kFrameLogIntervalMs) { - last_frame_log_ms_ = current_time_ms; + if (now.ms() - last_frame_log_ms_ > kFrameLogIntervalMs) { + last_frame_log_ms_ = now.ms(); log_stats = true; } last_captured_timestamp_ = incoming_frame.ntp_time_ms(); - int64_t post_time_us = rtc::TimeMicros(); + int64_t post_time_us = clock_->CurrentTime().us(); ++posted_frames_waiting_for_encode_; encoder_queue_.PostTask( @@ -886,7 +1303,7 @@ void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) { MaybeEncodeVideoFrame(incoming_frame, post_time_us); } else { if (cwnd_frame_drop) { - // Frame drop by congestion window pusback. 
Do not encode this + // Frame drop by congestion window pushback. Do not encode this // frame. ++dropped_frame_cwnd_pushback_count_; encoder_stats_observer_->OnFrameDropped( @@ -951,7 +1368,7 @@ void VideoStreamEncoder::TraceFrameDropEnd() { } VideoStreamEncoder::EncoderRateSettings -VideoStreamEncoder::UpdateBitrateAllocationAndNotifyObserver( +VideoStreamEncoder::UpdateBitrateAllocation( const EncoderRateSettings& rate_settings) { VideoBitrateAllocation new_allocation; // Only call allocators if bitrate > 0 (ie, not suspended), otherwise they @@ -962,24 +1379,8 @@ VideoStreamEncoder::UpdateBitrateAllocationAndNotifyObserver( rate_settings.rate_control.framerate_fps)); } - if (bitrate_observer_ && new_allocation.get_sum_bps() > 0) { - if (encoder_ && encoder_initialized_) { - // Avoid too old encoder_info_. - const int64_t kMaxDiffMs = 100; - const bool updated_recently = - (last_encode_info_ms_ && ((clock_->TimeInMilliseconds() - - *last_encode_info_ms_) < kMaxDiffMs)); - // Update allocation according to info from encoder. - bitrate_observer_->OnBitrateAllocationUpdated( - UpdateAllocationFromEncoderInfo( - new_allocation, - updated_recently ? encoder_info_ : encoder_->GetEncoderInfo())); - } else { - bitrate_observer_->OnBitrateAllocationUpdated(new_allocation); - } - } - EncoderRateSettings new_rate_settings = rate_settings; + new_rate_settings.rate_control.target_bitrate = new_allocation; new_rate_settings.rate_control.bitrate = new_allocation; // VideoBitrateAllocator subclasses may allocate a bitrate higher than the // target in order to sustain the min bitrate of the video codec. 
In this @@ -1000,9 +1401,6 @@ VideoStreamEncoder::UpdateBitrateAllocationAndNotifyObserver( new_rate_settings.rate_control.bitrate = adjusted_allocation; } - encoder_stats_observer_->OnBitrateAllocationUpdated( - send_codec_, new_rate_settings.rate_control.bitrate); - return new_rate_settings; } @@ -1022,6 +1420,15 @@ void VideoStreamEncoder::SetEncoderRates( bool rate_control_changed = (!last_encoder_rate_settings_.has_value() || last_encoder_rate_settings_->rate_control != rate_settings.rate_control); + // For layer allocation signal we care only about the target bitrate (not the + // adjusted one) and the target fps. + bool layer_allocation_changed = + !last_encoder_rate_settings_.has_value() || + last_encoder_rate_settings_->rate_control.target_bitrate != + rate_settings.rate_control.target_bitrate || + last_encoder_rate_settings_->rate_control.framerate_fps != + rate_settings.rate_control.framerate_fps; + if (last_encoder_rate_settings_ != rate_settings) { last_encoder_rate_settings_ = rate_settings; } @@ -1033,7 +1440,7 @@ void VideoStreamEncoder::SetEncoderRates( // |bitrate_allocation| is 0 it means that the network is down or the send // pacer is full. We currently only report this if the encoder has an internal // source. If the encoder does not have an internal source, higher levels - // are expected to not call AddVideoFrame. We do this since its unclear + // are expected to not call AddVideoFrame. We do this since it is unclear // how current encoder implementations behave when given a zero target // bitrate. 
// TODO(perkj): Make sure all known encoder implementations handle zero @@ -1045,10 +1452,32 @@ void VideoStreamEncoder::SetEncoderRates( if (rate_control_changed) { encoder_->SetRates(rate_settings.rate_control); + + encoder_stats_observer_->OnBitrateAllocationUpdated( + send_codec_, rate_settings.rate_control.bitrate); frame_encode_metadata_writer_.OnSetRates( rate_settings.rate_control.bitrate, static_cast(rate_settings.rate_control.framerate_fps + 0.5)); stream_resource_manager_.SetEncoderRates(rate_settings.rate_control); + if (layer_allocation_changed && + allocation_cb_type_ == + BitrateAllocationCallbackType::kVideoLayersAllocation) { + sink_->OnVideoLayersAllocationUpdated(CreateVideoLayersAllocation( + send_codec_, rate_settings.rate_control, encoder_->GetEncoderInfo())); + } + } + if ((allocation_cb_type_ == + BitrateAllocationCallbackType::kVideoBitrateAllocation) || + (encoder_config_.content_type == + VideoEncoderConfig::ContentType::kScreen && + allocation_cb_type_ == BitrateAllocationCallbackType:: + kVideoBitrateAllocationWhenScreenSharing)) { + sink_->OnBitrateAllocationUpdated( + // Update allocation according to info from encoder. An encoder may + // choose to not use all layers due to for example HW. + UpdateAllocationFromEncoderInfo( + rate_settings.rate_control.target_bitrate, + encoder_->GetEncoderInfo())); } } @@ -1072,7 +1501,7 @@ void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame, VideoFrame::UpdateRect{0, 0, video_frame.width(), video_frame.height()}; } - // We have to create then encoder before the frame drop logic, + // We have to create the encoder before the frame drop logic, // because the latter depends on encoder_->GetScalingSettings. 
// According to the testcase // InitialFrameDropOffWhenEncoderDisabledScaling, the return value @@ -1100,8 +1529,7 @@ void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame, EncoderRateSettings new_rate_settings = *last_encoder_rate_settings_; new_rate_settings.rate_control.framerate_fps = static_cast(framerate_fps); - SetEncoderRates( - UpdateBitrateAllocationAndNotifyObserver(new_rate_settings)); + SetEncoderRates(UpdateBitrateAllocation(new_rate_settings)); } last_parameters_update_ms_.emplace(now_ms); } @@ -1188,6 +1616,12 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame, if (encoder_failed_) return; + // It's possible that EncodeVideoFrame can be called after we've completed + // a Stop() operation. Check if the encoder_ is set before continuing. + // See: bugs.webrtc.org/12857 + if (!encoder_) + return; + TraceFrameDropEnd(); // Encoder metadata needs to be updated before encode complete callback. @@ -1203,6 +1637,7 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame, if (encoder_info_ != info) { OnEncoderSettingsChanged(); + stream_resource_manager_.ConfigureEncodeUsageResource(); RTC_LOG(LS_INFO) << "Encoder settings changed from " << encoder_info_.ToString() << " to " << info.ToString(); } @@ -1219,66 +1654,32 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame, last_encode_info_ms_ = clock_->TimeInMilliseconds(); VideoFrame out_frame(video_frame); - - const VideoFrameBuffer::Type buffer_type = - out_frame.video_frame_buffer()->type(); - const bool is_buffer_type_supported = - buffer_type == VideoFrameBuffer::Type::kI420 || - (buffer_type == VideoFrameBuffer::Type::kNative && - info.supports_native_handle); - - if (!is_buffer_type_supported) { - // This module only supports software encoding. 
- rtc::scoped_refptr converted_buffer( - out_frame.video_frame_buffer()->ToI420()); - - if (!converted_buffer) { - RTC_LOG(LS_ERROR) << "Frame conversion failed, dropping frame."; - return; - } - - VideoFrame::UpdateRect update_rect = out_frame.update_rect(); - if (!update_rect.IsEmpty() && - out_frame.video_frame_buffer()->GetI420() == nullptr) { - // UpdatedRect is reset to full update if it's not empty, and buffer was - // converted, therefore we can't guarantee that pixels outside of - // UpdateRect didn't change comparing to the previous frame. - update_rect = - VideoFrame::UpdateRect{0, 0, out_frame.width(), out_frame.height()}; - } - - out_frame.set_video_frame_buffer(converted_buffer); - out_frame.set_update_rect(update_rect); - } - - // Crop frame if needed. + // Crop or scale the frame if needed. Dimension may be reduced to fit encoder + // requirements, e.g. some encoders may require them to be divisible by 4. if ((crop_width_ > 0 || crop_height_ > 0) && - out_frame.video_frame_buffer()->type() != - VideoFrameBuffer::Type::kNative) { - // If the frame can't be converted to I420, drop it. - auto i420_buffer = video_frame.video_frame_buffer()->ToI420(); - if (!i420_buffer) { - RTC_LOG(LS_ERROR) << "Frame conversion for crop failed, dropping frame."; - return; - } + (out_frame.video_frame_buffer()->type() != + VideoFrameBuffer::Type::kNative || + !info.supports_native_handle)) { int cropped_width = video_frame.width() - crop_width_; int cropped_height = video_frame.height() - crop_height_; - rtc::scoped_refptr cropped_buffer = - I420Buffer::Create(cropped_width, cropped_height); + rtc::scoped_refptr cropped_buffer; // TODO(ilnik): Remove scaling if cropping is too big, as it should never // happen after SinkWants signaled correctly from ReconfigureEncoder. 
VideoFrame::UpdateRect update_rect = video_frame.update_rect(); if (crop_width_ < 4 && crop_height_ < 4) { - cropped_buffer->CropAndScaleFrom(*i420_buffer, crop_width_ / 2, - crop_height_ / 2, cropped_width, - cropped_height); + // The difference is small, crop without scaling. + cropped_buffer = video_frame.video_frame_buffer()->CropAndScale( + crop_width_ / 2, crop_height_ / 2, cropped_width, cropped_height, + cropped_width, cropped_height); update_rect.offset_x -= crop_width_ / 2; update_rect.offset_y -= crop_height_ / 2; update_rect.Intersect( VideoFrame::UpdateRect{0, 0, cropped_width, cropped_height}); } else { - cropped_buffer->ScaleFrom(*i420_buffer); + // The difference is large, scale it. + cropped_buffer = video_frame.video_frame_buffer()->Scale(cropped_width, + cropped_height); if (!update_rect.IsEmpty()) { // Since we can't reason about pixels after scaling, we invalidate whole // picture, if anything changed. @@ -1286,6 +1687,11 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame, VideoFrame::UpdateRect{0, 0, cropped_width, cropped_height}; } } + if (!cropped_buffer) { + RTC_LOG(LS_ERROR) << "Cropping and scaling frame failed, dropping frame."; + return; + } + out_frame.set_video_frame_buffer(cropped_buffer); out_frame.set_update_rect(update_rect); out_frame.set_ntp_time_ms(video_frame.ntp_time_ms()); @@ -1316,14 +1722,12 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame, stream_resource_manager_.OnEncodeStarted(out_frame, time_when_posted_us); - RTC_DCHECK_LE(send_codec_.width, out_frame.width()); - RTC_DCHECK_LE(send_codec_.height, out_frame.height()); - // Native frames should be scaled by the client. - // For internal encoders we scale everything in one place here. - RTC_DCHECK((out_frame.video_frame_buffer()->type() == - VideoFrameBuffer::Type::kNative) || - (send_codec_.width == out_frame.width() && - send_codec_.height == out_frame.height())); + // The encoder should get the size that it expects. 
+ RTC_DCHECK(send_codec_.width <= out_frame.width() && + send_codec_.height <= out_frame.height()) + << "Encoder configured to " << send_codec_.width << "x" + << send_codec_.height << " received a too small frame " + << out_frame.width() << "x" << out_frame.height(); TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp", out_frame.timestamp()); @@ -1341,12 +1745,14 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame, if (settings_.encoder_switch_request_callback) { if (encoder_selector_) { if (auto encoder = encoder_selector_->OnEncoderBroken()) { - settings_.encoder_switch_request_callback->RequestEncoderSwitch( - *encoder); + QueueRequestEncoderSwitch(*encoder); } } else { encoder_failed_ = true; - settings_.encoder_switch_request_callback->RequestEncoderFallback(); + main_queue_->PostTask(ToQueuedTask(task_safety_, [this]() { + RTC_DCHECK_RUN_ON(main_queue_); + settings_.encoder_switch_request_callback->RequestEncoderFallback(); + })); } } else { RTC_LOG(LS_ERROR) @@ -1374,6 +1780,9 @@ void VideoStreamEncoder::SendKeyFrame() { TRACE_EVENT0("webrtc", "OnKeyFrameRequest"); RTC_DCHECK(!next_frame_types_.empty()); + if (!encoder_) + return; // Shutting down. + // TODO(webrtc:10615): Map keyframe request to spatial layer. 
std::fill(next_frame_types_.begin(), next_frame_types_.end(), VideoFrameType::kVideoFrameKey); @@ -1418,8 +1827,7 @@ void VideoStreamEncoder::OnLossNotification( EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) { + const CodecSpecificInfo* codec_specific_info) { TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded", "timestamp", encoded_image.Timestamp()); const size_t spatial_idx = encoded_image.SpatialIndex().value_or(0); @@ -1427,9 +1835,20 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage( frame_encode_metadata_writer_.FillTimingInfo(spatial_idx, &image_copy); - std::unique_ptr fragmentation_copy = - frame_encode_metadata_writer_.UpdateBitstream(codec_specific_info, - fragmentation, &image_copy); + frame_encode_metadata_writer_.UpdateBitstream(codec_specific_info, + &image_copy); + + VideoCodecType codec_type = codec_specific_info + ? codec_specific_info->codecType + : VideoCodecType::kVideoCodecGeneric; + + if (image_copy.qp_ < 0 && qp_parsing_allowed_) { + // Parse encoded frame QP if that was not provided by encoder. + image_copy.qp_ = qp_parser_ + .Parse(codec_type, spatial_idx, image_copy.data(), + image_copy.size()) + .value_or(-1); + } // Piggyback ALR experiment group id and simulcast id into the content type. const uint8_t experiment_id = @@ -1453,12 +1872,9 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage( // Post a task because |send_codec_| requires |encoder_queue_| lock. unsigned int image_width = image_copy._encodedWidth; unsigned int image_height = image_copy._encodedHeight; - VideoCodecType codec = codec_specific_info - ? 
codec_specific_info->codecType - : VideoCodecType::kVideoCodecGeneric; - encoder_queue_.PostTask([this, codec, image_width, image_height] { + encoder_queue_.PostTask([this, codec_type, image_width, image_height] { RTC_DCHECK_RUN_ON(&encoder_queue_); - if (codec == VideoCodecType::kVideoCodecVP9 && + if (codec_type == VideoCodecType::kVideoCodecVP9 && send_codec_.VP9()->automaticResizeOn) { unsigned int expected_width = send_codec_.width; unsigned int expected_height = send_codec_.height; @@ -1495,9 +1911,8 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage( simulcast_id = encoded_image.SpatialIndex().value_or(0); } - EncodedImageCallback::Result result = sink_->OnEncodedImage( - image_copy, codec_specific_info, - fragmentation_copy ? fragmentation_copy.get() : fragmentation); + EncodedImageCallback::Result result = + sink_->OnEncodedImage(image_copy, codec_specific_info); // We are only interested in propagating the meta-data about the image, not // encoded data itself, to the post encode function. 
Since we cannot be sure @@ -1517,7 +1932,8 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage( temporal_index = 0; } - RunPostEncode(image_copy, rtc::TimeMicros(), temporal_index, frame_size); + RunPostEncode(image_copy, clock_->CurrentTime().us(), temporal_index, + frame_size); if (result.error == Result::OK) { // In case of an internal encoder running on a separate thread, the @@ -1604,23 +2020,10 @@ void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate, const bool video_is_suspended = target_bitrate == DataRate::Zero(); const bool video_suspension_changed = video_is_suspended != EncoderPaused(); - if (!video_is_suspended && settings_.encoder_switch_request_callback) { - if (encoder_selector_) { - if (auto encoder = - encoder_selector_->OnAvailableBitrate(link_allocation)) { - settings_.encoder_switch_request_callback->RequestEncoderSwitch( - *encoder); - } - } else if (encoder_switch_experiment_.IsBitrateBelowThreshold( - target_bitrate) && - !encoder_switch_requested_) { - EncoderSwitchRequestCallback::Config conf; - conf.codec_name = encoder_switch_experiment_.to_codec; - conf.param = encoder_switch_experiment_.to_param; - conf.value = encoder_switch_experiment_.to_value; - settings_.encoder_switch_request_callback->RequestEncoderSwitch(conf); - - encoder_switch_requested_ = true; + if (!video_is_suspended && settings_.encoder_switch_request_callback && + encoder_selector_) { + if (auto encoder = encoder_selector_->OnAvailableBitrate(link_allocation)) { + QueueRequestEncoderSwitch(*encoder); } } @@ -1643,7 +2046,7 @@ void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate, EncoderRateSettings new_rate_settings{ VideoBitrateAllocation(), static_cast(framerate_fps), link_allocation, target_bitrate, stable_target_bitrate}; - SetEncoderRates(UpdateBitrateAllocationAndNotifyObserver(new_rate_settings)); + SetEncoderRates(UpdateBitrateAllocation(new_rate_settings)); if (target_bitrate.bps() != 0) encoder_target_bitrate_bps_ = 
target_bitrate.bps(); @@ -1657,7 +2060,8 @@ void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate, } if (video_suspension_changed && !video_is_suspended && pending_frame_ && !DropDueToSize(pending_frame_->size())) { - int64_t pending_time_us = rtc::TimeMicros() - pending_frame_post_time_us_; + int64_t pending_time_us = + clock_->CurrentTime().us() - pending_frame_post_time_us_; if (pending_time_us < kPendingFrameTimeoutMs * 1000) EncodeVideoFrame(*pending_frame_, pending_frame_post_time_us_); pending_frame_.reset(); @@ -1665,29 +2069,43 @@ void VideoStreamEncoder::OnBitrateUpdated(DataRate target_bitrate, } bool VideoStreamEncoder::DropDueToSize(uint32_t pixel_count) const { + if (!stream_resource_manager_.DropInitialFrames() || + !encoder_target_bitrate_bps_.has_value()) { + return false; + } + bool simulcast_or_svc = (send_codec_.codecType == VideoCodecType::kVideoCodecVP9 && send_codec_.VP9().numberOfSpatialLayers > 1) || - send_codec_.numberOfSimulcastStreams > 1; + (send_codec_.numberOfSimulcastStreams > 1 || + encoder_config_.simulcast_layers.size() > 1); - if (simulcast_or_svc || !stream_resource_manager_.DropInitialFrames() || - !encoder_target_bitrate_bps_.has_value()) { - return false; + if (simulcast_or_svc) { + if (stream_resource_manager_.SingleActiveStreamPixels()) { + pixel_count = stream_resource_manager_.SingleActiveStreamPixels().value(); + } else { + return false; + } } + uint32_t bitrate_bps = + stream_resource_manager_.UseBandwidthAllocationBps().value_or( + encoder_target_bitrate_bps_.value()); + absl::optional encoder_bitrate_limits = - encoder_->GetEncoderInfo().GetEncoderBitrateLimitsForResolution( - pixel_count); + GetEncoderInfoWithBitrateLimitUpdate( + encoder_->GetEncoderInfo(), encoder_config_, default_limits_allowed_) + .GetEncoderBitrateLimitsForResolution(pixel_count); if (encoder_bitrate_limits.has_value()) { // Use bitrate limits provided by encoder. 
- return encoder_target_bitrate_bps_.value() < + return bitrate_bps < static_cast(encoder_bitrate_limits->min_start_bitrate_bps); } - if (encoder_target_bitrate_bps_.value() < 300000 /* qvga */) { + if (bitrate_bps < 300000 /* qvga */) { return pixel_count > 320 * 240; - } else if (encoder_target_bitrate_bps_.value() < 500000 /* vga */) { + } else if (bitrate_bps < 500000 /* vga */) { return pixel_count > 640 * 480; } return false; @@ -1696,10 +2114,18 @@ bool VideoStreamEncoder::DropDueToSize(uint32_t pixel_count) const { void VideoStreamEncoder::OnVideoSourceRestrictionsUpdated( VideoSourceRestrictions restrictions, const VideoAdaptationCounters& adaptation_counters, - rtc::scoped_refptr reason) { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); - video_source_sink_controller_.SetRestrictions(std::move(restrictions)); - video_source_sink_controller_.PushSourceSinkSettings(); + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) { + RTC_DCHECK_RUN_ON(&encoder_queue_); + RTC_LOG(INFO) << "Updating sink restrictions from " + << (reason ? reason->Name() : std::string("")) << " to " + << restrictions.ToString(); + main_queue_->PostTask(ToQueuedTask( + task_safety_, [this, restrictions = std::move(restrictions)]() { + RTC_DCHECK_RUN_ON(main_queue_); + video_source_sink_controller_.SetRestrictions(std::move(restrictions)); + video_source_sink_controller_.PushSourceSinkSettings(); + })); } void VideoStreamEncoder::RunPostEncode(const EncodedImage& encoded_image, @@ -1720,9 +2146,9 @@ void VideoStreamEncoder::RunPostEncode(const EncodedImage& encoded_image, if (encoded_image.timing_.flags != VideoSendTiming::kInvalid) { encode_duration_us = // TODO(nisse): Maybe use capture_time_ms_ rather than encode_start_ms_? 
- rtc::kNumMicrosecsPerMillisec * - (encoded_image.timing_.encode_finish_ms - - encoded_image.timing_.encode_start_ms); + TimeDelta::Millis(encoded_image.timing_.encode_finish_ms - + encoded_image.timing_.encode_start_ms) + .us(); } // Run post encode tasks, such as overuse detection and frame rate/drop @@ -1747,7 +2173,8 @@ void VideoStreamEncoder::RunPostEncode(const EncodedImage& encoded_image, stream_resource_manager_.OnEncodeCompleted(encoded_image, time_sent_us, encode_duration_us); if (bitrate_adjuster_) { - bitrate_adjuster_->OnEncodedFrame(encoded_image, temporal_index); + bitrate_adjuster_->OnEncodedFrame( + frame_size, encoded_image.SpatialIndex().value_or(0), temporal_index); } } @@ -1766,113 +2193,6 @@ void VideoStreamEncoder::ReleaseEncoder() { TRACE_EVENT0("webrtc", "VCMGenericEncoder::Release"); } -bool VideoStreamEncoder::EncoderSwitchExperiment::IsBitrateBelowThreshold( - const DataRate& target_bitrate) { - DataRate rate = DataRate::KilobitsPerSec( - bitrate_filter.Apply(1.0, target_bitrate.kbps())); - return current_thresholds.bitrate && rate < *current_thresholds.bitrate; -} - -bool VideoStreamEncoder::EncoderSwitchExperiment::IsPixelCountBelowThreshold( - int pixel_count) const { - return current_thresholds.pixel_count && - pixel_count < *current_thresholds.pixel_count; -} - -void VideoStreamEncoder::EncoderSwitchExperiment::SetCodec( - VideoCodecType codec) { - auto it = codec_thresholds.find(codec); - if (it == codec_thresholds.end()) { - current_thresholds = {}; - } else { - current_thresholds = it->second; - } -} - -VideoStreamEncoder::EncoderSwitchExperiment -VideoStreamEncoder::ParseEncoderSwitchFieldTrial() const { - EncoderSwitchExperiment result; - - // Each "codec threshold" have the format - // ";;", and are separated by the "|" - // character. 
- webrtc::FieldTrialOptional codec_thresholds_string{ - "codec_thresholds"}; - webrtc::FieldTrialOptional to_codec{"to_codec"}; - webrtc::FieldTrialOptional to_param{"to_param"}; - webrtc::FieldTrialOptional to_value{"to_value"}; - webrtc::FieldTrialOptional window{"window"}; - - webrtc::ParseFieldTrial( - {&codec_thresholds_string, &to_codec, &to_param, &to_value, &window}, - webrtc::field_trial::FindFullName( - "WebRTC-NetworkCondition-EncoderSwitch")); - - if (!codec_thresholds_string || !to_codec || !window) { - return {}; - } - - result.bitrate_filter.Reset(1.0 - 1.0 / *window); - result.to_codec = *to_codec; - result.to_param = to_param.GetOptional(); - result.to_value = to_value.GetOptional(); - - std::vector codecs_thresholds; - if (rtc::split(*codec_thresholds_string, '|', &codecs_thresholds) == 0) { - return {}; - } - - for (const std::string& codec_threshold : codecs_thresholds) { - std::vector thresholds_split; - if (rtc::split(codec_threshold, ';', &thresholds_split) != 3) { - return {}; - } - - VideoCodecType codec = PayloadStringToCodecType(thresholds_split[0]); - int bitrate_kbps; - rtc::FromString(thresholds_split[1], &bitrate_kbps); - int pixel_count; - rtc::FromString(thresholds_split[2], &pixel_count); - - if (bitrate_kbps > 0) { - result.codec_thresholds[codec].bitrate = - DataRate::KilobitsPerSec(bitrate_kbps); - } - - if (pixel_count > 0) { - result.codec_thresholds[codec].pixel_count = pixel_count; - } - - if (!result.codec_thresholds[codec].bitrate && - !result.codec_thresholds[codec].pixel_count) { - return {}; - } - } - - rtc::StringBuilder ss; - ss << "Successfully parsed WebRTC-NetworkCondition-EncoderSwitch field " - "trial." - " to_codec:" - << result.to_codec << " to_param:" << result.to_param.value_or("") - << " to_value:" << result.to_value.value_or("") - << " codec_thresholds:"; - - for (auto kv : result.codec_thresholds) { - std::string codec_name = CodecTypeToPayloadString(kv.first); - std::string bitrate = kv.second.bitrate - ? 
std::to_string(kv.second.bitrate->kbps()) - : ""; - std::string pixels = kv.second.pixel_count - ? std::to_string(*kv.second.pixel_count) - : ""; - ss << " (" << codec_name << ":" << bitrate << ":" << pixels << ")"; - } - - RTC_LOG(LS_INFO) << ss.str(); - - return result; -} - VideoStreamEncoder::AutomaticAnimationDetectionExperiment VideoStreamEncoder::ParseAutomatincAnimationDetectionFieldTrial() const { AutomaticAnimationDetectionExperiment result; @@ -1958,60 +2278,84 @@ void VideoStreamEncoder::CheckForAnimatedContent( RTC_LOG(LS_INFO) << "Removing resolution cap due to no consistent " "animation detection."; } - video_source_sink_controller_.SetPixelsPerFrameUpperLimit( - should_cap_resolution ? absl::optional(kMaxAnimationPixels) - : absl::nullopt); - video_source_sink_controller_.PushSourceSinkSettings(); + main_queue_->PostTask(ToQueuedTask(task_safety_, [this, + should_cap_resolution]() { + RTC_DCHECK_RUN_ON(main_queue_); + video_source_sink_controller_.SetPixelsPerFrameUpperLimit( + should_cap_resolution ? 
absl::optional(kMaxAnimationPixels) + : absl::nullopt); + video_source_sink_controller_.PushSourceSinkSettings(); + })); } } + +// RTC_RUN_ON(&encoder_queue_) +void VideoStreamEncoder::QueueRequestEncoderSwitch( + const EncoderSwitchRequestCallback::Config& conf) { + main_queue_->PostTask(ToQueuedTask(task_safety_, [this, conf]() { + RTC_DCHECK_RUN_ON(main_queue_); + settings_.encoder_switch_request_callback->RequestEncoderSwitch(conf); + })); +} + +// RTC_RUN_ON(&encoder_queue_) +void VideoStreamEncoder::QueueRequestEncoderSwitch( + const webrtc::SdpVideoFormat& format) { + main_queue_->PostTask(ToQueuedTask(task_safety_, [this, format]() { + RTC_DCHECK_RUN_ON(main_queue_); + settings_.encoder_switch_request_callback->RequestEncoderSwitch(format); + })); +} + void VideoStreamEncoder::InjectAdaptationResource( rtc::scoped_refptr resource, VideoAdaptationReason reason) { rtc::Event map_resource_event; encoder_queue_.PostTask([this, resource, reason, &map_resource_event] { RTC_DCHECK_RUN_ON(&encoder_queue_); - stream_resource_manager_.MapResourceToReason(resource, reason); + additional_resources_.push_back(resource); + stream_resource_manager_.AddResource(resource, reason); map_resource_event.Set(); }); map_resource_event.Wait(rtc::Event::kForever); +} - resource_adaptation_queue_.PostTask([this, resource] { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); +void VideoStreamEncoder::InjectAdaptationConstraint( + AdaptationConstraint* adaptation_constraint) { + rtc::Event event; + encoder_queue_.PostTask([this, adaptation_constraint, &event] { + RTC_DCHECK_RUN_ON(&encoder_queue_); if (!resource_adaptation_processor_) { // The VideoStreamEncoder was stopped and the processor destroyed before // this task had a chance to execute. No action needed. 
return; } - resource_adaptation_processor_->AddResource(resource); + adaptation_constraints_.push_back(adaptation_constraint); + video_stream_adapter_->AddAdaptationConstraint(adaptation_constraint); + event.Set(); }); + event.Wait(rtc::Event::kForever); } -rtc::scoped_refptr -VideoStreamEncoder::quality_scaler_resource_for_testing() { - RTC_DCHECK_RUN_ON(&encoder_queue_); - return stream_resource_manager_.quality_scaler_resource_for_testing(); -} - -void VideoStreamEncoder::AddAdaptationListenerForTesting( - ResourceAdaptationProcessorListener* adaptation_listener) { +void VideoStreamEncoder::AddRestrictionsListenerForTesting( + VideoSourceRestrictionsListener* restrictions_listener) { rtc::Event event; - resource_adaptation_queue_.PostTask([this, adaptation_listener, &event] { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); + encoder_queue_.PostTask([this, restrictions_listener, &event] { + RTC_DCHECK_RUN_ON(&encoder_queue_); RTC_DCHECK(resource_adaptation_processor_); - resource_adaptation_processor_->AddAdaptationListener(adaptation_listener); + video_stream_adapter_->AddRestrictionsListener(restrictions_listener); event.Set(); }); event.Wait(rtc::Event::kForever); } -void VideoStreamEncoder::RemoveAdaptationListenerForTesting( - ResourceAdaptationProcessorListener* adaptation_listener) { +void VideoStreamEncoder::RemoveRestrictionsListenerForTesting( + VideoSourceRestrictionsListener* restrictions_listener) { rtc::Event event; - resource_adaptation_queue_.PostTask([this, adaptation_listener, &event] { - RTC_DCHECK_RUN_ON(&resource_adaptation_queue_); + encoder_queue_.PostTask([this, restrictions_listener, &event] { + RTC_DCHECK_RUN_ON(&encoder_queue_); RTC_DCHECK(resource_adaptation_processor_); - resource_adaptation_processor_->RemoveAdaptationListener( - adaptation_listener); + video_stream_adapter_->RemoveRestrictionsListener(restrictions_listener); event.Set(); }); event.Wait(rtc::Event::kForever); diff --git a/video/video_stream_encoder.h 
b/video/video_stream_encoder.h index 193aa1e8c9..9e70203661 100644 --- a/video/video_stream_encoder.h +++ b/video/video_stream_encoder.h @@ -17,6 +17,8 @@ #include #include +#include "api/adaptation/resource.h" +#include "api/sequence_checker.h" #include "api/units/data_rate.h" #include "api/video/video_bitrate_allocator.h" #include "api/video/video_rotation.h" @@ -26,24 +28,26 @@ #include "api/video/video_stream_encoder_settings.h" #include "api/video_codecs/video_codec.h" #include "api/video_codecs/video_encoder.h" +#include "call/adaptation/adaptation_constraint.h" +#include "call/adaptation/resource_adaptation_processor.h" #include "call/adaptation/resource_adaptation_processor_interface.h" #include "call/adaptation/video_source_restrictions.h" #include "call/adaptation/video_stream_input_state_provider.h" #include "modules/video_coding/utility/frame_dropper.h" -#include "rtc_base/critical_section.h" -#include "rtc_base/event.h" +#include "modules/video_coding/utility/qp_parser.h" #include "rtc_base/experiments/rate_control_settings.h" #include "rtc_base/numerics/exp_filter.h" #include "rtc_base/race_checker.h" #include "rtc_base/rate_statistics.h" -#include "rtc_base/synchronization/sequence_checker.h" #include "rtc_base/task_queue.h" -#include "rtc_base/thread_checker.h" +#include "rtc_base/task_utils/pending_task_safety_flag.h" +#include "rtc_base/thread_annotations.h" #include "system_wrappers/include/clock.h" #include "video/adaptation/video_stream_encoder_resource_manager.h" #include "video/encoder_bitrate_adjuster.h" #include "video/frame_encode_metadata_writer.h" #include "video/video_source_sink_controller.h" + namespace webrtc { // VideoStreamEncoder represent a video encoder that accepts raw video frames as @@ -56,16 +60,27 @@ namespace webrtc { // Call Stop() when done. 
class VideoStreamEncoder : public VideoStreamEncoderInterface, private EncodedImageCallback, - public ResourceAdaptationProcessorListener { + public VideoSourceRestrictionsListener { public: + // TODO(bugs.webrtc.org/12000): Reporting of VideoBitrateAllocation is being + // deprecated. Instead VideoLayersAllocation should be reported. + enum class BitrateAllocationCallbackType { + kVideoBitrateAllocation, + kVideoBitrateAllocationWhenScreenSharing, + kVideoLayersAllocation + }; VideoStreamEncoder(Clock* clock, uint32_t number_of_cores, VideoStreamEncoderObserver* encoder_stats_observer, const VideoStreamEncoderSettings& settings, std::unique_ptr overuse_detector, - TaskQueueFactory* task_queue_factory); + TaskQueueFactory* task_queue_factory, + BitrateAllocationCallbackType allocation_cb_type); ~VideoStreamEncoder() override; + void AddAdaptationResource(rtc::scoped_refptr resource) override; + std::vector> GetAdaptationResources() override; + void SetSource(rtc::VideoSourceInterface* source, const DegradationPreference& degradation_preference) override; @@ -74,9 +89,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, // TODO(perkj): Can we remove VideoCodec.startBitrate ? void SetStartBitrate(int start_bitrate_bps) override; - void SetBitrateAllocationObserver( - VideoBitrateAllocationObserver* bitrate_observer) override; - void SetFecControllerOverride( FecControllerOverride* fec_controller_override) override; @@ -106,28 +118,23 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, // Used for testing. For example the |ScalingObserverInterface| methods must // be called on |encoder_queue_|. 
rtc::TaskQueue* encoder_queue() { return &encoder_queue_; } - rtc::TaskQueue* resource_adaptation_queue() { - return &resource_adaptation_queue_; - } void OnVideoSourceRestrictionsUpdated( VideoSourceRestrictions restrictions, const VideoAdaptationCounters& adaptation_counters, - rtc::scoped_refptr reason) override; + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) override; // Used for injected test resources. // TODO(eshr): Move all adaptation tests out of VideoStreamEncoder tests. void InjectAdaptationResource(rtc::scoped_refptr resource, - VideoAdaptationReason reason) - RTC_RUN_ON(&encoder_queue_); + VideoAdaptationReason reason); + void InjectAdaptationConstraint(AdaptationConstraint* adaptation_constraint); - rtc::scoped_refptr - quality_scaler_resource_for_testing(); - - void AddAdaptationListenerForTesting( - ResourceAdaptationProcessorListener* adaptation_listener); - void RemoveAdaptationListenerForTesting( - ResourceAdaptationProcessorListener* adaptation_listener); + void AddRestrictionsListenerForTesting( + VideoSourceRestrictionsListener* restrictions_listener); + void RemoveRestrictionsListenerForTesting( + VideoSourceRestrictionsListener* restrictions_listener); private: class VideoFrameInfo { @@ -161,6 +168,8 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, DataRate stable_encoder_target; }; + class DegradationPreferenceManager; + void ReconfigureEncoder() RTC_RUN_ON(&encoder_queue_); void OnEncoderSettingsChanged() RTC_RUN_ON(&encoder_queue_); @@ -173,15 +182,14 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, void EncodeVideoFrame(const VideoFrame& frame, int64_t time_when_posted_in_ms); - // Indicates wether frame should be dropped because the pixel count is too + // Indicates whether frame should be dropped because the pixel count is too // large for the current bitrate configuration. 
bool DropDueToSize(uint32_t pixel_count) const RTC_RUN_ON(&encoder_queue_); // Implements EncodedImageCallback. EncodedImageCallback::Result OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override; + const CodecSpecificInfo* codec_specific_info) override; void OnDroppedFrame(EncodedImageCallback::DropReason reason) override; @@ -190,9 +198,8 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, void TraceFrameDropEnd(); // Returns a copy of |rate_settings| with the |bitrate| field updated using - // the current VideoBitrateAllocator, and notifies any listeners of the new - // allocation. - EncoderRateSettings UpdateBitrateAllocationAndNotifyObserver( + // the current VideoBitrateAllocator. + EncoderRateSettings UpdateBitrateAllocation( const EncoderRateSettings& rate_settings) RTC_RUN_ON(&encoder_queue_); uint32_t GetInputFramerateFps() RTC_RUN_ON(&encoder_queue_); @@ -205,27 +212,32 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, DataSize frame_size); bool HasInternalSource() const RTC_RUN_ON(&encoder_queue_); void ReleaseEncoder() RTC_RUN_ON(&encoder_queue_); + // After calling this function |resource_adaptation_processor_| will be null. + void ShutdownResourceAdaptationQueue(); void CheckForAnimatedContent(const VideoFrame& frame, int64_t time_when_posted_in_ms) RTC_RUN_ON(&encoder_queue_); - rtc::Event shutdown_event_; + // TODO(bugs.webrtc.org/11341) : Remove this version of RequestEncoderSwitch. 
+ void QueueRequestEncoderSwitch( + const EncoderSwitchRequestCallback::Config& conf) + RTC_RUN_ON(&encoder_queue_); + void QueueRequestEncoderSwitch(const webrtc::SdpVideoFormat& format) + RTC_RUN_ON(&encoder_queue_); - const uint32_t number_of_cores_; + TaskQueueBase* const main_queue_; - const bool quality_scaling_experiment_enabled_; + const uint32_t number_of_cores_; EncoderSink* sink_; const VideoStreamEncoderSettings settings_; + const BitrateAllocationCallbackType allocation_cb_type_; const RateControlSettings rate_control_settings_; std::unique_ptr const encoder_selector_; VideoStreamEncoderObserver* const encoder_stats_observer_; - // |thread_checker_| checks that public methods that are related to lifetime - // of VideoStreamEncoder are called on the same thread. - rtc::ThreadChecker thread_checker_; VideoEncoderConfig encoder_config_ RTC_GUARDED_BY(&encoder_queue_); std::unique_ptr encoder_ RTC_GUARDED_BY(&encoder_queue_) @@ -294,8 +306,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, kFirstFrameAfterResize // Resize observed. } expect_resize_state_ RTC_GUARDED_BY(&encoder_queue_); - VideoBitrateAllocationObserver* bitrate_observer_ - RTC_GUARDED_BY(&encoder_queue_); FecControllerOverride* fec_controller_override_ RTC_GUARDED_BY(&encoder_queue_); absl::optional last_parameters_update_ms_ @@ -303,8 +313,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, absl::optional last_encode_info_ms_ RTC_GUARDED_BY(&encoder_queue_); VideoEncoder::EncoderInfo encoder_info_ RTC_GUARDED_BY(&encoder_queue_); - absl::optional encoder_bitrate_limits_ - RTC_GUARDED_BY(&encoder_queue_); VideoEncoderFactory::CodecInfo codec_info_ RTC_GUARDED_BY(&encoder_queue_); VideoCodec send_codec_ RTC_GUARDED_BY(&encoder_queue_); @@ -341,38 +349,6 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, // experiment group numbers incremented by 1. 
const std::array experiment_groups_; - struct EncoderSwitchExperiment { - struct Thresholds { - absl::optional bitrate; - absl::optional pixel_count; - }; - - // Codec --> switching thresholds - std::map codec_thresholds; - - // To smooth out the target bitrate so that we don't trigger a switch - // too easily. - rtc::ExpFilter bitrate_filter{1.0}; - - // Codec/implementation to switch to - std::string to_codec; - absl::optional to_param; - absl::optional to_value; - - // Thresholds for the currently used codecs. - Thresholds current_thresholds; - - // Updates the |bitrate_filter|, so not const. - bool IsBitrateBelowThreshold(const DataRate& target_bitrate); - bool IsPixelCountBelowThreshold(int pixel_count) const; - void SetCodec(VideoCodecType codec); - }; - - EncoderSwitchExperiment ParseEncoderSwitchFieldTrial() const; - - EncoderSwitchExperiment encoder_switch_experiment_ - RTC_GUARDED_BY(&encoder_queue_); - struct AutomaticAnimationDetectionExperiment { bool enabled = false; int min_duration_ms = 2000; @@ -393,41 +369,53 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface, AutomaticAnimationDetectionExperiment automatic_animation_detection_experiment_ RTC_GUARDED_BY(&encoder_queue_); - // An encoder switch is only requested once, this variable is used to keep - // track of whether a request has been made or not. - bool encoder_switch_requested_ RTC_GUARDED_BY(&encoder_queue_); - - // Provies video stream input states: current resolution and frame rate. - // This class is thread-safe. + // Provides video stream input states: current resolution and frame rate. VideoStreamInputStateProvider input_state_provider_; + + std::unique_ptr video_stream_adapter_ + RTC_GUARDED_BY(&encoder_queue_); // Responsible for adapting input resolution or frame rate to ensure resources - // (e.g. CPU or bandwidth) are not overused. - // This class is single-threaded on the resource adaptation queue. + // (e.g. CPU or bandwidth) are not overused. 
Adding resources can occur on any + // thread. std::unique_ptr - resource_adaptation_processor_ - RTC_GUARDED_BY(&resource_adaptation_queue_); + resource_adaptation_processor_; + std::unique_ptr degradation_preference_manager_ + RTC_GUARDED_BY(&encoder_queue_); + std::vector adaptation_constraints_ + RTC_GUARDED_BY(&encoder_queue_); // Handles input, output and stats reporting related to VideoStreamEncoder // specific resources, such as "encode usage percent" measurements and "QP - // scaling". Also involved with various mitigations such as inital frame + // scaling". Also involved with various mitigations such as initial frame // dropping. // The manager primarily operates on the |encoder_queue_| but its lifetime is // tied to the VideoStreamEncoder (which is destroyed off the encoder queue) // and its resource list is accessible from any thread. - VideoStreamEncoderResourceManager stream_resource_manager_; + VideoStreamEncoderResourceManager stream_resource_manager_ + RTC_GUARDED_BY(&encoder_queue_); + std::vector> additional_resources_ + RTC_GUARDED_BY(&encoder_queue_); // Carries out the VideoSourceRestrictions provided by the // ResourceAdaptationProcessor, i.e. reconfigures the source of video frames // to provide us with different resolution or frame rate. // This class is thread-safe. - VideoSourceSinkController video_source_sink_controller_; + VideoSourceSinkController video_source_sink_controller_ + RTC_GUARDED_BY(main_queue_); + + // Default bitrate limits in EncoderInfoSettings allowed. + const bool default_limits_allowed_; + + // QP parser is used to extract QP value from encoded frame when that is not + // provided by encoder. + QpParser qp_parser_; + const bool qp_parsing_allowed_; // Public methods are proxied to the task queues. The queues must be destroyed // first to make sure no tasks run that use other members. 
- // TODO(https://crbug.com/webrtc/11172): Move ownership of the - // ResourceAdaptationProcessor and its task queue to Call when processors are - // multi-stream aware. - rtc::TaskQueue resource_adaptation_queue_; rtc::TaskQueue encoder_queue_; + // Used to cancel any potentially pending tasks to the main thread. + ScopedTaskSafety task_safety_; + RTC_DISALLOW_COPY_AND_ASSIGN(VideoStreamEncoder); }; diff --git a/video/video_stream_encoder_unittest.cc b/video/video_stream_encoder_unittest.cc index 9292933d89..cbfd93e9e2 100644 --- a/video/video_stream_encoder_unittest.cc +++ b/video/video_stream_encoder_unittest.cc @@ -13,39 +13,54 @@ #include #include #include +#include #include #include "absl/memory/memory.h" #include "api/task_queue/default_task_queue_factory.h" #include "api/test/mock_fec_controller_override.h" #include "api/test/mock_video_encoder.h" +#include "api/test/mock_video_encoder_factory.h" #include "api/video/builtin_video_bitrate_allocator_factory.h" #include "api/video/i420_buffer.h" +#include "api/video/nv12_buffer.h" #include "api/video/video_adaptation_reason.h" #include "api/video/video_bitrate_allocation.h" +#include "api/video_codecs/sdp_video_format.h" #include "api/video_codecs/video_encoder.h" #include "api/video_codecs/vp8_temporal_layers.h" #include "api/video_codecs/vp8_temporal_layers_factory.h" +#include "call/adaptation/test/fake_adaptation_constraint.h" #include "call/adaptation/test/fake_resource.h" #include "common_video/h264/h264_common.h" #include "common_video/include/video_frame_buffer.h" #include "media/base/video_adapter.h" +#include "media/engine/webrtc_video_engine.h" +#include "modules/video_coding/codecs/av1/libaom_av1_encoder.h" +#include "modules/video_coding/codecs/h264/include/h264.h" +#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h" +#include "modules/video_coding/codecs/vp8/include/vp8.h" +#include "modules/video_coding/codecs/vp9/include/vp9.h" #include 
"modules/video_coding/codecs/vp9/include/vp9_globals.h" +#include "modules/video_coding/codecs/vp9/svc_config.h" #include "modules/video_coding/utility/quality_scaler.h" #include "modules/video_coding/utility/simulcast_rate_allocator.h" -#include "rtc_base/fake_clock.h" +#include "rtc_base/event.h" +#include "rtc_base/experiments/encoder_info_settings.h" #include "rtc_base/gunit.h" #include "rtc_base/logging.h" #include "rtc_base/ref_counted_object.h" +#include "rtc_base/synchronization/mutex.h" #include "system_wrappers/include/field_trial.h" #include "system_wrappers/include/metrics.h" -#include "system_wrappers/include/sleep.h" #include "test/encoder_settings.h" #include "test/fake_encoder.h" #include "test/field_trial.h" #include "test/frame_forwarder.h" #include "test/gmock.h" #include "test/gtest.h" +#include "test/mappable_native_buffer.h" +#include "test/time_controller/simulated_time_controller.h" #include "test/video_encoder_proxy_factory.h" #include "video/send_statistics_proxy.h" @@ -53,10 +68,17 @@ namespace webrtc { using ::testing::_; using ::testing::AllOf; +using ::testing::AtLeast; +using ::testing::Eq; using ::testing::Field; +using ::testing::Ge; +using ::testing::Gt; +using ::testing::Le; +using ::testing::Lt; using ::testing::Matcher; using ::testing::NiceMock; using ::testing::Return; +using ::testing::SizeIs; using ::testing::StrictMock; namespace { @@ -85,6 +107,11 @@ uint8_t optimal_sps[] = {0, 0, 0, 1, H264::NaluType::kSps, 0x05, 0x03, 0xC7, 0xE0, 0x1B, 0x41, 0x10, 0x8D, 0x00}; +const uint8_t kCodedFrameVp8Qp25[] = { + 0x10, 0x02, 0x00, 0x9d, 0x01, 0x2a, 0x10, 0x00, 0x10, 0x00, + 0x02, 0x47, 0x08, 0x85, 0x85, 0x88, 0x85, 0x84, 0x88, 0x0c, + 0x82, 0x00, 0x0c, 0x0d, 0x60, 0x00, 0xfe, 0xfc, 0x5c, 0xd0}; + class TestBuffer : public webrtc::I420Buffer { public: TestBuffer(rtc::Event* event, int width, int height) @@ -99,7 +126,8 @@ class TestBuffer : public webrtc::I420Buffer { rtc::Event* const event_; }; -// A fake native buffer that can't 
be converted to I420. +// A fake native buffer that can't be converted to I420. Upon scaling, it +// produces another FakeNativeBuffer. class FakeNativeBuffer : public webrtc::VideoFrameBuffer { public: FakeNativeBuffer(rtc::Event* event, int width, int height) @@ -110,6 +138,16 @@ class FakeNativeBuffer : public webrtc::VideoFrameBuffer { rtc::scoped_refptr ToI420() override { return nullptr; } + rtc::scoped_refptr CropAndScale( + int offset_x, + int offset_y, + int crop_width, + int crop_height, + int scaled_width, + int scaled_height) override { + return rtc::make_ref_counted(nullptr, scaled_width, + scaled_height); + } private: friend class rtc::RefCountedObject; @@ -122,6 +160,37 @@ class FakeNativeBuffer : public webrtc::VideoFrameBuffer { const int height_; }; +// A fake native buffer that is backed by an NV12 buffer. +class FakeNV12NativeBuffer : public webrtc::VideoFrameBuffer { + public: + FakeNV12NativeBuffer(rtc::Event* event, int width, int height) + : nv12_buffer_(NV12Buffer::Create(width, height)), event_(event) {} + + webrtc::VideoFrameBuffer::Type type() const override { return Type::kNative; } + int width() const override { return nv12_buffer_->width(); } + int height() const override { return nv12_buffer_->height(); } + rtc::scoped_refptr ToI420() override { + return nv12_buffer_->ToI420(); + } + rtc::scoped_refptr GetMappedFrameBuffer( + rtc::ArrayView types) override { + if (absl::c_find(types, Type::kNV12) != types.end()) { + return nv12_buffer_; + } + return nullptr; + } + const NV12BufferInterface* GetNV12() const { return nv12_buffer_; } + + private: + friend class rtc::RefCountedObject; + ~FakeNV12NativeBuffer() override { + if (event_) + event_->Set(); + } + rtc::scoped_refptr nv12_buffer_; + rtc::Event* const event_; +}; + class CpuOveruseDetectorProxy : public OveruseFrameDetector { public: explicit CpuOveruseDetectorProxy(CpuOveruseMetricsObserver* metrics_observer) @@ -132,14 +201,14 @@ class CpuOveruseDetectorProxy : public 
OveruseFrameDetector { virtual ~CpuOveruseDetectorProxy() {} void OnTargetFramerateUpdated(int framerate_fps) override { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); last_target_framerate_fps_ = framerate_fps; OveruseFrameDetector::OnTargetFramerateUpdated(framerate_fps); framerate_updated_event_.Set(); } int GetLastTargetFramerate() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); return last_target_framerate_fps_; } @@ -148,45 +217,17 @@ class CpuOveruseDetectorProxy : public OveruseFrameDetector { rtc::Event* framerate_updated_event() { return &framerate_updated_event_; } private: - rtc::CriticalSection lock_; + Mutex lock_; int last_target_framerate_fps_ RTC_GUARDED_BY(lock_); rtc::Event framerate_updated_event_; }; -class FakeQualityScalerQpUsageHandlerCallback - : public QualityScalerQpUsageHandlerCallbackInterface { - public: - FakeQualityScalerQpUsageHandlerCallback() - : QualityScalerQpUsageHandlerCallbackInterface(), - qp_usage_handled_event_(/*manual_reset=*/true, - /*initially_signaled=*/false), - clear_qp_samples_result_(absl::nullopt) {} - ~FakeQualityScalerQpUsageHandlerCallback() override { - RTC_DCHECK(clear_qp_samples_result_.has_value()); - } - - void OnQpUsageHandled(bool clear_qp_samples) override { - clear_qp_samples_result_ = clear_qp_samples; - qp_usage_handled_event_.Set(); - } - - bool WaitForQpUsageHandled() { return qp_usage_handled_event_.Wait(5000); } - - absl::optional clear_qp_samples_result() const { - return clear_qp_samples_result_; - } - - private: - rtc::Event qp_usage_handled_event_; - absl::optional clear_qp_samples_result_; -}; - -class VideoSourceRestrictionsUpdatedListener - : public ResourceAdaptationProcessorListener { +class FakeVideoSourceRestrictionsListener + : public VideoSourceRestrictionsListener { public: - VideoSourceRestrictionsUpdatedListener() + FakeVideoSourceRestrictionsListener() : was_restrictions_updated_(false), restrictions_updated_event_() {} - 
~VideoSourceRestrictionsUpdatedListener() override { + ~FakeVideoSourceRestrictionsListener() override { RTC_DCHECK(was_restrictions_updated_); } @@ -194,11 +235,12 @@ class VideoSourceRestrictionsUpdatedListener return &restrictions_updated_event_; } - // ResourceAdaptationProcessorListener implementation. + // VideoSourceRestrictionsListener implementation. void OnVideoSourceRestrictionsUpdated( VideoSourceRestrictions restrictions, const VideoAdaptationCounters& adaptation_counters, - rtc::scoped_refptr reason) override { + rtc::scoped_refptr reason, + const VideoSourceRestrictions& unfiltered_restrictions) override { was_restrictions_updated_ = true; restrictions_updated_event_.Set(); } @@ -208,38 +250,131 @@ class VideoSourceRestrictionsUpdatedListener rtc::Event restrictions_updated_event_; }; +auto WantsFps(Matcher fps_matcher) { + return Field("max_framerate_fps", &rtc::VideoSinkWants::max_framerate_fps, + fps_matcher); +} + +auto WantsMaxPixels(Matcher max_pixel_matcher) { + return Field("max_pixel_count", &rtc::VideoSinkWants::max_pixel_count, + AllOf(max_pixel_matcher, Gt(0))); +} + +auto ResolutionMax() { + return AllOf( + WantsMaxPixels(Eq(std::numeric_limits::max())), + Field("target_pixel_count", &rtc::VideoSinkWants::target_pixel_count, + Eq(absl::nullopt))); +} + +auto FpsMax() { + return WantsFps(Eq(kDefaultFramerate)); +} + +auto FpsUnlimited() { + return WantsFps(Eq(std::numeric_limits::max())); +} + +auto FpsMatchesResolutionMax(Matcher fps_matcher) { + return AllOf(WantsFps(fps_matcher), ResolutionMax()); +} + +auto FpsMaxResolutionMatches(Matcher pixel_matcher) { + return AllOf(FpsMax(), WantsMaxPixels(pixel_matcher)); +} + +auto FpsMaxResolutionMax() { + return AllOf(FpsMax(), ResolutionMax()); +} + +auto UnlimitedSinkWants() { + return AllOf(FpsUnlimited(), ResolutionMax()); +} + +auto FpsInRangeForPixelsInBalanced(int last_frame_pixels) { + Matcher fps_range_matcher; + + if (last_frame_pixels <= 320 * 240) { + fps_range_matcher = 
AllOf(Ge(7), Le(10)); + } else if (last_frame_pixels <= 480 * 360) { + fps_range_matcher = AllOf(Ge(10), Le(15)); + } else if (last_frame_pixels <= 640 * 480) { + fps_range_matcher = Ge(15); + } else { + fps_range_matcher = Eq(kDefaultFramerate); + } + return Field("max_framerate_fps", &rtc::VideoSinkWants::max_framerate_fps, + fps_range_matcher); +} + +auto FpsEqResolutionEqTo(const rtc::VideoSinkWants& other_wants) { + return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)), + WantsMaxPixels(Eq(other_wants.max_pixel_count))); +} + +auto FpsMaxResolutionLt(const rtc::VideoSinkWants& other_wants) { + return AllOf(FpsMax(), WantsMaxPixels(Lt(other_wants.max_pixel_count))); +} + +auto FpsMaxResolutionGt(const rtc::VideoSinkWants& other_wants) { + return AllOf(FpsMax(), WantsMaxPixels(Gt(other_wants.max_pixel_count))); +} + +auto FpsLtResolutionEq(const rtc::VideoSinkWants& other_wants) { + return AllOf(WantsFps(Lt(other_wants.max_framerate_fps)), + WantsMaxPixels(Eq(other_wants.max_pixel_count))); +} + +auto FpsGtResolutionEq(const rtc::VideoSinkWants& other_wants) { + return AllOf(WantsFps(Gt(other_wants.max_framerate_fps)), + WantsMaxPixels(Eq(other_wants.max_pixel_count))); +} + +auto FpsEqResolutionLt(const rtc::VideoSinkWants& other_wants) { + return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)), + WantsMaxPixels(Lt(other_wants.max_pixel_count))); +} + +auto FpsEqResolutionGt(const rtc::VideoSinkWants& other_wants) { + return AllOf(WantsFps(Eq(other_wants.max_framerate_fps)), + WantsMaxPixels(Gt(other_wants.max_pixel_count))); +} + class VideoStreamEncoderUnderTest : public VideoStreamEncoder { public: - VideoStreamEncoderUnderTest(SendStatisticsProxy* stats_proxy, + VideoStreamEncoderUnderTest(TimeController* time_controller, + TaskQueueFactory* task_queue_factory, + SendStatisticsProxy* stats_proxy, const VideoStreamEncoderSettings& settings, - TaskQueueFactory* task_queue_factory) - : VideoStreamEncoder(Clock::GetRealTimeClock(), + 
VideoStreamEncoder::BitrateAllocationCallbackType + allocation_callback_type) + : VideoStreamEncoder(time_controller->GetClock(), 1 /* number_of_cores */, stats_proxy, settings, std::unique_ptr( overuse_detector_proxy_ = new CpuOveruseDetectorProxy(stats_proxy)), - task_queue_factory), - fake_cpu_resource_(new FakeResource("FakeResource[CPU]")), - fake_quality_resource_(new FakeResource("FakeResource[QP]")) { - fake_cpu_resource_->Initialize(encoder_queue(), - resource_adaptation_queue()); - fake_quality_resource_->Initialize(encoder_queue(), - resource_adaptation_queue()); + task_queue_factory, + allocation_callback_type), + time_controller_(time_controller), + fake_cpu_resource_(FakeResource::Create("FakeResource[CPU]")), + fake_quality_resource_(FakeResource::Create("FakeResource[QP]")), + fake_adaptation_constraint_("FakeAdaptationConstraint") { InjectAdaptationResource(fake_quality_resource_, VideoAdaptationReason::kQuality); InjectAdaptationResource(fake_cpu_resource_, VideoAdaptationReason::kCpu); + InjectAdaptationConstraint(&fake_adaptation_constraint_); } void SetSourceAndWaitForRestrictionsUpdated( rtc::VideoSourceInterface* source, const DegradationPreference& degradation_preference) { - VideoSourceRestrictionsUpdatedListener listener; - AddAdaptationListenerForTesting(&listener); + FakeVideoSourceRestrictionsListener listener; + AddRestrictionsListenerForTesting(&listener); SetSource(source, degradation_preference); listener.restrictions_updated_event()->Wait(5000); - RemoveAdaptationListenerForTesting(&listener); + RemoveRestrictionsListenerForTesting(&listener); } void SetSourceAndWaitForFramerateUpdated( @@ -261,15 +396,6 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder { fraction_lost, round_trip_time_ms, cwnd_reduce_ratio); // Bitrate is updated on the encoder queue. WaitUntilTaskQueueIsIdle(); - // Give the managed resources time to react to the new bitrate. - // TODO(hbos): Can we await an appropriate event instead? 
- WaitUntilAdaptationTaskQueueIsIdle(); - } - - void WaitUntilAdaptationTaskQueueIsIdle() { - rtc::Event event; - resource_adaptation_queue()->PostTask([&event] { event.Set(); }); - ASSERT_TRUE(event.Wait(5000)); } // This is used as a synchronisation mechanism, to make sure that the @@ -283,85 +409,49 @@ class VideoStreamEncoderUnderTest : public VideoStreamEncoder { // Triggers resource usage measurements on the fake CPU resource. void TriggerCpuOveruse() { rtc::Event event; - resource_adaptation_queue()->PostTask([this, &event] { - fake_cpu_resource_->set_usage_state(ResourceUsageState::kOveruse); + encoder_queue()->PostTask([this, &event] { + fake_cpu_resource_->SetUsageState(ResourceUsageState::kOveruse); event.Set(); }); ASSERT_TRUE(event.Wait(5000)); + time_controller_->AdvanceTime(TimeDelta::Millis(0)); } + void TriggerCpuUnderuse() { rtc::Event event; - resource_adaptation_queue()->PostTask([this, &event] { - fake_cpu_resource_->set_usage_state(ResourceUsageState::kUnderuse); + encoder_queue()->PostTask([this, &event] { + fake_cpu_resource_->SetUsageState(ResourceUsageState::kUnderuse); event.Set(); }); ASSERT_TRUE(event.Wait(5000)); + time_controller_->AdvanceTime(TimeDelta::Millis(0)); } // Triggers resource usage measurements on the fake quality resource. 
void TriggerQualityLow() { rtc::Event event; - resource_adaptation_queue()->PostTask([this, &event] { - fake_quality_resource_->set_usage_state(ResourceUsageState::kOveruse); + encoder_queue()->PostTask([this, &event] { + fake_quality_resource_->SetUsageState(ResourceUsageState::kOveruse); event.Set(); }); ASSERT_TRUE(event.Wait(5000)); + time_controller_->AdvanceTime(TimeDelta::Millis(0)); } void TriggerQualityHigh() { rtc::Event event; - resource_adaptation_queue()->PostTask([this, &event] { - fake_quality_resource_->set_usage_state(ResourceUsageState::kUnderuse); + encoder_queue()->PostTask([this, &event] { + fake_quality_resource_->SetUsageState(ResourceUsageState::kUnderuse); event.Set(); }); ASSERT_TRUE(event.Wait(5000)); + time_controller_->AdvanceTime(TimeDelta::Millis(0)); } - // Fakes high QP resource usage measurements on the real - // QualityScalerResource. Returns whether or not QP samples would have been - // cleared if this had been a real signal from the QualityScaler. - bool TriggerQualityScalerHighQpAndReturnIfQpSamplesShouldBeCleared() { - rtc::scoped_refptr callback = - new FakeQualityScalerQpUsageHandlerCallback(); - encoder_queue()->PostTask([this, callback] { - // This will cause a "ping" between adaptation task queue and encoder - // queue. When we have the result, the |callback| will be notified. 
- quality_scaler_resource_for_testing()->OnReportQpUsageHigh(callback); - }); - EXPECT_TRUE(callback->WaitForQpUsageHandled()); - EXPECT_TRUE(callback->clear_qp_samples_result().has_value()); - return callback->clear_qp_samples_result().value(); - } - + TimeController* const time_controller_; CpuOveruseDetectorProxy* overuse_detector_proxy_; rtc::scoped_refptr fake_cpu_resource_; rtc::scoped_refptr fake_quality_resource_; -}; - -class VideoStreamFactory - : public VideoEncoderConfig::VideoStreamFactoryInterface { - public: - explicit VideoStreamFactory(size_t num_temporal_layers, int framerate) - : num_temporal_layers_(num_temporal_layers), framerate_(framerate) { - EXPECT_GT(num_temporal_layers, 0u); - EXPECT_GT(framerate, 0); - } - - private: - std::vector CreateEncoderStreams( - int width, - int height, - const VideoEncoderConfig& encoder_config) override { - std::vector streams = - test::CreateVideoStreams(width, height, encoder_config); - for (VideoStream& stream : streams) { - stream.num_temporal_layers = num_temporal_layers_; - stream.max_framerate = framerate_; - } - return streams; - } - - const size_t num_temporal_layers_; - const int framerate_; + FakeAdaptationConstraint fake_adaptation_constraint_; }; // Simulates simulcast behavior and makes highest stream resolutions divisible @@ -369,11 +459,7 @@ class VideoStreamFactory class CroppingVideoStreamFactory : public VideoEncoderConfig::VideoStreamFactoryInterface { public: - explicit CroppingVideoStreamFactory(size_t num_temporal_layers, int framerate) - : num_temporal_layers_(num_temporal_layers), framerate_(framerate) { - EXPECT_GT(num_temporal_layers, 0u); - EXPECT_GT(framerate, 0); - } + CroppingVideoStreamFactory() {} private: std::vector CreateEncoderStreams( @@ -382,34 +468,32 @@ class CroppingVideoStreamFactory const VideoEncoderConfig& encoder_config) override { std::vector streams = test::CreateVideoStreams( width - width % 4, height - height % 4, encoder_config); - for (VideoStream& stream : 
streams) { - stream.num_temporal_layers = num_temporal_layers_; - stream.max_framerate = framerate_; - } return streams; } - - const size_t num_temporal_layers_; - const int framerate_; }; class AdaptingFrameForwarder : public test::FrameForwarder { public: - AdaptingFrameForwarder() : adaptation_enabled_(false) {} + explicit AdaptingFrameForwarder(TimeController* time_controller) + : time_controller_(time_controller), adaptation_enabled_(false) {} ~AdaptingFrameForwarder() override {} void set_adaptation_enabled(bool enabled) { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); adaptation_enabled_ = enabled; } bool adaption_enabled() const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return adaptation_enabled_; } + // The "last wants" is a snapshot of the previous rtc::VideoSinkWants where + // the resolution or frame rate was different than it is currently. If + // something else is modified, such as encoder resolutions, but the resolution + // and frame rate stays the same, last wants is not updated. 
rtc::VideoSinkWants last_wants() const { - rtc::CritScope cs(&crit_); + MutexLock lock(&mutex_); return last_wants_; } @@ -417,24 +501,29 @@ class AdaptingFrameForwarder : public test::FrameForwarder { absl::optional last_sent_height() const { return last_height_; } void IncomingCapturedFrame(const VideoFrame& video_frame) override { + RTC_DCHECK(time_controller_->GetMainThread()->IsCurrent()); + time_controller_->AdvanceTime(TimeDelta::Millis(0)); + int cropped_width = 0; int cropped_height = 0; int out_width = 0; int out_height = 0; if (adaption_enabled()) { + RTC_DLOG(INFO) << "IncomingCapturedFrame: AdaptFrameResolution()" + << "w=" << video_frame.width() + << "h=" << video_frame.height(); if (adapter_.AdaptFrameResolution( video_frame.width(), video_frame.height(), video_frame.timestamp_us() * 1000, &cropped_width, &cropped_height, &out_width, &out_height)) { VideoFrame adapted_frame = VideoFrame::Builder() - .set_video_frame_buffer(new rtc::RefCountedObject( + .set_video_frame_buffer(rtc::make_ref_counted( nullptr, out_width, out_height)) - .set_timestamp_rtp(99) + .set_ntp_time_ms(video_frame.ntp_time_ms()) .set_timestamp_ms(99) .set_rotation(kVideoRotation_0) .build(); - adapted_frame.set_ntp_time_ms(video_frame.ntp_time_ms()); if (video_frame.has_update_rect()) { adapted_frame.set_update_rect( video_frame.update_rect().ScaleWithFrame( @@ -450,22 +539,41 @@ class AdaptingFrameForwarder : public test::FrameForwarder { last_height_ = absl::nullopt; } } else { + RTC_DLOG(INFO) << "IncomingCapturedFrame: adaptation not enabled"; test::FrameForwarder::IncomingCapturedFrame(video_frame); last_width_.emplace(video_frame.width()); last_height_.emplace(video_frame.height()); } } + void OnOutputFormatRequest(int width, int height) { + absl::optional> target_aspect_ratio = + std::make_pair(width, height); + absl::optional max_pixel_count = width * height; + absl::optional max_fps; + adapter_.OnOutputFormatRequest(target_aspect_ratio, max_pixel_count, + max_fps); + } + 
void AddOrUpdateSink(rtc::VideoSinkInterface* sink, const rtc::VideoSinkWants& wants) override { - rtc::CritScope cs(&crit_); - last_wants_ = sink_wants(); + MutexLock lock(&mutex_); + rtc::VideoSinkWants prev_wants = sink_wants_locked(); + bool did_adapt = + prev_wants.max_pixel_count != wants.max_pixel_count || + prev_wants.target_pixel_count != wants.target_pixel_count || + prev_wants.max_framerate_fps != wants.max_framerate_fps; + if (did_adapt) { + last_wants_ = prev_wants; + } adapter_.OnSinkWants(wants); - test::FrameForwarder::AddOrUpdateSink(sink, wants); + test::FrameForwarder::AddOrUpdateSinkLocked(sink, wants); } + + TimeController* const time_controller_; cricket::VideoAdapter adapter_; - bool adaptation_enabled_ RTC_GUARDED_BY(crit_); - rtc::VideoSinkWants last_wants_ RTC_GUARDED_BY(crit_); + bool adaptation_enabled_ RTC_GUARDED_BY(mutex_); + rtc::VideoSinkWants last_wants_ RTC_GUARDED_BY(mutex_); absl::optional last_width_; absl::optional last_height_; }; @@ -479,39 +587,42 @@ class MockableSendStatisticsProxy : public SendStatisticsProxy { : SendStatisticsProxy(clock, config, content_type) {} VideoSendStream::Stats GetStats() override { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mock_stats_) return *mock_stats_; return SendStatisticsProxy::GetStats(); } int GetInputFrameRate() const override { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); if (mock_stats_) return mock_stats_->input_frame_rate; return SendStatisticsProxy::GetInputFrameRate(); } void SetMockStats(const VideoSendStream::Stats& stats) { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); mock_stats_.emplace(stats); } void ResetMockStats() { - rtc::CritScope cs(&lock_); + MutexLock lock(&lock_); mock_stats_.reset(); } + void SetDroppedFrameCallback(std::function callback) { + on_frame_dropped_ = std::move(callback); + } + private: - rtc::CriticalSection lock_; - absl::optional mock_stats_ RTC_GUARDED_BY(lock_); -}; + void OnFrameDropped(DropReason reason) 
override { + SendStatisticsProxy::OnFrameDropped(reason); + if (on_frame_dropped_) + on_frame_dropped_(reason); + } -class MockBitrateObserver : public VideoBitrateAllocationObserver { - public: - MOCK_METHOD(void, - OnBitrateAllocationUpdated, - (const VideoBitrateAllocation&), - (override)); + mutable Mutex lock_; + absl::optional mock_stats_ RTC_GUARDED_BY(lock_); + std::function on_frame_dropped_; }; class MockEncoderSelector @@ -532,21 +643,20 @@ class MockEncoderSelector class VideoStreamEncoderTest : public ::testing::Test { public: - static const int kDefaultTimeoutMs = 30 * 1000; + static const int kDefaultTimeoutMs = 1000; VideoStreamEncoderTest() : video_send_config_(VideoSendStream::Config(nullptr)), codec_width_(320), codec_height_(240), max_framerate_(kDefaultFramerate), - task_queue_factory_(CreateDefaultTaskQueueFactory()), - fake_encoder_(), + fake_encoder_(&time_controller_), encoder_factory_(&fake_encoder_), stats_proxy_(new MockableSendStatisticsProxy( - Clock::GetRealTimeClock(), + time_controller_.GetClock(), video_send_config_, webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo)), - sink_(&fake_encoder_) {} + sink_(&time_controller_, &fake_encoder_) {} void SetUp() override { metrics::Reset(); @@ -559,26 +669,25 @@ class VideoStreamEncoderTest : public ::testing::Test { VideoEncoderConfig video_encoder_config; test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config); - video_encoder_config.video_stream_factory = - new rtc::RefCountedObject(1, max_framerate_); + EXPECT_EQ(1u, video_encoder_config.simulcast_layers.size()); + video_encoder_config.simulcast_layers[0].num_temporal_layers = 1; + video_encoder_config.simulcast_layers[0].max_framerate = max_framerate_; video_encoder_config_ = video_encoder_config.Copy(); - // Framerate limit is specified by the VideoStreamFactory. 
- std::vector streams = - video_encoder_config.video_stream_factory->CreateEncoderStreams( - codec_width_, codec_height_, video_encoder_config); - max_framerate_ = streams[0].max_framerate; - fake_clock_.SetTime(Timestamp::Micros(1234)); - ConfigureEncoder(std::move(video_encoder_config)); } - void ConfigureEncoder(VideoEncoderConfig video_encoder_config) { + void ConfigureEncoder( + VideoEncoderConfig video_encoder_config, + VideoStreamEncoder::BitrateAllocationCallbackType + allocation_callback_type = + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocationWhenScreenSharing) { if (video_stream_encoder_) video_stream_encoder_->Stop(); video_stream_encoder_.reset(new VideoStreamEncoderUnderTest( - stats_proxy_.get(), video_send_config_.encoder_settings, - task_queue_factory_.get())); + &time_controller_, GetTaskQueueFactory(), stats_proxy_.get(), + video_send_config_.encoder_settings, allocation_callback_type)); video_stream_encoder_->SetSink(&sink_, false /* rotation_applied */); video_stream_encoder_->SetSource( &video_source_, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); @@ -592,88 +701,104 @@ class VideoStreamEncoderTest : public ::testing::Test { size_t num_streams, size_t num_temporal_layers, unsigned char num_spatial_layers, - bool screenshare) { + bool screenshare, + VideoStreamEncoder::BitrateAllocationCallbackType + allocation_callback_type = + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocationWhenScreenSharing) { video_send_config_.rtp.payload_name = payload_name; VideoEncoderConfig video_encoder_config; - video_encoder_config.codec_type = PayloadStringToCodecType(payload_name); - video_encoder_config.number_of_streams = num_streams; + test::FillEncoderConfiguration(PayloadStringToCodecType(payload_name), + num_streams, &video_encoder_config); + for (auto& layer : video_encoder_config.simulcast_layers) { + layer.num_temporal_layers = num_temporal_layers; + layer.max_framerate = kDefaultFramerate; + 
} video_encoder_config.max_bitrate_bps = num_streams == 1 ? kTargetBitrateBps : kSimulcastTargetBitrateBps; - video_encoder_config.video_stream_factory = - new rtc::RefCountedObject(num_temporal_layers, - kDefaultFramerate); video_encoder_config.content_type = screenshare ? VideoEncoderConfig::ContentType::kScreen : VideoEncoderConfig::ContentType::kRealtimeVideo; if (payload_name == "VP9") { VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); vp9_settings.numberOfSpatialLayers = num_spatial_layers; + vp9_settings.automaticResizeOn = num_spatial_layers <= 1; video_encoder_config.encoder_specific_settings = - new rtc::RefCountedObject< - VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings); + rtc::make_ref_counted( + vp9_settings); } - ConfigureEncoder(std::move(video_encoder_config)); + ConfigureEncoder(std::move(video_encoder_config), allocation_callback_type); } VideoFrame CreateFrame(int64_t ntp_time_ms, rtc::Event* destruction_event) const { - VideoFrame frame = - VideoFrame::Builder() - .set_video_frame_buffer(new rtc::RefCountedObject( - destruction_event, codec_width_, codec_height_)) - .set_timestamp_rtp(99) - .set_timestamp_ms(99) - .set_rotation(kVideoRotation_0) - .build(); - frame.set_ntp_time_ms(ntp_time_ms); - return frame; + return VideoFrame::Builder() + .set_video_frame_buffer(rtc::make_ref_counted( + destruction_event, codec_width_, codec_height_)) + .set_ntp_time_ms(ntp_time_ms) + .set_timestamp_ms(99) + .set_rotation(kVideoRotation_0) + .build(); } VideoFrame CreateFrameWithUpdatedPixel(int64_t ntp_time_ms, rtc::Event* destruction_event, int offset_x) const { - VideoFrame frame = - VideoFrame::Builder() - .set_video_frame_buffer(new rtc::RefCountedObject( - destruction_event, codec_width_, codec_height_)) - .set_timestamp_rtp(99) - .set_timestamp_ms(99) - .set_rotation(kVideoRotation_0) - .set_update_rect(VideoFrame::UpdateRect{offset_x, 0, 1, 1}) - .build(); - frame.set_ntp_time_ms(ntp_time_ms); - return frame; + return 
VideoFrame::Builder() + .set_video_frame_buffer(rtc::make_ref_counted( + destruction_event, codec_width_, codec_height_)) + .set_ntp_time_ms(ntp_time_ms) + .set_timestamp_ms(99) + .set_rotation(kVideoRotation_0) + .set_update_rect(VideoFrame::UpdateRect{offset_x, 0, 1, 1}) + .build(); } VideoFrame CreateFrame(int64_t ntp_time_ms, int width, int height) const { - VideoFrame frame = - VideoFrame::Builder() - .set_video_frame_buffer( - new rtc::RefCountedObject(nullptr, width, height)) - .set_timestamp_rtp(99) - .set_timestamp_ms(99) - .set_rotation(kVideoRotation_0) - .build(); - frame.set_ntp_time_ms(ntp_time_ms); - frame.set_timestamp_us(ntp_time_ms * 1000); - return frame; + auto buffer = rtc::make_ref_counted(nullptr, width, height); + I420Buffer::SetBlack(buffer.get()); + return VideoFrame::Builder() + .set_video_frame_buffer(std::move(buffer)) + .set_ntp_time_ms(ntp_time_ms) + .set_timestamp_ms(ntp_time_ms) + .set_rotation(kVideoRotation_0) + .build(); + } + + VideoFrame CreateNV12Frame(int64_t ntp_time_ms, int width, int height) const { + return VideoFrame::Builder() + .set_video_frame_buffer(NV12Buffer::Create(width, height)) + .set_ntp_time_ms(ntp_time_ms) + .set_timestamp_ms(ntp_time_ms) + .set_rotation(kVideoRotation_0) + .build(); } VideoFrame CreateFakeNativeFrame(int64_t ntp_time_ms, rtc::Event* destruction_event, int width, int height) const { - VideoFrame frame = - VideoFrame::Builder() - .set_video_frame_buffer(new rtc::RefCountedObject( - destruction_event, width, height)) - .set_timestamp_rtp(99) - .set_timestamp_ms(99) - .set_rotation(kVideoRotation_0) - .build(); - frame.set_ntp_time_ms(ntp_time_ms); - return frame; + return VideoFrame::Builder() + .set_video_frame_buffer(rtc::make_ref_counted( + destruction_event, width, height)) + .set_ntp_time_ms(ntp_time_ms) + .set_timestamp_ms(99) + .set_rotation(kVideoRotation_0) + .build(); + } + + VideoFrame CreateFakeNV12NativeFrame(int64_t ntp_time_ms, + rtc::Event* destruction_event, + int width, + int 
height) const { + return VideoFrame::Builder() + .set_video_frame_buffer(rtc::make_ref_counted( + destruction_event, width, height)) + .set_ntp_time_ms(ntp_time_ms) + .set_timestamp_ms(99) + .set_rotation(kVideoRotation_0) + .build(); } VideoFrame CreateFakeNativeFrame(int64_t ntp_time_ms, @@ -683,11 +808,6 @@ class VideoStreamEncoderTest : public ::testing::Test { } void VerifyAllocatedBitrate(const VideoBitrateAllocation& expected_bitrate) { - MockBitrateObserver bitrate_observer; - video_stream_encoder_->SetBitrateAllocationObserver(&bitrate_observer); - - EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate)) - .Times(1); video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), @@ -696,152 +816,57 @@ class VideoStreamEncoderTest : public ::testing::Test { video_source_.IncomingCapturedFrame( CreateFrame(1, codec_width_, codec_height_)); WaitForEncodedFrame(1); - } - - void VerifyNoLimitation(const rtc::VideoSinkWants& wants) { - EXPECT_EQ(std::numeric_limits::max(), wants.max_framerate_fps); - EXPECT_EQ(std::numeric_limits::max(), wants.max_pixel_count); - EXPECT_FALSE(wants.target_pixel_count); - } - - void VerifyFpsEqResolutionEq(const rtc::VideoSinkWants& wants1, - const rtc::VideoSinkWants& wants2) { - EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps); - EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count); - } - - void VerifyFpsMaxResolutionMax(const rtc::VideoSinkWants& wants) { - EXPECT_EQ(kDefaultFramerate, wants.max_framerate_fps); - EXPECT_EQ(std::numeric_limits::max(), wants.max_pixel_count); - EXPECT_FALSE(wants.target_pixel_count); - } - - void VerifyFpsMaxResolutionLt(const rtc::VideoSinkWants& wants1, - const rtc::VideoSinkWants& wants2) { - EXPECT_EQ(kDefaultFramerate, wants1.max_framerate_fps); - EXPECT_LT(wants1.max_pixel_count, wants2.max_pixel_count); - EXPECT_GT(wants1.max_pixel_count, 0); - } - - void 
VerifyFpsMaxResolutionGt(const rtc::VideoSinkWants& wants1, - const rtc::VideoSinkWants& wants2) { - EXPECT_EQ(kDefaultFramerate, wants1.max_framerate_fps); - EXPECT_GT(wants1.max_pixel_count, wants2.max_pixel_count); - } - - void VerifyFpsMaxResolutionEq(const rtc::VideoSinkWants& wants1, - const rtc::VideoSinkWants& wants2) { - EXPECT_EQ(kDefaultFramerate, wants1.max_framerate_fps); - EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count); - } - - void VerifyFpsLtResolutionEq(const rtc::VideoSinkWants& wants1, - const rtc::VideoSinkWants& wants2) { - EXPECT_LT(wants1.max_framerate_fps, wants2.max_framerate_fps); - EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count); - } - - void VerifyFpsGtResolutionEq(const rtc::VideoSinkWants& wants1, - const rtc::VideoSinkWants& wants2) { - EXPECT_GT(wants1.max_framerate_fps, wants2.max_framerate_fps); - EXPECT_EQ(wants1.max_pixel_count, wants2.max_pixel_count); - } - - void VerifyFpsEqResolutionLt(const rtc::VideoSinkWants& wants1, - const rtc::VideoSinkWants& wants2) { - EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps); - EXPECT_LT(wants1.max_pixel_count, wants2.max_pixel_count); - EXPECT_GT(wants1.max_pixel_count, 0); - } - - void VerifyFpsEqResolutionGt(const rtc::VideoSinkWants& wants1, - const rtc::VideoSinkWants& wants2) { - EXPECT_EQ(wants1.max_framerate_fps, wants2.max_framerate_fps); - EXPECT_GT(wants1.max_pixel_count, wants2.max_pixel_count); - } - - void VerifyFpsMaxResolutionLt(const rtc::VideoSinkWants& wants, - int pixel_count) { - EXPECT_EQ(kDefaultFramerate, wants.max_framerate_fps); - EXPECT_LT(wants.max_pixel_count, pixel_count); - EXPECT_GT(wants.max_pixel_count, 0); - } - - void VerifyFpsLtResolutionMax(const rtc::VideoSinkWants& wants, int fps) { - EXPECT_LT(wants.max_framerate_fps, fps); - EXPECT_EQ(std::numeric_limits::max(), wants.max_pixel_count); - EXPECT_FALSE(wants.target_pixel_count); - } - - void VerifyFpsEqResolutionMax(const rtc::VideoSinkWants& wants, - int expected_fps) 
{ - EXPECT_EQ(expected_fps, wants.max_framerate_fps); - EXPECT_EQ(std::numeric_limits::max(), wants.max_pixel_count); - EXPECT_FALSE(wants.target_pixel_count); - } - - void VerifyBalancedModeFpsRange(const rtc::VideoSinkWants& wants, - int last_frame_pixels) { - // Balanced mode should always scale FPS to the desired range before - // attempting to scale resolution. - int fps_limit = wants.max_framerate_fps; - if (last_frame_pixels <= 320 * 240) { - EXPECT_LE(7, fps_limit); - EXPECT_LE(fps_limit, 10); - } else if (last_frame_pixels <= 480 * 270) { - EXPECT_LE(10, fps_limit); - EXPECT_LE(fps_limit, 15); - } else if (last_frame_pixels <= 640 * 480) { - EXPECT_LE(15, fps_limit); - } else { - EXPECT_EQ(kDefaultFramerate, fps_limit); - } + EXPECT_EQ(expected_bitrate, sink_.GetLastVideoBitrateAllocation()); } void WaitForEncodedFrame(int64_t expected_ntp_time) { sink_.WaitForEncodedFrame(expected_ntp_time); - fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); + AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); } bool TimedWaitForEncodedFrame(int64_t expected_ntp_time, int64_t timeout_ms) { bool ok = sink_.TimedWaitForEncodedFrame(expected_ntp_time, timeout_ms); - fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); + AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); return ok; } void WaitForEncodedFrame(uint32_t expected_width, uint32_t expected_height) { sink_.WaitForEncodedFrame(expected_width, expected_height); - fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); + AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); } void ExpectDroppedFrame() { sink_.ExpectDroppedFrame(); - fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); + AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); } bool WaitForFrame(int64_t timeout_ms) { bool ok = sink_.WaitForFrame(timeout_ms); - fake_clock_.AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); + AdvanceTime(TimeDelta::Seconds(1) / max_framerate_); return ok; } class 
TestEncoder : public test::FakeEncoder { public: - TestEncoder() : FakeEncoder(Clock::GetRealTimeClock()) {} + explicit TestEncoder(TimeController* time_controller) + : FakeEncoder(time_controller->GetClock()), + time_controller_(time_controller) { + RTC_DCHECK(time_controller_); + } VideoCodec codec_config() const { - rtc::CritScope lock(&crit_sect_); + MutexLock lock(&mutex_); return config_; } void BlockNextEncode() { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); block_next_encode_ = true; } VideoEncoder::EncoderInfo GetEncoderInfo() const override { - rtc::CritScope lock(&local_crit_sect_); - EncoderInfo info; + MutexLock lock(&local_mutex_); + EncoderInfo info = FakeEncoder::GetEncoderInfo(); if (initialized_ == EncoderState::kInitialized) { if (quality_scaling_) { info.scaling_settings = VideoEncoder::ScalingSettings( @@ -850,20 +875,25 @@ class VideoStreamEncoderTest : public ::testing::Test { info.is_hardware_accelerated = is_hardware_accelerated_; for (int i = 0; i < kMaxSpatialLayers; ++i) { if (temporal_layers_supported_[i]) { + info.fps_allocation[i].clear(); int num_layers = temporal_layers_supported_[i].value() ? 
2 : 1; - info.fps_allocation[i].resize(num_layers); + for (int tid = 0; tid < num_layers; ++tid) + info.fps_allocation[i].push_back(255 / (num_layers - tid)); } } } info.resolution_bitrate_limits = resolution_bitrate_limits_; info.requested_resolution_alignment = requested_resolution_alignment_; + info.apply_alignment_to_all_simulcast_layers = + apply_alignment_to_all_simulcast_layers_; + info.preferred_pixel_formats = preferred_pixel_formats_; return info; } int32_t RegisterEncodeCompleteCallback( EncodedImageCallback* callback) override { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); encoded_image_callback_ = callback; return FakeEncoder::RegisterEncodeCompleteCallback(callback); } @@ -872,60 +902,65 @@ class VideoStreamEncoderTest : public ::testing::Test { void CheckLastTimeStampsMatch(int64_t ntp_time_ms, uint32_t timestamp) const { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); EXPECT_EQ(timestamp_, timestamp); EXPECT_EQ(ntp_time_ms_, ntp_time_ms); } void SetQualityScaling(bool b) { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); quality_scaling_ = b; } void SetRequestedResolutionAlignment(int requested_resolution_alignment) { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); requested_resolution_alignment_ = requested_resolution_alignment; } + void SetApplyAlignmentToAllSimulcastLayers(bool b) { + MutexLock lock(&local_mutex_); + apply_alignment_to_all_simulcast_layers_ = b; + } + void SetIsHardwareAccelerated(bool is_hardware_accelerated) { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); is_hardware_accelerated_ = is_hardware_accelerated; } void SetTemporalLayersSupported(size_t spatial_idx, bool supported) { RTC_DCHECK_LT(spatial_idx, kMaxSpatialLayers); - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); temporal_layers_supported_[spatial_idx] = supported; } void SetResolutionBitrateLimits( 
std::vector thresholds) { - rtc::CritScope cs(&local_crit_sect_); + MutexLock lock(&local_mutex_); resolution_bitrate_limits_ = thresholds; } void ForceInitEncodeFailure(bool force_failure) { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); force_init_encode_failed_ = force_failure; } void SimulateOvershoot(double rate_factor) { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); rate_factor_ = rate_factor; } uint32_t GetLastFramerate() const { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); return last_framerate_; } VideoFrame::UpdateRect GetLastUpdateRect() const { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); return last_update_rect_; } const std::vector& LastFrameTypes() const { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); return last_frame_types_; } @@ -934,27 +969,26 @@ class VideoStreamEncoderTest : public ::testing::Test { keyframe ? VideoFrameType::kVideoFrameKey : VideoFrameType::kVideoFrameDelta}; { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); last_frame_types_ = frame_type; } FakeEncoder::Encode(input_image, &frame_type); } - void InjectEncodedImage(const EncodedImage& image) { - rtc::CritScope lock(&local_crit_sect_); - encoded_image_callback_->OnEncodedImage(image, nullptr, nullptr); + void InjectEncodedImage(const EncodedImage& image, + const CodecSpecificInfo* codec_specific_info) { + MutexLock lock(&local_mutex_); + encoded_image_callback_->OnEncodedImage(image, codec_specific_info); } - void InjectEncodedImage(const EncodedImage& image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) { - rtc::CritScope lock(&local_crit_sect_); - encoded_image_callback_->OnEncodedImage(image, codec_specific_info, - fragmentation); + void SetEncodedImageData( + rtc::scoped_refptr encoded_image_data) { + MutexLock lock(&local_mutex_); + encoded_image_data_ = 
encoded_image_data; } void ExpectNullFrame() { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); expect_null_frame_ = true; } @@ -965,22 +999,49 @@ class VideoStreamEncoderTest : public ::testing::Test { return settings; } + int GetLastInputWidth() const { + MutexLock lock(&local_mutex_); + return last_input_width_; + } + + int GetLastInputHeight() const { + MutexLock lock(&local_mutex_); + return last_input_height_; + } + + absl::optional GetLastInputPixelFormat() { + MutexLock lock(&local_mutex_); + return last_input_pixel_format_; + } + int GetNumEncoderInitializations() const { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); return num_encoder_initializations_; } int GetNumSetRates() const { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); return num_set_rates_; } + VideoCodec video_codec() const { + MutexLock lock(&local_mutex_); + return video_codec_; + } + + void SetPreferredPixelFormats( + absl::InlinedVector + pixel_formats) { + MutexLock lock(&local_mutex_); + preferred_pixel_formats_ = std::move(pixel_formats); + } + private: int32_t Encode(const VideoFrame& input_image, const std::vector* frame_types) override { bool block_encode; { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); if (expect_null_frame_) { EXPECT_EQ(input_image.timestamp(), 0u); EXPECT_EQ(input_image.width(), 1); @@ -1000,21 +1061,39 @@ class VideoStreamEncoderTest : public ::testing::Test { block_next_encode_ = false; last_update_rect_ = input_image.update_rect(); last_frame_types_ = *frame_types; + last_input_pixel_format_ = input_image.video_frame_buffer()->type(); } int32_t result = FakeEncoder::Encode(input_image, frame_types); if (block_encode) EXPECT_TRUE(continue_encode_event_.Wait(kDefaultTimeoutMs)); + return result; } + CodecSpecificInfo EncodeHook( + EncodedImage& encoded_image, + rtc::scoped_refptr buffer) override { + CodecSpecificInfo codec_specific; + { + MutexLock 
lock(&mutex_); + codec_specific.codecType = config_.codecType; + } + MutexLock lock(&local_mutex_); + if (encoded_image_data_) { + encoded_image.SetEncodedData(encoded_image_data_); + } + return codec_specific; + } + int32_t InitEncode(const VideoCodec* config, const Settings& settings) override { int res = FakeEncoder::InitEncode(config, settings); - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); EXPECT_EQ(initialized_, EncoderState::kUninitialized); ++num_encoder_initializations_; + video_codec_ = *config; if (config->codecType == kVideoCodecVP8) { // Simulate setting up temporal layers, in order to validate the life @@ -1033,14 +1112,14 @@ class VideoStreamEncoderTest : public ::testing::Test { } int32_t Release() override { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); EXPECT_NE(initialized_, EncoderState::kUninitialized); initialized_ = EncoderState::kUninitialized; return FakeEncoder::Release(); } void SetRates(const RateControlParameters& parameters) { - rtc::CritScope lock(&local_crit_sect_); + MutexLock lock(&local_mutex_); num_set_rates_++; VideoBitrateAllocation adjusted_rate_allocation; for (size_t si = 0; si < kMaxSpatialLayers; ++si) { @@ -1060,49 +1139,60 @@ class VideoStreamEncoderTest : public ::testing::Test { FakeEncoder::SetRates(adjusted_paramters); } - rtc::CriticalSection local_crit_sect_; + TimeController* const time_controller_; + mutable Mutex local_mutex_; enum class EncoderState { kUninitialized, kInitializationFailed, kInitialized - } initialized_ RTC_GUARDED_BY(local_crit_sect_) = - EncoderState::kUninitialized; - bool block_next_encode_ RTC_GUARDED_BY(local_crit_sect_) = false; + } initialized_ RTC_GUARDED_BY(local_mutex_) = EncoderState::kUninitialized; + bool block_next_encode_ RTC_GUARDED_BY(local_mutex_) = false; rtc::Event continue_encode_event_; - uint32_t timestamp_ RTC_GUARDED_BY(local_crit_sect_) = 0; - int64_t ntp_time_ms_ RTC_GUARDED_BY(local_crit_sect_) = 0; - int 
last_input_width_ RTC_GUARDED_BY(local_crit_sect_) = 0; - int last_input_height_ RTC_GUARDED_BY(local_crit_sect_) = 0; - bool quality_scaling_ RTC_GUARDED_BY(local_crit_sect_) = true; - int requested_resolution_alignment_ RTC_GUARDED_BY(local_crit_sect_) = 1; - bool is_hardware_accelerated_ RTC_GUARDED_BY(local_crit_sect_) = false; + uint32_t timestamp_ RTC_GUARDED_BY(local_mutex_) = 0; + int64_t ntp_time_ms_ RTC_GUARDED_BY(local_mutex_) = 0; + int last_input_width_ RTC_GUARDED_BY(local_mutex_) = 0; + int last_input_height_ RTC_GUARDED_BY(local_mutex_) = 0; + bool quality_scaling_ RTC_GUARDED_BY(local_mutex_) = true; + int requested_resolution_alignment_ RTC_GUARDED_BY(local_mutex_) = 1; + bool apply_alignment_to_all_simulcast_layers_ RTC_GUARDED_BY(local_mutex_) = + false; + bool is_hardware_accelerated_ RTC_GUARDED_BY(local_mutex_) = false; + rtc::scoped_refptr encoded_image_data_ + RTC_GUARDED_BY(local_mutex_); std::unique_ptr frame_buffer_controller_ - RTC_GUARDED_BY(local_crit_sect_); + RTC_GUARDED_BY(local_mutex_); absl::optional temporal_layers_supported_[kMaxSpatialLayers] RTC_GUARDED_BY( - local_crit_sect_); - bool force_init_encode_failed_ RTC_GUARDED_BY(local_crit_sect_) = false; - double rate_factor_ RTC_GUARDED_BY(local_crit_sect_) = 1.0; - uint32_t last_framerate_ RTC_GUARDED_BY(local_crit_sect_) = 0; + local_mutex_); + bool force_init_encode_failed_ RTC_GUARDED_BY(local_mutex_) = false; + double rate_factor_ RTC_GUARDED_BY(local_mutex_) = 1.0; + uint32_t last_framerate_ RTC_GUARDED_BY(local_mutex_) = 0; absl::optional last_rate_control_settings_; - VideoFrame::UpdateRect last_update_rect_ - RTC_GUARDED_BY(local_crit_sect_) = {0, 0, 0, 0}; + VideoFrame::UpdateRect last_update_rect_ RTC_GUARDED_BY(local_mutex_) = { + 0, 0, 0, 0}; std::vector last_frame_types_; bool expect_null_frame_ = false; - EncodedImageCallback* encoded_image_callback_ - RTC_GUARDED_BY(local_crit_sect_) = nullptr; + EncodedImageCallback* encoded_image_callback_ 
RTC_GUARDED_BY(local_mutex_) = + nullptr; NiceMock fec_controller_override_; - int num_encoder_initializations_ RTC_GUARDED_BY(local_crit_sect_) = 0; + int num_encoder_initializations_ RTC_GUARDED_BY(local_mutex_) = 0; std::vector resolution_bitrate_limits_ - RTC_GUARDED_BY(local_crit_sect_); - int num_set_rates_ RTC_GUARDED_BY(local_crit_sect_) = 0; + RTC_GUARDED_BY(local_mutex_); + int num_set_rates_ RTC_GUARDED_BY(local_mutex_) = 0; + VideoCodec video_codec_ RTC_GUARDED_BY(local_mutex_); + absl::optional last_input_pixel_format_ + RTC_GUARDED_BY(local_mutex_); + absl::InlinedVector + preferred_pixel_formats_ RTC_GUARDED_BY(local_mutex_); }; class TestSink : public VideoStreamEncoder::EncoderSink { public: - explicit TestSink(TestEncoder* test_encoder) - : test_encoder_(test_encoder) {} + TestSink(TimeController* time_controller, TestEncoder* test_encoder) + : time_controller_(time_controller), test_encoder_(test_encoder) { + RTC_DCHECK(time_controller_); + } void WaitForEncodedFrame(int64_t expected_ntp_time) { EXPECT_TRUE( @@ -1112,10 +1202,10 @@ class VideoStreamEncoderTest : public ::testing::Test { bool TimedWaitForEncodedFrame(int64_t expected_ntp_time, int64_t timeout_ms) { uint32_t timestamp = 0; - if (!encoded_frame_event_.Wait(timeout_ms)) + if (!WaitForFrame(timeout_ms)) return false; { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); timestamp = last_timestamp_; } test_encoder_->CheckLastTimeStampsMatch(expected_ntp_time, timestamp); @@ -1124,7 +1214,7 @@ class VideoStreamEncoderTest : public ::testing::Test { void WaitForEncodedFrame(uint32_t expected_width, uint32_t expected_height) { - EXPECT_TRUE(encoded_frame_event_.Wait(kDefaultTimeoutMs)); + EXPECT_TRUE(WaitForFrame(kDefaultTimeoutMs)); CheckLastFrameSizeMatches(expected_width, expected_height); } @@ -1133,7 +1223,7 @@ class VideoStreamEncoderTest : public ::testing::Test { uint32_t width = 0; uint32_t height = 0; { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); width = 
last_width_; height = last_height_; } @@ -1141,90 +1231,100 @@ class VideoStreamEncoderTest : public ::testing::Test { EXPECT_EQ(expected_width, width); } - void CheckLastFrameSizeIsMultipleOf(int resolution_alignment) { - int width = 0; - int height = 0; - { - rtc::CritScope lock(&crit_); - width = last_width_; - height = last_height_; - } - EXPECT_EQ(width % resolution_alignment, 0); - EXPECT_EQ(height % resolution_alignment, 0); - } - void CheckLastFrameRotationMatches(VideoRotation expected_rotation) { VideoRotation rotation; { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); rotation = last_rotation_; } EXPECT_EQ(expected_rotation, rotation); } - void ExpectDroppedFrame() { EXPECT_FALSE(encoded_frame_event_.Wait(100)); } + void ExpectDroppedFrame() { EXPECT_FALSE(WaitForFrame(100)); } bool WaitForFrame(int64_t timeout_ms) { - return encoded_frame_event_.Wait(timeout_ms); + RTC_DCHECK(time_controller_->GetMainThread()->IsCurrent()); + bool ret = encoded_frame_event_.Wait(timeout_ms); + time_controller_->AdvanceTime(TimeDelta::Millis(0)); + return ret; } void SetExpectNoFrames() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); expect_frames_ = false; } int number_of_reconfigurations() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return number_of_reconfigurations_; } int last_min_transmit_bitrate() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return min_transmit_bitrate_bps_; } void SetNumExpectedLayers(size_t num_layers) { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); num_expected_layers_ = num_layers; } int64_t GetLastCaptureTimeMs() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return last_capture_time_ms_; } + const EncodedImage& GetLastEncodedImage() { + MutexLock lock(&mutex_); + return last_encoded_image_; + } + std::vector GetLastEncodedImageData() { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return std::move(last_encoded_image_data_); } - 
RTPFragmentationHeader GetLastFragmentation() { - rtc::CritScope lock(&crit_); - return std::move(last_fragmentation_); + VideoBitrateAllocation GetLastVideoBitrateAllocation() { + MutexLock lock(&mutex_); + return last_bitrate_allocation_; + } + + int number_of_bitrate_allocations() const { + MutexLock lock(&mutex_); + return number_of_bitrate_allocations_; + } + + VideoLayersAllocation GetLastVideoLayersAllocation() { + MutexLock lock(&mutex_); + return last_layers_allocation_; + } + + int number_of_layers_allocations() const { + MutexLock lock(&mutex_); + return number_of_layers_allocations_; } private: Result OnEncodedImage( const EncodedImage& encoded_image, - const CodecSpecificInfo* codec_specific_info, - const RTPFragmentationHeader* fragmentation) override { - rtc::CritScope lock(&crit_); + const CodecSpecificInfo* codec_specific_info) override { + MutexLock lock(&mutex_); EXPECT_TRUE(expect_frames_); + last_encoded_image_ = EncodedImage(encoded_image); last_encoded_image_data_ = std::vector( encoded_image.data(), encoded_image.data() + encoded_image.size()); - if (fragmentation) { - last_fragmentation_.CopyFrom(*fragmentation); - } uint32_t timestamp = encoded_image.Timestamp(); if (last_timestamp_ != timestamp) { num_received_layers_ = 1; + last_width_ = encoded_image._encodedWidth; + last_height_ = encoded_image._encodedHeight; } else { ++num_received_layers_; + last_width_ = std::max(encoded_image._encodedWidth, last_width_); + last_height_ = std::max(encoded_image._encodedHeight, last_height_); } last_timestamp_ = timestamp; last_capture_time_ms_ = encoded_image.capture_time_ms_; - last_width_ = encoded_image._encodedWidth; - last_height_ = encoded_image._encodedHeight; last_rotation_ = encoded_image.rotation_; if (num_received_layers_ == num_expected_layers_) { encoded_frame_event_.Set(); @@ -1237,16 +1337,42 @@ class VideoStreamEncoderTest : public ::testing::Test { bool is_svc, VideoEncoderConfig::ContentType content_type, int 
min_transmit_bitrate_bps) override { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); ++number_of_reconfigurations_; min_transmit_bitrate_bps_ = min_transmit_bitrate_bps; } - rtc::CriticalSection crit_; + void OnBitrateAllocationUpdated( + const VideoBitrateAllocation& allocation) override { + MutexLock lock(&mutex_); + ++number_of_bitrate_allocations_; + last_bitrate_allocation_ = allocation; + } + + void OnVideoLayersAllocationUpdated( + VideoLayersAllocation allocation) override { + MutexLock lock(&mutex_); + ++number_of_layers_allocations_; + last_layers_allocation_ = allocation; + rtc::StringBuilder log; + for (const auto& layer : allocation.active_spatial_layers) { + log << layer.width << "x" << layer.height << "@" << layer.frame_rate_fps + << "["; + for (const auto target_bitrate : + layer.target_bitrate_per_temporal_layer) { + log << target_bitrate.kbps() << ","; + } + log << "]"; + } + RTC_DLOG(INFO) << "OnVideoLayersAllocationUpdated " << log.str(); + } + + TimeController* const time_controller_; + mutable Mutex mutex_; TestEncoder* test_encoder_; rtc::Event encoded_frame_event_; + EncodedImage last_encoded_image_; std::vector last_encoded_image_data_; - RTPFragmentationHeader last_fragmentation_; uint32_t last_timestamp_ = 0; int64_t last_capture_time_ms_ = 0; uint32_t last_height_ = 0; @@ -1257,6 +1383,10 @@ class VideoStreamEncoderTest : public ::testing::Test { bool expect_frames_ = true; int number_of_reconfigurations_ = 0; int min_transmit_bitrate_bps_ = 0; + VideoBitrateAllocation last_bitrate_allocation_ RTC_GUARDED_BY(&mutex_); + int number_of_bitrate_allocations_ RTC_GUARDED_BY(&mutex_) = 0; + VideoLayersAllocation last_layers_allocation_ RTC_GUARDED_BY(&mutex_); + int number_of_layers_allocations_ RTC_GUARDED_BY(&mutex_) = 0; }; class VideoBitrateAllocatorProxyFactory @@ -1268,36 +1398,47 @@ class VideoStreamEncoderTest : public ::testing::Test { std::unique_ptr CreateVideoBitrateAllocator( const VideoCodec& codec) override { - 
rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); codec_config_ = codec; return bitrate_allocator_factory_->CreateVideoBitrateAllocator(codec); } VideoCodec codec_config() const { - rtc::CritScope lock(&crit_); + MutexLock lock(&mutex_); return codec_config_; } private: std::unique_ptr bitrate_allocator_factory_; - rtc::CriticalSection crit_; - VideoCodec codec_config_ RTC_GUARDED_BY(crit_); + mutable Mutex mutex_; + VideoCodec codec_config_ RTC_GUARDED_BY(mutex_); }; + Clock* clock() { return time_controller_.GetClock(); } + void AdvanceTime(TimeDelta duration) { + time_controller_.AdvanceTime(duration); + } + + int64_t CurrentTimeMs() { return clock()->CurrentTime().ms(); } + + protected: + virtual TaskQueueFactory* GetTaskQueueFactory() { + return time_controller_.GetTaskQueueFactory(); + } + + GlobalSimulatedTimeController time_controller_{Timestamp::Micros(1234)}; VideoSendStream::Config video_send_config_; VideoEncoderConfig video_encoder_config_; int codec_width_; int codec_height_; int max_framerate_; - rtc::ScopedFakeClock fake_clock_; - const std::unique_ptr task_queue_factory_; TestEncoder fake_encoder_; test::VideoEncoderProxyFactory encoder_factory_; VideoBitrateAllocatorProxyFactory bitrate_allocator_factory_; std::unique_ptr stats_proxy_; TestSink sink_; - AdaptingFrameForwarder video_source_; + AdaptingFrameForwarder video_source_{&time_controller_}; std::unique_ptr video_stream_encoder_; }; @@ -1320,6 +1461,7 @@ TEST_F(VideoStreamEncoderTest, DropsFramesBeforeFirstOnBitrateUpdated) { // frames means that the first frame will be dropped and the second frame will // be sent when the encoder is enabled. 
video_source_.IncomingCapturedFrame(CreateFrame(1, &frame_destroyed_event)); + AdvanceTime(TimeDelta::Millis(10)); video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr)); EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeoutMs)); @@ -1395,12 +1537,31 @@ TEST_F(VideoStreamEncoderTest, DropsFrameAfterStop) { EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeoutMs)); } -TEST_F(VideoStreamEncoderTest, DropsPendingFramesOnSlowEncode) { +class VideoStreamEncoderBlockedTest : public VideoStreamEncoderTest { + public: + VideoStreamEncoderBlockedTest() {} + + TaskQueueFactory* GetTaskQueueFactory() override { + return task_queue_factory_.get(); + } + + private: + std::unique_ptr task_queue_factory_ = + CreateDefaultTaskQueueFactory(); +}; + +TEST_F(VideoStreamEncoderBlockedTest, DropsPendingFramesOnSlowEncode) { video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + int dropped_count = 0; + stats_proxy_->SetDroppedFrameCallback( + [&dropped_count](VideoStreamEncoderObserver::DropReason) { + ++dropped_count; + }); + fake_encoder_.BlockNextEncode(); video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); WaitForEncodedFrame(1); @@ -1412,9 +1573,11 @@ TEST_F(VideoStreamEncoderTest, DropsPendingFramesOnSlowEncode) { WaitForEncodedFrame(3); video_stream_encoder_->Stop(); + + EXPECT_EQ(1, dropped_count); } -TEST_F(VideoStreamEncoderTest, DropFrameWithFailedI420Conversion) { +TEST_F(VideoStreamEncoderTest, NativeFrameWithoutI420SupportGetsDelivered) { video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), @@ -1423,15 +1586,21 @@ TEST_F(VideoStreamEncoderTest, DropFrameWithFailedI420Conversion) { rtc::Event frame_destroyed_event; video_source_.IncomingCapturedFrame( CreateFakeNativeFrame(1, &frame_destroyed_event)); - 
ExpectDroppedFrame(); - EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeoutMs)); + WaitForEncodedFrame(1); + EXPECT_EQ(VideoFrameBuffer::Type::kNative, + fake_encoder_.GetLastInputPixelFormat()); + EXPECT_EQ(fake_encoder_.codec_config().width, + fake_encoder_.GetLastInputWidth()); + EXPECT_EQ(fake_encoder_.codec_config().height, + fake_encoder_.GetLastInputHeight()); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, DropFrameWithFailedI420ConversionWithCrop) { +TEST_F(VideoStreamEncoderTest, + NativeFrameWithoutI420SupportGetsCroppedIfNecessary) { // Use the cropping factory. video_encoder_config_.video_stream_factory = - new rtc::RefCountedObject(1, 30); + rtc::make_ref_counted(); video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config_), kMaxPayloadLength); video_stream_encoder_->WaitUntilTaskQueueIsIdle(); @@ -1453,75 +1622,162 @@ TEST_F(VideoStreamEncoderTest, DropFrameWithFailedI420ConversionWithCrop) { rtc::Event frame_destroyed_event; video_source_.IncomingCapturedFrame(CreateFakeNativeFrame( 2, &frame_destroyed_event, codec_width_ + 1, codec_height_ + 1)); - ExpectDroppedFrame(); - EXPECT_TRUE(frame_destroyed_event.Wait(kDefaultTimeoutMs)); + WaitForEncodedFrame(2); + EXPECT_EQ(VideoFrameBuffer::Type::kNative, + fake_encoder_.GetLastInputPixelFormat()); + EXPECT_EQ(fake_encoder_.codec_config().width, + fake_encoder_.GetLastInputWidth()); + EXPECT_EQ(fake_encoder_.codec_config().height, + fake_encoder_.GetLastInputHeight()); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, DropsFramesWhenCongestionWindowPushbackSet) { +TEST_F(VideoStreamEncoderTest, NonI420FramesShouldNotBeConvertedToI420) { video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); + + video_source_.IncomingCapturedFrame( + 
CreateNV12Frame(1, codec_width_, codec_height_)); WaitForEncodedFrame(1); + EXPECT_EQ(VideoFrameBuffer::Type::kNV12, + fake_encoder_.GetLastInputPixelFormat()); + video_stream_encoder_->Stop(); +} +TEST_F(VideoStreamEncoderTest, NativeFrameGetsDelivered_NoFrameTypePreference) { video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0.5); - // The congestion window pushback is set to 0.5, which will drop 1/2 of - // frames. Adding two frames means that the first frame will be dropped and - // the second frame will be sent to the encoder. - video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr)); - video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr)); - WaitForEncodedFrame(3); - video_source_.IncomingCapturedFrame(CreateFrame(4, nullptr)); - video_source_.IncomingCapturedFrame(CreateFrame(5, nullptr)); - WaitForEncodedFrame(5); - EXPECT_EQ(2u, stats_proxy_->GetStats().frames_dropped_by_congestion_window); + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + fake_encoder_.SetPreferredPixelFormats({}); + + rtc::Event frame_destroyed_event; + video_source_.IncomingCapturedFrame(CreateFakeNV12NativeFrame( + 1, &frame_destroyed_event, codec_width_, codec_height_)); + WaitForEncodedFrame(1); + EXPECT_EQ(VideoFrameBuffer::Type::kNative, + fake_encoder_.GetLastInputPixelFormat()); video_stream_encoder_->Stop(); } TEST_F(VideoStreamEncoderTest, - ConfigureEncoderTriggersOnEncoderConfigurationChanged) { + NativeFrameGetsDelivered_PixelFormatPreferenceMatches) { video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - EXPECT_EQ(0, sink_.number_of_reconfigurations()); - - // Capture a frame and wait for it to synchronize with the encoder thread. 
- video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); - WaitForEncodedFrame(1); - // The encoder will have been configured once when the first frame is - // received. - EXPECT_EQ(1, sink_.number_of_reconfigurations()); - - VideoEncoderConfig video_encoder_config; - test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config); - video_encoder_config.min_transmit_bitrate_bps = 9999; - video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config), - kMaxPayloadLength); - // Capture a frame and wait for it to synchronize with the encoder thread. - video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr)); - WaitForEncodedFrame(2); - EXPECT_EQ(2, sink_.number_of_reconfigurations()); - EXPECT_EQ(9999, sink_.last_min_transmit_bitrate()); + fake_encoder_.SetPreferredPixelFormats({VideoFrameBuffer::Type::kNV12}); + rtc::Event frame_destroyed_event; + video_source_.IncomingCapturedFrame(CreateFakeNV12NativeFrame( + 1, &frame_destroyed_event, codec_width_, codec_height_)); + WaitForEncodedFrame(1); + EXPECT_EQ(VideoFrameBuffer::Type::kNative, + fake_encoder_.GetLastInputPixelFormat()); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, FrameResolutionChangeReconfigureEncoder) { +TEST_F(VideoStreamEncoderTest, NativeFrameGetsDelivered_MappingIsNotFeasible) { video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - // Capture a frame and wait for it to synchronize with the encoder thread. - video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); - WaitForEncodedFrame(1); + // Fake NV12 native frame does not allow mapping to I444. 
+ fake_encoder_.SetPreferredPixelFormats({VideoFrameBuffer::Type::kI444}); + + rtc::Event frame_destroyed_event; + video_source_.IncomingCapturedFrame(CreateFakeNV12NativeFrame( + 1, &frame_destroyed_event, codec_width_, codec_height_)); + WaitForEncodedFrame(1); + EXPECT_EQ(VideoFrameBuffer::Type::kNative, + fake_encoder_.GetLastInputPixelFormat()); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, NativeFrameGetsDelivered_BackedByNV12) { + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + rtc::Event frame_destroyed_event; + video_source_.IncomingCapturedFrame(CreateFakeNV12NativeFrame( + 1, &frame_destroyed_event, codec_width_, codec_height_)); + WaitForEncodedFrame(1); + EXPECT_EQ(VideoFrameBuffer::Type::kNative, + fake_encoder_.GetLastInputPixelFormat()); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, DropsFramesWhenCongestionWindowPushbackSet) { + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); + WaitForEncodedFrame(1); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0.5); + // The congestion window pushback is set to 0.5, which will drop 1/2 of + // frames. Adding two frames means that the first frame will be dropped and + // the second frame will be sent to the encoder. 
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr)); + video_source_.IncomingCapturedFrame(CreateFrame(3, nullptr)); + WaitForEncodedFrame(3); + video_source_.IncomingCapturedFrame(CreateFrame(4, nullptr)); + video_source_.IncomingCapturedFrame(CreateFrame(5, nullptr)); + WaitForEncodedFrame(5); + EXPECT_EQ(2u, stats_proxy_->GetStats().frames_dropped_by_congestion_window); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ConfigureEncoderTriggersOnEncoderConfigurationChanged) { + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + EXPECT_EQ(0, sink_.number_of_reconfigurations()); + + // Capture a frame and wait for it to synchronize with the encoder thread. + video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); + WaitForEncodedFrame(1); + // The encoder will have been configured once when the first frame is + // received. + EXPECT_EQ(1, sink_.number_of_reconfigurations()); + + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config); + video_encoder_config.min_transmit_bitrate_bps = 9999; + video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config), + kMaxPayloadLength); + + // Capture a frame and wait for it to synchronize with the encoder thread. 
+ video_source_.IncomingCapturedFrame(CreateFrame(2, nullptr)); + WaitForEncodedFrame(2); + EXPECT_EQ(2, sink_.number_of_reconfigurations()); + EXPECT_EQ(9999, sink_.last_min_transmit_bitrate()); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, FrameResolutionChangeReconfigureEncoder) { + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + // Capture a frame and wait for it to synchronize with the encoder thread. + video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); + WaitForEncodedFrame(1); // The encoder will have been configured once. EXPECT_EQ(1, sink_.number_of_reconfigurations()); EXPECT_EQ(codec_width_, fake_encoder_.codec_config().width); @@ -1811,6 +2067,265 @@ TEST_F(VideoStreamEncoderTest, EncoderRecommendedMaxBitrateCapsTargetBitrate) { video_stream_encoder_->Stop(); } +TEST_F(VideoStreamEncoderTest, + EncoderMaxAndMinBitratesUsedForTwoStreamsHighestActive) { + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p( + 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p( + 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000); + fake_encoder_.SetResolutionBitrateLimits( + {kEncoderLimits270p, kEncoderLimits360p}); + + // Two streams, highest stream active. + VideoEncoderConfig config; + const int kNumStreams = 2; + test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config); + config.max_bitrate_bps = 0; + config.simulcast_layers[0].active = false; + config.simulcast_layers[1].active = true; + config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + + // The encoder bitrate limits for 270p should be used. 
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 480, 270)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, kNumStreams); + EXPECT_EQ(static_cast(kEncoderLimits270p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits270p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // The encoder bitrate limits for 360p should be used. + video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(static_cast(kEncoderLimits360p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits360p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // Resolution b/w 270p and 360p. The encoder limits for 360p should be used. + video_source_.IncomingCapturedFrame( + CreateFrame(3, (640 + 480) / 2, (360 + 270) / 2)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(static_cast(kEncoderLimits360p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits360p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // Resolution higher than 360p. Encoder limits should be ignored. 
+ video_source_.IncomingCapturedFrame(CreateFrame(4, 960, 540)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_NE(static_cast(kEncoderLimits270p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_NE(static_cast(kEncoderLimits270p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + EXPECT_NE(static_cast(kEncoderLimits360p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_NE(static_cast(kEncoderLimits360p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // Resolution lower than 270p. The encoder limits for 270p should be used. + video_source_.IncomingCapturedFrame(CreateFrame(5, 320, 180)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(static_cast(kEncoderLimits270p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits270p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + DefaultEncoderMaxAndMinBitratesUsedForTwoStreamsHighestActive) { + // Two streams, highest stream active. + VideoEncoderConfig config; + const int kNumStreams = 2; + test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config); + config.max_bitrate_bps = 0; + config.simulcast_layers[0].active = false; + config.simulcast_layers[1].active = true; + config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + + // Default bitrate limits for 270p should be used. 
+ const absl::optional + kDefaultLimits270p = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP8, 480 * 270); + video_source_.IncomingCapturedFrame(CreateFrame(1, 480, 270)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, kNumStreams); + EXPECT_EQ(static_cast(kDefaultLimits270p->min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kDefaultLimits270p->max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // Default bitrate limits for 360p should be used. + const absl::optional + kDefaultLimits360p = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP8, 640 * 360); + video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(static_cast(kDefaultLimits360p->min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kDefaultLimits360p->max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // Resolution b/w 270p and 360p. The default limits for 360p should be used. + video_source_.IncomingCapturedFrame( + CreateFrame(3, (640 + 480) / 2, (360 + 270) / 2)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(static_cast(kDefaultLimits360p->min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kDefaultLimits360p->max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // Default bitrate limits for 540p should be used. 
+ const absl::optional + kDefaultLimits540p = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP8, 960 * 540); + video_source_.IncomingCapturedFrame(CreateFrame(4, 960, 540)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(static_cast(kDefaultLimits540p->min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kDefaultLimits540p->max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + EncoderMaxAndMinBitratesUsedForThreeStreamsMiddleActive) { + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p( + 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p( + 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits720p( + 1280 * 720, 54 * 1000, 31 * 1000, 3456 * 1000); + fake_encoder_.SetResolutionBitrateLimits( + {kEncoderLimits270p, kEncoderLimits360p, kEncoderLimits720p}); + + // Three streams, middle stream active. + VideoEncoderConfig config; + const int kNumStreams = 3; + test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config); + config.simulcast_layers[0].active = false; + config.simulcast_layers[1].active = true; + config.simulcast_layers[2].active = false; + config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + + // The encoder bitrate limits for 360p should be used. 
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, kNumStreams); + EXPECT_EQ(static_cast(kEncoderLimits360p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits360p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // The encoder bitrate limits for 270p should be used. + video_source_.IncomingCapturedFrame(CreateFrame(2, 960, 540)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(static_cast(kEncoderLimits270p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits270p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + EncoderMaxAndMinBitratesNotUsedForThreeStreamsLowestActive) { + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p( + 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p( + 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits720p( + 1280 * 720, 54 * 1000, 31 * 1000, 3456 * 1000); + fake_encoder_.SetResolutionBitrateLimits( + {kEncoderLimits270p, kEncoderLimits360p, kEncoderLimits720p}); + + // Three streams, lowest stream active. 
+ VideoEncoderConfig config; + const int kNumStreams = 3; + test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config); + config.simulcast_layers[0].active = true; + config.simulcast_layers[1].active = false; + config.simulcast_layers[2].active = false; + config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + + // Resolution on lowest stream lower than 270p. The encoder limits not applied + // on lowest stream, limits for 270p should not be used + video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, kNumStreams); + EXPECT_NE(static_cast(kEncoderLimits270p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_NE(static_cast(kEncoderLimits270p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + EncoderMaxBitrateCappedByConfigForTwoStreamsHighestActive) { + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p( + 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p( + 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000); + fake_encoder_.SetResolutionBitrateLimits( + {kEncoderLimits270p, kEncoderLimits360p}); + const int kMaxBitrateBps = kEncoderLimits360p.max_bitrate_bps - 100 * 1000; + + // Two streams, highest stream active. 
+ VideoEncoderConfig config; + const int kNumStreams = 2; + test::FillEncoderConfiguration(kVideoCodecVP8, kNumStreams, &config); + config.simulcast_layers[0].active = false; + config.simulcast_layers[1].active = true; + config.simulcast_layers[1].max_bitrate_bps = kMaxBitrateBps; + config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + + // The encoder bitrate limits for 270p should be used. + video_source_.IncomingCapturedFrame(CreateFrame(1, 480, 270)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, kNumStreams); + EXPECT_EQ(static_cast(kEncoderLimits270p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits270p.max_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + // The max configured bitrate is less than the encoder limit for 360p. 
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(static_cast(kEncoderLimits360p.min_bitrate_bps), + fake_encoder_.video_codec().simulcastStream[1].minBitrate * 1000); + EXPECT_EQ(static_cast(kMaxBitrateBps), + fake_encoder_.video_codec().simulcastStream[1].maxBitrate * 1000); + + video_stream_encoder_->Stop(); +} + TEST_F(VideoStreamEncoderTest, SwitchSourceDeregisterEncoderAsSink) { EXPECT_TRUE(video_source_.has_sinks()); test::FrameForwarder new_video_source; @@ -1829,30 +2344,88 @@ TEST_F(VideoStreamEncoderTest, SinkWantsRotationApplied) { video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, SinkWantsResolutionAlignment) { - constexpr int kRequestedResolutionAlignment = 7; +class ResolutionAlignmentTest + : public VideoStreamEncoderTest, + public ::testing::WithParamInterface< + ::testing::tuple>> { + public: + ResolutionAlignmentTest() + : requested_alignment_(::testing::get<0>(GetParam())), + scale_factors_(::testing::get<1>(GetParam())) {} + + protected: + const int requested_alignment_; + const std::vector scale_factors_; +}; + +INSTANTIATE_TEST_SUITE_P( + AlignmentAndScaleFactors, + ResolutionAlignmentTest, + ::testing::Combine( + ::testing::Values(1, 2, 3, 4, 5, 6, 16, 22), // requested_alignment_ + ::testing::Values(std::vector{-1.0}, // scale_factors_ + std::vector{-1.0, -1.0}, + std::vector{-1.0, -1.0, -1.0}, + std::vector{4.0, 2.0, 1.0}, + std::vector{9999.0, -1.0, 1.0}, + std::vector{3.99, 2.01, 1.0}, + std::vector{4.9, 1.7, 1.25}, + std::vector{10.0, 4.0, 3.0}, + std::vector{1.75, 3.5}, + std::vector{1.5, 2.5}, + std::vector{1.3, 1.0}))); + +TEST_P(ResolutionAlignmentTest, SinkWantsAlignmentApplied) { + // Set requested resolution alignment. 
video_source_.set_adaptation_enabled(true); - fake_encoder_.SetRequestedResolutionAlignment(kRequestedResolutionAlignment); + fake_encoder_.SetRequestedResolutionAlignment(requested_alignment_); + fake_encoder_.SetApplyAlignmentToAllSimulcastLayers(true); + + // Fill config with the scaling factor by which to reduce encoding size. + const int num_streams = scale_factors_.size(); + VideoEncoderConfig config; + test::FillEncoderConfiguration(kVideoCodecVP8, num_streams, &config); + for (int i = 0; i < num_streams; ++i) { + config.simulcast_layers[i].scale_resolution_down_by = scale_factors_[i]; + } + config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->ConfigureEncoder(std::move(config), kMaxPayloadLength); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0); + // Wait for all layers before triggering event. + sink_.SetNumExpectedLayers(num_streams); // On the 1st frame, we should have initialized the encoder and // asked for its resolution requirements. - video_source_.IncomingCapturedFrame( - CreateFrame(1, codec_width_, codec_height_)); - WaitForEncodedFrame(1); - EXPECT_EQ(video_source_.sink_wants().resolution_alignment, - kRequestedResolutionAlignment); + int64_t timestamp_ms = kFrameIntervalMs; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(1, fake_encoder_.GetNumEncoderInitializations()); // On the 2nd frame, we should be receiving a correctly aligned resolution. 
// (It's up the to the encoder to potentially drop the previous frame, // to avoid coding back-to-back keyframes.) - video_source_.IncomingCapturedFrame( - CreateFrame(2, codec_width_, codec_height_)); - WaitForEncodedFrame(2); - sink_.CheckLastFrameSizeIsMultipleOf(kRequestedResolutionAlignment); + timestamp_ms += kFrameIntervalMs; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_GE(fake_encoder_.GetNumEncoderInitializations(), 1); + + VideoCodec codec = fake_encoder_.video_codec(); + EXPECT_EQ(codec.numberOfSimulcastStreams, num_streams); + // Frame size should be a multiple of the requested alignment. + for (int i = 0; i < codec.numberOfSimulcastStreams; ++i) { + EXPECT_EQ(codec.simulcastStream[i].width % requested_alignment_, 0); + EXPECT_EQ(codec.simulcastStream[i].height % requested_alignment_, 0); + // Aspect ratio should match. + EXPECT_EQ(codec.width * codec.simulcastStream[i].height, + codec.height * codec.simulcastStream[i].width); + } video_stream_encoder_->Stop(); } @@ -1873,7 +2446,7 @@ TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) { DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); video_stream_encoder_->SetSource(&video_source_, webrtc::DegradationPreference::BALANCED); - VerifyNoLimitation(video_source_.sink_wants()); + EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants()); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -1898,9 +2471,10 @@ TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) { t += frame_interval_ms; video_stream_encoder_->TriggerCpuOveruse(); - VerifyBalancedModeFpsRange( + EXPECT_THAT( video_source_.sink_wants(), - *video_source_.last_sent_width() * *video_source_.last_sent_height()); + FpsInRangeForPixelsInBalanced(*video_source_.last_sent_width() * + 
*video_source_.last_sent_height())); } while (video_source_.sink_wants().max_pixel_count < last_wants.max_pixel_count || video_source_.sink_wants().max_framerate_fps < @@ -1933,16 +2507,17 @@ TEST_F(VideoStreamEncoderTest, TestCpuDowngrades_BalancedMode) { t += frame_interval_ms; video_stream_encoder_->TriggerCpuUnderuse(); - VerifyBalancedModeFpsRange( + EXPECT_THAT( video_source_.sink_wants(), - *video_source_.last_sent_width() * *video_source_.last_sent_height()); + FpsInRangeForPixelsInBalanced(*video_source_.last_sent_width() * + *video_source_.last_sent_height())); EXPECT_TRUE(video_source_.sink_wants().max_pixel_count > last_wants.max_pixel_count || video_source_.sink_wants().max_framerate_fps > last_wants.max_framerate_fps); } - VerifyFpsMaxResolutionMax(video_source_.sink_wants()); + EXPECT_THAT(video_source_.sink_wants(), FpsMaxResolutionMax()); stats_proxy_->ResetMockStats(); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); @@ -1958,7 +2533,7 @@ TEST_F(VideoStreamEncoderTest, DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - VerifyNoLimitation(video_source_.sink_wants()); + EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants()); const int kFrameWidth = 1280; const int kFrameHeight = 720; @@ -1975,7 +2550,7 @@ TEST_F(VideoStreamEncoderTest, video_source_.set_adaptation_enabled(true); video_stream_encoder_->SetSource( &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); - VerifyNoLimitation(video_source_.sink_wants()); + EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants()); video_source_.IncomingCapturedFrame( CreateFrame(ntp_time, kFrameWidth, kFrameHeight)); sink_.WaitForEncodedFrame(ntp_time); @@ -2031,7 +2606,8 @@ TEST_F(VideoStreamEncoderTest, EXPECT_EQ(video_source_.sink_wants().max_pixel_count, pixel_count); 
EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, kInputFps); - // Change the degradation preference back. CPU underuse should now adapt. + // Change the degradation preference back. CPU underuse should not adapt since + // QP is most limited. video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated( &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); video_source_.IncomingCapturedFrame( @@ -2051,7 +2627,15 @@ TEST_F(VideoStreamEncoderTest, CreateFrame(ntp_time, kFrameWidth, kFrameHeight)); sink_.WaitForEncodedFrame(ntp_time); ntp_time += kFrameIntervalMs; - EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, kInputFps); + EXPECT_EQ(video_source_.sink_wants().max_framerate_fps, restricted_fps); + + // Trigger QP underuse, fps should return to normal. + video_stream_encoder_->TriggerQualityHigh(); + video_source_.IncomingCapturedFrame( + CreateFrame(ntp_time, kFrameWidth, kFrameHeight)); + sink_.WaitForEncodedFrame(ntp_time); + ntp_time += kFrameIntervalMs; + EXPECT_THAT(video_source_.sink_wants(), FpsMax()); video_stream_encoder_->Stop(); } @@ -2061,7 +2645,7 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) { DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - VerifyNoLimitation(video_source_.sink_wants()); + EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants()); const int kFrameWidth = 1280; const int kFrameHeight = 720; @@ -2098,7 +2682,7 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) { sink_.WaitForEncodedFrame(frame_timestamp); frame_timestamp += kFrameIntervalMs; // Initially no degradation registered. - VerifyFpsMaxResolutionMax(new_video_source.sink_wants()); + EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax()); // Force an input frame rate to be available, or the adaptation call won't // know what framerate to adapt form. 
@@ -2128,7 +2712,7 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) { CreateFrame(frame_timestamp, kFrameWidth, kFrameWidth)); sink_.WaitForEncodedFrame(frame_timestamp); frame_timestamp += kFrameIntervalMs; - VerifyFpsMaxResolutionMax(new_video_source.sink_wants()); + EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax()); video_stream_encoder_->TriggerCpuOveruse(); new_video_source.IncomingCapturedFrame( @@ -2137,7 +2721,7 @@ TEST_F(VideoStreamEncoderTest, SinkWantsStoredByDegradationPreference) { frame_timestamp += kFrameIntervalMs; // Still no degradation. - VerifyFpsMaxResolutionMax(new_video_source.sink_wants()); + EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax()); // Calling SetSource with resolution scaling enabled apply the old SinkWants. video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated( @@ -2464,7 +3048,7 @@ TEST_F(VideoStreamEncoderTest, // Set new degradation preference should clear restrictions since we changed // from BALANCED. - video_stream_encoder_->SetSource( + video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated( &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight)); WaitForEncodedFrame(sequence++); @@ -2488,8 +3072,8 @@ TEST_F(VideoStreamEncoderTest, EXPECT_EQ(2, stats.number_of_cpu_adapt_changes); // Back to BALANCED, should clear the restrictions again. - video_stream_encoder_->SetSource(&source, - webrtc::DegradationPreference::BALANCED); + video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated( + &source, webrtc::DegradationPreference::BALANCED); source.IncomingCapturedFrame(CreateFrame(sequence, kWidth, kHeight)); WaitForEncodedFrame(sequence++); stats = stats_proxy_->GetStats(); @@ -2647,7 +3231,7 @@ TEST_F(VideoStreamEncoderTest, DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Expect no scaling to begin with. 
- VerifyNoLimitation(video_source_.sink_wants()); + EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants()); video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); WaitForEncodedFrame(1); @@ -2704,13 +3288,14 @@ TEST_F(VideoStreamEncoderTest, source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); WaitForEncodedFrame(1); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); // Trigger adapt down, expect scaled down resolution. video_stream_encoder_->TriggerCpuOveruse(); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); const int kLastMaxPixelCount = source.sink_wants().max_pixel_count; EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -2738,11 +3323,12 @@ TEST_F(VideoStreamEncoderTest, SkipsSameOrLargerAdaptDownRequest_BalancedMode) { webrtc::DegradationPreference::BALANCED); source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); sink_.WaitForEncodedFrame(1); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); // Trigger adapt down, expect scaled down resolution. 
video_stream_encoder_->TriggerQualityLow(); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); const int kLastMaxPixelCount = source.sink_wants().max_pixel_count; @@ -2767,125 +3353,258 @@ TEST_F(VideoStreamEncoderTest, SkipsSameOrLargerAdaptDownRequest_BalancedMode) { } TEST_F(VideoStreamEncoderTest, - NoChangeForInitialNormalUsage_MaintainFramerateMode) { - const int kWidth = 1280; - const int kHeight = 720; - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - - // Enable MAINTAIN_FRAMERATE preference, no initial limitation. - test::FrameForwarder source; - video_stream_encoder_->SetSource( - &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); - - source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); - WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); - EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - - // Trigger adapt up, expect no change. 
- video_stream_encoder_->TriggerCpuUnderuse(); - VerifyFpsMaxResolutionMax(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); - EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - - video_stream_encoder_->Stop(); -} - -TEST_F(VideoStreamEncoderTest, - NoChangeForInitialNormalUsage_MaintainResolutionMode) { - const int kWidth = 1280; - const int kHeight = 720; - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - - // Enable MAINTAIN_RESOLUTION preference, no initial limitation. - test::FrameForwarder source; - video_stream_encoder_->SetSource( - &source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); - - source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); - WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); - EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - - // Trigger adapt up, expect no change. - video_stream_encoder_->TriggerCpuUnderuse(); - VerifyFpsMaxResolutionMax(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); - EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - - video_stream_encoder_->Stop(); -} - -TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_BalancedMode) { - const int kWidth = 1280; - const int kHeight = 720; + FpsCountReturnsToZeroForFewerAdaptationsUpThanDown) { + const int kWidth = 640; + const int kHeight = 360; + const int64_t kFrameIntervalMs = 150; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Enable BALANCED preference, no initial limitation. 
- test::FrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); + source.set_adaptation_enabled(true); video_stream_encoder_->SetSource(&source, webrtc::DegradationPreference::BALANCED); - source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + int64_t timestamp_ms = kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); sink_.WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger adapt up, expect no change. - video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionMax(source.sink_wants()); + // Trigger adapt down, expect reduced fps (640x360@15fps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), + FpsMatchesResolutionMax(Lt(kDefaultFramerate))); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); - EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Source requests 270p, expect reduced resolution (480x270@15fps). 
+ source.OnOutputFormatRequest(480, 270); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(480, 270); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect reduced fps (480x270@10fps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Source requests QVGA, expect reduced resolution (320x180@10fps). + source.OnOutputFormatRequest(320, 180); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(320, 180); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect reduced fps (320x180@7fps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Source requests VGA, expect increased resolution (640x360@7fps). 
+ source.OnOutputFormatRequest(640, 360); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@(max-2)fps). + video_stream_encoder_->TriggerQualityHigh(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@(max-1)fps). + video_stream_encoder_->TriggerQualityHigh(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@maxfps). 
+ video_stream_encoder_->TriggerQualityHigh(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_DisabledMode) { +TEST_F(VideoStreamEncoderTest, + FpsCountReturnsToZeroForFewerAdaptationsUpThanDownWithTwoResources) { const int kWidth = 1280; const int kHeight = 720; + const int64_t kFrameIntervalMs = 150; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - // Enable DISABLED preference, no initial limitation. - test::FrameForwarder source; + // Enable BALANCED preference, no initial limitation. + AdaptingFrameForwarder source(&time_controller_); + source.set_adaptation_enabled(true); video_stream_encoder_->SetSource(&source, - webrtc::DegradationPreference::DISABLED); + webrtc::DegradationPreference::BALANCED); - source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + int64_t timestamp_ms = kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); sink_.WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger adapt up, expect no change. 
+ // Trigger adapt down, expect scaled down resolution (960x540@maxfps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect scaled down resolution (640x360@maxfps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + sink_.WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect reduced fps (640x360@15fps). + video_stream_encoder_->TriggerQualityLow(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Source requests QVGA, expect reduced resolution (320x180@15fps). 
+ source.OnOutputFormatRequest(320, 180); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(320, 180); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt down, expect reduced fps (320x180@7fps). + video_stream_encoder_->TriggerCpuOveruse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Source requests HD, expect increased resolution (640x360@7fps). + source.OnOutputFormatRequest(1280, 720); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@(max-1)fps). 
+ video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt up, expect increased fps (640x360@maxfps). + video_stream_encoder_->TriggerQualityHigh(); + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt up, expect increased resolution (960x570@maxfps). 
+ video_stream_encoder_->TriggerQualityHigh(); + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + // Trigger adapt up, expect increased resolution (1280x720@maxfps). video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionMax(source.sink_wants()); + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants())); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); - EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(5, stats_proxy_->GetStats().number_of_cpu_adapt_changes); video_stream_encoder_->Stop(); } TEST_F(VideoStreamEncoderTest, - AdaptsResolutionForLowQuality_MaintainFramerateMode) { + NoChangeForInitialNormalUsage_MaintainFramerateMode) { const int kWidth = 1280; const int kHeight = 720; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( @@ -2894,106 +3613,227 @@ TEST_F(VideoStreamEncoderTest, 
DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Enable MAINTAIN_FRAMERATE preference, no initial limitation. - AdaptingFrameForwarder source; - source.set_adaptation_enabled(true); + test::FrameForwarder source; video_stream_encoder_->SetSource( &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); - WaitForEncodedFrame(1); - VerifyFpsMaxResolutionMax(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); - - // Trigger adapt down, expect scaled down resolution. - video_stream_encoder_->TriggerQualityLow(); - source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight)); - WaitForEncodedFrame(2); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); - EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + WaitForEncodedFrame(kWidth, kHeight); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - // Trigger adapt up, expect no restriction. - video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionMax(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + // Trigger adapt up, expect no change. 
+ video_stream_encoder_->TriggerCpuUnderuse(); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); video_stream_encoder_->Stop(); } TEST_F(VideoStreamEncoderTest, - AdaptsFramerateForLowQuality_MaintainResolutionMode) { + NoChangeForInitialNormalUsage_MaintainResolutionMode) { const int kWidth = 1280; const int kHeight = 720; - const int kInputFps = 30; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - VideoSendStream::Stats stats = stats_proxy_->GetStats(); - stats.input_frame_rate = kInputFps; - stats_proxy_->SetMockStats(stats); - - // Expect no scaling to begin with (preference: MAINTAIN_FRAMERATE). - video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); - sink_.WaitForEncodedFrame(1); - VerifyFpsMaxResolutionMax(video_source_.sink_wants()); - - // Trigger adapt down, expect scaled down resolution. - video_stream_encoder_->TriggerQualityLow(); - video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight)); - sink_.WaitForEncodedFrame(2); - VerifyFpsMaxResolutionLt(video_source_.sink_wants(), kWidth * kHeight); - - // Enable MAINTAIN_RESOLUTION preference. - test::FrameForwarder new_video_source; - video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated( - &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); - // Give the encoder queue time to process the change in degradation preference - // by waiting for an encoded frame. - new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight)); - sink_.WaitForEncodedFrame(3); - VerifyFpsMaxResolutionMax(new_video_source.sink_wants()); + // Enable MAINTAIN_RESOLUTION preference, no initial limitation. 
+ test::FrameForwarder source; + video_stream_encoder_->SetSource( + &source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); - // Trigger adapt down, expect reduced framerate. - video_stream_encoder_->TriggerQualityLow(); - new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight)); - sink_.WaitForEncodedFrame(4); - VerifyFpsLtResolutionMax(new_video_source.sink_wants(), kInputFps); + source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + WaitForEncodedFrame(kWidth, kHeight); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - // Trigger adapt up, expect no restriction. - video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionMax(new_video_source.sink_wants()); + // Trigger adapt up, expect no change. + video_stream_encoder_->TriggerCpuUnderuse(); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, DoesNotScaleBelowSetResolutionLimit) { +TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_BalancedMode) { const int kWidth = 1280; const int kHeight = 720; - const size_t kNumFrames = 10; - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - // Enable adapter, expected input resolutions when downscaling: - // 1280x720 -> 960x540 -> 640x360 -> 480x270 -> 320x180 (kMinPixelsPerFrame) - video_source_.set_adaptation_enabled(true); + // Enable BALANCED preference, no initial limitation. 
+ test::FrameForwarder source; + video_stream_encoder_->SetSource(&source, + webrtc::DegradationPreference::BALANCED); + source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + sink_.WaitForEncodedFrame(kWidth, kHeight); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); - int downscales = 0; - for (size_t i = 1; i <= kNumFrames; i++) { - video_source_.IncomingCapturedFrame( - CreateFrame(i * kFrameIntervalMs, kWidth, kHeight)); - WaitForEncodedFrame(i * kFrameIntervalMs); - + // Trigger adapt up, expect no change. + video_stream_encoder_->TriggerQualityHigh(); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, NoChangeForInitialNormalUsage_DisabledMode) { + const int kWidth = 1280; + const int kHeight = 720; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + // Enable DISABLED preference, no initial limitation. 
+ test::FrameForwarder source; + video_stream_encoder_->SetSource(&source, + webrtc::DegradationPreference::DISABLED); + + source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + sink_.WaitForEncodedFrame(kWidth, kHeight); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt up, expect no change. + video_stream_encoder_->TriggerQualityHigh(); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + AdaptsResolutionForLowQuality_MaintainFramerateMode) { + const int kWidth = 1280; + const int kHeight = 720; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + // Enable MAINTAIN_FRAMERATE preference, no initial limitation. + AdaptingFrameForwarder source(&time_controller_); + source.set_adaptation_enabled(true); + video_stream_encoder_->SetSource( + &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); + + source.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + WaitForEncodedFrame(1); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, expect scaled down resolution. 
+ video_stream_encoder_->TriggerQualityLow(); + source.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight)); + WaitForEncodedFrame(2); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt up, expect no restriction. + video_stream_encoder_->TriggerQualityHigh(); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + AdaptsFramerateForLowQuality_MaintainResolutionMode) { + const int kWidth = 1280; + const int kHeight = 720; + const int kInputFps = 30; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + VideoSendStream::Stats stats = stats_proxy_->GetStats(); + stats.input_frame_rate = kInputFps; + stats_proxy_->SetMockStats(stats); + + // Expect no scaling to begin with (preference: MAINTAIN_FRAMERATE). + video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + sink_.WaitForEncodedFrame(1); + EXPECT_THAT(video_source_.sink_wants(), FpsMaxResolutionMax()); + + // Trigger adapt down, expect scaled down resolution. + video_stream_encoder_->TriggerQualityLow(); + video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight)); + sink_.WaitForEncodedFrame(2); + EXPECT_THAT(video_source_.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); + + // Enable MAINTAIN_RESOLUTION preference. 
+ test::FrameForwarder new_video_source; + video_stream_encoder_->SetSourceAndWaitForRestrictionsUpdated( + &new_video_source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); + // Give the encoder queue time to process the change in degradation preference + // by waiting for an encoded frame. + new_video_source.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight)); + sink_.WaitForEncodedFrame(3); + EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax()); + + // Trigger adapt down, expect reduced framerate. + video_stream_encoder_->TriggerQualityLow(); + new_video_source.IncomingCapturedFrame(CreateFrame(4, kWidth, kHeight)); + sink_.WaitForEncodedFrame(4); + EXPECT_THAT(new_video_source.sink_wants(), + FpsMatchesResolutionMax(Lt(kInputFps))); + + // Trigger adapt up, expect no restriction. + video_stream_encoder_->TriggerQualityHigh(); + EXPECT_THAT(new_video_source.sink_wants(), FpsMaxResolutionMax()); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, DoesNotScaleBelowSetResolutionLimit) { + const int kWidth = 1280; + const int kHeight = 720; + const size_t kNumFrames = 10; + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + // Enable adapter, expected input resolutions when downscaling: + // 1280x720 -> 960x540 -> 640x360 -> 480x270 -> 320x180 (kMinPixelsPerFrame) + video_source_.set_adaptation_enabled(true); + + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + int downscales = 0; + for (size_t i = 1; i <= kNumFrames; i++) { + video_source_.IncomingCapturedFrame( + CreateFrame(i * kFrameIntervalMs, kWidth, kHeight)); + WaitForEncodedFrame(i * kFrameIntervalMs); + // Trigger scale down. 
rtc::VideoSinkWants last_wants = video_source_.sink_wants(); video_stream_encoder_->TriggerQualityLow(); @@ -3020,7 +3860,7 @@ TEST_F(VideoStreamEncoderTest, DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Enable MAINTAIN_FRAMERATE preference, no initial limitation. - AdaptingFrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); source.set_adaptation_enabled(true); video_stream_encoder_->SetSource( &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); @@ -3028,7 +3868,7 @@ TEST_F(VideoStreamEncoderTest, int64_t timestamp_ms = kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3037,7 +3877,8 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3046,7 +3887,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3055,7 +3896,8 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, 
kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3064,7 +3906,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); sink_.WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3081,7 +3923,7 @@ TEST_F(VideoStreamEncoderTest, DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Enable BALANCED preference, no initial limitation. - AdaptingFrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); source.set_adaptation_enabled(true); video_stream_encoder_->SetSource(&source, webrtc::DegradationPreference::BALANCED); @@ -3089,7 +3931,7 @@ TEST_F(VideoStreamEncoderTest, int64_t timestamp_ms = kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); sink_.WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -3098,7 +3940,8 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); sink_.WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); 
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -3107,7 +3950,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); sink_.WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -3116,7 +3959,8 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); sink_.WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -3125,7 +3969,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); sink_.WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -3143,7 +3987,7 @@ TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) { 0, 0); // Enable MAINTAIN_FRAMERATE preference, no initial limitation. 
- AdaptingFrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); source.set_adaptation_enabled(true); video_stream_encoder_->SetSource( &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); @@ -3169,7 +4013,7 @@ TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) { // Trigger adapt up. Higher resolution should not be requested duo to lack // of bitrate. video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionLt(source.sink_wants(), 1280 * 720); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMatches(Lt(1280 * 720))); // Increase bitrate. video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( @@ -3180,7 +4024,7 @@ TEST_F(VideoStreamEncoderTest, AdaptUpIfBwEstimateIsHigherThanMinBitrate) { // Trigger adapt up. Higher resolution should be requested. video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); video_stream_encoder_->Stop(); } @@ -3197,7 +4041,7 @@ TEST_F(VideoStreamEncoderTest, DropFirstFramesIfBwEstimateIsTooLow) { 0, 0); // Enable MAINTAIN_FRAMERATE preference, no initial limitation. - AdaptingFrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); source.set_adaptation_enabled(true); video_stream_encoder_->SetSource( &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); @@ -3249,10 +4093,10 @@ class BalancedDegradationTest : public VideoStreamEncoderTest { const int kHeight = 360; const int64_t kFrameIntervalMs = 150; // Use low fps to not drop any frame. 
int64_t timestamp_ms_ = 0; - AdaptingFrameForwarder source_; + AdaptingFrameForwarder source_{&time_controller_}; }; -TEST_F(BalancedDegradationTest, AdaptDownReturnsFalseIfFpsDiffLtThreshold) { +TEST_F(BalancedDegradationTest, AdaptDownTwiceIfMinFpsDiffLtThreshold) { test::ScopedFieldTrials field_trials( "WebRTC-Video-BalancedDegradationSettings/" "pixels:57600|129600|230400,fps:7|10|24,fps_diff:1|1|1/"); @@ -3265,20 +4109,18 @@ TEST_F(BalancedDegradationTest, AdaptDownReturnsFalseIfFpsDiffLtThreshold) { stats_proxy_->SetMockStats(stats); InsertFrameAndWaitForEncoded(); - VerifyFpsMaxResolutionMax(source_.sink_wants()); + EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax()); - // Trigger adapt down, expect scaled down framerate (640x360@24fps). - // Fps diff (input-requested:0) < threshold, expect adapting down not to clear - // QP samples. - EXPECT_FALSE( - video_stream_encoder_ - ->TriggerQualityScalerHighQpAndReturnIfQpSamplesShouldBeCleared()); - VerifyFpsEqResolutionMax(source_.sink_wants(), 24); + // Trigger adapt down, expect scaled down framerate and resolution, + // since Fps diff (input-requested:0) < threshold. + video_stream_encoder_->TriggerQualityLow(); + EXPECT_THAT(source_.sink_wants(), + AllOf(WantsFps(Eq(24)), WantsMaxPixels(Le(230400)))); video_stream_encoder_->Stop(); } -TEST_F(BalancedDegradationTest, AdaptDownReturnsTrueIfFpsDiffGeThreshold) { +TEST_F(BalancedDegradationTest, AdaptDownOnceIfFpsDiffGeThreshold) { test::ScopedFieldTrials field_trials( "WebRTC-Video-BalancedDegradationSettings/" "pixels:57600|129600|230400,fps:7|10|24,fps_diff:1|1|1/"); @@ -3291,15 +4133,12 @@ TEST_F(BalancedDegradationTest, AdaptDownReturnsTrueIfFpsDiffGeThreshold) { stats_proxy_->SetMockStats(stats); InsertFrameAndWaitForEncoded(); - VerifyFpsMaxResolutionMax(source_.sink_wants()); + EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax()); - // Trigger adapt down, expect scaled down framerate (640x360@24fps). 
- // Fps diff (input-requested:1) == threshold, expect adapting down to clear QP - // samples. - EXPECT_TRUE( - video_stream_encoder_ - ->TriggerQualityScalerHighQpAndReturnIfQpSamplesShouldBeCleared()); - VerifyFpsEqResolutionMax(source_.sink_wants(), 24); + // Trigger adapt down, expect scaled down framerate only (640x360@24fps). + // Fps diff (input-requested:1) == threshold. + video_stream_encoder_->TriggerQualityLow(); + EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(24))); video_stream_encoder_->Stop(); } @@ -3313,11 +4152,11 @@ TEST_F(BalancedDegradationTest, AdaptDownUsesCodecSpecificFps) { EXPECT_EQ(kVideoCodecVP8, video_encoder_config_.codec_type); InsertFrameAndWaitForEncoded(); - VerifyFpsMaxResolutionMax(source_.sink_wants()); + EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax()); // Trigger adapt down, expect scaled down framerate (640x360@22fps). video_stream_encoder_->TriggerQualityLow(); - VerifyFpsEqResolutionMax(source_.sink_wants(), 22); + EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(22))); video_stream_encoder_->Stop(); } @@ -3333,25 +4172,25 @@ TEST_F(BalancedDegradationTest, NoAdaptUpIfBwEstimateIsLessThanMinBitrate) { OnBitrateUpdated(kTooLowMinBitrateBps); InsertFrameAndWaitForEncoded(); - VerifyFpsMaxResolutionMax(source_.sink_wants()); + EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax()); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down framerate (640x360@14fps). video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsEqResolutionMax(source_.sink_wants(), 14); + EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14))); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down resolution (480x270@14fps). 
video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsEqResolutionLt(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants())); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down framerate (480x270@10fps). video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsLtResolutionEq(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants())); EXPECT_EQ(source_.sink_wants().max_framerate_fps, 10); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -3378,7 +4217,7 @@ TEST_F(BalancedDegradationTest, SetupTest(); OnBitrateUpdated(kLowTargetBitrateBps); - VerifyNoLimitation(source_.sink_wants()); + EXPECT_THAT(source_.sink_wants(), UnlimitedSinkWants()); // Insert frame, expect scaled down: // framerate (640x360@24fps) -> resolution (480x270@24fps). @@ -3413,31 +4252,31 @@ TEST_F(BalancedDegradationTest, OnBitrateUpdated(kTooLowMinResolutionBitrateBps); InsertFrameAndWaitForEncoded(); - VerifyFpsMaxResolutionMax(source_.sink_wants()); + EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax()); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down framerate (640x360@14fps). video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsEqResolutionMax(source_.sink_wants(), 14); + EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14))); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down resolution (480x270@14fps). 
video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsEqResolutionLt(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants())); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down framerate (480x270@10fps). video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsLtResolutionEq(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants())); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt up, expect upscaled fps (no bitrate limit) (480x270@14fps). video_stream_encoder_->TriggerQualityHigh(); InsertFrameAndWaitForEncoded(); - VerifyFpsGtResolutionEq(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsGtResolutionEq(source_.last_wants())); EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt up, expect no upscale in res (target bitrate < min bitrate). @@ -3449,7 +4288,7 @@ TEST_F(BalancedDegradationTest, OnBitrateUpdated(kResolutionMinBitrateBps); video_stream_encoder_->TriggerQualityHigh(); InsertFrameAndWaitForEncoded(); - VerifyFpsEqResolutionGt(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsEqResolutionGt(source_.last_wants())); EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes); video_stream_encoder_->Stop(); @@ -3469,25 +4308,25 @@ TEST_F(BalancedDegradationTest, OnBitrateUpdated(kTooLowMinBitrateBps); InsertFrameAndWaitForEncoded(); - VerifyFpsMaxResolutionMax(source_.sink_wants()); + EXPECT_THAT(source_.sink_wants(), FpsMaxResolutionMax()); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down framerate (640x360@14fps). 
video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsEqResolutionMax(source_.sink_wants(), 14); + EXPECT_THAT(source_.sink_wants(), FpsMatchesResolutionMax(Eq(14))); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down resolution (480x270@14fps). video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsEqResolutionLt(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsEqResolutionLt(source_.last_wants())); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt down, expect scaled down framerate (480x270@10fps). video_stream_encoder_->TriggerQualityLow(); InsertFrameAndWaitForEncoded(); - VerifyFpsLtResolutionEq(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsLtResolutionEq(source_.last_wants())); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt up, expect no upscale (target bitrate < min bitrate). @@ -3499,7 +4338,7 @@ TEST_F(BalancedDegradationTest, OnBitrateUpdated(kMinBitrateBps); video_stream_encoder_->TriggerQualityHigh(); InsertFrameAndWaitForEncoded(); - VerifyFpsGtResolutionEq(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsGtResolutionEq(source_.last_wants())); EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt up, expect no upscale in res (target bitrate < min bitrate). 
@@ -3512,7 +4351,7 @@ TEST_F(BalancedDegradationTest, OnBitrateUpdated(kResolutionMinBitrateBps); video_stream_encoder_->TriggerQualityHigh(); InsertFrameAndWaitForEncoded(); - VerifyFpsEqResolutionGt(source_.sink_wants(), source_.last_wants()); + EXPECT_THAT(source_.sink_wants(), FpsEqResolutionGt(source_.last_wants())); EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes); video_stream_encoder_->Stop(); @@ -3528,7 +4367,7 @@ TEST_F(VideoStreamEncoderTest, DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Enable MAINTAIN_FRAMERATE preference, no initial limitation. - AdaptingFrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); source.set_adaptation_enabled(true); video_stream_encoder_->SetSource( &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); @@ -3536,7 +4375,7 @@ TEST_F(VideoStreamEncoderTest, int64_t timestamp_ms = kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3547,7 +4386,8 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3558,7 +4398,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; 
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3569,7 +4409,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes); @@ -3580,7 +4420,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants())); rtc::VideoSinkWants last_wants = source.sink_wants(); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); @@ -3592,68 +4432,74 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionEq(source.sink_wants(), last_wants); + EXPECT_THAT(source.sink_wants(), FpsMax()); + EXPECT_EQ(source.sink_wants().max_pixel_count, last_wants.max_pixel_count); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); 
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger cpu adapt up, expect upscaled resolution (480x270). - video_stream_encoder_->TriggerCpuUnderuse(); + // Trigger quality adapt up, expect upscaled resolution (480x270). + video_stream_encoder_->TriggerQualityHigh(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger cpu adapt up, expect upscaled resolution (640x360). + // Trigger quality and cpu adapt up since both are most limited, expect + // upscaled resolution (640x360). 
video_stream_encoder_->TriggerCpuUnderuse(); + video_stream_encoder_->TriggerQualityHigh(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_EQ(5, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger cpu adapt up, expect upscaled resolution (960x540). + // Trigger quality and cpu adapt up since both are most limited, expect + // upscaled resolution (960x540). video_stream_encoder_->TriggerCpuUnderuse(); + video_stream_encoder_->TriggerQualityHigh(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants())); last_wants = source.sink_wants(); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_EQ(6, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(5, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger cpu adapt up, no cpu downgrades, expect no change (960x540). 
+ // Trigger cpu adapt up, expect no change since not most limited (960x540). + // However the stats will change since the CPU resource is no longer limited. video_stream_encoder_->TriggerCpuUnderuse(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionEq(source.sink_wants(), last_wants); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(last_wants)); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(6, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger quality adapt up, expect no restriction (1280x720). video_stream_encoder_->TriggerQualityHigh(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants()); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants())); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_EQ(6, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes); video_stream_encoder_->Stop(); } @@ -3714,9 +4560,10 @@ TEST_F(VideoStreamEncoderTest, metrics::NumSamples("WebRTC.Video.CpuLimitedResolutionInPercent")); } -TEST_F(VideoStreamEncoderTest, CallsBitrateObserver) { - MockBitrateObserver bitrate_observer; - video_stream_encoder_->SetBitrateAllocationObserver(&bitrate_observer); 
+TEST_F(VideoStreamEncoderTest, ReportsVideoBitrateAllocation) { + ResetEncoder("FAKE", 1, 1, 1, /*screenshare*/ false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocation); const int kDefaultFps = 30; const VideoBitrateAllocation expected_bitrate = @@ -3724,141 +4571,704 @@ TEST_F(VideoStreamEncoderTest, CallsBitrateObserver) { .Allocate(VideoBitrateAllocationParameters(kLowTargetBitrateBps, kDefaultFps)); - EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate)) - .Times(1); video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kLowTargetBitrateBps), DataRate::BitsPerSec(kLowTargetBitrateBps), DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0); video_source_.IncomingCapturedFrame( - CreateFrame(rtc::TimeMillis(), codec_width_, codec_height_)); - WaitForEncodedFrame(rtc::TimeMillis()); - VideoBitrateAllocation bitrate_allocation = - fake_encoder_.GetAndResetLastRateControlSettings()->bitrate; + CreateFrame(CurrentTimeMs(), codec_width_, codec_height_)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.GetLastVideoBitrateAllocation(), expected_bitrate); + EXPECT_EQ(sink_.number_of_bitrate_allocations(), 1); + // Check that encoder has been updated too, not just allocation observer. - EXPECT_EQ(bitrate_allocation.get_sum_bps(), kLowTargetBitrateBps); - // TODO(srte): The use of millisecs here looks like an error, but the tests - // fails using seconds, this should be investigated. - fake_clock_.AdvanceTime(TimeDelta::Millis(1) / kDefaultFps); + EXPECT_TRUE(fake_encoder_.GetAndResetLastRateControlSettings().has_value()); + AdvanceTime(TimeDelta::Seconds(1) / kDefaultFps); - // Not called on second frame. - EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate)) - .Times(0); + // VideoBitrateAllocation not updated on second frame. 
video_source_.IncomingCapturedFrame( - CreateFrame(rtc::TimeMillis(), codec_width_, codec_height_)); - WaitForEncodedFrame(rtc::TimeMillis()); - fake_clock_.AdvanceTime(TimeDelta::Millis(1) / kDefaultFps); - - // Called after a process interval. - EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(expected_bitrate)) - .Times(1); - const int64_t start_time_ms = rtc::TimeMillis(); - while (rtc::TimeMillis() - start_time_ms < kProcessIntervalMs) { + CreateFrame(CurrentTimeMs(), codec_width_, codec_height_)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_bitrate_allocations(), 1); + AdvanceTime(TimeDelta::Millis(1) / kDefaultFps); + + // VideoBitrateAllocation updated after a process interval. + const int64_t start_time_ms = CurrentTimeMs(); + while (CurrentTimeMs() - start_time_ms < 5 * kProcessIntervalMs) { video_source_.IncomingCapturedFrame( - CreateFrame(rtc::TimeMillis(), codec_width_, codec_height_)); - WaitForEncodedFrame(rtc::TimeMillis()); - fake_clock_.AdvanceTime(TimeDelta::Millis(1) / kDefaultFps); + CreateFrame(CurrentTimeMs(), codec_width_, codec_height_)); + WaitForEncodedFrame(CurrentTimeMs()); + AdvanceTime(TimeDelta::Millis(1) / kDefaultFps); } - - // Since rates are unchanged, encoder should not be reconfigured. - EXPECT_FALSE(fake_encoder_.GetAndResetLastRateControlSettings().has_value()); + EXPECT_GT(sink_.number_of_bitrate_allocations(), 3); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, TemporalLayersNotDisabledIfSupported) { - // 2 TLs configured, temporal layers supported by encoder. 
- const int kNumTemporalLayers = 2; - ResetEncoder("VP8", 1, kNumTemporalLayers, 1, /*screenshare*/ false); - fake_encoder_.SetTemporalLayersSupported(0, true); +TEST_F(VideoStreamEncoderTest, ReportsVideoLayersAllocationForVP8Simulcast) { + ResetEncoder("VP8", /*num_streams*/ 2, 1, 1, /*screenshare*/ false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); - // Bitrate allocated across temporal layers. - const int kTl0Bps = kTargetBitrateBps * - webrtc::SimulcastRateAllocator::GetTemporalRateAllocation( - kNumTemporalLayers, /*temporal_id*/ 0, - /*base_heavy_tl3_alloc*/ false); - const int kTl1Bps = kTargetBitrateBps * - webrtc::SimulcastRateAllocator::GetTemporalRateAllocation( - kNumTemporalLayers, /*temporal_id*/ 1, - /*base_heavy_tl3_alloc*/ false); - VideoBitrateAllocation expected_bitrate; - expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kTl0Bps); - expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 1, kTl1Bps - kTl0Bps); + const int kDefaultFps = 30; - VerifyAllocatedBitrate(expected_bitrate); - video_stream_encoder_->Stop(); -} + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0); -TEST_F(VideoStreamEncoderTest, TemporalLayersDisabledIfNotSupported) { - // 2 TLs configured, temporal layers not supported by encoder. - ResetEncoder("VP8", 1, /*num_temporal_layers*/ 2, 1, /*screenshare*/ false); - fake_encoder_.SetTemporalLayersSupported(0, false); + video_source_.IncomingCapturedFrame( + CreateFrame(CurrentTimeMs(), codec_width_, codec_height_)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + // kLowTargetBitrateBps is only enough for one spatial layer. 
+ ASSERT_EQ(last_layer_allocation.active_spatial_layers.size(), 1u); - // Temporal layers not supported by the encoder. - // Total bitrate should be at ti:0. - VideoBitrateAllocation expected_bitrate; - expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kTargetBitrateBps); + VideoBitrateAllocation bitrate_allocation = + fake_encoder_.GetAndResetLastRateControlSettings()->target_bitrate; + // Check that encoder has been updated too, not just allocation observer. + EXPECT_EQ(bitrate_allocation.get_sum_bps(), kLowTargetBitrateBps); + AdvanceTime(TimeDelta::Seconds(1) / kDefaultFps); - VerifyAllocatedBitrate(expected_bitrate); + // VideoLayersAllocation might be updated if frame rate changes. + int number_of_layers_allocation = 1; + const int64_t start_time_ms = CurrentTimeMs(); + while (CurrentTimeMs() - start_time_ms < 10 * kProcessIntervalMs) { + video_source_.IncomingCapturedFrame( + CreateFrame(CurrentTimeMs(), codec_width_, codec_height_)); + WaitForEncodedFrame(CurrentTimeMs()); + if (number_of_layers_allocation != sink_.number_of_layers_allocations()) { + number_of_layers_allocation = sink_.number_of_layers_allocations(); + VideoLayersAllocation new_allocation = + sink_.GetLastVideoLayersAllocation(); + ASSERT_EQ(new_allocation.active_spatial_layers.size(), 1u); + EXPECT_NE(new_allocation.active_spatial_layers[0].frame_rate_fps, + last_layer_allocation.active_spatial_layers[0].frame_rate_fps); + EXPECT_EQ(new_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer); + last_layer_allocation = new_allocation; + } + } + EXPECT_LE(sink_.number_of_layers_allocations(), 3); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, VerifyBitrateAllocationForTwoStreams) { - // 2 TLs configured, temporal layers only supported for first stream. 
- ResetEncoder("VP8", 2, /*num_temporal_layers*/ 2, 1, /*screenshare*/ false); - fake_encoder_.SetTemporalLayersSupported(0, true); - fake_encoder_.SetTemporalLayersSupported(1, false); +TEST_F(VideoStreamEncoderTest, + ReportsVideoLayersAllocationForVP8WithMiddleLayerDisabled) { + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP8, + /* num_streams*/ 3, &video_encoder_config); + video_encoder_config.max_bitrate_bps = 2 * kTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + VideoEncoder::GetDefaultVp8Settings()); + for (auto& layer : video_encoder_config.simulcast_layers) { + layer.num_temporal_layers = 2; + } + // Simulcast layers are used for enabling/disabling streams. + video_encoder_config.simulcast_layers[0].active = true; + video_encoder_config.simulcast_layers[1].active = false; + video_encoder_config.simulcast_layers[2].active = true; + ConfigureEncoder(std::move(video_encoder_config), + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); - const int kS0Bps = 150000; - const int kS0Tl0Bps = - kS0Bps * - webrtc::SimulcastRateAllocator::GetTemporalRateAllocation( - /*num_layers*/ 2, /*temporal_id*/ 0, /*base_heavy_tl3_alloc*/ false); - const int kS0Tl1Bps = - kS0Bps * - webrtc::SimulcastRateAllocator::GetTemporalRateAllocation( - /*num_layers*/ 2, /*temporal_id*/ 1, /*base_heavy_tl3_alloc*/ false); - const int kS1Bps = kTargetBitrateBps - kS0Tl1Bps; - // Temporal layers not supported by si:1. 
- VideoBitrateAllocation expected_bitrate; - expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kS0Tl0Bps); - expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 1, kS0Tl1Bps - kS0Tl0Bps); - expected_bitrate.SetBitrate(/*si*/ 1, /*ti*/ 0, kS1Bps); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - VerifyAllocatedBitrate(expected_bitrate); + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + EXPECT_LT(last_layer_allocation.active_spatial_layers[0].width, 1280); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].width, 1280); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, OveruseDetectorUpdatedOnReconfigureAndAdaption) { - const int kFrameWidth = 1280; - const int kFrameHeight = 720; - const int kFramerate = 24; +TEST_F(VideoStreamEncoderTest, + ReportsVideoLayersAllocationForVP8WithMiddleAndHighestLayerDisabled) { + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP8, + /* num_streams*/ 3, &video_encoder_config); + video_encoder_config.max_bitrate_bps = 2 * kTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + 
VideoEncoder::GetDefaultVp8Settings()); + for (auto& layer : video_encoder_config.simulcast_layers) { + layer.num_temporal_layers = 2; + } + // Simulcast layers are used for enabling/disabling streams. + video_encoder_config.simulcast_layers[0].active = true; + video_encoder_config.simulcast_layers[1].active = false; + video_encoder_config.simulcast_layers[2].active = false; + ConfigureEncoder(std::move(video_encoder_config), + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - test::FrameForwarder source; - video_stream_encoder_->SetSource( - &source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); - // Insert a single frame, triggering initial configuration. - source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight)); - video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); - EXPECT_EQ( - video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(), - kDefaultFramerate); + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(1)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + EXPECT_LT(last_layer_allocation.active_spatial_layers[0].width, 1280); - // Trigger reconfigure encoder (without resetting the entire instance). 
+ video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ReportsVideoLayersAllocationForV9SvcWithTemporalLayerSupport) { + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true); VideoEncoderConfig video_encoder_config; - video_encoder_config.codec_type = kVideoCodecVP8; - video_encoder_config.max_bitrate_bps = kTargetBitrateBps; - video_encoder_config.number_of_streams = 1; - video_encoder_config.video_stream_factory = - new rtc::RefCountedObject(1, kFramerate); - video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config), + test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9, + /* num_streams*/ 1, &video_encoder_config); + video_encoder_config.max_bitrate_bps = 2 * kTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 2; + vp9_settings.numberOfTemporalLayers = 2; + vp9_settings.interLayerPred = InterLayerPredMode::kOn; + vp9_settings.automaticResizeOn = false; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + ConfigureEncoder(std::move(video_encoder_config), + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[0] + 
.target_bitrate_per_temporal_layer, + SizeIs(2)); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 640); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].height, 360); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].frame_rate_fps, 30); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].width, 1280); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].height, 720); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].frame_rate_fps, 30); + + // Since full SVC is used, expect the top layer to utilize the full target + // rate. + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer[1], + DataRate::BitsPerSec(kTargetBitrateBps)); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ReportsVideoLayersAllocationForV9SvcWithoutTemporalLayerSupport) { + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, false); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, false); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9, + /* num_streams*/ 1, &video_encoder_config); + video_encoder_config.max_bitrate_bps = 2 * kTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 2; + vp9_settings.numberOfTemporalLayers = 2; + vp9_settings.interLayerPred = InterLayerPredMode::kOn; + vp9_settings.automaticResizeOn = false; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + ConfigureEncoder(std::move(video_encoder_config), + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + 
DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(1)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer, + SizeIs(1)); + // Since full SVC is used, expect the top layer to utilize the full target + // rate. + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer[0], + DataRate::BitsPerSec(kTargetBitrateBps)); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ReportsVideoLayersAllocationForVP9KSvcWithTemporalLayerSupport) { + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9, + /* num_streams*/ 1, &video_encoder_config); + video_encoder_config.max_bitrate_bps = 2 * kTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 2; + vp9_settings.numberOfTemporalLayers = 2; + vp9_settings.interLayerPred = InterLayerPredMode::kOnKeyPic; + vp9_settings.automaticResizeOn = false; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + ConfigureEncoder(std::move(video_encoder_config), + VideoStreamEncoder::BitrateAllocationCallbackType:: + 
kVideoLayersAllocation); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + // Since KSVC is, spatial layers are independend except on key frames. + EXPECT_LT(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer[1], + DataRate::BitsPerSec(kTargetBitrateBps)); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ReportsVideoLayersAllocationForV9SvcWithLowestLayerDisabled) { + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9, + /* num_streams*/ 1, &video_encoder_config); + video_encoder_config.max_bitrate_bps = 2 * kTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 3; + vp9_settings.numberOfTemporalLayers = 2; + vp9_settings.interLayerPred = InterLayerPredMode::kOn; + vp9_settings.automaticResizeOn = false; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + 
vp9_settings); + // Simulcast layers are used for enabling/disabling streams. + video_encoder_config.simulcast_layers.resize(3); + video_encoder_config.simulcast_layers[0].active = false; + video_encoder_config.simulcast_layers[1].active = true; + video_encoder_config.simulcast_layers[2].active = true; + ConfigureEncoder(std::move(video_encoder_config), + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 640); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].spatial_id, 0); + + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].width, 1280); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].spatial_id, 1); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + // Since full SVC is used, expect the top layer to utilize the full target + // rate. 
+ EXPECT_EQ(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer[1], + DataRate::BitsPerSec(kTargetBitrateBps)); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ReportsVideoLayersAllocationForV9SvcWithHighestLayerDisabled) { + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9, + /* num_streams*/ 1, &video_encoder_config); + video_encoder_config.max_bitrate_bps = 2 * kTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 3; + vp9_settings.numberOfTemporalLayers = 2; + vp9_settings.interLayerPred = InterLayerPredMode::kOn; + vp9_settings.automaticResizeOn = false; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + // Simulcast layers are used for enabling/disabling streams. 
+ video_encoder_config.simulcast_layers.resize(3); + video_encoder_config.simulcast_layers[2].active = false; + ConfigureEncoder(std::move(video_encoder_config), + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(2)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 320); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].spatial_id, 0); + + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].width, 640); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[1].spatial_id, 1); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ReportsVideoLayersAllocationForV9SvcWithAllButHighestLayerDisabled) { + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx=*/0, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 1, true); + fake_encoder_.SetTemporalLayersSupported(/*spatial_idx*/ 2, true); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(VideoCodecType::kVideoCodecVP9, + /* num_streams*/ 1, &video_encoder_config); + video_encoder_config.max_bitrate_bps = 2 * kTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + VideoCodecVP9 vp9_settings = 
VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 3; + vp9_settings.numberOfTemporalLayers = 2; + vp9_settings.interLayerPred = InterLayerPredMode::kOn; + vp9_settings.automaticResizeOn = false; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + // Simulcast layers are used for enabling/disabling streams. + video_encoder_config.simulcast_layers.resize(3); + video_encoder_config.simulcast_layers[0].active = false; + video_encoder_config.simulcast_layers[1].active = false; + video_encoder_config.simulcast_layers[2].active = true; + ConfigureEncoder(std::move(video_encoder_config), + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(1)); + EXPECT_THAT(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(2)); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 1280); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].spatial_id, 0); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer[1], + DataRate::BitsPerSec(kTargetBitrateBps)); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, ReportsVideoLayersAllocationForH264) { + ResetEncoder("H264", 1, 1, 1, false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + 
DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame(CreateFrame(CurrentTimeMs(), 1280, 720)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + + ASSERT_THAT(last_layer_allocation.active_spatial_layers, SizeIs(1)); + ASSERT_THAT(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer, + SizeIs(1)); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer[0], + DataRate::BitsPerSec(kTargetBitrateBps)); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].width, 1280); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].height, 720); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0].frame_rate_fps, 30); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ReportsUpdatedVideoLayersAllocationWhenBweChanges) { + ResetEncoder("VP8", /*num_streams*/ 2, 1, 1, /*screenshare*/ false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame( + CreateFrame(CurrentTimeMs(), codec_width_, codec_height_)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + VideoLayersAllocation last_layer_allocation = + sink_.GetLastVideoLayersAllocation(); + // kLowTargetBitrateBps is only enough for one spatial layer. 
+ ASSERT_EQ(last_layer_allocation.active_spatial_layers.size(), 1u); + EXPECT_EQ(last_layer_allocation.active_spatial_layers[0] + .target_bitrate_per_temporal_layer[0], + DataRate::BitsPerSec(kLowTargetBitrateBps)); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0); + video_source_.IncomingCapturedFrame( + CreateFrame(CurrentTimeMs(), codec_width_, codec_height_)); + WaitForEncodedFrame(CurrentTimeMs()); + + EXPECT_EQ(sink_.number_of_layers_allocations(), 2); + last_layer_allocation = sink_.GetLastVideoLayersAllocation(); + ASSERT_EQ(last_layer_allocation.active_spatial_layers.size(), 2u); + EXPECT_GT(last_layer_allocation.active_spatial_layers[1] + .target_bitrate_per_temporal_layer[0], + DataRate::Zero()); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ReportsUpdatedVideoLayersAllocationWhenResolutionChanges) { + ResetEncoder("VP8", /*num_streams*/ 2, 1, 1, /*screenshare*/ false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoLayersAllocation); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0); + + video_source_.IncomingCapturedFrame( + CreateFrame(CurrentTimeMs(), codec_width_, codec_height_)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 1); + ASSERT_THAT(sink_.GetLastVideoLayersAllocation().active_spatial_layers, + SizeIs(2)); + EXPECT_EQ(sink_.GetLastVideoLayersAllocation().active_spatial_layers[1].width, + codec_width_); + EXPECT_EQ( + sink_.GetLastVideoLayersAllocation().active_spatial_layers[1].height, + codec_height_); + + video_source_.IncomingCapturedFrame( + CreateFrame(CurrentTimeMs(), codec_width_ / 
2, codec_height_ / 2)); + WaitForEncodedFrame(CurrentTimeMs()); + EXPECT_EQ(sink_.number_of_layers_allocations(), 2); + ASSERT_THAT(sink_.GetLastVideoLayersAllocation().active_spatial_layers, + SizeIs(2)); + EXPECT_EQ(sink_.GetLastVideoLayersAllocation().active_spatial_layers[1].width, + codec_width_ / 2); + EXPECT_EQ( + sink_.GetLastVideoLayersAllocation().active_spatial_layers[1].height, + codec_height_ / 2); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, TemporalLayersNotDisabledIfSupported) { + // 2 TLs configured, temporal layers supported by encoder. + const int kNumTemporalLayers = 2; + ResetEncoder("VP8", 1, kNumTemporalLayers, 1, /*screenshare*/ false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocation); + fake_encoder_.SetTemporalLayersSupported(0, true); + + // Bitrate allocated across temporal layers. + const int kTl0Bps = kTargetBitrateBps * + webrtc::SimulcastRateAllocator::GetTemporalRateAllocation( + kNumTemporalLayers, /*temporal_id*/ 0, + /*base_heavy_tl3_alloc*/ false); + const int kTl1Bps = kTargetBitrateBps * + webrtc::SimulcastRateAllocator::GetTemporalRateAllocation( + kNumTemporalLayers, /*temporal_id*/ 1, + /*base_heavy_tl3_alloc*/ false); + VideoBitrateAllocation expected_bitrate; + expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kTl0Bps); + expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 1, kTl1Bps - kTl0Bps); + + VerifyAllocatedBitrate(expected_bitrate); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, TemporalLayersDisabledIfNotSupported) { + // 2 TLs configured, temporal layers not supported by encoder. + ResetEncoder("VP8", 1, /*num_temporal_layers*/ 2, 1, /*screenshare*/ false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocation); + fake_encoder_.SetTemporalLayersSupported(0, false); + + // Temporal layers not supported by the encoder. + // Total bitrate should be at ti:0. 
+ VideoBitrateAllocation expected_bitrate; + expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kTargetBitrateBps); + + VerifyAllocatedBitrate(expected_bitrate); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, VerifyBitrateAllocationForTwoStreams) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Video-QualityScalerSettings/" + "initial_bitrate_interval_ms:1000,initial_bitrate_factor:0.2/"); + // Reset encoder for field trials to take effect. + ConfigureEncoder(video_encoder_config_.Copy()); + + // 2 TLs configured, temporal layers only supported for first stream. + ResetEncoder("VP8", 2, /*num_temporal_layers*/ 2, 1, /*screenshare*/ false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocation); + fake_encoder_.SetTemporalLayersSupported(0, true); + fake_encoder_.SetTemporalLayersSupported(1, false); + + const int kS0Bps = 150000; + const int kS0Tl0Bps = + kS0Bps * + webrtc::SimulcastRateAllocator::GetTemporalRateAllocation( + /*num_layers*/ 2, /*temporal_id*/ 0, /*base_heavy_tl3_alloc*/ false); + const int kS0Tl1Bps = + kS0Bps * + webrtc::SimulcastRateAllocator::GetTemporalRateAllocation( + /*num_layers*/ 2, /*temporal_id*/ 1, /*base_heavy_tl3_alloc*/ false); + const int kS1Bps = kTargetBitrateBps - kS0Tl1Bps; + // Temporal layers not supported by si:1. 
+ VideoBitrateAllocation expected_bitrate; + expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 0, kS0Tl0Bps); + expected_bitrate.SetBitrate(/*si*/ 0, /*ti*/ 1, kS0Tl1Bps - kS0Tl0Bps); + expected_bitrate.SetBitrate(/*si*/ 1, /*ti*/ 0, kS1Bps); + + VerifyAllocatedBitrate(expected_bitrate); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, OveruseDetectorUpdatedOnReconfigureAndAdaption) { + const int kFrameWidth = 1280; + const int kFrameHeight = 720; + const int kFramerate = 24; + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + test::FrameForwarder source; + video_stream_encoder_->SetSource( + &source, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); + + // Insert a single frame, triggering initial configuration. + source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight)); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + EXPECT_EQ( + video_stream_encoder_->overuse_detector_proxy_->GetLastTargetFramerate(), + kDefaultFramerate); + + // Trigger reconfigure encoder (without resetting the entire instance). + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config); + video_encoder_config.simulcast_layers[0].max_framerate = kFramerate; + video_encoder_config.max_bitrate_bps = kTargetBitrateBps; + video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config), kMaxPayloadLength); video_stream_encoder_->WaitUntilTaskQueueIsIdle(); @@ -3908,13 +5318,11 @@ TEST_F(VideoStreamEncoderTest, // Trigger initial configuration. 
VideoEncoderConfig video_encoder_config; - video_encoder_config.codec_type = kVideoCodecVP8; + test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config); + video_encoder_config.simulcast_layers[0].max_framerate = kLowFramerate; video_encoder_config.max_bitrate_bps = kTargetBitrateBps; - video_encoder_config.number_of_streams = 1; - video_encoder_config.video_stream_factory = - new rtc::RefCountedObject(1, kLowFramerate); source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight)); - video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config), + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), kMaxPayloadLength); video_stream_encoder_->WaitUntilTaskQueueIsIdle(); @@ -3934,8 +5342,7 @@ TEST_F(VideoStreamEncoderTest, // Reconfigure the encoder with a new (higher max framerate), max fps should // still respect the adaptation. - video_encoder_config.video_stream_factory = - new rtc::RefCountedObject(1, kHighFramerate); + video_encoder_config.simulcast_layers[0].max_framerate = kHighFramerate; source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight)); video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config), kMaxPayloadLength); @@ -3974,11 +5381,9 @@ TEST_F(VideoStreamEncoderTest, // Trigger initial configuration. 
VideoEncoderConfig video_encoder_config; - video_encoder_config.codec_type = kVideoCodecVP8; + test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config); + video_encoder_config.simulcast_layers[0].max_framerate = kFramerate; video_encoder_config.max_bitrate_bps = kTargetBitrateBps; - video_encoder_config.number_of_streams = 1; - video_encoder_config.video_stream_factory = - new rtc::RefCountedObject(1, kFramerate); source.IncomingCapturedFrame(CreateFrame(1, kFrameWidth, kFrameHeight)); video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config), kMaxPayloadLength); @@ -4157,217 +5562,834 @@ TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenBweDrops) { video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) { +TEST_F(VideoStreamEncoderTest, + InitialFrameDropNotReactivatedWhenBweDropsWhenScalingDisabled) { webrtc::test::ScopedFieldTrials field_trials( - "WebRTC-Video-QualityRampupSettings/min_pixels:1,min_duration_ms:2000/"); + "WebRTC-Video-QualityScalerSettings/" + "initial_bitrate_interval_ms:1000,initial_bitrate_factor:0.2/"); + fake_encoder_.SetQualityScaling(false); + ConfigureEncoder(video_encoder_config_.Copy()); + const int kNotTooLowBitrateForFrameSizeBps = kTargetBitrateBps * 0.2; + const int kTooLowBitrateForFrameSizeBps = kTargetBitrateBps * 0.19; + const int kWidth = 640; + const int kHeight = 360; - // Reset encoder for field trials to take effect. 
- VideoEncoderConfig config = video_encoder_config_.Copy(); - config.max_bitrate_bps = kTargetBitrateBps; - DataRate max_bitrate = DataRate::BitsPerSec(config.max_bitrate_bps); - ConfigureEncoder(std::move(config)); - fake_encoder_.SetQp(kQpLow); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + // Frame should not be dropped. + WaitForEncodedFrame(1); - // Enable MAINTAIN_FRAMERATE preference. - AdaptingFrameForwarder source; - source.set_adaptation_enabled(true); - video_stream_encoder_->SetSource(&source, - DegradationPreference::MAINTAIN_FRAMERATE); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps), + DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps), + DataRate::BitsPerSec(kNotTooLowBitrateForFrameSizeBps), 0, 0, 0); + video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight)); + // Frame should not be dropped. + WaitForEncodedFrame(2); - // Start at low bitrate. - const int kLowBitrateBps = 200000; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - DataRate::BitsPerSec(kLowBitrateBps), - DataRate::BitsPerSec(kLowBitrateBps), - DataRate::BitsPerSec(kLowBitrateBps), 0, 0, 0); + DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), + DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), + DataRate::BitsPerSec(kTooLowBitrateForFrameSizeBps), 0, 0, 0); + video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight)); + // Not dropped since quality scaling is disabled. + WaitForEncodedFrame(3); - // Expect first frame to be dropped and resolution to be limited. 
- const int kWidth = 1280; - const int kHeight = 720; - const int64_t kFrameIntervalMs = 100; - int64_t timestamp_ms = kFrameIntervalMs; - source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); - ExpectDroppedFrame(); - EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count < kWidth * kHeight, - 5000); + // Expect the sink_wants to specify a scaled frame. + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_THAT(video_source_.sink_wants(), ResolutionMax()); - // Increase bitrate to encoder max. + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenLayersChange) { + const int kLowTargetBitrateBps = 400000; + // Set simulcast. + ResetEncoder("VP8", 3, 1, 1, false); + fake_encoder_.SetQualityScaling(true); + const int kWidth = 1280; + const int kHeight = 720; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - max_bitrate, max_bitrate, max_bitrate, 0, 0, 0); + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0); + video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + // Frame should not be dropped. + WaitForEncodedFrame(1); - // Insert frames and advance |min_duration_ms|. - for (size_t i = 1; i <= 10; i++) { - timestamp_ms += kFrameIntervalMs; - source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); - WaitForEncodedFrame(timestamp_ms); + // Trigger QVGA "singlecast" + // Update the config. 
+ VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(PayloadStringToCodecType("VP8"), 3, + &video_encoder_config); + video_encoder_config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + for (auto& layer : video_encoder_config.simulcast_layers) { + layer.num_temporal_layers = 1; + layer.max_framerate = kDefaultFramerate; } - EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_LT(source.sink_wants().max_pixel_count, kWidth * kHeight); + video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; - fake_clock_.AdvanceTime(TimeDelta::Millis(2000)); + video_encoder_config.simulcast_layers[0].active = true; + video_encoder_config.simulcast_layers[1].active = false; + video_encoder_config.simulcast_layers[2].active = false; - // Insert frame should trigger high BW and release quality limitation. - timestamp_ms += kFrameIntervalMs; - source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); - WaitForEncodedFrame(timestamp_ms); - // The ramp-up code involves the adaptation queue, give it time to execute. - // TODO(hbos): Can we await an appropriate event instead? - video_stream_encoder_->WaitUntilAdaptationTaskQueueIsIdle(); - VerifyFpsMaxResolutionMax(source.sink_wants()); + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - // Frame should not be adapted. - timestamp_ms += kFrameIntervalMs; - source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); - WaitForEncodedFrame(kWidth, kHeight); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight)); + // Frame should not be dropped. 
+ WaitForEncodedFrame(2); + + // Trigger HD "singlecast" + video_encoder_config.simulcast_layers[0].active = false; + video_encoder_config.simulcast_layers[1].active = false; + video_encoder_config.simulcast_layers[2].active = true; + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight)); + // Frame should be dropped because of initial frame drop. + ExpectDroppedFrame(); + + // Expect the sink_wants to specify a scaled frame. + EXPECT_TRUE_WAIT( + video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, - ResolutionNotAdaptedForTooSmallFrame_MaintainFramerateMode) { - const int kTooSmallWidth = 10; - const int kTooSmallHeight = 10; +TEST_F(VideoStreamEncoderTest, InitialFrameDropActivatesWhenSVCLayersChange) { + const int kLowTargetBitrateBps = 400000; + // Set simulcast. + ResetEncoder("VP9", 1, 1, 3, false); + fake_encoder_.SetQualityScaling(true); + const int kWidth = 1280; + const int kHeight = 720; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0); + video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth, kHeight)); + // Frame should not be dropped. + WaitForEncodedFrame(1); - // Enable MAINTAIN_FRAMERATE preference, no initial limitation. 
- test::FrameForwarder source; - video_stream_encoder_->SetSource( - &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); - VerifyNoLimitation(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); + // Trigger QVGA "singlecast" + // Update the config. + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1, + &video_encoder_config); + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 3; + // Since only one layer is active - automatic resize should be enabled. + vp9_settings.automaticResizeOn = true; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + // Currently simulcast layers |active| flags are used to inidicate + // which SVC layers are active. + video_encoder_config.simulcast_layers.resize(3); + + video_encoder_config.simulcast_layers[0].active = true; + video_encoder_config.simulcast_layers[1].active = false; + video_encoder_config.simulcast_layers[2].active = false; - // Trigger adapt down, too small frame, expect no change. - source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight)); - WaitForEncodedFrame(1); - video_stream_encoder_->TriggerCpuOveruse(); - VerifyFpsMaxResolutionMax(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); - EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth, kHeight)); + // Frame should not be dropped. 
+ WaitForEncodedFrame(2); + + // Trigger HD "singlecast" + video_encoder_config.simulcast_layers[0].active = false; + video_encoder_config.simulcast_layers[1].active = false; + video_encoder_config.simulcast_layers[2].active = true; + + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight)); + // Frame should be dropped because of initial frame drop. + ExpectDroppedFrame(); + // Expect the sink_wants to specify a scaled frame. + EXPECT_TRUE_WAIT( + video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000); video_stream_encoder_->Stop(); } TEST_F(VideoStreamEncoderTest, - ResolutionNotAdaptedForTooSmallFrame_BalancedMode) { - const int kTooSmallWidth = 10; - const int kTooSmallHeight = 10; - const int kFpsLimit = 7; - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + EncoderMaxAndMinBitratesUsedIfMiddleStreamActive) { + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits270p( + 480 * 270, 34 * 1000, 12 * 1000, 1234 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits360p( + 640 * 360, 43 * 1000, 21 * 1000, 2345 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits720p( + 1280 * 720, 54 * 1000, 31 * 1000, 2500 * 1000); + fake_encoder_.SetResolutionBitrateLimits( + {kEncoderLimits270p, kEncoderLimits360p, kEncoderLimits720p}); - // Enable BALANCED preference, no initial limitation. 
- test::FrameForwarder source; - video_stream_encoder_->SetSource(&source, - webrtc::DegradationPreference::BALANCED); - VerifyNoLimitation(source.sink_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1, + &video_encoder_config); + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 3; + // Since only one layer is active - automatic resize should be enabled. + vp9_settings.automaticResizeOn = true; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + // Simulcast layers are used to indicate which spatial layers are active. + video_encoder_config.simulcast_layers.resize(3); + video_encoder_config.simulcast_layers[0].active = false; + video_encoder_config.simulcast_layers[1].active = true; + video_encoder_config.simulcast_layers[2].active = false; - // Trigger adapt down, expect limited framerate. - source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight)); - WaitForEncodedFrame(1); - video_stream_encoder_->TriggerQualityLow(); - VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - // Trigger adapt down, too small frame, expect no change. 
- source.IncomingCapturedFrame(CreateFrame(2, kTooSmallWidth, kTooSmallHeight)); - WaitForEncodedFrame(2); - video_stream_encoder_->TriggerQualityLow(); - VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + // The encoder bitrate limits for 360p should be used. + video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, 1); + EXPECT_EQ(fake_encoder_.video_codec().codecType, + VideoCodecType::kVideoCodecVP9); + EXPECT_EQ(fake_encoder_.video_codec().VP9()->numberOfSpatialLayers, 2); + EXPECT_TRUE(fake_encoder_.video_codec().spatialLayers[0].active); + EXPECT_EQ(640, fake_encoder_.video_codec().spatialLayers[0].width); + EXPECT_EQ(360, fake_encoder_.video_codec().spatialLayers[0].height); + EXPECT_EQ(static_cast(kEncoderLimits360p.min_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits360p.max_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].maxBitrate * 1000); + + // The encoder bitrate limits for 270p should be used. 
+ video_source_.IncomingCapturedFrame(CreateFrame(2, 960, 540)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, 1); + EXPECT_EQ(fake_encoder_.video_codec().codecType, + VideoCodecType::kVideoCodecVP9); + EXPECT_EQ(fake_encoder_.video_codec().VP9()->numberOfSpatialLayers, 2); + EXPECT_TRUE(fake_encoder_.video_codec().spatialLayers[0].active); + EXPECT_EQ(480, fake_encoder_.video_codec().spatialLayers[0].width); + EXPECT_EQ(270, fake_encoder_.video_codec().spatialLayers[0].height); + EXPECT_EQ(static_cast(kEncoderLimits270p.min_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].minBitrate * 1000); + EXPECT_EQ(static_cast(kEncoderLimits270p.max_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].maxBitrate * 1000); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, FailingInitEncodeDoesntCauseCrash) { - fake_encoder_.ForceInitEncodeFailure(true); - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - ResetEncoder("VP8", 2, 1, 1, false); - const int kFrameWidth = 1280; - const int kFrameHeight = 720; - video_source_.IncomingCapturedFrame( - CreateFrame(1, kFrameWidth, kFrameHeight)); - ExpectDroppedFrame(); +TEST_F(VideoStreamEncoderTest, + DefaultMaxAndMinBitratesUsedIfMiddleStreamActive) { + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1, + &video_encoder_config); + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 3; + // Since only one layer is active - automatic resize should be enabled. 
+ vp9_settings.automaticResizeOn = true; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + // Simulcast layers are used to indicate which spatial layers are active. + video_encoder_config.simulcast_layers.resize(3); + video_encoder_config.simulcast_layers[0].active = false; + video_encoder_config.simulcast_layers[1].active = true; + video_encoder_config.simulcast_layers[2].active = false; + + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + // The default bitrate limits for 360p should be used. + const absl::optional kLimits360p = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP9, 640 * 360); + video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, 1); + EXPECT_EQ(fake_encoder_.video_codec().codecType, + VideoCodecType::kVideoCodecVP9); + EXPECT_EQ(fake_encoder_.video_codec().VP9()->numberOfSpatialLayers, 2); + EXPECT_TRUE(fake_encoder_.video_codec().spatialLayers[0].active); + EXPECT_EQ(640, fake_encoder_.video_codec().spatialLayers[0].width); + EXPECT_EQ(360, fake_encoder_.video_codec().spatialLayers[0].height); + EXPECT_EQ(static_cast(kLimits360p->min_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].minBitrate * 1000); + EXPECT_EQ(static_cast(kLimits360p->max_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].maxBitrate * 1000); + + // The default bitrate limits for 270p should be used. 
+ const absl::optional kLimits270p = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP9, 480 * 270); + video_source_.IncomingCapturedFrame(CreateFrame(2, 960, 540)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, 1); + EXPECT_EQ(fake_encoder_.video_codec().codecType, + VideoCodecType::kVideoCodecVP9); + EXPECT_EQ(fake_encoder_.video_codec().VP9()->numberOfSpatialLayers, 2); + EXPECT_TRUE(fake_encoder_.video_codec().spatialLayers[0].active); + EXPECT_EQ(480, fake_encoder_.video_codec().spatialLayers[0].width); + EXPECT_EQ(270, fake_encoder_.video_codec().spatialLayers[0].height); + EXPECT_EQ(static_cast(kLimits270p->min_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].minBitrate * 1000); + EXPECT_EQ(static_cast(kLimits270p->max_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].maxBitrate * 1000); + video_stream_encoder_->Stop(); } -// TODO(sprang): Extend this with fps throttling and any "balanced" extensions. -TEST_F(VideoStreamEncoderTest, - AdaptsResolutionOnOveruse_MaintainFramerateMode) { - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), - DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); +TEST_F(VideoStreamEncoderTest, DefaultMaxAndMinBitratesNotUsedIfDisabled) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-DefaultBitrateLimitsKillSwitch/Enabled/"); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1, + &video_encoder_config); + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 3; + // Since only one layer is active - automatic resize should be enabled. 
+ vp9_settings.automaticResizeOn = true; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + // Simulcast layers are used to indicate which spatial layers are active. + video_encoder_config.simulcast_layers.resize(3); + video_encoder_config.simulcast_layers[0].active = false; + video_encoder_config.simulcast_layers[1].active = true; + video_encoder_config.simulcast_layers[2].active = false; - const int kFrameWidth = 1280; - const int kFrameHeight = 720; - // Enabled default VideoAdapter downscaling. First step is 3/4, not 3/5 as - // requested by - // VideoStreamEncoder::VideoSourceProxy::RequestResolutionLowerThan(). - video_source_.set_adaptation_enabled(true); + // Reset encoder for field trials to take effect. + ConfigureEncoder(video_encoder_config.Copy()); - video_source_.IncomingCapturedFrame( - CreateFrame(1 * kFrameIntervalMs, kFrameWidth, kFrameHeight)); - WaitForEncodedFrame(kFrameWidth, kFrameHeight); + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - // Trigger CPU overuse, downscale by 3/4. - video_stream_encoder_->TriggerCpuOveruse(); - video_source_.IncomingCapturedFrame( - CreateFrame(2 * kFrameIntervalMs, kFrameWidth, kFrameHeight)); - WaitForEncodedFrame((kFrameWidth * 3) / 4, (kFrameHeight * 3) / 4); + // The default bitrate limits for 360p should not be used. 
+ const absl::optional kLimits360p = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP9, 640 * 360); + video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, 1); + EXPECT_EQ(fake_encoder_.video_codec().codecType, kVideoCodecVP9); + EXPECT_EQ(fake_encoder_.video_codec().VP9()->numberOfSpatialLayers, 2); + EXPECT_TRUE(fake_encoder_.video_codec().spatialLayers[0].active); + EXPECT_EQ(640, fake_encoder_.video_codec().spatialLayers[0].width); + EXPECT_EQ(360, fake_encoder_.video_codec().spatialLayers[0].height); + EXPECT_NE(static_cast(kLimits360p->max_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].maxBitrate * 1000); - // Trigger CPU normal use, return to original resolution. - video_stream_encoder_->TriggerCpuUnderuse(); - video_source_.IncomingCapturedFrame( - CreateFrame(3 * kFrameIntervalMs, kFrameWidth, kFrameHeight)); - WaitForEncodedFrame(kFrameWidth, kFrameHeight); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, SinglecastBitrateLimitsNotUsedForOneStream) { + ResetEncoder("VP9", /*num_streams=*/1, /*num_temporal_layers=*/1, + /*num_spatial_layers=*/1, /*screenshare=*/false); + + // The default singlecast bitrate limits for 720p should not be used. 
+ const absl::optional kLimits720p = + EncoderInfoSettings::GetDefaultSinglecastBitrateLimitsForResolution( + kVideoCodecVP9, 1280 * 720); + video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, 1); + EXPECT_EQ(fake_encoder_.video_codec().codecType, + VideoCodecType::kVideoCodecVP9); + EXPECT_EQ(fake_encoder_.video_codec().VP9()->numberOfSpatialLayers, 1); + EXPECT_TRUE(fake_encoder_.video_codec().spatialLayers[0].active); + EXPECT_EQ(1280, fake_encoder_.video_codec().spatialLayers[0].width); + EXPECT_EQ(720, fake_encoder_.video_codec().spatialLayers[0].height); + EXPECT_NE(static_cast(kLimits720p->max_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].maxBitrate * 1000); video_stream_encoder_->Stop(); } TEST_F(VideoStreamEncoderTest, - AdaptsFramerateOnOveruse_MaintainResolutionMode) { - const int kFrameWidth = 1280; - const int kFrameHeight = 720; + EncoderMaxAndMinBitratesNotUsedIfLowestStreamActive) { + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits180p( + 320 * 180, 34 * 1000, 12 * 1000, 1234 * 1000); + const VideoEncoder::ResolutionBitrateLimits kEncoderLimits720p( + 1280 * 720, 54 * 1000, 31 * 1000, 2500 * 1000); + fake_encoder_.SetResolutionBitrateLimits( + {kEncoderLimits180p, kEncoderLimits720p}); + + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(PayloadStringToCodecType("VP9"), 1, + &video_encoder_config); + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = 3; + // Since only one layer is active - automatic resize should be enabled. 
+ vp9_settings.automaticResizeOn = true; + video_encoder_config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + video_encoder_config.max_bitrate_bps = kSimulcastTargetBitrateBps; + video_encoder_config.content_type = + VideoEncoderConfig::ContentType::kRealtimeVideo; + // Simulcast layers are used to indicate which spatial layers are active. + video_encoder_config.simulcast_layers.resize(3); + video_encoder_config.simulcast_layers[0].active = true; + video_encoder_config.simulcast_layers[1].active = false; + video_encoder_config.simulcast_layers[2].active = false; + + video_stream_encoder_->ConfigureEncoder(video_encoder_config.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + // Limits not applied on lowest stream, limits for 180p should not be used. + video_source_.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + EXPECT_FALSE(WaitForFrame(1000)); + EXPECT_EQ(fake_encoder_.video_codec().numberOfSimulcastStreams, 1); + EXPECT_EQ(fake_encoder_.video_codec().codecType, + VideoCodecType::kVideoCodecVP9); + EXPECT_EQ(fake_encoder_.video_codec().VP9()->numberOfSpatialLayers, 3); + EXPECT_TRUE(fake_encoder_.video_codec().spatialLayers[0].active); + EXPECT_EQ(320, fake_encoder_.video_codec().spatialLayers[0].width); + EXPECT_EQ(180, fake_encoder_.video_codec().spatialLayers[0].height); + EXPECT_NE(static_cast(kEncoderLimits180p.min_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].minBitrate * 1000); + EXPECT_NE(static_cast(kEncoderLimits180p.max_bitrate_bps), + fake_encoder_.video_codec().spatialLayers[0].maxBitrate * 1000); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + InitialFrameDropActivatesWhenResolutionIncreases) { + const int kWidth = 640; + const int kHeight = 360; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); 
- video_stream_encoder_->SetSource( - &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); - video_source_.set_adaptation_enabled(true); + video_source_.IncomingCapturedFrame(CreateFrame(1, kWidth / 2, kHeight / 2)); + // Frame should not be dropped. + WaitForEncodedFrame(1); - int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0); + video_source_.IncomingCapturedFrame(CreateFrame(2, kWidth / 2, kHeight / 2)); + // Frame should not be dropped, bitrate not too low for frame. + WaitForEncodedFrame(2); - video_source_.IncomingCapturedFrame( - CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); - WaitForEncodedFrame(timestamp_ms); + // Incoming resolution increases. + video_source_.IncomingCapturedFrame(CreateFrame(3, kWidth, kHeight)); + // Expect to drop this frame, bitrate too low for frame. + ExpectDroppedFrame(); - // Try to trigger overuse. No fps estimate available => no effect. - video_stream_encoder_->TriggerCpuOveruse(); + // Expect the sink_wants to specify a scaled frame. + EXPECT_TRUE_WAIT( + video_source_.sink_wants().max_pixel_count < kWidth * kHeight, 5000); + video_stream_encoder_->Stop(); +} - // Insert frames for one second to get a stable estimate. - for (int i = 0; i < max_framerate_; ++i) { - timestamp_ms += kFrameIntervalMs; - video_source_.IncomingCapturedFrame( - CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); - WaitForEncodedFrame(timestamp_ms); - } +TEST_F(VideoStreamEncoderTest, InitialFrameDropIsNotReactivatedWhenAdaptingUp) { + const int kWidth = 640; + const int kHeight = 360; + // So that quality scaling doesn't happen by itself. 
+ fake_encoder_.SetQp(kQpHigh); + + AdaptingFrameForwarder source(&time_controller_); + source.set_adaptation_enabled(true); + video_stream_encoder_->SetSource( + &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); + + int timestamp = 1; + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + source.IncomingCapturedFrame(CreateFrame(timestamp, kWidth, kHeight)); + WaitForEncodedFrame(timestamp); + timestamp += 9000; + // Long pause to disable all first BWE drop logic. + AdvanceTime(TimeDelta::Millis(1000)); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), + DataRate::BitsPerSec(kLowTargetBitrateBps), 0, 0, 0); + source.IncomingCapturedFrame(CreateFrame(timestamp, kWidth, kHeight)); + // Not dropped frame, as initial frame drop is disabled by now. + WaitForEncodedFrame(timestamp); + timestamp += 9000; + AdvanceTime(TimeDelta::Millis(100)); + + // Quality adaptation down. + video_stream_encoder_->TriggerQualityLow(); + + // Adaptation has an effect. + EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count < kWidth * kHeight, + 5000); + + // Frame isn't dropped as initial frame dropper is disabled. + source.IncomingCapturedFrame(CreateFrame(timestamp, kWidth, kHeight)); + WaitForEncodedFrame(timestamp); + timestamp += 9000; + AdvanceTime(TimeDelta::Millis(100)); + + // Quality adaptation up. + video_stream_encoder_->TriggerQualityHigh(); + + // Adaptation has an effect. + EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count > kWidth * kHeight, + 5000); + + source.IncomingCapturedFrame(CreateFrame(timestamp, kWidth, kHeight)); + // Frame should not be dropped, as initial framedropper is off. 
+ WaitForEncodedFrame(timestamp); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + FrameDroppedWhenResolutionIncreasesAndLinkAllocationIsLow) { + const int kMinStartBps360p = 222000; + fake_encoder_.SetResolutionBitrateLimits( + {VideoEncoder::ResolutionBitrateLimits(320 * 180, 0, 30000, 400000), + VideoEncoder::ResolutionBitrateLimits(640 * 360, kMinStartBps360p, 30000, + 800000)}); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kMinStartBps360p - 1), // target_bitrate + DataRate::BitsPerSec(kMinStartBps360p - 1), // stable_target_bitrate + DataRate::BitsPerSec(kMinStartBps360p - 1), // link_allocation + 0, 0, 0); + // Frame should not be dropped, bitrate not too low for frame. + video_source_.IncomingCapturedFrame(CreateFrame(1, 320, 180)); + WaitForEncodedFrame(1); + + // Incoming resolution increases, initial frame drop activates. + // Frame should be dropped, link allocation too low for frame. + video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360)); + ExpectDroppedFrame(); + + // Expect sink_wants to specify a scaled frame. + EXPECT_TRUE_WAIT(video_source_.sink_wants().max_pixel_count < 640 * 360, + 5000); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + FrameNotDroppedWhenResolutionIncreasesAndLinkAllocationIsHigh) { + const int kMinStartBps360p = 222000; + fake_encoder_.SetResolutionBitrateLimits( + {VideoEncoder::ResolutionBitrateLimits(320 * 180, 0, 30000, 400000), + VideoEncoder::ResolutionBitrateLimits(640 * 360, kMinStartBps360p, 30000, + 800000)}); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kMinStartBps360p - 1), // target_bitrate + DataRate::BitsPerSec(kMinStartBps360p - 1), // stable_target_bitrate + DataRate::BitsPerSec(kMinStartBps360p), // link_allocation + 0, 0, 0); + // Frame should not be dropped, bitrate not too low for frame. 
+ video_source_.IncomingCapturedFrame(CreateFrame(1, 320, 180)); + WaitForEncodedFrame(1); + + // Incoming resolution increases, initial frame drop activates. + // Frame should be dropped, link allocation not too low for frame. + video_source_.IncomingCapturedFrame(CreateFrame(2, 640, 360)); + WaitForEncodedFrame(2); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, RampsUpInQualityWhenBwIsHigh) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Video-QualityRampupSettings/min_pixels:1,min_duration_ms:2000/"); + + // Reset encoder for field trials to take effect. + VideoEncoderConfig config = video_encoder_config_.Copy(); + config.max_bitrate_bps = kTargetBitrateBps; + DataRate max_bitrate = DataRate::BitsPerSec(config.max_bitrate_bps); + ConfigureEncoder(std::move(config)); + fake_encoder_.SetQp(kQpLow); + + // Enable MAINTAIN_FRAMERATE preference. + AdaptingFrameForwarder source(&time_controller_); + source.set_adaptation_enabled(true); + video_stream_encoder_->SetSource(&source, + DegradationPreference::MAINTAIN_FRAMERATE); + + // Start at low bitrate. + const int kLowBitrateBps = 200000; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kLowBitrateBps), + DataRate::BitsPerSec(kLowBitrateBps), + DataRate::BitsPerSec(kLowBitrateBps), 0, 0, 0); + + // Expect first frame to be dropped and resolution to be limited. + const int kWidth = 1280; + const int kHeight = 720; + const int64_t kFrameIntervalMs = 100; + int64_t timestamp_ms = kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + ExpectDroppedFrame(); + EXPECT_TRUE_WAIT(source.sink_wants().max_pixel_count < kWidth * kHeight, + 5000); + + // Increase bitrate to encoder max. + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + max_bitrate, max_bitrate, max_bitrate, 0, 0, 0); + + // Insert frames and advance |min_duration_ms|. 
+ for (size_t i = 1; i <= 10; i++) { + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + } + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_LT(source.sink_wants().max_pixel_count, kWidth * kHeight); + + AdvanceTime(TimeDelta::Millis(2000)); + + // Insert frame should trigger high BW and release quality limitation. + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + // The ramp-up code involves the adaptation queue, give it time to execute. + // TODO(hbos): Can we await an appropriate event instead? + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + + // Frame should not be adapted. + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(kWidth, kHeight); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + QualityScalerAdaptationsRemovedWhenQualityScalingDisabled) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-Video-QualityScaling/Disabled/"); + AdaptingFrameForwarder source(&time_controller_); + source.set_adaptation_enabled(true); + video_stream_encoder_->SetSource(&source, + DegradationPreference::MAINTAIN_FRAMERATE); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + fake_encoder_.SetQp(kQpHigh + 1); + const int kWidth = 1280; + const int kHeight = 720; + const int64_t kFrameIntervalMs = 100; + int64_t timestamp_ms = kFrameIntervalMs; + for (size_t i = 1; i <= 100; i++) { + timestamp_ms += kFrameIntervalMs; + 
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + } + // Wait for QualityScaler, which will wait for 2000*2.5 ms until checking QP + // for the first time. + // TODO(eshr): We should avoid these waits by using threads with simulated + // time. + EXPECT_TRUE_WAIT(stats_proxy_->GetStats().bw_limited_resolution, + 2000 * 2.5 * 2); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_THAT(source.sink_wants(), WantsMaxPixels(Lt(kWidth * kHeight))); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + + // Disable Quality scaling by turning off scaler on the encoder and + // reconfiguring. + fake_encoder_.SetQualityScaling(false); + video_stream_encoder_->ConfigureEncoder(video_encoder_config_.Copy(), + kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + AdvanceTime(TimeDelta::Millis(0)); + // Since we turned off the quality scaler, the adaptations made by it are + // removed. + EXPECT_THAT(source.sink_wants(), ResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ResolutionNotAdaptedForTooSmallFrame_MaintainFramerateMode) { + const int kTooSmallWidth = 10; + const int kTooSmallHeight = 10; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + // Enable MAINTAIN_FRAMERATE preference, no initial limitation. 
+ test::FrameForwarder source; + video_stream_encoder_->SetSource( + &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); + EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants()); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); + + // Trigger adapt down, too small frame, expect no change. + source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight)); + WaitForEncodedFrame(1); + video_stream_encoder_->TriggerCpuOveruse(); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); + EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); + EXPECT_EQ(0, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + ResolutionNotAdaptedForTooSmallFrame_BalancedMode) { + const int kTooSmallWidth = 10; + const int kTooSmallHeight = 10; + const int kFpsLimit = 7; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + // Enable BALANCED preference, no initial limitation. + test::FrameForwarder source; + video_stream_encoder_->SetSource(&source, + webrtc::DegradationPreference::BALANCED); + EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + + // Trigger adapt down, expect limited framerate. + source.IncomingCapturedFrame(CreateFrame(1, kTooSmallWidth, kTooSmallHeight)); + WaitForEncodedFrame(1); + video_stream_encoder_->TriggerQualityLow(); + EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit))); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + // Trigger adapt down, too small frame, expect no change. 
+ source.IncomingCapturedFrame(CreateFrame(2, kTooSmallWidth, kTooSmallHeight)); + WaitForEncodedFrame(2); + video_stream_encoder_->TriggerQualityLow(); + EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit))); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, FailingInitEncodeDoesntCauseCrash) { + fake_encoder_.ForceInitEncodeFailure(true); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + ResetEncoder("VP8", 2, 1, 1, false); + const int kFrameWidth = 1280; + const int kFrameHeight = 720; + video_source_.IncomingCapturedFrame( + CreateFrame(1, kFrameWidth, kFrameHeight)); + ExpectDroppedFrame(); + video_stream_encoder_->Stop(); +} + +// TODO(sprang): Extend this with fps throttling and any "balanced" extensions. +TEST_F(VideoStreamEncoderTest, + AdaptsResolutionOnOveruse_MaintainFramerateMode) { + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + + const int kFrameWidth = 1280; + const int kFrameHeight = 720; + // Enabled default VideoAdapter downscaling. First step is 3/4, not 3/5 as + // requested by + // VideoStreamEncoder::VideoSourceProxy::RequestResolutionLowerThan(). + video_source_.set_adaptation_enabled(true); + + video_source_.IncomingCapturedFrame( + CreateFrame(1 * kFrameIntervalMs, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(kFrameWidth, kFrameHeight); + + // Trigger CPU overuse, downscale by 3/4. 
+ video_stream_encoder_->TriggerCpuOveruse(); + video_source_.IncomingCapturedFrame( + CreateFrame(2 * kFrameIntervalMs, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame((kFrameWidth * 3) / 4, (kFrameHeight * 3) / 4); + + // Trigger CPU normal use, return to original resolution. + video_stream_encoder_->TriggerCpuUnderuse(); + video_source_.IncomingCapturedFrame( + CreateFrame(3 * kFrameIntervalMs, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(kFrameWidth, kFrameHeight); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + AdaptsFramerateOnOveruse_MaintainResolutionMode) { + const int kFrameWidth = 1280; + const int kFrameHeight = 720; + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + video_stream_encoder_->SetSource( + &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); + video_source_.set_adaptation_enabled(true); + + int64_t timestamp_ms = CurrentTimeMs(); + + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(timestamp_ms); + + // Try to trigger overuse. No fps estimate available => no effect. + video_stream_encoder_->TriggerCpuOveruse(); + + // Insert frames for one second to get a stable estimate. + for (int i = 0; i < max_framerate_; ++i) { + timestamp_ms += kFrameIntervalMs; + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(timestamp_ms); + } // Trigger CPU overuse, reduce framerate by 2/3. 
video_stream_encoder_->TriggerCpuOveruse(); @@ -4456,7 +6478,7 @@ TEST_F(VideoStreamEncoderTest, DoesntAdaptDownPastMinFramerate) { &video_source_, webrtc::DegradationPreference::MAINTAIN_RESOLUTION); video_source_.set_adaptation_enabled(true); - int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + int64_t timestamp_ms = CurrentTimeMs(); // Trigger overuse as much as we can. rtc::VideoSinkWants last_wants; @@ -4471,14 +6493,15 @@ TEST_F(VideoStreamEncoderTest, DoesntAdaptDownPastMinFramerate) { sink_.WaitForEncodedFrame(timestamp_ms); } timestamp_ms += kFrameIntervalMs; - fake_clock_.AdvanceTime(TimeDelta::Millis(kFrameIntervalMs)); + AdvanceTime(TimeDelta::Millis(kFrameIntervalMs)); } // ...and then try to adapt again. video_stream_encoder_->TriggerCpuOveruse(); } while (video_source_.sink_wants().max_framerate_fps < last_wants.max_framerate_fps); - VerifyFpsEqResolutionMax(video_source_.sink_wants(), kMinFramerateFps); + EXPECT_THAT(video_source_.sink_wants(), + FpsMatchesResolutionMax(Eq(kMinFramerateFps))); video_stream_encoder_->Stop(); } @@ -4495,14 +6518,14 @@ TEST_F(VideoStreamEncoderTest, DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Enable BALANCED preference, no initial limitation. 
- AdaptingFrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); source.set_adaptation_enabled(true); video_stream_encoder_->SetSource(&source, webrtc::DegradationPreference::BALANCED); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(0, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4512,7 +6535,8 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4522,7 +6546,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4532,7 +6556,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants()); 
+ EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4542,7 +6566,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4552,7 +6576,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(5, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4562,7 +6586,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(6, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4572,7 +6596,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; 
source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); rtc::VideoSinkWants last_wants = source.sink_wants(); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); @@ -4583,17 +6607,17 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionEq(source.sink_wants(), last_wants); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(last_wants)); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(7, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger adapt down, expect expect increased fps (320x180@10fps). + // Trigger adapt up, expect increased fps (320x180@10fps). 
video_stream_encoder_->TriggerQualityHigh(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsGtResolutionEq(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(8, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4603,7 +6627,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(9, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4613,7 +6637,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsGtResolutionEq(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(10, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4623,7 +6647,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); 
EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(11, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4633,7 +6657,9 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionEq(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMax()); + EXPECT_EQ(source.sink_wants().max_pixel_count, + source.last_wants().max_pixel_count); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(12, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4643,7 +6669,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(13, stats_proxy_->GetStats().number_of_quality_adapt_changes); @@ -4653,15 +6679,15 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants()); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants())); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_EQ(14, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt up, expect no change. 
video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_EQ(14, stats_proxy_->GetStats().number_of_quality_adapt_changes); video_stream_encoder_->Stop(); @@ -4678,14 +6704,14 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) { DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Enable BALANCED preference, no initial limitation. - AdaptingFrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); source.set_adaptation_enabled(true); video_stream_encoder_->SetSource(&source, webrtc::DegradationPreference::BALANCED); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); @@ -4698,7 +6724,8 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) { timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), kWidth * kHeight); + EXPECT_THAT(source.sink_wants(), + FpsMaxResolutionMatches(Lt(kWidth * kHeight))); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); @@ -4711,7 +6738,7 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) { timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionLt(source.sink_wants(), 
source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionLt(source.last_wants())); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); @@ -4724,59 +6751,77 @@ TEST_F(VideoStreamEncoderTest, AdaptWithTwoReasonsAndDifferentOrder_Framerate) { timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsLtResolutionEq(source.sink_wants(), source.last_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_THAT(source.sink_wants(), FpsLtResolutionEq(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger cpu adapt up, expect increased fps (640x360@30fps). - video_stream_encoder_->TriggerCpuUnderuse(); + // Trigger cpu adapt up, expect no change since QP is most limited. + { + // Store current sink wants since we expect no change and if there is no + // change then last_wants() is not updated. + auto previous_sink_wants = source.sink_wants(); + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(previous_sink_wants)); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + } + + // Trigger quality adapt up, expect increased fps (640x360@30fps). 
+ video_stream_encoder_->TriggerQualityHigh(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionEq(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsGtResolutionEq(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); - EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger quality adapt up, expect upscaled resolution (960x540@30fps). + // Trigger quality adapt up and Cpu adapt up since both are most limited, + // expect increased resolution (960x540@30fps). 
video_stream_encoder_->TriggerQualityHigh(); + video_stream_encoder_->TriggerCpuUnderuse(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants()); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants())); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); EXPECT_EQ(3, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger cpu adapt up, expect no restriction (1280x720fps@30fps). + // Trigger quality adapt up and Cpu adapt up since both are most limited, + // expect no restriction (1280x720fps@30fps). 
+ video_stream_encoder_->TriggerQualityHigh(); video_stream_encoder_->TriggerCpuUnderuse(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionGt(source.sink_wants(), source.last_wants()); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionGt(source.last_wants())); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt up, expect no change. video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_EQ(4, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(4, stats_proxy_->GetStats().number_of_quality_adapt_changes); video_stream_encoder_->Stop(); } @@ -4794,14 +6839,14 @@ TEST_F(VideoStreamEncoderTest, DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Enable BALANCED preference, no initial limitation. 
- AdaptingFrameForwarder source; + AdaptingFrameForwarder source(&time_controller_); source.set_adaptation_enabled(true); video_stream_encoder_->SetSource(&source, webrtc::DegradationPreference::BALANCED); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(kWidth, kHeight); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); @@ -4814,7 +6859,7 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionMax(source.sink_wants(), kFpsLimit); + EXPECT_THAT(source.sink_wants(), FpsMatchesResolutionMax(Eq(kFpsLimit))); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); @@ -4827,45 +6872,60 @@ TEST_F(VideoStreamEncoderTest, timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionLt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionLt(source.last_wants())); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); - EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger cpu adapt up, expect 
upscaled resolution (640x360@15fps). - video_stream_encoder_->TriggerCpuUnderuse(); + // Trigger cpu adapt up, expect no change because quality is most limited. + { + auto previous_sink_wants = source.sink_wants(); + // Store current sink wants since we expect no change ind if there is no + // change then last__wants() is not updated. + video_stream_encoder_->TriggerCpuUnderuse(); + timestamp_ms += kFrameIntervalMs; + source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionEqTo(previous_sink_wants)); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + } + + // Trigger quality adapt up, expect upscaled resolution (640x360@15fps). + video_stream_encoder_->TriggerQualityHigh(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsEqResolutionGt(source.sink_wants(), source.last_wants()); + EXPECT_THAT(source.sink_wants(), FpsEqResolutionGt(source.last_wants())); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); - EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); - EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(1, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_TRUE(stats_proxy_->GetStats().cpu_limited_framerate); + EXPECT_EQ(1, stats_proxy_->GetStats().number_of_cpu_adapt_changes); + EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); - // Trigger quality adapt up, expect increased fps (640x360@30fps). + // Trigger quality and cpu adapt up, expect increased fps (640x360@30fps). 
video_stream_encoder_->TriggerQualityHigh(); + video_stream_encoder_->TriggerCpuUnderuse(); timestamp_ms += kFrameIntervalMs; source.IncomingCapturedFrame(CreateFrame(timestamp_ms, kWidth, kHeight)); WaitForEncodedFrame(timestamp_ms); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_framerate); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_resolution); EXPECT_FALSE(stats_proxy_->GetStats().cpu_limited_framerate); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); // Trigger adapt up, expect no change. video_stream_encoder_->TriggerQualityHigh(); - VerifyFpsMaxResolutionMax(source.sink_wants()); + EXPECT_THAT(source.sink_wants(), FpsMaxResolutionMax()); EXPECT_EQ(2, stats_proxy_->GetStats().number_of_cpu_adapt_changes); - EXPECT_EQ(2, stats_proxy_->GetStats().number_of_quality_adapt_changes); + EXPECT_EQ(3, stats_proxy_->GetStats().number_of_quality_adapt_changes); video_stream_encoder_->Stop(); } @@ -4885,11 +6945,11 @@ TEST_F(VideoStreamEncoderTest, AcceptsFullHdAdaptedDownSimulcastFrames) { DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); // Trigger reconfigure encoder (without resetting the entire instance). 
VideoEncoderConfig video_encoder_config; - video_encoder_config.codec_type = kVideoCodecVP8; + test::FillEncoderConfiguration(kVideoCodecVP8, 1, &video_encoder_config); + video_encoder_config.simulcast_layers[0].max_framerate = kFramerate; video_encoder_config.max_bitrate_bps = kTargetBitrateBps; - video_encoder_config.number_of_streams = 1; video_encoder_config.video_stream_factory = - new rtc::RefCountedObject(1, kFramerate); + rtc::make_ref_counted(); video_stream_encoder_->ConfigureEncoder(std::move(video_encoder_config), kMaxPayloadLength); video_stream_encoder_->WaitUntilTaskQueueIsIdle(); @@ -4920,7 +6980,7 @@ TEST_F(VideoStreamEncoderTest, PeriodicallyUpdatesChannelParameters) { DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + int64_t timestamp_ms = CurrentTimeMs(); max_framerate_ = kLowFps; // Insert 2 seconds of 2fps video. @@ -4969,9 +7029,10 @@ TEST_F(VideoStreamEncoderTest, DoesNotUpdateBitrateAllocationWhenSuspended) { const int kFrameWidth = 1280; const int kFrameHeight = 720; const int kTargetBitrateBps = 1000000; + ResetEncoder("FAKE", 1, 1, 1, false, + VideoStreamEncoder::BitrateAllocationCallbackType:: + kVideoBitrateAllocation); - MockBitrateObserver bitrate_observer; - video_stream_encoder_->SetBitrateAllocationObserver(&bitrate_observer); video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), @@ -4979,11 +7040,11 @@ TEST_F(VideoStreamEncoderTest, DoesNotUpdateBitrateAllocationWhenSuspended) { video_stream_encoder_->WaitUntilTaskQueueIsIdle(); // Insert a first video frame, causes another bitrate update. 
- int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; - EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(_)).Times(1); + int64_t timestamp_ms = CurrentTimeMs(); video_source_.IncomingCapturedFrame( CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(sink_.number_of_bitrate_allocations(), 1); // Next, simulate video suspension due to pacer queue overrun. video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( @@ -4992,13 +7053,13 @@ TEST_F(VideoStreamEncoderTest, DoesNotUpdateBitrateAllocationWhenSuspended) { // Skip ahead until a new periodic parameter update should have occured. timestamp_ms += kProcessIntervalMs; - fake_clock_.AdvanceTime(TimeDelta::Millis(kProcessIntervalMs)); + AdvanceTime(TimeDelta::Millis(kProcessIntervalMs)); - // Bitrate observer should not be called. - EXPECT_CALL(bitrate_observer, OnBitrateAllocationUpdated(_)).Times(0); + // No more allocations has been made. video_source_.IncomingCapturedFrame( CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); ExpectDroppedFrame(); + EXPECT_EQ(sink_.number_of_bitrate_allocations(), 1); video_stream_encoder_->Stop(); } @@ -5049,6 +7110,45 @@ TEST_F(VideoStreamEncoderTest, video_stream_encoder_->Stop(); } +TEST_F(VideoStreamEncoderTest, + CpuAdaptationThresholdsUpdatesWhenHardwareAccelerationChange) { + const int kFrameWidth = 1280; + const int kFrameHeight = 720; + + const CpuOveruseOptions default_options; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + video_source_.IncomingCapturedFrame( + CreateFrame(1, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(1); + EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions() + .low_encode_usage_threshold_percent, + default_options.low_encode_usage_threshold_percent); + 
EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions() + .high_encode_usage_threshold_percent, + default_options.high_encode_usage_threshold_percent); + + CpuOveruseOptions hardware_options; + hardware_options.low_encode_usage_threshold_percent = 150; + hardware_options.high_encode_usage_threshold_percent = 200; + fake_encoder_.SetIsHardwareAccelerated(true); + + video_source_.IncomingCapturedFrame( + CreateFrame(2, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(2); + + EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions() + .low_encode_usage_threshold_percent, + hardware_options.low_encode_usage_threshold_percent); + EXPECT_EQ(video_stream_encoder_->overuse_detector_proxy_->GetOptions() + .high_encode_usage_threshold_percent, + hardware_options.high_encode_usage_threshold_percent); + + video_stream_encoder_->Stop(); +} + TEST_F(VideoStreamEncoderTest, DropsFramesWhenEncoderOvershoots) { const int kFrameWidth = 320; const int kFrameHeight = 240; @@ -5061,7 +7161,7 @@ TEST_F(VideoStreamEncoderTest, DropsFramesWhenEncoderOvershoots) { DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + int64_t timestamp_ms = CurrentTimeMs(); max_framerate_ = kFps; // Insert 3 seconds of video, verify number of drops with normal bitrate. @@ -5087,10 +7187,16 @@ TEST_F(VideoStreamEncoderTest, DropsFramesWhenEncoderOvershoots) { // of video, verify number of drops. Rate needs to be slightly changed in // order to force the rate to be reconfigured. double overshoot_factor = 2.0; - if (RateControlSettings::ParseFromFieldTrials().UseEncoderBitrateAdjuster()) { + const RateControlSettings trials = + RateControlSettings::ParseFromFieldTrials(); + if (trials.UseEncoderBitrateAdjuster()) { // With bitrate adjuster, when need to overshoot even more to trigger - // frame dropping. 
- overshoot_factor *= 2; + // frame dropping since the adjuter will try to just lower the target + // bitrate rather than drop frames. If network headroom can be used, it + // doesn't push back as hard so we don't need quite as much overshoot. + // These numbers are unfortunately a bit magical but there's not trivial + // way to algebraically infer them. + overshoot_factor = 3.0; } fake_encoder_.SimulateOvershoot(overshoot_factor); video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( @@ -5131,7 +7237,7 @@ TEST_F(VideoStreamEncoderTest, ConfiguresCorrectFrameRate) { ASSERT_GT(max_framerate_, kActualInputFps); - int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + int64_t timestamp_ms = CurrentTimeMs(); max_framerate_ = kActualInputFps; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), @@ -5152,7 +7258,7 @@ TEST_F(VideoStreamEncoderTest, ConfiguresCorrectFrameRate) { video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, AccumulatesUpdateRectOnDroppedFrames) { +TEST_F(VideoStreamEncoderBlockedTest, AccumulatesUpdateRectOnDroppedFrames) { VideoFrame::UpdateRect rect; video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), @@ -5322,83 +7428,69 @@ TEST_F(VideoStreamEncoderTest, AdjustsTimestampInternalSource) { int64_t timestamp = 1; EncodedImage image; - image.SetEncodedData( - EncodedImageBuffer::Create(kTargetBitrateBps / kDefaultFramerate / 8)); image.capture_time_ms_ = ++timestamp; image.SetTimestamp(static_cast(timestamp * 90)); const int64_t kEncodeFinishDelayMs = 10; image.timing_.encode_start_ms = timestamp; image.timing_.encode_finish_ms = timestamp + kEncodeFinishDelayMs; - fake_encoder_.InjectEncodedImage(image); + fake_encoder_.InjectEncodedImage(image, /*codec_specific_info=*/nullptr); // Wait for frame without incrementing clock. 
EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs)); // Frame is captured kEncodeFinishDelayMs before it's encoded, so restored // capture timestamp should be kEncodeFinishDelayMs in the past. EXPECT_EQ(sink_.GetLastCaptureTimeMs(), - fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec - - kEncodeFinishDelayMs); + CurrentTimeMs() - kEncodeFinishDelayMs); video_stream_encoder_->Stop(); } TEST_F(VideoStreamEncoderTest, DoesNotRewriteH264BitstreamWithOptimalSps) { - // Configure internal source factory and setup test again. - encoder_factory_.SetHasInternalSource(true); + // SPS contains VUI with restrictions on the maximum number of reordered + // pictures, there is no need to rewrite the bitstream to enable faster + // decoding. ResetEncoder("H264", 1, 1, 1, false); - EncodedImage image(optimal_sps, sizeof(optimal_sps), sizeof(optimal_sps)); - image._frameType = VideoFrameType::kVideoFrameKey; - - CodecSpecificInfo codec_specific_info; - codec_specific_info.codecType = kVideoCodecH264; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - RTPFragmentationHeader fragmentation; - fragmentation.VerifyAndAllocateFragmentationHeader(1); - fragmentation.fragmentationOffset[0] = 4; - fragmentation.fragmentationLength[0] = sizeof(optimal_sps) - 4; + fake_encoder_.SetEncodedImageData( + EncodedImageBuffer::Create(optimal_sps, sizeof(optimal_sps))); - fake_encoder_.InjectEncodedImage(image, &codec_specific_info, &fragmentation); - EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs)); + video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); + WaitForEncodedFrame(1); EXPECT_THAT(sink_.GetLastEncodedImageData(), testing::ElementsAreArray(optimal_sps)); - RTPFragmentationHeader last_fragmentation = sink_.GetLastFragmentation(); - 
ASSERT_THAT(last_fragmentation.fragmentationVectorSize, 1U); - EXPECT_EQ(last_fragmentation.fragmentationOffset[0], 4U); - EXPECT_EQ(last_fragmentation.fragmentationLength[0], sizeof(optimal_sps) - 4); video_stream_encoder_->Stop(); } TEST_F(VideoStreamEncoderTest, RewritesH264BitstreamWithNonOptimalSps) { + // SPS does not contain VUI, the bitstream is will be rewritten with added + // VUI with restrictions on the maximum number of reordered pictures to + // enable faster decoding. uint8_t original_sps[] = {0, 0, 0, 1, H264::NaluType::kSps, 0x00, 0x00, 0x03, 0x03, 0xF4, 0x05, 0x03, 0xC7, 0xC0}; - - // Configure internal source factory and setup test again. - encoder_factory_.SetHasInternalSource(true); ResetEncoder("H264", 1, 1, 1, false); - EncodedImage image(original_sps, sizeof(original_sps), sizeof(original_sps)); - image._frameType = VideoFrameType::kVideoFrameKey; - - CodecSpecificInfo codec_specific_info; - codec_specific_info.codecType = kVideoCodecH264; + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - RTPFragmentationHeader fragmentation; - fragmentation.VerifyAndAllocateFragmentationHeader(1); - fragmentation.fragmentationOffset[0] = 4; - fragmentation.fragmentationLength[0] = sizeof(original_sps) - 4; + fake_encoder_.SetEncodedImageData( + EncodedImageBuffer::Create(original_sps, sizeof(original_sps))); - fake_encoder_.InjectEncodedImage(image, &codec_specific_info, &fragmentation); - EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs)); + video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr)); + WaitForEncodedFrame(1); EXPECT_THAT(sink_.GetLastEncodedImageData(), testing::ElementsAreArray(optimal_sps)); - RTPFragmentationHeader last_fragmentation = sink_.GetLastFragmentation(); - ASSERT_THAT(last_fragmentation.fragmentationVectorSize, 
1U); - EXPECT_EQ(last_fragmentation.fragmentationOffset[0], 4U); - EXPECT_EQ(last_fragmentation.fragmentationLength[0], sizeof(optimal_sps) - 4); video_stream_encoder_->Stop(); } @@ -5416,7 +7508,7 @@ TEST_F(VideoStreamEncoderTest, CopiesVideoFrameMetadataAfterDownscale) { // Insert a first video frame. It should be dropped because of downscale in // resolution. - int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + int64_t timestamp_ms = CurrentTimeMs(); VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight); frame.set_rotation(kVideoRotation_270); video_source_.IncomingCapturedFrame(frame); @@ -5424,7 +7516,7 @@ TEST_F(VideoStreamEncoderTest, CopiesVideoFrameMetadataAfterDownscale) { ExpectDroppedFrame(); // Second frame is downscaled. - timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + timestamp_ms = CurrentTimeMs(); frame = CreateFrame(timestamp_ms, kFrameWidth / 2, kFrameHeight / 2); frame.set_rotation(kVideoRotation_90); video_source_.IncomingCapturedFrame(frame); @@ -5433,7 +7525,7 @@ TEST_F(VideoStreamEncoderTest, CopiesVideoFrameMetadataAfterDownscale) { sink_.CheckLastFrameRotationMatches(kVideoRotation_90); // Insert another frame, also downscaled. - timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + timestamp_ms = CurrentTimeMs(); frame = CreateFrame(timestamp_ms, kFrameWidth / 2, kFrameHeight / 2); frame.set_rotation(kVideoRotation_180); video_source_.IncomingCapturedFrame(frame); @@ -5458,7 +7550,7 @@ TEST_F(VideoStreamEncoderTest, BandwidthAllocationLowerBound) { /*cwnd_reduce_ratio=*/0); // Insert a first video frame so that encoder gets configured. 
- int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; + int64_t timestamp_ms = CurrentTimeMs(); VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight); frame.set_rotation(kVideoRotation_270); video_source_.IncomingCapturedFrame(frame); @@ -5476,425 +7568,968 @@ TEST_F(VideoStreamEncoderTest, BandwidthAllocationLowerBound) { /*rtt_ms=*/0, /*cwnd_reduce_ratio=*/0); video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - - // Target bitrate and bandwidth allocation should both be capped at min_rate. - auto rate_settings = fake_encoder_.GetAndResetLastRateControlSettings(); - ASSERT_TRUE(rate_settings.has_value()); - DataRate allocation_sum = - DataRate::BitsPerSec(rate_settings->bitrate.get_sum_bps()); - EXPECT_EQ(min_rate, allocation_sum); - EXPECT_EQ(rate_settings->bandwidth_allocation, min_rate); - + + // Target bitrate and bandwidth allocation should both be capped at min_rate. + auto rate_settings = fake_encoder_.GetAndResetLastRateControlSettings(); + ASSERT_TRUE(rate_settings.has_value()); + DataRate allocation_sum = + DataRate::BitsPerSec(rate_settings->bitrate.get_sum_bps()); + EXPECT_EQ(min_rate, allocation_sum); + EXPECT_EQ(rate_settings->bandwidth_allocation, min_rate); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, EncoderRatesPropagatedOnReconfigure) { + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + // Capture a frame and wait for it to synchronize with the encoder thread. 
+ int64_t timestamp_ms = CurrentTimeMs(); + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr)); + WaitForEncodedFrame(1); + + auto prev_rate_settings = fake_encoder_.GetAndResetLastRateControlSettings(); + ASSERT_TRUE(prev_rate_settings.has_value()); + EXPECT_EQ(static_cast(prev_rate_settings->framerate_fps), + kDefaultFramerate); + + // Send 1s of video to ensure the framerate is stable at kDefaultFramerate. + for (int i = 0; i < 2 * kDefaultFramerate; i++) { + timestamp_ms += 1000 / kDefaultFramerate; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr)); + WaitForEncodedFrame(timestamp_ms); + } + EXPECT_EQ(static_cast(fake_encoder_.GetLastFramerate()), + kDefaultFramerate); + // Capture larger frame to trigger a reconfigure. + codec_height_ *= 2; + codec_width_ *= 2; + timestamp_ms += 1000 / kDefaultFramerate; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr)); + WaitForEncodedFrame(timestamp_ms); + + EXPECT_EQ(2, sink_.number_of_reconfigurations()); + auto current_rate_settings = + fake_encoder_.GetAndResetLastRateControlSettings(); + // Ensure we have actually reconfigured twice + // The rate settings should have been set again even though + // they haven't changed. 
+ ASSERT_TRUE(current_rate_settings.has_value()); + EXPECT_EQ(prev_rate_settings, current_rate_settings); + + video_stream_encoder_->Stop(); +} + +struct MockEncoderSwitchRequestCallback : public EncoderSwitchRequestCallback { + MOCK_METHOD(void, RequestEncoderFallback, (), (override)); + MOCK_METHOD(void, RequestEncoderSwitch, (const Config& conf), (override)); + MOCK_METHOD(void, + RequestEncoderSwitch, + (const webrtc::SdpVideoFormat& format), + (override)); +}; + +TEST_F(VideoStreamEncoderTest, EncoderSelectorCurrentEncoderIsSignaled) { + constexpr int kDontCare = 100; + StrictMock encoder_selector; + auto encoder_factory = std::make_unique( + &fake_encoder_, &encoder_selector); + video_send_config_.encoder_settings.encoder_factory = encoder_factory.get(); + + // Reset encoder for new configuration to take effect. + ConfigureEncoder(video_encoder_config_.Copy()); + + EXPECT_CALL(encoder_selector, OnCurrentEncoder(_)); + + video_source_.IncomingCapturedFrame( + CreateFrame(kDontCare, kDontCare, kDontCare)); + video_stream_encoder_->Stop(); + + // The encoders produces by the VideoEncoderProxyFactory have a pointer back + // to it's factory, so in order for the encoder instance in the + // |video_stream_encoder_| to be destroyed before the |encoder_factory| we + // reset the |video_stream_encoder_| here. + video_stream_encoder_.reset(); +} + +TEST_F(VideoStreamEncoderTest, EncoderSelectorBitrateSwitch) { + constexpr int kDontCare = 100; + + NiceMock encoder_selector; + StrictMock switch_callback; + video_send_config_.encoder_settings.encoder_switch_request_callback = + &switch_callback; + auto encoder_factory = std::make_unique( + &fake_encoder_, &encoder_selector); + video_send_config_.encoder_settings.encoder_factory = encoder_factory.get(); + + // Reset encoder for new configuration to take effect. 
+ ConfigureEncoder(video_encoder_config_.Copy()); + + ON_CALL(encoder_selector, OnAvailableBitrate(_)) + .WillByDefault(Return(SdpVideoFormat("AV1"))); + EXPECT_CALL(switch_callback, + RequestEncoderSwitch(Matcher( + Field(&SdpVideoFormat::name, "AV1")))); + + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + /*target_bitrate=*/DataRate::KilobitsPerSec(50), + /*stable_target_bitrate=*/DataRate::KilobitsPerSec(kDontCare), + /*link_allocation=*/DataRate::KilobitsPerSec(kDontCare), + /*fraction_lost=*/0, + /*rtt_ms=*/0, + /*cwnd_reduce_ratio=*/0); + AdvanceTime(TimeDelta::Millis(0)); + + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, EncoderSelectorBrokenEncoderSwitch) { + constexpr int kSufficientBitrateToNotDrop = 1000; + constexpr int kDontCare = 100; + + NiceMock video_encoder; + NiceMock encoder_selector; + StrictMock switch_callback; + video_send_config_.encoder_settings.encoder_switch_request_callback = + &switch_callback; + auto encoder_factory = std::make_unique( + &video_encoder, &encoder_selector); + video_send_config_.encoder_settings.encoder_factory = encoder_factory.get(); + + // Reset encoder for new configuration to take effect. + ConfigureEncoder(video_encoder_config_.Copy()); + + // The VideoStreamEncoder needs some bitrate before it can start encoding, + // setting some bitrate so that subsequent calls to WaitForEncodedFrame does + // not fail. 
+ video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + /*target_bitrate=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), + /*stable_target_bitrate=*/ + DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), + /*link_allocation=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), + /*fraction_lost=*/0, + /*rtt_ms=*/0, + /*cwnd_reduce_ratio=*/0); + + ON_CALL(video_encoder, Encode(_, _)) + .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ENCODER_FAILURE)); + ON_CALL(encoder_selector, OnEncoderBroken()) + .WillByDefault(Return(SdpVideoFormat("AV2"))); + + rtc::Event encode_attempted; + EXPECT_CALL(switch_callback, + RequestEncoderSwitch(Matcher(_))) + .WillOnce([&encode_attempted](const SdpVideoFormat& format) { + EXPECT_EQ(format.name, "AV2"); + encode_attempted.Set(); + }); + + video_source_.IncomingCapturedFrame(CreateFrame(1, kDontCare, kDontCare)); + encode_attempted.Wait(3000); + + AdvanceTime(TimeDelta::Millis(0)); + + video_stream_encoder_->Stop(); + + // The encoders produces by the VideoEncoderProxyFactory have a pointer back + // to it's factory, so in order for the encoder instance in the + // |video_stream_encoder_| to be destroyed before the |encoder_factory| we + // reset the |video_stream_encoder_| here. + video_stream_encoder_.reset(); +} + +TEST_F(VideoStreamEncoderTest, + AllocationPropagatedToEncoderWhenTargetRateChanged) { + const int kFrameWidth = 320; + const int kFrameHeight = 180; + + // Set initial rate. + auto rate = DataRate::KilobitsPerSec(100); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + /*target_bitrate=*/rate, + /*stable_target_bitrate=*/rate, + /*link_allocation=*/rate, + /*fraction_lost=*/0, + /*rtt_ms=*/0, + /*cwnd_reduce_ratio=*/0); + + // Insert a first video frame so that encoder gets configured. 
+ int64_t timestamp_ms = CurrentTimeMs(); + VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight); + frame.set_rotation(kVideoRotation_270); + video_source_.IncomingCapturedFrame(frame); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(1, fake_encoder_.GetNumSetRates()); + + // Change of target bitrate propagates to the encoder. + auto new_stable_rate = rate - DataRate::KilobitsPerSec(5); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + /*target_bitrate=*/new_stable_rate, + /*stable_target_bitrate=*/new_stable_rate, + /*link_allocation=*/rate, + /*fraction_lost=*/0, + /*rtt_ms=*/0, + /*cwnd_reduce_ratio=*/0); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_EQ(2, fake_encoder_.GetNumSetRates()); + video_stream_encoder_->Stop(); +} + +TEST_F(VideoStreamEncoderTest, + AllocationNotPropagatedToEncoderWhenTargetRateUnchanged) { + const int kFrameWidth = 320; + const int kFrameHeight = 180; + + // Set initial rate. + auto rate = DataRate::KilobitsPerSec(100); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + /*target_bitrate=*/rate, + /*stable_target_bitrate=*/rate, + /*link_allocation=*/rate, + /*fraction_lost=*/0, + /*rtt_ms=*/0, + /*cwnd_reduce_ratio=*/0); + + // Insert a first video frame so that encoder gets configured. + int64_t timestamp_ms = CurrentTimeMs(); + VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight); + frame.set_rotation(kVideoRotation_270); + video_source_.IncomingCapturedFrame(frame); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(1, fake_encoder_.GetNumSetRates()); + + // Set a higher target rate without changing the link_allocation. Should not + // reset encoder's rate. 
+ auto new_stable_rate = rate - DataRate::KilobitsPerSec(5); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + /*target_bitrate=*/rate, + /*stable_target_bitrate=*/new_stable_rate, + /*link_allocation=*/rate, + /*fraction_lost=*/0, + /*rtt_ms=*/0, + /*cwnd_reduce_ratio=*/0); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_EQ(1, fake_encoder_.GetNumSetRates()); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, EncoderRatesPropagatedOnReconfigure) { +TEST_F(VideoStreamEncoderTest, AutomaticAnimationDetection) { + test::ScopedFieldTrials field_trials( + "WebRTC-AutomaticAnimationDetectionScreenshare/" + "enabled:true,min_fps:20,min_duration_ms:1000,min_area_ratio:0.8/"); + const int kFramerateFps = 30; + const int kWidth = 1920; + const int kHeight = 1080; + const int kNumFrames = 2 * kFramerateFps; // >1 seconds of frames. + // Works on screenshare mode. + ResetEncoder("VP8", 1, 1, 1, /*screenshare*/ true); + // We rely on the automatic resolution adaptation, but we handle framerate + // adaptation manually by mocking the stats proxy. + video_source_.set_adaptation_enabled(true); + + // BALANCED degradation preference is required for this feature. video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - // Capture a frame and wait for it to synchronize with the encoder thread. 
- int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; - video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr)); - WaitForEncodedFrame(1); + video_stream_encoder_->SetSource(&video_source_, + webrtc::DegradationPreference::BALANCED); + EXPECT_THAT(video_source_.sink_wants(), UnlimitedSinkWants()); - auto prev_rate_settings = fake_encoder_.GetAndResetLastRateControlSettings(); - ASSERT_TRUE(prev_rate_settings.has_value()); - EXPECT_EQ(static_cast(prev_rate_settings->framerate_fps), - kDefaultFramerate); + VideoFrame frame = CreateFrame(1, kWidth, kHeight); + frame.set_update_rect(VideoFrame::UpdateRect{0, 0, kWidth, kHeight}); - // Send 1s of video to ensure the framerate is stable at kDefaultFramerate. - for (int i = 0; i < 2 * kDefaultFramerate; i++) { - timestamp_ms += 1000 / kDefaultFramerate; - video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr)); + // Pass enough frames with the full update to trigger animation detection. + for (int i = 0; i < kNumFrames; ++i) { + int64_t timestamp_ms = CurrentTimeMs(); + frame.set_ntp_time_ms(timestamp_ms); + frame.set_timestamp_us(timestamp_ms * 1000); + video_source_.IncomingCapturedFrame(frame); WaitForEncodedFrame(timestamp_ms); } - EXPECT_EQ(static_cast(fake_encoder_.GetLastFramerate()), - kDefaultFramerate); - // Capture larger frame to trigger a reconfigure. - codec_height_ *= 2; - codec_width_ *= 2; - timestamp_ms += 1000 / kDefaultFramerate; - video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, nullptr)); + + // Resolution should be limited. + rtc::VideoSinkWants expected; + expected.max_framerate_fps = kFramerateFps; + expected.max_pixel_count = 1280 * 720 + 1; + EXPECT_THAT(video_source_.sink_wants(), FpsEqResolutionLt(expected)); + + // Pass one frame with no known update. + // Resolution cap should be removed immediately. 
+ int64_t timestamp_ms = CurrentTimeMs(); + frame.set_ntp_time_ms(timestamp_ms); + frame.set_timestamp_us(timestamp_ms * 1000); + frame.clear_update_rect(); + + video_source_.IncomingCapturedFrame(frame); WaitForEncodedFrame(timestamp_ms); - EXPECT_EQ(2, sink_.number_of_reconfigurations()); - auto current_rate_settings = - fake_encoder_.GetAndResetLastRateControlSettings(); - // Ensure we have actually reconfigured twice - // The rate settings should have been set again even though - // they haven't changed. - ASSERT_TRUE(current_rate_settings.has_value()); - EXPECT_EQ(prev_rate_settings, current_rate_settings); + // Resolution should be unlimited now. + EXPECT_THAT(video_source_.sink_wants(), + FpsMatchesResolutionMax(Eq(kFramerateFps))); video_stream_encoder_->Stop(); } -struct MockEncoderSwitchRequestCallback : public EncoderSwitchRequestCallback { - MOCK_METHOD(void, RequestEncoderFallback, (), (override)); - MOCK_METHOD(void, RequestEncoderSwitch, (const Config& conf), (override)); - MOCK_METHOD(void, - RequestEncoderSwitch, - (const webrtc::SdpVideoFormat& format), - (override)); -}; - -TEST_F(VideoStreamEncoderTest, BitrateEncoderSwitch) { - constexpr int kDontCare = 100; - - StrictMock switch_callback; - video_send_config_.encoder_settings.encoder_switch_request_callback = - &switch_callback; - VideoEncoderConfig encoder_config = video_encoder_config_.Copy(); - encoder_config.codec_type = kVideoCodecVP8; - webrtc::test::ScopedFieldTrials field_trial( - "WebRTC-NetworkCondition-EncoderSwitch/" - "codec_thresholds:VP8;100;-1|H264;-1;30000," - "to_codec:AV1,to_param:ping,to_value:pong,window:2.0/"); +TEST_F(VideoStreamEncoderTest, ConfiguresVp9SvcAtOddResolutions) { + const int kWidth = 720; // 540p adapted down. + const int kHeight = 405; + const int kNumFrames = 3; + // Works on screenshare mode. 
+ ResetEncoder("VP9", /*num_streams=*/1, /*num_temporal_layers=*/1, + /*num_spatial_layers=*/2, /*screenshare=*/true); - // Reset encoder for new configuration to take effect. - ConfigureEncoder(std::move(encoder_config)); + video_source_.set_adaptation_enabled(true); - // Send one frame to trigger ReconfigureEncoder. - video_source_.IncomingCapturedFrame( - CreateFrame(kDontCare, kDontCare, kDontCare)); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - using Config = EncoderSwitchRequestCallback::Config; - EXPECT_CALL(switch_callback, RequestEncoderSwitch(Matcher( - AllOf(Field(&Config::codec_name, "AV1"), - Field(&Config::param, "ping"), - Field(&Config::value, "pong"))))); + VideoFrame frame = CreateFrame(1, kWidth, kHeight); - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/DataRate::KilobitsPerSec(50), - /*stable_target_bitrate=*/DataRate::KilobitsPerSec(kDontCare), - /*link_allocation=*/DataRate::KilobitsPerSec(kDontCare), - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); + // Pass enough frames with the full update to trigger animation detection. 
+ for (int i = 0; i < kNumFrames; ++i) { + int64_t timestamp_ms = CurrentTimeMs(); + frame.set_ntp_time_ms(timestamp_ms); + frame.set_timestamp_us(timestamp_ms * 1000); + video_source_.IncomingCapturedFrame(frame); + WaitForEncodedFrame(timestamp_ms); + } video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, VideoSuspendedNoEncoderSwitch) { - constexpr int kDontCare = 100; - - StrictMock switch_callback; - video_send_config_.encoder_settings.encoder_switch_request_callback = - &switch_callback; - VideoEncoderConfig encoder_config = video_encoder_config_.Copy(); - encoder_config.codec_type = kVideoCodecVP8; - webrtc::test::ScopedFieldTrials field_trial( - "WebRTC-NetworkCondition-EncoderSwitch/" - "codec_thresholds:VP8;100;-1|H264;-1;30000," - "to_codec:AV1,to_param:ping,to_value:pong,window:2.0/"); +TEST_F(VideoStreamEncoderTest, EncoderResetAccordingToParameterChange) { + const float downscale_factors[] = {4.0, 2.0, 1.0}; + const int number_layers = + sizeof(downscale_factors) / sizeof(downscale_factors[0]); + VideoEncoderConfig config; + test::FillEncoderConfiguration(kVideoCodecVP8, number_layers, &config); + for (int i = 0; i < number_layers; ++i) { + config.simulcast_layers[i].scale_resolution_down_by = downscale_factors[i]; + config.simulcast_layers[i].active = true; + } + config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0); - // Reset encoder for new configuration to take effect. - ConfigureEncoder(std::move(encoder_config)); + // First initialization. + // Encoder should be initialized. Next frame should be key frame. 
+ video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + sink_.SetNumExpectedLayers(number_layers); + int64_t timestamp_ms = kFrameIntervalMs; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(1, fake_encoder_.GetNumEncoderInitializations()); + EXPECT_THAT(fake_encoder_.LastFrameTypes(), + ::testing::ElementsAreArray({VideoFrameType::kVideoFrameKey, + VideoFrameType::kVideoFrameKey, + VideoFrameType::kVideoFrameKey})); - // Send one frame to trigger ReconfigureEncoder. - video_source_.IncomingCapturedFrame( - CreateFrame(kDontCare, kDontCare, kDontCare)); + // Disable top layer. + // Encoder shouldn't be re-initialized. Next frame should be delta frame. + config.simulcast_layers[number_layers - 1].active = false; + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + sink_.SetNumExpectedLayers(number_layers - 1); + timestamp_ms += kFrameIntervalMs; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(1, fake_encoder_.GetNumEncoderInitializations()); + EXPECT_THAT(fake_encoder_.LastFrameTypes(), + ::testing::ElementsAreArray({VideoFrameType::kVideoFrameDelta, + VideoFrameType::kVideoFrameDelta, + VideoFrameType::kVideoFrameDelta})); - using Config = EncoderSwitchRequestCallback::Config; - EXPECT_CALL(switch_callback, RequestEncoderSwitch(Matcher(_))) - .Times(0); + // Re-enable top layer. + // Encoder should be re-initialized. Next frame should be key frame. 
+ config.simulcast_layers[number_layers - 1].active = true; + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + sink_.SetNumExpectedLayers(number_layers); + timestamp_ms += kFrameIntervalMs; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(2, fake_encoder_.GetNumEncoderInitializations()); + EXPECT_THAT(fake_encoder_.LastFrameTypes(), + ::testing::ElementsAreArray({VideoFrameType::kVideoFrameKey, + VideoFrameType::kVideoFrameKey, + VideoFrameType::kVideoFrameKey})); - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/DataRate::KilobitsPerSec(0), - /*stable_target_bitrate=*/DataRate::KilobitsPerSec(0), - /*link_allocation=*/DataRate::KilobitsPerSec(kDontCare), - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); + // Top layer max rate change. + // Encoder shouldn't be re-initialized. Next frame should be delta frame. + config.simulcast_layers[number_layers - 1].max_bitrate_bps -= 100; + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + sink_.SetNumExpectedLayers(number_layers); + timestamp_ms += kFrameIntervalMs; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(2, fake_encoder_.GetNumEncoderInitializations()); + EXPECT_THAT(fake_encoder_.LastFrameTypes(), + ::testing::ElementsAreArray({VideoFrameType::kVideoFrameDelta, + VideoFrameType::kVideoFrameDelta, + VideoFrameType::kVideoFrameDelta})); + // Top layer resolution change. + // Encoder should be re-initialized. Next frame should be key frame. 
+ config.simulcast_layers[number_layers - 1].scale_resolution_down_by += 0.1; + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + sink_.SetNumExpectedLayers(number_layers); + timestamp_ms += kFrameIntervalMs; + video_source_.IncomingCapturedFrame(CreateFrame(timestamp_ms, 1280, 720)); + WaitForEncodedFrame(timestamp_ms); + EXPECT_EQ(3, fake_encoder_.GetNumEncoderInitializations()); + EXPECT_THAT(fake_encoder_.LastFrameTypes(), + ::testing::ElementsAreArray({VideoFrameType::kVideoFrameKey, + VideoFrameType::kVideoFrameKey, + VideoFrameType::kVideoFrameKey})); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, ResolutionEncoderSwitch) { - constexpr int kSufficientBitrateToNotDrop = 1000; - constexpr int kHighRes = 500; - constexpr int kLowRes = 100; - - StrictMock switch_callback; - video_send_config_.encoder_settings.encoder_switch_request_callback = - &switch_callback; - webrtc::test::ScopedFieldTrials field_trial( - "WebRTC-NetworkCondition-EncoderSwitch/" - "codec_thresholds:VP8;120;-1|H264;-1;30000," - "to_codec:AV1,to_param:ping,to_value:pong,window:2.0/"); - VideoEncoderConfig encoder_config = video_encoder_config_.Copy(); - encoder_config.codec_type = kVideoCodecH264; - - // Reset encoder for new configuration to take effect. - ConfigureEncoder(std::move(encoder_config)); +TEST_F(VideoStreamEncoderTest, EncoderResolutionsExposedInSinglecast) { + const int kFrameWidth = 1280; + const int kFrameHeight = 720; - // The VideoStreamEncoder needs some bitrate before it can start encoding, - // setting some bitrate so that subsequent calls to WaitForEncodedFrame does - // not fail. 
+ SetUp(); video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), - /*stable_target_bitrate=*/ - DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), - /*link_allocation=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); - - // Send one frame to trigger ReconfigureEncoder. - video_source_.IncomingCapturedFrame(CreateFrame(1, kHighRes, kHighRes)); - WaitForEncodedFrame(1); - - using Config = EncoderSwitchRequestCallback::Config; - EXPECT_CALL(switch_callback, RequestEncoderSwitch(Matcher( - AllOf(Field(&Config::codec_name, "AV1"), - Field(&Config::param, "ping"), - Field(&Config::value, "pong"))))); + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - video_source_.IncomingCapturedFrame(CreateFrame(2, kLowRes, kLowRes)); - WaitForEncodedFrame(2); + // Capturing a frame should reconfigure the encoder and expose the encoder + // resolution, which is the same as the input frame. + int64_t timestamp_ms = kFrameIntervalMs; + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(timestamp_ms); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_THAT(video_source_.sink_wants().resolutions, + ::testing::ElementsAreArray( + {rtc::VideoSinkWants::FrameSize(kFrameWidth, kFrameHeight)})); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, EncoderSelectorCurrentEncoderIsSignaled) { - constexpr int kDontCare = 100; - StrictMock encoder_selector; - auto encoder_factory = std::make_unique( - &fake_encoder_, &encoder_selector); - video_send_config_.encoder_settings.encoder_factory = encoder_factory.get(); - - // Reset encoder for new configuration to take effect. 
- ConfigureEncoder(video_encoder_config_.Copy()); - - EXPECT_CALL(encoder_selector, OnCurrentEncoder(_)); +TEST_F(VideoStreamEncoderTest, EncoderResolutionsExposedInSimulcast) { + // Pick downscale factors such that we never encode at full resolution - this + // is an interesting use case. The frame resolution influences the encoder + // resolutions, but if no layer has |scale_resolution_down_by| == 1 then the + // encoder should not ask for the frame resolution. This allows video frames + // to have the appearence of one resolution but optimize its internal buffers + // for what is actually encoded. + const size_t kNumSimulcastLayers = 3u; + const float kDownscaleFactors[] = {8.0, 4.0, 2.0}; + const int kFrameWidth = 1280; + const int kFrameHeight = 720; + const rtc::VideoSinkWants::FrameSize kLayer0Size( + kFrameWidth / kDownscaleFactors[0], kFrameHeight / kDownscaleFactors[0]); + const rtc::VideoSinkWants::FrameSize kLayer1Size( + kFrameWidth / kDownscaleFactors[1], kFrameHeight / kDownscaleFactors[1]); + const rtc::VideoSinkWants::FrameSize kLayer2Size( + kFrameWidth / kDownscaleFactors[2], kFrameHeight / kDownscaleFactors[2]); + + VideoEncoderConfig config; + test::FillEncoderConfiguration(kVideoCodecVP8, kNumSimulcastLayers, &config); + for (size_t i = 0; i < kNumSimulcastLayers; ++i) { + config.simulcast_layers[i].scale_resolution_down_by = kDownscaleFactors[i]; + config.simulcast_layers[i].active = true; + } + config.video_stream_factory = + rtc::make_ref_counted( + "VP8", /*max qp*/ 56, /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0); + // Capture a frame with all layers active. 
+ int64_t timestamp_ms = kFrameIntervalMs; + sink_.SetNumExpectedLayers(kNumSimulcastLayers); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); video_source_.IncomingCapturedFrame( - CreateFrame(kDontCare, kDontCare, kDontCare)); - video_stream_encoder_->Stop(); - - // The encoders produces by the VideoEncoderProxyFactory have a pointer back - // to it's factory, so in order for the encoder instance in the - // |video_stream_encoder_| to be destroyed before the |encoder_factory| we - // reset the |video_stream_encoder_| here. - video_stream_encoder_.reset(); -} - -TEST_F(VideoStreamEncoderTest, EncoderSelectorBitrateSwitch) { - constexpr int kDontCare = 100; + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(timestamp_ms); + // Expect encoded resolutions to match the expected simulcast layers. + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_THAT( + video_source_.sink_wants().resolutions, + ::testing::ElementsAreArray({kLayer0Size, kLayer1Size, kLayer2Size})); - NiceMock encoder_selector; - StrictMock switch_callback; - video_send_config_.encoder_settings.encoder_switch_request_callback = - &switch_callback; - auto encoder_factory = std::make_unique( - &fake_encoder_, &encoder_selector); - video_send_config_.encoder_settings.encoder_factory = encoder_factory.get(); + // Capture a frame with one of the layers inactive. + timestamp_ms += kFrameIntervalMs; + config.simulcast_layers[2].active = false; + sink_.SetNumExpectedLayers(kNumSimulcastLayers - 1); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(timestamp_ms); - // Reset encoder for new configuration to take effect. - ConfigureEncoder(video_encoder_config_.Copy()); + // Expect encoded resolutions to match the expected simulcast layers. 
+ video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_THAT(video_source_.sink_wants().resolutions, + ::testing::ElementsAreArray({kLayer0Size, kLayer1Size})); - ON_CALL(encoder_selector, OnAvailableBitrate(_)) - .WillByDefault(Return(SdpVideoFormat("AV1"))); - EXPECT_CALL(switch_callback, - RequestEncoderSwitch(Matcher( - Field(&SdpVideoFormat::name, "AV1")))); + // Capture a frame with all but one layer turned off. + timestamp_ms += kFrameIntervalMs; + config.simulcast_layers[1].active = false; + sink_.SetNumExpectedLayers(kNumSimulcastLayers - 2); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(timestamp_ms); - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/DataRate::KilobitsPerSec(50), - /*stable_target_bitrate=*/DataRate::KilobitsPerSec(kDontCare), - /*link_allocation=*/DataRate::KilobitsPerSec(kDontCare), - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); + // Expect encoded resolutions to match the expected simulcast layers. + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + EXPECT_THAT(video_source_.sink_wants().resolutions, + ::testing::ElementsAreArray({kLayer0Size})); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, EncoderSelectorBrokenEncoderSwitch) { - constexpr int kSufficientBitrateToNotDrop = 1000; - constexpr int kDontCare = 100; - - NiceMock video_encoder; - NiceMock encoder_selector; - StrictMock switch_callback; - video_send_config_.encoder_settings.encoder_switch_request_callback = - &switch_callback; - auto encoder_factory = std::make_unique( - &video_encoder, &encoder_selector); - video_send_config_.encoder_settings.encoder_factory = encoder_factory.get(); - - // Reset encoder for new configuration to take effect. 
- ConfigureEncoder(video_encoder_config_.Copy()); +TEST_F(VideoStreamEncoderTest, QpPresent_QpKept) { + ResetEncoder("VP8", 1, 1, 1, false); - // The VideoStreamEncoder needs some bitrate before it can start encoding, - // setting some bitrate so that subsequent calls to WaitForEncodedFrame does - // not fail. - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), - /*stable_target_bitrate=*/ - DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), - /*link_allocation=*/DataRate::KilobitsPerSec(kSufficientBitrateToNotDrop), - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); + // Force encoder reconfig. + video_source_.IncomingCapturedFrame( + CreateFrame(1, codec_width_, codec_height_)); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - ON_CALL(video_encoder, Encode(_, _)) - .WillByDefault(Return(WEBRTC_VIDEO_CODEC_ENCODER_FAILURE)); - ON_CALL(encoder_selector, OnEncoderBroken()) - .WillByDefault(Return(SdpVideoFormat("AV2"))); + // Set QP on encoded frame and pass the frame to encode complete callback. + // Since QP is present QP parsing won't be triggered and the original value + // should be kept. 
+ EncodedImage encoded_image; + encoded_image.qp_ = 123; + encoded_image.SetEncodedData(EncodedImageBuffer::Create( + kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25))); + CodecSpecificInfo codec_info; + codec_info.codecType = kVideoCodecVP8; + fake_encoder_.InjectEncodedImage(encoded_image, &codec_info); + EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs)); + EXPECT_EQ(sink_.GetLastEncodedImage().qp_, 123); + video_stream_encoder_->Stop(); +} - rtc::Event encode_attempted; - EXPECT_CALL(switch_callback, - RequestEncoderSwitch(Matcher(_))) - .WillOnce([&encode_attempted](const SdpVideoFormat& format) { - EXPECT_EQ(format.name, "AV2"); - encode_attempted.Set(); - }); +TEST_F(VideoStreamEncoderTest, QpAbsent_QpParsed) { + ResetEncoder("VP8", 1, 1, 1, false); - video_source_.IncomingCapturedFrame(CreateFrame(1, kDontCare, kDontCare)); - encode_attempted.Wait(3000); + // Force encoder reconfig. + video_source_.IncomingCapturedFrame( + CreateFrame(1, codec_width_, codec_height_)); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + // Pass an encoded frame without QP to encode complete callback. QP should be + // parsed and set. + EncodedImage encoded_image; + encoded_image.qp_ = -1; + encoded_image.SetEncodedData(EncodedImageBuffer::Create( + kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25))); + CodecSpecificInfo codec_info; + codec_info.codecType = kVideoCodecVP8; + fake_encoder_.InjectEncodedImage(encoded_image, &codec_info); + EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs)); + EXPECT_EQ(sink_.GetLastEncodedImage().qp_, 25); video_stream_encoder_->Stop(); - - // The encoders produces by the VideoEncoderProxyFactory have a pointer back - // to it's factory, so in order for the encoder instance in the - // |video_stream_encoder_| to be destroyed before the |encoder_factory| we - // reset the |video_stream_encoder_| here. 
- video_stream_encoder_.reset(); } -TEST_F(VideoStreamEncoderTest, - AllocationPropagatedToEncoderWhenTargetRateChanged) { - const int kFrameWidth = 320; - const int kFrameHeight = 180; - - // Set initial rate. - auto rate = DataRate::KilobitsPerSec(100); - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/rate, - /*stable_target_bitrate=*/rate, - /*link_allocation=*/rate, - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); +TEST_F(VideoStreamEncoderTest, QpAbsentParsingDisabled_QpAbsent) { + webrtc::test::ScopedFieldTrials field_trials( + "WebRTC-QpParsingKillSwitch/Enabled/"); - // Insert a first video frame so that encoder gets configured. - int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; - VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight); - frame.set_rotation(kVideoRotation_270); - video_source_.IncomingCapturedFrame(frame); - WaitForEncodedFrame(timestamp_ms); - EXPECT_EQ(1, fake_encoder_.GetNumSetRates()); + ResetEncoder("VP8", 1, 1, 1, false); - // Change of target bitrate propagates to the encoder. - auto new_stable_rate = rate - DataRate::KilobitsPerSec(5); - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/new_stable_rate, - /*stable_target_bitrate=*/new_stable_rate, - /*link_allocation=*/rate, - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); + // Force encoder reconfig. 
+ video_source_.IncomingCapturedFrame( + CreateFrame(1, codec_width_, codec_height_)); video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - EXPECT_EQ(2, fake_encoder_.GetNumSetRates()); + + EncodedImage encoded_image; + encoded_image.qp_ = -1; + encoded_image.SetEncodedData(EncodedImageBuffer::Create( + kCodedFrameVp8Qp25, sizeof(kCodedFrameVp8Qp25))); + CodecSpecificInfo codec_info; + codec_info.codecType = kVideoCodecVP8; + fake_encoder_.InjectEncodedImage(encoded_image, &codec_info); + EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs)); + EXPECT_EQ(sink_.GetLastEncodedImage().qp_, -1); video_stream_encoder_->Stop(); } TEST_F(VideoStreamEncoderTest, - AllocationNotPropagatedToEncoderWhenTargetRateUnchanged) { - const int kFrameWidth = 320; - const int kFrameHeight = 180; + QualityScalingNotAllowed_QualityScalingDisabled) { + VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy(); + + // Disable scaling settings in encoder info. + fake_encoder_.SetQualityScaling(false); + // Disable quality scaling in encoder config. + video_encoder_config.is_quality_scaling_allowed = false; + ConfigureEncoder(std::move(video_encoder_config)); - // Set initial rate. - auto rate = DataRate::KilobitsPerSec(100); video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/rate, - /*stable_target_bitrate=*/rate, - /*link_allocation=*/rate, - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - // Insert a first video frame so that encoder gets configured. 
- int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; - VideoFrame frame = CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight); - frame.set_rotation(kVideoRotation_270); - video_source_.IncomingCapturedFrame(frame); - WaitForEncodedFrame(timestamp_ms); - EXPECT_EQ(1, fake_encoder_.GetNumSetRates()); + test::FrameForwarder source; + video_stream_encoder_->SetSource( + &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); + EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); + + source.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + WaitForEncodedFrame(1); + video_stream_encoder_->TriggerQualityLow(); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - // Set a higher target rate without changing the link_allocation. Should not - // reset encoder's rate. - auto new_stable_rate = rate - DataRate::KilobitsPerSec(5); - video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( - /*target_bitrate=*/rate, - /*stable_target_bitrate=*/new_stable_rate, - /*link_allocation=*/rate, - /*fraction_lost=*/0, - /*rtt_ms=*/0, - /*cwnd_reduce_ratio=*/0); - video_stream_encoder_->WaitUntilTaskQueueIsIdle(); - EXPECT_EQ(1, fake_encoder_.GetNumSetRates()); video_stream_encoder_->Stop(); } -TEST_F(VideoStreamEncoderTest, AutomaticAnimationDetection) { - test::ScopedFieldTrials field_trials( - "WebRTC-AutomaticAnimationDetectionScreenshare/" - "enabled:true,min_fps:20,min_duration_ms:1000,min_area_ratio:0.8/"); - const int kFramerateFps = 30; - const int kWidth = 1920; - const int kHeight = 1080; - const int kNumFrames = 2 * kFramerateFps; // >1 seconds of frames. - // Works on screenshare mode. - ResetEncoder("VP8", 1, 1, 1, /*screenshare*/ true); - // We rely on the automatic resolution adaptation, but we handle framerate - // adaptation manually by mocking the stats proxy. 
- video_source_.set_adaptation_enabled(true); +#if !defined(WEBRTC_IOS) +// TODO(bugs.webrtc.org/12401): Disabled because WebRTC-Video-QualityScaling is +// disabled by default on iOS. +TEST_F(VideoStreamEncoderTest, QualityScalingAllowed_QualityScalingEnabled) { + VideoEncoderConfig video_encoder_config = video_encoder_config_.Copy(); + + // Disable scaling settings in encoder info. + fake_encoder_.SetQualityScaling(false); + // Enable quality scaling in encoder config. + video_encoder_config.is_quality_scaling_allowed = true; + ConfigureEncoder(std::move(video_encoder_config)); - // BALANCED degradation preference is required for this feature. video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); - video_stream_encoder_->SetSource(&video_source_, - webrtc::DegradationPreference::BALANCED); - VerifyNoLimitation(video_source_.sink_wants()); - VideoFrame frame = CreateFrame(1, kWidth, kHeight); - frame.set_update_rect(VideoFrame::UpdateRect{0, 0, kWidth, kHeight}); + test::FrameForwarder source; + video_stream_encoder_->SetSource( + &source, webrtc::DegradationPreference::MAINTAIN_FRAMERATE); + EXPECT_THAT(source.sink_wants(), UnlimitedSinkWants()); + EXPECT_FALSE(stats_proxy_->GetStats().bw_limited_resolution); - // Pass enough frames with the full update to trigger animation detection. 
- for (int i = 0; i < kNumFrames; ++i) { - int64_t timestamp_ms = - fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; - frame.set_ntp_time_ms(timestamp_ms); - frame.set_timestamp_us(timestamp_ms * 1000); - video_source_.IncomingCapturedFrame(frame); - WaitForEncodedFrame(timestamp_ms); + source.IncomingCapturedFrame(CreateFrame(1, 1280, 720)); + WaitForEncodedFrame(1); + video_stream_encoder_->TriggerQualityLow(); + EXPECT_TRUE(stats_proxy_->GetStats().bw_limited_resolution); + + video_stream_encoder_->Stop(); +} +#endif + +// Test parameters: (VideoCodecType codec, bool allow_i420_conversion) +class VideoStreamEncoderWithRealEncoderTest + : public VideoStreamEncoderTest, + public ::testing::WithParamInterface> { + public: + VideoStreamEncoderWithRealEncoderTest() + : VideoStreamEncoderTest(), + codec_type_(std::get<0>(GetParam())), + allow_i420_conversion_(std::get<1>(GetParam())) {} + + void SetUp() override { + VideoStreamEncoderTest::SetUp(); + std::unique_ptr encoder; + switch (codec_type_) { + case kVideoCodecVP8: + encoder = VP8Encoder::Create(); + break; + case kVideoCodecVP9: + encoder = VP9Encoder::Create(); + break; + case kVideoCodecAV1: + encoder = CreateLibaomAv1Encoder(); + break; + case kVideoCodecH264: + encoder = + H264Encoder::Create(cricket::VideoCodec(cricket::kH264CodecName)); + break; + case kVideoCodecMultiplex: + mock_encoder_factory_for_multiplex_ = + std::make_unique(); + EXPECT_CALL(*mock_encoder_factory_for_multiplex_, Die); + EXPECT_CALL(*mock_encoder_factory_for_multiplex_, CreateVideoEncoder) + .WillRepeatedly([] { return VP8Encoder::Create(); }); + encoder = std::make_unique( + mock_encoder_factory_for_multiplex_.get(), SdpVideoFormat("VP8"), + false); + break; + default: + RTC_NOTREACHED(); + } + ConfigureEncoderAndBitrate(codec_type_, std::move(encoder)); } - // Resolution should be limited. 
- rtc::VideoSinkWants expected; - expected.max_framerate_fps = kFramerateFps; - expected.max_pixel_count = 1280 * 720 + 1; - VerifyFpsEqResolutionLt(video_source_.sink_wants(), expected); + void TearDown() override { + video_stream_encoder_->Stop(); + // Ensure |video_stream_encoder_| is destroyed before + // |encoder_proxy_factory_|. + video_stream_encoder_.reset(); + VideoStreamEncoderTest::TearDown(); + } - // Pass one frame with no known update. - // Resolution cap should be removed immediately. - int64_t timestamp_ms = fake_clock_.TimeNanos() / rtc::kNumNanosecsPerMillisec; - frame.set_ntp_time_ms(timestamp_ms); - frame.set_timestamp_us(timestamp_ms * 1000); - frame.clear_update_rect(); + protected: + void ConfigureEncoderAndBitrate(VideoCodecType codec_type, + std::unique_ptr encoder) { + // Configure VSE to use the encoder. + encoder_ = std::move(encoder); + encoder_proxy_factory_ = std::make_unique( + encoder_.get(), &encoder_selector_); + video_send_config_.encoder_settings.encoder_factory = + encoder_proxy_factory_.get(); + VideoEncoderConfig video_encoder_config; + test::FillEncoderConfiguration(codec_type, 1, &video_encoder_config); + video_encoder_config_ = video_encoder_config.Copy(); + ConfigureEncoder(video_encoder_config_.Copy()); - video_source_.IncomingCapturedFrame(frame); - WaitForEncodedFrame(timestamp_ms); + // Set bitrate to ensure frame is not dropped. + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), + DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0); + } - // Resolution should be unlimited now. 
- VerifyFpsEqResolutionMax(video_source_.sink_wants(), kFramerateFps); + const VideoCodecType codec_type_; + const bool allow_i420_conversion_; + NiceMock encoder_selector_; + std::unique_ptr encoder_proxy_factory_; + std::unique_ptr encoder_; + std::unique_ptr mock_encoder_factory_for_multiplex_; +}; + +TEST_P(VideoStreamEncoderWithRealEncoderTest, EncoderMapsNativeI420) { + auto native_i420_frame = test::CreateMappableNativeFrame( + 1, VideoFrameBuffer::Type::kI420, codec_width_, codec_height_); + video_source_.IncomingCapturedFrame(native_i420_frame); + WaitForEncodedFrame(codec_width_, codec_height_); + + auto mappable_native_buffer = + test::GetMappableNativeBufferFromVideoFrame(native_i420_frame); + std::vector> mapped_frame_buffers = + mappable_native_buffer->GetMappedFramedBuffers(); + ASSERT_EQ(mapped_frame_buffers.size(), 1u); + EXPECT_EQ(mapped_frame_buffers[0]->width(), codec_width_); + EXPECT_EQ(mapped_frame_buffers[0]->height(), codec_height_); + EXPECT_EQ(mapped_frame_buffers[0]->type(), VideoFrameBuffer::Type::kI420); +} + +TEST_P(VideoStreamEncoderWithRealEncoderTest, EncoderMapsNativeNV12) { + auto native_nv12_frame = test::CreateMappableNativeFrame( + 1, VideoFrameBuffer::Type::kNV12, codec_width_, codec_height_); + video_source_.IncomingCapturedFrame(native_nv12_frame); + WaitForEncodedFrame(codec_width_, codec_height_); + + auto mappable_native_buffer = + test::GetMappableNativeBufferFromVideoFrame(native_nv12_frame); + std::vector> mapped_frame_buffers = + mappable_native_buffer->GetMappedFramedBuffers(); + ASSERT_EQ(mapped_frame_buffers.size(), 1u); + EXPECT_EQ(mapped_frame_buffers[0]->width(), codec_width_); + EXPECT_EQ(mapped_frame_buffers[0]->height(), codec_height_); + EXPECT_EQ(mapped_frame_buffers[0]->type(), VideoFrameBuffer::Type::kNV12); + + if (!allow_i420_conversion_) { + EXPECT_FALSE(mappable_native_buffer->DidConvertToI420()); + } +} + +TEST_P(VideoStreamEncoderWithRealEncoderTest, HandlesLayerToggling) { + if (codec_type_ == 
kVideoCodecMultiplex) { + // Multiplex codec here uses wrapped mock codecs, ignore for this test. + return; + } + + const size_t kNumSpatialLayers = 3u; + const float kDownscaleFactors[] = {4.0, 2.0, 1.0}; + const int kFrameWidth = 1280; + const int kFrameHeight = 720; + const rtc::VideoSinkWants::FrameSize kLayer0Size( + kFrameWidth / kDownscaleFactors[0], kFrameHeight / kDownscaleFactors[0]); + const rtc::VideoSinkWants::FrameSize kLayer1Size( + kFrameWidth / kDownscaleFactors[1], kFrameHeight / kDownscaleFactors[1]); + const rtc::VideoSinkWants::FrameSize kLayer2Size( + kFrameWidth / kDownscaleFactors[2], kFrameHeight / kDownscaleFactors[2]); + + VideoEncoderConfig config; + if (codec_type_ == VideoCodecType::kVideoCodecVP9) { + test::FillEncoderConfiguration(codec_type_, 1, &config); + config.max_bitrate_bps = kSimulcastTargetBitrateBps; + VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings(); + vp9_settings.numberOfSpatialLayers = kNumSpatialLayers; + vp9_settings.numberOfTemporalLayers = 3; + vp9_settings.automaticResizeOn = false; + config.encoder_specific_settings = + rtc::make_ref_counted( + vp9_settings); + config.spatial_layers = GetSvcConfig(kFrameWidth, kFrameHeight, + /*fps=*/30.0, + /*first_active_layer=*/0, + /*num_spatial_layers=*/3, + /*num_temporal_layers=*/3, + /*is_screenshare=*/false); + } else if (codec_type_ == VideoCodecType::kVideoCodecAV1) { + test::FillEncoderConfiguration(codec_type_, 1, &config); + config.max_bitrate_bps = kSimulcastTargetBitrateBps; + config.spatial_layers = GetSvcConfig(kFrameWidth, kFrameHeight, + /*fps=*/30.0, + /*first_active_layer=*/0, + /*num_spatial_layers=*/3, + /*num_temporal_layers=*/3, + /*is_screenshare=*/false); + config.simulcast_layers[0].scalability_mode = "L3T3_KEY"; + } else { + // Simulcast for VP8/H264. 
+ test::FillEncoderConfiguration(codec_type_, kNumSpatialLayers, &config); + for (size_t i = 0; i < kNumSpatialLayers; ++i) { + config.simulcast_layers[i].scale_resolution_down_by = + kDownscaleFactors[i]; + config.simulcast_layers[i].active = true; + } + if (codec_type_ == VideoCodecType::kVideoCodecH264) { + // Turn off frame dropping to prevent flakiness. + VideoCodecH264 h264_settings = VideoEncoder::GetDefaultH264Settings(); + h264_settings.frameDroppingOn = false; + config.encoder_specific_settings = rtc::make_ref_counted< + VideoEncoderConfig::H264EncoderSpecificSettings>(h264_settings); + } + } + + auto set_layer_active = [&](int layer_idx, bool active) { + if (codec_type_ == VideoCodecType::kVideoCodecVP9 || + codec_type_ == VideoCodecType::kVideoCodecAV1) { + config.spatial_layers[layer_idx].active = active; + } else { + config.simulcast_layers[layer_idx].active = active; + } + }; + + config.video_stream_factory = + rtc::make_ref_counted( + CodecTypeToPayloadString(codec_type_), /*max qp*/ 56, + /*screencast*/ false, + /*screenshare enabled*/ false); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), + DataRate::BitsPerSec(kSimulcastTargetBitrateBps), 0, 0, 0); + + // Capture a frame with all layers active. + sink_.SetNumExpectedLayers(kNumSpatialLayers); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + int64_t timestamp_ms = kFrameIntervalMs; + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + + WaitForEncodedFrame(kLayer2Size.width, kLayer2Size.height); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + // Capture a frame with one of the layers inactive. 
+ set_layer_active(2, false); + sink_.SetNumExpectedLayers(kNumSpatialLayers - 1); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + timestamp_ms += kFrameIntervalMs; + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(kLayer1Size.width, kLayer1Size.height); + + // New target bitrates signaled based on lower resolution. + DataRate kTwoLayerBitrate = DataRate::KilobitsPerSec(833); + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + kTwoLayerBitrate, kTwoLayerBitrate, kTwoLayerBitrate, 0, 0, 0); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + // Re-enable the top layer. + set_layer_active(2, true); + sink_.SetNumExpectedLayers(kNumSpatialLayers); + video_stream_encoder_->ConfigureEncoder(config.Copy(), kMaxPayloadLength); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + // Bitrate target adjusted back up to enable HD layer... + video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources( + DataRate::KilobitsPerSec(1800), DataRate::KilobitsPerSec(1800), + DataRate::KilobitsPerSec(1800), 0, 0, 0); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); + + // ...then add a new frame. 
+ timestamp_ms += kFrameIntervalMs; + video_source_.IncomingCapturedFrame( + CreateFrame(timestamp_ms, kFrameWidth, kFrameHeight)); + WaitForEncodedFrame(kLayer2Size.width, kLayer2Size.height); + video_stream_encoder_->WaitUntilTaskQueueIsIdle(); video_stream_encoder_->Stop(); } +std::string TestParametersVideoCodecAndAllowI420ConversionToString( + testing::TestParamInfo> info) { + VideoCodecType codec_type = std::get<0>(info.param); + bool allow_i420_conversion = std::get<1>(info.param); + std::string str; + switch (codec_type) { + case kVideoCodecGeneric: + str = "Generic"; + break; + case kVideoCodecVP8: + str = "VP8"; + break; + case kVideoCodecVP9: + str = "VP9"; + break; + case kVideoCodecAV1: + str = "AV1"; + break; + case kVideoCodecH264: + str = "H264"; + break; + case kVideoCodecMultiplex: + str = "Multiplex"; + break; + default: + RTC_NOTREACHED(); + } + str += allow_i420_conversion ? "_AllowToI420" : "_DisallowToI420"; + return str; +} + +constexpr std::pair kVP8DisallowConversion = + std::make_pair(kVideoCodecVP8, /*allow_i420_conversion=*/false); +constexpr std::pair kVP9DisallowConversion = + std::make_pair(kVideoCodecVP9, /*allow_i420_conversion=*/false); +constexpr std::pair kAV1AllowConversion = + std::make_pair(kVideoCodecAV1, /*allow_i420_conversion=*/true); +constexpr std::pair kMultiplexDisallowConversion = + std::make_pair(kVideoCodecMultiplex, /*allow_i420_conversion=*/false); +#if defined(WEBRTC_USE_H264) +constexpr std::pair kH264AllowConversion = + std::make_pair(kVideoCodecH264, /*allow_i420_conversion=*/true); + +// The windows compiler does not tolerate #if statements inside the +// INSTANTIATE_TEST_SUITE_P() macro, so we have to have two definitions (with +// and without H264). 
+INSTANTIATE_TEST_SUITE_P( + All, + VideoStreamEncoderWithRealEncoderTest, + ::testing::Values(kVP8DisallowConversion, + kVP9DisallowConversion, + kAV1AllowConversion, + kMultiplexDisallowConversion, + kH264AllowConversion), + TestParametersVideoCodecAndAllowI420ConversionToString); +#else +INSTANTIATE_TEST_SUITE_P( + All, + VideoStreamEncoderWithRealEncoderTest, + ::testing::Values(kVP8DisallowConversion, + kVP9DisallowConversion, + kAV1AllowConversion, + kMultiplexDisallowConversion), + TestParametersVideoCodecAndAllowI420ConversionToString); +#endif + } // namespace webrtc diff --git a/webrtc.gni b/webrtc.gni index 3708345685..c0ff14fe51 100644 --- a/webrtc.gni +++ b/webrtc.gni @@ -59,6 +59,12 @@ declare_args() { # provided. rtc_exclude_metrics_default = build_with_chromium + # Setting this to true will define WEBRTC_EXCLUDE_SYSTEM_TIME which + # will tell the pre-processor to remove the default definition of the + # SystemTimeNanos() which is defined in rtc_base/system_time.cc. In + # that case a new implementation needs to be provided. + rtc_exclude_system_time = build_with_chromium + # Setting this to false will require the API user to pass in their own # SSLCertificateVerifier to verify the certificates presented from a # TLS-TURN server. In return disabling this saves around 100kb in the binary. @@ -85,9 +91,6 @@ declare_args() { # library that comes with WebRTC (i.e. rtc_build_ssl == 0). rtc_ssl_root = "" - # Selects fixed-point code where possible. - rtc_prefer_fixed_point = false - # Enable when an external authentication mechanism is used for performing # packet authentication for RTP packets instead of libsrtp. rtc_enable_external_auth = build_with_chromium @@ -115,16 +118,20 @@ declare_args() { # By default it's only enabled on desktop Linux (excludes ChromeOS) and # only when using the sysroot as PipeWire is not available in older and # supported Ubuntu and Debian distributions. 
- rtc_use_pipewire = is_desktop_linux && use_sysroot + rtc_use_pipewire = is_linux && use_sysroot # Set this to link PipeWire directly instead of using the dlopen. rtc_link_pipewire = false + # Set this to use certain PipeWire version + # Currently WebRTC supports PipeWire 0.2 and PipeWire 0.3 (default) + rtc_pipewire_version = "0.3" + # Enable to use the Mozilla internal settings. build_with_mozilla = false - # Enable use of Android AAudio which requires Android SDK 26 or above and - # NDK r16 or above. + # Experimental: enable use of Android AAudio which requires Android SDK 26 or above + # and NDK r16 or above. rtc_enable_android_aaudio = false # Set to "func", "block", "edge" for coverage generation. @@ -134,6 +141,8 @@ declare_args() { # See http://clang.llvm.org/docs/SanitizerCoverage.html . rtc_sanitize_coverage = "" + # Selects fixed-point code where possible. + rtc_prefer_fixed_point = false if (current_cpu == "arm" || current_cpu == "arm64") { rtc_prefer_fixed_point = true } @@ -155,6 +164,9 @@ declare_args() { rtc_use_h264 = proprietary_codecs && !is_android && !is_ios && !(is_win && !is_clang) + # Enable this flag to make webrtc::Mutex be implemented by absl::Mutex. + rtc_use_absl_mutex = false + # By default, use normal platform audio support or dummy audio, but don't # use file-based audio playout and record. rtc_use_dummy_audio_file_devices = false @@ -171,8 +183,9 @@ declare_args() { rtc_apprtcmobile_broadcast_extension = false } - # Determines whether Metal is available on iOS/macOS. - rtc_use_metal_rendering = is_mac || (is_ios && current_cpu == "arm64") + # Determines whether OpenGL is available on iOS/macOS. 
+ rtc_ios_macos_use_opengl_rendering = + !(is_ios && target_environment == "catalyst") # When set to false, builtin audio encoder/decoder factories and all the # audio codecs they depend on will not be included in libwebrtc.{a|lib} @@ -193,6 +206,11 @@ declare_args() { # doesn't assume /DUNICODE and /D_UNICODE but that it explicitly uses # wide character functions. rtc_win_undef_unicode = false + + # When set to true, a capturer implementation that uses the + # Windows.Graphics.Capture APIs will be available for use. This introduces a + # dependency on the Win 10 SDK v10.0.17763.0. + rtc_enable_win_wgc = is_win } if (!build_with_mozilla) { @@ -216,7 +234,6 @@ declare_args() { rtc_libvpx_build_vp9 = !build_with_mozilla rtc_build_opus = !build_with_mozilla rtc_build_ssl = !build_with_mozilla - rtc_build_usrsctp = !build_with_mozilla # Enable libevent task queues on platforms that support it. if (is_win || is_mac || is_ios || is_nacl || is_fuchsia || @@ -228,10 +245,6 @@ declare_args() { rtc_build_libevent = !build_with_mozilla } - # Build sources requiring GTK. NOTICE: This is not present in Chrome OS - # build environments, even if available for Chromium builds. - rtc_use_gtk = !build_with_chromium && !build_with_mozilla - # Excluded in Chromium since its prerequisites don't require Pulse Audio. rtc_include_pulse_audio = !build_with_chromium @@ -239,7 +252,16 @@ declare_args() { # standalone WebRTC. rtc_include_internal_audio_device = !build_with_chromium - # Include tests in standalone checkout. + # Set this to true to enable the avx2 support in webrtc. + # TODO: Make sure that AVX2 works also for non-clang compilers. + if (is_clang == true) { + rtc_enable_avx2 = true + } else { + rtc_enable_avx2 = false + } + + # Set this to true to build the unit tests. + # Disabled when building with Chromium or Mozilla. 
rtc_include_tests = !build_with_chromium && !build_with_mozilla # Set this to false to skip building code that also requires X11 extensions @@ -264,6 +286,14 @@ declare_args() { rtc_exclude_transient_suppressor = false } +declare_args() { + # Enable the dcsctp backend for DataChannels and related unittests + rtc_build_dcsctp = !build_with_mozilla && rtc_enable_sctp + + # Enable the usrsctp backend for DataChannels and related unittests + rtc_build_usrsctp = !build_with_mozilla && rtc_enable_sctp +} + # Make it possible to provide custom locations for some libraries (move these # up into declare_args should we need to actually use them for the GN build). rtc_libvpx_dir = "//third_party/libvpx" @@ -272,7 +302,7 @@ rtc_opus_dir = "//third_party/opus" # Desktop capturer is supported only on Windows, OSX and Linux. rtc_desktop_capture_supported = (is_win && current_os != "winuwp") || is_mac || - (is_linux && (rtc_use_x11_extensions || rtc_use_pipewire)) + ((is_linux || is_chromeos) && (rtc_use_x11_extensions || rtc_use_pipewire)) ############################################################################### # Templates @@ -323,16 +353,19 @@ set_defaults("rtc_test") { set_defaults("rtc_library") { configs = rtc_add_configs suppressed_configs = [] + absl_deps = [] } set_defaults("rtc_source_set") { configs = rtc_add_configs suppressed_configs = [] + absl_deps = [] } set_defaults("rtc_static_library") { configs = rtc_add_configs suppressed_configs = [] + absl_deps = [] } set_defaults("rtc_executable") { @@ -403,12 +436,20 @@ absl_define_config = "//third_party/abseil-cpp:absl_define_config" # that are testonly. absl_flags_config = webrtc_root + ":absl_flags_configs" +# WebRTC wrapper of Chromium's test() template. This template just adds some +# WebRTC only configuration in order to avoid to duplicate it for every WebRTC +# target. +# The parameter `is_xctest` is different from the one in the Chromium's test() +# template (and it is not forwarded to it). 
In rtc_test(), the argument +# `is_xctest` is used to avoid to take dependencies that are not needed +# in case the test is a real XCTest (using the XCTest framework). template("rtc_test") { test(target_name) { forward_variables_from(invoker, "*", [ "configs", + "is_xctest", "public_configs", "suppressed_configs", "visibility", @@ -432,8 +473,46 @@ template("rtc_test") { } if (!build_with_chromium && is_android) { android_manifest = webrtc_root + "test/android/AndroidManifest.xml" - min_sdk_version = 16 - deps += [ webrtc_root + "test:native_test_java" ] + use_raw_android_executable = false + min_sdk_version = 21 + target_sdk_version = 23 + deps += [ + "//build/android/gtest_apk:native_test_instrumentation_test_runner_java", + webrtc_root + "test:native_test_java", + ] + } + + # When not targeting a simulator, building //base/test:google_test_runner + # fails, so it is added only when the test is not a real XCTest and when + # targeting a simulator. + if (is_ios && target_cpu == "x64" && rtc_include_tests) { + if (!defined(invoker.is_xctest) || !invoker.is_xctest) { + xctest_module_target = "//base/test:google_test_runner" + } + } + + # If absl_deps is [], no action is needed. If not [], then it needs to be + # converted to //third_party/abseil-cpp:absl when build_with_chromium=true + # otherwise it just needs to be added to deps. + if (defined(absl_deps) && absl_deps != []) { + if (!defined(deps)) { + deps = [] + } + if (build_with_chromium) { + deps += [ "//third_party/abseil-cpp:absl" ] + } else { + deps += absl_deps + } + } + + if (using_sanitizer) { + if (is_linux) { + if (!defined(invoker.data)) { + data = [] + } + data += + [ "//third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6" ] + } } } } @@ -525,6 +604,20 @@ template("rtc_source_set") { if (defined(invoker.public_configs)) { public_configs += invoker.public_configs } + + # If absl_deps is [], no action is needed. 
If not [], then it needs to be + # converted to //third_party/abseil-cpp:absl when build_with_chromium=true + # otherwise it just needs to be added to deps. + if (absl_deps != []) { + if (!defined(deps)) { + deps = [] + } + if (build_with_chromium) { + deps += [ "//third_party/abseil-cpp:absl" ] + } else { + deps += absl_deps + } + } } } @@ -600,6 +693,20 @@ template("rtc_static_library") { if (defined(invoker.public_configs)) { public_configs += invoker.public_configs } + + # If absl_deps is [], no action is needed. If not [], then it needs to be + # converted to //third_party/abseil-cpp:absl when build_with_chromium=true + # otherwise it just needs to be added to deps. + if (absl_deps != []) { + if (!defined(deps)) { + deps = [] + } + if (build_with_chromium) { + deps += [ "//third_party/abseil-cpp:absl" ] + } else { + deps += absl_deps + } + } } } @@ -712,6 +819,20 @@ template("rtc_library") { if (defined(invoker.public_configs)) { public_configs += invoker.public_configs } + + # If absl_deps is [], no action is needed. If not [], then it needs to be + # converted to //third_party/abseil-cpp:absl when build_with_chromium=true + # otherwise it just needs to be added to deps. 
+ if (absl_deps != []) { + if (!defined(deps)) { + deps = [] + } + if (build_with_chromium) { + deps += [ "//third_party/abseil-cpp:absl" ] + } else { + deps += absl_deps + } + } } } @@ -826,50 +947,43 @@ template("rtc_shared_library") { } if (is_ios) { - set_defaults("rtc_ios_xctest_test") { - configs = rtc_add_configs - suppressed_configs = [] - } - - template("rtc_ios_xctest_test") { - ios_xctest_test(target_name) { - forward_variables_from(invoker, - "*", - [ - "configs", - "public_configs", - "suppressed_configs", - "visibility", - ]) - forward_variables_from(invoker, [ "visibility" ]) - if (!defined(visibility)) { - visibility = webrtc_default_visibility - } - configs += invoker.configs - configs -= rtc_remove_configs - configs -= invoker.suppressed_configs - public_configs = [ rtc_common_inherited_config ] - if (defined(invoker.public_configs)) { - public_configs += invoker.public_configs - } - } - } - # TODO: Generate module.modulemap file to enable use in Swift # projects. See "mac_framework_bundle_with_umbrella_header". 
template("ios_framework_bundle_with_umbrella_header") { forward_variables_from(invoker, [ "output_name" ]) + this_target_name = target_name umbrella_header_path = - "$target_gen_dir/$output_name.framework/Headers/$output_name.h" + "$target_gen_dir/$output_name.framework/WebRTC/$output_name.h" + + action_foreach("create_bracket_include_headers_$target_name") { + script = "//tools_webrtc/apple/copy_framework_header.py" + sources = invoker.sources + output_name = invoker.output_name + outputs = [ + "$target_gen_dir/$output_name.framework/WebRTC/{{source_file_part}}", + ] + args = [ + "--input", + "{{source}}", + "--output", + rebase_path(target_gen_dir, root_build_dir) + + "/$output_name.framework/WebRTC/{{source_file_part}}", + ] + } ios_framework_bundle(target_name) { - forward_variables_from(invoker, "*", []) - - deps += [ ":copy_umbrella_header_$target_name" ] + forward_variables_from(invoker, "*", [ "public_headers" ]) + public_headers = get_target_outputs( + ":create_bracket_include_headers_$this_target_name") + deps += [ + ":copy_umbrella_header_$target_name", + ":create_bracket_include_headers_$target_name", + ] } action("umbrella_header_$target_name") { - forward_variables_from(invoker, [ "public_headers" ]) + public_headers = get_target_outputs( + ":create_bracket_include_headers_$this_target_name") script = "//tools_webrtc/ios/generate_umbrella_header.py" @@ -879,12 +993,19 @@ if (is_ios) { rebase_path(umbrella_header_path, root_build_dir), "--sources", ] + public_headers + deps = [ ":create_bracket_include_headers_$this_target_name" ] } + if (target_environment == "catalyst") { + # Catalyst frameworks use the same layout as regular Mac frameworks. 
+ headers_dir = "Versions/A/Headers" + } else { + headers_dir = "Headers" + } copy("copy_umbrella_header_$target_name") { sources = [ umbrella_header_path ] outputs = - [ "$root_out_dir/$output_name.framework/Headers/$output_name.h" ] + [ "$root_out_dir/$output_name.framework/$headers_dir/$output_name.h" ] deps = [ ":umbrella_header_$target_name" ] } @@ -925,15 +1046,34 @@ if (is_mac) { ":copy_framework_headers_$this_target_name", ":copy_modulemap_$this_target_name", ":copy_umbrella_header_$this_target_name", + ":create_bracket_include_headers_$this_target_name", ":modulemap_$this_target_name", ":umbrella_header_$this_target_name", ] } + action_foreach("create_bracket_include_headers_$this_target_name") { + script = "//tools_webrtc/apple/copy_framework_header.py" + sources = invoker.sources + output_name = invoker.output_name + outputs = [ + "$target_gen_dir/$output_name.framework/WebRTC/{{source_file_part}}", + ] + args = [ + "--input", + "{{source}}", + "--output", + rebase_path(target_gen_dir, root_build_dir) + + "/$output_name.framework/WebRTC/{{source_file_part}}", + ] + } + bundle_data("copy_framework_headers_$this_target_name") { - forward_variables_from(invoker, [ "sources" ]) + sources = get_target_outputs( + ":create_bracket_include_headers_$this_target_name") outputs = [ "{{bundle_contents_dir}}/Headers/{{source_file_part}}" ] + deps = [ ":create_bracket_include_headers_$this_target_name" ] } action("modulemap_$this_target_name") { @@ -954,7 +1094,8 @@ if (is_mac) { } action("umbrella_header_$this_target_name") { - forward_variables_from(invoker, [ "sources" ]) + sources = get_target_outputs( + ":create_bracket_include_headers_$this_target_name") script = "//tools_webrtc/ios/generate_umbrella_header.py" @@ -964,6 +1105,7 @@ if (is_mac) { rebase_path(umbrella_header_path, root_build_dir), "--sources", ] + sources + deps = [ ":create_bracket_include_headers_$this_target_name" ] } bundle_data("copy_umbrella_header_$this_target_name") { @@ -1002,7 +1144,7 @@ 
if (is_android) { } no_build_hooks = true - not_needed(["android_manifest"]) + not_needed([ "android_manifest" ]) } } diff --git a/webrtc_lib_link_test.cc b/webrtc_lib_link_test.cc index 37e1b14eae..055bd969ff 100644 --- a/webrtc_lib_link_test.cc +++ b/webrtc_lib_link_test.cc @@ -65,9 +65,10 @@ void TestCase1ModularFactory() { auto peer_connection_factory = webrtc::CreateModularPeerConnectionFactory(std::move(pcf_deps)); webrtc::PeerConnectionInterface::RTCConfiguration rtc_config; - auto peer_connection = peer_connection_factory->CreatePeerConnection( - rtc_config, nullptr, nullptr, nullptr); - printf("peer_connection=%s\n", peer_connection == nullptr ? "nullptr" : "ok"); + auto result = peer_connection_factory->CreatePeerConnectionOrError( + rtc_config, PeerConnectionDependencies(nullptr)); + // Creation will fail because of null observer, but that's OK. + printf("peer_connection creation=%s\n", result.ok() ? "succeeded" : "failed"); } void TestCase2RegularFactory() { @@ -81,9 +82,10 @@ void TestCase2RegularFactory() { std::move(media_deps.video_encoder_factory), std::move(media_deps.video_decoder_factory), nullptr, nullptr); webrtc::PeerConnectionInterface::RTCConfiguration rtc_config; - auto peer_connection = peer_connection_factory->CreatePeerConnection( - rtc_config, nullptr, nullptr, nullptr); - printf("peer_connection=%s\n", peer_connection == nullptr ? "nullptr" : "ok"); + auto result = peer_connection_factory->CreatePeerConnectionOrError( + rtc_config, PeerConnectionDependencies(nullptr)); + // Creation will fail because of null observer, but that's OK. + printf("peer_connection creation=%s\n", result.ok() ? "succeeded" : "failed"); } } // namespace webrtc